Lines Matching +full:close +full:- +full:range

1 // SPDX-License-Identifier: GPL-2.0
8 #include "disk-io.h"
13 #include "delalloc-space.h"
16 #include "file-item.h"
41 * This value is different for compressed/non-compressed extents, thus
51 if (defrag1->root > defrag2->root) in compare_inode_defrag()
53 else if (defrag1->root < defrag2->root) in compare_inode_defrag()
54 return -1; in compare_inode_defrag()
55 else if (defrag1->ino > defrag2->ino) in compare_inode_defrag()
57 else if (defrag1->ino < defrag2->ino) in compare_inode_defrag()
58 return -1; in compare_inode_defrag()
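The fragments above show only the ordering branches of the comparator. As a hedged reconstruction (a sketch pieced together from those branches, not necessarily the exact upstream source), compare_inode_defrag() orders defrag records by root id first and inode number second, returning 0 only on an exact match:

static int compare_inode_defrag(struct inode_defrag *defrag1,
				struct inode_defrag *defrag2)
{
	/* Order by root id first, then by inode number. */
	if (defrag1->root > defrag2->root)
		return 1;
	if (defrag1->root < defrag2->root)
		return -1;
	if (defrag1->ino > defrag2->ino)
		return 1;
	if (defrag1->ino < defrag2->ino)
		return -1;
	return 0;
}

This total order over (root, ino) is what keys the fs_info->defrag_inodes red-black tree used below.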
73 struct btrfs_fs_info *fs_info = inode->root->fs_info; in btrfs_insert_inode_defrag()
79 p = &fs_info->defrag_inodes.rb_node; in btrfs_insert_inode_defrag()
86 p = &parent->rb_left; in btrfs_insert_inode_defrag()
88 p = &parent->rb_right; in btrfs_insert_inode_defrag()
95 if (defrag->transid < entry->transid) in btrfs_insert_inode_defrag()
96 entry->transid = defrag->transid; in btrfs_insert_inode_defrag()
97 entry->extent_thresh = min(defrag->extent_thresh, in btrfs_insert_inode_defrag()
98 entry->extent_thresh); in btrfs_insert_inode_defrag()
99 return -EEXIST; in btrfs_insert_inode_defrag()
102 set_bit(BTRFS_INODE_IN_DEFRAG, &inode->runtime_flags); in btrfs_insert_inode_defrag()
103 rb_link_node(&defrag->rb_node, parent, p); in btrfs_insert_inode_defrag()
104 rb_insert_color(&defrag->rb_node, &fs_info->defrag_inodes); in btrfs_insert_inode_defrag()
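The insertion above is the standard kernel rb-tree idiom: walk down from the root while remembering the parent and the link to take, stop on an exact match, otherwise link the new node and rebalance. A generic, self-contained sketch of the idiom (my_node and my_insert are hypothetical names for illustration, not part of btrfs):

#include <linux/errno.h>
#include <linux/rbtree.h>
#include <linux/types.h>

struct my_node {
	struct rb_node rb_node;
	u64 key;
};

static int my_insert(struct rb_root *root, struct my_node *new)
{
	struct rb_node **p = &root->rb_node;
	struct rb_node *parent = NULL;

	while (*p) {
		struct my_node *entry;

		parent = *p;
		entry = rb_entry(parent, struct my_node, rb_node);
		if (new->key < entry->key)
			p = &parent->rb_left;
		else if (new->key > entry->key)
			p = &parent->rb_right;
		else
			return -EEXIST;	/* duplicate: caller decides how to merge */
	}
	rb_link_node(&new->rb_node, parent, p);
	rb_insert_color(&new->rb_node, root);
	return 0;
}

btrfs_insert_inode_defrag() follows the same shape, except that on a duplicate it merges transid and extent_thresh into the existing entry before returning -EEXIST, as the fragments above show.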
125 struct btrfs_root *root = inode->root; in btrfs_add_inode_defrag()
126 struct btrfs_fs_info *fs_info = root->fs_info; in btrfs_add_inode_defrag()
133 if (test_bit(BTRFS_INODE_IN_DEFRAG, &inode->runtime_flags)) in btrfs_add_inode_defrag()
140 defrag->ino = btrfs_ino(inode); in btrfs_add_inode_defrag()
141 defrag->transid = btrfs_get_root_last_trans(root); in btrfs_add_inode_defrag()
142 defrag->root = btrfs_root_id(root); in btrfs_add_inode_defrag()
143 defrag->extent_thresh = extent_thresh; in btrfs_add_inode_defrag()
145 spin_lock(&fs_info->defrag_inodes_lock); in btrfs_add_inode_defrag()
146 if (!test_bit(BTRFS_INODE_IN_DEFRAG, &inode->runtime_flags)) { in btrfs_add_inode_defrag()
149 * and then re-read this inode, this new inode doesn't have in btrfs_add_inode_defrag()
158 spin_unlock(&fs_info->defrag_inodes_lock); in btrfs_add_inode_defrag()
177 spin_lock(&fs_info->defrag_inodes_lock); in btrfs_pick_defrag_inode()
178 p = fs_info->defrag_inodes.rb_node; in btrfs_pick_defrag_inode()
185 p = parent->rb_left; in btrfs_pick_defrag_inode()
187 p = parent->rb_right; in btrfs_pick_defrag_inode()
201 rb_erase(parent, &fs_info->defrag_inodes); in btrfs_pick_defrag_inode()
202 spin_unlock(&fs_info->defrag_inodes_lock); in btrfs_pick_defrag_inode()
210 spin_lock(&fs_info->defrag_inodes_lock); in btrfs_cleanup_defrag_inodes()
213 &fs_info->defrag_inodes, rb_node) in btrfs_cleanup_defrag_inodes()
216 fs_info->defrag_inodes = RB_ROOT; in btrfs_cleanup_defrag_inodes()
218 spin_unlock(&fs_info->defrag_inodes_lock); in btrfs_cleanup_defrag_inodes()
229 struct btrfs_ioctl_defrag_range_args range; in btrfs_run_defrag_inode() local
234 if (test_bit(BTRFS_FS_STATE_REMOUNTING, &fs_info->fs_state)) in btrfs_run_defrag_inode()
240 inode_root = btrfs_get_fs_root(fs_info, defrag->root, true); in btrfs_run_defrag_inode()
246 inode = btrfs_iget(defrag->ino, inode_root); in btrfs_run_defrag_inode()
259 clear_bit(BTRFS_INODE_IN_DEFRAG, &BTRFS_I(inode)->runtime_flags); in btrfs_run_defrag_inode()
260 memset(&range, 0, sizeof(range)); in btrfs_run_defrag_inode()
261 range.len = (u64)-1; in btrfs_run_defrag_inode()
262 range.start = cur; in btrfs_run_defrag_inode()
263 range.extent_thresh = defrag->extent_thresh; in btrfs_run_defrag_inode()
264 file_ra_state_init(ra, inode->i_mapping); in btrfs_run_defrag_inode()
266 sb_start_write(fs_info->sb); in btrfs_run_defrag_inode()
267 ret = btrfs_defrag_file(inode, ra, &range, defrag->transid, in btrfs_run_defrag_inode()
269 sb_end_write(fs_info->sb); in btrfs_run_defrag_inode()
275 cur = max(cur + fs_info->sectorsize, range.start); in btrfs_run_defrag_inode()
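After each btrfs_defrag_file() pass, range.start holds the position where the scan stopped, and taking max(cur + sectorsize, range.start) guarantees forward progress even when the pass could not defrag anything. As a worked example (assuming a 4 KiB sectorsize purely for illustration): if cur was at 1 MiB and the pass advanced to 1 MiB + 128 KiB, the next pass resumes there; if the pass did not advance at all, the next pass still starts at 1 MiB + 4 KiB rather than spinning on the same offset.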
292 atomic_inc(&fs_info->defrag_running); in btrfs_run_defrag_inodes()
297 if (test_bit(BTRFS_FS_STATE_REMOUNTING, &fs_info->fs_state)) in btrfs_run_defrag_inodes()
315 first_ino = defrag->ino + 1; in btrfs_run_defrag_inodes()
316 root_objectid = defrag->root; in btrfs_run_defrag_inodes()
320 atomic_dec(&fs_info->defrag_running); in btrfs_run_defrag_inodes()
326 wake_up(&fs_info->transaction_wait); in btrfs_run_defrag_inodes()
331 * Check if two block addresses are close, used by defrag.
335 if (blocknr < other && other - (blocknr + blocksize) < SZ_32K) in close_blocks()
337 if (blocknr > other && blocknr - (other + blocksize) < SZ_32K) in close_blocks()
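Both branches apply the same test: the gap between the end of one block and the start of the other must be under 32 KiB. Reassembled as a standalone helper (a sketch built from the two lines above, assuming the surrounding kernel headers that provide SZ_32K):

static bool close_blocks(u64 blocknr, u64 other, u32 blocksize)
{
	/* "Close" means the gap between the two blocks is under 32 KiB. */
	if (blocknr < other && other - (blocknr + blocksize) < SZ_32K)
		return true;
	if (blocknr > other && blocknr - (other + blocksize) < SZ_32K)
		return true;
	return false;
}

For example, with a 16 KiB blocksize, blocknr = X and other = X + 32 KiB leave a 16 KiB gap and count as close, while other = X + 64 KiB leaves a 48 KiB gap and does not.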
344 * disk order is close to key order.
352 struct btrfs_fs_info *fs_info = root->fs_info; in btrfs_realloc_node()
353 const u32 blocksize = fs_info->nodesize; in btrfs_realloc_node()
354 const int end_slot = btrfs_header_nritems(parent) - 1; in btrfs_realloc_node()
366 if (unlikely(trans->transaction != fs_info->running_transaction || in btrfs_realloc_node()
367 trans->transid != fs_info->generation)) { in btrfs_realloc_node()
368 btrfs_abort_transaction(trans, -EUCLEAN); in btrfs_realloc_node()
371 parent->start, btrfs_root_id(root), trans->transid, in btrfs_realloc_node()
372 fs_info->running_transaction->transid, in btrfs_realloc_node()
373 fs_info->generation); in btrfs_realloc_node()
374 return -EUCLEAN; in btrfs_realloc_node()
385 bool close = true; in btrfs_realloc_node() local
397 other = btrfs_node_blockptr(parent, i - 1); in btrfs_realloc_node()
398 close = close_blocks(blocknr, other, blocksize); in btrfs_realloc_node()
400 if (!close && i < end_slot) { in btrfs_realloc_node()
402 close = close_blocks(blocknr, other, blocksize); in btrfs_realloc_node()
404 if (close) { in btrfs_realloc_node()
419 (end_slot - i) * blocksize), in btrfs_realloc_node()
426 search_start = cur->start; in btrfs_realloc_node()
427 last_block = cur->start; in btrfs_realloc_node()
452 if (!test_bit(BTRFS_ROOT_SHAREABLE, &root->state)) in btrfs_defrag_leaves()
457 ret = -ENOMEM; in btrfs_defrag_leaves()
461 level = btrfs_header_level(root->node); in btrfs_defrag_leaves()
466 if (root->defrag_progress.objectid == 0) { in btrfs_defrag_leaves()
472 root->defrag_max.objectid = 0; in btrfs_defrag_leaves()
474 btrfs_node_key_to_cpu(root_node, &root->defrag_max, in btrfs_defrag_leaves()
475 nritems - 1); in btrfs_defrag_leaves()
480 memcpy(&key, &root->defrag_progress, sizeof(key)); in btrfs_defrag_leaves()
483 path->keep_locks = 1; in btrfs_defrag_leaves()
495 * leaves from path->nodes[1], so set lowest_level to 1 to avoid later in btrfs_defrag_leaves()
498 path->lowest_level = 1; in btrfs_defrag_leaves()
505 if (!path->nodes[1]) { in btrfs_defrag_leaves()
512 * path->slots[1]. in btrfs_defrag_leaves()
514 ASSERT(path->locks[1] != 0); in btrfs_defrag_leaves()
516 path->nodes[1], 0, in btrfs_defrag_leaves()
518 &root->defrag_progress); in btrfs_defrag_leaves()
520 WARN_ON(ret == -EAGAIN); in btrfs_defrag_leaves()
526 * without COWing; this is because even with path->keep_locks = 1, in btrfs_defrag_leaves()
528 * node when path->slots[node_level - 1] does not point to the last in btrfs_defrag_leaves()
532 path->slots[1] = btrfs_header_nritems(path->nodes[1]); in btrfs_defrag_leaves()
536 memcpy(&root->defrag_progress, &key, sizeof(key)); in btrfs_defrag_leaves()
537 ret = -EAGAIN; in btrfs_defrag_leaves()
541 if (ret == -EAGAIN) { in btrfs_defrag_leaves()
542 if (root->defrag_max.objectid > root->defrag_progress.objectid) in btrfs_defrag_leaves()
544 if (root->defrag_max.type > root->defrag_progress.type) in btrfs_defrag_leaves()
546 if (root->defrag_max.offset > root->defrag_progress.offset) in btrfs_defrag_leaves()
551 if (ret != -EAGAIN) in btrfs_defrag_leaves()
552 memset(&root->defrag_progress, 0, in btrfs_defrag_leaves()
553 sizeof(root->defrag_progress)); in btrfs_defrag_leaves()
563 struct btrfs_fs_info *fs_info = root->fs_info; in btrfs_defrag_root()
566 if (test_and_set_bit(BTRFS_ROOT_DEFRAG_RUNNING, &root->state)) in btrfs_defrag_root()
584 if (btrfs_fs_closing(fs_info) || ret != -EAGAIN) in btrfs_defrag_root()
589 ret = -EAGAIN; in btrfs_defrag_root()
593 clear_bit(BTRFS_ROOT_DEFRAG_RUNNING, &root->state); in btrfs_defrag_root()
602 * - No extent_map will be added to inode->extent_tree
605 * - Extra optimization to skip file extents older than @newer_than
619 struct btrfs_root *root = inode->root; in defrag_get_extent()
629 ret = -ENOMEM; in defrag_get_extent()
656 path.slots[0] = btrfs_header_nritems(path.nodes[0]) - 1; in defrag_get_extent()
668 path.slots[0]--; in defrag_get_extent()
689 /* It's beyond our target range, so definitely no extent found */ in defrag_get_extent()
694 * | |<- File extent ->| in defrag_get_extent()
695 * \- start in defrag_get_extent()
700 em->start = start; in defrag_get_extent()
701 em->disk_bytenr = EXTENT_MAP_HOLE; in defrag_get_extent()
702 em->disk_num_bytes = 0; in defrag_get_extent()
703 em->ram_bytes = 0; in defrag_get_extent()
704 em->offset = 0; in defrag_get_extent()
705 em->len = key.offset - start; in defrag_get_extent()
714 * |<- file extent ->| | in defrag_get_extent()
715 * \- start in defrag_get_extent()
749 struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree; in defrag_lookup_extent()
750 struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree; in defrag_lookup_extent()
752 const u32 sectorsize = BTRFS_I(inode)->root->fs_info->sectorsize; in defrag_lookup_extent()
758 read_lock(&em_tree->lock); in defrag_lookup_extent()
760 read_unlock(&em_tree->lock); in defrag_lookup_extent()
763 * We can get a merged extent; in that case, we need to re-search in defrag_lookup_extent()
771 if (em && (em->flags & EXTENT_FLAG_MERGED)) { in defrag_lookup_extent()
778 u64 end = start + sectorsize - 1; in defrag_lookup_extent()
799 return fs_info->max_extent_size; in get_extent_max_capacity()
810 if (em->start + em->len >= i_size_read(inode)) in defrag_check_next_extent()
819 next = defrag_lookup_extent(inode, em->start + em->len, newer_than, locked); in defrag_check_next_extent()
821 if (!next || next->disk_bytenr >= EXTENT_MAP_LAST_BYTE) in defrag_check_next_extent()
823 if (next->flags & EXTENT_FLAG_PREALLOC) in defrag_check_next_extent()
829 if (next->len >= get_extent_max_capacity(fs_info, em)) in defrag_check_next_extent()
832 if (next->generation < newer_than) in defrag_check_next_extent()
835 if (next->len >= extent_thresh) in defrag_check_next_extent()
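Taken together, the checks above decide whether the extent that follows @em is a plausible merge partner, so that defragging the current extent can actually reduce the extent count. A condensed sketch of that predicate (next_is_merge_candidate is a hypothetical name; the real logic lives inline in defrag_check_next_extent()):

static bool next_is_merge_candidate(const struct extent_map *next,
				    u64 max_capacity, u32 extent_thresh,
				    u64 newer_than)
{
	if (!next || next->disk_bytenr >= EXTENT_MAP_LAST_BYTE)
		return false;	/* no further extent, or a hole */
	if (next->flags & EXTENT_FLAG_PREALLOC)
		return false;	/* preallocated space is left alone */
	if (next->len >= max_capacity)
		return false;	/* already as large as an extent can get */
	if (next->generation < newer_than)
		return false;	/* too old for this defrag run */
	if (next->len >= extent_thresh)
		return false;	/* big enough already, merging gains nothing */
	return true;
}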
849 * - Returned page is locked and has been set up properly.
850 * - No ordered extent exists in the page.
851 * - The page is uptodate.
858 struct address_space *mapping = inode->vfs_inode.i_mapping; in defrag_prepare_one_folio()
861 u64 page_end = page_start + PAGE_SIZE - 1; in defrag_prepare_one_folio()
873 * Since we can defragment files opened read-only, we can encounter in defrag_prepare_one_folio()
883 return ERR_PTR(-ETXTBSY); in defrag_prepare_one_folio()
893 /* Wait for any existing ordered extent in the range */ in defrag_prepare_one_folio()
897 lock_extent(&inode->io_tree, page_start, page_end, &cached_state); in defrag_prepare_one_folio()
899 unlock_extent(&inode->io_tree, page_start, page_end, in defrag_prepare_one_folio()
912 if (folio->mapping != mapping || !folio->private) { in defrag_prepare_one_folio()
920 * Now the page range has no ordered extent any more. Read the page to in defrag_prepare_one_folio()
926 if (folio->mapping != mapping || !folio->private) { in defrag_prepare_one_folio()
934 return ERR_PTR(-EIO); in defrag_prepare_one_folio()
957 * @locked: whether the extent lock is already held on the range
966 struct btrfs_fs_info *fs_info = inode->root->fs_info; in defrag_collect_targets()
978 em = defrag_lookup_extent(&inode->vfs_inode, cur, newer_than, locked); in defrag_collect_targets()
988 if (em->disk_bytenr == EXTENT_MAP_INLINE && in defrag_collect_targets()
989 em->len <= inode->root->fs_info->max_inline) in defrag_collect_targets()
993 if (em->disk_bytenr == EXTENT_MAP_HOLE || in defrag_collect_targets()
994 (em->flags & EXTENT_FLAG_PREALLOC)) in defrag_collect_targets()
998 if (em->generation < newer_than) in defrag_collect_targets()
1002 if (em->generation == (u64)-1) in defrag_collect_targets()
1009 range_len = em->len - (cur - em->start); in defrag_collect_targets()
1011 * If this range of the extent map is already flagged for delalloc, in defrag_collect_targets()
1018 * because we do the space reservation while holding the range in defrag_collect_targets()
1020 * extent, requires locking the range; in defrag_collect_targets()
1025 * extent). If we mark pages in an adjacent range for defrag, in defrag_collect_targets()
1026 * then we will have a larger contiguous range for delalloc, in defrag_collect_targets()
1030 if (test_range_bit_exists(&inode->io_tree, cur, cur + range_len - 1, in defrag_collect_targets()
1042 if (em->len >= extent_thresh) in defrag_collect_targets()
1049 if (em->len >= get_extent_max_capacity(fs_info, em)) in defrag_collect_targets()
1058 if (em->disk_bytenr == EXTENT_MAP_INLINE) in defrag_collect_targets()
1061 next_mergeable = defrag_check_next_extent(&inode->vfs_inode, em, in defrag_collect_targets()
1069 last = list_entry(target_list->prev, in defrag_collect_targets()
1072 if (last->start + last->len != cur) in defrag_collect_targets()
1080 range_len = min(extent_map_end(em), start + len) - cur; in defrag_collect_targets()
1083 * last range of the target list. in defrag_collect_targets()
1088 last = list_entry(target_list->prev, in defrag_collect_targets()
1090 ASSERT(last->start + last->len <= cur); in defrag_collect_targets()
1091 if (last->start + last->len == cur) { in defrag_collect_targets()
1093 last->len += range_len; in defrag_collect_targets()
1103 ret = -ENOMEM; in defrag_collect_targets()
1106 new->start = cur; in defrag_collect_targets()
1107 new->len = range_len; in defrag_collect_targets()
1108 list_add_tail(&new->list, target_list); in defrag_collect_targets()
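The targets collected here are plain (start, len) byte ranges strung on a list. Judging from the fields used above (start, len, and the list head), the backing structure plausibly looks like this reconstruction:

struct defrag_target_range {
	struct list_head list;
	u64 start;	/* file offset of the range to defrag */
	u64 len;	/* length in bytes, sector aligned */
};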
1119 list_del_init(&entry->list); in defrag_collect_targets()
1127 * Otherwise, we can only go to the end of the specified range. in defrag_collect_targets()
1141 * Defrag one contiguous target range.
1144 * @target: target range to defrag
1145 * @pages: locked pages covering the defrag range
1150 * - Pages are prepared
1151 * Pages should be locked, with no ordered extent in the page range,
1154 * - Extent bits are locked
1161 struct btrfs_fs_info *fs_info = inode->root->fs_info; in defrag_one_locked_target()
1163 const u64 start = target->start; in defrag_one_locked_target()
1164 const u64 len = target->len; in defrag_one_locked_target()
1165 unsigned long last_index = (start + len - 1) >> PAGE_SHIFT; in defrag_one_locked_target()
1167 unsigned long first_index = folios[0]->index; in defrag_one_locked_target()
1171 ASSERT(last_index - first_index + 1 <= nr_pages); in defrag_one_locked_target()
1176 clear_extent_bit(&inode->io_tree, start, start + len - 1, in defrag_one_locked_target()
1179 set_extent_bit(&inode->io_tree, start, start + len - 1, in defrag_one_locked_target()
1183 for (i = start_index - first_index; i <= last_index - first_index; i++) { in defrag_one_locked_target()
1202 const u32 sectorsize = inode->root->fs_info->sectorsize; in defrag_one_range()
1203 u64 last_index = (start + len - 1) >> PAGE_SHIFT; in defrag_one_range()
1205 unsigned int nr_pages = last_index - start_index + 1; in defrag_one_range()
1214 return -ENOMEM; in defrag_one_range()
1228 /* Lock the pages range */ in defrag_one_range()
1229 lock_extent(&inode->io_tree, start_index << PAGE_SHIFT, in defrag_one_range()
1230 (last_index << PAGE_SHIFT) + PAGE_SIZE - 1, in defrag_one_range()
1233 * Now we have a consistent view of the extent map; re-check in defrag_one_range()
1234 * which range really needs to be defragged. in defrag_one_range()
1237 * so that we won't relock the extent range and cause deadlock. in defrag_one_range()
1253 list_del_init(&entry->list); in defrag_one_range()
1257 unlock_extent(&inode->io_tree, start_index << PAGE_SHIFT, in defrag_one_range()
1258 (last_index << PAGE_SHIFT) + PAGE_SIZE - 1, in defrag_one_range()
1277 const u32 sectorsize = inode->root->fs_info->sectorsize; in defrag_one_cluster()
1290 u32 range_len = entry->len; in defrag_one_cluster()
1300 (max_sectors - *sectors_defragged) * sectorsize); in defrag_one_cluster()
1304 * our range may already be invalid (e.g. hole punched). in defrag_one_cluster()
1305 * Skip if our range is before last_scanned_ret, as there is in defrag_one_cluster()
1306 * no need to defrag the range anymore. in defrag_one_cluster()
1308 if (entry->start + range_len <= *last_scanned_ret) in defrag_one_cluster()
1311 page_cache_sync_readahead(inode->vfs_inode.i_mapping, in defrag_one_cluster()
1312 ra, NULL, entry->start >> PAGE_SHIFT, in defrag_one_cluster()
1313 ((entry->start + range_len - 1) >> PAGE_SHIFT) - in defrag_one_cluster()
1314 (entry->start >> PAGE_SHIFT) + 1); in defrag_one_cluster()
1316 * Here we may not defrag any range if holes are punched before in defrag_one_cluster()
1321 ret = defrag_one_range(inode, entry->start, range_len, in defrag_one_cluster()
1327 inode->root->fs_info->sectorsize_bits; in defrag_one_cluster()
1331 list_del_init(&entry->list); in defrag_one_cluster()
1344 * @range: defrag options including range and flags
1350 * Return >=0 for the number of sectors defragged, and range->start will be updated
1353 * defragging the whole range).
1356 struct btrfs_ioctl_defrag_range_args *range, in btrfs_defrag_file() argument
1364 bool do_compress = (range->flags & BTRFS_DEFRAG_RANGE_COMPRESS); in btrfs_defrag_file()
1367 u32 extent_thresh = range->extent_thresh; in btrfs_defrag_file()
1375 if (range->start >= isize) in btrfs_defrag_file()
1376 return -EINVAL; in btrfs_defrag_file()
1379 if (range->compress_type >= BTRFS_NR_COMPRESS_TYPES) in btrfs_defrag_file()
1380 return -EINVAL; in btrfs_defrag_file()
1381 if (range->compress_type) in btrfs_defrag_file()
1382 compress_type = range->compress_type; in btrfs_defrag_file()
1388 if (range->start + range->len > range->start) { in btrfs_defrag_file()
1389 /* Got a specific range */ in btrfs_defrag_file()
1390 last_byte = min(isize, range->start + range->len); in btrfs_defrag_file()
1396 /* Align the range */ in btrfs_defrag_file()
1397 cur = round_down(range->start, fs_info->sectorsize); in btrfs_defrag_file()
1398 last_byte = round_up(last_byte, fs_info->sectorsize) - 1; in btrfs_defrag_file()
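As a worked example of the alignment (assuming a 4 KiB sectorsize, which the fragment itself does not state): range->start = 5000 rounds down to cur = 4096, and last_byte = 12000 rounds up to 12288 and then becomes 12287, so the main loop walks the sector-aligned, inclusive range [4096, 12287].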
1401 * Make writeback start from the beginning of the range, so that the in btrfs_defrag_file()
1402 * defrag range can be written sequentially. in btrfs_defrag_file()
1405 if (start_index < inode->i_mapping->writeback_index) in btrfs_defrag_file()
1406 inode->i_mapping->writeback_index = start_index; in btrfs_defrag_file()
1414 ret = -EAGAIN; in btrfs_defrag_file()
1420 (SZ_256K >> PAGE_SHIFT)) << PAGE_SHIFT) - 1; in btrfs_defrag_file()
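The cluster end computed here is the last byte of a 256 KiB window that starts at the page containing cur; with 4 KiB pages (an assumption, since PAGE_SIZE is architecture dependent) that window is SZ_256K >> PAGE_SHIFT = 64 pages, so each loop iteration defragments at most one 256 KiB cluster.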
1425 ret = -ETXTBSY; in btrfs_defrag_file()
1429 if (!(inode->i_sb->s_flags & SB_ACTIVE)) { in btrfs_defrag_file()
1434 BTRFS_I(inode)->defrag_compress = compress_type; in btrfs_defrag_file()
1436 cluster_end + 1 - cur, extent_thresh, in btrfs_defrag_file()
1441 balance_dirty_pages_ratelimited(inode->i_mapping); in btrfs_defrag_file()
1455 * Update range.start for autodefrag; this will indicate where to start in btrfs_defrag_file()
1458 range->start = cur; in btrfs_defrag_file()
1464 if (range->flags & BTRFS_DEFRAG_RANGE_START_IO) { in btrfs_defrag_file()
1465 filemap_flush(inode->i_mapping); in btrfs_defrag_file()
1467 &BTRFS_I(inode)->runtime_flags)) in btrfs_defrag_file()
1468 filemap_flush(inode->i_mapping); in btrfs_defrag_file()
1470 if (range->compress_type == BTRFS_COMPRESS_LZO) in btrfs_defrag_file()
1472 else if (range->compress_type == BTRFS_COMPRESS_ZSTD) in btrfs_defrag_file()
1478 BTRFS_I(inode)->defrag_compress = BTRFS_COMPRESS_NONE; in btrfs_defrag_file()
1494 return -ENOMEM; in btrfs_auto_defrag_init()