Lines Matching +full:edge +full:- +full:offset
1 // SPDX-License-Identifier: GPL-2.0
10 #include "disk-io.h"
14 #include "delayed-ref.h"
17 #include "tree-mod-log.h"
20 #include "extent-tree.h"
22 #include "tree-checker.h"
30 u64 offset; member
42 u64 offset = key->offset; in check_extent_in_eb() local
48 if (!ctx->ignore_extent_item_pos && in check_extent_in_eb()
56 if (ctx->extent_item_pos < data_offset || in check_extent_in_eb()
57 ctx->extent_item_pos >= data_offset + data_len) in check_extent_in_eb()
59 offset += ctx->extent_item_pos - data_offset; in check_extent_in_eb()
62 if (!ctx->indirect_ref_iterator || !ctx->cache_lookup) in check_extent_in_eb()
65 cached = ctx->cache_lookup(eb->start, ctx->user_ctx, &root_ids, in check_extent_in_eb()
73 ret = ctx->indirect_ref_iterator(key->objectid, offset, in check_extent_in_eb()
75 ctx->user_ctx); in check_extent_in_eb()
83 return -ENOMEM; in check_extent_in_eb()
85 e->next = *eie; in check_extent_in_eb()
86 e->inum = key->objectid; in check_extent_in_eb()
87 e->offset = offset; in check_extent_in_eb()
88 e->num_bytes = data_len; in check_extent_in_eb()
99 eie_next = eie->next; in free_inode_elem_list()
132 if (disk_byte != ctx->bytenr) in find_extent_in_eb()
160 * ref->count >0:
161 * - incremented when a ref->count transitions to >0
162 * - decremented when a ref->count transitions to <1
193 return (sc && sc->share_count > 1) ? BACKREF_FOUND_SHARED : 0; in extent_is_shared()
203 return -ENOMEM; in btrfs_prelim_ref_init()
219 * A -1 return indicates ref1 is a 'lower' block than ref2, while 1 indicates a 'higher' block.
225 if (ref1->level < ref2->level) in prelim_ref_compare()
226 return -1; in prelim_ref_compare()
227 if (ref1->level > ref2->level) in prelim_ref_compare()
229 if (ref1->root_id < ref2->root_id) in prelim_ref_compare()
230 return -1; in prelim_ref_compare()
231 if (ref1->root_id > ref2->root_id) in prelim_ref_compare()
233 if (ref1->key_for_search.type < ref2->key_for_search.type) in prelim_ref_compare()
234 return -1; in prelim_ref_compare()
235 if (ref1->key_for_search.type > ref2->key_for_search.type) in prelim_ref_compare()
237 if (ref1->key_for_search.objectid < ref2->key_for_search.objectid) in prelim_ref_compare()
238 return -1; in prelim_ref_compare()
239 if (ref1->key_for_search.objectid > ref2->key_for_search.objectid) in prelim_ref_compare()
241 if (ref1->key_for_search.offset < ref2->key_for_search.offset) in prelim_ref_compare()
242 return -1; in prelim_ref_compare()
243 if (ref1->key_for_search.offset > ref2->key_for_search.offset) in prelim_ref_compare()
245 if (ref1->parent < ref2->parent) in prelim_ref_compare()
246 return -1; in prelim_ref_compare()
247 if (ref1->parent > ref2->parent) in prelim_ref_compare()
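The chain above is a standard lexicographic three-way comparator: fields are compared in priority order and the first difference decides. A runnable user-space reduction with simplified, illustrative types (not the kernel's definitions):

	#include <stdio.h>

	struct key { unsigned long long objectid, offset; unsigned char type; };
	struct ref {
		int level;
		unsigned long long root_id, parent;
		struct key key;
	};

	/* Three-way helper folding the repeated "< is -1, > is 1" pattern. */
	static int cmp_u64(unsigned long long a, unsigned long long b)
	{
		return (a > b) - (a < b);
	}

	static int ref_compare(const struct ref *a, const struct ref *b)
	{
		int ret;

		if ((ret = cmp_u64(a->level, b->level)))
			return ret;
		if ((ret = cmp_u64(a->root_id, b->root_id)))
			return ret;
		if ((ret = cmp_u64(a->key.type, b->key.type)))
			return ret;
		if ((ret = cmp_u64(a->key.objectid, b->key.objectid)))
			return ret;
		if ((ret = cmp_u64(a->key.offset, b->key.offset)))
			return ret;
		return cmp_u64(a->parent, b->parent);
	}

	int main(void)
	{
		struct ref a = { .level = 1, .root_id = 5 };
		struct ref b = { .level = 1, .root_id = 7 };

		printf("%d\n", ref_compare(&a, &b));	/* -1: same level, smaller root_id */
		return 0;
	}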
260 sc->share_count--; in update_share_count()
262 sc->share_count++; in update_share_count()
264 if (newref->root_id == btrfs_root_id(sc->root) && in update_share_count()
265 newref->wanted_disk_byte == sc->data_bytenr && in update_share_count()
266 newref->key_for_search.objectid == sc->inum) in update_share_count()
267 sc->self_ref_count += newref->count; in update_share_count()
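The ++/-- pair above implements the transition rule from the earlier comment: the counter follows zero crossings of ref->count, not every update. A runnable user-space reduction (illustrative types; the kernel's update_share_count() additionally tracks self references, as the last lines show):

	#include <stdio.h>

	struct share_check { int share_count; };

	/* Count only transitions across zero, never plain magnitude changes. */
	static void update_count(struct share_check *sc, int oldcount, int newcount)
	{
		if (oldcount > 0 && newcount < 1)
			sc->share_count--;	/* ref->count transitioned to <1 */
		else if (oldcount < 1 && newcount > 0)
			sc->share_count++;	/* ref->count transitioned to >0 */
	}

	int main(void)
	{
		struct share_check sc = { 0 };

		update_count(&sc, 0, 2);	/* 0 -> 2: counted once */
		update_count(&sc, 2, 5);	/* stays > 0: no change */
		update_count(&sc, 5, 0);	/* drops to < 1: counted down */
		printf("share_count = %d\n", sc.share_count);	/* prints 0 */
		return 0;
	}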
287 root = &preftree->root; in prelim_ref_insert()
288 p = &root->rb_root.rb_node; in prelim_ref_insert()
295 p = &(*p)->rb_left; in prelim_ref_insert()
297 p = &(*p)->rb_right; in prelim_ref_insert()
301 struct extent_inode_elem *eie = ref->inode_list; in prelim_ref_insert()
303 while (eie && eie->next) in prelim_ref_insert()
304 eie = eie->next; in prelim_ref_insert()
307 ref->inode_list = newref->inode_list; in prelim_ref_insert()
309 eie->next = newref->inode_list; in prelim_ref_insert()
311 preftree->count); in prelim_ref_insert()
313 * A delayed ref can have newref->count < 0. in prelim_ref_insert()
314 * The ref->count is updated to follow any BTRFS_[ADD|DROP]_DELAYED_REF actions. in prelim_ref_insert()
317 update_share_count(sc, ref->count, in prelim_ref_insert()
318 ref->count + newref->count, newref); in prelim_ref_insert()
319 ref->count += newref->count; in prelim_ref_insert()
325 update_share_count(sc, 0, newref->count, newref); in prelim_ref_insert()
326 preftree->count++; in prelim_ref_insert()
327 trace_btrfs_prelim_ref_insert(fs_info, newref, NULL, preftree->count); in prelim_ref_insert()
328 rb_link_node(&newref->rbnode, parent, p); in prelim_ref_insert()
329 rb_insert_color_cached(&newref->rbnode, root, leftmost); in prelim_ref_insert()
341 &preftree->root.rb_root, rbnode) { in prelim_release()
342 free_inode_elem_list(ref->inode_list); in prelim_release()
346 preftree->root = RB_ROOT_CACHED; in prelim_release()
347 preftree->count = 0; in prelim_release()
352 * - obtaining the parent is the goal
353 * - if you add a key, you must know that it is a correct key
354 * - if you cannot add the parent or a correct key, then we will look into the
361 * --------------------+--------+----------+--------+----------
362 * parent logical | y | - | - | -
363 * key to resolve | - | y | y | y
364 * tree block logical | - | - | - | -
367 * - column 1: we have the parent -> done
368 * - column 2, 3, 4: we use the key to find the parent
374 * --------------------+--------+----------+--------+----------
375 * parent logical | y | - | y | -
376 * key to resolve | - | - | - | y
378 * root for resolving | - | y | y | y
380 * - column 1, 3: we have the parent -> done
381 * - column 2: we take the first key from the block to find the parent
383 * - column 4: we use the key to find the parent
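In code terms, the tables reduce to one branch when a backref item is parsed; a hedged sketch, not verbatim kernel code (parent_bytenr, root_id, key and level stand in for values parsed from the item, and key may be NULL when it is not known yet):

	if (parent_bytenr) {
		/* Shared ref: the parent block is known ("parent logical" row), done. */
		ret = add_direct_ref(fs_info, preftrees, level, parent_bytenr,
				     bytenr, 1, NULL, GFP_NOFS);
	} else {
		/*
		 * Indirect ref: remember the root (and key, if any) for later
		 * resolution; a NULL key routes the ref into the
		 * indirect_missing_keys tree until add_missing_keys() runs.
		 */
		ret = add_indirect_ref(fs_info, preftrees, root_id, key, level,
				       bytenr, 1, NULL, GFP_NOFS);
	}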
401 return -ENOMEM; in add_prelim_ref()
403 ref->root_id = root_id; in add_prelim_ref()
405 ref->key_for_search = *key; in add_prelim_ref()
407 memset(&ref->key_for_search, 0, sizeof(ref->key_for_search)); in add_prelim_ref()
409 ref->inode_list = NULL; in add_prelim_ref()
410 ref->level = level; in add_prelim_ref()
411 ref->count = count; in add_prelim_ref()
412 ref->parent = parent; in add_prelim_ref()
413 ref->wanted_disk_byte = wanted_disk_byte; in add_prelim_ref()
424 return add_prelim_ref(fs_info, &preftrees->direct, 0, NULL, level, in add_direct_ref()
435 struct preftree *tree = &preftrees->indirect; in add_indirect_ref()
438 tree = &preftrees->indirect_missing_keys; in add_indirect_ref()
445 struct rb_node **p = &preftrees->direct.root.rb_root.rb_node; in is_shared_data_backref()
459 p = &(*p)->rb_left; in is_shared_data_backref()
461 p = &(*p)->rb_right; in is_shared_data_backref()
478 struct btrfs_key *key_for_search = &ref->key_for_search; in add_all_parents()
482 u64 wanted_disk_byte = ref->wanted_disk_byte; in add_all_parents()
488 eb = path->nodes[level]; in add_all_parents()
489 ret = ulist_add(parents, eb->start, 0, GFP_NOFS); in add_all_parents()
505 eb = path->nodes[0]; in add_all_parents()
506 if (path->slots[0] >= btrfs_header_nritems(eb) || in add_all_parents()
507 is_shared_data_backref(preftrees, eb->start) || in add_all_parents()
508 ref->root_id != btrfs_header_owner(eb)) { in add_all_parents()
509 if (ctx->time_seq == BTRFS_SEQ_LAST) in add_all_parents()
512 ret = btrfs_next_old_leaf(root, path, ctx->time_seq); in add_all_parents()
515 while (!ret && count < ref->count) { in add_all_parents()
516 eb = path->nodes[0]; in add_all_parents()
517 slot = path->slots[0]; in add_all_parents()
521 if (key.objectid != key_for_search->objectid || in add_all_parents()
531 (is_shared_data_backref(preftrees, eb->start) || in add_all_parents()
532 ref->root_id != btrfs_header_owner(eb))) { in add_all_parents()
533 if (ctx->time_seq == BTRFS_SEQ_LAST) in add_all_parents()
536 ret = btrfs_next_old_leaf(root, path, ctx->time_seq); in add_all_parents()
549 if (ref->key_for_search.offset == key.offset - data_offset) in add_all_parents()
553 if (!ctx->skip_inode_ref_list) { in add_all_parents()
561 ret = ulist_add_merge_ptr(parents, eb->start, in add_all_parents()
565 if (!ret && !ctx->skip_inode_ref_list) { in add_all_parents()
566 while (old->next) in add_all_parents()
567 old = old->next; in add_all_parents()
568 old->next = eie; in add_all_parents()
573 if (ctx->time_seq == BTRFS_SEQ_LAST) in add_all_parents()
576 ret = btrfs_next_old_item(root, path, ctx->time_seq); in add_all_parents()
600 int level = ref->level; in resolve_indirect_ref()
601 struct btrfs_key search_key = ref->key_for_search; in resolve_indirect_ref()
611 if (path->search_commit_root) in resolve_indirect_ref()
612 root = btrfs_get_fs_root_commit_root(ctx->fs_info, path, ref->root_id); in resolve_indirect_ref()
614 root = btrfs_get_fs_root(ctx->fs_info, ref->root_id, false); in resolve_indirect_ref()
620 if (!path->search_commit_root && in resolve_indirect_ref()
621 test_bit(BTRFS_ROOT_DELETING, &root->state)) { in resolve_indirect_ref()
622 ret = -ENOENT; in resolve_indirect_ref()
626 if (btrfs_is_testing(ctx->fs_info)) { in resolve_indirect_ref()
627 ret = -ENOENT; in resolve_indirect_ref()
631 if (path->search_commit_root) in resolve_indirect_ref()
632 root_level = btrfs_header_level(root->commit_root); in resolve_indirect_ref()
633 else if (ctx->time_seq == BTRFS_SEQ_LAST) in resolve_indirect_ref()
634 root_level = btrfs_header_level(root->node); in resolve_indirect_ref()
636 root_level = btrfs_old_root_level(root, ctx->time_seq); in resolve_indirect_ref()
642 * We can often find data backrefs with an offset that is too large in resolve_indirect_ref()
643 * (>= LLONG_MAX, maximum allowed file offset) due to underflows when in resolve_indirect_ref()
644 * subtracting, from a file's offset, the data offset of its in resolve_indirect_ref()
648 * So if we detect such a case we set the search key's offset to zero to in resolve_indirect_ref()
650 * add_all_parents(), otherwise we will miss it because the offset in resolve_indirect_ref()
651 * taken from the backref is much larger than the offset of the file in resolve_indirect_ref()
661 search_key.offset >= LLONG_MAX) in resolve_indirect_ref()
662 search_key.offset = 0; in resolve_indirect_ref()
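The underflow described above is plain u64 wraparound and can be reproduced in user space; the values below are made up for illustration:

	#include <limits.h>
	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		uint64_t file_offset = 4096;	/* key.offset of a cloned file extent item */
		uint64_t data_offset = 1048576;	/* offset into the underlying extent */
		uint64_t backref_offset = file_offset - data_offset;	/* wraps */

		printf("backref offset = %llu\n",
		       (unsigned long long)backref_offset);
		if (backref_offset >= LLONG_MAX)
			printf("clamping the search offset to 0, as above\n");
		return 0;
	}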
663 path->lowest_level = level; in resolve_indirect_ref()
664 if (ctx->time_seq == BTRFS_SEQ_LAST) in resolve_indirect_ref()
667 ret = btrfs_search_old_slot(root, &search_key, path, ctx->time_seq); in resolve_indirect_ref()
669 btrfs_debug(ctx->fs_info, in resolve_indirect_ref()
671 ref->root_id, level, ref->count, ret, in resolve_indirect_ref()
672 ref->key_for_search.objectid, ref->key_for_search.type, in resolve_indirect_ref()
673 ref->key_for_search.offset); in resolve_indirect_ref()
677 eb = path->nodes[level]; in resolve_indirect_ref()
683 level--; in resolve_indirect_ref()
684 eb = path->nodes[level]; in resolve_indirect_ref()
691 path->lowest_level = 0; in resolve_indirect_ref()
701 return (struct extent_inode_elem *)(uintptr_t)node->aux; in unode_aux_to_inode_list()
746 return -ENOMEM; in resolve_indirect_refs()
754 while ((rnode = rb_first_cached(&preftrees->indirect.root))) { in resolve_indirect_refs()
758 if (WARN(ref->parent, in resolve_indirect_refs()
760 ret = -EINVAL; in resolve_indirect_refs()
764 rb_erase_cached(&ref->rbnode, &preftrees->indirect.root); in resolve_indirect_refs()
765 preftrees->indirect.count--; in resolve_indirect_refs()
767 if (ref->count == 0) { in resolve_indirect_refs()
772 if (sc && ref->root_id != btrfs_root_id(sc->root)) { in resolve_indirect_refs()
782 if (err == -ENOENT) { in resolve_indirect_refs()
783 prelim_ref_insert(ctx->fs_info, &preftrees->direct, ref, in resolve_indirect_refs()
795 ref->parent = node ? node->val : 0; in resolve_indirect_refs()
796 ref->inode_list = unode_aux_to_inode_list(node); in resolve_indirect_refs()
806 ret = -ENOMEM; in resolve_indirect_refs()
810 new_ref->parent = node->val; in resolve_indirect_refs()
811 new_ref->inode_list = unode_aux_to_inode_list(node); in resolve_indirect_refs()
812 prelim_ref_insert(ctx->fs_info, &preftrees->direct, in resolve_indirect_refs()
820 prelim_ref_insert(ctx->fs_info, &preftrees->direct, ref, NULL); in resolve_indirect_refs()
842 struct preftree *tree = &preftrees->indirect_missing_keys; in add_missing_keys()
845 while ((node = rb_first_cached(&tree->root))) { in add_missing_keys()
849 rb_erase_cached(node, &tree->root); in add_missing_keys()
851 BUG_ON(ref->parent); /* should not be a direct ref */ in add_missing_keys()
852 BUG_ON(ref->key_for_search.type); in add_missing_keys()
853 BUG_ON(!ref->wanted_disk_byte); in add_missing_keys()
855 check.level = ref->level - 1; in add_missing_keys()
856 check.owner_root = ref->root_id; in add_missing_keys()
858 eb = read_tree_block(fs_info, ref->wanted_disk_byte, &check); in add_missing_keys()
866 return -EIO; in add_missing_keys()
872 btrfs_item_key_to_cpu(eb, &ref->key_for_search, 0); in add_missing_keys()
874 btrfs_node_key_to_cpu(eb, &ref->key_for_search, 0); in add_missing_keys()
878 prelim_ref_insert(fs_info, &preftrees->indirect, ref, NULL); in add_missing_keys()
898 spin_lock(&head->lock); in add_delayed_refs()
899 for (n = rb_first_cached(&head->ref_tree); n; n = rb_next(n)) { in add_delayed_refs()
902 if (node->seq > seq) in add_delayed_refs()
905 switch (node->action) { in add_delayed_refs()
911 count = node->ref_mod; in add_delayed_refs()
914 count = node->ref_mod * -1; in add_delayed_refs()
919 switch (node->type) { in add_delayed_refs()
926 if (head->extent_op && head->extent_op->update_key) { in add_delayed_refs()
927 btrfs_disk_key_to_cpu(&key, &head->extent_op->key); in add_delayed_refs()
931 ret = add_indirect_ref(fs_info, preftrees, node->ref_root, in add_delayed_refs()
932 key_ptr, level + 1, node->bytenr, in add_delayed_refs()
945 node->parent, node->bytenr, count, in add_delayed_refs()
953 key.offset = btrfs_delayed_ref_offset(node); in add_delayed_refs()
971 sc->have_delayed_delete_refs = true; in add_delayed_refs()
973 ret = add_indirect_ref(fs_info, preftrees, node->ref_root, in add_delayed_refs()
974 &key, 0, node->bytenr, count, sc, in add_delayed_refs()
980 ret = add_direct_ref(fs_info, preftrees, 0, node->parent, in add_delayed_refs()
981 node->bytenr, count, sc, in add_delayed_refs()
998 spin_unlock(&head->lock); in add_delayed_refs()
1026 leaf = path->nodes[0]; in add_inline_refs()
1027 slot = path->slots[0]; in add_inline_refs()
1032 if (ctx->check_extent_item) { in add_inline_refs()
1033 ret = ctx->check_extent_item(ctx->bytenr, ei, leaf, ctx->user_ctx); in add_inline_refs()
1053 *info_level = found_key.offset; in add_inline_refs()
1060 u64 offset; in add_inline_refs() local
1067 return -EUCLEAN; in add_inline_refs()
1069 offset = btrfs_extent_inline_ref_offset(leaf, iref); in add_inline_refs()
1073 ret = add_direct_ref(ctx->fs_info, preftrees, in add_inline_refs()
1074 *info_level + 1, offset, in add_inline_refs()
1075 ctx->bytenr, 1, NULL, GFP_NOFS); in add_inline_refs()
1084 ret = add_direct_ref(ctx->fs_info, preftrees, 0, offset, in add_inline_refs()
1085 ctx->bytenr, count, sc, GFP_NOFS); in add_inline_refs()
1089 ret = add_indirect_ref(ctx->fs_info, preftrees, offset, in add_inline_refs()
1091 ctx->bytenr, 1, NULL, GFP_NOFS); in add_inline_refs()
1098 dref = (struct btrfs_extent_data_ref *)(&iref->offset); in add_inline_refs()
1103 key.offset = btrfs_extent_data_ref_offset(leaf, dref); in add_inline_refs()
1105 if (sc && key.objectid != sc->inum && in add_inline_refs()
1106 !sc->have_delayed_delete_refs) { in add_inline_refs()
1113 if (!ctx->skip_data_ref || in add_inline_refs()
1114 !ctx->skip_data_ref(root, key.objectid, key.offset, in add_inline_refs()
1115 ctx->user_ctx)) in add_inline_refs()
1116 ret = add_indirect_ref(ctx->fs_info, preftrees, in add_inline_refs()
1117 root, &key, 0, ctx->bytenr, in add_inline_refs()
1122 ASSERT(btrfs_fs_incompat(ctx->fs_info, SIMPLE_QUOTA)); in add_inline_refs()
1136 * add all non-inline backrefs for bytenr to the list
1146 struct btrfs_fs_info *fs_info = extent_root->fs_info; in add_keyed_refs()
1161 slot = path->slots[0]; in add_keyed_refs()
1162 leaf = path->nodes[0]; in add_keyed_refs()
1165 if (key.objectid != ctx->bytenr) in add_keyed_refs()
1176 info_level + 1, key.offset, in add_keyed_refs()
1177 ctx->bytenr, 1, NULL, GFP_NOFS); in add_keyed_refs()
1188 key.offset, ctx->bytenr, count, in add_keyed_refs()
1194 ret = add_indirect_ref(fs_info, preftrees, key.offset, in add_keyed_refs()
1195 NULL, info_level + 1, ctx->bytenr, in add_keyed_refs()
1210 key.offset = btrfs_extent_data_ref_offset(leaf, dref); in add_keyed_refs()
1212 if (sc && key.objectid != sc->inum && in add_keyed_refs()
1213 !sc->have_delayed_delete_refs) { in add_keyed_refs()
1220 if (!ctx->skip_data_ref || in add_keyed_refs()
1221 !ctx->skip_data_ref(root, key.objectid, key.offset, in add_keyed_refs()
1222 ctx->user_ctx)) in add_keyed_refs()
1224 &key, 0, ctx->bytenr, in add_keyed_refs()
1241 * fs_info->commit_root_sem semaphore, so no need to worry about the root's last
1248 const struct btrfs_fs_info *fs_info = root->fs_info; in lookup_backref_shared_cache()
1251 if (!current->journal_info) in lookup_backref_shared_cache()
1252 lockdep_assert_held(&fs_info->commit_root_sem); in lookup_backref_shared_cache()
1254 if (!ctx->use_path_cache) in lookup_backref_shared_cache()
1261 * Level -1 is used for the data extent, which is not reliable to cache in lookup_backref_shared_cache()
1268 entry = &ctx->path_cache_entries[level]; in lookup_backref_shared_cache()
1271 if (entry->bytenr != bytenr) in lookup_backref_shared_cache()
1278 if (!entry->is_shared && in lookup_backref_shared_cache()
1279 entry->gen != btrfs_root_last_snapshot(&root->root_item)) in lookup_backref_shared_cache()
1287 if (entry->is_shared && in lookup_backref_shared_cache()
1288 entry->gen != btrfs_get_last_root_drop_gen(fs_info)) in lookup_backref_shared_cache()
1291 *is_shared = entry->is_shared; in lookup_backref_shared_cache()
1301 ctx->path_cache_entries[i].is_shared = true; in lookup_backref_shared_cache()
1302 ctx->path_cache_entries[i].gen = entry->gen; in lookup_backref_shared_cache()
1311 * fs_info->commit_root_sem semaphore, so no need to worry about the root's last
1318 const struct btrfs_fs_info *fs_info = root->fs_info; in store_backref_shared_cache()
1322 if (!current->journal_info) in store_backref_shared_cache()
1323 lockdep_assert_held(&fs_info->commit_root_sem); in store_backref_shared_cache()
1325 if (!ctx->use_path_cache) in store_backref_shared_cache()
1332 * Level -1 is used for the data extent, which is not reliable to cache in store_backref_shared_cache()
1342 gen = btrfs_root_last_snapshot(&root->root_item); in store_backref_shared_cache()
1344 entry = &ctx->path_cache_entries[level]; in store_backref_shared_cache()
1345 entry->bytenr = bytenr; in store_backref_shared_cache()
1346 entry->is_shared = is_shared; in store_backref_shared_cache()
1347 entry->gen = gen; in store_backref_shared_cache()
1358 entry = &ctx->path_cache_entries[i]; in store_backref_shared_cache()
1359 entry->is_shared = is_shared; in store_backref_shared_cache()
1360 entry->gen = gen; in store_backref_shared_cache()
1382 struct btrfs_root *root = btrfs_extent_root(ctx->fs_info, ctx->bytenr); in find_parent_nodes()
1400 ASSERT(ctx->roots == NULL); in find_parent_nodes()
1402 key.objectid = ctx->bytenr; in find_parent_nodes()
1403 key.offset = (u64)-1; in find_parent_nodes()
1404 if (btrfs_fs_incompat(ctx->fs_info, SKINNY_METADATA)) in find_parent_nodes()
1411 return -ENOMEM; in find_parent_nodes()
1412 if (!ctx->trans) { in find_parent_nodes()
1413 path->search_commit_root = 1; in find_parent_nodes()
1414 path->skip_locking = 1; in find_parent_nodes()
1417 if (ctx->time_seq == BTRFS_SEQ_LAST) in find_parent_nodes()
1418 path->skip_locking = 1; in find_parent_nodes()
1428	 * Key with offset -1 found; an extent item with such an in find_parent_nodes()
1429	 * offset would have to exist, but this is out of the valid range. in find_parent_nodes()
1431 ret = -EUCLEAN; in find_parent_nodes()
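This is btrfs' usual search-and-step-back idiom for "find the last item at or before a key"; a sketch, assuming kernel-internal context (extent_root, path and bytenr provided by the caller):

	struct btrfs_key key;
	int ret;

	key.objectid = bytenr;
	key.type = BTRFS_EXTENT_ITEM_KEY;
	key.offset = (u64)-1;		/* sorts after any real extent item */

	ret = btrfs_search_slot(NULL, extent_root, &key, path, 0, 0);
	if (ret < 0)
		return ret;
	if (ret == 0)			/* exact match is impossible, see above */
		return -EUCLEAN;
	if (path->slots[0] == 0)	/* nothing sorts before the search key */
		return -EUCLEAN;
	path->slots[0]--;		/* last item with key <= search key */
	/* ...then verify the item's objectid/type really match @bytenr */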
1435 if (ctx->trans && likely(ctx->trans->type != __TRANS_DUMMY) && in find_parent_nodes()
1436 ctx->time_seq != BTRFS_SEQ_LAST) { in find_parent_nodes()
1443 delayed_refs = &ctx->trans->transaction->delayed_refs; in find_parent_nodes()
1444 spin_lock(&delayed_refs->lock); in find_parent_nodes()
1445 head = btrfs_find_delayed_ref_head(delayed_refs, ctx->bytenr); in find_parent_nodes()
1447 if (!mutex_trylock(&head->mutex)) { in find_parent_nodes()
1448 refcount_inc(&head->refs); in find_parent_nodes()
1449 spin_unlock(&delayed_refs->lock); in find_parent_nodes()
1457 mutex_lock(&head->mutex); in find_parent_nodes()
1458 mutex_unlock(&head->mutex); in find_parent_nodes()
1462 spin_unlock(&delayed_refs->lock); in find_parent_nodes()
1463 ret = add_delayed_refs(ctx->fs_info, head, ctx->time_seq, in find_parent_nodes()
1465 mutex_unlock(&head->mutex); in find_parent_nodes()
1469 spin_unlock(&delayed_refs->lock); in find_parent_nodes()
1473 if (path->slots[0]) { in find_parent_nodes()
1477 path->slots[0]--; in find_parent_nodes()
1478 leaf = path->nodes[0]; in find_parent_nodes()
1479 slot = path->slots[0]; in find_parent_nodes()
1481 if (key.objectid == ctx->bytenr && in find_parent_nodes()
1517 if (sc && ctx->bytenr == sc->data_bytenr) { in find_parent_nodes()
1527 if (sc->data_extent_gen > in find_parent_nodes()
1528 btrfs_root_last_snapshot(&sc->root->root_item)) { in find_parent_nodes()
1544 if (sc->ctx->curr_leaf_bytenr == sc->ctx->prev_leaf_bytenr && in find_parent_nodes()
1545 sc->self_ref_count == 1) { in find_parent_nodes()
1549 cached = lookup_backref_shared_cache(sc->ctx, sc->root, in find_parent_nodes()
1550 sc->ctx->curr_leaf_bytenr, in find_parent_nodes()
1564 ret = add_missing_keys(ctx->fs_info, &preftrees, path->skip_locking == 0); in find_parent_nodes()
1586 node = rb_next(&ref->rbnode); in find_parent_nodes()
1588 * ref->count < 0 can happen here if there are delayed in find_parent_nodes()
1589 * refs with a node->action of BTRFS_DROP_DELAYED_REF. in find_parent_nodes()
1595 * and would retain their original ref->count < 0. in find_parent_nodes()
1597 if (ctx->roots && ref->count && ref->root_id && ref->parent == 0) { in find_parent_nodes()
1599 ret = ulist_add(ctx->roots, ref->root_id, 0, GFP_NOFS); in find_parent_nodes()
1603 if (ref->count && ref->parent) { in find_parent_nodes()
1604 if (!ctx->skip_inode_ref_list && !ref->inode_list && in find_parent_nodes()
1605 ref->level == 0) { in find_parent_nodes()
1609 check.level = ref->level; in find_parent_nodes()
1611 eb = read_tree_block(ctx->fs_info, ref->parent, in find_parent_nodes()
1619 ret = -EIO; in find_parent_nodes()
1623 if (!path->skip_locking) in find_parent_nodes()
1626 if (!path->skip_locking) in find_parent_nodes()
1632 ref->inode_list = eie; in find_parent_nodes()
1640 ret = ulist_add_merge_ptr(ctx->refs, ref->parent, in find_parent_nodes()
1641 ref->inode_list, in find_parent_nodes()
1645 if (!ret && !ctx->skip_inode_ref_list) { in find_parent_nodes()
1656 ret = -EUCLEAN; in find_parent_nodes()
1659 while (eie->next) in find_parent_nodes()
1660 eie = eie->next; in find_parent_nodes()
1661 eie->next = ref->inode_list; in find_parent_nodes()
1668 * use-after-free when our caller uses it or double in find_parent_nodes()
1671 ref->inode_list = NULL; in find_parent_nodes()
1690 * @ctx->bytenr and @ctx->extent_item_pos. The bytenrs of the found leaves are
1691 * added to the ulist at @ctx->refs, and that ulist is allocated by this
1693 * @ctx->ignore_extent_item_pos is false, otherwise a simple ulist_free() is
1696 * Returns 0 on success and < 0 on error. On error @ctx->refs is not allocated.
1702 ASSERT(ctx->refs == NULL); in btrfs_find_all_leafs()
1704 ctx->refs = ulist_alloc(GFP_NOFS); in btrfs_find_all_leafs()
1705 if (!ctx->refs) in btrfs_find_all_leafs()
1706 return -ENOMEM; in btrfs_find_all_leafs()
1710 (ret < 0 && ret != -ENOENT)) { in btrfs_find_all_leafs()
1711 free_leaf_list(ctx->refs); in btrfs_find_all_leafs()
1712 ctx->refs = NULL; in btrfs_find_all_leafs()
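A hedged usage sketch of btrfs_find_all_leafs(), written as if inside backref.c since the helper and free_leaf_list() are file-local; fs_info, extent_bytenr and offset_in_extent are assumed inputs and error handling is trimmed:

	struct btrfs_backref_walk_ctx walk_ctx = { 0 };
	struct ulist_iterator uiter;
	struct ulist_node *node;
	int ret;

	walk_ctx.fs_info = fs_info;
	walk_ctx.bytenr = extent_bytenr;		/* logical address of the data extent */
	walk_ctx.extent_item_pos = offset_in_extent;	/* restrict refs to this position */

	ret = btrfs_find_all_leafs(&walk_ctx);
	if (ret)
		return ret;

	ULIST_ITER_INIT(&uiter);
	while ((node = ulist_next(walk_ctx.refs, &uiter)))
		pr_info("leaf %llu references the extent\n", node->val);

	/* extent_item_pos was honored, so per the comment above: */
	free_leaf_list(walk_ctx.refs);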
1730 * Found roots are added to @ctx->roots, which is allocated by this function if
1733 * This function requires @ctx->refs to be NULL, as it uses it for allocating a
1740 const u64 orig_bytenr = ctx->bytenr; in btrfs_find_all_roots_safe()
1741 const bool orig_skip_inode_ref_list = ctx->skip_inode_ref_list; in btrfs_find_all_roots_safe()
1746 ASSERT(ctx->refs == NULL); in btrfs_find_all_roots_safe()
1748 ctx->refs = ulist_alloc(GFP_NOFS); in btrfs_find_all_roots_safe()
1749 if (!ctx->refs) in btrfs_find_all_roots_safe()
1750 return -ENOMEM; in btrfs_find_all_roots_safe()
1752 if (!ctx->roots) { in btrfs_find_all_roots_safe()
1753 ctx->roots = ulist_alloc(GFP_NOFS); in btrfs_find_all_roots_safe()
1754 if (!ctx->roots) { in btrfs_find_all_roots_safe()
1755 ulist_free(ctx->refs); in btrfs_find_all_roots_safe()
1756 ctx->refs = NULL; in btrfs_find_all_roots_safe()
1757 return -ENOMEM; in btrfs_find_all_roots_safe()
1762 ctx->skip_inode_ref_list = true; in btrfs_find_all_roots_safe()
1769 if (ret < 0 && ret != -ENOENT) { in btrfs_find_all_roots_safe()
1771 ulist_free(ctx->roots); in btrfs_find_all_roots_safe()
1772 ctx->roots = NULL; in btrfs_find_all_roots_safe()
1777 node = ulist_next(ctx->refs, &uiter); in btrfs_find_all_roots_safe()
1780 ctx->bytenr = node->val; in btrfs_find_all_roots_safe()
1784 ulist_free(ctx->refs); in btrfs_find_all_roots_safe()
1785 ctx->refs = NULL; in btrfs_find_all_roots_safe()
1786 ctx->bytenr = orig_bytenr; in btrfs_find_all_roots_safe()
1787 ctx->skip_inode_ref_list = orig_skip_inode_ref_list; in btrfs_find_all_roots_safe()
1797 if (!ctx->trans && !skip_commit_root_sem) in btrfs_find_all_roots()
1798 down_read(&ctx->fs_info->commit_root_sem); in btrfs_find_all_roots()
1800 if (!ctx->trans && !skip_commit_root_sem) in btrfs_find_all_roots()
1801 up_read(&ctx->fs_info->commit_root_sem); in btrfs_find_all_roots()
1813 ulist_init(&ctx->refs); in btrfs_alloc_backref_share_check_ctx()
1823 ulist_release(&ctx->refs); in btrfs_free_backref_share_ctx()
1852 struct btrfs_root *root = inode->root; in btrfs_is_data_extent_shared()
1853 struct btrfs_fs_info *fs_info = root->fs_info; in btrfs_is_data_extent_shared()
1874 if (ctx->prev_extents_cache[i].bytenr == bytenr) in btrfs_is_data_extent_shared()
1875 return ctx->prev_extents_cache[i].is_shared; in btrfs_is_data_extent_shared()
1878 ulist_init(&ctx->refs); in btrfs_is_data_extent_shared()
1882 if (PTR_ERR(trans) != -ENOENT && PTR_ERR(trans) != -EROFS) { in btrfs_is_data_extent_shared()
1887 down_read(&fs_info->commit_root_sem); in btrfs_is_data_extent_shared()
1893 ctx->use_path_cache = true; in btrfs_is_data_extent_shared()
1903 ctx->curr_leaf_bytenr, 0, in btrfs_is_data_extent_shared()
1913 walk_ctx.refs = &ctx->refs; in btrfs_is_data_extent_shared()
1915 /* -1 means we are in the bytenr of the data extent. */ in btrfs_is_data_extent_shared()
1916 level = -1; in btrfs_is_data_extent_shared()
1919 const unsigned long prev_ref_count = ctx->refs.nnodes; in btrfs_is_data_extent_shared()
1932 if (ret < 0 && ret != -ENOENT) in btrfs_is_data_extent_shared()
1938 * the ctx->refs ulist, in which case we have to check multiple in btrfs_is_data_extent_shared()
1943 * 1) level -1, the data extent: If our data extent was not in btrfs_is_data_extent_shared()
1946 * the same offset, which means there are 2 (or more) file in btrfs_is_data_extent_shared()
1947 * extent items that point to the data extent - this happens in btrfs_is_data_extent_shared()
1963 * (direct ref) and a non-shared tree block ref (indirect in btrfs_is_data_extent_shared()
1966 if ((ctx->refs.nnodes - prev_ref_count) > 1) in btrfs_is_data_extent_shared()
1967 ctx->use_path_cache = false; in btrfs_is_data_extent_shared()
1972 node = ulist_next(&ctx->refs, &uiter); in btrfs_is_data_extent_shared()
1975 bytenr = node->val; in btrfs_is_data_extent_shared()
1976 if (ctx->use_path_cache) { in btrfs_is_data_extent_shared()
2000 if (!ctx->use_path_cache) { in btrfs_is_data_extent_shared()
2003 level--; in btrfs_is_data_extent_shared()
2005 bytenr = ctx->path_cache_entries[level].bytenr; in btrfs_is_data_extent_shared()
2006 ctx->use_path_cache = true; in btrfs_is_data_extent_shared()
2012 ctx->path_cache_entries[i].bytenr = 0; in btrfs_is_data_extent_shared()
2020 int slot = ctx->prev_extents_cache_slot; in btrfs_is_data_extent_shared()
2022 ctx->prev_extents_cache[slot].bytenr = shared.data_bytenr; in btrfs_is_data_extent_shared()
2023 ctx->prev_extents_cache[slot].is_shared = (ret == 1); in btrfs_is_data_extent_shared()
2026 ctx->prev_extents_cache_slot = slot; in btrfs_is_data_extent_shared()
2034 up_read(&fs_info->commit_root_sem); in btrfs_is_data_extent_shared()
2037 ulist_release(&ctx->refs); in btrfs_is_data_extent_shared()
2038 ctx->prev_leaf_bytenr = ctx->curr_leaf_bytenr; in btrfs_is_data_extent_shared()
2057 key.offset = start_off; in btrfs_find_one_extref()
2064 leaf = path->nodes[0]; in btrfs_find_one_extref()
2065 slot = path->slots[0]; in btrfs_find_one_extref()
2068 * If the item at offset is not found, in btrfs_find_one_extref()
2079 ret = -ENOENT; in btrfs_find_one_extref()
2093 ret = -ENOENT; in btrfs_find_one_extref()
2100 ptr = btrfs_item_ptr_offset(leaf, path->slots[0]); in btrfs_find_one_extref()
2104 *found_off = found_key.offset; in btrfs_find_one_extref()
2114 * 0-terminated. the path is only given within the current file system.
2133 s64 bytes_left = ((s64)size) - 1; in btrfs_ref_to_path()
2142 bytes_left -= name_len; in btrfs_ref_to_path()
2147 if (!path->skip_locking) in btrfs_ref_to_path()
2154 ret = -ENOENT; in btrfs_ref_to_path()
2158 next_inum = found_key.offset; in btrfs_ref_to_path()
2164 slot = path->slots[0]; in btrfs_ref_to_path()
2165 eb = path->nodes[0]; in btrfs_ref_to_path()
2168 path->nodes[0] = NULL; in btrfs_ref_to_path()
2169 path->locks[0] = 0; in btrfs_ref_to_path()
2178 --bytes_left; in btrfs_ref_to_path()
2214 key.offset = (u64)-1; in extent_from_logical()
2221	 * Key with offset -1 found; an extent item with such an in extent_from_logical()
2222	 * offset would have to exist, but this is out of the valid range. in extent_from_logical()
2224 return -EUCLEAN; in extent_from_logical()
2230 ret = -ENOENT; in extent_from_logical()
2233 btrfs_item_key_to_cpu(path->nodes[0], found_key, path->slots[0]); in extent_from_logical()
2234 if (found_key->type == BTRFS_METADATA_ITEM_KEY) in extent_from_logical()
2235 size = fs_info->nodesize; in extent_from_logical()
2236 else if (found_key->type == BTRFS_EXTENT_ITEM_KEY) in extent_from_logical()
2237 size = found_key->offset; in extent_from_logical()
2239 if (found_key->objectid > logical || in extent_from_logical()
2240 found_key->objectid + size <= logical) { in extent_from_logical()
2243 return -ENOENT; in extent_from_logical()
2246 eb = path->nodes[0]; in extent_from_logical()
2247 item_size = btrfs_item_size(eb, path->slots[0]); in extent_from_logical()
2249 ei = btrfs_item_ptr(eb, path->slots[0], struct btrfs_extent_item); in extent_from_logical()
2254 logical, logical - found_key->objectid, found_key->objectid, in extent_from_logical()
2255 found_key->offset, flags, item_size); in extent_from_logical()
2268 return -EIO; in extent_from_logical()
2295 if (key->type == BTRFS_METADATA_ITEM_KEY) { in get_extent_inline_ref()
2300 WARN_ON(key->type != BTRFS_EXTENT_ITEM_KEY); in get_extent_inline_ref()
2310 return -ENOENT; in get_extent_inline_ref()
2318 return -EUCLEAN; in get_extent_inline_ref()
2343 if (*ptr == (unsigned long)-1) in tree_backref_for_extent()
2363 if (key->type == BTRFS_EXTENT_ITEM_KEY) { in tree_backref_for_extent()
2369 ASSERT(key->type == BTRFS_METADATA_ITEM_KEY); in tree_backref_for_extent()
2370 *out_level = (u8)key->offset; in tree_backref_for_extent()
2374 *ptr = (unsigned long)-1; in tree_backref_for_extent()
2387 for (eie = inode_list; eie; eie = eie->next) { in iterate_leaf_refs()
2390 extent_item_objectid, eie->inum, in iterate_leaf_refs()
2391 eie->offset, root); in iterate_leaf_refs()
2392 ret = iterate(eie->inum, eie->offset, eie->num_bytes, root, ctx); in iterate_leaf_refs()
2407 * when the iterator function returns a non-zero value, iteration stops.
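A minimal callback sketch matching the iterate signature used here (compare build_ino_list() further down); struct first_ref is illustrative, not a kernel type:

	struct first_ref { u64 inum; u64 offset; u64 root; };

	static int record_first_ref(u64 inum, u64 offset, u64 num_bytes,
				    u64 root, void *ctx)
	{
		struct first_ref *first = ctx;

		first->inum = inum;
		first->offset = offset;
		first->root = root;
		return 1;	/* non-zero: stop the iteration after the first hit */
	}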
2419 btrfs_debug(ctx->fs_info, "resolving all inodes for extent %llu", in iterate_extent_inodes()
2420 ctx->bytenr); in iterate_extent_inodes()
2422 ASSERT(ctx->trans == NULL); in iterate_extent_inodes()
2423 ASSERT(ctx->roots == NULL); in iterate_extent_inodes()
2428 trans = btrfs_attach_transaction(ctx->fs_info->tree_root); in iterate_extent_inodes()
2430 if (PTR_ERR(trans) != -ENOENT && in iterate_extent_inodes()
2431 PTR_ERR(trans) != -EROFS) in iterate_extent_inodes()
2435 ctx->trans = trans; in iterate_extent_inodes()
2438 if (ctx->trans) { in iterate_extent_inodes()
2439 btrfs_get_tree_mod_seq(ctx->fs_info, &seq_elem); in iterate_extent_inodes()
2440 ctx->time_seq = seq_elem.seq; in iterate_extent_inodes()
2442 down_read(&ctx->fs_info->commit_root_sem); in iterate_extent_inodes()
2448 refs = ctx->refs; in iterate_extent_inodes()
2449 ctx->refs = NULL; in iterate_extent_inodes()
2453 const u64 leaf_bytenr = ref_node->val; in iterate_extent_inodes()
2458 inode_list = (struct extent_inode_elem *)(uintptr_t)ref_node->aux; in iterate_extent_inodes()
2460 if (ctx->cache_lookup) { in iterate_extent_inodes()
2465 cached = ctx->cache_lookup(leaf_bytenr, ctx->user_ctx, in iterate_extent_inodes()
2469 ret = iterate_leaf_refs(ctx->fs_info, in iterate_extent_inodes()
2482 if (!ctx->roots) { in iterate_extent_inodes()
2483 ctx->roots = ulist_alloc(GFP_NOFS); in iterate_extent_inodes()
2484 if (!ctx->roots) { in iterate_extent_inodes()
2485 ret = -ENOMEM; in iterate_extent_inodes()
2490 ctx->bytenr = leaf_bytenr; in iterate_extent_inodes()
2495 if (ctx->cache_store) in iterate_extent_inodes()
2496 ctx->cache_store(leaf_bytenr, ctx->roots, ctx->user_ctx); in iterate_extent_inodes()
2499 while (!ret && (root_node = ulist_next(ctx->roots, &root_uiter))) { in iterate_extent_inodes()
2500 btrfs_debug(ctx->fs_info, in iterate_extent_inodes()
2502 root_node->val, ref_node->val, in iterate_extent_inodes()
2503 ref_node->aux); in iterate_extent_inodes()
2504 ret = iterate_leaf_refs(ctx->fs_info, inode_list, in iterate_extent_inodes()
2505 root_node->val, ctx->bytenr, in iterate_extent_inodes()
2508 ulist_reinit(ctx->roots); in iterate_extent_inodes()
2513 if (ctx->trans) { in iterate_extent_inodes()
2514 btrfs_put_tree_mod_seq(ctx->fs_info, &seq_elem); in iterate_extent_inodes()
2515 btrfs_end_transaction(ctx->trans); in iterate_extent_inodes()
2516 ctx->trans = NULL; in iterate_extent_inodes()
2518 up_read(&ctx->fs_info->commit_root_sem); in iterate_extent_inodes()
2521 ulist_free(ctx->roots); in iterate_extent_inodes()
2522 ctx->roots = NULL; in iterate_extent_inodes()
2530 static int build_ino_list(u64 inum, u64 offset, u64 num_bytes, u64 root, void *ctx) in build_ino_list() argument
2535 if (inodes->bytes_left >= c) { in build_ino_list()
2536 inodes->bytes_left -= c; in build_ino_list()
2537 inodes->val[inodes->elem_cnt] = inum; in build_ino_list()
2538 inodes->val[inodes->elem_cnt + 1] = offset; in build_ino_list()
2539 inodes->val[inodes->elem_cnt + 2] = root; in build_ino_list()
2540 inodes->elem_cnt += 3; in build_ino_list()
2542 inodes->bytes_missing += c - inodes->bytes_left; in build_ino_list()
2543 inodes->bytes_left = 0; in build_ino_list()
2544 inodes->elem_missed += 3; in build_ino_list()
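build_ino_list() packs flat (inum, offset, root) triples into val[], so consumers read it back with a stride-3 walk; a sketch, kernel context assumed:

	u32 i;

	for (i = 0; i < inodes->elem_cnt; i += 3)
		pr_info("inum=%llu offset=%llu root=%llu\n",
			inodes->val[i], inodes->val[i + 1], inodes->val[i + 2]);
	if (inodes->elem_missed)
		pr_info("%u references did not fit in the buffer\n",
			inodes->elem_missed);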
2558 int search_commit_root = path->search_commit_root; in iterate_inodes_from_logical()
2565 return -EINVAL; in iterate_inodes_from_logical()
2571 walk_ctx.extent_item_pos = logical - found_key.objectid; in iterate_inodes_from_logical()
2590 struct btrfs_root *fs_root = ipath->fs_root; in iterate_inode_refs()
2591 struct btrfs_path *path = ipath->btrfs_path; in iterate_inode_refs()
2604 ret = found ? 0 : -ENOENT; in iterate_inode_refs()
2609 parent = found_key.offset; in iterate_inode_refs()
2610 slot = path->slots[0]; in iterate_inode_refs()
2611 eb = btrfs_clone_extent_buffer(path->nodes[0]); in iterate_inode_refs()
2613 ret = -ENOMEM; in iterate_inode_refs()
2623 btrfs_debug(fs_root->fs_info, in iterate_inode_refs()
2624 "following ref at offset %u for inode %llu in tree %llu", in iterate_inode_refs()
2646 u64 offset = 0; in iterate_inode_extrefs() local
2649 struct btrfs_root *fs_root = ipath->fs_root; in iterate_inode_extrefs()
2650 struct btrfs_path *path = ipath->btrfs_path; in iterate_inode_extrefs()
2658 ret = btrfs_find_one_extref(fs_root, inum, offset, path, &extref, in iterate_inode_extrefs()
2659 &offset); in iterate_inode_extrefs()
2663 ret = found ? 0 : -ENOENT; in iterate_inode_extrefs()
2668 slot = path->slots[0]; in iterate_inode_extrefs()
2669 eb = btrfs_clone_extent_buffer(path->nodes[0]); in iterate_inode_extrefs()
2671 ret = -ENOMEM; in iterate_inode_extrefs()
2687 (unsigned long)&extref->name, eb, ipath); in iterate_inode_extrefs()
2696 offset++; in iterate_inode_extrefs()
2713 int i = ipath->fspath->elem_cnt; in inode_to_path()
2717 bytes_left = ipath->fspath->bytes_left > s_ptr ? in inode_to_path()
2718 ipath->fspath->bytes_left - s_ptr : 0; in inode_to_path()
2720 fspath_min = (char *)ipath->fspath->val + (i + 1) * s_ptr; in inode_to_path()
2721 fspath = btrfs_ref_to_path(ipath->fs_root, ipath->btrfs_path, name_len, in inode_to_path()
2727 ipath->fspath->val[i] = (u64)(unsigned long)fspath; in inode_to_path()
2728 ++ipath->fspath->elem_cnt; in inode_to_path()
2729 ipath->fspath->bytes_left = fspath - fspath_min; in inode_to_path()
2731 ++ipath->fspath->elem_missed; in inode_to_path()
2732 ipath->fspath->bytes_missing += fspath_min - fspath; in inode_to_path()
2733 ipath->fspath->bytes_left = 0; in inode_to_path()
2741 * it has been created large enough. each path is zero-terminated and accessed
2742 * from ipath->fspath->val[i].
2743 * when it returns, there are ipath->fspath->elem_cnt number of paths available
2744 * in ipath->fspath->val[]. when the allocated space wasn't sufficient, the
2745 * number of missed paths is recorded in ipath->fspath->elem_missed, otherwise,
2746 * it's zero. ipath->fspath->bytes_missing holds the number of bytes that would
2757 else if (ret != -ENOENT) in paths_from_inode()
2761 if (ret == -ENOENT && found_refs) in paths_from_inode()
2775 return ERR_PTR(-ENOMEM); in init_data_container()
2778 data->bytes_left = total_bytes - sizeof(*data); in init_data_container()
2780 data->bytes_missing = sizeof(*data) - total_bytes; in init_data_container()
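Per the two lines above, the usable payload is total_bytes minus the header, so sizing a container for n (inum, offset, root) triples looks like this (sketch; n is an assumed input and init_data_container() is file-local):

	u32 total_bytes = sizeof(struct btrfs_data_container) +
			  n * 3 * sizeof(u64);	/* 3 u64s per reference */
	struct btrfs_data_container *inodes = init_data_container(total_bytes);

	if (IS_ERR(inodes))
		return PTR_ERR(inodes);
	/* here inodes->bytes_left == n * 3 * sizeof(u64) */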
2788 * information will be total_bytes - sizeof(struct btrfs_data_container).
2804 return ERR_PTR(-ENOMEM); in init_ipath()
2807 ifp->btrfs_path = path; in init_ipath()
2808 ifp->fspath = fspath; in init_ipath()
2809 ifp->fs_root = fs_root; in init_ipath()
2818 kvfree(ipath->fspath); in free_ipath()
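Putting init_ipath(), paths_from_inode() and free_ipath() together; a hedged sketch written as if inside backref.c (paths_from_inode() is file-local), with fs_root, path and inum as assumed inputs:

	struct inode_fs_paths *ipath;
	int ret;

	ipath = init_ipath(4096, fs_root, path);	/* 4096 bytes: header + paths */
	if (IS_ERR(ipath))
		return PTR_ERR(ipath);

	ret = paths_from_inode(inum, ipath);
	if (!ret) {
		u32 i;

		/* Per the comment above: elem_cnt paths, each zero-terminated. */
		for (i = 0; i < ipath->fspath->elem_cnt; i++)
			pr_info("path: %s\n",
				(char *)(unsigned long)ipath->fspath->val[i]);
		if (ipath->fspath->elem_missed)
			pr_info("%u paths did not fit\n",
				ipath->fspath->elem_missed);
	}
	free_ipath(ipath);
	return ret;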
2830 ret->path = btrfs_alloc_path(); in btrfs_backref_iter_alloc()
2831 if (!ret->path) { in btrfs_backref_iter_alloc()
2837 ret->path->search_commit_root = 1; in btrfs_backref_iter_alloc()
2838 ret->path->skip_locking = 1; in btrfs_backref_iter_alloc()
2839 ret->fs_info = fs_info; in btrfs_backref_iter_alloc()
2846 iter->bytenr = 0; in btrfs_backref_iter_release()
2847 iter->item_ptr = 0; in btrfs_backref_iter_release()
2848 iter->cur_ptr = 0; in btrfs_backref_iter_release()
2849 iter->end_ptr = 0; in btrfs_backref_iter_release()
2850 btrfs_release_path(iter->path); in btrfs_backref_iter_release()
2851 memset(&iter->cur_key, 0, sizeof(iter->cur_key)); in btrfs_backref_iter_release()
2856 struct btrfs_fs_info *fs_info = iter->fs_info; in btrfs_backref_iter_start()
2858 struct btrfs_path *path = iter->path; in btrfs_backref_iter_start()
2865 key.offset = (u64)-1; in btrfs_backref_iter_start()
2866 iter->bytenr = bytenr; in btrfs_backref_iter_start()
2873	 * Key with offset -1 found; an extent item with such an in btrfs_backref_iter_start()
2874	 * offset would have to exist, but this is out of the valid range. in btrfs_backref_iter_start()
2876 ret = -EUCLEAN; in btrfs_backref_iter_start()
2879 if (path->slots[0] == 0) { in btrfs_backref_iter_start()
2881 ret = -EUCLEAN; in btrfs_backref_iter_start()
2884 path->slots[0]--; in btrfs_backref_iter_start()
2886 btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]); in btrfs_backref_iter_start()
2889 ret = -ENOENT; in btrfs_backref_iter_start()
2892 memcpy(&iter->cur_key, &key, sizeof(key)); in btrfs_backref_iter_start()
2893 iter->item_ptr = (u32)btrfs_item_ptr_offset(path->nodes[0], in btrfs_backref_iter_start()
2894 path->slots[0]); in btrfs_backref_iter_start()
2895 iter->end_ptr = (u32)(iter->item_ptr + in btrfs_backref_iter_start()
2896 btrfs_item_size(path->nodes[0], path->slots[0])); in btrfs_backref_iter_start()
2897 ei = btrfs_item_ptr(path->nodes[0], path->slots[0], in btrfs_backref_iter_start()
2903 * This is an extra precaution for non-skinny-metadata, where in btrfs_backref_iter_start()
2907 if (btrfs_extent_flags(path->nodes[0], ei) & BTRFS_EXTENT_FLAG_DATA) { in btrfs_backref_iter_start()
2908 ret = -ENOTSUPP; in btrfs_backref_iter_start()
2911 iter->cur_ptr = (u32)(iter->item_ptr + sizeof(*ei)); in btrfs_backref_iter_start()
2914 if (iter->cur_ptr >= iter->end_ptr) { in btrfs_backref_iter_start()
2919 ret = -ENOENT; in btrfs_backref_iter_start()
2925 btrfs_item_key_to_cpu(path->nodes[0], &iter->cur_key, in btrfs_backref_iter_start()
2926 path->slots[0]); in btrfs_backref_iter_start()
2927 if (iter->cur_key.objectid != bytenr || in btrfs_backref_iter_start()
2928 (iter->cur_key.type != BTRFS_SHARED_BLOCK_REF_KEY && in btrfs_backref_iter_start()
2929 iter->cur_key.type != BTRFS_TREE_BLOCK_REF_KEY)) { in btrfs_backref_iter_start()
2930 ret = -ENOENT; in btrfs_backref_iter_start()
2933 iter->cur_ptr = (u32)btrfs_item_ptr_offset(path->nodes[0], in btrfs_backref_iter_start()
2934 path->slots[0]); in btrfs_backref_iter_start()
2935 iter->item_ptr = iter->cur_ptr; in btrfs_backref_iter_start()
2936 iter->end_ptr = (u32)(iter->item_ptr + btrfs_item_size( in btrfs_backref_iter_start()
2937 path->nodes[0], path->slots[0])); in btrfs_backref_iter_start()
2948 if (iter->cur_key.type == BTRFS_EXTENT_ITEM_KEY || in btrfs_backref_iter_is_inline_ref()
2949 iter->cur_key.type == BTRFS_METADATA_ITEM_KEY) in btrfs_backref_iter_is_inline_ref()
2958 * Caller needs to check whether it's an inline ref or not by examining iter->cur_key.
2966 struct extent_buffer *eb = iter->path->nodes[0]; in btrfs_backref_iter_next()
2968 struct btrfs_path *path = iter->path; in btrfs_backref_iter_next()
2975 ASSERT(iter->cur_ptr < iter->end_ptr); in btrfs_backref_iter_next()
2985 ((unsigned long)iter->cur_ptr); in btrfs_backref_iter_next()
2990 iter->cur_ptr += size; in btrfs_backref_iter_next()
2991 if (iter->cur_ptr < iter->end_ptr) in btrfs_backref_iter_next()
2998 extent_root = btrfs_extent_root(iter->fs_info, iter->bytenr); in btrfs_backref_iter_next()
2999 ret = btrfs_next_item(extent_root, iter->path); in btrfs_backref_iter_next()
3003 btrfs_item_key_to_cpu(path->nodes[0], &iter->cur_key, path->slots[0]); in btrfs_backref_iter_next()
3004 if (iter->cur_key.objectid != iter->bytenr || in btrfs_backref_iter_next()
3005 (iter->cur_key.type != BTRFS_TREE_BLOCK_REF_KEY && in btrfs_backref_iter_next()
3006 iter->cur_key.type != BTRFS_SHARED_BLOCK_REF_KEY)) in btrfs_backref_iter_next()
3008 iter->item_ptr = (u32)btrfs_item_ptr_offset(path->nodes[0], in btrfs_backref_iter_next()
3009 path->slots[0]); in btrfs_backref_iter_next()
3010 iter->cur_ptr = iter->item_ptr; in btrfs_backref_iter_next()
3011 iter->end_ptr = iter->item_ptr + (u32)btrfs_item_size(path->nodes[0], in btrfs_backref_iter_next()
3012 path->slots[0]); in btrfs_backref_iter_next()
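The iterator trio composes into the usual driving loop; a hedged sketch (kernel context; handle_one_ref() is a hypothetical consumer, and the alloc signature follows current mainline):

	struct btrfs_backref_iter *iter;
	int ret;

	iter = btrfs_backref_iter_alloc(fs_info);
	if (!iter)
		return -ENOMEM;

	ret = btrfs_backref_iter_start(iter, bytenr);
	if (ret < 0)
		goto out;

	do {
		/* iter->cur_key says whether this is an inline or a keyed ref */
		handle_one_ref(iter);			/* hypothetical consumer */
		ret = btrfs_backref_iter_next(iter);
	} while (ret == 0);				/* >0: no more refs, <0: error */

	if (ret > 0)
		ret = 0;
out:
	btrfs_backref_iter_release(iter);
	btrfs_backref_iter_free(iter);
	return ret;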
3021 cache->rb_root = RB_ROOT; in btrfs_backref_init_cache()
3023 INIT_LIST_HEAD(&cache->pending[i]); in btrfs_backref_init_cache()
3024 INIT_LIST_HEAD(&cache->changed); in btrfs_backref_init_cache()
3025 INIT_LIST_HEAD(&cache->detached); in btrfs_backref_init_cache()
3026 INIT_LIST_HEAD(&cache->leaves); in btrfs_backref_init_cache()
3027 INIT_LIST_HEAD(&cache->pending_edge); in btrfs_backref_init_cache()
3028 INIT_LIST_HEAD(&cache->useless_node); in btrfs_backref_init_cache()
3029 cache->fs_info = fs_info; in btrfs_backref_init_cache()
3030 cache->is_reloc = is_reloc; in btrfs_backref_init_cache()
3043 INIT_LIST_HEAD(&node->list); in btrfs_backref_alloc_node()
3044 INIT_LIST_HEAD(&node->upper); in btrfs_backref_alloc_node()
3045 INIT_LIST_HEAD(&node->lower); in btrfs_backref_alloc_node()
3046 RB_CLEAR_NODE(&node->rb_node); in btrfs_backref_alloc_node()
3047 cache->nr_nodes++; in btrfs_backref_alloc_node()
3048 node->level = level; in btrfs_backref_alloc_node()
3049 node->bytenr = bytenr; in btrfs_backref_alloc_node()
3058 ASSERT(list_empty(&node->list)); in btrfs_backref_free_node()
3059 ASSERT(list_empty(&node->lower)); in btrfs_backref_free_node()
3060 ASSERT(node->eb == NULL); in btrfs_backref_free_node()
3061 cache->nr_nodes--; in btrfs_backref_free_node()
3062 btrfs_put_root(node->root); in btrfs_backref_free_node()
3070 struct btrfs_backref_edge *edge; in btrfs_backref_alloc_edge() local
3072 edge = kzalloc(sizeof(*edge), GFP_NOFS); in btrfs_backref_alloc_edge()
3073 if (edge) in btrfs_backref_alloc_edge()
3074 cache->nr_edges++; in btrfs_backref_alloc_edge()
3075 return edge; in btrfs_backref_alloc_edge()
3079 struct btrfs_backref_edge *edge) in btrfs_backref_free_edge() argument
3081 if (edge) { in btrfs_backref_free_edge()
3082 cache->nr_edges--; in btrfs_backref_free_edge()
3083 kfree(edge); in btrfs_backref_free_edge()
3089 if (node->locked) { in btrfs_backref_unlock_node_buffer()
3090 btrfs_tree_unlock(node->eb); in btrfs_backref_unlock_node_buffer()
3091 node->locked = 0; in btrfs_backref_unlock_node_buffer()
3097 if (node->eb) { in btrfs_backref_drop_node_buffer()
3099 free_extent_buffer(node->eb); in btrfs_backref_drop_node_buffer()
3100 node->eb = NULL; in btrfs_backref_drop_node_buffer()
3114 ASSERT(list_empty(&node->upper)); in btrfs_backref_drop_node()
3117 list_del_init(&node->list); in btrfs_backref_drop_node()
3118 list_del_init(&node->lower); in btrfs_backref_drop_node()
3119 if (!RB_EMPTY_NODE(&node->rb_node)) in btrfs_backref_drop_node()
3120 rb_erase(&node->rb_node, &tree->rb_root); in btrfs_backref_drop_node()
3135 struct btrfs_backref_edge *edge; in btrfs_backref_cleanup_node() local
3140 BUG_ON(!node->lowest && !node->detached); in btrfs_backref_cleanup_node()
3141 while (!list_empty(&node->upper)) { in btrfs_backref_cleanup_node()
3142 edge = list_entry(node->upper.next, struct btrfs_backref_edge, in btrfs_backref_cleanup_node()
3144 upper = edge->node[UPPER]; in btrfs_backref_cleanup_node()
3145 list_del(&edge->list[LOWER]); in btrfs_backref_cleanup_node()
3146 list_del(&edge->list[UPPER]); in btrfs_backref_cleanup_node()
3147 btrfs_backref_free_edge(cache, edge); in btrfs_backref_cleanup_node()
3153 if (list_empty(&upper->lower)) { in btrfs_backref_cleanup_node()
3154 list_add_tail(&upper->lower, &cache->leaves); in btrfs_backref_cleanup_node()
3155 upper->lowest = 1; in btrfs_backref_cleanup_node()
3170 while (!list_empty(&cache->detached)) { in btrfs_backref_release_cache()
3171 node = list_entry(cache->detached.next, in btrfs_backref_release_cache()
3176 while (!list_empty(&cache->leaves)) { in btrfs_backref_release_cache()
3177 node = list_entry(cache->leaves.next, in btrfs_backref_release_cache()
3183 while (!list_empty(&cache->pending[i])) { in btrfs_backref_release_cache()
3184 node = list_first_entry(&cache->pending[i], in btrfs_backref_release_cache()
3190 ASSERT(list_empty(&cache->pending_edge)); in btrfs_backref_release_cache()
3191 ASSERT(list_empty(&cache->useless_node)); in btrfs_backref_release_cache()
3192 ASSERT(list_empty(&cache->changed)); in btrfs_backref_release_cache()
3193 ASSERT(list_empty(&cache->detached)); in btrfs_backref_release_cache()
3194 ASSERT(RB_EMPTY_ROOT(&cache->rb_root)); in btrfs_backref_release_cache()
3195 ASSERT(!cache->nr_nodes); in btrfs_backref_release_cache()
3196 ASSERT(!cache->nr_edges); in btrfs_backref_release_cache()
3199 void btrfs_backref_link_edge(struct btrfs_backref_edge *edge, in btrfs_backref_link_edge() argument
3204 ASSERT(upper && lower && upper->level == lower->level + 1); in btrfs_backref_link_edge()
3205 edge->node[LOWER] = lower; in btrfs_backref_link_edge()
3206 edge->node[UPPER] = upper; in btrfs_backref_link_edge()
3208 list_add_tail(&edge->list[LOWER], &lower->upper); in btrfs_backref_link_edge()
3210 list_add_tail(&edge->list[UPPER], &upper->lower); in btrfs_backref_link_edge()
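A hedged sketch of wiring one lower/upper pair with an edge (kernel context; cache, child_bytenr, parent_bytenr and level are assumed inputs):

	struct btrfs_backref_node *lower, *upper;
	struct btrfs_backref_edge *edge;

	lower = btrfs_backref_alloc_node(cache, child_bytenr, level);
	upper = btrfs_backref_alloc_node(cache, parent_bytenr, level + 1);
	edge = btrfs_backref_alloc_edge(cache);
	if (!lower || !upper || !edge) {
		btrfs_backref_free_edge(cache, edge);	/* tolerates NULL */
		/* node cleanup omitted for brevity */
		return -ENOMEM;
	}

	/*
	 * LINK_LOWER hooks up only the lower end for now; the upper ends
	 * are completed later by btrfs_backref_finish_upper_links().
	 */
	btrfs_backref_link_edge(edge, lower, upper, LINK_LOWER);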
3221 * type is btrfs_inline_ref_type, offset is
3228 struct btrfs_backref_edge *edge; in handle_direct_tree_backref() local
3232 ASSERT(ref_key->type == BTRFS_SHARED_BLOCK_REF_KEY); in handle_direct_tree_backref()
3235 if (ref_key->objectid == ref_key->offset) { in handle_direct_tree_backref()
3238 cur->is_reloc_root = 1; in handle_direct_tree_backref()
3240 if (cache->is_reloc) { in handle_direct_tree_backref()
3241 root = find_reloc_root(cache->fs_info, cur->bytenr); in handle_direct_tree_backref()
3243 return -ENOENT; in handle_direct_tree_backref()
3244 cur->root = root; in handle_direct_tree_backref()
3250 list_add(&cur->list, &cache->useless_node); in handle_direct_tree_backref()
3255 edge = btrfs_backref_alloc_edge(cache); in handle_direct_tree_backref()
3256 if (!edge) in handle_direct_tree_backref()
3257 return -ENOMEM; in handle_direct_tree_backref()
3259 rb_node = rb_simple_search(&cache->rb_root, ref_key->offset); in handle_direct_tree_backref()
3262 upper = btrfs_backref_alloc_node(cache, ref_key->offset, in handle_direct_tree_backref()
3263 cur->level + 1); in handle_direct_tree_backref()
3265 btrfs_backref_free_edge(cache, edge); in handle_direct_tree_backref()
3266 return -ENOMEM; in handle_direct_tree_backref()
3273 list_add_tail(&edge->list[UPPER], &cache->pending_edge); in handle_direct_tree_backref()
3277 ASSERT(upper->checked); in handle_direct_tree_backref()
3278 INIT_LIST_HEAD(&edge->list[UPPER]); in handle_direct_tree_backref()
3280 btrfs_backref_link_edge(edge, cur, upper, LINK_LOWER); in handle_direct_tree_backref()
3304 struct btrfs_fs_info *fs_info = cache->fs_info; in handle_indirect_tree_backref()
3307 struct btrfs_backref_edge *edge; in handle_indirect_tree_backref() local
3315 root = btrfs_get_fs_root(fs_info, ref_key->offset, false); in handle_indirect_tree_backref()
3318 if (!test_bit(BTRFS_ROOT_SHAREABLE, &root->state)) in handle_indirect_tree_backref()
3319 cur->cowonly = 1; in handle_indirect_tree_backref()
3321 if (btrfs_root_level(&root->root_item) == cur->level) { in handle_indirect_tree_backref()
3323 ASSERT(btrfs_root_bytenr(&root->root_item) == cur->bytenr); in handle_indirect_tree_backref()
3331 * completely relying on direct backref (key->offset is parent in handle_indirect_tree_backref()
3334 if (btrfs_should_ignore_reloc_root(root) && cache->is_reloc) { in handle_indirect_tree_backref()
3336 list_add(&cur->list, &cache->useless_node); in handle_indirect_tree_backref()
3338 cur->root = root; in handle_indirect_tree_backref()
3343 level = cur->level + 1; in handle_indirect_tree_backref()
3346 path->search_commit_root = 1; in handle_indirect_tree_backref()
3347 path->skip_locking = 1; in handle_indirect_tree_backref()
3348 path->lowest_level = level; in handle_indirect_tree_backref()
3350 path->lowest_level = 0; in handle_indirect_tree_backref()
3355 if (ret > 0 && path->slots[level] > 0) in handle_indirect_tree_backref()
3356 path->slots[level]--; in handle_indirect_tree_backref()
3358 eb = path->nodes[level]; in handle_indirect_tree_backref()
3359 if (btrfs_node_blockptr(eb, path->slots[level]) != cur->bytenr) { in handle_indirect_tree_backref()
3362 cur->bytenr, level - 1, btrfs_root_id(root), in handle_indirect_tree_backref()
3363 tree_key->objectid, tree_key->type, tree_key->offset); in handle_indirect_tree_backref()
3365 ret = -ENOENT; in handle_indirect_tree_backref()
3372 if (!path->nodes[level]) { in handle_indirect_tree_backref()
3373 ASSERT(btrfs_root_bytenr(&root->root_item) == in handle_indirect_tree_backref()
3374 lower->bytenr); in handle_indirect_tree_backref()
3377 cache->is_reloc) { in handle_indirect_tree_backref()
3379 list_add(&lower->list, &cache->useless_node); in handle_indirect_tree_backref()
3381 lower->root = root; in handle_indirect_tree_backref()
3386 edge = btrfs_backref_alloc_edge(cache); in handle_indirect_tree_backref()
3387 if (!edge) { in handle_indirect_tree_backref()
3389 ret = -ENOMEM; in handle_indirect_tree_backref()
3393 eb = path->nodes[level]; in handle_indirect_tree_backref()
3394 rb_node = rb_simple_search(&cache->rb_root, eb->start); in handle_indirect_tree_backref()
3396 upper = btrfs_backref_alloc_node(cache, eb->start, in handle_indirect_tree_backref()
3397 lower->level + 1); in handle_indirect_tree_backref()
3400 btrfs_backref_free_edge(cache, edge); in handle_indirect_tree_backref()
3401 ret = -ENOMEM; in handle_indirect_tree_backref()
3404 upper->owner = btrfs_header_owner(eb); in handle_indirect_tree_backref()
3405 if (!test_bit(BTRFS_ROOT_SHAREABLE, &root->state)) in handle_indirect_tree_backref()
3406 upper->cowonly = 1; in handle_indirect_tree_backref()
3413 upper->checked = 0; in handle_indirect_tree_backref()
3415 upper->checked = 1; in handle_indirect_tree_backref()
3422 if (!upper->checked && need_check) { in handle_indirect_tree_backref()
3424 list_add_tail(&edge->list[UPPER], in handle_indirect_tree_backref()
3425 &cache->pending_edge); in handle_indirect_tree_backref()
3427 if (upper->checked) in handle_indirect_tree_backref()
3429 INIT_LIST_HEAD(&edge->list[UPPER]); in handle_indirect_tree_backref()
3434 ASSERT(upper->checked); in handle_indirect_tree_backref()
3435 INIT_LIST_HEAD(&edge->list[UPPER]); in handle_indirect_tree_backref()
3436 if (!upper->owner) in handle_indirect_tree_backref()
3437 upper->owner = btrfs_header_owner(eb); in handle_indirect_tree_backref()
3439 btrfs_backref_link_edge(edge, lower, upper, LINK_LOWER); in handle_indirect_tree_backref()
3457 * links aren't yet bi-directional; such links still need to be finished.
3472 struct btrfs_backref_edge *edge; in btrfs_backref_add_tree_node() local
3476 ret = btrfs_backref_iter_start(iter, cur->bytenr); in btrfs_backref_add_tree_node()
3489 ret = -EUCLEAN; in btrfs_backref_add_tree_node()
3493 WARN_ON(cur->checked); in btrfs_backref_add_tree_node()
3494 if (!list_empty(&cur->upper)) { in btrfs_backref_add_tree_node()
3499 ASSERT(list_is_singular(&cur->upper)); in btrfs_backref_add_tree_node()
3500 edge = list_entry(cur->upper.next, struct btrfs_backref_edge, in btrfs_backref_add_tree_node()
3502 ASSERT(list_empty(&edge->list[UPPER])); in btrfs_backref_add_tree_node()
3503 exist = edge->node[UPPER]; in btrfs_backref_add_tree_node()
3508 if (!exist->checked) in btrfs_backref_add_tree_node()
3509 list_add_tail(&edge->list[UPPER], &cache->pending_edge); in btrfs_backref_add_tree_node()
3520 eb = iter->path->nodes[0]; in btrfs_backref_add_tree_node()
3522 key.objectid = iter->bytenr; in btrfs_backref_add_tree_node()
3528 ((unsigned long)iter->cur_ptr); in btrfs_backref_add_tree_node()
3532 ret = -EUCLEAN; in btrfs_backref_add_tree_node()
3536 key.offset = btrfs_extent_inline_ref_offset(eb, iref); in btrfs_backref_add_tree_node()
3538 key.type = iter->cur_key.type; in btrfs_backref_add_tree_node()
3539 key.offset = iter->cur_key.offset; in btrfs_backref_add_tree_node()
3548 exist->owner == key.offset) || in btrfs_backref_add_tree_node()
3550 exist->bytenr == key.offset))) { in btrfs_backref_add_tree_node()
3555 /* SHARED_BLOCK_REF means key.offset is the parent bytenr */ in btrfs_backref_add_tree_node()
3563 * offset means the root objectid. We need to search in btrfs_backref_add_tree_node()
3572 * Unrecognized tree backref items (if they can pass tree-checker) in btrfs_backref_add_tree_node()
3577 cur->checked = 1; in btrfs_backref_add_tree_node()
3590 struct list_head *useless_node = &cache->useless_node; in btrfs_backref_finish_upper_links()
3591 struct btrfs_backref_edge *edge; in btrfs_backref_finish_upper_links() local
3595 ASSERT(start->checked); in btrfs_backref_finish_upper_links()
3597 /* Insert this node to cache if it's not COW-only */ in btrfs_backref_finish_upper_links()
3598 if (!start->cowonly) { in btrfs_backref_finish_upper_links()
3599 rb_node = rb_simple_insert(&cache->rb_root, start->bytenr, in btrfs_backref_finish_upper_links()
3600 &start->rb_node); in btrfs_backref_finish_upper_links()
3602 btrfs_backref_panic(cache->fs_info, start->bytenr, in btrfs_backref_finish_upper_links()
3603 -EEXIST); in btrfs_backref_finish_upper_links()
3604 list_add_tail(&start->lower, &cache->leaves); in btrfs_backref_finish_upper_links()
3612 list_for_each_entry(edge, &start->upper, list[LOWER]) in btrfs_backref_finish_upper_links()
3613 list_add_tail(&edge->list[UPPER], &pending_edge); in btrfs_backref_finish_upper_links()
3619 edge = list_first_entry(&pending_edge, in btrfs_backref_finish_upper_links()
3621 list_del_init(&edge->list[UPPER]); in btrfs_backref_finish_upper_links()
3622 upper = edge->node[UPPER]; in btrfs_backref_finish_upper_links()
3623 lower = edge->node[LOWER]; in btrfs_backref_finish_upper_links()
3626 if (upper->detached) { in btrfs_backref_finish_upper_links()
3627 list_del(&edge->list[LOWER]); in btrfs_backref_finish_upper_links()
3628 btrfs_backref_free_edge(cache, edge); in btrfs_backref_finish_upper_links()
3631 if (list_empty(&lower->upper)) in btrfs_backref_finish_upper_links()
3632 list_add(&lower->list, useless_node); in btrfs_backref_finish_upper_links()
3639 * So if we have upper->rb_node populated, this means a cache in btrfs_backref_finish_upper_links()
3640 * hit. We only need to link the edge, as @upper and all its in btrfs_backref_finish_upper_links()
3643 if (!RB_EMPTY_NODE(&upper->rb_node)) { in btrfs_backref_finish_upper_links()
3644 if (upper->lowest) { in btrfs_backref_finish_upper_links()
3645 list_del_init(&upper->lower); in btrfs_backref_finish_upper_links()
3646 upper->lowest = 0; in btrfs_backref_finish_upper_links()
3649 list_add_tail(&edge->list[UPPER], &upper->lower); in btrfs_backref_finish_upper_links()
3654 if (!upper->checked) { in btrfs_backref_finish_upper_links()
3656 return -EUCLEAN; in btrfs_backref_finish_upper_links()
3659 /* Sanity check, COW-only node has non-COW-only parent */ in btrfs_backref_finish_upper_links()
3660 if (start->cowonly != upper->cowonly) { in btrfs_backref_finish_upper_links()
3662 return -EUCLEAN; in btrfs_backref_finish_upper_links()
3665 /* Only cache non-COW-only (subvolume trees) tree blocks */ in btrfs_backref_finish_upper_links()
3666 if (!upper->cowonly) { in btrfs_backref_finish_upper_links()
3667 rb_node = rb_simple_insert(&cache->rb_root, upper->bytenr, in btrfs_backref_finish_upper_links()
3668 &upper->rb_node); in btrfs_backref_finish_upper_links()
3670 btrfs_backref_panic(cache->fs_info, in btrfs_backref_finish_upper_links()
3671 upper->bytenr, -EEXIST); in btrfs_backref_finish_upper_links()
3672 return -EUCLEAN; in btrfs_backref_finish_upper_links()
3676 list_add_tail(&edge->list[UPPER], &upper->lower); in btrfs_backref_finish_upper_links()
3682 list_for_each_entry(edge, &upper->upper, list[LOWER]) in btrfs_backref_finish_upper_links()
3683 list_add_tail(&edge->list[UPPER], &pending_edge); in btrfs_backref_finish_upper_links()
3693 struct btrfs_backref_edge *edge; in btrfs_backref_error_cleanup() local
3695 while (!list_empty(&cache->useless_node)) { in btrfs_backref_error_cleanup()
3696 lower = list_first_entry(&cache->useless_node, in btrfs_backref_error_cleanup()
3698 list_del_init(&lower->list); in btrfs_backref_error_cleanup()
3700 while (!list_empty(&cache->pending_edge)) { in btrfs_backref_error_cleanup()
3701 edge = list_first_entry(&cache->pending_edge, in btrfs_backref_error_cleanup()
3703 list_del(&edge->list[UPPER]); in btrfs_backref_error_cleanup()
3704 list_del(&edge->list[LOWER]); in btrfs_backref_error_cleanup()
3705 lower = edge->node[LOWER]; in btrfs_backref_error_cleanup()
3706 upper = edge->node[UPPER]; in btrfs_backref_error_cleanup()
3707 btrfs_backref_free_edge(cache, edge); in btrfs_backref_error_cleanup()
3713 if (list_empty(&lower->upper) && in btrfs_backref_error_cleanup()
3714 RB_EMPTY_NODE(&lower->rb_node)) in btrfs_backref_error_cleanup()
3715 list_add(&lower->list, &cache->useless_node); in btrfs_backref_error_cleanup()
3717 if (!RB_EMPTY_NODE(&upper->rb_node)) in btrfs_backref_error_cleanup()
3721 list_for_each_entry(edge, &upper->upper, list[LOWER]) in btrfs_backref_error_cleanup()
3722 list_add_tail(&edge->list[UPPER], in btrfs_backref_error_cleanup()
3723 &cache->pending_edge); in btrfs_backref_error_cleanup()
3724 if (list_empty(&upper->upper)) in btrfs_backref_error_cleanup()
3725 list_add(&upper->list, &cache->useless_node); in btrfs_backref_error_cleanup()
3728 while (!list_empty(&cache->useless_node)) { in btrfs_backref_error_cleanup()
3729 lower = list_first_entry(&cache->useless_node, in btrfs_backref_error_cleanup()
3731 list_del_init(&lower->list); in btrfs_backref_error_cleanup()
3738 ASSERT(list_empty(&cache->useless_node) && in btrfs_backref_error_cleanup()
3739 list_empty(&cache->pending_edge)); in btrfs_backref_error_cleanup()