Lines Matching full:refs

218 * Return 0 when both refs are for the same block (and can be merged).
271 * Add @newref to the @root rbtree, merging identical refs.
300 /* Identical refs, merge them and free @newref */ in prelim_ref_insert()
357 * delayed refs
370 * on disk refs (inline or keyed)
418 /* direct refs use root == 0, key == NULL */
428 /* indirect refs use parent == 0 */
606 * adding new delayed refs. To deal with this we need to look in cache in resolve_indirect_ref()
717 * We maintain three separate rbtrees: one for direct refs, one for
718 * indirect refs which have a key, and one for indirect refs which do not
722 * indirect refs with missing keys. An appropriate key is located and
723 * the ref is moved onto the tree for indirect refs. After all missing
750 * the tree, allocating new refs for each insertion, and then in resolve_indirect_refs()
827 * We may have inode lists attached to refs in the parents ulist, so we in resolve_indirect_refs()
828 * must free them before freeing the ulist and its refs. in resolve_indirect_refs()
885 * add all currently queued delayed refs from this head whose seq nr is
990 * refs have been checked. in add_delayed_refs()
1024 * enumerate all inline refs in add_inline_refs()
1367 * refs) for the given bytenr to the refs list, merges duplicates and resolves
1368 * indirect refs to their parent bytenr.
1440 * lock it so we have a consistent view of the refs at the given in find_parent_nodes()
1448 refcount_inc(&head->refs); in find_parent_nodes()
1577 * This walks the tree of merged and resolved refs. Tree blocks are in find_parent_nodes()
1589 * refs with a node->action of BTRFS_DROP_DELAYED_REF. in find_parent_nodes()
1591 * identical refs to keep the overall count correct. in find_parent_nodes()
1592 * prelim_ref_insert() will merge only those refs in find_parent_nodes()
1593 * which compare identically. Any refs having in find_parent_nodes()
1640 ret = ulist_add_merge_ptr(ctx->refs, ref->parent, in find_parent_nodes()
1666 * this ref to the ref we added to the 'refs' ulist. in find_parent_nodes()
1691 * added to the ulist at @ctx->refs, and that ulist is allocated by this
1696 * Returns 0 on success and < 0 on error. On error @ctx->refs is not allocated.
1702 ASSERT(ctx->refs == NULL); in btrfs_find_all_leafs()
1704 ctx->refs = ulist_alloc(GFP_NOFS); in btrfs_find_all_leafs()
1705 if (!ctx->refs) in btrfs_find_all_leafs()
1711 free_leaf_list(ctx->refs); in btrfs_find_all_leafs()
1712 ctx->refs = NULL; in btrfs_find_all_leafs()
1733 * This function requires @ctx->refs to be NULL, as it uses it for allocating a
1746 ASSERT(ctx->refs == NULL); in btrfs_find_all_roots_safe()
1748 ctx->refs = ulist_alloc(GFP_NOFS); in btrfs_find_all_roots_safe()
1749 if (!ctx->refs) in btrfs_find_all_roots_safe()
1755 ulist_free(ctx->refs); in btrfs_find_all_roots_safe()
1756 ctx->refs = NULL; in btrfs_find_all_roots_safe()
1777 node = ulist_next(ctx->refs, &uiter); in btrfs_find_all_roots_safe()
1784 ulist_free(ctx->refs); in btrfs_find_all_roots_safe()
1785 ctx->refs = NULL; in btrfs_find_all_roots_safe()
1813 ulist_init(&ctx->refs); in btrfs_alloc_backref_share_check_ctx()
1823 ulist_release(&ctx->refs); in btrfs_free_backref_share_ctx()
1843 * delayed refs, but continues on even when no running transaction exists.
1878 ulist_init(&ctx->refs); in btrfs_is_data_extent_shared()
1913 walk_ctx.refs = &ctx->refs; in btrfs_is_data_extent_shared()
1919 const unsigned long prev_ref_count = ctx->refs.nnodes; in btrfs_is_data_extent_shared()
1938 * the ctx->refs ulist, in which case we have to check multiple in btrfs_is_data_extent_shared()
1966 if ((ctx->refs.nnodes - prev_ref_count) > 1) in btrfs_is_data_extent_shared()
1972 node = ulist_next(&ctx->refs, &uiter); in btrfs_is_data_extent_shared()
2037 ulist_release(&ctx->refs); in btrfs_is_data_extent_shared()
2272 * helper function to iterate extent inline refs. ptr must point to a 0 value
2274 * if more refs exist, 0 is returned and the next call to
2414 struct ulist *refs; in iterate_extent_inodes() local
2448 refs = ctx->refs; in iterate_extent_inodes()
2449 ctx->refs = NULL; in iterate_extent_inodes()
2452 while (!ret && (ref_node = ulist_next(refs, &ref_uiter))) { in iterate_extent_inodes()
2511 free_leaf_list(refs); in iterate_extent_inodes()
2974 /* We're still inside the inline refs */ in btrfs_backref_iter_next()
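
Taken together, the @ctx->refs hits above (btrfs_find_all_leafs(), btrfs_find_all_roots_safe(), btrfs_is_data_extent_shared() and iterate_extent_inodes()) follow one lifecycle: the ulist starts out NULL, is allocated with ulist_alloc(GFP_NOFS), is filled and walked with ULIST_ITER_INIT()/ulist_next(), and is released with ulist_free() (or free_leaf_list() when inode lists hang off the refs) on both the error and success paths. Below is only a minimal sketch of that allocate/fill/walk/free pattern, assuming the ulist helpers declared in fs/btrfs/ulist.h; walk_refs_example() and the bytenr value are invented for illustration and are not part of the file being searched.

#include <linux/errno.h>
#include <linux/gfp.h>
#include <linux/printk.h>
#include "ulist.h"	/* ulist_alloc(), ulist_add(), ulist_next(), ulist_free() */

/* Hypothetical walker mirroring the ctx->refs lifecycle seen in the hits above. */
static int walk_refs_example(void)
{
	struct ulist *refs;
	struct ulist_iterator uiter;
	struct ulist_node *node;
	int ret;

	/* btrfs_find_all_leafs() style: the refs ulist must not be allocated yet. */
	refs = ulist_alloc(GFP_NOFS);
	if (!refs)
		return -ENOMEM;

	/* Collect a bytenr; the aux value (0 here) can carry a per-ref pointer. */
	ret = ulist_add(refs, 4096, 0, GFP_NOFS);
	if (ret < 0)
		goto out;

	/* Walk every collected ref, as the callers of find_parent_nodes() do. */
	ULIST_ITER_INIT(&uiter);
	while ((node = ulist_next(refs, &uiter)))
		pr_debug("ref bytenr %llu\n", node->val);

	ret = 0;
out:
	/* Error and success paths both release the ulist. */
	ulist_free(refs);
	return ret;
}

By contrast, the btrfs_is_data_extent_shared() hits show the ulist embedded in a context and handled with ulist_init()/ulist_release(), which avoids a separate allocation for a structure that is reused across calls.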