Lines matching "+full:ref2 +full:-" in fs/btrfs/delayed-ref.c (Linux kernel)
1 // SPDX-License-Identifier: GPL-2.0
11 #include "delayed-ref.h"
14 #include "space-info.h"
15 #include "tree-mod-log.h"
32 struct btrfs_block_rsv *delayed_refs_rsv = &fs_info->delayed_refs_rsv; in btrfs_check_space_for_delayed_refs()
33 struct btrfs_block_rsv *global_rsv = &fs_info->global_block_rsv; in btrfs_check_space_for_delayed_refs()
37 spin_lock(&global_rsv->lock); in btrfs_check_space_for_delayed_refs()
38 reserved = global_rsv->reserved; in btrfs_check_space_for_delayed_refs()
39 spin_unlock(&global_rsv->lock); in btrfs_check_space_for_delayed_refs()
47 spin_lock(&delayed_refs_rsv->lock); in btrfs_check_space_for_delayed_refs()
48 reserved += delayed_refs_rsv->reserved; in btrfs_check_space_for_delayed_refs()
49 if (delayed_refs_rsv->size >= reserved) in btrfs_check_space_for_delayed_refs()
51 spin_unlock(&delayed_refs_rsv->lock); in btrfs_check_space_for_delayed_refs()
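The fragment above is btrfs_check_space_for_delayed_refs(): it samples the global block reserve and the delayed refs reserve, each under its own spinlock, and reports whether the delayed refs reserve's target size still exceeds what the two hold together. Below is a minimal user-space sketch of that pattern, with pthread mutexes standing in for spinlocks; struct block_rsv and short_on_space() here are hypothetical stand-ins, not kernel API.

#include <pthread.h>
#include <stdbool.h>
#include <stdint.h>

struct block_rsv {
	pthread_mutex_t lock;
	uint64_t size;		/* target number of reserved bytes */
	uint64_t reserved;	/* bytes currently reserved */
};

/*
 * Return true when the delayed reserve's target exceeds what both
 * reserves hold together, i.e. we are short on space.
 */
static bool short_on_space(struct block_rsv *global, struct block_rsv *delayed)
{
	uint64_t reserved;
	bool is_short = false;

	/* Sample each reserve under its own lock; never hold both. */
	pthread_mutex_lock(&global->lock);
	reserved = global->reserved;
	pthread_mutex_unlock(&global->lock);

	pthread_mutex_lock(&delayed->lock);
	reserved += delayed->reserved;
	if (delayed->size >= reserved)
		is_short = true;
	pthread_mutex_unlock(&delayed->lock);

	return is_short;
}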
67 struct btrfs_block_rsv *block_rsv = &fs_info->delayed_refs_rsv; in btrfs_delayed_refs_rsv_release()
83  * This is to be called anytime we may have adjusted trans->delayed_ref_updates
84  * or trans->delayed_ref_csum_deletions; it calculates the additional size and adds it to the delayed refs rsv.
89 struct btrfs_fs_info *fs_info = trans->fs_info; in btrfs_update_delayed_refs_rsv()
90 struct btrfs_block_rsv *delayed_rsv = &fs_info->delayed_refs_rsv; in btrfs_update_delayed_refs_rsv()
91 struct btrfs_block_rsv *local_rsv = &trans->delayed_rsv; in btrfs_update_delayed_refs_rsv()
95 num_bytes = btrfs_calc_delayed_ref_bytes(fs_info, trans->delayed_ref_updates); in btrfs_update_delayed_refs_rsv()
97 trans->delayed_ref_csum_deletions); in btrfs_update_delayed_refs_rsv()
110  * avoid exhausting it and reaching -ENOSPC during a transaction commit. in btrfs_update_delayed_refs_rsv()
112 spin_lock(&local_rsv->lock); in btrfs_update_delayed_refs_rsv()
113 reserved_bytes = min(num_bytes, local_rsv->reserved); in btrfs_update_delayed_refs_rsv()
114 local_rsv->reserved -= reserved_bytes; in btrfs_update_delayed_refs_rsv()
115 local_rsv->full = (local_rsv->reserved >= local_rsv->size); in btrfs_update_delayed_refs_rsv()
116 spin_unlock(&local_rsv->lock); in btrfs_update_delayed_refs_rsv()
118 spin_lock(&delayed_rsv->lock); in btrfs_update_delayed_refs_rsv()
119 delayed_rsv->size += num_bytes; in btrfs_update_delayed_refs_rsv()
120 delayed_rsv->reserved += reserved_bytes; in btrfs_update_delayed_refs_rsv()
121 delayed_rsv->full = (delayed_rsv->reserved >= delayed_rsv->size); in btrfs_update_delayed_refs_rsv()
122 spin_unlock(&delayed_rsv->lock); in btrfs_update_delayed_refs_rsv()
123 trans->delayed_ref_updates = 0; in btrfs_update_delayed_refs_rsv()
124 trans->delayed_ref_csum_deletions = 0; in btrfs_update_delayed_refs_rsv()
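btrfs_update_delayed_refs_rsv() above converts the transaction's pending delayed-ref and csum-deletion counts into bytes, takes as much of that as it can from the transaction-local reserve, and adds both the obligation (size) and the funded portion (reserved) to the shared reserve. A sketch of the two-lock migration step, under the same assumptions as the previous example (hypothetical names, mutexes for spinlocks):

#include <pthread.h>
#include <stdbool.h>
#include <stdint.h>

struct rsv {
	pthread_mutex_t lock;
	uint64_t size;
	uint64_t reserved;
	bool full;
};

static uint64_t min_u64(uint64_t a, uint64_t b)
{
	return a < b ? a : b;
}

/*
 * Grow the shared reserve by num_bytes, funding as much of it as
 * possible from the transaction-local reserve.
 */
static void migrate_to_shared(struct rsv *local, struct rsv *shared,
			      uint64_t num_bytes)
{
	uint64_t reserved_bytes;

	pthread_mutex_lock(&local->lock);
	reserved_bytes = min_u64(num_bytes, local->reserved);
	local->reserved -= reserved_bytes;
	local->full = (local->reserved >= local->size);
	pthread_mutex_unlock(&local->lock);

	pthread_mutex_lock(&shared->lock);
	shared->size += num_bytes;		/* the new obligation */
	shared->reserved += reserved_bytes;	/* what we could fund now */
	shared->full = (shared->reserved >= shared->size);
	pthread_mutex_unlock(&shared->lock);
}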
133 struct btrfs_block_rsv *delayed_rsv = &fs_info->delayed_refs_rsv; in btrfs_inc_delayed_refs_rsv_bg_inserts()
135 spin_lock(&delayed_rsv->lock); in btrfs_inc_delayed_refs_rsv_bg_inserts()
141 delayed_rsv->size += btrfs_calc_insert_metadata_size(fs_info, 1); in btrfs_inc_delayed_refs_rsv_bg_inserts()
142 delayed_rsv->full = false; in btrfs_inc_delayed_refs_rsv_bg_inserts()
143 spin_unlock(&delayed_rsv->lock); in btrfs_inc_delayed_refs_rsv_bg_inserts()
152 struct btrfs_block_rsv *delayed_rsv = &fs_info->delayed_refs_rsv; in btrfs_dec_delayed_refs_rsv_bg_inserts()
168 struct btrfs_block_rsv *delayed_rsv = &fs_info->delayed_refs_rsv; in btrfs_inc_delayed_refs_rsv_bg_updates()
170 spin_lock(&delayed_rsv->lock); in btrfs_inc_delayed_refs_rsv_bg_updates()
176 delayed_rsv->size += btrfs_calc_metadata_size(fs_info, 1); in btrfs_inc_delayed_refs_rsv_bg_updates()
177 delayed_rsv->full = false; in btrfs_inc_delayed_refs_rsv_bg_updates()
178 spin_unlock(&delayed_rsv->lock); in btrfs_inc_delayed_refs_rsv_bg_updates()
187 struct btrfs_block_rsv *delayed_rsv = &fs_info->delayed_refs_rsv; in btrfs_dec_delayed_refs_rsv_bg_updates()
204  * It will return -ENOSPC if we can't make the reservation.
209 struct btrfs_block_rsv *block_rsv = &fs_info->delayed_refs_rsv; in btrfs_delayed_refs_rsv_refill()
210 struct btrfs_space_info *space_info = block_rsv->space_info; in btrfs_delayed_refs_rsv_refill()
215 int ret = -ENOSPC; in btrfs_delayed_refs_rsv_refill()
217 spin_lock(&block_rsv->lock); in btrfs_delayed_refs_rsv_refill()
218 if (block_rsv->reserved < block_rsv->size) { in btrfs_delayed_refs_rsv_refill()
219 num_bytes = block_rsv->size - block_rsv->reserved; in btrfs_delayed_refs_rsv_refill()
222 spin_unlock(&block_rsv->lock); in btrfs_delayed_refs_rsv_refill()
235 spin_lock(&block_rsv->lock); in btrfs_delayed_refs_rsv_refill()
236 if (block_rsv->reserved < block_rsv->size) { in btrfs_delayed_refs_rsv_refill()
237 u64 needed = block_rsv->size - block_rsv->reserved; in btrfs_delayed_refs_rsv_refill()
240 block_rsv->reserved += needed; in btrfs_delayed_refs_rsv_refill()
241 block_rsv->full = true; in btrfs_delayed_refs_rsv_refill()
242 to_free = num_bytes - needed; in btrfs_delayed_refs_rsv_refill()
245 block_rsv->reserved += num_bytes; in btrfs_delayed_refs_rsv_refill()
253 spin_unlock(&block_rsv->lock); in btrfs_delayed_refs_rsv_refill()
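The refill path computes the shortfall under the lock, drops the lock to reserve the bytes from the space info (which may flush and fail with -ENOSPC), then re-checks under the lock because another task may have refilled the reserve in the meantime, giving back any excess. A simplified sketch with a hypothetical pool_reserve()/pool_release() pair standing in for the kernel's metadata reservation calls:

#include <errno.h>
#include <pthread.h>
#include <stdbool.h>
#include <stdint.h>

struct rsv {
	pthread_mutex_t lock;
	uint64_t size;
	uint64_t reserved;
	bool full;
};

/* Toy space pool in place of the real space_info reservation path. */
static uint64_t pool_free = 1024 * 1024;
static pthread_mutex_t pool_lock = PTHREAD_MUTEX_INITIALIZER;

static int pool_reserve(uint64_t bytes)
{
	int ret = -ENOSPC;

	pthread_mutex_lock(&pool_lock);
	if (pool_free >= bytes) {
		pool_free -= bytes;
		ret = 0;
	}
	pthread_mutex_unlock(&pool_lock);
	return ret;
}

static void pool_release(uint64_t bytes)
{
	pthread_mutex_lock(&pool_lock);
	pool_free += bytes;
	pthread_mutex_unlock(&pool_lock);
}

static int refill(struct rsv *rsv)
{
	uint64_t num_bytes = 0;
	uint64_t to_free = 0;
	int ret;

	pthread_mutex_lock(&rsv->lock);
	if (rsv->reserved < rsv->size)
		num_bytes = rsv->size - rsv->reserved;
	pthread_mutex_unlock(&rsv->lock);

	if (num_bytes == 0)
		return 0;

	/* Done without the lock held: reserving may block or fail. */
	ret = pool_reserve(num_bytes);
	if (ret)
		return ret;

	/*
	 * Someone may have refilled the reserve while we were not holding
	 * the lock: keep only what is still needed, give back the rest.
	 */
	pthread_mutex_lock(&rsv->lock);
	if (rsv->reserved < rsv->size) {
		uint64_t needed = rsv->size - rsv->reserved;

		if (num_bytes >= needed) {
			rsv->reserved += needed;
			rsv->full = true;
			to_free = num_bytes - needed;
		} else {
			rsv->reserved += num_bytes;
		}
	} else {
		to_free = num_bytes;
	}
	pthread_mutex_unlock(&rsv->lock);

	if (to_free)
		pool_release(to_free);
	return 0;
}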
268  struct btrfs_delayed_ref_node *ref2) in comp_data_refs()
270 if (ref1->data_ref.objectid < ref2->data_ref.objectid) in comp_data_refs()
271 return -1; in comp_data_refs()
272 if (ref1->data_ref.objectid > ref2->data_ref.objectid) in comp_data_refs()
274 if (ref1->data_ref.offset < ref2->data_ref.offset) in comp_data_refs()
275 return -1; in comp_data_refs()
276 if (ref1->data_ref.offset > ref2->data_ref.offset) in comp_data_refs()
282  struct btrfs_delayed_ref_node *ref2, in comp_refs()
287 if (ref1->type < ref2->type) in comp_refs()
288 return -1; in comp_refs()
289 if (ref1->type > ref2->type) in comp_refs()
291 if (ref1->type == BTRFS_SHARED_BLOCK_REF_KEY || in comp_refs()
292 ref1->type == BTRFS_SHARED_DATA_REF_KEY) { in comp_refs()
293 if (ref1->parent < ref2->parent) in comp_refs()
294 return -1; in comp_refs()
295 if (ref1->parent > ref2->parent) in comp_refs()
298 if (ref1->ref_root < ref2->ref_root) in comp_refs()
299 return -1; in comp_refs()
300 if (ref1->ref_root > ref2->ref_root) in comp_refs()
302 if (ref1->type == BTRFS_EXTENT_DATA_REF_KEY) in comp_refs()
303 ret = comp_data_refs(ref1, ref2); in comp_refs()
308 if (ref1->seq < ref2->seq) in comp_refs()
309 return -1; in comp_refs()
310 if (ref1->seq > ref2->seq) in comp_refs()
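comp_data_refs() and comp_refs() implement a lexicographic comparison: type first, then parent or root depending on the ref type, then the data fields, and optionally the sequence number. The sketch below flattens this into one comparator over a plain struct; the kernel picks which fields to compare based on the ref type, and struct ref_key and comp_keys() here are hypothetical names:

#include <stdint.h>

/* Simplified stand-in for the delayed ref key fields compared above. */
struct ref_key {
	uint8_t  type;
	uint64_t parent;	/* shared refs compare this */
	uint64_t root;		/* keyed refs compare this */
	uint64_t objectid;	/* data refs only */
	uint64_t offset;	/* data refs only */
	uint64_t seq;
};

static int cmp_u64(uint64_t a, uint64_t b)
{
	if (a < b)
		return -1;
	if (a > b)
		return 1;
	return 0;
}

/*
 * Compare field by field, returning at the first difference, the same
 * cascade pattern as comp_refs()/comp_data_refs() above.
 */
static int comp_keys(const struct ref_key *a, const struct ref_key *b,
		     int check_seq)
{
	int ret;

	ret = cmp_u64(a->type, b->type);
	if (ret)
		return ret;
	ret = cmp_u64(a->parent, b->parent);
	if (ret)
		return ret;
	ret = cmp_u64(a->root, b->root);
	if (ret)
		return ret;
	ret = cmp_u64(a->objectid, b->objectid);
	if (ret)
		return ret;
	ret = cmp_u64(a->offset, b->offset);
	if (ret)
		return ret;
	if (check_seq)
		ret = cmp_u64(a->seq, b->seq);
	return ret;
}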
320 struct rb_node **p = &root->rb_root.rb_node; in htree_insert()
328 bytenr = ins->bytenr; in htree_insert()
334 if (bytenr < entry->bytenr) { in htree_insert()
335 p = &(*p)->rb_left; in htree_insert()
336 } else if (bytenr > entry->bytenr) { in htree_insert()
337 p = &(*p)->rb_right; in htree_insert()
352 struct rb_node **p = &root->rb_root.rb_node; in tree_insert()
353 struct rb_node *node = &ins->ref_node; in tree_insert()
366 p = &(*p)->rb_left; in tree_insert()
368 p = &(*p)->rb_right; in tree_insert()
386 n = rb_first_cached(&dr->href_root); in find_first_ref_head()
404 struct rb_root *root = &dr->href_root.rb_root; in find_ref_head()
408 n = root->rb_node; in find_ref_head()
413 if (bytenr < entry->bytenr) in find_ref_head()
414 n = n->rb_left; in find_ref_head()
415 else if (bytenr > entry->bytenr) in find_ref_head()
416 n = n->rb_right; in find_ref_head()
421 if (bytenr > entry->bytenr) { in find_ref_head()
422 n = rb_next(&entry->href_node); in find_ref_head()
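find_ref_head() is a plain binary search over heads keyed by bytenr, with a twist: when return_bigger is set and there is no exact match, it steps to the head with the next higher bytenr. A self-contained sketch over an unbalanced BST with parent pointers; the kernel uses an rb-tree and rb_next(), and all names below are hypothetical:

#include <stddef.h>
#include <stdint.h>

struct node {
	uint64_t bytenr;
	struct node *left;
	struct node *right;
	struct node *parent;
};

/* In-order successor in a BST with parent pointers (rb_next() analogue). */
static struct node *successor(struct node *n)
{
	struct node *p;

	if (n->right) {
		n = n->right;
		while (n->left)
			n = n->left;
		return n;
	}
	p = n->parent;
	while (p && n == p->right) {
		n = p;
		p = p->parent;
	}
	return p;
}

/*
 * Find the node with the given bytenr. With return_bigger set, a miss
 * returns the node with the smallest bytenr above the target instead
 * (NULL if the target is past the last node).
 */
static struct node *find_or_next(struct node *root, uint64_t bytenr,
				 int return_bigger)
{
	struct node *n = root;
	struct node *entry = NULL;

	while (n) {
		entry = n;
		if (bytenr < entry->bytenr)
			n = entry->left;
		else if (bytenr > entry->bytenr)
			n = entry->right;
		else
			return entry;
	}
	if (entry && return_bigger) {
		/*
		 * The search fell off at 'entry'. If we last stepped right,
		 * entry sits below the target and its successor is the
		 * answer; if we stepped left, entry itself is already the
		 * next bigger node.
		 */
		if (bytenr > entry->bytenr)
			entry = successor(entry);
		return entry;
	}
	return NULL;
}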
436 lockdep_assert_held(&delayed_refs->lock); in btrfs_delayed_ref_lock()
437 if (mutex_trylock(&head->mutex)) in btrfs_delayed_ref_lock()
440 refcount_inc(&head->refs); in btrfs_delayed_ref_lock()
441 spin_unlock(&delayed_refs->lock); in btrfs_delayed_ref_lock()
443 mutex_lock(&head->mutex); in btrfs_delayed_ref_lock()
444 spin_lock(&delayed_refs->lock); in btrfs_delayed_ref_lock()
445 if (RB_EMPTY_NODE(&head->href_node)) { in btrfs_delayed_ref_lock()
446 mutex_unlock(&head->mutex); in btrfs_delayed_ref_lock()
448 return -EAGAIN; in btrfs_delayed_ref_lock()
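btrfs_delayed_ref_lock() deals with lock ordering: delayed_refs->lock is taken before head->mutex, so if the trylock fails it must pin the head, drop the outer lock, block on the mutex, then retake the outer lock and re-check that the head was not processed and removed in the meantime, returning -EAGAIN so the caller retries the lookup. A pthread sketch of the same dance; the types are hypothetical, and the kernel uses a refcount plus RB_EMPTY_NODE() for the removal check:

#include <errno.h>
#include <pthread.h>
#include <stdbool.h>

struct ref_head {
	pthread_mutex_t mutex;
	int refs;	/* simplified refcount pinning the head */
	bool removed;	/* stands in for the RB_EMPTY_NODE() check */
};

/*
 * Called with *list_lock held; returns with it held either way.
 * list_lock orders before head->mutex, so we may not block on the
 * mutex while holding it.
 */
static int lock_head(pthread_mutex_t *list_lock, struct ref_head *head)
{
	/* Fast path: take the mutex without dropping the outer lock. */
	if (pthread_mutex_trylock(&head->mutex) == 0)
		return 0;

	head->refs++;	/* pin the head so it cannot go away meanwhile */
	pthread_mutex_unlock(list_lock);

	pthread_mutex_lock(&head->mutex);
	pthread_mutex_lock(list_lock);
	head->refs--;
	if (head->removed) {
		/* Somebody processed and deleted the head while we slept. */
		pthread_mutex_unlock(&head->mutex);
		return -EAGAIN;	/* caller must look up a head again */
	}
	return 0;
}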
459 lockdep_assert_held(&head->lock); in drop_delayed_ref()
460 rb_erase_cached(&ref->ref_node, &head->ref_tree); in drop_delayed_ref()
461 RB_CLEAR_NODE(&ref->ref_node); in drop_delayed_ref()
462 if (!list_empty(&ref->add_list)) in drop_delayed_ref()
463 list_del(&ref->add_list); in drop_delayed_ref()
465 atomic_dec(&delayed_refs->num_entries); in drop_delayed_ref()
476 struct rb_node *node = rb_next(&ref->ref_node); in merge_ref()
484 if (seq && next->seq >= seq) in merge_ref()
489 if (ref->action == next->action) { in merge_ref()
490 mod = next->ref_mod; in merge_ref()
492 if (ref->ref_mod < next->ref_mod) { in merge_ref()
496 mod = -next->ref_mod; in merge_ref()
500 ref->ref_mod += mod; in merge_ref()
501 if (ref->ref_mod == 0) { in merge_ref()
508 WARN_ON(ref->type == BTRFS_TREE_BLOCK_REF_KEY || in merge_ref()
509 ref->type == BTRFS_SHARED_BLOCK_REF_KEY); in merge_ref()
524 lockdep_assert_held(&head->lock); in btrfs_merge_delayed_refs()
526 if (RB_EMPTY_ROOT(&head->ref_tree.rb_root)) in btrfs_merge_delayed_refs()
530 if (head->is_data) in btrfs_merge_delayed_refs()
535 for (node = rb_first_cached(&head->ref_tree); node; in btrfs_merge_delayed_refs()
538 if (seq && ref->seq >= seq) in btrfs_merge_delayed_refs()
565 lockdep_assert_held(&delayed_refs->lock); in btrfs_select_ref_head()
567 head = find_ref_head(delayed_refs, delayed_refs->run_delayed_start, in btrfs_select_ref_head()
569 if (!head && delayed_refs->run_delayed_start != 0) { in btrfs_select_ref_head()
570 delayed_refs->run_delayed_start = 0; in btrfs_select_ref_head()
576 while (head->processing) { in btrfs_select_ref_head()
579 node = rb_next(&head->href_node); in btrfs_select_ref_head()
581 if (delayed_refs->run_delayed_start == 0) in btrfs_select_ref_head()
583 delayed_refs->run_delayed_start = 0; in btrfs_select_ref_head()
590 head->processing = true; in btrfs_select_ref_head()
591 WARN_ON(delayed_refs->num_heads_ready == 0); in btrfs_select_ref_head()
592 delayed_refs->num_heads_ready--; in btrfs_select_ref_head()
593 delayed_refs->run_delayed_start = head->bytenr + in btrfs_select_ref_head()
594 head->num_bytes; in btrfs_select_ref_head()
601 lockdep_assert_held(&delayed_refs->lock); in btrfs_delete_ref_head()
602 lockdep_assert_held(&head->lock); in btrfs_delete_ref_head()
604 rb_erase_cached(&head->href_node, &delayed_refs->href_root); in btrfs_delete_ref_head()
605 RB_CLEAR_NODE(&head->href_node); in btrfs_delete_ref_head()
606 atomic_dec(&delayed_refs->num_entries); in btrfs_delete_ref_head()
607 delayed_refs->num_heads--; in btrfs_delete_ref_head()
608 if (!head->processing) in btrfs_delete_ref_head()
609 delayed_refs->num_heads_ready--; in btrfs_delete_ref_head()
623 struct btrfs_delayed_ref_root *root = &trans->transaction->delayed_refs; in insert_delayed_ref()
627 spin_lock(&href->lock); in insert_delayed_ref()
628 exist = tree_insert(&href->ref_tree, ref); in insert_delayed_ref()
630 if (ref->action == BTRFS_ADD_DELAYED_REF) in insert_delayed_ref()
631 list_add_tail(&ref->add_list, &href->ref_add_list); in insert_delayed_ref()
632 atomic_inc(&root->num_entries); in insert_delayed_ref()
633 spin_unlock(&href->lock); in insert_delayed_ref()
634 trans->delayed_ref_updates++; in insert_delayed_ref()
639 if (exist->action == ref->action) { in insert_delayed_ref()
640 mod = ref->ref_mod; in insert_delayed_ref()
643 if (exist->ref_mod < ref->ref_mod) { in insert_delayed_ref()
644 exist->action = ref->action; in insert_delayed_ref()
645 mod = -exist->ref_mod; in insert_delayed_ref()
646 exist->ref_mod = ref->ref_mod; in insert_delayed_ref()
647 if (ref->action == BTRFS_ADD_DELAYED_REF) in insert_delayed_ref()
648 list_add_tail(&exist->add_list, in insert_delayed_ref()
649 &href->ref_add_list); in insert_delayed_ref()
650 else if (ref->action == BTRFS_DROP_DELAYED_REF) { in insert_delayed_ref()
651 ASSERT(!list_empty(&exist->add_list)); in insert_delayed_ref()
652 list_del_init(&exist->add_list); in insert_delayed_ref()
657 mod = -ref->ref_mod; in insert_delayed_ref()
659 exist->ref_mod += mod; in insert_delayed_ref()
662 if (exist->ref_mod == 0) in insert_delayed_ref()
663 drop_delayed_ref(trans->fs_info, root, href, exist); in insert_delayed_ref()
664 spin_unlock(&href->lock); in insert_delayed_ref()
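When tree_insert() finds an existing node for the same key, insert_delayed_ref() folds the new ref into it: same action means the counts accumulate; opposite actions cancel, with the larger count deciding the surviving action; a net of zero drops the ref entirely. Here is that arithmetic isolated into a small self-contained helper (enum action and fold_ref() are hypothetical names):

#include <stdbool.h>

enum action { REF_ADD, REF_DROP };

struct ref {
	enum action action;
	int mod;	/* positive count; the action carries the sign */
};

/*
 * Fold a newly inserted ref into an existing one for the same key.
 * Returns true when the pair cancels out entirely and the existing
 * ref should be dropped.
 */
static bool fold_ref(struct ref *exist, const struct ref *add)
{
	if (exist->action == add->action) {
		/* Same direction: counts simply accumulate. */
		exist->mod += add->mod;
	} else if (exist->mod < add->mod) {
		/*
		 * The new ref outweighs the old one: the surviving ref
		 * takes the new action and the difference of counts.
		 */
		exist->action = add->action;
		exist->mod = add->mod - exist->mod;
	} else {
		/* The old ref wins (or they tie): subtract. */
		exist->mod -= add->mod;
	}
	return exist->mod == 0;
}

For example, folding a 5-count add into a 3-count drop leaves a 2-count add, while folding a 3-count add into a 3-count drop nets to zero and the ref is removed.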
677 &trans->transaction->delayed_refs; in update_existing_head_ref()
678 struct btrfs_fs_info *fs_info = trans->fs_info; in update_existing_head_ref()
681 BUG_ON(existing->is_data != update->is_data); in update_existing_head_ref()
683 spin_lock(&existing->lock); in update_existing_head_ref()
690 if (!existing->owning_root) in update_existing_head_ref()
691 existing->owning_root = update->owning_root; in update_existing_head_ref()
693 if (update->must_insert_reserved) { in update_existing_head_ref()
701 existing->must_insert_reserved = update->must_insert_reserved; in update_existing_head_ref()
702 existing->owning_root = update->owning_root; in update_existing_head_ref()
708 existing->num_bytes = update->num_bytes; in update_existing_head_ref()
712 if (update->extent_op) { in update_existing_head_ref()
713 if (!existing->extent_op) { in update_existing_head_ref()
714 existing->extent_op = update->extent_op; in update_existing_head_ref()
716 if (update->extent_op->update_key) { in update_existing_head_ref()
717 memcpy(&existing->extent_op->key, in update_existing_head_ref()
718 &update->extent_op->key, in update_existing_head_ref()
719 sizeof(update->extent_op->key)); in update_existing_head_ref()
720 existing->extent_op->update_key = true; in update_existing_head_ref()
722 if (update->extent_op->update_flags) { in update_existing_head_ref()
723 existing->extent_op->flags_to_set |= in update_existing_head_ref()
724 update->extent_op->flags_to_set; in update_existing_head_ref()
725 existing->extent_op->update_flags = true; in update_existing_head_ref()
727 btrfs_free_delayed_extent_op(update->extent_op); in update_existing_head_ref()
733 * currently, for refs we just added we know we're a-ok. in update_existing_head_ref()
735 old_ref_mod = existing->total_ref_mod; in update_existing_head_ref()
736 existing->ref_mod += update->ref_mod; in update_existing_head_ref()
737 existing->total_ref_mod += update->ref_mod; in update_existing_head_ref()
745 if (existing->is_data) { in update_existing_head_ref()
748 existing->num_bytes); in update_existing_head_ref()
750 if (existing->total_ref_mod >= 0 && old_ref_mod < 0) { in update_existing_head_ref()
751 delayed_refs->pending_csums -= existing->num_bytes; in update_existing_head_ref()
754 if (existing->total_ref_mod < 0 && old_ref_mod >= 0) { in update_existing_head_ref()
755 delayed_refs->pending_csums += existing->num_bytes; in update_existing_head_ref()
756 trans->delayed_ref_csum_deletions += csum_leaves; in update_existing_head_ref()
760 spin_unlock(&existing->lock); in update_existing_head_ref()
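The tail of update_existing_head_ref() keeps delayed_refs->pending_csums in sync with the head's net effect: csum-deletion work exists only while total_ref_mod is negative (the extent is going away), so the counter is adjusted exactly on sign transitions. A sketch of that bookkeeping; in the kernel this applies only to data extents and also adjusts the csum-leaves counters, and the names here are hypothetical:

#include <stdint.h>

struct head_stats {
	int64_t total_ref_mod;	/* net effect of all queued mods */
	uint64_t num_bytes;
};

struct csum_accounting {
	uint64_t pending_csums;	/* bytes whose csum items may be deleted */
};

/*
 * Apply a ref_mod delta and keep pending_csums consistent by acting
 * only when the net effect changes sign.
 */
static void apply_ref_mod(struct head_stats *head,
			  struct csum_accounting *acct, int64_t delta)
{
	int64_t old_total = head->total_ref_mod;

	head->total_ref_mod += delta;

	if (old_total < 0 && head->total_ref_mod >= 0)
		acct->pending_csums -= head->num_bytes;	/* no longer deleting */
	else if (old_total >= 0 && head->total_ref_mod < 0)
		acct->pending_csums += head->num_bytes;	/* now deleting */
}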
772 BUG_ON(generic_ref->type != BTRFS_REF_DATA && reserved); in init_delayed_ref_head()
774 switch (generic_ref->action) { in init_delayed_ref_head()
786 count_mod = -1; in init_delayed_ref_head()
794  * ref->must_insert_reserved is the flag used to record that accounting mods are required. in init_delayed_ref_head()
805 refcount_set(&head_ref->refs, 1); in init_delayed_ref_head()
806 head_ref->bytenr = generic_ref->bytenr; in init_delayed_ref_head()
807 head_ref->num_bytes = generic_ref->num_bytes; in init_delayed_ref_head()
808 head_ref->ref_mod = count_mod; in init_delayed_ref_head()
809 head_ref->reserved_bytes = reserved; in init_delayed_ref_head()
810 head_ref->must_insert_reserved = must_insert_reserved; in init_delayed_ref_head()
811 head_ref->owning_root = generic_ref->owning_root; in init_delayed_ref_head()
812 head_ref->is_data = (generic_ref->type == BTRFS_REF_DATA); in init_delayed_ref_head()
813 head_ref->is_system = (generic_ref->ref_root == BTRFS_CHUNK_TREE_OBJECTID); in init_delayed_ref_head()
814 head_ref->ref_tree = RB_ROOT_CACHED; in init_delayed_ref_head()
815 INIT_LIST_HEAD(&head_ref->ref_add_list); in init_delayed_ref_head()
816 RB_CLEAR_NODE(&head_ref->href_node); in init_delayed_ref_head()
817 head_ref->processing = false; in init_delayed_ref_head()
818 head_ref->total_ref_mod = count_mod; in init_delayed_ref_head()
819 spin_lock_init(&head_ref->lock); in init_delayed_ref_head()
820 mutex_init(&head_ref->mutex); in init_delayed_ref_head()
823 if (generic_ref->type == BTRFS_REF_METADATA) in init_delayed_ref_head()
824 head_ref->level = generic_ref->tree_ref.level; in init_delayed_ref_head()
826 head_ref->level = U8_MAX; in init_delayed_ref_head()
829 if (generic_ref->ref_root && reserved) { in init_delayed_ref_head()
830 qrecord->data_rsv = reserved; in init_delayed_ref_head()
831 qrecord->data_rsv_refroot = generic_ref->ref_root; in init_delayed_ref_head()
833 qrecord->bytenr = generic_ref->bytenr; in init_delayed_ref_head()
834 qrecord->num_bytes = generic_ref->num_bytes; in init_delayed_ref_head()
835 qrecord->old_roots = NULL; in init_delayed_ref_head()
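init_delayed_ref_head() derives the head's initial ref_mod from the action: +1 by default, -1 for a drop, 0 for a head-only update, and for a newly allocated extent it instead sets must_insert_reserved so the reserved space is converted when the extent item finally lands (or is released if it never does). A sketch of that mapping; the enum values and helper below are hypothetical names mirroring the kernel's switch:

#include <stdbool.h>

enum delayed_action {
	DELAYED_ADD_EXTENT,	/* insert a brand new extent */
	DELAYED_ADD_REF,	/* add one reference */
	DELAYED_DROP_REF,	/* drop one reference */
	DELAYED_UPDATE_HEAD,	/* flags/key update only */
};

/*
 * Derive the head's initial ref_mod and the must_insert_reserved flag
 * from the action.
 */
static void action_to_mod(enum delayed_action action, int *count_mod,
			  bool *must_insert_reserved)
{
	*count_mod = 1;
	*must_insert_reserved = false;

	switch (action) {
	case DELAYED_UPDATE_HEAD:
		*count_mod = 0;		/* no reference count change */
		break;
	case DELAYED_DROP_REF:
		*count_mod = -1;	/* the head stores the sum of all mods */
		break;
	case DELAYED_ADD_EXTENT:
		/*
		 * A freshly allocated extent: the reserved space must be
		 * converted when the extent item is finally inserted.
		 */
		*must_insert_reserved = true;
		break;
	default:
		break;
	}
}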
852 struct btrfs_fs_info *fs_info = trans->fs_info; in add_delayed_ref_head()
857 delayed_refs = &trans->transaction->delayed_refs; in add_delayed_ref_head()
866 xa_release(&delayed_refs->dirty_extents, in add_delayed_ref_head()
867 qrecord->bytenr >> fs_info->sectorsize_bits); in add_delayed_ref_head()
879 existing = htree_insert(&delayed_refs->href_root, in add_delayed_ref_head()
880 &head_ref->href_node); in add_delayed_ref_head()
896 if (head_ref->is_data && head_ref->ref_mod < 0) { in add_delayed_ref_head()
897 delayed_refs->pending_csums += head_ref->num_bytes; in add_delayed_ref_head()
898 trans->delayed_ref_csum_deletions += in add_delayed_ref_head()
899 btrfs_csum_bytes_to_leaves(fs_info, head_ref->num_bytes); in add_delayed_ref_head()
901 delayed_refs->num_heads++; in add_delayed_ref_head()
902 delayed_refs->num_heads_ready++; in add_delayed_ref_head()
903 atomic_inc(&delayed_refs->num_entries); in add_delayed_ref_head()
924  * can be either one of the well-known metadata trees or the subvolume id which references this extent.
939 int action = generic_ref->action; in init_delayed_ref_common()
945 if (is_fstree(generic_ref->ref_root)) in init_delayed_ref_common()
946 seq = atomic64_read(&fs_info->tree_mod_seq); in init_delayed_ref_common()
948 refcount_set(&ref->refs, 1); in init_delayed_ref_common()
949 ref->bytenr = generic_ref->bytenr; in init_delayed_ref_common()
950 ref->num_bytes = generic_ref->num_bytes; in init_delayed_ref_common()
951 ref->ref_mod = 1; in init_delayed_ref_common()
952 ref->action = action; in init_delayed_ref_common()
953 ref->seq = seq; in init_delayed_ref_common()
954 ref->type = btrfs_ref_type(generic_ref); in init_delayed_ref_common()
955 ref->ref_root = generic_ref->ref_root; in init_delayed_ref_common()
956 ref->parent = generic_ref->parent; in init_delayed_ref_common()
957 RB_CLEAR_NODE(&ref->ref_node); in init_delayed_ref_common()
958 INIT_LIST_HEAD(&ref->add_list); in init_delayed_ref_common()
960 if (generic_ref->type == BTRFS_REF_DATA) in init_delayed_ref_common()
961 ref->data_ref = generic_ref->data_ref; in init_delayed_ref_common()
963 ref->tree_ref = generic_ref->tree_ref; in init_delayed_ref_common()
971 generic_ref->real_root = mod_root ?: generic_ref->ref_root; in btrfs_init_tree_ref()
973 generic_ref->tree_ref.level = level; in btrfs_init_tree_ref()
974 generic_ref->type = BTRFS_REF_METADATA; in btrfs_init_tree_ref()
975 if (skip_qgroup || !(is_fstree(generic_ref->ref_root) && in btrfs_init_tree_ref()
977 generic_ref->skip_qgroup = true; in btrfs_init_tree_ref()
979 generic_ref->skip_qgroup = false; in btrfs_init_tree_ref()
988 generic_ref->real_root = mod_root ?: generic_ref->ref_root; in btrfs_init_data_ref()
990 generic_ref->data_ref.objectid = ino; in btrfs_init_data_ref()
991 generic_ref->data_ref.offset = offset; in btrfs_init_data_ref()
992 generic_ref->type = BTRFS_REF_DATA; in btrfs_init_data_ref()
993 if (skip_qgroup || !(is_fstree(generic_ref->ref_root) && in btrfs_init_data_ref()
995 generic_ref->skip_qgroup = true; in btrfs_init_data_ref()
997 generic_ref->skip_qgroup = false; in btrfs_init_data_ref()
1005 struct btrfs_fs_info *fs_info = trans->fs_info; in add_delayed_ref()
1012 int action = generic_ref->action; in add_delayed_ref()
1018 return -ENOMEM; in add_delayed_ref()
1022 ret = -ENOMEM; in add_delayed_ref()
1026 if (btrfs_qgroup_full_accounting(fs_info) && !generic_ref->skip_qgroup) { in add_delayed_ref()
1029 ret = -ENOMEM; in add_delayed_ref()
1032 if (xa_reserve(&trans->transaction->delayed_refs.dirty_extents, in add_delayed_ref()
1033 generic_ref->bytenr >> fs_info->sectorsize_bits, in add_delayed_ref()
1035 ret = -ENOMEM; in add_delayed_ref()
1042 head_ref->extent_op = extent_op; in add_delayed_ref()
1044 delayed_refs = &trans->transaction->delayed_refs; in add_delayed_ref()
1045 spin_lock(&delayed_refs->lock); in add_delayed_ref()
1054 spin_unlock(&delayed_refs->lock); in add_delayed_ref()
1061 spin_unlock(&delayed_refs->lock); in add_delayed_ref()
1069 if (generic_ref->type == BTRFS_REF_DATA) in add_delayed_ref()
1070 trace_add_delayed_data_ref(trans->fs_info, node); in add_delayed_ref()
1072 trace_add_delayed_tree_ref(trans->fs_info, node); in add_delayed_ref()
1097 ASSERT(generic_ref->type == BTRFS_REF_METADATA && generic_ref->action); in btrfs_add_delayed_tree_ref()
1108 ASSERT(generic_ref->type == BTRFS_REF_DATA && generic_ref->action); in btrfs_add_delayed_data_ref()
1129 return -ENOMEM; in btrfs_add_delayed_extent_op()
1132 head_ref->extent_op = extent_op; in btrfs_add_delayed_extent_op()
1134 delayed_refs = &trans->transaction->delayed_refs; in btrfs_add_delayed_extent_op()
1135 spin_lock(&delayed_refs->lock); in btrfs_add_delayed_extent_op()
1139 spin_unlock(&delayed_refs->lock); in btrfs_add_delayed_extent_op()
1156 if (refcount_dec_and_test(&ref->refs)) { in btrfs_put_delayed_ref()
1157 WARN_ON(!RB_EMPTY_NODE(&ref->ref_node)); in btrfs_put_delayed_ref()
1169 lockdep_assert_held(&delayed_refs->lock); in btrfs_find_delayed_ref_head()
1178 if (type < entry->type) in find_comp()
1179 return -1; in find_comp()
1180 if (type > entry->type) in find_comp()
1184 if (root < entry->ref_root) in find_comp()
1185 return -1; in find_comp()
1186 if (root > entry->ref_root) in find_comp()
1189 if (parent < entry->parent) in find_comp()
1190 return -1; in find_comp()
1191 if (parent > entry->parent) in find_comp()
1213 lockdep_assert_held(&head->mutex); in btrfs_find_delayed_tree_ref()
1215 spin_lock(&head->lock); in btrfs_find_delayed_tree_ref()
1216 node = head->ref_tree.rb_root.rb_node; in btrfs_find_delayed_tree_ref()
1224 node = node->rb_left; in btrfs_find_delayed_tree_ref()
1226 node = node->rb_right; in btrfs_find_delayed_tree_ref()
1232 if (entry->action == BTRFS_ADD_DELAYED_REF) in btrfs_find_delayed_tree_ref()
1237 spin_unlock(&head->lock); in btrfs_find_delayed_tree_ref()
1265 return -ENOMEM; in btrfs_delayed_ref_init()