Lines Matching +full:lock +full:- +full:state
1 // SPDX-License-Identifier: GPL-2.0
8 #include "extent-io-tree.h"
13 static inline bool extent_state_in_tree(const struct extent_state *state) in extent_state_in_tree() argument
15 return !RB_EMPTY_NODE(&state->rb_node); in extent_state_in_tree()
22 static inline void btrfs_leak_debug_add_state(struct extent_state *state) in btrfs_leak_debug_add_state() argument
27 list_add(&state->leak_list, &states); in btrfs_leak_debug_add_state()
31 static inline void btrfs_leak_debug_del_state(struct extent_state *state) in btrfs_leak_debug_del_state() argument
36 list_del(&state->leak_list); in btrfs_leak_debug_del_state()
42 struct extent_state *state; in btrfs_extent_state_leak_debug_check() local
45 state = list_entry(states.next, struct extent_state, leak_list); in btrfs_extent_state_leak_debug_check()
46 pr_err("BTRFS: state leak: start %llu end %llu state %u in tree %d refs %d\n", in btrfs_extent_state_leak_debug_check()
47 state->start, state->end, state->state, in btrfs_extent_state_leak_debug_check()
48 extent_state_in_tree(state), in btrfs_extent_state_leak_debug_check()
49 refcount_read(&state->refs)); in btrfs_extent_state_leak_debug_check()
50 list_del(&state->leak_list); in btrfs_extent_state_leak_debug_check()
52 kmem_cache_free(extent_state_cache, state); in btrfs_extent_state_leak_debug_check()
65 if (tree->owner != IO_TREE_INODE_IO) in __btrfs_debug_check_extent_io_range()
69 isize = i_size_read(&inode->vfs_inode); in __btrfs_debug_check_extent_io_range()
70 if (end >= PAGE_SIZE && (end % 2) == 0 && end != isize - 1) { in __btrfs_debug_check_extent_io_range()
71 btrfs_debug_rl(inode->root->fs_info, in __btrfs_debug_check_extent_io_range()
77 #define btrfs_leak_debug_add_state(state) do {} while (0) argument
78 #define btrfs_leak_debug_del_state(state) do {} while (0) argument
89 return tree->owner == IO_TREE_INODE_IO; in is_inode_io_tree()
95 if (tree->owner == IO_TREE_INODE_IO) in extent_io_tree_to_inode()
96 return tree->inode; in extent_io_tree_to_inode()
100 /* Read-only access to the inode. */
103 if (tree->owner == IO_TREE_INODE_IO) in extent_io_tree_to_inode_const()
104 return tree->inode; in extent_io_tree_to_inode_const()
108 /* For read-only access to fs_info. */
111 if (tree->owner == IO_TREE_INODE_IO) in extent_io_tree_to_fs_info()
112 return tree->inode->root->fs_info; in extent_io_tree_to_fs_info()
113 return tree->fs_info; in extent_io_tree_to_fs_info()
119 tree->state = RB_ROOT; in extent_io_tree_init()
120 spin_lock_init(&tree->lock); in extent_io_tree_init()
121 tree->fs_info = fs_info; in extent_io_tree_init()
122 tree->owner = owner; in extent_io_tree_init()
126 * Empty an io tree, removing and freeing every extent state record from the
129 * aren't any waiters on any extent state record (EXTENT_LOCK_BITS are never
130 * set on any extent state when calling this function).
135 struct extent_state *state; in extent_io_tree_release() local
138 spin_lock(&tree->lock); in extent_io_tree_release()
139 root = tree->state; in extent_io_tree_release()
140 tree->state = RB_ROOT; in extent_io_tree_release()
141 rbtree_postorder_for_each_entry_safe(state, tmp, &root, rb_node) { in extent_io_tree_release()
143 RB_CLEAR_NODE(&state->rb_node); in extent_io_tree_release()
144 ASSERT(!(state->state & EXTENT_LOCK_BITS)); in extent_io_tree_release()
147 * lock and we only change the waitqueue while holding that lock in extent_io_tree_release()
150 ASSERT(!waitqueue_active(&state->wq)); in extent_io_tree_release()
151 free_extent_state(state); in extent_io_tree_release()
152 cond_resched_lock(&tree->lock); in extent_io_tree_release()
158 ASSERT(RB_EMPTY_ROOT(&tree->state)); in extent_io_tree_release()
159 spin_unlock(&tree->lock); in extent_io_tree_release()
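
The release path above moves the whole root aside and then frees every record in postorder, with no rebalancing and an assertion that nothing is still locked or waited on. A minimal userspace sketch of that teardown pattern, using a plain binary search tree and illustrative names in place of the kernel rbtree and struct extent_state:

#include <assert.h>
#include <stdint.h>
#include <stdlib.h>

struct range_rec {                      /* simplified stand-in for extent_state */
        uint64_t start, end;
        unsigned int bits;              /* stand-in for the EXTENT_* bit field */
        struct range_rec *left, *right;
};

static void free_postorder(struct range_rec *node)
{
        if (!node)
                return;
        free_postorder(node->left);
        free_postorder(node->right);
        assert(!(node->bits & 1u));     /* analogue of the EXTENT_LOCK_BITS assert */
        free(node);
}

static void tree_release(struct range_rec **root)
{
        struct range_rec *detached = *root;

        *root = NULL;                   /* the tree is empty from the caller's view */
        free_postorder(detached);       /* children before parent, no rebalancing */
}
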
164 struct extent_state *state; in alloc_extent_state() local
171 state = kmem_cache_alloc(extent_state_cache, mask); in alloc_extent_state()
172 if (!state) in alloc_extent_state()
173 return state; in alloc_extent_state()
174 state->state = 0; in alloc_extent_state()
175 RB_CLEAR_NODE(&state->rb_node); in alloc_extent_state()
176 btrfs_leak_debug_add_state(state); in alloc_extent_state()
177 refcount_set(&state->refs, 1); in alloc_extent_state()
178 init_waitqueue_head(&state->wq); in alloc_extent_state()
179 trace_alloc_extent_state(state, mask, _RET_IP_); in alloc_extent_state()
180 return state; in alloc_extent_state()
191 void free_extent_state(struct extent_state *state) in free_extent_state() argument
193 if (!state) in free_extent_state()
195 if (refcount_dec_and_test(&state->refs)) { in free_extent_state()
196 WARN_ON(extent_state_in_tree(state)); in free_extent_state()
197 btrfs_leak_debug_del_state(state); in free_extent_state()
198 trace_free_extent_state(state, _RET_IP_); in free_extent_state()
199 kmem_cache_free(extent_state_cache, state); in free_extent_state()
203 static int add_extent_changeset(struct extent_state *state, u32 bits, in add_extent_changeset() argument
211 if (set && (state->state & bits) == bits) in add_extent_changeset()
213 if (!set && (state->state & bits) == 0) in add_extent_changeset()
215 changeset->bytes_changed += state->end - state->start + 1; in add_extent_changeset()
216 ret = ulist_add(&changeset->range_changed, state->start, state->end, in add_extent_changeset()
221 static inline struct extent_state *next_state(struct extent_state *state) in next_state() argument
223 struct rb_node *next = rb_next(&state->rb_node); in next_state()
231 static inline struct extent_state *prev_state(struct extent_state *state) in prev_state() argument
233 struct rb_node *next = rb_prev(&state->rb_node); in prev_state()
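
next_state() and prev_state() are thin wrappers around rb_next()/rb_prev(), i.e. the in-order successor and predecessor of a record in the tree of ranges. For illustration only, the classic successor walk on a node with parent pointers looks like this (generic sketch, not the kernel rbtree code):

#include <stddef.h>
#include <stdint.h>

struct node {
        uint64_t start, end;
        struct node *left, *right, *parent;
};

/* In-order successor: the record covering the next higher range, or NULL. */
static struct node *next_node(struct node *n)
{
        if (n->right) {                 /* leftmost node of the right subtree */
                n = n->right;
                while (n->left)
                        n = n->left;
                return n;
        }
        while (n->parent && n == n->parent->right)
                n = n->parent;          /* climb until we come up from a left child */
        return n->parent;
}

prev_state() is the mirror image: descend to the rightmost node of the left subtree, or climb until coming up from a right child.
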
243 * entry->start <= offset && entry->end >= offset.
263 struct rb_root *root = &tree->state; in tree_search_for_insert()
264 struct rb_node **node = &root->rb_node; in tree_search_for_insert()
272 if (offset < entry->start) in tree_search_for_insert()
273 node = &(*node)->rb_left; in tree_search_for_insert()
274 else if (offset > entry->end) in tree_search_for_insert()
275 node = &(*node)->rb_right; in tree_search_for_insert()
286 while (entry && offset > entry->end) in tree_search_for_insert()
309 struct rb_root *root = &tree->state; in tree_search_prev_next()
310 struct rb_node **node = &root->rb_node; in tree_search_prev_next()
320 if (offset < entry->start) in tree_search_prev_next()
321 node = &(*node)->rb_left; in tree_search_prev_next()
322 else if (offset > entry->end) in tree_search_prev_next()
323 node = &(*node)->rb_right; in tree_search_prev_next()
329 while (entry && offset > entry->end) in tree_search_prev_next()
334 while (entry && offset < entry->start) in tree_search_prev_next()
342 * Inexact rb-tree search, return the next entry if @offset is not found
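
The search helpers return the record that contains @offset (entry->start <= offset && entry->end >= offset) or, failing that, the next record after @offset, so callers can resume scans cleanly. The same "containing or next" lookup over a sorted, non-overlapping array, as a sketch with binary search standing in for the rbtree descent:

#include <stddef.h>
#include <stdint.h>

struct range { uint64_t start, end; };

/*
 * Return the record containing @offset, or the first record starting after
 * @offset, or NULL when @offset lies beyond the last record.
 */
static const struct range *search(const struct range *recs, size_t n,
                                  uint64_t offset)
{
        size_t lo = 0, hi = n;

        while (lo < hi) {               /* first record with end >= offset */
                size_t mid = lo + (hi - lo) / 2;

                if (recs[mid].end < offset)
                        lo = mid + 1;
                else
                        hi = mid;
        }
        return lo < n ? &recs[lo] : NULL;
}
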
350 const struct extent_state *state, in extent_io_tree_panic() argument
355 "extent io tree error on %s state start %llu end %llu", in extent_io_tree_panic()
356 opname, state->start, state->end); in extent_io_tree_panic()
359 static void merge_prev_state(struct extent_io_tree *tree, struct extent_state *state) in merge_prev_state() argument
363 prev = prev_state(state); in merge_prev_state()
364 if (prev && prev->end == state->start - 1 && prev->state == state->state) { in merge_prev_state()
367 state, prev); in merge_prev_state()
368 state->start = prev->start; in merge_prev_state()
369 rb_erase(&prev->rb_node, &tree->state); in merge_prev_state()
370 RB_CLEAR_NODE(&prev->rb_node); in merge_prev_state()
375 static void merge_next_state(struct extent_io_tree *tree, struct extent_state *state) in merge_next_state() argument
379 next = next_state(state); in merge_next_state()
380 if (next && next->start == state->end + 1 && next->state == state->state) { in merge_next_state()
383 state, next); in merge_next_state()
384 state->end = next->end; in merge_next_state()
385 rb_erase(&next->rb_node, &tree->state); in merge_next_state()
386 RB_CLEAR_NODE(&next->rb_node); in merge_next_state()
393 * extents with matching state are merged together into a single extent in the
394 * tree. Extents with EXTENT_IO in their state field are not merged because
398 * This should be called with the tree lock held.
400 static void merge_state(struct extent_io_tree *tree, struct extent_state *state) in merge_state() argument
402 if (state->state & (EXTENT_LOCK_BITS | EXTENT_BOUNDARY)) in merge_state()
405 merge_prev_state(tree, state); in merge_state()
406 merge_next_state(tree, state); in merge_state()
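
merge_prev_state()/merge_next_state() coalesce a record with its neighbour when the two ranges touch and carry exactly the same bits, and merge_state() skips records holding lock or boundary bits so those stay distinct. The core adjacency test, sketched on a plain struct with illustrative names:

#include <stdbool.h>
#include <stdint.h>

struct rec { uint64_t start, end; unsigned int bits; };

/*
 * Absorb @next into @prev when they are back to back and identical in state;
 * the caller then drops @next from the tree. E.g. [0,4095] and [4096,8191]
 * with the same bits become a single [0,8191] record.
 */
static bool try_merge_next(struct rec *prev, const struct rec *next)
{
        if (prev->end + 1 == next->start && prev->bits == next->bits) {
                prev->end = next->end;
                return true;
        }
        return false;
}
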
410 struct extent_state *state, in set_state_bits() argument
417 btrfs_set_delalloc_extent(extent_io_tree_to_inode(tree), state, bits); in set_state_bits()
419 ret = add_extent_changeset(state, bits_to_set, changeset, 1); in set_state_bits()
421 state->state |= bits_to_set; in set_state_bits()
436 * The tree lock is not taken internally. This is a utility function and
440 struct extent_state *state, in insert_state() argument
446 const u64 start = state->start - 1; in insert_state()
447 const u64 end = state->end + 1; in insert_state()
450 set_state_bits(tree, state, bits, changeset); in insert_state()
452 node = &tree->state.rb_node; in insert_state()
459 if (state->end < entry->start) { in insert_state()
460 if (try_merge && end == entry->start && in insert_state()
461 state->state == entry->state) { in insert_state()
465 state, entry); in insert_state()
466 entry->start = state->start; in insert_state()
468 state->state = 0; in insert_state()
471 node = &(*node)->rb_left; in insert_state()
472 } else if (state->end > entry->end) { in insert_state()
473 if (try_merge && entry->end == start && in insert_state()
474 state->state == entry->state) { in insert_state()
478 state, entry); in insert_state()
479 entry->end = state->end; in insert_state()
481 state->state = 0; in insert_state()
484 node = &(*node)->rb_right; in insert_state()
486 return ERR_PTR(-EEXIST); in insert_state()
490 rb_link_node(&state->rb_node, parent, node); in insert_state()
491 rb_insert_color(&state->rb_node, &tree->state); in insert_state()
493 return state; in insert_state()
497 * Insert state into @tree at the location given by @node and @parent.
500 struct extent_state *state, struct rb_node **node, in insert_state_fast() argument
504 set_state_bits(tree, state, bits, changeset); in insert_state_fast()
505 rb_link_node(&state->rb_node, parent, node); in insert_state_fast()
506 rb_insert_color(&state->rb_node, &tree->state); in insert_state_fast()
507 merge_state(tree, state); in insert_state_fast()
511 * Split a given extent state struct in two, inserting the preallocated
516 * the tree has 'orig' at [orig->start, orig->end]. After calling, there
517 * are two extent state structs in the tree:
518 * prealloc: [orig->start, split - 1]
519 * orig: [ split, orig->end ]
534 prealloc->start = orig->start; in split_state()
535 prealloc->end = split - 1; in split_state()
536 prealloc->state = orig->state; in split_state()
537 orig->start = split; in split_state()
539 parent = &orig->rb_node; in split_state()
547 if (prealloc->end < entry->start) { in split_state()
548 node = &(*node)->rb_left; in split_state()
549 } else if (prealloc->end > entry->end) { in split_state()
550 node = &(*node)->rb_right; in split_state()
553 return -EEXIST; in split_state()
557 rb_link_node(&prealloc->rb_node, parent, node); in split_state()
558 rb_insert_color(&prealloc->rb_node, &tree->state); in split_state()
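
split_state() never allocates: the caller hands in a preallocated record, the low half [orig->start, split - 1] moves into it, the original shrinks to [split, orig->end], and only the new node gets linked into the tree. The arithmetic in isolation (hypothetical struct, same semantics):

#include <assert.h>
#include <stdint.h>

struct rec { uint64_t start, end; unsigned int bits; };

/* Split @orig at @split: @pre takes the low half, @orig keeps the high half. */
static void split_rec(struct rec *orig, struct rec *pre, uint64_t split)
{
        assert(split > orig->start && split <= orig->end);
        pre->start = orig->start;
        pre->end = split - 1;
        pre->bits = orig->bits;
        orig->start = split;
        /* e.g. orig [0,4095], split 2048 -> pre [0,2047], orig [2048,4095] */
}
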
564 * Utility function to clear some bits in an extent state struct. It will
565 * optionally wake up anyone waiting on this state (wake == 1).
567 * If no bits are set on the state struct after clearing things, the
571 struct extent_state *state, in clear_state_bit() argument
580 btrfs_clear_delalloc_extent(extent_io_tree_to_inode(tree), state, in clear_state_bit()
583 ret = add_extent_changeset(state, bits_to_clear, changeset, 0); in clear_state_bit()
585 state->state &= ~bits_to_clear; in clear_state_bit()
587 wake_up(&state->wq); in clear_state_bit()
588 if (state->state == 0) { in clear_state_bit()
589 next = next_state(state); in clear_state_bit()
590 if (extent_state_in_tree(state)) { in clear_state_bit()
591 rb_erase(&state->rb_node, &tree->state); in clear_state_bit()
592 RB_CLEAR_NODE(&state->rb_node); in clear_state_bit()
593 free_extent_state(state); in clear_state_bit()
598 merge_state(tree, state); in clear_state_bit()
599 next = next_state(state); in clear_state_bit()
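
clear_state_bit() clears the requested bits, optionally wakes waiters, and then either frees the record (when no bits remain) or re-merges it with its neighbours; either way it returns the next record so the caller's loop keeps going. A stripped-down version of that decision on a singly linked list (illustrative only, no waitqueue or merging):

#include <stdint.h>
#include <stdlib.h>

struct rec {
        uint64_t start, end;
        unsigned int bits;
        struct rec *next;
};

/* Clear @bits on @r; drop the record entirely once nothing is set on it. */
static struct rec *clear_bits_on(struct rec **link, struct rec *r,
                                 unsigned int bits)
{
        struct rec *next = r->next;

        r->bits &= ~bits;
        if (r->bits == 0) {
                *link = next;           /* unlink and free the now-empty record */
                free(r);
        }
        return next;                    /* caller keeps walking from here */
}
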
611 *bits &= EXTENT_NOWAIT - 1; in set_gfp_mask_from_bits()
621 * This takes the tree lock, and returns 0 on success and < 0 on error.
627 struct extent_state *state; in __clear_extent_bit() local
639 trace_btrfs_clear_extent_bit(tree, start, end - start + 1, bits); in __clear_extent_bit()
654 * up not needing the pre-allocated extent state at all, which in __clear_extent_bit()
657 * If we end up needing a new extent state we allocate it later. in __clear_extent_bit()
662 spin_lock(&tree->lock); in __clear_extent_bit()
672 cached->start <= start && cached->end > start) { in __clear_extent_bit()
674 refcount_dec(&cached->refs); in __clear_extent_bit()
675 state = cached; in __clear_extent_bit()
683 state = tree_search(tree, start); in __clear_extent_bit()
684 if (!state) in __clear_extent_bit()
687 if (state->start > end) in __clear_extent_bit()
689 WARN_ON(state->end < start); in __clear_extent_bit()
690 last_end = state->end; in __clear_extent_bit()
692 /* The state doesn't have the wanted bits, go ahead. */ in __clear_extent_bit()
693 if (!(state->state & bits)) { in __clear_extent_bit()
694 state = next_state(state); in __clear_extent_bit()
699 * | ---- desired range ---- | in __clear_extent_bit()
700 * | state | or in __clear_extent_bit()
701 * | ------------- state -------------- | in __clear_extent_bit()
713 if (state->start < start) { in __clear_extent_bit()
717 err = split_state(tree, state, prealloc, start); in __clear_extent_bit()
719 extent_io_tree_panic(tree, state, "split", err); in __clear_extent_bit()
724 if (state->end <= end) { in __clear_extent_bit()
725 state = clear_state_bit(tree, state, bits, wake, changeset); in __clear_extent_bit()
731 * | ---- desired range ---- | in __clear_extent_bit()
732 * | state | in __clear_extent_bit()
735 if (state->start <= end && state->end > end) { in __clear_extent_bit()
739 err = split_state(tree, state, prealloc, end + 1); in __clear_extent_bit()
741 extent_io_tree_panic(tree, state, "split", err); in __clear_extent_bit()
744 wake_up(&state->wq); in __clear_extent_bit()
752 state = clear_state_bit(tree, state, bits, wake, changeset); in __clear_extent_bit()
754 if (last_end == (u64)-1) in __clear_extent_bit()
757 if (start <= end && state && !need_resched()) in __clear_extent_bit()
763 spin_unlock(&tree->lock); in __clear_extent_bit()
769 spin_unlock(&tree->lock); in __clear_extent_bit()
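
__clear_extent_bit() only touches the requested range: a record that starts before @start or ends past @end is split first so the untouched part keeps its bits, and the bits are cleared only on the overlapping piece. The same boundary handling over a sorted singly linked list, as a self-contained sketch (no locking, preallocation, caching or error handling):

#include <stdint.h>
#include <stdlib.h>

struct rec {
        uint64_t start, end;
        unsigned int bits;
        struct rec *next;
};

static void insert_before(struct rec **link, uint64_t start, uint64_t end,
                          unsigned int bits)
{
        struct rec *n = malloc(sizeof(*n));     /* allocation check omitted */

        n->start = start;
        n->end = end;
        n->bits = bits;
        n->next = *link;
        *link = n;
}

/* Clear @bits in [start, end] over sorted, non-overlapping records. */
static void clear_range_bits(struct rec **head, uint64_t start, uint64_t end,
                             unsigned int bits)
{
        struct rec **link = head;

        while (*link && (*link)->start <= end) {
                struct rec *r = *link;

                if (r->end < start || !(r->bits & bits)) {
                        link = &r->next;                /* nothing to clear here */
                        continue;
                }
                if (r->start < start) {                 /* keep the low, untouched part */
                        insert_before(link, r->start, start - 1, r->bits);
                        r->start = start;
                        link = &(*link)->next;          /* revisit r on the next pass */
                        continue;
                }
                if (r->end > end) {                     /* keep the high, untouched part */
                        insert_before(&r->next, end + 1, r->end, r->bits);
                        r->end = end;
                }
                r->bits &= ~bits;
                if (r->bits == 0) {
                        *link = r->next;
                        free(r);
                } else {
                        link = &r->next;
                }
        }
}
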
778 * Wait for one or more bits to clear on a range in the state tree.
780 * The tree lock is taken by this function
785 struct extent_state *state; in wait_extent_bit() local
789 spin_lock(&tree->lock); in wait_extent_bit()
793 * are more bits than the bits we're waiting on set on this state. in wait_extent_bit()
796 state = *cached_state; in wait_extent_bit()
797 if (extent_state_in_tree(state) && in wait_extent_bit()
798 state->start <= start && start < state->end) in wait_extent_bit()
806 state = tree_search(tree, start); in wait_extent_bit()
808 if (!state) in wait_extent_bit()
810 if (state->start > end) in wait_extent_bit()
813 if (state->state & bits) { in wait_extent_bit()
816 start = state->start; in wait_extent_bit()
817 refcount_inc(&state->refs); in wait_extent_bit()
818 prepare_to_wait(&state->wq, &wait, TASK_UNINTERRUPTIBLE); in wait_extent_bit()
819 spin_unlock(&tree->lock); in wait_extent_bit()
821 spin_lock(&tree->lock); in wait_extent_bit()
822 finish_wait(&state->wq, &wait); in wait_extent_bit()
823 free_extent_state(state); in wait_extent_bit()
826 start = state->end + 1; in wait_extent_bit()
831 if (!cond_resched_lock(&tree->lock)) { in wait_extent_bit()
832 state = next_state(state); in wait_extent_bit()
837 /* This state is no longer useful, clear it and free it up. */ in wait_extent_bit()
839 state = *cached_state; in wait_extent_bit()
841 free_extent_state(state); in wait_extent_bit()
843 spin_unlock(&tree->lock); in wait_extent_bit()
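
wait_extent_bit() finds a record that still has the bits set, takes a reference, drops the tree lock, sleeps on the record's waitqueue and then re-searches from that record's start, because the record may have been merged or freed while it slept. A rough pthread analogue of that drop-the-lock-and-rewalk pattern; find_bits_locked() is a hypothetical helper assumed for the sketch, and one condition variable stands in for the per-record waitqueues:

#include <pthread.h>
#include <stdint.h>

struct wait_tree {
        pthread_mutex_t lock;           /* analogue of tree->lock */
        pthread_cond_t wq;              /* broadcast by whoever clears the bits */
};

/* Hypothetical helper: first record start in [from, end] with @bits set, or -1. */
extern int64_t find_bits_locked(struct wait_tree *t, uint64_t from,
                                uint64_t end, unsigned int bits);

static void wait_range_bits(struct wait_tree *t, uint64_t start, uint64_t end,
                            unsigned int bits)
{
        pthread_mutex_lock(&t->lock);
        for (;;) {
                int64_t hit = find_bits_locked(t, start, end, bits);

                if (hit < 0)
                        break;          /* nothing in the range has the bits anymore */
                start = (uint64_t)hit;  /* resume the search here after waking up */
                pthread_cond_wait(&t->wq, &t->lock);    /* drops and retakes the lock */
        }
        pthread_mutex_unlock(&t->lock);
}
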
846 static void cache_state_if_flags(struct extent_state *state, in cache_state_if_flags() argument
851 if (!flags || (state->state & flags)) { in cache_state_if_flags()
852 *cached_ptr = state; in cache_state_if_flags()
853 refcount_inc(&state->refs); in cache_state_if_flags()
858 static void cache_state(struct extent_state *state, in cache_state() argument
861 return cache_state_if_flags(state, cached_ptr, EXTENT_LOCK_BITS | EXTENT_BOUNDARY); in cache_state()
865 * Find the first state struct with 'bits' set after 'start', and return it.
866 * tree->lock must be held. NULL will be returned if nothing was found after
872 struct extent_state *state; in find_first_extent_bit_state() local
878 state = tree_search(tree, start); in find_first_extent_bit_state()
879 while (state) { in find_first_extent_bit_state()
880 if (state->end >= start && (state->state & bits)) in find_first_extent_bit_state()
881 return state; in find_first_extent_bit_state()
882 state = next_state(state); in find_first_extent_bit_state()
899 struct extent_state *state; in find_first_extent_bit() local
902 spin_lock(&tree->lock); in find_first_extent_bit()
904 state = *cached_state; in find_first_extent_bit()
905 if (state->end == start - 1 && extent_state_in_tree(state)) { in find_first_extent_bit()
906 while ((state = next_state(state)) != NULL) { in find_first_extent_bit()
907 if (state->state & bits) in find_first_extent_bit()
911 * If we found the next extent state, clear cached_state in find_first_extent_bit()
912 * so that we can cache the next extent state below and in find_first_extent_bit()
913 * avoid future calls going over the same extent state in find_first_extent_bit()
919 if (state) in find_first_extent_bit()
927 state = find_first_extent_bit_state(tree, start, bits); in find_first_extent_bit()
929 if (state) { in find_first_extent_bit()
930 cache_state_if_flags(state, cached_state, 0); in find_first_extent_bit()
931 *start_ret = state->start; in find_first_extent_bit()
932 *end_ret = state->end; in find_first_extent_bit()
936 spin_unlock(&tree->lock); in find_first_extent_bit()
951 * will drop the tree->lock, so use this helper if you want to find the actual
953 * then walk down the tree until we find a non-contiguous area. The area
959 struct extent_state *state; in find_contiguous_extent_bit() local
964 spin_lock(&tree->lock); in find_contiguous_extent_bit()
965 state = find_first_extent_bit_state(tree, start, bits); in find_contiguous_extent_bit()
966 if (state) { in find_contiguous_extent_bit()
967 *start_ret = state->start; in find_contiguous_extent_bit()
968 *end_ret = state->end; in find_contiguous_extent_bit()
969 while ((state = next_state(state)) != NULL) { in find_contiguous_extent_bit()
970 if (state->start > (*end_ret + 1)) in find_contiguous_extent_bit()
972 *end_ret = state->end; in find_contiguous_extent_bit()
976 spin_unlock(&tree->lock); in find_contiguous_extent_bit()
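
find_contiguous_extent_bit() takes the first record at or after @start with the bits set and then keeps extending the reported end while the following record begins exactly one byte later. The same extend-while-adjacent loop over a sorted array, as a sketch:

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

struct rec { uint64_t start, end; unsigned int bits; };

static bool find_contiguous(const struct rec *recs, size_t n, uint64_t from,
                            unsigned int bits, uint64_t *start, uint64_t *end)
{
        size_t i = 0;

        /* First record ending at or after @from that has one of the bits. */
        while (i < n && (recs[i].end < from || !(recs[i].bits & bits)))
                i++;
        if (i == n)
                return false;
        *start = recs[i].start;
        *end = recs[i].end;
        while (++i < n && recs[i].start == *end + 1)
                *end = recs[i].end;     /* still contiguous, keep growing */
        return true;
}
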
990 struct extent_state *state; in btrfs_find_delalloc_range() local
995 spin_lock(&tree->lock); in btrfs_find_delalloc_range()
1001 state = tree_search(tree, cur_start); in btrfs_find_delalloc_range()
1002 if (!state) { in btrfs_find_delalloc_range()
1003 *end = (u64)-1; in btrfs_find_delalloc_range()
1007 while (state) { in btrfs_find_delalloc_range()
1008 if (found && (state->start != cur_start || in btrfs_find_delalloc_range()
1009 (state->state & EXTENT_BOUNDARY))) { in btrfs_find_delalloc_range()
1012 if (!(state->state & EXTENT_DELALLOC)) { in btrfs_find_delalloc_range()
1014 *end = state->end; in btrfs_find_delalloc_range()
1018 *start = state->start; in btrfs_find_delalloc_range()
1019 *cached_state = state; in btrfs_find_delalloc_range()
1020 refcount_inc(&state->refs); in btrfs_find_delalloc_range()
1023 *end = state->end; in btrfs_find_delalloc_range()
1024 cur_start = state->end + 1; in btrfs_find_delalloc_range()
1025 total_bytes += state->end - state->start + 1; in btrfs_find_delalloc_range()
1028 state = next_state(state); in btrfs_find_delalloc_range()
1031 spin_unlock(&tree->lock); in btrfs_find_delalloc_range()
1040 * If any of the exclusive bits are set, this will fail with -EEXIST if some
1047 * [start, end] is inclusive. This takes the tree lock.
1055 struct extent_state *state; in __set_extent_bit() local
1067 trace_btrfs_set_extent_bit(tree, start, end - start + 1, bits); in __set_extent_bit()
1077 * up not needing the pre-allocated extent state at all, which in __set_extent_bit()
1080 * If we end up needing a new extent state we allocate it later. in __set_extent_bit()
1088 spin_lock(&tree->lock); in __set_extent_bit()
1090 state = *cached_state; in __set_extent_bit()
1091 if (state->start <= start && state->end > start && in __set_extent_bit()
1092 extent_state_in_tree(state)) in __set_extent_bit()
1099 state = tree_search_for_insert(tree, start, &p, &parent); in __set_extent_bit()
1100 if (!state) { in __set_extent_bit()
1104 prealloc->start = start; in __set_extent_bit()
1105 prealloc->end = end; in __set_extent_bit()
1112 last_start = state->start; in __set_extent_bit()
1113 last_end = state->end; in __set_extent_bit()
1116 * | ---- desired range ---- | in __set_extent_bit()
1117 * | state | in __set_extent_bit()
1119 * Just lock what we found and keep going in __set_extent_bit()
1121 if (state->start == start && state->end <= end) { in __set_extent_bit()
1122 if (state->state & exclusive_bits) { in __set_extent_bit()
1123 *failed_start = state->start; in __set_extent_bit()
1124 cache_state(state, failed_state); in __set_extent_bit()
1125 ret = -EEXIST; in __set_extent_bit()
1129 set_state_bits(tree, state, bits, changeset); in __set_extent_bit()
1130 cache_state(state, cached_state); in __set_extent_bit()
1131 merge_state(tree, state); in __set_extent_bit()
1132 if (last_end == (u64)-1) in __set_extent_bit()
1135 state = next_state(state); in __set_extent_bit()
1136 if (start < end && state && state->start == start && in __set_extent_bit()
1143 * | ---- desired range ---- | in __set_extent_bit()
1144 * | state | in __set_extent_bit()
1146 * | ------------- state -------------- | in __set_extent_bit()
1157 if (state->start < start) { in __set_extent_bit()
1158 if (state->state & exclusive_bits) { in __set_extent_bit()
1160 cache_state(state, failed_state); in __set_extent_bit()
1161 ret = -EEXIST; in __set_extent_bit()
1169 if ((state->state & bits) == bits) { in __set_extent_bit()
1170 start = state->end + 1; in __set_extent_bit()
1171 cache_state(state, cached_state); in __set_extent_bit()
1178 ret = split_state(tree, state, prealloc, start); in __set_extent_bit()
1180 extent_io_tree_panic(tree, state, "split", ret); in __set_extent_bit()
1185 if (state->end <= end) { in __set_extent_bit()
1186 set_state_bits(tree, state, bits, changeset); in __set_extent_bit()
1187 cache_state(state, cached_state); in __set_extent_bit()
1188 merge_state(tree, state); in __set_extent_bit()
1189 if (last_end == (u64)-1) in __set_extent_bit()
1192 state = next_state(state); in __set_extent_bit()
1193 if (start < end && state && state->start == start && in __set_extent_bit()
1200 * | ---- desired range ---- | in __set_extent_bit()
1201 * | state | or | state | in __set_extent_bit()
1206 if (state->start > start) { in __set_extent_bit()
1213 this_end = last_start - 1; in __set_extent_bit()
1223 prealloc->start = start; in __set_extent_bit()
1224 prealloc->end = this_end; in __set_extent_bit()
1238 * | ---- desired range ---- | in __set_extent_bit()
1239 * | state | in __set_extent_bit()
1243 if (state->start <= end && state->end > end) { in __set_extent_bit()
1244 if (state->state & exclusive_bits) { in __set_extent_bit()
1246 cache_state(state, failed_state); in __set_extent_bit()
1247 ret = -EEXIST; in __set_extent_bit()
1254 ret = split_state(tree, state, prealloc, end + 1); in __set_extent_bit()
1256 extent_io_tree_panic(tree, state, "split", ret); in __set_extent_bit()
1268 spin_unlock(&tree->lock); in __set_extent_bit()
1274 spin_unlock(&tree->lock); in __set_extent_bit()
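
One detail of the set path above: when @exclusive_bits is non-zero (the locking case), hitting a record that already carries one of those bits aborts with -EEXIST and reports where the conflict begins through @failed_start so the caller can undo and wait. That check in isolation, over a flat array of records (simplified, no splitting or insertion):

#include <errno.h>
#include <stddef.h>
#include <stdint.h>

struct rec { uint64_t start, end; unsigned int bits; };

static int check_exclusive(const struct rec *recs, size_t n, uint64_t start,
                           uint64_t end, unsigned int exclusive_bits,
                           uint64_t *failed_start)
{
        for (size_t i = 0; i < n; i++) {
                if (recs[i].end < start || recs[i].start > end)
                        continue;       /* no overlap with [start, end] */
                if (recs[i].bits & exclusive_bits) {
                        *failed_start = recs[i].start > start ?
                                        recs[i].start : start;
                        return -EEXIST; /* caller undoes partial work and waits */
                }
        }
        return 0;
}
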
1297 * @cached_state: state that we're going to cache
1303 * boundary bits like LOCK.
1311 struct extent_state *state; in convert_extent_bit() local
1321 trace_btrfs_convert_extent_bit(tree, start, end - start + 1, bits, in convert_extent_bit()
1327 * Best effort, don't worry if extent state allocation fails in convert_extent_bit()
1328 * here for the first iteration. We might have a cached state in convert_extent_bit()
1330 * extent state allocations are needed. We'll only know this in convert_extent_bit()
1335 return -ENOMEM; in convert_extent_bit()
1338 spin_lock(&tree->lock); in convert_extent_bit()
1340 state = *cached_state; in convert_extent_bit()
1341 if (state->start <= start && state->end > start && in convert_extent_bit()
1342 extent_state_in_tree(state)) in convert_extent_bit()
1350 state = tree_search_for_insert(tree, start, &p, &parent); in convert_extent_bit()
1351 if (!state) { in convert_extent_bit()
1354 ret = -ENOMEM; in convert_extent_bit()
1357 prealloc->start = start; in convert_extent_bit()
1358 prealloc->end = end; in convert_extent_bit()
1365 last_start = state->start; in convert_extent_bit()
1366 last_end = state->end; in convert_extent_bit()
1369 * | ---- desired range ---- | in convert_extent_bit()
1370 * | state | in convert_extent_bit()
1372 * Just lock what we found and keep going. in convert_extent_bit()
1374 if (state->start == start && state->end <= end) { in convert_extent_bit()
1375 set_state_bits(tree, state, bits, NULL); in convert_extent_bit()
1376 cache_state(state, cached_state); in convert_extent_bit()
1377 state = clear_state_bit(tree, state, clear_bits, 0, NULL); in convert_extent_bit()
1378 if (last_end == (u64)-1) in convert_extent_bit()
1381 if (start < end && state && state->start == start && in convert_extent_bit()
1388 * | ---- desired range ---- | in convert_extent_bit()
1389 * | state | in convert_extent_bit()
1391 * | ------------- state -------------- | in convert_extent_bit()
1402 if (state->start < start) { in convert_extent_bit()
1405 ret = -ENOMEM; in convert_extent_bit()
1408 ret = split_state(tree, state, prealloc, start); in convert_extent_bit()
1410 extent_io_tree_panic(tree, state, "split", ret); in convert_extent_bit()
1414 if (state->end <= end) { in convert_extent_bit()
1415 set_state_bits(tree, state, bits, NULL); in convert_extent_bit()
1416 cache_state(state, cached_state); in convert_extent_bit()
1417 state = clear_state_bit(tree, state, clear_bits, 0, NULL); in convert_extent_bit()
1418 if (last_end == (u64)-1) in convert_extent_bit()
1421 if (start < end && state && state->start == start && in convert_extent_bit()
1428 * | ---- desired range ---- | in convert_extent_bit()
1429 * | state | or | state | in convert_extent_bit()
1434 if (state->start > start) { in convert_extent_bit()
1441 this_end = last_start - 1; in convert_extent_bit()
1445 ret = -ENOMEM; in convert_extent_bit()
1453 prealloc->start = start; in convert_extent_bit()
1454 prealloc->end = this_end; in convert_extent_bit()
1467 * | ---- desired range ---- | in convert_extent_bit()
1468 * | state | in convert_extent_bit()
1472 if (state->start <= end && state->end > end) { in convert_extent_bit()
1475 ret = -ENOMEM; in convert_extent_bit()
1479 ret = split_state(tree, state, prealloc, end + 1); in convert_extent_bit()
1481 extent_io_tree_panic(tree, state, "split", ret); in convert_extent_bit()
1493 spin_unlock(&tree->lock); in convert_extent_bit()
1499 spin_unlock(&tree->lock); in convert_extent_bit()
1517 * set, it's possible that @end_ret contains -1; this happens in case the range
1524 struct extent_state *state; in find_first_clear_extent_bit() local
1527 spin_lock(&tree->lock); in find_first_clear_extent_bit()
1531 state = tree_search_prev_next(tree, start, &prev, &next); in find_first_clear_extent_bit()
1532 if (!state && !next && !prev) { in find_first_clear_extent_bit()
1538 *end_ret = -1; in find_first_clear_extent_bit()
1540 } else if (!state && !next) { in find_first_clear_extent_bit()
1545 *start_ret = prev->end + 1; in find_first_clear_extent_bit()
1546 *end_ret = -1; in find_first_clear_extent_bit()
1548 } else if (!state) { in find_first_clear_extent_bit()
1549 state = next; in find_first_clear_extent_bit()
1553 * At this point 'state' either contains 'start' or start is in find_first_clear_extent_bit()
1554 * before 'state' in find_first_clear_extent_bit()
1556 if (in_range(start, state->start, state->end - state->start + 1)) { in find_first_clear_extent_bit()
1557 if (state->state & bits) { in find_first_clear_extent_bit()
1559 * |--range with bits sets--| in find_first_clear_extent_bit()
1563 start = state->end + 1; in find_first_clear_extent_bit()
1570 * |--range with bits cleared----| in find_first_clear_extent_bit()
1574 *start_ret = state->start; in find_first_clear_extent_bit()
1579 * |---prev range---|---hole/unset---|---node range---| in find_first_clear_extent_bit()
1585 * |---hole/unset--||--first node--| in find_first_clear_extent_bit()
1590 *start_ret = prev->end + 1; in find_first_clear_extent_bit()
1601 while (state) { in find_first_clear_extent_bit()
1602 if (state->end >= start && !(state->state & bits)) { in find_first_clear_extent_bit()
1603 *end_ret = state->end; in find_first_clear_extent_bit()
1605 *end_ret = state->start - 1; in find_first_clear_extent_bit()
1608 state = next_state(state); in find_first_clear_extent_bit()
1611 spin_unlock(&tree->lock); in find_first_clear_extent_bit()
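
find_first_clear_extent_bit() reports the first area where @bits is not set: holes between records count as clear, the reported start may fall before @start, and the end is either just before the next record with the bits set or -1 when nothing bounds it. A simplified array-based sketch that treats every record without the bits as part of the clear area (the real function is more careful about where the preceding record ends):

#include <stddef.h>
#include <stdint.h>

struct rec { uint64_t start, end; unsigned int bits; };

static void first_clear(const struct rec *recs, size_t n, uint64_t from,
                        unsigned int bits, uint64_t *start, uint64_t *end)
{
        *start = 0;
        *end = UINT64_MAX;              /* analogue of *end_ret = -1 */
        for (size_t i = 0; i < n; i++) {
                if (!(recs[i].bits & bits))
                        continue;       /* counts as clear area */
                if (recs[i].end < from) {
                        *start = recs[i].end + 1;       /* clear area starts after it */
                        continue;
                }
                if (recs[i].start <= from) {            /* @from is inside a set range */
                        *start = recs[i].end + 1;
                        from = recs[i].end + 1;
                        continue;
                }
                *end = recs[i].start - 1;               /* bounded by the next set range */
                return;
        }
}
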
1630 * @cached_state: A cached state to be used across multiple calls to this
1644 struct extent_state *state = NULL; in count_range_bits() local
1654 spin_lock(&tree->lock); in count_range_bits()
1664 if (cached->start <= cur_start && cur_start <= cached->end) { in count_range_bits()
1665 state = cached; in count_range_bits()
1666 } else if (cached->start > cur_start) { in count_range_bits()
1670 * The cached state starts after our search range's start. Check in count_range_bits()
1671 * if the previous state record starts at or before the range we in count_range_bits()
1672 * are looking for, and if so, use it - this is a common case in count_range_bits()
1674 * no previous state record, we can start from our cached state. in count_range_bits()
1678 state = cached; in count_range_bits()
1679 else if (prev->start <= cur_start && cur_start <= prev->end) in count_range_bits()
1680 state = prev; in count_range_bits()
1688 if (!state) in count_range_bits()
1689 state = tree_search(tree, cur_start); in count_range_bits()
1691 while (state) { in count_range_bits()
1692 if (state->start > search_end) in count_range_bits()
1694 if (contig && found && state->start > last + 1) in count_range_bits()
1696 if (state->end >= cur_start && (state->state & bits) == bits) { in count_range_bits()
1697 total_bytes += min(search_end, state->end) + 1 - in count_range_bits()
1698 max(cur_start, state->start); in count_range_bits()
1702 *start = max(cur_start, state->start); in count_range_bits()
1705 last = state->end; in count_range_bits()
1709 state = next_state(state); in count_range_bits()
1714 *cached_state = state; in count_range_bits()
1715 if (state) in count_range_bits()
1716 refcount_inc(&state->refs); in count_range_bits()
1719 spin_unlock(&tree->lock); in count_range_bits()
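
count_range_bits() clamps each matching record to the search window and adds min(search_end, state->end) + 1 - max(cur_start, state->start) bytes, optionally stopping at the first discontinuity when a contiguous count is requested. The clamped accumulation, without the contig handling or the cached-state fast path, as a sketch:

#include <stddef.h>
#include <stdint.h>

struct rec { uint64_t start, end; unsigned int bits; };

/* Bytes inside [from, to] covered by records that have all of @bits set. */
static uint64_t count_bits(const struct rec *recs, size_t n, uint64_t from,
                           uint64_t to, unsigned int bits, uint64_t *first)
{
        uint64_t total = 0;
        int found = 0;

        for (size_t i = 0; i < n; i++) {
                if (recs[i].start > to)
                        break;          /* sorted: nothing further overlaps */
                if (recs[i].end < from || (recs[i].bits & bits) != bits)
                        continue;
                uint64_t lo = recs[i].start > from ? recs[i].start : from;
                uint64_t hi = recs[i].end < to ? recs[i].end : to;

                total += hi - lo + 1;
                if (!found) {
                        *first = lo;    /* where the first counted byte lives */
                        found = 1;
                }
        }
        return total;
}
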
1729 struct extent_state *state = NULL; in test_range_bit_exists() local
1734 spin_lock(&tree->lock); in test_range_bit_exists()
1735 state = tree_search(tree, start); in test_range_bit_exists()
1736 while (state && start <= end) { in test_range_bit_exists()
1737 if (state->start > end) in test_range_bit_exists()
1740 if (state->state & bit) { in test_range_bit_exists()
1745 /* If state->end is (u64)-1, start will overflow to 0 */ in test_range_bit_exists()
1746 start = state->end + 1; in test_range_bit_exists()
1749 state = next_state(state); in test_range_bit_exists()
1751 spin_unlock(&tree->lock); in test_range_bit_exists()
1761 struct extent_state *state = NULL; in test_range_bit() local
1766 spin_lock(&tree->lock); in test_range_bit()
1767 if (cached && extent_state_in_tree(cached) && cached->start <= start && in test_range_bit()
1768 cached->end > start) in test_range_bit()
1769 state = cached; in test_range_bit()
1771 state = tree_search(tree, start); in test_range_bit()
1772 while (state && start <= end) { in test_range_bit()
1773 if (state->start > start) { in test_range_bit()
1778 if (state->start > end) in test_range_bit()
1781 if ((state->state & bit) == 0) { in test_range_bit()
1786 if (state->end == (u64)-1) in test_range_bit()
1790 * Last entry (if state->end is (u64)-1 and overflow happens), in test_range_bit()
1793 start = state->end + 1; in test_range_bit()
1796 state = next_state(state); in test_range_bit()
1800 if (!state) in test_range_bit()
1802 spin_unlock(&tree->lock); in test_range_bit()
1813 * fail with -EEXIST or changeset will record the whole range. in set_record_extent_bits()
1840 if (err == -EEXIST) { in __try_lock_extent()
1842 clear_extent_bit(tree, start, failed_start - 1, bits, cached); in __try_lock_extent()
1849 * Either insert or lock the state struct between start and end; use mask to tell
1861 while (err == -EEXIST) { in __lock_extent()
1863 clear_extent_bit(tree, start, failed_start - 1, in __lock_extent()
1886 return -ENOMEM; in extent_state_init_cachep()
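
__lock_extent() builds range locking out of the pieces above: try to set the lock bit exclusively; on -EEXIST, drop whatever part before failed_start did get locked, wait for the conflicting bit to clear from failed_start onwards, then retry the whole range. A sketch of that retry loop, with hypothetical helpers standing in for __set_extent_bit(), clear_extent_bit() and wait_extent_bit():

#include <errno.h>
#include <stdint.h>

/* Hypothetical helpers assumed for this sketch (not the btrfs API). */
extern int set_bits_exclusive(uint64_t start, uint64_t end, unsigned int bit,
                              uint64_t *failed_start);
extern void clear_bits(uint64_t start, uint64_t end, unsigned int bit);
extern void wait_bits_cleared(uint64_t start, uint64_t end, unsigned int bit);

/* Lock [start, end]: loop until the whole range is exclusively ours. */
static int lock_range(uint64_t start, uint64_t end, unsigned int lock_bit)
{
        uint64_t failed_start;
        int err = set_bits_exclusive(start, end, lock_bit, &failed_start);

        while (err == -EEXIST) {
                if (failed_start != start)      /* undo the partial lock we did get */
                        clear_bits(start, failed_start - 1, lock_bit);
                wait_bits_cleared(failed_start, end, lock_bit);
                err = set_bits_exclusive(start, end, lock_bit, &failed_start);
        }
        return err;
}
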