Lines matching "state" (full identifier matches) in the btrfs extent IO tree code

13 static inline bool extent_state_in_tree(const struct extent_state *state)  in extent_state_in_tree()  argument
15 return !RB_EMPTY_NODE(&state->rb_node); in extent_state_in_tree()
22 static inline void btrfs_leak_debug_add_state(struct extent_state *state) in btrfs_leak_debug_add_state() argument
27 list_add(&state->leak_list, &states); in btrfs_leak_debug_add_state()
31 static inline void btrfs_leak_debug_del_state(struct extent_state *state) in btrfs_leak_debug_del_state() argument
36 list_del(&state->leak_list); in btrfs_leak_debug_del_state()
42 struct extent_state *state; in btrfs_extent_state_leak_debug_check() local
45 state = list_entry(states.next, struct extent_state, leak_list); in btrfs_extent_state_leak_debug_check()
46 pr_err("BTRFS: state leak: start %llu end %llu state %u in tree %d refs %d\n", in btrfs_extent_state_leak_debug_check()
47 state->start, state->end, state->state, in btrfs_extent_state_leak_debug_check()
48 extent_state_in_tree(state), in btrfs_extent_state_leak_debug_check()
49 refcount_read(&state->refs)); in btrfs_extent_state_leak_debug_check()
50 list_del(&state->leak_list); in btrfs_extent_state_leak_debug_check()
52 kmem_cache_free(extent_state_cache, state); in btrfs_extent_state_leak_debug_check()
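
The leak-debug helpers above keep every live extent_state on a global list: the allocator adds to it, the destructor removes from it, and the final check walks whatever is still linked, reports it, and frees it. When leak checking is compiled out, the add/del helpers become the no-op macros listed just below. A minimal userspace sketch of the same pattern, with hypothetical names and a plain doubly linked list standing in for the kernel's list_head:

```c
#include <stdio.h>
#include <stdlib.h>

/* Hypothetical stand-in for struct extent_state. */
struct state {
    unsigned long start, end;
    struct state *prev, *next;      /* leak-tracking list linkage */
};

static struct state *leak_list;     /* every state allocated but not yet freed */

static struct state *state_alloc(unsigned long start, unsigned long end)
{
    struct state *s = calloc(1, sizeof(*s));

    if (!s)
        return NULL;
    s->start = start;
    s->end = end;
    /* mirrors btrfs_leak_debug_add_state(): push onto the global list */
    s->next = leak_list;
    if (leak_list)
        leak_list->prev = s;
    leak_list = s;
    return s;
}

static void state_free(struct state *s)
{
    if (!s)
        return;
    /* mirrors btrfs_leak_debug_del_state(): unlink before freeing */
    if (s->prev)
        s->prev->next = s->next;
    else
        leak_list = s->next;
    if (s->next)
        s->next->prev = s->prev;
    free(s);
}

static void leak_check(void)
{
    /* mirrors btrfs_extent_state_leak_debug_check(): report and reclaim leftovers */
    while (leak_list) {
        struct state *s = leak_list;

        fprintf(stderr, "state leak: start %lu end %lu\n", s->start, s->end);
        state_free(s);
    }
}

int main(void)
{
    state_alloc(0, 4095);                 /* deliberately leaked */
    state_free(state_alloc(4096, 8191));  /* allocated and freed, never reported */
    leak_check();                         /* reports only the first allocation */
    return 0;
}
```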
77 #define btrfs_leak_debug_add_state(state) do {} while (0) argument
78 #define btrfs_leak_debug_del_state(state) do {} while (0) argument
119 tree->state = RB_ROOT; in extent_io_tree_init()
126 * Empty an io tree, removing and freeing every extent state record from the
129 * aren't any waiters on any extent state record (EXTENT_LOCK_BITS are never
130 * set on any extent state when calling this function).
135 struct extent_state *state; in extent_io_tree_release() local
139 root = tree->state; in extent_io_tree_release()
140 tree->state = RB_ROOT; in extent_io_tree_release()
141 rbtree_postorder_for_each_entry_safe(state, tmp, &root, rb_node) { in extent_io_tree_release()
143 RB_CLEAR_NODE(&state->rb_node); in extent_io_tree_release()
144 ASSERT(!(state->state & EXTENT_LOCK_BITS)); in extent_io_tree_release()
150 ASSERT(!waitqueue_active(&state->wq)); in extent_io_tree_release()
151 free_extent_state(state); in extent_io_tree_release()
158 ASSERT(RB_EMPTY_ROOT(&tree->state)); in extent_io_tree_release()
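
extent_io_tree_release first steals the root (tree->state = RB_ROOT), so the tree immediately reads as empty, and then walks the detached tree in post-order, freeing each record without rebalancing. A tiny sketch of that pattern on an ordinary binary tree, using hypothetical types and plain recursion instead of rbtree_postorder_for_each_entry_safe():

```c
#include <assert.h>
#include <stdlib.h>

struct node {
    struct node *left, *right;
};

struct tree {
    struct node *root;
};

/* Post-order: free both subtrees before the node itself, so no pointer is
 * read after the memory holding it has been released. */
static void free_postorder(struct node *n)
{
    if (!n)
        return;
    free_postorder(n->left);
    free_postorder(n->right);
    free(n);
}

static void tree_release(struct tree *t)
{
    struct node *old_root = t->root;

    t->root = NULL;            /* like tree->state = RB_ROOT */
    free_postorder(old_root);  /* like the postorder loop + free_extent_state() */
    assert(t->root == NULL);   /* like ASSERT(RB_EMPTY_ROOT(&tree->state)) */
}

int main(void)
{
    struct tree t = { .root = calloc(1, sizeof(struct node)) };

    t.root->left = calloc(1, sizeof(struct node));
    tree_release(&t);
    return 0;
}
```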
164 struct extent_state *state; in alloc_extent_state() local
171 state = kmem_cache_alloc(extent_state_cache, mask); in alloc_extent_state()
172 if (!state) in alloc_extent_state()
173 return state; in alloc_extent_state()
174 state->state = 0; in alloc_extent_state()
175 RB_CLEAR_NODE(&state->rb_node); in alloc_extent_state()
176 btrfs_leak_debug_add_state(state); in alloc_extent_state()
177 refcount_set(&state->refs, 1); in alloc_extent_state()
178 init_waitqueue_head(&state->wq); in alloc_extent_state()
179 trace_alloc_extent_state(state, mask, _RET_IP_); in alloc_extent_state()
180 return state; in alloc_extent_state()
191 void free_extent_state(struct extent_state *state) in free_extent_state() argument
193 if (!state) in free_extent_state()
195 if (refcount_dec_and_test(&state->refs)) { in free_extent_state()
196 WARN_ON(extent_state_in_tree(state)); in free_extent_state()
197 btrfs_leak_debug_del_state(state); in free_extent_state()
198 trace_free_extent_state(state, _RET_IP_); in free_extent_state()
199 kmem_cache_free(extent_state_cache, state); in free_extent_state()
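
alloc_extent_state initializes a record with no bits set and a reference count of one; free_extent_state is a no-op for NULL and only releases the memory when the last reference drops, while callers that cache a state (as in the refcount_inc() calls further down the listing) hold their own reference. A minimal userspace sketch of that refcounting, with hypothetical names and C11 atomics standing in for refcount_t:

```c
#include <stdatomic.h>
#include <stdlib.h>

struct state {
    unsigned long start, end;
    unsigned flags;                /* like state->state */
    atomic_int refs;               /* like refcount_t refs */
};

static struct state *state_alloc(unsigned long start, unsigned long end)
{
    struct state *s = malloc(sizeof(*s));

    if (!s)
        return NULL;
    s->start = start;
    s->end = end;
    s->flags = 0;                              /* state->state = 0 */
    atomic_init(&s->refs, 1);                  /* refcount_set(&state->refs, 1) */
    return s;
}

static struct state *state_get(struct state *s)
{
    atomic_fetch_add(&s->refs, 1);             /* refcount_inc(), e.g. when caching */
    return s;
}

static void state_put(struct state *s)
{
    if (!s)
        return;                                /* free_extent_state(NULL) is a no-op */
    if (atomic_fetch_sub(&s->refs, 1) == 1)    /* refcount_dec_and_test() */
        free(s);
}

int main(void)
{
    struct state *s = state_alloc(0, 4095);
    struct state *cached = state_get(s);       /* a cached pointer holds its own ref */

    state_put(s);                              /* refs 2 -> 1, still alive */
    state_put(cached);                         /* refs 1 -> 0, freed here */
    return 0;
}
```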
203 static int add_extent_changeset(struct extent_state *state, u32 bits, in add_extent_changeset() argument
211 if (set && (state->state & bits) == bits) in add_extent_changeset()
213 if (!set && (state->state & bits) == 0) in add_extent_changeset()
215 changeset->bytes_changed += state->end - state->start + 1; in add_extent_changeset()
216 ret = ulist_add(&changeset->range_changed, state->start, state->end, in add_extent_changeset()
221 static inline struct extent_state *next_state(struct extent_state *state) in next_state() argument
223 struct rb_node *next = rb_next(&state->rb_node); in next_state()
231 static inline struct extent_state *prev_state(struct extent_state *state) in prev_state() argument
233 struct rb_node *next = rb_prev(&state->rb_node); in prev_state()
263 struct rb_root *root = &tree->state; in tree_search_for_insert()
309 struct rb_root *root = &tree->state; in tree_search_prev_next()
350 const struct extent_state *state, in extent_io_tree_panic() argument
355 "extent io tree error on %s state start %llu end %llu", in extent_io_tree_panic()
356 opname, state->start, state->end); in extent_io_tree_panic()
359 static void merge_prev_state(struct extent_io_tree *tree, struct extent_state *state) in merge_prev_state() argument
363 prev = prev_state(state); in merge_prev_state()
364 if (prev && prev->end == state->start - 1 && prev->state == state->state) { in merge_prev_state()
367 state, prev); in merge_prev_state()
368 state->start = prev->start; in merge_prev_state()
369 rb_erase(&prev->rb_node, &tree->state); in merge_prev_state()
375 static void merge_next_state(struct extent_io_tree *tree, struct extent_state *state) in merge_next_state() argument
379 next = next_state(state); in merge_next_state()
380 if (next && next->start == state->end + 1 && next->state == state->state) { in merge_next_state()
383 state, next); in merge_next_state()
384 state->end = next->end; in merge_next_state()
385 rb_erase(&next->rb_node, &tree->state); in merge_next_state()
393 * extents with matching state are merged together into a single extent in the
394 * tree. Extents with EXTENT_IO in their state field are not merged because
400 static void merge_state(struct extent_io_tree *tree, struct extent_state *state) in merge_state() argument
402 if (state->state & (EXTENT_LOCK_BITS | EXTENT_BOUNDARY)) in merge_state()
405 merge_prev_state(tree, state); in merge_state()
406 merge_next_state(tree, state); in merge_state()
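
merge_prev_state and merge_next_state coalesce a record with a neighbour when the two are byte-adjacent (prev->end == state->start - 1, or next->start == state->end + 1) and carry identical flags; merge_state skips records with lock or boundary bits entirely. A small userspace sketch of the same adjacency test on a sorted array of ranges, with hypothetical names:

```c
#include <stdio.h>

struct range {
    unsigned long start, end;
    unsigned flags;
};

/* Merge each range into its predecessor when they touch and the flags match.
 * Returns the new number of ranges. */
static int merge_adjacent(struct range *r, int n)
{
    int out = 0;

    for (int i = 0; i < n; i++) {
        if (out > 0 &&
            r[out - 1].end + 1 == r[i].start &&   /* byte-adjacent */
            r[out - 1].flags == r[i].flags) {     /* identical bits set */
            r[out - 1].end = r[i].end;            /* absorb, like state->end = next->end */
            continue;
        }
        r[out++] = r[i];
    }
    return out;
}

int main(void)
{
    struct range r[] = {
        { 0,    4095,  0x1 },
        { 4096, 8191,  0x1 },   /* adjacent, same flags: merged with the first */
        { 8192, 12287, 0x2 },   /* adjacent but different flags: kept separate */
    };
    int n = merge_adjacent(r, 3);

    for (int i = 0; i < n; i++)
        printf("[%lu, %lu] flags 0x%x\n", r[i].start, r[i].end, r[i].flags);
    return 0;
}
```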
410 struct extent_state *state, in set_state_bits() argument
417 btrfs_set_delalloc_extent(extent_io_tree_to_inode(tree), state, bits); in set_state_bits()
419 ret = add_extent_changeset(state, bits_to_set, changeset, 1); in set_state_bits()
421 state->state |= bits_to_set; in set_state_bits()
440 struct extent_state *state, in insert_state() argument
446 const u64 start = state->start - 1; in insert_state()
447 const u64 end = state->end + 1; in insert_state()
450 set_state_bits(tree, state, bits, changeset); in insert_state()
452 node = &tree->state.rb_node; in insert_state()
459 if (state->end < entry->start) { in insert_state()
461 state->state == entry->state) { in insert_state()
465 state, entry); in insert_state()
466 entry->start = state->start; in insert_state()
468 state->state = 0; in insert_state()
472 } else if (state->end > entry->end) { in insert_state()
474 state->state == entry->state) { in insert_state()
478 state, entry); in insert_state()
479 entry->end = state->end; in insert_state()
481 state->state = 0; in insert_state()
490 rb_link_node(&state->rb_node, parent, node); in insert_state()
491 rb_insert_color(&state->rb_node, &tree->state); in insert_state()
493 return state; in insert_state()
497 * Insert state to @tree to the location given by @node and @parent.
500 struct extent_state *state, struct rb_node **node, in insert_state_fast() argument
504 set_state_bits(tree, state, bits, changeset); in insert_state_fast()
505 rb_link_node(&state->rb_node, parent, node); in insert_state_fast()
506 rb_insert_color(&state->rb_node, &tree->state); in insert_state_fast()
507 merge_state(tree, state); in insert_state_fast()
511 * Split a given extent state struct in two, inserting the preallocated
517 * are two extent state structs in the tree:
536 prealloc->state = orig->state; in split_state()
558 rb_insert_color(&prealloc->rb_node, &tree->state); in split_state()
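
split_state carves an existing record in two at 'split': the preallocated record takes the front half [orig->start, split - 1] with the same bits (prealloc->state = orig->state), and the original keeps [split, orig->end], so a later operation can modify just one side of the boundary. A minimal sketch of that arithmetic with a hypothetical struct and no tree insertion:

```c
#include <assert.h>
#include <stdio.h>

struct range {
    unsigned long start, end;
    unsigned flags;
};

/* Split *orig at 'split': *front receives [orig->start, split - 1] and *orig
 * is trimmed to [split, orig->end], both keeping the same flags. */
static void split_range(struct range *orig, struct range *front, unsigned long split)
{
    assert(split > orig->start && split <= orig->end);

    front->start = orig->start;
    front->end   = split - 1;
    front->flags = orig->flags;    /* like prealloc->state = orig->state */
    orig->start  = split;
}

int main(void)
{
    struct range orig = { 0, 8191, 0x1 }, front;

    split_range(&orig, &front, 4096);
    printf("front [%lu, %lu], orig [%lu, %lu]\n",
           front.start, front.end, orig.start, orig.end);
    return 0;
}
```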
564 * Utility function to clear some bits in an extent state struct. It will
565 * optionally wake up anyone waiting on this state (wake == 1).
567 * If no bits are set on the state struct after clearing things, the
571 struct extent_state *state, in clear_state_bit() argument
580 btrfs_clear_delalloc_extent(extent_io_tree_to_inode(tree), state, in clear_state_bit()
583 ret = add_extent_changeset(state, bits_to_clear, changeset, 0); in clear_state_bit()
585 state->state &= ~bits_to_clear; in clear_state_bit()
587 wake_up(&state->wq); in clear_state_bit()
588 if (state->state == 0) { in clear_state_bit()
589 next = next_state(state); in clear_state_bit()
590 if (extent_state_in_tree(state)) { in clear_state_bit()
591 rb_erase(&state->rb_node, &tree->state); in clear_state_bit()
592 RB_CLEAR_NODE(&state->rb_node); in clear_state_bit()
593 free_extent_state(state); in clear_state_bit()
598 merge_state(tree, state); in clear_state_bit()
599 next = next_state(state); in clear_state_bit()
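
clear_state_bit strips the requested bits, optionally wakes waiters, and removes and frees the record once no bits remain; otherwise the record may be merged with its neighbours. Either way it hands back the next record so the caller can keep walking the range. A compact sketch of the "clear, then drop if empty" step on a singly linked list, with hypothetical names:

```c
#include <stdio.h>
#include <stdlib.h>

struct range {
    unsigned long start, end;
    unsigned flags;
    struct range *next;
};

/* Clear 'bits' on *pos. If nothing is left set, unlink and free the record.
 * Returns the record that follows, like clear_state_bit() returning next. */
static struct range *clear_bits(struct range **pos, unsigned bits)
{
    struct range *r = *pos;
    struct range *next = r->next;

    r->flags &= ~bits;
    if (r->flags == 0) {        /* like state->state == 0: remove from the tree */
        *pos = next;
        free(r);
    }
    return next;
}

int main(void)
{
    struct range *head = malloc(sizeof(*head));

    *head = (struct range){ .start = 0, .end = 4095, .flags = 0x3, .next = NULL };
    clear_bits(&head, 0x1);     /* 0x2 still set, record survives */
    printf("flags now 0x%x\n", head->flags);
    clear_bits(&head, 0x2);     /* nothing left, record is freed and unlinked */
    printf("list is %s\n", head ? "non-empty" : "empty");
    return 0;
}
```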
627 struct extent_state *state; in __clear_extent_bit() local
654 * up not needing the pre-allocated extent state at all, which in __clear_extent_bit()
657 * If we end up needing a new extent state we allocate it later. in __clear_extent_bit()
675 state = cached; in __clear_extent_bit()
683 state = tree_search(tree, start); in __clear_extent_bit()
684 if (!state) in __clear_extent_bit()
687 if (state->start > end) in __clear_extent_bit()
689 WARN_ON(state->end < start); in __clear_extent_bit()
690 last_end = state->end; in __clear_extent_bit()
692 /* The state doesn't have the wanted bits, go ahead. */ in __clear_extent_bit()
693 if (!(state->state & bits)) { in __clear_extent_bit()
694 state = next_state(state); in __clear_extent_bit()
700 * | state | or in __clear_extent_bit()
701 * | ------------- state -------------- | in __clear_extent_bit()
713 if (state->start < start) { in __clear_extent_bit()
717 err = split_state(tree, state, prealloc, start); in __clear_extent_bit()
719 extent_io_tree_panic(tree, state, "split", err); in __clear_extent_bit()
724 if (state->end <= end) { in __clear_extent_bit()
725 state = clear_state_bit(tree, state, bits, wake, changeset); in __clear_extent_bit()
732 * | state | in __clear_extent_bit()
735 if (state->start <= end && state->end > end) { in __clear_extent_bit()
739 err = split_state(tree, state, prealloc, end + 1); in __clear_extent_bit()
741 extent_io_tree_panic(tree, state, "split", err); in __clear_extent_bit()
744 wake_up(&state->wq); in __clear_extent_bit()
752 state = clear_state_bit(tree, state, bits, wake, changeset); in __clear_extent_bit()
757 if (start <= end && state && !need_resched()) in __clear_extent_bit()
778 * Wait for one or more bits to clear on a range in the state tree.
785 struct extent_state *state; in wait_extent_bit() local
793 * are more bits than the bits we're waiting on set on this state. in wait_extent_bit()
796 state = *cached_state; in wait_extent_bit()
797 if (extent_state_in_tree(state) && in wait_extent_bit()
798 state->start <= start && start < state->end) in wait_extent_bit()
806 state = tree_search(tree, start); in wait_extent_bit()
808 if (!state) in wait_extent_bit()
810 if (state->start > end) in wait_extent_bit()
813 if (state->state & bits) { in wait_extent_bit()
816 start = state->start; in wait_extent_bit()
817 refcount_inc(&state->refs); in wait_extent_bit()
818 prepare_to_wait(&state->wq, &wait, TASK_UNINTERRUPTIBLE); in wait_extent_bit()
822 finish_wait(&state->wq, &wait); in wait_extent_bit()
823 free_extent_state(state); in wait_extent_bit()
826 start = state->end + 1; in wait_extent_bit()
832 state = next_state(state); in wait_extent_bit()
837 /* This state is no longer useful, clear it and free it up. */ in wait_extent_bit()
839 state = *cached_state; in wait_extent_bit()
841 free_extent_state(state); in wait_extent_bit()
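
wait_extent_bit sleeps on the per-record waitqueue until the bits of interest are cleared; the clearing side calls wake_up(&state->wq). The kernel version also re-searches the tree after each wakeup and juggles references and the tree lock, which the sketch below omits. A hedged userspace model of only the sleep-until-cleared idea, using hypothetical names and a pthread condition variable in place of the waitqueue (build with -pthread):

```c
#include <pthread.h>
#include <stdio.h>
#include <unistd.h>

#define BIT_LOCKED 0x1

/* Hypothetical single-record model: one flags word, one wait queue. */
struct state {
    unsigned flags;
    pthread_mutex_t lock;
    pthread_cond_t wq;                 /* stands in for state->wq */
};

static void wait_bits_clear(struct state *s, unsigned bits)
{
    pthread_mutex_lock(&s->lock);
    while (s->flags & bits)            /* sleep until the bits are gone */
        pthread_cond_wait(&s->wq, &s->lock);
    pthread_mutex_unlock(&s->lock);
}

static void clear_bits(struct state *s, unsigned bits)
{
    pthread_mutex_lock(&s->lock);
    s->flags &= ~bits;
    pthread_cond_broadcast(&s->wq);    /* like wake_up(&state->wq) */
    pthread_mutex_unlock(&s->lock);
}

static void *clearer(void *arg)
{
    sleep(1);
    clear_bits(arg, BIT_LOCKED);
    return NULL;
}

int main(void)
{
    struct state s = { .flags = BIT_LOCKED,
                       .lock = PTHREAD_MUTEX_INITIALIZER,
                       .wq = PTHREAD_COND_INITIALIZER };
    pthread_t t;

    pthread_create(&t, NULL, clearer, &s);
    wait_bits_clear(&s, BIT_LOCKED);   /* blocks until the other thread clears it */
    pthread_join(t, NULL);
    printf("bits cleared, flags=0x%x\n", s.flags);
    return 0;
}
```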
846 static void cache_state_if_flags(struct extent_state *state, in cache_state_if_flags() argument
851 if (!flags || (state->state & flags)) { in cache_state_if_flags()
852 *cached_ptr = state; in cache_state_if_flags()
853 refcount_inc(&state->refs); in cache_state_if_flags()
858 static void cache_state(struct extent_state *state, in cache_state() argument
861 return cache_state_if_flags(state, cached_ptr, EXTENT_LOCK_BITS | EXTENT_BOUNDARY); in cache_state()
865 * Find the first state struct with 'bits' set after 'start', and return it.
872 struct extent_state *state; in find_first_extent_bit_state() local
878 state = tree_search(tree, start); in find_first_extent_bit_state()
879 while (state) { in find_first_extent_bit_state()
880 if (state->end >= start && (state->state & bits)) in find_first_extent_bit_state()
881 return state; in find_first_extent_bit_state()
882 state = next_state(state); in find_first_extent_bit_state()
899 struct extent_state *state; in find_first_extent_bit() local
904 state = *cached_state; in find_first_extent_bit()
905 if (state->end == start - 1 && extent_state_in_tree(state)) { in find_first_extent_bit()
906 while ((state = next_state(state)) != NULL) { in find_first_extent_bit()
907 if (state->state & bits) in find_first_extent_bit()
911 * If we found the next extent state, clear cached_state in find_first_extent_bit()
912 * so that we can cache the next extent state below and in find_first_extent_bit()
913 * avoid future calls going over the same extent state in find_first_extent_bit()
919 if (state) in find_first_extent_bit()
927 state = find_first_extent_bit_state(tree, start, bits); in find_first_extent_bit()
929 if (state) { in find_first_extent_bit()
930 cache_state_if_flags(state, cached_state, 0); in find_first_extent_bit()
931 *start_ret = state->start; in find_first_extent_bit()
932 *end_ret = state->end; in find_first_extent_bit()
959 struct extent_state *state; in find_contiguous_extent_bit() local
965 state = find_first_extent_bit_state(tree, start, bits); in find_contiguous_extent_bit()
966 if (state) { in find_contiguous_extent_bit()
967 *start_ret = state->start; in find_contiguous_extent_bit()
968 *end_ret = state->end; in find_contiguous_extent_bit()
969 while ((state = next_state(state)) != NULL) { in find_contiguous_extent_bit()
970 if (state->start > (*end_ret + 1)) in find_contiguous_extent_bit()
972 *end_ret = state->end; in find_contiguous_extent_bit()
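
find_first_extent_bit_state returns the first record ending at or after 'start' that has one of the requested bits set; find_contiguous_extent_bit then keeps extending *end_ret for as long as the following records begin no later than one byte past the current end. A small sketch of the same scan over a sorted array, with hypothetical names (like the in-tree loop, the extension step checks only adjacency):

```c
#include <stdio.h>

struct range {
    unsigned long start, end;
    unsigned flags;
};

static int find_contiguous(const struct range *r, int n, unsigned long start,
                           unsigned bits, unsigned long *first, unsigned long *end)
{
    int i = 0;

    /* like find_first_extent_bit_state(): first record at/after 'start' with a wanted bit */
    while (i < n && !(r[i].end >= start && (r[i].flags & bits)))
        i++;
    if (i == n)
        return -1;

    *first = r[i].start;
    *end = r[i].end;
    /* like find_contiguous_extent_bit(): absorb records that begin exactly
     * one byte after the current end; a gap stops the run */
    for (i++; i < n; i++) {
        if (r[i].start > *end + 1)
            break;
        *end = r[i].end;
    }
    return 0;
}

int main(void)
{
    struct range r[] = {
        { 0,     4095,  0x0 },
        { 4096,  8191,  0x1 },
        { 8192,  12287, 0x1 },   /* adjacent: extends the run */
        { 16384, 20479, 0x1 },   /* gap before it: not part of the run */
    };
    unsigned long first, end;

    if (find_contiguous(r, 4, 0, 0x1, &first, &end) == 0)
        printf("contiguous run [%lu, %lu]\n", first, end);   /* [4096, 12287] */
    return 0;
}
```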
990 struct extent_state *state; in btrfs_find_delalloc_range() local
1001 state = tree_search(tree, cur_start); in btrfs_find_delalloc_range()
1002 if (!state) { in btrfs_find_delalloc_range()
1007 while (state) { in btrfs_find_delalloc_range()
1008 if (found && (state->start != cur_start || in btrfs_find_delalloc_range()
1009 (state->state & EXTENT_BOUNDARY))) { in btrfs_find_delalloc_range()
1012 if (!(state->state & EXTENT_DELALLOC)) { in btrfs_find_delalloc_range()
1014 *end = state->end; in btrfs_find_delalloc_range()
1018 *start = state->start; in btrfs_find_delalloc_range()
1019 *cached_state = state; in btrfs_find_delalloc_range()
1020 refcount_inc(&state->refs); in btrfs_find_delalloc_range()
1023 *end = state->end; in btrfs_find_delalloc_range()
1024 cur_start = state->end + 1; in btrfs_find_delalloc_range()
1025 total_bytes += state->end - state->start + 1; in btrfs_find_delalloc_range()
1028 state = next_state(state); in btrfs_find_delalloc_range()
1055 struct extent_state *state; in __set_extent_bit() local
1077 * up not needing the pre-allocated extent state at all, which in __set_extent_bit()
1080 * If we end up needing a new extent state we allocate it later. in __set_extent_bit()
1090 state = *cached_state; in __set_extent_bit()
1091 if (state->start <= start && state->end > start && in __set_extent_bit()
1092 extent_state_in_tree(state)) in __set_extent_bit()
1099 state = tree_search_for_insert(tree, start, &p, &parent); in __set_extent_bit()
1100 if (!state) { in __set_extent_bit()
1112 last_start = state->start; in __set_extent_bit()
1113 last_end = state->end; in __set_extent_bit()
1117 * | state | in __set_extent_bit()
1121 if (state->start == start && state->end <= end) { in __set_extent_bit()
1122 if (state->state & exclusive_bits) { in __set_extent_bit()
1123 *failed_start = state->start; in __set_extent_bit()
1124 cache_state(state, failed_state); in __set_extent_bit()
1129 set_state_bits(tree, state, bits, changeset); in __set_extent_bit()
1130 cache_state(state, cached_state); in __set_extent_bit()
1131 merge_state(tree, state); in __set_extent_bit()
1135 state = next_state(state); in __set_extent_bit()
1136 if (start < end && state && state->start == start && in __set_extent_bit()
1144 * | state | in __set_extent_bit()
1146 * | ------------- state -------------- | in __set_extent_bit()
1157 if (state->start < start) { in __set_extent_bit()
1158 if (state->state & exclusive_bits) { in __set_extent_bit()
1160 cache_state(state, failed_state); in __set_extent_bit()
1169 if ((state->state & bits) == bits) { in __set_extent_bit()
1170 start = state->end + 1; in __set_extent_bit()
1171 cache_state(state, cached_state); in __set_extent_bit()
1178 ret = split_state(tree, state, prealloc, start); in __set_extent_bit()
1180 extent_io_tree_panic(tree, state, "split", ret); in __set_extent_bit()
1185 if (state->end <= end) { in __set_extent_bit()
1186 set_state_bits(tree, state, bits, changeset); in __set_extent_bit()
1187 cache_state(state, cached_state); in __set_extent_bit()
1188 merge_state(tree, state); in __set_extent_bit()
1192 state = next_state(state); in __set_extent_bit()
1193 if (start < end && state && state->start == start && in __set_extent_bit()
1201 * | state | or | state | in __set_extent_bit()
1206 if (state->start > start) { in __set_extent_bit()
1239 * | state | in __set_extent_bit()
1243 if (state->start <= end && state->end > end) { in __set_extent_bit()
1244 if (state->state & exclusive_bits) { in __set_extent_bit()
1246 cache_state(state, failed_state); in __set_extent_bit()
1254 ret = split_state(tree, state, prealloc, end + 1); in __set_extent_bit()
1256 extent_io_tree_panic(tree, state, "split", ret); in __set_extent_bit()
1297 * @cached_state: state that we're going to cache
1311 struct extent_state *state; in convert_extent_bit() local
1327 * Best effort, don't worry if extent state allocation fails in convert_extent_bit()
1328 * here for the first iteration. We might have a cached state in convert_extent_bit()
1330 * extent state allocations are needed. We'll only know this in convert_extent_bit()
1340 state = *cached_state; in convert_extent_bit()
1341 if (state->start <= start && state->end > start && in convert_extent_bit()
1342 extent_state_in_tree(state)) in convert_extent_bit()
1350 state = tree_search_for_insert(tree, start, &p, &parent); in convert_extent_bit()
1351 if (!state) { in convert_extent_bit()
1365 last_start = state->start; in convert_extent_bit()
1366 last_end = state->end; in convert_extent_bit()
1370 * | state | in convert_extent_bit()
1374 if (state->start == start && state->end <= end) { in convert_extent_bit()
1375 set_state_bits(tree, state, bits, NULL); in convert_extent_bit()
1376 cache_state(state, cached_state); in convert_extent_bit()
1377 state = clear_state_bit(tree, state, clear_bits, 0, NULL); in convert_extent_bit()
1381 if (start < end && state && state->start == start && in convert_extent_bit()
1389 * | state | in convert_extent_bit()
1391 * | ------------- state -------------- | in convert_extent_bit()
1402 if (state->start < start) { in convert_extent_bit()
1408 ret = split_state(tree, state, prealloc, start); in convert_extent_bit()
1410 extent_io_tree_panic(tree, state, "split", ret); in convert_extent_bit()
1414 if (state->end <= end) { in convert_extent_bit()
1415 set_state_bits(tree, state, bits, NULL); in convert_extent_bit()
1416 cache_state(state, cached_state); in convert_extent_bit()
1417 state = clear_state_bit(tree, state, clear_bits, 0, NULL); in convert_extent_bit()
1421 if (start < end && state && state->start == start && in convert_extent_bit()
1429 * | state | or | state | in convert_extent_bit()
1434 if (state->start > start) { in convert_extent_bit()
1468 * | state | in convert_extent_bit()
1472 if (state->start <= end && state->end > end) { in convert_extent_bit()
1479 ret = split_state(tree, state, prealloc, end + 1); in convert_extent_bit()
1481 extent_io_tree_panic(tree, state, "split", ret); in convert_extent_bit()
1524 struct extent_state *state; in find_first_clear_extent_bit() local
1531 state = tree_search_prev_next(tree, start, &prev, &next); in find_first_clear_extent_bit()
1532 if (!state && !next && !prev) { in find_first_clear_extent_bit()
1540 } else if (!state && !next) { in find_first_clear_extent_bit()
1548 } else if (!state) { in find_first_clear_extent_bit()
1549 state = next; in find_first_clear_extent_bit()
1553 * At this point 'state' either contains 'start' or start is in find_first_clear_extent_bit()
1554 * before 'state' in find_first_clear_extent_bit()
1556 if (in_range(start, state->start, state->end - state->start + 1)) { in find_first_clear_extent_bit()
1557 if (state->state & bits) { in find_first_clear_extent_bit()
1563 start = state->end + 1; in find_first_clear_extent_bit()
1574 *start_ret = state->start; in find_first_clear_extent_bit()
1601 while (state) { in find_first_clear_extent_bit()
1602 if (state->end >= start && !(state->state & bits)) { in find_first_clear_extent_bit()
1603 *end_ret = state->end; in find_first_clear_extent_bit()
1605 *end_ret = state->start - 1; in find_first_clear_extent_bit()
1608 state = next_state(state); in find_first_clear_extent_bit()
1630 * @cached_state: A cached state to be used across multiple calls to this
1644 struct extent_state *state = NULL; in count_range_bits() local
1665 state = cached; in count_range_bits()
1670 * The cached state starts after our search range's start. Check in count_range_bits()
1671 * if the previous state record starts at or before the range we in count_range_bits()
1674 * no previous state record, we can start from our cached state. in count_range_bits()
1678 state = cached; in count_range_bits()
1680 state = prev; in count_range_bits()
1688 if (!state) in count_range_bits()
1689 state = tree_search(tree, cur_start); in count_range_bits()
1691 while (state) { in count_range_bits()
1692 if (state->start > search_end) in count_range_bits()
1694 if (contig && found && state->start > last + 1) in count_range_bits()
1696 if (state->end >= cur_start && (state->state & bits) == bits) { in count_range_bits()
1697 total_bytes += min(search_end, state->end) + 1 - in count_range_bits()
1698 max(cur_start, state->start); in count_range_bits()
1702 *start = max(cur_start, state->start); in count_range_bits()
1705 last = state->end; in count_range_bits()
1709 state = next_state(state); in count_range_bits()
1714 *cached_state = state; in count_range_bits()
1715 if (state) in count_range_bits()
1716 refcount_inc(&state->refs); in count_range_bits()
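
count_range_bits sums how many bytes of [cur_start, search_end] lie in records that have all of the requested bits, adding the overlap min(search_end, state->end) + 1 - max(cur_start, state->start) for each match. A compact sketch of just that overlap arithmetic over a sorted array, with hypothetical names:

```c
#include <stdio.h>

struct range {
    unsigned long start, end;
    unsigned flags;
};

static unsigned long min_ul(unsigned long a, unsigned long b) { return a < b ? a : b; }
static unsigned long max_ul(unsigned long a, unsigned long b) { return a > b ? a : b; }

/* Count the bytes of [cur_start, search_end] covered by ranges that have all
 * of 'bits' set, mirroring the overlap arithmetic in count_range_bits(). */
static unsigned long count_bytes(const struct range *r, int n,
                                 unsigned long cur_start, unsigned long search_end,
                                 unsigned bits)
{
    unsigned long total = 0;

    for (int i = 0; i < n; i++) {
        if (r[i].start > search_end)
            break;
        if (r[i].end >= cur_start && (r[i].flags & bits) == bits)
            total += min_ul(search_end, r[i].end) + 1 -
                     max_ul(cur_start, r[i].start);
    }
    return total;
}

int main(void)
{
    struct range r[] = {
        { 0,    4095,  0x1 },
        { 4096, 8191,  0x0 },
        { 8192, 16383, 0x1 },
    };

    /* Query [1024, 12287]: 3072 bytes from the first range + 4096 from the third. */
    printf("%lu bytes\n", count_bytes(r, 3, 1024, 12287, 0x1));
    return 0;
}
```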
1729 struct extent_state *state = NULL; in test_range_bit_exists() local
1735 state = tree_search(tree, start); in test_range_bit_exists()
1736 while (state && start <= end) { in test_range_bit_exists()
1737 if (state->start > end) in test_range_bit_exists()
1740 if (state->state & bit) { in test_range_bit_exists()
1745 /* If state->end is (u64)-1, start will overflow to 0 */ in test_range_bit_exists()
1746 start = state->end + 1; in test_range_bit_exists()
1749 state = next_state(state); in test_range_bit_exists()
1761 struct extent_state *state = NULL; in test_range_bit() local
1769 state = cached; in test_range_bit()
1771 state = tree_search(tree, start); in test_range_bit()
1772 while (state && start <= end) { in test_range_bit()
1773 if (state->start > start) { in test_range_bit()
1778 if (state->start > end) in test_range_bit()
1781 if ((state->state & bit) == 0) { in test_range_bit()
1786 if (state->end == (u64)-1) in test_range_bit()
1790 * Last entry (if state->end is (u64)-1 and overflow happens), in test_range_bit()
1793 start = state->end + 1; in test_range_bit()
1796 state = next_state(state); in test_range_bit()
1800 if (!state) in test_range_bit()
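
test_range_bit only reports true when every byte of [start, end] is covered by records carrying the bit: a gap before the next record (state->start > start) or a covered record without the bit fails the check immediately. A small sketch of that gap-aware walk over a sorted array, with hypothetical names:

```c
#include <stdbool.h>
#include <stdio.h>

struct range {
    unsigned long start, end;
    unsigned flags;
};

/* True only if every byte of [start, end] lies in a range that has 'bit' set. */
static bool range_has_bit(const struct range *r, int n,
                          unsigned long start, unsigned long end, unsigned bit)
{
    for (int i = 0; i < n && start <= end; i++) {
        if (r[i].end < start)
            continue;              /* entirely before the point we still need */
        if (r[i].start > start)
            return false;          /* hole before this record, like state->start > start */
        if (!(r[i].flags & bit))
            return false;          /* covered, but without the bit */
        if (r[i].end >= end)
            return true;           /* the rest of the query range is covered too */
        start = r[i].end + 1;      /* advance to the first byte not yet checked */
    }
    return false;                  /* ran out of records before reaching 'end' */
}

int main(void)
{
    struct range r[] = {
        { 0,    4095, 0x1 },
        { 4096, 8191, 0x1 },
    };

    printf("%d\n", range_has_bit(r, 2, 1024, 6000, 0x1));   /* 1: fully covered */
    printf("%d\n", range_has_bit(r, 2, 1024, 9000, 0x1));   /* 0: ends past coverage */
    return 0;
}
```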
1849 * Either insert or lock state struct between start and end use mask to tell