Lines matching +full:free +full:- +full:running (fs/btrfs/discard.c)
1 // SPDX-License-Identifier: GPL-2.0
11 #include "block-group.h"
13 #include "free-space-cache.h"
19 * Async discard manages trimming of free space outside of transaction commit.
20 * Discarding is done by managing the block_groups on an LRU list based on free
32 * The first list is special to manage discarding of fully free block groups.
33 * This is necessary because we issue a final trim for a full free block group
39 * The in-memory free space cache serves as the backing state for discard.
44 * As the free space cache uses bitmaps, there exists a tradeoff between
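The control-structure fields visible across the matches below (the lock, the discard_list[] array, the delayed work, prev_discard, the two atomics, and the tunables set up in btrfs_discard_init() near the end) suggest a layout roughly like the following. This is a reconstruction from the matched lines only, not the verbatim kernel definition; types are guessed where the listing does not show them.

	struct btrfs_discard_ctl {
		struct workqueue_struct *discard_workers;
		struct delayed_work work;
		spinlock_t lock;
		struct btrfs_block_group *block_group;	/* group currently being trimmed */
		struct list_head discard_list[BTRFS_NR_DISCARD_LISTS];
		u64 prev_discard;			/* bytes trimmed by the last pass */
		u64 prev_discard_time;			/* ns timestamp of the last pass */
		atomic_t discardable_extents;
		atomic64_t discardable_bytes;
		u64 max_discard_size;
		u64 delay_ms;
		u32 iops_limit;
		u32 kbps_limit;
		u64 discard_extent_bytes;
		u64 discard_bitmap_bytes;
		atomic64_t discard_bytes_saved;
	};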
73 return &discard_ctl->discard_list[block_group->discard_index]; in get_discard_list()
77 * Determine if async discard should be running.
89 return (!(fs_info->sb->s_flags & SB_RDONLY) && in btrfs_run_discard_work()
90 test_bit(BTRFS_FS_DISCARD_RUNNING, &fs_info->flags)); in btrfs_run_discard_work()
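Lines 89-90 are the whole predicate: async discard runs only while the filesystem is writable and the BTRFS_FS_DISCARD_RUNNING bit is set (the bit is flipped by btrfs_discard_resume() and btrfs_discard_stop(), lines 740 and 745 below). Reassembled, with the fs_info lookup assumed to come via container_of() since the listing does not show it:

	bool btrfs_run_discard_work(struct btrfs_discard_ctl *discard_ctl)
	{
		/* Assumed: recover fs_info from the embedded discard_ctl. */
		struct btrfs_fs_info *fs_info = container_of(discard_ctl,
							     struct btrfs_fs_info,
							     discard_ctl);

		return (!(fs_info->sb->s_flags & SB_RDONLY) &&
			test_bit(BTRFS_FS_DISCARD_RUNNING, &fs_info->flags));
	}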
96 lockdep_assert_held(&discard_ctl->lock); in __add_to_discard_list()
100 if (list_empty(&block_group->discard_list) || in __add_to_discard_list()
101 block_group->discard_index == BTRFS_DISCARD_INDEX_UNUSED) { in __add_to_discard_list()
102 if (block_group->discard_index == BTRFS_DISCARD_INDEX_UNUSED) in __add_to_discard_list()
103 block_group->discard_index = BTRFS_DISCARD_INDEX_START; in __add_to_discard_list()
104 block_group->discard_eligible_time = (ktime_get_ns() + in __add_to_discard_list()
106 block_group->discard_state = BTRFS_DISCARD_RESET_CURSOR; in __add_to_discard_list()
108 if (list_empty(&block_group->discard_list)) in __add_to_discard_list()
111 list_move_tail(&block_group->discard_list, in __add_to_discard_list()
121 spin_lock(&discard_ctl->lock); in add_to_discard_list()
123 spin_unlock(&discard_ctl->lock); in add_to_discard_list()
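The split between __add_to_discard_list() (line 96, lockdep_assert_held) and add_to_discard_list() (lines 121-123, spin_lock/spin_unlock) is the usual locked-helper convention: the double-underscore variant requires discard_ctl->lock to already be held, and the plain variant is the locking wrapper. In outline, from the fragments:

	static void __add_to_discard_list(struct btrfs_discard_ctl *discard_ctl,
					  struct btrfs_block_group *block_group)
	{
		lockdep_assert_held(&discard_ctl->lock);
		/* requeue onto the appropriate discard_list; see lines 100-111 */
	}

	static void add_to_discard_list(struct btrfs_discard_ctl *discard_ctl,
					struct btrfs_block_group *block_group)
	{
		spin_lock(&discard_ctl->lock);
		__add_to_discard_list(discard_ctl, block_group);
		spin_unlock(&discard_ctl->lock);
	}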
131 spin_lock(&discard_ctl->lock); in add_to_discard_unused_list()
133 queued = !list_empty(&block_group->discard_list); in add_to_discard_unused_list()
136 spin_unlock(&discard_ctl->lock); in add_to_discard_unused_list()
140 list_del_init(&block_group->discard_list); in add_to_discard_unused_list()
142 block_group->discard_index = BTRFS_DISCARD_INDEX_UNUSED; in add_to_discard_unused_list()
143 block_group->discard_eligible_time = (ktime_get_ns() + in add_to_discard_unused_list()
145 block_group->discard_state = BTRFS_DISCARD_RESET_CURSOR; in add_to_discard_unused_list()
148 list_add_tail(&block_group->discard_list, in add_to_discard_unused_list()
149 &discard_ctl->discard_list[BTRFS_DISCARD_INDEX_UNUSED]); in add_to_discard_unused_list()
151 spin_unlock(&discard_ctl->lock); in add_to_discard_unused_list()
157 bool running = false; in remove_from_discard_list()
160 spin_lock(&discard_ctl->lock); in remove_from_discard_list()
162 if (block_group == discard_ctl->block_group) { in remove_from_discard_list()
163 running = true; in remove_from_discard_list()
164 discard_ctl->block_group = NULL; in remove_from_discard_list()
167 block_group->discard_eligible_time = 0; in remove_from_discard_list()
168 queued = !list_empty(&block_group->discard_list); in remove_from_discard_list()
169 list_del_init(&block_group->discard_list); in remove_from_discard_list()
171 * If the block group is currently running in the discard workfn, we in remove_from_discard_list()
176 if (queued && !running) in remove_from_discard_list()
179 spin_unlock(&discard_ctl->lock); in remove_from_discard_list()
181 return running; in remove_from_discard_list()
201 struct list_head *discard_list = &discard_ctl->discard_list[i]; in find_next_block_group()
211 if (ret_block_group->discard_eligible_time < now) in find_next_block_group()
214 if (ret_block_group->discard_eligible_time > in find_next_block_group()
215 block_group->discard_eligible_time) in find_next_block_group()
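The loop fragments at lines 201-215 implement a minimum search over all the discard lists with an early exit once an already-eligible group is found. A minimal userspace model of that scan, with plain arrays standing in for the per-list head entries:

	#include <stdio.h>
	#include <stdint.h>

	int main(void)
	{
		uint64_t now = 100;
		uint64_t eligible_time[] = { 180, 140, 90, 200 }; /* one head per list */
		uint64_t best = UINT64_MAX;

		for (int i = 0; i < 4; i++) {
			if (eligible_time[i] < best)
				best = eligible_time[i];
			if (best < now)
				break;	/* already eligible: no need to keep scanning */
		}
		printf("next eligible at %llu\n", (unsigned long long)best);
		return 0;
	}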
244 spin_lock(&discard_ctl->lock); in peek_discard_list()
248 if (block_group && now >= block_group->discard_eligible_time) { in peek_discard_list()
249 if (block_group->discard_index == BTRFS_DISCARD_INDEX_UNUSED && in peek_discard_list()
250 block_group->used != 0) { in peek_discard_list()
254 list_del_init(&block_group->discard_list); in peek_discard_list()
259 if (block_group->discard_state == BTRFS_DISCARD_RESET_CURSOR) { in peek_discard_list()
260 block_group->discard_cursor = block_group->start; in peek_discard_list()
261 block_group->discard_state = BTRFS_DISCARD_EXTENTS; in peek_discard_list()
263 discard_ctl->block_group = block_group; in peek_discard_list()
266 *discard_state = block_group->discard_state; in peek_discard_list()
267 *discard_index = block_group->discard_index; in peek_discard_list()
269 spin_unlock(&discard_ctl->lock); in peek_discard_list()
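peek_discard_list() also drives the per-group state machine: a group queued with BTRFS_DISCARD_RESET_CURSOR (lines 106, 145) has its cursor rewound to the start of the group and is moved to BTRFS_DISCARD_EXTENTS (lines 259-261); the workfn later advances it through the bitmap pass. The states named in the listing, in their assumed progression:

	/* Assumed ordering; only the names appear in the listing. */
	enum btrfs_discard_state {
		BTRFS_DISCARD_EXTENTS,		/* first pass: trim free extents */
		BTRFS_DISCARD_BITMAPS,		/* second pass: trim bitmap regions */
		BTRFS_DISCARD_RESET_CURSOR,	/* requeued: rewind cursor, start over */
	};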
281 * to prioritize discarding based on size. Should a free space that matches
291 !btrfs_test_opt(block_group->fs_info, DISCARD_ASYNC)) in btrfs_discard_check_filter()
294 discard_ctl = &block_group->fs_info->discard_ctl; in btrfs_discard_check_filter()
296 if (block_group->discard_index > BTRFS_DISCARD_INDEX_START && in btrfs_discard_check_filter()
297 bytes >= discard_minlen[block_group->discard_index - 1]) { in btrfs_discard_check_filter()
305 block_group->discard_index = i; in btrfs_discard_check_filter()
325 block_group->discard_index++; in btrfs_update_discard_index()
326 if (block_group->discard_index == BTRFS_NR_DISCARD_LISTS) { in btrfs_update_discard_index()
327 block_group->discard_index = 1; in btrfs_update_discard_index()
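Lines 325-327 advance a block group to the next size-filtered list, wrapping back to index 1 rather than 0 because index 0 (BTRFS_DISCARD_INDEX_UNUSED) is reserved for fully free block groups. A tiny demo of the wraparound, assuming three lists (the actual BTRFS_NR_DISCARD_LISTS value is not shown in the listing):

	#include <stdio.h>

	#define NR_DISCARD_LISTS 3	/* assumed for illustration */

	int main(void)
	{
		int index = 1;

		for (int i = 0; i < 4; i++) {
			index++;
			if (index == NR_DISCARD_LISTS)
				index = 1;	/* skip list 0: reserved for unused bgs */
			printf("next discard index: %d\n", index);
		}
		return 0;	/* prints 2, 1, 2, 1 */
	}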
347 cancel_delayed_work_sync(&discard_ctl->work); in btrfs_discard_cancel_work()
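Line 347's cancel_delayed_work_sync() sits inside btrfs_discard_cancel_work(). Combined with remove_from_discard_list()'s return value (line 181), which reports whether the workfn was actively processing this group, the function plausibly reads as follows; this is a reconstruction, and only the cancel_delayed_work_sync() line actually appears in the listing:

	void btrfs_discard_cancel_work(struct btrfs_discard_ctl *discard_ctl,
				       struct btrfs_block_group *block_group)
	{
		if (remove_from_discard_list(discard_ctl, block_group)) {
			cancel_delayed_work_sync(&discard_ctl->work);
			btrfs_discard_schedule_work(discard_ctl, true);
		}
	}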
363 if (!block_group || !btrfs_test_opt(block_group->fs_info, DISCARD_ASYNC)) in btrfs_discard_queue_work()
366 if (block_group->used == 0) in btrfs_discard_queue_work()
371 if (!delayed_work_pending(&discard_ctl->work)) in btrfs_discard_queue_work()
382 if (!override && delayed_work_pending(&discard_ctl->work)) in __btrfs_discard_schedule_work()
387 u64 delay = discard_ctl->delay_ms * NSEC_PER_MSEC; in __btrfs_discard_schedule_work()
388 u32 kbps_limit = READ_ONCE(discard_ctl->kbps_limit); in __btrfs_discard_schedule_work()
395 if (kbps_limit && discard_ctl->prev_discard) { in __btrfs_discard_schedule_work()
397 u64 bps_delay = div64_u64(discard_ctl->prev_discard * in __btrfs_discard_schedule_work()
407 if (now < block_group->discard_eligible_time) { in __btrfs_discard_schedule_work()
408 u64 bg_timeout = block_group->discard_eligible_time - now; in __btrfs_discard_schedule_work()
413 if (override && discard_ctl->prev_discard) { in __btrfs_discard_schedule_work()
414 u64 elapsed = now - discard_ctl->prev_discard_time; in __btrfs_discard_schedule_work()
417 delay -= elapsed; in __btrfs_discard_schedule_work()
422 mod_delayed_work(discard_ctl->discard_workers, in __btrfs_discard_schedule_work()
423 &discard_ctl->work, nsecs_to_jiffies(delay)); in __btrfs_discard_schedule_work()
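Lines 387-397 turn delay_ms into nanoseconds and then let a throughput cap stretch it: if kbps_limit is set and something was trimmed last pass, the delay grows to at least the time that much data would take at the cap. A userspace model with sample numbers, assuming kbps_limit counts KiB/s (the unit is not shown in the listing):

	#include <stdio.h>
	#include <stdint.h>

	#define NSEC_PER_SEC  1000000000ULL
	#define NSEC_PER_MSEC 1000000ULL

	int main(void)
	{
		uint64_t delay_ms = 1000;		/* base pacing interval */
		uint64_t prev_discard = 8 << 20;	/* 8 MiB trimmed last pass */
		uint64_t kbps_limit = 1024;		/* cap: 1 MiB/s (assumed KiB/s unit) */

		uint64_t delay = delay_ms * NSEC_PER_MSEC;

		if (kbps_limit && prev_discard) {
			uint64_t bps_delay = prev_discard * NSEC_PER_SEC /
					     (kbps_limit * 1024);
			if (bps_delay > delay)
				delay = bps_delay;	/* throughput cap wins */
		}

		/* 8 MiB at 1 MiB/s: the delay stretches from 1000 ms to 8000 ms. */
		printf("effective delay: %llu ms\n",
		       (unsigned long long)(delay / NSEC_PER_MSEC));
		return 0;
	}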
442 spin_lock(&discard_ctl->lock); in btrfs_discard_schedule_work()
444 spin_unlock(&discard_ctl->lock); in btrfs_discard_schedule_work()
463 if (block_group->used == 0) { in btrfs_finish_discard_pass()
479 * region. It does this in a two-pass fashion: first extents and second
498 if (now < block_group->discard_eligible_time) { in btrfs_discard_workfn()
516 maxlen = discard_minlen[discard_index - 1]; in btrfs_discard_workfn()
519 block_group->discard_cursor, in btrfs_discard_workfn()
522 discard_ctl->discard_bitmap_bytes += trimmed; in btrfs_discard_workfn()
525 block_group->discard_cursor, in btrfs_discard_workfn()
528 discard_ctl->discard_extent_bytes += trimmed; in btrfs_discard_workfn()
532 if (block_group->discard_cursor >= btrfs_block_group_end(block_group)) { in btrfs_discard_workfn()
536 block_group->discard_cursor = block_group->start; in btrfs_discard_workfn()
537 spin_lock(&discard_ctl->lock); in btrfs_discard_workfn()
538 if (block_group->discard_state != in btrfs_discard_workfn()
540 block_group->discard_state = in btrfs_discard_workfn()
542 spin_unlock(&discard_ctl->lock); in btrfs_discard_workfn()
547 spin_lock(&discard_ctl->lock); in btrfs_discard_workfn()
548 discard_ctl->prev_discard = trimmed; in btrfs_discard_workfn()
549 discard_ctl->prev_discard_time = now; in btrfs_discard_workfn()
552 * running in this workfn, then we didn't deref it, since this function in btrfs_discard_workfn()
553 * still owned that reference. But we set the discard_ctl->block_group in btrfs_discard_workfn()
557 if (discard_ctl->block_group == NULL) in btrfs_discard_workfn()
559 discard_ctl->block_group = NULL; in btrfs_discard_workfn()
561 spin_unlock(&discard_ctl->lock); in btrfs_discard_workfn()
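Lines 516-528 are the core of the two-pass trim the comment at line 479 describes: the extent pass and the bitmap pass each advance block_group->discard_cursor and feed their own byte counter. Notably, the bitmap pass caps its trim length at the previous list's minimum (line 516), so it only picks up what the larger-extent passes would have skipped. In outline, with arguments the listing does not show marked as elided:

	if (discard_state == BTRFS_DISCARD_BITMAPS) {
		u64 maxlen = 0;

		/* Previous list's minlen becomes this pass's maximum. */
		if (discard_index != BTRFS_DISCARD_INDEX_UNUSED)
			maxlen = discard_minlen[discard_index - 1];

		btrfs_trim_block_group_bitmaps(block_group, &trimmed,
					       block_group->discard_cursor,
					       /* end, minlen elided */ maxlen, true);
		discard_ctl->discard_bitmap_bytes += trimmed;
	} else {
		btrfs_trim_block_group_extents(block_group, &trimmed,
					       block_group->discard_cursor,
					       /* end, minlen elided */ true);
		discard_ctl->discard_extent_bytes += trimmed;
	}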
581 discardable_extents = atomic_read(&discard_ctl->discardable_extents); in btrfs_discard_calc_delay()
585 spin_lock(&discard_ctl->lock); in btrfs_discard_calc_delay()
588 * The following is to fix a potential -1 discrepancy that we're not in btrfs_discard_calc_delay()
595 atomic_add(-discardable_extents, in btrfs_discard_calc_delay()
596 &discard_ctl->discardable_extents); in btrfs_discard_calc_delay()
598 discardable_bytes = atomic64_read(&discard_ctl->discardable_bytes); in btrfs_discard_calc_delay()
600 atomic64_add(-discardable_bytes, in btrfs_discard_calc_delay()
601 &discard_ctl->discardable_bytes); in btrfs_discard_calc_delay()
604 spin_unlock(&discard_ctl->lock); in btrfs_discard_calc_delay()
608 iops_limit = READ_ONCE(discard_ctl->iops_limit); in btrfs_discard_calc_delay()
622 discard_ctl->delay_ms = delay; in btrfs_discard_calc_delay()
624 spin_unlock(&discard_ctl->lock); in btrfs_discard_calc_delay()
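Lines 581-624 size the pacing interval from the backlog: the count of outstanding discardable extents is spread across a fixed target window, bounded below by the iops limit (line 608) and above by a maximum delay, and the result lands in delay_ms (line 622). A userspace sketch of that shape; the window and cap constants here are assumptions, not values from the listing:

	#include <stdio.h>

	#define MSEC_PER_SEC	   1000UL
	#define TARGET_WINDOW_MSEC (6UL * 60 * 60 * MSEC_PER_SEC)	/* assumed 6h window */
	#define MAX_DELAY_MSEC	   1000UL				/* assumed upper cap */

	int main(void)
	{
		unsigned long discardable_extents = 50000;
		unsigned long iops_limit = 10;

		/* An iops limit of N means at most N discards/sec. */
		unsigned long min_delay = iops_limit ? MSEC_PER_SEC / iops_limit : 1;
		unsigned long delay = TARGET_WINDOW_MSEC / discardable_extents;

		if (delay < min_delay)
			delay = min_delay;
		if (delay > MAX_DELAY_MSEC)
			delay = MAX_DELAY_MSEC;

		printf("delay_ms = %lu\n", delay);	/* 432 for these inputs */
		return 0;
	}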
644 !btrfs_test_opt(block_group->fs_info, DISCARD_ASYNC) || in btrfs_discard_update_discardable()
648 ctl = block_group->free_space_ctl; in btrfs_discard_update_discardable()
649 discard_ctl = &block_group->fs_info->discard_ctl; in btrfs_discard_update_discardable()
651 lockdep_assert_held(&ctl->tree_lock); in btrfs_discard_update_discardable()
652 extents_delta = ctl->discardable_extents[BTRFS_STAT_CURR] - in btrfs_discard_update_discardable()
653 ctl->discardable_extents[BTRFS_STAT_PREV]; in btrfs_discard_update_discardable()
655 atomic_add(extents_delta, &discard_ctl->discardable_extents); in btrfs_discard_update_discardable()
656 ctl->discardable_extents[BTRFS_STAT_PREV] = in btrfs_discard_update_discardable()
657 ctl->discardable_extents[BTRFS_STAT_CURR]; in btrfs_discard_update_discardable()
660 bytes_delta = ctl->discardable_bytes[BTRFS_STAT_CURR] - in btrfs_discard_update_discardable()
661 ctl->discardable_bytes[BTRFS_STAT_PREV]; in btrfs_discard_update_discardable()
663 atomic64_add(bytes_delta, &discard_ctl->discardable_bytes); in btrfs_discard_update_discardable()
664 ctl->discardable_bytes[BTRFS_STAT_PREV] = in btrfs_discard_update_discardable()
665 ctl->discardable_bytes[BTRFS_STAT_CURR]; in btrfs_discard_update_discardable()
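Lines 652-665 use a CURR/PREV pair per free-space ctl so that only the delta since the last publication touches the filesystem-wide atomics. The same pattern in a self-contained miniature:

	#include <stdio.h>

	enum { STAT_CURR, STAT_PREV, STAT_NR };

	static long global_discardable_extents;	/* stands in for the atomic_t */

	static void publish_delta(int stats[STAT_NR])
	{
		int delta = stats[STAT_CURR] - stats[STAT_PREV];

		if (delta) {
			global_discardable_extents += delta;	/* atomic_add() in-kernel */
			stats[STAT_PREV] = stats[STAT_CURR];
		}
	}

	int main(void)
	{
		int stats[STAT_NR] = { 0, 0 };

		stats[STAT_CURR] = 5;
		publish_delta(stats);	/* global becomes 5 */
		stats[STAT_CURR] = 3;
		publish_delta(stats);	/* delta is -2, global becomes 3 */
		printf("global = %ld\n", global_discardable_extents);
		return 0;
	}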
684 spin_lock(&fs_info->unused_bgs_lock); in btrfs_discard_punt_unused_bgs_list()
686 list_for_each_entry_safe(block_group, next, &fs_info->unused_bgs, in btrfs_discard_punt_unused_bgs_list()
688 list_del_init(&block_group->bg_list); in btrfs_discard_punt_unused_bgs_list()
689 btrfs_discard_queue_work(&fs_info->discard_ctl, block_group); in btrfs_discard_punt_unused_bgs_list()
696 spin_unlock(&fs_info->unused_bgs_lock); in btrfs_discard_punt_unused_bgs_list()
705 * are completely free and ready for the unused_bgs path. As discarding will
715 spin_lock(&discard_ctl->lock); in btrfs_discard_purge_list()
718 &discard_ctl->discard_list[i], in btrfs_discard_purge_list()
720 list_del_init(&block_group->discard_list); in btrfs_discard_purge_list()
721 spin_unlock(&discard_ctl->lock); in btrfs_discard_purge_list()
722 if (block_group->used == 0) in btrfs_discard_purge_list()
724 spin_lock(&discard_ctl->lock); in btrfs_discard_purge_list()
728 spin_unlock(&discard_ctl->lock); in btrfs_discard_purge_list()
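Lines 715-728 show the classic drop-the-lock-while-working iteration: list_for_each_entry_safe() tolerates removing the current entry, and the spinlock is released around the per-entry work before being retaken for the next step. In outline (the callee that handles fully free groups is an assumption; the listing only shows the used == 0 test):

	spin_lock(&discard_ctl->lock);
	list_for_each_entry_safe(block_group, next,
				 &discard_ctl->discard_list[i], discard_list) {
		list_del_init(&block_group->discard_list);
		spin_unlock(&discard_ctl->lock);	/* per-entry work may sleep/lock */
		if (block_group->used == 0)
			btrfs_mark_bg_unused(block_group);	/* assumed callee */
		spin_lock(&discard_ctl->lock);
	}
	spin_unlock(&discard_ctl->lock);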
740 set_bit(BTRFS_FS_DISCARD_RUNNING, &fs_info->flags); in btrfs_discard_resume()
745 clear_bit(BTRFS_FS_DISCARD_RUNNING, &fs_info->flags); in btrfs_discard_stop()
750 struct btrfs_discard_ctl *discard_ctl = &fs_info->discard_ctl; in btrfs_discard_init()
753 spin_lock_init(&discard_ctl->lock); in btrfs_discard_init()
754 INIT_DELAYED_WORK(&discard_ctl->work, btrfs_discard_workfn); in btrfs_discard_init()
757 INIT_LIST_HEAD(&discard_ctl->discard_list[i]); in btrfs_discard_init()
759 discard_ctl->prev_discard = 0; in btrfs_discard_init()
760 discard_ctl->prev_discard_time = 0; in btrfs_discard_init()
761 atomic_set(&discard_ctl->discardable_extents, 0); in btrfs_discard_init()
762 atomic64_set(&discard_ctl->discardable_bytes, 0); in btrfs_discard_init()
763 discard_ctl->max_discard_size = BTRFS_ASYNC_DISCARD_DEFAULT_MAX_SIZE; in btrfs_discard_init()
764 discard_ctl->delay_ms = BTRFS_DISCARD_MAX_DELAY_MSEC; in btrfs_discard_init()
765 discard_ctl->iops_limit = BTRFS_DISCARD_MAX_IOPS; in btrfs_discard_init()
766 discard_ctl->kbps_limit = 0; in btrfs_discard_init()
767 discard_ctl->discard_extent_bytes = 0; in btrfs_discard_init()
768 discard_ctl->discard_bitmap_bytes = 0; in btrfs_discard_init()
769 atomic64_set(&discard_ctl->discard_bytes_saved, 0); in btrfs_discard_init()
775 cancel_delayed_work_sync(&fs_info->discard_ctl.work); in btrfs_discard_cleanup()
776 btrfs_discard_purge_list(&fs_info->discard_ctl); in btrfs_discard_cleanup()