Lines matching refs:slab

Each entry below gives the source line number, the matching line of code, and the enclosing function. A trailing "argument" or "local" tag marks lines where slab is declared as a function parameter or a local variable, respectively.

54 static bool is_slab_open(struct vdo_slab *slab)  in is_slab_open()  argument
56 return (!vdo_is_state_quiescing(&slab->state) && in is_slab_open()
57 !vdo_is_state_quiescent(&slab->state)); in is_slab_open()
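The fragments at lines 54-57 show the entire predicate. A reconstruction (brace placement and the comment are assumed, the logic is as quoted above):

static bool is_slab_open(struct vdo_slab *slab)
{
        /* A slab is open unless it is quiescing or already quiescent. */
        return (!vdo_is_state_quiescing(&slab->state) &&
                !vdo_is_state_quiescent(&slab->state));
}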
68 return ((journal->slab->status != VDO_SLAB_REBUILDING) && in must_make_entries_to_flush()
135 static bool is_slab_journal_blank(const struct vdo_slab *slab) in is_slab_journal_blank() argument
137 return ((slab->journal.tail == 1) && in is_slab_journal_blank()
138 (slab->journal.tail_header.entry_count == 0)); in is_slab_journal_blank()
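Lines 135-138 likewise give the whole body of this helper. Reconstructed, with an explanatory comment added here (not in the source):

static bool is_slab_journal_blank(const struct vdo_slab *slab)
{
        /*
         * The journal is blank when its tail is still the first block
         * and that block has no entries yet.
         */
        return ((slab->journal.tail == 1) &&
                (slab->journal.tail_header.entry_count == 0));
}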
150 struct list_head *dirty_list = &journal->slab->allocator->dirty_slab_journals; in mark_slab_journal_dirty()
169 static void check_if_slab_drained(struct vdo_slab *slab) in check_if_slab_drained() argument
172 struct slab_journal *journal = &slab->journal; in check_if_slab_drained()
175 if (!vdo_is_state_draining(&slab->state) || in check_if_slab_drained()
181 (slab->active_count > 0)) in check_if_slab_drained()
185 code = vdo_get_admin_state_code(&slab->state); in check_if_slab_drained()
186 read_only = vdo_is_read_only(slab->allocator->depot->vdo); in check_if_slab_drained()
188 vdo_waitq_has_waiters(&slab->dirty_blocks) && in check_if_slab_drained()
193 vdo_finish_draining_with_result(&slab->state, in check_if_slab_drained()
359 static void update_slab_summary_entry(struct vdo_slab *slab, struct vdo_waiter *waiter, in update_slab_summary_entry() argument
364 u8 index = slab->slab_number / VDO_SLAB_SUMMARY_ENTRIES_PER_BLOCK; in update_slab_summary_entry()
365 struct block_allocator *allocator = slab->allocator; in update_slab_summary_entry()
383 entry = &allocator->summary_entries[slab->slab_number]; in update_slab_summary_entry()
403 check_if_slab_drained(journal->slab); in finish_reaping()
417 return_vio_to_pool(journal->slab->allocator->vio_pool, in complete_reaping()
440 journal->slab->allocator->thread_id); in flush_endio()
473 if ((journal->slab->status != VDO_SLAB_REBUILT) || in reap_slab_journal()
474 !vdo_is_state_normal(&journal->slab->state) || in reap_slab_journal()
475 vdo_is_read_only(journal->slab->allocator->depot->vdo)) { in reap_slab_journal()
509 acquire_vio_from_pool(journal->slab->allocator->vio_pool, in reap_slab_journal()
530 if (journal->slab->status == VDO_SLAB_REPLAYING) { in adjust_slab_journal_block_reference()
576 vdo_enter_read_only_mode(journal->slab->allocator->depot->vdo, result); in release_journal_locks()
577 check_if_slab_drained(journal->slab); in release_journal_locks()
594 zone_count_t zone_number = journal->slab->allocator->zone_number; in release_journal_locks()
625 struct vdo_slab *slab = journal->slab; in update_tail_block_location() local
628 vdo_is_read_only(journal->slab->allocator->depot->vdo) || in update_tail_block_location()
630 check_if_slab_drained(slab); in update_tail_block_location()
634 if (slab->status != VDO_SLAB_REBUILT) { in update_tail_block_location()
635 u8 hint = slab->allocator->summary_entries[slab->slab_number].fullness_hint; in update_tail_block_location()
637 free_block_count = ((block_count_t) hint) << slab->allocator->depot->hint_shift; in update_tail_block_location()
639 free_block_count = slab->free_blocks; in update_tail_block_location()
651 update_slab_summary_entry(slab, &journal->slab_summary_waiter, in update_tail_block_location()
659 static void reopen_slab_journal(struct vdo_slab *slab) in reopen_slab_journal() argument
661 struct slab_journal *journal = &slab->journal; in reopen_slab_journal()
701 return_vio_to_pool(journal->slab->allocator->vio_pool, vdo_forget(pooled)); in complete_write()
707 vdo_enter_read_only_mode(journal->slab->allocator->depot->vdo, result); in complete_write()
708 check_if_slab_drained(journal->slab); in complete_write()
732 continue_vio_after_io(vio, complete_write, journal->slab->allocator->thread_id); in write_slab_journal_endio()
771 block_number = journal->slab->journal_origin + in write_slab_journal_block()
788 operation = vdo_get_admin_state_code(&journal->slab->state); in write_slab_journal_block()
790 vdo_finish_operation(&journal->slab->state, in write_slab_journal_block()
791 (vdo_is_read_only(journal->slab->allocator->depot->vdo) ? in write_slab_journal_block()
813 if (vdo_is_read_only(journal->slab->allocator->depot->vdo) || in commit_tail()
832 acquire_vio_from_pool(journal->slab->allocator->vio_pool, in commit_tail()
918 vdo_enter_read_only_mode(journal->slab->allocator->depot->vdo, result); in add_entry()
927 vdo_enter_read_only_mode(journal->slab->allocator->depot->vdo, in add_entry()
934 pbn - journal->slab->start, operation, increment); in add_entry()
957 bool vdo_attempt_replay_into_slab(struct vdo_slab *slab, physical_block_number_t pbn, in vdo_attempt_replay_into_slab() argument
962 struct slab_journal *journal = &slab->journal; in vdo_attempt_replay_into_slab()
980 vdo_start_operation_with_waiter(&journal->slab->state, in vdo_attempt_replay_into_slab()
996 if (journal->slab->status == VDO_SLAB_REBUILT) in vdo_attempt_replay_into_slab()
997 journal->slab->status = VDO_SLAB_REPLAYING; in vdo_attempt_replay_into_slab()
1017 struct vdo_slab *slab = container_of(waiter, struct vdo_slab, summary_waiter); in finish_summary_update() local
1020 slab->active_count--; in finish_summary_update()
1024 vdo_enter_read_only_mode(slab->allocator->depot->vdo, result); in finish_summary_update()
1027 check_if_slab_drained(slab); in finish_summary_update()
1043 struct vdo_slab *slab = context; in launch_reference_block_write() local
1045 if (vdo_is_read_only(slab->allocator->depot->vdo)) in launch_reference_block_write()
1048 slab->active_count++; in launch_reference_block_write()
1051 acquire_vio_from_pool(slab->allocator->vio_pool, waiter); in launch_reference_block_write()
1054 static void save_dirty_reference_blocks(struct vdo_slab *slab) in save_dirty_reference_blocks() argument
1056 vdo_waitq_notify_all_waiters(&slab->dirty_blocks, in save_dirty_reference_blocks()
1057 launch_reference_block_write, slab); in save_dirty_reference_blocks()
1058 check_if_slab_drained(slab); in save_dirty_reference_blocks()
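Lines 1054-1058 show the complete function: it launches a write for every dirty reference block and then re-checks the drain condition, since notifying the waiters may have been the last outstanding work. Reconstructed (comment added for this listing):

static void save_dirty_reference_blocks(struct vdo_slab *slab)
{
        /* Kick off a write for each dirty reference block, then see
         * whether that already satisfied a pending drain. */
        vdo_waitq_notify_all_waiters(&slab->dirty_blocks,
                                     launch_reference_block_write, slab);
        check_if_slab_drained(slab);
}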
1071 struct vdo_slab *slab = block->slab; in finish_reference_block_write() local
1074 slab->active_count--; in finish_reference_block_write()
1077 adjust_slab_journal_block_reference(&slab->journal, in finish_reference_block_write()
1079 return_vio_to_pool(slab->allocator->vio_pool, pooled); in finish_reference_block_write()
1088 check_if_slab_drained(slab); in finish_reference_block_write()
1094 vdo_waitq_enqueue_waiter(&block->slab->dirty_blocks, &block->waiter); in finish_reference_block_write()
1095 if (vdo_is_state_draining(&slab->state)) { in finish_reference_block_write()
1097 save_dirty_reference_blocks(slab); in finish_reference_block_write()
1107 if ((slab->active_count > 0) || vdo_waitq_has_waiters(&slab->dirty_blocks)) { in finish_reference_block_write()
1108 check_if_slab_drained(slab); in finish_reference_block_write()
1112 offset = slab->allocator->summary_entries[slab->slab_number].tail_block_offset; in finish_reference_block_write()
1113 slab->active_count++; in finish_reference_block_write()
1114 slab->summary_waiter.callback = finish_summary_update; in finish_reference_block_write()
1115 update_slab_summary_entry(slab, &slab->summary_waiter, offset, in finish_reference_block_write()
1116 true, true, slab->free_blocks); in finish_reference_block_write()
1127 size_t block_index = block - block->slab->reference_blocks; in get_reference_counters_for_block()
1129 return &block->slab->counters[block_index * COUNTS_PER_BLOCK]; in get_reference_counters_for_block()
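Lines 1127 and 1129 give the body; the signature is not quoted, so the return type and __must_check below are inferred from the returned expression and the surrounding conventions:

static vdo_refcount_t * __must_check get_reference_counters_for_block(struct reference_block *block)
{
        /* Each reference_block covers COUNTS_PER_BLOCK counters in the
         * slab-wide counters array; index by the block's position. */
        size_t block_index = block - block->slab->reference_blocks;

        return &block->slab->counters[block_index * COUNTS_PER_BLOCK];
}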
1144 vdo_pack_journal_point(&block->slab->slab_journal_point, &commit_point); in pack_reference_block()
1157 thread_id_t thread_id = block->slab->allocator->thread_id; in write_reference_block_endio()
1170 struct vdo_slab *slab = ((struct reference_block *) completion->parent)->slab; in handle_io_error() local
1173 return_vio_to_pool(slab->allocator->vio_pool, vio_as_pooled_vio(vio)); in handle_io_error()
1174 slab->active_count--; in handle_io_error()
1175 vdo_enter_read_only_mode(slab->allocator->depot->vdo, result); in handle_io_error()
1176 check_if_slab_drained(slab); in handle_io_error()
1195 block_offset = (block - block->slab->reference_blocks); in write_reference_block()
1196 pbn = (block->slab->ref_counts_origin + block_offset); in write_reference_block()
1212 WRITE_ONCE(block->slab->allocator->ref_counts_statistics.blocks_written, in write_reference_block()
1213 block->slab->allocator->ref_counts_statistics.blocks_written + 1); in write_reference_block()
1223 struct vdo_slab *slab = journal->slab; in reclaim_journal_space() local
1224 block_count_t write_count = vdo_waitq_num_waiters(&slab->dirty_blocks); in reclaim_journal_space()
1239 vdo_waitq_notify_next_waiter(&slab->dirty_blocks, in reclaim_journal_space()
1240 launch_reference_block_write, slab); in reclaim_journal_space()
1274 vdo_waitq_enqueue_waiter(&block->slab->dirty_blocks, &block->waiter); in dirty_block()
1280 static struct reference_block * __must_check get_reference_block(struct vdo_slab *slab, in get_reference_block() argument
1283 return &slab->reference_blocks[index / COUNTS_PER_BLOCK]; in get_reference_block()
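Line 1280 shows the signature through the first parameter and line 1283 shows the body; the second parameter's type is assumed to be slab_block_number, matching how index is used:

static struct reference_block * __must_check get_reference_block(struct vdo_slab *slab,
                                                                 slab_block_number index)
{
        /* COUNTS_PER_BLOCK counters map onto one reference_block. */
        return &slab->reference_blocks[index / COUNTS_PER_BLOCK];
}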
1295 static int __must_check slab_block_number_from_pbn(struct vdo_slab *slab, in slab_block_number_from_pbn() argument
1301 if (pbn < slab->start) in slab_block_number_from_pbn()
1304 slab_block_number = pbn - slab->start; in slab_block_number_from_pbn()
1305 if (slab_block_number >= slab->allocator->depot->slab_config.data_blocks) in slab_block_number_from_pbn()
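Lines 1295-1305 show the bounds checks; the quoted fragments omit the return statements and the output assignment, so the VDO_OUT_OF_RANGE / VDO_SUCCESS returns, the local's type, and the out-parameter name below are assumptions consistent with the __must_check int signature:

static int __must_check slab_block_number_from_pbn(struct vdo_slab *slab,
                                                   physical_block_number_t pbn,
                                                   slab_block_number *slab_block_number_ptr)
{
        u64 slab_block_number;

        /* Reject physical blocks before the slab's start... */
        if (pbn < slab->start)
                return VDO_OUT_OF_RANGE;

        /* ...and offsets beyond the slab's data region. */
        slab_block_number = pbn - slab->start;
        if (slab_block_number >= slab->allocator->depot->slab_config.data_blocks)
                return VDO_OUT_OF_RANGE;

        *slab_block_number_ptr = slab_block_number;
        return VDO_SUCCESS;
}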
1318 static int __must_check get_reference_counter(struct vdo_slab *slab, in get_reference_counter() argument
1323 int result = slab_block_number_from_pbn(slab, pbn, &index); in get_reference_counter()
1328 *counter_ptr = &slab->counters[index]; in get_reference_counter()
1333 static unsigned int calculate_slab_priority(struct vdo_slab *slab) in calculate_slab_priority() argument
1335 block_count_t free_blocks = slab->free_blocks; in calculate_slab_priority()
1336 unsigned int unopened_slab_priority = slab->allocator->unopened_slab_priority; in calculate_slab_priority()
1360 if (is_slab_journal_blank(slab)) in calculate_slab_priority()
1372 static void prioritize_slab(struct vdo_slab *slab) in prioritize_slab() argument
1374 VDO_ASSERT_LOG_ONLY(list_empty(&slab->allocq_entry), in prioritize_slab()
1376 slab->priority = calculate_slab_priority(slab); in prioritize_slab()
1377 vdo_priority_table_enqueue(slab->allocator->prioritized_slabs, in prioritize_slab()
1378 slab->priority, &slab->allocq_entry); in prioritize_slab()
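Lines 1372-1378 show the full flow: assert the slab is not already queued, compute its priority, and enqueue it in the allocator's priority table. The assertion's message string is elided in the listing, so the text below is a placeholder:

static void prioritize_slab(struct vdo_slab *slab)
{
        VDO_ASSERT_LOG_ONLY(list_empty(&slab->allocq_entry),
                            "slab must not already be queued" /* message assumed */);
        slab->priority = calculate_slab_priority(slab);
        vdo_priority_table_enqueue(slab->allocator->prioritized_slabs,
                                   slab->priority, &slab->allocq_entry);
}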
1385 static void adjust_free_block_count(struct vdo_slab *slab, bool incremented) in adjust_free_block_count() argument
1387 struct block_allocator *allocator = slab->allocator; in adjust_free_block_count()
1393 if (slab == allocator->open_slab) in adjust_free_block_count()
1397 if (slab->priority == calculate_slab_priority(slab)) in adjust_free_block_count()
1404 vdo_priority_table_remove(allocator->prioritized_slabs, &slab->allocq_entry); in adjust_free_block_count()
1405 prioritize_slab(slab); in adjust_free_block_count()
1420 static int increment_for_data(struct vdo_slab *slab, struct reference_block *block, in increment_for_data() argument
1430 slab->free_blocks--; in increment_for_data()
1432 adjust_free_block_count(slab, false); in increment_for_data()
1445 slab->slab_number, block_number); in increment_for_data()
1468 static int decrement_for_data(struct vdo_slab *slab, struct reference_block *block, in decrement_for_data() argument
1478 block_number, slab->slab_number); in decrement_for_data()
1499 slab->free_blocks++; in decrement_for_data()
1501 adjust_free_block_count(slab, true); in decrement_for_data()
1531 static int increment_for_block_map(struct vdo_slab *slab, struct reference_block *block, in increment_for_block_map() argument
1542 slab->slab_number, block_number); in increment_for_block_map()
1547 slab->free_blocks--; in increment_for_block_map()
1549 adjust_free_block_count(slab, false); in increment_for_block_map()
1557 slab->slab_number, block_number); in increment_for_block_map()
1567 *counter_ptr, slab->slab_number, in increment_for_block_map()
1591 static int update_reference_count(struct vdo_slab *slab, struct reference_block *block, in update_reference_count() argument
1598 vdo_refcount_t *counter_ptr = &slab->counters[block_number]; in update_reference_count()
1603 result = decrement_for_data(slab, block, block_number, old_status, in update_reference_count()
1611 result = increment_for_data(slab, block, block_number, old_status, in update_reference_count()
1614 result = increment_for_block_map(slab, block, block_number, old_status, in update_reference_count()
1623 slab->slab_journal_point = *slab_journal_point; in update_reference_count()
1628 static int __must_check adjust_reference_count(struct vdo_slab *slab, in adjust_reference_count() argument
1637 if (!is_slab_open(slab)) in adjust_reference_count()
1640 result = slab_block_number_from_pbn(slab, updater->zpbn.pbn, &block_number); in adjust_reference_count()
1644 block = get_reference_block(slab, block_number); in adjust_reference_count()
1645 result = update_reference_count(slab, block, block_number, slab_journal_point, in adjust_reference_count()
1663 adjust_slab_journal_block_reference(&slab->journal, entry_lock, -1); in adjust_reference_count()
1710 zone_count_t zone_number = journal->slab->allocator->zone_number; in add_entry_from_waiter()
1726 if (journal->slab->status != VDO_SLAB_REBUILT) { in add_entry_from_waiter()
1736 result = adjust_reference_count(journal->slab, updater, in add_entry_from_waiter()
1781 (journal->slab->status == VDO_SLAB_REBUILDING)) { in add_entries()
1815 save_dirty_reference_blocks(journal->slab); in add_entries()
1841 save_dirty_reference_blocks(journal->slab); in add_entries()
1853 struct vdo_slab *slab = journal->slab; in add_entries() local
1864 for (i = 0; i < slab->reference_block_count; i++) { in add_entries()
1865 slab->reference_blocks[i].slab_journal_lock = 1; in add_entries()
1866 dirty_block(&slab->reference_blocks[i]); in add_entries()
1870 slab->reference_block_count); in add_entries()
1881 if (vdo_is_state_draining(&journal->slab->state) && in add_entries()
1882 !vdo_is_state_suspending(&journal->slab->state) && in add_entries()
1891 static void reset_search_cursor(struct vdo_slab *slab) in reset_search_cursor() argument
1893 struct search_cursor *cursor = &slab->search_cursor; in reset_search_cursor()
1898 cursor->end_index = min_t(u32, COUNTS_PER_BLOCK, slab->block_count); in reset_search_cursor()
1909 static bool advance_search_cursor(struct vdo_slab *slab) in advance_search_cursor() argument
1911 struct search_cursor *cursor = &slab->search_cursor; in advance_search_cursor()
1918 reset_search_cursor(slab); in advance_search_cursor()
1928 cursor->end_index = slab->block_count; in advance_search_cursor()
1948 struct vdo_slab *slab = vdo_get_slab(depot, pbn); in vdo_adjust_reference_count_for_rebuild() local
1954 result = slab_block_number_from_pbn(slab, pbn, &block_number); in vdo_adjust_reference_count_for_rebuild()
1958 block = get_reference_block(slab, block_number); in vdo_adjust_reference_count_for_rebuild()
1959 result = update_reference_count(slab, block, block_number, NULL, in vdo_adjust_reference_count_for_rebuild()
1979 static int replay_reference_count_change(struct vdo_slab *slab, in replay_reference_count_change() argument
1984 struct reference_block *block = get_reference_block(slab, entry.sbn); in replay_reference_count_change()
1997 result = update_reference_count(slab, block, entry.sbn, entry_point, in replay_reference_count_change()
2047 static bool find_free_block(const struct vdo_slab *slab, slab_block_number *index_ptr) in find_free_block() argument
2050 slab_block_number next_index = slab->search_cursor.index; in find_free_block()
2051 slab_block_number end_index = slab->search_cursor.end_index; in find_free_block()
2052 u8 *next_counter = &slab->counters[next_index]; in find_free_block()
2053 u8 *end_counter = &slab->counters[end_index]; in find_free_block()
2104 static bool search_current_reference_block(const struct vdo_slab *slab, in search_current_reference_block() argument
2108 return ((slab->search_cursor.block->allocated_count < COUNTS_PER_BLOCK) && in search_current_reference_block()
2109 find_free_block(slab, free_index_ptr)); in search_current_reference_block()
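Lines 2104-2109 give the whole predicate: a reference block with no unallocated counters is skipped without scanning. Reconstructed (comment added here):

static bool search_current_reference_block(const struct vdo_slab *slab,
                                           slab_block_number *free_index_ptr)
{
        /* Don't bother scanning a block that has no free counters. */
        return ((slab->search_cursor.block->allocated_count < COUNTS_PER_BLOCK) &&
                find_free_block(slab, free_index_ptr));
}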
2123 static bool search_reference_blocks(struct vdo_slab *slab, in search_reference_blocks() argument
2127 if (search_current_reference_block(slab, free_index_ptr)) in search_reference_blocks()
2131 while (advance_search_cursor(slab)) { in search_reference_blocks()
2132 if (search_current_reference_block(slab, free_index_ptr)) in search_reference_blocks()
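Lines 2123-2132 show the search loop; the early returns are not quoted, so the true/false returns below are inferred from the bool return type and the loop structure:

static bool search_reference_blocks(struct vdo_slab *slab,
                                    slab_block_number *free_index_ptr)
{
        /* Try the block the cursor is on, then advance it until a free
         * counter turns up or the cursor is exhausted. */
        if (search_current_reference_block(slab, free_index_ptr))
                return true;

        while (advance_search_cursor(slab)) {
                if (search_current_reference_block(slab, free_index_ptr))
                        return true;
        }

        return false;
}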
2142 static void make_provisional_reference(struct vdo_slab *slab, in make_provisional_reference() argument
2145 struct reference_block *block = get_reference_block(slab, block_number); in make_provisional_reference()
2151 slab->counters[block_number] = PROVISIONAL_REFERENCE_COUNT; in make_provisional_reference()
2155 slab->free_blocks--; in make_provisional_reference()
2161 static void dirty_all_reference_blocks(struct vdo_slab *slab) in dirty_all_reference_blocks() argument
2165 for (i = 0; i < slab->reference_block_count; i++) in dirty_all_reference_blocks()
2166 dirty_block(&slab->reference_blocks[i]); in dirty_all_reference_blocks()
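Lines 2161-2166 show the complete helper; only the loop variable's declaration is elided, and block_count_t is assumed to match the reference_block_count field it is compared against:

static void dirty_all_reference_blocks(struct vdo_slab *slab)
{
        block_count_t i;

        /* Mark every reference block dirty so all of them get written. */
        for (i = 0; i < slab->reference_block_count; i++)
                dirty_block(&slab->reference_blocks[i]);
}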
2203 struct vdo_slab *slab = block->slab; in unpack_reference_block() local
2213 if (vdo_before_journal_point(&slab->slab_journal_point, in unpack_reference_block()
2215 slab->slab_journal_point = block->commit_points[i]; in unpack_reference_block()
2220 size_t block_index = block - block->slab->reference_blocks; in unpack_reference_block()
2223 i, block_index, block->slab->slab_number); in unpack_reference_block()
2243 struct vdo_slab *slab = block->slab; in finish_reference_block_load() local
2246 return_vio_to_pool(slab->allocator->vio_pool, pooled); in finish_reference_block_load()
2247 slab->active_count--; in finish_reference_block_load()
2250 slab->free_blocks -= block->allocated_count; in finish_reference_block_load()
2251 check_if_slab_drained(slab); in finish_reference_block_load()
2260 block->slab->allocator->thread_id); in load_reference_block_endio()
2275 size_t block_offset = (block - block->slab->reference_blocks); in load_reference_block()
2278 vdo_submit_metadata_vio(vio, block->slab->ref_counts_origin + block_offset, in load_reference_block()
2287 static void load_reference_blocks(struct vdo_slab *slab) in load_reference_blocks() argument
2291 slab->free_blocks = slab->block_count; in load_reference_blocks()
2292 slab->active_count = slab->reference_block_count; in load_reference_blocks()
2293 for (i = 0; i < slab->reference_block_count; i++) { in load_reference_blocks()
2294 struct vdo_waiter *waiter = &slab->reference_blocks[i].waiter; in load_reference_blocks()
2297 acquire_vio_from_pool(slab->allocator->vio_pool, waiter); in load_reference_blocks()
2307 static void drain_slab(struct vdo_slab *slab) in drain_slab() argument
2311 const struct admin_state_code *state = vdo_get_admin_state_code(&slab->state); in drain_slab()
2318 commit_tail(&slab->journal); in drain_slab()
2320 if ((state == VDO_ADMIN_STATE_RECOVERING) || (slab->counters == NULL)) in drain_slab()
2324 load = slab->allocator->summary_entries[slab->slab_number].load_ref_counts; in drain_slab()
2327 load_reference_blocks(slab); in drain_slab()
2333 dirty_all_reference_blocks(slab); in drain_slab()
2341 block_count_t data_blocks = slab->allocator->depot->slab_config.data_blocks; in drain_slab()
2343 if (load || (slab->free_blocks != data_blocks) || in drain_slab()
2344 !is_slab_journal_blank(slab)) { in drain_slab()
2345 dirty_all_reference_blocks(slab); in drain_slab()
2349 save = (slab->status == VDO_SLAB_REBUILT); in drain_slab()
2351 vdo_finish_draining_with_result(&slab->state, VDO_SUCCESS); in drain_slab()
2356 save_dirty_reference_blocks(slab); in drain_slab()
2359 static int allocate_slab_counters(struct vdo_slab *slab) in allocate_slab_counters() argument
2364 result = VDO_ASSERT(slab->reference_blocks == NULL, in allocate_slab_counters()
2366 slab->slab_number); in allocate_slab_counters()
2370 result = vdo_allocate(slab->reference_block_count, struct reference_block, in allocate_slab_counters()
2371 __func__, &slab->reference_blocks); in allocate_slab_counters()
2379 bytes = (slab->reference_block_count * COUNTS_PER_BLOCK) + (2 * BYTES_PER_WORD); in allocate_slab_counters()
2381 &slab->counters); in allocate_slab_counters()
2383 vdo_free(vdo_forget(slab->reference_blocks)); in allocate_slab_counters()
2387 slab->search_cursor.first_block = slab->reference_blocks; in allocate_slab_counters()
2388 slab->search_cursor.last_block = &slab->reference_blocks[slab->reference_block_count - 1]; in allocate_slab_counters()
2389 reset_search_cursor(slab); in allocate_slab_counters()
2391 for (index = 0; index < slab->reference_block_count; index++) { in allocate_slab_counters()
2392 slab->reference_blocks[index] = (struct reference_block) { in allocate_slab_counters()
2393 .slab = slab, in allocate_slab_counters()
2400 static int allocate_counters_if_clean(struct vdo_slab *slab) in allocate_counters_if_clean() argument
2402 if (vdo_is_state_clean_load(&slab->state)) in allocate_counters_if_clean()
2403 return allocate_slab_counters(slab); in allocate_counters_if_clean()
2412 struct vdo_slab *slab = journal->slab; in finish_loading_journal() local
2420 (header.nonce == slab->allocator->nonce)) { in finish_loading_journal()
2427 journal->head = (slab->allocator->summary_entries[slab->slab_number].is_dirty ? in finish_loading_journal()
2433 return_vio_to_pool(slab->allocator->vio_pool, vio_as_pooled_vio(vio)); in finish_loading_journal()
2434 vdo_finish_loading_with_result(&slab->state, allocate_counters_if_clean(slab)); in finish_loading_journal()
2443 journal->slab->allocator->thread_id); in read_slab_journal_tail_endio()
2453 return_vio_to_pool(journal->slab->allocator->vio_pool, vio_as_pooled_vio(vio)); in handle_load_error()
2454 vdo_finish_loading_with_result(&journal->slab->state, result); in handle_load_error()
2469 struct vdo_slab *slab = journal->slab; in read_slab_journal_tail() local
2473 slab->allocator->summary_entries[slab->slab_number].tail_block_offset; in read_slab_journal_tail()
2484 vio->completion.callback_thread_id = slab->allocator->thread_id; in read_slab_journal_tail()
2485 vdo_submit_metadata_vio(vio, slab->journal_origin + tail_block, in read_slab_journal_tail()
2493 static void load_slab_journal(struct vdo_slab *slab) in load_slab_journal() argument
2495 struct slab_journal *journal = &slab->journal; in load_slab_journal()
2498 last_commit_point = slab->allocator->summary_entries[slab->slab_number].tail_block_offset; in load_slab_journal()
2500 !slab->allocator->summary_entries[slab->slab_number].load_ref_counts) { in load_slab_journal()
2509 vdo_finish_loading_with_result(&slab->state, in load_slab_journal()
2510 allocate_counters_if_clean(slab)); in load_slab_journal()
2515 acquire_vio_from_pool(slab->allocator->vio_pool, &journal->resource_waiter); in load_slab_journal()
2518 static void register_slab_for_scrubbing(struct vdo_slab *slab, bool high_priority) in register_slab_for_scrubbing() argument
2520 struct slab_scrubber *scrubber = &slab->allocator->scrubber; in register_slab_for_scrubbing()
2522 VDO_ASSERT_LOG_ONLY((slab->status != VDO_SLAB_REBUILT), in register_slab_for_scrubbing()
2525 if (slab->status != VDO_SLAB_REQUIRES_SCRUBBING) in register_slab_for_scrubbing()
2528 list_del_init(&slab->allocq_entry); in register_slab_for_scrubbing()
2529 if (!slab->was_queued_for_scrubbing) { in register_slab_for_scrubbing()
2531 slab->was_queued_for_scrubbing = true; in register_slab_for_scrubbing()
2535 slab->status = VDO_SLAB_REQUIRES_HIGH_PRIORITY_SCRUBBING; in register_slab_for_scrubbing()
2536 list_add_tail(&slab->allocq_entry, &scrubber->high_priority_slabs); in register_slab_for_scrubbing()
2540 list_add_tail(&slab->allocq_entry, &scrubber->slabs); in register_slab_for_scrubbing()
2544 static void queue_slab(struct vdo_slab *slab) in queue_slab() argument
2546 struct block_allocator *allocator = slab->allocator; in queue_slab()
2550 VDO_ASSERT_LOG_ONLY(list_empty(&slab->allocq_entry), in queue_slab()
2556 free_blocks = slab->free_blocks; in queue_slab()
2559 slab->slab_number, (unsigned long long) free_blocks, in queue_slab()
2566 if (slab->status != VDO_SLAB_REBUILT) { in queue_slab()
2567 register_slab_for_scrubbing(slab, false); in queue_slab()
2571 if (!vdo_is_state_resuming(&slab->state)) { in queue_slab()
2579 if (!is_slab_journal_blank(slab)) { in queue_slab()
2586 reopen_slab_journal(slab); in queue_slab()
2588 prioritize_slab(slab); in queue_slab()
2598 struct vdo_slab *slab = container_of(state, struct vdo_slab, state); in initiate_slab_action() local
2604 slab->status = VDO_SLAB_REBUILDING; in initiate_slab_action()
2606 drain_slab(slab); in initiate_slab_action()
2607 check_if_slab_drained(slab); in initiate_slab_action()
2612 load_slab_journal(slab); in initiate_slab_action()
2617 queue_slab(slab); in initiate_slab_action()
2633 struct vdo_slab *slab; in get_next_slab() local
2635 slab = list_first_entry_or_null(&scrubber->high_priority_slabs, in get_next_slab()
2637 if (slab != NULL) in get_next_slab()
2638 return slab; in get_next_slab()
2734 struct vdo_slab *slab = scrubber->slab; in slab_scrubbed() local
2736 slab->status = VDO_SLAB_REBUILT; in slab_scrubbed()
2737 queue_slab(slab); in slab_scrubbed()
2738 reopen_slab_journal(slab); in slab_scrubbed()
2778 sequence_number_t block_number, struct vdo_slab *slab) in apply_block_entries() argument
2785 slab_block_number max_sbn = slab->end - slab->start; in apply_block_entries()
2800 result = replay_reference_count_change(slab, &entry_point, entry); in apply_block_entries()
2807 entry.sbn, slab->slab_number); in apply_block_entries()
2827 struct vdo_slab *slab = scrubber->slab; in apply_journal_entries() local
2828 struct slab_journal *journal = &slab->journal; in apply_journal_entries()
2841 struct journal_point ref_counts_point = slab->slab_journal_point; in apply_journal_entries()
2853 if ((header.nonce != slab->allocator->nonce) || in apply_journal_entries()
2861 slab->slab_number); in apply_journal_entries()
2866 result = apply_block_entries(block, header.entry_count, sequence, slab); in apply_journal_entries()
2893 slab->allocator->thread_id, completion->parent); in apply_journal_entries()
2894 vdo_start_operation_with_waiter(&slab->state, in apply_journal_entries()
2905 scrubber->slab->allocator->thread_id); in read_slab_journal_endio()
2918 struct vdo_slab *slab = scrubber->slab; in start_scrubbing() local
2920 if (!slab->allocator->summary_entries[slab->slab_number].is_dirty) { in start_scrubbing()
2925 vdo_submit_metadata_vio(&scrubber->vio, slab->journal_origin, in start_scrubbing()
2937 struct vdo_slab *slab; in scrub_next_slab() local
2950 slab = get_next_slab(scrubber); in scrub_next_slab()
2951 if ((slab == NULL) || in scrub_next_slab()
2960 list_del_init(&slab->allocq_entry); in scrub_next_slab()
2961 scrubber->slab = slab; in scrub_next_slab()
2963 slab->allocator->thread_id, completion->parent); in scrub_next_slab()
2964 vdo_start_operation_with_waiter(&slab->state, VDO_ADMIN_STATE_SCRUBBING, in scrub_next_slab()
3001 struct vdo_slab *slab) in register_slab_with_allocator() argument
3004 allocator->last_slab = slab->slab_number; in register_slab_with_allocator()
3047 struct vdo_slab *slab = iterator->next; in next_slab() local
3049 if ((slab == NULL) || (slab->slab_number < iterator->end + iterator->stride)) in next_slab()
3052 iterator->next = iterator->slabs[slab->slab_number - iterator->stride]; in next_slab()
3054 return slab; in next_slab()
3087 struct vdo_slab *slab = next_slab(&iterator); in notify_block_allocator_of_read_only_mode() local
3089 vdo_waitq_notify_all_waiters(&slab->journal.entry_waiters, in notify_block_allocator_of_read_only_mode()
3090 abort_waiter, &slab->journal); in notify_block_allocator_of_read_only_mode()
3091 check_if_slab_drained(slab); in notify_block_allocator_of_read_only_mode()
3106 int vdo_acquire_provisional_reference(struct vdo_slab *slab, physical_block_number_t pbn, in vdo_acquire_provisional_reference() argument
3115 if (!is_slab_open(slab)) in vdo_acquire_provisional_reference()
3118 result = slab_block_number_from_pbn(slab, pbn, &block_number); in vdo_acquire_provisional_reference()
3122 if (slab->counters[block_number] == EMPTY_REFERENCE_COUNT) { in vdo_acquire_provisional_reference()
3123 make_provisional_reference(slab, block_number); in vdo_acquire_provisional_reference()
3129 adjust_free_block_count(slab, false); in vdo_acquire_provisional_reference()
3134 static int __must_check allocate_slab_block(struct vdo_slab *slab, in allocate_slab_block() argument
3139 if (!is_slab_open(slab)) in allocate_slab_block()
3142 if (!search_reference_blocks(slab, &free_index)) in allocate_slab_block()
3145 VDO_ASSERT_LOG_ONLY((slab->counters[free_index] == EMPTY_REFERENCE_COUNT), in allocate_slab_block()
3147 make_provisional_reference(slab, free_index); in allocate_slab_block()
3148 adjust_free_block_count(slab, false); in allocate_slab_block()
3154 slab->search_cursor.index = (free_index + 1); in allocate_slab_block()
3156 *block_number_ptr = slab->start + free_index; in allocate_slab_block()
3164 static void open_slab(struct vdo_slab *slab) in open_slab() argument
3166 reset_search_cursor(slab); in open_slab()
3167 if (is_slab_journal_blank(slab)) { in open_slab()
3168 WRITE_ONCE(slab->allocator->statistics.slabs_opened, in open_slab()
3169 slab->allocator->statistics.slabs_opened + 1); in open_slab()
3170 dirty_all_reference_blocks(slab); in open_slab()
3172 WRITE_ONCE(slab->allocator->statistics.slabs_reopened, in open_slab()
3173 slab->allocator->statistics.slabs_reopened + 1); in open_slab()
3176 slab->allocator->open_slab = slab; in open_slab()
3242 struct vdo_slab *slab = vdo_get_slab(completion->vdo->depot, updater->zpbn.pbn); in vdo_modify_reference_count() local
3244 if (!is_slab_open(slab)) { in vdo_modify_reference_count()
3254 vdo_waitq_enqueue_waiter(&slab->journal.entry_waiters, &updater->waiter); in vdo_modify_reference_count()
3255 if ((slab->status != VDO_SLAB_REBUILT) && requires_reaping(&slab->journal)) in vdo_modify_reference_count()
3256 register_slab_for_scrubbing(slab, true); in vdo_modify_reference_count()
3258 add_entries(&slab->journal); in vdo_modify_reference_count()
3366 struct vdo_slab *slab = next_slab(&iterator); in apply_to_slabs() local
3368 list_del_init(&slab->allocq_entry); in apply_to_slabs()
3370 vdo_start_operation_with_waiter(&slab->state, operation, in apply_to_slabs()
3416 struct vdo_slab *slab; in erase_next_slab_journal() local
3427 slab = next_slab(&allocator->slabs_to_erase); in erase_next_slab_journal()
3428 pbn = slab->journal_origin - depot->vdo->geometry.bio_offset; in erase_next_slab_journal()
3533 struct vdo_slab *slab; in vdo_prepare_slabs_for_allocation() local
3538 slab = depot->slabs[current_slab_status.slab_number]; in vdo_prepare_slabs_for_allocation()
3541 (!allocator->summary_entries[slab->slab_number].load_ref_counts && in vdo_prepare_slabs_for_allocation()
3543 queue_slab(slab); in vdo_prepare_slabs_for_allocation()
3547 slab->status = VDO_SLAB_REQUIRES_SCRUBBING; in vdo_prepare_slabs_for_allocation()
3548 journal = &slab->journal; in vdo_prepare_slabs_for_allocation()
3552 register_slab_for_scrubbing(slab, high_priority); in vdo_prepare_slabs_for_allocation()
3585 struct vdo_slab *slab = next_slab(&iterator); in vdo_dump_block_allocator() local
3586 struct slab_journal *journal = &slab->journal; in vdo_dump_block_allocator()
3588 if (slab->reference_blocks != NULL) { in vdo_dump_block_allocator()
3590 vdo_log_info("slab %u: P%u, %llu free", slab->slab_number, in vdo_dump_block_allocator()
3591 slab->priority, in vdo_dump_block_allocator()
3592 (unsigned long long) slab->free_blocks); in vdo_dump_block_allocator()
3594 vdo_log_info("slab %u: status %s", slab->slab_number, in vdo_dump_block_allocator()
3595 status_to_string(slab->status)); in vdo_dump_block_allocator()
3615 if (slab->counters != NULL) { in vdo_dump_block_allocator()
3618 slab->free_blocks, slab->block_count, in vdo_dump_block_allocator()
3619 slab->reference_block_count, in vdo_dump_block_allocator()
3620 vdo_waitq_num_waiters(&slab->dirty_blocks), in vdo_dump_block_allocator()
3621 slab->active_count, in vdo_dump_block_allocator()
3622 (unsigned long long) slab->slab_journal_point.sequence_number, in vdo_dump_block_allocator()
3623 slab->slab_journal_point.entry_count); in vdo_dump_block_allocator()
3645 static void free_slab(struct vdo_slab *slab) in free_slab() argument
3647 if (slab == NULL) in free_slab()
3650 list_del(&slab->allocq_entry); in free_slab()
3651 vdo_free(vdo_forget(slab->journal.block)); in free_slab()
3652 vdo_free(vdo_forget(slab->journal.locks)); in free_slab()
3653 vdo_free(vdo_forget(slab->counters)); in free_slab()
3654 vdo_free(vdo_forget(slab->reference_blocks)); in free_slab()
3655 vdo_free(slab); in free_slab()
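Lines 3645-3655 show the entire teardown path. Reconstructed as quoted; note that vdo_forget() hands back the stored pointer while clearing the field, which guards against a later double free:

static void free_slab(struct vdo_slab *slab)
{
        if (slab == NULL)
                return;

        /* Unlink from any allocator queue, then release each owned
         * allocation before the slab itself. */
        list_del(&slab->allocq_entry);
        vdo_free(vdo_forget(slab->journal.block));
        vdo_free(vdo_forget(slab->journal.locks));
        vdo_free(vdo_forget(slab->counters));
        vdo_free(vdo_forget(slab->reference_blocks));
        vdo_free(slab);
}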
3658 static int initialize_slab_journal(struct vdo_slab *slab) in initialize_slab_journal() argument
3660 struct slab_journal *journal = &slab->journal; in initialize_slab_journal()
3661 const struct slab_config *slab_config = &slab->allocator->depot->slab_config; in initialize_slab_journal()
3674 journal->slab = slab; in initialize_slab_journal()
3681 journal->events = &slab->allocator->slab_journal_statistics; in initialize_slab_journal()
3682 journal->recovery_journal = slab->allocator->depot->vdo->recovery_journal; in initialize_slab_journal()
3699 journal->tail_header.nonce = slab->allocator->nonce; in initialize_slab_journal()
3722 struct vdo_slab *slab; in make_slab() local
3725 result = vdo_allocate(1, struct vdo_slab, __func__, &slab); in make_slab()
3729 *slab = (struct vdo_slab) { in make_slab()
3742 INIT_LIST_HEAD(&slab->allocq_entry); in make_slab()
3744 result = initialize_slab_journal(slab); in make_slab()
3746 free_slab(slab); in make_slab()
3751 vdo_set_admin_state_code(&slab->state, VDO_ADMIN_STATE_NEW); in make_slab()
3752 result = allocate_slab_counters(slab); in make_slab()
3754 free_slab(slab); in make_slab()
3758 vdo_set_admin_state_code(&slab->state, VDO_ADMIN_STATE_NORMAL_OPERATION); in make_slab()
3761 *slab_ptr = slab; in make_slab()
3859 vdo_is_read_only(journal->slab->allocator->depot->vdo)) in release_recovery_journal_lock()
4132 struct vdo_slab *slab = depot->new_slabs[i]; in allocate_components() local
4134 register_slab_with_allocator(slab->allocator, slab); in allocate_components()
4367 struct vdo_slab *slab = vdo_get_slab(depot, pbn); in vdo_get_increment_limit() local
4371 if ((slab == NULL) || (slab->status != VDO_SLAB_REBUILT)) in vdo_get_increment_limit()
4374 result = get_reference_counter(slab, pbn, &counter_ptr); in vdo_get_increment_limit()
4727 struct vdo_slab *slab = depot->new_slabs[i]; in register_new_slabs() local
4729 if (slab->allocator == allocator) in register_new_slabs()
4730 register_slab_with_allocator(allocator, slab); in register_new_slabs()