Lines Matching full:vdo

39 #include "vdo.h"
151 * O(n) run time isn't ideal, but if we have 1000 VDO devices in use simultaneously we still only
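
The comment above describes the driver's device registry, which is scanned linearly on every lookup. A minimal userspace sketch of that pattern (the types and names below are illustrative stand-ins, not the driver's):

#include <stdbool.h>
#include <stddef.h>

/* Hypothetical stand-in for the registry: a singly linked list of devices. */
struct registry_entry {
	struct registry_entry *next;
	void *device;			/* stands in for struct vdo */
};

static struct registry_entry *registry_head;

/* O(n) scan, as the comment above notes: acceptable because n (the number
 * of VDO devices on one system) stays small in practice. */
static void *find_matching(bool (*filter)(void *device, const void *context),
			   const void *context)
{
	struct registry_entry *entry;

	for (entry = registry_head; entry != NULL; entry = entry->next) {
		if (filter(entry->device, context))
			return entry->device;
	}
	return NULL;
}
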
890 static struct vdo *get_vdo_for_target(struct dm_target *ti) in get_vdo_for_target()
892 return ((struct device_config *) ti->private)->vdo; in get_vdo_for_target()
898 struct vdo *vdo = get_vdo_for_target(ti); in vdo_map_bio() local
900 const struct admin_state_code *code = vdo_get_admin_state_code(&vdo->admin.state); in vdo_map_bio()
902 VDO_ASSERT_LOG_ONLY(code->normal, "vdo should not receive bios while in state %s", in vdo_map_bio()
906 vdo_count_bios(&vdo->stats.bios_in, bio); in vdo_map_bio()
911 vdo_launch_flush(vdo, bio); in vdo_map_bio()
918 (vdo == vdo_get_work_queue_owner(current_work_queue)->vdo)); in vdo_map_bio()
919 vdo_launch_bio(vdo->data_vio_pool, bio); in vdo_map_bio()
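
vdo_map_bio() counts every incoming bio, asserts it is running on the expected worker thread, and then routes flushes and data bios down different paths. A self-contained sketch of that dispatch shape (the flush test and type names are assumptions for illustration):

#include <stdbool.h>

struct bio_sketch { bool is_flush; };
struct vdo_sketch { unsigned long bios_in; };

static void launch_flush(struct vdo_sketch *vdo, struct bio_sketch *bio)
{
	(void) vdo;
	(void) bio;
}

static void launch_data_bio(struct vdo_sketch *vdo, struct bio_sketch *bio)
{
	(void) vdo;
	(void) bio;
}

/* Count first, then route: flushes take the flush path, everything else
 * is handed to the data_vio pool. */
static void map_bio_sketch(struct vdo_sketch *vdo, struct bio_sketch *bio)
{
	vdo->bios_in++;				/* stands in for vdo_count_bios() */

	if (bio->is_flush)
		launch_flush(vdo, bio);		/* empty flush bios */
	else
		launch_data_bio(vdo, bio);	/* reads, writes, discards */
}
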
925 struct vdo *vdo = get_vdo_for_target(ti); in vdo_io_hints() local
927 limits->logical_block_size = vdo->device_config->logical_block_size; in vdo_io_hints()
936 * Sets the maximum discard size that will be passed into VDO. This value comes from a in vdo_io_hints()
949 (vdo->device_config->max_discard_blocks * VDO_SECTORS_PER_BLOCK); in vdo_io_hints()
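
Line 949 is a unit conversion: the configured maximum discard, counted in VDO blocks, multiplied out into the 512-byte sectors the block layer's queue limits use. Assuming the usual constants (a 4096-byte VDO block, hence 8 sectors per block), the arithmetic works out as:

#include <stdio.h>

#define SECTOR_SIZE 512ULL
#define VDO_BLOCK_SIZE 4096ULL				/* assumed 4 KB block */
#define SECTORS_PER_BLOCK (VDO_BLOCK_SIZE / SECTOR_SIZE)	/* = 8 */

int main(void)
{
	unsigned long long max_discard_blocks = 1500;	/* example config value */
	unsigned long long max_discard_sectors =
		max_discard_blocks * SECTORS_PER_BLOCK;

	/* 1500 blocks -> 12000 sectors -> 6,144,000 bytes per discard bio */
	printf("%llu sectors (%llu bytes)\n", max_discard_sectors,
	       max_discard_sectors * SECTOR_SIZE);
	return 0;
}
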
976 struct vdo *vdo = get_vdo_for_target(ti); in vdo_status() local
985 mutex_lock(&vdo->stats_mutex); in vdo_status()
986 vdo_fetch_statistics(vdo, &vdo->stats_buffer); in vdo_status()
987 stats = &vdo->stats_buffer; in vdo_status()
990 vdo_get_backing_device(vdo), stats->mode, in vdo_status()
992 vdo_get_dedupe_index_state_name(vdo->hash_zones), in vdo_status()
993 vdo_get_compressing(vdo) ? "online" : "offline", in vdo_status()
996 mutex_unlock(&vdo->stats_mutex); in vdo_status()
1012 static block_count_t __must_check get_underlying_device_block_count(const struct vdo *vdo) in get_underlying_device_block_count() argument
1014 return bdev_nr_bytes(vdo_get_backing_device(vdo)) / VDO_BLOCK_SIZE; in get_underlying_device_block_count()
1017 static int __must_check process_vdo_message_locked(struct vdo *vdo, unsigned int argc, in process_vdo_message_locked() argument
1022 vdo_set_compressing(vdo, true); in process_vdo_message_locked()
1027 vdo_set_compressing(vdo, false); in process_vdo_message_locked()
1045 static int __must_check process_vdo_message(struct vdo *vdo, unsigned int argc, in process_vdo_message() argument
1058 return vdo_dump(vdo, argc, argv, "dmsetup message"); in process_vdo_message()
1062 vdo->dump_on_shutdown = true; in process_vdo_message()
1071 return vdo_message_dedupe_index(vdo->hash_zones, argv[0]); in process_vdo_message()
1074 if (atomic_cmpxchg(&vdo->processing_message, 0, 1) != 0) in process_vdo_message()
1077 result = process_vdo_message_locked(vdo, argc, argv); in process_vdo_message()
1081 atomic_set(&vdo->processing_message, 0); in process_vdo_message()
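
Lines 1074 and 1081 bracket a one-at-a-time guard: an atomic compare-and-swap claims the processing_message flag, the message is handled, and a plain store releases the flag; a second concurrent message fails the swap and returns busy. The same pattern in portable C11:

#include <stdatomic.h>

static atomic_int processing_message;	/* 0 = idle, 1 = busy */

static int process_message_sketch(void)
{
	int expected = 0;

	/* Claim the flag, or fail fast if another message is in flight. */
	if (!atomic_compare_exchange_strong(&processing_message, &expected, 1))
		return -1;		/* stands in for a "busy" error code */

	/* ... handle the message with exclusive access ... */

	atomic_store(&processing_message, 0);	/* release */
	return 0;
}
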
1089 struct vdo *vdo; in vdo_message() local
1097 vdo = get_vdo_for_target(ti); in vdo_message()
1099 vdo_register_thread_device_id(&instance_thread, &vdo->instance); in vdo_message()
1106 vdo_write_stats(vdo, result_buffer, maxlen); in vdo_message()
1109 vdo_write_config(vdo, &result_buffer, &maxlen); in vdo_message()
1112 result = vdo_status_to_errno(process_vdo_message(vdo, argc, argv)); in vdo_message()
1137 static bool vdo_uses_device(struct vdo *vdo, const void *context) in vdo_uses_device() argument
1141 return vdo_get_backing_device(vdo)->bd_dev == config->owned_device->bdev->bd_dev; in vdo_uses_device()
1148 static thread_id_t __must_check get_thread_id_for_phase(struct vdo *vdo) in get_thread_id_for_phase() argument
1150 switch (vdo->admin.phase) { in get_thread_id_for_phase()
1155 return vdo->thread_config.packer_thread; in get_thread_id_for_phase()
1159 return vdo->thread_config.cpu_thread; in get_thread_id_for_phase()
1164 return vdo->thread_config.journal_thread; in get_thread_id_for_phase()
1167 return vdo->thread_config.admin_thread; in get_thread_id_for_phase()
1171 static struct vdo_completion *prepare_admin_completion(struct vdo *vdo, in prepare_admin_completion() argument
1175 struct vdo_completion *completion = &vdo->admin.completion; in prepare_admin_completion()
1183 completion->callback_thread_id = get_thread_id_for_phase(vdo); in prepare_admin_completion()
1191 * @vdo: The vdo on which an admin operation is being performed
1195 static u32 advance_phase(struct vdo *vdo) in advance_phase() argument
1197 u32 phase = vdo->admin.phase++; in advance_phase()
1199 vdo->admin.completion.callback_thread_id = get_thread_id_for_phase(vdo); in advance_phase()
1200 vdo->admin.completion.requeue = true; in advance_phase()
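
advance_phase() is the engine of the admin state machine: it returns the current phase, post-increments the stored one, and points the admin completion at whichever thread owns the next phase. A sketch of that idiom (the thread lookup is a placeholder; the real code switches on the phase, as get_thread_id_for_phase() above shows):

#include <stdbool.h>

typedef unsigned int thread_id_t;

struct admin_sketch {
	unsigned int phase;
	thread_id_t callback_thread_id;
	bool requeue;
};

static thread_id_t thread_for_phase(unsigned int phase)
{
	return phase % 4;			/* placeholder mapping */
}

static unsigned int advance_phase_sketch(struct admin_sketch *admin)
{
	unsigned int phase = admin->phase++;	/* return current, store next */

	admin->callback_thread_id = thread_for_phase(admin->phase);
	admin->requeue = true;			/* force dispatch onto that thread */
	return phase;				/* callers switch on this value */
}
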
1206 * should not be called from vdo threads.
1208 static int perform_admin_operation(struct vdo *vdo, u32 starting_phase, in perform_admin_operation() argument
1213 struct vdo_administrator *admin = &vdo->admin; in perform_admin_operation()
1224 vdo_launch_completion(prepare_admin_completion(vdo, callback, error_handler)); in perform_admin_operation()
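
perform_admin_operation() launches the callback chain and then blocks until it finishes, which is why the comment at line 1206 forbids calling it from vdo threads: a worker waiting on work that only it could run would deadlock. A pthread sketch of that launch-then-wait shape (assumed structure, not the driver's code):

#include <pthread.h>

struct operation {
	pthread_mutex_t lock;		/* assumed initialized by the caller */
	pthread_cond_t done_cond;
	int done;
	int result;
};

/* Worker side: the final callback in the chain signals completion. */
static void finish_operation(struct operation *op, int result)
{
	pthread_mutex_lock(&op->lock);
	op->result = result;
	op->done = 1;
	pthread_cond_signal(&op->done_cond);
	pthread_mutex_unlock(&op->lock);
}

/* Caller side: must not be a worker thread, or the wait never ends. */
static int perform_operation(struct operation *op,
			     void (*launch)(struct operation *))
{
	op->done = 0;
	launch(op);			/* kick off the callback chain */

	pthread_mutex_lock(&op->lock);
	while (!op->done)
		pthread_cond_wait(&op->done_cond, &op->lock);
	pthread_mutex_unlock(&op->lock);
	return op->result;
}
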
1243 static void assert_admin_phase_thread(struct vdo *vdo, const char *what) in assert_admin_phase_thread() argument
1245 VDO_ASSERT_LOG_ONLY(vdo_get_callback_thread_id() == get_thread_id_for_phase(vdo), in assert_admin_phase_thread()
1247 ADMIN_PHASE_NAMES[vdo->admin.phase]); in assert_admin_phase_thread()
1256 struct vdo_administrator *admin = &completion->vdo->admin; in finish_operation_callback()
1263 * decode_from_super_block() - Decode the VDO state from the super block and validate that it is
1265 * @vdo: The vdo being loaded.
1272 static int __must_check decode_from_super_block(struct vdo *vdo) in decode_from_super_block() argument
1274 const struct device_config *config = vdo->device_config; in decode_from_super_block()
1277 result = vdo_decode_component_states(vdo->super_block.buffer, &vdo->geometry, in decode_from_super_block()
1278 &vdo->states); in decode_from_super_block()
1282 vdo_set_state(vdo, vdo->states.vdo.state); in decode_from_super_block()
1283 vdo->load_state = vdo->states.vdo.state; in decode_from_super_block()
1289 if (vdo->states.vdo.config.logical_blocks < config->logical_blocks) { in decode_from_super_block()
1290 …llu blocks was specified, but that differs from the %llu blocks configured in the vdo super block", in decode_from_super_block()
1292 (unsigned long long) vdo->states.vdo.config.logical_blocks); in decode_from_super_block()
1293 vdo->states.vdo.config.logical_blocks = config->logical_blocks; in decode_from_super_block()
1296 result = vdo_validate_component_states(&vdo->states, vdo->geometry.nonce, in decode_from_super_block()
1302 vdo->layout = vdo->states.layout; in decode_from_super_block()
1308 * portions of the vdo being loaded.
1309 * @vdo: The vdo being loaded.
1317 static int __must_check decode_vdo(struct vdo *vdo) in decode_vdo() argument
1323 result = decode_from_super_block(vdo); in decode_vdo()
1325 vdo_destroy_component_states(&vdo->states); in decode_vdo()
1329 maximum_age = vdo_convert_maximum_age(vdo->device_config->block_map_maximum_age); in decode_vdo()
1331 vdo_get_recovery_journal_length(vdo->states.vdo.config.recovery_journal_size); in decode_vdo()
1344 result = vdo_enable_read_only_entry(vdo); in decode_vdo()
1348 partition = vdo_get_known_partition(&vdo->layout, in decode_vdo()
1350 result = vdo_decode_recovery_journal(vdo->states.recovery_journal, in decode_vdo()
1351 vdo->states.vdo.nonce, vdo, partition, in decode_vdo()
1352 vdo->states.vdo.complete_recoveries, in decode_vdo()
1353 vdo->states.vdo.config.recovery_journal_size, in decode_vdo()
1354 &vdo->recovery_journal); in decode_vdo()
1358 partition = vdo_get_known_partition(&vdo->layout, VDO_SLAB_SUMMARY_PARTITION); in decode_vdo()
1359 result = vdo_decode_slab_depot(vdo->states.slab_depot, vdo, partition, in decode_vdo()
1360 &vdo->depot); in decode_vdo()
1364 result = vdo_decode_block_map(vdo->states.block_map, in decode_vdo()
1365 vdo->states.vdo.config.logical_blocks, vdo, in decode_vdo()
1366 vdo->recovery_journal, vdo->states.vdo.nonce, in decode_vdo()
1367 vdo->device_config->cache_size, maximum_age, in decode_vdo()
1368 &vdo->block_map); in decode_vdo()
1372 result = vdo_make_physical_zones(vdo, &vdo->physical_zones); in decode_vdo()
1377 result = vdo_make_logical_zones(vdo, &vdo->logical_zones); in decode_vdo()
1381 return vdo_make_hash_zones(vdo, &vdo->hash_zones); in decode_vdo()
1390 struct vdo *vdo = completion->vdo; in pre_load_callback() local
1393 assert_admin_phase_thread(vdo, __func__); in pre_load_callback()
1395 switch (advance_phase(vdo)) { in pre_load_callback()
1397 result = vdo_start_operation(&vdo->admin.state, in pre_load_callback()
1404 vdo_load_super_block(vdo, completion); in pre_load_callback()
1408 vdo_continue_completion(completion, decode_vdo(vdo)); in pre_load_callback()
1437 static void set_device_config(struct dm_target *ti, struct vdo *vdo, in set_device_config() argument
1441 list_add_tail(&config->config_list, &vdo->device_config_list); in set_device_config()
1442 config->vdo = vdo; in set_device_config()
1450 struct vdo *vdo; in vdo_initialize() local
1466 vdo = vdo_find_matching(vdo_uses_device, config); in vdo_initialize()
1467 if (vdo != NULL) { in vdo_initialize()
1468 vdo_log_error("Existing vdo already uses device %s", in vdo_initialize()
1469 vdo->device_config->parent_device_name); in vdo_initialize()
1470 ti->error = "Cannot share storage device with already-running VDO"; in vdo_initialize()
1474 result = vdo_make(instance, config, &ti->error, &vdo); in vdo_initialize()
1476 vdo_log_error("Could not create VDO device. (VDO error %d, message %s)", in vdo_initialize()
1478 vdo_destroy(vdo); in vdo_initialize()
1482 result = perform_admin_operation(vdo, PRE_LOAD_PHASE_START, pre_load_callback, in vdo_initialize()
1488 vdo_log_error("Could not start VDO device. (VDO error %d, message %s)", in vdo_initialize()
1490 vdo_destroy(vdo); in vdo_initialize()
1494 set_device_config(ti, vdo, config); in vdo_initialize()
1495 vdo->device_config = config; in vdo_initialize()
1500 static bool __must_check vdo_is_named(struct vdo *vdo, const void *context) in vdo_is_named() argument
1502 struct dm_target *ti = vdo->device_config->owning_target; in vdo_is_named()
1640 struct vdo *vdo = completion->vdo; in check_may_grow_physical() local
1642 assert_admin_phase_thread(vdo, __func__); in check_may_grow_physical()
1644 /* These checks can only be done from a vdo thread. */ in check_may_grow_physical()
1645 if (vdo_is_read_only(vdo)) in check_may_grow_physical()
1648 if (vdo_in_recovery_mode(vdo)) in check_may_grow_physical()
1660 * grow_layout() - Make the layout for growing a vdo.
1661 * @vdo: The vdo preparing to grow.
1662 * @old_size: The current size of the vdo.
1663 * @new_size: The size to which the vdo will be grown.
1667 static int grow_layout(struct vdo *vdo, block_count_t old_size, block_count_t new_size) in grow_layout() argument
1672 if (vdo->next_layout.size == new_size) { in grow_layout()
1678 if (vdo->partition_copier == NULL) { in grow_layout()
1679 vdo->partition_copier = dm_kcopyd_client_create(NULL); in grow_layout()
1680 if (IS_ERR(vdo->partition_copier)) { in grow_layout()
1681 result = PTR_ERR(vdo->partition_copier); in grow_layout()
1682 vdo->partition_copier = NULL; in grow_layout()
1688 vdo_uninitialize_layout(&vdo->next_layout); in grow_layout()
1694 result = vdo_initialize_layout(new_size, vdo->layout.start, in grow_layout()
1695 get_partition_size(&vdo->layout, in grow_layout()
1697 get_partition_size(&vdo->layout, in grow_layout()
1699 get_partition_size(&vdo->layout, in grow_layout()
1701 &vdo->next_layout); in grow_layout()
1703 dm_kcopyd_client_destroy(vdo_forget(vdo->partition_copier)); in grow_layout()
1709 get_partition_size(&vdo->next_layout, in grow_layout()
1711 get_partition_size(&vdo->next_layout, in grow_layout()
1715 vdo_uninitialize_layout(&vdo->next_layout); in grow_layout()
1716 dm_kcopyd_client_destroy(vdo_forget(vdo->partition_copier)); in grow_layout()
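
grow_layout() creates the dm-kcopyd client lazily on the first growth attempt (lines 1678-1682) and destroys it on every failure path, so a later retry starts from a clean slate. The lazy-create/clean-up-on-error discipline, sketched with stand-in names:

#include <stddef.h>
#include <stdlib.h>

struct copier { int unused; };

static struct copier *partition_copier;	/* created lazily, on first use */

static int prepare_growth_sketch(int layout_result)
{
	if (partition_copier == NULL) {
		partition_copier = malloc(sizeof(*partition_copier));
		if (partition_copier == NULL)
			return -1;	/* creation failed; nothing to undo */
	}

	if (layout_result != 0) {
		/* Tear the copier down so a later retry starts clean. */
		free(partition_copier);
		partition_copier = NULL;
		return layout_result;
	}
	return 0;
}
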
1723 static int prepare_to_grow_physical(struct vdo *vdo, block_count_t new_physical_blocks) in prepare_to_grow_physical() argument
1726 block_count_t current_physical_blocks = vdo->states.vdo.config.physical_blocks; in prepare_to_grow_physical()
1732 result = perform_admin_operation(vdo, PREPARE_GROW_PHYSICAL_PHASE_START, in prepare_to_grow_physical()
1739 result = grow_layout(vdo, current_physical_blocks, new_physical_blocks); in prepare_to_grow_physical()
1743 result = vdo_prepare_to_grow_slab_depot(vdo->depot, in prepare_to_grow_physical()
1744 vdo_get_known_partition(&vdo->next_layout, in prepare_to_grow_physical()
1747 vdo_uninitialize_layout(&vdo->next_layout); in prepare_to_grow_physical()
1760 * @may_grow: Set to true if growing the logical and physical size of the vdo is currently
1781 *error_ptr = "Can't shrink VDO logical size"; in validate_new_device_config()
1802 *error_ptr = "Removing physical storage from a VDO is not supported"; in validate_new_device_config()
1807 *error_ptr = "VDO physical size may not grow in current state"; in validate_new_device_config()
1815 struct vdo *vdo) in prepare_to_modify() argument
1818 bool may_grow = (vdo_get_admin_state(vdo) != VDO_ADMIN_STATE_PRE_LOADED); in prepare_to_modify()
1820 result = validate_new_device_config(config, vdo->device_config, may_grow, in prepare_to_modify()
1825 if (config->logical_blocks > vdo->device_config->logical_blocks) { in prepare_to_modify()
1826 block_count_t logical_blocks = vdo->states.vdo.config.logical_blocks; in prepare_to_modify()
1833 result = vdo_prepare_to_grow_block_map(vdo->block_map, in prepare_to_modify()
1843 if (config->physical_blocks > vdo->device_config->physical_blocks) { in prepare_to_modify()
1844 result = prepare_to_grow_physical(vdo, config->physical_blocks); in prepare_to_modify()
1863 if (strcmp(config->parent_device_name, vdo->device_config->parent_device_name) != 0) { in prepare_to_modify()
1867 vdo->device_config->parent_device_name, in prepare_to_modify()
1875 unsigned int argc, char **argv, struct vdo *vdo) in update_existing_vdo() argument
1885 result = prepare_to_modify(ti, config, vdo); in update_existing_vdo()
1891 set_device_config(ti, vdo, config); in update_existing_vdo()
1900 struct vdo *vdo; in vdo_ctr() local
1904 vdo = vdo_find_matching(vdo_is_named, device_name); in vdo_ctr()
1905 if (vdo == NULL) { in vdo_ctr()
1908 vdo_register_thread_device_id(&instance_thread, &vdo->instance); in vdo_ctr()
1909 result = update_existing_vdo(device_name, ti, argc, argv, vdo); in vdo_ctr()
1920 struct vdo *vdo = vdo_forget(config->vdo); in vdo_dtr() local
1923 if (list_empty(&vdo->device_config_list)) { in vdo_dtr()
1926 /* This was the last config referencing the VDO. Free it. */ in vdo_dtr()
1927 unsigned int instance = vdo->instance; in vdo_dtr()
1935 if (vdo->dump_on_shutdown) in vdo_dtr()
1936 vdo_dump_all(vdo, "device shutdown"); in vdo_dtr()
1938 vdo_destroy(vdo_forget(vdo)); in vdo_dtr()
1943 } else if (config == vdo->device_config) { in vdo_dtr()
1945 * The VDO still references this config. Give it a reference to a config that isn't in vdo_dtr()
1948 vdo->device_config = list_first_entry(&vdo->device_config_list, in vdo_dtr()
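
vdo_dtr() treats the config list as its reference count: each loaded table contributes one device_config, the vdo is destroyed only when the list empties, and if the departing config was the one the vdo pointed at, the pointer is repaired from the list head (line 1948). The ownership rule, sketched with a counter standing in for the list:

struct owner_sketch {
	int config_count;		/* stands in for the config list */
	void *primary_config;		/* stands in for vdo->device_config */
};

/* Returns 1 if the owner itself should now be destroyed. */
static int drop_config_sketch(struct owner_sketch *owner, void *config,
			      void *next_config)
{
	owner->config_count--;
	if (owner->config_count == 0)
		return 1;		/* last reference: free the owner */

	if (owner->primary_config == config)
		owner->primary_config = next_config;	/* repair the pointer */
	return 0;
}
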
1963 * write_super_block_for_suspend() - Update the VDO state and save the super block.
1968 struct vdo *vdo = completion->vdo; in write_super_block_for_suspend() local
1970 switch (vdo_get_state(vdo)) { in write_super_block_for_suspend()
1973 vdo_set_state(vdo, VDO_CLEAN); in write_super_block_for_suspend()
1989 vdo_save_components(vdo, completion); in write_super_block_for_suspend()
1998 struct vdo *vdo = completion->vdo; in suspend_callback() local
1999 struct admin_state *state = &vdo->admin.state; in suspend_callback()
2002 assert_admin_phase_thread(vdo, __func__); in suspend_callback()
2004 switch (advance_phase(vdo)) { in suspend_callback()
2012 vdo_start_operation(state, vdo->suspend_type)); in suspend_callback()
2017 * If the VDO was already resumed from a prior suspend while read-only, some of the in suspend_callback()
2022 if (vdo_in_read_only_mode(vdo)) in suspend_callback()
2025 vdo_drain_packer(vdo->packer, completion); in suspend_callback()
2029 drain_data_vio_pool(vdo->data_vio_pool, completion); in suspend_callback()
2033 vdo_drain_hash_zones(vdo->hash_zones, completion); in suspend_callback()
2037 vdo_drain_flusher(vdo->flusher, completion); in suspend_callback()
2046 result = vdo_synchronous_flush(vdo); in suspend_callback()
2048 vdo_enter_read_only_mode(vdo, result); in suspend_callback()
2050 vdo_drain_logical_zones(vdo->logical_zones, in suspend_callback()
2055 vdo_drain_block_map(vdo->block_map, vdo_get_admin_state_code(state), in suspend_callback()
2060 vdo_drain_recovery_journal(vdo->recovery_journal, in suspend_callback()
2065 vdo_drain_slab_depot(vdo->depot, vdo_get_admin_state_code(state), in suspend_callback()
2075 /* If we didn't save the VDO or there was an error, we're done. */ in suspend_callback()
2094 struct vdo *vdo = get_vdo_for_target(ti); in vdo_postsuspend() local
2099 vdo_register_thread_device_id(&instance_thread, &vdo->instance); in vdo_postsuspend()
2100 device_name = vdo_get_device_name(vdo->device_config->owning_target); in vdo_postsuspend()
2107 result = perform_admin_operation(vdo, SUSPEND_PHASE_START, suspend_callback, in vdo_postsuspend()
2113 * VDO suspended. in vdo_postsuspend()
2118 vdo_get_admin_state(vdo)->name); in vdo_postsuspend()
2128 * was_new() - Check whether the vdo was new when it was loaded.
2129 * @vdo: The vdo to query.
2131 * Return: true if the vdo was new.
2133 static bool was_new(const struct vdo *vdo) in was_new() argument
2135 return (vdo->load_state == VDO_NEW); in was_new()
2139 * requires_repair() - Check whether a vdo requires recovery or rebuild.
2140 * @vdo: The vdo to query.
2142 * Return: true if the vdo must be repaired.
2144 static bool __must_check requires_repair(const struct vdo *vdo) in requires_repair() argument
2146 switch (vdo_get_state(vdo)) { in requires_repair()
2160 * @vdo: The vdo.
2164 static enum slab_depot_load_type get_load_type(struct vdo *vdo) in get_load_type() argument
2166 if (vdo_state_requires_read_only_rebuild(vdo->load_state)) in get_load_type()
2169 if (vdo_state_requires_recovery(vdo->load_state)) in get_load_type()
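
The two checks above establish a priority: a saved state that demands a read-only rebuild outranks one that merely needs recovery, and everything else loads normally. Compressed into a sketch (enum and parameter names are illustrative):

enum load_type { NORMAL_LOAD, RECOVERY_LOAD, REBUILD_LOAD };

static enum load_type get_load_type_sketch(int needs_rebuild, int needs_recovery)
{
	if (needs_rebuild)		/* checked first: rebuild wins */
		return REBUILD_LOAD;
	if (needs_recovery)
		return RECOVERY_LOAD;
	return NORMAL_LOAD;
}
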
2176 * load_callback() - Callback to do the destructive parts of loading a VDO.
2181 struct vdo *vdo = completion->vdo; in load_callback() local
2184 assert_admin_phase_thread(vdo, __func__); in load_callback()
2186 switch (advance_phase(vdo)) { in load_callback()
2188 result = vdo_start_operation(&vdo->admin.state, VDO_ADMIN_STATE_LOADING); in load_callback()
2195 vdo_open_recovery_journal(vdo->recovery_journal, vdo->depot, in load_callback()
2196 vdo->block_map); in load_callback()
2201 vdo_set_dedupe_state_normal(vdo->hash_zones); in load_callback()
2202 if (vdo_is_read_only(vdo)) { in load_callback()
2211 if (requires_repair(vdo)) { in load_callback()
2216 vdo_load_slab_depot(vdo->depot, in load_callback()
2217 (was_new(vdo) ? VDO_ADMIN_STATE_FORMATTING : in load_callback()
2223 vdo_set_state(vdo, VDO_DIRTY); in load_callback()
2224 vdo_save_components(vdo, completion); in load_callback()
2228 vdo_initialize_block_map_from_journal(vdo->block_map, in load_callback()
2229 vdo->recovery_journal); in load_callback()
2230 vdo_prepare_slab_depot_to_allocate(vdo->depot, get_load_type(vdo), in load_callback()
2235 if (vdo_state_requires_recovery(vdo->load_state)) in load_callback()
2236 vdo_enter_recovery_mode(vdo); in load_callback()
2238 vdo_scrub_all_unrecovered_slabs(vdo->depot, completion); in load_callback()
2242 WRITE_ONCE(vdo->compressing, vdo->device_config->compression); in load_callback()
2243 if (vdo->device_config->deduplication) { in load_callback()
2248 vdo_start_dedupe_index(vdo->hash_zones, was_new(vdo)); in load_callback()
2251 vdo->allocations_allowed = false; in load_callback()
2258 vdo_drain_recovery_journal(vdo->recovery_journal, VDO_ADMIN_STATE_SAVING, in load_callback()
2265 vdo->admin.phase = LOAD_PHASE_FINISHED; in load_callback()
2280 * If at all possible, brings the vdo online in read-only mode. This handler is registered in
2285 struct vdo *vdo = completion->vdo; in handle_load_error() local
2288 vdo->thread_config.admin_thread)) in handle_load_error()
2291 if (vdo_state_requires_read_only_rebuild(vdo->load_state) && in handle_load_error()
2292 (vdo->admin.phase == LOAD_PHASE_MAKE_DIRTY)) { in handle_load_error()
2294 vdo->admin.phase = LOAD_PHASE_DRAIN_JOURNAL; in handle_load_error()
2300 (vdo->admin.phase == LOAD_PHASE_MAKE_DIRTY)) { in handle_load_error()
2302 vdo->admin.phase = LOAD_PHASE_FINISHED; in handle_load_error()
2309 vdo->admin.phase = LOAD_PHASE_WAIT_FOR_READ_ONLY; in handle_load_error()
2310 vdo_enter_read_only_mode(vdo, completion->result); in handle_load_error()
2316 * write_super_block_for_resume() - Update the VDO state and save the super block.
2321 struct vdo *vdo = completion->vdo; in write_super_block_for_resume() local
2323 switch (vdo_get_state(vdo)) { in write_super_block_for_resume()
2326 vdo_set_state(vdo, VDO_DIRTY); in write_super_block_for_resume()
2327 vdo_save_components(vdo, completion); in write_super_block_for_resume()
2346 * resume_callback() - Callback to resume a VDO.
2351 struct vdo *vdo = completion->vdo; in resume_callback() local
2354 assert_admin_phase_thread(vdo, __func__); in resume_callback()
2356 switch (advance_phase(vdo)) { in resume_callback()
2358 result = vdo_start_operation(&vdo->admin.state, in resume_callback()
2373 vdo_resume_hash_zones(vdo->hash_zones, completion); in resume_callback()
2377 vdo_resume_slab_depot(vdo->depot, completion); in resume_callback()
2381 vdo_resume_recovery_journal(vdo->recovery_journal, completion); in resume_callback()
2385 vdo_resume_block_map(vdo->block_map, completion); in resume_callback()
2389 vdo_resume_logical_zones(vdo->logical_zones, completion); in resume_callback()
2394 bool was_enabled = vdo_get_compressing(vdo); in resume_callback()
2395 bool enable = vdo->device_config->compression; in resume_callback()
2398 WRITE_ONCE(vdo->compressing, enable); in resume_callback()
2401 vdo_resume_packer(vdo->packer, completion); in resume_callback()
2406 vdo_resume_flusher(vdo->flusher, completion); in resume_callback()
2410 resume_data_vio_pool(vdo->data_vio_pool, completion); in resume_callback()
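
The compression flag is flipped with WRITE_ONCE() (lines 2242 and 2398 above) because the I/O path reads it without a lock, so the store must be a single untorn write. In portable C11 terms that is a relaxed atomic exchange and load:

#include <stdatomic.h>
#include <stdbool.h>

static atomic_bool compressing;

/* Resume path: publish the new setting, returning the prior value
 * (mirroring the was_enabled/enable pair above). */
static bool set_compressing(bool enable)
{
	return atomic_exchange_explicit(&compressing, enable,
					memory_order_relaxed);
}

/* Hot I/O path: one untorn load, no lock taken. */
static bool get_compressing(void)
{
	return atomic_load_explicit(&compressing, memory_order_relaxed);
}
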
2431 struct vdo *vdo = completion->vdo; in grow_logical_callback() local
2434 assert_admin_phase_thread(vdo, __func__); in grow_logical_callback()
2436 switch (advance_phase(vdo)) { in grow_logical_callback()
2438 if (vdo_is_read_only(vdo)) { in grow_logical_callback()
2440 "Can't grow logical size of a read-only VDO"); in grow_logical_callback()
2445 result = vdo_start_operation(&vdo->admin.state, in grow_logical_callback()
2452 vdo->states.vdo.config.logical_blocks = vdo->block_map->next_entry_count; in grow_logical_callback()
2453 vdo_save_components(vdo, completion); in grow_logical_callback()
2457 vdo_grow_block_map(vdo->block_map, completion); in grow_logical_callback()
2464 vdo_enter_read_only_mode(vdo, completion->result); in grow_logical_callback()
2480 struct vdo *vdo = completion->vdo; in handle_logical_growth_error() local
2482 if (vdo->admin.phase == GROW_LOGICAL_PHASE_GROW_BLOCK_MAP) { in handle_logical_growth_error()
2487 vdo->states.vdo.config.logical_blocks = vdo->block_map->entry_count; in handle_logical_growth_error()
2488 vdo_abandon_block_map_growth(vdo->block_map); in handle_logical_growth_error()
2491 vdo->admin.phase = GROW_LOGICAL_PHASE_ERROR; in handle_logical_growth_error()
2496 * perform_grow_logical() - Grow the logical size of the vdo.
2497 * @vdo: The vdo to grow.
2498 * @new_logical_blocks: The size to which the vdo should be grown.
2500 * Context: This method may only be called when the vdo has been suspended and must not be called
2505 static int perform_grow_logical(struct vdo *vdo, block_count_t new_logical_blocks) in perform_grow_logical() argument
2509 if (vdo->device_config->logical_blocks == new_logical_blocks) { in perform_grow_logical()
2514 vdo_abandon_block_map_growth(vdo->block_map); in perform_grow_logical()
2520 if (vdo->block_map->next_entry_count != new_logical_blocks) in perform_grow_logical()
2523 result = perform_admin_operation(vdo, GROW_LOGICAL_PHASE_START, in perform_grow_logical()
2541 static void partition_to_region(struct partition *partition, struct vdo *vdo, in partition_to_region() argument
2544 physical_block_number_t pbn = partition->offset - vdo->geometry.bio_offset; in partition_to_region()
2547 .bdev = vdo_get_backing_device(vdo), in partition_to_region()
2556 * @vdo: The vdo preparing to grow.
2560 static void copy_partition(struct vdo *vdo, enum partition_id id, in copy_partition() argument
2564 struct partition *from = vdo_get_known_partition(&vdo->layout, id); in copy_partition()
2565 struct partition *to = vdo_get_known_partition(&vdo->next_layout, id); in copy_partition()
2567 partition_to_region(from, vdo, &read_region); in copy_partition()
2568 partition_to_region(to, vdo, &write_regions[0]); in copy_partition()
2569 dm_kcopyd_copy(vdo->partition_copier, &read_region, 1, write_regions, 0, in copy_partition()
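
partition_to_region() rebases a partition's block offset onto the backing device (subtracting the geometry's bio offset) and converts blocks to the 512-byte sectors dm-kcopyd operates in. The conversion as a compilable sketch (8 sectors per block assumed, matching a 4 KB VDO block):

typedef unsigned long long block_count_sketch;

#define SECTORS_PER_BLOCK 8ULL		/* assumed: 4096 / 512 */

struct region_sketch {
	unsigned long long sector;	/* start, in 512-byte sectors */
	unsigned long long count;	/* length, in 512-byte sectors */
};

static void partition_to_region_sketch(block_count_sketch offset,
				       block_count_sketch count,
				       block_count_sketch bio_offset,
				       struct region_sketch *region)
{
	block_count_sketch pbn = offset - bio_offset;	/* rebase onto backing dev */

	region->sector = pbn * SECTORS_PER_BLOCK;
	region->count = count * SECTORS_PER_BLOCK;
}
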
2581 struct vdo *vdo = completion->vdo; in grow_physical_callback() local
2584 assert_admin_phase_thread(vdo, __func__); in grow_physical_callback()
2586 switch (advance_phase(vdo)) { in grow_physical_callback()
2588 if (vdo_is_read_only(vdo)) { in grow_physical_callback()
2590 "Can't grow physical size of a read-only VDO"); in grow_physical_callback()
2595 result = vdo_start_operation(&vdo->admin.state, in grow_physical_callback()
2603 copy_partition(vdo, VDO_RECOVERY_JOURNAL_PARTITION, completion); in grow_physical_callback()
2607 copy_partition(vdo, VDO_SLAB_SUMMARY_PARTITION, completion); in grow_physical_callback()
2611 vdo_uninitialize_layout(&vdo->layout); in grow_physical_callback()
2612 vdo->layout = vdo->next_layout; in grow_physical_callback()
2613 vdo_forget(vdo->next_layout.head); in grow_physical_callback()
2614 vdo->states.vdo.config.physical_blocks = vdo->layout.size; in grow_physical_callback()
2615 vdo_update_slab_depot_size(vdo->depot); in grow_physical_callback()
2616 vdo_save_components(vdo, completion); in grow_physical_callback()
2620 vdo_use_new_slabs(vdo->depot, completion); in grow_physical_callback()
2624 vdo->depot->summary_origin = in grow_physical_callback()
2625 vdo_get_known_partition(&vdo->layout, in grow_physical_callback()
2627 vdo->recovery_journal->origin = in grow_physical_callback()
2628 vdo_get_known_partition(&vdo->layout, in grow_physical_callback()
2633 vdo_enter_read_only_mode(vdo, completion->result); in grow_physical_callback()
2640 vdo_uninitialize_layout(&vdo->next_layout); in grow_physical_callback()
2650 completion->vdo->admin.phase = GROW_PHYSICAL_PHASE_ERROR; in handle_physical_growth_error()
2655 * perform_grow_physical() - Grow the physical size of the vdo.
2656 * @vdo: The vdo to resize.
2659 * Context: This method may only be called when the vdo has been suspended and must not be called
2664 static int perform_grow_physical(struct vdo *vdo, block_count_t new_physical_blocks) in perform_grow_physical() argument
2668 block_count_t old_physical_blocks = vdo->states.vdo.config.physical_blocks; in perform_grow_physical()
2674 if (new_physical_blocks != vdo->next_layout.size) { in perform_grow_physical()
2676 * Either the VDO isn't prepared to grow, or it was prepared to grow to a different in perform_grow_physical()
2680 vdo_uninitialize_layout(&vdo->next_layout); in perform_grow_physical()
2681 vdo_abandon_new_slabs(vdo->depot); in perform_grow_physical()
2687 vdo_get_known_partition(&vdo->next_layout, VDO_SLAB_DEPOT_PARTITION)->count; in perform_grow_physical()
2688 prepared_depot_size = (vdo->depot->new_slabs == NULL) ? 0 : vdo->depot->new_size; in perform_grow_physical()
2692 result = perform_admin_operation(vdo, GROW_PHYSICAL_PHASE_START, in perform_grow_physical()
2707 * @vdo: The vdo being resumed.
2708 * @config: The new device configuration derived from the table with which the vdo is being
2713 static int __must_check apply_new_vdo_configuration(struct vdo *vdo, in apply_new_vdo_configuration() argument
2718 result = perform_grow_logical(vdo, config->logical_blocks); in apply_new_vdo_configuration()
2724 result = perform_grow_physical(vdo, config->physical_blocks); in apply_new_vdo_configuration()
2731 static int vdo_preresume_registered(struct dm_target *ti, struct vdo *vdo) in vdo_preresume_registered() argument
2738 backing_blocks = get_underlying_device_block_count(vdo); in vdo_preresume_registered()
2741 …vdo_log_error("resume of device '%s' failed: backing device has %llu blocks but VDO physical size … in vdo_preresume_registered()
2747 if (vdo_get_admin_state(vdo) == VDO_ADMIN_STATE_PRE_LOADED) { in vdo_preresume_registered()
2749 result = perform_admin_operation(vdo, LOAD_PHASE_START, load_callback, in vdo_preresume_registered()
2757 vdo->suspend_type = VDO_ADMIN_STATE_SUSPENDING; in vdo_preresume_registered()
2758 perform_admin_operation(vdo, SUSPEND_PHASE_START, in vdo_preresume_registered()
2770 "Start failed, could not load VDO metadata"); in vdo_preresume_registered()
2771 vdo->suspend_type = VDO_ADMIN_STATE_STOPPING; in vdo_preresume_registered()
2772 perform_admin_operation(vdo, SUSPEND_PHASE_START, in vdo_preresume_registered()
2778 /* Even if the VDO is read-only, it is now able to handle read requests. */ in vdo_preresume_registered()
2784 /* If this fails, the VDO was not in a state to be resumed. This should never happen. */ in vdo_preresume_registered()
2785 result = apply_new_vdo_configuration(vdo, config); in vdo_preresume_registered()
2789 * Now that we've tried to modify the vdo, the new config *is* the config, whether the in vdo_preresume_registered()
2792 vdo->device_config = config; in vdo_preresume_registered()
2795 * Any error here is highly unexpected and the state of the vdo is questionable, so we mark in vdo_preresume_registered()
2803 vdo_enter_read_only_mode(vdo, result); in vdo_preresume_registered()
2807 if (vdo_get_admin_state(vdo)->normal) { in vdo_preresume_registered()
2808 /* The VDO was just started, so we don't need to resume it. */ in vdo_preresume_registered()
2812 result = perform_admin_operation(vdo, RESUME_PHASE_START, resume_callback, in vdo_preresume_registered()
2816 /* Even if the vdo is read-only, it has still resumed. */ in vdo_preresume_registered()
2830 struct vdo *vdo = get_vdo_for_target(ti); in vdo_preresume() local
2833 vdo_register_thread_device_id(&instance_thread, &vdo->instance); in vdo_preresume()
2834 result = vdo_preresume_registered(ti, vdo); in vdo_preresume()
2853 * If anything changes that affects how user tools will interact with vdo, update the version
2859 .name = "vdo",
2901 /* Add VDO errors to the set of errors registered by the indexer. */ in vdo_init()