Lines Matching refs:vdo
89 static bool vdo_is_equal(struct vdo *vdo, const void *context) in vdo_is_equal() argument
91 return (vdo == context); in vdo_is_equal()
103 static struct vdo * __must_check filter_vdos_locked(vdo_filter_fn filter, in filter_vdos_locked()
106 struct vdo *vdo; in filter_vdos_locked() local
108 list_for_each_entry(vdo, &registry.links, registration) { in filter_vdos_locked()
109 if (filter(vdo, context)) in filter_vdos_locked()
110 return vdo; in filter_vdos_locked()
121 struct vdo *vdo_find_matching(vdo_filter_fn filter, const void *context) in vdo_find_matching()
123 struct vdo *vdo; in vdo_find_matching() local
126 vdo = filter_vdos_locked(filter, context); in vdo_find_matching()
129 return vdo; in vdo_find_matching()
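
A minimal sketch of the registry lookup these fragments outline: a global list of registered vdo instances, walked under a lock with a caller-supplied predicate. The registry type and its rwlock are assumptions inferred from the usage visible here, not the driver's verbatim definitions.

    /* Assumed registry shape; the driver's actual definition may differ. */
    struct device_registry {
            struct list_head links;  /* all registered struct vdo instances */
            rwlock_t lock;           /* guards traversal and modification   */
    };

    static struct device_registry registry;

    typedef bool (*vdo_filter_fn)(struct vdo *vdo, const void *context);

    /* Return the first registered vdo the filter accepts; the caller
     * must already hold registry.lock. */
    static struct vdo * __must_check filter_vdos_locked(vdo_filter_fn filter,
                                                        const void *context)
    {
            struct vdo *vdo;

            list_for_each_entry(vdo, &registry.links, registration) {
                    if (filter(vdo, context))
                            return vdo;
            }

            return NULL;
    }

    /* Lookups only traverse the list, so the read side suffices. */
    struct vdo *vdo_find_matching(vdo_filter_fn filter, const void *context)
    {
            struct vdo *vdo;

            read_lock(&registry.lock);
            vdo = filter_vdos_locked(filter, context);
            read_unlock(&registry.lock);

            return vdo;
    }
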
137 &thread->vdo->allocations_allowed); in start_vdo_request_queue()
272 static int __must_check read_geometry_block(struct vdo *vdo) in read_geometry_block() argument
282 result = create_metadata_vio(vdo, VIO_TYPE_GEOMETRY, VIO_PRIORITY_HIGH, NULL, in read_geometry_block()
302 bio_set_dev(vio->bio, vdo_get_backing_device(vdo)); in read_geometry_block()
312 result = vdo_parse_geometry_block((u8 *) block, &vdo->geometry); in read_geometry_block()
416 int vdo_make_thread(struct vdo *vdo, thread_id_t thread_id, in vdo_make_thread() argument
420 struct vdo_thread *thread = &vdo->threads[thread_id]; in vdo_make_thread()
432 thread->vdo = vdo; in vdo_make_thread()
434 get_thread_name(&vdo->thread_config, thread_id, queue_name, sizeof(queue_name)); in vdo_make_thread()
435 return vdo_make_work_queue(vdo->thread_name_prefix, queue_name, thread, in vdo_make_thread()
445 static int register_vdo(struct vdo *vdo) in register_vdo() argument
450 result = VDO_ASSERT(filter_vdos_locked(vdo_is_equal, vdo) == NULL, in register_vdo()
453 INIT_LIST_HEAD(&vdo->registration); in register_vdo()
454 list_add_tail(&vdo->registration, &registry.links); in register_vdo()
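
register_vdo() pairs with that lookup: it takes the write side of the same lock, asserts the instance is not already present (reusing vdo_is_equal as the filter), and appends it. A sketch; the locking calls and assertion message are assumptions.

    static int register_vdo(struct vdo *vdo)
    {
            int result;

            write_lock(&registry.lock);
            result = VDO_ASSERT(filter_vdos_locked(vdo_is_equal, vdo) == NULL,
                                "vdo is not already registered");
            if (result == VDO_SUCCESS) {
                    INIT_LIST_HEAD(&vdo->registration);
                    list_add_tail(&vdo->registration, &registry.links);
            }
            write_unlock(&registry.lock);

            return result;
    }
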
469 static int initialize_vdo(struct vdo *vdo, struct device_config *config, in initialize_vdo() argument
475 vdo->device_config = config; in initialize_vdo()
476 vdo->starting_sector_offset = config->owning_target->begin; in initialize_vdo()
477 vdo->instance = instance; in initialize_vdo()
478 vdo->allocations_allowed = true; in initialize_vdo()
479 vdo_set_admin_state_code(&vdo->admin.state, VDO_ADMIN_STATE_NEW); in initialize_vdo()
480 INIT_LIST_HEAD(&vdo->device_config_list); in initialize_vdo()
481 vdo_initialize_completion(&vdo->admin.completion, vdo, VDO_ADMIN_COMPLETION); in initialize_vdo()
482 init_completion(&vdo->admin.callback_sync); in initialize_vdo()
483 mutex_init(&vdo->stats_mutex); in initialize_vdo()
484 result = read_geometry_block(vdo); in initialize_vdo()
490 result = initialize_thread_config(config->thread_counts, &vdo->thread_config); in initialize_vdo()
499 config->thread_counts.hash_zones, vdo->thread_config.thread_count); in initialize_vdo()
503 &vdo->compression_context); in initialize_vdo()
511 &vdo->compression_context[i]); in initialize_vdo()
518 result = register_vdo(vdo); in initialize_vdo()
524 vdo_set_admin_state_code(&vdo->admin.state, VDO_ADMIN_STATE_INITIALIZED); in initialize_vdo()
538 struct vdo **vdo_ptr) in vdo_make()
541 struct vdo *vdo; in vdo_make() local
546 result = vdo_allocate(1, struct vdo, __func__, &vdo); in vdo_make()
552 result = initialize_vdo(vdo, config, instance, reason); in vdo_make()
554 vdo_destroy(vdo); in vdo_make()
559 *vdo_ptr = vdo; in vdo_make()
561 snprintf(vdo->thread_name_prefix, sizeof(vdo->thread_name_prefix), in vdo_make()
563 BUG_ON(vdo->thread_name_prefix[0] == '\0'); in vdo_make()
564 result = vdo_allocate(vdo->thread_config.thread_count, in vdo_make()
565 struct vdo_thread, __func__, &vdo->threads); in vdo_make()
571 result = vdo_make_thread(vdo, vdo->thread_config.admin_thread, in vdo_make()
578 result = vdo_make_flusher(vdo); in vdo_make()
584 result = vdo_make_packer(vdo, DEFAULT_PACKER_BINS, &vdo->packer); in vdo_make()
590 BUG_ON(vdo->device_config->logical_block_size <= 0); in vdo_make()
591 BUG_ON(vdo->device_config->owned_device == NULL); in vdo_make()
592 result = make_data_vio_pool(vdo, MAXIMUM_VDO_USER_VIOS, in vdo_make()
594 &vdo->data_vio_pool); in vdo_make()
602 get_data_vio_pool_request_limit(vdo->data_vio_pool), in vdo_make()
603 vdo, &vdo->io_submitter); in vdo_make()
609 if (vdo_uses_bio_ack_queue(vdo)) { in vdo_make()
610 result = vdo_make_thread(vdo, vdo->thread_config.bio_ack_thread, in vdo_make()
619 result = vdo_make_thread(vdo, vdo->thread_config.cpu_thread, &cpu_q_type, in vdo_make()
621 (void **) vdo->compression_context); in vdo_make()
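
A notable pattern in the vdo_make() fragments: *vdo_ptr is published as soon as initialize_vdo() succeeds, so every later failure can simply return and let the caller unwind the partially built instance through vdo_destroy(). A condensed sketch; the signature and error reporting are partly assumed.

    int vdo_make(unsigned int instance, struct device_config *config,
                 char **reason, struct vdo **vdo_ptr)
    {
            struct vdo *vdo;
            int result;

            result = vdo_allocate(1, struct vdo, __func__, &vdo);
            if (result != VDO_SUCCESS)
                    return result;

            result = initialize_vdo(vdo, config, instance, reason);
            if (result != VDO_SUCCESS) {
                    vdo_destroy(vdo);
                    return result;
            }

            /* Published early: from here on the caller owns the vdo and
             * can destroy it if any later step fails. */
            *vdo_ptr = vdo;

            /* Threads, flusher, packer, data-vio pool, io submitter, and
             * the optional bio-ack and cpu queues follow, each with the
             * same check-and-return convention. */
            return VDO_SUCCESS;
    }
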
630 static void finish_vdo(struct vdo *vdo) in finish_vdo() argument
634 if (vdo->threads == NULL) in finish_vdo()
637 vdo_cleanup_io_submitter(vdo->io_submitter); in finish_vdo()
638 vdo_finish_dedupe_index(vdo->hash_zones); in finish_vdo()
640 for (i = 0; i < vdo->thread_config.thread_count; i++) in finish_vdo()
641 vdo_finish_work_queue(vdo->threads[i].queue); in finish_vdo()
668 static void unregister_vdo(struct vdo *vdo) in unregister_vdo() argument
671 if (filter_vdos_locked(vdo_is_equal, vdo) == vdo) in unregister_vdo()
672 list_del_init(&vdo->registration); in unregister_vdo()
681 void vdo_destroy(struct vdo *vdo) in vdo_destroy() argument
685 if (vdo == NULL) in vdo_destroy()
689 BUG_ON(vdo_get_admin_state(vdo)->normal); in vdo_destroy()
691 vdo->allocations_allowed = true; in vdo_destroy()
693 finish_vdo(vdo); in vdo_destroy()
694 unregister_vdo(vdo); in vdo_destroy()
695 free_data_vio_pool(vdo->data_vio_pool); in vdo_destroy()
696 vdo_free_io_submitter(vdo_forget(vdo->io_submitter)); in vdo_destroy()
697 vdo_free_flusher(vdo_forget(vdo->flusher)); in vdo_destroy()
698 vdo_free_packer(vdo_forget(vdo->packer)); in vdo_destroy()
699 vdo_free_recovery_journal(vdo_forget(vdo->recovery_journal)); in vdo_destroy()
700 vdo_free_slab_depot(vdo_forget(vdo->depot)); in vdo_destroy()
701 vdo_uninitialize_layout(&vdo->layout); in vdo_destroy()
702 vdo_uninitialize_layout(&vdo->next_layout); in vdo_destroy()
703 if (vdo->partition_copier) in vdo_destroy()
704 dm_kcopyd_client_destroy(vdo_forget(vdo->partition_copier)); in vdo_destroy()
705 uninitialize_super_block(&vdo->super_block); in vdo_destroy()
706 vdo_free_block_map(vdo_forget(vdo->block_map)); in vdo_destroy()
707 vdo_free_hash_zones(vdo_forget(vdo->hash_zones)); in vdo_destroy()
708 vdo_free_physical_zones(vdo_forget(vdo->physical_zones)); in vdo_destroy()
709 vdo_free_logical_zones(vdo_forget(vdo->logical_zones)); in vdo_destroy()
711 if (vdo->threads != NULL) { in vdo_destroy()
712 for (i = 0; i < vdo->thread_config.thread_count; i++) { in vdo_destroy()
713 free_listeners(&vdo->threads[i]); in vdo_destroy()
714 vdo_free_work_queue(vdo_forget(vdo->threads[i].queue)); in vdo_destroy()
716 vdo_free(vdo_forget(vdo->threads)); in vdo_destroy()
719 uninitialize_thread_config(&vdo->thread_config); in vdo_destroy()
721 if (vdo->compression_context != NULL) { in vdo_destroy()
722 for (i = 0; i < vdo->device_config->thread_counts.cpu_threads; i++) in vdo_destroy()
723 vdo_free(vdo_forget(vdo->compression_context[i])); in vdo_destroy()
725 vdo_free(vdo_forget(vdo->compression_context)); in vdo_destroy()
727 vdo_free(vdo); in vdo_destroy()
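
vdo_destroy() frees each subsystem through the vdo_forget() idiom: the pointer is fetched and its source nulled in one expression, so nothing is freed twice and stale pointers cannot be reused. A rough reconstruction under the hypothetical name my_forget(), not the driver's exact macro:

    /* Illustrative reconstruction: return the pointer's value and null
     * its source atomically with respect to the surrounding expression. */
    #define my_forget(ptr)                            \
            ({                                        \
                    __typeof__(ptr) __tmp__ = (ptr);  \
                    (ptr) = NULL;                     \
                    __tmp__;                          \
            })

    /* After this call vdo->packer is NULL, so a repeated teardown pass
     * has nothing left to free. */
    vdo_free_packer(my_forget(vdo->packer));
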
730 static int initialize_super_block(struct vdo *vdo, struct vdo_super_block *super_block) in initialize_super_block() argument
735 (char **) &vdo->super_block.buffer); in initialize_super_block()
739 return allocate_vio_components(vdo, VIO_TYPE_SUPER_BLOCK, in initialize_super_block()
742 &vdo->super_block.vio); in initialize_super_block()
786 void vdo_load_super_block(struct vdo *vdo, struct vdo_completion *parent) in vdo_load_super_block() argument
790 result = initialize_super_block(vdo, &vdo->super_block); in vdo_load_super_block()
796 vdo->super_block.vio.completion.parent = parent; in vdo_load_super_block()
797 vdo_submit_metadata_vio(&vdo->super_block.vio, in vdo_load_super_block()
798 vdo_get_data_region_start(vdo->geometry), in vdo_load_super_block()
810 struct block_device *vdo_get_backing_device(const struct vdo *vdo) in vdo_get_backing_device() argument
812 return vdo->device_config->owned_device->bdev; in vdo_get_backing_device()
832 int vdo_synchronous_flush(struct vdo *vdo) in vdo_synchronous_flush() argument
837 bio_init(&bio, vdo_get_backing_device(vdo), NULL, 0, in vdo_synchronous_flush()
842 atomic64_inc(&vdo->stats.flush_out); in vdo_synchronous_flush()
860 enum vdo_state vdo_get_state(const struct vdo *vdo) in vdo_get_state() argument
862 enum vdo_state state = atomic_read(&vdo->state); in vdo_get_state()
876 void vdo_set_state(struct vdo *vdo, enum vdo_state state) in vdo_set_state() argument
880 atomic_set(&vdo->state, state); in vdo_set_state()
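
The bodies elided between these two fragments pair memory barriers with the atomic accesses so a state change publishes all writes made before it; the barrier placement below is an assumption based on that acquire/release pattern, not the listed source.

    enum vdo_state vdo_get_state(const struct vdo *vdo)
    {
            enum vdo_state state = atomic_read(&vdo->state);

            /* Pairs with the write barrier in vdo_set_state(). */
            smp_rmb();
            return state;
    }

    void vdo_set_state(struct vdo *vdo, enum vdo_state state)
    {
            /* Publish everything written before the state change. */
            smp_wmb();
            atomic_set(&vdo->state, state);
    }
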
889 const struct admin_state_code *vdo_get_admin_state(const struct vdo *vdo) in vdo_get_admin_state() argument
891 return vdo_get_admin_state_code(&vdo->admin.state); in vdo_get_admin_state()
897 static void record_vdo(struct vdo *vdo) in record_vdo() argument
900 vdo->states.unused = vdo->geometry.unused; in record_vdo()
901 vdo->states.vdo.state = vdo_get_state(vdo); in record_vdo()
902 vdo->states.block_map = vdo_record_block_map(vdo->block_map); in record_vdo()
903 vdo->states.recovery_journal = vdo_record_recovery_journal(vdo->recovery_journal); in record_vdo()
904 vdo->states.slab_depot = vdo_record_slab_depot(vdo->depot); in record_vdo()
905 vdo->states.layout = vdo->layout; in record_vdo()
958 void vdo_save_components(struct vdo *vdo, struct vdo_completion *parent) in vdo_save_components() argument
960 struct vdo_super_block *super_block = &vdo->super_block; in vdo_save_components()
972 record_vdo(vdo); in vdo_save_components()
974 vdo_encode_super_block(super_block->buffer, &vdo->states); in vdo_save_components()
978 vdo_get_data_region_start(vdo->geometry), in vdo_save_components()
993 int vdo_register_read_only_listener(struct vdo *vdo, void *listener, in vdo_register_read_only_listener() argument
997 struct vdo_thread *thread = &vdo->threads[thread_id]; in vdo_register_read_only_listener()
1001 result = VDO_ASSERT(thread_id != vdo->thread_config.dedupe_thread, in vdo_register_read_only_listener()
1032 struct vdo *vdo = listener; in notify_vdo_of_read_only_mode() local
1034 if (vdo_in_read_only_mode(vdo)) in notify_vdo_of_read_only_mode()
1037 vdo_set_state(vdo, VDO_READ_ONLY_MODE); in notify_vdo_of_read_only_mode()
1038 vdo_save_components(vdo, parent); in notify_vdo_of_read_only_mode()
1047 int vdo_enable_read_only_entry(struct vdo *vdo) in vdo_enable_read_only_entry() argument
1050 bool is_read_only = vdo_in_read_only_mode(vdo); in vdo_enable_read_only_entry()
1051 struct read_only_notifier *notifier = &vdo->read_only_notifier; in vdo_enable_read_only_entry()
1061 vdo_initialize_completion(&notifier->completion, vdo, in vdo_enable_read_only_entry()
1064 for (id = 0; id < vdo->thread_config.thread_count; id++) in vdo_enable_read_only_entry()
1065 vdo->threads[id].is_read_only = is_read_only; in vdo_enable_read_only_entry()
1067 return vdo_register_read_only_listener(vdo, vdo, notify_vdo_of_read_only_mode, in vdo_enable_read_only_entry()
1068 vdo->thread_config.admin_thread); in vdo_enable_read_only_entry()
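
The registration call above implies a per-thread listener record: a context pointer, a notification function, and a chain link hanging off each struct vdo_thread. A sketch of that shape; only the notification signature is fixed by the call, the field names are assumptions.

    typedef void (*vdo_read_only_notification_fn)(void *listener,
                                                  struct vdo_completion *parent);

    /* Assumed record; one chain per struct vdo_thread. */
    struct read_only_listener {
            void *listener;                        /* registrant's context */
            vdo_read_only_notification_fn notify;  /* invoked on RO entry  */
            struct read_only_listener *next;       /* per-thread chain     */
    };
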
1081 struct vdo *vdo = parent->vdo; in vdo_wait_until_not_entering_read_only_mode() local
1082 struct read_only_notifier *notifier = &vdo->read_only_notifier; in vdo_wait_until_not_entering_read_only_mode()
1084 vdo_assert_on_admin_thread(vdo, __func__); in vdo_wait_until_not_entering_read_only_mode()
1128 vdo_assert_on_admin_thread(completion->vdo, __func__); in finish_entering_read_only_mode()
1145 struct vdo *vdo = completion->vdo; in make_thread_read_only() local
1152 struct vdo_thread *thread = &vdo->threads[thread_id]; in make_thread_read_only()
1174 if (++thread_id == vdo->thread_config.dedupe_thread) { in make_thread_read_only()
1182 if (thread_id >= vdo->thread_config.thread_count) { in make_thread_read_only()
1186 vdo->thread_config.admin_thread, NULL); in make_thread_read_only()
1209 struct vdo *vdo = parent->vdo; in vdo_allow_read_only_mode_entry() local
1210 struct read_only_notifier *notifier = &vdo->read_only_notifier; in vdo_allow_read_only_mode_entry()
1212 vdo_assert_on_admin_thread(vdo, __func__); in vdo_allow_read_only_mode_entry()
1248 void vdo_enter_read_only_mode(struct vdo *vdo, int error_code) in vdo_enter_read_only_mode() argument
1252 struct read_only_notifier *notifier = &vdo->read_only_notifier; in vdo_enter_read_only_mode()
1256 thread = &vdo->threads[thread_id]; in vdo_enter_read_only_mode()
1294 bool vdo_is_read_only(struct vdo *vdo) in vdo_is_read_only() argument
1296 return vdo->threads[vdo_get_callback_thread_id()].is_read_only; in vdo_is_read_only()
1305 bool vdo_in_read_only_mode(const struct vdo *vdo) in vdo_in_read_only_mode() argument
1307 return (vdo_get_state(vdo) == VDO_READ_ONLY_MODE); in vdo_in_read_only_mode()
1316 bool vdo_in_recovery_mode(const struct vdo *vdo) in vdo_in_recovery_mode() argument
1318 return (vdo_get_state(vdo) == VDO_RECOVERING); in vdo_in_recovery_mode()
1325 void vdo_enter_recovery_mode(struct vdo *vdo) in vdo_enter_recovery_mode() argument
1327 vdo_assert_on_admin_thread(vdo, __func__); in vdo_enter_recovery_mode()
1329 if (vdo_in_read_only_mode(vdo)) in vdo_enter_recovery_mode()
1333 vdo_set_state(vdo, VDO_RECOVERING); in vdo_enter_recovery_mode()
1354 static int perform_synchronous_action(struct vdo *vdo, vdo_action_fn action, in perform_synchronous_action() argument
1359 vdo_initialize_completion(&sync.vdo_completion, vdo, VDO_SYNC_COMPLETION); in perform_synchronous_action()
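
perform_synchronous_action() bridges the asynchronous completion machinery to a blocking caller: it wraps a vdo_completion and a kernel struct completion, launches the action on the required thread, and sleeps until the action signals. A sketch; struct and helper names beyond the listed fragments are assumptions.

    struct sync_completion {
            struct vdo_completion vdo_completion;  /* runs on a vdo thread   */
            struct completion completion;          /* the caller blocks here */
    };

    static int perform_synchronous_action(struct vdo *vdo, vdo_action_fn action,
                                          thread_id_t thread_id, void *parent)
    {
            struct sync_completion sync;

            vdo_initialize_completion(&sync.vdo_completion, vdo,
                                      VDO_SYNC_COMPLETION);
            init_completion(&sync.completion);
            sync.vdo_completion.parent = parent;

            /* Run the action on its required thread; the action signals
             * sync.completion (via complete()) when it is done. */
            vdo_launch_completion_callback(&sync.vdo_completion, action,
                                           thread_id);
            wait_for_completion(&sync.completion);

            return sync.vdo_completion.result;
    }
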
1373 struct vdo *vdo = completion->vdo; in set_compression_callback() local
1375 bool was_enabled = vdo_get_compressing(vdo); in set_compression_callback()
1378 WRITE_ONCE(vdo->compressing, *enable); in set_compression_callback()
1381 vdo_flush_packer(vdo->packer); in set_compression_callback()
1397 bool vdo_set_compressing(struct vdo *vdo, bool enable) in vdo_set_compressing() argument
1399 perform_synchronous_action(vdo, set_compression_callback, in vdo_set_compressing()
1400 vdo->thread_config.packer_thread, in vdo_set_compressing()
1411 bool vdo_get_compressing(struct vdo *vdo) in vdo_get_compressing() argument
1413 return READ_ONCE(vdo->compressing); in vdo_get_compressing()
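
set_compression_callback() runs on the packer thread through that synchronous mechanism, so only one thread ever writes vdo->compressing; READ_ONCE/WRITE_ONCE keep the unlocked cross-thread reads well-defined, and disabling compression flushes the packer so partially filled bins are written out. Usage sketch; the return value reporting the previous setting is inferred from the enable/was_enabled round-trip above.

    /* Turn compression off; the packer is flushed as a side effect and
     * the prior setting comes back to the caller. */
    bool was_on = vdo_set_compressing(vdo, false);
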
1416 static size_t get_block_map_cache_size(const struct vdo *vdo) in get_block_map_cache_size() argument
1418 return ((size_t) vdo->device_config->cache_size) * VDO_BLOCK_SIZE; in get_block_map_cache_size()
1421 static struct error_statistics __must_check get_vdo_error_statistics(const struct vdo *vdo) in get_vdo_error_statistics() argument
1428 const struct atomic_statistics *atoms = &vdo->stats; in get_vdo_error_statistics()
1466 static block_count_t __must_check vdo_get_physical_blocks_allocated(const struct vdo *vdo) in vdo_get_physical_blocks_allocated() argument
1468 return (vdo_get_slab_depot_allocated_blocks(vdo->depot) - in vdo_get_physical_blocks_allocated()
1469 vdo_get_journal_block_map_data_blocks_used(vdo->recovery_journal)); in vdo_get_physical_blocks_allocated()
1478 static block_count_t __must_check vdo_get_physical_blocks_overhead(const struct vdo *vdo) in vdo_get_physical_blocks_overhead() argument
1485 return (vdo->states.vdo.config.physical_blocks - in vdo_get_physical_blocks_overhead()
1486 vdo_get_slab_depot_data_blocks(vdo->depot) + in vdo_get_physical_blocks_overhead()
1487 vdo_get_journal_block_map_data_blocks_used(vdo->recovery_journal)); in vdo_get_physical_blocks_overhead()
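
These two derived counters partition physical space: blocks the recovery journal has charged to the block map count as overhead rather than user data. A worked example with assumed numbers:

    /*
     * Assume 1,048,576 physical blocks, a depot exposing 1,000,000 data
     * blocks, and 128 journal-charged block-map blocks:
     *
     *   overhead  = 1,048,576 - 1,000,000 + 128 = 48,704 blocks
     *   allocated = depot allocated - 128
     *
     * The same 128 blocks are subtracted from "allocated" and added to
     * "overhead", so they are counted exactly once, as metadata.
     */
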
1510 static void get_vdo_statistics(const struct vdo *vdo, struct vdo_statistics *stats) in get_vdo_statistics() argument
1512 struct recovery_journal *journal = vdo->recovery_journal; in get_vdo_statistics()
1513 enum vdo_state state = vdo_get_state(vdo); in get_vdo_statistics()
1515 vdo_assert_on_admin_thread(vdo, __func__); in get_vdo_statistics()
1525 stats->logical_blocks = vdo->states.vdo.config.logical_blocks; in get_vdo_statistics()
1531 stats->physical_blocks = vdo->states.vdo.config.physical_blocks; in get_vdo_statistics()
1533 stats->complete_recoveries = vdo->states.vdo.complete_recoveries; in get_vdo_statistics()
1534 stats->read_only_recoveries = vdo->states.vdo.read_only_recoveries; in get_vdo_statistics()
1535 stats->block_map_cache_size = get_block_map_cache_size(vdo); in get_vdo_statistics()
1538 stats->data_blocks_used = vdo_get_physical_blocks_allocated(vdo); in get_vdo_statistics()
1539 stats->overhead_blocks_used = vdo_get_physical_blocks_overhead(vdo); in get_vdo_statistics()
1541 vdo_get_slab_depot_statistics(vdo->depot, stats); in get_vdo_statistics()
1543 stats->packer = vdo_get_packer_statistics(vdo->packer); in get_vdo_statistics()
1544 stats->block_map = vdo_get_block_map_statistics(vdo->block_map); in get_vdo_statistics()
1545 vdo_get_dedupe_statistics(vdo->hash_zones, stats); in get_vdo_statistics()
1546 stats->errors = get_vdo_error_statistics(vdo); in get_vdo_statistics()
1550 stats->instance = vdo->instance; in get_vdo_statistics()
1551 stats->current_vios_in_progress = get_data_vio_pool_active_requests(vdo->data_vio_pool); in get_vdo_statistics()
1552 stats->max_vios = get_data_vio_pool_maximum_requests(vdo->data_vio_pool); in get_vdo_statistics()
1554 stats->flush_out = atomic64_read(&vdo->stats.flush_out); in get_vdo_statistics()
1555 stats->logical_block_size = vdo->device_config->logical_block_size; in get_vdo_statistics()
1556 copy_bio_stat(&stats->bios_in, &vdo->stats.bios_in); in get_vdo_statistics()
1557 copy_bio_stat(&stats->bios_in_partial, &vdo->stats.bios_in_partial); in get_vdo_statistics()
1558 copy_bio_stat(&stats->bios_out, &vdo->stats.bios_out); in get_vdo_statistics()
1559 copy_bio_stat(&stats->bios_meta, &vdo->stats.bios_meta); in get_vdo_statistics()
1560 copy_bio_stat(&stats->bios_journal, &vdo->stats.bios_journal); in get_vdo_statistics()
1561 copy_bio_stat(&stats->bios_page_cache, &vdo->stats.bios_page_cache); in get_vdo_statistics()
1562 copy_bio_stat(&stats->bios_out_completed, &vdo->stats.bios_out_completed); in get_vdo_statistics()
1563 copy_bio_stat(&stats->bios_meta_completed, &vdo->stats.bios_meta_completed); in get_vdo_statistics()
1565 &vdo->stats.bios_journal_completed); in get_vdo_statistics()
1567 &vdo->stats.bios_page_cache_completed); in get_vdo_statistics()
1568 copy_bio_stat(&stats->bios_acknowledged, &vdo->stats.bios_acknowledged); in get_vdo_statistics()
1569 copy_bio_stat(&stats->bios_acknowledged_partial, &vdo->stats.bios_acknowledged_partial); in get_vdo_statistics()
1585 get_vdo_statistics(completion->vdo, completion->parent); in vdo_fetch_statistics_callback()
1594 void vdo_fetch_statistics(struct vdo *vdo, struct vdo_statistics *stats) in vdo_fetch_statistics() argument
1596 perform_synchronous_action(vdo, vdo_fetch_statistics_callback, in vdo_fetch_statistics()
1597 vdo->thread_config.admin_thread, stats); in vdo_fetch_statistics()
1619 BUG_ON(thread_id >= thread->vdo->thread_config.thread_count); in vdo_get_callback_thread_id()
1620 BUG_ON(thread != &thread->vdo->threads[thread_id]); in vdo_get_callback_thread_id()
1630 void vdo_dump_status(const struct vdo *vdo) in vdo_dump_status() argument
1634 vdo_dump_flusher(vdo->flusher); in vdo_dump_status()
1635 vdo_dump_recovery_journal_statistics(vdo->recovery_journal); in vdo_dump_status()
1636 vdo_dump_packer(vdo->packer); in vdo_dump_status()
1637 vdo_dump_slab_depot(vdo->depot); in vdo_dump_status()
1639 for (zone = 0; zone < vdo->thread_config.logical_zone_count; zone++) in vdo_dump_status()
1640 vdo_dump_logical_zone(&vdo->logical_zones->zones[zone]); in vdo_dump_status()
1642 for (zone = 0; zone < vdo->thread_config.physical_zone_count; zone++) in vdo_dump_status()
1643 vdo_dump_physical_zone(&vdo->physical_zones->zones[zone]); in vdo_dump_status()
1645 vdo_dump_hash_zones(vdo->hash_zones); in vdo_dump_status()
1653 void vdo_assert_on_admin_thread(const struct vdo *vdo, const char *name) in vdo_assert_on_admin_thread() argument
1655 VDO_ASSERT_LOG_ONLY((vdo_get_callback_thread_id() == vdo->thread_config.admin_thread), in vdo_assert_on_admin_thread()
1666 void vdo_assert_on_logical_zone_thread(const struct vdo *vdo, zone_count_t logical_zone, in vdo_assert_on_logical_zone_thread() argument
1670 vdo->thread_config.logical_threads[logical_zone]), in vdo_assert_on_logical_zone_thread()
1681 void vdo_assert_on_physical_zone_thread(const struct vdo *vdo, in vdo_assert_on_physical_zone_thread() argument
1685 vdo->thread_config.physical_threads[physical_zone]), in vdo_assert_on_physical_zone_thread()
1704 int vdo_get_physical_zone(const struct vdo *vdo, physical_block_number_t pbn, in vdo_get_physical_zone() argument
1719 if (!vdo_is_physical_data_block(vdo->depot, pbn)) in vdo_get_physical_zone()
1723 slab = vdo_get_slab(vdo->depot, pbn); in vdo_get_physical_zone()
1728 *zone_ptr = &vdo->physical_zones->zones[slab->allocator->zone_number]; in vdo_get_physical_zone()
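
vdo_get_physical_zone() maps a PBN to its zone by way of the slab that contains it: validate the block, look up its slab, and read the owning allocator's zone number. A condensed sketch; the zero-PBN early return and the error code are assumptions filling the elided lines.

    int vdo_get_physical_zone(const struct vdo *vdo, physical_block_number_t pbn,
                              struct physical_zone **zone_ptr)
    {
            struct vdo_slab *slab;

            /* The zero block belongs to no zone (assumed early return). */
            if (pbn == VDO_ZERO_BLOCK) {
                    *zone_ptr = NULL;
                    return VDO_SUCCESS;
            }

            /* Reject PBNs outside the data region before the slab lookup. */
            if (!vdo_is_physical_data_block(vdo->depot, pbn))
                    return VDO_OUT_OF_RANGE;

            /* A valid data PBN always resolves to a slab, and the slab's
             * allocator identifies the owning zone. */
            slab = vdo_get_slab(vdo->depot, pbn);
            *zone_ptr = &vdo->physical_zones->zones[slab->allocator->zone_number];
            return VDO_SUCCESS;
    }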