Lines Matching full:vdo (matches of "vdo" in drivers/md/dm-vdo/repair.c)
24 #include "vdo.h"
207 const struct thread_config *thread_config = &completion->vdo->thread_config; in prepare_repair_completion()
242 repair->completion.vdo->block_map->zones[0].page_cache.rebuilding = false; in free_repair_completion()
253 struct vdo *vdo = completion->vdo; in finish_repair() local
256 vdo_assert_on_admin_thread(vdo, __func__); in finish_repair()
258 if (vdo->load_state != VDO_REBUILD_FOR_UPGRADE) in finish_repair()
259 vdo->states.vdo.complete_recoveries++; in finish_repair()
261 vdo_initialize_recovery_journal_post_repair(vdo->recovery_journal, in finish_repair()
262 vdo->states.vdo.complete_recoveries, in finish_repair()
268 if (vdo_state_requires_read_only_rebuild(vdo->load_state)) { in finish_repair()
281 vdo_continue_completion(parent, vdo_allocate_reference_counters(vdo->depot)); in finish_repair()
294 if (vdo_state_requires_read_only_rebuild(completion->vdo->load_state)) in abort_repair()
326 struct vdo *vdo = completion->vdo; in drain_slab_depot() local
330 vdo_assert_on_admin_thread(vdo, __func__); in drain_slab_depot()
333 if (vdo_state_requires_read_only_rebuild(vdo->load_state)) { in drain_slab_depot()
342 vdo_drain_slab_depot(vdo->depot, operation, completion); in drain_slab_depot()
353 vdo_assert_on_admin_thread(completion->vdo, __func__); in flush_block_map_updates()
358 vdo_drain_block_map(completion->vdo->block_map, VDO_ADMIN_STATE_RECOVERING, in flush_block_map_updates()
423 struct slab_depot *depot = completion->vdo->depot; in process_slot()
527 if (vdo_is_physical_data_block(repair->completion.vdo->depot, pbn)) in get_pbn_to_fetch()
545 struct block_map *block_map = repair->completion.vdo->block_map; in fetch_page()
577 struct block_map *map = completion->vdo->block_map; in rebuild_from_leaves()
616 struct slab_depot *depot = completion->vdo->depot; in process_entry()
640 struct vdo *vdo = completion->vdo; in rebuild_reference_counts() local
641 struct vdo_page_cache *cache = &vdo->block_map->zones[0].page_cache; in rebuild_reference_counts()
644 if (abort_on_error(vdo_allocate_reference_counters(vdo->depot), repair)) in rebuild_reference_counts()
655 vdo_traverse_forest(vdo->block_map, process_entry, completion); in rebuild_reference_counts()
738 sector = get_sector(repair->completion.vdo->recovery_journal, in get_entry()
746 * @vdo: The vdo.
751 static int validate_recovery_journal_entry(const struct vdo *vdo, in validate_recovery_journal_entry() argument
754 if ((entry->slot.pbn >= vdo->states.vdo.config.physical_blocks) || in validate_recovery_journal_entry()
758 !vdo_is_physical_data_block(vdo->depot, entry->mapping.pbn) || in validate_recovery_journal_entry()
759 !vdo_is_physical_data_block(vdo->depot, entry->unmapping.pbn)) { in validate_recovery_journal_entry()
797 struct vdo *vdo = completion->vdo; in add_slab_journal_entries() local
798 struct recovery_journal *journal = vdo->recovery_journal; in add_slab_journal_entries()
815 result = validate_recovery_journal_entry(vdo, &entry); in add_slab_journal_entries()
817 vdo_enter_read_only_mode(vdo, result); in add_slab_journal_entries()
830 slab = vdo_get_slab(vdo->depot, pbn); in add_slab_journal_entries()
855 struct vdo *vdo = completion->vdo; in vdo_replay_into_slab_journals() local
857 vdo_assert_on_physical_zone_thread(vdo, allocator->zone_number, __func__); in vdo_replay_into_slab_journals()
860 repair->logical_blocks_used = vdo->recovery_journal->logical_blocks_used; in vdo_replay_into_slab_journals()
861 repair->block_map_data_blocks = vdo->recovery_journal->block_map_data_blocks; in vdo_replay_into_slab_journals()
888 vdo_assert_on_admin_thread(completion->vdo, __func__); in load_slab_depot()
890 if (vdo_state_requires_read_only_rebuild(completion->vdo->load_state)) { in load_slab_depot()
899 vdo_load_slab_depot(completion->vdo->depot, operation, completion, repair); in load_slab_depot()
907 vdo_assert_on_admin_thread(completion->vdo, __func__); in flush_block_map()
911 operation = (vdo_state_requires_read_only_rebuild(completion->vdo->load_state) ? in flush_block_map()
914 vdo_drain_block_map(completion->vdo->block_map, operation, completion); in flush_block_map()
1041 &repair->completion.vdo->block_map->zones[0], pbn, true, in fetch_block_map_page()
1099 struct vdo *vdo = completion->vdo; in recover_block_map() local
1103 vdo_assert_on_logical_zone_thread(vdo, 0, __func__); in recover_block_map()
1106 vdo->block_map->zones[0].page_cache.rebuilding = in recover_block_map()
1107 vdo_state_requires_read_only_rebuild(vdo->load_state); in recover_block_map()
1224 struct recovery_journal *journal = repair->completion.vdo->recovery_journal; in find_recovery_journal_head_and_tail()
1268 * @vdo: The vdo.
1275 static bool unpack_entry(struct vdo *vdo, char *packed, enum vdo_metadata_type format, in unpack_entry() argument
1309 return (validate_recovery_journal_entry(vdo, entry) == VDO_SUCCESS); in unpack_entry()
1326 struct vdo *vdo = repair->completion.vdo; in append_sector_entries() local
1334 if (!unpack_entry(vdo, entries, format, &entry)) in append_sector_entries()
1406 struct vdo *vdo = repair->completion.vdo; in parse_journal_for_rebuild() local
1407 struct recovery_journal *journal = vdo->recovery_journal; in parse_journal_for_rebuild()
1456 struct vdo *vdo = repair->completion.vdo; in extract_new_mappings() local
1476 result = validate_recovery_journal_entry(vdo, &entry); in extract_new_mappings()
1478 vdo_enter_read_only_mode(vdo, result); in extract_new_mappings()
1495 vdo_enter_read_only_mode(vdo, result); in extract_new_mappings()
1517 struct vdo *vdo = repair->completion.vdo; in compute_usages() local
1518 struct recovery_journal *journal = vdo->recovery_journal; in compute_usages()
1531 result = validate_recovery_journal_entry(vdo, &entry); in compute_usages()
1533 vdo_enter_read_only_mode(vdo, result); in compute_usages()
1557 struct recovery_journal *journal = repair->completion.vdo->recovery_journal; in parse_journal_for_recovery()
1583 vdo_enter_read_only_mode(repair->completion.vdo, VDO_CORRUPT_JOURNAL); in parse_journal_for_recovery()
1653 return (vdo_state_requires_read_only_rebuild(repair->completion.vdo->load_state) ? in parse_journal()
1684 struct vdo *vdo = vio->completion.vdo; in read_journal_endio() local
1686 continue_vio_after_io(vio, finish_journal_load, vdo->thread_config.admin_thread); in read_journal_endio()
1690 * vdo_repair() - Load the recovery journal and then recover or rebuild a vdo.
1698 struct vdo *vdo = parent->vdo; in vdo_repair() local
1699 struct recovery_journal *journal = vdo->recovery_journal; in vdo_repair()
1704 vdo->device_config->cache_size >> 1, in vdo_repair()
1707 vdo_assert_on_admin_thread(vdo, __func__); in vdo_repair()
1709 if (vdo->load_state == VDO_FORCE_REBUILD) { in vdo_repair()
1711 vdo->states.vdo.read_only_recoveries++; in vdo_repair()
1712 } else if (vdo->load_state == VDO_REBUILD_FOR_UPGRADE) { in vdo_repair()
1726 vdo_initialize_completion(&repair->completion, vdo, VDO_REPAIR_COMPLETION); in vdo_repair()
1746 result = allocate_vio_components(vdo, VIO_TYPE_RECOVERY_JOURNAL, in vdo_repair()