Lines Matching refs:zone

60 struct block_map_zone *zone; member
98 struct block_map_zone *zone; member
199 info->vio->completion.callback_thread_id = cache->zone->thread_id; in initialize_info()
249 VDO_ASSERT_LOG_ONLY((thread_id == cache->zone->thread_id), in assert_on_cache_thread()
251 function_name, cache->zone->thread_id, thread_id); in assert_on_cache_thread()
257 VDO_ASSERT_LOG_ONLY(!vdo_is_state_quiescent(&cache->zone->state), in assert_io_allowed()
624 static void check_for_drain_complete(struct block_map_zone *zone) in check_for_drain_complete() argument
626 if (vdo_is_state_draining(&zone->state) && in check_for_drain_complete()
627 (zone->active_lookups == 0) && in check_for_drain_complete()
628 !vdo_waitq_has_waiters(&zone->flush_waiters) && in check_for_drain_complete()
629 !is_vio_pool_busy(zone->vio_pool) && in check_for_drain_complete()
630 (zone->page_cache.outstanding_reads == 0) && in check_for_drain_complete()
631 (zone->page_cache.outstanding_writes == 0)) { in check_for_drain_complete()
632 vdo_finish_draining_with_result(&zone->state, in check_for_drain_complete()
633 (vdo_is_read_only(zone->block_map->vdo) ? in check_for_drain_complete()
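The check_for_drain_complete() matches above spell out the full drain-completion predicate. Below is a minimal standalone sketch of that check, with stand-in fields replacing the real zone, cache, and wait-queue types; only the shape of the condition is taken from the matched lines.

#include <stdbool.h>

/* Stand-ins for the fields the real check consults. */
struct zone_drain_state {
	bool draining;			/* vdo_is_state_draining(&zone->state) */
	unsigned int active_lookups;
	bool has_flush_waiters;		/* vdo_waitq_has_waiters(&zone->flush_waiters) */
	bool vio_pool_busy;		/* is_vio_pool_busy(zone->vio_pool) */
	unsigned int outstanding_reads;	/* zone->page_cache.outstanding_reads */
	unsigned int outstanding_writes;
};

/* A zone's drain finishes only when every kind of outstanding work is gone. */
static bool drain_is_complete(const struct zone_drain_state *z)
{
	return (z->draining &&
		(z->active_lookups == 0) &&
		!z->has_flush_waiters &&
		!z->vio_pool_busy &&
		(z->outstanding_reads == 0) &&
		(z->outstanding_writes == 0));
}
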
638 static void enter_zone_read_only_mode(struct block_map_zone *zone, int result) in enter_zone_read_only_mode() argument
640 vdo_enter_read_only_mode(zone->block_map->vdo, result); in enter_zone_read_only_mode()
646 vdo_waitq_init(&zone->flush_waiters); in enter_zone_read_only_mode()
647 check_for_drain_complete(zone); in enter_zone_read_only_mode()
659 enter_zone_read_only_mode(completion->info->cache->zone, result); in validate_completed_page_or_enter_read_only_mode()
675 vdo_enter_read_only_mode(cache->zone->block_map->vdo, result); in handle_load_error()
686 check_for_drain_complete(cache->zone); in handle_load_error()
697 nonce_t nonce = info->cache->zone->block_map->nonce; in page_is_loaded()
728 check_for_drain_complete(cache->zone); in page_is_loaded()
758 continue_vio_after_io(vio, page_is_loaded, info->cache->zone->thread_id); in load_cache_page_endio()
809 continue_vio_after_io(vio, write_pages, info->cache->zone->thread_id); in flush_endio()
1027 check_for_drain_complete(cache->zone); in handle_page_write_error()
1037 continue_vio_after_io(vio, page_is_written_out, info->cache->zone->thread_id); in write_cache_page_endio()
1062 vdo_release_recovery_journal_block_reference(cache->zone->block_map->journal, in page_is_written_out()
1065 cache->zone->zone_number); in page_is_written_out()
1083 check_for_drain_complete(cache->zone); in page_is_written_out()
1211 struct block_map_zone *zone, physical_block_number_t pbn, in vdo_get_page() argument
1215 struct vdo_page_cache *cache = &zone->page_cache; in vdo_get_page()
1231 cache->zone->thread_id, parent); in vdo_get_page()
1377 static inline struct tree_page *get_tree_page(const struct block_map_zone *zone, in get_tree_page() argument
1380 return get_tree_page_by_index(zone->block_map->forest, lock->root_index, in get_tree_page()
1439 static bool __must_check is_not_older(struct block_map_zone *zone, u8 a, u8 b) in is_not_older() argument
1443 result = VDO_ASSERT((in_cyclic_range(zone->oldest_generation, a, zone->generation, 1 << 8) && in is_not_older()
1444 in_cyclic_range(zone->oldest_generation, b, zone->generation, 1 << 8)), in is_not_older()
1446 a, b, zone->oldest_generation, zone->generation); in is_not_older()
1448 enter_zone_read_only_mode(zone, result); in is_not_older()
1452 return in_cyclic_range(b, a, zone->generation, 1 << 8); in is_not_older()
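The is_not_older() matches compare u8 generation counters through in_cyclic_range() so the comparison stays correct across 8-bit wraparound. The helper below is a hypothetical stand-in illustrating the same wrap-safe trick, not the kernel's own implementation.

#include <stdbool.h>
#include <stdint.h>

/*
 * True when "value" lies in the cyclic interval [low, high] modulo 256.
 * u8 subtraction wraps, so forward distances measured from "low" compare
 * correctly even after the counters have wrapped past 255.
 */
static bool u8_in_cyclic_range(uint8_t low, uint8_t value, uint8_t high)
{
	return (uint8_t)(value - low) <= (uint8_t)(high - low);
}

With this helper, the final return of is_not_older() amounts to u8_in_cyclic_range(b, a, zone->generation): generation a is "not older" than b when it falls at or after b within the window ending at the zone's current generation.
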
1455 static void release_generation(struct block_map_zone *zone, u8 generation) in release_generation() argument
1459 result = VDO_ASSERT((zone->dirty_page_counts[generation] > 0), in release_generation()
1462 enter_zone_read_only_mode(zone, result); in release_generation()
1466 zone->dirty_page_counts[generation]--; in release_generation()
1467 while ((zone->dirty_page_counts[zone->oldest_generation] == 0) && in release_generation()
1468 (zone->oldest_generation != zone->generation)) in release_generation()
1469 zone->oldest_generation++; in release_generation()
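The release_generation() matches show the dirty-page bookkeeping: drop the count for the released generation, then slide oldest_generation forward past any generations that no longer hold dirty pages. A sketch, assuming the counts are a simple 256-entry array indexed by the u8 generation number as the matched lines suggest:

#include <stdint.h>

static void release_generation_sketch(uint32_t dirty_counts[256],
				      uint8_t *oldest, uint8_t current,
				      uint8_t generation)
{
	/* The real code asserts the count is non-zero before decrementing. */
	dirty_counts[generation]--;

	/* Advance the oldest generation over empty slots; u8 wraps at 255. */
	while ((dirty_counts[*oldest] == 0) && (*oldest != current))
		(*oldest)++;
}
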
1472 static void set_generation(struct block_map_zone *zone, struct tree_page *page, in set_generation() argument
1484 new_count = ++zone->dirty_page_counts[new_generation]; in set_generation()
1488 enter_zone_read_only_mode(zone, result); in set_generation()
1493 release_generation(zone, old_generation); in set_generation()
1504 static void acquire_vio(struct vdo_waiter *waiter, struct block_map_zone *zone) in acquire_vio() argument
1507 acquire_vio_from_pool(zone->vio_pool, waiter); in acquire_vio()
1511 static bool attempt_increment(struct block_map_zone *zone) in attempt_increment() argument
1513 u8 generation = zone->generation + 1; in attempt_increment()
1515 if (zone->oldest_generation == generation) in attempt_increment()
1518 zone->generation = generation; in attempt_increment()
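attempt_increment() only bumps the zone's generation when the new value would not collide with oldest_generation, i.e. when fewer than 256 generations are outstanding. A standalone sketch of that guard, with plain pointers standing in for the zone fields:

#include <stdbool.h>
#include <stdint.h>

static bool try_advance_generation(uint8_t *generation, uint8_t oldest)
{
	uint8_t next = (uint8_t)(*generation + 1);

	if (next == oldest)
		return false;	/* every 8-bit generation is still in use */

	*generation = next;
	return true;
}
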
1523 static void enqueue_page(struct tree_page *page, struct block_map_zone *zone) in enqueue_page() argument
1525 if ((zone->flusher == NULL) && attempt_increment(zone)) { in enqueue_page()
1526 zone->flusher = page; in enqueue_page()
1527 acquire_vio(&page->waiter, zone); in enqueue_page()
1531 vdo_waitq_enqueue_waiter(&zone->flush_waiters, &page->waiter); in enqueue_page()
1540 acquire_vio(waiter, write_context->zone); in write_page_if_not_dirtied()
1544 enqueue_page(page, write_context->zone); in write_page_if_not_dirtied()
1547 static void return_to_pool(struct block_map_zone *zone, struct pooled_vio *vio) in return_to_pool() argument
1549 return_vio_to_pool(zone->vio_pool, vio); in return_to_pool()
1550 check_for_drain_complete(zone); in return_to_pool()
1560 struct block_map_zone *zone = pooled->context; in finish_page_write() local
1562 vdo_release_recovery_journal_block_reference(zone->block_map->journal, in finish_page_write()
1565 zone->zone_number); in finish_page_write()
1568 release_generation(zone, page->writing_generation); in finish_page_write()
1571 if (zone->flusher == page) { in finish_page_write()
1573 .zone = zone, in finish_page_write()
1577 vdo_waitq_notify_all_waiters(&zone->flush_waiters, in finish_page_write()
1579 if (dirty && attempt_increment(zone)) { in finish_page_write()
1584 zone->flusher = NULL; in finish_page_write()
1588 enqueue_page(page, zone); in finish_page_write()
1589 } else if ((zone->flusher == NULL) && vdo_waitq_has_waiters(&zone->flush_waiters) && in finish_page_write()
1590 attempt_increment(zone)) { in finish_page_write()
1591 zone->flusher = container_of(vdo_waitq_dequeue_waiter(&zone->flush_waiters), in finish_page_write()
1593 write_page(zone->flusher, pooled); in finish_page_write()
1597 return_to_pool(zone, pooled); in finish_page_write()
1605 struct block_map_zone *zone = pooled->context; in handle_write_error() local
1608 enter_zone_read_only_mode(zone, result); in handle_write_error()
1609 return_to_pool(zone, pooled); in handle_write_error()
1618 struct block_map_zone *zone = pooled->context; in write_initialized_page() local
1629 if (zone->flusher == tree_page) in write_initialized_page()
1640 struct block_map_zone *zone = vio->context; in write_page_endio() local
1646 zone->thread_id); in write_page_endio()
1652 struct block_map_zone *zone = vio->context; in write_page() local
1655 if ((zone->flusher != tree_page) && in write_page()
1656 is_not_older(zone, tree_page->generation, zone->generation)) { in write_page()
1661 enqueue_page(tree_page, zone); in write_page()
1662 return_to_pool(zone, vio); in write_page()
1668 completion->callback_thread_id = zone->thread_id; in write_page()
1697 struct block_map_zone *zone; in release_page_lock() local
1705 zone = data_vio->logical.zone->block_map_zone; in release_page_lock()
1706 lock_holder = vdo_int_map_remove(zone->loading_pages, lock->key); in release_page_lock()
1717 --data_vio->logical.zone->block_map_zone->active_lookups; in finish_lookup()
1742 enter_zone_read_only_mode(data_vio->logical.zone->block_map_zone, result); in abort_lookup()
1775 static void load_block_map_page(struct block_map_zone *zone, struct data_vio *data_vio);
1776 static void allocate_block_map_page(struct block_map_zone *zone,
1799 allocate_block_map_page(data_vio->logical.zone->block_map_zone, in continue_with_loaded_page()
1811 load_block_map_page(data_vio->logical.zone->block_map_zone, data_vio); in continue_with_loaded_page()
1831 struct block_map_zone *zone = pooled->context; in finish_block_map_page_load() local
1836 tree_page = get_tree_page(zone, tree_lock); in finish_block_map_page_load()
1838 nonce = zone->block_map->nonce; in finish_block_map_page_load()
1842 return_vio_to_pool(zone->vio_pool, pooled); in finish_block_map_page_load()
1856 struct block_map_zone *zone = pooled->context; in handle_io_error() local
1859 return_vio_to_pool(zone->vio_pool, pooled); in handle_io_error()
1869 data_vio->logical.zone->thread_id); in load_page_endio()
1888 static int attempt_page_lock(struct block_map_zone *zone, struct data_vio *data_vio) in attempt_page_lock() argument
1905 result = vdo_int_map_put(zone->loading_pages, lock->key, in attempt_page_lock()
1922 static void load_block_map_page(struct block_map_zone *zone, struct data_vio *data_vio) in load_block_map_page() argument
1926 result = attempt_page_lock(zone, data_vio); in load_block_map_page()
1934 acquire_vio_from_pool(zone->vio_pool, &data_vio->waiter); in load_block_map_page()
1943 data_vio->logical.zone->thread_id)) in allocation_failure()
1963 allocate_block_map_page(data_vio->logical.zone->block_map_zone, data_vio); in continue_allocation_for_waiter()
1998 static void write_expired_elements(struct block_map_zone *zone) in write_expired_elements() argument
2003 u8 generation = zone->generation; in write_expired_elements()
2005 expired = &zone->dirty_lists->expired[VDO_TREE_PAGE]; in write_expired_elements()
2014 enter_zone_read_only_mode(zone, result); in write_expired_elements()
2018 set_generation(zone, page, generation); in write_expired_elements()
2020 enqueue_page(page, zone); in write_expired_elements()
2023 expired = &zone->dirty_lists->expired[VDO_CACHE_PAGE]; in write_expired_elements()
2029 save_pages(&zone->page_cache); in write_expired_elements()
2041 static void add_to_dirty_lists(struct block_map_zone *zone, in add_to_dirty_lists() argument
2047 struct dirty_lists *dirty_lists = zone->dirty_lists; in add_to_dirty_lists()
2060 write_expired_elements(zone); in add_to_dirty_lists()
2074 struct block_map_zone *zone = data_vio->logical.zone->block_map_zone; in finish_block_map_allocation() local
2080 tree_page = get_tree_page(zone, tree_lock); in finish_block_map_allocation()
2092 if (zone->flusher != tree_page) { in finish_block_map_allocation()
2097 set_generation(zone, tree_page, zone->generation); in finish_block_map_allocation()
2103 add_to_dirty_lists(zone, &tree_page->entry, VDO_TREE_PAGE, in finish_block_map_allocation()
2110 tree_page = get_tree_page(zone, tree_lock); in finish_block_map_allocation()
2112 zone->block_map->nonce, in finish_block_map_allocation()
2125 allocate_block_map_page(zone, data_vio); in finish_block_map_allocation()
2190 static void allocate_block_map_page(struct block_map_zone *zone, in allocate_block_map_page() argument
2201 result = attempt_page_lock(zone, data_vio); in allocate_block_map_page()
2227 struct block_map_zone *zone = data_vio->logical.zone->block_map_zone; in vdo_find_block_map_slot() local
2229 zone->active_lookups++; in vdo_find_block_map_slot()
2230 if (vdo_is_state_draining(&zone->state)) { in vdo_find_block_map_slot()
2237 page_index = (lock->tree_slots[0].page_index / zone->block_map->root_count); in vdo_find_block_map_slot()
2250 page = (struct block_map_page *) (get_tree_page(zone, lock)->page_buffer); in vdo_find_block_map_slot()
2277 allocate_block_map_page(zone, data_vio); in vdo_find_block_map_slot()
2289 load_block_map_page(zone, data_vio); in vdo_find_block_map_slot()
2323 void vdo_write_tree_page(struct tree_page *page, struct block_map_zone *zone) in vdo_write_tree_page() argument
2327 if (waiting && (zone->flusher == page)) in vdo_write_tree_page()
2330 set_generation(zone, page, zone->generation); in vdo_write_tree_page()
2334 enqueue_page(page, zone); in vdo_write_tree_page()
2539 cursor->parent->zone->block_map->nonce, in finish_traversal_load()
2550 cursor->parent->zone->thread_id); in traversal_endio()
2580 vdo_write_tree_page(tree_page, cursor->parent->zone); in traverse()
2590 vdo_write_tree_page(tree_page, cursor->parent->zone); in traverse()
2599 vdo_write_tree_page(tree_page, cursor->parent->zone); in traverse()
2636 pooled->vio.completion.callback_thread_id = cursor->parent->zone->thread_id; in launch_cursor()
2691 cursors->zone = &map->zones[0]; in vdo_traverse_forest()
2692 cursors->pool = cursors->zone->vio_pool; in vdo_traverse_forest()
2724 struct block_map_zone *zone = &map->zones[zone_number]; in initialize_block_map_zone() local
2728 zone->zone_number = zone_number; in initialize_block_map_zone()
2729 zone->thread_id = vdo->thread_config.logical_threads[zone_number]; in initialize_block_map_zone()
2730 zone->block_map = map; in initialize_block_map_zone()
2734 &zone->dirty_lists); in initialize_block_map_zone()
2738 zone->dirty_lists->maximum_age = maximum_age; in initialize_block_map_zone()
2739 INIT_LIST_HEAD(&zone->dirty_lists->expired[VDO_TREE_PAGE]); in initialize_block_map_zone()
2740 INIT_LIST_HEAD(&zone->dirty_lists->expired[VDO_CACHE_PAGE]); in initialize_block_map_zone()
2743 INIT_LIST_HEAD(&zone->dirty_lists->eras[i][VDO_TREE_PAGE]); in initialize_block_map_zone()
2744 INIT_LIST_HEAD(&zone->dirty_lists->eras[i][VDO_CACHE_PAGE]); in initialize_block_map_zone()
2747 result = vdo_int_map_create(VDO_LOCK_MAP_CAPACITY, &zone->loading_pages); in initialize_block_map_zone()
2752 zone->thread_id, VIO_TYPE_BLOCK_MAP_INTERIOR, in initialize_block_map_zone()
2753 VIO_PRIORITY_METADATA, zone, &zone->vio_pool); in initialize_block_map_zone()
2757 vdo_set_admin_state_code(&zone->state, VDO_ADMIN_STATE_NORMAL_OPERATION); in initialize_block_map_zone()
2759 zone->page_cache.zone = zone; in initialize_block_map_zone()
2760 zone->page_cache.vdo = vdo; in initialize_block_map_zone()
2761 zone->page_cache.page_count = cache_size / map->zone_count; in initialize_block_map_zone()
2762 zone->page_cache.stats.free_pages = zone->page_cache.page_count; in initialize_block_map_zone()
2764 result = allocate_cache_components(&zone->page_cache); in initialize_block_map_zone()
2769 INIT_LIST_HEAD(&zone->page_cache.lru_list); in initialize_block_map_zone()
2770 INIT_LIST_HEAD(&zone->page_cache.outgoing_list); in initialize_block_map_zone()
2797 struct block_map_zone *zone = &map->zones[zone_number]; in advance_block_map_zone_era() local
2799 update_period(zone->dirty_lists, map->current_era_point); in advance_block_map_zone_era()
2800 write_expired_elements(zone); in advance_block_map_zone_era()
2821 static void uninitialize_block_map_zone(struct block_map_zone *zone) in uninitialize_block_map_zone() argument
2823 struct vdo_page_cache *cache = &zone->page_cache; in uninitialize_block_map_zone()
2825 vdo_free(vdo_forget(zone->dirty_lists)); in uninitialize_block_map_zone()
2826 free_vio_pool(vdo_forget(zone->vio_pool)); in uninitialize_block_map_zone()
2827 vdo_int_map_free(vdo_forget(zone->loading_pages)); in uninitialize_block_map_zone()
2842 zone_count_t zone; in vdo_free_block_map() local
2847 for (zone = 0; zone < map->zone_count; zone++) in vdo_free_block_map()
2848 uninitialize_block_map_zone(&map->zones[zone]); in vdo_free_block_map()
2865 zone_count_t zone = 0; in vdo_decode_block_map() local
2896 for (zone = 0; zone < map->zone_count; zone++) { in vdo_decode_block_map()
2897 result = initialize_block_map_zone(map, zone, cache_size, maximum_age); in vdo_decode_block_map()
2972 struct block_map_zone *zone = container_of(state, struct block_map_zone, state); in initiate_drain() local
2974 VDO_ASSERT_LOG_ONLY((zone->active_lookups == 0), in initiate_drain()
2978 while (zone->dirty_lists->oldest_period < zone->dirty_lists->next_period) in initiate_drain()
2979 expire_oldest_list(zone->dirty_lists); in initiate_drain()
2980 write_expired_elements(zone); in initiate_drain()
2983 check_for_drain_complete(zone); in initiate_drain()
2991 struct block_map_zone *zone = &map->zones[zone_number]; in drain_zone() local
2993 vdo_start_draining(&zone->state, in drain_zone()
3010 struct block_map_zone *zone = &map->zones[zone_number]; in resume_block_map_zone() local
3012 vdo_fail_completion(parent, vdo_resume_if_quiescent(&zone->state)); in resume_block_map_zone()
3082 struct block_map_zone *zone = data_vio->logical.zone->block_map_zone; in fetch_mapping_page() local
3084 if (vdo_is_state_draining(&zone->state)) { in fetch_mapping_page()
3089 vdo_get_page(&data_vio->page_completion, zone, in fetch_mapping_page()
3124 mapped.pbn, &data_vio->mapped.zone); in set_mapped_location()
3196 struct block_map_zone *zone = data_vio->logical.zone->block_map_zone; in vdo_update_block_map_page() local
3197 struct block_map *block_map = zone->block_map; in vdo_update_block_map_page()
3213 zone->zone_number); in vdo_update_block_map_page()
3218 zone->zone_number); in vdo_update_block_map_page()
3258 add_to_dirty_lists(info->cache->zone, &info->state_entry, in put_mapping_in_fetched_page()
3287 zone_count_t zone = 0; in vdo_get_block_map_statistics() local
3291 for (zone = 0; zone < map->zone_count; zone++) { in vdo_get_block_map_statistics()
3293 &(map->zones[zone].page_cache.stats); in vdo_get_block_map_statistics()