/*
 * Excerpts from the UDS volume implementation (dm-vdo). Code between the
 * excerpted functions is omitted.
 */
// SPDX-License-Identifier: GPL-2.0-only
#include "volume.h"

#include <linux/dm-bufio.h>

#include "memory-alloc.h"
#include "string-utils.h"
#include "thread-utils.h"

#include "chapter-index.h"
#include "hash-utils.h"
#include "sparse-cache.h"
/*
 * The first block of the volume layout is reserved for the volume header, which is no longer
 * used. The remainder of the volume is divided into chapters consisting of several pages of
 * records, and several pages of static index used to find those records. Chapters are written
 * in order as they are filled, so the volume storage acts as a circular log of the most recent
 * chapters, with each new chapter overwriting the oldest saved one.
 *
 * When a chapter is closed, its records are assigned to record pages and a static delta
 * index is generated to store which record page contains each record. The in-memory index page map
 * is also updated to indicate which delta lists fall on each chapter index page. This means that
 * when a record is read, the volume only has to load a single index page and a single record page,
 * rather than search the entire chapter.
 *
 * When reading a record, the volume index will indicate which chapter should contain it. The
 * volume uses the index page map to determine which chapter index page needs to be loaded, and
 * then reads the relevant record page number from the chapter index. Both page types are kept
 * in a page cache once read, and the least recently used entries are evicted as needed. In
 * addition, the volume uses dm-bufio to manage access to the storage, which may allow for
 * additional caching depending on available system resources.
 *
 * When an index rebuild is necessary, the volume reads each stored chapter to determine which
 * range of chapters contains valid records, so that those records can be used to reconstruct the
 * in-memory volume index.
 */
static u32 map_to_page_number(struct index_geometry *geometry, u32 physical_page)
{
        return (physical_page - HEADER_PAGES_PER_VOLUME) % geometry->pages_per_chapter;
}

static u32 map_to_chapter_number(struct index_geometry *geometry, u32 physical_page)
{
        return (physical_page - HEADER_PAGES_PER_VOLUME) / geometry->pages_per_chapter;
}

static bool is_record_page(struct index_geometry *geometry, u32 physical_page)
{
        return map_to_page_number(geometry, physical_page) >= geometry->index_pages_per_chapter;
}

static u32 map_to_physical_page(const struct index_geometry *geometry, u32 chapter, u32 page)
{
        /* Page zero is the header page, so the first chapter index page is page one. */
        return HEADER_PAGES_PER_VOLUME + (geometry->pages_per_chapter * chapter) + page;
}

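/*
 * Worked example (hypothetical geometry, not a constant from this file): with
 * one header page and 256 pages per chapter, page 10 of chapter 3 maps to
 * physical page 1 + (256 * 3) + 10 = 779; map_to_chapter_number(779) recovers
 * 3 and map_to_page_number(779) recovers 10.
 */
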
static inline union invalidate_counter get_invalidate_counter(struct page_cache *cache,
                                                              unsigned int zone_number)
{
        return (union invalidate_counter) {
                .value = READ_ONCE(cache->search_pending_counters[zone_number].atomic_value),
        };
}

static inline void set_invalidate_counter(struct page_cache *cache, unsigned int zone_number,
                                          union invalidate_counter invalidate_counter)
{
        WRITE_ONCE(cache->search_pending_counters[zone_number].atomic_value,
                   invalidate_counter.value);
}

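/*
 * Each zone's invalidate counter packs the page being searched together with
 * a sequence count; an odd count means a search is in progress in that zone.
 * A zone bumps the count when it begins and ends a search, so an invalidator
 * can simply wait for the value to change (see wait_for_pending_searches()).
 */
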
static void wait_for_pending_searches(struct page_cache *cache, u32 physical_page)
{
        union invalidate_counter initial_counters[MAX_ZONES];
        unsigned int i;

        for (i = 0; i < cache->zone_count; i++)
                initial_counters[i] = get_invalidate_counter(cache, i);
        for (i = 0; i < cache->zone_count; i++) {
                /* ... spin (with cond_resched()) until any search of physical_page in zone i ends ... */
        }
}

static void release_page_buffer(struct cached_page *page)
{
        if (page->buffer != NULL)
                dm_bufio_release(vdo_forget(page->buffer));
}

static void clear_cache_page(struct page_cache *cache, struct cached_page *page)
{
        /* Do not clear read_pending because the read queue relies on it. */
        release_page_buffer(page);
        page->physical_page = cache->indexable_pages;
        WRITE_ONCE(page->last_used, 0);
}

static void make_page_most_recent(struct page_cache *cache, struct cached_page *page)
{
        if (atomic64_read(&cache->clock) != READ_ONCE(page->last_used))
                WRITE_ONCE(page->last_used, atomic64_inc_return(&cache->clock));
}

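/*
 * This is an approximate LRU: rather than moving list nodes on every hit,
 * the cache stamps pages from a single atomic clock, and only when the stamp
 * is stale. Repeated hits on a hot page are therefore nearly free.
 */
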
static struct cached_page *select_victim_in_cache(struct page_cache *cache)
{
        struct cached_page *page;
        int oldest_index = 0;
        s64 oldest_time = S64_MAX;
        s64 last_used;
        u16 i;

        /* Find the oldest unclaimed page. */
        for (i = 0; i < cache->cache_slots; i++) {
                /* A page with a pending read must not be replaced. */
                if (cache->cache[i].read_pending)
                        continue;

                last_used = READ_ONCE(cache->cache[i].last_used);
                if (last_used <= oldest_time) {
                        oldest_time = last_used;
                        oldest_index = i;
                }
        }

        /* Unmap the victim and wait out any searches before reusing it. */
        page = &cache->cache[oldest_index];
        if (page->physical_page != cache->indexable_pages) {
                WRITE_ONCE(cache->index[page->physical_page], cache->cache_slots);
                wait_for_pending_searches(cache, page->physical_page);
        }

        page->read_pending = true;
        clear_cache_page(cache, page);
        return page;
}

/* Make a newly filled cache entry available to other threads. */
static int put_page_in_cache(struct page_cache *cache, u32 physical_page,
                             struct cached_page *page)
{
        int result;

        /* We hold the read_threads_mutex. */
        result = VDO_ASSERT((page->read_pending), "page to install has a pending read");
        if (result != VDO_SUCCESS)
                return result;

        page->physical_page = physical_page;
        make_page_most_recent(cache, page);
        page->read_pending = false;

        /* Publish the page with a write barrier before exposing it to lock-free readers. */
        smp_wmb();

        /* This assignment also clears the queued flag. */
        WRITE_ONCE(cache->index[physical_page], page - cache->cache);
        return UDS_SUCCESS;
}

static void cancel_page_in_cache(struct page_cache *cache, u32 physical_page,
                                 struct cached_page *page)
{
        int result;

        /* We hold the read_threads_mutex. */
        result = VDO_ASSERT((page->read_pending), "page to install has a pending read");
        if (result != VDO_SUCCESS)
                return;

        clear_cache_page(cache, page);
        page->read_pending = false;

        /* Clear the mapping and the queued flag for the new page. */
        WRITE_ONCE(cache->index[physical_page], cache->cache_slots);
}

static bool read_queue_is_full(struct page_cache *cache)
{
        return cache->read_queue_first == next_queue_position(cache->read_queue_last);
}

static bool enqueue_read(struct page_cache *cache, struct uds_request *request,
                         u32 physical_page)
{
        struct queued_read *queue_entry;
        u16 last = cache->read_queue_last;
        u16 read_queue_index;

        /* We hold the read_threads_mutex. */
        if ((cache->index[physical_page] & VOLUME_CACHE_QUEUED_FLAG) == 0) {
                /* This page has no existing entry in the queue. */
                if (read_queue_is_full(cache))
                        return false;

                /* Fill in the read queue entry. */
                cache->read_queue[last].physical_page = physical_page;
                cache->read_queue[last].invalid = false;
                cache->read_queue[last].first_request = NULL;
                cache->read_queue[last].last_request = NULL;

                /* Point the cache index to the read queue entry. */
                read_queue_index = last;
                WRITE_ONCE(cache->index[physical_page],
                           read_queue_index | VOLUME_CACHE_QUEUED_FLAG);

                advance_queue_position(&cache->read_queue_last);
        } else {
                /* It's already queued, so add this request to the existing entry. */
                read_queue_index = cache->index[physical_page] & ~VOLUME_CACHE_QUEUED_FLAG;
        }

        request->next_request = NULL;
        queue_entry = &cache->read_queue[read_queue_index];
        if (queue_entry->first_request == NULL)
                queue_entry->first_request = request;
        else
                queue_entry->last_request->next_request = request;
        queue_entry->last_request = request;

        return true;
}

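/*
 * Illustrative encoding of cache->index entries (bounds depend on geometry):
 * a value below cache_slots is a cache slot number; cache_slots itself means
 * "not cached"; a value with VOLUME_CACHE_QUEUED_FLAG set holds a read queue
 * position instead. One u16 per physical page thus serves as both the cache
 * map and the pending-read map.
 */
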
static void enqueue_page_read(struct volume *volume, struct uds_request *request,
                              u32 physical_page)
{
        /* Mark the page as queued, so that chapter invalidation knows to cancel a read. */
        while (!enqueue_read(&volume->page_cache, request, physical_page)) {
                vdo_log_debug("Read queue full, waiting for reads to finish");
                uds_wait_cond(&volume->read_threads_read_done_cond,
                              &volume->read_threads_mutex);
        }

        uds_signal_cond(&volume->read_threads_cond);
}

/*
 * Reserve the next read queue entry for processing, but do not actually remove it from the
 * queue. We hold the read_threads_mutex.
 */
static struct queued_read *reserve_read_queue_entry(struct page_cache *cache)
{
        struct queued_read *entry;
        u16 index_value;
        bool queued;

        /* No items to dequeue. */
        if (cache->read_queue_next_read == cache->read_queue_last)
                return NULL;

        entry = &cache->read_queue[cache->read_queue_next_read];
        index_value = cache->index[entry->physical_page];
        queued = (index_value & VOLUME_CACHE_QUEUED_FLAG) != 0;
        /* Check to see if it's still queued before resetting. */
        if (entry->invalid && queued)
                WRITE_ONCE(cache->index[entry->physical_page], cache->cache_slots);

        /*
         * If a synchronous read has taken this page, set invalid to true so it doesn't get
         * overwritten. Requests will just be requeued.
         */
        if (!queued)
                entry->invalid = true;

        entry->reserved = true;
        advance_queue_position(&cache->read_queue_next_read);
        return entry;
}

static inline struct queued_read *wait_to_reserve_read_queue_entry(struct volume *volume)
{
        struct queued_read *queue_entry = NULL;

        while (!volume->read_threads_exiting) {
                queue_entry = reserve_read_queue_entry(&volume->page_cache);
                if (queue_entry != NULL)
                        break;

                uds_wait_cond(&volume->read_threads_cond, &volume->read_threads_mutex);
        }

        return queue_entry;
}

static int init_chapter_index_page(const struct volume *volume, u8 *index_page,
                                   u32 chapter, u32 index_page_number,
                                   struct delta_index_page *chapter_index_page)
{
        u64 ci_virtual;
        u32 ci_chapter;
        u32 lowest_list;
        u32 highest_list;
        struct index_geometry *geometry = volume->geometry;
        int result;

        result = uds_initialize_chapter_index_page(chapter_index_page, geometry,
                                                   index_page, volume->nonce);
        if (volume->lookup_mode == LOOKUP_FOR_REBUILD)
                return result;

        if (result != UDS_SUCCESS)
                return result;

        /* Verify that the page agrees with the index page map before accepting it. */
        uds_get_list_number_bounds(volume->index_page_map, chapter, index_page_number,
                                   &lowest_list, &highest_list);
        ci_virtual = chapter_index_page->virtual_chapter_number;
        ci_chapter = uds_map_to_physical_chapter(geometry, ci_virtual);
        if ((chapter == ci_chapter) &&
            (lowest_list == chapter_index_page->lowest_list_number) &&
            (highest_list == chapter_index_page->highest_list_number))
                return UDS_SUCCESS;

        vdo_log_warning("Index page map updated to %llu",
                        (unsigned long long) volume->index_page_map->last_update);
        vdo_log_warning("Page map expects chapter %u page %u to have range %u to %u, but chapter index page has chapter %llu with range %u to %u",
                        chapter, index_page_number, lowest_list, highest_list,
                        (unsigned long long) ci_virtual,
                        chapter_index_page->lowest_list_number,
                        chapter_index_page->highest_list_number);
        return vdo_log_error_strerror(UDS_CORRUPT_DATA,
                                      "index page map mismatch with chapter index");
}

static int initialize_index_page(const struct volume *volume, u32 physical_page,
                                 struct cached_page *page)
{
        u32 chapter = map_to_chapter_number(volume->geometry, physical_page);
        u32 index_page_number = map_to_page_number(volume->geometry, physical_page);

        return init_chapter_index_page(volume, dm_bufio_get_block_data(page->buffer),
                                       chapter, index_page_number, &page->index_page);
}

static bool search_record_page(const u8 record_page[],
                               const struct uds_record_name *name,
                               const struct index_geometry *geometry,
                               struct uds_record_data *metadata)
{
        /* The record page is an array of records arranged as a binary tree in heap order. */
        u32 node = 0;
        const struct uds_volume_record *records =
                (const struct uds_volume_record *) record_page;

        while (node < geometry->records_per_page) {
                int result;
                const struct uds_volume_record *record = &records[node];

                result = memcmp(name, &record->name, UDS_RECORD_NAME_SIZE);
                if (result == 0) {
                        if (metadata != NULL)
                                *metadata = record->data;
                        return true;
                }

                /* The children of node N are at indexes 2N+1 and 2N+2. */
                node = ((2 * node) + ((result < 0) ? 1 : 2));
        }

        return false;
}

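/*
 * Because the records are stored in heap order (see encode_tree() below),
 * this walk is a binary search: for example, a page of 255 records is
 * resolved in at most 8 comparisons, without any child pointers stored on
 * disk.
 */
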
static int search_page(struct cached_page *page, const struct volume *volume,
                       struct uds_request *request, u32 physical_page)
{
        int result;
        enum uds_index_region location;
        u16 record_page_number;

        if (is_record_page(volume->geometry, physical_page)) {
                if (search_record_page(dm_bufio_get_block_data(page->buffer),
                                       &request->record_name, volume->geometry,
                                       &request->old_metadata))
                        location = UDS_LOCATION_RECORD_PAGE_LOOKUP;
                else
                        location = UDS_LOCATION_UNAVAILABLE;
        } else {
                result = uds_search_chapter_index_page(&page->index_page,
                                                       volume->geometry,
                                                       &request->record_name,
                                                       &record_page_number);
                if (result != UDS_SUCCESS)
                        return result;

                if (record_page_number == NO_CHAPTER_INDEX_ENTRY) {
                        location = UDS_LOCATION_UNAVAILABLE;
                } else {
                        location = UDS_LOCATION_INDEX_PAGE_LOOKUP;
                        *((u16 *) &request->old_metadata) = record_page_number;
                }
        }

        request->location = location;
        request->found = false;
        return UDS_SUCCESS;
}

static int process_entry(struct volume *volume, struct queued_read *entry)
{
        u32 page_number = entry->physical_page;
        struct uds_request *request;
        struct cached_page *page = NULL;
        u8 *page_data;
        int result;

        if (entry->invalid) {
                vdo_log_debug("Requeuing requests for invalid page");
                return UDS_SUCCESS;
        }

        page = select_victim_in_cache(&volume->page_cache);

        /* Drop the mutex across the storage read so other work can proceed. */
        mutex_unlock(&volume->read_threads_mutex);
        page_data = dm_bufio_read(volume->client, page_number, &page->buffer);
        mutex_lock(&volume->read_threads_mutex);
        if (IS_ERR(page_data)) {
                result = -PTR_ERR(page_data);
                vdo_log_warning_strerror(result,
                                         "error reading physical page %u from volume",
                                         page_number);
                cancel_page_in_cache(&volume->page_cache, page_number, page);
                return result;
        }

        if (entry->invalid) {
                vdo_log_warning("Page %u invalidated after read", page_number);
                cancel_page_in_cache(&volume->page_cache, page_number, page);
                return UDS_SUCCESS;
        }

        if (!is_record_page(volume->geometry, page_number)) {
                result = initialize_index_page(volume, page_number, page);
                if (result != UDS_SUCCESS) {
                        vdo_log_warning("Error initializing chapter index page");
                        cancel_page_in_cache(&volume->page_cache, page_number, page);
                        return result;
                }
        }

        result = put_page_in_cache(&volume->page_cache, page_number, page);
        if (result != UDS_SUCCESS) {
                vdo_log_warning("Error putting page %u in cache", page_number);
                cancel_page_in_cache(&volume->page_cache, page_number, page);
                return result;
        }

        /* Search the new page on behalf of every request queued for it. */
        request = entry->first_request;
        while ((request != NULL) && (result == UDS_SUCCESS)) {
                result = search_page(page, volume, request, page_number);
                request = request->next_request;
        }

        return result;
}

static void release_queued_requests(struct volume *volume, struct queued_read *entry,
                                    int result)
{
        struct page_cache *cache = &volume->page_cache;
        u16 next_read = cache->read_queue_next_read;
        struct uds_request *request;
        struct uds_request *next;

        /* Send each waiting request back to the index for its next stage. */
        for (request = entry->first_request; request != NULL; request = next) {
                next = request->next_request;
                request->status = result;
                request->requeued = true;
                uds_enqueue_request(request, STAGE_INDEX);
        }

        entry->reserved = false;

        /* Move the read_queue_first pointer past all released entries. */
        while ((cache->read_queue_first != next_read) &&
               (!cache->read_queue[cache->read_queue_first].reserved))
                advance_queue_position(&cache->read_queue_first);

        uds_broadcast_cond(&volume->read_threads_read_done_cond);
}

static void read_thread_function(void *arg)
{
        struct volume *volume = arg;

        vdo_log_debug("reader starting");
        mutex_lock(&volume->read_threads_mutex);
        while (true) {
                struct queued_read *queue_entry;
                int result;

                queue_entry = wait_to_reserve_read_queue_entry(volume);
                if (volume->read_threads_exiting)
                        break;

                result = process_entry(volume, queue_entry);
                release_queued_requests(volume, queue_entry, result);
        }
        mutex_unlock(&volume->read_threads_mutex);
        vdo_log_debug("reader done");
}

static void get_page_and_index(struct page_cache *cache, u32 physical_page,
                               int *queue_index, struct cached_page **page_ptr)
{
        u16 index_value;
        u16 index;
        bool queued;

        /*
         * It would be unlikely for the compiler to turn the usage of index_value into two reads
         * of cache->index, but it would be possible and very bad if those reads did not return the
         * same bits.
         */
        index_value = READ_ONCE(cache->index[physical_page]);
        queued = (index_value & VOLUME_CACHE_QUEUED_FLAG) != 0;
        index = index_value & ~VOLUME_CACHE_QUEUED_FLAG;

        if (!queued && (index < cache->cache_slots)) {
                *page_ptr = &cache->cache[index];
                /*
                 * We have acquired access to the cached page, but unless we hold the
                 * read_threads_mutex, we need a read memory barrier now. The corresponding
                 * write memory barrier is in put_page_in_cache().
                 */
                smp_rmb();
        } else {
                *page_ptr = NULL;
        }

        *queue_index = queued ? index : -1;
}

static void get_page_from_cache(struct page_cache *cache, u32 physical_page,
                                struct cached_page **page)
{
        int queue_index = -1;

        get_page_and_index(cache, physical_page, &queue_index, page);
}

/* Must be called holding the read_threads_mutex. */
static int read_page_locked(struct volume *volume, u32 physical_page,
                            struct cached_page **page_ptr)
{
        int result = UDS_SUCCESS;
        struct cached_page *page = NULL;
        u8 *page_data;

        page = select_victim_in_cache(&volume->page_cache);
        page_data = dm_bufio_read(volume->client, physical_page, &page->buffer);
        if (IS_ERR(page_data)) {
                result = -PTR_ERR(page_data);
                vdo_log_warning_strerror(result,
                                         "error reading physical page %u from volume",
                                         physical_page);
                cancel_page_in_cache(&volume->page_cache, physical_page, page);
                return result;
        }

        if (!is_record_page(volume->geometry, physical_page)) {
                result = initialize_index_page(volume, physical_page, page);
                if (result != UDS_SUCCESS) {
                        if (volume->lookup_mode != LOOKUP_FOR_REBUILD)
                                vdo_log_warning("Corrupt index page %u", physical_page);
                        cancel_page_in_cache(&volume->page_cache, physical_page, page);
                        return result;
                }
        }

        result = put_page_in_cache(&volume->page_cache, physical_page, page);
        if (result != UDS_SUCCESS) {
                vdo_log_warning("Error putting page %u in cache", physical_page);
                cancel_page_in_cache(&volume->page_cache, physical_page, page);
                return result;
        }

        *page_ptr = page;
        return UDS_SUCCESS;
}

/* Retrieve a page from the cache while holding the read_threads_mutex. */
static int get_volume_page_locked(struct volume *volume, u32 physical_page,
                                  struct cached_page **page_ptr)
{
        int result;
        struct cached_page *page = NULL;

        get_page_from_cache(&volume->page_cache, physical_page, &page);
        if (page == NULL) {
                result = read_page_locked(volume, physical_page, &page);
                if (result != UDS_SUCCESS)
                        return result;
        } else {
                make_page_most_recent(&volume->page_cache, page);
        }

        *page_ptr = page;
        return UDS_SUCCESS;
}

/* Retrieve a page from the cache while holding a search_pending lock. */
static int get_volume_page_protected(struct volume *volume, struct uds_request *request,
                                     u32 physical_page, struct cached_page **page_ptr)
{
        struct cached_page *page;

        get_page_from_cache(&volume->page_cache, physical_page, &page);
        if (page != NULL) {
                if (request->zone_number == 0) {
                        /* Only one zone is allowed to update the LRU. */
                        make_page_most_recent(&volume->page_cache, page);
                }

                *page_ptr = page;
                return UDS_SUCCESS;
        }

        /* Prepare to enqueue a read for the page. */
        end_pending_search(&volume->page_cache, request->zone_number);
        mutex_lock(&volume->read_threads_mutex);

        /*
         * Repeat the lookup while holding the mutex: a reader thread may have added the page to
         * the cache between the search above and the attempt to enqueue a read below, and
         * enqueuing another read would create a duplicate cache entry.
         */
        get_page_from_cache(&volume->page_cache, physical_page, &page);
        if (page == NULL) {
                enqueue_page_read(volume, request, physical_page);
                /*
                 * The performance gain from unlocking first, while "search pending" mode is off,
                 * turns out to be significant in some cases. The page is not available yet so
                 * the order does not matter for correctness as it does below.
                 */
                mutex_unlock(&volume->read_threads_mutex);
                begin_pending_search(&volume->page_cache, physical_page,
                                     request->zone_number);
                return UDS_QUEUED;
        }

        /*
         * Now that the page is loaded, the volume needs to switch to "reader thread unlocked" and
         * "search pending" state in careful order so no other thread can mess with the data
         * before the caller gets to look at it.
         */
        begin_pending_search(&volume->page_cache, physical_page, request->zone_number);
        mutex_unlock(&volume->read_threads_mutex);
        *page_ptr = page;
        return UDS_SUCCESS;
}

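/*
 * The ordering above is the delicate part: for a page found in the cache,
 * begin_pending_search() must precede dropping the mutex so a reader thread
 * evicting the page will wait for this zone; for a page still being read in,
 * the reversed (and faster) order is safe because the page is not yet visible
 * in the cache index.
 */
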
static int get_volume_page(struct volume *volume, u32 chapter, u32 page_number,
                           struct cached_page **page_ptr)
{
        int result;
        u32 physical_page = map_to_physical_page(volume->geometry, chapter, page_number);

        mutex_lock(&volume->read_threads_mutex);
        result = get_volume_page_locked(volume, physical_page, page_ptr);
        mutex_unlock(&volume->read_threads_mutex);
        return result;
}

int uds_get_volume_record_page(struct volume *volume, u32 chapter, u32 page_number,
                               u8 **data_ptr)
{
        int result;
        struct cached_page *page = NULL;

        result = get_volume_page(volume, chapter, page_number, &page);
        if (result == UDS_SUCCESS)
                *data_ptr = dm_bufio_get_block_data(page->buffer);
        return result;
}

int uds_get_volume_index_page(struct volume *volume, u32 chapter, u32 page_number,
                              struct delta_index_page **index_page_ptr)
{
        int result;
        struct cached_page *page = NULL;

        result = get_volume_page(volume, chapter, page_number, &page);
        if (result == UDS_SUCCESS)
                *index_page_ptr = &page->index_page;
        return result;
}

/*
 * Find the record page associated with a name in a given index page. This will return UDS_QUEUED
 * if the page in question must be read from storage.
 */
static int search_cached_index_page(struct volume *volume, struct uds_request *request,
                                    u32 chapter, u32 index_page_number,
                                    u16 *record_page_number)
{
        int result;
        struct cached_page *page = NULL;
        u32 physical_page = map_to_physical_page(volume->geometry, chapter,
                                                 index_page_number);

        /*
         * Update the invalidate counter before reading the mapping, so this thread cannot read
         * a page that the reader thread has already marked for invalidation.
         */
        begin_pending_search(&volume->page_cache, physical_page, request->zone_number);

        result = get_volume_page_protected(volume, request, physical_page, &page);
        if (result != UDS_SUCCESS) {
                end_pending_search(&volume->page_cache, request->zone_number);
                return result;
        }

        result = uds_search_chapter_index_page(&page->index_page, volume->geometry,
                                               &request->record_name,
                                               record_page_number);
        end_pending_search(&volume->page_cache, request->zone_number);
        return result;
}

/*
 * Find the metadata for a name in a given record page. This will return UDS_QUEUED if the page
 * in question must be read from storage.
 */
int uds_search_cached_record_page(struct volume *volume, struct uds_request *request,
                                  u32 chapter, u16 record_page_number, bool *found)
{
        struct cached_page *record_page;
        struct index_geometry *geometry = volume->geometry;
        int result;
        u32 physical_page, page_number;

        *found = false;
        if (record_page_number == NO_CHAPTER_INDEX_ENTRY)
                return UDS_SUCCESS;

        result = VDO_ASSERT(record_page_number < geometry->record_pages_per_chapter,
                            "0 <= %d < %u", record_page_number,
                            geometry->record_pages_per_chapter);
        if (result != VDO_SUCCESS)
                return result;

        page_number = geometry->index_pages_per_chapter + record_page_number;

        physical_page = map_to_physical_page(volume->geometry, chapter, page_number);

        /*
         * Update the invalidate counter before reading the mapping, so this thread cannot read
         * a page that the reader thread has already marked for invalidation.
         */
        begin_pending_search(&volume->page_cache, physical_page, request->zone_number);

        result = get_volume_page_protected(volume, request, physical_page, &record_page);
        if (result != UDS_SUCCESS) {
                end_pending_search(&volume->page_cache, request->zone_number);
                return result;
        }

        if (search_record_page(dm_bufio_get_block_data(record_page->buffer),
                               &request->record_name, geometry, &request->old_metadata))
                *found = true;

        end_pending_search(&volume->page_cache, request->zone_number);
        return UDS_SUCCESS;
}

void uds_prefetch_volume_chapter(const struct volume *volume, u32 chapter)
{
        const struct index_geometry *geometry = volume->geometry;
        u32 physical_page = map_to_physical_page(geometry, chapter, 0);

        dm_bufio_prefetch(volume->client, physical_page, geometry->pages_per_chapter);
}

int uds_read_chapter_index_from_volume(const struct volume *volume, u64 virtual_chapter,
                                       struct dm_buffer *volume_buffers[],
                                       struct delta_index_page index_pages[])
{
        int result;
        u32 i;
        const struct index_geometry *geometry = volume->geometry;
        u32 physical_chapter = uds_map_to_physical_chapter(geometry, virtual_chapter);
        u32 physical_page = map_to_physical_page(geometry, physical_chapter, 0);

        dm_bufio_prefetch(volume->client, physical_page, geometry->index_pages_per_chapter);
        for (i = 0; i < geometry->index_pages_per_chapter; i++) {
                u8 *index_page;

                index_page = dm_bufio_read(volume->client, physical_page + i,
                                           &volume_buffers[i]);
                if (IS_ERR(index_page)) {
                        result = -PTR_ERR(index_page);
                        vdo_log_warning_strerror(result,
                                                 "error reading physical page %u",
                                                 physical_page + i);
                        return result;
                }

                result = init_chapter_index_page(volume, index_page, physical_chapter, i,
                                                 &index_pages[i]);
                if (result != UDS_SUCCESS)
                        return result;
        }

        return UDS_SUCCESS;
}

int uds_search_volume_page_cache(struct volume *volume, struct uds_request *request,
                                 bool *found)
{
        int result;
        u32 physical_chapter =
                uds_map_to_physical_chapter(volume->geometry, request->virtual_chapter);
        u32 index_page_number;
        u16 record_page_number;

        index_page_number = uds_find_index_page_number(volume->index_page_map,
                                                       &request->record_name,
                                                       physical_chapter);

        if (request->location == UDS_LOCATION_INDEX_PAGE_LOOKUP) {
                record_page_number = *((u16 *) &request->old_metadata);
        } else {
                result = search_cached_index_page(volume, request, physical_chapter,
                                                  index_page_number,
                                                  &record_page_number);
                if (result != UDS_SUCCESS)
                        return result;
        }

        return uds_search_cached_record_page(volume, request, physical_chapter,
                                             record_page_number, found);
}

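/*
 * A request that gets UDS_QUEUED above is parked on the read queue and later
 * resubmitted by a reader thread with request->location already set; in the
 * UDS_LOCATION_INDEX_PAGE_LOOKUP case, the record page number found during
 * the first pass rides along in old_metadata, so the retry skips the chapter
 * index search entirely.
 */
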
int uds_search_volume_page_cache_for_rebuild(struct volume *volume,
                                             const struct uds_record_name *name,
                                             u64 virtual_chapter, bool *found)
{
        int result;
        struct index_geometry *geometry = volume->geometry;
        struct cached_page *page;
        u32 physical_chapter = uds_map_to_physical_chapter(geometry, virtual_chapter);
        u32 index_page_number;
        u16 record_page_number;
        u32 page_number;

        *found = false;
        index_page_number =
                uds_find_index_page_number(volume->index_page_map, name,
                                           physical_chapter);
        result = get_volume_page(volume, physical_chapter, index_page_number, &page);
        if (result != UDS_SUCCESS)
                return result;

        result = uds_search_chapter_index_page(&page->index_page, geometry, name,
                                               &record_page_number);
        if (result != UDS_SUCCESS)
                return result;

        if (record_page_number == NO_CHAPTER_INDEX_ENTRY)
                return UDS_SUCCESS;

        page_number = geometry->index_pages_per_chapter + record_page_number;
        result = get_volume_page(volume, physical_chapter, page_number, &page);
        if (result != UDS_SUCCESS)
                return result;

        *found = search_record_page(dm_bufio_get_block_data(page->buffer), name,
                                    geometry, NULL);
        return UDS_SUCCESS;
}

static void invalidate_page(struct page_cache *cache, u32 physical_page)
{
        struct cached_page *page;
        int queue_index = -1;

        /* We hold the read_threads_mutex. */
        get_page_and_index(cache, physical_page, &queue_index, &page);
        if (page != NULL) {
                WRITE_ONCE(cache->index[page->physical_page], cache->cache_slots);
                wait_for_pending_searches(cache, page->physical_page);
                clear_cache_page(cache, page);
        } else if (queue_index > -1) {
                vdo_log_debug("setting pending read to invalid");
                cache->read_queue[queue_index].invalid = true;
        }
}

void uds_forget_chapter(struct volume *volume, u64 virtual_chapter)
{
        u32 physical_chapter =
                uds_map_to_physical_chapter(volume->geometry, virtual_chapter);
        u32 first_page = map_to_physical_page(volume->geometry, physical_chapter, 0);
        u32 i;

        vdo_log_debug("forgetting chapter %llu", (unsigned long long) virtual_chapter);
        mutex_lock(&volume->read_threads_mutex);
        for (i = 0; i < volume->geometry->pages_per_chapter; i++)
                invalidate_page(&volume->page_cache, first_page + i);
        mutex_unlock(&volume->read_threads_mutex);
}

/*
 * Donate an index page from a newly written chapter to the page cache, since it is likely to be
 * used again soon. The caller must already hold the reader thread mutex.
 */
static int donate_index_page_locked(struct volume *volume, u32 physical_chapter,
                                    u32 index_page_number, struct dm_buffer *page_buffer)
{
        int result;
        struct cached_page *page = NULL;
        u32 physical_page =
                map_to_physical_page(volume->geometry, physical_chapter,
                                     index_page_number);

        page = select_victim_in_cache(&volume->page_cache);
        page->buffer = page_buffer;
        result = init_chapter_index_page(volume, dm_bufio_get_block_data(page_buffer),
                                         physical_chapter, index_page_number,
                                         &page->index_page);
        if (result != UDS_SUCCESS) {
                vdo_log_warning("Error initializing chapter index page");
                cancel_page_in_cache(&volume->page_cache, physical_page, page);
                return result;
        }

        result = put_page_in_cache(&volume->page_cache, physical_page, page);
        if (result != UDS_SUCCESS) {
                vdo_log_warning("Error putting page %u in cache", physical_page);
                cancel_page_in_cache(&volume->page_cache, physical_page, page);
                return result;
        }

        return UDS_SUCCESS;
}

static int write_index_pages(struct volume *volume, u32 physical_chapter_number,
                             struct open_chapter_index *chapter_index)
{
        struct index_geometry *geometry = volume->geometry;
        struct dm_buffer *page_buffer;
        u32 first_index_page = map_to_physical_page(geometry, physical_chapter_number, 0);
        u32 delta_list_number = 0;
        u32 index_page_number;

        for (index_page_number = 0;
             index_page_number < geometry->index_pages_per_chapter;
             index_page_number++) {
                u8 *page_data;
                u32 physical_page = first_index_page + index_page_number;
                u32 lists_packed;
                bool last_page;
                int result;

                page_data = dm_bufio_new(volume->client, physical_page, &page_buffer);
                if (IS_ERR(page_data)) {
                        return vdo_log_warning_strerror(-PTR_ERR(page_data),
                                                        "failed to prepare index page");
                }

                last_page = ((index_page_number + 1) == geometry->index_pages_per_chapter);
                result = uds_pack_open_chapter_index_page(chapter_index, page_data,
                                                          delta_list_number, last_page,
                                                          &lists_packed);
                if (result != UDS_SUCCESS) {
                        dm_bufio_release(page_buffer);
                        return vdo_log_warning_strerror(result,
                                                        "failed to pack index page");
                }

                dm_bufio_mark_buffer_dirty(page_buffer);

                if (lists_packed == 0) {
                        vdo_log_debug("no delta lists packed on chapter %u page %u",
                                      physical_chapter_number, index_page_number);
                } else {
                        delta_list_number += lists_packed;
                }

                uds_update_index_page_map(volume->index_page_map,
                                          chapter_index->virtual_chapter_number,
                                          physical_chapter_number, index_page_number,
                                          delta_list_number - 1);

                mutex_lock(&volume->read_threads_mutex);
                result = donate_index_page_locked(volume, physical_chapter_number,
                                                  index_page_number, page_buffer);
                mutex_unlock(&volume->read_threads_mutex);
                if (result != UDS_SUCCESS)
                        return result;
        }

        return UDS_SUCCESS;
}

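/*
 * Note that the page map entry records the highest delta list landing on each
 * index page (delta_list_number - 1 after the advance above); this is the
 * bound uds_find_index_page_number() consults when routing a record name to
 * its chapter index page.
 */
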
static u32 encode_tree(u8 record_page[],
                       const struct uds_volume_record *sorted_pointers[],
                       u32 next_record, u32 node, u32 node_count)
{
        if (node < node_count) {
                u32 child = (2 * node) + 1;

                next_record = encode_tree(record_page, sorted_pointers, next_record,
                                          child, node_count);

                /*
                 * In-order traversal: copy the contents of the next record into the page at the
                 * node offset.
                 */
                memcpy(&record_page[node * BYTES_PER_RECORD],
                       sorted_pointers[next_record++], BYTES_PER_RECORD);

                next_record = encode_tree(record_page, sorted_pointers, next_record,
                                          child + 1, node_count);
        }

        return next_record;
}

static int encode_record_page(const struct volume *volume,
                              const struct uds_volume_record records[], u8 record_page[])
{
        int result;
        u32 i;
        u32 records_per_page = volume->geometry->records_per_page;
        const struct uds_volume_record **record_pointers = volume->record_pointers;

        for (i = 0; i < records_per_page; i++)
                record_pointers[i] = &records[i];

        /* Sort the record pointers by the fixed-size names in the records. */
        result = uds_radix_sort(volume->radix_sorter, (const u8 **) record_pointers,
                                records_per_page, UDS_RECORD_NAME_SIZE);
        if (result != UDS_SUCCESS)
                return result;

        encode_tree(record_page, record_pointers, 0, 0, records_per_page);
        return UDS_SUCCESS;
}

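/*
 * Sketch of the resulting layout (assuming seven records per page, for
 * illustration only): the sorted records land so that slot 0 holds the
 * median, slots 1 and 2 the quartiles, and so on; an in-order walk of slots
 * 3, 1, 4, 0, 5, 2, 6 yields sorted order, which is exactly the property
 * search_record_page() exploits for its binary descent.
 */
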
static int write_record_pages(struct volume *volume, u32 physical_chapter_number,
                              const struct uds_volume_record *records)
{
        u32 record_page_number;
        struct index_geometry *geometry = volume->geometry;
        const struct uds_volume_record *next_record = records;
        u32 first_record_page = map_to_physical_page(geometry, physical_chapter_number,
                                                     geometry->index_pages_per_chapter);

        for (record_page_number = 0;
             record_page_number < geometry->record_pages_per_chapter;
             record_page_number++) {
                u8 *page_data;
                u32 physical_page = first_record_page + record_page_number;
                struct dm_buffer *page_buffer;
                int result;

                page_data = dm_bufio_new(volume->client, physical_page, &page_buffer);
                if (IS_ERR(page_data)) {
                        return vdo_log_warning_strerror(-PTR_ERR(page_data),
                                                        "failed to prepare record page");
                }

                result = encode_record_page(volume, next_record, page_data);
                if (result != UDS_SUCCESS) {
                        dm_bufio_release(page_buffer);
                        return vdo_log_warning_strerror(result,
                                                        "failed to encode record page %u",
                                                        record_page_number);
                }

                next_record += geometry->records_per_page;
                dm_bufio_mark_buffer_dirty(page_buffer);
                dm_bufio_release(page_buffer);
        }

        return UDS_SUCCESS;
}

int uds_write_chapter(struct volume *volume, struct open_chapter_index *chapter_index,
                      const struct uds_volume_record *records)
{
        int result;
        u32 physical_chapter_number =
                uds_map_to_physical_chapter(volume->geometry,
                                            chapter_index->virtual_chapter_number);

        result = write_index_pages(volume, physical_chapter_number, chapter_index);
        if (result != UDS_SUCCESS)
                return result;

        result = write_record_pages(volume, physical_chapter_number, records);
        if (result != UDS_SUCCESS)
                return result;

        result = -dm_bufio_write_dirty_buffers(volume->client);
        if (result != UDS_SUCCESS)
                vdo_log_error_strerror(result, "cannot sync chapter to volume");

        return result;
}

static void probe_chapter(struct volume *volume, u32 chapter_number,
                          u64 *virtual_chapter_number)
{
        const struct index_geometry *geometry = volume->geometry;
        u32 expected_list_number = 0;
        u32 i;
        u64 vcn = BAD_CHAPTER;

        *virtual_chapter_number = BAD_CHAPTER;
        dm_bufio_prefetch(volume->client,
                          map_to_physical_page(geometry, chapter_number, 0),
                          geometry->index_pages_per_chapter);

        for (i = 0; i < geometry->index_pages_per_chapter; i++) {
                struct delta_index_page *page;
                int result;

                result = uds_get_volume_index_page(volume, chapter_number, i, &page);
                if (result != UDS_SUCCESS)
                        return;

                if (page->virtual_chapter_number == BAD_CHAPTER) {
                        vdo_log_error("corrupt index page in chapter %u", chapter_number);
                        return;
                }

                if (vcn == BAD_CHAPTER) {
                        vcn = page->virtual_chapter_number;
                } else if (page->virtual_chapter_number != vcn) {
                        vdo_log_error("inconsistent chapter %u index page %u: expected vcn %llu, got vcn %llu",
                                      chapter_number, i, (unsigned long long) vcn,
                                      (unsigned long long) page->virtual_chapter_number);
                        return;
                }

                if (expected_list_number != page->lowest_list_number) {
                        vdo_log_error("inconsistent chapter %u index page %u: expected list number %u, got list number %u",
                                      chapter_number, i, expected_list_number,
                                      page->lowest_list_number);
                        return;
                }
                expected_list_number = page->highest_list_number + 1;

                result = uds_validate_chapter_index_page(page, geometry);
                if (result != UDS_SUCCESS)
                        return;
        }

        if (chapter_number != uds_map_to_physical_chapter(geometry, vcn)) {
                vdo_log_error("chapter %u vcn %llu is out of phase (%u)", chapter_number,
                              (unsigned long long) vcn, geometry->chapters_per_volume);
                return;
        }

        *virtual_chapter_number = vcn;
}

/* Find the last valid physical chapter in the volume. */
static void find_real_end_of_volume(struct volume *volume, u32 limit, u32 *limit_ptr)
{
        u32 span = 1;
        u32 tries = 0;

        while (limit > 0) {
                u32 chapter = (span > limit) ? 0 : limit - span;
                u64 vcn = BAD_CHAPTER;

                probe_chapter(volume, chapter, &vcn);
                if (vcn == BAD_CHAPTER) {
                        limit = chapter;
                        if (++tries > 1)
                                span *= 2;
                } else {
                        if (span == 1)
                                break;
                        span /= 2;
                        tries = 0;
                }
        }

        *limit_ptr = limit;
}

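/*
 * The loop above is a galloping search: the probe span doubles while probes
 * keep hitting bad chapters and shrinks once a good one is found, locating
 * the boundary between written and never-written chapters in roughly
 * O(log(limit)) probes rather than scanning every chapter.
 */
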
static int find_chapter_limits(struct volume *volume, u32 chapter_limit, u64 *lowest_vcn,
                               u64 *highest_vcn)
{
        struct index_geometry *geometry = volume->geometry;
        u64 zero_vcn;
        u64 lowest = BAD_CHAPTER;
        u64 highest = BAD_CHAPTER;
        u64 moved_chapter = BAD_CHAPTER;
        u32 left_chapter = 0;
        u32 right_chapter = 0;
        u32 bad_chapters = 0;

        /*
         * This method assumes there is at most one run of contiguous bad chapters caused by
         * unflushed writes. Either the bad spot is at the beginning and end, or somewhere in the
         * middle. Wherever it is, the highest and lowest VCNs are adjacent to it. Otherwise the
         * volume is cleanly saved and somewhere in the middle of it the highest VCN immediately
         * precedes the lowest one.
         */

        /* It doesn't matter if this results in a bad spot (BAD_CHAPTER). */
        probe_chapter(volume, 0, &zero_vcn);

        /*
         * Binary search for the end of the discontinuity in the monotonically increasing
         * virtual chapter numbers; bad spots are treated as a span of BAD_CHAPTER values.
         *
         * If a virtual chapter is out-of-order, it will be the one moved by conversion. Always
         * skip over the moved chapter when searching, adding it to the range at the end if
         * necessary.
         */
        if (geometry->remapped_physical > 0) {
                u64 remapped_vcn;

                probe_chapter(volume, geometry->remapped_physical, &remapped_vcn);
                if (remapped_vcn == geometry->remapped_virtual)
                        moved_chapter = geometry->remapped_physical;
        }

        left_chapter = 0;
        right_chapter = chapter_limit;
        while (left_chapter < right_chapter) {
                u64 probe_vcn;
                u32 chapter = (left_chapter + right_chapter) / 2;

                if (chapter == moved_chapter)
                        chapter--;

                probe_chapter(volume, chapter, &probe_vcn);
                if (zero_vcn <= probe_vcn) {
                        left_chapter = chapter + 1;
                        if (left_chapter == moved_chapter)
                                left_chapter++;
                } else {
                        right_chapter = chapter;
                }
        }

        /* If left_chapter went off the end, chapter 0 has the lowest virtual chapter number. */
        if (left_chapter >= chapter_limit)
                left_chapter = 0;

        probe_chapter(volume, left_chapter, &lowest);

        /* The moved chapter might be the lowest in the range. */
        if ((moved_chapter != BAD_CHAPTER) && (lowest == geometry->remapped_virtual + 1))
                lowest = geometry->remapped_virtual;

        /*
         * Circularly scan backwards, moving over any bad chapters until encountering a good
         * one, which is the chapter with the highest vcn.
         */
        while (highest == BAD_CHAPTER) {
                right_chapter = (right_chapter + chapter_limit - 1) % chapter_limit;
                if (right_chapter == moved_chapter)
                        continue;

                probe_chapter(volume, right_chapter, &highest);
                if (bad_chapters++ >= MAX_BAD_CHAPTERS) {
                        vdo_log_error("too many bad chapters in volume: %u",
                                      bad_chapters);
                        return UDS_CORRUPT_DATA;
                }
        }

        *lowest_vcn = lowest;
        *highest_vcn = highest;
        return UDS_SUCCESS;
}

/*
 * Find the highest and lowest contiguous chapters present in the volume and determine their
 * virtual chapter numbers. This is used by rebuild.
 */
int uds_find_volume_chapter_boundaries(struct volume *volume, u64 *lowest_vcn,
                                       u64 *highest_vcn, bool *is_empty)
{
        u32 chapter_limit = volume->geometry->chapters_per_volume;

        find_real_end_of_volume(volume, chapter_limit, &chapter_limit);
        if (chapter_limit == 0) {
                *lowest_vcn = 0;
                *highest_vcn = 0;
                *is_empty = true;
                return UDS_SUCCESS;
        }

        *is_empty = false;
        return find_chapter_limits(volume, chapter_limit, lowest_vcn, highest_vcn);
}

int __must_check uds_replace_volume_storage(struct volume *volume,
                                            struct index_layout *layout,
                                            struct block_device *bdev)
{
        int result;
        u32 i;

        result = uds_replace_index_layout_storage(layout, bdev);
        if (result != UDS_SUCCESS)
                return result;

        /* Release all outstanding dm_bufio objects. */
        for (i = 0; i < volume->page_cache.indexable_pages; i++)
                volume->page_cache.index[i] = volume->page_cache.cache_slots;
        for (i = 0; i < volume->page_cache.cache_slots; i++)
                clear_cache_page(&volume->page_cache, &volume->page_cache.cache[i]);
        if (volume->sparse_cache != NULL)
                uds_invalidate_sparse_cache(volume->sparse_cache);
        if (volume->client != NULL)
                dm_bufio_client_destroy(vdo_forget(volume->client));

        return uds_open_volume_bufio(layout, volume->geometry->bytes_per_page,
                                     volume->reserved_buffers, &volume->client);
}

static int __must_check initialize_page_cache(struct page_cache *cache,
                                              const struct index_geometry *geometry,
                                              u32 chapters_in_cache,
                                              unsigned int zone_count)
{
        int result;
        u32 i;

        cache->indexable_pages = geometry->pages_per_volume + 1;
        cache->cache_slots = chapters_in_cache * geometry->record_pages_per_chapter;
        cache->zone_count = zone_count;
        atomic64_set(&cache->clock, 1);

        result = VDO_ASSERT((cache->cache_slots <= VOLUME_CACHE_MAX_ENTRIES),
                            "requested cache size, %u, within limit %u",
                            cache->cache_slots, VOLUME_CACHE_MAX_ENTRIES);
        if (result != VDO_SUCCESS)
                return result;

        result = vdo_allocate(VOLUME_CACHE_MAX_QUEUED_READS, struct queued_read,
                              "volume read queue", &cache->read_queue);
        if (result != VDO_SUCCESS)
                return result;

        result = vdo_allocate(cache->zone_count, struct search_pending_counter,
                              "Volume Cache Zones", &cache->search_pending_counters);
        if (result != VDO_SUCCESS)
                return result;

        result = vdo_allocate(cache->indexable_pages, u16, "page cache index",
                              &cache->index);
        if (result != VDO_SUCCESS)
                return result;

        result = vdo_allocate(cache->cache_slots, struct cached_page, "page cache cache",
                              &cache->cache);
        if (result != VDO_SUCCESS)
                return result;

        /* Initialize index values to invalid values. */
        for (i = 0; i < cache->indexable_pages; i++)
                cache->index[i] = cache->cache_slots;

        for (i = 0; i < cache->cache_slots; i++)
                clear_cache_page(cache, &cache->cache[i]);

        return UDS_SUCCESS;
}

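/*
 * indexable_pages is pages_per_volume + 1 because physical page numbers start
 * at 1 (page 0 is the volume header), and cache_slots doubles as the "not
 * cached" sentinel written into every index entry just above.
 */
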
int uds_make_volume(const struct uds_configuration *config, struct index_layout *layout,
                    struct volume **new_volume)
{
        unsigned int i;
        struct volume *volume = NULL;
        struct index_geometry *geometry;
        unsigned int reserved_buffers;
        int result;

        result = vdo_allocate(1, struct volume, "volume", &volume);
        if (result != VDO_SUCCESS)
                return result;

        volume->nonce = uds_get_volume_nonce(layout);

        result = uds_copy_index_geometry(config->geometry, &volume->geometry);
        if (result != UDS_SUCCESS) {
                uds_free_volume(volume);
                return result;
        }
        geometry = volume->geometry;

        /*
         * Reserve a buffer for each entry in the page cache, one for the chapter writer, and
         * one for each entry in the sparse cache.
         */
        reserved_buffers = config->cache_chapters * geometry->record_pages_per_chapter;
        reserved_buffers += 1;
        if (uds_is_sparse_index_geometry(geometry))
                reserved_buffers += (config->cache_chapters * geometry->index_pages_per_chapter);
        volume->reserved_buffers = reserved_buffers;
        result = uds_open_volume_bufio(layout, geometry->bytes_per_page,
                                       volume->reserved_buffers, &volume->client);
        if (result != UDS_SUCCESS) {
                uds_free_volume(volume);
                return result;
        }

        result = uds_make_radix_sorter(geometry->records_per_page,
                                       &volume->radix_sorter);
        if (result != UDS_SUCCESS) {
                uds_free_volume(volume);
                return result;
        }

        result = vdo_allocate(geometry->records_per_page,
                              const struct uds_volume_record *, "record pointers",
                              &volume->record_pointers);
        if (result != VDO_SUCCESS) {
                uds_free_volume(volume);
                return result;
        }

        if (uds_is_sparse_index_geometry(geometry)) {
                size_t page_size = sizeof(struct delta_index_page) + geometry->bytes_per_page;

                result = uds_make_sparse_cache(geometry, config->cache_chapters,
                                               config->zone_count,
                                               &volume->sparse_cache);
                if (result != UDS_SUCCESS) {
                        uds_free_volume(volume);
                        return result;
                }

                volume->cache_size =
                        page_size * geometry->index_pages_per_chapter * config->cache_chapters;
        }

        result = initialize_page_cache(&volume->page_cache, geometry,
                                       config->cache_chapters, config->zone_count);
        if (result != UDS_SUCCESS) {
                uds_free_volume(volume);
                return result;
        }

        volume->cache_size += volume->page_cache.cache_slots * sizeof(struct delta_index_page);
        result = uds_make_index_page_map(geometry, &volume->index_page_map);
        if (result != UDS_SUCCESS) {
                uds_free_volume(volume);
                return result;
        }

        mutex_init(&volume->read_threads_mutex);
        uds_init_cond(&volume->read_threads_read_done_cond);
        uds_init_cond(&volume->read_threads_cond);

        result = vdo_allocate(config->read_threads, struct thread *, "reader threads",
                              &volume->reader_threads);
        if (result != VDO_SUCCESS) {
                uds_free_volume(volume);
                return result;
        }

        for (i = 0; i < config->read_threads; i++) {
                result = vdo_create_thread(read_thread_function, (void *) volume,
                                           "reader", &volume->reader_threads[i]);
                if (result != VDO_SUCCESS) {
                        uds_free_volume(volume);
                        return result;
                }

                volume->read_thread_count = i + 1;
        }

        *new_volume = volume;
        return UDS_SUCCESS;
}

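/*
 * Every failure path above funnels through uds_free_volume(), which is why
 * that function (below) must tolerate a volume in any partially constructed
 * state, checking each member before releasing it.
 */
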
static void uninitialize_page_cache(struct page_cache *cache)
{
        u16 i;

        if (cache->cache != NULL) {
                for (i = 0; i < cache->cache_slots; i++)
                        release_page_buffer(&cache->cache[i]);
        }

        vdo_free(cache->index);
        vdo_free(cache->cache);
        vdo_free(cache->search_pending_counters);
        vdo_free(cache->read_queue);
}

void uds_free_volume(struct volume *volume)
{
        if (volume == NULL)
                return;

        if (volume->reader_threads != NULL) {
                unsigned int i;

                /* This works even if some threads were never started. */
                mutex_lock(&volume->read_threads_mutex);
                volume->read_threads_exiting = true;
                uds_broadcast_cond(&volume->read_threads_cond);
                mutex_unlock(&volume->read_threads_mutex);
                for (i = 0; i < volume->read_thread_count; i++)
                        vdo_join_threads(volume->reader_threads[i]);
                vdo_free(volume->reader_threads);
                volume->reader_threads = NULL;
        }

        /* The client must be destroyed after the cached pages are released. */
        uninitialize_page_cache(&volume->page_cache);
        uds_free_sparse_cache(volume->sparse_cache);
        if (volume->client != NULL)
                dm_bufio_client_destroy(vdo_forget(volume->client));

        uds_free_index_page_map(volume->index_page_map);
        uds_free_radix_sorter(volume->radix_sorter);
        vdo_free(volume->geometry);
        vdo_free(volume->record_pointers);
        vdo_free(volume);
}