Lines Matching full:vio
25 #include "data-vio.h"
35 #include "vio.h"
276 * @completion: The write vio.
281 container_of(as_vio(completion), struct slab_summary_block, vio); in finish_update()
289 * @completion: The write VIO.
294 container_of(as_vio(completion), struct slab_summary_block, vio); in handle_write_error()
303 struct vio *vio = bio->bi_private; in write_slab_summary_endio() local
305 container_of(vio, struct slab_summary_block, vio); in write_slab_summary_endio()
307 continue_vio_after_io(vio, finish_update, block->allocator->thread_id); in write_slab_summary_endio()
346 vdo_submit_metadata_vio(&block->vio, pbn, write_slab_summary_endio, in launch_write()
370 if (vdo_is_read_only(block->vio.completion.vdo)) { in update_slab_summary_entry()
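The matches from finish_update() through update_slab_summary_entry() trace the slab summary write path. The most common shape in this listing is the bio endio handler: the bio's private pointer is the vio, container_of() recovers the structure that embeds it, and continue_vio_after_io() re-queues the completion on the owning thread. A reconstruction of write_slab_summary_endio() from the matched lines above (unmatched lines, if any, are omitted; the sketch assumes the file's existing includes):

static void write_slab_summary_endio(struct bio *bio)
{
	/* The submitted vio was stashed in the bio's private pointer. */
	struct vio *vio = bio->bi_private;
	/* Recover the summary block that embeds this vio. */
	struct slab_summary_block *block =
		container_of(vio, struct slab_summary_block, vio);

	/* Resume processing on the block allocator's thread. */
	continue_vio_after_io(vio, finish_update, block->allocator->thread_id);
}

The same pattern repeats below for the slab journal, reference blocks, the scrubber, and the summary load/save paths.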
410 * reaping again in case we deferred reaping due to an outstanding vio.
411 * @completion: The flush vio.
425 * @completion: The flush vio.
436 struct vio *vio = bio->bi_private; in flush_endio() local
437 struct slab_journal *journal = vio->completion.parent; in flush_endio()
439 continue_vio_after_io(vio, complete_reaping, in flush_endio()
444 * flush_for_reaping() - A waiter callback for getting a vio with which to flush the lower layer
447 * @context: The newly acquired flush vio.
454 struct vio *vio = &pooled->vio; in flush_for_reaping() local
456 vio->completion.parent = journal; in flush_for_reaping()
457 vdo_submit_flush_vio(vio, flush_endio, handle_flush_error); in flush_for_reaping()
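flush_for_reaping() and flush_endio() show how the journal flushes the layer below before reaping: a waiter callback receives a pooled vio, records the journal as the completion's parent, and issues the flush; the endio then resumes with complete_reaping() on the allocator thread. A minimal sketch assembled from those matched lines (the vdo_waiter callback prototype and the flush_waiter field name are assumptions, not visible in the matches):

static void flush_for_reaping(struct vdo_waiter *waiter, void *context)
{
	/* Waiter field name assumed. */
	struct slab_journal *journal =
		container_of(waiter, struct slab_journal, flush_waiter);
	struct pooled_vio *pooled = context;
	struct vio *vio = &pooled->vio;

	/* flush_endio() finds the journal through the completion's parent. */
	vio->completion.parent = journal;
	vdo_submit_flush_vio(vio, flush_endio, handle_flush_error);
}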
679 static sequence_number_t get_committing_sequence_number(const struct pooled_vio *vio) in get_committing_sequence_number() argument
682 (const struct packed_slab_journal_block *) vio->vio.data; in get_committing_sequence_number()
689 * @completion: The write vio as a completion.
729 struct vio *vio = bio->bi_private; in write_slab_journal_endio() local
730 struct slab_journal *journal = vio->completion.parent; in write_slab_journal_endio()
732 continue_vio_after_io(vio, complete_write, journal->slab->allocator->thread_id); in write_slab_journal_endio()
737 * @waiter: The vio pool waiter which was just notified.
738 * @context: The vio pool entry for the write.
745 struct vio *vio = &pooled->vio; in write_slab_journal_block() local
757 /* Copy the tail block into the vio. */ in write_slab_journal_block()
758 memcpy(pooled->vio.data, journal->block, VDO_BLOCK_SIZE); in write_slab_journal_block()
773 vio->completion.parent = journal; in write_slab_journal_block()
780 vdo_submit_metadata_vio(vdo_forget(vio), block_number, write_slab_journal_endio, in write_slab_journal_block()
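write_slab_journal_block() applies the same pooled-vio pattern to a metadata write: copy the in-memory tail block into the vio's data buffer, record the journal as the completion's parent, and submit. A sketch under stated assumptions: the waiter field name, the block-number helper, the error handler, and the trailing arguments of vdo_submit_metadata_vio() are not visible in the matched lines and are marked hypothetical below.

static void write_slab_journal_block(struct vdo_waiter *waiter, void *context)
{
	struct pooled_vio *pooled = context;
	struct vio *vio = &pooled->vio;
	/* Waiter field name assumed. */
	struct slab_journal *journal =
		container_of(waiter, struct slab_journal, resource_waiter);
	/* Hypothetical helper; the real block-number computation is not in the matches. */
	physical_block_number_t block_number = slab_journal_block_number(journal);

	/* Copy the tail block into the vio. */
	memcpy(vio->data, journal->block, VDO_BLOCK_SIZE);

	vio->completion.parent = journal;
	/* Error handler and operation flags assumed. */
	vdo_submit_metadata_vio(vdo_forget(vio), block_number,
				write_slab_journal_endio,
				handle_journal_write_error, REQ_OP_WRITE);
}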
1034 * a VIO for it from the pool.
1063 * locks, and return its VIO to the pool.
1064 * @completion: The VIO that just finished writing.
1068 struct vio *vio = as_vio(completion); in finish_reference_block_write() local
1069 struct pooled_vio *pooled = vio_as_pooled_vio(vio); in finish_reference_block_write()
1155 struct vio *vio = bio->bi_private; in write_reference_block_endio() local
1156 struct reference_block *block = vio->completion.parent; in write_reference_block_endio()
1159 continue_vio_after_io(vio, finish_reference_block_write, thread_id); in write_reference_block_endio()
1164 * @completion: The VIO doing the I/O as a completion.
1169 struct vio *vio = as_vio(completion); in handle_io_error() local
1172 vio_record_metadata_io_error(vio); in handle_io_error()
1173 return_vio_to_pool(slab->allocator->vio_pool, vio_as_pooled_vio(vio)); in handle_io_error()
1180 * write_reference_block() - After a dirty block waiter has gotten a VIO from the VIO pool, copy
1181 * its counters and associated data into the VIO, and launch the write.
1183 * @context: The VIO returned by the pool.
1190 struct vdo_completion *completion = &pooled->vio.completion; in write_reference_block()
1194 pack_reference_block(block, pooled->vio.data); in write_reference_block()
1202 * moment. As long as VIO order is preserved, two VIOs updating this block at once will not in write_reference_block()
1216 vdo_submit_metadata_vio(&pooled->vio, pbn, write_reference_block_endio, in write_reference_block()
1683 * @waiter: The vio which should make an entry now.
1811 /* If the slab is over the blocking threshold, make the vio wait. */ in add_entries()
2236 * @completion: The VIO that just finished reading.
2240 struct vio *vio = as_vio(completion); in finish_reference_block_load() local
2241 struct pooled_vio *pooled = vio_as_pooled_vio(vio); in finish_reference_block_load()
2245 unpack_reference_block((struct packed_reference_block *) vio->data, block); in finish_reference_block_load()
2256 struct vio *vio = bio->bi_private; in load_reference_block_endio() local
2257 struct reference_block *block = vio->completion.parent; in load_reference_block_endio()
2259 continue_vio_after_io(vio, finish_reference_block_load, in load_reference_block_endio()
2264 * load_reference_block() - After a block waiter has gotten a VIO from the VIO pool, load the
2267 * @context: The VIO returned by the pool.
2272 struct vio *vio = &pooled->vio; in load_reference_block() local
2277 vio->completion.parent = block; in load_reference_block()
2278 vdo_submit_metadata_vio(vio, block->slab->ref_counts_origin + block_offset, in load_reference_block()
2410 struct vio *vio = as_vio(completion); in finish_loading_journal() local
2413 struct packed_slab_journal_block *block = (struct packed_slab_journal_block *) vio->data; in finish_loading_journal()
2433 return_vio_to_pool(slab->allocator->vio_pool, vio_as_pooled_vio(vio)); in finish_loading_journal()
2439 struct vio *vio = bio->bi_private; in read_slab_journal_tail_endio() local
2440 struct slab_journal *journal = vio->completion.parent; in read_slab_journal_tail_endio()
2442 continue_vio_after_io(vio, finish_loading_journal, in read_slab_journal_tail_endio()
2450 struct vio *vio = as_vio(completion); in handle_load_error() local
2452 vio_record_metadata_io_error(vio); in handle_load_error()
2453 return_vio_to_pool(journal->slab->allocator->vio_pool, vio_as_pooled_vio(vio)); in handle_load_error()
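finish_loading_journal() and handle_load_error() show the read-side counterpart: the callback recovers the vio from its completion, reads the packed block out of vio->data, and, on both success and error, hands the pooled vio back to the allocator's pool. A sketch of the error path using only calls visible in the matches (the parent-to-journal link is set in read_slab_journal_tail() below; lines of the real handler that do not mention "vio" are omitted):

static void handle_load_error(struct vdo_completion *completion)
{
	struct vio *vio = as_vio(completion);
	/* The parent was pointed at the journal before the read was submitted. */
	struct slab_journal *journal = completion->parent;

	vio_record_metadata_io_error(vio);
	return_vio_to_pool(journal->slab->allocator->vio_pool,
			   vio_as_pooled_vio(vio));
	/* Propagating the result and finishing the load are omitted. */
}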
2458 * read_slab_journal_tail() - Read the slab journal tail block by using a vio acquired from the vio
2460 * @waiter: The vio pool waiter which has just been notified.
2461 * @context: The vio pool entry given to the waiter.
2471 struct vio *vio = &pooled->vio; in read_slab_journal_tail() local
2483 vio->completion.parent = journal; in read_slab_journal_tail()
2484 vio->completion.callback_thread_id = slab->allocator->thread_id; in read_slab_journal_tail()
2485 vdo_submit_metadata_vio(vio, slab->journal_origin + tail_block, in read_slab_journal_tail()
2656 * uninitialize_scrubber_vio() - Clean up the slab_scrubber's vio.
2661 vdo_free(vdo_forget(scrubber->vio.data)); in uninitialize_scrubber_vio()
2662 free_vio_components(&scrubber->vio); in uninitialize_scrubber_vio()
2682 vdo_fail_completion(vdo_forget(scrubber->vio.completion.parent), result); in finish_scrubbing()
2733 container_of(as_vio(completion), struct slab_scrubber, vio); in slab_scrubbed()
2750 vdo_enter_read_only_mode(scrubber->vio.completion.vdo, result); in abort_scrubbing()
2760 struct vio *vio = as_vio(completion); in handle_scrubber_error() local
2762 vio_record_metadata_io_error(vio); in handle_scrubber_error()
2763 abort_scrubbing(container_of(vio, struct slab_scrubber, vio), in handle_scrubber_error()
2817 * apply_journal_entries() - Find the relevant vio of the slab journal and apply all valid entries.
2818 * @completion: The metadata read vio completion.
2826 container_of(as_vio(completion), struct slab_scrubber, vio); in apply_journal_entries()
2833 char *end_data = scrubber->vio.data + (end_index * VDO_BLOCK_SIZE); in apply_journal_entries()
2846 char *block_data = scrubber->vio.data + (index * VDO_BLOCK_SIZE); in apply_journal_entries()
2901 struct vio *vio = bio->bi_private; in read_slab_journal_endio() local
2902 struct slab_scrubber *scrubber = container_of(vio, struct slab_scrubber, vio); in read_slab_journal_endio()
2910 * @completion: The scrubber's vio completion.
2917 container_of(as_vio(completion), struct slab_scrubber, vio); in start_scrubbing()
2925 vdo_submit_metadata_vio(&scrubber->vio, slab->journal_origin, in start_scrubbing()
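Unlike the pooled single-block vios above, the scrubber owns one vio whose data buffer holds the entire slab journal: start_scrubbing() submits a single read at slab->journal_origin, and apply_journal_entries() then walks the buffer a block at a time using VDO_BLOCK_SIZE offsets into scrubber->vio.data. A sketch of that walk, using only the addressing shown in the matched lines (the loop bounds and the per-block processing are assumptions):

static void example_walk_scrubbed_journal(struct slab_scrubber *scrubber,
					  block_count_t block_count)
{
	char *block_data;
	/* One past the last journal block held in the scrubber's vio. */
	char *end_data = scrubber->vio.data + (block_count * VDO_BLOCK_SIZE);

	for (block_data = scrubber->vio.data; block_data < end_data;
	     block_data += VDO_BLOCK_SIZE) {
		/* Each VDO_BLOCK_SIZE chunk is one packed slab journal block;
		 * header checks and entry replay are omitted here. */
	}
}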
2936 struct vdo_completion *completion = &scrubber->vio.completion; in scrub_next_slab()
2977 scrubber->vio.completion.parent = parent; in scrub_slabs()
3946 journal_data, &scrubber->vio); in initialize_slab_scrubber()
3977 block->outgoing_entries, &block->vio); in initialize_slab_summary_block()
4207 free_vio_components(&allocator->summary_blocks[i].vio); in uninitialize_allocator_summary()
4443 * @completion: The vio which was used to write the summary data.
4462 struct vio *vio = bio->bi_private; in write_summary_endio() local
4463 struct vdo *vdo = vio->completion.vdo; in write_summary_endio()
4465 continue_vio_after_io(vio, finish_combining_zones, in write_summary_endio()
4508 * @completion: The vio which was used to read the summary data.
4529 struct vio *vio = bio->bi_private; in load_summary_endio() local
4530 struct vdo *vdo = vio->completion.vdo; in load_summary_endio()
4532 continue_vio_after_io(vio, finish_loading_summary, in load_summary_endio()
4544 struct vio *vio; in load_slab_summary() local
4552 (char *) depot->summary_entries, &vio); in load_slab_summary()
4560 finish_loading_summary(&vio->completion); in load_slab_summary()
4564 vdo_submit_metadata_vio(vio, depot->summary_origin, load_summary_endio, in load_slab_summary()
4800 "vio pool not busy"); in do_drain_step()