Lines matching "vdo" (full identifier matches)
38 #include "vdo.h"
47 * … the user bio's needs. This constant contains the aggregate of those flags; VDO strips all the other flags, as they would convey incorrect information.
51 * … these flags are only hints on IO importance. If VDO has finished the user bio, any remaining IO it performs for that request no longer needs those hints.
55 * The list below explains the action taken with each of those flags VDO could receive:
63 * * REQ_NOMERGE: Set only if the incoming bio was split; irrelevant to VDO IO.
64 * * REQ_IDLE: Set if the incoming bio had more IO quickly following; VDO's IO pattern doesn't match the incoming IO, so this flag would be incorrect on its own bios.
66 * * REQ_FUA: Handled separately, and irrelevant to VDO IO otherwise.
68 * * REQ_BACKGROUND: Not passed down, as VIOs are a limited resource and VDO needs them recycled as soon as possible to service heavy load, the only case where REQ_BACKGROUND would aid prioritization.
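
As a concrete illustration of the policy above, here is a minimal userspace sketch, not the driver's code: the REQ_* bit values and the PASSTHROUGH_FLAGS name are stand-ins modeled on this comment. Only the importance hints survive the mask, and everything is dropped once the user bio is done.

#include <stdio.h>

/* Illustrative stand-ins for the kernel's blk_opf_t bits (values hypothetical). */
#define REQ_SYNC       (1u << 0)
#define REQ_META       (1u << 1)
#define REQ_PRIO       (1u << 2)
#define REQ_IDLE       (1u << 3)
#define REQ_BACKGROUND (1u << 4)

/* Aggregate of the hints worth passing through, per the list above. */
#define PASSTHROUGH_FLAGS (REQ_SYNC | REQ_META | REQ_PRIO)

static unsigned int flags_for_vdo_bio(unsigned int user_flags, int user_bio_done)
{
	/* Once the user bio is acknowledged, importance hints no longer matter. */
	return user_bio_done ? 0 : (user_flags & PASSTHROUGH_FLAGS);
}

int main(void)
{
	unsigned int in = REQ_SYNC | REQ_IDLE | REQ_BACKGROUND;

	printf("0x%x\n", flags_for_vdo_bio(in, 0)); /* prints 0x1: only REQ_SYNC survives */
	return 0;
}
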
77 * The data_vio_pool maintains the pool of data_vios which a vdo uses to service incoming bios. For correctness, and to avoid expensive or blocking allocations in the IO path, the number of concurrently active data_vios is capped.
90 * … so that, in the common case, a submitting thread does not need to take the pool's lock. When a thread submits a bio to a vdo device, it will first attempt to acquire a free data_vio without taking that lock.
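
A minimal sketch of the acquisition pattern described above, assuming a simple atomic counter as the limiter (all names here are hypothetical, not the driver's): reserve a data_vio slot without the pool lock on the fast path, and fall back to the locked wait only when the pool is exhausted.

#include <stdatomic.h>
#include <stdbool.h>

struct limiter {
	atomic_uint busy;   /* data_vios currently in use */
	unsigned int limit; /* fixed pool size */
};

/* Fast path: claim a slot with a compare-and-swap, no lock taken. */
static bool try_acquire_data_vio(struct limiter *limiter)
{
	unsigned int busy = atomic_load(&limiter->busy);

	while (busy < limiter->limit) {
		if (atomic_compare_exchange_weak(&limiter->busy, &busy, busy + 1))
			return true;
		/* busy was refreshed by the failed CAS; retry. */
	}

	/* Pool exhausted: the caller must take the pool's lock and queue the bio. */
	return false;
}
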
244 struct vdo *vdo = vdo_from_data_vio(data_vio); in initialize_lbn_lock() local
252 lock->zone = &vdo->logical_zones->zones[zone_number]; in initialize_lbn_lock()
259 struct vdo *vdo = vdo_from_data_vio(data_vio); in launch_locked_request() local
261 if (vdo_is_read_only(vdo)) { in launch_locked_request()
273 struct vdo *vdo = vdo_from_data_vio(data_vio); in acknowledge_data_vio() local
285 vdo_count_bios(&vdo->stats.bios_acknowledged, bio); in acknowledge_data_vio()
287 vdo_count_bios(&vdo->stats.bios_acknowledged_partial, bio); in acknowledge_data_vio()
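
The two counters updated here suggest the following accounting pattern, a sketch with hypothetical names: every acknowledged user bio is counted, and bios smaller than a full block are additionally tallied as partial.

#include <stdatomic.h>
#include <stdbool.h>

struct ack_stats {
	atomic_ulong bios_acknowledged;
	atomic_ulong bios_acknowledged_partial;
};

static void count_acknowledgment(struct ack_stats *stats, bool is_partial)
{
	atomic_fetch_add(&stats->bios_acknowledged, 1);
	if (is_partial)
		atomic_fetch_add(&stats->bios_acknowledged_partial, 1);
}
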
422 struct vdo *vdo = vdo_from_data_vio(data_vio); in attempt_logical_block_lock() local
428 if (data_vio->logical.lbn >= vdo->states.vdo.config.logical_blocks) { in attempt_logical_block_lock()
534 static void launch_bio(struct vdo *vdo, struct data_vio *data_vio, struct bio *bio) in launch_bio() argument
558 vdo_count_bios(&vdo->stats.bios_in_partial, bio); in launch_bio()
562 vdo_count_bios(&vdo->stats.bios_in_partial, bio); in launch_bio()
581 lbn = (bio->bi_iter.bi_sector - vdo->starting_sector_offset) / VDO_SECTORS_PER_BLOCK; in launch_bio()
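
The logical block number computation above is plain sector arithmetic. A standalone sketch, assuming 512-byte sectors and 4096-byte VDO blocks (so VDO_SECTORS_PER_BLOCK is 8) and a hypothetical starting offset:

#include <stdio.h>
#include <stdint.h>

#define VDO_SECTORS_PER_BLOCK 8 /* 4096-byte blocks / 512-byte sectors */

int main(void)
{
	uint64_t bi_sector = 2064;            /* first sector of the incoming bio */
	uint64_t starting_sector_offset = 16; /* hypothetical start of vdo's data */

	uint64_t lbn = (bi_sector - starting_sector_offset) / VDO_SECTORS_PER_BLOCK;

	printf("lbn = %llu\n", (unsigned long long) lbn); /* (2064 - 16) / 8 = 256 */
	return 0;
}
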
589 launch_bio(limiter->pool->completion.vdo, data_vio, bio); in assign_data_vio()
786 static int initialize_data_vio(struct data_vio *data_vio, struct vdo *vdo) in initialize_data_vio() argument
816 vdo_initialize_completion(&data_vio->decrement_completion, vdo, in initialize_data_vio()
818 initialize_vio(&data_vio->vio, bio, 1, VIO_TYPE_DATA, VIO_PRIORITY_DATA, vdo); in initialize_data_vio()
836 * @vdo: The vdo to which the pool will belong.
841 int make_data_vio_pool(struct vdo *vdo, data_vio_count_t pool_size, in make_data_vio_pool() argument
863 vdo_initialize_completion(&pool->completion, vdo, VDO_DATA_VIO_POOL_COMPLETION); in make_data_vio_pool()
865 process_release_callback, vdo->thread_config.cpu_thread, in make_data_vio_pool()
877 result = initialize_data_vio(data_vio, vdo); in make_data_vio_pool()
982 launch_bio(pool->completion.vdo, data_vio, bio); in vdo_launch_bio()
999 static void assert_on_vdo_cpu_thread(const struct vdo *vdo, const char *name) in assert_on_vdo_cpu_thread() argument
1001 VDO_ASSERT_LOG_ONLY((vdo_get_callback_thread_id() == vdo->thread_config.cpu_thread), in assert_on_vdo_cpu_thread()
1011 assert_on_vdo_cpu_thread(completion->vdo, __func__); in drain_data_vio_pool()
1022 assert_on_vdo_cpu_thread(completion->vdo, __func__); in resume_data_vio_pool()
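
Both drain and resume funnel through the CPU-thread assertion above. A sketch of that pattern with hypothetical types: the pool records the one thread allowed to run these operations, and a debug assert verifies the caller.

#include <assert.h>

typedef int thread_id_t;

/* In this sketch, the work-queue runner sets this for the current thread. */
static thread_id_t current_callback_thread;

static void assert_on_thread(thread_id_t expected, const char *name)
{
	(void) name; /* the real VDO_ASSERT_LOG_ONLY() logs the name on failure */
	assert(current_callback_thread == expected);
}
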
1286 struct data_vio_pool *pool = completion->vdo->data_vio_pool; in finish_cleanup()
1306 struct vdo *vdo = vdo_from_data_vio(data_vio); in perform_cleanup_stage() local
1326 (READ_ONCE(vdo->read_only_notifier.read_only_error) == VDO_SUCCESS) && in perform_cleanup_stage()
1328 vdo_log_warning("VDO not read-only when cleaning data_vio with RJ lock"); in perform_cleanup_stage()
1352 if (vdo_is_read_only(completion->vdo)) in enter_read_only_mode()
1367 vdo_enter_read_only_mode(completion->vdo, completion->result); in enter_read_only_mode()
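
The read-only handling above (check first, record the error once) can be sketched as a first-error latch. This is an assumption about the mechanism, with hypothetical names, consistent with the READ_ONCE() check of read_only_error against VDO_SUCCESS seen earlier.

#include <stdatomic.h>
#include <stdbool.h>

#define VDO_SUCCESS 0

struct read_only_notifier {
	atomic_int read_only_error; /* VDO_SUCCESS until the first failure */
};

static bool is_read_only(struct read_only_notifier *notifier)
{
	return atomic_load(&notifier->read_only_error) != VDO_SUCCESS;
}

/* Record only the first error; later failures keep the original cause. */
static void enter_read_only_mode(struct read_only_notifier *notifier, int result)
{
	int expected = VDO_SUCCESS;

	atomic_compare_exchange_strong(&notifier->read_only_error, &expected, result);
}
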
1712 vdo_add_recovery_journal_entry(completion->vdo->recovery_journal, data_vio); in journal_remapping()
1814 * 2) A data_vio should not be compressed when compression is disabled for the vdo. in launch_compress_data_vio()
1932 struct vdo *vdo = completion->vdo; in acknowledge_write_callback() local
1934 VDO_ASSERT_LOG_ONLY((!vdo_uses_bio_ack_queue(vdo) || in acknowledge_write_callback()
1935 (vdo_get_callback_thread_id() == vdo->thread_config.bio_ack_thread)), in acknowledge_write_callback()
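
The assertion above is conditional: it only pins acknowledgment to a specific thread when a dedicated bio-ack queue is configured. A one-line sketch of that predicate (hypothetical names):

#include <stdbool.h>

typedef int thread_id_t;

static bool on_correct_ack_thread(bool uses_bio_ack_queue,
				  thread_id_t current, thread_id_t bio_ack_thread)
{
	/* With no dedicated ack queue, any callback thread may acknowledge. */
	return !uses_bio_ack_queue || (current == bio_ack_thread);
}
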