/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright 2023 Red Hat
 */

#ifndef DATA_VIO_H
#define DATA_VIO_H

#include <linux/atomic.h>
#include <linux/bio.h>
#include <linux/list.h>

#include "permassert.h"

#include "indexer.h"

#include "block-map.h"
#include "completion.h"
#include "constants.h"
#include "dedupe.h"
#include "encodings.h"
#include "logical-zone.h"
#include "physical-zone.h"
#include "types.h"
#include "vdo.h"
#include "vio.h"
#include "wait-queue.h"

/* Codes for describing the last asynchronous operation performed on a vio. */
enum async_operation_number {
	MIN_VIO_ASYNC_OPERATION_NUMBER,
	VIO_ASYNC_OP_LAUNCH = MIN_VIO_ASYNC_OPERATION_NUMBER,
	VIO_ASYNC_OP_ACKNOWLEDGE_WRITE,
	VIO_ASYNC_OP_ACQUIRE_VDO_HASH_LOCK,
	VIO_ASYNC_OP_ATTEMPT_LOGICAL_BLOCK_LOCK,
	VIO_ASYNC_OP_LOCK_DUPLICATE_PBN,
	VIO_ASYNC_OP_CHECK_FOR_DUPLICATION,
	VIO_ASYNC_OP_CLEANUP,
	VIO_ASYNC_OP_COMPRESS_DATA_VIO,
	VIO_ASYNC_OP_FIND_BLOCK_MAP_SLOT,
	VIO_ASYNC_OP_GET_MAPPED_BLOCK_FOR_READ,
	VIO_ASYNC_OP_GET_MAPPED_BLOCK_FOR_WRITE,
	VIO_ASYNC_OP_HASH_DATA_VIO,
	VIO_ASYNC_OP_JOURNAL_REMAPPING,
	VIO_ASYNC_OP_ATTEMPT_PACKING,
	VIO_ASYNC_OP_PUT_MAPPED_BLOCK,
	VIO_ASYNC_OP_READ_DATA_VIO,
	VIO_ASYNC_OP_UPDATE_DEDUPE_INDEX,
	VIO_ASYNC_OP_UPDATE_REFERENCE_COUNTS,
	VIO_ASYNC_OP_VERIFY_DUPLICATION,
	VIO_ASYNC_OP_WRITE_DATA_VIO,
	MAX_VIO_ASYNC_OPERATION_NUMBER,
} __packed;

struct lbn_lock {
	logical_block_number_t lbn;
	bool locked;
	struct vdo_wait_queue waiters;
	struct logical_zone *zone;
};

/* A position in the arboreal block map at a specific level. */
struct block_map_tree_slot {
	page_number_t page_index;
	struct block_map_slot block_map_slot;
};

/* Fields for using the arboreal block map. */
struct tree_lock {
	/* The current height at which this data_vio is operating */
	height_t height;
	/* The index of the block map tree root for this LBN */
	root_count_t root_index;
	/* Whether we hold a page lock */
	bool locked;
	/* The key for the lock map */
	u64 key;
	/* The queue of waiters for the page this vio is allocating or loading */
	struct vdo_wait_queue waiters;
	/* The block map tree slots for this LBN */
	struct block_map_tree_slot tree_slots[VDO_BLOCK_MAP_TREE_HEIGHT + 1];
};

struct zoned_pbn {
	physical_block_number_t pbn;
	enum block_mapping_state state;
	struct physical_zone *zone;
};

/*
 * Where a data_vio is on the compression path; advance_data_vio_compression_stage() depends on
 * the order of this enum.
 */
enum data_vio_compression_stage {
	/* A data_vio which has not yet entered the compression path */
	DATA_VIO_PRE_COMPRESSOR,
	/* A data_vio which is in the compressor */
	DATA_VIO_COMPRESSING,
	/* A data_vio which is blocked in the packer */
	DATA_VIO_PACKING,
	/* A data_vio which is no longer on the compression path (and never will be) */
	DATA_VIO_POST_PACKER,
};

struct data_vio_compression_status {
	enum data_vio_compression_stage stage;
	bool may_not_compress;
};

struct compression_state {
	/*
	 * The current compression status of this data_vio. This field contains a value which
	 * consists of a data_vio_compression_stage and a flag indicating whether a request has
	 * been made to cancel (or prevent) compression for this data_vio.
	 *
	 * This field should be accessed through the get_data_vio_compression_status() and
	 * set_data_vio_compression_status() methods. It should not be accessed directly.
	 */
	atomic_t status;

	/* The compressed size of this block */
	u16 size;

	/* The packer input or output bin slot which holds the enclosing data_vio */
	slot_number_t slot;

	/* The packer bin to which the enclosing data_vio has been assigned */
	struct packer_bin *bin;

	/* A link in the chain of data_vios which have been packed together */
	struct data_vio *next_in_batch;

	/* A vio which is blocked in the packer while holding a lock this vio needs. */
	struct data_vio *lock_holder;

	/*
	 * The compressed block used to hold the compressed form of this block and that of any
	 * other blocks for which this data_vio is the compressed write agent.
	 */
	struct compressed_block *block;
};
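
/*
 * Illustrative sketch (not part of the original header): the compression.status field is only
 * read through get_data_vio_compression_status(), which decodes the packed atomic value into a
 * struct data_vio_compression_status. A caller deciding whether a data_vio is still eligible for
 * packing might look roughly like this (the helper name may_pack() is hypothetical):
 *
 *	static bool may_pack(struct data_vio *data_vio)
 *	{
 *		struct data_vio_compression_status status =
 *			get_data_vio_compression_status(data_vio);
 *
 *		return ((status.stage == DATA_VIO_COMPRESSING) && !status.may_not_compress);
 *	}
 */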

/* Fields supporting allocation of data blocks. */
struct allocation {
	/* The physical zone in which to allocate a physical block */
	struct physical_zone *zone;

	/* The block allocated to this vio */
	physical_block_number_t pbn;

	/*
	 * If non-NULL, the pooled PBN lock held on the allocated block. Must be a write lock until
	 * the block has been written, after which it will become a read lock.
	 */
	struct pbn_lock *lock;

	/* The type of write lock to obtain on the allocated block */
	enum pbn_lock_type write_lock_type;

	/* The zone which was the start of the current allocation cycle */
	zone_count_t first_allocation_zone;

	/* Whether this vio should wait for a clean slab */
	bool wait_for_clean_slab;
};

struct reference_updater {
	enum journal_operation operation;
	bool increment;
	struct zoned_pbn zpbn;
	struct pbn_lock *lock;
	struct vdo_waiter waiter;
};

/* A vio for processing user data requests. */
struct data_vio {
	/* The vdo_wait_queue entry structure */
	struct vdo_waiter waiter;

	/* The logical block of this request */
	struct lbn_lock logical;

	/* The state for traversing the block map tree */
	struct tree_lock tree_lock;

	/* The current partition address of this block */
	struct zoned_pbn mapped;

	/* The hash of this vio (if not zero) */
	struct uds_record_name record_name;

	/* Used for logging and debugging */
	enum async_operation_number last_async_operation;

	/* The operations to record in the recovery and slab journals */
	struct reference_updater increment_updater;
	struct reference_updater decrement_updater;

	u16 read : 1;
	u16 write : 1;
	u16 fua : 1;
	u16 is_zero : 1;
	u16 is_discard : 1;
	u16 is_partial : 1;
	u16 is_duplicate : 1;
	u16 first_reference_operation_complete : 1;
	u16 downgrade_allocation_lock : 1;

	struct allocation allocation;

	/*
	 * Whether this vio has received an allocation. This field is examined from threads not in
	 * the allocation zone.
	 */
	bool allocation_succeeded;

	/* The new partition address of this block after the vio write completes */
	struct zoned_pbn new_mapped;

	/* The hash zone responsible for the name (NULL if is_zero) */
	struct hash_zone *hash_zone;

	/* The lock this vio holds or shares with other vios with the same data */
	struct hash_lock *hash_lock;

	/* All data_vios sharing a hash lock are kept in a list linking these list entries */
	struct list_head hash_lock_entry;

	/* The block number in the partition of the UDS deduplication advice */
	struct zoned_pbn duplicate;

	/*
	 * The sequence number of the recovery journal block containing the increment entry for
	 * this vio.
	 */
	sequence_number_t recovery_sequence_number;

	/* The point in the recovery journal where this write last made an entry */
	struct journal_point recovery_journal_point;

	/* The list of vios in user initiated write requests */
	struct list_head write_entry;

	/* The generation number of the VDO that this vio belongs to */
	sequence_number_t flush_generation;

	/* The completion to use for fetching block map pages for this vio */
	struct vdo_page_completion page_completion;

	/* The user bio that initiated this VIO */
	struct bio *user_bio;

	/* partial block support */
	block_size_t offset;

	/*
	 * The number of bytes to be discarded. For discards, this field will always be positive,
	 * whereas for non-discards it will always be 0. Hence it can be used to determine whether
	 * a data_vio is processing a discard, even after the user_bio has been acknowledged.
	 */
	u32 remaining_discard;

	struct dedupe_context *dedupe_context;

	/* Fields beyond this point will not be reset when a pooled data_vio is reused. */

	struct vio vio;

	/* The completion for making reference count decrements */
	struct vdo_completion decrement_completion;

	/* All of the fields necessary for the compression path */
	struct compression_state compression;

	/* A block used as output during compression or uncompression */
	char *scratch_block;

	struct list_head pool_entry;
};

static inline struct data_vio *vio_as_data_vio(struct vio *vio)
{
	VDO_ASSERT_LOG_ONLY((vio->type == VIO_TYPE_DATA), "vio is a data_vio");
	return container_of(vio, struct data_vio, vio);
}

static inline struct data_vio *as_data_vio(struct vdo_completion *completion)
{
	return vio_as_data_vio(as_vio(completion));
}

static inline struct data_vio *vdo_waiter_as_data_vio(struct vdo_waiter *waiter)
{
	if (waiter == NULL)
		return NULL;

	return container_of(waiter, struct data_vio, waiter);
}

static inline struct data_vio *data_vio_from_reference_updater(struct reference_updater *updater)
{
	if (updater->increment)
		return container_of(updater, struct data_vio, increment_updater);

	return container_of(updater, struct data_vio, decrement_updater);
}

static inline bool data_vio_has_flush_generation_lock(struct data_vio *data_vio)
{
	return !list_empty(&data_vio->write_entry);
}

static inline struct vdo *vdo_from_data_vio(struct data_vio *data_vio)
{
	return data_vio->vio.completion.vdo;
}

static inline bool data_vio_has_allocation(struct data_vio *data_vio)
{
	return (data_vio->allocation.pbn != VDO_ZERO_BLOCK);
}
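
/*
 * Illustrative sketch (not part of the original header): every data_vio callback receives a
 * struct vdo_completion and recovers its data_vio with the converters above. A typical callback
 * prologue (the callback name finish_example_step() is hypothetical) looks like:
 *
 *	static void finish_example_step(struct vdo_completion *completion)
 *	{
 *		struct data_vio *data_vio = as_data_vio(completion);
 *
 *		assert_data_vio_in_logical_zone(data_vio);
 *		...
 *	}
 */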

struct data_vio_compression_status __must_check
advance_data_vio_compression_stage(struct data_vio *data_vio);
struct data_vio_compression_status __must_check
get_data_vio_compression_status(struct data_vio *data_vio);
bool cancel_data_vio_compression(struct data_vio *data_vio);

struct data_vio_pool;

int make_data_vio_pool(struct vdo *vdo, data_vio_count_t pool_size,
		       data_vio_count_t discard_limit, struct data_vio_pool **pool_ptr);
void free_data_vio_pool(struct data_vio_pool *pool);
void vdo_launch_bio(struct data_vio_pool *pool, struct bio *bio);
void drain_data_vio_pool(struct data_vio_pool *pool, struct vdo_completion *completion);
void resume_data_vio_pool(struct data_vio_pool *pool, struct vdo_completion *completion);

void dump_data_vio_pool(struct data_vio_pool *pool, bool dump_vios);
data_vio_count_t get_data_vio_pool_active_discards(struct data_vio_pool *pool);
data_vio_count_t get_data_vio_pool_discard_limit(struct data_vio_pool *pool);
data_vio_count_t get_data_vio_pool_maximum_discards(struct data_vio_pool *pool);
int __must_check set_data_vio_pool_discard_limit(struct data_vio_pool *pool,
						 data_vio_count_t limit);
data_vio_count_t get_data_vio_pool_active_requests(struct data_vio_pool *pool);
data_vio_count_t get_data_vio_pool_request_limit(struct data_vio_pool *pool);
data_vio_count_t get_data_vio_pool_maximum_requests(struct data_vio_pool *pool);
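
/*
 * Illustrative sketch (not part of the original header): a vdo owns a single data_vio_pool and
 * feeds incoming bios to it. The pool and discard sizes below are hypothetical example values,
 * not recommendations:
 *
 *	struct data_vio_pool *pool;
 *	int result = make_data_vio_pool(vdo, 2048, 1500, &pool);
 *
 *	if (result != VDO_SUCCESS)
 *		return result;
 *
 *	vdo_launch_bio(pool, bio);
 *
 * vdo_launch_bio() assigns the bio to a pooled data_vio and launches it; the call may have to
 * wait for a data_vio to become available.
 */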

void complete_data_vio(struct vdo_completion *completion);
void handle_data_vio_error(struct vdo_completion *completion);

static inline void continue_data_vio(struct data_vio *data_vio)
{
	vdo_launch_completion(&data_vio->vio.completion);
}

/**
 * continue_data_vio_with_error() - Set an error code and then continue processing a data_vio.
 *
 * This will not mask older errors. This function can be called with a success code, but it is more
 * efficient to call continue_data_vio() if the caller knows the result was a success.
 */
static inline void continue_data_vio_with_error(struct data_vio *data_vio, int result)
{
	vdo_continue_completion(&data_vio->vio.completion, result);
}
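
/*
 * Illustrative sketch (not part of the original header): a step which can fail typically checks
 * its local result and hands the data_vio back to the state machine with one of the two helpers
 * above. Both attempt_example_step() and do_example_work() are hypothetical names:
 *
 *	static void attempt_example_step(struct vdo_completion *completion)
 *	{
 *		struct data_vio *data_vio = as_data_vio(completion);
 *		int result = do_example_work(data_vio);
 *
 *		if (result != VDO_SUCCESS) {
 *			continue_data_vio_with_error(data_vio, result);
 *			return;
 *		}
 *
 *		continue_data_vio(data_vio);
 *	}
 */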

const char * __must_check get_data_vio_operation_name(struct data_vio *data_vio);

static inline void assert_data_vio_in_hash_zone(struct data_vio *data_vio)
{
	thread_id_t expected = data_vio->hash_zone->thread_id;
	thread_id_t thread_id = vdo_get_callback_thread_id();
	/*
	 * It's odd to use the LBN, but converting the record name to hex is a bit clunky for an
	 * inline, and the LBN is better than nothing as an identifier.
	 */
	VDO_ASSERT_LOG_ONLY((expected == thread_id),
			    "data_vio for logical block %llu on thread %u, should be on hash zone thread %u",
			    (unsigned long long) data_vio->logical.lbn, thread_id, expected);
}

static inline void set_data_vio_hash_zone_callback(struct data_vio *data_vio,
						   vdo_action_fn callback)
{
	vdo_set_completion_callback(&data_vio->vio.completion, callback,
				    data_vio->hash_zone->thread_id);
}

/**
 * launch_data_vio_hash_zone_callback() - Set a callback as a hash zone operation and invoke it
 *					  immediately.
 */
static inline void launch_data_vio_hash_zone_callback(struct data_vio *data_vio,
						       vdo_action_fn callback)
{
	set_data_vio_hash_zone_callback(data_vio, callback);
	vdo_launch_completion(&data_vio->vio.completion);
}
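
/*
 * Illustrative sketch (not part of the original header): the set and launch helpers in this file
 * all follow the same pattern: pick the thread that owns the relevant zone, set the completion's
 * callback to run there, and (for the launch variants) enqueue the completion immediately. For
 * example, a step that has just computed the record name might continue in the hash zone with a
 * call such as (acquire_example_hash_lock() is a hypothetical next step):
 *
 *	data_vio->last_async_operation = VIO_ASYNC_OP_ACQUIRE_VDO_HASH_LOCK;
 *	launch_data_vio_hash_zone_callback(data_vio, acquire_example_hash_lock);
 */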

static inline void assert_data_vio_in_logical_zone(struct data_vio *data_vio)
{
	thread_id_t expected = data_vio->logical.zone->thread_id;
	thread_id_t thread_id = vdo_get_callback_thread_id();

	VDO_ASSERT_LOG_ONLY((expected == thread_id),
			    "data_vio for logical block %llu on thread %u, should be on thread %u",
			    (unsigned long long) data_vio->logical.lbn, thread_id, expected);
}

static inline void set_data_vio_logical_callback(struct data_vio *data_vio,
						 vdo_action_fn callback)
{
	vdo_set_completion_callback(&data_vio->vio.completion, callback,
				    data_vio->logical.zone->thread_id);
}

/**
 * launch_data_vio_logical_callback() - Set a callback as a logical block operation and invoke it
 *					immediately.
 */
static inline void launch_data_vio_logical_callback(struct data_vio *data_vio,
						     vdo_action_fn callback)
{
	set_data_vio_logical_callback(data_vio, callback);
	vdo_launch_completion(&data_vio->vio.completion);
}

static inline void assert_data_vio_in_allocated_zone(struct data_vio *data_vio)
{
	thread_id_t expected = data_vio->allocation.zone->thread_id;
	thread_id_t thread_id = vdo_get_callback_thread_id();

	VDO_ASSERT_LOG_ONLY((expected == thread_id),
			    "struct data_vio for allocated physical block %llu on thread %u, should be on thread %u",
			    (unsigned long long) data_vio->allocation.pbn, thread_id,
			    expected);
}

static inline void set_data_vio_allocated_zone_callback(struct data_vio *data_vio,
							 vdo_action_fn callback)
{
	vdo_set_completion_callback(&data_vio->vio.completion, callback,
				    data_vio->allocation.zone->thread_id);
}

/**
 * launch_data_vio_allocated_zone_callback() - Set a callback as a physical block operation in a
 *					       data_vio's allocated zone and invoke it immediately.
 */
static inline void launch_data_vio_allocated_zone_callback(struct data_vio *data_vio,
							    vdo_action_fn callback)
{
	set_data_vio_allocated_zone_callback(data_vio, callback);
	vdo_launch_completion(&data_vio->vio.completion);
}

static inline void assert_data_vio_in_duplicate_zone(struct data_vio *data_vio)
{
	thread_id_t expected = data_vio->duplicate.zone->thread_id;
	thread_id_t thread_id = vdo_get_callback_thread_id();

	VDO_ASSERT_LOG_ONLY((expected == thread_id),
			    "data_vio for duplicate physical block %llu on thread %u, should be on thread %u",
			    (unsigned long long) data_vio->duplicate.pbn, thread_id,
			    expected);
}

static inline void set_data_vio_duplicate_zone_callback(struct data_vio *data_vio,
							 vdo_action_fn callback)
{
	vdo_set_completion_callback(&data_vio->vio.completion, callback,
				    data_vio->duplicate.zone->thread_id);
}

/**
 * launch_data_vio_duplicate_zone_callback() - Set a callback as a physical block operation in a
 *					       data_vio's duplicate zone and invoke it immediately.
 */
static inline void launch_data_vio_duplicate_zone_callback(struct data_vio *data_vio,
							    vdo_action_fn callback)
{
	set_data_vio_duplicate_zone_callback(data_vio, callback);
	vdo_launch_completion(&data_vio->vio.completion);
}

static inline void assert_data_vio_in_mapped_zone(struct data_vio *data_vio)
{
	thread_id_t expected = data_vio->mapped.zone->thread_id;
	thread_id_t thread_id = vdo_get_callback_thread_id();

	VDO_ASSERT_LOG_ONLY((expected == thread_id),
			    "data_vio for mapped physical block %llu on thread %u, should be on thread %u",
			    (unsigned long long) data_vio->mapped.pbn, thread_id, expected);
}

static inline void set_data_vio_mapped_zone_callback(struct data_vio *data_vio,
						      vdo_action_fn callback)
{
	vdo_set_completion_callback(&data_vio->vio.completion, callback,
				    data_vio->mapped.zone->thread_id);
}

static inline void assert_data_vio_in_new_mapped_zone(struct data_vio *data_vio)
{
	thread_id_t expected = data_vio->new_mapped.zone->thread_id;
	thread_id_t thread_id = vdo_get_callback_thread_id();

	VDO_ASSERT_LOG_ONLY((expected == thread_id),
			    "data_vio for new_mapped physical block %llu on thread %u, should be on thread %u",
			    (unsigned long long) data_vio->new_mapped.pbn, thread_id,
			    expected);
}

static inline void set_data_vio_new_mapped_zone_callback(struct data_vio *data_vio,
							  vdo_action_fn callback)
{
	vdo_set_completion_callback(&data_vio->vio.completion, callback,
				    data_vio->new_mapped.zone->thread_id);
}

static inline void assert_data_vio_in_journal_zone(struct data_vio *data_vio)
{
	thread_id_t journal_thread = vdo_from_data_vio(data_vio)->thread_config.journal_thread;
	thread_id_t thread_id = vdo_get_callback_thread_id();

	VDO_ASSERT_LOG_ONLY((journal_thread == thread_id),
			    "data_vio for logical block %llu on thread %u, should be on journal thread %u",
			    (unsigned long long) data_vio->logical.lbn, thread_id,
			    journal_thread);
}

static inline void set_data_vio_journal_callback(struct data_vio *data_vio,
						 vdo_action_fn callback)
{
	thread_id_t journal_thread = vdo_from_data_vio(data_vio)->thread_config.journal_thread;

	vdo_set_completion_callback(&data_vio->vio.completion, callback, journal_thread);
}

/**
 * launch_data_vio_journal_callback() - Set a callback as a journal operation and invoke it
 *					immediately.
 */
static inline void launch_data_vio_journal_callback(struct data_vio *data_vio,
						     vdo_action_fn callback)
{
	set_data_vio_journal_callback(data_vio, callback);
	vdo_launch_completion(&data_vio->vio.completion);
}

static inline void assert_data_vio_in_packer_zone(struct data_vio *data_vio)
{
	thread_id_t packer_thread = vdo_from_data_vio(data_vio)->thread_config.packer_thread;
	thread_id_t thread_id = vdo_get_callback_thread_id();

	VDO_ASSERT_LOG_ONLY((packer_thread == thread_id),
			    "data_vio for logical block %llu on thread %u, should be on packer thread %u",
			    (unsigned long long) data_vio->logical.lbn, thread_id,
			    packer_thread);
}

static inline void set_data_vio_packer_callback(struct data_vio *data_vio,
						vdo_action_fn callback)
{
	thread_id_t packer_thread = vdo_from_data_vio(data_vio)->thread_config.packer_thread;

	vdo_set_completion_callback(&data_vio->vio.completion, callback, packer_thread);
}

/**
 * launch_data_vio_packer_callback() - Set a callback as a packer operation and invoke it
 *				       immediately.
 */
static inline void launch_data_vio_packer_callback(struct data_vio *data_vio,
						    vdo_action_fn callback)
{
	set_data_vio_packer_callback(data_vio, callback);
	vdo_launch_completion(&data_vio->vio.completion);
}

static inline void assert_data_vio_on_cpu_thread(struct data_vio *data_vio)
{
	thread_id_t cpu_thread = vdo_from_data_vio(data_vio)->thread_config.cpu_thread;
	thread_id_t thread_id = vdo_get_callback_thread_id();

	VDO_ASSERT_LOG_ONLY((cpu_thread == thread_id),
			    "data_vio for logical block %llu on thread %u, should be on cpu thread %u",
			    (unsigned long long) data_vio->logical.lbn, thread_id,
			    cpu_thread);
}

static inline void set_data_vio_cpu_callback(struct data_vio *data_vio,
					     vdo_action_fn callback)
{
	thread_id_t cpu_thread = vdo_from_data_vio(data_vio)->thread_config.cpu_thread;

	vdo_set_completion_callback(&data_vio->vio.completion, callback, cpu_thread);
}

/**
 * launch_data_vio_cpu_callback() - Set a callback to run on the CPU queues and invoke it
 *				    immediately.
 */
static inline void launch_data_vio_cpu_callback(struct data_vio *data_vio,
						vdo_action_fn callback,
						enum vdo_completion_priority priority)
{
	set_data_vio_cpu_callback(data_vio, callback);
	vdo_launch_completion_with_priority(&data_vio->vio.completion, priority);
}
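
/*
 * Illustrative sketch (not part of the original header): CPU-bound work such as hashing is handed
 * off with an explicit queue priority. Assuming a CPU-queue priority constant along the lines of
 * CPU_Q_HASH_BLOCK_PRIORITY and a hypothetical hash_example_block() action, the hand-off would
 * look like:
 *
 *	data_vio->last_async_operation = VIO_ASYNC_OP_HASH_DATA_VIO;
 *	launch_data_vio_cpu_callback(data_vio, hash_example_block,
 *				     CPU_Q_HASH_BLOCK_PRIORITY);
 */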

static inline void set_data_vio_bio_zone_callback(struct data_vio *data_vio,
						  vdo_action_fn callback)
{
	vdo_set_completion_callback(&data_vio->vio.completion, callback,
				    get_vio_bio_zone_thread_id(&data_vio->vio));
}

/**
 * launch_data_vio_bio_zone_callback() - Set a callback as a bio zone operation and invoke it
 *					 immediately.
 */
static inline void launch_data_vio_bio_zone_callback(struct data_vio *data_vio,
						      vdo_action_fn callback)
{
	set_data_vio_bio_zone_callback(data_vio, callback);
	vdo_launch_completion_with_priority(&data_vio->vio.completion,
					    BIO_Q_DATA_PRIORITY);
}

/**
 * launch_data_vio_on_bio_ack_queue() - If the vdo uses a bio_ack queue, set a callback to run on
 *					it and invoke it immediately; otherwise, just run the
 *					callback on the current thread.
 */
static inline void launch_data_vio_on_bio_ack_queue(struct data_vio *data_vio,
						    vdo_action_fn callback)
{
	struct vdo_completion *completion = &data_vio->vio.completion;
	struct vdo *vdo = completion->vdo;

	if (!vdo_uses_bio_ack_queue(vdo)) {
		callback(completion);
		return;
	}

	vdo_set_completion_callback(completion, callback,
				    vdo->thread_config.bio_ack_thread);
	vdo_launch_completion_with_priority(completion, BIO_ACK_Q_ACK_PRIORITY);
}

void data_vio_allocate_data_block(struct data_vio *data_vio,
				  enum pbn_lock_type write_lock_type,
				  vdo_action_fn callback, vdo_action_fn error_handler);

void release_data_vio_allocation_lock(struct data_vio *data_vio, bool reset);
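
/*
 * Illustrative sketch (not part of the original header): a write which needs a new physical block
 * requests one in its allocation zone and supplies both a success callback and an error handler.
 * The callback names write_example_block() and handle_example_allocation_failure() are
 * hypothetical, and the lock type assumes the VIO_WRITE_LOCK value of enum pbn_lock_type:
 *
 *	data_vio_allocate_data_block(data_vio, VIO_WRITE_LOCK, write_example_block,
 *				     handle_example_allocation_failure);
 *
 * The matching release_data_vio_allocation_lock() call returns the pooled PBN lock once the
 * data_vio no longer needs it.
 */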

int __must_check uncompress_data_vio(struct data_vio *data_vio,
				     enum block_mapping_state mapping_state,
				     char *buffer);

void update_metadata_for_data_vio_write(struct data_vio *data_vio,
					struct pbn_lock *lock);
void write_data_vio(struct data_vio *data_vio);
void launch_compress_data_vio(struct data_vio *data_vio);
void continue_data_vio_with_block_map_slot(struct vdo_completion *completion);

#endif /* DATA_VIO_H */