/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Block data types and constants.  Include this file directly only to
 * break an include dependency loop.
 */
#ifndef __LINUX_BLK_TYPES_H
#define __LINUX_BLK_TYPES_H

#include <linux/types.h>
#include <linux/bvec.h>
#include <linux/device.h>
#include <linux/ktime.h>
#include <linux/rw_hint.h>

struct bio_set;
struct bio;
struct bio_integrity_payload;
struct page;
struct io_context;
struct cgroup_subsys_state;
typedef void (bio_end_io_t) (struct bio *);
struct bio_crypt_ctx;

/*
 * The basic unit of block I/O is a sector. It is used in a number of contexts
 * in Linux (blk, bio, genhd). The size of one sector is 512 = 2**9
 * bytes. Variables of type sector_t represent an offset or size that is a
 * multiple of 512 bytes. Hence these two constants.
 */
#ifndef SECTOR_SHIFT
#define SECTOR_SHIFT 9
#endif
#ifndef SECTOR_SIZE
#define SECTOR_SIZE (1 << SECTOR_SHIFT)
#endif

#define PAGE_SECTORS_SHIFT	(PAGE_SHIFT - SECTOR_SHIFT)
#define PAGE_SECTORS		(1 << PAGE_SECTORS_SHIFT)
#define SECTOR_MASK		(PAGE_SECTORS - 1)
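
/*
 * Example (illustrative only, with a hypothetical byte count nr_bytes):
 * converting between byte counts and sectors with the constants above.
 *
 *	sector_t nr_sects = nr_bytes >> SECTOR_SHIFT;
 *	u64 bytes = (u64)nr_sects << SECTOR_SHIFT;
 *
 * PAGE_SECTORS is the number of sectors per page, e.g. 8 with 4K pages.
 */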

struct block_device {
	sector_t		bd_start_sect;
	sector_t		bd_nr_sectors;
	struct gendisk *	bd_disk;
	struct request_queue *	bd_queue;
	struct disk_stats __percpu *bd_stats;
	unsigned long		bd_stamp;
	atomic_t		__bd_flags;	// partition number + flags
#define BD_PARTNO		255	// lower 8 bits; assign-once
#define BD_READ_ONLY		(1u<<8) // read-only policy
#define BD_WRITE_HOLDER		(1u<<9)
#define BD_HAS_SUBMIT_BIO	(1u<<10)
#define BD_RO_WARNED		(1u<<11)
#ifdef CONFIG_FAIL_MAKE_REQUEST
#define BD_MAKE_IT_FAIL		(1u<<12)
#endif
	dev_t			bd_dev;
	struct address_space	*bd_mapping;	/* page cache */

	atomic_t		bd_openers;
	spinlock_t		bd_size_lock; /* for bd_inode->i_size updates */
	void *			bd_claiming;
	void *			bd_holder;
	const struct blk_holder_ops *bd_holder_ops;
	struct mutex		bd_holder_lock;
	int			bd_holders;
	struct kobject		*bd_holder_dir;

	atomic_t		bd_fsfreeze_count; /* number of freeze requests */
	struct mutex		bd_fsfreeze_mutex; /* serialize freeze/thaw */

	struct partition_meta_info *bd_meta_info;
	int			bd_writers;
#ifdef CONFIG_SECURITY
	void			*bd_security;
#endif
	/*
	 * keep this out-of-line as it's both big and not needed in the fast
	 * path
	 */
	struct device		bd_device;
} __randomize_layout;
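
/*
 * Example (illustrative only; the real accessors live elsewhere in the
 * block layer): the packed __bd_flags word combines the partition number
 * and the BD_* bits above, so with a hypothetical bdev pointer:
 *
 *	unsigned int v = atomic_read(&bdev->__bd_flags);
 *	u8 partno = v & BD_PARTNO;
 *	bool ro = v & BD_READ_ONLY;
 */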

#define bdev_whole(_bdev) \
	((_bdev)->bd_disk->part0)

#define dev_to_bdev(device) \
	container_of((device), struct block_device, bd_device)

#define bdev_kobj(_bdev) \
	(&((_bdev)->bd_device.kobj))

/*
 * Block error status values.  See block/blk-core:blk_errors for the details.
 */
typedef u8 __bitwise blk_status_t;
typedef u16 blk_short_t;
#define	BLK_STS_OK 0
#define BLK_STS_NOTSUPP		((__force blk_status_t)1)
#define BLK_STS_TIMEOUT		((__force blk_status_t)2)
#define BLK_STS_NOSPC		((__force blk_status_t)3)
#define BLK_STS_TRANSPORT	((__force blk_status_t)4)
#define BLK_STS_TARGET		((__force blk_status_t)5)
#define BLK_STS_RESV_CONFLICT	((__force blk_status_t)6)
#define BLK_STS_MEDIUM		((__force blk_status_t)7)
#define BLK_STS_PROTECTION	((__force blk_status_t)8)
#define BLK_STS_RESOURCE	((__force blk_status_t)9)
#define BLK_STS_IOERR		((__force blk_status_t)10)

/* hack for device mapper, don't use elsewhere: */
#define BLK_STS_DM_REQUEUE    ((__force blk_status_t)11)

/*
 * BLK_STS_AGAIN should only be returned if RQF_NOWAIT is set
 * and the bio would block (cf bio_wouldblock_error())
 */
#define BLK_STS_AGAIN		((__force blk_status_t)12)
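
/*
 * Example (illustrative only): a submitter that sets REQ_NOWAIT must be
 * prepared for BLK_STS_AGAIN completions:
 *
 *	bio->bi_opf |= REQ_NOWAIT;
 *	submit_bio(bio);
 *
 * The bi_end_io handler may then observe bio->bi_status == BLK_STS_AGAIN
 * and should resubmit from a context that is allowed to block.
 */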

/*
 * BLK_STS_DEV_RESOURCE is returned from the driver to the block layer if
 * device related resources are unavailable, but the driver can guarantee
 * that the queue will be rerun in the future once resources become
 * available again. This is typically the case for device specific
 * resources that are consumed for IO. If the driver fails allocating these
 * resources, we know that inflight (or pending) IO will free these
 * resources upon completion.
 *
 * This is different from BLK_STS_RESOURCE in that it explicitly references
 * a device specific resource. For resources of wider scope, allocation
 * failure can happen without having pending IO. This means that we can't
 * rely on request completions freeing these resources, as IO may not be in
 * flight. Examples of that are kernel memory allocations, DMA mappings, or
 * any other system wide resources.
 */
#define BLK_STS_DEV_RESOURCE	((__force blk_status_t)13)
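
/*
 * Example (illustrative only, with a hypothetical driver helper): in a
 * blk-mq ->queue_rq() implementation the distinction looks like:
 *
 *	if (!driver_get_device_tag(dev))
 *		return BLK_STS_DEV_RESOURCE;
 *
 * because the tag is freed when in-flight IO completes and the queue is
 * guaranteed to be rerun. A failed kmalloc() or DMA mapping, by contrast,
 * should return BLK_STS_RESOURCE, since nothing guarantees a rerun.
 */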

/*
 * BLK_STS_ZONE_OPEN_RESOURCE is returned from the driver in the completion
 * path if the device returns a status indicating that too many zone resources
 * are currently open. The same command should be successful if resubmitted
 * after the number of open zones decreases below the device's limits, which is
 * reported in the request_queue's max_open_zones.
 */
#define BLK_STS_ZONE_OPEN_RESOURCE	((__force blk_status_t)14)

/*
 * BLK_STS_ZONE_ACTIVE_RESOURCE is returned from the driver in the completion
 * path if the device returns a status indicating that too many zone resources
 * are currently active. The same command should be successful if resubmitted
 * after the number of active zones decreases below the device's limits, which
 * is reported in the request_queue's max_active_zones.
 */
#define BLK_STS_ZONE_ACTIVE_RESOURCE	((__force blk_status_t)15)

/*
 * BLK_STS_OFFLINE is returned from the driver when the target device is offline
 * or is being taken offline. This could help differentiate the case where a
 * device is intentionally being shut down from a real I/O error.
 */
#define BLK_STS_OFFLINE		((__force blk_status_t)16)

/*
 * BLK_STS_DURATION_LIMIT is returned from the driver when the target device
 * aborted the command because it exceeded one of its Command Duration Limits.
 */
#define BLK_STS_DURATION_LIMIT	((__force blk_status_t)17)

/*
 * Invalid size or alignment.
 */
#define BLK_STS_INVAL	((__force blk_status_t)19)

/**
 * blk_path_error - returns true if error may be path related
 * @error: status the request was completed with
 *
 * Description:
 *     This classifies block error status into non-retryable errors and ones
 *     that may be successful if retried on a failover path.
 *
 * Return:
 *     %false - retrying failover path will not help
 *     %true  - may succeed if retried
 */
static inline bool blk_path_error(blk_status_t error)
{
	switch (error) {
	case BLK_STS_NOTSUPP:
	case BLK_STS_NOSPC:
	case BLK_STS_TARGET:
	case BLK_STS_RESV_CONFLICT:
	case BLK_STS_MEDIUM:
	case BLK_STS_PROTECTION:
		return false;
	}

	/* Anything else could be a path failure, so should be retried */
	return true;
}
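
/*
 * Example (illustrative only, in the spirit of nvme/dm multipath; the
 * requeue helper is hypothetical):
 *
 *	if (blk_path_error(bio->bi_status))
 *		requeue_bio_on_other_path(bio);
 *	else
 *		bio_endio(bio);
 */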

struct bio_issue {
	u64 value;
};

typedef __u32 __bitwise blk_opf_t;

typedef unsigned int blk_qc_t;
#define BLK_QC_T_NONE		-1U

/*
 * main unit of I/O for the block layer and lower layers (i.e. drivers and
 * stacking drivers)
 */
struct bio {
	struct bio		*bi_next;	/* request queue link */
	struct block_device	*bi_bdev;
	blk_opf_t		bi_opf;		/* bottom bits REQ_OP, top bits
						 * req_flags.
						 */
	unsigned short		bi_flags;	/* BIO_* below */
	unsigned short		bi_ioprio;
	enum rw_hint		bi_write_hint;
	blk_status_t		bi_status;
	atomic_t		__bi_remaining;

	struct bvec_iter	bi_iter;

	union {
		/* for polled bios: */
		blk_qc_t		bi_cookie;
		/* for plugged zoned writes only: */
		unsigned int		__bi_nr_segments;
	};
	bio_end_io_t		*bi_end_io;
	void			*bi_private;
#ifdef CONFIG_BLK_CGROUP
	/*
	 * Represents the association of the css and request_queue for the bio.
	 * If a bio goes direct to device, it will not have a blkg as it will
	 * not have a request_queue associated with it.  The reference is put
	 * on release of the bio.
	 */
	struct blkcg_gq		*bi_blkg;
	struct bio_issue	bi_issue;
#ifdef CONFIG_BLK_CGROUP_IOCOST
	u64			bi_iocost_cost;
#endif
#endif

#ifdef CONFIG_BLK_INLINE_ENCRYPTION
	struct bio_crypt_ctx	*bi_crypt_context;
#endif

#if defined(CONFIG_BLK_DEV_INTEGRITY)
	struct bio_integrity_payload *bi_integrity; /* data integrity */
#endif

	unsigned short		bi_vcnt;	/* how many bio_vec's */

	/*
	 * Everything starting with bi_max_vecs will be preserved by bio_reset()
	 */

	unsigned short		bi_max_vecs;	/* max bvl_vecs we can hold */

	atomic_t		__bi_cnt;	/* pin count */

	struct bio_vec		*bi_io_vec;	/* the actual vec list */

	struct bio_set		*bi_pool;

	/*
	 * We can inline a number of vecs at the end of the bio, to avoid
	 * double allocations for a small number of bio_vecs. This member
	 * MUST obviously be kept at the very end of the bio.
	 */
	struct bio_vec		bi_inline_vecs[];
};

#define BIO_RESET_BYTES		offsetof(struct bio, bi_max_vecs)
#define BIO_MAX_SECTORS		(UINT_MAX >> SECTOR_SHIFT)
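
/*
 * Example (illustrative only): bio_reset(), declared in <linux/bio.h>,
 * clears everything up to but not including bi_max_vecs, roughly:
 *
 *	memset(bio, 0, BIO_RESET_BYTES);
 *
 * so bi_max_vecs, __bi_cnt, bi_io_vec, bi_pool and the inline vecs all
 * survive a reset.
 */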

/*
 * bio flags
 */
enum {
	BIO_PAGE_PINNED,	/* Unpin pages in bio_release_pages() */
	BIO_CLONED,		/* doesn't own data */
	BIO_BOUNCED,		/* bio is a bounce bio */
	BIO_QUIET,		/* Make BIO Quiet */
	BIO_CHAIN,		/* chained bio, ->bi_remaining in effect */
	BIO_REFFED,		/* bio has elevated ->bi_cnt */
	BIO_BPS_THROTTLED,	/* This bio has already been subjected to
				 * throttling rules. Don't do it again. */
	BIO_TRACE_COMPLETION,	/* bio_endio() should trace the final completion
				 * of this bio. */
	BIO_CGROUP_ACCT,	/* has been accounted to a cgroup */
	BIO_QOS_THROTTLED,	/* bio went through rq_qos throttle path */
	BIO_QOS_MERGED,		/* bio went through rq_qos merge path */
	BIO_REMAPPED,		/* partition offset has already been applied */
	BIO_ZONE_WRITE_PLUGGING, /* bio handled through zone write plugging */
	BIO_EMULATES_ZONE_APPEND, /* bio emulates a zone append operation */
	BIO_FLAG_LAST
};

typedef __u32 __bitwise blk_mq_req_flags_t;

#define REQ_OP_BITS	8
#define REQ_OP_MASK	(__force blk_opf_t)((1 << REQ_OP_BITS) - 1)
#define REQ_FLAG_BITS	24

/**
 * enum req_op - Operations common to the bio and request structures.
 * We use 8 bits for encoding the operation, and the remaining 24 for flags.
 *
 * The least significant bit of the operation number indicates the data
 * transfer direction:
 *
 *   - if the least significant bit is set transfers are TO the device
 *   - if the least significant bit is not set transfers are FROM the device
 *
 * If an operation does not transfer data the least significant bit has no
 * meaning.
 */
enum req_op {
	/* read sectors from the device */
	REQ_OP_READ		= (__force blk_opf_t)0,
	/* write sectors to the device */
	REQ_OP_WRITE		= (__force blk_opf_t)1,
	/* flush the volatile write cache */
	REQ_OP_FLUSH		= (__force blk_opf_t)2,
	/* discard sectors */
	REQ_OP_DISCARD		= (__force blk_opf_t)3,
	/* securely erase sectors */
	REQ_OP_SECURE_ERASE	= (__force blk_opf_t)5,
	/* write data at the current zone write pointer */
	REQ_OP_ZONE_APPEND	= (__force blk_opf_t)7,
	/* write the zero-filled sector many times */
	REQ_OP_WRITE_ZEROES	= (__force blk_opf_t)9,
	/* Open a zone */
	REQ_OP_ZONE_OPEN	= (__force blk_opf_t)10,
	/* Close a zone */
	REQ_OP_ZONE_CLOSE	= (__force blk_opf_t)11,
	/* Transition a zone to full */
	REQ_OP_ZONE_FINISH	= (__force blk_opf_t)12,
	/* reset a zone write pointer */
	REQ_OP_ZONE_RESET	= (__force blk_opf_t)13,
	/* reset all zones present on the device */
	REQ_OP_ZONE_RESET_ALL	= (__force blk_opf_t)15,

	/* Driver private requests */
	REQ_OP_DRV_IN		= (__force blk_opf_t)34,
	REQ_OP_DRV_OUT		= (__force blk_opf_t)35,

	REQ_OP_LAST		= (__force blk_opf_t)36,
};
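
/*
 * Example (illustrative only) of the direction encoding documented above:
 *
 *	REQ_OP_READ        (0, even) - data moves FROM the device
 *	REQ_OP_WRITE       (1, odd)  - data moves TO the device
 *	REQ_OP_ZONE_APPEND (7, odd)  - data moves TO the device
 *
 * op_is_write() below tests exactly this least significant bit.
 */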

/* Keep cmd_flag_name[] in sync with the definitions below */
enum req_flag_bits {
	__REQ_FAILFAST_DEV =	/* no driver retries of device errors */
		REQ_OP_BITS,
	__REQ_FAILFAST_TRANSPORT, /* no driver retries of transport errors */
	__REQ_FAILFAST_DRIVER,	/* no driver retries of driver errors */
	__REQ_SYNC,		/* request is sync (sync write or read) */
	__REQ_META,		/* metadata io request */
	__REQ_PRIO,		/* boost priority in cfq */
	__REQ_NOMERGE,		/* don't touch this for merging */
	__REQ_IDLE,		/* anticipate more IO after this one */
	__REQ_INTEGRITY,	/* I/O includes block integrity payload */
	__REQ_FUA,		/* forced unit access */
	__REQ_PREFLUSH,		/* request for cache flush */
	__REQ_RAHEAD,		/* read ahead, can fail anytime */
	__REQ_BACKGROUND,	/* background IO */
	__REQ_NOWAIT,		/* Don't wait if request will block */
	__REQ_POLLED,		/* caller polls for completion using bio_poll */
	__REQ_ALLOC_CACHE,	/* allocate IO from cache if available */
	__REQ_SWAP,		/* swap I/O */
	__REQ_DRV,		/* for driver use */
	__REQ_FS_PRIVATE,	/* for file system (submitter) use */
	__REQ_ATOMIC,		/* for atomic write operations */
	/*
	 * Command specific flags, keep last:
	 */
	/* for REQ_OP_WRITE_ZEROES: */
	__REQ_NOUNMAP,		/* do not free blocks when zeroing */

	__REQ_NR_BITS,		/* stops here */
};

#define REQ_FAILFAST_DEV	\
			(__force blk_opf_t)(1ULL << __REQ_FAILFAST_DEV)
#define REQ_FAILFAST_TRANSPORT	\
			(__force blk_opf_t)(1ULL << __REQ_FAILFAST_TRANSPORT)
#define REQ_FAILFAST_DRIVER	\
			(__force blk_opf_t)(1ULL << __REQ_FAILFAST_DRIVER)
#define REQ_SYNC	(__force blk_opf_t)(1ULL << __REQ_SYNC)
#define REQ_META	(__force blk_opf_t)(1ULL << __REQ_META)
#define REQ_PRIO	(__force blk_opf_t)(1ULL << __REQ_PRIO)
#define REQ_NOMERGE	(__force blk_opf_t)(1ULL << __REQ_NOMERGE)
#define REQ_IDLE	(__force blk_opf_t)(1ULL << __REQ_IDLE)
#define REQ_INTEGRITY	(__force blk_opf_t)(1ULL << __REQ_INTEGRITY)
#define REQ_FUA		(__force blk_opf_t)(1ULL << __REQ_FUA)
#define REQ_PREFLUSH	(__force blk_opf_t)(1ULL << __REQ_PREFLUSH)
#define REQ_RAHEAD	(__force blk_opf_t)(1ULL << __REQ_RAHEAD)
#define REQ_BACKGROUND	(__force blk_opf_t)(1ULL << __REQ_BACKGROUND)
#define REQ_NOWAIT	(__force blk_opf_t)(1ULL << __REQ_NOWAIT)
#define REQ_POLLED	(__force blk_opf_t)(1ULL << __REQ_POLLED)
#define REQ_ALLOC_CACHE	(__force blk_opf_t)(1ULL << __REQ_ALLOC_CACHE)
#define REQ_SWAP	(__force blk_opf_t)(1ULL << __REQ_SWAP)
#define REQ_DRV		(__force blk_opf_t)(1ULL << __REQ_DRV)
#define REQ_FS_PRIVATE	(__force blk_opf_t)(1ULL << __REQ_FS_PRIVATE)
#define REQ_ATOMIC	(__force blk_opf_t)(1ULL << __REQ_ATOMIC)

#define REQ_NOUNMAP	(__force blk_opf_t)(1ULL << __REQ_NOUNMAP)

#define REQ_FAILFAST_MASK \
	(REQ_FAILFAST_DEV | REQ_FAILFAST_TRANSPORT | REQ_FAILFAST_DRIVER)

#define REQ_NOMERGE_FLAGS \
	(REQ_NOMERGE | REQ_PREFLUSH | REQ_FUA)

enum stat_group {
	STAT_READ,
	STAT_WRITE,
	STAT_DISCARD,
	STAT_FLUSH,

	NR_STAT_GROUPS
};

static inline enum req_op bio_op(const struct bio *bio)
{
	return bio->bi_opf & REQ_OP_MASK;
}

static inline bool op_is_write(blk_opf_t op)
{
	return !!(op & (__force blk_opf_t)1);
}
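
/*
 * Example (illustrative only): composing and decoding bi_opf.
 *
 *	bio->bi_opf = REQ_OP_WRITE | REQ_SYNC | REQ_FUA;
 *
 *	bio_op(bio)              evaluates to REQ_OP_WRITE (flags masked off)
 *	op_is_write(bio->bi_opf) evaluates to true
 */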

/*
 * Check if the bio or request is one that needs special treatment in the
 * flush state machine.
 */
static inline bool op_is_flush(blk_opf_t op)
{
	return op & (REQ_FUA | REQ_PREFLUSH);
}
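
/*
 * Note (illustrative): filesystems normally do not issue REQ_OP_FLUSH
 * themselves; they tag a write with REQ_PREFLUSH and/or REQ_FUA, e.g.:
 *
 *	bio->bi_opf = REQ_OP_WRITE | REQ_PREFLUSH | REQ_FUA;
 *
 * and the flush state machine decomposes that into the cache flush and
 * write steps the device actually requires.
 */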

/*
 * Reads are always treated as synchronous, as are requests with the FUA or
 * PREFLUSH flag.  Other operations may be marked as synchronous using the
 * REQ_SYNC flag.
 */
static inline bool op_is_sync(blk_opf_t op)
{
	return (op & REQ_OP_MASK) == REQ_OP_READ ||
		(op & (REQ_SYNC | REQ_FUA | REQ_PREFLUSH));
}
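
/*
 * Example (illustrative only):
 *
 *	op_is_sync(REQ_OP_READ)             -> true (reads are always sync)
 *	op_is_sync(REQ_OP_WRITE)            -> false
 *	op_is_sync(REQ_OP_WRITE | REQ_SYNC) -> true
 */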

static inline bool op_is_discard(blk_opf_t op)
{
	return (op & REQ_OP_MASK) == REQ_OP_DISCARD;
}

/*
 * Check if a bio or request operation is a zone management operation, with
 * the exception of REQ_OP_ZONE_RESET_ALL which is treated as a special case
 * due to its different handling in the block layer and device response in
 * case of command failure.
 */
static inline bool op_is_zone_mgmt(enum req_op op)
{
	switch (op & REQ_OP_MASK) {
	case REQ_OP_ZONE_RESET:
	case REQ_OP_ZONE_OPEN:
	case REQ_OP_ZONE_CLOSE:
	case REQ_OP_ZONE_FINISH:
		return true;
	default:
		return false;
	}
}

static inline int op_stat_group(enum req_op op)
{
	if (op_is_discard(op))
		return STAT_DISCARD;
	return op_is_write(op);
}
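
/*
 * Example (illustrative only, in the spirit of the block layer's IO
 * accounting; part_stat_inc() lives in <linux/part_stat.h>):
 *
 *	const int sgrp = op_stat_group(bio_op(bio));
 *
 *	part_stat_inc(bio->bi_bdev, ios[sgrp]);
 *
 * This works because STAT_READ == 0 and STAT_WRITE == 1 match the 0/1
 * return of op_is_write().
 */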

struct blk_rq_stat {
	u64 mean;
	u64 min;
	u64 max;
	u32 nr_samples;
	u64 batch;
};

#endif /* __LINUX_BLK_TYPES_H */