// SPDX-License-Identifier: GPL-2.0
/*
 * Functions related to setting various queue properties from drivers
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/bio.h>
#include <linux/blk-integrity.h>
#include <linux/pagemap.h>
#include <linux/backing-dev-defs.h>
#include <linux/gcd.h>
#include <linux/lcm.h>
#include <linux/jiffies.h>
#include <linux/gfp.h>
#include <linux/dma-mapping.h>

#include "blk.h"
#include "blk-rq-qos.h"
#include "blk-wbt.h"

void blk_queue_rq_timeout(struct request_queue *q, unsigned int timeout)
{
	q->rq_timeout = timeout;
}
EXPORT_SYMBOL_GPL(blk_queue_rq_timeout);
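/*
 * Illustrative sketch (not part of this file): a driver would typically set
 * its request timeout once at initialization, e.g. a hypothetical probe
 * routine giving requests 30 seconds before the timeout handler fires:
 *
 *	blk_queue_rq_timeout(q, 30 * HZ);
 */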

/**
 * blk_set_stacking_limits - set default limits for stacking devices
 * @lim:  the queue_limits structure to reset
 *
 * Prepare queue limits for applying limits from underlying devices using
 * blk_stack_limits().
 */
void blk_set_stacking_limits(struct queue_limits *lim)
{
	memset(lim, 0, sizeof(*lim));
	lim->logical_block_size = SECTOR_SIZE;
	lim->physical_block_size = SECTOR_SIZE;
	lim->io_min = SECTOR_SIZE;
	lim->discard_granularity = SECTOR_SIZE;
	lim->dma_alignment = SECTOR_SIZE - 1;
	lim->seg_boundary_mask = BLK_SEG_BOUNDARY_MASK;

	/* Inherit limits from component devices */
	lim->max_segments = USHRT_MAX;
	lim->max_discard_segments = USHRT_MAX;
	lim->max_hw_sectors = UINT_MAX;
	lim->max_segment_size = UINT_MAX;
	lim->max_sectors = UINT_MAX;
	lim->max_dev_sectors = UINT_MAX;
	lim->max_write_zeroes_sectors = UINT_MAX;
	lim->max_zone_append_sectors = UINT_MAX;
	lim->max_user_discard_sectors = UINT_MAX;
}
EXPORT_SYMBOL(blk_set_stacking_limits);
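/*
 * Illustrative sketch (not part of this file): a stacking driver such as MD
 * or DM typically resets its limits with blk_set_stacking_limits(), folds in
 * every component device, and then applies the result.  The device list,
 * "bdev" member and prefix string below are hypothetical:
 *
 *	struct queue_limits lim;
 *
 *	blk_set_stacking_limits(&lim);
 *	list_for_each_entry(dev, &devices, list)
 *		queue_limits_stack_bdev(&lim, dev->bdev, 0, "mystack");
 *	err = queue_limits_set(q, &lim);
 */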

void blk_apply_bdi_limits(struct backing_dev_info *bdi,
		struct queue_limits *lim)
{
	/*
	 * For read-ahead of large files to be effective, we need to read ahead
	 * at least twice the optimal I/O size.
	 */
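	/*
	 * For example (illustrative numbers, not from this file): with an
	 * io_opt of 1 MiB and 4 KiB pages this yields 512 pages (2 MiB) of
	 * read-ahead, well above the VM_READAHEAD_PAGES floor.
	 */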
	bdi->ra_pages = max(lim->io_opt * 2 / PAGE_SIZE, VM_READAHEAD_PAGES);
	bdi->io_pages = lim->max_sectors >> PAGE_SECTORS_SHIFT;
}

static int blk_validate_zoned_limits(struct queue_limits *lim)
{
	if (!(lim->features & BLK_FEAT_ZONED)) {
		if (WARN_ON_ONCE(lim->max_open_zones) ||
		    WARN_ON_ONCE(lim->max_active_zones) ||
		    WARN_ON_ONCE(lim->zone_write_granularity) ||
		    WARN_ON_ONCE(lim->max_zone_append_sectors))
			return -EINVAL;
		return 0;
	}

	if (WARN_ON_ONCE(!IS_ENABLED(CONFIG_BLK_DEV_ZONED)))
		return -EINVAL;

	/*
	 * Given that active zones include open zones, the maximum number of
	 * open zones cannot be larger than the maximum number of active zones.
	 */
	if (lim->max_active_zones &&
	    lim->max_open_zones > lim->max_active_zones)
		return -EINVAL;

	if (lim->zone_write_granularity < lim->logical_block_size)
		lim->zone_write_granularity = lim->logical_block_size;

	if (lim->max_zone_append_sectors) {
		/*
		 * The Zone Append size is limited by the maximum I/O size
		 * and the zone size given that it can't span zones.
		 */
		lim->max_zone_append_sectors =
			min3(lim->max_hw_sectors,
			     lim->max_zone_append_sectors,
			     lim->chunk_sectors);
	}

	return 0;
}

static int blk_validate_integrity_limits(struct queue_limits *lim)
{
	struct blk_integrity *bi = &lim->integrity;

	if (!bi->tuple_size) {
		if (bi->csum_type != BLK_INTEGRITY_CSUM_NONE ||
		    bi->tag_size || (bi->flags & BLK_INTEGRITY_REF_TAG)) {
			pr_warn("invalid PI settings.\n");
			return -EINVAL;
		}
		return 0;
	}

	if (!IS_ENABLED(CONFIG_BLK_DEV_INTEGRITY)) {
		pr_warn("integrity support disabled.\n");
		return -EINVAL;
	}

	if (bi->csum_type == BLK_INTEGRITY_CSUM_NONE &&
	    (bi->flags & BLK_INTEGRITY_REF_TAG)) {
		pr_warn("ref tag not supported without checksum.\n");
		return -EINVAL;
	}

	if (!bi->interval_exp)
		bi->interval_exp = ilog2(lim->logical_block_size);

	return 0;
}

/*
 * Returns the maximum number of bytes we are guaranteed to fit in a bio.
 *
 * We require that an atomic write is submitted as an ITER_UBUF iov_iter (so a
 * single vector), so we assume that every segment apart from the first and
 * last can hold at least PAGE_SIZE.
 */
static unsigned int blk_queue_max_guaranteed_bio(struct queue_limits *lim)
{
	unsigned int max_segments = min(BIO_MAX_VECS, lim->max_segments);
	unsigned int length;

	length = min(max_segments, 2) * lim->logical_block_size;
	if (max_segments > 2)
		length += (max_segments - 2) * PAGE_SIZE;

	return length;
}
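/*
 * For example (illustrative numbers, not from this file): with 256 usable
 * segments and a 512-byte logical block size the guarantee computed by
 * blk_queue_max_guaranteed_bio() above is 2 * 512 B + 254 * PAGE_SIZE,
 * i.e. a little under 1 MiB with 4 KiB pages.
 */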

static void blk_atomic_writes_update_limits(struct queue_limits *lim)
{
	unsigned int unit_limit = min(lim->max_hw_sectors << SECTOR_SHIFT,
					blk_queue_max_guaranteed_bio(lim));

	unit_limit = rounddown_pow_of_two(unit_limit);

	lim->atomic_write_max_sectors =
		min(lim->atomic_write_hw_max >> SECTOR_SHIFT,
			lim->max_hw_sectors);
	lim->atomic_write_unit_min =
		min(lim->atomic_write_hw_unit_min, unit_limit);
	lim->atomic_write_unit_max =
		min(lim->atomic_write_hw_unit_max, unit_limit);
	lim->atomic_write_boundary_sectors =
		lim->atomic_write_hw_boundary >> SECTOR_SHIFT;
}

static void blk_validate_atomic_write_limits(struct queue_limits *lim)
{
	unsigned int boundary_sectors;

	if (!lim->atomic_write_hw_max)
		goto unsupported;

	boundary_sectors = lim->atomic_write_hw_boundary >> SECTOR_SHIFT;

	if (boundary_sectors) {
		/*
		 * A feature of boundary support is that it disallows merging
		 * bios into a request that would cross either a chunk sector
		 * or atomic write HW boundary, even though chunk sectors may
		 * be set purely for performance.
		 * For simplicity, disallow atomic writes when chunk sectors
		 * is non-zero and smaller than the atomic write HW boundary.
		 * Furthermore, chunk sectors must be a multiple of the atomic
		 * write HW boundary.  Otherwise boundary support becomes
		 * complicated.
		 * Devices which do not conform to these rules can be dealt
		 * with if and when they show up.
		 */
		if (WARN_ON_ONCE(lim->chunk_sectors % boundary_sectors))
			goto unsupported;

		/*
		 * The boundary size just needs to be a multiple of unit_max
		 * (and not necessarily a power-of-2), so the following check
		 * could be relaxed in the future.
		 * Furthermore, if needed, unit_max could even be reduced so
		 * that it is compliant with a !power-of-2 boundary.
		 */
		if (!is_power_of_2(boundary_sectors))
			goto unsupported;
	}

	blk_atomic_writes_update_limits(lim);
	return;

unsupported:
	lim->atomic_write_max_sectors = 0;
	lim->atomic_write_boundary_sectors = 0;
	lim->atomic_write_unit_min = 0;
	lim->atomic_write_unit_max = 0;
}
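/*
 * Worked example (illustrative numbers, not from this file): a device
 * reporting atomic_write_hw_max = 64 KiB, atomic_write_hw_unit_min = 4 KiB,
 * atomic_write_hw_unit_max = 64 KiB and no HW boundary ends up, after
 * blk_validate_atomic_write_limits(), with atomic_write_max_sectors = 128
 * and atomic_write_unit_{min,max} of 4 KiB and 64 KiB, assuming
 * max_hw_sectors and the bio guarantee above are at least 64 KiB.
 */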

/*
 * Check that the limits in lim are valid, initialize defaults for unset
 * values, and cap values based on others where needed.
 */
static int blk_validate_limits(struct queue_limits *lim)
{
	unsigned int max_hw_sectors;
	unsigned int logical_block_sectors;
	int err;

	/*
	 * Unless otherwise specified, default to 512 byte logical blocks and a
	 * physical block size equal to the logical block size.
	 */
	if (!lim->logical_block_size)
		lim->logical_block_size = SECTOR_SIZE;
	else if (blk_validate_block_size(lim->logical_block_size)) {
		pr_warn("Invalid logical block size (%d)\n", lim->logical_block_size);
		return -EINVAL;
	}
	if (lim->physical_block_size < lim->logical_block_size)
		lim->physical_block_size = lim->logical_block_size;

	/*
	 * The minimum I/O size defaults to the physical block size unless
	 * explicitly overridden.
	 */
	if (lim->io_min < lim->physical_block_size)
		lim->io_min = lim->physical_block_size;

	/*
	 * max_hw_sectors has a somewhat weird default for historical reasons,
	 * but drivers really should set their own instead of relying on this
	 * value.
	 *
	 * The block layer relies on the fact that every driver can
	 * handle at least a page worth of data per I/O, and needs the value
	 * aligned to the logical block size.
	 */
	if (!lim->max_hw_sectors)
		lim->max_hw_sectors = BLK_SAFE_MAX_SECTORS;
	if (WARN_ON_ONCE(lim->max_hw_sectors < PAGE_SECTORS))
		return -EINVAL;
	logical_block_sectors = lim->logical_block_size >> SECTOR_SHIFT;
	if (WARN_ON_ONCE(logical_block_sectors > lim->max_hw_sectors))
		return -EINVAL;
	lim->max_hw_sectors = round_down(lim->max_hw_sectors,
			logical_block_sectors);

	/*
	 * The actual max_sectors value is a complex beast and also takes the
	 * max_dev_sectors value (set by SCSI ULPs) and a user configurable
	 * value into account.  The ->max_sectors value is always calculated
	 * from these, so directly setting it won't have any effect.
	 */
	max_hw_sectors = min_not_zero(lim->max_hw_sectors,
				lim->max_dev_sectors);
	if (lim->max_user_sectors) {
		if (lim->max_user_sectors < PAGE_SIZE / SECTOR_SIZE)
			return -EINVAL;
		lim->max_sectors = min(max_hw_sectors, lim->max_user_sectors);
	} else if (lim->io_opt > (BLK_DEF_MAX_SECTORS_CAP << SECTOR_SHIFT)) {
		lim->max_sectors =
			min(max_hw_sectors, lim->io_opt >> SECTOR_SHIFT);
	} else if (lim->io_min > (BLK_DEF_MAX_SECTORS_CAP << SECTOR_SHIFT)) {
		lim->max_sectors =
			min(max_hw_sectors, lim->io_min >> SECTOR_SHIFT);
	} else {
		lim->max_sectors = min(max_hw_sectors, BLK_DEF_MAX_SECTORS_CAP);
	}
	lim->max_sectors = round_down(lim->max_sectors,
			logical_block_sectors);
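	/*
	 * For example (illustrative, not from this file): a driver that only
	 * sets max_hw_sectors = 65535 with no user override and small io_min
	 * and io_opt ends up with max_sectors capped at
	 * BLK_DEF_MAX_SECTORS_CAP, rounded down to a multiple of the logical
	 * block size.
	 */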

	/*
	 * Arbitrary default for the maximum number of segments.  Drivers
	 * should not rely on this and should set their own.
	 */
	if (!lim->max_segments)
		lim->max_segments = BLK_MAX_SEGMENTS;

	lim->max_discard_sectors =
		min(lim->max_hw_discard_sectors, lim->max_user_discard_sectors);

	if (!lim->max_discard_segments)
		lim->max_discard_segments = 1;

	if (lim->discard_granularity < lim->physical_block_size)
		lim->discard_granularity = lim->physical_block_size;

	/*
	 * By default there is no limit on the segment boundary alignment,
	 * but if there is one it can't be smaller than the page size as
	 * that would break all the normal I/O patterns.
	 */
	if (!lim->seg_boundary_mask)
		lim->seg_boundary_mask = BLK_SEG_BOUNDARY_MASK;
	if (WARN_ON_ONCE(lim->seg_boundary_mask < PAGE_SIZE - 1))
		return -EINVAL;

	/*
	 * A stacking device may have both a virtual boundary and a max
	 * segment size limit, so allow this setting for now.  Long-term the
	 * two might need to move out of the stacking limits, since we have
	 * immutable bvecs and lower-layer bio splitting is supposed to handle
	 * both correctly.
	 */
	if (lim->virt_boundary_mask) {
		if (!lim->max_segment_size)
			lim->max_segment_size = UINT_MAX;
	} else {
		/*
		 * The maximum segment size has an odd historic 64k default
		 * that drivers probably should override.  Just like the I/O
		 * size we require drivers to at least handle a full page per
		 * segment.
		 */
		if (!lim->max_segment_size)
			lim->max_segment_size = BLK_MAX_SEGMENT_SIZE;
		if (WARN_ON_ONCE(lim->max_segment_size < PAGE_SIZE))
			return -EINVAL;
	}

	/*
	 * We require drivers to at least do logical block aligned I/O, but
	 * historically could not check for that due to the separate calls
	 * to set the limits.  Once the transition is finished the check
	 * below should be narrowed down to check the logical block size.
	 */
	if (!lim->dma_alignment)
		lim->dma_alignment = SECTOR_SIZE - 1;
	if (WARN_ON_ONCE(lim->dma_alignment > PAGE_SIZE))
		return -EINVAL;

	if (lim->alignment_offset) {
		lim->alignment_offset &= (lim->physical_block_size - 1);
		lim->flags &= ~BLK_FLAG_MISALIGNED;
	}

	if (!(lim->features & BLK_FEAT_WRITE_CACHE))
		lim->features &= ~BLK_FEAT_FUA;

	blk_validate_atomic_write_limits(lim);

	err = blk_validate_integrity_limits(lim);
	if (err)
		return err;
	return blk_validate_zoned_limits(lim);
}

/*
 * Set the default limits for a newly allocated queue.  @lim contains the
 * initial limits set by the driver, which could be no limit at all, in which
 * case all fields are cleared to zero.
 */
int blk_set_default_limits(struct queue_limits *lim)
{
	/*
	 * Most defaults are set by capping the bounds in blk_validate_limits,
	 * but max_user_discard_sectors is special and needs an explicit
	 * initialization to the max value here.
	 */
	lim->max_user_discard_sectors = UINT_MAX;
	return blk_validate_limits(lim);
}

/**
 * queue_limits_commit_update - commit an atomic update of queue limits
 * @q:		queue to update
 * @lim:	limits to apply
 *
 * Apply the limits in @lim that were obtained from queue_limits_start_update()
 * and updated by the caller to @q.
 *
 * Returns 0 if successful, else a negative error code.
 */
int queue_limits_commit_update(struct request_queue *q,
		struct queue_limits *lim)
{
	int error;

	error = blk_validate_limits(lim);
	if (error)
		goto out_unlock;

#ifdef CONFIG_BLK_INLINE_ENCRYPTION
	if (q->crypto_profile && lim->integrity.tag_size) {
		pr_warn("blk-integrity: Integrity and hardware inline encryption are not supported together.\n");
		error = -EINVAL;
		goto out_unlock;
	}
#endif

	q->limits = *lim;
	if (q->disk)
		blk_apply_bdi_limits(q->disk->bdi, lim);
out_unlock:
	mutex_unlock(&q->limits_lock);
	return error;
}
EXPORT_SYMBOL_GPL(queue_limits_commit_update);
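/*
 * Illustrative sketch (not part of this file): the intended calling pattern
 * for queue_limits_commit_update() is to snapshot the limits under the
 * limits lock, tweak them, and commit; the field updated here is just an
 * example:
 *
 *	struct queue_limits lim = queue_limits_start_update(q);
 *
 *	lim.max_user_sectors = new_value;
 *	err = queue_limits_commit_update(q, &lim);
 */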

/**
 * queue_limits_set - apply queue limits to queue
 * @q:		queue to update
 * @lim:	limits to apply
 *
 * Apply the limits in @lim that were freshly initialized to @q.
 * To update existing limits use queue_limits_start_update() and
 * queue_limits_commit_update() instead.
 *
 * Returns 0 if successful, else a negative error code.
 */
int queue_limits_set(struct request_queue *q, struct queue_limits *lim)
{
	mutex_lock(&q->limits_lock);
	return queue_limits_commit_update(q, lim);
}
EXPORT_SYMBOL_GPL(queue_limits_set);
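/*
 * Illustrative sketch (not part of this file): a driver replacing a queue's
 * limits wholesale with queue_limits_set() might fill in a fresh structure
 * and apply it; the numbers are hypothetical:
 *
 *	struct queue_limits lim = {
 *		.logical_block_size	= 4096,
 *		.max_hw_sectors		= 2048,
 *	};
 *
 *	err = queue_limits_set(q, &lim);
 */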

static int queue_limit_alignment_offset(const struct queue_limits *lim,
		sector_t sector)
{
	unsigned int granularity = max(lim->physical_block_size, lim->io_min);
	unsigned int alignment = sector_div(sector, granularity >> SECTOR_SHIFT)
		<< SECTOR_SHIFT;

	return (granularity + lim->alignment_offset - alignment) % granularity;
}

static unsigned int queue_limit_discard_alignment(
		const struct queue_limits *lim, sector_t sector)
{
	unsigned int alignment, granularity, offset;

	if (!lim->max_discard_sectors)
		return 0;

	/* Why are these in bytes, not sectors? */
	alignment = lim->discard_alignment >> SECTOR_SHIFT;
	granularity = lim->discard_granularity >> SECTOR_SHIFT;
	if (!granularity)
		return 0;

	/* Offset of the partition start in 'granularity' sectors */
	offset = sector_div(sector, granularity);

	/* And why do we do this modulus *again* in blkdev_issue_discard()? */
	offset = (granularity + alignment - offset) % granularity;

	/* Turn it back into bytes, gaah */
	return offset << SECTOR_SHIFT;
}

static unsigned int blk_round_down_sectors(unsigned int sectors, unsigned int lbs)
{
	sectors = round_down(sectors, lbs >> SECTOR_SHIFT);
	if (sectors < PAGE_SIZE >> SECTOR_SHIFT)
		sectors = PAGE_SIZE >> SECTOR_SHIFT;
	return sectors;
}

/**
 * blk_stack_limits - adjust queue_limits for stacked devices
 * @t:	the stacking driver limits (top device)
 * @b:  the underlying queue limits (bottom, component device)
 * @start:  first data sector within component device
 *
 * Description:
 *    This function is used by stacking drivers like MD and DM to ensure
 *    that all component devices have compatible block sizes and
 *    alignments.  The stacking driver must provide a queue_limits
 *    struct (top) and then iteratively call the stacking function for
 *    all component (bottom) devices.  The stacking function will
 *    attempt to combine the values and ensure proper alignment.
 *
 *    Returns 0 if the top and bottom queue_limits are compatible.  The
 *    top device's block sizes and alignment offsets may be adjusted to
 *    ensure alignment with the bottom device. If no compatible sizes
 *    and alignments exist, -1 is returned and the resulting top
 *    queue_limits will have the misaligned flag set to indicate that
 *    the alignment_offset is undefined.
 */
int blk_stack_limits(struct queue_limits *t, struct queue_limits *b,
		     sector_t start)
{
	unsigned int top, bottom, alignment, ret = 0;

	t->features |= (b->features & BLK_FEAT_INHERIT_MASK);

	/*
	 * BLK_FEAT_NOWAIT and BLK_FEAT_POLL need to be supported both by the
	 * stacking driver and all underlying devices.  The stacking driver sets
	 * the flags before stacking the limits, and this will clear the flags
	 * if any of the underlying devices does not support it.
	 */
	if (!(b->features & BLK_FEAT_NOWAIT))
		t->features &= ~BLK_FEAT_NOWAIT;
	if (!(b->features & BLK_FEAT_POLL))
		t->features &= ~BLK_FEAT_POLL;

	t->flags |= (b->flags & BLK_FLAG_MISALIGNED);

	t->max_sectors = min_not_zero(t->max_sectors, b->max_sectors);
	t->max_user_sectors = min_not_zero(t->max_user_sectors,
			b->max_user_sectors);
	t->max_hw_sectors = min_not_zero(t->max_hw_sectors, b->max_hw_sectors);
	t->max_dev_sectors = min_not_zero(t->max_dev_sectors, b->max_dev_sectors);
	t->max_write_zeroes_sectors = min(t->max_write_zeroes_sectors,
					b->max_write_zeroes_sectors);
	t->max_zone_append_sectors = min(queue_limits_max_zone_append_sectors(t),
					 queue_limits_max_zone_append_sectors(b));

	t->seg_boundary_mask = min_not_zero(t->seg_boundary_mask,
					    b->seg_boundary_mask);
	t->virt_boundary_mask = min_not_zero(t->virt_boundary_mask,
					    b->virt_boundary_mask);

	t->max_segments = min_not_zero(t->max_segments, b->max_segments);
	t->max_discard_segments = min_not_zero(t->max_discard_segments,
					       b->max_discard_segments);
	t->max_integrity_segments = min_not_zero(t->max_integrity_segments,
						 b->max_integrity_segments);

	t->max_segment_size = min_not_zero(t->max_segment_size,
					   b->max_segment_size);

	alignment = queue_limit_alignment_offset(b, start);

	/* Bottom device has different alignment.  Check that it is
	 * compatible with the current top alignment.
	 */
	if (t->alignment_offset != alignment) {

		top = max(t->physical_block_size, t->io_min)
			+ t->alignment_offset;
		bottom = max(b->physical_block_size, b->io_min) + alignment;

		/* Verify that top and bottom intervals line up */
		if (max(top, bottom) % min(top, bottom)) {
			t->flags |= BLK_FLAG_MISALIGNED;
			ret = -1;
		}
	}

	t->logical_block_size = max(t->logical_block_size,
				    b->logical_block_size);

	t->physical_block_size = max(t->physical_block_size,
				     b->physical_block_size);

	t->io_min = max(t->io_min, b->io_min);
	t->io_opt = lcm_not_zero(t->io_opt, b->io_opt);
	t->dma_alignment = max(t->dma_alignment, b->dma_alignment);

	/* Set non-power-of-2 compatible chunk_sectors boundary */
	if (b->chunk_sectors)
		t->chunk_sectors = gcd(t->chunk_sectors, b->chunk_sectors);

	/* Physical block size a multiple of the logical block size? */
	if (t->physical_block_size & (t->logical_block_size - 1)) {
		t->physical_block_size = t->logical_block_size;
		t->flags |= BLK_FLAG_MISALIGNED;
		ret = -1;
	}

	/* Minimum I/O a multiple of the physical block size? */
	if (t->io_min & (t->physical_block_size - 1)) {
		t->io_min = t->physical_block_size;
		t->flags |= BLK_FLAG_MISALIGNED;
		ret = -1;
	}

	/* Optimal I/O a multiple of the physical block size? */
	if (t->io_opt & (t->physical_block_size - 1)) {
		t->io_opt = 0;
		t->flags |= BLK_FLAG_MISALIGNED;
		ret = -1;
	}

	/* chunk_sectors a multiple of the physical block size? */
	if ((t->chunk_sectors << 9) & (t->physical_block_size - 1)) {
		t->chunk_sectors = 0;
		t->flags |= BLK_FLAG_MISALIGNED;
		ret = -1;
	}

	/* Find lowest common alignment_offset */
	t->alignment_offset = lcm_not_zero(t->alignment_offset, alignment)
		% max(t->physical_block_size, t->io_min);

	/* Verify that new alignment_offset is on a logical block boundary */
	if (t->alignment_offset & (t->logical_block_size - 1)) {
		t->flags |= BLK_FLAG_MISALIGNED;
		ret = -1;
	}

	t->max_sectors = blk_round_down_sectors(t->max_sectors, t->logical_block_size);
	t->max_hw_sectors = blk_round_down_sectors(t->max_hw_sectors, t->logical_block_size);
	t->max_dev_sectors = blk_round_down_sectors(t->max_dev_sectors, t->logical_block_size);

	/* Discard alignment and granularity */
	if (b->discard_granularity) {
		alignment = queue_limit_discard_alignment(b, start);

		t->max_discard_sectors = min_not_zero(t->max_discard_sectors,
						      b->max_discard_sectors);
		t->max_hw_discard_sectors = min_not_zero(t->max_hw_discard_sectors,
							 b->max_hw_discard_sectors);
		t->discard_granularity = max(t->discard_granularity,
					     b->discard_granularity);
		t->discard_alignment = lcm_not_zero(t->discard_alignment, alignment) %
			t->discard_granularity;
	}
	t->max_secure_erase_sectors = min_not_zero(t->max_secure_erase_sectors,
						   b->max_secure_erase_sectors);
	t->zone_write_granularity = max(t->zone_write_granularity,
					b->zone_write_granularity);
	if (!(t->features & BLK_FEAT_ZONED)) {
		t->zone_write_granularity = 0;
		t->max_zone_append_sectors = 0;
	}
	return ret;
}
EXPORT_SYMBOL(blk_stack_limits);
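/*
 * Worked example (illustrative numbers, not from this file): when
 * blk_stack_limits() combines limits where one device has a 512-byte and the
 * other a 4096-byte logical block size, the top ends up with 4096-byte
 * logical and physical blocks; io_opt values of 64 KiB and 96 KiB combine to
 * 192 KiB via the least common multiple, and chunk_sectors values of 256 and
 * 384 reduce to 128 via the greatest common divisor.
 */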

/**
 * queue_limits_stack_bdev - adjust queue_limits for stacked devices
 * @t:	the stacking driver limits (top device)
 * @bdev:  the underlying block device (bottom)
 * @offset:  offset to beginning of data within component device
 * @pfx: prefix to use for warnings logged
 *
 * Description:
 *    This function is used by stacking drivers like MD and DM to ensure
 *    that all component devices have compatible block sizes and
 *    alignments.  The stacking driver must provide a queue_limits
 *    struct (top) and then iteratively call the stacking function for
 *    all component (bottom) devices.  The stacking function will
 *    attempt to combine the values and ensure proper alignment.
 */
void queue_limits_stack_bdev(struct queue_limits *t, struct block_device *bdev,
		sector_t offset, const char *pfx)
{
	if (blk_stack_limits(t, &bdev_get_queue(bdev)->limits,
			get_start_sect(bdev) + offset))
		pr_notice("%s: Warning: Device %pg is misaligned\n",
			pfx, bdev);
}
EXPORT_SYMBOL_GPL(queue_limits_stack_bdev);

/**
 * queue_limits_stack_integrity - stack integrity profile
 * @t: target queue limits
 * @b: base queue limits
 *
 * Check if the integrity profile in @b can be stacked into the
 * target @t.  Stacking is possible if either:
 *
 *   a) @t does not have any integrity information stacked into it yet
 *   b) the integrity profile in @b is identical to the one in @t
 *
 * If @b can be stacked into @t, return %true.  Else return %false and clear
 * the integrity information in @t.
 */
bool queue_limits_stack_integrity(struct queue_limits *t,
		struct queue_limits *b)
{
	struct blk_integrity *ti = &t->integrity;
	struct blk_integrity *bi = &b->integrity;

	if (!IS_ENABLED(CONFIG_BLK_DEV_INTEGRITY))
		return true;

	if (!ti->tuple_size) {
		/* inherit the settings from the first underlying device */
		if (!(ti->flags & BLK_INTEGRITY_STACKED)) {
			ti->flags = BLK_INTEGRITY_DEVICE_CAPABLE |
				(bi->flags & BLK_INTEGRITY_REF_TAG);
			ti->csum_type = bi->csum_type;
			ti->tuple_size = bi->tuple_size;
			ti->pi_offset = bi->pi_offset;
			ti->interval_exp = bi->interval_exp;
			ti->tag_size = bi->tag_size;
			goto done;
		}
		if (!bi->tuple_size)
			goto done;
	}

	if (ti->tuple_size != bi->tuple_size)
		goto incompatible;
	if (ti->interval_exp != bi->interval_exp)
		goto incompatible;
	if (ti->tag_size != bi->tag_size)
		goto incompatible;
	if (ti->csum_type != bi->csum_type)
		goto incompatible;
	if ((ti->flags & BLK_INTEGRITY_REF_TAG) !=
	    (bi->flags & BLK_INTEGRITY_REF_TAG))
		goto incompatible;

done:
	ti->flags |= BLK_INTEGRITY_STACKED;
	return true;

incompatible:
	memset(ti, 0, sizeof(*ti));
	return false;
}
EXPORT_SYMBOL_GPL(queue_limits_stack_integrity);
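/*
 * Illustrative sketch (not part of this file): a stacking driver would
 * typically call queue_limits_stack_integrity() alongside the limit stacking
 * for every component and refuse to mix incompatible protection-information
 * formats, e.g.:
 *
 *	if (!queue_limits_stack_integrity(&lim, &bdev_get_queue(bdev)->limits))
 *		pr_warn("incompatible integrity profiles, not stacking PI\n");
 */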

/**
 * blk_set_queue_depth - tell the block layer about the device queue depth
 * @q:		the request queue for the device
 * @depth:	queue depth
 */
void blk_set_queue_depth(struct request_queue *q, unsigned int depth)
{
	q->queue_depth = depth;
	rq_qos_queue_depth_changed(q);
}
EXPORT_SYMBOL(blk_set_queue_depth);
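/*
 * Illustrative sketch (not part of this file): a driver with a fixed number
 * of hardware tags per device might report it once the value is known, e.g.:
 *
 *	blk_set_queue_depth(q, 64);
 */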

int bdev_alignment_offset(struct block_device *bdev)
{
	struct request_queue *q = bdev_get_queue(bdev);

	if (q->limits.flags & BLK_FLAG_MISALIGNED)
		return -1;
	if (bdev_is_partition(bdev))
		return queue_limit_alignment_offset(&q->limits,
				bdev->bd_start_sect);
	return q->limits.alignment_offset;
}
EXPORT_SYMBOL_GPL(bdev_alignment_offset);

unsigned int bdev_discard_alignment(struct block_device *bdev)
{
	struct request_queue *q = bdev_get_queue(bdev);

	if (bdev_is_partition(bdev))
		return queue_limit_discard_alignment(&q->limits,
				bdev->bd_start_sect);
	return q->limits.discard_alignment;
}
EXPORT_SYMBOL_GPL(bdev_discard_alignment);