1  // SPDX-License-Identifier: GPL-2.0
2  /*
3   * Block multiqueue core code
4   *
5   * Copyright (C) 2013-2014 Jens Axboe
6   * Copyright (C) 2013-2014 Christoph Hellwig
7   */
8  #include <linux/kernel.h>
9  #include <linux/module.h>
10  #include <linux/backing-dev.h>
11  #include <linux/bio.h>
12  #include <linux/blkdev.h>
13  #include <linux/blk-integrity.h>
14  #include <linux/kmemleak.h>
15  #include <linux/mm.h>
16  #include <linux/init.h>
17  #include <linux/slab.h>
18  #include <linux/workqueue.h>
19  #include <linux/smp.h>
20  #include <linux/interrupt.h>
21  #include <linux/llist.h>
22  #include <linux/cpu.h>
23  #include <linux/cache.h>
24  #include <linux/sched/topology.h>
25  #include <linux/sched/signal.h>
26  #include <linux/delay.h>
27  #include <linux/crash_dump.h>
28  #include <linux/prefetch.h>
29  #include <linux/blk-crypto.h>
30  #include <linux/part_stat.h>
31  #include <linux/sched/isolation.h>
32  
33  #include <trace/events/block.h>
34  
35  #include <linux/t10-pi.h>
36  #include "blk.h"
37  #include "blk-mq.h"
38  #include "blk-mq-debugfs.h"
39  #include "blk-pm.h"
40  #include "blk-stat.h"
41  #include "blk-mq-sched.h"
42  #include "blk-rq-qos.h"
43  
44  static DEFINE_PER_CPU(struct llist_head, blk_cpu_done);
45  static DEFINE_PER_CPU(call_single_data_t, blk_cpu_csd);
46  
47  static void blk_mq_insert_request(struct request *rq, blk_insert_t flags);
48  static void blk_mq_request_bypass_insert(struct request *rq,
49  		blk_insert_t flags);
50  static void blk_mq_try_issue_list_directly(struct blk_mq_hw_ctx *hctx,
51  		struct list_head *list);
52  static int blk_hctx_poll(struct request_queue *q, struct blk_mq_hw_ctx *hctx,
53  			 struct io_comp_batch *iob, unsigned int flags);
54  
55  /*
56   * Check if any of the ctx, dispatch list or elevator
57   * have pending work in this hardware queue.
58   */
blk_mq_hctx_has_pending(struct blk_mq_hw_ctx * hctx)59  static bool blk_mq_hctx_has_pending(struct blk_mq_hw_ctx *hctx)
60  {
61  	return !list_empty_careful(&hctx->dispatch) ||
62  		sbitmap_any_bit_set(&hctx->ctx_map) ||
63  			blk_mq_sched_has_work(hctx);
64  }
65  
66  /*
67   * Mark this ctx as having pending work in this hardware queue
68   */
blk_mq_hctx_mark_pending(struct blk_mq_hw_ctx * hctx,struct blk_mq_ctx * ctx)69  static void blk_mq_hctx_mark_pending(struct blk_mq_hw_ctx *hctx,
70  				     struct blk_mq_ctx *ctx)
71  {
72  	const int bit = ctx->index_hw[hctx->type];
73  
74  	if (!sbitmap_test_bit(&hctx->ctx_map, bit))
75  		sbitmap_set_bit(&hctx->ctx_map, bit);
76  }
77  
blk_mq_hctx_clear_pending(struct blk_mq_hw_ctx * hctx,struct blk_mq_ctx * ctx)78  static void blk_mq_hctx_clear_pending(struct blk_mq_hw_ctx *hctx,
79  				      struct blk_mq_ctx *ctx)
80  {
81  	const int bit = ctx->index_hw[hctx->type];
82  
83  	sbitmap_clear_bit(&hctx->ctx_map, bit);
84  }
85  
86  struct mq_inflight {
87  	struct block_device *part;
88  	unsigned int inflight[2];
89  };
90  
blk_mq_check_inflight(struct request * rq,void * priv)91  static bool blk_mq_check_inflight(struct request *rq, void *priv)
92  {
93  	struct mq_inflight *mi = priv;
94  
95  	if (rq->part && blk_do_io_stat(rq) &&
96  	    (!bdev_is_partition(mi->part) || rq->part == mi->part) &&
97  	    blk_mq_rq_state(rq) == MQ_RQ_IN_FLIGHT)
98  		mi->inflight[rq_data_dir(rq)]++;
99  
100  	return true;
101  }
102  
blk_mq_in_flight(struct request_queue * q,struct block_device * part)103  unsigned int blk_mq_in_flight(struct request_queue *q,
104  		struct block_device *part)
105  {
106  	struct mq_inflight mi = { .part = part };
107  
108  	blk_mq_queue_tag_busy_iter(q, blk_mq_check_inflight, &mi);
109  
110  	return mi.inflight[0] + mi.inflight[1];
111  }
112  
blk_mq_in_flight_rw(struct request_queue * q,struct block_device * part,unsigned int inflight[2])113  void blk_mq_in_flight_rw(struct request_queue *q, struct block_device *part,
114  		unsigned int inflight[2])
115  {
116  	struct mq_inflight mi = { .part = part };
117  
118  	blk_mq_queue_tag_busy_iter(q, blk_mq_check_inflight, &mi);
119  	inflight[0] = mi.inflight[0];
120  	inflight[1] = mi.inflight[1];
121  }
122  
blk_freeze_queue_start(struct request_queue * q)123  void blk_freeze_queue_start(struct request_queue *q)
124  {
125  	mutex_lock(&q->mq_freeze_lock);
126  	if (++q->mq_freeze_depth == 1) {
127  		percpu_ref_kill(&q->q_usage_counter);
128  		mutex_unlock(&q->mq_freeze_lock);
129  		if (queue_is_mq(q))
130  			blk_mq_run_hw_queues(q, false);
131  	} else {
132  		mutex_unlock(&q->mq_freeze_lock);
133  	}
134  }
135  EXPORT_SYMBOL_GPL(blk_freeze_queue_start);
136  
blk_mq_freeze_queue_wait(struct request_queue * q)137  void blk_mq_freeze_queue_wait(struct request_queue *q)
138  {
139  	wait_event(q->mq_freeze_wq, percpu_ref_is_zero(&q->q_usage_counter));
140  }
141  EXPORT_SYMBOL_GPL(blk_mq_freeze_queue_wait);
142  
blk_mq_freeze_queue_wait_timeout(struct request_queue * q,unsigned long timeout)143  int blk_mq_freeze_queue_wait_timeout(struct request_queue *q,
144  				     unsigned long timeout)
145  {
146  	return wait_event_timeout(q->mq_freeze_wq,
147  					percpu_ref_is_zero(&q->q_usage_counter),
148  					timeout);
149  }
150  EXPORT_SYMBOL_GPL(blk_mq_freeze_queue_wait_timeout);
151  
152  /*
153   * Guarantee no request is in use, so we can change any data structure of
154   * the queue afterward.
155   */
blk_freeze_queue(struct request_queue * q)156  void blk_freeze_queue(struct request_queue *q)
157  {
158  	/*
159  	 * In the !blk_mq case we are only calling this to kill the
160  	 * q_usage_counter, otherwise this increases the freeze depth
161  	 * and waits for it to return to zero.  For this reason there is
162  	 * no blk_unfreeze_queue(), and blk_freeze_queue() is not
163  	 * exported to drivers as the only user for unfreeze is blk_mq.
164  	 */
165  	blk_freeze_queue_start(q);
166  	blk_mq_freeze_queue_wait(q);
167  }
168  
blk_mq_freeze_queue(struct request_queue * q)169  void blk_mq_freeze_queue(struct request_queue *q)
170  {
171  	/*
172  	 * ...just an alias to keep freeze and unfreeze actions balanced
173  	 * in the blk_mq_* namespace
174  	 */
175  	blk_freeze_queue(q);
176  }
177  EXPORT_SYMBOL_GPL(blk_mq_freeze_queue);
178  
__blk_mq_unfreeze_queue(struct request_queue * q,bool force_atomic)179  void __blk_mq_unfreeze_queue(struct request_queue *q, bool force_atomic)
180  {
181  	mutex_lock(&q->mq_freeze_lock);
182  	if (force_atomic)
183  		q->q_usage_counter.data->force_atomic = true;
184  	q->mq_freeze_depth--;
185  	WARN_ON_ONCE(q->mq_freeze_depth < 0);
186  	if (!q->mq_freeze_depth) {
187  		percpu_ref_resurrect(&q->q_usage_counter);
188  		wake_up_all(&q->mq_freeze_wq);
189  	}
190  	mutex_unlock(&q->mq_freeze_lock);
191  }
192  
blk_mq_unfreeze_queue(struct request_queue * q)193  void blk_mq_unfreeze_queue(struct request_queue *q)
194  {
195  	__blk_mq_unfreeze_queue(q, false);
196  }
197  EXPORT_SYMBOL_GPL(blk_mq_unfreeze_queue);
198  
199  /*
200   * FIXME: replace the scsi_internal_device_*block_nowait() calls in the
201   * mpt3sas driver such that this function can be removed.
202   */
blk_mq_quiesce_queue_nowait(struct request_queue * q)203  void blk_mq_quiesce_queue_nowait(struct request_queue *q)
204  {
205  	unsigned long flags;
206  
207  	spin_lock_irqsave(&q->queue_lock, flags);
208  	if (!q->quiesce_depth++)
209  		blk_queue_flag_set(QUEUE_FLAG_QUIESCED, q);
210  	spin_unlock_irqrestore(&q->queue_lock, flags);
211  }
212  EXPORT_SYMBOL_GPL(blk_mq_quiesce_queue_nowait);
213  
214  /**
215   * blk_mq_wait_quiesce_done() - wait until in-progress quiesce is done
216   * @set: tag_set to wait on
217   *
218   * Note: it is driver's responsibility for making sure that quiesce has
219   * been started on or more of the request_queues of the tag_set.  This
220   * function only waits for the quiesce on those request_queues that had
221   * the quiesce flag set using blk_mq_quiesce_queue_nowait.
222   */
blk_mq_wait_quiesce_done(struct blk_mq_tag_set * set)223  void blk_mq_wait_quiesce_done(struct blk_mq_tag_set *set)
224  {
225  	if (set->flags & BLK_MQ_F_BLOCKING)
226  		synchronize_srcu(set->srcu);
227  	else
228  		synchronize_rcu();
229  }
230  EXPORT_SYMBOL_GPL(blk_mq_wait_quiesce_done);
231  
232  /**
233   * blk_mq_quiesce_queue() - wait until all ongoing dispatches have finished
234   * @q: request queue.
235   *
236   * Note: this function does not prevent that the struct request end_io()
237   * callback function is invoked. Once this function is returned, we make
238   * sure no dispatch can happen until the queue is unquiesced via
239   * blk_mq_unquiesce_queue().
240   */
blk_mq_quiesce_queue(struct request_queue * q)241  void blk_mq_quiesce_queue(struct request_queue *q)
242  {
243  	blk_mq_quiesce_queue_nowait(q);
244  	/* nothing to wait for non-mq queues */
245  	if (queue_is_mq(q))
246  		blk_mq_wait_quiesce_done(q->tag_set);
247  }
248  EXPORT_SYMBOL_GPL(blk_mq_quiesce_queue);
249  
250  /*
251   * blk_mq_unquiesce_queue() - counterpart of blk_mq_quiesce_queue()
252   * @q: request queue.
253   *
254   * This function recovers queue into the state before quiescing
255   * which is done by blk_mq_quiesce_queue.
256   */
blk_mq_unquiesce_queue(struct request_queue * q)257  void blk_mq_unquiesce_queue(struct request_queue *q)
258  {
259  	unsigned long flags;
260  	bool run_queue = false;
261  
262  	spin_lock_irqsave(&q->queue_lock, flags);
263  	if (WARN_ON_ONCE(q->quiesce_depth <= 0)) {
264  		;
265  	} else if (!--q->quiesce_depth) {
266  		blk_queue_flag_clear(QUEUE_FLAG_QUIESCED, q);
267  		run_queue = true;
268  	}
269  	spin_unlock_irqrestore(&q->queue_lock, flags);
270  
271  	/* dispatch requests which are inserted during quiescing */
272  	if (run_queue)
273  		blk_mq_run_hw_queues(q, true);
274  }
275  EXPORT_SYMBOL_GPL(blk_mq_unquiesce_queue);
276  
blk_mq_quiesce_tagset(struct blk_mq_tag_set * set)277  void blk_mq_quiesce_tagset(struct blk_mq_tag_set *set)
278  {
279  	struct request_queue *q;
280  
281  	mutex_lock(&set->tag_list_lock);
282  	list_for_each_entry(q, &set->tag_list, tag_set_list) {
283  		if (!blk_queue_skip_tagset_quiesce(q))
284  			blk_mq_quiesce_queue_nowait(q);
285  	}
286  	blk_mq_wait_quiesce_done(set);
287  	mutex_unlock(&set->tag_list_lock);
288  }
289  EXPORT_SYMBOL_GPL(blk_mq_quiesce_tagset);
290  
blk_mq_unquiesce_tagset(struct blk_mq_tag_set * set)291  void blk_mq_unquiesce_tagset(struct blk_mq_tag_set *set)
292  {
293  	struct request_queue *q;
294  
295  	mutex_lock(&set->tag_list_lock);
296  	list_for_each_entry(q, &set->tag_list, tag_set_list) {
297  		if (!blk_queue_skip_tagset_quiesce(q))
298  			blk_mq_unquiesce_queue(q);
299  	}
300  	mutex_unlock(&set->tag_list_lock);
301  }
302  EXPORT_SYMBOL_GPL(blk_mq_unquiesce_tagset);
303  
blk_mq_wake_waiters(struct request_queue * q)304  void blk_mq_wake_waiters(struct request_queue *q)
305  {
306  	struct blk_mq_hw_ctx *hctx;
307  	unsigned long i;
308  
309  	queue_for_each_hw_ctx(q, hctx, i)
310  		if (blk_mq_hw_queue_mapped(hctx))
311  			blk_mq_tag_wakeup_all(hctx->tags, true);
312  }
313  
blk_rq_init(struct request_queue * q,struct request * rq)314  void blk_rq_init(struct request_queue *q, struct request *rq)
315  {
316  	memset(rq, 0, sizeof(*rq));
317  
318  	INIT_LIST_HEAD(&rq->queuelist);
319  	rq->q = q;
320  	rq->__sector = (sector_t) -1;
321  	INIT_HLIST_NODE(&rq->hash);
322  	RB_CLEAR_NODE(&rq->rb_node);
323  	rq->tag = BLK_MQ_NO_TAG;
324  	rq->internal_tag = BLK_MQ_NO_TAG;
325  	rq->start_time_ns = blk_time_get_ns();
326  	rq->part = NULL;
327  	blk_crypto_rq_set_defaults(rq);
328  }
329  EXPORT_SYMBOL(blk_rq_init);
330  
331  /* Set start and alloc time when the allocated request is actually used */
blk_mq_rq_time_init(struct request * rq,u64 alloc_time_ns)332  static inline void blk_mq_rq_time_init(struct request *rq, u64 alloc_time_ns)
333  {
334  	if (blk_mq_need_time_stamp(rq))
335  		rq->start_time_ns = blk_time_get_ns();
336  	else
337  		rq->start_time_ns = 0;
338  
339  #ifdef CONFIG_BLK_RQ_ALLOC_TIME
340  	if (blk_queue_rq_alloc_time(rq->q))
341  		rq->alloc_time_ns = alloc_time_ns ?: rq->start_time_ns;
342  	else
343  		rq->alloc_time_ns = 0;
344  #endif
345  }
346  
blk_mq_rq_ctx_init(struct blk_mq_alloc_data * data,struct blk_mq_tags * tags,unsigned int tag)347  static struct request *blk_mq_rq_ctx_init(struct blk_mq_alloc_data *data,
348  		struct blk_mq_tags *tags, unsigned int tag)
349  {
350  	struct blk_mq_ctx *ctx = data->ctx;
351  	struct blk_mq_hw_ctx *hctx = data->hctx;
352  	struct request_queue *q = data->q;
353  	struct request *rq = tags->static_rqs[tag];
354  
355  	rq->q = q;
356  	rq->mq_ctx = ctx;
357  	rq->mq_hctx = hctx;
358  	rq->cmd_flags = data->cmd_flags;
359  
360  	if (data->flags & BLK_MQ_REQ_PM)
361  		data->rq_flags |= RQF_PM;
362  	if (blk_queue_io_stat(q))
363  		data->rq_flags |= RQF_IO_STAT;
364  	rq->rq_flags = data->rq_flags;
365  
366  	if (data->rq_flags & RQF_SCHED_TAGS) {
367  		rq->tag = BLK_MQ_NO_TAG;
368  		rq->internal_tag = tag;
369  	} else {
370  		rq->tag = tag;
371  		rq->internal_tag = BLK_MQ_NO_TAG;
372  	}
373  	rq->timeout = 0;
374  
375  	rq->part = NULL;
376  	rq->io_start_time_ns = 0;
377  	rq->stats_sectors = 0;
378  	rq->nr_phys_segments = 0;
379  	rq->nr_integrity_segments = 0;
380  	rq->end_io = NULL;
381  	rq->end_io_data = NULL;
382  
383  	blk_crypto_rq_set_defaults(rq);
384  	INIT_LIST_HEAD(&rq->queuelist);
385  	/* tag was already set */
386  	WRITE_ONCE(rq->deadline, 0);
387  	req_ref_set(rq, 1);
388  
389  	if (rq->rq_flags & RQF_USE_SCHED) {
390  		struct elevator_queue *e = data->q->elevator;
391  
392  		INIT_HLIST_NODE(&rq->hash);
393  		RB_CLEAR_NODE(&rq->rb_node);
394  
395  		if (e->type->ops.prepare_request)
396  			e->type->ops.prepare_request(rq);
397  	}
398  
399  	return rq;
400  }
401  
402  static inline struct request *
__blk_mq_alloc_requests_batch(struct blk_mq_alloc_data * data)403  __blk_mq_alloc_requests_batch(struct blk_mq_alloc_data *data)
404  {
405  	unsigned int tag, tag_offset;
406  	struct blk_mq_tags *tags;
407  	struct request *rq;
408  	unsigned long tag_mask;
409  	int i, nr = 0;
410  
411  	tag_mask = blk_mq_get_tags(data, data->nr_tags, &tag_offset);
412  	if (unlikely(!tag_mask))
413  		return NULL;
414  
415  	tags = blk_mq_tags_from_data(data);
416  	for (i = 0; tag_mask; i++) {
417  		if (!(tag_mask & (1UL << i)))
418  			continue;
419  		tag = tag_offset + i;
420  		prefetch(tags->static_rqs[tag]);
421  		tag_mask &= ~(1UL << i);
422  		rq = blk_mq_rq_ctx_init(data, tags, tag);
423  		rq_list_add(data->cached_rq, rq);
424  		nr++;
425  	}
426  	if (!(data->rq_flags & RQF_SCHED_TAGS))
427  		blk_mq_add_active_requests(data->hctx, nr);
428  	/* caller already holds a reference, add for remainder */
429  	percpu_ref_get_many(&data->q->q_usage_counter, nr - 1);
430  	data->nr_tags -= nr;
431  
432  	return rq_list_pop(data->cached_rq);
433  }
434  
__blk_mq_alloc_requests(struct blk_mq_alloc_data * data)435  static struct request *__blk_mq_alloc_requests(struct blk_mq_alloc_data *data)
436  {
437  	struct request_queue *q = data->q;
438  	u64 alloc_time_ns = 0;
439  	struct request *rq;
440  	unsigned int tag;
441  
442  	/* alloc_time includes depth and tag waits */
443  	if (blk_queue_rq_alloc_time(q))
444  		alloc_time_ns = blk_time_get_ns();
445  
446  	if (data->cmd_flags & REQ_NOWAIT)
447  		data->flags |= BLK_MQ_REQ_NOWAIT;
448  
449  retry:
450  	data->ctx = blk_mq_get_ctx(q);
451  	data->hctx = blk_mq_map_queue(q, data->cmd_flags, data->ctx);
452  
453  	if (q->elevator) {
454  		/*
455  		 * All requests use scheduler tags when an I/O scheduler is
456  		 * enabled for the queue.
457  		 */
458  		data->rq_flags |= RQF_SCHED_TAGS;
459  
460  		/*
461  		 * Flush/passthrough requests are special and go directly to the
462  		 * dispatch list.
463  		 */
464  		if ((data->cmd_flags & REQ_OP_MASK) != REQ_OP_FLUSH &&
465  		    !blk_op_is_passthrough(data->cmd_flags)) {
466  			struct elevator_mq_ops *ops = &q->elevator->type->ops;
467  
468  			WARN_ON_ONCE(data->flags & BLK_MQ_REQ_RESERVED);
469  
470  			data->rq_flags |= RQF_USE_SCHED;
471  			if (ops->limit_depth)
472  				ops->limit_depth(data->cmd_flags, data);
473  		}
474  	} else {
475  		blk_mq_tag_busy(data->hctx);
476  	}
477  
478  	if (data->flags & BLK_MQ_REQ_RESERVED)
479  		data->rq_flags |= RQF_RESV;
480  
481  	/*
482  	 * Try batched alloc if we want more than 1 tag.
483  	 */
484  	if (data->nr_tags > 1) {
485  		rq = __blk_mq_alloc_requests_batch(data);
486  		if (rq) {
487  			blk_mq_rq_time_init(rq, alloc_time_ns);
488  			return rq;
489  		}
490  		data->nr_tags = 1;
491  	}
492  
493  	/*
494  	 * Waiting allocations only fail because of an inactive hctx.  In that
495  	 * case just retry the hctx assignment and tag allocation as CPU hotplug
496  	 * should have migrated us to an online CPU by now.
497  	 */
498  	tag = blk_mq_get_tag(data);
499  	if (tag == BLK_MQ_NO_TAG) {
500  		if (data->flags & BLK_MQ_REQ_NOWAIT)
501  			return NULL;
502  		/*
503  		 * Give up the CPU and sleep for a random short time to
504  		 * ensure that thread using a realtime scheduling class
505  		 * are migrated off the CPU, and thus off the hctx that
506  		 * is going away.
507  		 */
508  		msleep(3);
509  		goto retry;
510  	}
511  
512  	if (!(data->rq_flags & RQF_SCHED_TAGS))
513  		blk_mq_inc_active_requests(data->hctx);
514  	rq = blk_mq_rq_ctx_init(data, blk_mq_tags_from_data(data), tag);
515  	blk_mq_rq_time_init(rq, alloc_time_ns);
516  	return rq;
517  }
518  
blk_mq_rq_cache_fill(struct request_queue * q,struct blk_plug * plug,blk_opf_t opf,blk_mq_req_flags_t flags)519  static struct request *blk_mq_rq_cache_fill(struct request_queue *q,
520  					    struct blk_plug *plug,
521  					    blk_opf_t opf,
522  					    blk_mq_req_flags_t flags)
523  {
524  	struct blk_mq_alloc_data data = {
525  		.q		= q,
526  		.flags		= flags,
527  		.cmd_flags	= opf,
528  		.nr_tags	= plug->nr_ios,
529  		.cached_rq	= &plug->cached_rq,
530  	};
531  	struct request *rq;
532  
533  	if (blk_queue_enter(q, flags))
534  		return NULL;
535  
536  	plug->nr_ios = 1;
537  
538  	rq = __blk_mq_alloc_requests(&data);
539  	if (unlikely(!rq))
540  		blk_queue_exit(q);
541  	return rq;
542  }
543  
blk_mq_alloc_cached_request(struct request_queue * q,blk_opf_t opf,blk_mq_req_flags_t flags)544  static struct request *blk_mq_alloc_cached_request(struct request_queue *q,
545  						   blk_opf_t opf,
546  						   blk_mq_req_flags_t flags)
547  {
548  	struct blk_plug *plug = current->plug;
549  	struct request *rq;
550  
551  	if (!plug)
552  		return NULL;
553  
554  	if (rq_list_empty(plug->cached_rq)) {
555  		if (plug->nr_ios == 1)
556  			return NULL;
557  		rq = blk_mq_rq_cache_fill(q, plug, opf, flags);
558  		if (!rq)
559  			return NULL;
560  	} else {
561  		rq = rq_list_peek(&plug->cached_rq);
562  		if (!rq || rq->q != q)
563  			return NULL;
564  
565  		if (blk_mq_get_hctx_type(opf) != rq->mq_hctx->type)
566  			return NULL;
567  		if (op_is_flush(rq->cmd_flags) != op_is_flush(opf))
568  			return NULL;
569  
570  		plug->cached_rq = rq_list_next(rq);
571  		blk_mq_rq_time_init(rq, 0);
572  	}
573  
574  	rq->cmd_flags = opf;
575  	INIT_LIST_HEAD(&rq->queuelist);
576  	return rq;
577  }
578  
blk_mq_alloc_request(struct request_queue * q,blk_opf_t opf,blk_mq_req_flags_t flags)579  struct request *blk_mq_alloc_request(struct request_queue *q, blk_opf_t opf,
580  		blk_mq_req_flags_t flags)
581  {
582  	struct request *rq;
583  
584  	rq = blk_mq_alloc_cached_request(q, opf, flags);
585  	if (!rq) {
586  		struct blk_mq_alloc_data data = {
587  			.q		= q,
588  			.flags		= flags,
589  			.cmd_flags	= opf,
590  			.nr_tags	= 1,
591  		};
592  		int ret;
593  
594  		ret = blk_queue_enter(q, flags);
595  		if (ret)
596  			return ERR_PTR(ret);
597  
598  		rq = __blk_mq_alloc_requests(&data);
599  		if (!rq)
600  			goto out_queue_exit;
601  	}
602  	rq->__data_len = 0;
603  	rq->__sector = (sector_t) -1;
604  	rq->bio = rq->biotail = NULL;
605  	return rq;
606  out_queue_exit:
607  	blk_queue_exit(q);
608  	return ERR_PTR(-EWOULDBLOCK);
609  }
610  EXPORT_SYMBOL(blk_mq_alloc_request);
611  
blk_mq_alloc_request_hctx(struct request_queue * q,blk_opf_t opf,blk_mq_req_flags_t flags,unsigned int hctx_idx)612  struct request *blk_mq_alloc_request_hctx(struct request_queue *q,
613  	blk_opf_t opf, blk_mq_req_flags_t flags, unsigned int hctx_idx)
614  {
615  	struct blk_mq_alloc_data data = {
616  		.q		= q,
617  		.flags		= flags,
618  		.cmd_flags	= opf,
619  		.nr_tags	= 1,
620  	};
621  	u64 alloc_time_ns = 0;
622  	struct request *rq;
623  	unsigned int cpu;
624  	unsigned int tag;
625  	int ret;
626  
627  	/* alloc_time includes depth and tag waits */
628  	if (blk_queue_rq_alloc_time(q))
629  		alloc_time_ns = blk_time_get_ns();
630  
631  	/*
632  	 * If the tag allocator sleeps we could get an allocation for a
633  	 * different hardware context.  No need to complicate the low level
634  	 * allocator for this for the rare use case of a command tied to
635  	 * a specific queue.
636  	 */
637  	if (WARN_ON_ONCE(!(flags & BLK_MQ_REQ_NOWAIT)) ||
638  	    WARN_ON_ONCE(!(flags & BLK_MQ_REQ_RESERVED)))
639  		return ERR_PTR(-EINVAL);
640  
641  	if (hctx_idx >= q->nr_hw_queues)
642  		return ERR_PTR(-EIO);
643  
644  	ret = blk_queue_enter(q, flags);
645  	if (ret)
646  		return ERR_PTR(ret);
647  
648  	/*
649  	 * Check if the hardware context is actually mapped to anything.
650  	 * If not tell the caller that it should skip this queue.
651  	 */
652  	ret = -EXDEV;
653  	data.hctx = xa_load(&q->hctx_table, hctx_idx);
654  	if (!blk_mq_hw_queue_mapped(data.hctx))
655  		goto out_queue_exit;
656  	cpu = cpumask_first_and(data.hctx->cpumask, cpu_online_mask);
657  	if (cpu >= nr_cpu_ids)
658  		goto out_queue_exit;
659  	data.ctx = __blk_mq_get_ctx(q, cpu);
660  
661  	if (q->elevator)
662  		data.rq_flags |= RQF_SCHED_TAGS;
663  	else
664  		blk_mq_tag_busy(data.hctx);
665  
666  	if (flags & BLK_MQ_REQ_RESERVED)
667  		data.rq_flags |= RQF_RESV;
668  
669  	ret = -EWOULDBLOCK;
670  	tag = blk_mq_get_tag(&data);
671  	if (tag == BLK_MQ_NO_TAG)
672  		goto out_queue_exit;
673  	if (!(data.rq_flags & RQF_SCHED_TAGS))
674  		blk_mq_inc_active_requests(data.hctx);
675  	rq = blk_mq_rq_ctx_init(&data, blk_mq_tags_from_data(&data), tag);
676  	blk_mq_rq_time_init(rq, alloc_time_ns);
677  	rq->__data_len = 0;
678  	rq->__sector = (sector_t) -1;
679  	rq->bio = rq->biotail = NULL;
680  	return rq;
681  
682  out_queue_exit:
683  	blk_queue_exit(q);
684  	return ERR_PTR(ret);
685  }
686  EXPORT_SYMBOL_GPL(blk_mq_alloc_request_hctx);
687  
blk_mq_finish_request(struct request * rq)688  static void blk_mq_finish_request(struct request *rq)
689  {
690  	struct request_queue *q = rq->q;
691  
692  	blk_zone_finish_request(rq);
693  
694  	if (rq->rq_flags & RQF_USE_SCHED) {
695  		q->elevator->type->ops.finish_request(rq);
696  		/*
697  		 * For postflush request that may need to be
698  		 * completed twice, we should clear this flag
699  		 * to avoid double finish_request() on the rq.
700  		 */
701  		rq->rq_flags &= ~RQF_USE_SCHED;
702  	}
703  }
704  
__blk_mq_free_request(struct request * rq)705  static void __blk_mq_free_request(struct request *rq)
706  {
707  	struct request_queue *q = rq->q;
708  	struct blk_mq_ctx *ctx = rq->mq_ctx;
709  	struct blk_mq_hw_ctx *hctx = rq->mq_hctx;
710  	const int sched_tag = rq->internal_tag;
711  
712  	blk_crypto_free_request(rq);
713  	blk_pm_mark_last_busy(rq);
714  	rq->mq_hctx = NULL;
715  
716  	if (rq->tag != BLK_MQ_NO_TAG) {
717  		blk_mq_dec_active_requests(hctx);
718  		blk_mq_put_tag(hctx->tags, ctx, rq->tag);
719  	}
720  	if (sched_tag != BLK_MQ_NO_TAG)
721  		blk_mq_put_tag(hctx->sched_tags, ctx, sched_tag);
722  	blk_mq_sched_restart(hctx);
723  	blk_queue_exit(q);
724  }
725  
blk_mq_free_request(struct request * rq)726  void blk_mq_free_request(struct request *rq)
727  {
728  	struct request_queue *q = rq->q;
729  
730  	blk_mq_finish_request(rq);
731  
732  	if (unlikely(laptop_mode && !blk_rq_is_passthrough(rq)))
733  		laptop_io_completion(q->disk->bdi);
734  
735  	rq_qos_done(q, rq);
736  
737  	WRITE_ONCE(rq->state, MQ_RQ_IDLE);
738  	if (req_ref_put_and_test(rq))
739  		__blk_mq_free_request(rq);
740  }
741  EXPORT_SYMBOL_GPL(blk_mq_free_request);
742  
blk_mq_free_plug_rqs(struct blk_plug * plug)743  void blk_mq_free_plug_rqs(struct blk_plug *plug)
744  {
745  	struct request *rq;
746  
747  	while ((rq = rq_list_pop(&plug->cached_rq)) != NULL)
748  		blk_mq_free_request(rq);
749  }
750  
blk_dump_rq_flags(struct request * rq,char * msg)751  void blk_dump_rq_flags(struct request *rq, char *msg)
752  {
753  	printk(KERN_INFO "%s: dev %s: flags=%llx\n", msg,
754  		rq->q->disk ? rq->q->disk->disk_name : "?",
755  		(__force unsigned long long) rq->cmd_flags);
756  
757  	printk(KERN_INFO "  sector %llu, nr/cnr %u/%u\n",
758  	       (unsigned long long)blk_rq_pos(rq),
759  	       blk_rq_sectors(rq), blk_rq_cur_sectors(rq));
760  	printk(KERN_INFO "  bio %p, biotail %p, len %u\n",
761  	       rq->bio, rq->biotail, blk_rq_bytes(rq));
762  }
763  EXPORT_SYMBOL(blk_dump_rq_flags);
764  
blk_account_io_completion(struct request * req,unsigned int bytes)765  static void blk_account_io_completion(struct request *req, unsigned int bytes)
766  {
767  	if (req->part && blk_do_io_stat(req)) {
768  		const int sgrp = op_stat_group(req_op(req));
769  
770  		part_stat_lock();
771  		part_stat_add(req->part, sectors[sgrp], bytes >> 9);
772  		part_stat_unlock();
773  	}
774  }
775  
blk_print_req_error(struct request * req,blk_status_t status)776  static void blk_print_req_error(struct request *req, blk_status_t status)
777  {
778  	printk_ratelimited(KERN_ERR
779  		"%s error, dev %s, sector %llu op 0x%x:(%s) flags 0x%x "
780  		"phys_seg %u prio class %u\n",
781  		blk_status_to_str(status),
782  		req->q->disk ? req->q->disk->disk_name : "?",
783  		blk_rq_pos(req), (__force u32)req_op(req),
784  		blk_op_str(req_op(req)),
785  		(__force u32)(req->cmd_flags & ~REQ_OP_MASK),
786  		req->nr_phys_segments,
787  		IOPRIO_PRIO_CLASS(req->ioprio));
788  }
789  
790  /*
791   * Fully end IO on a request. Does not support partial completions, or
792   * errors.
793   */
blk_complete_request(struct request * req)794  static void blk_complete_request(struct request *req)
795  {
796  	const bool is_flush = (req->rq_flags & RQF_FLUSH_SEQ) != 0;
797  	int total_bytes = blk_rq_bytes(req);
798  	struct bio *bio = req->bio;
799  
800  	trace_block_rq_complete(req, BLK_STS_OK, total_bytes);
801  
802  	if (!bio)
803  		return;
804  
805  	if (blk_integrity_rq(req) && req_op(req) == REQ_OP_READ)
806  		blk_integrity_complete(req, total_bytes);
807  
808  	/*
809  	 * Upper layers may call blk_crypto_evict_key() anytime after the last
810  	 * bio_endio().  Therefore, the keyslot must be released before that.
811  	 */
812  	blk_crypto_rq_put_keyslot(req);
813  
814  	blk_account_io_completion(req, total_bytes);
815  
816  	do {
817  		struct bio *next = bio->bi_next;
818  
819  		/* Completion has already been traced */
820  		bio_clear_flag(bio, BIO_TRACE_COMPLETION);
821  
822  		blk_zone_update_request_bio(req, bio);
823  
824  		if (!is_flush)
825  			bio_endio(bio);
826  		bio = next;
827  	} while (bio);
828  
829  	/*
830  	 * Reset counters so that the request stacking driver
831  	 * can find how many bytes remain in the request
832  	 * later.
833  	 */
834  	if (!req->end_io) {
835  		req->bio = NULL;
836  		req->__data_len = 0;
837  	}
838  }
839  
840  /**
841   * blk_update_request - Complete multiple bytes without completing the request
842   * @req:      the request being processed
843   * @error:    block status code
844   * @nr_bytes: number of bytes to complete for @req
845   *
846   * Description:
847   *     Ends I/O on a number of bytes attached to @req, but doesn't complete
848   *     the request structure even if @req doesn't have leftover.
849   *     If @req has leftover, sets it up for the next range of segments.
850   *
851   *     Passing the result of blk_rq_bytes() as @nr_bytes guarantees
852   *     %false return from this function.
853   *
854   * Note:
855   *	The RQF_SPECIAL_PAYLOAD flag is ignored on purpose in this function
856   *      except in the consistency check at the end of this function.
857   *
858   * Return:
859   *     %false - this request doesn't have any more data
860   *     %true  - this request has more data
861   **/
blk_update_request(struct request * req,blk_status_t error,unsigned int nr_bytes)862  bool blk_update_request(struct request *req, blk_status_t error,
863  		unsigned int nr_bytes)
864  {
865  	bool is_flush = req->rq_flags & RQF_FLUSH_SEQ;
866  	bool quiet = req->rq_flags & RQF_QUIET;
867  	int total_bytes;
868  
869  	trace_block_rq_complete(req, error, nr_bytes);
870  
871  	if (!req->bio)
872  		return false;
873  
874  	if (blk_integrity_rq(req) && req_op(req) == REQ_OP_READ &&
875  	    error == BLK_STS_OK)
876  		blk_integrity_complete(req, nr_bytes);
877  
878  	/*
879  	 * Upper layers may call blk_crypto_evict_key() anytime after the last
880  	 * bio_endio().  Therefore, the keyslot must be released before that.
881  	 */
882  	if (blk_crypto_rq_has_keyslot(req) && nr_bytes >= blk_rq_bytes(req))
883  		__blk_crypto_rq_put_keyslot(req);
884  
885  	if (unlikely(error && !blk_rq_is_passthrough(req) && !quiet) &&
886  	    !test_bit(GD_DEAD, &req->q->disk->state)) {
887  		blk_print_req_error(req, error);
888  		trace_block_rq_error(req, error, nr_bytes);
889  	}
890  
891  	blk_account_io_completion(req, nr_bytes);
892  
893  	total_bytes = 0;
894  	while (req->bio) {
895  		struct bio *bio = req->bio;
896  		unsigned bio_bytes = min(bio->bi_iter.bi_size, nr_bytes);
897  
898  		if (unlikely(error))
899  			bio->bi_status = error;
900  
901  		if (bio_bytes == bio->bi_iter.bi_size) {
902  			req->bio = bio->bi_next;
903  		} else if (bio_is_zone_append(bio) && error == BLK_STS_OK) {
904  			/*
905  			 * Partial zone append completions cannot be supported
906  			 * as the BIO fragments may end up not being written
907  			 * sequentially.
908  			 */
909  			bio->bi_status = BLK_STS_IOERR;
910  		}
911  
912  		/* Completion has already been traced */
913  		bio_clear_flag(bio, BIO_TRACE_COMPLETION);
914  		if (unlikely(quiet))
915  			bio_set_flag(bio, BIO_QUIET);
916  
917  		bio_advance(bio, bio_bytes);
918  
919  		/* Don't actually finish bio if it's part of flush sequence */
920  		if (!bio->bi_iter.bi_size) {
921  			blk_zone_update_request_bio(req, bio);
922  			if (!is_flush)
923  				bio_endio(bio);
924  		}
925  
926  		total_bytes += bio_bytes;
927  		nr_bytes -= bio_bytes;
928  
929  		if (!nr_bytes)
930  			break;
931  	}
932  
933  	/*
934  	 * completely done
935  	 */
936  	if (!req->bio) {
937  		/*
938  		 * Reset counters so that the request stacking driver
939  		 * can find how many bytes remain in the request
940  		 * later.
941  		 */
942  		req->__data_len = 0;
943  		return false;
944  	}
945  
946  	req->__data_len -= total_bytes;
947  
948  	/* update sector only for requests with clear definition of sector */
949  	if (!blk_rq_is_passthrough(req))
950  		req->__sector += total_bytes >> 9;
951  
952  	/* mixed attributes always follow the first bio */
953  	if (req->rq_flags & RQF_MIXED_MERGE) {
954  		req->cmd_flags &= ~REQ_FAILFAST_MASK;
955  		req->cmd_flags |= req->bio->bi_opf & REQ_FAILFAST_MASK;
956  	}
957  
958  	if (!(req->rq_flags & RQF_SPECIAL_PAYLOAD)) {
959  		/*
960  		 * If total number of sectors is less than the first segment
961  		 * size, something has gone terribly wrong.
962  		 */
963  		if (blk_rq_bytes(req) < blk_rq_cur_bytes(req)) {
964  			blk_dump_rq_flags(req, "request botched");
965  			req->__data_len = blk_rq_cur_bytes(req);
966  		}
967  
968  		/* recalculate the number of segments */
969  		req->nr_phys_segments = blk_recalc_rq_segments(req);
970  	}
971  
972  	return true;
973  }
974  EXPORT_SYMBOL_GPL(blk_update_request);
975  
blk_account_io_done(struct request * req,u64 now)976  static inline void blk_account_io_done(struct request *req, u64 now)
977  {
978  	trace_block_io_done(req);
979  
980  	/*
981  	 * Account IO completion.  flush_rq isn't accounted as a
982  	 * normal IO on queueing nor completion.  Accounting the
983  	 * containing request is enough.
984  	 */
985  	if (blk_do_io_stat(req) && req->part &&
986  	    !(req->rq_flags & RQF_FLUSH_SEQ)) {
987  		const int sgrp = op_stat_group(req_op(req));
988  
989  		part_stat_lock();
990  		update_io_ticks(req->part, jiffies, true);
991  		part_stat_inc(req->part, ios[sgrp]);
992  		part_stat_add(req->part, nsecs[sgrp], now - req->start_time_ns);
993  		part_stat_local_dec(req->part,
994  				    in_flight[op_is_write(req_op(req))]);
995  		part_stat_unlock();
996  	}
997  }
998  
blk_account_io_start(struct request * req)999  static inline void blk_account_io_start(struct request *req)
1000  {
1001  	trace_block_io_start(req);
1002  
1003  	if (blk_do_io_stat(req)) {
1004  		/*
1005  		 * All non-passthrough requests are created from a bio with one
1006  		 * exception: when a flush command that is part of a flush sequence
1007  		 * generated by the state machine in blk-flush.c is cloned onto the
1008  		 * lower device by dm-multipath we can get here without a bio.
1009  		 */
1010  		if (req->bio)
1011  			req->part = req->bio->bi_bdev;
1012  		else
1013  			req->part = req->q->disk->part0;
1014  
1015  		part_stat_lock();
1016  		update_io_ticks(req->part, jiffies, false);
1017  		part_stat_local_inc(req->part,
1018  				    in_flight[op_is_write(req_op(req))]);
1019  		part_stat_unlock();
1020  	}
1021  }
1022  
__blk_mq_end_request_acct(struct request * rq,u64 now)1023  static inline void __blk_mq_end_request_acct(struct request *rq, u64 now)
1024  {
1025  	if (rq->rq_flags & RQF_STATS)
1026  		blk_stat_add(rq, now);
1027  
1028  	blk_mq_sched_completed_request(rq, now);
1029  	blk_account_io_done(rq, now);
1030  }
1031  
__blk_mq_end_request(struct request * rq,blk_status_t error)1032  inline void __blk_mq_end_request(struct request *rq, blk_status_t error)
1033  {
1034  	if (blk_mq_need_time_stamp(rq))
1035  		__blk_mq_end_request_acct(rq, blk_time_get_ns());
1036  
1037  	blk_mq_finish_request(rq);
1038  
1039  	if (rq->end_io) {
1040  		rq_qos_done(rq->q, rq);
1041  		if (rq->end_io(rq, error) == RQ_END_IO_FREE)
1042  			blk_mq_free_request(rq);
1043  	} else {
1044  		blk_mq_free_request(rq);
1045  	}
1046  }
1047  EXPORT_SYMBOL(__blk_mq_end_request);
1048  
blk_mq_end_request(struct request * rq,blk_status_t error)1049  void blk_mq_end_request(struct request *rq, blk_status_t error)
1050  {
1051  	if (blk_update_request(rq, error, blk_rq_bytes(rq)))
1052  		BUG();
1053  	__blk_mq_end_request(rq, error);
1054  }
1055  EXPORT_SYMBOL(blk_mq_end_request);
1056  
1057  #define TAG_COMP_BATCH		32
1058  
blk_mq_flush_tag_batch(struct blk_mq_hw_ctx * hctx,int * tag_array,int nr_tags)1059  static inline void blk_mq_flush_tag_batch(struct blk_mq_hw_ctx *hctx,
1060  					  int *tag_array, int nr_tags)
1061  {
1062  	struct request_queue *q = hctx->queue;
1063  
1064  	blk_mq_sub_active_requests(hctx, nr_tags);
1065  
1066  	blk_mq_put_tags(hctx->tags, tag_array, nr_tags);
1067  	percpu_ref_put_many(&q->q_usage_counter, nr_tags);
1068  }
1069  
blk_mq_end_request_batch(struct io_comp_batch * iob)1070  void blk_mq_end_request_batch(struct io_comp_batch *iob)
1071  {
1072  	int tags[TAG_COMP_BATCH], nr_tags = 0;
1073  	struct blk_mq_hw_ctx *cur_hctx = NULL;
1074  	struct request *rq;
1075  	u64 now = 0;
1076  
1077  	if (iob->need_ts)
1078  		now = blk_time_get_ns();
1079  
1080  	while ((rq = rq_list_pop(&iob->req_list)) != NULL) {
1081  		prefetch(rq->bio);
1082  		prefetch(rq->rq_next);
1083  
1084  		blk_complete_request(rq);
1085  		if (iob->need_ts)
1086  			__blk_mq_end_request_acct(rq, now);
1087  
1088  		blk_mq_finish_request(rq);
1089  
1090  		rq_qos_done(rq->q, rq);
1091  
1092  		/*
1093  		 * If end_io handler returns NONE, then it still has
1094  		 * ownership of the request.
1095  		 */
1096  		if (rq->end_io && rq->end_io(rq, 0) == RQ_END_IO_NONE)
1097  			continue;
1098  
1099  		WRITE_ONCE(rq->state, MQ_RQ_IDLE);
1100  		if (!req_ref_put_and_test(rq))
1101  			continue;
1102  
1103  		blk_crypto_free_request(rq);
1104  		blk_pm_mark_last_busy(rq);
1105  
1106  		if (nr_tags == TAG_COMP_BATCH || cur_hctx != rq->mq_hctx) {
1107  			if (cur_hctx)
1108  				blk_mq_flush_tag_batch(cur_hctx, tags, nr_tags);
1109  			nr_tags = 0;
1110  			cur_hctx = rq->mq_hctx;
1111  		}
1112  		tags[nr_tags++] = rq->tag;
1113  	}
1114  
1115  	if (nr_tags)
1116  		blk_mq_flush_tag_batch(cur_hctx, tags, nr_tags);
1117  }
1118  EXPORT_SYMBOL_GPL(blk_mq_end_request_batch);
1119  
blk_complete_reqs(struct llist_head * list)1120  static void blk_complete_reqs(struct llist_head *list)
1121  {
1122  	struct llist_node *entry = llist_reverse_order(llist_del_all(list));
1123  	struct request *rq, *next;
1124  
1125  	llist_for_each_entry_safe(rq, next, entry, ipi_list)
1126  		rq->q->mq_ops->complete(rq);
1127  }
1128  
blk_done_softirq(void)1129  static __latent_entropy void blk_done_softirq(void)
1130  {
1131  	blk_complete_reqs(this_cpu_ptr(&blk_cpu_done));
1132  }
1133  
blk_softirq_cpu_dead(unsigned int cpu)1134  static int blk_softirq_cpu_dead(unsigned int cpu)
1135  {
1136  	blk_complete_reqs(&per_cpu(blk_cpu_done, cpu));
1137  	return 0;
1138  }
1139  
__blk_mq_complete_request_remote(void * data)1140  static void __blk_mq_complete_request_remote(void *data)
1141  {
1142  	__raise_softirq_irqoff(BLOCK_SOFTIRQ);
1143  }
1144  
blk_mq_complete_need_ipi(struct request * rq)1145  static inline bool blk_mq_complete_need_ipi(struct request *rq)
1146  {
1147  	int cpu = raw_smp_processor_id();
1148  
1149  	if (!IS_ENABLED(CONFIG_SMP) ||
1150  	    !test_bit(QUEUE_FLAG_SAME_COMP, &rq->q->queue_flags))
1151  		return false;
1152  	/*
1153  	 * With force threaded interrupts enabled, raising softirq from an SMP
1154  	 * function call will always result in waking the ksoftirqd thread.
1155  	 * This is probably worse than completing the request on a different
1156  	 * cache domain.
1157  	 */
1158  	if (force_irqthreads())
1159  		return false;
1160  
1161  	/* same CPU or cache domain and capacity?  Complete locally */
1162  	if (cpu == rq->mq_ctx->cpu ||
1163  	    (!test_bit(QUEUE_FLAG_SAME_FORCE, &rq->q->queue_flags) &&
1164  	     cpus_share_cache(cpu, rq->mq_ctx->cpu) &&
1165  	     cpus_equal_capacity(cpu, rq->mq_ctx->cpu)))
1166  		return false;
1167  
1168  	/* don't try to IPI to an offline CPU */
1169  	return cpu_online(rq->mq_ctx->cpu);
1170  }
1171  
blk_mq_complete_send_ipi(struct request * rq)1172  static void blk_mq_complete_send_ipi(struct request *rq)
1173  {
1174  	unsigned int cpu;
1175  
1176  	cpu = rq->mq_ctx->cpu;
1177  	if (llist_add(&rq->ipi_list, &per_cpu(blk_cpu_done, cpu)))
1178  		smp_call_function_single_async(cpu, &per_cpu(blk_cpu_csd, cpu));
1179  }
1180  
blk_mq_raise_softirq(struct request * rq)1181  static void blk_mq_raise_softirq(struct request *rq)
1182  {
1183  	struct llist_head *list;
1184  
1185  	preempt_disable();
1186  	list = this_cpu_ptr(&blk_cpu_done);
1187  	if (llist_add(&rq->ipi_list, list))
1188  		raise_softirq(BLOCK_SOFTIRQ);
1189  	preempt_enable();
1190  }
1191  
blk_mq_complete_request_remote(struct request * rq)1192  bool blk_mq_complete_request_remote(struct request *rq)
1193  {
1194  	WRITE_ONCE(rq->state, MQ_RQ_COMPLETE);
1195  
1196  	/*
1197  	 * For request which hctx has only one ctx mapping,
1198  	 * or a polled request, always complete locally,
1199  	 * it's pointless to redirect the completion.
1200  	 */
1201  	if ((rq->mq_hctx->nr_ctx == 1 &&
1202  	     rq->mq_ctx->cpu == raw_smp_processor_id()) ||
1203  	     rq->cmd_flags & REQ_POLLED)
1204  		return false;
1205  
1206  	if (blk_mq_complete_need_ipi(rq)) {
1207  		blk_mq_complete_send_ipi(rq);
1208  		return true;
1209  	}
1210  
1211  	if (rq->q->nr_hw_queues == 1) {
1212  		blk_mq_raise_softirq(rq);
1213  		return true;
1214  	}
1215  	return false;
1216  }
1217  EXPORT_SYMBOL_GPL(blk_mq_complete_request_remote);
1218  
1219  /**
1220   * blk_mq_complete_request - end I/O on a request
1221   * @rq:		the request being processed
1222   *
1223   * Description:
1224   *	Complete a request by scheduling the ->complete_rq operation.
1225   **/
blk_mq_complete_request(struct request * rq)1226  void blk_mq_complete_request(struct request *rq)
1227  {
1228  	if (!blk_mq_complete_request_remote(rq))
1229  		rq->q->mq_ops->complete(rq);
1230  }
1231  EXPORT_SYMBOL(blk_mq_complete_request);
1232  
1233  /**
1234   * blk_mq_start_request - Start processing a request
1235   * @rq: Pointer to request to be started
1236   *
1237   * Function used by device drivers to notify the block layer that a request
1238   * is going to be processed now, so blk layer can do proper initializations
1239   * such as starting the timeout timer.
1240   */
blk_mq_start_request(struct request * rq)1241  void blk_mq_start_request(struct request *rq)
1242  {
1243  	struct request_queue *q = rq->q;
1244  
1245  	trace_block_rq_issue(rq);
1246  
1247  	if (test_bit(QUEUE_FLAG_STATS, &q->queue_flags) &&
1248  	    !blk_rq_is_passthrough(rq)) {
1249  		rq->io_start_time_ns = blk_time_get_ns();
1250  		rq->stats_sectors = blk_rq_sectors(rq);
1251  		rq->rq_flags |= RQF_STATS;
1252  		rq_qos_issue(q, rq);
1253  	}
1254  
1255  	WARN_ON_ONCE(blk_mq_rq_state(rq) != MQ_RQ_IDLE);
1256  
1257  	blk_add_timer(rq);
1258  	WRITE_ONCE(rq->state, MQ_RQ_IN_FLIGHT);
1259  	rq->mq_hctx->tags->rqs[rq->tag] = rq;
1260  
1261  	if (blk_integrity_rq(rq) && req_op(rq) == REQ_OP_WRITE)
1262  		blk_integrity_prepare(rq);
1263  
1264  	if (rq->bio && rq->bio->bi_opf & REQ_POLLED)
1265  	        WRITE_ONCE(rq->bio->bi_cookie, rq->mq_hctx->queue_num);
1266  }
1267  EXPORT_SYMBOL(blk_mq_start_request);
1268  
1269  /*
1270   * Allow 2x BLK_MAX_REQUEST_COUNT requests on plug queue for multiple
1271   * queues. This is important for md arrays to benefit from merging
1272   * requests.
1273   */
blk_plug_max_rq_count(struct blk_plug * plug)1274  static inline unsigned short blk_plug_max_rq_count(struct blk_plug *plug)
1275  {
1276  	if (plug->multiple_queues)
1277  		return BLK_MAX_REQUEST_COUNT * 2;
1278  	return BLK_MAX_REQUEST_COUNT;
1279  }
1280  
blk_add_rq_to_plug(struct blk_plug * plug,struct request * rq)1281  static void blk_add_rq_to_plug(struct blk_plug *plug, struct request *rq)
1282  {
1283  	struct request *last = rq_list_peek(&plug->mq_list);
1284  
1285  	if (!plug->rq_count) {
1286  		trace_block_plug(rq->q);
1287  	} else if (plug->rq_count >= blk_plug_max_rq_count(plug) ||
1288  		   (!blk_queue_nomerges(rq->q) &&
1289  		    blk_rq_bytes(last) >= BLK_PLUG_FLUSH_SIZE)) {
1290  		blk_mq_flush_plug_list(plug, false);
1291  		last = NULL;
1292  		trace_block_plug(rq->q);
1293  	}
1294  
1295  	if (!plug->multiple_queues && last && last->q != rq->q)
1296  		plug->multiple_queues = true;
1297  	/*
1298  	 * Any request allocated from sched tags can't be issued to
1299  	 * ->queue_rqs() directly
1300  	 */
1301  	if (!plug->has_elevator && (rq->rq_flags & RQF_SCHED_TAGS))
1302  		plug->has_elevator = true;
1303  	rq->rq_next = NULL;
1304  	rq_list_add(&plug->mq_list, rq);
1305  	plug->rq_count++;
1306  }
1307  
1308  /**
1309   * blk_execute_rq_nowait - insert a request to I/O scheduler for execution
1310   * @rq:		request to insert
1311   * @at_head:    insert request at head or tail of queue
1312   *
1313   * Description:
1314   *    Insert a fully prepared request at the back of the I/O scheduler queue
1315   *    for execution.  Don't wait for completion.
1316   *
1317   * Note:
1318   *    This function will invoke @done directly if the queue is dead.
1319   */
blk_execute_rq_nowait(struct request * rq,bool at_head)1320  void blk_execute_rq_nowait(struct request *rq, bool at_head)
1321  {
1322  	struct blk_mq_hw_ctx *hctx = rq->mq_hctx;
1323  
1324  	WARN_ON(irqs_disabled());
1325  	WARN_ON(!blk_rq_is_passthrough(rq));
1326  
1327  	blk_account_io_start(rq);
1328  
1329  	if (current->plug && !at_head) {
1330  		blk_add_rq_to_plug(current->plug, rq);
1331  		return;
1332  	}
1333  
1334  	blk_mq_insert_request(rq, at_head ? BLK_MQ_INSERT_AT_HEAD : 0);
1335  	blk_mq_run_hw_queue(hctx, hctx->flags & BLK_MQ_F_BLOCKING);
1336  }
1337  EXPORT_SYMBOL_GPL(blk_execute_rq_nowait);
1338  
1339  struct blk_rq_wait {
1340  	struct completion done;
1341  	blk_status_t ret;
1342  };
1343  
blk_end_sync_rq(struct request * rq,blk_status_t ret)1344  static enum rq_end_io_ret blk_end_sync_rq(struct request *rq, blk_status_t ret)
1345  {
1346  	struct blk_rq_wait *wait = rq->end_io_data;
1347  
1348  	wait->ret = ret;
1349  	complete(&wait->done);
1350  	return RQ_END_IO_NONE;
1351  }
1352  
blk_rq_is_poll(struct request * rq)1353  bool blk_rq_is_poll(struct request *rq)
1354  {
1355  	if (!rq->mq_hctx)
1356  		return false;
1357  	if (rq->mq_hctx->type != HCTX_TYPE_POLL)
1358  		return false;
1359  	return true;
1360  }
1361  EXPORT_SYMBOL_GPL(blk_rq_is_poll);
1362  
blk_rq_poll_completion(struct request * rq,struct completion * wait)1363  static void blk_rq_poll_completion(struct request *rq, struct completion *wait)
1364  {
1365  	do {
1366  		blk_hctx_poll(rq->q, rq->mq_hctx, NULL, 0);
1367  		cond_resched();
1368  	} while (!completion_done(wait));
1369  }
1370  
1371  /**
1372   * blk_execute_rq - insert a request into queue for execution
1373   * @rq:		request to insert
1374   * @at_head:    insert request at head or tail of queue
1375   *
1376   * Description:
1377   *    Insert a fully prepared request at the back of the I/O scheduler queue
1378   *    for execution and wait for completion.
1379   * Return: The blk_status_t result provided to blk_mq_end_request().
1380   */
blk_execute_rq(struct request * rq,bool at_head)1381  blk_status_t blk_execute_rq(struct request *rq, bool at_head)
1382  {
1383  	struct blk_mq_hw_ctx *hctx = rq->mq_hctx;
1384  	struct blk_rq_wait wait = {
1385  		.done = COMPLETION_INITIALIZER_ONSTACK(wait.done),
1386  	};
1387  
1388  	WARN_ON(irqs_disabled());
1389  	WARN_ON(!blk_rq_is_passthrough(rq));
1390  
1391  	rq->end_io_data = &wait;
1392  	rq->end_io = blk_end_sync_rq;
1393  
1394  	blk_account_io_start(rq);
1395  	blk_mq_insert_request(rq, at_head ? BLK_MQ_INSERT_AT_HEAD : 0);
1396  	blk_mq_run_hw_queue(hctx, false);
1397  
1398  	if (blk_rq_is_poll(rq))
1399  		blk_rq_poll_completion(rq, &wait.done);
1400  	else
1401  		blk_wait_io(&wait.done);
1402  
1403  	return wait.ret;
1404  }
1405  EXPORT_SYMBOL(blk_execute_rq);
1406  
__blk_mq_requeue_request(struct request * rq)1407  static void __blk_mq_requeue_request(struct request *rq)
1408  {
1409  	struct request_queue *q = rq->q;
1410  
1411  	blk_mq_put_driver_tag(rq);
1412  
1413  	trace_block_rq_requeue(rq);
1414  	rq_qos_requeue(q, rq);
1415  
1416  	if (blk_mq_request_started(rq)) {
1417  		WRITE_ONCE(rq->state, MQ_RQ_IDLE);
1418  		rq->rq_flags &= ~RQF_TIMED_OUT;
1419  	}
1420  }
1421  
blk_mq_requeue_request(struct request * rq,bool kick_requeue_list)1422  void blk_mq_requeue_request(struct request *rq, bool kick_requeue_list)
1423  {
1424  	struct request_queue *q = rq->q;
1425  	unsigned long flags;
1426  
1427  	__blk_mq_requeue_request(rq);
1428  
1429  	/* this request will be re-inserted to io scheduler queue */
1430  	blk_mq_sched_requeue_request(rq);
1431  
1432  	spin_lock_irqsave(&q->requeue_lock, flags);
1433  	list_add_tail(&rq->queuelist, &q->requeue_list);
1434  	spin_unlock_irqrestore(&q->requeue_lock, flags);
1435  
1436  	if (kick_requeue_list)
1437  		blk_mq_kick_requeue_list(q);
1438  }
1439  EXPORT_SYMBOL(blk_mq_requeue_request);
1440  
blk_mq_requeue_work(struct work_struct * work)1441  static void blk_mq_requeue_work(struct work_struct *work)
1442  {
1443  	struct request_queue *q =
1444  		container_of(work, struct request_queue, requeue_work.work);
1445  	LIST_HEAD(rq_list);
1446  	LIST_HEAD(flush_list);
1447  	struct request *rq;
1448  
1449  	spin_lock_irq(&q->requeue_lock);
1450  	list_splice_init(&q->requeue_list, &rq_list);
1451  	list_splice_init(&q->flush_list, &flush_list);
1452  	spin_unlock_irq(&q->requeue_lock);
1453  
1454  	while (!list_empty(&rq_list)) {
1455  		rq = list_entry(rq_list.next, struct request, queuelist);
1456  		/*
1457  		 * If RQF_DONTPREP ist set, the request has been started by the
1458  		 * driver already and might have driver-specific data allocated
1459  		 * already.  Insert it into the hctx dispatch list to avoid
1460  		 * block layer merges for the request.
1461  		 */
1462  		if (rq->rq_flags & RQF_DONTPREP) {
1463  			list_del_init(&rq->queuelist);
1464  			blk_mq_request_bypass_insert(rq, 0);
1465  		} else {
1466  			list_del_init(&rq->queuelist);
1467  			blk_mq_insert_request(rq, BLK_MQ_INSERT_AT_HEAD);
1468  		}
1469  	}
1470  
1471  	while (!list_empty(&flush_list)) {
1472  		rq = list_entry(flush_list.next, struct request, queuelist);
1473  		list_del_init(&rq->queuelist);
1474  		blk_mq_insert_request(rq, 0);
1475  	}
1476  
1477  	blk_mq_run_hw_queues(q, false);
1478  }
1479  
blk_mq_kick_requeue_list(struct request_queue * q)1480  void blk_mq_kick_requeue_list(struct request_queue *q)
1481  {
1482  	kblockd_mod_delayed_work_on(WORK_CPU_UNBOUND, &q->requeue_work, 0);
1483  }
1484  EXPORT_SYMBOL(blk_mq_kick_requeue_list);
1485  
blk_mq_delay_kick_requeue_list(struct request_queue * q,unsigned long msecs)1486  void blk_mq_delay_kick_requeue_list(struct request_queue *q,
1487  				    unsigned long msecs)
1488  {
1489  	kblockd_mod_delayed_work_on(WORK_CPU_UNBOUND, &q->requeue_work,
1490  				    msecs_to_jiffies(msecs));
1491  }
1492  EXPORT_SYMBOL(blk_mq_delay_kick_requeue_list);
1493  
blk_is_flush_data_rq(struct request * rq)1494  static bool blk_is_flush_data_rq(struct request *rq)
1495  {
1496  	return (rq->rq_flags & RQF_FLUSH_SEQ) && !is_flush_rq(rq);
1497  }
1498  
blk_mq_rq_inflight(struct request * rq,void * priv)1499  static bool blk_mq_rq_inflight(struct request *rq, void *priv)
1500  {
1501  	/*
1502  	 * If we find a request that isn't idle we know the queue is busy
1503  	 * as it's checked in the iter.
1504  	 * Return false to stop the iteration.
1505  	 *
1506  	 * In case of queue quiesce, if one flush data request is completed,
1507  	 * don't count it as inflight given the flush sequence is suspended,
1508  	 * and the original flush data request is invisible to driver, just
1509  	 * like other pending requests because of quiesce
1510  	 */
1511  	if (blk_mq_request_started(rq) && !(blk_queue_quiesced(rq->q) &&
1512  				blk_is_flush_data_rq(rq) &&
1513  				blk_mq_request_completed(rq))) {
1514  		bool *busy = priv;
1515  
1516  		*busy = true;
1517  		return false;
1518  	}
1519  
1520  	return true;
1521  }
1522  
blk_mq_queue_inflight(struct request_queue * q)1523  bool blk_mq_queue_inflight(struct request_queue *q)
1524  {
1525  	bool busy = false;
1526  
1527  	blk_mq_queue_tag_busy_iter(q, blk_mq_rq_inflight, &busy);
1528  	return busy;
1529  }
1530  EXPORT_SYMBOL_GPL(blk_mq_queue_inflight);
1531  
blk_mq_rq_timed_out(struct request * req)1532  static void blk_mq_rq_timed_out(struct request *req)
1533  {
1534  	req->rq_flags |= RQF_TIMED_OUT;
1535  	if (req->q->mq_ops->timeout) {
1536  		enum blk_eh_timer_return ret;
1537  
1538  		ret = req->q->mq_ops->timeout(req);
1539  		if (ret == BLK_EH_DONE)
1540  			return;
1541  		WARN_ON_ONCE(ret != BLK_EH_RESET_TIMER);
1542  	}
1543  
1544  	blk_add_timer(req);
1545  }
1546  
1547  struct blk_expired_data {
1548  	bool has_timedout_rq;
1549  	unsigned long next;
1550  	unsigned long timeout_start;
1551  };
1552  
blk_mq_req_expired(struct request * rq,struct blk_expired_data * expired)1553  static bool blk_mq_req_expired(struct request *rq, struct blk_expired_data *expired)
1554  {
1555  	unsigned long deadline;
1556  
1557  	if (blk_mq_rq_state(rq) != MQ_RQ_IN_FLIGHT)
1558  		return false;
1559  	if (rq->rq_flags & RQF_TIMED_OUT)
1560  		return false;
1561  
1562  	deadline = READ_ONCE(rq->deadline);
1563  	if (time_after_eq(expired->timeout_start, deadline))
1564  		return true;
1565  
1566  	if (expired->next == 0)
1567  		expired->next = deadline;
1568  	else if (time_after(expired->next, deadline))
1569  		expired->next = deadline;
1570  	return false;
1571  }
1572  
blk_mq_put_rq_ref(struct request * rq)1573  void blk_mq_put_rq_ref(struct request *rq)
1574  {
1575  	if (is_flush_rq(rq)) {
1576  		if (rq->end_io(rq, 0) == RQ_END_IO_FREE)
1577  			blk_mq_free_request(rq);
1578  	} else if (req_ref_put_and_test(rq)) {
1579  		__blk_mq_free_request(rq);
1580  	}
1581  }
1582  
blk_mq_check_expired(struct request * rq,void * priv)1583  static bool blk_mq_check_expired(struct request *rq, void *priv)
1584  {
1585  	struct blk_expired_data *expired = priv;
1586  
1587  	/*
1588  	 * blk_mq_queue_tag_busy_iter() has locked the request, so it cannot
1589  	 * be reallocated underneath the timeout handler's processing, then
1590  	 * the expire check is reliable. If the request is not expired, then
1591  	 * it was completed and reallocated as a new request after returning
1592  	 * from blk_mq_check_expired().
1593  	 */
1594  	if (blk_mq_req_expired(rq, expired)) {
1595  		expired->has_timedout_rq = true;
1596  		return false;
1597  	}
1598  	return true;
1599  }
1600  
blk_mq_handle_expired(struct request * rq,void * priv)1601  static bool blk_mq_handle_expired(struct request *rq, void *priv)
1602  {
1603  	struct blk_expired_data *expired = priv;
1604  
1605  	if (blk_mq_req_expired(rq, expired))
1606  		blk_mq_rq_timed_out(rq);
1607  	return true;
1608  }
1609  
blk_mq_timeout_work(struct work_struct * work)1610  static void blk_mq_timeout_work(struct work_struct *work)
1611  {
1612  	struct request_queue *q =
1613  		container_of(work, struct request_queue, timeout_work);
1614  	struct blk_expired_data expired = {
1615  		.timeout_start = jiffies,
1616  	};
1617  	struct blk_mq_hw_ctx *hctx;
1618  	unsigned long i;
1619  
1620  	/* A deadlock might occur if a request is stuck requiring a
1621  	 * timeout at the same time a queue freeze is waiting
1622  	 * completion, since the timeout code would not be able to
1623  	 * acquire the queue reference here.
1624  	 *
1625  	 * That's why we don't use blk_queue_enter here; instead, we use
1626  	 * percpu_ref_tryget directly, because we need to be able to
1627  	 * obtain a reference even in the short window between the queue
1628  	 * starting to freeze, by dropping the first reference in
1629  	 * blk_freeze_queue_start, and the moment the last request is
1630  	 * consumed, marked by the instant q_usage_counter reaches
1631  	 * zero.
1632  	 */
1633  	if (!percpu_ref_tryget(&q->q_usage_counter))
1634  		return;
1635  
1636  	/* check if there is any timed-out request */
1637  	blk_mq_queue_tag_busy_iter(q, blk_mq_check_expired, &expired);
1638  	if (expired.has_timedout_rq) {
1639  		/*
1640  		 * Before walking tags, we must ensure any submit started
1641  		 * before the current time has finished. Since the submit
1642  		 * uses srcu or rcu, wait for a synchronization point to
1643  		 * ensure all running submits have finished
1644  		 */
1645  		blk_mq_wait_quiesce_done(q->tag_set);
1646  
1647  		expired.next = 0;
1648  		blk_mq_queue_tag_busy_iter(q, blk_mq_handle_expired, &expired);
1649  	}
1650  
1651  	if (expired.next != 0) {
1652  		mod_timer(&q->timeout, expired.next);
1653  	} else {
1654  		/*
1655  		 * Request timeouts are handled as a forward rolling timer. If
1656  		 * we end up here it means that no requests are pending and
1657  		 * also that no request has been pending for a while. Mark
1658  		 * each hctx as idle.
1659  		 */
1660  		queue_for_each_hw_ctx(q, hctx, i) {
1661  			/* the hctx may be unmapped, so check it here */
1662  			if (blk_mq_hw_queue_mapped(hctx))
1663  				blk_mq_tag_idle(hctx);
1664  		}
1665  	}
1666  	blk_queue_exit(q);
1667  }
1668  
1669  struct flush_busy_ctx_data {
1670  	struct blk_mq_hw_ctx *hctx;
1671  	struct list_head *list;
1672  };
1673  
flush_busy_ctx(struct sbitmap * sb,unsigned int bitnr,void * data)1674  static bool flush_busy_ctx(struct sbitmap *sb, unsigned int bitnr, void *data)
1675  {
1676  	struct flush_busy_ctx_data *flush_data = data;
1677  	struct blk_mq_hw_ctx *hctx = flush_data->hctx;
1678  	struct blk_mq_ctx *ctx = hctx->ctxs[bitnr];
1679  	enum hctx_type type = hctx->type;
1680  
1681  	spin_lock(&ctx->lock);
1682  	list_splice_tail_init(&ctx->rq_lists[type], flush_data->list);
1683  	sbitmap_clear_bit(sb, bitnr);
1684  	spin_unlock(&ctx->lock);
1685  	return true;
1686  }
1687  
1688  /*
1689   * Process software queues that have been marked busy, splicing them
1690   * to the for-dispatch
1691   */
blk_mq_flush_busy_ctxs(struct blk_mq_hw_ctx * hctx,struct list_head * list)1692  void blk_mq_flush_busy_ctxs(struct blk_mq_hw_ctx *hctx, struct list_head *list)
1693  {
1694  	struct flush_busy_ctx_data data = {
1695  		.hctx = hctx,
1696  		.list = list,
1697  	};
1698  
1699  	sbitmap_for_each_set(&hctx->ctx_map, flush_busy_ctx, &data);
1700  }
1701  EXPORT_SYMBOL_GPL(blk_mq_flush_busy_ctxs);
1702  
1703  struct dispatch_rq_data {
1704  	struct blk_mq_hw_ctx *hctx;
1705  	struct request *rq;
1706  };
1707  
dispatch_rq_from_ctx(struct sbitmap * sb,unsigned int bitnr,void * data)1708  static bool dispatch_rq_from_ctx(struct sbitmap *sb, unsigned int bitnr,
1709  		void *data)
1710  {
1711  	struct dispatch_rq_data *dispatch_data = data;
1712  	struct blk_mq_hw_ctx *hctx = dispatch_data->hctx;
1713  	struct blk_mq_ctx *ctx = hctx->ctxs[bitnr];
1714  	enum hctx_type type = hctx->type;
1715  
1716  	spin_lock(&ctx->lock);
1717  	if (!list_empty(&ctx->rq_lists[type])) {
1718  		dispatch_data->rq = list_entry_rq(ctx->rq_lists[type].next);
1719  		list_del_init(&dispatch_data->rq->queuelist);
1720  		if (list_empty(&ctx->rq_lists[type]))
1721  			sbitmap_clear_bit(sb, bitnr);
1722  	}
1723  	spin_unlock(&ctx->lock);
1724  
1725  	return !dispatch_data->rq;
1726  }
1727  
blk_mq_dequeue_from_ctx(struct blk_mq_hw_ctx * hctx,struct blk_mq_ctx * start)1728  struct request *blk_mq_dequeue_from_ctx(struct blk_mq_hw_ctx *hctx,
1729  					struct blk_mq_ctx *start)
1730  {
1731  	unsigned off = start ? start->index_hw[hctx->type] : 0;
1732  	struct dispatch_rq_data data = {
1733  		.hctx = hctx,
1734  		.rq   = NULL,
1735  	};
1736  
1737  	__sbitmap_for_each_set(&hctx->ctx_map, off,
1738  			       dispatch_rq_from_ctx, &data);
1739  
1740  	return data.rq;
1741  }
1742  
__blk_mq_alloc_driver_tag(struct request * rq)1743  bool __blk_mq_alloc_driver_tag(struct request *rq)
1744  {
1745  	struct sbitmap_queue *bt = &rq->mq_hctx->tags->bitmap_tags;
1746  	unsigned int tag_offset = rq->mq_hctx->tags->nr_reserved_tags;
1747  	int tag;
1748  
1749  	blk_mq_tag_busy(rq->mq_hctx);
1750  
1751  	if (blk_mq_tag_is_reserved(rq->mq_hctx->sched_tags, rq->internal_tag)) {
1752  		bt = &rq->mq_hctx->tags->breserved_tags;
1753  		tag_offset = 0;
1754  	} else {
1755  		if (!hctx_may_queue(rq->mq_hctx, bt))
1756  			return false;
1757  	}
1758  
1759  	tag = __sbitmap_queue_get(bt);
1760  	if (tag == BLK_MQ_NO_TAG)
1761  		return false;
1762  
1763  	rq->tag = tag + tag_offset;
1764  	blk_mq_inc_active_requests(rq->mq_hctx);
1765  	return true;
1766  }
1767  
blk_mq_dispatch_wake(wait_queue_entry_t * wait,unsigned mode,int flags,void * key)1768  static int blk_mq_dispatch_wake(wait_queue_entry_t *wait, unsigned mode,
1769  				int flags, void *key)
1770  {
1771  	struct blk_mq_hw_ctx *hctx;
1772  
1773  	hctx = container_of(wait, struct blk_mq_hw_ctx, dispatch_wait);
1774  
1775  	spin_lock(&hctx->dispatch_wait_lock);
1776  	if (!list_empty(&wait->entry)) {
1777  		struct sbitmap_queue *sbq;
1778  
1779  		list_del_init(&wait->entry);
1780  		sbq = &hctx->tags->bitmap_tags;
1781  		atomic_dec(&sbq->ws_active);
1782  	}
1783  	spin_unlock(&hctx->dispatch_wait_lock);
1784  
1785  	blk_mq_run_hw_queue(hctx, true);
1786  	return 1;
1787  }
1788  
1789  /*
1790   * Mark us waiting for a tag. For shared tags, this involves hooking us into
1791   * the tag wakeups. For non-shared tags, we can simply mark us needing a
1792   * restart. For both cases, take care to check the condition again after
1793   * marking us as waiting.
1794   */
blk_mq_mark_tag_wait(struct blk_mq_hw_ctx * hctx,struct request * rq)1795  static bool blk_mq_mark_tag_wait(struct blk_mq_hw_ctx *hctx,
1796  				 struct request *rq)
1797  {
1798  	struct sbitmap_queue *sbq;
1799  	struct wait_queue_head *wq;
1800  	wait_queue_entry_t *wait;
1801  	bool ret;
1802  
1803  	if (!(hctx->flags & BLK_MQ_F_TAG_QUEUE_SHARED) &&
1804  	    !(blk_mq_is_shared_tags(hctx->flags))) {
1805  		blk_mq_sched_mark_restart_hctx(hctx);
1806  
1807  		/*
1808  		 * It's possible that a tag was freed in the window between the
1809  		 * allocation failure and adding the hardware queue to the wait
1810  		 * queue.
1811  		 *
1812  		 * Don't clear RESTART here, someone else could have set it.
1813  		 * At most this will cost an extra queue run.
1814  		 */
1815  		return blk_mq_get_driver_tag(rq);
1816  	}
1817  
1818  	wait = &hctx->dispatch_wait;
1819  	if (!list_empty_careful(&wait->entry))
1820  		return false;
1821  
1822  	if (blk_mq_tag_is_reserved(rq->mq_hctx->sched_tags, rq->internal_tag))
1823  		sbq = &hctx->tags->breserved_tags;
1824  	else
1825  		sbq = &hctx->tags->bitmap_tags;
1826  	wq = &bt_wait_ptr(sbq, hctx)->wait;
1827  
1828  	spin_lock_irq(&wq->lock);
1829  	spin_lock(&hctx->dispatch_wait_lock);
1830  	if (!list_empty(&wait->entry)) {
1831  		spin_unlock(&hctx->dispatch_wait_lock);
1832  		spin_unlock_irq(&wq->lock);
1833  		return false;
1834  	}
1835  
1836  	atomic_inc(&sbq->ws_active);
1837  	wait->flags &= ~WQ_FLAG_EXCLUSIVE;
1838  	__add_wait_queue(wq, wait);
1839  
1840  	/*
1841  	 * Add one explicit barrier since blk_mq_get_driver_tag() may
1842  	 * not imply barrier in case of failure.
1843  	 *
1844  	 * Order adding us to wait queue and allocating driver tag.
1845  	 *
1846  	 * The pair is the one implied in sbitmap_queue_wake_up() which
1847  	 * orders clearing sbitmap tag bits and waitqueue_active() in
1848  	 * __sbitmap_queue_wake_up(), since waitqueue_active() is lockless
1849  	 *
1850  	 * Otherwise, re-order of adding wait queue and getting driver tag
1851  	 * may cause __sbitmap_queue_wake_up() to wake up nothing because
1852  	 * the waitqueue_active() may not observe us in wait queue.
1853  	 */
1854  	smp_mb();
1855  
1856  	/*
1857  	 * It's possible that a tag was freed in the window between the
1858  	 * allocation failure and adding the hardware queue to the wait
1859  	 * queue.
1860  	 */
1861  	ret = blk_mq_get_driver_tag(rq);
1862  	if (!ret) {
1863  		spin_unlock(&hctx->dispatch_wait_lock);
1864  		spin_unlock_irq(&wq->lock);
1865  		return false;
1866  	}
1867  
1868  	/*
1869  	 * We got a tag, remove ourselves from the wait queue to ensure
1870  	 * someone else gets the wakeup.
1871  	 */
1872  	list_del_init(&wait->entry);
1873  	atomic_dec(&sbq->ws_active);
1874  	spin_unlock(&hctx->dispatch_wait_lock);
1875  	spin_unlock_irq(&wq->lock);
1876  
1877  	return true;
1878  }
1879  
1880  #define BLK_MQ_DISPATCH_BUSY_EWMA_WEIGHT  8
1881  #define BLK_MQ_DISPATCH_BUSY_EWMA_FACTOR  4
1882  /*
1883   * Update dispatch busy with the Exponential Weighted Moving Average(EWMA):
1884   * - EWMA is one simple way to compute running average value
1885   * - weight(7/8 and 1/8) is applied so that it can decrease exponentially
1886   * - take 4 as factor for avoiding to get too small(0) result, and this
1887   *   factor doesn't matter because EWMA decreases exponentially
1888   */
blk_mq_update_dispatch_busy(struct blk_mq_hw_ctx * hctx,bool busy)1889  static void blk_mq_update_dispatch_busy(struct blk_mq_hw_ctx *hctx, bool busy)
1890  {
1891  	unsigned int ewma;
1892  
1893  	ewma = hctx->dispatch_busy;
1894  
1895  	if (!ewma && !busy)
1896  		return;
1897  
1898  	ewma *= BLK_MQ_DISPATCH_BUSY_EWMA_WEIGHT - 1;
1899  	if (busy)
1900  		ewma += 1 << BLK_MQ_DISPATCH_BUSY_EWMA_FACTOR;
1901  	ewma /= BLK_MQ_DISPATCH_BUSY_EWMA_WEIGHT;
1902  
1903  	hctx->dispatch_busy = ewma;
1904  }
1905  
1906  #define BLK_MQ_RESOURCE_DELAY	3		/* ms units */
1907  
blk_mq_handle_dev_resource(struct request * rq,struct list_head * list)1908  static void blk_mq_handle_dev_resource(struct request *rq,
1909  				       struct list_head *list)
1910  {
1911  	list_add(&rq->queuelist, list);
1912  	__blk_mq_requeue_request(rq);
1913  }
1914  
1915  enum prep_dispatch {
1916  	PREP_DISPATCH_OK,
1917  	PREP_DISPATCH_NO_TAG,
1918  	PREP_DISPATCH_NO_BUDGET,
1919  };
1920  
blk_mq_prep_dispatch_rq(struct request * rq,bool need_budget)1921  static enum prep_dispatch blk_mq_prep_dispatch_rq(struct request *rq,
1922  						  bool need_budget)
1923  {
1924  	struct blk_mq_hw_ctx *hctx = rq->mq_hctx;
1925  	int budget_token = -1;
1926  
1927  	if (need_budget) {
1928  		budget_token = blk_mq_get_dispatch_budget(rq->q);
1929  		if (budget_token < 0) {
1930  			blk_mq_put_driver_tag(rq);
1931  			return PREP_DISPATCH_NO_BUDGET;
1932  		}
1933  		blk_mq_set_rq_budget_token(rq, budget_token);
1934  	}
1935  
1936  	if (!blk_mq_get_driver_tag(rq)) {
1937  		/*
1938  		 * The initial allocation attempt failed, so we need to
1939  		 * rerun the hardware queue when a tag is freed. The
1940  		 * waitqueue takes care of that. If the queue is run
1941  		 * before we add this entry back on the dispatch list,
1942  		 * we'll re-run it below.
1943  		 */
1944  		if (!blk_mq_mark_tag_wait(hctx, rq)) {
1945  			/*
1946  			 * All budgets not got from this function will be put
1947  			 * together during handling partial dispatch
1948  			 */
1949  			if (need_budget)
1950  				blk_mq_put_dispatch_budget(rq->q, budget_token);
1951  			return PREP_DISPATCH_NO_TAG;
1952  		}
1953  	}
1954  
1955  	return PREP_DISPATCH_OK;
1956  }
1957  
1958  /* release all allocated budgets before calling to blk_mq_dispatch_rq_list */
blk_mq_release_budgets(struct request_queue * q,struct list_head * list)1959  static void blk_mq_release_budgets(struct request_queue *q,
1960  		struct list_head *list)
1961  {
1962  	struct request *rq;
1963  
1964  	list_for_each_entry(rq, list, queuelist) {
1965  		int budget_token = blk_mq_get_rq_budget_token(rq);
1966  
1967  		if (budget_token >= 0)
1968  			blk_mq_put_dispatch_budget(q, budget_token);
1969  	}
1970  }
1971  
1972  /*
1973   * blk_mq_commit_rqs will notify driver using bd->last that there is no
1974   * more requests. (See comment in struct blk_mq_ops for commit_rqs for
1975   * details)
1976   * Attention, we should explicitly call this in unusual cases:
1977   *  1) did not queue everything initially scheduled to queue
1978   *  2) the last attempt to queue a request failed
1979   */
blk_mq_commit_rqs(struct blk_mq_hw_ctx * hctx,int queued,bool from_schedule)1980  static void blk_mq_commit_rqs(struct blk_mq_hw_ctx *hctx, int queued,
1981  			      bool from_schedule)
1982  {
1983  	if (hctx->queue->mq_ops->commit_rqs && queued) {
1984  		trace_block_unplug(hctx->queue, queued, !from_schedule);
1985  		hctx->queue->mq_ops->commit_rqs(hctx);
1986  	}
1987  }
1988  
1989  /*
1990   * Returns true if we did some work AND can potentially do more.
1991   */
blk_mq_dispatch_rq_list(struct blk_mq_hw_ctx * hctx,struct list_head * list,unsigned int nr_budgets)1992  bool blk_mq_dispatch_rq_list(struct blk_mq_hw_ctx *hctx, struct list_head *list,
1993  			     unsigned int nr_budgets)
1994  {
1995  	enum prep_dispatch prep;
1996  	struct request_queue *q = hctx->queue;
1997  	struct request *rq;
1998  	int queued;
1999  	blk_status_t ret = BLK_STS_OK;
2000  	bool needs_resource = false;
2001  
2002  	if (list_empty(list))
2003  		return false;
2004  
2005  	/*
2006  	 * Now process all the entries, sending them to the driver.
2007  	 */
2008  	queued = 0;
2009  	do {
2010  		struct blk_mq_queue_data bd;
2011  
2012  		rq = list_first_entry(list, struct request, queuelist);
2013  
2014  		WARN_ON_ONCE(hctx != rq->mq_hctx);
2015  		prep = blk_mq_prep_dispatch_rq(rq, !nr_budgets);
2016  		if (prep != PREP_DISPATCH_OK)
2017  			break;
2018  
2019  		list_del_init(&rq->queuelist);
2020  
2021  		bd.rq = rq;
2022  		bd.last = list_empty(list);
2023  
2024  		/*
2025  		 * once the request is queued to lld, no need to cover the
2026  		 * budget any more
2027  		 */
2028  		if (nr_budgets)
2029  			nr_budgets--;
2030  		ret = q->mq_ops->queue_rq(hctx, &bd);
2031  		switch (ret) {
2032  		case BLK_STS_OK:
2033  			queued++;
2034  			break;
2035  		case BLK_STS_RESOURCE:
2036  			needs_resource = true;
2037  			fallthrough;
2038  		case BLK_STS_DEV_RESOURCE:
2039  			blk_mq_handle_dev_resource(rq, list);
2040  			goto out;
2041  		default:
2042  			blk_mq_end_request(rq, ret);
2043  		}
2044  	} while (!list_empty(list));
2045  out:
2046  	/* If we didn't flush the entire list, we could have told the driver
2047  	 * there was more coming, but that turned out to be a lie.
2048  	 */
2049  	if (!list_empty(list) || ret != BLK_STS_OK)
2050  		blk_mq_commit_rqs(hctx, queued, false);
2051  
2052  	/*
2053  	 * Any items that need requeuing? Stuff them into hctx->dispatch,
2054  	 * that is where we will continue on next queue run.
2055  	 */
2056  	if (!list_empty(list)) {
2057  		bool needs_restart;
2058  		/* For non-shared tags, the RESTART check will suffice */
2059  		bool no_tag = prep == PREP_DISPATCH_NO_TAG &&
2060  			((hctx->flags & BLK_MQ_F_TAG_QUEUE_SHARED) ||
2061  			blk_mq_is_shared_tags(hctx->flags));
2062  
2063  		if (nr_budgets)
2064  			blk_mq_release_budgets(q, list);
2065  
2066  		spin_lock(&hctx->lock);
2067  		list_splice_tail_init(list, &hctx->dispatch);
2068  		spin_unlock(&hctx->lock);
2069  
2070  		/*
2071  		 * Order adding requests to hctx->dispatch and checking
2072  		 * SCHED_RESTART flag. The pair of this smp_mb() is the one
2073  		 * in blk_mq_sched_restart(). Avoid restart code path to
2074  		 * miss the new added requests to hctx->dispatch, meantime
2075  		 * SCHED_RESTART is observed here.
2076  		 */
2077  		smp_mb();
2078  
2079  		/*
2080  		 * If SCHED_RESTART was set by the caller of this function and
2081  		 * it is no longer set that means that it was cleared by another
2082  		 * thread and hence that a queue rerun is needed.
2083  		 *
2084  		 * If 'no_tag' is set, that means that we failed getting
2085  		 * a driver tag with an I/O scheduler attached. If our dispatch
2086  		 * waitqueue is no longer active, ensure that we run the queue
2087  		 * AFTER adding our entries back to the list.
2088  		 *
2089  		 * If no I/O scheduler has been configured it is possible that
2090  		 * the hardware queue got stopped and restarted before requests
2091  		 * were pushed back onto the dispatch list. Rerun the queue to
2092  		 * avoid starvation. Notes:
2093  		 * - blk_mq_run_hw_queue() checks whether or not a queue has
2094  		 *   been stopped before rerunning a queue.
2095  		 * - Some but not all block drivers stop a queue before
2096  		 *   returning BLK_STS_RESOURCE. Two exceptions are scsi-mq
2097  		 *   and dm-rq.
2098  		 *
2099  		 * If driver returns BLK_STS_RESOURCE and SCHED_RESTART
2100  		 * bit is set, run queue after a delay to avoid IO stalls
2101  		 * that could otherwise occur if the queue is idle.  We'll do
2102  		 * similar if we couldn't get budget or couldn't lock a zone
2103  		 * and SCHED_RESTART is set.
2104  		 */
2105  		needs_restart = blk_mq_sched_needs_restart(hctx);
2106  		if (prep == PREP_DISPATCH_NO_BUDGET)
2107  			needs_resource = true;
2108  		if (!needs_restart ||
2109  		    (no_tag && list_empty_careful(&hctx->dispatch_wait.entry)))
2110  			blk_mq_run_hw_queue(hctx, true);
2111  		else if (needs_resource)
2112  			blk_mq_delay_run_hw_queue(hctx, BLK_MQ_RESOURCE_DELAY);
2113  
2114  		blk_mq_update_dispatch_busy(hctx, true);
2115  		return false;
2116  	}
2117  
2118  	blk_mq_update_dispatch_busy(hctx, false);
2119  	return true;
2120  }
2121  
blk_mq_first_mapped_cpu(struct blk_mq_hw_ctx * hctx)2122  static inline int blk_mq_first_mapped_cpu(struct blk_mq_hw_ctx *hctx)
2123  {
2124  	int cpu = cpumask_first_and(hctx->cpumask, cpu_online_mask);
2125  
2126  	if (cpu >= nr_cpu_ids)
2127  		cpu = cpumask_first(hctx->cpumask);
2128  	return cpu;
2129  }
2130  
2131  /*
2132   * ->next_cpu is always calculated from hctx->cpumask, so simply use
2133   * it for speeding up the check
2134   */
blk_mq_hctx_empty_cpumask(struct blk_mq_hw_ctx * hctx)2135  static bool blk_mq_hctx_empty_cpumask(struct blk_mq_hw_ctx *hctx)
2136  {
2137          return hctx->next_cpu >= nr_cpu_ids;
2138  }
2139  
2140  /*
2141   * It'd be great if the workqueue API had a way to pass
2142   * in a mask and had some smarts for more clever placement.
2143   * For now we just round-robin here, switching for every
2144   * BLK_MQ_CPU_WORK_BATCH queued items.
2145   */
blk_mq_hctx_next_cpu(struct blk_mq_hw_ctx * hctx)2146  static int blk_mq_hctx_next_cpu(struct blk_mq_hw_ctx *hctx)
2147  {
2148  	bool tried = false;
2149  	int next_cpu = hctx->next_cpu;
2150  
2151  	/* Switch to unbound if no allowable CPUs in this hctx */
2152  	if (hctx->queue->nr_hw_queues == 1 || blk_mq_hctx_empty_cpumask(hctx))
2153  		return WORK_CPU_UNBOUND;
2154  
2155  	if (--hctx->next_cpu_batch <= 0) {
2156  select_cpu:
2157  		next_cpu = cpumask_next_and(next_cpu, hctx->cpumask,
2158  				cpu_online_mask);
2159  		if (next_cpu >= nr_cpu_ids)
2160  			next_cpu = blk_mq_first_mapped_cpu(hctx);
2161  		hctx->next_cpu_batch = BLK_MQ_CPU_WORK_BATCH;
2162  	}
2163  
2164  	/*
2165  	 * Do unbound schedule if we can't find a online CPU for this hctx,
2166  	 * and it should only happen in the path of handling CPU DEAD.
2167  	 */
2168  	if (!cpu_online(next_cpu)) {
2169  		if (!tried) {
2170  			tried = true;
2171  			goto select_cpu;
2172  		}
2173  
2174  		/*
2175  		 * Make sure to re-select CPU next time once after CPUs
2176  		 * in hctx->cpumask become online again.
2177  		 */
2178  		hctx->next_cpu = next_cpu;
2179  		hctx->next_cpu_batch = 1;
2180  		return WORK_CPU_UNBOUND;
2181  	}
2182  
2183  	hctx->next_cpu = next_cpu;
2184  	return next_cpu;
2185  }
2186  
2187  /**
2188   * blk_mq_delay_run_hw_queue - Run a hardware queue asynchronously.
2189   * @hctx: Pointer to the hardware queue to run.
2190   * @msecs: Milliseconds of delay to wait before running the queue.
2191   *
2192   * Run a hardware queue asynchronously with a delay of @msecs.
2193   */
blk_mq_delay_run_hw_queue(struct blk_mq_hw_ctx * hctx,unsigned long msecs)2194  void blk_mq_delay_run_hw_queue(struct blk_mq_hw_ctx *hctx, unsigned long msecs)
2195  {
2196  	if (unlikely(blk_mq_hctx_stopped(hctx)))
2197  		return;
2198  	kblockd_mod_delayed_work_on(blk_mq_hctx_next_cpu(hctx), &hctx->run_work,
2199  				    msecs_to_jiffies(msecs));
2200  }
2201  EXPORT_SYMBOL(blk_mq_delay_run_hw_queue);
2202  
2203  /**
2204   * blk_mq_run_hw_queue - Start to run a hardware queue.
2205   * @hctx: Pointer to the hardware queue to run.
2206   * @async: If we want to run the queue asynchronously.
2207   *
2208   * Check if the request queue is not in a quiesced state and if there are
2209   * pending requests to be sent. If this is true, run the queue to send requests
2210   * to hardware.
2211   */
blk_mq_run_hw_queue(struct blk_mq_hw_ctx * hctx,bool async)2212  void blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx, bool async)
2213  {
2214  	bool need_run;
2215  
2216  	/*
2217  	 * We can't run the queue inline with interrupts disabled.
2218  	 */
2219  	WARN_ON_ONCE(!async && in_interrupt());
2220  
2221  	might_sleep_if(!async && hctx->flags & BLK_MQ_F_BLOCKING);
2222  
2223  	/*
2224  	 * When queue is quiesced, we may be switching io scheduler, or
2225  	 * updating nr_hw_queues, or other things, and we can't run queue
2226  	 * any more, even __blk_mq_hctx_has_pending() can't be called safely.
2227  	 *
2228  	 * And queue will be rerun in blk_mq_unquiesce_queue() if it is
2229  	 * quiesced.
2230  	 */
2231  	__blk_mq_run_dispatch_ops(hctx->queue, false,
2232  		need_run = !blk_queue_quiesced(hctx->queue) &&
2233  		blk_mq_hctx_has_pending(hctx));
2234  
2235  	if (!need_run)
2236  		return;
2237  
2238  	if (async || !cpumask_test_cpu(raw_smp_processor_id(), hctx->cpumask)) {
2239  		blk_mq_delay_run_hw_queue(hctx, 0);
2240  		return;
2241  	}
2242  
2243  	blk_mq_run_dispatch_ops(hctx->queue,
2244  				blk_mq_sched_dispatch_requests(hctx));
2245  }
2246  EXPORT_SYMBOL(blk_mq_run_hw_queue);
2247  
2248  /*
2249   * Return prefered queue to dispatch from (if any) for non-mq aware IO
2250   * scheduler.
2251   */
blk_mq_get_sq_hctx(struct request_queue * q)2252  static struct blk_mq_hw_ctx *blk_mq_get_sq_hctx(struct request_queue *q)
2253  {
2254  	struct blk_mq_ctx *ctx = blk_mq_get_ctx(q);
2255  	/*
2256  	 * If the IO scheduler does not respect hardware queues when
2257  	 * dispatching, we just don't bother with multiple HW queues and
2258  	 * dispatch from hctx for the current CPU since running multiple queues
2259  	 * just causes lock contention inside the scheduler and pointless cache
2260  	 * bouncing.
2261  	 */
2262  	struct blk_mq_hw_ctx *hctx = ctx->hctxs[HCTX_TYPE_DEFAULT];
2263  
2264  	if (!blk_mq_hctx_stopped(hctx))
2265  		return hctx;
2266  	return NULL;
2267  }
2268  
2269  /**
2270   * blk_mq_run_hw_queues - Run all hardware queues in a request queue.
2271   * @q: Pointer to the request queue to run.
2272   * @async: If we want to run the queue asynchronously.
2273   */
blk_mq_run_hw_queues(struct request_queue * q,bool async)2274  void blk_mq_run_hw_queues(struct request_queue *q, bool async)
2275  {
2276  	struct blk_mq_hw_ctx *hctx, *sq_hctx;
2277  	unsigned long i;
2278  
2279  	sq_hctx = NULL;
2280  	if (blk_queue_sq_sched(q))
2281  		sq_hctx = blk_mq_get_sq_hctx(q);
2282  	queue_for_each_hw_ctx(q, hctx, i) {
2283  		if (blk_mq_hctx_stopped(hctx))
2284  			continue;
2285  		/*
2286  		 * Dispatch from this hctx either if there's no hctx preferred
2287  		 * by IO scheduler or if it has requests that bypass the
2288  		 * scheduler.
2289  		 */
2290  		if (!sq_hctx || sq_hctx == hctx ||
2291  		    !list_empty_careful(&hctx->dispatch))
2292  			blk_mq_run_hw_queue(hctx, async);
2293  	}
2294  }
2295  EXPORT_SYMBOL(blk_mq_run_hw_queues);
2296  
2297  /**
2298   * blk_mq_delay_run_hw_queues - Run all hardware queues asynchronously.
2299   * @q: Pointer to the request queue to run.
2300   * @msecs: Milliseconds of delay to wait before running the queues.
2301   */
blk_mq_delay_run_hw_queues(struct request_queue * q,unsigned long msecs)2302  void blk_mq_delay_run_hw_queues(struct request_queue *q, unsigned long msecs)
2303  {
2304  	struct blk_mq_hw_ctx *hctx, *sq_hctx;
2305  	unsigned long i;
2306  
2307  	sq_hctx = NULL;
2308  	if (blk_queue_sq_sched(q))
2309  		sq_hctx = blk_mq_get_sq_hctx(q);
2310  	queue_for_each_hw_ctx(q, hctx, i) {
2311  		if (blk_mq_hctx_stopped(hctx))
2312  			continue;
2313  		/*
2314  		 * If there is already a run_work pending, leave the
2315  		 * pending delay untouched. Otherwise, a hctx can stall
2316  		 * if another hctx is re-delaying the other's work
2317  		 * before the work executes.
2318  		 */
2319  		if (delayed_work_pending(&hctx->run_work))
2320  			continue;
2321  		/*
2322  		 * Dispatch from this hctx either if there's no hctx preferred
2323  		 * by IO scheduler or if it has requests that bypass the
2324  		 * scheduler.
2325  		 */
2326  		if (!sq_hctx || sq_hctx == hctx ||
2327  		    !list_empty_careful(&hctx->dispatch))
2328  			blk_mq_delay_run_hw_queue(hctx, msecs);
2329  	}
2330  }
2331  EXPORT_SYMBOL(blk_mq_delay_run_hw_queues);
2332  
2333  /*
2334   * This function is often used for pausing .queue_rq() by driver when
2335   * there isn't enough resource or some conditions aren't satisfied, and
2336   * BLK_STS_RESOURCE is usually returned.
2337   *
2338   * We do not guarantee that dispatch can be drained or blocked
2339   * after blk_mq_stop_hw_queue() returns. Please use
2340   * blk_mq_quiesce_queue() for that requirement.
2341   */
blk_mq_stop_hw_queue(struct blk_mq_hw_ctx * hctx)2342  void blk_mq_stop_hw_queue(struct blk_mq_hw_ctx *hctx)
2343  {
2344  	cancel_delayed_work(&hctx->run_work);
2345  
2346  	set_bit(BLK_MQ_S_STOPPED, &hctx->state);
2347  }
2348  EXPORT_SYMBOL(blk_mq_stop_hw_queue);
2349  
2350  /*
2351   * This function is often used for pausing .queue_rq() by driver when
2352   * there isn't enough resource or some conditions aren't satisfied, and
2353   * BLK_STS_RESOURCE is usually returned.
2354   *
2355   * We do not guarantee that dispatch can be drained or blocked
2356   * after blk_mq_stop_hw_queues() returns. Please use
2357   * blk_mq_quiesce_queue() for that requirement.
2358   */
blk_mq_stop_hw_queues(struct request_queue * q)2359  void blk_mq_stop_hw_queues(struct request_queue *q)
2360  {
2361  	struct blk_mq_hw_ctx *hctx;
2362  	unsigned long i;
2363  
2364  	queue_for_each_hw_ctx(q, hctx, i)
2365  		blk_mq_stop_hw_queue(hctx);
2366  }
2367  EXPORT_SYMBOL(blk_mq_stop_hw_queues);
2368  
blk_mq_start_hw_queue(struct blk_mq_hw_ctx * hctx)2369  void blk_mq_start_hw_queue(struct blk_mq_hw_ctx *hctx)
2370  {
2371  	clear_bit(BLK_MQ_S_STOPPED, &hctx->state);
2372  
2373  	blk_mq_run_hw_queue(hctx, hctx->flags & BLK_MQ_F_BLOCKING);
2374  }
2375  EXPORT_SYMBOL(blk_mq_start_hw_queue);
2376  
blk_mq_start_hw_queues(struct request_queue * q)2377  void blk_mq_start_hw_queues(struct request_queue *q)
2378  {
2379  	struct blk_mq_hw_ctx *hctx;
2380  	unsigned long i;
2381  
2382  	queue_for_each_hw_ctx(q, hctx, i)
2383  		blk_mq_start_hw_queue(hctx);
2384  }
2385  EXPORT_SYMBOL(blk_mq_start_hw_queues);
2386  
blk_mq_start_stopped_hw_queue(struct blk_mq_hw_ctx * hctx,bool async)2387  void blk_mq_start_stopped_hw_queue(struct blk_mq_hw_ctx *hctx, bool async)
2388  {
2389  	if (!blk_mq_hctx_stopped(hctx))
2390  		return;
2391  
2392  	clear_bit(BLK_MQ_S_STOPPED, &hctx->state);
2393  	blk_mq_run_hw_queue(hctx, async);
2394  }
2395  EXPORT_SYMBOL_GPL(blk_mq_start_stopped_hw_queue);
2396  
blk_mq_start_stopped_hw_queues(struct request_queue * q,bool async)2397  void blk_mq_start_stopped_hw_queues(struct request_queue *q, bool async)
2398  {
2399  	struct blk_mq_hw_ctx *hctx;
2400  	unsigned long i;
2401  
2402  	queue_for_each_hw_ctx(q, hctx, i)
2403  		blk_mq_start_stopped_hw_queue(hctx, async ||
2404  					(hctx->flags & BLK_MQ_F_BLOCKING));
2405  }
2406  EXPORT_SYMBOL(blk_mq_start_stopped_hw_queues);
2407  
blk_mq_run_work_fn(struct work_struct * work)2408  static void blk_mq_run_work_fn(struct work_struct *work)
2409  {
2410  	struct blk_mq_hw_ctx *hctx =
2411  		container_of(work, struct blk_mq_hw_ctx, run_work.work);
2412  
2413  	blk_mq_run_dispatch_ops(hctx->queue,
2414  				blk_mq_sched_dispatch_requests(hctx));
2415  }
2416  
2417  /**
2418   * blk_mq_request_bypass_insert - Insert a request at dispatch list.
2419   * @rq: Pointer to request to be inserted.
2420   * @flags: BLK_MQ_INSERT_*
2421   *
2422   * Should only be used carefully, when the caller knows we want to
2423   * bypass a potential IO scheduler on the target device.
2424   */
blk_mq_request_bypass_insert(struct request * rq,blk_insert_t flags)2425  static void blk_mq_request_bypass_insert(struct request *rq, blk_insert_t flags)
2426  {
2427  	struct blk_mq_hw_ctx *hctx = rq->mq_hctx;
2428  
2429  	spin_lock(&hctx->lock);
2430  	if (flags & BLK_MQ_INSERT_AT_HEAD)
2431  		list_add(&rq->queuelist, &hctx->dispatch);
2432  	else
2433  		list_add_tail(&rq->queuelist, &hctx->dispatch);
2434  	spin_unlock(&hctx->lock);
2435  }
2436  
blk_mq_insert_requests(struct blk_mq_hw_ctx * hctx,struct blk_mq_ctx * ctx,struct list_head * list,bool run_queue_async)2437  static void blk_mq_insert_requests(struct blk_mq_hw_ctx *hctx,
2438  		struct blk_mq_ctx *ctx, struct list_head *list,
2439  		bool run_queue_async)
2440  {
2441  	struct request *rq;
2442  	enum hctx_type type = hctx->type;
2443  
2444  	/*
2445  	 * Try to issue requests directly if the hw queue isn't busy to save an
2446  	 * extra enqueue & dequeue to the sw queue.
2447  	 */
2448  	if (!hctx->dispatch_busy && !run_queue_async) {
2449  		blk_mq_run_dispatch_ops(hctx->queue,
2450  			blk_mq_try_issue_list_directly(hctx, list));
2451  		if (list_empty(list))
2452  			goto out;
2453  	}
2454  
2455  	/*
2456  	 * preemption doesn't flush plug list, so it's possible ctx->cpu is
2457  	 * offline now
2458  	 */
2459  	list_for_each_entry(rq, list, queuelist) {
2460  		BUG_ON(rq->mq_ctx != ctx);
2461  		trace_block_rq_insert(rq);
2462  		if (rq->cmd_flags & REQ_NOWAIT)
2463  			run_queue_async = true;
2464  	}
2465  
2466  	spin_lock(&ctx->lock);
2467  	list_splice_tail_init(list, &ctx->rq_lists[type]);
2468  	blk_mq_hctx_mark_pending(hctx, ctx);
2469  	spin_unlock(&ctx->lock);
2470  out:
2471  	blk_mq_run_hw_queue(hctx, run_queue_async);
2472  }
2473  
blk_mq_insert_request(struct request * rq,blk_insert_t flags)2474  static void blk_mq_insert_request(struct request *rq, blk_insert_t flags)
2475  {
2476  	struct request_queue *q = rq->q;
2477  	struct blk_mq_ctx *ctx = rq->mq_ctx;
2478  	struct blk_mq_hw_ctx *hctx = rq->mq_hctx;
2479  
2480  	if (blk_rq_is_passthrough(rq)) {
2481  		/*
2482  		 * Passthrough request have to be added to hctx->dispatch
2483  		 * directly.  The device may be in a situation where it can't
2484  		 * handle FS request, and always returns BLK_STS_RESOURCE for
2485  		 * them, which gets them added to hctx->dispatch.
2486  		 *
2487  		 * If a passthrough request is required to unblock the queues,
2488  		 * and it is added to the scheduler queue, there is no chance to
2489  		 * dispatch it given we prioritize requests in hctx->dispatch.
2490  		 */
2491  		blk_mq_request_bypass_insert(rq, flags);
2492  	} else if (req_op(rq) == REQ_OP_FLUSH) {
2493  		/*
2494  		 * Firstly normal IO request is inserted to scheduler queue or
2495  		 * sw queue, meantime we add flush request to dispatch queue(
2496  		 * hctx->dispatch) directly and there is at most one in-flight
2497  		 * flush request for each hw queue, so it doesn't matter to add
2498  		 * flush request to tail or front of the dispatch queue.
2499  		 *
2500  		 * Secondly in case of NCQ, flush request belongs to non-NCQ
2501  		 * command, and queueing it will fail when there is any
2502  		 * in-flight normal IO request(NCQ command). When adding flush
2503  		 * rq to the front of hctx->dispatch, it is easier to introduce
2504  		 * extra time to flush rq's latency because of S_SCHED_RESTART
2505  		 * compared with adding to the tail of dispatch queue, then
2506  		 * chance of flush merge is increased, and less flush requests
2507  		 * will be issued to controller. It is observed that ~10% time
2508  		 * is saved in blktests block/004 on disk attached to AHCI/NCQ
2509  		 * drive when adding flush rq to the front of hctx->dispatch.
2510  		 *
2511  		 * Simply queue flush rq to the front of hctx->dispatch so that
2512  		 * intensive flush workloads can benefit in case of NCQ HW.
2513  		 */
2514  		blk_mq_request_bypass_insert(rq, BLK_MQ_INSERT_AT_HEAD);
2515  	} else if (q->elevator) {
2516  		LIST_HEAD(list);
2517  
2518  		WARN_ON_ONCE(rq->tag != BLK_MQ_NO_TAG);
2519  
2520  		list_add(&rq->queuelist, &list);
2521  		q->elevator->type->ops.insert_requests(hctx, &list, flags);
2522  	} else {
2523  		trace_block_rq_insert(rq);
2524  
2525  		spin_lock(&ctx->lock);
2526  		if (flags & BLK_MQ_INSERT_AT_HEAD)
2527  			list_add(&rq->queuelist, &ctx->rq_lists[hctx->type]);
2528  		else
2529  			list_add_tail(&rq->queuelist,
2530  				      &ctx->rq_lists[hctx->type]);
2531  		blk_mq_hctx_mark_pending(hctx, ctx);
2532  		spin_unlock(&ctx->lock);
2533  	}
2534  }
2535  
blk_mq_bio_to_request(struct request * rq,struct bio * bio,unsigned int nr_segs)2536  static void blk_mq_bio_to_request(struct request *rq, struct bio *bio,
2537  		unsigned int nr_segs)
2538  {
2539  	int err;
2540  
2541  	if (bio->bi_opf & REQ_RAHEAD)
2542  		rq->cmd_flags |= REQ_FAILFAST_MASK;
2543  
2544  	rq->__sector = bio->bi_iter.bi_sector;
2545  	rq->write_hint = bio->bi_write_hint;
2546  	blk_rq_bio_prep(rq, bio, nr_segs);
2547  	if (bio_integrity(bio))
2548  		rq->nr_integrity_segments = blk_rq_count_integrity_sg(rq->q,
2549  								      bio);
2550  
2551  	/* This can't fail, since GFP_NOIO includes __GFP_DIRECT_RECLAIM. */
2552  	err = blk_crypto_rq_bio_prep(rq, bio, GFP_NOIO);
2553  	WARN_ON_ONCE(err);
2554  
2555  	blk_account_io_start(rq);
2556  }
2557  
__blk_mq_issue_directly(struct blk_mq_hw_ctx * hctx,struct request * rq,bool last)2558  static blk_status_t __blk_mq_issue_directly(struct blk_mq_hw_ctx *hctx,
2559  					    struct request *rq, bool last)
2560  {
2561  	struct request_queue *q = rq->q;
2562  	struct blk_mq_queue_data bd = {
2563  		.rq = rq,
2564  		.last = last,
2565  	};
2566  	blk_status_t ret;
2567  
2568  	/*
2569  	 * For OK queue, we are done. For error, caller may kill it.
2570  	 * Any other error (busy), just add it to our list as we
2571  	 * previously would have done.
2572  	 */
2573  	ret = q->mq_ops->queue_rq(hctx, &bd);
2574  	switch (ret) {
2575  	case BLK_STS_OK:
2576  		blk_mq_update_dispatch_busy(hctx, false);
2577  		break;
2578  	case BLK_STS_RESOURCE:
2579  	case BLK_STS_DEV_RESOURCE:
2580  		blk_mq_update_dispatch_busy(hctx, true);
2581  		__blk_mq_requeue_request(rq);
2582  		break;
2583  	default:
2584  		blk_mq_update_dispatch_busy(hctx, false);
2585  		break;
2586  	}
2587  
2588  	return ret;
2589  }
2590  
blk_mq_get_budget_and_tag(struct request * rq)2591  static bool blk_mq_get_budget_and_tag(struct request *rq)
2592  {
2593  	int budget_token;
2594  
2595  	budget_token = blk_mq_get_dispatch_budget(rq->q);
2596  	if (budget_token < 0)
2597  		return false;
2598  	blk_mq_set_rq_budget_token(rq, budget_token);
2599  	if (!blk_mq_get_driver_tag(rq)) {
2600  		blk_mq_put_dispatch_budget(rq->q, budget_token);
2601  		return false;
2602  	}
2603  	return true;
2604  }
2605  
2606  /**
2607   * blk_mq_try_issue_directly - Try to send a request directly to device driver.
2608   * @hctx: Pointer of the associated hardware queue.
2609   * @rq: Pointer to request to be sent.
2610   *
2611   * If the device has enough resources to accept a new request now, send the
2612   * request directly to device driver. Else, insert at hctx->dispatch queue, so
2613   * we can try send it another time in the future. Requests inserted at this
2614   * queue have higher priority.
2615   */
blk_mq_try_issue_directly(struct blk_mq_hw_ctx * hctx,struct request * rq)2616  static void blk_mq_try_issue_directly(struct blk_mq_hw_ctx *hctx,
2617  		struct request *rq)
2618  {
2619  	blk_status_t ret;
2620  
2621  	if (blk_mq_hctx_stopped(hctx) || blk_queue_quiesced(rq->q)) {
2622  		blk_mq_insert_request(rq, 0);
2623  		return;
2624  	}
2625  
2626  	if ((rq->rq_flags & RQF_USE_SCHED) || !blk_mq_get_budget_and_tag(rq)) {
2627  		blk_mq_insert_request(rq, 0);
2628  		blk_mq_run_hw_queue(hctx, rq->cmd_flags & REQ_NOWAIT);
2629  		return;
2630  	}
2631  
2632  	ret = __blk_mq_issue_directly(hctx, rq, true);
2633  	switch (ret) {
2634  	case BLK_STS_OK:
2635  		break;
2636  	case BLK_STS_RESOURCE:
2637  	case BLK_STS_DEV_RESOURCE:
2638  		blk_mq_request_bypass_insert(rq, 0);
2639  		blk_mq_run_hw_queue(hctx, false);
2640  		break;
2641  	default:
2642  		blk_mq_end_request(rq, ret);
2643  		break;
2644  	}
2645  }
2646  
blk_mq_request_issue_directly(struct request * rq,bool last)2647  static blk_status_t blk_mq_request_issue_directly(struct request *rq, bool last)
2648  {
2649  	struct blk_mq_hw_ctx *hctx = rq->mq_hctx;
2650  
2651  	if (blk_mq_hctx_stopped(hctx) || blk_queue_quiesced(rq->q)) {
2652  		blk_mq_insert_request(rq, 0);
2653  		return BLK_STS_OK;
2654  	}
2655  
2656  	if (!blk_mq_get_budget_and_tag(rq))
2657  		return BLK_STS_RESOURCE;
2658  	return __blk_mq_issue_directly(hctx, rq, last);
2659  }
2660  
blk_mq_plug_issue_direct(struct blk_plug * plug)2661  static void blk_mq_plug_issue_direct(struct blk_plug *plug)
2662  {
2663  	struct blk_mq_hw_ctx *hctx = NULL;
2664  	struct request *rq;
2665  	int queued = 0;
2666  	blk_status_t ret = BLK_STS_OK;
2667  
2668  	while ((rq = rq_list_pop(&plug->mq_list))) {
2669  		bool last = rq_list_empty(plug->mq_list);
2670  
2671  		if (hctx != rq->mq_hctx) {
2672  			if (hctx) {
2673  				blk_mq_commit_rqs(hctx, queued, false);
2674  				queued = 0;
2675  			}
2676  			hctx = rq->mq_hctx;
2677  		}
2678  
2679  		ret = blk_mq_request_issue_directly(rq, last);
2680  		switch (ret) {
2681  		case BLK_STS_OK:
2682  			queued++;
2683  			break;
2684  		case BLK_STS_RESOURCE:
2685  		case BLK_STS_DEV_RESOURCE:
2686  			blk_mq_request_bypass_insert(rq, 0);
2687  			blk_mq_run_hw_queue(hctx, false);
2688  			goto out;
2689  		default:
2690  			blk_mq_end_request(rq, ret);
2691  			break;
2692  		}
2693  	}
2694  
2695  out:
2696  	if (ret != BLK_STS_OK)
2697  		blk_mq_commit_rqs(hctx, queued, false);
2698  }
2699  
__blk_mq_flush_plug_list(struct request_queue * q,struct blk_plug * plug)2700  static void __blk_mq_flush_plug_list(struct request_queue *q,
2701  				     struct blk_plug *plug)
2702  {
2703  	if (blk_queue_quiesced(q))
2704  		return;
2705  	q->mq_ops->queue_rqs(&plug->mq_list);
2706  }
2707  
blk_mq_dispatch_plug_list(struct blk_plug * plug,bool from_sched)2708  static void blk_mq_dispatch_plug_list(struct blk_plug *plug, bool from_sched)
2709  {
2710  	struct blk_mq_hw_ctx *this_hctx = NULL;
2711  	struct blk_mq_ctx *this_ctx = NULL;
2712  	struct request *requeue_list = NULL;
2713  	struct request **requeue_lastp = &requeue_list;
2714  	unsigned int depth = 0;
2715  	bool is_passthrough = false;
2716  	LIST_HEAD(list);
2717  
2718  	do {
2719  		struct request *rq = rq_list_pop(&plug->mq_list);
2720  
2721  		if (!this_hctx) {
2722  			this_hctx = rq->mq_hctx;
2723  			this_ctx = rq->mq_ctx;
2724  			is_passthrough = blk_rq_is_passthrough(rq);
2725  		} else if (this_hctx != rq->mq_hctx || this_ctx != rq->mq_ctx ||
2726  			   is_passthrough != blk_rq_is_passthrough(rq)) {
2727  			rq_list_add_tail(&requeue_lastp, rq);
2728  			continue;
2729  		}
2730  		list_add(&rq->queuelist, &list);
2731  		depth++;
2732  	} while (!rq_list_empty(plug->mq_list));
2733  
2734  	plug->mq_list = requeue_list;
2735  	trace_block_unplug(this_hctx->queue, depth, !from_sched);
2736  
2737  	percpu_ref_get(&this_hctx->queue->q_usage_counter);
2738  	/* passthrough requests should never be issued to the I/O scheduler */
2739  	if (is_passthrough) {
2740  		spin_lock(&this_hctx->lock);
2741  		list_splice_tail_init(&list, &this_hctx->dispatch);
2742  		spin_unlock(&this_hctx->lock);
2743  		blk_mq_run_hw_queue(this_hctx, from_sched);
2744  	} else if (this_hctx->queue->elevator) {
2745  		this_hctx->queue->elevator->type->ops.insert_requests(this_hctx,
2746  				&list, 0);
2747  		blk_mq_run_hw_queue(this_hctx, from_sched);
2748  	} else {
2749  		blk_mq_insert_requests(this_hctx, this_ctx, &list, from_sched);
2750  	}
2751  	percpu_ref_put(&this_hctx->queue->q_usage_counter);
2752  }
2753  
blk_mq_flush_plug_list(struct blk_plug * plug,bool from_schedule)2754  void blk_mq_flush_plug_list(struct blk_plug *plug, bool from_schedule)
2755  {
2756  	struct request *rq;
2757  	unsigned int depth;
2758  
2759  	/*
2760  	 * We may have been called recursively midway through handling
2761  	 * plug->mq_list via a schedule() in the driver's queue_rq() callback.
2762  	 * To avoid mq_list changing under our feet, clear rq_count early and
2763  	 * bail out specifically if rq_count is 0 rather than checking
2764  	 * whether the mq_list is empty.
2765  	 */
2766  	if (plug->rq_count == 0)
2767  		return;
2768  	depth = plug->rq_count;
2769  	plug->rq_count = 0;
2770  
2771  	if (!plug->multiple_queues && !plug->has_elevator && !from_schedule) {
2772  		struct request_queue *q;
2773  
2774  		rq = rq_list_peek(&plug->mq_list);
2775  		q = rq->q;
2776  		trace_block_unplug(q, depth, true);
2777  
2778  		/*
2779  		 * Peek first request and see if we have a ->queue_rqs() hook.
2780  		 * If we do, we can dispatch the whole plug list in one go. We
2781  		 * already know at this point that all requests belong to the
2782  		 * same queue, caller must ensure that's the case.
2783  		 */
2784  		if (q->mq_ops->queue_rqs) {
2785  			blk_mq_run_dispatch_ops(q,
2786  				__blk_mq_flush_plug_list(q, plug));
2787  			if (rq_list_empty(plug->mq_list))
2788  				return;
2789  		}
2790  
2791  		blk_mq_run_dispatch_ops(q,
2792  				blk_mq_plug_issue_direct(plug));
2793  		if (rq_list_empty(plug->mq_list))
2794  			return;
2795  	}
2796  
2797  	do {
2798  		blk_mq_dispatch_plug_list(plug, from_schedule);
2799  	} while (!rq_list_empty(plug->mq_list));
2800  }
2801  
blk_mq_try_issue_list_directly(struct blk_mq_hw_ctx * hctx,struct list_head * list)2802  static void blk_mq_try_issue_list_directly(struct blk_mq_hw_ctx *hctx,
2803  		struct list_head *list)
2804  {
2805  	int queued = 0;
2806  	blk_status_t ret = BLK_STS_OK;
2807  
2808  	while (!list_empty(list)) {
2809  		struct request *rq = list_first_entry(list, struct request,
2810  				queuelist);
2811  
2812  		list_del_init(&rq->queuelist);
2813  		ret = blk_mq_request_issue_directly(rq, list_empty(list));
2814  		switch (ret) {
2815  		case BLK_STS_OK:
2816  			queued++;
2817  			break;
2818  		case BLK_STS_RESOURCE:
2819  		case BLK_STS_DEV_RESOURCE:
2820  			blk_mq_request_bypass_insert(rq, 0);
2821  			if (list_empty(list))
2822  				blk_mq_run_hw_queue(hctx, false);
2823  			goto out;
2824  		default:
2825  			blk_mq_end_request(rq, ret);
2826  			break;
2827  		}
2828  	}
2829  
2830  out:
2831  	if (ret != BLK_STS_OK)
2832  		blk_mq_commit_rqs(hctx, queued, false);
2833  }
2834  
blk_mq_attempt_bio_merge(struct request_queue * q,struct bio * bio,unsigned int nr_segs)2835  static bool blk_mq_attempt_bio_merge(struct request_queue *q,
2836  				     struct bio *bio, unsigned int nr_segs)
2837  {
2838  	if (!blk_queue_nomerges(q) && bio_mergeable(bio)) {
2839  		if (blk_attempt_plug_merge(q, bio, nr_segs))
2840  			return true;
2841  		if (blk_mq_sched_bio_merge(q, bio, nr_segs))
2842  			return true;
2843  	}
2844  	return false;
2845  }
2846  
blk_mq_get_new_requests(struct request_queue * q,struct blk_plug * plug,struct bio * bio,unsigned int nsegs)2847  static struct request *blk_mq_get_new_requests(struct request_queue *q,
2848  					       struct blk_plug *plug,
2849  					       struct bio *bio,
2850  					       unsigned int nsegs)
2851  {
2852  	struct blk_mq_alloc_data data = {
2853  		.q		= q,
2854  		.nr_tags	= 1,
2855  		.cmd_flags	= bio->bi_opf,
2856  	};
2857  	struct request *rq;
2858  
2859  	rq_qos_throttle(q, bio);
2860  
2861  	if (plug) {
2862  		data.nr_tags = plug->nr_ios;
2863  		plug->nr_ios = 1;
2864  		data.cached_rq = &plug->cached_rq;
2865  	}
2866  
2867  	rq = __blk_mq_alloc_requests(&data);
2868  	if (rq)
2869  		return rq;
2870  	rq_qos_cleanup(q, bio);
2871  	if (bio->bi_opf & REQ_NOWAIT)
2872  		bio_wouldblock_error(bio);
2873  	return NULL;
2874  }
2875  
2876  /*
2877   * Check if there is a suitable cached request and return it.
2878   */
blk_mq_peek_cached_request(struct blk_plug * plug,struct request_queue * q,blk_opf_t opf)2879  static struct request *blk_mq_peek_cached_request(struct blk_plug *plug,
2880  		struct request_queue *q, blk_opf_t opf)
2881  {
2882  	enum hctx_type type = blk_mq_get_hctx_type(opf);
2883  	struct request *rq;
2884  
2885  	if (!plug)
2886  		return NULL;
2887  	rq = rq_list_peek(&plug->cached_rq);
2888  	if (!rq || rq->q != q)
2889  		return NULL;
2890  	if (type != rq->mq_hctx->type &&
2891  	    (type != HCTX_TYPE_READ || rq->mq_hctx->type != HCTX_TYPE_DEFAULT))
2892  		return NULL;
2893  	if (op_is_flush(rq->cmd_flags) != op_is_flush(opf))
2894  		return NULL;
2895  	return rq;
2896  }
2897  
blk_mq_use_cached_rq(struct request * rq,struct blk_plug * plug,struct bio * bio)2898  static void blk_mq_use_cached_rq(struct request *rq, struct blk_plug *plug,
2899  		struct bio *bio)
2900  {
2901  	WARN_ON_ONCE(rq_list_peek(&plug->cached_rq) != rq);
2902  
2903  	/*
2904  	 * If any qos ->throttle() end up blocking, we will have flushed the
2905  	 * plug and hence killed the cached_rq list as well. Pop this entry
2906  	 * before we throttle.
2907  	 */
2908  	plug->cached_rq = rq_list_next(rq);
2909  	rq_qos_throttle(rq->q, bio);
2910  
2911  	blk_mq_rq_time_init(rq, 0);
2912  	rq->cmd_flags = bio->bi_opf;
2913  	INIT_LIST_HEAD(&rq->queuelist);
2914  }
2915  
bio_unaligned(const struct bio * bio,struct request_queue * q)2916  static bool bio_unaligned(const struct bio *bio, struct request_queue *q)
2917  {
2918  	unsigned int bs_mask = queue_logical_block_size(q) - 1;
2919  
2920  	/* .bi_sector of any zero sized bio need to be initialized */
2921  	if ((bio->bi_iter.bi_size & bs_mask) ||
2922  	    ((bio->bi_iter.bi_sector << SECTOR_SHIFT) & bs_mask))
2923  		return true;
2924  	return false;
2925  }
2926  
2927  /**
2928   * blk_mq_submit_bio - Create and send a request to block device.
2929   * @bio: Bio pointer.
2930   *
2931   * Builds up a request structure from @q and @bio and send to the device. The
2932   * request may not be queued directly to hardware if:
2933   * * This request can be merged with another one
2934   * * We want to place request at plug queue for possible future merging
2935   * * There is an IO scheduler active at this queue
2936   *
2937   * It will not queue the request if there is an error with the bio, or at the
2938   * request creation.
2939   */
blk_mq_submit_bio(struct bio * bio)2940  void blk_mq_submit_bio(struct bio *bio)
2941  {
2942  	struct request_queue *q = bdev_get_queue(bio->bi_bdev);
2943  	struct blk_plug *plug = current->plug;
2944  	const int is_sync = op_is_sync(bio->bi_opf);
2945  	struct blk_mq_hw_ctx *hctx;
2946  	unsigned int nr_segs;
2947  	struct request *rq;
2948  	blk_status_t ret;
2949  
2950  	/*
2951  	 * If the plug has a cached request for this queue, try to use it.
2952  	 */
2953  	rq = blk_mq_peek_cached_request(plug, q, bio->bi_opf);
2954  
2955  	/*
2956  	 * A BIO that was released from a zone write plug has already been
2957  	 * through the preparation in this function, already holds a reference
2958  	 * on the queue usage counter, and is the only write BIO in-flight for
2959  	 * the target zone. Go straight to preparing a request for it.
2960  	 */
2961  	if (bio_zone_write_plugging(bio)) {
2962  		nr_segs = bio->__bi_nr_segments;
2963  		if (rq)
2964  			blk_queue_exit(q);
2965  		goto new_request;
2966  	}
2967  
2968  	bio = blk_queue_bounce(bio, q);
2969  
2970  	/*
2971  	 * The cached request already holds a q_usage_counter reference and we
2972  	 * don't have to acquire a new one if we use it.
2973  	 */
2974  	if (!rq) {
2975  		if (unlikely(bio_queue_enter(bio)))
2976  			return;
2977  	}
2978  
2979  	/*
2980  	 * Device reconfiguration may change logical block size, so alignment
2981  	 * check has to be done with queue usage counter held
2982  	 */
2983  	if (unlikely(bio_unaligned(bio, q))) {
2984  		bio_io_error(bio);
2985  		goto queue_exit;
2986  	}
2987  
2988  	bio = __bio_split_to_limits(bio, &q->limits, &nr_segs);
2989  	if (!bio)
2990  		goto queue_exit;
2991  
2992  	if (!bio_integrity_prep(bio))
2993  		goto queue_exit;
2994  
2995  	if (blk_mq_attempt_bio_merge(q, bio, nr_segs))
2996  		goto queue_exit;
2997  
2998  	if (blk_queue_is_zoned(q) && blk_zone_plug_bio(bio, nr_segs))
2999  		goto queue_exit;
3000  
3001  new_request:
3002  	if (!rq) {
3003  		rq = blk_mq_get_new_requests(q, plug, bio, nr_segs);
3004  		if (unlikely(!rq))
3005  			goto queue_exit;
3006  	} else {
3007  		blk_mq_use_cached_rq(rq, plug, bio);
3008  	}
3009  
3010  	trace_block_getrq(bio);
3011  
3012  	rq_qos_track(q, rq, bio);
3013  
3014  	blk_mq_bio_to_request(rq, bio, nr_segs);
3015  
3016  	ret = blk_crypto_rq_get_keyslot(rq);
3017  	if (ret != BLK_STS_OK) {
3018  		bio->bi_status = ret;
3019  		bio_endio(bio);
3020  		blk_mq_free_request(rq);
3021  		return;
3022  	}
3023  
3024  	if (bio_zone_write_plugging(bio))
3025  		blk_zone_write_plug_init_request(rq);
3026  
3027  	if (op_is_flush(bio->bi_opf) && blk_insert_flush(rq))
3028  		return;
3029  
3030  	if (plug) {
3031  		blk_add_rq_to_plug(plug, rq);
3032  		return;
3033  	}
3034  
3035  	hctx = rq->mq_hctx;
3036  	if ((rq->rq_flags & RQF_USE_SCHED) ||
3037  	    (hctx->dispatch_busy && (q->nr_hw_queues == 1 || !is_sync))) {
3038  		blk_mq_insert_request(rq, 0);
3039  		blk_mq_run_hw_queue(hctx, true);
3040  	} else {
3041  		blk_mq_run_dispatch_ops(q, blk_mq_try_issue_directly(hctx, rq));
3042  	}
3043  	return;
3044  
3045  queue_exit:
3046  	/*
3047  	 * Don't drop the queue reference if we were trying to use a cached
3048  	 * request and thus didn't acquire one.
3049  	 */
3050  	if (!rq)
3051  		blk_queue_exit(q);
3052  }
3053  
3054  #ifdef CONFIG_BLK_MQ_STACKING
3055  /**
3056   * blk_insert_cloned_request - Helper for stacking drivers to submit a request
3057   * @rq: the request being queued
3058   */
blk_insert_cloned_request(struct request * rq)3059  blk_status_t blk_insert_cloned_request(struct request *rq)
3060  {
3061  	struct request_queue *q = rq->q;
3062  	unsigned int max_sectors = blk_queue_get_max_sectors(rq);
3063  	unsigned int max_segments = blk_rq_get_max_segments(rq);
3064  	blk_status_t ret;
3065  
3066  	if (blk_rq_sectors(rq) > max_sectors) {
3067  		/*
3068  		 * SCSI device does not have a good way to return if
3069  		 * Write Same/Zero is actually supported. If a device rejects
3070  		 * a non-read/write command (discard, write same,etc.) the
3071  		 * low-level device driver will set the relevant queue limit to
3072  		 * 0 to prevent blk-lib from issuing more of the offending
3073  		 * operations. Commands queued prior to the queue limit being
3074  		 * reset need to be completed with BLK_STS_NOTSUPP to avoid I/O
3075  		 * errors being propagated to upper layers.
3076  		 */
3077  		if (max_sectors == 0)
3078  			return BLK_STS_NOTSUPP;
3079  
3080  		printk(KERN_ERR "%s: over max size limit. (%u > %u)\n",
3081  			__func__, blk_rq_sectors(rq), max_sectors);
3082  		return BLK_STS_IOERR;
3083  	}
3084  
3085  	/*
3086  	 * The queue settings related to segment counting may differ from the
3087  	 * original queue.
3088  	 */
3089  	rq->nr_phys_segments = blk_recalc_rq_segments(rq);
3090  	if (rq->nr_phys_segments > max_segments) {
3091  		printk(KERN_ERR "%s: over max segments limit. (%u > %u)\n",
3092  			__func__, rq->nr_phys_segments, max_segments);
3093  		return BLK_STS_IOERR;
3094  	}
3095  
3096  	if (q->disk && should_fail_request(q->disk->part0, blk_rq_bytes(rq)))
3097  		return BLK_STS_IOERR;
3098  
3099  	ret = blk_crypto_rq_get_keyslot(rq);
3100  	if (ret != BLK_STS_OK)
3101  		return ret;
3102  
3103  	blk_account_io_start(rq);
3104  
3105  	/*
3106  	 * Since we have a scheduler attached on the top device,
3107  	 * bypass a potential scheduler on the bottom device for
3108  	 * insert.
3109  	 */
3110  	blk_mq_run_dispatch_ops(q,
3111  			ret = blk_mq_request_issue_directly(rq, true));
3112  	if (ret)
3113  		blk_account_io_done(rq, blk_time_get_ns());
3114  	return ret;
3115  }
3116  EXPORT_SYMBOL_GPL(blk_insert_cloned_request);
3117  
3118  /**
3119   * blk_rq_unprep_clone - Helper function to free all bios in a cloned request
3120   * @rq: the clone request to be cleaned up
3121   *
3122   * Description:
3123   *     Free all bios in @rq for a cloned request.
3124   */
blk_rq_unprep_clone(struct request * rq)3125  void blk_rq_unprep_clone(struct request *rq)
3126  {
3127  	struct bio *bio;
3128  
3129  	while ((bio = rq->bio) != NULL) {
3130  		rq->bio = bio->bi_next;
3131  
3132  		bio_put(bio);
3133  	}
3134  }
3135  EXPORT_SYMBOL_GPL(blk_rq_unprep_clone);
3136  
3137  /**
3138   * blk_rq_prep_clone - Helper function to setup clone request
3139   * @rq: the request to be setup
3140   * @rq_src: original request to be cloned
3141   * @bs: bio_set that bios for clone are allocated from
3142   * @gfp_mask: memory allocation mask for bio
3143   * @bio_ctr: setup function to be called for each clone bio.
3144   *           Returns %0 for success, non %0 for failure.
3145   * @data: private data to be passed to @bio_ctr
3146   *
3147   * Description:
3148   *     Clones bios in @rq_src to @rq, and copies attributes of @rq_src to @rq.
3149   *     Also, pages which the original bios are pointing to are not copied
3150   *     and the cloned bios just point same pages.
3151   *     So cloned bios must be completed before original bios, which means
3152   *     the caller must complete @rq before @rq_src.
3153   */
blk_rq_prep_clone(struct request * rq,struct request * rq_src,struct bio_set * bs,gfp_t gfp_mask,int (* bio_ctr)(struct bio *,struct bio *,void *),void * data)3154  int blk_rq_prep_clone(struct request *rq, struct request *rq_src,
3155  		      struct bio_set *bs, gfp_t gfp_mask,
3156  		      int (*bio_ctr)(struct bio *, struct bio *, void *),
3157  		      void *data)
3158  {
3159  	struct bio *bio, *bio_src;
3160  
3161  	if (!bs)
3162  		bs = &fs_bio_set;
3163  
3164  	__rq_for_each_bio(bio_src, rq_src) {
3165  		bio = bio_alloc_clone(rq->q->disk->part0, bio_src, gfp_mask,
3166  				      bs);
3167  		if (!bio)
3168  			goto free_and_out;
3169  
3170  		if (bio_ctr && bio_ctr(bio, bio_src, data))
3171  			goto free_and_out;
3172  
3173  		if (rq->bio) {
3174  			rq->biotail->bi_next = bio;
3175  			rq->biotail = bio;
3176  		} else {
3177  			rq->bio = rq->biotail = bio;
3178  		}
3179  		bio = NULL;
3180  	}
3181  
3182  	/* Copy attributes of the original request to the clone request. */
3183  	rq->__sector = blk_rq_pos(rq_src);
3184  	rq->__data_len = blk_rq_bytes(rq_src);
3185  	if (rq_src->rq_flags & RQF_SPECIAL_PAYLOAD) {
3186  		rq->rq_flags |= RQF_SPECIAL_PAYLOAD;
3187  		rq->special_vec = rq_src->special_vec;
3188  	}
3189  	rq->nr_phys_segments = rq_src->nr_phys_segments;
3190  	rq->ioprio = rq_src->ioprio;
3191  	rq->write_hint = rq_src->write_hint;
3192  
3193  	if (rq->bio && blk_crypto_rq_bio_prep(rq, rq->bio, gfp_mask) < 0)
3194  		goto free_and_out;
3195  
3196  	return 0;
3197  
3198  free_and_out:
3199  	if (bio)
3200  		bio_put(bio);
3201  	blk_rq_unprep_clone(rq);
3202  
3203  	return -ENOMEM;
3204  }
3205  EXPORT_SYMBOL_GPL(blk_rq_prep_clone);
3206  #endif /* CONFIG_BLK_MQ_STACKING */
3207  
3208  /*
3209   * Steal bios from a request and add them to a bio list.
3210   * The request must not have been partially completed before.
3211   */
blk_steal_bios(struct bio_list * list,struct request * rq)3212  void blk_steal_bios(struct bio_list *list, struct request *rq)
3213  {
3214  	if (rq->bio) {
3215  		if (list->tail)
3216  			list->tail->bi_next = rq->bio;
3217  		else
3218  			list->head = rq->bio;
3219  		list->tail = rq->biotail;
3220  
3221  		rq->bio = NULL;
3222  		rq->biotail = NULL;
3223  	}
3224  
3225  	rq->__data_len = 0;
3226  }
3227  EXPORT_SYMBOL_GPL(blk_steal_bios);
3228  
order_to_size(unsigned int order)3229  static size_t order_to_size(unsigned int order)
3230  {
3231  	return (size_t)PAGE_SIZE << order;
3232  }
3233  
3234  /* called before freeing request pool in @tags */
blk_mq_clear_rq_mapping(struct blk_mq_tags * drv_tags,struct blk_mq_tags * tags)3235  static void blk_mq_clear_rq_mapping(struct blk_mq_tags *drv_tags,
3236  				    struct blk_mq_tags *tags)
3237  {
3238  	struct page *page;
3239  	unsigned long flags;
3240  
3241  	/*
3242  	 * There is no need to clear mapping if driver tags is not initialized
3243  	 * or the mapping belongs to the driver tags.
3244  	 */
3245  	if (!drv_tags || drv_tags == tags)
3246  		return;
3247  
3248  	list_for_each_entry(page, &tags->page_list, lru) {
3249  		unsigned long start = (unsigned long)page_address(page);
3250  		unsigned long end = start + order_to_size(page->private);
3251  		int i;
3252  
3253  		for (i = 0; i < drv_tags->nr_tags; i++) {
3254  			struct request *rq = drv_tags->rqs[i];
3255  			unsigned long rq_addr = (unsigned long)rq;
3256  
3257  			if (rq_addr >= start && rq_addr < end) {
3258  				WARN_ON_ONCE(req_ref_read(rq) != 0);
3259  				cmpxchg(&drv_tags->rqs[i], rq, NULL);
3260  			}
3261  		}
3262  	}
3263  
3264  	/*
3265  	 * Wait until all pending iteration is done.
3266  	 *
3267  	 * Request reference is cleared and it is guaranteed to be observed
3268  	 * after the ->lock is released.
3269  	 */
3270  	spin_lock_irqsave(&drv_tags->lock, flags);
3271  	spin_unlock_irqrestore(&drv_tags->lock, flags);
3272  }
3273  
blk_mq_free_rqs(struct blk_mq_tag_set * set,struct blk_mq_tags * tags,unsigned int hctx_idx)3274  void blk_mq_free_rqs(struct blk_mq_tag_set *set, struct blk_mq_tags *tags,
3275  		     unsigned int hctx_idx)
3276  {
3277  	struct blk_mq_tags *drv_tags;
3278  	struct page *page;
3279  
3280  	if (list_empty(&tags->page_list))
3281  		return;
3282  
3283  	if (blk_mq_is_shared_tags(set->flags))
3284  		drv_tags = set->shared_tags;
3285  	else
3286  		drv_tags = set->tags[hctx_idx];
3287  
3288  	if (tags->static_rqs && set->ops->exit_request) {
3289  		int i;
3290  
3291  		for (i = 0; i < tags->nr_tags; i++) {
3292  			struct request *rq = tags->static_rqs[i];
3293  
3294  			if (!rq)
3295  				continue;
3296  			set->ops->exit_request(set, rq, hctx_idx);
3297  			tags->static_rqs[i] = NULL;
3298  		}
3299  	}
3300  
3301  	blk_mq_clear_rq_mapping(drv_tags, tags);
3302  
3303  	while (!list_empty(&tags->page_list)) {
3304  		page = list_first_entry(&tags->page_list, struct page, lru);
3305  		list_del_init(&page->lru);
3306  		/*
3307  		 * Remove kmemleak object previously allocated in
3308  		 * blk_mq_alloc_rqs().
3309  		 */
3310  		kmemleak_free(page_address(page));
3311  		__free_pages(page, page->private);
3312  	}
3313  }
3314  
blk_mq_free_rq_map(struct blk_mq_tags * tags)3315  void blk_mq_free_rq_map(struct blk_mq_tags *tags)
3316  {
3317  	kfree(tags->rqs);
3318  	tags->rqs = NULL;
3319  	kfree(tags->static_rqs);
3320  	tags->static_rqs = NULL;
3321  
3322  	blk_mq_free_tags(tags);
3323  }
3324  
hctx_idx_to_type(struct blk_mq_tag_set * set,unsigned int hctx_idx)3325  static enum hctx_type hctx_idx_to_type(struct blk_mq_tag_set *set,
3326  		unsigned int hctx_idx)
3327  {
3328  	int i;
3329  
3330  	for (i = 0; i < set->nr_maps; i++) {
3331  		unsigned int start = set->map[i].queue_offset;
3332  		unsigned int end = start + set->map[i].nr_queues;
3333  
3334  		if (hctx_idx >= start && hctx_idx < end)
3335  			break;
3336  	}
3337  
3338  	if (i >= set->nr_maps)
3339  		i = HCTX_TYPE_DEFAULT;
3340  
3341  	return i;
3342  }
3343  
blk_mq_get_hctx_node(struct blk_mq_tag_set * set,unsigned int hctx_idx)3344  static int blk_mq_get_hctx_node(struct blk_mq_tag_set *set,
3345  		unsigned int hctx_idx)
3346  {
3347  	enum hctx_type type = hctx_idx_to_type(set, hctx_idx);
3348  
3349  	return blk_mq_hw_queue_to_node(&set->map[type], hctx_idx);
3350  }
3351  
blk_mq_alloc_rq_map(struct blk_mq_tag_set * set,unsigned int hctx_idx,unsigned int nr_tags,unsigned int reserved_tags)3352  static struct blk_mq_tags *blk_mq_alloc_rq_map(struct blk_mq_tag_set *set,
3353  					       unsigned int hctx_idx,
3354  					       unsigned int nr_tags,
3355  					       unsigned int reserved_tags)
3356  {
3357  	int node = blk_mq_get_hctx_node(set, hctx_idx);
3358  	struct blk_mq_tags *tags;
3359  
3360  	if (node == NUMA_NO_NODE)
3361  		node = set->numa_node;
3362  
3363  	tags = blk_mq_init_tags(nr_tags, reserved_tags, node,
3364  				BLK_MQ_FLAG_TO_ALLOC_POLICY(set->flags));
3365  	if (!tags)
3366  		return NULL;
3367  
3368  	tags->rqs = kcalloc_node(nr_tags, sizeof(struct request *),
3369  				 GFP_NOIO | __GFP_NOWARN | __GFP_NORETRY,
3370  				 node);
3371  	if (!tags->rqs)
3372  		goto err_free_tags;
3373  
3374  	tags->static_rqs = kcalloc_node(nr_tags, sizeof(struct request *),
3375  					GFP_NOIO | __GFP_NOWARN | __GFP_NORETRY,
3376  					node);
3377  	if (!tags->static_rqs)
3378  		goto err_free_rqs;
3379  
3380  	return tags;
3381  
3382  err_free_rqs:
3383  	kfree(tags->rqs);
3384  err_free_tags:
3385  	blk_mq_free_tags(tags);
3386  	return NULL;
3387  }
3388  
blk_mq_init_request(struct blk_mq_tag_set * set,struct request * rq,unsigned int hctx_idx,int node)3389  static int blk_mq_init_request(struct blk_mq_tag_set *set, struct request *rq,
3390  			       unsigned int hctx_idx, int node)
3391  {
3392  	int ret;
3393  
3394  	if (set->ops->init_request) {
3395  		ret = set->ops->init_request(set, rq, hctx_idx, node);
3396  		if (ret)
3397  			return ret;
3398  	}
3399  
3400  	WRITE_ONCE(rq->state, MQ_RQ_IDLE);
3401  	return 0;
3402  }
3403  
blk_mq_alloc_rqs(struct blk_mq_tag_set * set,struct blk_mq_tags * tags,unsigned int hctx_idx,unsigned int depth)3404  static int blk_mq_alloc_rqs(struct blk_mq_tag_set *set,
3405  			    struct blk_mq_tags *tags,
3406  			    unsigned int hctx_idx, unsigned int depth)
3407  {
3408  	unsigned int i, j, entries_per_page, max_order = 4;
3409  	int node = blk_mq_get_hctx_node(set, hctx_idx);
3410  	size_t rq_size, left;
3411  
3412  	if (node == NUMA_NO_NODE)
3413  		node = set->numa_node;
3414  
3415  	INIT_LIST_HEAD(&tags->page_list);
3416  
3417  	/*
3418  	 * rq_size is the size of the request plus driver payload, rounded
3419  	 * to the cacheline size
3420  	 */
3421  	rq_size = round_up(sizeof(struct request) + set->cmd_size,
3422  				cache_line_size());
3423  	left = rq_size * depth;
3424  
3425  	for (i = 0; i < depth; ) {
3426  		int this_order = max_order;
3427  		struct page *page;
3428  		int to_do;
3429  		void *p;
3430  
3431  		while (this_order && left < order_to_size(this_order - 1))
3432  			this_order--;
3433  
3434  		do {
3435  			page = alloc_pages_node(node,
3436  				GFP_NOIO | __GFP_NOWARN | __GFP_NORETRY | __GFP_ZERO,
3437  				this_order);
3438  			if (page)
3439  				break;
3440  			if (!this_order--)
3441  				break;
3442  			if (order_to_size(this_order) < rq_size)
3443  				break;
3444  		} while (1);
3445  
3446  		if (!page)
3447  			goto fail;
3448  
3449  		page->private = this_order;
3450  		list_add_tail(&page->lru, &tags->page_list);
3451  
3452  		p = page_address(page);
3453  		/*
3454  		 * Allow kmemleak to scan these pages as they contain pointers
3455  		 * to additional allocations like via ops->init_request().
3456  		 */
3457  		kmemleak_alloc(p, order_to_size(this_order), 1, GFP_NOIO);
3458  		entries_per_page = order_to_size(this_order) / rq_size;
3459  		to_do = min(entries_per_page, depth - i);
3460  		left -= to_do * rq_size;
3461  		for (j = 0; j < to_do; j++) {
3462  			struct request *rq = p;
3463  
3464  			tags->static_rqs[i] = rq;
3465  			if (blk_mq_init_request(set, rq, hctx_idx, node)) {
3466  				tags->static_rqs[i] = NULL;
3467  				goto fail;
3468  			}
3469  
3470  			p += rq_size;
3471  			i++;
3472  		}
3473  	}
3474  	return 0;
3475  
3476  fail:
3477  	blk_mq_free_rqs(set, tags, hctx_idx);
3478  	return -ENOMEM;
3479  }
3480  
3481  struct rq_iter_data {
3482  	struct blk_mq_hw_ctx *hctx;
3483  	bool has_rq;
3484  };
3485  
blk_mq_has_request(struct request * rq,void * data)3486  static bool blk_mq_has_request(struct request *rq, void *data)
3487  {
3488  	struct rq_iter_data *iter_data = data;
3489  
3490  	if (rq->mq_hctx != iter_data->hctx)
3491  		return true;
3492  	iter_data->has_rq = true;
3493  	return false;
3494  }
3495  
blk_mq_hctx_has_requests(struct blk_mq_hw_ctx * hctx)3496  static bool blk_mq_hctx_has_requests(struct blk_mq_hw_ctx *hctx)
3497  {
3498  	struct blk_mq_tags *tags = hctx->sched_tags ?
3499  			hctx->sched_tags : hctx->tags;
3500  	struct rq_iter_data data = {
3501  		.hctx	= hctx,
3502  	};
3503  
3504  	blk_mq_all_tag_iter(tags, blk_mq_has_request, &data);
3505  	return data.has_rq;
3506  }
3507  
blk_mq_hctx_has_online_cpu(struct blk_mq_hw_ctx * hctx,unsigned int this_cpu)3508  static bool blk_mq_hctx_has_online_cpu(struct blk_mq_hw_ctx *hctx,
3509  		unsigned int this_cpu)
3510  {
3511  	enum hctx_type type = hctx->type;
3512  	int cpu;
3513  
3514  	/*
3515  	 * hctx->cpumask has to rule out isolated CPUs, but userspace still
3516  	 * might submit IOs on these isolated CPUs, so use the queue map to
3517  	 * check if all CPUs mapped to this hctx are offline
3518  	 */
3519  	for_each_online_cpu(cpu) {
3520  		struct blk_mq_hw_ctx *h = blk_mq_map_queue_type(hctx->queue,
3521  				type, cpu);
3522  
3523  		if (h != hctx)
3524  			continue;
3525  
3526  		/* this hctx has at least one online CPU */
3527  		if (this_cpu != cpu)
3528  			return true;
3529  	}
3530  
3531  	return false;
3532  }
3533  
blk_mq_hctx_notify_offline(unsigned int cpu,struct hlist_node * node)3534  static int blk_mq_hctx_notify_offline(unsigned int cpu, struct hlist_node *node)
3535  {
3536  	struct blk_mq_hw_ctx *hctx = hlist_entry_safe(node,
3537  			struct blk_mq_hw_ctx, cpuhp_online);
3538  
3539  	if (blk_mq_hctx_has_online_cpu(hctx, cpu))
3540  		return 0;
3541  
3542  	/*
3543  	 * Prevent new request from being allocated on the current hctx.
3544  	 *
3545  	 * The smp_mb__after_atomic() Pairs with the implied barrier in
3546  	 * test_and_set_bit_lock in sbitmap_get().  Ensures the inactive flag is
3547  	 * seen once we return from the tag allocator.
3548  	 */
3549  	set_bit(BLK_MQ_S_INACTIVE, &hctx->state);
3550  	smp_mb__after_atomic();
3551  
3552  	/*
3553  	 * Try to grab a reference to the queue and wait for any outstanding
3554  	 * requests.  If we could not grab a reference the queue has been
3555  	 * frozen and there are no requests.
3556  	 */
3557  	if (percpu_ref_tryget(&hctx->queue->q_usage_counter)) {
3558  		while (blk_mq_hctx_has_requests(hctx))
3559  			msleep(5);
3560  		percpu_ref_put(&hctx->queue->q_usage_counter);
3561  	}
3562  
3563  	return 0;
3564  }
3565  
3566  /*
3567   * Check if one CPU is mapped to the specified hctx
3568   *
3569   * Isolated CPUs have been ruled out from hctx->cpumask, which is supposed
3570   * to be used for scheduling kworker only. For other usage, please call this
3571   * helper for checking if one CPU belongs to the specified hctx
3572   */
blk_mq_cpu_mapped_to_hctx(unsigned int cpu,const struct blk_mq_hw_ctx * hctx)3573  static bool blk_mq_cpu_mapped_to_hctx(unsigned int cpu,
3574  		const struct blk_mq_hw_ctx *hctx)
3575  {
3576  	struct blk_mq_hw_ctx *mapped_hctx = blk_mq_map_queue_type(hctx->queue,
3577  			hctx->type, cpu);
3578  
3579  	return mapped_hctx == hctx;
3580  }
3581  
blk_mq_hctx_notify_online(unsigned int cpu,struct hlist_node * node)3582  static int blk_mq_hctx_notify_online(unsigned int cpu, struct hlist_node *node)
3583  {
3584  	struct blk_mq_hw_ctx *hctx = hlist_entry_safe(node,
3585  			struct blk_mq_hw_ctx, cpuhp_online);
3586  
3587  	if (blk_mq_cpu_mapped_to_hctx(cpu, hctx))
3588  		clear_bit(BLK_MQ_S_INACTIVE, &hctx->state);
3589  	return 0;
3590  }
3591  
3592  /*
3593   * 'cpu' is going away. splice any existing rq_list entries from this
3594   * software queue to the hw queue dispatch list, and ensure that it
3595   * gets run.
3596   */
blk_mq_hctx_notify_dead(unsigned int cpu,struct hlist_node * node)3597  static int blk_mq_hctx_notify_dead(unsigned int cpu, struct hlist_node *node)
3598  {
3599  	struct blk_mq_hw_ctx *hctx;
3600  	struct blk_mq_ctx *ctx;
3601  	LIST_HEAD(tmp);
3602  	enum hctx_type type;
3603  
3604  	hctx = hlist_entry_safe(node, struct blk_mq_hw_ctx, cpuhp_dead);
3605  	if (!blk_mq_cpu_mapped_to_hctx(cpu, hctx))
3606  		return 0;
3607  
3608  	ctx = __blk_mq_get_ctx(hctx->queue, cpu);
3609  	type = hctx->type;
3610  
3611  	spin_lock(&ctx->lock);
3612  	if (!list_empty(&ctx->rq_lists[type])) {
3613  		list_splice_init(&ctx->rq_lists[type], &tmp);
3614  		blk_mq_hctx_clear_pending(hctx, ctx);
3615  	}
3616  	spin_unlock(&ctx->lock);
3617  
3618  	if (list_empty(&tmp))
3619  		return 0;
3620  
3621  	spin_lock(&hctx->lock);
3622  	list_splice_tail_init(&tmp, &hctx->dispatch);
3623  	spin_unlock(&hctx->lock);
3624  
3625  	blk_mq_run_hw_queue(hctx, true);
3626  	return 0;
3627  }
3628  
blk_mq_remove_cpuhp(struct blk_mq_hw_ctx * hctx)3629  static void blk_mq_remove_cpuhp(struct blk_mq_hw_ctx *hctx)
3630  {
3631  	if (!(hctx->flags & BLK_MQ_F_STACKING))
3632  		cpuhp_state_remove_instance_nocalls(CPUHP_AP_BLK_MQ_ONLINE,
3633  						    &hctx->cpuhp_online);
3634  	cpuhp_state_remove_instance_nocalls(CPUHP_BLK_MQ_DEAD,
3635  					    &hctx->cpuhp_dead);
3636  }
3637  
3638  /*
3639   * Before freeing hw queue, clearing the flush request reference in
3640   * tags->rqs[] for avoiding potential UAF.
3641   */
blk_mq_clear_flush_rq_mapping(struct blk_mq_tags * tags,unsigned int queue_depth,struct request * flush_rq)3642  static void blk_mq_clear_flush_rq_mapping(struct blk_mq_tags *tags,
3643  		unsigned int queue_depth, struct request *flush_rq)
3644  {
3645  	int i;
3646  	unsigned long flags;
3647  
3648  	/* The hw queue may not be mapped yet */
3649  	if (!tags)
3650  		return;
3651  
3652  	WARN_ON_ONCE(req_ref_read(flush_rq) != 0);
3653  
3654  	for (i = 0; i < queue_depth; i++)
3655  		cmpxchg(&tags->rqs[i], flush_rq, NULL);
3656  
3657  	/*
3658  	 * Wait until all pending iteration is done.
3659  	 *
3660  	 * Request reference is cleared and it is guaranteed to be observed
3661  	 * after the ->lock is released.
3662  	 */
3663  	spin_lock_irqsave(&tags->lock, flags);
3664  	spin_unlock_irqrestore(&tags->lock, flags);
3665  }
3666  
3667  /* hctx->ctxs will be freed in queue's release handler */
blk_mq_exit_hctx(struct request_queue * q,struct blk_mq_tag_set * set,struct blk_mq_hw_ctx * hctx,unsigned int hctx_idx)3668  static void blk_mq_exit_hctx(struct request_queue *q,
3669  		struct blk_mq_tag_set *set,
3670  		struct blk_mq_hw_ctx *hctx, unsigned int hctx_idx)
3671  {
3672  	struct request *flush_rq = hctx->fq->flush_rq;
3673  
3674  	if (blk_mq_hw_queue_mapped(hctx))
3675  		blk_mq_tag_idle(hctx);
3676  
3677  	if (blk_queue_init_done(q))
3678  		blk_mq_clear_flush_rq_mapping(set->tags[hctx_idx],
3679  				set->queue_depth, flush_rq);
3680  	if (set->ops->exit_request)
3681  		set->ops->exit_request(set, flush_rq, hctx_idx);
3682  
3683  	if (set->ops->exit_hctx)
3684  		set->ops->exit_hctx(hctx, hctx_idx);
3685  
3686  	blk_mq_remove_cpuhp(hctx);
3687  
3688  	xa_erase(&q->hctx_table, hctx_idx);
3689  
3690  	spin_lock(&q->unused_hctx_lock);
3691  	list_add(&hctx->hctx_list, &q->unused_hctx_list);
3692  	spin_unlock(&q->unused_hctx_lock);
3693  }
3694  
blk_mq_exit_hw_queues(struct request_queue * q,struct blk_mq_tag_set * set,int nr_queue)3695  static void blk_mq_exit_hw_queues(struct request_queue *q,
3696  		struct blk_mq_tag_set *set, int nr_queue)
3697  {
3698  	struct blk_mq_hw_ctx *hctx;
3699  	unsigned long i;
3700  
3701  	queue_for_each_hw_ctx(q, hctx, i) {
3702  		if (i == nr_queue)
3703  			break;
3704  		blk_mq_exit_hctx(q, set, hctx, i);
3705  	}
3706  }
3707  
blk_mq_init_hctx(struct request_queue * q,struct blk_mq_tag_set * set,struct blk_mq_hw_ctx * hctx,unsigned hctx_idx)3708  static int blk_mq_init_hctx(struct request_queue *q,
3709  		struct blk_mq_tag_set *set,
3710  		struct blk_mq_hw_ctx *hctx, unsigned hctx_idx)
3711  {
3712  	hctx->queue_num = hctx_idx;
3713  
3714  	if (!(hctx->flags & BLK_MQ_F_STACKING))
3715  		cpuhp_state_add_instance_nocalls(CPUHP_AP_BLK_MQ_ONLINE,
3716  				&hctx->cpuhp_online);
3717  	cpuhp_state_add_instance_nocalls(CPUHP_BLK_MQ_DEAD, &hctx->cpuhp_dead);
3718  
3719  	hctx->tags = set->tags[hctx_idx];
3720  
3721  	if (set->ops->init_hctx &&
3722  	    set->ops->init_hctx(hctx, set->driver_data, hctx_idx))
3723  		goto unregister_cpu_notifier;
3724  
3725  	if (blk_mq_init_request(set, hctx->fq->flush_rq, hctx_idx,
3726  				hctx->numa_node))
3727  		goto exit_hctx;
3728  
3729  	if (xa_insert(&q->hctx_table, hctx_idx, hctx, GFP_KERNEL))
3730  		goto exit_flush_rq;
3731  
3732  	return 0;
3733  
3734   exit_flush_rq:
3735  	if (set->ops->exit_request)
3736  		set->ops->exit_request(set, hctx->fq->flush_rq, hctx_idx);
3737   exit_hctx:
3738  	if (set->ops->exit_hctx)
3739  		set->ops->exit_hctx(hctx, hctx_idx);
3740   unregister_cpu_notifier:
3741  	blk_mq_remove_cpuhp(hctx);
3742  	return -1;
3743  }
3744  
3745  static struct blk_mq_hw_ctx *
blk_mq_alloc_hctx(struct request_queue * q,struct blk_mq_tag_set * set,int node)3746  blk_mq_alloc_hctx(struct request_queue *q, struct blk_mq_tag_set *set,
3747  		int node)
3748  {
3749  	struct blk_mq_hw_ctx *hctx;
3750  	gfp_t gfp = GFP_NOIO | __GFP_NOWARN | __GFP_NORETRY;
3751  
3752  	hctx = kzalloc_node(sizeof(struct blk_mq_hw_ctx), gfp, node);
3753  	if (!hctx)
3754  		goto fail_alloc_hctx;
3755  
3756  	if (!zalloc_cpumask_var_node(&hctx->cpumask, gfp, node))
3757  		goto free_hctx;
3758  
3759  	atomic_set(&hctx->nr_active, 0);
3760  	if (node == NUMA_NO_NODE)
3761  		node = set->numa_node;
3762  	hctx->numa_node = node;
3763  
3764  	INIT_DELAYED_WORK(&hctx->run_work, blk_mq_run_work_fn);
3765  	spin_lock_init(&hctx->lock);
3766  	INIT_LIST_HEAD(&hctx->dispatch);
3767  	hctx->queue = q;
3768  	hctx->flags = set->flags & ~BLK_MQ_F_TAG_QUEUE_SHARED;
3769  
3770  	INIT_LIST_HEAD(&hctx->hctx_list);
3771  
3772  	/*
3773  	 * Allocate space for all possible cpus to avoid allocation at
3774  	 * runtime
3775  	 */
3776  	hctx->ctxs = kmalloc_array_node(nr_cpu_ids, sizeof(void *),
3777  			gfp, node);
3778  	if (!hctx->ctxs)
3779  		goto free_cpumask;
3780  
3781  	if (sbitmap_init_node(&hctx->ctx_map, nr_cpu_ids, ilog2(8),
3782  				gfp, node, false, false))
3783  		goto free_ctxs;
3784  	hctx->nr_ctx = 0;
3785  
3786  	spin_lock_init(&hctx->dispatch_wait_lock);
3787  	init_waitqueue_func_entry(&hctx->dispatch_wait, blk_mq_dispatch_wake);
3788  	INIT_LIST_HEAD(&hctx->dispatch_wait.entry);
3789  
3790  	hctx->fq = blk_alloc_flush_queue(hctx->numa_node, set->cmd_size, gfp);
3791  	if (!hctx->fq)
3792  		goto free_bitmap;
3793  
3794  	blk_mq_hctx_kobj_init(hctx);
3795  
3796  	return hctx;
3797  
3798   free_bitmap:
3799  	sbitmap_free(&hctx->ctx_map);
3800   free_ctxs:
3801  	kfree(hctx->ctxs);
3802   free_cpumask:
3803  	free_cpumask_var(hctx->cpumask);
3804   free_hctx:
3805  	kfree(hctx);
3806   fail_alloc_hctx:
3807  	return NULL;
3808  }
3809  
blk_mq_init_cpu_queues(struct request_queue * q,unsigned int nr_hw_queues)3810  static void blk_mq_init_cpu_queues(struct request_queue *q,
3811  				   unsigned int nr_hw_queues)
3812  {
3813  	struct blk_mq_tag_set *set = q->tag_set;
3814  	unsigned int i, j;
3815  
3816  	for_each_possible_cpu(i) {
3817  		struct blk_mq_ctx *__ctx = per_cpu_ptr(q->queue_ctx, i);
3818  		struct blk_mq_hw_ctx *hctx;
3819  		int k;
3820  
3821  		__ctx->cpu = i;
3822  		spin_lock_init(&__ctx->lock);
3823  		for (k = HCTX_TYPE_DEFAULT; k < HCTX_MAX_TYPES; k++)
3824  			INIT_LIST_HEAD(&__ctx->rq_lists[k]);
3825  
3826  		__ctx->queue = q;
3827  
3828  		/*
3829  		 * Set local node, IFF we have more than one hw queue. If
3830  		 * not, we remain on the home node of the device
3831  		 */
3832  		for (j = 0; j < set->nr_maps; j++) {
3833  			hctx = blk_mq_map_queue_type(q, j, i);
3834  			if (nr_hw_queues > 1 && hctx->numa_node == NUMA_NO_NODE)
3835  				hctx->numa_node = cpu_to_node(i);
3836  		}
3837  	}
3838  }
3839  
blk_mq_alloc_map_and_rqs(struct blk_mq_tag_set * set,unsigned int hctx_idx,unsigned int depth)3840  struct blk_mq_tags *blk_mq_alloc_map_and_rqs(struct blk_mq_tag_set *set,
3841  					     unsigned int hctx_idx,
3842  					     unsigned int depth)
3843  {
3844  	struct blk_mq_tags *tags;
3845  	int ret;
3846  
3847  	tags = blk_mq_alloc_rq_map(set, hctx_idx, depth, set->reserved_tags);
3848  	if (!tags)
3849  		return NULL;
3850  
3851  	ret = blk_mq_alloc_rqs(set, tags, hctx_idx, depth);
3852  	if (ret) {
3853  		blk_mq_free_rq_map(tags);
3854  		return NULL;
3855  	}
3856  
3857  	return tags;
3858  }
3859  
__blk_mq_alloc_map_and_rqs(struct blk_mq_tag_set * set,int hctx_idx)3860  static bool __blk_mq_alloc_map_and_rqs(struct blk_mq_tag_set *set,
3861  				       int hctx_idx)
3862  {
3863  	if (blk_mq_is_shared_tags(set->flags)) {
3864  		set->tags[hctx_idx] = set->shared_tags;
3865  
3866  		return true;
3867  	}
3868  
3869  	set->tags[hctx_idx] = blk_mq_alloc_map_and_rqs(set, hctx_idx,
3870  						       set->queue_depth);
3871  
3872  	return set->tags[hctx_idx];
3873  }
3874  
blk_mq_free_map_and_rqs(struct blk_mq_tag_set * set,struct blk_mq_tags * tags,unsigned int hctx_idx)3875  void blk_mq_free_map_and_rqs(struct blk_mq_tag_set *set,
3876  			     struct blk_mq_tags *tags,
3877  			     unsigned int hctx_idx)
3878  {
3879  	if (tags) {
3880  		blk_mq_free_rqs(set, tags, hctx_idx);
3881  		blk_mq_free_rq_map(tags);
3882  	}
3883  }
3884  
__blk_mq_free_map_and_rqs(struct blk_mq_tag_set * set,unsigned int hctx_idx)3885  static void __blk_mq_free_map_and_rqs(struct blk_mq_tag_set *set,
3886  				      unsigned int hctx_idx)
3887  {
3888  	if (!blk_mq_is_shared_tags(set->flags))
3889  		blk_mq_free_map_and_rqs(set, set->tags[hctx_idx], hctx_idx);
3890  
3891  	set->tags[hctx_idx] = NULL;
3892  }
3893  
blk_mq_map_swqueue(struct request_queue * q)3894  static void blk_mq_map_swqueue(struct request_queue *q)
3895  {
3896  	unsigned int j, hctx_idx;
3897  	unsigned long i;
3898  	struct blk_mq_hw_ctx *hctx;
3899  	struct blk_mq_ctx *ctx;
3900  	struct blk_mq_tag_set *set = q->tag_set;
3901  
3902  	queue_for_each_hw_ctx(q, hctx, i) {
3903  		cpumask_clear(hctx->cpumask);
3904  		hctx->nr_ctx = 0;
3905  		hctx->dispatch_from = NULL;
3906  	}
3907  
3908  	/*
3909  	 * Map software to hardware queues.
3910  	 *
3911  	 * If the cpu isn't present, the cpu is mapped to first hctx.
3912  	 */
3913  	for_each_possible_cpu(i) {
3914  
3915  		ctx = per_cpu_ptr(q->queue_ctx, i);
3916  		for (j = 0; j < set->nr_maps; j++) {
3917  			if (!set->map[j].nr_queues) {
3918  				ctx->hctxs[j] = blk_mq_map_queue_type(q,
3919  						HCTX_TYPE_DEFAULT, i);
3920  				continue;
3921  			}
3922  			hctx_idx = set->map[j].mq_map[i];
3923  			/* unmapped hw queue can be remapped after CPU topo changed */
3924  			if (!set->tags[hctx_idx] &&
3925  			    !__blk_mq_alloc_map_and_rqs(set, hctx_idx)) {
3926  				/*
3927  				 * If tags initialization fail for some hctx,
3928  				 * that hctx won't be brought online.  In this
3929  				 * case, remap the current ctx to hctx[0] which
3930  				 * is guaranteed to always have tags allocated
3931  				 */
3932  				set->map[j].mq_map[i] = 0;
3933  			}
3934  
3935  			hctx = blk_mq_map_queue_type(q, j, i);
3936  			ctx->hctxs[j] = hctx;
3937  			/*
3938  			 * If the CPU is already set in the mask, then we've
3939  			 * mapped this one already. This can happen if
3940  			 * devices share queues across queue maps.
3941  			 */
3942  			if (cpumask_test_cpu(i, hctx->cpumask))
3943  				continue;
3944  
3945  			cpumask_set_cpu(i, hctx->cpumask);
3946  			hctx->type = j;
3947  			ctx->index_hw[hctx->type] = hctx->nr_ctx;
3948  			hctx->ctxs[hctx->nr_ctx++] = ctx;
3949  
3950  			/*
3951  			 * If the nr_ctx type overflows, we have exceeded the
3952  			 * amount of sw queues we can support.
3953  			 */
3954  			BUG_ON(!hctx->nr_ctx);
3955  		}
3956  
3957  		for (; j < HCTX_MAX_TYPES; j++)
3958  			ctx->hctxs[j] = blk_mq_map_queue_type(q,
3959  					HCTX_TYPE_DEFAULT, i);
3960  	}
3961  
3962  	queue_for_each_hw_ctx(q, hctx, i) {
3963  		int cpu;
3964  
3965  		/*
3966  		 * If no software queues are mapped to this hardware queue,
3967  		 * disable it and free the request entries.
3968  		 */
3969  		if (!hctx->nr_ctx) {
3970  			/* Never unmap queue 0.  We need it as a
3971  			 * fallback in case of a new remap fails
3972  			 * allocation
3973  			 */
3974  			if (i)
3975  				__blk_mq_free_map_and_rqs(set, i);
3976  
3977  			hctx->tags = NULL;
3978  			continue;
3979  		}
3980  
3981  		hctx->tags = set->tags[i];
3982  		WARN_ON(!hctx->tags);
3983  
3984  		/*
3985  		 * Set the map size to the number of mapped software queues.
3986  		 * This is more accurate and more efficient than looping
3987  		 * over all possibly mapped software queues.
3988  		 */
3989  		sbitmap_resize(&hctx->ctx_map, hctx->nr_ctx);
3990  
3991  		/*
3992  		 * Rule out isolated CPUs from hctx->cpumask to avoid
3993  		 * running block kworker on isolated CPUs
3994  		 */
3995  		for_each_cpu(cpu, hctx->cpumask) {
3996  			if (cpu_is_isolated(cpu))
3997  				cpumask_clear_cpu(cpu, hctx->cpumask);
3998  		}
3999  
4000  		/*
4001  		 * Initialize batch roundrobin counts
4002  		 */
4003  		hctx->next_cpu = blk_mq_first_mapped_cpu(hctx);
4004  		hctx->next_cpu_batch = BLK_MQ_CPU_WORK_BATCH;
4005  	}
4006  }
4007  
4008  /*
4009   * Caller needs to ensure that we're either frozen/quiesced, or that
4010   * the queue isn't live yet.
4011   */
queue_set_hctx_shared(struct request_queue * q,bool shared)4012  static void queue_set_hctx_shared(struct request_queue *q, bool shared)
4013  {
4014  	struct blk_mq_hw_ctx *hctx;
4015  	unsigned long i;
4016  
4017  	queue_for_each_hw_ctx(q, hctx, i) {
4018  		if (shared) {
4019  			hctx->flags |= BLK_MQ_F_TAG_QUEUE_SHARED;
4020  		} else {
4021  			blk_mq_tag_idle(hctx);
4022  			hctx->flags &= ~BLK_MQ_F_TAG_QUEUE_SHARED;
4023  		}
4024  	}
4025  }
4026  
blk_mq_update_tag_set_shared(struct blk_mq_tag_set * set,bool shared)4027  static void blk_mq_update_tag_set_shared(struct blk_mq_tag_set *set,
4028  					 bool shared)
4029  {
4030  	struct request_queue *q;
4031  
4032  	lockdep_assert_held(&set->tag_list_lock);
4033  
4034  	list_for_each_entry(q, &set->tag_list, tag_set_list) {
4035  		blk_mq_freeze_queue(q);
4036  		queue_set_hctx_shared(q, shared);
4037  		blk_mq_unfreeze_queue(q);
4038  	}
4039  }
4040  
blk_mq_del_queue_tag_set(struct request_queue * q)4041  static void blk_mq_del_queue_tag_set(struct request_queue *q)
4042  {
4043  	struct blk_mq_tag_set *set = q->tag_set;
4044  
4045  	mutex_lock(&set->tag_list_lock);
4046  	list_del(&q->tag_set_list);
4047  	if (list_is_singular(&set->tag_list)) {
4048  		/* just transitioned to unshared */
4049  		set->flags &= ~BLK_MQ_F_TAG_QUEUE_SHARED;
4050  		/* update existing queue */
4051  		blk_mq_update_tag_set_shared(set, false);
4052  	}
4053  	mutex_unlock(&set->tag_list_lock);
4054  	INIT_LIST_HEAD(&q->tag_set_list);
4055  }
4056  
blk_mq_add_queue_tag_set(struct blk_mq_tag_set * set,struct request_queue * q)4057  static void blk_mq_add_queue_tag_set(struct blk_mq_tag_set *set,
4058  				     struct request_queue *q)
4059  {
4060  	mutex_lock(&set->tag_list_lock);
4061  
4062  	/*
4063  	 * Check to see if we're transitioning to shared (from 1 to 2 queues).
4064  	 */
4065  	if (!list_empty(&set->tag_list) &&
4066  	    !(set->flags & BLK_MQ_F_TAG_QUEUE_SHARED)) {
4067  		set->flags |= BLK_MQ_F_TAG_QUEUE_SHARED;
4068  		/* update existing queue */
4069  		blk_mq_update_tag_set_shared(set, true);
4070  	}
4071  	if (set->flags & BLK_MQ_F_TAG_QUEUE_SHARED)
4072  		queue_set_hctx_shared(q, true);
4073  	list_add_tail(&q->tag_set_list, &set->tag_list);
4074  
4075  	mutex_unlock(&set->tag_list_lock);
4076  }
4077  
4078  /* All allocations will be freed in release handler of q->mq_kobj */
blk_mq_alloc_ctxs(struct request_queue * q)4079  static int blk_mq_alloc_ctxs(struct request_queue *q)
4080  {
4081  	struct blk_mq_ctxs *ctxs;
4082  	int cpu;
4083  
4084  	ctxs = kzalloc(sizeof(*ctxs), GFP_KERNEL);
4085  	if (!ctxs)
4086  		return -ENOMEM;
4087  
4088  	ctxs->queue_ctx = alloc_percpu(struct blk_mq_ctx);
4089  	if (!ctxs->queue_ctx)
4090  		goto fail;
4091  
4092  	for_each_possible_cpu(cpu) {
4093  		struct blk_mq_ctx *ctx = per_cpu_ptr(ctxs->queue_ctx, cpu);
4094  		ctx->ctxs = ctxs;
4095  	}
4096  
4097  	q->mq_kobj = &ctxs->kobj;
4098  	q->queue_ctx = ctxs->queue_ctx;
4099  
4100  	return 0;
4101   fail:
4102  	kfree(ctxs);
4103  	return -ENOMEM;
4104  }
4105  
4106  /*
4107   * It is the actual release handler for mq, but we do it from
4108   * request queue's release handler for avoiding use-after-free
4109   * and headache because q->mq_kobj shouldn't have been introduced,
4110   * but we can't group ctx/kctx kobj without it.
4111   */
blk_mq_release(struct request_queue * q)4112  void blk_mq_release(struct request_queue *q)
4113  {
4114  	struct blk_mq_hw_ctx *hctx, *next;
4115  	unsigned long i;
4116  
4117  	queue_for_each_hw_ctx(q, hctx, i)
4118  		WARN_ON_ONCE(hctx && list_empty(&hctx->hctx_list));
4119  
4120  	/* all hctx are in .unused_hctx_list now */
4121  	list_for_each_entry_safe(hctx, next, &q->unused_hctx_list, hctx_list) {
4122  		list_del_init(&hctx->hctx_list);
4123  		kobject_put(&hctx->kobj);
4124  	}
4125  
4126  	xa_destroy(&q->hctx_table);
4127  
4128  	/*
4129  	 * release .mq_kobj and sw queue's kobject now because
4130  	 * both share lifetime with request queue.
4131  	 */
4132  	blk_mq_sysfs_deinit(q);
4133  }
4134  
blk_mq_can_poll(struct blk_mq_tag_set * set)4135  static bool blk_mq_can_poll(struct blk_mq_tag_set *set)
4136  {
4137  	return set->nr_maps > HCTX_TYPE_POLL &&
4138  		set->map[HCTX_TYPE_POLL].nr_queues;
4139  }
4140  
blk_mq_alloc_queue(struct blk_mq_tag_set * set,struct queue_limits * lim,void * queuedata)4141  struct request_queue *blk_mq_alloc_queue(struct blk_mq_tag_set *set,
4142  		struct queue_limits *lim, void *queuedata)
4143  {
4144  	struct queue_limits default_lim = { };
4145  	struct request_queue *q;
4146  	int ret;
4147  
4148  	if (!lim)
4149  		lim = &default_lim;
4150  	lim->features |= BLK_FEAT_IO_STAT | BLK_FEAT_NOWAIT;
4151  	if (blk_mq_can_poll(set))
4152  		lim->features |= BLK_FEAT_POLL;
4153  
4154  	q = blk_alloc_queue(lim, set->numa_node);
4155  	if (IS_ERR(q))
4156  		return q;
4157  	q->queuedata = queuedata;
4158  	ret = blk_mq_init_allocated_queue(set, q);
4159  	if (ret) {
4160  		blk_put_queue(q);
4161  		return ERR_PTR(ret);
4162  	}
4163  	return q;
4164  }
4165  EXPORT_SYMBOL(blk_mq_alloc_queue);
4166  
4167  /**
4168   * blk_mq_destroy_queue - shutdown a request queue
4169   * @q: request queue to shutdown
4170   *
4171   * This shuts down a request queue allocated by blk_mq_alloc_queue(). All future
4172   * requests will be failed with -ENODEV. The caller is responsible for dropping
4173   * the reference from blk_mq_alloc_queue() by calling blk_put_queue().
4174   *
4175   * Context: can sleep
4176   */
blk_mq_destroy_queue(struct request_queue * q)4177  void blk_mq_destroy_queue(struct request_queue *q)
4178  {
4179  	WARN_ON_ONCE(!queue_is_mq(q));
4180  	WARN_ON_ONCE(blk_queue_registered(q));
4181  
4182  	might_sleep();
4183  
4184  	blk_queue_flag_set(QUEUE_FLAG_DYING, q);
4185  	blk_queue_start_drain(q);
4186  	blk_mq_freeze_queue_wait(q);
4187  
4188  	blk_sync_queue(q);
4189  	blk_mq_cancel_work_sync(q);
4190  	blk_mq_exit_queue(q);
4191  }
4192  EXPORT_SYMBOL(blk_mq_destroy_queue);
4193  
__blk_mq_alloc_disk(struct blk_mq_tag_set * set,struct queue_limits * lim,void * queuedata,struct lock_class_key * lkclass)4194  struct gendisk *__blk_mq_alloc_disk(struct blk_mq_tag_set *set,
4195  		struct queue_limits *lim, void *queuedata,
4196  		struct lock_class_key *lkclass)
4197  {
4198  	struct request_queue *q;
4199  	struct gendisk *disk;
4200  
4201  	q = blk_mq_alloc_queue(set, lim, queuedata);
4202  	if (IS_ERR(q))
4203  		return ERR_CAST(q);
4204  
4205  	disk = __alloc_disk_node(q, set->numa_node, lkclass);
4206  	if (!disk) {
4207  		blk_mq_destroy_queue(q);
4208  		blk_put_queue(q);
4209  		return ERR_PTR(-ENOMEM);
4210  	}
4211  	set_bit(GD_OWNS_QUEUE, &disk->state);
4212  	return disk;
4213  }
4214  EXPORT_SYMBOL(__blk_mq_alloc_disk);
4215  
blk_mq_alloc_disk_for_queue(struct request_queue * q,struct lock_class_key * lkclass)4216  struct gendisk *blk_mq_alloc_disk_for_queue(struct request_queue *q,
4217  		struct lock_class_key *lkclass)
4218  {
4219  	struct gendisk *disk;
4220  
4221  	if (!blk_get_queue(q))
4222  		return NULL;
4223  	disk = __alloc_disk_node(q, NUMA_NO_NODE, lkclass);
4224  	if (!disk)
4225  		blk_put_queue(q);
4226  	return disk;
4227  }
4228  EXPORT_SYMBOL(blk_mq_alloc_disk_for_queue);
4229  
blk_mq_alloc_and_init_hctx(struct blk_mq_tag_set * set,struct request_queue * q,int hctx_idx,int node)4230  static struct blk_mq_hw_ctx *blk_mq_alloc_and_init_hctx(
4231  		struct blk_mq_tag_set *set, struct request_queue *q,
4232  		int hctx_idx, int node)
4233  {
4234  	struct blk_mq_hw_ctx *hctx = NULL, *tmp;
4235  
4236  	/* reuse dead hctx first */
4237  	spin_lock(&q->unused_hctx_lock);
4238  	list_for_each_entry(tmp, &q->unused_hctx_list, hctx_list) {
4239  		if (tmp->numa_node == node) {
4240  			hctx = tmp;
4241  			break;
4242  		}
4243  	}
4244  	if (hctx)
4245  		list_del_init(&hctx->hctx_list);
4246  	spin_unlock(&q->unused_hctx_lock);
4247  
4248  	if (!hctx)
4249  		hctx = blk_mq_alloc_hctx(q, set, node);
4250  	if (!hctx)
4251  		goto fail;
4252  
4253  	if (blk_mq_init_hctx(q, set, hctx, hctx_idx))
4254  		goto free_hctx;
4255  
4256  	return hctx;
4257  
4258   free_hctx:
4259  	kobject_put(&hctx->kobj);
4260   fail:
4261  	return NULL;
4262  }
4263  
blk_mq_realloc_hw_ctxs(struct blk_mq_tag_set * set,struct request_queue * q)4264  static void blk_mq_realloc_hw_ctxs(struct blk_mq_tag_set *set,
4265  						struct request_queue *q)
4266  {
4267  	struct blk_mq_hw_ctx *hctx;
4268  	unsigned long i, j;
4269  
4270  	/* protect against switching io scheduler  */
4271  	mutex_lock(&q->sysfs_lock);
4272  	for (i = 0; i < set->nr_hw_queues; i++) {
4273  		int old_node;
4274  		int node = blk_mq_get_hctx_node(set, i);
4275  		struct blk_mq_hw_ctx *old_hctx = xa_load(&q->hctx_table, i);
4276  
4277  		if (old_hctx) {
4278  			old_node = old_hctx->numa_node;
4279  			blk_mq_exit_hctx(q, set, old_hctx, i);
4280  		}
4281  
4282  		if (!blk_mq_alloc_and_init_hctx(set, q, i, node)) {
4283  			if (!old_hctx)
4284  				break;
4285  			pr_warn("Allocate new hctx on node %d fails, fallback to previous one on node %d\n",
4286  					node, old_node);
4287  			hctx = blk_mq_alloc_and_init_hctx(set, q, i, old_node);
4288  			WARN_ON_ONCE(!hctx);
4289  		}
4290  	}
4291  	/*
4292  	 * Increasing nr_hw_queues fails. Free the newly allocated
4293  	 * hctxs and keep the previous q->nr_hw_queues.
4294  	 */
4295  	if (i != set->nr_hw_queues) {
4296  		j = q->nr_hw_queues;
4297  	} else {
4298  		j = i;
4299  		q->nr_hw_queues = set->nr_hw_queues;
4300  	}
4301  
4302  	xa_for_each_start(&q->hctx_table, j, hctx, j)
4303  		blk_mq_exit_hctx(q, set, hctx, j);
4304  	mutex_unlock(&q->sysfs_lock);
4305  }
4306  
blk_mq_init_allocated_queue(struct blk_mq_tag_set * set,struct request_queue * q)4307  int blk_mq_init_allocated_queue(struct blk_mq_tag_set *set,
4308  		struct request_queue *q)
4309  {
4310  	/* mark the queue as mq asap */
4311  	q->mq_ops = set->ops;
4312  
4313  	/*
4314  	 * ->tag_set has to be setup before initialize hctx, which cpuphp
4315  	 * handler needs it for checking queue mapping
4316  	 */
4317  	q->tag_set = set;
4318  
4319  	if (blk_mq_alloc_ctxs(q))
4320  		goto err_exit;
4321  
4322  	/* init q->mq_kobj and sw queues' kobjects */
4323  	blk_mq_sysfs_init(q);
4324  
4325  	INIT_LIST_HEAD(&q->unused_hctx_list);
4326  	spin_lock_init(&q->unused_hctx_lock);
4327  
4328  	xa_init(&q->hctx_table);
4329  
4330  	blk_mq_realloc_hw_ctxs(set, q);
4331  	if (!q->nr_hw_queues)
4332  		goto err_hctxs;
4333  
4334  	INIT_WORK(&q->timeout_work, blk_mq_timeout_work);
4335  	blk_queue_rq_timeout(q, set->timeout ? set->timeout : 30 * HZ);
4336  
4337  	q->queue_flags |= QUEUE_FLAG_MQ_DEFAULT;
4338  
4339  	INIT_DELAYED_WORK(&q->requeue_work, blk_mq_requeue_work);
4340  	INIT_LIST_HEAD(&q->flush_list);
4341  	INIT_LIST_HEAD(&q->requeue_list);
4342  	spin_lock_init(&q->requeue_lock);
4343  
4344  	q->nr_requests = set->queue_depth;
4345  
4346  	blk_mq_init_cpu_queues(q, set->nr_hw_queues);
4347  	blk_mq_add_queue_tag_set(set, q);
4348  	blk_mq_map_swqueue(q);
4349  	return 0;
4350  
4351  err_hctxs:
4352  	blk_mq_release(q);
4353  err_exit:
4354  	q->mq_ops = NULL;
4355  	return -ENOMEM;
4356  }
4357  EXPORT_SYMBOL(blk_mq_init_allocated_queue);
4358  
4359  /* tags can _not_ be used after returning from blk_mq_exit_queue */
blk_mq_exit_queue(struct request_queue * q)4360  void blk_mq_exit_queue(struct request_queue *q)
4361  {
4362  	struct blk_mq_tag_set *set = q->tag_set;
4363  
4364  	/* Checks hctx->flags & BLK_MQ_F_TAG_QUEUE_SHARED. */
4365  	blk_mq_exit_hw_queues(q, set, set->nr_hw_queues);
4366  	/* May clear BLK_MQ_F_TAG_QUEUE_SHARED in hctx->flags. */
4367  	blk_mq_del_queue_tag_set(q);
4368  }
4369  
__blk_mq_alloc_rq_maps(struct blk_mq_tag_set * set)4370  static int __blk_mq_alloc_rq_maps(struct blk_mq_tag_set *set)
4371  {
4372  	int i;
4373  
4374  	if (blk_mq_is_shared_tags(set->flags)) {
4375  		set->shared_tags = blk_mq_alloc_map_and_rqs(set,
4376  						BLK_MQ_NO_HCTX_IDX,
4377  						set->queue_depth);
4378  		if (!set->shared_tags)
4379  			return -ENOMEM;
4380  	}
4381  
4382  	for (i = 0; i < set->nr_hw_queues; i++) {
4383  		if (!__blk_mq_alloc_map_and_rqs(set, i))
4384  			goto out_unwind;
4385  		cond_resched();
4386  	}
4387  
4388  	return 0;
4389  
4390  out_unwind:
4391  	while (--i >= 0)
4392  		__blk_mq_free_map_and_rqs(set, i);
4393  
4394  	if (blk_mq_is_shared_tags(set->flags)) {
4395  		blk_mq_free_map_and_rqs(set, set->shared_tags,
4396  					BLK_MQ_NO_HCTX_IDX);
4397  	}
4398  
4399  	return -ENOMEM;
4400  }
4401  
4402  /*
4403   * Allocate the request maps associated with this tag_set. Note that this
4404   * may reduce the depth asked for, if memory is tight. set->queue_depth
4405   * will be updated to reflect the allocated depth.
4406   */
blk_mq_alloc_set_map_and_rqs(struct blk_mq_tag_set * set)4407  static int blk_mq_alloc_set_map_and_rqs(struct blk_mq_tag_set *set)
4408  {
4409  	unsigned int depth;
4410  	int err;
4411  
4412  	depth = set->queue_depth;
4413  	do {
4414  		err = __blk_mq_alloc_rq_maps(set);
4415  		if (!err)
4416  			break;
4417  
4418  		set->queue_depth >>= 1;
4419  		if (set->queue_depth < set->reserved_tags + BLK_MQ_TAG_MIN) {
4420  			err = -ENOMEM;
4421  			break;
4422  		}
4423  	} while (set->queue_depth);
4424  
4425  	if (!set->queue_depth || err) {
4426  		pr_err("blk-mq: failed to allocate request map\n");
4427  		return -ENOMEM;
4428  	}
4429  
4430  	if (depth != set->queue_depth)
4431  		pr_info("blk-mq: reduced tag depth (%u -> %u)\n",
4432  						depth, set->queue_depth);
4433  
4434  	return 0;
4435  }
4436  
blk_mq_update_queue_map(struct blk_mq_tag_set * set)4437  static void blk_mq_update_queue_map(struct blk_mq_tag_set *set)
4438  {
4439  	/*
4440  	 * blk_mq_map_queues() and multiple .map_queues() implementations
4441  	 * expect that set->map[HCTX_TYPE_DEFAULT].nr_queues is set to the
4442  	 * number of hardware queues.
4443  	 */
4444  	if (set->nr_maps == 1)
4445  		set->map[HCTX_TYPE_DEFAULT].nr_queues = set->nr_hw_queues;
4446  
4447  	if (set->ops->map_queues) {
4448  		int i;
4449  
4450  		/*
4451  		 * transport .map_queues is usually done in the following
4452  		 * way:
4453  		 *
4454  		 * for (queue = 0; queue < set->nr_hw_queues; queue++) {
4455  		 * 	mask = get_cpu_mask(queue)
4456  		 * 	for_each_cpu(cpu, mask)
4457  		 * 		set->map[x].mq_map[cpu] = queue;
4458  		 * }
4459  		 *
4460  		 * When we need to remap, the table has to be cleared for
4461  		 * killing stale mapping since one CPU may not be mapped
4462  		 * to any hw queue.
4463  		 */
4464  		for (i = 0; i < set->nr_maps; i++)
4465  			blk_mq_clear_mq_map(&set->map[i]);
4466  
4467  		set->ops->map_queues(set);
4468  	} else {
4469  		BUG_ON(set->nr_maps > 1);
4470  		blk_mq_map_queues(&set->map[HCTX_TYPE_DEFAULT]);
4471  	}
4472  }
4473  
blk_mq_realloc_tag_set_tags(struct blk_mq_tag_set * set,int new_nr_hw_queues)4474  static int blk_mq_realloc_tag_set_tags(struct blk_mq_tag_set *set,
4475  				       int new_nr_hw_queues)
4476  {
4477  	struct blk_mq_tags **new_tags;
4478  	int i;
4479  
4480  	if (set->nr_hw_queues >= new_nr_hw_queues)
4481  		goto done;
4482  
4483  	new_tags = kcalloc_node(new_nr_hw_queues, sizeof(struct blk_mq_tags *),
4484  				GFP_KERNEL, set->numa_node);
4485  	if (!new_tags)
4486  		return -ENOMEM;
4487  
4488  	if (set->tags)
4489  		memcpy(new_tags, set->tags, set->nr_hw_queues *
4490  		       sizeof(*set->tags));
4491  	kfree(set->tags);
4492  	set->tags = new_tags;
4493  
4494  	for (i = set->nr_hw_queues; i < new_nr_hw_queues; i++) {
4495  		if (!__blk_mq_alloc_map_and_rqs(set, i)) {
4496  			while (--i >= set->nr_hw_queues)
4497  				__blk_mq_free_map_and_rqs(set, i);
4498  			return -ENOMEM;
4499  		}
4500  		cond_resched();
4501  	}
4502  
4503  done:
4504  	set->nr_hw_queues = new_nr_hw_queues;
4505  	return 0;
4506  }
4507  
4508  /*
4509   * Alloc a tag set to be associated with one or more request queues.
4510   * May fail with EINVAL for various error conditions. May adjust the
4511   * requested depth down, if it's too large. In that case, the set
4512   * value will be stored in set->queue_depth.
4513   */
blk_mq_alloc_tag_set(struct blk_mq_tag_set * set)4514  int blk_mq_alloc_tag_set(struct blk_mq_tag_set *set)
4515  {
4516  	int i, ret;
4517  
4518  	BUILD_BUG_ON(BLK_MQ_MAX_DEPTH > 1 << BLK_MQ_UNIQUE_TAG_BITS);
4519  
4520  	if (!set->nr_hw_queues)
4521  		return -EINVAL;
4522  	if (!set->queue_depth)
4523  		return -EINVAL;
4524  	if (set->queue_depth < set->reserved_tags + BLK_MQ_TAG_MIN)
4525  		return -EINVAL;
4526  
4527  	if (!set->ops->queue_rq)
4528  		return -EINVAL;
4529  
4530  	if (!set->ops->get_budget ^ !set->ops->put_budget)
4531  		return -EINVAL;
4532  
4533  	if (set->queue_depth > BLK_MQ_MAX_DEPTH) {
4534  		pr_info("blk-mq: reduced tag depth to %u\n",
4535  			BLK_MQ_MAX_DEPTH);
4536  		set->queue_depth = BLK_MQ_MAX_DEPTH;
4537  	}
4538  
4539  	if (!set->nr_maps)
4540  		set->nr_maps = 1;
4541  	else if (set->nr_maps > HCTX_MAX_TYPES)
4542  		return -EINVAL;
4543  
4544  	/*
4545  	 * If a crashdump is active, then we are potentially in a very
4546  	 * memory constrained environment. Limit us to  64 tags to prevent
4547  	 * using too much memory.
4548  	 */
4549  	if (is_kdump_kernel())
4550  		set->queue_depth = min(64U, set->queue_depth);
4551  
4552  	/*
4553  	 * There is no use for more h/w queues than cpus if we just have
4554  	 * a single map
4555  	 */
4556  	if (set->nr_maps == 1 && set->nr_hw_queues > nr_cpu_ids)
4557  		set->nr_hw_queues = nr_cpu_ids;
4558  
4559  	if (set->flags & BLK_MQ_F_BLOCKING) {
4560  		set->srcu = kmalloc(sizeof(*set->srcu), GFP_KERNEL);
4561  		if (!set->srcu)
4562  			return -ENOMEM;
4563  		ret = init_srcu_struct(set->srcu);
4564  		if (ret)
4565  			goto out_free_srcu;
4566  	}
4567  
4568  	ret = -ENOMEM;
4569  	set->tags = kcalloc_node(set->nr_hw_queues,
4570  				 sizeof(struct blk_mq_tags *), GFP_KERNEL,
4571  				 set->numa_node);
4572  	if (!set->tags)
4573  		goto out_cleanup_srcu;
4574  
4575  	for (i = 0; i < set->nr_maps; i++) {
4576  		set->map[i].mq_map = kcalloc_node(nr_cpu_ids,
4577  						  sizeof(set->map[i].mq_map[0]),
4578  						  GFP_KERNEL, set->numa_node);
4579  		if (!set->map[i].mq_map)
4580  			goto out_free_mq_map;
4581  		set->map[i].nr_queues = set->nr_hw_queues;
4582  	}
4583  
4584  	blk_mq_update_queue_map(set);
4585  
4586  	ret = blk_mq_alloc_set_map_and_rqs(set);
4587  	if (ret)
4588  		goto out_free_mq_map;
4589  
4590  	mutex_init(&set->tag_list_lock);
4591  	INIT_LIST_HEAD(&set->tag_list);
4592  
4593  	return 0;
4594  
4595  out_free_mq_map:
4596  	for (i = 0; i < set->nr_maps; i++) {
4597  		kfree(set->map[i].mq_map);
4598  		set->map[i].mq_map = NULL;
4599  	}
4600  	kfree(set->tags);
4601  	set->tags = NULL;
4602  out_cleanup_srcu:
4603  	if (set->flags & BLK_MQ_F_BLOCKING)
4604  		cleanup_srcu_struct(set->srcu);
4605  out_free_srcu:
4606  	if (set->flags & BLK_MQ_F_BLOCKING)
4607  		kfree(set->srcu);
4608  	return ret;
4609  }
4610  EXPORT_SYMBOL(blk_mq_alloc_tag_set);
4611  
4612  /* allocate and initialize a tagset for a simple single-queue device */
blk_mq_alloc_sq_tag_set(struct blk_mq_tag_set * set,const struct blk_mq_ops * ops,unsigned int queue_depth,unsigned int set_flags)4613  int blk_mq_alloc_sq_tag_set(struct blk_mq_tag_set *set,
4614  		const struct blk_mq_ops *ops, unsigned int queue_depth,
4615  		unsigned int set_flags)
4616  {
4617  	memset(set, 0, sizeof(*set));
4618  	set->ops = ops;
4619  	set->nr_hw_queues = 1;
4620  	set->nr_maps = 1;
4621  	set->queue_depth = queue_depth;
4622  	set->numa_node = NUMA_NO_NODE;
4623  	set->flags = set_flags;
4624  	return blk_mq_alloc_tag_set(set);
4625  }
4626  EXPORT_SYMBOL_GPL(blk_mq_alloc_sq_tag_set);
4627  
blk_mq_free_tag_set(struct blk_mq_tag_set * set)4628  void blk_mq_free_tag_set(struct blk_mq_tag_set *set)
4629  {
4630  	int i, j;
4631  
4632  	for (i = 0; i < set->nr_hw_queues; i++)
4633  		__blk_mq_free_map_and_rqs(set, i);
4634  
4635  	if (blk_mq_is_shared_tags(set->flags)) {
4636  		blk_mq_free_map_and_rqs(set, set->shared_tags,
4637  					BLK_MQ_NO_HCTX_IDX);
4638  	}
4639  
4640  	for (j = 0; j < set->nr_maps; j++) {
4641  		kfree(set->map[j].mq_map);
4642  		set->map[j].mq_map = NULL;
4643  	}
4644  
4645  	kfree(set->tags);
4646  	set->tags = NULL;
4647  	if (set->flags & BLK_MQ_F_BLOCKING) {
4648  		cleanup_srcu_struct(set->srcu);
4649  		kfree(set->srcu);
4650  	}
4651  }
4652  EXPORT_SYMBOL(blk_mq_free_tag_set);
4653  
blk_mq_update_nr_requests(struct request_queue * q,unsigned int nr)4654  int blk_mq_update_nr_requests(struct request_queue *q, unsigned int nr)
4655  {
4656  	struct blk_mq_tag_set *set = q->tag_set;
4657  	struct blk_mq_hw_ctx *hctx;
4658  	int ret;
4659  	unsigned long i;
4660  
4661  	if (WARN_ON_ONCE(!q->mq_freeze_depth))
4662  		return -EINVAL;
4663  
4664  	if (!set)
4665  		return -EINVAL;
4666  
4667  	if (q->nr_requests == nr)
4668  		return 0;
4669  
4670  	blk_mq_quiesce_queue(q);
4671  
4672  	ret = 0;
4673  	queue_for_each_hw_ctx(q, hctx, i) {
4674  		if (!hctx->tags)
4675  			continue;
4676  		/*
4677  		 * If we're using an MQ scheduler, just update the scheduler
4678  		 * queue depth. This is similar to what the old code would do.
4679  		 */
4680  		if (hctx->sched_tags) {
4681  			ret = blk_mq_tag_update_depth(hctx, &hctx->sched_tags,
4682  						      nr, true);
4683  		} else {
4684  			ret = blk_mq_tag_update_depth(hctx, &hctx->tags, nr,
4685  						      false);
4686  		}
4687  		if (ret)
4688  			break;
4689  		if (q->elevator && q->elevator->type->ops.depth_updated)
4690  			q->elevator->type->ops.depth_updated(hctx);
4691  	}
4692  	if (!ret) {
4693  		q->nr_requests = nr;
4694  		if (blk_mq_is_shared_tags(set->flags)) {
4695  			if (q->elevator)
4696  				blk_mq_tag_update_sched_shared_tags(q);
4697  			else
4698  				blk_mq_tag_resize_shared_tags(set, nr);
4699  		}
4700  	}
4701  
4702  	blk_mq_unquiesce_queue(q);
4703  
4704  	return ret;
4705  }
4706  
4707  /*
4708   * request_queue and elevator_type pair.
4709   * It is just used by __blk_mq_update_nr_hw_queues to cache
4710   * the elevator_type associated with a request_queue.
4711   */
4712  struct blk_mq_qe_pair {
4713  	struct list_head node;
4714  	struct request_queue *q;
4715  	struct elevator_type *type;
4716  };
4717  
4718  /*
4719   * Cache the elevator_type in qe pair list and switch the
4720   * io scheduler to 'none'
4721   */
blk_mq_elv_switch_none(struct list_head * head,struct request_queue * q)4722  static bool blk_mq_elv_switch_none(struct list_head *head,
4723  		struct request_queue *q)
4724  {
4725  	struct blk_mq_qe_pair *qe;
4726  
4727  	qe = kmalloc(sizeof(*qe), GFP_NOIO | __GFP_NOWARN | __GFP_NORETRY);
4728  	if (!qe)
4729  		return false;
4730  
4731  	/* q->elevator needs protection from ->sysfs_lock */
4732  	mutex_lock(&q->sysfs_lock);
4733  
4734  	/* the check has to be done with holding sysfs_lock */
4735  	if (!q->elevator) {
4736  		kfree(qe);
4737  		goto unlock;
4738  	}
4739  
4740  	INIT_LIST_HEAD(&qe->node);
4741  	qe->q = q;
4742  	qe->type = q->elevator->type;
4743  	/* keep a reference to the elevator module as we'll switch back */
4744  	__elevator_get(qe->type);
4745  	list_add(&qe->node, head);
4746  	elevator_disable(q);
4747  unlock:
4748  	mutex_unlock(&q->sysfs_lock);
4749  
4750  	return true;
4751  }
4752  
blk_lookup_qe_pair(struct list_head * head,struct request_queue * q)4753  static struct blk_mq_qe_pair *blk_lookup_qe_pair(struct list_head *head,
4754  						struct request_queue *q)
4755  {
4756  	struct blk_mq_qe_pair *qe;
4757  
4758  	list_for_each_entry(qe, head, node)
4759  		if (qe->q == q)
4760  			return qe;
4761  
4762  	return NULL;
4763  }
4764  
blk_mq_elv_switch_back(struct list_head * head,struct request_queue * q)4765  static void blk_mq_elv_switch_back(struct list_head *head,
4766  				  struct request_queue *q)
4767  {
4768  	struct blk_mq_qe_pair *qe;
4769  	struct elevator_type *t;
4770  
4771  	qe = blk_lookup_qe_pair(head, q);
4772  	if (!qe)
4773  		return;
4774  	t = qe->type;
4775  	list_del(&qe->node);
4776  	kfree(qe);
4777  
4778  	mutex_lock(&q->sysfs_lock);
4779  	elevator_switch(q, t);
4780  	/* drop the reference acquired in blk_mq_elv_switch_none */
4781  	elevator_put(t);
4782  	mutex_unlock(&q->sysfs_lock);
4783  }
4784  
__blk_mq_update_nr_hw_queues(struct blk_mq_tag_set * set,int nr_hw_queues)4785  static void __blk_mq_update_nr_hw_queues(struct blk_mq_tag_set *set,
4786  							int nr_hw_queues)
4787  {
4788  	struct request_queue *q;
4789  	LIST_HEAD(head);
4790  	int prev_nr_hw_queues = set->nr_hw_queues;
4791  	int i;
4792  
4793  	lockdep_assert_held(&set->tag_list_lock);
4794  
4795  	if (set->nr_maps == 1 && nr_hw_queues > nr_cpu_ids)
4796  		nr_hw_queues = nr_cpu_ids;
4797  	if (nr_hw_queues < 1)
4798  		return;
4799  	if (set->nr_maps == 1 && nr_hw_queues == set->nr_hw_queues)
4800  		return;
4801  
4802  	list_for_each_entry(q, &set->tag_list, tag_set_list)
4803  		blk_mq_freeze_queue(q);
4804  	/*
4805  	 * Switch IO scheduler to 'none', cleaning up the data associated
4806  	 * with the previous scheduler. We will switch back once we are done
4807  	 * updating the new sw to hw queue mappings.
4808  	 */
4809  	list_for_each_entry(q, &set->tag_list, tag_set_list)
4810  		if (!blk_mq_elv_switch_none(&head, q))
4811  			goto switch_back;
4812  
4813  	list_for_each_entry(q, &set->tag_list, tag_set_list) {
4814  		blk_mq_debugfs_unregister_hctxs(q);
4815  		blk_mq_sysfs_unregister_hctxs(q);
4816  	}
4817  
4818  	if (blk_mq_realloc_tag_set_tags(set, nr_hw_queues) < 0)
4819  		goto reregister;
4820  
4821  fallback:
4822  	blk_mq_update_queue_map(set);
4823  	list_for_each_entry(q, &set->tag_list, tag_set_list) {
4824  		struct queue_limits lim;
4825  
4826  		blk_mq_realloc_hw_ctxs(set, q);
4827  
4828  		if (q->nr_hw_queues != set->nr_hw_queues) {
4829  			int i = prev_nr_hw_queues;
4830  
4831  			pr_warn("Increasing nr_hw_queues to %d fails, fallback to %d\n",
4832  					nr_hw_queues, prev_nr_hw_queues);
4833  			for (; i < set->nr_hw_queues; i++)
4834  				__blk_mq_free_map_and_rqs(set, i);
4835  
4836  			set->nr_hw_queues = prev_nr_hw_queues;
4837  			goto fallback;
4838  		}
4839  		lim = queue_limits_start_update(q);
4840  		if (blk_mq_can_poll(set))
4841  			lim.features |= BLK_FEAT_POLL;
4842  		else
4843  			lim.features &= ~BLK_FEAT_POLL;
4844  		if (queue_limits_commit_update(q, &lim) < 0)
4845  			pr_warn("updating the poll flag failed\n");
4846  		blk_mq_map_swqueue(q);
4847  	}
4848  
4849  reregister:
4850  	list_for_each_entry(q, &set->tag_list, tag_set_list) {
4851  		blk_mq_sysfs_register_hctxs(q);
4852  		blk_mq_debugfs_register_hctxs(q);
4853  	}
4854  
4855  switch_back:
4856  	list_for_each_entry(q, &set->tag_list, tag_set_list)
4857  		blk_mq_elv_switch_back(&head, q);
4858  
4859  	list_for_each_entry(q, &set->tag_list, tag_set_list)
4860  		blk_mq_unfreeze_queue(q);
4861  
4862  	/* Free the excess tags when nr_hw_queues shrink. */
4863  	for (i = set->nr_hw_queues; i < prev_nr_hw_queues; i++)
4864  		__blk_mq_free_map_and_rqs(set, i);
4865  }
4866  
blk_mq_update_nr_hw_queues(struct blk_mq_tag_set * set,int nr_hw_queues)4867  void blk_mq_update_nr_hw_queues(struct blk_mq_tag_set *set, int nr_hw_queues)
4868  {
4869  	mutex_lock(&set->tag_list_lock);
4870  	__blk_mq_update_nr_hw_queues(set, nr_hw_queues);
4871  	mutex_unlock(&set->tag_list_lock);
4872  }
4873  EXPORT_SYMBOL_GPL(blk_mq_update_nr_hw_queues);
4874  
blk_hctx_poll(struct request_queue * q,struct blk_mq_hw_ctx * hctx,struct io_comp_batch * iob,unsigned int flags)4875  static int blk_hctx_poll(struct request_queue *q, struct blk_mq_hw_ctx *hctx,
4876  			 struct io_comp_batch *iob, unsigned int flags)
4877  {
4878  	long state = get_current_state();
4879  	int ret;
4880  
4881  	do {
4882  		ret = q->mq_ops->poll(hctx, iob);
4883  		if (ret > 0) {
4884  			__set_current_state(TASK_RUNNING);
4885  			return ret;
4886  		}
4887  
4888  		if (signal_pending_state(state, current))
4889  			__set_current_state(TASK_RUNNING);
4890  		if (task_is_running(current))
4891  			return 1;
4892  
4893  		if (ret < 0 || (flags & BLK_POLL_ONESHOT))
4894  			break;
4895  		cpu_relax();
4896  	} while (!need_resched());
4897  
4898  	__set_current_state(TASK_RUNNING);
4899  	return 0;
4900  }
4901  
blk_mq_poll(struct request_queue * q,blk_qc_t cookie,struct io_comp_batch * iob,unsigned int flags)4902  int blk_mq_poll(struct request_queue *q, blk_qc_t cookie,
4903  		struct io_comp_batch *iob, unsigned int flags)
4904  {
4905  	struct blk_mq_hw_ctx *hctx = xa_load(&q->hctx_table, cookie);
4906  
4907  	return blk_hctx_poll(q, hctx, iob, flags);
4908  }
4909  
blk_rq_poll(struct request * rq,struct io_comp_batch * iob,unsigned int poll_flags)4910  int blk_rq_poll(struct request *rq, struct io_comp_batch *iob,
4911  		unsigned int poll_flags)
4912  {
4913  	struct request_queue *q = rq->q;
4914  	int ret;
4915  
4916  	if (!blk_rq_is_poll(rq))
4917  		return 0;
4918  	if (!percpu_ref_tryget(&q->q_usage_counter))
4919  		return 0;
4920  
4921  	ret = blk_hctx_poll(q, rq->mq_hctx, iob, poll_flags);
4922  	blk_queue_exit(q);
4923  
4924  	return ret;
4925  }
4926  EXPORT_SYMBOL_GPL(blk_rq_poll);
4927  
blk_mq_rq_cpu(struct request * rq)4928  unsigned int blk_mq_rq_cpu(struct request *rq)
4929  {
4930  	return rq->mq_ctx->cpu;
4931  }
4932  EXPORT_SYMBOL(blk_mq_rq_cpu);
4933  
blk_mq_cancel_work_sync(struct request_queue * q)4934  void blk_mq_cancel_work_sync(struct request_queue *q)
4935  {
4936  	struct blk_mq_hw_ctx *hctx;
4937  	unsigned long i;
4938  
4939  	cancel_delayed_work_sync(&q->requeue_work);
4940  
4941  	queue_for_each_hw_ctx(q, hctx, i)
4942  		cancel_delayed_work_sync(&hctx->run_work);
4943  }
4944  
blk_mq_init(void)4945  static int __init blk_mq_init(void)
4946  {
4947  	int i;
4948  
4949  	for_each_possible_cpu(i)
4950  		init_llist_head(&per_cpu(blk_cpu_done, i));
4951  	for_each_possible_cpu(i)
4952  		INIT_CSD(&per_cpu(blk_cpu_csd, i),
4953  			 __blk_mq_complete_request_remote, NULL);
4954  	open_softirq(BLOCK_SOFTIRQ, blk_done_softirq);
4955  
4956  	cpuhp_setup_state_nocalls(CPUHP_BLOCK_SOFTIRQ_DEAD,
4957  				  "block/softirq:dead", NULL,
4958  				  blk_softirq_cpu_dead);
4959  	cpuhp_setup_state_multi(CPUHP_BLK_MQ_DEAD, "block/mq:dead", NULL,
4960  				blk_mq_hctx_notify_dead);
4961  	cpuhp_setup_state_multi(CPUHP_AP_BLK_MQ_ONLINE, "block/mq:online",
4962  				blk_mq_hctx_notify_online,
4963  				blk_mq_hctx_notify_offline);
4964  	return 0;
4965  }
4966  subsys_initcall(blk_mq_init);
4967