Lines matching "input", "-", "depth" in block/blk-mq.h

1 /* SPDX-License-Identifier: GPL-2.0 */
5 #include <linux/blk-mq.h>
6 #include "blk-stat.h"
16 * struct blk_mq_ctx - State for a software queue facing the submitting CPUs
34 BLK_MQ_NO_TAG = -1U,
36 BLK_MQ_TAG_MAX = BLK_MQ_NO_TAG - 1,
64 unsigned int hctx_idx, unsigned int depth);
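
The tag constants above rely on -1U being the all-ones value of an unsigned int, so every valid tag index, including BLK_MQ_TAG_MAX, compares strictly below the "no tag" sentinel. A minimal standalone sketch of that relationship (the macros below simply re-state the listed values for illustration; this is not kernel code):

#include <stdio.h>

/* Standalone model of the tag sentinels shown in the listing above. */
#define BLK_MQ_NO_TAG   (-1U)                 /* "no driver tag assigned" */
#define BLK_MQ_TAG_MAX  (BLK_MQ_NO_TAG - 1)   /* highest index a valid tag can use */

int main(void)
{
	unsigned int tag = BLK_MQ_NO_TAG;

	/* Any real tag compares below the sentinel. */
	printf("sentinel=%u max_valid=%u has_tag=%d\n",
	       BLK_MQ_NO_TAG, BLK_MQ_TAG_MAX, tag != BLK_MQ_NO_TAG);
	return 0;
}
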
70 * CPU -> queue mappings
75 * blk_mq_map_queue_type() - map (hctx_type,cpu) to hardware queue
84 return xa_load(&q->hctx_table, q->tag_set->map[type].mq_map[cpu]); in blk_mq_map_queue_type()
102 * blk_mq_map_queue() - map (cmd_flags,type) to hardware queue
111 return ctx->hctxs[blk_mq_get_hctx_type(opf)]; in blk_mq_map_queue()
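
blk_mq_map_queue_type() resolves a hardware queue in two steps: the hctx type selects one CPU-to-queue table, and the submitting CPU indexes into it. A hedged sketch of just that lookup, with simplified stand-in types (queue_map, map_queue and the toy spreading are invented here, not the kernel's definitions):

#include <stdio.h>

#define NR_CPUS   8
#define NR_TYPES  3   /* stand-ins for the default / read / poll map types */

/* Simplified stand-in for struct blk_mq_queue_map: one CPU->hwq table per type. */
struct queue_map {
	unsigned int mq_map[NR_CPUS];
};

static struct queue_map maps[NR_TYPES];

/* Model of the two-step lookup: the type picks a map, the map picks a hw queue. */
static unsigned int map_queue(unsigned int type, unsigned int cpu)
{
	return maps[type].mq_map[cpu];
}

int main(void)
{
	unsigned int cpu;

	/* Toy mapping: spread 8 CPUs over 4 hardware queues for type 0. */
	for (cpu = 0; cpu < NR_CPUS; cpu++)
		maps[0].mq_map[cpu] = cpu / 2;

	for (cpu = 0; cpu < NR_CPUS; cpu++)
		printf("cpu %u -> hwq %u\n", cpu, map_queue(0, cpu));
	return 0;
}
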
134 return per_cpu_ptr(q->queue_ctx, cpu); in __blk_mq_get_ctx()
138 * This assumes per-cpu software queueing queues. They could be per-node
139 * as well, for instance. For now this is hardcoded as-is. Note that we don't
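
__blk_mq_get_ctx() resolves the per-CPU software queue with per_cpu_ptr(). A minimal stand-in for that idea, using a plain array indexed by CPU id instead of real per-CPU storage (sw_ctx and get_ctx are invented names):

#include <stdio.h>

#define NR_CPUS 4

/* Simplified stand-in for struct blk_mq_ctx: one software queue per CPU. */
struct sw_ctx {
	unsigned int cpu;
	unsigned int queued;
};

static struct sw_ctx queue_ctx[NR_CPUS];

/* Model of per_cpu_ptr(q->queue_ctx, cpu): index the per-CPU area by cpu id. */
static struct sw_ctx *get_ctx(unsigned int cpu)
{
	return &queue_ctx[cpu];
}

int main(void)
{
	struct sw_ctx *ctx = get_ctx(2);

	ctx->cpu = 2;
	ctx->queued++;
	printf("ctx for cpu %u has %u queued request(s)\n", ctx->cpu, ctx->queued);
	return 0;
}
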
149 /* input parameter */
160 /* input & output parameter */
179 struct blk_mq_tags **tags, unsigned int depth, bool can_grow);
194 return &bt->ws[0]; in bt_wait_ptr()
195 return sbq_wait_ptr(bt, &hctx->wait_index); in bt_wait_ptr()
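
bt_wait_ptr() spreads tag waiters over the sbitmap queue's wait states, round-robining via the hctx's wait_index. The sketch below models only that round-robin selection with a plain counter (the kernel uses an atomic increment and real wait queues; wait_state and wait_ptr are invented stand-ins):

#include <stdio.h>

#define NR_WAIT_QUEUES 8

/* Stand-in for the sbitmap_queue wait-state array. */
struct wait_state {
	unsigned int id;
};

static struct wait_state ws[NR_WAIT_QUEUES];

/* Model of sbq_wait_ptr(): rotate over the wait states so waiters do not
 * all pile up on a single queue. */
static struct wait_state *wait_ptr(unsigned int *wait_index)
{
	struct wait_state *w = &ws[*wait_index % NR_WAIT_QUEUES];

	(*wait_index)++;
	return w;
}

int main(void)
{
	unsigned int wait_index = 0;
	int i;

	for (i = 0; i < NR_WAIT_QUEUES; i++)
		ws[i].id = i;
	for (i = 0; i < 4; i++)
		printf("waiter %d -> ws[%u]\n", i, wait_ptr(&wait_index)->id);
	return 0;
}
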
203 if (hctx->flags & BLK_MQ_F_TAG_QUEUE_SHARED) in blk_mq_tag_busy()
209 if (hctx->flags & BLK_MQ_F_TAG_QUEUE_SHARED) in blk_mq_tag_idle()
216 return tag < tags->nr_reserved_tags; in blk_mq_tag_is_reserved()
226 if (data->rq_flags & RQF_SCHED_TAGS) in blk_mq_tags_from_data()
227 return data->hctx->sched_tags; in blk_mq_tags_from_data()
228 return data->hctx->tags; in blk_mq_tags_from_data()
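
Two tag-set decisions are visible above: a tag below nr_reserved_tags is a reserved tag, and a request flagged with RQF_SCHED_TAGS allocates from the scheduler tags rather than the driver tags. A hedged standalone sketch of both checks, with simplified stand-in structures and an invented flag value:

#include <stdbool.h>
#include <stdio.h>

#define RQF_SCHED_TAGS (1U << 0)   /* stand-in flag bit, not the kernel value */

struct tags {
	const char *name;
	unsigned int nr_reserved_tags;
};

struct alloc_data {
	unsigned int rq_flags;
	struct tags *sched_tags;
	struct tags *driver_tags;
};

/* Reserved tags occupy the low indices of the tag space. */
static bool tag_is_reserved(const struct tags *t, unsigned int tag)
{
	return tag < t->nr_reserved_tags;
}

/* Pick scheduler tags or driver tags based on the request flags. */
static struct tags *tags_from_data(struct alloc_data *d)
{
	if (d->rq_flags & RQF_SCHED_TAGS)
		return d->sched_tags;
	return d->driver_tags;
}

int main(void)
{
	struct tags sched = { "sched", 0 }, drv = { "driver", 2 };
	struct alloc_data d = { RQF_SCHED_TAGS, &sched, &drv };

	printf("alloc from %s tags; driver tag 1 reserved: %d\n",
	       tags_from_data(&d)->name, tag_is_reserved(&drv, 1));
	return 0;
}
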
233 return test_bit(BLK_MQ_S_STOPPED, &hctx->state); in blk_mq_hctx_stopped()
238 return hctx->nr_ctx && hctx->tags; in blk_mq_hw_queue_mapped()
249 if (q->mq_ops->put_budget) in blk_mq_put_dispatch_budget()
250 q->mq_ops->put_budget(q, budget_token); in blk_mq_put_dispatch_budget()
255 if (q->mq_ops->get_budget) in blk_mq_get_dispatch_budget()
256 return q->mq_ops->get_budget(q); in blk_mq_get_dispatch_budget()
265 if (rq->q->mq_ops->set_rq_budget_token) in blk_mq_set_rq_budget_token()
266 rq->q->mq_ops->set_rq_budget_token(rq, token); in blk_mq_set_rq_budget_token()
271 if (rq->q->mq_ops->get_rq_budget_token) in blk_mq_get_rq_budget_token()
272 return rq->q->mq_ops->get_rq_budget_token(rq); in blk_mq_get_rq_budget_token()
273 return -1; in blk_mq_get_rq_budget_token()
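
The budget helpers above only call into the driver's ->get_budget()/->put_budget() and budget-token hooks when they are provided, and report -1 when no token exists. A hedged standalone model of that optional-callback pattern, with invented names and a toy driver that caps in-flight dispatches at 2:

#include <stdio.h>

/* Simplified stand-ins for the budget-related mq_ops callbacks. */
struct queue_ops {
	int  (*get_budget)(void *q);
	void (*put_budget)(void *q, int token);
};

struct queue {
	struct queue_ops *ops;
	void *driver_data;
};

/* Only consult the driver if it implements the hook; otherwise behave as if
 * budget is unlimited (return a dummy token). */
static int get_dispatch_budget(struct queue *q)
{
	if (q->ops->get_budget)
		return q->ops->get_budget(q->driver_data);
	return 0;
}

static void put_dispatch_budget(struct queue *q, int token)
{
	if (q->ops->put_budget)
		q->ops->put_budget(q->driver_data, token);
}

/* Toy driver: allow at most two dispatches in flight. */
static int inflight;

static int toy_get_budget(void *q)
{
	(void)q;
	return inflight < 2 ? inflight++ : -1;
}

static void toy_put_budget(void *q, int token)
{
	(void)q; (void)token;
	inflight--;
}

int main(void)
{
	struct queue_ops ops = { toy_get_budget, toy_put_budget };
	struct queue q = { &ops, NULL };
	int t1 = get_dispatch_budget(&q);
	int t2 = get_dispatch_budget(&q);
	int t3 = get_dispatch_budget(&q);

	printf("tokens: %d %d %d\n", t1, t2, t3);
	put_dispatch_budget(&q, t1);
	put_dispatch_budget(&q, t2);
	return 0;
}
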
279 if (blk_mq_is_shared_tags(hctx->flags)) in __blk_mq_add_active_requests()
280 atomic_add(val, &hctx->queue->nr_active_requests_shared_tags); in __blk_mq_add_active_requests()
282 atomic_add(val, &hctx->nr_active); in __blk_mq_add_active_requests()
293 if (blk_mq_is_shared_tags(hctx->flags)) in __blk_mq_sub_active_requests()
294 atomic_sub(val, &hctx->queue->nr_active_requests_shared_tags); in __blk_mq_sub_active_requests()
296 atomic_sub(val, &hctx->nr_active); in __blk_mq_sub_active_requests()
307 if (hctx->flags & BLK_MQ_F_TAG_QUEUE_SHARED) in blk_mq_add_active_requests()
313 if (hctx->flags & BLK_MQ_F_TAG_QUEUE_SHARED) in blk_mq_inc_active_requests()
320 if (hctx->flags & BLK_MQ_F_TAG_QUEUE_SHARED) in blk_mq_sub_active_requests()
326 if (hctx->flags & BLK_MQ_F_TAG_QUEUE_SHARED) in blk_mq_dec_active_requests()
332 if (blk_mq_is_shared_tags(hctx->flags)) in __blk_mq_active_requests()
333 return atomic_read(&hctx->queue->nr_active_requests_shared_tags); in __blk_mq_active_requests()
334 return atomic_read(&hctx->nr_active); in __blk_mq_active_requests()
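
The accounting helpers above count active requests in one of two places: a queue-wide counter when tags are shared across the tag set, or a per-hardware-queue counter otherwise. A hedged model of that choice, with plain ints standing in for the kernel's atomics and simplified structures:

#include <stdbool.h>
#include <stdio.h>

struct queue {
	int nr_active_requests_shared_tags;
};

struct hw_ctx {
	bool shared_tags;
	struct queue *queue;
	int nr_active;
};

/* Bump the queue-wide counter for shared tags, the per-hctx one otherwise. */
static void add_active_requests(struct hw_ctx *hctx, int val)
{
	if (hctx->shared_tags)
		hctx->queue->nr_active_requests_shared_tags += val;
	else
		hctx->nr_active += val;
}

static int active_requests(struct hw_ctx *hctx)
{
	if (hctx->shared_tags)
		return hctx->queue->nr_active_requests_shared_tags;
	return hctx->nr_active;
}

int main(void)
{
	struct queue q = { 0 };
	struct hw_ctx a = { true, &q, 0 }, b = { true, &q, 0 };

	add_active_requests(&a, 2);
	add_active_requests(&b, 3);
	printf("shared active count seen from hctx a: %d\n", active_requests(&a));
	return 0;
}
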
340 blk_mq_put_tag(hctx->tags, rq->mq_ctx, rq->tag); in __blk_mq_put_driver_tag()
341 rq->tag = BLK_MQ_NO_TAG; in __blk_mq_put_driver_tag()
346 if (rq->tag == BLK_MQ_NO_TAG || rq->internal_tag == BLK_MQ_NO_TAG) in blk_mq_put_driver_tag()
349 __blk_mq_put_driver_tag(rq->mq_hctx, rq); in blk_mq_put_driver_tag()
356 if (rq->tag == BLK_MQ_NO_TAG && !__blk_mq_alloc_driver_tag(rq)) in blk_mq_get_driver_tag()
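
The driver-tag helpers above imply a lifecycle: a request may carry only its scheduler (internal) tag until dispatch, acquires a driver tag on demand, and gives it back afterwards, with BLK_MQ_NO_TAG marking "no tag held". A hedged sketch of that flow; the allocator here is a toy counter, not the kernel's sbitmap:

#include <stdbool.h>
#include <stdio.h>

#define NO_TAG (-1U)

struct request {
	unsigned int tag;           /* driver tag, NO_TAG until dispatch */
	unsigned int internal_tag;  /* scheduler tag */
};

static unsigned int next_free_tag;   /* toy allocator: hands out increasing ids */

static bool alloc_driver_tag(struct request *rq)
{
	rq->tag = next_free_tag++;
	return true;
}

/* Allocate a driver tag only if the request does not already hold one. */
static bool get_driver_tag(struct request *rq)
{
	if (rq->tag == NO_TAG && !alloc_driver_tag(rq))
		return false;
	return true;
}

static void put_driver_tag(struct request *rq)
{
	if (rq->tag == NO_TAG)
		return;
	/* The real code returns the tag to the hctx tag set; here we just clear it. */
	rq->tag = NO_TAG;
}

int main(void)
{
	struct request rq = { NO_TAG, 7 };

	get_driver_tag(&rq);
	printf("driver tag after dispatch: %u\n", rq.tag);
	put_driver_tag(&rq);
	printf("driver tag after completion: %u (NO_TAG)\n", rq.tag);
	return 0;
}
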
367 qmap->mq_map[cpu] = 0; in blk_mq_clear_mq_map()
374 struct request *rq = list_entry_rq(list->next); in blk_mq_free_requests()
376 list_del_init(&rq->queuelist); in blk_mq_free_requests()
383 * and attempt to provide a fair share of the tag depth for each of them.
388 unsigned int depth, users; in hctx_may_queue() local
390 if (!hctx || !(hctx->flags & BLK_MQ_F_TAG_QUEUE_SHARED)) in hctx_may_queue()
396 if (bt->sb.depth == 1) in hctx_may_queue()
399 if (blk_mq_is_shared_tags(hctx->flags)) { in hctx_may_queue()
400 struct request_queue *q = hctx->queue; in hctx_may_queue()
402 if (!test_bit(QUEUE_FLAG_HCTX_ACTIVE, &q->queue_flags)) in hctx_may_queue()
405 if (!test_bit(BLK_MQ_S_TAG_ACTIVE, &hctx->state)) in hctx_may_queue()
409 users = READ_ONCE(hctx->tags->active_queues); in hctx_may_queue()
416 depth = max((bt->sb.depth + users - 1) / users, 4U); in hctx_may_queue()
417 return __blk_mq_active_requests(hctx) < depth; in hctx_may_queue()
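
The fair-share check in hctx_may_queue() boils down to a ceiling division of the total tag depth by the number of active shared users, with a floor of 4 so a queue is never starved completely. A standalone sketch of just that arithmetic (may_queue is an invented name):

#include <stdbool.h>
#include <stdio.h>

/* Each active user of a shared tag set may occupy at most
 * ceil(total_depth / users) tags, but never fewer than 4. */
static bool may_queue(unsigned int total_depth, unsigned int users,
		      unsigned int active_requests)
{
	unsigned int depth;

	if (!users)
		return true;                         /* no contention tracked yet */

	depth = (total_depth + users - 1) / users;   /* ceiling division */
	if (depth < 4)
		depth = 4;
	return active_requests < depth;
}

int main(void)
{
	/* 64 tags shared by 3 active queues -> each may hold up to 22 tags. */
	printf("21 active: %d, 22 active: %d\n",
	       may_queue(64, 3, 21), may_queue(64, 3, 22));
	return 0;
}
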
423 if ((q)->tag_set->flags & BLK_MQ_F_BLOCKING) { \
424 struct blk_mq_tag_set *__tag_set = (q)->tag_set; \
428 srcu_idx = srcu_read_lock(__tag_set->srcu); \
430 srcu_read_unlock(__tag_set->srcu, srcu_idx); \
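
The macro fragment above picks the read-side protection for a dispatch section based on BLK_MQ_F_BLOCKING: drivers that may sleep while dispatching are covered by SRCU on the tag set, others by plain RCU. The sketch below only models the flag-driven selection with ordinary callbacks; it does not implement RCU or SRCU, and all names are invented stand-ins:

#include <stdio.h>

#define F_BLOCKING (1U << 0)   /* stand-in for BLK_MQ_F_BLOCKING */

struct tag_set {
	unsigned int flags;
};

struct queue {
	struct tag_set *tag_set;
};

/* Stand-ins for the two read-side protection schemes. */
static int  sleepable_lock(void)      { puts("srcu-like read lock");  return 0; }
static void sleepable_unlock(int idx) { (void)idx; puts("srcu-like read unlock"); }
static void nonsleep_lock(void)       { puts("rcu-like read lock"); }
static void nonsleep_unlock(void)     { puts("rcu-like read unlock"); }

/* Run a dispatch body under protection chosen by the queue's flags. */
static void run_dispatch_ops(struct queue *q, void (*body)(void))
{
	if (q->tag_set->flags & F_BLOCKING) {
		int idx = sleepable_lock();

		body();
		sleepable_unlock(idx);
	} else {
		nonsleep_lock();
		body();
		nonsleep_unlock();
	}
}

static void dispatch(void) { puts("dispatching requests"); }

int main(void)
{
	struct tag_set ts = { F_BLOCKING };
	struct queue q = { &ts };

	run_dispatch_ops(&q, dispatch);
	return 0;
}
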