Lines matching full:plug (whole-word occurrences of the identifier "plug")
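
The hits below all come from the blk-mq plugging path in block/blk-mq.c: filling and draining the per-task request cache (plug->cached_rq), batching freshly allocated requests on plug->mq_list, and flushing that list when the plug fills up, the task schedules, or the plug is finished. As a reference point, here is a minimal sketch of the caller side of that mechanism; blk_start_plug()/blk_finish_plug() are the public entry points, and submit_batch() is a hypothetical caller used only for illustration.

/*
 * Minimal sketch of the caller side of plugging. blk_start_plug() installs
 * the on-stack plug as current->plug; every bio submitted while it is
 * active goes through the blk_mq_submit_bio()/blk_add_rq_to_plug() hits
 * below, and blk_finish_plug() flushes plug->mq_list and frees whatever
 * is left on plug->cached_rq (see the blk_mq_free_plug_rqs() hit).
 */
#include <linux/blkdev.h>
#include <linux/bio.h>

static void submit_batch(struct bio **bios, unsigned int nr)
{
	struct blk_plug plug;
	unsigned int i;

	blk_start_plug(&plug);		/* current->plug = &plug */
	for (i = 0; i < nr; i++)
		submit_bio(bios[i]);	/* requests accumulate on the plug */
	blk_finish_plug(&plug);		/* flush mq_list, free cached_rq */
}
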
520 struct blk_plug *plug, in blk_mq_rq_cache_fill() argument
528 .nr_tags = plug->nr_ios, in blk_mq_rq_cache_fill()
529 .cached_rq = &plug->cached_rq, in blk_mq_rq_cache_fill()
536 plug->nr_ios = 1; in blk_mq_rq_cache_fill()
548 struct blk_plug *plug = current->plug; in blk_mq_alloc_cached_request() local
551 if (!plug) in blk_mq_alloc_cached_request()
554 if (rq_list_empty(plug->cached_rq)) { in blk_mq_alloc_cached_request()
555 if (plug->nr_ios == 1) in blk_mq_alloc_cached_request()
557 rq = blk_mq_rq_cache_fill(q, plug, opf, flags); in blk_mq_alloc_cached_request()
561 rq = rq_list_peek(&plug->cached_rq); in blk_mq_alloc_cached_request()
570 plug->cached_rq = rq_list_next(rq); in blk_mq_alloc_cached_request()
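
A hedged reconstruction of the allocation-side flow outlined by the blk_mq_rq_cache_fill() and blk_mq_alloc_cached_request() hits above: the first cached allocation batches plug->nr_ios requests into plug->cached_rq (and resets the budget to 1 so it is only spent once), and later allocations are satisfied by unlinking the head of that list. Only the lines matching plug are shown above, so the surrounding checks are condensed here into the hypothetical helper cached_rq_suitable().

/*
 * Sketch (not verbatim kernel code) of allocating a request from the
 * plug's cache. Field and helper names follow the matching lines above;
 * cached_rq_suitable() is a hypothetical stand-in for the queue/flag
 * compatibility checks, and queue-reference handling is elided.
 */
static bool cached_rq_suitable(struct request *rq, struct request_queue *q,
			       blk_opf_t opf, blk_mq_req_flags_t flags);

static struct request *alloc_cached_request_sketch(struct request_queue *q,
						   blk_opf_t opf,
						   blk_mq_req_flags_t flags)
{
	struct blk_plug *plug = current->plug;
	struct request *rq;

	if (!plug)
		return NULL;			/* no plug: normal allocation path */

	if (rq_list_empty(plug->cached_rq)) {
		if (plug->nr_ios == 1)
			return NULL;		/* no batching requested, skip the cache */
		/*
		 * Batch plug->nr_ios allocations; the first request comes
		 * back already unlinked, the surplus lands on plug->cached_rq.
		 */
		rq = blk_mq_rq_cache_fill(q, plug, opf, flags);
		if (!rq)
			return NULL;
	} else {
		rq = rq_list_peek(&plug->cached_rq);
		if (!rq || !cached_rq_suitable(rq, q, opf, flags))
			return NULL;		/* cached request is for another queue/op */
		/* commit: unlink the head of the cache */
		plug->cached_rq = rq_list_next(rq);
	}

	/* per-request op/flag initialisation elided */
	return rq;
}
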
743 void blk_mq_free_plug_rqs(struct blk_plug *plug) in blk_mq_free_plug_rqs() argument
747 while ((rq = rq_list_pop(&plug->cached_rq)) != NULL) in blk_mq_free_plug_rqs()
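
Both plug->cached_rq and plug->mq_list are the same kind of structure: a singly linked rq_list threaded through the requests themselves, manipulated with the rq_list_peek()/rq_list_pop()/rq_list_add()/rq_list_next()/rq_list_empty() helpers that recur throughout these hits (the blk_mq_free_plug_rqs() hit is the canonical drain loop: pop until the list is empty, freeing each request). A self-contained model of those semantics, written as plain C rather than the kernel's actual definitions:

/*
 * Self-contained model (plain C, simplified) of the rq_list helpers used
 * above: a singly linked list threaded through the requests, with the
 * list head being just a "struct request *". It mirrors the semantics
 * seen in the hits (peek the head, pop the head, push at the head, walk
 * via next, empty == NULL head); it is a model, not the kernel's code.
 */
#include <stddef.h>

struct request {
	struct request *rq_next;	/* link used by the list helpers */
	/* ... payload omitted ... */
};

static inline struct request *rq_list_peek_model(struct request **list)
{
	return *list;			/* look at the head without unlinking */
}

static inline struct request *rq_list_pop_model(struct request **list)
{
	struct request *rq = *list;	/* unlink and return the head */

	if (rq)
		*list = rq->rq_next;
	return rq;
}

static inline void rq_list_add_model(struct request **list, struct request *rq)
{
	rq->rq_next = *list;		/* push at the head */
	*list = rq;
}

static inline int rq_list_empty_model(struct request *list)
{
	return list == NULL;		/* callers pass the head by value */
}

static inline struct request *rq_list_next_model(struct request *rq)
{
	return rq->rq_next;
}
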
1270 * Allow 2x BLK_MAX_REQUEST_COUNT requests on plug queue for multiple
1274 static inline unsigned short blk_plug_max_rq_count(struct blk_plug *plug) in blk_plug_max_rq_count() argument
1276 if (plug->multiple_queues) in blk_plug_max_rq_count()
1281 static void blk_add_rq_to_plug(struct blk_plug *plug, struct request *rq) in blk_add_rq_to_plug() argument
1283 struct request *last = rq_list_peek(&plug->mq_list); in blk_add_rq_to_plug()
1285 if (!plug->rq_count) { in blk_add_rq_to_plug()
1287 } else if (plug->rq_count >= blk_plug_max_rq_count(plug) || in blk_add_rq_to_plug()
1290 blk_mq_flush_plug_list(plug, false); in blk_add_rq_to_plug()
1295 if (!plug->multiple_queues && last && last->q != rq->q) in blk_add_rq_to_plug()
1296 plug->multiple_queues = true; in blk_add_rq_to_plug()
1301 if (!plug->has_elevator && (rq->rq_flags & RQF_SCHED_TAGS)) in blk_add_rq_to_plug()
1302 plug->has_elevator = true; in blk_add_rq_to_plug()
1304 rq_list_add(&plug->mq_list, rq); in blk_add_rq_to_plug()
1305 plug->rq_count++; in blk_add_rq_to_plug()
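
A hedged reconstruction of the add-to-plug policy visible in the blk_plug_max_rq_count()/blk_add_rq_to_plug() hits: a plug holds at most BLK_MAX_REQUEST_COUNT requests (twice that once it spans multiple queues) and is flushed synchronously when the limit is reached; it also records whether it spans multiple queues or contains scheduler-tagged requests, which blk_mq_flush_plug_list() later uses to pick a dispatch strategy. Trace points and the rest of the overflow condition (the matching line ends in ||) are elided.

/*
 * Sketch of blk_add_rq_to_plug(), reconstructed from the matching lines;
 * not verbatim. BLK_MAX_REQUEST_COUNT is the normal per-plug cap.
 */
static unsigned short plug_max_rq_count_sketch(struct blk_plug *plug)
{
	if (plug->multiple_queues)
		return BLK_MAX_REQUEST_COUNT * 2;
	return BLK_MAX_REQUEST_COUNT;
}

static void add_rq_to_plug_sketch(struct blk_plug *plug, struct request *rq)
{
	struct request *last = rq_list_peek(&plug->mq_list);

	if (!plug->rq_count) {
		/* first request on this plug; nothing to flush yet */
	} else if (plug->rq_count >= plug_max_rq_count_sketch(plug)) {
		/* plug is full (the real condition also flushes on a large
		 * last request): hand the accumulated list to the driver */
		blk_mq_flush_plug_list(plug, false);
		last = NULL;			/* old list is gone */
	}

	if (!plug->multiple_queues && last && last->q != rq->q)
		plug->multiple_queues = true;	/* plug now spans several queues */
	if (!plug->has_elevator && (rq->rq_flags & RQF_SCHED_TAGS))
		plug->has_elevator = true;	/* a request holds scheduler tags */

	rq_list_add(&plug->mq_list, rq);
	plug->rq_count++;
}
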
1329 if (current->plug && !at_head) { in blk_execute_rq_nowait()
1330 blk_add_rq_to_plug(current->plug, rq); in blk_execute_rq_nowait()
2456 * preemption doesn't flush plug list, so it's possible ctx->cpu is in blk_mq_insert_requests()
2661 static void blk_mq_plug_issue_direct(struct blk_plug *plug) in blk_mq_plug_issue_direct() argument
2668 while ((rq = rq_list_pop(&plug->mq_list))) { in blk_mq_plug_issue_direct()
2669 bool last = rq_list_empty(plug->mq_list); in blk_mq_plug_issue_direct()
2701 struct blk_plug *plug) in __blk_mq_flush_plug_list() argument
2705 q->mq_ops->queue_rqs(&plug->mq_list); in __blk_mq_flush_plug_list()
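
The blk_mq_plug_issue_direct() and __blk_mq_flush_plug_list() hits are the two direct-issue flavours used when the plug is cheap to flush: hand the entire plug->mq_list to the driver in a single ->queue_rqs() call, or pop and issue requests one by one, flagging the last one so the driver can defer ringing its doorbell until the end. A condensed sketch of that pairing; quiesce checks and per-request error handling are elided, and issue_one_direct() is a hypothetical stand-in for the per-request issue step.

/*
 * Sketch of the two direct-issue flavours seen above; reconstructed from
 * the matching lines, not verbatim.
 */

/* hypothetical stand-in for issuing one request straight to its hctx */
static void issue_one_direct(struct request *rq, bool last);

/* whole-list hand-off: the driver consumes plug->mq_list in one call */
static void flush_plug_list_batch_sketch(struct request_queue *q,
					 struct blk_plug *plug)
{
	if (q->mq_ops->queue_rqs)
		q->mq_ops->queue_rqs(&plug->mq_list);
}

/* one-by-one issue: "last" lets the driver ring its doorbell only once */
static void plug_issue_direct_sketch(struct blk_plug *plug)
{
	struct request *rq;

	while ((rq = rq_list_pop(&plug->mq_list))) {
		bool last = rq_list_empty(plug->mq_list);

		issue_one_direct(rq, last);	/* error/busy handling elided */
	}
}
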
2708 static void blk_mq_dispatch_plug_list(struct blk_plug *plug, bool from_sched) in blk_mq_dispatch_plug_list() argument
2719 struct request *rq = rq_list_pop(&plug->mq_list); in blk_mq_dispatch_plug_list()
2732 } while (!rq_list_empty(plug->mq_list)); in blk_mq_dispatch_plug_list()
2734 plug->mq_list = requeue_list; in blk_mq_dispatch_plug_list()
2754 void blk_mq_flush_plug_list(struct blk_plug *plug, bool from_schedule) in blk_mq_flush_plug_list() argument
2761 * plug->mq_list via a schedule() in the driver's queue_rq() callback. in blk_mq_flush_plug_list()
2766 if (plug->rq_count == 0) in blk_mq_flush_plug_list()
2768 depth = plug->rq_count; in blk_mq_flush_plug_list()
2769 plug->rq_count = 0; in blk_mq_flush_plug_list()
2771 if (!plug->multiple_queues && !plug->has_elevator && !from_schedule) { in blk_mq_flush_plug_list()
2774 rq = rq_list_peek(&plug->mq_list); in blk_mq_flush_plug_list()
2780 * If we do, we can dispatch the whole plug list in one go. We in blk_mq_flush_plug_list()
2786 __blk_mq_flush_plug_list(q, plug)); in blk_mq_flush_plug_list()
2787 if (rq_list_empty(plug->mq_list)) in blk_mq_flush_plug_list()
2792 blk_mq_plug_issue_direct(plug)); in blk_mq_flush_plug_list()
2793 if (rq_list_empty(plug->mq_list)) in blk_mq_flush_plug_list()
2798 blk_mq_dispatch_plug_list(plug, from_schedule); in blk_mq_flush_plug_list()
2799 } while (!rq_list_empty(plug->mq_list)); in blk_mq_flush_plug_list()
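
A control-flow sketch of the flush policy that the blk_mq_flush_plug_list() hits outline: snapshot and clear rq_count, then try the cheapest strategy first. When the plug covers a single queue, holds no scheduler-tagged requests and is not being flushed from schedule(), the whole list is offered to ->queue_rqs() and any remainder is issued directly; otherwise blk_mq_dispatch_plug_list() repeatedly peels off the requests that share a hardware queue, parks the rest back on plug->mq_list, and inserts the batch, until the list is empty. The rcu/srcu dispatch guards and the recursion caveat from the quoted comment are elided; the two *_sketch() helpers are the ones defined after the __blk_mq_flush_plug_list() hit above.

/*
 * Control-flow sketch of blk_mq_flush_plug_list(); reconstructed from the
 * matching lines above, not verbatim.
 */
static void flush_plug_list_sketch(struct blk_plug *plug, bool from_schedule)
{
	unsigned int depth;

	if (plug->rq_count == 0)
		return;
	depth = plug->rq_count;		/* saved for the unplug trace point */
	plug->rq_count = 0;

	if (!plug->multiple_queues && !plug->has_elevator && !from_schedule) {
		struct request_queue *q = rq_list_peek(&plug->mq_list)->q;

		/* 1) offer the whole list to the driver's ->queue_rqs() */
		flush_plug_list_batch_sketch(q, plug);
		if (rq_list_empty(plug->mq_list))
			return;

		/* 2) issue whatever the driver left behind, one by one */
		plug_issue_direct_sketch(plug);
		if (rq_list_empty(plug->mq_list))
			return;
	}

	/* 3) slow path: insert per hardware queue until the list is empty */
	do {
		blk_mq_dispatch_plug_list(plug, from_schedule);
	} while (!rq_list_empty(plug->mq_list));
}
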
2848 struct blk_plug *plug, in blk_mq_get_new_requests() argument
2861 if (plug) { in blk_mq_get_new_requests()
2862 data.nr_tags = plug->nr_ios; in blk_mq_get_new_requests()
2863 plug->nr_ios = 1; in blk_mq_get_new_requests()
2864 data.cached_rq = &plug->cached_rq; in blk_mq_get_new_requests()
2879 static struct request *blk_mq_peek_cached_request(struct blk_plug *plug, in blk_mq_peek_cached_request() argument
2885 if (!plug) in blk_mq_peek_cached_request()
2887 rq = rq_list_peek(&plug->cached_rq); in blk_mq_peek_cached_request()
2898 static void blk_mq_use_cached_rq(struct request *rq, struct blk_plug *plug, in blk_mq_use_cached_rq() argument
2901 WARN_ON_ONCE(rq_list_peek(&plug->cached_rq) != rq); in blk_mq_use_cached_rq()
2905 * plug and hence killed the cached_rq list as well. Pop this entry in blk_mq_use_cached_rq()
2908 plug->cached_rq = rq_list_next(rq); in blk_mq_use_cached_rq()
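
The blk_mq_peek_cached_request()/blk_mq_use_cached_rq() hits show the two-phase use of the plug cache on the bio submission path: peek first, without modifying the list, so the request can still be rejected or the bio merged elsewhere; only once the request is definitely going to carry this bio is it unlinked from plug->cached_rq. A condensed sketch, with the queue/operation compatibility test folded into the hypothetical helper cached_rq_matches():

/*
 * Sketch of the peek-then-commit pattern; reconstructed from the matching
 * lines, not verbatim.
 */
static bool cached_rq_matches(struct request *rq, struct request_queue *q,
			      blk_opf_t opf);

static struct request *peek_cached_request_sketch(struct blk_plug *plug,
						  struct request_queue *q,
						  blk_opf_t opf)
{
	struct request *rq;

	if (!plug)
		return NULL;
	rq = rq_list_peek(&plug->cached_rq);
	if (!rq || !cached_rq_matches(rq, q, opf))
		return NULL;
	return rq;			/* still linked on plug->cached_rq */
}

static void use_cached_rq_sketch(struct request *rq, struct blk_plug *plug,
				 struct bio *bio)
{
	WARN_ON_ONCE(rq_list_peek(&plug->cached_rq) != rq);

	/* commit: unlink the head of the cache now that rq will carry bio */
	plug->cached_rq = rq_list_next(rq);

	/* initialising rq's op and flags from bio is elided */
}
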
2934 * * We want to place request at plug queue for possible future merging
2943 struct blk_plug *plug = current->plug; in blk_mq_submit_bio() local
2951 * If the plug has a cached request for this queue, try to use it. in blk_mq_submit_bio()
2953 rq = blk_mq_peek_cached_request(plug, q, bio->bi_opf); in blk_mq_submit_bio()
2956 * A BIO that was released from a zone write plug has already been in blk_mq_submit_bio()
3003 rq = blk_mq_get_new_requests(q, plug, bio, nr_segs); in blk_mq_submit_bio()
3007 blk_mq_use_cached_rq(rq, plug, bio); in blk_mq_submit_bio()
3030 if (plug) { in blk_mq_submit_bio()
3031 blk_add_rq_to_plug(plug, rq); in blk_mq_submit_bio()
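
Taken together, the blk_mq_submit_bio() hits show where the pieces above meet: try the plug's cached request first, otherwise allocate fresh requests (handing plug->nr_ios and &plug->cached_rq to the allocator so the surplus is cached for subsequent bios, as the blk_mq_get_new_requests() hits show), then either park the request on the plug or issue it immediately. A heavily condensed, hedged sketch of that flow; splitting, merging and zone write plugging are elided, peek_cached_request_sketch()/use_cached_rq_sketch()/add_rq_to_plug_sketch() are the sketches defined earlier, and get_new_requests_sketch()/issue_request_now() are hypothetical stand-ins.

/*
 * End-to-end sketch of the plug's role in blk_mq_submit_bio();
 * reconstructed from the matching lines, not verbatim.
 */

/* hypothetical stand-ins for the allocation and direct-issue paths */
static struct request *get_new_requests_sketch(struct request_queue *q,
					       struct blk_plug *plug,
					       struct bio *bio,
					       unsigned int nr_segs);
static void issue_request_now(struct request *rq);

static void submit_bio_plug_flow_sketch(struct request_queue *q,
					struct bio *bio, unsigned int nr_segs)
{
	struct blk_plug *plug = current->plug;
	struct request *rq;

	/* 1) reuse a request cached on the plug, if one fits this queue/op */
	rq = peek_cached_request_sketch(plug, q, bio->bi_opf);
	if (rq) {
		use_cached_rq_sketch(rq, plug, bio);
	} else {
		/*
		 * 2) allocate anew; with a plug active, the allocator is
		 *    handed plug->nr_ios and &plug->cached_rq so surplus
		 *    requests are cached for the next bios.
		 */
		rq = get_new_requests_sketch(q, plug, bio, nr_segs);
		if (!rq)
			return;
	}

	/* ... initialise rq from bio, attempt merges, etc. ... */

	/* 3) hold the request on the plug, or issue it right away */
	if (plug)
		add_rq_to_plug_sketch(plug, rq);
	else
		issue_request_now(rq);
}
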