/linux-6.12.1/block/ |
D | blk-mq-sched.h |
     40  if (rq->rq_flags & RQF_USE_SCHED) { in blk_mq_sched_allow_merge()
     51  if (rq->rq_flags & RQF_USE_SCHED) { in blk_mq_sched_completed_request()
     61  if (rq->rq_flags & RQF_USE_SCHED) { in blk_mq_sched_requeue_request()
|
D | blk-mq.c |
     361  data->rq_flags |= RQF_PM; in blk_mq_rq_ctx_init()
     363  data->rq_flags |= RQF_IO_STAT; in blk_mq_rq_ctx_init()
     364  rq->rq_flags = data->rq_flags; in blk_mq_rq_ctx_init()
     366  if (data->rq_flags & RQF_SCHED_TAGS) { in blk_mq_rq_ctx_init()
     389  if (rq->rq_flags & RQF_USE_SCHED) { in blk_mq_rq_ctx_init()
     426  if (!(data->rq_flags & RQF_SCHED_TAGS)) in __blk_mq_alloc_requests_batch()
     458  data->rq_flags |= RQF_SCHED_TAGS; in __blk_mq_alloc_requests()
     470  data->rq_flags |= RQF_USE_SCHED; in __blk_mq_alloc_requests()
     479  data->rq_flags |= RQF_RESV; in __blk_mq_alloc_requests()
     512  if (!(data->rq_flags & RQF_SCHED_TAGS)) in __blk_mq_alloc_requests()
     [all …]
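The allocation-time flags assembled above end up on the request itself (line 364), so a driver normally observes them through the helpers in include/linux/blk-mq.h rather than by reading rq_flags directly. A minimal sketch of the reserved-tag case, assuming a struct request_queue *q is already at hand and using REQ_OP_DRV_OUT purely as an illustrative opcode:

    #include <linux/blk-mq.h>
    #include <linux/err.h>

    /* Sketch only: asking for a reserved tag makes __blk_mq_alloc_requests()
     * set RQF_RESV (line 479 above); blk_mq_is_reserved_rq() reads it back. */
    static int demo_alloc_reserved(struct request_queue *q)
    {
            struct request *rq;

            rq = blk_mq_alloc_request(q, REQ_OP_DRV_OUT, BLK_MQ_REQ_RESERVED);
            if (IS_ERR(rq))
                    return PTR_ERR(rq);

            WARN_ON_ONCE(!blk_mq_is_reserved_rq(rq));  /* RQF_RESV was set at alloc time */

            blk_mq_free_request(rq);
            return 0;
    }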
|
D | blk-merge.c |
     581  if (rq->rq_flags & RQF_SPECIAL_PAYLOAD) in __blk_rq_map_sg()
     755  if (rq->rq_flags & RQF_MIXED_MERGE) in blk_rq_set_mixed_merge()
     768  rq->rq_flags |= RQF_MIXED_MERGE; in blk_rq_set_mixed_merge()
     787  if (req->rq_flags & RQF_MIXED_MERGE) { in blk_update_mixed_merge()
     885  if (((req->rq_flags | next->rq_flags) & RQF_MIXED_MERGE) || in attempt_merge()
     1032  if (req->rq_flags & RQF_ZONE_WRITE_PLUGGING) in bio_attempt_back_merge()
     1055  if (req->rq_flags & RQF_ZONE_WRITE_PLUGGING) in bio_attempt_front_merge()
|
D | blk-flush.c |
     120  rq->rq_flags &= ~RQF_FLUSH_SEQ; in blk_flush_restore_request()
     319  flush_rq->rq_flags |= RQF_FLUSH_SEQ; in blk_kick_flush()
     372  rq->rq_flags |= RQF_FLUSH_SEQ; in blk_rq_init_flush()
|
D | blk-pm.h | 21 if (rq->q->dev && !(rq->rq_flags & RQF_PM)) in blk_pm_mark_last_busy()
|
D | blk.h |
     156  if (rq->rq_flags & RQF_NOMERGE_FLAGS) in rq_mergeable()
     310  #define ELV_ON_HASH(rq) ((rq)->rq_flags & RQF_HASHED)
     416  return (rq->rq_flags & RQF_IO_STAT) && !blk_rq_is_passthrough(rq); in blk_do_io_stat()
     501  if (rq->rq_flags & RQF_ZONE_WRITE_PLUGGING) in blk_zone_finish_request()
|
D | blk-timeout.c | 140 req->rq_flags &= ~RQF_TIMED_OUT; in blk_add_timer()
|
D | blk-mq.h |
     154  req_flags_t rq_flags; member
     226  if (data->rq_flags & RQF_SCHED_TAGS) in blk_mq_tags_from_data()
|
/linux-6.12.1/include/linux/ |
D | blk-mq.h |
     108  req_flags_t rq_flags; member
     866  return (rq->rq_flags & (RQF_IO_STAT | RQF_STATS | RQF_USE_SCHED)); in blk_mq_need_time_stamp()
     871  return rq->rq_flags & RQF_RESV; in blk_mq_is_reserved_rq()
     886  if (!iob || (req->rq_flags & RQF_SCHED_TAGS) || ioerror || in blk_mq_add_to_batch()
     1104  if (rq->rq_flags & RQF_SPECIAL_PAYLOAD) in blk_rq_payload_bytes()
     1115  if (rq->rq_flags & RQF_SPECIAL_PAYLOAD) in req_bvec()
     1154  if (rq->rq_flags & RQF_SPECIAL_PAYLOAD) in blk_rq_nr_phys_segments()
|
/linux-6.12.1/kernel/sched/ |
D | sched.h |
     1697  struct rq_flags { struct
     1722  static inline void rq_pin_lock(struct rq *rq, struct rq_flags *rf) in rq_pin_lock()
     1735  static inline void rq_unpin_lock(struct rq *rq, struct rq_flags *rf) in rq_unpin_lock()
     1745  static inline void rq_repin_lock(struct rq *rq, struct rq_flags *rf) in rq_repin_lock()
     1758  struct rq *__task_rq_lock(struct task_struct *p, struct rq_flags *rf)
     1762  struct rq *task_rq_lock(struct task_struct *p, struct rq_flags *rf)
     1766  static inline void __task_rq_unlock(struct rq *rq, struct rq_flags *rf) in __task_rq_unlock()
     1774  task_rq_unlock(struct rq *rq, struct task_struct *p, struct rq_flags *rf) in task_rq_unlock()
     1786  struct rq *rq; struct rq_flags rf)
     1788  static inline void rq_lock_irqsave(struct rq *rq, struct rq_flags *rf) in rq_lock_irqsave()
     [all …]
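Note that struct rq_flags here is the scheduler's own type, unrelated to the block layer's req_flags_t field of the same name: it carries the saved IRQ state and the lockdep pin cookie across a locked runqueue section. A minimal sketch of the usual calling pattern inside kernel/sched code (the demo function itself is hypothetical; task_rq_lock()/task_rq_unlock() are the helpers declared above):

    /* Sketch only: rf holds the irqsave flags and the pin cookie between the
     * lock and unlock calls. */
    static void demo_with_task_rq_locked(struct task_struct *p)
    {
            struct rq_flags rf;
            struct rq *rq;

            rq = task_rq_lock(p, &rf);      /* takes p->pi_lock and the rq lock, pins rq via rf */
            /* ... p cannot change runqueues while the lock is held ... */
            task_rq_unlock(rq, p, &rf);     /* unpins and drops both locks */
    }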
|
D | core.c |
     667  struct rq *__task_rq_lock(struct task_struct *p, struct rq_flags *rf) in __task_rq_lock()
     691  struct rq *task_rq_lock(struct task_struct *p, struct rq_flags *rf) in task_rq_lock()
     829  struct rq_flags rf; in hrtick()
     857  struct rq_flags rf; in __hrtick_start()
     1771  struct rq_flags rf; in uclamp_update_active()
     2193  struct rq_flags rf; in wait_task_inactive()
     2428  static struct rq *move_queued_task(struct rq *rq, struct rq_flags *rf, in move_queued_task()
     2474  static struct rq *__migrate_task(struct rq *rq, struct rq_flags *rf, in __migrate_task()
     2498  struct rq_flags rf; in migration_cpu_stop()
     2865  static int affine_move_task(struct rq *rq, struct task_struct *p, struct rq_flags *rf, in affine_move_task()
     [all …]
|
D | stop_task.c | 19 balance_stop(struct rq *rq, struct task_struct *prev, struct rq_flags *rf) in balance_stop()
|
D | psi.c |
     1049  struct rq_flags rf; in psi_memstall_enter()
     1080  struct rq_flags rf; in psi_memstall_leave()
     1149  struct rq_flags rf; in cgroup_move_task()
     1225  struct rq_flags rf; in psi_cgroup_restart()
|
D | core_sched.c | 57 struct rq_flags rf; in sched_core_update_cookie()
|
D | deadline.c |
     1176  static void __push_dl_task(struct rq *rq, struct rq_flags *rf) in __push_dl_task()
     1204  struct rq_flags *rf = &scope.rf; in dl_server_timer()
     1270  struct rq_flags rf; in dl_task_timer()
     1764  struct rq_flags rf; in inactive_task_timer()
     2264  struct rq_flags rf; in migrate_task_rq_dl()
     2316  static int balance_dl(struct rq *rq, struct task_struct *p, struct rq_flags *rf) in balance_dl()
     2949  struct rq_flags rf; in dl_add_task_root_domain()
|
/linux-6.12.1/drivers/net/ethernet/fungible/funcore/ |
D | fun_queue.h |
     69  u16 rq_flags; member
     120  u16 rq_flags; member
|
D | fun_queue.c |
     459  funq->rq_flags = req->rq_flags | FUN_ADMIN_EPSQ_CREATE_FLAG_RQ; in fun_alloc_queue()
     528  rc = fun_sq_create(fdev, funq->rq_flags, funq->rqid, funq->cqid, 0, in fun_create_rq()
|
/linux-6.12.1/include/linux/sunrpc/ |
D | svc.h |
     210  unsigned long rq_flags; /* flags field */ member
     310  set_bit(RQ_VICTIM, &rqstp->rq_flags); in svc_thread_should_stop()
     312  return test_bit(RQ_VICTIM, &rqstp->rq_flags); in svc_thread_should_stop()
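Unlike the block-layer rq_flags above, struct svc_rqst's rq_flags is a plain unsigned long bit array indexed by RQ_* bit numbers (RQ_VICTIM, RQ_SECURE, RQ_DATA, …), so it is driven with the atomic bitops, as the svc_xprt.c and nfsd hits further down show. A small sketch of that idiom; it assumes a struct svc_rqst *rqstp in scope, and the release helper is hypothetical:

    /* Sketch only: set, test and clear RQ_* bits atomically on rqstp->rq_flags. */
    if (!test_bit(RQ_DATA, &rqstp->rq_flags))
            set_bit(RQ_DATA, &rqstp->rq_flags);        /* mark the slot as reserved */

    if (test_and_clear_bit(RQ_DATA, &rqstp->rq_flags))
            release_reserved_slot(rqstp);              /* hypothetical helper, runs exactly once */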
|
/linux-6.12.1/drivers/scsi/ |
D | scsi_lib.c |
     118  if (rq->rq_flags & RQF_DONTPREP) { in scsi_mq_requeue_cmd()
     119  rq->rq_flags &= ~RQF_DONTPREP; in scsi_mq_requeue_cmd()
     320  req->rq_flags |= RQF_QUIET; in scsi_execute_cmd()
     731  if (!(rq->rq_flags & RQF_MIXED_MERGE)) in scsi_rq_err_bytes()
     910  if (!(req->rq_flags & RQF_QUIET)) { in scsi_io_completion_action()
     1001  else if (req->rq_flags & RQF_QUIET) in scsi_io_completion_nz_result()
     1235  if (rq->rq_flags & RQF_DONTPREP) { in scsi_cleanup_rq()
     1237  rq->rq_flags &= ~RQF_DONTPREP; in scsi_cleanup_rq()
     1316  if (req && WARN_ON_ONCE(!(req->rq_flags & RQF_PM))) in scsi_device_state_check()
     1324  if (req && !(req->rq_flags & RQF_PM)) in scsi_device_state_check()
     [all …]
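The scsi_mq_requeue_cmd()/scsi_cleanup_rq() hits show the usual RQF_DONTPREP contract: the flag marks a request whose one-time preparation has been done, and whoever requeues or recycles the request undoes that preparation and clears the flag so it is redone next time. A hedged sketch of the clear-on-requeue side for a hypothetical driver (the mydrv_* names are invented, not SCSI code):

    /* Sketch only: drop per-request setup exactly once before requeueing. */
    static void mydrv_requeue(struct request *rq)
    {
            if (rq->rq_flags & RQF_DONTPREP) {
                    rq->rq_flags &= ~RQF_DONTPREP;
                    mydrv_uninit_cmd(blk_mq_rq_to_pdu(rq));  /* hypothetical teardown */
            }
            blk_mq_requeue_request(rq, true);  /* true also kicks the requeue list */
    }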
|
/linux-6.12.1/drivers/nvme/host/ |
D | ioctl.c |
     102  struct nvme_command *cmd, blk_opf_t rq_flags, in nvme_alloc_user_request() argument
     107  req = blk_mq_alloc_request(q, nvme_req_op(cmd) | rq_flags, blk_flags); in nvme_alloc_user_request()
     461  blk_opf_t rq_flags = REQ_ALLOC_CACHE; in nvme_uring_cmd_io() local
     496  rq_flags |= REQ_NOWAIT; in nvme_uring_cmd_io()
     500  rq_flags |= REQ_POLLED; in nvme_uring_cmd_io()
     502  req = nvme_alloc_user_request(q, &c, rq_flags, blk_flags); in nvme_uring_cmd_io()
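In this file rq_flags is not the request's req_flags_t at all but a local blk_opf_t, i.e. REQ_* operation flags that get OR-ed into the opcode handed to blk_mq_alloc_request(). A sketch of that assembly; the two conditions and the REQ_OP_DRV_IN opcode are chosen only for illustration, and req/q are assumed to be in scope:

    /* Sketch only: build REQ_* op flags the way nvme_uring_cmd_io() does above. */
    blk_opf_t op_flags = REQ_ALLOC_CACHE;

    if (nonblocking)            /* hypothetical: caller asked for non-blocking submission */
            op_flags |= REQ_NOWAIT;
    if (want_poll)              /* hypothetical: caller wants completion polling */
            op_flags |= REQ_POLLED;

    req = blk_mq_alloc_request(q, REQ_OP_DRV_IN | op_flags, 0);
    if (IS_ERR(req))
            return PTR_ERR(req);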
|
/linux-6.12.1/drivers/mmc/core/ |
D | queue.c |
     243  req->rq_flags |= RQF_QUIET; in mmc_mq_queue_rq()
     291  if (!(req->rq_flags & RQF_DONTPREP)) { in mmc_mq_queue_rq()
     293  req->rq_flags |= RQF_DONTPREP; in mmc_mq_queue_rq()
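mmc_mq_queue_rq() is the set-once counterpart to the clear-on-requeue sketch under scsi_lib.c above: per-request preparation runs only the first time ->queue_rq() sees the request. A tiny illustrative fragment (the setup helper is hypothetical):

    /* Sketch only: prepare the request exactly once, even if it is requeued later. */
    if (!(req->rq_flags & RQF_DONTPREP)) {
            mydrv_prep_request(req);        /* hypothetical one-time setup */
            req->rq_flags |= RQF_DONTPREP;
    }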
|
/linux-6.12.1/net/sunrpc/ |
D | svc_xprt.c |
     419  if (!test_bit(RQ_DATA, &rqstp->rq_flags)) { in svc_xprt_reserve_slot()
     423  set_bit(RQ_DATA, &rqstp->rq_flags); in svc_xprt_reserve_slot()
     431  if (test_and_clear_bit(RQ_DATA, &rqstp->rq_flags)) { in svc_xprt_release_slot()
     1185  if (rqstp->rq_arg.page_len || !test_bit(RQ_USEDEFERRAL, &rqstp->rq_flags)) in svc_defer()
     1216  set_bit(RQ_DROPME, &rqstp->rq_flags); in svc_defer()
|
/linux-6.12.1/drivers/md/ |
D | dm-rq.c |
     266  if (rq->rq_flags & RQF_FAILED) in dm_softirq_done()
     293  rq->rq_flags |= RQF_FAILED; in dm_kill_unmapped_request()
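dm-rq uses RQF_FAILED as a note-to-self: dm_kill_unmapped_request() sets it where the failure is detected, and dm_softirq_done() tests it later on the completion path. A hedged sketch of that split for a hypothetical driver (mydrv_* names invented):

    /* Sketch only: flag the failure where it happens ... */
    static void mydrv_kill_rq(struct request *rq)
    {
            rq->rq_flags |= RQF_FAILED;
            blk_mq_complete_request(rq);
    }

    /* ... and turn the flag into a status on the completion side. */
    static void mydrv_softirq_done(struct request *rq)
    {
            blk_mq_end_request(rq, (rq->rq_flags & RQF_FAILED) ?
                               BLK_STS_IOERR : BLK_STS_OK);
    }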
|
/linux-6.12.1/drivers/scsi/device_handler/ |
D | scsi_dh_hp_sw.c | 191 req->rq_flags |= RQF_QUIET; in hp_sw_prep_fn()
|
/linux-6.12.1/fs/nfsd/ |
D | nfscache.c |
     536  if (!test_bit(RQ_SECURE, &rqstp->rq_flags) && rp->c_secure) in nfsd_cache_lookup()
     632  rp->c_secure = test_bit(RQ_SECURE, &rqstp->rq_flags); in nfsd_cache_update()
|