Searched refs: task_rq  (Results 1 - 7 of 7)  sorted by relevance
/linux-6.12.1/kernel/sched/
ext.c
  1915  struct rq *rq = task_rq(p);  in direct_dispatch()
  2377  !WARN_ON_ONCE(src_rq != task_rq(p));  in unlink_dsq_and_lock_src_rq()
  2397  … *this_rq, struct task_struct *p, struct scx_dispatch_q *dsq, struct rq *task_rq) { return false; }  in consume_remote_task() (argument)
  2415  struct rq *task_rq = task_rq(p);  in consume_dispatch_q() (local)
  2417  if (rq == task_rq) {  in consume_dispatch_q()
  2425  if (likely(consume_remote_task(rq, p, dsq, task_rq)))  in consume_dispatch_q()
  2459  struct rq *src_rq = task_rq(p);  in dispatch_to_local_dsq()
  2504  !WARN_ON_ONCE(src_rq != task_rq(p))) {  in dispatch_to_local_dsq()
  3039  if (SCX_HAS_OP(core_sched_before) && !scx_rq_bypassing(task_rq(a)))  in scx_prio_less()
  3172  if (SCX_HAS_OP(select_cpu) && !scx_rq_bypassing(task_rq(p))) {  in select_task_rq_scx()
  [all …]
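The consume_dispatch_q() hits above (ext.c 2415, 2417, 2425) show the local-vs-remote split in sched_ext: the dispatching CPU's rq is compared against task_rq(p), and only the remote case goes through consume_remote_task(), whose parameter list appears at ext.c 2397. A minimal sketch of that decision follows; move_task_locally() is a hypothetical stand-in for the real local-move helper, and the function body is illustrative, not the actual ext.c code.

    /* Sketch of the rq == task_rq(p) split in consume_dispatch_q() (ext.c above).
     * consume_remote_task() matches the call at ext.c 2425; move_task_locally()
     * is a hypothetical placeholder for the local-move path. */
    static bool consume_one(struct rq *rq, struct task_struct *p,
                            struct scx_dispatch_q *dsq)
    {
            struct rq *task_rq = task_rq(p);        /* rq the task currently sits on */

            if (rq == task_rq) {                    /* already local: no remote lock */
                    move_task_locally(rq, p, dsq);
                    return true;
            }
            /* task belongs to another CPU's rq: hand off to the remote path */
            return consume_remote_task(rq, p, dsq, task_rq);
    }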
core.c
  241   if (prio_less(b, a, !!task_rq(a)->core->core_forceidle_count))  in __sched_core_less()
  675   rq = task_rq(p);  in __task_rq_lock()
  677   if (likely(rq == task_rq(p) && !task_on_rq_migrating(p))) {  in __task_rq_lock()
  699   rq = task_rq(p);  in task_rq_lock()
  718   if (likely(rq == task_rq(p) && !task_on_rq_migrating(p))) {  in task_rq_lock()
  1386  p->sched_class->reweight_task(task_rq(p), p, &lw);  in set_load_weight()
  2204  rq = task_rq(p);  in wait_task_inactive()
  2526  if (task_rq(p) == rq) {  in migration_cpu_stop()
  2605  if (task_rq(p) != rq)  in push_cpu_stop()
  2622  if (task_rq(p) == rq) {  in push_cpu_stop()
  [all …]
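The __task_rq_lock()/task_rq_lock() hits above (core.c 675, 677, 699, 718) are the canonical lock-and-revalidate loop: task_rq(p) is only a snapshot, so after taking that rq's lock the code checks that the task has not been migrated (and is not mid-migration) and retries otherwise. A trimmed sketch of the loop; the real functions also take p->pi_lock, pin the rq lock, and back off with cpu_relax() while a migration is in flight.

    /* Sketch of the retry loop in task_rq_lock() (core.c above); simplified. */
    for (;;) {
            rq = task_rq(p);                        /* snapshot the task's rq        */
            raw_spin_rq_lock(rq);                   /* lock that rq                  */
            if (likely(rq == task_rq(p) &&          /* still the same rq, and the    */
                       !task_on_rq_migrating(p)))   /* task is not mid-migration?    */
                    break;                          /* then rq is stable while held  */
            raw_spin_rq_unlock(rq);                 /* raced with a migration: retry */
    }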
syscalls.c
  1187  if (!cpumask_subset(task_rq(p)->rd->span, mask))  in dl_task_check_affinity()
  1478  p_rq = task_rq(p);  in yield_to()
  1487  if (task_rq(p) != p_rq)  in yield_to()
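yield_to() (syscalls.c 1478 and 1487 above) applies the same idea to two runqueues: it samples p_rq = task_rq(p), locks both rqs, and rechecks task_rq(p) because p may have been migrated in the meantime. A condensed sketch; the real function uses a retry label and contains the actual yield logic and error handling, which are omitted here.

    /* Sketch of the recheck in yield_to() (syscalls.c above); simplified. */
    for (;;) {
            rq = this_rq();
            p_rq = task_rq(p);                      /* snapshot the target's rq        */
            double_rq_lock(rq, p_rq);               /* lock both rqs in a stable order */
            if (task_rq(p) == p_rq)                 /* still accurate?                 */
                    break;                          /* yes: both rqs held, proceed     */
            double_rq_unlock(rq, p_rq);             /* no: p migrated meanwhile, retry */
    }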
deadline.c
  77    rq = task_rq(dl_task_of(dl_se));  in rq_of_dl_se()
  352   dl_rq_change_utilization(task_rq(p), &p->dl, new_bw);  in dl_change_utilization()
  2270  rq = task_rq(p);  in migrate_task_rq_dl()
  2546  if (!cpudl_find(&task_rq(task)->rd->cpudl, task, later_mask))  in find_later_rq()
  2642  if (unlikely(task_rq(task) != rq ||  in find_lock_later_rq()
  2892  rq = task_rq(p);  in set_cpus_allowed_dl()
rt.c
  300   return task_rq(p);  in rq_of_rt_se()
  1828  ret = cpupri_find_fitness(&task_rq(task)->rd->cpupri,  in find_lowest_rq()
  1833  ret = cpupri_find(&task_rq(task)->rd->cpupri,  in find_lowest_rq()
  1934  if (unlikely(task_rq(task) != rq ||  in find_lock_lowest_rq()
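find_lock_later_rq() (deadline.c 2642) and find_lock_lowest_rq() (rt.c 1934) above share the push-migration pattern: pick a candidate CPU via cpudl/cpupri, take both rq locks with double_lock_balance(), and because that call may drop the source rq->lock, re-verify task_rq(task) afterwards. A condensed sketch; the real functions check several more conditions (scheduling class, on_rq state, migration_disabled), abbreviated here.

    /* Sketch of the post double_lock_balance() revalidation used by the RT and
     * deadline push paths (rt.c / deadline.c above); conditions abbreviated. */
    lowest_rq = cpu_rq(cpu);                        /* candidate picked via cpupri/cpudl */
    if (double_lock_balance(rq, lowest_rq)) {       /* may have dropped rq->lock ...     */
            if (unlikely(task_rq(task) != rq ||     /* ... so: did the task move away,   */
                         !cpumask_test_cpu(cpu, task->cpus_ptr) ||  /* lose affinity,    */
                         task_on_cpu(rq, task))) {  /* or start running?                 */
                    double_unlock_balance(rq, lowest_rq);
                    lowest_rq = NULL;               /* candidate is stale, give it up    */
            }
    }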
sched.h
  1344  #define task_rq(p) cpu_rq(task_cpu(p))  (macro)
  1586  return &task_rq(p)->cfs;  in task_cfs_rq()
  1592  struct rq *rq = task_rq(p);  in cfs_rq_of()
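The definition at sched.h 1344 above is the whole story: task_rq(p) is simply the runqueue of whatever CPU the task is currently assigned to, which is why nearly every call site listed here either holds the relevant locks or re-checks the value after locking. Expanded below; the cpu_rq()/task_cpu() descriptions are quoted from memory and should be treated as approximate.

    #define task_rq(p)      cpu_rq(task_cpu(p))     /* sched.h 1344, as shown above */
    /* cpu_rq(cpu) is the per-CPU runqueue (roughly &per_cpu(runqueues, (cpu))) and
     * task_cpu(p) reads the CPU the task was last placed on. Without p->pi_lock or
     * that rq's lock the task can migrate, so task_rq(p) may change between reads. */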
fair.c
  1479   (lockdep_is_held(__rq_lockp(task_rq(p))) && !READ_ONCE(p->on_cpu)));  in deref_task_numa_group()
  6817   SCHED_WARN_ON(task_rq(p) != rq);  in hrtick_start_fair()
  9640   WARN_ON_ONCE(task_rq(p) != rq);  in attach_task()
  12981  struct rq *rq = task_rq(a);  in cfs_prio_less()
  12988  SCHED_WARN_ON(task_rq(b)->core != rq->core);  in cfs_prio_less()
  13011  cfs_rqa = &task_rq(a)->cfs;  in cfs_prio_less()
  13012  cfs_rqb = &task_rq(b)->cfs;  in cfs_prio_less()
  13063  check_update_overutilized_status(task_rq(curr));  in task_tick_fair()