Searched refs: src_rq (Results 1 – 5 of 5) sorted by relevance
/linux-6.12.1/kernel/sched/ext.c
    2263  struct rq *src_rq, struct rq *dst_rq)  in move_remote_task_to_local_dsq() argument
    2265  lockdep_assert_rq_held(src_rq);  in move_remote_task_to_local_dsq()
    2268  deactivate_task(src_rq, p, 0);  in move_remote_task_to_local_dsq()
    2272  raw_spin_rq_unlock(src_rq);  in move_remote_task_to_local_dsq()
    2362  struct rq *src_rq)  in unlink_dsq_and_lock_src_rq() argument
    2373  raw_spin_rq_lock(src_rq);  in unlink_dsq_and_lock_src_rq()
    2377  !WARN_ON_ONCE(src_rq != task_rq(p));  in unlink_dsq_and_lock_src_rq()
    2381  struct scx_dispatch_q *dsq, struct rq *src_rq)  in consume_remote_task() argument
    2385  if (unlink_dsq_and_lock_src_rq(p, dsq, src_rq)) {  in consume_remote_task()
    2386  move_remote_task_to_local_dsq(p, 0, src_rq, this_rq);  in consume_remote_task()
    [all …]
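The ext.c hits are sched_ext's remote-to-local move path: move_remote_task_to_local_dsq() runs with src_rq locked, deactivates the task there, then drops the src_rq lock before the task is re-activated on the destination, and consume_remote_task() re-checks src_rq == task_rq(p) after taking the lock. Below is a toy userspace model of just that deactivate/unlock/re-activate hand-off; toy_rq, toy_task and the list helpers are invented for this sketch and pthread mutexes stand in for runqueue locks, so this shows the shape of the pattern, not kernel code.

/* Toy model of "deactivate under the src lock, drop it, re-activate on dst".
 * Not kernel code: toy_rq, toy_task and the list helpers are invented here. */
#include <pthread.h>
#include <stdio.h>

struct toy_task { const char *name; struct toy_task *next; };

struct toy_rq {
    pthread_mutex_t lock;           /* stands in for the rq spinlock */
    struct toy_task *head;          /* toy run list */
};

/* Caller must hold rq->lock: unlink p ("deactivate"). */
static void toy_deactivate(struct toy_rq *rq, struct toy_task *p)
{
    struct toy_task **pp = &rq->head;

    while (*pp && *pp != p)
        pp = &(*pp)->next;
    if (*pp)
        *pp = p->next;
    p->next = NULL;
}

/* Caller must hold rq->lock: queue p ("activate"). */
static void toy_activate(struct toy_rq *rq, struct toy_task *p)
{
    p->next = rq->head;
    rq->head = p;
}

/* Enter with src->lock held, leave with it released and p queued on dst,
 * mirroring the shape (only the shape) of move_remote_task_to_local_dsq(). */
static void toy_move_remote_to_local(struct toy_task *p,
                                     struct toy_rq *src, struct toy_rq *dst)
{
    toy_deactivate(src, p);
    pthread_mutex_unlock(&src->lock);   /* raw_spin_rq_unlock(src_rq) analogue */

    pthread_mutex_lock(&dst->lock);
    toy_activate(dst, p);
    pthread_mutex_unlock(&dst->lock);
}

int main(void)
{
    struct toy_task t = { .name = "t0" };
    struct toy_rq src = { PTHREAD_MUTEX_INITIALIZER, &t };
    struct toy_rq dst = { PTHREAD_MUTEX_INITIALIZER, NULL };

    pthread_mutex_lock(&src.lock);      /* caller-held src lock, as in the kernel path */
    toy_move_remote_to_local(&t, &src, &dst);
    printf("dst now runs: %s\n", dst.head ? dst.head->name : "(none)");
    return 0;
}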
/linux-6.12.1/kernel/sched/rt.c
    2287  struct rq *src_rq;  in pull_rt_task() local
    2315  src_rq = cpu_rq(cpu);  in pull_rt_task()
    2324  if (src_rq->rt.highest_prio.next >=  in pull_rt_task()
    2334  double_lock_balance(this_rq, src_rq);  in pull_rt_task()
    2340  p = pick_highest_pushable_task(src_rq, this_cpu);  in pull_rt_task()
    2347  WARN_ON(p == src_rq->curr);  in pull_rt_task()
    2358  if (p->prio < src_rq->curr->prio)  in pull_rt_task()
    2362  push_task = get_push_task(src_rq);  in pull_rt_task()
    2364  deactivate_task(src_rq, p, 0);  in pull_rt_task()
    2377  double_unlock_balance(this_rq, src_rq);  in pull_rt_task()
    [all …]
/linux-6.12.1/kernel/sched/deadline.c
    2781  struct rq *src_rq;  in pull_dl_task() local
    2797  src_rq = cpu_rq(cpu);  in pull_dl_task()
    2805  src_rq->dl.earliest_dl.next))  in pull_dl_task()
    2810  double_lock_balance(this_rq, src_rq);  in pull_dl_task()
    2816  if (src_rq->dl.dl_nr_running <= 1)  in pull_dl_task()
    2819  p = pick_earliest_pushable_dl_task(src_rq, this_cpu);  in pull_dl_task()
    2828  WARN_ON(p == src_rq->curr);  in pull_dl_task()
    2836  src_rq->curr->dl.deadline))  in pull_dl_task()
    2840  push_task = get_push_task(src_rq);  in pull_dl_task()
    2842  deactivate_task(src_rq, p, 0);  in pull_dl_task()
    [all …]
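rt.c and deadline.c use src_rq the same way: pull_rt_task() and pull_dl_task() scan other CPUs' runqueues, take both locks with double_lock_balance(), pick the most urgent pushable task, and move it with deactivate_task() on src_rq followed by activation on this_rq. The sketch below models that pull loop in userspace under stated assumptions: toy_rq, toy_task and pick_best() are invented stand-ins, address-ordered locking approximates double_lock_balance()'s deadlock avoidance, and the priority rule is simplified to "lower value wins".

/* Userspace model of the rt/dl "pull" pattern: lock this_rq and src_rq in a
 * fixed order, pick a migratable task from src_rq, and move it over.
 * toy_rq, toy_task, pick_best and the priority rule are illustrative only. */
#include <pthread.h>
#include <stdio.h>

struct toy_task { int prio; struct toy_task *next; };  /* lower prio value = more urgent */

struct toy_rq {
    pthread_mutex_t lock;
    struct toy_task *head;
};

/* Lock the lower-addressed rq first, approximating the ordering rule that
 * double_lock_balance()/double_rq_lock() use to avoid ABBA deadlocks. */
static void toy_double_lock(struct toy_rq *a, struct toy_rq *b)
{
    if (a == b) {
        pthread_mutex_lock(&a->lock);
        return;
    }
    pthread_mutex_lock(a < b ? &a->lock : &b->lock);
    pthread_mutex_lock(a < b ? &b->lock : &a->lock);
}

static void toy_double_unlock(struct toy_rq *a, struct toy_rq *b)
{
    pthread_mutex_unlock(&a->lock);
    if (a != b)
        pthread_mutex_unlock(&b->lock);
}

/* Very loose stand-in for pick_highest_pushable_task(). */
static struct toy_task *pick_best(struct toy_rq *rq)
{
    struct toy_task *best = NULL, *p;

    for (p = rq->head; p; p = p->next)
        if (!best || p->prio < best->prio)
            best = p;
    return best;
}

static void toy_detach(struct toy_rq *rq, struct toy_task *p)
{
    struct toy_task **pp = &rq->head;

    while (*pp && *pp != p)
        pp = &(*pp)->next;
    if (*pp)
        *pp = p->next;
    p->next = NULL;
}

static void toy_attach(struct toy_rq *rq, struct toy_task *p)
{
    p->next = rq->head;
    rq->head = p;
}

/* Shape of one pull_rt_task()/pull_dl_task() iteration: pull a task from
 * src_rq if it is more urgent than anything this_rq already has. */
static int toy_pull(struct toy_rq *this_rq, struct toy_rq *src_rq)
{
    int pulled = 0;

    toy_double_lock(this_rq, src_rq);

    struct toy_task *p = pick_best(src_rq);
    struct toy_task *cur = pick_best(this_rq);

    if (p && (!cur || p->prio < cur->prio)) {
        toy_detach(src_rq, p);      /* deactivate_task(src_rq, p, 0) analogue */
        toy_attach(this_rq, p);     /* activation on this_rq */
        pulled = 1;
    }

    toy_double_unlock(this_rq, src_rq);
    return pulled;
}

int main(void)
{
    struct toy_task a = { .prio = 5 }, b = { .prio = 1 };
    struct toy_rq this_rq = { PTHREAD_MUTEX_INITIALIZER, &a };
    struct toy_rq src_rq  = { PTHREAD_MUTEX_INITIALIZER, &b };

    if (toy_pull(&this_rq, &src_rq))
        printf("pulled a prio-%d task onto this_rq\n", this_rq.head->prio);
    return 0;
}

Taking both locks in one fixed (address) order is what lets two CPUs pull from each other concurrently without an ABBA deadlock; that ordering discipline is the point of double_lock_balance() in the real code.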
/linux-6.12.1/kernel/sched/core.c
    3302  struct rq *src_rq, *dst_rq;  in __migrate_swap_task() local
    3305  src_rq = task_rq(p);  in __migrate_swap_task()
    3308  rq_pin_lock(src_rq, &srf);  in __migrate_swap_task()
    3311  deactivate_task(src_rq, p, 0);  in __migrate_swap_task()
    3317  rq_unpin_lock(src_rq, &srf);  in __migrate_swap_task()
    3337  struct rq *src_rq, *dst_rq;  in migrate_swap_stop() local
    3342  src_rq = cpu_rq(arg->src_cpu);  in migrate_swap_stop()
    3346  guard(double_rq_lock)(src_rq, dst_rq);  in migrate_swap_stop()
   10162  int __sched_mm_cid_migrate_from_fetch_cid(struct rq *src_rq,  in __sched_mm_cid_migrate_from_fetch_cid() argument
   10191  src_task = rcu_dereference(src_rq->curr);  in __sched_mm_cid_migrate_from_fetch_cid()
    [all …]
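core.c shows the swap path: __migrate_swap_task() deactivates p on src_rq and re-activates it on dst_rq, while migrate_swap_stop() holds both runqueue locks for the whole operation via guard(double_rq_lock)(src_rq, dst_rq), which releases them automatically at end of scope. The following is a rough userspace analogue of that scoped double-lock guard built on GCC/Clang's __attribute__((cleanup)); the guard type, the macro, and the swapped field are all invented for illustration, not the kernel's guard() machinery.

/* Rough model of a scope-exit guard over two runqueue locks.
 * Relies on GCC/Clang __attribute__((cleanup)); all names are illustrative. */
#include <pthread.h>
#include <stdio.h>

struct toy_rq { pthread_mutex_t lock; int nr_running; };

struct double_rq_guard { struct toy_rq *rq1, *rq2; };

/* Lock both rqs in address order so concurrent swappers cannot deadlock. */
static struct double_rq_guard double_rq_guard_init(struct toy_rq *a, struct toy_rq *b)
{
    struct toy_rq *lo = a < b ? a : b, *hi = a < b ? b : a;

    pthread_mutex_lock(&lo->lock);
    if (lo != hi)
        pthread_mutex_lock(&hi->lock);
    return (struct double_rq_guard){ a, b };
}

static void double_rq_guard_release(struct double_rq_guard *g)
{
    pthread_mutex_unlock(&g->rq1->lock);
    if (g->rq1 != g->rq2)
        pthread_mutex_unlock(&g->rq2->lock);
}

#define DOUBLE_RQ_GUARD(name, a, b) \
    struct double_rq_guard name __attribute__((cleanup(double_rq_guard_release))) = \
        double_rq_guard_init(a, b)

/* Shape of migrate_swap_stop(): everything after the guard runs with both
 * locks held; the unlock happens automatically when the block ends. */
static void toy_swap_counts(struct toy_rq *src_rq, struct toy_rq *dst_rq)
{
    DOUBLE_RQ_GUARD(g, src_rq, dst_rq);

    int tmp = src_rq->nr_running;
    src_rq->nr_running = dst_rq->nr_running;
    dst_rq->nr_running = tmp;
}   /* both locks released here */

int main(void)
{
    struct toy_rq a = { PTHREAD_MUTEX_INITIALIZER, 3 };
    struct toy_rq b = { PTHREAD_MUTEX_INITIALIZER, 1 };

    toy_swap_counts(&a, &b);
    printf("a=%d b=%d\n", a.nr_running, b.nr_running);
    return 0;
}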
/linux-6.12.1/kernel/sched/fair.c
    9233  struct rq *src_rq;  member
    9264  lockdep_assert_rq_held(env->src_rq);  in task_hot()
    9296  delta = rq_clock_task(env->src_rq) - p->se.exec_start;  in task_hot()
    9327  if (env->src_rq->nr_running > env->src_rq->nr_preferred_running)  in migrate_degrades_locality()
    9369  lockdep_assert_rq_held(env->src_rq);  in can_migrate_task()
    9421  if (task_on_cpu(env->src_rq, p)) {  in can_migrate_task()
    9458  lockdep_assert_rq_held(env->src_rq);  in detach_task()
    9460  deactivate_task(env->src_rq, p, DEQUEUE_NOCLOCK);  in detach_task()
    9474  lockdep_assert_rq_held(env->src_rq);  in detach_one_task()
    9477  &env->src_rq->cfs_tasks, se.group_node) {  in detach_one_task()
    [all …]
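In fair.c, src_rq is a member of the load balancer's env: task_hot() compares rq_clock_task(env->src_rq) against the task's exec_start, can_migrate_task() skips tasks still running on src_rq, and detach_task()/detach_one_task() dequeue whatever survives the filter. A compressed sketch of that filter-then-detach flow follows; the lb_env-like struct, the list layout, and the fixed 500µs hotness threshold (standing in for sysctl_sched_migration_cost) are assumptions made for the example.

/* Sketch of the fair-class balance filter: skip running or cache-hot tasks,
 * detach the first acceptable one from env->src_rq. Types and the 500us
 * threshold are illustrative, not the kernel's structures or tunables. */
#include <stdbool.h>
#include <stdio.h>

#define HOT_NS (500 * 1000ULL)          /* stand-in for sysctl_sched_migration_cost */

struct toy_task {
    const char *name;
    bool on_cpu;                        /* task_on_cpu(src_rq, p) analogue */
    unsigned long long exec_start;      /* when it last started running, ns */
    struct toy_task *next;
};

struct toy_rq { unsigned long long clock_task; struct toy_task *cfs_tasks; };

struct lb_env { struct toy_rq *src_rq; struct toy_rq *dst_rq; };

/* task_hot(): ran so recently that its cache footprint is probably still warm. */
static bool task_hot(struct lb_env *env, struct toy_task *p)
{
    return env->src_rq->clock_task - p->exec_start < HOT_NS;
}

static bool can_migrate_task(struct lb_env *env, struct toy_task *p)
{
    if (p->on_cpu)                      /* currently running on the source CPU */
        return false;
    return !task_hot(env, p);
}

/* detach_one_task(): walk src_rq's task list, unlink the first migratable task. */
static struct toy_task *detach_one_task(struct lb_env *env)
{
    struct toy_task **pp = &env->src_rq->cfs_tasks;

    while (*pp) {
        if (can_migrate_task(env, *pp)) {
            struct toy_task *p = *pp;

            *pp = p->next;
            p->next = NULL;
            return p;
        }
        pp = &(*pp)->next;
    }
    return NULL;
}

int main(void)
{
    struct toy_task hot  = { "hot",  false, 9900000ULL, NULL };
    struct toy_task cold = { "cold", false, 1000000ULL, &hot };
    struct toy_rq src = { .clock_task = 10000000ULL, .cfs_tasks = &cold };
    struct toy_rq dst = { 0 };
    struct lb_env env = { .src_rq = &src, .dst_rq = &dst };

    struct toy_task *p = detach_one_task(&env);
    printf("detached: %s\n", p ? p->name : "(none)");
    return 0;
}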