1 // SPDX-License-Identifier: GPL-2.0-only
7 * Copyright (C) 1991-2002 Linus Torvalds
8 * Copyright (C) 1998-2024 Ingo Molnar, Red Hat
72 # include <linux/entry-common.h>
97 #include "../../io_uring/io-wq.h"
163 if (p->sched_class == &stop_sched_class) /* trumps deadline */ in __task_prio()
164 return -2; in __task_prio()
166 if (p->dl_server) in __task_prio()
167 return -1; /* deadline */ in __task_prio()
169 if (rt_or_dl_prio(p->prio)) in __task_prio()
170 return p->prio; /* [-1, 99] */ in __task_prio()
172 if (p->sched_class == &idle_sched_class) in __task_prio()
195 if (-pa < -pb) in prio_less()
198 if (-pb < -pa) in prio_less()
201 if (pa == -1) { /* dl_prio() doesn't work because of stop_class above */ in prio_less()
204 a_dl = &a->dl; in prio_less()
207 * __task_prio() can return -1 (for DL) even for those. In that in prio_less()
210 if (a->dl_server) in prio_less()
211 a_dl = a->dl_server; in prio_less()
213 b_dl = &b->dl; in prio_less()
214 if (b->dl_server) in prio_less()
215 b_dl = b->dl_server; in prio_less()
217 return !dl_time_before(a_dl->deadline, b_dl->deadline); in prio_less()
234 if (a->core_cookie < b->core_cookie) in __sched_core_less()
237 if (a->core_cookie > b->core_cookie) in __sched_core_less()
241 if (prio_less(b, a, !!task_rq(a)->core->core_forceidle_count)) in __sched_core_less()
259 if (cookie < p->core_cookie) in rb_sched_core_cmp()
260 return -1; in rb_sched_core_cmp()
262 if (cookie > p->core_cookie) in rb_sched_core_cmp()
270 if (p->se.sched_delayed) in sched_core_enqueue()
273 rq->core->core_task_seq++; in sched_core_enqueue()
275 if (!p->core_cookie) in sched_core_enqueue()
278 rb_add(&p->core_node, &rq->core_tree, rb_sched_core_less); in sched_core_enqueue()
283 if (p->se.sched_delayed) in sched_core_dequeue()
286 rq->core->core_task_seq++; in sched_core_dequeue()
289 rb_erase(&p->core_node, &rq->core_tree); in sched_core_dequeue()
290 RB_CLEAR_NODE(&p->core_node); in sched_core_dequeue()
296 * and re-examine whether the core is still in forced idle state. in sched_core_dequeue()
298 if (!(flags & DEQUEUE_SAVE) && rq->nr_running == 1 && in sched_core_dequeue()
299 rq->core->core_forceidle_count && rq->curr == rq->idle) in sched_core_dequeue()
305 if (p->sched_class->task_is_throttled) in sched_task_is_throttled()
306 return p->sched_class->task_is_throttled(p, cpu); in sched_task_is_throttled()
313 struct rb_node *node = &p->core_node; in sched_core_next()
322 if (p->core_cookie != cookie) in sched_core_next()
331 * Find left-most (aka, highest priority) and unthrottled task matching @cookie.
339 node = rb_find_first((void *)cookie, &rq->core_tree, rb_sched_core_cmp); in sched_core_find()
344 if (!sched_task_is_throttled(p, rq->cpu)) in sched_core_find()
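/*
 * Illustrative sketch, not part of core.c: the rb_add()/rb_find_first()
 * helpers used above take caller-supplied ordering callbacks. A minimal
 * stand-alone example keyed by a plain u64 (all 'demo_*' names are made up;
 * assumes <linux/rbtree.h>):
 */
struct demo_node {
        struct rb_node  node;
        u64             key;
};

static struct rb_root demo_tree = RB_ROOT;

static bool demo_less(struct rb_node *a, const struct rb_node *b)
{
        return rb_entry(a, struct demo_node, node)->key <
               rb_entry(b, struct demo_node, node)->key;
}

static int demo_cmp(const void *key, const struct rb_node *n)
{
        u64 k = *(const u64 *)key;
        u64 nk = rb_entry(n, struct demo_node, node)->key;

        return k < nk ? -1 : k > nk;
}

static void demo_insert(struct demo_node *d)
{
        rb_add(&d->node, &demo_tree, demo_less);        /* same pattern as sched_core_enqueue() */
}

static struct demo_node *demo_lookup(u64 key)
{
        struct rb_node *n = rb_find_first(&key, &demo_tree, demo_cmp);  /* as in sched_core_find() */

        return n ? rb_entry(n, struct demo_node, node) : NULL;
}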
374 raw_spin_lock_nested(&cpu_rq(t)->__lock, i++); in sched_core_lock()
383 raw_spin_unlock(&cpu_rq(t)->__lock); in sched_core_unlock()
404 cpu_rq(t)->core_enabled = enabled; in __sched_core_flip()
406 cpu_rq(cpu)->core->core_forceidle_start = 0; in __sched_core_flip()
417 cpu_rq(cpu)->core_enabled = enabled; in __sched_core_flip()
427 WARN_ON_ONCE(!RB_EMPTY_ROOT(&cpu_rq(cpu)->core_tree)); in sched_core_assert_empty()
482 if (!atomic_add_unless(&sched_core_count, -1, 1)) in sched_core_put()
499 * p->pi_lock
500 * rq->lock
501 * hrtimer_cpu_base->lock (hrtimer_start() for bandwidth controls)
503 * rq1->lock
504 * rq2->lock where: rq1 < rq2
508 * Normal scheduling state is serialized by rq->lock. __schedule() takes the
509 * local CPU's rq->lock, it optionally removes the task from the runqueue and
513 * Task enqueue is also under rq->lock, possibly taken from another CPU.
519 * complicated to avoid having to take two rq->locks.
523 * System-calls and anything external will use task_rq_lock() which acquires
524 * both p->pi_lock and rq->lock. As a consequence the state they change is
527 * - sched_setaffinity()/
528 * set_cpus_allowed_ptr(): p->cpus_ptr, p->nr_cpus_allowed
529 * - set_user_nice(): p->se.load, p->*prio
530 * - __sched_setscheduler(): p->sched_class, p->policy, p->*prio,
531 * p->se.load, p->rt_priority,
532 * p->dl.dl_{runtime, deadline, period, flags, bw, density}
533 * - sched_setnuma(): p->numa_preferred_nid
534 * - sched_move_task(): p->sched_task_group
535 * - uclamp_update_active() p->uclamp*
537 * p->state <- TASK_*:
541 * try_to_wake_up(). This latter uses p->pi_lock to serialize against
544 * p->on_rq <- { 0, 1 = TASK_ON_RQ_QUEUED, 2 = TASK_ON_RQ_MIGRATING }:
547 * rq->lock. Non-zero indicates the task is runnable, the special
549 * rq->locks. It indicates task_cpu() is not stable, see task_rq_lock().
551 * Additionally it is possible to be ->on_rq but still be considered not
552 * runnable when p->se.sched_delayed is true. These tasks are on the runqueue
556 * p->on_cpu <- { 0, 1 }:
559 * set before p is scheduled-in and cleared after p is scheduled-out, both
560 * under rq->lock. Non-zero indicates the task is running on its CPU.
563 * CPU to have ->on_cpu = 1 at the same time. ]
567 * - Don't call set_task_cpu() on a blocked task:
572 * - for try_to_wake_up(), called under p->pi_lock:
574 * This allows try_to_wake_up() to only take one rq->lock, see its comment.
576 * - for migration called under rq->lock:
582 * - for migration called under double_rq_lock():
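/*
 * Illustrative sketch, not part of core.c: the "external" pattern the rules
 * above describe -- take p->pi_lock plus the owning rq->lock via
 * task_rq_lock(), mutate the serialized fields, then drop both
 * ('demo_external_update' is a made-up name):
 */
static void demo_external_update(struct task_struct *p)
{
        struct rq_flags rf;
        struct rq *rq;

        rq = task_rq_lock(p, &rf);      /* p->pi_lock + rq->lock; task_cpu(p) is stable */
        /* ... p->policy, p->*prio, p->cpus_ptr etc. may be changed here ... */
        task_rq_unlock(rq, p, &rf);
}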
598 raw_spin_lock_nested(&rq->__lock, subclass); in raw_spin_rq_lock_nested()
624 ret = raw_spin_trylock(&rq->__lock); in raw_spin_rq_trylock()
647 * double_rq_lock - safely lock two runqueues
665 * __task_rq_lock - lock the rq @p resides on.
668 __acquires(rq->lock) in __task_rq_lock()
672 lockdep_assert_held(&p->pi_lock); in __task_rq_lock()
689 * task_rq_lock - lock p->pi_lock and lock the rq @p resides on.
692 __acquires(p->pi_lock) in task_rq_lock()
693 __acquires(rq->lock) in task_rq_lock()
698 raw_spin_lock_irqsave(&p->pi_lock, rf->flags); in task_rq_lock()
704 * ACQUIRE (rq->lock) in task_rq_lock()
705 * [S] ->on_rq = MIGRATING [L] rq = task_rq() in task_rq_lock()
706 * WMB (__set_task_cpu()) ACQUIRE (rq->lock); in task_rq_lock()
707 * [S] ->cpu = new_cpu [L] task_rq() in task_rq_lock()
708 * [L] ->on_rq in task_rq_lock()
709 * RELEASE (rq->lock) in task_rq_lock()
712 * the old rq->lock will fully serialize against the stores. in task_rq_lock()
723 raw_spin_unlock_irqrestore(&p->pi_lock, rf->flags); in task_rq_lock()
731 * RQ-clock updating methods:
743 irq_delta = irq_time_read(cpu_of(rq)) - rq->prev_irq_time; in update_rq_clock_task()
750 * When this happens, we stop ->clock_task and only update the in update_rq_clock_task()
752 * update will consume the rest. This ensures ->clock_task is in update_rq_clock_task()
755 * It does however cause some slight mis-attribution of {soft,}IRQ in update_rq_clock_task()
757 * the current rq->clock timestamp, except that would require using in update_rq_clock_task()
763 rq->prev_irq_time += irq_delta; in update_rq_clock_task()
764 delta -= irq_delta; in update_rq_clock_task()
765 delayacct_irq(rq->curr, irq_delta); in update_rq_clock_task()
770 steal -= rq->prev_steal_time_rq; in update_rq_clock_task()
775 rq->prev_steal_time_rq += steal; in update_rq_clock_task()
776 delta -= steal; in update_rq_clock_task()
780 rq->clock_task += delta; in update_rq_clock_task()
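/*
 * Illustrative numbers, not from the source: if sched_clock advanced by
 * delta = 3,000,000 ns since the last update, of which irq_delta = 1,000,000 ns
 * was {soft,hard}IRQ time and steal = 500,000 ns was taken by the hypervisor,
 * only the remaining 1,500,000 ns reach rq->clock_task above, while rq->clock
 * itself (updated in update_rq_clock() below) still advances by the full 3 ms.
 */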
795 if (rq->clock_update_flags & RQCF_ACT_SKIP) in update_rq_clock()
800 SCHED_WARN_ON(rq->clock_update_flags & RQCF_UPDATED); in update_rq_clock()
801 rq->clock_update_flags |= RQCF_UPDATED; in update_rq_clock()
804 delta = sched_clock_cpu(cpu_of(rq)) - rq->clock; in update_rq_clock()
807 rq->clock += delta; in update_rq_clock()
813 * Use HR-timers to deliver accurate preemption points.
818 if (hrtimer_active(&rq->hrtick_timer)) in hrtick_clear()
819 hrtimer_cancel(&rq->hrtick_timer); in hrtick_clear()
823 * High-resolution timer tick.
835 rq->curr->sched_class->task_tick(rq, rq->curr, 1); in hrtick()
845 struct hrtimer *timer = &rq->hrtick_timer; in __hrtick_restart()
846 ktime_t time = rq->hrtick_time; in __hrtick_restart()
867 * called with rq->lock held and IRQs disabled
871 struct hrtimer *timer = &rq->hrtick_timer; in hrtick_start()
879 rq->hrtick_time = ktime_add_ns(timer->base->get_time(), delta); in hrtick_start()
884 smp_call_function_single_async(cpu_of(rq), &rq->hrtick_csd); in hrtick_start()
891 * called with rq->lock held and IRQs disabled
900 hrtimer_start(&rq->hrtick_timer, ns_to_ktime(delay), in hrtick_start()
909 INIT_CSD(&rq->hrtick_csd, __hrtick_start, rq); in hrtick_rq_init()
911 hrtimer_init(&rq->hrtick_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL_HARD); in hrtick_rq_init()
912 rq->hrtick_timer.function = hrtick; in hrtick_rq_init()
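/*
 * Illustrative sketch, not part of core.c: the same hrtimer pattern in
 * isolation -- initialize, point ->function at a callback, then arm a
 * relative, hard-IRQ expiry much like hrtick_start() does above
 * (all 'demo_*' names are made up):
 */
static struct hrtimer demo_timer;

static enum hrtimer_restart demo_timer_fn(struct hrtimer *timer)
{
        /* runs in hardirq context; do the work, don't re-arm */
        return HRTIMER_NORESTART;
}

static void demo_timer_arm(u64 delay_ns)
{
        hrtimer_init(&demo_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL_HARD);
        demo_timer.function = demo_timer_fn;
        hrtimer_start(&demo_timer, ns_to_ktime(delay_ns), HRTIMER_MODE_REL_HARD);
}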
947 return !(fetch_or(&ti->flags, _TIF_NEED_RESCHED) & _TIF_POLLING_NRFLAG); in set_nr_and_not_polling()
959 typeof(ti->flags) val = READ_ONCE(ti->flags); in set_nr_if_polling()
966 } while (!try_cmpxchg(&ti->flags, &val, val | _TIF_NEED_RESCHED)); in set_nr_if_polling()
988 struct wake_q_node *node = &task->wake_q; in __wake_q_add()
991 * Atomically grab the task, if ->wake_q is !nil already it means in __wake_q_add()
999 if (unlikely(cmpxchg_relaxed(&node->next, NULL, WAKE_Q_TAIL))) in __wake_q_add()
1005 *head->lastp = node; in __wake_q_add()
1006 head->lastp = &node->next; in __wake_q_add()
1011 * wake_q_add() - queue a wakeup for 'later' waking.
1019 * This function must be used as-if it were wake_up_process(); IOW the task
1029 * wake_q_add_safe() - safely queue a wakeup for 'later' waking.
1037 * This function must be used as-if it were wake_up_process(); IOW the task
1040 * This function is essentially a task-safe equivalent to wake_q_add(). Callers
1053 struct wake_q_node *node = head->first; in wake_up_q()
1059 /* Task can safely be re-inserted now: */ in wake_up_q()
1060 node = node->next; in wake_up_q()
1061 task->wake_q.next = NULL; in wake_up_q()
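/*
 * Illustrative sketch, not part of core.c: the intended wake_q usage --
 * collect wakeups while still holding a lock, issue them only after it is
 * dropped ('demo_wake_one' and its arguments are made up):
 */
static void demo_wake_one(spinlock_t *lock, struct task_struct *waiter)
{
        DEFINE_WAKE_Q(wake_q);

        spin_lock(lock);
        /* decide, under the lock, that 'waiter' must run */
        wake_q_add(&wake_q, waiter);    /* takes a reference, no wakeup yet */
        spin_unlock(lock);

        wake_up_q(&wake_q);             /* the actual wake_up_process() happens here */
}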
1073 * resched_curr - mark rq's current task 'to be rescheduled now'.
1076 * might also involve a cross-CPU call to trigger the scheduler on
1081 struct task_struct *curr = rq->curr; in resched_curr()
1118 * from an idle CPU. This is good for power-savings.
1126 int i, cpu = smp_processor_id(), default_cpu = -1; in get_nohz_timer_target()
1150 if (default_cpu == -1) in get_nohz_timer_target()
1174 * Set TIF_NEED_RESCHED and send an IPI if in the non-polling in wake_up_idle_cpu()
1178 * re-evaluate the next tick. Provided some re-ordering of tick in wake_up_idle_cpu()
1182 * - On most architectures, a simple fetch_or on ti::flags with a in wake_up_idle_cpu()
1185 * - x86 needs to perform a last need_resched() check between in wake_up_idle_cpu()
1195 if (set_nr_and_not_polling(rq->idle)) in wake_up_idle_cpu()
1204 * We just need the target to call irq_exit() and re-evaluate in wake_up_full_nohz_cpu()
1244 rq->idle_balance = idle_cpu(cpu); in nohz_csd_func()
1245 if (rq->idle_balance && !need_resched()) { in nohz_csd_func()
1246 rq->nohz_idle_balance = flags; in nohz_csd_func()
1256 if (rq->nr_running != 1) in __need_bw_check()
1259 if (p->sched_class != &fair_sched_class) in __need_bw_check()
1273 if (rq->dl.dl_nr_running) in sched_can_stop_tick()
1280 if (rq->rt.rr_nr_running) { in sched_can_stop_tick()
1281 if (rq->rt.rr_nr_running == 1) in sched_can_stop_tick()
1291 fifo_nr_running = rq->rt.rt_nr_running - rq->rt.rr_nr_running; in sched_can_stop_tick()
1303 if (rq->cfs.nr_running > 1) in sched_can_stop_tick()
1311 * E.g. going from 2->1 without going through pick_next_task(). in sched_can_stop_tick()
1313 if (__need_bw_check(rq, rq->curr)) { in sched_can_stop_tick()
1314 if (cfs_task_bw_constrained(rq->curr)) in sched_can_stop_tick()
1343 list_for_each_entry_rcu(child, &parent->children, siblings) { in walk_tg_tree_from()
1355 parent = parent->parent; in walk_tg_tree_from()
1370 int prio = p->static_prio - MAX_RT_PRIO; in set_load_weight()
1385 if (update_load && p->sched_class->reweight_task) in set_load_weight()
1386 p->sched_class->reweight_task(task_rq(p), p, &lw); in set_load_weight()
1388 p->se.load = lw; in set_load_weight()
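/*
 * Illustrative sketch, not part of core.c: how a nice value indexes the
 * weight table consulted above, assuming the usual NICE_TO_PRIO() and
 * sched_prio_to_weight[] definitions ('demo_nice_to_weight' is made up):
 */
static unsigned long demo_nice_to_weight(int nice)
{
        int prio = NICE_TO_PRIO(nice) - MAX_RT_PRIO;    /* nice + 20, range 0..39 */

        return sched_prio_to_weight[prio];              /* e.g. nice 0 -> 1024, nice 19 -> 15 */
}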
1395 * The (slow-path) user-space triggers utilization clamp value updates which
1396 * can require updates on (fast-path) scheduler's data structures used to
1398 * While the per-CPU rq lock protects fast-path update operations, user-space
1420 * This knob only affects RT tasks whose uclamp_se->user_defined == false.
1456 * idle (which drops the max-clamp) by retaining the last known in uclamp_idle_value()
1457 * max-clamp. in uclamp_idle_value()
1460 rq->uclamp_flags |= UCLAMP_FLAG_IDLE; in uclamp_idle_value()
1470 /* Reset max-clamp retention only on idle exit */ in uclamp_idle_reset()
1471 if (!(rq->uclamp_flags & UCLAMP_FLAG_IDLE)) in uclamp_idle_reset()
1481 struct uclamp_bucket *bucket = rq->uclamp[clamp_id].bucket; in uclamp_rq_max_value()
1482 int bucket_id = UCLAMP_BUCKETS - 1; in uclamp_rq_max_value()
1488 for ( ; bucket_id >= 0; bucket_id--) { in uclamp_rq_max_value()
1494 /* No tasks -- default clamp values */ in uclamp_rq_max_value()
1503 lockdep_assert_held(&p->pi_lock); in __uclamp_update_util_min_rt_default()
1505 uc_se = &p->uclamp_req[UCLAMP_MIN]; in __uclamp_update_util_min_rt_default()
1508 if (uc_se->user_defined) in __uclamp_update_util_min_rt_default()
1520 /* Protect updates to p->uclamp_* */ in uclamp_update_util_min_rt_default()
1529 struct uclamp_se uc_req = p->uclamp_req[clamp_id]; in uclamp_tg_restrict()
1542 tg_min = task_group(p)->uclamp[UCLAMP_MIN].value; in uclamp_tg_restrict()
1543 tg_max = task_group(p)->uclamp[UCLAMP_MAX].value; in uclamp_tg_restrict()
1555 * - the task specific clamp value, when explicitly requested from userspace
1556 * - the task group effective clamp value, for tasks not either in the root
1558 * - the system default clamp value, defined by the sysadmin
1577 /* Task currently refcounted: use back-annotated (effective) value */ in uclamp_eff_value()
1578 if (p->uclamp[clamp_id].active) in uclamp_eff_value()
1579 return (unsigned long)p->uclamp[clamp_id].value; in uclamp_eff_value()
1591 * Tasks can have a task-specific value requested from user-space, track
1599 struct uclamp_rq *uc_rq = &rq->uclamp[clamp_id]; in uclamp_rq_inc_id()
1600 struct uclamp_se *uc_se = &p->uclamp[clamp_id]; in uclamp_rq_inc_id()
1606 p->uclamp[clamp_id] = uclamp_eff_get(p, clamp_id); in uclamp_rq_inc_id()
1608 bucket = &uc_rq->bucket[uc_se->bucket_id]; in uclamp_rq_inc_id()
1609 bucket->tasks++; in uclamp_rq_inc_id()
1610 uc_se->active = true; in uclamp_rq_inc_id()
1612 uclamp_idle_reset(rq, clamp_id, uc_se->value); in uclamp_rq_inc_id()
1618 if (bucket->tasks == 1 || uc_se->value > bucket->value) in uclamp_rq_inc_id()
1619 bucket->value = uc_se->value; in uclamp_rq_inc_id()
1621 if (uc_se->value > uclamp_rq_get(rq, clamp_id)) in uclamp_rq_inc_id()
1622 uclamp_rq_set(rq, clamp_id, uc_se->value); in uclamp_rq_inc_id()
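/*
 * Illustrative numbers, not from the source: assuming the default of 5 clamp
 * buckets and SCHED_CAPACITY_SCALE == 1024, each bucket spans roughly 205
 * utilization units, so a requested clamp of 300 refcounts into bucket 1 and
 * a clamp of 1024 into the last bucket (index 4); bucket->value above then
 * tracks the largest clamp among that bucket's currently enqueued tasks.
 */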
1637 struct uclamp_rq *uc_rq = &rq->uclamp[clamp_id]; in uclamp_rq_dec_id()
1638 struct uclamp_se *uc_se = &p->uclamp[clamp_id]; in uclamp_rq_dec_id()
1649 * In this case the uc_se->active flag should be false since no uclamp in uclamp_rq_dec_id()
1660 * // Must not decrement bucket->tasks here in uclamp_rq_dec_id()
1664 * bucket[uc_se->bucket_id]. in uclamp_rq_dec_id()
1668 if (unlikely(!uc_se->active)) in uclamp_rq_dec_id()
1671 bucket = &uc_rq->bucket[uc_se->bucket_id]; in uclamp_rq_dec_id()
1673 SCHED_WARN_ON(!bucket->tasks); in uclamp_rq_dec_id()
1674 if (likely(bucket->tasks)) in uclamp_rq_dec_id()
1675 bucket->tasks--; in uclamp_rq_dec_id()
1677 uc_se->active = false; in uclamp_rq_dec_id()
1685 if (likely(bucket->tasks)) in uclamp_rq_dec_id()
1693 SCHED_WARN_ON(bucket->value > rq_clamp); in uclamp_rq_dec_id()
1694 if (bucket->value >= rq_clamp) { in uclamp_rq_dec_id()
1695 bkt_clamp = uclamp_rq_max_value(rq, clamp_id, uc_se->value); in uclamp_rq_dec_id()
1713 if (unlikely(!p->sched_class->uclamp_enabled)) in uclamp_rq_inc()
1716 if (p->se.sched_delayed) in uclamp_rq_inc()
1723 if (rq->uclamp_flags & UCLAMP_FLAG_IDLE) in uclamp_rq_inc()
1724 rq->uclamp_flags &= ~UCLAMP_FLAG_IDLE; in uclamp_rq_inc()
1740 if (unlikely(!p->sched_class->uclamp_enabled)) in uclamp_rq_dec()
1743 if (p->se.sched_delayed) in uclamp_rq_dec()
1753 if (!p->uclamp[clamp_id].active) in uclamp_rq_reinc_id()
1763 if (clamp_id == UCLAMP_MAX && (rq->uclamp_flags & UCLAMP_FLAG_IDLE)) in uclamp_rq_reinc_id()
1764 rq->uclamp_flags &= ~UCLAMP_FLAG_IDLE; in uclamp_rq_reinc_id()
1818 uclamp_se_set(&tg->uclamp_req[UCLAMP_MIN], in uclamp_update_root_tg()
1820 uclamp_se_set(&tg->uclamp_req[UCLAMP_MAX], in uclamp_update_root_tg()
1879 result = -EINVAL; in sysctl_sched_uclamp_handler()
1924 * We don't need to hold task_rq_lock() when updating p->uclamp_* here in uclamp_fork()
1928 p->uclamp[clamp_id].active = false; in uclamp_fork()
1930 if (likely(!p->sched_reset_on_fork)) in uclamp_fork()
1934 uclamp_se_set(&p->uclamp_req[clamp_id], in uclamp_fork()
1947 struct uclamp_rq *uc_rq = rq->uclamp; in init_uclamp_rq()
1955 rq->uclamp_flags = UCLAMP_FLAG_IDLE; in init_uclamp_rq()
2005 raw_spin_lock_irq(&p->pi_lock); in get_wchan()
2006 state = READ_ONCE(p->__state); in get_wchan()
2008 if (state != TASK_RUNNING && state != TASK_WAKING && !p->on_rq) in get_wchan()
2010 raw_spin_unlock_irq(&p->pi_lock); in get_wchan()
2020 p->sched_class->enqueue_task(rq, p, flags); in enqueue_task()
2022 * Must be after ->enqueue_task() because ENQUEUE_DELAYED can clear in enqueue_task()
2023 * ->sched_delayed. in enqueue_task()
2053 * Must be before ->dequeue_task() because ->dequeue_task() can 'fail' in dequeue_task()
2054 * and mark the task ->sched_delayed. in dequeue_task()
2057 return p->sched_class->dequeue_task(rq, p, flags); in dequeue_task()
2069 WRITE_ONCE(p->on_rq, TASK_ON_RQ_QUEUED); in activate_task()
2070 ASSERT_EXCLUSIVE_WRITER(p->on_rq); in activate_task()
2077 WRITE_ONCE(p->on_rq, TASK_ON_RQ_MIGRATING); in deactivate_task()
2078 ASSERT_EXCLUSIVE_WRITER(p->on_rq); in deactivate_task()
2095 * task_curr - is this task currently executing on a CPU?
2106 * ->switching_to() is called with the pi_lock and rq_lock held and must not
2112 if (prev_class != p->sched_class && p->sched_class->switching_to) in check_class_changing()
2113 p->sched_class->switching_to(rq, p); in check_class_changing()
2117 * switched_from, switched_to and prio_changed must _NOT_ drop rq->lock,
2127 if (prev_class != p->sched_class) { in check_class_changed()
2128 if (prev_class->switched_from) in check_class_changed()
2129 prev_class->switched_from(rq, p); in check_class_changed()
2131 p->sched_class->switched_to(rq, p); in check_class_changed()
2132 } else if (oldprio != p->prio || dl_task(p)) in check_class_changed()
2133 p->sched_class->prio_changed(rq, p, oldprio); in check_class_changed()
2138 if (p->sched_class == rq->curr->sched_class) in wakeup_preempt()
2139 rq->curr->sched_class->wakeup_preempt(rq, p, flags); in wakeup_preempt()
2140 else if (sched_class_above(p->sched_class, rq->curr->sched_class)) in wakeup_preempt()
2147 if (task_on_rq_queued(rq->curr) && test_tsk_need_resched(rq->curr)) in wakeup_preempt()
2154 if (READ_ONCE(p->__state) & state) in __task_state_match()
2157 if (READ_ONCE(p->saved_state) & state) in __task_state_match()
2158 return -1; in __task_state_match()
2170 guard(raw_spinlock_irq)(&p->pi_lock); in task_state_match()
2175 * wait_task_inactive - wait for a thread to unschedule.
2200 * any task-queue locks at all. We'll only try to get in wait_task_inactive()
2208 * still, just relax and busy-wait without holding in wait_task_inactive()
2235 * When matching on p->saved_state, consider this task in wait_task_inactive()
2240 ncsw = p->nvcsw | LONG_MIN; /* sets MSB */ in wait_task_inactive()
2268 * yield - it could be a while. in wait_task_inactive()
2297 .new_mask = cpumask_of(rq->cpu), in migrate_disable_switch()
2301 if (likely(!p->migration_disabled)) in migrate_disable_switch()
2304 if (p->cpus_ptr != &p->cpus_mask) in migrate_disable_switch()
2317 if (p->migration_disabled) { in migrate_disable()
2320 * Warn about overflow half-way through the range. in migrate_disable()
2322 WARN_ON_ONCE((s16)p->migration_disabled < 0); in migrate_disable()
2324 p->migration_disabled++; in migrate_disable()
2329 this_rq()->nr_pinned++; in migrate_disable()
2330 p->migration_disabled = 1; in migrate_disable()
2338 .new_mask = &p->cpus_mask, in migrate_enable()
2347 if (WARN_ON_ONCE((s16)p->migration_disabled <= 0)) in migrate_enable()
2351 if (p->migration_disabled > 1) { in migrate_enable()
2352 p->migration_disabled--; in migrate_enable()
2361 if (p->cpus_ptr != &p->cpus_mask) in migrate_enable()
2369 p->migration_disabled = 0; in migrate_enable()
2370 this_rq()->nr_pinned--; in migrate_enable()
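/*
 * Illustrative sketch, not part of core.c: a typical migrate_disable()
 * section -- the task may still be preempted, but it cannot change CPU until
 * the matching migrate_enable() ('demo_cpu_stable_section' is made up):
 */
static void demo_cpu_stable_section(void)
{
        int cpu;

        migrate_disable();              /* preemptible, but pinned to this CPU */
        cpu = smp_processor_id();       /* stable until migrate_enable() */
        /* ... use per-CPU state belonging to 'cpu' ... */
        migrate_enable();
}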
2376 return rq->nr_pinned; in rq_has_pinned_tasks()
2380 * Per-CPU kthreads are allowed to run on !active && online CPUs, see
2394 if (!(p->flags & PF_KTHREAD)) in is_cpu_allowed()
2424 * move_queued_task - move a queued task to new rq.
2487 * migration_cpu_stop - this will be executed by a high-prio stopper thread
2494 struct set_affinity_pending *pending = arg->pending; in migration_cpu_stop()
2495 struct task_struct *p = arg->task; in migration_cpu_stop()
2512 raw_spin_lock(&p->pi_lock); in migration_cpu_stop()
2516 * If we were passed a pending, then ->stop_pending was set, thus in migration_cpu_stop()
2517 * p->migration_pending must have remained stable. in migration_cpu_stop()
2519 WARN_ON_ONCE(pending && pending != p->migration_pending); in migration_cpu_stop()
2523 * holding rq->lock, if p->on_rq == 0 it cannot get enqueued because in migration_cpu_stop()
2524 * we're holding p->pi_lock. in migration_cpu_stop()
2531 p->migration_pending = NULL; in migration_cpu_stop()
2534 if (cpumask_test_cpu(task_cpu(p), &p->cpus_mask)) in migration_cpu_stop()
2540 rq = __migrate_task(rq, &rf, p, arg->dest_cpu); in migration_cpu_stop()
2542 p->wake_cpu = arg->dest_cpu; in migration_cpu_stop()
2564 * ->pi_lock, so the allowed mask is stable - if it got in migration_cpu_stop()
2567 if (cpumask_test_cpu(task_cpu(p), p->cpus_ptr)) { in migration_cpu_stop()
2568 p->migration_pending = NULL; in migration_cpu_stop()
2574 * When migrate_enable() hits a rq mis-match we can't reliably in migration_cpu_stop()
2578 WARN_ON_ONCE(!pending->stop_pending); in migration_cpu_stop()
2582 &pending->arg, &pending->stop_work); in migration_cpu_stop()
2588 pending->stop_pending = false; in migration_cpu_stop()
2592 complete_all(&pending->done); in migration_cpu_stop()
2602 raw_spin_lock_irq(&p->pi_lock); in push_cpu_stop()
2609 p->migration_flags |= MDF_PUSH; in push_cpu_stop()
2613 p->migration_flags &= ~MDF_PUSH; in push_cpu_stop()
2615 if (p->sched_class->find_lock_rq) in push_cpu_stop()
2616 lowest_rq = p->sched_class->find_lock_rq(p, rq); in push_cpu_stop()
2624 set_task_cpu(p, lowest_rq->cpu); in push_cpu_stop()
2632 rq->push_busy = false; in push_cpu_stop()
2634 raw_spin_unlock_irq(&p->pi_lock); in push_cpu_stop()
2646 if (ctx->flags & (SCA_MIGRATE_ENABLE | SCA_MIGRATE_DISABLE)) { in set_cpus_allowed_common()
2647 p->cpus_ptr = ctx->new_mask; in set_cpus_allowed_common()
2651 cpumask_copy(&p->cpus_mask, ctx->new_mask); in set_cpus_allowed_common()
2652 p->nr_cpus_allowed = cpumask_weight(ctx->new_mask); in set_cpus_allowed_common()
2657 if (ctx->flags & SCA_USER) in set_cpus_allowed_common()
2658 swap(p->user_cpus_ptr, ctx->user_mask); in set_cpus_allowed_common()
2669 * supposed to change these variables while holding both rq->lock and in __do_set_cpus_allowed()
2670 * p->pi_lock. in __do_set_cpus_allowed()
2673 * accesses these variables under p->pi_lock and only does so after in __do_set_cpus_allowed()
2674 * smp_cond_load_acquire(&p->on_cpu, !VAL), and we're in __schedule() in __do_set_cpus_allowed()
2679 if (ctx->flags & SCA_MIGRATE_DISABLE) in __do_set_cpus_allowed()
2680 SCHED_WARN_ON(!p->on_cpu); in __do_set_cpus_allowed()
2682 lockdep_assert_held(&p->pi_lock); in __do_set_cpus_allowed()
2690 * holding rq->lock. in __do_set_cpus_allowed()
2698 p->sched_class->set_cpus_allowed(p, ctx); in __do_set_cpus_allowed()
2725 * Because this is called with p->pi_lock held, it is not possible in do_set_cpus_allowed()
2739 * Always clear dst->user_cpus_ptr first as their user_cpus_ptr's in dup_user_cpus_ptr()
2742 dst->user_cpus_ptr = NULL; in dup_user_cpus_ptr()
2749 if (data_race(!src->user_cpus_ptr)) in dup_user_cpus_ptr()
2754 return -ENOMEM; in dup_user_cpus_ptr()
2762 raw_spin_lock_irqsave(&src->pi_lock, flags); in dup_user_cpus_ptr()
2763 if (src->user_cpus_ptr) { in dup_user_cpus_ptr()
2764 swap(dst->user_cpus_ptr, user_mask); in dup_user_cpus_ptr()
2765 cpumask_copy(dst->user_cpus_ptr, src->user_cpus_ptr); in dup_user_cpus_ptr()
2767 raw_spin_unlock_irqrestore(&src->pi_lock, flags); in dup_user_cpus_ptr()
2779 swap(p->user_cpus_ptr, user_mask); in clear_user_cpus_ptr()
2797 * Migrate-Disable comes along and tramples all over our nice sandcastle.
2800 * Initial conditions: P0->cpus_mask = [0, 1]
2809 * its outermost migrate_enable() (i.e. it exits its Migrate-Disable region).
2822 * `--> <woken on migration completion>
2824 * Now the fun stuff: there may be several P1-like tasks, i.e. multiple
2826 * task p are serialized by p->pi_lock, which we can leverage: the one that
2827 * should come into effect at the end of the Migrate-Disable region is the last
2828 * one. This means we only need to track a single cpumask (i.e. p->cpus_mask),
2833 * __set_cpus_allowed_ptr() caller within a given Migrate-Disable region will
2837 * on the end of the Migrate-Disable region (i.e. outermost migrate_enable()).
2843 * Migrate-Disable. Consider:
2845 * Initial conditions: P0->cpus_mask = [0, 1]
2863 * p->migration_pending done with p->pi_lock held.
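/*
 * Illustrative sketch, not part of core.c: the caller side of the dance
 * described above -- set_cpus_allowed_ptr() typically blocks, via the
 * pending/completion machinery below, until any required migration out of a
 * Migrate-Disable region has finished ('demo_pin_to_cpu' is made up):
 */
static int demo_pin_to_cpu(struct task_struct *p, int cpu)
{
        return set_cpus_allowed_ptr(p, cpumask_of(cpu));
}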
2867 __releases(rq->lock) in affine_move_task()
2868 __releases(p->pi_lock) in affine_move_task()
2874 if (cpumask_test_cpu(task_cpu(p), &p->cpus_mask)) { in affine_move_task()
2878 (p->migration_flags & MDF_PUSH) && !rq->push_busy) { in affine_move_task()
2879 rq->push_busy = true; in affine_move_task()
2887 pending = p->migration_pending; in affine_move_task()
2888 if (pending && !pending->stop_pending) { in affine_move_task()
2889 p->migration_pending = NULL; in affine_move_task()
2896 stop_one_cpu_nowait(rq->cpu, push_cpu_stop, in affine_move_task()
2897 p, &rq->push_work); in affine_move_task()
2902 complete_all(&pending->done); in affine_move_task()
2908 /* serialized by p->pi_lock */ in affine_move_task()
2909 if (!p->migration_pending) { in affine_move_task()
2919 p->migration_pending = &my_pending; in affine_move_task()
2921 pending = p->migration_pending; in affine_move_task()
2922 refcount_inc(&pending->refs); in affine_move_task()
2929 * Serialized by p->pi_lock, so this is safe. in affine_move_task()
2931 pending->arg.dest_cpu = dest_cpu; in affine_move_task()
2934 pending = p->migration_pending; in affine_move_task()
2936 * - !MIGRATE_ENABLE: in affine_move_task()
2939 * - MIGRATE_ENABLE: in affine_move_task()
2949 return -EINVAL; in affine_move_task()
2952 if (task_on_cpu(rq, p) || READ_ONCE(p->__state) == TASK_WAKING) { in affine_move_task()
2956 * and have the stopper function handle it all race-free. in affine_move_task()
2958 stop_pending = pending->stop_pending; in affine_move_task()
2960 pending->stop_pending = true; in affine_move_task()
2963 p->migration_flags &= ~MDF_PUSH; in affine_move_task()
2969 &pending->arg, &pending->stop_work); in affine_move_task()
2981 if (!pending->stop_pending) { in affine_move_task()
2982 p->migration_pending = NULL; in affine_move_task()
2989 complete_all(&pending->done); in affine_move_task()
2992 wait_for_completion(&pending->done); in affine_move_task()
2994 if (refcount_dec_and_test(&pending->refs)) in affine_move_task()
2995 wake_up_var(&pending->refs); /* No UaF, just an address */ in affine_move_task()
3010 * Called with both p->pi_lock and rq->lock held; drops both before returning.
3016 __releases(rq->lock) in __set_cpus_allowed_ptr_locked()
3017 __releases(p->pi_lock) in __set_cpus_allowed_ptr_locked()
3021 bool kthread = p->flags & PF_KTHREAD; in __set_cpus_allowed_ptr_locked()
3030 * however, during cpu-hot-unplug, even these might get pushed in __set_cpus_allowed_ptr_locked()
3036 * set_cpus_allowed_common() and actually reset p->cpus_ptr. in __set_cpus_allowed_ptr_locked()
3041 if (!kthread && !cpumask_subset(ctx->new_mask, cpu_allowed_mask)) { in __set_cpus_allowed_ptr_locked()
3042 ret = -EINVAL; in __set_cpus_allowed_ptr_locked()
3047 * Must re-check here, to close a race against __kthread_bind(), in __set_cpus_allowed_ptr_locked()
3050 if ((ctx->flags & SCA_CHECK) && (p->flags & PF_NO_SETAFFINITY)) { in __set_cpus_allowed_ptr_locked()
3051 ret = -EINVAL; in __set_cpus_allowed_ptr_locked()
3055 if (!(ctx->flags & SCA_MIGRATE_ENABLE)) { in __set_cpus_allowed_ptr_locked()
3056 if (cpumask_equal(&p->cpus_mask, ctx->new_mask)) { in __set_cpus_allowed_ptr_locked()
3057 if (ctx->flags & SCA_USER) in __set_cpus_allowed_ptr_locked()
3058 swap(p->user_cpus_ptr, ctx->user_mask); in __set_cpus_allowed_ptr_locked()
3064 !cpumask_test_cpu(task_cpu(p), ctx->new_mask))) { in __set_cpus_allowed_ptr_locked()
3065 ret = -EBUSY; in __set_cpus_allowed_ptr_locked()
3075 dest_cpu = cpumask_any_and_distribute(cpu_valid_mask, ctx->new_mask); in __set_cpus_allowed_ptr_locked()
3077 ret = -EINVAL; in __set_cpus_allowed_ptr_locked()
3083 return affine_move_task(rq, p, rf, dest_cpu, ctx->flags); in __set_cpus_allowed_ptr_locked()
3110 if (p->user_cpus_ptr && in __set_cpus_allowed_ptr()
3111 !(ctx->flags & (SCA_USER | SCA_MIGRATE_ENABLE | SCA_MIGRATE_DISABLE)) && in __set_cpus_allowed_ptr()
3112 cpumask_and(rq->scratch_mask, ctx->new_mask, p->user_cpus_ptr)) in __set_cpus_allowed_ptr()
3113 ctx->new_mask = rq->scratch_mask; in __set_cpus_allowed_ptr()
3136 * -EINVAL.
3158 err = -EPERM; in restrict_cpus_allowed_ptr()
3163 err = -EINVAL; in restrict_cpus_allowed_ptr()
3176 * task_cpu_possible_mask() and point @p->user_cpus_ptr to a copy of the
3209 task_pid_nr(p), p->comm, in force_compatible_cpus_allowed_ptr()
3245 unsigned int state = READ_ONCE(p->__state); in set_task_cpu()
3251 WARN_ON_ONCE(state != TASK_RUNNING && state != TASK_WAKING && !p->on_rq); in set_task_cpu()
3254 * Migrating fair class task must have p->on_rq = TASK_ON_RQ_MIGRATING, in set_task_cpu()
3256 * time relying on p->on_rq. in set_task_cpu()
3259 p->sched_class == &fair_sched_class && in set_task_cpu()
3260 (p->on_rq && !task_on_rq_migrating(p))); in set_task_cpu()
3264 * The caller should hold either p->pi_lock or rq->lock, when changing in set_task_cpu()
3265 * a task's CPU. ->pi_lock for waking tasks, rq->lock for runnable tasks. in set_task_cpu()
3273 WARN_ON_ONCE(debug_locks && !(lockdep_is_held(&p->pi_lock) || in set_task_cpu()
3287 if (p->sched_class->migrate_task_rq) in set_task_cpu()
3288 p->sched_class->migrate_task_rq(p, new_cpu); in set_task_cpu()
3289 p->se.nr_migrations++; in set_task_cpu()
3325 p->wake_cpu = cpu; in __migrate_swap_task()
3339 if (!cpu_active(arg->src_cpu) || !cpu_active(arg->dst_cpu)) in migrate_swap_stop()
3340 return -EAGAIN; in migrate_swap_stop()
3342 src_rq = cpu_rq(arg->src_cpu); in migrate_swap_stop()
3343 dst_rq = cpu_rq(arg->dst_cpu); in migrate_swap_stop()
3345 guard(double_raw_spinlock)(&arg->src_task->pi_lock, &arg->dst_task->pi_lock); in migrate_swap_stop()
3348 if (task_cpu(arg->dst_task) != arg->dst_cpu) in migrate_swap_stop()
3349 return -EAGAIN; in migrate_swap_stop()
3351 if (task_cpu(arg->src_task) != arg->src_cpu) in migrate_swap_stop()
3352 return -EAGAIN; in migrate_swap_stop()
3354 if (!cpumask_test_cpu(arg->dst_cpu, arg->src_task->cpus_ptr)) in migrate_swap_stop()
3355 return -EAGAIN; in migrate_swap_stop()
3357 if (!cpumask_test_cpu(arg->src_cpu, arg->dst_task->cpus_ptr)) in migrate_swap_stop()
3358 return -EAGAIN; in migrate_swap_stop()
3360 __migrate_swap_task(arg->src_task, arg->dst_cpu); in migrate_swap_stop()
3361 __migrate_swap_task(arg->dst_task, arg->src_cpu); in migrate_swap_stop()
3373 int ret = -EINVAL; in migrate_swap()
3386 * These three tests are all lockless; this is OK since all of them in migrate_swap()
3387 * will be re-checked with proper locks held further down the line. in migrate_swap()
3392 if (!cpumask_test_cpu(arg.dst_cpu, arg.src_task->cpus_ptr)) in migrate_swap()
3395 if (!cpumask_test_cpu(arg.src_cpu, arg.dst_task->cpus_ptr)) in migrate_swap()
3407 * kick_process - kick a running thread to enter/exit the kernel
3408 * @p: the to-be-kicked thread
3411 * kernel-mode, without any delay. (to get signals handled.)
3430 * ->cpus_ptr is protected by both rq->lock and p->pi_lock
3434 * - cpu_active must be a subset of cpu_online
3436 * - on CPU-up we allow per-CPU kthreads on the online && !active CPU,
3441 * - on CPU-down we clear cpu_active() to mask the sched domains and
3460 * will return -1. There is no CPU on the node, and we should in select_fallback_rq()
3463 if (nid != -1) { in select_fallback_rq()
3475 for_each_cpu(dest_cpu, p->cpus_ptr) { in select_fallback_rq()
3493 * hold p->pi_lock and again violate locking order. in select_fallback_rq()
3513 if (p->mm && printk_ratelimit()) { in select_fallback_rq()
3515 task_pid_nr(p), p->comm, cpu); in select_fallback_rq()
3523 * The caller (fork, wakeup) owns p->pi_lock, ->cpus_ptr is stable.
3528 lockdep_assert_held(&p->pi_lock); in select_task_rq()
3530 if (p->nr_cpus_allowed > 1 && !is_migration_disabled(p)) { in select_task_rq()
3531 cpu = p->sched_class->select_task_rq(p, cpu, *wake_flags); in select_task_rq()
3534 cpu = cpumask_any(p->cpus_ptr); in select_task_rq()
3539 * to rely on ttwu() to place the task on a valid ->cpus_ptr in select_task_rq()
3544 * [ this allows ->select_task() to simply return task_cpu(p) and in select_task_rq()
3556 struct sched_param param = { .sched_priority = MAX_RT_PRIO - 1 }; in sched_set_stop_task()
3557 struct task_struct *old_stop = cpu_rq(cpu)->stop; in sched_set_stop_task()
3565 * much confusion -- but then, stop work should not in sched_set_stop_task()
3570 stop->sched_class = &stop_sched_class; in sched_set_stop_task()
3573 * The PI code calls rt_mutex_setprio() with ->pi_lock held to in sched_set_stop_task()
3579 * The stop task itself will never be part of the PI-chain, it in sched_set_stop_task()
3580 * never blocks, therefore that ->pi_lock recursion is safe. in sched_set_stop_task()
3581 * Tell lockdep about this by placing the stop->pi_lock in its in sched_set_stop_task()
3584 lockdep_set_class(&stop->pi_lock, &stop_pi_lock); in sched_set_stop_task()
3587 cpu_rq(cpu)->stop = stop; in sched_set_stop_task()
3594 old_stop->sched_class = &rt_sched_class; in sched_set_stop_task()
3620 if (cpu == rq->cpu) { in ttwu_stat()
3621 __schedstat_inc(rq->ttwu_local); in ttwu_stat()
3622 __schedstat_inc(p->stats.nr_wakeups_local); in ttwu_stat()
3626 __schedstat_inc(p->stats.nr_wakeups_remote); in ttwu_stat()
3629 for_each_domain(rq->cpu, sd) { in ttwu_stat()
3631 __schedstat_inc(sd->ttwu_wake_remote); in ttwu_stat()
3638 __schedstat_inc(p->stats.nr_wakeups_migrate); in ttwu_stat()
3641 __schedstat_inc(rq->ttwu_count); in ttwu_stat()
3642 __schedstat_inc(p->stats.nr_wakeups); in ttwu_stat()
3645 __schedstat_inc(p->stats.nr_wakeups_sync); in ttwu_stat()
3653 WRITE_ONCE(p->__state, TASK_RUNNING); in ttwu_do_wakeup()
3665 if (p->sched_contributes_to_load) in ttwu_do_activate()
3666 rq->nr_uninterruptible--; in ttwu_do_activate()
3675 if (p->in_iowait) { in ttwu_do_activate()
3677 atomic_dec(&task_rq(p)->nr_iowait); in ttwu_do_activate()
3686 if (p->sched_class->task_woken) { in ttwu_do_activate()
3689 * drop the rq->lock, hereafter rq is only used for statistics. in ttwu_do_activate()
3692 p->sched_class->task_woken(rq, p); in ttwu_do_activate()
3696 if (rq->idle_stamp) { in ttwu_do_activate()
3697 u64 delta = rq_clock(rq) - rq->idle_stamp; in ttwu_do_activate()
3698 u64 max = 2*rq->max_idle_balance_cost; in ttwu_do_activate()
3700 update_avg(&rq->avg_idle, delta); in ttwu_do_activate()
3702 if (rq->avg_idle > max) in ttwu_do_activate()
3703 rq->avg_idle = max; in ttwu_do_activate()
3705 rq->idle_stamp = 0; in ttwu_do_activate()
3724 * runnable, so all that needs doing is change p->state back to TASK_RUNNING in
3727 * By taking task_rq(p)->lock we serialize against schedule(), if @p->on_rq
3728 * then schedule() must still happen and p->state can be changed to
3744 if (p->se.sched_delayed) in ttwu_runnable()
3776 if (WARN_ON_ONCE(p->on_cpu)) in sched_ttwu_pending()
3777 smp_cond_load_acquire(&p->on_cpu, !VAL); in sched_ttwu_pending()
3782 ttwu_do_activate(rq, p, p->sched_remote_wakeup ? WF_MIGRATED : 0, &rf); in sched_ttwu_pending()
3787 * idle_cpu() does not observe a false-negative -- if it does, in sched_ttwu_pending()
3795 WRITE_ONCE(rq->ttwu_pending, 0); in sched_ttwu_pending()
3807 if (set_nr_if_polling(cpu_rq(cpu)->idle)) { in call_function_single_prep_ipi()
3825 p->sched_remote_wakeup = !!(wake_flags & WF_MIGRATED); in __ttwu_queue_wakelist()
3827 WRITE_ONCE(rq->ttwu_pending, 1); in __ttwu_queue_wakelist()
3828 __smp_call_single_queue(cpu, &p->wake_entry.llist); in __ttwu_queue_wakelist()
3836 if (is_idle_task(rcu_dereference(rq->curr))) { in wake_up_if_idle()
3838 if (is_idle_task(rq->curr)) in wake_up_if_idle()
3863 * Whether CPUs share cache resources, which means LLC on non-cluster
3893 if (!cpumask_test_cpu(cpu, p->cpus_ptr)) in ttwu_queue_cond()
3909 * the task activation to the idle (or soon-to-be-idle) CPU as in ttwu_queue_cond()
3913 * Note that we can only get here with (wakee) p->on_rq=0, in ttwu_queue_cond()
3914 * p->on_cpu can be whatever, we've done the dequeue, so in ttwu_queue_cond()
3915 * the wakee has been accounted out of ->nr_running. in ttwu_queue_cond()
3917 if (!cpu_rq(cpu)->nr_running) in ttwu_queue_cond()
4002 p->saved_state = TASK_RUNNING; in ttwu_state_match()
4008 * Notes on Program-Order guarantees on SMP systems.
4012 * The basic program-order guarantee on SMP systems is that when a task [t]
4013 * migrates, all its activity on its old CPU [c0] happens-before any subsequent
4018 * A) UNLOCK of the rq(c0)->lock scheduling out task t
4019 * B) migration for t is required to synchronize *both* rq(c0)->lock and
4020 * rq(c1)->lock (if not at the same time, then in that order).
4021 * C) LOCK of the rq(c1)->lock scheduling in task
4030 * LOCK rq(0)->lock
4031 * sched-out X
4032 * sched-in Y
4033 * UNLOCK rq(0)->lock
4035 * LOCK rq(0)->lock // orders against CPU0
4037 * UNLOCK rq(0)->lock
4039 * LOCK rq(1)->lock
4041 * UNLOCK rq(1)->lock
4043 * LOCK rq(1)->lock // orders against CPU2
4044 * sched-out Z
4045 * sched-in X
4046 * UNLOCK rq(1)->lock
4049 * BLOCKING -- aka. SLEEP + WAKEUP
4055 * 1) smp_store_release(X->on_cpu, 0) -- finish_task()
4056 * 2) smp_cond_load_acquire(!X->on_cpu) -- try_to_wake_up()
4062 * LOCK rq(0)->lock LOCK X->pi_lock
4064 * sched-out X
4065 * smp_store_release(X->on_cpu, 0);
4067 * smp_cond_load_acquire(&X->on_cpu, !VAL);
4068 * X->state = WAKING
4071 * LOCK rq(2)->lock
4073 * X->state = RUNNING
4074 * UNLOCK rq(2)->lock
4076 * LOCK rq(2)->lock // orders against CPU1
4077 * sched-out Z
4078 * sched-in X
4079 * UNLOCK rq(2)->lock
4081 * UNLOCK X->pi_lock
4082 * UNLOCK rq(0)->lock
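/*
 * Illustrative sketch, not part of core.c: the RELEASE/ACQUIRE pair the
 * diagram above relies on, reduced to a generic flag hand-off between a
 * finish_task()-like publisher and a try_to_wake_up()-like consumer
 * (all 'demo_*' names are made up):
 */
static int demo_data;
static int demo_flag;

static void demo_publish(void)
{
        demo_data = 42;                         /* every store before ...           */
        smp_store_release(&demo_flag, 1);       /* ... is ordered before this store */
}

static void demo_consume(void)
{
        smp_cond_load_acquire(&demo_flag, VAL == 1);    /* spin, then ACQUIRE */
        WARN_ON(demo_data != 42);               /* guaranteed to observe the 42 */
}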
4091 * try_to_wake_up - wake up a thread
4098 * If (@state & @p->state) @p->state = TASK_RUNNING.
4104 * It issues a full memory barrier before accessing @p->state, see the comment
4107 * Uses p->pi_lock to serialize against concurrent wake-ups.
4109 * Relies on p->pi_lock stabilizing:
4110 * - p->sched_class
4111 * - p->cpus_ptr
4112 * - p->sched_task_group
4115 * Tries really hard to only take one task_rq(p)->lock for performance.
4116 * Takes rq->lock in:
4117 * - ttwu_runnable() -- old rq, unavoidable, see comment there;
4118 * - ttwu_queue() -- new rq, for enqueue of the task;
4119 * - psi_ttwu_dequeue() -- much sadness :-( accounting will kill us.
4124 * Return: %true if @p->state changes (an actual wakeup was done),
4136 * We're waking current, this means 'p->on_rq' and 'task_cpu(p) in try_to_wake_up()
4138 * case the whole 'p->on_rq && ttwu_runnable()' case below in try_to_wake_up()
4146 * - we rely on Program-Order guarantees for all the ordering, in try_to_wake_up()
4147 * - we're serialized against set_special_state() by virtue of in try_to_wake_up()
4148 * it disabling IRQs (this allows not taking ->pi_lock). in try_to_wake_up()
4150 SCHED_WARN_ON(p->se.sched_delayed); in try_to_wake_up()
4162 * reordered with p->state check below. This pairs with smp_store_mb() in try_to_wake_up()
4165 scoped_guard (raw_spinlock_irqsave, &p->pi_lock) { in try_to_wake_up()
4173 * Ensure we load p->on_rq _after_ p->state, otherwise it would in try_to_wake_up()
4174 * be possible to, falsely, observe p->on_rq == 0 and get stuck in try_to_wake_up()
4178 * STORE p->on_rq = 1 LOAD p->state in try_to_wake_up()
4179 * UNLOCK rq->lock in try_to_wake_up()
4182 * LOCK rq->lock smp_rmb(); in try_to_wake_up()
4184 * UNLOCK rq->lock in try_to_wake_up()
4187 * STORE p->state = UNINTERRUPTIBLE LOAD p->on_rq in try_to_wake_up()
4189 * Pairs with the LOCK+smp_mb__after_spinlock() on rq->lock in in try_to_wake_up()
4195 if (READ_ONCE(p->on_rq) && ttwu_runnable(p, wake_flags)) in try_to_wake_up()
4200 * Ensure we load p->on_cpu _after_ p->on_rq, otherwise it would be in try_to_wake_up()
4201 * possible to, falsely, observe p->on_cpu == 0. in try_to_wake_up()
4203 * One must be running (->on_cpu == 1) in order to remove oneself in try_to_wake_up()
4207 * STORE p->on_cpu = 1 LOAD p->on_rq in try_to_wake_up()
4208 * UNLOCK rq->lock in try_to_wake_up()
4211 * LOCK rq->lock smp_rmb(); in try_to_wake_up()
4213 * STORE p->on_rq = 0 LOAD p->on_cpu in try_to_wake_up()
4215 * Pairs with the LOCK+smp_mb__after_spinlock() on rq->lock in in try_to_wake_up()
4218 * Form a control-dep-acquire with p->on_rq == 0 above, to ensure in try_to_wake_up()
4220 * care about its own p->state. See the comment in __schedule(). in try_to_wake_up()
4225 * We're doing the wakeup (@success == 1), they did a dequeue (p->on_rq in try_to_wake_up()
4226 * == 0), which means we need to do an enqueue, change p->state to in try_to_wake_up()
4227 * TASK_WAKING such that we can unlock p->pi_lock before doing the in try_to_wake_up()
4230 WRITE_ONCE(p->__state, TASK_WAKING); in try_to_wake_up()
4235 * which potentially sends an IPI instead of spinning on p->on_cpu to in try_to_wake_up()
4239 * Ensure we load task_cpu(p) after p->on_cpu: in try_to_wake_up()
4242 * STORE p->cpu = @cpu in try_to_wake_up()
4244 * LOCK rq->lock in try_to_wake_up()
4245 * smp_mb__after_spin_lock() smp_cond_load_acquire(&p->on_cpu) in try_to_wake_up()
4246 * STORE p->on_cpu = 1 LOAD p->cpu in try_to_wake_up()
4251 if (smp_load_acquire(&p->on_cpu) && in try_to_wake_up()
4264 smp_cond_load_acquire(&p->on_cpu, !VAL); in try_to_wake_up()
4266 cpu = select_task_rq(p, p->wake_cpu, &wake_flags); in try_to_wake_up()
4268 if (p->in_iowait) { in try_to_wake_up()
4270 atomic_dec(&task_rq(p)->nr_iowait); in try_to_wake_up()
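/*
 * Illustrative sketch, not part of core.c: the canonical sleeper/waker pair
 * that the ordering in try_to_wake_up() above is built for; 'demo_cond' and
 * the helpers are made up:
 */
static bool demo_cond;

static void demo_sleeper(void)
{
        for (;;) {
                set_current_state(TASK_UNINTERRUPTIBLE);
                if (READ_ONCE(demo_cond))
                        break;
                schedule();
        }
        __set_current_state(TASK_RUNNING);
}

static void demo_waker(struct task_struct *sleeper)
{
        WRITE_ONCE(demo_cond, true);    /* publish the condition first */
        wake_up_process(sleeper);       /* full barrier; sets TASK_RUNNING if still blocked */
}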
4292 unsigned int state = READ_ONCE(p->__state); in __task_needs_rq_lock()
4295 * Since pi->lock blocks try_to_wake_up(), we don't need rq->lock when in __task_needs_rq_lock()
4303 * Ensure we load p->on_rq after p->__state, otherwise it would be in __task_needs_rq_lock()
4304 * possible to, falsely, observe p->on_rq == 0. in __task_needs_rq_lock()
4309 if (p->on_rq) in __task_needs_rq_lock()
4318 smp_cond_load_acquire(&p->on_cpu, !VAL); in __task_needs_rq_lock()
4325 * task_call_func - Invoke a function on task in fixed state
4345 raw_spin_lock_irqsave(&p->pi_lock, rf.flags); in task_call_func()
4352 * - blocked and we're holding off wakeups (pi->lock) in task_call_func()
4353 * - woken, and we're holding off enqueue (rq->lock) in task_call_func()
4354 * - queued, and we're holding off schedule (rq->lock) in task_call_func()
4355 * - running, and we're holding off de-schedule (rq->lock) in task_call_func()
4357 * The called function (@func) can use: task_curr(), p->on_rq and in task_call_func()
4358 * p->__state to differentiate between these states. in task_call_func()
4365 raw_spin_unlock_irqrestore(&p->pi_lock, rf.flags); in task_call_func()
4370 * cpu_curr_snapshot - Return a snapshot of the currently running task
4402 * wake_up_process - Wake up a specific process
4431 p->on_rq = 0; in __sched_fork()
4433 p->se.on_rq = 0; in __sched_fork()
4434 p->se.exec_start = 0; in __sched_fork()
4435 p->se.sum_exec_runtime = 0; in __sched_fork()
4436 p->se.prev_sum_exec_runtime = 0; in __sched_fork()
4437 p->se.nr_migrations = 0; in __sched_fork()
4438 p->se.vruntime = 0; in __sched_fork()
4439 p->se.vlag = 0; in __sched_fork()
4440 INIT_LIST_HEAD(&p->se.group_node); in __sched_fork()
4443 SCHED_WARN_ON(p->se.sched_delayed); in __sched_fork()
4446 p->se.cfs_rq = NULL; in __sched_fork()
4451 memset(&p->stats, 0, sizeof(p->stats)); in __sched_fork()
4454 init_dl_entity(&p->dl); in __sched_fork()
4456 INIT_LIST_HEAD(&p->rt.run_list); in __sched_fork()
4457 p->rt.timeout = 0; in __sched_fork()
4458 p->rt.time_slice = sched_rr_timeslice; in __sched_fork()
4459 p->rt.on_rq = 0; in __sched_fork()
4460 p->rt.on_list = 0; in __sched_fork()
4463 init_scx_entity(&p->scx); in __sched_fork()
4467 INIT_HLIST_HEAD(&p->preempt_notifiers); in __sched_fork()
4471 p->capture_control = NULL; in __sched_fork()
4475 p->wake_entry.u_flags = CSD_TYPE_TTWU; in __sched_fork()
4476 p->migration_pending = NULL; in __sched_fork()
4510 pgdat->nbp_threshold = 0; in reset_memory_tiering()
4511 pgdat->nbp_th_nr_cand = node_page_state(pgdat, PGPROMOTE_CANDIDATE); in reset_memory_tiering()
4512 pgdat->nbp_th_start = jiffies_to_msecs(jiffies); in reset_memory_tiering()
4524 return -EPERM; in sysctl_numa_balancing()
4593 return -EPERM; in sysctl_schedstats()
4664 * fork()/clone()-time setup:
4674 p->__state = TASK_NEW; in sched_fork()
4679 p->prio = current->normal_prio; in sched_fork()
4686 if (unlikely(p->sched_reset_on_fork)) { in sched_fork()
4688 p->policy = SCHED_NORMAL; in sched_fork()
4689 p->static_prio = NICE_TO_PRIO(0); in sched_fork()
4690 p->rt_priority = 0; in sched_fork()
4691 } else if (PRIO_TO_NICE(p->static_prio) < 0) in sched_fork()
4692 p->static_prio = NICE_TO_PRIO(0); in sched_fork()
4694 p->prio = p->normal_prio = p->static_prio; in sched_fork()
4696 p->se.custom_slice = 0; in sched_fork()
4697 p->se.slice = sysctl_sched_base_slice; in sched_fork()
4703 p->sched_reset_on_fork = 0; in sched_fork()
4706 if (dl_prio(p->prio)) in sched_fork()
4707 return -EAGAIN; in sched_fork()
4711 if (rt_prio(p->prio)) { in sched_fork()
4712 p->sched_class = &rt_sched_class; in sched_fork()
4714 } else if (task_should_scx(p->policy)) { in sched_fork()
4715 p->sched_class = &ext_sched_class; in sched_fork()
4718 p->sched_class = &fair_sched_class; in sched_fork()
4721 init_entity_runnable_average(&p->se); in sched_fork()
4726 memset(&p->sched_info, 0, sizeof(p->sched_info)); in sched_fork()
4729 p->on_cpu = 0; in sched_fork()
4733 plist_node_init(&p->pushable_tasks, MAX_PRIO); in sched_fork()
4734 RB_CLEAR_NODE(&p->pushable_dl_tasks); in sched_fork()
4744 * Because we're not yet on the pid-hash, p->pi_lock isn't strictly in sched_cgroup_fork()
4747 raw_spin_lock_irqsave(&p->pi_lock, flags); in sched_cgroup_fork()
4751 tg = container_of(kargs->cset->subsys[cpu_cgrp_id], in sched_cgroup_fork()
4754 p->sched_task_group = tg; in sched_cgroup_fork()
4763 if (p->sched_class->task_fork) in sched_cgroup_fork()
4764 p->sched_class->task_fork(p); in sched_cgroup_fork()
4765 raw_spin_unlock_irqrestore(&p->pi_lock, flags); in sched_cgroup_fork()
4798 * wake_up_new_task - wake up a newly created task for the first time.
4810 raw_spin_lock_irqsave(&p->pi_lock, rf.flags); in wake_up_new_task()
4811 WRITE_ONCE(p->__state, TASK_RUNNING); in wake_up_new_task()
4815 * - cpus_ptr can change in the fork path in wake_up_new_task()
4816 * - any previously selected CPU might disappear through hotplug in wake_up_new_task()
4819 * as we're not fully set-up yet. in wake_up_new_task()
4821 p->recent_used_cpu = task_cpu(p); in wake_up_new_task()
4833 if (p->sched_class->task_woken) { in wake_up_new_task()
4835 * Nothing relies on rq->lock after this, so it's fine to in wake_up_new_task()
4839 p->sched_class->task_woken(rq, p); in wake_up_new_task()
4863 * preempt_notifier_register - tell me when current is being preempted & rescheduled
4871 hlist_add_head(¬ifier->link, ¤t->preempt_notifiers); in preempt_notifier_register()
4876 * preempt_notifier_unregister - no longer interested in preemption notifications
4883 hlist_del(¬ifier->link); in preempt_notifier_unregister()
4891 hlist_for_each_entry(notifier, &curr->preempt_notifiers, link) in __fire_sched_in_preempt_notifiers()
4892 notifier->ops->sched_in(notifier, raw_smp_processor_id()); in __fire_sched_in_preempt_notifiers()
4907 hlist_for_each_entry(notifier, &curr->preempt_notifiers, link) in __fire_sched_out_preempt_notifiers()
4908 notifier->ops->sched_out(notifier, next); in __fire_sched_out_preempt_notifiers()
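/*
 * Illustrative sketch, not part of core.c: hooking the notifier callbacks
 * fired above around every context switch of the current task
 * (all 'demo_*' names are made up):
 */
static void demo_sched_in(struct preempt_notifier *pn, int cpu)
{
        /* current has just been scheduled in on 'cpu' */
}

static void demo_sched_out(struct preempt_notifier *pn, struct task_struct *next)
{
        /* current is about to be scheduled out in favour of 'next' */
}

static struct preempt_ops demo_preempt_ops = {
        .sched_in       = demo_sched_in,
        .sched_out      = demo_sched_out,
};

static void demo_register(struct preempt_notifier *pn)
{
        preempt_notifier_inc();                         /* enable the notifier static key */
        preempt_notifier_init(pn, &demo_preempt_ops);
        preempt_notifier_register(pn);                  /* attaches to 'current' */
}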
4940 * See the smp_load_acquire(&p->on_cpu) case in ttwu() and in prepare_task()
4943 WRITE_ONCE(next->on_cpu, 1); in prepare_task()
4952 * p->on_cpu is cleared, the task can be moved to a different CPU. We in finish_task()
4956 * In particular, the load of prev->state in finish_task_switch() must in finish_task()
4961 smp_store_release(&prev->on_cpu, 0); in finish_task()
4975 func = (void (*)(struct rq *))head->func; in do_balance_callbacks()
4976 next = head->next; in do_balance_callbacks()
4977 head->next = NULL; in do_balance_callbacks()
4991 * that queued it (only later, when it's safe to drop rq->lock again),
4995 * a single test, namely: rq->balance_callback == NULL.
5005 struct balance_callback *head = rq->balance_callback; in __splice_balance_callbacks()
5014 * in the same rq->lock section. in __splice_balance_callbacks()
5022 rq->balance_callback = NULL; in __splice_balance_callbacks()
5062 * of the scheduler it's an obvious special-case), so we in prepare_lock_switch()
5066 spin_release(&__rq_lockp(rq)->dep_map, _THIS_IP_); in prepare_lock_switch()
5069 rq_lockp(rq)->owner = next; in prepare_lock_switch()
5077 * fix up the runqueue lock - which gets 'carried over' from in finish_lock_switch()
5080 spin_acquire(&__rq_lockp(rq)->dep_map, 0, 0, _THIS_IP_); in finish_lock_switch()
5100 if (unlikely(current->kmap_ctrl.idx)) in kmap_local_sched_out()
5108 if (unlikely(current->kmap_ctrl.idx)) in kmap_local_sched_in()
5114 * prepare_task_switch - prepare to switch tasks
5141 * finish_task_switch - clean up after a task-switch
5147 * and do any other architecture-specific cleanup actions.
5160 __releases(rq->lock) in finish_task_switch()
5163 struct mm_struct *mm = rq->prev_mm; in finish_task_switch()
5173 * raw_spin_lock_irq(&rq->lock) // 2 in finish_task_switch()
5179 current->comm, current->pid, preempt_count())) in finish_task_switch()
5182 rq->prev_mm = NULL; in finish_task_switch()
5186 * If a task dies, then it sets TASK_DEAD in tsk->state and calls in finish_task_switch()
5190 * We must observe prev->state before clearing prev->on_cpu (in in finish_task_switch()
5192 * running on another CPU and we could race with its RUNNING -> DEAD in finish_task_switch()
5195 prev_state = READ_ONCE(prev->__state); in finish_task_switch()
5217 * schedule between user->kernel->user threads without passing through in finish_task_switch()
5219 * rq->curr, before returning to userspace, so provide them here: in finish_task_switch()
5221 * - a full memory barrier for {PRIVATE,GLOBAL}_EXPEDITED, implicitly in finish_task_switch()
5223 * - a sync_core for SYNC_CORE. in finish_task_switch()
5231 if (prev->sched_class->task_dead) in finish_task_switch()
5232 prev->sched_class->task_dead(prev); in finish_task_switch()
5244 * schedule_tail - first thing a freshly forked thread must call.
5248 __releases(rq->lock) in schedule_tail()
5254 * finish_task_switch() will drop rq->lock() and lower preempt_count in schedule_tail()
5262 if (current->set_child_tid) in schedule_tail()
5263 put_user(task_pid_vnr(current), current->set_child_tid); in schedule_tail()
5269 * context_switch - switch to the new MM and the new thread's register state.
5285 * kernel -> kernel lazy + transfer active in context_switch()
5286 * user -> kernel lazy + mmgrab_lazy_tlb() active in context_switch()
5288 * kernel -> user switch + mmdrop_lazy_tlb() active in context_switch()
5289 * user -> user switch in context_switch()
5294 if (!next->mm) { // to kernel in context_switch()
5295 enter_lazy_tlb(prev->active_mm, next); in context_switch()
5297 next->active_mm = prev->active_mm; in context_switch()
5298 if (prev->mm) // from user in context_switch()
5299 mmgrab_lazy_tlb(prev->active_mm); in context_switch()
5301 prev->active_mm = NULL; in context_switch()
5303 membarrier_switch_mm(rq, prev->active_mm, next->mm); in context_switch()
5306 * rq->curr / membarrier_switch_mm() and returning to userspace. in context_switch()
5309 * case 'prev->active_mm == next->mm' through in context_switch()
5312 switch_mm_irqs_off(prev->active_mm, next->mm, next); in context_switch()
5313 lru_gen_use_mm(next->mm); in context_switch()
5315 if (!prev->mm) { // from kernel in context_switch()
5317 rq->prev_mm = prev->active_mm; in context_switch()
5318 prev->active_mm = NULL; in context_switch()
5345 sum += cpu_rq(i)->nr_running; in nr_running()
5354 * preemption, thus the result might have a time-of-check-to-time-of-use
5357 * - from a non-preemptible section (of course)
5359 * - from a thread that is bound to a single CPU
5361 * - in a loop with very short iterations (e.g. a polling loop)
5365 return raw_rq()->nr_running == 1; in single_task_running()
5371 return cpu_rq(cpu)->nr_switches; in nr_context_switches_cpu()
5380 sum += cpu_rq(i)->nr_switches; in nr_context_switches()
5388 * for a CPU that has IO-wait which might not even end up running the task when
5394 return atomic_read(&cpu_rq(cpu)->nr_iowait); in nr_iowait_cpu()
5398 * IO-wait accounting, and how it's mostly bollocks (on SMP).
5400 * The idea behind IO-wait account is to account the idle time that we could
5402 * storage performance, we'd have a proportional reduction in IO-wait time.
5405 * idle time as IO-wait, because if the storage were faster, it could've been
5412 * CPU will have IO-wait accounted, while the other has regular idle. Even
5416 * This means, that when looking globally, the current IO-wait accounting on
5422 * blocked on. This means the per CPU IO-wait number is meaningless.
5440 * sched_exec - execve() is a valuable balancing opportunity, because at
5449 scoped_guard (raw_spinlock_irqsave, &p->pi_lock) { in sched_exec()
5450 dest_cpu = p->sched_class->select_task_rq(p, task_cpu(p), WF_EXEC); in sched_exec()
5472 * and its field curr->exec_start; when called from task_sched_runtime(),
5479 struct sched_entity *curr = p->se.cfs_rq->curr; in prefetch_curr_exec_start()
5481 struct sched_entity *curr = task_rq(p)->cfs.curr; in prefetch_curr_exec_start()
5484 prefetch(&curr->exec_start); in prefetch_curr_exec_start()
5500 * 64-bit doesn't need locks to atomically read a 64-bit value. in task_sched_runtime()
5502 * Reading ->on_cpu is racy, but this is OK. in task_sched_runtime()
5506 * indistinguishable from the read occurring a few cycles earlier. in task_sched_runtime()
5507 * If we see ->on_cpu without ->on_rq, the task is leaving, and has in task_sched_runtime()
5510 if (!p->on_cpu || !task_on_rq_queued(p)) in task_sched_runtime()
5511 return p->se.sum_exec_runtime; in task_sched_runtime()
5516 * Must be ->curr _and_ ->on_rq. If dequeued, we would in task_sched_runtime()
5517 * project cycles that may never be accounted to this in task_sched_runtime()
5523 p->sched_class->update_curr(rq); in task_sched_runtime()
5525 ns = p->se.sum_exec_runtime; in task_sched_runtime()
5547 if (!rq->last_seen_need_resched_ns) { in cpu_resched_latency()
5548 rq->last_seen_need_resched_ns = now; in cpu_resched_latency()
5549 rq->ticks_without_resched = 0; in cpu_resched_latency()
5553 rq->ticks_without_resched++; in cpu_resched_latency()
5554 resched_latency = now - rq->last_seen_need_resched_ns; in cpu_resched_latency()
5600 curr = rq->curr; in sched_tick()
5606 curr->sched_class->task_tick(rq, curr, 0); in sched_tick()
5621 if (curr->flags & PF_WQ_WORKER) in sched_tick()
5626 rq->idle_balance = idle_cpu(cpu); in sched_tick()
5639 /* Values for ->state, see diagram below. */
5645 * State diagram for ->state:
5654 * +--TICK_SCHED_REMOTE_OFFLINING
5673 int cpu = twork->cpu; in sched_tick_remote()
5681 * statistics and checks timeslices in a time-independent way, regardless in sched_tick_remote()
5686 struct task_struct *curr = rq->curr; in sched_tick_remote()
5696 u64 delta = rq_clock_task(rq) - curr->se.exec_start; in sched_tick_remote()
5699 curr->sched_class->task_tick(rq, curr, 0); in sched_tick_remote()
5711 os = atomic_fetch_add_unless(&twork->state, -1, TICK_SCHED_REMOTE_RUNNING); in sched_tick_remote()
5728 os = atomic_xchg(&twork->state, TICK_SCHED_REMOTE_RUNNING); in sched_tick_start()
5731 twork->cpu = cpu; in sched_tick_start()
5732 INIT_DELAYED_WORK(&twork->work, sched_tick_remote); in sched_tick_start()
5733 queue_delayed_work(system_unbound_wq, &twork->work, HZ); in sched_tick_start()
5749 /* There cannot be competing actions, but don't rely on stop-machine. */ in sched_tick_stop()
5750 os = atomic_xchg(&twork->state, TICK_SCHED_REMOTE_OFFLINING); in sched_tick_stop()
5779 current->preempt_disable_ip = ip; in preempt_latency_start()
5800 PREEMPT_MASK - 10); in preempt_count_add()
5847 return p->preempt_disable_ip; in get_preempt_disable_ip()
5865 prev->comm, prev->pid, preempt_count()); in __schedule_bug()
5882 * Various schedule()-time debugging checks and statistics:
5895 if (!preempt && READ_ONCE(prev->__state) && prev->non_block_count) { in schedule_debug()
5896 printk(KERN_ERR "BUG: scheduling in a non-blocking section: %s/%d/%i\n", in schedule_debug()
5897 prev->comm, prev->pid, prev->non_block_count); in schedule_debug()
5912 schedstat_inc(this_rq()->sched_count); in schedule_debug()
5918 const struct sched_class *start_class = prev->sched_class; in prev_balance()
5928 rq->scx.flags |= SCX_RQ_BAL_PENDING; in prev_balance()
5936 * that when we release the rq->lock the task is in the same in prev_balance()
5937 * state as before we took rq->lock. in prev_balance()
5943 if (class->balance && class->balance(rq, prev, rf)) in prev_balance()
5949 * Pick up the highest-prio task:
5957 rq->dl_server = NULL; in __pick_next_task()
5968 if (likely(!sched_class_above(prev->sched_class, &fair_sched_class) && in __pick_next_task()
5969 rq->nr_running == rq->cfs.h_nr_running)) { in __pick_next_task()
5988 if (class->pick_next_task) { in __pick_next_task()
5989 p = class->pick_next_task(rq, prev); in __pick_next_task()
5993 p = class->pick_task(rq); in __pick_next_task()
6007 return (task_rq(t)->idle == t); in is_task_rq_idle()
6012 return is_task_rq_idle(a) || (a->core_cookie == cookie); in cookie_equals()
6020 return a->core_cookie == b->core_cookie; in cookie_match()
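/*
 * Editor's usage sketch, not part of core.c: the cookies compared above are
 * created from user-space with prctl(2); e.g. tagging the calling process so
 * that its threads only ever share an SMT core with each other:
 *
 *	prctl(PR_SCHED_CORE, PR_SCHED_CORE_CREATE, 0,
 *	      PR_SCHED_CORE_SCOPE_THREAD_GROUP, 0);
 */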
6028 rq->dl_server = NULL; in pick_task()
6031 p = class->pick_task(rq); in pick_task()
6049 bool core_clock_updated = (rq == rq->core); in pick_next_task()
6060 /* Stopper task is switching into idle, no need core-wide selection. */ in pick_next_task()
6067 rq->core_pick = NULL; in pick_next_task()
6068 rq->core_dl_server = NULL; in pick_next_task()
6077 * rq->core_pick can be NULL if no selection was made for a CPU because in pick_next_task()
6078 * it was either offline or went offline during a sibling's core-wide in pick_next_task()
6079 * selection. In this case, do a core-wide selection. in pick_next_task()
6081 if (rq->core->core_pick_seq == rq->core->core_task_seq && in pick_next_task()
6082 rq->core->core_pick_seq != rq->core_sched_seq && in pick_next_task()
6083 rq->core_pick) { in pick_next_task()
6084 WRITE_ONCE(rq->core_sched_seq, rq->core->core_pick_seq); in pick_next_task()
6086 next = rq->core_pick; in pick_next_task()
6087 rq->dl_server = rq->core_dl_server; in pick_next_task()
6088 rq->core_pick = NULL; in pick_next_task()
6089 rq->core_dl_server = NULL; in pick_next_task()
6096 need_sync = !!rq->core->core_cookie; in pick_next_task()
6099 rq->core->core_cookie = 0UL; in pick_next_task()
6100 if (rq->core->core_forceidle_count) { in pick_next_task()
6102 update_rq_clock(rq->core); in pick_next_task()
6107 rq->core->core_forceidle_start = 0; in pick_next_task()
6108 rq->core->core_forceidle_count = 0; in pick_next_task()
6109 rq->core->core_forceidle_occupation = 0; in pick_next_task()
6115 * core->core_task_seq, core->core_pick_seq, rq->core_sched_seq in pick_next_task()
6124 rq->core->core_task_seq++; in pick_next_task()
6132 if (!next->core_cookie) { in pick_next_task()
6133 rq->core_pick = NULL; in pick_next_task()
6134 rq->core_dl_server = NULL; in pick_next_task()
6149 * Tie-break prio towards the current CPU in pick_next_task()
6159 if (i != cpu && (rq_i != rq->core || !core_clock_updated)) in pick_next_task()
6162 rq_i->core_pick = p = pick_task(rq_i); in pick_next_task()
6163 rq_i->core_dl_server = rq_i->dl_server; in pick_next_task()
6169 cookie = rq->core->core_cookie = max->core_cookie; in pick_next_task()
6177 p = rq_i->core_pick; in pick_next_task()
6187 rq_i->core_pick = p; in pick_next_task()
6188 rq_i->core_dl_server = NULL; in pick_next_task()
6190 if (p == rq_i->idle) { in pick_next_task()
6191 if (rq_i->nr_running) { in pick_next_task()
6192 rq->core->core_forceidle_count++; in pick_next_task()
6194 rq->core->core_forceidle_seq++; in pick_next_task()
6201 if (schedstat_enabled() && rq->core->core_forceidle_count) { in pick_next_task()
6202 rq->core->core_forceidle_start = rq_clock(rq->core); in pick_next_task()
6203 rq->core->core_forceidle_occupation = occ; in pick_next_task()
6206 rq->core->core_pick_seq = rq->core->core_task_seq; in pick_next_task()
6207 next = rq->core_pick; in pick_next_task()
6208 rq->core_sched_seq = rq->core->core_pick_seq; in pick_next_task()
6216 * NOTE: L1TF -- at this point we're no longer running the old task and in pick_next_task()
6218 * their task. This ensures there is no inter-sibling overlap between in pick_next_task()
6219 * non-matching user state. in pick_next_task()
6228 * picked for it. That's Ok - it will pick tasks for itself, in pick_next_task()
6231 if (!rq_i->core_pick) in pick_next_task()
6235 * Update for new !FI->FI transitions, or if continuing to be in !FI: in pick_next_task()
6242 if (!(fi_before && rq->core->core_forceidle_count)) in pick_next_task()
6243 task_vruntime_update(rq_i, rq_i->core_pick, !!rq->core->core_forceidle_count); in pick_next_task()
6245 rq_i->core_pick->core_occupation = occ; in pick_next_task()
6248 rq_i->core_pick = NULL; in pick_next_task()
6249 rq_i->core_dl_server = NULL; in pick_next_task()
6254 WARN_ON_ONCE(!cookie_match(next, rq_i->core_pick)); in pick_next_task()
6256 if (rq_i->curr == rq_i->core_pick) { in pick_next_task()
6257 rq_i->core_pick = NULL; in pick_next_task()
6258 rq_i->core_dl_server = NULL; in pick_next_task()
6267 if (rq->core->core_forceidle_count && next == rq->idle) in pick_next_task()
6283 cookie = dst->core->core_cookie; in try_steal_cookie()
6287 if (dst->curr != dst->idle) in try_steal_cookie()
6295 if (p == src->core_pick || p == src->curr) in try_steal_cookie()
6301 if (p->core_occupation > dst->idle->core_occupation) in try_steal_cookie()
6372 if (!rq->core->core_cookie) in queue_core_balance()
6375 if (!rq->nr_running) /* not forced idle */ in queue_core_balance()
6378 queue_balance_callback(rq, &per_cpu(core_balance_head, rq->cpu), sched_core_balance); in queue_core_balance()
6382 sched_core_lock(*_T->lock, &_T->flags),
6383 sched_core_unlock(*_T->lock, &_T->flags),
6394 WARN_ON_ONCE(rq->core != rq); in sched_core_cpu_starting()
6405 if (rq->core == rq) { in sched_core_cpu_starting()
6419 rq->core = core_rq; in sched_core_cpu_starting()
6421 WARN_ON_ONCE(rq->core != core_rq); in sched_core_cpu_starting()
6435 WARN_ON_ONCE(rq->core != rq); in sched_core_cpu_deactivate()
6440 if (rq->core != rq) in sched_core_cpu_deactivate()
6455 core_rq->core_task_seq = rq->core_task_seq; in sched_core_cpu_deactivate()
6456 core_rq->core_pick_seq = rq->core_pick_seq; in sched_core_cpu_deactivate()
6457 core_rq->core_cookie = rq->core_cookie; in sched_core_cpu_deactivate()
6458 core_rq->core_forceidle_count = rq->core_forceidle_count; in sched_core_cpu_deactivate()
6459 core_rq->core_forceidle_seq = rq->core_forceidle_seq; in sched_core_cpu_deactivate()
6460 core_rq->core_forceidle_occupation = rq->core_forceidle_occupation; in sched_core_cpu_deactivate()
6467 core_rq->core_forceidle_start = 0; in sched_core_cpu_deactivate()
6472 rq->core = core_rq; in sched_core_cpu_deactivate()
6480 if (rq->core != rq) in sched_core_cpu_dying()
6481 rq->core = rq; in sched_core_cpu_dying()
6504 #define SM_IDLE (-1)
6523 * task to the run-queue and that's it.
6525 * Now, if the new task added to the run-queue preempts the current
6529 * - If the kernel is preemptible (CONFIG_PREEMPTION=y):
6531 * - in syscall or exception context, at the next outermost
6535 * - in IRQ context, return from interrupt-handler to
6538 * - If the kernel is not preemptible (CONFIG_PREEMPTION is not set)
6541 * - cond_resched() call
6542 * - explicit schedule() call
6543 * - return from syscall or exception to user-space
6544 * - return from interrupt-handler to user-space
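/*
 * Editor's illustrative sketch, not part of core.c: the canonical shape of an
 * "explicit schedule() call" from the list above -- publish a sleeping state,
 * re-check the wakeup condition, then schedule. 'cond' is hypothetical.
 */
static void wait_for_condition_sketch(bool *cond)
{
	for (;;) {
		set_current_state(TASK_INTERRUPTIBLE);
		if (READ_ONCE(*cond))
			break;
		schedule();
	}
	__set_current_state(TASK_RUNNING);
}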
6565 prev = rq->curr; in __schedule()
6576 * Make sure that signal_pending_state()->signal_pending() below in __schedule()
6583 * LOCK rq->lock LOCK p->pi_state in __schedule()
6585 * if (signal_pending_state()) if (p->state & @state) in __schedule()
6588 * after coming from user-space, before storing to rq->curr; this in __schedule()
6596 rq->clock_update_flags <<= 1; in __schedule()
6598 rq->clock_update_flags = RQCF_UPDATED; in __schedule()
6600 switch_count = &prev->nivcsw; in __schedule()
6606 * We must load prev->state once (task_struct::state is volatile), such in __schedule()
6609 prev_state = READ_ONCE(prev->__state); in __schedule()
6612 if (!rq->nr_running && !scx_enabled()) { in __schedule()
6618 WRITE_ONCE(prev->__state, TASK_RUNNING); in __schedule()
6622 prev->sched_contributes_to_load = in __schedule()
6632 * prev_state = prev->state; if (p->on_rq && ...) in __schedule()
6634 * p->on_rq = 0; smp_acquire__after_ctrl_dep(); in __schedule()
6635 * p->state = TASK_WAKING in __schedule()
6639 * After this, schedule() must not care about p->state any more. in __schedule()
6644 switch_count = &prev->nvcsw; in __schedule()
6652 rq->last_seen_need_resched_ns = 0; in __schedule()
6656 rq->nr_switches++; in __schedule()
6658 * RCU users of rcu_dereference(rq->curr) may not see in __schedule()
6661 RCU_INIT_POINTER(rq->curr, next); in __schedule()
6665 * rq->curr, before returning to user-space. in __schedule()
6669 * - mm ? switch_mm() : mmdrop() for x86, s390, sparc, PowerPC, in __schedule()
6670 * RISC-V. switch_mm() relies on membarrier_arch_switch_mm() in __schedule()
6671 * on PowerPC and on RISC-V. in __schedule()
6672 * - finish_lock_switch() for weakly-ordered in __schedule()
6674 * - switch_to() for arm64 (weakly-ordered, spin_unlock in __schedule()
6680 * On RISC-V, this barrier pairing is also needed for the in __schedule()
6707 current->flags |= PF_NOFREEZE; in do_task_dead()
6712 /* Avoid "noreturn function does return" - but don't continue if BUG() is a NOP: */ in do_task_dead()
6724 * will use a blocking primitive -- which would lead to recursion. in sched_submit_work()
6728 task_flags = tsk->flags; in sched_submit_work()
6743 SCHED_WARN_ON(current->__state & TASK_RTLOCK_WAIT); in sched_submit_work()
6749 blk_flush_plug(tsk->plug, true); in sched_submit_work()
6756 if (tsk->flags & (PF_WQ_WORKER | PF_IO_WORKER | PF_BLOCK_TS)) { in sched_update_worker()
6757 if (tsk->flags & PF_BLOCK_TS) in sched_update_worker()
6759 if (tsk->flags & PF_WQ_WORKER) in sched_update_worker()
6761 else if (tsk->flags & PF_IO_WORKER) in sched_update_worker()
6780 lockdep_assert(!tsk->sched_rt_mutex); in schedule()
6792 * state (have scheduled out non-voluntarily) by making sure that all
6795 * (schedule out non-voluntarily).
6809 WARN_ON_ONCE(current->__state); in schedule_idle()
6835 * schedule_preempt_disabled - called with preemption disabled
6885 * This is the entry point to schedule() from in-kernel preemption
6891 * If there is a non-zero preempt_count or interrupts are disabled, in preempt_schedule()
6923 * preempt_schedule_notrace - preempt_schedule called by tracing
7027 return try_to_wake_up(curr->private, mode, wake_flags); in default_wake_function()
7051 * bit-fields. Since it's a local thing, use int. Keep the generic sounding
7059 lockdep_assert(!fetch_and_set(current->sched_rt_mutex, 1)); in rt_mutex_pre_schedule()
7065 lockdep_assert(current->sched_rt_mutex); in rt_mutex_schedule()
7072 lockdep_assert(fetch_and_set(current->sched_rt_mutex, 0)); in rt_mutex_post_schedule()
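/*
 * Editor's sketch, not part of core.c: the calling convention the three
 * helpers above enforce (via the lockdep asserts on ->sched_rt_mutex) for a
 * blocking rtmutex path. 'rt_mutex_owner_released' is hypothetical.
 */
static void rt_mutex_block_sketch(bool (*rt_mutex_owner_released)(void))
{
	rt_mutex_pre_schedule();
	while (!rt_mutex_owner_released())
		rt_mutex_schedule();		/* asserts ->sched_rt_mutex is set */
	rt_mutex_post_schedule();
}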
7076 * rt_mutex_setprio - set the current priority of a task
7081 * not touch ->normal_prio like __setscheduler().
7094 /* XXX used to be waiter->prio, not waiter->task->prio */ in rt_mutex_setprio()
7095 prio = __rt_effective_prio(pi_task, p->normal_prio); in rt_mutex_setprio()
7100 if (p->pi_top_task == pi_task && prio == p->prio && !dl_prio(prio)) in rt_mutex_setprio()
7106 * Set under pi_lock && rq->lock, such that the value can be used under in rt_mutex_setprio()
7111 * ensure a task is de-boosted (pi_task is set to NULL) before the in rt_mutex_setprio()
7113 * points to a blocked task -- which guarantees the task is present. in rt_mutex_setprio()
7115 p->pi_top_task = pi_task; in rt_mutex_setprio()
7120 if (prio == p->prio && !dl_prio(prio)) in rt_mutex_setprio()
7124 * Idle task boosting is a no-no in general. There is one in rt_mutex_setprio()
7128 * the timer wheel base->lock on the CPU and another CPU wants in rt_mutex_setprio()
7135 if (unlikely(p == rq->idle)) { in rt_mutex_setprio()
7136 WARN_ON(p != rq->curr); in rt_mutex_setprio()
7137 WARN_ON(p->pi_blocked_on); in rt_mutex_setprio()
7142 oldprio = p->prio; in rt_mutex_setprio()
7147 prev_class = p->sched_class; in rt_mutex_setprio()
7148 next_class = __setscheduler_class(p->policy, prio); in rt_mutex_setprio()
7150 if (prev_class != next_class && p->se.sched_delayed) in rt_mutex_setprio()
7162 * 1. -rt task is running and holds mutex A in rt_mutex_setprio()
7163 * --> -dl task blocks on mutex A in rt_mutex_setprio()
7165 * 2. -dl task is running and holds mutex A in rt_mutex_setprio()
7166 * --> -dl task blocks on mutex A and could preempt the in rt_mutex_setprio()
7170 if (!dl_prio(p->normal_prio) || in rt_mutex_setprio()
7171 (pi_task && dl_prio(pi_task->prio) && in rt_mutex_setprio()
7172 dl_entity_preempt(&pi_task->dl, &p->dl))) { in rt_mutex_setprio()
7173 p->dl.pi_se = pi_task->dl.pi_se; in rt_mutex_setprio()
7176 p->dl.pi_se = &p->dl; in rt_mutex_setprio()
7180 p->dl.pi_se = &p->dl; in rt_mutex_setprio()
7185 p->dl.pi_se = &p->dl; in rt_mutex_setprio()
7187 p->rt.timeout = 0; in rt_mutex_setprio()
7190 p->sched_class = next_class; in rt_mutex_setprio()
7191 p->prio = prio; in rt_mutex_setprio()
7221 * In preemptible kernels, ->rcu_read_lock_nesting tells the tick in __cond_resched()
7222 * whether the current CPU is in an RCU read-side critical section, in __cond_resched()
7224 * in kernel context. In contrast, in non-preemptible kernels, in __cond_resched()
7225 * RCU readers leave no in-memory hints, which means that CPU-bound in __cond_resched()
7273 * __cond_resched_lock() - if a reschedule is pending, drop the given lock,
7276 * This works OK both with and without CONFIG_PREEMPTION. We do strange low-level
7337 #include <linux/entry-common.h>
7349 * cond_resched <- __cond_resched
7350 * might_resched <- RET0
7351 * preempt_schedule <- NOP
7352 * preempt_schedule_notrace <- NOP
7353 * irqentry_exit_cond_resched <- NOP
7356 * cond_resched <- __cond_resched
7357 * might_resched <- __cond_resched
7358 * preempt_schedule <- NOP
7359 * preempt_schedule_notrace <- NOP
7360 * irqentry_exit_cond_resched <- NOP
7363 * cond_resched <- RET0
7364 * might_resched <- RET0
7365 * preempt_schedule <- preempt_schedule
7366 * preempt_schedule_notrace <- preempt_schedule_notrace
7367 * irqentry_exit_cond_resched <- irqentry_exit_cond_resched
7371 preempt_dynamic_undefined = -1,
7390 return -EINVAL; in sched_dynamic_mode()
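/*
 * Editor's sketch, not part of core.c: the table above is selected by name,
 * e.g. via the "preempt=" boot parameter. A minimal string -> mode mapping
 * under that assumption (the real sched_dynamic_mode() also has to cope with
 * config-dependent defaults):
 */
static int sched_dynamic_mode_sketch(const char *str)
{
	if (!strcmp(str, "none"))
		return preempt_dynamic_none;
	if (!strcmp(str, "voluntary"))
		return preempt_dynamic_voluntary;
	if (!strcmp(str, "full"))
		return preempt_dynamic_full;
	return -EINVAL;
}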
7409 * Avoid {NONE,VOLUNTARY} -> FULL transitions from ever ending up in in __sched_dynamic_update()
7543 int old_iowait = current->in_iowait; in io_schedule_prepare()
7545 current->in_iowait = 1; in io_schedule_prepare()
7546 blk_flush_plug(current->plug, true); in io_schedule_prepare()
7552 current->in_iowait = token; in io_schedule_finish()
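/*
 * Editor's illustrative sketch, not part of core.c: how the prepare/finish
 * pair above brackets an open-coded IO sleep so it shows up in the IO-wait
 * accounting discussed earlier. 'done' is a hypothetical completion.
 */
static void wait_for_io_sketch(struct completion *done)
{
	int token = io_schedule_prepare();

	wait_for_completion(done);	/* sleeps with current->in_iowait set */
	io_schedule_finish(token);
}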
7556 * This task is about to go to sleep on IO. Increment rq->nr_iowait so
7590 pr_info("task:%-15.15s state:%c", p->comm, task_state_to_char(p)); in sched_show_task()
7598 ppid = task_pid_nr(rcu_dereference(p->real_parent)); in sched_show_task()
7600 pr_cont(" stack:%-5lu pid:%-5d tgid:%-5d ppid:%-6d flags:0x%08lx\n", in sched_show_task()
7615 unsigned int state = READ_ONCE(p->__state); in state_filter_match()
7643 * reset the NMI-timeout, listing all files on a slow in show_state_filter()
7668 * init_idle - set up an idle thread for a given CPU
7688 raw_spin_lock_irqsave(&idle->pi_lock, flags); in init_idle()
7691 idle->__state = TASK_RUNNING; in init_idle()
7692 idle->se.exec_start = sched_clock(); in init_idle()
7695 * look like a proper per-CPU kthread. in init_idle()
7697 idle->flags |= PF_KTHREAD | PF_NO_SETAFFINITY; in init_idle()
7711 * holding rq->lock, the CPU isn't yet set to this CPU so the in init_idle()
7715 * use task_rq_lock() here and obtain the other rq->lock. in init_idle()
7723 rq->idle = idle; in init_idle()
7724 rcu_assign_pointer(rq->curr, idle); in init_idle()
7725 idle->on_rq = TASK_ON_RQ_QUEUED; in init_idle()
7727 idle->on_cpu = 1; in init_idle()
7730 raw_spin_unlock_irqrestore(&idle->pi_lock, flags); in init_idle()
7738 idle->sched_class = &idle_sched_class; in init_idle()
7742 sprintf(idle->comm, "%s/%d", INIT_TASK_COMM, cpu); in init_idle()
7774 if (p->flags & PF_NO_SETAFFINITY) in task_can_attach()
7775 ret = -EINVAL; in task_can_attach()
7792 if (!cpumask_test_cpu(target_cpu, p->cpus_ptr)) in migrate_task_to()
7793 return -EINVAL; in migrate_task_to()
7820 p->numa_preferred_nid = nid; in sched_setnuma()
7837 struct mm_struct *mm = current->active_mm; in idle_task_exit()
7840 BUG_ON(current != this_rq()->idle); in idle_task_exit()
7857 raw_spin_lock_irq(&p->pi_lock); in __balance_push_cpu_stop()
7863 cpu = select_fallback_rq(rq->cpu, p); in __balance_push_cpu_stop()
7868 raw_spin_unlock_irq(&p->pi_lock); in __balance_push_cpu_stop()
7878 * Ensure we only run per-cpu kthreads once the CPU goes !active.
7885 struct task_struct *push_task = rq->curr; in balance_push()
7892 rq->balance_callback = &balance_push_callback; in balance_push()
7898 if (!cpu_dying(rq->cpu) || rq != this_rq()) in balance_push()
7902 * Both the cpu-hotplug and stop task are in this case and are in balance_push()
7919 if (!rq->nr_running && !rq_has_pinned_tasks(rq) && in balance_push()
7920 rcuwait_active(&rq->hotplug_wait)) { in balance_push()
7922 rcuwait_wake_up(&rq->hotplug_wait); in balance_push()
7930 * Temporarily drop rq->lock such that we can wake-up the stop task. in balance_push()
7935 stop_one_cpu_nowait(rq->cpu, __balance_push_cpu_stop, push_task, in balance_push()
7953 WARN_ON_ONCE(rq->balance_callback); in balance_push_set()
7954 rq->balance_callback = &balance_push_callback; in balance_push_set()
7955 } else if (rq->balance_callback == &balance_push_callback) { in balance_push_set()
7956 rq->balance_callback = NULL; in balance_push_set()
7971 rcuwait_wait_event(&rq->hotplug_wait, in balance_hotplug_wait()
7972 rq->nr_running == 1 && !rq_has_pinned_tasks(rq), in balance_hotplug_wait()
7994 if (!rq->online) { in set_rq_online()
7997 cpumask_set_cpu(rq->cpu, rq->rd->online); in set_rq_online()
7998 rq->online = 1; in set_rq_online()
8001 if (class->rq_online) in set_rq_online()
8002 class->rq_online(rq); in set_rq_online()
8009 if (rq->online) { in set_rq_offline()
8014 if (class->rq_offline) in set_rq_offline()
8015 class->rq_offline(rq); in set_rq_offline()
8018 cpumask_clear_cpu(rq->cpu, rq->rd->online); in set_rq_offline()
8019 rq->online = 0; in set_rq_offline()
8028 if (rq->rd) { in sched_set_rq_online()
8029 BUG_ON(!cpumask_test_cpu(cpu, rq->rd->span)); in sched_set_rq_online()
8040 if (rq->rd) { in sched_set_rq_offline()
8041 BUG_ON(!cpumask_test_cpu(cpu, rq->rd->span)); in sched_set_rq_offline()
8070 if (--num_cpus_frozen) in cpuset_cpu_active()
8174 * preempt-disabled and RCU users of this state to go away such that in sched_cpu_deactivate()
8218 rq->calc_load_update = calc_load_update; in sched_rq_cpu_starting()
8253 * stable. We need to take the tear-down thread which is calling this into
8256 * Also see the comment "Global load-average calculations".
8273 printk("%sCPU%d enqueued tasks (%u total):\n", loglvl, cpu, rq->nr_running); in dump_rq_tasks()
8281 printk("%s\tpid: %d, name: %s\n", loglvl, p->pid, p->comm); in dump_rq_tasks()
8294 if (rq->nr_running != 1 || rq_has_pinned_tasks(rq)) { in sched_cpu_dying()
8321 /* Move init over to a non-isolated CPU */ in sched_init_smp()
8324 current->flags &= ~PF_NO_SETAFFINITY; in sched_init_smp()
8439 raw_spin_lock_init(&rq->__lock); in sched_init()
8440 rq->nr_running = 0; in sched_init()
8441 rq->calc_load_active = 0; in sched_init()
8442 rq->calc_load_update = jiffies + LOAD_FREQ; in sched_init()
8443 init_cfs_rq(&rq->cfs); in sched_init()
8444 init_rt_rq(&rq->rt); in sched_init()
8445 init_dl_rq(&rq->dl); in sched_init()
8447 INIT_LIST_HEAD(&rq->leaf_cfs_rq_list); in sched_init()
8448 rq->tmp_alone_branch = &rq->leaf_cfs_rq_list; in sched_init()
8452 * In case of task-groups formed through the cgroup filesystem, it in sched_init()
8455 * root_task_group and its child task-groups in a fair manner, in sched_init()
8456 * based on each entity's (task or task-group's) weight in sched_init()
8457 * (se->load.weight). in sched_init()
8466 * directly in rq->cfs (i.e root_task_group->se[] = NULL). in sched_init()
8468 init_tg_cfs_entry(&root_task_group, &rq->cfs, NULL, i, NULL); in sched_init()
8477 rq->rt.rt_runtime = global_rt_runtime(); in sched_init()
8478 init_tg_rt_entry(&root_task_group, &rq->rt, NULL, i, NULL); in sched_init()
8481 rq->sd = NULL; in sched_init()
8482 rq->rd = NULL; in sched_init()
8483 rq->cpu_capacity = SCHED_CAPACITY_SCALE; in sched_init()
8484 rq->balance_callback = &balance_push_callback; in sched_init()
8485 rq->active_balance = 0; in sched_init()
8486 rq->next_balance = jiffies; in sched_init()
8487 rq->push_cpu = 0; in sched_init()
8488 rq->cpu = i; in sched_init()
8489 rq->online = 0; in sched_init()
8490 rq->idle_stamp = 0; in sched_init()
8491 rq->avg_idle = 2*sysctl_sched_migration_cost; in sched_init()
8492 rq->max_idle_balance_cost = sysctl_sched_migration_cost; in sched_init()
8494 INIT_LIST_HEAD(&rq->cfs_tasks); in sched_init()
8498 rq->last_blocked_load_update_tick = jiffies; in sched_init()
8499 atomic_set(&rq->nohz_flags, 0); in sched_init()
8501 INIT_CSD(&rq->nohz_csd, nohz_csd_func, rq); in sched_init()
8504 rcuwait_init(&rq->hotplug_wait); in sched_init()
8508 atomic_set(&rq->nr_iowait, 0); in sched_init()
8512 rq->core = rq; in sched_init()
8513 rq->core_pick = NULL; in sched_init()
8514 rq->core_dl_server = NULL; in sched_init()
8515 rq->core_enabled = 0; in sched_init()
8516 rq->core_tree = RB_ROOT; in sched_init()
8517 rq->core_forceidle_count = 0; in sched_init()
8518 rq->core_forceidle_occupation = 0; in sched_init()
8519 rq->core_forceidle_start = 0; in sched_init()
8521 rq->core_cookie = 0UL; in sched_init()
8523 zalloc_cpumask_var_node(&rq->scratch_mask, GFP_KERNEL, cpu_to_node(i)); in sched_init()
8537 * is dressed up as a per-CPU kthread and thus needs to play the part in sched_init()
8538 * if we want to avoid special-casing it in code that deals with per-CPU in sched_init()
8575 * Blocking primitives will set (and therefore destroy) current->state, in __might_sleep()
8579 WARN_ONCE(state != TASK_RUNNING && current->task_state_change, in __might_sleep()
8582 (void *)current->task_state_change, in __might_sleep()
8583 (void *)current->task_state_change); in __might_sleep()
8621 !is_idle_task(current) && !current->non_block_count) || in __might_resched()
8636 in_atomic(), irqs_disabled(), current->non_block_count, in __might_resched()
8637 current->pid, current->comm); in __might_resched()
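/*
 * Editor's sketch, not part of core.c: the class of bug the checks above
 * report (with CONFIG_DEBUG_ATOMIC_SLEEP) -- calling a sleeping primitive
 * while atomic. The lock and mutex names are hypothetical.
 */
static DEFINE_SPINLOCK(ex_lock);
static DEFINE_MUTEX(ex_mutex);

static void sleeping_while_atomic_bug(void)
{
	spin_lock(&ex_lock);	/* preemption disabled from here on */
	mutex_lock(&ex_mutex);	/* might_sleep() -> "BUG: sleeping function called ..." */
	mutex_unlock(&ex_mutex);
	spin_unlock(&ex_lock);
}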
8681 current->pid, current->comm); in __cant_sleep()
8713 current->pid, current->comm); in __cant_migrate()
8736 if (p->flags & PF_KTHREAD) in normalize_rt_tasks()
8739 p->se.exec_start = 0; in normalize_rt_tasks()
8740 schedstat_set(p->stats.wait_start, 0); in normalize_rt_tasks()
8741 schedstat_set(p->stats.sleep_start, 0); in normalize_rt_tasks()
8742 schedstat_set(p->stats.block_start, 0); in normalize_rt_tasks()
8766 * stopped - every CPU needs to be quiescent, and no scheduling
8773 * curr_task - return the current task for a given CPU.
8798 uclamp_se_set(&tg->uclamp_req[clamp_id], in alloc_uclamp_sched_group()
8800 tg->uclamp[clamp_id] = parent->uclamp[clamp_id]; in alloc_uclamp_sched_group()
8826 call_rcu(&tg->rcu, sched_free_group_rcu); in sched_unregister_group()
8836 return ERR_PTR(-ENOMEM); in sched_create_group()
8851 return ERR_PTR(-ENOMEM); in sched_create_group()
8859 list_add_rcu(&tg->list, &task_groups); in sched_online_group()
8864 tg->parent = parent; in sched_online_group()
8865 INIT_LIST_HEAD(&tg->children); in sched_online_group()
8866 list_add_rcu(&tg->siblings, &parent->children); in sched_online_group()
8882 call_rcu(&tg->rcu, sched_unregister_group_rcu); in sched_destroy_group()
8903 list_del_rcu(&tg->list); in sched_release_group()
8904 list_del_rcu(&tg->siblings); in sched_release_group()
8926 tsk->sched_task_group = group; in sched_change_group()
8929 if (tsk->sched_class->task_change_group) in sched_change_group()
8930 tsk->sched_class->task_change_group(tsk); in sched_change_group()
8940 * now. This function just updates tsk->se.cfs_rq and tsk->se.parent to reflect
8958 if (group == tsk->sched_task_group) in sched_move_task()
9000 return ERR_PTR(-ENOMEM); in cpu_cgroup_css_alloc()
9002 return &tg->css; in cpu_cgroup_css_alloc()
9009 struct task_group *parent = css_tg(css->parent); in cpu_cgroup_css_online()
9061 return -EINVAL; in cpu_cgroup_can_attach()
9097 uc_parent = css_tg(css)->parent in cpu_util_update_eff()
9098 ? css_tg(css)->parent->uclamp : NULL; in cpu_util_update_eff()
9102 eff[clamp_id] = css_tg(css)->uclamp_req[clamp_id].value; in cpu_util_update_eff()
9114 uc_se = css_tg(css)->uclamp; in cpu_util_update_eff()
9164 req.ret = -ERANGE; in capacity_from_percent()
9192 if (tg->uclamp_req[clamp_id].value != req.util) in cpu_uclamp_write()
9193 uclamp_se_set(&tg->uclamp_req[clamp_id], req.util, false); in cpu_uclamp_write()
9196 * Because the conversion rounding is not recoverable, we keep track of the in cpu_uclamp_write()
9199 tg->uclamp_pct[clamp_id] = req.percent; in cpu_uclamp_write()
9231 util_clamp = tg->uclamp_req[clamp_id].value; in cpu_uclamp_print()
9239 percent = tg->uclamp_pct[clamp_id]; in cpu_uclamp_print()
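/*
 * Editor's worked example, not part of core.c: the percentages printed above
 * map onto the [0..SCHED_CAPACITY_SCALE] clamp range, so (assuming the usual
 * SCHED_CAPACITY_SCALE of 1024) writing "50" to cpu.uclamp.min requests a
 * clamp of roughly 512, and "max" requests the full 1024.
 */
static u64 uclamp_from_percent_sketch(u64 percent)
{
	return DIV_ROUND_CLOSEST_ULL(percent * SCHED_CAPACITY_SCALE, 100);
}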
9261 return scale_load_down(tg->shares); in tg_weight()
9263 return sched_weight_from_cgroup(tg->scx_weight); in tg_weight()
9302 struct cfs_bandwidth *cfs_b = &tg->cfs_bandwidth; in tg_set_cfs_bandwidth()
9305 return -EINVAL; in tg_set_cfs_bandwidth()
9313 return -EINVAL; in tg_set_cfs_bandwidth()
9321 return -EINVAL; in tg_set_cfs_bandwidth()
9327 return -EINVAL; in tg_set_cfs_bandwidth()
9331 return -EINVAL; in tg_set_cfs_bandwidth()
9334 * Prevent race between setting of cfs_rq->runtime_enabled and in tg_set_cfs_bandwidth()
9345 runtime_was_enabled = cfs_b->quota != RUNTIME_INF; in tg_set_cfs_bandwidth()
9347 * If we need to toggle cfs_bandwidth_used, off->on must occur in tg_set_cfs_bandwidth()
9348 * before making related changes, and on->off must occur afterwards in tg_set_cfs_bandwidth()
9353 scoped_guard (raw_spinlock_irq, &cfs_b->lock) { in tg_set_cfs_bandwidth()
9354 cfs_b->period = ns_to_ktime(period); in tg_set_cfs_bandwidth()
9355 cfs_b->quota = quota; in tg_set_cfs_bandwidth()
9356 cfs_b->burst = burst; in tg_set_cfs_bandwidth()
9369 struct cfs_rq *cfs_rq = tg->cfs_rq[i]; in tg_set_cfs_bandwidth()
9370 struct rq *rq = cfs_rq->rq; in tg_set_cfs_bandwidth()
9373 cfs_rq->runtime_enabled = runtime_enabled; in tg_set_cfs_bandwidth()
9374 cfs_rq->runtime_remaining = 0; in tg_set_cfs_bandwidth()
9376 if (cfs_rq->throttled) in tg_set_cfs_bandwidth()
9390 period = ktime_to_ns(tg->cfs_bandwidth.period); in tg_set_cfs_quota()
9391 burst = tg->cfs_bandwidth.burst; in tg_set_cfs_quota()
9397 return -EINVAL; in tg_set_cfs_quota()
9406 if (tg->cfs_bandwidth.quota == RUNTIME_INF) in tg_get_cfs_quota()
9407 return -1; in tg_get_cfs_quota()
9409 quota_us = tg->cfs_bandwidth.quota; in tg_get_cfs_quota()
9420 return -EINVAL; in tg_set_cfs_period()
9423 quota = tg->cfs_bandwidth.quota; in tg_set_cfs_period()
9424 burst = tg->cfs_bandwidth.burst; in tg_set_cfs_period()
9433 cfs_period_us = ktime_to_ns(tg->cfs_bandwidth.period); in tg_get_cfs_period()
9444 return -EINVAL; in tg_set_cfs_burst()
9447 period = ktime_to_ns(tg->cfs_bandwidth.period); in tg_set_cfs_burst()
9448 quota = tg->cfs_bandwidth.quota; in tg_set_cfs_burst()
9457 burst_us = tg->cfs_bandwidth.burst; in tg_get_cfs_burst()
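/*
 * Editor's worked example, not part of core.c: with the knobs above, a group
 * with quota = 50000us and period = 100000us is capped at half of one CPU;
 * quota = 300000us over the same period allows up to three CPUs worth of
 * runtime per period, and an infinite quota (reported as -1) means no cap.
 */
static u64 cfs_quota_percent_of_one_cpu(u64 quota_us, u64 period_us)
{
	return div64_u64(quota_us * 100, period_us);	/* 50000/100000 -> 50 */
}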
9513 if (tg == d->tg) { in normalize_cfs_quota()
9514 period = d->period; in normalize_cfs_quota()
9515 quota = d->quota; in normalize_cfs_quota()
9522 if (quota == RUNTIME_INF || quota == -1) in normalize_cfs_quota()
9531 struct cfs_bandwidth *cfs_b = &tg->cfs_bandwidth; in tg_cfs_schedulable_down()
9532 s64 quota = 0, parent_quota = -1; in tg_cfs_schedulable_down()
9534 if (!tg->parent) { in tg_cfs_schedulable_down()
9537 struct cfs_bandwidth *parent_b = &tg->parent->cfs_bandwidth; in tg_cfs_schedulable_down()
9540 parent_quota = parent_b->hierarchical_quota; in tg_cfs_schedulable_down()
9544 * always take the non-RUNTIME_INF min. On cgroup1, only in tg_cfs_schedulable_down()
9558 return -EINVAL; in tg_cfs_schedulable_down()
9561 cfs_b->hierarchical_quota = quota; in tg_cfs_schedulable_down()
9586 struct cfs_bandwidth *cfs_b = &tg->cfs_bandwidth; in cpu_cfs_stat_show()
9588 seq_printf(sf, "nr_periods %d\n", cfs_b->nr_periods); in cpu_cfs_stat_show()
9589 seq_printf(sf, "nr_throttled %d\n", cfs_b->nr_throttled); in cpu_cfs_stat_show()
9590 seq_printf(sf, "throttled_time %llu\n", cfs_b->throttled_time); in cpu_cfs_stat_show()
9598 stats = __schedstats_from_se(tg->se[i]); in cpu_cfs_stat_show()
9599 ws += schedstat_val(stats->wait_sum); in cpu_cfs_stat_show()
9605 seq_printf(sf, "nr_bursts %d\n", cfs_b->nr_burst); in cpu_cfs_stat_show()
9606 seq_printf(sf, "burst_time %llu\n", cfs_b->burst_time); in cpu_cfs_stat_show()
9617 total += READ_ONCE(tg->cfs_rq[i]->throttled_clock_self_time); in throttled_time_self()
9663 return css_tg(css)->idle; in cpu_idle_read_s64()
9751 struct cfs_bandwidth *cfs_b = &tg->cfs_bandwidth; in cpu_extra_stat_show()
9754 throttled_usec = cfs_b->throttled_time; in cpu_extra_stat_show()
9756 burst_usec = cfs_b->burst_time; in cpu_extra_stat_show()
9764 cfs_b->nr_periods, cfs_b->nr_throttled, in cpu_extra_stat_show()
9765 throttled_usec, cfs_b->nr_burst, burst_usec); in cpu_extra_stat_show()
9804 return -ERANGE; in cpu_weight_write_u64()
9823 delta = abs(sched_prio_to_weight[prio] - weight); in cpu_weight_nice_read_s64()
9829 return PRIO_TO_NICE(prio - 1 + MAX_RT_PRIO); in cpu_weight_nice_read_s64()
9839 return -ERANGE; in cpu_weight_nice_write_s64()
9841 idx = NICE_TO_PRIO(nice) - MAX_RT_PRIO; in cpu_weight_nice_write_s64()
9871 return -EINVAL; in cpu_period_quota_parse()
9880 return -EINVAL; in cpu_period_quota_parse()
9899 u64 burst = tg->cfs_bandwidth.burst; in cpu_max_write()
10002 * nice level changed. I.e. when a CPU-bound task goes from nice 0 to
10003 * nice 1, it will get ~10% less CPU time than another CPU-bound task
10007 * if you go up 1 level, it's -10% CPU usage, if you go down 1 level
10013 /* -20 */ 88761, 71755, 56483, 46273, 36291,
10014 /* -15 */ 29154, 23254, 18705, 14949, 11916,
10015 /* -10 */ 9548, 7620, 6100, 4904, 3906,
10016 /* -5 */ 3121, 2501, 1991, 1586, 1277,
10024 * Inverse (2^32/x) values of the sched_prio_to_weight[] array, pre-calculated.
10027 * pre-calculated inverse to speed up arithmetic by turning divisions
10031 /* -20 */ 48388, 59856, 76040, 92818, 118348,
10032 /* -15 */ 147320, 184698, 229616, 287308, 360437,
10033 /* -10 */ 449829, 563644, 704093, 875809, 1099582,
10034 /* -5 */ 1376151, 1717300, 2157191, 2708050, 3363326,
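/*
 * Editor's worked example, not part of core.c (assuming the nice-0 and
 * nice-1 entries of the full weight table are 1024 and 820): two CPU-bound
 * tasks at nice 0 and nice 1 split the CPU as 1024/1844 ~= 55% vs
 * 820/1844 ~= 45%, which is the ~10% per-level step described above.
 */
static unsigned long fair_share_permille_sketch(unsigned long w, unsigned long w_other)
{
	return w * 1000 / (w + w_other);	/* 1024 vs 820 -> 555 */
}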
10049 * @cid_lock: Guarantee forward-progress of cid allocation.
10051 * Concurrency ID allocation within a bitmap is mostly lock-free. The cid_lock
10052 * is only used when contention is detected by the lock-free allocation so
10058 * @use_cid_lock: Select cid allocation behavior: lock-free vs spinlock.
10060 * When @use_cid_lock is 0, the cid allocation is lock-free. When contention is
10069 * mm_cid remote-clear implements a lock-free algorithm to clear per-mm/cpu cid
10075 * (1) Remote-clear should _never_ mark a per-cpu cid UNSET when it is actively
10092 * per-mm/cpu cid value.
10094 * Let's introduce task (Y) which has task->mm == mm and task (N) which has
10095 * task->mm != mm for the rest of the discussion. There are two scheduler state
10098 * (TSA) Store to rq->curr with transition from (N) to (Y)
10100 * (TSB) Store to rq->curr with transition from (Y) to (N)
10102 * On the remote-clear side, there is one transition we care about:
10107 * sides (scheduler, remote-clear). It is always performed with a cmpxchg which
10121 * Context switch CS-1 Remote-clear
10122 * - store to rq->curr: (N)->(Y) (TSA) - cmpxchg to *pcpu_id to LAZY (TMA)
10124 * - switch_mm_cid()
10125 * - memory barrier (see switch_mm_cid()
10129 * - mm_cid_get (next)
10130 * - READ_ONCE(*pcpu_cid) - rcu_dereference(src_rq->curr)
10137 * still an active task on the cpu. Remote-clear will therefore not transition
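/*
 * Editor's sketch, not part of core.c: the "mostly lock-free, spinlock as a
 * fallback" allocation shape that the cid_lock/use_cid_lock comments above
 * describe, reduced to its essentials. All names here are hypothetical; the
 * real logic lives in mm_cid_get() and its helpers.
 */
static int cid_alloc_sketch(unsigned long *bitmap, unsigned int nr_cids,
			    raw_spinlock_t *fallback_lock, int *use_lock)
{
	unsigned int cid;

	if (!READ_ONCE(*use_lock)) {
		cid = find_first_zero_bit(bitmap, nr_cids);
		if (cid < nr_cids && !test_and_set_bit(cid, bitmap))
			return cid;
		/* Contention detected: make everyone take the lock. */
		WRITE_ONCE(*use_lock, 1);
	}
	raw_spin_lock(fallback_lock);
	cid = find_first_zero_bit(bitmap, nr_cids);
	if (cid < nr_cids)
		set_bit(cid, bitmap);
	raw_spin_unlock(fallback_lock);

	return cid < nr_cids ? cid : -1;
}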
10158 t->migrate_from_cpu = task_cpu(t); in sched_mm_cid_migrate_from()
10166 struct mm_struct *mm = t->mm; in __sched_mm_cid_migrate_from_fetch_cid()
10171 return -1; in __sched_mm_cid_migrate_from_fetch_cid()
10173 last_mm_cid = t->last_mm_cid; in __sched_mm_cid_migrate_from_fetch_cid()
10179 if (last_mm_cid == -1) in __sched_mm_cid_migrate_from_fetch_cid()
10180 return -1; in __sched_mm_cid_migrate_from_fetch_cid()
10181 src_cid = READ_ONCE(src_pcpu_cid->cid); in __sched_mm_cid_migrate_from_fetch_cid()
10183 return -1; in __sched_mm_cid_migrate_from_fetch_cid()
10191 src_task = rcu_dereference(src_rq->curr); in __sched_mm_cid_migrate_from_fetch_cid()
10192 if (READ_ONCE(src_task->mm_cid_active) && src_task->mm == mm) { in __sched_mm_cid_migrate_from_fetch_cid()
10193 t->last_mm_cid = -1; in __sched_mm_cid_migrate_from_fetch_cid()
10194 return -1; in __sched_mm_cid_migrate_from_fetch_cid()
10207 struct mm_struct *mm = t->mm; in __sched_mm_cid_migrate_from_try_steal_cid()
10210 if (src_cid == -1) in __sched_mm_cid_migrate_from_try_steal_cid()
10211 return -1; in __sched_mm_cid_migrate_from_try_steal_cid()
10218 if (!try_cmpxchg(&src_pcpu_cid->cid, &src_cid, lazy_cid)) in __sched_mm_cid_migrate_from_try_steal_cid()
10219 return -1; in __sched_mm_cid_migrate_from_try_steal_cid()
10222 * The implicit barrier after cmpxchg per-mm/cpu cid before loading in __sched_mm_cid_migrate_from_try_steal_cid()
10223 * rq->curr->mm matches the scheduler barrier in context_switch() in __sched_mm_cid_migrate_from_try_steal_cid()
10224 * between store to rq->curr and load of prev and next task's in __sched_mm_cid_migrate_from_try_steal_cid()
10225 * per-mm/cpu cid. in __sched_mm_cid_migrate_from_try_steal_cid()
10227 * The implicit barrier after cmpxchg per-mm/cpu cid before loading in __sched_mm_cid_migrate_from_try_steal_cid()
10228 * rq->curr->mm_cid_active matches the barrier in in __sched_mm_cid_migrate_from_try_steal_cid()
10230 * sched_mm_cid_after_execve() between store to t->mm_cid_active and in __sched_mm_cid_migrate_from_try_steal_cid()
10231 * load of per-mm/cpu cid. in __sched_mm_cid_migrate_from_try_steal_cid()
10236 * the lazy-put flag, this task will be responsible for transitioning in __sched_mm_cid_migrate_from_try_steal_cid()
10237 * from lazy-put flag set to MM_CID_UNSET. in __sched_mm_cid_migrate_from_try_steal_cid()
10240 src_task = rcu_dereference(src_rq->curr); in __sched_mm_cid_migrate_from_try_steal_cid()
10241 if (READ_ONCE(src_task->mm_cid_active) && src_task->mm == mm) { in __sched_mm_cid_migrate_from_try_steal_cid()
10246 t->last_mm_cid = -1; in __sched_mm_cid_migrate_from_try_steal_cid()
10247 return -1; in __sched_mm_cid_migrate_from_try_steal_cid()
10254 if (!try_cmpxchg(&src_pcpu_cid->cid, &lazy_cid, MM_CID_UNSET)) in __sched_mm_cid_migrate_from_try_steal_cid()
10255 return -1; in __sched_mm_cid_migrate_from_try_steal_cid()
10267 struct mm_struct *mm = t->mm; in sched_mm_cid_migrate_to()
10275 src_cpu = t->migrate_from_cpu; in sched_mm_cid_migrate_to()
10276 if (src_cpu == -1) { in sched_mm_cid_migrate_to()
10277 t->last_mm_cid = -1; in sched_mm_cid_migrate_to()
10290 * greater than or equal to the number of allowed CPUs, because user-space in sched_mm_cid_migrate_to()
10294 dst_pcpu_cid = per_cpu_ptr(mm->pcpu_cid, cpu_of(dst_rq)); in sched_mm_cid_migrate_to()
10295 dst_cid = READ_ONCE(dst_pcpu_cid->cid); in sched_mm_cid_migrate_to()
10297 atomic_read(&mm->mm_users) >= t->nr_cpus_allowed) in sched_mm_cid_migrate_to()
10299 src_pcpu_cid = per_cpu_ptr(mm->pcpu_cid, src_cpu); in sched_mm_cid_migrate_to()
10302 if (src_cid == -1) in sched_mm_cid_migrate_to()
10306 if (src_cid == -1) in sched_mm_cid_migrate_to()
10314 WRITE_ONCE(dst_pcpu_cid->cid, src_cid); in sched_mm_cid_migrate_to()
10324 cid = READ_ONCE(pcpu_cid->cid); in sched_mm_cid_remote_clear()
10335 if (!try_cmpxchg(&pcpu_cid->cid, &cid, lazy_cid)) in sched_mm_cid_remote_clear()
10339 * The implicit barrier after cmpxchg per-mm/cpu cid before loading in sched_mm_cid_remote_clear()
10340 * rq->curr->mm matches the scheduler barrier in context_switch() in sched_mm_cid_remote_clear()
10341 * between store to rq->curr and load of prev and next task's in sched_mm_cid_remote_clear()
10342 * per-mm/cpu cid. in sched_mm_cid_remote_clear()
10344 * The implicit barrier after cmpxchg per-mm/cpu cid before loading in sched_mm_cid_remote_clear()
10345 * rq->curr->mm_cid_active matches the barrier in in sched_mm_cid_remote_clear()
10347 * sched_mm_cid_after_execve() between store to t->mm_cid_active and in sched_mm_cid_remote_clear()
10348 * load of per-mm/cpu cid. in sched_mm_cid_remote_clear()
10353 * the lazy-put flag, that task will be responsible for transitioning in sched_mm_cid_remote_clear()
10354 * from lazy-put flag set to MM_CID_UNSET. in sched_mm_cid_remote_clear()
10357 t = rcu_dereference(rq->curr); in sched_mm_cid_remote_clear()
10358 if (READ_ONCE(t->mm_cid_active) && t->mm == mm) in sched_mm_cid_remote_clear()
10368 if (try_cmpxchg(&pcpu_cid->cid, &lazy_cid, MM_CID_UNSET)) in sched_mm_cid_remote_clear()
10381 * rq->clock load is racy on 32-bit but one spurious clear once in a in sched_mm_cid_remote_clear_old()
10384 rq_clock = READ_ONCE(rq->clock); in sched_mm_cid_remote_clear_old()
10385 pcpu_cid = per_cpu_ptr(mm->pcpu_cid, cpu); in sched_mm_cid_remote_clear_old()
10393 curr = rcu_dereference(rq->curr); in sched_mm_cid_remote_clear_old()
10394 if (READ_ONCE(curr->mm_cid_active) && curr->mm == mm) { in sched_mm_cid_remote_clear_old()
10395 WRITE_ONCE(pcpu_cid->time, rq_clock); in sched_mm_cid_remote_clear_old()
10400 if (rq_clock < pcpu_cid->time + SCHED_MM_CID_PERIOD_NS) in sched_mm_cid_remote_clear_old()
10411 pcpu_cid = per_cpu_ptr(mm->pcpu_cid, cpu); in sched_mm_cid_remote_clear_weight()
10412 cid = READ_ONCE(pcpu_cid->cid); in sched_mm_cid_remote_clear_weight()
10428 work->next = work; /* Prevent double-add */ in task_mm_cid_work()
10429 if (t->flags & PF_EXITING) in task_mm_cid_work()
10431 mm = t->mm; in task_mm_cid_work()
10434 old_scan = READ_ONCE(mm->mm_cid_next_scan); in task_mm_cid_work()
10439 res = cmpxchg(&mm->mm_cid_next_scan, old_scan, next_scan); in task_mm_cid_work()
10447 if (!try_cmpxchg(&mm->mm_cid_next_scan, &old_scan, next_scan)) in task_mm_cid_work()
10464 struct mm_struct *mm = t->mm; in init_sched_mm_cid()
10468 mm_users = atomic_read(&mm->mm_users); in init_sched_mm_cid()
10470 mm->mm_cid_next_scan = jiffies + msecs_to_jiffies(MM_CID_SCAN_DELAY); in init_sched_mm_cid()
10472 t->cid_work.next = &t->cid_work; /* Protect against double add */ in init_sched_mm_cid()
10473 init_task_work(&t->cid_work, task_mm_cid_work); in init_sched_mm_cid()
10478 struct callback_head *work = &curr->cid_work; in task_tick_mm_cid()
10481 if (!curr->mm || (curr->flags & (PF_EXITING | PF_KTHREAD)) || in task_tick_mm_cid()
10482 work->next != work) in task_tick_mm_cid()
10484 if (time_before(now, READ_ONCE(curr->mm->mm_cid_next_scan))) in task_tick_mm_cid()
10493 struct mm_struct *mm = t->mm; in sched_mm_cid_exit_signals()
10503 WRITE_ONCE(t->mm_cid_active, 0); in sched_mm_cid_exit_signals()
10505 * Store t->mm_cid_active before loading per-mm/cpu cid. in sched_mm_cid_exit_signals()
10510 t->last_mm_cid = t->mm_cid = -1; in sched_mm_cid_exit_signals()
10515 struct mm_struct *mm = t->mm; in sched_mm_cid_before_execve()
10525 WRITE_ONCE(t->mm_cid_active, 0); in sched_mm_cid_before_execve()
10527 * Store t->mm_cid_active before loading per-mm/cpu cid. in sched_mm_cid_before_execve()
10532 t->last_mm_cid = t->mm_cid = -1; in sched_mm_cid_before_execve()
10537 struct mm_struct *mm = t->mm; in sched_mm_cid_after_execve()
10547 WRITE_ONCE(t->mm_cid_active, 1); in sched_mm_cid_after_execve()
10549 * Store t->mm_cid_active before loading per-mm/cpu cid. in sched_mm_cid_after_execve()
10553 t->last_mm_cid = t->mm_cid = mm_cid_get(rq, mm); in sched_mm_cid_after_execve()
10560 WARN_ON_ONCE(!t->mm || t->mm_cid != -1); in sched_mm_cid_fork()
10561 t->mm_cid_active = 1; in sched_mm_cid_fork()
10581 if (ctx->queued) in sched_deq_and_put_task()
10583 if (ctx->running) in sched_deq_and_put_task()
10589 struct rq *rq = task_rq(ctx->p); in sched_enq_and_set_task()
10593 if (ctx->queued) in sched_enq_and_set_task()
10594 enqueue_task(rq, ctx->p, ctx->queue_flags | ENQUEUE_NOCLOCK); in sched_enq_and_set_task()
10595 if (ctx->running) in sched_enq_and_set_task()
10596 set_next_task(rq, ctx->p); in sched_enq_and_set_task()