Lines matching full-text search "assigned-resolution-bits" (tokens: assigned, resolution, bits)

1 /* SPDX-License-Identifier: GPL-2.0 */
119 * Asymmetric CPU capacity bits
130 #define cpu_capacity_span(asym_data) to_cpumask((asym_data)->cpus)
133 * Helpers for converting nanosecond timing to jiffy resolution
138 * Increase resolution of nice-level calculations for 64-bit architectures.
139 * The extra resolution improves shares distribution and load balancing of
140 * low-weight task groups (e.g. nice +19 on an autogroup), deeper task-group
141 * hierarchies, especially on larger systems. This is not a user-visible change
142 * and does not change the user-interface for setting shares/weights.
144 * We increase resolution only if we have enough bits to allow this increased
145 * resolution (i.e. 64-bit). The costs for increasing resolution when 32-bit
149 * increase coverage and consistency, always enable it on 64-bit platforms.
170 * independent resolution, but they should be well calibrated. We use
174 * scale_load(sched_prio_to_weight[NICE_TO_PRIO(0)-MAX_RT_PRIO]) == NICE_0_LOAD
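A minimal sketch of the calibration above, assuming SCHED_FIXEDPOINT_SHIFT == 10 and the nice-0 weight of 1024 from sched_prio_to_weight[]; the example_* names are illustrative and are not the header's real macros:

/* Illustrative only: on 64-bit, weights carry 10 extra bits of resolution. */
#define EXAMPLE_FIXEDPOINT_SHIFT	10
#define example_scale_load(w)		((unsigned long)(w) << EXAMPLE_FIXEDPOINT_SHIFT)
#define example_scale_load_down(w)	((unsigned long)(w) >> EXAMPLE_FIXEDPOINT_SHIFT)

/*
 * Nice 0 has weight 1024, so example_scale_load(1024) == 1024 << 10 == 1048576,
 * which is the value NICE_0_LOAD takes on 64-bit; example_scale_load_down()
 * recovers the original 1024.
 */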
181 * 10 -> just above 1us
182 * 9 -> just above 0.5us
228 return idle_policy(p->policy); in task_has_idle_policy()
233 return rt_policy(p->policy); in task_has_rt_policy()
238 return dl_policy(p->policy); in task_has_dl_policy()
245 s64 diff = sample - *avg; in update_avg()
252 * is UB; cap at size-1.
255 (val >> min_t(typeof(shift), shift, BITS_PER_TYPE(typeof(val)) - 1))
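A short worked note on the clamp above: for a 32-bit value BITS_PER_TYPE() is 32, so any shift amount of 32 or more is capped at 31 instead of invoking undefined behaviour. For example:

u32 v = 0x80000000;
/* shr_bound(v, 40) shifts by min(40, 31) == 31 and yields 1, rather than UB */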
260 * maps pretty well onto the shares value used by scheduler and the round-trip
294 return unlikely(dl_se->flags & SCHED_FLAG_SUGOV); in dl_entity_is_special()
307 dl_time_before(a->deadline, b->deadline); in dl_entity_preempt()
311 * This is the priority-queue data structure of the RT scheduling class:
333 * To keep the bandwidth of -deadline tasks under control
335 * - store the maximum -deadline bandwidth of each cpu;
336 * - cache the fraction of bandwidth that is currently allocated in
340 * one used for RT-throttling (rt_bandwidth), with the main difference
347 * - bw (< 100%) is the deadline bandwidth of each CPU;
348 * - total_bw is the currently allocated bandwidth in each root domain;
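A hedged sketch of the admission test this bookkeeping enables; the name and layout below are illustrative rather than the header's actual __dl_overflow() helper, but the idea is the same: a new -deadline reservation is accepted only while the root domain's total stays under the per-CPU cap times the number of CPUs.

/* Illustrative admission check; bandwidths are fixed point (<< BW_SHIFT). */
static inline bool example_dl_bw_overflow(u64 per_cpu_bw, int nr_cpus,
					  u64 total_bw, u64 old_bw, u64 new_bw)
{
	return total_bw - old_bw + new_bw > per_cpu_bw * nr_cpus;
}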
371 * dl_se::rq -- runqueue we belong to.
373 * dl_se::server_has_tasks() -- used on bandwidth enforcement; we 'stop' the
376 * dl_se::server_pick() -- nested pick_next_task(); we yield the period if this
379 * dl_server_update() -- called from update_curr_common(), propagates runtime
383 * dl_server_stop() -- start/stop the server when it has (no) tasks.
385 * dl_server_init() -- initializes the server.
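A hedged sketch of how a scheduling class might wire these hooks up, assuming the dl_server_init()/dl_server_has_tasks_f/dl_server_pick_f declarations later in this header and the documented dl_se::rq backlink; the example_* names are illustrative, and the real fair-server wiring lives in fair.c.

/* Illustrative only. */
static bool example_server_has_tasks(struct sched_dl_entity *dl_se)
{
	return !!dl_se->rq->cfs.nr_running;
}

static struct task_struct *example_server_pick(struct sched_dl_entity *dl_se)
{
	return NULL;	/* a real class would run its own pick here */
}

static void example_server_setup(struct rq *rq, struct sched_dl_entity *dl_se)
{
	dl_server_init(dl_se, rq, example_server_has_tasks, example_server_pick);
}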
449 * it in its own cache-line separated from the fields above which
482 /* The [%] value requested from user-space, kept with two decimals of precision */
500 * (The default weight is 1024 - so there's no practical
605 * applicable for 32-bit architectures.
645 /* CFS-related fields in a runqueue */
696 * Where f(tg) is the recursive weight fraction assigned to
710 * a hierarchy). Non-leaf lrqs hold other higher schedulable entities
745 /* scx_rq->flags, protected by the rq lock */
793 /* Real-Time classes' related field in a runqueue: */
829 return rt_rq->rt_queued && rt_rq->rt_nr_running; in rt_rq_is_runnable()
855 * an rb-tree, ordered by tasks' deadlines, with caching
870 * Utilization of the tasks "assigned" to this runqueue (including
876 * runqueue (inactive utilization = this_bw - running_bw).
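A short worked example of the relation noted above, with illustrative numbers in BW_SHIFT fixed point:

this_bw    = 0.50   (two 25% reservations assigned to this runqueue)
running_bw = 0.25   (only one of them is currently active)
inactive utilization = this_bw - running_bw = 0.25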
897 #define entity_is_task(se) (!se->my_q)
902 se->runnable_weight = se->my_q->h_nr_running; in se_update_runnable()
907 if (se->sched_delayed) in se_runnable()
911 return !!se->on_rq; in se_runnable()
913 return se->runnable_weight; in se_runnable()
924 if (se->sched_delayed) in se_runnable()
927 return !!se->on_rq; in se_runnable()
934 * XXX we want to get rid of these helpers and use the full load resolution.
938 return scale_load_down(se->load.weight); in se_weight()
954 * We add the notion of a root-domain which will be used to define per-domain
957 * exclusive cpuset is created, we also create and attach a new root-domain
970 * - More than one runnable task
971 * - Running task is misfit
975 /* Indicate one or more CPUs over-utilized (tipping point) */
980 * than one runnable -deadline task (as it is below for RT tasks).
1017 * NULL-terminated list of performance domains intersecting with the
1031 return READ_ONCE(rd->overloaded); in get_rd_overloaded()
1037 WRITE_ONCE(rd->overloaded, status); in set_rd_overloaded()
1047 * struct uclamp_bucket - Utilization clamp bucket
1056 unsigned long tasks : BITS_PER_LONG - bits_per(SCHED_CAPACITY_SCALE);
1060 * struct uclamp_rq - rq's utilization clamp
1072 * - for util_min: we want to run the CPU at least at the max of the minimum
1074 * - for util_max: we want to allow the CPU to run up to the max of the
1079 * the metrics required to compute all the per-rq utilization clamp values.
1090 * This is the main, per-CPU runqueue data structure.
1285 /* shared state -- careful with sched_core_cpu_deactivate() */
1309 return cfs_rq->rq; in rq_of()
1323 return rq->cpu; in cpu_of()
1334 return p->migration_disabled; in is_migration_disabled()
1345 #define cpu_curr(cpu) (cpu_rq(cpu)->curr)
1355 return static_branch_unlikely(&__sched_core_enabled) && rq->core_enabled; in sched_core_enabled()
1365 * stable unless you actually hold a relevant rq->__lock.
1370 return &rq->core->__lock; in rq_lockp()
1372 return &rq->__lock; in rq_lockp()
1377 if (rq->core_enabled) in __rq_lockp()
1378 return &rq->core->__lock; in __rq_lockp()
1380 return &rq->__lock; in __rq_lockp()
1400 return rq->core->core_cookie == p->core_cookie; in sched_cpu_cookie_match()
1423 return idle_core || rq->core->core_cookie == p->core_cookie; in sched_core_cookie_match()
1436 for_each_cpu_and(cpu, sched_group_span(group), p->cpus_ptr) { in sched_group_cookie_match()
1445 return !RB_EMPTY_NODE(&p->core_node); in sched_core_enqueued()
1468 return &rq->__lock; in rq_lockp()
1473 return &rq->__lock; in __rq_lockp()
1565 return p->se.cfs_rq; in task_cfs_rq()
1571 return se->cfs_rq; in cfs_rq_of()
1577 return grp->my_q; in group_cfs_rq()
1586 return &task_rq(p)->cfs; in task_cfs_rq()
1594 return &rq->cfs; in cfs_rq_of()
1608 * rq::clock_update_flags bits
1610 * %RQCF_REQ_SKIP - will request skipping of clock update on the next
1614 * %RQCF_ACT_SKIP - is set from inside of __schedule() when skipping is
1617 * %RQCF_UPDATED - is a debug flag that indicates whether a call has been
1624 * if (rq->clock_update_flags >= RQCF_UPDATED)
1640 SCHED_WARN_ON(rq->clock_update_flags < RQCF_ACT_SKIP); in assert_clock_updated()
1648 return rq->clock; in rq_clock()
1656 return rq->clock_task; in rq_clock_task()
1662 rq->clock_update_flags |= RQCF_REQ_SKIP; in rq_clock_skip_update()
1672 rq->clock_update_flags &= ~RQCF_REQ_SKIP; in rq_clock_cancel_skipupdate()
1682 * to clear RQCF_ACT_SKIP of rq->clock_update_flags.
1687 SCHED_WARN_ON(rq->clock_update_flags & RQCF_ACT_SKIP); in rq_clock_start_loop_update()
1688 rq->clock_update_flags |= RQCF_ACT_SKIP; in rq_clock_start_loop_update()
1694 rq->clock_update_flags &= ~RQCF_ACT_SKIP; in rq_clock_stop_loop_update()
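A hedged usage sketch of the loop-update helpers above (an illustrative helper, not a function from this file), for a caller that performs several clock-sensitive operations under a single rq lock:

static inline void example_batch_under_one_clock(struct rq *rq)
{
	struct rq_flags rf;

	rq_lock_irqsave(rq, &rf);
	update_rq_clock(rq);
	rq_clock_start_loop_update(rq);		/* suppress further clock updates */

	/* ... several operations that would otherwise call update_rq_clock() ... */

	rq_clock_stop_loop_update(rq);		/* re-enable clock updates */
	rq_unlock_irqrestore(rq, &rf);
}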
1718 * copy of the (on-stack) 'struct rq_flags rf'.
1720 * Also see Documentation/locking/lockdep-design.rst.
1724 rf->cookie = lockdep_pin_lock(__rq_lockp(rq)); in rq_pin_lock()
1727 rq->clock_update_flags &= (RQCF_REQ_SKIP|RQCF_ACT_SKIP); in rq_pin_lock()
1728 rf->clock_update_flags = 0; in rq_pin_lock()
1730 SCHED_WARN_ON(rq->balance_callback && rq->balance_callback != &balance_push_callback); in rq_pin_lock()
1738 if (rq->clock_update_flags > RQCF_ACT_SKIP) in rq_unpin_lock()
1739 rf->clock_update_flags = RQCF_UPDATED; in rq_unpin_lock()
1742 lockdep_unpin_lock(__rq_lockp(rq), rf->cookie); in rq_unpin_lock()
1747 lockdep_repin_lock(__rq_lockp(rq), rf->cookie); in rq_repin_lock()
1753 rq->clock_update_flags |= rf->clock_update_flags; in rq_repin_lock()
1759 __acquires(rq->lock);
1763 __acquires(p->pi_lock)
1764 __acquires(rq->lock);
1767 __releases(rq->lock) in __task_rq_unlock()
1775 __releases(rq->lock) in task_rq_unlock()
1776 __releases(p->pi_lock) in task_rq_unlock()
1780 raw_spin_unlock_irqrestore(&p->pi_lock, rf->flags); in task_rq_unlock()
1784 _T->rq = task_rq_lock(_T->lock, &_T->rf),
1785 task_rq_unlock(_T->rq, _T->lock, &_T->rf),
1789 __acquires(rq->lock) in rq_lock_irqsave()
1791 raw_spin_rq_lock_irqsave(rq, rf->flags); in rq_lock_irqsave()
1796 __acquires(rq->lock) in rq_lock_irq()
1803 __acquires(rq->lock) in rq_lock()
1810 __releases(rq->lock) in rq_unlock_irqrestore()
1813 raw_spin_rq_unlock_irqrestore(rq, rf->flags); in rq_unlock_irqrestore()
1817 __releases(rq->lock) in rq_unlock_irq()
1824 __releases(rq->lock) in rq_unlock()
1831 rq_lock(_T->lock, &_T->rf),
1832 rq_unlock(_T->lock, &_T->rf),
1836 rq_lock_irq(_T->lock, &_T->rf),
1837 rq_unlock_irq(_T->lock, &_T->rf),
1841 rq_lock_irqsave(_T->lock, &_T->rf),
1842 rq_unlock_irqrestore(_T->lock, &_T->rf),
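A hedged usage sketch of the guards defined above (an illustrative helper, not part of this file); the guard drops the rq lock automatically when the scope ends:

static inline unsigned int example_nr_running_of(int cpu)
{
	struct rq *rq = cpu_rq(cpu);

	guard(rq_lock_irqsave)(rq);	/* rq_unlock_irqrestore() on scope exit */
	return rq->nr_running;
}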
1846 __acquires(rq->lock) in this_rq_lock_irq()
1927 if (unlikely(head->next || rq->balance_callback == &balance_push_callback)) in queue_balance_callback()
1930 head->func = func; in queue_balance_callback()
1931 head->next = rq->balance_callback; in queue_balance_callback()
1932 rq->balance_callback = head; in queue_balance_callback()
1939 * The domain tree (rq->sd) is protected by RCU's quiescent state transition.
1943 * preempt-disabled sections.
1946 for (__sd = rcu_dereference_check_sched_domain(cpu_rq(cpu)->sd); \
1947 __sd; __sd = __sd->parent)
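A hedged usage sketch of the iterator above (an illustrative helper, not from this file), walking @cpu's domain hierarchy under RCU as the locking comment requires:

static inline int example_count_domain_levels(int cpu)
{
	struct sched_domain *sd;
	int levels = 0;

	rcu_read_lock();
	for_each_domain(cpu, sd)
		levels++;
	rcu_read_unlock();

	return levels;
}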
1957 * highest_flag_domain - Return highest sched_domain containing flag.
1971 if (sd->flags & flag) { in highest_flag_domain()
1992 if (sd->flags & flag) in lowest_flag_domain()
2023 unsigned long min_capacity; /* Min per-CPU capacity in group */
2024 unsigned long max_capacity; /* Max per-CPU capacity in group */
2057 return to_cpumask(sg->cpumask); in sched_group_span()
2065 return to_cpumask(sg->sgc->cpumask); in group_balance_mask()
2082 if (!p->user_cpus_ptr) in task_user_cpus()
2084 return p->user_cpus_ptr; in task_user_cpus()
2126 * The same is true for autogroup's p->signal->autogroup->tg, the autogroup
2134 return p->sched_task_group; in task_group()
2145 set_task_rq_fair(&p->se, p->se.cfs_rq, tg->cfs_rq[cpu]); in set_task_rq()
2146 p->se.cfs_rq = tg->cfs_rq[cpu]; in set_task_rq()
2147 p->se.parent = tg->se[cpu]; in set_task_rq()
2148 p->se.depth = tg->se[cpu] ? tg->se[cpu]->depth + 1 : 0; in set_task_rq()
2152 p->rt.rt_rq = tg->rt_rq[cpu]; in set_task_rq()
2153 p->rt.parent = tg->rt_se[cpu]; in set_task_rq()
2173 * After ->cpu is set up to a new value, task_rq_lock(p, ...) can be in __set_task_cpu()
2175 * per-task data have been completed by this moment. in __set_task_cpu()
2178 WRITE_ONCE(task_thread_info(p)->cpu, cpu); in __set_task_cpu()
2179 p->wake_cpu = cpu; in __set_task_cpu()
2205 * To support run-time toggling of sched features, all the translation units
2266 return rq->curr == p; in task_current()
2272 return p->on_cpu; in task_on_cpu()
2280 return p->on_rq == TASK_ON_RQ_QUEUED; in task_on_rq_queued()
2285 return READ_ONCE(p->on_rq) == TASK_ON_RQ_MIGRATING; in task_on_rq_migrating()
2296 #define WF_RQ_SELECTED 0x80 /* ->select_task_rq() was called */
2322 * DEQUEUE_SLEEP - task is no longer runnable
2323 * ENQUEUE_WAKEUP - task just became runnable
2325 * SAVE/RESTORE - an otherwise spurious dequeue/enqueue, done to ensure tasks
2329 * MOVE - paired with SAVE/RESTORE, explicitly does not preserve the location
2332 * NOCLOCK - skip the update_rq_clock() (avoids double updates)
2334 * MIGRATION - p->on_rq == TASK_ON_RQ_MIGRATING (used for DEADLINE)
2336 * ENQUEUE_HEAD - place at front of runqueue (tail if not specified)
2337 * ENQUEUE_REPLENISH - CBS (replenish runtime and postpone deadline)
2338 * ENQUEUE_MIGRATED - the task was migrated during wakeup
2339 * ENQUEUE_RQ_SELECTED - ->select_task_rq() was called
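The SAVE/RESTORE/NOCLOCK flags above exist for the change pattern used throughout core.c; roughly, as a sketch that elides the put_prev/set_next handling for the running task:

	bool queued = task_on_rq_queued(p);

	if (queued)
		dequeue_task(rq, p, DEQUEUE_SAVE | DEQUEUE_NOCLOCK);

	/* ... change p's scheduling parameters ... */

	if (queued)
		enqueue_task(rq, p, ENQUEUE_RESTORE | ENQUEUE_NOCLOCK);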
2368 #define RETRY_TASK ((void *)-1UL)
2427 * The switched_from() call is allowed to drop rq->lock, therefore we
2429 * rq->lock. They are however serialized by p->pi_lock.
2455 WARN_ON_ONCE(rq->curr != prev); in put_prev_task()
2456 prev->sched_class->put_prev_task(rq, prev, NULL); in put_prev_task()
2461 next->sched_class->set_next_task(rq, next, false); in set_next_task()
2469 prev->dl_server = NULL; in __put_prev_set_next_dl_server()
2470 next->dl_server = rq->dl_server; in __put_prev_set_next_dl_server()
2471 rq->dl_server = NULL; in __put_prev_set_next_dl_server()
2478 WARN_ON_ONCE(rq->curr != prev); in put_prev_set_next_task()
2485 prev->sched_class->put_prev_task(rq, prev, next); in put_prev_set_next_task()
2486 next->sched_class->set_next_task(rq, next, true); in put_prev_set_next_task()
2493 * include/asm-generic/vmlinux.lds.h
2504 /* Defined in include/asm-generic/vmlinux.lds.h */
2559 return rq->stop && task_on_rq_queued(rq->stop); in sched_stop_runnable()
2564 return rq->dl.dl_nr_running > 0; in sched_dl_runnable()
2569 return rq->rt.rt_queued > 0; in sched_rt_runnable()
2574 return rq->cfs.nr_running > 0; in sched_fair_runnable()
2597 if (!cpumask_test_cpu(cpu, p->cpus_ptr)) in task_allowed_on_cpu()
2601 if (!(p->flags & PF_KTHREAD) && !task_cpu_possible(cpu, p)) in task_allowed_on_cpu()
2619 struct task_struct *p = rq->curr; in get_push_task()
2623 if (rq->push_busy) in get_push_task()
2626 if (p->nr_cpus_allowed == 1) in get_push_task()
2629 if (p->migration_disabled) in get_push_task()
2632 rq->push_busy = true; in get_push_task()
2648 return set_cpus_allowed_ptr(p, ctx->new_mask); in __set_cpus_allowed_ptr()
2663 rq->idle_state = idle_state; in idle_set_state()
2670 return rq->idle_state; in idle_get_state()
2709 #define MAX_BW_BITS (64 - BW_SHIFT)
2710 #define MAX_BW ((1ULL << MAX_BW_BITS) - 1)
2745 unsigned prev_nr = rq->nr_running; in add_nr_running()
2747 rq->nr_running = prev_nr + count; in add_nr_running()
2753 if (prev_nr < 2 && rq->nr_running >= 2) in add_nr_running()
2754 set_rd_overloaded(rq->rd, 1); in add_nr_running()
2762 rq->nr_running -= count; in sub_nr_running()
2764 call_trace_sched_update_nr_running(rq, -count); in sub_nr_running()
2773 if (p->sched_contributes_to_load) in __block_task()
2774 rq->nr_uninterruptible++; in __block_task()
2776 if (p->in_iowait) { in __block_task()
2777 atomic_inc(&rq->nr_iowait); in __block_task()
2781 ASSERT_EXCLUSIVE_WRITER(p->on_rq); in __block_task()
2785 * this task, rendering our rq->__lock ineffective. in __block_task()
2788 * LOCK rq->__lock LOCK p->pi_lock in __block_task()
2794 * RELEASE p->on_rq = 0 if (p->on_rq && ...) in __block_task()
2797 * ACQUIRE (after ctrl-dep) in __block_task()
2803 * LOCK rq->__lock in __block_task()
2805 * STORE p->on_rq = 1 in __block_task()
2806 * UNLOCK rq->__lock in __block_task()
2808 * Callers must ensure not to reference @p after this -- we no longer in __block_task()
2811 smp_store_release(&p->on_rq, 0); in __block_task()
2847 * - enabled by features
2848 * - hrtimer is actually high res
2854 return hrtimer_is_hres_active(&rq->hrtick_timer); in hrtick_enabled()
2898 * arch_scale_freq_capacity - get the frequency scale factor of a given CPU.
2904 * f_curr / f_max * SCHED_CAPACITY_SCALE
2919 * rq->clock_update_flags to avoid the WARN_DOUBLE_CLOCK warning.
2923 rq1->clock_update_flags &= (RQCF_REQ_SKIP|RQCF_ACT_SKIP); in double_rq_clock_clear_update()
2926 rq2->clock_update_flags &= (RQCF_REQ_SKIP|RQCF_ACT_SKIP); in double_rq_clock_clear_update()
2945 * In order to not have {0,2},{1,3} turn into an AB-BA, in rq_order_less()
2946 * order by core-id first and cpu-id second. in rq_order_less()
2950 * double_rq_lock(0,3); will take core-0, core-1 lock in rq_order_less()
2951 * double_rq_lock(1,2); will take core-1, core-0 lock in rq_order_less()
2953 * when only cpu-id is considered. in rq_order_less()
2955 if (rq1->core->cpu < rq2->core->cpu) in rq_order_less()
2957 if (rq1->core->cpu > rq2->core->cpu) in rq_order_less()
2961 * __sched_core_flip() relies on SMT having cpu-id lock order. in rq_order_less()
2964 return rq1->cpu < rq2->cpu; in rq_order_less()
2972 * fair double_lock_balance: Safely acquires both rq->locks in a fair
2980 __releases(this_rq->lock) in _double_lock_balance()
2981 __acquires(busiest->lock) in _double_lock_balance()
2982 __acquires(this_rq->lock) in _double_lock_balance()
2994 * already in proper order on entry. This favors lower CPU-ids and will
2999 __releases(this_rq->lock) in _double_lock_balance()
3000 __acquires(busiest->lock) in _double_lock_balance()
3001 __acquires(this_rq->lock) in _double_lock_balance()
3024 * double_lock_balance - lock the busiest runqueue, this_rq is locked already.
3034 __releases(busiest->lock) in double_unlock_balance()
3038 lock_set_subclass(&__rq_lockp(this_rq)->dep_map, 0, _RET_IP_); in double_unlock_balance()
3075 double_raw_lock(_T->lock, _T->lock2),
3076 double_raw_unlock(_T->lock, _T->lock2))
3079 * double_rq_unlock - safely unlock two runqueues
3085 __releases(rq1->lock) in double_rq_unlock()
3086 __releases(rq2->lock) in double_rq_unlock()
3091 __release(rq2->lock); in double_rq_unlock()
3103 * double_rq_lock - safely lock two runqueues
3109 __acquires(rq1->lock) in double_rq_lock()
3110 __acquires(rq2->lock) in double_rq_lock()
3115 __acquire(rq2->lock); /* Fake it out ;) */ in double_rq_lock()
3120 * double_rq_unlock - safely unlock two runqueues
3126 __releases(rq1->lock) in double_rq_unlock()
3127 __releases(rq2->lock) in double_rq_unlock()
3131 __release(rq2->lock); in double_rq_unlock()
3137 double_rq_lock(_T->lock, _T->lock2),
3138 double_rq_unlock(_T->lock, _T->lock2))
3190 #define nohz_flags(cpu) (&cpu_rq(cpu)->nohz_flags)
3226 seq = __u64_stats_fetch_begin(&irqtime->sync); in irq_time_read()
3227 total = irqtime->total; in irq_time_read()
3228 } while (__u64_stats_fetch_retry(&irqtime->sync, seq)); in irq_time_read()
3240 * cpufreq_update_util - Take a note about CPU utilization changes.
3247 * It can only be called from RCU-sched read-side critical sections.
3258 * but that really is a band-aid. Going forward it should be replaced with
3268 data->func(data, rq_clock(rq), flags); in cpufreq_update_util()
3299 * (BW_SHIFT - SCHED_CAPACITY_SHIFT) and false otherwise.
3305 return cap >= p->dl.dl_density >> (BW_SHIFT - SCHED_CAPACITY_SHIFT); in dl_task_fits_capacity()
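A worked instance of the fitness check above, assuming BW_SHIFT == 20 and SCHED_CAPACITY_SHIFT == 10: a task with dl_runtime = 10ms and dl_deadline = 40ms has dl_density = (10 << 20) / 40, i.e. 0.25 in fixed point, so dl_density >> (20 - 10) == 256 and the task fits on any CPU whose original capacity is at least 256.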
3310 return (rq->dl.running_bw * SCHED_CAPACITY_SCALE) >> BW_SHIFT; in cpu_bw_dl()
3315 return READ_ONCE(rq->avg_dl.util_avg); in cpu_util_dl()
3324 return READ_ONCE(rq->avg_rt.util_avg); in cpu_util_rt()
3338 return READ_ONCE(rq->uclamp[clamp_id].value); in uclamp_rq_get()
3344 WRITE_ONCE(rq->uclamp[clamp_id].value, value); in uclamp_rq_set()
3349 return rq->uclamp_flags & UCLAMP_FLAG_IDLE; in uclamp_rq_is_idle()
3362 max_util = READ_ONCE(rq->uclamp[UCLAMP_MAX].value); in uclamp_rq_is_capped()
3372 * Returns true if userspace opted-in to use uclamp and aggregation at rq level
3398 return min_t(unsigned int, clamp_value / UCLAMP_BUCKET_DELTA, UCLAMP_BUCKETS - 1); in uclamp_bucket_id()
3404 uc_se->value = value; in uclamp_se_set()
3405 uc_se->bucket_id = uclamp_bucket_id(value); in uclamp_se_set()
3406 uc_se->user_defined = user_defined; in uclamp_se_set()
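A worked example of the bucket mapping above, assuming the default CONFIG_UCLAMP_BUCKETS_COUNT of 5: UCLAMP_BUCKET_DELTA = DIV_ROUND_CLOSEST(1024, 5) = 205, so a clamp value of 512 lands in bucket 512 / 205 = 2, while the maximum value of 1024 maps to 1024 / 205 = 4, the last bucket; the min_t() above guarantees the index never exceeds UCLAMP_BUCKETS - 1.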
3452 return READ_ONCE(rq->avg_irq.util_avg); in cpu_util_irq()
3458 util *= (max - irq); in scale_irq_capacity()
3482 #define perf_domain_span(pd) (to_cpumask(((pd)->em_pd->cpus)))
3505 * - prior user-space memory accesses and store to rq->membarrier_state,
3506 * - store to rq->membarrier_state and following user-space memory accesses.
3507 * In the same way it provides those guarantees around store to rq->curr.
3518 membarrier_state = atomic_read(&next_mm->membarrier_state); in membarrier_switch_mm()
3519 if (READ_ONCE(rq->membarrier_state) == membarrier_state) in membarrier_switch_mm()
3522 WRITE_ONCE(rq->membarrier_state, membarrier_state); in membarrier_switch_mm()
3538 if (!(p->flags & PF_KTHREAD)) in is_per_cpu_kthread()
3541 if (p->nr_cpus_allowed != 1) in is_per_cpu_kthread()
3580 * The per-mm/cpu cid can have the MM_CID_LAZY_PUT flag set or transition to
3589 struct mm_struct *mm = t->mm; in mm_cid_put_lazy()
3590 struct mm_cid __percpu *pcpu_cid = mm->pcpu_cid; in mm_cid_put_lazy()
3594 cid = __this_cpu_read(pcpu_cid->cid); in mm_cid_put_lazy()
3596 !try_cmpxchg(&this_cpu_ptr(pcpu_cid)->cid, &cid, MM_CID_UNSET)) in mm_cid_put_lazy()
3603 struct mm_cid __percpu *pcpu_cid = mm->pcpu_cid; in mm_cid_pcpu_unset()
3607 cid = __this_cpu_read(pcpu_cid->cid); in mm_cid_pcpu_unset()
3612 * Attempt transition from valid or lazy-put to unset. in mm_cid_pcpu_unset()
3614 res = cmpxchg(&this_cpu_ptr(pcpu_cid)->cid, cid, MM_CID_UNSET); in mm_cid_pcpu_unset()
3641 * filled. This only happens during concurrent remote-clear in __mm_cid_try_get()
3651 return -1; in __mm_cid_try_get()
3658 * with the per-cpu cid value, allowing us to estimate how recently it was used.
3662 struct mm_cid *pcpu_cid = per_cpu_ptr(mm->pcpu_cid, cpu_of(rq)); in mm_cid_snapshot_time()
3665 WRITE_ONCE(pcpu_cid->time, rq->clock); in mm_cid_snapshot_time()
3673 * All allocations (even those using the cid_lock) are lock-free. If in __mm_cid_get()
3723 struct mm_cid __percpu *pcpu_cid = mm->pcpu_cid; in mm_cid_get()
3729 cid = __this_cpu_read(pcpu_cid->cid); in mm_cid_get()
3735 if (try_cmpxchg(&this_cpu_ptr(pcpu_cid)->cid, &cid, MM_CID_UNSET)) in mm_cid_get()
3739 __this_cpu_write(pcpu_cid->cid, cid); in mm_cid_get()
3749 * Provide a memory barrier between rq->curr store and load of in switch_mm_cid()
3750 * {prev,next}->mm->pcpu_cid[cpu] on rq->curr->mm transition. in switch_mm_cid()
3754 if (!next->mm) { // to kernel in switch_mm_cid()
3756 * user -> kernel transition does not guarantee a barrier, but in switch_mm_cid()
3760 if (prev->mm) // from user in switch_mm_cid()
3763 * kernel -> kernel transition does not change rq->curr->mm in switch_mm_cid()
3768 * kernel -> user transition does not provide a barrier in switch_mm_cid()
3769 * between rq->curr store and load of {prev,next}->mm->pcpu_cid[cpu]. in switch_mm_cid()
3772 if (!prev->mm) { // from kernel in switch_mm_cid()
3776 * user->user transition relies on an implicit in switch_mm_cid()
3778 * current->mm changes. If the architecture in switch_mm_cid()
3780 * barrier, it is emitted here. If current->mm in switch_mm_cid()
3786 if (prev->mm_cid_active) { in switch_mm_cid()
3787 mm_cid_snapshot_time(rq, prev->mm); in switch_mm_cid()
3789 prev->mm_cid = -1; in switch_mm_cid()
3791 if (next->mm_cid_active) in switch_mm_cid()
3792 next->last_mm_cid = next->mm_cid = mm_cid_get(rq, next->mm); in switch_mm_cid()
3811 prio = min(prio, pi_task->prio); in __rt_effective_prio()