1 /* SPDX-License-Identifier: GPL-2.0 */
41 #include <linux/posix-timers_types.h>
91 * We have two separate sets of flags: task->__state
92 * is about runnability, while task->exit_state are
98 /* Used in tsk->__state: */
104 /* Used in tsk->exit_state: */
108 /* Used in tsk->__state again: */
121 #define TASK_ANY (TASK_STATE_MAX-1)
144 #define task_is_running(task) (READ_ONCE((task)->__state) == TASK_RUNNING)
146 #define task_is_traced(task) ((READ_ONCE(task->jobctl) & JOBCTL_TRACED) != 0)
147 #define task_is_stopped(task) ((READ_ONCE(task->jobctl) & JOBCTL_STOPPED) != 0)
148 #define task_is_stopped_or_traced(task) ((READ_ONCE(task->jobctl) & (JOBCTL_STOPPED | JOBCTL_TRACED)) != 0)
151 * Special states are those that do not use the normal wait-loop pattern. See
162 current->task_state_change = _THIS_IP_; \
168 current->task_state_change = _THIS_IP_; \
173 current->saved_state_change = current->task_state_change;\
174 current->task_state_change = _THIS_IP_; \
179 current->task_state_change = current->saved_state_change;\
190 * set_current_state() includes a barrier so that the write of current->__state
210 * wake_up_state(p, TASK_UNINTERRUPTIBLE);
213 * accessing p->__state.
215 * Wakeup will do: if (@state & p->__state) p->__state = TASK_RUNNING, that is,
229 WRITE_ONCE(current->__state, (state_value)); \
235 smp_store_mb(current->__state, (state_value)); \
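For reference, the comment block above is describing the canonical condition-based wait-loop; a minimal sketch (CONDITION and the waker side are illustrative placeholders, not symbols from this header):

	for (;;) {
		set_current_state(TASK_UNINTERRUPTIBLE);
		if (CONDITION)
			break;
		schedule();
	}
	__set_current_state(TASK_RUNNING);

The waker then does CONDITION = 1; wake_up_state(p, TASK_UNINTERRUPTIBLE);, and the barrier in set_current_state() is what keeps the condition test from being reordered before the state write.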
240 * can not use the regular condition based wait-loop. In that case we must
241 * serialize against wakeups such that any possible in-flight TASK_RUNNING
248 raw_spin_lock_irqsave(&current->pi_lock, flags); \
250 WRITE_ONCE(current->__state, (state_value)); \
251 raw_spin_unlock_irqrestore(&current->pi_lock, flags); \
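Typical users of set_special_state() are states that never go through the wait-loop above; for example, mirroring what do_task_dead() and do_signal_stop() do (the call sites are context, not part of this header):

	set_special_state(TASK_DEAD);
	/* ... followed by the final call into the scheduler; the task never returns */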
272 * raw_spin_unlock_irq(&lock->wait_lock);
274 * raw_spin_lock_irq(&lock->wait_lock);
282 raw_spin_lock(&current->pi_lock); \
283 current->saved_state = current->__state; \
285 WRITE_ONCE(current->__state, TASK_RTLOCK_WAIT); \
286 raw_spin_unlock(&current->pi_lock); \
292 raw_spin_lock(&current->pi_lock); \
294 WRITE_ONCE(current->__state, current->saved_state); \
295 current->saved_state = TASK_RUNNING; \
296 raw_spin_unlock(&current->pi_lock); \
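Putting the two helpers together, the PREEMPT_RT lock slowpath the comment fragments above come from has roughly this shape (try_lock_acquire() is a placeholder for the lock-specific trylock):

	current_save_and_set_rtlock_wait_state();
	for (;;) {
		if (try_lock_acquire(lock))
			break;
		raw_spin_unlock_irq(&lock->wait_lock);
		schedule_rtlock();
		raw_spin_lock_irq(&lock->wait_lock);
		set_current_state(TASK_RTLOCK_WAIT);
	}
	current_restore_rtlock_saved_state();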
299 #define get_current_state() READ_ONCE(current->__state)
331 * struct prev_cputime - snapshot of system and user cputime
453 * The load/runnable/util_avg doesn't directly factor frequency scaling and CPU
454 * capacity scaling. The scaling is done through the rq_clock_pelt that is used
464 * The 64-bit load_sum can have 4353082796 (=2^64/47742/88761) entities
468 * For all other cases (including 32-bit kernels), struct load_weight's
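As a quick check of the figure quoted above: 47742 is the PELT LOAD_AVG_MAX and 88761 the load weight of a nice -20 task, so 2^64 / 47742 / 88761 ≈ 4,353,082,796, i.e. the 64-bit load_sum only overflows with billions of maximum-weight entities runnable on a single cfs_rq.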
542 /* For load-balancing: */
572 /* cached value of my_q->h_nr_running */
581 * collide with read-mostly values above.
672 * Bandwidth enforcement timer. Each -deadline task has its
679 * at the "0-lag time". When a -deadline task blocks, it contributes
680 * to GRUB's active utilization until the "0-lag time", hence a
687 * Bits for DL-server functionality. Also see the comment near
718 * @user_defined: the requested clamp value comes from user-space
721 * which is pre-computed and stored to avoid expensive integer divisions from
725 * which can be different from the clamp value "requested" from user-space.
729 * The user_defined bit is set whenever a task has got a task-specific clamp
732 * restrictive task-specific value has been requested, thus allowing to
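The bucket_id mentioned above is essentially the clamp value quantised into UCLAMP_BUCKETS slots of the capacity range, computed once when the clamp value changes (hence "pre-computed") rather than in the fast path; a sketch of the mapping, following the helper in kernel/sched/core.c (treat the exact definition as an assumption of this note):

	static inline unsigned int uclamp_bucket_id(unsigned int clamp_value)
	{
		/* UCLAMP_BUCKET_DELTA is SCHED_CAPACITY_SCALE / UCLAMP_BUCKETS (rounded) */
		return min_t(unsigned int, clamp_value / UCLAMP_BUCKET_DELTA,
			     UCLAMP_BUCKETS - 1);
	}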
755 perf_invalid_context = -1,
793 * scheduling-critical items should be added above here.
948 * queueing no longer being serialized by p->on_cpu. However:
950 * p->XXX = X; ttwu()
951 * schedule() if (p->on_rq && ..) // false
952 * smp_mb__after_spinlock(); if (smp_load_acquire(&p->on_cpu) && //true
954 * p->on_rq = 0; p->sched_remote_wakeup = Y;
957 * ->sched_remote_wakeup gets used, so it can be in this word.
981 /* disallow userland-initiated cgroup migration */
1022 /* Canary value for the -fstack-protector GCC feature: */
1027 * older sibling, respectively. (p->father can be replaced with
1028 * p->real_parent->pid)
1048 * 'ptrace_entry' is this task's link on the p->parent->ptraced list.
1094 /* MM fault and swap info: this can arguably be seen as either mm-specific or thread-specific: */
1124 * - normally initialized setup_new_exec()
1125 * - access it with [gs]et_task_comm()
1126 * - lock it with task_lock()
1181 /* Protection against (de-)allocation: mm, files, fs, tty, keyrings, mems_allowed, mempolicy: */
1266 /* Protected by ->alloc_lock: */
1275 /* cg_list protected by css_set_lock and tsk->alloc_lock: */
1324 * - RCU read-side critical section
1325 * - current->numa_group from everywhere
1326 * - task's runqueue locked, task not running
1335 * faults_memory: Exponential decaying average of faults on a per-node
1398 /* Start of a write-and-pause period: */
1496 /* Cache for current->cgroups->memcg->objcg lookups: */
1545 /* Used by BPF for per-TASK xdp storage */
1583 * Per-task RV monitor. Nowadays fixed in RV_PER_TASK_MONITORS.
1601 /* CPU-specific state of this task: */
1605 * WARNING: on x86, 'thread_struct' contains a variable-sized
1638 return __task_state_index(READ_ONCE(tsk->__state), tsk->exit_state);
1645 BUILD_BUG_ON(TASK_REPORT_MAX * 2 != 1 << (sizeof(state_char) - 1));
1668 #define PF_SUPERPRIV 0x00000100 /* Used super-user privileges */
1696 * Only the _current_ task can read/write to tsk->flags, but other
1697 * tasks can access tsk->flags in readonly mode for example
1701 * child->flags of its traced child (same goes for fork, the parent
1702 * can write to the child->flags), because we're guaranteed the
1703 * child is not running and in turn not changing child->flags
1706 #define clear_stopped_child_used_math(child) do { (child)->flags &= ~PF_USED_MATH; } while (0)
1707 #define set_stopped_child_used_math(child) do { (child)->flags |= PF_USED_MATH; } while (0)
1712 do { (child)->flags &= ~PF_USED_MATH, (child)->flags |= (condition) ? PF_USED_MATH : 0; } while (0)
1717 do { (child)->flags &= ~PF_USED_MATH, (child)->flags |= current->flags & PF_USED_MATH; } while (0)
1720 #define tsk_used_math(p) ((p)->flags & PF_USED_MATH)
1726 return (current->flags & PF_NO_SETAFFINITY) &&
1727 (current->nr_cpus_allowed == 1);
1733 /* Per-process atomic flags. */
1734 #define PFA_NO_NEW_PRIVS 0 /* May not gain new privileges. */
1744 static inline bool task_##func(struct task_struct *p) \
1745 { return test_bit(PFA_##name, &p->atomic_flags); }
1748 static inline void task_set_##func(struct task_struct *p) \
1749 { set_bit(PFA_##name, &p->atomic_flags); }
1752 static inline void task_clear_##func(struct task_struct *p) \
1753 { clear_bit(PFA_##name, &p->atomic_flags); }
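These three generators are instantiated once per atomic flag further down in this header; for instance the NO_NEW_PRIVS flag gets a test and a set helper only (there is deliberately no clear helper for it):

	TASK_PFA_TEST(NO_NEW_PRIVS, no_new_privs)
	TASK_PFA_SET(NO_NEW_PRIVS, no_new_privs)

which expand to task_no_new_privs(p) and task_set_no_new_privs(p), thin test_bit()/set_bit() wrappers over p->atomic_flags.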
1787 current->flags &= ~flags;
1788 current->flags |= orig_flags & flags;
1792 extern int task_can_attach(struct task_struct *p);
1797 /* do_set_cpus_allowed() - consider using set_cpus_allowed_ptr() instead */
1798 extern void do_set_cpus_allowed(struct task_struct *p, const struct cpumask *new_mask);
1801 * set_cpus_allowed_ptr - set CPU affinity mask of a task
1802 * @p: the task
1807 extern int set_cpus_allowed_ptr(struct task_struct *p, const struct cpumask *new_mask);
1809 extern void release_user_cpus_ptr(struct task_struct *p);
1810 extern int dl_task_check_affinity(struct task_struct *p, const struct cpumask *mask);
1811 extern void force_compatible_cpus_allowed_ptr(struct task_struct *p);
1812 extern void relax_compatible_cpus_allowed_ptr(struct task_struct *p);
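A typical caller pins a task to one CPU and later widens its mask again; a minimal sketch (illustrative only, not taken from this header):

	if (set_cpus_allowed_ptr(p, cpumask_of(cpu)))
		pr_warn("failed to pin task to CPU %u\n", cpu);
	/* ... */
	set_cpus_allowed_ptr(p, cpu_possible_mask);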
1814 static inline void do_set_cpus_allowed(struct task_struct *p, const struct cpumask *new_mask)
1817 static inline int set_cpus_allowed_ptr(struct task_struct *p, const struct cpumask *new_mask)
1821 return -EINVAL;
1826 if (src->user_cpus_ptr)
1827 return -EINVAL;
1830 static inline void release_user_cpus_ptr(struct task_struct *p)
1832 WARN_ON(p->user_cpus_ptr);
1835 static inline int dl_task_check_affinity(struct task_struct *p, const struct cpumask *mask)
1841 extern int yield_to(struct task_struct *p, bool preempt);
1842 extern void set_user_nice(struct task_struct *p, long nice);
1843 extern int task_prio(const struct task_struct *p);
1846 * task_nice - return the nice value of a given task.
1847 * @p: the task in question.
1849 * Return: The nice value [ -20 ... 0 ... 19 ].
1851 static inline int task_nice(const struct task_struct *p)
1853 return PRIO_TO_NICE((p)->static_prio);
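Concretely, static_prio encodes the nice value shifted into the [100..139] non-RT priority range around DEFAULT_PRIO (120), so PRIO_TO_NICE() maps 100 to -20, 120 to 0 and 139 to 19.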
1856 extern int can_nice(const struct task_struct *p, const int nice);
1857 extern int task_curr(const struct task_struct *p);
1862 extern void sched_set_fifo(struct task_struct *p);
1863 extern void sched_set_fifo_low(struct task_struct *p);
1864 extern void sched_set_normal(struct task_struct *p, int nice);
1870 * is_idle_task - is the specified task an idle task?
1871 * @p: the task in question.
1873 * Return: 1 if @p is an idle task. 0 otherwise.
1875 static __always_inline bool is_idle_task(const struct task_struct *p)
1877 return !!(p->flags & PF_IDLE);
1881 extern void ia64_set_curr_task(int cpu, struct task_struct *p);
1900 # define task_thread_info(task) (&(task)->thread_info)
1902 # define task_thread_info(task) ((struct thread_info *)(task)->stack)
2073 #define MIGHT_RESCHED_PREEMPT_MASK ((1U << MIGHT_RESCHED_RCU_SHIFT) - 1)
2112 * Wrappers for p->thread_info->cpu access. No-op on UP.
2116 static inline unsigned int task_cpu(const struct task_struct *p)
2118 return READ_ONCE(task_thread_info(p)->cpu);
2121 extern void set_task_cpu(struct task_struct *p, unsigned int cpu);
2125 static inline unsigned int task_cpu(const struct task_struct *p)
2130 static inline void set_task_cpu(struct task_struct *p, unsigned int cpu)
2136 static inline bool task_is_runnable(struct task_struct *p)
2138 return p->on_rq && !p->se.sched_delayed;
2141 extern bool sched_task_on_rq(struct task_struct *p);
2142 extern unsigned long get_wchan(struct task_struct *p);
2176 return READ_ONCE(owner->on_cpu) && !vcpu_is_preempted(task_cpu(owner));
2185 extern void sched_core_fork(struct task_struct *p);
2191 static inline void sched_core_fork(struct task_struct *p) { }
2200 swap(current->alloc_tag, tag);
2207 WARN(current->alloc_tag != tag, "current->alloc_tag was changed:\n");
2209 current->alloc_tag = old;
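The intended pairing of these two helpers is save, allocate, restore; a minimal sketch (assuming @tag points at a struct alloc_tag set up by the allocation-tagging machinery):

	struct alloc_tag *old = alloc_tag_save(tag);

	/* allocations made in this window are accounted to @tag */

	alloc_tag_restore(tag, old);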