/linux-6.12.1/kernel/sched/ |
D | pelt.c |
    103  unsigned long load, unsigned long runnable, int running)  in accumulate_sum() argument
    143  if (runnable)  in accumulate_sum()
    144  sa->runnable_sum += runnable * contrib << SCHED_CAPACITY_SHIFT;  in accumulate_sum()
    181  unsigned long load, unsigned long runnable, int running)  in ___update_load_sum() argument
    217  runnable = running = 0;  in ___update_load_sum()
    226  if (!accumulate_sum(delta, sa, load, runnable, running))  in ___update_load_sum()
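The accumulate_sum() hits above show the runnable signal being built up period by period. As a rough, self-contained illustration of the PELT idea (a geometrically decaying sum with a half-life of 32 periods of roughly 1 ms each; this toy program is not the kernel code and its constants are approximations only):

    #include <stdio.h>
    #include <math.h>

    #define PELT_PERIOD_US      1024.0   /* one accumulation period, ~1ms */
    #define HALF_LIFE_PERIODS     32.0   /* sum halves after 32 idle periods */

    int main(void)
    {
            double y = pow(0.5, 1.0 / HALF_LIFE_PERIODS);   /* y^32 == 0.5 */
            double runnable_sum = 0.0;

            /* Runnable for the whole period during the first 64 periods,
             * then idle: the sum ramps up, then decays back toward zero. */
            for (int period = 0; period < 128; period++) {
                    double contrib = (period < 64) ? PELT_PERIOD_US : 0.0;

                    runnable_sum = runnable_sum * y + contrib;
                    if (period % 16 == 15)
                            printf("period %3d  runnable_sum %8.1f\n",
                                   period, runnable_sum);
            }
            return 0;
    }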
|
D | ext.c |
    317   void (*runnable)(struct task_struct *p, u64 enq_flags);  member
    336   void (*stopping)(struct task_struct *p, bool runnable);
    2111  if (SCX_HAS_OP(runnable) && !task_on_rq_migrating(p))  in enqueue_task_scx()
    2112  SCX_CALL_OP_TASK(SCX_KF_REST, runnable, p, enq_flags);  in enqueue_task_scx()
    5551  static void stopping_stub(struct task_struct *p, bool runnable) {}  in stopping_stub() argument
    5586  .runnable = runnable_stub,
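The ext.c hits are the sched_ext ops table: ops.runnable() fires when a task is enqueued and ops.stopping() fires when it leaves the CPU, with the bool telling the BPF scheduler whether the task still wants to run. A minimal, hypothetical BPF-side sketch of wiring those two callbacks up (the demo_* names are invented; the real examples live under tools/sched_ext/):

    #include <scx/common.bpf.h>

    char _license[] SEC("license") = "GPL";

    void BPF_STRUCT_OPS(demo_runnable, struct task_struct *p, u64 enq_flags)
    {
            /* p just became runnable (woke up or was re-enqueued). */
    }

    void BPF_STRUCT_OPS(demo_stopping, struct task_struct *p, bool runnable)
    {
            /* p is coming off the CPU; @runnable says whether it is still
             * runnable or is blocking/exiting. */
    }

    SEC(".struct_ops.link")
    struct sched_ext_ops demo_ops = {
            .runnable   = (void *)demo_runnable,
            .stopping   = (void *)demo_stopping,
            .name       = "demo",
    };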
|
D | fair.c |
    2037  unsigned long runnable;  member
    2073  ((ns->compute_capacity * imbalance_pct) < (ns->runnable * 100))))  in numa_classify()
    2078  ((ns->compute_capacity * imbalance_pct) > (ns->runnable * 100))))  in numa_classify()
    2129  ns->runnable += cpu_runnable(rq);  in update_numa_stats()
    7277  unsigned int runnable;  in cpu_runnable_without() local
    7284  runnable = READ_ONCE(cfs_rq->avg.runnable_avg);  in cpu_runnable_without()
    7287  lsub_positive(&runnable, p->se.avg.runnable_avg);  in cpu_runnable_without()
    7289  return runnable;  in cpu_runnable_without()
    8016  unsigned long runnable;  in cpu_util() local
    8019  runnable = READ_ONCE(cfs_rq->avg.runnable_avg);  in cpu_util()
    [all …]
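cpu_runnable_without() above removes the waking task's own runnable_avg contribution from the runqueue sum via lsub_positive(), which clamps at zero instead of letting the unsigned value wrap around. A small stand-alone illustration of that clamped-subtraction pattern (not the kernel macro itself):

    #include <stdio.h>

    static void sub_positive_clamped(unsigned long *sum, unsigned long val)
    {
            /* Never wrap below zero: subtract at most what is there. */
            *sum -= (val < *sum) ? val : *sum;
    }

    int main(void)
    {
            unsigned long rq_runnable = 300, task_runnable = 450;

            sub_positive_clamped(&rq_runnable, task_runnable);
            printf("%lu\n", rq_runnable);   /* prints 0, not a wrapped huge value */
            return 0;
    }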
|
/linux-6.12.1/tools/sched_ext/ |
D | scx_flatcg.bpf.c |
    419  static void update_active_weight_sums(struct cgroup *cgrp, bool runnable)  in update_active_weight_sums() argument
    436  if (runnable) {  in update_active_weight_sums()
    453  if (!runnable)  in update_active_weight_sums()
    479  if (runnable) {  in update_active_weight_sums()
    506  if (runnable)  in update_active_weight_sums()
    542  void BPF_STRUCT_OPS(fcg_stopping, struct task_struct *p, bool runnable)  in BPF_STRUCT_OPS() argument
    945  .runnable = (void *)fcg_runnable,
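scx_flatcg's update_active_weight_sums() maintains per-level sums of the weights of cgroups that currently have runnable tasks, so each cgroup's hierarchical share can be collapsed into a single flat number. A toy calculation of that compounding idea, with made-up weights (an illustration only, not the BPF code):

    #include <stdio.h>

    int main(void)
    {
            /* root -> A  (weight 100, active sibling B has weight 300)
             * A    -> A1 (weight 50, active sibling A2 has weight 50)  */
            double hweight_A  = 100.0 / (100.0 + 300.0);
            double hweight_A1 = hweight_A * 50.0 / (50.0 + 50.0);

            printf("A1 flattened share: %.3f of the machine\n", hweight_A1); /* 0.125 */
            return 0;
    }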
|
D | scx_simple.bpf.c | 115 void BPF_STRUCT_OPS(simple_stopping, struct task_struct *p, bool runnable) in BPF_STRUCT_OPS() argument
|
D | scx_central.bpf.c | 251 void BPF_STRUCT_OPS(central_stopping, struct task_struct *p, bool runnable) in BPF_STRUCT_OPS() argument
|
/linux-6.12.1/Documentation/scheduler/ |
D | schedutil.rst |
    35   Using this we track 2 key metrics: 'running' and 'runnable'. 'Running'
    36   reflects the time an entity spends on the CPU, while 'runnable' reflects the
    40   while 'runnable' will increase to reflect the amount of contention.
    83   The result is that the above 'running' and 'runnable' metrics become invariant
    104  A further runqueue wide sum (of runnable tasks) is maintained of:
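The schedutil.rst lines describe 'running' and 'runnable' as PELT averages. In rough form (this follows the general shape of the PELT sum rather than quoting the document): each period of about 1 ms contributes the time the entity was runnable, and older periods decay geometrically,

    u_{sum} = \sum_{i \ge 0} u_i \, y^{i}, \qquad y^{32} = 0.5,

so an entity that is always runnable converges to a finite maximum (roughly 1024/(1 - y) in the kernel's fixed-point units), and 'runnable' rises above 'running' exactly when tasks are queued up waiting for the CPU.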
|
D | sched-design-CFS.rst |
    48   up CPU time between runnable tasks as close to "ideal multitasking hardware" as
    75   CFS maintains a time-ordered rbtree, where all runnable tasks are sorted by the
    174  Called when a task enters a runnable state.
    180  When a task is no longer runnable, this function is called to keep the
    192  This function checks if a task that entered the runnable state should
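The CFS lines above refer to the rbtree of runnable tasks ordered by virtual runtime. As a sketch of why that ordering shares the CPU by weight (the nice-0 weight of 1024 matches the kernel convention, but the helper below is illustrative only):

    #include <stdio.h>

    #define NICE_0_WEIGHT 1024ULL

    /* vruntime grows as wall-clock runtime scaled down by the task's weight. */
    unsigned long long vruntime_delta(unsigned long long delta_exec_ns,
                                      unsigned long long weight)
    {
            return delta_exec_ns * NICE_0_WEIGHT / weight;
    }

    int main(void)
    {
            /* Both tasks run 10ms of wall time; the weight-2048 task accrues
             * half the vruntime, so the rbtree picks it again sooner. */
            printf("weight 1024: +%llu ns\n", vruntime_delta(10000000ULL, 1024));
            printf("weight 2048: +%llu ns\n", vruntime_delta(10000000ULL, 2048));
            return 0;
    }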
|
D | sched-ext.rst |
    18   a runnable task stalls, or on invoking the SysRq key sequence
    63   detection of any internal error including stalled runnable tasks aborts the
    211  global DSQ. If that doesn't yield a runnable task either, ``ops.dispatch()``
    280  * If the previous task is an SCX task and still runnable, keep executing
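Line 211 describes the dispatch order: the CPU's local DSQ first, then the global DSQ, then ops.dispatch(). A hypothetical ops.dispatch() body for the 6.12 sched_ext API that simply refills the local DSQ from the global one (assuming the same BPF scaffolding as the sketch after the ext.c entry above; not an excerpt from the document):

    void BPF_STRUCT_OPS(demo_dispatch, s32 cpu, struct task_struct *prev)
    {
            /* Pull one runnable task from the global DSQ onto this CPU. */
            scx_bpf_consume(SCX_DSQ_GLOBAL);
    }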
|
D | sched-eevdf.rst | 14 runnable tasks with the same priority. To do so, it assigns a virtual run
|
D | sched-bwc.rst |
    15   slices as threads in the cgroup become runnable. Once all quota has been
    202  decide which application is chosen to run, as they will both be runnable and
|
/linux-6.12.1/tools/testing/selftests/sched_ext/ |
D | maximal.bpf.c |
    40   void BPF_STRUCT_OPS(maximal_stopping, struct task_struct *p, bool runnable)  in BPF_STRUCT_OPS() argument
    138  .runnable = (void *) maximal_runnable,
|
D | select_cpu_vtime.bpf.c | 67 bool runnable) in BPF_STRUCT_OPS() argument
|
/linux-6.12.1/Documentation/timers/ |
D | no_hz.rst |
    24   have only one runnable task (CONFIG_NO_HZ_FULL=y). Unless you
    44   will frequently be multiple runnable tasks per CPU. In these cases,
    107  If a CPU has only one runnable task, there is little point in sending it
    109  Note that omitting scheduling-clock ticks for CPUs with only one runnable
    113  sending scheduling-clock interrupts to CPUs with a single runnable task,
    257  runnable task for a given CPU, even though there are a number
    260  runnable high-priority SCHED_FIFO task and an arbitrary number
    267  single runnable SCHED_FIFO task and multiple runnable SCHED_OTHER
    270  And even when there are multiple runnable tasks on a given CPU,
|
/linux-6.12.1/drivers/gpu/drm/panthor/ |
D | panthor_sched.c |
    239   struct list_head runnable[PANTHOR_CSG_PRIORITY_COUNT];  member
    2078  &sched->groups.runnable[group->priority]);  in tick_ctx_cleanup()
    2098  &sched->groups.runnable[group->priority]);  in tick_ctx_cleanup()
    2252  list_move_tail(&group->run_node, &sched->groups.runnable[prio]);  in tick_ctx_apply()
    2282  if (!list_empty(&sched->groups.runnable[ctx->min_priority])) {  in tick_ctx_update_resched_target()
    2337  &sched->groups.runnable[prio],  in tick_work()
    2347  tick_ctx_pick_groups_from_list(sched, &ctx, &sched->groups.runnable[prio],  in tick_work()
    2459  &sched->groups.runnable[group->priority]);  in sync_upd_work()
    2478  struct list_head *queue = &sched->groups.runnable[group->priority];  in group_schedule_locked()
    2578  &sched->groups.runnable[group->priority]);  in panthor_group_start()
    [all …]
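panthor_sched.c keeps one runnable list per group priority and the tick picks groups starting from the priorities with work queued. A simplified stand-in for that per-priority queue layout (hypothetical names, plain singly linked lists instead of the kernel's list_head, and the scan direction here is only an assumption):

    #include <stddef.h>

    #define DEMO_PRIORITY_COUNT 4

    struct demo_group {
            struct demo_group *next;   /* next runnable group at the same priority */
            int priority;
    };

    struct demo_sched {
            /* One FIFO of runnable groups per priority level. */
            struct demo_group *runnable[DEMO_PRIORITY_COUNT];
    };

    /* Scan from the highest priority down and return the first queued group. */
    struct demo_group *demo_pick_next(struct demo_sched *s)
    {
            for (int prio = DEMO_PRIORITY_COUNT - 1; prio >= 0; prio--)
                    if (s->runnable[prio])
                            return s->runnable[prio];
            return NULL;
    }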
|
/linux-6.12.1/Documentation/accounting/ |
D | delay-accounting.rst |
    7   runnable task may wait for a free CPU to run on.
    12  a) waiting for a CPU (while being runnable)
|
D | taskstats-struct.rst | 112 /* Delay waiting for cpu, while runnable
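delay-accounting.rst and the taskstats field above both refer to the time a task spends runnable but waiting for a CPU. One simple way to observe that delay from userspace, on kernels that expose it, is /proc/<pid>/schedstat, whose three fields are on-CPU time, run-queue wait time and timeslice count (a hedged sketch, not part of either document):

    #include <stdio.h>

    int main(void)
    {
            unsigned long long on_cpu_ns, wait_ns, slices;
            FILE *f = fopen("/proc/self/schedstat", "r");

            if (f && fscanf(f, "%llu %llu %llu", &on_cpu_ns, &wait_ns, &slices) == 3)
                    printf("ran %llu ns, waited while runnable %llu ns, %llu slices\n",
                           on_cpu_ns, wait_ns, slices);
            if (f)
                    fclose(f);
            return 0;
    }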
|
/linux-6.12.1/Documentation/virt/ |
D | guest-halt-polling.rst | 18 even with other runnable tasks in the host.
|
/linux-6.12.1/Documentation/virt/kvm/ |
D | halt-polling.rst |
    18   interval or some other task on the runqueue is runnable the scheduler is
    150  - Halt polling will only be conducted by the host when no other tasks are runnable on
|
/linux-6.12.1/Documentation/admin-guide/pm/ |
D | cpuidle.rst |
    90   Tasks can be in various states. In particular, they are *runnable* if there are
    93   events to occur or similar). When a task becomes runnable, the CPU scheduler
    94   assigns it to one of the available CPUs to run and if there are no more runnable
    97   another CPU). [If there are multiple runnable tasks assigned to one CPU
    101  The special "idle" task becomes runnable if there are no other runnable tasks
    193  multiple runnable tasks assigned to one CPU at the same time, the only way to
    216  not be shared between multiple runnable tasks, the primary reason for using the
|
/linux-6.12.1/Documentation/core-api/ |
D | workqueue.rst |
    126  number of the currently runnable workers. Generally, work items are
    129  stalling should be optimal. As long as there are one or more runnable
    224  concurrency level. In other words, runnable CPU intensive
    232  regulated by the concurrency management and runnable
|
/linux-6.12.1/tools/perf/Documentation/ |
D | perf-sched.txt | 67 task scheduling delay (time between runnable and actually running) and
|
/linux-6.12.1/Documentation/arch/arm64/ |
D | asymmetric-32bit.rst | 154 ``KVM_EXIT_FAIL_ENTRY`` and will remain non-runnable until successfully
|
/linux-6.12.1/Documentation/arch/s390/ |
D | vfio-ccw.rst |
    331  space, and assemble a runnable kernel channel program by updating the
    382  channel program, which becomes runnable for a real device.
|
/linux-6.12.1/Documentation/admin-guide/hw-vuln/ |
D | core-scheduling.rst | 108 highest priority task with the same cookie is selected if there is one runnable
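The core-scheduling document groups tasks by cookie and only co-schedules siblings with matching cookies. A hedged example of giving the current process its own cookie through the prctl() interface that document describes (constants come from linux/prctl.h and require a kernel built with CONFIG_SCHED_CORE):

    #include <stdio.h>
    #include <sys/prctl.h>
    #include <linux/prctl.h>

    int main(void)
    {
            /* Create a new cookie covering every thread of this process. */
            if (prctl(PR_SCHED_CORE, PR_SCHED_CORE_CREATE, 0,
                      PR_SCHED_CORE_SCOPE_THREAD_GROUP, 0))
                    perror("PR_SCHED_CORE_CREATE");
            else
                    puts("process now has its own core-scheduling cookie");
            return 0;
    }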
|