
Searched refs: task_cpu (Results 1 – 25 of 36), sorted by relevance


/linux-6.12.1/arch/x86/um/
ptrace_32.c 173 int err, n, cpu = task_cpu(child); in get_fpregs()
190 int n, cpu = task_cpu(child); in set_fpregs()
203 int err, n, cpu = task_cpu(child); in get_fpxregs()
219 int n, cpu = task_cpu(child); in set_fpxregs()
/linux-6.12.1/Documentation/translations/zh_CN/scheduler/
sched-capacity.rst 302 task_util(p) < capacity(task_cpu(p))
358 then the task may become CPU-bound, i.e. ``task_util(p) > capacity(task_cpu(p))``; CPU capacity
374 task_uclamp_min(p) <= capacity(task_cpu(cpu))
387 task_bandwidth(p) < capacity(task_cpu(p))
/linux-6.12.1/kernel/sched/
stop_task.c 15 return task_cpu(p); /* stop tasks as never migrate */ in select_task_rq_stop()
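This single stop_task.c hit is the whole placement policy for the stop class: stop tasks are pinned, so wake-up selection returns the CPU the task is already on (idle.c below does the same for the idle class). Sketched in context, with the signature assumed from the usual sched_class conventions:

/* Sketch of the hit above in context (kernel/sched/stop_task.c);
 * the signature is an assumption. Stop tasks never migrate, so
 * selection is just the task's current CPU. */
static int select_task_rq_stop(struct task_struct *p, int cpu, int flags)
{
	return task_cpu(p); /* stop tasks as never migrate */
}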
core.c 314 int cpu = task_cpu(p); in sched_core_next()
2102 return cpu_curr(task_cpu(p)) == p; in task_curr()
2440 WARN_ON_ONCE(task_cpu(p) != new_cpu); in move_queued_task()
2534 if (cpumask_test_cpu(task_cpu(p), &p->cpus_mask)) in migration_cpu_stop()
2567 if (cpumask_test_cpu(task_cpu(p), p->cpus_ptr)) { in migration_cpu_stop()
2581 stop_one_cpu_nowait(task_cpu(p), migration_cpu_stop, in migration_cpu_stop()
2874 if (cpumask_test_cpu(task_cpu(p), &p->cpus_mask)) { in affine_move_task()
3064 !cpumask_test_cpu(task_cpu(p), ctx->new_mask))) { in __set_cpus_allowed_ptr_locked()
3286 if (task_cpu(p) != new_cpu) { in set_task_cpu()
3348 if (task_cpu(arg->dst_task) != arg->dst_cpu) in migrate_swap_stop()
[all …]
deadline.c 454 struct dl_bw *dl_b = dl_bw_of(task_cpu(p)); in task_non_contending()
459 __dl_sub(dl_b, dl_se->dl_bw, dl_bw_cpus(task_cpu(p))); in task_non_contending()
1782 struct dl_bw *dl_b = dl_bw_of(task_cpu(p)); in inactive_task_timer()
1791 __dl_sub(dl_b, p->dl.dl_bw, dl_bw_cpus(task_cpu(p))); in inactive_task_timer()
2533 int cpu = task_cpu(task); in find_later_rq()
2679 WARN_ON_ONCE(rq->cpu != task_cpu(p)); in pick_next_pushable_dl_task()
2910 __dl_sub(src_dl_b, p->dl.dl_bw, dl_bw_cpus(task_cpu(p))); in set_cpus_allowed_dl()
3258 int cpus, err = -1, cpu = task_cpu(p); in sched_dl_overflow()
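The deadline.c hits repeat one bookkeeping pattern: look up the root-domain bandwidth structure through task_cpu(p), then add or subtract the task's bandwidth scaled by the number of CPUs in that root domain. A hedged sketch of the subtraction side, with locking simplified and names taken from the hits:

/* Hedged sketch of the deadline.c pattern above: find the dl_bw of the
 * root domain containing task_cpu(p) and drop p's bandwidth from it. */
static void dl_sub_task_bw_sketch(struct task_struct *p)
{
	struct dl_bw *dl_b = dl_bw_of(task_cpu(p));

	raw_spin_lock(&dl_b->lock);
	__dl_sub(dl_b, p->dl.dl_bw, dl_bw_cpus(task_cpu(p)));
	raw_spin_unlock(&dl_b->lock);
}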
psi.c 891 task->pid, task->comm, task_cpu(task), in psi_flags_change()
902 int cpu = task_cpu(task); in psi_task_change()
920 int cpu = task_cpu(prev); in psi_task_switch()
995 int cpu = task_cpu(curr); in psi_account_irqtime()
cpudeadline.c 137 (cpu == task_cpu(p) && cap == max_cap)) { in cpudl_find()
idle.c 435 return task_cpu(p); /* IDLE tasks as never migrated */ in select_task_rq_idle()
cpuacct.c 336 unsigned int cpu = task_cpu(tsk); in cpuacct_charge()
fair.c 2494 .src_cpu = task_cpu(p), in task_numa_migrate()
3613 int src_nid = cpu_to_node(task_cpu(p)); in update_scan_period()
7257 if (cpu_of(rq) != task_cpu(p) || !READ_ONCE(p->se.avg.last_update_time)) in cpu_load_without()
7280 if (cpu_of(rq) != task_cpu(p) || !READ_ONCE(p->se.avg.last_update_time)) in cpu_runnable_without()
8029 if (p && task_cpu(p) == cpu && dst_cpu != cpu) in cpu_util()
8031 else if (p && task_cpu(p) != cpu && dst_cpu == cpu) in cpu_util()
8102 if (cpu != task_cpu(p) || !READ_ONCE(p->se.avg.last_update_time)) in cpu_util_without()
10581 if (cpu != task_cpu(p) || !READ_ONCE(p->se.avg.last_update_time)) in task_running_on_cpu()
13281 set_task_rq(p, task_cpu(p)); in task_change_group_fair()
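Several of the fair.c hits (cpu_load_without(), cpu_runnable_without(), cpu_util_without(), task_running_on_cpu()) share one guard: only treat p as contributing to a CPU's PELT signal if task_cpu(p) is that CPU and p's signal has ever been attached. A rough sketch of that shape, not the exact kernel code:

/* Rough sketch of the recurring "without" pattern in the fair.c hits:
 * discount p's utilization from a CPU only when p actually counts there. */
static unsigned long cpu_util_without_sketch(int cpu, struct task_struct *p)
{
	unsigned long util = cpu_util_cfs(cpu);

	/* p is on another CPU, or was never attached: nothing to subtract. */
	if (cpu != task_cpu(p) || !READ_ONCE(p->se.avg.last_update_time))
		return util;

	return util - min(util, task_util(p));
}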
/linux-6.12.1/kernel/rcu/
tasks.h 1005 cpu = task_cpu(t); in rcu_tasks_is_holdout()
1115 cpu = task_cpu(t); in check_holdout_task()
1682 int cpu = task_cpu(t); in trc_inspect_reader()
1703 WARN_ON_ONCE(ofl && task_curr(t) && (t != idle_task(task_cpu(t)))); in trc_inspect_reader()
1762 cpu = task_cpu(t); in trc_wait_for_one_reader()
1897 if (task_curr(t) && cpu_online(task_cpu(t))) in trc_check_slow_task()
1916 cpu = task_cpu(t); in show_stalled_task_trace()
tree_stall.h 431 cpu = task_cpu(rcuc); in rcu_is_rcuc_kthread_starving()
540 cpu = gpk ? task_cpu(gpk) : -1; in rcu_check_gp_kthread_starvation()
583 cpu = task_cpu(gpk); in rcu_check_gp_kthread_expired_fqs_timer()
tree_nocb.h 1542 rdp->nocb_gp_kthread ? (int)task_cpu(rdp->nocb_gp_kthread) : -1, in show_rcu_nocb_gp_state()
1586 rdp->nocb_cb_kthread ? (int)task_cpu(rdp->nocb_cb_kthread) : -1, in show_rcu_nocb_state()
/linux-6.12.1/kernel/trace/
trace_sched_wakeup.c 397 entry->next_cpu = task_cpu(next); in tracing_sched_switch_trace()
425 entry->next_cpu = task_cpu(wakee); in tracing_sched_wakeup_trace()
571 wakeup_cpu = task_cpu(p); in probe_wakeup()
/linux-6.12.1/include/linux/
kdb.h 193 unsigned int cpu = task_cpu(p); in kdb_process_cpu()
sched.h 2116 static inline unsigned int task_cpu(const struct task_struct *p) in task_cpu() function
2125 static inline unsigned int task_cpu(const struct task_struct *p) in task_cpu() function
2176 return READ_ONCE(owner->on_cpu) && !vcpu_is_preempted(task_cpu(owner)); in owner_on_cpu()
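The two sched.h hits at 2116 and 2125 are the definition site. A minimal sketch of the SMP/UP pair, assuming the usual layout where the CPU number is cached in the task's thread_info (the exact field access may differ by configuration):

/* Sketch of the two definitions hit above (include/linux/sched.h).
 * On SMP the task's current (or last) CPU is cached in its thread_info
 * and read with READ_ONCE() because it can change under the reader;
 * on UP there is only CPU 0. */
#ifdef CONFIG_SMP
static inline unsigned int task_cpu(const struct task_struct *p)
{
	return READ_ONCE(task_thread_info(p)->cpu);
}
#else
static inline unsigned int task_cpu(const struct task_struct *p)
{
	return 0;
}
#endif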
/linux-6.12.1/include/linux/sched/
topology.h 298 return cpu_to_node(task_cpu(p)); in task_node()
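The topology.h hit is the usual composition on top of task_cpu(): a task's NUMA node is the node of its CPU. Sketched as it appears in the hit (surrounding guards in the header may differ):

/* Sketch of task_node() as hit above (include/linux/sched/topology.h). */
static inline int task_node(const struct task_struct *p)
{
	return cpu_to_node(task_cpu(p));
}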
/linux-6.12.1/Documentation/scheduler/
sched-capacity.rst 342 task_util(p) < capacity(task_cpu(p))
405 then it might become CPU-bound, IOW ``task_util(p) > capacity(task_cpu(p))``;
424 task_uclamp_min(p) <= capacity(task_cpu(cpu))
438 task_bandwidth(p) < capacity(task_cpu(p))
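The sched-capacity.rst hits all express one fitness criterion: a task fits its CPU when its utilization, uclamp minimum and bandwidth stay within capacity(task_cpu(p)). A hedged sketch of the utilization/uclamp part of that check; task_fits_its_cpu() is a hypothetical helper for illustration, not the kernel's real check (which lives in fair.c and handles more cases):

/* Hedged sketch of the fitness criteria quoted above; illustrative only. */
static bool task_fits_its_cpu(struct task_struct *p)
{
	unsigned long cap = capacity_of(task_cpu(p)); /* capacity(task_cpu(p)) */

	/* task_util(p) < capacity(task_cpu(p)) and
	 * task_uclamp_min(p) <= capacity(task_cpu(p)) */
	return task_util(p) < cap &&
	       uclamp_eff_value(p, UCLAMP_MIN) <= cap;
}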
/linux-6.12.1/include/trace/events/
sched.h 158 __entry->target_cpu = task_cpu(p);
292 __entry->orig_cpu = task_cpu(p);
/linux-6.12.1/arch/powerpc/kernel/
process.c 2162 unsigned long cpu = task_cpu(p); in valid_irq_stack()
2183 unsigned long cpu = task_cpu(p); in valid_emergency_stack()
2217 unsigned long cpu = task_cpu(p); in valid_emergency_stack()
/linux-6.12.1/kernel/
stop_machine.c 58 struct cpu_stopper *stopper = per_cpu_ptr(&cpu_stopper, task_cpu(task)); in print_stop_info()
/linux-6.12.1/arch/parisc/kernel/
traps.c 153 level, task_cpu(current), cr30, cr31); in show_regs()
/linux-6.12.1/arch/mips/kernel/
process.c 850 cpumask_set_cpu(task_cpu(t), &process_cpus); in mips_set_process_fp_mode()
/linux-6.12.1/fs/proc/
array.c 642 seq_put_decimal_ll(m, " ", task_cpu(task)); in do_task_stat()
/linux-6.12.1/kernel/time/
tick-sched.c 460 cpu = task_cpu(tsk); in tick_nohz_kick_task()
