Searched refs:cfs_rq (Results 1 – 8 of 8) sorted by relevance

/linux-6.12.1/kernel/sched/
fair.c
309 static inline bool list_add_leaf_cfs_rq(struct cfs_rq *cfs_rq) in list_add_leaf_cfs_rq() argument
311 struct rq *rq = rq_of(cfs_rq); in list_add_leaf_cfs_rq()
314 if (cfs_rq->on_list) in list_add_leaf_cfs_rq()
317 cfs_rq->on_list = 1; in list_add_leaf_cfs_rq()
328 if (cfs_rq->tg->parent && in list_add_leaf_cfs_rq()
329 cfs_rq->tg->parent->cfs_rq[cpu]->on_list) { in list_add_leaf_cfs_rq()
336 list_add_tail_rcu(&cfs_rq->leaf_cfs_rq_list, in list_add_leaf_cfs_rq()
337 &(cfs_rq->tg->parent->cfs_rq[cpu]->leaf_cfs_rq_list)); in list_add_leaf_cfs_rq()
347 if (!cfs_rq->tg->parent) { in list_add_leaf_cfs_rq()
352 list_add_tail_rcu(&cfs_rq->leaf_cfs_rq_list, in list_add_leaf_cfs_rq()
[all …]
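
The logic above keeps each rq's leaf_cfs_rq_list in bottom-up order: a child cfs_rq must appear before its parent, so that PELT load updates can walk the list from leaves toward the root. A minimal user-space model of just that ordering rule; the mini list API mirrors the kernel's semantics (list_add_tail() inserts just before its head argument), and all toy_* names are illustrative, not the kernel's:

/*
 * User-space model of the ordering list_add_leaf_cfs_rq() maintains.
 */
#include <stddef.h>
#include <stdio.h>

struct list_head { struct list_head *next, *prev; };

static void list_init(struct list_head *h) { h->next = h->prev = h; }

/* Insert @n immediately before @head, as the kernel's list_add_tail() does. */
static void list_add_tail(struct list_head *n, struct list_head *head)
{
	n->prev = head->prev;
	n->next = head;
	head->prev->next = n;
	head->prev = n;
}

struct toy_cfs_rq {
	const char *name;
	struct toy_cfs_rq *parent;	/* NULL for the root group */
	struct list_head leaf_node;
	int on_list;
};

/* Shape of list_add_leaf_cfs_rq(): a child always lands before its parent. */
static void add_leaf(struct toy_cfs_rq *q, struct list_head *rq_leaf_list)
{
	if (q->on_list)
		return;
	q->on_list = 1;

	if (q->parent && q->parent->on_list)
		/* Parent already queued: insert directly in front of it. */
		list_add_tail(&q->leaf_node, &q->parent->leaf_node);
	else
		/* Root group: append at the very end of the rq's list. */
		list_add_tail(&q->leaf_node, rq_leaf_list);
}

int main(void)
{
	struct list_head leaf_list;
	struct toy_cfs_rq root = { "root", NULL };
	struct toy_cfs_rq child = { "child", &root };

	list_init(&leaf_list);
	list_init(&root.leaf_node);
	list_init(&child.leaf_node);

	add_leaf(&root, &leaf_list);
	add_leaf(&child, &leaf_list);

	/* Prints "child" then "root": the bottom-up order PELT relies on. */
	for (struct list_head *p = leaf_list.next; p != &leaf_list; p = p->next) {
		struct toy_cfs_rq *q = (struct toy_cfs_rq *)
			((char *)p - offsetof(struct toy_cfs_rq, leaf_node));
		printf("%s\n", q->name);
	}
	return 0;
}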
pelt.h
5 int __update_load_avg_se(u64 now, struct cfs_rq *cfs_rq, struct sched_entity *se);
6 int __update_load_avg_cfs_rq(u64 now, struct cfs_rq *cfs_rq);
157 static inline void update_idle_cfs_rq_clock_pelt(struct cfs_rq *cfs_rq) in update_idle_cfs_rq_clock_pelt() argument
161 if (unlikely(cfs_rq->throttle_count)) in update_idle_cfs_rq_clock_pelt()
164 throttled = cfs_rq->throttled_clock_pelt_time; in update_idle_cfs_rq_clock_pelt()
166 u64_u32_store(cfs_rq->throttled_pelt_idle, throttled); in update_idle_cfs_rq_clock_pelt()
170 static inline u64 cfs_rq_clock_pelt(struct cfs_rq *cfs_rq) in cfs_rq_clock_pelt() argument
172 if (unlikely(cfs_rq->throttle_count)) in cfs_rq_clock_pelt()
173 return cfs_rq->throttled_clock_pelt - cfs_rq->throttled_clock_pelt_time; in cfs_rq_clock_pelt()
175 return rq_clock_pelt(rq_of(cfs_rq)) - cfs_rq->throttled_clock_pelt_time; in cfs_rq_clock_pelt()
[all …]
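
cfs_rq_clock_pelt() is why a throttled group's load signal does not decay: time spent throttled is subtracted from the rq's PELT clock, and while throttling is in progress the clock is reported frozen at the value it had when throttling began. A sketch of that arithmetic with hypothetical stand-in types (the real fields use the kernel's u64 helpers and locking):

#include <stdint.h>

struct toy_cfs_rq_pelt {
	int      throttle_count;            /* >0 while throttled */
	uint64_t throttled_clock_pelt;      /* PELT clock when throttling began */
	uint64_t throttled_clock_pelt_time; /* total time spent throttled */
};

static uint64_t toy_clock_pelt(const struct toy_cfs_rq_pelt *q,
			       uint64_t rq_clock_pelt)
{
	if (q->throttle_count)
		/* Frozen: report the clock as it stood at throttle time. */
		return q->throttled_clock_pelt - q->throttled_clock_pelt_time;

	/* Running: the rq clock minus everything spent throttled so far. */
	return rq_clock_pelt - q->throttled_clock_pelt_time;
}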
pelt.c
306 int __update_load_avg_se(u64 now, struct cfs_rq *cfs_rq, struct sched_entity *se) in __update_load_avg_se() argument
309 cfs_rq->curr == se)) { in __update_load_avg_se()
320 int __update_load_avg_cfs_rq(u64 now, struct cfs_rq *cfs_rq) in __update_load_avg_cfs_rq() argument
322 if (___update_load_sum(now, &cfs_rq->avg, in __update_load_avg_cfs_rq()
323 scale_load_down(cfs_rq->load.weight), in __update_load_avg_cfs_rq()
324 cfs_rq->h_nr_running, in __update_load_avg_cfs_rq()
325 cfs_rq->curr != NULL)) { in __update_load_avg_cfs_rq()
327 ___update_load_avg(&cfs_rq->avg, 1); in __update_load_avg_cfs_rq()
328 trace_pelt_cfs_tp(cfs_rq); in __update_load_avg_cfs_rq()
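
The ___update_load_sum()/___update_load_avg() pair implements PELT's geometric series: each 1024 µs period scales the accumulated sum by y, with y chosen so that y^32 = 0.5, i.e. a contribution halves every 32 periods (roughly 32 ms). The kernel does this with precomputed fixed-point tables; the floating-point toy below is only for intuition (compile with -lm):

#include <math.h>
#include <stdio.h>

int main(void)
{
	double y = pow(0.5, 1.0 / 32.0);	/* ~0.97857 per 1024 us period */

	/*
	 * One fully busy period contributes 1024; it halves every
	 * 32 periods. Prints 1024.00, 512.00, 256.00.
	 */
	for (int p = 0; p <= 64; p += 32)
		printf("after %2d periods: %7.2f\n", p, 1024.0 * pow(y, p));
	return 0;
}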
debug.c
805 void print_cfs_rq(struct seq_file *m, int cpu, struct cfs_rq *cfs_rq) in print_cfs_rq() argument
814 SEQ_printf_task_group_path(m, cfs_rq->tg, "cfs_rq[%d]:%s\n", cpu); in print_cfs_rq()
821 root = __pick_root_entity(cfs_rq); in print_cfs_rq()
824 first = __pick_first_entity(cfs_rq); in print_cfs_rq()
827 last = __pick_last_entity(cfs_rq); in print_cfs_rq()
830 min_vruntime = cfs_rq->min_vruntime; in print_cfs_rq()
840 SPLIT_NS(avg_vruntime(cfs_rq))); in print_cfs_rq()
845 SEQ_printf(m, " .%-30s: %d\n", "nr_running", cfs_rq->nr_running); in print_cfs_rq()
846 SEQ_printf(m, " .%-30s: %d\n", "h_nr_running", cfs_rq->h_nr_running); in print_cfs_rq()
848 cfs_rq->idle_nr_running); in print_cfs_rq()
[all …]
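
These SEQ_printf() calls produce the per-CPU "cfs_rq[N]:/group" sections of the scheduler debug output; on kernels built with SCHED_DEBUG that file typically lives at /sys/kernel/debug/sched/debug (older kernels exposed it as /proc/sched_debug).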
sched.h
79 struct cfs_rq;
444 struct cfs_rq **cfs_rq; member
545 extern void init_tg_cfs_entry(struct task_group *tg, struct cfs_rq *cfs_rq,
552 extern void unthrottle_cfs_rq(struct cfs_rq *cfs_rq);
579 struct cfs_rq *prev, struct cfs_rq *next);
582 struct cfs_rq *prev, struct cfs_rq *next) { } in set_task_rq_fair()
646 struct cfs_rq { struct
1128 struct cfs_rq cfs;
1307 static inline struct rq *rq_of(struct cfs_rq *cfs_rq) in rq_of() argument
1309 return cfs_rq->rq; in rq_of()
[all …]
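
The sched.h hits show how the pieces reference one another: each task_group owns a per-CPU array of cfs_rq pointers (line 444), each rq embeds the root cfs_rq (the cfs member at line 1128), and on CONFIG_FAIR_GROUP_SCHED kernels each cfs_rq carries a back pointer that rq_of() returns (without group scheduling, rq_of() is a container_of() instead). A compressed sketch of that shape; field names follow the kernel, everything else is pared down:

struct rq;

struct cfs_rq {
	struct rq *rq;		/* back pointer to the owning runqueue */
};

struct rq {
	struct cfs_rq cfs;	/* root cfs_rq, embedded in the rq itself */
};

struct task_group {
	struct cfs_rq **cfs_rq;	/* one cfs_rq pointer per CPU */
};

static inline struct rq *rq_of(struct cfs_rq *cfs_rq)
{
	return cfs_rq->rq;
}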
core.c
4446 p->se.cfs_rq = NULL; in __sched_fork()
5479 struct sched_entity *curr = p->se.cfs_rq->curr; in prefetch_curr_exec_start()
8398 root_task_group.cfs_rq = (struct cfs_rq **)ptr; in sched_init()
9369 struct cfs_rq *cfs_rq = tg->cfs_rq[i]; in tg_set_cfs_bandwidth() local
9370 struct rq *rq = cfs_rq->rq; in tg_set_cfs_bandwidth()
9373 cfs_rq->runtime_enabled = runtime_enabled; in tg_set_cfs_bandwidth()
9374 cfs_rq->runtime_remaining = 0; in tg_set_cfs_bandwidth()
9376 if (cfs_rq->throttled) in tg_set_cfs_bandwidth()
9377 unthrottle_cfs_rq(cfs_rq); in tg_set_cfs_bandwidth()
9617 total += READ_ONCE(tg->cfs_rq[i]->throttled_clock_self_time); in throttled_time_self()
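
The tg_set_cfs_bandwidth() hits are the kernel side of CFS bandwidth control: when a quota is configured, every per-CPU cfs_rq of the group gets runtime accounting enabled and is unthrottled so it restarts with the new budget. From userspace this path is reached through the cgroup files; a minimal sketch, assuming a cgroup v2 mount at /sys/fs/cgroup and an existing group named "demo" (both assumptions), granting 50 ms of runtime per 100 ms period:

#include <stdio.h>

int main(void)
{
	/* Hypothetical group "demo"; path assumes a cgroup v2 mount. */
	FILE *f = fopen("/sys/fs/cgroup/demo/cpu.max", "w");

	if (!f) {
		perror("cpu.max");
		return 1;
	}
	/* Format is "<quota_us> <period_us>"; quota "max" removes the limit. */
	fprintf(f, "50000 100000\n");
	fclose(f);
	return 0;
}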
/linux-6.12.1/include/trace/events/
sched.h
779 TP_PROTO(struct cfs_rq *cfs_rq),
780 TP_ARGS(cfs_rq));
811 TP_PROTO(struct cfs_rq *cfs_rq),
812 TP_ARGS(cfs_rq));
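
These two matches are DECLARE_TRACE()-style "bare" tracepoints taking a cfs_rq (pelt_cfs_tp is the one fired from pelt.c above): they have no tracefs event and exist for in-kernel or module probes. A module-side sketch, assuming the probe treats the cfs_rq pointer as opaque (its definition is private to kernel/sched/) and that the tracepoint is exported to modules via EXPORT_TRACEPOINT_SYMBOL_GPL, as it is in recent kernels:

#include <linux/module.h>
#include <linux/atomic.h>
#include <linux/tracepoint.h>
#include <trace/events/sched.h>

static atomic64_t nr_pelt_updates = ATOMIC64_INIT(0);

/* Probe signature is (void *data, <TP_PROTO args...>). */
static void probe_pelt_cfs(void *data, struct cfs_rq *unused)
{
	atomic64_inc(&nr_pelt_updates);
}

static int __init pelt_probe_init(void)
{
	return register_trace_pelt_cfs_tp(probe_pelt_cfs, NULL);
}

static void __exit pelt_probe_exit(void)
{
	unregister_trace_pelt_cfs_tp(probe_pelt_cfs, NULL);
	tracepoint_synchronize_unregister();
	pr_info("pelt cfs updates observed: %lld\n",
		(long long)atomic64_read(&nr_pelt_updates));
}

module_init(pelt_probe_init);
module_exit(pelt_probe_exit);
MODULE_LICENSE("GPL");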
/linux-6.12.1/include/linux/
sched.h
59 struct cfs_rq;
569 struct cfs_rq *cfs_rq; member
571 struct cfs_rq *my_q;
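
A note on these two sched_entity fields: se->cfs_rq is the queue the entity is currently enqueued on, while se->my_q is the queue the entity itself owns. Only a group entity has a non-NULL my_q, which is why fair.c can define entity_is_task(se) as !se->my_q; both fields exist only under CONFIG_FAIR_GROUP_SCHED.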