Lines Matching refs: se (struct sched_entity references in kernel/sched/fair.c)

289 static inline u64 calc_delta_fair(u64 delta, struct sched_entity *se)  in calc_delta_fair()  argument
291 if (unlikely(se->load.weight != NICE_0_LOAD)) in calc_delta_fair()
292 delta = __calc_delta(delta, NICE_0_LOAD, &se->load); in calc_delta_fair()
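The hits at source lines 289-292 are the weight-scaling primitive used throughout this file: a wall-clock delta is converted into weighted (virtual) time, with nice-0 entities passing through unchanged. A minimal userspace sketch of that relationship, assuming an unscaled nice-0 weight of 1024 and plain 64-bit division instead of the kernel's inverse-weight multiplication in __calc_delta():

#include <stdint.h>
#include <stdio.h>

#define NICE_0_WEIGHT 1024ULL	/* unscaled nice-0 weight; the kernel's
				 * NICE_0_LOAD may carry an extra fixed-point
				 * shift depending on config */

/* sketch of calc_delta_fair: delta_virtual = delta * NICE_0_WEIGHT / weight */
static uint64_t calc_delta_fair_sketch(uint64_t delta, uint64_t weight)
{
	if (weight != NICE_0_WEIGHT)
		delta = delta * NICE_0_WEIGHT / weight;
	return delta;
}

int main(void)
{
	/* 1 ms of wall-clock time for a nice-0 task and for a task with
	 * double the weight: the heavier task accrues virtual time slower. */
	printf("nice-0: %llu ns, heavy: %llu ns\n",
	       (unsigned long long)calc_delta_fair_sketch(1000000, 1024),
	       (unsigned long long)calc_delta_fair_sketch(1000000, 2048));
	return 0;
}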
306 #define for_each_sched_entity(se) \ argument
307 for (; se; se = se->parent)
409 is_same_group(struct sched_entity *se, struct sched_entity *pse) in is_same_group() argument
411 if (se->cfs_rq == pse->cfs_rq) in is_same_group()
412 return se->cfs_rq; in is_same_group()
417 static inline struct sched_entity *parent_entity(const struct sched_entity *se) in parent_entity() argument
419 return se->parent; in parent_entity()
423 find_matching_se(struct sched_entity **se, struct sched_entity **pse) in find_matching_se() argument
435 se_depth = (*se)->depth; in find_matching_se()
440 *se = parent_entity(*se); in find_matching_se()
448 while (!is_same_group(*se, *pse)) { in find_matching_se()
449 *se = parent_entity(*se); in find_matching_se()
464 static int se_is_idle(struct sched_entity *se) in se_is_idle() argument
466 if (entity_is_task(se)) in se_is_idle()
467 return task_has_idle_policy(task_of(se)); in se_is_idle()
468 return cfs_rq_is_idle(group_cfs_rq(se)); in se_is_idle()
473 #define for_each_sched_entity(se) \ argument
474 for (; se; se = NULL)
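The two for_each_sched_entity() definitions above are the group-scheduling and flat variants: with CONFIG_FAIR_GROUP_SCHED the walk follows se->parent up the task-group hierarchy, while the second form visits exactly one entity and stops. A hedged sketch of the traversal shape, using a hypothetical node type rather than the real struct sched_entity:

#include <stddef.h>
#include <stdio.h>

/* hypothetical stand-in for struct sched_entity's parent linkage */
struct ent {
	const char *name;
	struct ent *parent;
};

/* group-scheduling flavour: walk from the task's entity up to the root */
#define for_each_ent(e) for (; (e); (e) = (e)->parent)

int main(void)
{
	struct ent root  = { "root cfs_rq entity", NULL };
	struct ent group = { "task-group entity",  &root };
	struct ent task  = { "task entity",        &group };
	struct ent *e = &task;

	for_each_ent(e)
		printf("%s\n", e->name);	/* task, group, root */
	return 0;
}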
492 static inline struct sched_entity *parent_entity(struct sched_entity *se) in parent_entity() argument
498 find_matching_se(struct sched_entity **se, struct sched_entity **pse) in find_matching_se() argument
512 static int se_is_idle(struct sched_entity *se) in se_is_idle() argument
514 return task_has_idle_policy(task_of(se)); in se_is_idle()
554 static inline s64 entity_key(struct cfs_rq *cfs_rq, struct sched_entity *se) in entity_key() argument
556 return (s64)(se->vruntime - cfs_rq->min_vruntime); in entity_key()
621 avg_vruntime_add(struct cfs_rq *cfs_rq, struct sched_entity *se) in avg_vruntime_add() argument
623 unsigned long weight = scale_load_down(se->load.weight); in avg_vruntime_add()
624 s64 key = entity_key(cfs_rq, se); in avg_vruntime_add()
631 avg_vruntime_sub(struct cfs_rq *cfs_rq, struct sched_entity *se) in avg_vruntime_sub() argument
633 unsigned long weight = scale_load_down(se->load.weight); in avg_vruntime_sub()
634 s64 key = entity_key(cfs_rq, se); in avg_vruntime_sub()
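entity_key() and the avg_vruntime_add()/avg_vruntime_sub() hits maintain a load-weighted sum of vruntime keys taken relative to cfs_rq->min_vruntime, which keeps the running sums small enough to avoid overflow. The queue's average virtual runtime then follows as below; this is a brute-force sketch of the quantity the kernel tracks incrementally, not the kernel code itself:

#include <stdint.h>
#include <stdio.h>

struct ent { uint64_t weight; int64_t vruntime; };

/* V = min_vruntime + sum(w_i * (v_i - min_vruntime)) / sum(w_i) */
static int64_t avg_vruntime_sketch(const struct ent *e, int n, int64_t min_vruntime)
{
	int64_t wsum = 0, keysum = 0;

	for (int i = 0; i < n; i++) {
		keysum += (int64_t)e[i].weight * (e[i].vruntime - min_vruntime);
		wsum   += e[i].weight;
	}
	return wsum ? min_vruntime + keysum / wsum : min_vruntime;
}

int main(void)
{
	struct ent q[] = { { 1024, 1000 }, { 2048, 1300 }, { 1024, 1900 } };

	printf("avg vruntime: %lld\n",
	       (long long)avg_vruntime_sketch(q, 3, 1000));
	return 0;
}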
692 static s64 entity_lag(u64 avruntime, struct sched_entity *se) in entity_lag() argument
696 vlag = avruntime - se->vruntime; in entity_lag()
697 limit = calc_delta_fair(max_t(u64, 2*se->slice, TICK_NSEC), se); in entity_lag()
702 static void update_entity_lag(struct cfs_rq *cfs_rq, struct sched_entity *se) in update_entity_lag() argument
704 SCHED_WARN_ON(!se->on_rq); in update_entity_lag()
706 se->vlag = entity_lag(avg_vruntime(cfs_rq), se); in update_entity_lag()
742 int entity_eligible(struct cfs_rq *cfs_rq, struct sched_entity *se) in entity_eligible() argument
744 return vruntime_eligible(cfs_rq, se->vruntime); in entity_eligible()
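update_entity_lag() records how far an entity is behind or ahead of that weighted average (positive lag means it is still owed service), clamped to the virtual-time equivalent of max(2*slice, one tick), and entity_eligible() admits only entities whose vruntime has not overtaken the average. A hedged sketch of both rules:

#include <stdint.h>
#include <stdbool.h>
#include <stdio.h>

static int64_t clamp_s64(int64_t v, int64_t lo, int64_t hi)
{
	return v < lo ? lo : (v > hi ? hi : v);
}

/* lag = V - vruntime, clamped to +/- limit; in the kernel the limit is
 * derived from the slice via the same weight scaling as calc_delta_fair */
static int64_t entity_lag_sketch(int64_t avg_vruntime, int64_t vruntime, int64_t limit)
{
	return clamp_s64(avg_vruntime - vruntime, -limit, limit);
}

/* eligible <=> the entity has not run ahead of the weighted average */
static bool entity_eligible_sketch(int64_t avg_vruntime, int64_t vruntime)
{
	return vruntime <= avg_vruntime;
}

int main(void)
{
	printf("lag=%lld eligible=%d\n",
	       (long long)entity_lag_sketch(1375, 1900, 500),
	       entity_eligible_sketch(1375, 1900));
	return 0;
}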
763 struct sched_entity *se = __pick_root_entity(cfs_rq); in update_min_vruntime() local
774 if (se) { in update_min_vruntime()
776 vruntime = se->min_vruntime; in update_min_vruntime()
778 vruntime = min_vruntime(vruntime, se->min_vruntime); in update_min_vruntime()
807 static inline void __min_vruntime_update(struct sched_entity *se, struct rb_node *node) in __min_vruntime_update() argument
811 if (vruntime_gt(min_vruntime, se, rse)) in __min_vruntime_update()
812 se->min_vruntime = rse->min_vruntime; in __min_vruntime_update()
816 static inline void __min_slice_update(struct sched_entity *se, struct rb_node *node) in __min_slice_update() argument
820 if (rse->min_slice < se->min_slice) in __min_slice_update()
821 se->min_slice = rse->min_slice; in __min_slice_update()
828 static inline bool min_vruntime_update(struct sched_entity *se, bool exit) in min_vruntime_update() argument
830 u64 old_min_vruntime = se->min_vruntime; in min_vruntime_update()
831 u64 old_min_slice = se->min_slice; in min_vruntime_update()
832 struct rb_node *node = &se->run_node; in min_vruntime_update()
834 se->min_vruntime = se->vruntime; in min_vruntime_update()
835 __min_vruntime_update(se, node->rb_right); in min_vruntime_update()
836 __min_vruntime_update(se, node->rb_left); in min_vruntime_update()
838 se->min_slice = se->slice; in min_vruntime_update()
839 __min_slice_update(se, node->rb_right); in min_vruntime_update()
840 __min_slice_update(se, node->rb_left); in min_vruntime_update()
842 return se->min_vruntime == old_min_vruntime && in min_vruntime_update()
843 se->min_slice == old_min_slice; in min_vruntime_update()
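The __min_vruntime_update()/__min_slice_update() hits are the rbtree augmentation callbacks: every queued node caches the minimum vruntime and minimum slice of its own subtree, so the pick path can discard whole subtrees at once. A sketch of the per-node recomputation over a hypothetical binary node, ignoring the kernel's wraparound-safe comparisons:

#include <stdint.h>
#include <stddef.h>

struct node {
	uint64_t vruntime, slice;
	uint64_t min_vruntime, min_slice;	/* cached subtree minima */
	struct node *left, *right;
};

/* re-derive a node's cached minima from itself and its children; the
 * kernel runs the equivalent bottom-up whenever the tree changes */
static void min_update_sketch(struct node *n)
{
	struct node *kids[2] = { n->left, n->right };

	n->min_vruntime = n->vruntime;
	n->min_slice = n->slice;

	for (int i = 0; i < 2; i++) {
		if (!kids[i])
			continue;
		if (kids[i]->min_vruntime < n->min_vruntime)
			n->min_vruntime = kids[i]->min_vruntime;
		if (kids[i]->min_slice < n->min_slice)
			n->min_slice = kids[i]->min_slice;
	}
}

int main(void)
{
	struct node l = { 100, 3000000, 100, 3000000, NULL, NULL };
	struct node r = { 250, 1000000, 250, 1000000, NULL, NULL };
	struct node p = { 180, 2000000, 0, 0, &l, &r };

	min_update_sketch(&p);	/* p.min_vruntime == 100, p.min_slice == 1000000 */
	return 0;
}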
852 static void __enqueue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se) in __enqueue_entity() argument
854 avg_vruntime_add(cfs_rq, se); in __enqueue_entity()
855 se->min_vruntime = se->vruntime; in __enqueue_entity()
856 se->min_slice = se->slice; in __enqueue_entity()
857 rb_add_augmented_cached(&se->run_node, &cfs_rq->tasks_timeline, in __enqueue_entity()
861 static void __dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se) in __dequeue_entity() argument
863 rb_erase_augmented_cached(&se->run_node, &cfs_rq->tasks_timeline, in __dequeue_entity()
865 avg_vruntime_sub(cfs_rq, se); in __dequeue_entity()
910 struct sched_entity *se = __pick_first_entity(cfs_rq); in pick_eevdf() local
919 return curr && curr->on_rq ? curr : se; in pick_eevdf()
932 if (se && entity_eligible(cfs_rq, se)) { in pick_eevdf()
933 best = se; in pick_eevdf()
951 se = __node_2_se(node); in pick_eevdf()
958 if (entity_eligible(cfs_rq, se)) { in pick_eevdf()
959 best = se; in pick_eevdf()
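pick_eevdf() implements the EEVDF pick: among eligible entities (and the current entity, if it is still on the runqueue), choose the one with the earliest virtual deadline, descending the tree and using the cached subtree min_vruntime to skip branches that cannot contain an eligible entity. A brute-force reference of the same policy, without the tree walk:

#include <stdint.h>
#include <stdio.h>

struct ent { int64_t vruntime; uint64_t deadline; };

/* earliest-virtual-deadline-first among eligible entities; returns -1 if
 * nothing is eligible (the kernel then falls back to other candidates) */
static int pick_eevdf_sketch(const struct ent *e, int n, int64_t avg_vruntime)
{
	int best = -1;

	for (int i = 0; i < n; i++) {
		if (e[i].vruntime > avg_vruntime)
			continue;	/* not eligible: already ran ahead */
		if (best < 0 || e[i].deadline < e[best].deadline)
			best = i;
	}
	return best;
}

int main(void)
{
	struct ent q[] = {
		{ 1000, 4000 },	/* eligible, late deadline */
		{ 1300, 2500 },	/* eligible, earliest deadline -> picked */
		{ 1900, 2000 },	/* earliest deadline but not eligible */
	};

	printf("picked index %d\n", pick_eevdf_sketch(q, 3, 1375));
	return 0;
}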
1001 static void clear_buddies(struct cfs_rq *cfs_rq, struct sched_entity *se);
1007 static bool update_deadline(struct cfs_rq *cfs_rq, struct sched_entity *se) in update_deadline() argument
1009 if ((s64)(se->vruntime - se->deadline) < 0) in update_deadline()
1017 if (!se->custom_slice) in update_deadline()
1018 se->slice = sysctl_sched_base_slice; in update_deadline()
1023 se->deadline = se->vruntime + calc_delta_fair(se->slice, se); in update_deadline()
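update_deadline() (hits at 1007-1023) fires once vruntime has caught up with the previous deadline: the slice is reset to sysctl_sched_base_slice unless the task set a custom one, and the next virtual deadline is pushed out by the weighted slice, reusing the scaling sketched earlier:

	deadline = vruntime + calc_delta_fair(slice, se)
	         = vruntime + slice * NICE_0_LOAD / se->load.weight   (up to fixed-point rounding)

For example, with a 3 ms slice a nice-0 entity's deadline lands 3,000,000 ns of virtual time ahead, while an entity of twice the weight gets it only 1,500,000 ns ahead; both correspond to the same 3 ms of wall-clock service, since the heavier entity's vruntime also advances at half the rate.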
1039 void init_entity_runnable_average(struct sched_entity *se) in init_entity_runnable_average() argument
1041 struct sched_avg *sa = &se->avg; in init_entity_runnable_average()
1051 if (entity_is_task(se)) in init_entity_runnable_average()
1052 sa->load_avg = scale_load_down(se->load.weight); in init_entity_runnable_average()
1086 struct sched_entity *se = &p->se; in post_init_entity_util_avg() local
1087 struct cfs_rq *cfs_rq = cfs_rq_of(se); in post_init_entity_util_avg()
1088 struct sched_avg *sa = &se->avg; in post_init_entity_util_avg()
1103 se->avg.last_update_time = cfs_rq_clock_pelt(cfs_rq); in post_init_entity_util_avg()
1109 sa->util_avg = cfs_rq->avg.util_avg * se_weight(se); in post_init_entity_util_avg()
1123 void init_entity_runnable_average(struct sched_entity *se) in init_entity_runnable_average() argument
1178 struct sched_entity *pse, struct sched_entity *se) in do_preempt_short() argument
1183 if (pse->slice >= se->slice) in do_preempt_short()
1189 if (entity_before(pse, se)) in do_preempt_short()
1192 if (!entity_eligible(cfs_rq, se)) in do_preempt_short()
1206 delta_exec = update_curr_se(rq, &curr->se); in update_curr_common()
1261 update_curr(cfs_rq_of(&rq->curr->se)); in update_curr_fair()
1265 update_stats_wait_start_fair(struct cfs_rq *cfs_rq, struct sched_entity *se) in update_stats_wait_start_fair() argument
1273 stats = __schedstats_from_se(se); in update_stats_wait_start_fair()
1275 if (entity_is_task(se)) in update_stats_wait_start_fair()
1276 p = task_of(se); in update_stats_wait_start_fair()
1282 update_stats_wait_end_fair(struct cfs_rq *cfs_rq, struct sched_entity *se) in update_stats_wait_end_fair() argument
1290 stats = __schedstats_from_se(se); in update_stats_wait_end_fair()
1301 if (entity_is_task(se)) in update_stats_wait_end_fair()
1302 p = task_of(se); in update_stats_wait_end_fair()
1308 update_stats_enqueue_sleeper_fair(struct cfs_rq *cfs_rq, struct sched_entity *se) in update_stats_enqueue_sleeper_fair() argument
1316 stats = __schedstats_from_se(se); in update_stats_enqueue_sleeper_fair()
1318 if (entity_is_task(se)) in update_stats_enqueue_sleeper_fair()
1319 tsk = task_of(se); in update_stats_enqueue_sleeper_fair()
1328 update_stats_enqueue_fair(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags) in update_stats_enqueue_fair() argument
1337 if (se != cfs_rq->curr) in update_stats_enqueue_fair()
1338 update_stats_wait_start_fair(cfs_rq, se); in update_stats_enqueue_fair()
1341 update_stats_enqueue_sleeper_fair(cfs_rq, se); in update_stats_enqueue_fair()
1345 update_stats_dequeue_fair(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags) in update_stats_dequeue_fair() argument
1355 if (se != cfs_rq->curr) in update_stats_dequeue_fair()
1356 update_stats_wait_end_fair(cfs_rq, se); in update_stats_dequeue_fair()
1358 if ((flags & DEQUEUE_SLEEP) && entity_is_task(se)) { in update_stats_dequeue_fair()
1359 struct task_struct *tsk = task_of(se); in update_stats_dequeue_fair()
1377 update_stats_curr_start(struct cfs_rq *cfs_rq, struct sched_entity *se) in update_stats_curr_start() argument
1382 se->exec_start = rq_clock_task(rq_of(cfs_rq)); in update_stats_curr_start()
2770 now = p->se.exec_start; in numa_get_avg_runtime()
2771 runtime = p->se.sum_exec_runtime; in numa_get_avg_runtime()
2781 delta = p->se.avg.load_sum; in numa_get_avg_runtime()
3294 u64 runtime = p->se.sum_exec_runtime; in task_numa_work()
3525 if (unlikely(p->se.sum_exec_runtime != runtime)) { in task_numa_work()
3526 u64 diff = p->se.sum_exec_runtime - runtime; in task_numa_work()
3598 now = curr->se.sum_exec_runtime; in task_tick_numa()
3665 account_entity_enqueue(struct cfs_rq *cfs_rq, struct sched_entity *se) in account_entity_enqueue() argument
3667 update_load_add(&cfs_rq->load, se->load.weight); in account_entity_enqueue()
3669 if (entity_is_task(se)) { in account_entity_enqueue()
3672 account_numa_enqueue(rq, task_of(se)); in account_entity_enqueue()
3673 list_add(&se->group_node, &rq->cfs_tasks); in account_entity_enqueue()
3677 if (se_is_idle(se)) in account_entity_enqueue()
3682 account_entity_dequeue(struct cfs_rq *cfs_rq, struct sched_entity *se) in account_entity_dequeue() argument
3684 update_load_sub(&cfs_rq->load, se->load.weight); in account_entity_dequeue()
3686 if (entity_is_task(se)) { in account_entity_dequeue()
3687 account_numa_dequeue(rq_of(cfs_rq), task_of(se)); in account_entity_dequeue()
3688 list_del_init(&se->group_node); in account_entity_dequeue()
3692 if (se_is_idle(se)) in account_entity_dequeue()
3746 enqueue_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se) in enqueue_load_avg() argument
3748 cfs_rq->avg.load_avg += se->avg.load_avg; in enqueue_load_avg()
3749 cfs_rq->avg.load_sum += se_weight(se) * se->avg.load_sum; in enqueue_load_avg()
3753 dequeue_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se) in dequeue_load_avg() argument
3755 sub_positive(&cfs_rq->avg.load_avg, se->avg.load_avg); in dequeue_load_avg()
3756 sub_positive(&cfs_rq->avg.load_sum, se_weight(se) * se->avg.load_sum); in dequeue_load_avg()
3763 enqueue_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se) { } in enqueue_load_avg() argument
3765 dequeue_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se) { } in dequeue_load_avg() argument
3768 static void reweight_eevdf(struct sched_entity *se, u64 avruntime, in reweight_eevdf() argument
3771 unsigned long old_weight = se->load.weight; in reweight_eevdf()
3851 if (avruntime != se->vruntime) { in reweight_eevdf()
3852 vlag = entity_lag(avruntime, se); in reweight_eevdf()
3854 se->vruntime = avruntime - vlag; in reweight_eevdf()
3869 vslice = (s64)(se->deadline - avruntime); in reweight_eevdf()
3871 se->deadline = avruntime + vslice; in reweight_eevdf()
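reweight_eevdf() handles a weight change for an entity that is still queued: the lag and the remaining virtual slice are rescaled by old_weight/new_weight so that the wall-clock service the entity is owed, and its wall-clock time to deadline, stay the same, then vruntime and deadline are re-derived around the queue average. A sketch of that rescaling, assuming 64-bit arithmetic suffices and omitting the kernel's lag clamping:

#include <stdint.h>
#include <stdio.h>

struct ent { int64_t vruntime; int64_t deadline; };

/* keep wall-clock lag and wall-clock time-to-deadline constant across a
 * weight change: the virtual quantities scale by old_weight / new_weight */
static void reweight_eevdf_sketch(struct ent *se, int64_t avg_vruntime,
				  int64_t old_w, int64_t new_w)
{
	int64_t vlag   = avg_vruntime - se->vruntime;
	int64_t vslice = se->deadline - avg_vruntime;

	vlag   = vlag   * old_w / new_w;
	vslice = vslice * old_w / new_w;

	se->vruntime = avg_vruntime - vlag;
	se->deadline = avg_vruntime + vslice;
}

int main(void)
{
	struct ent e = { .vruntime = 900, .deadline = 2400 };

	reweight_eevdf_sketch(&e, 1375, 1024, 2048);	/* weight doubled */
	printf("vruntime=%lld deadline=%lld\n",
	       (long long)e.vruntime, (long long)e.deadline);
	return 0;
}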
3874 static void reweight_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, in reweight_entity() argument
3877 bool curr = cfs_rq->curr == se; in reweight_entity()
3880 if (se->on_rq) { in reweight_entity()
3885 __dequeue_entity(cfs_rq, se); in reweight_entity()
3886 update_load_sub(&cfs_rq->load, se->load.weight); in reweight_entity()
3888 dequeue_load_avg(cfs_rq, se); in reweight_entity()
3890 if (se->on_rq) { in reweight_entity()
3891 reweight_eevdf(se, avruntime, weight); in reweight_entity()
3897 se->vlag = div_s64(se->vlag * se->load.weight, weight); in reweight_entity()
3900 update_load_set(&se->load, weight); in reweight_entity()
3904 u32 divider = get_pelt_divider(&se->avg); in reweight_entity()
3906 se->avg.load_avg = div_u64(se_weight(se) * se->avg.load_sum, divider); in reweight_entity()
3910 enqueue_load_avg(cfs_rq, se); in reweight_entity()
3911 if (se->on_rq) { in reweight_entity()
3912 update_load_add(&cfs_rq->load, se->load.weight); in reweight_entity()
3914 __enqueue_entity(cfs_rq, se); in reweight_entity()
3930 struct sched_entity *se = &p->se; in reweight_task_fair() local
3931 struct cfs_rq *cfs_rq = cfs_rq_of(se); in reweight_task_fair()
3932 struct load_weight *load = &se->load; in reweight_task_fair()
3934 reweight_entity(cfs_rq, se, lw->weight); in reweight_task_fair()
4054 static void update_cfs_group(struct sched_entity *se) in update_cfs_group() argument
4056 struct cfs_rq *gcfs_rq = group_cfs_rq(se); in update_cfs_group()
4070 if (unlikely(se->load.weight != shares)) in update_cfs_group()
4071 reweight_entity(cfs_rq_of(se), se, shares); in update_cfs_group()
4075 static inline void update_cfs_group(struct sched_entity *se) in update_cfs_group() argument
4266 void set_task_rq_fair(struct sched_entity *se, in set_task_rq_fair() argument
4282 if (!(se->avg.last_update_time && prev)) in set_task_rq_fair()
4288 __update_load_avg_blocked_se(p_last_update_time, se); in set_task_rq_fair()
4289 se->avg.last_update_time = n_last_update_time; in set_task_rq_fair()
4360 update_tg_cfs_util(struct cfs_rq *cfs_rq, struct sched_entity *se, struct cfs_rq *gcfs_rq) in update_tg_cfs_util() argument
4362 long delta_sum, delta_avg = gcfs_rq->avg.util_avg - se->avg.util_avg; in update_tg_cfs_util()
4377 se->avg.util_avg = gcfs_rq->avg.util_avg; in update_tg_cfs_util()
4378 new_sum = se->avg.util_avg * divider; in update_tg_cfs_util()
4379 delta_sum = (long)new_sum - (long)se->avg.util_sum; in update_tg_cfs_util()
4380 se->avg.util_sum = new_sum; in update_tg_cfs_util()
4392 update_tg_cfs_runnable(struct cfs_rq *cfs_rq, struct sched_entity *se, struct cfs_rq *gcfs_rq) in update_tg_cfs_runnable() argument
4394 long delta_sum, delta_avg = gcfs_rq->avg.runnable_avg - se->avg.runnable_avg; in update_tg_cfs_runnable()
4408 se->avg.runnable_avg = gcfs_rq->avg.runnable_avg; in update_tg_cfs_runnable()
4409 new_sum = se->avg.runnable_avg * divider; in update_tg_cfs_runnable()
4410 delta_sum = (long)new_sum - (long)se->avg.runnable_sum; in update_tg_cfs_runnable()
4411 se->avg.runnable_sum = new_sum; in update_tg_cfs_runnable()
4422 update_tg_cfs_load(struct cfs_rq *cfs_rq, struct sched_entity *se, struct cfs_rq *gcfs_rq) in update_tg_cfs_load() argument
4446 runnable_sum += se->avg.load_sum; in update_tg_cfs_load()
4459 runnable_sum = min(se->avg.load_sum, load_sum); in update_tg_cfs_load()
4468 running_sum = se->avg.util_sum >> SCHED_CAPACITY_SHIFT; in update_tg_cfs_load()
4471 load_sum = se_weight(se) * runnable_sum; in update_tg_cfs_load()
4474 delta_avg = load_avg - se->avg.load_avg; in update_tg_cfs_load()
4478 delta_sum = load_sum - (s64)se_weight(se) * se->avg.load_sum; in update_tg_cfs_load()
4480 se->avg.load_sum = runnable_sum; in update_tg_cfs_load()
4481 se->avg.load_avg = load_avg; in update_tg_cfs_load()
4496 static inline int propagate_entity_load_avg(struct sched_entity *se) in propagate_entity_load_avg() argument
4500 if (entity_is_task(se)) in propagate_entity_load_avg()
4503 gcfs_rq = group_cfs_rq(se); in propagate_entity_load_avg()
4509 cfs_rq = cfs_rq_of(se); in propagate_entity_load_avg()
4513 update_tg_cfs_util(cfs_rq, se, gcfs_rq); in propagate_entity_load_avg()
4514 update_tg_cfs_runnable(cfs_rq, se, gcfs_rq); in propagate_entity_load_avg()
4515 update_tg_cfs_load(cfs_rq, se, gcfs_rq); in propagate_entity_load_avg()
4518 trace_pelt_se_tp(se); in propagate_entity_load_avg()
4527 static inline bool skip_blocked_update(struct sched_entity *se) in skip_blocked_update() argument
4529 struct cfs_rq *gcfs_rq = group_cfs_rq(se); in skip_blocked_update()
4535 if (se->avg.load_avg || se->avg.util_avg) in skip_blocked_update()
4559 static inline int propagate_entity_load_avg(struct sched_entity *se) in propagate_entity_load_avg() argument
4569 static inline void migrate_se_pelt_lag(struct sched_entity *se) in migrate_se_pelt_lag() argument
4576 if (load_avg_is_decayed(&se->avg)) in migrate_se_pelt_lag()
4579 cfs_rq = cfs_rq_of(se); in migrate_se_pelt_lag()
4645 __update_load_avg_blocked_se(now, se); in migrate_se_pelt_lag()
4648 static void migrate_se_pelt_lag(struct sched_entity *se) {} in migrate_se_pelt_lag() argument
4738 static void attach_entity_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se) in attach_entity_load_avg() argument
4753 se->avg.last_update_time = cfs_rq->avg.last_update_time; in attach_entity_load_avg()
4754 se->avg.period_contrib = cfs_rq->avg.period_contrib; in attach_entity_load_avg()
4762 se->avg.util_sum = se->avg.util_avg * divider; in attach_entity_load_avg()
4764 se->avg.runnable_sum = se->avg.runnable_avg * divider; in attach_entity_load_avg()
4766 se->avg.load_sum = se->avg.load_avg * divider; in attach_entity_load_avg()
4767 if (se_weight(se) < se->avg.load_sum) in attach_entity_load_avg()
4768 se->avg.load_sum = div_u64(se->avg.load_sum, se_weight(se)); in attach_entity_load_avg()
4770 se->avg.load_sum = 1; in attach_entity_load_avg()
4772 enqueue_load_avg(cfs_rq, se); in attach_entity_load_avg()
4773 cfs_rq->avg.util_avg += se->avg.util_avg; in attach_entity_load_avg()
4774 cfs_rq->avg.util_sum += se->avg.util_sum; in attach_entity_load_avg()
4775 cfs_rq->avg.runnable_avg += se->avg.runnable_avg; in attach_entity_load_avg()
4776 cfs_rq->avg.runnable_sum += se->avg.runnable_sum; in attach_entity_load_avg()
4778 add_tg_cfs_propagate(cfs_rq, se->avg.load_sum); in attach_entity_load_avg()
4793 static void detach_entity_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se) in detach_entity_load_avg() argument
4795 dequeue_load_avg(cfs_rq, se); in detach_entity_load_avg()
4796 sub_positive(&cfs_rq->avg.util_avg, se->avg.util_avg); in detach_entity_load_avg()
4797 sub_positive(&cfs_rq->avg.util_sum, se->avg.util_sum); in detach_entity_load_avg()
4802 sub_positive(&cfs_rq->avg.runnable_avg, se->avg.runnable_avg); in detach_entity_load_avg()
4803 sub_positive(&cfs_rq->avg.runnable_sum, se->avg.runnable_sum); in detach_entity_load_avg()
4808 add_tg_cfs_propagate(cfs_rq, -se->avg.load_sum); in detach_entity_load_avg()
4824 static inline void update_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags) in update_load_avg() argument
4833 if (se->avg.last_update_time && !(flags & SKIP_AGE_LOAD)) in update_load_avg()
4834 __update_load_avg_se(now, cfs_rq, se); in update_load_avg()
4837 decayed |= propagate_entity_load_avg(se); in update_load_avg()
4839 if (!se->avg.last_update_time && (flags & DO_ATTACH)) { in update_load_avg()
4848 attach_entity_load_avg(cfs_rq, se); in update_load_avg()
4856 detach_entity_load_avg(cfs_rq, se); in update_load_avg()
4870 static void sync_entity_load_avg(struct sched_entity *se) in sync_entity_load_avg() argument
4872 struct cfs_rq *cfs_rq = cfs_rq_of(se); in sync_entity_load_avg()
4876 __update_load_avg_blocked_se(last_update_time, se); in sync_entity_load_avg()
4883 static void remove_entity_load_avg(struct sched_entity *se) in remove_entity_load_avg() argument
4885 struct cfs_rq *cfs_rq = cfs_rq_of(se); in remove_entity_load_avg()
4894 sync_entity_load_avg(se); in remove_entity_load_avg()
4898 cfs_rq->removed.util_avg += se->avg.util_avg; in remove_entity_load_avg()
4899 cfs_rq->removed.load_avg += se->avg.load_avg; in remove_entity_load_avg()
4900 cfs_rq->removed.runnable_avg += se->avg.runnable_avg; in remove_entity_load_avg()
4918 return READ_ONCE(p->se.avg.util_avg); in task_util()
4923 return READ_ONCE(p->se.avg.runnable_avg); in task_runnable()
4928 return READ_ONCE(p->se.avg.util_est) & ~UTIL_AVG_UNCHANGED; in _task_util_est()
4987 ewma = READ_ONCE(p->se.avg.util_est); in util_est_update()
5052 WRITE_ONCE(p->se.avg.util_est, ewma); in util_est_update()
5054 trace_sched_util_est_se_tp(&p->se); in util_est_update()
5235 static inline void update_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se, int not_used1) in update_load_avg() argument
5240 static inline void remove_entity_load_avg(struct sched_entity *se) {} in remove_entity_load_avg() argument
5243 attach_entity_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se) {} in attach_entity_load_avg() argument
5245 detach_entity_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se) {} in detach_entity_load_avg() argument
5266 place_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags) in place_entity() argument
5271 if (!se->custom_slice) in place_entity()
5272 se->slice = sysctl_sched_base_slice; in place_entity()
5273 vslice = calc_delta_fair(se->slice, se); in place_entity()
5287 lag = se->vlag; in place_entity()
5345 lag *= load + scale_load_down(se->load.weight); in place_entity()
5351 se->vruntime = vruntime - lag; in place_entity()
5353 if (sched_feat(PLACE_REL_DEADLINE) && se->rel_deadline) { in place_entity()
5354 se->deadline += se->vruntime; in place_entity()
5355 se->rel_deadline = 0; in place_entity()
5370 se->deadline = se->vruntime + vslice; in place_entity()
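place_entity() positions a (re)joining entity around the current queue average: its stored lag is scaled up by (W + w)/W, where W is the queue's existing load and w the entity's own weight, because inserting the entity will itself drag the average toward it; vruntime is then the average minus that adjusted lag, and the first deadline is one weighted slice later. A hedged sketch of the placement arithmetic only (the kernel additionally handles relative deadlines, initial placement, and the zero-load corner case):

#include <stdint.h>
#include <stdio.h>

struct placed { int64_t vruntime; int64_t deadline; };

/* vruntime = V - vlag * (W + w) / W;  deadline = vruntime + vslice */
static struct placed place_entity_sketch(int64_t avg_vruntime, int64_t vlag,
					 int64_t queue_load, int64_t weight,
					 int64_t vslice)
{
	struct placed p;
	int64_t lag = vlag * (queue_load + weight) / queue_load;

	p.vruntime = avg_vruntime - lag;
	p.deadline = p.vruntime + vslice;
	return p;
}

int main(void)
{
	/* an entity that went to sleep while still owed 200 units of virtual time */
	struct placed p = place_entity_sketch(1375, 200, 3072, 1024, 1500);

	printf("vruntime=%lld deadline=%lld\n",
	       (long long)p.vruntime, (long long)p.deadline);
	return 0;
}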
5379 requeue_delayed_entity(struct sched_entity *se);
5382 enqueue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags) in enqueue_entity() argument
5384 bool curr = cfs_rq->curr == se; in enqueue_entity()
5391 place_entity(cfs_rq, se, flags); in enqueue_entity()
5404 update_load_avg(cfs_rq, se, UPDATE_TG | DO_ATTACH); in enqueue_entity()
5405 se_update_runnable(se); in enqueue_entity()
5411 update_cfs_group(se); in enqueue_entity()
5418 place_entity(cfs_rq, se, flags); in enqueue_entity()
5420 account_entity_enqueue(cfs_rq, se); in enqueue_entity()
5424 se->exec_start = 0; in enqueue_entity()
5427 update_stats_enqueue_fair(cfs_rq, se, flags); in enqueue_entity()
5429 __enqueue_entity(cfs_rq, se); in enqueue_entity()
5430 se->on_rq = 1; in enqueue_entity()
5449 static void __clear_buddies_next(struct sched_entity *se) in __clear_buddies_next() argument
5451 for_each_sched_entity(se) { in __clear_buddies_next()
5452 struct cfs_rq *cfs_rq = cfs_rq_of(se); in __clear_buddies_next()
5453 if (cfs_rq->next != se) in __clear_buddies_next()
5460 static void clear_buddies(struct cfs_rq *cfs_rq, struct sched_entity *se) in clear_buddies() argument
5462 if (cfs_rq->next == se) in clear_buddies()
5463 __clear_buddies_next(se); in clear_buddies()
5468 static inline void finish_delayed_dequeue_entity(struct sched_entity *se) in finish_delayed_dequeue_entity() argument
5470 se->sched_delayed = 0; in finish_delayed_dequeue_entity()
5471 if (sched_feat(DELAY_ZERO) && se->vlag > 0) in finish_delayed_dequeue_entity()
5472 se->vlag = 0; in finish_delayed_dequeue_entity()
5476 dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags) in dequeue_entity() argument
5483 SCHED_WARN_ON(!se->sched_delayed); in dequeue_entity()
5493 SCHED_WARN_ON(delay && se->sched_delayed); in dequeue_entity()
5496 !entity_eligible(cfs_rq, se)) { in dequeue_entity()
5497 if (cfs_rq->next == se) in dequeue_entity()
5499 update_load_avg(cfs_rq, se, 0); in dequeue_entity()
5500 se->sched_delayed = 1; in dequeue_entity()
5506 if (entity_is_task(se) && task_on_rq_migrating(task_of(se))) in dequeue_entity()
5518 update_load_avg(cfs_rq, se, action); in dequeue_entity()
5519 se_update_runnable(se); in dequeue_entity()
5521 update_stats_dequeue_fair(cfs_rq, se, flags); in dequeue_entity()
5523 clear_buddies(cfs_rq, se); in dequeue_entity()
5525 update_entity_lag(cfs_rq, se); in dequeue_entity()
5527 se->deadline -= se->vruntime; in dequeue_entity()
5528 se->rel_deadline = 1; in dequeue_entity()
5531 if (se != cfs_rq->curr) in dequeue_entity()
5532 __dequeue_entity(cfs_rq, se); in dequeue_entity()
5533 se->on_rq = 0; in dequeue_entity()
5534 account_entity_dequeue(cfs_rq, se); in dequeue_entity()
5539 update_cfs_group(se); in dequeue_entity()
5551 finish_delayed_dequeue_entity(se); in dequeue_entity()
5560 set_next_entity(struct cfs_rq *cfs_rq, struct sched_entity *se) in set_next_entity() argument
5562 clear_buddies(cfs_rq, se); in set_next_entity()
5565 if (se->on_rq) { in set_next_entity()
5571 update_stats_wait_end_fair(cfs_rq, se); in set_next_entity()
5572 __dequeue_entity(cfs_rq, se); in set_next_entity()
5573 update_load_avg(cfs_rq, se, UPDATE_TG); in set_next_entity()
5578 se->vlag = se->deadline; in set_next_entity()
5581 update_stats_curr_start(cfs_rq, se); in set_next_entity()
5583 cfs_rq->curr = se; in set_next_entity()
5591 rq_of(cfs_rq)->cfs.load.weight >= 2*se->load.weight) { in set_next_entity()
5594 stats = __schedstats_from_se(se); in set_next_entity()
5597 se->sum_exec_runtime - se->prev_sum_exec_runtime)); in set_next_entity()
5600 se->prev_sum_exec_runtime = se->sum_exec_runtime; in set_next_entity()
5603 static int dequeue_entities(struct rq *rq, struct sched_entity *se, int flags);
5625 struct sched_entity *se = pick_eevdf(cfs_rq); in pick_next_entity() local
5626 if (se->sched_delayed) { in pick_next_entity()
5627 dequeue_entities(rq, se, DEQUEUE_SLEEP | DEQUEUE_DELAYED); in pick_next_entity()
5633 return se; in pick_next_entity()
5919 struct sched_entity *se; in throttle_cfs_rq() local
5944 se = cfs_rq->tg->se[cpu_of(rq_of(cfs_rq))]; in throttle_cfs_rq()
5953 for_each_sched_entity(se) { in throttle_cfs_rq()
5954 struct cfs_rq *qcfs_rq = cfs_rq_of(se); in throttle_cfs_rq()
5958 if (!se->on_rq) in throttle_cfs_rq()
5967 if (se->sched_delayed) in throttle_cfs_rq()
5969 dequeue_entity(qcfs_rq, se, flags); in throttle_cfs_rq()
5971 if (cfs_rq_is_idle(group_cfs_rq(se))) in throttle_cfs_rq()
5979 se = parent_entity(se); in throttle_cfs_rq()
5984 for_each_sched_entity(se) { in throttle_cfs_rq()
5985 struct cfs_rq *qcfs_rq = cfs_rq_of(se); in throttle_cfs_rq()
5987 if (!se->on_rq) in throttle_cfs_rq()
5990 update_load_avg(qcfs_rq, se, 0); in throttle_cfs_rq()
5991 se_update_runnable(se); in throttle_cfs_rq()
5993 if (cfs_rq_is_idle(group_cfs_rq(se))) in throttle_cfs_rq()
6022 struct sched_entity *se; in unthrottle_cfs_rq() local
6026 se = cfs_rq->tg->se[cpu_of(rq)]; in unthrottle_cfs_rq()
6050 for_each_sched_entity(se) { in unthrottle_cfs_rq()
6051 if (list_add_leaf_cfs_rq(cfs_rq_of(se))) in unthrottle_cfs_rq()
6059 for_each_sched_entity(se) { in unthrottle_cfs_rq()
6060 struct cfs_rq *qcfs_rq = cfs_rq_of(se); in unthrottle_cfs_rq()
6063 if (se->sched_delayed) { in unthrottle_cfs_rq()
6066 dequeue_entity(qcfs_rq, se, flags); in unthrottle_cfs_rq()
6067 } else if (se->on_rq) in unthrottle_cfs_rq()
6069 enqueue_entity(qcfs_rq, se, ENQUEUE_WAKEUP); in unthrottle_cfs_rq()
6071 if (cfs_rq_is_idle(group_cfs_rq(se))) in unthrottle_cfs_rq()
6082 for_each_sched_entity(se) { in unthrottle_cfs_rq()
6083 struct cfs_rq *qcfs_rq = cfs_rq_of(se); in unthrottle_cfs_rq()
6085 update_load_avg(qcfs_rq, se, UPDATE_TG); in unthrottle_cfs_rq()
6086 se_update_runnable(se); in unthrottle_cfs_rq()
6088 if (cfs_rq_is_idle(group_cfs_rq(se))) in unthrottle_cfs_rq()
6815 struct sched_entity *se = &p->se; in hrtick_start_fair() local
6820 u64 ran = se->sum_exec_runtime - se->prev_sum_exec_runtime; in hrtick_start_fair()
6821 u64 slice = se->slice; in hrtick_start_fair()
6919 requeue_delayed_entity(struct sched_entity *se) in requeue_delayed_entity() argument
6921 struct cfs_rq *cfs_rq = cfs_rq_of(se); in requeue_delayed_entity()
6928 SCHED_WARN_ON(!se->sched_delayed); in requeue_delayed_entity()
6929 SCHED_WARN_ON(!se->on_rq); in requeue_delayed_entity()
6932 update_entity_lag(cfs_rq, se); in requeue_delayed_entity()
6933 if (se->vlag > 0) { in requeue_delayed_entity()
6935 if (se != cfs_rq->curr) in requeue_delayed_entity()
6936 __dequeue_entity(cfs_rq, se); in requeue_delayed_entity()
6937 se->vlag = 0; in requeue_delayed_entity()
6938 place_entity(cfs_rq, se, 0); in requeue_delayed_entity()
6939 if (se != cfs_rq->curr) in requeue_delayed_entity()
6940 __enqueue_entity(cfs_rq, se); in requeue_delayed_entity()
6945 update_load_avg(cfs_rq, se, 0); in requeue_delayed_entity()
6946 se->sched_delayed = 0; in requeue_delayed_entity()
6958 struct sched_entity *se = &p->se; in enqueue_task_fair() local
6970 if (!(p->se.sched_delayed && (task_on_rq_migrating(p) || (flags & ENQUEUE_RESTORE)))) in enqueue_task_fair()
6974 requeue_delayed_entity(se); in enqueue_task_fair()
6986 for_each_sched_entity(se) { in enqueue_task_fair()
6987 if (se->on_rq) { in enqueue_task_fair()
6988 if (se->sched_delayed) in enqueue_task_fair()
6989 requeue_delayed_entity(se); in enqueue_task_fair()
6992 cfs_rq = cfs_rq_of(se); in enqueue_task_fair()
7000 se->slice = slice; in enqueue_task_fair()
7001 se->custom_slice = 1; in enqueue_task_fair()
7003 enqueue_entity(cfs_rq, se, flags); in enqueue_task_fair()
7019 for_each_sched_entity(se) { in enqueue_task_fair()
7020 cfs_rq = cfs_rq_of(se); in enqueue_task_fair()
7022 update_load_avg(cfs_rq, se, UPDATE_TG); in enqueue_task_fair()
7023 se_update_runnable(se); in enqueue_task_fair()
7024 update_cfs_group(se); in enqueue_task_fair()
7026 se->slice = slice; in enqueue_task_fair()
7073 static void set_next_buddy(struct sched_entity *se);
7084 static int dequeue_entities(struct rq *rq, struct sched_entity *se, int flags) in dequeue_entities() argument
7096 if (entity_is_task(se)) { in dequeue_entities()
7097 p = task_of(se); in dequeue_entities()
7101 cfs_rq = group_cfs_rq(se); in dequeue_entities()
7105 for_each_sched_entity(se) { in dequeue_entities()
7106 cfs_rq = cfs_rq_of(se); in dequeue_entities()
7108 if (!dequeue_entity(cfs_rq, se, flags)) { in dequeue_entities()
7109 if (p && &p->se == se) in dequeue_entities()
7130 se = parent_entity(se); in dequeue_entities()
7135 if (task_sleep && se && !throttled_hierarchy(cfs_rq)) in dequeue_entities()
7136 set_next_buddy(se); in dequeue_entities()
7143 for_each_sched_entity(se) { in dequeue_entities()
7144 cfs_rq = cfs_rq_of(se); in dequeue_entities()
7146 update_load_avg(cfs_rq, se, UPDATE_TG); in dequeue_entities()
7147 se_update_runnable(se); in dequeue_entities()
7148 update_cfs_group(se); in dequeue_entities()
7150 se->slice = slice; in dequeue_entities()
7198 if (!(p->se.sched_delayed && (task_on_rq_migrating(p) || (flags & DEQUEUE_SAVE)))) in dequeue_task_fair()
7202 if (dequeue_entities(rq, &p->se, flags) < 0) in dequeue_task_fair()
7257 if (cpu_of(rq) != task_cpu(p) || !READ_ONCE(p->se.avg.last_update_time)) in cpu_load_without()
7280 if (cpu_of(rq) != task_cpu(p) || !READ_ONCE(p->se.avg.last_update_time)) in cpu_runnable_without()
7287 lsub_positive(&runnable, p->se.avg.runnable_avg); in cpu_runnable_without()
7522 sync_entity_load_avg(&p->se); in sched_balance_find_dst_cpu()
7855 sync_entity_load_avg(&p->se); in select_idle_sibling()
8102 if (cpu != task_cpu(p) || !READ_ONCE(p->se.avg.last_update_time)) in cpu_util_without()
8423 sync_entity_load_avg(&p->se); in find_energy_efficient_cpu()
8665 struct sched_entity *se = &p->se; in migrate_task_rq_fair() local
8668 remove_entity_load_avg(se); in migrate_task_rq_fair()
8680 migrate_se_pelt_lag(se); in migrate_task_rq_fair()
8684 se->avg.last_update_time = 0; in migrate_task_rq_fair()
8691 struct sched_entity *se = &p->se; in task_dead_fair() local
8693 if (se->sched_delayed) { in task_dead_fair()
8698 if (se->sched_delayed) { in task_dead_fair()
8700 dequeue_entities(rq, se, DEQUEUE_SLEEP | DEQUEUE_DELAYED); in task_dead_fair()
8705 remove_entity_load_avg(se); in task_dead_fair()
8750 static void set_next_buddy(struct sched_entity *se) in set_next_buddy() argument
8752 for_each_sched_entity(se) { in set_next_buddy()
8753 if (SCHED_WARN_ON(!se->on_rq)) in set_next_buddy()
8755 if (se_is_idle(se)) in set_next_buddy()
8757 cfs_rq_of(se)->next = se; in set_next_buddy()
8767 struct sched_entity *se = &curr->se, *pse = &p->se; in check_preempt_wakeup_fair() local
8771 if (unlikely(se == pse)) in check_preempt_wakeup_fair()
8803 find_matching_se(&se, &pse); in check_preempt_wakeup_fair()
8806 cse_is_idle = se_is_idle(se); in check_preempt_wakeup_fair()
8824 cfs_rq = cfs_rq_of(se); in check_preempt_wakeup_fair()
8833 if (do_preempt_short(cfs_rq, pse, se) && se->vlag == se->deadline) in check_preempt_wakeup_fair()
8834 se->vlag = se->deadline + 1; in check_preempt_wakeup_fair()
8850 struct sched_entity *se; in pick_task_fair() local
8866 se = pick_next_entity(rq, cfs_rq); in pick_task_fair()
8867 if (!se) in pick_task_fair()
8869 cfs_rq = group_cfs_rq(se); in pick_task_fair()
8872 return task_of(se); in pick_task_fair()
8881 struct sched_entity *se; in pick_next_task_fair() local
8889 se = &p->se; in pick_next_task_fair()
8909 struct sched_entity *pse = &prev->se; in pick_next_task_fair()
8912 while (!(cfs_rq = is_same_group(se, pse))) { in pick_next_task_fair()
8913 int se_depth = se->depth; in pick_next_task_fair()
8921 set_next_entity(cfs_rq_of(se), se); in pick_next_task_fair()
8922 se = parent_entity(se); in pick_next_task_fair()
8927 set_next_entity(cfs_rq, se); in pick_next_task_fair()
8994 struct sched_entity *se = &prev->se; in put_prev_task_fair() local
8997 for_each_sched_entity(se) { in put_prev_task_fair()
8998 cfs_rq = cfs_rq_of(se); in put_prev_task_fair()
8999 put_prev_entity(cfs_rq, se); in put_prev_task_fair()
9010 struct sched_entity *se = &curr->se; in yield_task_fair() local
9018 clear_buddies(cfs_rq, se); in yield_task_fair()
9032 se->deadline += calc_delta_fair(se->slice, se); in yield_task_fair()
9037 struct sched_entity *se = &p->se; in yield_to_task_fair() local
9040 if (!se->on_rq || throttled_hierarchy(cfs_rq_of(se))) in yield_to_task_fair()
9044 set_next_buddy(se); in yield_to_task_fair()
9280 (&p->se == cfs_rq_of(&p->se)->next)) in task_hot()
9296 delta = rq_clock_task(env->src_rq) - p->se.exec_start; in task_hot()
9477 &env->src_rq->cfs_tasks, se.group_node) { in detach_one_task()
9542 p = list_last_entry(tasks, struct task_struct, se.group_node); in detach_tasks()
9597 list_add(&p->se.group_node, &env->tasks); in detach_tasks()
9620 list_move(&p->se.group_node, tasks); in detach_tasks()
9673 p = list_first_entry(tasks, struct task_struct, se.group_node); in attach_tasks()
9674 list_del_init(&p->se.group_node); in attach_tasks()
9757 struct sched_entity *se; in __update_blocked_fair() local
9770 se = cfs_rq->tg->se[cpu]; in __update_blocked_fair()
9771 if (se && !skip_blocked_update(se)) in __update_blocked_fair()
9772 update_load_avg(cfs_rq_of(se), se, UPDATE_TG); in __update_blocked_fair()
9797 struct sched_entity *se = cfs_rq->tg->se[cpu_of(rq)]; in update_cfs_rq_h_load() local
9805 for_each_sched_entity(se) { in update_cfs_rq_h_load()
9806 cfs_rq = cfs_rq_of(se); in update_cfs_rq_h_load()
9807 WRITE_ONCE(cfs_rq->h_load_next, se); in update_cfs_rq_h_load()
9812 if (!se) { in update_cfs_rq_h_load()
9817 while ((se = READ_ONCE(cfs_rq->h_load_next)) != NULL) { in update_cfs_rq_h_load()
9819 load = div64_ul(load * se->avg.load_avg, in update_cfs_rq_h_load()
9821 cfs_rq = group_cfs_rq(se); in update_cfs_rq_h_load()
9832 return div64_ul(p->se.avg.load_avg * cfs_rq->h_load, in task_h_load()
9850 return p->se.avg.load_avg; in task_h_load()
10581 if (cpu != task_cpu(p) || !READ_ONCE(p->se.avg.last_update_time)) in task_running_on_cpu()
12916 __entity_slice_used(struct sched_entity *se, int min_nr_tasks) in __entity_slice_used() argument
12918 u64 rtime = se->sum_exec_runtime - se->prev_sum_exec_runtime; in __entity_slice_used()
12919 u64 slice = se->slice; in __entity_slice_used()
12945 __entity_slice_used(&curr->se, MIN_NR_TASKS_DURING_FORCEIDLE)) in task_tick_core()
12952 static void se_fi_update(const struct sched_entity *se, unsigned int fi_seq, in se_fi_update() argument
12955 for_each_sched_entity(se) { in se_fi_update()
12956 struct cfs_rq *cfs_rq = cfs_rq_of(se); in se_fi_update()
12970 struct sched_entity *se = &p->se; in task_vruntime_update() local
12975 se_fi_update(se, rq->core->core_forceidle_seq, in_fi); in task_vruntime_update()
12982 const struct sched_entity *sea = &a->se; in cfs_prio_less()
12983 const struct sched_entity *seb = &b->se; in cfs_prio_less()
13052 struct sched_entity *se = &curr->se; in task_tick_fair() local
13054 for_each_sched_entity(se) { in task_tick_fair()
13055 cfs_rq = cfs_rq_of(se); in task_tick_fair()
13056 entity_tick(cfs_rq, se, queued); in task_tick_fair()
13108 static void propagate_entity_cfs_rq(struct sched_entity *se) in propagate_entity_cfs_rq() argument
13110 struct cfs_rq *cfs_rq = cfs_rq_of(se); in propagate_entity_cfs_rq()
13119 se = se->parent; in propagate_entity_cfs_rq()
13121 for_each_sched_entity(se) { in propagate_entity_cfs_rq()
13122 cfs_rq = cfs_rq_of(se); in propagate_entity_cfs_rq()
13124 update_load_avg(cfs_rq, se, UPDATE_TG); in propagate_entity_cfs_rq()
13134 static void propagate_entity_cfs_rq(struct sched_entity *se) { } in propagate_entity_cfs_rq() argument
13137 static void detach_entity_cfs_rq(struct sched_entity *se) in detach_entity_cfs_rq() argument
13139 struct cfs_rq *cfs_rq = cfs_rq_of(se); in detach_entity_cfs_rq()
13148 if (!se->avg.last_update_time) in detach_entity_cfs_rq()
13153 update_load_avg(cfs_rq, se, 0); in detach_entity_cfs_rq()
13154 detach_entity_load_avg(cfs_rq, se); in detach_entity_cfs_rq()
13156 propagate_entity_cfs_rq(se); in detach_entity_cfs_rq()
13159 static void attach_entity_cfs_rq(struct sched_entity *se) in attach_entity_cfs_rq() argument
13161 struct cfs_rq *cfs_rq = cfs_rq_of(se); in attach_entity_cfs_rq()
13164 update_load_avg(cfs_rq, se, sched_feat(ATTACH_AGE_LOAD) ? 0 : SKIP_AGE_LOAD); in attach_entity_cfs_rq()
13165 attach_entity_load_avg(cfs_rq, se); in attach_entity_cfs_rq()
13167 propagate_entity_cfs_rq(se); in attach_entity_cfs_rq()
13172 struct sched_entity *se = &p->se; in detach_task_cfs_rq() local
13174 detach_entity_cfs_rq(se); in detach_task_cfs_rq()
13179 struct sched_entity *se = &p->se; in attach_task_cfs_rq() local
13181 attach_entity_cfs_rq(se); in attach_task_cfs_rq()
13191 SCHED_WARN_ON(p->se.sched_delayed); in switched_to_fair()
13212 struct sched_entity *se = &p->se; in __set_next_task_fair() local
13220 list_move(&se->group_node, &rq->cfs_tasks); in __set_next_task_fair()
13226 SCHED_WARN_ON(se->sched_delayed); in __set_next_task_fair()
13243 struct sched_entity *se = &p->se; in set_next_task_fair() local
13245 for_each_sched_entity(se) { in set_next_task_fair()
13246 struct cfs_rq *cfs_rq = cfs_rq_of(se); in set_next_task_fair()
13248 set_next_entity(cfs_rq, se); in set_next_task_fair()
13279 p->se.avg.last_update_time = 0; in task_change_group_fair()
13292 if (tg->se) in free_fair_sched_group()
13293 kfree(tg->se[i]); in free_fair_sched_group()
13297 kfree(tg->se); in free_fair_sched_group()
13302 struct sched_entity *se; in alloc_fair_sched_group() local
13309 tg->se = kcalloc(nr_cpu_ids, sizeof(se), GFP_KERNEL); in alloc_fair_sched_group()
13310 if (!tg->se) in alloc_fair_sched_group()
13323 se = kzalloc_node(sizeof(struct sched_entity_stats), in alloc_fair_sched_group()
13325 if (!se) in alloc_fair_sched_group()
13329 init_tg_cfs_entry(tg, cfs_rq, se, i, parent->se[i]); in alloc_fair_sched_group()
13330 init_entity_runnable_average(se); in alloc_fair_sched_group()
13343 struct sched_entity *se; in online_fair_sched_group() local
13350 se = tg->se[i]; in online_fair_sched_group()
13353 attach_entity_cfs_rq(se); in online_fair_sched_group()
13367 struct sched_entity *se = tg->se[cpu]; in unregister_fair_sched_group() local
13370 if (se) { in unregister_fair_sched_group()
13371 if (se->sched_delayed) { in unregister_fair_sched_group()
13373 if (se->sched_delayed) { in unregister_fair_sched_group()
13375 dequeue_entities(rq, se, DEQUEUE_SLEEP | DEQUEUE_DELAYED); in unregister_fair_sched_group()
13379 remove_entity_load_avg(se); in unregister_fair_sched_group()
13394 struct sched_entity *se, int cpu, in init_tg_cfs_entry() argument
13404 tg->se[cpu] = se; in init_tg_cfs_entry()
13407 if (!se) in init_tg_cfs_entry()
13411 se->cfs_rq = &rq->cfs; in init_tg_cfs_entry()
13412 se->depth = 0; in init_tg_cfs_entry()
13414 se->cfs_rq = parent->my_q; in init_tg_cfs_entry()
13415 se->depth = parent->depth + 1; in init_tg_cfs_entry()
13418 se->my_q = cfs_rq; in init_tg_cfs_entry()
13420 update_load_set(&se->load, NICE_0_LOAD); in init_tg_cfs_entry()
13421 se->parent = parent; in init_tg_cfs_entry()
13435 if (!tg->se[0]) in __sched_group_set_shares()
13446 struct sched_entity *se = tg->se[i]; in __sched_group_set_shares() local
13452 for_each_sched_entity(se) { in __sched_group_set_shares()
13453 update_load_avg(cfs_rq_of(se), se, UPDATE_TG); in __sched_group_set_shares()
13454 update_cfs_group(se); in __sched_group_set_shares()
13497 struct sched_entity *se = tg->se[i]; in sched_group_set_idle() local
13509 if (se->on_rq) { in sched_group_set_idle()
13510 parent_cfs_rq = cfs_rq_of(se); in sched_group_set_idle()
13522 for_each_sched_entity(se) { in sched_group_set_idle()
13523 struct cfs_rq *cfs_rq = cfs_rq_of(se); in sched_group_set_idle()
13525 if (!se->on_rq) in sched_group_set_idle()
13554 struct sched_entity *se = &task->se; in get_rr_interval_fair() local
13562 rr_interval = NS_TO_JIFFIES(se->slice); in get_rr_interval_fair()