Lines matching refs: cfs_rq (identifier cross-reference; all hits are in the CFS/EEVDF fair-class scheduler, kernel/sched/fair.c)
310 static inline bool list_add_leaf_cfs_rq(struct cfs_rq *cfs_rq) in list_add_leaf_cfs_rq() argument
312 struct rq *rq = rq_of(cfs_rq); in list_add_leaf_cfs_rq()
315 if (cfs_rq->on_list) in list_add_leaf_cfs_rq()
318 cfs_rq->on_list = 1; in list_add_leaf_cfs_rq()
329 if (cfs_rq->tg->parent && in list_add_leaf_cfs_rq()
330 cfs_rq->tg->parent->cfs_rq[cpu]->on_list) { in list_add_leaf_cfs_rq()
337 list_add_tail_rcu(&cfs_rq->leaf_cfs_rq_list, in list_add_leaf_cfs_rq()
338 &(cfs_rq->tg->parent->cfs_rq[cpu]->leaf_cfs_rq_list)); in list_add_leaf_cfs_rq()
348 if (!cfs_rq->tg->parent) { in list_add_leaf_cfs_rq()
353 list_add_tail_rcu(&cfs_rq->leaf_cfs_rq_list, in list_add_leaf_cfs_rq()
369 list_add_rcu(&cfs_rq->leaf_cfs_rq_list, rq->tmp_alone_branch); in list_add_leaf_cfs_rq()
374 rq->tmp_alone_branch = &cfs_rq->leaf_cfs_rq_list; in list_add_leaf_cfs_rq()
378 static inline void list_del_leaf_cfs_rq(struct cfs_rq *cfs_rq) in list_del_leaf_cfs_rq() argument
380 if (cfs_rq->on_list) { in list_del_leaf_cfs_rq()
381 struct rq *rq = rq_of(cfs_rq); in list_del_leaf_cfs_rq()
390 if (rq->tmp_alone_branch == &cfs_rq->leaf_cfs_rq_list) in list_del_leaf_cfs_rq()
391 rq->tmp_alone_branch = cfs_rq->leaf_cfs_rq_list.prev; in list_del_leaf_cfs_rq()
393 list_del_rcu(&cfs_rq->leaf_cfs_rq_list); in list_del_leaf_cfs_rq()
394 cfs_rq->on_list = 0; in list_del_leaf_cfs_rq()
404 #define for_each_leaf_cfs_rq_safe(rq, cfs_rq, pos) \ argument
405 list_for_each_entry_safe(cfs_rq, pos, &rq->leaf_cfs_rq_list, \
409 static inline struct cfs_rq *
412 if (se->cfs_rq == pse->cfs_rq) in is_same_group()
413 return se->cfs_rq; in is_same_group()
460 static int cfs_rq_is_idle(struct cfs_rq *cfs_rq) in cfs_rq_is_idle() argument
462 return cfs_rq->idle > 0; in cfs_rq_is_idle()
477 static inline bool list_add_leaf_cfs_rq(struct cfs_rq *cfs_rq) in list_add_leaf_cfs_rq() argument
482 static inline void list_del_leaf_cfs_rq(struct cfs_rq *cfs_rq) in list_del_leaf_cfs_rq() argument
490 #define for_each_leaf_cfs_rq_safe(rq, cfs_rq, pos) \ argument
491 for (cfs_rq = &rq->cfs, pos = NULL; cfs_rq; cfs_rq = pos)
508 static int cfs_rq_is_idle(struct cfs_rq *cfs_rq) in cfs_rq_is_idle() argument
521 void account_cfs_rq_runtime(struct cfs_rq *cfs_rq, u64 delta_exec);
555 static inline s64 entity_key(struct cfs_rq *cfs_rq, struct sched_entity *se) in entity_key() argument
557 return (s64)(se->vruntime - cfs_rq->min_vruntime); in entity_key()
622 avg_vruntime_add(struct cfs_rq *cfs_rq, struct sched_entity *se) in avg_vruntime_add() argument
625 s64 key = entity_key(cfs_rq, se); in avg_vruntime_add()
627 cfs_rq->avg_vruntime += key * weight; in avg_vruntime_add()
628 cfs_rq->avg_load += weight; in avg_vruntime_add()
632 avg_vruntime_sub(struct cfs_rq *cfs_rq, struct sched_entity *se) in avg_vruntime_sub() argument
635 s64 key = entity_key(cfs_rq, se); in avg_vruntime_sub()
637 cfs_rq->avg_vruntime -= key * weight; in avg_vruntime_sub()
638 cfs_rq->avg_load -= weight; in avg_vruntime_sub()
642 void avg_vruntime_update(struct cfs_rq *cfs_rq, s64 delta) in avg_vruntime_update() argument
647 cfs_rq->avg_vruntime -= cfs_rq->avg_load * delta; in avg_vruntime_update()
654 u64 avg_vruntime(struct cfs_rq *cfs_rq) in avg_vruntime() argument
656 struct sched_entity *curr = cfs_rq->curr; in avg_vruntime()
657 s64 avg = cfs_rq->avg_vruntime; in avg_vruntime()
658 long load = cfs_rq->avg_load; in avg_vruntime()
663 avg += entity_key(cfs_rq, curr) * weight; in avg_vruntime()
674 return cfs_rq->min_vruntime + avg; in avg_vruntime()
693 static void update_entity_lag(struct cfs_rq *cfs_rq, struct sched_entity *se) in update_entity_lag() argument
699 vlag = avg_vruntime(cfs_rq) - se->vruntime; in update_entity_lag()
722 static int vruntime_eligible(struct cfs_rq *cfs_rq, u64 vruntime) in vruntime_eligible() argument
724 struct sched_entity *curr = cfs_rq->curr; in vruntime_eligible()
725 s64 avg = cfs_rq->avg_vruntime; in vruntime_eligible()
726 long load = cfs_rq->avg_load; in vruntime_eligible()
731 avg += entity_key(cfs_rq, curr) * weight; in vruntime_eligible()
735 return avg >= (s64)(vruntime - cfs_rq->min_vruntime) * load; in vruntime_eligible()
738 int entity_eligible(struct cfs_rq *cfs_rq, struct sched_entity *se) in entity_eligible() argument
740 return vruntime_eligible(cfs_rq, se->vruntime); in entity_eligible()
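
The avg_vruntime_*() and *_eligible() hits above implement EEVDF's weighted zero-lag point: keys are stored relative to min_vruntime, the average is V = min_vruntime + sum(w_i * key_i) / sum(w_i), and an entity is eligible when its vruntime is at or before V. Below is a minimal userspace sketch of that arithmetic; toy_entity/toy_cfs_rq and the unscaled weights are illustrative, not kernel types, and the kernel additionally folds cfs_rq->curr into the sums and uses div_s64().

#include <stdio.h>
#include <stdint.h>

struct toy_entity { uint64_t vruntime; long weight; };

struct toy_cfs_rq {
	uint64_t min_vruntime;
	int64_t  avg_vruntime;	/* sum of (vruntime - min_vruntime) * weight */
	long     avg_load;	/* sum of weights */
};

static int64_t entity_key(struct toy_cfs_rq *rq, struct toy_entity *se)
{
	return (int64_t)(se->vruntime - rq->min_vruntime);
}

static void avg_vruntime_add(struct toy_cfs_rq *rq, struct toy_entity *se)
{
	rq->avg_vruntime += entity_key(rq, se) * se->weight;
	rq->avg_load     += se->weight;
}

/* V = min_vruntime + avg_vruntime / avg_load (plain integer division here) */
static uint64_t avg_vruntime(struct toy_cfs_rq *rq)
{
	int64_t avg = rq->avg_vruntime;

	if (rq->avg_load)
		avg /= rq->avg_load;
	return rq->min_vruntime + avg;
}

/*
 * Eligible iff vruntime <= V; tested without the division:
 *   avg_vruntime >= (vruntime - min_vruntime) * avg_load
 */
static int vruntime_eligible(struct toy_cfs_rq *rq, uint64_t vruntime)
{
	return rq->avg_vruntime >=
	       (int64_t)(vruntime - rq->min_vruntime) * rq->avg_load;
}

int main(void)
{
	struct toy_cfs_rq rq = { .min_vruntime = 1000 };
	struct toy_entity a = { .vruntime = 1100, .weight = 1024 };
	struct toy_entity b = { .vruntime = 1400, .weight =  512 };

	avg_vruntime_add(&rq, &a);
	avg_vruntime_add(&rq, &b);

	/* V = 1000 + (100*1024 + 400*512) / 1536 = 1200 */
	printf("V = %llu, a eligible: %d, b eligible: %d\n",
	       (unsigned long long)avg_vruntime(&rq),
	       vruntime_eligible(&rq, a.vruntime),
	       vruntime_eligible(&rq, b.vruntime));
	return 0;
}
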
743 static u64 __update_min_vruntime(struct cfs_rq *cfs_rq, u64 vruntime) in __update_min_vruntime() argument
745 u64 min_vruntime = cfs_rq->min_vruntime; in __update_min_vruntime()
751 avg_vruntime_update(cfs_rq, delta); in __update_min_vruntime()
757 static void update_min_vruntime(struct cfs_rq *cfs_rq) in update_min_vruntime() argument
759 struct sched_entity *se = __pick_root_entity(cfs_rq); in update_min_vruntime()
760 struct sched_entity *curr = cfs_rq->curr; in update_min_vruntime()
761 u64 vruntime = cfs_rq->min_vruntime; in update_min_vruntime()
778 cfs_rq->min_vruntime = __update_min_vruntime(cfs_rq, vruntime); in update_min_vruntime()
781 static inline u64 cfs_rq_min_slice(struct cfs_rq *cfs_rq) in cfs_rq_min_slice() argument
783 struct sched_entity *root = __pick_root_entity(cfs_rq); in cfs_rq_min_slice()
784 struct sched_entity *curr = cfs_rq->curr; in cfs_rq_min_slice()
848 static void __enqueue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se) in __enqueue_entity() argument
850 avg_vruntime_add(cfs_rq, se); in __enqueue_entity()
853 rb_add_augmented_cached(&se->run_node, &cfs_rq->tasks_timeline, in __enqueue_entity()
857 static void __dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se) in __dequeue_entity() argument
859 rb_erase_augmented_cached(&se->run_node, &cfs_rq->tasks_timeline, in __dequeue_entity()
861 avg_vruntime_sub(cfs_rq, se); in __dequeue_entity()
864 struct sched_entity *__pick_root_entity(struct cfs_rq *cfs_rq) in __pick_root_entity() argument
866 struct rb_node *root = cfs_rq->tasks_timeline.rb_root.rb_node; in __pick_root_entity()
874 struct sched_entity *__pick_first_entity(struct cfs_rq *cfs_rq) in __pick_first_entity() argument
876 struct rb_node *left = rb_first_cached(&cfs_rq->tasks_timeline); in __pick_first_entity()
892 static inline void set_protect_slice(struct cfs_rq *cfs_rq, struct sched_entity *se) in set_protect_slice() argument
898 slice = cfs_rq_min_slice(cfs_rq); in set_protect_slice()
907 static inline void update_protect_slice(struct cfs_rq *cfs_rq, struct sched_entity *se) in update_protect_slice() argument
909 u64 slice = cfs_rq_min_slice(cfs_rq); in update_protect_slice()
944 static struct sched_entity *__pick_eevdf(struct cfs_rq *cfs_rq, bool protect) in __pick_eevdf() argument
946 struct rb_node *node = cfs_rq->tasks_timeline.rb_root.rb_node; in __pick_eevdf()
947 struct sched_entity *se = __pick_first_entity(cfs_rq); in __pick_eevdf()
948 struct sched_entity *curr = cfs_rq->curr; in __pick_eevdf()
955 if (cfs_rq->nr_queued == 1) in __pick_eevdf()
958 if (curr && (!curr->on_rq || !entity_eligible(cfs_rq, curr))) in __pick_eevdf()
965 if (se && entity_eligible(cfs_rq, se)) { in __pick_eevdf()
978 if (left && vruntime_eligible(cfs_rq, in __pick_eevdf()
991 if (entity_eligible(cfs_rq, se)) { in __pick_eevdf()
1005 static struct sched_entity *pick_eevdf(struct cfs_rq *cfs_rq) in pick_eevdf() argument
1007 return __pick_eevdf(cfs_rq, true); in pick_eevdf()
1010 struct sched_entity *__pick_last_entity(struct cfs_rq *cfs_rq) in __pick_last_entity() argument
1012 struct rb_node *last = rb_last(&cfs_rq->tasks_timeline.rb_root); in __pick_last_entity()
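
__pick_eevdf() above descends an augmented rbtree to find the entity with the earliest virtual deadline among those whose vruntime is still eligible. The linear-scan toy below shows only that selection rule; pick_eevdf_linear and toy_entity are illustrative names, and the kernel version also handles cfs_rq->curr, the protected slice and the O(log n) tree descent.

#include <stdio.h>
#include <stddef.h>
#include <stdint.h>

struct toy_entity { uint64_t vruntime, deadline; };

static struct toy_entity *
pick_eevdf_linear(struct toy_entity *se, size_t nr, uint64_t V)
{
	struct toy_entity *best = NULL;
	size_t i;

	for (i = 0; i < nr; i++) {
		if ((int64_t)(se[i].vruntime - V) > 0)
			continue;		/* not eligible: vruntime is after V */
		if (!best || se[i].deadline < best->deadline)
			best = &se[i];		/* earliest eligible virtual deadline */
	}
	return best;
}

int main(void)
{
	struct toy_entity rq[] = {
		{ .vruntime = 1100, .deadline = 1500 },
		{ .vruntime = 1150, .deadline = 1300 },
		{ .vruntime = 1400, .deadline = 1200 },	/* earliest deadline but ineligible */
	};
	uint64_t V = 1200;	/* pretend avg_vruntime() returned this */
	struct toy_entity *se = pick_eevdf_linear(rq, 3, V);

	printf("picked entity with deadline %llu\n",
	       se ? (unsigned long long)se->deadline : 0ULL);
	return 0;
}
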
1035 static void clear_buddies(struct cfs_rq *cfs_rq, struct sched_entity *se);
1041 static bool update_deadline(struct cfs_rq *cfs_rq, struct sched_entity *se) in update_deadline() argument
1120 struct cfs_rq *cfs_rq = cfs_rq_of(se); in post_init_entity_util_avg() local
1122 long cpu_scale = arch_scale_cpu_capacity(cpu_of(rq_of(cfs_rq))); in post_init_entity_util_avg()
1123 long cap = (long)(cpu_scale - cfs_rq->avg.util_avg) / 2; in post_init_entity_util_avg()
1136 se->avg.last_update_time = cfs_rq_clock_pelt(cfs_rq); in post_init_entity_util_avg()
1141 if (cfs_rq->avg.util_avg != 0) { in post_init_entity_util_avg()
1142 sa->util_avg = cfs_rq->avg.util_avg * se_weight(se); in post_init_entity_util_avg()
1143 sa->util_avg /= (cfs_rq->avg.load_avg + 1); in post_init_entity_util_avg()
1207 static void update_curr(struct cfs_rq *cfs_rq) in update_curr() argument
1215 struct sched_entity *curr = cfs_rq->curr; in update_curr()
1216 struct rq *rq = rq_of(cfs_rq); in update_curr()
1228 resched = update_deadline(cfs_rq, curr); in update_curr()
1229 update_min_vruntime(cfs_rq); in update_curr()
1246 account_cfs_rq_runtime(cfs_rq, delta_exec); in update_curr()
1248 if (cfs_rq->nr_queued == 1) in update_curr()
1253 clear_buddies(cfs_rq, curr); in update_curr()
1263 update_stats_wait_start_fair(struct cfs_rq *cfs_rq, struct sched_entity *se) in update_stats_wait_start_fair() argument
1276 __update_stats_wait_start(rq_of(cfs_rq), p, stats); in update_stats_wait_start_fair()
1280 update_stats_wait_end_fair(struct cfs_rq *cfs_rq, struct sched_entity *se) in update_stats_wait_end_fair() argument
1302 __update_stats_wait_end(rq_of(cfs_rq), p, stats); in update_stats_wait_end_fair()
1306 update_stats_enqueue_sleeper_fair(struct cfs_rq *cfs_rq, struct sched_entity *se) in update_stats_enqueue_sleeper_fair() argument
1319 __update_stats_enqueue_sleeper(rq_of(cfs_rq), tsk, stats); in update_stats_enqueue_sleeper_fair()
1326 update_stats_enqueue_fair(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags) in update_stats_enqueue_fair() argument
1335 if (se != cfs_rq->curr) in update_stats_enqueue_fair()
1336 update_stats_wait_start_fair(cfs_rq, se); in update_stats_enqueue_fair()
1339 update_stats_enqueue_sleeper_fair(cfs_rq, se); in update_stats_enqueue_fair()
1343 update_stats_dequeue_fair(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags) in update_stats_dequeue_fair() argument
1353 if (se != cfs_rq->curr) in update_stats_dequeue_fair()
1354 update_stats_wait_end_fair(cfs_rq, se); in update_stats_dequeue_fair()
1364 rq_clock(rq_of(cfs_rq))); in update_stats_dequeue_fair()
1367 rq_clock(rq_of(cfs_rq))); in update_stats_dequeue_fair()
1375 update_stats_curr_start(struct cfs_rq *cfs_rq, struct sched_entity *se) in update_stats_curr_start() argument
1380 se->exec_start = rq_clock_task(rq_of(cfs_rq)); in update_stats_curr_start()
3680 account_entity_enqueue(struct cfs_rq *cfs_rq, struct sched_entity *se) in account_entity_enqueue() argument
3682 update_load_add(&cfs_rq->load, se->load.weight); in account_entity_enqueue()
3684 struct rq *rq = rq_of(cfs_rq); in account_entity_enqueue()
3689 cfs_rq->nr_queued++; in account_entity_enqueue()
3693 account_entity_dequeue(struct cfs_rq *cfs_rq, struct sched_entity *se) in account_entity_dequeue() argument
3695 update_load_sub(&cfs_rq->load, se->load.weight); in account_entity_dequeue()
3697 account_numa_dequeue(rq_of(cfs_rq), task_of(se)); in account_entity_dequeue()
3700 cfs_rq->nr_queued--; in account_entity_dequeue()
3752 enqueue_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se) in enqueue_load_avg() argument
3754 cfs_rq->avg.load_avg += se->avg.load_avg; in enqueue_load_avg()
3755 cfs_rq->avg.load_sum += se_weight(se) * se->avg.load_sum; in enqueue_load_avg()
3759 dequeue_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se) in dequeue_load_avg() argument
3761 sub_positive(&cfs_rq->avg.load_avg, se->avg.load_avg); in dequeue_load_avg()
3762 sub_positive(&cfs_rq->avg.load_sum, se_weight(se) * se->avg.load_sum); in dequeue_load_avg()
3764 cfs_rq->avg.load_sum = max_t(u32, cfs_rq->avg.load_sum, in dequeue_load_avg()
3765 cfs_rq->avg.load_avg * PELT_MIN_DIVIDER); in dequeue_load_avg()
3768 static void place_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags);
3770 static void reweight_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, in reweight_entity() argument
3773 bool curr = cfs_rq->curr == se; in reweight_entity()
3777 update_curr(cfs_rq); in reweight_entity()
3778 update_entity_lag(cfs_rq, se); in reweight_entity()
3781 cfs_rq->nr_queued--; in reweight_entity()
3783 __dequeue_entity(cfs_rq, se); in reweight_entity()
3784 update_load_sub(&cfs_rq->load, se->load.weight); in reweight_entity()
3786 dequeue_load_avg(cfs_rq, se); in reweight_entity()
3804 enqueue_load_avg(cfs_rq, se); in reweight_entity()
3806 place_entity(cfs_rq, se, 0); in reweight_entity()
3807 update_load_add(&cfs_rq->load, se->load.weight); in reweight_entity()
3809 __enqueue_entity(cfs_rq, se); in reweight_entity()
3810 cfs_rq->nr_queued++; in reweight_entity()
3819 update_min_vruntime(cfs_rq); in reweight_entity()
3827 struct cfs_rq *cfs_rq = cfs_rq_of(se); in reweight_task_fair() local
3830 reweight_entity(cfs_rq, se, lw->weight); in reweight_task_fair()
3834 static inline int throttled_hierarchy(struct cfs_rq *cfs_rq);
3910 static long calc_group_shares(struct cfs_rq *cfs_rq) in calc_group_shares() argument
3913 struct task_group *tg = cfs_rq->tg; in calc_group_shares()
3917 load = max(scale_load_down(cfs_rq->load.weight), cfs_rq->avg.load_avg); in calc_group_shares()
3922 tg_weight -= cfs_rq->tg_load_avg_contrib; in calc_group_shares()
3950 struct cfs_rq *gcfs_rq = group_cfs_rq(se); in update_cfs_group()
3971 static inline void cfs_rq_util_change(struct cfs_rq *cfs_rq, int flags) in cfs_rq_util_change() argument
3973 struct rq *rq = rq_of(cfs_rq); in cfs_rq_util_change()
3975 if (&rq->cfs == cfs_rq) { in cfs_rq_util_change()
4017 static inline u64 cfs_rq_last_update_time(struct cfs_rq *cfs_rq) in cfs_rq_last_update_time() argument
4019 return u64_u32_load_copy(cfs_rq->avg.last_update_time, in cfs_rq_last_update_time()
4020 cfs_rq->last_update_time_copy); in cfs_rq_last_update_time()
4031 static inline bool child_cfs_rq_on_list(struct cfs_rq *cfs_rq) in child_cfs_rq_on_list() argument
4033 struct cfs_rq *prev_cfs_rq; in child_cfs_rq_on_list()
4035 struct rq *rq = rq_of(cfs_rq); in child_cfs_rq_on_list()
4037 if (cfs_rq->on_list) { in child_cfs_rq_on_list()
4038 prev = cfs_rq->leaf_cfs_rq_list.prev; in child_cfs_rq_on_list()
4046 prev_cfs_rq = container_of(prev, struct cfs_rq, leaf_cfs_rq_list); in child_cfs_rq_on_list()
4048 return (prev_cfs_rq->tg->parent == cfs_rq->tg); in child_cfs_rq_on_list()
4051 static inline bool cfs_rq_is_decayed(struct cfs_rq *cfs_rq) in cfs_rq_is_decayed() argument
4053 if (cfs_rq->load.weight) in cfs_rq_is_decayed()
4056 if (!load_avg_is_decayed(&cfs_rq->avg)) in cfs_rq_is_decayed()
4059 if (child_cfs_rq_on_list(cfs_rq)) in cfs_rq_is_decayed()
4079 static inline void update_tg_load_avg(struct cfs_rq *cfs_rq) in update_tg_load_avg() argument
4087 if (cfs_rq->tg == &root_task_group) in update_tg_load_avg()
4091 if (!cpu_active(cpu_of(rq_of(cfs_rq)))) in update_tg_load_avg()
4098 now = sched_clock_cpu(cpu_of(rq_of(cfs_rq))); in update_tg_load_avg()
4099 if (now - cfs_rq->last_update_tg_load_avg < NSEC_PER_MSEC) in update_tg_load_avg()
4102 delta = cfs_rq->avg.load_avg - cfs_rq->tg_load_avg_contrib; in update_tg_load_avg()
4103 if (abs(delta) > cfs_rq->tg_load_avg_contrib / 64) { in update_tg_load_avg()
4104 atomic_long_add(delta, &cfs_rq->tg->load_avg); in update_tg_load_avg()
4105 cfs_rq->tg_load_avg_contrib = cfs_rq->avg.load_avg; in update_tg_load_avg()
4106 cfs_rq->last_update_tg_load_avg = now; in update_tg_load_avg()
4110 static inline void clear_tg_load_avg(struct cfs_rq *cfs_rq) in clear_tg_load_avg() argument
4118 if (cfs_rq->tg == &root_task_group) in clear_tg_load_avg()
4121 now = sched_clock_cpu(cpu_of(rq_of(cfs_rq))); in clear_tg_load_avg()
4122 delta = 0 - cfs_rq->tg_load_avg_contrib; in clear_tg_load_avg()
4123 atomic_long_add(delta, &cfs_rq->tg->load_avg); in clear_tg_load_avg()
4124 cfs_rq->tg_load_avg_contrib = 0; in clear_tg_load_avg()
4125 cfs_rq->last_update_tg_load_avg = now; in clear_tg_load_avg()
4144 struct cfs_rq *cfs_rq = tg->cfs_rq[cpu_of(rq)]; in clear_tg_offline_cfs_rqs() local
4146 clear_tg_load_avg(cfs_rq); in clear_tg_offline_cfs_rqs()
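
update_tg_load_avg() above filters how often a per-CPU cfs_rq folds its load_avg into the shared tg->load_avg: at most once per millisecond and only when the change exceeds 1/64 of the last recorded contribution. A userspace sketch of the delta filter follows; toy_tg/toy_cfs_rq are illustrative, and the 1 ms rate limit and the atomic add are left out.

#include <stdio.h>
#include <stdlib.h>

struct toy_tg     { long load_avg; };			/* shared across CPUs */
struct toy_cfs_rq { struct toy_tg *tg; long load_avg; long tg_load_avg_contrib; };

static void toy_update_tg_load_avg(struct toy_cfs_rq *cfs_rq)
{
	long delta = cfs_rq->load_avg - cfs_rq->tg_load_avg_contrib;

	/* only propagate changes larger than 1/64 of the last contribution */
	if (labs(delta) > cfs_rq->tg_load_avg_contrib / 64) {
		cfs_rq->tg->load_avg += delta;		/* atomic_long_add() in the kernel */
		cfs_rq->tg_load_avg_contrib = cfs_rq->load_avg;
	}
}

int main(void)
{
	struct toy_tg tg = { 0 };
	struct toy_cfs_rq cfs_rq = { .tg = &tg, .load_avg = 1024 };

	toy_update_tg_load_avg(&cfs_rq);	/* first call: contribution 0 -> 1024 */
	cfs_rq.load_avg = 1030;			/* small drift (6 < 1024/64): filtered out */
	toy_update_tg_load_avg(&cfs_rq);
	cfs_rq.load_avg = 1200;			/* large change: propagated */
	toy_update_tg_load_avg(&cfs_rq);

	printf("tg->load_avg = %ld, contrib = %ld\n",
	       tg.load_avg, cfs_rq.tg_load_avg_contrib);
	return 0;
}
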
4159 struct cfs_rq *prev, struct cfs_rq *next) in set_task_rq_fair()
4252 update_tg_cfs_util(struct cfs_rq *cfs_rq, struct sched_entity *se, struct cfs_rq *gcfs_rq) in update_tg_cfs_util() argument
4265 divider = get_pelt_divider(&cfs_rq->avg); in update_tg_cfs_util()
4275 add_positive(&cfs_rq->avg.util_avg, delta_avg); in update_tg_cfs_util()
4276 add_positive(&cfs_rq->avg.util_sum, delta_sum); in update_tg_cfs_util()
4279 cfs_rq->avg.util_sum = max_t(u32, cfs_rq->avg.util_sum, in update_tg_cfs_util()
4280 cfs_rq->avg.util_avg * PELT_MIN_DIVIDER); in update_tg_cfs_util()
4284 update_tg_cfs_runnable(struct cfs_rq *cfs_rq, struct sched_entity *se, struct cfs_rq *gcfs_rq) in update_tg_cfs_runnable() argument
4297 divider = get_pelt_divider(&cfs_rq->avg); in update_tg_cfs_runnable()
4306 add_positive(&cfs_rq->avg.runnable_avg, delta_avg); in update_tg_cfs_runnable()
4307 add_positive(&cfs_rq->avg.runnable_sum, delta_sum); in update_tg_cfs_runnable()
4309 cfs_rq->avg.runnable_sum = max_t(u32, cfs_rq->avg.runnable_sum, in update_tg_cfs_runnable()
4310 cfs_rq->avg.runnable_avg * PELT_MIN_DIVIDER); in update_tg_cfs_runnable()
4314 update_tg_cfs_load(struct cfs_rq *cfs_rq, struct sched_entity *se, struct cfs_rq *gcfs_rq) in update_tg_cfs_load() argument
4331 divider = get_pelt_divider(&cfs_rq->avg); in update_tg_cfs_load()
4374 add_positive(&cfs_rq->avg.load_avg, delta_avg); in update_tg_cfs_load()
4375 add_positive(&cfs_rq->avg.load_sum, delta_sum); in update_tg_cfs_load()
4377 cfs_rq->avg.load_sum = max_t(u32, cfs_rq->avg.load_sum, in update_tg_cfs_load()
4378 cfs_rq->avg.load_avg * PELT_MIN_DIVIDER); in update_tg_cfs_load()
4381 static inline void add_tg_cfs_propagate(struct cfs_rq *cfs_rq, long runnable_sum) in add_tg_cfs_propagate() argument
4383 cfs_rq->propagate = 1; in add_tg_cfs_propagate()
4384 cfs_rq->prop_runnable_sum += runnable_sum; in add_tg_cfs_propagate()
4390 struct cfs_rq *cfs_rq, *gcfs_rq; in propagate_entity_load_avg() local
4401 cfs_rq = cfs_rq_of(se); in propagate_entity_load_avg()
4403 add_tg_cfs_propagate(cfs_rq, gcfs_rq->prop_runnable_sum); in propagate_entity_load_avg()
4405 update_tg_cfs_util(cfs_rq, se, gcfs_rq); in propagate_entity_load_avg()
4406 update_tg_cfs_runnable(cfs_rq, se, gcfs_rq); in propagate_entity_load_avg()
4407 update_tg_cfs_load(cfs_rq, se, gcfs_rq); in propagate_entity_load_avg()
4409 trace_pelt_cfs_tp(cfs_rq); in propagate_entity_load_avg()
4421 struct cfs_rq *gcfs_rq = group_cfs_rq(se); in skip_blocked_update()
4447 static inline void update_tg_load_avg(struct cfs_rq *cfs_rq) {} in update_tg_load_avg() argument
4456 static inline void add_tg_cfs_propagate(struct cfs_rq *cfs_rq, long runnable_sum) {} in add_tg_cfs_propagate() argument
4464 struct cfs_rq *cfs_rq; in migrate_se_pelt_lag() local
4471 cfs_rq = cfs_rq_of(se); in migrate_se_pelt_lag()
4472 rq = rq_of(cfs_rq); in migrate_se_pelt_lag()
4512 throttled = u64_u32_load(cfs_rq->throttled_pelt_idle); in migrate_se_pelt_lag()
4525 lut = cfs_rq_last_update_time(cfs_rq); in migrate_se_pelt_lag()
4559 update_cfs_rq_load_avg(u64 now, struct cfs_rq *cfs_rq) in update_cfs_rq_load_avg() argument
4562 struct sched_avg *sa = &cfs_rq->avg; in update_cfs_rq_load_avg()
4565 if (cfs_rq->removed.nr) { in update_cfs_rq_load_avg()
4567 u32 divider = get_pelt_divider(&cfs_rq->avg); in update_cfs_rq_load_avg()
4569 raw_spin_lock(&cfs_rq->removed.lock); in update_cfs_rq_load_avg()
4570 swap(cfs_rq->removed.util_avg, removed_util); in update_cfs_rq_load_avg()
4571 swap(cfs_rq->removed.load_avg, removed_load); in update_cfs_rq_load_avg()
4572 swap(cfs_rq->removed.runnable_avg, removed_runnable); in update_cfs_rq_load_avg()
4573 cfs_rq->removed.nr = 0; in update_cfs_rq_load_avg()
4574 raw_spin_unlock(&cfs_rq->removed.lock); in update_cfs_rq_load_avg()
4609 add_tg_cfs_propagate(cfs_rq, in update_cfs_rq_load_avg()
4615 decayed |= __update_load_avg_cfs_rq(now, cfs_rq); in update_cfs_rq_load_avg()
4617 cfs_rq->last_update_time_copy, in update_cfs_rq_load_avg()
4630 static void attach_entity_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se) in attach_entity_load_avg() argument
4636 u32 divider = get_pelt_divider(&cfs_rq->avg); in attach_entity_load_avg()
4645 se->avg.last_update_time = cfs_rq->avg.last_update_time; in attach_entity_load_avg()
4646 se->avg.period_contrib = cfs_rq->avg.period_contrib; in attach_entity_load_avg()
4664 enqueue_load_avg(cfs_rq, se); in attach_entity_load_avg()
4665 cfs_rq->avg.util_avg += se->avg.util_avg; in attach_entity_load_avg()
4666 cfs_rq->avg.util_sum += se->avg.util_sum; in attach_entity_load_avg()
4667 cfs_rq->avg.runnable_avg += se->avg.runnable_avg; in attach_entity_load_avg()
4668 cfs_rq->avg.runnable_sum += se->avg.runnable_sum; in attach_entity_load_avg()
4670 add_tg_cfs_propagate(cfs_rq, se->avg.load_sum); in attach_entity_load_avg()
4672 cfs_rq_util_change(cfs_rq, 0); in attach_entity_load_avg()
4674 trace_pelt_cfs_tp(cfs_rq); in attach_entity_load_avg()
4685 static void detach_entity_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se) in detach_entity_load_avg() argument
4687 dequeue_load_avg(cfs_rq, se); in detach_entity_load_avg()
4688 sub_positive(&cfs_rq->avg.util_avg, se->avg.util_avg); in detach_entity_load_avg()
4689 sub_positive(&cfs_rq->avg.util_sum, se->avg.util_sum); in detach_entity_load_avg()
4691 cfs_rq->avg.util_sum = max_t(u32, cfs_rq->avg.util_sum, in detach_entity_load_avg()
4692 cfs_rq->avg.util_avg * PELT_MIN_DIVIDER); in detach_entity_load_avg()
4694 sub_positive(&cfs_rq->avg.runnable_avg, se->avg.runnable_avg); in detach_entity_load_avg()
4695 sub_positive(&cfs_rq->avg.runnable_sum, se->avg.runnable_sum); in detach_entity_load_avg()
4697 cfs_rq->avg.runnable_sum = max_t(u32, cfs_rq->avg.runnable_sum, in detach_entity_load_avg()
4698 cfs_rq->avg.runnable_avg * PELT_MIN_DIVIDER); in detach_entity_load_avg()
4700 add_tg_cfs_propagate(cfs_rq, -se->avg.load_sum); in detach_entity_load_avg()
4702 cfs_rq_util_change(cfs_rq, 0); in detach_entity_load_avg()
4704 trace_pelt_cfs_tp(cfs_rq); in detach_entity_load_avg()
4716 static inline void update_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags) in update_load_avg() argument
4718 u64 now = cfs_rq_clock_pelt(cfs_rq); in update_load_avg()
4726 __update_load_avg_se(now, cfs_rq, se); in update_load_avg()
4728 decayed = update_cfs_rq_load_avg(now, cfs_rq); in update_load_avg()
4740 attach_entity_load_avg(cfs_rq, se); in update_load_avg()
4741 update_tg_load_avg(cfs_rq); in update_load_avg()
4748 detach_entity_load_avg(cfs_rq, se); in update_load_avg()
4749 update_tg_load_avg(cfs_rq); in update_load_avg()
4751 cfs_rq_util_change(cfs_rq, 0); in update_load_avg()
4754 update_tg_load_avg(cfs_rq); in update_load_avg()
4764 struct cfs_rq *cfs_rq = cfs_rq_of(se); in sync_entity_load_avg() local
4767 last_update_time = cfs_rq_last_update_time(cfs_rq); in sync_entity_load_avg()
4777 struct cfs_rq *cfs_rq = cfs_rq_of(se); in remove_entity_load_avg() local
4788 raw_spin_lock_irqsave(&cfs_rq->removed.lock, flags); in remove_entity_load_avg()
4789 ++cfs_rq->removed.nr; in remove_entity_load_avg()
4790 cfs_rq->removed.util_avg += se->avg.util_avg; in remove_entity_load_avg()
4791 cfs_rq->removed.load_avg += se->avg.load_avg; in remove_entity_load_avg()
4792 cfs_rq->removed.runnable_avg += se->avg.runnable_avg; in remove_entity_load_avg()
4793 raw_spin_unlock_irqrestore(&cfs_rq->removed.lock, flags); in remove_entity_load_avg()
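
remove_entity_load_avg() and the cfs_rq->removed handling in update_cfs_rq_load_avg() above implement deferred subtraction: a CPU that cannot take the target rq lock parks the departing entity's averages in a small lock-protected accumulator, and the owning CPU folds them out on its next update. The userspace sketch below shows that pattern; the toy_* types are illustrative, a pthread mutex stands in for the raw spinlock, and only util/load are modelled.

#include <stdio.h>
#include <pthread.h>

struct toy_removed {
	pthread_mutex_t lock;
	int nr;
	unsigned long util_avg, load_avg;
};

struct toy_cfs_rq {
	unsigned long util_avg, load_avg;
	struct toy_removed removed;
};

/* Remote CPU: a task migrated away, park its averages for later removal. */
static void toy_remove_entity_load_avg(struct toy_cfs_rq *cfs_rq,
				       unsigned long util, unsigned long load)
{
	pthread_mutex_lock(&cfs_rq->removed.lock);
	cfs_rq->removed.nr++;
	cfs_rq->removed.util_avg += util;
	cfs_rq->removed.load_avg += load;
	pthread_mutex_unlock(&cfs_rq->removed.lock);
}

/* Owning CPU: fold the parked amounts out of the cfs_rq averages. */
static void toy_update_cfs_rq_load_avg(struct toy_cfs_rq *cfs_rq)
{
	unsigned long r_util = 0, r_load = 0;

	pthread_mutex_lock(&cfs_rq->removed.lock);
	if (cfs_rq->removed.nr) {
		r_util = cfs_rq->removed.util_avg;
		r_load = cfs_rq->removed.load_avg;
		cfs_rq->removed.nr = 0;
		cfs_rq->removed.util_avg = 0;
		cfs_rq->removed.load_avg = 0;
	}
	pthread_mutex_unlock(&cfs_rq->removed.lock);

	/* clamp at zero, like sub_positive() */
	cfs_rq->util_avg -= (r_util > cfs_rq->util_avg) ? cfs_rq->util_avg : r_util;
	cfs_rq->load_avg -= (r_load > cfs_rq->load_avg) ? cfs_rq->load_avg : r_load;
}

int main(void)
{
	struct toy_cfs_rq cfs_rq = { .util_avg = 300, .load_avg = 2048 };

	pthread_mutex_init(&cfs_rq.removed.lock, NULL);
	toy_remove_entity_load_avg(&cfs_rq, 100, 1024);	/* migration seen on a remote CPU */
	toy_update_cfs_rq_load_avg(&cfs_rq);		/* later, on the owning CPU */

	printf("util_avg=%lu load_avg=%lu\n", cfs_rq.util_avg, cfs_rq.load_avg);
	return 0;
}
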
4796 static inline unsigned long cfs_rq_runnable_avg(struct cfs_rq *cfs_rq) in cfs_rq_runnable_avg() argument
4798 return cfs_rq->avg.runnable_avg; in cfs_rq_runnable_avg()
4801 static inline unsigned long cfs_rq_load_avg(struct cfs_rq *cfs_rq) in cfs_rq_load_avg() argument
4803 return cfs_rq->avg.load_avg; in cfs_rq_load_avg()
4828 static inline void util_est_enqueue(struct cfs_rq *cfs_rq, in util_est_enqueue() argument
4837 enqueued = cfs_rq->avg.util_est; in util_est_enqueue()
4839 WRITE_ONCE(cfs_rq->avg.util_est, enqueued); in util_est_enqueue()
4841 trace_sched_util_est_cfs_tp(cfs_rq); in util_est_enqueue()
4844 static inline void util_est_dequeue(struct cfs_rq *cfs_rq, in util_est_dequeue() argument
4853 enqueued = cfs_rq->avg.util_est; in util_est_dequeue()
4855 WRITE_ONCE(cfs_rq->avg.util_est, enqueued); in util_est_dequeue()
4857 trace_sched_util_est_cfs_tp(cfs_rq); in util_est_dequeue()
4862 static inline void util_est_update(struct cfs_rq *cfs_rq, in util_est_update() argument
5125 place_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags) in place_entity() argument
5127 u64 vslice, vruntime = avg_vruntime(cfs_rq); in place_entity()
5142 if (sched_feat(PLACE_LAG) && cfs_rq->nr_queued && se->vlag) { in place_entity()
5143 struct sched_entity *curr = cfs_rq->curr; in place_entity()
5200 load = cfs_rq->avg_load; in place_entity()
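
The place_entity() hits above show lag-preserving placement: the stored vlag is rescaled by (W + w) / W, with W the queue's current weight and w the incoming entity's weight, before being subtracted from the average vruntime, so the entity keeps the lag it left with even though its arrival shifts that average. A small sketch under those assumptions; place_vruntime is an illustrative name, and slice placement and the sched_feat() gating are omitted.

#include <stdio.h>
#include <stdint.h>

/* Scale the stored lag by (W + w) / W, then place the entity relative to V. */
static uint64_t place_vruntime(uint64_t V, int64_t vlag,
			       long queue_weight, long se_weight)
{
	int64_t lag = vlag;

	if (queue_weight) {
		lag *= queue_weight + se_weight;	/* (W + w) */
		lag /= queue_weight;			/* ....... / W */
	}
	return V - lag;		/* positive lag -> placed before the average */
}

int main(void)
{
	/* V = 2000, entity left with +100 of lag, queue weight 3072, own weight 1024 */
	printf("vruntime = %llu\n",
	       (unsigned long long)place_vruntime(2000, 100, 3072, 1024));
	return 0;
}
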
5232 static void check_enqueue_throttle(struct cfs_rq *cfs_rq);
5233 static inline int cfs_rq_throttled(struct cfs_rq *cfs_rq);
5239 enqueue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags) in enqueue_entity() argument
5241 bool curr = cfs_rq->curr == se; in enqueue_entity()
5248 place_entity(cfs_rq, se, flags); in enqueue_entity()
5250 update_curr(cfs_rq); in enqueue_entity()
5261 update_load_avg(cfs_rq, se, UPDATE_TG | DO_ATTACH); in enqueue_entity()
5275 place_entity(cfs_rq, se, flags); in enqueue_entity()
5277 account_entity_enqueue(cfs_rq, se); in enqueue_entity()
5284 update_stats_enqueue_fair(cfs_rq, se, flags); in enqueue_entity()
5286 __enqueue_entity(cfs_rq, se); in enqueue_entity()
5289 if (cfs_rq->nr_queued == 1) { in enqueue_entity()
5290 check_enqueue_throttle(cfs_rq); in enqueue_entity()
5291 list_add_leaf_cfs_rq(cfs_rq); in enqueue_entity()
5293 if (cfs_rq->pelt_clock_throttled) { in enqueue_entity()
5294 struct rq *rq = rq_of(cfs_rq); in enqueue_entity()
5296 cfs_rq->throttled_clock_pelt_time += rq_clock_pelt(rq) - in enqueue_entity()
5297 cfs_rq->throttled_clock_pelt; in enqueue_entity()
5298 cfs_rq->pelt_clock_throttled = 0; in enqueue_entity()
5307 struct cfs_rq *cfs_rq = cfs_rq_of(se); in __clear_buddies_next() local
5308 if (cfs_rq->next != se) in __clear_buddies_next()
5311 cfs_rq->next = NULL; in __clear_buddies_next()
5315 static void clear_buddies(struct cfs_rq *cfs_rq, struct sched_entity *se) in clear_buddies() argument
5317 if (cfs_rq->next == se) in clear_buddies()
5321 static __always_inline void return_cfs_rq_runtime(struct cfs_rq *cfs_rq);
5336 struct cfs_rq *cfs_rq = cfs_rq_of(se); in set_delayed() local
5338 cfs_rq->h_nr_runnable--; in set_delayed()
5356 struct cfs_rq *cfs_rq = cfs_rq_of(se); in clear_delayed() local
5358 cfs_rq->h_nr_runnable++; in clear_delayed()
5370 dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags) in dequeue_entity() argument
5375 update_curr(cfs_rq); in dequeue_entity()
5376 clear_buddies(cfs_rq, se); in dequeue_entity()
5392 !entity_eligible(cfs_rq, se)) { in dequeue_entity()
5393 update_load_avg(cfs_rq, se, 0); in dequeue_entity()
5411 update_load_avg(cfs_rq, se, action); in dequeue_entity()
5414 update_stats_dequeue_fair(cfs_rq, se, flags); in dequeue_entity()
5416 update_entity_lag(cfs_rq, se); in dequeue_entity()
5422 if (se != cfs_rq->curr) in dequeue_entity()
5423 __dequeue_entity(cfs_rq, se); in dequeue_entity()
5425 account_entity_dequeue(cfs_rq, se); in dequeue_entity()
5428 return_cfs_rq_runtime(cfs_rq); in dequeue_entity()
5439 update_min_vruntime(cfs_rq); in dequeue_entity()
5444 if (cfs_rq->nr_queued == 0) { in dequeue_entity()
5445 update_idle_cfs_rq_clock_pelt(cfs_rq); in dequeue_entity()
5447 if (throttled_hierarchy(cfs_rq)) { in dequeue_entity()
5448 struct rq *rq = rq_of(cfs_rq); in dequeue_entity()
5450 list_del_leaf_cfs_rq(cfs_rq); in dequeue_entity()
5451 cfs_rq->throttled_clock_pelt = rq_clock_pelt(rq); in dequeue_entity()
5452 cfs_rq->pelt_clock_throttled = 1; in dequeue_entity()
5461 set_next_entity(struct cfs_rq *cfs_rq, struct sched_entity *se) in set_next_entity() argument
5463 clear_buddies(cfs_rq, se); in set_next_entity()
5472 update_stats_wait_end_fair(cfs_rq, se); in set_next_entity()
5473 __dequeue_entity(cfs_rq, se); in set_next_entity()
5474 update_load_avg(cfs_rq, se, UPDATE_TG); in set_next_entity()
5476 set_protect_slice(cfs_rq, se); in set_next_entity()
5479 update_stats_curr_start(cfs_rq, se); in set_next_entity()
5480 WARN_ON_ONCE(cfs_rq->curr); in set_next_entity()
5481 cfs_rq->curr = se; in set_next_entity()
5489 rq_of(cfs_rq)->cfs.load.weight >= 2*se->load.weight) { in set_next_entity()
5511 pick_next_entity(struct rq *rq, struct cfs_rq *cfs_rq) in pick_next_entity() argument
5519 cfs_rq->next && entity_eligible(cfs_rq, cfs_rq->next)) { in pick_next_entity()
5521 WARN_ON_ONCE(cfs_rq->next->sched_delayed); in pick_next_entity()
5522 return cfs_rq->next; in pick_next_entity()
5525 se = pick_eevdf(cfs_rq); in pick_next_entity()
5536 static bool check_cfs_rq_runtime(struct cfs_rq *cfs_rq);
5538 static void put_prev_entity(struct cfs_rq *cfs_rq, struct sched_entity *prev) in put_prev_entity() argument
5545 update_curr(cfs_rq); in put_prev_entity()
5548 check_cfs_rq_runtime(cfs_rq); in put_prev_entity()
5551 update_stats_wait_start_fair(cfs_rq, prev); in put_prev_entity()
5553 __enqueue_entity(cfs_rq, prev); in put_prev_entity()
5555 update_load_avg(cfs_rq, prev, 0); in put_prev_entity()
5557 WARN_ON_ONCE(cfs_rq->curr != prev); in put_prev_entity()
5558 cfs_rq->curr = NULL; in put_prev_entity()
5562 entity_tick(struct cfs_rq *cfs_rq, struct sched_entity *curr, int queued) in entity_tick() argument
5567 update_curr(cfs_rq); in entity_tick()
5572 update_load_avg(cfs_rq, curr, UPDATE_TG); in entity_tick()
5581 resched_curr_lazy(rq_of(cfs_rq)); in entity_tick()
5658 struct cfs_rq *cfs_rq, u64 target_runtime) in __assign_cfs_rq_runtime() argument
5665 min_amount = target_runtime - cfs_rq->runtime_remaining; in __assign_cfs_rq_runtime()
5679 cfs_rq->runtime_remaining += amount; in __assign_cfs_rq_runtime()
5681 return cfs_rq->runtime_remaining > 0; in __assign_cfs_rq_runtime()
5685 static int assign_cfs_rq_runtime(struct cfs_rq *cfs_rq) in assign_cfs_rq_runtime() argument
5687 struct cfs_bandwidth *cfs_b = tg_cfs_bandwidth(cfs_rq->tg); in assign_cfs_rq_runtime()
5691 ret = __assign_cfs_rq_runtime(cfs_b, cfs_rq, sched_cfs_bandwidth_slice()); in assign_cfs_rq_runtime()
5697 static void __account_cfs_rq_runtime(struct cfs_rq *cfs_rq, u64 delta_exec) in __account_cfs_rq_runtime() argument
5700 cfs_rq->runtime_remaining -= delta_exec; in __account_cfs_rq_runtime()
5702 if (likely(cfs_rq->runtime_remaining > 0)) in __account_cfs_rq_runtime()
5705 if (cfs_rq->throttled) in __account_cfs_rq_runtime()
5711 if (!assign_cfs_rq_runtime(cfs_rq) && likely(cfs_rq->curr)) in __account_cfs_rq_runtime()
5712 resched_curr(rq_of(cfs_rq)); in __account_cfs_rq_runtime()
5716 void account_cfs_rq_runtime(struct cfs_rq *cfs_rq, u64 delta_exec) in account_cfs_rq_runtime() argument
5718 if (!cfs_bandwidth_used() || !cfs_rq->runtime_enabled) in account_cfs_rq_runtime()
5721 __account_cfs_rq_runtime(cfs_rq, delta_exec); in account_cfs_rq_runtime()
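
__assign_cfs_rq_runtime() and __account_cfs_rq_runtime() above implement the local runtime accounting for CFS bandwidth control: each cfs_rq burns runtime_remaining as its entities run and refills it in fixed slices from the task group's global pool, and when both run dry the queue has to be throttled. A toy model of that flow; toy_cfs_b/toy_cfs_rq and TOY_SLICE_NS are illustrative, and locking, hrtimers and rescheduling are omitted.

#include <stdio.h>
#include <stdint.h>

#define TOY_SLICE_NS	(5 * 1000 * 1000LL)	/* stand-in for sched_cfs_bandwidth_slice() */

struct toy_cfs_b  { int64_t runtime; };		/* global per-task-group pool */
struct toy_cfs_rq { struct toy_cfs_b *cfs_b; int64_t runtime_remaining; int throttled; };

/* Pull up to one slice from the pool; report whether we are back in credit. */
static int toy_assign_cfs_rq_runtime(struct toy_cfs_rq *cfs_rq)
{
	int64_t amount = TOY_SLICE_NS;

	if (cfs_rq->cfs_b->runtime < amount)
		amount = cfs_rq->cfs_b->runtime;
	cfs_rq->cfs_b->runtime -= amount;
	cfs_rq->runtime_remaining += amount;

	return cfs_rq->runtime_remaining > 0;
}

static void toy_account_cfs_rq_runtime(struct toy_cfs_rq *cfs_rq, int64_t delta_exec)
{
	cfs_rq->runtime_remaining -= delta_exec;
	if (cfs_rq->runtime_remaining > 0)
		return;
	if (!toy_assign_cfs_rq_runtime(cfs_rq))
		cfs_rq->throttled = 1;	/* kernel: resched_curr(), then throttle_cfs_rq() */
}

int main(void)
{
	struct toy_cfs_b cfs_b = { .runtime = 6 * 1000 * 1000LL };
	struct toy_cfs_rq cfs_rq = { .cfs_b = &cfs_b };

	toy_account_cfs_rq_runtime(&cfs_rq, 4 * 1000 * 1000LL);	/* refills with a 5 ms slice */
	toy_account_cfs_rq_runtime(&cfs_rq, 4 * 1000 * 1000LL);	/* pool runs dry -> throttled */

	printf("remaining=%lld pool=%lld throttled=%d\n",
	       (long long)cfs_rq.runtime_remaining,
	       (long long)cfs_b.runtime, cfs_rq.throttled);
	return 0;
}
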
5724 static inline int cfs_rq_throttled(struct cfs_rq *cfs_rq) in cfs_rq_throttled() argument
5726 return cfs_bandwidth_used() && cfs_rq->throttled; in cfs_rq_throttled()
5729 static inline bool cfs_rq_pelt_clock_throttled(struct cfs_rq *cfs_rq) in cfs_rq_pelt_clock_throttled() argument
5731 return cfs_bandwidth_used() && cfs_rq->pelt_clock_throttled; in cfs_rq_pelt_clock_throttled()
5735 static inline int throttled_hierarchy(struct cfs_rq *cfs_rq) in throttled_hierarchy() argument
5737 return cfs_bandwidth_used() && cfs_rq->throttle_count; in throttled_hierarchy()
5742 return throttled_hierarchy(task_group(p)->cfs_rq[dst_cpu]); in lb_throttled_hierarchy()
5755 struct cfs_rq *cfs_rq; in throttle_cfs_rq_work() local
5770 cfs_rq = cfs_rq_of(se); in throttle_cfs_rq_work()
5780 if (!cfs_rq->throttle_count) in throttle_cfs_rq_work()
5786 list_add(&p->throttle_node, &cfs_rq->throttled_limbo_list); in throttle_cfs_rq_work()
5834 struct cfs_rq *cfs_rq = cfs_rq_of(&p->se); in enqueue_throttled_task() local
5873 if (throttled_hierarchy(cfs_rq) && in enqueue_throttled_task()
5874 !task_current_donor(rq_of(cfs_rq), p)) { in enqueue_throttled_task()
5875 list_add(&p->throttle_node, &cfs_rq->throttled_limbo_list); in enqueue_throttled_task()
5888 struct cfs_rq *cfs_rq = tg->cfs_rq[cpu_of(rq)]; in tg_unthrottle_up() local
5891 if (--cfs_rq->throttle_count) in tg_unthrottle_up()
5894 if (cfs_rq->pelt_clock_throttled) { in tg_unthrottle_up()
5895 cfs_rq->throttled_clock_pelt_time += rq_clock_pelt(rq) - in tg_unthrottle_up()
5896 cfs_rq->throttled_clock_pelt; in tg_unthrottle_up()
5897 cfs_rq->pelt_clock_throttled = 0; in tg_unthrottle_up()
5900 if (cfs_rq->throttled_clock_self) { in tg_unthrottle_up()
5901 u64 delta = rq_clock(rq) - cfs_rq->throttled_clock_self; in tg_unthrottle_up()
5903 cfs_rq->throttled_clock_self = 0; in tg_unthrottle_up()
5908 cfs_rq->throttled_clock_self_time += delta; in tg_unthrottle_up()
5912 list_for_each_entry_safe(p, tmp, &cfs_rq->throttled_limbo_list, throttle_node) { in tg_unthrottle_up()
5915 enqueue_task_fair(rq_of(cfs_rq), p, ENQUEUE_WAKEUP); in tg_unthrottle_up()
5919 if (!cfs_rq_is_decayed(cfs_rq)) in tg_unthrottle_up()
5920 list_add_leaf_cfs_rq(cfs_rq); in tg_unthrottle_up()
5945 static void record_throttle_clock(struct cfs_rq *cfs_rq) in record_throttle_clock() argument
5947 struct rq *rq = rq_of(cfs_rq); in record_throttle_clock()
5949 if (cfs_rq_throttled(cfs_rq) && !cfs_rq->throttled_clock) in record_throttle_clock()
5950 cfs_rq->throttled_clock = rq_clock(rq); in record_throttle_clock()
5952 if (!cfs_rq->throttled_clock_self) in record_throttle_clock()
5953 cfs_rq->throttled_clock_self = rq_clock(rq); in record_throttle_clock()
5959 struct cfs_rq *cfs_rq = tg->cfs_rq[cpu_of(rq)]; in tg_throttle_down() local
5961 if (cfs_rq->throttle_count++) in tg_throttle_down()
5968 if (!cfs_rq->nr_queued) { in tg_throttle_down()
5969 list_del_leaf_cfs_rq(cfs_rq); in tg_throttle_down()
5970 cfs_rq->throttled_clock_pelt = rq_clock_pelt(rq); in tg_throttle_down()
5971 cfs_rq->pelt_clock_throttled = 1; in tg_throttle_down()
5974 WARN_ON_ONCE(cfs_rq->throttled_clock_self); in tg_throttle_down()
5975 WARN_ON_ONCE(!list_empty(&cfs_rq->throttled_limbo_list)); in tg_throttle_down()
5979 static bool throttle_cfs_rq(struct cfs_rq *cfs_rq) in throttle_cfs_rq() argument
5981 struct rq *rq = rq_of(cfs_rq); in throttle_cfs_rq()
5982 struct cfs_bandwidth *cfs_b = tg_cfs_bandwidth(cfs_rq->tg); in throttle_cfs_rq()
5987 if (__assign_cfs_rq_runtime(cfs_b, cfs_rq, 1)) { in throttle_cfs_rq()
5998 list_add_tail_rcu(&cfs_rq->throttled_list, in throttle_cfs_rq()
6008 walk_tg_tree_from(cfs_rq->tg, tg_throttle_down, tg_nop, (void *)rq); in throttle_cfs_rq()
6015 cfs_rq->throttled = 1; in throttle_cfs_rq()
6016 WARN_ON_ONCE(cfs_rq->throttled_clock); in throttle_cfs_rq()
6020 void unthrottle_cfs_rq(struct cfs_rq *cfs_rq) in unthrottle_cfs_rq() argument
6022 struct rq *rq = rq_of(cfs_rq); in unthrottle_cfs_rq()
6023 struct cfs_bandwidth *cfs_b = tg_cfs_bandwidth(cfs_rq->tg); in unthrottle_cfs_rq()
6024 struct sched_entity *se = cfs_rq->tg->se[cpu_of(rq)]; in unthrottle_cfs_rq()
6035 if (cfs_rq->runtime_enabled && cfs_rq->runtime_remaining <= 0) in unthrottle_cfs_rq()
6038 cfs_rq->throttled = 0; in unthrottle_cfs_rq()
6043 if (cfs_rq->throttled_clock) { in unthrottle_cfs_rq()
6044 cfs_b->throttled_time += rq_clock(rq) - cfs_rq->throttled_clock; in unthrottle_cfs_rq()
6045 cfs_rq->throttled_clock = 0; in unthrottle_cfs_rq()
6047 list_del_rcu(&cfs_rq->throttled_list); in unthrottle_cfs_rq()
6051 walk_tg_tree_from(cfs_rq->tg, tg_nop, tg_unthrottle_up, (void *)rq); in unthrottle_cfs_rq()
6053 if (!cfs_rq->load.weight) { in unthrottle_cfs_rq()
6054 if (!cfs_rq->on_list) in unthrottle_cfs_rq()
6075 struct cfs_rq *cursor, *tmp; in __cfsb_csd_unthrottle()
6112 static inline void __unthrottle_cfs_rq_async(struct cfs_rq *cfs_rq) in __unthrottle_cfs_rq_async() argument
6114 struct rq *rq = rq_of(cfs_rq); in __unthrottle_cfs_rq_async()
6118 unthrottle_cfs_rq(cfs_rq); in __unthrottle_cfs_rq_async()
6123 if (WARN_ON_ONCE(!list_empty(&cfs_rq->throttled_csd_list))) in __unthrottle_cfs_rq_async()
6127 list_add_tail(&cfs_rq->throttled_csd_list, &rq->cfsb_csd_list); in __unthrottle_cfs_rq_async()
6132 static void unthrottle_cfs_rq_async(struct cfs_rq *cfs_rq) in unthrottle_cfs_rq_async() argument
6134 lockdep_assert_rq_held(rq_of(cfs_rq)); in unthrottle_cfs_rq_async()
6136 if (WARN_ON_ONCE(!cfs_rq_throttled(cfs_rq) || in unthrottle_cfs_rq_async()
6137 cfs_rq->runtime_remaining <= 0)) in unthrottle_cfs_rq_async()
6140 __unthrottle_cfs_rq_async(cfs_rq); in unthrottle_cfs_rq_async()
6148 struct cfs_rq *cfs_rq, *tmp; in distribute_cfs_runtime() local
6154 list_for_each_entry_rcu(cfs_rq, &cfs_b->throttled_cfs_rq, in distribute_cfs_runtime()
6156 rq = rq_of(cfs_rq); in distribute_cfs_runtime()
6164 if (!cfs_rq_throttled(cfs_rq)) in distribute_cfs_runtime()
6168 if (!list_empty(&cfs_rq->throttled_csd_list)) in distribute_cfs_runtime()
6172 WARN_ON_ONCE(cfs_rq->runtime_remaining > 0); in distribute_cfs_runtime()
6175 runtime = -cfs_rq->runtime_remaining + 1; in distribute_cfs_runtime()
6182 cfs_rq->runtime_remaining += runtime; in distribute_cfs_runtime()
6185 if (cfs_rq->runtime_remaining > 0) { in distribute_cfs_runtime()
6187 unthrottle_cfs_rq_async(cfs_rq); in distribute_cfs_runtime()
6194 list_add_tail(&cfs_rq->throttled_csd_list, in distribute_cfs_runtime()
6205 list_for_each_entry_safe(cfs_rq, tmp, &local_unthrottle, in distribute_cfs_runtime()
6207 struct rq *rq = rq_of(cfs_rq); in distribute_cfs_runtime()
6211 list_del_init(&cfs_rq->throttled_csd_list); in distribute_cfs_runtime()
6213 if (cfs_rq_throttled(cfs_rq)) in distribute_cfs_runtime()
6214 unthrottle_cfs_rq(cfs_rq); in distribute_cfs_runtime()
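
distribute_cfs_runtime() above walks the throttled list after a period refill and hands each queue just enough runtime to go positive (capped by what remains in the pool), unthrottling the ones that do. Below is a simplified loop with the same shape; toy_cfs_rq is illustrative, and the per-rq locking, RCU list walk and CSD-based async unthrottle are omitted.

#include <stdio.h>
#include <stdint.h>

struct toy_cfs_rq { int64_t runtime_remaining; int throttled; };

static void toy_distribute_cfs_runtime(struct toy_cfs_rq *rqs, int nr, int64_t pool)
{
	for (int i = 0; i < nr && pool > 0; i++) {
		/* just enough to reach +1 ns, capped by what is left in the pool */
		int64_t runtime = -rqs[i].runtime_remaining + 1;

		if (runtime > pool)
			runtime = pool;
		pool -= runtime;
		rqs[i].runtime_remaining += runtime;

		if (rqs[i].runtime_remaining > 0)
			rqs[i].throttled = 0;	/* kernel: unthrottle_cfs_rq_async() */
	}
}

int main(void)
{
	struct toy_cfs_rq rqs[] = {
		{ .runtime_remaining = -3000, .throttled = 1 },
		{ .runtime_remaining = -9000, .throttled = 1 },
	};

	toy_distribute_cfs_runtime(rqs, 2, 5000);	/* 5000 ns left after the refill */

	printf("rq0: rem=%lld throttled=%d, rq1: rem=%lld throttled=%d\n",
	       (long long)rqs[0].runtime_remaining, rqs[0].throttled,
	       (long long)rqs[1].runtime_remaining, rqs[1].throttled);
	return 0;
}
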
6335 static void __return_cfs_rq_runtime(struct cfs_rq *cfs_rq) in __return_cfs_rq_runtime() argument
6337 struct cfs_bandwidth *cfs_b = tg_cfs_bandwidth(cfs_rq->tg); in __return_cfs_rq_runtime()
6338 s64 slack_runtime = cfs_rq->runtime_remaining - min_cfs_rq_runtime; in __return_cfs_rq_runtime()
6355 cfs_rq->runtime_remaining -= slack_runtime; in __return_cfs_rq_runtime()
6358 static __always_inline void return_cfs_rq_runtime(struct cfs_rq *cfs_rq) in return_cfs_rq_runtime() argument
6363 if (!cfs_rq->runtime_enabled || cfs_rq->nr_queued) in return_cfs_rq_runtime()
6366 __return_cfs_rq_runtime(cfs_rq); in return_cfs_rq_runtime()
6403 static void check_enqueue_throttle(struct cfs_rq *cfs_rq) in check_enqueue_throttle() argument
6409 if (!cfs_rq->runtime_enabled || cfs_rq->curr) in check_enqueue_throttle()
6413 if (cfs_rq_throttled(cfs_rq)) in check_enqueue_throttle()
6417 account_cfs_rq_runtime(cfs_rq, 0); in check_enqueue_throttle()
6418 if (cfs_rq->runtime_remaining <= 0) in check_enqueue_throttle()
6419 throttle_cfs_rq(cfs_rq); in check_enqueue_throttle()
6424 struct cfs_rq *pcfs_rq, *cfs_rq; in sync_throttle() local
6432 cfs_rq = tg->cfs_rq[cpu]; in sync_throttle()
6433 pcfs_rq = tg->parent->cfs_rq[cpu]; in sync_throttle()
6435 cfs_rq->throttle_count = pcfs_rq->throttle_count; in sync_throttle()
6436 cfs_rq->throttled_clock_pelt = rq_clock_pelt(cpu_rq(cpu)); in sync_throttle()
6445 if (cfs_rq->throttle_count) in sync_throttle()
6446 cfs_rq->pelt_clock_throttled = 1; in sync_throttle()
6450 static bool check_cfs_rq_runtime(struct cfs_rq *cfs_rq) in check_cfs_rq_runtime() argument
6455 if (likely(!cfs_rq->runtime_enabled || cfs_rq->runtime_remaining > 0)) in check_cfs_rq_runtime()
6462 if (cfs_rq_throttled(cfs_rq)) in check_cfs_rq_runtime()
6465 return throttle_cfs_rq(cfs_rq); in check_cfs_rq_runtime()
6554 static void init_cfs_rq_runtime(struct cfs_rq *cfs_rq) in init_cfs_rq_runtime() argument
6556 cfs_rq->runtime_enabled = 0; in init_cfs_rq_runtime()
6557 INIT_LIST_HEAD(&cfs_rq->throttled_list); in init_cfs_rq_runtime()
6558 INIT_LIST_HEAD(&cfs_rq->throttled_csd_list); in init_cfs_rq_runtime()
6559 INIT_LIST_HEAD(&cfs_rq->throttled_limbo_list); in init_cfs_rq_runtime()
6625 struct cfs_rq *cfs_rq = tg->cfs_rq[cpu_of(rq)]; in update_runtime_enabled() local
6628 cfs_rq->runtime_enabled = cfs_b->quota != RUNTIME_INF; in update_runtime_enabled()
6654 struct cfs_rq *cfs_rq = tg->cfs_rq[cpu_of(rq)]; in unthrottle_offline_cfs_rqs() local
6656 if (!cfs_rq->runtime_enabled) in unthrottle_offline_cfs_rqs()
6663 cfs_rq->runtime_enabled = 0; in unthrottle_offline_cfs_rqs()
6665 if (!cfs_rq_throttled(cfs_rq)) in unthrottle_offline_cfs_rqs()
6672 cfs_rq->runtime_remaining = 1; in unthrottle_offline_cfs_rqs()
6673 unthrottle_cfs_rq(cfs_rq); in unthrottle_offline_cfs_rqs()
6682 struct cfs_rq *cfs_rq = task_cfs_rq(p); in cfs_task_bw_constrained() local
6687 if (cfs_rq->runtime_enabled || in cfs_task_bw_constrained()
6688 tg_cfs_bandwidth(cfs_rq->tg)->hierarchical_quota != RUNTIME_INF) in cfs_task_bw_constrained()
6722 static void account_cfs_rq_runtime(struct cfs_rq *cfs_rq, u64 delta_exec) {} in account_cfs_rq_runtime() argument
6723 static bool check_cfs_rq_runtime(struct cfs_rq *cfs_rq) { return false; } in check_cfs_rq_runtime() argument
6724 static void check_enqueue_throttle(struct cfs_rq *cfs_rq) {} in check_enqueue_throttle() argument
6726 static __always_inline void return_cfs_rq_runtime(struct cfs_rq *cfs_rq) {} in return_cfs_rq_runtime() argument
6731 static void record_throttle_clock(struct cfs_rq *cfs_rq) {} in record_throttle_clock() argument
6733 static inline int cfs_rq_throttled(struct cfs_rq *cfs_rq) in cfs_rq_throttled() argument
6738 static inline bool cfs_rq_pelt_clock_throttled(struct cfs_rq *cfs_rq) in cfs_rq_pelt_clock_throttled() argument
6743 static inline int throttled_hierarchy(struct cfs_rq *cfs_rq) in throttled_hierarchy() argument
6755 static void init_cfs_rq_runtime(struct cfs_rq *cfs_rq) {} in init_cfs_rq_runtime() argument
6884 struct cfs_rq *cfs_rq = cfs_rq_of(se); in requeue_delayed_entity() local
6895 update_entity_lag(cfs_rq, se); in requeue_delayed_entity()
6897 cfs_rq->nr_queued--; in requeue_delayed_entity()
6898 if (se != cfs_rq->curr) in requeue_delayed_entity()
6899 __dequeue_entity(cfs_rq, se); in requeue_delayed_entity()
6901 place_entity(cfs_rq, se, 0); in requeue_delayed_entity()
6902 if (se != cfs_rq->curr) in requeue_delayed_entity()
6903 __enqueue_entity(cfs_rq, se); in requeue_delayed_entity()
6904 cfs_rq->nr_queued++; in requeue_delayed_entity()
6908 update_load_avg(cfs_rq, se, 0); in requeue_delayed_entity()
6920 struct cfs_rq *cfs_rq; in enqueue_task_fair() local
6962 cfs_rq = cfs_rq_of(se); in enqueue_task_fair()
6973 enqueue_entity(cfs_rq, se, flags); in enqueue_task_fair()
6974 slice = cfs_rq_min_slice(cfs_rq); in enqueue_task_fair()
6976 cfs_rq->h_nr_runnable += h_nr_runnable; in enqueue_task_fair()
6977 cfs_rq->h_nr_queued++; in enqueue_task_fair()
6978 cfs_rq->h_nr_idle += h_nr_idle; in enqueue_task_fair()
6980 if (cfs_rq_is_idle(cfs_rq)) in enqueue_task_fair()
6987 cfs_rq = cfs_rq_of(se); in enqueue_task_fair()
6989 update_load_avg(cfs_rq, se, UPDATE_TG); in enqueue_task_fair()
6994 if (se != cfs_rq->curr) in enqueue_task_fair()
6996 slice = cfs_rq_min_slice(cfs_rq); in enqueue_task_fair()
6998 cfs_rq->h_nr_runnable += h_nr_runnable; in enqueue_task_fair()
6999 cfs_rq->h_nr_queued++; in enqueue_task_fair()
7000 cfs_rq->h_nr_idle += h_nr_idle; in enqueue_task_fair()
7002 if (cfs_rq_is_idle(cfs_rq)) in enqueue_task_fair()
7059 struct cfs_rq *cfs_rq; in dequeue_entities() local
7071 cfs_rq = cfs_rq_of(se); in dequeue_entities()
7073 if (!dequeue_entity(cfs_rq, se, flags)) { in dequeue_entities()
7077 slice = cfs_rq_min_slice(cfs_rq); in dequeue_entities()
7081 cfs_rq->h_nr_runnable -= h_nr_runnable; in dequeue_entities()
7082 cfs_rq->h_nr_queued -= h_nr_queued; in dequeue_entities()
7083 cfs_rq->h_nr_idle -= h_nr_idle; in dequeue_entities()
7085 if (cfs_rq_is_idle(cfs_rq)) in dequeue_entities()
7088 if (throttled_hierarchy(cfs_rq) && task_throttled) in dequeue_entities()
7089 record_throttle_clock(cfs_rq); in dequeue_entities()
7092 if (cfs_rq->load.weight) { in dequeue_entities()
7093 slice = cfs_rq_min_slice(cfs_rq); in dequeue_entities()
7110 cfs_rq = cfs_rq_of(se); in dequeue_entities()
7112 update_load_avg(cfs_rq, se, UPDATE_TG); in dequeue_entities()
7117 if (se != cfs_rq->curr) in dequeue_entities()
7119 slice = cfs_rq_min_slice(cfs_rq); in dequeue_entities()
7121 cfs_rq->h_nr_runnable -= h_nr_runnable; in dequeue_entities()
7122 cfs_rq->h_nr_queued -= h_nr_queued; in dequeue_entities()
7123 cfs_rq->h_nr_idle -= h_nr_idle; in dequeue_entities()
7125 if (cfs_rq_is_idle(cfs_rq)) in dequeue_entities()
7128 if (throttled_hierarchy(cfs_rq) && task_throttled) in dequeue_entities()
7129 record_throttle_clock(cfs_rq); in dequeue_entities()
7226 struct cfs_rq *cfs_rq; in cpu_load_without() local
7233 cfs_rq = &rq->cfs; in cpu_load_without()
7234 load = READ_ONCE(cfs_rq->avg.load_avg); in cpu_load_without()
7249 struct cfs_rq *cfs_rq; in cpu_runnable_without() local
7256 cfs_rq = &rq->cfs; in cpu_runnable_without()
7257 runnable = READ_ONCE(cfs_rq->avg.runnable_avg); in cpu_runnable_without()
7991 struct cfs_rq *cfs_rq = &cpu_rq(cpu)->cfs; in cpu_util() local
7992 unsigned long util = READ_ONCE(cfs_rq->avg.util_avg); in cpu_util()
7996 runnable = READ_ONCE(cfs_rq->avg.runnable_avg); in cpu_util()
8014 util_est = READ_ONCE(cfs_rq->avg.util_est); in cpu_util()
8742 struct cfs_rq *cfs_rq = task_cfs_rq(donor); in check_preempt_wakeup_fair() local
8806 cfs_rq = cfs_rq_of(se); in check_preempt_wakeup_fair()
8807 update_curr(cfs_rq); in check_preempt_wakeup_fair()
8817 if (__pick_eevdf(cfs_rq, !do_preempt_short) == pse) in check_preempt_wakeup_fair()
8821 update_protect_slice(cfs_rq, se); in check_preempt_wakeup_fair()
8835 struct cfs_rq *cfs_rq; in pick_task_fair() local
8840 cfs_rq = &rq->cfs; in pick_task_fair()
8841 if (!cfs_rq->nr_queued) in pick_task_fair()
8848 if (cfs_rq->curr && cfs_rq->curr->on_rq) in pick_task_fair()
8849 update_curr(cfs_rq); in pick_task_fair()
8851 throttled |= check_cfs_rq_runtime(cfs_rq); in pick_task_fair()
8853 se = pick_next_entity(rq, cfs_rq); in pick_task_fair()
8856 cfs_rq = group_cfs_rq(se); in pick_task_fair()
8857 } while (cfs_rq); in pick_task_fair()
8900 struct cfs_rq *cfs_rq; in pick_next_task_fair() local
8902 while (!(cfs_rq = is_same_group(se, pse))) { in pick_next_task_fair()
8916 put_prev_entity(cfs_rq, pse); in pick_next_task_fair()
8917 set_next_entity(cfs_rq, se); in pick_next_task_fair()
8980 struct cfs_rq *cfs_rq; in put_prev_task_fair() local
8983 cfs_rq = cfs_rq_of(se); in put_prev_task_fair()
8984 put_prev_entity(cfs_rq, se); in put_prev_task_fair()
8994 struct cfs_rq *cfs_rq = task_cfs_rq(curr); in yield_task_fair() local
9003 clear_buddies(cfs_rq, se); in yield_task_fair()
9009 update_curr(cfs_rq); in yield_task_fair()
9355 struct cfs_rq *dst_cfs_rq; in task_is_ineligible_on_dst_cpu()
9358 dst_cfs_rq = task_group(p)->cfs_rq[dest_cpu]; in task_is_ineligible_on_dst_cpu()
9722 static inline bool cfs_rq_has_blocked(struct cfs_rq *cfs_rq) in cfs_rq_has_blocked() argument
9724 if (cfs_rq->avg.load_avg) in cfs_rq_has_blocked()
9727 if (cfs_rq->avg.util_avg) in cfs_rq_has_blocked()
9761 static inline bool cfs_rq_has_blocked(struct cfs_rq *cfs_rq) { return false; } in cfs_rq_has_blocked() argument
9787 struct cfs_rq *cfs_rq, *pos; in __update_blocked_fair() local
9795 for_each_leaf_cfs_rq_safe(rq, cfs_rq, pos) { in __update_blocked_fair()
9798 if (update_cfs_rq_load_avg(cfs_rq_clock_pelt(cfs_rq), cfs_rq)) { in __update_blocked_fair()
9799 update_tg_load_avg(cfs_rq); in __update_blocked_fair()
9801 if (cfs_rq->nr_queued == 0) in __update_blocked_fair()
9802 update_idle_cfs_rq_clock_pelt(cfs_rq); in __update_blocked_fair()
9804 if (cfs_rq == &rq->cfs) in __update_blocked_fair()
9809 se = cfs_rq->tg->se[cpu]; in __update_blocked_fair()
9817 if (cfs_rq_is_decayed(cfs_rq)) in __update_blocked_fair()
9818 list_del_leaf_cfs_rq(cfs_rq); in __update_blocked_fair()
9821 if (cfs_rq_has_blocked(cfs_rq)) in __update_blocked_fair()
9833 static void update_cfs_rq_h_load(struct cfs_rq *cfs_rq) in update_cfs_rq_h_load() argument
9835 struct rq *rq = rq_of(cfs_rq); in update_cfs_rq_h_load()
9836 struct sched_entity *se = cfs_rq->tg->se[cpu_of(rq)]; in update_cfs_rq_h_load()
9840 if (cfs_rq->last_h_load_update == now) in update_cfs_rq_h_load()
9843 WRITE_ONCE(cfs_rq->h_load_next, NULL); in update_cfs_rq_h_load()
9845 cfs_rq = cfs_rq_of(se); in update_cfs_rq_h_load()
9846 WRITE_ONCE(cfs_rq->h_load_next, se); in update_cfs_rq_h_load()
9847 if (cfs_rq->last_h_load_update == now) in update_cfs_rq_h_load()
9852 cfs_rq->h_load = cfs_rq_load_avg(cfs_rq); in update_cfs_rq_h_load()
9853 cfs_rq->last_h_load_update = now; in update_cfs_rq_h_load()
9856 while ((se = READ_ONCE(cfs_rq->h_load_next)) != NULL) { in update_cfs_rq_h_load()
9857 load = cfs_rq->h_load; in update_cfs_rq_h_load()
9859 cfs_rq_load_avg(cfs_rq) + 1); in update_cfs_rq_h_load()
9860 cfs_rq = group_cfs_rq(se); in update_cfs_rq_h_load()
9861 cfs_rq->h_load = load; in update_cfs_rq_h_load()
9862 cfs_rq->last_h_load_update = now; in update_cfs_rq_h_load()
9868 struct cfs_rq *cfs_rq = task_cfs_rq(p); in task_h_load() local
9870 update_cfs_rq_h_load(cfs_rq); in task_h_load()
9871 return div64_ul(p->se.avg.load_avg * cfs_rq->h_load, in task_h_load()
9872 cfs_rq_load_avg(cfs_rq) + 1); in task_h_load()
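
update_cfs_rq_h_load() and task_h_load() above compute a task's hierarchical load: starting from the root, each level receives the share of its parent's h_load that its group entity contributes, and the task finally gets its share of the leaf cfs_rq's h_load. A two-level sketch of that chain; toy_cfs_rq and toy_share() are illustrative, and the cached h_load_next walk is left out.

#include <stdio.h>

struct toy_cfs_rq { unsigned long load_avg, h_load; };

/* One step down the hierarchy: the child's share of its parent's h_load. */
static unsigned long toy_share(unsigned long parent_h_load, unsigned long se_load_avg,
			       unsigned long parent_load_avg)
{
	return parent_h_load * se_load_avg / (parent_load_avg + 1);	/* +1 avoids /0 */
}

int main(void)
{
	struct toy_cfs_rq root = { .load_avg = 2048 };
	struct toy_cfs_rq grp  = { .load_avg = 1024 };	/* the group's own queue */
	unsigned long grp_se_load = 512;		/* group entity queued on root */
	unsigned long task_load   = 256;		/* task queued on the group */

	root.h_load = root.load_avg;			/* top level: h_load == load_avg */
	grp.h_load  = toy_share(root.h_load, grp_se_load, root.load_avg);

	printf("task_h_load = %lu\n", toy_share(grp.h_load, task_load, grp.load_avg));
	return 0;
}
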
9877 struct cfs_rq *cfs_rq = &rq->cfs; in __update_blocked_fair() local
9880 decayed = update_cfs_rq_load_avg(cfs_rq_clock_pelt(cfs_rq), cfs_rq); in __update_blocked_fair()
9881 if (cfs_rq_has_blocked(cfs_rq)) in __update_blocked_fair()
13021 struct cfs_rq *cfs_rq = cfs_rq_of(se); in se_fi_update() local
13024 if (cfs_rq->forceidle_seq == fi_seq) in se_fi_update()
13026 cfs_rq->forceidle_seq = fi_seq; in se_fi_update()
13029 cfs_rq->min_vruntime_fi = cfs_rq->min_vruntime; in se_fi_update()
13049 struct cfs_rq *cfs_rqa; in cfs_prio_less()
13050 struct cfs_rq *cfs_rqb; in cfs_prio_less()
13060 while (sea->cfs_rq->tg != seb->cfs_rq->tg) { in cfs_prio_less()
13073 cfs_rqa = sea->cfs_rq; in cfs_prio_less()
13074 cfs_rqb = seb->cfs_rq; in cfs_prio_less()
13093 struct cfs_rq *cfs_rq; in task_is_throttled_fair() local
13096 cfs_rq = task_group(p)->cfs_rq[cpu]; in task_is_throttled_fair()
13098 cfs_rq = &cpu_rq(cpu)->cfs; in task_is_throttled_fair()
13100 return throttled_hierarchy(cfs_rq); in task_is_throttled_fair()
13116 struct cfs_rq *cfs_rq; in task_tick_fair() local
13120 cfs_rq = cfs_rq_of(se); in task_tick_fair()
13121 entity_tick(cfs_rq, se, queued); in task_tick_fair()
13175 struct cfs_rq *cfs_rq = cfs_rq_of(se); in propagate_entity_cfs_rq() local
13183 if (!cfs_rq_pelt_clock_throttled(cfs_rq)) in propagate_entity_cfs_rq()
13184 list_add_leaf_cfs_rq(cfs_rq); in propagate_entity_cfs_rq()
13190 cfs_rq = cfs_rq_of(se); in propagate_entity_cfs_rq()
13192 update_load_avg(cfs_rq, se, UPDATE_TG); in propagate_entity_cfs_rq()
13194 if (!cfs_rq_pelt_clock_throttled(cfs_rq)) in propagate_entity_cfs_rq()
13195 list_add_leaf_cfs_rq(cfs_rq); in propagate_entity_cfs_rq()
13198 assert_list_leaf_cfs_rq(rq_of(cfs_rq)); in propagate_entity_cfs_rq()
13206 struct cfs_rq *cfs_rq = cfs_rq_of(se); in detach_entity_cfs_rq() local
13218 update_load_avg(cfs_rq, se, 0); in detach_entity_cfs_rq()
13219 detach_entity_load_avg(cfs_rq, se); in detach_entity_cfs_rq()
13220 update_tg_load_avg(cfs_rq); in detach_entity_cfs_rq()
13226 struct cfs_rq *cfs_rq = cfs_rq_of(se); in attach_entity_cfs_rq() local
13229 update_load_avg(cfs_rq, se, sched_feat(ATTACH_AGE_LOAD) ? 0 : SKIP_AGE_LOAD); in attach_entity_cfs_rq()
13230 attach_entity_load_avg(cfs_rq, se); in attach_entity_cfs_rq()
13231 update_tg_load_avg(cfs_rq); in attach_entity_cfs_rq()
13309 struct cfs_rq *cfs_rq = cfs_rq_of(se); in set_next_task_fair() local
13311 set_next_entity(cfs_rq, se); in set_next_task_fair()
13313 account_cfs_rq_runtime(cfs_rq, 0); in set_next_task_fair()
13319 void init_cfs_rq(struct cfs_rq *cfs_rq) in init_cfs_rq() argument
13321 cfs_rq->tasks_timeline = RB_ROOT_CACHED; in init_cfs_rq()
13322 cfs_rq->min_vruntime = (u64)(-(1LL << 20)); in init_cfs_rq()
13323 raw_spin_lock_init(&cfs_rq->removed.lock); in init_cfs_rq()
13349 if (tg->cfs_rq) in free_fair_sched_group()
13350 kfree(tg->cfs_rq[i]); in free_fair_sched_group()
13355 kfree(tg->cfs_rq); in free_fair_sched_group()
13362 struct cfs_rq *cfs_rq; in alloc_fair_sched_group() local
13365 tg->cfs_rq = kcalloc(nr_cpu_ids, sizeof(cfs_rq), GFP_KERNEL); in alloc_fair_sched_group()
13366 if (!tg->cfs_rq) in alloc_fair_sched_group()
13377 cfs_rq = kzalloc_node(sizeof(struct cfs_rq), in alloc_fair_sched_group()
13379 if (!cfs_rq) in alloc_fair_sched_group()
13387 init_cfs_rq(cfs_rq); in alloc_fair_sched_group()
13388 init_tg_cfs_entry(tg, cfs_rq, se, i, parent->se[i]); in alloc_fair_sched_group()
13395 kfree(cfs_rq); in alloc_fair_sched_group()
13425 struct cfs_rq *cfs_rq = tg->cfs_rq[cpu]; in unregister_fair_sched_group() local
13436 list_del_leaf_cfs_rq(cfs_rq); in unregister_fair_sched_group()
13445 if (cfs_rq->on_list) { in unregister_fair_sched_group()
13447 list_del_leaf_cfs_rq(cfs_rq); in unregister_fair_sched_group()
13452 void init_tg_cfs_entry(struct task_group *tg, struct cfs_rq *cfs_rq, in init_tg_cfs_entry() argument
13458 cfs_rq->tg = tg; in init_tg_cfs_entry()
13459 cfs_rq->rq = rq; in init_tg_cfs_entry()
13460 init_cfs_rq_runtime(cfs_rq); in init_tg_cfs_entry()
13462 tg->cfs_rq[cpu] = cfs_rq; in init_tg_cfs_entry()
13470 se->cfs_rq = &rq->cfs; in init_tg_cfs_entry()
13473 se->cfs_rq = parent->my_q; in init_tg_cfs_entry()
13477 se->my_q = cfs_rq; in init_tg_cfs_entry()
13557 struct cfs_rq *grp_cfs_rq = tg->cfs_rq[i]; in sched_group_set_idle()
13574 struct cfs_rq *cfs_rq = cfs_rq_of(se); in sched_group_set_idle() local
13579 cfs_rq->h_nr_idle += idle_task_delta; in sched_group_set_idle()
13582 if (cfs_rq_is_idle(cfs_rq)) in sched_group_set_idle()
13672 struct cfs_rq *cfs_rq, *pos; in print_cfs_stats() local
13675 for_each_leaf_cfs_rq_safe(cpu_rq(cpu), cfs_rq, pos) in print_cfs_stats()
13676 print_cfs_rq(m, cpu, cfs_rq); in print_cfs_stats()