Lines matching refs:avg in kernel/sched/fair.c
657 s64 avg = cfs_rq->avg_vruntime; in avg_vruntime() local
663 avg += entity_key(cfs_rq, curr) * weight; in avg_vruntime()
669 if (avg < 0) in avg_vruntime()
670 avg -= (load - 1); in avg_vruntime()
671 avg = div_s64(avg, load); in avg_vruntime()
674 return cfs_rq->min_vruntime + avg; in avg_vruntime()
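
The avg_vruntime() lines above (657-674) build a load-weighted zero-lag point: avg_vruntime accumulates sum(w_i * key_i) with key_i = vruntime_i - min_vruntime, and the (avg < 0) adjustment at line 670 makes the signed division round toward negative infinity instead of toward zero. A minimal standalone sketch of that rounding, with illustrative names:

#include <stdint.h>

/* Floor-dividing weighted average, as in avg_vruntime(): keys are kept
 * relative to min_vruntime so the s64 products stay small. */
static int64_t avg_vruntime_sketch(int64_t avg_sum, /* sum(w_i * key_i) */
                                   int64_t load,    /* sum(w_i)         */
                                   int64_t min_vruntime)
{
    if (load) {
        if (avg_sum < 0)            /* bias so the division floors */
            avg_sum -= (load - 1);
        avg_sum /= load;
    }
    return min_vruntime + avg_sum;
}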
725 s64 avg = cfs_rq->avg_vruntime; in vruntime_eligible() local
731 avg += entity_key(cfs_rq, curr) * weight; in vruntime_eligible()
735 return avg >= (s64)(vruntime - cfs_rq->min_vruntime) * load; in vruntime_eligible()
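
vruntime_eligible() (725-735) tests against the same average without dividing: an entity is eligible when its key is at or below the weighted mean, i.e. when sum(w_i * key_i) >= key * load. A hedged standalone sketch:

#include <stdbool.h>
#include <stdint.h>

/* Cross-multiplied eligibility test, as in vruntime_eligible(): avoids
 * the division (and its rounding) entirely. */
static bool vruntime_eligible_sketch(int64_t avg_sum, int64_t load,
                                     int64_t vruntime, int64_t min_vruntime)
{
    return avg_sum >= (vruntime - min_vruntime) * load;
}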
1074 struct sched_avg *sa = &se->avg; in init_entity_runnable_average()
1121 struct sched_avg *sa = &se->avg; in post_init_entity_util_avg()
1123 long cap = (long)(cpu_scale - cfs_rq->avg.util_avg) / 2; in post_init_entity_util_avg()
1136 se->avg.last_update_time = cfs_rq_clock_pelt(cfs_rq); in post_init_entity_util_avg()
1141 if (cfs_rq->avg.util_avg != 0) { in post_init_entity_util_avg()
1142 sa->util_avg = cfs_rq->avg.util_avg * se_weight(se); in post_init_entity_util_avg()
1143 sa->util_avg /= (cfs_rq->avg.load_avg + 1); in post_init_entity_util_avg()
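
post_init_entity_util_avg() (1121-1143) seeds a new task's util_avg from its cfs_rq: a weight-proportional share of the queue's util_avg, capped at half the spare capacity so repeated forks cannot saturate the estimate. A sketch of that seeding under simplified scalar arguments (names are illustrative, not the kernel's):

/* Seed util_avg for a freshly forked task; mirrors the cap and the
 * weight-proportional share visible at lines 1123 and 1141-1143. */
static unsigned long seed_util_avg_sketch(unsigned long cfs_util_avg,
                                          unsigned long cfs_load_avg,
                                          unsigned long se_weight,
                                          unsigned long cpu_scale)
{
    long cap = (long)(cpu_scale - cfs_util_avg) / 2;
    unsigned long util = 0;

    if (cap > 0) {
        if (cfs_util_avg != 0) {
            util = cfs_util_avg * se_weight / (cfs_load_avg + 1);
            if (util > (unsigned long)cap)
                util = cap;
        } else {
            util = cap;     /* idle rq: start at half the spare capacity */
        }
    }
    return util;
}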
2780 delta = p->se.avg.load_sum; in numa_get_avg_runtime()
3754 cfs_rq->avg.load_avg += se->avg.load_avg; in enqueue_load_avg()
3755 cfs_rq->avg.load_sum += se_weight(se) * se->avg.load_sum; in enqueue_load_avg()
3761 sub_positive(&cfs_rq->avg.load_avg, se->avg.load_avg); in dequeue_load_avg()
3762 sub_positive(&cfs_rq->avg.load_sum, se_weight(se) * se->avg.load_sum); in dequeue_load_avg()
3764 cfs_rq->avg.load_sum = max_t(u32, cfs_rq->avg.load_sum, in dequeue_load_avg()
3765 cfs_rq->avg.load_avg * PELT_MIN_DIVIDER); in dequeue_load_avg()
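
enqueue_load_avg()/dequeue_load_avg() (3754-3765) show the standard PELT bookkeeping pair: additions are plain, removals go through sub_positive() so rounding and concurrent decay cannot underflow, and _sum is then floored against _avg * PELT_MIN_DIVIDER to keep the two fields mutually consistent. A hedged, non-atomic sketch of the dequeue side (the divider value is illustrative; the kernel derives it from LOAD_AVG_MAX):

#include <stdint.h>

#define PELT_MIN_DIVIDER_SKETCH 1024    /* illustrative, not the kernel value */

/* Clamp-at-zero subtraction, standing in for sub_positive(). */
static void sub_positive_sketch(unsigned long *v, unsigned long delta)
{
    *v = (*v > delta) ? *v - delta : 0;
}

static void dequeue_load_sketch(unsigned long *load_avg, uint64_t *load_sum,
                                unsigned long se_avg, uint64_t se_sum)
{
    sub_positive_sketch(load_avg, se_avg);
    *load_sum = (*load_sum > se_sum) ? *load_sum - se_sum : 0;

    /* re-establish load_sum >= load_avg * min-divider (lines 3764-3765) */
    if (*load_sum < (uint64_t)*load_avg * PELT_MIN_DIVIDER_SKETCH)
        *load_sum = (uint64_t)*load_avg * PELT_MIN_DIVIDER_SKETCH;
}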
3799 u32 divider = get_pelt_divider(&se->avg); in reweight_entity()
3801 se->avg.load_avg = div_u64(se_weight(se) * se->avg.load_sum, divider); in reweight_entity()
3917 load = max(scale_load_down(cfs_rq->load.weight), cfs_rq->avg.load_avg); in calc_group_shares()
4019 return u64_u32_load_copy(cfs_rq->avg.last_update_time, in cfs_rq_last_update_time()
4056 if (!load_avg_is_decayed(&cfs_rq->avg)) in cfs_rq_is_decayed()
4102 delta = cfs_rq->avg.load_avg - cfs_rq->tg_load_avg_contrib; in update_tg_load_avg()
4105 cfs_rq->tg_load_avg_contrib = cfs_rq->avg.load_avg; in update_tg_load_avg()
4174 if (!(se->avg.last_update_time && prev)) in set_task_rq_fair()
4181 se->avg.last_update_time = n_last_update_time; in set_task_rq_fair()
4254 long delta_sum, delta_avg = gcfs_rq->avg.util_avg - se->avg.util_avg; in update_tg_cfs_util()
4265 divider = get_pelt_divider(&cfs_rq->avg); in update_tg_cfs_util()
4269 se->avg.util_avg = gcfs_rq->avg.util_avg; in update_tg_cfs_util()
4270 new_sum = se->avg.util_avg * divider; in update_tg_cfs_util()
4271 delta_sum = (long)new_sum - (long)se->avg.util_sum; in update_tg_cfs_util()
4272 se->avg.util_sum = new_sum; in update_tg_cfs_util()
4275 add_positive(&cfs_rq->avg.util_avg, delta_avg); in update_tg_cfs_util()
4276 add_positive(&cfs_rq->avg.util_sum, delta_sum); in update_tg_cfs_util()
4279 cfs_rq->avg.util_sum = max_t(u32, cfs_rq->avg.util_sum, in update_tg_cfs_util()
4280 cfs_rq->avg.util_avg * PELT_MIN_DIVIDER); in update_tg_cfs_util()
4286 long delta_sum, delta_avg = gcfs_rq->avg.runnable_avg - se->avg.runnable_avg; in update_tg_cfs_runnable()
4297 divider = get_pelt_divider(&cfs_rq->avg); in update_tg_cfs_runnable()
4300 se->avg.runnable_avg = gcfs_rq->avg.runnable_avg; in update_tg_cfs_runnable()
4301 new_sum = se->avg.runnable_avg * divider; in update_tg_cfs_runnable()
4302 delta_sum = (long)new_sum - (long)se->avg.runnable_sum; in update_tg_cfs_runnable()
4303 se->avg.runnable_sum = new_sum; in update_tg_cfs_runnable()
4306 add_positive(&cfs_rq->avg.runnable_avg, delta_avg); in update_tg_cfs_runnable()
4307 add_positive(&cfs_rq->avg.runnable_sum, delta_sum); in update_tg_cfs_runnable()
4309 cfs_rq->avg.runnable_sum = max_t(u32, cfs_rq->avg.runnable_sum, in update_tg_cfs_runnable()
4310 cfs_rq->avg.runnable_avg * PELT_MIN_DIVIDER); in update_tg_cfs_runnable()
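
update_tg_cfs_util() and update_tg_cfs_runnable() (4254-4310) share one propagation shape: copy the child group's average onto its owning entity, rebuild the matching _sum from the PELT divider, and apply only the deltas to the parent cfs_rq through add_positive(). A simplified signed-arithmetic sketch of that shape (illustrative names; no clamping subtleties beyond zero):

/* Clamp-at-zero addition, standing in for add_positive(). */
static void add_positive_sketch(long *v, long delta)
{
    *v += delta;
    if (*v < 0)
        *v = 0;
}

/* One propagation step: group average -> owning entity -> parent deltas. */
static void propagate_avg_sketch(long *se_avg, long *se_sum,
                                 long *rq_avg, long *rq_sum,
                                 long group_avg, long divider)
{
    long delta_avg = group_avg - *se_avg;
    long new_sum, delta_sum;

    if (!delta_avg)
        return;                     /* nothing changed upstream */

    *se_avg = group_avg;            /* entity mirrors the group */
    new_sum = group_avg * divider;
    delta_sum = new_sum - *se_sum;
    *se_sum = new_sum;

    add_positive_sketch(rq_avg, delta_avg);
    add_positive_sketch(rq_sum, delta_sum);
}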
4331 divider = get_pelt_divider(&cfs_rq->avg); in update_tg_cfs_load()
4338 runnable_sum += se->avg.load_sum; in update_tg_cfs_load()
4346 load_sum = div_u64(gcfs_rq->avg.load_sum, in update_tg_cfs_load()
4351 runnable_sum = min(se->avg.load_sum, load_sum); in update_tg_cfs_load()
4360 running_sum = se->avg.util_sum >> SCHED_CAPACITY_SHIFT; in update_tg_cfs_load()
4366 delta_avg = load_avg - se->avg.load_avg; in update_tg_cfs_load()
4370 delta_sum = load_sum - (s64)se_weight(se) * se->avg.load_sum; in update_tg_cfs_load()
4372 se->avg.load_sum = runnable_sum; in update_tg_cfs_load()
4373 se->avg.load_avg = load_avg; in update_tg_cfs_load()
4374 add_positive(&cfs_rq->avg.load_avg, delta_avg); in update_tg_cfs_load()
4375 add_positive(&cfs_rq->avg.load_sum, delta_sum); in update_tg_cfs_load()
4377 cfs_rq->avg.load_sum = max_t(u32, cfs_rq->avg.load_sum, in update_tg_cfs_load()
4378 cfs_rq->avg.load_avg * PELT_MIN_DIVIDER); in update_tg_cfs_load()
4427 if (se->avg.load_avg || se->avg.util_avg) in skip_blocked_update()
4468 if (load_avg_is_decayed(&se->avg)) in migrate_se_pelt_lag()
4562 struct sched_avg *sa = &cfs_rq->avg; in update_cfs_rq_load_avg()
4567 u32 divider = get_pelt_divider(&cfs_rq->avg); in update_cfs_rq_load_avg()
4636 u32 divider = get_pelt_divider(&cfs_rq->avg); in attach_entity_load_avg()
4645 se->avg.last_update_time = cfs_rq->avg.last_update_time; in attach_entity_load_avg()
4646 se->avg.period_contrib = cfs_rq->avg.period_contrib; in attach_entity_load_avg()
4654 se->avg.util_sum = se->avg.util_avg * divider; in attach_entity_load_avg()
4656 se->avg.runnable_sum = se->avg.runnable_avg * divider; in attach_entity_load_avg()
4658 se->avg.load_sum = se->avg.load_avg * divider; in attach_entity_load_avg()
4659 if (se_weight(se) < se->avg.load_sum) in attach_entity_load_avg()
4660 se->avg.load_sum = div_u64(se->avg.load_sum, se_weight(se)); in attach_entity_load_avg()
4662 se->avg.load_sum = 1; in attach_entity_load_avg()
4665 cfs_rq->avg.util_avg += se->avg.util_avg; in attach_entity_load_avg()
4666 cfs_rq->avg.util_sum += se->avg.util_sum; in attach_entity_load_avg()
4667 cfs_rq->avg.runnable_avg += se->avg.runnable_avg; in attach_entity_load_avg()
4668 cfs_rq->avg.runnable_sum += se->avg.runnable_sum; in attach_entity_load_avg()
4670 add_tg_cfs_propagate(cfs_rq, se->avg.load_sum); in attach_entity_load_avg()
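
attach_entity_load_avg() (4636-4670) syncs a newly attached entity to the cfs_rq clock and rebuilds its _sum fields from the _avg values. The subtle part is lines 4658-4662: load_sum is stored unweighted while load_avg is weighted, so the rebuilt sum is divided back by the weight, with a floor of 1 so a tiny weight does not erase the history. A hedged sketch of just that normalization:

#include <stdint.h>

static uint64_t rebuild_load_sum_sketch(unsigned long load_avg,
                                        uint32_t divider,
                                        unsigned long weight)
{
    uint64_t load_sum = (uint64_t)load_avg * divider;

    if (weight < load_sum)
        load_sum /= weight;         /* back to the unweighted domain */
    else
        load_sum = 1;               /* keep a nonzero trace of history */
    return load_sum;
}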
4688 sub_positive(&cfs_rq->avg.util_avg, se->avg.util_avg); in detach_entity_load_avg()
4689 sub_positive(&cfs_rq->avg.util_sum, se->avg.util_sum); in detach_entity_load_avg()
4691 cfs_rq->avg.util_sum = max_t(u32, cfs_rq->avg.util_sum, in detach_entity_load_avg()
4692 cfs_rq->avg.util_avg * PELT_MIN_DIVIDER); in detach_entity_load_avg()
4694 sub_positive(&cfs_rq->avg.runnable_avg, se->avg.runnable_avg); in detach_entity_load_avg()
4695 sub_positive(&cfs_rq->avg.runnable_sum, se->avg.runnable_sum); in detach_entity_load_avg()
4697 cfs_rq->avg.runnable_sum = max_t(u32, cfs_rq->avg.runnable_sum, in detach_entity_load_avg()
4698 cfs_rq->avg.runnable_avg * PELT_MIN_DIVIDER); in detach_entity_load_avg()
4700 add_tg_cfs_propagate(cfs_rq, -se->avg.load_sum); in detach_entity_load_avg()
4725 if (se->avg.last_update_time && !(flags & SKIP_AGE_LOAD)) in update_load_avg()
4731 if (!se->avg.last_update_time && (flags & DO_ATTACH)) { in update_load_avg()
4790 cfs_rq->removed.util_avg += se->avg.util_avg; in remove_entity_load_avg()
4791 cfs_rq->removed.load_avg += se->avg.load_avg; in remove_entity_load_avg()
4792 cfs_rq->removed.runnable_avg += se->avg.runnable_avg; in remove_entity_load_avg()
4798 return cfs_rq->avg.runnable_avg; in cfs_rq_runnable_avg()
4803 return cfs_rq->avg.load_avg; in cfs_rq_load_avg()
4810 return READ_ONCE(p->se.avg.util_avg); in task_util()
4815 return READ_ONCE(p->se.avg.runnable_avg); in task_runnable()
4820 return READ_ONCE(p->se.avg.util_est) & ~UTIL_AVG_UNCHANGED; in _task_util_est()
4837 enqueued = cfs_rq->avg.util_est; in util_est_enqueue()
4839 WRITE_ONCE(cfs_rq->avg.util_est, enqueued); in util_est_enqueue()
4853 enqueued = cfs_rq->avg.util_est; in util_est_dequeue()
4855 WRITE_ONCE(cfs_rq->avg.util_est, enqueued); in util_est_dequeue()
4879 ewma = READ_ONCE(p->se.avg.util_est); in util_est_update()
4937 WRITE_ONCE(p->se.avg.util_est, ewma); in util_est_update()
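
The util_est lines (4820-4937) track an estimate of each task's utilization: enqueue/dequeue maintain a per-rq total, and at dequeue the per-task EWMA is refreshed from the just-observed util_avg. A sketch of the EWMA step written back at line 4937, assuming a weight shift of 2 (1/4 weight per sample, as in current kernels) and that bit 0 carries the UTIL_AVG_UNCHANGED flag:

#define UTIL_EST_WEIGHT_SHIFT_SKETCH 2
#define UTIL_AVG_UNCHANGED_SKETCH    0x1

/* ewma <- ewma + (sample - ewma)/4, in pure integer arithmetic. */
static unsigned int util_est_ewma_sketch(unsigned int ewma,
                                         unsigned int dequeued_util)
{
    ewma &= ~UTIL_AVG_UNCHANGED_SKETCH;     /* drop the flag bit */
    ewma = (ewma << UTIL_EST_WEIGHT_SHIFT_SKETCH) + dequeued_util - ewma;
    ewma >>= UTIL_EST_WEIGHT_SHIFT_SKETCH;
    return ewma;
}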
7230 if (cpu_of(rq) != task_cpu(p) || !READ_ONCE(p->se.avg.last_update_time)) in cpu_load_without()
7234 load = READ_ONCE(cfs_rq->avg.load_avg); in cpu_load_without()
7253 if (cpu_of(rq) != task_cpu(p) || !READ_ONCE(p->se.avg.last_update_time)) in cpu_runnable_without()
7257 runnable = READ_ONCE(cfs_rq->avg.runnable_avg); in cpu_runnable_without()
7260 lsub_positive(&runnable, p->se.avg.runnable_avg); in cpu_runnable_without()
7992 unsigned long util = READ_ONCE(cfs_rq->avg.util_avg); in cpu_util()
7996 runnable = READ_ONCE(cfs_rq->avg.runnable_avg); in cpu_util()
8014 util_est = READ_ONCE(cfs_rq->avg.util_est); in cpu_util()
8079 if (cpu != task_cpu(p) || !READ_ONCE(p->se.avg.last_update_time)) in cpu_util_without()
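
cpu_load_without(), cpu_runnable_without() and cpu_util_without() (7230-8079) all answer the same question: what would this CPU's signal be if task p were not on it? When p is currently accounted on that CPU, its own contribution is removed with lsub_positive() so it is not double-counted while evaluating a migration. A hedged sketch of the pattern:

#include <stdbool.h>

static unsigned long load_without_sketch(unsigned long rq_signal,
                                         unsigned long task_signal,
                                         bool task_counted_here)
{
    if (task_counted_here)          /* lsub_positive() equivalent */
        rq_signal = (rq_signal > task_signal) ? rq_signal - task_signal : 0;
    return rq_signal;
}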
8661 se->avg.last_update_time = 0; in migrate_task_rq_fair()
9724 if (cfs_rq->avg.load_avg) in cfs_rq_has_blocked()
9727 if (cfs_rq->avg.util_avg) in cfs_rq_has_blocked()
9858 load = div64_ul(load * se->avg.load_avg, in update_cfs_rq_h_load()
9871 return div64_ul(p->se.avg.load_avg * cfs_rq->h_load, in task_h_load()
9889 return p->se.avg.load_avg; in task_h_load()
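
task_h_load() (9871-9889) converts a task's flat load_avg into a hierarchical one by scaling with its cfs_rq's h_load share; a standalone sketch, assuming the usual +1 guard against a zero divisor seen elsewhere in this family (e.g. line 1143):

#include <stdint.h>

static unsigned long task_h_load_sketch(unsigned long task_load_avg,
                                        unsigned long cfs_h_load,
                                        unsigned long cfs_load_avg)
{
    /* h_load = load_avg scaled by the cfs_rq's share of the hierarchy */
    return (unsigned long)((uint64_t)task_load_avg * cfs_h_load /
                           (cfs_load_avg + 1));
}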
10626 if (cpu != task_cpu(p) || !READ_ONCE(p->se.avg.last_update_time)) in task_running_on_cpu()
13214 if (!se->avg.last_update_time) in detach_entity_cfs_rq()
13339 p->se.avg.last_update_time = 0; in task_change_group_fair()
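
Finally, the last_update_time == 0 convention ties several of these sites together: migrate_task_rq_fair() (8661), detach_entity_cfs_rq() (13214) and task_change_group_fair() (13339) use zero as a "detached" marker, and update_load_avg() (4725-4731) keys off it, ageing the entity normally when the stamp is set and taking the DO_ATTACH path when it is zero. A speculative sketch of that dispatch with illustrative flags and types:

#include <stdint.h>

#define SKIP_AGE_LOAD_SKETCH 0x2
#define DO_ATTACH_SKETCH     0x4

struct se_sketch {
    uint64_t last_update_time;      /* 0 => detached from any cfs_rq */
};

static void update_load_avg_sketch(struct se_sketch *se, uint64_t now,
                                   unsigned int flags)
{
    if (se->last_update_time && !(flags & SKIP_AGE_LOAD)) {
        /* attached: decay PELT sums up to 'now' (decay body elided) */
        se->last_update_time = now;
    } else if (!se->last_update_time && (flags & DO_ATTACH)) {
        /* freshly migrated: attach with a clean stamp, no stale decay */
        se->last_update_time = now;
    }
}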