Lines Matching defs:busiest

9069  * first so the group_type can simply be compared when selecting the busiest
9848 struct sched_group *busiest; /* Busiest group in this sd */
9855 struct sg_lb_stats busiest_stat; /* Statistics of the busiest group */
9865 * busiest_stat::idle_cpus to the worst busiest group because
9869 .busiest = NULL,
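
For orientation, the hits at 9848-9869 come from the per-domain statistics block that a balance pass fills in: a pointer to the busiest group plus a cached copy of its statistics, re-initialised before the groups are scanned so that the first real group always replaces it (cf. the "worst busiest group" comment at 9865). A minimal user-space sketch of that shape follows, assuming a reduced field set and an invented init helper; it is not the kernel's sd_lb_stats definition.

    #include <limits.h>
    #include <stddef.h>

    struct sched_group;                     /* opaque in this sketch */

    struct sg_lb_stats {                    /* per-group statistics (reduced) */
        int           group_type;           /* ordered: larger means busier */
        unsigned int  idle_cpus;
        unsigned long avg_load;
        unsigned int  sum_nr_running;
    };

    struct sd_lb_stats {
        struct sched_group *busiest;        /* busiest group in this sd */
        struct sg_lb_stats  busiest_stat;   /* statistics of the busiest group */
    };

    /*
     * Start a balance pass with no candidate selected.  busiest_stat is
     * primed with "least busy imaginable" values so that any real group
     * scanned afterwards compares as busier and takes its place.
     */
    static void init_sd_lb_stats_sketch(struct sd_lb_stats *sds)
    {
        sds->busiest = NULL;
        sds->busiest_stat = (struct sg_lb_stats){
            .group_type = 0,                /* stand-in for group_has_spare */
            .idle_cpus  = UINT_MAX,
        };
    }
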
10015 * When this is so detected; this group becomes a candidate for busiest; see
10145 * @sgs: Load-balancing statistics of the candidate busiest group
10146 * @group: The candidate busiest group
10200 struct sg_lb_stats *busiest,
10206 if (!env->idle || !busiest->sum_nr_running)
10209 ncores_busiest = sds->busiest->cores;
10213 imbalance = busiest->sum_nr_running;
10219 imbalance = ncores_local * busiest->sum_nr_running;
10227 busiest->sum_nr_running > 1)
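
The sibling_imbalance() hits at 10200-10227 compute how many tasks should move so that the runnable-tasks-per-core ratio evens out between the local and busiest groups, even when the two groups have different core counts. Below is a compilable user-space approximation of that idea; the struct and function names are invented, the rounding is simplified, and the kernel additionally requires the pulling CPU to be idle (the env->idle test at 10206).

    struct group_counts {                   /* invented, reduced stand-in */
        unsigned int sum_nr_running;        /* runnable tasks in the group */
        unsigned int cores;                 /* physical cores in the group */
    };

    /*
     * Tasks to pull from busiest so that sum_nr_running/cores roughly
     * matches in both groups.  Never negative; rounded to nearest.
     */
    static long sibling_imbalance_sketch(const struct group_counts *busiest,
                                         const struct group_counts *local)
    {
        long imbalance;

        if (!busiest->sum_nr_running)
            return 0;

        if (busiest->cores == local->cores) {
            /* Same size: plain difference in task counts. */
            imbalance = (long)busiest->sum_nr_running - local->sum_nr_running;
            return imbalance > 0 ? imbalance : 0;
        }

        /* Cross-multiply so groups of different core counts compare fairly. */
        imbalance = (long)local->cores * busiest->sum_nr_running -
                    (long)busiest->cores * local->sum_nr_running;
        if (imbalance < 0)
            imbalance = 0;

        /*
         * Each migrated task shifts the cross-product by (local + busiest)
         * cores, so convert back to a task count, rounding to nearest.
         */
        imbalance = (2 * imbalance + local->cores + busiest->cores) /
                    (2 * (local->cores + busiest->cores));

        /*
         * A completely idle group sitting next to a group running more than
         * one task is always worth a pull; report a value above 1 so a
         * caller gating on "> 1" (cf. the hit at 11286) still acts on it.
         */
        if (imbalance <= 1 && local->sum_nr_running == 0 &&
            busiest->sum_nr_running > 1)
            imbalance = 2;

        return imbalance;
    }
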
10342 * update_sd_pick_busiest - return 1 on busiest group
10345 * @sg: sched_group candidate to be checked for being the busiest
10349 * busiest group.
10352 * busiest group. %false otherwise.
10359 struct sg_lb_stats *busiest = &sds->busiest_stat;
10377 if (sgs->group_type > busiest->group_type)
10380 if (sgs->group_type < busiest->group_type)
10384 * The candidate and the current busiest group are the same type of
10385 * group. Let's check which one is the busiest according to the type.
10391 return sgs->avg_load > busiest->avg_load;
10402 return sched_asym_prefer(READ_ONCE(sds->busiest->asym_prefer_cpu),
10410 return sgs->group_misfit_task_load > busiest->group_misfit_task_load;
10417 if (sgs->idle_cpus != 0 || busiest->idle_cpus != 0)
10435 if (sgs->avg_load < busiest->avg_load)
10438 if (sgs->avg_load == busiest->avg_load) {
10443 if (sds->busiest->flags & SD_SHARE_CPUCAPACITY)
10455 if (smt_vs_nonsmt_groups(sds->busiest, sg)) {
10470 if (sgs->idle_cpus > busiest->idle_cpus)
10472 else if ((sgs->idle_cpus == busiest->idle_cpus) &&
10473 (sgs->sum_nr_running <= busiest->sum_nr_running))
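
Read together, the update_sd_pick_busiest() hits at 10342-10473 describe a two-stage comparison: a more severe group_type always wins (10377-10380, and the ordering remark at 9069), and only candidates of the same type fall through to per-type tie-breakers such as avg_load, misfit load, or idle CPUs versus runnable tasks. The self-contained user-space sketch below shows that ordering with an invented enum and a trimmed statistics struct; the asym-packing, SMT and imbalanced special cases around 10402-10455 are collapsed into the default branch.

    #include <stdbool.h>

    /* Invented ordering: numerically larger means "needs balancing more". */
    enum group_type_sketch {
        HAS_SPARE, FULLY_BUSY, MISFIT_TASK, ASYM_PACKING, IMBALANCED, OVERLOADED
    };

    struct sg_stats_sketch {
        enum group_type_sketch group_type;
        unsigned long avg_load;
        unsigned long group_misfit_task_load;
        unsigned int  idle_cpus;
        unsigned int  sum_nr_running;
    };

    /* Should candidate @sg replace the current @busiest candidate? */
    static bool pick_busiest_sketch(const struct sg_stats_sketch *sg,
                                    const struct sg_stats_sketch *busiest)
    {
        /* A more severe classification always wins. */
        if (sg->group_type > busiest->group_type)
            return true;
        if (sg->group_type < busiest->group_type)
            return false;

        /* Same type: per-type tie-breakers. */
        switch (sg->group_type) {
        case OVERLOADED:
        case FULLY_BUSY:
            /* Highest average load wins (cf. 10391, 10435). */
            return sg->avg_load > busiest->avg_load;
        case MISFIT_TASK:
            /* Biggest stranded (misfit) task wins (cf. 10410). */
            return sg->group_misfit_task_load > busiest->group_misfit_task_load;
        case HAS_SPARE:
        default:
            /* Fewer idle CPUs wins; on a tie, more runnable tasks
             * (cf. 10470-10473).  Remaining types simplified to this. */
            if (sg->idle_cpus < busiest->idle_cpus)
                return true;
            return sg->idle_cpus == busiest->idle_cpus &&
                   sg->sum_nr_running > busiest->sum_nr_running;
        }
    }
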
10957 sds->busiest = sg;
10970 * Indicate that the child domain of the busiest group prefers tasks
10974 if (sds->busiest)
10975 sds->prefer_sibling = !!(sds->busiest->flags & SD_PREFER_SIBLING);
11002 struct sg_lb_stats *local, *busiest;
11005 busiest = &sds->busiest_stat;
11007 if (busiest->group_type == group_misfit_task) {
11018 env->imbalance = busiest->group_misfit_task_load;
11023 if (busiest->group_type == group_asym_packing) {
11029 env->imbalance = busiest->sum_h_nr_running;
11033 if (busiest->group_type == group_smt_balance) {
11040 if (busiest->group_type == group_imbalanced) {
11054 * emptying busiest.
11057 if ((busiest->group_type > group_fully_busy) &&
11060 * If busiest is overloaded, try to fill spare
11062 * in busiest or busiest still being overloaded but
11075 * waiting task in this overloaded busiest group. Let's
11086 if (busiest->group_weight == 1 || sds->prefer_sibling) {
11092 env->imbalance = sibling_imbalance(env, sds, busiest, local);
11101 (local->idle_cpus - busiest->idle_cpus));
11121 * busiest group
11134 * busiest group don't try to pull any tasks.
11136 if (local->avg_load >= busiest->avg_load) {
11165 (busiest->avg_load - sds->avg_load) * busiest->group_capacity,
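
The calculate_imbalance() hits at 11002-11165 show the imbalance being derived from the busiest group's classification first (misfit load, task count for asym packing, a single task for the imbalanced case) and only falling back to an avg_load figure when both groups are overloaded. A condensed user-space sketch of that branching follows; the enum repeats the invented ordering above so the block stands alone, the input struct is flattened for illustration, and several kernel refinements (SMT balance, prefer_sibling and sibling_imbalance at 11086-11092, NUMA adjustments) are collapsed.

    enum group_type_sketch {                /* same invented ordering as above */
        HAS_SPARE, FULLY_BUSY, MISFIT_TASK, ASYM_PACKING, IMBALANCED, OVERLOADED
    };

    struct imbalance_inputs {               /* invented, flattened for the sketch */
        enum group_type_sketch busiest_type;
        unsigned long misfit_task_load;     /* load of the stranded task */
        unsigned int  busiest_h_nr_running;
        unsigned int  busiest_idle_cpus, local_idle_cpus;
        unsigned long busiest_avg_load, local_avg_load, domain_avg_load;
        unsigned long busiest_capacity, local_capacity;
    };

    #define CAPACITY_SCALE 1024UL           /* stand-in for SCHED_CAPACITY_SCALE */

    static unsigned long calculate_imbalance_sketch(const struct imbalance_inputs *in)
    {
        unsigned long a, b;

        switch (in->busiest_type) {
        case MISFIT_TASK:
            /* Move exactly the misfit task's load (cf. 11018). */
            return in->misfit_task_load;
        case ASYM_PACKING:
            /* Drain everything toward the preferred CPU (cf. 11029). */
            return in->busiest_h_nr_running;
        case IMBALANCED:
            /* Pinned tasks skewed the stats: move a single task. */
            return 1;
        case HAS_SPARE:
        case FULLY_BUSY:
            /* Not overloaded: even out the idle CPUs (cf. 11101). */
            return in->local_idle_cpus > in->busiest_idle_cpus ?
                   in->local_idle_cpus - in->busiest_idle_cpus : 0;
        case OVERLOADED:
        default:
            /* Both sides busy: pull the busiest group toward the domain
             * average without pushing the local group above it (cf. 11136,
             * 11165). */
            if (in->local_avg_load >= in->busiest_avg_load ||
                in->local_avg_load >= in->domain_avg_load ||
                in->busiest_avg_load <= in->domain_avg_load)
                return 0;
            a = (in->busiest_avg_load - in->domain_avg_load) * in->busiest_capacity;
            b = (in->domain_avg_load - in->local_avg_load) * in->local_capacity;
            return (a < b ? a : b) / CAPACITY_SCALE;
        }
    }
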
11173 * Decision matrix according to the local and busiest group type:
11175 * busiest \ local has_spare fully_busy misfit asym imbalanced overloaded
11193 * sched_balance_find_src_group - Returns the busiest group within the sched_domain
11200 * Return: - The busiest group if imbalance exists.
11204 struct sg_lb_stats *local, *busiest;
11216 if (!sds.busiest)
11219 busiest = &sds.busiest_stat;
11222 if (busiest->group_type == group_misfit_task)
11230 if (busiest->group_type == group_asym_packing)
11234 * If the busiest group is imbalanced the below checks don't
11238 if (busiest->group_type == group_imbalanced)
11243 * If the local group is busier than the selected busiest group
11246 if (local->group_type > busiest->group_type)
11256 * busiest group don't try to pull any tasks.
11258 if (local->avg_load >= busiest->avg_load)
11273 * If the busiest group is more loaded, use imbalance_pct to be
11276 if (100 * busiest->avg_load <=
11282 * Try to move all excess tasks to a sibling domain of the busiest
11286 sibling_imbalance(env, &sds, busiest, local) > 1)
11289 if (busiest->group_type != group_overloaded) {
11292 * If the busiest group is not overloaded (and as a
11299 if (busiest->group_type == group_smt_balance &&
11300 smt_vs_nonsmt_groups(sds.local, sds.busiest)) {
11305 if (busiest->group_weight > 1 &&
11306 local->idle_cpus <= (busiest->idle_cpus + 1)) {
11308 * If the busiest group is not overloaded
11309 * and there is no imbalance between this and busiest
11319 if (busiest->sum_h_nr_running == 1) {
11321 * busiest doesn't have any tasks waiting to run
11330 return env->imbalance ? sds.busiest : NULL;
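
Several of the sched_balance_find_src_group() hits (11256-11276) are "not worth it" cut-offs: the pass bails out unless the busiest group's average load exceeds the local group's by the domain's imbalance_pct margin. A small sketch of that comparison, with invented names; imbalance_pct is the usual percentage-style threshold (e.g. 117 for "at least 17% busier").

    #include <stdbool.h>

    /*
     * Same shape as the comparison in the hit at 11276, scaled by 100 so no
     * division is needed: balance only if busiest carries imbalance_pct/100
     * times the local load or more.
     */
    static bool clears_imbalance_pct(unsigned long busiest_avg_load,
                                     unsigned long local_avg_load,
                                     unsigned int imbalance_pct)
    {
        if (local_avg_load >= busiest_avg_load)         /* cf. 11258 */
            return false;
        return 100UL * busiest_avg_load >
               (unsigned long)imbalance_pct * local_avg_load;
    }

When the margin is not cleared, the search ends "balanced": the imbalance stays zero and the hit at 11330 returns NULL instead of sds.busiest.
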
11338 * sched_balance_find_src_rq - find the busiest runqueue among the CPUs in the group.
11343 struct rq *busiest = NULL, *rq;
11365 * If we ignore the actual busiest queue to migrate another
11366 * task, the next balance pass can still reduce the busiest
11433 busiest = rq;
11450 busiest = rq;
11457 busiest = rq;
11468 busiest = rq;
11476 return busiest;
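
sched_balance_find_src_rq() (hits 11338-11476) walks the CPUs of the chosen group and keeps the runqueue that looks busiest for the selected migration type; for plain load balancing that means the highest load relative to CPU capacity. The toy, self-contained version below covers only that one case (the several "busiest = rq" hits correspond to the other keys: utilization, task count, misfit load), and all names are invented.

    struct rq_sketch {
        unsigned long load;       /* runnable load on this CPU */
        unsigned long capacity;   /* CPU capacity (asymmetric systems) */
    };

    /* Index of the runqueue with the highest load/capacity ratio, or -1. */
    static int find_src_rq_sketch(const struct rq_sketch *rqs, int nr)
    {
        unsigned long best_load = 0, best_cap = 1;
        int i, busiest = -1;

        for (i = 0; i < nr; i++) {
            /* Cross-multiply: load/capacity > best_load/best_cap ? */
            if (rqs[i].load * best_cap > best_load * rqs[i].capacity) {
                best_load = rqs[i].load;
                best_cap  = rqs[i].capacity;
                busiest   = i;
            }
        }
        return busiest;           /* -1 when every queue is idle */
    }
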
11650 struct rq *busiest;
11681 busiest = sched_balance_find_src_rq(&env, group);
11682 if (!busiest) {
11687 WARN_ON_ONCE(busiest == env.dst_rq);
11691 env.src_cpu = busiest->cpu;
11692 env.src_rq = busiest;
11697 if (busiest->nr_running > 1) {
11700 * an imbalance but busiest->nr_running <= 1, the group is
11704 env.loop_max = min(sysctl_sched_nr_migrate, busiest->nr_running);
11707 rq_lock_irqsave(busiest, &rf);
11708 update_rq_clock(busiest);
11719 * unlock busiest->lock, and we are able to be sure
11724 rq_unlock(busiest, &rf);
11787 __cpumask_clear_cpu(cpu_of(busiest), cpus);
11791 * active CPUs remaining as possible busiest CPUs to
11823 raw_spin_rq_lock_irqsave(busiest, flags);
11827 * if the curr task on busiest CPU can't be
11830 if (!cpumask_test_cpu(this_cpu, busiest->curr->cpus_ptr)) {
11831 raw_spin_rq_unlock_irqrestore(busiest, flags);
11843 if (!busiest->active_balance) {
11844 busiest->active_balance = 1;
11845 busiest->push_cpu = this_cpu;
11850 raw_spin_rq_unlock_irqrestore(busiest, flags);
11852 stop_one_cpu_nowait(cpu_of(busiest),
11853 active_load_balance_cpu_stop, busiest,
11854 &busiest->active_balance_work);
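
The sched_balance_rq() hits from 11650 onward trace the shape of one balance attempt: pick a source runqueue, lock it, try to detach up to loop_max = min(sysctl_sched_nr_migrate, busiest->nr_running) tasks (11704-11708), and if every candidate task turns out to be pinned, drop that CPU from the candidate mask and retry while candidates remain (11787-11791); only when nothing can be moved at all is the busiest CPU's stopper kicked to push its running task away (the active_balance hits at 11843-11854). The retry step is the only piece that is easy to show standalone; here is a toy version using a 64-bit mask in place of cpumask_t, with invented names.

    #include <stdbool.h>
    #include <stdint.h>

    /*
     * All tasks on the chosen busiest CPU were pinned: remove it from the
     * candidate mask (cf. __cpumask_clear_cpu at 11787) and report whether
     * redoing the whole balance pass is worthwhile (cf. 11791).
     * Assumes fewer than 64 CPUs in this toy.
     */
    static bool drop_cpu_and_retry(uint64_t *candidate_cpus, int busiest_cpu)
    {
        *candidate_cpus &= ~(UINT64_C(1) << busiest_cpu);
        return *candidate_cpus != 0;    /* retry while candidates remain */
    }
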
11957 * running tasks off the busiest CPU onto idle CPUs. It requires at