Searched refs:busiest (Results 1 – 3 of 3) sorted by relevance
/linux/kernel/sched/
fair.c
     9941  struct sched_group *busiest; /* Busiest group in this sd */    member
     9962  .busiest = NULL,    in init_sd_lb_stats()
    10293  struct sg_lb_stats *busiest,    in sibling_imbalance() argument
    10299  if (!env->idle || !busiest->sum_nr_running)    in sibling_imbalance()
    10302  ncores_busiest = sds->busiest->cores;    in sibling_imbalance()
    10306  imbalance = busiest->sum_nr_running;    in sibling_imbalance()
    10312  imbalance = ncores_local * busiest->sum_nr_running;    in sibling_imbalance()
    10320  busiest->sum_nr_running > 1)    in sibling_imbalance()
    10452  struct sg_lb_stats *busiest = &sds->busiest_stat;    in update_sd_pick_busiest() local
    10470  if (sgs->group_type > busiest->group_type)    in update_sd_pick_busiest()
    [all …]
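Aside: the sibling_imbalance() hits above show the busiest group's runnable-task count being scaled by the local and busiest groups' core counts. As a rough illustration only, the proportional split could be written as follows; the struct and helper names are invented for this sketch and are not the kernel's.

    /*
     * Illustrative sketch only: split runnable tasks between two groups in
     * proportion to their core counts, in the spirit of the sibling_imbalance()
     * fragments above. Types and names here are hypothetical, not the kernel's.
     */
    struct group_stats {
            unsigned int cores;             /* CPU cores in the group */
            unsigned int sum_nr_running;    /* runnable tasks in the group */
    };

    static unsigned int proportional_imbalance(const struct group_stats *local,
                                                const struct group_stats *busiest)
    {
            unsigned int imbalance;

            if (!busiest->sum_nr_running || !busiest->cores || !local->cores)
                    return 0;       /* nothing to pull, or degenerate input */

            /*
             * Scale the busiest group's task count by the local group's core
             * count, rounding to the nearest whole task when dividing back
             * by the busiest group's core count.
             */
            imbalance = local->cores * busiest->sum_nr_running;
            imbalance = (imbalance + busiest->cores / 2) / busiest->cores;

            return imbalance;
    }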
sched.h
     2952  static inline int _double_lock_balance(struct rq *this_rq, struct rq *busiest)    in _double_lock_balance() argument
     2954  __acquires(busiest->lock)    in _double_lock_balance()
     2958  double_rq_lock(this_rq, busiest);    in _double_lock_balance()
     2971  static inline int _double_lock_balance(struct rq *this_rq, struct rq *busiest)    in _double_lock_balance() argument
     2973  __acquires(busiest->lock)    in _double_lock_balance()
     2976  if (__rq_lockp(this_rq) == __rq_lockp(busiest) ||    in _double_lock_balance()
     2977  likely(raw_spin_rq_trylock(busiest))) {    in _double_lock_balance()
     2978  double_rq_clock_clear_update(this_rq, busiest);    in _double_lock_balance()
     2982  if (rq_order_less(this_rq, busiest)) {    in _double_lock_balance()
     2983  raw_spin_rq_lock_nested(busiest, SINGLE_DEPTH_NESTING);    in _double_lock_balance()
     [all …]
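Aside: the two _double_lock_balance() variants above take a second runqueue lock either by locking both outright or via a trylock fast path with a fixed lock order on the slow path, so two CPUs can never acquire the pair in opposite orders. A minimal user-space sketch of that pattern, using pthread mutexes and an invented helper name rather than the kernel's rq locking API, might look like:

    /*
     * Illustrative sketch of the trylock-then-ordered-locking pattern suggested
     * by the _double_lock_balance() fragments above. Pthread-based, hypothetical
     * helper; not the kernel implementation.
     */
    #include <pthread.h>
    #include <stdint.h>

    /* Caller already holds *this_lock; returns with both locks held. */
    static void double_lock(pthread_mutex_t *this_lock, pthread_mutex_t *busiest_lock)
    {
            if (this_lock == busiest_lock)
                    return;                         /* same lock, nothing more to take */

            if (pthread_mutex_trylock(busiest_lock) == 0)
                    return;                         /* fast path: got it without blocking */

            /* Slow path: respect a fixed global order (lower address first). */
            if ((uintptr_t)this_lock < (uintptr_t)busiest_lock) {
                    pthread_mutex_lock(busiest_lock);       /* already in order */
            } else {
                    pthread_mutex_unlock(this_lock);        /* drop and retake in order */
                    pthread_mutex_lock(busiest_lock);
                    pthread_mutex_lock(this_lock);
            }
    }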
/linux/Documentation/scheduler/
sched-domains.rst
     48  Initially, sched_balance_rq() finds the busiest group in the current sched domain.
     49  If it succeeds, it looks for the busiest runqueue of all the CPUs' runqueues in
     51  CPU's runqueue and the newly found busiest one and starts moving tasks from it
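The sched-domains.rst lines quoted above describe the overall flow: find the busiest group in the domain, find the busiest runqueue within that group, then pull tasks toward the local runqueue. A toy, self-contained sketch of that three-step flow (simplified data structures, with "load" reduced to a bare runnable-task count; none of these names are the kernel's actual interfaces) could look like:

    /*
     * Toy illustration of the flow described in sched-domains.rst above:
     * pick the busiest group, then the busiest runqueue inside it, then
     * move tasks to the local runqueue. Hypothetical data structures.
     */
    #include <stdio.h>

    #define GROUPS  2
    #define CPUS    2       /* CPUs per group */

    struct toy_rq { int nr_running; };
    struct toy_group { struct toy_rq rq[CPUS]; };

    static int group_load(const struct toy_group *g)
    {
            int i, load = 0;

            for (i = 0; i < CPUS; i++)
                    load += g->rq[i].nr_running;
            return load;
    }

    int main(void)
    {
            struct toy_group groups[GROUPS] = {
                    { { { 1 }, { 0 } } },   /* local group: one busy CPU, one idle */
                    { { { 5 }, { 2 } } },   /* remote group: clearly busier */
            };
            struct toy_rq *this_rq = &groups[0].rq[1];      /* the idle local CPU */
            struct toy_group *busiest_group = NULL;
            struct toy_rq *busiest_rq = NULL;
            int i;

            /* Step 1: find the busiest group in the "domain". */
            for (i = 0; i < GROUPS; i++)
                    if (!busiest_group || group_load(&groups[i]) > group_load(busiest_group))
                            busiest_group = &groups[i];

            /* Step 2: find the busiest runqueue within that group. */
            for (i = 0; i < CPUS; i++)
                    if (!busiest_rq || busiest_group->rq[i].nr_running > busiest_rq->nr_running)
                            busiest_rq = &busiest_group->rq[i];

            /* Step 3: pull tasks until the two queues are roughly even. */
            while (busiest_rq->nr_running > this_rq->nr_running + 1) {
                    busiest_rq->nr_running--;
                    this_rq->nr_running++;
            }

            printf("busiest rq: %d tasks, local rq: %d tasks\n",
                   busiest_rq->nr_running, this_rq->nr_running);
            return 0;
    }

In the real scheduler, group and queue selection use weighted load, CPU capacity, and group_type classifications (visible in the fair.c hits above) rather than a bare task count.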