1 /*
2  * Completely Fair Scheduling (CFS) Class (SCHED_NORMAL/SCHED_BATCH)
3  *
4  *  Copyright (C) 2007 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
5  *
6  *  Interactivity improvements by Mike Galbraith
7  *  (C) 2007 Mike Galbraith <efault@gmx.de>
8  *
9  *  Various enhancements by Dmitry Adamushko.
10  *  (C) 2007 Dmitry Adamushko <dmitry.adamushko@gmail.com>
11  *
12  *  Group scheduling enhancements by Srivatsa Vaddagiri
13  *  Copyright IBM Corporation, 2007
14  *  Author: Srivatsa Vaddagiri <vatsa@linux.vnet.ibm.com>
15  *
16  *  Scaled math optimizations by Thomas Gleixner
17  *  Copyright (C) 2007, Thomas Gleixner <tglx@linutronix.de>
18  *
19  *  Adaptive scheduling granularity, math enhancements by Peter Zijlstra
20  *  Copyright (C) 2007 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com>
21  */
22 
23 #include <linux/latencytop.h>
24 #include <linux/sched.h>
25 #include <linux/cpumask.h>
26 #include <linux/slab.h>
27 #include <linux/profile.h>
28 #include <linux/interrupt.h>
29 
30 #include <trace/events/sched.h>
31 
32 #include "sched.h"
33 
34 /*
35  * Targeted preemption latency for CPU-bound tasks:
36  * (default: 6ms * (1 + ilog(ncpus)), units: nanoseconds)
37  *
38  * NOTE: this latency value is not the same as the concept of
39  * 'timeslice length' - timeslices in CFS are of variable length
40  * and have no persistent notion like in traditional, time-slice
41  * based scheduling concepts.
42  *
43  * (to see the precise effective timeslice length of your workload,
44  *  run vmstat and monitor the context-switches (cs) field)
45  */
46 unsigned int sysctl_sched_latency = 6000000ULL;
47 unsigned int normalized_sysctl_sched_latency = 6000000ULL;
48 
49 /*
50  * The initial- and re-scaling of tunables is configurable
51  * (default SCHED_TUNABLESCALING_LOG = *(1+ilog(ncpus)))
52  *
53  * Options are:
54  * SCHED_TUNABLESCALING_NONE - unscaled, always *1
55  * SCHED_TUNABLESCALING_LOG - scaled logarithmically, *1+ilog(ncpus)
56  * SCHED_TUNABLESCALING_LINEAR - scaled linearly, *ncpus
57  */
58 enum sched_tunable_scaling sysctl_sched_tunable_scaling
59 	= SCHED_TUNABLESCALING_LOG;
60 
61 /*
62  * Minimal preemption granularity for CPU-bound tasks:
63  * (default: 0.75 msec * (1 + ilog(ncpus)), units: nanoseconds)
64  */
65 unsigned int sysctl_sched_min_granularity = 750000ULL;
66 unsigned int normalized_sysctl_sched_min_granularity = 750000ULL;
67 
68 /*
69  * is kept at sysctl_sched_latency / sysctl_sched_min_granularity
70  */
71 static unsigned int sched_nr_latency = 8;
72 
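/*
 * Illustrative example (values derived from the defaults above): with the
 * default SCHED_TUNABLESCALING_LOG scaling on an 8-CPU system the factor
 * is 1 + ilog2(8) = 4, giving an effective sched_latency of 24ms, a
 * minimum granularity of 3ms, and sched_nr_latency = 24ms / 3ms = 8.
 */
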
73 /*
74  * After fork, child runs first. If set to 0 (default) then
75  * parent will (try to) run first.
76  */
77 unsigned int sysctl_sched_child_runs_first __read_mostly;
78 
79 /*
80  * SCHED_OTHER wake-up granularity.
81  * (default: 1 msec * (1 + ilog(ncpus)), units: nanoseconds)
82  *
83  * This option delays the preemption effects of decoupled workloads
84  * and reduces their over-scheduling. Synchronous workloads will still
85  * have immediate wakeup/sleep latencies.
86  */
87 unsigned int sysctl_sched_wakeup_granularity = 1000000UL;
88 unsigned int normalized_sysctl_sched_wakeup_granularity = 1000000UL;
89 
90 const_debug unsigned int sysctl_sched_migration_cost = 500000UL;
91 
92 /*
93  * The exponential sliding window over which load is averaged for shares
94  * distribution.
95  * (default: 10msec)
96  */
97 unsigned int __read_mostly sysctl_sched_shares_window = 10000000UL;
98 
99 #ifdef CONFIG_CFS_BANDWIDTH
100 /*
101  * Amount of runtime to allocate from global (tg) to local (per-cfs_rq) pool
102  * each time a cfs_rq requests quota.
103  *
104  * Note: in the case that the slice exceeds the runtime remaining (either due
105  * to consumption or the quota being specified to be smaller than the slice)
106  * we will always only issue the remaining available time.
107  *
108  * default: 5 msec, units: microseconds
109   */
110 unsigned int sysctl_sched_cfs_bandwidth_slice = 5000UL;
111 #endif
112 
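/*
 * Illustrative example: with the default 5ms slice, a cfs_rq that exhausts
 * its local runtime asks the global pool for up to 5ms at a time; if only
 * 2ms of quota remain in the current period it is handed just those 2ms,
 * as described in the comment above.
 */
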
113 /*
114  * Increase the granularity value when there are more CPUs,
115  * because with more CPUs the 'effective latency' as visible
116  * to users decreases. But the relationship is not linear,
117  * so pick a second-best guess by going with the log2 of the
118  * number of CPUs.
119  *
120  * This idea comes from the SD scheduler of Con Kolivas:
121  */
122 static int get_update_sysctl_factor(void)
123 {
124 	unsigned int cpus = min_t(int, num_online_cpus(), 8);
125 	unsigned int factor;
126 
127 	switch (sysctl_sched_tunable_scaling) {
128 	case SCHED_TUNABLESCALING_NONE:
129 		factor = 1;
130 		break;
131 	case SCHED_TUNABLESCALING_LINEAR:
132 		factor = cpus;
133 		break;
134 	case SCHED_TUNABLESCALING_LOG:
135 	default:
136 		factor = 1 + ilog2(cpus);
137 		break;
138 	}
139 
140 	return factor;
141 }
142 
143 static void update_sysctl(void)
144 {
145 	unsigned int factor = get_update_sysctl_factor();
146 
147 #define SET_SYSCTL(name) \
148 	(sysctl_##name = (factor) * normalized_sysctl_##name)
149 	SET_SYSCTL(sched_min_granularity);
150 	SET_SYSCTL(sched_latency);
151 	SET_SYSCTL(sched_wakeup_granularity);
152 #undef SET_SYSCTL
153 }
154 
155 void sched_init_granularity(void)
156 {
157 	update_sysctl();
158 }
159 
160 #if BITS_PER_LONG == 32
161 # define WMULT_CONST	(~0UL)
162 #else
163 # define WMULT_CONST	(1UL << 32)
164 #endif
165 
166 #define WMULT_SHIFT	32
167 
168 /*
169  * Shift right and round:
170  */
171 #define SRR(x, y) (((x) + (1UL << ((y) - 1))) >> (y))
172 
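/*
 * Worked example (illustrative): SRR(x, y) divides by 2^y with rounding
 * to nearest. SRR(7, 1) = (7 + 1) >> 1 = 4, whereas a plain shift
 * 7 >> 1 would truncate to 3.
 */
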
173 /*
174  * delta *= weight / lw
175  */
176 static unsigned long
177 calc_delta_mine(unsigned long delta_exec, unsigned long weight,
178 		struct load_weight *lw)
179 {
180 	u64 tmp;
181 
182 	/*
183 	 * weight can be less than 2^SCHED_LOAD_RESOLUTION for task group sched
184 	 * entities since MIN_SHARES = 2. Treat weight as 1 if less than
185 	 * 2^SCHED_LOAD_RESOLUTION.
186 	 */
187 	if (likely(weight > (1UL << SCHED_LOAD_RESOLUTION)))
188 		tmp = (u64)delta_exec * scale_load_down(weight);
189 	else
190 		tmp = (u64)delta_exec;
191 
192 	if (!lw->inv_weight) {
193 		unsigned long w = scale_load_down(lw->weight);
194 
195 		if (BITS_PER_LONG > 32 && unlikely(w >= WMULT_CONST))
196 			lw->inv_weight = 1;
197 		else if (unlikely(!w))
198 			lw->inv_weight = WMULT_CONST;
199 		else
200 			lw->inv_weight = WMULT_CONST / w;
201 	}
202 
203 	/*
204 	 * Check whether we'd overflow the 64-bit multiplication:
205 	 */
206 	if (unlikely(tmp > WMULT_CONST))
207 		tmp = SRR(SRR(tmp, WMULT_SHIFT/2) * lw->inv_weight,
208 			WMULT_SHIFT/2);
209 	else
210 		tmp = SRR(tmp * lw->inv_weight, WMULT_SHIFT);
211 
212 	return (unsigned long)min(tmp, (u64)(unsigned long)LONG_MAX);
213 }
214 
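/*
 * Rough example (illustrative, ignoring SCHED_LOAD_RESOLUTION scaling):
 * with delta_exec = 6ms, weight = 1024 (nice 0) and lw->weight = 3072
 * (e.g. three nice-0 tasks on the queue), the result is about
 * 6ms * 1024 / 3072 = 2ms.
 */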
215 
216 const struct sched_class fair_sched_class;
217 
218 /**************************************************************
219  * CFS operations on generic schedulable entities:
220  */
221 
222 #ifdef CONFIG_FAIR_GROUP_SCHED
223 
224 /* cpu runqueue to which this cfs_rq is attached */
225 static inline struct rq *rq_of(struct cfs_rq *cfs_rq)
226 {
227 	return cfs_rq->rq;
228 }
229 
230 /* An entity is a task if it doesn't "own" a runqueue */
231 #define entity_is_task(se)	(!se->my_q)
232 
233 static inline struct task_struct *task_of(struct sched_entity *se)
234 {
235 #ifdef CONFIG_SCHED_DEBUG
236 	WARN_ON_ONCE(!entity_is_task(se));
237 #endif
238 	return container_of(se, struct task_struct, se);
239 }
240 
241 /* Walk up scheduling entities hierarchy */
242 #define for_each_sched_entity(se) \
243 		for (; se; se = se->parent)
244 
245 static inline struct cfs_rq *task_cfs_rq(struct task_struct *p)
246 {
247 	return p->se.cfs_rq;
248 }
249 
250 /* runqueue on which this entity is (to be) queued */
251 static inline struct cfs_rq *cfs_rq_of(struct sched_entity *se)
252 {
253 	return se->cfs_rq;
254 }
255 
256 /* runqueue "owned" by this group */
257 static inline struct cfs_rq *group_cfs_rq(struct sched_entity *grp)
258 {
259 	return grp->my_q;
260 }
261 
262 static inline void list_add_leaf_cfs_rq(struct cfs_rq *cfs_rq)
263 {
264 	if (!cfs_rq->on_list) {
265 		/*
266 		 * Ensure we either appear before our parent (if already
267 		 * enqueued) or force our parent to appear after us when it is
268 		 * enqueued.  The fact that we always enqueue bottom-up
269 		 * reduces this to two cases.
270 		 */
271 		if (cfs_rq->tg->parent &&
272 		    cfs_rq->tg->parent->cfs_rq[cpu_of(rq_of(cfs_rq))]->on_list) {
273 			list_add_rcu(&cfs_rq->leaf_cfs_rq_list,
274 				&rq_of(cfs_rq)->leaf_cfs_rq_list);
275 		} else {
276 			list_add_tail_rcu(&cfs_rq->leaf_cfs_rq_list,
277 				&rq_of(cfs_rq)->leaf_cfs_rq_list);
278 		}
279 
280 		cfs_rq->on_list = 1;
281 	}
282 }
283 
284 static inline void list_del_leaf_cfs_rq(struct cfs_rq *cfs_rq)
285 {
286 	if (cfs_rq->on_list) {
287 		list_del_rcu(&cfs_rq->leaf_cfs_rq_list);
288 		cfs_rq->on_list = 0;
289 	}
290 }
291 
292 /* Iterate through all leaf cfs_rq's on a runqueue */
293 #define for_each_leaf_cfs_rq(rq, cfs_rq) \
294 	list_for_each_entry_rcu(cfs_rq, &rq->leaf_cfs_rq_list, leaf_cfs_rq_list)
295 
296 /* Do the two (enqueued) entities belong to the same group ? */
297 static inline int
298 is_same_group(struct sched_entity *se, struct sched_entity *pse)
299 {
300 	if (se->cfs_rq == pse->cfs_rq)
301 		return 1;
302 
303 	return 0;
304 }
305 
306 static inline struct sched_entity *parent_entity(struct sched_entity *se)
307 {
308 	return se->parent;
309 }
310 
311 /* return depth at which a sched entity is present in the hierarchy */
312 static inline int depth_se(struct sched_entity *se)
313 {
314 	int depth = 0;
315 
316 	for_each_sched_entity(se)
317 		depth++;
318 
319 	return depth;
320 }
321 
322 static void
323 find_matching_se(struct sched_entity **se, struct sched_entity **pse)
324 {
325 	int se_depth, pse_depth;
326 
327 	/*
328 	 * A preemption test can only be made between sibling entities that are
329 	 * in the same cfs_rq, i.e. that have a common parent. Walk up the
330 	 * hierarchy of both tasks until we find ancestors that are siblings
331 	 * under a common parent.
332 	 */
333 
334 	/* First walk up until both entities are at same depth */
335 	se_depth = depth_se(*se);
336 	pse_depth = depth_se(*pse);
337 
338 	while (se_depth > pse_depth) {
339 		se_depth--;
340 		*se = parent_entity(*se);
341 	}
342 
343 	while (pse_depth > se_depth) {
344 		pse_depth--;
345 		*pse = parent_entity(*pse);
346 	}
347 
348 	while (!is_same_group(*se, *pse)) {
349 		*se = parent_entity(*se);
350 		*pse = parent_entity(*pse);
351 	}
352 }
353 
354 #else	/* !CONFIG_FAIR_GROUP_SCHED */
355 
356 static inline struct task_struct *task_of(struct sched_entity *se)
357 {
358 	return container_of(se, struct task_struct, se);
359 }
360 
361 static inline struct rq *rq_of(struct cfs_rq *cfs_rq)
362 {
363 	return container_of(cfs_rq, struct rq, cfs);
364 }
365 
366 #define entity_is_task(se)	1
367 
368 #define for_each_sched_entity(se) \
369 		for (; se; se = NULL)
370 
371 static inline struct cfs_rq *task_cfs_rq(struct task_struct *p)
372 {
373 	return &task_rq(p)->cfs;
374 }
375 
376 static inline struct cfs_rq *cfs_rq_of(struct sched_entity *se)
377 {
378 	struct task_struct *p = task_of(se);
379 	struct rq *rq = task_rq(p);
380 
381 	return &rq->cfs;
382 }
383 
384 /* runqueue "owned" by this group */
385 static inline struct cfs_rq *group_cfs_rq(struct sched_entity *grp)
386 {
387 	return NULL;
388 }
389 
390 static inline void list_add_leaf_cfs_rq(struct cfs_rq *cfs_rq)
391 {
392 }
393 
394 static inline void list_del_leaf_cfs_rq(struct cfs_rq *cfs_rq)
395 {
396 }
397 
398 #define for_each_leaf_cfs_rq(rq, cfs_rq) \
399 		for (cfs_rq = &rq->cfs; cfs_rq; cfs_rq = NULL)
400 
401 static inline int
402 is_same_group(struct sched_entity *se, struct sched_entity *pse)
403 {
404 	return 1;
405 }
406 
407 static inline struct sched_entity *parent_entity(struct sched_entity *se)
408 {
409 	return NULL;
410 }
411 
412 static inline void
413 find_matching_se(struct sched_entity **se, struct sched_entity **pse)
414 {
415 }
416 
417 #endif	/* CONFIG_FAIR_GROUP_SCHED */
418 
419 static void account_cfs_rq_runtime(struct cfs_rq *cfs_rq,
420 				   unsigned long delta_exec);
421 
422 /**************************************************************
423  * Scheduling class tree data structure manipulation methods:
424  */
425 
426 static inline u64 max_vruntime(u64 min_vruntime, u64 vruntime)
427 {
428 	s64 delta = (s64)(vruntime - min_vruntime);
429 	if (delta > 0)
430 		min_vruntime = vruntime;
431 
432 	return min_vruntime;
433 }
434 
435 static inline u64 min_vruntime(u64 min_vruntime, u64 vruntime)
436 {
437 	s64 delta = (s64)(vruntime - min_vruntime);
438 	if (delta < 0)
439 		min_vruntime = vruntime;
440 
441 	return min_vruntime;
442 }
443 
444 static inline int entity_before(struct sched_entity *a,
445 				struct sched_entity *b)
446 {
447 	return (s64)(a->vruntime - b->vruntime) < 0;
448 }
449 
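/*
 * Note (illustrative): vruntime ordering uses the signed difference so
 * that it remains correct across u64 wrap-around. With a->vruntime = 5
 * and b->vruntime = ULLONG_MAX (i.e. -1 once wrapped), the delta
 * (s64)(ULLONG_MAX - 5) is negative, so b is still considered to run
 * before a.
 */
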
450 static void update_min_vruntime(struct cfs_rq *cfs_rq)
451 {
452 	u64 vruntime = cfs_rq->min_vruntime;
453 
454 	if (cfs_rq->curr)
455 		vruntime = cfs_rq->curr->vruntime;
456 
457 	if (cfs_rq->rb_leftmost) {
458 		struct sched_entity *se = rb_entry(cfs_rq->rb_leftmost,
459 						   struct sched_entity,
460 						   run_node);
461 
462 		if (!cfs_rq->curr)
463 			vruntime = se->vruntime;
464 		else
465 			vruntime = min_vruntime(vruntime, se->vruntime);
466 	}
467 
468 	cfs_rq->min_vruntime = max_vruntime(cfs_rq->min_vruntime, vruntime);
469 #ifndef CONFIG_64BIT
470 	smp_wmb();
471 	cfs_rq->min_vruntime_copy = cfs_rq->min_vruntime;
472 #endif
473 }
474 
475 /*
476  * Enqueue an entity into the rb-tree:
477  */
478 static void __enqueue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se)
479 {
480 	struct rb_node **link = &cfs_rq->tasks_timeline.rb_node;
481 	struct rb_node *parent = NULL;
482 	struct sched_entity *entry;
483 	int leftmost = 1;
484 
485 	/*
486 	 * Find the right place in the rbtree:
487 	 */
488 	while (*link) {
489 		parent = *link;
490 		entry = rb_entry(parent, struct sched_entity, run_node);
491 		/*
492 		 * We don't care about collisions. Nodes with
493 		 * the same key stay together.
494 		 */
495 		if (entity_before(se, entry)) {
496 			link = &parent->rb_left;
497 		} else {
498 			link = &parent->rb_right;
499 			leftmost = 0;
500 		}
501 	}
502 
503 	/*
504 	 * Maintain a cache of leftmost tree entries (it is frequently
505 	 * used):
506 	 */
507 	if (leftmost)
508 		cfs_rq->rb_leftmost = &se->run_node;
509 
510 	rb_link_node(&se->run_node, parent, link);
511 	rb_insert_color(&se->run_node, &cfs_rq->tasks_timeline);
512 }
513 
514 static void __dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se)
515 {
516 	if (cfs_rq->rb_leftmost == &se->run_node) {
517 		struct rb_node *next_node;
518 
519 		next_node = rb_next(&se->run_node);
520 		cfs_rq->rb_leftmost = next_node;
521 	}
522 
523 	rb_erase(&se->run_node, &cfs_rq->tasks_timeline);
524 }
525 
526 struct sched_entity *__pick_first_entity(struct cfs_rq *cfs_rq)
527 {
528 	struct rb_node *left = cfs_rq->rb_leftmost;
529 
530 	if (!left)
531 		return NULL;
532 
533 	return rb_entry(left, struct sched_entity, run_node);
534 }
535 
536 static struct sched_entity *__pick_next_entity(struct sched_entity *se)
537 {
538 	struct rb_node *next = rb_next(&se->run_node);
539 
540 	if (!next)
541 		return NULL;
542 
543 	return rb_entry(next, struct sched_entity, run_node);
544 }
545 
546 #ifdef CONFIG_SCHED_DEBUG
547 struct sched_entity *__pick_last_entity(struct cfs_rq *cfs_rq)
548 {
549 	struct rb_node *last = rb_last(&cfs_rq->tasks_timeline);
550 
551 	if (!last)
552 		return NULL;
553 
554 	return rb_entry(last, struct sched_entity, run_node);
555 }
556 
557 /**************************************************************
558  * Scheduling class statistics methods:
559  */
560 
561 int sched_proc_update_handler(struct ctl_table *table, int write,
562 		void __user *buffer, size_t *lenp,
563 		loff_t *ppos)
564 {
565 	int ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
566 	int factor = get_update_sysctl_factor();
567 
568 	if (ret || !write)
569 		return ret;
570 
571 	sched_nr_latency = DIV_ROUND_UP(sysctl_sched_latency,
572 					sysctl_sched_min_granularity);
573 
574 #define WRT_SYSCTL(name) \
575 	(normalized_sysctl_##name = sysctl_##name / (factor))
576 	WRT_SYSCTL(sched_min_granularity);
577 	WRT_SYSCTL(sched_latency);
578 	WRT_SYSCTL(sched_wakeup_granularity);
579 #undef WRT_SYSCTL
580 
581 	return 0;
582 }
583 #endif
584 
585 /*
586  * delta /= w
587  */
588 static inline unsigned long
589 calc_delta_fair(unsigned long delta, struct sched_entity *se)
590 {
591 	if (unlikely(se->load.weight != NICE_0_LOAD))
592 		delta = calc_delta_mine(delta, NICE_0_LOAD, &se->load);
593 
594 	return delta;
595 }
596 
597 /*
598  * The idea is to set a period in which each task runs once.
599  *
600  * When there are too many tasks (sysctl_sched_nr_latency) we have to stretch
601  * this period because otherwise the slices get too small.
602  *
603  * p = (nr <= nl) ? l : l*nr/nl
604  */
605 static u64 __sched_period(unsigned long nr_running)
606 {
607 	u64 period = sysctl_sched_latency;
608 	unsigned long nr_latency = sched_nr_latency;
609 
610 	if (unlikely(nr_running > nr_latency)) {
611 		period = sysctl_sched_min_granularity;
612 		period *= nr_running;
613 	}
614 
615 	return period;
616 }
617 
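/*
 * Example (illustrative, with the default 6ms latency, 0.75ms minimum
 * granularity and sched_nr_latency = 8): 4 runnable tasks share the 6ms
 * period, while 16 runnable tasks stretch it to 16 * 0.75ms = 12ms so
 * that no slice drops below the minimum granularity.
 */
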
618 /*
619  * We calculate the wall-time slice from the period by taking a part
620  * proportional to the weight.
621  *
622  * s = p*P[w/rw]
623  */
624 static u64 sched_slice(struct cfs_rq *cfs_rq, struct sched_entity *se)
625 {
626 	u64 slice = __sched_period(cfs_rq->nr_running + !se->on_rq);
627 
628 	for_each_sched_entity(se) {
629 		struct load_weight *load;
630 		struct load_weight lw;
631 
632 		cfs_rq = cfs_rq_of(se);
633 		load = &cfs_rq->load;
634 
635 		if (unlikely(!se->on_rq)) {
636 			lw = cfs_rq->load;
637 
638 			update_load_add(&lw, se->load.weight);
639 			load = &lw;
640 		}
641 		slice = calc_delta_mine(slice, se->load.weight, load);
642 	}
643 	return slice;
644 }
645 
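/*
 * Example (illustrative): with a 6ms period and two runnable nice-0 tasks
 * (equal weight), each gets a 3ms wall-time slice; a task holding two
 * thirds of the cfs_rq weight would get 4ms of the same period.
 */
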
646 /*
647  * We calculate the vruntime slice of a to be inserted task
648  *
649  * vs = s/w
650  */
651 static u64 sched_vslice(struct cfs_rq *cfs_rq, struct sched_entity *se)
652 {
653 	return calc_delta_fair(sched_slice(cfs_rq, se), se);
654 }
655 
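/*
 * Note (illustrative): for a nice-0 task the vruntime slice equals the
 * wall-time slice; for a heavier task the same wall time is scaled down
 * (its vruntime advances more slowly), for a lighter task scaled up.
 */
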
656 static void update_cfs_load(struct cfs_rq *cfs_rq, int global_update);
657 static void update_cfs_shares(struct cfs_rq *cfs_rq);
658 
659 /*
660  * Update the current task's runtime statistics. Skip current tasks that
661  * are not in our scheduling class.
662  */
663 static inline void
664 __update_curr(struct cfs_rq *cfs_rq, struct sched_entity *curr,
665 	      unsigned long delta_exec)
666 {
667 	unsigned long delta_exec_weighted;
668 
669 	schedstat_set(curr->statistics.exec_max,
670 		      max((u64)delta_exec, curr->statistics.exec_max));
671 
672 	curr->sum_exec_runtime += delta_exec;
673 	schedstat_add(cfs_rq, exec_clock, delta_exec);
674 	delta_exec_weighted = calc_delta_fair(delta_exec, curr);
675 
676 	curr->vruntime += delta_exec_weighted;
677 	update_min_vruntime(cfs_rq);
678 
679 #if defined CONFIG_SMP && defined CONFIG_FAIR_GROUP_SCHED
680 	cfs_rq->load_unacc_exec_time += delta_exec;
681 #endif
682 }
683 
684 static void update_curr(struct cfs_rq *cfs_rq)
685 {
686 	struct sched_entity *curr = cfs_rq->curr;
687 	u64 now = rq_of(cfs_rq)->clock_task;
688 	unsigned long delta_exec;
689 
690 	if (unlikely(!curr))
691 		return;
692 
693 	/*
694 	 * Get the amount of time the current task was running
695 	 * since the last time we changed load (this cannot
696 	 * overflow on 32 bits):
697 	 */
698 	delta_exec = (unsigned long)(now - curr->exec_start);
699 	if (!delta_exec)
700 		return;
701 
702 	__update_curr(cfs_rq, curr, delta_exec);
703 	curr->exec_start = now;
704 
705 	if (entity_is_task(curr)) {
706 		struct task_struct *curtask = task_of(curr);
707 
708 		trace_sched_stat_runtime(curtask, delta_exec, curr->vruntime);
709 		cpuacct_charge(curtask, delta_exec);
710 		account_group_exec_runtime(curtask, delta_exec);
711 	}
712 
713 	account_cfs_rq_runtime(cfs_rq, delta_exec);
714 }
715 
716 static inline void
717 update_stats_wait_start(struct cfs_rq *cfs_rq, struct sched_entity *se)
718 {
719 	schedstat_set(se->statistics.wait_start, rq_of(cfs_rq)->clock);
720 }
721 
722 /*
723  * Task is being enqueued - update stats:
724  */
725 static void update_stats_enqueue(struct cfs_rq *cfs_rq, struct sched_entity *se)
726 {
727 	/*
728 	 * Are we enqueueing a waiting task? (for current tasks
729 	 * a dequeue/enqueue event is a NOP)
730 	 */
731 	if (se != cfs_rq->curr)
732 		update_stats_wait_start(cfs_rq, se);
733 }
734 
735 static void
736 update_stats_wait_end(struct cfs_rq *cfs_rq, struct sched_entity *se)
737 {
738 	schedstat_set(se->statistics.wait_max, max(se->statistics.wait_max,
739 			rq_of(cfs_rq)->clock - se->statistics.wait_start));
740 	schedstat_set(se->statistics.wait_count, se->statistics.wait_count + 1);
741 	schedstat_set(se->statistics.wait_sum, se->statistics.wait_sum +
742 			rq_of(cfs_rq)->clock - se->statistics.wait_start);
743 #ifdef CONFIG_SCHEDSTATS
744 	if (entity_is_task(se)) {
745 		trace_sched_stat_wait(task_of(se),
746 			rq_of(cfs_rq)->clock - se->statistics.wait_start);
747 	}
748 #endif
749 	schedstat_set(se->statistics.wait_start, 0);
750 }
751 
752 static inline void
753 update_stats_dequeue(struct cfs_rq *cfs_rq, struct sched_entity *se)
754 {
755 	/*
756 	 * Mark the end of the wait period if dequeueing a
757 	 * waiting task:
758 	 */
759 	if (se != cfs_rq->curr)
760 		update_stats_wait_end(cfs_rq, se);
761 }
762 
763 /*
764  * We are picking a new current task - update its stats:
765  */
766 static inline void
767 update_stats_curr_start(struct cfs_rq *cfs_rq, struct sched_entity *se)
768 {
769 	/*
770 	 * We are starting a new run period:
771 	 */
772 	se->exec_start = rq_of(cfs_rq)->clock_task;
773 }
774 
775 /**************************************************
776  * Scheduling class queueing methods:
777  */
778 
779 #if defined CONFIG_SMP && defined CONFIG_FAIR_GROUP_SCHED
780 static void
781 add_cfs_task_weight(struct cfs_rq *cfs_rq, unsigned long weight)
782 {
783 	cfs_rq->task_weight += weight;
784 }
785 #else
786 static inline void
787 add_cfs_task_weight(struct cfs_rq *cfs_rq, unsigned long weight)
788 {
789 }
790 #endif
791 
792 static void
793 account_entity_enqueue(struct cfs_rq *cfs_rq, struct sched_entity *se)
794 {
795 	update_load_add(&cfs_rq->load, se->load.weight);
796 	if (!parent_entity(se))
797 		update_load_add(&rq_of(cfs_rq)->load, se->load.weight);
798 	if (entity_is_task(se)) {
799 		add_cfs_task_weight(cfs_rq, se->load.weight);
800 		list_add(&se->group_node, &cfs_rq->tasks);
801 	}
802 	cfs_rq->nr_running++;
803 }
804 
805 static void
806 account_entity_dequeue(struct cfs_rq *cfs_rq, struct sched_entity *se)
807 {
808 	update_load_sub(&cfs_rq->load, se->load.weight);
809 	if (!parent_entity(se))
810 		update_load_sub(&rq_of(cfs_rq)->load, se->load.weight);
811 	if (entity_is_task(se)) {
812 		add_cfs_task_weight(cfs_rq, -se->load.weight);
813 		list_del_init(&se->group_node);
814 	}
815 	cfs_rq->nr_running--;
816 }
817 
818 #ifdef CONFIG_FAIR_GROUP_SCHED
819 /* we need this in update_cfs_load and load-balance functions below */
820 static inline int throttled_hierarchy(struct cfs_rq *cfs_rq);
821 # ifdef CONFIG_SMP
822 static void update_cfs_rq_load_contribution(struct cfs_rq *cfs_rq,
823 					    int global_update)
824 {
825 	struct task_group *tg = cfs_rq->tg;
826 	long load_avg;
827 
828 	load_avg = div64_u64(cfs_rq->load_avg, cfs_rq->load_period+1);
829 	load_avg -= cfs_rq->load_contribution;
830 
831 	if (global_update || abs(load_avg) > cfs_rq->load_contribution / 8) {
832 		atomic_add(load_avg, &tg->load_weight);
833 		cfs_rq->load_contribution += load_avg;
834 	}
835 }
836 
837 static void update_cfs_load(struct cfs_rq *cfs_rq, int global_update)
838 {
839 	u64 period = sysctl_sched_shares_window;
840 	u64 now, delta;
841 	unsigned long load = cfs_rq->load.weight;
842 
843 	if (cfs_rq->tg == &root_task_group || throttled_hierarchy(cfs_rq))
844 		return;
845 
846 	now = rq_of(cfs_rq)->clock_task;
847 	delta = now - cfs_rq->load_stamp;
848 
849 	/* truncate load history at 4 idle periods */
850 	if (cfs_rq->load_stamp > cfs_rq->load_last &&
851 	    now - cfs_rq->load_last > 4 * period) {
852 		cfs_rq->load_period = 0;
853 		cfs_rq->load_avg = 0;
854 		delta = period - 1;
855 	}
856 
857 	cfs_rq->load_stamp = now;
858 	cfs_rq->load_unacc_exec_time = 0;
859 	cfs_rq->load_period += delta;
860 	if (load) {
861 		cfs_rq->load_last = now;
862 		cfs_rq->load_avg += delta * load;
863 	}
864 
865 	/* consider updating load contribution on each fold or truncate */
866 	if (global_update || cfs_rq->load_period > period
867 	    || !cfs_rq->load_period)
868 		update_cfs_rq_load_contribution(cfs_rq, global_update);
869 
870 	while (cfs_rq->load_period > period) {
871 		/*
872 		 * Inline assembly required to prevent the compiler
873 		 * optimising this loop into a divmod call.
874 		 * See __iter_div_u64_rem() for another example of this.
875 		 */
876 		asm("" : "+rm" (cfs_rq->load_period));
877 		cfs_rq->load_period /= 2;
878 		cfs_rq->load_avg /= 2;
879 	}
880 
881 	if (!cfs_rq->curr && !cfs_rq->nr_running && !cfs_rq->load_avg)
882 		list_del_leaf_cfs_rq(cfs_rq);
883 }
884 
885 static inline long calc_tg_weight(struct task_group *tg, struct cfs_rq *cfs_rq)
886 {
887 	long tg_weight;
888 
889 	/*
890 	 * Use this CPU's actual weight instead of the last load_contribution
891 	 * to gain a more accurate current total weight. See
892 	 * update_cfs_rq_load_contribution().
893 	 */
894 	tg_weight = atomic_read(&tg->load_weight);
895 	tg_weight -= cfs_rq->load_contribution;
896 	tg_weight += cfs_rq->load.weight;
897 
898 	return tg_weight;
899 }
900 
901 static long calc_cfs_shares(struct cfs_rq *cfs_rq, struct task_group *tg)
902 {
903 	long tg_weight, load, shares;
904 
905 	tg_weight = calc_tg_weight(tg, cfs_rq);
906 	load = cfs_rq->load.weight;
907 
908 	shares = (tg->shares * load);
909 	if (tg_weight)
910 		shares /= tg_weight;
911 
912 	if (shares < MIN_SHARES)
913 		shares = MIN_SHARES;
914 	if (shares > tg->shares)
915 		shares = tg->shares;
916 
917 	return shares;
918 }
919 
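/*
 * Example (illustrative): a group with tg->shares = 1024 whose load is
 * split roughly 75%/25% between two CPUs ends up with per-CPU entity
 * weights of about 768 and 256, clamped to [MIN_SHARES, tg->shares].
 */
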
920 static void update_entity_shares_tick(struct cfs_rq *cfs_rq)
921 {
922 	if (cfs_rq->load_unacc_exec_time > sysctl_sched_shares_window) {
923 		update_cfs_load(cfs_rq, 0);
924 		update_cfs_shares(cfs_rq);
925 	}
926 }
927 # else /* CONFIG_SMP */
928 static void update_cfs_load(struct cfs_rq *cfs_rq, int global_update)
929 {
930 }
931 
932 static inline long calc_cfs_shares(struct cfs_rq *cfs_rq, struct task_group *tg)
933 {
934 	return tg->shares;
935 }
936 
937 static inline void update_entity_shares_tick(struct cfs_rq *cfs_rq)
938 {
939 }
940 # endif /* CONFIG_SMP */
941 static void reweight_entity(struct cfs_rq *cfs_rq, struct sched_entity *se,
942 			    unsigned long weight)
943 {
944 	if (se->on_rq) {
945 		/* commit outstanding execution time */
946 		if (cfs_rq->curr == se)
947 			update_curr(cfs_rq);
948 		account_entity_dequeue(cfs_rq, se);
949 	}
950 
951 	update_load_set(&se->load, weight);
952 
953 	if (se->on_rq)
954 		account_entity_enqueue(cfs_rq, se);
955 }
956 
957 static void update_cfs_shares(struct cfs_rq *cfs_rq)
958 {
959 	struct task_group *tg;
960 	struct sched_entity *se;
961 	long shares;
962 
963 	tg = cfs_rq->tg;
964 	se = tg->se[cpu_of(rq_of(cfs_rq))];
965 	if (!se || throttled_hierarchy(cfs_rq))
966 		return;
967 #ifndef CONFIG_SMP
968 	if (likely(se->load.weight == tg->shares))
969 		return;
970 #endif
971 	shares = calc_cfs_shares(cfs_rq, tg);
972 
973 	reweight_entity(cfs_rq_of(se), se, shares);
974 }
975 #else /* CONFIG_FAIR_GROUP_SCHED */
976 static void update_cfs_load(struct cfs_rq *cfs_rq, int global_update)
977 {
978 }
979 
980 static inline void update_cfs_shares(struct cfs_rq *cfs_rq)
981 {
982 }
983 
984 static inline void update_entity_shares_tick(struct cfs_rq *cfs_rq)
985 {
986 }
987 #endif /* CONFIG_FAIR_GROUP_SCHED */
988 
989 static void enqueue_sleeper(struct cfs_rq *cfs_rq, struct sched_entity *se)
990 {
991 #ifdef CONFIG_SCHEDSTATS
992 	struct task_struct *tsk = NULL;
993 
994 	if (entity_is_task(se))
995 		tsk = task_of(se);
996 
997 	if (se->statistics.sleep_start) {
998 		u64 delta = rq_of(cfs_rq)->clock - se->statistics.sleep_start;
999 
1000 		if ((s64)delta < 0)
1001 			delta = 0;
1002 
1003 		if (unlikely(delta > se->statistics.sleep_max))
1004 			se->statistics.sleep_max = delta;
1005 
1006 		se->statistics.sleep_start = 0;
1007 		se->statistics.sum_sleep_runtime += delta;
1008 
1009 		if (tsk) {
1010 			account_scheduler_latency(tsk, delta >> 10, 1);
1011 			trace_sched_stat_sleep(tsk, delta);
1012 		}
1013 	}
1014 	if (se->statistics.block_start) {
1015 		u64 delta = rq_of(cfs_rq)->clock - se->statistics.block_start;
1016 
1017 		if ((s64)delta < 0)
1018 			delta = 0;
1019 
1020 		if (unlikely(delta > se->statistics.block_max))
1021 			se->statistics.block_max = delta;
1022 
1023 		se->statistics.block_start = 0;
1024 		se->statistics.sum_sleep_runtime += delta;
1025 
1026 		if (tsk) {
1027 			if (tsk->in_iowait) {
1028 				se->statistics.iowait_sum += delta;
1029 				se->statistics.iowait_count++;
1030 				trace_sched_stat_iowait(tsk, delta);
1031 			}
1032 
1033 			trace_sched_stat_blocked(tsk, delta);
1034 
1035 			/*
1036 			 * Blocking time is in units of nanosecs, so shift by
1037 			 * 20 to get a milliseconds-range estimation of the
1038 			 * amount of time that the task spent sleeping:
1039 			 */
1040 			if (unlikely(prof_on == SLEEP_PROFILING)) {
1041 				profile_hits(SLEEP_PROFILING,
1042 						(void *)get_wchan(tsk),
1043 						delta >> 20);
1044 			}
1045 			account_scheduler_latency(tsk, delta >> 10, 0);
1046 		}
1047 	}
1048 #endif
1049 }
1050 
1051 static void check_spread(struct cfs_rq *cfs_rq, struct sched_entity *se)
1052 {
1053 #ifdef CONFIG_SCHED_DEBUG
1054 	s64 d = se->vruntime - cfs_rq->min_vruntime;
1055 
1056 	if (d < 0)
1057 		d = -d;
1058 
1059 	if (d > 3*sysctl_sched_latency)
1060 		schedstat_inc(cfs_rq, nr_spread_over);
1061 #endif
1062 }
1063 
1064 static void
1065 place_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int initial)
1066 {
1067 	u64 vruntime = cfs_rq->min_vruntime;
1068 
1069 	/*
1070 	 * The 'current' period is already promised to the current tasks,
1071 	 * however the extra weight of the new task will slow them down a
1072 	 * little, place the new task so that it fits in the slot that
1073 	 * stays open at the end.
1074 	 */
1075 	if (initial && sched_feat(START_DEBIT))
1076 		vruntime += sched_vslice(cfs_rq, se);
1077 
1078 	/* sleeps up to a single latency don't count. */
1079 	if (!initial) {
1080 		unsigned long thresh = sysctl_sched_latency;
1081 
1082 		/*
1083 		 * Halve their sleep time's effect, to allow
1084 		 * for a gentler effect of sleepers:
1085 		 */
1086 		if (sched_feat(GENTLE_FAIR_SLEEPERS))
1087 			thresh >>= 1;
1088 
1089 		vruntime -= thresh;
1090 	}
1091 
1092 	/* ensure we never gain time by being placed backwards. */
1093 	vruntime = max_vruntime(se->vruntime, vruntime);
1094 
1095 	se->vruntime = vruntime;
1096 }
1097 
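/*
 * Example (illustrative, with 6ms sched_latency and GENTLE_FAIR_SLEEPERS):
 * a newly forked task is placed one vruntime slice *after* min_vruntime
 * (START_DEBIT), while a task waking from sleep is placed 3ms *before*
 * min_vruntime, but never earlier than its own previous vruntime.
 */
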
1098 static void check_enqueue_throttle(struct cfs_rq *cfs_rq);
1099 
1100 static void
1101 enqueue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
1102 {
1103 	/*
1104 	 * Update the normalized vruntime before updating min_vruntime
1105 	 * through calling update_curr().
1106 	 */
1107 	if (!(flags & ENQUEUE_WAKEUP) || (flags & ENQUEUE_WAKING))
1108 		se->vruntime += cfs_rq->min_vruntime;
1109 
1110 	/*
1111 	 * Update run-time statistics of the 'current'.
1112 	 */
1113 	update_curr(cfs_rq);
1114 	update_cfs_load(cfs_rq, 0);
1115 	account_entity_enqueue(cfs_rq, se);
1116 	update_cfs_shares(cfs_rq);
1117 
1118 	if (flags & ENQUEUE_WAKEUP) {
1119 		place_entity(cfs_rq, se, 0);
1120 		enqueue_sleeper(cfs_rq, se);
1121 	}
1122 
1123 	update_stats_enqueue(cfs_rq, se);
1124 	check_spread(cfs_rq, se);
1125 	if (se != cfs_rq->curr)
1126 		__enqueue_entity(cfs_rq, se);
1127 	se->on_rq = 1;
1128 
1129 	if (cfs_rq->nr_running == 1) {
1130 		list_add_leaf_cfs_rq(cfs_rq);
1131 		check_enqueue_throttle(cfs_rq);
1132 	}
1133 }
1134 
1135 static void __clear_buddies_last(struct sched_entity *se)
1136 {
1137 	for_each_sched_entity(se) {
1138 		struct cfs_rq *cfs_rq = cfs_rq_of(se);
1139 		if (cfs_rq->last == se)
1140 			cfs_rq->last = NULL;
1141 		else
1142 			break;
1143 	}
1144 }
1145 
1146 static void __clear_buddies_next(struct sched_entity *se)
1147 {
1148 	for_each_sched_entity(se) {
1149 		struct cfs_rq *cfs_rq = cfs_rq_of(se);
1150 		if (cfs_rq->next == se)
1151 			cfs_rq->next = NULL;
1152 		else
1153 			break;
1154 	}
1155 }
1156 
1157 static void __clear_buddies_skip(struct sched_entity *se)
1158 {
1159 	for_each_sched_entity(se) {
1160 		struct cfs_rq *cfs_rq = cfs_rq_of(se);
1161 		if (cfs_rq->skip == se)
1162 			cfs_rq->skip = NULL;
1163 		else
1164 			break;
1165 	}
1166 }
1167 
1168 static void clear_buddies(struct cfs_rq *cfs_rq, struct sched_entity *se)
1169 {
1170 	if (cfs_rq->last == se)
1171 		__clear_buddies_last(se);
1172 
1173 	if (cfs_rq->next == se)
1174 		__clear_buddies_next(se);
1175 
1176 	if (cfs_rq->skip == se)
1177 		__clear_buddies_skip(se);
1178 }
1179 
1180 static void return_cfs_rq_runtime(struct cfs_rq *cfs_rq);
1181 
1182 static void
1183 dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
1184 {
1185 	/*
1186 	 * Update run-time statistics of the 'current'.
1187 	 */
1188 	update_curr(cfs_rq);
1189 
1190 	update_stats_dequeue(cfs_rq, se);
1191 	if (flags & DEQUEUE_SLEEP) {
1192 #ifdef CONFIG_SCHEDSTATS
1193 		if (entity_is_task(se)) {
1194 			struct task_struct *tsk = task_of(se);
1195 
1196 			if (tsk->state & TASK_INTERRUPTIBLE)
1197 				se->statistics.sleep_start = rq_of(cfs_rq)->clock;
1198 			if (tsk->state & TASK_UNINTERRUPTIBLE)
1199 				se->statistics.block_start = rq_of(cfs_rq)->clock;
1200 		}
1201 #endif
1202 	}
1203 
1204 	clear_buddies(cfs_rq, se);
1205 
1206 	if (se != cfs_rq->curr)
1207 		__dequeue_entity(cfs_rq, se);
1208 	se->on_rq = 0;
1209 	update_cfs_load(cfs_rq, 0);
1210 	account_entity_dequeue(cfs_rq, se);
1211 
1212 	/*
1213 	 * Normalize the entity after updating the min_vruntime because the
1214 	 * update can refer to the ->curr item and we need to reflect this
1215 	 * movement in our normalized position.
1216 	 */
1217 	if (!(flags & DEQUEUE_SLEEP))
1218 		se->vruntime -= cfs_rq->min_vruntime;
1219 
1220 	/* return excess runtime on last dequeue */
1221 	return_cfs_rq_runtime(cfs_rq);
1222 
1223 	update_min_vruntime(cfs_rq);
1224 	update_cfs_shares(cfs_rq);
1225 }
1226 
1227 /*
1228  * Preempt the current task with a newly woken task if needed:
1229  */
1230 static void
1231 check_preempt_tick(struct cfs_rq *cfs_rq, struct sched_entity *curr)
1232 {
1233 	unsigned long ideal_runtime, delta_exec;
1234 	struct sched_entity *se;
1235 	s64 delta;
1236 
1237 	ideal_runtime = sched_slice(cfs_rq, curr);
1238 	delta_exec = curr->sum_exec_runtime - curr->prev_sum_exec_runtime;
1239 	if (delta_exec > ideal_runtime) {
1240 		resched_task(rq_of(cfs_rq)->curr);
1241 		/*
1242 		 * The current task ran long enough, ensure it doesn't get
1243 		 * re-elected due to buddy favours.
1244 		 */
1245 		clear_buddies(cfs_rq, curr);
1246 		return;
1247 	}
1248 
1249 	/*
1250 	 * Ensure that a task that missed wakeup preemption by a
1251 	 * narrow margin doesn't have to wait for a full slice.
1252 	 * This also mitigates buddy induced latencies under load.
1253 	 */
1254 	if (delta_exec < sysctl_sched_min_granularity)
1255 		return;
1256 
1257 	se = __pick_first_entity(cfs_rq);
1258 	delta = curr->vruntime - se->vruntime;
1259 
1260 	if (delta < 0)
1261 		return;
1262 
1263 	if (delta > ideal_runtime)
1264 		resched_task(rq_of(cfs_rq)->curr);
1265 }
1266 
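/*
 * Example (illustrative): with an ideal_runtime of 4ms the current task
 * is rescheduled once it has run 4ms past prev_sum_exec_runtime; before
 * that it can still be preempted early if it has run at least
 * sched_min_granularity and its vruntime leads the leftmost entity's by
 * more than ideal_runtime.
 */
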
1267 static void
1268 set_next_entity(struct cfs_rq *cfs_rq, struct sched_entity *se)
1269 {
1270 	/* 'current' is not kept within the tree. */
1271 	if (se->on_rq) {
1272 		/*
1273 		 * Any task has to be enqueued before it gets to execute on
1274 		 * a CPU. So account for the time it spent waiting on the
1275 		 * runqueue.
1276 		 */
1277 		update_stats_wait_end(cfs_rq, se);
1278 		__dequeue_entity(cfs_rq, se);
1279 	}
1280 
1281 	update_stats_curr_start(cfs_rq, se);
1282 	cfs_rq->curr = se;
1283 #ifdef CONFIG_SCHEDSTATS
1284 	/*
1285 	 * Track our maximum slice length, if the CPU's load is at
1286 	 * least twice that of our own weight (i.e. don't track it
1287 	 * when there are only lesser-weight tasks around):
1288 	 */
1289 	if (rq_of(cfs_rq)->load.weight >= 2*se->load.weight) {
1290 		se->statistics.slice_max = max(se->statistics.slice_max,
1291 			se->sum_exec_runtime - se->prev_sum_exec_runtime);
1292 	}
1293 #endif
1294 	se->prev_sum_exec_runtime = se->sum_exec_runtime;
1295 }
1296 
1297 static int
1298 wakeup_preempt_entity(struct sched_entity *curr, struct sched_entity *se);
1299 
1300 /*
1301  * Pick the next process, keeping these things in mind, in this order:
1302  * 1) keep things fair between processes/task groups
1303  * 2) pick the "next" process, since someone really wants that to run
1304  * 3) pick the "last" process, for cache locality
1305  * 4) do not run the "skip" process, if something else is available
1306  */
1307 static struct sched_entity *pick_next_entity(struct cfs_rq *cfs_rq)
1308 {
1309 	struct sched_entity *se = __pick_first_entity(cfs_rq);
1310 	struct sched_entity *left = se;
1311 
1312 	/*
1313 	 * Avoid running the skip buddy, if running something else can
1314 	 * be done without getting too unfair.
1315 	 */
1316 	if (cfs_rq->skip == se) {
1317 		struct sched_entity *second = __pick_next_entity(se);
1318 		if (second && wakeup_preempt_entity(second, left) < 1)
1319 			se = second;
1320 	}
1321 
1322 	/*
1323 	 * Prefer last buddy, try to return the CPU to a preempted task.
1324 	 */
1325 	if (cfs_rq->last && wakeup_preempt_entity(cfs_rq->last, left) < 1)
1326 		se = cfs_rq->last;
1327 
1328 	/*
1329 	 * Someone really wants this to run. If it's not unfair, run it.
1330 	 */
1331 	if (cfs_rq->next && wakeup_preempt_entity(cfs_rq->next, left) < 1)
1332 		se = cfs_rq->next;
1333 
1334 	clear_buddies(cfs_rq, se);
1335 
1336 	return se;
1337 }
1338 
1339 static void check_cfs_rq_runtime(struct cfs_rq *cfs_rq);
1340 
1341 static void put_prev_entity(struct cfs_rq *cfs_rq, struct sched_entity *prev)
1342 {
1343 	/*
1344 	 * If still on the runqueue then deactivate_task()
1345 	 * was not called and update_curr() has to be done:
1346 	 */
1347 	if (prev->on_rq)
1348 		update_curr(cfs_rq);
1349 
1350 	/* throttle cfs_rqs exceeding runtime */
1351 	check_cfs_rq_runtime(cfs_rq);
1352 
1353 	check_spread(cfs_rq, prev);
1354 	if (prev->on_rq) {
1355 		update_stats_wait_start(cfs_rq, prev);
1356 		/* Put 'current' back into the tree. */
1357 		__enqueue_entity(cfs_rq, prev);
1358 	}
1359 	cfs_rq->curr = NULL;
1360 }
1361 
1362 static void
1363 entity_tick(struct cfs_rq *cfs_rq, struct sched_entity *curr, int queued)
1364 {
1365 	/*
1366 	 * Update run-time statistics of the 'current'.
1367 	 */
1368 	update_curr(cfs_rq);
1369 
1370 	/*
1371 	 * Update share accounting for long-running entities.
1372 	 */
1373 	update_entity_shares_tick(cfs_rq);
1374 
1375 #ifdef CONFIG_SCHED_HRTICK
1376 	/*
1377 	 * queued ticks are scheduled to match the slice, so don't bother
1378 	 * validating it and just reschedule.
1379 	 */
1380 	if (queued) {
1381 		resched_task(rq_of(cfs_rq)->curr);
1382 		return;
1383 	}
1384 	/*
1385 	 * don't let the period tick interfere with the hrtick preemption
1386 	 */
1387 	if (!sched_feat(DOUBLE_TICK) &&
1388 			hrtimer_active(&rq_of(cfs_rq)->hrtick_timer))
1389 		return;
1390 #endif
1391 
1392 	if (cfs_rq->nr_running > 1)
1393 		check_preempt_tick(cfs_rq, curr);
1394 }
1395 
1396 
1397 /**************************************************
1398  * CFS bandwidth control machinery
1399  */
1400 
1401 #ifdef CONFIG_CFS_BANDWIDTH
1402 
1403 #ifdef HAVE_JUMP_LABEL
1404 static struct jump_label_key __cfs_bandwidth_used;
1405 
1406 static inline bool cfs_bandwidth_used(void)
1407 {
1408 	return static_branch(&__cfs_bandwidth_used);
1409 }
1410 
1411 void account_cfs_bandwidth_used(int enabled, int was_enabled)
1412 {
1413 	/* only need to count groups transitioning between enabled/!enabled */
1414 	if (enabled && !was_enabled)
1415 		jump_label_inc(&__cfs_bandwidth_used);
1416 	else if (!enabled && was_enabled)
1417 		jump_label_dec(&__cfs_bandwidth_used);
1418 }
1419 #else /* HAVE_JUMP_LABEL */
1420 static bool cfs_bandwidth_used(void)
1421 {
1422 	return true;
1423 }
1424 
1425 void account_cfs_bandwidth_used(int enabled, int was_enabled) {}
1426 #endif /* HAVE_JUMP_LABEL */
1427 
1428 /*
1429  * default period for cfs group bandwidth.
1430  * default: 0.1s, units: nanoseconds
1431  */
1432 static inline u64 default_cfs_period(void)
1433 {
1434 	return 100000000ULL;
1435 }
1436 
1437 static inline u64 sched_cfs_bandwidth_slice(void)
1438 {
1439 	return (u64)sysctl_sched_cfs_bandwidth_slice * NSEC_PER_USEC;
1440 }
1441 
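/*
 * Example (illustrative): a group configured with a 25ms quota per 100ms
 * period is limited to about a quarter of one CPU; its cfs_rqs draw that
 * quota from the global pool in sched_cfs_bandwidth_slice() chunks
 * (5ms by default).
 */
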
1442 /*
1443  * Replenish runtime according to assigned quota and update expiration time.
1444  * We use sched_clock_cpu directly instead of rq->clock to avoid adding
1445  * additional synchronization around rq->lock.
1446  *
1447  * requires cfs_b->lock
1448  */
1449 void __refill_cfs_bandwidth_runtime(struct cfs_bandwidth *cfs_b)
1450 {
1451 	u64 now;
1452 
1453 	if (cfs_b->quota == RUNTIME_INF)
1454 		return;
1455 
1456 	now = sched_clock_cpu(smp_processor_id());
1457 	cfs_b->runtime = cfs_b->quota;
1458 	cfs_b->runtime_expires = now + ktime_to_ns(cfs_b->period);
1459 }
1460 
1461 static inline struct cfs_bandwidth *tg_cfs_bandwidth(struct task_group *tg)
1462 {
1463 	return &tg->cfs_bandwidth;
1464 }
1465 
1466 /* returns 0 on failure to allocate runtime */
1467 static int assign_cfs_rq_runtime(struct cfs_rq *cfs_rq)
1468 {
1469 	struct task_group *tg = cfs_rq->tg;
1470 	struct cfs_bandwidth *cfs_b = tg_cfs_bandwidth(tg);
1471 	u64 amount = 0, min_amount, expires;
1472 
1473 	/* note: this is a positive sum as runtime_remaining <= 0 */
1474 	min_amount = sched_cfs_bandwidth_slice() - cfs_rq->runtime_remaining;
1475 
1476 	raw_spin_lock(&cfs_b->lock);
1477 	if (cfs_b->quota == RUNTIME_INF)
1478 		amount = min_amount;
1479 	else {
1480 		/*
1481 		 * If the bandwidth pool has become inactive, then at least one
1482 		 * period must have elapsed since the last consumption.
1483 		 * Refresh the global state and ensure bandwidth timer becomes
1484 		 * active.
1485 		 */
1486 		if (!cfs_b->timer_active) {
1487 			__refill_cfs_bandwidth_runtime(cfs_b);
1488 			__start_cfs_bandwidth(cfs_b);
1489 		}
1490 
1491 		if (cfs_b->runtime > 0) {
1492 			amount = min(cfs_b->runtime, min_amount);
1493 			cfs_b->runtime -= amount;
1494 			cfs_b->idle = 0;
1495 		}
1496 	}
1497 	expires = cfs_b->runtime_expires;
1498 	raw_spin_unlock(&cfs_b->lock);
1499 
1500 	cfs_rq->runtime_remaining += amount;
1501 	/*
1502 	 * we may have advanced our local expiration to account for allowed
1503 	 * spread between our sched_clock and the one on which runtime was
1504 	 * issued.
1505 	 */
1506 	if ((s64)(expires - cfs_rq->runtime_expires) > 0)
1507 		cfs_rq->runtime_expires = expires;
1508 
1509 	return cfs_rq->runtime_remaining > 0;
1510 }
1511 
1512 /*
1513  * Note: This depends on the synchronization provided by sched_clock and the
1514  * fact that rq->clock snapshots this value.
1515  */
1516 static void expire_cfs_rq_runtime(struct cfs_rq *cfs_rq)
1517 {
1518 	struct cfs_bandwidth *cfs_b = tg_cfs_bandwidth(cfs_rq->tg);
1519 	struct rq *rq = rq_of(cfs_rq);
1520 
1521 	/* if the deadline is ahead of our clock, nothing to do */
1522 	if (likely((s64)(rq->clock - cfs_rq->runtime_expires) < 0))
1523 		return;
1524 
1525 	if (cfs_rq->runtime_remaining < 0)
1526 		return;
1527 
1528 	/*
1529 	 * If the local deadline has passed we have to consider the
1530 	 * possibility that our sched_clock is 'fast' and the global deadline
1531 	 * has not truly expired.
1532 	 *
1533 	 * Fortunately we can determine whether this is the case by checking
1534 	 * whether the global deadline has advanced.
1535 	 */
1536 
1537 	if ((s64)(cfs_rq->runtime_expires - cfs_b->runtime_expires) >= 0) {
1538 		/* extend local deadline, drift is bounded above by 2 ticks */
1539 		cfs_rq->runtime_expires += TICK_NSEC;
1540 	} else {
1541 		/* global deadline is ahead, expiration has passed */
1542 		cfs_rq->runtime_remaining = 0;
1543 	}
1544 }
1545 
1546 static void __account_cfs_rq_runtime(struct cfs_rq *cfs_rq,
1547 				     unsigned long delta_exec)
1548 {
1549 	/* dock delta_exec before expiring quota (as it could span periods) */
1550 	cfs_rq->runtime_remaining -= delta_exec;
1551 	expire_cfs_rq_runtime(cfs_rq);
1552 
1553 	if (likely(cfs_rq->runtime_remaining > 0))
1554 		return;
1555 
1556 	/*
1557 	 * if we're unable to extend our runtime we resched so that the active
1558 	 * hierarchy can be throttled
1559 	 */
1560 	if (!assign_cfs_rq_runtime(cfs_rq) && likely(cfs_rq->curr))
1561 		resched_task(rq_of(cfs_rq)->curr);
1562 }
1563 
1564 static __always_inline void account_cfs_rq_runtime(struct cfs_rq *cfs_rq,
1565 						   unsigned long delta_exec)
1566 {
1567 	if (!cfs_bandwidth_used() || !cfs_rq->runtime_enabled)
1568 		return;
1569 
1570 	__account_cfs_rq_runtime(cfs_rq, delta_exec);
1571 }
1572 
1573 static inline int cfs_rq_throttled(struct cfs_rq *cfs_rq)
1574 {
1575 	return cfs_bandwidth_used() && cfs_rq->throttled;
1576 }
1577 
1578 /* check whether cfs_rq, or any parent, is throttled */
1579 static inline int throttled_hierarchy(struct cfs_rq *cfs_rq)
1580 {
1581 	return cfs_bandwidth_used() && cfs_rq->throttle_count;
1582 }
1583 
1584 /*
1585  * Ensure that neither of the group entities corresponding to src_cpu or
1586  * dest_cpu are members of a throttled hierarchy when performing group
1587  * load-balance operations.
1588  */
1589 static inline int throttled_lb_pair(struct task_group *tg,
1590 				    int src_cpu, int dest_cpu)
1591 {
1592 	struct cfs_rq *src_cfs_rq, *dest_cfs_rq;
1593 
1594 	src_cfs_rq = tg->cfs_rq[src_cpu];
1595 	dest_cfs_rq = tg->cfs_rq[dest_cpu];
1596 
1597 	return throttled_hierarchy(src_cfs_rq) ||
1598 	       throttled_hierarchy(dest_cfs_rq);
1599 }
1600 
1601 /* updated child weight may affect parent so we have to do this bottom up */
1602 static int tg_unthrottle_up(struct task_group *tg, void *data)
1603 {
1604 	struct rq *rq = data;
1605 	struct cfs_rq *cfs_rq = tg->cfs_rq[cpu_of(rq)];
1606 
1607 	cfs_rq->throttle_count--;
1608 #ifdef CONFIG_SMP
1609 	if (!cfs_rq->throttle_count) {
1610 		u64 delta = rq->clock_task - cfs_rq->load_stamp;
1611 
1612 		/* leaving throttled state, advance shares averaging windows */
1613 		cfs_rq->load_stamp += delta;
1614 		cfs_rq->load_last += delta;
1615 
1616 		/* update entity weight now that we are on_rq again */
1617 		update_cfs_shares(cfs_rq);
1618 	}
1619 #endif
1620 
1621 	return 0;
1622 }
1623 
1624 static int tg_throttle_down(struct task_group *tg, void *data)
1625 {
1626 	struct rq *rq = data;
1627 	struct cfs_rq *cfs_rq = tg->cfs_rq[cpu_of(rq)];
1628 
1629 	/* group is entering throttled state, record last load */
1630 	if (!cfs_rq->throttle_count)
1631 		update_cfs_load(cfs_rq, 0);
1632 	cfs_rq->throttle_count++;
1633 
1634 	return 0;
1635 }
1636 
1637 static void throttle_cfs_rq(struct cfs_rq *cfs_rq)
1638 {
1639 	struct rq *rq = rq_of(cfs_rq);
1640 	struct cfs_bandwidth *cfs_b = tg_cfs_bandwidth(cfs_rq->tg);
1641 	struct sched_entity *se;
1642 	long task_delta, dequeue = 1;
1643 
1644 	se = cfs_rq->tg->se[cpu_of(rq_of(cfs_rq))];
1645 
1646 	/* account load preceding throttle */
1647 	rcu_read_lock();
1648 	walk_tg_tree_from(cfs_rq->tg, tg_throttle_down, tg_nop, (void *)rq);
1649 	rcu_read_unlock();
1650 
1651 	task_delta = cfs_rq->h_nr_running;
1652 	for_each_sched_entity(se) {
1653 		struct cfs_rq *qcfs_rq = cfs_rq_of(se);
1654 		/* throttled entity or throttle-on-deactivate */
1655 		if (!se->on_rq)
1656 			break;
1657 
1658 		if (dequeue)
1659 			dequeue_entity(qcfs_rq, se, DEQUEUE_SLEEP);
1660 		qcfs_rq->h_nr_running -= task_delta;
1661 
1662 		if (qcfs_rq->load.weight)
1663 			dequeue = 0;
1664 	}
1665 
1666 	if (!se)
1667 		rq->nr_running -= task_delta;
1668 
1669 	cfs_rq->throttled = 1;
1670 	cfs_rq->throttled_timestamp = rq->clock;
1671 	raw_spin_lock(&cfs_b->lock);
1672 	list_add_tail_rcu(&cfs_rq->throttled_list, &cfs_b->throttled_cfs_rq);
1673 	raw_spin_unlock(&cfs_b->lock);
1674 }
1675 
1676 void unthrottle_cfs_rq(struct cfs_rq *cfs_rq)
1677 {
1678 	struct rq *rq = rq_of(cfs_rq);
1679 	struct cfs_bandwidth *cfs_b = tg_cfs_bandwidth(cfs_rq->tg);
1680 	struct sched_entity *se;
1681 	int enqueue = 1;
1682 	long task_delta;
1683 
1684 	se = cfs_rq->tg->se[cpu_of(rq_of(cfs_rq))];
1685 
1686 	cfs_rq->throttled = 0;
1687 	raw_spin_lock(&cfs_b->lock);
1688 	cfs_b->throttled_time += rq->clock - cfs_rq->throttled_timestamp;
1689 	list_del_rcu(&cfs_rq->throttled_list);
1690 	raw_spin_unlock(&cfs_b->lock);
1691 	cfs_rq->throttled_timestamp = 0;
1692 
1693 	update_rq_clock(rq);
1694 	/* update hierarchical throttle state */
1695 	walk_tg_tree_from(cfs_rq->tg, tg_nop, tg_unthrottle_up, (void *)rq);
1696 
1697 	if (!cfs_rq->load.weight)
1698 		return;
1699 
1700 	task_delta = cfs_rq->h_nr_running;
1701 	for_each_sched_entity(se) {
1702 		if (se->on_rq)
1703 			enqueue = 0;
1704 
1705 		cfs_rq = cfs_rq_of(se);
1706 		if (enqueue)
1707 			enqueue_entity(cfs_rq, se, ENQUEUE_WAKEUP);
1708 		cfs_rq->h_nr_running += task_delta;
1709 
1710 		if (cfs_rq_throttled(cfs_rq))
1711 			break;
1712 	}
1713 
1714 	if (!se)
1715 		rq->nr_running += task_delta;
1716 
1717 	/* determine whether we need to wake up potentially idle cpu */
1718 	if (rq->curr == rq->idle && rq->cfs.nr_running)
1719 		resched_task(rq->curr);
1720 }
1721 
1722 static u64 distribute_cfs_runtime(struct cfs_bandwidth *cfs_b,
1723 		u64 remaining, u64 expires)
1724 {
1725 	struct cfs_rq *cfs_rq;
1726 	u64 runtime = remaining;
1727 
1728 	rcu_read_lock();
1729 	list_for_each_entry_rcu(cfs_rq, &cfs_b->throttled_cfs_rq,
1730 				throttled_list) {
1731 		struct rq *rq = rq_of(cfs_rq);
1732 
1733 		raw_spin_lock(&rq->lock);
1734 		if (!cfs_rq_throttled(cfs_rq))
1735 			goto next;
1736 
1737 		runtime = -cfs_rq->runtime_remaining + 1;
1738 		if (runtime > remaining)
1739 			runtime = remaining;
1740 		remaining -= runtime;
1741 
1742 		cfs_rq->runtime_remaining += runtime;
1743 		cfs_rq->runtime_expires = expires;
1744 
1745 		/* we check whether we're throttled above */
1746 		if (cfs_rq->runtime_remaining > 0)
1747 			unthrottle_cfs_rq(cfs_rq);
1748 
1749 next:
1750 		raw_spin_unlock(&rq->lock);
1751 
1752 		if (!remaining)
1753 			break;
1754 	}
1755 	rcu_read_unlock();
1756 
1757 	return remaining;
1758 }
1759 
1760 /*
1761  * Responsible for refilling a task_group's bandwidth and unthrottling its
1762  * cfs_rqs as appropriate. If there has been no activity within the last
1763  * period the timer is deactivated until scheduling resumes; cfs_b->idle is
1764  * used to track this state.
1765  */
1766 static int do_sched_cfs_period_timer(struct cfs_bandwidth *cfs_b, int overrun)
1767 {
1768 	u64 runtime, runtime_expires;
1769 	int idle = 1, throttled;
1770 
1771 	raw_spin_lock(&cfs_b->lock);
1772 	/* no need to continue the timer with no bandwidth constraint */
1773 	if (cfs_b->quota == RUNTIME_INF)
1774 		goto out_unlock;
1775 
1776 	throttled = !list_empty(&cfs_b->throttled_cfs_rq);
1777 	/* idle depends on !throttled (for the case of a large deficit) */
1778 	idle = cfs_b->idle && !throttled;
1779 	cfs_b->nr_periods += overrun;
1780 
1781 	/* if we're going inactive then everything else can be deferred */
1782 	if (idle)
1783 		goto out_unlock;
1784 
1785 	__refill_cfs_bandwidth_runtime(cfs_b);
1786 
1787 	if (!throttled) {
1788 		/* mark as potentially idle for the upcoming period */
1789 		cfs_b->idle = 1;
1790 		goto out_unlock;
1791 	}
1792 
1793 	/* account preceding periods in which throttling occurred */
1794 	cfs_b->nr_throttled += overrun;
1795 
1796 	/*
1797 	 * There are throttled entities so we must first use the new bandwidth
1798 	 * to unthrottle them before making it generally available.  This
1799 	 * ensures that all existing debts will be paid before a new cfs_rq is
1800 	 * allowed to run.
1801 	 */
1802 	runtime = cfs_b->runtime;
1803 	runtime_expires = cfs_b->runtime_expires;
1804 	cfs_b->runtime = 0;
1805 
1806 	/*
1807 	 * This check is repeated as we are holding onto the new bandwidth
1808 	 * while we unthrottle.  This can potentially race with an unthrottled
1809 	 * group trying to acquire new bandwidth from the global pool.
1810 	 */
1811 	while (throttled && runtime > 0) {
1812 		raw_spin_unlock(&cfs_b->lock);
1813 		/* we can't nest cfs_b->lock while distributing bandwidth */
1814 		runtime = distribute_cfs_runtime(cfs_b, runtime,
1815 						 runtime_expires);
1816 		raw_spin_lock(&cfs_b->lock);
1817 
1818 		throttled = !list_empty(&cfs_b->throttled_cfs_rq);
1819 	}
1820 
1821 	/* return (any) remaining runtime */
1822 	cfs_b->runtime = runtime;
1823 	/*
1824 	 * While we are ensured activity in the period following an
1825 	 * unthrottle, this also covers the case in which the new bandwidth is
1826 	 * insufficient to cover the existing bandwidth deficit.  (Forcing the
1827 	 * timer to remain active while there are any throttled entities.)
1828 	 */
1829 	cfs_b->idle = 0;
1830 out_unlock:
1831 	if (idle)
1832 		cfs_b->timer_active = 0;
1833 	raw_spin_unlock(&cfs_b->lock);
1834 
1835 	return idle;
1836 }
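
/*
 * For context, the quota and period driving this timer come from the
 * per-cgroup cpu.cfs_quota_us and cpu.cfs_period_us knobs; e.g. a quota of
 * 50000us with a 100000us period caps the group at roughly half a CPU per
 * period (the values here are only an example of a typical configuration).
 */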
1837 
1838 /* a cfs_rq won't donate quota below this amount */
1839 static const u64 min_cfs_rq_runtime = 1 * NSEC_PER_MSEC;
1840 /* minimum remaining period time to redistribute slack quota */
1841 static const u64 min_bandwidth_expiration = 2 * NSEC_PER_MSEC;
1842 /* how long we wait to gather additional slack before distributing */
1843 static const u64 cfs_bandwidth_slack_period = 5 * NSEC_PER_MSEC;
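
/*
 * Taken together: a cfs_rq keeps at least 1ms of quota for itself, and slack
 * return only arms the 5ms slack timer when the period timer is not already
 * due within the next 7ms (5ms + 2ms), so a slack distribution never races
 * with a quota refresh that is about to happen anyway.
 */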
1844 
1845 /* are we near the end of the current quota period? */
1846 static int runtime_refresh_within(struct cfs_bandwidth *cfs_b, u64 min_expire)
1847 {
1848 	struct hrtimer *refresh_timer = &cfs_b->period_timer;
1849 	u64 remaining;
1850 
1851 	/* if the call-back is running, a quota refresh is already occurring */
1852 	if (hrtimer_callback_running(refresh_timer))
1853 		return 1;
1854 
1855 	/* is a quota refresh about to occur? */
1856 	remaining = ktime_to_ns(hrtimer_expires_remaining(refresh_timer));
1857 	if (remaining < min_expire)
1858 		return 1;
1859 
1860 	return 0;
1861 }
1862 
1863 static void start_cfs_slack_bandwidth(struct cfs_bandwidth *cfs_b)
1864 {
1865 	u64 min_left = cfs_bandwidth_slack_period + min_bandwidth_expiration;
1866 
1867 	/* if there's a quota refresh soon don't bother with slack */
1868 	if (runtime_refresh_within(cfs_b, min_left))
1869 		return;
1870 
1871 	start_bandwidth_timer(&cfs_b->slack_timer,
1872 				ns_to_ktime(cfs_bandwidth_slack_period));
1873 }
1874 
1875 /* we know any runtime found here is valid as update_curr() precedes return */
1876 static void __return_cfs_rq_runtime(struct cfs_rq *cfs_rq)
1877 {
1878 	struct cfs_bandwidth *cfs_b = tg_cfs_bandwidth(cfs_rq->tg);
1879 	s64 slack_runtime = cfs_rq->runtime_remaining - min_cfs_rq_runtime;
1880 
1881 	if (slack_runtime <= 0)
1882 		return;
1883 
1884 	raw_spin_lock(&cfs_b->lock);
1885 	if (cfs_b->quota != RUNTIME_INF &&
1886 	    cfs_rq->runtime_expires == cfs_b->runtime_expires) {
1887 		cfs_b->runtime += slack_runtime;
1888 
1889 		/* we are under rq->lock, defer unthrottling using a timer */
1890 		if (cfs_b->runtime > sched_cfs_bandwidth_slice() &&
1891 		    !list_empty(&cfs_b->throttled_cfs_rq))
1892 			start_cfs_slack_bandwidth(cfs_b);
1893 	}
1894 	raw_spin_unlock(&cfs_b->lock);
1895 
1896 	/* even if it's not valid for return we don't want to try again */
1897 	cfs_rq->runtime_remaining -= slack_runtime;
1898 }
1899 
1900 static __always_inline void return_cfs_rq_runtime(struct cfs_rq *cfs_rq)
1901 {
1902 	if (!cfs_bandwidth_used())
1903 		return;
1904 
1905 	if (!cfs_rq->runtime_enabled || cfs_rq->nr_running)
1906 		return;
1907 
1908 	__return_cfs_rq_runtime(cfs_rq);
1909 }
1910 
1911 /*
1912  * This is done with a timer (instead of inline with bandwidth return) since
1913  * it's necessary to juggle rq->locks to unthrottle their respective cfs_rqs.
1914  */
1915 static void do_sched_cfs_slack_timer(struct cfs_bandwidth *cfs_b)
1916 {
1917 	u64 runtime = 0, slice = sched_cfs_bandwidth_slice();
1918 	u64 expires;
1919 
1920 	/* confirm we're still not at a refresh boundary */
1921 	if (runtime_refresh_within(cfs_b, min_bandwidth_expiration))
1922 		return;
1923 
1924 	raw_spin_lock(&cfs_b->lock);
1925 	if (cfs_b->quota != RUNTIME_INF && cfs_b->runtime > slice) {
1926 		runtime = cfs_b->runtime;
1927 		cfs_b->runtime = 0;
1928 	}
1929 	expires = cfs_b->runtime_expires;
1930 	raw_spin_unlock(&cfs_b->lock);
1931 
1932 	if (!runtime)
1933 		return;
1934 
1935 	runtime = distribute_cfs_runtime(cfs_b, runtime, expires);
1936 
1937 	raw_spin_lock(&cfs_b->lock);
1938 	if (expires == cfs_b->runtime_expires)
1939 		cfs_b->runtime = runtime;
1940 	raw_spin_unlock(&cfs_b->lock);
1941 }
1942 
1943 /*
1944  * When a group wakes up we want to make sure that its quota is not already
1945  * expired/exceeded, otherwise it may be allowed to steal additional ticks of
1946  * runtime as update_curr() throttling cannot trigger until it's on-rq.
1947  */
1948 static void check_enqueue_throttle(struct cfs_rq *cfs_rq)
1949 {
1950 	if (!cfs_bandwidth_used())
1951 		return;
1952 
1953 	/* an active group must be handled by the update_curr()->put() path */
1954 	if (!cfs_rq->runtime_enabled || cfs_rq->curr)
1955 		return;
1956 
1957 	/* ensure the group is not already throttled */
1958 	if (cfs_rq_throttled(cfs_rq))
1959 		return;
1960 
1961 	/* update runtime allocation */
1962 	account_cfs_rq_runtime(cfs_rq, 0);
1963 	if (cfs_rq->runtime_remaining <= 0)
1964 		throttle_cfs_rq(cfs_rq);
1965 }
1966 
1967 /* conditionally throttle active cfs_rq's from put_prev_entity() */
1968 static void check_cfs_rq_runtime(struct cfs_rq *cfs_rq)
1969 {
1970 	if (!cfs_bandwidth_used())
1971 		return;
1972 
1973 	if (likely(!cfs_rq->runtime_enabled || cfs_rq->runtime_remaining > 0))
1974 		return;
1975 
1976 	/*
1977 	 * it's possible for a throttled entity to be forced into a running
1978 	 * state (e.g. set_curr_task), in this case we're finished.
1979 	 */
1980 	if (cfs_rq_throttled(cfs_rq))
1981 		return;
1982 
1983 	throttle_cfs_rq(cfs_rq);
1984 }
1985 
1986 static inline u64 default_cfs_period(void);
1987 static int do_sched_cfs_period_timer(struct cfs_bandwidth *cfs_b, int overrun);
1988 static void do_sched_cfs_slack_timer(struct cfs_bandwidth *cfs_b);
1989 
1990 static enum hrtimer_restart sched_cfs_slack_timer(struct hrtimer *timer)
1991 {
1992 	struct cfs_bandwidth *cfs_b =
1993 		container_of(timer, struct cfs_bandwidth, slack_timer);
1994 	do_sched_cfs_slack_timer(cfs_b);
1995 
1996 	return HRTIMER_NORESTART;
1997 }
1998 
1999 static enum hrtimer_restart sched_cfs_period_timer(struct hrtimer *timer)
2000 {
2001 	struct cfs_bandwidth *cfs_b =
2002 		container_of(timer, struct cfs_bandwidth, period_timer);
2003 	ktime_t now;
2004 	int overrun;
2005 	int idle = 0;
2006 
2007 	for (;;) {
2008 		now = hrtimer_cb_get_time(timer);
2009 		overrun = hrtimer_forward(timer, now, cfs_b->period);
2010 
2011 		if (!overrun)
2012 			break;
2013 
2014 		idle = do_sched_cfs_period_timer(cfs_b, overrun);
2015 	}
2016 
2017 	return idle ? HRTIMER_NORESTART : HRTIMER_RESTART;
2018 }
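
/*
 * If the callback is delayed past one or more period boundaries,
 * hrtimer_forward() reports how many periods were missed and that count is
 * passed down as 'overrun', so nr_periods/nr_throttled accounting stays
 * accurate even when the timer fires late.
 */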
2019 
2020 void init_cfs_bandwidth(struct cfs_bandwidth *cfs_b)
2021 {
2022 	raw_spin_lock_init(&cfs_b->lock);
2023 	cfs_b->runtime = 0;
2024 	cfs_b->quota = RUNTIME_INF;
2025 	cfs_b->period = ns_to_ktime(default_cfs_period());
2026 
2027 	INIT_LIST_HEAD(&cfs_b->throttled_cfs_rq);
2028 	hrtimer_init(&cfs_b->period_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
2029 	cfs_b->period_timer.function = sched_cfs_period_timer;
2030 	hrtimer_init(&cfs_b->slack_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
2031 	cfs_b->slack_timer.function = sched_cfs_slack_timer;
2032 }
2033 
2034 static void init_cfs_rq_runtime(struct cfs_rq *cfs_rq)
2035 {
2036 	cfs_rq->runtime_enabled = 0;
2037 	INIT_LIST_HEAD(&cfs_rq->throttled_list);
2038 }
2039 
2040 /* requires cfs_b->lock, may release to reprogram timer */
2041 void __start_cfs_bandwidth(struct cfs_bandwidth *cfs_b)
2042 {
2043 	/*
2044 	 * The timer may be active because we're trying to set a new bandwidth
2045 	 * period or because we're racing with the tear-down path
2046 	 * (timer_active==0 becomes visible before the hrtimer call-back
2047 	 * terminates).  In either case we ensure that it's re-programmed
2048 	 */
2049 	while (unlikely(hrtimer_active(&cfs_b->period_timer))) {
2050 		raw_spin_unlock(&cfs_b->lock);
2051 		/* ensure cfs_b->lock is available while we wait */
2052 		hrtimer_cancel(&cfs_b->period_timer);
2053 
2054 		raw_spin_lock(&cfs_b->lock);
2055 		/* if someone else restarted the timer then we're done */
2056 		if (cfs_b->timer_active)
2057 			return;
2058 	}
2059 
2060 	cfs_b->timer_active = 1;
2061 	start_bandwidth_timer(&cfs_b->period_timer, cfs_b->period);
2062 }
2063 
2064 static void destroy_cfs_bandwidth(struct cfs_bandwidth *cfs_b)
2065 {
2066 	hrtimer_cancel(&cfs_b->period_timer);
2067 	hrtimer_cancel(&cfs_b->slack_timer);
2068 }
2069 
2070 void unthrottle_offline_cfs_rqs(struct rq *rq)
2071 {
2072 	struct cfs_rq *cfs_rq;
2073 
2074 	for_each_leaf_cfs_rq(rq, cfs_rq) {
2075 		struct cfs_bandwidth *cfs_b = tg_cfs_bandwidth(cfs_rq->tg);
2076 
2077 		if (!cfs_rq->runtime_enabled)
2078 			continue;
2079 
2080 		/*
2081 		 * clock_task is not advancing so we just need to make sure
2082 		 * there's some valid quota amount
2083 		 */
2084 		cfs_rq->runtime_remaining = cfs_b->quota;
2085 		if (cfs_rq_throttled(cfs_rq))
2086 			unthrottle_cfs_rq(cfs_rq);
2087 	}
2088 }
2089 
2090 #else /* CONFIG_CFS_BANDWIDTH */
2091 static void account_cfs_rq_runtime(struct cfs_rq *cfs_rq,
2092 				     unsigned long delta_exec) {}
2093 static void check_cfs_rq_runtime(struct cfs_rq *cfs_rq) {}
2094 static void check_enqueue_throttle(struct cfs_rq *cfs_rq) {}
2095 static void return_cfs_rq_runtime(struct cfs_rq *cfs_rq) {}
2096 
2097 static inline int cfs_rq_throttled(struct cfs_rq *cfs_rq)
2098 {
2099 	return 0;
2100 }
2101 
2102 static inline int throttled_hierarchy(struct cfs_rq *cfs_rq)
2103 {
2104 	return 0;
2105 }
2106 
2107 static inline int throttled_lb_pair(struct task_group *tg,
2108 				    int src_cpu, int dest_cpu)
2109 {
2110 	return 0;
2111 }
2112 
2113 void init_cfs_bandwidth(struct cfs_bandwidth *cfs_b) {}
2114 
2115 #ifdef CONFIG_FAIR_GROUP_SCHED
2116 static void init_cfs_rq_runtime(struct cfs_rq *cfs_rq) {}
2117 #endif
2118 
2119 static inline struct cfs_bandwidth *tg_cfs_bandwidth(struct task_group *tg)
2120 {
2121 	return NULL;
2122 }
2123 static inline void destroy_cfs_bandwidth(struct cfs_bandwidth *cfs_b) {}
2124 void unthrottle_offline_cfs_rqs(struct rq *rq) {}
2125 
2126 #endif /* CONFIG_CFS_BANDWIDTH */
2127 
2128 /**************************************************
2129  * CFS operations on tasks:
2130  */
2131 
2132 #ifdef CONFIG_SCHED_HRTICK
2133 static void hrtick_start_fair(struct rq *rq, struct task_struct *p)
2134 {
2135 	struct sched_entity *se = &p->se;
2136 	struct cfs_rq *cfs_rq = cfs_rq_of(se);
2137 
2138 	WARN_ON(task_rq(p) != rq);
2139 
2140 	if (cfs_rq->nr_running > 1) {
2141 		u64 slice = sched_slice(cfs_rq, se);
2142 		u64 ran = se->sum_exec_runtime - se->prev_sum_exec_runtime;
2143 		s64 delta = slice - ran;
2144 
2145 		if (delta < 0) {
2146 			if (rq->curr == p)
2147 				resched_task(p);
2148 			return;
2149 		}
2150 
2151 		/*
2152 		 * Don't schedule slices shorter than 10000ns, that just
2153 		 * doesn't make sense. Rely on vruntime for fairness.
2154 		 */
2155 		if (rq->curr != p)
2156 			delta = max_t(s64, 10000LL, delta);
2157 
2158 		hrtick_start(rq, delta);
2159 	}
2160 }
2161 
2162 /*
2163  * called from enqueue/dequeue and updates the hrtick when the
2164  * current task is from our class and nr_running is low enough
2165  * to matter.
2166  */
2167 static void hrtick_update(struct rq *rq)
2168 {
2169 	struct task_struct *curr = rq->curr;
2170 
2171 	if (!hrtick_enabled(rq) || curr->sched_class != &fair_sched_class)
2172 		return;
2173 
2174 	if (cfs_rq_of(&curr->se)->nr_running < sched_nr_latency)
2175 		hrtick_start_fair(rq, curr);
2176 }
2177 #else /* !CONFIG_SCHED_HRTICK */
2178 static inline void
2179 hrtick_start_fair(struct rq *rq, struct task_struct *p)
2180 {
2181 }
2182 
2183 static inline void hrtick_update(struct rq *rq)
2184 {
2185 }
2186 #endif
2187 
2188 /*
2189  * The enqueue_task method is called before nr_running is
2190  * increased. Here we update the fair scheduling stats and
2191  * then put the task into the rbtree:
2192  */
2193 static void
2194 enqueue_task_fair(struct rq *rq, struct task_struct *p, int flags)
2195 {
2196 	struct cfs_rq *cfs_rq;
2197 	struct sched_entity *se = &p->se;
2198 
2199 	for_each_sched_entity(se) {
2200 		if (se->on_rq)
2201 			break;
2202 		cfs_rq = cfs_rq_of(se);
2203 		enqueue_entity(cfs_rq, se, flags);
2204 
2205 		/*
2206 		 * end evaluation on encountering a throttled cfs_rq
2207 		 *
2208 		 * note: in the case of encountering a throttled cfs_rq we will
2209 		 * post the final h_nr_running increment below.
2210 		*/
2211 		if (cfs_rq_throttled(cfs_rq))
2212 			break;
2213 		cfs_rq->h_nr_running++;
2214 
2215 		flags = ENQUEUE_WAKEUP;
2216 	}
2217 
2218 	for_each_sched_entity(se) {
2219 		cfs_rq = cfs_rq_of(se);
2220 		cfs_rq->h_nr_running++;
2221 
2222 		if (cfs_rq_throttled(cfs_rq))
2223 			break;
2224 
2225 		update_cfs_load(cfs_rq, 0);
2226 		update_cfs_shares(cfs_rq);
2227 	}
2228 
2229 	if (!se)
2230 		inc_nr_running(rq);
2231 	hrtick_update(rq);
2232 }
2233 
2234 static void set_next_buddy(struct sched_entity *se);
2235 
2236 /*
2237  * The dequeue_task method is called before nr_running is
2238  * decreased. We remove the task from the rbtree and
2239  * update the fair scheduling stats:
2240  */
2241 static void dequeue_task_fair(struct rq *rq, struct task_struct *p, int flags)
2242 {
2243 	struct cfs_rq *cfs_rq;
2244 	struct sched_entity *se = &p->se;
2245 	int task_sleep = flags & DEQUEUE_SLEEP;
2246 
2247 	for_each_sched_entity(se) {
2248 		cfs_rq = cfs_rq_of(se);
2249 		dequeue_entity(cfs_rq, se, flags);
2250 
2251 		/*
2252 		 * end evaluation on encountering a throttled cfs_rq
2253 		 *
2254 		 * note: in the case of encountering a throttled cfs_rq we will
2255 		 * post the final h_nr_running decrement below.
2256 		*/
2257 		if (cfs_rq_throttled(cfs_rq))
2258 			break;
2259 		cfs_rq->h_nr_running--;
2260 
2261 		/* Don't dequeue parent if it has other entities besides us */
2262 		if (cfs_rq->load.weight) {
2263 			/*
2264 			 * Bias pick_next to pick a task from this cfs_rq, as
2265 			 * p is sleeping when it is within its sched_slice.
2266 			 */
2267 			if (task_sleep && parent_entity(se))
2268 				set_next_buddy(parent_entity(se));
2269 
2270 			/* avoid re-evaluating load for this entity */
2271 			se = parent_entity(se);
2272 			break;
2273 		}
2274 		flags |= DEQUEUE_SLEEP;
2275 	}
2276 
2277 	for_each_sched_entity(se) {
2278 		cfs_rq = cfs_rq_of(se);
2279 		cfs_rq->h_nr_running--;
2280 
2281 		if (cfs_rq_throttled(cfs_rq))
2282 			break;
2283 
2284 		update_cfs_load(cfs_rq, 0);
2285 		update_cfs_shares(cfs_rq);
2286 	}
2287 
2288 	if (!se)
2289 		dec_nr_running(rq);
2290 	hrtick_update(rq);
2291 }
2292 
2293 #ifdef CONFIG_SMP
2294 /* Used instead of source_load when we know the type == 0 */
2295 static unsigned long weighted_cpuload(const int cpu)
2296 {
2297 	return cpu_rq(cpu)->load.weight;
2298 }
2299 
2300 /*
2301  * Return a low guess at the load of a migration-source cpu weighted
2302  * according to the scheduling class and "nice" value.
2303  *
2304  * We want to under-estimate the load of migration sources, to
2305  * balance conservatively.
2306  */
2307 static unsigned long source_load(int cpu, int type)
2308 {
2309 	struct rq *rq = cpu_rq(cpu);
2310 	unsigned long total = weighted_cpuload(cpu);
2311 
2312 	if (type == 0 || !sched_feat(LB_BIAS))
2313 		return total;
2314 
2315 	return min(rq->cpu_load[type-1], total);
2316 }
2317 
2318 /*
2319  * Return a high guess at the load of a migration-target cpu weighted
2320  * according to the scheduling class and "nice" value.
2321  */
2322 static unsigned long target_load(int cpu, int type)
2323 {
2324 	struct rq *rq = cpu_rq(cpu);
2325 	unsigned long total = weighted_cpuload(cpu);
2326 
2327 	if (type == 0 || !sched_feat(LB_BIAS))
2328 		return total;
2329 
2330 	return max(rq->cpu_load[type-1], total);
2331 }
2332 
2333 static unsigned long power_of(int cpu)
2334 {
2335 	return cpu_rq(cpu)->cpu_power;
2336 }
2337 
2338 static unsigned long cpu_avg_load_per_task(int cpu)
2339 {
2340 	struct rq *rq = cpu_rq(cpu);
2341 	unsigned long nr_running = ACCESS_ONCE(rq->nr_running);
2342 
2343 	if (nr_running)
2344 		return rq->load.weight / nr_running;
2345 
2346 	return 0;
2347 }
2348 
2349 
2350 static void task_waking_fair(struct task_struct *p)
2351 {
2352 	struct sched_entity *se = &p->se;
2353 	struct cfs_rq *cfs_rq = cfs_rq_of(se);
2354 	u64 min_vruntime;
2355 
2356 #ifndef CONFIG_64BIT
2357 	u64 min_vruntime_copy;
2358 
2359 	do {
2360 		min_vruntime_copy = cfs_rq->min_vruntime_copy;
2361 		smp_rmb();
2362 		min_vruntime = cfs_rq->min_vruntime;
2363 	} while (min_vruntime != min_vruntime_copy);
2364 #else
2365 	min_vruntime = cfs_rq->min_vruntime;
2366 #endif
2367 
2368 	se->vruntime -= min_vruntime;
2369 }
2370 
2371 #ifdef CONFIG_FAIR_GROUP_SCHED
2372 /*
2373  * effective_load() calculates the load change as seen from the root_task_group
2374  *
2375  * Adding load to a group doesn't make a group heavier, but can cause movement
2376  * of group shares between cpus. Assuming the shares were perfectly aligned one
2377  * can calculate the shift in shares.
2378  *
2379  * Calculate the effective load difference if @wl is added (subtracted) to @tg
2380  * on this @cpu and results in a total addition (subtraction) of @wg to the
2381  * total group weight.
2382  *
2383  * Given a runqueue weight distribution (rw_i) we can compute a shares
2384  * distribution (s_i) using:
2385  *
2386  *   s_i = rw_i / \Sum rw_j						(1)
2387  *
2388  * Suppose we have 4 CPUs and our @tg is a direct child of the root group and
2389  * has 7 equal weight tasks, distributed as below (rw_i), with the resulting
2390  * shares distribution (s_i):
2391  *
2392  *   rw_i = {   2,   4,   1,   0 }
2393  *   s_i  = { 2/7, 4/7, 1/7,   0 }
2394  *
2395  * As per wake_affine() we're interested in the load of two CPUs (the CPU the
2396  * task used to run on and the CPU the waker is running on); we need to
2397  * compute the effect of waking a task on either CPU and, in case of a sync
2398  * wakeup, compute the effect of the current task going to sleep.
2399  *
2400  * So for a change of @wl to the local @cpu with an overall group weight change
2401  * of @wg we can compute the new shares distribution (s'_i) using:
2402  *
2403  *   s'_i = (rw_i + @wl) / (@wg + \Sum rw_j)				(2)
2404  *
2405  * Suppose we're interested in CPUs 0 and 1, and want to compute the load
2406  * differences in waking a task to CPU 0. The additional task changes the
2407  * weight and shares distributions like:
2408  *
2409  *   rw'_i = {   3,   4,   1,   0 }
2410  *   s'_i  = { 3/8, 4/8, 1/8,   0 }
2411  *
2412  * We can then compute the difference in effective weight by using:
2413  *
2414  *   dw_i = S * (s'_i - s_i)						(3)
2415  *
2416  * Where 'S' is the group weight as seen by its parent.
2417  *
2418  * Therefore the effective change in loads on CPU 0 would be 5/56 (3/8 - 2/7)
2419  * times the weight of the group. The effect on CPU 1 would be -4/56 (4/8 -
2420  * 4/7) times the weight of the group.
2421  */
2422 static long effective_load(struct task_group *tg, int cpu, long wl, long wg)
2423 {
2424 	struct sched_entity *se = tg->se[cpu];
2425 
2426 	if (!tg->parent)	/* the trivial, non-cgroup case */
2427 		return wl;
2428 
2429 	for_each_sched_entity(se) {
2430 		long w, W;
2431 
2432 		tg = se->my_q->tg;
2433 
2434 		/*
2435 		 * W = @wg + \Sum rw_j
2436 		 */
2437 		W = wg + calc_tg_weight(tg, se->my_q);
2438 
2439 		/*
2440 		 * w = rw_i + @wl
2441 		 */
2442 		w = se->my_q->load.weight + wl;
2443 
2444 		/*
2445 		 * wl = S * s'_i; see (2)
2446 		 */
2447 		if (W > 0 && w < W)
2448 			wl = (w * tg->shares) / W;
2449 		else
2450 			wl = tg->shares;
2451 
2452 		/*
2453 		 * Per the above, wl is the new se->load.weight value; since
2454 		 * those are clipped to [MIN_SHARES, ...) do so now. See
2455 		 * calc_cfs_shares().
2456 		 */
2457 		if (wl < MIN_SHARES)
2458 			wl = MIN_SHARES;
2459 
2460 		/*
2461 		 * wl = dw_i = S * (s'_i - s_i); see (3)
2462 		 */
2463 		wl -= se->load.weight;
2464 
2465 		/*
2466 		 * Recursively apply this logic to all parent groups to compute
2467 		 * the final effective load change on the root group. Since
2468 		 * only the @tg group gets extra weight, all parent groups can
2469 		 * only redistribute existing shares. @wl is the shift in shares
2470 		 * resulting from this level per the above.
2471 		 */
2472 		wg = 0;
2473 	}
2474 
2475 	return wl;
2476 }
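
/*
 * Working the example from the comment above through one iteration (a
 * single-level group, numbers illustrative): waking a task of weight 1 on
 * CPU 0 gives W = 1 + 7 = 8 and w = 2 + 1 = 3, so the new share is
 * wl = 3S/8; subtracting the old share of 2S/7 leaves dw_0 = 5S/56, i.e.
 * the 5/56 figure quoted for equation (3) above.
 */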
2477 #else
2478 
2479 static inline unsigned long effective_load(struct task_group *tg, int cpu,
2480 		unsigned long wl, unsigned long wg)
2481 {
2482 	return wl;
2483 }
2484 
2485 #endif
2486 
2487 static int wake_affine(struct sched_domain *sd, struct task_struct *p, int sync)
2488 {
2489 	s64 this_load, load;
2490 	int idx, this_cpu, prev_cpu;
2491 	unsigned long tl_per_task;
2492 	struct task_group *tg;
2493 	unsigned long weight;
2494 	int balanced;
2495 
2496 	idx	  = sd->wake_idx;
2497 	this_cpu  = smp_processor_id();
2498 	prev_cpu  = task_cpu(p);
2499 	load	  = source_load(prev_cpu, idx);
2500 	this_load = target_load(this_cpu, idx);
2501 
2502 	/*
2503 	 * If sync wakeup then subtract the (maximum possible)
2504 	 * effect of the currently running task from the load
2505 	 * of the current CPU:
2506 	 */
2507 	if (sync) {
2508 		tg = task_group(current);
2509 		weight = current->se.load.weight;
2510 
2511 		this_load += effective_load(tg, this_cpu, -weight, -weight);
2512 		load += effective_load(tg, prev_cpu, 0, -weight);
2513 	}
2514 
2515 	tg = task_group(p);
2516 	weight = p->se.load.weight;
2517 
2518 	/*
2519 	 * In low-load situations, where prev_cpu is idle and this_cpu is idle
2520 	 * due to the sync cause above having dropped this_load to 0, we'll
2521 	 * always have an imbalance, but there's really nothing you can do
2522 	 * about that, so that's good too.
2523 	 *
2524 	 * Otherwise check if either cpus are near enough in load to allow this
2525 	 * task to be woken on this_cpu.
2526 	 */
2527 	if (this_load > 0) {
2528 		s64 this_eff_load, prev_eff_load;
2529 
2530 		this_eff_load = 100;
2531 		this_eff_load *= power_of(prev_cpu);
2532 		this_eff_load *= this_load +
2533 			effective_load(tg, this_cpu, weight, weight);
2534 
2535 		prev_eff_load = 100 + (sd->imbalance_pct - 100) / 2;
2536 		prev_eff_load *= power_of(this_cpu);
2537 		prev_eff_load *= load + effective_load(tg, prev_cpu, 0, weight);
2538 
2539 		balanced = this_eff_load <= prev_eff_load;
2540 	} else
2541 		balanced = true;
2542 
2543 	/*
2544 	 * If the currently running task will sleep within
2545 	 * a reasonable amount of time then attract this newly
2546 	 * woken task:
2547 	 */
2548 	if (sync && balanced)
2549 		return 1;
2550 
2551 	schedstat_inc(p, se.statistics.nr_wakeups_affine_attempts);
2552 	tl_per_task = cpu_avg_load_per_task(this_cpu);
2553 
2554 	if (balanced ||
2555 	    (this_load <= load &&
2556 	     this_load + target_load(prev_cpu, idx) <= tl_per_task)) {
2557 		/*
2558 		 * This domain has SD_WAKE_AFFINE and
2559 		 * p is cache cold in this domain, and
2560 		 * there is no bad imbalance.
2561 		 */
2562 		schedstat_inc(sd, ttwu_move_affine);
2563 		schedstat_inc(p, se.statistics.nr_wakeups_affine);
2564 
2565 		return 1;
2566 	}
2567 	return 0;
2568 }
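
/*
 * Example of the balanced test above (assuming imbalance_pct == 125 and
 * equal cpu_power): prev_eff_load gets a factor of 100 + (125 - 100) / 2 =
 * 112 versus 100 for this_eff_load, so the waking CPU may appear up to
 * ~12% more loaded than prev_cpu and the wakeup is still treated as
 * balanced.
 */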
2569 
2570 /*
2571  * find_idlest_group finds and returns the least busy CPU group within the
2572  * domain.
2573  */
2574 static struct sched_group *
2575 find_idlest_group(struct sched_domain *sd, struct task_struct *p,
2576 		  int this_cpu, int load_idx)
2577 {
2578 	struct sched_group *idlest = NULL, *group = sd->groups;
2579 	unsigned long min_load = ULONG_MAX, this_load = 0;
2580 	int imbalance = 100 + (sd->imbalance_pct-100)/2;
2581 
2582 	do {
2583 		unsigned long load, avg_load;
2584 		int local_group;
2585 		int i;
2586 
2587 		/* Skip over this group if it has no CPUs allowed */
2588 		if (!cpumask_intersects(sched_group_cpus(group),
2589 					tsk_cpus_allowed(p)))
2590 			continue;
2591 
2592 		local_group = cpumask_test_cpu(this_cpu,
2593 					       sched_group_cpus(group));
2594 
2595 		/* Tally up the load of all CPUs in the group */
2596 		avg_load = 0;
2597 
2598 		for_each_cpu(i, sched_group_cpus(group)) {
2599 			/* Bias balancing toward cpus of our domain */
2600 			if (local_group)
2601 				load = source_load(i, load_idx);
2602 			else
2603 				load = target_load(i, load_idx);
2604 
2605 			avg_load += load;
2606 		}
2607 
2608 		/* Adjust by relative CPU power of the group */
2609 		avg_load = (avg_load * SCHED_POWER_SCALE) / group->sgp->power;
2610 
2611 		if (local_group) {
2612 			this_load = avg_load;
2613 		} else if (avg_load < min_load) {
2614 			min_load = avg_load;
2615 			idlest = group;
2616 		}
2617 	} while (group = group->next, group != sd->groups);
2618 
2619 	if (!idlest || 100*this_load < imbalance*min_load)
2620 		return NULL;
2621 	return idlest;
2622 }
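
/*
 * The imbalance threshold above works the same way: with imbalance_pct ==
 * 125 (an assumed, typical value) it becomes 112, so a remote group is only
 * returned when 100 * this_load >= 112 * min_load, i.e. when it is at least
 * ~12% less loaded than the local group.
 */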
2623 
2624 /*
2625  * find_idlest_cpu - find the idlest cpu among the cpus in group.
2626  */
2627 static int
2628 find_idlest_cpu(struct sched_group *group, struct task_struct *p, int this_cpu)
2629 {
2630 	unsigned long load, min_load = ULONG_MAX;
2631 	int idlest = -1;
2632 	int i;
2633 
2634 	/* Traverse only the allowed CPUs */
2635 	for_each_cpu_and(i, sched_group_cpus(group), tsk_cpus_allowed(p)) {
2636 		load = weighted_cpuload(i);
2637 
2638 		if (load < min_load || (load == min_load && i == this_cpu)) {
2639 			min_load = load;
2640 			idlest = i;
2641 		}
2642 	}
2643 
2644 	return idlest;
2645 }
2646 
2647 /*
2648  * Try and locate an idle CPU in the sched_domain.
2649  */
2650 static int select_idle_sibling(struct task_struct *p, int target)
2651 {
2652 	int cpu = smp_processor_id();
2653 	int prev_cpu = task_cpu(p);
2654 	struct sched_domain *sd;
2655 	struct sched_group *sg;
2656 	int i;
2657 
2658 	/*
2659 	 * If the task is going to be woken-up on this cpu and if it is
2660 	 * already idle, then it is the right target.
2661 	 */
2662 	if (target == cpu && idle_cpu(cpu))
2663 		return cpu;
2664 
2665 	/*
2666 	 * If the task is going to be woken-up on the cpu where it previously
2667 	 * ran and if it is currently idle, then it is the right target.
2668 	 */
2669 	if (target == prev_cpu && idle_cpu(prev_cpu))
2670 		return prev_cpu;
2671 
2672 	/*
2673 	 * Otherwise, iterate the domains and find an eligible idle cpu.
2674 	 */
2675 	rcu_read_lock();
2676 
2677 	sd = rcu_dereference(per_cpu(sd_llc, target));
2678 	for_each_lower_domain(sd) {
2679 		sg = sd->groups;
2680 		do {
2681 			if (!cpumask_intersects(sched_group_cpus(sg),
2682 						tsk_cpus_allowed(p)))
2683 				goto next;
2684 
2685 			for_each_cpu(i, sched_group_cpus(sg)) {
2686 				if (!idle_cpu(i))
2687 					goto next;
2688 			}
2689 
2690 			target = cpumask_first_and(sched_group_cpus(sg),
2691 					tsk_cpus_allowed(p));
2692 			goto done;
2693 next:
2694 			sg = sg->next;
2695 		} while (sg != sd->groups);
2696 	}
2697 done:
2698 	rcu_read_unlock();
2699 
2700 	return target;
2701 }
2702 
2703 /*
2704  * select_task_rq_fair: balance the current task (running on cpu) in domains
2705  * that have the 'sd_flag' flag set. In practice, this is SD_BALANCE_FORK and
2706  * SD_BALANCE_EXEC.
2707  *
2708  * Balance, ie. select the least loaded group.
2709  *
2710  * Returns the target CPU number, or the same CPU if no balancing is needed.
2711  *
2712  * preempt must be disabled.
2713  */
2714 static int
2715 select_task_rq_fair(struct task_struct *p, int sd_flag, int wake_flags)
2716 {
2717 	struct sched_domain *tmp, *affine_sd = NULL, *sd = NULL;
2718 	int cpu = smp_processor_id();
2719 	int prev_cpu = task_cpu(p);
2720 	int new_cpu = cpu;
2721 	int want_affine = 0;
2722 	int want_sd = 1;
2723 	int sync = wake_flags & WF_SYNC;
2724 
2725 	if (p->rt.nr_cpus_allowed == 1)
2726 		return prev_cpu;
2727 
2728 	if (sd_flag & SD_BALANCE_WAKE) {
2729 		if (cpumask_test_cpu(cpu, tsk_cpus_allowed(p)))
2730 			want_affine = 1;
2731 		new_cpu = prev_cpu;
2732 	}
2733 
2734 	rcu_read_lock();
2735 	for_each_domain(cpu, tmp) {
2736 		if (!(tmp->flags & SD_LOAD_BALANCE))
2737 			continue;
2738 
2739 		/*
2740 		 * If power savings logic is enabled for a domain, see if we
2741 		 * are not overloaded, if so, don't balance wider.
2742 		 */
2743 		if (tmp->flags & (SD_POWERSAVINGS_BALANCE|SD_PREFER_LOCAL)) {
2744 			unsigned long power = 0;
2745 			unsigned long nr_running = 0;
2746 			unsigned long capacity;
2747 			int i;
2748 
2749 			for_each_cpu(i, sched_domain_span(tmp)) {
2750 				power += power_of(i);
2751 				nr_running += cpu_rq(i)->cfs.nr_running;
2752 			}
2753 
2754 			capacity = DIV_ROUND_CLOSEST(power, SCHED_POWER_SCALE);
2755 
2756 			if (tmp->flags & SD_POWERSAVINGS_BALANCE)
2757 				nr_running /= 2;
2758 
2759 			if (nr_running < capacity)
2760 				want_sd = 0;
2761 		}
2762 
2763 		/*
2764 		 * If both cpu and prev_cpu are part of this domain,
2765 		 * cpu is a valid SD_WAKE_AFFINE target.
2766 		 */
2767 		if (want_affine && (tmp->flags & SD_WAKE_AFFINE) &&
2768 		    cpumask_test_cpu(prev_cpu, sched_domain_span(tmp))) {
2769 			affine_sd = tmp;
2770 			want_affine = 0;
2771 		}
2772 
2773 		if (!want_sd && !want_affine)
2774 			break;
2775 
2776 		if (!(tmp->flags & sd_flag))
2777 			continue;
2778 
2779 		if (want_sd)
2780 			sd = tmp;
2781 	}
2782 
2783 	if (affine_sd) {
2784 		if (cpu == prev_cpu || wake_affine(affine_sd, p, sync))
2785 			prev_cpu = cpu;
2786 
2787 		new_cpu = select_idle_sibling(p, prev_cpu);
2788 		goto unlock;
2789 	}
2790 
2791 	while (sd) {
2792 		int load_idx = sd->forkexec_idx;
2793 		struct sched_group *group;
2794 		int weight;
2795 
2796 		if (!(sd->flags & sd_flag)) {
2797 			sd = sd->child;
2798 			continue;
2799 		}
2800 
2801 		if (sd_flag & SD_BALANCE_WAKE)
2802 			load_idx = sd->wake_idx;
2803 
2804 		group = find_idlest_group(sd, p, cpu, load_idx);
2805 		if (!group) {
2806 			sd = sd->child;
2807 			continue;
2808 		}
2809 
2810 		new_cpu = find_idlest_cpu(group, p, cpu);
2811 		if (new_cpu == -1 || new_cpu == cpu) {
2812 			/* Now try balancing at a lower domain level of cpu */
2813 			sd = sd->child;
2814 			continue;
2815 		}
2816 
2817 		/* Now try balancing at a lower domain level of new_cpu */
2818 		cpu = new_cpu;
2819 		weight = sd->span_weight;
2820 		sd = NULL;
2821 		for_each_domain(cpu, tmp) {
2822 			if (weight <= tmp->span_weight)
2823 				break;
2824 			if (tmp->flags & sd_flag)
2825 				sd = tmp;
2826 		}
2827 		/* while loop will break here if sd == NULL */
2828 	}
2829 unlock:
2830 	rcu_read_unlock();
2831 
2832 	return new_cpu;
2833 }
2834 #endif /* CONFIG_SMP */
2835 
2836 static unsigned long
2837 wakeup_gran(struct sched_entity *curr, struct sched_entity *se)
2838 {
2839 	unsigned long gran = sysctl_sched_wakeup_granularity;
2840 
2841 	/*
2842 	 * Since it's curr that is running now, convert the gran from real-time
2843 	 * to virtual-time in its units.
2844 	 *
2845 	 * By using 'se' instead of 'curr' we penalize light tasks, so
2846 	 * they get preempted more easily. That is, if 'se' < 'curr' then
2847 	 * the resulting gran will be larger, therefore penalizing the
2848 	 * lighter task; if, on the other hand, 'se' > 'curr' then the resulting
2849 	 * gran will be smaller, again penalizing the lighter task.
2850 	 *
2851 	 * This is especially important for buddies when the leftmost
2852 	 * task is higher priority than the buddy.
2853 	 */
2854 	return calc_delta_fair(gran, se);
2855 }
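
/*
 * Concretely (assuming calc_delta_fair() scales by NICE_0_LOAD / weight):
 * a nice-0 'se' sees the granularity unchanged, while an 'se' at half the
 * nice-0 weight sees it doubled in virtual time, making it harder for the
 * light task to preempt, exactly as described above.
 */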
2856 
2857 /*
2858  * Should 'se' preempt 'curr'.
2859  *
2860  *             |s1
2861  *        |s2
2862  *   |s3
2863  *         g
2864  *      |<--->|c
2865  *
2866  *  w(c, s1) = -1
2867  *  w(c, s2) =  0
2868  *  w(c, s3) =  1
2869  *
2870  */
2871 static int
2872 wakeup_preempt_entity(struct sched_entity *curr, struct sched_entity *se)
2873 {
2874 	s64 gran, vdiff = curr->vruntime - se->vruntime;
2875 
2876 	if (vdiff <= 0)
2877 		return -1;
2878 
2879 	gran = wakeup_gran(curr, se);
2880 	if (vdiff > gran)
2881 		return 1;
2882 
2883 	return 0;
2884 }
2885 
2886 static void set_last_buddy(struct sched_entity *se)
2887 {
2888 	if (entity_is_task(se) && unlikely(task_of(se)->policy == SCHED_IDLE))
2889 		return;
2890 
2891 	for_each_sched_entity(se)
2892 		cfs_rq_of(se)->last = se;
2893 }
2894 
2895 static void set_next_buddy(struct sched_entity *se)
2896 {
2897 	if (entity_is_task(se) && unlikely(task_of(se)->policy == SCHED_IDLE))
2898 		return;
2899 
2900 	for_each_sched_entity(se)
2901 		cfs_rq_of(se)->next = se;
2902 }
2903 
2904 static void set_skip_buddy(struct sched_entity *se)
2905 {
2906 	for_each_sched_entity(se)
2907 		cfs_rq_of(se)->skip = se;
2908 }
2909 
2910 /*
2911  * Preempt the current task with a newly woken task if needed:
2912  */
2913 static void check_preempt_wakeup(struct rq *rq, struct task_struct *p, int wake_flags)
2914 {
2915 	struct task_struct *curr = rq->curr;
2916 	struct sched_entity *se = &curr->se, *pse = &p->se;
2917 	struct cfs_rq *cfs_rq = task_cfs_rq(curr);
2918 	int scale = cfs_rq->nr_running >= sched_nr_latency;
2919 	int next_buddy_marked = 0;
2920 
2921 	if (unlikely(se == pse))
2922 		return;
2923 
2924 	/*
2925 	 * This is possible from callers such as pull_task(), in which we
2926 	 * unconditionally check_preempt_curr() after an enqueue (which may have
2927 	 * led to a throttle).  This both saves work and prevents false
2928 	 * next-buddy nomination below.
2929 	 */
2930 	if (unlikely(throttled_hierarchy(cfs_rq_of(pse))))
2931 		return;
2932 
2933 	if (sched_feat(NEXT_BUDDY) && scale && !(wake_flags & WF_FORK)) {
2934 		set_next_buddy(pse);
2935 		next_buddy_marked = 1;
2936 	}
2937 
2938 	/*
2939 	 * We can come here with TIF_NEED_RESCHED already set from new task
2940 	 * wake up path.
2941 	 *
2942 	 * Note: this also catches the edge-case of curr being in a throttled
2943 	 * group (e.g. via set_curr_task), since update_curr() (in the
2944 	 * enqueue of curr) will have resulted in resched being set.  This
2945 	 * prevents us from potentially nominating it as a false LAST_BUDDY
2946 	 * below.
2947 	 */
2948 	if (test_tsk_need_resched(curr))
2949 		return;
2950 
2951 	/* Idle tasks are by definition preempted by non-idle tasks. */
2952 	if (unlikely(curr->policy == SCHED_IDLE) &&
2953 	    likely(p->policy != SCHED_IDLE))
2954 		goto preempt;
2955 
2956 	/*
2957 	 * Batch and idle tasks do not preempt non-idle tasks (their preemption
2958 	 * is driven by the tick):
2959 	 */
2960 	if (unlikely(p->policy != SCHED_NORMAL))
2961 		return;
2962 
2963 	find_matching_se(&se, &pse);
2964 	update_curr(cfs_rq_of(se));
2965 	BUG_ON(!pse);
2966 	if (wakeup_preempt_entity(se, pse) == 1) {
2967 		/*
2968 		 * Bias pick_next to pick the sched entity that is
2969 		 * triggering this preemption.
2970 		 */
2971 		if (!next_buddy_marked)
2972 			set_next_buddy(pse);
2973 		goto preempt;
2974 	}
2975 
2976 	return;
2977 
2978 preempt:
2979 	resched_task(curr);
2980 	/*
2981 	 * Only set the backward buddy when the current task is still
2982 	 * on the rq. This can happen when a wakeup gets interleaved
2983 	 * with schedule on the ->pre_schedule() or idle_balance()
2984 	 * point, either of which can drop the rq lock.
2985 	 *
2986 	 * Also, during early boot the idle thread is in the fair class,
2987 	 * for obvious reasons it's a bad idea to schedule back to it.
2988 	 */
2989 	if (unlikely(!se->on_rq || curr == rq->idle))
2990 		return;
2991 
2992 	if (sched_feat(LAST_BUDDY) && scale && entity_is_task(se))
2993 		set_last_buddy(se);
2994 }
2995 
2996 static struct task_struct *pick_next_task_fair(struct rq *rq)
2997 {
2998 	struct task_struct *p;
2999 	struct cfs_rq *cfs_rq = &rq->cfs;
3000 	struct sched_entity *se;
3001 
3002 	if (!cfs_rq->nr_running)
3003 		return NULL;
3004 
3005 	do {
3006 		se = pick_next_entity(cfs_rq);
3007 		set_next_entity(cfs_rq, se);
3008 		cfs_rq = group_cfs_rq(se);
3009 	} while (cfs_rq);
3010 
3011 	p = task_of(se);
3012 	if (hrtick_enabled(rq))
3013 		hrtick_start_fair(rq, p);
3014 
3015 	return p;
3016 }
3017 
3018 /*
3019  * Account for a descheduled task:
3020  */
3021 static void put_prev_task_fair(struct rq *rq, struct task_struct *prev)
3022 {
3023 	struct sched_entity *se = &prev->se;
3024 	struct cfs_rq *cfs_rq;
3025 
3026 	for_each_sched_entity(se) {
3027 		cfs_rq = cfs_rq_of(se);
3028 		put_prev_entity(cfs_rq, se);
3029 	}
3030 }
3031 
3032 /*
3033  * sched_yield() is very simple
3034  *
3035  * The magic of dealing with the ->skip buddy is in pick_next_entity.
3036  */
3037 static void yield_task_fair(struct rq *rq)
3038 {
3039 	struct task_struct *curr = rq->curr;
3040 	struct cfs_rq *cfs_rq = task_cfs_rq(curr);
3041 	struct sched_entity *se = &curr->se;
3042 
3043 	/*
3044 	 * Are we the only task in the tree?
3045 	 */
3046 	if (unlikely(rq->nr_running == 1))
3047 		return;
3048 
3049 	clear_buddies(cfs_rq, se);
3050 
3051 	if (curr->policy != SCHED_BATCH) {
3052 		update_rq_clock(rq);
3053 		/*
3054 		 * Update run-time statistics of the 'current'.
3055 		 */
3056 		update_curr(cfs_rq);
3057 		/*
3058 		 * Tell update_rq_clock() that we've just updated,
3059 		 * so we don't do microscopic update in schedule()
3060 		 * and double the fastpath cost.
3061 		 */
3062 		 rq->skip_clock_update = 1;
3063 	}
3064 
3065 	set_skip_buddy(se);
3066 }
3067 
3068 static bool yield_to_task_fair(struct rq *rq, struct task_struct *p, bool preempt)
3069 {
3070 	struct sched_entity *se = &p->se;
3071 
3072 	/* throttled hierarchies are not runnable */
3073 	if (!se->on_rq || throttled_hierarchy(cfs_rq_of(se)))
3074 		return false;
3075 
3076 	/* Tell the scheduler that we'd really like pse to run next. */
3077 	set_next_buddy(se);
3078 
3079 	yield_task_fair(rq);
3080 
3081 	return true;
3082 }
3083 
3084 #ifdef CONFIG_SMP
3085 /**************************************************
3086  * Fair scheduling class load-balancing methods:
3087  */
3088 
3089 /*
3090  * pull_task - move a task from a remote runqueue to the local runqueue.
3091  * Both runqueues must be locked.
3092  */
3093 static void pull_task(struct rq *src_rq, struct task_struct *p,
3094 		      struct rq *this_rq, int this_cpu)
3095 {
3096 	deactivate_task(src_rq, p, 0);
3097 	set_task_cpu(p, this_cpu);
3098 	activate_task(this_rq, p, 0);
3099 	check_preempt_curr(this_rq, p, 0);
3100 }
3101 
3102 /*
3103  * Is this task likely cache-hot:
3104  */
3105 static int
3106 task_hot(struct task_struct *p, u64 now, struct sched_domain *sd)
3107 {
3108 	s64 delta;
3109 
3110 	if (p->sched_class != &fair_sched_class)
3111 		return 0;
3112 
3113 	if (unlikely(p->policy == SCHED_IDLE))
3114 		return 0;
3115 
3116 	/*
3117 	 * Buddy candidates are cache hot:
3118 	 */
3119 	if (sched_feat(CACHE_HOT_BUDDY) && this_rq()->nr_running &&
3120 			(&p->se == cfs_rq_of(&p->se)->next ||
3121 			 &p->se == cfs_rq_of(&p->se)->last))
3122 		return 1;
3123 
3124 	if (sysctl_sched_migration_cost == -1)
3125 		return 1;
3126 	if (sysctl_sched_migration_cost == 0)
3127 		return 0;
3128 
3129 	delta = now - p->se.exec_start;
3130 
3131 	return delta < (s64)sysctl_sched_migration_cost;
3132 }
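
/*
 * In short: a migration cost of -1 treats every fair task as cache hot,
 * 0 treats none as hot, and any other value marks a task hot only if it
 * last ran within that many nanoseconds.
 */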
3133 
3134 #define LBF_ALL_PINNED	0x01
3135 #define LBF_NEED_BREAK	0x02	/* clears into HAD_BREAK */
3136 #define LBF_HAD_BREAK	0x04
3137 #define LBF_HAD_BREAKS	0x0C	/* count HAD_BREAKs overflows into ABORT */
3138 #define LBF_ABORT	0x10
3139 
3140 /*
3141  * can_migrate_task - may task p from runqueue rq be migrated to this_cpu?
3142  */
3143 static
3144 int can_migrate_task(struct task_struct *p, struct rq *rq, int this_cpu,
3145 		     struct sched_domain *sd, enum cpu_idle_type idle,
3146 		     int *lb_flags)
3147 {
3148 	int tsk_cache_hot = 0;
3149 	/*
3150 	 * We do not migrate tasks that are:
3151 	 * 1) running (obviously), or
3152 	 * 2) cannot be migrated to this CPU due to cpus_allowed, or
3153 	 * 3) are cache-hot on their current CPU.
3154 	 */
3155 	if (!cpumask_test_cpu(this_cpu, tsk_cpus_allowed(p))) {
3156 		schedstat_inc(p, se.statistics.nr_failed_migrations_affine);
3157 		return 0;
3158 	}
3159 	*lb_flags &= ~LBF_ALL_PINNED;
3160 
3161 	if (task_running(rq, p)) {
3162 		schedstat_inc(p, se.statistics.nr_failed_migrations_running);
3163 		return 0;
3164 	}
3165 
3166 	/*
3167 	 * Aggressive migration if:
3168 	 * 1) task is cache cold, or
3169 	 * 2) too many balance attempts have failed.
3170 	 */
3171 
3172 	tsk_cache_hot = task_hot(p, rq->clock_task, sd);
3173 	if (!tsk_cache_hot ||
3174 		sd->nr_balance_failed > sd->cache_nice_tries) {
3175 #ifdef CONFIG_SCHEDSTATS
3176 		if (tsk_cache_hot) {
3177 			schedstat_inc(sd, lb_hot_gained[idle]);
3178 			schedstat_inc(p, se.statistics.nr_forced_migrations);
3179 		}
3180 #endif
3181 		return 1;
3182 	}
3183 
3184 	if (tsk_cache_hot) {
3185 		schedstat_inc(p, se.statistics.nr_failed_migrations_hot);
3186 		return 0;
3187 	}
3188 	return 1;
3189 }
3190 
3191 /*
3192  * move_one_task tries to move exactly one task from busiest to this_rq, as
3193  * part of active balancing operations within "domain".
3194  * Returns 1 if successful and 0 otherwise.
3195  *
3196  * Called with both runqueues locked.
3197  */
3198 static int
3199 move_one_task(struct rq *this_rq, int this_cpu, struct rq *busiest,
3200 	      struct sched_domain *sd, enum cpu_idle_type idle)
3201 {
3202 	struct task_struct *p, *n;
3203 	struct cfs_rq *cfs_rq;
3204 	int pinned = 0;
3205 
3206 	for_each_leaf_cfs_rq(busiest, cfs_rq) {
3207 		list_for_each_entry_safe(p, n, &cfs_rq->tasks, se.group_node) {
3208 			if (throttled_lb_pair(task_group(p),
3209 					      busiest->cpu, this_cpu))
3210 				break;
3211 
3212 			if (!can_migrate_task(p, busiest, this_cpu,
3213 						sd, idle, &pinned))
3214 				continue;
3215 
3216 			pull_task(busiest, p, this_rq, this_cpu);
3217 			/*
3218 			 * Right now, this is only the second place pull_task()
3219 			 * is called, so we can safely collect pull_task()
3220 			 * stats here rather than inside pull_task().
3221 			 */
3222 			schedstat_inc(sd, lb_gained[idle]);
3223 			return 1;
3224 		}
3225 	}
3226 
3227 	return 0;
3228 }
3229 
3230 static unsigned long
3231 balance_tasks(struct rq *this_rq, int this_cpu, struct rq *busiest,
3232 	      unsigned long max_load_move, struct sched_domain *sd,
3233 	      enum cpu_idle_type idle, int *lb_flags,
3234 	      struct cfs_rq *busiest_cfs_rq)
3235 {
3236 	int loops = 0, pulled = 0;
3237 	long rem_load_move = max_load_move;
3238 	struct task_struct *p, *n;
3239 
3240 	if (max_load_move == 0)
3241 		goto out;
3242 
3243 	list_for_each_entry_safe(p, n, &busiest_cfs_rq->tasks, se.group_node) {
3244 		if (loops++ > sysctl_sched_nr_migrate) {
3245 			*lb_flags |= LBF_NEED_BREAK;
3246 			break;
3247 		}
3248 
3249 		if ((p->se.load.weight >> 1) > rem_load_move ||
3250 		    !can_migrate_task(p, busiest, this_cpu, sd, idle,
3251 				      lb_flags))
3252 			continue;
3253 
3254 		pull_task(busiest, p, this_rq, this_cpu);
3255 		pulled++;
3256 		rem_load_move -= p->se.load.weight;
3257 
3258 #ifdef CONFIG_PREEMPT
3259 		/*
3260 		 * NEWIDLE balancing is a source of latency, so preemptible
3261 		 * kernels will stop after the first task is pulled to minimize
3262 		 * the critical section.
3263 		 */
3264 		if (idle == CPU_NEWLY_IDLE) {
3265 			*lb_flags |= LBF_ABORT;
3266 			break;
3267 		}
3268 #endif
3269 
3270 		/*
3271 		 * We only want to steal up to the prescribed amount of
3272 		 * weighted load.
3273 		 */
3274 		if (rem_load_move <= 0)
3275 			break;
3276 	}
3277 out:
3278 	/*
3279 	 * Right now, this is one of only two places pull_task() is called,
3280 	 * so we can safely collect pull_task() stats here rather than
3281 	 * inside pull_task().
3282 	 */
3283 	schedstat_add(sd, lb_gained[idle], pulled);
3284 
3285 	return max_load_move - rem_load_move;
3286 }
3287 
3288 #ifdef CONFIG_FAIR_GROUP_SCHED
3289 /*
3290  * update tg->load_weight by folding this cpu's load_avg
3291  */
3292 static int update_shares_cpu(struct task_group *tg, int cpu)
3293 {
3294 	struct cfs_rq *cfs_rq;
3295 	unsigned long flags;
3296 	struct rq *rq;
3297 
3298 	if (!tg->se[cpu])
3299 		return 0;
3300 
3301 	rq = cpu_rq(cpu);
3302 	cfs_rq = tg->cfs_rq[cpu];
3303 
3304 	raw_spin_lock_irqsave(&rq->lock, flags);
3305 
3306 	update_rq_clock(rq);
3307 	update_cfs_load(cfs_rq, 1);
3308 
3309 	/*
3310 	 * We need to update shares after updating tg->load_weight in
3311 	 * order to adjust the weight of groups with long running tasks.
3312 	 */
3313 	update_cfs_shares(cfs_rq);
3314 
3315 	raw_spin_unlock_irqrestore(&rq->lock, flags);
3316 
3317 	return 0;
3318 }
3319 
3320 static void update_shares(int cpu)
3321 {
3322 	struct cfs_rq *cfs_rq;
3323 	struct rq *rq = cpu_rq(cpu);
3324 
3325 	rcu_read_lock();
3326 	/*
3327 	 * Iterates the task_group tree in a bottom up fashion, see
3328 	 * list_add_leaf_cfs_rq() for details.
3329 	 */
3330 	for_each_leaf_cfs_rq(rq, cfs_rq) {
3331 		/* throttled entities do not contribute to load */
3332 		if (throttled_hierarchy(cfs_rq))
3333 			continue;
3334 
3335 		update_shares_cpu(cfs_rq->tg, cpu);
3336 	}
3337 	rcu_read_unlock();
3338 }
3339 
3340 /*
3341  * Compute the cpu's hierarchical load factor for each task group.
3342  * This needs to be done in a top-down fashion because the load of a child
3343  * group is a fraction of its parents load.
3344  */
3345 static int tg_load_down(struct task_group *tg, void *data)
3346 {
3347 	unsigned long load;
3348 	long cpu = (long)data;
3349 
3350 	if (!tg->parent) {
3351 		load = cpu_rq(cpu)->load.weight;
3352 	} else {
3353 		load = tg->parent->cfs_rq[cpu]->h_load;
3354 		load *= tg->se[cpu]->load.weight;
3355 		load /= tg->parent->cfs_rq[cpu]->load.weight + 1;
3356 	}
3357 
3358 	tg->cfs_rq[cpu]->h_load = load;
3359 
3360 	return 0;
3361 }
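
/*
 * Example (weights assumed): for a child group whose se weight on this cpu
 * is 1024 while the parent cfs_rq weighs 2048 in total, the child inherits
 * h_load = parent_h_load * 1024 / (2048 + 1), i.e. roughly half of the
 * parent's hierarchical load, as expected for owning half the weight.
 */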
3362 
3363 static void update_h_load(long cpu)
3364 {
3365 	walk_tg_tree(tg_load_down, tg_nop, (void *)cpu);
3366 }
3367 
3368 static unsigned long
3369 load_balance_fair(struct rq *this_rq, int this_cpu, struct rq *busiest,
3370 		  unsigned long max_load_move,
3371 		  struct sched_domain *sd, enum cpu_idle_type idle,
3372 		  int *lb_flags)
3373 {
3374 	long rem_load_move = max_load_move;
3375 	struct cfs_rq *busiest_cfs_rq;
3376 
3377 	rcu_read_lock();
3378 	update_h_load(cpu_of(busiest));
3379 
3380 	for_each_leaf_cfs_rq(busiest, busiest_cfs_rq) {
3381 		unsigned long busiest_h_load = busiest_cfs_rq->h_load;
3382 		unsigned long busiest_weight = busiest_cfs_rq->load.weight;
3383 		u64 rem_load, moved_load;
3384 
3385 		if (*lb_flags & (LBF_NEED_BREAK|LBF_ABORT))
3386 			break;
3387 
3388 		/*
3389 		 * empty group or part of a throttled hierarchy
3390 		 */
3391 		if (!busiest_cfs_rq->task_weight ||
3392 		    throttled_lb_pair(busiest_cfs_rq->tg, cpu_of(busiest), this_cpu))
3393 			continue;
3394 
3395 		rem_load = (u64)rem_load_move * busiest_weight;
3396 		rem_load = div_u64(rem_load, busiest_h_load + 1);
3397 
3398 		moved_load = balance_tasks(this_rq, this_cpu, busiest,
3399 				rem_load, sd, idle, lb_flags,
3400 				busiest_cfs_rq);
3401 
3402 		if (!moved_load)
3403 			continue;
3404 
3405 		moved_load *= busiest_h_load;
3406 		moved_load = div_u64(moved_load, busiest_weight + 1);
3407 
3408 		rem_load_move -= moved_load;
3409 		if (rem_load_move < 0)
3410 			break;
3411 	}
3412 	rcu_read_unlock();
3413 
3414 	return max_load_move - rem_load_move;
3415 }
3416 #else
3417 static inline void update_shares(int cpu)
3418 {
3419 }
3420 
3421 static unsigned long
3422 load_balance_fair(struct rq *this_rq, int this_cpu, struct rq *busiest,
3423 		  unsigned long max_load_move,
3424 		  struct sched_domain *sd, enum cpu_idle_type idle,
3425 		  int *lb_flags)
3426 {
3427 	return balance_tasks(this_rq, this_cpu, busiest,
3428 			max_load_move, sd, idle, lb_flags,
3429 			&busiest->cfs);
3430 }
3431 #endif
3432 
3433 /*
3434  * move_tasks tries to move up to max_load_move weighted load from busiest to
3435  * this_rq, as part of a balancing operation within domain "sd".
3436  * Returns 1 if successful and 0 otherwise.
3437  *
3438  * Called with both runqueues locked.
3439  */
3440 static int move_tasks(struct rq *this_rq, int this_cpu, struct rq *busiest,
3441 		      unsigned long max_load_move,
3442 		      struct sched_domain *sd, enum cpu_idle_type idle,
3443 		      int *lb_flags)
3444 {
3445 	unsigned long total_load_moved = 0, load_moved;
3446 
3447 	do {
3448 		load_moved = load_balance_fair(this_rq, this_cpu, busiest,
3449 				max_load_move - total_load_moved,
3450 				sd, idle, lb_flags);
3451 
3452 		total_load_moved += load_moved;
3453 
3454 		if (*lb_flags & (LBF_NEED_BREAK|LBF_ABORT))
3455 			break;
3456 
3457 #ifdef CONFIG_PREEMPT
3458 		/*
3459 		 * NEWIDLE balancing is a source of latency, so preemptible
3460 		 * kernels will stop after the first task is pulled to minimize
3461 		 * the critical section.
3462 		 */
3463 		if (idle == CPU_NEWLY_IDLE && this_rq->nr_running) {
3464 			*lb_flags |= LBF_ABORT;
3465 			break;
3466 		}
3467 #endif
3468 	} while (load_moved && max_load_move > total_load_moved);
3469 
3470 	return total_load_moved > 0;
3471 }
3472 
3473 /********** Helpers for find_busiest_group ************************/
3474 /*
3475  * sd_lb_stats - Structure to store the statistics of a sched_domain
3476  * 		during load balancing.
3477  */
3478 struct sd_lb_stats {
3479 	struct sched_group *busiest; /* Busiest group in this sd */
3480 	struct sched_group *this;  /* Local group in this sd */
3481 	unsigned long total_load;  /* Total load of all groups in sd */
3482 	unsigned long total_pwr;   /*	Total power of all groups in sd */
3483 	unsigned long avg_load;	   /* Average load across all groups in sd */
3484 
3485 	/** Statistics of this group */
3486 	unsigned long this_load;
3487 	unsigned long this_load_per_task;
3488 	unsigned long this_nr_running;
3489 	unsigned long this_has_capacity;
3490 	unsigned int  this_idle_cpus;
3491 
3492 	/* Statistics of the busiest group */
3493 	unsigned int  busiest_idle_cpus;
3494 	unsigned long max_load;
3495 	unsigned long busiest_load_per_task;
3496 	unsigned long busiest_nr_running;
3497 	unsigned long busiest_group_capacity;
3498 	unsigned long busiest_has_capacity;
3499 	unsigned int  busiest_group_weight;
3500 
3501 	int group_imb; /* Is there imbalance in this sd */
3502 #if defined(CONFIG_SCHED_MC) || defined(CONFIG_SCHED_SMT)
3503 	int power_savings_balance; /* Is powersave balance needed for this sd */
3504 	struct sched_group *group_min; /* Least loaded group in sd */
3505 	struct sched_group *group_leader; /* Group which relieves group_min */
3506 	unsigned long min_load_per_task; /* load_per_task in group_min */
3507 	unsigned long leader_nr_running; /* Nr running of group_leader */
3508 	unsigned long min_nr_running; /* Nr running of group_min */
3509 #endif
3510 };
3511 
3512 /*
3513  * sg_lb_stats - stats of a sched_group required for load_balancing
3514  */
3515 struct sg_lb_stats {
3516 	unsigned long avg_load; /*Avg load across the CPUs of the group */
3517 	unsigned long group_load; /* Total load over the CPUs of the group */
3518 	unsigned long sum_nr_running; /* Nr tasks running in the group */
3519 	unsigned long sum_weighted_load; /* Weighted load of group's tasks */
3520 	unsigned long group_capacity;
3521 	unsigned long idle_cpus;
3522 	unsigned long group_weight;
3523 	int group_imb; /* Is there an imbalance in the group ? */
3524 	int group_has_capacity; /* Is there extra capacity in the group? */
3525 };
3526 
3527 /**
3528  * get_sd_load_idx - Obtain the load index for a given sched domain.
3529  * @sd: The sched_domain whose load_idx is to be obtained.
3530  * @idle: The idle status of the CPU whose sd load_idx is to be obtained.
3531  */
3532 static inline int get_sd_load_idx(struct sched_domain *sd,
3533 					enum cpu_idle_type idle)
3534 {
3535 	int load_idx;
3536 
3537 	switch (idle) {
3538 	case CPU_NOT_IDLE:
3539 		load_idx = sd->busy_idx;
3540 		break;
3541 
3542 	case CPU_NEWLY_IDLE:
3543 		load_idx = sd->newidle_idx;
3544 		break;
3545 	default:
3546 		load_idx = sd->idle_idx;
3547 		break;
3548 	}
3549 
3550 	return load_idx;
3551 }
3552 
3553 
3554 #if defined(CONFIG_SCHED_MC) || defined(CONFIG_SCHED_SMT)
3555 /**
3556  * init_sd_power_savings_stats - Initialize power savings statistics for
3557  * the given sched_domain, during load balancing.
3558  *
3559  * @sd: Sched domain whose power-savings statistics are to be initialized.
3560  * @sds: Variable containing the statistics for sd.
3561  * @idle: Idle status of the CPU at which we're performing load-balancing.
3562  */
3563 static inline void init_sd_power_savings_stats(struct sched_domain *sd,
3564 	struct sd_lb_stats *sds, enum cpu_idle_type idle)
3565 {
3566 	/*
3567 	 * Busy processors will not participate in power savings
3568 	 * balance.
3569 	 */
3570 	if (idle == CPU_NOT_IDLE || !(sd->flags & SD_POWERSAVINGS_BALANCE))
3571 		sds->power_savings_balance = 0;
3572 	else {
3573 		sds->power_savings_balance = 1;
3574 		sds->min_nr_running = ULONG_MAX;
3575 		sds->leader_nr_running = 0;
3576 	}
3577 }
3578 
3579 /**
3580  * update_sd_power_savings_stats - Update the power saving stats for a
3581  * sched_domain while performing load balancing.
3582  *
3583  * @group: sched_group belonging to the sched_domain under consideration.
3584  * @sds: Variable containing the statistics of the sched_domain
3585  * @local_group: Does group contain the CPU for which we're performing
3586  * 		load balancing ?
3587  * @sgs: Variable containing the statistics of the group.
3588  */
3589 static inline void update_sd_power_savings_stats(struct sched_group *group,
3590 	struct sd_lb_stats *sds, int local_group, struct sg_lb_stats *sgs)
3591 {
3592 
3593 	if (!sds->power_savings_balance)
3594 		return;
3595 
3596 	/*
3597 	 * If the local group is idle or completely loaded, there is
3598 	 * no need to do power savings balance at this domain
3599 	 */
3600 	if (local_group && (sds->this_nr_running >= sgs->group_capacity ||
3601 				!sds->this_nr_running))
3602 		sds->power_savings_balance = 0;
3603 
3604 	/*
3605 	 * If a group is already running at full capacity or idle,
3606 	 * don't include that group in power savings calculations
3607 	 */
3608 	if (!sds->power_savings_balance ||
3609 		sgs->sum_nr_running >= sgs->group_capacity ||
3610 		!sgs->sum_nr_running)
3611 		return;
3612 
3613 	/*
3614 	 * Calculate the group which has the least non-idle load.
3615 	 * This is the group from where we need to pick up the load
3616 	 * for saving power
3617 	 */
3618 	if ((sgs->sum_nr_running < sds->min_nr_running) ||
3619 	    (sgs->sum_nr_running == sds->min_nr_running &&
3620 	     group_first_cpu(group) > group_first_cpu(sds->group_min))) {
3621 		sds->group_min = group;
3622 		sds->min_nr_running = sgs->sum_nr_running;
3623 		sds->min_load_per_task = sgs->sum_weighted_load /
3624 						sgs->sum_nr_running;
3625 	}
3626 
3627 	/*
3628 	 * Calculate the group which is nearly at its
3629 	 * capacity but still has some room to pick up load
3630 	 * from another group and save more power
3631 	 */
3632 	if (sgs->sum_nr_running + 1 > sgs->group_capacity)
3633 		return;
3634 
3635 	if (sgs->sum_nr_running > sds->leader_nr_running ||
3636 	    (sgs->sum_nr_running == sds->leader_nr_running &&
3637 	     group_first_cpu(group) < group_first_cpu(sds->group_leader))) {
3638 		sds->group_leader = group;
3639 		sds->leader_nr_running = sgs->sum_nr_running;
3640 	}
3641 }
3642 
3643 /**
3644  * check_power_save_busiest_group - see if there is potential for some power-savings balance
3645  * @sds: Variable containing the statistics of the sched_domain
3646  *	under consideration.
3647  * @this_cpu: Cpu at which we're currently performing load-balancing.
3648  * @imbalance: Variable to store the imbalance.
3649  *
3650  * Description:
3651  * Check if we have potential to perform some power-savings balance.
3652  * If yes, set the busiest group to be the least loaded group in the
3653  * sched_domain, so that its CPUs can be put to idle.
3654  *
3655  * Returns 1 if there is potential to perform power-savings balance.
3656  * Else returns 0.
3657  */
3658 static inline int check_power_save_busiest_group(struct sd_lb_stats *sds,
3659 					int this_cpu, unsigned long *imbalance)
3660 {
3661 	if (!sds->power_savings_balance)
3662 		return 0;
3663 
3664 	if (sds->this != sds->group_leader ||
3665 			sds->group_leader == sds->group_min)
3666 		return 0;
3667 
3668 	*imbalance = sds->min_load_per_task;
3669 	sds->busiest = sds->group_min;
3670 
3671 	return 1;
3672 
3673 }
3674 #else /* CONFIG_SCHED_MC || CONFIG_SCHED_SMT */
3675 static inline void init_sd_power_savings_stats(struct sched_domain *sd,
3676 	struct sd_lb_stats *sds, enum cpu_idle_type idle)
3677 {
3678 	return;
3679 }
3680 
3681 static inline void update_sd_power_savings_stats(struct sched_group *group,
3682 	struct sd_lb_stats *sds, int local_group, struct sg_lb_stats *sgs)
3683 {
3684 	return;
3685 }
3686 
3687 static inline int check_power_save_busiest_group(struct sd_lb_stats *sds,
3688 					int this_cpu, unsigned long *imbalance)
3689 {
3690 	return 0;
3691 }
3692 #endif /* CONFIG_SCHED_MC || CONFIG_SCHED_SMT */
3693 
3694 
3695 unsigned long default_scale_freq_power(struct sched_domain *sd, int cpu)
3696 {
3697 	return SCHED_POWER_SCALE;
3698 }
3699 
3700 unsigned long __weak arch_scale_freq_power(struct sched_domain *sd, int cpu)
3701 {
3702 	return default_scale_freq_power(sd, cpu);
3703 }
3704 
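/*
 * SMT power scaling: an SMT sibling is not a full CPU, so the domain's
 * smt_gain (typically a bit above SCHED_POWER_SCALE) is spread evenly
 * over the siblings; with two hardware threads each sibling is credited
 * roughly half of smt_gain.
 */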
3705 unsigned long default_scale_smt_power(struct sched_domain *sd, int cpu)
3706 {
3707 	unsigned long weight = sd->span_weight;
3708 	unsigned long smt_gain = sd->smt_gain;
3709 
3710 	smt_gain /= weight;
3711 
3712 	return smt_gain;
3713 }
3714 
3715 unsigned long __weak arch_scale_smt_power(struct sched_domain *sd, int cpu)
3716 {
3717 	return default_scale_smt_power(sd, cpu);
3718 }
3719 
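/*
 * scale_rt_power() estimates how much of the recent averaging period was
 * left over after RT (and IRQ) execution, in SCHED_POWER_SCALE units:
 * available / (total >> SCHED_POWER_SHIFT).  As a rough example, with the
 * usual scale of 1024, a cpu whose rt_avg ate a quarter of the period
 * scores about 768.
 */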
3720 unsigned long scale_rt_power(int cpu)
3721 {
3722 	struct rq *rq = cpu_rq(cpu);
3723 	u64 total, available;
3724 
3725 	total = sched_avg_period() + (rq->clock - rq->age_stamp);
3726 
3727 	if (unlikely(total < rq->rt_avg)) {
3728 		/* Ensures that power won't end up being negative */
3729 		available = 0;
3730 	} else {
3731 		available = total - rq->rt_avg;
3732 	}
3733 
3734 	if (unlikely((s64)total < SCHED_POWER_SCALE))
3735 		total = SCHED_POWER_SCALE;
3736 
3737 	total >>= SCHED_POWER_SHIFT;
3738 
3739 	return div_u64(available, total);
3740 }
3741 
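/*
 * update_cpu_power() composes the effective cpu_power of one cpu from up
 * to three factors - SMT sharing, arch/frequency scaling and time stolen
 * by RT tasks - each applied as a multiply followed by a
 * >> SCHED_POWER_SHIFT renormalization, and clamps the result to at
 * least 1 so later divisions by cpu_power stay well defined.
 */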
3742 static void update_cpu_power(struct sched_domain *sd, int cpu)
3743 {
3744 	unsigned long weight = sd->span_weight;
3745 	unsigned long power = SCHED_POWER_SCALE;
3746 	struct sched_group *sdg = sd->groups;
3747 
3748 	if ((sd->flags & SD_SHARE_CPUPOWER) && weight > 1) {
3749 		if (sched_feat(ARCH_POWER))
3750 			power *= arch_scale_smt_power(sd, cpu);
3751 		else
3752 			power *= default_scale_smt_power(sd, cpu);
3753 
3754 		power >>= SCHED_POWER_SHIFT;
3755 	}
3756 
3757 	sdg->sgp->power_orig = power;
3758 
3759 	if (sched_feat(ARCH_POWER))
3760 		power *= arch_scale_freq_power(sd, cpu);
3761 	else
3762 		power *= default_scale_freq_power(sd, cpu);
3763 
3764 	power >>= SCHED_POWER_SHIFT;
3765 
3766 	power *= scale_rt_power(cpu);
3767 	power >>= SCHED_POWER_SHIFT;
3768 
3769 	if (!power)
3770 		power = 1;
3771 
3772 	cpu_rq(cpu)->cpu_power = power;
3773 	sdg->sgp->power = power;
3774 }
3775 
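/*
 * update_group_power() aggregates power bottom-up: at the lowest level
 * (no child domain) it recomputes the single cpu's power, while higher
 * levels simply sum the power of the child domain's groups.
 */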
3776 void update_group_power(struct sched_domain *sd, int cpu)
3777 {
3778 	struct sched_domain *child = sd->child;
3779 	struct sched_group *group, *sdg = sd->groups;
3780 	unsigned long power;
3781 
3782 	if (!child) {
3783 		update_cpu_power(sd, cpu);
3784 		return;
3785 	}
3786 
3787 	power = 0;
3788 
3789 	group = child->groups;
3790 	do {
3791 		power += group->sgp->power;
3792 		group = group->next;
3793 	} while (group != child->groups);
3794 
3795 	sdg->sgp->power = power;
3796 }
3797 
3798 /*
3799  * Try and fix up capacity for tiny siblings; this is needed when
3800  * things like SD_ASYM_PACKING need f_b_g to select another sibling
3801  * which on its own isn't powerful enough.
3802  *
3803  * See update_sd_pick_busiest() and check_asym_packing().
3804  */
3805 static inline int
3806 fix_small_capacity(struct sched_domain *sd, struct sched_group *group)
3807 {
3808 	/*
3809 	 * Only siblings can have significantly less than SCHED_POWER_SCALE
3810 	 */
3811 	if (!(sd->flags & SD_SHARE_CPUPOWER))
3812 		return 0;
3813 
3814 	/*
3815 	 * If ~90% of the cpu_power is still there, we're good.
3816 	 */
3817 	if (group->sgp->power * 32 > group->sgp->power_orig * 29)
3818 		return 1;
3819 
3820 	return 0;
3821 }
3822 
3823 /**
3824  * update_sg_lb_stats - Update sched_group's statistics for load balancing.
3825  * @sd: The sched_domain whose statistics are to be updated.
3826  * @group: sched_group whose statistics are to be updated.
3827  * @this_cpu: Cpu for which load balance is currently performed.
3828  * @idle: Idle status of this_cpu
3829  * @load_idx: Load index of sched_domain of this_cpu for load calc.
3830  * @local_group: Does group contain this_cpu.
3831  * @cpus: Set of cpus considered for load balancing.
3832  * @balance: Should we balance.
3833  * @sgs: variable to hold the statistics for this group.
3834  */
3835 static inline void update_sg_lb_stats(struct sched_domain *sd,
3836 			struct sched_group *group, int this_cpu,
3837 			enum cpu_idle_type idle, int load_idx,
3838 			int local_group, const struct cpumask *cpus,
3839 			int *balance, struct sg_lb_stats *sgs)
3840 {
3841 	unsigned long load, max_cpu_load, min_cpu_load, max_nr_running;
3842 	int i;
3843 	unsigned int balance_cpu = -1, first_idle_cpu = 0;
3844 	unsigned long avg_load_per_task = 0;
3845 
3846 	if (local_group)
3847 		balance_cpu = group_first_cpu(group);
3848 
3849 	/* Tally up the load of all CPUs in the group */
3850 	max_cpu_load = 0;
3851 	min_cpu_load = ~0UL;
3852 	max_nr_running = 0;
3853 
3854 	for_each_cpu_and(i, sched_group_cpus(group), cpus) {
3855 		struct rq *rq = cpu_rq(i);
3856 
3857 		/* Bias balancing toward cpus of our domain */
3858 		if (local_group) {
3859 			if (idle_cpu(i) && !first_idle_cpu) {
3860 				first_idle_cpu = 1;
3861 				balance_cpu = i;
3862 			}
3863 
3864 			load = target_load(i, load_idx);
3865 		} else {
3866 			load = source_load(i, load_idx);
3867 			if (load > max_cpu_load) {
3868 				max_cpu_load = load;
3869 				max_nr_running = rq->nr_running;
3870 			}
3871 			if (min_cpu_load > load)
3872 				min_cpu_load = load;
3873 		}
3874 
3875 		sgs->group_load += load;
3876 		sgs->sum_nr_running += rq->nr_running;
3877 		sgs->sum_weighted_load += weighted_cpuload(i);
3878 		if (idle_cpu(i))
3879 			sgs->idle_cpus++;
3880 	}
3881 
3882 	/*
3883 	 * The first idle cpu or the first cpu (busiest) in this sched group
3884 	 * is eligible for doing load balancing at this and above
3885 	 * domains. In the newly idle case, we will allow all the cpus
3886 	 * to do the newly idle load balance.
3887 	 */
3888 	if (idle != CPU_NEWLY_IDLE && local_group) {
3889 		if (balance_cpu != this_cpu) {
3890 			*balance = 0;
3891 			return;
3892 		}
3893 		update_group_power(sd, this_cpu);
3894 	}
3895 
3896 	/* Adjust by relative CPU power of the group */
3897 	sgs->avg_load = (sgs->group_load*SCHED_POWER_SCALE) / group->sgp->power;
3898 
3899 	/*
3900 	 * Consider the group unbalanced when the imbalance is larger
3901 	 * than the average weight of a task.
3902 	 *
3903 	 * APZ: with cgroup the avg task weight can vary wildly and
3904 	 *      might not be a suitable number - should we keep a
3905 	 *      normalized nr_running number somewhere that negates
3906 	 *      the hierarchy?
3907 	 */
3908 	if (sgs->sum_nr_running)
3909 		avg_load_per_task = sgs->sum_weighted_load / sgs->sum_nr_running;
3910 
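	/*
	 * The max_nr_running > 1 check makes sure the busiest cpu actually
	 * has a task that could be pulled before flagging the imbalance.
	 */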
3911 	if ((max_cpu_load - min_cpu_load) >= avg_load_per_task && max_nr_running > 1)
3912 		sgs->group_imb = 1;
3913 
3914 	sgs->group_capacity = DIV_ROUND_CLOSEST(group->sgp->power,
3915 						SCHED_POWER_SCALE);
3916 	if (!sgs->group_capacity)
3917 		sgs->group_capacity = fix_small_capacity(sd, group);
3918 	sgs->group_weight = group->group_weight;
3919 
3920 	if (sgs->group_capacity > sgs->sum_nr_running)
3921 		sgs->group_has_capacity = 1;
3922 }
3923 
3924 /**
3925  * update_sd_pick_busiest - return 1 on busiest group
3926  * @sd: sched_domain whose statistics are to be checked
3927  * @sds: sched_domain statistics
3928  * @sg: sched_group candidate to be checked for being the busiest
3929  * @sgs: sched_group statistics
3930  * @this_cpu: the current cpu
3931  *
3932  * Determine if @sg is a busier group than the previously selected
3933  * busiest group.
3934  */
3935 static bool update_sd_pick_busiest(struct sched_domain *sd,
3936 				   struct sd_lb_stats *sds,
3937 				   struct sched_group *sg,
3938 				   struct sg_lb_stats *sgs,
3939 				   int this_cpu)
3940 {
3941 	if (sgs->avg_load <= sds->max_load)
3942 		return false;
3943 
3944 	if (sgs->sum_nr_running > sgs->group_capacity)
3945 		return true;
3946 
3947 	if (sgs->group_imb)
3948 		return true;
3949 
3950 	/*
3951 	 * ASYM_PACKING needs to move all the work to the lowest
3952 	 * numbered CPUs in the group, therefore mark all groups
3953 	 * higher than ourself as busy.
3954 	 */
3955 	if ((sd->flags & SD_ASYM_PACKING) && sgs->sum_nr_running &&
3956 	    this_cpu < group_first_cpu(sg)) {
3957 		if (!sds->busiest)
3958 			return true;
3959 
3960 		if (group_first_cpu(sds->busiest) > group_first_cpu(sg))
3961 			return true;
3962 	}
3963 
3964 	return false;
3965 }
3966 
3967 /**
3968  * update_sd_lb_stats - Update sched_domain's statistics for load balancing.
3969  * @sd: sched_domain whose statistics are to be updated.
3970  * @this_cpu: Cpu for which load balance is currently performed.
3971  * @idle: Idle status of this_cpu
3972  * @cpus: Set of cpus considered for load balancing.
3973  * @balance: Should we balance.
3974  * @sds: variable to hold the statistics for this sched_domain.
3975  */
3976 static inline void update_sd_lb_stats(struct sched_domain *sd, int this_cpu,
3977 			enum cpu_idle_type idle, const struct cpumask *cpus,
3978 			int *balance, struct sd_lb_stats *sds)
3979 {
3980 	struct sched_domain *child = sd->child;
3981 	struct sched_group *sg = sd->groups;
3982 	struct sg_lb_stats sgs;
3983 	int load_idx, prefer_sibling = 0;
3984 
3985 	if (child && child->flags & SD_PREFER_SIBLING)
3986 		prefer_sibling = 1;
3987 
3988 	init_sd_power_savings_stats(sd, sds, idle);
3989 	load_idx = get_sd_load_idx(sd, idle);
3990 
3991 	do {
3992 		int local_group;
3993 
3994 		local_group = cpumask_test_cpu(this_cpu, sched_group_cpus(sg));
3995 		memset(&sgs, 0, sizeof(sgs));
3996 		update_sg_lb_stats(sd, sg, this_cpu, idle, load_idx,
3997 				local_group, cpus, balance, &sgs);
3998 
3999 		if (local_group && !(*balance))
4000 			return;
4001 
4002 		sds->total_load += sgs.group_load;
4003 		sds->total_pwr += sg->sgp->power;
4004 
4005 		/*
4006 		 * In case the child domain prefers tasks go to siblings
4007 		 * first, lower the sg capacity to one so that we'll try
4008 		 * and move all the excess tasks away. We lower the capacity
4009 		 * of a group only if the local group has the capacity to fit
4010 		 * these excess tasks, i.e. nr_running < group_capacity. The
4011 		 * extra check prevents the case where you always pull from the
4012 		 * heaviest group when it is already under-utilized (possible when a
4013 		 * large-weight task outweighs the rest of the tasks on the system).
4014 		 */
4015 		if (prefer_sibling && !local_group && sds->this_has_capacity)
4016 			sgs.group_capacity = min(sgs.group_capacity, 1UL);
4017 
4018 		if (local_group) {
4019 			sds->this_load = sgs.avg_load;
4020 			sds->this = sg;
4021 			sds->this_nr_running = sgs.sum_nr_running;
4022 			sds->this_load_per_task = sgs.sum_weighted_load;
4023 			sds->this_has_capacity = sgs.group_has_capacity;
4024 			sds->this_idle_cpus = sgs.idle_cpus;
4025 		} else if (update_sd_pick_busiest(sd, sds, sg, &sgs, this_cpu)) {
4026 			sds->max_load = sgs.avg_load;
4027 			sds->busiest = sg;
4028 			sds->busiest_nr_running = sgs.sum_nr_running;
4029 			sds->busiest_idle_cpus = sgs.idle_cpus;
4030 			sds->busiest_group_capacity = sgs.group_capacity;
4031 			sds->busiest_load_per_task = sgs.sum_weighted_load;
4032 			sds->busiest_has_capacity = sgs.group_has_capacity;
4033 			sds->busiest_group_weight = sgs.group_weight;
4034 			sds->group_imb = sgs.group_imb;
4035 		}
4036 
4037 		update_sd_power_savings_stats(sg, sds, local_group, &sgs);
4038 		sg = sg->next;
4039 	} while (sg != sd->groups);
4040 }
4041 
4042 /**
4043  * check_asym_packing - Check to see if the group is packed into the
4044  *			sched domain.
4045  *
4046  * This is primarily intended to be used at the sibling level.  Some
4047  * cores like POWER7 prefer to use lower numbered SMT threads.  In the
4048  * case of POWER7, it can move to lower SMT modes only when higher
4049  * threads are idle.  When in lower SMT modes, the threads will
4050  * perform better since they share less core resources.  Hence when we
4051  * have idle threads, we want them to be the higher ones.
4052  *
4053  * This packing function is run on idle threads.  It checks to see if
4054  * the busiest CPU in this domain (core in the P7 case) has a higher
4055  * CPU number than the packing function is being run on.  Here we are
4056  * assuming a lower CPU number will be equivalent to a lower SMT thread
4057  * number.
4058  *
4059  * Returns 1 when packing is required and a task should be moved to
4060  * this CPU.  The amount of the imbalance is returned in *imbalance.
4061  *
4062  * @sd: The sched_domain whose packing is to be checked.
4063  * @sds: Statistics of the sched_domain which is to be packed
4064  * @this_cpu: The cpu at whose sched_domain we're performing load-balance.
4065  * @imbalance: returns amount of imbalance due to packing.
4066  */
4067 static int check_asym_packing(struct sched_domain *sd,
4068 			      struct sd_lb_stats *sds,
4069 			      int this_cpu, unsigned long *imbalance)
4070 {
4071 	int busiest_cpu;
4072 
4073 	if (!(sd->flags & SD_ASYM_PACKING))
4074 		return 0;
4075 
4076 	if (!sds->busiest)
4077 		return 0;
4078 
4079 	busiest_cpu = group_first_cpu(sds->busiest);
4080 	if (this_cpu > busiest_cpu)
4081 		return 0;
4082 
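	/*
	 * Convert the busiest group's power-scaled load back into plain
	 * weighted-load units, so the caller knows how much to move.
	 */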
4083 	*imbalance = DIV_ROUND_CLOSEST(sds->max_load * sds->busiest->sgp->power,
4084 				       SCHED_POWER_SCALE);
4085 	return 1;
4086 }
4087 
4088 /**
4089  * fix_small_imbalance - Calculate the minor imbalance that exists
4090  *			amongst the groups of a sched_domain, during
4091  *			load balancing.
4092  * @sds: Statistics of the sched_domain whose imbalance is to be calculated.
4093  * @this_cpu: The cpu at whose sched_domain we're performing load-balance.
4094  * @imbalance: Variable to store the imbalance.
4095  */
4096 static inline void fix_small_imbalance(struct sd_lb_stats *sds,
4097 				int this_cpu, unsigned long *imbalance)
4098 {
4099 	unsigned long tmp, pwr_now = 0, pwr_move = 0;
4100 	unsigned int imbn = 2;
4101 	unsigned long scaled_busy_load_per_task;
4102 
4103 	if (sds->this_nr_running) {
4104 		sds->this_load_per_task /= sds->this_nr_running;
4105 		if (sds->busiest_load_per_task >
4106 				sds->this_load_per_task)
4107 			imbn = 1;
4108 	} else
4109 		sds->this_load_per_task =
4110 			cpu_avg_load_per_task(this_cpu);
4111 
4112 	scaled_busy_load_per_task = sds->busiest_load_per_task
4113 					 * SCHED_POWER_SCALE;
4114 	scaled_busy_load_per_task /= sds->busiest->sgp->power;
4115 
4116 	if (sds->max_load - sds->this_load + scaled_busy_load_per_task >=
4117 			(scaled_busy_load_per_task * imbn)) {
4118 		*imbalance = sds->busiest_load_per_task;
4119 		return;
4120 	}
4121 
4122 	/*
4123 	 * OK, we don't have enough imbalance to justify moving tasks,
4124 	 * however we may be able to increase total CPU power used by
4125 	 * moving them.
4126 	 */
4127 
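	/*
	 * pwr_now is the throughput with the current task placement,
	 * pwr_move the throughput if one task of average busiest-group
	 * weight were moved over; both are in SCHED_POWER_SCALE units.
	 */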
4128 	pwr_now += sds->busiest->sgp->power *
4129 			min(sds->busiest_load_per_task, sds->max_load);
4130 	pwr_now += sds->this->sgp->power *
4131 			min(sds->this_load_per_task, sds->this_load);
4132 	pwr_now /= SCHED_POWER_SCALE;
4133 
4134 	/* Amount of load we'd subtract */
4135 	tmp = (sds->busiest_load_per_task * SCHED_POWER_SCALE) /
4136 		sds->busiest->sgp->power;
4137 	if (sds->max_load > tmp)
4138 		pwr_move += sds->busiest->sgp->power *
4139 			min(sds->busiest_load_per_task, sds->max_load - tmp);
4140 
4141 	/* Amount of load we'd add */
4142 	if (sds->max_load * sds->busiest->sgp->power <
4143 		sds->busiest_load_per_task * SCHED_POWER_SCALE)
4144 		tmp = (sds->max_load * sds->busiest->sgp->power) /
4145 			sds->this->sgp->power;
4146 	else
4147 		tmp = (sds->busiest_load_per_task * SCHED_POWER_SCALE) /
4148 			sds->this->sgp->power;
4149 	pwr_move += sds->this->sgp->power *
4150 			min(sds->this_load_per_task, sds->this_load + tmp);
4151 	pwr_move /= SCHED_POWER_SCALE;
4152 
4153 	/* Move if we gain throughput */
4154 	if (pwr_move > pwr_now)
4155 		*imbalance = sds->busiest_load_per_task;
4156 }
4157 
4158 /**
4159  * calculate_imbalance - Calculate the amount of imbalance present within the
4160  *			 groups of a given sched_domain during load balance.
4161  * @sds: statistics of the sched_domain whose imbalance is to be calculated.
4162  * @this_cpu: Cpu for which currently load balance is being performed.
4163  * @imbalance: The variable to store the imbalance.
4164  */
4165 static inline void calculate_imbalance(struct sd_lb_stats *sds, int this_cpu,
4166 		unsigned long *imbalance)
4167 {
4168 	unsigned long max_pull, load_above_capacity = ~0UL;
4169 
4170 	sds->busiest_load_per_task /= sds->busiest_nr_running;
4171 	if (sds->group_imb) {
4172 		sds->busiest_load_per_task =
4173 			min(sds->busiest_load_per_task, sds->avg_load);
4174 	}
4175 
4176 	/*
4177 	 * In the presence of smp nice balancing, certain scenarios can have
4178 	 * max load less than avg load (as we skip the groups at or below
4179 	 * their cpu_power while calculating max_load).
4180 	 */
4181 	if (sds->max_load < sds->avg_load) {
4182 		*imbalance = 0;
4183 		return fix_small_imbalance(sds, this_cpu, imbalance);
4184 	}
4185 
4186 	if (!sds->group_imb) {
4187 		/*
4188 		 * Don't want to pull so many tasks that a group would go idle.
4189 		 */
4190 		load_above_capacity = (sds->busiest_nr_running -
4191 						sds->busiest_group_capacity);
4192 
4193 		load_above_capacity *= (SCHED_LOAD_SCALE * SCHED_POWER_SCALE);
4194 
4195 		load_above_capacity /= sds->busiest->sgp->power;
4196 	}
4197 
4198 	/*
4199 	 * We're trying to get all the cpus to the average_load, so we don't
4200 	 * want to push ourselves above the average load, nor do we wish to
4201 	 * reduce the max loaded cpu below the average load. At the same time,
4202 	 * we also don't want to reduce the group load below the group capacity
4203 	 * (so that we can implement power-savings policies etc). Thus we look
4204 	 * for the minimum possible imbalance.
4205 	 * Be careful of negative numbers as they'll appear as very large values
4206 	 * with unsigned longs.
4207 	 */
4208 	max_pull = min(sds->max_load - sds->avg_load, load_above_capacity);
4209 
4210 	/* How much load to actually move to equalise the imbalance */
4211 	*imbalance = min(max_pull * sds->busiest->sgp->power,
4212 		(sds->avg_load - sds->this_load) * sds->this->sgp->power)
4213 			/ SCHED_POWER_SCALE;
4214 
4215 	/*
4216 	 * if *imbalance is less than the average load per runnable task
4217 	 * there is no guarantee that any tasks will be moved so we'll have
4218 	 * a think about bumping its value to force at least one task to be
4219 	 * moved
4220 	 */
4221 	if (*imbalance < sds->busiest_load_per_task)
4222 		return fix_small_imbalance(sds, this_cpu, imbalance);
4223 
4224 }
4225 
4226 /******* find_busiest_group() helpers end here *********************/
4227 
4228 /**
4229  * find_busiest_group - Returns the busiest group within the sched_domain
4230  * if there is an imbalance. If there isn't an imbalance, and
4231  * the user has opted for power-savings, it returns a group whose
4232  * CPUs can be put to idle by rebalancing those tasks elsewhere, if
4233  * such a group exists.
4234  *
4235  * Also calculates the amount of weighted load which should be moved
4236  * to restore balance.
4237  *
4238  * @sd: The sched_domain whose busiest group is to be returned.
4239  * @this_cpu: The cpu for which load balancing is currently being performed.
4240  * @imbalance: Variable which stores amount of weighted load which should
4241  *		be moved to restore balance/put a group to idle.
4242  * @idle: The idle status of this_cpu.
4243  * @cpus: The set of CPUs under consideration for load-balancing.
4244  * @balance: Pointer to a variable indicating if this_cpu
4245  *	is the appropriate cpu to perform load balancing at this_level.
4246  *
4247  * Returns:	- the busiest group if imbalance exists.
4248  *		- If no imbalance and user has opted for power-savings balance,
4249  *		   return the least loaded group whose CPUs can be
4250  *		   put to idle by rebalancing its tasks onto our group.
4251  */
4252 static struct sched_group *
4253 find_busiest_group(struct sched_domain *sd, int this_cpu,
4254 		   unsigned long *imbalance, enum cpu_idle_type idle,
4255 		   const struct cpumask *cpus, int *balance)
4256 {
4257 	struct sd_lb_stats sds;
4258 
4259 	memset(&sds, 0, sizeof(sds));
4260 
4261 	/*
4262 	 * Compute the various statistics relevant for load balancing at
4263 	 * this level.
4264 	 */
4265 	update_sd_lb_stats(sd, this_cpu, idle, cpus, balance, &sds);
4266 
4267 	/*
4268 	 * this_cpu is not the appropriate cpu to perform load balancing at
4269 	 * this level.
4270 	 */
4271 	if (!(*balance))
4272 		goto ret;
4273 
4274 	if ((idle == CPU_IDLE || idle == CPU_NEWLY_IDLE) &&
4275 	    check_asym_packing(sd, &sds, this_cpu, imbalance))
4276 		return sds.busiest;
4277 
4278 	/* There is no busy sibling group to pull tasks from */
4279 	if (!sds.busiest || sds.busiest_nr_running == 0)
4280 		goto out_balanced;
4281 
4282 	sds.avg_load = (SCHED_POWER_SCALE * sds.total_load) / sds.total_pwr;
4283 
4284 	/*
4285 	 * If the busiest group is imbalanced the below checks don't
4286 	 * work because they assume all things are equal, which typically
4287 	 * isn't true due to cpus_allowed constraints and the like.
4288 	 */
4289 	if (sds.group_imb)
4290 		goto force_balance;
4291 
4292 	/* SD_BALANCE_NEWIDLE trumps SMP nice when underutilized */
4293 	if (idle == CPU_NEWLY_IDLE && sds.this_has_capacity &&
4294 			!sds.busiest_has_capacity)
4295 		goto force_balance;
4296 
4297 	/*
4298 	 * If the local group is more busy than the selected busiest group
4299 	 * don't try and pull any tasks.
4300 	 */
4301 	if (sds.this_load >= sds.max_load)
4302 		goto out_balanced;
4303 
4304 	/*
4305 	 * Don't pull any tasks if this group is already above the domain
4306 	 * average load.
4307 	 */
4308 	if (sds.this_load >= sds.avg_load)
4309 		goto out_balanced;
4310 
4311 	if (idle == CPU_IDLE) {
4312 		/*
4313 		 * This cpu is idle. If the busiest group doesn't have
4314 		 * more tasks than the number of available cpus and
4315 		 * there is no imbalance between this and the busiest group
4316 		 * with respect to idle cpus, it is balanced.
4317 		 */
4318 		if ((sds.this_idle_cpus <= sds.busiest_idle_cpus + 1) &&
4319 		    sds.busiest_nr_running <= sds.busiest_group_weight)
4320 			goto out_balanced;
4321 	} else {
4322 		/*
4323 		 * In the CPU_NEWLY_IDLE, CPU_NOT_IDLE cases, use
4324 		 * imbalance_pct to be conservative.
4325 		 */
4326 		if (100 * sds.max_load <= sd->imbalance_pct * sds.this_load)
4327 			goto out_balanced;
4328 	}
4329 
4330 force_balance:
4331 	/* Looks like there is an imbalance. Compute it */
4332 	calculate_imbalance(&sds, this_cpu, imbalance);
4333 	return sds.busiest;
4334 
4335 out_balanced:
4336 	/*
4337 	 * There is no obvious imbalance. But check if we can do some balancing
4338 	 * to save power.
4339 	 */
4340 	if (check_power_save_busiest_group(&sds, this_cpu, imbalance))
4341 		return sds.busiest;
4342 ret:
4343 	*imbalance = 0;
4344 	return NULL;
4345 }
4346 
4347 /*
4348  * find_busiest_queue - find the busiest runqueue among the cpus in group.
4349  */
4350 static struct rq *
4351 find_busiest_queue(struct sched_domain *sd, struct sched_group *group,
4352 		   enum cpu_idle_type idle, unsigned long imbalance,
4353 		   const struct cpumask *cpus)
4354 {
4355 	struct rq *busiest = NULL, *rq;
4356 	unsigned long max_load = 0;
4357 	int i;
4358 
4359 	for_each_cpu(i, sched_group_cpus(group)) {
4360 		unsigned long power = power_of(i);
4361 		unsigned long capacity = DIV_ROUND_CLOSEST(power,
4362 							   SCHED_POWER_SCALE);
4363 		unsigned long wl;
4364 
4365 		if (!capacity)
4366 			capacity = fix_small_capacity(sd, group);
4367 
4368 		if (!cpumask_test_cpu(i, cpus))
4369 			continue;
4370 
4371 		rq = cpu_rq(i);
4372 		wl = weighted_cpuload(i);
4373 
4374 		/*
4375 		 * When comparing with imbalance, use weighted_cpuload()
4376 		 * which is not scaled with the cpu power.
4377 		 */
4378 		if (capacity && rq->nr_running == 1 && wl > imbalance)
4379 			continue;
4380 
4381 		/*
4382 		 * For the load comparisons with the other cpu's, consider
4383 		 * the weighted_cpuload() scaled with the cpu power, so that
4384 		 * the load can be moved away from the cpu that is potentially
4385 		 * running at a lower capacity.
4386 		 */
4387 		wl = (wl * SCHED_POWER_SCALE) / power;
4388 
4389 		if (wl > max_load) {
4390 			max_load = wl;
4391 			busiest = rq;
4392 		}
4393 	}
4394 
4395 	return busiest;
4396 }
4397 
4398 /*
4399  * Max backoff if we encounter pinned tasks. Pretty arbitrary value, but
4400  * so long as it is large enough.
4401  */
4402 #define MAX_PINNED_INTERVAL	512
4403 
4404 /* Working cpumask for load_balance and load_balance_newidle. */
4405 DEFINE_PER_CPU(cpumask_var_t, load_balance_tmpmask);
4406 
4407 static int need_active_balance(struct sched_domain *sd, int idle,
4408 			       int busiest_cpu, int this_cpu)
4409 {
4410 	if (idle == CPU_NEWLY_IDLE) {
4411 
4412 		/*
4413 		 * ASYM_PACKING needs to force migrate tasks from busy but
4414 		 * higher numbered CPUs in order to pack all tasks in the
4415 		 * lowest numbered CPUs.
4416 		 */
4417 		if ((sd->flags & SD_ASYM_PACKING) && busiest_cpu > this_cpu)
4418 			return 1;
4419 
4420 		/*
4421 		 * The only task running in a non-idle cpu can be moved to this
4422 		 * cpu in an attempt to completely free up the other CPU
4423 		 * package.
4424 		 *
4425 		 * The package power saving logic comes from
4426 		 * find_busiest_group(). If there is no imbalance, then
4427 		 * f_b_g() will return NULL. However when sched_mc={1,2} then
4428 		 * f_b_g() will select a group from which a running task may be
4429 		 * pulled to this cpu in order to make the other package idle.
4430 		 * If there is no opportunity to make a package idle and if
4431 		 * there is no imbalance, then f_b_g() will return NULL and no
4432 		 * action will be taken in load_balance_newidle().
4433 		 *
4434 		 * Under normal task pull operation due to imbalance, there
4435 		 * will be more than one task in the source run queue and
4436 		 * move_tasks() will succeed.  ld_moved will be true and this
4437 		 * active balance code will not be triggered.
4438 		 */
4439 		if (sched_mc_power_savings < POWERSAVINGS_BALANCE_WAKEUP)
4440 			return 0;
4441 	}
4442 
4443 	return unlikely(sd->nr_balance_failed > sd->cache_nice_tries+2);
4444 }
4445 
4446 static int active_load_balance_cpu_stop(void *data);
4447 
4448 /*
4449  * Check this_cpu to ensure it is balanced within domain. Attempt to move
4450  * tasks if there is an imbalance.
4451  */
4452 static int load_balance(int this_cpu, struct rq *this_rq,
4453 			struct sched_domain *sd, enum cpu_idle_type idle,
4454 			int *balance)
4455 {
4456 	int ld_moved, lb_flags = 0, active_balance = 0;
4457 	struct sched_group *group;
4458 	unsigned long imbalance;
4459 	struct rq *busiest;
4460 	unsigned long flags;
4461 	struct cpumask *cpus = __get_cpu_var(load_balance_tmpmask);
4462 
4463 	cpumask_copy(cpus, cpu_active_mask);
4464 
4465 	schedstat_inc(sd, lb_count[idle]);
4466 
4467 redo:
4468 	group = find_busiest_group(sd, this_cpu, &imbalance, idle,
4469 				   cpus, balance);
4470 
4471 	if (*balance == 0)
4472 		goto out_balanced;
4473 
4474 	if (!group) {
4475 		schedstat_inc(sd, lb_nobusyg[idle]);
4476 		goto out_balanced;
4477 	}
4478 
4479 	busiest = find_busiest_queue(sd, group, idle, imbalance, cpus);
4480 	if (!busiest) {
4481 		schedstat_inc(sd, lb_nobusyq[idle]);
4482 		goto out_balanced;
4483 	}
4484 
4485 	BUG_ON(busiest == this_rq);
4486 
4487 	schedstat_add(sd, lb_imbalance[idle], imbalance);
4488 
4489 	ld_moved = 0;
4490 	if (busiest->nr_running > 1) {
4491 		/*
4492 		 * Attempt to move tasks. If find_busiest_group has found
4493 		 * an imbalance but busiest->nr_running <= 1, the group is
4494 		 * still unbalanced. ld_moved simply stays zero, so it is
4495 		 * correctly treated as an imbalance.
4496 		 */
4497 		lb_flags |= LBF_ALL_PINNED;
4498 		local_irq_save(flags);
4499 		double_rq_lock(this_rq, busiest);
4500 		ld_moved = move_tasks(this_rq, this_cpu, busiest,
4501 				      imbalance, sd, idle, &lb_flags);
4502 		double_rq_unlock(this_rq, busiest);
4503 		local_irq_restore(flags);
4504 
4505 		/*
4506 		 * some other cpu did the load balance for us.
4507 		 */
4508 		if (ld_moved && this_cpu != smp_processor_id())
4509 			resched_cpu(this_cpu);
4510 
4511 		if (lb_flags & LBF_ABORT)
4512 			goto out_balanced;
4513 
4514 		if (lb_flags & LBF_NEED_BREAK) {
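			/*
			 * Convert the NEED_BREAK request into a HAD_BREAK
			 * record; repeated breaks eventually raise LBF_ABORT,
			 * which the check below catches before retrying.
			 */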
4515 			lb_flags += LBF_HAD_BREAK - LBF_NEED_BREAK;
4516 			if (lb_flags & LBF_ABORT)
4517 				goto out_balanced;
4518 			goto redo;
4519 		}
4520 
4521 		/* All tasks on this runqueue were pinned by CPU affinity */
4522 		if (unlikely(lb_flags & LBF_ALL_PINNED)) {
4523 			cpumask_clear_cpu(cpu_of(busiest), cpus);
4524 			if (!cpumask_empty(cpus))
4525 				goto redo;
4526 			goto out_balanced;
4527 		}
4528 	}
4529 
4530 	if (!ld_moved) {
4531 		schedstat_inc(sd, lb_failed[idle]);
4532 		/*
4533 		 * Increment the failure counter only on periodic balance.
4534 		 * We do not want newidle balance, which can be very
4535 		 * frequent, pollute the failure counter causing
4536 		 * excessive cache_hot migrations and active balances.
4537 		 */
4538 		if (idle != CPU_NEWLY_IDLE)
4539 			sd->nr_balance_failed++;
4540 
4541 		if (need_active_balance(sd, idle, cpu_of(busiest), this_cpu)) {
4542 			raw_spin_lock_irqsave(&busiest->lock, flags);
4543 
4544 			/* don't kick the active_load_balance_cpu_stop,
4545 			 * if the curr task on busiest cpu can't be
4546 			 * moved to this_cpu
4547 			 */
4548 			if (!cpumask_test_cpu(this_cpu,
4549 					tsk_cpus_allowed(busiest->curr))) {
4550 				raw_spin_unlock_irqrestore(&busiest->lock,
4551 							    flags);
4552 				lb_flags |= LBF_ALL_PINNED;
4553 				goto out_one_pinned;
4554 			}
4555 
4556 			/*
4557 			 * ->active_balance synchronizes accesses to
4558 			 * ->active_balance_work.  Once set, it's cleared
4559 			 * only after active load balance is finished.
4560 			 */
4561 			if (!busiest->active_balance) {
4562 				busiest->active_balance = 1;
4563 				busiest->push_cpu = this_cpu;
4564 				active_balance = 1;
4565 			}
4566 			raw_spin_unlock_irqrestore(&busiest->lock, flags);
4567 
4568 			if (active_balance)
4569 				stop_one_cpu_nowait(cpu_of(busiest),
4570 					active_load_balance_cpu_stop, busiest,
4571 					&busiest->active_balance_work);
4572 
4573 			/*
4574 			 * We've kicked active balancing, reset the failure
4575 			 * counter.
4576 			 */
4577 			sd->nr_balance_failed = sd->cache_nice_tries+1;
4578 		}
4579 	} else
4580 		sd->nr_balance_failed = 0;
4581 
4582 	if (likely(!active_balance)) {
4583 		/* We were unbalanced, so reset the balancing interval */
4584 		sd->balance_interval = sd->min_interval;
4585 	} else {
4586 		/*
4587 		 * If we've begun active balancing, start to back off. This
4588 		 * case may not be covered by the all_pinned logic if there
4589 		 * is only 1 task on the busy runqueue (because we don't call
4590 		 * move_tasks).
4591 		 */
4592 		if (sd->balance_interval < sd->max_interval)
4593 			sd->balance_interval *= 2;
4594 	}
4595 
4596 	goto out;
4597 
4598 out_balanced:
4599 	schedstat_inc(sd, lb_balanced[idle]);
4600 
4601 	sd->nr_balance_failed = 0;
4602 
4603 out_one_pinned:
4604 	/* tune up the balancing interval */
4605 	if (((lb_flags & LBF_ALL_PINNED) &&
4606 			sd->balance_interval < MAX_PINNED_INTERVAL) ||
4607 			(sd->balance_interval < sd->max_interval))
4608 		sd->balance_interval *= 2;
4609 
4610 	ld_moved = 0;
4611 out:
4612 	return ld_moved;
4613 }
4614 
4615 /*
4616  * idle_balance is called by schedule() if this_cpu is about to become
4617  * idle. Attempts to pull tasks from other CPUs.
4618  */
4619 void idle_balance(int this_cpu, struct rq *this_rq)
4620 {
4621 	struct sched_domain *sd;
4622 	int pulled_task = 0;
4623 	unsigned long next_balance = jiffies + HZ;
4624 
4625 	this_rq->idle_stamp = this_rq->clock;
4626 
4627 	if (this_rq->avg_idle < sysctl_sched_migration_cost)
4628 		return;
4629 
4630 	/*
4631 	 * Drop the rq->lock, but keep IRQ/preempt disabled.
4632 	 */
4633 	raw_spin_unlock(&this_rq->lock);
4634 
4635 	update_shares(this_cpu);
4636 	rcu_read_lock();
4637 	for_each_domain(this_cpu, sd) {
4638 		unsigned long interval;
4639 		int balance = 1;
4640 
4641 		if (!(sd->flags & SD_LOAD_BALANCE))
4642 			continue;
4643 
4644 		if (sd->flags & SD_BALANCE_NEWIDLE) {
4645 			/* If we've pulled tasks over, stop searching: */
4646 			pulled_task = load_balance(this_cpu, this_rq,
4647 						   sd, CPU_NEWLY_IDLE, &balance);
4648 		}
4649 
4650 		interval = msecs_to_jiffies(sd->balance_interval);
4651 		if (time_after(next_balance, sd->last_balance + interval))
4652 			next_balance = sd->last_balance + interval;
4653 		if (pulled_task) {
4654 			this_rq->idle_stamp = 0;
4655 			break;
4656 		}
4657 	}
4658 	rcu_read_unlock();
4659 
4660 	raw_spin_lock(&this_rq->lock);
4661 
4662 	if (pulled_task || time_after(jiffies, this_rq->next_balance)) {
4663 		/*
4664 		 * We are going idle. next_balance may be set based on
4665 		 * a busy processor. So reset next_balance.
4666 		 */
4667 		this_rq->next_balance = next_balance;
4668 	}
4669 }
4670 
4671 /*
4672  * active_load_balance_cpu_stop is run by cpu stopper. It pushes
4673  * running tasks off the busiest CPU onto idle CPUs. It requires at
4674  * least 1 task to be running on each physical CPU where possible, and
4675  * avoids physical / logical imbalances.
4676  */
4677 static int active_load_balance_cpu_stop(void *data)
4678 {
4679 	struct rq *busiest_rq = data;
4680 	int busiest_cpu = cpu_of(busiest_rq);
4681 	int target_cpu = busiest_rq->push_cpu;
4682 	struct rq *target_rq = cpu_rq(target_cpu);
4683 	struct sched_domain *sd;
4684 
4685 	raw_spin_lock_irq(&busiest_rq->lock);
4686 
4687 	/* make sure the requested cpu hasn't gone down in the meantime */
4688 	if (unlikely(busiest_cpu != smp_processor_id() ||
4689 		     !busiest_rq->active_balance))
4690 		goto out_unlock;
4691 
4692 	/* Is there any task to move? */
4693 	if (busiest_rq->nr_running <= 1)
4694 		goto out_unlock;
4695 
4696 	/*
4697 	 * This condition is "impossible", if it occurs
4698 	 * we need to fix it. Originally reported by
4699 	 * Bjorn Helgaas on a 128-cpu setup.
4700 	 */
4701 	BUG_ON(busiest_rq == target_rq);
4702 
4703 	/* move a task from busiest_rq to target_rq */
4704 	double_lock_balance(busiest_rq, target_rq);
4705 
4706 	/* Search for an sd spanning us and the target CPU. */
4707 	rcu_read_lock();
4708 	for_each_domain(target_cpu, sd) {
4709 		if ((sd->flags & SD_LOAD_BALANCE) &&
4710 		    cpumask_test_cpu(busiest_cpu, sched_domain_span(sd)))
4711 				break;
4712 	}
4713 
4714 	if (likely(sd)) {
4715 		schedstat_inc(sd, alb_count);
4716 
4717 		if (move_one_task(target_rq, target_cpu, busiest_rq,
4718 				  sd, CPU_IDLE))
4719 			schedstat_inc(sd, alb_pushed);
4720 		else
4721 			schedstat_inc(sd, alb_failed);
4722 	}
4723 	rcu_read_unlock();
4724 	double_unlock_balance(busiest_rq, target_rq);
4725 out_unlock:
4726 	busiest_rq->active_balance = 0;
4727 	raw_spin_unlock_irq(&busiest_rq->lock);
4728 	return 0;
4729 }
4730 
4731 #ifdef CONFIG_NO_HZ
4732 /*
4733  * idle load balancing details
4734  * - When one of the busy CPUs notices that idle rebalancing may be
4735  *   needed, it kicks the idle load balancer, which then does idle
4736  *   load balancing for all the idle CPUs.
4737  */
4738 static struct {
4739 	cpumask_var_t idle_cpus_mask;
4740 	atomic_t nr_cpus;
4741 	unsigned long next_balance;     /* in jiffy units */
4742 } nohz ____cacheline_aligned;
4743 
4744 #if defined(CONFIG_SCHED_MC) || defined(CONFIG_SCHED_SMT)
4745 /**
4746  * lowest_flag_domain - Return lowest sched_domain containing flag.
4747  * @cpu:	The cpu whose lowest level of sched domain is to
4748  *		be returned.
4749  * @flag:	The flag to check for the lowest sched_domain
4750  *		for the given cpu.
4751  *
4752  * Returns the lowest sched_domain of a cpu which contains the given flag.
4753  */
4754 static inline struct sched_domain *lowest_flag_domain(int cpu, int flag)
4755 {
4756 	struct sched_domain *sd;
4757 
4758 	for_each_domain(cpu, sd)
4759 		if (sd->flags & flag)
4760 			break;
4761 
4762 	return sd;
4763 }
4764 
4765 /**
4766  * for_each_flag_domain - Iterates over sched_domains containing the flag.
4767  * @cpu:	The cpu whose domains we're iterating over.
4768  * @sd:		variable holding the value of the power_savings_sd
4769  *		for cpu.
4770  * @flag:	The flag to filter the sched_domains to be iterated.
4771  *
4772  * Iterates over all the scheduler domains for a given cpu that has the 'flag'
4773  * set, starting from the lowest sched_domain to the highest.
4774  */
4775 #define for_each_flag_domain(cpu, sd, flag) \
4776 	for (sd = lowest_flag_domain(cpu, flag); \
4777 		(sd && (sd->flags & flag)); sd = sd->parent)
4778 
4779 /**
4780  * find_new_ilb - Finds the optimum idle load balancer for nomination.
4781  * @cpu:	The cpu which is nominating a new idle_load_balancer.
4782  *
4783  * Returns:	Returns the id of the idle load balancer if it exists,
4784  *		Else, returns >= nr_cpu_ids.
4785  *
4786  * This algorithm picks the idle load balancer such that it belongs to a
4787  * semi-idle powersavings sched_domain. The idea is to try and avoid
4788  * completely idle packages/cores just for the purpose of idle load balancing
4789  * when there are other idle cpu's which are better suited for that job.
4790  */
4791 static int find_new_ilb(int cpu)
4792 {
4793 	int ilb = cpumask_first(nohz.idle_cpus_mask);
4794 	struct sched_group *ilbg;
4795 	struct sched_domain *sd;
4796 
4797 	/*
4798 	 * Have idle load balancer selection from semi-idle packages only
4799 	 * when power-aware load balancing is enabled
4800 	 */
4801 	if (!(sched_smt_power_savings || sched_mc_power_savings))
4802 		goto out_done;
4803 
4804 	/*
4805 	 * Optimize for the case when we have no idle CPUs or only one
4806 	 * idle CPU. Don't walk the sched_domain hierarchy in such cases
4807 	 */
4808 	if (cpumask_weight(nohz.idle_cpus_mask) < 2)
4809 		goto out_done;
4810 
4811 	rcu_read_lock();
4812 	for_each_flag_domain(cpu, sd, SD_POWERSAVINGS_BALANCE) {
4813 		ilbg = sd->groups;
4814 
4815 		do {
4816 			if (ilbg->group_weight !=
4817 				atomic_read(&ilbg->sgp->nr_busy_cpus)) {
4818 				ilb = cpumask_first_and(nohz.idle_cpus_mask,
4819 							sched_group_cpus(ilbg));
4820 				goto unlock;
4821 			}
4822 
4823 			ilbg = ilbg->next;
4824 
4825 		} while (ilbg != sd->groups);
4826 	}
4827 unlock:
4828 	rcu_read_unlock();
4829 
4830 out_done:
4831 	if (ilb < nr_cpu_ids && idle_cpu(ilb))
4832 		return ilb;
4833 
4834 	return nr_cpu_ids;
4835 }
4836 #else /*  (CONFIG_SCHED_MC || CONFIG_SCHED_SMT) */
4837 static inline int find_new_ilb(int call_cpu)
4838 {
4839 	return nr_cpu_ids;
4840 }
4841 #endif
4842 
4843 /*
4844  * Kick a CPU to do the nohz balancing, if it is time for it. We pick the
4845  * nohz_load_balancer CPU (if there is one), otherwise fall back to any idle
4846  * CPU (if there is one).
4847  */
4848 static void nohz_balancer_kick(int cpu)
4849 {
4850 	int ilb_cpu;
4851 
4852 	nohz.next_balance++;
4853 
4854 	ilb_cpu = find_new_ilb(cpu);
4855 
4856 	if (ilb_cpu >= nr_cpu_ids)
4857 		return;
4858 
4859 	if (test_and_set_bit(NOHZ_BALANCE_KICK, nohz_flags(ilb_cpu)))
4860 		return;
4861 	/*
4862 	 * Use smp_send_reschedule() instead of resched_cpu().
4863 	 * This way we generate a sched IPI on the target cpu which
4864 	 * is idle. And the softirq performing nohz idle load balance
4865 	 * will be run before returning from the IPI.
4866 	 */
4867 	smp_send_reschedule(ilb_cpu);
4868 	return;
4869 }
4870 
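/*
 * Undo the nohz idle bookkeeping for a cpu: drop it from the idle cpus
 * mask and clear its tick-stopped state, e.g. when it goes busy or dies.
 */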
4871 static inline void clear_nohz_tick_stopped(int cpu)
4872 {
4873 	if (unlikely(test_bit(NOHZ_TICK_STOPPED, nohz_flags(cpu)))) {
4874 		cpumask_clear_cpu(cpu, nohz.idle_cpus_mask);
4875 		atomic_dec(&nohz.nr_cpus);
4876 		clear_bit(NOHZ_TICK_STOPPED, nohz_flags(cpu));
4877 	}
4878 }
4879 
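/*
 * nr_busy_cpus accounting for the nohz idle balancer: when a cpu stops
 * (or resumes) being nohz-idle, every sched_group it belongs to has its
 * busy-cpu count adjusted accordingly.
 */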
4880 static inline void set_cpu_sd_state_busy(void)
4881 {
4882 	struct sched_domain *sd;
4883 	int cpu = smp_processor_id();
4884 
4885 	if (!test_bit(NOHZ_IDLE, nohz_flags(cpu)))
4886 		return;
4887 	clear_bit(NOHZ_IDLE, nohz_flags(cpu));
4888 
4889 	rcu_read_lock();
4890 	for_each_domain(cpu, sd)
4891 		atomic_inc(&sd->groups->sgp->nr_busy_cpus);
4892 	rcu_read_unlock();
4893 }
4894 
4895 void set_cpu_sd_state_idle(void)
4896 {
4897 	struct sched_domain *sd;
4898 	int cpu = smp_processor_id();
4899 
4900 	if (test_bit(NOHZ_IDLE, nohz_flags(cpu)))
4901 		return;
4902 	set_bit(NOHZ_IDLE, nohz_flags(cpu));
4903 
4904 	rcu_read_lock();
4905 	for_each_domain(cpu, sd)
4906 		atomic_dec(&sd->groups->sgp->nr_busy_cpus);
4907 	rcu_read_unlock();
4908 }
4909 
4910 /*
4911  * This routine will record that this cpu is going idle with tick stopped.
4912  * This info will be used in performing idle load balancing in the future.
4913  */
4914 void select_nohz_load_balancer(int stop_tick)
4915 {
4916 	int cpu = smp_processor_id();
4917 
4918 	/*
4919 	 * If this cpu is going down, then nothing needs to be done.
4920 	 */
4921 	if (!cpu_active(cpu))
4922 		return;
4923 
4924 	if (stop_tick) {
4925 		if (test_bit(NOHZ_TICK_STOPPED, nohz_flags(cpu)))
4926 			return;
4927 
4928 		cpumask_set_cpu(cpu, nohz.idle_cpus_mask);
4929 		atomic_inc(&nohz.nr_cpus);
4930 		set_bit(NOHZ_TICK_STOPPED, nohz_flags(cpu));
4931 	}
4932 	return;
4933 }
4934 
4935 static int __cpuinit sched_ilb_notifier(struct notifier_block *nfb,
4936 					unsigned long action, void *hcpu)
4937 {
4938 	switch (action & ~CPU_TASKS_FROZEN) {
4939 	case CPU_DYING:
4940 		clear_nohz_tick_stopped(smp_processor_id());
4941 		return NOTIFY_OK;
4942 	default:
4943 		return NOTIFY_DONE;
4944 	}
4945 }
4946 #endif
4947 
4948 static DEFINE_SPINLOCK(balancing);
4949 
4950 static unsigned long __read_mostly max_load_balance_interval = HZ/10;
4951 
4952 /*
4953  * Scale the max load_balance interval with the number of CPUs in the system.
4954  * This trades load-balance latency on larger machines for less cross talk.
4955  */
4956 void update_max_interval(void)
4957 {
4958 	max_load_balance_interval = HZ*num_online_cpus()/10;
4959 }
4960 
4961 /*
4962  * It checks each scheduling domain to see if it is due to be balanced,
4963  * and initiates a balancing operation if so.
4964  *
4965  * Balancing parameters are set up in arch_init_sched_domains.
4966  */
4967 static void rebalance_domains(int cpu, enum cpu_idle_type idle)
4968 {
4969 	int balance = 1;
4970 	struct rq *rq = cpu_rq(cpu);
4971 	unsigned long interval;
4972 	struct sched_domain *sd;
4973 	/* Earliest time when we have to do rebalance again */
4974 	unsigned long next_balance = jiffies + 60*HZ;
4975 	int update_next_balance = 0;
4976 	int need_serialize;
4977 
4978 	update_shares(cpu);
4979 
4980 	rcu_read_lock();
4981 	for_each_domain(cpu, sd) {
4982 		if (!(sd->flags & SD_LOAD_BALANCE))
4983 			continue;
4984 
4985 		interval = sd->balance_interval;
4986 		if (idle != CPU_IDLE)
4987 			interval *= sd->busy_factor;
4988 
4989 		/* scale ms to jiffies */
4990 		interval = msecs_to_jiffies(interval);
4991 		interval = clamp(interval, 1UL, max_load_balance_interval);
4992 
4993 		need_serialize = sd->flags & SD_SERIALIZE;
4994 
4995 		if (need_serialize) {
4996 			if (!spin_trylock(&balancing))
4997 				goto out;
4998 		}
4999 
5000 		if (time_after_eq(jiffies, sd->last_balance + interval)) {
5001 			if (load_balance(cpu, rq, sd, idle, &balance)) {
5002 				/*
5003 				 * We've pulled tasks over, so we're no
5004 				 * longer idle.
5005 				 */
5006 				idle = CPU_NOT_IDLE;
5007 			}
5008 			sd->last_balance = jiffies;
5009 		}
5010 		if (need_serialize)
5011 			spin_unlock(&balancing);
5012 out:
5013 		if (time_after(next_balance, sd->last_balance + interval)) {
5014 			next_balance = sd->last_balance + interval;
5015 			update_next_balance = 1;
5016 		}
5017 
5018 		/*
5019 		 * Stop the load balance at this level. There is another
5020 		 * CPU in our sched group which is doing load balancing more
5021 		 * actively.
5022 		 */
5023 		if (!balance)
5024 			break;
5025 	}
5026 	rcu_read_unlock();
5027 
5028 	/*
5029 	 * next_balance will be updated only when there is a need.
5030 	 * When the cpu is attached to a null domain, for example, it will not be
5031 	 * updated.
5032 	 */
5033 	if (likely(update_next_balance))
5034 		rq->next_balance = next_balance;
5035 }
5036 
5037 #ifdef CONFIG_NO_HZ
5038 /*
5039  * In CONFIG_NO_HZ case, the idle balance kickee will do the
5040  * rebalancing for all the cpus for whom scheduler ticks are stopped.
5041  */
5042 static void nohz_idle_balance(int this_cpu, enum cpu_idle_type idle)
5043 {
5044 	struct rq *this_rq = cpu_rq(this_cpu);
5045 	struct rq *rq;
5046 	int balance_cpu;
5047 
5048 	if (idle != CPU_IDLE ||
5049 	    !test_bit(NOHZ_BALANCE_KICK, nohz_flags(this_cpu)))
5050 		goto end;
5051 
5052 	for_each_cpu(balance_cpu, nohz.idle_cpus_mask) {
5053 		if (balance_cpu == this_cpu || !idle_cpu(balance_cpu))
5054 			continue;
5055 
5056 		/*
5057 		 * If this cpu gets work to do, stop the load balancing
5058 		 * work being done for other cpus. Next load
5059 		 * balancing owner will pick it up.
5060 		 */
5061 		if (need_resched())
5062 			break;
5063 
5064 		raw_spin_lock_irq(&this_rq->lock);
5065 		update_rq_clock(this_rq);
5066 		update_cpu_load(this_rq);
5067 		raw_spin_unlock_irq(&this_rq->lock);
5068 
5069 		rebalance_domains(balance_cpu, CPU_IDLE);
5070 
5071 		rq = cpu_rq(balance_cpu);
5072 		if (time_after(this_rq->next_balance, rq->next_balance))
5073 			this_rq->next_balance = rq->next_balance;
5074 	}
5075 	nohz.next_balance = this_rq->next_balance;
5076 end:
5077 	clear_bit(NOHZ_BALANCE_KICK, nohz_flags(this_cpu));
5078 }
5079 
5080 /*
5081  * Current heuristic for kicking the idle load balancer in the presence
5082  * of an idle cpu in the system.
5083  *   - This rq has more than one task.
5084  *   - At any scheduler domain level, this cpu's scheduler group has multiple
5085  *     busy cpu's exceeding the group's power.
5086  *   - For SD_ASYM_PACKING, if the lower numbered cpu's in the scheduler
5087  *     domain span are idle.
5088  */
5089 static inline int nohz_kick_needed(struct rq *rq, int cpu)
5090 {
5091 	unsigned long now = jiffies;
5092 	struct sched_domain *sd;
5093 
5094 	if (unlikely(idle_cpu(cpu)))
5095 		return 0;
5096 
5097 	/*
5098 	 * We may have recently been in ticked or tickless idle mode. At the first
5099 	 * busy tick after returning from idle, we will update the busy stats.
5100 	 */
5101 	set_cpu_sd_state_busy();
5102 	clear_nohz_tick_stopped(cpu);
5103 
5104 	/*
5105 	 * None are in tickless mode and hence no need for NOHZ idle load
5106 	 * balancing.
5107 	 */
5108 	if (likely(!atomic_read(&nohz.nr_cpus)))
5109 		return 0;
5110 
5111 	if (time_before(now, nohz.next_balance))
5112 		return 0;
5113 
5114 	if (rq->nr_running >= 2)
5115 		goto need_kick;
5116 
5117 	rcu_read_lock();
5118 	for_each_domain(cpu, sd) {
5119 		struct sched_group *sg = sd->groups;
5120 		struct sched_group_power *sgp = sg->sgp;
5121 		int nr_busy = atomic_read(&sgp->nr_busy_cpus);
5122 
5123 		if (sd->flags & SD_SHARE_PKG_RESOURCES && nr_busy > 1)
5124 			goto need_kick_unlock;
5125 
5126 		if (sd->flags & SD_ASYM_PACKING && nr_busy != sg->group_weight
5127 		    && (cpumask_first_and(nohz.idle_cpus_mask,
5128 					  sched_domain_span(sd)) < cpu))
5129 			goto need_kick_unlock;
5130 
5131 		if (!(sd->flags & (SD_SHARE_PKG_RESOURCES | SD_ASYM_PACKING)))
5132 			break;
5133 	}
5134 	rcu_read_unlock();
5135 	return 0;
5136 
5137 need_kick_unlock:
5138 	rcu_read_unlock();
5139 need_kick:
5140 	return 1;
5141 }
5142 #else
5143 static void nohz_idle_balance(int this_cpu, enum cpu_idle_type idle) { }
5144 #endif
5145 
5146 /*
5147  * run_rebalance_domains is triggered when needed from the scheduler tick.
5148  * Also triggered for nohz idle balancing (with nohz_balancing_kick set).
5149  */
5150 static void run_rebalance_domains(struct softirq_action *h)
5151 {
5152 	int this_cpu = smp_processor_id();
5153 	struct rq *this_rq = cpu_rq(this_cpu);
5154 	enum cpu_idle_type idle = this_rq->idle_balance ?
5155 						CPU_IDLE : CPU_NOT_IDLE;
5156 
5157 	rebalance_domains(this_cpu, idle);
5158 
5159 	/*
5160 	 * If this cpu has a pending nohz_balance_kick, then do the
5161 	 * balancing on behalf of the other idle cpus whose ticks are
5162 	 * stopped.
5163 	 */
5164 	nohz_idle_balance(this_cpu, idle);
5165 }
5166 
static inline int on_null_domain(int cpu)
{
	return !rcu_dereference_sched(cpu_rq(cpu)->sd);
}

/*
 * Trigger the SCHED_SOFTIRQ if it is time to do periodic load balancing.
 */
void trigger_load_balance(struct rq *rq, int cpu)
{
	/* Don't need to rebalance while attached to NULL domain */
	if (time_after_eq(jiffies, rq->next_balance) &&
	    likely(!on_null_domain(cpu)))
		raise_softirq(SCHED_SOFTIRQ);
#ifdef CONFIG_NO_HZ
	if (nohz_kick_needed(rq, cpu) && likely(!on_null_domain(cpu)))
		nohz_balancer_kick(cpu);
#endif
}

static void rq_online_fair(struct rq *rq)
{
	update_sysctl();
}

static void rq_offline_fair(struct rq *rq)
{
	update_sysctl();
}

#endif /* CONFIG_SMP */

/*
 * scheduler tick hitting a task of our scheduling class:
 */
static void task_tick_fair(struct rq *rq, struct task_struct *curr, int queued)
{
	struct cfs_rq *cfs_rq;
	struct sched_entity *se = &curr->se;

	for_each_sched_entity(se) {
		cfs_rq = cfs_rq_of(se);
		entity_tick(cfs_rq, se, queued);
	}
}

/*
 * called on fork with the child task as argument from the parent's context
 *  - child not yet on the tasklist
 *  - preemption disabled
 */
static void task_fork_fair(struct task_struct *p)
{
	struct cfs_rq *cfs_rq;
	struct sched_entity *se = &p->se, *curr;
	int this_cpu = smp_processor_id();
	struct rq *rq = this_rq();
	unsigned long flags;

	raw_spin_lock_irqsave(&rq->lock, flags);

	update_rq_clock(rq);

	cfs_rq = task_cfs_rq(current);
	curr = cfs_rq->curr;

	if (unlikely(task_cpu(p) != this_cpu)) {
		rcu_read_lock();
		__set_task_cpu(p, this_cpu);
		rcu_read_unlock();
	}

	update_curr(cfs_rq);

	if (curr)
		se->vruntime = curr->vruntime;
	place_entity(cfs_rq, se, 1);

	if (sysctl_sched_child_runs_first && curr && entity_before(curr, se)) {
		/*
		 * Upon rescheduling, sched_class::put_prev_task() will place
		 * 'current' within the tree based on its new key value.
		 */
		swap(curr->vruntime, se->vruntime);
		resched_task(rq->curr);
	}

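	/*
	 * The child may end up being queued on a different cpu by
	 * wake_up_new_task(), so store its vruntime relative to this
	 * cfs_rq's min_vruntime; enqueue_entity() adds the destination
	 * cfs_rq's min_vruntime back in.
	 */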
	se->vruntime -= cfs_rq->min_vruntime;

	raw_spin_unlock_irqrestore(&rq->lock, flags);
}

/*
 * Priority of the task has changed. Check to see if we preempt
 * the current task.
 */
static void
prio_changed_fair(struct rq *rq, struct task_struct *p, int oldprio)
{
	if (!p->se.on_rq)
		return;

	/*
	 * Reschedule if we are currently running on this runqueue and
	 * our priority decreased, or if we are not currently running on
	 * this runqueue and our priority is higher than the current's.
	 */
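	/* Note: a numerically larger ->prio means a lower priority. */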
	if (rq->curr == p) {
		if (p->prio > oldprio)
			resched_task(rq->curr);
	} else
		check_preempt_curr(rq, p, 0);
}

static void switched_from_fair(struct rq *rq, struct task_struct *p)
{
	struct sched_entity *se = &p->se;
	struct cfs_rq *cfs_rq = cfs_rq_of(se);

	/*
	 * Ensure the task's vruntime is normalized, so that when it is
	 * switched back to the fair class the enqueue_entity(.flags=0) will
	 * do the right thing.
	 *
	 * If it was on_rq, then the dequeue_entity(.flags=0) will already
	 * have normalized the vruntime; if it was !on_rq, then only when
	 * the task is sleeping will it still have non-normalized vruntime.
	 */
	if (!se->on_rq && p->state != TASK_RUNNING) {
		/*
		 * Fix up our vruntime so that the current sleep doesn't
		 * cause an 'unlimited' sleep bonus.
		 */
		place_entity(cfs_rq, se, 0);
		se->vruntime -= cfs_rq->min_vruntime;
	}
}

/*
 * We switched to the sched_fair class.
 */
static void switched_to_fair(struct rq *rq, struct task_struct *p)
{
	if (!p->se.on_rq)
		return;

	/*
	 * We were most likely switched from sched_rt, so
	 * kick off the schedule if running, otherwise just see
	 * if we can still preempt the current task.
	 */
	if (rq->curr == p)
		resched_task(rq->curr);
	else
		check_preempt_curr(rq, p, 0);
}

/*
 * Account for a task changing its policy or group.
 *
 * This routine is mostly called to set cfs_rq->curr field when a task
 * migrates between groups/classes.
 */
static void set_curr_task_fair(struct rq *rq)
{
	struct sched_entity *se = &rq->curr->se;

	for_each_sched_entity(se) {
		struct cfs_rq *cfs_rq = cfs_rq_of(se);

		set_next_entity(cfs_rq, se);
		/* ensure bandwidth has been allocated on our new cfs_rq */
		account_cfs_rq_runtime(cfs_rq, 0);
	}
}

void init_cfs_rq(struct cfs_rq *cfs_rq)
{
	cfs_rq->tasks_timeline = RB_ROOT;
	INIT_LIST_HEAD(&cfs_rq->tasks);
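	/*
	 * min_vruntime starts just below the u64 wrap point, so that
	 * vruntime wrap-around (and the signed comparisons that handle
	 * it) is exercised early rather than only after a long uptime.
	 */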
	cfs_rq->min_vruntime = (u64)(-(1LL << 20));
#ifndef CONFIG_64BIT
	cfs_rq->min_vruntime_copy = cfs_rq->min_vruntime;
#endif
}

#ifdef CONFIG_FAIR_GROUP_SCHED
static void task_move_group_fair(struct task_struct *p, int on_rq)
{
	/*
	 * If the task was not on the rq at the time of this cgroup movement
	 * it must have been asleep; sleeping tasks keep their ->vruntime
	 * absolute on their old rq until wakeup (needed for the fair sleeper
	 * bonus in place_entity()).
	 *
	 * If it was on the rq, we've just 'preempted' it, which does convert
	 * ->vruntime to a relative base.
	 *
	 * Make sure both cases convert their relative position when migrating
	 * to another cgroup's rq. This does somewhat interfere with the
	 * fair sleeper stuff for the first placement, but who cares.
	 */
	/*
	 * When !on_rq, vruntime of the task has usually NOT been normalized.
	 * But there are some cases where it has already been normalized:
	 *
	 * - Moving a forked child that is waiting to be woken up by
	 *   wake_up_new_task().
	 * - Moving a task that has been woken up by try_to_wake_up() but is
	 *   waiting to actually be woken up by sched_ttwu_pending().
	 *
	 * To prevent boost or penalty in the new cfs_rq caused by delta
	 * min_vruntime between the two cfs_rqs, we skip vruntime adjustment.
	 */
	if (!on_rq && (!p->se.sum_exec_runtime || p->state == TASK_WAKING))
		on_rq = 1;

	if (!on_rq)
		p->se.vruntime -= cfs_rq_of(&p->se)->min_vruntime;
	set_task_rq(p, task_cpu(p));
	if (!on_rq)
		p->se.vruntime += cfs_rq_of(&p->se)->min_vruntime;
}

void free_fair_sched_group(struct task_group *tg)
{
	int i;

	destroy_cfs_bandwidth(tg_cfs_bandwidth(tg));

	for_each_possible_cpu(i) {
		if (tg->cfs_rq)
			kfree(tg->cfs_rq[i]);
		if (tg->se)
			kfree(tg->se[i]);
	}

	kfree(tg->cfs_rq);
	kfree(tg->se);
}

int alloc_fair_sched_group(struct task_group *tg, struct task_group *parent)
{
	struct cfs_rq *cfs_rq;
	struct sched_entity *se;
	int i;

	tg->cfs_rq = kzalloc(sizeof(cfs_rq) * nr_cpu_ids, GFP_KERNEL);
	if (!tg->cfs_rq)
		goto err;
	tg->se = kzalloc(sizeof(se) * nr_cpu_ids, GFP_KERNEL);
	if (!tg->se)
		goto err;

	tg->shares = NICE_0_LOAD;

	init_cfs_bandwidth(tg_cfs_bandwidth(tg));

	for_each_possible_cpu(i) {
		cfs_rq = kzalloc_node(sizeof(struct cfs_rq),
				      GFP_KERNEL, cpu_to_node(i));
		if (!cfs_rq)
			goto err;

		se = kzalloc_node(sizeof(struct sched_entity),
				  GFP_KERNEL, cpu_to_node(i));
		if (!se)
			goto err_free_rq;

		init_cfs_rq(cfs_rq);
		init_tg_cfs_entry(tg, cfs_rq, se, i, parent->se[i]);
	}

	return 1;

err_free_rq:
	kfree(cfs_rq);
err:
	return 0;
}

void unregister_fair_sched_group(struct task_group *tg, int cpu)
{
	struct rq *rq = cpu_rq(cpu);
	unsigned long flags;

	/*
	 * Only empty task groups can be destroyed; so we can speculatively
	 * check on_list without danger of it being re-added.
	 */
	if (!tg->cfs_rq[cpu]->on_list)
		return;

	raw_spin_lock_irqsave(&rq->lock, flags);
	list_del_leaf_cfs_rq(tg->cfs_rq[cpu]);
	raw_spin_unlock_irqrestore(&rq->lock, flags);
}

void init_tg_cfs_entry(struct task_group *tg, struct cfs_rq *cfs_rq,
			struct sched_entity *se, int cpu,
			struct sched_entity *parent)
{
	struct rq *rq = cpu_rq(cpu);

	cfs_rq->tg = tg;
	cfs_rq->rq = rq;
#ifdef CONFIG_SMP
	/* allow initial update_cfs_load() to truncate */
	cfs_rq->load_stamp = 1;
#endif
	init_cfs_rq_runtime(cfs_rq);

	tg->cfs_rq[cpu] = cfs_rq;
	tg->se[cpu] = se;

	/* se could be NULL for root_task_group */
	if (!se)
		return;

	if (!parent)
		se->cfs_rq = &rq->cfs;
	else
		se->cfs_rq = parent->my_q;

	se->my_q = cfs_rq;
	update_load_set(&se->load, 0);
	se->parent = parent;
}

static DEFINE_MUTEX(shares_mutex);

int sched_group_set_shares(struct task_group *tg, unsigned long shares)
{
	int i;
	unsigned long flags;

	/*
	 * We can't change the weight of the root cgroup.
	 */
	if (!tg->se[0])
		return -EINVAL;

	shares = clamp(shares, scale_load(MIN_SHARES), scale_load(MAX_SHARES));

	mutex_lock(&shares_mutex);
	if (tg->shares == shares)
		goto done;

	tg->shares = shares;
	for_each_possible_cpu(i) {
		struct rq *rq = cpu_rq(i);
		struct sched_entity *se;

		se = tg->se[i];
		/* Propagate contribution to hierarchy */
		raw_spin_lock_irqsave(&rq->lock, flags);
		for_each_sched_entity(se)
			update_cfs_shares(group_cfs_rq(se));
		raw_spin_unlock_irqrestore(&rq->lock, flags);
	}

done:
	mutex_unlock(&shares_mutex);
	return 0;
}
#else /* CONFIG_FAIR_GROUP_SCHED */

void free_fair_sched_group(struct task_group *tg) { }

int alloc_fair_sched_group(struct task_group *tg, struct task_group *parent)
{
	return 1;
}

void unregister_fair_sched_group(struct task_group *tg, int cpu) { }

#endif /* CONFIG_FAIR_GROUP_SCHED */


static unsigned int get_rr_interval_fair(struct rq *rq, struct task_struct *task)
{
	struct sched_entity *se = &task->se;
	unsigned int rr_interval = 0;

	/*
	 * Time slice is 0 for SCHED_OTHER tasks that are on an otherwise
	 * idle runqueue:
	 */
	if (rq->cfs.load.weight)
		rr_interval = NS_TO_JIFFIES(sched_slice(&rq->cfs, se));

	return rr_interval;
}

/*
 * All the scheduling class methods:
 */
const struct sched_class fair_sched_class = {
	.next			= &idle_sched_class,
	.enqueue_task		= enqueue_task_fair,
	.dequeue_task		= dequeue_task_fair,
	.yield_task		= yield_task_fair,
	.yield_to_task		= yield_to_task_fair,

	.check_preempt_curr	= check_preempt_wakeup,

	.pick_next_task		= pick_next_task_fair,
	.put_prev_task		= put_prev_task_fair,

#ifdef CONFIG_SMP
	.select_task_rq		= select_task_rq_fair,

	.rq_online		= rq_online_fair,
	.rq_offline		= rq_offline_fair,

	.task_waking		= task_waking_fair,
#endif

	.set_curr_task		= set_curr_task_fair,
	.task_tick		= task_tick_fair,
	.task_fork		= task_fork_fair,

	.prio_changed		= prio_changed_fair,
	.switched_from		= switched_from_fair,
	.switched_to		= switched_to_fair,

	.get_rr_interval	= get_rr_interval_fair,

#ifdef CONFIG_FAIR_GROUP_SCHED
	.task_move_group	= task_move_group_fair,
#endif
};

#ifdef CONFIG_SCHED_DEBUG
void print_cfs_stats(struct seq_file *m, int cpu)
{
	struct cfs_rq *cfs_rq;

	rcu_read_lock();
	for_each_leaf_cfs_rq(cpu_rq(cpu), cfs_rq)
		print_cfs_rq(m, cpu, cfs_rq);
	rcu_read_unlock();
}
#endif

__init void init_sched_fair_class(void)
{
#ifdef CONFIG_SMP
	open_softirq(SCHED_SOFTIRQ, run_rebalance_domains);

#ifdef CONFIG_NO_HZ
	zalloc_cpumask_var(&nohz.idle_cpus_mask, GFP_NOWAIT);
	cpu_notifier(sched_ilb_notifier, 0);
#endif
#endif /* SMP */

}