1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Real-Time Scheduling Class (mapped to the SCHED_FIFO and SCHED_RR
4  * policies)
5  */
6 
7 int sched_rr_timeslice = RR_TIMESLICE;
8 /* More than 4 hours if BW_SHIFT equals 20. */
9 static const u64 max_rt_runtime = MAX_BW;
10 
11 /*
12  * period over which we measure -rt task CPU usage in us.
13  * default: 1s
14  */
15 int sysctl_sched_rt_period = 1000000;
16 
17 /*
18  * part of the period that we allow rt tasks to run in us.
19  * default: 0.95s
20  */
21 int sysctl_sched_rt_runtime = 950000;
22 
23 #ifdef CONFIG_SYSCTL
24 static int sysctl_sched_rr_timeslice = (MSEC_PER_SEC * RR_TIMESLICE) / HZ;
25 static int sched_rt_handler(const struct ctl_table *table, int write, void *buffer,
26 		size_t *lenp, loff_t *ppos);
27 static int sched_rr_handler(const struct ctl_table *table, int write, void *buffer,
28 		size_t *lenp, loff_t *ppos);
29 static const struct ctl_table sched_rt_sysctls[] = {
30 	{
31 		.procname       = "sched_rt_period_us",
32 		.data           = &sysctl_sched_rt_period,
33 		.maxlen         = sizeof(int),
34 		.mode           = 0644,
35 		.proc_handler   = sched_rt_handler,
36 		.extra1         = SYSCTL_ONE,
37 		.extra2         = SYSCTL_INT_MAX,
38 	},
39 	{
40 		.procname       = "sched_rt_runtime_us",
41 		.data           = &sysctl_sched_rt_runtime,
42 		.maxlen         = sizeof(int),
43 		.mode           = 0644,
44 		.proc_handler   = sched_rt_handler,
45 		.extra1         = SYSCTL_NEG_ONE,
46 		.extra2         = (void *)&sysctl_sched_rt_period,
47 	},
48 	{
49 		.procname       = "sched_rr_timeslice_ms",
50 		.data           = &sysctl_sched_rr_timeslice,
51 		.maxlen         = sizeof(int),
52 		.mode           = 0644,
53 		.proc_handler   = sched_rr_handler,
54 	},
55 };
56 
57 static int __init sched_rt_sysctl_init(void)
58 {
59 	register_sysctl_init("kernel", sched_rt_sysctls);
60 	return 0;
61 }
62 late_initcall(sched_rt_sysctl_init);
63 #endif
64 
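/*
 * Initialize an rt_rq: empty per-priority FIFO queues, a cleared bitmap
 * with the delimiter bit set for sched_find_first_bit(), and default
 * priority, overload and throttling state.
 */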
65 void init_rt_rq(struct rt_rq *rt_rq)
66 {
67 	struct rt_prio_array *array;
68 	int i;
69 
70 	array = &rt_rq->active;
71 	for (i = 0; i < MAX_RT_PRIO; i++) {
72 		INIT_LIST_HEAD(array->queue + i);
73 		__clear_bit(i, array->bitmap);
74 	}
75 	/* delimiter for bitsearch: */
76 	__set_bit(MAX_RT_PRIO, array->bitmap);
77 
78 #if defined CONFIG_SMP
79 	rt_rq->highest_prio.curr = MAX_RT_PRIO-1;
80 	rt_rq->highest_prio.next = MAX_RT_PRIO-1;
81 	rt_rq->overloaded = 0;
82 	plist_head_init(&rt_rq->pushable_tasks);
83 #endif /* CONFIG_SMP */
84 	/* We start in dequeued state, because no RT tasks are queued */
85 	rt_rq->rt_queued = 0;
86 
87 #ifdef CONFIG_RT_GROUP_SCHED
88 	rt_rq->rt_time = 0;
89 	rt_rq->rt_throttled = 0;
90 	rt_rq->rt_runtime = 0;
91 	raw_spin_lock_init(&rt_rq->rt_runtime_lock);
92 #endif
93 }
94 
95 #ifdef CONFIG_RT_GROUP_SCHED
96 
97 static int do_sched_rt_period_timer(struct rt_bandwidth *rt_b, int overrun);
98 
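/*
 * Replenishment timer for RT group bandwidth: forwards itself by one
 * rt_period at a time, replenishing runtime via do_sched_rt_period_timer(),
 * and stops once an entire period went by idle.
 */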
99 static enum hrtimer_restart sched_rt_period_timer(struct hrtimer *timer)
100 {
101 	struct rt_bandwidth *rt_b =
102 		container_of(timer, struct rt_bandwidth, rt_period_timer);
103 	int idle = 0;
104 	int overrun;
105 
106 	raw_spin_lock(&rt_b->rt_runtime_lock);
107 	for (;;) {
108 		overrun = hrtimer_forward_now(timer, rt_b->rt_period);
109 		if (!overrun)
110 			break;
111 
112 		raw_spin_unlock(&rt_b->rt_runtime_lock);
113 		idle = do_sched_rt_period_timer(rt_b, overrun);
114 		raw_spin_lock(&rt_b->rt_runtime_lock);
115 	}
116 	if (idle)
117 		rt_b->rt_period_active = 0;
118 	raw_spin_unlock(&rt_b->rt_runtime_lock);
119 
120 	return idle ? HRTIMER_NORESTART : HRTIMER_RESTART;
121 }
122 
123 void init_rt_bandwidth(struct rt_bandwidth *rt_b, u64 period, u64 runtime)
124 {
125 	rt_b->rt_period = ns_to_ktime(period);
126 	rt_b->rt_runtime = runtime;
127 
128 	raw_spin_lock_init(&rt_b->rt_runtime_lock);
129 
130 	hrtimer_setup(&rt_b->rt_period_timer, sched_rt_period_timer, CLOCK_MONOTONIC,
131 		      HRTIMER_MODE_REL_HARD);
132 }
133 
134 static inline void do_start_rt_bandwidth(struct rt_bandwidth *rt_b)
135 {
136 	raw_spin_lock(&rt_b->rt_runtime_lock);
137 	if (!rt_b->rt_period_active) {
138 		rt_b->rt_period_active = 1;
139 		/*
140 		 * SCHED_DEADLINE updates the bandwidth, as a runaway
141 		 * RT task with a DL task could hog a CPU. But DL does
142 		 * not reset the period. If a deadline task was running
143 		 * without an RT task running, it can cause RT tasks to
144 		 * throttle when they start up. Kick the timer right away
145 		 * to update the period.
146 		 */
147 		hrtimer_forward_now(&rt_b->rt_period_timer, ns_to_ktime(0));
148 		hrtimer_start_expires(&rt_b->rt_period_timer,
149 				      HRTIMER_MODE_ABS_PINNED_HARD);
150 	}
151 	raw_spin_unlock(&rt_b->rt_runtime_lock);
152 }
153 
154 static void start_rt_bandwidth(struct rt_bandwidth *rt_b)
155 {
156 	if (!rt_bandwidth_enabled() || rt_b->rt_runtime == RUNTIME_INF)
157 		return;
158 
159 	do_start_rt_bandwidth(rt_b);
160 }
161 
162 static void destroy_rt_bandwidth(struct rt_bandwidth *rt_b)
163 {
164 	hrtimer_cancel(&rt_b->rt_period_timer);
165 }
166 
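/* An rt_se with no group runqueue (my_q) backs a task, not a task group. */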
167 #define rt_entity_is_task(rt_se) (!(rt_se)->my_q)
168 
169 static inline struct task_struct *rt_task_of(struct sched_rt_entity *rt_se)
170 {
171 	WARN_ON_ONCE(!rt_entity_is_task(rt_se));
172 
173 	return container_of(rt_se, struct task_struct, rt);
174 }
175 
176 static inline struct rq *rq_of_rt_rq(struct rt_rq *rt_rq)
177 {
178 	return rt_rq->rq;
179 }
180 
181 static inline struct rt_rq *rt_rq_of_se(struct sched_rt_entity *rt_se)
182 {
183 	return rt_se->rt_rq;
184 }
185 
186 static inline struct rq *rq_of_rt_se(struct sched_rt_entity *rt_se)
187 {
188 	struct rt_rq *rt_rq = rt_se->rt_rq;
189 
190 	return rt_rq->rq;
191 }
192 
193 void unregister_rt_sched_group(struct task_group *tg)
194 {
195 	if (tg->rt_se)
196 		destroy_rt_bandwidth(&tg->rt_bandwidth);
197 }
198 
199 void free_rt_sched_group(struct task_group *tg)
200 {
201 	int i;
202 
203 	for_each_possible_cpu(i) {
204 		if (tg->rt_rq)
205 			kfree(tg->rt_rq[i]);
206 		if (tg->rt_se)
207 			kfree(tg->rt_se[i]);
208 	}
209 
210 	kfree(tg->rt_rq);
211 	kfree(tg->rt_se);
212 }
213 
214 void init_tg_rt_entry(struct task_group *tg, struct rt_rq *rt_rq,
215 		struct sched_rt_entity *rt_se, int cpu,
216 		struct sched_rt_entity *parent)
217 {
218 	struct rq *rq = cpu_rq(cpu);
219 
220 	rt_rq->highest_prio.curr = MAX_RT_PRIO-1;
221 	rt_rq->rt_nr_boosted = 0;
222 	rt_rq->rq = rq;
223 	rt_rq->tg = tg;
224 
225 	tg->rt_rq[cpu] = rt_rq;
226 	tg->rt_se[cpu] = rt_se;
227 
228 	if (!rt_se)
229 		return;
230 
231 	if (!parent)
232 		rt_se->rt_rq = &rq->rt;
233 	else
234 		rt_se->rt_rq = parent->my_q;
235 
236 	rt_se->my_q = rt_rq;
237 	rt_se->parent = parent;
238 	INIT_LIST_HEAD(&rt_se->run_list);
239 }
240 
241 int alloc_rt_sched_group(struct task_group *tg, struct task_group *parent)
242 {
243 	struct rt_rq *rt_rq;
244 	struct sched_rt_entity *rt_se;
245 	int i;
246 
247 	tg->rt_rq = kcalloc(nr_cpu_ids, sizeof(rt_rq), GFP_KERNEL);
248 	if (!tg->rt_rq)
249 		goto err;
250 	tg->rt_se = kcalloc(nr_cpu_ids, sizeof(rt_se), GFP_KERNEL);
251 	if (!tg->rt_se)
252 		goto err;
253 
254 	init_rt_bandwidth(&tg->rt_bandwidth, ktime_to_ns(global_rt_period()), 0);
255 
256 	for_each_possible_cpu(i) {
257 		rt_rq = kzalloc_node(sizeof(struct rt_rq),
258 				     GFP_KERNEL, cpu_to_node(i));
259 		if (!rt_rq)
260 			goto err;
261 
262 		rt_se = kzalloc_node(sizeof(struct sched_rt_entity),
263 				     GFP_KERNEL, cpu_to_node(i));
264 		if (!rt_se)
265 			goto err_free_rq;
266 
267 		init_rt_rq(rt_rq);
268 		rt_rq->rt_runtime = tg->rt_bandwidth.rt_runtime;
269 		init_tg_rt_entry(tg, rt_rq, rt_se, i, parent->rt_se[i]);
270 	}
271 
272 	return 1;
273 
274 err_free_rq:
275 	kfree(rt_rq);
276 err:
277 	return 0;
278 }
279 
280 #else /* CONFIG_RT_GROUP_SCHED */
281 
282 #define rt_entity_is_task(rt_se) (1)
283 
284 static inline struct task_struct *rt_task_of(struct sched_rt_entity *rt_se)
285 {
286 	return container_of(rt_se, struct task_struct, rt);
287 }
288 
289 static inline struct rq *rq_of_rt_rq(struct rt_rq *rt_rq)
290 {
291 	return container_of(rt_rq, struct rq, rt);
292 }
293 
294 static inline struct rq *rq_of_rt_se(struct sched_rt_entity *rt_se)
295 {
296 	struct task_struct *p = rt_task_of(rt_se);
297 
298 	return task_rq(p);
299 }
300 
301 static inline struct rt_rq *rt_rq_of_se(struct sched_rt_entity *rt_se)
302 {
303 	struct rq *rq = rq_of_rt_se(rt_se);
304 
305 	return &rq->rt;
306 }
307 
308 void unregister_rt_sched_group(struct task_group *tg) { }
309 
310 void free_rt_sched_group(struct task_group *tg) { }
311 
312 int alloc_rt_sched_group(struct task_group *tg, struct task_group *parent)
313 {
314 	return 1;
315 }
316 #endif /* CONFIG_RT_GROUP_SCHED */
317 
318 #ifdef CONFIG_SMP
319 
320 static inline bool need_pull_rt_task(struct rq *rq, struct task_struct *prev)
321 {
322 	/* Try to pull RT tasks here if we lower this rq's prio */
323 	return rq->online && rq->rt.highest_prio.curr > prev->prio;
324 }
325 
326 static inline int rt_overloaded(struct rq *rq)
327 {
328 	return atomic_read(&rq->rd->rto_count);
329 }
330 
331 static inline void rt_set_overload(struct rq *rq)
332 {
333 	if (!rq->online)
334 		return;
335 
336 	cpumask_set_cpu(rq->cpu, rq->rd->rto_mask);
337 	/*
338 	 * Make sure the mask is visible before we set
339 	 * the overload count. That is checked to determine
340 	 * if we should look at the mask. It would be a shame
341 	 * if we looked at the mask, but the mask was not
342 	 * updated yet.
343 	 *
344 	 * Matched by the barrier in pull_rt_task().
345 	 */
346 	smp_wmb();
347 	atomic_inc(&rq->rd->rto_count);
348 }
349 
350 static inline void rt_clear_overload(struct rq *rq)
351 {
352 	if (!rq->online)
353 		return;
354 
355 	/* the order here really doesn't matter */
356 	atomic_dec(&rq->rd->rto_count);
357 	cpumask_clear_cpu(rq->cpu, rq->rd->rto_mask);
358 }
359 
360 static inline int has_pushable_tasks(struct rq *rq)
361 {
362 	return !plist_head_empty(&rq->rt.pushable_tasks);
363 }
364 
365 static DEFINE_PER_CPU(struct balance_callback, rt_push_head);
366 static DEFINE_PER_CPU(struct balance_callback, rt_pull_head);
367 
368 static void push_rt_tasks(struct rq *);
369 static void pull_rt_task(struct rq *);
370 
371 static inline void rt_queue_push_tasks(struct rq *rq)
372 {
373 	if (!has_pushable_tasks(rq))
374 		return;
375 
376 	queue_balance_callback(rq, &per_cpu(rt_push_head, rq->cpu), push_rt_tasks);
377 }
378 
379 static inline void rt_queue_pull_task(struct rq *rq)
380 {
381 	queue_balance_callback(rq, &per_cpu(rt_pull_head, rq->cpu), pull_rt_task);
382 }
383 
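/*
 * Track @p in the prio-ordered plist of pushable tasks and flag the
 * runqueue as RT-overloaded so that other CPUs may pull from it.
 */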
384 static void enqueue_pushable_task(struct rq *rq, struct task_struct *p)
385 {
386 	plist_del(&p->pushable_tasks, &rq->rt.pushable_tasks);
387 	plist_node_init(&p->pushable_tasks, p->prio);
388 	plist_add(&p->pushable_tasks, &rq->rt.pushable_tasks);
389 
390 	/* Update the highest prio pushable task */
391 	if (p->prio < rq->rt.highest_prio.next)
392 		rq->rt.highest_prio.next = p->prio;
393 
394 	if (!rq->rt.overloaded) {
395 		rt_set_overload(rq);
396 		rq->rt.overloaded = 1;
397 	}
398 }
399 
400 static void dequeue_pushable_task(struct rq *rq, struct task_struct *p)
401 {
402 	plist_del(&p->pushable_tasks, &rq->rt.pushable_tasks);
403 
404 	/* Update the new highest prio pushable task */
405 	if (has_pushable_tasks(rq)) {
406 		p = plist_first_entry(&rq->rt.pushable_tasks,
407 				      struct task_struct, pushable_tasks);
408 		rq->rt.highest_prio.next = p->prio;
409 	} else {
410 		rq->rt.highest_prio.next = MAX_RT_PRIO-1;
411 
412 		if (rq->rt.overloaded) {
413 			rt_clear_overload(rq);
414 			rq->rt.overloaded = 0;
415 		}
416 	}
417 }
418 
419 #else
420 
421 static inline void enqueue_pushable_task(struct rq *rq, struct task_struct *p)
422 {
423 }
424 
425 static inline void dequeue_pushable_task(struct rq *rq, struct task_struct *p)
426 {
427 }
428 
429 static inline void rt_queue_push_tasks(struct rq *rq)
430 {
431 }
432 #endif /* CONFIG_SMP */
433 
434 static void enqueue_top_rt_rq(struct rt_rq *rt_rq);
435 static void dequeue_top_rt_rq(struct rt_rq *rt_rq, unsigned int count);
436 
437 static inline int on_rt_rq(struct sched_rt_entity *rt_se)
438 {
439 	return rt_se->on_rq;
440 }
441 
442 #ifdef CONFIG_UCLAMP_TASK
443 /*
444  * Verify the fitness of task @p to run on @cpu taking into account the uclamp
445  * settings.
446  *
447  * This check is only important for heterogeneous systems where the uclamp_min
448  * value is higher than the capacity of a @cpu. For non-heterogeneous systems
449  * this function will always return true.
450  *
451  * The function will return true if the capacity of the @cpu is >= the
452  * uclamp_min and false otherwise.
453  *
454  * Note that uclamp_min will be clamped to uclamp_max if uclamp_min
455  * > uclamp_max.
456  */
457 static inline bool rt_task_fits_capacity(struct task_struct *p, int cpu)
458 {
459 	unsigned int min_cap;
460 	unsigned int max_cap;
461 	unsigned int cpu_cap;
462 
463 	/* Only heterogeneous systems can benefit from this check */
464 	if (!sched_asym_cpucap_active())
465 		return true;
466 
467 	min_cap = uclamp_eff_value(p, UCLAMP_MIN);
468 	max_cap = uclamp_eff_value(p, UCLAMP_MAX);
469 
470 	cpu_cap = arch_scale_cpu_capacity(cpu);
471 
472 	return cpu_cap >= min(min_cap, max_cap);
473 }
474 #else
475 static inline bool rt_task_fits_capacity(struct task_struct *p, int cpu)
476 {
477 	return true;
478 }
479 #endif
480 
481 #ifdef CONFIG_RT_GROUP_SCHED
482 
483 static inline u64 sched_rt_runtime(struct rt_rq *rt_rq)
484 {
485 	if (!rt_rq->tg)
486 		return RUNTIME_INF;
487 
488 	return rt_rq->rt_runtime;
489 }
490 
491 static inline u64 sched_rt_period(struct rt_rq *rt_rq)
492 {
493 	return ktime_to_ns(rt_rq->tg->rt_bandwidth.rt_period);
494 }
495 
496 typedef struct task_group *rt_rq_iter_t;
497 
498 static inline struct task_group *next_task_group(struct task_group *tg)
499 {
500 	do {
501 		tg = list_entry_rcu(tg->list.next,
502 			typeof(struct task_group), list);
503 	} while (&tg->list != &task_groups && task_group_is_autogroup(tg));
504 
505 	if (&tg->list == &task_groups)
506 		tg = NULL;
507 
508 	return tg;
509 }
510 
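/* Walk this CPU's rt_rq of every task group, skipping autogroups. */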
511 #define for_each_rt_rq(rt_rq, iter, rq)					\
512 	for (iter = container_of(&task_groups, typeof(*iter), list);	\
513 		(iter = next_task_group(iter)) &&			\
514 		(rt_rq = iter->rt_rq[cpu_of(rq)]);)
515 
516 #define for_each_sched_rt_entity(rt_se) \
517 	for (; rt_se; rt_se = rt_se->parent)
518 
519 static inline struct rt_rq *group_rt_rq(struct sched_rt_entity *rt_se)
520 {
521 	return rt_se->my_q;
522 }
523 
524 static void enqueue_rt_entity(struct sched_rt_entity *rt_se, unsigned int flags);
525 static void dequeue_rt_entity(struct sched_rt_entity *rt_se, unsigned int flags);
526 
527 static void sched_rt_rq_enqueue(struct rt_rq *rt_rq)
528 {
529 	struct task_struct *donor = rq_of_rt_rq(rt_rq)->donor;
530 	struct rq *rq = rq_of_rt_rq(rt_rq);
531 	struct sched_rt_entity *rt_se;
532 
533 	int cpu = cpu_of(rq);
534 
535 	rt_se = rt_rq->tg->rt_se[cpu];
536 
537 	if (rt_rq->rt_nr_running) {
538 		if (!rt_se)
539 			enqueue_top_rt_rq(rt_rq);
540 		else if (!on_rt_rq(rt_se))
541 			enqueue_rt_entity(rt_se, 0);
542 
543 		if (rt_rq->highest_prio.curr < donor->prio)
544 			resched_curr(rq);
545 	}
546 }
547 
548 static void sched_rt_rq_dequeue(struct rt_rq *rt_rq)
549 {
550 	struct sched_rt_entity *rt_se;
551 	int cpu = cpu_of(rq_of_rt_rq(rt_rq));
552 
553 	rt_se = rt_rq->tg->rt_se[cpu];
554 
555 	if (!rt_se) {
556 		dequeue_top_rt_rq(rt_rq, rt_rq->rt_nr_running);
557 		/* Kick cpufreq (see the comment in kernel/sched/sched.h). */
558 		cpufreq_update_util(rq_of_rt_rq(rt_rq), 0);
559 	}
560 	else if (on_rt_rq(rt_se))
561 		dequeue_rt_entity(rt_se, 0);
562 }
563 
564 static inline int rt_rq_throttled(struct rt_rq *rt_rq)
565 {
566 	return rt_rq->rt_throttled && !rt_rq->rt_nr_boosted;
567 }
568 
569 static int rt_se_boosted(struct sched_rt_entity *rt_se)
570 {
571 	struct rt_rq *rt_rq = group_rt_rq(rt_se);
572 	struct task_struct *p;
573 
574 	if (rt_rq)
575 		return !!rt_rq->rt_nr_boosted;
576 
577 	p = rt_task_of(rt_se);
578 	return p->prio != p->normal_prio;
579 }
580 
581 #ifdef CONFIG_SMP
582 static inline const struct cpumask *sched_rt_period_mask(void)
583 {
584 	return this_rq()->rd->span;
585 }
586 #else
587 static inline const struct cpumask *sched_rt_period_mask(void)
588 {
589 	return cpu_online_mask;
590 }
591 #endif
592 
593 static inline
594 struct rt_rq *sched_rt_period_rt_rq(struct rt_bandwidth *rt_b, int cpu)
595 {
596 	return container_of(rt_b, struct task_group, rt_bandwidth)->rt_rq[cpu];
597 }
598 
599 static inline struct rt_bandwidth *sched_rt_bandwidth(struct rt_rq *rt_rq)
600 {
601 	return &rt_rq->tg->rt_bandwidth;
602 }
603 
604 bool sched_rt_bandwidth_account(struct rt_rq *rt_rq)
605 {
606 	struct rt_bandwidth *rt_b = sched_rt_bandwidth(rt_rq);
607 
608 	return (hrtimer_active(&rt_b->rt_period_timer) ||
609 		rt_rq->rt_time < rt_b->rt_runtime);
610 }
611 
612 #ifdef CONFIG_SMP
613 /*
614  * We ran out of runtime, see if we can borrow some from our neighbours.
615  */
616 static void do_balance_runtime(struct rt_rq *rt_rq)
617 {
618 	struct rt_bandwidth *rt_b = sched_rt_bandwidth(rt_rq);
619 	struct root_domain *rd = rq_of_rt_rq(rt_rq)->rd;
620 	int i, weight;
621 	u64 rt_period;
622 
623 	weight = cpumask_weight(rd->span);
624 
625 	raw_spin_lock(&rt_b->rt_runtime_lock);
626 	rt_period = ktime_to_ns(rt_b->rt_period);
627 	for_each_cpu(i, rd->span) {
628 		struct rt_rq *iter = sched_rt_period_rt_rq(rt_b, i);
629 		s64 diff;
630 
631 		if (iter == rt_rq)
632 			continue;
633 
634 		raw_spin_lock(&iter->rt_runtime_lock);
635 		/*
636 		 * Either all rqs have inf runtime and there's nothing to steal
637 		 * or __disable_runtime() below sets a specific rq to inf to
638 		 * indicate it's been disabled and disallow stealing.
639 		 */
640 		if (iter->rt_runtime == RUNTIME_INF)
641 			goto next;
642 
643 		/*
644 		 * From runqueues with spare time, take 1/n part of their
645 		 * spare time, but no more than our period.
646 		 */
647 		diff = iter->rt_runtime - iter->rt_time;
648 		if (diff > 0) {
649 			diff = div_u64((u64)diff, weight);
650 			if (rt_rq->rt_runtime + diff > rt_period)
651 				diff = rt_period - rt_rq->rt_runtime;
652 			iter->rt_runtime -= diff;
653 			rt_rq->rt_runtime += diff;
654 			if (rt_rq->rt_runtime == rt_period) {
655 				raw_spin_unlock(&iter->rt_runtime_lock);
656 				break;
657 			}
658 		}
659 next:
660 		raw_spin_unlock(&iter->rt_runtime_lock);
661 	}
662 	raw_spin_unlock(&rt_b->rt_runtime_lock);
663 }
664 
665 /*
666  * Ensure this RQ takes back all the runtime it lent to its neighbours.
667  */
668 static void __disable_runtime(struct rq *rq)
669 {
670 	struct root_domain *rd = rq->rd;
671 	rt_rq_iter_t iter;
672 	struct rt_rq *rt_rq;
673 
674 	if (unlikely(!scheduler_running))
675 		return;
676 
677 	for_each_rt_rq(rt_rq, iter, rq) {
678 		struct rt_bandwidth *rt_b = sched_rt_bandwidth(rt_rq);
679 		s64 want;
680 		int i;
681 
682 		raw_spin_lock(&rt_b->rt_runtime_lock);
683 		raw_spin_lock(&rt_rq->rt_runtime_lock);
684 		/*
685 		 * Either we're all inf and nobody needs to borrow, or we're
686 		 * already disabled and thus have nothing to do, or we have
687 		 * exactly the right amount of runtime to take out.
688 		 */
689 		if (rt_rq->rt_runtime == RUNTIME_INF ||
690 				rt_rq->rt_runtime == rt_b->rt_runtime)
691 			goto balanced;
692 		raw_spin_unlock(&rt_rq->rt_runtime_lock);
693 
694 		/*
695 		 * Calculate the difference between what we started out with
696 		 * and what we currently have; that's the amount of runtime
697 		 * we lent out and now have to reclaim.
698 		 */
699 		want = rt_b->rt_runtime - rt_rq->rt_runtime;
700 
701 		/*
702 		 * Greedy reclaim, take back as much as we can.
703 		 */
704 		for_each_cpu(i, rd->span) {
705 			struct rt_rq *iter = sched_rt_period_rt_rq(rt_b, i);
706 			s64 diff;
707 
708 			/*
709 			 * Can't reclaim from ourselves or disabled runqueues.
710 			 */
711 			if (iter == rt_rq || iter->rt_runtime == RUNTIME_INF)
712 				continue;
713 
714 			raw_spin_lock(&iter->rt_runtime_lock);
715 			if (want > 0) {
716 				diff = min_t(s64, iter->rt_runtime, want);
717 				iter->rt_runtime -= diff;
718 				want -= diff;
719 			} else {
720 				iter->rt_runtime -= want;
721 				want -= want;
722 			}
723 			raw_spin_unlock(&iter->rt_runtime_lock);
724 
725 			if (!want)
726 				break;
727 		}
728 
729 		raw_spin_lock(&rt_rq->rt_runtime_lock);
730 		/*
731 		 * We cannot be left wanting - that would mean some runtime
732 		 * leaked out of the system.
733 		 */
734 		WARN_ON_ONCE(want);
735 balanced:
736 		/*
737 		 * Disable all the borrow logic by pretending we have inf
738 		 * runtime - in which case borrowing doesn't make sense.
739 		 */
740 		rt_rq->rt_runtime = RUNTIME_INF;
741 		rt_rq->rt_throttled = 0;
742 		raw_spin_unlock(&rt_rq->rt_runtime_lock);
743 		raw_spin_unlock(&rt_b->rt_runtime_lock);
744 
745 		/* Make rt_rq available for pick_next_task() */
746 		sched_rt_rq_enqueue(rt_rq);
747 	}
748 }
749 
750 static void __enable_runtime(struct rq *rq)
751 {
752 	rt_rq_iter_t iter;
753 	struct rt_rq *rt_rq;
754 
755 	if (unlikely(!scheduler_running))
756 		return;
757 
758 	/*
759 	 * Reset each runqueue's bandwidth settings
760 	 */
761 	for_each_rt_rq(rt_rq, iter, rq) {
762 		struct rt_bandwidth *rt_b = sched_rt_bandwidth(rt_rq);
763 
764 		raw_spin_lock(&rt_b->rt_runtime_lock);
765 		raw_spin_lock(&rt_rq->rt_runtime_lock);
766 		rt_rq->rt_runtime = rt_b->rt_runtime;
767 		rt_rq->rt_time = 0;
768 		rt_rq->rt_throttled = 0;
769 		raw_spin_unlock(&rt_rq->rt_runtime_lock);
770 		raw_spin_unlock(&rt_b->rt_runtime_lock);
771 	}
772 }
773 
774 static void balance_runtime(struct rt_rq *rt_rq)
775 {
776 	if (!sched_feat(RT_RUNTIME_SHARE))
777 		return;
778 
779 	if (rt_rq->rt_time > rt_rq->rt_runtime) {
780 		raw_spin_unlock(&rt_rq->rt_runtime_lock);
781 		do_balance_runtime(rt_rq);
782 		raw_spin_lock(&rt_rq->rt_runtime_lock);
783 	}
784 }
785 #else /* !CONFIG_SMP */
786 static inline void balance_runtime(struct rt_rq *rt_rq) {}
787 #endif /* CONFIG_SMP */
788 
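/*
 * Runs from the rt period timer: for each rt_rq covered by the bandwidth
 * group, decay the consumed rt_time by the replenished runtime, unthrottle
 * and re-enqueue queues whose debt dropped below their runtime, and report
 * whether the timer can be stopped.
 */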
789 static int do_sched_rt_period_timer(struct rt_bandwidth *rt_b, int overrun)
790 {
791 	int i, idle = 1, throttled = 0;
792 	const struct cpumask *span;
793 
794 	span = sched_rt_period_mask();
795 
796 	/*
797 	 * FIXME: isolated CPUs should really leave the root task group,
798 	 * whether they are isolcpus or were isolated via cpusets, lest
799 	 * the timer run on a CPU which does not service all runqueues,
800 	 * potentially leaving other CPUs indefinitely throttled.  If
801 	 * isolation is really required, the user will turn the throttle
802 	 * off to kill the perturbations it causes anyway.  Meanwhile,
803 	 * this maintains functionality for boot and/or troubleshooting.
804 	 */
805 	if (rt_b == &root_task_group.rt_bandwidth)
806 		span = cpu_online_mask;
807 
808 	for_each_cpu(i, span) {
809 		int enqueue = 0;
810 		struct rt_rq *rt_rq = sched_rt_period_rt_rq(rt_b, i);
811 		struct rq *rq = rq_of_rt_rq(rt_rq);
812 		struct rq_flags rf;
813 		int skip;
814 
815 		/*
816 		 * When span == cpu_online_mask, taking each rq->lock
817 		 * can be time-consuming. Try to avoid it when possible.
818 		 */
819 		raw_spin_lock(&rt_rq->rt_runtime_lock);
820 		if (!sched_feat(RT_RUNTIME_SHARE) && rt_rq->rt_runtime != RUNTIME_INF)
821 			rt_rq->rt_runtime = rt_b->rt_runtime;
822 		skip = !rt_rq->rt_time && !rt_rq->rt_nr_running;
823 		raw_spin_unlock(&rt_rq->rt_runtime_lock);
824 		if (skip)
825 			continue;
826 
827 		rq_lock(rq, &rf);
828 		update_rq_clock(rq);
829 
830 		if (rt_rq->rt_time) {
831 			u64 runtime;
832 
833 			raw_spin_lock(&rt_rq->rt_runtime_lock);
834 			if (rt_rq->rt_throttled)
835 				balance_runtime(rt_rq);
836 			runtime = rt_rq->rt_runtime;
837 			rt_rq->rt_time -= min(rt_rq->rt_time, overrun*runtime);
838 			if (rt_rq->rt_throttled && rt_rq->rt_time < runtime) {
839 				rt_rq->rt_throttled = 0;
840 				enqueue = 1;
841 
842 				/*
843 				 * When we're idle and a woken (rt) task is
844 				 * throttled wakeup_preempt() will set
845 				 * skip_update and the time between the wakeup
846 				 * and this unthrottle will get accounted as
847 				 * 'runtime'.
848 				 */
849 				if (rt_rq->rt_nr_running && rq->curr == rq->idle)
850 					rq_clock_cancel_skipupdate(rq);
851 			}
852 			if (rt_rq->rt_time || rt_rq->rt_nr_running)
853 				idle = 0;
854 			raw_spin_unlock(&rt_rq->rt_runtime_lock);
855 		} else if (rt_rq->rt_nr_running) {
856 			idle = 0;
857 			if (!rt_rq_throttled(rt_rq))
858 				enqueue = 1;
859 		}
860 		if (rt_rq->rt_throttled)
861 			throttled = 1;
862 
863 		if (enqueue)
864 			sched_rt_rq_enqueue(rt_rq);
865 		rq_unlock(rq, &rf);
866 	}
867 
868 	if (!throttled && (!rt_bandwidth_enabled() || rt_b->rt_runtime == RUNTIME_INF))
869 		return 1;
870 
871 	return idle;
872 }
873 
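/*
 * Called while charging execution time: after trying to borrow runtime from
 * other CPUs, throttle the rt_rq (and dequeue it) if its rt_time still
 * exceeds the runtime allowed for this period. Returns 1 when the queue is
 * throttled.
 */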
874 static int sched_rt_runtime_exceeded(struct rt_rq *rt_rq)
875 {
876 	u64 runtime = sched_rt_runtime(rt_rq);
877 
878 	if (rt_rq->rt_throttled)
879 		return rt_rq_throttled(rt_rq);
880 
881 	if (runtime >= sched_rt_period(rt_rq))
882 		return 0;
883 
884 	balance_runtime(rt_rq);
885 	runtime = sched_rt_runtime(rt_rq);
886 	if (runtime == RUNTIME_INF)
887 		return 0;
888 
889 	if (rt_rq->rt_time > runtime) {
890 		struct rt_bandwidth *rt_b = sched_rt_bandwidth(rt_rq);
891 
892 		/*
893 		 * Don't actually throttle groups that have no runtime assigned
894 		 * but accrue some time due to boosting.
895 		 */
896 		if (likely(rt_b->rt_runtime)) {
897 			rt_rq->rt_throttled = 1;
898 			printk_deferred_once("sched: RT throttling activated\n");
899 		} else {
900 			/*
901 			 * In case we did anyway, make it go away,
902 			 * replenishment is a joke, since it will replenish us
903 			 * with exactly 0 ns.
904 			 */
905 			rt_rq->rt_time = 0;
906 		}
907 
908 		if (rt_rq_throttled(rt_rq)) {
909 			sched_rt_rq_dequeue(rt_rq);
910 			return 1;
911 		}
912 	}
913 
914 	return 0;
915 }
916 
917 #else /* !CONFIG_RT_GROUP_SCHED */
918 
919 typedef struct rt_rq *rt_rq_iter_t;
920 
921 #define for_each_rt_rq(rt_rq, iter, rq) \
922 	for ((void) iter, rt_rq = &rq->rt; rt_rq; rt_rq = NULL)
923 
924 #define for_each_sched_rt_entity(rt_se) \
925 	for (; rt_se; rt_se = NULL)
926 
927 static inline struct rt_rq *group_rt_rq(struct sched_rt_entity *rt_se)
928 {
929 	return NULL;
930 }
931 
932 static inline void sched_rt_rq_enqueue(struct rt_rq *rt_rq)
933 {
934 	struct rq *rq = rq_of_rt_rq(rt_rq);
935 
936 	if (!rt_rq->rt_nr_running)
937 		return;
938 
939 	enqueue_top_rt_rq(rt_rq);
940 	resched_curr(rq);
941 }
942 
943 static inline void sched_rt_rq_dequeue(struct rt_rq *rt_rq)
944 {
945 	dequeue_top_rt_rq(rt_rq, rt_rq->rt_nr_running);
946 }
947 
948 static inline int rt_rq_throttled(struct rt_rq *rt_rq)
949 {
950 	return false;
951 }
952 
953 static inline const struct cpumask *sched_rt_period_mask(void)
954 {
955 	return cpu_online_mask;
956 }
957 
958 static inline
959 struct rt_rq *sched_rt_period_rt_rq(struct rt_bandwidth *rt_b, int cpu)
960 {
961 	return &cpu_rq(cpu)->rt;
962 }
963 
964 #ifdef CONFIG_SMP
965 static void __enable_runtime(struct rq *rq) { }
966 static void __disable_runtime(struct rq *rq) { }
967 #endif
968 
969 #endif /* CONFIG_RT_GROUP_SCHED */
970 
971 static inline int rt_se_prio(struct sched_rt_entity *rt_se)
972 {
973 #ifdef CONFIG_RT_GROUP_SCHED
974 	struct rt_rq *rt_rq = group_rt_rq(rt_se);
975 
976 	if (rt_rq)
977 		return rt_rq->highest_prio.curr;
978 #endif
979 
980 	return rt_task_of(rt_se)->prio;
981 }
982 
983 /*
984  * Update the current task's runtime statistics. Skip current tasks that
985  * are not in our scheduling class.
986  */
987 static void update_curr_rt(struct rq *rq)
988 {
989 	struct task_struct *donor = rq->donor;
990 	s64 delta_exec;
991 
992 	if (donor->sched_class != &rt_sched_class)
993 		return;
994 
995 	delta_exec = update_curr_common(rq);
996 	if (unlikely(delta_exec <= 0))
997 		return;
998 
999 #ifdef CONFIG_RT_GROUP_SCHED
1000 	struct sched_rt_entity *rt_se = &donor->rt;
1001 
1002 	if (!rt_bandwidth_enabled())
1003 		return;
1004 
1005 	for_each_sched_rt_entity(rt_se) {
1006 		struct rt_rq *rt_rq = rt_rq_of_se(rt_se);
1007 		int exceeded;
1008 
1009 		if (sched_rt_runtime(rt_rq) != RUNTIME_INF) {
1010 			raw_spin_lock(&rt_rq->rt_runtime_lock);
1011 			rt_rq->rt_time += delta_exec;
1012 			exceeded = sched_rt_runtime_exceeded(rt_rq);
1013 			if (exceeded)
1014 				resched_curr(rq);
1015 			raw_spin_unlock(&rt_rq->rt_runtime_lock);
1016 			if (exceeded)
1017 				do_start_rt_bandwidth(sched_rt_bandwidth(rt_rq));
1018 		}
1019 	}
1020 #endif
1021 }
1022 
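/*
 * The root rt_rq contributes its tasks to rq->nr_running only while it is
 * queued; the two helpers below remove and add that contribution.
 */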
1023 static void
1024 dequeue_top_rt_rq(struct rt_rq *rt_rq, unsigned int count)
1025 {
1026 	struct rq *rq = rq_of_rt_rq(rt_rq);
1027 
1028 	BUG_ON(&rq->rt != rt_rq);
1029 
1030 	if (!rt_rq->rt_queued)
1031 		return;
1032 
1033 	BUG_ON(!rq->nr_running);
1034 
1035 	sub_nr_running(rq, count);
1036 	rt_rq->rt_queued = 0;
1037 
1038 }
1039 
1040 static void
1041 enqueue_top_rt_rq(struct rt_rq *rt_rq)
1042 {
1043 	struct rq *rq = rq_of_rt_rq(rt_rq);
1044 
1045 	BUG_ON(&rq->rt != rt_rq);
1046 
1047 	if (rt_rq->rt_queued)
1048 		return;
1049 
1050 	if (rt_rq_throttled(rt_rq))
1051 		return;
1052 
1053 	if (rt_rq->rt_nr_running) {
1054 		add_nr_running(rq, rt_rq->rt_nr_running);
1055 		rt_rq->rt_queued = 1;
1056 	}
1057 
1058 	/* Kick cpufreq (see the comment in kernel/sched/sched.h). */
1059 	cpufreq_update_util(rq, 0);
1060 }
1061 
1062 #if defined CONFIG_SMP
1063 
1064 static void
1065 inc_rt_prio_smp(struct rt_rq *rt_rq, int prio, int prev_prio)
1066 {
1067 	struct rq *rq = rq_of_rt_rq(rt_rq);
1068 
1069 #ifdef CONFIG_RT_GROUP_SCHED
1070 	/*
1071 	 * Change rq's cpupri only if rt_rq is the top queue.
1072 	 */
1073 	if (&rq->rt != rt_rq)
1074 		return;
1075 #endif
1076 	if (rq->online && prio < prev_prio)
1077 		cpupri_set(&rq->rd->cpupri, rq->cpu, prio);
1078 }
1079 
1080 static void
1081 dec_rt_prio_smp(struct rt_rq *rt_rq, int prio, int prev_prio)
1082 {
1083 	struct rq *rq = rq_of_rt_rq(rt_rq);
1084 
1085 #ifdef CONFIG_RT_GROUP_SCHED
1086 	/*
1087 	 * Change rq's cpupri only if rt_rq is the top queue.
1088 	 */
1089 	if (&rq->rt != rt_rq)
1090 		return;
1091 #endif
1092 	if (rq->online && rt_rq->highest_prio.curr != prev_prio)
1093 		cpupri_set(&rq->rd->cpupri, rq->cpu, rt_rq->highest_prio.curr);
1094 }
1095 
1096 #else /* CONFIG_SMP */
1097 
1098 static inline
1099 void inc_rt_prio_smp(struct rt_rq *rt_rq, int prio, int prev_prio) {}
1100 static inline
1101 void dec_rt_prio_smp(struct rt_rq *rt_rq, int prio, int prev_prio) {}
1102 
1103 #endif /* CONFIG_SMP */
1104 
1105 #if defined CONFIG_SMP || defined CONFIG_RT_GROUP_SCHED
1106 static void
1107 inc_rt_prio(struct rt_rq *rt_rq, int prio)
1108 {
1109 	int prev_prio = rt_rq->highest_prio.curr;
1110 
1111 	if (prio < prev_prio)
1112 		rt_rq->highest_prio.curr = prio;
1113 
1114 	inc_rt_prio_smp(rt_rq, prio, prev_prio);
1115 }
1116 
1117 static void
1118 dec_rt_prio(struct rt_rq *rt_rq, int prio)
1119 {
1120 	int prev_prio = rt_rq->highest_prio.curr;
1121 
1122 	if (rt_rq->rt_nr_running) {
1123 
1124 		WARN_ON(prio < prev_prio);
1125 
1126 		/*
1127 		 * This may have been our highest task, and therefore
1128 		 * we may have some re-computation to do
1129 		 */
1130 		if (prio == prev_prio) {
1131 			struct rt_prio_array *array = &rt_rq->active;
1132 
1133 			rt_rq->highest_prio.curr =
1134 				sched_find_first_bit(array->bitmap);
1135 		}
1136 
1137 	} else {
1138 		rt_rq->highest_prio.curr = MAX_RT_PRIO-1;
1139 	}
1140 
1141 	dec_rt_prio_smp(rt_rq, prio, prev_prio);
1142 }
1143 
1144 #else
1145 
1146 static inline void inc_rt_prio(struct rt_rq *rt_rq, int prio) {}
1147 static inline void dec_rt_prio(struct rt_rq *rt_rq, int prio) {}
1148 
1149 #endif /* CONFIG_SMP || CONFIG_RT_GROUP_SCHED */
1150 
1151 #ifdef CONFIG_RT_GROUP_SCHED
1152 
1153 static void
1154 inc_rt_group(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
1155 {
1156 	if (rt_se_boosted(rt_se))
1157 		rt_rq->rt_nr_boosted++;
1158 
1159 	if (rt_rq->tg)
1160 		start_rt_bandwidth(&rt_rq->tg->rt_bandwidth);
1161 }
1162 
1163 static void
1164 dec_rt_group(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
1165 {
1166 	if (rt_se_boosted(rt_se))
1167 		rt_rq->rt_nr_boosted--;
1168 
1169 	WARN_ON(!rt_rq->rt_nr_running && rt_rq->rt_nr_boosted);
1170 }
1171 
1172 #else /* CONFIG_RT_GROUP_SCHED */
1173 
1174 static void
1175 inc_rt_group(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
1176 {
1177 }
1178 
1179 static inline
1180 void dec_rt_group(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq) {}
1181 
1182 #endif /* CONFIG_RT_GROUP_SCHED */
1183 
1184 static inline
1185 unsigned int rt_se_nr_running(struct sched_rt_entity *rt_se)
1186 {
1187 	struct rt_rq *group_rq = group_rt_rq(rt_se);
1188 
1189 	if (group_rq)
1190 		return group_rq->rt_nr_running;
1191 	else
1192 		return 1;
1193 }
1194 
1195 static inline
1196 unsigned int rt_se_rr_nr_running(struct sched_rt_entity *rt_se)
1197 {
1198 	struct rt_rq *group_rq = group_rt_rq(rt_se);
1199 	struct task_struct *tsk;
1200 
1201 	if (group_rq)
1202 		return group_rq->rr_nr_running;
1203 
1204 	tsk = rt_task_of(rt_se);
1205 
1206 	return (tsk->policy == SCHED_RR) ? 1 : 0;
1207 }
1208 
1209 static inline
1210 void inc_rt_tasks(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
1211 {
1212 	int prio = rt_se_prio(rt_se);
1213 
1214 	WARN_ON(!rt_prio(prio));
1215 	rt_rq->rt_nr_running += rt_se_nr_running(rt_se);
1216 	rt_rq->rr_nr_running += rt_se_rr_nr_running(rt_se);
1217 
1218 	inc_rt_prio(rt_rq, prio);
1219 	inc_rt_group(rt_se, rt_rq);
1220 }
1221 
1222 static inline
1223 void dec_rt_tasks(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
1224 {
1225 	WARN_ON(!rt_prio(rt_se_prio(rt_se)));
1226 	WARN_ON(!rt_rq->rt_nr_running);
1227 	rt_rq->rt_nr_running -= rt_se_nr_running(rt_se);
1228 	rt_rq->rr_nr_running -= rt_se_rr_nr_running(rt_se);
1229 
1230 	dec_rt_prio(rt_rq, rt_se_prio(rt_se));
1231 	dec_rt_group(rt_se, rt_rq);
1232 }
1233 
1234 /*
1235  * Change rt_se->run_list location unless SAVE && !MOVE
1236  *
1237  * assumes ENQUEUE/DEQUEUE flags match
1238  */
1239 static inline bool move_entity(unsigned int flags)
1240 {
1241 	if ((flags & (DEQUEUE_SAVE | DEQUEUE_MOVE)) == DEQUEUE_SAVE)
1242 		return false;
1243 
1244 	return true;
1245 }
1246 
1247 static void __delist_rt_entity(struct sched_rt_entity *rt_se, struct rt_prio_array *array)
1248 {
1249 	list_del_init(&rt_se->run_list);
1250 
1251 	if (list_empty(array->queue + rt_se_prio(rt_se)))
1252 		__clear_bit(rt_se_prio(rt_se), array->bitmap);
1253 
1254 	rt_se->on_list = 0;
1255 }
1256 
1257 static inline struct sched_statistics *
1258 __schedstats_from_rt_se(struct sched_rt_entity *rt_se)
1259 {
1260 #ifdef CONFIG_RT_GROUP_SCHED
1261 	/* schedstats is not supported for rt group. */
1262 	if (!rt_entity_is_task(rt_se))
1263 		return NULL;
1264 #endif
1265 
1266 	return &rt_task_of(rt_se)->stats;
1267 }
1268 
1269 static inline void
1270 update_stats_wait_start_rt(struct rt_rq *rt_rq, struct sched_rt_entity *rt_se)
1271 {
1272 	struct sched_statistics *stats;
1273 	struct task_struct *p = NULL;
1274 
1275 	if (!schedstat_enabled())
1276 		return;
1277 
1278 	if (rt_entity_is_task(rt_se))
1279 		p = rt_task_of(rt_se);
1280 
1281 	stats = __schedstats_from_rt_se(rt_se);
1282 	if (!stats)
1283 		return;
1284 
1285 	__update_stats_wait_start(rq_of_rt_rq(rt_rq), p, stats);
1286 }
1287 
1288 static inline void
1289 update_stats_enqueue_sleeper_rt(struct rt_rq *rt_rq, struct sched_rt_entity *rt_se)
1290 {
1291 	struct sched_statistics *stats;
1292 	struct task_struct *p = NULL;
1293 
1294 	if (!schedstat_enabled())
1295 		return;
1296 
1297 	if (rt_entity_is_task(rt_se))
1298 		p = rt_task_of(rt_se);
1299 
1300 	stats = __schedstats_from_rt_se(rt_se);
1301 	if (!stats)
1302 		return;
1303 
1304 	__update_stats_enqueue_sleeper(rq_of_rt_rq(rt_rq), p, stats);
1305 }
1306 
1307 static inline void
1308 update_stats_enqueue_rt(struct rt_rq *rt_rq, struct sched_rt_entity *rt_se,
1309 			int flags)
1310 {
1311 	if (!schedstat_enabled())
1312 		return;
1313 
1314 	if (flags & ENQUEUE_WAKEUP)
1315 		update_stats_enqueue_sleeper_rt(rt_rq, rt_se);
1316 }
1317 
1318 static inline void
1319 update_stats_wait_end_rt(struct rt_rq *rt_rq, struct sched_rt_entity *rt_se)
1320 {
1321 	struct sched_statistics *stats;
1322 	struct task_struct *p = NULL;
1323 
1324 	if (!schedstat_enabled())
1325 		return;
1326 
1327 	if (rt_entity_is_task(rt_se))
1328 		p = rt_task_of(rt_se);
1329 
1330 	stats = __schedstats_from_rt_se(rt_se);
1331 	if (!stats)
1332 		return;
1333 
1334 	__update_stats_wait_end(rq_of_rt_rq(rt_rq), p, stats);
1335 }
1336 
1337 static inline void
1338 update_stats_dequeue_rt(struct rt_rq *rt_rq, struct sched_rt_entity *rt_se,
1339 			int flags)
1340 {
1341 	struct task_struct *p = NULL;
1342 
1343 	if (!schedstat_enabled())
1344 		return;
1345 
1346 	if (rt_entity_is_task(rt_se))
1347 		p = rt_task_of(rt_se);
1348 
1349 	if ((flags & DEQUEUE_SLEEP) && p) {
1350 		unsigned int state;
1351 
1352 		state = READ_ONCE(p->__state);
1353 		if (state & TASK_INTERRUPTIBLE)
1354 			__schedstat_set(p->stats.sleep_start,
1355 					rq_clock(rq_of_rt_rq(rt_rq)));
1356 
1357 		if (state & TASK_UNINTERRUPTIBLE)
1358 			__schedstat_set(p->stats.block_start,
1359 					rq_clock(rq_of_rt_rq(rt_rq)));
1360 	}
1361 }
1362 
1363 static void __enqueue_rt_entity(struct sched_rt_entity *rt_se, unsigned int flags)
1364 {
1365 	struct rt_rq *rt_rq = rt_rq_of_se(rt_se);
1366 	struct rt_prio_array *array = &rt_rq->active;
1367 	struct rt_rq *group_rq = group_rt_rq(rt_se);
1368 	struct list_head *queue = array->queue + rt_se_prio(rt_se);
1369 
1370 	/*
1371 	 * Don't enqueue the group if it's throttled, or when empty.
1372 	 * The latter is a consequence of the former when a child group
1373 	 * gets throttled and the current group doesn't have any other
1374 	 * active members.
1375 	 */
1376 	if (group_rq && (rt_rq_throttled(group_rq) || !group_rq->rt_nr_running)) {
1377 		if (rt_se->on_list)
1378 			__delist_rt_entity(rt_se, array);
1379 		return;
1380 	}
1381 
1382 	if (move_entity(flags)) {
1383 		WARN_ON_ONCE(rt_se->on_list);
1384 		if (flags & ENQUEUE_HEAD)
1385 			list_add(&rt_se->run_list, queue);
1386 		else
1387 			list_add_tail(&rt_se->run_list, queue);
1388 
1389 		__set_bit(rt_se_prio(rt_se), array->bitmap);
1390 		rt_se->on_list = 1;
1391 	}
1392 	rt_se->on_rq = 1;
1393 
1394 	inc_rt_tasks(rt_se, rt_rq);
1395 }
1396 
1397 static void __dequeue_rt_entity(struct sched_rt_entity *rt_se, unsigned int flags)
1398 {
1399 	struct rt_rq *rt_rq = rt_rq_of_se(rt_se);
1400 	struct rt_prio_array *array = &rt_rq->active;
1401 
1402 	if (move_entity(flags)) {
1403 		WARN_ON_ONCE(!rt_se->on_list);
1404 		__delist_rt_entity(rt_se, array);
1405 	}
1406 	rt_se->on_rq = 0;
1407 
1408 	dec_rt_tasks(rt_se, rt_rq);
1409 }
1410 
1411 /*
1412  * Because the prio of an upper entry depends on the lower
1413  * entries, we must remove entries top-down.
1414  */
1415 static void dequeue_rt_stack(struct sched_rt_entity *rt_se, unsigned int flags)
1416 {
1417 	struct sched_rt_entity *back = NULL;
1418 	unsigned int rt_nr_running;
1419 
1420 	for_each_sched_rt_entity(rt_se) {
1421 		rt_se->back = back;
1422 		back = rt_se;
1423 	}
1424 
1425 	rt_nr_running = rt_rq_of_se(back)->rt_nr_running;
1426 
1427 	for (rt_se = back; rt_se; rt_se = rt_se->back) {
1428 		if (on_rt_rq(rt_se))
1429 			__dequeue_rt_entity(rt_se, flags);
1430 	}
1431 
1432 	dequeue_top_rt_rq(rt_rq_of_se(back), rt_nr_running);
1433 }
1434 
1435 static void enqueue_rt_entity(struct sched_rt_entity *rt_se, unsigned int flags)
1436 {
1437 	struct rq *rq = rq_of_rt_se(rt_se);
1438 
1439 	update_stats_enqueue_rt(rt_rq_of_se(rt_se), rt_se, flags);
1440 
1441 	dequeue_rt_stack(rt_se, flags);
1442 	for_each_sched_rt_entity(rt_se)
1443 		__enqueue_rt_entity(rt_se, flags);
1444 	enqueue_top_rt_rq(&rq->rt);
1445 }
1446 
1447 static void dequeue_rt_entity(struct sched_rt_entity *rt_se, unsigned int flags)
1448 {
1449 	struct rq *rq = rq_of_rt_se(rt_se);
1450 
1451 	update_stats_dequeue_rt(rt_rq_of_se(rt_se), rt_se, flags);
1452 
1453 	dequeue_rt_stack(rt_se, flags);
1454 
1455 	for_each_sched_rt_entity(rt_se) {
1456 		struct rt_rq *rt_rq = group_rt_rq(rt_se);
1457 
1458 		if (rt_rq && rt_rq->rt_nr_running)
1459 			__enqueue_rt_entity(rt_se, flags);
1460 	}
1461 	enqueue_top_rt_rq(&rq->rt);
1462 }
1463 
1464 /*
1465  * Adding/removing a task to/from a priority array:
1466  */
1467 static void
1468 enqueue_task_rt(struct rq *rq, struct task_struct *p, int flags)
1469 {
1470 	struct sched_rt_entity *rt_se = &p->rt;
1471 
1472 	if (flags & ENQUEUE_WAKEUP)
1473 		rt_se->timeout = 0;
1474 
1475 	check_schedstat_required();
1476 	update_stats_wait_start_rt(rt_rq_of_se(rt_se), rt_se);
1477 
1478 	enqueue_rt_entity(rt_se, flags);
1479 
1480 	if (!task_current(rq, p) && p->nr_cpus_allowed > 1)
1481 		enqueue_pushable_task(rq, p);
1482 }
1483 
1484 static bool dequeue_task_rt(struct rq *rq, struct task_struct *p, int flags)
1485 {
1486 	struct sched_rt_entity *rt_se = &p->rt;
1487 
1488 	update_curr_rt(rq);
1489 	dequeue_rt_entity(rt_se, flags);
1490 
1491 	dequeue_pushable_task(rq, p);
1492 
1493 	return true;
1494 }
1495 
1496 /*
1497  * Put task to the head or the end of the run list without the overhead of
1498  * dequeue followed by enqueue.
1499  */
1500 static void
1501 requeue_rt_entity(struct rt_rq *rt_rq, struct sched_rt_entity *rt_se, int head)
1502 {
1503 	if (on_rt_rq(rt_se)) {
1504 		struct rt_prio_array *array = &rt_rq->active;
1505 		struct list_head *queue = array->queue + rt_se_prio(rt_se);
1506 
1507 		if (head)
1508 			list_move(&rt_se->run_list, queue);
1509 		else
1510 			list_move_tail(&rt_se->run_list, queue);
1511 	}
1512 }
1513 
1514 static void requeue_task_rt(struct rq *rq, struct task_struct *p, int head)
1515 {
1516 	struct sched_rt_entity *rt_se = &p->rt;
1517 	struct rt_rq *rt_rq;
1518 
1519 	for_each_sched_rt_entity(rt_se) {
1520 		rt_rq = rt_rq_of_se(rt_se);
1521 		requeue_rt_entity(rt_rq, rt_se, head);
1522 	}
1523 }
1524 
1525 static void yield_task_rt(struct rq *rq)
1526 {
1527 	requeue_task_rt(rq, rq->curr, 0);
1528 }
1529 
1530 #ifdef CONFIG_SMP
1531 static int find_lowest_rq(struct task_struct *task);
1532 
1533 static int
1534 select_task_rq_rt(struct task_struct *p, int cpu, int flags)
1535 {
1536 	struct task_struct *curr, *donor;
1537 	struct rq *rq;
1538 	bool test;
1539 
1540 	/* For anything but wake ups, just return the task_cpu */
1541 	if (!(flags & (WF_TTWU | WF_FORK)))
1542 		goto out;
1543 
1544 	rq = cpu_rq(cpu);
1545 
1546 	rcu_read_lock();
1547 	curr = READ_ONCE(rq->curr); /* unlocked access */
1548 	donor = READ_ONCE(rq->donor);
1549 
1550 	/*
1551 	 * If the current task on @p's runqueue is an RT task, then
1552 	 * try to see if we can wake this RT task up on another
1553 	 * runqueue. Otherwise simply start this RT task
1554 	 * on its current runqueue.
1555 	 *
1556 	 * We want to avoid overloading runqueues. If the woken
1557 	 * task is a higher priority, then it will stay on this CPU
1558 	 * and the lower prio task should be moved to another CPU.
1559 	 * Even though this will probably make the lower prio task
1560 	 * lose its cache, we do not want to bounce a higher task
1561 	 * around just because it gave up its CPU, perhaps for a
1562 	 * lock?
1563 	 *
1564 	 * For equal prio tasks, we just let the scheduler sort it out.
1565 	 *
1566 	 * Otherwise, just let it ride on the affine RQ and the
1567 	 * post-schedule router will push the preempted task away
1568 	 *
1569 	 * This test is optimistic, if we get it wrong the load-balancer
1570 	 * will have to sort it out.
1571 	 *
1572 	 * We take into account the capacity of the CPU to ensure it fits the
1573 	 * requirement of the task - which is only important on heterogeneous
1574 	 * systems like big.LITTLE.
1575 	 */
1576 	test = curr &&
1577 	       unlikely(rt_task(donor)) &&
1578 	       (curr->nr_cpus_allowed < 2 || donor->prio <= p->prio);
1579 
1580 	if (test || !rt_task_fits_capacity(p, cpu)) {
1581 		int target = find_lowest_rq(p);
1582 
1583 		/*
1584 		 * Bail out if we were forcing a migration to find a better
1585 		 * fitting CPU but our search failed.
1586 		 */
1587 		if (!test && target != -1 && !rt_task_fits_capacity(p, target))
1588 			goto out_unlock;
1589 
1590 		/*
1591 		 * Don't bother moving it if the destination CPU is
1592 		 * not running a lower priority task.
1593 		 */
1594 		if (target != -1 &&
1595 		    p->prio < cpu_rq(target)->rt.highest_prio.curr)
1596 			cpu = target;
1597 	}
1598 
1599 out_unlock:
1600 	rcu_read_unlock();
1601 
1602 out:
1603 	return cpu;
1604 }
1605 
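/*
 * A task of equal priority woke up: if the woken task cannot move but the
 * current task can, requeue the woken task at the head and reschedule so
 * that the push logic can move current to another CPU.
 */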
1606 static void check_preempt_equal_prio(struct rq *rq, struct task_struct *p)
1607 {
1608 	if (rq->curr->nr_cpus_allowed == 1 ||
1609 	    !cpupri_find(&rq->rd->cpupri, rq->donor, NULL))
1610 		return;
1611 
1612 	/*
1613 	 * p is migratable, so let's not schedule it and
1614 	 * see if it is pushed or pulled somewhere else.
1615 	 */
1616 	if (p->nr_cpus_allowed != 1 &&
1617 	    cpupri_find(&rq->rd->cpupri, p, NULL))
1618 		return;
1619 
1620 	/*
1621 	 * There appear to be other CPUs that can accept
1622 	 * the current task but none can run 'p', so let's reschedule
1623 	 * to try and push the current task away:
1624 	 */
1625 	requeue_task_rt(rq, p, 1);
1626 	resched_curr(rq);
1627 }
1628 
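/*
 * Before picking the next task, pull higher-priority RT tasks from other
 * CPUs if switching away from @p would lower this runqueue's RT priority.
 */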
1629 static int balance_rt(struct rq *rq, struct task_struct *p, struct rq_flags *rf)
1630 {
1631 	if (!on_rt_rq(&p->rt) && need_pull_rt_task(rq, p)) {
1632 		/*
1633 		 * This is OK, because current is on_cpu, which avoids it being
1634 		 * picked for load-balance and preemption/IRQs are still
1635 		 * disabled avoiding further scheduler activity on it and we've
1636 		 * not yet started the picking loop.
1637 		 */
1638 		rq_unpin_lock(rq, rf);
1639 		pull_rt_task(rq);
1640 		rq_repin_lock(rq, rf);
1641 	}
1642 
1643 	return sched_stop_runnable(rq) || sched_dl_runnable(rq) || sched_rt_runnable(rq);
1644 }
1645 #endif /* CONFIG_SMP */
1646 
1647 /*
1648  * Preempt the current task with a newly woken task if needed:
1649  */
1650 static void wakeup_preempt_rt(struct rq *rq, struct task_struct *p, int flags)
1651 {
1652 	struct task_struct *donor = rq->donor;
1653 
1654 	if (p->prio < donor->prio) {
1655 		resched_curr(rq);
1656 		return;
1657 	}
1658 
1659 #ifdef CONFIG_SMP
1660 	/*
1661 	 * If:
1662 	 *
1663 	 * - the newly woken task is of equal priority to the current task
1664 	 * - the newly woken task is non-migratable while current is migratable
1665 	 * - current will be preempted on the next reschedule
1666 	 *
1667 	 * we should check to see if current can readily move to a different
1668 	 * cpu.  If so, we will reschedule to allow the push logic to try
1669 	 * to move current somewhere else, making room for our non-migratable
1670 	 * task.
1671 	 */
1672 	if (p->prio == donor->prio && !test_tsk_need_resched(rq->curr))
1673 		check_preempt_equal_prio(rq, p);
1674 #endif
1675 }
1676 
1677 static inline void set_next_task_rt(struct rq *rq, struct task_struct *p, bool first)
1678 {
1679 	struct sched_rt_entity *rt_se = &p->rt;
1680 	struct rt_rq *rt_rq = &rq->rt;
1681 
1682 	p->se.exec_start = rq_clock_task(rq);
1683 	if (on_rt_rq(&p->rt))
1684 		update_stats_wait_end_rt(rt_rq, rt_se);
1685 
1686 	/* The running task is never eligible for pushing */
1687 	dequeue_pushable_task(rq, p);
1688 
1689 	if (!first)
1690 		return;
1691 
1692 	/*
1693 	 * If prev task was rt, put_prev_task() has already updated the
1694 	 * utilization. We only care about the case where we start to schedule
1695 	 * an rt task.
1696 	 */
1697 	if (rq->donor->sched_class != &rt_sched_class)
1698 		update_rt_rq_load_avg(rq_clock_pelt(rq), rq, 0);
1699 
1700 	rt_queue_push_tasks(rq);
1701 }
1702 
1703 static struct sched_rt_entity *pick_next_rt_entity(struct rt_rq *rt_rq)
1704 {
1705 	struct rt_prio_array *array = &rt_rq->active;
1706 	struct sched_rt_entity *next = NULL;
1707 	struct list_head *queue;
1708 	int idx;
1709 
1710 	idx = sched_find_first_bit(array->bitmap);
1711 	BUG_ON(idx >= MAX_RT_PRIO);
1712 
1713 	queue = array->queue + idx;
1714 	if (WARN_ON_ONCE(list_empty(queue)))
1715 		return NULL;
1716 	next = list_entry(queue->next, struct sched_rt_entity, run_list);
1717 
1718 	return next;
1719 }
1720 
1721 static struct task_struct *_pick_next_task_rt(struct rq *rq)
1722 {
1723 	struct sched_rt_entity *rt_se;
1724 	struct rt_rq *rt_rq  = &rq->rt;
1725 
1726 	do {
1727 		rt_se = pick_next_rt_entity(rt_rq);
1728 		if (unlikely(!rt_se))
1729 			return NULL;
1730 		rt_rq = group_rt_rq(rt_se);
1731 	} while (rt_rq);
1732 
1733 	return rt_task_of(rt_se);
1734 }
1735 
1736 static struct task_struct *pick_task_rt(struct rq *rq)
1737 {
1738 	struct task_struct *p;
1739 
1740 	if (!sched_rt_runnable(rq))
1741 		return NULL;
1742 
1743 	p = _pick_next_task_rt(rq);
1744 
1745 	return p;
1746 }
1747 
1748 static void put_prev_task_rt(struct rq *rq, struct task_struct *p, struct task_struct *next)
1749 {
1750 	struct sched_rt_entity *rt_se = &p->rt;
1751 	struct rt_rq *rt_rq = &rq->rt;
1752 
1753 	if (on_rt_rq(&p->rt))
1754 		update_stats_wait_start_rt(rt_rq, rt_se);
1755 
1756 	update_curr_rt(rq);
1757 
1758 	update_rt_rq_load_avg(rq_clock_pelt(rq), rq, 1);
1759 
1760 	/*
1761 	 * The previous task needs to be made eligible for pushing
1762 	 * if it is still active
1763 	 */
1764 	if (on_rt_rq(&p->rt) && p->nr_cpus_allowed > 1)
1765 		enqueue_pushable_task(rq, p);
1766 }
1767 
1768 #ifdef CONFIG_SMP
1769 
1770 /* Only try algorithms three times */
1771 #define RT_MAX_TRIES 3
1772 
1773 /*
1774  * Return the highest-priority pushable task on @rq that is suitable to run
1775  * on the given CPU, or NULL otherwise.
1776  */
1777 static struct task_struct *pick_highest_pushable_task(struct rq *rq, int cpu)
1778 {
1779 	struct plist_head *head = &rq->rt.pushable_tasks;
1780 	struct task_struct *p;
1781 
1782 	if (!has_pushable_tasks(rq))
1783 		return NULL;
1784 
1785 	plist_for_each_entry(p, head, pushable_tasks) {
1786 		if (task_is_pushable(rq, p, cpu))
1787 			return p;
1788 	}
1789 
1790 	return NULL;
1791 }
1792 
1793 static DEFINE_PER_CPU(cpumask_var_t, local_cpu_mask);
1794 
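/*
 * Find a CPU running lower-priority work that @task may run on: prefer the
 * task's previous CPU, then CPUs sharing a wake-affine domain with this CPU,
 * and return -1 if no suitable CPU is found.
 */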
1795 static int find_lowest_rq(struct task_struct *task)
1796 {
1797 	struct sched_domain *sd;
1798 	struct cpumask *lowest_mask = this_cpu_cpumask_var_ptr(local_cpu_mask);
1799 	int this_cpu = smp_processor_id();
1800 	int cpu      = task_cpu(task);
1801 	int ret;
1802 
1803 	/* Make sure the mask is initialized first */
1804 	if (unlikely(!lowest_mask))
1805 		return -1;
1806 
1807 	if (task->nr_cpus_allowed == 1)
1808 		return -1; /* No other targets possible */
1809 
1810 	/*
1811 	 * If we're on asym system ensure we consider the different capacities
1812 	 * of the CPUs when searching for the lowest_mask.
1813 	 */
1814 	if (sched_asym_cpucap_active()) {
1815 
1816 		ret = cpupri_find_fitness(&task_rq(task)->rd->cpupri,
1817 					  task, lowest_mask,
1818 					  rt_task_fits_capacity);
1819 	} else {
1820 
1821 		ret = cpupri_find(&task_rq(task)->rd->cpupri,
1822 				  task, lowest_mask);
1823 	}
1824 
1825 	if (!ret)
1826 		return -1; /* No targets found */
1827 
1828 	/*
1829 	 * At this point we have built a mask of CPUs representing the
1830 	 * lowest priority tasks in the system.  Now we want to elect
1831 	 * the best one based on our affinity and topology.
1832 	 *
1833 	 * We prioritize the last CPU that the task executed on since
1834 	 * it is most likely cache-hot in that location.
1835 	 */
1836 	if (cpumask_test_cpu(cpu, lowest_mask))
1837 		return cpu;
1838 
1839 	/*
1840 	 * Otherwise, we consult the sched_domains span maps to figure
1841 	 * out which CPU is logically closest to our hot cache data.
1842 	 */
1843 	if (!cpumask_test_cpu(this_cpu, lowest_mask))
1844 		this_cpu = -1; /* Skip this_cpu opt if not among lowest */
1845 
1846 	rcu_read_lock();
1847 	for_each_domain(cpu, sd) {
1848 		if (sd->flags & SD_WAKE_AFFINE) {
1849 			int best_cpu;
1850 
1851 			/*
1852 			 * "this_cpu" is cheaper to preempt than a
1853 			 * remote processor.
1854 			 */
1855 			if (this_cpu != -1 &&
1856 			    cpumask_test_cpu(this_cpu, sched_domain_span(sd))) {
1857 				rcu_read_unlock();
1858 				return this_cpu;
1859 			}
1860 
1861 			best_cpu = cpumask_any_and_distribute(lowest_mask,
1862 							      sched_domain_span(sd));
1863 			if (best_cpu < nr_cpu_ids) {
1864 				rcu_read_unlock();
1865 				return best_cpu;
1866 			}
1867 		}
1868 	}
1869 	rcu_read_unlock();
1870 
1871 	/*
1872 	 * And finally, if there were no matches within the domains
1873 	 * just give the caller *something* to work with from the compatible
1874 	 * locations.
1875 	 */
1876 	if (this_cpu != -1)
1877 		return this_cpu;
1878 
1879 	cpu = cpumask_any_distribute(lowest_mask);
1880 	if (cpu < nr_cpu_ids)
1881 		return cpu;
1882 
1883 	return -1;
1884 }
1885 
1886 /* Will lock the rq it finds */
1887 static struct rq *find_lock_lowest_rq(struct task_struct *task, struct rq *rq)
1888 {
1889 	struct rq *lowest_rq = NULL;
1890 	int tries;
1891 	int cpu;
1892 
1893 	for (tries = 0; tries < RT_MAX_TRIES; tries++) {
1894 		cpu = find_lowest_rq(task);
1895 
1896 		if ((cpu == -1) || (cpu == rq->cpu))
1897 			break;
1898 
1899 		lowest_rq = cpu_rq(cpu);
1900 
1901 		if (lowest_rq->rt.highest_prio.curr <= task->prio) {
1902 			/*
1903 			 * Target rq has tasks of equal or higher priority,
1904 			 * retrying does not release any lock and is unlikely
1905 			 * to yield a different result.
1906 			 */
1907 			lowest_rq = NULL;
1908 			break;
1909 		}
1910 
1911 		/* if the prio of this runqueue changed, try again */
1912 		if (double_lock_balance(rq, lowest_rq)) {
1913 			/*
1914 			 * We had to unlock the run queue. In
1915 			 * the meantime, the task could have
1916 			 * migrated already or had its affinity changed.
1917 			 * Also make sure that it wasn't scheduled on its rq.
1918 			 * It is possible the task was scheduled, set
1919 			 * "migrate_disabled" and then got preempted, so we must
1920 			 * check the task migration disable flag here too.
1921 			 */
1922 			if (unlikely(task_rq(task) != rq ||
1923 				     !cpumask_test_cpu(lowest_rq->cpu, &task->cpus_mask) ||
1924 				     task_on_cpu(rq, task) ||
1925 				     !rt_task(task) ||
1926 				     is_migration_disabled(task) ||
1927 				     !task_on_rq_queued(task))) {
1928 
1929 				double_unlock_balance(rq, lowest_rq);
1930 				lowest_rq = NULL;
1931 				break;
1932 			}
1933 		}
1934 
1935 		/* If this rq is still suitable, use it. */
1936 		if (lowest_rq->rt.highest_prio.curr > task->prio)
1937 			break;
1938 
1939 		/* try again */
1940 		double_unlock_balance(rq, lowest_rq);
1941 		lowest_rq = NULL;
1942 	}
1943 
1944 	return lowest_rq;
1945 }
1946 
1947 static struct task_struct *pick_next_pushable_task(struct rq *rq)
1948 {
1949 	struct task_struct *p;
1950 
1951 	if (!has_pushable_tasks(rq))
1952 		return NULL;
1953 
1954 	p = plist_first_entry(&rq->rt.pushable_tasks,
1955 			      struct task_struct, pushable_tasks);
1956 
1957 	BUG_ON(rq->cpu != task_cpu(p));
1958 	BUG_ON(task_current(rq, p));
1959 	BUG_ON(task_current_donor(rq, p));
1960 	BUG_ON(p->nr_cpus_allowed <= 1);
1961 
1962 	BUG_ON(!task_on_rq_queued(p));
1963 	BUG_ON(!rt_task(p));
1964 
1965 	return p;
1966 }
1967 
1968 /*
1969  * If the current CPU has more than one RT task, see if the non-running
1970  * task can migrate over to a CPU that is running a task of lesser
1971  * priority.
1972  */
1973 static int push_rt_task(struct rq *rq, bool pull)
1974 {
1975 	struct task_struct *next_task;
1976 	struct rq *lowest_rq;
1977 	int ret = 0;
1978 
1979 	if (!rq->rt.overloaded)
1980 		return 0;
1981 
1982 	next_task = pick_next_pushable_task(rq);
1983 	if (!next_task)
1984 		return 0;
1985 
1986 retry:
1987 	/*
1988 	 * It's possible that next_task slipped in with a
1989 	 * higher priority than current. If that's the case,
1990 	 * just reschedule current.
1991 	 */
1992 	if (unlikely(next_task->prio < rq->donor->prio)) {
1993 		resched_curr(rq);
1994 		return 0;
1995 	}
1996 
1997 	if (is_migration_disabled(next_task)) {
1998 		struct task_struct *push_task = NULL;
1999 		int cpu;
2000 
2001 		if (!pull || rq->push_busy)
2002 			return 0;
2003 
2004 		/*
2005 		 * Invoking find_lowest_rq() on anything but an RT task doesn't
2006 		 * make sense. Per the above priority check, curr has to
2007 		 * be of higher priority than next_task, so no need to
2008 		 * reschedule when bailing out.
2009 		 *
2010 		 * Note that the stoppers are masqueraded as SCHED_FIFO
2011 		 * (cf. sched_set_stop_task()), so we can't rely on rt_task().
2012 		 */
2013 		if (rq->donor->sched_class != &rt_sched_class)
2014 			return 0;
2015 
2016 		cpu = find_lowest_rq(rq->curr);
2017 		if (cpu == -1 || cpu == rq->cpu)
2018 			return 0;
2019 
2020 		/*
2021 		 * We found a CPU with lower priority than @next_task, so
2022 		 * @next_task ought to be running. However, we cannot migrate it
2023 		 * to that other CPU (it is migration-disabled); instead, attempt
2024 		 * to push away the task currently running on this CPU.
2025 		 */
2026 		push_task = get_push_task(rq);
2027 		if (push_task) {
2028 			preempt_disable();
2029 			raw_spin_rq_unlock(rq);
2030 			stop_one_cpu_nowait(rq->cpu, push_cpu_stop,
2031 					    push_task, &rq->push_work);
2032 			preempt_enable();
2033 			raw_spin_rq_lock(rq);
2034 		}
2035 
2036 		return 0;
2037 	}
2038 
2039 	if (WARN_ON(next_task == rq->curr))
2040 		return 0;
2041 
2042 	/* We might release rq lock */
2043 	get_task_struct(next_task);
2044 
2045 	/* find_lock_lowest_rq locks the rq if found */
2046 	lowest_rq = find_lock_lowest_rq(next_task, rq);
2047 	if (!lowest_rq) {
2048 		struct task_struct *task;
2049 		/*
2050 		 * find_lock_lowest_rq releases rq->lock
2051 		 * so it is possible that next_task has migrated.
2052 		 *
2053 		 * We need to make sure that the task is still on the same
2054 		 * run-queue and is also still the next task eligible for
2055 		 * pushing.
2056 		 */
2057 		task = pick_next_pushable_task(rq);
2058 		if (task == next_task) {
2059 			/*
2060 			 * The task hasn't migrated, and is still the next
2061 			 * eligible task, but we failed to find a run-queue
2062 			 * to push it to.  Do not retry in this case, since
2063 			 * other CPUs will pull from us when ready.
2064 			 */
2065 			goto out;
2066 		}
2067 
2068 		if (!task)
2069 			/* No more tasks, just exit */
2070 			goto out;
2071 
2072 		/*
2073 		 * Something has shifted, try again.
2074 		 */
2075 		put_task_struct(next_task);
2076 		next_task = task;
2077 		goto retry;
2078 	}
2079 
2080 	move_queued_task_locked(rq, lowest_rq, next_task);
2081 	resched_curr(lowest_rq);
2082 	ret = 1;
2083 
2084 	double_unlock_balance(rq, lowest_rq);
2085 out:
2086 	put_task_struct(next_task);
2087 
2088 	return ret;
2089 }
2090 
2091 static void push_rt_tasks(struct rq *rq)
2092 {
2093 	/* push_rt_task() will return true if it moved an RT task */
2094 	while (push_rt_task(rq, false))
2095 		;
2096 }
2097 
2098 #ifdef HAVE_RT_PUSH_IPI
2099 
2100 /*
2101  * When a high priority task schedules out from a CPU and a lower priority
2102  * task is scheduled in, a check is made to see if there are any RT tasks
2103  * on other CPUs that are waiting to run because a higher priority RT task
2104  * is currently running on its CPU. In this case, the CPU with multiple RT
2105  * tasks queued on it (overloaded) needs to be notified that a CPU has opened
2106  * up that may be able to run one of its non-running queued RT tasks.
2107  *
2108  * All CPUs with overloaded RT tasks need to be notified as there is currently
2109  * no way to know which of these CPUs have the highest priority task waiting
2110  * to run. Instead of trying to take a spinlock on each of these CPUs,
2111  * which has been shown to cause large latency when done on machines with
2112  * many CPUs, an IPI is sent to the CPUs to have them push off the
2113  * overloaded RT tasks waiting to run.
2114  *
2115  * Just sending an IPI to each of the CPUs is also an issue, as on
2116  * machines with a large CPU count this can cause an IPI storm on a CPU,
2117  * especially if it's the only CPU with multiple RT tasks queued and a
2118  * large number of CPUs schedule a lower priority task at the same time.
2119  *
2120  * Each root domain has its own IRQ work function that can iterate over
2121  * all CPUs with RT overloaded tasks. Since every CPU with overloaded RT
2122  * tasks must be checked regardless of whether one or many CPUs are
2123  * lowering their priority, a single IRQ work iterator tries to push off
2124  * the RT tasks that are waiting to run.
2125  *
2126  * When a CPU schedules a lower priority task, it will kick off the
2127  * IRQ work iterator that will jump to each CPU with overloaded RT tasks.
2128  * As it only takes the first CPU that schedules a lower priority task
2129  * to start the process, the rto_loop_start variable is atomically set,
2130  * and only the CPU that flips it from zero will try to take the rto_lock.
2131  * This prevents high contention on the lock as the process handles all
2132  * CPUs scheduling lower priority tasks.
2133  *
2134  * All CPUs that are scheduling a lower priority task will increment the
2135  * rto_loop_next variable. This will make sure that the IRQ work iterator
2136  * checks all RT overloaded CPUs whenever a CPU schedules a new lower
2137  * priority task, even if the iterator is in the middle of a scan. Incrementing
2138  * the rto_loop_next will cause the iterator to perform another scan.
2139  *
2140  */
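/*
 * A hedged worked example of the loop/loop_next protocol (CPU numbers are
 * illustrative): CPU0 lowers its priority, increments rto_loop_next (say to
 * 5) and wins rto_loop_start, so the IPI chain begins. While the irq_work is
 * still walking the rto_mask, CPU3 also lowers its priority and bumps
 * rto_loop_next to 6. When the iterator exhausts the mask it compares
 * rto_loop (5) against rto_loop_next (6), sees they differ, stores 6 in
 * rto_loop and rescans the mask instead of stopping, so the overloaded CPUs
 * are re-examined for the slot CPU3 just opened.
 */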
2141 static int rto_next_cpu(struct root_domain *rd)
2142 {
2143 	int next;
2144 	int cpu;
2145 
2146 	/*
2147 	 * When starting the IPI RT pushing, the rto_cpu is set to -1, and
2148 	 * rto_next_cpu() will simply return the first CPU found in
2149 	 * the rto_mask.
2150 	 *
2151 	 * If rto_next_cpu() is called with rto_cpu set to a valid CPU, it
2152 	 * will return the next CPU found in the rto_mask.
2153 	 *
2154 	 * If there are no more CPUs left in the rto_mask, then a check is made
2155 	 * against rto_loop and rto_loop_next. rto_loop is only updated with
2156 	 * the rto_lock held, but any CPU may increment the rto_loop_next
2157 	 * without any locking.
2158 	 */
2159 	for (;;) {
2160 
2161 		/* When rto_cpu is -1 this acts like cpumask_first() */
2162 		cpu = cpumask_next(rd->rto_cpu, rd->rto_mask);
2163 
2164 		rd->rto_cpu = cpu;
2165 
2166 		if (cpu < nr_cpu_ids)
2167 			return cpu;
2168 
2169 		rd->rto_cpu = -1;
2170 
2171 		/*
2172 		 * ACQUIRE ensures we see the @rto_mask changes
2173 		 * made prior to the @next value observed.
2174 		 *
2175 		 * Matches WMB in rt_set_overload().
2176 		 */
2177 		next = atomic_read_acquire(&rd->rto_loop_next);
2178 
2179 		if (rd->rto_loop == next)
2180 			break;
2181 
2182 		rd->rto_loop = next;
2183 	}
2184 
2185 	return -1;
2186 }
2187 
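/*
 * rto_start_trylock()/rto_start_unlock() form a minimal test-and-set lock:
 * atomic_cmpxchg_acquire() returns the old value, so a zero return means
 * this CPU flipped 0 -> 1 and owns the loop-start phase; any other CPU sees
 * a non-zero old value and backs off in tell_cpu_to_push().
 */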
2188 static inline bool rto_start_trylock(atomic_t *v)
2189 {
2190 	return !atomic_cmpxchg_acquire(v, 0, 1);
2191 }
2192 
2193 static inline void rto_start_unlock(atomic_t *v)
2194 {
2195 	atomic_set_release(v, 0);
2196 }
2197 
2198 static void tell_cpu_to_push(struct rq *rq)
2199 {
2200 	int cpu = -1;
2201 
2202 	/* Keep the loop going if the IPI is currently active */
2203 	atomic_inc(&rq->rd->rto_loop_next);
2204 
2205 	/* Only one CPU can initiate a loop at a time */
2206 	if (!rto_start_trylock(&rq->rd->rto_loop_start))
2207 		return;
2208 
2209 	raw_spin_lock(&rq->rd->rto_lock);
2210 
2211 	/*
2212 	 * The rto_cpu is updated under the lock; if it holds a valid CPU
2213 	 * then the IPI is still running and will continue due to the
2214 	 * update to loop_next, and nothing needs to be done here.
2215 	 * Otherwise it is finishing up and an IPI needs to be sent.
2216 	 */
2217 	if (rq->rd->rto_cpu < 0)
2218 		cpu = rto_next_cpu(rq->rd);
2219 
2220 	raw_spin_unlock(&rq->rd->rto_lock);
2221 
2222 	rto_start_unlock(&rq->rd->rto_loop_start);
2223 
2224 	if (cpu >= 0) {
2225 		/* Make sure the rd does not get freed while pushing */
2226 		sched_get_rd(rq->rd);
2227 		irq_work_queue_on(&rq->rd->rto_push_work, cpu);
2228 	}
2229 }
2230 
2231 /* Called from hardirq context */
2232 void rto_push_irq_work_func(struct irq_work *work)
2233 {
2234 	struct root_domain *rd =
2235 		container_of(work, struct root_domain, rto_push_work);
2236 	struct rq *rq;
2237 	int cpu;
2238 
2239 	rq = this_rq();
2240 
2241 	/*
2242 	 * We do not need to grab the lock to check for has_pushable_tasks.
2243 	 * When it gets updated, a check is made if a push is possible.
2244 	 */
2245 	if (has_pushable_tasks(rq)) {
2246 		raw_spin_rq_lock(rq);
2247 		while (push_rt_task(rq, true))
2248 			;
2249 		raw_spin_rq_unlock(rq);
2250 	}
2251 
2252 	raw_spin_lock(&rd->rto_lock);
2253 
2254 	/* Pass the IPI to the next rt overloaded queue */
2255 	cpu = rto_next_cpu(rd);
2256 
2257 	raw_spin_unlock(&rd->rto_lock);
2258 
2259 	if (cpu < 0) {
2260 		sched_put_rd(rd);
2261 		return;
2262 	}
2263 
2264 	/* Try the next RT overloaded CPU */
2265 	irq_work_queue_on(&rd->rto_push_work, cpu);
2266 }
2267 #endif /* HAVE_RT_PUSH_IPI */
2268 
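/*
 * pull_rt_task() is the mirror image of pushing: when this runqueue drops to
 * a lower-priority task, walk the root domain's rto_mask and, for each
 * overloaded CPU whose next-highest pushable task would preempt us, take both
 * rq locks and either migrate that task here or, if it is migration-disabled,
 * ask the stopper to push away the source CPU's current task. With
 * RT_PUSH_IPI enabled the walk is delegated to the IPI chain above instead of
 * taking remote rq locks directly.
 */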
2269 static void pull_rt_task(struct rq *this_rq)
2270 {
2271 	int this_cpu = this_rq->cpu, cpu;
2272 	bool resched = false;
2273 	struct task_struct *p, *push_task;
2274 	struct rq *src_rq;
2275 	int rt_overload_count = rt_overloaded(this_rq);
2276 
2277 	if (likely(!rt_overload_count))
2278 		return;
2279 
2280 	/*
2281 	 * Match the barrier from rt_set_overload(); this guarantees that if we
2282 	 * see overloaded we must also see the rto_mask bit.
2283 	 */
2284 	smp_rmb();
2285 
2286 	/* If we are the only overloaded CPU do nothing */
2287 	if (rt_overload_count == 1 &&
2288 	    cpumask_test_cpu(this_rq->cpu, this_rq->rd->rto_mask))
2289 		return;
2290 
2291 #ifdef HAVE_RT_PUSH_IPI
2292 	if (sched_feat(RT_PUSH_IPI)) {
2293 		tell_cpu_to_push(this_rq);
2294 		return;
2295 	}
2296 #endif
2297 
2298 	for_each_cpu(cpu, this_rq->rd->rto_mask) {
2299 		if (this_cpu == cpu)
2300 			continue;
2301 
2302 		src_rq = cpu_rq(cpu);
2303 
2304 		/*
2305 		 * Don't bother taking the src_rq->lock if the next highest
2306 		 * task is known to be lower-priority than our current task.
2307 		 * This may look racy, but if this value is about to go
2308 		 * logically higher, the src_rq will push this task away.
2309 		 * And if it's going logically lower, we do not care.
2310 		 */
2311 		if (src_rq->rt.highest_prio.next >=
2312 		    this_rq->rt.highest_prio.curr)
2313 			continue;
2314 
2315 		/*
2316 		 * We can potentially drop this_rq's lock in
2317 		 * double_lock_balance, and another CPU could
2318 		 * alter this_rq
2319 		 */
2320 		push_task = NULL;
2321 		double_lock_balance(this_rq, src_rq);
2322 
2323 		/*
2324 		 * We can only pull a task that is pushable
2325 		 * on its rq, and no others.
2326 		 */
2327 		p = pick_highest_pushable_task(src_rq, this_cpu);
2328 
2329 		/*
2330 		 * Do we have an RT task that preempts
2331 		 * the to-be-scheduled task?
2332 		 */
2333 		if (p && (p->prio < this_rq->rt.highest_prio.curr)) {
2334 			WARN_ON(p == src_rq->curr);
2335 			WARN_ON(!task_on_rq_queued(p));
2336 
2337 			/*
2338 			 * There's a chance that p is higher in priority
2339 			 * than what's currently running on its CPU.
2340 			 * This just means that p is waking up and hasn't
2341 			 * had a chance to schedule yet. We only pull
2342 			 * p if it is lower in priority than the
2343 			 * current task on that run queue.
2344 			 */
2345 			if (p->prio < src_rq->donor->prio)
2346 				goto skip;
2347 
2348 			if (is_migration_disabled(p)) {
2349 				push_task = get_push_task(src_rq);
2350 			} else {
2351 				move_queued_task_locked(src_rq, this_rq, p);
2352 				resched = true;
2353 			}
2354 			/*
2355 			 * We continue with the search, just in
2356 			 * case there's an even higher prio task
2357 			 * in another runqueue. (low likelihood
2358 			 * but possible)
2359 			 */
2360 		}
2361 skip:
2362 		double_unlock_balance(this_rq, src_rq);
2363 
2364 		if (push_task) {
2365 			preempt_disable();
2366 			raw_spin_rq_unlock(this_rq);
2367 			stop_one_cpu_nowait(src_rq->cpu, push_cpu_stop,
2368 					    push_task, &src_rq->push_work);
2369 			preempt_enable();
2370 			raw_spin_rq_lock(this_rq);
2371 		}
2372 	}
2373 
2374 	if (resched)
2375 		resched_curr(this_rq);
2376 }
2377 
2378 /*
2379  * If we are not running and we are not going to reschedule soon, we should
2380  * try to push tasks away now
2381  */
2382 static void task_woken_rt(struct rq *rq, struct task_struct *p)
2383 {
2384 	bool need_to_push = !task_on_cpu(rq, p) &&
2385 			    !test_tsk_need_resched(rq->curr) &&
2386 			    p->nr_cpus_allowed > 1 &&
2387 			    (dl_task(rq->donor) || rt_task(rq->donor)) &&
2388 			    (rq->curr->nr_cpus_allowed < 2 ||
2389 			     rq->donor->prio <= p->prio);
2390 
2391 	if (need_to_push)
2392 		push_rt_tasks(rq);
2393 }
2394 
2395 /* Assumes rq->lock is held */
2396 static void rq_online_rt(struct rq *rq)
2397 {
2398 	if (rq->rt.overloaded)
2399 		rt_set_overload(rq);
2400 
2401 	__enable_runtime(rq);
2402 
2403 	cpupri_set(&rq->rd->cpupri, rq->cpu, rq->rt.highest_prio.curr);
2404 }
2405 
2406 /* Assumes rq->lock is held */
2407 static void rq_offline_rt(struct rq *rq)
2408 {
2409 	if (rq->rt.overloaded)
2410 		rt_clear_overload(rq);
2411 
2412 	__disable_runtime(rq);
2413 
2414 	cpupri_set(&rq->rd->cpupri, rq->cpu, CPUPRI_INVALID);
2415 }
2416 
2417 /*
2418  * When switching away from the RT queue, we bring ourselves to a position
2419  * where we might want to pull RT tasks from other runqueues.
2420  */
2421 static void switched_from_rt(struct rq *rq, struct task_struct *p)
2422 {
2423 	/*
2424 	 * If there are other RT tasks then we will reschedule
2425 	 * and the scheduling of the other RT tasks will handle
2426 	 * the balancing. But if we are the last RT task
2427 	 * we may need to handle the pulling of RT tasks
2428 	 * now.
2429 	 */
2430 	if (!task_on_rq_queued(p) || rq->rt.rt_nr_running)
2431 		return;
2432 
2433 	rt_queue_pull_task(rq);
2434 }
2435 
2436 void __init init_sched_rt_class(void)
2437 {
2438 	unsigned int i;
2439 
2440 	for_each_possible_cpu(i) {
2441 		zalloc_cpumask_var_node(&per_cpu(local_cpu_mask, i),
2442 					GFP_KERNEL, cpu_to_node(i));
2443 	}
2444 }
2445 #endif /* CONFIG_SMP */
2446 
2447 /*
2448  * When switching a task to RT, we may overload the runqueue
2449  * with RT tasks. In this case we try to push them off to
2450  * other runqueues.
2451  */
2452 static void switched_to_rt(struct rq *rq, struct task_struct *p)
2453 {
2454 	/*
2455 	 * If we are running, update the avg_rt tracking, as the running time
2456 	 * will from now on be accounted into the latter.
2457 	 */
2458 	if (task_current(rq, p)) {
2459 		update_rt_rq_load_avg(rq_clock_pelt(rq), rq, 0);
2460 		return;
2461 	}
2462 
2463 	/*
2464 	 * If we are not running we may need to preempt the current
2465 	 * running task. If that current running task is also an RT task
2466 	 * then see if we can move to another run queue.
2467 	 */
2468 	if (task_on_rq_queued(p)) {
2469 #ifdef CONFIG_SMP
2470 		if (p->nr_cpus_allowed > 1 && rq->rt.overloaded)
2471 			rt_queue_push_tasks(rq);
2472 #endif /* CONFIG_SMP */
2473 		if (p->prio < rq->donor->prio && cpu_online(cpu_of(rq)))
2474 			resched_curr(rq);
2475 	}
2476 }
2477 
2478 /*
2479  * Priority of the task has changed. This may cause
2480  * us to initiate a push or pull.
2481  */
2482 static void
2483 prio_changed_rt(struct rq *rq, struct task_struct *p, int oldprio)
2484 {
2485 	if (!task_on_rq_queued(p))
2486 		return;
2487 
2488 	if (task_current_donor(rq, p)) {
2489 #ifdef CONFIG_SMP
2490 		/*
2491 		 * If our priority decreases while running, we
2492 		 * may need to pull tasks to this runqueue.
2493 		 */
2494 		if (oldprio < p->prio)
2495 			rt_queue_pull_task(rq);
2496 
2497 		/*
2498 		 * If there's a higher priority task waiting to run
2499 		 * then reschedule.
2500 		 */
2501 		if (p->prio > rq->rt.highest_prio.curr)
2502 			resched_curr(rq);
2503 #else
2504 		/* For UP simply resched on drop of prio */
2505 		if (oldprio < p->prio)
2506 			resched_curr(rq);
2507 #endif /* CONFIG_SMP */
2508 	} else {
2509 		/*
2510 		 * This task is not running, but if its priority is
2511 		 * higher than that of the currently running task,
2512 		 * then reschedule.
2513 		 */
2514 		if (p->prio < rq->donor->prio)
2515 			resched_curr(rq);
2516 	}
2517 }
2518 
2519 #ifdef CONFIG_POSIX_TIMERS
2520 static void watchdog(struct rq *rq, struct task_struct *p)
2521 {
2522 	unsigned long soft, hard;
2523 
2524 	/* max may change after cur was read; this will be fixed next tick */
2525 	soft = task_rlimit(p, RLIMIT_RTTIME);
2526 	hard = task_rlimit_max(p, RLIMIT_RTTIME);
2527 
2528 	if (soft != RLIM_INFINITY) {
2529 		unsigned long next;
2530 
2531 		if (p->rt.watchdog_stamp != jiffies) {
2532 			p->rt.timeout++;
2533 			p->rt.watchdog_stamp = jiffies;
2534 		}
2535 
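		/*
		 * RLIMIT_RTTIME is specified in microseconds while p->rt.timeout
		 * counts scheduler ticks, so convert the limit to ticks here.
		 * Illustrative example (actual HZ is configuration dependent):
		 * with HZ=1000, a 500000 us soft limit becomes next = 500 ticks.
		 */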
2536 		next = DIV_ROUND_UP(min(soft, hard), USEC_PER_SEC/HZ);
2537 		if (p->rt.timeout > next) {
2538 			posix_cputimers_rt_watchdog(&p->posix_cputimers,
2539 						    p->se.sum_exec_runtime);
2540 		}
2541 	}
2542 }
2543 #else
2544 static inline void watchdog(struct rq *rq, struct task_struct *p) { }
2545 #endif
2546 
2547 /*
2548  * scheduler tick hitting a task of our scheduling class.
2549  *
2550  * NOTE: This function can be called remotely by the tick offload that
2551  * goes along full dynticks. Therefore no local assumption can be made
2552  * and everything must be accessed through the @rq and @curr passed in
2553  * parameters.
2554  */
2555 static void task_tick_rt(struct rq *rq, struct task_struct *p, int queued)
2556 {
2557 	struct sched_rt_entity *rt_se = &p->rt;
2558 
2559 	update_curr_rt(rq);
2560 	update_rt_rq_load_avg(rq_clock_pelt(rq), rq, 1);
2561 
2562 	watchdog(rq, p);
2563 
2564 	/*
2565 	 * RR tasks need a special form of time-slice management.
2566 	 * FIFO tasks have no timeslices.
2567 	 */
2568 	if (p->policy != SCHED_RR)
2569 		return;
2570 
2571 	if (--p->rt.time_slice)
2572 		return;
2573 
2574 	p->rt.time_slice = sched_rr_timeslice;
2575 
2576 	/*
2577 	 * Requeue to the end of queue if we (and all of our ancestors) are not
2578 	 * the only element on the queue
2579 	 */
2580 	for_each_sched_rt_entity(rt_se) {
2581 		if (rt_se->run_list.prev != rt_se->run_list.next) {
2582 			requeue_task_rt(rq, p, 0);
2583 			resched_curr(rq);
2584 			return;
2585 		}
2586 	}
2587 }
2588 
2589 static unsigned int get_rr_interval_rt(struct rq *rq, struct task_struct *task)
2590 {
2591 	/*
2592 	 * Time slice is 0 for SCHED_FIFO tasks
2593 	 */
2594 	if (task->policy == SCHED_RR)
2595 		return sched_rr_timeslice;
2596 	else
2597 		return 0;
2598 }
2599 
2600 #ifdef CONFIG_SCHED_CORE
2601 static int task_is_throttled_rt(struct task_struct *p, int cpu)
2602 {
2603 	struct rt_rq *rt_rq;
2604 
2605 #ifdef CONFIG_RT_GROUP_SCHED
2606 	rt_rq = task_group(p)->rt_rq[cpu];
2607 #else
2608 	rt_rq = &cpu_rq(cpu)->rt;
2609 #endif
2610 
2611 	return rt_rq_throttled(rt_rq);
2612 }
2613 #endif
2614 
2615 DEFINE_SCHED_CLASS(rt) = {
2616 
2617 	.enqueue_task		= enqueue_task_rt,
2618 	.dequeue_task		= dequeue_task_rt,
2619 	.yield_task		= yield_task_rt,
2620 
2621 	.wakeup_preempt		= wakeup_preempt_rt,
2622 
2623 	.pick_task		= pick_task_rt,
2624 	.put_prev_task		= put_prev_task_rt,
2625 	.set_next_task          = set_next_task_rt,
2626 
2627 #ifdef CONFIG_SMP
2628 	.balance		= balance_rt,
2629 	.select_task_rq		= select_task_rq_rt,
2630 	.set_cpus_allowed       = set_cpus_allowed_common,
2631 	.rq_online              = rq_online_rt,
2632 	.rq_offline             = rq_offline_rt,
2633 	.task_woken		= task_woken_rt,
2634 	.switched_from		= switched_from_rt,
2635 	.find_lock_rq		= find_lock_lowest_rq,
2636 #endif
2637 
2638 	.task_tick		= task_tick_rt,
2639 
2640 	.get_rr_interval	= get_rr_interval_rt,
2641 
2642 	.prio_changed		= prio_changed_rt,
2643 	.switched_to		= switched_to_rt,
2644 
2645 	.update_curr		= update_curr_rt,
2646 
2647 #ifdef CONFIG_SCHED_CORE
2648 	.task_is_throttled	= task_is_throttled_rt,
2649 #endif
2650 
2651 #ifdef CONFIG_UCLAMP_TASK
2652 	.uclamp_enabled		= 1,
2653 #endif
2654 };
2655 
2656 #ifdef CONFIG_RT_GROUP_SCHED
2657 /*
2658  * Ensure that the real time constraints are schedulable.
2659  */
2660 static DEFINE_MUTEX(rt_constraints_mutex);
2661 
2662 static inline int tg_has_rt_tasks(struct task_group *tg)
2663 {
2664 	struct task_struct *task;
2665 	struct css_task_iter it;
2666 	int ret = 0;
2667 
2668 	/*
2669 	 * Autogroups do not have RT tasks; see autogroup_create().
2670 	 */
2671 	if (task_group_is_autogroup(tg))
2672 		return 0;
2673 
2674 	css_task_iter_start(&tg->css, 0, &it);
2675 	while (!ret && (task = css_task_iter_next(&it)))
2676 		ret |= rt_task(task);
2677 	css_task_iter_end(&it);
2678 
2679 	return ret;
2680 }
2681 
2682 struct rt_schedulable_data {
2683 	struct task_group *tg;
2684 	u64 rt_period;
2685 	u64 rt_runtime;
2686 };
2687 
2688 static int tg_rt_schedulable(struct task_group *tg, void *data)
2689 {
2690 	struct rt_schedulable_data *d = data;
2691 	struct task_group *child;
2692 	unsigned long total, sum = 0;
2693 	u64 period, runtime;
2694 
2695 	period = ktime_to_ns(tg->rt_bandwidth.rt_period);
2696 	runtime = tg->rt_bandwidth.rt_runtime;
2697 
2698 	if (tg == d->tg) {
2699 		period = d->rt_period;
2700 		runtime = d->rt_runtime;
2701 	}
2702 
2703 	/*
2704 	 * Cannot have more runtime than the period.
2705 	 */
2706 	if (runtime > period && runtime != RUNTIME_INF)
2707 		return -EINVAL;
2708 
2709 	/*
2710 	 * Ensure we don't starve existing RT tasks if runtime turns zero.
2711 	 */
2712 	if (rt_bandwidth_enabled() && !runtime &&
2713 	    tg->rt_bandwidth.rt_runtime && tg_has_rt_tasks(tg))
2714 		return -EBUSY;
2715 
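	/*
	 * to_ratio() expresses runtime/period as a fixed-point fraction
	 * scaled by 1 << BW_SHIFT. As a hedged example, the default
	 * 950000 us runtime in a 1000000 us period comes out to roughly
	 * 0.95 * 2^20 ~= 996147, which is then compared against the global
	 * limit and the children's sum below.
	 */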
2716 	total = to_ratio(period, runtime);
2717 
2718 	/*
2719 	 * Nobody can have more than the global setting allows.
2720 	 */
2721 	if (total > to_ratio(global_rt_period(), global_rt_runtime()))
2722 		return -EINVAL;
2723 
2724 	/*
2725 	 * The sum of our children's runtime should not exceed our own.
2726 	 */
2727 	list_for_each_entry_rcu(child, &tg->children, siblings) {
2728 		period = ktime_to_ns(child->rt_bandwidth.rt_period);
2729 		runtime = child->rt_bandwidth.rt_runtime;
2730 
2731 		if (child == d->tg) {
2732 			period = d->rt_period;
2733 			runtime = d->rt_runtime;
2734 		}
2735 
2736 		sum += to_ratio(period, runtime);
2737 	}
2738 
2739 	if (sum > total)
2740 		return -EINVAL;
2741 
2742 	return 0;
2743 }
2744 
2745 static int __rt_schedulable(struct task_group *tg, u64 period, u64 runtime)
2746 {
2747 	int ret;
2748 
2749 	struct rt_schedulable_data data = {
2750 		.tg = tg,
2751 		.rt_period = period,
2752 		.rt_runtime = runtime,
2753 	};
2754 
2755 	rcu_read_lock();
2756 	ret = walk_tg_tree(tg_rt_schedulable, tg_nop, &data);
2757 	rcu_read_unlock();
2758 
2759 	return ret;
2760 }
2761 
2762 static int tg_set_rt_bandwidth(struct task_group *tg,
2763 		u64 rt_period, u64 rt_runtime)
2764 {
2765 	int i, err = 0;
2766 
2767 	/*
2768 	 * Disallowing the root group RT runtime is BAD; it would prevent the
2769 	 * kernel from creating (and/or operating) RT threads.
2770 	 */
2771 	if (tg == &root_task_group && rt_runtime == 0)
2772 		return -EINVAL;
2773 
2774 	/* A zero period doesn't make any sense. */
2775 	if (rt_period == 0)
2776 		return -EINVAL;
2777 
2778 	/*
2779 	 * Bound the quota to defend against overflow during the bandwidth shift.
2780 	 */
2781 	if (rt_runtime != RUNTIME_INF && rt_runtime > max_rt_runtime)
2782 		return -EINVAL;
2783 
2784 	mutex_lock(&rt_constraints_mutex);
2785 	err = __rt_schedulable(tg, rt_period, rt_runtime);
2786 	if (err)
2787 		goto unlock;
2788 
2789 	raw_spin_lock_irq(&tg->rt_bandwidth.rt_runtime_lock);
2790 	tg->rt_bandwidth.rt_period = ns_to_ktime(rt_period);
2791 	tg->rt_bandwidth.rt_runtime = rt_runtime;
2792 
2793 	for_each_possible_cpu(i) {
2794 		struct rt_rq *rt_rq = tg->rt_rq[i];
2795 
2796 		raw_spin_lock(&rt_rq->rt_runtime_lock);
2797 		rt_rq->rt_runtime = rt_runtime;
2798 		raw_spin_unlock(&rt_rq->rt_runtime_lock);
2799 	}
2800 	raw_spin_unlock_irq(&tg->rt_bandwidth.rt_runtime_lock);
2801 unlock:
2802 	mutex_unlock(&rt_constraints_mutex);
2803 
2804 	return err;
2805 }
2806 
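/*
 * The group interface works in microseconds while rt_bandwidth stores
 * nanoseconds; a negative value written by userspace maps to RUNTIME_INF
 * (unlimited) and reads back as -1. Illustrative example: writing 500000
 * stores 500000000 ns, and writing -1 removes the per-group limit (the
 * global limit still applies).
 */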
2807 int sched_group_set_rt_runtime(struct task_group *tg, long rt_runtime_us)
2808 {
2809 	u64 rt_runtime, rt_period;
2810 
2811 	rt_period = ktime_to_ns(tg->rt_bandwidth.rt_period);
2812 	rt_runtime = (u64)rt_runtime_us * NSEC_PER_USEC;
2813 	if (rt_runtime_us < 0)
2814 		rt_runtime = RUNTIME_INF;
2815 	else if ((u64)rt_runtime_us > U64_MAX / NSEC_PER_USEC)
2816 		return -EINVAL;
2817 
2818 	return tg_set_rt_bandwidth(tg, rt_period, rt_runtime);
2819 }
2820 
2821 long sched_group_rt_runtime(struct task_group *tg)
2822 {
2823 	u64 rt_runtime_us;
2824 
2825 	if (tg->rt_bandwidth.rt_runtime == RUNTIME_INF)
2826 		return -1;
2827 
2828 	rt_runtime_us = tg->rt_bandwidth.rt_runtime;
2829 	do_div(rt_runtime_us, NSEC_PER_USEC);
2830 	return rt_runtime_us;
2831 }
2832 
2833 int sched_group_set_rt_period(struct task_group *tg, u64 rt_period_us)
2834 {
2835 	u64 rt_runtime, rt_period;
2836 
2837 	if (rt_period_us > U64_MAX / NSEC_PER_USEC)
2838 		return -EINVAL;
2839 
2840 	rt_period = rt_period_us * NSEC_PER_USEC;
2841 	rt_runtime = tg->rt_bandwidth.rt_runtime;
2842 
2843 	return tg_set_rt_bandwidth(tg, rt_period, rt_runtime);
2844 }
2845 
2846 long sched_group_rt_period(struct task_group *tg)
2847 {
2848 	u64 rt_period_us;
2849 
2850 	rt_period_us = ktime_to_ns(tg->rt_bandwidth.rt_period);
2851 	do_div(rt_period_us, NSEC_PER_USEC);
2852 	return rt_period_us;
2853 }
2854 
2855 #ifdef CONFIG_SYSCTL
2856 static int sched_rt_global_constraints(void)
2857 {
2858 	int ret = 0;
2859 
2860 	mutex_lock(&rt_constraints_mutex);
2861 	ret = __rt_schedulable(NULL, 0, 0);
2862 	mutex_unlock(&rt_constraints_mutex);
2863 
2864 	return ret;
2865 }
2866 #endif /* CONFIG_SYSCTL */
2867 
2868 int sched_rt_can_attach(struct task_group *tg, struct task_struct *tsk)
2869 {
2870 	/* Don't accept real-time tasks when there is no way for them to run */
2871 	if (rt_task(tsk) && tg->rt_bandwidth.rt_runtime == 0)
2872 		return 0;
2873 
2874 	return 1;
2875 }
2876 
2877 #else /* !CONFIG_RT_GROUP_SCHED */
2878 
2879 #ifdef CONFIG_SYSCTL
2880 static int sched_rt_global_constraints(void)
2881 {
2882 	return 0;
2883 }
2884 #endif /* CONFIG_SYSCTL */
2885 #endif /* CONFIG_RT_GROUP_SCHED */
2886 
2887 #ifdef CONFIG_SYSCTL
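/*
 * Validate the global sysctl pair before applying it: the runtime must not
 * exceed the period, and the runtime converted to nanoseconds must stay
 * below max_rt_runtime so later bandwidth shifts cannot overflow. As a
 * hedged example, the defaults (950000 us runtime, 1000000 us period) pass,
 * while a runtime larger than the period is rejected with -EINVAL.
 */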
2888 static int sched_rt_global_validate(void)
2889 {
2890 	if ((sysctl_sched_rt_runtime != RUNTIME_INF) &&
2891 		((sysctl_sched_rt_runtime > sysctl_sched_rt_period) ||
2892 		 ((u64)sysctl_sched_rt_runtime *
2893 			NSEC_PER_USEC > max_rt_runtime)))
2894 		return -EINVAL;
2895 
2896 	return 0;
2897 }
2898 
2899 static void sched_rt_do_global(void)
2900 {
2901 }
2902 
2903 static int sched_rt_handler(const struct ctl_table *table, int write, void *buffer,
2904 		size_t *lenp, loff_t *ppos)
2905 {
2906 	int old_period, old_runtime;
2907 	static DEFINE_MUTEX(mutex);
2908 	int ret;
2909 
2910 	mutex_lock(&mutex);
2911 	sched_domains_mutex_lock();
2912 	old_period = sysctl_sched_rt_period;
2913 	old_runtime = sysctl_sched_rt_runtime;
2914 
2915 	ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
2916 
2917 	if (!ret && write) {
2918 		ret = sched_rt_global_validate();
2919 		if (ret)
2920 			goto undo;
2921 
2922 		ret = sched_dl_global_validate();
2923 		if (ret)
2924 			goto undo;
2925 
2926 		ret = sched_rt_global_constraints();
2927 		if (ret)
2928 			goto undo;
2929 
2930 		sched_rt_do_global();
2931 		sched_dl_do_global();
2932 	}
2933 	if (0) {
2934 undo:
2935 		sysctl_sched_rt_period = old_period;
2936 		sysctl_sched_rt_runtime = old_runtime;
2937 	}
2938 	sched_domains_mutex_unlock();
2939 	mutex_unlock(&mutex);
2940 
2941 	return ret;
2942 }
2943 
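/*
 * sched_rr_timeslice_ms is exposed in milliseconds but the scheduler keeps
 * sched_rr_timeslice in jiffies. A non-positive write restores the
 * RR_TIMESLICE default. Hedged example (HZ is configuration dependent):
 * with HZ=250, writing 100 stores msecs_to_jiffies(100) == 25 jiffies, and
 * the sysctl then reads back as 100 ms.
 */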
2944 static int sched_rr_handler(const struct ctl_table *table, int write, void *buffer,
2945 		size_t *lenp, loff_t *ppos)
2946 {
2947 	int ret;
2948 	static DEFINE_MUTEX(mutex);
2949 
2950 	mutex_lock(&mutex);
2951 	ret = proc_dointvec(table, write, buffer, lenp, ppos);
2952 	/*
2953 	 * Internally the time-slice is kept in jiffies.
2954 	 * Also, writing zero or a negative value resets it to the default:
2955 	 */
2956 	if (!ret && write) {
2957 		sched_rr_timeslice =
2958 			sysctl_sched_rr_timeslice <= 0 ? RR_TIMESLICE :
2959 			msecs_to_jiffies(sysctl_sched_rr_timeslice);
2960 
2961 		if (sysctl_sched_rr_timeslice <= 0)
2962 			sysctl_sched_rr_timeslice = jiffies_to_msecs(RR_TIMESLICE);
2963 	}
2964 	mutex_unlock(&mutex);
2965 
2966 	return ret;
2967 }
2968 #endif /* CONFIG_SYSCTL */
2969 
2970 void print_rt_stats(struct seq_file *m, int cpu)
2971 {
2972 	rt_rq_iter_t iter;
2973 	struct rt_rq *rt_rq;
2974 
2975 	rcu_read_lock();
2976 	for_each_rt_rq(rt_rq, iter, cpu_rq(cpu))
2977 		print_rt_rq(m, cpu, rt_rq);
2978 	rcu_read_unlock();
2979 }
2980