// SPDX-License-Identifier: GPL-2.0
/*
 * Deadline Scheduling Class (SCHED_DEADLINE)
 *
 * Earliest Deadline First (EDF) + Constant Bandwidth Server (CBS).
 *
 * Tasks that periodically execute their instances for less than their
 * runtime won't miss any of their deadlines.
 * Tasks that are not periodic or sporadic or that try to execute more
 * than their reserved bandwidth will be slowed down (and may potentially
 * miss some of their deadlines), and won't affect any other task.
 *
 * Copyright (C) 2012 Dario Faggioli <raistlin@linux.it>,
 *                    Juri Lelli <juri.lelli@gmail.com>,
 *                    Michael Trimarchi <michael@amarulasolutions.com>,
 *                    Fabio Checconi <fchecconi@gmail.com>
 */

#include <linux/cpuset.h>
#include <linux/sched/clock.h>
#include <uapi/linux/sched/types.h>
#include "sched.h"
#include "pelt.h"

/*
 * Default limits for DL period; on the top end we guard against small util
 * tasks still getting ridiculously long effective runtimes, on the bottom end
 * we guard against timer DoS.
 */
static unsigned int sysctl_sched_dl_period_max = 1 << 22; /* ~4 seconds */
static unsigned int sysctl_sched_dl_period_min = 100;     /* 100 us */
32 #ifdef CONFIG_SYSCTL
33 static const struct ctl_table sched_dl_sysctls[] = {
34 {
35 .procname = "sched_deadline_period_max_us",
36 .data = &sysctl_sched_dl_period_max,
37 .maxlen = sizeof(unsigned int),
38 .mode = 0644,
39 .proc_handler = proc_douintvec_minmax,
40 .extra1 = (void *)&sysctl_sched_dl_period_min,
41 },
42 {
43 .procname = "sched_deadline_period_min_us",
44 .data = &sysctl_sched_dl_period_min,
45 .maxlen = sizeof(unsigned int),
46 .mode = 0644,
47 .proc_handler = proc_douintvec_minmax,
48 .extra2 = (void *)&sysctl_sched_dl_period_max,
49 },
50 };
51
static int __init sched_dl_sysctl_init(void)
53 {
54 register_sysctl_init("kernel", sched_dl_sysctls);
55 return 0;
56 }
57 late_initcall(sched_dl_sysctl_init);
58 #endif /* CONFIG_SYSCTL */
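
/*
 * Usage example (illustrative): with the defaults above, the admissible
 * SCHED_DEADLINE period window is [100us, ~4.19s].  Both knobs live in
 * the "kernel" sysctl directory registered above:
 *
 *   /proc/sys/kernel/sched_deadline_period_min_us
 *   /proc/sys/kernel/sched_deadline_period_max_us
 *
 * e.g. raising the upper bound to 10 seconds:
 *   # echo 10000000 > /proc/sys/kernel/sched_deadline_period_max_us
 */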
59
static bool dl_server(struct sched_dl_entity *dl_se)
61 {
62 return dl_se->dl_server;
63 }
64
static inline struct task_struct *dl_task_of(struct sched_dl_entity *dl_se)
66 {
67 BUG_ON(dl_server(dl_se));
68 return container_of(dl_se, struct task_struct, dl);
69 }
70
static inline struct rq *rq_of_dl_rq(struct dl_rq *dl_rq)
72 {
73 return container_of(dl_rq, struct rq, dl);
74 }
75
static inline struct rq *rq_of_dl_se(struct sched_dl_entity *dl_se)
77 {
78 struct rq *rq = dl_se->rq;
79
80 if (!dl_server(dl_se))
81 rq = task_rq(dl_task_of(dl_se));
82
83 return rq;
84 }
85
static inline struct dl_rq *dl_rq_of_se(struct sched_dl_entity *dl_se)
87 {
88 return &rq_of_dl_se(dl_se)->dl;
89 }
90
static inline int on_dl_rq(struct sched_dl_entity *dl_se)
92 {
93 return !RB_EMPTY_NODE(&dl_se->rb_node);
94 }
95
96 #ifdef CONFIG_RT_MUTEXES
static inline struct sched_dl_entity *pi_of(struct sched_dl_entity *dl_se)
98 {
99 return dl_se->pi_se;
100 }
101
static inline bool is_dl_boosted(struct sched_dl_entity *dl_se)
103 {
104 return pi_of(dl_se) != dl_se;
105 }
106 #else /* !CONFIG_RT_MUTEXES: */
static inline struct sched_dl_entity *pi_of(struct sched_dl_entity *dl_se)
108 {
109 return dl_se;
110 }
111
static inline bool is_dl_boosted(struct sched_dl_entity *dl_se)
113 {
114 return false;
115 }
116 #endif /* !CONFIG_RT_MUTEXES */
117
static inline struct dl_bw *dl_bw_of(int i)
119 {
120 RCU_LOCKDEP_WARN(!rcu_read_lock_sched_held(),
121 "sched RCU must be held");
122 return &cpu_rq(i)->rd->dl_bw;
123 }
124
static inline int dl_bw_cpus(int i)
126 {
127 struct root_domain *rd = cpu_rq(i)->rd;
128
129 RCU_LOCKDEP_WARN(!rcu_read_lock_sched_held(),
130 "sched RCU must be held");
131
132 return cpumask_weight_and(rd->span, cpu_active_mask);
133 }
134
static inline unsigned long __dl_bw_capacity(const struct cpumask *mask)
136 {
137 unsigned long cap = 0;
138 int i;
139
140 for_each_cpu_and(i, mask, cpu_active_mask)
141 cap += arch_scale_cpu_capacity(i);
142
143 return cap;
144 }
145
/*
 * XXX Fix: If 'rq->rd == def_root_domain' perform AC against capacity
 * of the CPU the task is running on rather than rd's \Sum CPU capacity.
 */
static inline unsigned long dl_bw_capacity(int i)
151 {
152 if (!sched_asym_cpucap_active() &&
153 arch_scale_cpu_capacity(i) == SCHED_CAPACITY_SCALE) {
154 return dl_bw_cpus(i) << SCHED_CAPACITY_SHIFT;
155 } else {
156 RCU_LOCKDEP_WARN(!rcu_read_lock_sched_held(),
157 "sched RCU must be held");
158
159 return __dl_bw_capacity(cpu_rq(i)->rd->span);
160 }
161 }
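
/*
 * Example (illustrative): on a symmetric system where every CPU has
 * capacity SCHED_CAPACITY_SCALE, the fast path above returns
 * dl_bw_cpus(i) << SCHED_CAPACITY_SHIFT, e.g. 4 CPUs -> 4096.  On an
 * asymmetric system the capacities of the active CPUs in the root
 * domain are summed instead, e.g. 2 * 1024 + 2 * 512 = 3072.
 */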
162
bool dl_bw_visited(int cpu, u64 cookie)
164 {
165 struct root_domain *rd = cpu_rq(cpu)->rd;
166
167 if (rd->visit_cookie == cookie)
168 return true;
169
170 rd->visit_cookie = cookie;
171 return false;
172 }
173
174 static inline
void __dl_update(struct dl_bw *dl_b, s64 bw)
176 {
177 struct root_domain *rd = container_of(dl_b, struct root_domain, dl_bw);
178 int i;
179
180 RCU_LOCKDEP_WARN(!rcu_read_lock_sched_held(),
181 "sched RCU must be held");
182 for_each_cpu_and(i, rd->span, cpu_active_mask) {
183 struct rq *rq = cpu_rq(i);
184
185 rq->dl.extra_bw += bw;
186 }
187 }
188
189 static inline
void __dl_sub(struct dl_bw *dl_b, u64 tsk_bw, int cpus)
191 {
192 dl_b->total_bw -= tsk_bw;
193 __dl_update(dl_b, (s32)tsk_bw / cpus);
194 }
195
196 static inline
void __dl_add(struct dl_bw *dl_b, u64 tsk_bw, int cpus)
198 {
199 dl_b->total_bw += tsk_bw;
200 __dl_update(dl_b, -((s32)tsk_bw / cpus));
201 }
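
/*
 * Example (illustrative): admitting a task with dl_bw = 0.3 << BW_SHIFT
 * on a 3-CPU root domain adds the whole 0.3 to dl_b->total_bw and lowers
 * each runqueue's extra_bw (the bandwidth GRUB may reclaim) by 0.1;
 * __dl_sub() does the reverse when the task leaves.
 */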
202
203 static inline bool
__dl_overflow(struct dl_bw *dl_b, unsigned long cap, u64 old_bw, u64 new_bw)
205 {
206 return dl_b->bw != -1 &&
207 cap_scale(dl_b->bw, cap) < dl_b->total_bw - old_bw + new_bw;
208 }
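
/*
 * Worked example (illustrative numbers): with the default 95% global
 * limit, dl_b->bw is about 0.95 << BW_SHIFT.  On two full-capacity CPUs,
 * cap = 2 << SCHED_CAPACITY_SHIFT, so cap_scale(dl_b->bw, cap) is about
 * 1.9 << BW_SHIFT.  Admitting a task with runtime 10ms / period 100ms
 * brings new_bw = 0.1 << BW_SHIFT, so the check only fails if the
 * bandwidth already admitted (total_bw - old_bw) exceeds ~1.8 << BW_SHIFT.
 */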
209
210 static inline
void __add_running_bw(u64 dl_bw, struct dl_rq *dl_rq)
212 {
213 u64 old = dl_rq->running_bw;
214
215 lockdep_assert_rq_held(rq_of_dl_rq(dl_rq));
216 dl_rq->running_bw += dl_bw;
217 WARN_ON_ONCE(dl_rq->running_bw < old); /* overflow */
218 WARN_ON_ONCE(dl_rq->running_bw > dl_rq->this_bw);
219 /* kick cpufreq (see the comment in kernel/sched/sched.h). */
220 cpufreq_update_util(rq_of_dl_rq(dl_rq), 0);
221 }
222
223 static inline
void __sub_running_bw(u64 dl_bw, struct dl_rq *dl_rq)
225 {
226 u64 old = dl_rq->running_bw;
227
228 lockdep_assert_rq_held(rq_of_dl_rq(dl_rq));
229 dl_rq->running_bw -= dl_bw;
230 WARN_ON_ONCE(dl_rq->running_bw > old); /* underflow */
231 if (dl_rq->running_bw > old)
232 dl_rq->running_bw = 0;
233 /* kick cpufreq (see the comment in kernel/sched/sched.h). */
234 cpufreq_update_util(rq_of_dl_rq(dl_rq), 0);
235 }
236
237 static inline
void __add_rq_bw(u64 dl_bw, struct dl_rq *dl_rq)
239 {
240 u64 old = dl_rq->this_bw;
241
242 lockdep_assert_rq_held(rq_of_dl_rq(dl_rq));
243 dl_rq->this_bw += dl_bw;
244 WARN_ON_ONCE(dl_rq->this_bw < old); /* overflow */
245 }
246
247 static inline
void __sub_rq_bw(u64 dl_bw, struct dl_rq *dl_rq)
249 {
250 u64 old = dl_rq->this_bw;
251
252 lockdep_assert_rq_held(rq_of_dl_rq(dl_rq));
253 dl_rq->this_bw -= dl_bw;
254 WARN_ON_ONCE(dl_rq->this_bw > old); /* underflow */
255 if (dl_rq->this_bw > old)
256 dl_rq->this_bw = 0;
257 WARN_ON_ONCE(dl_rq->running_bw > dl_rq->this_bw);
258 }
259
260 static inline
void add_rq_bw(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
262 {
263 if (!dl_entity_is_special(dl_se))
264 __add_rq_bw(dl_se->dl_bw, dl_rq);
265 }
266
267 static inline
void sub_rq_bw(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
269 {
270 if (!dl_entity_is_special(dl_se))
271 __sub_rq_bw(dl_se->dl_bw, dl_rq);
272 }
273
274 static inline
void add_running_bw(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
276 {
277 if (!dl_entity_is_special(dl_se))
278 __add_running_bw(dl_se->dl_bw, dl_rq);
279 }
280
281 static inline
void sub_running_bw(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
283 {
284 if (!dl_entity_is_special(dl_se))
285 __sub_running_bw(dl_se->dl_bw, dl_rq);
286 }
287
static void dl_rq_change_utilization(struct rq *rq, struct sched_dl_entity *dl_se, u64 new_bw)
{
	if (dl_se->dl_non_contending) {
		sub_running_bw(dl_se, &rq->dl);
		dl_se->dl_non_contending = 0;

		/*
		 * If the timer handler is currently running and the
		 * timer cannot be canceled, inactive_task_timer()
		 * will see that dl_non_contending is not set, and
		 * will not touch the rq's active utilization,
		 * so we are still safe.
		 */
		if (hrtimer_try_to_cancel(&dl_se->inactive_timer) == 1) {
			if (!dl_server(dl_se))
				put_task_struct(dl_task_of(dl_se));
		}
	}
	__sub_rq_bw(dl_se->dl_bw, &rq->dl);
	__add_rq_bw(new_bw, &rq->dl);
}
309
310 static __always_inline
void cancel_dl_timer(struct sched_dl_entity *dl_se, struct hrtimer *timer)
312 {
313 /*
314 * If the timer callback was running (hrtimer_try_to_cancel == -1),
315 * it will eventually call put_task_struct().
316 */
317 if (hrtimer_try_to_cancel(timer) == 1 && !dl_server(dl_se))
318 put_task_struct(dl_task_of(dl_se));
319 }
320
321 static __always_inline
void cancel_replenish_timer(struct sched_dl_entity *dl_se)
323 {
324 cancel_dl_timer(dl_se, &dl_se->dl_timer);
325 }
326
327 static __always_inline
void cancel_inactive_timer(struct sched_dl_entity *dl_se)
329 {
330 cancel_dl_timer(dl_se, &dl_se->inactive_timer);
331 }
332
static void dl_change_utilization(struct task_struct *p, u64 new_bw)
334 {
335 WARN_ON_ONCE(p->dl.flags & SCHED_FLAG_SUGOV);
336
337 if (task_on_rq_queued(p))
338 return;
339
340 dl_rq_change_utilization(task_rq(p), &p->dl, new_bw);
341 }
342
343 static void __dl_clear_params(struct sched_dl_entity *dl_se);
344
345 /*
346 * The utilization of a task cannot be immediately removed from
347 * the rq active utilization (running_bw) when the task blocks.
348 * Instead, we have to wait for the so called "0-lag time".
349 *
350 * If a task blocks before the "0-lag time", a timer (the inactive
351 * timer) is armed, and running_bw is decreased when the timer
352 * fires.
353 *
354 * If the task wakes up again before the inactive timer fires,
355 * the timer is canceled, whereas if the task wakes up after the
356 * inactive timer fired (and running_bw has been decreased) the
357 * task's utilization has to be added to running_bw again.
358 * A flag in the deadline scheduling entity (dl_non_contending)
359 * is used to avoid race conditions between the inactive timer handler
360 * and task wakeups.
361 *
362 * The following diagram shows how running_bw is updated. A task is
363 * "ACTIVE" when its utilization contributes to running_bw; an
364 * "ACTIVE contending" task is in the TASK_RUNNING state, while an
365 * "ACTIVE non contending" task is a blocked task for which the "0-lag time"
366 * has not passed yet. An "INACTIVE" task is a task for which the "0-lag"
367 * time already passed, which does not contribute to running_bw anymore.
368 * +------------------+
369 * wakeup | ACTIVE |
370 * +------------------>+ contending |
371 * | add_running_bw | |
372 * | +----+------+------+
373 * | | ^
374 * | dequeue | |
375 * +--------+-------+ | |
376 * | | t >= 0-lag | | wakeup
377 * | INACTIVE |<---------------+ |
378 * | | sub_running_bw | |
379 * +--------+-------+ | |
380 * ^ | |
381 * | t < 0-lag | |
382 * | | |
383 * | V |
384 * | +----+------+------+
385 * | sub_running_bw | ACTIVE |
386 * +-------------------+ |
387 * inactive timer | non contending |
388 * fired +------------------+
389 *
390 * The task_non_contending() function is invoked when a task
391 * blocks, and checks if the 0-lag time already passed or
392 * not (in the first case, it directly updates running_bw;
393 * in the second case, it arms the inactive timer).
394 *
395 * The task_contending() function is invoked when a task wakes
396 * up, and checks if the task is still in the "ACTIVE non contending"
397 * state or not (in the second case, it updates running_bw).
398 */
static void task_non_contending(struct sched_dl_entity *dl_se, bool dl_task)
400 {
401 struct hrtimer *timer = &dl_se->inactive_timer;
402 struct rq *rq = rq_of_dl_se(dl_se);
403 struct dl_rq *dl_rq = &rq->dl;
404 s64 zerolag_time;
405
406 /*
407 * If this is a non-deadline task that has been boosted,
408 * do nothing
409 */
410 if (dl_se->dl_runtime == 0)
411 return;
412
413 if (dl_entity_is_special(dl_se))
414 return;
415
416 WARN_ON(dl_se->dl_non_contending);
417
418 zerolag_time = dl_se->deadline -
419 div64_long((dl_se->runtime * dl_se->dl_period),
420 dl_se->dl_runtime);
421
	/*
	 * Using relative times instead of the absolute "0-lag time"
	 * simplifies the code.
	 */
426 zerolag_time -= rq_clock(rq);
427
428 /*
429 * If the "0-lag time" already passed, decrease the active
430 * utilization now, instead of starting a timer
431 */
432 if ((zerolag_time < 0) || hrtimer_active(&dl_se->inactive_timer)) {
433 if (dl_server(dl_se)) {
434 sub_running_bw(dl_se, dl_rq);
435 } else {
436 struct task_struct *p = dl_task_of(dl_se);
437
438 if (dl_task)
439 sub_running_bw(dl_se, dl_rq);
440
441 if (!dl_task || READ_ONCE(p->__state) == TASK_DEAD) {
442 struct dl_bw *dl_b = dl_bw_of(task_cpu(p));
443
444 if (READ_ONCE(p->__state) == TASK_DEAD)
445 sub_rq_bw(dl_se, &rq->dl);
446 raw_spin_lock(&dl_b->lock);
447 __dl_sub(dl_b, dl_se->dl_bw, dl_bw_cpus(task_cpu(p)));
448 raw_spin_unlock(&dl_b->lock);
449 __dl_clear_params(dl_se);
450 }
451 }
452
453 return;
454 }
455
456 dl_se->dl_non_contending = 1;
457 if (!dl_server(dl_se))
458 get_task_struct(dl_task_of(dl_se));
459
460 hrtimer_start(timer, ns_to_ktime(zerolag_time), HRTIMER_MODE_REL_HARD);
461 }
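
/*
 * 0-lag example (illustrative numbers): a task with dl_runtime = 10ms
 * and dl_period = 100ms (utilization 0.1) blocks with 4ms of runtime
 * left.  Its 0-lag time is deadline - 4ms * 100 / 10 = deadline - 40ms.
 * If that instant already passed (zerolag_time - rq_clock() < 0) the
 * active utilization is decreased right away, otherwise the inactive
 * timer is armed to fire exactly at the 0-lag point.
 */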
462
static void task_contending(struct sched_dl_entity *dl_se, int flags)
464 {
465 struct dl_rq *dl_rq = dl_rq_of_se(dl_se);
466
467 /*
468 * If this is a non-deadline task that has been boosted,
469 * do nothing
470 */
471 if (dl_se->dl_runtime == 0)
472 return;
473
474 if (flags & ENQUEUE_MIGRATED)
475 add_rq_bw(dl_se, dl_rq);
476
	if (dl_se->dl_non_contending) {
		dl_se->dl_non_contending = 0;
		/*
		 * If the timer handler is currently running and the
		 * timer cannot be canceled, inactive_task_timer()
		 * will see that dl_non_contending is not set, and
		 * will not touch the rq's active utilization,
		 * so we are still safe.
		 */
		cancel_inactive_timer(dl_se);
	} else {
		/*
		 * Since "dl_non_contending" is not set, the
		 * task's utilization has already been removed from
		 * active utilization (either when the task blocked,
		 * or when the "inactive timer" fired).
		 * So, add it back.
		 */
		add_running_bw(dl_se, dl_rq);
	}
497 }
498
static inline int is_leftmost(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
500 {
501 return rb_first_cached(&dl_rq->root) == &dl_se->rb_node;
502 }
503
504 static void init_dl_rq_bw_ratio(struct dl_rq *dl_rq);
505
void init_dl_bw(struct dl_bw *dl_b)
507 {
508 raw_spin_lock_init(&dl_b->lock);
509 if (global_rt_runtime() == RUNTIME_INF)
510 dl_b->bw = -1;
511 else
512 dl_b->bw = to_ratio(global_rt_period(), global_rt_runtime());
513 dl_b->total_bw = 0;
514 }
515
void init_dl_rq(struct dl_rq *dl_rq)
517 {
518 dl_rq->root = RB_ROOT_CACHED;
519
520 /* zero means no -deadline tasks */
521 dl_rq->earliest_dl.curr = dl_rq->earliest_dl.next = 0;
522
523 dl_rq->overloaded = 0;
524 dl_rq->pushable_dl_tasks_root = RB_ROOT_CACHED;
525
526 dl_rq->running_bw = 0;
527 dl_rq->this_bw = 0;
528 init_dl_rq_bw_ratio(dl_rq);
529 }
530
static inline int dl_overloaded(struct rq *rq)
532 {
533 return atomic_read(&rq->rd->dlo_count);
534 }
535
static inline void dl_set_overload(struct rq *rq)
537 {
538 if (!rq->online)
539 return;
540
541 cpumask_set_cpu(rq->cpu, rq->rd->dlo_mask);
542 /*
543 * Must be visible before the overload count is
544 * set (as in sched_rt.c).
545 *
546 * Matched by the barrier in pull_dl_task().
547 */
548 smp_wmb();
549 atomic_inc(&rq->rd->dlo_count);
550 }
551
static inline void dl_clear_overload(struct rq *rq)
553 {
554 if (!rq->online)
555 return;
556
557 atomic_dec(&rq->rd->dlo_count);
558 cpumask_clear_cpu(rq->cpu, rq->rd->dlo_mask);
559 }
560
561 #define __node_2_pdl(node) \
562 rb_entry((node), struct task_struct, pushable_dl_tasks)
563
static inline bool __pushable_less(struct rb_node *a, const struct rb_node *b)
565 {
566 return dl_entity_preempt(&__node_2_pdl(a)->dl, &__node_2_pdl(b)->dl);
567 }
568
static inline int has_pushable_dl_tasks(struct rq *rq)
570 {
571 return !RB_EMPTY_ROOT(&rq->dl.pushable_dl_tasks_root.rb_root);
572 }
573
/*
 * The list of pushable -deadline tasks is not a plist, like in
 * sched_rt.c, it is an rb-tree with tasks ordered by deadline.
 */
static void enqueue_pushable_dl_task(struct rq *rq, struct task_struct *p)
579 {
580 struct rb_node *leftmost;
581
582 WARN_ON_ONCE(!RB_EMPTY_NODE(&p->pushable_dl_tasks));
583
584 leftmost = rb_add_cached(&p->pushable_dl_tasks,
585 &rq->dl.pushable_dl_tasks_root,
586 __pushable_less);
587 if (leftmost)
588 rq->dl.earliest_dl.next = p->dl.deadline;
589
590 if (!rq->dl.overloaded) {
591 dl_set_overload(rq);
592 rq->dl.overloaded = 1;
593 }
594 }
595
static void dequeue_pushable_dl_task(struct rq *rq, struct task_struct *p)
597 {
598 struct dl_rq *dl_rq = &rq->dl;
599 struct rb_root_cached *root = &dl_rq->pushable_dl_tasks_root;
600 struct rb_node *leftmost;
601
602 if (RB_EMPTY_NODE(&p->pushable_dl_tasks))
603 return;
604
605 leftmost = rb_erase_cached(&p->pushable_dl_tasks, root);
606 if (leftmost)
607 dl_rq->earliest_dl.next = __node_2_pdl(leftmost)->dl.deadline;
608
609 RB_CLEAR_NODE(&p->pushable_dl_tasks);
610
611 if (!has_pushable_dl_tasks(rq) && rq->dl.overloaded) {
612 dl_clear_overload(rq);
613 rq->dl.overloaded = 0;
614 }
615 }
616
617 static int push_dl_task(struct rq *rq);
618
static inline bool need_pull_dl_task(struct rq *rq, struct task_struct *prev)
620 {
621 return rq->online && dl_task(prev);
622 }
623
624 static DEFINE_PER_CPU(struct balance_callback, dl_push_head);
625 static DEFINE_PER_CPU(struct balance_callback, dl_pull_head);
626
627 static void push_dl_tasks(struct rq *);
628 static void pull_dl_task(struct rq *);
629
static inline void deadline_queue_push_tasks(struct rq *rq)
631 {
632 if (!has_pushable_dl_tasks(rq))
633 return;
634
635 queue_balance_callback(rq, &per_cpu(dl_push_head, rq->cpu), push_dl_tasks);
636 }
637
static inline void deadline_queue_pull_task(struct rq *rq)
639 {
640 queue_balance_callback(rq, &per_cpu(dl_pull_head, rq->cpu), pull_dl_task);
641 }
642
643 static struct rq *find_lock_later_rq(struct task_struct *task, struct rq *rq);
644
static struct rq *dl_task_offline_migration(struct rq *rq, struct task_struct *p)
646 {
647 struct rq *later_rq = NULL;
648 struct dl_bw *dl_b;
649
650 later_rq = find_lock_later_rq(p, rq);
651 if (!later_rq) {
652 int cpu;
653
654 /*
655 * If we cannot preempt any rq, fall back to pick any
656 * online CPU:
657 */
658 cpu = cpumask_any_and(cpu_active_mask, p->cpus_ptr);
659 if (cpu >= nr_cpu_ids) {
660 /*
661 * Failed to find any suitable CPU.
662 * The task will never come back!
663 */
664 WARN_ON_ONCE(dl_bandwidth_enabled());
665
666 /*
667 * If admission control is disabled we
668 * try a little harder to let the task
669 * run.
670 */
671 cpu = cpumask_any(cpu_active_mask);
672 }
673 later_rq = cpu_rq(cpu);
674 double_lock_balance(rq, later_rq);
675 }
676
677 if (p->dl.dl_non_contending || p->dl.dl_throttled) {
		/*
		 * The inactive timer is armed (or its callback is running, but
		 * waiting for us to release rq locks). In any case, when it
		 * fires (or continues), it will see running_bw of this
		 * task migrated to later_rq (and correctly handle it).
		 */
684 sub_running_bw(&p->dl, &rq->dl);
685 sub_rq_bw(&p->dl, &rq->dl);
686
687 add_rq_bw(&p->dl, &later_rq->dl);
688 add_running_bw(&p->dl, &later_rq->dl);
689 } else {
690 sub_rq_bw(&p->dl, &rq->dl);
691 add_rq_bw(&p->dl, &later_rq->dl);
692 }
693
694 /*
695 * And we finally need to fix up root_domain(s) bandwidth accounting,
696 * since p is still hanging out in the old (now moved to default) root
697 * domain.
698 */
699 dl_b = &rq->rd->dl_bw;
700 raw_spin_lock(&dl_b->lock);
701 __dl_sub(dl_b, p->dl.dl_bw, cpumask_weight(rq->rd->span));
702 raw_spin_unlock(&dl_b->lock);
703
704 dl_b = &later_rq->rd->dl_bw;
705 raw_spin_lock(&dl_b->lock);
706 __dl_add(dl_b, p->dl.dl_bw, cpumask_weight(later_rq->rd->span));
707 raw_spin_unlock(&dl_b->lock);
708
709 set_task_cpu(p, later_rq->cpu);
710 double_unlock_balance(later_rq, rq);
711
712 return later_rq;
713 }
714
715 static void
716 enqueue_dl_entity(struct sched_dl_entity *dl_se, int flags);
717 static void enqueue_task_dl(struct rq *rq, struct task_struct *p, int flags);
718 static void dequeue_dl_entity(struct sched_dl_entity *dl_se, int flags);
719 static void wakeup_preempt_dl(struct rq *rq, struct task_struct *p, int flags);
720
static inline void replenish_dl_new_period(struct sched_dl_entity *dl_se,
					   struct rq *rq)
{
	/* for non-boosted task, pi_of(dl_se) == dl_se */
	dl_se->deadline = rq_clock(rq) + pi_of(dl_se)->dl_deadline;
	dl_se->runtime = pi_of(dl_se)->dl_runtime;

	/*
	 * If it is a deferred reservation, and the server
	 * is not handling a starvation case, defer it.
	 */
	if (dl_se->dl_defer && !dl_se->dl_defer_running) {
		dl_se->dl_throttled = 1;
		dl_se->dl_defer_armed = 1;
	}
}
737
/*
 * We are being explicitly informed that a new instance is starting,
 * and this means that:
 *  - the absolute deadline of the entity has to be placed at
 *    current time + relative deadline;
 *  - the runtime of the entity has to be set to the maximum value.
 *
 * The capability of specifying such an event is useful whenever a -deadline
 * entity wants to (try to!) synchronize its behaviour with the scheduler's
 * one, and to (try to!) reconcile itself with its own scheduling
 * parameters.
 */
static inline void setup_new_dl_entity(struct sched_dl_entity *dl_se)
751 {
752 struct dl_rq *dl_rq = dl_rq_of_se(dl_se);
753 struct rq *rq = rq_of_dl_rq(dl_rq);
754
755 WARN_ON(is_dl_boosted(dl_se));
756 WARN_ON(dl_time_before(rq_clock(rq), dl_se->deadline));
757
758 /*
759 * We are racing with the deadline timer. So, do nothing because
760 * the deadline timer handler will take care of properly recharging
761 * the runtime and postponing the deadline
762 */
763 if (dl_se->dl_throttled)
764 return;
765
766 /*
767 * We use the regular wall clock time to set deadlines in the
768 * future; in fact, we must consider execution overheads (time
769 * spent on hardirq context, etc.).
770 */
771 replenish_dl_new_period(dl_se, rq);
772 }
773
774 static int start_dl_timer(struct sched_dl_entity *dl_se);
775 static bool dl_entity_overflow(struct sched_dl_entity *dl_se, u64 t);
776
/*
 * Pure Earliest Deadline First (EDF) scheduling does not deal with the
 * possibility of an entity lasting more than what it declared, and thus
 * exhausting its runtime.
 *
 * Here we are interested in making runtime overrun possible, but we do
 * not want a misbehaving entity to affect the scheduling of all the
 * other entities.
 * Therefore, a budgeting strategy called Constant Bandwidth Server (CBS)
 * is used, in order to confine each entity within its own bandwidth.
 *
 * This function deals exactly with that, and ensures that when the runtime
 * of an entity is replenished, its deadline is also postponed. That ensures
 * the overrunning entity can't interfere with other entities in the system and
 * can't make them miss their deadlines. Reasons why this kind of overrun
 * could happen are, typically, an entity voluntarily trying to exceed its
 * runtime, or one that simply underestimated it during sched_setattr().
 */
static void replenish_dl_entity(struct sched_dl_entity *dl_se)
796 {
797 struct dl_rq *dl_rq = dl_rq_of_se(dl_se);
798 struct rq *rq = rq_of_dl_rq(dl_rq);
799
800 WARN_ON_ONCE(pi_of(dl_se)->dl_runtime <= 0);
801
802 /*
803 * This could be the case for a !-dl task that is boosted.
804 * Just go with full inherited parameters.
805 *
806 * Or, it could be the case of a deferred reservation that
807 * was not able to consume its runtime in background and
808 * reached this point with current u > U.
809 *
810 * In both cases, set a new period.
811 */
812 if (dl_se->dl_deadline == 0 ||
813 (dl_se->dl_defer_armed && dl_entity_overflow(dl_se, rq_clock(rq)))) {
814 dl_se->deadline = rq_clock(rq) + pi_of(dl_se)->dl_deadline;
815 dl_se->runtime = pi_of(dl_se)->dl_runtime;
816 }
817
818 if (dl_se->dl_yielded && dl_se->runtime > 0)
819 dl_se->runtime = 0;
820
821 /*
822 * We keep moving the deadline away until we get some
823 * available runtime for the entity. This ensures correct
824 * handling of situations where the runtime overrun is
825 * arbitrary large.
826 */
827 while (dl_se->runtime <= 0) {
828 dl_se->deadline += pi_of(dl_se)->dl_period;
829 dl_se->runtime += pi_of(dl_se)->dl_runtime;
830 }
831
	/*
	 * At this point, the deadline really should be "in
	 * the future" with respect to rq->clock. If it's
	 * not, we are, for some reason, lagging too much!
	 * Anyway, after having warned userspace about that,
	 * we still try to keep things running by
	 * resetting the deadline and the budget of the
	 * entity.
	 */
841 if (dl_time_before(dl_se->deadline, rq_clock(rq))) {
842 printk_deferred_once("sched: DL replenish lagged too much\n");
843 replenish_dl_new_period(dl_se, rq);
844 }
845
846 if (dl_se->dl_yielded)
847 dl_se->dl_yielded = 0;
848 if (dl_se->dl_throttled)
849 dl_se->dl_throttled = 0;
850
851 /*
852 * If this is the replenishment of a deferred reservation,
853 * clear the flag and return.
854 */
855 if (dl_se->dl_defer_armed) {
856 dl_se->dl_defer_armed = 0;
857 return;
858 }
859
	/*
	 * At this point, if the deferred server is not armed, the deadline
	 * is in the future and it is not running already, throttle the
	 * server and arm the defer timer.
	 */
865 if (dl_se->dl_defer && !dl_se->dl_defer_running &&
866 dl_time_before(rq_clock(dl_se->rq), dl_se->deadline - dl_se->runtime)) {
867 if (!is_dl_boosted(dl_se)) {
868
869 /*
870 * Set dl_se->dl_defer_armed and dl_throttled variables to
871 * inform the start_dl_timer() that this is a deferred
872 * activation.
873 */
874 dl_se->dl_defer_armed = 1;
875 dl_se->dl_throttled = 1;
876 if (!start_dl_timer(dl_se)) {
877 /*
878 * If for whatever reason (delays), a previous timer was
879 * queued but not serviced, cancel it and clean the
880 * deferrable server variables intended for start_dl_timer().
881 */
882 hrtimer_try_to_cancel(&dl_se->dl_timer);
883 dl_se->dl_defer_armed = 0;
884 dl_se->dl_throttled = 0;
885 }
886 }
887 }
888 }
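
/*
 * Replenishment example (illustrative numbers): an entity that overran
 * by 7ms (runtime = -7ms) with dl_runtime = 10ms and dl_period = 100ms
 * leaves the while loop in replenish_dl_entity() after one iteration
 * with runtime = 3ms and its deadline pushed 100ms further into the
 * future, so the overrun is paid back out of the entity's own future
 * bandwidth.
 */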
889
890 /*
891 * Here we check if --at time t-- an entity (which is probably being
892 * [re]activated or, in general, enqueued) can use its remaining runtime
893 * and its current deadline _without_ exceeding the bandwidth it is
894 * assigned (function returns true if it can't). We are in fact applying
895 * one of the CBS rules: when a task wakes up, if the residual runtime
896 * over residual deadline fits within the allocated bandwidth, then we
897 * can keep the current (absolute) deadline and residual budget without
898 * disrupting the schedulability of the system. Otherwise, we should
899 * refill the runtime and set the deadline a period in the future,
900 * because keeping the current (absolute) deadline of the task would
901 * result in breaking guarantees promised to other tasks (refer to
902 * Documentation/scheduler/sched-deadline.rst for more information).
903 *
904 * This function returns true if:
905 *
906 * runtime / (deadline - t) > dl_runtime / dl_deadline ,
907 *
908 * IOW we can't recycle current parameters.
909 *
 * Notice that the bandwidth check is done against the deadline. For
 * tasks with deadline equal to period this is the same as using
 * dl_period instead of dl_deadline in the equation above.
 */
static bool dl_entity_overflow(struct sched_dl_entity *dl_se, u64 t)
915 {
916 u64 left, right;
917
918 /*
919 * left and right are the two sides of the equation above,
920 * after a bit of shuffling to use multiplications instead
921 * of divisions.
922 *
923 * Note that none of the time values involved in the two
924 * multiplications are absolute: dl_deadline and dl_runtime
925 * are the relative deadline and the maximum runtime of each
926 * instance, runtime is the runtime left for the last instance
927 * and (deadline - t), since t is rq->clock, is the time left
928 * to the (absolute) deadline. Even if overflowing the u64 type
929 * is very unlikely to occur in both cases, here we scale down
930 * as we want to avoid that risk at all. Scaling down by 10
931 * means that we reduce granularity to 1us. We are fine with it,
932 * since this is only a true/false check and, anyway, thinking
933 * of anything below microseconds resolution is actually fiction
934 * (but still we want to give the user that illusion >;).
935 */
936 left = (pi_of(dl_se)->dl_deadline >> DL_SCALE) * (dl_se->runtime >> DL_SCALE);
937 right = ((dl_se->deadline - t) >> DL_SCALE) *
938 (pi_of(dl_se)->dl_runtime >> DL_SCALE);
939
940 return dl_time_before(right, left);
941 }
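
/*
 * Example (illustrative numbers): dl_runtime = 10ms, dl_deadline = 100ms.
 * The entity wakes with 6ms of runtime left and 30ms until its current
 * absolute deadline: left ~ 100ms * 6ms, right ~ 30ms * 10ms, so
 * left > right.  Keeping the old deadline would let it use 6/30 = 20%
 * of the CPU while only 10% is reserved, hence the function returns
 * true and the parameters are refreshed.
 */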
942
/*
 * Revised wakeup rule [1]: For self-suspending tasks, rather than
 * re-initializing the task's runtime and deadline, the revised wakeup
 * rule adjusts the task's runtime to avoid the task overrunning its
 * density.
 *
 * Reasoning: a task may overrun the density if:
 *	runtime / (deadline - t) > dl_runtime / dl_deadline
 *
 * Therefore, runtime can be adjusted to:
 *	runtime = (dl_runtime / dl_deadline) * (deadline - t)
 *
 * In such a way that runtime will be equal to the maximum density
 * the task can use without breaking any rule.
 *
 * [1] Luca Abeni, Giuseppe Lipari, and Juri Lelli. 2015. Constant
 * bandwidth server revisited. SIGBED Rev. 11, 4 (January 2015), 19-24.
 */
static void
update_dl_revised_wakeup(struct sched_dl_entity *dl_se, struct rq *rq)
963 {
964 u64 laxity = dl_se->deadline - rq_clock(rq);
965
966 /*
967 * If the task has deadline < period, and the deadline is in the past,
968 * it should already be throttled before this check.
969 *
970 * See update_dl_entity() comments for further details.
971 */
972 WARN_ON(dl_time_before(dl_se->deadline, rq_clock(rq)));
973
974 dl_se->runtime = (dl_se->dl_density * laxity) >> BW_SHIFT;
975 }
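
/*
 * Example (illustrative numbers): dl_runtime = 10ms and dl_deadline =
 * 100ms give dl_density of about 0.1 << BW_SHIFT.  Waking up 30ms before
 * the (still valid) absolute deadline, the task is handed runtime =
 * 0.1 * 30ms = 3ms instead of a full 10ms replenishment, so it cannot
 * exceed its declared density over the remaining laxity.
 */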
976
977 /*
978 * Regarding the deadline, a task with implicit deadline has a relative
979 * deadline == relative period. A task with constrained deadline has a
980 * relative deadline <= relative period.
981 *
982 * We support constrained deadline tasks. However, there are some restrictions
983 * applied only for tasks which do not have an implicit deadline. See
984 * update_dl_entity() to know more about such restrictions.
985 *
 * dl_is_implicit() returns true if the task has an implicit deadline.
 */
static inline bool dl_is_implicit(struct sched_dl_entity *dl_se)
989 {
990 return dl_se->dl_deadline == dl_se->dl_period;
991 }
992
/*
 * When a deadline entity is placed in the runqueue, its runtime and deadline
 * might need to be updated. This is done by a CBS wake up rule. There are two
 * different rules: 1) the original CBS; and 2) the Revisited CBS.
 *
 * When the task is starting a new period, the Original CBS is used. In this
 * case, the runtime is replenished and a new absolute deadline is set.
 *
 * When a task is queued before the beginning of the next period, using the
 * remaining runtime and deadline could make the entity overflow, see
 * dl_entity_overflow() to find out more about runtime overflow. When such a
 * case is detected, the runtime and deadline need to be updated.
 *
 * If the task has an implicit deadline, i.e., deadline == period, the Original
 * CBS is applied. The runtime is replenished and a new absolute deadline is
 * set, as in the previous cases.
 *
 * However, the Original CBS does not work properly for tasks with
 * deadline < period, which are said to have a constrained deadline. By
 * applying the Original CBS, a constrained deadline task would be able to run
 * runtime/deadline in a period. With deadline < period, the task would
 * overrun the runtime/period allowed bandwidth, breaking the admission test.
 *
 * In order to prevent this misbehaviour, the Revisited CBS is used for
 * constrained deadline tasks when a runtime overflow is detected. In the
 * Revisited CBS, rather than replenishing & setting a new absolute deadline,
 * the remaining runtime of the task is reduced to avoid runtime overflow.
 * Please refer to the comments in the update_dl_revised_wakeup() function to
 * find out more about the Revised CBS rule.
 */
static void update_dl_entity(struct sched_dl_entity *dl_se)
1024 {
1025 struct rq *rq = rq_of_dl_se(dl_se);
1026
1027 if (dl_time_before(dl_se->deadline, rq_clock(rq)) ||
1028 dl_entity_overflow(dl_se, rq_clock(rq))) {
1029
1030 if (unlikely((!dl_is_implicit(dl_se) || dl_se->dl_defer) &&
1031 !dl_time_before(dl_se->deadline, rq_clock(rq)) &&
1032 !is_dl_boosted(dl_se))) {
1033 update_dl_revised_wakeup(dl_se, rq);
1034 return;
1035 }
1036
1037 /*
1038 * When [4] D->A is followed by [1] A->B, dl_defer_running
1039 * needs to be cleared, otherwise it will fail to properly
1040 * start the zero-laxity timer.
1041 */
1042 dl_se->dl_defer_running = 0;
1043 replenish_dl_new_period(dl_se, rq);
1044 } else if (dl_server(dl_se) && dl_se->dl_defer) {
1045 /*
1046 * The server can still use its previous deadline, so check if
1047 * it left the dl_defer_running state.
1048 */
1049 if (!dl_se->dl_defer_running) {
1050 dl_se->dl_defer_armed = 1;
1051 dl_se->dl_throttled = 1;
1052 }
1053 }
1054 }
1055
static inline u64 dl_next_period(struct sched_dl_entity *dl_se)
1057 {
1058 return dl_se->deadline - dl_se->dl_deadline + dl_se->dl_period;
1059 }
1060
1061 /*
1062 * If the entity depleted all its runtime, and if we want it to sleep
1063 * while waiting for some new execution time to become available, we
1064 * set the bandwidth replenishment timer to the replenishment instant
1065 * and try to activate it.
1066 *
1067 * Notice that it is important for the caller to know if the timer
1068 * actually started or not (i.e., the replenishment instant is in
1069 * the future or in the past).
1070 */
static int start_dl_timer(struct sched_dl_entity *dl_se)
1072 {
1073 struct hrtimer *timer = &dl_se->dl_timer;
1074 struct dl_rq *dl_rq = dl_rq_of_se(dl_se);
1075 struct rq *rq = rq_of_dl_rq(dl_rq);
1076 ktime_t now, act;
1077 s64 delta;
1078
1079 lockdep_assert_rq_held(rq);
1080
1081 /*
1082 * We want the timer to fire at the deadline, but considering
1083 * that it is actually coming from rq->clock and not from
1084 * hrtimer's time base reading.
1085 *
	 * The deferred reservation will have its timer set to
	 * (deadline - runtime). At that point, the CBS rule will decide
	 * if the current deadline can be used, or if a replenishment is
	 * required to avoid adding too much pressure on the system
	 * (current u > U).
	 */
1092 if (dl_se->dl_defer_armed) {
1093 WARN_ON_ONCE(!dl_se->dl_throttled);
1094 act = ns_to_ktime(dl_se->deadline - dl_se->runtime);
1095 } else {
1096 /* act = deadline - rel-deadline + period */
1097 act = ns_to_ktime(dl_next_period(dl_se));
1098 }
1099
1100 now = ktime_get();
1101 delta = ktime_to_ns(now) - rq_clock(rq);
1102 act = ktime_add_ns(act, delta);
1103
1104 /*
1105 * If the expiry time already passed, e.g., because the value
1106 * chosen as the deadline is too small, don't even try to
1107 * start the timer in the past!
1108 */
1109 if (ktime_us_delta(act, now) < 0)
1110 return 0;
1111
1112 /*
1113 * !enqueued will guarantee another callback; even if one is already in
1114 * progress. This ensures a balanced {get,put}_task_struct().
1115 *
1116 * The race against __run_timer() clearing the enqueued state is
1117 * harmless because we're holding task_rq()->lock, therefore the timer
1118 * expiring after we've done the check will wait on its task_rq_lock()
1119 * and observe our state.
1120 */
1121 if (!hrtimer_is_queued(timer)) {
1122 if (!dl_server(dl_se))
1123 get_task_struct(dl_task_of(dl_se));
1124 hrtimer_start(timer, act, HRTIMER_MODE_ABS_HARD);
1125 }
1126
1127 return 1;
1128 }
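
/*
 * Timer-base example (illustrative numbers): suppose the replenishment
 * instant is act = 150ms in rq_clock() terms, while rq_clock() currently
 * reads 100ms and ktime_get() reads 103ms (3ms of unaccounted drift).
 * The code above rebases act to 153ms in CLOCK_MONOTONIC time, so the
 * timer still fires 50ms from "now" as measured by rq_clock().
 */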
1129
static void __push_dl_task(struct rq *rq, struct rq_flags *rf)
1131 {
1132 /*
1133 * Queueing this task back might have overloaded rq, check if we need
1134 * to kick someone away.
1135 */
1136 if (has_pushable_dl_tasks(rq)) {
		/*
		 * Nothing relies on rq->lock after this, so it's safe to drop
		 * rq->lock.
		 */
1141 rq_unpin_lock(rq, rf);
1142 push_dl_task(rq);
1143 rq_repin_lock(rq, rf);
1144 }
1145 }
1146
1147 /* a defer timer will not be reset if the runtime consumed was < dl_server_min_res */
1148 static const u64 dl_server_min_res = 1 * NSEC_PER_MSEC;
1149
static enum hrtimer_restart dl_server_timer(struct hrtimer *timer, struct sched_dl_entity *dl_se)
1151 {
1152 struct rq *rq = rq_of_dl_se(dl_se);
1153 u64 fw;
1154
1155 scoped_guard (rq_lock, rq) {
1156 struct rq_flags *rf = &scope.rf;
1157
1158 if (!dl_se->dl_throttled || !dl_se->dl_runtime)
1159 return HRTIMER_NORESTART;
1160
1161 sched_clock_tick();
1162 update_rq_clock(rq);
1163
1164 /*
1165 * Make sure current has propagated its pending runtime into
1166 * any relevant server through calling dl_server_update() and
1167 * friends.
1168 */
1169 rq->donor->sched_class->update_curr(rq);
1170
1171 if (dl_se->dl_defer_idle) {
1172 dl_server_stop(dl_se);
1173 return HRTIMER_NORESTART;
1174 }
1175
1176 if (dl_se->dl_defer_armed) {
			/*
			 * First check if the server could consume runtime in the
			 * background. If so, it is possible to push the defer timer
			 * for that amount of time. The dl_server_min_res serves as a
			 * limit to avoid forwarding the timer for too small an amount
			 * of time.
			 */
1183 if (dl_time_before(rq_clock(dl_se->rq),
1184 (dl_se->deadline - dl_se->runtime - dl_server_min_res))) {
1185
1186 /* reset the defer timer */
1187 fw = dl_se->deadline - rq_clock(dl_se->rq) - dl_se->runtime;
1188
1189 hrtimer_forward_now(timer, ns_to_ktime(fw));
1190 return HRTIMER_RESTART;
1191 }
1192
1193 dl_se->dl_defer_running = 1;
1194 }
1195
1196 enqueue_dl_entity(dl_se, ENQUEUE_REPLENISH);
1197
1198 if (!dl_task(dl_se->rq->curr) || dl_entity_preempt(dl_se, &dl_se->rq->curr->dl))
1199 resched_curr(rq);
1200
1201 __push_dl_task(rq, rf);
1202 }
1203
1204 return HRTIMER_NORESTART;
1205 }
1206
/*
 * This is the bandwidth enforcement timer callback. If here, we know
 * a task is not on its dl_rq, since the fact that the timer was running
 * means the task is throttled and needs a runtime replenishment.
 *
 * However, what we actually do depends on whether the task is still active
 * (it is on its rq) or has been removed from there by a call to
 * dequeue_task_dl(). In the former case we must issue the runtime
 * replenishment and add the task back to the dl_rq; in the latter, we just
 * do nothing but clearing dl_throttled, so that runtime and deadline
 * updating (and the queueing back to dl_rq) will be done by the
 * next call to enqueue_task_dl().
 */
static enum hrtimer_restart dl_task_timer(struct hrtimer *timer)
1221 {
1222 struct sched_dl_entity *dl_se = container_of(timer,
1223 struct sched_dl_entity,
1224 dl_timer);
1225 struct task_struct *p;
1226 struct rq_flags rf;
1227 struct rq *rq;
1228
1229 if (dl_server(dl_se))
1230 return dl_server_timer(timer, dl_se);
1231
1232 p = dl_task_of(dl_se);
1233 rq = task_rq_lock(p, &rf);
1234
1235 /*
1236 * The task might have changed its scheduling policy to something
1237 * different than SCHED_DEADLINE (through switched_from_dl()).
1238 */
1239 if (!dl_task(p))
1240 goto unlock;
1241
	/*
	 * The task might have been boosted by someone else and might be in the
	 * boosting/deboosting path; in that case it's not throttled.
	 */
1246 if (is_dl_boosted(dl_se))
1247 goto unlock;
1248
1249 /*
1250 * Spurious timer due to start_dl_timer() race; or we already received
1251 * a replenishment from rt_mutex_setprio().
1252 */
1253 if (!dl_se->dl_throttled)
1254 goto unlock;
1255
1256 sched_clock_tick();
1257 update_rq_clock(rq);
1258
1259 /*
1260 * If the throttle happened during sched-out; like:
1261 *
1262 * schedule()
1263 * deactivate_task()
1264 * dequeue_task_dl()
1265 * update_curr_dl()
1266 * start_dl_timer()
1267 * __dequeue_task_dl()
1268 * prev->on_rq = 0;
1269 *
1270 * We can be both throttled and !queued. Replenish the counter
1271 * but do not enqueue -- wait for our wakeup to do that.
1272 */
1273 if (!task_on_rq_queued(p)) {
1274 replenish_dl_entity(dl_se);
1275 goto unlock;
1276 }
1277
1278 if (unlikely(!rq->online)) {
1279 /*
1280 * If the runqueue is no longer available, migrate the
1281 * task elsewhere. This necessarily changes rq.
1282 */
1283 lockdep_unpin_lock(__rq_lockp(rq), rf.cookie);
1284 rq = dl_task_offline_migration(rq, p);
1285 rf.cookie = lockdep_pin_lock(__rq_lockp(rq));
1286 update_rq_clock(rq);
1287
1288 /*
1289 * Now that the task has been migrated to the new RQ and we
1290 * have that locked, proceed as normal and enqueue the task
1291 * there.
1292 */
1293 }
1294
1295 enqueue_task_dl(rq, p, ENQUEUE_REPLENISH);
1296 if (dl_task(rq->donor))
1297 wakeup_preempt_dl(rq, p, 0);
1298 else
1299 resched_curr(rq);
1300
1301 __push_dl_task(rq, &rf);
1302
1303 unlock:
1304 task_rq_unlock(rq, p, &rf);
1305
1306 /*
1307 * This can free the task_struct, including this hrtimer, do not touch
1308 * anything related to that after this.
1309 */
1310 put_task_struct(p);
1311
1312 return HRTIMER_NORESTART;
1313 }
1314
static void init_dl_task_timer(struct sched_dl_entity *dl_se)
1316 {
1317 struct hrtimer *timer = &dl_se->dl_timer;
1318
1319 hrtimer_setup(timer, dl_task_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL_HARD);
1320 }
1321
1322 /*
1323 * During the activation, CBS checks if it can reuse the current task's
1324 * runtime and period. If the deadline of the task is in the past, CBS
1325 * cannot use the runtime, and so it replenishes the task. This rule
1326 * works fine for implicit deadline tasks (deadline == period), and the
1327 * CBS was designed for implicit deadline tasks. However, a task with
1328 * constrained deadline (deadline < period) might be awakened after the
1329 * deadline, but before the next period. In this case, replenishing the
1330 * task would allow it to run for runtime / deadline. As in this case
1331 * deadline < period, CBS enables a task to run for more than the
1332 * runtime / period. In a very loaded system, this can cause a domino
1333 * effect, making other tasks miss their deadlines.
1334 *
 * To avoid this problem, in the activation of a constrained deadline
 * task after the deadline but before the next period, throttle the
 * task and set the replenishing timer to the beginning of the next
 * period, unless it is boosted.
1339 */
static inline void dl_check_constrained_dl(struct sched_dl_entity *dl_se)
1341 {
1342 struct rq *rq = rq_of_dl_se(dl_se);
1343
1344 if (dl_time_before(dl_se->deadline, rq_clock(rq)) &&
1345 dl_time_before(rq_clock(rq), dl_next_period(dl_se))) {
1346 if (unlikely(is_dl_boosted(dl_se) || !start_dl_timer(dl_se)))
1347 return;
1348 dl_se->dl_throttled = 1;
1349 if (dl_se->runtime > 0)
1350 dl_se->runtime = 0;
1351 }
1352 }
1353
1354 static
int dl_runtime_exceeded(struct sched_dl_entity *dl_se)
1356 {
1357 return (dl_se->runtime <= 0);
1358 }
1359
1360 /*
1361 * This function implements the GRUB accounting rule. According to the
1362 * GRUB reclaiming algorithm, the runtime is not decreased as "dq = -dt",
1363 * but as "dq = -(max{u, (Umax - Uinact - Uextra)} / Umax) dt",
1364 * where u is the utilization of the task, Umax is the maximum reclaimable
1365 * utilization, Uinact is the (per-runqueue) inactive utilization, computed
1366 * as the difference between the "total runqueue utilization" and the
1367 * "runqueue active utilization", and Uextra is the (per runqueue) extra
1368 * reclaimable utilization.
1369 * Since rq->dl.running_bw and rq->dl.this_bw contain utilizations multiplied
1370 * by 2^BW_SHIFT, the result has to be shifted right by BW_SHIFT.
1371 * Since rq->dl.bw_ratio contains 1 / Umax multiplied by 2^RATIO_SHIFT, dl_bw
1372 * is multiplied by rq->dl.bw_ratio and shifted right by RATIO_SHIFT.
1373 * Since delta is a 64 bit variable, to have an overflow its value should be
1374 * larger than 2^(64 - 20 - 8), which is more than 64 seconds. So, overflow is
1375 * not an issue here.
1376 */
static u64 grub_reclaim(u64 delta, struct rq *rq, struct sched_dl_entity *dl_se)
1378 {
1379 u64 u_act;
1380 u64 u_inact = rq->dl.this_bw - rq->dl.running_bw; /* Utot - Uact */
1381
1382 /*
1383 * Instead of computing max{u, (u_max - u_inact - u_extra)}, we
1384 * compare u_inact + u_extra with u_max - u, because u_inact + u_extra
1385 * can be larger than u_max. So, u_max - u_inact - u_extra would be
1386 * negative leading to wrong results.
1387 */
1388 if (u_inact + rq->dl.extra_bw > rq->dl.max_bw - dl_se->dl_bw)
1389 u_act = dl_se->dl_bw;
1390 else
1391 u_act = rq->dl.max_bw - u_inact - rq->dl.extra_bw;
1392
1393 u_act = (u_act * rq->dl.bw_ratio) >> RATIO_SHIFT;
1394 return (delta * u_act) >> BW_SHIFT;
1395 }
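
/*
 * GRUB example (purely illustrative numbers): Umax = 0.95 (max_bw), the
 * entity's own bandwidth is 0.25, and u_inact + extra_bw = 0.45.  Since
 * 0.45 <= 0.95 - 0.25, the else branch is taken: u_act = 0.95 - 0.45 =
 * 0.5, then scaled by bw_ratio (~1/0.95) to ~0.53.  Each millisecond of
 * execution is thus charged as only ~0.53ms of runtime, letting the
 * entity run longer than plain CBS (dq = -dt) and reclaim bandwidth
 * that other entities left unused.
 */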
1396
s64 dl_scaled_delta_exec(struct rq *rq, struct sched_dl_entity *dl_se, s64 delta_exec)
1398 {
1399 s64 scaled_delta_exec;
1400
1401 /*
1402 * For tasks that participate in GRUB, we implement GRUB-PA: the
1403 * spare reclaimed bandwidth is used to clock down frequency.
1404 *
1405 * For the others, we still need to scale reservation parameters
1406 * according to current frequency and CPU maximum capacity.
1407 */
1408 if (unlikely(dl_se->flags & SCHED_FLAG_RECLAIM)) {
1409 scaled_delta_exec = grub_reclaim(delta_exec, rq, dl_se);
1410 } else {
1411 int cpu = cpu_of(rq);
1412 unsigned long scale_freq = arch_scale_freq_capacity(cpu);
1413 unsigned long scale_cpu = arch_scale_cpu_capacity(cpu);
1414
1415 scaled_delta_exec = cap_scale(delta_exec, scale_freq);
1416 scaled_delta_exec = cap_scale(scaled_delta_exec, scale_cpu);
1417 }
1418
1419 return scaled_delta_exec;
1420 }
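
/*
 * Scaling example (illustrative numbers): on a CPU running at half of
 * its maximum frequency (scale_freq = 512) whose maximum capacity is
 * 75% of the biggest CPU (scale_cpu = 768), 1ms of elapsed time is
 * charged as 1ms * 512/1024 * 768/1024 ~= 0.375ms of runtime,
 * reflecting how much work was actually completed.
 */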
1421
1422 static inline void
1423 update_stats_dequeue_dl(struct dl_rq *dl_rq, struct sched_dl_entity *dl_se, int flags);
1424
static void update_curr_dl_se(struct rq *rq, struct sched_dl_entity *dl_se, s64 delta_exec)
1426 {
1427 bool idle = idle_rq(rq);
1428 s64 scaled_delta_exec;
1429
1430 if (unlikely(delta_exec <= 0)) {
1431 if (unlikely(dl_se->dl_yielded))
1432 goto throttle;
1433 return;
1434 }
1435
1436 if (dl_server(dl_se) && dl_se->dl_throttled && !dl_se->dl_defer)
1437 return;
1438
1439 if (dl_entity_is_special(dl_se))
1440 return;
1441
1442 scaled_delta_exec = delta_exec;
1443 if (!dl_server(dl_se))
1444 scaled_delta_exec = dl_scaled_delta_exec(rq, dl_se, delta_exec);
1445
1446 dl_se->runtime -= scaled_delta_exec;
1447
1448 if (dl_se->dl_defer_idle && !idle)
1449 dl_se->dl_defer_idle = 0;
1450
	/*
	 * The DL server can consume its runtime while throttled (not
	 * queued / running as regular CFS).
	 *
	 * If the server consumes its entire runtime in this state, the server
	 * is not required for the current period. Thus, reset the server by
	 * starting a new period, pushing the activation.
	 */
1459 if (dl_se->dl_defer && dl_se->dl_throttled && dl_runtime_exceeded(dl_se)) {
1460 /*
1461 * Non-servers would never get time accounted while throttled.
1462 */
1463 WARN_ON_ONCE(!dl_server(dl_se));
1464
1465 /*
1466 * While the server is marked idle, do not push out the
1467 * activation further, instead wait for the period timer
1468 * to lapse and stop the server.
1469 */
1470 if (dl_se->dl_defer_idle && idle) {
1471 /*
1472 * The timer is at the zero-laxity point, this means
1473 * dl_server_stop() / dl_server_start() can happen
1474 * while now < deadline. This means update_dl_entity()
1475 * will not replenish. Additionally start_dl_timer()
1476 * will be set for 'deadline - runtime'. Negative
1477 * runtime will not do.
1478 */
1479 dl_se->runtime = 0;
1480 return;
1481 }
1482
		/*
		 * If the server was previously activated - i.e. the starving
		 * condition took place - at this point it went away because the
		 * fair scheduler was able to get runtime in the background. So
		 * return to the initial state.
		 */
1489 dl_se->dl_defer_running = 0;
1490
1491 hrtimer_try_to_cancel(&dl_se->dl_timer);
1492
1493 replenish_dl_new_period(dl_se, dl_se->rq);
1494
1495 if (idle)
1496 dl_se->dl_defer_idle = 1;
1497
1498 /*
1499 * Not being able to start the timer seems problematic. If it could not
1500 * be started for whatever reason, we need to "unthrottle" the DL server
1501 * and queue right away. Otherwise nothing might queue it. That's similar
1502 * to what enqueue_dl_entity() does on start_dl_timer==0. For now, just warn.
1503 */
1504 WARN_ON_ONCE(!start_dl_timer(dl_se));
1505
1506 return;
1507 }
1508
1509 throttle:
1510 if (dl_runtime_exceeded(dl_se) || dl_se->dl_yielded) {
1511 dl_se->dl_throttled = 1;
1512
1513 /* If requested, inform the user about runtime overruns. */
1514 if (dl_runtime_exceeded(dl_se) &&
1515 (dl_se->flags & SCHED_FLAG_DL_OVERRUN))
1516 dl_se->dl_overrun = 1;
1517
1518 dequeue_dl_entity(dl_se, 0);
1519 if (!dl_server(dl_se)) {
1520 update_stats_dequeue_dl(&rq->dl, dl_se, 0);
1521 dequeue_pushable_dl_task(rq, dl_task_of(dl_se));
1522 }
1523
1524 if (unlikely(is_dl_boosted(dl_se) || !start_dl_timer(dl_se))) {
1525 if (dl_server(dl_se)) {
1526 replenish_dl_new_period(dl_se, rq);
1527 start_dl_timer(dl_se);
1528 } else {
1529 enqueue_task_dl(rq, dl_task_of(dl_se), ENQUEUE_REPLENISH);
1530 }
1531 }
1532
1533 if (!is_leftmost(dl_se, &rq->dl))
1534 resched_curr(rq);
1535 }
1536
1537 /*
1538 * The dl_server does not account for real-time workload because it
1539 * is running fair work.
1540 */
1541 if (dl_se->dl_server)
1542 return;
1543
1544 #ifdef CONFIG_RT_GROUP_SCHED
1545 /*
1546 * Because -- for now -- we share the rt bandwidth, we need to
1547 * account our runtime there too, otherwise actual rt tasks
1548 * would be able to exceed the shared quota.
1549 *
1550 * Account to the root rt group for now.
1551 *
1552 * The solution we're working towards is having the RT groups scheduled
1553 * using deadline servers -- however there's a few nasties to figure
1554 * out before that can happen.
1555 */
1556 if (rt_bandwidth_enabled()) {
1557 struct rt_rq *rt_rq = &rq->rt;
1558
1559 raw_spin_lock(&rt_rq->rt_runtime_lock);
1560 /*
1561 * We'll let actual RT tasks worry about the overflow here, we
1562 * have our own CBS to keep us inline; only account when RT
1563 * bandwidth is relevant.
1564 */
1565 if (sched_rt_bandwidth_account(rt_rq))
1566 rt_rq->rt_time += delta_exec;
1567 raw_spin_unlock(&rt_rq->rt_runtime_lock);
1568 }
1569 #endif /* CONFIG_RT_GROUP_SCHED */
1570 }
1571
/*
 * In the non-defer mode, the idle time is not accounted, as the
 * server provides a guarantee.
 *
 * If the dl_server is in defer mode, the idle time is also considered as
 * time available to the dl_server, avoiding a penalty for the rt
 * scheduler that did not consume that time.
 */
void dl_server_update_idle(struct sched_dl_entity *dl_se, s64 delta_exec)
1581 {
1582 if (dl_se->dl_server_active && dl_se->dl_runtime && dl_se->dl_defer)
1583 update_curr_dl_se(dl_se->rq, dl_se, delta_exec);
1584 }
1585
void dl_server_update(struct sched_dl_entity *dl_se, s64 delta_exec)
1587 {
1588 /* 0 runtime = fair server disabled */
1589 if (dl_se->dl_server_active && dl_se->dl_runtime)
1590 update_curr_dl_se(dl_se->rq, dl_se, delta_exec);
1591 }
1592
1593 /*
1594 * dl_server && dl_defer:
1595 *
1596 * 6
1597 * +--------------------+
1598 * v |
1599 * +-------------+ 4 +-----------+ 5 +------------------+
1600 * +-> | A:init | <--- | D:running | -----> | E:replenish-wait |
1601 * | +-------------+ +-----------+ +------------------+
1602 * | | | 1 ^ ^ |
1603 * | | 1 +----------+ | 3 |
1604 * | v | |
1605 * | +--------------------------------+ 2 |
1606 * | | | ----+ |
1607 * | 8 | B:zero_laxity-wait | | |
1608 * | | | <---+ |
1609 * | +--------------------------------+ |
1610 * | | ^ ^ 2 |
1611 * | | 7 | 2, 1 +----------------+
1612 * | v |
1613 * | +-------------+ |
1614 * +-- | C:idle-wait | -+
1615 * +-------------+
1616 * ^ 7 |
1617 * +---------+
1618 *
1619 *
1620 * [A] - init
1621 * dl_server_active = 0
1622 * dl_throttled = 0
1623 * dl_defer_armed = 0
1624 * dl_defer_running = 0/1
1625 * dl_defer_idle = 0
1626 *
1627 * [B] - zero_laxity-wait
1628 * dl_server_active = 1
1629 * dl_throttled = 1
1630 * dl_defer_armed = 1
1631 * dl_defer_running = 0
1632 * dl_defer_idle = 0
1633 *
1634 * [C] - idle-wait
1635 * dl_server_active = 1
1636 * dl_throttled = 1
1637 * dl_defer_armed = 1
1638 * dl_defer_running = 0
1639 * dl_defer_idle = 1
1640 *
1641 * [D] - running
1642 * dl_server_active = 1
1643 * dl_throttled = 0
1644 * dl_defer_armed = 0
1645 * dl_defer_running = 1
1646 * dl_defer_idle = 0
1647 *
1648 * [E] - replenish-wait
1649 * dl_server_active = 1
1650 * dl_throttled = 1
1651 * dl_defer_armed = 0
1652 * dl_defer_running = 1
1653 * dl_defer_idle = 0
1654 *
1655 *
1656 * [1] A->B, A->D, C->B
1657 * dl_server_start()
1658 * dl_defer_idle = 0;
1659 * if (dl_server_active)
1660 * return; // [B]
1661 * dl_server_active = 1;
1662 * enqueue_dl_entity()
1663 * update_dl_entity(WAKEUP)
1664 * if (dl_time_before() || dl_entity_overflow())
1665 * dl_defer_running = 0;
1666 * replenish_dl_new_period();
1667 * // fwd period
1668 * dl_throttled = 1;
1669 * dl_defer_armed = 1;
1670 * if (!dl_defer_running)
1671 * dl_defer_armed = 1;
1672 * dl_throttled = 1;
1673 * if (dl_throttled && start_dl_timer())
1674 * return; // [B]
1675 * __enqueue_dl_entity();
1676 * // [D]
1677 *
1678 * // deplete server runtime from client-class
1679 * [2] B->B, C->B, E->B
1680 * dl_server_update()
1681 * update_curr_dl_se() // idle = false
1682 * if (dl_defer_idle)
1683 * dl_defer_idle = 0;
1684 * if (dl_defer && dl_throttled && dl_runtime_exceeded())
1685 * dl_defer_running = 0;
1686 * hrtimer_try_to_cancel(); // stop timer
1687 * replenish_dl_new_period()
1688 * // fwd period
1689 * dl_throttled = 1;
1690 * dl_defer_armed = 1;
1691 * start_dl_timer(); // restart timer
1692 * // [B]
1693 *
1694 * // timer actually fires means we have runtime
1695 * [3] B->D
1696 * dl_server_timer()
1697 * if (dl_defer_armed)
1698 * dl_defer_running = 1;
1699 * enqueue_dl_entity(REPLENISH)
1700 * replenish_dl_entity()
1701 * // fwd period
1702 * if (dl_throttled)
1703 * dl_throttled = 0;
1704 * if (dl_defer_armed)
1705 * dl_defer_armed = 0;
1706 * __enqueue_dl_entity();
1707 * // [D]
1708 *
1709 * // schedule server
1710 * [4] D->A
1711 * pick_task_dl()
1712 * p = server_pick_task();
1713 * if (!p)
1714 * dl_server_stop()
1715 * dequeue_dl_entity();
1716 * hrtimer_try_to_cancel();
1717 * dl_defer_armed = 0;
1718 * dl_throttled = 0;
1719 * dl_server_active = 0;
1720 * // [A]
1721 * return p;
1722 *
1723 * // server running
1724 * [5] D->E
1725 * update_curr_dl_se()
1726 * if (dl_runtime_exceeded())
1727 * dl_throttled = 1;
1728 * dequeue_dl_entity();
1729 * start_dl_timer();
1730 * // [E]
1731 *
1732 * // server replenished
1733 * [6] E->D
1734 * dl_server_timer()
1735 * enqueue_dl_entity(REPLENISH)
1736 * replenish_dl_entity()
1737 * fwd-period
1738 * if (dl_throttled)
1739 * dl_throttled = 0;
1740 * __enqueue_dl_entity();
1741 * // [D]
1742 *
1743 * // deplete server runtime from idle
1744 * [7] B->C, C->C
1745 * dl_server_update_idle()
1746 * update_curr_dl_se() // idle = true
1747 * if (dl_defer && dl_throttled && dl_runtime_exceeded())
1748 * if (dl_defer_idle)
1749 * return;
1750 * dl_defer_running = 0;
1751 * hrtimer_try_to_cancel();
1752 * replenish_dl_new_period()
1753 * // fwd period
1754 * dl_throttled = 1;
1755 * dl_defer_armed = 1;
1756 * dl_defer_idle = 1;
1757 * start_dl_timer(); // restart timer
1758 * // [C]
1759 *
1760 * // stop idle server
1761 * [8] C->A
1762 * dl_server_timer()
1763 * if (dl_defer_idle)
1764 * dl_server_stop();
1765 * // [A]
1766 *
1767 *
1768 * digraph dl_server {
1769 * "A:init" -> "B:zero_laxity-wait" [label="1:dl_server_start"]
1770 * "A:init" -> "D:running" [label="1:dl_server_start"]
1771 * "B:zero_laxity-wait" -> "B:zero_laxity-wait" [label="2:dl_server_update"]
1772 * "B:zero_laxity-wait" -> "C:idle-wait" [label="7:dl_server_update_idle"]
1773 * "B:zero_laxity-wait" -> "D:running" [label="3:dl_server_timer"]
1774 * "C:idle-wait" -> "A:init" [label="8:dl_server_timer"]
1775 * "C:idle-wait" -> "B:zero_laxity-wait" [label="1:dl_server_start"]
1776 * "C:idle-wait" -> "B:zero_laxity-wait" [label="2:dl_server_update"]
1777 * "C:idle-wait" -> "C:idle-wait" [label="7:dl_server_update_idle"]
1778 * "D:running" -> "A:init" [label="4:pick_task_dl"]
1779 * "D:running" -> "E:replenish-wait" [label="5:update_curr_dl_se"]
1780 * "E:replenish-wait" -> "B:zero_laxity-wait" [label="2:dl_server_update"]
1781 * "E:replenish-wait" -> "D:running" [label="6:dl_server_timer"]
1782 * }
1783 *
1784 *
1785 * Notes:
1786 *
1787 * - When there are fair tasks running, the most likely loop is [2]->[2]:
1788 *   the dl_server never actually runs and the timer never fires.
1789 *
1790 * - When there is actual fair starvation, the timer fires and starts the
1791 * dl_server. This will then throttle and replenish like a normal DL
1792 * task. Notably it will not 'defer' again.
1793 *
1794 * - When idle it will push the activation forward once, and then wait
1795 * for the timer to hit or a non-idle update to restart things.
1796 */
1797 void dl_server_start(struct sched_dl_entity *dl_se)
1798 {
1799 struct rq *rq = dl_se->rq;
1800
1801 dl_se->dl_defer_idle = 0;
1802 if (!dl_server(dl_se) || dl_se->dl_server_active || !dl_se->dl_runtime)
1803 return;
1804
1805 /*
1806 * Update the current task to 'now'.
1807 */
1808 rq->donor->sched_class->update_curr(rq);
1809
1810 if (WARN_ON_ONCE(!cpu_online(cpu_of(rq))))
1811 return;
1812
1813 dl_se->dl_server_active = 1;
1814 enqueue_dl_entity(dl_se, ENQUEUE_WAKEUP);
1815 if (!dl_task(dl_se->rq->curr) || dl_entity_preempt(dl_se, &rq->curr->dl))
1816 resched_curr(dl_se->rq);
1817 }
1818
1819 void dl_server_stop(struct sched_dl_entity *dl_se)
1820 {
1821 if (!dl_server(dl_se) || !dl_server_active(dl_se))
1822 return;
1823
1824 dequeue_dl_entity(dl_se, DEQUEUE_SLEEP);
1825 hrtimer_try_to_cancel(&dl_se->dl_timer);
1826 dl_se->dl_defer_armed = 0;
1827 dl_se->dl_throttled = 0;
1828 dl_se->dl_defer_idle = 0;
1829 dl_se->dl_server_active = 0;
1830 }
1831
1832 void dl_server_init(struct sched_dl_entity *dl_se, struct rq *rq,
1833 dl_server_pick_f pick_task)
1834 {
1835 dl_se->rq = rq;
1836 dl_se->server_pick_task = pick_task;
1837 }
1838
1839 void sched_init_dl_servers(void)
1840 {
1841 int cpu;
1842 struct rq *rq;
1843 struct sched_dl_entity *dl_se;
1844
1845 for_each_online_cpu(cpu) {
1846 u64 runtime = 50 * NSEC_PER_MSEC;
1847 u64 period = 1000 * NSEC_PER_MSEC;
1848
1849 rq = cpu_rq(cpu);
1850
1851 guard(rq_lock_irq)(rq);
1852 update_rq_clock(rq);
1853
1854 dl_se = &rq->fair_server;
1855
1856 WARN_ON(dl_server(dl_se));
1857
1858 dl_server_apply_params(dl_se, runtime, period, 1);
1859
1860 dl_se->dl_server = 1;
1861 dl_se->dl_defer = 1;
1862 setup_new_dl_entity(dl_se);
1863
1864 #ifdef CONFIG_SCHED_CLASS_EXT
1865 dl_se = &rq->ext_server;
1866
1867 WARN_ON(dl_server(dl_se));
1868
1869 dl_server_apply_params(dl_se, runtime, period, 1);
1870
1871 dl_se->dl_server = 1;
1872 dl_se->dl_defer = 1;
1873 setup_new_dl_entity(dl_se);
1874 #endif
1875 }
1876 }
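/*
 * Worked example for the defaults above: runtime = 50ms and period = 1s
 * reserve roughly 5% of each online CPU for the server, i.e.
 * to_ratio(1000, 50) = (50 << BW_SHIFT) / 1000 ~= 0.05 * BW_UNIT
 * (assuming BW_SHIFT/BW_UNIT as defined in sched.h).
 */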
1877
1878 void __dl_server_attach_root(struct sched_dl_entity *dl_se, struct rq *rq)
1879 {
1880 u64 new_bw = dl_se->dl_bw;
1881 int cpu = cpu_of(rq);
1882 struct dl_bw *dl_b;
1883
1884 dl_b = dl_bw_of(cpu_of(rq));
1885 guard(raw_spinlock)(&dl_b->lock);
1886
1887 if (!dl_bw_cpus(cpu))
1888 return;
1889
1890 __dl_add(dl_b, new_bw, dl_bw_cpus(cpu));
1891 }
1892
1893 int dl_server_apply_params(struct sched_dl_entity *dl_se, u64 runtime, u64 period, bool init)
1894 {
1895 u64 old_bw = init ? 0 : to_ratio(dl_se->dl_period, dl_se->dl_runtime);
1896 u64 new_bw = to_ratio(period, runtime);
1897 struct rq *rq = dl_se->rq;
1898 int cpu = cpu_of(rq);
1899 struct dl_bw *dl_b;
1900 unsigned long cap;
1901 int cpus;
1902
1903 dl_b = dl_bw_of(cpu);
1904 guard(raw_spinlock)(&dl_b->lock);
1905
1906 cpus = dl_bw_cpus(cpu);
1907 cap = dl_bw_capacity(cpu);
1908
1909 if (__dl_overflow(dl_b, cap, old_bw, new_bw))
1910 return -EBUSY;
1911
1912 if (init) {
1913 __add_rq_bw(new_bw, &rq->dl);
1914 __dl_add(dl_b, new_bw, cpus);
1915 } else {
1916 __dl_sub(dl_b, dl_se->dl_bw, cpus);
1917 __dl_add(dl_b, new_bw, cpus);
1918
1919 dl_rq_change_utilization(rq, dl_se, new_bw);
1920 }
1921
1922 dl_se->dl_runtime = runtime;
1923 dl_se->dl_deadline = period;
1924 dl_se->dl_period = period;
1925
1926 dl_se->runtime = 0;
1927 dl_se->deadline = 0;
1928
1929 dl_se->dl_bw = to_ratio(dl_se->dl_period, dl_se->dl_runtime);
1930 dl_se->dl_density = to_ratio(dl_se->dl_deadline, dl_se->dl_runtime);
1931
1932 return 0;
1933 }
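/*
 * Descriptive note: the admission test above (__dl_overflow) roughly
 * checks that, after swapping old_bw for new_bw, the root domain's
 * total_bw still fits within the allowed fraction of the
 * capacity-scaled CPUs; only then are the server parameters committed.
 */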
1934
1935 /*
1936 * Update the current task's runtime statistics (provided it is still
1937 * a -deadline task and has not been removed from the dl_rq).
1938 */
1939 static void update_curr_dl(struct rq *rq)
1940 {
1941 struct task_struct *donor = rq->donor;
1942 struct sched_dl_entity *dl_se = &donor->dl;
1943 s64 delta_exec;
1944
1945 if (!dl_task(donor) || !on_dl_rq(dl_se))
1946 return;
1947
1948 /*
1949 * Consumed budget is computed considering the time as
1950 * observed by schedulable tasks (excluding time spent
1951 * in hardirq context, etc.). Deadlines are instead
1952 * computed using hard walltime. This seems to be the more
1953 * natural solution, but the full ramifications of this
1954 * approach need further study.
1955 */
1956 delta_exec = update_curr_common(rq);
1957 update_curr_dl_se(rq, dl_se, delta_exec);
1958 }
1959
1960 static enum hrtimer_restart inactive_task_timer(struct hrtimer *timer)
1961 {
1962 struct sched_dl_entity *dl_se = container_of(timer,
1963 struct sched_dl_entity,
1964 inactive_timer);
1965 struct task_struct *p = NULL;
1966 struct rq_flags rf;
1967 struct rq *rq;
1968
1969 if (!dl_server(dl_se)) {
1970 p = dl_task_of(dl_se);
1971 rq = task_rq_lock(p, &rf);
1972 } else {
1973 rq = dl_se->rq;
1974 rq_lock(rq, &rf);
1975 }
1976
1977 sched_clock_tick();
1978 update_rq_clock(rq);
1979
1980 if (dl_server(dl_se))
1981 goto no_task;
1982
1983 if (!dl_task(p) || READ_ONCE(p->__state) == TASK_DEAD) {
1984 struct dl_bw *dl_b = dl_bw_of(task_cpu(p));
1985
1986 if (READ_ONCE(p->__state) == TASK_DEAD && dl_se->dl_non_contending) {
1987 sub_running_bw(&p->dl, dl_rq_of_se(&p->dl));
1988 sub_rq_bw(&p->dl, dl_rq_of_se(&p->dl));
1989 dl_se->dl_non_contending = 0;
1990 }
1991
1992 raw_spin_lock(&dl_b->lock);
1993 __dl_sub(dl_b, p->dl.dl_bw, dl_bw_cpus(task_cpu(p)));
1994 raw_spin_unlock(&dl_b->lock);
1995 __dl_clear_params(dl_se);
1996
1997 goto unlock;
1998 }
1999
2000 no_task:
2001 if (dl_se->dl_non_contending == 0)
2002 goto unlock;
2003
2004 sub_running_bw(dl_se, &rq->dl);
2005 dl_se->dl_non_contending = 0;
2006 unlock:
2007
2008 if (!dl_server(dl_se)) {
2009 task_rq_unlock(rq, p, &rf);
2010 put_task_struct(p);
2011 } else {
2012 rq_unlock(rq, &rf);
2013 }
2014
2015 return HRTIMER_NORESTART;
2016 }
2017
2018 static void init_dl_inactive_task_timer(struct sched_dl_entity *dl_se)
2019 {
2020 struct hrtimer *timer = &dl_se->inactive_timer;
2021
2022 hrtimer_setup(timer, inactive_task_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL_HARD);
2023 }
2024
2025 #define __node_2_dle(node) \
2026 rb_entry((node), struct sched_dl_entity, rb_node)
2027
2028 static void inc_dl_deadline(struct dl_rq *dl_rq, u64 deadline)
2029 {
2030 struct rq *rq = rq_of_dl_rq(dl_rq);
2031
2032 if (dl_rq->earliest_dl.curr == 0 ||
2033 dl_time_before(deadline, dl_rq->earliest_dl.curr)) {
2034 if (dl_rq->earliest_dl.curr == 0)
2035 cpupri_set(&rq->rd->cpupri, rq->cpu, CPUPRI_HIGHER);
2036 dl_rq->earliest_dl.curr = deadline;
2037 cpudl_set(&rq->rd->cpudl, rq->cpu, deadline);
2038 }
2039 }
2040
2041 static void dec_dl_deadline(struct dl_rq *dl_rq, u64 deadline)
2042 {
2043 struct rq *rq = rq_of_dl_rq(dl_rq);
2044
2045 /*
2046 * Since we may have removed our earliest (and/or next earliest)
2047 * task we must recompute them.
2048 */
2049 if (!dl_rq->dl_nr_running) {
2050 dl_rq->earliest_dl.curr = 0;
2051 dl_rq->earliest_dl.next = 0;
2052 cpudl_clear(&rq->rd->cpudl, rq->cpu, rq->online);
2053 cpupri_set(&rq->rd->cpupri, rq->cpu, rq->rt.highest_prio.curr);
2054 } else {
2055 struct rb_node *leftmost = rb_first_cached(&dl_rq->root);
2056 struct sched_dl_entity *entry = __node_2_dle(leftmost);
2057
2058 dl_rq->earliest_dl.curr = entry->deadline;
2059 cpudl_set(&rq->rd->cpudl, rq->cpu, entry->deadline);
2060 }
2061 }
2062
2063 static inline
2064 void inc_dl_tasks(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
2065 {
2066 u64 deadline = dl_se->deadline;
2067
2068 dl_rq->dl_nr_running++;
2069
2070 if (!dl_server(dl_se))
2071 add_nr_running(rq_of_dl_rq(dl_rq), 1);
2072
2073 inc_dl_deadline(dl_rq, deadline);
2074 }
2075
2076 static inline
2077 void dec_dl_tasks(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
2078 {
2079 WARN_ON(!dl_rq->dl_nr_running);
2080 dl_rq->dl_nr_running--;
2081
2082 if (!dl_server(dl_se))
2083 sub_nr_running(rq_of_dl_rq(dl_rq), 1);
2084
2085 dec_dl_deadline(dl_rq, dl_se->deadline);
2086 }
2087
2088 static inline bool __dl_less(struct rb_node *a, const struct rb_node *b)
2089 {
2090 return dl_time_before(__node_2_dle(a)->deadline, __node_2_dle(b)->deadline);
2091 }
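/*
 * Descriptive note: keying the dl_rq rb-tree by absolute deadline means
 * the cached leftmost node is always the EDF choice, so picking the next
 * entity is a single rb_first_cached() lookup.
 */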
2092
2093 static __always_inline struct sched_statistics *
2094 __schedstats_from_dl_se(struct sched_dl_entity *dl_se)
2095 {
2096 if (!schedstat_enabled())
2097 return NULL;
2098
2099 if (dl_server(dl_se))
2100 return NULL;
2101
2102 return &dl_task_of(dl_se)->stats;
2103 }
2104
2105 static inline void
2106 update_stats_wait_start_dl(struct dl_rq *dl_rq, struct sched_dl_entity *dl_se)
2107 {
2108 struct sched_statistics *stats = __schedstats_from_dl_se(dl_se);
2109 if (stats)
2110 __update_stats_wait_start(rq_of_dl_rq(dl_rq), dl_task_of(dl_se), stats);
2111 }
2112
2113 static inline void
2114 update_stats_wait_end_dl(struct dl_rq *dl_rq, struct sched_dl_entity *dl_se)
2115 {
2116 struct sched_statistics *stats = __schedstats_from_dl_se(dl_se);
2117 if (stats)
2118 __update_stats_wait_end(rq_of_dl_rq(dl_rq), dl_task_of(dl_se), stats);
2119 }
2120
2121 static inline void
2122 update_stats_enqueue_sleeper_dl(struct dl_rq *dl_rq, struct sched_dl_entity *dl_se)
2123 {
2124 struct sched_statistics *stats = __schedstats_from_dl_se(dl_se);
2125 if (stats)
2126 __update_stats_enqueue_sleeper(rq_of_dl_rq(dl_rq), dl_task_of(dl_se), stats);
2127 }
2128
2129 static inline void
2130 update_stats_enqueue_dl(struct dl_rq *dl_rq, struct sched_dl_entity *dl_se,
2131 int flags)
2132 {
2133 if (!schedstat_enabled())
2134 return;
2135
2136 if (flags & ENQUEUE_WAKEUP)
2137 update_stats_enqueue_sleeper_dl(dl_rq, dl_se);
2138 }
2139
2140 static inline void
2141 update_stats_dequeue_dl(struct dl_rq *dl_rq, struct sched_dl_entity *dl_se,
2142 int flags)
2143 {
2144 struct task_struct *p = dl_task_of(dl_se);
2145 struct rq *rq = rq_of_dl_rq(dl_rq);
2146
2147 if (!schedstat_enabled())
2148 return;
2149
2150 if (p != rq->curr)
2151 update_stats_wait_end_dl(dl_rq, dl_se);
2152
2153 if ((flags & DEQUEUE_SLEEP)) {
2154 unsigned int state;
2155
2156 state = READ_ONCE(p->__state);
2157 if (state & TASK_INTERRUPTIBLE)
2158 __schedstat_set(p->stats.sleep_start,
2159 rq_clock(rq_of_dl_rq(dl_rq)));
2160
2161 if (state & TASK_UNINTERRUPTIBLE)
2162 __schedstat_set(p->stats.block_start,
2163 rq_clock(rq_of_dl_rq(dl_rq)));
2164 }
2165 }
2166
2167 static void __enqueue_dl_entity(struct sched_dl_entity *dl_se)
2168 {
2169 struct dl_rq *dl_rq = dl_rq_of_se(dl_se);
2170
2171 WARN_ON_ONCE(!RB_EMPTY_NODE(&dl_se->rb_node));
2172
2173 rb_add_cached(&dl_se->rb_node, &dl_rq->root, __dl_less);
2174
2175 inc_dl_tasks(dl_se, dl_rq);
2176 }
2177
2178 static void __dequeue_dl_entity(struct sched_dl_entity *dl_se)
2179 {
2180 struct dl_rq *dl_rq = dl_rq_of_se(dl_se);
2181
2182 if (RB_EMPTY_NODE(&dl_se->rb_node))
2183 return;
2184
2185 rb_erase_cached(&dl_se->rb_node, &dl_rq->root);
2186
2187 RB_CLEAR_NODE(&dl_se->rb_node);
2188
2189 dec_dl_tasks(dl_se, dl_rq);
2190 }
2191
2192 static void
2193 enqueue_dl_entity(struct sched_dl_entity *dl_se, int flags)
2194 {
2195 WARN_ON_ONCE(on_dl_rq(dl_se));
2196
2197 update_stats_enqueue_dl(dl_rq_of_se(dl_se), dl_se, flags);
2198
2199 /*
2200 * Check if a constrained deadline task was activated
2201 * after the deadline but before the next period.
2202 * If that is the case, the task will be throttled and
2203 * the replenishment timer will be set to the next period.
2204 */
2205 if (!dl_se->dl_throttled && !dl_is_implicit(dl_se))
2206 dl_check_constrained_dl(dl_se);
2207
2208 if (flags & (ENQUEUE_RESTORE|ENQUEUE_MIGRATING)) {
2209 struct dl_rq *dl_rq = dl_rq_of_se(dl_se);
2210
2211 add_rq_bw(dl_se, dl_rq);
2212 add_running_bw(dl_se, dl_rq);
2213 }
2214
2215 /*
2216 * If p is throttled, we do not enqueue it. In fact, if it exhausted
2217 * its budget it needs a replenishment and, since it now is on
2218 * its rq, the bandwidth timer callback (which clearly has not
2219 * run yet) will take care of this.
2220 * However, the active utilization does not depend on the fact
2221 * that the task is on the runqueue or not (but depends on the
2222 * task's state - in GRUB parlance, "inactive" vs "active contending").
2223 * In other words, even if a task is throttled its utilization must
2224 * be counted in the active utilization; hence, we need to call
2225 * add_running_bw().
2226 */
2227 if (!dl_se->dl_defer && dl_se->dl_throttled && !(flags & ENQUEUE_REPLENISH)) {
2228 if (flags & ENQUEUE_WAKEUP)
2229 task_contending(dl_se, flags);
2230
2231 return;
2232 }
2233
2234 /*
2235 * If this is a wakeup or a new instance, the scheduling
2236 * parameters of the task might need updating. Otherwise,
2237 * we want a replenishment of its runtime.
2238 */
2239 if (flags & ENQUEUE_WAKEUP) {
2240 task_contending(dl_se, flags);
2241 update_dl_entity(dl_se);
2242 } else if (flags & ENQUEUE_REPLENISH) {
2243 replenish_dl_entity(dl_se);
2244 } else if ((flags & ENQUEUE_MOVE) &&
2245 !is_dl_boosted(dl_se) &&
2246 dl_time_before(dl_se->deadline, rq_clock(rq_of_dl_se(dl_se)))) {
2247 setup_new_dl_entity(dl_se);
2248 }
2249
2250 /*
2251 * If the reservation is still throttled, e.g., it got replenished but is a
2252 * deferred task and still has to wait, don't enqueue.
2253 */
2254 if (dl_se->dl_throttled && start_dl_timer(dl_se))
2255 return;
2256
2257 /*
2258 * We're about to enqueue, make sure we're not ->dl_throttled!
2259 * In case the timer was not started, say because the defer time
2260 * has passed, mark as not throttled and mark unarmed.
2261 * Also cancel earlier timers, since letting those run is pointless.
2262 */
2263 if (dl_se->dl_throttled) {
2264 hrtimer_try_to_cancel(&dl_se->dl_timer);
2265 dl_se->dl_defer_armed = 0;
2266 dl_se->dl_throttled = 0;
2267 }
2268
2269 __enqueue_dl_entity(dl_se);
2270 }
2271
2272 static void dequeue_dl_entity(struct sched_dl_entity *dl_se, int flags)
2273 {
2274 __dequeue_dl_entity(dl_se);
2275
2276 if (flags & (DEQUEUE_SAVE|DEQUEUE_MIGRATING)) {
2277 struct dl_rq *dl_rq = dl_rq_of_se(dl_se);
2278
2279 sub_running_bw(dl_se, dl_rq);
2280 sub_rq_bw(dl_se, dl_rq);
2281 }
2282
2283 /*
2284 * This check allows to start the inactive timer (or to immediately
2285 * decrease the active utilization, if needed) in two cases:
2286 * when the task blocks and when it is terminating
2287 * (p->state == TASK_DEAD). We can handle the two cases in the same
2288 * way, because from GRUB's point of view the same thing is happening
2289 * (the task moves from "active contending" to "active non contending"
2290 * or "inactive")
2291 */
2292 if (flags & DEQUEUE_SLEEP)
2293 task_non_contending(dl_se, true);
2294 }
2295
2296 static void enqueue_task_dl(struct rq *rq, struct task_struct *p, int flags)
2297 {
2298 if (is_dl_boosted(&p->dl)) {
2299 /*
2300 * Because of delays in the detection of the overrun of a
2301 * thread's runtime, it might be the case that a thread
2302 * goes to sleep in a rt mutex with negative runtime. As
2303 * a consequence, the thread will be throttled.
2304 *
2305 * While waiting for the mutex, this thread can also be
2306 * boosted via PI, resulting in a thread that is throttled
2307 * and boosted at the same time.
2308 *
2309 * In this case, the boost overrides the throttle.
2310 */
2311 if (p->dl.dl_throttled) {
2312 /*
2313 * The replenish timer needs to be canceled. No
2314 * problem if it fires concurrently: boosted threads
2315 * are ignored in dl_task_timer().
2316 */
2317 cancel_replenish_timer(&p->dl);
2318 p->dl.dl_throttled = 0;
2319 }
2320 } else if (!dl_prio(p->normal_prio)) {
2321 /*
2322 * Special case in which we have a !SCHED_DEADLINE task that is going
2323 * to be deboosted, but exceeds its runtime while doing so. No point in
2324 * replenishing it, as it's going to return back to its original
2325 * scheduling class after this. If it has been throttled, we need to
2326 * clear the flag, otherwise the task may wake up as throttled after
2327 * being boosted again with no means to replenish the runtime and clear
2328 * the throttle.
2329 */
2330 p->dl.dl_throttled = 0;
2331 if (!(flags & ENQUEUE_REPLENISH))
2332 printk_deferred_once("sched: DL de-boosted task PID %d: REPLENISH flag missing\n",
2333 task_pid_nr(p));
2334
2335 return;
2336 }
2337
2338 check_schedstat_required();
2339 update_stats_wait_start_dl(dl_rq_of_se(&p->dl), &p->dl);
2340
2341 if (p->on_rq == TASK_ON_RQ_MIGRATING)
2342 flags |= ENQUEUE_MIGRATING;
2343
2344 enqueue_dl_entity(&p->dl, flags);
2345
2346 if (dl_server(&p->dl))
2347 return;
2348
2349 if (task_is_blocked(p))
2350 return;
2351
2352 if (!task_current(rq, p) && !p->dl.dl_throttled && p->nr_cpus_allowed > 1)
2353 enqueue_pushable_dl_task(rq, p);
2354 }
2355
2356 static bool dequeue_task_dl(struct rq *rq, struct task_struct *p, int flags)
2357 {
2358 update_curr_dl(rq);
2359
2360 if (p->on_rq == TASK_ON_RQ_MIGRATING)
2361 flags |= DEQUEUE_MIGRATING;
2362
2363 dequeue_dl_entity(&p->dl, flags);
2364 if (!p->dl.dl_throttled && !dl_server(&p->dl))
2365 dequeue_pushable_dl_task(rq, p);
2366
2367 return true;
2368 }
2369
2370 /*
2371 * Yield task semantic for -deadline tasks is:
2372 *
2373 * get off from the CPU until our next instance, with
2374 * a new runtime. This is of little use now, since we
2375 * don't have a bandwidth reclaiming mechanism. Anyway,
2376 * bandwidth reclaiming is planned for the future, and
2377 * yield_task_dl will indicate that some spare budget
2378 * is available for other task instances to use.
2379 */
2380 static void yield_task_dl(struct rq *rq)
2381 {
2382 /*
2383 * We make the task go to sleep until its current deadline by
2384 * forcing its runtime to zero. This way, update_curr_dl() stops
2385 * it and the bandwidth timer will wake it up and will give it
2386 * new scheduling parameters (thanks to dl_yielded=1).
2387 */
2388 rq->donor->dl.dl_yielded = 1;
2389
2390 update_rq_clock(rq);
2391 update_curr_dl(rq);
2392 /*
2393 * Tell update_rq_clock() that we've just updated,
2394 * so we don't do microscopic update in schedule()
2395 * and double the fastpath cost.
2396 */
2397 rq_clock_skip_update(rq);
2398 }
2399
2400 static inline bool dl_task_is_earliest_deadline(struct task_struct *p,
2401 struct rq *rq)
2402 {
2403 return (!rq->dl.dl_nr_running ||
2404 dl_time_before(p->dl.deadline,
2405 rq->dl.earliest_dl.curr));
2406 }
2407
2408 static int find_later_rq(struct task_struct *task);
2409
2410 static int
2411 select_task_rq_dl(struct task_struct *p, int cpu, int flags)
2412 {
2413 struct task_struct *curr, *donor;
2414 bool select_rq;
2415 struct rq *rq;
2416
2417 if (!(flags & WF_TTWU))
2418 return cpu;
2419
2420 rq = cpu_rq(cpu);
2421
2422 rcu_read_lock();
2423 curr = READ_ONCE(rq->curr); /* unlocked access */
2424 donor = READ_ONCE(rq->donor);
2425
2426 /*
2427 * If we are dealing with a -deadline task, we must
2428 * decide where to wake it up.
2429 * If it has a later deadline and the current task
2430 * on this rq can't move (provided the waking task
2431 * can!) we prefer to send it somewhere else. On the
2432 * other hand, if it has a shorter deadline, we
2433 * try to make it stay here, it might be important.
2434 */
2435 select_rq = unlikely(dl_task(donor)) &&
2436 (curr->nr_cpus_allowed < 2 ||
2437 !dl_entity_preempt(&p->dl, &donor->dl)) &&
2438 p->nr_cpus_allowed > 1;
2439
2440 /*
2441 * Take the capacity of the CPU into account to
2442 * ensure it fits the requirement of the task.
2443 */
2444 if (sched_asym_cpucap_active())
2445 select_rq |= !dl_task_fits_capacity(p, cpu);
2446
2447 if (select_rq) {
2448 int target = find_later_rq(p);
2449
2450 if (target != -1 &&
2451 dl_task_is_earliest_deadline(p, cpu_rq(target)))
2452 cpu = target;
2453 }
2454 rcu_read_unlock();
2455
2456 return cpu;
2457 }
2458
2459 static void migrate_task_rq_dl(struct task_struct *p, int new_cpu __maybe_unused)
2460 {
2461 struct rq_flags rf;
2462 struct rq *rq;
2463
2464 if (READ_ONCE(p->__state) != TASK_WAKING)
2465 return;
2466
2467 rq = task_rq(p);
2468 /*
2469 * Since p->state == TASK_WAKING, set_task_cpu() has been called
2470 * from try_to_wake_up(). Hence, p->pi_lock is locked, but
2471 * rq->lock is not... So, lock it
2472 */
2473 rq_lock(rq, &rf);
2474 if (p->dl.dl_non_contending) {
2475 update_rq_clock(rq);
2476 sub_running_bw(&p->dl, &rq->dl);
2477 p->dl.dl_non_contending = 0;
2478 /*
2479 * If the timer handler is currently running and the
2480 * timer cannot be canceled, inactive_task_timer()
2481 * will see that dl_not_contending is not set, and
2482 * will not touch the rq's active utilization,
2483 * so we are still safe.
2484 */
2485 cancel_inactive_timer(&p->dl);
2486 }
2487 sub_rq_bw(&p->dl, &rq->dl);
2488 rq_unlock(rq, &rf);
2489 }
2490
2491 static void check_preempt_equal_dl(struct rq *rq, struct task_struct *p)
2492 {
2493 /*
2494 * Current can't be migrated, useless to reschedule,
2495 * let's hope p can move out.
2496 */
2497 if (rq->curr->nr_cpus_allowed == 1 ||
2498 !cpudl_find(&rq->rd->cpudl, rq->donor, NULL))
2499 return;
2500
2501 /*
2502 * p is migratable, so let's not schedule it and
2503 * see if it is pushed or pulled somewhere else.
2504 */
2505 if (p->nr_cpus_allowed != 1 &&
2506 cpudl_find(&rq->rd->cpudl, p, NULL))
2507 return;
2508
2509 resched_curr(rq);
2510 }
2511
2512 static int balance_dl(struct rq *rq, struct task_struct *p, struct rq_flags *rf)
2513 {
2514 if (!on_dl_rq(&p->dl) && need_pull_dl_task(rq, p)) {
2515 /*
2516 * This is OK, because current is on_cpu, which avoids it being
2517 * picked for load-balance and preemption/IRQs are still
2518 * disabled avoiding further scheduler activity on it and we've
2519 * not yet started the picking loop.
2520 */
2521 rq_unpin_lock(rq, rf);
2522 pull_dl_task(rq);
2523 rq_repin_lock(rq, rf);
2524 }
2525
2526 return sched_stop_runnable(rq) || sched_dl_runnable(rq);
2527 }
2528
2529 /*
2530 * Only called when both the current and waking task are -deadline
2531 * tasks.
2532 */
2533 static void wakeup_preempt_dl(struct rq *rq, struct task_struct *p, int flags)
2534 {
2535 /*
2536 * Can only get preempted by stop-class, and those should be
2537 * few and short lived, doesn't really make sense to push
2538 * anything away for that.
2539 */
2540 if (p->sched_class != &dl_sched_class)
2541 return;
2542
2543 if (dl_entity_preempt(&p->dl, &rq->donor->dl)) {
2544 resched_curr(rq);
2545 return;
2546 }
2547
2548 /*
2549 * In the unlikely case current and p have the same deadline
2550 * let us try to decide what's the best thing to do...
2551 */
2552 if ((p->dl.deadline == rq->donor->dl.deadline) &&
2553 !test_tsk_need_resched(rq->curr))
2554 check_preempt_equal_dl(rq, p);
2555 }
2556
2557 #ifdef CONFIG_SCHED_HRTICK
2558 static void start_hrtick_dl(struct rq *rq, struct sched_dl_entity *dl_se)
2559 {
2560 hrtick_start(rq, dl_se->runtime);
2561 }
2562 #else /* !CONFIG_SCHED_HRTICK: */
2563 static void start_hrtick_dl(struct rq *rq, struct sched_dl_entity *dl_se)
2564 {
2565 }
2566 #endif /* !CONFIG_SCHED_HRTICK */
2567
2568 static void set_next_task_dl(struct rq *rq, struct task_struct *p, bool first)
2569 {
2570 struct sched_dl_entity *dl_se = &p->dl;
2571 struct dl_rq *dl_rq = &rq->dl;
2572
2573 p->se.exec_start = rq_clock_task(rq);
2574 if (on_dl_rq(&p->dl))
2575 update_stats_wait_end_dl(dl_rq, dl_se);
2576
2577 /* You can't push away the running task */
2578 dequeue_pushable_dl_task(rq, p);
2579
2580 if (!first)
2581 return;
2582
2583 if (rq->donor->sched_class != &dl_sched_class)
2584 update_dl_rq_load_avg(rq_clock_pelt(rq), rq, 0);
2585
2586 deadline_queue_push_tasks(rq);
2587
2588 if (hrtick_enabled_dl(rq))
2589 start_hrtick_dl(rq, &p->dl);
2590 }
2591
2592 static struct sched_dl_entity *pick_next_dl_entity(struct dl_rq *dl_rq)
2593 {
2594 struct rb_node *left = rb_first_cached(&dl_rq->root);
2595
2596 if (!left)
2597 return NULL;
2598
2599 return __node_2_dle(left);
2600 }
2601
2602 /*
2603 * __pick_task_dl - Helper to pick the next -deadline task to run.
2604 * @rq: The runqueue to pick the next task from.
2605 */
2606 static struct task_struct *__pick_task_dl(struct rq *rq, struct rq_flags *rf)
2607 {
2608 struct sched_dl_entity *dl_se;
2609 struct dl_rq *dl_rq = &rq->dl;
2610 struct task_struct *p;
2611
2612 again:
2613 if (!sched_dl_runnable(rq))
2614 return NULL;
2615
2616 dl_se = pick_next_dl_entity(dl_rq);
2617 WARN_ON_ONCE(!dl_se);
2618
2619 if (dl_server(dl_se)) {
2620 p = dl_se->server_pick_task(dl_se, rf);
2621 if (!p) {
2622 dl_server_stop(dl_se);
2623 goto again;
2624 }
2625 rq->dl_server = dl_se;
2626 } else {
2627 p = dl_task_of(dl_se);
2628 }
2629
2630 return p;
2631 }
2632
2633 static struct task_struct *pick_task_dl(struct rq *rq, struct rq_flags *rf)
2634 {
2635 return __pick_task_dl(rq, rf);
2636 }
2637
2638 static void put_prev_task_dl(struct rq *rq, struct task_struct *p, struct task_struct *next)
2639 {
2640 struct sched_dl_entity *dl_se = &p->dl;
2641 struct dl_rq *dl_rq = &rq->dl;
2642
2643 if (on_dl_rq(&p->dl))
2644 update_stats_wait_start_dl(dl_rq, dl_se);
2645
2646 update_curr_dl(rq);
2647
2648 update_dl_rq_load_avg(rq_clock_pelt(rq), rq, 1);
2649
2650 if (task_is_blocked(p))
2651 return;
2652
2653 if (on_dl_rq(&p->dl) && p->nr_cpus_allowed > 1)
2654 enqueue_pushable_dl_task(rq, p);
2655 }
2656
2657 /*
2658 * scheduler tick hitting a task of our scheduling class.
2659 *
2660 * NOTE: This function can be called remotely by the tick offload that
2661 * goes along full dynticks. Therefore no local assumption can be made
2662 * and everything must be accessed through the @rq and @curr passed in
2663 * parameters.
2664 */
2665 static void task_tick_dl(struct rq *rq, struct task_struct *p, int queued)
2666 {
2667 update_curr_dl(rq);
2668
2669 update_dl_rq_load_avg(rq_clock_pelt(rq), rq, 1);
2670 /*
2671 * Even when we have runtime, update_curr_dl() might have resulted in us
2672 * not being the leftmost task anymore. In that case NEED_RESCHED will
2673 * be set and schedule() will start a new hrtick for the next task.
2674 */
2675 if (hrtick_enabled_dl(rq) && queued && p->dl.runtime > 0 &&
2676 is_leftmost(&p->dl, &rq->dl))
2677 start_hrtick_dl(rq, &p->dl);
2678 }
2679
2680 static void task_fork_dl(struct task_struct *p)
2681 {
2682 /*
2683 * SCHED_DEADLINE tasks cannot fork and this is achieved through
2684 * sched_fork()
2685 */
2686 }
2687
2688 /* Only try algorithms three times */
2689 #define DL_MAX_TRIES 3
2690
2691 /*
2692 * Return the earliest pushable rq's task, which is suitable to be executed
2693 * on the CPU, NULL otherwise:
2694 */
2695 static struct task_struct *pick_earliest_pushable_dl_task(struct rq *rq, int cpu)
2696 {
2697 struct task_struct *p = NULL;
2698 struct rb_node *next_node;
2699
2700 if (!has_pushable_dl_tasks(rq))
2701 return NULL;
2702
2703 next_node = rb_first_cached(&rq->dl.pushable_dl_tasks_root);
2704 while (next_node) {
2705 p = __node_2_pdl(next_node);
2706
2707 if (task_is_pushable(rq, p, cpu))
2708 return p;
2709
2710 next_node = rb_next(next_node);
2711 }
2712
2713 return NULL;
2714 }
2715
2716 /* Access rule: must be called on local CPU with preemption disabled */
2717 static DEFINE_PER_CPU(cpumask_var_t, local_cpu_mask_dl);
2718
2719 static int find_later_rq(struct task_struct *task)
2720 {
2721 struct sched_domain *sd;
2722 struct cpumask *later_mask = this_cpu_cpumask_var_ptr(local_cpu_mask_dl);
2723 int this_cpu = smp_processor_id();
2724 int cpu = task_cpu(task);
2725
2726 /* Make sure the mask is initialized first */
2727 if (unlikely(!later_mask))
2728 return -1;
2729
2730 if (task->nr_cpus_allowed == 1)
2731 return -1;
2732
2733 /*
2734 * We have to consider system topology and task affinity
2735 * first, then we can look for a suitable CPU.
2736 */
2737 if (!cpudl_find(&task_rq(task)->rd->cpudl, task, later_mask))
2738 return -1;
2739
2740 /*
2741 * If we are here, some targets have been found, including
2742 * the most suitable which is, among the runqueues where the
2743 * current tasks have later deadlines than the task's one, the
2744 * rq with the latest possible one.
2745 *
2746 * Now we check how well this matches with task's
2747 * affinity and system topology.
2748 *
2749 * The last CPU where the task ran is our first
2750 * guess, since it is most likely cache-hot there.
2751 */
2752 if (cpumask_test_cpu(cpu, later_mask))
2753 return cpu;
2754 /*
2755 * Check if this_cpu is to be skipped (i.e., it is
2756 * not in the mask) or not.
2757 */
2758 if (!cpumask_test_cpu(this_cpu, later_mask))
2759 this_cpu = -1;
2760
2761 rcu_read_lock();
2762 for_each_domain(cpu, sd) {
2763 if (sd->flags & SD_WAKE_AFFINE) {
2764 int best_cpu;
2765
2766 /*
2767 * If possible, preempting this_cpu is
2768 * cheaper than migrating.
2769 */
2770 if (this_cpu != -1 &&
2771 cpumask_test_cpu(this_cpu, sched_domain_span(sd))) {
2772 rcu_read_unlock();
2773 return this_cpu;
2774 }
2775
2776 best_cpu = cpumask_any_and_distribute(later_mask,
2777 sched_domain_span(sd));
2778 /*
2779 * Last chance: if a CPU that is in both later_mask
2780 * and the current sd span is valid, that becomes our
2781 * choice. Of course, the latest possible CPU is
2782 * already under consideration through later_mask.
2783 */
2784 if (best_cpu < nr_cpu_ids) {
2785 rcu_read_unlock();
2786 return best_cpu;
2787 }
2788 }
2789 }
2790 rcu_read_unlock();
2791
2792 /*
2793 * At this point, all our guesses failed, we just return
2794 * 'something', and let the caller sort the things out.
2795 */
2796 if (this_cpu != -1)
2797 return this_cpu;
2798
2799 cpu = cpumask_any_distribute(later_mask);
2800 if (cpu < nr_cpu_ids)
2801 return cpu;
2802
2803 return -1;
2804 }
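/*
 * Descriptive note: the preference order implemented above is (1) the
 * task's previous CPU if it is in later_mask, (2) this_cpu when it shares
 * a wake-affine domain with the task, (3) any later_mask CPU within the
 * current sched domain span, and (4) failing that, this_cpu or any
 * remaining later_mask CPU.
 */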
2805
2806 static struct task_struct *pick_next_pushable_dl_task(struct rq *rq)
2807 {
2808 struct task_struct *i, *p = NULL;
2809 struct rb_node *next_node;
2810
2811 if (!has_pushable_dl_tasks(rq))
2812 return NULL;
2813
2814 next_node = rb_first_cached(&rq->dl.pushable_dl_tasks_root);
2815 while (next_node) {
2816 i = __node_2_pdl(next_node);
2817 /* make sure task isn't on_cpu (possible with proxy-exec) */
2818 if (!task_on_cpu(rq, i)) {
2819 p = i;
2820 break;
2821 }
2822
2823 next_node = rb_next(next_node);
2824 }
2825
2826 if (!p)
2827 return NULL;
2828
2829 WARN_ON_ONCE(rq->cpu != task_cpu(p));
2830 WARN_ON_ONCE(task_current(rq, p));
2831 WARN_ON_ONCE(p->nr_cpus_allowed <= 1);
2832
2833 WARN_ON_ONCE(!task_on_rq_queued(p));
2834 WARN_ON_ONCE(!dl_task(p));
2835
2836 return p;
2837 }
2838
2839 /* Locks the rq it finds */
2840 static struct rq *find_lock_later_rq(struct task_struct *task, struct rq *rq)
2841 {
2842 struct rq *later_rq = NULL;
2843 int tries;
2844 int cpu;
2845
2846 for (tries = 0; tries < DL_MAX_TRIES; tries++) {
2847 cpu = find_later_rq(task);
2848
2849 if ((cpu == -1) || (cpu == rq->cpu))
2850 break;
2851
2852 later_rq = cpu_rq(cpu);
2853
2854 if (!dl_task_is_earliest_deadline(task, later_rq)) {
2855 /*
2856 * Target rq has tasks of equal or earlier deadline,
2857 * retrying does not release any lock and is unlikely
2858 * to yield a different result.
2859 */
2860 later_rq = NULL;
2861 break;
2862 }
2863
2864 /* Retry if something changed. */
2865 if (double_lock_balance(rq, later_rq)) {
2866 /*
2867 * double_lock_balance had to release rq->lock; in the
2868 * meantime, the task may no longer be fit for migration.
2869 * Check the following to ensure that the task is
2870 * still suitable for migration:
2871 * 1. It is possible the task was scheduled,
2872 * migrate_disabled was set and then got preempted,
2873 * so we must check the task migration disable
2874 * flag.
2875 * 2. The CPU picked is in the task's affinity.
2876 * 3. For throttled task (dl_task_offline_migration),
2877 * check the following:
2878 * - the task is not on the rq anymore (it was
2879 * migrated)
2880 * - the task is not on CPU anymore
2881 * - the task is still a dl task
2882 * - the task is not queued on the rq anymore
2883 * 4. For the non-throttled task (push_dl_task), the
2884 * check to ensure that this task is still at the
2885 * head of the pushable tasks list is enough.
2886 */
2887 if (unlikely(is_migration_disabled(task) ||
2888 !cpumask_test_cpu(later_rq->cpu, &task->cpus_mask) ||
2889 (task->dl.dl_throttled &&
2890 (task_rq(task) != rq ||
2891 task_on_cpu(rq, task) ||
2892 !dl_task(task) ||
2893 !task_on_rq_queued(task))) ||
2894 (!task->dl.dl_throttled &&
2895 task != pick_next_pushable_dl_task(rq)))) {
2896
2897 double_unlock_balance(rq, later_rq);
2898 later_rq = NULL;
2899 break;
2900 }
2901 }
2902
2903 /*
2904 * If the rq we found has no -deadline task, or
2905 * its earliest one has a later deadline than our
2906 * task, the rq is a good one.
2907 */
2908 if (dl_task_is_earliest_deadline(task, later_rq))
2909 break;
2910
2911 /* Otherwise we try again. */
2912 double_unlock_balance(rq, later_rq);
2913 later_rq = NULL;
2914 }
2915
2916 return later_rq;
2917 }
2918
2919 /*
2920 * See if the non running -deadline tasks on this rq
2921 * can be sent to some other CPU where they can preempt
2922 * and start executing.
2923 */
2924 static int push_dl_task(struct rq *rq)
2925 {
2926 struct task_struct *next_task;
2927 struct rq *later_rq;
2928 int ret = 0;
2929
2930 next_task = pick_next_pushable_dl_task(rq);
2931 if (!next_task)
2932 return 0;
2933
2934 retry:
2935 /*
2936 * If next_task preempts rq->curr, and rq->curr
2937 * can move away, it makes sense to just reschedule
2938 * without going further in pushing next_task.
2939 */
2940 if (dl_task(rq->donor) &&
2941 dl_time_before(next_task->dl.deadline, rq->donor->dl.deadline) &&
2942 rq->curr->nr_cpus_allowed > 1) {
2943 resched_curr(rq);
2944 return 0;
2945 }
2946
2947 if (is_migration_disabled(next_task))
2948 return 0;
2949
2950 if (WARN_ON(next_task == rq->curr))
2951 return 0;
2952
2953 /* We might release rq lock */
2954 get_task_struct(next_task);
2955
2956 /* Will lock the rq it'll find */
2957 later_rq = find_lock_later_rq(next_task, rq);
2958 if (!later_rq) {
2959 struct task_struct *task;
2960
2961 /*
2962 * We must check all this again, since
2963 * find_lock_later_rq releases rq->lock and it is
2964 * then possible that next_task has migrated.
2965 */
2966 task = pick_next_pushable_dl_task(rq);
2967 if (task == next_task) {
2968 /*
2969 * The task is still there. We don't try
2970 * again, some other CPU will pull it when ready.
2971 */
2972 goto out;
2973 }
2974
2975 if (!task)
2976 /* No more tasks */
2977 goto out;
2978
2979 put_task_struct(next_task);
2980 next_task = task;
2981 goto retry;
2982 }
2983
2984 move_queued_task_locked(rq, later_rq, next_task);
2985 ret = 1;
2986
2987 resched_curr(later_rq);
2988
2989 double_unlock_balance(rq, later_rq);
2990
2991 out:
2992 put_task_struct(next_task);
2993
2994 return ret;
2995 }
2996
2997 static void push_dl_tasks(struct rq *rq)
2998 {
2999 /* push_dl_task() will return true if it moved a -deadline task */
3000 while (push_dl_task(rq))
3001 ;
3002 }
3003
3004 static void pull_dl_task(struct rq *this_rq)
3005 {
3006 int this_cpu = this_rq->cpu, cpu;
3007 struct task_struct *p, *push_task;
3008 bool resched = false;
3009 struct rq *src_rq;
3010 u64 dmin = LONG_MAX;
3011
3012 if (likely(!dl_overloaded(this_rq)))
3013 return;
3014
3015 /*
3016 * Match the barrier from dl_set_overloaded; this guarantees that if we
3017 * see overloaded we must also see the dlo_mask bit.
3018 */
3019 smp_rmb();
3020
3021 for_each_cpu(cpu, this_rq->rd->dlo_mask) {
3022 if (this_cpu == cpu)
3023 continue;
3024
3025 src_rq = cpu_rq(cpu);
3026
3027 /*
3028 * It looks racy, and it is! However, as in sched_rt.c,
3029 * we are fine with this.
3030 */
3031 if (this_rq->dl.dl_nr_running &&
3032 dl_time_before(this_rq->dl.earliest_dl.curr,
3033 src_rq->dl.earliest_dl.next))
3034 continue;
3035
3036 /* Might drop this_rq->lock */
3037 push_task = NULL;
3038 double_lock_balance(this_rq, src_rq);
3039
3040 /*
3041 * If there are no more pullable tasks on the
3042 * rq, we're done with it.
3043 */
3044 if (src_rq->dl.dl_nr_running <= 1)
3045 goto skip;
3046
3047 p = pick_earliest_pushable_dl_task(src_rq, this_cpu);
3048
3049 /*
3050 * We found a task to be pulled if:
3051 * - it preempts our current (if there's one),
3052 * - it will preempt the last one we pulled (if any).
3053 */
3054 if (p && dl_time_before(p->dl.deadline, dmin) &&
3055 dl_task_is_earliest_deadline(p, this_rq)) {
3056 WARN_ON(p == src_rq->curr);
3057 WARN_ON(!task_on_rq_queued(p));
3058
3059 /*
3060 * Then we pull iff p has actually an earlier
3061 * deadline than the current task of its runqueue.
3062 */
3063 if (dl_time_before(p->dl.deadline,
3064 src_rq->donor->dl.deadline))
3065 goto skip;
3066
3067 if (is_migration_disabled(p)) {
3068 push_task = get_push_task(src_rq);
3069 } else {
3070 move_queued_task_locked(src_rq, this_rq, p);
3071 dmin = p->dl.deadline;
3072 resched = true;
3073 }
3074
3075 /* Is there any other task even earlier? */
3076 }
3077 skip:
3078 double_unlock_balance(this_rq, src_rq);
3079
3080 if (push_task) {
3081 preempt_disable();
3082 raw_spin_rq_unlock(this_rq);
3083 stop_one_cpu_nowait(src_rq->cpu, push_cpu_stop,
3084 push_task, &src_rq->push_work);
3085 preempt_enable();
3086 raw_spin_rq_lock(this_rq);
3087 }
3088 }
3089
3090 if (resched)
3091 resched_curr(this_rq);
3092 }
3093
3094 /*
3095 * Since the task is not running and a reschedule is not going to happen
3096 * anytime soon on its runqueue, we try pushing it away now.
3097 */
3098 static void task_woken_dl(struct rq *rq, struct task_struct *p)
3099 {
3100 if (!task_on_cpu(rq, p) &&
3101 !test_tsk_need_resched(rq->curr) &&
3102 p->nr_cpus_allowed > 1 &&
3103 dl_task(rq->donor) &&
3104 (rq->curr->nr_cpus_allowed < 2 ||
3105 !dl_entity_preempt(&p->dl, &rq->donor->dl))) {
3106 push_dl_tasks(rq);
3107 }
3108 }
3109
3110 static void set_cpus_allowed_dl(struct task_struct *p,
3111 struct affinity_context *ctx)
3112 {
3113 struct root_domain *src_rd;
3114 struct rq *rq;
3115
3116 WARN_ON_ONCE(!dl_task(p));
3117
3118 rq = task_rq(p);
3119 src_rd = rq->rd;
3120 /*
3121 * Migrating a SCHED_DEADLINE task between exclusive
3122 * cpusets (different root_domains) entails a bandwidth
3123 * update. We already made space for us in the destination
3124 * domain (see cpuset_can_attach()).
3125 */
3126 if (!cpumask_intersects(src_rd->span, ctx->new_mask)) {
3127 struct dl_bw *src_dl_b;
3128
3129 src_dl_b = dl_bw_of(cpu_of(rq));
3130 /*
3131 * We now free resources of the root_domain we are migrating
3132 * off. In the worst case, sched_setattr() may temporarily fail
3133 * until we complete the update.
3134 */
3135 raw_spin_lock(&src_dl_b->lock);
3136 __dl_sub(src_dl_b, p->dl.dl_bw, dl_bw_cpus(task_cpu(p)));
3137 raw_spin_unlock(&src_dl_b->lock);
3138 }
3139
3140 set_cpus_allowed_common(p, ctx);
3141 }
3142
3143 /* Assumes rq->lock is held */
3144 static void rq_online_dl(struct rq *rq)
3145 {
3146 if (rq->dl.overloaded)
3147 dl_set_overload(rq);
3148
3149 if (rq->dl.dl_nr_running > 0)
3150 cpudl_set(&rq->rd->cpudl, rq->cpu, rq->dl.earliest_dl.curr);
3151 else
3152 cpudl_clear(&rq->rd->cpudl, rq->cpu, true);
3153 }
3154
3155 /* Assumes rq->lock is held */
3156 static void rq_offline_dl(struct rq *rq)
3157 {
3158 if (rq->dl.overloaded)
3159 dl_clear_overload(rq);
3160
3161 cpudl_clear(&rq->rd->cpudl, rq->cpu, false);
3162 }
3163
3164 void __init init_sched_dl_class(void)
3165 {
3166 unsigned int i;
3167
3168 for_each_possible_cpu(i)
3169 zalloc_cpumask_var_node(&per_cpu(local_cpu_mask_dl, i),
3170 GFP_KERNEL, cpu_to_node(i));
3171 }
3172
3173 /*
3174 * This function always returns a non-empty bitmap in @cpus. This is because
3175 * if a root domain has reserved bandwidth for DL tasks, the DL bandwidth
3176 * check will prevent CPU hotplug from deactivating all CPUs in that domain.
3177 */
3178 static void dl_get_task_effective_cpus(struct task_struct *p, struct cpumask *cpus)
3179 {
3180 const struct cpumask *hk_msk;
3181
3182 hk_msk = housekeeping_cpumask(HK_TYPE_DOMAIN);
3183 if (housekeeping_enabled(HK_TYPE_DOMAIN)) {
3184 if (!cpumask_intersects(p->cpus_ptr, hk_msk)) {
3185 /*
3186 * CPUs isolated by isolcpus="domain" always belong to
3187 * def_root_domain.
3188 */
3189 cpumask_andnot(cpus, cpu_active_mask, hk_msk);
3190 return;
3191 }
3192 }
3193
3194 /*
3195 * If a root domain holds a DL task, it must have active CPUs. So
3196 * active CPUs can always be found by walking up the task's cpuset
3197 * hierarchy up to the partition root.
3198 */
3199 cpuset_cpus_allowed_locked(p, cpus);
3200 }
3201
3202 /* The caller should hold cpuset_mutex */
3203 void dl_add_task_root_domain(struct task_struct *p)
3204 {
3205 struct rq_flags rf;
3206 struct rq *rq;
3207 struct dl_bw *dl_b;
3208 unsigned int cpu;
3209 struct cpumask *msk;
3210
3211 raw_spin_lock_irqsave(&p->pi_lock, rf.flags);
3212 if (!dl_task(p) || dl_entity_is_special(&p->dl)) {
3213 raw_spin_unlock_irqrestore(&p->pi_lock, rf.flags);
3214 return;
3215 }
3216
3217 msk = this_cpu_cpumask_var_ptr(local_cpu_mask_dl);
3218 dl_get_task_effective_cpus(p, msk);
3219 cpu = cpumask_first_and(cpu_active_mask, msk);
3220 BUG_ON(cpu >= nr_cpu_ids);
3221 rq = cpu_rq(cpu);
3222 dl_b = &rq->rd->dl_bw;
3223
3224 raw_spin_lock(&dl_b->lock);
3225 __dl_add(dl_b, p->dl.dl_bw, cpumask_weight(rq->rd->span));
3226 raw_spin_unlock(&dl_b->lock);
3227 raw_spin_unlock_irqrestore(&p->pi_lock, rf.flags);
3228 }
3229
3230 static void dl_server_add_bw(struct root_domain *rd, int cpu)
3231 {
3232 struct sched_dl_entity *dl_se;
3233
3234 dl_se = &cpu_rq(cpu)->fair_server;
3235 if (dl_server(dl_se) && cpu_active(cpu))
3236 __dl_add(&rd->dl_bw, dl_se->dl_bw, dl_bw_cpus(cpu));
3237
3238 #ifdef CONFIG_SCHED_CLASS_EXT
3239 dl_se = &cpu_rq(cpu)->ext_server;
3240 if (dl_server(dl_se) && cpu_active(cpu))
3241 __dl_add(&rd->dl_bw, dl_se->dl_bw, dl_bw_cpus(cpu));
3242 #endif
3243 }
3244
3245 static u64 dl_server_read_bw(int cpu)
3246 {
3247 u64 dl_bw = 0;
3248
3249 if (cpu_rq(cpu)->fair_server.dl_server)
3250 dl_bw += cpu_rq(cpu)->fair_server.dl_bw;
3251
3252 #ifdef CONFIG_SCHED_CLASS_EXT
3253 if (cpu_rq(cpu)->ext_server.dl_server)
3254 dl_bw += cpu_rq(cpu)->ext_server.dl_bw;
3255 #endif
3256
3257 return dl_bw;
3258 }
3259
3260 void dl_clear_root_domain(struct root_domain *rd)
3261 {
3262 int i;
3263
3264 guard(raw_spinlock_irqsave)(&rd->dl_bw.lock);
3265
3266 /*
3267 * Reset total_bw to zero and extra_bw to max_bw so that the next
3268 * loop will add the dl-servers' contributions back properly.
3269 */
3270 rd->dl_bw.total_bw = 0;
3271 for_each_cpu(i, rd->span)
3272 cpu_rq(i)->dl.extra_bw = cpu_rq(i)->dl.max_bw;
3273
3274 /*
3275 * dl_servers are not tasks. Since dl_add_task_root_domain ignores
3276 * them, we need to account for them here explicitly.
3277 */
3278 for_each_cpu(i, rd->span)
3279 dl_server_add_bw(rd, i);
3280 }
3281
3282 void dl_clear_root_domain_cpu(int cpu)
3283 {
3284 dl_clear_root_domain(cpu_rq(cpu)->rd);
3285 }
3286
3287 static void switched_from_dl(struct rq *rq, struct task_struct *p)
3288 {
3289 /*
3290 * task_non_contending() can start the "inactive timer" (if the 0-lag
3291 * time is in the future). If the task switches back to dl before
3292 * the "inactive timer" fires, it can continue to consume its current
3293 * runtime using its current deadline. If it stays outside of
3294 * SCHED_DEADLINE until the 0-lag time passes, inactive_task_timer()
3295 * will reset the task parameters.
3296 */
3297 if (task_on_rq_queued(p) && p->dl.dl_runtime)
3298 task_non_contending(&p->dl, false);
3299
3300 /*
3301 * In case a task is setscheduled out from SCHED_DEADLINE we need to
3302 * keep track of that on its cpuset (for correct bandwidth tracking).
3303 */
3304 dec_dl_tasks_cs(p);
3305
3306 if (!task_on_rq_queued(p)) {
3307 /*
3308 * Inactive timer is armed. However, p is leaving DEADLINE and
3309 * might migrate away from this rq while continuing to run on
3310 * some other class. We need to remove its contribution from
3311 * this rq running_bw now, or sub_rq_bw (below) will complain.
3312 */
3313 if (p->dl.dl_non_contending)
3314 sub_running_bw(&p->dl, &rq->dl);
3315 sub_rq_bw(&p->dl, &rq->dl);
3316 }
3317
3318 /*
3319 * We cannot use inactive_task_timer() to invoke sub_running_bw()
3320 * at the 0-lag time, because the task could have been migrated
3321 * while SCHED_OTHER in the meanwhile.
3322 */
3323 if (p->dl.dl_non_contending)
3324 p->dl.dl_non_contending = 0;
3325
3326 /*
3327 * Since this might be the only -deadline task on the rq,
3328 * this is the right place to try to pull some other one
3329 * from an overloaded CPU, if any.
3330 */
3331 if (!task_on_rq_queued(p) || rq->dl.dl_nr_running)
3332 return;
3333
3334 deadline_queue_pull_task(rq);
3335 }
3336
3337 /*
3338 * When switching to -deadline, we may overload the rq, then
3339 * we try to push someone off, if possible.
3340 */
3341 static void switched_to_dl(struct rq *rq, struct task_struct *p)
3342 {
3343 cancel_inactive_timer(&p->dl);
3344
3345 /*
3346 * In case a task is setscheduled to SCHED_DEADLINE we need to keep
3347 * track of that on its cpuset (for correct bandwidth tracking).
3348 */
3349 inc_dl_tasks_cs(p);
3350
3351 /* If p is not queued we will update its parameters at next wakeup. */
3352 if (!task_on_rq_queued(p)) {
3353 add_rq_bw(&p->dl, &rq->dl);
3354
3355 return;
3356 }
3357
3358 if (rq->donor != p) {
3359 if (p->nr_cpus_allowed > 1 && rq->dl.overloaded)
3360 deadline_queue_push_tasks(rq);
3361 if (dl_task(rq->donor))
3362 wakeup_preempt_dl(rq, p, 0);
3363 else
3364 resched_curr(rq);
3365 } else {
3366 update_dl_rq_load_avg(rq_clock_pelt(rq), rq, 0);
3367 }
3368 }
3369
3370 static u64 get_prio_dl(struct rq *rq, struct task_struct *p)
3371 {
3372 /*
3373 * Make sure to update current so we don't return a stale value.
3374 */
3375 if (task_current_donor(rq, p))
3376 update_curr_dl(rq);
3377
3378 return p->dl.deadline;
3379 }
3380
3381 /*
3382 * If the scheduling parameters of a -deadline task changed,
3383 * a push or pull operation might be needed.
3384 */
3385 static void prio_changed_dl(struct rq *rq, struct task_struct *p, u64 old_deadline)
3386 {
3387 if (!task_on_rq_queued(p))
3388 return;
3389
3390 if (p->dl.deadline == old_deadline)
3391 return;
3392
3393 if (dl_time_before(old_deadline, p->dl.deadline))
3394 deadline_queue_pull_task(rq);
3395
3396 if (task_current_donor(rq, p)) {
3397 /*
3398 * If we now have an earlier deadline task than p,
3399 * then reschedule, provided p is still on this
3400 * runqueue.
3401 */
3402 if (dl_time_before(rq->dl.earliest_dl.curr, p->dl.deadline))
3403 resched_curr(rq);
3404 } else {
3405 /*
3406 * Current may not be deadline in case p was throttled but we
3407 * have just replenished it (e.g. rt_mutex_setprio()).
3408 *
3409 * Otherwise, if p was given an earlier deadline, reschedule.
3410 */
3411 if (!dl_task(rq->curr) ||
3412 dl_time_before(p->dl.deadline, rq->curr->dl.deadline))
3413 resched_curr(rq);
3414 }
3415 }
3416
3417 #ifdef CONFIG_SCHED_CORE
3418 static int task_is_throttled_dl(struct task_struct *p, int cpu)
3419 {
3420 return p->dl.dl_throttled;
3421 }
3422 #endif
3423
3424 DEFINE_SCHED_CLASS(dl) = {
3425 .enqueue_task = enqueue_task_dl,
3426 .dequeue_task = dequeue_task_dl,
3427 .yield_task = yield_task_dl,
3428
3429 .wakeup_preempt = wakeup_preempt_dl,
3430
3431 .pick_task = pick_task_dl,
3432 .put_prev_task = put_prev_task_dl,
3433 .set_next_task = set_next_task_dl,
3434
3435 .balance = balance_dl,
3436 .select_task_rq = select_task_rq_dl,
3437 .migrate_task_rq = migrate_task_rq_dl,
3438 .set_cpus_allowed = set_cpus_allowed_dl,
3439 .rq_online = rq_online_dl,
3440 .rq_offline = rq_offline_dl,
3441 .task_woken = task_woken_dl,
3442 .find_lock_rq = find_lock_later_rq,
3443
3444 .task_tick = task_tick_dl,
3445 .task_fork = task_fork_dl,
3446
3447 .get_prio = get_prio_dl,
3448 .prio_changed = prio_changed_dl,
3449 .switched_from = switched_from_dl,
3450 .switched_to = switched_to_dl,
3451
3452 .update_curr = update_curr_dl,
3453 #ifdef CONFIG_SCHED_CORE
3454 .task_is_throttled = task_is_throttled_dl,
3455 #endif
3456 };
3457
3458 /*
3459 * Used for dl_bw check and update, used under sched_rt_handler()::mutex and
3460 * sched_domains_mutex.
3461 */
3462 u64 dl_cookie;
3463
3464 int sched_dl_global_validate(void)
3465 {
3466 u64 runtime = global_rt_runtime();
3467 u64 period = global_rt_period();
3468 u64 new_bw = to_ratio(period, runtime);
3469 u64 cookie = ++dl_cookie;
3470 struct dl_bw *dl_b;
3471 int cpu, cpus, ret = 0;
3472 unsigned long flags;
3473
3474 /*
3475 * Here we want to check that the bandwidth is not being set to a
3476 * value smaller than the currently allocated bandwidth in
3477 * any of the root_domains.
3478 */
	for_each_online_cpu(cpu) {
		rcu_read_lock_sched();

		if (dl_bw_visited(cpu, cookie))
			goto next;

		dl_b = dl_bw_of(cpu);
		cpus = dl_bw_cpus(cpu);

		raw_spin_lock_irqsave(&dl_b->lock, flags);
		if (new_bw * cpus < dl_b->total_bw)
			ret = -EBUSY;
		raw_spin_unlock_irqrestore(&dl_b->lock, flags);

next:
		rcu_read_unlock_sched();

		if (ret)
			break;
	}

	return ret;
}

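/*
 * Set up this runqueue's view of the global RT/DL bandwidth limit.
 *
 * Approximate, illustrative arithmetic (assuming the default limits of
 * 950000us runtime every 1000000us period, and assuming BW_SHIFT = 20,
 * RATIO_SHIFT = 8):
 *
 *	max_bw   ~= 0.95 << BW_SHIFT          ~= 996147
 *	bw_ratio ~= (1 / 0.95) << RATIO_SHIFT ~= 269
 */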
static void init_dl_rq_bw_ratio(struct dl_rq *dl_rq)
{
	if (global_rt_runtime() == RUNTIME_INF) {
		dl_rq->bw_ratio = 1 << RATIO_SHIFT;
		dl_rq->max_bw = dl_rq->extra_bw = 1 << BW_SHIFT;
	} else {
		dl_rq->bw_ratio = to_ratio(global_rt_runtime(),
					   global_rt_period()) >> (BW_SHIFT - RATIO_SHIFT);
		dl_rq->max_bw = dl_rq->extra_bw =
			to_ratio(global_rt_period(), global_rt_runtime());
	}
}

void sched_dl_do_global(void)
{
	u64 new_bw = -1;
	u64 cookie = ++dl_cookie;
	struct dl_bw *dl_b;
	int cpu;
	unsigned long flags;

	if (global_rt_runtime() != RUNTIME_INF)
		new_bw = to_ratio(global_rt_period(), global_rt_runtime());

	for_each_possible_cpu(cpu)
		init_dl_rq_bw_ratio(&cpu_rq(cpu)->dl);

	for_each_possible_cpu(cpu) {
		rcu_read_lock_sched();

		if (dl_bw_visited(cpu, cookie)) {
			rcu_read_unlock_sched();
			continue;
		}

		dl_b = dl_bw_of(cpu);

		raw_spin_lock_irqsave(&dl_b->lock, flags);
		dl_b->bw = new_bw;
		raw_spin_unlock_irqrestore(&dl_b->lock, flags);

		rcu_read_unlock_sched();
	}
}

/*
 * We must be sure that accepting a new task (or allowing the parameters of
 * an existing one to change) is consistent with the bandwidth constraints.
 * If so, this function also updates the currently allocated bandwidth
 * accordingly, to reflect the new situation.
 *
 * This function is called while holding p's rq->lock.
 */
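/*
 * Back-of-the-envelope example (illustrative values only): a task asking for
 * 10ms of runtime every 100ms of period carries
 * new_bw = to_ratio(100ms, 10ms), i.e. roughly 0.1 << BW_SHIFT (~104857).
 * Admission succeeds as long as adding that bandwidth keeps the
 * root_domain's total within the configured limit, scaled by the available
 * CPU capacity.
 */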
int sched_dl_overflow(struct task_struct *p, int policy,
		      const struct sched_attr *attr)
{
	u64 period = attr->sched_period ?: attr->sched_deadline;
	u64 runtime = attr->sched_runtime;
	u64 new_bw = dl_policy(policy) ? to_ratio(period, runtime) : 0;
	int cpus, err = -1, cpu = task_cpu(p);
	struct dl_bw *dl_b = dl_bw_of(cpu);
	unsigned long cap;

	if (attr->sched_flags & SCHED_FLAG_SUGOV)
		return 0;

	/* !deadline task may carry old deadline bandwidth */
	if (new_bw == p->dl.dl_bw && task_has_dl_policy(p))
		return 0;

	/*
	 * Whether a task enters, leaves, or stays -deadline but changes
	 * its parameters, we may need to update the total allocated
	 * bandwidth of the container accordingly.
	 */
	raw_spin_lock(&dl_b->lock);
	cpus = dl_bw_cpus(cpu);
	cap = dl_bw_capacity(cpu);

	if (dl_policy(policy) && !task_has_dl_policy(p) &&
	    !__dl_overflow(dl_b, cap, 0, new_bw)) {
		if (hrtimer_active(&p->dl.inactive_timer))
			__dl_sub(dl_b, p->dl.dl_bw, cpus);
		__dl_add(dl_b, new_bw, cpus);
		err = 0;
	} else if (dl_policy(policy) && task_has_dl_policy(p) &&
		   !__dl_overflow(dl_b, cap, p->dl.dl_bw, new_bw)) {
		/*
		 * XXX this is slightly incorrect: when the task
		 * utilization decreases, we should delay the total
		 * utilization change until the task's 0-lag point.
		 * But this would require setting the task's "inactive
		 * timer" even when the task is not inactive.
		 */
		__dl_sub(dl_b, p->dl.dl_bw, cpus);
		__dl_add(dl_b, new_bw, cpus);
		dl_change_utilization(p, new_bw);
		err = 0;
	} else if (!dl_policy(policy) && task_has_dl_policy(p)) {
		/*
		 * Do not decrease the total deadline utilization here,
		 * switched_from_dl() will take care of doing it at the
		 * correct (0-lag) time.
		 */
		err = 0;
	}
	raw_spin_unlock(&dl_b->lock);

	return err;
}

/*
 * This function initializes the sched_dl_entity of a task that is becoming
 * SCHED_DEADLINE.
 *
 * Only the static values are considered here; the actual runtime and the
 * absolute deadline will be properly calculated when the task is enqueued
 * for the first time with its new policy.
 */
void __setparam_dl(struct task_struct *p, const struct sched_attr *attr)
{
	struct sched_dl_entity *dl_se = &p->dl;

	dl_se->dl_runtime = attr->sched_runtime;
	dl_se->dl_deadline = attr->sched_deadline;
	dl_se->dl_period = attr->sched_period ?: dl_se->dl_deadline;
	dl_se->flags = attr->sched_flags & SCHED_DL_FLAGS;
	dl_se->dl_bw = to_ratio(dl_se->dl_period, dl_se->dl_runtime);
	dl_se->dl_density = to_ratio(dl_se->dl_deadline, dl_se->dl_runtime);
}
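
/*
 * Purely illustrative example (values are made up): a task asking for 5ms
 * of runtime every 20ms, with a 10ms relative deadline, would pass something
 * like the following to sched_setattr():
 *
 *	struct sched_attr attr = {
 *		.size		= sizeof(attr),
 *		.sched_policy	= SCHED_DEADLINE,
 *		.sched_runtime	=  5 * NSEC_PER_MSEC,
 *		.sched_deadline	= 10 * NSEC_PER_MSEC,
 *		.sched_period	= 20 * NSEC_PER_MSEC,
 *	};
 *
 * after which __setparam_dl() above stores roughly
 * dl_bw ~= 0.25 << BW_SHIFT and dl_density ~= 0.5 << BW_SHIFT.
 */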

void __getparam_dl(struct task_struct *p, struct sched_attr *attr, unsigned int flags)
{
	struct sched_dl_entity *dl_se = &p->dl;
	struct rq *rq = task_rq(p);
	u64 adj_deadline;

	attr->sched_priority = p->rt_priority;
	if (flags & SCHED_GETATTR_FLAG_DL_DYNAMIC) {
		guard(raw_spinlock_irq)(&rq->__lock);
		update_rq_clock(rq);
		if (task_current(rq, p))
			update_curr_dl(rq);

		attr->sched_runtime = dl_se->runtime;
		adj_deadline = dl_se->deadline - rq_clock(rq) + ktime_get_ns();
		attr->sched_deadline = adj_deadline;
	} else {
		attr->sched_runtime = dl_se->dl_runtime;
		attr->sched_deadline = dl_se->dl_deadline;
	}
	attr->sched_period = dl_se->dl_period;
	attr->sched_flags &= ~SCHED_DL_FLAGS;
	attr->sched_flags |= dl_se->flags;
}

/*
 * This function validates the new parameters of a -deadline task.
 * We require the deadline to be non-zero and greater than or equal to
 * the runtime, and the period to be either zero or greater than or
 * equal to the deadline. Furthermore, we have to be sure that user
 * parameters are above the internal resolution of 1us (we check
 * sched_runtime only since it is always the smaller one) and below
 * 2^63 ns (we have to check both sched_deadline and sched_period, as
 * the latter can be zero).
 */
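/*
 * For instance (illustrative values only): runtime = 5ms, deadline = 10ms,
 * period = 20ms is accepted; runtime = 500ns is rejected (below the
 * 1 << DL_SCALE resolution), and so is deadline = 20ms with period = 10ms
 * (period < deadline), or any period outside the
 * sched_deadline_period_{min,max}_us sysctl range.
 */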
bool __checkparam_dl(const struct sched_attr *attr)
{
	u64 period, max, min;

	/* special dl tasks don't actually use any parameter */
	if (attr->sched_flags & SCHED_FLAG_SUGOV)
		return true;

	/* deadline != 0 */
	if (attr->sched_deadline == 0)
		return false;

	/*
	 * Since we truncate DL_SCALE bits, make sure we're at least
	 * that big.
	 */
	if (attr->sched_runtime < (1ULL << DL_SCALE))
		return false;

	/*
	 * Since we use the MSB for wrap-around and sign issues, make
	 * sure it's not set (mind that period can be equal to zero).
	 */
	if (attr->sched_deadline & (1ULL << 63) ||
	    attr->sched_period & (1ULL << 63))
		return false;

	period = attr->sched_period;
	if (!period)
		period = attr->sched_deadline;

	/* runtime <= deadline <= period (if period != 0) */
	if (period < attr->sched_deadline ||
	    attr->sched_deadline < attr->sched_runtime)
		return false;

	max = (u64)READ_ONCE(sysctl_sched_dl_period_max) * NSEC_PER_USEC;
	min = (u64)READ_ONCE(sysctl_sched_dl_period_min) * NSEC_PER_USEC;

	if (period < min || period > max)
		return false;

	return true;
}

/*
 * This function clears the sched_dl_entity static params.
 */
static void __dl_clear_params(struct sched_dl_entity *dl_se)
{
	dl_se->dl_runtime = 0;
	dl_se->dl_deadline = 0;
	dl_se->dl_period = 0;
	dl_se->flags = 0;
	dl_se->dl_bw = 0;
	dl_se->dl_density = 0;

	dl_se->dl_throttled = 0;
	dl_se->dl_yielded = 0;
	dl_se->dl_non_contending = 0;
	dl_se->dl_overrun = 0;
	dl_se->dl_server = 0;
	dl_se->dl_defer = 0;
	dl_se->dl_defer_running = 0;
	dl_se->dl_defer_armed = 0;

#ifdef CONFIG_RT_MUTEXES
	dl_se->pi_se = dl_se;
#endif
}

void init_dl_entity(struct sched_dl_entity *dl_se)
{
	RB_CLEAR_NODE(&dl_se->rb_node);
	init_dl_task_timer(dl_se);
	init_dl_inactive_task_timer(dl_se);
	__dl_clear_params(dl_se);
}

bool dl_param_changed(struct task_struct *p, const struct sched_attr *attr)
{
	struct sched_dl_entity *dl_se = &p->dl;

	if (dl_se->dl_runtime != attr->sched_runtime ||
	    dl_se->dl_deadline != attr->sched_deadline ||
	    dl_se->dl_period != attr->sched_period ||
	    dl_se->flags != (attr->sched_flags & SCHED_DL_FLAGS))
		return true;

	return false;
}

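/*
 * Check whether the DEADLINE bandwidth already admitted on the current set
 * of CPUs would still fit if the cpumask were shrunk to @trial. Rough
 * example: shrinking from a 4-CPU mask to a 2-CPU mask is refused when the
 * admitted bandwidth needs more capacity than the 2 remaining CPUs can
 * provide.
 */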
int dl_cpuset_cpumask_can_shrink(const struct cpumask *cur,
				 const struct cpumask *trial)
{
	unsigned long flags, cap;
	struct dl_bw *cur_dl_b;
	int ret = 1;

	rcu_read_lock_sched();
	cur_dl_b = dl_bw_of(cpumask_any(cur));
	cap = __dl_bw_capacity(trial);
	raw_spin_lock_irqsave(&cur_dl_b->lock, flags);
	if (__dl_overflow(cur_dl_b, cap, 0, 0))
		ret = 0;
	raw_spin_unlock_irqrestore(&cur_dl_b->lock, flags);
	rcu_read_unlock_sched();

	return ret;
}

enum dl_bw_request {
	dl_bw_req_deactivate = 0,
	dl_bw_req_alloc,
	dl_bw_req_free
};

static int dl_bw_manage(enum dl_bw_request req, int cpu, u64 dl_bw)
{
	unsigned long flags, cap;
	struct dl_bw *dl_b;
	bool overflow = 0;
	u64 dl_server_bw = 0;

	rcu_read_lock_sched();
	dl_b = dl_bw_of(cpu);
	raw_spin_lock_irqsave(&dl_b->lock, flags);

	cap = dl_bw_capacity(cpu);
	switch (req) {
	case dl_bw_req_free:
		__dl_sub(dl_b, dl_bw, dl_bw_cpus(cpu));
		break;
	case dl_bw_req_alloc:
		overflow = __dl_overflow(dl_b, cap, 0, dl_bw);

		if (!overflow) {
			/*
			 * We reserve space in the destination
			 * root_domain, as we can't fail after this point.
			 * We will free resources in the source root_domain
			 * later on (see set_cpus_allowed_dl()).
			 */
			__dl_add(dl_b, dl_bw, dl_bw_cpus(cpu));
		}
		break;
	case dl_bw_req_deactivate:
		/*
		 * cpu is not off yet, but we need to do the math by
		 * considering it off already (i.e., what would happen if we
		 * turned cpu off?).
		 */
		cap -= arch_scale_cpu_capacity(cpu);

		/*
		 * cpu is going offline and NORMAL and EXT tasks will be
		 * moved away from it. We can thus discount the dl_server
		 * bandwidth contribution, as it won't need to service
		 * tasks once the cpu is off.
		 */
		dl_server_bw = dl_server_read_bw(cpu);

		/*
		 * Not much to check if no DEADLINE bandwidth is present.
		 * dl_servers we can discount, as tasks will be moved out of
		 * the offlined CPUs anyway.
		 */
		if (dl_b->total_bw - dl_server_bw > 0) {
			/*
			 * Leaving at least one CPU for DEADLINE tasks seems a
			 * wise thing to do. As said above, cpu is not offline
			 * yet, so account for that.
			 */
			if (dl_bw_cpus(cpu) - 1)
				overflow = __dl_overflow(dl_b, cap, dl_server_bw, 0);
			else
				overflow = 1;
		}

		break;
	}

	raw_spin_unlock_irqrestore(&dl_b->lock, flags);
	rcu_read_unlock_sched();

	return overflow ? -EBUSY : 0;
}

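/*
 * Called when @cpu is about to go offline: fail with -EBUSY if the DEADLINE
 * bandwidth admitted so far would no longer fit on the remaining CPUs.
 * Rough example: two CPUs each running a 60% DEADLINE task cannot lose one
 * of them, since 120% of utilization does not fit on a single CPU.
 */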
int dl_bw_deactivate(int cpu)
{
	return dl_bw_manage(dl_bw_req_deactivate, cpu, 0);
}

int dl_bw_alloc(int cpu, u64 dl_bw)
{
	return dl_bw_manage(dl_bw_req_alloc, cpu, dl_bw);
}

void dl_bw_free(int cpu, u64 dl_bw)
{
	dl_bw_manage(dl_bw_req_free, cpu, dl_bw);
}

void print_dl_stats(struct seq_file *m, int cpu)
{
	print_dl_rq(m, cpu, &cpu_rq(cpu)->dl);
}
