Lines Matching +full:timer +full:- +full:cannot +full:- +full:wake +full:- +full:cpu
1 // SPDX-License-Identifier: GPL-2.0
24 * guard against timer DoS.
59 return dl_se->dl_server; in dl_server()
75 struct rq *rq = dl_se->rq; in rq_of_dl_se()
85 return &rq_of_dl_se(dl_se)->dl; in dl_rq_of_se()
90 return !RB_EMPTY_NODE(&dl_se->rb_node); in on_dl_rq()
96 return dl_se->pi_se; in pi_of()
120 return &cpu_rq(i)->rd->dl_bw; in dl_bw_of()
125 struct root_domain *rd = cpu_rq(i)->rd; in dl_bw_cpus()
131 if (cpumask_subset(rd->span, cpu_active_mask)) in dl_bw_cpus()
132 return cpumask_weight(rd->span); in dl_bw_cpus()
136 for_each_cpu_and(i, rd->span, cpu_active_mask) in dl_bw_cpus()
154 * XXX Fix: If 'rq->rd == def_root_domain' perform AC against capacity
155 * of the CPU the task is running on rather than rd's \Sum CPU capacity.
166 return __dl_bw_capacity(cpu_rq(i)->rd->span); in dl_bw_capacity()
170 static inline bool dl_bw_visited(int cpu, u64 gen) in dl_bw_visited() argument
172 struct root_domain *rd = cpu_rq(cpu)->rd; in dl_bw_visited()
174 if (rd->visit_gen == gen) in dl_bw_visited()
177 rd->visit_gen = gen; in dl_bw_visited()
189 for_each_cpu_and(i, rd->span, cpu_active_mask) { in __dl_update()
192 rq->dl.extra_bw += bw; in __dl_update()
198 return &cpu_rq(i)->dl.dl_bw; in dl_bw_of()
211 static inline bool dl_bw_visited(int cpu, u64 gen) in dl_bw_visited() argument
221 dl->extra_bw += bw; in __dl_update()
228 dl_b->total_bw -= tsk_bw; in __dl_sub()
235 dl_b->total_bw += tsk_bw; in __dl_add()
236 __dl_update(dl_b, -((s32)tsk_bw / cpus)); in __dl_add()
242 return dl_b->bw != -1 && in __dl_overflow()
243 cap_scale(dl_b->bw, cap) < dl_b->total_bw - old_bw + new_bw; in __dl_overflow()
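/*
 * Illustrative sketch of the admission test above (not the kernel's code;
 * SCHED_CAPACITY_SHIFT is assumed to be 10 and bandwidths to be in
 * 2^BW_SHIFT fixed point): a bandwidth change is rejected when the new
 * total utilization would exceed the capacity-scaled limit, while a limit
 * of -1 means "unconstrained".
 */
static bool dl_would_overflow(u64 limit_bw, unsigned long cap,
                              u64 total_bw, u64 old_bw, u64 new_bw)
{
        if (limit_bw == (u64)-1)
                return false;   /* no global limit configured */

        /* scale the limit by the relative CPU capacity, as cap_scale() does */
        return ((limit_bw * cap) >> 10) < total_bw - old_bw + new_bw;
}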
249 u64 old = dl_rq->running_bw; in __add_running_bw()
252 dl_rq->running_bw += dl_bw; in __add_running_bw()
253 SCHED_WARN_ON(dl_rq->running_bw < old); /* overflow */ in __add_running_bw()
254 SCHED_WARN_ON(dl_rq->running_bw > dl_rq->this_bw); in __add_running_bw()
262 u64 old = dl_rq->running_bw; in __sub_running_bw()
265 dl_rq->running_bw -= dl_bw; in __sub_running_bw()
266 SCHED_WARN_ON(dl_rq->running_bw > old); /* underflow */ in __sub_running_bw()
267 if (dl_rq->running_bw > old) in __sub_running_bw()
268 dl_rq->running_bw = 0; in __sub_running_bw()
276 u64 old = dl_rq->this_bw; in __add_rq_bw()
279 dl_rq->this_bw += dl_bw; in __add_rq_bw()
280 SCHED_WARN_ON(dl_rq->this_bw < old); /* overflow */ in __add_rq_bw()
286 u64 old = dl_rq->this_bw; in __sub_rq_bw()
289 dl_rq->this_bw -= dl_bw; in __sub_rq_bw()
290 SCHED_WARN_ON(dl_rq->this_bw > old); /* underflow */ in __sub_rq_bw()
291 if (dl_rq->this_bw > old) in __sub_rq_bw()
292 dl_rq->this_bw = 0; in __sub_rq_bw()
293 SCHED_WARN_ON(dl_rq->running_bw > dl_rq->this_bw); in __sub_rq_bw()
300 __add_rq_bw(dl_se->dl_bw, dl_rq); in add_rq_bw()
307 __sub_rq_bw(dl_se->dl_bw, dl_rq); in sub_rq_bw()
314 __add_running_bw(dl_se->dl_bw, dl_rq); in add_running_bw()
321 __sub_running_bw(dl_se->dl_bw, dl_rq); in sub_running_bw()
328 WARN_ON_ONCE(p->dl.flags & SCHED_FLAG_SUGOV); in dl_change_utilization()
334 if (p->dl.dl_non_contending) { in dl_change_utilization()
335 sub_running_bw(&p->dl, &rq->dl); in dl_change_utilization()
336 p->dl.dl_non_contending = 0; in dl_change_utilization()
338 * If the timer handler is currently running and the in dl_change_utilization()
339 * timer cannot be canceled, inactive_task_timer() in dl_change_utilization()
344 if (hrtimer_try_to_cancel(&p->dl.inactive_timer) == 1) in dl_change_utilization()
347 __sub_rq_bw(p->dl.dl_bw, &rq->dl); in dl_change_utilization()
348 __add_rq_bw(new_bw, &rq->dl); in dl_change_utilization()
354 * The utilization of a task cannot be immediately removed from
356 * Instead, we have to wait for the so-called "0-lag time".
358 * If a task blocks before the "0-lag time", a timer (the inactive
359 * timer) is armed, and running_bw is decreased when the timer
362 * If the task wakes up again before the inactive timer fires,
363 * the timer is canceled, whereas if the task wakes up after the
364 * inactive timer fired (and running_bw has been decreased) the
367 * is used to avoid race conditions between the inactive timer handler
373 * "ACTIVE non contending" task is a blocked task for which the "0-lag time"
374 * has not passed yet. An "INACTIVE" task is a task for which the "0-lag"
376 * +------------------+
378 * +------------------>+ contending |
380 * | +----+------+------+
383 * +--------+-------+ | |
384 * | | t >= 0-lag | | wakeup
385 * | INACTIVE |<---------------+ |
387 * +--------+-------+ | |
389 * | t < 0-lag | |
392 * | +----+------+------+
394 * +-------------------+ |
395 * inactive timer | non contending |
396 * fired +------------------+
399 * blocks, and checks if the 0-lag time already passed or
401 * in the second case, it arms the inactive timer).
409 struct hrtimer *timer = &dl_se->inactive_timer; in task_non_contending() local
411 struct dl_rq *dl_rq = &rq->dl; in task_non_contending()
415 * If this is a non-deadline task that has been boosted, in task_non_contending()
418 if (dl_se->dl_runtime == 0) in task_non_contending()
424 WARN_ON(dl_se->dl_non_contending); in task_non_contending()
426 zerolag_time = dl_se->deadline - in task_non_contending()
427 div64_long((dl_se->runtime * dl_se->dl_period), in task_non_contending()
428 dl_se->dl_runtime); in task_non_contending()
431 * Using relative times instead of the absolute "0-lag time" in task_non_contending()
434 zerolag_time -= rq_clock(rq); in task_non_contending()
437 * If the "0-lag time" already passed, decrease the active in task_non_contending()
438 * utilization now, instead of starting a timer in task_non_contending()
440 if ((zerolag_time < 0) || hrtimer_active(&dl_se->inactive_timer)) { in task_non_contending()
449 if (!dl_task(p) || READ_ONCE(p->__state) == TASK_DEAD) { in task_non_contending()
452 if (READ_ONCE(p->__state) == TASK_DEAD) in task_non_contending()
453 sub_rq_bw(dl_se, &rq->dl); in task_non_contending()
454 raw_spin_lock(&dl_b->lock); in task_non_contending()
455 __dl_sub(dl_b, dl_se->dl_bw, dl_bw_cpus(task_cpu(p))); in task_non_contending()
456 raw_spin_unlock(&dl_b->lock); in task_non_contending()
464 dl_se->dl_non_contending = 1; in task_non_contending()
468 hrtimer_start(timer, ns_to_ktime(zerolag_time), HRTIMER_MODE_REL_HARD); in task_non_contending()
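/*
 * A minimal sketch of the 0-lag computation used by task_non_contending()
 * above (illustration only, not the kernel's exact code or types): the
 * 0-lag time is the absolute deadline minus the time needed to consume the
 * remaining runtime at the reserved rate dl_runtime/dl_period. If it is
 * already in the past, running_bw can be decreased immediately instead of
 * arming the inactive timer.
 */
static s64 zero_lag_delta(u64 deadline, u64 runtime,
                          u64 dl_period, u64 dl_runtime, u64 now)
{
        /* time still owed to the task at its reserved rate */
        u64 owed = (runtime * dl_period) / dl_runtime;

        /* negative result: the 0-lag time already passed */
        return (s64)(deadline - owed - now);
}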
476 * If this is a non-deadline task that has been boosted, in task_contending()
479 if (dl_se->dl_runtime == 0) in task_contending()
485 if (dl_se->dl_non_contending) { in task_contending()
486 dl_se->dl_non_contending = 0; in task_contending()
488 * If the timer handler is currently running and the in task_contending()
489 * timer cannot be canceled, inactive_task_timer() in task_contending()
494 if (hrtimer_try_to_cancel(&dl_se->inactive_timer) == 1) { in task_contending()
503 * when the "inactive timer" fired). in task_contending()
512 return rb_first_cached(&dl_rq->root) == &dl_se->rb_node; in is_leftmost()
519 raw_spin_lock_init(&dl_b->lock); in init_dl_bw()
521 dl_b->bw = -1; in init_dl_bw()
523 dl_b->bw = to_ratio(global_rt_period(), global_rt_runtime()); in init_dl_bw()
524 dl_b->total_bw = 0; in init_dl_bw()
529 dl_rq->root = RB_ROOT_CACHED; in init_dl_rq()
532 /* zero means no -deadline tasks */ in init_dl_rq()
533 dl_rq->earliest_dl.curr = dl_rq->earliest_dl.next = 0; in init_dl_rq()
535 dl_rq->overloaded = 0; in init_dl_rq()
536 dl_rq->pushable_dl_tasks_root = RB_ROOT_CACHED; in init_dl_rq()
538 init_dl_bw(&dl_rq->dl_bw); in init_dl_rq()
541 dl_rq->running_bw = 0; in init_dl_rq()
542 dl_rq->this_bw = 0; in init_dl_rq()
550 return atomic_read(&rq->rd->dlo_count); in dl_overloaded()
555 if (!rq->online) in dl_set_overload()
558 cpumask_set_cpu(rq->cpu, rq->rd->dlo_mask); in dl_set_overload()
566 atomic_inc(&rq->rd->dlo_count); in dl_set_overload()
571 if (!rq->online) in dl_clear_overload()
574 atomic_dec(&rq->rd->dlo_count); in dl_clear_overload()
575 cpumask_clear_cpu(rq->cpu, rq->rd->dlo_mask); in dl_clear_overload()
583 return dl_entity_preempt(&__node_2_pdl(a)->dl, &__node_2_pdl(b)->dl); in __pushable_less()
588 return !RB_EMPTY_ROOT(&rq->dl.pushable_dl_tasks_root.rb_root); in has_pushable_dl_tasks()
592 * The list of pushable -deadline tasks is not a plist, like in
593 * sched_rt.c, it is an rb-tree with tasks ordered by deadline.
599 WARN_ON_ONCE(!RB_EMPTY_NODE(&p->pushable_dl_tasks)); in enqueue_pushable_dl_task()
601 leftmost = rb_add_cached(&p->pushable_dl_tasks, in enqueue_pushable_dl_task()
602 &rq->dl.pushable_dl_tasks_root, in enqueue_pushable_dl_task()
605 rq->dl.earliest_dl.next = p->dl.deadline; in enqueue_pushable_dl_task()
607 if (!rq->dl.overloaded) { in enqueue_pushable_dl_task()
609 rq->dl.overloaded = 1; in enqueue_pushable_dl_task()
615 struct dl_rq *dl_rq = &rq->dl; in dequeue_pushable_dl_task()
616 struct rb_root_cached *root = &dl_rq->pushable_dl_tasks_root; in dequeue_pushable_dl_task()
619 if (RB_EMPTY_NODE(&p->pushable_dl_tasks)) in dequeue_pushable_dl_task()
622 leftmost = rb_erase_cached(&p->pushable_dl_tasks, root); in dequeue_pushable_dl_task()
624 dl_rq->earliest_dl.next = __node_2_pdl(leftmost)->dl.deadline; in dequeue_pushable_dl_task()
626 RB_CLEAR_NODE(&p->pushable_dl_tasks); in dequeue_pushable_dl_task()
628 if (!has_pushable_dl_tasks(rq) && rq->dl.overloaded) { in dequeue_pushable_dl_task()
630 rq->dl.overloaded = 0; in dequeue_pushable_dl_task()
638 return rq->online && dl_task(prev); in need_pull_dl_task()
652 queue_balance_callback(rq, &per_cpu(dl_push_head, rq->cpu), push_dl_tasks); in deadline_queue_push_tasks()
657 queue_balance_callback(rq, &per_cpu(dl_pull_head, rq->cpu), pull_dl_task); in deadline_queue_pull_task()
669 int cpu; in dl_task_offline_migration() local
672 * If we cannot preempt any rq, fall back to pick any in dl_task_offline_migration()
673 * online CPU: in dl_task_offline_migration()
675 cpu = cpumask_any_and(cpu_active_mask, p->cpus_ptr); in dl_task_offline_migration()
676 if (cpu >= nr_cpu_ids) { in dl_task_offline_migration()
678 * Failed to find any suitable CPU. in dl_task_offline_migration()
688 cpu = cpumask_any(cpu_active_mask); in dl_task_offline_migration()
690 later_rq = cpu_rq(cpu); in dl_task_offline_migration()
694 if (p->dl.dl_non_contending || p->dl.dl_throttled) { in dl_task_offline_migration()
696 * Inactive timer is armed (or callback is running, but in dl_task_offline_migration()
701 sub_running_bw(&p->dl, &rq->dl); in dl_task_offline_migration()
702 sub_rq_bw(&p->dl, &rq->dl); in dl_task_offline_migration()
704 add_rq_bw(&p->dl, &later_rq->dl); in dl_task_offline_migration()
705 add_running_bw(&p->dl, &later_rq->dl); in dl_task_offline_migration()
707 sub_rq_bw(&p->dl, &rq->dl); in dl_task_offline_migration()
708 add_rq_bw(&p->dl, &later_rq->dl); in dl_task_offline_migration()
716 dl_b = &rq->rd->dl_bw; in dl_task_offline_migration()
717 raw_spin_lock(&dl_b->lock); in dl_task_offline_migration()
718 __dl_sub(dl_b, p->dl.dl_bw, cpumask_weight(rq->rd->span)); in dl_task_offline_migration()
719 raw_spin_unlock(&dl_b->lock); in dl_task_offline_migration()
721 dl_b = &later_rq->rd->dl_bw; in dl_task_offline_migration()
722 raw_spin_lock(&dl_b->lock); in dl_task_offline_migration()
723 __dl_add(dl_b, p->dl.dl_bw, cpumask_weight(later_rq->rd->span)); in dl_task_offline_migration()
724 raw_spin_unlock(&dl_b->lock); in dl_task_offline_migration()
726 set_task_cpu(p, later_rq->cpu); in dl_task_offline_migration()
772 /* for non-boosted task, pi_of(dl_se) == dl_se */ in replenish_dl_new_period()
773 dl_se->deadline = rq_clock(rq) + pi_of(dl_se)->dl_deadline; in replenish_dl_new_period()
774 dl_se->runtime = pi_of(dl_se)->dl_runtime; in replenish_dl_new_period()
780 * - the absolute deadline of the entity has to be placed at
782 * - the runtime of the entity has to be set to the maximum value.
784 * The capability of specifying such event is useful whenever a -deadline
795 WARN_ON(dl_time_before(rq_clock(rq), dl_se->deadline)); in setup_new_dl_entity()
798 * We are racing with the deadline timer. So, do nothing because in setup_new_dl_entity()
799 * the deadline timer handler will take care of properly recharging in setup_new_dl_entity()
802 if (dl_se->dl_throttled) in setup_new_dl_entity()
836 WARN_ON_ONCE(pi_of(dl_se)->dl_runtime <= 0); in replenish_dl_entity()
839 * This could be the case for a !-dl task that is boosted. in replenish_dl_entity()
842 if (dl_se->dl_deadline == 0) in replenish_dl_entity()
845 if (dl_se->dl_yielded && dl_se->runtime > 0) in replenish_dl_entity()
846 dl_se->runtime = 0; in replenish_dl_entity()
854 while (dl_se->runtime <= 0) { in replenish_dl_entity()
855 dl_se->deadline += pi_of(dl_se)->dl_period; in replenish_dl_entity()
856 dl_se->runtime += pi_of(dl_se)->dl_runtime; in replenish_dl_entity()
861 * the future" with respect to rq->clock. If it's in replenish_dl_entity()
868 if (dl_time_before(dl_se->deadline, rq_clock(rq))) { in replenish_dl_entity()
873 if (dl_se->dl_yielded) in replenish_dl_entity()
874 dl_se->dl_yielded = 0; in replenish_dl_entity()
875 if (dl_se->dl_throttled) in replenish_dl_entity()
876 dl_se->dl_throttled = 0; in replenish_dl_entity()
880 * Here we check if --at time t-- an entity (which is probably being
891 * Documentation/scheduler/sched-deadline.rst for more information).
895 * runtime / (deadline - t) > dl_runtime / dl_deadline ,
916 * and (deadline - t), since t is rq->clock, is the time left in dl_entity_overflow()
925 left = (pi_of(dl_se)->dl_deadline >> DL_SCALE) * (dl_se->runtime >> DL_SCALE); in dl_entity_overflow()
926 right = ((dl_se->deadline - t) >> DL_SCALE) * in dl_entity_overflow()
927 (pi_of(dl_se)->dl_runtime >> DL_SCALE); in dl_entity_overflow()
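/*
 * Sketch of the shifted cross-multiplication above (illustrative, not the
 * kernel's code; DL_SCALE is assumed to be 10). Checking
 *     runtime / (deadline - t) > dl_runtime / dl_deadline
 * without divisions means comparing runtime * dl_deadline against
 * dl_runtime * (deadline - t); both products can overflow 64 bits, so each
 * operand is first shifted right by DL_SCALE, trading a little precision
 * for a safe comparison.
 */
static bool cbs_bandwidth_overflow(u64 runtime, u64 rel_deadline,
                                   u64 dl_runtime, u64 dl_deadline)
{
        u64 left  = (dl_deadline  >> 10) * (runtime    >> 10);
        u64 right = (rel_deadline >> 10) * (dl_runtime >> 10);

        /* true: the current (runtime, deadline) pair would exceed the reservation */
        return right < left;
}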
933 * Revised wakeup rule [1]: For self-suspending tasks, rather than
934 * re-initializing task's runtime and deadline, the revised wakeup
939 * runtime / (deadline - t) > dl_runtime / dl_deadline
942 * runtime = (dl_runtime / dl_deadline) * (deadline - t)
948 * bandwidth server revisited. SIGBED Rev. 11, 4 (January 2015), 19-24.
953 u64 laxity = dl_se->deadline - rq_clock(rq); in update_dl_revised_wakeup()
961 WARN_ON(dl_time_before(dl_se->deadline, rq_clock(rq))); in update_dl_revised_wakeup()
963 dl_se->runtime = (dl_se->dl_density * laxity) >> BW_SHIFT; in update_dl_revised_wakeup()
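/*
 * Worked example for the revised wakeup rule above (illustrative numbers
 * and helper, not kernel code; BW_SHIFT assumed to be 20): dl_density
 * holds dl_runtime/dl_deadline in 2^BW_SHIFT fixed point, so the refreshed
 * runtime is the remaining laxity scaled by that density. With
 * dl_runtime = 10ms, dl_deadline = 100ms and a wakeup 40ms before the old
 * deadline, the task gets roughly 4ms of runtime back.
 */
static u64 revised_runtime(u64 dl_density, u64 laxity_ns)
{
        return (dl_density * laxity_ns) >> 20;
}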
979 return dl_se->dl_deadline == dl_se->dl_period; in dl_is_implicit()
984 * might need to be updated. This is done by a CBS wake up rule. There are two
1016 if (dl_time_before(dl_se->deadline, rq_clock(rq)) || in update_dl_entity()
1020 !dl_time_before(dl_se->deadline, rq_clock(rq)) && in update_dl_entity()
1032 return dl_se->deadline - dl_se->dl_deadline + dl_se->dl_period; in dl_next_period()
1038 * set the bandwidth replenishment timer to the replenishment instant
1041 * Notice that it is important for the caller to know if the timer
1047 struct hrtimer *timer = &dl_se->dl_timer; in start_dl_timer() local
1056 * We want the timer to fire at the deadline, but considering in start_dl_timer()
1057 * that it is actually coming from rq->clock and not from in start_dl_timer()
1061 now = hrtimer_cb_get_time(timer); in start_dl_timer()
1062 delta = ktime_to_ns(now) - rq_clock(rq); in start_dl_timer()
1068 * start the timer in the past! in start_dl_timer()
1078 * harmless because we're holding task_rq()->lock, therefore the timer in start_dl_timer()
1082 if (!hrtimer_is_queued(timer)) { in start_dl_timer()
1085 hrtimer_start(timer, act, HRTIMER_MODE_ABS_HARD); in start_dl_timer()
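/*
 * Sketch of the clock-domain fixup performed above (illustrative helper,
 * not the kernel's exact sequence): the replenishment instant is expressed
 * in rq_clock() time, while the hrtimer expires on its own clock base, so
 * the current offset between the two is added before arming the timer.
 */
static ktime_t dl_timer_expiry(struct hrtimer *timer,
                               u64 next_period_rq_ns, u64 rq_clock_ns)
{
        ktime_t act = ns_to_ktime(next_period_rq_ns);
        s64 delta = ktime_to_ns(hrtimer_cb_get_time(timer)) - rq_clock_ns;

        /* translate from rq_clock() time into the hrtimer's time base */
        return ktime_add_ns(act, delta);
}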
1100 * Nothing relies on rq->lock after this, so it's safe to drop in __push_dl_task()
1101 * rq->lock. in __push_dl_task()
1111 * This is the bandwidth enforcement timer callback. If here, we know
1112 * a task is not on its dl_rq, since the fact that the timer was running
1123 static enum hrtimer_restart dl_task_timer(struct hrtimer *timer) in dl_task_timer() argument
1125 struct sched_dl_entity *dl_se = container_of(timer, in dl_task_timer()
1137 if (dl_se->dl_throttled) { in dl_task_timer()
1141 if (dl_se->server_has_tasks(dl_se)) { in dl_task_timer()
1173 * Spurious timer due to start_dl_timer() race; or we already received in dl_task_timer()
1176 if (!dl_se->dl_throttled) in dl_task_timer()
1183 * If the throttle happened during sched-out; like: in dl_task_timer()
1191 * prev->on_rq = 0; in dl_task_timer()
1194 * but do not enqueue -- wait for our wakeup to do that. in dl_task_timer()
1202 if (unlikely(!rq->online)) { in dl_task_timer()
1221 if (dl_task(rq->curr)) in dl_task_timer()
1242 struct hrtimer *timer = &dl_se->dl_timer; in init_dl_task_timer() local
1244 hrtimer_init(timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL_HARD); in init_dl_task_timer()
1245 timer->function = dl_task_timer; in init_dl_task_timer()
1251 * cannot use the runtime, and so it replenishes the task. This rule
1263 * task and set the replenishing timer to the beginning of the next period, in dl_check_constrained_dl()
1270 if (dl_time_before(dl_se->deadline, rq_clock(rq)) && in dl_check_constrained_dl()
1274 dl_se->dl_throttled = 1; in dl_check_constrained_dl()
1275 if (dl_se->runtime > 0) in dl_check_constrained_dl()
1276 dl_se->runtime = 0; in dl_check_constrained_dl()
1283 return (dl_se->runtime <= 0); in dl_runtime_exceeded()
1288 * GRUB reclaiming algorithm, the runtime is not decreased as "dq = -dt",
1289 * but as "dq = -(max{u, (Umax - Uinact - Uextra)} / Umax) dt",
1291 * utilization, Uinact is the (per-runqueue) inactive utilization, computed
1295 * Since rq->dl.running_bw and rq->dl.this_bw contain utilizations multiplied
1297 * Since rq->dl.bw_ratio contains 1 / Umax multiplied by 2^RATIO_SHIFT, dl_bw
1298 * is multiplied by rq->dl.bw_ratio and shifted right by RATIO_SHIFT.
1300 * larger than 2^(64 - 20 - 8), which is more than 64 seconds. So, overflow is
1306 u64 u_inact = rq->dl.this_bw - rq->dl.running_bw; /* Utot - Uact */ in grub_reclaim()
1309 * Instead of computing max{u, (u_max - u_inact - u_extra)}, we in grub_reclaim()
1310 * compare u_inact + u_extra with u_max - u, because u_inact + u_extra in grub_reclaim()
1311 * can be larger than u_max. So, u_max - u_inact - u_extra would be in grub_reclaim()
1314 if (u_inact + rq->dl.extra_bw > rq->dl.max_bw - dl_se->dl_bw) in grub_reclaim()
1315 u_act = dl_se->dl_bw; in grub_reclaim()
1317 u_act = rq->dl.max_bw - u_inact - rq->dl.extra_bw; in grub_reclaim()
1319 u_act = (u_act * rq->dl.bw_ratio) >> RATIO_SHIFT; in grub_reclaim()
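/*
 * Sketch of how the reclaimed fraction computed above is meant to be
 * applied (illustrative; the listing is truncated here and this is not the
 * kernel's code): with utilizations in 2^BW_SHIFT fixed point (BW_SHIFT
 * assumed to be 20) and u_act already containing the 1/Umax factor via
 * bw_ratio, the consumed runtime becomes delta * u_act >> BW_SHIFT,
 * i.e. "dq = -(u_act / Umax) dt" from the comment above.
 */
static u64 grub_scaled_delta(u64 delta, u64 u_act)
{
        return (delta * u_act) >> 20;
}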
1331 if (unlikely(dl_se->dl_yielded)) in update_curr_dl_se()
1340 * For tasks that participate in GRUB, we implement GRUB-PA: the in update_curr_dl_se()
1344 * according to current frequency and CPU maximum capacity. in update_curr_dl_se()
1346 if (unlikely(dl_se->flags & SCHED_FLAG_RECLAIM)) { in update_curr_dl_se()
1349 int cpu = cpu_of(rq); in update_curr_dl_se() local
1350 unsigned long scale_freq = arch_scale_freq_capacity(cpu); in update_curr_dl_se()
1351 unsigned long scale_cpu = arch_scale_cpu_capacity(cpu); in update_curr_dl_se()
1357 dl_se->runtime -= scaled_delta_exec; in update_curr_dl_se()
1360 if (dl_runtime_exceeded(dl_se) || dl_se->dl_yielded) { in update_curr_dl_se()
1361 dl_se->dl_throttled = 1; in update_curr_dl_se()
1365 (dl_se->flags & SCHED_FLAG_DL_OVERRUN)) in update_curr_dl_se()
1366 dl_se->dl_overrun = 1; in update_curr_dl_se()
1370 update_stats_dequeue_dl(&rq->dl, dl_se, 0); in update_curr_dl_se()
1381 if (!is_leftmost(dl_se, &rq->dl)) in update_curr_dl_se()
1386 * Because -- for now -- we share the rt bandwidth, we need to in update_curr_dl_se()
1393 * using deadline servers -- however there are a few nasties to figure in update_curr_dl_se()
1397 struct rt_rq *rt_rq = &rq->rt; in update_curr_dl_se()
1399 raw_spin_lock(&rt_rq->rt_runtime_lock); in update_curr_dl_se()
1406 rt_rq->rt_time += delta_exec; in update_curr_dl_se()
1407 raw_spin_unlock(&rt_rq->rt_runtime_lock); in update_curr_dl_se()
1413 update_curr_dl_se(dl_se->rq, dl_se, delta_exec); in dl_server_update()
1419 dl_se->dl_server = 1; in dl_server_start()
1434 dl_se->rq = rq; in dl_server_init()
1435 dl_se->server_has_tasks = has_tasks; in dl_server_init()
1436 dl_se->server_pick = pick; in dl_server_init()
1441 * a -deadline task and has not been removed from the dl_rq).
1445 struct task_struct *curr = rq->curr; in update_curr_dl()
1446 struct sched_dl_entity *dl_se = &curr->dl; in update_curr_dl()
1464 static enum hrtimer_restart inactive_task_timer(struct hrtimer *timer) in inactive_task_timer() argument
1466 struct sched_dl_entity *dl_se = container_of(timer, in inactive_task_timer()
1477 rq = dl_se->rq; in inactive_task_timer()
1487 if (!dl_task(p) || READ_ONCE(p->__state) == TASK_DEAD) { in inactive_task_timer()
1490 if (READ_ONCE(p->__state) == TASK_DEAD && dl_se->dl_non_contending) { in inactive_task_timer()
1491 sub_running_bw(&p->dl, dl_rq_of_se(&p->dl)); in inactive_task_timer()
1492 sub_rq_bw(&p->dl, dl_rq_of_se(&p->dl)); in inactive_task_timer()
1493 dl_se->dl_non_contending = 0; in inactive_task_timer()
1496 raw_spin_lock(&dl_b->lock); in inactive_task_timer()
1497 __dl_sub(dl_b, p->dl.dl_bw, dl_bw_cpus(task_cpu(p))); in inactive_task_timer()
1498 raw_spin_unlock(&dl_b->lock); in inactive_task_timer()
1505 if (dl_se->dl_non_contending == 0) in inactive_task_timer()
1508 sub_running_bw(dl_se, &rq->dl); in inactive_task_timer()
1509 dl_se->dl_non_contending = 0; in inactive_task_timer()
1524 struct hrtimer *timer = &dl_se->inactive_timer; in init_dl_inactive_task_timer() local
1526 hrtimer_init(timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL_HARD); in init_dl_inactive_task_timer()
1527 timer->function = inactive_task_timer; in init_dl_inactive_task_timer()
1539 if (dl_rq->earliest_dl.curr == 0 || in inc_dl_deadline()
1540 dl_time_before(deadline, dl_rq->earliest_dl.curr)) { in inc_dl_deadline()
1541 if (dl_rq->earliest_dl.curr == 0) in inc_dl_deadline()
1542 cpupri_set(&rq->rd->cpupri, rq->cpu, CPUPRI_HIGHER); in inc_dl_deadline()
1543 dl_rq->earliest_dl.curr = deadline; in inc_dl_deadline()
1544 cpudl_set(&rq->rd->cpudl, rq->cpu, deadline); in inc_dl_deadline()
1556 if (!dl_rq->dl_nr_running) { in dec_dl_deadline()
1557 dl_rq->earliest_dl.curr = 0; in dec_dl_deadline()
1558 dl_rq->earliest_dl.next = 0; in dec_dl_deadline()
1559 cpudl_clear(&rq->rd->cpudl, rq->cpu); in dec_dl_deadline()
1560 cpupri_set(&rq->rd->cpupri, rq->cpu, rq->rt.highest_prio.curr); in dec_dl_deadline()
1562 struct rb_node *leftmost = rb_first_cached(&dl_rq->root); in dec_dl_deadline()
1565 dl_rq->earliest_dl.curr = entry->deadline; in dec_dl_deadline()
1566 cpudl_set(&rq->rd->cpudl, rq->cpu, entry->deadline); in dec_dl_deadline()
1580 u64 deadline = dl_se->deadline; in inc_dl_tasks()
1582 dl_rq->dl_nr_running++; in inc_dl_tasks()
1591 WARN_ON(!dl_rq->dl_nr_running); in dec_dl_tasks()
1592 dl_rq->dl_nr_running--; in dec_dl_tasks()
1595 dec_dl_deadline(dl_rq, dl_se->deadline); in dec_dl_tasks()
1600 return dl_time_before(__node_2_dle(a)->deadline, __node_2_dle(b)->deadline); in __dl_less()
1606 return &dl_task_of(dl_se)->stats; in __schedstats_from_dl_se()
1668 state = READ_ONCE(p->__state); in update_stats_dequeue_dl()
1670 __schedstat_set(p->stats.sleep_start, in update_stats_dequeue_dl()
1674 __schedstat_set(p->stats.block_start, in update_stats_dequeue_dl()
1683 WARN_ON_ONCE(!RB_EMPTY_NODE(&dl_se->rb_node)); in __enqueue_dl_entity()
1685 rb_add_cached(&dl_se->rb_node, &dl_rq->root, __dl_less); in __enqueue_dl_entity()
1694 if (RB_EMPTY_NODE(&dl_se->rb_node)) in __dequeue_dl_entity()
1697 rb_erase_cached(&dl_se->rb_node, &dl_rq->root); in __dequeue_dl_entity()
1699 RB_CLEAR_NODE(&dl_se->rb_node); in __dequeue_dl_entity()
1715 * the replenishment timer will be set to the next period. in enqueue_dl_entity()
1717 if (!dl_se->dl_throttled && !dl_is_implicit(dl_se)) in enqueue_dl_entity()
1730 * its rq, the bandwidth timer callback (which clearly has not in enqueue_dl_entity()
1734 * task's state - in GRUB parlance, "inactive" vs "active contending"). in enqueue_dl_entity()
1739 if (dl_se->dl_throttled && !(flags & ENQUEUE_REPLENISH)) { in enqueue_dl_entity()
1757 dl_time_before(dl_se->deadline, rq_clock(rq_of_dl_se(dl_se)))) { in enqueue_dl_entity()
1776 * This check allows us to start the inactive timer (or to immediately in dequeue_dl_entity()
1779 * (p->state == TASK_DEAD). We can handle the two cases in the same in dequeue_dl_entity()
1790 if (is_dl_boosted(&p->dl)) { in enqueue_task_dl()
1803 if (p->dl.dl_throttled) { in enqueue_task_dl()
1805 * The replenish timer needs to be canceled. No in enqueue_task_dl()
1809 hrtimer_try_to_cancel(&p->dl.dl_timer); in enqueue_task_dl()
1810 p->dl.dl_throttled = 0; in enqueue_task_dl()
1812 } else if (!dl_prio(p->normal_prio)) { in enqueue_task_dl()
1818 * clear the flag, otherwise the task may wake up as throttled after in enqueue_task_dl()
1822 p->dl.dl_throttled = 0; in enqueue_task_dl()
1824 printk_deferred_once("sched: DL de-boosted task PID %d: REPLENISH flag missing\n", in enqueue_task_dl()
1831 update_stats_wait_start_dl(dl_rq_of_se(&p->dl), &p->dl); in enqueue_task_dl()
1833 if (p->on_rq == TASK_ON_RQ_MIGRATING) in enqueue_task_dl()
1836 enqueue_dl_entity(&p->dl, flags); in enqueue_task_dl()
1838 if (dl_server(&p->dl)) in enqueue_task_dl()
1841 if (!task_current(rq, p) && !p->dl.dl_throttled && p->nr_cpus_allowed > 1) in enqueue_task_dl()
1849 if (p->on_rq == TASK_ON_RQ_MIGRATING) in dequeue_task_dl()
1852 dequeue_dl_entity(&p->dl, flags); in dequeue_task_dl()
1853 if (!p->dl.dl_throttled && !dl_server(&p->dl)) in dequeue_task_dl()
1858 * Yield task semantic for -deadline tasks is:
1860 * get off from the CPU until our next instance, with
1872 * it and the bandwidth timer will wake it up and will give it in yield_task_dl()
1875 rq->curr->dl.dl_yielded = 1; in yield_task_dl()
1892 return (!rq->dl.dl_nr_running || in dl_task_is_earliest_deadline()
1893 dl_time_before(p->dl.deadline, in dl_task_is_earliest_deadline()
1894 rq->dl.earliest_dl.curr)); in dl_task_is_earliest_deadline()
1900 select_task_rq_dl(struct task_struct *p, int cpu, int flags) in select_task_rq_dl() argument
1909 rq = cpu_rq(cpu); in select_task_rq_dl()
1912 curr = READ_ONCE(rq->curr); /* unlocked access */ in select_task_rq_dl()
1915 * If we are dealing with a -deadline task, we must in select_task_rq_dl()
1916 * decide where to wake it up. in select_task_rq_dl()
1924 (curr->nr_cpus_allowed < 2 || in select_task_rq_dl()
1925 !dl_entity_preempt(&p->dl, &curr->dl)) && in select_task_rq_dl()
1926 p->nr_cpus_allowed > 1; in select_task_rq_dl()
1929 * Take the capacity of the CPU into account to in select_task_rq_dl()
1933 select_rq |= !dl_task_fits_capacity(p, cpu); in select_task_rq_dl()
1938 if (target != -1 && in select_task_rq_dl()
1940 cpu = target; in select_task_rq_dl()
1945 return cpu; in select_task_rq_dl()
1953 if (READ_ONCE(p->__state) != TASK_WAKING) in migrate_task_rq_dl()
1958 * Since p->state == TASK_WAKING, set_task_cpu() has been called in migrate_task_rq_dl()
1959 * from try_to_wake_up(). Hence, p->pi_lock is locked, but in migrate_task_rq_dl()
1960 * rq->lock is not... So, lock it in migrate_task_rq_dl()
1963 if (p->dl.dl_non_contending) { in migrate_task_rq_dl()
1965 sub_running_bw(&p->dl, &rq->dl); in migrate_task_rq_dl()
1966 p->dl.dl_non_contending = 0; in migrate_task_rq_dl()
1968 * If the timer handler is currently running and the in migrate_task_rq_dl()
1969 * timer cannot be canceled, inactive_task_timer() in migrate_task_rq_dl()
1974 if (hrtimer_try_to_cancel(&p->dl.inactive_timer) == 1) in migrate_task_rq_dl()
1977 sub_rq_bw(&p->dl, &rq->dl); in migrate_task_rq_dl()
1987 if (rq->curr->nr_cpus_allowed == 1 || in check_preempt_equal_dl()
1988 !cpudl_find(&rq->rd->cpudl, rq->curr, NULL)) in check_preempt_equal_dl()
1995 if (p->nr_cpus_allowed != 1 && in check_preempt_equal_dl()
1996 cpudl_find(&rq->rd->cpudl, p, NULL)) in check_preempt_equal_dl()
2004 if (!on_dl_rq(&p->dl) && need_pull_dl_task(rq, p)) { in balance_dl()
2007 * picked for load-balance and preemption/IRQs are still in balance_dl()
2021 * Only called when both the current and waking task are -deadline
2027 if (dl_entity_preempt(&p->dl, &rq->curr->dl)) { in wakeup_preempt_dl()
2037 if ((p->dl.deadline == rq->curr->dl.deadline) && in wakeup_preempt_dl()
2038 !test_tsk_need_resched(rq->curr)) in wakeup_preempt_dl()
2046 hrtick_start(rq, dl_se->runtime); in start_hrtick_dl()
2056 struct sched_dl_entity *dl_se = &p->dl; in set_next_task_dl()
2057 struct dl_rq *dl_rq = &rq->dl; in set_next_task_dl()
2059 p->se.exec_start = rq_clock_task(rq); in set_next_task_dl()
2060 if (on_dl_rq(&p->dl)) in set_next_task_dl()
2069 if (rq->curr->sched_class != &dl_sched_class) in set_next_task_dl()
2077 struct rb_node *left = rb_first_cached(&dl_rq->root); in pick_next_dl_entity()
2088 struct dl_rq *dl_rq = &rq->dl; in pick_task_dl()
2099 p = dl_se->server_pick(dl_se); in pick_task_dl()
2102 dl_se->dl_yielded = 1; in pick_task_dl()
2106 p->dl_server = dl_se; in pick_task_dl()
2122 if (!p->dl_server) in pick_next_task_dl()
2126 start_hrtick_dl(rq, &p->dl); in pick_next_task_dl()
2133 struct sched_dl_entity *dl_se = &p->dl; in put_prev_task_dl()
2134 struct dl_rq *dl_rq = &rq->dl; in put_prev_task_dl()
2136 if (on_dl_rq(&p->dl)) in put_prev_task_dl()
2142 if (on_dl_rq(&p->dl) && p->nr_cpus_allowed > 1) in put_prev_task_dl()
2164 if (hrtick_enabled_dl(rq) && queued && p->dl.runtime > 0 && in task_tick_dl()
2165 is_leftmost(&p->dl, &rq->dl)) in task_tick_dl()
2166 start_hrtick_dl(rq, &p->dl); in task_tick_dl()
2172 * SCHED_DEADLINE tasks cannot fork and this is achieved through in task_fork_dl()
2182 static int pick_dl_task(struct rq *rq, struct task_struct *p, int cpu) in pick_dl_task() argument
2185 cpumask_test_cpu(cpu, &p->cpus_mask)) in pick_dl_task()
2192 * on the CPU, NULL otherwise:
2194 static struct task_struct *pick_earliest_pushable_dl_task(struct rq *rq, int cpu) in pick_earliest_pushable_dl_task() argument
2202 next_node = rb_first_cached(&rq->dl.pushable_dl_tasks_root); in pick_earliest_pushable_dl_task()
2208 if (pick_dl_task(rq, p, cpu)) in pick_earliest_pushable_dl_task()
2225 int cpu = task_cpu(task); in find_later_rq() local
2229 return -1; in find_later_rq()
2231 if (task->nr_cpus_allowed == 1) in find_later_rq()
2232 return -1; in find_later_rq()
2236 * first, then we can look for a suitable CPU. in find_later_rq()
2238 if (!cpudl_find(&task_rq(task)->rd->cpudl, task, later_mask)) in find_later_rq()
2239 return -1; in find_later_rq()
2250 * The last CPU where the task ran is our first in find_later_rq()
2251 * guess, since it is most likely cache-hot there. in find_later_rq()
2253 if (cpumask_test_cpu(cpu, later_mask)) in find_later_rq()
2254 return cpu; in find_later_rq()
2260 this_cpu = -1; in find_later_rq()
2263 for_each_domain(cpu, sd) { in find_later_rq()
2264 if (sd->flags & SD_WAKE_AFFINE) { in find_later_rq()
2271 if (this_cpu != -1 && in find_later_rq()
2280 * Last chance: if a CPU being in both later_mask in find_later_rq()
2282 * choice. Of course, the latest possible CPU is in find_later_rq()
2297 if (this_cpu != -1) in find_later_rq()
2300 cpu = cpumask_any_distribute(later_mask); in find_later_rq()
2301 if (cpu < nr_cpu_ids) in find_later_rq()
2302 return cpu; in find_later_rq()
2304 return -1; in find_later_rq()
2312 int cpu; in find_lock_later_rq() local
2315 cpu = find_later_rq(task); in find_lock_later_rq()
2317 if ((cpu == -1) || (cpu == rq->cpu)) in find_lock_later_rq()
2320 later_rq = cpu_rq(cpu); in find_lock_later_rq()
2335 !cpumask_test_cpu(later_rq->cpu, &task->cpus_mask) || in find_lock_later_rq()
2347 * If the rq we found has no -deadline task, or in find_lock_later_rq()
2369 p = __node_2_pdl(rb_first_cached(&rq->dl.pushable_dl_tasks_root)); in pick_next_pushable_dl_task()
2371 WARN_ON_ONCE(rq->cpu != task_cpu(p)); in pick_next_pushable_dl_task()
2373 WARN_ON_ONCE(p->nr_cpus_allowed <= 1); in pick_next_pushable_dl_task()
2382 * See if the non running -deadline tasks on this rq
2383 * can be sent to some other CPU where they can preempt
2398 * If next_task preempts rq->curr, and rq->curr in push_dl_task()
2402 if (dl_task(rq->curr) && in push_dl_task()
2403 dl_time_before(next_task->dl.deadline, rq->curr->dl.deadline) && in push_dl_task()
2404 rq->curr->nr_cpus_allowed > 1) { in push_dl_task()
2412 if (WARN_ON(next_task == rq->curr)) in push_dl_task()
2425 * find_lock_later_rq releases rq->lock and it is in push_dl_task()
2432 * again, some other CPU will pull it when ready. in push_dl_task()
2447 set_task_cpu(next_task, later_rq->cpu); in push_dl_task()
2463 /* push_dl_task() will return true if it moved a -deadline task */ in push_dl_tasks()
2470 int this_cpu = this_rq->cpu, cpu; in pull_dl_task() local
2485 for_each_cpu(cpu, this_rq->rd->dlo_mask) { in pull_dl_task()
2486 if (this_cpu == cpu) in pull_dl_task()
2489 src_rq = cpu_rq(cpu); in pull_dl_task()
2495 if (this_rq->dl.dl_nr_running && in pull_dl_task()
2496 dl_time_before(this_rq->dl.earliest_dl.curr, in pull_dl_task()
2497 src_rq->dl.earliest_dl.next)) in pull_dl_task()
2500 /* Might drop this_rq->lock */ in pull_dl_task()
2508 if (src_rq->dl.dl_nr_running <= 1) in pull_dl_task()
2515 * - it preempts our current (if there's one), in pull_dl_task()
2516 * - it will preempt the last one we pulled (if any). in pull_dl_task()
2518 if (p && dl_time_before(p->dl.deadline, dmin) && in pull_dl_task()
2520 WARN_ON(p == src_rq->curr); in pull_dl_task()
2527 if (dl_time_before(p->dl.deadline, in pull_dl_task()
2528 src_rq->curr->dl.deadline)) in pull_dl_task()
2537 dmin = p->dl.deadline; in pull_dl_task()
2549 stop_one_cpu_nowait(src_rq->cpu, push_cpu_stop, in pull_dl_task()
2550 push_task, &src_rq->push_work); in pull_dl_task()
2567 !test_tsk_need_resched(rq->curr) && in task_woken_dl()
2568 p->nr_cpus_allowed > 1 && in task_woken_dl()
2569 dl_task(rq->curr) && in task_woken_dl()
2570 (rq->curr->nr_cpus_allowed < 2 || in task_woken_dl()
2571 !dl_entity_preempt(&p->dl, &rq->curr->dl))) { in task_woken_dl()
2585 src_rd = rq->rd; in set_cpus_allowed_dl()
2592 if (!cpumask_intersects(src_rd->span, ctx->new_mask)) { in set_cpus_allowed_dl()
2601 raw_spin_lock(&src_dl_b->lock); in set_cpus_allowed_dl()
2602 __dl_sub(src_dl_b, p->dl.dl_bw, dl_bw_cpus(task_cpu(p))); in set_cpus_allowed_dl()
2603 raw_spin_unlock(&src_dl_b->lock); in set_cpus_allowed_dl()
2609 /* Assumes rq->lock is held */
2612 if (rq->dl.overloaded) in rq_online_dl()
2615 cpudl_set_freecpu(&rq->rd->cpudl, rq->cpu); in rq_online_dl()
2616 if (rq->dl.dl_nr_running > 0) in rq_online_dl()
2617 cpudl_set(&rq->rd->cpudl, rq->cpu, rq->dl.earliest_dl.curr); in rq_online_dl()
2620 /* Assumes rq->lock is held */
2623 if (rq->dl.overloaded) in rq_offline_dl()
2626 cpudl_clear(&rq->rd->cpudl, rq->cpu); in rq_offline_dl()
2627 cpudl_clear_freecpu(&rq->rd->cpudl, rq->cpu); in rq_offline_dl()
2645 raw_spin_lock_irqsave(&p->pi_lock, rf.flags); in dl_add_task_root_domain()
2647 raw_spin_unlock_irqrestore(&p->pi_lock, rf.flags); in dl_add_task_root_domain()
2653 dl_b = &rq->rd->dl_bw; in dl_add_task_root_domain()
2654 raw_spin_lock(&dl_b->lock); in dl_add_task_root_domain()
2656 __dl_add(dl_b, p->dl.dl_bw, cpumask_weight(rq->rd->span)); in dl_add_task_root_domain()
2658 raw_spin_unlock(&dl_b->lock); in dl_add_task_root_domain()
2667 raw_spin_lock_irqsave(&rd->dl_bw.lock, flags); in dl_clear_root_domain()
2668 rd->dl_bw.total_bw = 0; in dl_clear_root_domain()
2669 raw_spin_unlock_irqrestore(&rd->dl_bw.lock, flags); in dl_clear_root_domain()
2677 * task_non_contending() can start the "inactive timer" (if the 0-lag in switched_from_dl()
2679 * the "inactive timer" fires, it can continue to consume its current in switched_from_dl()
2681 * SCHED_DEADLINE until the 0-lag time passes, inactive_task_timer() in switched_from_dl()
2684 if (task_on_rq_queued(p) && p->dl.dl_runtime) in switched_from_dl()
2685 task_non_contending(&p->dl); in switched_from_dl()
2695 * Inactive timer is armed. However, p is leaving DEADLINE and in switched_from_dl()
2700 if (p->dl.dl_non_contending) in switched_from_dl()
2701 sub_running_bw(&p->dl, &rq->dl); in switched_from_dl()
2702 sub_rq_bw(&p->dl, &rq->dl); in switched_from_dl()
2706 * We cannot use inactive_task_timer() to invoke sub_running_bw() in switched_from_dl()
2707 * at the 0-lag time, because the task could have been migrated in switched_from_dl()
2710 if (p->dl.dl_non_contending) in switched_from_dl()
2711 p->dl.dl_non_contending = 0; in switched_from_dl()
2714 * Since this might be the only -deadline task on the rq, in switched_from_dl()
2716 * from an overloaded CPU, if any. in switched_from_dl()
2718 if (!task_on_rq_queued(p) || rq->dl.dl_nr_running) in switched_from_dl()
2725 * When switching to -deadline, we may overload the rq, then
2730 if (hrtimer_try_to_cancel(&p->dl.inactive_timer) == 1) in switched_to_dl()
2741 add_rq_bw(&p->dl, &rq->dl); in switched_to_dl()
2746 if (rq->curr != p) { in switched_to_dl()
2748 if (p->nr_cpus_allowed > 1 && rq->dl.overloaded) in switched_to_dl()
2751 if (dl_task(rq->curr)) in switched_to_dl()
2761 * If the scheduling parameters of a -deadline task changed,
2777 if (!rq->dl.overloaded) in prio_changed_dl()
2786 if (dl_time_before(rq->dl.earliest_dl.curr, p->dl.deadline)) in prio_changed_dl()
2795 if (!dl_task(rq->curr) || in prio_changed_dl()
2796 dl_time_before(p->dl.deadline, rq->curr->dl.deadline)) in prio_changed_dl()
2809 static int task_is_throttled_dl(struct task_struct *p, int cpu) in task_is_throttled_dl() argument
2811 return p->dl.dl_throttled; in task_is_throttled_dl()
2862 int cpu, cpus, ret = 0; in sched_dl_global_validate() local
2870 for_each_possible_cpu(cpu) { in sched_dl_global_validate()
2873 if (dl_bw_visited(cpu, gen)) in sched_dl_global_validate()
2876 dl_b = dl_bw_of(cpu); in sched_dl_global_validate()
2877 cpus = dl_bw_cpus(cpu); in sched_dl_global_validate()
2879 raw_spin_lock_irqsave(&dl_b->lock, flags); in sched_dl_global_validate()
2880 if (new_bw * cpus < dl_b->total_bw) in sched_dl_global_validate()
2881 ret = -EBUSY; in sched_dl_global_validate()
2882 raw_spin_unlock_irqrestore(&dl_b->lock, flags); in sched_dl_global_validate()
2897 dl_rq->bw_ratio = 1 << RATIO_SHIFT; in init_dl_rq_bw_ratio()
2898 dl_rq->max_bw = dl_rq->extra_bw = 1 << BW_SHIFT; in init_dl_rq_bw_ratio()
2900 dl_rq->bw_ratio = to_ratio(global_rt_runtime(), in init_dl_rq_bw_ratio()
2901 global_rt_period()) >> (BW_SHIFT - RATIO_SHIFT); in init_dl_rq_bw_ratio()
2902 dl_rq->max_bw = dl_rq->extra_bw = in init_dl_rq_bw_ratio()
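/*
 * Worked example for the fixed-point setup above (illustrative numbers,
 * assuming the default global_rt_runtime()/global_rt_period() of
 * 950000us/1000000us, BW_SHIFT = 20 and RATIO_SHIFT = 8): Umax = 0.95, so
 * max_bw is roughly 0.95 << BW_SHIFT (~996147) and bw_ratio is roughly
 * (1/0.95) << RATIO_SHIFT (~269), which is the factor grub_reclaim()
 * multiplies by in order to divide by Umax.
 */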
2909 u64 new_bw = -1; in sched_dl_do_global()
2912 int cpu; in sched_dl_do_global() local
2918 for_each_possible_cpu(cpu) { in sched_dl_do_global()
2921 if (dl_bw_visited(cpu, gen)) { in sched_dl_do_global()
2926 dl_b = dl_bw_of(cpu); in sched_dl_do_global()
2928 raw_spin_lock_irqsave(&dl_b->lock, flags); in sched_dl_do_global()
2929 dl_b->bw = new_bw; in sched_dl_do_global()
2930 raw_spin_unlock_irqrestore(&dl_b->lock, flags); in sched_dl_do_global()
2933 init_dl_rq_bw_ratio(&cpu_rq(cpu)->dl); in sched_dl_do_global()
2943 * This function is called while holding p's rq->lock.
2948 u64 period = attr->sched_period ?: attr->sched_deadline; in sched_dl_overflow()
2949 u64 runtime = attr->sched_runtime; in sched_dl_overflow()
2951 int cpus, err = -1, cpu = task_cpu(p); in sched_dl_overflow() local
2952 struct dl_bw *dl_b = dl_bw_of(cpu); in sched_dl_overflow()
2955 if (attr->sched_flags & SCHED_FLAG_SUGOV) in sched_dl_overflow()
2959 if (new_bw == p->dl.dl_bw && task_has_dl_policy(p)) in sched_dl_overflow()
2963 * Either if a task enters, leaves, or stays -deadline but changes in sched_dl_overflow()
2967 raw_spin_lock(&dl_b->lock); in sched_dl_overflow()
2968 cpus = dl_bw_cpus(cpu); in sched_dl_overflow()
2969 cap = dl_bw_capacity(cpu); in sched_dl_overflow()
2973 if (hrtimer_active(&p->dl.inactive_timer)) in sched_dl_overflow()
2974 __dl_sub(dl_b, p->dl.dl_bw, cpus); in sched_dl_overflow()
2978 !__dl_overflow(dl_b, cap, p->dl.dl_bw, new_bw)) { in sched_dl_overflow()
2982 * utilization change until the task's 0-lag point. in sched_dl_overflow()
2984 * timer" when the task is not inactive. in sched_dl_overflow()
2986 __dl_sub(dl_b, p->dl.dl_bw, cpus); in sched_dl_overflow()
2994 * (0-lag) time. in sched_dl_overflow()
2998 raw_spin_unlock(&dl_b->lock); in sched_dl_overflow()
3013 struct sched_dl_entity *dl_se = &p->dl; in __setparam_dl()
3015 dl_se->dl_runtime = attr->sched_runtime; in __setparam_dl()
3016 dl_se->dl_deadline = attr->sched_deadline; in __setparam_dl()
3017 dl_se->dl_period = attr->sched_period ?: dl_se->dl_deadline; in __setparam_dl()
3018 dl_se->flags = attr->sched_flags & SCHED_DL_FLAGS; in __setparam_dl()
3019 dl_se->dl_bw = to_ratio(dl_se->dl_period, dl_se->dl_runtime); in __setparam_dl()
3020 dl_se->dl_density = to_ratio(dl_se->dl_deadline, dl_se->dl_runtime); in __setparam_dl()
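/*
 * Sketch of the to_ratio() conversion used above (simplified and
 * illustrative; the real helper lives elsewhere in the scheduler core): it
 * is understood to return runtime/period in 2^BW_SHIFT fixed point
 * (BW_SHIFT assumed to be 20), so dl_bw is the task's utilization and
 * dl_density its runtime/deadline ratio.
 */
static u64 to_ratio_sketch(u64 period, u64 runtime)
{
        if (period == 0)
                return 0;

        /* e.g. runtime = 10ms, period = 100ms -> ~0.1 << BW_SHIFT */
        return div64_u64(runtime << 20, period);
}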
3025 struct sched_dl_entity *dl_se = &p->dl; in __getparam_dl()
3027 attr->sched_priority = p->rt_priority; in __getparam_dl()
3028 attr->sched_runtime = dl_se->dl_runtime; in __getparam_dl()
3029 attr->sched_deadline = dl_se->dl_deadline; in __getparam_dl()
3030 attr->sched_period = dl_se->dl_period; in __getparam_dl()
3031 attr->sched_flags &= ~SCHED_DL_FLAGS; in __getparam_dl()
3032 attr->sched_flags |= dl_se->flags; in __getparam_dl()
3036 * This function validates the new parameters of a -deadline task.
3050 if (attr->sched_flags & SCHED_FLAG_SUGOV) in __checkparam_dl()
3054 if (attr->sched_deadline == 0) in __checkparam_dl()
3061 if (attr->sched_runtime < (1ULL << DL_SCALE)) in __checkparam_dl()
3065 * Since we use the MSB for wrap-around and sign issues, make in __checkparam_dl()
3068 if (attr->sched_deadline & (1ULL << 63) || in __checkparam_dl()
3069 attr->sched_period & (1ULL << 63)) in __checkparam_dl()
3072 period = attr->sched_period; in __checkparam_dl()
3074 period = attr->sched_deadline; in __checkparam_dl()
3077 if (period < attr->sched_deadline || in __checkparam_dl()
3078 attr->sched_deadline < attr->sched_runtime) in __checkparam_dl()
3095 dl_se->dl_runtime = 0; in __dl_clear_params()
3096 dl_se->dl_deadline = 0; in __dl_clear_params()
3097 dl_se->dl_period = 0; in __dl_clear_params()
3098 dl_se->flags = 0; in __dl_clear_params()
3099 dl_se->dl_bw = 0; in __dl_clear_params()
3100 dl_se->dl_density = 0; in __dl_clear_params()
3102 dl_se->dl_throttled = 0; in __dl_clear_params()
3103 dl_se->dl_yielded = 0; in __dl_clear_params()
3104 dl_se->dl_non_contending = 0; in __dl_clear_params()
3105 dl_se->dl_overrun = 0; in __dl_clear_params()
3106 dl_se->dl_server = 0; in __dl_clear_params()
3109 dl_se->pi_se = dl_se; in __dl_clear_params()
3115 RB_CLEAR_NODE(&dl_se->rb_node); in init_dl_entity()
3123 struct sched_dl_entity *dl_se = &p->dl; in dl_param_changed()
3125 if (dl_se->dl_runtime != attr->sched_runtime || in dl_param_changed()
3126 dl_se->dl_deadline != attr->sched_deadline || in dl_param_changed()
3127 dl_se->dl_period != attr->sched_period || in dl_param_changed()
3128 dl_se->flags != (attr->sched_flags & SCHED_DL_FLAGS)) in dl_param_changed()
3145 raw_spin_lock_irqsave(&cur_dl_b->lock, flags); in dl_cpuset_cpumask_can_shrink()
3148 raw_spin_unlock_irqrestore(&cur_dl_b->lock, flags); in dl_cpuset_cpumask_can_shrink()
3160 static int dl_bw_manage(enum dl_bw_request req, int cpu, u64 dl_bw) in dl_bw_manage() argument
3167 dl_b = dl_bw_of(cpu); in dl_bw_manage()
3168 raw_spin_lock_irqsave(&dl_b->lock, flags); in dl_bw_manage()
3171 __dl_sub(dl_b, dl_bw, dl_bw_cpus(cpu)); in dl_bw_manage()
3173 unsigned long cap = dl_bw_capacity(cpu); in dl_bw_manage()
3184 __dl_add(dl_b, dl_bw, dl_bw_cpus(cpu)); in dl_bw_manage()
3188 raw_spin_unlock_irqrestore(&dl_b->lock, flags); in dl_bw_manage()
3191 return overflow ? -EBUSY : 0; in dl_bw_manage()
3194 int dl_bw_check_overflow(int cpu) in dl_bw_check_overflow() argument
3196 return dl_bw_manage(dl_bw_req_check_overflow, cpu, 0); in dl_bw_check_overflow()
3199 int dl_bw_alloc(int cpu, u64 dl_bw) in dl_bw_alloc() argument
3201 return dl_bw_manage(dl_bw_req_alloc, cpu, dl_bw); in dl_bw_alloc()
3204 void dl_bw_free(int cpu, u64 dl_bw) in dl_bw_free() argument
3206 dl_bw_manage(dl_bw_req_free, cpu, dl_bw); in dl_bw_free()
3211 void print_dl_stats(struct seq_file *m, int cpu) in print_dl_stats() argument
3213 print_dl_rq(m, cpu, &cpu_rq(cpu)->dl); in print_dl_stats()