/* Excerpts from kernel/sched/rt.c */

// SPDX-License-Identifier: GPL-2.0
/*
 * Real-Time Scheduling Class (mapped to the SCHED_FIFO and SCHED_RR
 * policies)
 */

/*
 * period over which we measure -rt task CPU usage in us.
 * default: 1s
 */
int sysctl_sched_rt_period = 1000000;
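The period/runtime pair above is exposed to userspace through the kernel.sched_rt_period_us and kernel.sched_rt_runtime_us sysctls. A minimal userspace sketch (not part of rt.c) that reads the two knobs:

#include <stdio.h>

static long read_long(const char *path)
{
        FILE *f = fopen(path, "r");
        long v = -1;

        if (f) {
                if (fscanf(f, "%ld", &v) != 1)
                        v = -1;
                fclose(f);
        }
        return v;
}

int main(void)
{
        /* defaults are typically 1000000 and 950000 (us) */
        printf("sched_rt_period_us:  %ld\n",
               read_long("/proc/sys/kernel/sched_rt_period_us"));
        printf("sched_rt_runtime_us: %ld\n",
               read_long("/proc/sys/kernel/sched_rt_runtime_us"));
        return 0;
}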
/* in init_rt_rq() */
        array = &rt_rq->active;
        for (i = 0; i < MAX_RT_PRIO; i++) {
                INIT_LIST_HEAD(array->queue + i);
                __clear_bit(i, array->bitmap);
        }
        /* delimiter for bitsearch: */
        __set_bit(MAX_RT_PRIO, array->bitmap);

        rt_rq->highest_prio.curr = MAX_RT_PRIO-1;
        rt_rq->highest_prio.next = MAX_RT_PRIO-1;
        rt_rq->overloaded = 0;
        plist_head_init(&rt_rq->pushable_tasks);

        /* We start in dequeued state, because no RT tasks are queued */
        rt_rq->rt_queued = 0;

        rt_rq->rt_time = 0;
        rt_rq->rt_throttled = 0;
        rt_rq->rt_runtime = 0;
        raw_spin_lock_init(&rt_rq->rt_runtime_lock);
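For illustration, here is a self-contained userspace sketch of the structure init_rt_rq() sets up: a bitmap with one bit (and one FIFO queue) per priority, where picking the next priority is a find-first-set over the bitmap. The types and sizes below are demo assumptions, not the kernel's.

#include <stdio.h>
#include <strings.h>    /* ffs() */

#define NPRIO 100       /* stand-in for MAX_RT_PRIO; 0 is most important */

struct prio_array {
        unsigned int bitmap[(NPRIO + 31) / 32];
        int qlen[NPRIO];        /* stand-in for the per-priority FIFO queues */
};

static void enqueue(struct prio_array *a, int prio)
{
        a->qlen[prio]++;
        a->bitmap[prio / 32] |= 1u << (prio % 32);
}

static int highest_prio(const struct prio_array *a)
{
        /* scan word by word; ffs() finds the lowest set bit */
        for (int w = 0; w < (NPRIO + 31) / 32; w++)
                if (a->bitmap[w])
                        return w * 32 + ffs(a->bitmap[w]) - 1;
        return -1;      /* empty; cf. the MAX_RT_PRIO delimiter bit above */
}

int main(void)
{
        struct prio_array a = { 0 };

        enqueue(&a, 42);
        enqueue(&a, 7);
        printf("next prio: %d\n", highest_prio(&a));    /* prints 7 */
        return 0;
}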
/* in sched_rt_period_timer() */
        raw_spin_lock(&rt_b->rt_runtime_lock);
        overrun = hrtimer_forward_now(timer, rt_b->rt_period);

        raw_spin_unlock(&rt_b->rt_runtime_lock);
        raw_spin_lock(&rt_b->rt_runtime_lock);

        rt_b->rt_period_active = 0;
        raw_spin_unlock(&rt_b->rt_runtime_lock);

/* in init_rt_bandwidth() */
        rt_b->rt_period = ns_to_ktime(period);
        rt_b->rt_runtime = runtime;

        raw_spin_lock_init(&rt_b->rt_runtime_lock);

        hrtimer_setup(&rt_b->rt_period_timer, sched_rt_period_timer, CLOCK_MONOTONIC,
                      HRTIMER_MODE_REL_HARD);

/* in do_start_rt_bandwidth() */
        raw_spin_lock(&rt_b->rt_runtime_lock);
        if (!rt_b->rt_period_active) {
                rt_b->rt_period_active = 1;

                hrtimer_forward_now(&rt_b->rt_period_timer, ns_to_ktime(0));
                hrtimer_start_expires(&rt_b->rt_period_timer,
                                      HRTIMER_MODE_ABS_PINNED_HARD);
        }
        raw_spin_unlock(&rt_b->rt_runtime_lock);

/* in start_rt_bandwidth() */
        if (!rt_bandwidth_enabled() || rt_b->rt_runtime == RUNTIME_INF)
                return;

/* in destroy_rt_bandwidth() */
        hrtimer_cancel(&rt_b->rt_period_timer);
#define rt_entity_is_task(rt_se) (!(rt_se)->my_q)

/* in rq_of_rt_rq() */
        return rt_rq->rq;

/* in rt_rq_of_se() */
        return rt_se->rt_rq;

/* in rq_of_rt_se() */
        struct rt_rq *rt_rq = rt_se->rt_rq;

        return rt_rq->rq;
/* in unregister_rt_sched_group() */
        if (tg->rt_se)
                destroy_rt_bandwidth(&tg->rt_bandwidth);

/* in free_rt_sched_group() */
        for_each_possible_cpu(i) {
                if (tg->rt_rq)
                        kfree(tg->rt_rq[i]);
                if (tg->rt_se)
                        kfree(tg->rt_se[i]);
        }

        kfree(tg->rt_rq);
        kfree(tg->rt_se);

/* in init_tg_rt_entry() */
        rt_rq->highest_prio.curr = MAX_RT_PRIO-1;
        rt_rq->rt_nr_boosted = 0;
        rt_rq->rq = rq;
        rt_rq->tg = tg;

        tg->rt_rq[cpu] = rt_rq;
        tg->rt_se[cpu] = rt_se;

        if (!parent)
                rt_se->rt_rq = &rq->rt;
        else
                rt_se->rt_rq = parent->my_q;

        rt_se->my_q = rt_rq;
        rt_se->parent = parent;
        INIT_LIST_HEAD(&rt_se->run_list);

/* in alloc_rt_sched_group() */
        tg->rt_rq = kcalloc(nr_cpu_ids, sizeof(rt_rq), GFP_KERNEL);
        if (!tg->rt_rq)
                goto err;
        tg->rt_se = kcalloc(nr_cpu_ids, sizeof(rt_se), GFP_KERNEL);
        if (!tg->rt_se)
                goto err;

        init_rt_bandwidth(&tg->rt_bandwidth, ktime_to_ns(global_rt_period()), 0);

        rt_rq->rt_runtime = tg->rt_bandwidth.rt_runtime;
        init_tg_rt_entry(tg, rt_rq, rt_se, i, parent->rt_se[i]);
/* in rt_rq_of_se() (!CONFIG_RT_GROUP_SCHED) */
        return &rq->rt;

/* in need_pull_rt_task() */
        /* Try to pull RT tasks here if we lower this rq's prio */
        return rq->online && rq->rt.highest_prio.curr > prev->prio;

/* in rt_overloaded() */
        return atomic_read(&rq->rd->rto_count);

/* in rt_set_overload() */
        if (!rq->online)
                return;

        cpumask_set_cpu(rq->cpu, rq->rd->rto_mask);

        atomic_inc(&rq->rd->rto_count);

/* in rt_clear_overload() */
        if (!rq->online)
                return;

        atomic_dec(&rq->rd->rto_count);
        cpumask_clear_cpu(rq->cpu, rq->rd->rto_mask);

/* in has_pushable_tasks() */
        return !plist_head_empty(&rq->rt.pushable_tasks);

/* in rt_queue_push_tasks() */
        queue_balance_callback(rq, &per_cpu(rt_push_head, rq->cpu), push_rt_tasks);

/* in rt_queue_pull_task() */
        queue_balance_callback(rq, &per_cpu(rt_pull_head, rq->cpu), pull_rt_task);
/* in enqueue_pushable_task() */
        plist_del(&p->pushable_tasks, &rq->rt.pushable_tasks);
        plist_node_init(&p->pushable_tasks, p->prio);
        plist_add(&p->pushable_tasks, &rq->rt.pushable_tasks);

        /* Update the highest prio pushable task */
        if (p->prio < rq->rt.highest_prio.next)
                rq->rt.highest_prio.next = p->prio;

        if (!rq->rt.overloaded) {
                rt_set_overload(rq);
                rq->rt.overloaded = 1;
        }

/* in dequeue_pushable_task() */
        plist_del(&p->pushable_tasks, &rq->rt.pushable_tasks);

        /* Update the new highest prio pushable task */
        if (has_pushable_tasks(rq)) {
                p = plist_first_entry(&rq->rt.pushable_tasks,
                                      struct task_struct, pushable_tasks);
                rq->rt.highest_prio.next = p->prio;
        } else {
                rq->rt.highest_prio.next = MAX_RT_PRIO-1;

                if (rq->rt.overloaded) {
                        rt_clear_overload(rq);
                        rq->rt.overloaded = 0;
                }
        }
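A sketch of the plist semantics relied on above (hypothetical userspace types, not kernel code): because pushable_tasks stays sorted by priority, the cached highest_prio.next can always be refreshed from the head element.

#include <stdio.h>

struct pnode {
        int prio;               /* lower value = higher priority */
        struct pnode *next;
};

/* sorted insert, most important (lowest value) first */
static void pinsert(struct pnode **head, struct pnode *n)
{
        while (*head && (*head)->prio <= n->prio)
                head = &(*head)->next;
        n->next = *head;
        *head = n;
}

static int next_prio(const struct pnode *head)
{
        return head ? head->prio : 99;  /* cf. MAX_RT_PRIO-1 when empty */
}

int main(void)
{
        struct pnode a = { .prio = 50 }, b = { .prio = 10 };
        struct pnode *head = NULL;

        pinsert(&head, &a);
        pinsert(&head, &b);
        printf("highest_prio.next = %d\n", next_prio(head));    /* 10 */
        return 0;
}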
/* in on_rt_rq() */
        return rt_se->on_rq;
/* from the comment above rt_task_fits_capacity(): the check only matters on
 * heterogeneous systems, where a task's uclamp_min value can be higher than
 * the capacity of a @cpu; for non-heterogeneous systems it goes away. */

/* in sched_rt_runtime() */
        if (!rt_rq->tg)
                return RUNTIME_INF;

        return rt_rq->rt_runtime;

/* in sched_rt_period() */
        return ktime_to_ns(rt_rq->tg->rt_bandwidth.rt_period);

/* in next_task_group() */
        do {
                tg = list_entry_rcu(tg->list.next,
                        typeof(struct task_group), list);
        } while (&tg->list != &task_groups && task_group_is_autogroup(tg));

        if (&tg->list == &task_groups)
                tg = NULL;

/* tail of the for_each_rt_rq() macro */
                (rt_rq = iter->rt_rq[cpu_of(rq)]);)

#define for_each_sched_rt_entity(rt_se) \
        for (; rt_se; rt_se = rt_se->parent)

/* in group_rt_rq() */
        return rt_se->my_q;
/* in sched_rt_rq_enqueue() */
        struct task_struct *donor = rq_of_rt_rq(rt_rq)->donor;

        rt_se = rt_rq->tg->rt_se[cpu];

        if (rt_rq->rt_nr_running) {
                ...
                if (rt_rq->highest_prio.curr < donor->prio)
                        resched_curr(rq);
        }

/* in sched_rt_rq_dequeue() */
        rt_se = rt_rq->tg->rt_se[cpu];

        if (!rt_se) {
                dequeue_top_rt_rq(rt_rq, rt_rq->rt_nr_running);
                ...
        }

/* in rt_rq_throttled() */
        return rt_rq->rt_throttled && !rt_rq->rt_nr_boosted;

/* in rt_se_boosted() */
        if (rt_rq)
                return !!rt_rq->rt_nr_boosted;

        p = rt_task_of(rt_se);
        return p->prio != p->normal_prio;

/* in sched_rt_period_mask() */
        return this_rq()->rd->span;

/* in sched_rt_period_rt_rq() */
        return container_of(rt_b, struct task_group, rt_bandwidth)->rt_rq[cpu];

/* in sched_rt_bandwidth() */
        return &rt_rq->tg->rt_bandwidth;

/* in sched_rt_bandwidth_account() */
        return (hrtimer_active(&rt_b->rt_period_timer) ||
                rt_rq->rt_time < rt_b->rt_runtime);
/* in do_balance_runtime() */
        struct root_domain *rd = rq_of_rt_rq(rt_rq)->rd;

        weight = cpumask_weight(rd->span);

        raw_spin_lock(&rt_b->rt_runtime_lock);
        rt_period = ktime_to_ns(rt_b->rt_period);
        for_each_cpu(i, rd->span) {
                struct rt_rq *iter = sched_rt_period_rt_rq(rt_b, i);
                s64 diff;
                ...
                raw_spin_lock(&iter->rt_runtime_lock);
                if (iter->rt_runtime == RUNTIME_INF)
                        goto next;

                /* take 1/n of the neighbour's spare time, but no more than our period */
                diff = iter->rt_runtime - iter->rt_time;
                if (diff > 0) {
                        diff = div_u64((u64)diff, weight);
                        if (rt_rq->rt_runtime + diff > rt_period)
                                diff = rt_period - rt_rq->rt_runtime;
                        iter->rt_runtime -= diff;
                        rt_rq->rt_runtime += diff;
                        if (rt_rq->rt_runtime == rt_period) {
                                raw_spin_unlock(&iter->rt_runtime_lock);
                                break;
                        }
                }
next:
                raw_spin_unlock(&iter->rt_runtime_lock);
        }
        raw_spin_unlock(&rt_b->rt_runtime_lock);
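A worked example of the borrowing arithmetic with made-up numbers (userspace toy, not kernel code): each neighbour contributes at most 1/weight of its spare runtime, and the borrower is capped at one full period.

#include <stdio.h>

int main(void)
{
        long long rt_period = 1000000;          /* ns, hypothetical */
        long long local = 300000;               /* our rt_runtime */
        long long iter_runtime = 900000;        /* a neighbour's runtime */
        long long iter_time = 100000;           /* ...and its consumed time */
        long long weight = 4;                   /* CPUs in rd->span */

        long long diff = iter_runtime - iter_time;      /* neighbour's spare */
        if (diff > 0) {
                diff /= weight;                         /* take 1/n of it */
                if (local + diff > rt_period)
                        diff = rt_period - local;       /* cap at the period */
                iter_runtime -= diff;
                local += diff;
        }
        /* prints: borrowed 200000 ns; local runtime now 500000 ns */
        printf("borrowed %lld ns; local runtime now %lld ns\n", diff, local);
        return 0;
}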
/* in __disable_runtime() */
        struct root_domain *rd = rq->rd;

        raw_spin_lock(&rt_b->rt_runtime_lock);
        raw_spin_lock(&rt_rq->rt_runtime_lock);
        if (rt_rq->rt_runtime == RUNTIME_INF ||
                        rt_rq->rt_runtime == rt_b->rt_runtime)
                goto balanced;
        raw_spin_unlock(&rt_rq->rt_runtime_lock);

        /* what we lent out and now have to reclaim: */
        want = rt_b->rt_runtime - rt_rq->rt_runtime;

        for_each_cpu(i, rd->span) {
                struct rt_rq *iter = sched_rt_period_rt_rq(rt_b, i);
                s64 diff;

                if (iter == rt_rq || iter->rt_runtime == RUNTIME_INF)
                        continue;

                raw_spin_lock(&iter->rt_runtime_lock);
                if (want > 0) {
                        diff = min_t(s64, iter->rt_runtime, want);
                        iter->rt_runtime -= diff;
                        want -= diff;
                } else {
                        iter->rt_runtime -= want;
                        want -= want;
                }
                raw_spin_unlock(&iter->rt_runtime_lock);

                if (!want)
                        break;
        }

        raw_spin_lock(&rt_rq->rt_runtime_lock);
        /*
         * We cannot be left wanting - that would mean some runtime
         * leaked out of the system.
         */
balanced:
        /*
         * Disable all the borrow logic by pretending we have inf
         * runtime - in which case borrowing doesn't make sense.
         */
        rt_rq->rt_runtime = RUNTIME_INF;
        rt_rq->rt_throttled = 0;
        raw_spin_unlock(&rt_rq->rt_runtime_lock);
        raw_spin_unlock(&rt_b->rt_runtime_lock);
/* in __enable_runtime() */
        raw_spin_lock(&rt_b->rt_runtime_lock);
        raw_spin_lock(&rt_rq->rt_runtime_lock);
        rt_rq->rt_runtime = rt_b->rt_runtime;
        rt_rq->rt_time = 0;
        rt_rq->rt_throttled = 0;
        raw_spin_unlock(&rt_rq->rt_runtime_lock);
        raw_spin_unlock(&rt_b->rt_runtime_lock);

/* in balance_runtime() */
        if (rt_rq->rt_time > rt_rq->rt_runtime) {
                raw_spin_unlock(&rt_rq->rt_runtime_lock);
                do_balance_runtime(rt_rq);
                raw_spin_lock(&rt_rq->rt_runtime_lock);
        }
/* in do_sched_rt_period_timer() */
                /*
                 * When span == cpu_online_mask, taking each rq->lock
                 * can be time-consuming. Try to avoid it when possible.
                 */
                raw_spin_lock(&rt_rq->rt_runtime_lock);
                if (!sched_feat(RT_RUNTIME_SHARE) && rt_rq->rt_runtime != RUNTIME_INF)
                        rt_rq->rt_runtime = rt_b->rt_runtime;
                skip = !rt_rq->rt_time && !rt_rq->rt_nr_running;
                raw_spin_unlock(&rt_rq->rt_runtime_lock);
                ...
                if (rt_rq->rt_time) {
                        u64 runtime;

                        raw_spin_lock(&rt_rq->rt_runtime_lock);
                        if (rt_rq->rt_throttled)
                                balance_runtime(rt_rq);
                        runtime = rt_rq->rt_runtime;
                        rt_rq->rt_time -= min(rt_rq->rt_time, overrun*runtime);
                        if (rt_rq->rt_throttled && rt_rq->rt_time < runtime) {
                                rt_rq->rt_throttled = 0;
                                ...
                                if (rt_rq->rt_nr_running && rq->curr == rq->idle)
                                        rq_clock_cancel_skipupdate(rq);
                        }
                        if (rt_rq->rt_time || rt_rq->rt_nr_running)
                                idle = 0;
                        raw_spin_unlock(&rt_rq->rt_runtime_lock);
                } else if (rt_rq->rt_nr_running) {
                        ...
                }

                if (rt_rq->rt_throttled)
                        throttled = 1;
        ...
        if (!throttled && (!rt_bandwidth_enabled() || rt_b->rt_runtime == RUNTIME_INF))
                return 1;

/* in sched_rt_runtime_exceeded() */
        if (rt_rq->rt_throttled)
                return rt_rq_throttled(rt_rq);
        ...
        if (rt_rq->rt_time > runtime) {
                struct rt_bandwidth *rt_b = sched_rt_bandwidth(rt_rq);

                /* don't throttle groups that only accrue time via PI boosting */
                if (likely(rt_b->rt_runtime)) {
                        rt_rq->rt_throttled = 1;
                        printk_deferred_once("sched: RT throttling activated\n");
                } else {
                        /* if we can't throttle, forget the overrun: */
                        rt_rq->rt_time = 0;
                }
        }
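The observable effect of this throttling path can be demonstrated from userspace. The sketch below (needs root; assumes the default 950000/1000000 us settings and an otherwise idle CPU) spins in SCHED_FIFO for two wall-clock seconds and should accumulate only about 1.9 s of CPU time:

#include <sched.h>
#include <stdio.h>
#include <time.h>

int main(void)
{
        struct sched_param sp = { .sched_priority = 10 };
        struct timespec start, now, cpu;

        if (sched_setscheduler(0, SCHED_FIFO, &sp)) {
                perror("sched_setscheduler");   /* typically needs root */
                return 1;
        }

        /* busy-wait for ~2 s of wall time */
        clock_gettime(CLOCK_MONOTONIC, &start);
        do {
                clock_gettime(CLOCK_MONOTONIC, &now);
        } while (now.tv_sec - start.tv_sec < 2);

        /* with default throttling, expect roughly 1.9 s here */
        clock_gettime(CLOCK_THREAD_CPUTIME_ID, &cpu);
        printf("cpu time in 2s wall: %ld.%09ld\n", (long)cpu.tv_sec, cpu.tv_nsec);
        return 0;
}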
/* the !CONFIG_RT_GROUP_SCHED for_each_rt_rq() */
        for ((void) iter, rt_rq = &rq->rt; rt_rq; rt_rq = NULL)

/* in sched_rt_rq_enqueue() */
        if (!rt_rq->rt_nr_running)
                return;

/* in sched_rt_rq_dequeue() */
        dequeue_top_rt_rq(rt_rq, rt_rq->rt_nr_running);

/* in sched_rt_period_rt_rq() */
        return &cpu_rq(cpu)->rt;

/* in rt_se_prio() */
        if (rt_rq)
                return rt_rq->highest_prio.curr;

        return rt_task_of(rt_se)->prio;
/* in update_curr_rt() */
        struct task_struct *donor = rq->donor;

        if (donor->sched_class != &rt_sched_class)
                return;
        ...
        struct sched_rt_entity *rt_se = &donor->rt;
        ...
                raw_spin_lock(&rt_rq->rt_runtime_lock);
                rt_rq->rt_time += delta_exec;
                if (sched_rt_runtime_exceeded(rt_rq))
                        resched_curr(rq);
                raw_spin_unlock(&rt_rq->rt_runtime_lock);

/* in dequeue_top_rt_rq() */
        BUG_ON(&rq->rt != rt_rq);

        if (!rt_rq->rt_queued)
                return;

        BUG_ON(!rq->nr_running);

        sub_nr_running(rq, count);
        rt_rq->rt_queued = 0;

/* in enqueue_top_rt_rq() */
        BUG_ON(&rq->rt != rt_rq);

        if (rt_rq->rt_queued)
                return;

        if (rt_rq->rt_nr_running) {
                add_nr_running(rq, rt_rq->rt_nr_running);
                rt_rq->rt_queued = 1;
        }
/* in inc_rt_prio_smp() */
        if (&rq->rt != rt_rq)
                return;

        if (rq->online && prio < prev_prio)
                cpupri_set(&rq->rd->cpupri, rq->cpu, prio);

/* in dec_rt_prio_smp() */
        if (&rq->rt != rt_rq)
                return;

        if (rq->online && rt_rq->highest_prio.curr != prev_prio)
                cpupri_set(&rq->rd->cpupri, rq->cpu, rt_rq->highest_prio.curr);

/* in inc_rt_prio() */
        int prev_prio = rt_rq->highest_prio.curr;

        if (prio < prev_prio)
                rt_rq->highest_prio.curr = prio;

/* in dec_rt_prio() */
        int prev_prio = rt_rq->highest_prio.curr;

        if (rt_rq->rt_nr_running) {
                /*
                 * This may have been our highest task, and therefore
                 * we may have some re-computation to do
                 */
                if (prio == prev_prio) {
                        struct rt_prio_array *array = &rt_rq->active;

                        rt_rq->highest_prio.curr =
                                sched_find_first_bit(array->bitmap);
                }
        } else {
                rt_rq->highest_prio.curr = MAX_RT_PRIO-1;
        }

/* in inc_rt_group() */
        if (rt_se_boosted(rt_se))
                rt_rq->rt_nr_boosted++;

        if (rt_rq->tg)
                start_rt_bandwidth(&rt_rq->tg->rt_bandwidth);

/* in dec_rt_group() */
        if (rt_se_boosted(rt_se))
                rt_rq->rt_nr_boosted--;

        WARN_ON(!rt_rq->rt_nr_running && rt_rq->rt_nr_boosted);

/* in rt_se_nr_running() */
        if (group_rq)
                return group_rq->rt_nr_running;

        return 1;

/* in rt_se_rr_nr_running() */
        if (group_rq)
                return group_rq->rr_nr_running;

        tsk = rt_task_of(rt_se);

        return (tsk->policy == SCHED_RR) ? 1 : 0;

/* in inc_rt_tasks() */
        rt_rq->rt_nr_running += rt_se_nr_running(rt_se);
        rt_rq->rr_nr_running += rt_se_rr_nr_running(rt_se);

/* in dec_rt_tasks() */
        WARN_ON(!rt_rq->rt_nr_running);
        rt_rq->rt_nr_running -= rt_se_nr_running(rt_se);
        rt_rq->rr_nr_running -= rt_se_rr_nr_running(rt_se);
/*
 * Change rt_se->run_list location unless SAVE && !MOVE
 */

/* in __delist_rt_entity() */
        list_del_init(&rt_se->run_list);

        if (list_empty(array->queue + rt_se_prio(rt_se)))
                __clear_bit(rt_se_prio(rt_se), array->bitmap);

        rt_se->on_list = 0;

/* in __schedstats_from_rt_se() */
        return &rt_task_of(rt_se)->stats;

/* in update_stats_dequeue_rt() */
        state = READ_ONCE(p->__state);
        if (state & TASK_INTERRUPTIBLE)
                __schedstat_set(p->stats.sleep_start,
                                rq_clock(rq_of_rt_rq(rt_rq)));

        if (state & TASK_UNINTERRUPTIBLE)
                __schedstat_set(p->stats.block_start,
                                rq_clock(rq_of_rt_rq(rt_rq)));
/* in __enqueue_rt_entity() */
        struct rt_prio_array *array = &rt_rq->active;
        struct rt_rq *group_rq = group_rt_rq(rt_se);
        struct list_head *queue = array->queue + rt_se_prio(rt_se);

        /* Don't enqueue the group if its throttled, or when empty. */
        if (group_rq && (rt_rq_throttled(group_rq) || !group_rq->rt_nr_running)) {
                if (rt_se->on_list)
                        __delist_rt_entity(rt_se, array);
                return;
        }

        if (move_entity(flags)) {
                WARN_ON_ONCE(rt_se->on_list);
                if (flags & ENQUEUE_HEAD)
                        list_add(&rt_se->run_list, queue);
                else
                        list_add_tail(&rt_se->run_list, queue);

                __set_bit(rt_se_prio(rt_se), array->bitmap);
                rt_se->on_list = 1;
        }
        rt_se->on_rq = 1;

/* in __dequeue_rt_entity() */
        struct rt_prio_array *array = &rt_rq->active;

        if (move_entity(flags)) {
                WARN_ON_ONCE(!rt_se->on_list);
                __delist_rt_entity(rt_se, array);
        }
        rt_se->on_rq = 0;

/*
 * Because the prio of an upper entry depends on the lower
 * entries, we must remove entries top-down.
 */

/* in dequeue_rt_stack() */
        for_each_sched_rt_entity(rt_se) {
                rt_se->back = back;
                back = rt_se;
        }

        rt_nr_running = rt_rq_of_se(back)->rt_nr_running;

        for (rt_se = back; rt_se; rt_se = rt_se->back) {
                if (on_rt_rq(rt_se))
                        __dequeue_rt_entity(rt_se, flags);
        }

/* in enqueue_rt_entity() */
        enqueue_top_rt_rq(&rq->rt);

/* in dequeue_rt_entity() */
        if (rt_rq && rt_rq->rt_nr_running)
                __enqueue_rt_entity(rt_se, flags);

        enqueue_top_rt_rq(&rq->rt);
/* in enqueue_task_rt() */
        struct sched_rt_entity *rt_se = &p->rt;

        if (flags & ENQUEUE_WAKEUP)
                rt_se->timeout = 0;
        ...
        if (!task_current(rq, p) && p->nr_cpus_allowed > 1)
                enqueue_pushable_task(rq, p);

/* in dequeue_task_rt() */
        struct sched_rt_entity *rt_se = &p->rt;

/* in requeue_rt_entity() */
        struct rt_prio_array *array = &rt_rq->active;
        struct list_head *queue = array->queue + rt_se_prio(rt_se);

        if (head)
                list_move(&rt_se->run_list, queue);
        else
                list_move_tail(&rt_se->run_list, queue);

/* in requeue_task_rt() */
        struct sched_rt_entity *rt_se = &p->rt;

/* in yield_task_rt() */
        requeue_task_rt(rq, rq->curr, 0);
/* in select_task_rq_rt() */
        /* For anything but wake ups, just return the task_cpu */
        if (!(flags & (WF_TTWU | WF_FORK)))
                goto out;
        ...
        curr = READ_ONCE(rq->curr); /* unlocked access */
        donor = READ_ONCE(rq->donor);

        /*
         * ... otherwise, just let it ride on the affine RQ and the
         * post-schedule router will push the preempted task away.
         *
         * This test is optimistic, if we get it wrong the load-balancer
         * will have to sort it out. We also take into account the capacity
         * requirement of the task - which is only important on heterogeneous
         * systems.
         */
        test = curr &&
               unlikely(rt_task(donor)) &&
               (curr->nr_cpus_allowed < 2 || donor->prio <= p->prio);
        ...
        if (!test && target != -1 && !rt_task_fits_capacity(p, target))
                goto out_unlock;

        /*
         * Don't bother moving it if the destination CPU is
         * not running a lower priority task.
         */
        if (target != -1 &&
            p->prio < cpu_rq(target)->rt.highest_prio.curr)
                cpu = target;

/* in check_preempt_equal_prio() */
        /*
         * Current can't be migrated, useless to reschedule,
         * let's hope p can move out.
         */
        if (rq->curr->nr_cpus_allowed == 1 ||
            !cpupri_find(&rq->rd->cpupri, rq->donor, NULL))
                return;

        /*
         * p is migratable, so let's not schedule it and
         * see if it is pushed or pulled somewhere else.
         */
        if (p->nr_cpus_allowed != 1 &&
            cpupri_find(&rq->rd->cpupri, p, NULL))
                return;
/* in balance_rt() */
        if (!on_rt_rq(&p->rt) && need_pull_rt_task(rq, p)) {
                /*
                 * This is OK, because current is on_cpu, which avoids it being
                 * picked for load-balance and preemption/IRQs are still
                 * disabled, avoiding further scheduler activity on it.
                 */
                rq_unpin_lock(rq, rf);
                pull_rt_task(rq);
                rq_repin_lock(rq, rf);
        }

/* in wakeup_preempt_rt() */
        struct task_struct *donor = rq->donor;

        if (p->prio < donor->prio) {
                resched_curr(rq);
                return;
        }

        /*
         * If:
         * - the newly woken task is of equal priority to the current task
         * - the newly woken task is non-migratable while current is migratable
         * - current will be preempted on the next reschedule
         *
         * we should check to see if current can readily move to a different
         * cpu. If so, we will reschedule to allow the push logic to try
         * to move current somewhere else, making room for our non-migratable
         * task.
         */
        if (p->prio == donor->prio && !test_tsk_need_resched(rq->curr))
                check_preempt_equal_prio(rq, p);
/* in set_next_task_rt() */
        struct sched_rt_entity *rt_se = &p->rt;
        struct rt_rq *rt_rq = &rq->rt;

        p->se.exec_start = rq_clock_task(rq);
        if (on_rt_rq(&p->rt))
                update_stats_wait_end_rt(rt_rq, rt_se);

        /* The running task is never eligible for pushing */
        dequeue_pushable_task(rq, p);
        ...
        if (rq->donor->sched_class != &rt_sched_class)
                update_rt_rq_load_avg(rq_clock_pelt(rq), rq, 0);

/* in pick_next_rt_entity() */
        struct rt_prio_array *array = &rt_rq->active;

        idx = sched_find_first_bit(array->bitmap);

        queue = array->queue + idx;

        next = list_entry(queue->next, struct sched_rt_entity, run_list);

/* in _pick_next_task_rt() */
        struct rt_rq *rt_rq = &rq->rt;

/* in put_prev_task_rt() */
        struct sched_rt_entity *rt_se = &p->rt;
        struct rt_rq *rt_rq = &rq->rt;

        if (on_rt_rq(&p->rt))
                update_stats_wait_start_rt(rt_rq, rt_se);
        ...
        /* The previous task needs to be made eligible for pushing if still active */
        if (on_rt_rq(&p->rt) && p->nr_cpus_allowed > 1)
                enqueue_pushable_task(rq, p);

/* in pick_highest_pushable_task() */
        struct plist_head *head = &rq->rt.pushable_tasks;
/* in find_lowest_rq() */
        if (unlikely(!lowest_mask))
                return -1;

        if (task->nr_cpus_allowed == 1)
                return -1; /* No other targets possible */

        if (sched_asym_cpucap_active())
                ret = cpupri_find_fitness(&task_rq(task)->rd->cpupri,
                                          task, lowest_mask,
                                          rt_task_fits_capacity);
        else
                ret = cpupri_find(&task_rq(task)->rd->cpupri,
                                  task, lowest_mask);

        if (!ret)
                return -1; /* No targets found */

        /*
         * We prioritize the last CPU that the task executed on since
         * it is most likely cache-hot in that location.
         */
        ...
        if (!cpumask_test_cpu(this_cpu, lowest_mask))
                this_cpu = -1; /* Skip this_cpu opt if not among lowest */

        for_each_domain(cpu, sd) {
                if (sd->flags & SD_WAKE_AFFINE) {
                        /* "this_cpu" is cheaper to preempt than a remote CPU */
                        if (this_cpu != -1 &&
                            cpumask_test_cpu(this_cpu, sched_domain_span(sd))) {
                                rcu_read_unlock();
                                return this_cpu;
                        }
                }
        }
        ...
        if (this_cpu != -1)
                return this_cpu;
        ...
        return -1;
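Conceptually, cpupri (consulted via cpupri_find*() above) answers "which CPUs are running the lowest-priority work right now?", so a pushed task lands where it preempts the least. The kernel keeps a two-level bitmap for fast lookups; the naive O(ncpus) scan below (userspace sketch with made-up snapshot data) shows the same contract. As in rt.c, a numerically larger p->prio value means a less important task.

#include <stdio.h>

#define NCPUS 4

int main(void)
{
        /* highest_prio.curr per CPU; bigger value = less important work */
        int cpu_prio[NCPUS] = { 10, 99, 40, 99 };
        int task_prio = 40;     /* the task we want to push */
        int best = -1;

        for (int c = 0; c < NCPUS; c++)
                if (cpu_prio[c] > task_prio &&          /* would preempt there */
                    (best == -1 || cpu_prio[c] > cpu_prio[best]))
                        best = c;

        printf("push to CPU %d\n", best);               /* prints 1 */
        return 0;
}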
/* in find_lock_lowest_rq() */
        if ((cpu == -1) || (cpu == rq->cpu))
                break;

        if (lowest_rq->rt.highest_prio.curr <= task->prio) {
                /* Target rq has tasks of equal or higher priority */
                lowest_rq = NULL;
                break;
        }

        /* re-check after double_lock_balance() dropped rq->lock: */
        if (unlikely(task_rq(task) != rq ||
                     !cpumask_test_cpu(lowest_rq->cpu, &task->cpus_mask) ||
                     task_on_cpu(rq, task) ||
                     !rt_task(task) ||
                     is_migration_disabled(task) ||
                     !task_on_rq_queued(task))) {
                double_unlock_balance(rq, lowest_rq);
                lowest_rq = NULL;
                break;
        }

        /* If this rq is still suitable use it. */
        if (lowest_rq->rt.highest_prio.curr > task->prio)
                break;

/* in pick_next_pushable_task() */
        p = plist_first_entry(&rq->rt.pushable_tasks,
                              struct task_struct, pushable_tasks);

        BUG_ON(rq->cpu != task_cpu(p));

        BUG_ON(p->nr_cpus_allowed <= 1);
static int push_rt_task(struct rq *rq, bool pull)
{
        ...
        if (!rq->rt.overloaded)
                return 0;
        ...
        /*
         * It's possible that the next_task slipped in of
         * higher priority than current. If that's the case
         * just reschedule current.
         */
        if (unlikely(next_task->prio < rq->donor->prio)) {
                resched_curr(rq);
                return 0;
        }

        if (is_migration_disabled(next_task)) {
                ...
                if (!pull || rq->push_busy)
                        return 0;

                /*
                 * Invoking find_lowest_rq() on anything but an RT task doesn't
                 * make sense. Per the above priority check, curr has to
                 * be of higher priority than next_task, so no need to
                 * reschedule when bailing out.
                 */
                if (rq->donor->sched_class != &rt_sched_class)
                        return 0;

                cpu = find_lowest_rq(rq->curr);
                if (cpu == -1 || cpu == rq->cpu)
                        return 0;
                ...
                stop_one_cpu_nowait(rq->cpu, push_cpu_stop,
                                    push_task, &rq->push_work);
                ...
        }

        if (WARN_ON(next_task == rq->curr))
                return 0;
        ...
        /*
         * find_lock_lowest_rq releases rq->lock, so it is possible that
         * next_task has migrated: we need to make sure it is still on the
         * same run-queue and is also still the next task eligible for
         * pushing.
         */
        ...
        /*
         * If it is now the next eligible task, but we failed to find a run-queue
         * to push it to, do not retry: other CPUs will pull from us when ready.
         */
/*
 * ... the overloaded CPU is notified that another CPU has opened
 * up that may be able to run one of its non-running queued RT tasks.
 */

/* in rto_next_cpu() */
        /*
         * When starting the IPI RT pushing, rto_cpu is set to -1, so the
         * cpumask_next() below acts like cpumask_first():
         */
        cpu = cpumask_next(rd->rto_cpu, rd->rto_mask);

        rd->rto_cpu = cpu;

        if (cpu < nr_cpu_ids)
                return cpu;

        rd->rto_cpu = -1;

        /*
         * ACQUIRE ensures we see the @rto_mask changes
         * made prior to the @next value observed.
         */
        next = atomic_read_acquire(&rd->rto_loop_next);

        if (rd->rto_loop == next)
                break;

        rd->rto_loop = next;
        ...
        return -1;

/* in tell_cpu_to_push() */
        int cpu = -1;

        /* Keep the loop going if the IPI is currently active */
        atomic_inc(&rq->rd->rto_loop_next);

        /* Only one CPU can initiate a loop at a time */
        if (!rto_start_trylock(&rq->rd->rto_loop_start))
                return;

        raw_spin_lock(&rq->rd->rto_lock);

        /*
         * The rto_cpu is updated under the lock; if it has a valid CPU
         * then the IPI is still running and will continue due to the
         * update to loop_next. Otherwise it is finishing up and an IPI
         * needs to be sent.
         */
        if (rq->rd->rto_cpu < 0)
                cpu = rto_next_cpu(rq->rd);

        raw_spin_unlock(&rq->rd->rto_lock);

        rto_start_unlock(&rq->rd->rto_loop_start);

        if (cpu >= 0) {
                /* Make sure the rd does not get freed while pushing */
                sched_get_rd(rq->rd);
                irq_work_queue_on(&rq->rd->rto_push_work, cpu);
        }

/* in rto_push_irq_work_func() */
        raw_spin_lock(&rd->rto_lock);
        /* Pass the IPI to the next rt overloaded queue */
        cpu = rto_next_cpu(rd);
        raw_spin_unlock(&rd->rto_lock);

        if (cpu < 0) {
                sched_put_rd(rd);
                return;
        }

        /* Try the next RT overloaded CPU */
        irq_work_queue_on(&rd->rto_push_work, cpu);
/* in pull_rt_task() */
        int this_cpu = this_rq->cpu, cpu;
        ...
        /* If we are the only overloaded CPU do nothing */
        if (rt_overload_count == 1 &&
            cpumask_test_cpu(this_rq->cpu, this_rq->rd->rto_mask))
                return;
        ...
        for_each_cpu(cpu, this_rq->rd->rto_mask) {
                if (this_cpu == cpu)
                        continue;

                src_rq = cpu_rq(cpu);

                /*
                 * Don't bother taking the src_rq->lock if the next highest
                 * task is known to be lower-priority than our current task.
                 */
                if (src_rq->rt.highest_prio.next >=
                    this_rq->rt.highest_prio.curr)
                        continue;
                ...
                /*
                 * We can pull only a task, which is pushable
                 * on its rq, and no others.
                 */
                p = pick_highest_pushable_task(src_rq, this_cpu);

                /*
                 * Do we have an RT task that preempts
                 * the to-be-scheduled task?
                 */
                if (p && (p->prio < this_rq->rt.highest_prio.curr)) {
                        WARN_ON(p == src_rq->curr);

                        /*
                         * There's a chance that p is higher in priority than
                         * what's currently running on its CPU. This is just
                         * that p is waking up and hasn't had a chance to
                         * schedule; we only pull p if it is lower in priority
                         * than the current task on its run queue.
                         */
                        if (p->prio < src_rq->donor->prio)
                                goto skip;
                        ...
                        stop_one_cpu_nowait(src_rq->cpu, push_cpu_stop,
                                            push_task, &src_rq->push_work);
                        ...
                }
        }

/* in task_woken_rt() */
        bool need_to_push = !task_on_cpu(rq, p) &&
                            !test_tsk_need_resched(rq->curr) &&
                            p->nr_cpus_allowed > 1 &&
                            (dl_task(rq->donor) || rt_task(rq->donor)) &&
                            (rq->curr->nr_cpus_allowed < 2 ||
                             rq->donor->prio <= p->prio);
/* Assumes rq->lock is held */
/* in rq_online_rt() */
        if (rq->rt.overloaded)
                rt_set_overload(rq);

        __enable_runtime(rq);

        cpupri_set(&rq->rd->cpupri, rq->cpu, rq->rt.highest_prio.curr);

/* Assumes rq->lock is held */
/* in rq_offline_rt() */
        if (rq->rt.overloaded)
                rt_clear_overload(rq);

        __disable_runtime(rq);

        cpupri_set(&rq->rd->cpupri, rq->cpu, CPUPRI_INVALID);

/*
 * When we switch from the rt queue, we bring ourselves to a position
 * that we might want to pull RT tasks from other runqueues.
 */
/* in switched_from_rt() */
        if (!task_on_rq_queued(p) || rq->rt.rt_nr_running)
                return;

        rt_queue_pull_task(rq);

/* in switched_to_rt() */
        if (p->nr_cpus_allowed > 1 && rq->rt.overloaded)
                rt_queue_push_tasks(rq);

        if (p->prio < rq->donor->prio && cpu_online(cpu_of(rq)))
                resched_curr(rq);

/*
 * Priority of the task has changed. This may cause
 * us to initiate a push or pull.
 */
/* in prio_changed_rt() */
        /* if our priority decreased while running, we may need to pull: */
        if (oldprio < p->prio)
                rt_queue_pull_task(rq);

        /* If there's a higher priority task waiting to run, reschedule */
        if (p->prio > rq->rt.highest_prio.curr)
                resched_curr(rq);

        /* For UP simply resched on drop of prio */
        if (oldprio < p->prio)
                resched_curr(rq);

        /*
         * This task is not running, but if it is
         * greater than the current running task
         * then reschedule.
         */
        if (p->prio < rq->donor->prio)
                resched_curr(rq);
/* in watchdog() */
        if (p->rt.watchdog_stamp != jiffies) {
                p->rt.timeout++;
                p->rt.watchdog_stamp = jiffies;
        }
        ...
        if (p->rt.timeout > next) {
                posix_cputimers_rt_watchdog(&p->posix_cputimers,
                                            p->se.sum_exec_runtime);
        }

/* in task_tick_rt() */
        struct sched_rt_entity *rt_se = &p->rt;
        ...
        /*
         * RR tasks need a special form of time-slice management.
         * FIFO tasks have no timeslices.
         */
        if (p->policy != SCHED_RR)
                return;

        if (--p->rt.time_slice)
                return;

        p->rt.time_slice = sched_rr_timeslice;

        /*
         * Requeue to the end of the queue if we (and all of our ancestors)
         * are not the only element on the queue.
         */
        for_each_sched_rt_entity(rt_se) {
                if (rt_se->run_list.prev != rt_se->run_list.next) {
                        requeue_task_rt(rq, p, 0);
                        resched_curr(rq);
                        return;
                }
        }

/* in get_rr_interval_rt() */
        /* Time slice is 0 for SCHED_FIFO tasks */
        if (task->policy == SCHED_RR)
                return sched_rr_timeslice;

        return 0;
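The RR slice bookkeeping above is visible through sched_rr_get_interval(2). A small userspace check (switching policy needs root or CAP_SYS_NICE):

#include <sched.h>
#include <stdio.h>
#include <time.h>

int main(void)
{
        struct sched_param sp = { .sched_priority = 1 };
        struct timespec ts;

        if (sched_setscheduler(0, SCHED_RR, &sp)) {
                perror("sched_setscheduler");
                return 1;
        }
        if (sched_rr_get_interval(0, &ts)) {
                perror("sched_rr_get_interval");
                return 1;
        }
        /* SCHED_FIFO would report 0 here, per get_rr_interval_rt() */
        printf("SCHED_RR timeslice: %ld.%09ld s\n", (long)ts.tv_sec, ts.tv_nsec);
        return 0;
}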
/* in task_is_throttled_rt() */
#ifdef CONFIG_RT_GROUP_SCHED
        rt_rq = task_group(p)->rt_rq[cpu];
#else
        rt_rq = &cpu_rq(cpu)->rt;
#endif
/* in tg_has_rt_tasks() */
        css_task_iter_start(&tg->css, 0, &it);

/* in tg_rt_schedulable() */
        period = ktime_to_ns(tg->rt_bandwidth.rt_period);
        runtime = tg->rt_bandwidth.rt_runtime;

        if (tg == d->tg) {
                period = d->rt_period;
                runtime = d->rt_runtime;
        }

        /* Cannot have more runtime than the period. */
        if (runtime > period && runtime != RUNTIME_INF)
                return -EINVAL;

        /* Ensure we don't starve existing RT tasks if runtime turns zero. */
        if (rt_bandwidth_enabled() && !runtime &&
            tg->rt_bandwidth.rt_runtime && tg_has_rt_tasks(tg))
                return -EBUSY;
        ...
        if (total > to_ratio(period, runtime))
                return -EINVAL;

        /* The sum of our children's runtime should not exceed our own. */
        list_for_each_entry_rcu(child, &tg->children, siblings) {
                period = ktime_to_ns(child->rt_bandwidth.rt_period);
                runtime = child->rt_bandwidth.rt_runtime;

                if (child == d->tg) {
                        period = d->rt_period;
                        runtime = d->rt_runtime;
                }

                sum += to_ratio(period, runtime);
        }

        if (sum > total)
                return -EINVAL;
/* in tg_set_rt_bandwidth() */
        /*
         * Disallowing the root group RT runtime is BAD, it would disallow the
         * kernel creating (and or operating) RT threads.
         */
        if (tg == &root_task_group && rt_runtime == 0)
                return -EINVAL;

        /* No period doesn't make any sense. */
        if (rt_period == 0)
                return -EINVAL;

        /* Bound quota to defend quota against overflow during bandwidth shift. */
        if (rt_runtime != RUNTIME_INF && rt_runtime > max_rt_runtime)
                return -EINVAL;
        ...
        raw_spin_lock_irq(&tg->rt_bandwidth.rt_runtime_lock);
        tg->rt_bandwidth.rt_period = ns_to_ktime(rt_period);
        tg->rt_bandwidth.rt_runtime = rt_runtime;

        for_each_possible_cpu(i) {
                struct rt_rq *rt_rq = tg->rt_rq[i];

                raw_spin_lock(&rt_rq->rt_runtime_lock);
                rt_rq->rt_runtime = rt_runtime;
                raw_spin_unlock(&rt_rq->rt_runtime_lock);
        }
        raw_spin_unlock_irq(&tg->rt_bandwidth.rt_runtime_lock);
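With CONFIG_RT_GROUP_SCHED, this path is driven from the cgroup-v1 cpu controller files cpu.rt_period_us and cpu.rt_runtime_us. A userspace sketch (the mount point and the group name "mygrp" are assumptions):

#include <stdio.h>

int main(void)
{
        /* hypothetical group; create it first under the cpu hierarchy */
        const char *path = "/sys/fs/cgroup/cpu/mygrp/cpu.rt_runtime_us";
        FILE *f = fopen(path, "w");

        if (!f) {
                perror("fopen");
                return 1;
        }
        /* allow tasks in mygrp 300 ms of RT time per (default) 1 s period */
        fprintf(f, "300000\n");
        return fclose(f) ? 1 : 0;
}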
/* in sched_group_set_rt_runtime() */
        rt_period = ktime_to_ns(tg->rt_bandwidth.rt_period);
        ...
        if ((u64)rt_runtime_us > U64_MAX / NSEC_PER_USEC)
                return -EINVAL;

/* in sched_group_rt_runtime() */
        if (tg->rt_bandwidth.rt_runtime == RUNTIME_INF)
                return -1;

        rt_runtime_us = tg->rt_bandwidth.rt_runtime;
        do_div(rt_runtime_us, NSEC_PER_USEC);
        return rt_runtime_us;

/* in sched_group_set_rt_period() */
        if (rt_period_us > U64_MAX / NSEC_PER_USEC)
                return -EINVAL;

        rt_period = rt_period_us * NSEC_PER_USEC;
        rt_runtime = tg->rt_bandwidth.rt_runtime;

/* in sched_group_rt_period() */
        rt_period_us = ktime_to_ns(tg->rt_bandwidth.rt_period);
        do_div(rt_period_us, NSEC_PER_USEC);
        return rt_period_us;

/* in sched_rt_can_attach() */
        /* Don't accept real-time tasks when there is no way for them to run */
        if (rt_task(tsk) && tg->rt_bandwidth.rt_runtime == 0)
                return 0;

/* in sched_rt_global_validate() */
        if (sysctl_sched_rt_runtime != RUNTIME_INF &&
            ((sysctl_sched_rt_runtime > sysctl_sched_rt_period) ||
             ((u64)sysctl_sched_rt_runtime * NSEC_PER_USEC > max_rt_runtime)))
                return -EINVAL;

/* in sched_rr_handler() */
        /*
         * Make sure that internally we keep jiffies.
         * Also, writing zero resets the time-slice to default:
         */
        sched_rr_timeslice =
                sysctl_sched_rr_timeslice <= 0 ? RR_TIMESLICE :
                msecs_to_jiffies(sysctl_sched_rr_timeslice);