1 /* SPDX-License-Identifier: GPL-2.0 */
70 #include <asm-generic/vmlinux.lds.h>
104 * Helpers for converting nanosecond timing to jiffy resolution
109 * Increase resolution of nice-level calculations for 64-bit architectures.
110 * The extra resolution improves shares distribution and load balancing of
111 * low-weight task groups (e.g. nice +19 on an autogroup), deeper taskgroup
112 * hierarchies, especially on larger systems. This is not a user-visible change
113 * and does not change the user-interface for setting shares/weights.
115 * We increase resolution only if we have enough bits to allow this increased
116 * resolution (i.e. 64-bit). The costs for increasing resolution when 32-bit
120 * increase coverage and consistency always enable it on 64-bit platforms.
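To make the shift concrete, here is a minimal userspace sketch of the idea, assuming stand-in values for the kernel's fixed-point constants (illustrative only, not the kernel's exact scale_load()/scale_load_down() definitions):

#include <stdio.h>
#include <stdint.h>

#define SCHED_FIXEDPOINT_SHIFT	10	/* assumed 10-bit fixed point, as in the kernel */
#define NICE_0_WEIGHT		1024	/* user-visible weight of a nice-0 task */

/* On 64-bit, internal load values carry SCHED_FIXEDPOINT_SHIFT extra bits. */
static uint64_t scale_load(uint64_t w)      { return w << SCHED_FIXEDPOINT_SHIFT; }
static uint64_t scale_load_down(uint64_t w) { return w >> SCHED_FIXEDPOINT_SHIFT; }

int main(void)
{
	uint64_t user_weight = NICE_0_WEIGHT;		/* what userspace configures */
	uint64_t load = scale_load(user_weight);	/* what the scheduler sums internally */

	printf("user weight %llu -> internal load %llu -> back down to %llu\n",
	       (unsigned long long)user_weight,
	       (unsigned long long)load,
	       (unsigned long long)scale_load_down(load));
	return 0;
}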
140 * independent resolution, but they should be well calibrated. We use
151 * 10 -> just above 1us
152 * 9 -> just above 0.5us
187 return idle_policy(p->policy); in task_has_idle_policy()
192 return rt_policy(p->policy); in task_has_rt_policy()
197 return dl_policy(p->policy); in task_has_dl_policy()
204 s64 diff = sample - *avg; in update_avg()
225 return unlikely(dl_se->flags & SCHED_FLAG_SUGOV); in dl_entity_is_special()
238 dl_time_before(a->deadline, b->deadline); in dl_entity_preempt()
242 * This is the priority-queue data structure of the RT scheduling class:
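The structure that follows this comment in the kernel pairs a bitmap of active priorities with one FIFO list per priority, so picking the highest-priority runnable RT task is a find-first-set-bit plus a list-head lookup. A simplified userspace sketch of that scheme (array stand-ins instead of list_head, not the kernel code):

#include <stdio.h>
#include <strings.h>		/* ffs() */

#define MAX_RT_PRIO	100	/* lower index == higher RT priority */
#define WORDS		((MAX_RT_PRIO + 31) / 32)

struct prio_array {
	unsigned int bitmap[WORDS];	/* one bit per non-empty priority level */
	int head[MAX_RT_PRIO];		/* stand-in for the per-priority FIFO lists */
};

static void enqueue(struct prio_array *a, int prio, int task)
{
	a->head[prio] = task;
	a->bitmap[prio / 32] |= 1u << (prio % 32);
}

static int pick_next(const struct prio_array *a)
{
	for (int w = 0; w < WORDS; w++)
		if (a->bitmap[w])
			return a->head[w * 32 + ffs(a->bitmap[w]) - 1];
	return -1;	/* no runnable RT task */
}

int main(void)
{
	struct prio_array a = { { 0 }, { 0 } };

	enqueue(&a, 40, 1001);
	enqueue(&a, 10, 1002);
	printf("next: task %d\n", pick_next(&a));	/* task 1002: priority index 10 beats 40 */
	return 0;
}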
261 * To keep the bandwidth of -deadline tasks and groups under control
263 * - store the maximum -deadline bandwidth of the system (the group);
264 * - cache the fraction of that bandwidth that is currently allocated.
267 * one used for RT-throttling (rt_bandwidth), with the main difference
272 * With respect to SMP, the bandwidth is given on a per-CPU basis,
274 * - dl_bw (< 100%) is the bandwidth of the system (group) on each CPU;
275 * - dl_total_bw array contains, in the i-th element, the currently
276 * allocated bandwidth on the i-th CPU.
306 dl_b->total_bw -= tsk_bw; in __dl_sub()
313 dl_b->total_bw += tsk_bw; in __dl_add()
314 __dl_update(dl_b, -((s32)tsk_bw / cpus)); in __dl_add()
320 return dl_b->bw != -1 && in __dl_overflow()
321 cap_scale(dl_b->bw, cap) < dl_b->total_bw - old_bw + new_bw; in __dl_overflow()
336 return cap_scale(p->dl.dl_deadline, cap) >= p->dl.dl_runtime; in dl_task_fits_capacity()
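To make the fixed-point arithmetic behind these checks concrete, here is a hedged userspace sketch in the spirit of the kernel's to_ratio()/__dl_overflow() pair, with the capacity scaling and old_bw terms dropped for brevity (names mirror the fragments above, but the code is illustrative, not the kernel's):

#include <stdio.h>
#include <stdint.h>

#define BW_SHIFT 20	/* bandwidth values are 20-bit fixed-point fractions of one CPU */

/* runtime/period as a fixed-point fraction, in the spirit of to_ratio() */
static uint64_t to_ratio(uint64_t period, uint64_t runtime)
{
	return (runtime << BW_SHIFT) / period;
}

int main(void)
{
	uint64_t cap_bw   = to_ratio(100, 95);		/* 95% of a CPU is admissible   */
	uint64_t total_bw = to_ratio(100, 60);		/* 60% already allocated        */
	uint64_t new_bw   = to_ratio(100000, 30000);	/* new task: 30 ms every 100 ms */

	/* __dl_overflow()-style admission test */
	if (cap_bw < total_bw + new_bw)
		printf("reject: %llu < %llu\n",
		       (unsigned long long)cap_bw,
		       (unsigned long long)(total_bw + new_bw));
	else
		printf("admit: 60%% + 30%% fits under the 95%% cap\n");
	return 0;
}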
425 /* The two decimal precision [%] value requested from user-space */
443 * (The default weight is 1024 - so there's no practical
518 /* CFS-related fields in a runqueue */
570 * Where f(tg) is the recursive weight fraction assigned to
584 * a hierarchy). Non-leaf lrqs hold other higher schedulable entities
618 /* Real-Time classes' related field in a runqueue: */
656 return rt_rq->rt_queued && rt_rq->rt_nr_running; in rt_rq_is_runnable()
683 * an rb-tree, ordered by tasks' deadlines, with caching
698 * Utilization of the tasks "assigned" to this runqueue (including
704 * runqueue (inactive utilization = this_bw - running_bw).
718 #define entity_is_task(se) (!se->my_q)
723 se->runnable_weight = se->my_q->h_nr_running; in se_update_runnable()
729 return !!se->on_rq; in se_runnable()
731 return se->runnable_weight; in se_runnable()
741 return !!se->on_rq; in se_runnable()
747 * XXX we want to get rid of these helpers and use the full load resolution.
751 return scale_load_down(se->load.weight); in se_weight()
768 #define SG_OVERUTILIZED 0x2 /* One or more CPUs are over-utilized. */
771 * We add the notion of a root-domain which will be used to define per-domain
774 * exclusive cpuset is created, we also create and attach a new root-domain
787 * - More than one runnable task
788 * - Running task is misfit
792 /* Indicate one or more cpus over-utilized (tipping point) */
797 * than one runnable -deadline task (as it is below for RT tasks).
827 * NULL-terminated list of performance domains intersecting with the
846 * struct uclamp_bucket - Utilization clamp bucket
855 unsigned long tasks : BITS_PER_LONG - bits_per(SCHED_CAPACITY_SCALE);
859 * struct uclamp_rq - rq's utilization clamp
871 * - for util_min: we want to run the CPU at least at the max of the minimum
873 * - for util_max: we want to allow the CPU to run up to the max of the
878 * the metrics required to compute all the per-rq utilization clamp values.
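Putting numbers on the tasks bitfield above: with the usual SCHED_CAPACITY_SCALE of 1024, bits_per() yields 11, so the clamp value occupies 11 bits and the tasks refcount gets the remaining BITS_PER_LONG - 11 bits: 53 on 64-bit and 21 on 32-bit, comfortably more than any realistic number of runnable tasks per bucket.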
889 * This is the main, per-CPU runqueue data structure.
1033 /* could above be rq->cfs_rq.exec_clock + rq->rt_rq.rt_runtime ? */
1058 return cfs_rq->rq; in rq_of()
1072 return rq->cpu; in cpu_of()
1097 #define cpu_curr(cpu) (cpu_rq(cpu)->curr)
1104 return READ_ONCE(rq->clock); in __rq_clock_broken()
1108 * rq::clock_update_flags bits
1110 * %RQCF_REQ_SKIP - will request skipping of clock update on the next
1114 * %RQCF_ACT_SKIP - is set from inside of __schedule() when skipping is
1117 * %RQCF_UPDATED - is a debug flag that indicates whether a call has been
1124 * if (rq->clock_update_flags >= RQCF_UPDATED)
1140 SCHED_WARN_ON(rq->clock_update_flags < RQCF_ACT_SKIP); in assert_clock_updated()
1145 lockdep_assert_held(&rq->lock); in rq_clock()
1148 return rq->clock; in rq_clock()
1153 lockdep_assert_held(&rq->lock); in rq_clock_task()
1156 return rq->clock_task; in rq_clock_task()
1179 lockdep_assert_held(&rq->lock); in rq_clock_skip_update()
1180 rq->clock_update_flags |= RQCF_REQ_SKIP; in rq_clock_skip_update()
1189 lockdep_assert_held(&rq->lock); in rq_clock_cancel_skipupdate()
1190 rq->clock_update_flags &= ~RQCF_REQ_SKIP; in rq_clock_cancel_skipupdate()
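A hedged sketch of how these pieces are meant to be combined while rq->lock is held (illustrative pattern, not a quote of a specific call site):

	u64 now;

	/* with rq->lock held: */
	update_rq_clock(rq);		/* refresh rq->clock and mark it updated (RQCF_UPDATED) */
	now = rq_clock_task(rq);	/* assert_clock_updated() is now satisfied */

	/*
	 * If __schedule() is known to follow immediately, its own clock
	 * update can be skipped by requesting RQCF_REQ_SKIP:
	 */
	rq_clock_skip_update(rq);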
1212 * copy of the (on-stack) 'struct rq_flags rf'.
1214 * Also see Documentation/locking/lockdep-design.rst.
1218 rf->cookie = lockdep_pin_lock(&rq->lock); in rq_pin_lock()
1221 rq->clock_update_flags &= (RQCF_REQ_SKIP|RQCF_ACT_SKIP); in rq_pin_lock()
1222 rf->clock_update_flags = 0; in rq_pin_lock()
1229 if (rq->clock_update_flags > RQCF_ACT_SKIP) in rq_unpin_lock()
1230 rf->clock_update_flags = RQCF_UPDATED; in rq_unpin_lock()
1233 lockdep_unpin_lock(&rq->lock, rf->cookie); in rq_unpin_lock()
1238 lockdep_repin_lock(&rq->lock, rf->cookie); in rq_repin_lock()
1244 rq->clock_update_flags |= rf->clock_update_flags; in rq_repin_lock()
1249 __acquires(rq->lock);
1252 __acquires(p->pi_lock)
1253 __acquires(rq->lock);
1256 __releases(rq->lock) in __task_rq_unlock()
1259 raw_spin_unlock(&rq->lock); in __task_rq_unlock()
1264 __releases(rq->lock) in task_rq_unlock()
1265 __releases(p->pi_lock) in task_rq_unlock()
1268 raw_spin_unlock(&rq->lock); in task_rq_unlock()
1269 raw_spin_unlock_irqrestore(&p->pi_lock, rf->flags); in task_rq_unlock()
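For tasks rather than CPUs, the usual pattern around these helpers looks roughly like this (illustrative sketch built only from the signatures shown above):

	struct rq_flags rf;
	struct rq *rq;

	rq = task_rq_lock(p, &rf);	/* takes p->pi_lock, then p's rq->lock */
	/* p cannot migrate or change runqueue while both locks are held */
	task_rq_unlock(rq, p, &rf);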
1274 __acquires(rq->lock) in rq_lock_irqsave()
1276 raw_spin_lock_irqsave(&rq->lock, rf->flags); in rq_lock_irqsave()
1282 __acquires(rq->lock) in rq_lock_irq()
1284 raw_spin_lock_irq(&rq->lock); in rq_lock_irq()
1290 __acquires(rq->lock) in rq_lock()
1292 raw_spin_lock(&rq->lock); in rq_lock()
1298 __acquires(rq->lock) in rq_relock()
1300 raw_spin_lock(&rq->lock); in rq_relock()
1306 __releases(rq->lock) in rq_unlock_irqrestore()
1309 raw_spin_unlock_irqrestore(&rq->lock, rf->flags); in rq_unlock_irqrestore()
1314 __releases(rq->lock) in rq_unlock_irq()
1317 raw_spin_unlock_irq(&rq->lock); in rq_unlock_irq()
1322 __releases(rq->lock) in rq_unlock()
1325 raw_spin_unlock(&rq->lock); in rq_unlock()
1330 __acquires(rq->lock) in this_rq_lock_irq()
1390 lockdep_assert_held(&rq->lock); in queue_balance_callback()
1392 if (unlikely(head->next)) in queue_balance_callback()
1395 head->func = (void (*)(struct callback_head *))func; in queue_balance_callback()
1396 head->next = rq->balance_callback; in queue_balance_callback()
1397 rq->balance_callback = head; in queue_balance_callback()
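As an example of how a scheduling class uses this, the RT class queues its push operation roughly as follows (paraphrased from memory of kernel/sched/rt.c, so treat the exact details as approximate):

static DEFINE_PER_CPU(struct callback_head, rt_push_head);

static inline void rt_queue_push_tasks(struct rq *rq)
{
	if (!has_pushable_tasks(rq))
		return;

	queue_balance_callback(rq, &per_cpu(rt_push_head, rq->cpu),
			       push_rt_tasks);
}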
1405 * The domain tree (rq->sd) is protected by RCU's quiescent state transition.
1409 * preempt-disabled sections.
1412 for (__sd = rcu_dereference_check_sched_domain(cpu_rq(cpu)->sd); \
1413 __sd; __sd = __sd->parent)
1416 * highest_flag_domain - Return highest sched_domain containing flag.
1429 if (!(sd->flags & flag)) in highest_flag_domain()
1442 if (sd->flags & flag) in lowest_flag_domain()
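A hedged usage sketch of the iterator (the target_cpu variable is hypothetical; as the comment above notes, real call sites run under rcu_read_lock() or in a preempt-disabled section):

	struct sched_domain *sd;

	rcu_read_lock();
	for_each_domain(cpu, sd) {
		/* walk upwards until a domain spans both CPUs */
		if (cpumask_test_cpu(target_cpu, sched_domain_span(sd)))
			break;
	}
	/* sd is now the smallest domain containing cpu and target_cpu, or NULL */
	rcu_read_unlock();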
1465 unsigned long min_capacity; /* Min per-CPU capacity in group */
1466 unsigned long max_capacity; /* Max per-CPU capacity in group */
1497 return to_cpumask(sg->cpumask); in sched_group_span()
1505 return to_cpumask(sg->sgc->cpumask); in group_balance_mask()
1509 * group_first_cpu - Returns the first CPU in the cpumask of a sched_group.
1553 * The same is true for autogroup's p->signal->autogroup->tg, the autogroup
1561 return p->sched_task_group; in task_group()
1572 set_task_rq_fair(&p->se, p->se.cfs_rq, tg->cfs_rq[cpu]); in set_task_rq()
1573 p->se.cfs_rq = tg->cfs_rq[cpu]; in set_task_rq()
1574 p->se.parent = tg->se[cpu]; in set_task_rq()
1578 p->rt.rt_rq = tg->rt_rq[cpu]; in set_task_rq()
1579 p->rt.parent = tg->rt_se[cpu]; in set_task_rq()
1598 * After ->cpu is set up to a new value, task_rq_lock(p, ...) can be in __set_task_cpu()
1600 * per-task data have been completed by this moment. in __set_task_cpu()
1604 WRITE_ONCE(p->cpu, cpu); in __set_task_cpu()
1606 WRITE_ONCE(task_thread_info(p)->cpu, cpu); in __set_task_cpu()
1608 p->wake_cpu = cpu; in __set_task_cpu()
1635 * To support run-time toggling of sched features, all the translation units
1695 return rq->curr == p; in task_current()
1701 return p->on_cpu; in task_running()
1709 return p->on_rq == TASK_ON_RQ_QUEUED; in task_on_rq_queued()
1714 return READ_ONCE(p->on_rq) == TASK_ON_RQ_MIGRATING; in task_on_rq_migrating()
1743 * DEQUEUE_SLEEP - task is no longer runnable
1744 * ENQUEUE_WAKEUP - task just became runnable
1746 * SAVE/RESTORE - an otherwise spurious dequeue/enqueue, done to ensure tasks
1750 * MOVE - paired with SAVE/RESTORE, explicitly does not preserve the location
1753 * ENQUEUE_HEAD - place at front of runqueue (tail if not specified)
1754 * ENQUEUE_REPLENISH - CBS (replenish runtime and postpone deadline)
1755 * ENQUEUE_MIGRATED - the task was migrated during wakeup
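The SAVE/RESTORE flags combine into a common pattern whenever a task's parameters are changed while it may be queued or running; roughly (paraphrasing set_user_nice()-style call sites, so treat it as illustrative):

	bool queued, running;

	queued  = task_on_rq_queued(p);
	running = task_current(rq, p);
	if (queued)
		dequeue_task(rq, p, DEQUEUE_SAVE);
	if (running)
		put_prev_task(rq, p);

	/* ... change p's priority / weight / scheduling class ... */

	if (queued)
		enqueue_task(rq, p, ENQUEUE_RESTORE);
	if (running)
		set_next_task(rq, p);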
1777 #define RETRY_TASK ((void *)-1UL)
1816 * The switched_from() call is allowed to drop rq->lock, therefore we
1818 * rq->lock. They are however serialized by p->pi_lock.
1840 WARN_ON_ONCE(rq->curr != prev); in put_prev_task()
1841 prev->sched_class->put_prev_task(rq, prev); in put_prev_task()
1846 WARN_ON_ONCE(rq->curr != next); in set_next_task()
1847 next->sched_class->set_next_task(rq, next, false); in set_next_task()
1850 /* Defined in include/asm-generic/vmlinux.lds.h */
1854 #define sched_class_highest (__end_sched_classes - 1)
1855 #define sched_class_lowest (__begin_sched_classes - 1)
1858 for (class = (_from); class != (_to); class--)
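A hedged sketch of how the core pick path walks this range from highest to lowest class, using the for_each_class() wrapper built on the range iterator above (paraphrasing the slow path in core.c):

	const struct sched_class *class;
	struct task_struct *p;

	for_each_class(class) {
		p = class->pick_next_task(rq);
		if (p)
			return p;
	}

	BUG();	/* the idle class should always have a runnable task */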
1871 return rq->stop && task_on_rq_queued(rq->stop); in sched_stop_runnable()
1876 return rq->dl.dl_nr_running > 0; in sched_dl_runnable()
1881 return rq->rt.rt_queued > 0; in sched_rt_runnable()
1886 return rq->cfs.nr_running > 0; in sched_fair_runnable()
1906 rq->idle_state = idle_state; in idle_set_state()
1913 return rq->idle_state; in idle_get_state()
1953 #define MAX_BW_BITS (64 - BW_SHIFT)
1954 #define MAX_BW ((1ULL << MAX_BW_BITS) - 1)
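With BW_SHIFT being 20 in this header, MAX_BW_BITS works out to 64 - 20 = 44, so MAX_BW caps a bandwidth value at 2^44 - 1 in the 20-bit fixed-point representation.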
1988 unsigned prev_nr = rq->nr_running; in add_nr_running()
1990 rq->nr_running = prev_nr + count; in add_nr_running()
1996 if (prev_nr < 2 && rq->nr_running >= 2) { in add_nr_running()
1997 if (!READ_ONCE(rq->rd->overload)) in add_nr_running()
1998 WRITE_ONCE(rq->rd->overload, 1); in add_nr_running()
2007 rq->nr_running -= count; in sub_nr_running()
2009 call_trace_sched_update_nr_running(rq, -count); in sub_nr_running()
2028 * - enabled by features
2029 * - hrtimer is actually high res
2037 return hrtimer_is_hres_active(&rq->hrtick_timer); in hrtick_enabled()
2060 * arch_scale_freq_capacity - get the frequency scale factor of a given CPU.
2066 * f_curr / f_max * SCHED_CAPACITY_SCALE
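For example, a CPU currently running at 1.2 GHz out of a 2.4 GHz maximum reports (1.2 / 2.4) * 1024 = 512, i.e. half of SCHED_CAPACITY_SCALE.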
2082 * fair double_lock_balance: Safely acquires both rq->locks in a fair
2090 __releases(this_rq->lock) in _double_lock_balance()
2091 __acquires(busiest->lock) in _double_lock_balance()
2092 __acquires(this_rq->lock) in _double_lock_balance()
2094 raw_spin_unlock(&this_rq->lock); in _double_lock_balance()
2104 * already in proper order on entry. This favors lower CPU-ids and will
2109 __releases(this_rq->lock) in _double_lock_balance()
2110 __acquires(busiest->lock) in _double_lock_balance()
2111 __acquires(this_rq->lock) in _double_lock_balance()
2115 if (unlikely(!raw_spin_trylock(&busiest->lock))) { in _double_lock_balance()
2117 raw_spin_unlock(&this_rq->lock); in _double_lock_balance()
2118 raw_spin_lock(&busiest->lock); in _double_lock_balance()
2119 raw_spin_lock_nested(&this_rq->lock, in _double_lock_balance()
2123 raw_spin_lock_nested(&busiest->lock, in _double_lock_balance()
2132 * double_lock_balance - lock the busiest runqueue, this_rq is locked already.
2137 /* printk() doesn't work well under rq->lock */ in double_lock_balance()
2138 raw_spin_unlock(&this_rq->lock); in double_lock_balance()
2146 __releases(busiest->lock) in double_unlock_balance()
2148 raw_spin_unlock(&busiest->lock); in double_unlock_balance()
2149 lock_set_subclass(&this_rq->lock.dep_map, 0, _RET_IP_); in double_unlock_balance()
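A hedged sketch of the calling convention, in the style of the push/pull paths that use it (illustrative, not a specific call site):

	/* this_rq->lock is already held */
	if (double_lock_balance(this_rq, busiest)) {
		/*
		 * this_rq->lock was dropped and re-taken to respect the
		 * locking order, so re-validate anything derived from it.
		 */
	}

	/* both rq->locks held: safe to move state between the two */

	double_unlock_balance(this_rq, busiest);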
2180 * double_rq_lock - safely lock two runqueues
2186 __acquires(rq1->lock) in double_rq_lock()
2187 __acquires(rq2->lock) in double_rq_lock()
2191 raw_spin_lock(&rq1->lock); in double_rq_lock()
2192 __acquire(rq2->lock); /* Fake it out ;) */ in double_rq_lock()
2195 raw_spin_lock(&rq1->lock); in double_rq_lock()
2196 raw_spin_lock_nested(&rq2->lock, SINGLE_DEPTH_NESTING); in double_rq_lock()
2198 raw_spin_lock(&rq2->lock); in double_rq_lock()
2199 raw_spin_lock_nested(&rq1->lock, SINGLE_DEPTH_NESTING); in double_rq_lock()
2205 * double_rq_unlock - safely unlock two runqueues
2211 __releases(rq1->lock) in double_rq_unlock()
2212 __releases(rq2->lock) in double_rq_unlock()
2214 raw_spin_unlock(&rq1->lock); in double_rq_unlock()
2216 raw_spin_unlock(&rq2->lock); in double_rq_unlock()
2218 __release(rq2->lock); in double_rq_unlock()
2228 * double_rq_lock - safely lock two runqueues
2234 __acquires(rq1->lock) in double_rq_lock()
2235 __acquires(rq2->lock) in double_rq_lock()
2239 raw_spin_lock(&rq1->lock); in double_rq_lock()
2240 __acquire(rq2->lock); /* Fake it out ;) */ in double_rq_lock()
2244 * double_rq_unlock - safely unlock two runqueues
2250 __releases(rq1->lock) in double_rq_unlock()
2251 __releases(rq2->lock) in double_rq_unlock()
2254 raw_spin_unlock(&rq1->lock); in double_rq_unlock()
2255 __release(rq2->lock); in double_rq_unlock()
2297 #define nohz_flags(cpu) (&cpu_rq(cpu)->nohz_flags)
2314 for_each_cpu_and(i, rd->span, cpu_active_mask) { in __dl_update()
2317 rq->dl.extra_bw += bw; in __dl_update()
2326 dl->extra_bw += bw; in __dl_update()
2353 seq = __u64_stats_fetch_begin(&irqtime->sync); in irq_time_read()
2354 total = irqtime->total; in irq_time_read()
2355 } while (__u64_stats_fetch_retry(&irqtime->sync, seq)); in irq_time_read()
2365 * cpufreq_update_util - Take a note about CPU utilization changes.
2372 * It can only be called from RCU-sched read-side critical sections.
2383 * but that really is a band-aid. Going forward it should be replaced with
2393 data->func(data, rq_clock(rq), flags); in cpufreq_update_util()
2403 * uclamp_rq_util_with - clamp @util with @rq and @p effective uclamp values.
2429 min_util = READ_ONCE(rq->uclamp[UCLAMP_MIN].value); in uclamp_rq_util_with()
2430 max_util = READ_ONCE(rq->uclamp[UCLAMP_MAX].value); in uclamp_rq_util_with()
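As a rough example of the resulting behaviour (without a task @p and ignoring the idle-rq special case): with aggregated clamps util_min = 512 and util_max = 800, a raw utilization of 200 is raised to 512, 900 is capped at 800, and 600 passes through unchanged.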
2453 * Returns true if userspace opted-in to use uclamp and aggregation at rq level
2485 return cpu_rq(cpu)->cpu_capacity_orig; in capacity_orig_of()
2490 * enum schedutil_type - CPU utilization type
2512 return (rq->dl.running_bw * SCHED_CAPACITY_SCALE) >> BW_SHIFT; in cpu_bw_dl()
2517 return READ_ONCE(rq->avg_dl.util_avg); in cpu_util_dl()
2522 unsigned long util = READ_ONCE(rq->cfs.avg.util_avg); in cpu_util_cfs()
2526 READ_ONCE(rq->cfs.avg.util_est.enqueued)); in cpu_util_cfs()
2534 return READ_ONCE(rq->avg_rt.util_avg); in cpu_util_rt()
2548 return rq->avg_irq.util_avg; in cpu_util_irq()
2554 util *= (max - irq); in scale_irq_capacity()
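In the helper this line is followed by a division by max, so the net scaling is util * (max - irq) / max; e.g. util = 600 with irq = 256 and max = 1024 becomes 600 * 768 / 1024 = 450, reflecting the CPU time left over after servicing interrupts.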
2575 #define perf_domain_span(pd) (to_cpumask(((pd)->em_pd->cpus)))
2594 * - prior user-space memory accesses and store to rq->membarrier_state,
2595 * - store to rq->membarrier_state and following user-space memory accesses.
2596 * In the same way it provides those guarantees around store to rq->curr.
2607 membarrier_state = atomic_read(&next_mm->membarrier_state); in membarrier_switch_mm()
2608 if (READ_ONCE(rq->membarrier_state) == membarrier_state) in membarrier_switch_mm()
2611 WRITE_ONCE(rq->membarrier_state, membarrier_state); in membarrier_switch_mm()
2624 if (!(p->flags & PF_KTHREAD)) in is_per_cpu_kthread()
2627 if (p->nr_cpus_allowed != 1) in is_per_cpu_kthread()