Lines Matching +full:int +full:- +full:array +full:- +full:variable +full:- +full:length +full:- +full:and +full:- +full:constrained +full:- +full:values

1 /* SPDX-License-Identifier: GPL-2.0 */
6 * Define 'struct task_struct' and provide the main scheduler
41 #include <linux/posix-timers_types.h>
49 #include <linux/tracepoint-defs.h>
91 * encoded in fs/proc/array.c: get_task_state().
93 * We have two separate sets of flags: task->__state
94 * is about runnability, while task->exit_state are
100 /* Used in tsk->__state: */
106 /* Used in tsk->exit_state: */
110 /* Used in tsk->__state again: */
123 #define TASK_ANY (TASK_STATE_MAX-1)
146 #define task_is_running(task) (READ_ONCE((task)->__state) == TASK_RUNNING)
148 #define task_is_traced(task) ((READ_ONCE(task->jobctl) & JOBCTL_TRACED) != 0)
149 #define task_is_stopped(task) ((READ_ONCE(task->jobctl) & JOBCTL_STOPPED) != 0)
150 #define task_is_stopped_or_traced(task) ((READ_ONCE(task->jobctl) & (JOBCTL_STOPPED | JOBCTL_TRACED…
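
These predicates each take a single READ_ONCE() snapshot of tsk->__state or tsk->jobctl, so callers test one consistent value. Below is a minimal sketch of how they might be used from other kernel code; the function name and the pr_info() summary are invented for the example, while for_each_process() under the RCU read lock is the standard way to walk the task list.

    #include <linux/sched.h>
    #include <linux/sched/signal.h>	/* for_each_process() */
    #include <linux/rcupdate.h>
    #include <linux/printk.h>

    /* Illustrative helper: count tasks by the state these predicates report. */
    static void count_task_states_example(void)
    {
    	struct task_struct *p;
    	unsigned int running = 0, stopped = 0;

    	rcu_read_lock();			/* the task list is RCU-protected */
    	for_each_process(p) {
    		if (task_is_running(p))
    			running++;
    		else if (task_is_stopped_or_traced(p))
    			stopped++;
    	}
    	rcu_read_unlock();

    	pr_info("running=%u stopped-or-traced=%u\n", running, stopped);
    }
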
153 * Special states are those that do not use the normal wait-loop pattern. See
164 current->task_state_change = _THIS_IP_; \
170 current->task_state_change = _THIS_IP_; \
175 current->saved_state_change = current->task_state_change;\
176 current->task_state_change = _THIS_IP_; \
181 current->task_state_change = current->saved_state_change;\
198 * set_current_state() includes a barrier so that the write of current->__state
212 * CONDITION test and condition change and wakeup are under the same lock) then
221 * accessing p->__state.
223 * Wakeup will do: if (@state & p->__state) p->__state = TASK_RUNNING, that is,
230 * and our @cond test will save the day.
238 WRITE_ONCE(current->__state, (state_value)); \
245 smp_store_mb(current->__state, (state_value)); \
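
The barrier rules described above boil down to the canonical wait-loop/wakeup pairing. A minimal sketch, following the pattern spelled out in the full set_current_state() comment: the waiter publishes its state before testing the condition, the waker stores the condition before waking. my_cond, my_waiter_task and the two function names are made up for the example.

    #include <linux/sched.h>

    static bool my_cond;				/* the CONDITION from the comment */
    static struct task_struct *my_waiter_task;	/* assumed set to the waiting task */

    static void my_wait_for_cond(void)
    {
    	for (;;) {
    		set_current_state(TASK_UNINTERRUPTIBLE);	/* store __state, then barrier */
    		if (READ_ONCE(my_cond))				/* only then test the condition */
    			break;
    		schedule();
    	}
    	__set_current_state(TASK_RUNNING);
    }

    static void my_signal_cond(void)
    {
    	WRITE_ONCE(my_cond, true);		/* set the condition first ...          */
    	wake_up_process(my_waiter_task);	/* ... then wake; ttwu() orders __state */
    }
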
250 * can not use the regular condition based wait-loop. In that case we must
251 * serialize against wakeups such that any possible in-flight TASK_RUNNING
258 raw_spin_lock_irqsave(&current->pi_lock, flags); \
261 WRITE_ONCE(current->__state, (state_value)); \
262 raw_spin_unlock_irqrestore(&current->pi_lock, flags); \
269 * task when blocking on the lock is saved in task_struct::saved_state and
283 * raw_spin_unlock_irq(&lock->wait_lock);
285 * raw_spin_lock_irq(&lock->wait_lock);
293 raw_spin_lock(&current->pi_lock); \
294 current->saved_state = current->__state; \
297 WRITE_ONCE(current->__state, TASK_RTLOCK_WAIT); \
298 raw_spin_unlock(&current->pi_lock); \
304 raw_spin_lock(&current->pi_lock); \
306 trace_set_current_state(current->saved_state); \
307 WRITE_ONCE(current->__state, current->saved_state); \
308 current->saved_state = TASK_RUNNING; \
309 raw_spin_unlock(&current->pi_lock); \
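
Taken together, these two macros bracket an RT-lock slow path shaped roughly like the sketch below, which just restates the usage shown in the surrounding comment (PREEMPT_RT only). The example_rtlock type, the try_lock callback and the function name are placeholders; schedule_rtlock() and TASK_RTLOCK_WAIT are the real interfaces.

    #include <linux/sched.h>
    #include <linux/spinlock.h>

    /* Hypothetical lock type, only to give the sketch a concrete shape. */
    struct example_rtlock {
    	raw_spinlock_t	wait_lock;
    };

    static void rtlock_slowpath_example(struct example_rtlock *lock,
    				    bool (*try_lock)(struct example_rtlock *))
    {
    	raw_spin_lock_irq(&lock->wait_lock);		/* irqs disabled, as required */
    	current_save_and_set_rtlock_wait_state();
    	for (;;) {
    		if (try_lock(lock))
    			break;
    		raw_spin_unlock_irq(&lock->wait_lock);
    		schedule_rtlock();
    		raw_spin_lock_irq(&lock->wait_lock);
    		set_current_state(TASK_RTLOCK_WAIT);
    	}
    	current_restore_rtlock_saved_state();
    	raw_spin_unlock_irq(&lock->wait_lock);
    }
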
312 #define get_current_state() READ_ONCE(current->__state)
315 * Define the task command name length as enum, then it can be visible to
338 extern int __must_check io_schedule_prepare(void);
339 extern void io_schedule_finish(int token);
345 extern void __trace_set_current_state(int state_value);
348 * struct prev_cputime - snapshot of system and user cputime
353 * Stores previous user/system time values such that we can guarantee
381 unsigned int cpu;
410 int sched_priority;
442 * has a few: load, load_avg, util_avg, freq, and capacity.
444 * We define a basic fixed point arithmetic range, and then formalize
475 * where runnable% is the time ratio that a sched_entity is runnable and
478 * For cfs_rq, they are the aggregated values of all runnable and blocked
481 * The load/runnable/util_avg doesn't directly factor frequency scaling and CPU
485 * N.B., the above ratios (runnable% and running%) themselves are in the
492 * The 64-bit load_sum can have 4353082796 (=2^64/47742/88761) entities
494 * and should not overflow as the number already hits PID_MAX_LIMIT.
496 * For all other cases (including 32-bit kernels), struct load_weight's
513 unsigned int util_est;
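
For orientation, the fixed-point ratios above can be made concrete with illustrative numbers (assuming the usual SCHED_CAPACITY_SCALE of 1024 and a nice-0 weight, so scale_load_down(load) == 1024): a sched_entity that is runnable half the time and actually running a quarter of the time converges towards roughly

    load_avg     ~= 0.50 * 1024 = 512	(0.50 * scale_load_down(nice-0 load))
    runnable_avg ~= 0.50 * 1024 = 512	(0.50 * SCHED_CAPACITY_SCALE)
    util_avg     ~= 0.25 * 1024 = 256	(0.25 * SCHED_CAPACITY_SCALE)

i.e. for a nice-0 task all three land in the same 0..1024 fixed-point range.
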
570 /* For load-balancing: */
594 int depth;
600 /* cached value of my_q->h_nr_running */
609 * collide with read-mostly values above.
619 unsigned int time_slice;
651 * Actual scheduling parameters. Initialized with the values above,
657 unsigned int flags; /* Specifying the scheduler behaviour */
671 * indicates if the inactive timer has been armed and its handler
673 * conditions between the inactive timer handler and the wakeup
689 * and is stopped when a dequeue results in 0 cfs tasks on the
696 unsigned int dl_throttled : 1;
697 unsigned int dl_yielded : 1;
698 unsigned int dl_non_contending : 1;
699 unsigned int dl_overrun : 1;
700 unsigned int dl_server : 1;
701 unsigned int dl_server_active : 1;
702 unsigned int dl_defer : 1;
703 unsigned int dl_defer_armed : 1;
704 unsigned int dl_defer_running : 1;
707 * Bandwidth enforcement timer. Each -deadline task has its
714 * at the "0-lag time". When a -deadline task blocks, it contributes
715 * to GRUB's active utilization until the "0-lag time", hence a
722 * Bits for DL-server functionality. Also see the comment near
753 * @user_defined: the requested clamp value comes from user-space
756 * which is pre-computed and stored to avoid expensive integer divisions from
760 * which can be different from the clamp value "requested" from user-space.
764 * The user_defined bit is set whenever a task has got a task-specific clamp
767 * restrictive task-specific value has been requested, thus allowing to
772 unsigned int value : bits_per(SCHED_CAPACITY_SCALE);
773 unsigned int bucket_id : bits_per(UCLAMP_BUCKETS);
774 unsigned int active : 1;
775 unsigned int user_defined : 1;
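
The bucket_id field caches the result of mapping the clamp value onto one of UCLAMP_BUCKETS buckets, so the fast path never divides. A sketch of that mapping, modelled on the kernel's uclamp_bucket_id() helper in kernel/sched/core.c; it assumes CONFIG_UCLAMP_TASK (for UCLAMP_BUCKETS), the EXAMPLE_* names are local to the sketch, and the exact rounding is an implementation detail.

    #include <linux/sched.h>
    #include <linux/math.h>	/* DIV_ROUND_CLOSEST() */
    #include <linux/minmax.h>	/* min_t() */

    #define EXAMPLE_BUCKET_DELTA	DIV_ROUND_CLOSEST(SCHED_CAPACITY_SCALE, UCLAMP_BUCKETS)

    static inline unsigned int example_uclamp_bucket_id(unsigned int clamp_value)
    {
    	/* One division when the clamp value changes; none when it is later looked up. */
    	return min_t(unsigned int, clamp_value / EXAMPLE_BUCKET_DELTA, UCLAMP_BUCKETS - 1);
    }
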
790 perf_invalid_context = -1,
808 int idx;
821 unsigned int __state;
824 unsigned int saved_state;
828 * scheduling-critical items should be added above here.
835 unsigned int flags;
836 unsigned int ptrace;
843 int on_cpu;
845 unsigned int wakee_flips;
856 int recent_used_cpu;
857 int wake_cpu;
859 int on_rq;
861 int prio;
862 int static_prio;
863 int normal_prio;
864 unsigned int rt_priority;
878 unsigned int core_occupation;
888 * Clamp values requested for a scheduling entity.
893 * Effective clamp values used for a scheduling entity.
907 unsigned int btrace_seq;
910 unsigned int policy;
912 int nr_cpus_allowed;
923 int rcu_read_lock_nesting;
933 int rcu_tasks_idle_cpu;
935 int rcu_tasks_exit_cpu;
940 int trc_reader_nesting;
941 int trc_ipi_to_cpu;
945 int trc_blkd_cpu;
960 int exit_state;
961 int exit_code;
962 int exit_signal;
964 int pdeath_signal;
969 unsigned int personality;
984 * queueing no longer being serialized by p->on_cpu. However:
986 * p->XXX = X; ttwu()
987 * schedule() if (p->on_rq && ..) // false
988 * smp_mb__after_spinlock(); if (smp_load_acquire(&p->on_cpu) && //true
990 * p->on_rq = 0; p->sched_remote_wakeup = Y;
993 * ->sched_remote_wakeup gets used, so it can be in this word.
1017 /* disallow userland-initiated cgroup migration */
1058 /* Canary value for the -fstack-protector GCC feature: */
1063 * older sibling, respectively. (p->father can be replaced with
1064 * p->real_parent->pid)
1083 * This includes both natural children and PTRACE_ATTACH targets.
1084 * 'ptrace_entry' is this task's link on the p->parent->ptraced list.
1097 int __user *set_child_tid;
1100 int __user *clear_child_tid;
1130 /* MM fault and swap info: this can arguably be seen as either mm-specific or thread-specific: */
1146 /* Objective and real subjective task credentials (COW): */
1160 * - normally initialized begin_new_exec()
1161 * - set it with set_task_comm()
1162 * - strscpy_pad() to ensure it is always NUL-terminated and
1163 * zero-padded
1164 * - task_lock() to ensure the operation is atomic and the name is
1202 unsigned int sas_ss_flags;
1211 unsigned int sessionid;
1220 /* Protection against (de-)allocation: mm, files, fs, tty, keyrings, mems_allowed, mempolicy: */
1231 /* Updated under owner's pi_lock and rq lock */
1233 /* Deadlock detection and priority inheritance handling: */
1247 int non_block_count;
1252 unsigned int hardirq_threaded;
1254 int softirqs_enabled;
1255 int softirq_context;
1256 int irq_config;
1259 int softirq_disable_cnt;
1265 int lockdep_depth;
1266 unsigned int lockdep_recursion;
1271 unsigned int in_ubsan;
1298 unsigned int psi_flags;
1309 /* Protected by ->alloc_lock: */
1313 int cpuset_mem_spread_rotor;
1318 /* cg_list protected by css_set_lock and tsk->alloc_lock: */
1333 unsigned int futex_state;
1353 int numa_scan_seq;
1354 unsigned int numa_scan_period;
1355 unsigned int numa_scan_period_max;
1356 int numa_preferred_nid;
1365 * This pointer is only modified for current in syscall and
1366 * pagefault context (and for tasks being destroyed), so it can be read
1368 * - RCU read-side critical section
1369 * - current->numa_group from everywhere
1370 * - task's runqueue locked, task not running
1375 * numa_faults is an array split into four regions:
1379 * faults_memory: Exponential decaying average of faults on a per-node
1381 * counts. The values remain static for the duration of a PTE scan.
1384 * faults_memory_buffer and faults_cpu_buffer: Record faults per node
1386 * in faults_memory and faults_cpu decay and these values are copied.
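
One plausible way to lay out and index such a four-region, per-node array is sketched below; it mirrors the task_faults_idx() helper in kernel/sched/fair.c, but the enum, the EX_* names and the explicit nr_node_ids parameter are local to this example.

    /* The four regions named in the comment above. */
    enum example_faults_region {
    	EX_FAULTS_MEM,		/* faults_memory: decaying per-node averages */
    	EX_FAULTS_CPU,		/* faults_cpu                                */
    	EX_FAULTS_MEMBUF,	/* faults_memory_buffer: counts for this scan */
    	EX_FAULTS_CPUBUF,	/* faults_cpu_buffer                         */
    };

    #define EX_FAULT_TYPES	2	/* private and shared slot per node */

    static inline int example_faults_idx(enum example_faults_region region,
    				     int nid, int priv, int nr_node_ids)
    {
    	/* Region-major layout: region, then node, then private/shared slot. */
    	return EX_FAULT_TYPES * (region * nr_node_ids + nid) + priv;
    }
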
1414 * validation of read-only fields. The struct rseq has a
1415 * variable-length array at the end, so it cannot be used
1423 int mm_cid; /* Current cid in mm */
1424 int last_mm_cid; /* Most recent cid in mm */
1425 int migrate_from_cpu;
1426 int mm_cid_active; /* Whether cid bitmap is active */
1442 int make_it_fail;
1443 unsigned int fail_nth;
1449 int nr_dirtied;
1450 int nr_dirtied_pause;
1451 /* Start of a write-and-pause period: */
1455 int latency_record_count;
1459 * Time slack values; these are used to round up poll() and
1460 * select() etc timeout values. These are in nanoseconds.
1466 unsigned int kasan_depth;
1475 int kcsan_stack_depth;
1489 int curr_ret_stack;
1490 int curr_ret_depth;
1510 /* Bitmask and counter of trace recursion: */
1518 unsigned int kcov_mode;
1521 unsigned int kcov_size;
1533 int kcov_sequence;
1536 unsigned int kcov_softirq;
1545 unsigned int memcg_nr_pages_over_high;
1550 /* Cache for current->cgroups->memcg->objcg lookups: */
1562 unsigned int sequential_io;
1563 unsigned int sequential_io_avg;
1574 int pagefault_disabled;
1587 int patch_state;
1599 /* Used by BPF for per-TASK xdp storage */
1615 int mce_count;
1637 * Per-task RV monitor. Nowadays fixed in RV_PER_TASK_MONITORS.
1655 /* CPU-specific state of this task: */
1659 * WARNING: on x86, 'thread_struct' contains a variable-sized
1669 static inline unsigned int __task_state_index(unsigned int tsk_state, in __task_state_index()
1670 unsigned int tsk_exit_state) in __task_state_index()
1672 unsigned int state = (tsk_state | tsk_exit_state) & TASK_REPORT; in __task_state_index()
1691 static inline unsigned int task_state_index(struct task_struct *tsk) in task_state_index()
1693 return __task_state_index(READ_ONCE(tsk->__state), tsk->exit_state); in task_state_index()
1696 static inline char task_index_to_char(unsigned int state) in task_index_to_char()
1700 BUILD_BUG_ON(TASK_REPORT_MAX * 2 != 1 << (sizeof(state_char) - 1)); in task_index_to_char()
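
Composed, these two helpers produce the single state letter reported through fs/proc; the full header provides task_state_to_char() doing exactly this composition. A one-line sketch with an invented name:

    #include <linux/sched.h>

    static inline char example_state_char(struct task_struct *tsk)
    {
    	/* e.g. 'R' running, 'S'/'D' sleeping, 'T'/'t' stopped/tracing stop */
    	return task_index_to_char(task_state_index(tsk));
    }
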
1723 #define PF_SUPERPRIV 0x00000100 /* Used super-user privileges */
1744 #define PF_MEMALLOC_PIN 0x10000000 /* Allocations constrained to zones which allow long term pinni…
1748 #define PF_SUSPEND_TASK 0x80000000 /* This thread called freeze_processes() and should not be…
1751 * Only the _current_ task can read/write to tsk->flags, but other
1752 * tasks can access tsk->flags in readonly mode for example
1756 * child->flags of its traced child (same goes for fork, the parent
1757 * can write to the child->flags), because we're guaranteed the
1758 * child is not running and in turn not changing child->flags
1761 #define clear_stopped_child_used_math(child) do { (child)->flags &= ~PF_USED_MATH; } while (0)
1762 #define set_stopped_child_used_math(child) do { (child)->flags |= PF_USED_MATH; } while (0)
1767 do { (child)->flags &= ~PF_USED_MATH, (child)->flags |= (condition) ? PF_USED_MATH : 0; } while (0)
1772 do { (child)->flags &= ~PF_USED_MATH, (child)->flags |= current->flags & PF_USED_MATH; } while (0)
1775 #define tsk_used_math(p) ((p)->flags & PF_USED_MATH)
1781 return (current->flags & PF_NO_SETAFFINITY) && in is_percpu_thread()
1782 (current->nr_cpus_allowed == 1); in is_percpu_thread()
1788 /* Per-process atomic flags. */
1800 { return test_bit(PFA_##name, &p->atomic_flags); }
1804 { set_bit(PFA_##name, &p->atomic_flags); }
1808 { clear_bit(PFA_##name, &p->atomic_flags); }
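
For reference, each instantiation of these generators produces an ordinary inline accessor. For PFA_NO_NEW_PRIVS the header instantiates TASK_PFA_TEST() and TASK_PFA_SET(), which expand to roughly the following (shown only to illustrate the pattern, not as new code to add):

    static inline bool task_no_new_privs(struct task_struct *p)
    { return test_bit(PFA_NO_NEW_PRIVS, &p->atomic_flags); }

    static inline void task_set_no_new_privs(struct task_struct *p)
    { set_bit(PFA_NO_NEW_PRIVS, &p->atomic_flags); }
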
1842 current->flags &= ~flags; in TASK_PFA_TEST()
1843 current->flags |= orig_flags & flags; in TASK_PFA_TEST()
1846 extern int cpuset_cpumask_can_shrink(const struct cpumask *cur, const struct cpumask *trial);
1847 extern int task_can_attach(struct task_struct *p);
1848 extern int dl_bw_alloc(int cpu, u64 dl_bw);
1849 extern void dl_bw_free(int cpu, u64 dl_bw);
1852 /* do_set_cpus_allowed() - consider using set_cpus_allowed_ptr() instead */
1856 * set_cpus_allowed_ptr - set CPU affinity mask of a task
1862 extern int set_cpus_allowed_ptr(struct task_struct *p, const struct cpumask *new_mask);
1863 extern int dup_user_cpus_ptr(struct task_struct *dst, struct task_struct *src, int node);
1865 extern int dl_task_check_affinity(struct task_struct *p, const struct cpumask *mask);
1872 static inline int set_cpus_allowed_ptr(struct task_struct *p, const struct cpumask *new_mask) in set_cpus_allowed_ptr()
1876 return -EINVAL; in set_cpus_allowed_ptr()
1879 static inline int dup_user_cpus_ptr(struct task_struct *dst, struct task_struct *src, int node) in dup_user_cpus_ptr()
1881 if (src->user_cpus_ptr) in dup_user_cpus_ptr()
1882 return -EINVAL; in dup_user_cpus_ptr()
1887 WARN_ON(p->user_cpus_ptr); in release_user_cpus_ptr()
1890 static inline int dl_task_check_affinity(struct task_struct *p, const struct cpumask *mask) in dl_task_check_affinity()
1896 extern int yield_to(struct task_struct *p, bool preempt);
1898 extern int task_prio(const struct task_struct *p);
1901 * task_nice - return the nice value of a given task.
1904 * Return: The nice value [ -20 ... 0 ... 19 ].
1906 static inline int task_nice(const struct task_struct *p) in task_nice()
1908 return PRIO_TO_NICE((p)->static_prio); in task_nice()
1911 extern int can_nice(const struct task_struct *p, const int nice);
1912 extern int task_curr(const struct task_struct *p);
1913 extern int idle_cpu(int cpu);
1914 extern int available_idle_cpu(int cpu);
1915 extern int sched_setscheduler(struct task_struct *, int, const struct sched_param *);
1916 extern int sched_setscheduler_nocheck(struct task_struct *, int, const struct sched_param *);
1919 extern void sched_set_normal(struct task_struct *p, int nice);
1920 extern int sched_setattr(struct task_struct *, const struct sched_attr *);
1921 extern int sched_setattr_nocheck(struct task_struct *, const struct sched_attr *);
1922 extern struct task_struct *idle_task(int cpu);
1925 * is_idle_task - is the specified task an idle task?
1932 return !!(p->flags & PF_IDLE); in is_idle_task()
1935 extern struct task_struct *curr_task(int cpu);
1936 extern void ia64_set_curr_task(int cpu, struct task_struct *p);
1955 # define task_thread_info(task) (&(task)->thread_info)
1957 # define task_thread_info(task) ((struct thread_info *)(task)->stack)
1975 * find a task by its virtual pid and get the task struct
1979 extern int wake_up_state(struct task_struct *tsk, unsigned int state);
1980 extern int wake_up_process(struct task_struct *tsk);
1996 * - Why not use task_lock()?
1999 * condition could lead to long-term mixed results.
2001 * always NUL-terminated and zero-padded. Therefore the race condition between
2002 * reader and writer is not an issue.
2004 * - BUILD_BUG_ON() can help prevent the buf from being truncated.
2010 strscpy_pad(buf, (tsk)->comm); \
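
A typical use of this snapshot macro: the destination must be an actual char[TASK_COMM_LEN] array (not a pointer), so the BUILD_BUG_ON() mentioned above can verify its size at compile time. The function name and the pr_info() message are made up for the example.

    #include <linux/sched.h>
    #include <linux/printk.h>

    static void print_current_comm_example(void)
    {
    	char comm[TASK_COMM_LEN];

    	get_task_comm(comm, current);	/* NUL-terminated, zero-padded copy */
    	pr_info("running in %s (pid %d)\n", comm, current->pid);
    }
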
2028 extern unsigned long wait_task_inactive(struct task_struct *, unsigned int match_state);
2034 static inline void set_tsk_thread_flag(struct task_struct *tsk, int flag) in set_tsk_thread_flag()
2039 static inline void clear_tsk_thread_flag(struct task_struct *tsk, int flag) in clear_tsk_thread_flag()
2044 static inline void update_tsk_thread_flag(struct task_struct *tsk, int flag, in update_tsk_thread_flag()
2050 static inline int test_and_set_tsk_thread_flag(struct task_struct *tsk, int flag) in test_and_set_tsk_thread_flag()
2055 static inline int test_and_clear_tsk_thread_flag(struct task_struct *tsk, int flag) in test_and_clear_tsk_thread_flag()
2060 static inline int test_tsk_thread_flag(struct task_struct *tsk, int flag) in test_tsk_thread_flag()
2073 (atomic_long_t *)&task_thread_info(tsk)->flags); in clear_tsk_need_resched()
2076 static inline int test_tsk_need_resched(struct task_struct *tsk) in test_tsk_need_resched()
2082 * cond_resched() and cond_resched_lock(): latency reduction via
2088 extern int __cond_resched(void);
2097 static __always_inline int _cond_resched(void) in _cond_resched()
2104 extern int dynamic_cond_resched(void);
2106 static __always_inline int _cond_resched(void) in _cond_resched()
2113 static inline int _cond_resched(void) in _cond_resched()
2123 static inline int _cond_resched(void) in _cond_resched()
2136 extern int __cond_resched_lock(spinlock_t *lock);
2137 extern int __cond_resched_rwlock_read(rwlock_t *lock);
2138 extern int __cond_resched_rwlock_write(rwlock_t *lock);
2141 #define MIGHT_RESCHED_PREEMPT_MASK ((1U << MIGHT_RESCHED_RCU_SHIFT) - 1)
2153 * preempt_count() and rcu_preempt_depth().
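
The typical call site for cond_resched() is a long-running loop in process context that would otherwise monopolize the CPU on a non-preemptible kernel. A minimal sketch; the function name and the per-iteration work are placeholders.

    #include <linux/sched.h>

    static void process_many_items_example(unsigned long nr_items)
    {
    	unsigned long i;

    	for (i = 0; i < nr_items; i++) {
    		/* per-item work would go here (placeholder) */
    		cond_resched();		/* explicit, known-safe reschedule point */
    	}
    }
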
2180 * Wrappers for p->thread_info->cpu access. No-op on UP.
2184 static inline unsigned int task_cpu(const struct task_struct *p) in task_cpu()
2186 return READ_ONCE(task_thread_info(p)->cpu); in task_cpu()
2189 extern void set_task_cpu(struct task_struct *p, unsigned int cpu);
2193 static inline unsigned int task_cpu(const struct task_struct *p) in task_cpu()
2198 static inline void set_task_cpu(struct task_struct *p, unsigned int cpu) in set_task_cpu()
2206 return p->on_rq && !p->se.sched_delayed; in task_is_runnable()
2211 extern struct task_struct *cpu_curr_snapshot(int cpu);
2219 * This allows us to terminate optimistic spin loops and block, analogous to
2224 static inline bool vcpu_is_preempted(int cpu) in vcpu_is_preempted()
2244 return READ_ONCE(owner->on_cpu) && !vcpu_is_preempted(task_cpu(owner)); in owner_on_cpu()
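
owner_on_cpu() bundles the two checks an optimistic spinner cares about: the owner is still on a CPU, and that (virtual) CPU has not been preempted. Below is a sketch of the resulting spin-or-block decision; the function, its lock_is_free callback and the void *lock cookie are invented for the example, while owner_on_cpu() and cpu_relax() are the real interfaces.

    #include <linux/sched.h>
    #include <asm/processor.h>	/* cpu_relax() */

    static bool example_spin_while_owner_runs(struct task_struct *owner,
    					  bool (*lock_is_free)(void *), void *lock)
    {
    	while (!lock_is_free(lock)) {
    		if (!owner_on_cpu(owner))
    			return false;	/* owner off-CPU or vCPU preempted: caller should block */
    		cpu_relax();
    	}
    	return true;			/* lock freed while we were spinning */
    }
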
2248 unsigned long sched_cpu_util(int cpu);
2254 extern int sched_core_share_pid(unsigned int cmd, pid_t pid, enum pid_type type,
2256 extern int sched_core_idle_cpu(int cpu);
2260 static inline int sched_core_idle_cpu(int cpu) { return idle_cpu(cpu); } in sched_core_idle_cpu()
2263 extern void sched_set_stop_task(int cpu, struct task_struct *stop);
2268 swap(current->alloc_tag, tag); in alloc_tag_save()
2275 WARN(current->alloc_tag != tag, "current->alloc_tag was changed:\n"); in alloc_tag_restore()
2277 current->alloc_tag = old; in alloc_tag_restore()