Lines matching: int - array - variable - length - and - constrained - values

1 /* SPDX-License-Identifier: GPL-2.0 */
6 * Define 'struct task_struct' and provide the main scheduler
34 #include <linux/posix-timers.h>
76 * encoded in fs/proc/array.c: get_task_state().
78 * We have two separate sets of flags: task->__state
79 * is about runnability, while task->exit_state are
85 /* Used in tsk->__state: */
91 /* Used in tsk->exit_state: */
95 /* Used in tsk->__state again: */
108 #define TASK_ANY (TASK_STATE_MAX-1)
131 #define task_is_running(task) (READ_ONCE((task)->__state) == TASK_RUNNING)
133 #define task_is_traced(task) ((READ_ONCE(task->jobctl) & JOBCTL_TRACED) != 0)
134 #define task_is_stopped(task) ((READ_ONCE(task->jobctl) & JOBCTL_STOPPED) != 0)
135 #define task_is_stopped_or_traced(task) ((READ_ONCE(task->jobctl) & (JOBCTL_STOPPED | JOBCTL_TRACED…
138 * Special states are those that do not use the normal wait-loop pattern. See
148 current->task_state_change = _THIS_IP_; \
154 current->task_state_change = _THIS_IP_; \
159 current->saved_state_change = current->task_state_change;\
160 current->task_state_change = _THIS_IP_; \
165 current->task_state_change = current->saved_state_change;\
176 * set_current_state() includes a barrier so that the write of current->__state
190 * CONDITION test and condition change and wakeup are under the same lock) then
199 * accessing p->__state.
201 * Wakeup will do: if (@state & p->__state) p->__state = TASK_RUNNING, that is,
208 * and our @cond test will save the day.
215 WRITE_ONCE(current->__state, (state_value)); \
221 smp_store_mb(current->__state, (state_value)); \
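For illustration, a minimal sketch of the wait-loop pattern these two macros are meant for; the condition variable, the function name and the waker are hypothetical:

    static bool my_event_pending;               /* hypothetical wait condition */

    static void wait_for_event(void)
    {
            for (;;) {
                    set_current_state(TASK_UNINTERRUPTIBLE); /* state write + barrier */
                    if (READ_ONCE(my_event_pending))         /* test AFTER setting state */
                            break;
                    schedule();
            }
            __set_current_state(TASK_RUNNING);               /* no barrier needed here */
    }

The waker's side is the mirror image: WRITE_ONCE(my_event_pending, true) followed by wake_up_process(task), matching the ordering argument in the comment above.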
226 * can not use the regular condition based wait-loop. In that case we must
227 * serialize against wakeups such that any possible in-flight TASK_RUNNING
234 raw_spin_lock_irqsave(&current->pi_lock, flags); \
236 WRITE_ONCE(current->__state, (state_value)); \
237 raw_spin_unlock_irqrestore(&current->pi_lock, flags); \
244 * task when blocking on the lock is saved in task_struct::saved_state and
258 * raw_spin_unlock_irq(&lock->wait_lock);
260 * raw_spin_lock_irq(&lock->wait_lock);
268 raw_spin_lock(&current->pi_lock); \
269 current->saved_state = current->__state; \
271 WRITE_ONCE(current->__state, TASK_RTLOCK_WAIT); \
272 raw_spin_unlock(&current->pi_lock); \
278 raw_spin_lock(&current->pi_lock); \
280 WRITE_ONCE(current->__state, current->saved_state); \
281 current->saved_state = TASK_RUNNING; \
282 raw_spin_unlock(&current->pi_lock); \
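A heavily simplified sketch of how the PREEMPT_RT lock slow path uses this save/restore pair; try_to_take_rt_mutex() is internal to kernel/locking/rtmutex.c and stands in for the real acquisition step, and the waiter bookkeeping is omitted:

    current_save_and_set_rtlock_wait_state();
    for (;;) {
            if (try_to_take_rt_mutex(lock, current, NULL))   /* simplified */
                    break;
            raw_spin_unlock_irq(&lock->wait_lock);
            schedule_rtlock();                               /* sleep in TASK_RTLOCK_WAIT */
            raw_spin_lock_irq(&lock->wait_lock);
            set_current_state(TASK_RTLOCK_WAIT);
    }
    current_restore_rtlock_saved_state();                    /* put the original state back */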
285 #define get_current_state() READ_ONCE(current->__state)
288 * Define the task command name length as an enum, so it can be visible to
311 extern int __must_check io_schedule_prepare(void);
312 extern void io_schedule_finish(int token);
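These two functions bracket a sleep that should be accounted as I/O wait; io_schedule() itself is essentially the following (a sketch of the core implementation):

    void __sched io_schedule(void)
    {
            int token;

            token = io_schedule_prepare();  /* mark in_iowait, flush the block plug */
            schedule();
            io_schedule_finish(token);      /* restore the previous in_iowait value */
    }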
317 * struct prev_cputime - snapshot of system and user cputime
322 * Stores previous user/system time values such that we can guarantee
350 unsigned int cpu;
396 * has a few: load, load_avg, util_avg, freq, and capacity.
398 * We define a basic fixed point arithmetic range, and then formalize
414 * struct util_est - Estimated utilization of FAIR tasks
425 * The enqueued attribute has a slightly different meaning for tasks and cpus:
426 * - task: the task's util_avg at last task dequeue time
427 * - cfs_rq: the sum of util_est.enqueued for each RUNNABLE task on that CPU
443 unsigned int enqueued;
444 unsigned int ewma;
465 * where runnable% is the time ratio that a sched_entity is runnable and
468 * For cfs_rq, they are the aggregated values of all runnable and blocked
471 * The load/runnable/util_avg doesn't directly factor frequency scaling and CPU
475 * N.B., the above ratios (runnable% and running%) themselves are in the
482 * The 64-bit load_sum can have 4353082796 (=2^64/47742/88761) entities
484 * and should not overflow as the number already hits PID_MAX_LIMIT.
486 * For all other cases (including 32-bit kernels), struct load_weight's
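A quick check of the bound quoted above, taking 47742 as the maximum PELT series sum and 88761 as the largest (nice -20) load weight:

\[
\frac{2^{64}}{47742 \times 88761} \approx \frac{1.845\times10^{19}}{4.238\times10^{9}} \approx 4353082796,
\]

which is far above PID_MAX_LIMIT (4194304), so a single cfs_rq can never accumulate enough highest-weight entities to overflow the 64-bit sum.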
549 /* For load-balancing: */
556 unsigned int on_rq;
568 int depth;
574 /* cached value of my_q->h_nr_running */
583 * collide with read-mostly values above.
593 unsigned int time_slice;
622 * Actual scheduling parameters. Initialized with the values above,
628 unsigned int flags; /* Specifying the scheduler behaviour */
642 * indicates if the inactive timer has been armed and its handler
644 * conditions between the inactive timer handler and the wakeup
650 unsigned int dl_throttled : 1;
651 unsigned int dl_yielded : 1;
652 unsigned int dl_non_contending : 1;
653 unsigned int dl_overrun : 1;
656 * Bandwidth enforcement timer. Each -deadline task has its
663 * at the "0-lag time". When a -deadline task blocks, it contributes
664 * to GRUB's active utilization until the "0-lag time", hence a
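For orientation, the "0-lag time" is the instant at which the task's remaining runtime would be exhausted if it were consumed exactly at the task's reserved bandwidth; in the usual CBS/GRUB notation (a sketch, not a quote of the implementation):

\[
t_{0\text{-}lag} = d - \frac{q}{U}, \qquad U = \frac{\mathrm{dl\_runtime}}{\mathrm{dl\_period}},
\]

where \(d\) is the current absolute deadline and \(q\) the remaining runtime.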
689 * @user_defined: the requested clamp value comes from user-space
692 * which is pre-computed and stored to avoid expensive integer divisions from
696 * which can be different from the clamp value "requested" from user-space.
700 * The user_defined bit is set whenever a task has got a task-specific clamp
703 * restrictive task-specific value has been requested, thus allowing to
708 unsigned int value : bits_per(SCHED_CAPACITY_SCALE);
709 unsigned int bucket_id : bits_per(UCLAMP_BUCKETS);
710 unsigned int active : 1;
711 unsigned int user_defined : 1;
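A sketch of the value-to-bucket mapping that the pre-computed bucket_id avoids repeating on hot paths (written from memory of the scheduler core; treat as illustrative):

    /* Each bucket covers roughly SCHED_CAPACITY_SCALE / UCLAMP_BUCKETS units. */
    #define UCLAMP_BUCKET_DELTA  DIV_ROUND_CLOSEST(SCHED_CAPACITY_SCALE, UCLAMP_BUCKETS)

    static inline unsigned int uclamp_bucket_id(unsigned int clamp_value)
    {
            return min_t(unsigned int,
                         clamp_value / UCLAMP_BUCKET_DELTA, UCLAMP_BUCKETS - 1);
    }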
726 perf_invalid_context = -1,
738 int idx;
751 unsigned int __state;
755 unsigned int saved_state;
760 * scheduling-critical items should be added above here.
767 unsigned int flags;
768 unsigned int ptrace;
771 int on_cpu;
773 unsigned int wakee_flips;
784 int recent_used_cpu;
785 int wake_cpu;
787 int on_rq;
789 int prio;
790 int static_prio;
791 int normal_prio;
792 unsigned int rt_priority;
802 unsigned int core_occupation;
811 * Clamp values requested for a scheduling entity.
816 * Effective clamp values used for a scheduling entity.
830 unsigned int btrace_seq;
833 unsigned int policy;
834 int nr_cpus_allowed;
845 int rcu_read_lock_nesting;
855 int rcu_tasks_idle_cpu;
860 int trc_reader_nesting;
861 int trc_ipi_to_cpu;
865 int trc_blkd_cpu;
879 int exit_state;
880 int exit_code;
881 int exit_signal;
883 int pdeath_signal;
888 unsigned int personality;
902 * queueing no longer being serialized by p->on_cpu. However:
904 * p->XXX = X; ttwu()
905 * schedule() if (p->on_rq && ..) // false
906 * smp_mb__after_spinlock(); if (smp_load_acquire(&p->on_cpu) && //true
908 * p->on_rq = 0; p->sched_remote_wakeup = Y;
911 * ->sched_remote_wakeup gets used, so it can be in this word.
932 /* disallow userland-initiated cgroup migration */
971 /* Canary value for the -fstack-protector GCC feature: */
976 * older sibling, respectively. (p->father can be replaced with
977 * p->real_parent->pid)
996 * This includes both natural children and PTRACE_ATTACH targets.
997 * 'ptrace_entry' is this task's link on the p->parent->ptraced list.
1011 int __user *set_child_tid;
1014 int __user *clear_child_tid;
1044 /* MM fault and swap info: this can arguably be seen as either mm-specific or thread-specific: */
1060 /* Objective and real subjective task credentials (COW): */
1074 * - normally initialized setup_new_exec()
1075 * - access it with [gs]et_task_comm()
1076 * - lock it with task_lock()
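A small usage sketch for the accessors listed above; show_comm() is a hypothetical caller:

    static void show_comm(struct task_struct *tsk)
    {
            char comm[TASK_COMM_LEN];

            get_task_comm(comm, tsk);       /* takes task_lock(tsk) internally */
            pr_info("pid %d comm \"%s\"\n", task_pid_nr(tsk), comm);
    }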
1113 unsigned int sas_ss_flags;
1122 unsigned int sessionid;
1131 /* Protection against (de-)allocation: mm, files, fs, tty, keyrings, mems_allowed, mempolicy: */
1142 /* Updated under owner's pi_lock and rq lock */
1144 /* Deadlock detection and priority inheritance handling: */
1154 int non_block_count;
1159 unsigned int hardirq_threaded;
1161 int softirqs_enabled;
1162 int softirq_context;
1163 int irq_config;
1166 int softirq_disable_cnt;
1172 int lockdep_depth;
1173 unsigned int lockdep_recursion;
1178 unsigned int in_ubsan;
1205 unsigned int psi_flags;
1216 /* Protected by ->alloc_lock: */
1220 int cpuset_mem_spread_rotor;
1221 int cpuset_slab_spread_rotor;
1226 /* cg_list protected by css_set_lock and tsk->alloc_lock: */
1241 unsigned int futex_state;
1258 int numa_scan_seq;
1259 unsigned int numa_scan_period;
1260 unsigned int numa_scan_period_max;
1261 int numa_preferred_nid;
1270 * This pointer is only modified for current in syscall and
1271 * pagefault context (and for tasks being destroyed), so it can be read
1273 * - RCU read-side critical section
1274 * - current->numa_group from everywhere
1275 * - task's runqueue locked, task not running
1280 * numa_faults is an array split into four regions:
1284 * faults_memory: Exponential decaying average of faults on a per-node
1286 * counts. The values remain static for the duration of a PTE scan.
1289 * faults_memory_buffer and faults_cpu_buffer: Record faults per node
1291 * in faults_memory and faults_cpu decay and these values are copied.
1319 int mm_cid; /* Current cid in mm */
1320 int last_mm_cid; /* Most recent cid in mm */
1321 int migrate_from_cpu;
1322 int mm_cid_active; /* Whether cid bitmap is active */
1338 int make_it_fail;
1339 unsigned int fail_nth;
1345 int nr_dirtied;
1346 int nr_dirtied_pause;
1347 /* Start of a write-and-pause period: */
1351 int latency_record_count;
1355 * Time slack values; these are used to round up poll() and
1356 * select() etc timeout values. These are in nanoseconds.
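These fields are what the PR_SET_TIMERSLACK / PR_GET_TIMERSLACK prctl operations adjust from user space; a minimal user-space sketch:

    #include <stdio.h>
    #include <sys/prctl.h>

    int main(void)
    {
            /* The new slack is given in nanoseconds; 50 us here. */
            if (prctl(PR_SET_TIMERSLACK, 50000UL, 0, 0, 0))
                    perror("PR_SET_TIMERSLACK");
            printf("timer slack: %d ns\n", prctl(PR_GET_TIMERSLACK, 0, 0, 0, 0));
            return 0;
    }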
1362 unsigned int kasan_depth;
1371 int kcsan_stack_depth;
1385 int curr_ret_stack;
1386 int curr_ret_depth;
1405 /* Bitmask and counter of trace recursion: */
1413 unsigned int kcov_mode;
1416 unsigned int kcov_size;
1428 int kcov_sequence;
1431 unsigned int kcov_softirq;
1437 int memcg_oom_order;
1440 unsigned int memcg_nr_pages_over_high;
1454 unsigned int sequential_io;
1455 unsigned int sequential_io_avg;
1466 int pagefault_disabled;
1479 int patch_state;
1505 int mce_count;
1527 * Per-task RV monitors. Currently a fixed number, RV_PER_TASK_MONITORS.
1545 /* CPU-specific state of this task: */
1549 * WARNING: on x86, 'thread_struct' contains a variable-sized
1558 return task->thread_pid; in task_pid()
1576 return tsk->pid; in task_pid_nr()
1592 return tsk->tgid; in task_tgid_nr()
1596 * pid_alive - check that a task structure is not stale
1601 * can be stale and must not be dereferenced.
1605 static inline int pid_alive(const struct task_struct *p) in pid_alive()
1607 return p->thread_pid != NULL; in pid_alive()
1647 pid = task_tgid_nr_ns(rcu_dereference(tsk->real_parent), ns); in task_ppid_nr_ns()
1667 static inline unsigned int __task_state_index(unsigned int tsk_state, in __task_state_index()
1668 unsigned int tsk_exit_state) in __task_state_index()
1670 unsigned int state = (tsk_state | tsk_exit_state) & TASK_REPORT; in __task_state_index()
1688 static inline unsigned int task_state_index(struct task_struct *tsk) in task_state_index()
1690 return __task_state_index(READ_ONCE(tsk->__state), tsk->exit_state); in task_state_index()
1693 static inline char task_index_to_char(unsigned int state) in task_index_to_char()
1697 BUILD_BUG_ON(1 + ilog2(TASK_REPORT_MAX) != sizeof(state_char) - 1); in task_index_to_char()
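Composing the two helpers gives the single-letter state used by places like /proc and sched_show_task(); a trivial sketch, essentially what the header's task_state_to_char() helper does:

    static char task_state_char(struct task_struct *tsk)
    {
            /* Index into "RSDTtXZPI" via the compressed TASK_REPORT encoding. */
            return task_index_to_char(task_state_index(tsk));
    }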
1708 * is_global_init - check if a task structure is init. Since init
1709 * is free to have sub-threads we need to check tgid.
1716 static inline int is_global_init(struct task_struct *tsk) in is_global_init()
1734 #define PF_SUPERPRIV 0x00000100 /* Used super-user privileges */
1755 #define PF_MEMALLOC_PIN 0x10000000 /* Allocation context constrained to zones which allow long ter…
1758 #define PF_SUSPEND_TASK 0x80000000 /* This thread called freeze_processes() and should not be…
1761 * Only the _current_ task can read/write to tsk->flags, but other
1762 * tasks can access tsk->flags in readonly mode for example
1766 * child->flags of its traced child (same goes for fork, the parent
1767 * can write to the child->flags), because we're guaranteed the
1768 * child is not running and in turn not changing child->flags
1771 #define clear_stopped_child_used_math(child) do { (child)->flags &= ~PF_USED_MATH; } while (0)
1772 #define set_stopped_child_used_math(child) do { (child)->flags |= PF_USED_MATH; } while (0)
1777 do { (child)->flags &= ~PF_USED_MATH, (child)->flags |= (condition) ? PF_USED_MATH : 0; } while (0)
1782 do { (child)->flags &= ~PF_USED_MATH, (child)->flags |= current->flags & PF_USED_MATH; } while (0)
1785 #define tsk_used_math(p) ((p)->flags & PF_USED_MATH)
1791 return (current->flags & PF_NO_SETAFFINITY) && in is_percpu_thread()
1792 (current->nr_cpus_allowed == 1); in is_percpu_thread()
1798 /* Per-process atomic flags. */
1810 { return test_bit(PFA_##name, &p->atomic_flags); }
1814 { set_bit(PFA_##name, &p->atomic_flags); }
1818 { clear_bit(PFA_##name, &p->atomic_flags); }
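For illustration, one concrete instantiation of these generator macros, expanded by hand (PFA_SPREAD_PAGE and its accessors exist later in this header):

    /* TASK_PFA_TEST(SPREAD_PAGE, spread_page) generates, roughly: */
    static inline bool task_spread_page(struct task_struct *p)
    {
            return test_bit(PFA_SPREAD_PAGE, &p->atomic_flags);
    }

    /* ...and TASK_PFA_SET(SPREAD_PAGE, spread_page) generates: */
    static inline void task_set_spread_page(struct task_struct *p)
    {
            set_bit(PFA_SPREAD_PAGE, &p->atomic_flags);
    }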
1852 current->flags &= ~flags; in current_restore_flags()
1853 current->flags |= orig_flags & flags; in current_restore_flags()
1856 extern int cpuset_cpumask_can_shrink(const struct cpumask *cur, const struct cpumask *trial);
1857 extern int task_can_attach(struct task_struct *p);
1858 extern int dl_bw_alloc(int cpu, u64 dl_bw);
1859 extern void dl_bw_free(int cpu, u64 dl_bw);
1862 /* do_set_cpus_allowed() - consider using set_cpus_allowed_ptr() instead */
1866 * set_cpus_allowed_ptr - set CPU affinity mask of a task
1872 extern int set_cpus_allowed_ptr(struct task_struct *p, const struct cpumask *new_mask);
1873 extern int dup_user_cpus_ptr(struct task_struct *dst, struct task_struct *src, int node);
1875 extern int dl_task_check_affinity(struct task_struct *p, const struct cpumask *mask);
1882 static inline int set_cpus_allowed_ptr(struct task_struct *p, const struct cpumask *new_mask) in set_cpus_allowed_ptr()
1885 return -EINVAL; in set_cpus_allowed_ptr()
1888 static inline int dup_user_cpus_ptr(struct task_struct *dst, struct task_struct *src, int node) in dup_user_cpus_ptr()
1890 if (src->user_cpus_ptr) in dup_user_cpus_ptr()
1891 return -EINVAL; in dup_user_cpus_ptr()
1896 WARN_ON(p->user_cpus_ptr); in release_user_cpus_ptr()
1899 static inline int dl_task_check_affinity(struct task_struct *p, const struct cpumask *mask) in dl_task_check_affinity()
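A minimal usage sketch for the affinity setter declared above; pinning a (hypothetical) kernel thread to a single CPU:

    /* Returns 0 on success or a negative errno. */
    static int pin_kthread_to_cpu(struct task_struct *kt, int cpu)
    {
            return set_cpus_allowed_ptr(kt, cpumask_of(cpu));
    }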
1905 extern int yield_to(struct task_struct *p, bool preempt);
1907 extern int task_prio(const struct task_struct *p);
1910 * task_nice - return the nice value of a given task.
1913 * Return: The nice value [ -20 ... 0 ... 19 ].
1915 static inline int task_nice(const struct task_struct *p) in task_nice()
1917 return PRIO_TO_NICE((p)->static_prio); in task_nice()
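The conversion is a fixed offset between the static priority range and the nice range; as a worked example (with DEFAULT_PRIO being 120):

\[
\mathrm{PRIO\_TO\_NICE}(p) = p - \mathrm{DEFAULT\_PRIO},\qquad
100 \mapsto -20,\quad 120 \mapsto 0,\quad 139 \mapsto +19.
\]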
1920 extern int can_nice(const struct task_struct *p, const int nice);
1921 extern int task_curr(const struct task_struct *p);
1922 extern int idle_cpu(int cpu);
1923 extern int available_idle_cpu(int cpu);
1924 extern int sched_setscheduler(struct task_struct *, int, const struct sched_param *);
1925 extern int sched_setscheduler_nocheck(struct task_struct *, int, const struct sched_param *);
1928 extern void sched_set_normal(struct task_struct *p, int nice);
1929 extern int sched_setattr(struct task_struct *, const struct sched_attr *);
1930 extern int sched_setattr_nocheck(struct task_struct *, const struct sched_attr *);
1931 extern struct task_struct *idle_task(int cpu);
1934 * is_idle_task - is the specified task an idle task?
1941 return !!(p->flags & PF_IDLE); in is_idle_task()
1944 extern struct task_struct *curr_task(int cpu);
1945 extern void ia64_set_curr_task(int cpu, struct task_struct *p);
1966 # define task_thread_info(task) (&(task)->thread_info)
1968 # define task_thread_info(task) ((struct thread_info *)(task)->stack)
1986 * find a task by its virtual pid and get the task struct
1990 extern int wake_up_state(struct task_struct *tsk, unsigned int state);
1991 extern int wake_up_process(struct task_struct *tsk);
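A sketch combining the lookup and wakeup primitives above; error handling is kept minimal and put_task_struct() comes from <linux/sched/task.h>:

    static int kick_task(pid_t nr)
    {
            struct task_struct *p;

            p = find_get_task_by_vpid(nr);  /* takes a reference, or returns NULL */
            if (!p)
                    return -ESRCH;
            wake_up_process(p);             /* wake it if sleeping; harmless otherwise */
            put_task_struct(p);
            return 0;
    }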
2027 extern unsigned long wait_task_inactive(struct task_struct *, unsigned int match_state);
2033 static inline void set_tsk_thread_flag(struct task_struct *tsk, int flag) in set_tsk_thread_flag()
2038 static inline void clear_tsk_thread_flag(struct task_struct *tsk, int flag) in clear_tsk_thread_flag()
2043 static inline void update_tsk_thread_flag(struct task_struct *tsk, int flag, in update_tsk_thread_flag()
2049 static inline int test_and_set_tsk_thread_flag(struct task_struct *tsk, int flag) in test_and_set_tsk_thread_flag()
2054 static inline int test_and_clear_tsk_thread_flag(struct task_struct *tsk, int flag) in test_and_clear_tsk_thread_flag()
2059 static inline int test_tsk_thread_flag(struct task_struct *tsk, int flag) in test_tsk_thread_flag()
2074 static inline int test_tsk_need_resched(struct task_struct *tsk) in test_tsk_need_resched()
2080 * cond_resched() and cond_resched_lock(): latency reduction via
2086 extern int __cond_resched(void);
2095 static __always_inline int _cond_resched(void) in _cond_resched()
2102 extern int dynamic_cond_resched(void);
2104 static __always_inline int _cond_resched(void) in _cond_resched()
2111 static inline int _cond_resched(void) in _cond_resched()
2121 static inline int _cond_resched(void) in _cond_resched()
2134 extern int __cond_resched_lock(spinlock_t *lock);
2135 extern int __cond_resched_rwlock_read(rwlock_t *lock);
2136 extern int __cond_resched_rwlock_write(rwlock_t *lock);
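The canonical use of cond_resched() is a voluntary preemption point inside a long loop in non-atomic context; struct item and process_item() below are hypothetical:

    /* Hypothetical work item and per-item handler, for illustration only. */
    struct item { int payload; };
    static void process_item(struct item *it) { /* ... real work ... */ }

    static void process_all_items(struct item *items, unsigned long nr)
    {
            unsigned long i;

            for (i = 0; i < nr; i++) {
                    process_item(&items[i]);
                    cond_resched();         /* reschedule here if needed */
            }
    }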
2139 #define MIGHT_RESCHED_PREEMPT_MASK ((1U << MIGHT_RESCHED_RCU_SHIFT) - 1)
2151 * preempt_count() and rcu_preempt_depth().
2210 * Does the preemption model allow non-cooperative preemption?
2227 static inline int spin_needbreak(spinlock_t *lock) in spin_needbreak()
2238 * Returns non-zero if there is another task waiting on the rwlock.
2244 static inline int rwlock_needbreak(rwlock_t *lock) in rwlock_needbreak()
2259 * Wrappers for p->thread_info->cpu access. No-op on UP.
2263 static inline unsigned int task_cpu(const struct task_struct *p) in task_cpu()
2265 return READ_ONCE(task_thread_info(p)->cpu); in task_cpu()
2268 extern void set_task_cpu(struct task_struct *p, unsigned int cpu);
2272 static inline unsigned int task_cpu(const struct task_struct *p) in task_cpu()
2277 static inline void set_task_cpu(struct task_struct *p, unsigned int cpu) in set_task_cpu()
2285 extern struct task_struct *cpu_curr_snapshot(int cpu);
2291 * This allows us to terminate optimistic spin loops and block, analogous to
2296 static inline bool vcpu_is_preempted(int cpu) in vcpu_is_preempted()
2316 return READ_ONCE(owner->on_cpu) && !vcpu_is_preempted(task_cpu(owner)); in owner_on_cpu()
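owner_on_cpu() is what lets lock code spin optimistically only while the owner is actually making progress; a simplified sketch of that pattern, where struct my_lock and lock_owner() are hypothetical stand-ins for the mutex/rwsem internals:

    struct my_lock { struct task_struct *owner; };

    static struct task_struct *lock_owner(struct my_lock *lock)
    {
            return READ_ONCE(lock->owner);
    }

    static bool spin_on_owner(struct my_lock *lock, struct task_struct *owner)
    {
            while (lock_owner(lock) == owner) {
                    if (!owner_on_cpu(owner) || need_resched())
                            return false;   /* stop spinning, go block instead */
                    cpu_relax();
            }
            return true;                    /* owner let go; retry the acquisition */
    }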
2320 unsigned long sched_cpu_util(int cpu);
2326 * Map the event mask on the user-space ABI enum rseq_cs_flags
2343 if (t->rseq) in rseq_set_notify_resume()
2352 if (current->rseq) in rseq_handle_notify_resume()
2360 __set_bit(RSEQ_EVENT_SIGNAL_BIT, &current->rseq_event_mask); in rseq_signal_deliver()
2368 __set_bit(RSEQ_EVENT_PREEMPT_BIT, &t->rseq_event_mask); in rseq_preempt()
2375 __set_bit(RSEQ_EVENT_MIGRATE_BIT, &t->rseq_event_mask); in rseq_migrate()
2386 t->rseq = NULL; in rseq_fork()
2387 t->rseq_len = 0; in rseq_fork()
2388 t->rseq_sig = 0; in rseq_fork()
2389 t->rseq_event_mask = 0; in rseq_fork()
2391 t->rseq = current->rseq; in rseq_fork()
2392 t->rseq_len = current->rseq_len; in rseq_fork()
2393 t->rseq_sig = current->rseq_sig; in rseq_fork()
2394 t->rseq_event_mask = current->rseq_event_mask; in rseq_fork()
2400 t->rseq = NULL; in rseq_execve()
2401 t->rseq_len = 0; in rseq_execve()
2402 t->rseq_sig = 0; in rseq_execve()
2403 t->rseq_event_mask = 0; in rseq_execve()
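For context, the kernel-side bookkeeping above backs the rseq(2) registration done from user space; a minimal user-space sketch (the 0x53053053 signature is just an example value, and a libc that registers rseq itself will make this return EBUSY):

    #include <linux/rseq.h>
    #include <sys/syscall.h>
    #include <unistd.h>

    static __thread struct rseq rseq_area __attribute__((aligned(32)));

    static int register_rseq(void)
    {
            /* rseq(struct rseq *rseq, u32 rseq_len, int flags, u32 sig) */
            return syscall(__NR_rseq, &rseq_area, sizeof(rseq_area), 0, 0x53053053);
    }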
2449 extern int sched_core_share_pid(unsigned int cmd, pid_t pid, enum pid_type type,
2451 extern int sched_core_idle_cpu(int cpu);
2455 static inline int sched_core_idle_cpu(int cpu) { return idle_cpu(cpu); } in sched_core_idle_cpu()
2458 extern void sched_set_stop_task(int cpu, struct task_struct *stop);