Lines matching: "int", "array", "variable", "length", "and", "constrained", "values"

1 /* SPDX-License-Identifier: GPL-2.0 */
6 * Define 'struct task_struct' and provide the main scheduler
40 #include <linux/posix-timers_types.h>
85 * encoded in fs/proc/array.c: get_task_state().
87 * We have two separate sets of flags: task->__state
88 * is about runnability, while task->exit_state are
94 /* Used in tsk->__state: */
100 /* Used in tsk->exit_state: */
104 /* Used in tsk->__state again: */
117 #define TASK_ANY (TASK_STATE_MAX-1)
140 #define task_is_running(task) (READ_ONCE((task)->__state) == TASK_RUNNING)
142 #define task_is_traced(task) ((READ_ONCE(task->jobctl) & JOBCTL_TRACED) != 0)
143 #define task_is_stopped(task) ((READ_ONCE(task->jobctl) & JOBCTL_STOPPED) != 0)
144 #define task_is_stopped_or_traced(task) ((READ_ONCE(task->jobctl) & (JOBCTL_STOPPED | JOBCTL_TRACED…
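
A brief usage sketch (editorial, not from the file): callers are expected to test these wrappers rather than reading p->__state or p->jobctl directly. The -ESRCH policy below is hypothetical.

static long example_check_child(struct task_struct *child)
{
        /* Only poke at the child once it is parked in the stop/trace path. */
        if (!task_is_stopped_or_traced(child))
                return -ESRCH;
        return 0;
}
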
147 * Special states are those that do not use the normal wait-loop pattern. See
157 current->task_state_change = _THIS_IP_; \
163 current->task_state_change = _THIS_IP_; \
168 current->saved_state_change = current->task_state_change;\
169 current->task_state_change = _THIS_IP_; \
174 current->task_state_change = current->saved_state_change;\
185 * set_current_state() includes a barrier so that the write of current->__state
199 * CONDITION test and condition change and wakeup are under the same lock) then
208 * accessing p->__state.
210 * Wakeup will do: if (@state & p->__state) p->__state = TASK_RUNNING, that is,
217 * and our @cond test will save the day.
224 WRITE_ONCE(current->__state, (state_value)); \
230 smp_store_mb(current->__state, (state_value)); \
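
A sketch of the condition/wait-loop pattern that the comment above describes and that pairs with these two macros; passing the condition as a pointer is purely for illustration.

static void example_wait_for(bool *cond)
{
        for (;;) {
                set_current_state(TASK_UNINTERRUPTIBLE);
                if (READ_ONCE(*cond))
                        break;
                schedule();
        }
        __set_current_state(TASK_RUNNING);
}
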
235 * can not use the regular condition based wait-loop. In that case we must
236 * serialize against wakeups such that any possible in-flight TASK_RUNNING
243 raw_spin_lock_irqsave(&current->pi_lock, flags); \
245 WRITE_ONCE(current->__state, (state_value)); \
246 raw_spin_unlock_irqrestore(&current->pi_lock, flags); \
253 * task when blocking on the lock is saved in task_struct::saved_state and
267 * raw_spin_unlock_irq(&lock->wait_lock);
269 * raw_spin_lock_irq(&lock->wait_lock);
277 raw_spin_lock(&current->pi_lock); \
278 current->saved_state = current->__state; \
280 WRITE_ONCE(current->__state, TASK_RTLOCK_WAIT); \
281 raw_spin_unlock(&current->pi_lock); \
287 raw_spin_lock(&current->pi_lock); \
289 WRITE_ONCE(current->__state, current->saved_state); \
290 current->saved_state = TASK_RUNNING; \
291 raw_spin_unlock(&current->pi_lock); \
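
For context, the surrounding comment in the file (partially excerpted in the two raw_spin_*_irq lines above) sketches how the save/restore pair brackets the RT-lock slowpath; reconstructed here for readability:

        current_save_and_set_rtlock_wait_state();
        for (;;) {
                if (try_to_take_rtmutex(lock))
                        break;
                raw_spin_unlock_irq(&lock->wait_lock);
                schedule_rtlock();
                raw_spin_lock_irq(&lock->wait_lock);
                set_current_state(TASK_RTLOCK_WAIT);
        }
        current_restore_rtlock_saved_state();
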
294 #define get_current_state() READ_ONCE(current->__state)
297 * Define the task command name length as enum, then it can be visible to
320 extern int __must_check io_schedule_prepare(void);
321 extern void io_schedule_finish(int token);
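
A hedged sketch of how the prepare/finish pair brackets a blocking region so the sleep is accounted as I/O wait (compare mutex_lock_io()); the completion used as the blocking point is illustrative.

static void example_io_wait(struct completion *done)
{
        int token = io_schedule_prepare();

        wait_for_completion(done);      /* the actual blocking I/O wait */
        io_schedule_finish(token);
}
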
326 * struct prev_cputime - snapshot of system and user cputime
331 * Stores previous user/system time values such that we can guarantee
359 unsigned int cpu;
383 int sched_priority;
409 * has a few: load, load_avg, util_avg, freq, and capacity.
411 * We define a basic fixed point arithmetic range, and then formalize
442 * where runnable% is the time ratio that a sched_entity is runnable and
445 * For cfs_rq, they are the aggregated values of all runnable and blocked
448 * The load/runnable/util_avg doesn't directly factor frequency scaling and CPU
452 * N.B., the above ratios (runnable% and running%) themselves are in the
459 * The 64-bit load_sum can have 4353082796 (=2^64/47742/88761) entities
461 * and should not overflow as the number already hits PID_MAX_LIMIT.
463 * For all other cases (including 32-bit kernels), struct load_weight's
480 unsigned int util_est;
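
Illustrative arithmetic only, assuming the 1024-based fixed point described in the comment above: an entity runnable roughly half the time contributes about half of its scaled-down weight to load_avg (e.g. 512 for a nice-0 scaled weight of 1024).

static inline unsigned long example_pelt_load_avg(unsigned long runnable_pct_fp,
                                                  unsigned long scaled_weight)
{
        /* runnable_pct_fp is a ratio in 1/1024 units, e.g. 512 ~= 50% */
        return (runnable_pct_fp * scaled_weight) >> 10;
}
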
537 /* For load-balancing: */
544 unsigned int on_rq;
556 int depth;
562 /* cached value of my_q->h_nr_running */
571 * collide with read-mostly values above.
581 unsigned int time_slice;
613 * Actual scheduling parameters. Initialized with the values above,
619 unsigned int flags; /* Specifying the scheduler behaviour */
633 * indicates if the inactive timer has been armed and its handler
635 * conditions between the inactive timer handler and the wakeup
641 unsigned int dl_throttled : 1;
642 unsigned int dl_yielded : 1;
643 unsigned int dl_non_contending : 1;
644 unsigned int dl_overrun : 1;
645 unsigned int dl_server : 1;
648 * Bandwidth enforcement timer. Each -deadline task has its
655 * at the "0-lag time". When a -deadline task blocks, it contributes
656 * to GRUB's active utilization until the "0-lag time", hence a
663 * Bits for DL-server functionality. Also see the comment near
694 * @user_defined: the requested clamp value comes from user-space
697 * which is pre-computed and stored to avoid expensive integer divisions from
701 * which can be different from the clamp value "requested" from user-space.
705 * The user_defined bit is set whenever a task has got a task-specific clamp
708 * restrictive task-specific value has been requested, thus allowing to
713 unsigned int value : bits_per(SCHED_CAPACITY_SCALE);
714 unsigned int bucket_id : bits_per(UCLAMP_BUCKETS);
715 unsigned int active : 1;
716 unsigned int user_defined : 1;
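
A minimal sketch of the value-to-bucket mapping that the "pre-computed" bucket_id above refers to; compare uclamp_bucket_id() in kernel/sched/core.c.

static inline unsigned int example_uclamp_bucket_id(unsigned int clamp_value)
{
        unsigned int delta = DIV_ROUND_CLOSEST(SCHED_CAPACITY_SCALE, UCLAMP_BUCKETS);

        return min_t(unsigned int, clamp_value / delta, UCLAMP_BUCKETS - 1);
}
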
731 perf_invalid_context = -1,
743 int idx;
756 unsigned int __state;
759 unsigned int saved_state;
763 * scheduling-critical items should be added above here.
770 unsigned int flags;
771 unsigned int ptrace;
774 int on_cpu;
776 unsigned int wakee_flips;
787 int recent_used_cpu;
788 int wake_cpu;
790 int on_rq;
792 int prio;
793 int static_prio;
794 int normal_prio;
795 unsigned int rt_priority;
806 unsigned int core_occupation;
815 * Clamp values requested for a scheduling entity.
820 * Effective clamp values used for a scheduling entity.
834 unsigned int btrace_seq;
837 unsigned int policy;
838 int nr_cpus_allowed;
849 int rcu_read_lock_nesting;
859 int rcu_tasks_idle_cpu;
864 int trc_reader_nesting;
865 int trc_ipi_to_cpu;
869 int trc_blkd_cpu;
884 int exit_state;
885 int exit_code;
886 int exit_signal;
888 int pdeath_signal;
893 unsigned int personality;
907 * queueing no longer being serialized by p->on_cpu. However:
909 * p->XXX = X; ttwu()
910 * schedule() if (p->on_rq && ..) // false
911 * smp_mb__after_spinlock(); if (smp_load_acquire(&p->on_cpu) && //true
913 * p->on_rq = 0; p->sched_remote_wakeup = Y;
916 * ->sched_remote_wakeup gets used, so it can be in this word.
940 /* disallow userland-initiated cgroup migration */
979 /* Canary value for the -fstack-protector GCC feature: */
984 * older sibling, respectively. (p->father can be replaced with
985 * p->real_parent->pid)
1004 * This includes both natural children and PTRACE_ATTACH targets.
1005 * 'ptrace_entry' is this task's link on the p->parent->ptraced list.
1018 int __user *set_child_tid;
1021 int __user *clear_child_tid;
1051 /* MM fault and swap info: this can arguably be seen as either mm-specific or thread-specific: */
1067 /* Objective and real subjective task credentials (COW): */
1081 * - normally initialized setup_new_exec()
1082 * - access it with [gs]et_task_comm()
1083 * - lock it with task_lock()
1120 unsigned int sas_ss_flags;
1129 unsigned int sessionid;
1138 /* Protection against (de-)allocation: mm, files, fs, tty, keyrings, mems_allowed, mempolicy: */
1149 /* Updated under owner's pi_lock and rq lock */
1151 /* Deadlock detection and priority inheritance handling: */
1161 int non_block_count;
1166 unsigned int hardirq_threaded;
1168 int softirqs_enabled;
1169 int softirq_context;
1170 int irq_config;
1173 int softirq_disable_cnt;
1179 int lockdep_depth;
1180 unsigned int lockdep_recursion;
1185 unsigned int in_ubsan;
1212 unsigned int psi_flags;
1223 /* Protected by ->alloc_lock: */
1227 int cpuset_mem_spread_rotor;
1228 int cpuset_slab_spread_rotor;
1233 /* cg_list protected by css_set_lock and tsk->alloc_lock: */
1248 unsigned int futex_state;
1265 int numa_scan_seq;
1266 unsigned int numa_scan_period;
1267 unsigned int numa_scan_period_max;
1268 int numa_preferred_nid;
1277 * This pointer is only modified for current in syscall and
1278 * pagefault context (and for tasks being destroyed), so it can be read
1280 * - RCU read-side critical section
1281 * - current->numa_group from everywhere
1282 * - task's runqueue locked, task not running
1287 * numa_faults is an array split into four regions:
1291 * faults_memory: Exponential decaying average of faults on a per-node
1293 * counts. The values remain static for the duration of a PTE scan.
1296 * faults_memory_buffer and faults_cpu_buffer: Record faults per node
1298 * in faults_memory and faults_cpu decay and these values are copied.
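
A hedged sketch of how such a flat, four-region array can be indexed by (region, node, private/shared); the in-tree helper is task_faults_idx() in kernel/sched/fair.c, approximated here.

static inline int example_faults_idx(int stat_region, int nid, int priv)
{
        /* 2 == private vs. shared fault counters per node */
        return 2 * (stat_region * nr_node_ids + nid) + priv;
}
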
1326 int mm_cid; /* Current cid in mm */
1327 int last_mm_cid; /* Most recent cid in mm */
1328 int migrate_from_cpu;
1329 int mm_cid_active; /* Whether cid bitmap is active */
1345 int make_it_fail;
1346 unsigned int fail_nth;
1352 int nr_dirtied;
1353 int nr_dirtied_pause;
1354 /* Start of a write-and-pause period: */
1358 int latency_record_count;
1362 * Time slack values; these are used to round up poll() and
1363 * select() etc timeout values. These are in nanoseconds.
1369 unsigned int kasan_depth;
1378 int kcsan_stack_depth;
1392 int curr_ret_stack;
1393 int curr_ret_depth;
1412 /* Bitmask and counter of trace recursion: */
1420 unsigned int kcov_mode;
1423 unsigned int kcov_size;
1435 int kcov_sequence;
1438 unsigned int kcov_softirq;
1444 int memcg_oom_order;
1447 unsigned int memcg_nr_pages_over_high;
1465 unsigned int sequential_io;
1466 unsigned int sequential_io_avg;
1477 int pagefault_disabled;
1490 int patch_state;
1516 int mce_count;
1538 * Per-task RV monitor. Nowadays fixed in RV_PER_TASK_MONITORS.
1556 /* CPU-specific state of this task: */
1560 * WARNING: on x86, 'thread_struct' contains a variable-sized
1570 static inline unsigned int __task_state_index(unsigned int tsk_state, in __task_state_index()
1571 unsigned int tsk_exit_state) in __task_state_index()
1573 unsigned int state = (tsk_state | tsk_exit_state) & TASK_REPORT; in __task_state_index()
1591 static inline unsigned int task_state_index(struct task_struct *tsk) in task_state_index()
1593 return __task_state_index(READ_ONCE(tsk->__state), tsk->exit_state); in task_state_index()
1596 static inline char task_index_to_char(unsigned int state) in task_index_to_char()
1600 BUILD_BUG_ON(1 + ilog2(TASK_REPORT_MAX) != sizeof(state_char) - 1); in task_index_to_char()
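
A short usage sketch of the two helpers above, yielding the single-letter state that fs/proc and ps(1) report:

static void example_report_state(struct task_struct *tsk)
{
        char state = task_index_to_char(task_state_index(tsk));

        pr_debug("pid %d state %c\n", task_pid_nr(tsk), state);
}
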
1623 #define PF_SUPERPRIV 0x00000100 /* Used super-user privileges */
1644 #define PF_MEMALLOC_PIN 0x10000000 /* Allocation context constrained to zones which allow long ter…
1647 #define PF_SUSPEND_TASK 0x80000000 /* This thread called freeze_processes() and should not be…
1650 * Only the _current_ task can read/write to tsk->flags, but other
1651 * tasks can access tsk->flags in readonly mode for example
1655 * child->flags of its traced child (same goes for fork, the parent
1656 * can write to the child->flags), because we're guaranteed the
1657 * child is not running and in turn not changing child->flags
1660 #define clear_stopped_child_used_math(child) do { (child)->flags &= ~PF_USED_MATH; } while (0)
1661 #define set_stopped_child_used_math(child) do { (child)->flags |= PF_USED_MATH; } while (0)
1666 do { (child)->flags &= ~PF_USED_MATH, (child)->flags |= (condition) ? PF_USED_MATH : 0; } while (0)
1671 do { (child)->flags &= ~PF_USED_MATH, (child)->flags |= current->flags & PF_USED_MATH; } while (0)
1674 #define tsk_used_math(p) ((p)->flags & PF_USED_MATH)
1680 return (current->flags & PF_NO_SETAFFINITY) && in is_percpu_thread()
1681 (current->nr_cpus_allowed == 1); in is_percpu_thread()
1687 /* Per-process atomic flags. */
1699 { return test_bit(PFA_##name, &p->atomic_flags); }
1703 { set_bit(PFA_##name, &p->atomic_flags); }
1707 { clear_bit(PFA_##name, &p->atomic_flags); }
1741 current->flags &= ~flags; in TASK_PFA_TEST()
1742 current->flags |= orig_flags & flags; in TASK_PFA_TEST()
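
The two lines above are the body of current_restore_flags(); a hedged sketch of the save/restore idiom built on it (compare memalloc_noio_save()/memalloc_noio_restore() in <linux/sched/mm.h>):

static void example_noio_section(void)
{
        unsigned int pflags = current->flags & PF_MEMALLOC_NOIO;

        current->flags |= PF_MEMALLOC_NOIO;     /* allocations below must not start I/O */
        /* ... allocation-heavy work ... */
        current_restore_flags(pflags, PF_MEMALLOC_NOIO);
}
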
1745 extern int cpuset_cpumask_can_shrink(const struct cpumask *cur, const struct cpumask *trial);
1746 extern int task_can_attach(struct task_struct *p);
1747 extern int dl_bw_alloc(int cpu, u64 dl_bw);
1748 extern void dl_bw_free(int cpu, u64 dl_bw);
1751 /* do_set_cpus_allowed() - consider using set_cpus_allowed_ptr() instead */
1755 * set_cpus_allowed_ptr - set CPU affinity mask of a task
1761 extern int set_cpus_allowed_ptr(struct task_struct *p, const struct cpumask *new_mask);
1762 extern int dup_user_cpus_ptr(struct task_struct *dst, struct task_struct *src, int node);
1764 extern int dl_task_check_affinity(struct task_struct *p, const struct cpumask *mask);
1771 static inline int set_cpus_allowed_ptr(struct task_struct *p, const struct cpumask *new_mask) in set_cpus_allowed_ptr()
1774 return -EINVAL; in set_cpus_allowed_ptr()
1777 static inline int dup_user_cpus_ptr(struct task_struct *dst, struct task_struct *src, int node) in dup_user_cpus_ptr()
1779 if (src->user_cpus_ptr) in dup_user_cpus_ptr()
1780 return -EINVAL; in dup_user_cpus_ptr()
1785 WARN_ON(p->user_cpus_ptr); in release_user_cpus_ptr()
1788 static inline int dl_task_check_affinity(struct task_struct *p, const struct cpumask *mask) in dl_task_check_affinity()
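
A hedged usage sketch for the SMP affinity setter declared above; the worker task and target CPU are assumptions for illustration.

static void example_pin_to_cpu(struct task_struct *worker, int target_cpu)
{
        int ret = set_cpus_allowed_ptr(worker, cpumask_of(target_cpu));

        if (ret)
                pr_warn("failed to pin task to CPU %d: %d\n", target_cpu, ret);
}
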
1794 extern int yield_to(struct task_struct *p, bool preempt);
1796 extern int task_prio(const struct task_struct *p);
1799 * task_nice - return the nice value of a given task.
1802 * Return: The nice value [ -20 ... 0 ... 19 ].
1804 static inline int task_nice(const struct task_struct *p) in task_nice()
1806 return PRIO_TO_NICE((p)->static_prio); in task_nice()
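
Usage is direct; for a default SCHED_OTHER task the result is 0:

        int nice = task_nice(current); /* -20 (most favourable) .. 19 (least) */
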
1809 extern int can_nice(const struct task_struct *p, const int nice);
1810 extern int task_curr(const struct task_struct *p);
1811 extern int idle_cpu(int cpu);
1812 extern int available_idle_cpu(int cpu);
1813 extern int sched_setscheduler(struct task_struct *, int, const struct sched_param *);
1814 extern int sched_setscheduler_nocheck(struct task_struct *, int, const struct sched_param *);
1817 extern void sched_set_normal(struct task_struct *p, int nice);
1818 extern int sched_setattr(struct task_struct *, const struct sched_attr *);
1819 extern int sched_setattr_nocheck(struct task_struct *, const struct sched_attr *);
1820 extern struct task_struct *idle_task(int cpu);
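
A hedged sketch of the common in-kernel use of the setters above: kernel threads needing real-time priority typically call the _nocheck variant, which skips the permission checks applied to user-originated requests. The chosen priority is illustrative; struct sched_param comes from <uapi/linux/sched/types.h>.

static void example_make_rt(struct task_struct *worker)
{
        struct sched_param sp = { .sched_priority = MAX_RT_PRIO / 2 };

        sched_setscheduler_nocheck(worker, SCHED_FIFO, &sp);
}
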
1823 * is_idle_task - is the specified task an idle task?
1830 return !!(p->flags & PF_IDLE); in is_idle_task()
1833 extern struct task_struct *curr_task(int cpu);
1834 extern void ia64_set_curr_task(int cpu, struct task_struct *p);
1853 # define task_thread_info(task) (&(task)->thread_info)
1855 # define task_thread_info(task) ((struct thread_info *)(task)->stack)
1873 * find a task by its virtual pid and get the task struct
1877 extern int wake_up_state(struct task_struct *tsk, unsigned int state);
1878 extern int wake_up_process(struct task_struct *tsk);
1914 extern unsigned long wait_task_inactive(struct task_struct *, unsigned int match_state);
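
The waker-side counterpart to the wait loop sketched earlier; the wakeup path orders the condition store against its read of the sleeper's state, as the comments around set_current_state() above explain.

static void example_wake(struct task_struct *sleeper, bool *cond)
{
        WRITE_ONCE(*cond, true);
        wake_up_process(sleeper);
}
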
1920 static inline void set_tsk_thread_flag(struct task_struct *tsk, int flag) in set_tsk_thread_flag()
1925 static inline void clear_tsk_thread_flag(struct task_struct *tsk, int flag) in clear_tsk_thread_flag()
1930 static inline void update_tsk_thread_flag(struct task_struct *tsk, int flag, in update_tsk_thread_flag()
1936 static inline int test_and_set_tsk_thread_flag(struct task_struct *tsk, int flag) in test_and_set_tsk_thread_flag()
1941 static inline int test_and_clear_tsk_thread_flag(struct task_struct *tsk, int flag) in test_and_clear_tsk_thread_flag()
1946 static inline int test_tsk_thread_flag(struct task_struct *tsk, int flag) in test_tsk_thread_flag()
1961 static inline int test_tsk_need_resched(struct task_struct *tsk) in test_tsk_need_resched()
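
A brief sketch built on the accessors above; TIF_SIGPENDING plus kick_process() is roughly what signal delivery does to nudge a running task.

static void example_flag_and_kick(struct task_struct *tsk)
{
        set_tsk_thread_flag(tsk, TIF_SIGPENDING);
        kick_process(tsk);      /* make the flag visible to the task promptly */
}
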
1967 * cond_resched() and cond_resched_lock(): latency reduction via
1973 extern int __cond_resched(void);
1982 static __always_inline int _cond_resched(void) in _cond_resched()
1989 extern int dynamic_cond_resched(void);
1991 static __always_inline int _cond_resched(void) in _cond_resched()
1998 static inline int _cond_resched(void) in _cond_resched()
2008 static inline int _cond_resched(void) in _cond_resched()
2021 extern int __cond_resched_lock(spinlock_t *lock);
2022 extern int __cond_resched_rwlock_read(rwlock_t *lock);
2023 extern int __cond_resched_rwlock_write(rwlock_t *lock);
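
The typical long-loop usage that the latency-reduction comment above alludes to; the item type and per-item work are hypothetical.

static void example_process_all(struct item *items, int nr)    /* 'struct item' is hypothetical */
{
        int i;

        for (i = 0; i < nr; i++) {
                process_one(&items[i]); /* hypothetical per-item work */
                cond_resched();         /* voluntary preemption point for long loops */
        }
}
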
2026 #define MIGHT_RESCHED_PREEMPT_MASK ((1U << MIGHT_RESCHED_RCU_SHIFT) - 1)
2038 * preempt_count() and rcu_preempt_depth().
2088 * Does the preemption model allow non-cooperative preemption?
2106 * Wrappers for p->thread_info->cpu access. No-op on UP.
2110 static inline unsigned int task_cpu(const struct task_struct *p) in task_cpu()
2112 return READ_ONCE(task_thread_info(p)->cpu); in task_cpu()
2115 extern void set_task_cpu(struct task_struct *p, unsigned int cpu);
2119 static inline unsigned int task_cpu(const struct task_struct *p) in task_cpu()
2124 static inline void set_task_cpu(struct task_struct *p, unsigned int cpu) in set_task_cpu()
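
A one-line usage sketch: callers commonly compare task_cpu() against the local CPU.

        bool is_local = (task_cpu(p) == raw_smp_processor_id());
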
2132 extern struct task_struct *cpu_curr_snapshot(int cpu);
2140 * This allows us to terminate optimistic spin loops and block, analogous to
2145 static inline bool vcpu_is_preempted(int cpu) in vcpu_is_preempted()
2165 return READ_ONCE(owner->on_cpu) && !vcpu_is_preempted(task_cpu(owner)); in owner_on_cpu()
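
A hedged sketch of the optimistic-spin check described above (compare the owner-spinning slowpaths of mutexes and rwsems): keep spinning only while the lock owner runs on a CPU whose vCPU has not been preempted.

static void example_spin_on_owner(struct task_struct *owner)
{
        while (owner_on_cpu(owner) && !need_resched())
                cpu_relax();
}
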
2169 unsigned long sched_cpu_util(int cpu);
2175 extern int sched_core_share_pid(unsigned int cmd, pid_t pid, enum pid_type type,
2177 extern int sched_core_idle_cpu(int cpu);
2181 static inline int sched_core_idle_cpu(int cpu) { return idle_cpu(cpu); } in sched_core_idle_cpu()
2184 extern void sched_set_stop_task(int cpu, struct task_struct *stop);