Lines Matching +full:msb +full:- +full:justified

1 /* SPDX-License-Identifier: GPL-2.0 */
40 #include <linux/posix-timers_types.h>
87 * We have two separate sets of flags: task->__state
88 * is about runnability, while task->exit_state is about the task exiting.
94 /* Used in tsk->__state: */
100 /* Used in tsk->exit_state: */
104 /* Used in tsk->__state again: */
117 #define TASK_ANY (TASK_STATE_MAX-1)
140 #define task_is_running(task) (READ_ONCE((task)->__state) == TASK_RUNNING)
142 #define task_is_traced(task) ((READ_ONCE(task->jobctl) & JOBCTL_TRACED) != 0)
143 #define task_is_stopped(task) ((READ_ONCE(task->jobctl) & JOBCTL_STOPPED) != 0)
144 #define task_is_stopped_or_traced(task) ((READ_ONCE(task->jobctl) & (JOBCTL_STOPPED | JOBCTL_TRACED…
147 * Special states are those that do not use the normal wait-loop pattern. See the comment with set_special_state().
157 current->task_state_change = _THIS_IP_; \
163 current->task_state_change = _THIS_IP_; \
168 current->saved_state_change = current->task_state_change;\
169 current->task_state_change = _THIS_IP_; \
174 current->task_state_change = current->saved_state_change;\
185 * set_current_state() includes a barrier so that the write of current->__state is correctly serialised wrt the caller's subsequent test of whether to actually sleep.
208 * accessing p->__state.
210 * Wakeup will do: if (@state & p->__state) p->__state = TASK_RUNNING, that is,
224 WRITE_ONCE(current->__state, (state_value)); \
230 smp_store_mb(current->__state, (state_value)); \
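The barrier exists to order the state write against the caller's subsequent condition test; the canonical wait loop this protects looks roughly like the sketch below (CONDITION is a placeholder for the caller's wakeup condition):

        for (;;) {
                set_current_state(TASK_UNINTERRUPTIBLE);
                if (CONDITION)
                        break;
                schedule();
        }
        __set_current_state(TASK_RUNNING);

If the condition test and the wakeup are serialised by the same lock, __set_current_state() is sufficient, since no extra ordering against the wakeup is needed.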
235 * can not use the regular condition based wait-loop. In that case we must
236 * serialize against wakeups such that any possible in-flight TASK_RUNNING stores will not collide with our state change.
243 raw_spin_lock_irqsave(&current->pi_lock, flags); \
245 WRITE_ONCE(current->__state, (state_value)); \
246 raw_spin_unlock_irqrestore(&current->pi_lock, flags); \
267 * raw_spin_unlock_irq(&lock->wait_lock);
269 * raw_spin_lock_irq(&lock->wait_lock);
277 raw_spin_lock(&current->pi_lock); \
278 current->saved_state = current->__state; \
280 WRITE_ONCE(current->__state, TASK_RTLOCK_WAIT); \
281 raw_spin_unlock(&current->pi_lock); \
287 raw_spin_lock(&current->pi_lock); \
289 WRITE_ONCE(current->__state, current->saved_state); \
290 current->saved_state = TASK_RUNNING; \
291 raw_spin_unlock(&current->pi_lock); \
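The two helpers above bracket the PREEMPT_RT lock slowpath; the wait_lock lines at 267/269 are fragments of a usage sketch of roughly this shape (rtlock_try_acquire() is a placeholder name, not a real kernel function):

        current_save_and_set_rtlock_wait_state();
        for (;;) {
                if (rtlock_try_acquire(lock))   /* placeholder trylock */
                        break;
                raw_spin_unlock_irq(&lock->wait_lock);
                schedule_rtlock();
                raw_spin_lock_irq(&lock->wait_lock);
                set_current_state(TASK_RTLOCK_WAIT);
        }
        current_restore_rtlock_saved_state();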
294 #define get_current_state() READ_ONCE(current->__state)
326 * struct prev_cputime - snapshot of system and user cputime
459 * The 64-bit load_sum can have 4353082796 (=2^64/47742/88761) entities
463 * For all other cases (including 32-bit kernels), struct load_weight's
487 * This information is mapped into the MSB bit of util_est at dequeue time.
489 * it is safe to use MSB.
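The flag carried in that MSB is UTIL_AVG_UNCHANGED, which lives under kernel/sched/ rather than in this header; masking it off recovers the plain estimate, as in this minimal sketch:

        /* Sketch: task util_est never exceeds 1024, so the MSB is free to
         * carry the "unchanged since dequeue" flag. */
        static inline unsigned int example_util_est(unsigned int enqueued)
        {
                return enqueued & ~UTIL_AVG_UNCHANGED;
        }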
537 /* For load-balancing: */
562 /* cached value of my_q->h_nr_running */
571 * collide with read-mostly values above.
648 * Bandwidth enforcement timer. Each -deadline task has its
655 * at the "0-lag time". When a -deadline task blocks, it contributes
656 * to GRUB's active utilization until the "0-lag time", hence a
663 * Bits for DL-server functionality. Also see the comment near dl_server_update().
694 * @user_defined: the requested clamp value comes from user-space
697 * which is pre-computed and stored to avoid expensive integer divisions from
701 * which can be different from the clamp value "requested" from user-space.
705 * The user_defined bit is set whenever a task has got a task-specific clamp
708 * restrictive task-specific value has been requested, thus allowing to
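The pre-computed bucket_id is essentially the clamp value divided by a fixed bucket width; a rough sketch modelled on the scheduler's uclamp_bucket_id() (the EXAMPLE_* names and the bucket count are stand-ins; the real count derives from CONFIG_UCLAMP_BUCKETS_COUNT):

        #define EXAMPLE_UCLAMP_BUCKETS  5
        #define EXAMPLE_BUCKET_DELTA    DIV_ROUND_CLOSEST(SCHED_CAPACITY_SCALE, EXAMPLE_UCLAMP_BUCKETS)

        static inline unsigned int example_uclamp_bucket_id(unsigned int clamp_value)
        {
                return min_t(unsigned int, clamp_value / EXAMPLE_BUCKET_DELTA,
                             EXAMPLE_UCLAMP_BUCKETS - 1);
        }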
731 perf_invalid_context = -1,
763 * scheduling-critical items should be added above here.
907 * queueing no longer being serialized by p->on_cpu. However:
909 * p->XXX = X; ttwu()
910 * schedule() if (p->on_rq && ..) // false
911 * smp_mb__after_spinlock(); if (smp_load_acquire(&p->on_cpu) && //true
913 * p->on_rq = 0; p->sched_remote_wakeup = Y;
916 * ->sched_remote_wakeup gets used, so it can be in this word.
940 /* disallow userland-initiated cgroup migration */
979 /* Canary value for the -fstack-protector GCC feature: */
984 * older sibling, respectively. (p->father can be replaced with
985 * p->real_parent->pid)
1005 * 'ptrace_entry' is this task's link on the p->parent->ptraced list.
1051 /* MM fault and swap info: this can arguably be seen as either mm-specific or thread-specific: */
1081 * - normally initialized by setup_new_exec()
1082 * - access it with [gs]et_task_comm()
1083 * - lock it with task_lock()
1138 /* Protection against (de-)allocation: mm, files, fs, tty, keyrings, mems_allowed, mempolicy: */
1223 /* Protected by ->alloc_lock: */
1233 /* cg_list protected by css_set_lock and tsk->alloc_lock: */
1280 * - RCU read-side critical section
1281 * - current->numa_group from everywhere
1282 * - task's runqueue locked, task not running
1291 * faults_memory: Exponential decaying average of faults on a per-node
1354 /* Start of a write-and-pause period: */
1538 * Per-task RV monitor. Nowadays fixed in RV_PER_TASK_MONITORS.
1541 * none of these are justified.
1556 /* CPU-specific state of this task: */
1560 * WARNING: on x86, 'thread_struct' contains a variable-sized structure. It *MUST* be at the end of 'task_struct'.
1593 return __task_state_index(READ_ONCE(tsk->__state), tsk->exit_state); in task_state_index()
1600 BUILD_BUG_ON(1 + ilog2(TASK_REPORT_MAX) != sizeof(state_char) - 1); in task_index_to_char()
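task_state_index() and task_index_to_char() are the pair that /proc and ps-style reporting build on; a minimal usage sketch:

        /* Illustrative: the one-letter state code (R, S, D, T, ...) for a task. */
        static char example_task_state_char(struct task_struct *tsk)
        {
                return task_index_to_char(task_state_index(tsk));
        }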
1623 #define PF_SUPERPRIV 0x00000100 /* Used super-user privileges */
1650 * Only the _current_ task can read/write to tsk->flags, but other
1651 * tasks can access tsk->flags in readonly mode, for example with tsk_used_math() (as during threaded core dumping).
1655 * child->flags of its traced child (same goes for fork, the parent
1656 * can write to the child->flags), because we're guaranteed the
1657 * child is not running and in turn not changing child->flags
1660 #define clear_stopped_child_used_math(child) do { (child)->flags &= ~PF_USED_MATH; } while (0)
1661 #define set_stopped_child_used_math(child) do { (child)->flags |= PF_USED_MATH; } while (0)
1666 do { (child)->flags &= ~PF_USED_MATH, (child)->flags |= (condition) ? PF_USED_MATH : 0; } while (0)
1671 do { (child)->flags &= ~PF_USED_MATH, (child)->flags |= current->flags & PF_USED_MATH; } while (0)
1674 #define tsk_used_math(p) ((p)->flags & PF_USED_MATH)
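These helpers exist so ptrace and coredump paths can flip PF_USED_MATH on a child that is guaranteed to be stopped; an illustrative (hypothetical) caller:

        /* Illustrative: after writing new FP registers into a traced, stopped
         * child, mark its FPU state as in use. Safe only because the child
         * cannot be running concurrently. */
        static void example_after_fpregs_write(struct task_struct *child)
        {
                set_stopped_child_used_math(child);
        }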
1680 return (current->flags & PF_NO_SETAFFINITY) && in is_percpu_thread()
1681 (current->nr_cpus_allowed == 1); in is_percpu_thread()
1687 /* Per-process atomic flags. */
1699 { return test_bit(PFA_##name, &p->atomic_flags); }
1703 { set_bit(PFA_##name, &p->atomic_flags); }
1707 { clear_bit(PFA_##name, &p->atomic_flags); }
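Each TASK_PFA_TEST/SET/CLEAR invocation stamps out one small accessor; for the existing PFA_SPREAD_PAGE flag, for example, the generated helpers come out along these lines:

        static inline bool task_spread_page(struct task_struct *p)
        { return test_bit(PFA_SPREAD_PAGE, &p->atomic_flags); }

        static inline void task_set_spread_page(struct task_struct *p)
        { set_bit(PFA_SPREAD_PAGE, &p->atomic_flags); }

        static inline void task_clear_spread_page(struct task_struct *p)
        { clear_bit(PFA_SPREAD_PAGE, &p->atomic_flags); }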
1741 current->flags &= ~flags; in current_restore_flags()
1742 current->flags |= orig_flags & flags; in current_restore_flags()
1751 /* do_set_cpus_allowed() - consider using set_cpus_allowed_ptr() instead */
1755 * set_cpus_allowed_ptr - set CPU affinity mask of a task
1774 return -EINVAL; in set_cpus_allowed_ptr()
1779 if (src->user_cpus_ptr) in dup_user_cpus_ptr()
1780 return -EINVAL; in dup_user_cpus_ptr()
1785 WARN_ON(p->user_cpus_ptr); in release_user_cpus_ptr()
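On SMP builds the real set_cpus_allowed_ptr() changes a task's affinity mask; a typical call site pins a kernel thread to one CPU (worker_task and target_cpu are hypothetical names):

        int ret = set_cpus_allowed_ptr(worker_task, cpumask_of(target_cpu));
        if (ret)
                pr_warn("failed to pin worker to CPU %d: %d\n", target_cpu, ret);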
1799 * task_nice - return the nice value of a given task.
1802 * Return: The nice value [ -20 ... 0 ... 19 ].
1806 return PRIO_TO_NICE((p)->static_prio); in task_nice()
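PRIO_TO_NICE() here is simply static_prio - DEFAULT_PRIO (120, from include/linux/sched/prio.h), so a task with static_prio 120 reports nice 0, one with 100 reports -20, and one with 139 reports 19.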
1823 * is_idle_task - is the specified task an idle task?
1830 return !!(p->flags & PF_IDLE); in is_idle_task()
1853 # define task_thread_info(task) (&(task)->thread_info)
1855 # define task_thread_info(task) ((struct thread_info *)(task)->stack)
2026 #define MIGHT_RESCHED_PREEMPT_MASK ((1U << MIGHT_RESCHED_RCU_SHIFT) - 1)
2088 * Does the preemption model allow non-cooperative preemption?
2106 * Wrappers for p->thread_info->cpu access. No-op on UP.
2112 return READ_ONCE(task_thread_info(p)->cpu); in task_cpu()
2165 return READ_ONCE(owner->on_cpu) && !vcpu_is_preempted(task_cpu(owner)); in owner_on_cpu()
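owner_on_cpu() is the usual gate for optimistic spinning in the locking code; a rough sketch of such a spin loop (the lock->owner field and surrounding structure are illustrative, not the actual mutex/rwsem code):

        /* Keep spinning only while the owner is still the owner, is actively
         * running on a CPU, and we have not been asked to reschedule. */
        while (owner == READ_ONCE(lock->owner)) {
                if (!owner_on_cpu(owner) || need_resched())
                        break;
                cpu_relax();
        }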