
Searched full:task (Results 76 – 100 of 2661) sorted by relevance


/linux/drivers/gpu/drm/
drm_flip_work.c
  38  struct drm_flip_task *task;
  40  task = kzalloc(sizeof(*task), flags);
  41  if (task)
  42  task->data = data;
  44  return task;
  47  static void drm_flip_work_queue_task(struct drm_flip_work *work, struct drm_flip_task *task)
  52  list_add_tail(&task->node, &work->queued);
  66  struct drm_flip_task *task;
  68  task = drm_flip_work_allocate_task(val,
  70  if (task) {
[all …]
/linux/kernel/trace/rv/monitors/sleep/
sleep.c
  25  static void ltl_atoms_fetch(struct task_struct *task, struct ltl_monitor *mon)
  29  * tasks. A task being PI-boosted means it is blocking an "actual"
  30  * real-task, therefore it should also obey the monitor's rule,
  31  * otherwise the "actual" real-task may be delayed.
  33  ltl_atom_set(mon, LTL_RT, rt_or_dl_task(task));
  36  static void ltl_atoms_init(struct task_struct *task, struct ltl_monitor *mon, bool task_creation)
  56  if (task->flags & PF_KTHREAD) {
  67  if (strstarts(task->comm, "migration/"))
  72  if (strstarts(task->comm, "rcu"))
  85  static void handle_sched_set_state(void *data, struct task_struct *task, int state)
[all …]
/linux/arch/arm64/include/asm/
gcs.h
  59  static inline bool task_gcs_el0_enabled(struct task_struct *task)
  61  return task->thread.gcs_el0_mode & PR_SHADOW_STACK_ENABLE;
  64  void gcs_set_el0_mode(struct task_struct *task);
  65  void gcs_free(struct task_struct *task);
  70  static inline int gcs_check_locked(struct task_struct *task,
  73  unsigned long cur_val = task->thread.gcs_el0_mode;
  75  cur_val &= task->thread.gcs_el0_locked;
  76  new_val &= task->thread.gcs_el0_locked;
  86  static inline bool task_gcs_el0_enabled(struct task_struct *task)
  91  static inline void gcs_set_el0_mode(struct task_struct *task) { }
[all …]
/linux/tools/testing/selftests/bpf/progs/
uptr_failure.c
  21  struct task_struct *task;
  24  task = bpf_get_current_task_btf();
  25  v = bpf_task_storage_get(&datamap, task, 0,
  38  struct task_struct *task;
  41  task = bpf_get_current_task_btf();
  42  v = bpf_task_storage_get(&datamap, task, 0,
  55  struct task_struct *task;
  58  task = bpf_get_current_task_btf();
  59  v = bpf_task_storage_get(&datamap, task, 0,
  73  struct task_struct *task;
[all …]
bpf_iter_task_file.c
  17  struct task_struct *task = ctx->task;
  21  if (task == (void *)0 || file == (void *)0)
  29  if (tgid == task->tgid && task->tgid != task->pid)
  32  if (last_tgid != task->tgid) {
  33  last_tgid = task->tgid;
  37  BPF_SEQ_PRINTF(seq, "%8d %8d %8d %lx\n", task->tgid, task->pid, fd,
test_core_reloc_kernel.c
  53  struct task_struct *task = (void *)bpf_get_current_task();
  62  if (CORE_READ(&pid, &task->pid) ||
  63  CORE_READ(&tgid, &task->tgid))
  70  out->valid[1] = BPF_CORE_READ(task,
  72  out->valid[2] = BPF_CORE_READ(task,
  75  out->valid[3] = BPF_CORE_READ(task,
  78  out->valid[4] = BPF_CORE_READ(task,
  81  out->valid[5] = BPF_CORE_READ(task,
  85  out->valid[6] = BPF_CORE_READ(task,
  89  out->valid[7] = BPF_CORE_READ(task,
[all …]
/linux/fs/nfs/flexfilelayout/
flexfilelayout.c
  42  static void ff_layout_read_record_layoutstats_done(struct rpc_task *task,
  693  nfs4_ff_layout_stat_io_end_read(struct rpc_task *task,
  701  ktime_get(), task->tk_start);
  724  nfs4_ff_layout_stat_io_end_write(struct rpc_task *task,
  735  requested, completed, ktime_get(), task->tk_start);
  1037  struct rpc_task *task = &hdr->task;
  1042  dprintk("%s Reset task %5u for i/o through pNFS "
  1044  hdr->task.tk_pid,
  1055  dprintk("%s Reset task %5u for i/o through MDS "
  1057  hdr->task.tk_pid,
[all …]
/linux/arch/microblaze/include/asm/
processor.h
  71  /* Return some info about the user process TASK. */
  72  # define task_tos(task) ((unsigned long)(task) + KERNEL_STACK_SIZE)
  73  # define task_regs(task) ((struct pt_regs *)task_tos(task) - 1)
  78  # define task_sp(task) (task_regs(task)->r1)
  79  # define task_pc(task) (task_regs(task)->pc)
  81  # define KSTK_EIP(task) (task_pc(task))
  82  # define KSTK_ESP(task) (task_sp(task))
/linux/Documentation/admin-guide/hw-vuln/
core-scheduling.rst
  61  ``pid`` of the task for which the operation applies.
  67  will be performed for all tasks in the task group of ``pid``.
  89  specified task or share a cookie with a task. In combination this allows a
  90  simple helper program to pull a cookie from a task in an existing core
  95  Each task that is tagged is assigned a cookie internally in the kernel. As
  102  The idle task is considered special, as it trusts everything and everything
  105  During a schedule() event on any sibling of a core, the highest priority task on
  107  the sibling has the task enqueued. For the rest of the siblings in the core,
  108  the highest priority task with the same cookie is selected if there is one runnable
  109  in their individual run queues. If a task with the same cookie is not available,
[all …]
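
The prctl(2) interface excerpted above is the whole user-facing surface of core scheduling. A minimal sketch of creating and reading back a cookie for the current task, assuming a kernel built with CONFIG_SCHED_CORE (the PR_SCHED_CORE_* fallbacks below mirror <linux/prctl.h> for older toolchain headers):

/*
 * Sketch: tag the current task with a new core-scheduling cookie,
 * then read the cookie back. Error handling kept minimal.
 */
#include <stdio.h>
#include <sys/prctl.h>

#ifndef PR_SCHED_CORE
#define PR_SCHED_CORE		62
#define PR_SCHED_CORE_GET	0
#define PR_SCHED_CORE_CREATE	1
#endif
#define CORE_PIDTYPE_PID	0	/* PIDTYPE_PID: act on a single thread */

int main(void)
{
	unsigned long cookie = 0;

	/* pid 0 means "the calling task" */
	if (prctl(PR_SCHED_CORE, PR_SCHED_CORE_CREATE, 0, CORE_PIDTYPE_PID, 0))
		perror("PR_SCHED_CORE_CREATE");

	/* PR_SCHED_CORE_GET returns the cookie via the fifth argument */
	if (prctl(PR_SCHED_CORE, PR_SCHED_CORE_GET, 0, CORE_PIDTYPE_PID, &cookie))
		perror("PR_SCHED_CORE_GET");

	printf("core sched cookie: %#lx\n", cookie);
	return 0;
}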
/linux/kernel/time/
sleep_timeout.c
  16  * the target task on the stack as well.
  20  struct task_struct *task;
  27  wake_up_process(timeout->task);
  34  * Make the current task sleep until @timeout jiffies have elapsed.
  35  * The function behavior depends on the current task state
  38  * %TASK_RUNNING - the scheduler is called, but the task does not sleep
  43  * pass before the routine returns unless the current task is explicitly
  47  * delivered to the current task or the current task is explicitly woken
  50  * The current task state is guaranteed to be %TASK_RUNNING when this
  95  timer.task = current;
[all …]
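
The kernel-doc above spells out the contract: the sleep behavior follows from the task state set before calling schedule_timeout(). A minimal sketch of the usual idiom (illustrative, not taken from the file):

/*
 * Sketch: sleep for ~100ms. The state must be set first; with
 * TASK_RUNNING the scheduler is called but the task does not sleep.
 */
#include <linux/sched.h>
#include <linux/jiffies.h>

static void sleep_about_100ms(void)
{
	set_current_state(TASK_UNINTERRUPTIBLE);
	schedule_timeout(msecs_to_jiffies(100));
	/* Task state is guaranteed to be TASK_RUNNING again here. */
}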
/linux/fs/nfs/filelayout/
filelayout.c
  91  struct rpc_task *task = &hdr->task;
  94  dprintk("%s Reset task %5u for i/o through MDS "
  96  hdr->task.tk_pid,
  102  task->tk_status = pnfs_write_done_resend_to_mds(hdr);
  108  struct rpc_task *task = &hdr->task;
  111  dprintk("%s Reset task %5u for i/o through MDS "
  113  hdr->task.tk_pid,
  119  task->tk_status = pnfs_read_done_resend_to_mds(hdr);
  123  static int filelayout_async_handle_error(struct rpc_task *task,
  133  if (task->tk_status >= 0)
[all …]
/linux/kernel/cgroup/
freezer.c
  4  #include <linux/sched/task.h>
  149  * Freeze or unfreeze the task by setting or clearing the JOBCTL_TRAP_FREEZE
  152  static void cgroup_freeze_task(struct task_struct *task, bool freeze)
  156  /* If the task is about to die, don't bother with freezing it. */
  157  if (!lock_task_sighand(task, &flags))
  161  task->jobctl |= JOBCTL_TRAP_FREEZE;
  162  signal_wake_up(task, false);
  164  task->jobctl &= ~JOBCTL_TRAP_FREEZE;
  165  wake_up_process(task);
  168  unlock_task_sighand(task, &flags);
[all …]
/linux/include/linux/sunrpc/
xprt.h
  67  struct rpc_task * rq_task;	/* RPC task data */
  154  int (*reserve_xprt)(struct rpc_xprt *xprt, struct rpc_task *task);
  155  void (*release_xprt)(struct rpc_xprt *xprt, struct rpc_task *task);
  156  void (*alloc_slot)(struct rpc_xprt *xprt, struct rpc_task *task);
  159  void (*rpcbind)(struct rpc_task *task);
  161  void (*connect)(struct rpc_xprt *xprt, struct rpc_task *task);
  165  int (*buf_alloc)(struct rpc_task *task);
  166  void (*buf_free)(struct rpc_task *task);
  171  void (*wait_for_reply_request)(struct rpc_task *task);
  172  void (*timer)(struct rpc_xprt *xprt, struct rpc_task *task);
[all …]
/linux/include/linux/fsl/bestcomm/
bestcomm_priv.h
  49  * struct bcom_tdt - Task Descriptor Table Entry
  111  /* task enable */
  238  extern int bcom_load_image(int task, u32 *task_image);
  239  extern void bcom_set_initiator(int task, int initiator);
  261  bcom_enable_task(int task)
  264  reg = in_be16(&bcom_eng->regs->tcr[task]);
  265  out_be16(&bcom_eng->regs->tcr[task], reg | TASK_ENABLE);
  269  bcom_disable_task(int task)
  271  u16 reg = in_be16(&bcom_eng->regs->tcr[task]);
  272  out_be16(&bcom_eng->regs->tcr[task], reg & ~TASK_ENABLE);
[all …]
/linux/Documentation/scheduler/
sched-capacity.rst
  127  2. Task utilization
  133  Capacity aware scheduling requires an expression of a task's requirements with
  135  while task utilization is specific to CFS, it is convenient to describe it here
  138  Task utilization is a percentage meant to represent the throughput requirements
  139  of a task. A simple approximation of it is the task's duty cycle, i.e.::
  143  On an SMP system with fixed frequencies, 100% utilization suggests the task is a
  144  busy loop. Conversely, 10% utilization hints it is a small periodic task that
  170  This yields duty_cycle(p) == 50%, despite the task having the exact same
  173  The task utilization signal can be made frequency invariant using the following
  179  task utilization of 25%.
[all …]
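
The 25% figure in the excerpt is a 50% duty cycle scaled by a current-to-maximum frequency ratio of one half. A worked sketch of that arithmetic (util_freq_inv is a hypothetical helper name; the kernel derives the real signal through PELT, not this calculation):

/* Sketch: frequency-invariant utilization as the excerpt's numbers imply. */
#include <stdio.h>

static double util_freq_inv(double duty_cycle, double curr_hz, double max_hz)
{
	return duty_cycle * (curr_hz / max_hz);
}

int main(void)
{
	/* 50% duty cycle on a CPU running at half its maximum frequency */
	printf("util: %.0f%%\n", 100 * util_freq_inv(0.50, 512e6, 1024e6)); /* 25% */
	return 0;
}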
sched-util-clamp.rst
  31  These two bounds will ensure a task will operate within this performance range
  32  of the system. UCLAMP_MIN implies boosting a task, while UCLAMP_MAX implies
  33  capping a task.
  85  On the other hand, a busy task, for instance, that requires running at maximum
  106  Note that by design RT tasks don't have a per-task PELT signal and must always
  110  when an RT task wakes up. This cost is unchanged by using uclamp. Uclamp only
  121  Util clamp is a property of every task in the system. It sets the boundaries of
  125  The actual utilization signal of a task is never clamped in reality. If you
  127  they are intact. Clamping happens only when needed, e.g.: when a task wakes up
  131  performance point for a task to run on, it must be able to influence the
[all …]
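
The per-task UCLAMP_MIN/UCLAMP_MAX bounds described above are set from user space with sched_setattr(2). A minimal sketch, assuming a uclamp-enabled kernel (raw syscall since glibc ships no wrapper; values use the 0..1024 capacity scale):

/*
 * Sketch: boost the current task to at least ~12.5% and cap it at 50%
 * of the 1024-point capacity scale via sched_setattr(2).
 */
#define _GNU_SOURCE
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <linux/sched.h>	/* SCHED_FLAG_UTIL_CLAMP_{MIN,MAX} */
#include <linux/sched/types.h>	/* struct sched_attr */

int main(void)
{
	struct sched_attr attr;

	memset(&attr, 0, sizeof(attr));
	attr.size = sizeof(attr);
	attr.sched_policy = 0;	/* SCHED_NORMAL */
	attr.sched_flags = SCHED_FLAG_UTIL_CLAMP_MIN | SCHED_FLAG_UTIL_CLAMP_MAX;
	attr.sched_util_min = 128;
	attr.sched_util_max = 512;

	if (syscall(SYS_sched_setattr, 0 /* current task */, &attr, 0))
		perror("sched_setattr");
	return 0;
}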
/linux/net/sunrpc/
auth_tls.c
  35  static void rpc_tls_probe_call_prepare(struct rpc_task *task, void *data)
  37  task->tk_flags &= ~RPC_TASK_NO_RETRANS_TIMEOUT;
  38  rpc_call_start(task);
  41  static void rpc_tls_probe_call_done(struct rpc_task *task, void *data)
  62  struct rpc_task *task;
  65  task = rpc_run_task(&task_setup_data);
  66  if (IS_ERR(task))
  67  return PTR_ERR(task);
  68  status = task->tk_status;
  69  rpc_put_task(task);
[all …]
/linux/arch/x86/include/asm/
unwind.h
  17  struct task_struct *task;
  42  void __unwind_start(struct unwind_state *state, struct task_struct *task,
  59  void unwind_start(struct unwind_state *state, struct task_struct *task,
  62  first_frame = first_frame ? : get_stack_pointer(task, regs);
  64  __unwind_start(state, task, regs, first_frame);
  112  return rethook_find_ret_addr(state->task, (unsigned long)addr_p,
  125  ret = ftrace_graph_ret_addr(state->task, &state->graph_idx,
  131  * This disables KASAN checking when reading a value from another task's stack,
  132  * since the other task could be running on another CPU and could have poisoned
  135  #define READ_ONCE_TASK_STACK(task, x) \
[all …]
/linux/arch/arm64/kernel/
stacktrace.c
  43  * @task: The task being unwound.
  51  struct task_struct *task;
  63  struct task_struct *task)
  66  state->task = task;
  77  * The regs must be on a stack currently owned by the calling task.
  110  * Start an unwind from a blocked task.
  115  * The caller should ensure the task is blocked in cpu_switch_to() for the
  117  * call this for the current task.
  121  struct task_struct *task)
  123  kunwind_init(state, task);
[all …]
/linux/drivers/dma/bestcomm/
bestcomm.c
  56  /* Get and reserve a task num */
  80  /* Get IRQ of that task */
  121  /* Stop the task */
  137  bcom_load_image(int task, u32 *task_image)
  151  if ((task < 0) || (task >= BCOM_MAX_TASKS)) {
  153  ": Trying to load invalid task %d\n", task);
  158  tdt = &bcom_eng->tdt[task];
  161  desc = bcom_task_desc(task);
  162  if (hdr->desc_size != bcom_task_num_descs(task)) {
  164  ": Trying to reload wrong task image "
[all …]
/linux/arch/arm/kernel/
iwmmxt.S
  81  ldr r2, [r0, #S_PC]	@ current task pc value
  85  add r0, r10, #TI_IWMMXT_STATE	@ get task Concan save area
  86  str r0, [r3]	@ this task now owns Concan regs
  184  * r0 = struct thread_info pointer of target task or NULL for any
  196  add r2, r0, #TI_IWMMXT_STATE	@ get task Concan save area
  230  * r0 = struct thread_info pointer of target task
  243  add r2, r0, #TI_IWMMXT_STATE	@ get task Concan save area
  245  teq r2, r3	@ does this task own it...
  248  @ current Concan values are in the task save area
  255  1: @ this task owns Concan regs -- grab a copy from there
[all …]
/linux/arch/x86/kernel/
process_64.c
  21  #include <linux/sched/task.h>
  275  static __always_inline void save_fsgs(struct task_struct *task)
  277  savesegment(fs, task->thread.fsindex);
  278  savesegment(gs, task->thread.gsindex);
  285  task->thread.fsbase = rdfsbase();
  286  task->thread.gsbase = __rdgsbase_inactive();
  288  save_base_legacy(task, task->thread.fsindex, FS);
  289  save_base_legacy(task, task->thread.gsindex, GS);
  327  * The next task is using 64-bit TLS, is not using this
  362  * The next task is using a real segment. Loading the selector
[all …]
/linux/include/linux/sched/
task_stack.h
  6  * task->stack (kernel stack) handling interfaces:
  17  * When accessing the stack of a non-current task that might exit, use
  21  static __always_inline void *task_stack_page(const struct task_struct *task)
  23  return task->stack;
  28  static __always_inline unsigned long *end_of_stack(const struct task_struct *task)
  31  return (unsigned long *)((unsigned long)task->stack + THREAD_SIZE) - 1;
  33  return task->stack;
  39  #define task_stack_page(task) ((void *)(task)->stack)
  44  task_thread_info(p)->task = p;
  86  #define task_stack_end_corrupted(task) \
[all …]
/linux/arch/um/include/asm/
stacktrace.h
  19  get_frame_pointer(struct task_struct *task, struct pt_regs *segv_regs)
  21  if (!task || task == current)
  23  return KSTK_EBP(task);
  27  get_frame_pointer(struct task_struct *task, struct pt_regs *segv_regs)
  34  *get_stack_pointer(struct task_struct *task, struct pt_regs *segv_regs)
  36  if (!task || task == current)
  38  return (unsigned long *)KSTK_ESP(task);
/linux/drivers/connector/
cn_proc.c
  117  void proc_fork_connector(struct task_struct *task)
  133  parent = rcu_dereference(task->real_parent);
  137  ev->event_data.fork.child_pid = task->pid;
  138  ev->event_data.fork.child_tgid = task->tgid;
  147  void proc_exec_connector(struct task_struct *task)
  161  ev->event_data.exec.process_pid = task->pid;
  162  ev->event_data.exec.process_tgid = task->tgid;
  171  void proc_id_connector(struct task_struct *task, int which_id)
  185  ev->event_data.id.process_pid = task->pid;
  186  ev->event_data.id.process_tgid = task->tgid;
[all …]
