Lines matching full:task in kernel/livepatch/transition.c

Identifier cross-reference: each match below is listed with its line number in the source file, grouped by the containing function; (local) and (argument) mark the lines where the identifier is declared.

In klp_complete_transition():

   73  struct task_struct *g, *task;   (local)
  110  for_each_process_thread(g, task) {
  111  WARN_ON_ONCE(test_tsk_thread_flag(task, TIF_PATCH_PENDING));
  112  task->patch_state = KLP_UNDEFINED;
  117  task = idle_task(cpu);
  118  WARN_ON_ONCE(test_tsk_thread_flag(task, TIF_PATCH_PENDING));
  119  task->patch_state = KLP_UNDEFINED;
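
klp_complete_transition() runs once every task has reached the target state: each task's patch_state (including the per-CPU idle tasks') is reset to KLP_UNDEFINED, and a still-set TIF_PATCH_PENDING flag at this point is a bug, hence the WARN_ON_ONCE. A minimal userspace sketch of that cleanup pass; the struct task model and the task array are illustrative stand-ins for the kernel's task_struct and for_each_process_thread():

    #include <assert.h>
    #include <stdbool.h>
    #include <stdio.h>

    enum klp_state { KLP_UNDEFINED = -1, KLP_UNPATCHED = 0, KLP_PATCHED = 1 };

    struct task {
        const char *comm;
        bool patch_pending;            /* models TIF_PATCH_PENDING */
        enum klp_state patch_state;
    };

    /* Models the cleanup loop in klp_complete_transition(). */
    static void complete_transition(struct task *tasks, int n)
    {
        for (int i = 0; i < n; i++) {
            /* By completion time no task may still be marked pending. */
            assert(!tasks[i].patch_pending);      /* ~ WARN_ON_ONCE() */
            tasks[i].patch_state = KLP_UNDEFINED;
        }
    }

    int main(void)
    {
        struct task tasks[] = {
            { "init", false, KLP_PATCHED },
            { "kworker", false, KLP_PATCHED },
        };
        complete_transition(tasks, 2);
        printf("state after completion: %d\n", tasks[0].patch_state);
        return 0;
    }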

In klp_update_patch_state():

  157  * Switch the patched state of the task to the set of functions in the target
  160  * NOTE: If task is not 'current', the caller must ensure the task is inactive.
  163  void klp_update_patch_state(struct task_struct *task)   (argument)
  183  if (test_and_clear_tsk_thread_flag(task, TIF_PATCH_PENDING))
  184  task->patch_state = READ_ONCE(klp_target_state);
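
The switch itself is a two-step pattern: atomically test-and-clear the pending bit, and only if it was set, adopt the current target state; READ_ONCE() keeps the read of klp_target_state from being torn or repeated. A sketch of the same pattern in C11 atomics, where atomic_exchange() stands in for test_and_clear_tsk_thread_flag() and atomic_load() for READ_ONCE():

    #include <stdatomic.h>
    #include <stdio.h>

    enum klp_state { KLP_UNDEFINED = -1, KLP_UNPATCHED = 0, KLP_PATCHED = 1 };

    static _Atomic int klp_target_state = KLP_PATCHED;

    struct task {
        atomic_bool patch_pending;     /* models TIF_PATCH_PENDING */
        enum klp_state patch_state;
    };

    /* Models klp_update_patch_state(): only a pending task changes state. */
    static void update_patch_state(struct task *task)
    {
        /* atomic_exchange ~ test_and_clear_tsk_thread_flag() */
        if (atomic_exchange(&task->patch_pending, false))
            /* atomic_load ~ READ_ONCE(klp_target_state) */
            task->patch_state = atomic_load(&klp_target_state);
    }

    int main(void)
    {
        struct task t = { true, KLP_UNPATCHED };
        update_patch_state(&t);
        printf("patch_state = %d\n", t.patch_state);   /* 1 = patched */
        return 0;
    }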

In klp_check_stack():

  239  * Determine whether it's safe to transition the task to the target patch state
  242  static int klp_check_stack(struct task_struct *task, char *err_buf)   (argument)
  249  ret = stack_trace_save_tsk_reliable(task, entries, ARRAY_SIZE(entries));
  253  __func__, task->comm, task->pid);
  266  __func__, task->comm, task->pid,
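
klp_check_stack() saves a reliable stack trace of the sleeping task via stack_trace_save_tsk_reliable() and refuses the switch if the trace fails outright or if any frame lies inside a function being patched or unpatched. A simplified userspace sketch of the frame scan; the function range, addresses, and name are made up for illustration:

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    /* One function affected by the (un)patch operation. */
    struct klp_func_range { uintptr_t start, size; const char *name; };

    /*
     * Models the scan in klp_check_stack(): the switch is safe only if
     * no saved return address sits inside an affected function.
     */
    static bool stack_is_safe(const uintptr_t *entries, int nr,
                              const struct klp_func_range *funcs, int nr_funcs)
    {
        for (int i = 0; i < nr; i++)
            for (int j = 0; j < nr_funcs; j++)
                /* unsigned wraparound makes this a single range check */
                if (entries[i] - funcs[j].start < funcs[j].size) {
                    fprintf(stderr, "task is sleeping in %s\n", funcs[j].name);
                    return false;
                }
        return true;
    }

    int main(void)
    {
        struct klp_func_range funcs[] = { { 0x1000, 0x80, "example_func" } };
        uintptr_t stack[] = { 0x5004, 0x1010, 0x9ffc };  /* fake return addrs */
        printf("safe: %d\n", stack_is_safe(stack, 3, funcs, 1));
        return 0;
    }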

In klp_try_switch_task():

  277  * Try to safely switch a task to the target patch state. If it's currently
  281  static bool klp_try_switch_task(struct task_struct *task)   (argument)
  291  /* check if this task has already switched over */
  292  if (task->patch_state == klp_target_state)
  304  * functions. If all goes well, switch the task to the target patch
  307  rq = task_rq_lock(task, &flags);
  309  if (task_running(rq, task) && task != current) {
  311  "%s: %s:%d is running\n", __func__, task->comm,
  312  task->pid);
  316  ret = klp_check_stack(task, err_buf);
  322  clear_tsk_thread_flag(task, TIF_PATCH_PENDING);
  323  task->patch_state = klp_target_state;
  326  task_rq_unlock(rq, task, &flags);
  330  * holding the task rq lock. Instead we have to use a temporary buffer
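
klp_try_switch_task() pins the task with task_rq_lock() so it cannot start running mid-check, bails if it is already on a CPU (unless it is current), runs the stack check, and only then flips the flag and the state. Because printing isn't allowed while holding the rq lock, errors go into a temporary buffer and are reported after the unlock. A userspace sketch under those assumptions, with a pthread mutex standing in for the runqueue lock:

    #include <pthread.h>
    #include <stdbool.h>
    #include <stdio.h>

    enum klp_state { KLP_UNDEFINED = -1, KLP_UNPATCHED = 0, KLP_PATCHED = 1 };
    static enum klp_state klp_target_state = KLP_PATCHED;

    struct task {
        const char *comm;
        bool running;              /* ~ task_running(); kernel also exempts 'current' */
        bool patch_pending;        /* models TIF_PATCH_PENDING */
        enum klp_state patch_state;
        pthread_mutex_t rq_lock;   /* stands in for task_rq_lock() */
    };

    /* Models klp_try_switch_task(): pin the task, verify, then switch. */
    static bool try_switch_task(struct task *task)
    {
        char err_buf[128] = "";
        bool success = false;

        if (task->patch_state == klp_target_state)
            return true;                    /* already switched over */

        pthread_mutex_lock(&task->rq_lock);
        if (task->running) {
            snprintf(err_buf, sizeof(err_buf), "%s is running\n", task->comm);
            goto done;
        }
        /* stack check elided; see the klp_check_stack() sketch above */
        task->patch_pending = false;
        task->patch_state = klp_target_state;
        success = true;
    done:
        pthread_mutex_unlock(&task->rq_lock);
        if (err_buf[0])                     /* report only after unlocking */
            fputs(err_buf, stderr);
        return success;
    }

    int main(void)
    {
        struct task t = { "bash", false, true, KLP_UNPATCHED,
                          PTHREAD_MUTEX_INITIALIZER };
        bool ok = try_switch_task(&t);
        printf("switched: %d, state: %d\n", ok, t.patch_state);
        return 0;
    }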

In klp_send_signals():

  345  struct task_struct *g, *task;   (local)
  351  for_each_process_thread(g, task) {
  352  if (!klp_patch_pending(task))
  358  * Meanwhile the task could migrate itself and the action
  361  if (task->flags & PF_KTHREAD) {
  366  wake_up_state(task, TASK_INTERRUPTIBLE);
  372  spin_lock_irq(&task->sighand->siglock);
  373  signal_wake_up(task, 0);
  374  spin_unlock_irq(&task->sighand->siglock);
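
klp_send_signals() nudges the stragglers. Kernel threads never handle signals, so a pending kthread is just woken from interruptible sleep with wake_up_state() in the hope it crosses a patch-state switch point; a user task gets a fake signal via signal_wake_up() so it leaves its sleep and passes through the signal-handling path, where the pending flag is processed. A rough userspace analogue, a sketch only: a no-op signal delivered with pthread_kill() knocks a thread out of a blocking call, much as the fake signal does:

    #include <pthread.h>
    #include <signal.h>
    #include <stdio.h>
    #include <unistd.h>

    /* No-op handler: delivery alone interrupts blocking syscalls. */
    static void nudge(int sig) { (void)sig; }

    static void *worker(void *arg)
    {
        (void)arg;
        /* Models a task parked in an interruptible sleep. */
        if (pause() == -1)
            puts("worker woken; would now hit a patch-state switch point");
        return NULL;
    }

    int main(void)
    {
        pthread_t tid;

        signal(SIGUSR1, nudge);
        pthread_create(&tid, NULL, worker, NULL);
        sleep(1);                      /* let the worker reach pause() */
        /* Models signal_wake_up(): poke the straggler out of its sleep. */
        pthread_kill(tid, SIGUSR1);
        pthread_join(tid, NULL);
        return 0;
    }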

In klp_try_complete_transition():

  383  * to-be-unpatched functions. If such functions are found, the task can't be
  391  struct task_struct *g, *task;   (local)
  401  * is deemed unreliable, the task can't be switched yet.
  407  for_each_process_thread(g, task)
  408  if (!klp_try_switch_task(task))
  417  task = idle_task(cpu);
  419  if (!klp_try_switch_task(task))
  421  } else if (task->patch_state != klp_target_state) {
  423  clear_tsk_thread_flag(task, TIF_PATCH_PENDING);
  424  task->patch_state = klp_target_state;
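
klp_try_complete_transition() attempts the switch for every thread and for every CPU's idle task, and reports whether anything is still pending. Idle tasks of offline CPUs cannot be running, so they are switched directly by clearing the flag and writing the state. A sketch of the idle-task half of that loop; the try_switch_task() stub stands in for the full logic shown earlier:

    #include <stdbool.h>
    #include <stdio.h>

    enum klp_state { KLP_UNDEFINED = -1, KLP_UNPATCHED = 0, KLP_PATCHED = 1 };
    static enum klp_state klp_target_state = KLP_PATCHED;

    struct task {
        bool online;                /* for idle tasks: is the CPU up? */
        bool patch_pending;
        enum klp_state patch_state;
    };

    static bool try_switch_task(struct task *t)
    {
        /* stand-in for klp_try_switch_task(); see the earlier sketch */
        t->patch_pending = false;
        t->patch_state = klp_target_state;
        return true;
    }

    /* Models the idle-task loop in klp_try_complete_transition(). */
    static bool try_complete(struct task *idle, int nr_cpus)
    {
        bool complete = true;

        for (int cpu = 0; cpu < nr_cpus; cpu++) {
            if (idle[cpu].online) {
                if (!try_switch_task(&idle[cpu]))
                    complete = false;
            } else if (idle[cpu].patch_state != klp_target_state) {
                /* Offline CPUs can't run: switch their idle task directly. */
                idle[cpu].patch_pending = false;
                idle[cpu].patch_state = klp_target_state;
            }
        }
        return complete;
    }

    int main(void)
    {
        struct task idle[2] = { { true, true, KLP_UNPATCHED },
                                { false, true, KLP_UNPATCHED } };
        printf("complete: %d\n", try_complete(idle, 2));
        return 0;
    }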

In klp_start_transition():

  465  struct task_struct *g, *task;   (local)
  480  for_each_process_thread(g, task)
  481  if (task->patch_state != klp_target_state)
  482  set_tsk_thread_flag(task, TIF_PATCH_PENDING);
  491  task = idle_task(cpu);
  492  if (task->patch_state != klp_target_state)
  493  set_tsk_thread_flag(task, TIF_PATCH_PENDING);
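
klp_start_transition() arms the transition: every task whose patch_state is not already the target, on both the process list and the per-CPU idle tasks, gets TIF_PATCH_PENDING set. The core of it reduces to a sketch like this:

    #include <stdbool.h>
    #include <stdio.h>

    enum klp_state { KLP_UNPATCHED = 0, KLP_PATCHED = 1 };
    static enum klp_state klp_target_state = KLP_PATCHED;

    struct task { bool patch_pending; enum klp_state patch_state; };

    /* Models klp_start_transition(): flag everything still to be switched. */
    static void start_transition(struct task *tasks, int n)
    {
        for (int i = 0; i < n; i++)
            if (tasks[i].patch_state != klp_target_state)
                tasks[i].patch_pending = true;   /* ~ set TIF_PATCH_PENDING */
    }

    int main(void)
    {
        struct task tasks[] = { { false, KLP_UNPATCHED },
                                { false, KLP_PATCHED } };
        start_transition(tasks, 2);
        printf("pending: %d %d\n", tasks[0].patch_pending,
               tasks[1].patch_pending);          /* 1 0 */
        return 0;
    }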

In klp_init_transition():

  506  struct task_struct *g, *task;   (local)
  530  for_each_process_thread(g, task) {
  531  WARN_ON_ONCE(task->patch_state != KLP_UNDEFINED);
  532  task->patch_state = initial_state;
  540  task = idle_task(cpu);
  541  WARN_ON_ONCE(task->patch_state != KLP_UNDEFINED);
  542  task->patch_state = initial_state;
  546  * Enforce the order of the task->patch_state initializations and the
  548  * see a func in transition with a task->patch_state of KLP_UNDEFINED.
  552  * set a task->patch_state to KLP_UNDEFINED.
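
klp_init_transition() gives every task an initial patch_state, replacing KLP_UNDEFINED, and then, per the comment at lines 546-552, orders those writes before the func->transition writes so the ftrace handler can never observe a transitioning function while some task is still KLP_UNDEFINED. In C11 terms the barrier is a release fence, standing in for the kernel's smp_wmb(); struct func and its transition flag are illustrative:

    #include <stdatomic.h>
    #include <stdbool.h>

    enum klp_state { KLP_UNDEFINED = -1, KLP_UNPATCHED = 0, KLP_PATCHED = 1 };

    struct task { enum klp_state patch_state; };
    struct func { atomic_bool transition; };

    /*
     * Models the ordering requirement in klp_init_transition(): every
     * task->patch_state write must be visible before any func->transition
     * write, so a reader that sees transition == true never reads a
     * patch_state of KLP_UNDEFINED.
     */
    static void init_transition(struct task *tasks, int n,
                                struct func *funcs, int m,
                                enum klp_state initial_state)
    {
        for (int i = 0; i < n; i++)
            tasks[i].patch_state = initial_state;

        atomic_thread_fence(memory_order_release);   /* ~ smp_wmb() */

        for (int j = 0; j < m; j++)
            atomic_store_explicit(&funcs[j].transition, true,
                                  memory_order_relaxed);
    }

    int main(void)
    {
        struct task tasks[2] = { { KLP_UNDEFINED }, { KLP_UNDEFINED } };
        struct func funcs[1] = { { false } };
        init_transition(tasks, 2, funcs, 1, KLP_UNPATCHED);
        return 0;
    }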

In klp_reverse_transition():

  581  struct task_struct *g, *task;   (local)
  598  for_each_process_thread(g, task)
  599  clear_tsk_thread_flag(task, TIF_PATCH_PENDING);
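
klp_reverse_transition() aborts an in-flight transition: it clears every TIF_PATCH_PENDING flag, flips klp_target_state, and re-arms the flags in the opposite direction. A compact sketch of that flip, reusing the illustrative task model:

    #include <stdbool.h>
    #include <stdio.h>

    enum klp_state { KLP_UNPATCHED = 0, KLP_PATCHED = 1 };
    static enum klp_state klp_target_state = KLP_PATCHED;

    struct task { bool patch_pending; enum klp_state patch_state; };

    /* Models klp_reverse_transition(): clear flags, flip target, re-flag. */
    static void reverse_transition(struct task *tasks, int n)
    {
        for (int i = 0; i < n; i++)
            tasks[i].patch_pending = false;      /* ~ clear TIF_PATCH_PENDING */

        klp_target_state = !klp_target_state;    /* PATCHED <-> UNPATCHED */

        for (int i = 0; i < n; i++)              /* ~ klp_start_transition() */
            if (tasks[i].patch_state != klp_target_state)
                tasks[i].patch_pending = true;
    }

    int main(void)
    {
        struct task tasks[] = { { true, KLP_UNPATCHED }, { false, KLP_PATCHED } };
        reverse_transition(tasks, 2);
        printf("target: %d, pending: %d %d\n", klp_target_state,
               tasks[0].patch_pending, tasks[1].patch_pending);
        return 0;
    }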

In klp_force_transition():

  623  * NOTE: klp_update_patch_state(task) requires the task to be inactive or
  631  struct task_struct *g, *task;   (local)
  637  for_each_process_thread(g, task)
  638  klp_update_patch_state(task);
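
klp_force_transition() is the administrator's override: it calls klp_update_patch_state() on every task with no stack check at all, which is why the NOTE at line 623 matters; it is only safe when the tasks are known to be inactive or otherwise out of the affected code. A final sketch of that unconditional pass:

    #include <stdbool.h>
    #include <stdio.h>

    enum klp_state { KLP_UNPATCHED = 0, KLP_PATCHED = 1 };
    static enum klp_state klp_target_state = KLP_PATCHED;

    struct task { bool patch_pending; enum klp_state patch_state; };

    static void update_patch_state(struct task *t)
    {
        if (t->patch_pending) {
            t->patch_pending = false;
            t->patch_state = klp_target_state;
        }
    }

    /*
     * Models klp_force_transition(): switch every task unconditionally,
     * skipping the reliable-stack check.  Safe only if the administrator
     * can vouch that no task still runs the old code.
     */
    static void force_transition(struct task *tasks, int n)
    {
        for (int i = 0; i < n; i++)
            update_patch_state(&tasks[i]);
    }

    int main(void)
    {
        struct task tasks[] = { { true, KLP_UNPATCHED }, { true, KLP_UNPATCHED } };
        force_transition(tasks, 2);
        printf("states: %d %d\n", tasks[0].patch_state, tasks[1].patch_state);
        return 0;
    }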