Searched refs:PF_KTHREAD (Results 1 – 25 of 68) sorted by relevance
123
121 if ((current->flags & PF_KTHREAD) || !user_mode(regs)) { in unwind_user_start()
151 if (current->flags & PF_KTHREAD) in unwind_user()
239 if ((current->flags & (PF_KTHREAD | PF_EXITING)) || in unwind_deferred_request()
50 if (pm_freezing && !(p->flags & PF_KTHREAD)) in freezing_slow_path()
174 if (!(p->flags & PF_KTHREAD)) in freeze_task()
84 WARN_ON(!(k->flags & PF_KTHREAD)); in to_kthread()
1620 WARN_ON_ONCE(!(tsk->flags & PF_KTHREAD)); in kthread_use_mm()
1666 WARN_ON_ONCE(!(tsk->flags & PF_KTHREAD)); in kthread_unuse_mm()
1706 if (!(current->flags & PF_KTHREAD)) in kthread_associate_blkcg()
1732 if (current->flags & PF_KTHREAD) { in kthread_blkcg()
128 if (unlikely(!tsk->mm || (tsk->flags & PF_KTHREAD))) in __acct_update_integrals()
232 if (current->flags & PF_KTHREAD) in stack_trace_save_user()
173 if (preemptible() && (current->flags & PF_KTHREAD)) { in arch_efi_call_virt_setup()
210 if (preemptible() && (current->flags & PF_KTHREAD)) { in arch_efi_call_virt_teardown()
205 if (task->flags & PF_KTHREAD) in cgroup_do_freeze()
233 if (task->flags & PF_KTHREAD) in cgroup_freezer_migrate_task()
36 !(old->flags & (PF_KTHREAD | PF_USER_WORKER))) { in switch_fpu()
31 if (unlikely(p->flags & PF_KTHREAD)) in current_is_single_threaded()
59 if (WARN_ON_ONCE(current->flags & (PF_KTHREAD | PF_USER_WORKER))) in fpregs_restore_userregs()
63 if (WARN_ON_ONCE(task->flags & PF_KTHREAD)) in x86_task_fpu()
483 if (!(current->flags & (PF_KTHREAD | PF_USER_WORKER)) && in kernel_fpu_begin_mask()
897 WARN_ON_ONCE(current->flags & PF_KTHREAD); in fpregs_lock_and_load()
22 #define PF_KTHREAD 0x00200000 macro
115 .is_kthread = task->flags & PF_KTHREAD ? 1 : 0, in update_task_info()
9 #define PF_KTHREAD 0x00200000 /* I am a kernel thread */ macro
173 if (t->flags & PF_KTHREAD) in can_record()
41 if (p->flags & PF_KTHREAD) { in BPF_STRUCT_OPS()
22 return (current->flags & (PF_EXITING | PF_KTHREAD)) || percpu_ref_is_dying(&ctx->refs); in io_should_terminate_tw()
74 if unsafe { ((*current.as_ptr()).flags & bindings::PF_KTHREAD) != 0 } { in close_fd()
226 if (p->flags & PF_KTHREAD) in thaw_kernel_threads()
79 if (task->flags & PF_KTHREAD) in arch_stack_walk_reliable()
140 if (((current->flags & PF_KTHREAD) != 0) && in emit_log_message()
114 if ((p->flags & PF_KTHREAD) && p->nr_cpus_allowed == 1) { in BPF_STRUCT_OPS()
56 if (task->flags & PF_KTHREAD) { in ltl_atoms_init()
23 if (p->flags & PF_KTHREAD) in tsk_is_kthread()
104 .flags = PF_KTHREAD,
120 if (fpu_state_size_dynamic() && !(tsk->flags & (PF_KTHREAD | PF_USER_WORKER))) in arch_release_task_struct()
229 if (unlikely(p->flags & PF_KTHREAD)) { in copy_thread()