Home
last modified time | relevance | path

Searched refs: task_current_donor (Results 1 – 6 of 6) sorted by relevance

/linux/kernel/sched/
syscalls.c 94 running = task_current_donor(rq, p); in set_user_nice()
702 running = task_current_donor(rq, p); in __sched_setscheduler()
core.c 2719 running = task_current_donor(rq, p); in __do_set_cpus_allowed()
2916 (task_current_donor(rq, p) && !task_current(rq, p))) { in affine_move_task()
5513 if (task_current_donor(rq, p) && task_on_rq_queued(p)) { in task_sched_runtime()
6894 if (!task_current_donor(rq, next)) in __schedule()
6932 if (!task_current_donor(rq, next)) in __schedule()
7395 running = task_current_donor(rq, p); in rt_mutex_setprio()
8093 running = task_current_donor(rq, p); in sched_setnuma()
9219 running = task_current_donor(rq, tsk); in sched_move_task()
rt.c 1860 BUG_ON(task_current_donor(rq, p)); in pick_next_pushable_task()
2445 if (task_current_donor(rq, p)) { in prio_changed_rt()
fair.c 5874 !task_current_donor(rq_of(cfs_rq), p)) { in enqueue_throttled_task()
6794 if (task_current_donor(rq, p)) in hrtick_start_fair()
9449 task_current_donor(env->src_rq, p)) { in can_migrate_task()
9494 WARN_ON(task_current_donor(env->src_rq, p)); in detach_task()
13161 if (task_current_donor(rq, p)) { in prio_changed_fair()
13268 if (task_current_donor(rq, p)) in switched_to_fair()
deadline.c 3067 if (task_current_donor(rq, p)) { in prio_changed_dl()
sched.h 2285 static inline int task_current_donor(struct rq *rq, struct task_struct *p) in task_current_donor() function