
Searched refs: task_rq_lock (Results 1 – 11 of 11), sorted by relevance

/linux/Documentation/locking/
    lockstat.rst:163    36 &rq->lock 645 [<ffffffff8103bfc4>] task_rq_lock+0x43/0x75
    lockstat.rst:168    41 &rq->lock 77 [<ffffffff8103bfc4>] task_rq_lock+0x43/0x75
/linux/Documentation/translations/it_IT/locking/
    lockstat.rst:188    36 &rq->lock 645 [<ffffffff8103bfc4>] task_rq_lock+0x43/0x75
    lockstat.rst:193    41 &rq->lock 77 [<ffffffff8103bfc4>] task_rq_lock+0x43/0x75
/linux/kernel/sched/
    core_sched.c:62     rq = task_rq_lock(p, &rf);  in sched_core_update_cookie()
    syscalls.c:77       CLASS(task_rq_lock, rq_guard)(p);  in set_user_nice()
    syscalls.c:591      rq = task_rq_lock(p, &rf);  in __sched_setscheduler()
    syscalls.c:1543     scoped_guard (task_rq_lock, p) {  in sched_rr_get_interval()
    ext_idle.c:880      rq = task_rq_lock(p, &rf);  in select_cpu_from_kfunc()
    sched.h:1798        struct rq *task_rq_lock(struct task_struct *p, struct rq_flags *rf)
    sched.h:1819        DEFINE_LOCK_GUARD_1(task_rq_lock, struct task_struct,
    sched.h:1820            _T->rq = task_rq_lock(_T->lock, &_T->rf),
    sched.h:2177        * After ->cpu is set up to a new value, task_rq_lock(p, ...) can be  in __set_task_cpu()
    cputime.c:301       rq = task_rq_lock(t, &rf);  in read_sum_exec_runtime()
    ext.c:1652          iter->rq = task_rq_lock(p, &iter->rf);  in scx_task_iter_next_locked()
    ext.c:3818          rq = task_rq_lock(p, &rf);  in scx_init_task()
    ext.c:3963          rq = task_rq_lock(p, &rf);  in scx_post_fork()
    ext.c:3982          rq = task_rq_lock(p, &rf);  in scx_cancel_fork()
    ext.c:4007          rq = task_rq_lock(p, &rf);  in sched_ext_free()
    deadline.c:1123     * expiring after we've done the check will wait on its task_rq_lock()  in start_dl_timer()
    deadline.c:1237     rq = task_rq_lock(p, &rf);  in dl_task_timer()
    deadline.c:1754     rq = task_rq_lock(p, &rf);  in inactive_task_timer()
    psi.c:1170          rq = task_rq_lock(task, &rf);  in cgroup_move_task()
    fair.c:8582         rq = task_rq_lock(p, &rf);  in task_dead_fair()
    fair.c:11721        * See task_rq_lock() family for the details.  in sched_balance_rq()
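
For orientation, the call sites above fall into two patterns. Below is a minimal sketch of both, assuming only what the hits show: the task_rq_lock() declaration at sched.h:1798, its task_rq_unlock() counterpart in the same header, and the DEFINE_LOCK_GUARD_1() wrapper at sched.h:1819. The function names (example_*) and comments are illustrative, not taken from the tree, and the code only builds inside kernel/sched/ where sched.h is visible.

/* Open-coded form, as in dl_task_timer(), scx_init_task(), cgroup_move_task(): */
static void example_open_coded(struct task_struct *p)
{
	struct rq_flags rf;
	struct rq *rq;

	rq = task_rq_lock(p, &rf);	/* takes p->pi_lock, then the lock of p's runqueue */
	/* ... p cannot change runqueues while both locks are held ... */
	task_rq_unlock(rq, p, &rf);
}

/* Guard-based form built on DEFINE_LOCK_GUARD_1(task_rq_lock, ...) from
 * sched.h:1819, as in set_user_nice(); the lock is dropped automatically
 * when rq_guard goes out of scope.  sched_rr_get_interval() uses the
 * scoped_guard (task_rq_lock, p) { ... } variant of the same guard. */
static void example_guarded(struct task_struct *p)
{
	CLASS(task_rq_lock, rq_guard)(p);
	struct rq *rq = rq_guard.rq;	/* the guard carries the locked rq */

	/* ... */
	(void)rq;
}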