Lines matching defs: t (preemptible-RCU code, kernel/rcu/tree_plugin.h)
117 static void rcu_read_unlock_special(struct task_struct *t);
145 * to the head of the list won't block any grace period that is already
169 struct task_struct *t = current;
196 list_add(&t->rcu_node_entry, &rnp->blkd_tasks);
214 list_add_tail(&t->rcu_node_entry, &rnp->blkd_tasks);
227 list_add(&t->rcu_node_entry, rnp->exp_tasks);
238 list_add(&t->rcu_node_entry, rnp->gp_tasks);
255 WRITE_ONCE(rnp->gp_tasks, &t->rcu_node_entry);
259 WRITE_ONCE(rnp->exp_tasks, &t->rcu_node_entry);
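The list_add()/list_add_tail() pair above is what gives ->blkd_tasks its ordering: a preempted reader that cannot block a grace period already in progress is queued at the head, one that blocks the current grace period is queued at the tail, and ->gp_tasks is pointed at the first entry the current grace period still has to wait for. Below is a minimal userspace sketch of that invariant, assuming stripped-down re-implementations of the kernel's <linux/list.h> helpers; fake_task is an invented stand-in for task_struct, so treat this as an illustration rather than kernel code.

    #include <stdio.h>

    struct list_head { struct list_head *next, *prev; };

    #define LIST_HEAD_INIT(name) { &(name), &(name) }

    static void __list_add(struct list_head *new,
                           struct list_head *prev, struct list_head *next)
    {
        next->prev = new;
        new->next = next;
        new->prev = prev;
        prev->next = new;
    }

    /* Head insertion: this reader cannot block a grace period in progress. */
    static void list_add(struct list_head *new, struct list_head *head)
    {
        __list_add(new, head, head->next);
    }

    /* Tail insertion: this reader blocks the current grace period. */
    static void list_add_tail(struct list_head *new, struct list_head *head)
    {
        __list_add(new, head->prev, head);
    }

    struct fake_task { struct list_head rcu_node_entry; int pid; };

    int main(void)
    {
        struct list_head blkd_tasks = LIST_HEAD_INIT(blkd_tasks);
        struct list_head *gp_tasks = NULL;
        struct fake_task a = { .pid = 1 }, b = { .pid = 2 }, c = { .pid = 3 };

        /* a and b block the current grace period, c does not. */
        list_add_tail(&a.rcu_node_entry, &blkd_tasks);
        if (!gp_tasks)
            gp_tasks = &a.rcu_node_entry;   /* WRITE_ONCE() in the kernel */
        list_add_tail(&b.rcu_node_entry, &blkd_tasks);
        list_add(&c.rcu_node_entry, &blkd_tasks);

        /* Everything from gp_tasks to the list head blocks the current GP. */
        for (struct list_head *p = gp_tasks; p != &blkd_tasks; p = p->next) {
            /* container_of() in the kernel; a plain cast works here
             * because rcu_node_entry is the first member. */
            struct fake_task *blocked = (struct fake_task *)p;
            printf("grace period waits on pid %d\n", blocked->pid);
        }
        return 0;
    }

Walking from ->gp_tasks to the list head therefore enumerates exactly the readers the current grace period must still wait for, which is what the loop above prints.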
326 struct task_struct *t = current;
334 !t->rcu_read_unlock_special.b.blocked) {
339 t->rcu_read_unlock_special.b.blocked = true;
340 t->rcu_blocked_node = rnp;
348 WARN_ON_ONCE(!list_empty(&t->rcu_node_entry));
350 t->pid,
356 rcu_preempt_deferred_qs(t);
432 struct task_struct *t = current;
437 if (unlikely(READ_ONCE(t->rcu_read_unlock_special.s)))
438 rcu_read_unlock_special(t);
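The .b.blocked and .s accesses above (and the .b.need_qs ones further down) work because t->rcu_read_unlock_special is a union: individual byte-sized flags overlaid by one word, so __rcu_read_unlock() can decide with a single READ_ONCE() of .s whether any slow-path handling is needed at all. A small userspace sketch of that layout follows; the field names mirror the kernel's union rcu_special, but the exact sizes here are an assumption made for illustration.

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    union rcu_special_sketch {
        struct {
            uint8_t blocked;    /* preempted within a read-side critical section */
            uint8_t need_qs;    /* a quiescent state is urgently needed */
            uint8_t exp_hint;   /* expedited grace period hint */
            uint8_t need_mb;    /* readers need smp_mb() */
        } b;
        uint32_t s;             /* all of the above, readable in one load */
    };

    int main(void)
    {
        union rcu_special_sketch special = { .s = 0 };

        special.b.blocked = true;   /* as in rcu_note_context_switch() above */

        /* __rcu_read_unlock()'s check: one load decides the slow path. */
        if (special.s)
            printf("rcu_read_unlock_special() would be called\n");

        special.s = 0;              /* cleared once the flags have been handled */
        return 0;
    }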
452 static struct list_head *rcu_next_node_entry(struct task_struct *t,
457 np = t->rcu_node_entry.next;
478 rcu_preempt_deferred_qs_irqrestore(struct task_struct *t, unsigned long flags)
496 * t->rcu_read_unlock_special cannot change.
498 special = t->rcu_read_unlock_special;
503 t->rcu_read_unlock_special.s = 0;
532 rnp = t->rcu_blocked_node;
534 WARN_ON_ONCE(rnp != t->rcu_blocked_node);
540 np = rcu_next_node_entry(t, rnp);
541 list_del_init(&t->rcu_node_entry);
542 t->rcu_blocked_node = NULL;
544 rnp->gp_seq, t->pid);
545 if (&t->rcu_node_entry == rnp->gp_tasks)
547 if (&t->rcu_node_entry == rnp->exp_tasks)
551 drop_boost_mutex = rt_mutex_owner(&rnp->boost_mtx.rtmutex) == t;
552 if (&t->rcu_node_entry == rnp->boost_tasks)
558 * we aren't waiting on any CPUs, report the quiescent state.
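rcu_preempt_deferred_qs_irqrestore() above is the removal-side counterpart of the queueing shown earlier: the departing task first computes its successor with rcu_next_node_entry() (NULL if the successor would be the list head itself), unlinks itself with list_del_init(), and then advances ->gp_tasks, ->exp_tasks, or ->boost_tasks past itself if any of them pointed at the removed entry. A companion sketch, reusing the simplified list types from the queueing example above (illustration only, with invented helper names):

    static void list_del_init(struct list_head *entry)
    {
        entry->prev->next = entry->next;
        entry->next->prev = entry->prev;
        entry->next = entry;
        entry->prev = entry;
    }

    /* NULL means no remaining entry blocks the current grace period. */
    static struct list_head *next_node_entry(struct list_head *entry,
                                             struct list_head *head)
    {
        return entry->next == head ? NULL : entry->next;
    }

    static void dequeue_blocked_task(struct list_head *entry,
                                     struct list_head *head,
                                     struct list_head **gp_tasks)
    {
        struct list_head *np = next_node_entry(entry, head);

        list_del_init(entry);
        if (*gp_tasks == entry)     /* WRITE_ONCE() in the kernel */
            *gp_tasks = np;
    }

Once ->gp_tasks goes NULL this way and no CPUs remain to be waited on, the quiescent state can be reported, as the comment above says.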
600 static notrace bool rcu_preempt_need_deferred_qs(struct task_struct *t)
603 READ_ONCE(t->rcu_read_unlock_special.s)) &&
614 notrace void rcu_preempt_deferred_qs(struct task_struct *t)
618 if (!rcu_preempt_need_deferred_qs(t))
621 rcu_preempt_deferred_qs_irqrestore(t, flags);
664 * @t: The task being checked
671 static bool rcu_unlock_needs_exp_handling(struct task_struct *t,
681 * check because 't' might not be on the exp_tasks list at all - its
684 if (t->rcu_blocked_node && READ_ONCE(t->rcu_blocked_node->exp_tasks))
705 ((rdp->grpmask & READ_ONCE(rnp->qsmask)) || t->rcu_blocked_node))
717 if (IS_ENABLED(CONFIG_RCU_BOOST) && irqs_were_disabled && t->rcu_blocked_node)
728 static void rcu_read_unlock_special(struct task_struct *t)
746 needs_exp = rcu_unlock_needs_exp_handling(t, rdp, rnp, irqs_were_disabled);
773 rcu_preempt_deferred_qs_irqrestore(t, flags);
787 struct task_struct *t;
796 t = container_of(rnp->gp_tasks, struct task_struct,
799 rnp->gp_seq, t->pid);
813 struct task_struct *t = current;
819 if (rcu_preempt_need_deferred_qs(t)) {
820 set_tsk_need_resched(t);
823 } else if (rcu_preempt_need_deferred_qs(t)) {
824 rcu_preempt_deferred_qs(t); /* Report deferred QS. */
835 !t->rcu_read_unlock_special.b.need_qs &&
837 t->rcu_read_unlock_special.b.need_qs = true;
850 struct task_struct *t = current;
855 WRITE_ONCE(t->rcu_read_unlock_special.b.blocked, true);
898 pr_info("\t%d: %c online: %ld(%d) offline: %ld(%d)\n",
1035 static notrace bool rcu_preempt_need_deferred_qs(struct task_struct *t)
1047 notrace void rcu_preempt_deferred_qs(struct task_struct *t)
1160 struct task_struct *t;
1190 * We boost task t by manufacturing an rt_mutex that appears to
1191 * be held by task t. We leave a pointer to that rt_mutex where
1192 * task t can find it, and task t will release the mutex when it
1195 * t's priority. (Thanks to tglx for suggesting this approach!)
1197 * Note that task t must acquire rnp->lock to remove itself from
1199 * nowhere else. We therefore are guaranteed that task t will
1202 * and task t's exiting its outermost RCU read-side critical
1205 t = container_of(tb, struct task_struct, rcu_node_entry);
1206 rt_mutex_init_proxy_locked(&rnp->boost_mtx.rtmutex, t);
1208 /* Lock only for side effect: boosts task t's priority. */
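The comment block above describes RCU priority boosting: rcu_boost() manufactures an rt_mutex that appears to be held by the preempted reader via rt_mutex_init_proxy_locked(), then blocks on it, so rt-mutex priority inheritance raises the reader's priority until it exits its outermost read-side critical section and releases the mutex (the drop_boost_mutex check in rcu_preempt_deferred_qs_irqrestore() above is where the reader notices it owns that proxy-locked mutex). The proxy-lock API is kernel-internal, but the inheritance half of the trick can be demonstrated in userspace with a PTHREAD_PRIO_INHERIT mutex. The sketch below is that analogue, not the RCU code; thread and variable names are invented, and observing an actual boost requires SCHED_FIFO privileges.

    #define _GNU_SOURCE
    #include <pthread.h>
    #include <sched.h>
    #include <stdio.h>
    #include <unistd.h>

    static pthread_mutex_t boost_mtx;

    /* Stands in for the preempted reader that holds up the grace period. */
    static void *reader(void *arg)
    {
        (void)arg;
        pthread_mutex_lock(&boost_mtx);     /* "holds" the boost mutex */
        sleep(1);                           /* long read-side critical section */
        pthread_mutex_unlock(&boost_mtx);   /* deboost happens here */
        return NULL;
    }

    /* Stands in for the boost kthread: blocking on the PI mutex lends its
     * (higher) priority to the current holder. */
    static void *booster(void *arg)
    {
        (void)arg;
        pthread_mutex_lock(&boost_mtx);
        pthread_mutex_unlock(&boost_mtx);
        return NULL;
    }

    int main(void)
    {
        pthread_mutexattr_t attr;
        pthread_t r, b;
        struct sched_param sp = { .sched_priority = 10 };

        pthread_mutexattr_init(&attr);
        pthread_mutexattr_setprotocol(&attr, PTHREAD_PRIO_INHERIT);
        pthread_mutex_init(&boost_mtx, &attr);

        pthread_create(&r, NULL, reader, NULL);
        usleep(100 * 1000);                         /* let the reader take the lock */

        pthread_create(&b, NULL, booster, NULL);
        pthread_setschedparam(b, SCHED_FIFO, &sp);  /* high-priority booster */

        pthread_join(b, NULL);
        pthread_join(r, NULL);
        pthread_mutex_destroy(&boost_mtx);
        return 0;
    }

The kernel difference is that the reader never calls the equivalent of pthread_mutex_lock(): rt_mutex_init_proxy_locked() simply marks it as the owner, which is why the reader must check rt_mutex_owner() on exit to know that it has a mutex to release.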
1259 * The ->boost_kthread_task is immortal, so we don't need to worry
1306 struct task_struct *t;
1311 t = kthread_create(rcu_boost_kthread, (void *)rnp,
1313 if (WARN_ON_ONCE(IS_ERR(t)))
1317 rnp->boost_kthread_task = t;
1321 sched_setscheduler_nocheck(t, SCHED_FIFO, &sp);
1322 rcu_thread_affine_rnp(t, rnp);
1323 wake_up_process(t); /* get to TASK_INTERRUPTIBLE quickly. */
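rcu_spawn_one_boost_kthread() above is the standard spawn pattern: kthread_create() the thread, record its task_struct in the rcu_node, give it an RT policy, then wake_up_process() it so it promptly reaches its TASK_INTERRUPTIBLE wait loop. A minimal kernel-module sketch of the same pattern follows; it is an illustration rather than the RCU code, the demo_* names are invented, and it uses sched_set_fifo() (a helper exported to modules) where the listing uses the core-kernel sched_setscheduler_nocheck() call.

    #include <linux/module.h>
    #include <linux/kthread.h>
    #include <linux/sched.h>
    #include <linux/jiffies.h>
    #include <linux/delay.h>
    #include <linux/err.h>

    static struct task_struct *demo_task;

    static int demo_threadfn(void *data)
    {
        /* Sleep until told to stop, roughly what a boost kthread's
         * outer loop does while there is nothing to boost. */
        while (!kthread_should_stop())
            schedule_timeout_interruptible(HZ);
        return 0;
    }

    static int __init demo_init(void)
    {
        demo_task = kthread_create(demo_threadfn, NULL, "demo_boostd");
        if (IS_ERR(demo_task))
            return PTR_ERR(demo_task);
        sched_set_fifo(demo_task);      /* RT priority, like SCHED_FIFO above */
        wake_up_process(demo_task);     /* reach the wait loop quickly */
        return 0;
    }

    static void __exit demo_exit(void)
    {
        kthread_stop(demo_task);
    }

    module_init(demo_init);
    module_exit(demo_exit);
    MODULE_LICENSE("GPL");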