Lines Matching +full:wait +full:- +full:state

1 // SPDX-License-Identifier: GPL-2.0-only
11 spin_lock_init(&wq_head->lock); in __init_waitqueue_head()
12 lockdep_set_class_and_name(&wq_head->lock, key, name); in __init_waitqueue_head()
13 INIT_LIST_HEAD(&wq_head->head); in __init_waitqueue_head()
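
For context, callers normally reach __init_waitqueue_head() through one of two fronts in include/linux/wait.h; a minimal sketch (my_wq and my_dev are made-up names):

    /* static: lockdep key and name generated at compile time */
    static DECLARE_WAIT_QUEUE_HEAD(my_wq);

    /* dynamic, e.g. in a probe routine: the init_waitqueue_head() macro
     * passes a static lockdep key plus the variable name down to
     * __init_waitqueue_head() */
    struct my_dev { wait_queue_head_t wq; };
    init_waitqueue_head(&dev->wq);
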
22 wq_entry->flags &= ~WQ_FLAG_EXCLUSIVE; in add_wait_queue()
23 spin_lock_irqsave(&wq_head->lock, flags); in add_wait_queue()
25 spin_unlock_irqrestore(&wq_head->lock, flags); in add_wait_queue()
33 wq_entry->flags |= WQ_FLAG_EXCLUSIVE; in add_wait_queue_exclusive()
34 spin_lock_irqsave(&wq_head->lock, flags); in add_wait_queue_exclusive()
36 spin_unlock_irqrestore(&wq_head->lock, flags); in add_wait_queue_exclusive()
44 spin_lock_irqsave(&wq_head->lock, flags); in remove_wait_queue()
46 spin_unlock_irqrestore(&wq_head->lock, flags); in remove_wait_queue()
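
These three entry points differ only in how WQ_FLAG_EXCLUSIVE is set before the entry is linked in under wq_head->lock. A hedged sketch of manual queueing (condition stands in for the caller's own predicate):

    DEFINE_WAIT_FUNC(wait, default_wake_function);

    add_wait_queue(&my_wq, &wait);    /* add_wait_queue_exclusive() for wake-one */
    for (;;) {
        set_current_state(TASK_INTERRUPTIBLE);  /* barrier; see the note before prepare_to_wait() below */
        if (condition)
            break;
        schedule();
    }
    __set_current_state(TASK_RUNNING);
    remove_wait_queue(&my_wq, &wait);
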
51 * Scan threshold to break wait queue walk.
53 * wait queue lock during the wait queue walk.
58 * The core wakeup function. Non-exclusive wakeups (nr_exclusive == 0) just
60 * number) then we wake all the non-exclusive tasks and one exclusive task.
63 * started to run but is not in state TASK_RUNNING. try_to_wake_up() returns
73 lockdep_assert_held(&wq_head->lock); in __wake_up_common()
75 if (bookmark && (bookmark->flags & WQ_FLAG_BOOKMARK)) { in __wake_up_common()
78 list_del(&bookmark->entry); in __wake_up_common()
79 bookmark->flags = 0; in __wake_up_common()
81 curr = list_first_entry(&wq_head->head, wait_queue_entry_t, entry); in __wake_up_common()
83 if (&curr->entry == &wq_head->head) in __wake_up_common()
86 list_for_each_entry_safe_from(curr, next, &wq_head->head, entry) { in __wake_up_common()
87 unsigned flags = curr->flags; in __wake_up_common()
93 ret = curr->func(curr, mode, wake_flags, key); in __wake_up_common()
96 if (ret && (flags & WQ_FLAG_EXCLUSIVE) && !--nr_exclusive) in __wake_up_common()
100 (&next->entry != &wq_head->head)) { in __wake_up_common()
101 bookmark->flags = WQ_FLAG_BOOKMARK; in __wake_up_common()
102 list_add_tail(&bookmark->entry, &next->entry); in __wake_up_common()
122 spin_lock_irqsave(&wq_head->lock, flags); in __wake_up_common_lock()
125 spin_unlock_irqrestore(&wq_head->lock, flags); in __wake_up_common_lock()
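
Reconstructed around the matched lines (details vary by kernel version), __wake_up_common_lock() drives the walk with an on-stack bookmark entry: once __wake_up_common() has scanned WAITQUEUE_WALK_BREAK_CNT entries it plants the bookmark and returns, the lock is dropped and retaken, and the walk resumes where it left off:

    wait_queue_entry_t bookmark;

    bookmark.flags = 0;
    bookmark.private = NULL;
    bookmark.func = NULL;
    INIT_LIST_HEAD(&bookmark.entry);

    do {
        spin_lock_irqsave(&wq_head->lock, flags);
        nr_exclusive = __wake_up_common(wq_head, mode, nr_exclusive,
                                        wake_flags, key, &bookmark);
        spin_unlock_irqrestore(&wq_head->lock, flags);
    } while (bookmark.flags & WQ_FLAG_BOOKMARK);
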
130 * __wake_up - wake up threads blocked on a waitqueue.
133 * @nr_exclusive: how many wake-one or wake-many threads to wake up
137 * accessing the task state.
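
Callers rarely invoke __wake_up() directly; the wake_up() macro family in include/linux/wait.h fills in mode and nr_exclusive, roughly:

    wake_up(&my_wq);                /* __wake_up(&my_wq, TASK_NORMAL, 1, NULL): one exclusive waiter */
    wake_up_all(&my_wq);            /* nr_exclusive == 0: wake every waiter */
    wake_up_interruptible(&my_wq);  /* only TASK_INTERRUPTIBLE sleepers */
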
169 * __wake_up_sync_key - wake up threads blocked on a waitqueue.
176 * be migrated to another CPU - ie. the two threads are 'synchronized'
182 * accessing the task state.
195 * __wake_up_locked_sync_key - wake up a thread blocked on a locked waitqueue.
202 * be migrated to another CPU - ie. the two threads are 'synchronized'
208 * accessing the task state.
218 * __wake_up_sync - see __wake_up_sync_key()
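
The _sync variants add WF_SYNC to wake_flags as a scheduler hint: the waker promises to block soon, so the wakee need not be pushed to another CPU. A hedged producer-side sketch (buf is a made-up structure):

    spin_lock(&buf->lock);
    buf->ready = true;
    spin_unlock(&buf->lock);

    /* we are about to sleep ourselves, so don't migrate the consumer */
    wake_up_interruptible_sync(&buf->wq);
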
227 * Note: we use "set_current_state()" _after_ the wait-queue add,
229 * wake-function that tests for the wait-queue being active
233 * The spin_unlock() itself is semi-permeable and only protects
235 * stops them from bleeding out - it would still allow subsequent
239 prepare_to_wait(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry, int state) in prepare_to_wait() argument
243 wq_entry->flags &= ~WQ_FLAG_EXCLUSIVE; in prepare_to_wait()
244 spin_lock_irqsave(&wq_head->lock, flags); in prepare_to_wait()
245 if (list_empty(&wq_entry->entry)) in prepare_to_wait()
247 set_current_state(state); in prepare_to_wait()
248 spin_unlock_irqrestore(&wq_head->lock, flags); in prepare_to_wait()
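
The canonical open-coded wait loop built on prepare_to_wait()/finish_wait(), essentially what wait_event_interruptible() expands to (condition is a placeholder):

    DEFINE_WAIT(wait);              /* wakes via autoremove_wake_function() */
    int ret = 0;

    for (;;) {
        prepare_to_wait(&my_wq, &wait, TASK_INTERRUPTIBLE);
        if (condition)
            break;
        if (signal_pending(current)) {
            ret = -ERESTARTSYS;
            break;
        }
        schedule();
    }
    finish_wait(&my_wq, &wait);
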
… prepare_to_wait_exclusive(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry, int state) in prepare_to_wait_exclusive()
257 wq_entry->flags |= WQ_FLAG_EXCLUSIVE; in prepare_to_wait_exclusive()
258 spin_lock_irqsave(&wq_head->lock, flags); in prepare_to_wait_exclusive()
259 if (list_empty(&wq_entry->entry)) in prepare_to_wait_exclusive()
261 set_current_state(state); in prepare_to_wait_exclusive()
262 spin_unlock_irqrestore(&wq_head->lock, flags); in prepare_to_wait_exclusive()
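
The exclusive variant queues at the tail with WQ_FLAG_EXCLUSIVE set, so a plain wake_up() (nr_exclusive == 1) wakes a single waiter instead of the whole herd; only the prepare step in the loop above changes:

    prepare_to_wait_exclusive(&my_wq, &wait, TASK_UNINTERRUPTIBLE);
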
268 wq_entry->flags = flags; in init_wait_entry()
269 wq_entry->private = current; in init_wait_entry()
270 wq_entry->func = autoremove_wake_function; in init_wait_entry()
271 INIT_LIST_HEAD(&wq_entry->entry); in init_wait_entry()
… prepare_to_wait_event(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry, int state) in prepare_to_wait_event()
280 spin_lock_irqsave(&wq_head->lock, flags); in prepare_to_wait_event()
281 if (signal_pending_state(state, current)) { in prepare_to_wait_event()
288 * wakeup locks/unlocks the same wq_head->lock. in prepare_to_wait_event()
290 * But we need to ensure that set-condition + wakeup after that in prepare_to_wait_event()
294 list_del_init(&wq_entry->entry); in prepare_to_wait_event()
295 ret = -ERESTARTSYS; in prepare_to_wait_event()
297 if (list_empty(&wq_entry->entry)) { in prepare_to_wait_event()
298 if (wq_entry->flags & WQ_FLAG_EXCLUSIVE) in prepare_to_wait_event()
303 set_current_state(state); in prepare_to_wait_event()
305 spin_unlock_irqrestore(&wq_head->lock, flags); in prepare_to_wait_event()
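
prepare_to_wait_event() is the backend of the wait_event*() macros: folding the signal check into the queueing step lets ___wait_event() bail out with -ERESTARTSYS before ever sleeping. Caller-side shape:

    int ret = wait_event_interruptible(my_wq, condition);
    if (ret)        /* -ERESTARTSYS: a signal arrived before condition turned true */
        return ret;
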
312 * Note! These two wait functions are entered with the
313 * wait-queue lock held (and interrupts off in the _irq
315 * condition in the caller before they add the wait
318 int do_wait_intr(wait_queue_head_t *wq, wait_queue_entry_t *wait) in do_wait_intr() argument
320 if (likely(list_empty(&wait->entry))) in do_wait_intr()
321 __add_wait_queue_entry_tail(wq, wait); in do_wait_intr()
325 return -ERESTARTSYS; in do_wait_intr()
327 spin_unlock(&wq->lock); in do_wait_intr()
329 spin_lock(&wq->lock); in do_wait_intr()
335 int do_wait_intr_irq(wait_queue_head_t *wq, wait_queue_entry_t *wait) in do_wait_intr_irq() argument
337 if (likely(list_empty(&wait->entry))) in do_wait_intr_irq()
338 __add_wait_queue_entry_tail(wq, wait); in do_wait_intr_irq()
342 return -ERESTARTSYS; in do_wait_intr_irq()
344 spin_unlock_irq(&wq->lock); in do_wait_intr_irq()
346 spin_lock_irq(&wq->lock); in do_wait_intr_irq()
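
Both helpers are meant to be called in a loop with wq->lock already held (interrupts off as well for the _irq flavour); they drop the lock only around schedule() and retake it before returning. They back the wait_event_interruptible_locked*() macros; sketched usage, assuming the caller owns the lock:

    spin_lock(&my_wq.lock);
    ret = wait_event_interruptible_locked(my_wq, condition);
    /* the lock is still held here, whether ret is 0 or -ERESTARTSYS */
    spin_unlock(&my_wq.lock);
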
353 * finish_wait - clean up after waiting in a queue
355 * @wq_entry: wait descriptor
357 * Sets current thread back to running state and removes
358 * the wait descriptor from the given waitqueue if still
369 * - we use the "careful" check that verifies both in finish_wait()
371 * be any half-pending updates in progress on other in finish_wait()
375 * - all other users take the lock (ie we can only in finish_wait()
379 if (!list_empty_careful(&wq_entry->entry)) { in finish_wait()
380 spin_lock_irqsave(&wq_head->lock, flags); in finish_wait()
381 list_del_init(&wq_entry->entry); in finish_wait()
382 spin_unlock_irqrestore(&wq_head->lock, flags); in finish_wait()
392 list_del_init_careful(&wq_entry->entry); in autoremove_wake_function()
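
Around the matched line, autoremove_wake_function() (reconstructed, modulo version drift) is default_wake_function() plus self-removal; the removal is what lets finish_wait() take its lock-free list_empty_careful() fast path:

    int autoremove_wake_function(struct wait_queue_entry *wq_entry, unsigned mode,
                                 int sync, void *key)
    {
        int ret = default_wake_function(wq_entry, mode, sync, key);

        if (ret)                /* the task really was woken */
            list_del_init_careful(&wq_entry->entry);
        return ret;
    }
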
400 return (current->flags & PF_KTHREAD) && kthread_should_stop(); in is_kthread_should_stop()
404 * DEFINE_WAIT_FUNC(wait, woken_wake_function);
406 * add_wait_queue(&wq_head, &wait);
413 * p->state = mode; wq_entry->flags |= WQ_FLAG_WOKEN;
415 * if (!(wq_entry->flags & WQ_FLAG_WOKEN)) <full barrier>
416 * schedule() if (p->state & mode)
417 * p->state = TASK_RUNNING; p->state = TASK_RUNNING;
418 * wq_entry->flags &= ~WQ_FLAG_WOKEN; ~~~~~~~~~~~~~~~~~~
421 * remove_wait_queue(&wq_head, &wait); wq_entry->flags |= WQ_FLAG_WOKEN;
428 * either we see the store to wq_entry->flags in woken_wake_function() in wait_woken()
429 * or woken_wake_function() sees our store to current->state. in wait_woken()
432 if (!(wq_entry->flags & WQ_FLAG_WOKEN) && !is_kthread_should_stop()) in wait_woken()
438 * in woken_wake_function() such that either we see the wait condition in wait_woken()
439 * being true or the store to wq_entry->flags in woken_wake_function() in wait_woken()
442 smp_store_mb(wq_entry->flags, wq_entry->flags & ~WQ_FLAG_WOKEN); /* B */ in wait_woken()
452 wq_entry->flags |= WQ_FLAG_WOKEN; in woken_wake_function()
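
Rendered as a caller, the diagram above becomes the woken-wait pattern (used e.g. on networking receive paths); all WQ_FLAG_WOKEN bookkeeping stays inside wait_woken()/woken_wake_function():

    DEFINE_WAIT_FUNC(wait, woken_wake_function);
    long timeout = MAX_SCHEDULE_TIMEOUT;

    add_wait_queue(&my_wq, &wait);
    while (!condition) {
        timeout = wait_woken(&wait, TASK_INTERRUPTIBLE, timeout);
        if (!timeout || signal_pending(current))
            break;
    }
    remove_wait_queue(&my_wq, &wait);
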