Lines matching "wake-up" in kernel/sched/wait.c

// SPDX-License-Identifier: GPL-2.0-only
in __init_waitqueue_head():
	spin_lock_init(&wq_head->lock);
	lockdep_set_class_and_name(&wq_head->lock, key, name);
	INIT_LIST_HEAD(&wq_head->head);
in add_wait_queue():
	wq_entry->flags &= ~WQ_FLAG_EXCLUSIVE;
	spin_lock_irqsave(&wq_head->lock, flags);
	__add_wait_queue(wq_head, wq_entry);
	spin_unlock_irqrestore(&wq_head->lock, flags);
in add_wait_queue_exclusive():
	wq_entry->flags |= WQ_FLAG_EXCLUSIVE;
	spin_lock_irqsave(&wq_head->lock, flags);
	__add_wait_queue_entry_tail(wq_head, wq_entry);
	spin_unlock_irqrestore(&wq_head->lock, flags);
in remove_wait_queue():
	spin_lock_irqsave(&wq_head->lock, flags);
	__remove_wait_queue(wq_head, wq_entry);
	spin_unlock_irqrestore(&wq_head->lock, flags);
/*
 * The core wakeup function. Non-exclusive wakeups (nr_exclusive == 0) just
 * wake everything up. If it's an exclusive wakeup (nr_exclusive == small +ve
 * number) then we wake all the non-exclusive tasks and one exclusive task.
 *
 * There are circumstances in which we can try to wake a task which has already
 * started to run but is not in state TASK_RUNNING. try_to_wake_up() returns
 * zero in this (rare) case, and we handle it by continuing to scan the queue.
 */
in __wake_up_common():
	lockdep_assert_held(&wq_head->lock);

	if (bookmark && (bookmark->flags & WQ_FLAG_BOOKMARK)) {
		curr = list_next_entry(bookmark, entry);
		list_del(&bookmark->entry);
		bookmark->flags = 0;
	} else
		curr = list_first_entry(&wq_head->head, wait_queue_entry_t, entry);

	if (&curr->entry == &wq_head->head)
		return nr_exclusive;

	list_for_each_entry_safe_from(curr, next, &wq_head->head, entry) {
		unsigned flags = curr->flags;
		int ret;

		ret = curr->func(curr, mode, wake_flags, key);
		if (ret < 0)
			break;
		if (ret && (flags & WQ_FLAG_EXCLUSIVE) && !--nr_exclusive)
			break;

		if (bookmark && (++cnt > WAITQUEUE_WALK_BREAK_CNT) &&
				(&next->entry != &wq_head->head)) {
			bookmark->flags = WQ_FLAG_BOOKMARK;
			list_add_tail(&bookmark->entry, &next->entry);
			break;
		}
	}
in __wake_up_common_lock():
	spin_lock_irqsave(&wq_head->lock, flags);
	nr_exclusive = __wake_up_common(wq_head, mode, nr_exclusive,
					wake_flags, key, &bookmark);
	spin_unlock_irqrestore(&wq_head->lock, flags);
/**
 * __wake_up - wake up threads blocked on a waitqueue.
 * @nr_exclusive: how many wake-one or wake-many threads to wake up
 *
 * If this function wakes up a task, it executes a full memory barrier before
 * accessing the task state.
 */
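
For orientation, here is a minimal sketch of the usual calling pattern around this function; the wait-queue head my_wq and the flag data_ready are illustrative names, not part of wait.c:

	static DECLARE_WAIT_QUEUE_HEAD(my_wq);	/* hypothetical wait-queue head */
	static bool data_ready;			/* hypothetical wake-up condition */

	/* Waiter side: sleeps until data_ready is true (or a signal arrives). */
	static int consumer(void)
	{
		return wait_event_interruptible(my_wq, data_ready);
	}

	/* Waker side: publish the condition first, then wake the queue. */
	static void producer(void)
	{
		data_ready = true;	/* ordered before the wakeup's full barrier */
		wake_up(&my_wq);	/* expands to __wake_up(&my_wq, TASK_NORMAL, 1, NULL) */
	}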
/**
 * __wake_up_sync_key - wake up threads blocked on a waitqueue.
 *
 * The sync wakeup differs in that the waker knows that it will schedule
 * away soon, so while the target thread will be woken up, it will not
 * be migrated to another CPU - ie. the two threads are 'synchronized'
 * with each other. This can prevent needless bouncing between CPUs.
 *
 * On UP it can prevent extra preemption.
 *
 * If this function wakes up a task, it executes a full memory barrier before
 * accessing the task state.
 */
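
A hedged sketch of where the sync hint pays off: a waker that is itself about to block, so migrating the wakee buys nothing (buffer_drained and my_wq are illustrative names):

	static void producer_flush(void)
	{
		buffer_drained = true;			/* illustrative condition */
		wake_up_interruptible_sync(&my_wq);	/* WF_SYNC: waker promises to schedule away soon */
	}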
/**
 * __wake_up_locked_sync_key - wake up a thread blocked on a locked waitqueue.
 *
 * The sync wakeup differs in that the waker knows that it will schedule
 * away soon, so while the target thread will be woken up, it will not
 * be migrated to another CPU - ie. the two threads are 'synchronized'
 * with each other. This can prevent needless bouncing between CPUs.
 *
 * On UP it can prevent extra preemption.
 *
 * If this function wakes up a task, it executes a full memory barrier before
 * accessing the task state.
 */
/**
 * __wake_up_sync - see __wake_up_sync_key()
 */
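
The _locked variants assume the caller already holds wq_head->lock. A sketch of that calling convention, using the plain (non-sync) sibling wake_up_locked(); the condition update is illustrative:

	static void waker_under_lock(void)
	{
		spin_lock(&my_wq.lock);
		data_ready = true;	/* condition protected by the waitqueue lock */
		wake_up_locked(&my_wq);	/* wake_up() would deadlock: it takes wq->lock itself */
		spin_unlock(&my_wq.lock);
	}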
/*
 * Note: we use "set_current_state()" _after_ the wait-queue add,
 * because we need a memory barrier there on SMP, so that any
 * wake-function that tests for the wait-queue being active
 * will be guaranteed to see waitqueue addition _or_ subsequent
 * wake-up without any race condition.
 *
 * The spin_unlock() itself is semi-permeable and only protects
 * one way (it only protects stuff inside the critical region and
 * stops them from bleeding out - it would still allow subsequent
 * loads to move into the critical region).
 */
in prepare_to_wait():
	wq_entry->flags &= ~WQ_FLAG_EXCLUSIVE;
	spin_lock_irqsave(&wq_head->lock, flags);
	if (list_empty(&wq_entry->entry))
		__add_wait_queue(wq_head, wq_entry);
	set_current_state(state);
	spin_unlock_irqrestore(&wq_head->lock, flags);
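
Read together with the ordering note above, this is the canonical open-coded wait loop the helper supports; a sketch with illustrative names:

	static void wait_for_event(void)
	{
		DEFINE_WAIT(wait);		/* entry using autoremove_wake_function */

		for (;;) {
			prepare_to_wait(&my_wq, &wait, TASK_UNINTERRUPTIBLE);
			if (data_ready)		/* re-check after the queue add */
				break;
			schedule();		/* sleep until woken */
		}
		finish_wait(&my_wq, &wait);	/* back to TASK_RUNNING, dequeued */
	}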
in prepare_to_wait_exclusive():
	wq_entry->flags |= WQ_FLAG_EXCLUSIVE;
	spin_lock_irqsave(&wq_head->lock, flags);
	if (list_empty(&wq_entry->entry))
		__add_wait_queue_entry_tail(wq_head, wq_entry);
	set_current_state(state);
	spin_unlock_irqrestore(&wq_head->lock, flags);
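
The tail-queued WQ_FLAG_EXCLUSIVE entry is what gives __wake_up_common() the wake-one behaviour described earlier: wake_up() stops after the first exclusive waiter it wakes, so the rest keep sleeping and there is no thundering herd. A sketch of an accept-style loop (accept_wq and connection_pending are illustrative):

	static int wait_for_connection(void)
	{
		DEFINE_WAIT(wait);
		int err = 0;

		for (;;) {
			prepare_to_wait_exclusive(&accept_wq, &wait, TASK_INTERRUPTIBLE);
			if (connection_pending)
				break;
			if (signal_pending(current)) {
				err = -ERESTARTSYS;
				break;
			}
			schedule();
		}
		finish_wait(&accept_wq, &wait);
		return err;
	}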
in init_wait_entry():
	wq_entry->flags = flags;
	wq_entry->private = current;
	wq_entry->func = autoremove_wake_function;
	INIT_LIST_HEAD(&wq_entry->entry);
in prepare_to_wait_event():
	spin_lock_irqsave(&wq_head->lock, flags);
	if (signal_pending_state(state, current)) {
		/*
		 * Exclusive waiter must not fail if it was selected by wakeup,
		 * it should "consume" the condition we were waiting for.
		 *
		 * The caller will recheck the condition and return success if
		 * we were already woken up, we can not miss the event because
		 * wakeup locks/unlocks the same wq_head->lock.
		 *
		 * But we need to ensure that set-condition + wakeup after that
		 * can't see us, it should wake up another exclusive waiter if
		 * we fail.
		 */
		list_del_init(&wq_entry->entry);
		ret = -ERESTARTSYS;
	} else {
		if (list_empty(&wq_entry->entry)) {
			if (wq_entry->flags & WQ_FLAG_EXCLUSIVE)
				__add_wait_queue_entry_tail(wq_head, wq_entry);
			else
				__add_wait_queue(wq_head, wq_entry);
		}
		set_current_state(state);
	}
	spin_unlock_irqrestore(&wq_head->lock, flags);
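
prepare_to_wait_event() is the workhorse behind the wait_event*() macro family; from a caller's point of view the -ERESTARTSYS path above simply surfaces as an interrupted, restartable wait. A sketch (my_wq and data_ready as before):

	static long my_read_wait(void)
	{
		long ret;

		ret = wait_event_interruptible(my_wq, data_ready);
		if (ret)	/* -ERESTARTSYS: interrupted by a signal */
			return ret;
		/* data_ready was observed true here */
		return 0;
	}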
/*
 * Note! These two wait functions are entered with the
 * wait-queue lock held (and interrupts off in the _irq
 * case), so there is no race with testing the wakeup
 * condition in the caller before they add the wait
 * entry to the wake queue.
 */
in do_wait_intr():
	if (likely(list_empty(&wait->entry)))
		__add_wait_queue_entry_tail(wq, wait);

	set_current_state(TASK_INTERRUPTIBLE);
	if (signal_pending(current))
		return -ERESTARTSYS;

	spin_unlock(&wq->lock);
	schedule();
	spin_lock(&wq->lock);
in do_wait_intr_irq():
	if (likely(list_empty(&wait->entry)))
		__add_wait_queue_entry_tail(wq, wait);

	set_current_state(TASK_INTERRUPTIBLE);
	if (signal_pending(current))
		return -ERESTARTSYS;

	spin_unlock_irq(&wq->lock);
	schedule();
	spin_lock_irq(&wq->lock);
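
These two helpers back the wait_event_interruptible_locked*() macros, where the caller takes wq->lock itself so that checking the condition, updating it, and queueing all stay atomic. A sketch on the waiter side (my_wq and data_ready as before):

	static int consumer_locked(void)
	{
		int err;

		spin_lock(&my_wq.lock);
		err = wait_event_interruptible_locked(my_wq, data_ready);
		if (!err)
			data_ready = false;	/* consume while still holding the lock */
		spin_unlock(&my_wq.lock);
		return err;
	}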
/**
 * finish_wait - clean up after waiting in a queue
 *
 * Sets the current thread back to running state and removes
 * the wait descriptor from the given waitqueue if still queued.
 */
in finish_wait():
	__set_current_state(TASK_RUNNING);
	/*
	 * We can check for list emptiness outside the lock IFF:
	 *  - we use the "careful" check that verifies both the next
	 *    and prev pointers, so that there cannot be any
	 *    half-pending updates in progress on other CPUs that we
	 *    haven't seen yet, and
	 *  - all other users take the lock (ie we can only have
	 *    _one_ other CPU that looks at or modifies the list).
	 */
	if (!list_empty_careful(&wq_entry->entry)) {
		spin_lock_irqsave(&wq_head->lock, flags);
		list_del_init(&wq_entry->entry);
		spin_unlock_irqrestore(&wq_head->lock, flags);
	}
in autoremove_wake_function():
	list_del_init_careful(&wq_entry->entry);
in is_kthread_should_stop():
	return (current->flags & PF_KTHREAD) && kthread_should_stop();
/*
 * DEFINE_WAIT_FUNC(wait, woken_wake_function);
 *
 * add_wait_queue(&wq_head, &wait);
 * for (;;) {
 *     if (condition)
 *         break;
 *
 *     // in wait_woken()                       // in woken_wake_function()
 *
 *     p->state = mode;                         wq_entry->flags |= WQ_FLAG_WOKEN;
 *     smp_mb(); // A                           try_to_wake_up():
 *     if (!(wq_entry->flags & WQ_FLAG_WOKEN))     <full barrier>
 *         schedule()                              if (p->state & mode)
 *     p->state = TASK_RUNNING;                       p->state = TASK_RUNNING;
 *     wq_entry->flags &= ~WQ_FLAG_WOKEN;       ~~~~~~~~~~~~~~~~~~
 *     smp_mb(); // B                           condition = true;
 * }                                            smp_mb(); // C
 * remove_wait_queue(&wq_head, &wait);          wq_entry->flags |= WQ_FLAG_WOKEN;
 */
in wait_woken():
	/*
	 * The below executes an smp_mb(), which matches the full barrier
	 * executed by try_to_wake_up() in woken_wake_function(), such that
	 * either we see the store to wq_entry->flags in woken_wake_function()
	 * or woken_wake_function() sees our store to current->state.
	 */
	set_current_state(mode); /* A */
	if (!(wq_entry->flags & WQ_FLAG_WOKEN) && !is_kthread_should_stop())
		timeout = schedule_timeout(timeout);
	__set_current_state(TASK_RUNNING);

	/*
	 * The below matches the smp_mb() (C) in woken_wake_function(),
	 * such that either we see the wait condition being true or the
	 * store to wq_entry->flags in woken_wake_function() follows ours
	 * in the coherence order.
	 */
	smp_store_mb(wq_entry->flags, wq_entry->flags & ~WQ_FLAG_WOKEN); /* B */
in woken_wake_function():
	smp_mb(); /* C */
	wq_entry->flags |= WQ_FLAG_WOKEN;
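
Putting it together, a hedged sketch of the wait_woken() pattern the diagram above describes, in the shape network receive kthreads commonly use it (my_wq, data_ready, and the function name are illustrative):

	static int rx_thread_wait(long timeout)
	{
		DEFINE_WAIT_FUNC(wait, woken_wake_function);

		add_wait_queue(&my_wq, &wait);
		while (!data_ready && !kthread_should_stop()) {
			/*
			 * No set_current_state() here: wait_woken() sets it,
			 * and WQ_FLAG_WOKEN closes the check-then-sleep race.
			 */
			timeout = wait_woken(&wait, TASK_INTERRUPTIBLE, timeout);
			if (!timeout)
				break;	/* timed out */
		}
		remove_wait_queue(&my_wq, &wait);
		return data_ready ? 0 : -ETIMEDOUT;
	}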