Lines matching "wake up" in kernel/sched/wait.c
// SPDX-License-Identifier: GPL-2.0-only
/* in __init_waitqueue_head(): */
spin_lock_init(&wq_head->lock);
lockdep_set_class_and_name(&wq_head->lock, key, name);
INIT_LIST_HEAD(&wq_head->head);
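/*
 * Example (hypothetical, not from this file): a minimal sketch of the two
 * usual ways a driver obtains an initialized wait queue head; "my_dev" and
 * its setup function are made-up names.
 */
static DECLARE_WAIT_QUEUE_HEAD(global_wq);	/* compile-time initialization */

struct my_dev {
	wait_queue_head_t wq;
};

static void my_dev_setup(struct my_dev *d)
{
	init_waitqueue_head(&d->wq);	/* runtime path into __init_waitqueue_head() */
}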
/* in add_wait_queue(): */
wq_entry->flags &= ~WQ_FLAG_EXCLUSIVE;
spin_lock_irqsave(&wq_head->lock, flags);
__add_wait_queue(wq_head, wq_entry);
spin_unlock_irqrestore(&wq_head->lock, flags);
/* in add_wait_queue_exclusive(): */
wq_entry->flags |= WQ_FLAG_EXCLUSIVE;
spin_lock_irqsave(&wq_head->lock, flags);
__add_wait_queue_entry_tail(wq_head, wq_entry);
spin_unlock_irqrestore(&wq_head->lock, flags);
/* in add_wait_queue_priority(): */
wq_entry->flags |= WQ_FLAG_EXCLUSIVE | WQ_FLAG_PRIORITY;
spin_lock_irqsave(&wq_head->lock, flags);
__add_wait_queue(wq_head, wq_entry);
spin_unlock_irqrestore(&wq_head->lock, flags);
/* in remove_wait_queue(): */
spin_lock_irqsave(&wq_head->lock, flags);
__remove_wait_queue(wq_head, wq_entry);
spin_unlock_irqrestore(&wq_head->lock, flags);
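/*
 * Example (hypothetical): manually queueing a wait entry around a condition.
 * Note the ordering: the entry is added first, then the task state is set,
 * matching the barrier comments further down in this file.
 */
static DECLARE_WAIT_QUEUE_HEAD(flag_wq);
static bool flag_set;

static void my_wait_for_flag(void)
{
	DEFINE_WAIT_FUNC(wentry, default_wake_function);

	add_wait_queue(&flag_wq, &wentry);
	for (;;) {
		set_current_state(TASK_UNINTERRUPTIBLE);
		if (READ_ONCE(flag_set))
			break;
		schedule();
	}
	__set_current_state(TASK_RUNNING);
	remove_wait_queue(&flag_wq, &wentry);
}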
/*
 * The core wakeup function. Non-exclusive wakeups (nr_exclusive == 0) just
 * wake everything up. If it's an exclusive wakeup (nr_exclusive == small +ve
 * number) then we wake that number of exclusive tasks, and potentially all
 * the non-exclusive tasks. Normally, exclusive tasks will be at the end of
 * the list and any non-exclusive tasks will be woken first. A priority task
 * may be at the head of the list, and can be woken first.
 *
 * There are circumstances in which we can try to wake a task which has already
 * started to run but is not in state TASK_RUNNING. try_to_wake_up() returns
 * zero in this (rare) case, and we handle it by continuing to scan the queue.
 */
/* in __wake_up_common(): */
lockdep_assert_held(&wq_head->lock);
curr = list_first_entry(&wq_head->head, wait_queue_entry_t, entry);
if (&curr->entry == &wq_head->head)
	return nr_exclusive;
list_for_each_entry_safe_from(curr, next, &wq_head->head, entry) {
	unsigned flags = curr->flags;
	int ret = curr->func(curr, mode, wake_flags, key);
	if (ret < 0)
		break;
	if (ret && (flags & WQ_FLAG_EXCLUSIVE) && !--nr_exclusive)
		break;
}
/* in __wake_up_common_lock(): */
spin_lock_irqsave(&wq_head->lock, flags);
remaining = __wake_up_common(wq_head, mode, nr_exclusive, wake_flags, key);
spin_unlock_irqrestore(&wq_head->lock, flags);
return nr_exclusive - remaining;
/**
 * __wake_up - wake up threads blocked on a waitqueue.
 * @nr_exclusive: how many wake-one or wake-many threads to wake up
 *
 * If this function wakes up a task, it executes a full memory barrier
 * before accessing the task state.
 */
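/*
 * Example (hypothetical): the usual high-level pairing built on __wake_up().
 * wake_up() expands to __wake_up(&my_wq, TASK_NORMAL, 1, NULL), i.e. it
 * wakes at most one exclusive waiter plus all non-exclusive ones.
 */
static DECLARE_WAIT_QUEUE_HEAD(my_wq);
static bool my_cond;

static int my_consumer(void)
{
	/* sleeps until my_cond is true; -ERESTARTSYS if a signal arrives */
	return wait_event_interruptible(my_wq, READ_ONCE(my_cond));
}

static void my_producer(void)
{
	WRITE_ONCE(my_cond, true);	/* set the condition before waking */
	wake_up(&my_wq);
}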
/**
 * __wake_up_sync_key - wake up threads blocked on a waitqueue.
 *
 * The sync wakeup differs in that the waker knows that it will schedule
 * away soon, so while the target thread will be woken up, it will not
 * be migrated to another CPU - ie. the two threads are 'synchronized'
 * with each other. On UP it can prevent extra preemption.
 *
 * If this function wakes up a task, it executes a full memory barrier before
 * accessing the task state.
 */
/**
 * __wake_up_locked_sync_key - wake up a thread blocked on a locked waitqueue.
 *
 * The sync wakeup differs in that the waker knows that it will schedule
 * away soon, so while the target thread will be woken up, it will not
 * be migrated to another CPU - ie. the two threads are 'synchronized'
 * with each other. On UP it can prevent extra preemption.
 *
 * If this function wakes up a task, it executes a full memory barrier before
 * accessing the task state.
 */
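/*
 * Example (hypothetical): a writer that is itself about to block can use a
 * _sync variant as a hint that it will schedule away, so the woken reader
 * need not be migrated to another CPU.
 */
struct my_pipe {
	wait_queue_head_t read_wq;
	bool data_ready;
};

static void my_handoff(struct my_pipe *p)
{
	WRITE_ONCE(p->data_ready, true);
	wake_up_interruptible_sync(&p->read_wq);
	/* ... this task blocks shortly afterwards, e.g. waiting for space ... */
}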
/*
 * __wake_up_sync - see __wake_up_sync_key()
 */
/*
 * Note: we use "set_current_state()" _after_ the wait-queue add,
 * because we need a memory barrier there on SMP, so that any
 * wake-function that tests for the wait-queue being active
 * will be guaranteed to see waitqueue addition _or_ subsequent
 * tests in this thread will see the wakeup having taken place.
 *
 * The spin_unlock() itself is semi-permeable and only protects
 * stuff inside the critical region and stops them from bleeding
 * out - it would still allow subsequent loads to move in.
 */
/* in prepare_to_wait(): */
wq_entry->flags &= ~WQ_FLAG_EXCLUSIVE;
spin_lock_irqsave(&wq_head->lock, flags);
if (list_empty(&wq_entry->entry))
	__add_wait_queue(wq_head, wq_entry);
set_current_state(state);
spin_unlock_irqrestore(&wq_head->lock, flags);
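/*
 * Example (hypothetical): the canonical open-coded wait loop that
 * prepare_to_wait() supports, paired with finish_wait() further down.
 */
static DECLARE_WAIT_QUEUE_HEAD(ev_wq);
static bool ev_ready;

static void my_wait_for_event(void)
{
	DEFINE_WAIT(w);		/* wait entry using autoremove_wake_function */

	for (;;) {
		prepare_to_wait(&ev_wq, &w, TASK_UNINTERRUPTIBLE);
		if (READ_ONCE(ev_ready))
			break;
		schedule();
	}
	finish_wait(&ev_wq, &w);
}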
/* in prepare_to_wait_exclusive(): */
wq_entry->flags |= WQ_FLAG_EXCLUSIVE;
spin_lock_irqsave(&wq_head->lock, flags);
if (list_empty(&wq_entry->entry)) {
	was_empty = list_empty(&wq_head->head);
	__add_wait_queue_entry_tail(wq_head, wq_entry);
}
set_current_state(state);
spin_unlock_irqrestore(&wq_head->lock, flags);
return was_empty;
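/*
 * Example (hypothetical): exclusive waiters queue at the tail and are woken
 * one at a time, so N workers waiting for one job do not all stampede.
 */
static DECLARE_WAIT_QUEUE_HEAD(job_wq);
static atomic_t jobs;

static void my_worker_wait(void)
{
	DEFINE_WAIT(w);

	for (;;) {
		prepare_to_wait_exclusive(&job_wq, &w, TASK_UNINTERRUPTIBLE);
		if (atomic_read(&jobs) > 0)
			break;
		schedule();
	}
	finish_wait(&job_wq, &w);
}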
/* in init_wait_entry(): */
wq_entry->flags = flags;
wq_entry->private = current;
wq_entry->func = autoremove_wake_function;
INIT_LIST_HEAD(&wq_entry->entry);
/* in prepare_to_wait_event(): */
spin_lock_irqsave(&wq_head->lock, flags);
if (signal_pending_state(state, current)) {
	/*
	 * The caller will recheck the condition and return success if
	 * we were already woken up, we can not miss the event because
	 * wakeup locks/unlocks the same wq_head->lock.
	 * But we need to ensure that set-condition + wakeup after that
	 * can't see us, it should wake up another exclusive waiter if
	 * we fail.
	 */
	list_del_init(&wq_entry->entry);
	ret = -ERESTARTSYS;
} else {
	if (list_empty(&wq_entry->entry)) {
		if (wq_entry->flags & WQ_FLAG_EXCLUSIVE)
			__add_wait_queue_entry_tail(wq_head, wq_entry);
		else
			__add_wait_queue(wq_head, wq_entry);
	}
	set_current_state(state);
}
spin_unlock_irqrestore(&wq_head->lock, flags);
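/*
 * Example (hypothetical, reusing the my_wq/my_cond sketch above):
 * wait_event_interruptible() loops over prepare_to_wait_event(); the
 * -ERESTARTSYS it produces on a signal is typically returned straight to
 * the core, which handles restart/EINTR.
 */
static ssize_t my_read_wait(void)
{
	int err = wait_event_interruptible(my_wq, READ_ONCE(my_cond));

	if (err)
		return err;	/* -ERESTARTSYS from prepare_to_wait_event() */
	/* ... condition is true, consume the data ... */
	return 0;
}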
/*
 * Note! These two wait functions are entered with the
 * wait-queue lock held (and interrupts off in the _irq
 * case), so there is no race with testing the wakeup
 * condition in the caller before they add the wait
 * entry to the wake queue.
 */
/* in do_wait_intr(): */
if (likely(list_empty(&wait->entry)))
	__add_wait_queue_entry_tail(wq, wait);
set_current_state(TASK_INTERRUPTIBLE);
if (signal_pending(current))
	return -ERESTARTSYS;
spin_unlock(&wq->lock);
schedule();
spin_lock(&wq->lock);
/* in do_wait_intr_irq(): */
if (likely(list_empty(&wait->entry)))
	__add_wait_queue_entry_tail(wq, wait);
set_current_state(TASK_INTERRUPTIBLE);
if (signal_pending(current))
	return -ERESTARTSYS;
spin_unlock_irq(&wq->lock);
schedule();
spin_lock_irq(&wq->lock);
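/*
 * Example (hypothetical): do_wait_intr() backs the *_locked wait macros,
 * which are entered with wq.lock already held so the condition test and
 * the queueing cannot race with the waker.
 */
static DECLARE_WAIT_QUEUE_HEAD(res_wq);
static unsigned int res_count;

static int my_take_resource(void)
{
	int err;

	spin_lock(&res_wq.lock);
	err = wait_event_interruptible_locked(res_wq, res_count > 0);
	if (!err)
		res_count--;
	spin_unlock(&res_wq.lock);
	return err;
}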
/**
 * finish_wait - clean up after waiting in a queue
 */
/* in finish_wait(): */
__set_current_state(TASK_RUNNING);
/*
 * We can check for list emptiness outside the lock IFF:
 *  - we use the "careful" check that verifies both the
 *    next and prev pointers, so that there cannot be any
 *    half-pending updates in progress on other CPUs, and
 *  - all other users take the lock (ie we can only have
 *    _one_ other CPU that looks at or modifies the list).
 */
if (!list_empty_careful(&wq_entry->entry)) {
	spin_lock_irqsave(&wq_head->lock, flags);
	list_del_init(&wq_entry->entry);
	spin_unlock_irqrestore(&wq_head->lock, flags);
}
/* in autoremove_wake_function(): */
list_del_init_careful(&wq_entry->entry);
/*
 * DEFINE_WAIT_FUNC(wait, woken_wake_func);
 *
 * add_wait_queue(&wq_head, &wait);
 * for (;;) {
 *     if (condition)
 *         break;
 *
 *     // in wait_woken()			// in woken_wake_function()
 *
 *     p->state = mode;				wq_entry->flags |= WQ_FLAG_WOKEN;
 *     smp_mb(); // A				try_to_wake_up():
 *     if (!(wq_entry->flags & WQ_FLAG_WOKEN))	   <full barrier>
 *         schedule()				   if (p->state & mode)
 *     p->state = TASK_RUNNING;			      p->state = TASK_RUNNING;
 *						   ~~~~~~~~~~~~~~~~~~
 *     wq_entry->flags &= ~WQ_FLAG_WOKEN;	~~~~~~~~~~~~~~~~~~
 *     smp_mb(); // B				condition = true;
 * }						smp_mb(); // C
 * remove_wait_queue(&wq_head, &wait);		wq_entry->flags |= WQ_FLAG_WOKEN;
 */
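/*
 * Example (hypothetical): the wait_woken() pattern the diagram above
 * describes, as commonly seen in network drivers.
 */
static DECLARE_WAIT_QUEUE_HEAD(link_wq);
static bool link_up;

static long my_wait_for_link(long timeout)
{
	DEFINE_WAIT_FUNC(wait, woken_wake_function);

	add_wait_queue(&link_wq, &wait);
	while (!READ_ONCE(link_up) && timeout)
		timeout = wait_woken(&wait, TASK_INTERRUPTIBLE, timeout);
	remove_wait_queue(&link_wq, &wait);
	return timeout;	/* 0 on timeout, remaining jiffies otherwise */
}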
/* in wait_woken(): */
/*
 * Either we see the store to wq_entry->flags in woken_wake_function()
 * or woken_wake_function() sees our store to current->state.
 */
set_current_state(mode); /* A */
if (!(wq_entry->flags & WQ_FLAG_WOKEN) && !kthread_should_stop_or_park())
	timeout = schedule_timeout(timeout);
__set_current_state(TASK_RUNNING);
/*
 * Either we see the wait condition being true or the store to
 * wq_entry->flags in woken_wake_function() follows ours.
 */
smp_store_mb(wq_entry->flags, wq_entry->flags & ~WQ_FLAG_WOKEN); /* B */
/* in woken_wake_function(): */
wq_entry->flags |= WQ_FLAG_WOKEN;