cregit-Linux: how code gets into the kernel

Release 4.15 kernel/sched/wait.c

Directory: kernel/sched
/*
 * Generic waiting primitives.
 *
 * (C) 2004 Nadia Yvette Chambers, Oracle
 */
#include <linux/init.h>
#include <linux/export.h>
#include <linux/sched/signal.h>
#include <linux/sched/debug.h>
#include <linux/mm.h>
#include <linux/wait.h>
#include <linux/hash.h>
#include <linux/kthread.h>


void __init_waitqueue_head(struct wait_queue_head *wq_head, const char *name, struct lock_class_key *key)
{
        spin_lock_init(&wq_head->lock);
        lockdep_set_class_and_name(&wq_head->lock, key, name);
        INIT_LIST_HEAD(&wq_head->head);
}

Contributors

Person                    Tokens   Prop      Commits   CommitProp
Ingo Molnar               26       54.17%    3         60.00%
Peter Zijlstra            22       45.83%    2         40.00%
Total                     48       100.00%   5         100.00%

EXPORT_SYMBOL(__init_waitqueue_head);
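
For orientation, here is a minimal usage sketch that is not part of this file: callers normally reach __init_waitqueue_head() through the init_waitqueue_head()/DECLARE_WAIT_QUEUE_HEAD() wrappers and sleep/wake through the wait_event*() and wake_up*() macros in <linux/wait.h>. All my_driver_* names below are hypothetical.

/* Hypothetical driver-side sketch built on the primitives in this file. */
static DECLARE_WAIT_QUEUE_HEAD(my_driver_wq);   /* statically initialized wait_queue_head */
static bool my_driver_data_ready;

static int my_driver_consumer(void)
{
        /* Sleeps until the condition becomes true or a signal arrives. */
        return wait_event_interruptible(my_driver_wq, my_driver_data_ready);
}

static void my_driver_producer(void)
{
        my_driver_data_ready = true;
        wake_up(&my_driver_wq);         /* ends up in __wake_up() below */
}
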
void add_wait_queue(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry)
{
        unsigned long flags;

        wq_entry->flags &= ~WQ_FLAG_EXCLUSIVE;
        spin_lock_irqsave(&wq_head->lock, flags);
        __add_wait_queue(wq_head, wq_entry);
        spin_unlock_irqrestore(&wq_head->lock, flags);
}

Contributors

Person                    Tokens   Prop      Commits   CommitProp
William Lee Irwin III     41       77.36%    1         25.00%
Ingo Molnar               11       20.75%    2         50.00%
Omar Sandoval             1        1.89%     1         25.00%
Total                     53       100.00%   4         100.00%

EXPORT_SYMBOL(add_wait_queue);
void add_wait_queue_exclusive(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry)
{
        unsigned long flags;

        wq_entry->flags |= WQ_FLAG_EXCLUSIVE;
        spin_lock_irqsave(&wq_head->lock, flags);
        __add_wait_queue_entry_tail(wq_head, wq_entry);
        spin_unlock_irqrestore(&wq_head->lock, flags);
}

Contributors

Person                    Tokens   Prop      Commits   CommitProp
William Lee Irwin III     40       76.92%    1         25.00%
Ingo Molnar               12       23.08%    3         75.00%
Total                     52       100.00%   4         100.00%

EXPORT_SYMBOL(add_wait_queue_exclusive);
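
A brief illustration of the exclusive flag (again a hypothetical sketch, reusing my_driver_wq from above): exclusive waiters are queued at the tail and a wake-one wakeup stops after the first of them, which avoids thundering-herd wakeups when many tasks wait for the same event.

/* Hypothetical wake-one consumers: wake_up(&my_driver_wq) wakes at most one of them. */
static int my_driver_exclusive_consumer(void)
{
        /* Queues this task as an exclusive (tail) waiter on my_driver_wq. */
        return wait_event_interruptible_exclusive(my_driver_wq, my_driver_data_ready);
}
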
void remove_wait_queue(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry)
{
        unsigned long flags;

        spin_lock_irqsave(&wq_head->lock, flags);
        __remove_wait_queue(wq_head, wq_entry);
        spin_unlock_irqrestore(&wq_head->lock, flags);
}

Contributors

Person                    Tokens   Prop      Commits   CommitProp
William Lee Irwin III     36       78.26%    1         33.33%
Ingo Molnar               10       21.74%    2         66.67%
Total                     46       100.00%   3         100.00%

EXPORT_SYMBOL(remove_wait_queue);

/*
 * Scan threshold to break wait queue walk.
 * This allows a waker to take a break from holding the
 * wait queue lock during the wait queue walk.
 */
#define WAITQUEUE_WALK_BREAK_CNT 64

/*
 * The core wakeup function. Non-exclusive wakeups (nr_exclusive == 0) just
 * wake everything up. If it's an exclusive wakeup (nr_exclusive == small +ve
 * number) then we wake all the non-exclusive tasks and one exclusive task.
 *
 * There are circumstances in which we can try to wake a task which has already
 * started to run but is not in state TASK_RUNNING. try_to_wake_up() returns
 * zero in this (rare) case, and we handle it by continuing to scan the queue.
 */
static int __wake_up_common(struct wait_queue_head *wq_head, unsigned int mode,
                        int nr_exclusive, int wake_flags, void *key,
                        wait_queue_entry_t *bookmark)
{
        wait_queue_entry_t *curr, *next;
        int cnt = 0;

        if (bookmark && (bookmark->flags & WQ_FLAG_BOOKMARK)) {
                curr = list_next_entry(bookmark, entry);

                list_del(&bookmark->entry);
                bookmark->flags = 0;
        } else
                curr = list_first_entry(&wq_head->head, wait_queue_entry_t, entry);

        if (&curr->entry == &wq_head->head)
                return nr_exclusive;

        list_for_each_entry_safe_from(curr, next, &wq_head->head, entry) {
                unsigned flags = curr->flags;
                int ret;

                if (flags & WQ_FLAG_BOOKMARK)
                        continue;

                ret = curr->func(curr, mode, wake_flags, key);
                if (ret < 0)
                        break;
                if (ret && (flags & WQ_FLAG_EXCLUSIVE) && !--nr_exclusive)
                        break;

                if (bookmark && (++cnt > WAITQUEUE_WALK_BREAK_CNT) &&
                                (&next->entry != &wq_head->head)) {
                        bookmark->flags = WQ_FLAG_BOOKMARK;
                        list_add_tail(&bookmark->entry, &next->entry);
                        break;
                }
        }

        return nr_exclusive;
}

Contributors

Person                    Tokens   Prop      Commits   CommitProp
Tim Chen                  137      61.71%    1         16.67%
Peter Zijlstra            67       30.18%    1         16.67%
Linus Torvalds            12       5.41%     1         16.67%
Ingo Molnar               6        2.70%     3         50.00%
Total                     222      100.00%   6         100.00%


static void __wake_up_common_lock(struct wait_queue_head *wq_head, unsigned int mode,
                        int nr_exclusive, int wake_flags, void *key)
{
        unsigned long flags;
        wait_queue_entry_t bookmark;

        bookmark.flags = 0;
        bookmark.private = NULL;
        bookmark.func = NULL;
        INIT_LIST_HEAD(&bookmark.entry);

        spin_lock_irqsave(&wq_head->lock, flags);
        nr_exclusive = __wake_up_common(wq_head, mode, nr_exclusive, wake_flags, key, &bookmark);
        spin_unlock_irqrestore(&wq_head->lock, flags);

        while (bookmark.flags & WQ_FLAG_BOOKMARK) {
                spin_lock_irqsave(&wq_head->lock, flags);
                nr_exclusive = __wake_up_common(wq_head, mode, nr_exclusive,
                                                wake_flags, key, &bookmark);
                spin_unlock_irqrestore(&wq_head->lock, flags);
        }
}

Contributors

Person                    Tokens   Prop      Commits   CommitProp
Tim Chen                  88       61.11%    1         33.33%
Peter Zijlstra            50       34.72%    1         33.33%
Ingo Molnar               6        4.17%     1         33.33%
Total                     144      100.00%   3         100.00%

/**
 * __wake_up - wake up threads blocked on a waitqueue.
 * @wq_head: the waitqueue
 * @mode: which threads
 * @nr_exclusive: how many wake-one or wake-many threads to wake up
 * @key: is directly passed to the wakeup function
 *
 * It may be assumed that this function implies a write memory barrier before
 * changing the task state if and only if any tasks are woken up.
 */
void __wake_up(struct wait_queue_head *wq_head, unsigned int mode,
                        int nr_exclusive, void *key)
{
        __wake_up_common_lock(wq_head, mode, nr_exclusive, 0, key);
}

Contributors

Person                    Tokens   Prop      Commits   CommitProp
Tim Chen                  34       100.00%   1         100.00%
Total                     34       100.00%   1         100.00%

EXPORT_SYMBOL(__wake_up);

/*
 * Same as __wake_up but called with the spinlock in wait_queue_head_t held.
 */
void __wake_up_locked(struct wait_queue_head *wq_head, unsigned int mode, int nr)
{
        __wake_up_common(wq_head, mode, nr, 0, NULL, NULL);
}

Contributors

Person                    Tokens   Prop      Commits   CommitProp
Peter Zijlstra            26       81.25%    1         33.33%
Ingo Molnar               4        12.50%    1         33.33%
Tim Chen                  2        6.25%     1         33.33%
Total                     32       100.00%   3         100.00%

EXPORT_SYMBOL_GPL(__wake_up_locked);
void __wake_up_locked_key(struct wait_queue_head *wq_head, unsigned int mode, void *key)
{
        __wake_up_common(wq_head, mode, 1, 0, key, NULL);
}

Contributors

Person                    Tokens   Prop      Commits   CommitProp
Peter Zijlstra            26       78.79%    1         25.00%
Ingo Molnar               4        12.12%    1         25.00%
Tim Chen                  2        6.06%     1         25.00%
Andrea Arcangeli          1        3.03%     1         25.00%
Total                     33       100.00%   4         100.00%

EXPORT_SYMBOL_GPL(__wake_up_locked_key);
void __wake_up_locked_key_bookmark(struct wait_queue_head *wq_head,
                unsigned int mode, void *key, wait_queue_entry_t *bookmark)
{
        __wake_up_common(wq_head, mode, 1, 0, key, bookmark);
}

Contributors

Person                    Tokens   Prop      Commits   CommitProp
Tim Chen                  37       100.00%   1         100.00%
Total                     37       100.00%   1         100.00%

EXPORT_SYMBOL_GPL(__wake_up_locked_key_bookmark);
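
As a caller-side illustration (a sketch under hypothetical names, not from this file): the bookmark variant exists so that a waker facing a very long queue can drop wq_head->lock periodically; callers such as the page-bit waker in mm/filemap.c drive it roughly like the loop below, mirroring __wake_up_common_lock() above.

/* Sketch of a bookmark-driven wakeup over a possibly very long wait queue. */
static void my_wake_all(struct wait_queue_head *wq_head, void *key)
{
        unsigned long flags;
        wait_queue_entry_t bookmark;

        bookmark.flags = 0;
        bookmark.private = NULL;
        bookmark.func = NULL;
        INIT_LIST_HEAD(&bookmark.entry);

        spin_lock_irqsave(&wq_head->lock, flags);
        __wake_up_locked_key_bookmark(wq_head, TASK_NORMAL, key, &bookmark);
        spin_unlock_irqrestore(&wq_head->lock, flags);

        /* WQ_FLAG_BOOKMARK still set means the walk stopped early; resume it. */
        while (bookmark.flags & WQ_FLAG_BOOKMARK) {
                spin_lock_irqsave(&wq_head->lock, flags);
                __wake_up_locked_key_bookmark(wq_head, TASK_NORMAL, key, &bookmark);
                spin_unlock_irqrestore(&wq_head->lock, flags);
        }
}
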
/**
 * __wake_up_sync_key - wake up threads blocked on a waitqueue.
 * @wq_head: the waitqueue
 * @mode: which threads
 * @nr_exclusive: how many wake-one or wake-many threads to wake up
 * @key: opaque value to be passed to wakeup targets
 *
 * The sync wakeup differs that the waker knows that it will schedule
 * away soon, so while the target thread will be woken up, it will not
 * be migrated to another CPU - ie. the two threads are 'synchronized'
 * with each other. This can prevent needless bouncing between CPUs.
 *
 * On UP it can prevent extra preemption.
 *
 * It may be assumed that this function implies a write memory barrier before
 * changing the task state if and only if any tasks are woken up.
 */
void __wake_up_sync_key(struct wait_queue_head *wq_head, unsigned int mode,
                        int nr_exclusive, void *key)
{
        int wake_flags = 1; /* XXX WF_SYNC */

        if (unlikely(!wq_head))
                return;

        if (unlikely(nr_exclusive != 1))
                wake_flags = 0;

        __wake_up_common_lock(wq_head, mode, nr_exclusive, wake_flags, key);
}

Contributors

Person                    Tokens   Prop      Commits   CommitProp
Peter Zijlstra            56       90.32%    1         33.33%
Ingo Molnar               5        8.06%     1         33.33%
Tim Chen                  1        1.61%     1         33.33%
Total                     62       100.00%   3         100.00%

EXPORT_SYMBOL_GPL(__wake_up_sync_key);

/*
 * __wake_up_sync - see __wake_up_sync_key()
 */
void __wake_up_sync(struct wait_queue_head *wq_head, unsigned int mode, int nr_exclusive)
{
        __wake_up_sync_key(wq_head, mode, nr_exclusive, NULL);
}

Contributors

Person                    Tokens   Prop      Commits   CommitProp
Peter Zijlstra            24       85.71%    1         50.00%
Ingo Molnar               4        14.29%    1         50.00%
Total                     28       100.00%   2         100.00%

EXPORT_SYMBOL_GPL(__wake_up_sync);	/* For internal use only */

/*
 * Note: we use "set_current_state()" _after_ the wait-queue add,
 * because we need a memory barrier there on SMP, so that any
 * wake-function that tests for the wait-queue being active
 * will be guaranteed to see waitqueue addition _or_ subsequent
 * tests in this thread will see the wakeup having taken place.
 *
 * The spin_unlock() itself is semi-permeable and only protects
 * one way (it only protects stuff inside the critical region and
 * stops them from bleeding out - it would still allow subsequent
 * loads to move into the critical region).
 */
void prepare_to_wait(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry, int state)
{
        unsigned long flags;

        wq_entry->flags &= ~WQ_FLAG_EXCLUSIVE;
        spin_lock_irqsave(&wq_head->lock, flags);
        if (list_empty(&wq_entry->entry))
                __add_wait_queue(wq_head, wq_entry);
        set_current_state(state);
        spin_unlock_irqrestore(&wq_head->lock, flags);
}

Contributors

Person                    Tokens   Prop      Commits   CommitProp
William Lee Irwin III     58       81.69%    1         25.00%
Ingo Molnar               13       18.31%    3         75.00%
Total                     71       100.00%   4         100.00%

EXPORT_SYMBOL(prepare_to_wait);
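
For illustration (a sketch, not part of this file): the ordering rule described in the comment above prepare_to_wait() is what makes the classic open-coded wait loop safe. With the hypothetical names from the earlier sketches, a typical caller looks roughly like this:

/* Hypothetical open-coded wait loop; prepare_to_wait() queues us before the state change. */
static int my_driver_wait_for_data(void)
{
        DEFINE_WAIT(wait);      /* wait entry wired to autoremove_wake_function() */
        int err = 0;

        for (;;) {
                prepare_to_wait(&my_driver_wq, &wait, TASK_INTERRUPTIBLE);
                if (my_driver_data_ready)
                        break;
                if (signal_pending(current)) {
                        err = -ERESTARTSYS;
                        break;
                }
                schedule();
        }
        finish_wait(&my_driver_wq, &wait);
        return err;
}
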
void prepare_to_wait_exclusive(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry, int state)
{
        unsigned long flags;

        wq_entry->flags |= WQ_FLAG_EXCLUSIVE;
        spin_lock_irqsave(&wq_head->lock, flags);
        if (list_empty(&wq_entry->entry))
                __add_wait_queue_entry_tail(wq_head, wq_entry);
        set_current_state(state);
        spin_unlock_irqrestore(&wq_head->lock, flags);
}

Contributors

Person                    Tokens   Prop      Commits   CommitProp
William Lee Irwin III     56       80.00%    1         20.00%
Ingo Molnar               14       20.00%    4         80.00%
Total                     70       100.00%   5         100.00%

EXPORT_SYMBOL(prepare_to_wait_exclusive);
void init_wait_entry(struct wait_queue_entry *wq_entry, int flags)
{
        wq_entry->flags = flags;
        wq_entry->private = current;
        wq_entry->func = autoremove_wake_function;
        INIT_LIST_HEAD(&wq_entry->entry);
}

Contributors

Person                    Tokens   Prop      Commits   CommitProp
Oleg Nesterov             31       79.49%    1         33.33%
Ingo Molnar               8        20.51%    2         66.67%
Total                     39       100.00%   3         100.00%

EXPORT_SYMBOL(init_wait_entry);
long prepare_to_wait_event(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry, int state)
{
        unsigned long flags;
        long ret = 0;

        spin_lock_irqsave(&wq_head->lock, flags);
        if (unlikely(signal_pending_state(state, current))) {
                /*
                 * Exclusive waiter must not fail if it was selected by wakeup,
                 * it should "consume" the condition we were waiting for.
                 *
                 * The caller will recheck the condition and return success if
                 * we were already woken up, we can not miss the event because
                 * wakeup locks/unlocks the same wq_head->lock.
                 *
                 * But we need to ensure that set-condition + wakeup after that
                 * can't see us, it should wake up another exclusive waiter if
                 * we fail.
                 */
                list_del_init(&wq_entry->entry);
                ret = -ERESTARTSYS;
        } else {
                if (list_empty(&wq_entry->entry)) {
                        if (wq_entry->flags & WQ_FLAG_EXCLUSIVE)
                                __add_wait_queue_entry_tail(wq_head, wq_entry);
                        else
                                __add_wait_queue(wq_head, wq_entry);
                }
                set_current_state(state);
        }
        spin_unlock_irqrestore(&wq_head->lock, flags);

        return ret;
}

Contributors

Person                    Tokens   Prop      Commits   CommitProp
Oleg Nesterov             102      84.30%    2         33.33%
Ingo Molnar               19       15.70%    4         66.67%
Total                     121      100.00%   6         100.00%

EXPORT_SYMBOL(prepare_to_wait_event);
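
As an aside (a hedged sketch, not from this file): init_wait_entry() and prepare_to_wait_event() are the building blocks behind the wait_event*() macro family. Unrolled into a plain function with a hypothetical condition callback, that machinery amounts to roughly the following:

/* Roughly what a wait_event_interruptible(wq, cond) expansion boils down to. */
static long my_wait_event_interruptible(struct wait_queue_head *wq_head, bool (*cond)(void))
{
        struct wait_queue_entry wq_entry;
        long ret = 0;

        init_wait_entry(&wq_entry, 0);          /* 0: non-exclusive waiter */
        for (;;) {
                long intr = prepare_to_wait_event(wq_head, &wq_entry, TASK_INTERRUPTIBLE);

                if (cond())
                        break;
                if (intr) {                     /* -ERESTARTSYS: a signal is pending */
                        ret = intr;
                        break;
                }
                schedule();
        }
        finish_wait(wq_head, &wq_entry);
        return ret;
}
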
/*
 * Note! These two wait functions are entered with the
 * wait-queue lock held (and interrupts off in the _irq
 * case), so there is no race with testing the wakeup
 * condition in the caller before they add the wait
 * entry to the wake queue.
 */
int do_wait_intr(wait_queue_head_t *wq, wait_queue_entry_t *wait)
{
        if (likely(list_empty(&wait->entry)))
                __add_wait_queue_entry_tail(wq, wait);

        set_current_state(TASK_INTERRUPTIBLE);
        if (signal_pending(current))
                return -ERESTARTSYS;

        spin_unlock(&wq->lock);
        schedule();
        spin_lock(&wq->lock);

        return 0;
}

Contributors

Person                    Tokens   Prop      Commits   CommitProp
Linus Torvalds            68       95.77%    1         33.33%
Ingo Molnar               3        4.23%     2         66.67%
Total                     71       100.00%   3         100.00%

EXPORT_SYMBOL(do_wait_intr);
int do_wait_intr_irq(wait_queue_head_t *wq, wait_queue_entry_t *wait)
{
        if (likely(list_empty(&wait->entry)))
                __add_wait_queue_entry_tail(wq, wait);

        set_current_state(TASK_INTERRUPTIBLE);
        if (signal_pending(current))
                return -ERESTARTSYS;

        spin_unlock_irq(&wq->lock);
        schedule();
        spin_lock_irq(&wq->lock);

        return 0;
}

Contributors

Person                    Tokens   Prop      Commits   CommitProp
Linus Torvalds            68       95.77%    1         33.33%
Ingo Molnar               3        4.23%     2         66.67%
Total                     71       100.00%   3         100.00%

EXPORT_SYMBOL(do_wait_intr_irq);
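
A hedged usage sketch (hypothetical names, not from this file): do_wait_intr() and do_wait_intr_irq() are meant for callers that check their condition while already holding wq->lock, in the style of the wait_event_interruptible_locked() helpers. A hand-rolled caller would look roughly like this:

/* Sketch of a caller that evaluates its condition under wq->lock. */
static int my_wait_locked(wait_queue_head_t *wq, bool *cond)
{
        DEFINE_WAIT(wait);
        int ret = 0;

        spin_lock(&wq->lock);
        while (!*cond) {
                ret = do_wait_intr(wq, &wait);  /* drops and retakes wq->lock around schedule() */
                if (ret)
                        break;                  /* -ERESTARTSYS: a signal arrived */
        }
        __remove_wait_queue(wq, &wait);
        __set_current_state(TASK_RUNNING);
        spin_unlock(&wq->lock);
        return ret;
}
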
/**
 * finish_wait - clean up after waiting in a queue
 * @wq_head: waitqueue waited on
 * @wq_entry: wait descriptor
 *
 * Sets current thread back to running state and removes
 * the wait descriptor from the given waitqueue if still
 * queued.
 */
void finish_wait(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry)
{
        unsigned long flags;

        __set_current_state(TASK_RUNNING);
        /*
         * We can check for list emptiness outside the lock
         * IFF:
         *  - we use the "careful" check that verifies both
         *    the next and prev pointers, so that there cannot
         *    be any half-pending updates in progress on other
         *    CPU's that we haven't seen yet (and that might
         *    still change the stack area.
         * and
         *  - all other users take the lock (ie we can only
         *    have _one_ other CPU that looks at or modifies
         *    the list).
         */
        if (!list_empty_careful(&wq_entry->entry)) {
                spin_lock_irqsave(&wq_head->lock, flags);
                list_del_init(&wq_entry->entry);
                spin_unlock_irqrestore(&wq_head->lock, flags);
        }
}

Contributors

Person                    Tokens   Prop      Commits   CommitProp
William Lee Irwin III     54       81.82%    1         25.00%
Ingo Molnar               12       18.18%    3         75.00%
Total                     66       100.00%   4         100.00%

EXPORT_SYMBOL(finish_wait);
int autoremove_wake_function(struct wait_queue_entry *wq_entry, unsigned mode, int sync, void *key)
{
        int ret = default_wake_function(wq_entry, mode, sync, key);

        if (ret)
                list_del_init(&wq_entry->entry);

        return ret;
}

Contributors

Person                    Tokens   Prop      Commits   CommitProp
William Lee Irwin III     43       87.76%    1         33.33%
Ingo Molnar               6        12.24%    2         66.67%
Total                     49       100.00%   3         100.00%

EXPORT_SYMBOL(autoremove_wake_function);
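
For reference (an assumption about <linux/wait.h>, which is not shown in this file): DEFINE_WAIT() is the usual way callers obtain a wait entry wired to autoremove_wake_function(); its definition is approximately:

/* Approximate shape of the wait-entry helpers in <linux/wait.h>. */
#define DEFINE_WAIT_FUNC(name, function)                                \
        struct wait_queue_entry name = {                                \
                .private        = current,                              \
                .func           = function,                             \
                .entry          = LIST_HEAD_INIT((name).entry),         \
        }

#define DEFINE_WAIT(name) DEFINE_WAIT_FUNC(name, autoremove_wake_function)
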
static inline bool is_kthread_should_stop(void)
{
        return (current->flags & PF_KTHREAD) && kthread_should_stop();
}

Contributors

Person                    Tokens   Prop      Commits   CommitProp
Peter Zijlstra            21       100.00%   1         100.00%
Total                     21       100.00%   1         100.00%

/*
 * DEFINE_WAIT_FUNC(wait, woken_wake_func);
 *
 * add_wait_queue(&wq_head, &wait);
 * for (;;) {
 *     if (condition)
 *         break;
 *
 *     p->state = mode;                             condition = true;
 *     smp_mb(); // A                               smp_wmb(); // C
 *     if (!wq_entry->flags & WQ_FLAG_WOKEN)        wq_entry->flags |= WQ_FLAG_WOKEN;
 *         schedule()                               try_to_wake_up();
 *     p->state = TASK_RUNNING;                     ~~~~~~~~~~~~~~~~~~
 *     wq_entry->flags &= ~WQ_FLAG_WOKEN;           condition = true;
 *     smp_mb() // B                                smp_wmb(); // C
 *     wq_entry->flags |= WQ_FLAG_WOKEN;
 * }
 * remove_wait_queue(&wq_head, &wait);
 */
long wait_woken(struct wait_queue_entry *wq_entry, unsigned mode, long timeout)
{
        set_current_state(mode); /* A */
        /*
         * The above implies an smp_mb(), which matches with the smp_wmb() from
         * woken_wake_function() such that if we observe WQ_FLAG_WOKEN we must
         * also observe all state before the wakeup.
         */
        if (!(wq_entry->flags & WQ_FLAG_WOKEN) && !is_kthread_should_stop())
                timeout = schedule_timeout(timeout);
        __set_current_state(TASK_RUNNING);

        /*
         * The below implies an smp_mb(), it too pairs with the smp_wmb() from
         * woken_wake_function() such that we must either observe the wait
         * condition being true _OR_ WQ_FLAG_WOKEN such that we will not miss
         * an event.
         */
        smp_store_mb(wq_entry->flags, wq_entry->flags & ~WQ_FLAG_WOKEN); /* B */

        return timeout;
}

Contributors

Person                    Tokens   Prop      Commits   CommitProp
Peter Zijlstra            63       91.30%    3         75.00%
Ingo Molnar               6        8.70%     1         25.00%
Total                     69       100.00%   4         100.00%

EXPORT_SYMBOL(wait_woken);
int woken_wake_function(struct wait_queue_entry *wq_entry, unsigned mode, int sync, void *key)
{
        /*
         * Although this function is called under waitqueue lock, LOCK
         * doesn't imply write barrier and the users expects write
         * barrier semantics on wakeup functions. The following
         * smp_wmb() is equivalent to smp_wmb() in try_to_wake_up()
         * and is paired with smp_store_mb() in wait_woken().
         */
        smp_wmb(); /* C */
        wq_entry->flags |= WQ_FLAG_WOKEN;

        return default_wake_function(wq_entry, mode, sync, key);
}

Contributors

Person                    Tokens   Prop      Commits   CommitProp
Peter Zijlstra            38       88.37%    2         66.67%
Ingo Molnar               5        11.63%    1         33.33%
Total                     43       100.00%   3         100.00%

EXPORT_SYMBOL(woken_wake_function);
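
To tie the diagram above wait_woken() together (a sketch reusing the hypothetical names from earlier, not part of this file): the waiter column of that diagram, written out as code, is roughly:

/* Waiter-side loop corresponding to the left-hand column of the diagram above. */
static long my_driver_wait_woken(long timeout)
{
        DEFINE_WAIT_FUNC(wait, woken_wake_function);

        add_wait_queue(&my_driver_wq, &wait);
        while (timeout) {
                if (my_driver_data_ready)
                        break;
                /* Sleeps unless the waker already set WQ_FLAG_WOKEN. */
                timeout = wait_woken(&wait, TASK_INTERRUPTIBLE, timeout);
        }
        remove_wait_queue(&my_driver_wq, &wait);
        return timeout;
}
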

Overall Contributors

Person                    Tokens   Prop      Commits   CommitProp
Peter Zijlstra            436      26.88%    6         20.69%
William Lee Irwin III     379      23.37%    3         10.34%
Tim Chen                  312      19.24%    2         6.90%
Ingo Molnar               188      11.59%    8         27.59%
Linus Torvalds            159      9.80%     2         6.90%
Oleg Nesterov             143      8.82%     3         10.34%
Paul Gortmaker            1        0.06%     1         3.45%
Andrea Arcangeli          1        0.06%     1         3.45%
Omar Sandoval             1        0.06%     1         3.45%
Michael Opdenacker        1        0.06%     1         3.45%
Nadia Yvette Chambers     1        0.06%     1         3.45%
Total                     1622     100.00%   29        100.00%
Created with cregit.