cregit-Linux: how code gets into the kernel

Release 4.15: kernel/locking/mutex.c

/*
 * kernel/locking/mutex.c
 *
 * Mutexes: blocking mutual exclusion locks
 *
 * Started by Ingo Molnar:
 *
 *  Copyright (C) 2004, 2005, 2006 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
 *
 * Many thanks to Arjan van de Ven, Thomas Gleixner, Steven Rostedt and
 * David Howells for suggestions and improvements.
 *
 *  - Adaptive spinning for mutexes by Peter Zijlstra. (Ported to mainline
 *    from the -rt tree, where it was originally implemented for rtmutexes
 *    by Steven Rostedt, based on work by Gregory Haskins, Peter Morreale
 *    and Sven Dietrich.
 *
 * Also see Documentation/locking/mutex-design.txt.
 */
#include <linux/mutex.h>
#include <linux/ww_mutex.h>
#include <linux/sched/signal.h>
#include <linux/sched/rt.h>
#include <linux/sched/wake_q.h>
#include <linux/sched/debug.h>
#include <linux/export.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/debug_locks.h>
#include <linux/osq_lock.h>

#ifdef CONFIG_DEBUG_MUTEXES
# include "mutex-debug.h"
#else
# include "mutex.h"
#endif


void
__mutex_init(struct mutex *lock, const char *name, struct lock_class_key *key)
{
        atomic_long_set(&lock->owner, 0);
        spin_lock_init(&lock->wait_lock);
        INIT_LIST_HEAD(&lock->wait_list);
#ifdef CONFIG_MUTEX_SPIN_ON_OWNER
        osq_lock_init(&lock->osq);
#endif

        debug_mutex_init(lock, name, key);
}

Contributors

Person                  Tokens  Prop     Commits  CommitProp
Ingo Molnar             52      76.47%   2        28.57%
Waiman Long             8       11.76%   1        14.29%
Jason Low               4       5.88%    2        28.57%
Peter Zijlstra          4       5.88%    2        28.57%
Total                   68      100.00%  7        100.00%

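For orientation, here is a minimal usage sketch (not part of mutex.c) of how client code typically defines or initializes a mutex. The names my_lock and my_data are hypothetical; DEFINE_MUTEX(), mutex_init() and struct mutex are the real API.

#include <linux/mutex.h>

static DEFINE_MUTEX(my_lock);           /* hypothetical: static, compile-time init */

struct my_data {                        /* hypothetical container */
        struct mutex lock;
        int value;
};

static void my_data_setup(struct my_data *d)
{
        mutex_init(&d->lock);           /* macro wrapping __mutex_init() with a static lockdep key */
        d->value = 0;
}
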
EXPORT_SYMBOL(__mutex_init);

/*
 * @owner: contains: 'struct task_struct *' to the current lock owner,
 * NULL means not owned. Since task_struct pointers are aligned at
 * at least L1_CACHE_BYTES, we have low bits to store extra state.
 *
 * Bit0 indicates a non-empty waiter list; unlock must issue a wakeup.
 * Bit1 indicates unlock needs to hand the lock to the top-waiter.
 * Bit2 indicates handoff has been done and we're waiting for pickup.
 */
#define MUTEX_FLAG_WAITERS      0x01
#define MUTEX_FLAG_HANDOFF      0x02
#define MUTEX_FLAG_PICKUP       0x04

#define MUTEX_FLAGS             0x07
static inline struct task_struct *__owner_task(unsigned long owner)
{
        return (struct task_struct *)(owner & ~MUTEX_FLAGS);
}

Contributors

Person                  Tokens  Prop     Commits  CommitProp
Peter Zijlstra          26      100.00%  1        100.00%
Total                   26      100.00%  1        100.00%


static inline unsigned long __owner_flags(unsigned long owner)
{
        return owner & MUTEX_FLAGS;
}

Contributors

Person                  Tokens  Prop     Commits  CommitProp
Peter Zijlstra          17      100.00%  1        100.00%
Total                   17      100.00%  1        100.00%

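To make the encoding concrete, a small illustrative helper (not part of mutex.c, assuming a valid task pointer): because a task_struct is aligned to at least L1_CACHE_BYTES, the three low bits of its address are free to carry MUTEX_FLAG_* state.

static void owner_word_demo(struct task_struct *task)
{
        /* Illustration only: pack a task pointer together with a flag bit. */
        unsigned long owner = (unsigned long)task | MUTEX_FLAG_WAITERS;

        WARN_ON(__owner_task(owner) != task);                /* flags masked off   */
        WARN_ON(__owner_flags(owner) != MUTEX_FLAG_WAITERS); /* pointer masked off */
}
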
/*
 * Trylock variant that returns the owning task on failure.
 */
static inline struct task_struct *__mutex_trylock_or_owner(struct mutex *lock)
{
        unsigned long owner, curr = (unsigned long)current;

        owner = atomic_long_read(&lock->owner);
        for (;;) { /* must loop, can race against a flag */
                unsigned long old, flags = __owner_flags(owner);
                unsigned long task = owner & ~MUTEX_FLAGS;

                if (task) {
                        if (likely(task != curr))
                                break;

                        if (likely(!(flags & MUTEX_FLAG_PICKUP)))
                                break;

                        flags &= ~MUTEX_FLAG_PICKUP;
                } else {
#ifdef CONFIG_DEBUG_MUTEXES
                        DEBUG_LOCKS_WARN_ON(flags & MUTEX_FLAG_PICKUP);
#endif
                }

                /*
                 * We set the HANDOFF bit, we must make sure it doesn't live
                 * past the point where we acquire it. This would be possible
                 * if we (accidentally) set the bit on an unlocked mutex.
                 */
                flags &= ~MUTEX_FLAG_HANDOFF;

                old = atomic_long_cmpxchg_acquire(&lock->owner, owner, curr | flags);
                if (old == owner)
                        return NULL;

                owner = old;
        }

        return __owner_task(owner);
}

Contributors

Person                  Tokens  Prop     Commits  CommitProp
Peter Zijlstra          154     100.00%  3        100.00%
Total                   154     100.00%  3        100.00%

/*
 * Actual trylock that will work on any unlocked state.
 */
static inline bool __mutex_trylock(struct mutex *lock)
{
        return !__mutex_trylock_or_owner(lock);
}

Contributors

Person                  Tokens  Prop     Commits  CommitProp
Peter Zijlstra          19      100.00%  2        100.00%
Total                   19      100.00%  2        100.00%

#ifndef CONFIG_DEBUG_LOCK_ALLOC
/*
 * Lockdep annotations are contained to the slow paths for simplicity.
 * There is nothing that would stop spreading the lockdep annotations outwards
 * except more code.
 */

/*
 * Optimistic trylock that only works in the uncontended case. Make sure to
 * follow with a __mutex_trylock() before failing.
 */
static __always_inline bool __mutex_trylock_fast(struct mutex *lock)
{
        unsigned long curr = (unsigned long)current;

        if (!atomic_long_cmpxchg_acquire(&lock->owner, 0UL, curr))
                return true;

        return false;
}

Contributors

Person                  Tokens  Prop     Commits  CommitProp
Peter Zijlstra          43      100.00%  1        100.00%
Total                   43      100.00%  1        100.00%


static __always_inline bool __mutex_unlock_fast(struct mutex *lock)
{
        unsigned long curr = (unsigned long)current;

        if (atomic_long_cmpxchg_release(&lock->owner, curr, 0UL) == curr)
                return true;

        return false;
}

Contributors

Person                  Tokens  Prop     Commits  CommitProp
Peter Zijlstra          44      100.00%  1        100.00%
Total                   44      100.00%  1        100.00%

#endif
static inline void __mutex_set_flag(struct mutex *lock, unsigned long flag)
{
        atomic_long_or(flag, &lock->owner);
}

Contributors

Person                  Tokens  Prop     Commits  CommitProp
Peter Zijlstra          26      100.00%  1        100.00%
Total                   26      100.00%  1        100.00%


static inline void __mutex_clear_flag(struct mutex *lock, unsigned long flag)
{
        atomic_long_andnot(flag, &lock->owner);
}

Contributors

Person                  Tokens  Prop     Commits  CommitProp
Peter Zijlstra          26      100.00%  1        100.00%
Total                   26      100.00%  1        100.00%


static inline bool __mutex_waiter_is_first(struct mutex *lock, struct mutex_waiter *waiter)
{
        return list_first_entry(&lock->wait_list, struct mutex_waiter, list) == waiter;
}

Contributors

Person                  Tokens  Prop     Commits  CommitProp
Peter Zijlstra          33      100.00%  1        100.00%
Total                   33      100.00%  1        100.00%

/*
 * Give up ownership to a specific task, when @task = NULL, this is equivalent
 * to a regular unlock. Sets PICKUP on a handoff, clears HANDOFF, preserves
 * WAITERS. Provides RELEASE semantics like a regular unlock, the
 * __mutex_trylock() provides a matching ACQUIRE semantics for the handoff.
 */
static void __mutex_handoff(struct mutex *lock, struct task_struct *task)
{
        unsigned long owner = atomic_long_read(&lock->owner);

        for (;;) {
                unsigned long old, new;

#ifdef CONFIG_DEBUG_MUTEXES
                DEBUG_LOCKS_WARN_ON(__owner_task(owner) != current);
                DEBUG_LOCKS_WARN_ON(owner & MUTEX_FLAG_PICKUP);
#endif

                new = (owner & MUTEX_FLAG_WAITERS);
                new |= (unsigned long)task;
                if (task)
                        new |= MUTEX_FLAG_PICKUP;

                old = atomic_long_cmpxchg_release(&lock->owner, owner, new);
                if (old == owner)
                        break;

                owner = old;
        }
}

Contributors

Person                  Tokens  Prop     Commits  CommitProp
Peter Zijlstra          112     100.00%  2        100.00%
Total                   112     100.00%  2        100.00%

#ifndef CONFIG_DEBUG_LOCK_ALLOC
/*
 * We split the mutex lock/unlock logic into separate fastpath and
 * slowpath functions, to reduce the register pressure on the fastpath.
 * We also put the fastpath first in the kernel image, to make sure the
 * branch is predicted by the CPU as default-untaken.
 */
static void __sched __mutex_lock_slowpath(struct mutex *lock);

/**
 * mutex_lock - acquire the mutex
 * @lock: the mutex to be acquired
 *
 * Lock the mutex exclusively for this task. If the mutex is not
 * available right now, it will sleep until it can get it.
 *
 * The mutex must later on be released by the same task that
 * acquired it. Recursive locking is not allowed. The task
 * may not exit without first unlocking the mutex. Also, kernel
 * memory where the mutex resides must not be freed with
 * the mutex still locked. The mutex must first be initialized
 * (or statically defined) before it can be locked. memset()-ing
 * the mutex to 0 is not allowed.
 *
 * (The CONFIG_DEBUG_MUTEXES .config option turns on debugging
 * checks that will enforce the restrictions and will also do
 * deadlock debugging)
 *
 * This function is similar to (but not equivalent to) down().
 */
void __sched mutex_lock(struct mutex *lock)
{
        might_sleep();

        if (!__mutex_trylock_fast(lock))
                __mutex_lock_slowpath(lock);
}

Contributors

Person                  Tokens  Prop     Commits  CommitProp
Ingo Molnar             17      62.96%   2        50.00%
Peter Zijlstra          10      37.04%   2        50.00%
Total                   27      100.00%  4        100.00%

EXPORT_SYMBOL(mutex_lock);
#endif
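
As a usage reminder, a minimal sketch (not part of mutex.c; stats_lock and event_count are hypothetical): mutex_lock() may sleep, so it must only be called from process context, and the same task must later call mutex_unlock().

static DEFINE_MUTEX(stats_lock);        /* hypothetical */
static unsigned long event_count;       /* hypothetical shared state */

static void count_event(void)
{
        mutex_lock(&stats_lock);        /* may sleep; never call from interrupt context */
        event_count++;
        mutex_unlock(&stats_lock);      /* must be released by the acquiring task */
}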
static __always_inline void
ww_mutex_lock_acquired(struct ww_mutex *ww, struct ww_acquire_ctx *ww_ctx)
{
#ifdef CONFIG_DEBUG_MUTEXES
        /*
         * If this WARN_ON triggers, you used ww_mutex_lock to acquire,
         * but released with a normal mutex_unlock in this call.
         *
         * This should never happen, always use ww_mutex_unlock.
         */
        DEBUG_LOCKS_WARN_ON(ww->ctx);

        /*
         * Not quite done after calling ww_acquire_done() ?
         */
        DEBUG_LOCKS_WARN_ON(ww_ctx->done_acquire);

        if (ww_ctx->contending_lock) {
                /*
                 * After -EDEADLK you tried to
                 * acquire a different ww_mutex? Bad!
                 */
                DEBUG_LOCKS_WARN_ON(ww_ctx->contending_lock != ww);

                /*
                 * You called ww_mutex_lock after receiving -EDEADLK,
                 * but 'forgot' to unlock everything else first?
                 */
                DEBUG_LOCKS_WARN_ON(ww_ctx->acquired > 0);
                ww_ctx->contending_lock = NULL;
        }

        /*
         * Naughty, using a different class will lead to undefined behavior!
         */
        DEBUG_LOCKS_WARN_ON(ww_ctx->ww_class != ww->ww_class);
#endif
        ww_ctx->acquired++;
}

Contributors

Person                  Tokens  Prop     Commits  CommitProp
Davidlohr Bueso         89      100.00%  1        100.00%
Total                   89      100.00%  1        100.00%


static inline bool __sched
__ww_ctx_stamp_after(struct ww_acquire_ctx *a, struct ww_acquire_ctx *b)
{
        return a->stamp - b->stamp <= LONG_MAX &&
               (a->stamp != b->stamp || a > b);
}

Contributors

Person                  Tokens  Prop     Commits  CommitProp
Nicolai Hähnle          43      100.00%  1        100.00%
Total                   43      100.00%  1        100.00%

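The stamp test is wraparound-safe: with unsigned arithmetic, a - b <= LONG_MAX holds exactly when a lies at most half the counter space "after" b, even across a wrap; equal stamps fall back to comparing the context pointers as a tiebreaker. A hedged illustration of the idiom on a plain counter (not part of mutex.c): with a = 2 and b = ULONG_MAX, a - b is 3 modulo 2^64, so a is correctly treated as the later stamp.

static bool stamp_after(unsigned long a, unsigned long b)
{
        /* Illustration only: wraparound-safe "a was taken after b". */
        return a - b <= LONG_MAX;
}
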
/*
 * Wake up any waiters that may have to back off when the lock is held by the
 * given context.
 *
 * Due to the invariants on the wait list, this can only affect the first
 * waiter with a context.
 *
 * The current task must not be on the wait list.
 */
static void __sched
__ww_mutex_wakeup_for_backoff(struct mutex *lock, struct ww_acquire_ctx *ww_ctx)
{
        struct mutex_waiter *cur;

        lockdep_assert_held(&lock->wait_lock);

        list_for_each_entry(cur, &lock->wait_list, list) {
                if (!cur->ww_ctx)
                        continue;

                if (cur->ww_ctx->acquired > 0 &&
                    __ww_ctx_stamp_after(cur->ww_ctx, ww_ctx)) {
                        debug_mutex_wake_waiter(lock, cur);
                        wake_up_process(cur->task);
                }

                break;
        }
}

Contributors

Person                  Tokens  Prop     Commits  CommitProp
Davidlohr Bueso         48      57.14%   1        50.00%
Nicolai Hähnle          36      42.86%   1        50.00%
Total                   84      100.00%  2        100.00%

/*
 * After acquiring lock with fastpath or when we lost out in contested
 * slowpath, set ctx and wake up any waiters so they can recheck.
 */
static __always_inline void
ww_mutex_set_context_fastpath(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
{
        ww_mutex_lock_acquired(lock, ctx);

        lock->ctx = ctx;

        /*
         * The lock->ctx update should be visible on all cores before
         * the atomic read is done, otherwise contended waiters might be
         * missed. The contended waiters will either see ww_ctx == NULL
         * and keep spinning, or it will acquire wait_lock, add itself
         * to waiter list and sleep.
         */
        smp_mb(); /* ^^^ */

        /*
         * Check if lock is contended, if not there is nobody to wake up
         */
        if (likely(!(atomic_long_read(&lock->base.owner) & MUTEX_FLAG_WAITERS)))
                return;

        /*
         * Uh oh, we raced in fastpath, wake up everyone in this case,
         * so they can see the new lock->ctx.
         */
        spin_lock(&lock->base.wait_lock);
        __ww_mutex_wakeup_for_backoff(&lock->base, ctx);
        spin_unlock(&lock->base.wait_lock);
}

Contributors

Person                  Tokens  Prop     Commits  CommitProp
Davidlohr Bueso         77      87.50%   2        40.00%
Peter Zijlstra          9       10.23%   2        40.00%
Nicolai Hähnle          2       2.27%    1        20.00%
Total                   88      100.00%  5        100.00%

/*
 * After acquiring lock in the slowpath set ctx.
 *
 * Unlike for the fast path, the caller ensures that waiters are woken up where
 * necessary.
 *
 * Callers must hold the mutex wait_lock.
 */
static __always_inline void
ww_mutex_set_context_slowpath(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
{
        ww_mutex_lock_acquired(lock, ctx);
        lock->ctx = ctx;
}

Contributors

Person                  Tokens  Prop     Commits  CommitProp
Davidlohr Bueso         30      100.00%  1        100.00%
Total                   30      100.00%  1        100.00%

#ifdef CONFIG_MUTEX_SPIN_ON_OWNER
static inline bool ww_mutex_spin_on_owner(struct mutex *lock,
                                          struct ww_acquire_ctx *ww_ctx,
                                          struct mutex_waiter *waiter)
{
        struct ww_mutex *ww;

        ww = container_of(lock, struct ww_mutex, base);

        /*
         * If ww->ctx is set the contents are undefined, only
         * by acquiring wait_lock there is a guarantee that
         * they are not invalid when reading.
         *
         * As such, when deadlock detection needs to be
         * performed the optimistic spinning cannot be done.
         *
         * Check this in every inner iteration because we may
         * be racing against another thread's ww_mutex_lock.
         */
        if (ww_ctx->acquired > 0 && READ_ONCE(ww->ctx))
                return false;

        /*
         * If we aren't on the wait list yet, cancel the spin
         * if there are waiters. We want to avoid stealing the
         * lock from a waiter with an earlier stamp, since the
         * other thread may already own a lock that we also
         * need.
         */
        if (!waiter && (atomic_long_read(&lock->owner) & MUTEX_FLAG_WAITERS))
                return false;

        /*
         * Similarly, stop spinning if we are no longer the
         * first waiter.
         */
        if (waiter && !__mutex_waiter_is_first(lock, waiter))
                return false;

        return true;
}

Contributors

Person                  Tokens  Prop     Commits  CommitProp
Nicolai Hähnle          98      100.00%  1        100.00%
Total                   98      100.00%  1        100.00%

/*
 * Look out! "owner" is an entirely speculative pointer access and not
 * reliable.
 *
 * "noinline" so that this function shows up on perf profiles.
 */
static noinline
bool mutex_spin_on_owner(struct mutex *lock, struct task_struct *owner,
                         struct ww_acquire_ctx *ww_ctx, struct mutex_waiter *waiter)
{
        bool ret = true;

        rcu_read_lock();
        while (__mutex_owner(lock) == owner) {
                /*
                 * Ensure we emit the owner->on_cpu, dereference _after_
                 * checking lock->owner still matches owner. If that fails,
                 * owner might point to freed memory. If it still matches,
                 * the rcu_read_lock() ensures the memory stays valid.
                 */
                barrier();

                /*
                 * Use vcpu_is_preempted to detect lock holder preemption issue.
                 */
                if (!owner->on_cpu || need_resched() ||
                                vcpu_is_preempted(task_cpu(owner))) {
                        ret = false;
                        break;
                }

                if (ww_ctx && !ww_mutex_spin_on_owner(lock, ww_ctx, waiter)) {
                        ret = false;
                        break;
                }

                cpu_relax();
        }
        rcu_read_unlock();

        return ret;
}

Contributors

Person                  Tokens  Prop     Commits  CommitProp
Waiman Long             39      36.79%   1        12.50%
Nicolai Hähnle          31      29.25%   2        25.00%
Jason Low               23      21.70%   2        25.00%
Pan Xinhui              9       8.49%    1        12.50%
Peter Zijlstra          3       2.83%    1        12.50%
Christian Bornträger    1       0.94%    1        12.50%
Total                   106     100.00%  8        100.00%

/*
 * Initial check for entering the mutex spinning loop
 */
static inline int mutex_can_spin_on_owner(struct mutex *lock)
{
        struct task_struct *owner;
        int retval = 1;

        if (need_resched())
                return 0;

        rcu_read_lock();
        owner = __mutex_owner(lock);

        /*
         * As lock holder preemption issue, we both skip spinning if task is not
         * on cpu or its cpu is preempted
         */
        if (owner)
                retval = owner->on_cpu && !vcpu_is_preempted(task_cpu(owner));
        rcu_read_unlock();

        /*
         * If lock->owner is not set, the mutex has been released. Return true
         * such that we'll trylock in the spin path, which is a faster option
         * than the blocking slow path.
         */
        return retval;
}

Contributors

Person                  Tokens  Prop     Commits  CommitProp
Waiman Long             34      50.75%   1        20.00%
Peter Zijlstra          15      22.39%   2        40.00%
Pan Xinhui              10      14.93%   1        20.00%
Jason Low               8       11.94%   1        20.00%
Total                   67      100.00%  5        100.00%

/*
 * Optimistic spinning.
 *
 * We try to spin for acquisition when we find that the lock owner
 * is currently running on a (different) CPU and while we don't
 * need to reschedule. The rationale is that if the lock owner is
 * running, it is likely to release the lock soon.
 *
 * The mutex spinners are queued up using MCS lock so that only one
 * spinner can compete for the mutex. However, if mutex spinning isn't
 * going to happen, there is no point in going through the lock/unlock
 * overhead.
 *
 * Returns true when the lock was taken, otherwise false, indicating
 * that we need to jump to the slowpath and sleep.
 *
 * The waiter flag is set to true if the spinner is a waiter in the wait
 * queue. The waiter-spinner will spin on the lock directly and concurrently
 * with the spinner at the head of the OSQ, if present, until the owner is
 * changed to itself.
 */
static __always_inline bool
mutex_optimistic_spin(struct mutex *lock, struct ww_acquire_ctx *ww_ctx,
                      const bool use_ww_ctx, struct mutex_waiter *waiter)
{
        if (!waiter) {
                /*
                 * The purpose of the mutex_can_spin_on_owner() function is
                 * to eliminate the overhead of osq_lock() and osq_unlock()
                 * in case spinning isn't possible. As a waiter-spinner
                 * is not going to take OSQ lock anyway, there is no need
                 * to call mutex_can_spin_on_owner().
                 */
                if (!mutex_can_spin_on_owner(lock))
                        goto fail;

                /*
                 * In order to avoid a stampede of mutex spinners trying to
                 * acquire the mutex all at once, the spinners need to take a
                 * MCS (queued) lock first before spinning on the owner field.
                 */
                if (!osq_lock(&lock->osq))
                        goto fail;
        }

        for (;;) {
                struct task_struct *owner;

                /* Try to acquire the mutex... */
                owner = __mutex_trylock_or_owner(lock);
                if (!owner)
                        break;

                /*
                 * There's an owner, wait for it to either
                 * release the lock or go to sleep.
                 */
                if (!mutex_spin_on_owner(lock, owner, ww_ctx, waiter))
                        goto fail_unlock;

                /*
                 * The cpu_relax() call is a compiler barrier which forces
                 * everything in this loop to be re-loaded. We don't need
                 * memory barriers as we'll eventually observe the right
                 * values at the cost of a few extra spins.
                 */
                cpu_relax();
        }

        if (!waiter)
                osq_unlock(&lock->osq);

        return true;


fail_unlock:
        if (!waiter)
                osq_unlock(&lock->osq);

fail:
        /*
         * If we fell out of the spin path because of need_resched(),
         * reschedule now, before we try-lock the mutex. This avoids getting
         * scheduled out right after we obtained the mutex.
         */
        if (need_resched()) {
                /*
                 * We _should_ have TASK_RUNNING here, but just in case
                 * we do not, make it so, otherwise we might get stuck.
                 */
                __set_current_state(TASK_RUNNING);

                schedule_preempt_disabled();
        }

        return false;
}

Contributors

Person                  Tokens  Prop     Commits  CommitProp
Davidlohr Bueso         95      59.01%   1        11.11%
Waiman Long             42      26.09%   1        11.11%
Peter Zijlstra          16      9.94%    4        44.44%
Nicolai Hähnle          7       4.35%    2        22.22%
Christian Bornträger    1       0.62%    1        11.11%
Total                   161     100.00%  9        100.00%

#else
static __always_inline bool
mutex_optimistic_spin(struct mutex *lock, struct ww_acquire_ctx *ww_ctx,
                      const bool use_ww_ctx, struct mutex_waiter *waiter)
{
        return false;
}

Contributors

Person                  Tokens  Prop     Commits  CommitProp
Davidlohr Bueso         23      79.31%   1        25.00%
Nicolai Hähnle          3       10.34%   1        25.00%
Waiman Long             2       6.90%    1        25.00%
Peter Zijlstra          1       3.45%    1        25.00%
Total                   29      100.00%  4        100.00%

#endif

static noinline void __sched __mutex_unlock_slowpath(struct mutex *lock, unsigned long ip);

/**
 * mutex_unlock - release the mutex
 * @lock: the mutex to be released
 *
 * Unlock a mutex that has been locked by this task previously.
 *
 * This function must not be used in interrupt context. Unlocking
 * of a not locked mutex is not allowed.
 *
 * This function is similar to (but not equivalent to) up().
 */
void __sched mutex_unlock(struct mutex *lock)
{
#ifndef CONFIG_DEBUG_LOCK_ALLOC
        if (__mutex_unlock_fast(lock))
                return;
#endif
        __mutex_unlock_slowpath(lock, _RET_IP_);
}

Contributors

Person                  Tokens  Prop     Commits  CommitProp
Davidlohr Bueso         17      54.84%   1        33.33%
Peter Zijlstra          8       25.81%   1        33.33%
Ingo Molnar             6       19.35%   1        33.33%
Total                   31      100.00%  3        100.00%

EXPORT_SYMBOL(mutex_unlock);

/**
 * ww_mutex_unlock - release the w/w mutex
 * @lock: the mutex to be released
 *
 * Unlock a mutex that has been locked by this task previously with any of the
 * ww_mutex_lock* functions (with or without an acquire context). It is
 * forbidden to release the locks after releasing the acquire context.
 *
 * This function must not be used in interrupt context. Unlocking
 * of an unlocked mutex is not allowed.
 */
void __sched ww_mutex_unlock(struct ww_mutex *lock)
{
        /*
         * The unlocking fastpath is the 0->1 transition from 'locked'
         * into 'unlocked' state:
         */
        if (lock->ctx) {
#ifdef CONFIG_DEBUG_MUTEXES
                DEBUG_LOCKS_WARN_ON(!lock->ctx->acquired);
#endif
                if (lock->ctx->acquired > 0)
                        lock->ctx->acquired--;
                lock->ctx = NULL;
        }

        mutex_unlock(&lock->base);
}

Contributors

Person                  Tokens  Prop     Commits  CommitProp
Maarten Lankhorst       65      98.48%   1        50.00%
Peter Zijlstra          1       1.52%    1        50.00%
Total                   66      100.00%  2        100.00%

EXPORT_SYMBOL(ww_mutex_unlock);
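
The intended calling pattern is worth spelling out. Below is a hedged sketch of the classic acquire-context loop, modeled on Documentation/locking/ww-mutex-design.txt; my_ww_class and lock_pair() are hypothetical, while the ww_acquire_*/ww_mutex_* calls are the real API.

static DEFINE_WW_CLASS(my_ww_class);    /* hypothetical class */

static void lock_pair(struct ww_mutex *a, struct ww_mutex *b)  /* hypothetical */
{
        struct ww_acquire_ctx ctx;
        int ret;

        ww_acquire_init(&ctx, &my_ww_class);

        ret = ww_mutex_lock(a, &ctx);   /* first lock of this ctx: no -EDEADLK possible */
        while ((ret = ww_mutex_lock(b, &ctx)) == -EDEADLK) {
                /* Wounded by an older context: back off, then wait for b. */
                ww_mutex_unlock(a);
                ww_mutex_lock_slow(b, &ctx);    /* returns with b held */
                swap(a, b);             /* re-take the dropped lock next iteration */
        }
        ww_acquire_done(&ctx);          /* optional: no further locks in this ctx */

        /* ... both mutexes held ... */

        ww_mutex_unlock(a);
        ww_mutex_unlock(b);
        ww_acquire_fini(&ctx);
}

On -EDEADLK the younger context drops everything it holds, waits for the contended mutex with ww_mutex_lock_slow(), and reacquires in the new order; that guarantees forward progress for the oldest context.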
static inline int __sched
__ww_mutex_lock_check_stamp(struct mutex *lock, struct mutex_waiter *waiter,
                            struct ww_acquire_ctx *ctx)
{
        struct ww_mutex *ww = container_of(lock, struct ww_mutex, base);
        struct ww_acquire_ctx *hold_ctx = READ_ONCE(ww->ctx);
        struct mutex_waiter *cur;

        if (hold_ctx && __ww_ctx_stamp_after(ctx, hold_ctx))
                goto deadlock;

        /*
         * If there is a waiter in front of us that has a context, then its
         * stamp is earlier than ours and we must back off.
         */
        cur = waiter;
        list_for_each_entry_continue_reverse(cur, &lock->wait_list, list) {
                if (cur->ww_ctx)
                        goto deadlock;
        }

        return 0;

deadlock:
#ifdef CONFIG_DEBUG_MUTEXES
        DEBUG_LOCKS_WARN_ON(ctx->contending_lock);
        ctx->contending_lock = ww;
#endif
        return -EDEADLK;
}

Contributors

Person                  Tokens  Prop     Commits  CommitProp
Maarten Lankhorst       50      41.67%   1        14.29%
Nicolai Hähnle          46      38.33%   2        28.57%
Davidlohr Bueso         20      16.67%   3        42.86%
Ingo Molnar             4       3.33%    1        14.29%
Total                   120     100.00%  7        100.00%


static inline int __sched
__ww_mutex_add_waiter(struct mutex_waiter *waiter,
                      struct mutex *lock,
                      struct ww_acquire_ctx *ww_ctx)
{
        struct mutex_waiter *cur;
        struct list_head *pos;

        if (!ww_ctx) {
                list_add_tail(&waiter->list, &lock->wait_list);
                return 0;
        }

        /*
         * Add the waiter before the first waiter with a higher stamp.
         * Waiters without a context are skipped to avoid starving
         * them.
         */
        pos = &lock->wait_list;
        list_for_each_entry_reverse(cur, &lock->wait_list, list) {
                if (!cur->ww_ctx)
                        continue;

                if (__ww_ctx_stamp_after(ww_ctx, cur->ww_ctx)) {
                        /* Back off immediately if necessary. */
                        if (ww_ctx->acquired > 0) {
#ifdef CONFIG_DEBUG_MUTEXES
                                struct ww_mutex *ww;

                                ww = container_of(lock, struct ww_mutex, base);
                                DEBUG_LOCKS_WARN_ON(ww_ctx->contending_lock);
                                ww_ctx->contending_lock = ww;
#endif
                                return -EDEADLK;
                        }

                        break;
                }

                pos = &cur->list;

                /*
                 * Wake up the waiter so that it gets a chance to back
                 * off.
                 */
                if (cur->ww_ctx->acquired > 0) {
                        debug_mutex_wake_waiter(lock, cur);
                        wake_up_process(cur->task);
                }
        }

        list_add_tail(&waiter->list, pos);
        return 0;
}

Contributors

Person                  Tokens  Prop     Commits  CommitProp
Nicolai Hähnle          157     81.35%   2        50.00%
Davidlohr Bueso         19      9.84%    1        25.00%
Maarten Lankhorst       17      8.81%    1        25.00%
Total                   193     100.00%  4        100.00%

/*
 * Lock a mutex (possibly interruptible), slowpath:
 */
static __always_inline int __sched
__mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
                    struct lockdep_map *nest_lock, unsigned long ip,
                    struct ww_acquire_ctx *ww_ctx, const bool use_ww_ctx)
{
        struct mutex_waiter waiter;
        bool first = false;
        struct ww_mutex *ww;
        int ret;

        might_sleep();

        ww = container_of(lock, struct ww_mutex, base);
        if (use_ww_ctx && ww_ctx) {
                if (unlikely(ww_ctx == READ_ONCE(ww->ctx)))
                        return -EALREADY;
        }

        preempt_disable();
        mutex_acquire_nest(&lock->dep_map, subclass, 0, nest_lock, ip);

        if (__mutex_trylock(lock) ||
            mutex_optimistic_spin(lock, ww_ctx, use_ww_ctx, NULL)) {
                /* got the lock, yay! */
                lock_acquired(&lock->dep_map, ip);
                if (use_ww_ctx && ww_ctx)
                        ww_mutex_set_context_fastpath(ww, ww_ctx);
                preempt_enable();
                return 0;
        }

        spin_lock(&lock->wait_lock);
        /*
         * After waiting to acquire the wait_lock, try again.
         */
        if (__mutex_trylock(lock)) {
                if (use_ww_ctx && ww_ctx)
                        __ww_mutex_wakeup_for_backoff(lock, ww_ctx);

                goto skip_wait;
        }

        debug_mutex_lock_common(lock, &waiter);
        debug_mutex_add_waiter(lock, &waiter, current);

        lock_contended(&lock->dep_map, ip);

        if (!use_ww_ctx) {
                /* add waiting tasks to the end of the waitqueue (FIFO): */
                list_add_tail(&waiter.list, &lock->wait_list);

#ifdef CONFIG_DEBUG_MUTEXES
                waiter.ww_ctx = MUTEX_POISON_WW_CTX;
#endif
        } else {
                /* Add in stamp order, waking up waiters that must back off. */
                ret = __ww_mutex_add_waiter(&waiter, lock, ww_ctx);
                if (ret)
                        goto err_early_backoff;

                waiter.ww_ctx = ww_ctx;
        }

        waiter.task = current;

        if (__mutex_waiter_is_first(lock, &waiter))
                __mutex_set_flag(lock, MUTEX_FLAG_WAITERS);

        set_current_state(state);
        for (;;) {
                /*
                 * Once we hold wait_lock, we're serialized against
                 * mutex_unlock() handing the lock off to us, do a trylock
                 * before testing the error conditions to make sure we pick up
                 * the handoff.
                 */
                if (__mutex_trylock(lock))
                        goto acquired;

                /*
                 * Check for signals and wound conditions while holding
                 * wait_lock. This ensures the lock cancellation is ordered
                 * against mutex_unlock() and wake-ups do not go missing.
                 */
                if (unlikely(signal_pending_state(state, current))) {
                        ret = -EINTR;
                        goto err;
                }

                if (use_ww_ctx && ww_ctx && ww_ctx->acquired > 0) {
                        ret = __ww_mutex_lock_check_stamp(lock, &waiter, ww_ctx);
                        if (ret)
                                goto err;
                }

                spin_unlock(&lock->wait_lock);
                schedule_preempt_disabled();

                /*
                 * ww_mutex needs to always recheck its position since its waiter
                 * list is not FIFO ordered.
                 */
                if ((use_ww_ctx && ww_ctx) || !first) {
                        first = __mutex_waiter_is_first(lock, &waiter);
                        if (first)
                                __mutex_set_flag(lock, MUTEX_FLAG_HANDOFF);
                }

                set_current_state(state);
                /*
                 * Here we order against unlock; we must either see it change
                 * state back to RUNNING and fall through the next schedule(),
                 * or we must see its unlock and acquire.
                 */
                if (__mutex_trylock(lock) ||
                    (first && mutex_optimistic_spin(lock, ww_ctx, use_ww_ctx, &waiter)))
                        break;

                spin_lock(&lock->wait_lock);
        }
        spin_lock(&lock->wait_lock);
acquired:
        __set_current_state(TASK_RUNNING);

        mutex_remove_waiter(lock, &waiter, current);
        if (likely(list_empty(&lock->wait_list)))
                __mutex_clear_flag(lock, MUTEX_FLAGS);

        debug_mutex_free_waiter(&waiter);

skip_wait:
        /* got the lock - cleanup and rejoice! */
        lock_acquired(&lock->dep_map, ip);

        if (use_ww_ctx && ww_ctx)
                ww_mutex_set_context_slowpath(ww, ww_ctx);

        spin_unlock(&lock->wait_lock);
        preempt_enable();
        return 0;

err:
        __set_current_state(TASK_RUNNING);
        mutex_remove_waiter(lock, &waiter, current);
err_early_backoff:
        spin_unlock(&lock->wait_lock);
        debug_mutex_free_waiter(&waiter);
        mutex_release(&lock->dep_map, 1, ip);
        preempt_enable();
        return ret;
}

Contributors

Person                  Tokens  Prop     Commits  CommitProp
Peter Zijlstra          184     29.87%   13       35.14%
Ingo Molnar             115     18.67%   4        10.81%
Nicolai Hähnle          101     16.40%   6        16.22%
Maarten Lankhorst       97      15.75%   1        2.70%
Davidlohr Bueso         59      9.58%    7        18.92%
Chris Wilson            29      4.71%    1        2.70%
Waiman Long             21      3.41%    2        5.41%
Tetsuo Handa            7       1.14%    1        2.70%
Roman Zippel            2       0.32%    1        2.70%
Thomas Gleixner         1       0.16%    1        2.70%
Total                   616     100.00%  37       100.00%


static int __sched
__mutex_lock(struct mutex *lock, long state, unsigned int subclass,
             struct lockdep_map *nest_lock, unsigned long ip)
{
        return __mutex_lock_common(lock, state, subclass, nest_lock, ip, NULL, false);
}

Contributors

Person                  Tokens  Prop     Commits  CommitProp
Peter Zijlstra          46      100.00%  1        100.00%
Total                   46      100.00%  1        100.00%


static int __sched
__ww_mutex_lock(struct mutex *lock, long state, unsigned int subclass,
                struct lockdep_map *nest_lock, unsigned long ip,
                struct ww_acquire_ctx *ww_ctx)
{
        return __mutex_lock_common(lock, state, subclass, nest_lock, ip, ww_ctx, true);
}

Contributors

Person                  Tokens  Prop     Commits  CommitProp
Peter Zijlstra          51      100.00%  1        100.00%
Total                   51      100.00%  1        100.00%

#ifdef CONFIG_DEBUG_LOCK_ALLOC
void __sched
mutex_lock_nested(struct mutex *lock, unsigned int subclass)
{
        __mutex_lock(lock, TASK_UNINTERRUPTIBLE, subclass, NULL, _RET_IP_);
}

Contributors

Person                  Tokens  Prop     Commits  CommitProp
Ingo Molnar             23      82.14%   1        25.00%
Peter Zijlstra          5       17.86%   3        75.00%
Total                   28      100.00%  4        100.00%

EXPORT_SYMBOL_GPL(mutex_lock_nested);
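
Subclasses exist for the case where two locks of one lockdep class must legitimately nest, e.g. two instances of the same structure. A hedged sketch (struct foo and lock_two_foos() are hypothetical; SINGLE_DEPTH_NESTING is the real lockdep constant):

struct foo {                            /* hypothetical */
        struct mutex lock;
};

static void lock_two_foos(struct foo *a, struct foo *b)
{
        if (a > b)
                swap(a, b);             /* stable locking order avoids ABBA deadlock */

        mutex_lock(&a->lock);
        /* Same lock class nested once: tell lockdep via a distinct subclass. */
        mutex_lock_nested(&b->lock, SINGLE_DEPTH_NESTING);
}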
void __sched
_mutex_lock_nest_lock(struct mutex *lock, struct lockdep_map *nest)
{
        __mutex_lock(lock, TASK_UNINTERRUPTIBLE, 0, nest, _RET_IP_);
}

Contributors

Person                  Tokens  Prop     Commits  CommitProp
Peter Zijlstra          29      100.00%  2        100.00%
Total                   29      100.00%  2        100.00%

EXPORT_SYMBOL_GPL(_mutex_lock_nest_lock);
int __sched
mutex_lock_killable_nested(struct mutex *lock, unsigned int subclass)
{
        return __mutex_lock(lock, TASK_KILLABLE, subclass, NULL, _RET_IP_);
}

Contributors

Person                  Tokens  Prop     Commits  CommitProp
Liam R. Howlett         20      68.97%   1        25.00%
Maarten Lankhorst       6       20.69%   1        25.00%
Neil Brown              2       6.90%    1        25.00%
Peter Zijlstra          1       3.45%    1        25.00%
Total                   29      100.00%  4        100.00%

EXPORT_SYMBOL_GPL(mutex_lock_killable_nested);
int __sched
mutex_lock_interruptible_nested(struct mutex *lock, unsigned int subclass)
{
        return __mutex_lock(lock, TASK_INTERRUPTIBLE, subclass, NULL, _RET_IP_);
}

Contributors

Person                  Tokens  Prop     Commits  CommitProp
Maarten Lankhorst       28      96.55%   1        50.00%
Peter Zijlstra          1       3.45%    1        50.00%
Total                   29      100.00%  2        100.00%

EXPORT_SYMBOL_GPL(mutex_lock_interruptible_nested);
void __sched
mutex_lock_io_nested(struct mutex *lock, unsigned int subclass)
{
        int token;

        might_sleep();

        token = io_schedule_prepare();
        __mutex_lock_common(lock, TASK_UNINTERRUPTIBLE,
                            subclass, NULL, _RET_IP_, NULL, 0);
        io_schedule_finish(token);
}

Contributors

Person                  Tokens  Prop     Commits  CommitProp
Tejun Heo               48      100.00%  1        100.00%
Total                   48      100.00%  1        100.00%

EXPORT_SYMBOL_GPL(mutex_lock_io_nested);
static inline int
ww_mutex_deadlock_injection(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
{
#ifdef CONFIG_DEBUG_WW_MUTEX_SLOWPATH
        unsigned tmp;

        if (ctx->deadlock_inject_countdown-- == 0) {
                tmp = ctx->deadlock_inject_interval;
                if (tmp > UINT_MAX/4)
                        tmp = UINT_MAX;
                else
                        tmp = tmp*2 + tmp + tmp/2;

                ctx->deadlock_inject_interval = tmp;
                ctx->deadlock_inject_countdown = tmp;
                ctx->contending_lock = lock;

                ww_mutex_unlock(lock);

                return -EDEADLK;
        }
#endif

        return 0;
}

Contributors

Person                  Tokens  Prop     Commits  CommitProp
Daniel Vetter           97      100.00%  1        100.00%
Total                   97      100.00%  1        100.00%


int __sched
ww_mutex_lock(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
{
        int ret;

        might_sleep();
        ret = __ww_mutex_lock(&lock->base, TASK_UNINTERRUPTIBLE,
                              0, ctx ? &ctx->dep_map : NULL, _RET_IP_,
                              ctx);
        if (!ret && ctx && ctx->acquired > 1)
                return ww_mutex_deadlock_injection(lock, ctx);

        return ret;
}

Contributors

Person                  Tokens  Prop     Commits  CommitProp
Maarten Lankhorst       34      46.58%   2        25.00%
Daniel Vetter           26      35.62%   1        12.50%
Nicolai Hähnle          7       9.59%    2        25.00%
Liam R. Howlett         4       5.48%    1        12.50%
Peter Zijlstra          2       2.74%    2        25.00%
Total                   73      100.00%  8        100.00%

EXPORT_SYMBOL_GPL(ww_mutex_lock);
int __sched
ww_mutex_lock_interruptible(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
{
        int ret;

        might_sleep();
        ret = __ww_mutex_lock(&lock->base, TASK_INTERRUPTIBLE,
                              0, ctx ? &ctx->dep_map : NULL, _RET_IP_,
                              ctx);
        if (!ret && ctx && ctx->acquired > 1)
                return ww_mutex_deadlock_injection(lock, ctx);

        return ret;
}

Contributors

Person                  Tokens  Prop     Commits  CommitProp
Daniel Vetter           26      35.62%   1        10.00%
Neil Brown              18      24.66%   1        10.00%
Maarten Lankhorst       16      21.92%   2        20.00%
Nicolai Hähnle          7       9.59%    2        20.00%
Peter Zijlstra          4       5.48%    3        30.00%
Liam R. Howlett         2       2.74%    1        10.00%
Total                   73      100.00%  10       100.00%

EXPORT_SYMBOL_GPL(ww_mutex_lock_interruptible);

#endif

/*
 * Release the lock, slowpath:
 */
static noinline void __sched __mutex_unlock_slowpath(struct mutex *lock, unsigned long ip)
{
        struct task_struct *next = NULL;
        DEFINE_WAKE_Q(wake_q);
        unsigned long owner;

        mutex_release(&lock->dep_map, 1, ip);

        /*
         * Release the lock before (potentially) taking the spinlock such that
         * other contenders can get on with things ASAP.
         *
         * Except when HANDOFF, in that case we must not clear the owner field,
         * but instead set it to the top waiter.
         */
        owner = atomic_long_read(&lock->owner);
        for (;;) {
                unsigned long old;

#ifdef CONFIG_DEBUG_MUTEXES
                DEBUG_LOCKS_WARN_ON(__owner_task(owner) != current);
                DEBUG_LOCKS_WARN_ON(owner & MUTEX_FLAG_PICKUP);
#endif

                if (owner & MUTEX_FLAG_HANDOFF)
                        break;

                old = atomic_long_cmpxchg_release(&lock->owner, owner,
                                                  __owner_flags(owner));
                if (old == owner) {
                        if (owner & MUTEX_FLAG_WAITERS)
                                break;

                        return;
                }

                owner = old;
        }

        spin_lock(&lock->wait_lock);
        debug_mutex_unlock(lock);
        if (!list_empty(&lock->wait_list)) {
                /* get the first entry from the wait-list: */
                struct mutex_waiter *waiter =
                        list_first_entry(&lock->wait_list,
                                         struct mutex_waiter, list);

                next = waiter->task;

                debug_mutex_wake_waiter(lock, waiter);
                wake_q_add(&wake_q, next);
        }

        if (owner & MUTEX_FLAG_HANDOFF)
                __mutex_handoff(lock, next);

        spin_unlock(&lock->wait_lock);

        wake_up_q(&wake_q);
}

Contributors

Person                  Tokens  Prop     Commits  CommitProp
Peter Zijlstra          128     56.64%   4        36.36%
Ingo Molnar             72      31.86%   3        27.27%
Davidlohr Bueso         17      7.52%    2        18.18%
Jason Low               8       3.54%    1        9.09%
Waiman Long             1       0.44%    1        9.09%
Total                   226     100.00%  11       100.00%

#ifndef CONFIG_DEBUG_LOCK_ALLOC
/*
 * Here come the less common (and hence less performance-critical) APIs:
 * mutex_lock_interruptible() and mutex_trylock().
 */
static noinline int __sched
__mutex_lock_killable_slowpath(struct mutex *lock);

static noinline int __sched
__mutex_lock_interruptible_slowpath(struct mutex *lock);

/**
 * mutex_lock_interruptible - acquire the mutex, interruptible
 * @lock: the mutex to be acquired
 *
 * Lock the mutex like mutex_lock(), and return 0 if the mutex has
 * been acquired or sleep until the mutex becomes available. If a
 * signal arrives while waiting for the lock then this function
 * returns -EINTR.
 *
 * This function is similar to (but not equivalent to) down_interruptible().
 */
int __sched mutex_lock_interruptible(struct mutex *lock)
{
        might_sleep();

        if (__mutex_trylock_fast(lock))
                return 0;

        return __mutex_lock_interruptible_slowpath(lock);
}

Contributors

Person                  Tokens  Prop     Commits  CommitProp
Ingo Molnar             15      50.00%   2        40.00%
Peter Zijlstra          8       26.67%   2        40.00%
Maarten Lankhorst       7       23.33%   1        20.00%
Total                   30      100.00%  5        100.00%

EXPORT_SYMBOL(mutex_lock_interruptible);
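
Typical use is a syscall-adjacent path that should give up the wait when a signal arrives; a minimal sketch (cfg_lock, cfg_value and my_set_cfg() are hypothetical):

static DEFINE_MUTEX(cfg_lock);          /* hypothetical */
static int cfg_value;                   /* hypothetical shared state */

static long my_set_cfg(int v)
{
        if (mutex_lock_interruptible(&cfg_lock))
                return -ERESTARTSYS;    /* a signal interrupted the sleep */

        cfg_value = v;
        mutex_unlock(&cfg_lock);
        return 0;
}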
int __sched mutex_lock_killable(struct mutex *lock)
{
        might_sleep();

        if (__mutex_trylock_fast(lock))
                return 0;

        return __mutex_lock_killable_slowpath(lock);
}

Contributors

Person                  Tokens  Prop     Commits  CommitProp
Liam R. Howlett         15      50.00%   1        25.00%
Peter Zijlstra          8       26.67%   2        50.00%
Maarten Lankhorst       7       23.33%   1        25.00%
Total                   30      100.00%  4        100.00%

EXPORT_SYMBOL(mutex_lock_killable);
void __sched mutex_lock_io(struct mutex *lock)
{
        int token;

        token = io_schedule_prepare();
        mutex_lock(lock);
        io_schedule_finish(token);
}

Contributors

Person                  Tokens  Prop     Commits  CommitProp
Tejun Heo               29      100.00%  1        100.00%
Total                   29      100.00%  1        100.00%

EXPORT_SYMBOL_GPL(mutex_lock_io);
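
mutex_lock_io() behaves like mutex_lock() but, via io_schedule_prepare()/io_schedule_finish(), accounts the sleep as iowait, which fits locks that effectively stand in for device I/O. A hedged sketch (dev_io_lock and issue_device_request() are hypothetical):

static DEFINE_MUTEX(dev_io_lock);       /* hypothetical */

static void issue_device_request(void)
{
        mutex_lock_io(&dev_io_lock);    /* waiting here is counted as iowait */
        /* ... talk to the hardware ... */
        mutex_unlock(&dev_io_lock);
}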
static noinline void __sched
__mutex_lock_slowpath(struct mutex *lock)
{
        __mutex_lock(lock, TASK_UNINTERRUPTIBLE, 0, NULL, _RET_IP_);
}

Contributors

Person                  Tokens  Prop     Commits  CommitProp
Peter Zijlstra          25      96.15%   4        80.00%
Harvey Harrison         1       3.85%    1        20.00%
Total                   26      100.00%  5        100.00%


static noinline int __sched
__mutex_lock_killable_slowpath(struct mutex *lock)
{
        return __mutex_lock(lock, TASK_KILLABLE, 0, NULL, _RET_IP_);
}

Contributors

Person                  Tokens  Prop     Commits  CommitProp
Liam R. Howlett         18      66.67%   1        14.29%
Peter Zijlstra          4       14.81%   3        42.86%
Maarten Lankhorst       2       7.41%    1        14.29%
Ingo Molnar             2       7.41%    1        14.29%
Harvey Harrison         1       3.70%    1        14.29%
Total                   27      100.00%  7        100.00%


static noinline int __sched
__mutex_lock_interruptible_slowpath(struct mutex *lock)
{
        return __mutex_lock(lock, TASK_INTERRUPTIBLE, 0, NULL, _RET_IP_);
}

Contributors

Person                  Tokens  Prop     Commits  CommitProp
Ingo Molnar             13      48.15%   2        25.00%
Peter Zijlstra          5       18.52%   3        37.50%
Maarten Lankhorst       5       18.52%   2        25.00%
Liam R. Howlett         4       14.81%   1        12.50%
Total                   27      100.00%  8        100.00%


static noinline int __sched
__ww_mutex_lock_slowpath(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
{
        return __ww_mutex_lock(&lock->base, TASK_UNINTERRUPTIBLE, 0,
                               NULL, _RET_IP_, ctx);
}

Contributors

Person                  Tokens  Prop     Commits  CommitProp
Maarten Lankhorst       36      97.30%   1        50.00%
Peter Zijlstra          1       2.70%    1        50.00%
Total                   37      100.00%  2        100.00%


static noinline int __sched
__ww_mutex_lock_interruptible_slowpath(struct ww_mutex *lock,
                                       struct ww_acquire_ctx *ctx)
{
        return __ww_mutex_lock(&lock->base, TASK_INTERRUPTIBLE, 0,
                               NULL, _RET_IP_, ctx);
}

Contributors

Person                  Tokens  Prop     Commits  CommitProp
Maarten Lankhorst       33      89.19%   1        33.33%
Ingo Molnar             3       8.11%    1        33.33%
Peter Zijlstra          1       2.70%    1        33.33%
Total                   37      100.00%  3        100.00%

#endif

/**
 * mutex_trylock - try to acquire the mutex, without waiting
 * @lock: the mutex to be acquired
 *
 * Try to acquire the mutex atomically. Returns 1 if the mutex
 * has been acquired successfully, and 0 on contention.
 *
 * NOTE: this function follows the spin_trylock() convention, so
 * it is negated from the down_trylock() return values! Be careful
 * about this when converting semaphore users to mutexes.
 *
 * This function must not be used in interrupt context. The
 * mutex must be released by the same task that acquired it.
 */
int __sched mutex_trylock(struct mutex *lock)
{
        bool locked = __mutex_trylock(lock);

        if (locked)
                mutex_acquire(&lock->dep_map, 0, 1, _RET_IP_);

        return locked;
}

Contributors

Person                  Tokens  Prop     Commits  CommitProp
Ingo Molnar             29      72.50%   2        50.00%
Peter Zijlstra          11      27.50%   2        50.00%
Total                   40      100.00%  4        100.00%

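Note the convention called out in the kerneldoc above: 1 means acquired, 0 means contended, the opposite of down_trylock(). A minimal sketch (cache_lock and trim_cache() are hypothetical):

static DEFINE_MUTEX(cache_lock);        /* hypothetical */

static void trim_cache(void)
{
        if (!mutex_trylock(&cache_lock))
                return;                 /* 0 means NOT acquired (spin_trylock convention) */

        /* ... fast, non-blocking trimming work ... */
        mutex_unlock(&cache_lock);
}
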
EXPORT_SYMBOL(mutex_trylock);

#ifndef CONFIG_DEBUG_LOCK_ALLOC
int __sched
ww_mutex_lock(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
{
        might_sleep();

        if (__mutex_trylock_fast(&lock->base)) {
                if (ctx)
                        ww_mutex_set_context_fastpath(lock, ctx);
                return 0;
        }

        return __ww_mutex_lock_slowpath(lock, ctx);
}

Contributors

Person                  Tokens  Prop     Commits  CommitProp
Maarten Lankhorst       41      77.36%   1        25.00%
Peter Zijlstra          7       13.21%   1        25.00%
Nicolai Hähnle          5       9.43%    2        50.00%
Total                   53      100.00%  4        100.00%

EXPORT_SYMBOL(ww_mutex_lock);
int __sched
ww_mutex_lock_interruptible(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
{
        might_sleep();

        if (__mutex_trylock_fast(&lock->base)) {
                if (ctx)
                        ww_mutex_set_context_fastpath(lock, ctx);
                return 0;
        }

        return __ww_mutex_lock_interruptible_slowpath(lock, ctx);
}

Contributors

Person                  Tokens  Prop     Commits  CommitProp
Maarten Lankhorst       41      77.36%   1        25.00%
Peter Zijlstra          7       13.21%   1        25.00%
Nicolai Hähnle          5       9.43%    2        50.00%
Total                   53      100.00%  4        100.00%

EXPORT_SYMBOL(ww_mutex_lock_interruptible);

#endif

/**
 * atomic_dec_and_mutex_lock - return holding mutex if we dec to 0
 * @cnt: the atomic which we are to dec
 * @lock: the mutex to return holding if we dec to 0
 *
 * return true and hold lock if we dec to 0, return false otherwise
 */
int atomic_dec_and_mutex_lock(atomic_t *cnt, struct mutex *lock)
{
        /* dec if we can't possibly hit 0 */
        if (atomic_add_unless(cnt, -1, 1))
                return 0;
        /* we might hit 0, so take the lock */
        mutex_lock(lock);
        if (!atomic_dec_and_test(cnt)) {
                /* when we actually did the dec, we didn't hit 0 */
                mutex_unlock(lock);
                return 0;
        }
        /* we hit 0, and we hold the lock */
        return 1;
}

Contributors

Person                  Tokens  Prop     Commits  CommitProp
Andrew Morton           59      100.00%  1        100.00%
Total                   59      100.00%  1        100.00%

EXPORT_SYMBOL(atomic_dec_and_mutex_lock);
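
The classic use is refcount teardown where the final decrement must be atomic with removal from a lookup structure; a hedged sketch (struct my_obj, obj_lock and obj_put() are hypothetical):

struct my_obj {                         /* hypothetical object */
        atomic_t refcnt;
        struct list_head node;          /* on a list protected by obj_lock */
};

static DEFINE_MUTEX(obj_lock);          /* hypothetical */

static void obj_put(struct my_obj *obj)
{
        /* Returns 1, with obj_lock held, only for the final reference. */
        if (!atomic_dec_and_mutex_lock(&obj->refcnt, &obj_lock))
                return;

        list_del(&obj->node);           /* no new lookup can find it now */
        mutex_unlock(&obj_lock);
        kfree(obj);                     /* needs <linux/slab.h> */
}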

Overall Contributors

Person                  Tokens  Prop     Commits  CommitProp
Peter Zijlstra          1151    29.89%   16       20.51%
Nicolai Hähnle          555     14.41%   9        11.54%
Maarten Lankhorst       524     13.61%   4        5.13%
Davidlohr Bueso         509     13.22%   12       15.38%
Ingo Molnar             439     11.40%   8        10.26%
Waiman Long             152     3.95%    5        6.41%
Daniel Vetter           149     3.87%    1        1.28%
Tejun Heo               87      2.26%    1        1.28%
Liam R. Howlett         81      2.10%    1        1.28%
Andrew Morton           65      1.69%    1        1.28%
Jason Low               43      1.12%    6        7.69%
Chris Wilson            29      0.75%    1        1.28%
Neil Brown              24      0.62%    1        1.28%
Pan Xinhui              19      0.49%    1        1.28%
Tetsuo Handa            7       0.18%    1        1.28%
Harvey Harrison         3       0.08%    1        1.28%
Clark Williams          3       0.08%    1        1.28%
Tim Chen                2       0.05%    1        1.28%
Christian Bornträger    2       0.05%    1        1.28%
Roman Zippel            2       0.05%    1        1.28%
Randy Dunlap            1       0.03%    1        1.28%
Paul Gortmaker          1       0.03%    1        1.28%
Mauro Carvalho Chehab   1       0.03%    1        1.28%
Török Edwin             1       0.03%    1        1.28%
Thomas Gleixner         1       0.03%    1        1.28%
Total                   3851    100.00%  78       100.00%
Created with cregit.