
Release 4.7 include/asm-generic/qspinlock.h

/*
 * Queued spinlock
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * (C) Copyright 2013-2015 Hewlett-Packard Development Company, L.P.
 * (C) Copyright 2015 Hewlett-Packard Enterprise Development LP
 *
 * Authors: Waiman Long <waiman.long@hpe.com>
 */
#ifndef __ASM_GENERIC_QSPINLOCK_H
#define __ASM_GENERIC_QSPINLOCK_H

#include <asm-generic/qspinlock_types.h>

/**
 * queued_spin_unlock_wait - wait until the _current_ lock holder releases the lock
 * @lock : Pointer to queued spinlock structure
 *
 * There is a very slight possibility of live-lock if the lockers keep coming
 * and the waiter is just unfortunate enough to not see any unlock state.
 */
#ifndef queued_spin_unlock_wait
extern void queued_spin_unlock_wait(struct qspinlock *lock);
#endif
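
queued_spin_unlock_wait() only waits for the current holder and does not acquire the lock, so a typical use is draining a lock before tearing down the object it protects. A minimal sketch (not part of this header; the function name is hypothetical):

/*
 * Hypothetical example: wait out the current lock holder before
 * freeing the protected object. Per the live-lock caveat above,
 * this is only safe once no new lockers can arrive.
 */
static void example_teardown(struct qspinlock *lock)
{
	queued_spin_unlock_wait(lock);
	/* ... free or reinitialize the object guarded by @lock ... */
}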

/**
 * queued_spin_is_locked - is the spinlock locked?
 * @lock: Pointer to queued spinlock structure
 * Return: 1 if it is locked, 0 otherwise
 */
#ifndef queued_spin_is_locked
static __always_inline int queued_spin_is_locked(struct qspinlock *lock)
{
	/*
	 * See queued_spin_unlock_wait().
	 *
	 * Any !0 state indicates it is locked, even if _Q_LOCKED_VAL
	 * isn't immediately observable.
	 */
	return atomic_read(&lock->val);
}

Contributors

Person          Tokens  Prop     Commits  Commit Prop
waiman long     21      95.45%   1        50.00%
peter zijlstra  1       4.55%    1        50.00%
Total           22      100.00%  2        100.00%

#endif
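
Because any nonzero value counts as locked, queued_spin_is_locked() above reports whether anyone holds the lock, not whether the caller does, so it is mostly useful for assertions. A hedged sketch (hypothetical function name):

/* Hypothetical example: warn if the lock is not held by anyone. */
static void example_assert_locked(struct qspinlock *lock)
{
	WARN_ON(!queued_spin_is_locked(lock));
}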
/**
 * queued_spin_value_unlocked - is the spinlock structure unlocked?
 * @lock: queued spinlock structure
 * Return: 1 if it is unlocked, 0 otherwise
 *
 * N.B. Whenever there are tasks waiting for the lock, it is considered
 * locked wrt the lockref code to avoid lock stealing by the lockref
 * code and change things underneath the lock. This also allows some
 * optimizations to be applied without conflict with lockref.
 */
static __always_inline int queued_spin_value_unlocked(struct qspinlock lock)
{
	return !atomic_read(&lock.val);
}

Contributors

Person       Tokens  Prop     Commits  Commit Prop
waiman long  21      100.00%  1        100.00%
Total        21      100.00%  1        100.00%
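
Note that queued_spin_value_unlocked() takes the lock by value, so it can test a snapshot of the lock word that was read together with adjacent data, in the style of the lockref code mentioned above. A sketch under that assumption (the structure and names here are hypothetical):

/*
 * Hypothetical example, lockref-style: the lock and the count are
 * copied out together; the copy is then inspected without touching
 * the live cacheline again.
 */
struct example_lockref {
	struct qspinlock lock;
	int count;
};

static bool example_snapshot_unlocked(struct example_lockref old)
{
	return queued_spin_value_unlocked(old.lock);
}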

/**
 * queued_spin_is_contended - check if the lock is contended
 * @lock : Pointer to queued spinlock structure
 * Return: 1 if lock contended, 0 otherwise
 */
static __always_inline int queued_spin_is_contended(struct qspinlock *lock)
{
	return atomic_read(&lock->val) & ~_Q_LOCKED_MASK;
}

Contributors

Person       Tokens  Prop     Commits  Commit Prop
waiman long  24      100.00%  1        100.00%
Total        24      100.00%  1        100.00%
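
Since any bits outside _Q_LOCKED_MASK mean CPUs are queued behind the holder, a common pattern is to use this check for voluntary lock-breaking. A minimal sketch (hypothetical function; assumes the caller holds the lock):

/*
 * Hypothetical example: briefly drop a held lock when others are
 * queued on it, so they can make progress.
 */
static void example_lock_break(struct qspinlock *lock)
{
	if (queued_spin_is_contended(lock)) {
		queued_spin_unlock(lock);
		/* queued waiters acquire and release here */
		queued_spin_lock(lock);
	}
}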

/**
 * queued_spin_trylock - try to acquire the queued spinlock
 * @lock : Pointer to queued spinlock structure
 * Return: 1 if lock acquired, 0 if failed
 */
static __always_inline int queued_spin_trylock(struct qspinlock *lock)
{
	if (!atomic_read(&lock->val) &&
	   (atomic_cmpxchg_acquire(&lock->val, 0, _Q_LOCKED_VAL) == 0))
		return 1;
	return 0;
}

Contributors

Person       Tokens  Prop     Commits  Commit Prop
waiman long  45      100.00%  2        100.00%
Total        45      100.00%  2        100.00%
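
The plain atomic_read() before the cmpxchg above keeps a failing trylock read-only, avoiding a useless exclusive cacheline acquisition when the lock is busy. Caller-side usage is the usual opportunistic pattern; a sketch (hypothetical names):

/*
 * Hypothetical example: take the fast path if the lock is free,
 * otherwise report failure so the caller can do other work.
 */
static bool example_try_update(struct qspinlock *lock)
{
	if (!queued_spin_trylock(lock))
		return false;	/* busy */
	/* ... short critical section ... */
	queued_spin_unlock(lock);
	return true;
}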

extern void queued_spin_lock_slowpath(struct qspinlock *lock, u32 val);

/**
 * queued_spin_lock - acquire a queued spinlock
 * @lock: Pointer to queued spinlock structure
 */
static __always_inline void queued_spin_lock(struct qspinlock *lock)
{
	u32 val;

	val = atomic_cmpxchg_acquire(&lock->val, 0, _Q_LOCKED_VAL);
	if (likely(val == 0))
		return;
	queued_spin_lock_slowpath(lock, val);
}

Contributors

Person       Tokens  Prop     Commits  Commit Prop
waiman long  46      100.00%  2        100.00%
Total        46      100.00%  2        100.00%
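
The inline body above is only the uncontended fast path: a single acquire-cmpxchg from 0 (unlocked, no waiters) to _Q_LOCKED_VAL, with all queueing pushed out of line into queued_spin_lock_slowpath(). From a caller's point of view it behaves like any spinlock; a minimal sketch (hypothetical structure):

/* Hypothetical example: an ordinary critical section. */
struct example_counter {
	struct qspinlock lock;
	u64 value;
};

static void example_inc(struct example_counter *c)
{
	queued_spin_lock(&c->lock);	/* cmpxchg fast path if val == 0 */
	c->value++;
	queued_spin_unlock(&c->lock);
}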

#ifndef queued_spin_unlock
/**
 * queued_spin_unlock - release a queued spinlock
 * @lock : Pointer to queued spinlock structure
 */
static __always_inline void queued_spin_unlock(struct qspinlock *lock)
{
	/*
	 * smp_mb__before_atomic() in order to guarantee release semantics
	 */
	smp_mb__before_atomic();
	atomic_sub(_Q_LOCKED_VAL, &lock->val);
}

Contributors

Person       Tokens  Prop     Commits  Commit Prop
waiman long  26      100.00%  2        100.00%
Total        26      100.00%  2        100.00%

#endif
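
The #ifndef guard just closed exists so an architecture can supply a cheaper unlock before including this header. A sketch in the style of the x86 override (assumes the locked byte is the low byte of the lock word and can be stored atomically):

/*
 * Sketch of an arch override, defined before including
 * <asm-generic/qspinlock.h>: release the lock with a single
 * store-release of the locked byte instead of a full barrier
 * plus an atomic subtraction.
 */
#define queued_spin_unlock queued_spin_unlock
static __always_inline void queued_spin_unlock(struct qspinlock *lock)
{
	smp_store_release((u8 *)lock, 0);
}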
#ifndef virt_spin_lock
static __always_inline bool virt_spin_lock(struct qspinlock *lock)
{
	return false;
}

Contributors

Person          Tokens  Prop     Commits  Commit Prop
peter zijlstra  15      100.00%  2        100.00%
Total           15      100.00%  2        100.00%

#endif

/*
 * Remapping spinlock architecture specific functions to the corresponding
 * queued spinlock functions.
 */
#define arch_spin_is_locked(l)		queued_spin_is_locked(l)
#define arch_spin_is_contended(l)	queued_spin_is_contended(l)
#define arch_spin_value_unlocked(l)	queued_spin_value_unlocked(l)
#define arch_spin_lock(l)		queued_spin_lock(l)
#define arch_spin_trylock(l)		queued_spin_trylock(l)
#define arch_spin_unlock(l)		queued_spin_unlock(l)
#define arch_spin_lock_flags(l, f)	queued_spin_lock(l)
#define arch_spin_unlock_wait(l)	queued_spin_unlock_wait(l)

#endif /* __ASM_GENERIC_QSPINLOCK_H */
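
For context, an architecture typically adopts these locks by selecting ARCH_USE_QUEUED_SPINLOCKS in its Kconfig and routing its spinlock headers through this file; a rough sketch of such a wrapper header (hypothetical arch, modeled loosely on how x86 wires it up):

/* arch/<arch>/include/asm/qspinlock.h (sketch) */
#ifndef _ASM_EXAMPLEARCH_QSPINLOCK_H
#define _ASM_EXAMPLEARCH_QSPINLOCK_H

/* Optional cheaper overrides (e.g. queued_spin_unlock) go here,
 * before the generic definitions below pick up the defaults. */

#include <asm-generic/qspinlock.h>

#endif /* _ASM_EXAMPLEARCH_QSPINLOCK_H */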

Overall Contributors

Person          Tokens  Prop     Commits  Commit Prop
waiman long     279     86.92%   2        40.00%
peter zijlstra  42      13.08%   3        60.00%
Total           321     100.00%  5        100.00%