Release 4.7: include/asm-generic/qspinlock.h
/*
 * Queued spinlock
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * (C) Copyright 2013-2015 Hewlett-Packard Development Company, L.P.
 * (C) Copyright 2015 Hewlett-Packard Enterprise Development LP
 *
 * Authors: Waiman Long <waiman.long@hpe.com>
 */
#ifndef __ASM_GENERIC_QSPINLOCK_H
#define __ASM_GENERIC_QSPINLOCK_H
#include <asm-generic/qspinlock_types.h>
/**
 * queued_spin_unlock_wait - wait until the _current_ lock holder releases the lock
 * @lock: Pointer to queued spinlock structure
 *
 * There is a very slight possibility of live-lock if the lockers keep coming
 * and the waiter is just unfortunate enough to not see any unlock state.
 */
#ifndef queued_spin_unlock_wait
extern void queued_spin_unlock_wait(struct qspinlock *lock);
#endif
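The generic implementation of queued_spin_unlock_wait() lives in kernel/locking/qspinlock.c. As a rough model of its contract only (the kernel's actual code also has to deal with the pending bit and a stricter ordering story), the caller fences and then spins until the locked byte is observed clear; MODEL_Q_LOCKED_MASK and model_unlock_wait() below are my names, not the kernel's:

```c
#include <stdatomic.h>

#define MODEL_Q_LOCKED_MASK 0xffu	/* assumption: low byte encodes "locked" */

/* Userspace model of the contract: wait for the current holder to
 * release, without ever trying to take the lock ourselves. */
static void model_unlock_wait(atomic_uint *val)
{
	/* full fence: order the wait against the caller's earlier accesses */
	atomic_thread_fence(memory_order_seq_cst);

	while (atomic_load_explicit(val, memory_order_acquire) & MODEL_Q_LOCKED_MASK)
		;	/* spin; a fresh locker can slip in, hence the live-lock caveat */
}
```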
/**
 * queued_spin_is_locked - is the spinlock locked?
 * @lock: Pointer to queued spinlock structure
 * Return: 1 if it is locked, 0 otherwise
 */
#ifndef queued_spin_is_locked
static __always_inline int queued_spin_is_locked(struct qspinlock *lock)
{
	/*
	 * See queued_spin_unlock_wait().
	 *
	 * Any !0 state indicates it is locked, even if _Q_LOCKED_VAL
	 * isn't immediately observable.
	 */
	return atomic_read(&lock->val);
}
Contributors

| Person | Tokens | Prop | Commits | CommitProp |
|---|---|---|---|---|
| Waiman Long | 21 | 95.45% | 1 | 50.00% |
| Peter Zijlstra | 1 | 4.55% | 1 | 50.00% |
| Total | 22 | 100.00% | 2 | 100.00% |
#endif
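Because any nonzero value counts as locked, queued_spin_is_locked() is a natural building block for debug assertions. A hypothetical sanity check, not part of this file, might look like:

```c
#include <linux/bug.h>			/* WARN_ON_ONCE() */
#include <asm-generic/qspinlock.h>	/* normally pulled in via <asm/spinlock.h> */

/* Hypothetical debug helper: warn if the protected state is reached
 * while nobody holds the lock. Note this only shows that *someone*
 * holds it, not that the current CPU does. */
static inline void my_assert_qspinlock_held(struct qspinlock *lock)
{
	WARN_ON_ONCE(!queued_spin_is_locked(lock));
}
```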
/**
 * queued_spin_value_unlocked - is the spinlock structure unlocked?
 * @lock: queued spinlock structure
 * Return: 1 if it is unlocked, 0 otherwise
 *
 * N.B. Whenever there are tasks waiting for the lock, it is considered
 * locked wrt the lockref code, to avoid the lockref code stealing the
 * lock and changing things underneath it. This also allows some
 * optimizations to be applied without conflict with lockref.
 */
static __always_inline int queued_spin_value_unlocked(struct qspinlock lock)
{
	return !atomic_read(&lock.val);
}
Contributors

| Person | Tokens | Prop | Commits | CommitProp |
|---|---|---|---|---|
| Waiman Long | 21 | 100.00% | 1 | 100.00% |
| Total | 21 | 100.00% | 1 | 100.00% |
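Note that the lock is passed by value: the check runs on a snapshot and never re-reads the live cacheline, which is exactly what the lockref fast path wants. A hedged sketch of that usage pattern follows; my_can_update_locklessly() is illustrative, not kernel code (in the 4.7 tree, READ_ONCE() still handles a 4-byte aggregate like struct qspinlock):

```c
#include <linux/types.h>	/* bool */
#include <linux/compiler.h>	/* READ_ONCE() */

/* Illustrative helper: decide from a snapshot whether a lockref-style
 * lockless cmpxchg update may even be attempted. */
static inline bool my_can_update_locklessly(struct qspinlock *lockp)
{
	struct qspinlock snapshot = READ_ONCE(*lockp);

	/* Waiters make the lock count as "locked" here, so the
	 * lockless path backs off rather than racing the queue. */
	return queued_spin_value_unlocked(snapshot);
}
```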
/**
 * queued_spin_is_contended - check if the lock is contended
 * @lock: Pointer to queued spinlock structure
 * Return: 1 if lock contended, 0 otherwise
 */
static __always_inline int queued_spin_is_contended(struct qspinlock *lock)
{
	return atomic_read(&lock->val) & ~_Q_LOCKED_MASK;
}
Contributors

| Person | Tokens | Prop | Commits | CommitProp |
|---|---|---|---|---|
| Waiman Long | 24 | 100.00% | 1 | 100.00% |
| Total | 24 | 100.00% | 1 | 100.00% |
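Everything above the locked byte tracks the pending bit and the wait queue, so a nonzero result means someone is already spinning. A hypothetical use, modeled on the kernel's spin_needbreak() idiom, drops a hot lock early so waiters can make progress; the my_*() helpers are assumptions for illustration:

```c
extern bool my_have_work(void);		/* hypothetical */
extern void my_do_one_item(void);	/* hypothetical */

/* Illustrative lock-break loop: release early when waiters pile up. */
static void my_process_all(struct qspinlock *lock)
{
	queued_spin_lock(lock);
	while (my_have_work()) {
		my_do_one_item();
		if (queued_spin_is_contended(lock)) {
			queued_spin_unlock(lock);
			cpu_relax();	/* give a queued waiter a chance */
			queued_spin_lock(lock);
		}
	}
	queued_spin_unlock(lock);
}
```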
/**
 * queued_spin_trylock - try to acquire the queued spinlock
 * @lock: Pointer to queued spinlock structure
 * Return: 1 if lock acquired, 0 if failed
 */
static __always_inline int queued_spin_trylock(struct qspinlock *lock)
{
	if (!atomic_read(&lock->val) &&
	    (atomic_cmpxchg_acquire(&lock->val, 0, _Q_LOCKED_VAL) == 0))
		return 1;
	return 0;
}
Contributors

| Person | Tokens | Prop | Commits | CommitProp |
|---|---|---|---|---|
| Waiman Long | 45 | 100.00% | 2 | 100.00% |
| Total | 45 | 100.00% | 2 | 100.00% |
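The plain atomic_read() in front of the cmpxchg is deliberate: it filters out the already-contended case with a cheap shared-mode read, so the cacheline is only pulled exclusive when the cmpxchg can plausibly succeed. A compilable userspace model of the same read-before-cmpxchg pattern using C11 atomics (the names are mine, not the kernel's):

```c
#include <stdatomic.h>
#include <stdbool.h>

#define MODEL_LOCKED_VAL 1u	/* stands in for _Q_LOCKED_VAL */

static bool model_trylock(atomic_uint *val)
{
	unsigned int expected = 0;

	/* Cheap relaxed load first: if the lock word is nonzero we lose
	 * anyway, so don't bounce the cacheline with a cmpxchg. */
	if (atomic_load_explicit(val, memory_order_relaxed) != 0)
		return false;

	return atomic_compare_exchange_strong_explicit(val, &expected,
			MODEL_LOCKED_VAL,
			memory_order_acquire,	/* acquire on success, like _acquire */
			memory_order_relaxed);	/* failure needs no ordering */
}
```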
extern void queued_spin_lock_slowpath(struct qspinlock *lock, u32 val);
/**
 * queued_spin_lock - acquire a queued spinlock
 * @lock: Pointer to queued spinlock structure
 */
static __always_inline void queued_spin_lock(struct qspinlock *lock)
{
	u32 val;

	val = atomic_cmpxchg_acquire(&lock->val, 0, _Q_LOCKED_VAL);
	if (likely(val == 0))
		return;
	queued_spin_lock_slowpath(lock, val);
}
Contributors

| Person | Tokens | Prop | Commits | CommitProp |
|---|---|---|---|---|
| Waiman Long | 46 | 100.00% | 2 | 100.00% |
| Total | 46 | 100.00% | 2 | 100.00% |
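The fast path is a single acquire cmpxchg from 0 to _Q_LOCKED_VAL; only when that fails (the returned old value is nonzero) does the slow path queue the CPU on an MCS-style wait list. A hypothetical caller, for illustration only (real kernel code goes through the spin_lock()/spin_unlock() wrappers instead):

```c
/* Illustrative critical section built directly on the generic API. */
static void my_counter_inc(struct qspinlock *lock, unsigned long *counter)
{
	queued_spin_lock(lock);		/* uncontended: one cmpxchg and done */
	(*counter)++;			/* protected update */
	queued_spin_unlock(lock);
}
```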
#ifndef queued_spin_unlock
/**
 * queued_spin_unlock - release a queued spinlock
 * @lock: Pointer to queued spinlock structure
 */
static __always_inline void queued_spin_unlock(struct qspinlock *lock)
{
	/*
	 * smp_mb__before_atomic() in order to guarantee release semantics
	 */
	smp_mb__before_atomic();
	atomic_sub(_Q_LOCKED_VAL, &lock->val);
}
Contributors

| Person | Tokens | Prop | Commits | CommitProp |
|---|---|---|---|---|
| Waiman Long | 26 | 100.00% | 2 | 100.00% |
| Total | 26 | 100.00% | 2 | 100.00% |
#endif
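The generic release is deliberately conservative: a full smp_mb__before_atomic() barrier followed by an atomic subtract of _Q_LOCKED_VAL. Architectures with cheaper primitives override it, which is what the surrounding #ifndef is for. A hedged sketch of that kind of override, in the spirit of the x86 version, assuming the locked byte is the low byte of the word (true on little-endian):

```c
#include <linux/types.h>	/* u8 */

/* Sketch of an architecture override: on a strongly ordered,
 * little-endian ISA, dropping the lock is just a release store of 0
 * to the locked byte. */
static __always_inline void my_arch_queued_spin_unlock(struct qspinlock *lock)
{
	smp_store_release((u8 *)lock, 0);	/* clears _Q_LOCKED_VAL only */
}
```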
#ifndef virt_spin_lock
static __always_inline bool virt_spin_lock(struct qspinlock *lock)
{
	return false;
}
Contributors

| Person | Tokens | Prop | Commits | CommitProp |
|---|---|---|---|---|
| Peter Zijlstra | 15 | 100.00% | 2 | 100.00% |
| Total | 15 | 100.00% | 2 | 100.00% |
#endif
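This stub returning false means "no virtualization fallback; proceed with the normal queued path." An architecture running under a hypervisor can override it, because strict FIFO queueing behaves badly when a queued vCPU is preempted. A sketch of such an override, close in spirit to the x86 one; the detection hook here is hypothetical:

```c
extern bool my_running_on_hypervisor(void);	/* hypothetical detection hook */

/* Sketch of a hypervisor fallback: degrade to a plain test-and-set
 * lock so vCPU preemption cannot strand the whole wait queue. */
static __always_inline bool my_virt_spin_lock(struct qspinlock *lock)
{
	if (!my_running_on_hypervisor())
		return false;			/* bare metal: use the queued path */

	do {
		while (atomic_read(&lock->val) != 0)
			cpu_relax();
	} while (atomic_cmpxchg(&lock->val, 0, _Q_LOCKED_VAL) != 0);

	return true;	/* lock acquired; caller skips the queued path */
}
```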
/*
 * Remapping spinlock architecture specific functions to the corresponding
 * queued spinlock functions.
 */
#define arch_spin_is_locked(l)		queued_spin_is_locked(l)
#define arch_spin_is_contended(l)	queued_spin_is_contended(l)
#define arch_spin_value_unlocked(l)	queued_spin_value_unlocked(l)
#define arch_spin_lock(l)		queued_spin_lock(l)
#define arch_spin_trylock(l)		queued_spin_trylock(l)
#define arch_spin_unlock(l)		queued_spin_unlock(l)
#define arch_spin_lock_flags(l, f)	queued_spin_lock(l)
#define arch_spin_unlock_wait(l)	queued_spin_unlock_wait(l)
#endif /* __ASM_GENERIC_QSPINLOCK_H */
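With these mappings in place, the generic spinlock layer's arch_spin_*() hooks all land on the queued implementation. Illustrative only; this assumes an architecture where arch_spinlock_t is typedef'd to struct qspinlock and __ARCH_SPIN_LOCK_UNLOCKED comes from qspinlock_types.h:

```c
#include <linux/bug.h>	/* BUG_ON() */

/* Illustrative: exercising the remapped arch-level API directly;
 * normal kernel code uses the spin_lock()/spin_unlock() wrappers. */
static void my_arch_api_example(void)
{
	arch_spinlock_t lock = __ARCH_SPIN_LOCK_UNLOCKED;

	arch_spin_lock(&lock);			/* -> queued_spin_lock() */
	BUG_ON(!arch_spin_is_locked(&lock));	/* -> queued_spin_is_locked() */
	arch_spin_unlock(&lock);		/* -> queued_spin_unlock() */
}
```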
Overall Contributors

| Person | Tokens | Prop | Commits | CommitProp |
|---|---|---|---|---|
| Waiman Long | 279 | 86.92% | 2 | 40.00% |
| Peter Zijlstra | 42 | 13.08% | 3 | 60.00% |
| Total | 321 | 100.00% | 5 | 100.00% |