Linux 4.7: include/linux/spinlock.h
#ifndef __LINUX_SPINLOCK_H
#define __LINUX_SPINLOCK_H
/*
* include/linux/spinlock.h - generic spinlock/rwlock declarations
*
* here's the role of the various spinlock/rwlock related include files:
*
* on SMP builds:
*
* asm/spinlock_types.h: contains the arch_spinlock_t/arch_rwlock_t and the
* initializers
*
* linux/spinlock_types.h:
* defines the generic type and initializers
*
* asm/spinlock.h: contains the arch_spin_*()/etc. lowlevel
* implementations, mostly inline assembly code
*
* (also included on UP-debug builds:)
*
* linux/spinlock_api_smp.h:
* contains the prototypes for the _spin_*() APIs.
*
* linux/spinlock.h: builds the final spin_*() APIs.
*
* on UP builds:
*
* linux/spinlock_types_up.h:
* contains the generic, simplified UP spinlock type.
* (which is an empty structure on non-debug builds)
*
* linux/spinlock_types.h:
* defines the generic type and initializers
*
* linux/spinlock_up.h:
* contains the arch_spin_*()/etc. versions for UP
* builds (which are NOPs on non-debug, non-preempt
* builds)
*
* (included on UP-non-debug builds:)
*
* linux/spinlock_api_up.h:
* builds the _spin_*() APIs.
*
* linux/spinlock.h: builds the final spin_*() APIs.
*/
#include <linux/typecheck.h>
#include <linux/preempt.h>
#include <linux/linkage.h>
#include <linux/compiler.h>
#include <linux/irqflags.h>
#include <linux/thread_info.h>
#include <linux/kernel.h>
#include <linux/stringify.h>
#include <linux/bottom_half.h>
#include <asm/barrier.h>
/*
* Must define these before including other files; inline functions need them
*/
#define LOCK_SECTION_NAME ".text..lock."KBUILD_BASENAME
#define LOCK_SECTION_START(extra) \
".subsection 1\n\t" \
extra \
".ifndef " LOCK_SECTION_NAME "\n\t" \
LOCK_SECTION_NAME ":\n\t" \
".endif\n"
#define LOCK_SECTION_END \
".previous\n\t"
#define __lockfunc __attribute__((section(".spinlock.text")))
/*
* Pull the arch_spinlock_t and arch_rwlock_t definitions:
*/
#include <linux/spinlock_types.h>
/*
* Pull the arch_spin*() functions/declarations (UP-nondebug doesn't need them):
*/
#ifdef CONFIG_SMP
# include <asm/spinlock.h>
#else
# include <linux/spinlock_up.h>
#endif
#ifdef CONFIG_DEBUG_SPINLOCK
extern void __raw_spin_lock_init(raw_spinlock_t *lock, const char *name,
struct lock_class_key *key);
# define raw_spin_lock_init(lock) \
do { \
static struct lock_class_key __key; \
\
__raw_spin_lock_init((lock), #lock, &__key); \
} while (0)
#else
# define raw_spin_lock_init(lock) \
do { *(lock) = __RAW_SPIN_LOCK_UNLOCKED(lock); } while (0)
#endif
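/*
 * Usage sketch (illustrative, not part of the original header; names are
 * hypothetical): a raw spinlock can be initialized statically with
 * DEFINE_RAW_SPINLOCK() from linux/spinlock_types.h, or at run time with
 * raw_spin_lock_init(), which under CONFIG_DEBUG_SPINLOCK also gives each
 * init site its own lockdep class key.
 */
#if 0	/* example only */
static DEFINE_RAW_SPINLOCK(example_static_lock);	/* compile-time init */

struct example_dev {
	raw_spinlock_t lock;
};

static void example_dev_init(struct example_dev *dev)
{
	raw_spin_lock_init(&dev->lock);		/* run-time init */
}
#endif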
#define raw_spin_is_locked(lock) arch_spin_is_locked(&(lock)->raw_lock)
#ifdef CONFIG_GENERIC_LOCKBREAK
#define raw_spin_is_contended(lock) ((lock)->break_lock)
#else
#ifdef arch_spin_is_contended
#define raw_spin_is_contended(lock) arch_spin_is_contended(&(lock)->raw_lock)
#else
#define raw_spin_is_contended(lock) (((void)(lock), 0))
#endif /*arch_spin_is_contended*/
#endif
/*
* Despite its name it doesn't necessarily have to be a full barrier.
* It only has to guarantee that a STORE issued before the critical
* section cannot be reordered with LOADs and STOREs inside the section.
* spin_lock() itself is a one-way barrier: its LOAD cannot escape out
* of the region. So the default implementation simply ensures that
* a STORE cannot move into the critical section; smp_wmb() serializes
* it with the STORE done by spin_lock().
*/
#ifndef smp_mb__before_spinlock
#define smp_mb__before_spinlock() smp_wmb()
#endif
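/*
 * Illustrative sketch (not from the original header; names are
 * hypothetical): a writer that publishes a flag right before taking a
 * lock can use smp_mb__before_spinlock() to keep that STORE from being
 * reordered into the critical section.
 */
#if 0	/* example only */
static void example_publish(int *flag, raw_spinlock_t *lock)
{
	WRITE_ONCE(*flag, 1);		/* STORE before the critical section */
	smp_mb__before_spinlock();	/* keep the STORE out of the section */
	raw_spin_lock(lock);
	/* accesses here cannot be reordered before the STORE above */
	raw_spin_unlock(lock);
}
#endif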
/**
* raw_spin_unlock_wait - wait until the spinlock gets unlocked
* @lock: the spinlock in question.
*/
#define raw_spin_unlock_wait(lock) arch_spin_unlock_wait(&(lock)->raw_lock)
#ifdef CONFIG_DEBUG_SPINLOCK
extern void do_raw_spin_lock(raw_spinlock_t *lock) __acquires(lock);
#define do_raw_spin_lock_flags(lock, flags) do_raw_spin_lock(lock)
extern int do_raw_spin_trylock(raw_spinlock_t *lock);
extern void do_raw_spin_unlock(raw_spinlock_t *lock) __releases(lock);
#else
static inline void do_raw_spin_lock(raw_spinlock_t *lock) __acquires(lock)
{
__acquire(lock);
arch_spin_lock(&lock->raw_lock);
}
static inline void
do_raw_spin_lock_flags(raw_spinlock_t *lock, unsigned long *flags) __acquires(lock)
{
__acquire(lock);
arch_spin_lock_flags(&lock->raw_lock, *flags);
}
static inline int do_raw_spin_trylock(raw_spinlock_t *lock)
{
return arch_spin_trylock(&(lock)->raw_lock);
}
static inline void do_raw_spin_unlock(raw_spinlock_t *lock) __releases(lock)
{
arch_spin_unlock(&lock->raw_lock);
__release(lock);
}
#endif
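/*
 * Layering note (simplified sketch, not part of the original header): on
 * SMP builds the _raw_spin_lock() API from linux/spinlock_api_smp.h
 * expands to roughly the following; the do_raw_spin_*() helpers above are
 * the arch-facing layer it ends up calling.
 */
#if 0	/* rough sketch, assuming CONFIG_SMP */
static inline void __raw_spin_lock_sketch(raw_spinlock_t *lock)
{
	preempt_disable();				/* no preemption while holding */
	spin_acquire(&lock->dep_map, 0, 0, _RET_IP_);	/* lockdep tracking */
	LOCK_CONTENDED(lock, do_raw_spin_trylock, do_raw_spin_lock);
}
#endif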
/*
* Define the various spin_lock methods. Note that we define these
* regardless of whether CONFIG_SMP or CONFIG_PREEMPT is set. The
* various methods are defined as nops when they are not
* required.
*/
#define raw_spin_trylock(lock) __cond_lock(lock, _raw_spin_trylock(lock))
#define raw_spin_lock(lock) _raw_spin_lock(lock)
#ifdef CONFIG_DEBUG_LOCK_ALLOC
# define raw_spin_lock_nested(lock, subclass) \
_raw_spin_lock_nested(lock, subclass)
# define raw_spin_lock_bh_nested(lock, subclass) \
_raw_spin_lock_bh_nested(lock, subclass)
# define raw_spin_lock_nest_lock(lock, nest_lock) \
do { \
typecheck(struct lockdep_map *, &(nest_lock)->dep_map);\
_raw_spin_lock_nest_lock(lock, &(nest_lock)->dep_map); \
} while (0)
#else
/*
* Always evaluate the 'subclass' argument so that the compiler does not
* warn about set-but-not-used variables when building with
* CONFIG_DEBUG_LOCK_ALLOC=n and with W=1.
*/
# define raw_spin_lock_nested(lock, subclass) \
_raw_spin_lock(((void)(subclass), (lock)))
# define raw_spin_lock_nest_lock(lock, nest_lock) _raw_spin_lock(lock)
# define raw_spin_lock_bh_nested(lock, subclass) _raw_spin_lock_bh(lock)
#endif
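/*
 * Usage sketch (illustrative; names are hypothetical): when two locks of
 * the same lockdep class must be held at once, take them in a stable
 * order and annotate the second acquisition so lockdep does not flag it
 * as a recursive deadlock.
 */
#if 0	/* example only */
static void example_lock_pair(raw_spinlock_t *a, raw_spinlock_t *b)
{
	if (a > b)			/* impose a stable locking order */
		swap(a, b);
	raw_spin_lock(a);
	raw_spin_lock_nested(b, SINGLE_DEPTH_NESTING);
	/* ... both locks held ... */
	raw_spin_unlock(b);
	raw_spin_unlock(a);
}
#endif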
#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK)
#define raw_spin_lock_irqsave(lock, flags) \
do { \
typecheck(unsigned long, flags); \
flags = _raw_spin_lock_irqsave(lock); \
} while (0)
#ifdef CONFIG_DEBUG_LOCK_ALLOC
#define raw_spin_lock_irqsave_nested(lock, flags, subclass) \
do { \
typecheck(unsigned long, flags); \
flags = _raw_spin_lock_irqsave_nested(lock, subclass); \
} while (0)
#else
#define raw_spin_lock_irqsave_nested(lock, flags, subclass) \
do { \
typecheck(unsigned long, flags); \
flags = _raw_spin_lock_irqsave(lock); \
} while (0)
#endif
#else
#define raw_spin_lock_irqsave(lock, flags) \
do { \
typecheck(unsigned long, flags); \
_raw_spin_lock_irqsave(lock, flags); \
} while (0)
#define raw_spin_lock_irqsave_nested(lock, flags, subclass) \
raw_spin_lock_irqsave(lock, flags)
#endif
#define raw_spin_lock_irq(lock) _raw_spin_lock_irq(lock)
#define raw_spin_lock_bh(lock) _raw_spin_lock_bh(lock)
#define raw_spin_unlock(lock) _raw_spin_unlock(lock)
#define raw_spin_unlock_irq(lock) _raw_spin_unlock_irq(lock)
#define raw_spin_unlock_irqrestore(lock, flags) \
do { \
typecheck(unsigned long, flags); \
_raw_spin_unlock_irqrestore(lock, flags); \
} while (0)
#define raw_spin_unlock_bh(lock) _raw_spin_unlock_bh(lock)
#define raw_spin_trylock_bh(lock) \
__cond_lock(lock, _raw_spin_trylock_bh(lock))
#define raw_spin_trylock_irq(lock) \
({ \
local_irq_disable(); \
raw_spin_trylock(lock) ? \
1 : ({ local_irq_enable(); 0; }); \
})
#define raw_spin_trylock_irqsave(lock, flags) \
({ \
local_irq_save(flags); \
raw_spin_trylock(lock) ? \
1 : ({ local_irq_restore(flags); 0; }); \
})
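/*
 * Usage sketch (illustrative; names are hypothetical): the trylock
 * variants return nonzero on success and restore the saved IRQ state on
 * failure, so a caller that must not spin can bail out and retry later.
 */
#if 0	/* example only */
static bool example_try_bump(raw_spinlock_t *lock, unsigned int *counter)
{
	unsigned long flags;

	if (!raw_spin_trylock_irqsave(lock, flags))
		return false;		/* contended; caller retries later */
	(*counter)++;
	raw_spin_unlock_irqrestore(lock, flags);
	return true;
}
#endif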
/**
* raw_spin_can_lock - would raw_spin_trylock() succeed?
* @lock: the spinlock in question.
*/
#define raw_spin_can_lock(lock) (!raw_spin_is_locked(lock))
/* Include rwlock functions */
#include <linux/rwlock.h>
/*
* Pull the _spin_*()/_read_*()/_write_*() functions/declarations:
*/
#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK)
# include <linux/spinlock_api_smp.h>
#else
# include <linux/spinlock_api_up.h>
#endif
/*
* Map the spin_lock functions to the raw variants for PREEMPT_RT=n
*/
static __always_inline raw_spinlock_t *spinlock_check(spinlock_t *lock)
{
return &lock->rlock;
}
#define spin_lock_init(_lock) \
do { \
spinlock_check(_lock); \
raw_spin_lock_init(&(_lock)->rlock); \
} while (0)
static __always_inline void spin_lock(spinlock_t *lock)
{
raw_spin_lock(&lock->rlock);
}
static __always_inline void spin_lock_bh(spinlock_t *lock)
{
raw_spin_lock_bh(&lock->rlock);
}
static __always_inline int spin_trylock(spinlock_t *lock)
{
return raw_spin_trylock(&lock->rlock);
}
#define spin_lock_nested(lock, subclass) \
do { \
raw_spin_lock_nested(spinlock_check(lock), subclass); \
} while (0)
#define spin_lock_bh_nested(lock, subclass) \
do { \
raw_spin_lock_bh_nested(spinlock_check(lock), subclass);\
} while (0)
#define spin_lock_nest_lock(lock, nest_lock) \
do { \
raw_spin_lock_nest_lock(spinlock_check(lock), nest_lock); \
} while (0)
static __always_inline void spin_lock_irq(spinlock_t *lock)
{
raw_spin_lock_irq(&lock->rlock);
}
#define spin_lock_irqsave(lock, flags) \
do { \
raw_spin_lock_irqsave(spinlock_check(lock), flags); \
} while (0)
#define spin_lock_irqsave_nested(lock, flags, subclass) \
do { \
raw_spin_lock_irqsave_nested(spinlock_check(lock), flags, subclass); \
} while (0)
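/*
 * Usage sketch (illustrative; names are hypothetical): the spinlock_t
 * wrappers above are the normal consumer API. The _irqsave variant is the
 * safe default for data shared with an interrupt handler, since it also
 * works when the caller may already have interrupts disabled.
 */
#if 0	/* example only */
struct example_fifo {
	spinlock_t lock;		/* protects head and tail */
	unsigned int head, tail;
};

static void example_fifo_push(struct example_fifo *f)
{
	unsigned long flags;

	spin_lock_irqsave(&f->lock, flags);
	f->head++;			/* ... update shared state ... */
	spin_unlock_irqrestore(&f->lock, flags);
}
#endif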
static __always_inline void spin_unlock(spinlock_t *lock)
{
raw_spin_unlock(&lock->rlock);
}
static __always_inline void spin_unlock_bh(spinlock_t *lock)
{
raw_spin_unlock_bh(&lock->rlock);
}
static __always_inline void spin_unlock_irq(spinlock_t *lock)
{
raw_spin_unlock_irq(&lock->rlock);
}
static __always_inline void spin_unlock_irqrestore(spinlock_t *lock, unsigned long flags)
{
raw_spin_unlock_irqrestore(&lock->rlock, flags);
}
static __always_inline int spin_trylock_bh(spinlock_t *lock)
{
return raw_spin_trylock_bh(&lock->rlock);
}
static __always_inline int spin_trylock_irq(spinlock_t *lock)
{
return raw_spin_trylock_irq(&lock->rlock);
}
#define spin_trylock_irqsave(lock, flags) \
({ \
raw_spin_trylock_irqsave(spinlock_check(lock), flags); \
})
static __always_inline void spin_unlock_wait(spinlock_t *lock)
{
raw_spin_unlock_wait(&lock->rlock);
}
static __always_inline int spin_is_locked(spinlock_t *lock)
{
return raw_spin_is_locked(&lock->rlock);
}
static __always_inline int spin_is_contended(spinlock_t *lock)
{
return raw_spin_is_contended(&lock->rlock);
}
static __always_inline int spin_can_lock(spinlock_t *lock)
{
return raw_spin_can_lock(&lock->rlock);
}
#define assert_spin_locked(lock) assert_raw_spin_locked(&(lock)->rlock)
/*
* Pull the atomic_t declaration:
* (asm-mips/atomic.h needs the above definitions)
*/
#include <linux/atomic.h>
/**
* atomic_dec_and_lock - lock on reaching reference count zero
* @atomic: the atomic counter
* @lock: the spinlock in question
*
* Decrements @atomic by 1. If the result is 0, returns true and locks
* @lock. Returns false for all other cases.
*/
extern int _atomic_dec_and_lock(atomic_t *atomic, spinlock_t *lock);
#define atomic_dec_and_lock(atomic, lock) \
__cond_lock(lock, _atomic_dec_and_lock(atomic, lock))
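/*
 * Usage sketch (illustrative; names are hypothetical): the classic
 * pattern is dropping a reference and taking the list lock only when the
 * count hits zero, so the unlink-and-free path runs exactly once.
 */
#if 0	/* example only */
static void example_put(struct example_obj *obj, spinlock_t *list_lock)
{
	if (atomic_dec_and_lock(&obj->refcount, list_lock)) {
		list_del(&obj->node);		/* assumes linux/list.h */
		spin_unlock(list_lock);
		kfree(obj);			/* assumes linux/slab.h */
	}
}
#endif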
#endif /* __LINUX_SPINLOCK_H */