Contributors: 9

| Author | Tokens | Token Proportion | Commits | Commit Proportion |
|---|---|---|---|---|
| Andrew Morton | 202 | 61.96% | 4 | 28.57% |
| Linus Torvalds | 40 | 12.27% | 2 | 14.29% |
| Nicholas Piggin | 33 | 10.12% | 2 | 14.29% |
| Rusty Russell | 30 | 9.20% | 1 | 7.14% |
| Ingo Molnar | 15 | 4.60% | 1 | 7.14% |
| Paul Gortmaker | 3 | 0.92% | 1 | 7.14% |
| Arun Sharma | 1 | 0.31% | 1 | 7.14% |
| Frédéric Weisbecker | 1 | 0.31% | 1 | 7.14% |
| Greg Kroah-Hartman | 1 | 0.31% | 1 | 7.14% |
| Total | 326 | | 14 | |
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __LINUX_BIT_SPINLOCK_H
#define __LINUX_BIT_SPINLOCK_H

#include <linux/kernel.h>
#include <linux/preempt.h>
#include <linux/atomic.h>
#include <linux/bug.h>

/*
 * bit-based spin_lock()
 *
 * Don't use this unless you really need to: spin_lock() and spin_unlock()
 * are significantly faster.
 */
static inline void bit_spin_lock(int bitnum, unsigned long *addr)
{
	/*
	 * Assuming the lock is uncontended, this never enters
	 * the body of the outer loop. If it is contended, then
	 * within the inner loop a non-atomic test is used to
	 * busywait with less bus contention for a good time to
	 * attempt to acquire the lock bit.
	 */
	preempt_disable();
#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK)
	while (unlikely(test_and_set_bit_lock(bitnum, addr))) {
		preempt_enable();
		do {
			cpu_relax();
		} while (test_bit(bitnum, addr));
		preempt_disable();
	}
#endif
	__acquire(bitlock);
}
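
/*
 * Example (not part of this header): a minimal sketch of a caller. The
 * structure and bit name below are hypothetical; any bit of a naturally
 * aligned unsigned long can serve as the lock bit. The critical section
 * runs with preemption disabled, so it must not sleep.
 *
 *	#define MY_LOCK_BIT	0
 *
 *	struct my_obj {
 *		unsigned long flags;
 *	};
 *
 *	static void my_obj_update(struct my_obj *obj)
 *	{
 *		bit_spin_lock(MY_LOCK_BIT, &obj->flags);
 *		... short, non-sleeping critical section ...
 *		bit_spin_unlock(MY_LOCK_BIT, &obj->flags);
 *	}
 */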

/*
 * Return true if it was acquired
 */
static inline int bit_spin_trylock(int bitnum, unsigned long *addr)
{
	preempt_disable();
#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK)
	if (unlikely(test_and_set_bit_lock(bitnum, addr))) {
		preempt_enable();
		return 0;
	}
#endif
	__acquire(bitlock);
	return 1;
}
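
/*
 * Example (not part of this header): a sketch of the trylock pattern,
 * reusing the hypothetical my_obj and MY_LOCK_BIT from the sketch
 * above. The caller backs off instead of spinning when the lock bit
 * is already held.
 *
 *	static bool my_obj_try_update(struct my_obj *obj)
 *	{
 *		if (!bit_spin_trylock(MY_LOCK_BIT, &obj->flags))
 *			return false;
 *		... short, non-sleeping critical section ...
 *		bit_spin_unlock(MY_LOCK_BIT, &obj->flags);
 *		return true;
 *	}
 */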

/*
 * bit-based spin_unlock()
 */
static inline void bit_spin_unlock(int bitnum, unsigned long *addr)
{
#ifdef CONFIG_DEBUG_SPINLOCK
	BUG_ON(!test_bit(bitnum, addr));
#endif
#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK)
	clear_bit_unlock(bitnum, addr);
#endif
	preempt_enable();
	__release(bitlock);
}

/*
 * bit-based spin_unlock()
 * non-atomic version, which can be used, e.g., if the bit lock itself is
 * protecting the rest of the flags in the word.
 */
static inline void __bit_spin_unlock(int bitnum, unsigned long *addr)
{
#ifdef CONFIG_DEBUG_SPINLOCK
	BUG_ON(!test_bit(bitnum, addr));
#endif
#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK)
	__clear_bit_unlock(bitnum, addr);
#endif
	preempt_enable();
	__release(bitlock);
}
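
/*
 * Example (not part of this header): a sketch of the case described in
 * the comment above, where the lock bit guards the other flag bits in
 * the same word. Because those bits are only ever modified while the
 * lock bit is held, the non-atomic __set_bit() and the cheaper
 * __bit_spin_unlock() release are safe here. Names are hypothetical.
 *
 *	#define MY_DIRTY_BIT	1
 *
 *	static void my_obj_mark_dirty(struct my_obj *obj)
 *	{
 *		bit_spin_lock(MY_LOCK_BIT, &obj->flags);
 *		__set_bit(MY_DIRTY_BIT, &obj->flags);
 *		__bit_spin_unlock(MY_LOCK_BIT, &obj->flags);
 *	}
 */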

/*
 * Return true if the lock is held.
 */
static inline int bit_spin_is_locked(int bitnum, unsigned long *addr)
{
#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK)
	return test_bit(bitnum, addr);
#elif defined CONFIG_PREEMPT_COUNT
	return preempt_count();
#else
	return 1;
#endif
}
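
/*
 * Example (not part of this header): a sketch of an assertion built on
 * bit_spin_is_locked(), again using the hypothetical my_obj. Note the
 * fallbacks above: on non-SMP builds this only checks that preemption
 * is disabled (or returns 1 unconditionally), so it is suitable for
 * debug checks, not for making locking decisions.
 *
 *	static void my_obj_assert_locked(struct my_obj *obj)
 *	{
 *		WARN_ON_ONCE(!bit_spin_is_locked(MY_LOCK_BIT, &obj->flags));
 *	}
 */
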
#endif /* __LINUX_BIT_SPINLOCK_H */