Contributors: 8
| Author | Tokens | Token Proportion | Commits | Commit Proportion |
| --- | ---: | ---: | ---: | ---: |
| Anna-Maria Gleixner | 69 | 50.00% | 1 | 11.11% |
| Linus Torvalds (pre-git) | 29 | 21.01% | 1 | 11.11% |
| David S. Miller | 23 | 16.67% | 1 | 11.11% |
| Linus Torvalds | 7 | 5.07% | 2 | 22.22% |
| Nicholas Piggin | 7 | 5.07% | 1 | 11.11% |
| Greg Kroah-Hartman | 1 | 0.72% | 1 | 11.11% |
| Paul Gortmaker | 1 | 0.72% | 1 | 11.11% |
| Arun Sharma | 1 | 0.72% | 1 | 11.11% |
| Total | 138 | | 9 | |
// SPDX-License-Identifier: GPL-2.0
#include <linux/export.h>
#include <linux/spinlock.h>
#include <linux/atomic.h>

/*
 * This is an implementation of the notion of "decrement a
 * reference count, and return locked if it decremented to zero".
 *
 * NOTE NOTE NOTE! This is _not_ equivalent to
 *
 *	if (atomic_dec_and_test(&atomic)) {
 *		spin_lock(&lock);
 *		return 1;
 *	}
 *	return 0;
 *
 * because the spin-lock and the decrement must be
 * "atomic".
 */
int _atomic_dec_and_lock(atomic_t *atomic, spinlock_t *lock)
{
	/* Subtract 1 from counter unless that drops it to 0 (ie. it was 1) */
	if (atomic_add_unless(atomic, -1, 1))
		return 0;

	/* Otherwise do it the slow way */
	spin_lock(lock);
	if (atomic_dec_and_test(atomic))
		return 1;
	spin_unlock(lock);
	return 0;
}
EXPORT_SYMBOL(_atomic_dec_and_lock);
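
Callers normally reach this helper through the atomic_dec_and_lock() wrapper macro in <linux/spinlock.h>. A minimal usage sketch follows; struct obj, obj_list_lock and obj_put() are hypothetical names for illustration, not part of this file. The lock protects the lookup structure the object lives on, so only the thread that drops the final reference unlinks and frees the object, and it already holds the lock when it does, closing the race the comment above warns about.

#include <linux/atomic.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

/* Hypothetical refcounted object, linked on a list guarded by obj_list_lock */
struct obj {
	atomic_t refcount;
	struct list_head node;
};

static DEFINE_SPINLOCK(obj_list_lock);

static void obj_put(struct obj *o)
{
	/*
	 * atomic_dec_and_lock() returns 1 only for the caller that took
	 * the count to zero, and then with obj_list_lock held, so no
	 * concurrent lookup can take a new reference while we unlink.
	 */
	if (atomic_dec_and_lock(&o->refcount, &obj_list_lock)) {
		list_del(&o->node);
		spin_unlock(&obj_list_lock);
		kfree(o);
	}
}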
int _atomic_dec_and_lock_irqsave(atomic_t *atomic, spinlock_t *lock,
				 unsigned long *flags)
{
	/* Subtract 1 from counter unless that drops it to 0 (ie. it was 1) */
	if (atomic_add_unless(atomic, -1, 1))
		return 0;

	/* Otherwise do it the slow way */
	spin_lock_irqsave(lock, *flags);
	if (atomic_dec_and_test(atomic))
		return 1;
	spin_unlock_irqrestore(lock, *flags);
	return 0;
}
EXPORT_SYMBOL(_atomic_dec_and_lock_irqsave);
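
The irqsave variant is for locks that are also taken from interrupt context: the slow path must disable interrupts while holding the lock, and the saved interrupt state is handed back through *flags. A hedged sketch, reusing the hypothetical struct obj from above and the atomic_dec_and_lock_irqsave() wrapper macro from <linux/spinlock.h>:

static void obj_put_irqsafe(struct obj *o)
{
	unsigned long flags;

	/* The wrapper macro passes &flags through to the helper above. */
	if (atomic_dec_and_lock_irqsave(&o->refcount, &obj_list_lock, flags)) {
		list_del(&o->node);
		spin_unlock_irqrestore(&obj_list_lock, flags);
		kfree(o);
	}
}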