Contributors: 18

| Author | Tokens | Token Proportion | Commits | Commit Proportion |
|---|---|---|---|---|
| Keith M. Wesolowski | 238 | 35.52% | 2 | 5.71% |
| David S. Miller | 111 | 16.57% | 2 | 5.71% |
| Nicholas Piggin | 77 | 11.49% | 2 | 5.71% |
| Linus Torvalds (pre-git) | 63 | 9.40% | 10 | 28.57% |
| Peter Zijlstra | 45 | 6.72% | 3 | 8.57% |
| Kyle McMartin | 39 | 5.82% | 1 | 2.86% |
| Al Viro | 29 | 4.33% | 3 | 8.57% |
| Andreas Larsson | 14 | 2.09% | 1 | 2.86% |
| Mark Rutland | 10 | 1.49% | 1 | 2.86% |
| Robert Reif | 10 | 1.49% | 1 | 2.86% |
| Paul Jackson | 7 | 1.04% | 1 | 2.86% |
| Arnaldo Carvalho de Melo | 6 | 0.90% | 1 | 2.86% |
| Alexander Lobakin | 6 | 0.90% | 1 | 2.86% |
| Andrew Morton | 5 | 0.75% | 2 | 5.71% |
| Ingo Molnar | 4 | 0.60% | 1 | 2.86% |
| Thomas Gleixner | 4 | 0.60% | 1 | 2.86% |
| Greg Kroah-Hartman | 1 | 0.15% | 1 | 2.86% |
| Arun Sharma | 1 | 0.15% | 1 | 2.86% |
| Total | 670 | | 35 | |
// SPDX-License-Identifier: GPL-2.0
/*
* atomic32.c: 32-bit atomic_t implementation
*
* Copyright (C) 2004 Keith M Wesolowski
* Copyright (C) 2007 Kyle McMartin
*
* Based on asm-parisc/atomic.h Copyright (C) 2000 Philipp Rumpf
*/
#include <linux/atomic.h>
#include <linux/spinlock.h>
#include <linux/module.h>

#ifdef CONFIG_SMP
#define ATOMIC_HASH_SIZE	4
#define ATOMIC_HASH(a)	(&__atomic_hash[(((unsigned long)a)>>8) & (ATOMIC_HASH_SIZE-1)])

spinlock_t __atomic_hash[ATOMIC_HASH_SIZE] = {
        [0 ... (ATOMIC_HASH_SIZE-1)] = __SPIN_LOCK_UNLOCKED(__atomic_hash)
};

#else /* SMP */

static DEFINE_SPINLOCK(dummy);

#define ATOMIC_HASH_SIZE	1
#define ATOMIC_HASH(a)		(&dummy)

#endif /* SMP */
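/*
 * Added commentary, not in the original file: 32-bit SPARC (pre-LEON,
 * pre-V9) has no compare-and-swap instruction, so atomic_t is emulated
 * with ordinary loads/stores serialized by a spinlock. On SMP,
 * ATOMIC_HASH() maps the target address (bits 8 and up) onto one of
 * four locks, so atomics on unrelated addresses rarely contend; on UP,
 * a single dummy lock suffices, since spin_lock_irqsave() reduces to
 * disabling local interrupts, which already makes the read-modify-write
 * sequence atomic.
 */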
#define ATOMIC_FETCH_OP(op, c_op) \
int arch_atomic_fetch_##op(int i, atomic_t *v) \
{ \
        int ret; \
        unsigned long flags; \
        spin_lock_irqsave(ATOMIC_HASH(v), flags); \
 \
        ret = v->counter; \
        v->counter c_op i; \
 \
        spin_unlock_irqrestore(ATOMIC_HASH(v), flags); \
        return ret; \
} \
EXPORT_SYMBOL(arch_atomic_fetch_##op);
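/*
 * Added commentary: the fetch_##op helpers return the value the counter
 * held *before* the operation, matching the generic atomic_fetch_add()/
 * fetch_and()/fetch_or()/fetch_xor() contract.
 */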
#define ATOMIC_OP_RETURN(op, c_op) \
int arch_atomic_##op##_return(int i, atomic_t *v) \
{ \
        int ret; \
        unsigned long flags; \
        spin_lock_irqsave(ATOMIC_HASH(v), flags); \
 \
        ret = (v->counter c_op i); \
 \
        spin_unlock_irqrestore(ATOMIC_HASH(v), flags); \
        return ret; \
} \
EXPORT_SYMBOL(arch_atomic_##op##_return);
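/*
 * Added commentary: the ..._return variants return the *new* value
 * after the update, unlike the fetch_ variants above. Only these
 * operations are instantiated below; the generic atomic headers
 * synthesize the remaining atomic_* operations from them.
 */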
ATOMIC_OP_RETURN(add, +=)

ATOMIC_FETCH_OP(add, +=)
ATOMIC_FETCH_OP(and, &=)
ATOMIC_FETCH_OP(or, |=)
ATOMIC_FETCH_OP(xor, ^=)

#undef ATOMIC_FETCH_OP
#undef ATOMIC_OP_RETURN
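/*
 * Added commentary: a lock-based exchange; stores the new value and
 * returns the previous one under the hashed lock.
 */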
int arch_atomic_xchg(atomic_t *v, int new)
{
        int ret;
        unsigned long flags;

        spin_lock_irqsave(ATOMIC_HASH(v), flags);
        ret = v->counter;
        v->counter = new;
        spin_unlock_irqrestore(ATOMIC_HASH(v), flags);
        return ret;
}
EXPORT_SYMBOL(arch_atomic_xchg);
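/*
 * Added commentary: compare-and-exchange emulated under the lock; the
 * store happens only if the counter still equals 'old', and the
 * previous value is returned either way so callers can retry.
 */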
int arch_atomic_cmpxchg(atomic_t *v, int old, int new)
{
        int ret;
        unsigned long flags;

        spin_lock_irqsave(ATOMIC_HASH(v), flags);
        ret = v->counter;
        if (likely(ret == old))
                v->counter = new;
        spin_unlock_irqrestore(ATOMIC_HASH(v), flags);
        return ret;
}
EXPORT_SYMBOL(arch_atomic_cmpxchg);
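/*
 * Added commentary: adds 'a' to the counter unless it currently holds
 * 'u', returning the old value; callers can tell whether the add
 * happened by checking old != u. This backs atomic_add_unless().
 */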
int arch_atomic_fetch_add_unless(atomic_t *v, int a, int u)
{
        int ret;
        unsigned long flags;

        spin_lock_irqsave(ATOMIC_HASH(v), flags);
        ret = v->counter;
        if (ret != u)
                v->counter += a;
        spin_unlock_irqrestore(ATOMIC_HASH(v), flags);
        return ret;
}
EXPORT_SYMBOL(arch_atomic_fetch_add_unless);
/* Atomic operations are already serializing */
void arch_atomic_set(atomic_t *v, int i)
{
        unsigned long flags;

        spin_lock_irqsave(ATOMIC_HASH(v), flags);
        v->counter = i;
        spin_unlock_irqrestore(ATOMIC_HASH(v), flags);
}
EXPORT_SYMBOL(arch_atomic_set);
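/*
 * Added commentary: the three sp32___*_bit() helpers below back the
 * sparc32 bitop primitives (set_bit(), test_and_set_bit(), and
 * friends). Each returns the old state of the targeted bits
 * (old & mask), again serialized by the address-hashed locks.
 */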
unsigned long sp32___set_bit(unsigned long *addr, unsigned long mask)
{
        unsigned long old, flags;

        spin_lock_irqsave(ATOMIC_HASH(addr), flags);
        old = *addr;
        *addr = old | mask;
        spin_unlock_irqrestore(ATOMIC_HASH(addr), flags);

        return old & mask;
}
EXPORT_SYMBOL(sp32___set_bit);
unsigned long sp32___clear_bit(unsigned long *addr, unsigned long mask)
{
        unsigned long old, flags;

        spin_lock_irqsave(ATOMIC_HASH(addr), flags);
        old = *addr;
        *addr = old & ~mask;
        spin_unlock_irqrestore(ATOMIC_HASH(addr), flags);

        return old & mask;
}
EXPORT_SYMBOL(sp32___clear_bit);
unsigned long sp32___change_bit(unsigned long *addr, unsigned long mask)
{
        unsigned long old, flags;

        spin_lock_irqsave(ATOMIC_HASH(addr), flags);
        old = *addr;
        *addr = old ^ mask;
        spin_unlock_irqrestore(ATOMIC_HASH(addr), flags);

        return old & mask;
}
EXPORT_SYMBOL(sp32___change_bit);
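/*
 * Added commentary: CMPXCHG(T) stamps out __cmpxchg_u8/u16/u32/u64,
 * the lock-based backends behind the generic cmpxchg() macro on
 * sparc32. The pattern mirrors arch_atomic_cmpxchg() above but works
 * on arbitrary memory of the given width.
 */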
#define CMPXCHG(T) \
T __cmpxchg_##T(volatile T *ptr, T old, T new) \
{ \
        unsigned long flags; \
        T prev; \
 \
        spin_lock_irqsave(ATOMIC_HASH(ptr), flags); \
        if ((prev = *ptr) == old) \
                *ptr = new; \
        spin_unlock_irqrestore(ATOMIC_HASH(ptr), flags); \
 \
        return prev; \
}
CMPXCHG(u8)
CMPXCHG(u16)
CMPXCHG(u32)
CMPXCHG(u64)

EXPORT_SYMBOL(__cmpxchg_u8);
EXPORT_SYMBOL(__cmpxchg_u16);
EXPORT_SYMBOL(__cmpxchg_u32);
EXPORT_SYMBOL(__cmpxchg_u64);
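/*
 * Added commentary: __xchg_u32() is the backend for xchg() on a 32-bit
 * word: store the new value and return the old one, all under the
 * hashed lock.
 */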
unsigned long __xchg_u32(volatile u32 *ptr, u32 new)
{
        unsigned long flags;
        u32 prev;

        spin_lock_irqsave(ATOMIC_HASH(ptr), flags);
        prev = *ptr;
        *ptr = new;
        spin_unlock_irqrestore(ATOMIC_HASH(ptr), flags);

        return (unsigned long)prev;
}
EXPORT_SYMBOL(__xchg_u32);
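For context, here is a minimal, hypothetical sketch of how this file is reached from generic kernel code. The demo_* names below are illustrative and not part of the file; the mapping through <linux/atomic.h> is the standard one: callers use the portable atomic_*() wrappers, which on sparc32 resolve to the lock-based arch_atomic_*() implementations above.

#include <linux/atomic.h>
#include <linux/types.h>

/* Hypothetical caller code, for illustration only. */
static atomic_t demo_refcount = ATOMIC_INIT(1);

static int demo_get(void)
{
        /* On sparc32 this ends up in arch_atomic_add_return() above. */
        return atomic_add_return(1, &demo_refcount);
}

static bool demo_get_unless_zero(void)
{
        /*
         * Backed by arch_atomic_fetch_add_unless() above; returns true
         * if the increment happened (i.e. the refcount was not zero).
         */
        return atomic_add_unless(&demo_refcount, 1, 0);
}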