Contributors: 11

| Author | Tokens | Token Proportion | Commits | Commit Proportion |
|---|---|---|---|---|
| Peter Zijlstra | 138 | 26.80% | 5 | 17.86% |
| Greg Ungerer | 77 | 14.95% | 5 | 17.86% |
| Mark Rutland | 76 | 14.76% | 6 | 21.43% |
| Linus Torvalds (pre-git) | 70 | 13.59% | 3 | 10.71% |
| Geert Uytterhoeven | 56 | 10.87% | 3 | 10.71% |
| Roman Zippel | 47 | 9.13% | 1 | 3.57% |
| Alan Cox | 39 | 7.57% | 1 | 3.57% |
| Sam Ravnborg | 6 | 1.17% | 1 | 3.57% |
| Andrea Parri | 4 | 0.78% | 1 | 3.57% |
| David Howells | 1 | 0.19% | 1 | 3.57% |
| Greg Kroah-Hartman | 1 | 0.19% | 1 | 3.57% |
| Total | 515 | | 28 | |
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __ARCH_M68K_ATOMIC__
#define __ARCH_M68K_ATOMIC__
#include <linux/types.h>
#include <linux/irqflags.h>
#include <asm/cmpxchg.h>
#include <asm/barrier.h>
/*
* Atomic operations that C can't guarantee us. Useful for
* resource counting etc..
*/
/*
* We do not have SMP m68k systems, so we don't have to deal with that.
*/
#define arch_atomic_read(v) READ_ONCE((v)->counter)
#define arch_atomic_set(v, i) WRITE_ONCE(((v)->counter), (i))
/*
* The ColdFire parts cannot do some immediate to memory operations,
* so for them we do not specify the "i" asm constraint.
*/
#ifdef CONFIG_COLDFIRE
#define ASM_DI "d"
#else
#define ASM_DI "di"
#endif
#define ATOMIC_OP(op, c_op, asm_op) \
static inline void arch_atomic_##op(int i, atomic_t *v) \
{ \
	__asm__ __volatile__(#asm_op "l %1,%0" : "+m" (*v) : ASM_DI (i));\
}
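/*
 * Illustrative expansion (editorial note, not part of the original
 * header): ATOMIC_OP(add, +=, add) generates roughly
 *
 *	static inline void arch_atomic_add(int i, atomic_t *v)
 *	{
 *		__asm__ __volatile__("addl %1,%0" : "+m" (*v) : "di" (i));
 *	}
 *
 * On ColdFire the constraint is plain "d", so the operand is forced
 * into a data register rather than encoded as an immediate.
 */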
#ifdef CONFIG_RMW_INSNS

#define ATOMIC_OP_RETURN(op, c_op, asm_op) \
static inline int arch_atomic_##op##_return(int i, atomic_t *v) \
{ \
	int t, tmp; \
	\
	__asm__ __volatile__( \
		"1: movel %2,%1\n" \
		" " #asm_op "l %3,%1\n" \
		" casl %2,%1,%0\n" \
		" jne 1b" \
		: "+m" (*v), "=&d" (t), "=&d" (tmp) \
		: "di" (i), "2" (arch_atomic_read(v))); \
	return t; \
}

#define ATOMIC_FETCH_OP(op, c_op, asm_op) \
static inline int arch_atomic_fetch_##op(int i, atomic_t *v) \
{ \
	int t, tmp; \
	\
	__asm__ __volatile__( \
		"1: movel %2,%1\n" \
		" " #asm_op "l %3,%1\n" \
		" casl %2,%1,%0\n" \
		" jne 1b" \
		: "+m" (*v), "=&d" (t), "=&d" (tmp) \
		: "di" (i), "2" (arch_atomic_read(v))); \
	return tmp; \
}
#else

#define ATOMIC_OP_RETURN(op, c_op, asm_op) \
static inline int arch_atomic_##op##_return(int i, atomic_t * v) \
{ \
	unsigned long flags; \
	int t; \
	\
	local_irq_save(flags); \
	t = (v->counter c_op i); \
	local_irq_restore(flags); \
	\
	return t; \
}

#define ATOMIC_FETCH_OP(op, c_op, asm_op) \
static inline int arch_atomic_fetch_##op(int i, atomic_t * v) \
{ \
	unsigned long flags; \
	int t; \
	\
	local_irq_save(flags); \
	t = v->counter; \
	v->counter c_op i; \
	local_irq_restore(flags); \
	\
	return t; \
}
#endif /* CONFIG_RMW_INSNS */
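/*
 * Sketch of the two strategies above (illustrative, not from the
 * header): with CONFIG_RMW_INSNS the casl-based version is logically a
 * compare-and-swap retry loop,
 *
 *	do {
 *		old = arch_atomic_read(v);
 *		new = old OP i;
 *	} while (compare-and-swap of old -> new on v fails);
 *
 * while the fallback disables local interrupts around a plain C
 * read-modify-write, which is sufficient because, as noted above,
 * there are no SMP m68k systems.
 */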
#define ATOMIC_OPS(op, c_op, asm_op) \
ATOMIC_OP(op, c_op, asm_op) \
ATOMIC_OP_RETURN(op, c_op, asm_op) \
ATOMIC_FETCH_OP(op, c_op, asm_op)
ATOMIC_OPS(add, +=, add)
ATOMIC_OPS(sub, -=, sub)
#define arch_atomic_add_return arch_atomic_add_return
#define arch_atomic_sub_return arch_atomic_sub_return
#define arch_atomic_fetch_add arch_atomic_fetch_add
#define arch_atomic_fetch_sub arch_atomic_fetch_sub
#undef ATOMIC_OPS
#define ATOMIC_OPS(op, c_op, asm_op) \
ATOMIC_OP(op, c_op, asm_op) \
ATOMIC_FETCH_OP(op, c_op, asm_op)
ATOMIC_OPS(and, &=, and)
ATOMIC_OPS(or, |=, or)
ATOMIC_OPS(xor, ^=, eor)
#define arch_atomic_fetch_and arch_atomic_fetch_and
#define arch_atomic_fetch_or arch_atomic_fetch_or
#define arch_atomic_fetch_xor arch_atomic_fetch_xor
#undef ATOMIC_OPS
#undef ATOMIC_FETCH_OP
#undef ATOMIC_OP_RETURN
#undef ATOMIC_OP
static inline void arch_atomic_inc(atomic_t *v)
{
	__asm__ __volatile__("addql #1,%0" : "+m" (*v));
}
#define arch_atomic_inc arch_atomic_inc

static inline void arch_atomic_dec(atomic_t *v)
{
	__asm__ __volatile__("subql #1,%0" : "+m" (*v));
}
#define arch_atomic_dec arch_atomic_dec

static inline int arch_atomic_dec_and_test(atomic_t *v)
{
	char c;
	__asm__ __volatile__("subql #1,%1; seq %0" : "=d" (c), "+m" (*v));
	return c != 0;
}
#define arch_atomic_dec_and_test arch_atomic_dec_and_test

static inline int arch_atomic_dec_and_test_lt(atomic_t *v)
{
	char c;
	__asm__ __volatile__(
		"subql #1,%1; slt %0"
		: "=d" (c), "=m" (*v)
		: "m" (*v));
	return c != 0;
}

static inline int arch_atomic_inc_and_test(atomic_t *v)
{
	char c;
	__asm__ __volatile__("addql #1,%1; seq %0" : "=d" (c), "+m" (*v));
	return c != 0;
}
#define arch_atomic_inc_and_test arch_atomic_inc_and_test
#ifndef CONFIG_RMW_INSNS
static inline int arch_atomic_cmpxchg(atomic_t *v, int old, int new)
{
	unsigned long flags;
	int prev;
	local_irq_save(flags);
	prev = arch_atomic_read(v);
	if (prev == old)
		arch_atomic_set(v, new);
	local_irq_restore(flags);
	return prev;
}
#define arch_atomic_cmpxchg arch_atomic_cmpxchg

static inline int arch_atomic_xchg(atomic_t *v, int new)
{
	unsigned long flags;
	int prev;
	local_irq_save(flags);
	prev = arch_atomic_read(v);
	arch_atomic_set(v, new);
	local_irq_restore(flags);
	return prev;
}
#define arch_atomic_xchg arch_atomic_xchg
#endif /* !CONFIG_RMW_INSNS */
static inline int arch_atomic_sub_and_test(int i, atomic_t *v)
{
	char c;
	__asm__ __volatile__("subl %2,%1; seq %0"
			     : "=d" (c), "+m" (*v)
			     : ASM_DI (i));
	return c != 0;
}
#define arch_atomic_sub_and_test arch_atomic_sub_and_test

static inline int arch_atomic_add_negative(int i, atomic_t *v)
{
	char c;
	__asm__ __volatile__("addl %2,%1; smi %0"
			     : "=d" (c), "+m" (*v)
			     : ASM_DI (i));
	return c != 0;
}
#define arch_atomic_add_negative arch_atomic_add_negative
#endif /* __ARCH_M68K_ATOMIC__ */
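For context, a minimal usage sketch: the generic atomic_t wrappers from <linux/atomic.h> (atomic_inc(), atomic_dec_and_test(), and so on) resolve to the arch_atomic_* helpers defined above when building for m68k. The identifiers example_refcount, example_get() and example_put() below are invented for illustration only.

#include <linux/types.h>
#include <linux/atomic.h>

static atomic_t example_refcount = ATOMIC_INIT(1);

static void example_get(void)
{
	/* Ends up in arch_atomic_inc() on m68k. */
	atomic_inc(&example_refcount);
}

static bool example_put(void)
{
	/* True when the count reaches zero; uses arch_atomic_dec_and_test(). */
	return atomic_dec_and_test(&example_refcount);
}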