Contributors: 15

| Author | Tokens | Token Proportion | Commits | Commit Proportion |
|---|---|---|---|---|
| David Howells | 101 | 25.31% | 2 | 8.70% |
| Linus Torvalds (pre-git) | 76 | 19.05% | 5 | 21.74% |
| Geert Uytterhoeven | 74 | 18.55% | 3 | 13.04% |
| Alan Cox | 54 | 13.53% | 1 | 4.35% |
| Andrew Morton | 31 | 7.77% | 1 | 4.35% |
| Mathieu Desnoyers | 30 | 7.52% | 1 | 4.35% |
| Mark Rutland | 7 | 1.75% | 2 | 8.70% |
| Kees Cook | 6 | 1.50% | 1 | 4.35% |
| Sam Ravnborg | 6 | 1.50% | 1 | 4.35% |
| Greg Ungerer | 4 | 1.00% | 1 | 4.35% |
| Andrzej Hajda | 3 | 0.75% | 1 | 4.35% |
| Paul Mundt | 3 | 0.75% | 1 | 4.35% |
| Arnd Bergmann | 2 | 0.50% | 1 | 4.35% |
| Greg Kroah-Hartman | 1 | 0.25% | 1 | 4.35% |
| Thorsten Blum | 1 | 0.25% | 1 | 4.35% |
| Total | 399 | | 23 | |

/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __ARCH_M68K_CMPXCHG__
#define __ARCH_M68K_CMPXCHG__

#include <linux/irqflags.h>

/* Cast to a volatile pointer of the given type, for asm memory operands. */
#define __xg(type, x) ((volatile type *)(x))

/* Deliberately undefined: a call only survives to link time on a bad size. */
extern unsigned long __invalid_xchg_size(unsigned long, volatile void *, int);

#ifndef CONFIG_RMW_INSNS
/*
 * No CAS-style read-modify-write instructions (e.g. plain 68000 or
 * ColdFire): make the exchange atomic by disabling interrupts around a
 * plain load/store pair.
 */
static inline unsigned long __arch_xchg(unsigned long x, volatile void *ptr, int size)
{
	unsigned long flags, tmp;

	local_irq_save(flags);

	switch (size) {
	case 1:
		tmp = *(u8 *)ptr;
		*(u8 *)ptr = x;
		x = tmp;
		break;
	case 2:
		tmp = *(u16 *)ptr;
		*(u16 *)ptr = x;
		x = tmp;
		break;
	case 4:
		tmp = *(u32 *)ptr;
		*(u32 *)ptr = x;
		x = tmp;
		break;
	default:
		x = __invalid_xchg_size(x, ptr, size);
		break;
	}

	local_irq_restore(flags);
	return x;
}
#else
/*
 * CONFIG_RMW_INSNS: use the 68020+ "cas" instruction. cas compares %0
 * with the memory operand; on a match it stores the new value %1,
 * otherwise it copies the memory operand into %0 and the loop retries.
 */
static inline unsigned long __arch_xchg(unsigned long x, volatile void *ptr, int size)
{
	switch (size) {
	case 1:
		__asm__ __volatile__
			("moveb %2,%0\n\t"
			 "1:\n\t"
			 "casb %0,%1,%2\n\t"
			 "jne 1b"
			 : "=&d" (x) : "d" (x), "m" (*__xg(u8, ptr)) : "memory");
		break;
	case 2:
		__asm__ __volatile__
			("movew %2,%0\n\t"
			 "1:\n\t"
			 "casw %0,%1,%2\n\t"
			 "jne 1b"
			 : "=&d" (x) : "d" (x), "m" (*__xg(u16, ptr)) : "memory");
		break;
	case 4:
		__asm__ __volatile__
			("movel %2,%0\n\t"
			 "1:\n\t"
			 "casl %0,%1,%2\n\t"
			 "jne 1b"
			 : "=&d" (x) : "d" (x), "m" (*__xg(u32, ptr)) : "memory");
		break;
	default:
		x = __invalid_xchg_size(x, ptr, size);
		break;
	}
	return x;
}
#endif

#define arch_xchg(ptr,x) ({(__typeof__(*(ptr)))__arch_xchg((unsigned long)(x),(ptr),sizeof(*(ptr)));})
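
/*
 * Usage sketch (illustrative only, not part of the original header):
 * arch_xchg() atomically stores a new value and returns whatever was
 * there before, which is enough to build a simple test-and-set lock.
 * The names example_lock/example_trylock/example_unlock are
 * hypothetical.
 *
 *	static volatile u8 example_lock;
 *
 *	static int example_trylock(void)
 *	{
 *		return arch_xchg(&example_lock, 1) == 0; // old 0 => acquired
 *	}
 *
 *	static void example_unlock(void)
 *	{
 *		arch_xchg(&example_lock, 0); // put the lock word back to 0
 *	}
 */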

#include <asm-generic/cmpxchg-local.h>

#define arch_cmpxchg64_local(ptr, o, n) __generic_cmpxchg64_local((ptr), (o), (n))

extern unsigned long __invalid_cmpxchg_size(volatile void *,
					    unsigned long, unsigned long, int);

/*
 * Atomic compare and exchange. Compare OLD with MEM, if identical,
 * store NEW in MEM. Return the initial value in MEM. Success is
 * indicated by comparing RETURN with OLD.
 */
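
/*
 * Usage sketch (illustrative only, not part of the original header):
 * the classic cmpxchg retry loop. arch_cmpxchg() returns the value it
 * actually found at *v, so the update succeeded iff that return value
 * equals the 'old' we passed in. example_atomic_add is a hypothetical
 * helper name.
 *
 *	static void example_atomic_add(volatile int *v, int inc)
 *	{
 *		int old;
 *
 *		do {
 *			old = *v;
 *		} while (arch_cmpxchg(v, old, old + inc) != old);
 *	}
 */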
#ifdef CONFIG_RMW_INSNS

static inline unsigned long __cmpxchg(volatile void *p, unsigned long old,
				      unsigned long new, int size)
{
	switch (size) {
	case 1:
		/* cas writes the value actually found at *p back into "old" */
		__asm__ __volatile__ ("casb %0,%2,%1"
				      : "=d" (old), "=m" (*(char *)p)
				      : "d" (new), "0" (old), "m" (*(char *)p));
		break;
	case 2:
		__asm__ __volatile__ ("casw %0,%2,%1"
				      : "=d" (old), "=m" (*(short *)p)
				      : "d" (new), "0" (old), "m" (*(short *)p));
		break;
	case 4:
		__asm__ __volatile__ ("casl %0,%2,%1"
				      : "=d" (old), "=m" (*(int *)p)
				      : "d" (new), "0" (old), "m" (*(int *)p));
		break;
	default:
		old = __invalid_cmpxchg_size(p, old, new, size);
		break;
	}
	return old;
}

#define arch_cmpxchg(ptr, o, n)						\
	({(__typeof__(*(ptr)))__cmpxchg((ptr), (unsigned long)(o),	\
					(unsigned long)(n), sizeof(*(ptr)));})

#define arch_cmpxchg_local(ptr, o, n)					\
	({(__typeof__(*(ptr)))__cmpxchg((ptr), (unsigned long)(o),	\
					(unsigned long)(n), sizeof(*(ptr)));})

#define arch_cmpxchg64(ptr, o, n)	arch_cmpxchg64_local((ptr), (o), (n))

#else

#include <asm-generic/cmpxchg.h>

#endif

#endif /* __ARCH_M68K_CMPXCHG__ */