cregit-Linux: how code gets into the kernel

Release 4.14: arch/powerpc/include/asm/cmpxchg.h

/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_POWERPC_CMPXCHG_H_
#define _ASM_POWERPC_CMPXCHG_H_

#ifdef __KERNEL__
#include <linux/compiler.h>
#include <asm/synch.h>
#include <asm/asm-compat.h>
#include <linux/bug.h>

#ifdef __BIG_ENDIAN
#define BITOFF_CAL(size, off)	((sizeof(u32) - size - off) * BITS_PER_BYTE)
#else
#define BITOFF_CAL(size, off)	(off * BITS_PER_BYTE)
#endif
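
A quick worked example of BITOFF_CAL: for a u8 at byte offset 1 within its containing u32, big-endian gives (4 - 1 - 1) * 8 = 16, so the byte occupies bits 16..23 of the aligned word; little-endian gives 1 * 8 = 8, bits 8..15. The XCHG_GEN and CMPXCHG_GEN macros below use this offset to shift operands into place and to build the sub-word masks.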


#define XCHG_GEN(type, sfx, cl)				\
static inline u32 __xchg_##type##sfx(volatile void *p, u32 val) \
{                                                               \
        unsigned int prev, prev_mask, tmp, bitoff, off;         \
                                                                \
        off = (unsigned long)p % sizeof(u32);                   \
        bitoff = BITOFF_CAL(sizeof(type), off);                 \
        p -= off;                                               \
        val <<= bitoff;                                         \
        prev_mask = (u32)(type)-1 << bitoff;                    \
                                                                \
        __asm__ __volatile__(                                   \
"1:     lwarx   %0,0,%3\n"                                      \
"       andc    %1,%0,%5\n"                                     \
"       or      %1,%1,%4\n"                                     \
        PPC405_ERR77(0,%3)                                      \
"       stwcx.  %1,0,%3\n"                                      \
"       bne-    1b\n"                                           \
        : "=&r" (prev), "=&r" (tmp), "+m" (*(u32*)p)            \
        : "r" (p), "r" (val), "r" (prev_mask)                   \
        : "cc", cl);                                            \
                                                                \
        return prev >> bitoff;                                  \
}
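
To make the masking scheme concrete, here is a hypothetical portable C sketch of what an expansion such as XCHG_GEN(u8, _relaxed, "cc") boils down to. The names xchg_u8_sketch and bitoff_cal_sketch are illustrative, and the GCC/Clang __atomic builtins merely stand in for the lwarx/stwcx. reservation loop; none of this is kernel API.

#include <stdint.h>

/* Illustrative stand-in for BITOFF_CAL(): the byte's bit position
 * inside its aligned 32-bit container. */
static inline unsigned int bitoff_cal_sketch(unsigned int size, unsigned int off)
{
#ifdef __BIG_ENDIAN__
        return (sizeof(uint32_t) - size - off) * 8;
#else
        return off * 8;
#endif
}

static inline uint8_t xchg_u8_sketch(volatile uint8_t *p, uint8_t val)
{
        uintptr_t off = (uintptr_t)p % sizeof(uint32_t);
        unsigned int bitoff = bitoff_cal_sketch(sizeof(uint8_t), off);
        /* Word-aligned container of the target byte. */
        volatile uint32_t *w = (volatile uint32_t *)((uintptr_t)p - off);
        uint32_t mask = 0xffu << bitoff;
        uint32_t old = __atomic_load_n(w, __ATOMIC_RELAXED);
        uint32_t new;

        do {
                /* The andc/or pair from the asm loop: clear the target
                 * byte, then merge in the shifted new value. */
                new = (old & ~mask) | ((uint32_t)val << bitoff);
        } while (!__atomic_compare_exchange_n(w, &old, new, 1,
                                              __ATOMIC_RELAXED, __ATOMIC_RELAXED));

        return (uint8_t)((old & mask) >> bitoff);
}

The weak compare-and-exchange retry plays the role of the stwcx./bne- pair: if another CPU modified the containing word between the load and the store, the store fails and the merge is redone.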


#define CMPXCHG_GEN(type, sfx, br, br2, cl)			\
static inline                                                   \
u32 __cmpxchg_##type##sfx(volatile void *p, u32 old, u32 new)   \
{                                                               \
        unsigned int prev, prev_mask, tmp, bitoff, off;         \
                                                                \
        off = (unsigned long)p % sizeof(u32);                   \
        bitoff = BITOFF_CAL(sizeof(type), off);                 \
        p -= off;                                               \
        old <<= bitoff;                                         \
        new <<= bitoff;                                         \
        prev_mask = (u32)(type)-1 << bitoff;                    \
                                                                \
        __asm__ __volatile__(                                   \
        br                                                      \
"1:     lwarx   %0,0,%3\n"                                      \
"       and     %1,%0,%6\n"                                     \
"       cmpw    0,%1,%4\n"                                      \
"       bne-    2f\n"                                           \
"       andc    %1,%0,%6\n"                                     \
"       or      %1,%1,%5\n"                                     \
        PPC405_ERR77(0,%3)                                      \
"       stwcx.  %1,0,%3\n"                                      \
"       bne-    1b\n"                                           \
        br2                                                     \
        "\n"                                                    \
"2:"                                                            \
        : "=&r" (prev), "=&r" (tmp), "+m" (*(u32*)p)            \
        : "r" (p), "r" (old), "r" (new), "r" (prev_mask)        \
        : "cc", cl);                                            \
                                                                \
        return prev >> bitoff;                                  \
}
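
The compare-and-swap variant differs mainly in its early exit: if the masked sub-word no longer holds the expected value, the loop bails out without storing, mirroring the "and ; cmpw ; bne- 2f" sequence. A matching hypothetical sketch, reusing bitoff_cal_sketch() from the previous example:

static inline uint8_t cmpxchg_u8_sketch(volatile uint8_t *p, uint8_t old_v,
                                        uint8_t new_v)
{
        uintptr_t off = (uintptr_t)p % sizeof(uint32_t);
        unsigned int bitoff = bitoff_cal_sketch(sizeof(uint8_t), off);
        volatile uint32_t *w = (volatile uint32_t *)((uintptr_t)p - off);
        uint32_t mask = 0xffu << bitoff;
        uint32_t cur = __atomic_load_n(w, __ATOMIC_RELAXED);
        uint32_t next;

        do {
                /* Early exit: the byte no longer matches, so return the
                 * observed value without attempting a store. */
                if (((cur & mask) >> bitoff) != old_v)
                        break;
                next = (cur & ~mask) | ((uint32_t)new_v << bitoff);
        } while (!__atomic_compare_exchange_n(w, &cur, next, 1,
                                              __ATOMIC_RELAXED, __ATOMIC_RELAXED));

        return (uint8_t)((cur & mask) >> bitoff);
}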

/*
 * Atomic exchange
 *
 * Changes the memory location '*p' to be val and returns
 * the previous value stored there.
 */

XCHG_GEN(u8, _local, "memory");
XCHG_GEN(u8, _relaxed, "cc");
XCHG_GEN(u16, _local, "memory");
XCHG_GEN(u16, _relaxed, "cc");


static __always_inline unsigned long
__xchg_u32_local(volatile void *p, unsigned long val)
{
        unsigned long prev;

        __asm__ __volatile__(
"1:     lwarx   %0,0,%2 \n"
        PPC405_ERR77(0,%2)
"       stwcx.  %3,0,%2 \n\
        bne-    1b"
        : "=&r" (prev), "+m" (*(volatile unsigned int *)p)
        : "r" (p), "r" (val)
        : "cc", "memory");

        return prev;
}

Contributors

Person               Tokens   Prop      Commits   CommitProp
David Howells        25       92.59%    1         50.00%
Boqun Feng           2        7.41%     1         50.00%
Total                27       100.00%   2         100.00%


static __always_inline unsigned long
__xchg_u32_relaxed(u32 *p, unsigned long val)
{
        unsigned long prev;

        __asm__ __volatile__(
"1:     lwarx   %0,0,%2\n"
        PPC405_ERR77(0, %2)
"       stwcx.  %3,0,%2\n"
"       bne-    1b"
        : "=&r" (prev), "+m" (*p)
        : "r" (p), "r" (val)
        : "cc");

        return prev;
}

Contributors

Person               Tokens   Prop      Commits   CommitProp
David Howells        23       88.46%    1         50.00%
Boqun Feng           3        11.54%    1         50.00%
Total                26       100.00%   2         100.00%

#ifdef CONFIG_PPC64
static __always_inline unsigned long
__xchg_u64_local(volatile void *p, unsigned long val)
{
        unsigned long prev;

        __asm__ __volatile__(
"1:     ldarx   %0,0,%2 \n"
        PPC405_ERR77(0,%2)
"       stdcx.  %3,0,%2 \n\
        bne-    1b"
        : "=&r" (prev), "+m" (*(volatile unsigned long *)p)
        : "r" (p), "r" (val)
        : "cc", "memory");

        return prev;
}

Contributors

Person               Tokens   Prop      Commits   CommitProp
David Howells        25       92.59%    1         50.00%
Boqun Feng           2        7.41%     1         50.00%
Total                27       100.00%   2         100.00%


static __always_inline unsigned long
__xchg_u64_relaxed(u64 *p, unsigned long val)
{
        unsigned long prev;

        __asm__ __volatile__(
"1:     ldarx   %0,0,%2\n"
        PPC405_ERR77(0, %2)
"       stdcx.  %3,0,%2\n"
"       bne-    1b"
        : "=&r" (prev), "+m" (*p)
        : "r" (p), "r" (val)
        : "cc");

        return prev;
}

Contributors

Person               Tokens   Prop      Commits   CommitProp
David Howells        23       88.46%    1         50.00%
Boqun Feng           3        11.54%    1         50.00%
Total                26       100.00%   2         100.00%

#endif
static __always_inline unsigned long
__xchg_local(void *ptr, unsigned long x, unsigned int size)
{
        switch (size) {
        case 1:
                return __xchg_u8_local(ptr, x);
        case 2:
                return __xchg_u16_local(ptr, x);
        case 4:
                return __xchg_u32_local(ptr, x);
#ifdef CONFIG_PPC64
        case 8:
                return __xchg_u64_local(ptr, x);
#endif
        }
        BUILD_BUG_ON_MSG(1, "Unsupported size for __xchg");
        return x;
}

Contributors

Person               Tokens   Prop      Commits   CommitProp
David Howells        54       63.53%    1         25.00%
Pan Xinhui           28       32.94%    2         50.00%
Boqun Feng           3        3.53%     1         25.00%
Total                85       100.00%   4         100.00%


static __always_inline unsigned long
__xchg_relaxed(void *ptr, unsigned long x, unsigned int size)
{
        switch (size) {
        case 1:
                return __xchg_u8_relaxed(ptr, x);
        case 2:
                return __xchg_u16_relaxed(ptr, x);
        case 4:
                return __xchg_u32_relaxed(ptr, x);
#ifdef CONFIG_PPC64
        case 8:
                return __xchg_u64_relaxed(ptr, x);
#endif
        }
        BUILD_BUG_ON_MSG(1, "Unsupported size for __xchg_relaxed");
        return x;
}

Contributors

Person               Tokens   Prop      Commits   CommitProp
David Howells        54       63.53%    1         25.00%
Pan Xinhui           28       32.94%    2         50.00%
Boqun Feng           3        3.53%     1         25.00%
Total                85       100.00%   4         100.00%

#define xchg_local(ptr,x)                                            \
  ({                                                                 \
     __typeof__(*(ptr)) _x_ = (x);                                   \
     (__typeof__(*(ptr))) __xchg_local((ptr),                        \
                (unsigned long)_x_, sizeof(*(ptr)));                 \
  })

#define xchg_relaxed(ptr, x)                                         \
({                                                                   \
        __typeof__(*(ptr)) _x_ = (x);                                \
        (__typeof__(*(ptr))) __xchg_relaxed((ptr),                   \
                        (unsigned long)_x_, sizeof(*(ptr)));         \
})

/*
 * Compare and exchange - if *p == old, set it to new,
 * and return the old value of *p.
 */

CMPXCHG_GEN(u8, , PPC_ATOMIC_ENTRY_BARRIER, PPC_ATOMIC_EXIT_BARRIER, "memory");
CMPXCHG_GEN(u8, _local, , , "memory");
CMPXCHG_GEN(u8, _acquire, , PPC_ACQUIRE_BARRIER, "memory");
CMPXCHG_GEN(u8, _relaxed, , , "cc");
CMPXCHG_GEN(u16, , PPC_ATOMIC_ENTRY_BARRIER, PPC_ATOMIC_EXIT_BARRIER, "memory");
CMPXCHG_GEN(u16, _local, , , "memory");
CMPXCHG_GEN(u16, _acquire, , PPC_ACQUIRE_BARRIER, "memory");
CMPXCHG_GEN(u16, _relaxed, , , "cc");
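
Before moving on to the word and doubleword cmpxchg implementations, a brief hypothetical usage sketch of the xchg macros just defined (variable names are illustrative):

        u16 state = 5;
        u16 prev;

        /* Swap in a new value with no ordering guarantees; the macro
         * dispatches on sizeof(state) to the masked u16 loop above. */
        prev = xchg_relaxed(&state, (u16)7);    /* prev == 5, state == 7 */
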
static __always_inline unsigned long
__cmpxchg_u32(volatile unsigned int *p, unsigned long old, unsigned long new)
{
        unsigned int prev;

        __asm__ __volatile__ (
        PPC_ATOMIC_ENTRY_BARRIER
"1:     lwarx   %0,0,%2         # __cmpxchg_u32\n\
        cmpw    0,%0,%3\n\
        bne-    2f\n"
        PPC405_ERR77(0,%2)
"       stwcx.  %4,0,%2\n\
        bne-    1b"
        PPC_ATOMIC_EXIT_BARRIER
        "\n\
2:"
        : "=&r" (prev), "+m" (*p)
        : "r" (p), "r" (old), "r" (new)
        : "cc", "memory");

        return prev;
}

Contributors

Person               Tokens   Prop      Commits   CommitProp
David Howells        31       96.88%    1         50.00%
Boqun Feng           1        3.12%     1         50.00%
Total                32       100.00%   2         100.00%


static __always_inline unsigned long
__cmpxchg_u32_local(volatile unsigned int *p, unsigned long old,
                    unsigned long new)
{
        unsigned int prev;

        __asm__ __volatile__ (
"1:     lwarx   %0,0,%2         # __cmpxchg_u32\n\
        cmpw    0,%0,%3\n\
        bne-    2f\n"
        PPC405_ERR77(0,%2)
"       stwcx.  %4,0,%2\n\
        bne-    1b"
        "\n\
2:"
        : "=&r" (prev), "+m" (*p)
        : "r" (p), "r" (old), "r" (new)
        : "cc", "memory");

        return prev;
}

Contributors

Person               Tokens   Prop      Commits   CommitProp
David Howells        32       100.00%   1         100.00%
Total                32       100.00%   1         100.00%


static __always_inline unsigned long
__cmpxchg_u32_relaxed(u32 *p, unsigned long old, unsigned long new)
{
        unsigned long prev;

        __asm__ __volatile__ (
"1:     lwarx   %0,0,%2         # __cmpxchg_u32_relaxed\n"
"       cmpw    0,%0,%3\n"
"       bne-    2f\n"
        PPC405_ERR77(0, %2)
"       stwcx.  %4,0,%2\n"
"       bne-    1b\n"
"2:"
        : "=&r" (prev), "+m" (*p)
        : "r" (p), "r" (old), "r" (new)
        : "cc");

        return prev;
}

Contributors

Person               Tokens   Prop      Commits   CommitProp
Boqun Feng           30       100.00%   1         100.00%
Total                30       100.00%   1         100.00%

/*
 * The cmpxchg family provides no ordering guarantee if the compare part
 * fails, so we can avoid superfluous barriers by implementing cmpxchg()
 * and cmpxchg_acquire() in assembly.  We don't do the same for
 * cmpxchg_release(), because that would put a barrier in the middle of
 * the ll/sc loop, which is probably a bad idea; for example, it might
 * make the conditional store more likely to fail.
 */
static __always_inline unsigned long
__cmpxchg_u32_acquire(u32 *p, unsigned long old, unsigned long new)
{
        unsigned long prev;

        __asm__ __volatile__ (
"1:     lwarx   %0,0,%2         # __cmpxchg_u32_acquire\n"
"       cmpw    0,%0,%3\n"
"       bne-    2f\n"
        PPC405_ERR77(0, %2)
"       stwcx.  %4,0,%2\n"
"       bne-    1b\n"
        PPC_ACQUIRE_BARRIER
        "\n"
"2:"
        : "=&r" (prev), "+m" (*p)
        : "r" (p), "r" (old), "r" (new)
        : "cc", "memory");

        return prev;
}

Contributors

Person               Tokens   Prop      Commits   CommitProp
Boqun Feng           30       100.00%   1         100.00%
Total                30       100.00%   1         100.00%

#ifdef CONFIG_PPC64
static __always_inline unsigned long
__cmpxchg_u64(volatile unsigned long *p, unsigned long old, unsigned long new)
{
        unsigned long prev;

        __asm__ __volatile__ (
        PPC_ATOMIC_ENTRY_BARRIER
"1:     ldarx   %0,0,%2         # __cmpxchg_u64\n\
        cmpd    0,%0,%3\n\
        bne-    2f\n\
        stdcx.  %4,0,%2\n\
        bne-    1b"
        PPC_ATOMIC_EXIT_BARRIER
        "\n\
2:"
        : "=&r" (prev), "+m" (*p)
        : "r" (p), "r" (old), "r" (new)
        : "cc", "memory");

        return prev;
}

Contributors

Person               Tokens   Prop      Commits   CommitProp
David Howells        31       96.88%    1         50.00%
Boqun Feng           1        3.12%     1         50.00%
Total                32       100.00%   2         100.00%


static __always_inline unsigned long
__cmpxchg_u64_local(volatile unsigned long *p, unsigned long old,
                    unsigned long new)
{
        unsigned long prev;

        __asm__ __volatile__ (
"1:     ldarx   %0,0,%2         # __cmpxchg_u64\n\
        cmpd    0,%0,%3\n\
        bne-    2f\n\
        stdcx.  %4,0,%2\n\
        bne-    1b"
        "\n\
2:"
        : "=&r" (prev), "+m" (*p)
        : "r" (p), "r" (old), "r" (new)
        : "cc", "memory");

        return prev;
}

Contributors

Person               Tokens   Prop      Commits   CommitProp
David Howells        32       100.00%   1         100.00%
Total                32       100.00%   1         100.00%


static __always_inline unsigned long
__cmpxchg_u64_relaxed(u64 *p, unsigned long old, unsigned long new)
{
        unsigned long prev;

        __asm__ __volatile__ (
"1:     ldarx   %0,0,%2         # __cmpxchg_u64_relaxed\n"
"       cmpd    0,%0,%3\n"
"       bne-    2f\n"
"       stdcx.  %4,0,%2\n"
"       bne-    1b\n"
"2:"
        : "=&r" (prev), "+m" (*p)
        : "r" (p), "r" (old), "r" (new)
        : "cc");

        return prev;
}

Contributors

Person               Tokens   Prop      Commits   CommitProp
Boqun Feng           30       100.00%   1         100.00%
Total                30       100.00%   1         100.00%


static __always_inline unsigned long
__cmpxchg_u64_acquire(u64 *p, unsigned long old, unsigned long new)
{
        unsigned long prev;

        __asm__ __volatile__ (
"1:     ldarx   %0,0,%2         # __cmpxchg_u64_acquire\n"
"       cmpd    0,%0,%3\n"
"       bne-    2f\n"
"       stdcx.  %4,0,%2\n"
"       bne-    1b\n"
        PPC_ACQUIRE_BARRIER
        "\n"
"2:"
        : "=&r" (prev), "+m" (*p)
        : "r" (p), "r" (old), "r" (new)
        : "cc", "memory");

        return prev;
}

Contributors

Person               Tokens   Prop      Commits   CommitProp
Boqun Feng           30       100.00%   1         100.00%
Total                30       100.00%   1         100.00%

#endif
static __always_inline unsigned long
__cmpxchg(volatile void *ptr, unsigned long old, unsigned long new,
          unsigned int size)
{
        switch (size) {
        case 1:
                return __cmpxchg_u8(ptr, old, new);
        case 2:
                return __cmpxchg_u16(ptr, old, new);
        case 4:
                return __cmpxchg_u32(ptr, old, new);
#ifdef CONFIG_PPC64
        case 8:
                return __cmpxchg_u64(ptr, old, new);
#endif
        }
        BUILD_BUG_ON_MSG(1, "Unsupported size for __cmpxchg");
        return old;
}

Contributors

Person               Tokens   Prop      Commits   CommitProp
David Howells        65       66.33%    1         25.00%
Pan Xinhui           32       32.65%    2         50.00%
Michael Ellerman     1        1.02%     1         25.00%
Total                98       100.00%   4         100.00%


static __always_inline unsigned long
__cmpxchg_local(void *ptr, unsigned long old, unsigned long new,
                unsigned int size)
{
        switch (size) {
        case 1:
                return __cmpxchg_u8_local(ptr, old, new);
        case 2:
                return __cmpxchg_u16_local(ptr, old, new);
        case 4:
                return __cmpxchg_u32_local(ptr, old, new);
#ifdef CONFIG_PPC64
        case 8:
                return __cmpxchg_u64_local(ptr, old, new);
#endif
        }
        BUILD_BUG_ON_MSG(1, "Unsupported size for __cmpxchg_local");
        return old;
}

Contributors

Person               Tokens   Prop      Commits   CommitProp
David Howells        65       67.01%    1         33.33%
Pan Xinhui           32       32.99%    2         66.67%
Total                97       100.00%   3         100.00%


static __always_inline unsigned long
__cmpxchg_relaxed(void *ptr, unsigned long old, unsigned long new,
                  unsigned int size)
{
        switch (size) {
        case 1:
                return __cmpxchg_u8_relaxed(ptr, old, new);
        case 2:
                return __cmpxchg_u16_relaxed(ptr, old, new);
        case 4:
                return __cmpxchg_u32_relaxed(ptr, old, new);
#ifdef CONFIG_PPC64
        case 8:
                return __cmpxchg_u64_relaxed(ptr, old, new);
#endif
        }
        BUILD_BUG_ON_MSG(1, "Unsupported size for __cmpxchg_relaxed");
        return old;
}

Contributors

Person               Tokens   Prop      Commits   CommitProp
Boqun Feng           65       67.01%    1         33.33%
Pan Xinhui           32       32.99%    2         66.67%
Total                97       100.00%   3         100.00%


static __always_inline unsigned long
__cmpxchg_acquire(void *ptr, unsigned long old, unsigned long new,
                  unsigned int size)
{
        switch (size) {
        case 1:
                return __cmpxchg_u8_acquire(ptr, old, new);
        case 2:
                return __cmpxchg_u16_acquire(ptr, old, new);
        case 4:
                return __cmpxchg_u32_acquire(ptr, old, new);
#ifdef CONFIG_PPC64
        case 8:
                return __cmpxchg_u64_acquire(ptr, old, new);
#endif
        }
        BUILD_BUG_ON_MSG(1, "Unsupported size for __cmpxchg_acquire");
        return old;
}

Contributors

Person               Tokens   Prop      Commits   CommitProp
Boqun Feng           65       67.01%    1         33.33%
Pan Xinhui           32       32.99%    2         66.67%
Total                97       100.00%   3         100.00%

#define cmpxchg(ptr, o, n)                                                \
  ({                                                                      \
     __typeof__(*(ptr)) _o_ = (o);                                        \
     __typeof__(*(ptr)) _n_ = (n);                                        \
     (__typeof__(*(ptr))) __cmpxchg((ptr), (unsigned long)_o_,            \
                                    (unsigned long)_n_, sizeof(*(ptr)));  \
  })

#define cmpxchg_local(ptr, o, n)                                          \
  ({                                                                      \
     __typeof__(*(ptr)) _o_ = (o);                                        \
     __typeof__(*(ptr)) _n_ = (n);                                        \
     (__typeof__(*(ptr))) __cmpxchg_local((ptr), (unsigned long)_o_,      \
                                    (unsigned long)_n_, sizeof(*(ptr)));  \
  })

#define cmpxchg_relaxed(ptr, o, n)                                        \
({                                                                        \
        __typeof__(*(ptr)) _o_ = (o);                                     \
        __typeof__(*(ptr)) _n_ = (n);                                     \
        (__typeof__(*(ptr))) __cmpxchg_relaxed((ptr),                     \
                        (unsigned long)_o_, (unsigned long)_n_,           \
                        sizeof(*(ptr)));                                  \
})

#define cmpxchg_acquire(ptr, o, n)                                        \
({                                                                        \
        __typeof__(*(ptr)) _o_ = (o);                                     \
        __typeof__(*(ptr)) _n_ = (n);                                     \
        (__typeof__(*(ptr))) __cmpxchg_acquire((ptr),                     \
                        (unsigned long)_o_, (unsigned long)_n_,           \
                        sizeof(*(ptr)));                                  \
})

#ifdef CONFIG_PPC64
#define cmpxchg64(ptr, o, n)                                              \
  ({                                                                      \
        BUILD_BUG_ON(sizeof(*(ptr)) != 8);                                \
        cmpxchg((ptr), (o), (n));                                         \
  })
#define cmpxchg64_local(ptr, o, n)                                        \
  ({                                                                      \
        BUILD_BUG_ON(sizeof(*(ptr)) != 8);                                \
        cmpxchg_local((ptr), (o), (n));                                   \
  })
#define cmpxchg64_relaxed(ptr, o, n)                                      \
({                                                                        \
        BUILD_BUG_ON(sizeof(*(ptr)) != 8);                                \
        cmpxchg_relaxed((ptr), (o), (n));                                 \
})
#define cmpxchg64_acquire(ptr, o, n)                                      \
({                                                                        \
        BUILD_BUG_ON(sizeof(*(ptr)) != 8);                                \
        cmpxchg_acquire((ptr), (o), (n));                                 \
})
#else
#include <asm-generic/cmpxchg-local.h>
#define cmpxchg64_local(ptr, o, n) __cmpxchg64_local_generic((ptr), (o), (n))
#endif

#endif /* __KERNEL__ */
#endif /* _ASM_POWERPC_CMPXCHG_H_ */
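
To close, a minimal usage sketch of the cmpxchg() wrapper defined above: a lock-free saturating increment. The helper name is hypothetical; READ_ONCE() and UINT_MAX are standard kernel facilities.

static inline unsigned int saturating_inc(unsigned int *counter)
{
        unsigned int old, new;

        do {
                old = READ_ONCE(*counter);
                if (old == UINT_MAX)
                        return old;     /* already saturated; skip the store */
                new = old + 1;
        } while (cmpxchg(counter, old, new) != old);

        return new;
}

A successful cmpxchg() returns old, ending the loop; any other return value means another CPU intervened, and the loop reloads and retries.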

Overall Contributors

Person               Tokens   Prop      Commits   CommitProp
David Howells        580      46.03%    1         11.11%
Pan Xinhui           354      28.10%    2         22.22%
Boqun Feng           319      25.32%    3         33.33%
Michael Ellerman     6        0.48%     2         22.22%
Greg Kroah-Hartman   1        0.08%     1         11.11%
Total                1260     100.00%   9         100.00%
Created with cregit.