Release 4.14 arch/x86/include/asm/cmpxchg.h

/* SPDX-License-Identifier: GPL-2.0 */
#ifndef ASM_X86_CMPXCHG_H
#define ASM_X86_CMPXCHG_H

#include <linux/compiler.h>
#include <asm/cpufeatures.h>
#include <asm/alternative.h> /* Provides LOCK_PREFIX */

/*
 * Non-existent functions to indicate usage errors at link time
 * (or compile-time if the compiler implements __compiletime_error()).
 */
extern void __xchg_wrong_size(void)
	__compiletime_error("Bad argument size for xchg");
extern void __cmpxchg_wrong_size(void)
	__compiletime_error("Bad argument size for cmpxchg");
extern void __xadd_wrong_size(void)
	__compiletime_error("Bad argument size for xadd");
extern void __add_wrong_size(void)
	__compiletime_error("Bad argument size for add");

/*
 * Constants for operation sizes. On 32-bit, the 64-bit size is set to
 * -1 because sizeof will never return -1, thereby making those switch
 * case statements guaranteed dead code which the compiler will
 * eliminate, and allowing the "missing symbol in the default case" to
 * indicate a usage error.
 */
#define __X86_CASE_B	1
#define __X86_CASE_W	2
#define __X86_CASE_L	4
#ifdef CONFIG_64BIT
#define __X86_CASE_Q	8
#else
#define	__X86_CASE_Q	-1		/* sizeof will never return -1 */
#endif

/*
 * An exchange-type operation, which takes a value and a pointer, and
 * returns the old value.
 */

#define __xchg_op(ptr, arg, op, lock)					\
	({                                                              \
                __typeof__ (*(ptr)) __ret = (arg);                      \
                switch (sizeof(*(ptr))) {                               \
                case __X86_CASE_B:                                      \
                        asm volatile (lock #op "b %b0, %1\n"            \
                                      : "+q" (__ret), "+m" (*(ptr))     \
                                      : : "memory", "cc");              \
                        break;                                          \
                case __X86_CASE_W:                                      \
                        asm volatile (lock #op "w %w0, %1\n"            \
                                      : "+r" (__ret), "+m" (*(ptr))     \
                                      : : "memory", "cc");              \
                        break;                                          \
                case __X86_CASE_L:                                      \
                        asm volatile (lock #op "l %0, %1\n"             \
                                      : "+r" (__ret), "+m" (*(ptr))     \
                                      : : "memory", "cc");              \
                        break;                                          \
                case __X86_CASE_Q:                                      \
                        asm volatile (lock #op "q %q0, %1\n"            \
                                      : "+r" (__ret), "+m" (*(ptr))     \
                                      : : "memory", "cc");              \
                        break;                                          \
                default:                                                \
                        __ ## op ## _wrong_size();                      \
                }                                                       \
                __ret;                                                  \
        })

/*
 * Note: no "lock" prefix even on SMP: xchg always implies lock anyway.
 * Since this is generally used to protect other memory information, we
 * use "asm volatile" and "memory" clobbers to prevent gcc from moving
 * information around.
 */

#define xchg(ptr, v)	__xchg_op((ptr), (v), xchg, "")
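
/*
 * Example usage (illustrative sketch; "ready" and first_time_setup()
 * are hypothetical): atomically publish a new value and act on the
 * value that was replaced.
 *
 *	unsigned long prev = xchg(&ready, 1UL);
 *	if (prev == 0)
 *		first_time_setup();
 */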

/*
 * Atomic compare and exchange.  Compare OLD with MEM, if identical,
 * store NEW in MEM.  Return the initial value in MEM.  Success is
 * indicated by comparing RETURN with OLD.
 */
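
/*
 * Example usage with cmpxchg(), defined below (illustrative sketch;
 * "counter" is a hypothetical variable): the classic read/modify/retry
 * loop.  The update succeeded iff the returned value equals "old".
 *
 *	unsigned int old, new;
 *	do {
 *		old = READ_ONCE(counter);
 *		new = old + 1;
 *	} while (cmpxchg(&counter, old, new) != old);
 */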

#define __raw_cmpxchg(ptr, old, new, size, lock)			\
({                                                                      \
        __typeof__(*(ptr)) __ret;                                       \
        __typeof__(*(ptr)) __old = (old);                               \
        __typeof__(*(ptr)) __new = (new);                               \
        switch (size) {                                                 \
        case __X86_CASE_B:                                              \
        {                                                               \
                volatile u8 *__ptr = (volatile u8 *)(ptr);              \
                asm volatile(lock "cmpxchgb %2,%1"                      \
                             : "=a" (__ret), "+m" (*__ptr)              \
                             : "q" (__new), "0" (__old)                 \
                             : "memory");                               \
                break;                                                  \
        }                                                               \
        case __X86_CASE_W:                                              \
        {                                                               \
                volatile u16 *__ptr = (volatile u16 *)(ptr);            \
                asm volatile(lock "cmpxchgw %2,%1"                      \
                             : "=a" (__ret), "+m" (*__ptr)              \
                             : "r" (__new), "0" (__old)                 \
                             : "memory");                               \
                break;                                                  \
        }                                                               \
        case __X86_CASE_L:                                              \
        {                                                               \
                volatile u32 *__ptr = (volatile u32 *)(ptr);            \
                asm volatile(lock "cmpxchgl %2,%1"                      \
                             : "=a" (__ret), "+m" (*__ptr)              \
                             : "r" (__new), "0" (__old)                 \
                             : "memory");                               \
                break;                                                  \
        }                                                               \
        case __X86_CASE_Q:                                              \
        {                                                               \
                volatile u64 *__ptr = (volatile u64 *)(ptr);            \
                asm volatile(lock "cmpxchgq %2,%1"                      \
                             : "=a" (__ret), "+m" (*__ptr)              \
                             : "r" (__new), "0" (__old)                 \
                             : "memory");                               \
                break;                                                  \
        }                                                               \
        default:                                                        \
                __cmpxchg_wrong_size();                                 \
        }                                                               \
        __ret;                                                          \
})

#define __cmpxchg(ptr, old, new, size)					\
	__raw_cmpxchg((ptr), (old), (new), (size), LOCK_PREFIX)

#define __sync_cmpxchg(ptr, old, new, size)				\
	__raw_cmpxchg((ptr), (old), (new), (size), "lock; ")

#define __cmpxchg_local(ptr, old, new, size)				\
	__raw_cmpxchg((ptr), (old), (new), (size), "")

#ifdef CONFIG_X86_32
# include <asm/cmpxchg_32.h>
#else
# include <asm/cmpxchg_64.h>
#endif

#define cmpxchg(ptr, old, new)						\
	__cmpxchg(ptr, old, new, sizeof(*(ptr)))

#define sync_cmpxchg(ptr, old, new)					\
	__sync_cmpxchg(ptr, old, new, sizeof(*(ptr)))

#define cmpxchg_local(ptr, old, new)					\
	__cmpxchg_local(ptr, old, new, sizeof(*(ptr)))

#define __raw_try_cmpxchg(_ptr, _pold, _new, size, lock)		\
({                                                                      \
        bool success;                                                   \
        __typeof__(_ptr) _old = (__typeof__(_ptr))(_pold);              \
        __typeof__(*(_ptr)) __old = *_old;                              \
        __typeof__(*(_ptr)) __new = (_new);                             \
        switch (size) {                                                 \
        case __X86_CASE_B:                                              \
        {                                                               \
                volatile u8 *__ptr = (volatile u8 *)(_ptr);             \
                asm volatile(lock "cmpxchgb %[new], %[ptr]"             \
                             CC_SET(z)                                  \
                             : CC_OUT(z) (success),                     \
                               [ptr] "+m" (*__ptr),                     \
                               [old] "+a" (__old)                       \
                             : [new] "q" (__new)                        \
                             : "memory");                               \
                break;                                                  \
        }                                                               \
        case __X86_CASE_W:                                              \
        {                                                               \
                volatile u16 *__ptr = (volatile u16 *)(_ptr);           \
                asm volatile(lock "cmpxchgw %[new], %[ptr]"             \
                             CC_SET(z)                                  \
                             : CC_OUT(z) (success),                     \
                               [ptr] "+m" (*__ptr),                     \
                               [old] "+a" (__old)                       \
                             : [new] "r" (__new)                        \
                             : "memory");                               \
                break;                                                  \
        }                                                               \
        case __X86_CASE_L:                                              \
        {                                                               \
                volatile u32 *__ptr = (volatile u32 *)(_ptr);           \
                asm volatile(lock "cmpxchgl %[new], %[ptr]"             \
                             CC_SET(z)                                  \
                             : CC_OUT(z) (success),                     \
                               [ptr] "+m" (*__ptr),                     \
                               [old] "+a" (__old)                       \
                             : [new] "r" (__new)                        \
                             : "memory");                               \
                break;                                                  \
        }                                                               \
        case __X86_CASE_Q:                                              \
        {                                                               \
                volatile u64 *__ptr = (volatile u64 *)(_ptr);           \
                asm volatile(lock "cmpxchgq %[new], %[ptr]"             \
                             CC_SET(z)                                  \
                             : CC_OUT(z) (success),                     \
                               [ptr] "+m" (*__ptr),                     \
                               [old] "+a" (__old)                       \
                             : [new] "r" (__new)                        \
                             : "memory");                               \
                break;                                                  \
        }                                                               \
        default:                                                        \
                __cmpxchg_wrong_size();                                 \
        }                                                               \
        if (unlikely(!success))                                         \
                *_old = __old;                                          \
        likely(success);                                                \
})

#define __try_cmpxchg(ptr, pold, new, size)				\
	__raw_try_cmpxchg((ptr), (pold), (new), (size), LOCK_PREFIX)

#define try_cmpxchg(ptr, pold, new)					\
	__try_cmpxchg((ptr), (pold), (new), sizeof(*(ptr)))
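
/*
 * Example (illustrative sketch; "counter" as above): on failure,
 * try_cmpxchg() writes the value currently in memory back through
 * "pold", so the retry loop does not need to re-read memory itself.
 *
 *	unsigned int new, old = READ_ONCE(counter);
 *	do {
 *		new = old + 1;
 *	} while (!try_cmpxchg(&counter, &old, new));
 */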

/*
 * xadd() adds "inc" to "*ptr" and atomically returns the previous
 * value of "*ptr".
 *
 * xadd() is locked when multiple CPUs are online
 */

#define __xadd(ptr, inc, lock)	__xchg_op((ptr), (inc), xadd, lock)
#define xadd(ptr, inc)		__xadd((ptr), (inc), LOCK_PREFIX)
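
/*
 * Example (illustrative sketch; "counter" as above): fetch-and-add.
 * The returned value is the one observed before the addition.
 *
 *	unsigned int prev = xadd(&counter, 1);
 */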


#define __cmpxchg_double(pfx, p1, p2, o1, o2, n1, n2)			\
({                                                                      \
        bool __ret;                                                     \
        __typeof__(*(p1)) __old1 = (o1), __new1 = (n1);                 \
        __typeof__(*(p2)) __old2 = (o2), __new2 = (n2);                 \
        BUILD_BUG_ON(sizeof(*(p1)) != sizeof(long));                    \
        BUILD_BUG_ON(sizeof(*(p2)) != sizeof(long));                    \
        VM_BUG_ON((unsigned long)(p1) % (2 * sizeof(long)));            \
        VM_BUG_ON((unsigned long)((p1) + 1) != (unsigned long)(p2));    \
        asm volatile(pfx "cmpxchg%c4b %2; sete %0"                      \
                     : "=a" (__ret), "+d" (__old2),                     \
                       "+m" (*(p1)), "+m" (*(p2))                       \
                     : "i" (2 * sizeof(long)), "a" (__old1),            \
                       "b" (__new1), "c" (__new2));                     \
        __ret;                                                          \
})

#define cmpxchg_double(p1, p2, o1, o2, n1, n2) \
	__cmpxchg_double(LOCK_PREFIX, p1, p2, o1, o2, n1, n2)

#define cmpxchg_double_local(p1, p2, o1, o2, n1, n2) \
	__cmpxchg_double(, p1, p2, o1, o2, n1, n2)
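
/*
 * Example (illustrative sketch; "s", its members and success() are
 * hypothetical; the two adjacent long-sized members must be aligned
 * to 2 * sizeof(long)): atomically replace the pair only if both
 * current values match.
 *
 *	if (cmpxchg_double(&s.ptr, &s.tag,
 *			   old_ptr, old_tag, new_ptr, new_tag))
 *		success();
 */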

#endif	/* ASM_X86_CMPXCHG_H */

Overall Contributors

Person                 Tokens    Prop   Commits   Commit Prop
Jeremy Fitzhardinge       226  65.32%         6        37.50%
Jan Beulich                59  17.05%         2        12.50%
Peter Zijlstra             41  11.85%         1         6.25%
Thomas Gleixner            11   3.18%         1         6.25%
Borislav Petkov             3   0.87%         1         6.25%
David Howells               2   0.58%         1         6.25%
Li Zhong                    1   0.29%         1         6.25%
Nikolay Borisov             1   0.29%         1         6.25%
Dmitriy Vyukov              1   0.29%         1         6.25%
Greg Kroah-Hartman          1   0.29%         1         6.25%
Total                     346 100.00%        16       100.00%