Linux 4.14: arch/x86/include/asm/barrier.h

/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_X86_BARRIER_H
#define _ASM_X86_BARRIER_H

#include <asm/alternative.h>
#include <asm/nops.h>

/*
 * Force strict CPU ordering.
 * And yes, this might be required on UP too when we're talking
 * to devices.
 */

#ifdef CONFIG_X86_32
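/*
 * ALTERNATIVE(old, new, feature) is patched at boot: CPUs lacking
 * X86_FEATURE_XMM2 (SSE2) keep the locked add to the stack, which acts as
 * a full memory barrier, while SSE2-capable CPUs get the dedicated
 * mfence/lfence/sfence instructions instead.
 */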

#define mb() asm volatile(ALTERNATIVE("lock; addl $0,0(%%esp)", "mfence", \
                                      X86_FEATURE_XMM2) ::: "memory", "cc")

#define rmb() asm volatile(ALTERNATIVE("lock; addl $0,0(%%esp)", "lfence", \
                                       X86_FEATURE_XMM2) ::: "memory", "cc")

#define wmb() asm volatile(ALTERNATIVE("lock; addl $0,0(%%esp)", "sfence", \
                                       X86_FEATURE_XMM2) ::: "memory", "cc")
#else
#define mb()	asm volatile("mfence":::"memory")
#define rmb()	asm volatile("lfence":::"memory")
#define wmb()	asm volatile("sfence" ::: "memory")
#endif
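
/*
 * Illustrative sketch, not part of this header: the classic driver use of
 * wmb() when talking to a device, as the comment above alludes to.  The
 * register offset, flag and descriptor layout below are made up.
 */
#if 0
static void example_post_buffer(void __iomem *regs, u32 *desc, dma_addr_t buf)
{
	desc[0] = lower_32_bits(buf);	/* fill in the descriptor first ...    */
	desc[1] = EXAMPLE_DESC_READY;	/* ... including the ownership flag    */
	wmb();				/* order those stores before the MMIO  */
	writel(1, regs + EXAMPLE_DB);	/* doorbell: device may now fetch desc */
}
#endif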

#ifdef CONFIG_X86_PPRO_FENCE
#define dma_rmb()	rmb()
#else
#define dma_rmb()	barrier()
#endif

#define dma_wmb()	barrier()
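
/*
 * Illustrative sketch, not part of this header: dma_rmb()/dma_wmb() order
 * CPU accesses to coherent DMA memory shared with a device, e.g. when
 * consuming and recycling ring descriptors.  Field, flag and helper names
 * below are made up.
 */
#if 0
static void example_reap_descriptor(struct example_desc *desc)
{
	if (READ_ONCE(desc->status) == EXAMPLE_DESC_DONE) {
		dma_rmb();			/* read status before payload    */
		example_process(desc->data);	/* payload is now guaranteed new */
		desc->len = 0;			/* reinitialise the slot ...     */
		dma_wmb();			/* ... before handing it back    */
		WRITE_ONCE(desc->status, EXAMPLE_DESC_FREE);
	}
}
#endif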


#define __smp_mb()	mb()

#define __smp_rmb()	dma_rmb()

#define __smp_wmb()	barrier()

#define __smp_store_mb(var, value) do { (void)xchg(&var, value); } while (0)
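
/*
 * Illustrative sketch, not part of this header: smp_store_mb() (the
 * SMP-facing wrapper built on __smp_store_mb() by asm-generic/barrier.h)
 * is a store followed by a full barrier; on x86 a single xchg provides
 * both.  The classic use is the store->load handshake in sleep/wake-up
 * style code.  Variable and task names below are made up.
 */
#if 0
/* waiter */
smp_store_mb(example_waiting, 1);	/* store, then full barrier ...   */
if (!READ_ONCE(example_work_pending))	/* ... so this load cannot be     */
	schedule();			/*     reordered before the store */

/* waker */
WRITE_ONCE(example_work_pending, 1);
smp_mb();
if (READ_ONCE(example_waiting))
	wake_up_process(example_task);
#endif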

#if defined(CONFIG_X86_PPRO_FENCE)

/*
 * With this option x86 doesn't provide its usual strong TSO memory
 * model, so we fall back to full barriers.
 */

#define __smp_store_release(p, v)					\
do {                                                                    \
        compiletime_assert_atomic_type(*p);                             \
        __smp_mb();                                                     \
        WRITE_ONCE(*p, v);                                              \
} while (0)


#define __smp_load_acquire(p)						\
({                                                                      \
        typeof(*p) ___p1 = READ_ONCE(*p);                               \
        compiletime_assert_atomic_type(*p);                             \
        __smp_mb();                                                     \
        ___p1;                                                          \
})

#else /* regular x86 TSO memory ordering */


#define __smp_store_release(p, v)					\
do {                                                                    \
        compiletime_assert_atomic_type(*p);                             \
        barrier();                                                      \
        WRITE_ONCE(*p, v);                                              \
} while (0)


#define __smp_load_acquire(p)						\
({                                                                      \
        typeof(*p) ___p1 = READ_ONCE(*p);                               \
        compiletime_assert_atomic_type(*p);                             \
        barrier();                                                      \
        ___p1;                                                          \
})

#endif
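
/*
 * Illustrative sketch, not part of this header: the usual producer/consumer
 * pairing of smp_store_release()/smp_load_acquire() (built from the __smp_
 * variants by asm-generic/barrier.h).  On TSO x86 only a compiler barrier
 * is needed, but the pattern is the same on every architecture.  Names are
 * made up.
 */
#if 0
static struct { int payload; } example_msg;
static int example_ready;

/* producer */
static void example_publish(int value)
{
	example_msg.payload = value;		/* write the data ...      */
	smp_store_release(&example_ready, 1);	/* ... then the flag       */
}

/* consumer */
static int example_consume(void)
{
	if (smp_load_acquire(&example_ready))	/* flag observed first ... */
		return example_msg.payload;	/* ... data is up to date  */
	return -1;
}
#endif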

/* Atomic operations are already serializing on x86 */

#define __smp_mb__before_atomic()	barrier()

#define __smp_mb__after_atomic()	barrier()
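
/*
 * Illustrative sketch, not part of this header: smp_mb__before_atomic() and
 * smp_mb__after_atomic() pair with non-value-returning atomics such as
 * atomic_inc(), which carry no ordering guarantee in the generic API; on
 * x86 the locked instruction already orders, so these reduce to compiler
 * barriers.  Names below are made up.
 */
#if 0
static atomic_t example_refs;
static int example_flag;

static void example(void)
{
	WRITE_ONCE(example_flag, 1);	/* must be visible ...             */
	smp_mb__before_atomic();	/* ... before the atomic op below  */
	atomic_inc(&example_refs);
}
#endif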

#include <asm-generic/barrier.h>

#endif /* _ASM_X86_BARRIER_H */

Overall Contributors

Person              Tokens  Prop     Commits  Commit Prop
David Howells           76  46.91%         1        9.09%
Peter Zijlstra          49  30.25%         3       27.27%
Michael S. Tsirkin      19  11.73%         4       36.36%
Alexander Duyck         15   9.26%         1        9.09%
Andrey Konovalov         2   1.23%         1        9.09%
Greg Kroah-Hartman       1   0.62%         1        9.09%
Total                  162 100.00%        11      100.00%