Contributors: 6

Author             | Tokens | Token Proportion | Commits | Commit Proportion
Al Viro            | 65     | 78.31%           | 1       | 16.67%
Richard Weinberger | 7      | 8.43%            | 1       | 16.67%
Alexander Duyck    | 6      | 7.23%            | 1       | 16.67%
Michael S. Tsirkin | 3      | 3.61%            | 1       | 16.67%
Borislav Petkov    | 1      | 1.20%            | 1       | 16.67%
Greg Kroah-Hartman | 1      | 1.20%            | 1       | 16.67%
Total              | 83     |                  | 6       |
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_UM_BARRIER_H_
#define _ASM_UM_BARRIER_H_

#include <asm/asm.h>
#include <asm/segment.h>
#include <asm/cpufeatures.h>
#include <asm/cmpxchg.h>
#include <asm/nops.h>

#include <linux/kernel.h>
#include <linux/irqflags.h>

/*
 * Force strict CPU ordering.
 * And yes, this is required on UP too when we're talking
 * to devices.
 */

#ifdef CONFIG_X86_32

/*
 * On 32-bit, fall back to a locked add on the stack, which is fully
 * ordered on x86; alternative() patches in the real fence instruction
 * at boot when the CPU advertises SSE (sfence) or SSE2 (mfence/lfence).
 */
#define mb() alternative("lock; addl $0,0(%%esp)", "mfence", X86_FEATURE_XMM2)
#define rmb() alternative("lock; addl $0,0(%%esp)", "lfence", X86_FEATURE_XMM2)
#define wmb() alternative("lock; addl $0,0(%%esp)", "sfence", X86_FEATURE_XMM)

#else /* CONFIG_X86_32 */

/* 64-bit x86 always has SSE2, so the fence instructions can be used directly. */
#define mb() asm volatile("mfence" : : : "memory")
#define rmb() asm volatile("lfence" : : : "memory")
#define wmb() asm volatile("sfence" : : : "memory")

#endif /* CONFIG_X86_32 */

/*
 * x86 keeps coherent (cacheable) memory accesses ordered, so a compiler
 * barrier is sufficient to order accesses to memory used for DMA.
 */
#define dma_rmb() barrier()
#define dma_wmb() barrier()

#include <asm-generic/barrier.h>

#endif /* _ASM_UM_BARRIER_H_ */
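
For illustration, here is a minimal sketch of how the wmb()/rmb() pair defined above is typically used; the producer/consumer functions and the payload/ready variables are hypothetical and not part of this header. The writer orders its payload store before the flag store, and the reader orders the flag load before the payload load.

/*
 * Hypothetical usage sketch (not part of barrier.h): a lockless
 * publish/consume pattern. wmb() keeps the payload store before the
 * flag store; rmb() keeps the flag load before the payload load, so a
 * consumer that sees ready == 1 also sees the payload.
 */
static int payload;
static int ready;

static void producer(void)
{
	payload = 42;	/* write the data...            */
	wmb();		/* ...then order it before...   */
	ready = 1;	/* ...publishing the ready flag */
}

static int consumer(void)
{
	if (!ready)	/* observe the flag first...      */
		return -1;
	rmb();		/* ...then order the payload read */
	return payload;
}

In real kernel code this pattern would more often be written with smp_store_release()/smp_load_acquire(), but the explicit barriers make the ordering this header provides easy to see.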