Contributors: 6

| Author             | Tokens | Token Proportion | Commits | Commit Proportion |
|--------------------|--------|------------------|---------|-------------------|
| Al Viro            | 43     | 74.14%           | 2       | 28.57%            |
| Richard Weinberger | 7      | 12.07%           | 1       | 14.29%            |
| Uros Bizjak        | 3      | 5.17%            | 1       | 14.29%            |
| Michael S. Tsirkin | 3      | 5.17%            | 1       | 14.29%            |
| Johannes Berg      | 1      | 1.72%            | 1       | 14.29%            |
| Greg Kroah-Hartman | 1      | 1.72%            | 1       | 14.29%            |
| Total              | 58     |                  | 7       |                   |
/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Memory barrier primitives (mb/rmb/wmb) for User-Mode Linux on x86.
 * The generic <asm-generic/barrier.h> included at the bottom derives the
 * remaining barrier macros from the three defined here.
 */
#ifndef _ASM_UM_BARRIER_H_
#define _ASM_UM_BARRIER_H_
#include <asm/cpufeatures.h>
#include <asm/alternative.h>
/*
 * Force strict CPU ordering.
 * And yes, this is required on UP too when we're talking
 * to devices.
 */
#ifdef CONFIG_X86_32
/*
 * 32-bit: the fence instructions may be absent on older CPUs, so
 * alternative() patches at boot time between a fallback "lock addl" to
 * the top of the stack (a serializing locked operation) and the real
 * fence instruction when the CPU advertises the matching feature bit.
 * NOTE(review): mfence/lfence are gated on X86_FEATURE_XMM2 (SSE2)
 * while sfence needs only X86_FEATURE_XMM (SSE) — presumably matching
 * the ISA level each instruction was introduced with; confirm against
 * arch/x86/include/asm/barrier.h.
 */
#define mb() alternative("lock addl $0,0(%%esp)", "mfence", X86_FEATURE_XMM2)
#define rmb() alternative("lock addl $0,0(%%esp)", "lfence", X86_FEATURE_XMM2)
#define wmb() alternative("lock addl $0,0(%%esp)", "sfence", X86_FEATURE_XMM)
#else /* CONFIG_X86_32 */
/*
 * 64-bit: the fence instructions are always available, so emit them
 * directly; the "memory" clobber also makes each a compiler barrier.
 */
#define mb() asm volatile("mfence" : : : "memory")
#define rmb() asm volatile("lfence" : : : "memory")
#define wmb() asm volatile("sfence" : : : "memory")
#endif /* CONFIG_X86_32 */
#include <asm-generic/barrier.h>
#endif /* _ASM_UM_BARRIER_H_ */