arch/x86/include/asm/msr.h (Linux release 4.14)
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_X86_MSR_H
#define _ASM_X86_MSR_H
#include "msr-index.h"
#ifndef __ASSEMBLY__
#include <asm/asm.h>
#include <asm/errno.h>
#include <asm/cpumask.h>
#include <uapi/asm/msr.h>
struct msr {
        union {
                struct {
                        u32 l;          /* low 32 bits */
                        u32 h;          /* high 32 bits */
                };
                u64 q;                  /* whole 64-bit value */
        };
};

struct msr_info {
        u32 msr_no;                     /* MSR number to access */
        struct msr reg;                 /* value read or to be written */
        struct msr *msrs;               /* per-CPU results for the *_on_cpus() helpers */
        int err;                        /* 0 on success, -EIO on a faulting access */
};

struct msr_regs_info {
        u32 *regs;                      /* register array for {rd,wr}msr_safe_regs() */
        int err;
};

struct saved_msr {
        bool valid;                     /* info holds a successfully read value */
        struct msr_info info;
};

struct saved_msrs {
        unsigned int num;               /* number of entries in array */
        struct saved_msr *array;
};
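/*
 * Editor's usage sketch (not part of the original header): struct msr's
 * anonymous union lets the same storage be accessed whole (q) or as
 * 32-bit halves (l/h); on little-endian x86, l aliases the low word.
 * The function name below is hypothetical.
 */
#if 0   /* illustrative only */
static void example_struct_msr(void)
{
        struct msr m;

        m.q = 0x1122334455667788ULL;
        /* now m.l == 0x55667788 and m.h == 0x11223344 */
}
#endif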
/*
 * Both i386 and x86_64 return a 64-bit value in edx:eax, but gcc's "A"
 * constraint has different meanings. For i386, "A" means exactly
 * edx:eax, while for x86_64 it doesn't mean rdx:rax or edx:eax. Instead,
 * it means rax *or* rdx.
 */
#ifdef CONFIG_X86_64
/* Using 64-bit values saves one instruction clearing the high half of low */
#define DECLARE_ARGS(val, low, high) unsigned long low, high
#define EAX_EDX_VAL(val, low, high) ((low) | (high) << 32)
#define EAX_EDX_RET(val, low, high) "=a" (low), "=d" (high)
#else
#define DECLARE_ARGS(val, low, high) unsigned long long val
#define EAX_EDX_VAL(val, low, high) (val)
#define EAX_EDX_RET(val, low, high) "=A" (val)
#endif
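/*
 * Editor's sketch (not part of the original header): how the three macros
 * above compose in the read paths below. On 64-bit, low/high are 64-bit
 * registers, so OR-ing them needs no masking; on 32-bit, "=A" hands back
 * edx:eax as one unsigned long long and EAX_EDX_VAL() is the identity.
 * The function name is hypothetical.
 */
#if 0   /* illustrative only */
static unsigned long long example_eax_edx_pattern(void)
{
        DECLARE_ARGS(val, low, high);

        asm volatile("rdtsc" : EAX_EDX_RET(val, low, high));
        return EAX_EDX_VAL(val, low, high);     /* one 64-bit result */
}
#endif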
#ifdef CONFIG_TRACEPOINTS
/*
 * Be very careful with includes. This header is prone to include loops.
 */
#include <asm/atomic.h>
#include <linux/tracepoint-defs.h>
extern struct tracepoint __tracepoint_read_msr;
extern struct tracepoint __tracepoint_write_msr;
extern struct tracepoint __tracepoint_rdpmc;
#define msr_tracepoint_active(t) static_key_false(&(t).key)
extern void do_trace_write_msr(unsigned int msr, u64 val, int failed);
extern void do_trace_read_msr(unsigned int msr, u64 val, int failed);
extern void do_trace_rdpmc(unsigned int msr, u64 val, int failed);
#else
#define msr_tracepoint_active(t) false
static inline void do_trace_write_msr(unsigned int msr, u64 val, int failed) {}
static inline void do_trace_read_msr(unsigned int msr, u64 val, int failed) {}
static inline void do_trace_rdpmc(unsigned int msr, u64 val, int failed) {}
#endif
/*
 * __rdmsr() and __wrmsr() are the two primitives which are the bare minimum MSR
 * accessors and should not have any tracing or other functionality piggybacking
 * on them - those are *purely* for accessing MSRs and nothing more. So don't even
 * think of extending them - you will be slapped with a stinking trout or a frozen
 * shark will reach you, wherever you are! You've been warned.
 */
static inline unsigned long long notrace __rdmsr(unsigned int msr)
{
        DECLARE_ARGS(val, low, high);

        asm volatile("1: rdmsr\n"
                     "2:\n"
                     _ASM_EXTABLE_HANDLE(1b, 2b, ex_handler_rdmsr_unsafe)
                     : EAX_EDX_RET(val, low, high) : "c" (msr));

        return EAX_EDX_VAL(val, low, high);
}
static inline void notrace __wrmsr(unsigned int msr, u32 low, u32 high)
{
        asm volatile("1: wrmsr\n"
                     "2:\n"
                     _ASM_EXTABLE_HANDLE(1b, 2b, ex_handler_wrmsr_unsafe)
                     : : "c" (msr), "a"(low), "d" (high) : "memory");
}
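/*
 * Editor's sketch (not part of the original header): the raw primitives
 * in use. The caller splits and joins the 64-bit value itself, and a
 * faulting access is silently fixed up by the extable handlers above.
 * MSR_IA32_TSC_DEADLINE is only a stand-in MSR for illustration; the
 * function name is hypothetical.
 */
#if 0   /* illustrative only */
static void example_raw_primitives(void)
{
        u64 val = __rdmsr(MSR_IA32_TSC_DEADLINE);

        val += 1000000;                 /* push the deadline out */
        __wrmsr(MSR_IA32_TSC_DEADLINE, (u32)val, (u32)(val >> 32));
}
#endif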
static inline unsigned long long native_read_msr(unsigned int msr)
{
        unsigned long long val;

        val = __rdmsr(msr);

        if (msr_tracepoint_active(__tracepoint_read_msr))
                do_trace_read_msr(msr, val, 0);

        return val;
}
static inline unsigned long long native_read_msr_safe(unsigned int msr,
                                                      int *err)
{
        DECLARE_ARGS(val, low, high);

        asm volatile("2: rdmsr ; xor %[err],%[err]\n"
                     "1:\n\t"
                     ".section .fixup,\"ax\"\n\t"
                     "3: mov %[fault],%[err]\n\t"
                     "xorl %%eax, %%eax\n\t"
                     "xorl %%edx, %%edx\n\t"
                     "jmp 1b\n\t"
                     ".previous\n\t"
                     _ASM_EXTABLE(2b, 3b)
                     : [err] "=r" (*err), EAX_EDX_RET(val, low, high)
                     : "c" (msr), [fault] "i" (-EIO));
        if (msr_tracepoint_active(__tracepoint_read_msr))
                do_trace_read_msr(msr, EAX_EDX_VAL(val, low, high), *err);
        return EAX_EDX_VAL(val, low, high);
}
/* Can be uninlined because referenced by paravirt */
static inline void notrace
native_write_msr(unsigned int msr, u32 low, u32 high)
{
        __wrmsr(msr, low, high);

        if (msr_tracepoint_active(__tracepoint_write_msr))
                do_trace_write_msr(msr, ((u64)high << 32 | low), 0);
}
/* Can be uninlined because referenced by paravirt */
static inline int notrace
native_write_msr_safe(unsigned int msr, u32 low, u32 high)
{
        int err;

        asm volatile("2: wrmsr ; xor %[err],%[err]\n"
                     "1:\n\t"
                     ".section .fixup,\"ax\"\n\t"
                     "3: mov %[fault],%[err] ; jmp 1b\n\t"
                     ".previous\n\t"
                     _ASM_EXTABLE(2b, 3b)
                     : [err] "=a" (err)
                     : "c" (msr), "0" (low), "d" (high),
                       [fault] "i" (-EIO)
                     : "memory");
        if (msr_tracepoint_active(__tracepoint_write_msr))
                do_trace_write_msr(msr, ((u64)high << 32 | low), err);
        return err;
}
extern int rdmsr_safe_regs(u32 regs[8]);
extern int wrmsr_safe_regs(u32 regs[8]);
/**
 * rdtsc() - returns the current TSC without ordering constraints
 *
 * rdtsc() returns the result of RDTSC as a 64-bit integer. The
 * only ordering constraint it supplies is the ordering implied by
 * "asm volatile": it will put the RDTSC in the place you expect. The
 * CPU can and will speculatively execute that RDTSC, though, so the
 * results can be non-monotonic if compared on different CPUs.
 */
static __always_inline unsigned long long rdtsc(void)
{
        DECLARE_ARGS(val, low, high);

        asm volatile("rdtsc" : EAX_EDX_RET(val, low, high));

        return EAX_EDX_VAL(val, low, high);
}
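/*
 * Editor's sketch (not part of the original header): with plain rdtsc()
 * the CPU may speculate the reads across the code between them, so a
 * delta like this is only a rough estimate; see rdtsc_ordered() below.
 * Hypothetical function name.
 */
#if 0   /* illustrative only */
static u64 example_rough_delta(void)
{
        u64 t0 = rdtsc();
        /* code under test here */
        u64 t1 = rdtsc();

        return t1 - t0;
}
#endif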
/**
 * rdtsc_ordered() - read the current TSC in program order
 *
 * rdtsc_ordered() returns the result of RDTSC as a 64-bit integer.
 * It is ordered like a load to a global in-memory counter. It should
 * be impossible to observe non-monotonic rdtsc_ordered() behavior
 * across multiple CPUs as long as the TSC is synced.
 */
static __always_inline unsigned long long rdtsc_ordered(void)
{
        /*
         * The RDTSC instruction is not ordered relative to memory
         * access. The Intel SDM and the AMD APM are both vague on this
         * point, but empirically an RDTSC instruction can be
         * speculatively executed before prior loads. An RDTSC
         * immediately after an appropriate barrier appears to be
         * ordered as a normal load, that is, it provides the same
         * ordering guarantees as reading from a global memory location
         * that some other imaginary CPU is updating continuously with a
         * time stamp.
         */
        alternative_2("", "mfence", X86_FEATURE_MFENCE_RDTSC,
                      "lfence", X86_FEATURE_LFENCE_RDTSC);
        return rdtsc();
}
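/*
 * Editor's sketch (not part of the original header): measuring a code
 * region with rdtsc_ordered(), whose barrier keeps the reads from being
 * speculated across the measured work. Hypothetical function name.
 */
#if 0   /* illustrative only */
static u64 example_measure_cycles(void (*fn)(void))
{
        u64 t0, t1;

        t0 = rdtsc_ordered();
        fn();                           /* the work being measured */
        t1 = rdtsc_ordered();

        return t1 - t0;
}
#endif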
/* Deprecated, keep it for a cycle for easier merging: */
#define rdtscll(now) do { (now) = rdtsc_ordered(); } while (0)
static inline unsigned long long native_read_pmc(int counter)
{
        DECLARE_ARGS(val, low, high);

        asm volatile("rdpmc" : EAX_EDX_RET(val, low, high) : "c" (counter));
        if (msr_tracepoint_active(__tracepoint_rdpmc))
                do_trace_rdpmc(counter, EAX_EDX_VAL(val, low, high), 0);
        return EAX_EDX_VAL(val, low, high);
}
#ifdef CONFIG_PARAVIRT
#include <asm/paravirt.h>
#else
#include <linux/errno.h>
/*
 * Access to machine-specific registers (available on 586 and better only).
 * Note: the rd* operations modify the parameters directly (without using
 * pointer indirection); this allows gcc to optimize better.
 */
#define rdmsr(msr, low, high)                                   \
do {                                                            \
        u64 __val = native_read_msr((msr));                     \
        (void)((low) = (u32)__val);                             \
        (void)((high) = (u32)(__val >> 32));                    \
} while (0)
static inline void wrmsr(unsigned int msr, u32 low, u32 high)
{
        native_write_msr(msr, low, high);
}
#define rdmsrl(msr, val)                        \
        ((val) = native_read_msr((msr)))

static inline void wrmsrl(unsigned int msr, u64 val)
{
        native_write_msr(msr, (u32)(val & 0xffffffffULL), (u32)(val >> 32));
}
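/*
 * Editor's sketch (not part of the original header): typical use of the
 * split and whole-value accessors. MSR_IA32_APERF is just a convenient
 * real MSR for illustration; the function name is hypothetical.
 */
#if 0   /* illustrative only */
static void example_plain_accessors(void)
{
        u32 lo, hi;
        u64 val;

        rdmsr(MSR_IA32_APERF, lo, hi);          /* halves into two u32s     */
        rdmsrl(MSR_IA32_APERF, val);            /* whole value into one u64 */

        wrmsr(MSR_IA32_APERF, 0, 0);            /* write both halves        */
        wrmsrl(MSR_IA32_APERF, val);            /* wrmsrl() does the split  */
}
#endif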
/* wrmsr with exception handling */
static inline int wrmsr_safe(unsigned int msr, u32 low, u32 high)
{
        return native_write_msr_safe(msr, low, high);
}
/* rdmsr with exception handling */
#define rdmsr_safe(msr, low, high)                              \
({                                                              \
        int __err;                                              \
        u64 __val = native_read_msr_safe((msr), &__err);        \
        (*low) = (u32)__val;                                    \
        (*high) = (u32)(__val >> 32);                           \
        __err;                                                  \
})

static inline int rdmsrl_safe(unsigned int msr, unsigned long long *p)
{
        int err;

        *p = native_read_msr_safe(msr, &err);
        return err;
}
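/*
 * Editor's sketch (not part of the original header): the _safe variants
 * are how code probes an MSR that may not exist on the current CPU; a
 * faulting RDMSR returns -EIO instead of an unhandled #GP. Hypothetical
 * helper name.
 */
#if 0   /* illustrative only */
static bool example_msr_readable(unsigned int msr)
{
        u64 val;

        return rdmsrl_safe(msr, &val) == 0;     /* -EIO if the access faults */
}
#endif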
#define rdpmc(counter, low, high)                       \
do {                                                    \
        u64 _l = native_read_pmc((counter));            \
        (low)  = (u32)_l;                               \
        (high) = (u32)(_l >> 32);                       \
} while (0)

#define rdpmcl(counter, val) ((val) = native_read_pmc(counter))
#endif /* !CONFIG_PARAVIRT */
/*
 * 64-bit version of wrmsr_safe():
 */
static inline int wrmsrl_safe(u32 msr, u64 val)
{
        return wrmsr_safe(msr, (u32)val, (u32)(val >> 32));
}
#define write_tsc(low, high) wrmsr(MSR_IA32_TSC, (low), (high))
#define write_rdtscp_aux(val) wrmsr(MSR_TSC_AUX, (val), 0)
struct msr *msrs_alloc(void);
void msrs_free(struct msr *msrs);
int msr_set_bit(u32 msr, u8 bit);
int msr_clear_bit(u32 msr, u8 bit);
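/*
 * Editor's sketch (not part of the original header): msr_set_bit() and
 * msr_clear_bit() (arch/x86/lib/msr.c) do a read-modify-write of one MSR
 * bit and return < 0 on error, 0 if the bit was already in the requested
 * state, and > 0 if it had to be flipped. The function below is
 * hypothetical and assumes <linux/printk.h> for pr_info().
 */
#if 0   /* illustrative only */
static void example_clear_cpuid_limit(void)
{
        /* MSR_IA32_MISC_ENABLE bit 22 limits the reported CPUID max leaf. */
        if (msr_clear_bit(MSR_IA32_MISC_ENABLE,
                          MSR_IA32_MISC_ENABLE_LIMIT_CPUID_BIT) > 0)
                pr_info("CPUID level limit cleared\n");
}
#endif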
#ifdef CONFIG_SMP
int rdmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 *l, u32 *h);
int wrmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h);
int rdmsrl_on_cpu(unsigned int cpu, u32 msr_no, u64 *q);
int wrmsrl_on_cpu(unsigned int cpu, u32 msr_no, u64 q);
void rdmsr_on_cpus(const struct cpumask *mask, u32 msr_no, struct msr *msrs);
void wrmsr_on_cpus(const struct cpumask *mask, u32 msr_no, struct msr *msrs);
int rdmsr_safe_on_cpu(unsigned int cpu, u32 msr_no, u32 *l, u32 *h);
int wrmsr_safe_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h);
int rdmsrl_safe_on_cpu(unsigned int cpu, u32 msr_no, u64 *q);
int wrmsrl_safe_on_cpu(unsigned int cpu, u32 msr_no, u64 q);
int rdmsr_safe_regs_on_cpu(unsigned int cpu, u32 regs[8]);
int wrmsr_safe_regs_on_cpu(unsigned int cpu, u32 regs[8]);
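/*
 * Editor's sketch (not part of the original header): the *_on_cpu()
 * helpers (arch/x86/lib/msr-smp.c) run the access on the target CPU via
 * smp_call_function_single(), so they must not be used with IRQs disabled.
 * Hypothetical example, assuming <linux/printk.h> for pr_info().
 */
#if 0   /* illustrative only */
static void example_dump_msr_all_cpus(u32 msr)
{
        int cpu;

        for_each_online_cpu(cpu) {
                u64 val;

                if (!rdmsrl_safe_on_cpu(cpu, msr, &val))
                        pr_info("cpu%d: msr 0x%x = 0x%llx\n", cpu, msr, val);
        }
}
#endif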
#else /* CONFIG_SMP */
static inline int rdmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 *l, u32 *h)
{
        rdmsr(msr_no, *l, *h);
        return 0;
}
static inline int wrmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h)
{
        wrmsr(msr_no, l, h);
        return 0;
}
static inline int rdmsrl_on_cpu(unsigned int cpu, u32 msr_no, u64 *q)
{
        rdmsrl(msr_no, *q);
        return 0;
}
static inline int wrmsrl_on_cpu(unsigned int cpu, u32 msr_no, u64 q)
{
        wrmsrl(msr_no, q);
        return 0;
}
static inline void rdmsr_on_cpus(const struct cpumask *m, u32 msr_no,
                                 struct msr *msrs)
{
        rdmsr_on_cpu(0, msr_no, &(msrs[0].l), &(msrs[0].h));
}
static inline void wrmsr_on_cpus(const struct cpumask *m, u32 msr_no,
                                 struct msr *msrs)
{
        wrmsr_on_cpu(0, msr_no, msrs[0].l, msrs[0].h);
}
static inline int rdmsr_safe_on_cpu(unsigned int cpu, u32 msr_no,
                                    u32 *l, u32 *h)
{
        return rdmsr_safe(msr_no, l, h);
}
static inline int wrmsr_safe_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h)
{
        return wrmsr_safe(msr_no, l, h);
}
static inline int rdmsrl_safe_on_cpu(unsigned int cpu, u32 msr_no, u64 *q)
{
        return rdmsrl_safe(msr_no, q);
}
static inline int wrmsrl_safe_on_cpu(unsigned int cpu, u32 msr_no, u64 q)
{
        return wrmsrl_safe(msr_no, q);
}
static inline int rdmsr_safe_regs_on_cpu(unsigned int cpu, u32 regs[8])
{
        return rdmsr_safe_regs(regs);
}
static inline int wrmsr_safe_regs_on_cpu(unsigned int cpu, u32 regs[8])
{
        return wrmsr_safe_regs(regs);
}
#endif /* CONFIG_SMP */
#endif /* __ASSEMBLY__ */
#endif /* _ASM_X86_MSR_H */