Contributors: 25
Author                Tokens  Token Proportion  Commits  Commit Proportion
Sean Christopherson      474            37.53%       28             38.36%
Avi Kivity               253            20.03%        6              8.22%
Paolo Bonzini            112             8.87%        7              9.59%
Radim Krčmář              99             7.84%        2              2.74%
Borislav Petkov           83             6.57%        2              2.74%
Oliver Upton              44             3.48%        2              2.74%
Will Auld                 31             2.45%        1              1.37%
Eugene Korenevsky         28             2.22%        1              1.37%
Kyle Huey                 24             1.90%        1              1.37%
Robert Ho                 17             1.35%        1              1.37%
Gleb Natapov              16             1.27%        3              4.11%
Dan Kenigsberg            15             1.19%        1              1.37%
Binbin Wu                 14             1.11%        2              2.74%
Hollis Blanchard          13             1.03%        1              1.37%
Nadav Har'El              12             0.95%        3              4.11%
Like Xu                    6             0.48%        1              1.37%
Xiantao Zhang              5             0.40%        2              2.74%
Eddie Dong                 4             0.32%        1              1.37%
Yu Zhang                   3             0.24%        1              1.37%
Ricardo Koller             3             0.24%        1              1.37%
Xiaoyao Li                 2             0.16%        2              2.74%
Josh Poimboeuf             2             0.16%        1              1.37%
Guang Zeng                 1             0.08%        1              1.37%
Alexander Graf             1             0.08%        1              1.37%
Greg Kroah-Hartman         1             0.08%        1              1.37%
Total                   1263                          73

/* SPDX-License-Identifier: GPL-2.0 */
#ifndef ARCH_X86_KVM_CPUID_H
#define ARCH_X86_KVM_CPUID_H

#include "reverse_cpuid.h"
#include <asm/cpu.h>
#include <asm/processor.h>
#include <uapi/asm/kvm_para.h>

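/*
 * KVM's supported capabilities for CPUID-based features, indexed by the
 * synthetic leaf numbers defined in reverse_cpuid.h and populated during
 * setup by kvm_set_cpu_caps().
 */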
extern u32 kvm_cpu_caps[NR_KVM_CPU_CAPS] __read_mostly;
void kvm_set_cpu_caps(void);

void kvm_update_cpuid_runtime(struct kvm_vcpu *vcpu);
void kvm_update_pv_runtime(struct kvm_vcpu *vcpu);
struct kvm_cpuid_entry2 *kvm_find_cpuid_entry_index(struct kvm_vcpu *vcpu,
						    u32 function, u32 index);
struct kvm_cpuid_entry2 *kvm_find_cpuid_entry(struct kvm_vcpu *vcpu,
					      u32 function);
int kvm_dev_ioctl_get_cpuid(struct kvm_cpuid2 *cpuid,
			    struct kvm_cpuid_entry2 __user *entries,
			    unsigned int type);
int kvm_vcpu_ioctl_set_cpuid(struct kvm_vcpu *vcpu,
			     struct kvm_cpuid *cpuid,
			     struct kvm_cpuid_entry __user *entries);
int kvm_vcpu_ioctl_set_cpuid2(struct kvm_vcpu *vcpu,
			      struct kvm_cpuid2 *cpuid,
			      struct kvm_cpuid_entry2 __user *entries);
int kvm_vcpu_ioctl_get_cpuid2(struct kvm_vcpu *vcpu,
			      struct kvm_cpuid2 *cpuid,
			      struct kvm_cpuid_entry2 __user *entries);
bool kvm_cpuid(struct kvm_vcpu *vcpu, u32 *eax, u32 *ebx,
	       u32 *ecx, u32 *edx, bool exact_only);

void __init kvm_init_xstate_sizes(void);
u32 xstate_required_size(u64 xstate_bv, bool compacted);

int cpuid_query_maxphyaddr(struct kvm_vcpu *vcpu);
u64 kvm_vcpu_reserved_gpa_bits_raw(struct kvm_vcpu *vcpu);

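/* Return the guest's effective MAXPHYADDR, as cached from guest CPUID. */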
static inline int cpuid_maxphyaddr(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.maxphyaddr;
}

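/* A GPA is legal if and only if it sets none of the vCPU's reserved bits. */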
static inline bool kvm_vcpu_is_legal_gpa(struct kvm_vcpu *vcpu, gpa_t gpa)
{
	return !(gpa & vcpu->arch.reserved_gpa_bits);
}

static inline bool kvm_vcpu_is_legal_aligned_gpa(struct kvm_vcpu *vcpu,
						 gpa_t gpa, gpa_t alignment)
{
	return IS_ALIGNED(gpa, alignment) && kvm_vcpu_is_legal_gpa(vcpu, gpa);
}

static inline bool page_address_valid(struct kvm_vcpu *vcpu, gpa_t gpa)
{
	return kvm_vcpu_is_legal_aligned_gpa(vcpu, gpa, PAGE_SIZE);
}

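/*
 * Override the CPUID output register associated with @leaf with KVM's
 * supported capabilities for that leaf.
 */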
static __always_inline void cpuid_entry_override(struct kvm_cpuid_entry2 *entry,
						 unsigned int leaf)
{
	u32 *reg = cpuid_entry_get_reg(entry, leaf * 32);

	BUILD_BUG_ON(leaf >= ARRAY_SIZE(kvm_cpu_caps));
	*reg = kvm_cpu_caps[leaf];
}

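/*
 * Retrieve a pointer to the guest CPUID register that enumerates
 * @x86_feature, or NULL if the relevant CPUID entry doesn't exist.
 */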
static __always_inline u32 *guest_cpuid_get_register(struct kvm_vcpu *vcpu,
						     unsigned int x86_feature)
{
	const struct cpuid_reg cpuid = x86_feature_cpuid(x86_feature);
	struct kvm_cpuid_entry2 *entry;

	entry = kvm_find_cpuid_entry_index(vcpu, cpuid.function, cpuid.index);
	if (!entry)
		return NULL;

	return __cpuid_entry_get_reg(entry, cpuid.reg);
}

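/* Check if @x86_feature is enumerated in the vCPU's guest CPUID. */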
static __always_inline bool guest_cpuid_has(struct kvm_vcpu *vcpu,
					    unsigned int x86_feature)
{
	u32 *reg;

	reg = guest_cpuid_get_register(vcpu, x86_feature);
	if (!reg)
		return false;

	return *reg & __feature_bit(x86_feature);
}

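/* Clear @x86_feature in the vCPU's guest CPUID, if the entry exists. */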
static __always_inline void guest_cpuid_clear(struct kvm_vcpu *vcpu,
					      unsigned int x86_feature)
{
	u32 *reg;

	reg = guest_cpuid_get_register(vcpu, x86_feature);
	if (reg)
		*reg &= ~__feature_bit(x86_feature);
}

static inline bool guest_cpuid_is_amd_compatible(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.is_amd_compatible;
}

static inline bool guest_cpuid_is_intel_compatible(struct kvm_vcpu *vcpu)
{
	return !guest_cpuid_is_amd_compatible(vcpu);
}

static inline int guest_cpuid_family(struct kvm_vcpu *vcpu)
{
	struct kvm_cpuid_entry2 *best;

	best = kvm_find_cpuid_entry(vcpu, 0x1);
	if (!best)
		return -1;

	return x86_family(best->eax);
}

static inline int guest_cpuid_model(struct kvm_vcpu *vcpu)
{
	struct kvm_cpuid_entry2 *best;

	best = kvm_find_cpuid_entry(vcpu, 0x1);
	if (!best)
		return -1;

	return x86_model(best->eax);
}

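/* Check if the guest's CPUID model number matches the host's boot CPU. */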
static inline bool cpuid_model_is_consistent(struct kvm_vcpu *vcpu)
{
	return boot_cpu_data.x86_model == guest_cpuid_model(vcpu);
}

static inline int guest_cpuid_stepping(struct kvm_vcpu *vcpu)
{
	struct kvm_cpuid_entry2 *best;

	best = kvm_find_cpuid_entry(vcpu, 0x1);
	if (!best)
		return -1;

	return x86_stepping(best->eax);
}

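/*
 * The guest can access MSR_IA32_SPEC_CTRL if any of the features backed by
 * the MSR, e.g. IBRS, STIBP or SSBD, is enumerated in guest CPUID.
 */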
static inline bool guest_has_spec_ctrl_msr(struct kvm_vcpu *vcpu)
{
	return (guest_cpuid_has(vcpu, X86_FEATURE_SPEC_CTRL) ||
		guest_cpuid_has(vcpu, X86_FEATURE_AMD_STIBP) ||
		guest_cpuid_has(vcpu, X86_FEATURE_AMD_IBRS) ||
		guest_cpuid_has(vcpu, X86_FEATURE_AMD_SSBD));
}

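/*
 * The guest can access MSR_IA32_PRED_CMD if it has IBPB (enumerated directly
 * on AMD, implied by SPEC_CTRL on Intel) or AMD's SBPB.
 */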
static inline bool guest_has_pred_cmd_msr(struct kvm_vcpu *vcpu)
{
	return (guest_cpuid_has(vcpu, X86_FEATURE_SPEC_CTRL) ||
		guest_cpuid_has(vcpu, X86_FEATURE_AMD_IBPB) ||
		guest_cpuid_has(vcpu, X86_FEATURE_SBPB));
}

static inline bool supports_cpuid_fault(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.msr_platform_info & MSR_PLATFORM_INFO_CPUID_FAULT;
}

static inline bool cpuid_fault_enabled(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.msr_misc_features_enables &
		  MSR_MISC_FEATURES_ENABLES_CPUID_FAULT;
}

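/*
 * Helpers to mutate and query kvm_cpu_caps.  reverse_cpuid_check() triggers
 * a compile-time assertion if the feature's leaf lacks a reverse-CPUID entry.
 */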
static __always_inline void kvm_cpu_cap_clear(unsigned int x86_feature)
{
	unsigned int x86_leaf = __feature_leaf(x86_feature);

	reverse_cpuid_check(x86_leaf);
	kvm_cpu_caps[x86_leaf] &= ~__feature_bit(x86_feature);
}

static __always_inline void kvm_cpu_cap_set(unsigned int x86_feature)
{
	unsigned int x86_leaf = __feature_leaf(x86_feature);

	reverse_cpuid_check(x86_leaf);
	kvm_cpu_caps[x86_leaf] |= __feature_bit(x86_feature);
}

static __always_inline u32 kvm_cpu_cap_get(unsigned int x86_feature)
{
	unsigned int x86_leaf = __feature_leaf(x86_feature);

	reverse_cpuid_check(x86_leaf);
	return kvm_cpu_caps[x86_leaf] & __feature_bit(x86_feature);
}

static __always_inline bool kvm_cpu_cap_has(unsigned int x86_feature)
{
	return !!kvm_cpu_cap_get(x86_feature);
}

static __always_inline void kvm_cpu_cap_check_and_set(unsigned int x86_feature)
{
	if (boot_cpu_has(x86_feature))
		kvm_cpu_cap_set(x86_feature);
}

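/*
 * Check if the vCPU is allowed to use the given KVM paravirt feature.  All
 * PV features are assumed enabled unless userspace opted in to enforcing the
 * guest's PV feature CPUID.
 */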
static __always_inline bool guest_pv_has(struct kvm_vcpu *vcpu,
					 unsigned int kvm_feature)
{
	if (!vcpu->arch.pv_cpuid.enforce)
		return true;

	return vcpu->arch.pv_cpuid.features & (1u << kvm_feature);
}

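/*
 * Governed features are cached in a per-vCPU bitmap so that KVM can quickly
 * check if a feature is both supported by KVM and enabled in guest CPUID,
 * without walking guest CPUID at runtime.
 */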
enum kvm_governed_features {
#define KVM_GOVERNED_FEATURE(x) KVM_GOVERNED_##x,
#include "governed_features.h"
	KVM_NR_GOVERNED_FEATURES
};

static __always_inline int kvm_governed_feature_index(unsigned int x86_feature)
{
	switch (x86_feature) {
#define KVM_GOVERNED_FEATURE(x) case x: return KVM_GOVERNED_##x;
#include "governed_features.h"
	default:
		return -1;
	}
}

static __always_inline bool kvm_is_governed_feature(unsigned int x86_feature)
{
	return kvm_governed_feature_index(x86_feature) >= 0;
}

static __always_inline void kvm_governed_feature_set(struct kvm_vcpu *vcpu,
						     unsigned int x86_feature)
{
	BUILD_BUG_ON(!kvm_is_governed_feature(x86_feature));

	__set_bit(kvm_governed_feature_index(x86_feature),
		  vcpu->arch.governed_features.enabled);
}

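/*
 * Mark @x86_feature as enabled for the guest iff it's supported by KVM and
 * enumerated in guest CPUID.
 */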
static __always_inline void kvm_governed_feature_check_and_set(struct kvm_vcpu *vcpu,
							       unsigned int x86_feature)
{
	if (kvm_cpu_cap_has(x86_feature) && guest_cpuid_has(vcpu, x86_feature))
		kvm_governed_feature_set(vcpu, x86_feature);
}

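/* Check if the guest is allowed to use the given governed feature. */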
static __always_inline bool guest_can_use(struct kvm_vcpu *vcpu,
					  unsigned int x86_feature)
{
	BUILD_BUG_ON(!kvm_is_governed_feature(x86_feature));

	return test_bit(kvm_governed_feature_index(x86_feature),
			vcpu->arch.governed_features.enabled);
}

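/*
 * If the guest can use LAM, strip CR3's LAM control bits, which aren't
 * address bits, before checking CR3 against the reserved GPA bits.
 */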
static inline bool kvm_vcpu_is_legal_cr3(struct kvm_vcpu *vcpu, unsigned long cr3)
{
	if (guest_can_use(vcpu, X86_FEATURE_LAM))
		cr3 &= ~(X86_CR3_LAM_U48 | X86_CR3_LAM_U57);

	return kvm_vcpu_is_legal_gpa(vcpu, cr3);
}

#endif /* ARCH_X86_KVM_CPUID_H */