cregit-Linux: how code gets into the kernel

Release 4.13, arch/x86/kvm/x86.c (directory: arch/x86/kvm)
/*
 * Kernel-based Virtual Machine driver for Linux
 *
 * derived from drivers/kvm/kvm_main.c
 *
 * Copyright (C) 2006 Qumranet, Inc.
 * Copyright (C) 2008 Qumranet, Inc.
 * Copyright IBM Corporation, 2008
 * Copyright 2010 Red Hat, Inc. and/or its affiliates.
 *
 * Authors:
 *   Avi Kivity   <avi@qumranet.com>
 *   Yaniv Kamay  <yaniv@qumranet.com>
 *   Amit Shah    <amit.shah@qumranet.com>
 *   Ben-Ami Yassour <benami@il.ibm.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 */

#include <linux/kvm_host.h>
#include "irq.h"
#include "mmu.h"
#include "i8254.h"
#include "tss.h"
#include "kvm_cache_regs.h"
#include "x86.h"
#include "cpuid.h"
#include "pmu.h"
#include "hyperv.h"

#include <linux/clocksource.h>
#include <linux/interrupt.h>
#include <linux/kvm.h>
#include <linux/fs.h>
#include <linux/vmalloc.h>
#include <linux/export.h>
#include <linux/moduleparam.h>
#include <linux/mman.h>
#include <linux/highmem.h>
#include <linux/iommu.h>
#include <linux/intel-iommu.h>
#include <linux/cpufreq.h>
#include <linux/user-return-notifier.h>
#include <linux/srcu.h>
#include <linux/slab.h>
#include <linux/perf_event.h>
#include <linux/uaccess.h>
#include <linux/hash.h>
#include <linux/pci.h>
#include <linux/timekeeper_internal.h>
#include <linux/pvclock_gtod.h>
#include <linux/kvm_irqfd.h>
#include <linux/irqbypass.h>
#include <linux/sched/stat.h>

#include <trace/events/kvm.h>

#include <asm/debugreg.h>
#include <asm/msr.h>
#include <asm/desc.h>
#include <asm/mce.h>
#include <linux/kernel_stat.h>
#include <asm/fpu/internal.h> /* Ugh! */
#include <asm/pvclock.h>
#include <asm/div64.h>
#include <asm/irq_remapping.h>


#define CREATE_TRACE_POINTS
#include "trace.h"


#define MAX_IO_MSRS 256

#define KVM_MAX_MCE_BANKS 32

u64 __read_mostly kvm_mce_cap_supported = MCG_CTL_P | MCG_SER_P;

EXPORT_SYMBOL_GPL(kvm_mce_cap_supported);


#define emul_to_vcpu(ctxt) \
	container_of(ctxt, struct kvm_vcpu, arch.emulate_ctxt)

/* EFER defaults:
 * - enable syscall per default because it's emulated by KVM
 * - enable LME and LMA per default on 64 bit KVM
 */
#ifdef CONFIG_X86_64
static u64 __read_mostly efer_reserved_bits = ~((u64)(EFER_SCE | EFER_LME | EFER_LMA));
#else
static u64 __read_mostly efer_reserved_bits = ~((u64)EFER_SCE);
#endif


#define VM_STAT(x) offsetof(struct kvm, stat.x), KVM_STAT_VM

#define VCPU_STAT(x) offsetof(struct kvm_vcpu, stat.x), KVM_STAT_VCPU


#define KVM_X2APIC_API_VALID_FLAGS (KVM_X2APIC_API_USE_32BIT_IDS | \
                                    KVM_X2APIC_API_DISABLE_BROADCAST_QUIRK)

static void update_cr8_intercept(struct kvm_vcpu *vcpu);
static void process_nmi(struct kvm_vcpu *vcpu);
static void enter_smm(struct kvm_vcpu *vcpu);
static void __kvm_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags);


struct kvm_x86_ops *kvm_x86_ops __read_mostly;

EXPORT_SYMBOL_GPL(kvm_x86_ops);


static bool __read_mostly ignore_msrs = 0;
module_param(ignore_msrs, bool, S_IRUGO | S_IWUSR);


unsigned int min_timer_period_us = 500;
module_param(min_timer_period_us, uint, S_IRUGO | S_IWUSR);


static bool __read_mostly kvmclock_periodic_sync = true;
module_param(kvmclock_periodic_sync, bool, S_IRUGO);


bool __read_mostly kvm_has_tsc_control;

EXPORT_SYMBOL_GPL(kvm_has_tsc_control);

u32  __read_mostly kvm_max_guest_tsc_khz;

EXPORT_SYMBOL_GPL(kvm_max_guest_tsc_khz);

u8   __read_mostly kvm_tsc_scaling_ratio_frac_bits;

EXPORT_SYMBOL_GPL(kvm_tsc_scaling_ratio_frac_bits);

u64  __read_mostly kvm_max_tsc_scaling_ratio;

EXPORT_SYMBOL_GPL(kvm_max_tsc_scaling_ratio);

u64 __read_mostly kvm_default_tsc_scaling_ratio;

EXPORT_SYMBOL_GPL(kvm_default_tsc_scaling_ratio);

/* tsc tolerance in parts per million - default to 1/2 of the NTP threshold */

static u32 __read_mostly tsc_tolerance_ppm = 250;
module_param(tsc_tolerance_ppm, uint, S_IRUGO | S_IWUSR);

/* lapic timer advance (tscdeadline mode only) in nanoseconds */

unsigned int __read_mostly lapic_timer_advance_ns = 0;
module_param(lapic_timer_advance_ns, uint, S_IRUGO | S_IWUSR);


static bool __read_mostly vector_hashing = true;
module_param(vector_hashing, bool, S_IRUGO);


#define KVM_NR_SHARED_MSRS 16


struct kvm_shared_msrs_global {
	int nr;
	u32 msrs[KVM_NR_SHARED_MSRS];
};

struct kvm_shared_msrs {
	struct user_return_notifier urn;
	bool registered;
	struct kvm_shared_msr_values {
		u64 host;
		u64 curr;
	} values[KVM_NR_SHARED_MSRS];
};


static struct kvm_shared_msrs_global __read_mostly shared_msrs_global;

static struct kvm_shared_msrs __percpu *shared_msrs;


struct kvm_stats_debugfs_item debugfs_entries[] = {
	{ "pf_fixed", VCPU_STAT(pf_fixed) },
	{ "pf_guest", VCPU_STAT(pf_guest) },
	{ "tlb_flush", VCPU_STAT(tlb_flush) },
	{ "invlpg", VCPU_STAT(invlpg) },
	{ "exits", VCPU_STAT(exits) },
	{ "io_exits", VCPU_STAT(io_exits) },
	{ "mmio_exits", VCPU_STAT(mmio_exits) },
	{ "signal_exits", VCPU_STAT(signal_exits) },
	{ "irq_window", VCPU_STAT(irq_window_exits) },
	{ "nmi_window", VCPU_STAT(nmi_window_exits) },
	{ "halt_exits", VCPU_STAT(halt_exits) },
	{ "halt_successful_poll", VCPU_STAT(halt_successful_poll) },
	{ "halt_attempted_poll", VCPU_STAT(halt_attempted_poll) },
	{ "halt_poll_invalid", VCPU_STAT(halt_poll_invalid) },
	{ "halt_wakeup", VCPU_STAT(halt_wakeup) },
	{ "hypercalls", VCPU_STAT(hypercalls) },
	{ "request_irq", VCPU_STAT(request_irq_exits) },
	{ "irq_exits", VCPU_STAT(irq_exits) },
	{ "host_state_reload", VCPU_STAT(host_state_reload) },
	{ "efer_reload", VCPU_STAT(efer_reload) },
	{ "fpu_reload", VCPU_STAT(fpu_reload) },
	{ "insn_emulation", VCPU_STAT(insn_emulation) },
	{ "insn_emulation_fail", VCPU_STAT(insn_emulation_fail) },
	{ "irq_injections", VCPU_STAT(irq_injections) },
	{ "nmi_injections", VCPU_STAT(nmi_injections) },
	{ "req_event", VCPU_STAT(req_event) },
	{ "mmu_shadow_zapped", VM_STAT(mmu_shadow_zapped) },
	{ "mmu_pte_write", VM_STAT(mmu_pte_write) },
	{ "mmu_pte_updated", VM_STAT(mmu_pte_updated) },
	{ "mmu_pde_zapped", VM_STAT(mmu_pde_zapped) },
	{ "mmu_flooded", VM_STAT(mmu_flooded) },
	{ "mmu_recycled", VM_STAT(mmu_recycled) },
	{ "mmu_cache_miss", VM_STAT(mmu_cache_miss) },
	{ "mmu_unsync", VM_STAT(mmu_unsync) },
	{ "remote_tlb_flush", VM_STAT(remote_tlb_flush) },
	{ "largepages", VM_STAT(lpages) },
	{ "max_mmu_page_hash_collisions",
		VM_STAT(max_mmu_page_hash_collisions) },
	{ NULL }
};


u64 __read_mostly host_xcr0;

static int emulator_fix_hypercall(struct x86_emulate_ctxt *ctxt);


static inline void kvm_async_pf_hash_reset(struct kvm_vcpu *vcpu)
{
	int i;
	for (i = 0; i < roundup_pow_of_two(ASYNC_PF_PER_VCPU); i++)
		vcpu->arch.apf.gfns[i] = ~0;
}

Contributors:
	Gleb Natapov: 45 tokens (100.00%), 1 commit (100.00%)
	Total: 45 tokens, 1 commit


static void kvm_on_user_return(struct user_return_notifier *urn)
{
	unsigned slot;
	struct kvm_shared_msrs *locals
		= container_of(urn, struct kvm_shared_msrs, urn);
	struct kvm_shared_msr_values *values;
	unsigned long flags;

	/*
	 * Disabling irqs at this point since the following code could be
	 * interrupted and executed through kvm_arch_hardware_disable()
	 */
	local_irq_save(flags);
	if (locals->registered) {
		locals->registered = false;
		user_return_notifier_unregister(urn);
	}
	local_irq_restore(flags);
	for (slot = 0; slot < shared_msrs_global.nr; ++slot) {
		values = &locals->values[slot];
		if (values->host != values->curr) {
			wrmsrl(shared_msrs_global.msrs[slot], values->host);
			values->curr = values->host;
		}
	}
}

Contributors:
	Avi Kivity: 47 tokens (36.43%), 1 commit (20.00%)
	Ignacio Alvarado: 34 tokens (26.36%), 1 commit (20.00%)
	Sheng Yang: 25 tokens (19.38%), 1 commit (20.00%)
	Carsten Otte: 21 tokens (16.28%), 1 commit (20.00%)
	Akinobu Mita: 2 tokens (1.55%), 1 commit (20.00%)
	Total: 129 tokens, 5 commits


static void shared_msr_update(unsigned slot, u32 msr)
{
	u64 value;
	unsigned int cpu = smp_processor_id();
	struct kvm_shared_msrs *smsr = per_cpu_ptr(shared_msrs, cpu);

	/* only read, and nobody should modify it at this time,
	 * so don't need lock */
	if (slot >= shared_msrs_global.nr) {
		printk(KERN_ERR "kvm: invalid MSR slot!");
		return;
	}
	rdmsrl_safe(msr, &value);
	smsr->values[slot].host = value;
	smsr->values[slot].curr = value;
}

Contributors:
	Sheng Yang: 30 tokens (36.59%), 1 commit (20.00%)
	Avi Kivity: 29 tokens (35.37%), 1 commit (20.00%)
	Marcelo Tosatti: 15 tokens (18.29%), 1 commit (20.00%)
	Carsten Otte: 7 tokens (8.54%), 1 commit (20.00%)
	Xiantao Zhang: 1 token (1.22%), 1 commit (20.00%)
	Total: 82 tokens, 5 commits


void kvm_define_shared_msr(unsigned slot, u32 msr)
{
	BUG_ON(slot >= KVM_NR_SHARED_MSRS);
	shared_msrs_global.msrs[slot] = msr;
	if (slot >= shared_msrs_global.nr)
		shared_msrs_global.nr = slot + 1;
}

Contributors:
	Sheng Yang: 25 tokens (58.14%), 1 commit (20.00%)
	Paolo Bonzini: 9 tokens (20.93%), 1 commit (20.00%)
	Nadav Amit: 7 tokens (16.28%), 1 commit (20.00%)
	Carsten Otte: 1 token (2.33%), 1 commit (20.00%)
	Avi Kivity: 1 token (2.33%), 1 commit (20.00%)
	Total: 43 tokens, 5 commits

EXPORT_SYMBOL_GPL(kvm_define_shared_msr);
static void kvm_shared_msr_cpu_online(void)
{
	unsigned i;

	for (i = 0; i < shared_msrs_global.nr; ++i)
		shared_msr_update(i, shared_msrs_global.msrs[i]);
}

Contributors:
	Avi Kivity: 27 tokens (71.05%), 1 commit (25.00%)
	Carsten Otte: 6 tokens (15.79%), 1 commit (25.00%)
	Sheng Yang: 4 tokens (10.53%), 1 commit (25.00%)
	Xiantao Zhang: 1 token (2.63%), 1 commit (25.00%)
	Total: 38 tokens, 4 commits


int kvm_set_shared_msr(unsigned slot, u64 value, u64 mask)
{
	unsigned int cpu = smp_processor_id();
	struct kvm_shared_msrs *smsr = per_cpu_ptr(shared_msrs, cpu);
	int err;

	if (((value ^ smsr->values[slot].curr) & mask) == 0)
		return 0;
	smsr->values[slot].curr = value;
	err = wrmsrl_safe(shared_msrs_global.msrs[slot], value);
	if (err)
		return 1;

	if (!smsr->registered) {
		smsr->urn.on_user_return = kvm_on_user_return;
		user_return_notifier_register(&smsr->urn);
		smsr->registered = true;
	}
	return 0;
}

Contributors:
	Avi Kivity: 89 tokens (70.63%), 3 commits (42.86%)
	Andrew Honig: 20 tokens (15.87%), 1 commit (14.29%)
	Marcelo Tosatti: 10 tokens (7.94%), 1 commit (14.29%)
	Sheng Yang: 6 tokens (4.76%), 1 commit (14.29%)
	Xiantao Zhang: 1 token (0.79%), 1 commit (14.29%)
	Total: 126 tokens, 7 commits

EXPORT_SYMBOL_GPL(kvm_set_shared_msr);
static void drop_user_return_notifiers(void)
{
	unsigned int cpu = smp_processor_id();
	struct kvm_shared_msrs *smsr = per_cpu_ptr(shared_msrs, cpu);

	if (smsr->registered)
		kvm_on_user_return(&smsr->urn);
}

Contributors:
	Avi Kivity: 31 tokens (75.61%), 2 commits (66.67%)
	Marcelo Tosatti: 10 tokens (24.39%), 1 commit (33.33%)
	Total: 41 tokens, 3 commits
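
Taken together, kvm_define_shared_msr(), kvm_set_shared_msr() and the
user-return notifier above implement lazy host-MSR restore: the guest value
is written eagerly before entering the guest, while the host value is put
back only on the next return to userspace (or on hardware disable). A
minimal sketch of how a vendor module is expected to drive this API follows;
the slot number and the "example_" function names are invented for
illustration, only the kvm_* calls are the real ones defined above.

/* Illustrative sketch only -- not part of x86.c. */
#define EXAMPLE_SLOT_STAR	0

static void example_hardware_setup(void)
{
	/* Once, at module init: declare which MSR lives in which slot. */
	kvm_define_shared_msr(EXAMPLE_SLOT_STAR, MSR_STAR);
}

static void example_prepare_guest_switch(u64 guest_star)
{
	/*
	 * Before VM-entry: install the guest value. The first call on a
	 * CPU registers the user-return notifier, which later writes the
	 * saved host value back, but only if guest and host values differ.
	 */
	kvm_set_shared_msr(EXAMPLE_SLOT_STAR, guest_star, -1ull);
}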


u64 kvm_get_apic_base(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.apic_base;
}

Contributors:
	Carsten Otte: 15 tokens (88.24%), 1 commit (50.00%)
	Xiantao Zhang: 2 tokens (11.76%), 1 commit (50.00%)
	Total: 17 tokens, 2 commits

EXPORT_SYMBOL_GPL(kvm_get_apic_base);
int kvm_set_apic_base(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
{
	u64 old_state = vcpu->arch.apic_base &
		(MSR_IA32_APICBASE_ENABLE | X2APIC_ENABLE);
	u64 new_state = msr_info->data &
		(MSR_IA32_APICBASE_ENABLE | X2APIC_ENABLE);
	u64 reserved_bits = ((~0ULL) << cpuid_maxphyaddr(vcpu)) | 0x2ff |
		(guest_cpuid_has_x2apic(vcpu) ? 0 : X2APIC_ENABLE);

	if (!msr_info->host_initiated &&
	    ((msr_info->data & reserved_bits) != 0 ||
	     new_state == X2APIC_ENABLE ||
	     (new_state == MSR_IA32_APICBASE_ENABLE &&
	      old_state == (MSR_IA32_APICBASE_ENABLE | X2APIC_ENABLE)) ||
	     (new_state == (MSR_IA32_APICBASE_ENABLE | X2APIC_ENABLE) &&
	      old_state == 0)))
		return 1;

	kvm_lapic_set_base(vcpu, msr_info->data);
	return 0;
}

Contributors:
	Jan Kiszka: 120 tokens (87.59%), 1 commit (50.00%)
	Carsten Otte: 17 tokens (12.41%), 1 commit (50.00%)
	Total: 137 tokens, 2 commits

EXPORT_SYMBOL_GPL(kvm_set_apic_base);
asmlinkage __visible void kvm_spurious_fault(void)
{
	/* Fault while not rebooting. We want the trace. */
	BUG();
}

Contributors:
	Geoff Levand: 12 tokens (92.31%), 1 commit (50.00%)
	Andi Kleen: 1 token (7.69%), 1 commit (50.00%)
	Total: 13 tokens, 2 commits

EXPORT_SYMBOL_GPL(kvm_spurious_fault);

#define EXCPT_BENIGN		0
#define EXCPT_CONTRIBUTORY	1
#define EXCPT_PF		2
static int exception_class(int vector)
{
	switch (vector) {
	case PF_VECTOR:
		return EXCPT_PF;
	case DE_VECTOR:
	case TS_VECTOR:
	case NP_VECTOR:
	case SS_VECTOR:
	case GP_VECTOR:
		return EXCPT_CONTRIBUTORY;
	default:
		break;
	}
	return EXCPT_BENIGN;
}

Contributors:
	Eddie Dong: 44 tokens (100.00%), 1 commit (100.00%)
	Total: 44 tokens, 1 commit

#define EXCPT_FAULT		0
#define EXCPT_TRAP		1
#define EXCPT_ABORT		2
#define EXCPT_INTERRUPT		3
static int exception_type(int vector)
{
	unsigned int mask;

	if (WARN_ON(vector > 31 || vector == NMI_VECTOR))
		return EXCPT_INTERRUPT;

	mask = 1 << vector;

	/* #DB is trap, as instruction watchpoints are handled elsewhere */
	if (mask & ((1 << DB_VECTOR) | (1 << BP_VECTOR) | (1 << OF_VECTOR)))
		return EXCPT_TRAP;

	if (mask & ((1 << DF_VECTOR) | (1 << MC_VECTOR)))
		return EXCPT_ABORT;

	/* Reserved exceptions will result in fault */
	return EXCPT_FAULT;
}

Contributors:
	Nadav Amit: 88 tokens (100.00%), 1 commit (100.00%)
	Total: 88 tokens, 1 commit


static void kvm_multiple_exception(struct kvm_vcpu *vcpu,
				   unsigned nr, bool has_error, u32 error_code,
				   bool reinject)
{
	u32 prev_nr;
	int class1, class2;

	kvm_make_request(KVM_REQ_EVENT, vcpu);

	if (!vcpu->arch.exception.pending) {
	queue:
		if (has_error && !is_protmode(vcpu))
			has_error = false;
		vcpu->arch.exception.pending = true;
		vcpu->arch.exception.has_error_code = has_error;
		vcpu->arch.exception.nr = nr;
		vcpu->arch.exception.error_code = error_code;
		vcpu->arch.exception.reinject = reinject;
		return;
	}

	/* to check exception */
	prev_nr = vcpu->arch.exception.nr;
	if (prev_nr == DF_VECTOR) {
		/* triple fault -> shutdown */
		kvm_make_request(KVM_REQ_TRIPLE_FAULT, vcpu);
		return;
	}
	class1 = exception_class(prev_nr);
	class2 = exception_class(nr);
	if ((class1 == EXCPT_CONTRIBUTORY && class2 == EXCPT_CONTRIBUTORY)
		|| (class1 == EXCPT_PF && class2 != EXCPT_BENIGN)) {
		/* generate double fault per SDM Table 5-5 */
		vcpu->arch.exception.pending = true;
		vcpu->arch.exception.has_error_code = true;
		vcpu->arch.exception.nr = DF_VECTOR;
		vcpu->arch.exception.error_code = 0;
	} else
		/* replace previous exception with a new one in a hope
		   that instruction re-execution will regenerate lost
		   exception */
		goto queue;
}

Contributors:
	Eddie Dong: 99 tokens (43.04%), 1 commit (9.09%)
	Avi Kivity: 75 tokens (32.61%), 4 commits (36.36%)
	Joerg Roedel: 21 tokens (9.13%), 3 commits (27.27%)
	Xiantao Zhang: 16 tokens (6.96%), 1 commit (9.09%)
	Nadav Amit: 14 tokens (6.09%), 1 commit (9.09%)
	Gleb Natapov: 5 tokens (2.17%), 1 commit (9.09%)
	Total: 230 tokens, 11 commits
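
The combination rules above follow the x86 SDM ("Conditions for Generating
a Double Fault"): a benign exception never escalates, two contributory
exceptions or a page fault followed by a non-benign exception merge into
#DF, and anything arriving on top of a pending #DF shuts the guest down
with a triple fault. A standalone, userspace-compilable sketch of the same
decision table (vector numbers per the SDM; not kernel code):

#include <stdio.h>

enum { BENIGN, CONTRIBUTORY, PF };

static int class_of(int vector)
{
	switch (vector) {
	case 14:					/* #PF */
		return PF;
	case 0: case 10: case 11: case 12: case 13:	/* #DE #TS #NP #SS #GP */
		return CONTRIBUTORY;
	default:
		return BENIGN;
	}
}

static const char *combine(int prev, int next)
{
	if (prev == 8)					/* pending #DF */
		return "triple fault";
	if ((class_of(prev) == CONTRIBUTORY && class_of(next) == CONTRIBUTORY) ||
	    (class_of(prev) == PF && class_of(next) != BENIGN))
		return "#DF";
	return "deliver second exception serially";
}

int main(void)
{
	printf("#DE then #GP -> %s\n", combine(0, 13));		/* #DF */
	printf("#PF then #GP -> %s\n", combine(14, 13));	/* #DF */
	printf("#GP then #PF -> %s\n", combine(13, 14));	/* serial */
	printf("#PF then #UD -> %s\n", combine(14, 6));		/* serial */
	return 0;
}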


void kvm_queue_exception(struct kvm_vcpu *vcpu, unsigned nr)
{
	kvm_multiple_exception(vcpu, nr, false, 0, false);
}

Contributors:
	Eddie Dong: 24 tokens (92.31%), 1 commit (50.00%)
	Joerg Roedel: 2 tokens (7.69%), 1 commit (50.00%)
	Total: 26 tokens, 2 commits

EXPORT_SYMBOL_GPL(kvm_queue_exception);
void kvm_requeue_exception(struct kvm_vcpu *vcpu, unsigned nr)
{
	kvm_multiple_exception(vcpu, nr, false, 0, true);
}

Contributors:
	Joerg Roedel: 26 tokens (100.00%), 1 commit (100.00%)
	Total: 26 tokens, 1 commit

EXPORT_SYMBOL_GPL(kvm_requeue_exception);
int kvm_complete_insn_gp(struct kvm_vcpu *vcpu, int err)
{
	if (err)
		kvm_inject_gp(vcpu, 0);
	else
		return kvm_skip_emulated_instruction(vcpu);

	return 1;
}

Contributors:
	Andre Przywara: 19 tokens (55.88%), 1 commit (25.00%)
	Eddie Dong: 7 tokens (20.59%), 1 commit (25.00%)
	Kyle Huey: 7 tokens (20.59%), 1 commit (25.00%)
	Joerg Roedel: 1 token (2.94%), 1 commit (25.00%)
	Total: 34 tokens, 4 commits

EXPORT_SYMBOL_GPL(kvm_complete_insn_gp);
void kvm_inject_page_fault(struct kvm_vcpu *vcpu, struct x86_exception *fault)
{
	++vcpu->stat.pf_guest;
	vcpu->arch.exception.nested_apf =
		is_guest_mode(vcpu) && fault->async_page_fault;
	if (vcpu->arch.exception.nested_apf)
		vcpu->arch.apf.nested_apf_token = fault->address;
	else
		vcpu->arch.cr2 = fault->address;
	kvm_queue_exception_e(vcpu, PF_VECTOR, fault->error_code);
}

Contributors:
	Wanpeng Li: 40 tokens (48.19%), 1 commit (16.67%)
	Avi Kivity: 22 tokens (26.51%), 2 commits (33.33%)
	Eddie Dong: 16 tokens (19.28%), 1 commit (16.67%)
	Joerg Roedel: 3 tokens (3.61%), 1 commit (16.67%)
	Xiantao Zhang: 2 tokens (2.41%), 1 commit (16.67%)
	Total: 83 tokens, 6 commits

EXPORT_SYMBOL_GPL(kvm_inject_page_fault);
static bool kvm_propagate_fault(struct kvm_vcpu *vcpu, struct x86_exception *fault)
{
	if (mmu_is_nested(vcpu) && !fault->nested_page_fault)
		vcpu->arch.nested_mmu.inject_page_fault(vcpu, fault);
	else
		vcpu->arch.mmu.inject_page_fault(vcpu, fault);

	return fault->nested_page_fault;
}

Contributors:
	Joerg Roedel: 42 tokens (70.00%), 2 commits (50.00%)
	Avi Kivity: 11 tokens (18.33%), 1 commit (25.00%)
	Paolo Bonzini: 7 tokens (11.67%), 1 commit (25.00%)
	Total: 60 tokens, 4 commits


void kvm_inject_nmi(struct kvm_vcpu *vcpu)
{
	atomic_inc(&vcpu->arch.nmi_queued);
	kvm_make_request(KVM_REQ_NMI, vcpu);
}

Contributors:
	Avi Kivity: 12 tokens (44.44%), 2 commits (50.00%)
	Sheng Yang: 12 tokens (44.44%), 1 commit (25.00%)
	Marcelo Tosatti: 3 tokens (11.11%), 1 commit (25.00%)
	Total: 27 tokens, 4 commits

EXPORT_SYMBOL_GPL(kvm_inject_nmi);
void kvm_queue_exception_e(struct kvm_vcpu *vcpu, unsigned nr, u32 error_code)
{
	kvm_multiple_exception(vcpu, nr, true, error_code, false);
}

Contributors:
	Avi Kivity: 21 tokens (72.41%), 1 commit (33.33%)
	Eddie Dong: 6 tokens (20.69%), 1 commit (33.33%)
	Joerg Roedel: 2 tokens (6.90%), 1 commit (33.33%)
	Total: 29 tokens, 3 commits

EXPORT_SYMBOL_GPL(kvm_queue_exception_e);
void kvm_requeue_exception_e(struct kvm_vcpu *vcpu, unsigned nr, u32 error_code)
{
	kvm_multiple_exception(vcpu, nr, true, error_code, true);
}

Contributors:
	Joerg Roedel: 29 tokens (100.00%), 1 commit (100.00%)
	Total: 29 tokens, 1 commit

EXPORT_SYMBOL_GPL(kvm_requeue_exception_e);

/*
 * Checks if cpl <= required_cpl; if true, return true.  Otherwise queue
 * a #GP and return false.
 */
bool kvm_require_cpl(struct kvm_vcpu *vcpu, int required_cpl)
{
	if (kvm_x86_ops->get_cpl(vcpu) <= required_cpl)
		return true;
	kvm_queue_exception_e(vcpu, GP_VECTOR, 0);
	return false;
}

Contributors:
	Avi Kivity: 39 tokens (100.00%), 2 commits (100.00%)
	Total: 39 tokens, 2 commits

EXPORT_SYMBOL_GPL(kvm_require_cpl);
bool kvm_require_dr(struct kvm_vcpu *vcpu, int dr)
{
	if ((dr != 4 && dr != 5) || !kvm_read_cr4_bits(vcpu, X86_CR4_DE))
		return true;

	kvm_queue_exception(vcpu, UD_VECTOR);
	return false;
}

Contributors:
	Nadav Amit: 46 tokens (100.00%), 1 commit (100.00%)
	Total: 46 tokens, 1 commit

EXPORT_SYMBOL_GPL(kvm_require_dr);

/*
 * This function will be used to read from the physical memory of the currently
 * running guest. The difference to kvm_vcpu_read_guest_page is that this function
 * can read from guest physical or from the guest's guest physical memory.
 */
int kvm_read_guest_page_mmu(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
			    gfn_t ngfn, void *data, int offset, int len,
			    u32 access)
{
	struct x86_exception exception;
	gfn_t real_gfn;
	gpa_t ngpa;

	ngpa     = gfn_to_gpa(ngfn);
	real_gfn = mmu->translate_gpa(vcpu, ngpa, access, &exception);
	if (real_gfn == UNMAPPED_GVA)
		return -EFAULT;

	real_gfn = gpa_to_gfn(real_gfn);

	return kvm_vcpu_read_guest_page(vcpu, real_gfn, data, offset, len);
}

Contributors:
	Joerg Roedel: 87 tokens (91.58%), 1 commit (33.33%)
	Paolo Bonzini: 8 tokens (8.42%), 2 commits (66.67%)
	Total: 95 tokens, 3 commits

EXPORT_SYMBOL_GPL(kvm_read_guest_page_mmu);
static int kvm_read_nested_guest_page(struct kvm_vcpu *vcpu, gfn_t gfn,
				      void *data, int offset, int len, u32 access)
{
	return kvm_read_guest_page_mmu(vcpu, vcpu->arch.walk_mmu, gfn,
				       data, offset, len, access);
}

Contributors:
	Joerg Roedel: 48 tokens (97.96%), 1 commit (50.00%)
	Fengguang Wu: 1 token (2.04%), 1 commit (50.00%)
	Total: 49 tokens, 2 commits

/*
 * Load the pae pdptrs.  Return true if they are all valid.
 */
int load_pdptrs(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu, unsigned long cr3)
{
	gfn_t pdpt_gfn = cr3 >> PAGE_SHIFT;
	unsigned offset = ((cr3 & (PAGE_SIZE-1)) >> 5) << 2;
	int i;
	int ret;
	u64 pdpte[ARRAY_SIZE(mmu->pdptrs)];

	ret = kvm_read_guest_page_mmu(vcpu, mmu, pdpt_gfn, pdpte,
				      offset * sizeof(u64), sizeof(pdpte),
				      PFERR_USER_MASK|PFERR_WRITE_MASK);
	if (ret < 0) {
		ret = 0;
		goto out;
	}
	for (i = 0; i < ARRAY_SIZE(pdpte); ++i) {
		if ((pdpte[i] & PT_PRESENT_MASK) &&
		    (pdpte[i] &
		     vcpu->arch.mmu.guest_rsvd_check.rsvd_bits_mask[0][2])) {
			ret = 0;
			goto out;
		}
	}
	ret = 1;

	memcpy(mmu->pdptrs, pdpte, sizeof(mmu->pdptrs));
	__set_bit(VCPU_EXREG_PDPTR,
		  (unsigned long *)&vcpu->arch.regs_avail);
	__set_bit(VCPU_EXREG_PDPTR,
		  (unsigned long *)&vcpu->arch.regs_dirty);
out:

	return ret;
}

Contributors:
	Carsten Otte: 159 tokens (70.35%), 1 commit (14.29%)
	Avi Kivity: 34 tokens (15.04%), 1 commit (14.29%)
	Joerg Roedel: 14 tokens (6.19%), 2 commits (28.57%)
	Eddie Dong: 13 tokens (5.75%), 1 commit (14.29%)
	Bandan Das: 4 tokens (1.77%), 1 commit (14.29%)
	Xiao Guangrong: 2 tokens (0.88%), 1 commit (14.29%)
	Total: 226 tokens, 7 commits
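
In PAE mode, CR3 bits 31:5 hold the physical address of the 32-byte-aligned,
4-entry page-directory-pointer table, which is why the function splits CR3
into a page frame and an in-page offset. A worked example with an invented
CR3 value:

/*
 * cr3 = 0x123fe0 (illustrative):
 *
 *	pdpt_gfn = cr3 >> PAGE_SHIFT          = 0x123	(guest page frame)
 *	offset   = ((cr3 & 0xfff) >> 5) << 2  = 0x1fc	(in u64 entries)
 *	byte off = offset * sizeof(u64)       = 0xfe0	(= cr3 & 0xfe0)
 *
 * so kvm_read_guest_page_mmu() fetches the four 8-byte PDPTEs from bytes
 * 0xfe0..0xfff of guest page 0x123; the 32-byte alignment guarantees the
 * table never crosses a page boundary.
 */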

EXPORT_SYMBOL_GPL(load_pdptrs);
bool pdptrs_changed(struct kvm_vcpu *vcpu)
{
	u64 pdpte[ARRAY_SIZE(vcpu->arch.walk_mmu->pdptrs)];
	bool changed = true;
	int offset;
	gfn_t gfn;
	int r;

	if (is_long_mode(vcpu) || !is_pae(vcpu))
		return false;

	if (!test_bit(VCPU_EXREG_PDPTR,
		      (unsigned long *)&vcpu->arch.regs_avail))
		return true;

	gfn = (kvm_read_cr3(vcpu) & 0xffffffe0ul) >> PAGE_SHIFT;
	offset = (kvm_read_cr3(vcpu) & 0xffffffe0ul) & (PAGE_SIZE - 1);
	r = kvm_read_nested_guest_page(vcpu, gfn, pdpte, offset, sizeof(pdpte),
				       PFERR_USER_MASK | PFERR_WRITE_MASK);
	if (r < 0)
		goto out;
	changed = memcmp(pdpte, vcpu->arch.walk_mmu->pdptrs, sizeof(pdpte)) != 0;
out:

	return changed;
}

Contributors:
	Avi Kivity: 119 tokens (71.69%), 3 commits (42.86%)
	Joerg Roedel: 41 tokens (24.70%), 2 commits (28.57%)
	Xiantao Zhang: 4 tokens (2.41%), 1 commit (14.29%)
	Paolo Bonzini: 2 tokens (1.20%), 1 commit (14.29%)
	Total: 166 tokens, 7 commits

EXPORT_SYMBOL_GPL(pdptrs_changed);
int kvm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
{
	unsigned long old_cr0 = kvm_read_cr0(vcpu);
	unsigned long update_bits = X86_CR0_PG | X86_CR0_WP;

	cr0 |= X86_CR0_ET;

#ifdef CONFIG_X86_64
	if (cr0 & 0xffffffff00000000UL)
		return 1;
#endif

	cr0 &= ~CR0_RESERVED_BITS;

	if ((cr0 & X86_CR0_NW) && !(cr0 & X86_CR0_CD))
		return 1;

	if ((cr0 & X86_CR0_PG) && !(cr0 & X86_CR0_PE))
		return 1;

	if (!is_paging(vcpu) && (cr0 & X86_CR0_PG)) {
#ifdef CONFIG_X86_64
		if ((vcpu->arch.efer & EFER_LME)) {
			int cs_db, cs_l;

			if (!is_pae(vcpu))
				return 1;
			kvm_x86_ops->get_cs_db_l_bits(vcpu, &cs_db, &cs_l);
			if (cs_l)
				return 1;
		} else
#endif
		if (is_pae(vcpu) && !load_pdptrs(vcpu, vcpu->arch.walk_mmu,
						 kvm_read_cr3(vcpu)))
			return 1;
	}

	if (!(cr0 & X86_CR0_PG) && kvm_read_cr4_bits(vcpu, X86_CR4_PCIDE))
		return 1;

	kvm_x86_ops->set_cr0(vcpu, cr0);

	if ((cr0 ^ old_cr0) & X86_CR0_PG) {
		kvm_clear_async_pf_completion_queue(vcpu);
		kvm_async_pf_hash_reset(vcpu);
	}

	if ((cr0 ^ old_cr0) & update_bits)
		kvm_mmu_reset_context(vcpu);

	if (((cr0 ^ old_cr0) & X86_CR0_CD) &&
	    kvm_arch_has_noncoherent_dma(vcpu->kvm) &&
	    !kvm_check_has_quirk(vcpu->kvm, KVM_X86_QUIRK_CD_NW_CLEARED))
		kvm_zap_gfn_range(vcpu->kvm, 0, ~0ULL);

	return 0;
}

Contributors:
	Carsten Otte: 139 tokens (46.64%), 1 commit (6.67%)
	Xiao Guangrong: 37 tokens (12.42%), 2 commits (13.33%)
	Gleb Natapov: 34 tokens (11.41%), 2 commits (13.33%)
	Sheng Yang: 27 tokens (9.06%), 1 commit (6.67%)
	Laszlo Ersek: 19 tokens (6.38%), 1 commit (6.67%)
	Junjie Mao: 19 tokens (6.38%), 1 commit (6.67%)
	Avi Kivity: 9 tokens (3.02%), 4 commits (26.67%)
	Lai Jiangshan: 7 tokens (2.35%), 1 commit (6.67%)
	Xiantao Zhang: 4 tokens (1.34%), 1 commit (6.67%)
	Joerg Roedel: 3 tokens (1.01%), 1 commit (6.67%)
	Total: 298 tokens, 15 commits

EXPORT_SYMBOL_GPL(kvm_set_cr0);
void kvm_lmsw(struct kvm_vcpu *vcpu, unsigned long msw)
{
	(void)kvm_set_cr0(vcpu, kvm_read_cr0_bits(vcpu, ~0x0eul) | (msw & 0x0f));
}

Contributors:
	Carsten Otte: 26 tokens (72.22%), 1 commit (20.00%)
	Avi Kivity: 10 tokens (27.78%), 4 commits (80.00%)
	Total: 36 tokens, 5 commits

EXPORT_SYMBOL_GPL(kvm_lmsw);
static void kvm_load_guest_xcr0(struct kvm_vcpu *vcpu)
{
	if (kvm_read_cr4_bits(vcpu, X86_CR4_OSXSAVE) &&
			!vcpu->guest_xcr0_loaded) {
		/* kvm_set_xcr() also depends on this */
		xsetbv(XCR_XFEATURE_ENABLED_MASK, vcpu->arch.xcr0);
		vcpu->guest_xcr0_loaded = 1;
	}
}

Contributors:
	Marcelo Tosatti: 45 tokens (100.00%), 1 commit (100.00%)
	Total: 45 tokens, 1 commit


static void kvm_put_guest_xcr0(struct kvm_vcpu *vcpu)
{
	if (vcpu->guest_xcr0_loaded) {
		if (vcpu->arch.xcr0 != host_xcr0)
			xsetbv(XCR_XFEATURE_ENABLED_MASK, host_xcr0);
		vcpu->guest_xcr0_loaded = 0;
	}
}

Contributors:
	Marcelo Tosatti: 42 tokens (100.00%), 1 commit (100.00%)
	Total: 42 tokens, 1 commit


static int __kvm_set_xcr(struct kvm_vcpu *vcpu, u32 index, u64 xcr)
{
	u64 xcr0 = xcr;
	u64 old_xcr0 = vcpu->arch.xcr0;
	u64 valid_bits;

	/* Only support XCR_XFEATURE_ENABLED_MASK(xcr0) now  */
	if (index != XCR_XFEATURE_ENABLED_MASK)
		return 1;
	if (!(xcr0 & XFEATURE_MASK_FP))
		return 1;
	if ((xcr0 & XFEATURE_MASK_YMM) && !(xcr0 & XFEATURE_MASK_SSE))
		return 1;

	/*
	 * Do not allow the guest to set bits that we do not support
	 * saving.  However, xcr0 bit 0 is always set, even if the
	 * emulated CPU does not support XSAVE (see fx_init).
	 */
	valid_bits = vcpu->arch.guest_supported_xcr0 | XFEATURE_MASK_FP;
	if (xcr0 & ~valid_bits)
		return 1;

	if ((!(xcr0 & XFEATURE_MASK_BNDREGS)) !=
	    (!(xcr0 & XFEATURE_MASK_BNDCSR)))
		return 1;

	if (xcr0 & XFEATURE_MASK_AVX512) {
		if (!(xcr0 & XFEATURE_MASK_YMM))
			return 1;
		if ((xcr0 & XFEATURE_MASK_AVX512) != XFEATURE_MASK_AVX512)
			return 1;
	}
	vcpu->arch.xcr0 = xcr0;

	if ((xcr0 ^ old_xcr0) & XFEATURE_MASK_EXTEND)
		kvm_update_cpuid(vcpu);
	return 0;
}

Contributors:
	Dexuan Cui: 71 tokens (40.11%), 1 commit (12.50%)
	Liu Jinsong: 46 tokens (25.99%), 2 commits (25.00%)
	Chao Peng: 29 tokens (16.38%), 1 commit (12.50%)
	Paolo Bonzini: 19 tokens (10.73%), 2 commits (25.00%)
	Dave Hansen: 11 tokens (6.21%), 1 commit (12.50%)
	Fengguang Wu: 1 token (0.56%), 1 commit (12.50%)
	Total: 177 tokens, 8 commits


int kvm_set_xcr(struct kvm_vcpu *vcpu, u32 index, u64 xcr)
{
	if (kvm_x86_ops->get_cpl(vcpu) != 0 ||
	    __kvm_set_xcr(vcpu, index, xcr)) {
		kvm_inject_gp(vcpu, 0);
		return 1;
	}
	return 0;
}

Contributors:
	Dexuan Cui: 42 tokens (82.35%), 1 commit (50.00%)
	Zhanghaoyu (A): 9 tokens (17.65%), 1 commit (50.00%)
	Total: 51 tokens, 2 commits

EXPORT_SYMBOL_GPL(kvm_set_xcr);
int kvm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
{
	unsigned long old_cr4 = kvm_read_cr4(vcpu);
	unsigned long pdptr_bits = X86_CR4_PGE | X86_CR4_PSE | X86_CR4_PAE |
				   X86_CR4_SMEP | X86_CR4_SMAP | X86_CR4_PKE;

	if (cr4 & CR4_RESERVED_BITS)
		return 1;

	if (!guest_cpuid_has_xsave(vcpu) && (cr4 & X86_CR4_OSXSAVE))
		return 1;

	if (!guest_cpuid_has_smep(vcpu) && (cr4 & X86_CR4_SMEP))
		return 1;

	if (!guest_cpuid_has_smap(vcpu) && (cr4 & X86_CR4_SMAP))
		return 1;

	if (!guest_cpuid_has_fsgsbase(vcpu) && (cr4 & X86_CR4_FSGSBASE))
		return 1;

	if (!guest_cpuid_has_pku(vcpu) && (cr4 & X86_CR4_PKE))
		return 1;

	if (is_long_mode(vcpu)) {
		if (!(cr4 & X86_CR4_PAE))
			return 1;
	} else if (is_paging(vcpu) && (cr4 & X86_CR4_PAE)
		   && ((cr4 ^ old_cr4) & pdptr_bits)
		   && !load_pdptrs(vcpu, vcpu->arch.walk_mmu,
				   kvm_read_cr3(vcpu)))
		return 1;

	if ((cr4 & X86_CR4_PCIDE) && !(old_cr4 & X86_CR4_PCIDE)) {
		if (!guest_cpuid_has_pcid(vcpu))
			return 1;

		/* PCID can not be enabled when cr3[11:0]!=000H or EFER.LMA=0 */
		if ((kvm_read_cr3(vcpu) & X86_CR3_PCID_MASK) || !is_long_mode(vcpu))
			return 1;
	}

	if (kvm_x86_ops->set_cr4(vcpu, cr4))
		return 1;

	if (((cr4 ^ old_cr4) & pdptr_bits) ||
	    (!(cr4 & X86_CR4_PCIDE) && (old_cr4 & X86_CR4_PCIDE)))
		kvm_mmu_reset_context(vcpu);

	if ((cr4 ^ old_cr4) & (X86_CR4_OSXSAVE | X86_CR4_PKE))
		kvm_update_cpuid(vcpu);

	return 0;
}

Contributors:
	Carsten Otte: 73 tokens (23.17%), 1 commit (5.26%)
	Junjie Mao: 66 tokens (20.95%), 1 commit (5.26%)
	Wei Yang: 35 tokens (11.11%), 2 commits (10.53%)
	Avi Kivity: 34 tokens (10.79%), 5 commits (26.32%)
	Dexuan Cui: 31 tokens (9.84%), 1 commit (5.26%)
	Huaitong Han: 23 tokens (7.30%), 1 commit (5.26%)
	Feng Wu: 17 tokens (5.40%), 1 commit (5.26%)
	Gleb Natapov: 14 tokens (4.44%), 1 commit (5.26%)
	Sheng Yang: 10 tokens (3.17%), 1 commit (5.26%)
	Nadav Har'El: 4 tokens (1.27%), 1 commit (5.26%)
	Joerg Roedel: 3 tokens (0.95%), 1 commit (5.26%)
	Xiantao Zhang: 2 tokens (0.63%), 1 commit (5.26%)
	Xiao Guangrong: 2 tokens (0.63%), 1 commit (5.26%)
	H. Peter Anvin: 1 token (0.32%), 1 commit (5.26%)
	Total: 315 tokens, 19 commits

EXPORT_SYMBOL_GPL(kvm_set_cr4);
int kvm_set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3)
{
#ifdef CONFIG_X86_64
	cr3 &= ~CR3_PCID_INVD;
#endif

	if (cr3 == kvm_read_cr3(vcpu) && !pdptrs_changed(vcpu)) {
		kvm_mmu_sync_roots(vcpu);
		kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
		return 0;
	}

	if (is_long_mode(vcpu)) {
		if (cr3 & CR3_L_MODE_RESERVED_BITS)
			return 1;
	} else if (is_pae(vcpu) && is_paging(vcpu) &&
		   !load_pdptrs(vcpu, vcpu->arch.walk_mmu, cr3))
		return 1;

	vcpu->arch.cr3 = cr3;
	__set_bit(VCPU_EXREG_CR3, (ulong *)&vcpu->arch.regs_avail);
	kvm_mmu_new_cr3(vcpu);
	return 0;
}

Contributors:
	Carsten Otte: 56 tokens (41.18%), 1 commit (6.67%)
	Avi Kivity: 38 tokens (27.94%), 4 commits (26.67%)
	Gleb Natapov: 13 tokens (9.56%), 1 commit (6.67%)
	Joerg Roedel: 6 tokens (4.41%), 1 commit (6.67%)
	Nadav Amit: 6 tokens (4.41%), 2 commits (13.33%)
	Paolo Bonzini: 6 tokens (4.41%), 2 commits (13.33%)
	Marcelo Tosatti: 5 tokens (3.68%), 1 commit (6.67%)
	Liang Chen: 3 tokens (2.21%), 1 commit (6.67%)
	Xiantao Zhang: 2 tokens (1.47%), 1 commit (6.67%)
	Jan Kiszka: 1 token (0.74%), 1 commit (6.67%)
	Total: 136 tokens, 15 commits

EXPORT_SYMBOL_GPL(kvm_set_cr3);
int kvm_set_cr8(struct kvm_vcpu *vcpu, unsigned long cr8)
{
	if (cr8 & CR8_RESERVED_BITS)
		return 1;
	if (lapic_in_kernel(vcpu))
		kvm_lapic_set_tpr(vcpu, cr8);
	else
		vcpu->arch.cr8 = cr8;
	return 0;
}

Contributors:
	Carsten Otte: 37 tokens (75.51%), 1 commit (20.00%)
	Gleb Natapov: 8 tokens (16.33%), 1 commit (20.00%)
	Xiantao Zhang: 2 tokens (4.08%), 1 commit (20.00%)
	Paolo Bonzini: 1 token (2.04%), 1 commit (20.00%)
	Andre Przywara: 1 token (2.04%), 1 commit (20.00%)
	Total: 49 tokens, 5 commits

EXPORT_SYMBOL_GPL(kvm_set_cr8);
unsigned long kvm_get_cr8(struct kvm_vcpu *vcpu)
{
	if (lapic_in_kernel(vcpu))
		return kvm_lapic_get_cr8(vcpu);
	else
		return vcpu->arch.cr8;
}

Contributors:
	Carsten Otte: 28 tokens (87.50%), 1 commit (25.00%)
	Xiantao Zhang: 2 tokens (6.25%), 1 commit (25.00%)
	Avi Kivity: 1 token (3.12%), 1 commit (25.00%)
	Paolo Bonzini: 1 token (3.12%), 1 commit (25.00%)
	Total: 32 tokens, 4 commits

EXPORT_SYMBOL_GPL(kvm_get_cr8);
static void kvm_update_dr0123(struct kvm_vcpu *vcpu)
{
	int i;

	if (!(vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP)) {
		for (i = 0; i < KVM_NR_DB_REGS; i++)
			vcpu->arch.eff_db[i] = vcpu->arch.db[i];
		vcpu->arch.switch_db_regs |= KVM_DEBUGREG_RELOAD;
	}
}

Contributors:
	Nadav Amit: 66 tokens (100.00%), 1 commit (100.00%)
	Total: 66 tokens, 1 commit


static void kvm_update_dr6(struct kvm_vcpu *vcpu)
{
	if (!(vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP))
		kvm_x86_ops->set_dr6(vcpu, vcpu->arch.dr6);
}

Contributors:
	Jan Kiszka: 35 tokens (100.00%), 1 commit (100.00%)
	Total: 35 tokens, 1 commit


static void kvm_update_dr7(struct kvm_vcpu *vcpu)
{
	unsigned long dr7;

	if (vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP)
		dr7 = vcpu->arch.guest_debug_dr7;
	else
		dr7 = vcpu->arch.dr7;
	kvm_x86_ops->set_dr7(vcpu, dr7);
	vcpu->arch.switch_db_regs &= ~KVM_DEBUGREG_BP_ENABLED;
	if (dr7 & DR7_BP_EN_MASK)
		vcpu->arch.switch_db_regs |= KVM_DEBUGREG_BP_ENABLED;
}

Contributors:
	Jan Kiszka: 58 tokens (80.56%), 1 commit (50.00%)
	Paolo Bonzini: 14 tokens (19.44%), 1 commit (50.00%)
	Total: 72 tokens, 2 commits


static u64 kvm_dr6_fixed(struct kvm_vcpu *vcpu)
{
	u64 fixed = DR6_FIXED_1;

	if (!guest_cpuid_has_rtm(vcpu))
		fixed |= DR6_RTM;
	return fixed;
}

Contributors:
	Nadav Amit: 31 tokens (100.00%), 1 commit (100.00%)
	Total: 31 tokens, 1 commit


static int __kvm_set_dr(struct kvm_vcpu *vcpu, int dr, unsigned long val)
{
	switch (dr) {
	case 0 ... 3:
		vcpu->arch.db[dr] = val;
		if (!(vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP))
			vcpu->arch.eff_db[dr] = val;
		break;
	case 4:
		/* fall through */
	case 6:
		if (val & 0xffffffff00000000ULL)
			return -1; /* #GP */
		vcpu->arch.dr6 = (val & DR6_VOLATILE) | kvm_dr6_fixed(vcpu);
		kvm_update_dr6(vcpu);
		break;
	case 5:
		/* fall through */
	default: /* 7 */
		if (val & 0xffffffff00000000ULL)
			return -1; /* #GP */
		vcpu->arch.dr7 = (val & DR7_VOLATILE) | DR7_FIXED_1;
		kvm_update_dr7(vcpu);
		break;
	}

	return 0;
}

Contributors:
	Gleb Natapov: 101 tokens (70.14%), 2 commits (20.00%)
	Carsten Otte: 21 tokens (14.58%), 1 commit (10.00%)
	Alexander Graf: 9 tokens (6.25%), 2 commits (20.00%)
	Jan Kiszka: 6 tokens (4.17%), 2 commits (20.00%)
	Nadav Amit: 4 tokens (2.78%), 1 commit (10.00%)
	Xiantao Zhang: 2 tokens (1.39%), 1 commit (10.00%)
	Joerg Roedel: 1 token (0.69%), 1 commit (10.00%)
	Total: 144 tokens, 10 commits


int kvm_set_dr(struct kvm_vcpu *vcpu, int dr, unsigned long val)
{
	if (__kvm_set_dr(vcpu, dr, val)) {
		kvm_inject_gp(vcpu, 0);
		return 1;
	}
	return 0;
}

Contributors:
	Gleb Natapov: 35 tokens (81.40%), 1 commit (50.00%)
	Nadav Amit: 8 tokens (18.60%), 1 commit (50.00%)
	Total: 43 tokens, 2 commits

EXPORT_SYMBOL_GPL(kvm_set_dr);
int kvm_get_dr(struct kvm_vcpu *vcpu, int dr, unsigned long *val)
{
	switch (dr) {
	case 0 ... 3:
		*val = vcpu->arch.db[dr];
		break;
	case 4:
		/* fall through */
	case 6:
		if (vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP)
			*val = vcpu->arch.dr6;
		else
			*val = kvm_x86_ops->get_dr6(vcpu);
		break;
	case 5:
		/* fall through */
	default: /* 7 */
		*val = vcpu->arch.dr7;
		break;
	}
	return 0;
}

Contributors:
	Gleb Natapov: 62 tokens (63.92%), 1 commit (16.67%)
	Jan Kiszka: 19 tokens (19.59%), 1 commit (16.67%)
	Carsten Otte: 12 tokens (12.37%), 2 commits (33.33%)
	Glauber de Oliveira Costa: 3 tokens (3.09%), 1 commit (16.67%)
	Nadav Amit: 1 token (1.03%), 1 commit (16.67%)
	Total: 97 tokens, 6 commits

EXPORT_SYMBOL_GPL(kvm_get_dr);
bool kvm_rdpmc(struct kvm_vcpu *vcpu)
{
	u32 ecx = kvm_register_read(vcpu, VCPU_REGS_RCX);
	u64 data;
	int err;

	err = kvm_pmu_rdpmc(vcpu, ecx, &data);
	if (err)
		return err;
	kvm_register_write(vcpu, VCPU_REGS_RAX, (u32)data);
	kvm_register_write(vcpu, VCPU_REGS_RDX, data >> 32);
	return err;
}

Contributors:
	Avi Kivity: 70 tokens (98.59%), 1 commit (50.00%)
	Wei Huang: 1 token (1.41%), 1 commit (50.00%)
	Total: 71 tokens, 2 commits

EXPORT_SYMBOL_GPL(kvm_rdpmc);

/*
 * List of msr numbers which we expose to userspace through KVM_GET_MSRS
 * and KVM_SET_MSRS, and KVM_GET_MSR_INDEX_LIST.
 *
 * This list is modified at module load time to reflect the
 * capabilities of the host cpu. This capabilities test skips MSRs that are
 * kvm-specific. Those are put in emulated_msrs; filtering of emulated_msrs
 * may depend on host virtualization features rather than host cpu features.
 */

static u32 msrs_to_save[] = {
	MSR_IA32_SYSENTER_CS, MSR_IA32_SYSENTER_ESP, MSR_IA32_SYSENTER_EIP,
	MSR_STAR,
#ifdef CONFIG_X86_64
	MSR_CSTAR, MSR_KERNEL_GS_BASE, MSR_SYSCALL_MASK, MSR_LSTAR,
#endif
	MSR_IA32_TSC, MSR_IA32_CR_PAT, MSR_VM_HSAVE_PA,
	MSR_IA32_FEATURE_CONTROL, MSR_IA32_BNDCFGS, MSR_TSC_AUX,
};

static unsigned num_msrs_to_save;

static u32 emulated_msrs[] = {
	MSR_KVM_SYSTEM_TIME, MSR_KVM_WALL_CLOCK,
	MSR_KVM_SYSTEM_TIME_NEW, MSR_KVM_WALL_CLOCK_NEW,
	HV_X64_MSR_GUEST_OS_ID, HV_X64_MSR_HYPERCALL,
	HV_X64_MSR_TIME_REF_COUNT, HV_X64_MSR_REFERENCE_TSC,
	HV_X64_MSR_CRASH_P0, HV_X64_MSR_CRASH_P1, HV_X64_MSR_CRASH_P2,
	HV_X64_MSR_CRASH_P3, HV_X64_MSR_CRASH_P4, HV_X64_MSR_CRASH_CTL,
	HV_X64_MSR_RESET,
	HV_X64_MSR_VP_INDEX,
	HV_X64_MSR_VP_RUNTIME,
	HV_X64_MSR_SCONTROL,
	HV_X64_MSR_STIMER0_CONFIG,
	HV_X64_MSR_APIC_ASSIST_PAGE, MSR_KVM_ASYNC_PF_EN, MSR_KVM_STEAL_TIME,
	MSR_KVM_PV_EOI_EN,

	MSR_IA32_TSC_ADJUST,
	MSR_IA32_TSCDEADLINE,
	MSR_IA32_MISC_ENABLE,
	MSR_IA32_MCG_STATUS,
	MSR_IA32_MCG_CTL,
	MSR_IA32_MCG_EXT_CTL,
	MSR_IA32_SMBASE,
	MSR_PLATFORM_INFO,
	MSR_MISC_FEATURES_ENABLES,
};

static unsigned num_emulated_msrs;
bool kvm_valid_efer(struct kvm_vcpu *vcpu, u64 efer)
{
	if (efer & efer_reserved_bits)
		return false;

	if (efer & EFER_FFXSR) {
		struct kvm_cpuid_entry2 *feat;

		feat = kvm_find_cpuid_entry(vcpu, 0x80000001, 0);
		if (!feat || !(feat->edx & bit(X86_FEATURE_FXSR_OPT)))
			return false;
	}

	if (efer & EFER_SVME) {
		struct kvm_cpuid_entry2 *feat;

		feat = kvm_find_cpuid_entry(vcpu, 0x80000001, 0);
		if (!feat || !(feat->ecx & bit(X86_FEATURE_SVM)))
			return false;
	}

	return true;
}

Contributors:
	Alexander Graf: 66 tokens (58.41%), 2 commits (18.18%)
	Gerd Hoffmann: 12 tokens (10.62%), 2 commits (18.18%)
	Glauber de Oliveira Costa: 8 tokens (7.08%), 1 commit (9.09%)
	Joerg Roedel: 8 tokens (7.08%), 2 commits (18.18%)
	Carsten Otte: 8 tokens (7.08%), 1 commit (9.09%)
	Jan Kiszka: 7 tokens (6.19%), 1 commit (9.09%)
	Avi Kivity: 3 tokens (2.65%), 1 commit (9.09%)
	Matt T. Yourst: 1 token (0.88%), 1 commit (9.09%)
	Total: 113 tokens, 11 commits

EXPORT_SYMBOL_GPL(kvm_valid_efer);
static int set_efer(struct kvm_vcpu *vcpu, u64 efer)
{
	u64 old_efer = vcpu->arch.efer;

	if (!kvm_valid_efer(vcpu, efer))
		return 1;

	if (is_paging(vcpu)
	    && (vcpu->arch.efer & EFER_LME) != (efer & EFER_LME))
		return 1;

	efer &= ~EFER_LMA;
	efer |= vcpu->arch.efer & EFER_LMA;

	kvm_x86_ops->set_efer(vcpu, efer);

	/* Update reserved bits */
	if ((efer ^ old_efer) & EFER_NX)
		kvm_mmu_reset_context(vcpu);

	return 0;
}

Contributors:
	Jan Kiszka: 61 tokens (58.10%), 1 commit (11.11%)
	Sheng Yang: 22 tokens (20.95%), 2 commits (22.22%)
	Carsten Otte: 8 tokens (7.62%), 1 commit (11.11%)
	Glauber de Oliveira Costa: 7 tokens (6.67%), 1 commit (11.11%)
	Joerg Roedel: 3 tokens (2.86%), 1 commit (11.11%)
	Jason (Hui) Wang: 2 tokens (1.90%), 1 commit (11.11%)
	Xiantao Zhang: 1 token (0.95%), 1 commit (11.11%)
	Avi Kivity: 1 token (0.95%), 1 commit (11.11%)
	Total: 105 tokens, 9 commits


void kvm_enable_efer_bits(u64 mask)
{
	efer_reserved_bits &= ~mask;
}

Contributors:
	Joerg Roedel: 13 tokens (100.00%), 1 commit (100.00%)
	Total: 13 tokens, 1 commit

EXPORT_SYMBOL_GPL(kvm_enable_efer_bits);
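
At module load time a vendor module can widen the writable EFER set by
clearing bits from efer_reserved_bits. As a hedged sketch (the exact call
site and flag set vary by vendor and kernel version; svm.c, for instance,
enables the SVME bit when nested virtualization is configured):

/* Illustrative sketch only -- example_vendor_init() is invented. */
static void example_vendor_init(bool nested)
{
	if (nested)
		kvm_enable_efer_bits(EFER_SVME);	/* guest may now set EFER.SVME */
}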

/*
 * Writes msr value into the appropriate "register".
 * Returns 0 on success, non-0 otherwise.
 * Assumes vcpu_load() was already called.
 */
int kvm_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr)
{
	switch (msr->index) {
	case MSR_FS_BASE:
	case MSR_GS_BASE:
	case MSR_KERNEL_GS_BASE:
	case MSR_CSTAR:
	case MSR_LSTAR:
		if (is_noncanonical_address(msr->data))
			return 1;
		break;
	case MSR_IA32_SYSENTER_EIP:
	case MSR_IA32_SYSENTER_ESP:
		/*
		 * IA32_SYSENTER_ESP and IA32_SYSENTER_EIP cause #GP if
		 * non-canonical address is written on Intel but not on
		 * AMD (which ignores the top 32-bits, because it does
		 * not implement 64-bit SYSENTER).
		 *
		 * 64-bit code should hence be able to write a non-canonical
		 * value on AMD. Making the address canonical ensures that
		 * vmentry does not fail on Intel after writing a non-canonical
		 * value, and that something deterministic happens if the guest
		 * invokes 64-bit SYSENTER.
		 */
		msr->data = get_canonical(msr->data);
	}
	return kvm_x86_ops->set_msr(vcpu, msr);
}

Contributors:
	Nadav Amit: 54 tokens (68.35%), 1 commit (25.00%)
	Carsten Otte: 13 tokens (16.46%), 1 commit (25.00%)
	Glauber de Oliveira Costa: 7 tokens (8.86%), 1 commit (25.00%)
	Will Auld: 5 tokens (6.33%), 1 commit (25.00%)
	Total: 79 tokens, 4 commits

EXPORT_SYMBOL_GPL(kvm_set_msr);

/*
 * Adapt set_msr() to msr_io()'s calling convention
 */
static int do_get_msr(struct kvm_vcpu *vcpu, unsigned index, u64 *data)
{
	struct msr_data msr;
	int r;

	msr.index = index;
	msr.host_initiated = true;
	r = kvm_get_msr(vcpu, &msr);
	if (r)
		return r;

	*data = msr.data;
	return 0;
}

Contributors:
	Paolo Bonzini: 64 tokens (100.00%), 1 commit (100.00%)
	Total: 64 tokens, 1 commit


static int do_set_msr(struct kvm_vcpu *vcpu, unsigned index, u64 *data)
{
	struct msr_data msr;

	msr.data = *data;
	msr.index = index;
	msr.host_initiated = true;
	return kvm_set_msr(vcpu, &msr);
}

Contributors:
	Will Auld: 25 tokens (50.00%), 1 commit (25.00%)
	Gerd Hoffmann: 14 tokens (28.00%), 1 commit (25.00%)
	Carsten Otte: 11 tokens (22.00%), 2 commits (50.00%)
	Total: 50 tokens, 4 commits

#ifdef CONFIG_X86_64
struct pvclock_gtod_data {
	seqcount_t	seq;

	struct { /* extract of a clocksource struct */
		int	vclock_mode;
		u64	cycle_last;
		u64	mask;
		u32	mult;
		u32	shift;
	} clock;

	u64		boot_ns;
	u64		nsec_base;
	u64		wall_time_sec;
};

static struct pvclock_gtod_data pvclock_gtod_data;
static void update_pvclock_gtod(struct timekeeper *tk)
{
	struct pvclock_gtod_data *vdata = &pvclock_gtod_data;
	u64 boot_ns;

	boot_ns = ktime_to_ns(ktime_add(tk->tkr_mono.base, tk->offs_boot));

	write_seqcount_begin(&vdata->seq);

	/* copy pvclock gtod data */
	vdata->clock.vclock_mode	= tk->tkr_mono.clock->archdata.vclock_mode;
	vdata->clock.cycle_last		= tk->tkr_mono.cycle_last;
	vdata->clock.mask		= tk->tkr_mono.mask;
	vdata->clock.mult		= tk->tkr_mono.mult;
	vdata->clock.shift		= tk->tkr_mono.shift;

	vdata->boot_ns			= boot_ns;
	vdata->nsec_base		= tk->tkr_mono.xtime_nsec;

	vdata->wall_time_sec		= tk->xtime_sec;

	write_seqcount_end(&vdata->seq);
}

Contributors:
	Marcelo Tosatti: 110 tokens (75.86%), 2 commits (40.00%)
	Thomas Gleixner: 27 tokens (18.62%), 2 commits (40.00%)
	Peter Zijlstra: 8 tokens (5.52%), 1 commit (20.00%)
	Total: 145 tokens, 5 commits

#endif
void kvm_set_pending_timer(struct kvm_vcpu *vcpu)
{
	/*
	 * Note: KVM_REQ_PENDING_TIMER is implicitly checked in
	 * vcpu_enter_guest. This function is only called from
	 * the physical CPU that is running vcpu.
	 */
	kvm_make_request(KVM_REQ_PENDING_TIMER, vcpu);
}

Contributors:
	Nicholas Krause: 18 tokens (100.00%), 1 commit (100.00%)
	Total: 18 tokens, 1 commit


static void kvm_write_wall_clock(struct kvm *kvm, gpa_t wall_clock)
{
	int version;
	int r;
	struct pvclock_wall_clock wc;
	struct timespec64 boot;

	if (!wall_clock)
		return;

	r = kvm_read_guest(kvm, wall_clock, &version, sizeof(version));
	if (r)
		return;

	if (version & 1)
		++version;  /* first time write, random junk */

	++version;

	if (kvm_write_guest(kvm, wall_clock, &version, sizeof(version)))
		return;

	/*
	 * The guest calculates current wall clock time by adding
	 * system time (updated by kvm_guest_time_update below) to the
	 * wall clock specified here.  guest system time equals host
	 * system time for us, thus we must fill in host boot time here.
	 */
	getboottime64(&boot);

	if (kvm->arch.kvmclock_offset) {
		struct timespec64 ts = ns_to_timespec64(kvm->arch.kvmclock_offset);
		boot = timespec64_sub(boot, ts);
	}
	wc.sec = (u32)boot.tv_sec; /* overflow in 2106 guest time */
	wc.nsec = boot.tv_nsec;
	wc.version = version;

	kvm_write_guest(kvm, wall_clock, &wc, sizeof(wc));

	version++;
	kvm_write_guest(kvm, wall_clock, &version, sizeof(version));
}

Contributors:
	Glauber de Oliveira Costa: 72 tokens (38.92%), 1 commit (11.11%)
	Marcelo Tosatti: 33 tokens (17.84%), 1 commit (11.11%)
	Bruce Rogers: 29 tokens (15.68%), 1 commit (11.11%)
	Avi Kivity: 25 tokens (13.51%), 2 commits (22.22%)
	Gerd Hoffmann: 12 tokens (6.49%), 1 commit (11.11%)
	Arnd Bergmann: 9 tokens (4.86%), 1 commit (11.11%)
	Nicholas Krause: 4 tokens (2.16%), 1 commit (11.11%)
	Zachary Amsden: 1 token (0.54%), 1 commit (11.11%)
	Total: 185 tokens, 9 commits


static uint32_t div_frac(uint32_t dividend, uint32_t divisor)
{
	do_shl32_div32(dividend, divisor);
	return dividend;
}

Contributors:
	Gerd Hoffmann: 12 tokens (54.55%), 1 commit (33.33%)
	Paolo Bonzini: 8 tokens (36.36%), 1 commit (33.33%)
	Marcelo Tosatti: 2 tokens (9.09%), 1 commit (33.33%)
	Total: 22 tokens, 3 commits


static void kvm_get_time_scale(uint64_t scaled_hz, uint64_t base_hz,
			       s8 *pshift, u32 *pmultiplier)
{
	uint64_t scaled64;
	int32_t  shift = 0;
	uint64_t tps64;
	uint32_t tps32;

	tps64 = base_hz;
	scaled64 = scaled_hz;
	while (tps64 > scaled64*2 || tps64 & 0xffffffff00000000ULL) {
		tps64 >>= 1;
		shift--;
	}

	tps32 = (uint32_t)tps64;
	while (tps32 <= scaled64 || scaled64 & 0xffffffff00000000ULL) {
		if (scaled64 & 0xffffffff00000000ULL || tps32 & 0x80000000)
			scaled64 >>= 1;
		else
			tps32 <<= 1;
		shift++;
	}

	*pshift = shift;
	*pmultiplier = div_frac(scaled64, tps32);

	pr_debug("%s: base_hz %llu => %llu, shift %d, mul %u\n",
		 __func__, base_hz, scaled_hz, shift, *pmultiplier);
}

Contributors:
	Gerd Hoffmann: 54 tokens (40.00%), 1 commit (14.29%)
	Zachary Amsden: 43 tokens (31.85%), 1 commit (14.29%)
	Sheng Yang: 19 tokens (14.07%), 1 commit (14.29%)
	Paolo Bonzini: 9 tokens (6.67%), 1 commit (14.29%)
	Avi Kivity: 6 tokens (4.44%), 1 commit (14.29%)
	Jan Kiszka: 3 tokens (2.22%), 1 commit (14.29%)
	Harvey Harrison: 1 token (0.74%), 1 commit (14.29%)
	Total: 135 tokens, 7 commits
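
A worked example of the shift/mult computation, assuming an invented
2.5 GHz host TSC being scaled to nanoseconds (the way the kvmclock code
calls this function):

/*
 *	kvm_get_time_scale(NSEC_PER_SEC, 2500000000ull, &shift, &mult);
 *
 * First loop:	2500000000 > 2 * 1000000000, so tps64 is halved once
 *		-> shift = -1, tps32 = 1250000000.
 * Second loop:	1250000000 > 1000000000 already, so nothing changes.
 * Result:	mult = (1000000000 << 32) / 1250000000 ~= 0.8 * 2^32.
 *
 * The consumer (pvclock) then computes ns = ((delta >> 1) * mult) >> 32,
 * i.e. delta * 0.4 = delta / 2.5 -- exactly one nanosecond per 2.5 cycles.
 */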

#ifdef CONFIG_X86_64
static atomic_t kvm_guest_has_master_clock = ATOMIC_INIT(0);
#endif

static DEFINE_PER_CPU(unsigned long, cpu_tsc_khz);
static unsigned long max_tsc_khz;
static u32 adjust_tsc_khz(u32 khz, s32 ppm)
{
	u64 v = (u64)khz * (1000000 + ppm);
	do_div(v, 1000000);
	return v;
}

Contributors:
	Zachary Amsden: 28 tokens (77.78%), 2 commits (50.00%)
	Avi Kivity: 7 tokens (19.44%), 1 commit (25.00%)
	Joerg Roedel: 1 token (2.78%), 1 commit (25.00%)
	Total: 36 tokens, 4 commits


static int set_tsc_khz(struct kvm_vcpu *vcpu, u32 user_tsc_khz, bool scale)
{
	u64 ratio;

	/* Guest TSC same frequency as host TSC? */
	if (!scale) {
		vcpu->arch.tsc_scaling_ratio = kvm_default_tsc_scaling_ratio;
		return 0;
	}

	/* TSC scaling supported? */
	if (!kvm_has_tsc_control) {
		if (user_tsc_khz > tsc_khz) {
			vcpu->arch.tsc_catchup = 1;
			vcpu->arch.tsc_always_catchup = 1;
			return 0;
		} else {
			WARN(1, "user requested TSC rate below hardware speed\n");
			return -1;
		}
	}

	/* TSC scaling required  - calculate ratio */
	ratio = mul_u64_u32_div(1ULL << kvm_tsc_scaling_ratio_frac_bits,
				user_tsc_khz, tsc_khz);

	if (ratio == 0 || ratio >= kvm_max_tsc_scaling_ratio) {
		WARN_ONCE(1, "Invalid TSC scaling ratio - virtual-tsc-khz=%u\n",
			  user_tsc_khz);
		return -1;
	}

	vcpu->arch.tsc_scaling_ratio = ratio;
	return 0;
}

Contributors:
	Haozhong Zhang: 138 tokens (100.00%), 1 commit (100.00%)
	Total: 138 tokens, 1 commit


static int kvm_set_tsc_khz(struct kvm_vcpu *vcpu, u32 user_tsc_khz)
{
	u32 thresh_lo, thresh_hi;
	int use_scaling = 0;

	/* tsc_khz can be zero if TSC calibration fails */
	if (user_tsc_khz == 0) {
		/* set tsc_scaling_ratio to a safe value */
		vcpu->arch.tsc_scaling_ratio = kvm_default_tsc_scaling_ratio;
		return -1;
	}

	/* Compute a scale to convert nanoseconds in TSC cycles */
	kvm_get_time_scale(user_tsc_khz * 1000LL, NSEC_PER_SEC,
			   &vcpu->arch.virtual_tsc_shift,
			   &vcpu->arch.virtual_tsc_mult);
	vcpu->arch.virtual_tsc_khz = user_tsc_khz;

	/*
	 * Compute the variation in TSC rate which is acceptable
	 * within the range of tolerance and decide if the
	 * rate being applied is within that bounds of the hardware
	 * rate.  If so, no scaling or compensation need be done.
	 */
	thresh_lo = adjust_tsc_khz(tsc_khz, -tsc_tolerance_ppm);
	thresh_hi = adjust_tsc_khz(tsc_khz, tsc_tolerance_ppm);
	if (user_tsc_khz < thresh_lo || user_tsc_khz > thresh_hi) {
		pr_debug("kvm: requested TSC rate %u falls outside tolerance [%u,%u]\n",
			 user_tsc_khz, thresh_lo, thresh_hi);
		use_scaling = 1;
	}
	return set_tsc_khz(vcpu, user_tsc_khz, use_scaling);
}

Contributors:
	Zachary Amsden: 97 tokens (71.85%), 2 commits (25.00%)
	Haozhong Zhang: 18 tokens (13.33%), 2 commits (25.00%)
	Paolo Bonzini: 10 tokens (7.41%), 2 commits (25.00%)
	Marcelo Tosatti: 6 tokens (4.44%), 1 commit (12.50%)
	Joerg Roedel: 4 tokens (2.96%), 1 commit (12.50%)
	Total: 135 tokens, 8 commits


static u64 compute_guest_tsc(struct kvm_vcpu *vcpu, s64 kernel_ns)
{
	u64 tsc = pvclock_scale_delta(kernel_ns-vcpu->arch.this_tsc_nsec,
				      vcpu->arch.virtual_tsc_mult,
				      vcpu->arch.virtual_tsc_shift);
	tsc += vcpu->arch.this_tsc_write;
	return tsc;
}

Contributors:
	Zachary Amsden: 51 tokens (100.00%), 3 commits (100.00%)
	Total: 51 tokens, 3 commits


static void kvm_track_tsc_matching(struct kvm_vcpu *vcpu)
{
#ifdef CONFIG_X86_64
	bool vcpus_matched;
	struct kvm_arch *ka = &vcpu->kvm->arch;
	struct pvclock_gtod_data *gtod = &pvclock_gtod_data;

	vcpus_matched = (ka->nr_vcpus_matched_tsc + 1 ==
			 atomic_read(&vcpu->kvm->online_vcpus));

	/*
	 * Once the masterclock is enabled, always perform request in
	 * order to update it.
	 *
	 * In order to enable masterclock, the host clocksource must be TSC
	 * and the vcpus need to have matched TSCs.  When that happens,
	 * perform request to enable masterclock.
	 */
	if (ka->use_master_clock ||
	    (gtod->clock.vclock_mode == VCLOCK_TSC && vcpus_matched))
		kvm_make_request(KVM_REQ_MASTERCLOCK_UPDATE, vcpu);

	trace_kvm_track_tsc(vcpu->vcpu_id, ka->nr_vcpus_matched_tsc,
			    atomic_read(&vcpu->kvm->online_vcpus),
			    ka->use_master_clock, gtod->clock.vclock_mode);
#endif
}

Contributors:
	Marcelo Tosatti: 109 tokens (93.97%), 2 commits (50.00%)
	Zachary Amsden: 6 tokens (5.17%), 1 commit (25.00%)
	Fengguang Wu: 1 token (0.86%), 1 commit (25.00%)
	Total: 116 tokens, 4 commits


static void update_ia32_tsc_adjust_msr(struct kvm_vcpu *vcpu, s64 offset)
{
	u64 curr_offset = vcpu->arch.tsc_offset;
	vcpu->arch.ia32_tsc_adjust_msr += offset - curr_offset;
}

Contributors:
	Will Auld: 28 tokens (84.85%), 1 commit (33.33%)
	Luiz Fernando N. Capitulino: 4 tokens (12.12%), 1 commit (33.33%)
	Zachary Amsden: 1 token (3.03%), 1 commit (33.33%)
	Total: 33 tokens, 3 commits

/*
 * Multiply tsc by a fixed point number represented by ratio.
 *
 * The most significant 64-N bits (mult) of ratio represent the
 * integral part of the fixed point number; the remaining N bits
 * (frac) represent the fractional part, ie. ratio represents a fixed
 * point number (mult + frac * 2^(-N)).
 *
 * N equals to kvm_tsc_scaling_ratio_frac_bits.
 */
static inline u64 __scale_tsc(u64 ratio, u64 tsc)
{
	return mul_u64_u64_shr(tsc, ratio, kvm_tsc_scaling_ratio_frac_bits);
}

Contributors:
	Haozhong Zhang: 23 tokens (100.00%), 1 commit (100.00%)
	Total: 23 tokens, 1 commit


u64 kvm_scale_tsc(struct kvm_vcpu *vcpu, u64 tsc)
{
	u64 _tsc = tsc;
	u64 ratio = vcpu->arch.tsc_scaling_ratio;

	if (ratio != kvm_default_tsc_scaling_ratio)
		_tsc = __scale_tsc(ratio, tsc);

	return _tsc;
}

Contributors:
	Haozhong Zhang: 45 tokens (100.00%), 1 commit (100.00%)
	Total: 45 tokens, 1 commit

EXPORT_SYMBOL_GPL(kvm_scale_tsc);
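
A worked example of the fixed-point format, using the VMX parameters
(kvm_tsc_scaling_ratio_frac_bits is 48 there; SVM uses 32 -- both values
are hardware-defined and quoted here from memory): a 1500 MHz guest on a
3000 MHz host needs ratio = 0.5, so set_tsc_khz() computes

	ratio = (1500000ull << 48) / 3000000 = 1ull << 47;

and __scale_tsc() forms the 128-bit product tsc * ratio via
mul_u64_u64_shr() and shifts it right by 48, yielding tsc / 2 with no
64-bit overflow at any intermediate step.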
static u64 kvm_compute_tsc_offset(struct kvm_vcpu *vcpu, u64 target_tsc)
{
	u64 tsc;

	tsc = kvm_scale_tsc(vcpu, rdtsc());

	return target_tsc - tsc;
}

Contributors:
	Haozhong Zhang: 32 tokens (100.00%), 1 commit (100.00%)
	Total: 32 tokens, 1 commit


u64 kvm_read_l1_tsc(struct kvm_vcpu *vcpu, u64 host_tsc)
{
	return vcpu->arch.tsc_offset + kvm_scale_tsc(vcpu, host_tsc);
}

Contributors:
	Haozhong Zhang: 22 tokens (81.48%), 1 commit (50.00%)
	Paolo Bonzini: 5 tokens (18.52%), 1 commit (50.00%)
	Total: 27 tokens, 2 commits

EXPORT_SYMBOL_GPL(kvm_read_l1_tsc);
static void kvm_vcpu_write_tsc_offset(struct kvm_vcpu *vcpu, u64 offset)
{
	kvm_x86_ops->write_tsc_offset(vcpu, offset);
	vcpu->arch.tsc_offset = offset;
}

Contributors:
	Luiz Fernando N. Capitulino: 31 tokens (100.00%), 1 commit (100.00%)
	Total: 31 tokens, 1 commit


void kvm_write_tsc(struct kvm_vcpu *vcpu, struct msr_data *msr)
{
	struct kvm *kvm = vcpu->kvm;
	u64 offset, ns, elapsed;
	unsigned long flags;
	bool matched;
	bool already_matched;
	u64 data = msr->data;
	bool synchronizing = false;

	raw_spin_lock_irqsave(&kvm->arch.tsc_write_lock, flags);
	offset = kvm_compute_tsc_offset(vcpu, data);
	ns = ktime_get_boot_ns();
	elapsed = ns - kvm->arch.last_tsc_nsec;

	if (vcpu->arch.virtual_tsc_khz) {
		if (data == 0 && msr->host_initiated) {
			/*
			 * detection of vcpu initialization -- need to sync
			 * with other vCPUs. This particularly helps to keep
			 * kvm_clock stable after CPU hotplug
			 */
			synchronizing = true;
		} else {
			u64 tsc_exp = kvm->arch.last_tsc_write +
						nsec_to_cycles(vcpu, elapsed);
			u64 tsc_hz = vcpu->arch.virtual_tsc_khz * 1000LL;
			/*
			 * Special case: TSC write with a small delta (1 second)
			 * of virtual cycle time against real time is
			 * interpreted as an attempt to synchronize the CPU.
			 */
			synchronizing = data < tsc_exp + tsc_hz &&
					data + tsc_hz > tsc_exp;
		}
	}

	/*
	 * For a reliable TSC, we can match TSC offsets, and for an unstable
	 * TSC, we add elapsed time in this computation.  We could let the
	 * compensation code attempt to catch up if we fall behind, but
	 * it's better to try to match offsets from the beginning.
	 */
	if (synchronizing &&
	    vcpu->arch.virtual_tsc_khz == kvm->arch.last_tsc_khz) {
		if (!check_tsc_unstable()) {
			offset = kvm->arch.cur_tsc_offset;
			pr_debug("kvm: matched tsc offset for %llu\n", data);
		} else {
			u64 delta = nsec_to_cycles(vcpu, elapsed);
			data += delta;
			offset = kvm_compute_tsc_offset(vcpu, data);
			pr_debug("kvm: adjusted tsc offset by %llu\n", delta);
		}
		matched = true;
		already_matched = (vcpu->arch.this_tsc_generation == kvm->arch.cur_tsc_generation);
	} else {
		/*
		 * We split periods of matched TSC writes into generations.
		 * For each generation, we track the original measured
		 * nanosecond time, offset, and write, so if TSCs are in
		 * sync, we can match exact offset, and if not, we can match
		 * exact software computation in compute_guest_tsc()
		 *
		 * These values are tracked in kvm->arch.cur_xxx variables.
		 */
		kvm->arch.cur_tsc_generation++;
		kvm->arch.cur_tsc_nsec = ns;
		kvm->arch.cur_tsc_write = data;
		kvm->arch.cur_tsc_offset = offset;
		matched = false;
		pr_debug("kvm: new tsc generation %llu, clock %llu\n",
			 kvm->arch.cur_tsc_generation, data);
	}

	/*
	 * We also track the most recent recorded KHZ, write and time to
	 * allow the matching interval to be extended at each write.
	 */
	kvm->arch.last_tsc_nsec = ns;
	kvm->arch.last_tsc_write = data;
	kvm->arch.last_tsc_khz = vcpu->arch.virtual_tsc_khz;

	vcpu->arch.last_guest_tsc = data;

	/* Keep track of which generation this VCPU has synchronized to */
	vcpu->arch.this_tsc_generation = kvm->arch.cur_tsc_generation;
	vcpu->arch.this_tsc_nsec = kvm->arch.cur_tsc_nsec;
	vcpu->arch.this_tsc_write = kvm->arch.cur_tsc_write;

	if (guest_cpuid_has_tsc_adjust(vcpu) && !msr->host_initiated)
		update_ia32_tsc_adjust_msr(vcpu, offset);
	kvm_vcpu_write_tsc_offset(vcpu, offset);
	raw_spin_unlock_irqrestore(&kvm->arch.tsc_write_lock, flags);

	spin_lock(&kvm->arch.pvclock_gtod_sync_lock);
	if (!matched) {
		kvm->arch.nr_vcpus_matched_tsc = 0;
	} else if (!already_matched) {
		kvm->arch.nr_vcpus_matched_tsc++;
	}

	kvm_track_tsc_matching(vcpu);
	spin_unlock(&kvm->arch.pvclock_gtod_sync_lock);
}

Contributors:
	Zachary Amsden: 278 tokens (58.53%), 8 commits (36.36%)
	Marcelo Tosatti: 65 tokens (13.68%), 3 commits (13.64%)
	Denis Plotnikov: 56 tokens (11.79%), 2 commits (9.09%)
	Tomasz Grabiec: 34 tokens (7.16%), 1 commit (4.55%)
	Will Auld: 30 tokens (6.32%), 2 commits (9.09%)
	Joerg Roedel: 6 tokens (1.26%), 1 commit (4.55%)
	Haozhong Zhang: 2 tokens (0.42%), 1 commit (4.55%)
	Jan Kiszka: 1 token (0.21%), 1 commit (4.55%)
	Paolo Bonzini: 1 token (0.21%), 1 commit (4.55%)
	Luiz Fernando N. Capitulino: 1 token (0.21%), 1 commit (4.55%)
	Guo Chao: 1 token (0.21%), 1 commit (4.55%)
	Total: 475 tokens, 22 commits

EXPORT_SYMBOL_GPL(kvm_write_tsc);
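
A worked example of the synchronization window above, with invented
numbers: virtual_tsc_khz = 2000000 (a 2 GHz guest) and a TSC write
arriving 100 ms after the previous one (elapsed = 100000000 ns):

	tsc_exp = last_tsc_write + nsec_to_cycles(vcpu, 100000000)
		= last_tsc_write + 200000000	/* 0.1 s at 2 GHz */
	tsc_hz	= 2000000000

Any value in the open interval (tsc_exp - tsc_hz, tsc_exp + tsc_hz), i.e.
within one second of virtual TSC time of the expected value, is treated as
an attempt to synchronize with the other vCPUs rather than as a deliberate
jump, and is matched against the current generation instead of starting a
new one.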
static inline void adjust_tsc_offset_guest(struct kvm_vcpu *vcpu,
					   s64 adjustment)
{
	kvm_vcpu_write_tsc_offset(vcpu, vcpu->arch.tsc_offset + adjustment);
}

Contributors:
	Haozhong Zhang: 21 tokens (75.00%), 1 commit (50.00%)
	Paolo Bonzini: 7 tokens (25.00%), 1 commit (50.00%)
	Total: 28 tokens, 2 commits


static inline void adjust_tsc_offset_host(struct kvm_vcpu *vcpu, s64 adjustment)
{
	if (vcpu->arch.tsc_scaling_ratio != kvm_default_tsc_scaling_ratio)
		WARN_ON(adjustment < 0);
	adjustment = kvm_scale_tsc(vcpu, (u64) adjustment);
	adjust_tsc_offset_guest(vcpu, adjustment);
}

Contributors:
	Haozhong Zhang: 51 tokens (100.00%), 1 commit (100.00%)
	Total: 51 tokens, 1 commit

#ifdef CONFIG_X86_64
static u64 read_tsc(void)
{
	u64 ret = (u64)rdtsc_ordered();
	u64 last = pvclock_gtod_data.clock.cycle_last;

	if (likely(ret >= last))
		return ret;

	/*
	 * GCC likes to generate cmov here, but this branch is extremely
	 * predictable (it's just a function of time and the likely is
	 * very likely) and there's a data dependence, so force GCC
	 * to generate a branch instead.  I don't barrier() because
	 * we don't actually need a barrier, and if this function
	 * ever gets inlined it will generate worse code.
	 */
	asm volatile ("");

	return last;
}

Contributors:
	Marcelo Tosatti: 31 tokens (68.89%), 1 commit (16.67%)
	Andrew Lutomirski: 6 tokens (13.33%), 1 commit (16.67%)
	Carsten Otte: 3 tokens (6.67%), 1 commit (16.67%)
	Thomas Gleixner: 3 tokens (6.67%), 1 commit (16.67%)
	Huang Ying: 1 token (2.22%), 1 commit (16.67%)
	Adam Buchbinder: 1 token (2.22%), 1 commit (16.67%)
	Total: 45 tokens, 6 commits


static inline u64 vgettsc(u64 *cycle_now)
{
	long v;
	struct pvclock_gtod_data *gtod = &pvclock_gtod_data;

	*cycle_now = read_tsc();

	v = (*cycle_now - gtod->clock.cycle_last) & gtod->clock.mask;
	return v * gtod->clock.mult;
}

Contributors:
	Marcelo Tosatti: 55 tokens (98.21%), 1 commit (50.00%)
	Thomas Gleixner: 1 token (1.79%), 1 commit (50.00%)
	Total: 56 tokens, 2 commits


static int do_monotonic_boot(s64 *t, u64 *cycle_now)
{
	struct pvclock_gtod_data *gtod = &pvclock_gtod_data;
	unsigned long seq;
	int mode;
	u64 ns;

	do {
		seq = read_seqcount_begin(&gtod->seq);
		mode = gtod->clock.vclock_mode;
		ns = gtod->nsec_base;
		ns += vgettsc(cycle_now);
		ns >>= gtod->clock.shift;
		ns += gtod->boot_ns;
	} while (unlikely(read_seqcount_retry(&gtod->seq, seq)));
	*t = ns;

	return mode;
}

Contributors:
	Marcelo Tosatti: 80 tokens (76.92%), 1 commit (33.33%)
	Thomas Gleixner: 24 tokens (23.08%), 2 commits (66.67%)
	Total: 104 tokens, 3 commits


static int do_realtime(struct timespec *ts, u64 *cycle_now)
{
	struct pvclock_gtod_data *gtod = &pvclock_gtod_data;
	unsigned long seq;
	int mode;
	u64 ns;

	do {
		seq = read_seqcount_begin(&gtod->seq);
		mode = gtod->clock.vclock_mode;
		ts->tv_sec = gtod->wall_time_sec;
		ns = gtod->nsec_base;
		ns += vgettsc(cycle_now);
		ns >>= gtod->clock.shift;
	} while (unlikely(read_seqcount_retry(&gtod->seq, seq)));

	ts->tv_sec += __iter_div_u64_rem(ns, NSEC_PER_SEC, &ns);
	ts->tv_nsec = ns;

	return mode;
}

Contributors:
	Marcelo Tosatti: 122 tokens (100.00%), 1 commit (100.00%)
	Total: 122 tokens, 1 commit

/* returns true if host is using tsc clocksource */
static bool kvm_get_time_and_clockread(s64 *kernel_ns, u64 *cycle_now)
{
	/* checked again under seqlock below */
	if (pvclock_gtod_data.clock.vclock_mode != VCLOCK_TSC)
		return false;

	return do_monotonic_boot(kernel_ns, cycle_now) == VCLOCK_TSC;
}

Contributors:
	Marcelo Tosatti: 33 tokens (86.84%), 1 commit (33.33%)
	Thomas Gleixner: 5 tokens (13.16%), 2 commits (66.67%)
	Total: 38 tokens, 3 commits

/* returns true if host is using tsc clocksource */
static bool kvm_get_walltime_and_clockread(struct timespec *ts, u64 *cycle_now)
{
	/* checked again under seqlock below */
	if (pvclock_gtod_data.clock.vclock_mode != VCLOCK_TSC)
		return false;

	return do_realtime(ts, cycle_now) == VCLOCK_TSC;
}

Contributors:
	Marcelo Tosatti: 39 tokens (100.00%), 1 commit (100.00%)
	Total: 39 tokens, 1 commit

#endif

/*
 *
 * Assuming a stable TSC across physical CPUS, and a stable TSC
 * across virtual CPUs, the following condition is possible.
 * Each numbered line represents an event visible to both
 * CPUs at the next numbered event.
 *
 * "timespecX" represents host monotonic time. "tscX" represents
 * RDTSC value.
 *
 *		VCPU0 on CPU0		|	VCPU1 on CPU1
 *
 * 1.  read timespec0,tsc0
 * 2.					| timespec1 = timespec0 + N
 *					| tsc1 = tsc0 + M
 * 3. transition to guest		| transition to guest
 * 4. ret0 = timespec0 + (rdtsc - tsc0) |
 * 5.					| ret1 = timespec1 + (rdtsc - tsc1)
 *					| ret1 = timespec0 + N + (rdtsc - (tsc0 + M))
 *
 * Since ret0 update is visible to VCPU1 at time 5, to obey monotonicity:
 *
 *	- ret0 < ret1
 *	- timespec0 + (rdtsc - tsc0) < timespec0 + N + (rdtsc - (tsc0 + M))
 *		...
 *	- 0 < N - M => M < N
 *
 * That is, when timespec0 != timespec1, M < N. Unfortunately that is not
 * always the case (the difference between two distinct xtime instances
 * might be smaller than the difference between corresponding TSC reads,
 * when updating guest vcpus pvclock areas).
 *
 * To avoid that problem, do not allow visibility of distinct
 * system_timestamp/tsc_timestamp values simultaneously: use a master
 * copy of host monotonic time values. Update that master copy
 * in lockstep.
 *
 * Rely on synchronization of host TSCs and guest TSCs for monotonicity.
 *
 */
static void pvclock_update_vm_gtod_copy(struct kvm *kvm) { #ifdef CONFIG_X86_64 struct kvm_arch *ka = &kvm->arch; int vclock_mode; bool host_tsc_clocksource, vcpus_matched; vcpus_matched = (ka->nr_vcpus_matched_tsc + 1 == atomic_read(&kvm->online_vcpus)); /* * If the host uses TSC clock, then passthrough TSC as stable * to the guest. */ host_tsc_clocksource = kvm_get_time_and_clockread( &ka->master_kernel_ns, &ka->master_cycle_now); ka->use_master_clock = host_tsc_clocksource && vcpus_matched && !ka->backwards_tsc_observed && !ka->boot_vcpu_runs_old_kvmclock; if (ka->use_master_clock) atomic_set(&kvm_guest_has_master_clock, 1); vclock_mode = pvclock_gtod_data.clock.vclock_mode; trace_kvm_update_master_clock(ka->use_master_clock, vclock_mode, vcpus_matched); #endif }

Contributors

Person  Tokens  Prop  Commits  CommitProp
Marcelo Tosatti  117  98.32%  4  80.00%
Ladi Prosek  2  1.68%  1  20.00%
Total  119  100.00%  5  100.00%
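
Editor's note: a concrete instance of the inequality in the comment above. Treating 1 TSC tick as 1 ns for simplicity, a TSC delta M that exceeds the wall-clock delta N makes VCPU1's reading smaller than the value VCPU0 already published, i.e. guest time goes backwards. The numbers below are illustrative only.

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t timespec0 = 1000, tsc0 = 5000;    /* VCPU0's sample          */
	uint64_t N = 10, M = 30;                   /* here M > N: the bad case */
	uint64_t timespec1 = timespec0 + N, tsc1 = tsc0 + M;
	uint64_t tsc_now = 5100;                   /* same instant, both CPUs  */

	uint64_t ret0 = timespec0 + (tsc_now - tsc0);  /* VCPU0 reads 1100 */
	uint64_t ret1 = timespec1 + (tsc_now - tsc1);  /* VCPU1 reads 1080 */

	/* ret1 < ret0 although ret0 was already visible: monotonicity broken */
	printf("ret0=%llu ret1=%llu\n",
	       (unsigned long long)ret0, (unsigned long long)ret1);
	return 0;
}

The master-copy scheme avoids this by forcing both VCPUs to use the same (timespec0, tsc0) pair, making M and N irrelevant.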


void kvm_make_mclock_inprogress_request(struct kvm *kvm)
{
	kvm_make_all_cpus_request(kvm, KVM_REQ_MCLOCK_INPROGRESS);
}

Contributors

Person  Tokens  Prop  Commits  CommitProp
Paolo Bonzini  17  100.00%  1  100.00%
Total  17  100.00%  1  100.00%


static void kvm_gen_update_masterclock(struct kvm *kvm)
{
#ifdef CONFIG_X86_64
	int i;
	struct kvm_vcpu *vcpu;
	struct kvm_arch *ka = &kvm->arch;

	spin_lock(&ka->pvclock_gtod_sync_lock);
	kvm_make_mclock_inprogress_request(kvm);
	/* no guest entries from this point */
	pvclock_update_vm_gtod_copy(kvm);

	kvm_for_each_vcpu(i, vcpu, kvm)
		kvm_make_request(KVM_REQ_CLOCK_UPDATE, vcpu);

	/* guest entries allowed */
	kvm_for_each_vcpu(i, vcpu, kvm)
		kvm_clear_request(KVM_REQ_MCLOCK_INPROGRESS, vcpu);

	spin_unlock(&ka->pvclock_gtod_sync_lock);
#endif
}

Contributors

Person  Tokens  Prop  Commits  CommitProp
Marcelo Tosatti  90  97.83%  1  33.33%
Radim Krčmář  1  1.09%  1  33.33%
Guo Hui Liu  1  1.09%  1  33.33%
Total  92  100.00%  3  100.00%


u64 get_kvmclock_ns(struct kvm *kvm)
{
	struct kvm_arch *ka = &kvm->arch;
	struct pvclock_vcpu_time_info hv_clock;
	u64 ret;

	spin_lock(&ka->pvclock_gtod_sync_lock);
	if (!ka->use_master_clock) {
		spin_unlock(&ka->pvclock_gtod_sync_lock);
		return ktime_get_boot_ns() + ka->kvmclock_offset;
	}

	hv_clock.tsc_timestamp = ka->master_cycle_now;
	hv_clock.system_time = ka->master_kernel_ns + ka->kvmclock_offset;
	spin_unlock(&ka->pvclock_gtod_sync_lock);

	/* both __this_cpu_read() and rdtsc() should be on the same cpu */
	get_cpu();

	kvm_get_time_scale(NSEC_PER_SEC, __this_cpu_read(cpu_tsc_khz) * 1000LL,
			   &hv_clock.tsc_shift,
			   &hv_clock.tsc_to_system_mul);
	ret = __pvclock_read_cycles(&hv_clock, rdtsc());

	put_cpu();

	return ret;
}

Contributors

Person  Tokens  Prop  Commits  CommitProp
Paolo Bonzini  115  87.79%  2  50.00%
Wanpeng Li  15  11.45%  1  25.00%
Marcelo Tosatti  1  0.76%  1  25.00%
Total  131  100.00%  4  100.00%
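
Editor's note: get_kvmclock_ns() asks kvm_get_time_scale() for a (tsc_shift, tsc_to_system_mul) pair such that __pvclock_read_cycles() can turn a raw TSC delta into nanoseconds with one multiply and one shift. Below is a hedged sketch of a function producing such a pair; kvm_get_time_scale() computes it differently, and time_scale() here is purely illustrative (it also relies on the GCC/Clang unsigned __int128 extension).

#include <stdint.h>

static void time_scale(uint64_t hz, uint32_t *mul, int8_t *shift)
{
	unsigned __int128 ratio;
	int8_t s = 0;

	/*
	 * Want: ns = ((delta << shift) * mul) >> 32  ==  delta * 1e9 / hz.
	 * Grow the shift until the 32.32 fixed-point ratio fits in 32 bits.
	 */
	for (;;) {
		ratio = ((unsigned __int128)1000000000ull << 32) /
			((uint64_t)hz << s);
		if (ratio <= 0xffffffffu)
			break;
		s++;
	}
	*mul = (uint32_t)ratio;
	*shift = s;
}

For a 2.5 GHz TSC this yields shift = 0 and mul ≈ 0x66666666, i.e. 0.4 ns per cycle; slower TSCs get a positive shift so the multiplier keeps its precision.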


static void kvm_setup_pvclock_page(struct kvm_vcpu *v)
{
	struct kvm_vcpu_arch *vcpu = &v->arch;
	struct pvclock_vcpu_time_info guest_hv_clock;

	if (unlikely(kvm_read_guest_cached(v->kvm, &vcpu->pv_time,
		&guest_hv_clock, sizeof(guest_hv_clock))))
		return;

	/* This VCPU is paused, but it's legal for a guest to read another
	 * VCPU's kvmclock, so we really have to follow the specification where
	 * it says that version is odd if data is being modified, and even after
	 * it is consistent.
	 *
	 * Version field updates must be kept separate.  This is because
	 * kvm_write_guest_cached might use a "rep movs" instruction, and
	 * writes within a string instruction are weakly ordered.  So there
	 * are three writes overall.
	 *
	 * As a small optimization, only write the version field in the first
	 * and third write.  The vcpu->pv_time cache is still valid, because the
	 * version field is the first in the struct.
	 */
	BUILD_BUG_ON(offsetof(struct pvclock_vcpu_time_info, version) != 0);

	vcpu->hv_clock.version = guest_hv_clock.version + 1;
	kvm_write_guest_cached(v->kvm, &vcpu->pv_time,
				&vcpu->hv_clock,
				sizeof(vcpu->hv_clock.version));

	smp_wmb();

	/* retain PVCLOCK_GUEST_STOPPED if set in guest copy */
	vcpu->hv_clock.flags |= (guest_hv_clock.flags & PVCLOCK_GUEST_STOPPED);

	if (vcpu->pvclock_set_guest_stopped_request) {
		vcpu->hv_clock.flags |= PVCLOCK_GUEST_STOPPED;
		vcpu->pvclock_set_guest_stopped_request = false;
	}

	trace_kvm_pvclock_update(v->vcpu_id, &vcpu->hv_clock);

	kvm_write_guest_cached(v->kvm, &vcpu->pv_time,
				&vcpu->hv_clock,
				sizeof(vcpu->hv_clock));

	smp_wmb();

	vcpu->hv_clock.version++;
	kvm_write_guest_cached(v->kvm, &vcpu->pv_time,
				&vcpu->hv_clock,
				sizeof(vcpu->hv_clock.version));
}

Contributors

Person  Tokens  Prop  Commits  CommitProp
Paolo Bonzini  211  98.14%  2  66.67%
Marcelo Tosatti  4  1.86%  1  33.33%
Total  215  100.00%  3  100.00%
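
Editor's note: the guest consumes the odd/even version protocol written out above with a matching retry loop. Below is a hedged, self-contained sketch of such a guest-side reader (struct layout per the pvclock ABI; guest_read_ns() and the plain compiler barriers are illustrative, not the kernel's pvclock_clocksource_read(); unsigned __int128 is a GCC/Clang extension).

#include <stdint.h>

/* Layout of the shared page, as in the guest/host ABI. */
struct pvclock_vcpu_time_info {
	uint32_t version;
	uint32_t pad0;
	uint64_t tsc_timestamp;
	uint64_t system_time;
	uint32_t tsc_to_system_mul;
	int8_t   tsc_shift;
	uint8_t  flags;
	uint8_t  pad[2];
};

static inline uint64_t rdtsc_now(void)
{
	uint32_t lo, hi;

	__asm__ volatile("rdtsc" : "=a"(lo), "=d"(hi));
	return ((uint64_t)hi << 32) | lo;
}

static uint64_t guest_read_ns(const volatile struct pvclock_vcpu_time_info *ti)
{
	uint32_t ver;
	uint64_t delta, ns;

	do {
		ver = ti->version;                 /* odd: host write in flight */
		__asm__ volatile("" ::: "memory"); /* compiler barrier          */
		delta = rdtsc_now() - ti->tsc_timestamp;
		if (ti->tsc_shift >= 0)
			delta <<= ti->tsc_shift;
		else
			delta >>= -ti->tsc_shift;
		ns = ti->system_time +
		     (uint64_t)(((unsigned __int128)delta *
				 ti->tsc_to_system_mul) >> 32);
		__asm__ volatile("" ::: "memory");
	} while ((ver & 1) || ver != ti->version);

	return ns;
}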


static int kvm_guest_time_update(struct kvm_vcpu *v)
{
	unsigned long flags, tgt_tsc_khz;
	struct kvm_vcpu_arch *vcpu = &v->arch;
	struct kvm_arch *ka = &v->kvm->arch;
	s64 kernel_ns;
	u64 tsc_timestamp, host_tsc;
	u8 pvclock_flags;
	bool use_master_clock;

	kernel_ns = 0;
	host_tsc = 0;

	/*
	 * If the host uses TSC clock, then passthrough TSC as stable
	 * to the guest.
	 */
	spin_lock(&ka->pvclock_gtod_sync_lock);
	use_master_clock = ka->use_master_clock;
	if (use_master_clock) {
		host_tsc = ka->master_cycle_now;
		kernel_ns = ka->master_kernel_ns;
	}
	spin_unlock(&ka->pvclock_gtod_sync_lock);

	/* Keep irq disabled to prevent changes to the clock */
	local_irq_save(flags);
	tgt_tsc_khz = __this_cpu_read(cpu_tsc_khz);
	if (unlikely(tgt_tsc_khz == 0)) {
		local_irq_restore(flags);
		kvm_make_request(KVM_REQ_CLOCK_UPDATE, v);
		return 1;
	}
	if (!use_master_clock) {
		host_tsc = rdtsc();
		kernel_ns = ktime_get_boot_ns();
	}

	tsc_timestamp = kvm_read_l1_tsc(v, host_tsc);

	/*
	 * We may have to catch up the TSC to match elapsed wall clock
	 * time for two reasons, even if kvmclock is used.
	 *   1) CPU could have been running below the maximum TSC rate
	 *   2) Broken TSC compensation resets the base at each VCPU
	 *      entry to avoid unknown leaps of TSC even when running
	 *      again on the same CPU.  This may cause apparent elapsed
	 *      time to disappear, and the guest to stand still or run
	 *      very slowly.
	 */
	if (vcpu->tsc_catchup) {
		u64 tsc = compute_guest_tsc(v, kernel_ns);
		if (tsc > tsc_timestamp) {
			adjust_tsc_offset_guest(v, tsc - tsc_timestamp);
			tsc_timestamp = tsc;
		}
	}

	local_irq_restore(flags);

	/* With all the info we got, fill in the values */

	if (kvm_has_tsc_control)
		tgt_tsc_khz = kvm_scale_tsc(v, tgt_tsc_khz);

	if (unlikely(vcpu->hw_tsc_khz != tgt_tsc_khz)) {
		kvm_get_time_scale(NSEC_PER_SEC, tgt_tsc_khz * 1000LL,
				   &vcpu->hv_clock.tsc_shift,
				   &vcpu->hv_clock.tsc_to_system_mul);
		vcpu->hw_tsc_khz = tgt_tsc_khz;
	}

	vcpu->hv_clock.tsc_timestamp = tsc_timestamp;
	vcpu->hv_clock.system_time = kernel_ns + v->kvm->arch.kvmclock_offset;
	vcpu->last_guest_tsc = tsc_timestamp;

	/* If the host uses TSC clocksource, then it is stable */
	pvclock_flags = 0;
	if (use_master_clock)
		pvclock_flags |= PVCLOCK_TSC_STABLE_BIT;

	vcpu->hv_clock.flags = pvclock_flags;

	if (vcpu->pv_time_enabled)
		kvm_setup_pvclock_page(v);
	if (v == kvm_get_vcpu(v->kvm, 0))
		kvm_hv_setup_tsc_page(v->kvm, &vcpu->hv_clock);
	return 0;
}

Contributors

Person  Tokens  Prop  Commits  CommitProp
Marcelo Tosatti  136  37.99%  4  12.12%
Zachary Amsden  105  29.33%  8  24.24%
Paolo Bonzini  36  10.06%  5  15.15%
Glauber de Oliveira Costa  30  8.38%  3  9.09%
Radim Krčmář  13  3.63%  1  3.03%
Ed Swierk  11  3.07%  1  3.03%
Carsten Otte  6  1.68%  1  3.03%
David Matlack  4  1.12%  1  3.03%
Andrew Honig  4  1.12%  1  3.03%
Huang Ying  4  1.12%  1  3.03%
Haozhong Zhang  3  0.84%  2  6.06%
Gleb Natapov  2  0.56%  1  3.03%
Gerd Hoffmann  1  0.28%  1  3.03%
Andrew Lutomirski  1  0.28%  1  3.03%
Christoph Lameter  1  0.28%  1  3.03%
Nadav Har'El  1  0.28%  1  3.03%
Total  358  100.00%  33  100.00%

/*
 * kvmclock updates which are isolated to a given vcpu, such as
 * vcpu->cpu migration, should not allow system_timestamp from
 * the rest of the vcpus to remain static. Otherwise ntp frequency
 * correction applies to one vcpu's system_timestamp but not
 * the others.
 *
 * So in those cases, request a kvmclock update for all vcpus.
 * We need to rate-limit these requests though, as they can
 * considerably slow guests that have a large number of vcpus.
 * The time for a remote vcpu to update its kvmclock is bound
 * by the delay we use to rate-limit the updates.
 */

#define KVMCLOCK_UPDATE_DELAY msecs_to_jiffies(100)

static void kvmclock_update_fn(struct work_struct *work)
{
	int i;
	struct delayed_work *dwork = to_delayed_work(work);
	struct kvm_arch *ka = container_of(dwork, struct kvm_arch,
					   kvmclock_update_work);
	struct kvm *kvm = container_of(ka, struct kvm, arch);
	struct kvm_vcpu *vcpu;

	kvm_for_each_vcpu(i, vcpu, kvm) {
		kvm_make_request(KVM_REQ_CLOCK_UPDATE, vcpu);
		kvm_vcpu_kick(vcpu);
	}
}

Contributors

Person  Tokens  Prop  Commits  CommitProp
Marcelo Tosatti  44  54.32%  1  33.33%
Andrew Jones  36  44.44%  1  33.33%
Guo Hui Liu  1  1.23%  1  33.33%
Total  81  100.00%  3  100.00%


static void kvm_gen_kvmclock_update(struct kvm_vcpu *v)
{
	struct kvm *kvm = v->kvm;

	kvm_make_request(KVM_REQ_CLOCK_UPDATE, v);
	schedule_delayed_work(&kvm->arch.kvmclock_update_work,
				KVMCLOCK_UPDATE_DELAY);
}

Contributors

Person  Tokens  Prop  Commits  CommitProp
Andrew Jones  38  97.44%  1  50.00%
Guo Hui Liu  1  2.56%  1  50.00%
Total  39  100.00%  2  100.00%

#define KVMCLOCK_SYNC_PERIOD (300 * HZ)

static void kvmclock_sync_fn(struct work_struct *work)
{
	struct delayed_work *dwork = to_delayed_work(work);
	struct kvm_arch *ka = container_of(dwork, struct kvm_arch,
					   kvmclock_sync_work);
	struct kvm *kvm = container_of(ka, struct kvm, arch);

	if (!kvmclock_periodic_sync)
		return;

	schedule_delayed_work(&kvm->arch.kvmclock_update_work, 0);
	schedule_delayed_work(&kvm->arch.kvmclock_sync_work,
					KVMCLOCK_SYNC_PERIOD);
}

Contributors

Person  Tokens  Prop  Commits  CommitProp
Andrew Jones  75  92.59%  1  50.00%
Marcelo Tosatti  6  7.41%  1  50.00%
Total  81  100.00%  2  100.00%


static int set_msr_mce(struct kvm_vcpu *vcpu, u32 msr, u64 data)
{
	u64 mcg_cap = vcpu->arch.mcg_cap;
	unsigned bank_num = mcg_cap & 0xff;

	switch (msr) {
	case MSR_IA32_MCG_STATUS:
		vcpu->arch.mcg_status = data;
		break;
	case MSR_IA32_MCG_CTL:
		if (!(mcg_cap & MCG_CTL_P))
			return 1;
		if (data != 0 && data != ~(u64)0)
			return -1;
		vcpu->arch.mcg_ctl = data;
		break;
	default:
		if (msr >= MSR_IA32_MC0_CTL &&
		    msr < MSR_IA32_MCx_CTL(bank_num)) {
			u32 offset = msr - MSR_IA32_MC0_CTL;
			/* only 0 or all 1s can be written to IA32_MCi_CTL
			 * some Linux kernels though clear bit 10 in bank 4 to
			 * workaround a BIOS/GART TBL issue on AMD K8s, ignore
			 * this to avoid an uncaught #GP in the guest
			 */
			if ((offset & 0x3) == 0 &&
			    data != 0 && (data | (1 << 10)) != ~(u64)0)
				return -1;
			vcpu->arch.mce_banks[offset] = data;
			break;
		}
		return 1;
	}
	return 0;
}

Contributors

Person  Tokens  Prop  Commits  CommitProp
Xiao Guangrong  96  56.80%  1  9.09%
Marcelo Tosatti  27  15.98%  1  9.09%
Carsten Otte  17  10.06%  1  9.09%
Gleb Natapov  12  7.10%  2  18.18%
Andre Przywara  7  4.14%  2  18.18%
Avi Kivity  4  2.37%  1  9.09%
Huang Ying  3  1.78%  1  9.09%
Wanpeng Li  2  1.18%  1  9.09%
Alexander Graf  1  0.59%  1  9.09%
Total  169  100.00%  11  100.00%


static int xen_hvm_config(struct kvm_vcpu *vcpu, u64 data)
{
	struct kvm *kvm = vcpu->kvm;
	int lm = is_long_mode(vcpu);
	u8 *blob_addr = lm ? (u8 *)(long)kvm->arch.xen_hvm_config.blob_addr_64
		: (u8 *)(long)kvm->arch.xen_hvm_config.blob_addr_32;
	u8 blob_size = lm ? kvm->arch.xen_hvm_config.blob_size_64
		: kvm->arch.xen_hvm_config.blob_size_32;
	u32 page_num = data & ~PAGE_MASK;
	u64 page_addr = data & PAGE_MASK;
	u8 *page;
	int r;

	r = -E2BIG;
	if (page_num >= blob_size)
		goto out;
	r = -ENOMEM;
	page = memdup_user(blob_addr + (page_num * PAGE_SIZE), PAGE_SIZE);
	if (IS_ERR(page)) {
		r = PTR_ERR(page);
		goto out;
	}
	if (kvm_vcpu_write_guest(vcpu, page_addr, page, PAGE_SIZE))
		goto out_free;
	r = 0;
out_free:
	kfree(page);
out:
	return r;
}

Contributors

Person  Tokens  Prop  Commits  CommitProp
Ed Swierk  122  62.56%  1  12.50%
Glauber de Oliveira Costa  29  14.87%  1  12.50%
Sasha Levin  16  8.21%  1  12.50%
Gleb Natapov  14  7.18%  1  12.50%
Andre Przywara  5  2.56%  1  12.50%
Huang Ying  5  2.56%  1  12.50%
Paolo Bonzini  2  1.03%  1  12.50%
Carsten Otte  2  1.03%  1  12.50%
Total  195  100.00%  8  100.00%


static int kvm_pv_enable_async_pf(struct kvm_vcpu *vcpu, u64 data)
{
	gpa_t gpa = data & ~0x3f;

	/* Bits 3:5 are reserved, Should be zero */
	if (data & 0x38)
		return 1;

	vcpu->arch.apf.msr_val = data;

	if (!(data & KVM_ASYNC_PF_ENABLED)) {
		kvm_clear_async_pf_completion_queue(vcpu);
		kvm_async_pf_hash_reset(vcpu);
		return 0;
	}

	if (kvm_gfn_to_hva_cache_init(vcpu->kvm, &vcpu->arch.apf.data, gpa,
					sizeof(u32)))
		return 1;

	vcpu->arch.apf.send_user_only = !(data & KVM_ASYNC_PF_SEND_ALWAYS);
	vcpu->arch.apf.delivery_as_pf_vmexit = data & KVM_ASYNC_PF_DELIVERY_AS_PF_VMEXIT;
	kvm_async_pf_wakeup_all(vcpu);
	return 0;
}

Contributors

Person  Tokens  Prop  Commits  CommitProp
Gleb Natapov  96  74.42%  3  37.50%
Wanpeng Li  14  10.85%  1  12.50%
Avi Kivity  10  7.75%  1  12.50%
Andrew Honig  5  3.88%  1  12.50%
Paolo Bonzini  3  2.33%  1  12.50%
Joerg Roedel  1  0.78%  1  12.50%
Total  129  100.00%  8  100.00%


static void kvmclock_reset(struct kvm_vcpu *vcpu)
{
	vcpu->arch.pv_time_enabled = false;
}

Contributors

Person  Tokens  Prop  Commits  CommitProp
Glauber de Oliveira Costa  17  89.47%  1  50.00%
Andrew Honig  2  10.53%  1  50.00%
Total  19  100.00%  2  100.00%


static void record_steal_time(struct kvm_vcpu *vcpu)
{
	if (!(vcpu->arch.st.msr_val & KVM_MSR_ENABLED))
		return;

	if (unlikely(kvm_read_guest_cached(vcpu->kvm, &vcpu->arch.st.stime,
		&vcpu->arch.st.steal, sizeof(struct kvm_steal_time))))
		return;

	vcpu->arch.st.steal.preempted = 0;

	if (vcpu->arch.st.steal.version & 1)
		vcpu->arch.st.steal.version += 1;  /* first time write, random junk */

	vcpu->arch.st.steal.version += 1;

	kvm_write_guest_cached(vcpu->kvm, &vcpu->arch.st.stime,
		&vcpu->arch.st.steal, sizeof(struct kvm_steal_time));

	smp_wmb();

	vcpu->arch.st.steal.steal += current->sched_info.run_delay -
		vcpu->arch.st.last_steal;
	vcpu->arch.st.last_steal = current->sched_info.run_delay;

	kvm_write_guest_cached(vcpu->kvm, &vcpu->arch.st.stime,
		&vcpu->arch.st.steal, sizeof(struct kvm_steal_time));

	smp_wmb();

	vcpu->arch.st.steal.version += 1;

	kvm_write_guest_cached(vcpu->kvm, &vcpu->arch.st.stime,
		&vcpu->arch.st.steal, sizeof(struct kvm_steal_time));
}

Contributors

Person  Tokens  Prop  Commits  CommitProp
Glauber de Oliveira Costa  168  63.64%  1  16.67%
Wanpeng Li  54  20.45%  1  16.67%
Liang Chen  17  6.44%  1  16.67%
Paolo Bonzini  12  4.55%  1  16.67%
Pan Xinhui  12  4.55%  1  16.67%
Marcelo Tosatti  1  0.38%  1  16.67%
Total  264  100.00%  6  100.00%
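
Editor's note: record_steal_time() uses the same odd/even version dance as kvmclock, so a guest reader never sees a torn 'steal' value. A hedged guest-side sketch (struct fields as used by record_steal_time(); read_steal_ns() and the plain compiler barriers are illustrative, not the kernel's steal-time accounting code):

#include <stdint.h>

/* Guest/host steal-time ABI, matching the fields touched above. */
struct kvm_steal_time {
	uint64_t steal;
	uint32_t version;
	uint32_t flags;
	uint8_t  preempted;
	uint8_t  u8_pad[3];
	uint32_t pad[11];
};

static uint64_t read_steal_ns(const volatile struct kvm_steal_time *st)
{
	uint32_t ver;
	uint64_t steal;

	do {
		ver = st->version;                 /* odd: host update in flight */
		__asm__ volatile("" ::: "memory");
		steal = st->steal;
		__asm__ volatile("" ::: "memory");
	} while ((ver & 1) || ver != st->version);

	return steal;
}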


int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
{
	bool pr = false;
	u32 msr = msr_info->index;
	u64 data = msr_info->data;

	switch (msr) {
	case MSR_AMD64_NB_CFG:
	case MSR_IA32_UCODE_REV:
	case MSR_IA32_UCODE_WRITE:
	case MSR_VM_HSAVE_PA:
	case MSR_AMD64_PATCH_LOADER:
	case MSR_AMD64_BU_CFG2:
	case MSR_AMD64_DC_CFG:
		break;

	case MSR_EFER:
		return set_efer(vcpu, data);
	case MSR_K7_HWCR:
		data &= ~(u64)0x40;	/* ignore flush filter disable */
		data &= ~(u64)0x100;	/* ignore ignne emulation enable */
		data &= ~(u64)0x8;	/* ignore TLB cache disable */
		data &= ~(u64)0x40000;	/* ignore Mc status write enable */
		if (data != 0) {
			vcpu_unimpl(vcpu, "unimplemented HWCR wrmsr: 0x%llx\n",
				    data);
			return 1;
		}
		break;
	case MSR_FAM10H_MMIO_CONF_BASE:
		if (data != 0) {
			vcpu_unimpl(vcpu, "unimplemented MMIO_CONF_BASE wrmsr: "
				    "0x%llx\n", data);
			return 1;
		}
		break;
	case MSR_IA32_DEBUGCTLMSR:
		if (!data) {
			/* We support the non-activated case already */
			break;
		} else if (data & ~(DEBUGCTLMSR_LBR | DEBUGCTLMSR_BTF)) {
			/* Values other than LBR and BTF are vendor-specific,
			   thus reserved and should throw a #GP */
			return 1;
		}
		vcpu_unimpl(vcpu, "%s: MSR_IA32_DEBUGCTLMSR 0x%llx, nop\n",
			    __func__, data);
		break;
	case 0x200 ... 0x2ff:
		return kvm_mtrr_set_msr(vcpu, msr, data);
	case MSR_IA32_APICBASE:
		return kvm_set_apic_base(vcpu, msr_info);
	case APIC_BASE_MSR ... APIC_BASE_MSR + 0x3ff:
		return kvm_x2apic_msr_write(vcpu, msr, data);
	case MSR_IA32_TSCDEADLINE:
		kvm_set_lapic_tscdeadline_msr(vcpu, data);
		break;
	case MSR_IA32_TSC_ADJUST:
		if (guest_cpuid_has_tsc_adjust(vcpu)) {
			if (!msr_info->host_initiated) {
				s64 adj = data - vcpu->arch.ia32_tsc_adjust_msr;
				adjust_tsc_offset_guest(vcpu, adj);
			}
			vcpu->arch.ia32_tsc_adjust_msr = data;
		}
		break;
	case MSR_IA32_MISC_ENABLE:
		vcpu->arch.ia32_misc_enable_msr = data;
		break;
	case MSR_IA32_SMBASE:
		if (!msr_info->host_initiated)
			return 1;
		vcpu->arch.smbase = data;
		break;
	case MSR_KVM_WALL_CLOCK_NEW:
	case MSR_KVM_WALL_CLOCK:
		vcpu->kvm->arch.wall_clock = data;
		kvm_write_wall_clock(vcpu->kvm, data);
		break;
	case MSR_KVM_SYSTEM_TIME_NEW:
	case MSR_KVM_SYSTEM_TIME: {
		struct kvm_arch *ka = &vcpu->kvm->arch;

		kvmclock_reset(vcpu);

		if (vcpu->vcpu_id == 0 && !msr_info->host_initiated) {
			bool tmp = (msr == MSR_KVM_SYSTEM_TIME);

			if (ka->boot_vcpu_runs_old_kvmclock != tmp)
				kvm_make_request(KVM_REQ_MASTERCLOCK_UPDATE,
						 vcpu);

			ka->boot_vcpu_runs_old_kvmclock = tmp;
		}

		vcpu->arch.time = data;
		kvm_make_request(KVM_REQ_GLOBAL_CLOCK_UPDATE, vcpu);

		/* we verify if the enable bit is set... */
		if (!(data & 1))
			break;

		if (kvm_gfn_to_hva_cache_init(vcpu->kvm,
		     &vcpu->arch.pv_time, data & ~1ULL,
		     sizeof(struct pvclock_vcpu_time_info)))
			vcpu->arch.pv_time_enabled = false;
		else
			vcpu->arch.pv_time_enabled = true;

		break;
	}
	case MSR_KVM_ASYNC_PF_EN:
		if (kvm_pv_enable_async_pf(vcpu, data))
			return 1;
		break;
	case MSR_KVM_STEAL_TIME:

		if (unlikely(!sched_info_on()))
			return 1;

		if (data & KVM_STEAL_RESERVED_MASK)
			return 1;

		if (kvm_gfn_to_hva_cache_init(vcpu->kvm, &vcpu->arch.st.stime,
						data & KVM_STEAL_VALID_BITS,
						sizeof(struct kvm_steal_time)))
			return 1;

		vcpu->arch.st.msr_val = data;

		if (!(data & KVM_MSR_ENABLED))
			break;

		kvm_make_request(KVM_REQ_STEAL_UPDATE, vcpu);

		break;
	case MSR_KVM_PV_EOI_EN:
		if (kvm_lapic_enable_pv_eoi(vcpu, data))
			return 1;
		break;

	case MSR_IA32_MCG_CTL:
	case MSR_IA32_MCG_STATUS:
	case MSR_IA32_MC0_CTL ... MSR_IA32_MCx_CTL(KVM_MAX_MCE_BANKS) - 1:
		return set_msr_mce(vcpu, msr, data);

	case MSR_K7_PERFCTR0 ... MSR_K7_PERFCTR3:
	case MSR_P6_PERFCTR0 ... MSR_P6_PERFCTR1:
		pr = true; /* fall through */
	case MSR_K7_EVNTSEL0 ... MSR_K7_EVNTSEL3:
	case MSR_P6_EVNTSEL0 ... MSR_P6_EVNTSEL1:
		if (kvm_pmu_is_valid_msr(vcpu, msr))
			return kvm_pmu_set_msr(vcpu, msr_info);

		if (pr || data != 0)
			vcpu_unimpl(vcpu, "disabled perfctr wrmsr: "
				    "0x%x data 0x%llx\n", msr, data);
		break;
	case MSR_K7_CLK_CTL:
		/*
		 * Ignore all writes to this no longer documented MSR.
		 * Writes are only relevant for old K7 processors,
		 * all pre-dating SVM, but a recommended workaround from
		 * AMD for these chips. It is possible to specify the
		 * affected processor models on the command line, hence
		 * the need to ignore the workaround.
		 */
		break;
	case HV_X64_MSR_GUEST_OS_ID ... HV_X64_MSR_SINT15:
	case HV_X64_MSR_CRASH_P0 ... HV_X64_MSR_CRASH_P4:
	case HV_X64_MSR_CRASH_CTL:
	case HV_X64_MSR_STIMER0_CONFIG ... HV_X64_MSR_STIMER3_COUNT:
		return kvm_hv_set_msr_common(vcpu, msr, data,
					     msr_info->host_initiated);
	case MSR_IA32_BBL_CR_CTL3:
		/* Drop writes to this legacy MSR -- see rdmsr
		 * counterpart for further detail.
		 */
		vcpu_unimpl(vcpu, "ignored wrmsr: 0x%x data 0x%llx\n", msr, data);
		break;
	case MSR_AMD64_OSVW_ID_LENGTH:
		if (!guest_cpuid_has_osvw(vcpu))
			return 1;
		vcpu->arch.osvw.length = data;
		break;
	case MSR_AMD64_OSVW_STATUS:
		if (!guest_cpuid_has_osvw(vcpu))
			return 1;
		vcpu->arch.osvw.status = data;
		break;
	case MSR_PLATFORM_INFO:
		if (!msr_info->host_initiated ||
		    data & ~MSR_PLATFORM_INFO_CPUID_FAULT ||
		    (!(data & MSR_PLATFORM_INFO_CPUID_FAULT) &&
		     cpuid_fault_enabled(vcpu)))
			return 1;
		vcpu->arch.msr_platform_info = data;
		break;
	case MSR_MISC_FEATURES_ENABLES:
		if (data & ~MSR_MISC_FEATURES_ENABLES_CPUID_FAULT ||
		    (data & MSR_MISC_FEATURES_ENABLES_CPUID_FAULT &&
		     !supports_cpuid_fault(vcpu)))
			return 1;
		vcpu->arch.msr_misc_features_enables = data;
		break;
	default:
		if (msr && (msr == vcpu->kvm->arch.xen_hvm_config.msr))
			return xen_hvm_config(vcpu, data);
		if (kvm_pmu_is_valid_msr(vcpu, msr))
			return kvm_pmu_set_msr(vcpu, msr_info);
		if (!ignore_msrs) {
			vcpu_debug_ratelimited(vcpu, "unhandled wrmsr: 0x%x data 0x%llx\n",
				    msr, data);
			return 1;
		} else {
			vcpu_unimpl(vcpu, "ignored wrmsr: 0x%x data 0x%llx\n",
				    msr, data);
			break;
		}
	}
	return 0;
}

Contributors

Person  Tokens  Prop  Commits  CommitProp
Gleb Natapov  228  23.55%  6  10.71%
Glauber de Oliveira Costa  128  13.22%  4  7.14%
Kyle Huey  75  7.75%  1  1.79%
Will Auld  64  6.61%  2  3.57%
Marcelo Tosatti  57  5.89%  2  3.57%
Boris Ostrovsky  50  5.17%  1  1.79%
Andrew Honig  32  3.31%  2  3.57%
Paolo Bonzini  30  3.10%  3  5.36%
Carsten Otte  29  3.00%  1  1.79%
Huang Ying  28  2.89%  1  1.79%
Sheng Yang  28  2.89%  1  1.79%
Avi Kivity  27  2.79%  2  3.57%
Andre Przywara  22  2.27%  2  3.57%
Borislav Petkov  22  2.27%  2  3.57%
Ed Swierk  20  2.07%  1  1.79%
Andrey Smetanin  18  1.86%  3  5.36%
Michael S. Tsirkin  16  1.65%  1  1.79%
john cooper  14  1.45%  1  1.79%
Liu Jinsong  11  1.14%  1  1.79%
Wei Huang  10  1.03%  2  3.57%
Matthias Lange  9  0.93%  1  1.79%
Nicolae Mogoreanu  9  0.93%  1  1.79%
Joerg Roedel  8  0.83%  2  3.57%
Christoffer Dall  6  0.62%  1  1.79%
Zachary Amsden  6  0.62%  1  1.79%
Jes Sorensen  4  0.41%  1  1.79%
Chen Yucong  3  0.31%  1  1.79%
Ladi Prosek  3  0.31%  1  1.79%
Jan Kiszka  3  0.31%  1  1.79%
Xiantao Zhang  2  0.21%  1  1.79%
Xiao Guangrong  1  0.10%  1  1.79%
Haozhong Zhang  1  0.10%  1  1.79%
Chris J Arges  1  0.10%  1  1.79%
Radim Krčmář  1  0.10%  1  1.79%
Guo Chao  1  0.10%  1  1.79%
Bandan Das  1  0.10%  1  1.79%
Total  968  100.00%  56  100.00%
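
Editor's note: the MSR_KVM_SYSTEM_TIME_NEW arm above is what a guest reaches when it enables kvmclock. A hedged guest-side sketch of that wrmsr (MSR index 0x4b564d01 per the kvmclock ABI; wrmsr64() and enable_kvmclock() are illustrative helpers, and time_info_gpa is assumed to be a 4-byte-aligned guest-physical address of a pvclock_vcpu_time_info):

#include <stdint.h>

#define MSR_KVM_SYSTEM_TIME_NEW 0x4b564d01u

static inline void wrmsr64(uint32_t msr, uint64_t val)
{
	__asm__ volatile("wrmsr" :: "c"(msr),
			 "a"((uint32_t)val), "d"((uint32_t)(val >> 32)));
}

static void enable_kvmclock(uint64_t time_info_gpa)
{
	/* Bit 0 is the enable bit that the handler above checks. */
	wrmsr64(MSR_KVM_SYSTEM_TIME_NEW, time_info_gpa | 1);
}

Writing the address with bit 0 clear disables the per-vCPU pvclock page, which is why kvmclock_reset() runs first in the handler.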

EXPORT_SYMBOL_GPL(kvm_set_msr_common);

/*
 * Reads an msr value (of 'msr_index') into 'pdata'.
 * Returns 0 on success, non-0 otherwise.
 * Assumes vcpu_load() was already called.
 */
int kvm_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr)
{
	return kvm_x86_ops->get_msr(vcpu, msr);
}

Contributors

Person  Tokens  Prop  Commits  CommitProp
Gleb Natapov  10  40.00%  1  25.00%
Avi Kivity  6  24.00%  1  25.00%
Carsten Otte  5  20.00%  1  25.00%
Paolo Bonzini  4  16.00%  1  25.00%
Total  25  100.00%  4  100.00%

EXPORT_SYMBOL_GPL(kvm_get_msr);
static int get_msr_mce(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata)
{
	u64 data;
	u64 mcg_cap = vcpu->arch.mcg_cap;
	unsigned bank_num = mcg_cap & 0xff;

	switch (msr) {
	case MSR_IA32_P5_MC_ADDR:
	case MSR_IA32_P5_MC_TYPE:
		data = 0;
		break;
	case MSR_IA32_MCG_CAP:
		data = vcpu->arch.mcg_cap;
		break;
	case MSR_IA32_MCG_CTL:
		if (!(mcg_cap & MCG_CTL_P))
			return 1;
		data = vcpu->arch.mcg_ctl;
		break;
	case MSR_IA32_MCG_STATUS:
		data = vcpu->arch.mcg_status;
		break;
	default:
		if (msr >= MSR_IA32_MC0_CTL &&
		    msr < MSR_IA32_MCx_CTL(bank_num)) {
			u32 offset = msr - MSR_IA32_MC0_CTL;
			data = vcpu->arch.mce_banks[offset];
			break;
		}
		return 1;
	}
	*pdata = data;
	return 0;
}

Contributors

Person  Tokens  Prop  Commits  CommitProp
Huang Ying  68  45.95%  1  10.00%
Carsten Otte  59  39.86%  3  30.00%
Avi Kivity  9  6.08%  1  10.00%
Marcelo Tosatti  6  4.05%  2  20.00%
Chen Yucong  3  2.03%  1  10.00%
Xiantao Zhang  2  1.35%  1  10.00%
Joerg Roedel  1  0.68%  1  10.00%
Total  148  100.00%  10  100.00%


int kvm_get_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
{
	switch (msr_info->index) {
	case MSR_IA32_PLATFORM_ID:
	case MSR_IA32_EBL_CR_POWERON:
	case MSR_IA32_DEBUGCTLMSR:
	case MSR_IA32_LASTBRANCHFROMIP:
	case MSR_IA32_LASTBRANCHTOIP:
	case MSR_IA32_LASTINTFROMIP:
	case MSR_IA32_LASTINTTOIP:
	case MSR_K8_SYSCFG:
	case MSR_K8_TSEG_ADDR:
	case MSR_K8_TSEG_MASK:
	case MSR_K7_HWCR:
	case MSR_VM_HSAVE_PA:
	case MSR_K8_INT_PENDING_MSG:
	case MSR_AMD64_NB_CFG:
	case MSR_FAM10H_MMIO_CONF_BASE:
	case MSR_AMD64_BU_CFG2:
	case MSR_IA32_PERF_CTL:
	case MSR_AMD64_DC_CFG:
		msr_info->data = 0;
		break;
	case MSR_K7_EVNTSEL0 ... MSR_K7_EVNTSEL3:
	case MSR_K7_PERFCTR0 ... MSR_K7_PERFCTR3:
	case MSR_P6_PERFCTR0 ... MSR_P6_PERFCTR1:
	case MSR_P6_EVNTSEL0 ... MSR_P6_EVNTSEL1:
		if (kvm_pmu_is_valid_msr(vcpu, msr_info->index))
			return kvm_pmu_get_msr(vcpu, msr_info->index,
					       &msr_info->data);
		msr_info->data = 0;
		break;
	case MSR_IA32_UCODE_REV:
		msr_info->data = 0x100000000ULL;
		break;
	case MSR_MTRRcap:
	case 0x200 ... 0x2ff:
		return kvm_mtrr_get_msr(vcpu, msr_info->index,
					&msr_info->data);
	case 0xcd: /* fsb frequency */
		msr_info->data = 3;
		break;
		/*
		 * MSR_EBC_FREQUENCY_ID
		 * Conservative value valid for even the basic CPU models.
		 * Models 0,1: 000 in bits 23:21 indicating a bus speed of
		 * 100MHz, model 2 000 in bits 18:16 indicating 100MHz,
		 * and 266MHz for model 3, or 4. Set Core Clock
		 * Frequency to System Bus Frequency Ratio to 1 (bits
		 * 31:24) even though these are only valid for CPU
		 * models > 2, however guests may end up dividing or
		 * multiplying by zero otherwise.
		 */
	case MSR_EBC_FREQUENCY_ID:
		msr_info->data = 1 << 24;
		break;
	case MSR_IA32_APICBASE:
		msr_info->data = kvm_get_apic_base(vcpu);
		break;
	case APIC_BASE_MSR ... APIC_BASE_MSR + 0x3ff:
		return kvm_x2apic_msr_read(vcpu, msr_info->index,
					   &msr_info->data);
		break;
	case MSR_IA32_TSCDEADLINE:
		msr_info->data = kvm_get_lapic_tscdeadline_msr(vcpu);
		break;
	case MSR_IA32_TSC_ADJUST:
		msr_info->data = (u64)vcpu->arch.ia32_tsc_adjust_msr;
		break;
	case MSR_IA32_MISC_ENABLE:
		msr_info->data = vcpu->arch.ia32_misc_enable_msr;
		break;
	case MSR_IA32_SMBASE:
		if (!msr_info->host_initiated)
			return 1;
		msr_info->data = vcpu->arch.smbase;
		break;
	case MSR_IA32_PERF_STATUS:
		/* TSC increment by tick */
		msr_info->data = 1000ULL;
		/* CPU multiplier */
		msr_info->data |= (((uint64_t)4ULL) << 40);
		break;
	case MSR_EFER:
		msr_info->data = vcpu->arch.efer;
		break;
	case MSR_KVM_WALL_CLOCK:
	case MSR_KVM_WALL_CLOCK_NEW:
		msr_info->data = vcpu->kvm->arch.wall_clock;
		break;
	case MSR_KVM_SYSTEM_TIME:
	case MSR_KVM_SYSTEM_TIME_NEW:
		msr_info->data = vcpu->arch.time;
		break;
	case MSR_KVM_ASYNC_PF_EN:
		msr_info->data = vcpu->arch.apf.msr_val;
		break;
	case MSR_KVM_STEAL_TIME:
		msr_info->data = vcpu->arch.st.msr_val;
		break;
	case MSR_KVM_PV_EOI_EN:
		msr_info->data = vcpu->arch.pv_eoi.msr_val;
		break;
	case MSR_IA32_P5_MC_ADDR:
	case MSR_IA32_P5_MC_TYPE:
	case MSR_IA32_MCG_CAP:
	case MSR_IA32_MCG_CTL:
	case MSR_IA32_MCG_STATUS:
	case MSR_IA32_MC0_CTL ... MSR_IA32_MCx_CTL(KVM_MAX_MCE_BANKS) - 1:
		return get_msr_mce(vcpu, msr_info->index, &msr_info->data);
	case MSR_K7_CLK_CTL:
		/*
		 * Provide expected ramp-up count for K7. All other
		 * are set to zero, indicating minimum divisors for
		 * every field.
		 *
		 * This prevents guest kernels on AMD host with CPU
		 * type 6, model 8 and higher from exploding due to
		 * the rdmsr failing.
		 */
		msr_info->data = 0x20000000;
		break;
	case HV_X64_MSR_GUEST_OS_ID ... HV_X64_MSR_SINT15:
	case HV_X64_MSR_CRASH_P0 ... HV_X64_MSR_CRASH_P4:
	case HV_X64_MSR_CRASH_CTL:
	case HV_X64_MSR_STIMER0_CONFIG ... HV_X64_MSR_STIMER3_COUNT:
		return kvm_hv_get_msr_common(vcpu,
					     msr_info->index, &msr_info->data);
		break;
	case MSR_IA32_BBL_CR_CTL3:
		/* This legacy MSR exists but isn't fully documented in current
		 * silicon.  It is however accessed by winxp in very narrow
		 * scenarios where it sets bit #19, itself documented as
		 * a "reserved" bit.  Best effort attempt to source coherent
		 * read data here should the balance of the register be
		 * interpreted by the guest:
		 *
		 * L2 cache control register 3: 64GB range, 256KB size,
		 * enabled, latency 0x1, configured
		 */
		msr_info->data = 0xbe702111;
		break;
	case MSR_AMD64_OSVW_ID_LENGTH:
		if (!guest_cpuid_has_osvw(vcpu))
			return 1;
		msr_info->data = vcpu->arch.osvw.length;
		break;
	case MSR_AMD64_OSVW_STATUS:
		if (!guest_cpuid_has_osvw(vcpu))
			return 1;
		msr_info->data = vcpu->arch.osvw.status;
		break;
	case MSR_PLATFORM_INFO:
		msr_info->data = vcpu->arch.msr_platform_info;
		break;
	case MSR_MISC_FEATURES_ENABLES:
		msr_info->data = vcpu->arch.msr_misc_features_enables;
		break;
	default:
		if (kvm_pmu_is_valid_msr(vcpu, msr_info->index))
			return kvm_pmu_get_msr(vcpu, msr_info->index,
					       &msr_info->data);
		if (!ignore_msrs) {
			vcpu_debug_ratelimited(vcpu, "unhandled rdmsr: 0x%x\n",
					       msr_info->index);
			return 1;
		} else {
			vcpu_unimpl(vcpu, "ignored rdmsr: 0x%x\n",
				    msr_info->index);
			msr_info->data = 0;
		}
		break;
	}
	return 0;
}

Contributors

Person  Tokens  Prop  Commits  CommitProp
Paolo Bonzini  132  19.61%  3  4.17%
Gleb Natapov  101  15.01%  7  9.72%
Carsten Otte  59  8.77%  3  4.17%
Boris Ostrovsky  50  7.43%  1  1.39%
Glauber de Oliveira Costa  38  5.65%  3  4.17%
Huang Ying  31  4.61%  1  1.39%
Avi Kivity  29  4.31%  8  11.11%
Kyle Huey  28  4.16%  1  1.39%
Alexander Graf  23  3.42%  2  2.78%
Jes Sorensen  20  2.97%  2  2.78%
Marcelo Tosatti  17  2.53%  5  6.94%
Andre Przywara  16  2.38%  4  5.56%
Will Auld  15  2.23%  1  1.39%
Andrey Smetanin  14  2.08%  3  4.17%
Michael S. Tsirkin  14  2.08%  1  1.39%
Wei Huang  13  1.93%  2  2.78%
Liu Jinsong  11  1.63%  1  1.39%
Xiantao Zhang  10  1.49%  2  2.78%
john cooper  9  1.34%  1  1.39%
Ben-Ami Yassour  5  0.74%  1  1.39%
Gregory Haskins  4  0.59%  2  2.78%
Sheng Yang  4  0.59%  3  4.17%
Jan Kiszka  4  0.59%  2  2.78%
Dmitry Bilunov  3  0.45%  1  1.39%
Borislav Petkov  3  0.45%  1  1.39%
Ladi Prosek  3  0.45%  1  1.39%
Chen Yucong  3  0.45%  1  1.39%
Nicolas Iooss  2  0.30%  1  1.39%
Beth Kon  2  0.30%  1  1.39%
Zhai, Edwin  2  0.30%  1  1.39%
Jaswinder Singh Rajput  2  0.30%  1  1.39%
Laurent Vivier  2  0.30%  1  1.39%
Bandan Das  1  0.15%  1  1.39%
Gerd Hoffmann  1  0.15%  1  1.39%
Christoffer Dall  1  0.15%  1  1.39%
Xiao Guangrong  1  0.15%  1  1.39%
Total  673  100.00%  72  100.00%

EXPORT_SYMBOL_GPL(kvm_get_msr_common);

/*
 * Read or write a bunch of msrs. All parameters are kernel addresses.
 *
 * @return number of msrs set successfully.
 */
static int __msr_io(struct kvm_vcpu *vcpu, struct kvm_msrs *msrs,
		    struct kvm_msr_entry *entries,
		    int (*do_msr)(struct kvm_vcpu *vcpu,
				  unsigned index, u64 *data))
{
	int i, idx;

	idx = srcu_read_lock(&vcpu->kvm->srcu);
	for (i = 0; i < msrs->nmsrs; ++i)
		if (do_msr(vcpu, entries[i].index, &entries[i].data))
			break;
	srcu_read_unlock(&vcpu->kvm->srcu, idx);

	return i;
}

Contributors

Person  Tokens  Prop  Commits  CommitProp
Carsten Otte  75  68.18%  2  33.33%
Avi Kivity  15  13.64%  1  16.67%
Marcelo Tosatti  15  13.64%  2  33.33%
Huang Ying  5  4.55%  1  16.67%
Total  110  100.00%  6  100.00%

/*
 * Read or write a bunch of msrs. Parameters are user addresses.
 *
 * @return number of msrs set successfully.
 */
static int msr_io(struct kvm_vcpu *vcpu, struct kvm_msrs __user *user_msrs,
		  int (*do_msr)(struct kvm_vcpu *vcpu,
				unsigned index, u64 *data),
		  int writeback)
{
	struct kvm_msrs msrs;
	struct kvm_msr_entry *entries;
	int r, n;
	unsigned size;

	r = -EFAULT;
	if (copy_from_user(&msrs, user_msrs, sizeof msrs))
		goto out;

	r = -E2BIG;
	if (msrs.nmsrs >= MAX_IO_MSRS)
		goto out;

	size = sizeof(struct kvm_msr_entry) * msrs.nmsrs;
	entries = memdup_user(user_msrs->entries, size);
	if (IS_ERR(entries)) {
		r = PTR_ERR(entries);
		goto out;
	}

	r = n = __msr_io(vcpu, &msrs, entries, do_msr);
	if (r < 0)
		goto out_free;

	r = -EFAULT;
	if (writeback && copy_to_user(user_msrs->entries, entries, size))
		goto out_free;

	r = n;

out_free:
	kfree(entries);
out:
	return r;
}

Contributors

Person  Tokens  Prop  Commits  CommitProp
Carsten Otte  156  78.39%  2  22.22%
Zachary Amsden  15  7.54%  2  22.22%
Sasha Levin  13  6.53%  1  11.11%
Dan Kenigsberg  6  3.02%  1  11.11%
Glauber de Oliveira Costa  4  2.01%  1  11.11%
Amit Shah  3  1.51%  1  11.11%
Avi Kivity  2  1.01%  1  11.11%
Total  199  100.00%  9  100.00%
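
Editor's note: msr_io() is reached from userspace through the KVM_GET_MSRS / KVM_SET_MSRS vCPU ioctls, whose return value is the number of MSRs processed (the 'n' above). A hedged userspace sketch reading a single MSR; vcpu_fd is assumed to be an already-open vCPU file descriptor, and read_one_msr() is an illustrative helper:

#include <linux/kvm.h>
#include <stdint.h>
#include <stdlib.h>
#include <sys/ioctl.h>

static int read_one_msr(int vcpu_fd, uint32_t index, uint64_t *data)
{
	struct kvm_msrs *msrs;
	int r;

	/* kvm_msrs carries a flexible array of kvm_msr_entry. */
	msrs = calloc(1, sizeof(*msrs) + sizeof(struct kvm_msr_entry));
	if (!msrs)
		return -1;
	msrs->nmsrs = 1;
	msrs->entries[0].index = index;

	r = ioctl(vcpu_fd, KVM_GET_MSRS, msrs);   /* returns #msrs handled */
	if (r == 1)
		*data = msrs->entries[0].data;
	free(msrs);
	return r == 1 ? 0 : -1;
}

For example, read_one_msr(vcpu_fd, 0x1a0 /* IA32_MISC_ENABLE */, &v) would exercise the MSR_IA32_MISC_ENABLE arm of kvm_get_msr_common().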


int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
{
	int r;

	switch (ext) {
	case KVM_CAP_IRQCHIP:
	case KVM_CAP_HLT:
	case KVM_CAP_MMU_SHADOW_CACHE_CONTROL:
	case KVM_CAP_SET_TSS_ADDR:
	case KVM_CAP_EXT_CPUID:
	case KVM_CAP_EXT_EMUL_CPUID:
	case KVM_CAP_CLOCKSOURCE:
	case KVM_CAP_PIT:
	case KVM_CAP_NOP_IO_DELAY:
	case KVM_CAP_MP_STATE:
	case KVM_CAP_SYNC_MMU:
	case KVM_CAP_USER_NMI:
	case KVM_CAP_REINJECT_CONTROL:
	case KVM_CAP_IRQ_INJECT_STATUS:
	case KVM_CAP_IOEVENTFD:
	case KVM_CAP_IOEVENTFD_NO_LENGTH:
	case KVM_CAP_PIT2:
	case KVM_CAP_PIT_STATE2:
	case KVM_CAP_SET_IDENTITY_MAP_ADDR:
	case KVM_CAP_XEN_HVM:
	case KVM_CAP_VCPU_EVENTS:
	case KVM_CAP_HYPERV:
	case KVM_CAP_HYPERV_VAPIC:
	case KVM_CAP_HYPERV_SPIN:
	case KVM_CAP_HYPERV_SYNIC:
	case KVM_CAP_HYPERV_SYNIC2:
	case KVM_CAP_HYPERV_VP_INDEX:
	case KVM_CAP_PCI_SEGMENT:
	case KVM_CAP_DEBUGREGS:
	case KVM_CAP_X86_ROBUST_SINGLESTEP:
	case KVM_CAP_XSAVE:
	case KVM_CAP_ASYNC_PF:
	case KVM_CAP_GET_TSC_KHZ:
	case KVM_CAP_KVMCLOCK_CTRL:
	case KVM_CAP_READONLY_MEM:
	case KVM_CAP_HYPERV_TIME:
	case KVM_CAP_IOAPIC_POLARITY_IGNORED:
	case KVM_CAP_TSC_DEADLINE_TIMER:
	case KVM_CAP_ENABLE_CAP_VM:
	case KVM_CAP_DISABLE_QUIRKS:
	case KVM_CAP_SET_BOOT_CPU_ID:
	case KVM_CAP_SPLIT_IRQCHIP:
	case KVM_CAP_IMMEDIATE_EXIT:
		r = 1;
		break;
	case KVM_CAP_ADJUST_CLOCK:
		r = KVM_CLOCK_TSC_STABLE;
		break;
	case KVM_CAP_X86_GUEST_MWAIT:
		r = kvm_mwait_in_guest();
		break;
	case KVM_CAP_X86_SMM:
		/* SMBASE is usually relocated above 1M on modern chipsets,
		 * and SMM handlers might indeed rely on 4G segment limits,
		 * so do not report SMM to be available if real mode is
		 * emulated via vm86 mode.  Still, do not go to great lengths
		 * to avoid userspace's usage of the feature, because it is a
		 * fringe case that is not enabled except via specific settings
		 * of the module parameters.
		 */
		r = kvm_x86_ops->cpu_has_high_real_mode_segbase();
		break;
	case KVM_CAP_VAPIC:
		r = !kvm_x86_ops->cpu_has_accelerated_tpr();
		break;
	case KVM_CAP_NR_VCPUS:
		r = KVM_SOFT_MAX_VCPUS;
		break;
	case KVM_CAP_MAX_VCPUS:
		r = KVM_MAX_VCPUS;
		break;
	case KVM_CAP_NR_MEMSLOTS:
		r = KVM_USER_MEM_SLOTS;
		break;
	case KVM_CAP_PV_MMU:	/* obsolete */
		r = 0;
		break;
	case KVM_CAP_MCE:
		r = KVM_MAX_MCE_BANKS;
		break;
	case KVM_CAP_XCRS:
		r = boot_cpu_has(X86_FEATURE_XSAVE);
		break;
	case KVM_CAP_TSC_CONTROL:
		r = kvm_has_tsc_control;
		break;
	case KVM_CAP_X2APIC_API:
		r = KVM_X2APIC_API_VALID_FLAGS;
		break;
	default:
		r = 0;
		break;
	}
	return r;
}

Contributors

PersonTokensPropCommitsCommitProp
Paolo Bonzini2910.58%58.47%
Xiantao Zhang2910.58%11.69%
Avi Kivity269.49%58.47%
Marcelo Tosatti186.57%58.47%
Carsten Otte176.20%11.69%
Sheng Yang165.84%46.78%
Gleb Natapov155.47%58.47%
Michael S. Tsirkin124.38%23.39%
Joerg Roedel114.01%11.69%
Radim Krčmář114.01%23.39%
Jan Kiszka114.01%58.47%
Huang Ying82.92%11.69%
Sasha Levin82.92%11.69%
Borislav Petkov72.55%23.39%
Nadav Amit62.19%11.69%
Alexander Graf62.19%11.69%
Roman Kagan62.19%23.39%
Andrey Smetanin31.09%11.69%
Lai Jiangshan31.09%11.69%
Dan Kenigsberg31.09%11.69%
Xiao Guangrong31.09%11.69%
Gabriel L. Somlo31.09%11.69%
Ed Swierk31.09%11.69%
Steve Rutherford31.09%11.69%
Gregory Haskins31.09%11.69%
Zhai, Edwin31.09%11.69%
Beth Kon31.09%11.69%
Gerd Hoffmann31.09%11.69%
Eric B Munson20.73%11.69%
Glauber de Oliveira Costa20.73%23.39%
Alex Williamson10.36%11.69%
Total274100.00%59100.00%
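
Editor's note: this handler answers the KVM_CHECK_EXTENSION ioctl. A hedged userspace sketch querying two of the capabilities handled above (the printed interpretation is illustrative; error handling is minimal):

#include <fcntl.h>
#include <linux/kvm.h>
#include <stdio.h>
#include <sys/ioctl.h>

int main(void)
{
	int kvm = open("/dev/kvm", O_RDWR);

	if (kvm < 0)
		return 1;

	/* Returns KVM_CLOCK_TSC_STABLE per the ADJUST_CLOCK arm above. */
	printf("KVM_CAP_ADJUST_CLOCK -> %d\n",
	       ioctl(kvm, KVM_CHECK_EXTENSION, KVM_CAP_ADJUST_CLOCK));
	/* Returns KVM_MAX_VCPUS per the MAX_VCPUS arm above. */
	printf("KVM_CAP_MAX_VCPUS -> %d\n",
	       ioctl(kvm, KVM_CHECK_EXTENSION, KVM_CAP_MAX_VCPUS));
	return 0;
}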


long kvm_arch_dev_ioctl(struct file *filp,
			unsigned int ioctl, unsigned long arg)
{
	void __user *argp = (void __user *)arg;
	long r;

	switch (ioctl) {
	case KVM_GET_MSR_INDEX_LIST: {
		struct kvm_msr_list __user *user_msr_list = argp;
		struct kvm_msr_list msr_list;
		unsigned n;

		r = -EFAULT;
		if (copy_from_user(&msr_list, user_msr_list, sizeof msr_list))
			goto out;
		n = msr_list.nmsrs;
		msr_list.nmsrs = num_msrs_to_save + num_emulated_msrs;
		if (copy_to_user(user_msr_list, &msr_list, sizeof msr_list))
			goto out;
		r = -E2BIG;
		if (n < msr_list.nmsrs)
			goto out;
		r = -EFAULT;
		if (copy_to_user(user_msr_list->indices, &msrs_to_save,
				 num_msrs_to_save * sizeof(u32)))
			goto out;
		if (copy_to_user(user_msr_list->indices + num_msrs_to_save,
				 &emulated_msrs,
				 num_emulated_msrs * sizeof(u32)))
			goto out;
		r = 0;
		break;
	}
	case KVM_GET_SUPPORTED_CPUID:
	case KVM_GET_EMULATED_CPUID: {
		struct kvm_cpuid2 __user *cpuid_arg = argp;
		struct kvm_cpuid2 cpuid;

		r = -EFAULT;
		if (copy_from_user(&cpuid, cpuid_arg, sizeof cpuid))
			goto out;

		r = kvm_dev_ioctl_get_cpuid(&cpuid, cpuid_arg->entries,
					    ioctl);
		if (r)
			goto out;

		r = -EFAULT;
		if (copy_to_user(cpuid_arg, &cpuid, sizeof cpuid))
			goto out;
		r = 0;
		break;
	}
	case KVM_X86_GET_MCE_CAP_SUPPORTED: {
		r = -EFAULT;
		if (copy_to_user(argp, &kvm_mce_cap_supported,
				 sizeof(kvm_mce_cap_supported)))
			goto out;
		r = 0;
		break;
	}
	default:
		r = -EINVAL;
	}
out:
	return r;
}

Contributors

Person  Tokens  Prop  Commits  CommitProp
Carsten Otte  144  45.86%  2  20.00%
Avi Kivity  73  23.25%  1  10.00%
Dan Kenigsberg  60  19.11%  1  10.00%
Huang Ying  21  6.69%  1  10.00%
Borislav Petkov  6  1.91%  1  10.00%
Ashok Raj  4  1.27%  1  10.00%
Jan Kiszka  3  0.96%  1  10.00%
Paolo Bonzini  2  0.64%  1  10.00%
Dongxiao Xu  1  0.32%  1  10.00%
Total  314  100.00%  10  100.00%
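
Editor's note: the KVM_GET_MSR_INDEX_LIST arm above implements the usual two-call sizing pattern: a first call with too small an nmsrs fails with E2BIG but writes back the required count. A hedged userspace sketch of that pattern (minimal error handling, illustrative only):

#include <fcntl.h>
#include <linux/kvm.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/ioctl.h>

int main(void)
{
	struct kvm_msr_list probe = { .nmsrs = 0 }, *list;
	int kvm = open("/dev/kvm", O_RDWR);

	if (kvm < 0)
		return 1;

	/* First call fails with E2BIG but updates probe.nmsrs. */
	ioctl(kvm, KVM_GET_MSR_INDEX_LIST, &probe);

	list = calloc(1, sizeof(*list) + probe.nmsrs * sizeof(__u32));
	if (!list)
		return 1;
	list->nmsrs = probe.nmsrs;
	if (ioctl(kvm, KVM_GET_MSR_INDEX_LIST, list) == 0)
		printf("%u save/restore + emulated MSR indices\n",
		       list->nmsrs);
	free(list);
	return 0;
}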


static void wbinvd_ipi(void *garbage)
{
	wbinvd();
}

Contributors

Person  Tokens  Prop  Commits  CommitProp
Sheng Yang  13  100.00%  1  100.00%
Total  13  100.00%  1  100.00%


static bool need_emulate_wbinvd(struct kvm_vcpu *vcpu)
{
	return kvm_arch_has_noncoherent_dma(vcpu->kvm);
}

Contributors

Person  Tokens  Prop  Commits  CommitProp
Sheng Yang  16  84.21%  1  50.00%
Alex Williamson  3  15.79%  1  50.00%
Total  19  100.00%  2  100.00%


void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
	/* Address WBINVD may be executed by guest */
	if (need_emulate_wbinvd(vcpu)) {
		if (kvm_x86_ops->has_wbinvd_exit())
			cpumask_set_cpu(cpu, vcpu->arch.wbinvd_dirty_mask);
		else if (vcpu->cpu != -1 && vcpu->cpu != cpu)
			smp_call_function_single(vcpu->cpu,
					wbinvd_ipi, NULL, 1);
	}

	kvm_x86_ops->vcpu_load(vcpu, cpu);

	/* Apply any externally detected TSC adjustments (due to suspend) */
	if (unlikely(vcpu->arch.tsc_offset_adjustment)) {
		adjust_tsc_offset_host(vcpu, vcpu->arch.tsc_offset_adjustment);
		vcpu->arch.tsc_offset_adjustment = 0;
		kvm_make_request(KVM_REQ_CLOCK_UPDATE, vcpu);
	}

	if (unlikely(vcpu->cpu != cpu) || check_tsc_unstable()) {
		s64 tsc_delta = !vcpu->arch.last_host_tsc ? 0 :
				rdtsc() - vcpu->arch.last_host_tsc;
		if (tsc_delta < 0)
			mark_tsc_unstable("KVM discovered backwards TSC");

		if (check_tsc_unstable()) {
			u64 offset = kvm_compute_tsc_offset(vcpu,
						vcpu->arch.last_guest_tsc);
			kvm_vcpu_write_tsc_offset(vcpu, offset);
			vcpu->arch.tsc_catchup = 1;
		}

		if (kvm_lapic_hv_timer_in_use(vcpu))
			kvm_lapic_restart_hv_timer(vcpu);

		/*
		 * On a host with synchronized TSC, there is no need to update
		 * kvmclock on vcpu->cpu migration
		 */
		if (!vcpu->kvm->arch.use_master_clock || vcpu->cpu == -1)
			kvm_make_request(KVM_REQ_GLOBAL_CLOCK_UPDATE, vcpu);
		if (vcpu->cpu != cpu)
			kvm_make_request(KVM_REQ_MIGRATE_TIMER, vcpu);
		vcpu->cpu = cpu;
	}

	kvm_make_request(KVM_REQ_STEAL_UPDATE, vcpu);
}

Contributors

Person  Tokens  Prop  Commits  CommitProp
Zachary Amsden  122  45.35%  8  32.00%
Sheng Yang  57  21.19%  1  4.00%
Dan Kenigsberg  20  7.43%  1  4.00%
Marcelo Tosatti  20  7.43%  2  8.00%
Carsten Otte  13  4.83%  1  4.00%
Wanpeng Li  11  4.09%  1  4.00%
Glauber de Oliveira Costa  7  2.60%  1  4.00%
Gleb Natapov  4  1.49%  1  4.00%
Joerg Roedel  3  1.12%  1  4.00%
Radim Krčmář  3  1.12%  1  4.00%
Dongxiao Xu  3  1.12%  1  4.00%
Guo Hui Liu  1  0.37%  1  4.00%
Andrew Lutomirski  1  0.37%  1  4.00%
Paolo Bonzini  1  0.37%  1  4.00%
Luiz Fernando N. Capitulino  1  0.37%  1  4.00%
Nikola Ciprich  1  0.37%  1  4.00%
Haozhong Zhang  1  0.37%  1  4.00%
Total  269  100.00%  25  100.00%


static void kvm_steal_time_set_preempted(struct kvm_vcpu *vcpu)
{
	if (!(vcpu->arch.st.msr_val & KVM_MSR_ENABLED))
		return;

	vcpu->arch.st.steal.preempted = 1;

	kvm_write_guest_offset_cached(vcpu->kvm, &vcpu->arch.st.stime,
			&vcpu->arch.st.steal.preempted,
			offsetof(struct kvm_steal_time, preempted),
			sizeof(vcpu->arch.st.steal.preempted));
}

Contributors

Person  Tokens  Prop  Commits  CommitProp
Pan Xinhui  84  96.55%  1  50.00%
Paolo Bonzini  3  3.45%  1  50.00%
Total  87  100.00%  2  100.00%


void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
{
	int idx;

	/*
	 * Disable page faults because we're in atomic context here.
	 * kvm_write_guest_offset_cached() would call might_fault()
	 * that relies on pagefault_disable() to tell if there's a
	 * bug. NOTE: the write to guest memory may not go through if
	 * during postcopy live migration or if there's heavy guest
	 * paging.
	 */
	pagefault_disable();
	/*
	 * kvm_memslots() will be called by
	 * kvm_write_guest_offset_cached() so take the srcu lock.
	 */
	idx = srcu_read_lock(&vcpu->kvm->srcu);
	kvm_steal_time_set_preempted(vcpu);
	srcu_read_unlock(&vcpu->kvm->srcu, idx);
	pagefault_enable();
	kvm_x86_ops->vcpu_put(vcpu);
	kvm_put_guest_fpu(vcpu);
	vcpu->arch.last_host_tsc = rdtsc();
}

Contributors

Person  Tokens  Prop  Commits  CommitProp
Andrea Arcangeli  35  49.30%  2  18.18%
Carsten Otte  10  14.08%  1  9.09%
Zachary Amsden  7  9.86%  2  18.18%
Pan Xinhui  5  7.04%  1  9.09%
Dongxiao Xu  4  5.63%  1  9.09%
Sheng Yang  4  5.63%  1  9.09%
Avi Kivity  4  5.63%  1  9.09%
Andrew Lutomirski  1  1.41%  1  9.09%
Nadav Har'El  1  1.41%  1  9.09%
Total  71  100.00%  11  100.00%


static int kvm_vcpu_ioctl_get_lapic(struct kvm_vcpu *vcpu,
				    struct kvm_lapic_state *s)
{
	if (kvm_x86_ops->sync_pir_to_irr && vcpu->arch.apicv_active)
		kvm_x86_ops->sync_pir_to_irr(vcpu);

	return kvm_apic_get_state(vcpu, s);
}

Contributors

PersonTokensPropCommitsCommitProp
Avi Kivity1534.88%116.67%
Andrey Smetanin818.60%116.67%
Yang Zhang716.28%116.67%
Dan Kenigsberg716.28%116.67%
Paolo Bonzini49.30%116.67%
Radim Krčmář24.65%116.67%
Total43100.00%6100.00%


static int kvm_vcpu_ioctl_set_lapic(struct kvm_vcpu *vcpu,
				    struct kvm_lapic_state *s)
{
	int r;

	r = kvm_apic_set_state(vcpu, s);
	if (r)
		return r;
	update_cr8_intercept(vcpu);

	return 0;
}

Contributors

PersonTokensPropCommitsCommitProp
Avi Kivity1841.86%125.00%
Radim Krčmář1330.23%125.00%
Dan Kenigsberg818.60%125.00%
Carsten Otte49.30%125.00%
Total43100.00%4100.00%


static int kvm_cpu_accept_dm_intr(struct kvm_vcpu *vcpu)
{
	return (!lapic_in_kernel(vcpu) ||
		kvm_apic_accept_pic_intr(vcpu));
}

Contributors

PersonTokensPropCommitsCommitProp
Matt Gingell25100.00%1100.00%
Total25100.00%1100.00%

/*
 * if userspace requested an interrupt window, check that the
 * interrupt window is open.
 *
 * No need to exit to userspace if we already have an interrupt queued.
 */
static int kvm_vcpu_ready_for_interrupt_injection(struct kvm_vcpu *vcpu)
{
	return kvm_arch_interrupt_allowed(vcpu) &&
		!kvm_cpu_has_interrupt(vcpu) &&
		!kvm_event_needs_reinjection(vcpu) &&
		kvm_cpu_accept_dm_intr(vcpu);
}

Contributors

PersonTokensPropCommitsCommitProp
Matt Gingell34100.00%1100.00%
Total34100.00%1100.00%


static int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu,
				    struct kvm_interrupt *irq)
{
	if (irq->irq >= KVM_NR_INTERRUPTS)
		return -EINVAL;

	if (!irqchip_in_kernel(vcpu->kvm)) {
		kvm_queue_interrupt(vcpu, irq->irq, false);
		kvm_make_request(KVM_REQ_EVENT, vcpu);
		return 0;
	}

	/*
	 * With in-kernel LAPIC, we only use this to inject EXTINT, so
	 * fail for in-kernel 8259.
	 */
	if (pic_in_kernel(vcpu->kvm))
		return -ENXIO;

	if (vcpu->arch.pending_external_vector != -1)
		return -EEXIST;

	vcpu->arch.pending_external_vector = irq->irq;
	kvm_make_request(KVM_REQ_EVENT, vcpu);
	return 0;
}

Contributors

PersonTokensPropCommitsCommitProp
Steve Rutherford4540.91%111.11%
Avi Kivity3128.18%222.22%
Dan Kenigsberg1614.55%111.11%
Carsten Otte76.36%111.11%
Matt Gingell76.36%111.11%
Sheng Yang21.82%111.11%
Michael S. Tsirkin10.91%111.11%
Joerg Roedel10.91%111.11%
Total110100.00%9100.00%


static int kvm_vcpu_ioctl_nmi(struct kvm_vcpu *vcpu)
{
	kvm_inject_nmi(vcpu);

	return 0;
}

Contributors

PersonTokensPropCommitsCommitProp
Avi Kivity1684.21%150.00%
Dan Kenigsberg315.79%150.00%
Total19100.00%2100.00%


static int kvm_vcpu_ioctl_smi(struct kvm_vcpu *vcpu)
{
	kvm_make_request(KVM_REQ_SMI, vcpu);

	return 0;
}

Contributors

PersonTokensPropCommitsCommitProp
Paolo Bonzini21100.00%2100.00%
Total21100.00%2100.00%


static int vcpu_ioctl_tpr_access_reporting(struct kvm_vcpu *vcpu,
					   struct kvm_tpr_access_ctl *tac)
{
	if (tac->flags)
		return -EINVAL;
	vcpu->arch.tpr_access_reporting = !!tac->enabled;
	return 0;
}

Contributors

PersonTokensPropCommitsCommitProp
Avi Kivity3278.05%133.33%
Dan Kenigsberg717.07%133.33%
Xiantao Zhang24.88%133.33%
Total41100.00%3100.00%


static int kvm_vcpu_ioctl_x86_setup_mce(struct kvm_vcpu *vcpu,
					u64 mcg_cap)
{
	int r;
	unsigned bank_num = mcg_cap & 0xff, bank;

	r = -EINVAL;
	if (!bank_num || bank_num >= KVM_MAX_MCE_BANKS)
		goto out;
	if (mcg_cap & ~(kvm_mce_cap_supported | 0xff | 0xff0000))
		goto out;
	r = 0;
	vcpu->arch.mcg_cap = mcg_cap;
	/* Init IA32_MCG_CTL to all 1s */
	if (mcg_cap & MCG_CTL_P)
		vcpu->arch.mcg_ctl = ~(u64)0;
	/* Init IA32_MCi_CTL to all 1s */
	for (bank = 0; bank < bank_num; bank++)
		vcpu->arch.mce_banks[bank*4] = ~(u64)0;

	if (kvm_x86_ops->setup_mce)
		kvm_x86_ops->setup_mce(vcpu);
out:
	return r;
}

Contributors

PersonTokensPropCommitsCommitProp
Avi Kivity8964.03%125.00%
Dan Kenigsberg3021.58%125.00%
Ashok Raj1410.07%125.00%
Xiantao Zhang64.32%125.00%
Total139100.00%4100.00%


static int kvm_vcpu_ioctl_x86_set_mce(struct kvm_vcpu *vcpu,
				      struct kvm_x86_mce *mce)
{
	u64 mcg_cap = vcpu->arch.mcg_cap;
	unsigned bank_num = mcg_cap & 0xff;
	u64 *banks = vcpu->arch.mce_banks;

	if (mce->bank >= bank_num || !(mce->status & MCI_STATUS_VAL))
		return -EINVAL;
	/*
	 * if IA32_MCG_CTL is not all 1s, the uncorrected error
	 * reporting is disabled
	 */
	if ((mce->status & MCI_STATUS_UC) && (mcg_cap & MCG_CTL_P) &&
	    vcpu->arch.mcg_ctl != ~(u64)0)
		return 0;
	banks += 4 * mce->bank;
	/*
	 * if IA32_MCi_CTL is not all 1s, the uncorrected error
	 * reporting is disabled for the bank
	 */
	if ((mce->status & MCI_STATUS_UC) && banks[0] != ~(u64)0)
		return 0;
	if (mce->status & MCI_STATUS_UC) {
		if ((vcpu->arch.mcg_status & MCG_STATUS_MCIP) ||
		    !kvm_read_cr4_bits(vcpu, X86_CR4_MCE)) {
			kvm_make_request(KVM_REQ_TRIPLE_FAULT, vcpu);
			return 0;
		}
		if (banks[1] & MCI_STATUS_VAL)
			mce->status |= MCI_STATUS_OVER;
		banks[2] = mce->addr;
		banks[3] = mce->misc;
		vcpu->arch.mcg_status = mce->mcg_status;
		banks[1] = mce->status;
		kvm_queue_exception(vcpu, MC_VECTOR);
	} else if (!(banks[1] & MCI_STATUS_VAL)
		   || !(banks[1] & MCI_STATUS_UC)) {
		if (banks[1] & MCI_STATUS_VAL)
			mce->status |= MCI_STATUS_OVER;
		banks[2] = mce->addr;
		banks[3] = mce->misc;
		banks[1] = mce->status;
	} else
		banks[1] |= MCI_STATUS_OVER;
	return 0;
}

Contributors

PersonTokensPropCommitsCommitProp
Avi Kivity23877.52%450.00%
Dan Kenigsberg5818.89%112.50%
Xiantao Zhang61.95%112.50%
Dexuan Cui41.30%112.50%
Sheng Yang10.33%112.50%
Total307100.00%8100.00%
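
Editor's note: userspace reaches the handler above through the KVM_X86_SET_MCE vCPU ioctl, after configuring banks with KVM_X86_SETUP_MCE. Below is a hedged sketch of injecting an uncorrected error into bank 0; inject_uc_mce() is an illustrative helper, vcpu_fd is assumed to have mcg_cap configured, and the architectural status bits are defined locally because the MCI_STATUS_* macros are not part of the exported UAPI headers.

#include <linux/kvm.h>
#include <string.h>
#include <sys/ioctl.h>

/* Architectural IA32_MCi_STATUS / IA32_MCG_STATUS bits. */
#define MCI_STATUS_VAL   (1ULL << 63)
#define MCI_STATUS_UC    (1ULL << 61)
#define MCI_STATUS_EN    (1ULL << 60)
#define MCI_STATUS_ADDRV (1ULL << 58)
#define MCG_STATUS_RIPV  (1ULL << 0)
#define MCG_STATUS_MCIP  (1ULL << 2)

static int inject_uc_mce(int vcpu_fd, __u64 addr)
{
	struct kvm_x86_mce mce;

	memset(&mce, 0, sizeof(mce));
	mce.bank = 0;
	mce.status = MCI_STATUS_VAL | MCI_STATUS_UC | MCI_STATUS_EN |
		     MCI_STATUS_ADDRV;
	mce.mcg_status = MCG_STATUS_MCIP | MCG_STATUS_RIPV;
	mce.addr = addr;
	return ioctl(vcpu_fd, KVM_X86_SET_MCE, &mce);
}

Per the handler, the injection only raises #MC in the guest if the guest has CR4.MCE set and MCIP was not already pending; otherwise KVM requests a triple fault.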


static void kvm_vcpu_ioctl_x86_get_vcpu_events(struct kvm_vcpu *vcpu,
					       struct kvm_vcpu_events *events)
{
	process_nmi(vcpu);
	events->exception.injected =
		vcpu->arch.exception.pending &&
		!kvm_exception_is_soft(vcpu->arch.exception.nr);
	events->exception.nr = vcpu->arch.exception.nr;
	events->exception.has_error_code = vcpu->arch.exception.has_error_code;
	events->exception.pad = 0;
	events->exception.error_code = vcpu->arch.exception.error_code;

	events->interrupt.injected =
		vcpu->arch.interrupt.pending && !vcpu->arch.interrupt.soft;
	events->interrupt.nr = vcpu->arch.interrupt.nr;
	events->interrupt.soft = 0;
	events->interrupt.shadow = kvm_x86_ops->get_interrupt_shadow(vcpu);

	events->nmi.injected = vcpu->arch.nmi_injected;
	events->nmi.pending = vcpu->arch.nmi_pending != 0;
	events->nmi.masked = kvm_x86_ops->get_nmi_mask(vcpu);
	events->nmi.pad = 0;

	events->sipi_vector = 0; /* never valid when reporting to user space */

	events->smi.smm = is_smm(vcpu);
	events->smi.pending = vcpu->arch.smi_pending;
	events->smi.smm_inside_nmi =
		!!(vcpu->arch.hflags & HF_SMM_INSIDE_NMI_MASK);
	events->smi.latched_init = kvm_lapic_latched_init(vcpu);

	events->flags = (KVM_VCPUEVENT_VALID_NMI_PENDING
			 | KVM_VCPUEVENT_VALID_SHADOW
			 | KVM_VCPUEVENT_VALID_SMM);
	memset(&events->reserved, 0, sizeof(events->reserved));
}

Contributors

PersonTokensPropCommitsCommitProp
Jan Kiszka13044.83%531.25%
Paolo Bonzini5418.62%16.25%
Vasiliy Kulikov3311.38%16.25%
Marcelo Tosatti237.93%318.75%
Xiantao Zhang186.21%212.50%
Huang Ying144.83%16.25%
Avi Kivity124.14%212.50%
Dan Kenigsberg62.07%16.25%
Total290100.00%16100.00%

static void kvm_set_hflags(struct kvm_vcpu *vcpu, unsigned emul_flags);
static int kvm_vcpu_ioctl_x86_set_vcpu_events(struct kvm_vcpu *vcpu,
					      struct kvm_vcpu_events *events)
{
	if (events->flags & ~(KVM_VCPUEVENT_VALID_NMI_PENDING
			      | KVM_VCPUEVENT_VALID_SIPI_VECTOR
			      | KVM_VCPUEVENT_VALID_SHADOW
			      | KVM_VCPUEVENT_VALID_SMM))
		return -EINVAL;

	if (events->exception.injected &&
	    (events->exception.nr > 31 || events->