Contributors: 23

Author                  Tokens  Token %   Commits  Commit %
Paolo Bonzini              619   40.75%         5     7.04%
Sean Christopherson        276   18.17%        20    28.17%
Jim Mattson                212   13.96%         2     2.82%
Isaku Yamahata             186   12.24%        15    21.13%
Gleb Natapov                65    4.28%         3     4.23%
Avi Kivity                  52    3.42%         5     7.04%
Tiejun Chen                 44    2.90%         1     1.41%
Marcelo Tosatti             12    0.79%         2     2.82%
Xiantao Zhang                9    0.59%         1     1.41%
Maxim Levitsky               8    0.53%         2     2.82%
Andre Przywara               6    0.39%         1     1.41%
Vishal Verma                 4    0.26%         1     1.41%
Yunhong Jiang                4    0.26%         1     1.41%
Feng Wu                      4    0.26%         3     4.23%
Joerg Roedel                 3    0.20%         1     1.41%
Sheng Yang                   3    0.20%         1     1.41%
Pawan Gupta                  3    0.20%         1     1.41%
Chao Gao                     2    0.13%         1     1.41%
Makarand Sonare              2    0.13%         1     1.41%
Liran Alon                   2    0.13%         1     1.41%
Jason Baron                  1    0.07%         1     1.41%
Binbin Wu                    1    0.07%         1     1.41%
Ladi Prosek                  1    0.07%         1     1.41%
Total                     1519                 71


/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __KVM_X86_VMX_X86_OPS_H
#define __KVM_X86_VMX_X86_OPS_H

#include <linux/kvm_host.h>

#include "x86.h"

__init int vmx_hardware_setup(void);

extern struct kvm_x86_ops vt_x86_ops __initdata;
extern struct kvm_x86_init_ops vt_init_ops __initdata;
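
/*
 * Illustrative sketch, not part of this header: vmx/main.c is expected to
 * wire the prototypes below into the vt_x86_ops callback table and to hand
 * vt_init_ops to the common x86 code at module init.  The field and helper
 * names used here (e.g. kvm_x86_vendor_init()) follow struct kvm_x86_ops /
 * struct kvm_x86_init_ops in recent kernels, but this is an assumption and
 * may differ between kernel versions.
 *
 *	struct kvm_x86_ops vt_x86_ops __initdata = {
 *		.hardware_unsetup	= vmx_hardware_unsetup,
 *		.vcpu_create		= vmx_vcpu_create,
 *		.vcpu_free		= vmx_vcpu_free,
 *		.vcpu_run		= vmx_vcpu_run,
 *		...
 *	};
 *
 *	static int __init vt_init(void)
 *	{
 *		...
 *		return kvm_x86_vendor_init(&vt_init_ops);
 *	}
 */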

void vmx_hardware_unsetup(void);
int vmx_check_processor_compat(void);
int vmx_enable_virtualization_cpu(void);
void vmx_disable_virtualization_cpu(void);
void vmx_emergency_disable_virtualization_cpu(void);
int vmx_vm_init(struct kvm *kvm);
void vmx_vm_destroy(struct kvm *kvm);
int vmx_vcpu_precreate(struct kvm *kvm);
int vmx_vcpu_create(struct kvm_vcpu *vcpu);
int vmx_vcpu_pre_run(struct kvm_vcpu *vcpu);
fastpath_t vmx_vcpu_run(struct kvm_vcpu *vcpu, bool force_immediate_exit);
void vmx_vcpu_free(struct kvm_vcpu *vcpu);
void vmx_vcpu_reset(struct kvm_vcpu *vcpu, bool init_event);
void vmx_vcpu_load(struct kvm_vcpu *vcpu, int cpu);
void vmx_vcpu_put(struct kvm_vcpu *vcpu);
int vmx_handle_exit(struct kvm_vcpu *vcpu, fastpath_t exit_fastpath);
void vmx_handle_exit_irqoff(struct kvm_vcpu *vcpu);
int vmx_skip_emulated_instruction(struct kvm_vcpu *vcpu);
void vmx_update_emulated_instruction(struct kvm_vcpu *vcpu);
int vmx_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info);
#ifdef CONFIG_KVM_SMM
int vmx_smi_allowed(struct kvm_vcpu *vcpu, bool for_injection);
int vmx_enter_smm(struct kvm_vcpu *vcpu, union kvm_smram *smram);
int vmx_leave_smm(struct kvm_vcpu *vcpu, const union kvm_smram *smram);
void vmx_enable_smi_window(struct kvm_vcpu *vcpu);
#endif
int vmx_check_emulate_instruction(struct kvm_vcpu *vcpu, int emul_type,
				  void *insn, int insn_len);
int vmx_check_intercept(struct kvm_vcpu *vcpu,
			struct x86_instruction_info *info,
			enum x86_intercept_stage stage,
			struct x86_exception *exception);
bool vmx_apic_init_signal_blocked(struct kvm_vcpu *vcpu);
void vmx_migrate_timers(struct kvm_vcpu *vcpu);
void vmx_set_virtual_apic_mode(struct kvm_vcpu *vcpu);
void vmx_hwapic_isr_update(struct kvm_vcpu *vcpu, int max_isr);
int vmx_sync_pir_to_irr(struct kvm_vcpu *vcpu);
void vmx_deliver_interrupt(struct kvm_lapic *apic, int delivery_mode,
			   int trig_mode, int vector);
void vmx_vcpu_after_set_cpuid(struct kvm_vcpu *vcpu);
bool vmx_has_emulated_msr(struct kvm *kvm, u32 index);
void vmx_msr_filter_changed(struct kvm_vcpu *vcpu);
void vmx_prepare_switch_to_guest(struct kvm_vcpu *vcpu);
void vmx_update_exception_bitmap(struct kvm_vcpu *vcpu);
int vmx_get_feature_msr(u32 msr, u64 *data);
int vmx_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info);
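/* Completing an emulated MSR access needs no VMX-specific handling; alias the generic helper. */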
#define vmx_complete_emulated_msr kvm_complete_insn_gp
u64 vmx_get_segment_base(struct kvm_vcpu *vcpu, int seg);
void vmx_get_segment(struct kvm_vcpu *vcpu, struct kvm_segment *var, int seg);
void vmx_set_segment(struct kvm_vcpu *vcpu, struct kvm_segment *var, int seg);
int vmx_get_cpl(struct kvm_vcpu *vcpu);
void vmx_get_cs_db_l_bits(struct kvm_vcpu *vcpu, int *db, int *l);
bool vmx_is_valid_cr0(struct kvm_vcpu *vcpu, unsigned long cr0);
void vmx_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0);
void vmx_load_mmu_pgd(struct kvm_vcpu *vcpu, hpa_t root_hpa, int root_level);
void vmx_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4);
bool vmx_is_valid_cr4(struct kvm_vcpu *vcpu, unsigned long cr4);
int vmx_set_efer(struct kvm_vcpu *vcpu, u64 efer);
void vmx_get_idt(struct kvm_vcpu *vcpu, struct desc_ptr *dt);
void vmx_set_idt(struct kvm_vcpu *vcpu, struct desc_ptr *dt);
void vmx_get_gdt(struct kvm_vcpu *vcpu, struct desc_ptr *dt);
void vmx_set_gdt(struct kvm_vcpu *vcpu, struct desc_ptr *dt);
void vmx_set_dr6(struct kvm_vcpu *vcpu, unsigned long val);
void vmx_set_dr7(struct kvm_vcpu *vcpu, unsigned long val);
void vmx_sync_dirty_debug_regs(struct kvm_vcpu *vcpu);
void vmx_cache_reg(struct kvm_vcpu *vcpu, enum kvm_reg reg);
unsigned long vmx_get_rflags(struct kvm_vcpu *vcpu);
void vmx_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags);
bool vmx_get_if_flag(struct kvm_vcpu *vcpu);
void vmx_flush_tlb_all(struct kvm_vcpu *vcpu);
void vmx_flush_tlb_current(struct kvm_vcpu *vcpu);
void vmx_flush_tlb_gva(struct kvm_vcpu *vcpu, gva_t addr);
void vmx_flush_tlb_guest(struct kvm_vcpu *vcpu);
void vmx_set_interrupt_shadow(struct kvm_vcpu *vcpu, int mask);
u32 vmx_get_interrupt_shadow(struct kvm_vcpu *vcpu);
void vmx_patch_hypercall(struct kvm_vcpu *vcpu, unsigned char *hypercall);
void vmx_inject_irq(struct kvm_vcpu *vcpu, bool reinjected);
void vmx_inject_nmi(struct kvm_vcpu *vcpu);
void vmx_inject_exception(struct kvm_vcpu *vcpu);
void vmx_cancel_injection(struct kvm_vcpu *vcpu);
int vmx_interrupt_allowed(struct kvm_vcpu *vcpu, bool for_injection);
int vmx_nmi_allowed(struct kvm_vcpu *vcpu, bool for_injection);
bool vmx_get_nmi_mask(struct kvm_vcpu *vcpu);
void vmx_set_nmi_mask(struct kvm_vcpu *vcpu, bool masked);
void vmx_enable_nmi_window(struct kvm_vcpu *vcpu);
void vmx_enable_irq_window(struct kvm_vcpu *vcpu);
void vmx_update_cr8_intercept(struct kvm_vcpu *vcpu, int tpr, int irr);
void vmx_set_apic_access_page_addr(struct kvm_vcpu *vcpu);
void vmx_refresh_apicv_exec_ctrl(struct kvm_vcpu *vcpu);
void vmx_load_eoi_exitmap(struct kvm_vcpu *vcpu, u64 *eoi_exit_bitmap);
int vmx_set_tss_addr(struct kvm *kvm, unsigned int addr);
int vmx_set_identity_map_addr(struct kvm *kvm, u64 ident_addr);
u8 vmx_get_mt_mask(struct kvm_vcpu *vcpu, gfn_t gfn, bool is_mmio);

void vmx_get_exit_info(struct kvm_vcpu *vcpu, u32 *reason,
		       u64 *info1, u64 *info2, u32 *intr_info, u32 *error_code);
void vmx_get_entry_info(struct kvm_vcpu *vcpu, u32 *intr_info, u32 *error_code);

u64 vmx_get_l2_tsc_offset(struct kvm_vcpu *vcpu);
u64 vmx_get_l2_tsc_multiplier(struct kvm_vcpu *vcpu);
void vmx_write_tsc_offset(struct kvm_vcpu *vcpu);
void vmx_write_tsc_multiplier(struct kvm_vcpu *vcpu);
void vmx_update_cpu_dirty_logging(struct kvm_vcpu *vcpu);
#ifdef CONFIG_X86_64
int vmx_set_hv_timer(struct kvm_vcpu *vcpu, u64 guest_deadline_tsc,
		     bool *expired);
void vmx_cancel_hv_timer(struct kvm_vcpu *vcpu);
#endif
void vmx_setup_mce(struct kvm_vcpu *vcpu);

#ifdef CONFIG_KVM_INTEL_TDX
void tdx_disable_virtualization_cpu(void);
int tdx_vm_init(struct kvm *kvm);
void tdx_mmu_release_hkid(struct kvm *kvm);
void tdx_vm_destroy(struct kvm *kvm);
int tdx_vm_ioctl(struct kvm *kvm, void __user *argp);

int tdx_vcpu_create(struct kvm_vcpu *vcpu);
void tdx_vcpu_reset(struct kvm_vcpu *vcpu, bool init_event);
void tdx_vcpu_free(struct kvm_vcpu *vcpu);
void tdx_vcpu_load(struct kvm_vcpu *vcpu, int cpu);
int tdx_vcpu_pre_run(struct kvm_vcpu *vcpu);
fastpath_t tdx_vcpu_run(struct kvm_vcpu *vcpu, bool force_immediate_exit);
void tdx_prepare_switch_to_guest(struct kvm_vcpu *vcpu);
void tdx_vcpu_put(struct kvm_vcpu *vcpu);
bool tdx_protected_apic_has_interrupt(struct kvm_vcpu *vcpu);
int tdx_handle_exit(struct kvm_vcpu *vcpu,
		enum exit_fastpath_completion fastpath);

void tdx_deliver_interrupt(struct kvm_lapic *apic, int delivery_mode,
			   int trig_mode, int vector);
void tdx_inject_nmi(struct kvm_vcpu *vcpu);
void tdx_get_exit_info(struct kvm_vcpu *vcpu, u32 *reason,
		u64 *info1, u64 *info2, u32 *intr_info, u32 *error_code);
bool tdx_has_emulated_msr(u32 index);
int tdx_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr);
int tdx_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr);

int tdx_vcpu_ioctl(struct kvm_vcpu *vcpu, void __user *argp);

int tdx_sept_link_private_spt(struct kvm *kvm, gfn_t gfn,
			      enum pg_level level, void *private_spt);
int tdx_sept_free_private_spt(struct kvm *kvm, gfn_t gfn,
			      enum pg_level level, void *private_spt);
int tdx_sept_set_private_spte(struct kvm *kvm, gfn_t gfn,
			      enum pg_level level, kvm_pfn_t pfn);
int tdx_sept_remove_private_spte(struct kvm *kvm, gfn_t gfn,
				 enum pg_level level, kvm_pfn_t pfn);

void tdx_flush_tlb_current(struct kvm_vcpu *vcpu);
void tdx_flush_tlb_all(struct kvm_vcpu *vcpu);
void tdx_load_mmu_pgd(struct kvm_vcpu *vcpu, hpa_t root_hpa, int root_level);
int tdx_gmem_private_max_mapping_level(struct kvm *kvm, kvm_pfn_t pfn);
#endif
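
/*
 * Illustrative sketch, not part of this header: when CONFIG_KVM_INTEL_TDX is
 * enabled, vmx/main.c is expected to route each callback through a thin vt_*
 * wrapper that dispatches to the tdx_* variant for TD vCPUs and to the vmx_*
 * variant otherwise.  is_td_vcpu() is an assumed helper name in this sketch.
 *
 *	static fastpath_t vt_vcpu_run(struct kvm_vcpu *vcpu,
 *				      bool force_immediate_exit)
 *	{
 *		if (is_td_vcpu(vcpu))
 *			return tdx_vcpu_run(vcpu, force_immediate_exit);
 *
 *		return vmx_vcpu_run(vcpu, force_immediate_exit);
 *	}
 */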

#endif /* __KVM_X86_VMX_X86_OPS_H */