cregit-Linux: how code gets into the kernel

Release 4.11 arch/x86/kvm/x86.h

#ifndef ARCH_X86_KVM_X86_H
#define ARCH_X86_KVM_X86_H

#include <linux/kvm_host.h>
#include <asm/pvclock.h>
#include "kvm_cache_regs.h"


#define MSR_IA32_CR_PAT_DEFAULT  0x0007040600070406ULL


static inline void kvm_clear_exception_queue(struct kvm_vcpu *vcpu)
{
        vcpu->arch.exception.pending = false;
}

Contributors

Person              Tokens  Prop      Commits  CommitProp
Avi Kivity          22      100.00%   1        100.00%
Total               22      100.00%   1        100.00%


static inline void kvm_queue_interrupt(struct kvm_vcpu *vcpu, u8 vector,
        bool soft)
{
        vcpu->arch.interrupt.pending = true;
        vcpu->arch.interrupt.soft = soft;
        vcpu->arch.interrupt.nr = vector;
}

Contributors

Person              Tokens  Prop      Commits  CommitProp
Avi Kivity          35      72.92%    1        50.00%
Gleb Natapov        13      27.08%    1        50.00%
Total               48      100.00%   2        100.00%


static inline void kvm_clear_interrupt_queue(struct kvm_vcpu *vcpu)
{
        vcpu->arch.interrupt.pending = false;
}

Contributors

Person              Tokens  Prop      Commits  CommitProp
Avi Kivity          22      100.00%   1        100.00%
Total               22      100.00%   1        100.00%


static inline bool kvm_event_needs_reinjection(struct kvm_vcpu *vcpu)
{
        return vcpu->arch.exception.pending || vcpu->arch.interrupt.pending ||
                vcpu->arch.nmi_injected;
}

Contributors

Person              Tokens  Prop      Commits  CommitProp
Gleb Natapov        35      100.00%   1        100.00%
Total               35      100.00%   1        100.00%


static inline bool kvm_exception_is_soft(unsigned int nr)
{
        return (nr == BP_VECTOR) || (nr == OF_VECTOR);
}

Contributors

Person              Tokens  Prop      Commits  CommitProp
Gleb Natapov        24      100.00%   1        100.00%
Total               24      100.00%   1        100.00%


static inline bool is_protmode(struct kvm_vcpu *vcpu)
{
        return kvm_read_cr0_bits(vcpu, X86_CR0_PE);
}

Contributors

Person              Tokens  Prop      Commits  CommitProp
Avi Kivity          20      100.00%   1        100.00%
Total               20      100.00%   1        100.00%


static inline int is_long_mode(struct kvm_vcpu *vcpu)
{
#ifdef CONFIG_X86_64
        return vcpu->arch.efer & EFER_LMA;
#else
        return 0;
#endif
}

Contributors

Person              Tokens  Prop      Commits  CommitProp
Avi Kivity          31      100.00%   2        100.00%
Total               31      100.00%   2        100.00%


static inline bool is_64_bit_mode(struct kvm_vcpu *vcpu)
{
        int cs_db, cs_l;

        if (!is_long_mode(vcpu))
                return false;
        kvm_x86_ops->get_cs_db_l_bits(vcpu, &cs_db, &cs_l);
        return cs_l;
}

Contributors

Person              Tokens  Prop      Commits  CommitProp
Nadav Amit          44      100.00%   1        100.00%
Total               44      100.00%   1        100.00%


static inline bool mmu_is_nested(struct kvm_vcpu *vcpu)
{
        return vcpu->arch.walk_mmu == &vcpu->arch.nested_mmu;
}

Contributors

Person              Tokens  Prop      Commits  CommitProp
Joerg Roedel        26      100.00%   1        100.00%
Total               26      100.00%   1        100.00%


static inline int is_pae(struct kvm_vcpu *vcpu)
{
        return kvm_read_cr4_bits(vcpu, X86_CR4_PAE);
}

Contributors

Person              Tokens  Prop      Commits  CommitProp
Avi Kivity          20      100.00%   1        100.00%
Total               20      100.00%   1        100.00%


static inline int is_pse(struct kvm_vcpu *vcpu)
{
        return kvm_read_cr4_bits(vcpu, X86_CR4_PSE);
}

Contributors

Person              Tokens  Prop      Commits  CommitProp
Avi Kivity          20      100.00%   1        100.00%
Total               20      100.00%   1        100.00%


static inline int is_paging(struct kvm_vcpu *vcpu)
{
        return likely(kvm_read_cr0_bits(vcpu, X86_CR0_PG));
}

Contributors

Person              Tokens  Prop      Commits  CommitProp
Avi Kivity          20      86.96%    1        50.00%
Davidlohr Bueso A   3       13.04%    1        50.00%
Total               23      100.00%   2        100.00%


static inline u32 bit(int bitno)
{
        return 1 << (bitno & 31);
}

Contributors

Person              Tokens  Prop      Commits  CommitProp
Joerg Roedel        19      100.00%   1        100.00%
Total               19      100.00%   1        100.00%


static inline void vcpu_cache_mmio_info(struct kvm_vcpu *vcpu,
                                        gva_t gva, gfn_t gfn, unsigned access)
{
        vcpu->arch.mmio_gva = gva & PAGE_MASK;
        vcpu->arch.access = access;
        vcpu->arch.mmio_gfn = gfn;
        vcpu->arch.mmio_gen = kvm_memslots(vcpu->kvm)->generation;
}

Contributors

Person              Tokens  Prop      Commits  CommitProp
Xiao Guangrong      46      74.19%    1        50.00%
David Matlack       16      25.81%    1        50.00%
Total               62      100.00%   2        100.00%


static inline bool vcpu_match_mmio_gen(struct kvm_vcpu *vcpu)
{
        return vcpu->arch.mmio_gen == kvm_memslots(vcpu->kvm)->generation;
}

Contributors

Person              Tokens  Prop      Commits  CommitProp
David Matlack       27      96.43%    1        50.00%
Xiao Guangrong      1       3.57%     1        50.00%
Total               28      100.00%   2        100.00%

/*
 * Clear the mmio cache info for the given gva. If gva is MMIO_GVA_ANY, we
 * clear all mmio cache info.
 */
#define MMIO_GVA_ANY (~(gva_t)0)

static inline void vcpu_clear_mmio_info(struct kvm_vcpu *vcpu, gva_t gva)
{
        if (gva != MMIO_GVA_ANY && vcpu->arch.mmio_gva != (gva & PAGE_MASK))
                return;

        vcpu->arch.mmio_gva = 0;
}

Contributors

Person              Tokens  Prop      Commits  CommitProp
Xiao Guangrong      41      97.62%    1        50.00%
David Matlack       1       2.38%     1        50.00%
Total               42      100.00%   2        100.00%


static inline bool vcpu_match_mmio_gva(struct kvm_vcpu *vcpu, unsigned long gva)
{
        if (vcpu_match_mmio_gen(vcpu) && vcpu->arch.mmio_gva &&
            vcpu->arch.mmio_gva == (gva & PAGE_MASK))
                return true;

        return false;
}

Contributors

Person              Tokens  Prop      Commits  CommitProp
Xiao Guangrong      42      89.36%    1        50.00%
David Matlack       5       10.64%    1        50.00%
Total               47      100.00%   2        100.00%


static inline bool vcpu_match_mmio_gpa(struct kvm_vcpu *vcpu, gpa_t gpa)
{
        if (vcpu_match_mmio_gen(vcpu) && vcpu->arch.mmio_gfn &&
            vcpu->arch.mmio_gfn == gpa >> PAGE_SHIFT)
                return true;

        return false;
}

Contributors

Person              Tokens  Prop      Commits  CommitProp
Xiao Guangrong      39      88.64%    1        50.00%
David Matlack       5       11.36%    1        50.00%
Total               44      100.00%   2        100.00%


static inline unsigned long kvm_register_readl(struct kvm_vcpu *vcpu,
                                               enum kvm_reg reg)
{
        unsigned long val = kvm_register_read(vcpu, reg);

        return is_64_bit_mode(vcpu) ? val : (u32)val;
}

Contributors

Person              Tokens  Prop      Commits  CommitProp
Nadav Amit          41      100.00%   1        100.00%
Total               41      100.00%   1        100.00%


static inline void kvm_register_writel(struct kvm_vcpu *vcpu,
                                       enum kvm_reg reg,
                                       unsigned long val)
{
        if (!is_64_bit_mode(vcpu))
                val = (u32)val;
        return kvm_register_write(vcpu, reg, val);
}

Contributors

Person              Tokens  Prop      Commits  CommitProp
Nadav Amit          45      100.00%   1        100.00%
Total               45      100.00%   1        100.00%


static inline bool kvm_check_has_quirk(struct kvm *kvm, u64 quirk)
{
        return !(kvm->arch.disabled_quirks & quirk);
}

Contributors

Person              Tokens  Prop      Commits  CommitProp
Paolo Bonzini       27      100.00%   1        100.00%
Total               27      100.00%   1        100.00%

void kvm_before_handle_nmi(struct kvm_vcpu *vcpu);
void kvm_after_handle_nmi(struct kvm_vcpu *vcpu);
void kvm_set_pending_timer(struct kvm_vcpu *vcpu);
int kvm_inject_realmode_interrupt(struct kvm_vcpu *vcpu, int irq, int inc_eip);

void kvm_write_tsc(struct kvm_vcpu *vcpu, struct msr_data *msr);
u64 get_kvmclock_ns(struct kvm *kvm);

int kvm_read_guest_virt(struct x86_emulate_ctxt *ctxt,
        gva_t addr, void *val, unsigned int bytes,
        struct x86_exception *exception);

int kvm_write_guest_virt_system(struct x86_emulate_ctxt *ctxt,
        gva_t addr, void *val, unsigned int bytes,
        struct x86_exception *exception);

void kvm_vcpu_mtrr_init(struct kvm_vcpu *vcpu);
u8 kvm_mtrr_get_guest_memory_type(struct kvm_vcpu *vcpu, gfn_t gfn);
bool kvm_mtrr_valid(struct kvm_vcpu *vcpu, u32 msr, u64 data);
int kvm_mtrr_set_msr(struct kvm_vcpu *vcpu, u32 msr, u64 data);
int kvm_mtrr_get_msr(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata);
bool kvm_mtrr_check_gfn_range_consistency(struct kvm_vcpu *vcpu, gfn_t gfn,
                                          int page_num);
bool kvm_vector_hashing_enabled(void);

#define KVM_SUPPORTED_XCR0     (XFEATURE_MASK_FP | XFEATURE_MASK_SSE \
                                | XFEATURE_MASK_YMM | XFEATURE_MASK_BNDREGS \
                                | XFEATURE_MASK_BNDCSR | XFEATURE_MASK_AVX512 \
                                | XFEATURE_MASK_PKRU)
extern u64 host_xcr0;

extern u64 kvm_supported_xcr0(void);

extern unsigned int min_timer_period_us;

extern unsigned int lapic_timer_advance_ns;

extern struct static_key kvm_no_apic_vcpu;
static inline u64 nsec_to_cycles(struct kvm_vcpu *vcpu, u64 nsec)
{
        return pvclock_scale_delta(nsec, vcpu->arch.virtual_tsc_mult,
                                   vcpu->arch.virtual_tsc_shift);
}

Contributors

Person              Tokens  Prop      Commits  CommitProp
Marcelo Tosatti     33      100.00%   1        100.00%
Total               33      100.00%   1        100.00%

/* Same "calling convention" as do_div: * - divide (n << 32) by base * - put result in n * - return remainder */ #define do_shl32_div32(n, base) \ ({ \ u32 __quot, __rem; \ asm("divl %2" : "=a" (__quot), "=d" (__rem) \ : "rm" (base), "0" (0), "1" ((u32) n)); \ n = __quot; \ __rem; \ }) #endif

Overall Contributors

Person              Tokens  Prop      Commits  CommitProp
Xiao Guangrong      236     23.77%    4        10.26%
Avi Kivity          208     20.95%    6        15.38%
Nadav Amit          145     14.60%    3        7.69%
Gleb Natapov        77      7.75%     3        7.69%
David Matlack       59      5.94%     1        2.56%
Paolo Bonzini       57      5.74%     5        12.82%
Nadav Har'El        50      5.04%     2        5.13%
Marcelo Tosatti     46      4.63%     3        7.69%
Joerg Roedel        45      4.53%     2        5.13%
Yanmin Zhang        18      1.81%     1        2.56%
Mohammed Gamal      12      1.21%     1        2.56%
Zachary Amsden      10      1.01%     1        2.56%
Nicholas Krause     9       0.91%     1        2.56%
Feng Wu             6       0.60%     1        2.56%
Radim Krčmář        4       0.40%     1        2.56%
Will Auld           4       0.40%     1        2.56%
Serge E. Hallyn     3       0.30%     1        2.56%
Davidlohr Bueso A   3       0.30%     1        2.56%
Huaitong Han        1       0.10%     1        2.56%
Total               993     100.00%   39       100.00%