Contributors: 33

Author                      Tokens  Token %  Commits  Commit %
Anthony Liguori                163   29.48%        2     4.26%
Kuppuswamy Sathyanarayanan      94   17.00%        2     4.26%
Christian Bornträger            44    7.96%        1     2.13%
Andrew Lutomirski               43    7.78%        1     2.13%
Gleb Natapov                    31    5.61%        1     2.13%
Glauber de Oliveira Costa       19    3.44%        2     4.26%
Srivatsa Vaddagiri              19    3.44%        1     2.13%
Andrew Morton                   18    3.25%        1     2.13%
Kirill A. Shutemov              14    2.53%        2     4.26%
Avi Kivity                      12    2.17%        3     6.38%
Paolo Bonzini                   12    2.17%        3     6.38%
Joerg Roedel                    10    1.81%        2     4.26%
Brijesh Singh                    9    1.63%        1     2.13%
Eric B Munson                    9    1.63%        1     2.13%
Wanpeng Li                       8    1.45%        1     2.13%
Vitaly Kuznetsov                 7    1.27%        3     6.38%
Borislav Petkov                  6    1.08%        1     2.13%
Marcelo Tosatti                  5    0.90%        2     4.26%
Alexander Graf                   5    0.90%        2     4.26%
Ingo Molnar                      4    0.72%        2     4.26%
Hollis Blanchard                 3    0.54%        1     2.13%
H. Peter Anvin                   3    0.54%        1     2.13%
Yi Wang                          2    0.36%        1     2.13%
David Howells                    2    0.36%        1     2.13%
Thomas Gleixner                  2    0.36%        1     2.13%
Andi Kleen                       2    0.36%        1     2.13%
Alok N Kataria                   1    0.18%        1     2.13%
Peter Zijlstra                   1    0.18%        1     2.13%
Jesse Larrew                     1    0.18%        1     2.13%
Uros Bizjak                      1    0.18%        1     2.13%
Joe Perches                      1    0.18%        1     2.13%
Greg Kroah-Hartman               1    0.18%        1     2.13%
Jan Kiszka                       1    0.18%        1     2.13%
Total                          553  100.00%       47   100.00%


/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_X86_KVM_PARA_H
#define _ASM_X86_KVM_PARA_H

#include <asm/processor.h>
#include <asm/alternative.h>
#include <linux/interrupt.h>
#include <uapi/asm/kvm_para.h>

#include <asm/tdx.h>

#ifdef CONFIG_KVM_GUEST
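/*
 * Used by the soft-lockup and hung-task watchdogs: returns true (and
 * clears the pvclock PVCLOCK_GUEST_STOPPED flag) if the host paused
 * this guest since the last check, so that time lost to a pause is not
 * misreported as a lockup.
 */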
bool kvm_check_and_clear_guest_paused(void);
#else
static inline bool kvm_check_and_clear_guest_paused(void)
{
	return false;
}
#endif /* CONFIG_KVM_GUEST */

#define KVM_HYPERCALL \
        ALTERNATIVE("vmcall", "vmmcall", X86_FEATURE_VMMCALL)

/*
 * For KVM hypercalls, a three-byte sequence of either the vmcall or the
 * vmmcall instruction.  The hypervisor may replace it with something
 * else, but only these two instructions are guaranteed to be supported.
 *
 * Up to four arguments may be passed in rbx, rcx, rdx, and rsi
 * respectively.  The hypercall number should be placed in rax and the
 * return value will be placed in rax.  No other registers will be
 * clobbered unless explicitly noted by the particular hypercall.
 */
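
/*
 * Example (illustrative; this mirrors how arch/x86/kernel/kvm.c kicks a
 * halted vCPU): the hypercall number goes in rax, flags in rbx, and the
 * target APIC ID in rcx:
 *
 *	kvm_hypercall2(KVM_HC_KICK_CPU, flags, apicid);
 */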

static inline long kvm_hypercall0(unsigned int nr)
{
	long ret;

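	/*
	 * Neither VMCALL nor VMMCALL works from inside a TDX guest;
	 * the hypercall is tunneled through the TDX module with a
	 * TDVMCALL via tdx_kvm_hypercall() instead.
	 */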
	if (cpu_feature_enabled(X86_FEATURE_TDX_GUEST))
		return tdx_kvm_hypercall(nr, 0, 0, 0, 0);

	asm volatile(KVM_HYPERCALL
		     : "=a"(ret)
		     : "a"(nr)
		     : "memory");
	return ret;
}

static inline long kvm_hypercall1(unsigned int nr, unsigned long p1)
{
	long ret;

	if (cpu_feature_enabled(X86_FEATURE_TDX_GUEST))
		return tdx_kvm_hypercall(nr, p1, 0, 0, 0);

	asm volatile(KVM_HYPERCALL
		     : "=a"(ret)
		     : "a"(nr), "b"(p1)
		     : "memory");
	return ret;
}

static inline long kvm_hypercall2(unsigned int nr, unsigned long p1,
				  unsigned long p2)
{
	long ret;

	if (cpu_feature_enabled(X86_FEATURE_TDX_GUEST))
		return tdx_kvm_hypercall(nr, p1, p2, 0, 0);

	asm volatile(KVM_HYPERCALL
		     : "=a"(ret)
		     : "a"(nr), "b"(p1), "c"(p2)
		     : "memory");
	return ret;
}

static inline long kvm_hypercall3(unsigned int nr, unsigned long p1,
				  unsigned long p2, unsigned long p3)
{
	long ret;

	if (cpu_feature_enabled(X86_FEATURE_TDX_GUEST))
		return tdx_kvm_hypercall(nr, p1, p2, p3, 0);

	asm volatile(KVM_HYPERCALL
		     : "=a"(ret)
		     : "a"(nr), "b"(p1), "c"(p2), "d"(p3)
		     : "memory");
	return ret;
}

static inline long kvm_hypercall4(unsigned int nr, unsigned long p1,
				  unsigned long p2, unsigned long p3,
				  unsigned long p4)
{
	long ret;

	if (cpu_feature_enabled(X86_FEATURE_TDX_GUEST))
		return tdx_kvm_hypercall(nr, p1, p2, p3, p4);

	asm volatile(KVM_HYPERCALL
		     : "=a"(ret)
		     : "a"(nr), "b"(p1), "c"(p2), "d"(p3), "S"(p4)
		     : "memory");
	return ret;
}
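
/*
 * Example (illustrative, roughly as in the PV IPI path in
 * arch/x86/kernel/kvm.c; the 128-bit destination CPU bitmap is split
 * across the first two arguments):
 *
 *	ret = kvm_hypercall4(KVM_HC_SEND_IPI, (unsigned long)ipi_bitmap,
 *			     (unsigned long)(ipi_bitmap >> BITS_PER_LONG),
 *			     min, icr);
 */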

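/*
 * SEV guests only run on AMD hardware, so VMMCALL can be used directly
 * instead of the patched KVM_HYPERCALL alternative.
 */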
static inline long kvm_sev_hypercall3(unsigned int nr, unsigned long p1,
				      unsigned long p2, unsigned long p3)
{
	long ret;

	asm volatile("vmmcall"
		     : "=a"(ret)
		     : "a"(nr), "b"(p1), "c"(p2), "d"(p3)
		     : "memory");
	return ret;
}

#ifdef CONFIG_KVM_GUEST
void kvmclock_init(void);
void kvmclock_disable(void);
bool kvm_para_available(void);
unsigned int kvm_arch_para_features(void);
unsigned int kvm_arch_para_hints(void);
void kvm_async_pf_task_wait_schedule(u32 token);
void kvm_async_pf_task_wake(u32 token);
u32 kvm_read_and_reset_apf_flags(void);
bool __kvm_handle_async_pf(struct pt_regs *regs, u32 token);

DECLARE_STATIC_KEY_FALSE(kvm_async_pf_enabled);

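/*
 * kvm_async_pf_enabled is a static key: while async page faults are
 * disabled, the branch below is patched out, so the page-fault path
 * pays essentially nothing for this hook.
 */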
static __always_inline bool kvm_handle_async_pf(struct pt_regs *regs, u32 token)
{
	if (static_branch_unlikely(&kvm_async_pf_enabled))
		return __kvm_handle_async_pf(regs, token);
	else
		return false;
}

#ifdef CONFIG_PARAVIRT_SPINLOCKS
void __init kvm_spinlock_init(void);
#else /* !CONFIG_PARAVIRT_SPINLOCKS */
static inline void kvm_spinlock_init(void)
{
}
#endif /* CONFIG_PARAVIRT_SPINLOCKS */

#else /* CONFIG_KVM_GUEST */
#define kvm_async_pf_task_wait_schedule(T) do {} while(0)
#define kvm_async_pf_task_wake(T) do {} while(0)

static inline bool kvm_para_available(void)
{
	return false;
}

static inline unsigned int kvm_arch_para_features(void)
{
	return 0;
}

static inline unsigned int kvm_arch_para_hints(void)
{
	return 0;
}

static inline u32 kvm_read_and_reset_apf_flags(void)
{
	return 0;
}

static __always_inline bool kvm_handle_async_pf(struct pt_regs *regs, u32 token)
{
	return false;
}
#endif /* CONFIG_KVM_GUEST */

#endif /* _ASM_X86_KVM_PARA_H */
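
A usage sketch (illustrative, not part of this header): guest code normally
probes for KVM and for a specific paravirtual feature before relying on the
corresponding hypercall.  kvm_para_has_feature() comes from
<linux/kvm_para.h>, which wraps this header; pv_unhalt_usable() is a made-up
name used only for this example.

#include <linux/kvm_para.h>

static bool pv_unhalt_usable(void)
{
	/* Not running as a KVM guest (or CONFIG_KVM_GUEST is off). */
	if (!kvm_para_available())
		return false;

	/* The host advertises KVM_HC_KICK_CPU support via CPUID. */
	return kvm_para_has_feature(KVM_FEATURE_PV_UNHALT);
}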