/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_X86_KVM_PARA_H
#define _ASM_X86_KVM_PARA_H

#include <asm/processor.h>
#include <asm/alternative.h>
#include <linux/interrupt.h>
#include <uapi/asm/kvm_para.h>

#include <asm/tdx.h>	/* tdx_kvm_hypercall(), used below */

#ifdef CONFIG_KVM_GUEST
bool kvm_check_and_clear_guest_paused(void);
#else
static inline bool kvm_check_and_clear_guest_paused(void)
{
	return false;
}
#endif /* CONFIG_KVM_GUEST */

#define KVM_HYPERCALL \
	ALTERNATIVE("vmcall", "vmmcall", X86_FEATURE_VMMCALL)

/* For KVM hypercalls, a three-byte sequence of either the vmcall or the vmmcall
 * instruction. The hypervisor may replace it with something else but only the
 * instructions are guaranteed to be supported.
 *
 * Up to four arguments may be passed in rbx, rcx, rdx, and rsi respectively.
 * The hypercall number should be placed in rax and the return value will be
 * placed in rax. No other registers will be clobbered unless explicitly
 * noted by the particular hypercall.
 */

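/*
 * Worked example of the convention above (illustrative only, not part of the
 * upstream header): a two-argument call such as
 *
 *	ret = kvm_hypercall2(nr, p1, p2);
 *
 * places nr in rax, p1 in rbx and p2 in rcx, executes KVM_HYPERCALL, and the
 * hypervisor's result comes back in rax, i.e. in 'ret'.
 */
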
static inline long kvm_hypercall0(unsigned int nr)
{
	long ret;

	if (cpu_feature_enabled(X86_FEATURE_TDX_GUEST))
		return tdx_kvm_hypercall(nr, 0, 0, 0, 0);

	asm volatile(KVM_HYPERCALL
		     : "=a"(ret)
		     : "a"(nr)
		     : "memory");
	return ret;
}

static inline long kvm_hypercall1(unsigned int nr, unsigned long p1)
{
	long ret;

	if (cpu_feature_enabled(X86_FEATURE_TDX_GUEST))
		return tdx_kvm_hypercall(nr, p1, 0, 0, 0);

	asm volatile(KVM_HYPERCALL
		     : "=a"(ret)
		     : "a"(nr), "b"(p1)
		     : "memory");
	return ret;
}

static inline long kvm_hypercall2(unsigned int nr, unsigned long p1,
				  unsigned long p2)
{
	long ret;

	if (cpu_feature_enabled(X86_FEATURE_TDX_GUEST))
		return tdx_kvm_hypercall(nr, p1, p2, 0, 0);

	asm volatile(KVM_HYPERCALL
		     : "=a"(ret)
		     : "a"(nr), "b"(p1), "c"(p2)
		     : "memory");
	return ret;
}

static inline long kvm_hypercall3(unsigned int nr, unsigned long p1,
				  unsigned long p2, unsigned long p3)
{
	long ret;

	if (cpu_feature_enabled(X86_FEATURE_TDX_GUEST))
		return tdx_kvm_hypercall(nr, p1, p2, p3, 0);

	asm volatile(KVM_HYPERCALL
		     : "=a"(ret)
		     : "a"(nr), "b"(p1), "c"(p2), "d"(p3)
		     : "memory");
	return ret;
}

static inline long kvm_hypercall4(unsigned int nr, unsigned long p1,
				  unsigned long p2, unsigned long p3,
				  unsigned long p4)
{
	long ret;

	if (cpu_feature_enabled(X86_FEATURE_TDX_GUEST))
		return tdx_kvm_hypercall(nr, p1, p2, p3, p4);

	asm volatile(KVM_HYPERCALL
		     : "=a"(ret)
		     : "a"(nr), "b"(p1), "c"(p2), "d"(p3), "S"(p4)
		     : "memory");
	return ret;
}

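/*
 * Usage sketch (hedged, illustrative only): callers pick a hypercall number
 * from include/uapi/linux/kvm_para.h and pass its arguments directly, e.g.
 * the PV unhalt path in arch/x86/kernel/kvm.c kicks a halted vCPU roughly as
 *
 *	kvm_hypercall2(KVM_HC_KICK_CPU, flags, apicid);
 *
 * where 'flags' and 'apicid' are values the caller supplies; any wrapper
 * around such a call belongs to the caller, not to this header.
 */
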
/*
 * SEV guests run only on AMD hardware, so VMMCALL is issued directly here
 * instead of the patchable KVM_HYPERCALL sequence.
 */
static inline long kvm_sev_hypercall3(unsigned int nr, unsigned long p1,
				      unsigned long p2, unsigned long p3)
{
	long ret;

	asm volatile("vmmcall"
		     : "=a"(ret)
		     : "a"(nr), "b"(p1), "c"(p2), "d"(p3)
		     : "memory");
	return ret;
}

#ifdef CONFIG_KVM_GUEST
void kvmclock_init(void);
void kvmclock_disable(void);
bool kvm_para_available(void);
unsigned int kvm_arch_para_features(void);
unsigned int kvm_arch_para_hints(void);
void kvm_async_pf_task_wait_schedule(u32 token);
void kvm_async_pf_task_wake(u32 token);
u32 kvm_read_and_reset_apf_flags(void);
bool __kvm_handle_async_pf(struct pt_regs *regs, u32 token);

DECLARE_STATIC_KEY_FALSE(kvm_async_pf_enabled);

static __always_inline bool kvm_handle_async_pf(struct pt_regs *regs, u32 token)
{
	if (static_branch_unlikely(&kvm_async_pf_enabled))
		return __kvm_handle_async_pf(regs, token);
	else
		return false;
}

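/*
 * Illustrative caller (hedged sketch): the x86 page-fault entry path checks
 * for an async-PF token before normal fault handling, along the lines of the
 * code in arch/x86/mm/fault.c:
 *
 *	if (kvm_handle_async_pf(regs, (u32)address))
 *		return;
 *
 * The static key keeps this check essentially free when async PF is disabled.
 */
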
#ifdef CONFIG_PARAVIRT_SPINLOCKS
void __init kvm_spinlock_init(void);
#else /* !CONFIG_PARAVIRT_SPINLOCKS */
static inline void kvm_spinlock_init(void)
{
}
#endif /* CONFIG_PARAVIRT_SPINLOCKS */

#else /* CONFIG_KVM_GUEST */
#define kvm_async_pf_task_wait_schedule(T) do {} while(0)
#define kvm_async_pf_task_wake(T) do {} while(0)

static inline bool kvm_para_available(void)
{
	return false;
}

static inline unsigned int kvm_arch_para_features(void)
{
	return 0;
}

static inline unsigned int kvm_arch_para_hints(void)
{
	return 0;
}

static inline u32 kvm_read_and_reset_apf_flags(void)
{
	return 0;
}

static __always_inline bool kvm_handle_async_pf(struct pt_regs *regs, u32 token)
{
	return false;
}
#endif /* CONFIG_KVM_GUEST */

#endif /* _ASM_X86_KVM_PARA_H */