/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright © 2019 Oracle and/or its affiliates. All rights reserved.
 * Copyright © 2020 Amazon.com, Inc. or its affiliates. All Rights Reserved.
 *
 * KVM Xen emulation
 */

#ifndef __ARCH_X86_KVM_XEN_H__
#define __ARCH_X86_KVM_XEN_H__

#ifdef CONFIG_KVM_XEN
#include <linux/jump_label_ratelimit.h>

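/*
 * kvm_xen_enabled is a deferred static key: the fast paths below stay
 * patched out until a VMM actually enables Xen emulation, and the
 * deferred variant rate-limits re-patching when short-lived VMs toggle
 * the key on and off.
 */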
extern struct static_key_false_deferred kvm_xen_enabled;

int __kvm_xen_has_interrupt(struct kvm_vcpu *vcpu);
void kvm_xen_inject_pending_events(struct kvm_vcpu *vcpu);
void kvm_xen_inject_vcpu_vector(struct kvm_vcpu *vcpu);
int kvm_xen_vcpu_set_attr(struct kvm_vcpu *vcpu, struct kvm_xen_vcpu_attr *data);
int kvm_xen_vcpu_get_attr(struct kvm_vcpu *vcpu, struct kvm_xen_vcpu_attr *data);
int kvm_xen_hvm_set_attr(struct kvm *kvm, struct kvm_xen_hvm_attr *data);
int kvm_xen_hvm_get_attr(struct kvm *kvm, struct kvm_xen_hvm_attr *data);
int kvm_xen_hvm_evtchn_send(struct kvm *kvm, struct kvm_irq_routing_xen_evtchn *evt);
int kvm_xen_write_hypercall_page(struct kvm_vcpu *vcpu, u64 data);
int kvm_xen_hvm_config(struct kvm *kvm, struct kvm_xen_hvm_config *xhc);
void kvm_xen_init_vm(struct kvm *kvm);
void kvm_xen_destroy_vm(struct kvm *kvm);
void kvm_xen_init_vcpu(struct kvm_vcpu *vcpu);
void kvm_xen_destroy_vcpu(struct kvm_vcpu *vcpu);
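
/*
 * kvm_xen_set_evtchn_fast() tries to deliver an event channel port without
 * sleeping and may fail (e.g. if the shared_info cache needs revalidating),
 * in which case the caller must fall back to a path that can sleep;
 * kvm_xen_setup_evtchn() translates a userspace IRQ routing entry into the
 * kernel's internal representation.
 */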
int kvm_xen_set_evtchn_fast(struct kvm_xen_evtchn *xe,
			    struct kvm *kvm);
int kvm_xen_setup_evtchn(struct kvm *kvm,
			 struct kvm_kernel_irq_routing_entry *e,
			 const struct kvm_irq_routing_entry *ue);

static inline void kvm_xen_sw_enable_lapic(struct kvm_vcpu *vcpu)
{
	/*
	 * The local APIC is being enabled. If the per-vCPU upcall vector is
	 * set and the vCPU's evtchn_upcall_pending flag is set, inject the
	 * interrupt.
	 */
	if (static_branch_unlikely(&kvm_xen_enabled.key) &&
	    vcpu->arch.xen.vcpu_info_cache.active &&
	    vcpu->arch.xen.upcall_vector && __kvm_xen_has_interrupt(vcpu))
		kvm_xen_inject_vcpu_vector(vcpu);
}

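/*
 * True if the VMM has configured the MSR with which the guest asks KVM to
 * write its hypercall page.
 */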
static inline bool kvm_xen_msr_enabled(struct kvm *kvm)
{
	return static_branch_unlikely(&kvm_xen_enabled.key) &&
		kvm->arch.xen_hvm_config.msr;
}

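/*
 * True if Xen hypercall interception is enabled; hypercalls then reach
 * kvm_xen_hypercall() instead of the legacy MSR-written blob path.
 */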
static inline bool kvm_xen_hypercall_enabled(struct kvm *kvm)
{
	return static_branch_unlikely(&kvm_xen_enabled.key) &&
		(kvm->arch.xen_hvm_config.flags &
		 KVM_XEN_HVM_CONFIG_INTERCEPT_HCALL);
}

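/*
 * Nonzero if an event channel upcall may be pending; the cheap per-vCPU
 * checks keep this to a patched-out branch when Xen emulation is unused.
 */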
static inline int kvm_xen_has_interrupt(struct kvm_vcpu *vcpu)
{
	if (static_branch_unlikely(&kvm_xen_enabled.key) &&
	    vcpu->arch.xen.vcpu_info_cache.active &&
	    vcpu->kvm->arch.xen.upcall_vector)
		return __kvm_xen_has_interrupt(vcpu);

	return 0;
}

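/*
 * evtchn_pending_sel shadows selector bits that still need to be folded into
 * the guest's vcpu_info; nonzero means kvm_xen_inject_pending_events() has
 * outstanding work.
 */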
static inline bool kvm_xen_has_pending_events(struct kvm_vcpu *vcpu)
{
	return static_branch_unlikely(&kvm_xen_enabled.key) &&
		vcpu->arch.xen.evtchn_pending_sel;
}

static inline bool kvm_xen_timer_enabled(struct kvm_vcpu *vcpu)
{
	return !!vcpu->arch.xen.timer_virq;
}

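/*
 * A Xen timer event is only meaningful if the guest has bound VIRQ_TIMER
 * (timer_virq != 0) and hypercalls are being intercepted at all.
 */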
static inline int kvm_xen_has_pending_timer(struct kvm_vcpu *vcpu)
{
	if (kvm_xen_hypercall_enabled(vcpu->kvm) && kvm_xen_timer_enabled(vcpu))
		return atomic_read(&vcpu->arch.xen.timer_pending);

	return 0;
}

void kvm_xen_inject_timer_irqs(struct kvm_vcpu *vcpu);
#else
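/*
 * Stubs for CONFIG_KVM_XEN=n: the constant false/zero returns let the
 * compiler discard the Xen paths in callers entirely.
 */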
static inline int kvm_xen_write_hypercall_page(struct kvm_vcpu *vcpu, u64 data)
{
	return 1;
}

static inline void kvm_xen_init_vm(struct kvm *kvm)
{
}

static inline void kvm_xen_destroy_vm(struct kvm *kvm)
{
}

static inline void kvm_xen_init_vcpu(struct kvm_vcpu *vcpu)
{
}

static inline void kvm_xen_destroy_vcpu(struct kvm_vcpu *vcpu)
{
}

static inline void kvm_xen_sw_enable_lapic(struct kvm_vcpu *vcpu)
{
}

static inline bool kvm_xen_msr_enabled(struct kvm *kvm)
{
	return false;
}

static inline bool kvm_xen_hypercall_enabled(struct kvm *kvm)
{
	return false;
}

static inline int kvm_xen_has_interrupt(struct kvm_vcpu *vcpu)
{
	return 0;
}

static inline void kvm_xen_inject_pending_events(struct kvm_vcpu *vcpu)
{
}

static inline bool kvm_xen_has_pending_events(struct kvm_vcpu *vcpu)
{
	return false;
}

static inline int kvm_xen_has_pending_timer(struct kvm_vcpu *vcpu)
{
	return 0;
}

static inline void kvm_xen_inject_timer_irqs(struct kvm_vcpu *vcpu)
{
}

static inline bool kvm_xen_timer_enabled(struct kvm_vcpu *vcpu)
{
	return false;
}
#endif /* CONFIG_KVM_XEN */

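/*
 * Declared unconditionally: with CONFIG_KVM_XEN=n, callers are guarded by
 * kvm_xen_hypercall_enabled(), which is compile-time false, so the call is
 * eliminated as dead code.
 */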
int kvm_xen_hypercall(struct kvm_vcpu *vcpu);

#include <asm/pvclock-abi.h>
#include <asm/xen/interface.h>
#include <xen/interface/vcpu.h>

void kvm_xen_update_runstate_guest(struct kvm_vcpu *vcpu, int state);

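/*
 * Runstate accounting mirrors what Xen reports in vcpu_runstate_info: time
 * is split between RUNSTATE_running, RUNSTATE_runnable, RUNSTATE_blocked
 * and RUNSTATE_offline.
 */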
static inline void kvm_xen_runstate_set_running(struct kvm_vcpu *vcpu)
{
	kvm_xen_update_runstate_guest(vcpu, RUNSTATE_running);
}

static inline void kvm_xen_runstate_set_preempted(struct kvm_vcpu *vcpu)
{
	/*
	 * If the vCPU wasn't preempted but took a normal exit for
	 * some reason (hypercalls, I/O, etc.), that is accounted as
	 * still RUNSTATE_running, as the VMM is still operating on
	 * behalf of the vCPU. Only if the VMM does actually block
	 * does it need to enter RUNSTATE_blocked.
	 */
	if (WARN_ON_ONCE(!vcpu->preempted))
		return;

	kvm_xen_update_runstate_guest(vcpu, RUNSTATE_runnable);
}

/* 32-bit compatibility definitions, also used natively in 32-bit build */
struct compat_arch_vcpu_info {
	unsigned int cr2;
	unsigned int pad[5];
};

struct compat_vcpu_info {
	uint8_t evtchn_upcall_pending;
	uint8_t evtchn_upcall_mask;
	uint16_t pad;
	uint32_t evtchn_pending_sel;
	struct compat_arch_vcpu_info arch;
	struct pvclock_vcpu_time_info time;
}; /* 64 bytes (x86) */

struct compat_arch_shared_info {
	unsigned int max_pfn;
	unsigned int pfn_to_mfn_frame_list_list;
	unsigned int nmi_reason;
	unsigned int p2m_cr3;
	unsigned int p2m_vaddr;
	unsigned int p2m_generation;
	uint32_t wc_sec_hi;
};

struct compat_shared_info {
	struct compat_vcpu_info vcpu_info[MAX_VIRT_CPUS];
	uint32_t evtchn_pending[32];
	uint32_t evtchn_mask[32];
	struct pvclock_wall_clock wc;
	struct compat_arch_shared_info arch;
};

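/*
 * The 2-level event channel ABI exposes one port per bit of evtchn_pending:
 * 32 words of 32 bits = 1024 ports for 32-bit guests (the native 64-bit
 * layout gives 4096).
 */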
#define COMPAT_EVTCHN_2L_NR_CHANNELS (8 *				\
				      sizeof_field(struct compat_shared_info, \
						   evtchn_pending))

struct compat_vcpu_runstate_info {
	int state;
	uint64_t state_entry_time;
	uint64_t time[4];
} __attribute__((packed));

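/*
 * A minimal sketch of how these compat layouts can be sanity-checked at
 * build time; KVM's Xen code contains similar assertions, and the checks
 * shown here are illustrative rather than copies of the real ones:
 *
 *	BUILD_BUG_ON(offsetof(struct compat_vcpu_info, time) !=
 *		     offsetof(struct vcpu_info, time));
 *	BUILD_BUG_ON(sizeof(struct compat_vcpu_info) != 64);
 */
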
#endif /* __ARCH_X86_KVM_XEN_H__ */