// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright © 2019 Oracle and/or its affiliates. All rights reserved.
 * Copyright © 2020 Amazon.com, Inc. or its affiliates. All Rights Reserved.
 */

#ifndef __ARCH_X86_KVM_XEN_H__
#define __ARCH_X86_KVM_XEN_H__

#ifdef CONFIG_KVM_XEN
#include <linux/jump_label_ratelimit.h>

extern struct static_key_false_deferred kvm_xen_enabled;
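/*
 * Deferred static key that gates the inline helpers below:
 * static_branch_unlikely() on it keeps the Xen checks off the fast path
 * while the key is disabled.
 */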
int __kvm_xen_has_interrupt(struct kvm_vcpu *vcpu);
int kvm_xen_vcpu_set_attr(struct kvm_vcpu *vcpu, struct kvm_xen_vcpu_attr *data);
int kvm_xen_vcpu_get_attr(struct kvm_vcpu *vcpu, struct kvm_xen_vcpu_attr *data);
int kvm_xen_hvm_set_attr(struct kvm *kvm, struct kvm_xen_hvm_attr *data);
int kvm_xen_hvm_get_attr(struct kvm *kvm, struct kvm_xen_hvm_attr *data);
int kvm_xen_write_hypercall_page(struct kvm_vcpu *vcpu, u64 data);
int kvm_xen_hvm_config(struct kvm *kvm, struct kvm_xen_hvm_config *xhc);
void kvm_xen_init_vm(struct kvm *kvm);
void kvm_xen_destroy_vm(struct kvm *kvm);
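/*
 * True only when the Xen static key is enabled and userspace has
 * configured a hypercall page MSR (kvm->arch.xen_hvm_config.msr != 0).
 */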
static inline bool kvm_xen_msr_enabled(struct kvm *kvm)
{
	return static_branch_unlikely(&kvm_xen_enabled.key) &&
		kvm->arch.xen_hvm_config.msr;
}

static inline bool kvm_xen_hypercall_enabled(struct kvm *kvm)
{
	return static_branch_unlikely(&kvm_xen_enabled.key) &&
		(kvm->arch.xen_hvm_config.flags &
		 KVM_XEN_HVM_CONFIG_INTERCEPT_HCALL);
}
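/*
 * Inline fast path for the pending-upcall check: the slow path in
 * __kvm_xen_has_interrupt() is taken only when a vcpu_info has been set
 * and an upcall vector has been configured for the VM.
 */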
static inline int kvm_xen_has_interrupt(struct kvm_vcpu *vcpu)
{
	if (static_branch_unlikely(&kvm_xen_enabled.key) &&
	    vcpu->arch.xen.vcpu_info_set && vcpu->kvm->arch.xen.upcall_vector)
		return __kvm_xen_has_interrupt(vcpu);

	return 0;
}
#else
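/*
 * Stubs used when Xen emulation is compiled out (!CONFIG_KVM_XEN), so
 * common x86 KVM code can call these helpers unconditionally.
 */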
static inline int kvm_xen_write_hypercall_page(struct kvm_vcpu *vcpu, u64 data)
{
	return 1;
}

static inline void kvm_xen_init_vm(struct kvm *kvm)
{
}

static inline void kvm_xen_destroy_vm(struct kvm *kvm)
{
}

static inline bool kvm_xen_msr_enabled(struct kvm *kvm)
{
	return false;
}

static inline bool kvm_xen_hypercall_enabled(struct kvm *kvm)
{
	return false;
}

static inline int kvm_xen_has_interrupt(struct kvm_vcpu *vcpu)
{
	return 0;
}
#endif
int kvm_xen_hypercall(struct kvm_vcpu *vcpu);

#include <asm/pvclock-abi.h>
#include <asm/xen/interface.h>
#include <xen/interface/vcpu.h>

void kvm_xen_update_runstate_guest(struct kvm_vcpu *vcpu, int state);
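/*
 * Helpers that record vCPU run-state transitions (RUNSTATE_running,
 * RUNSTATE_runnable) in the guest-visible Xen runstate information.
 */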
static inline void kvm_xen_runstate_set_running(struct kvm_vcpu *vcpu)
{
	kvm_xen_update_runstate_guest(vcpu, RUNSTATE_running);
}

static inline void kvm_xen_runstate_set_preempted(struct kvm_vcpu *vcpu)
{
	/*
	 * If the vCPU wasn't preempted but took a normal exit for
	 * some reason (hypercalls, I/O, etc.), that is accounted as
	 * still RUNSTATE_running, as the VMM is still operating on
	 * behalf of the vCPU. Only if the VMM does actually block
	 * does it need to enter RUNSTATE_blocked.
	 */
	if (WARN_ON_ONCE(!vcpu->preempted))
		return;

	kvm_xen_update_runstate_guest(vcpu, RUNSTATE_runnable);
}
/* 32-bit compatibility definitions, also used natively in 32-bit build */
struct compat_arch_vcpu_info {
	unsigned int cr2;
	unsigned int pad[5];
};

struct compat_vcpu_info {
	uint8_t evtchn_upcall_pending;
	uint8_t evtchn_upcall_mask;
	uint16_t pad;
	uint32_t evtchn_pending_sel;
	struct compat_arch_vcpu_info arch;
	struct pvclock_vcpu_time_info time;
}; /* 64 bytes (x86) */

struct compat_arch_shared_info {
	unsigned int max_pfn;
	unsigned int pfn_to_mfn_frame_list_list;
	unsigned int nmi_reason;
	unsigned int p2m_cr3;
	unsigned int p2m_vaddr;
	unsigned int p2m_generation;
	uint32_t wc_sec_hi;
};

struct compat_shared_info {
	struct compat_vcpu_info vcpu_info[MAX_VIRT_CPUS];
	uint32_t evtchn_pending[32];
	uint32_t evtchn_mask[32];
	struct pvclock_wall_clock wc;
	struct compat_arch_shared_info arch;
};

struct compat_vcpu_runstate_info {
	int state;
	uint64_t state_entry_time;
	uint64_t time[4];
} __attribute__((packed));

#endif /* __ARCH_X86_KVM_XEN_H__ */