/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2019 Western Digital Corporation or its affiliates.
 *
 * Authors:
 *	Anup Patel <anup.patel@wdc.com>
 */
#ifndef __RISCV_KVM_HOST_H__
#define __RISCV_KVM_HOST_H__

#include <linux/types.h>
#include <linux/kvm.h>
#include <linux/kvm_types.h>
#include <linux/spinlock.h>
#include <asm/kvm_vcpu_fp.h>
#include <asm/kvm_vcpu_timer.h>
#define KVM_MAX_VCPUS			1024

#define KVM_HALT_POLL_NS_DEFAULT	500000

#define KVM_VCPU_MAX_FEATURES		0

#define KVM_REQ_SLEEP \
	KVM_ARCH_REQ_FLAGS(0, KVM_REQUEST_WAIT | KVM_REQUEST_NO_WAKEUP)
#define KVM_REQ_VCPU_RESET		KVM_ARCH_REQ(1)
#define KVM_REQ_UPDATE_HGATP		KVM_ARCH_REQ(2)
#define KVM_REQ_FENCE_I			\
	KVM_ARCH_REQ_FLAGS(3, KVM_REQUEST_WAIT | KVM_REQUEST_NO_WAKEUP)
#define KVM_REQ_HFENCE_GVMA_VMID_ALL	KVM_REQ_TLB_FLUSH
#define KVM_REQ_HFENCE_VVMA_ALL		\
	KVM_ARCH_REQ_FLAGS(4, KVM_REQUEST_WAIT | KVM_REQUEST_NO_WAKEUP)
#define KVM_REQ_HFENCE			\
	KVM_ARCH_REQ_FLAGS(5, KVM_REQUEST_WAIT | KVM_REQUEST_NO_WAKEUP)
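
/*
 * Illustrative only (editorial addition, not part of the original header):
 * arch requests such as the ones above are raised and consumed through the
 * generic KVM request API from <linux/kvm_host.h>, e.g.
 *
 *	kvm_make_request(KVM_REQ_FENCE_I, vcpu);
 *	kvm_vcpu_kick(vcpu);
 *
 * on the producer side, with kvm_check_request(KVM_REQ_FENCE_I, vcpu)
 * testing and clearing the bit on the VCPU run-loop side.
 */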
enum kvm_riscv_hfence_type {
	KVM_RISCV_HFENCE_UNKNOWN = 0,
	KVM_RISCV_HFENCE_GVMA_VMID_GPA,
	KVM_RISCV_HFENCE_VVMA_ASID_GVA,
	KVM_RISCV_HFENCE_VVMA_ASID_ALL,
	KVM_RISCV_HFENCE_VVMA_GVA,
};
struct kvm_riscv_hfence {
	enum kvm_riscv_hfence_type type;
	unsigned long asid;
	unsigned long order;
	gpa_t addr;
	gpa_t size;
};
#define KVM_RISCV_VCPU_MAX_HFENCE	64

struct kvm_vm_stat {
	struct kvm_vm_stat_generic generic;
};
struct kvm_vcpu_stat {
	struct kvm_vcpu_stat_generic generic;
};
struct kvm_arch_memory_slot {
};
struct kvm_vmid {
	/*
	 * Writes to vmid_version and vmid happen with vmid_lock held
	 * whereas reads happen without any lock held.
	 */
	unsigned long vmid_version;
	unsigned long vmid;
};
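
/*
 * A minimal sketch of the locking discipline described above (illustrative
 * only; vmid_lock is assumed to be a spinlock owned by the VMID allocator,
 * not a member of this struct):
 *
 *	spin_lock(&vmid_lock);				// writer side
 *	WRITE_ONCE(vmid->vmid, new_vmid);
 *	WRITE_ONCE(vmid->vmid_version, new_version);
 *	spin_unlock(&vmid_lock);
 *
 *	if (READ_ONCE(vmid->vmid_version) != version)	// lockless reader
 *		// ...request a VMID update and retry...
 */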
struct kvm_arch {
	/* G-stage vmid */
	struct kvm_vmid vmid;

	/* G-stage page table */
	pgd_t *pgd;
	phys_addr_t pgd_phys;

	/* Guest Timer */
	struct kvm_guest_timer timer;
};
struct kvm_mmio_decode {
	unsigned long insn;
	int insn_len;
	int len;
	int shift;
	int return_handled;
};
struct kvm_sbi_context {
	int return_handled;
};
struct kvm_cpu_trap {
	unsigned long sepc;
	unsigned long scause;
	unsigned long stval;
	unsigned long htval;
	unsigned long htinst;
};
struct kvm_cpu_context {
	unsigned long zero;
	unsigned long ra;
	unsigned long sp;
	unsigned long gp;
	unsigned long tp;
	unsigned long t0;
	unsigned long t1;
	unsigned long t2;
	unsigned long s0;
	unsigned long s1;
	unsigned long a0;
	unsigned long a1;
	unsigned long a2;
	unsigned long a3;
	unsigned long a4;
	unsigned long a5;
	unsigned long a6;
	unsigned long a7;
	unsigned long s2;
	unsigned long s3;
	unsigned long s4;
	unsigned long s5;
	unsigned long s6;
	unsigned long s7;
	unsigned long s8;
	unsigned long s9;
	unsigned long s10;
	unsigned long s11;
	unsigned long t3;
	unsigned long t4;
	unsigned long t5;
	unsigned long t6;
	unsigned long sepc;
	unsigned long sstatus;
	unsigned long hstatus;
	union __riscv_fp_state fp;
};
struct kvm_vcpu_csr {
	unsigned long vsstatus;
	unsigned long vsie;
	unsigned long vstvec;
	unsigned long vsscratch;
	unsigned long vsepc;
	unsigned long vscause;
	unsigned long vstval;
	unsigned long hvip;
	unsigned long vsatp;
	unsigned long scounteren;
};
struct kvm_vcpu_arch {
	/* VCPU ran at least once */
	bool ran_atleast_once;

	/* Last Host CPU on which Guest VCPU exited */
	int last_exit_cpu;

	/* ISA feature bits (similar to MISA) */
	unsigned long isa;

	/* SSCRATCH, STVEC, and SCOUNTEREN of Host */
	unsigned long host_sscratch;
	unsigned long host_stvec;
	unsigned long host_scounteren;
	/* CPU context of Host */
	struct kvm_cpu_context host_context;

	/* CPU context of Guest VCPU */
	struct kvm_cpu_context guest_context;

	/* CPU CSR context of Guest VCPU */
	struct kvm_vcpu_csr guest_csr;

	/* CPU context upon Guest VCPU reset */
	struct kvm_cpu_context guest_reset_context;

	/* CPU CSR context upon Guest VCPU reset */
	struct kvm_vcpu_csr guest_reset_csr;
	/*
	 * VCPU interrupts
	 *
	 * We have a lockless approach for tracking pending VCPU interrupts
	 * implemented using atomic bitops. The irqs_pending bitmap represents
	 * pending interrupts whereas irqs_pending_mask represents the bits
	 * changed in irqs_pending. Our approach is modeled around a
	 * multiple-producer, single-consumer problem where the consumer is
	 * the VCPU itself.
	 */
	unsigned long irqs_pending;
	unsigned long irqs_pending_mask;
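
	/*
	 * A sketch of the producer side (illustrative, not the exact
	 * implementation): the pending bit is set before the changed-bit
	 * mask so the consumer never misses an update.
	 *
	 *	set_bit(irq, &vcpu->arch.irqs_pending);
	 *	smp_mb__before_atomic();
	 *	set_bit(irq, &vcpu->arch.irqs_pending_mask);
	 *	kvm_vcpu_kick(vcpu);
	 *
	 * The single consumer (the VCPU) harvests bits flagged in
	 * irqs_pending_mask and folds them into its interrupt CSR state.
	 */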
	/* VCPU Timer */
	struct kvm_vcpu_timer timer;

	/* HFENCE request queue */
	spinlock_t hfence_lock;
	unsigned long hfence_head;
	unsigned long hfence_tail;
	struct kvm_riscv_hfence hfence_queue[KVM_RISCV_VCPU_MAX_HFENCE];
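
	/*
	 * A minimal enqueue sketch for the bounded ring above (illustrative;
	 * the real producer/consumer helpers live in the TLB management
	 * code): head/tail index hfence_queue modulo the queue size, under
	 * hfence_lock.
	 *
	 *	spin_lock(&vcpu->arch.hfence_lock);
	 *	vcpu->arch.hfence_queue[vcpu->arch.hfence_tail] = *data;
	 *	vcpu->arch.hfence_tail = (vcpu->arch.hfence_tail + 1) %
	 *				 KVM_RISCV_VCPU_MAX_HFENCE;
	 *	spin_unlock(&vcpu->arch.hfence_lock);
	 *
	 * followed by kvm_make_request(KVM_REQ_HFENCE, vcpu) so the VCPU
	 * drains the queue before re-entering the guest.
	 */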
	/* MMIO instruction details */
	struct kvm_mmio_decode mmio_decode;

	/* SBI context */
	struct kvm_sbi_context sbi_context;

	/* Cache pages needed to program page tables with spinlock held */
	struct kvm_mmu_memory_cache mmu_page_cache;
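
	/*
	 * Illustrative usage of the cache (a sketch; N is a placeholder for
	 * the page-table depth the fault path may need): callers top it up
	 * in a sleepable context, then allocate from it with the MMU
	 * spinlock held using the generic KVM helpers.
	 *
	 *	r = kvm_mmu_topup_memory_cache(&vcpu->arch.mmu_page_cache, N);
	 *	...
	 *	spin_lock(&kvm->mmu_lock);
	 *	ptep = kvm_mmu_memory_cache_alloc(&vcpu->arch.mmu_page_cache);
	 *	spin_unlock(&kvm->mmu_lock);
	 */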
	/* VCPU power-off state */
	bool power_off;

	/* Don't run the VCPU (blocked) */
	bool pause;
};
static inline void kvm_arch_hardware_unsetup(void) {}
static inline void kvm_arch_sync_events(struct kvm *kvm) {}
static inline void kvm_arch_sched_in(struct kvm_vcpu *vcpu, int cpu) {}

#define KVM_ARCH_WANT_MMU_NOTIFIER

#define KVM_RISCV_GSTAGE_TLB_MIN_ORDER		12
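
/*
 * Note (editorial): the "order" parameter taken by the hfence helpers
 * below is log2 of the flush granule in bytes, so the minimum order of
 * 12 corresponds to 4 KiB pages; flushing a 2 MiB mapping would pass
 * order = 21.
 */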
void kvm_riscv_local_hfence_gvma_vmid_gpa(unsigned long vmid,
					  gpa_t gpa, gpa_t gpsz,
					  unsigned long order);
void kvm_riscv_local_hfence_gvma_vmid_all(unsigned long vmid);
void kvm_riscv_local_hfence_gvma_gpa(gpa_t gpa, gpa_t gpsz,
				     unsigned long order);
void kvm_riscv_local_hfence_gvma_all(void);
void kvm_riscv_local_hfence_vvma_asid_gva(unsigned long vmid,
					  unsigned long asid,
					  unsigned long gva, unsigned long gvsz,
					  unsigned long order);
void kvm_riscv_local_hfence_vvma_asid_all(unsigned long vmid,
					  unsigned long asid);
void kvm_riscv_local_hfence_vvma_gva(unsigned long vmid,
				     unsigned long gva, unsigned long gvsz,
				     unsigned long order);
void kvm_riscv_local_hfence_vvma_all(unsigned long vmid);
void kvm_riscv_local_tlb_sanitize(struct kvm_vcpu *vcpu);

void kvm_riscv_fence_i_process(struct kvm_vcpu *vcpu);
void kvm_riscv_hfence_gvma_vmid_all_process(struct kvm_vcpu *vcpu);
void kvm_riscv_hfence_vvma_all_process(struct kvm_vcpu *vcpu);
void kvm_riscv_hfence_process(struct kvm_vcpu *vcpu);
void kvm_riscv_fence_i(struct kvm *kvm,
		       unsigned long hbase, unsigned long hmask);
void kvm_riscv_hfence_gvma_vmid_gpa(struct kvm *kvm,
				    unsigned long hbase, unsigned long hmask,
				    gpa_t gpa, gpa_t gpsz,
				    unsigned long order);
void kvm_riscv_hfence_gvma_vmid_all(struct kvm *kvm,
				    unsigned long hbase, unsigned long hmask);
void kvm_riscv_hfence_vvma_asid_gva(struct kvm *kvm,
				    unsigned long hbase, unsigned long hmask,
				    unsigned long gva, unsigned long gvsz,
				    unsigned long order, unsigned long asid);
void kvm_riscv_hfence_vvma_asid_all(struct kvm *kvm,
				    unsigned long hbase, unsigned long hmask,
				    unsigned long asid);
void kvm_riscv_hfence_vvma_gva(struct kvm *kvm,
			       unsigned long hbase, unsigned long hmask,
			       unsigned long gva, unsigned long gvsz,
			       unsigned long order);
void kvm_riscv_hfence_vvma_all(struct kvm *kvm,
			       unsigned long hbase, unsigned long hmask);
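
/*
 * Example call (a sketch; the hbase/hmask convention is an assumption
 * following the SBI rfence style, where hbase == -1UL targets all VCPUs):
 * flush one 4 KiB guest-physical page for every VCPU of the VM.
 *
 *	kvm_riscv_hfence_gvma_vmid_gpa(kvm, -1UL, 0, gpa,
 *				       PAGE_SIZE, PAGE_SHIFT);
 */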
int kvm_riscv_gstage_map(struct kvm_vcpu *vcpu,
			 struct kvm_memory_slot *memslot,
			 gpa_t gpa, unsigned long hva, bool is_write);
int kvm_riscv_gstage_alloc_pgd(struct kvm *kvm);
void kvm_riscv_gstage_free_pgd(struct kvm *kvm);
void kvm_riscv_gstage_update_hgatp(struct kvm_vcpu *vcpu);
void kvm_riscv_gstage_mode_detect(void);
unsigned long kvm_riscv_gstage_mode(void);
int kvm_riscv_gstage_gpa_bits(void);

void kvm_riscv_gstage_vmid_detect(void);
unsigned long kvm_riscv_gstage_vmid_bits(void);
int kvm_riscv_gstage_vmid_init(struct kvm *kvm);
bool kvm_riscv_gstage_vmid_ver_changed(struct kvm_vmid *vmid);
void kvm_riscv_gstage_vmid_update(struct kvm_vcpu *vcpu);
void __kvm_riscv_unpriv_trap(void);

void kvm_riscv_vcpu_wfi(struct kvm_vcpu *vcpu);
unsigned long kvm_riscv_vcpu_unpriv_read(struct kvm_vcpu *vcpu,
					 bool read_insn,
					 unsigned long guest_addr,
					 struct kvm_cpu_trap *trap);
void kvm_riscv_vcpu_trap_redirect(struct kvm_vcpu *vcpu,
				  struct kvm_cpu_trap *trap);
int kvm_riscv_vcpu_mmio_return(struct kvm_vcpu *vcpu, struct kvm_run *run);
int kvm_riscv_vcpu_exit(struct kvm_vcpu *vcpu, struct kvm_run *run,
			struct kvm_cpu_trap *trap);
void __kvm_riscv_switch_to(struct kvm_vcpu_arch *vcpu_arch);

int kvm_riscv_vcpu_set_interrupt(struct kvm_vcpu *vcpu, unsigned int irq);
int kvm_riscv_vcpu_unset_interrupt(struct kvm_vcpu *vcpu, unsigned int irq);
void kvm_riscv_vcpu_flush_interrupts(struct kvm_vcpu *vcpu);
void kvm_riscv_vcpu_sync_interrupts(struct kvm_vcpu *vcpu);
bool kvm_riscv_vcpu_has_interrupts(struct kvm_vcpu *vcpu, unsigned long mask);
void kvm_riscv_vcpu_power_off(struct kvm_vcpu *vcpu);
void kvm_riscv_vcpu_power_on(struct kvm_vcpu *vcpu);

int kvm_riscv_vcpu_sbi_return(struct kvm_vcpu *vcpu, struct kvm_run *run);
int kvm_riscv_vcpu_sbi_ecall(struct kvm_vcpu *vcpu, struct kvm_run *run);

#endif /* __RISCV_KVM_HOST_H__ */