// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2019 Western Digital Corporation or its affiliates.
 *
 * Authors:
 *	Anup Patel <anup.patel@wdc.com>
 */
#include <linux/bitops.h>
#include <linux/entry-kvm.h>
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/kdebug.h>
#include <linux/module.h>
#include <linux/percpu.h>
#include <linux/uaccess.h>
#include <linux/vmalloc.h>
#include <linux/sched/signal.h>

#include <linux/kvm_host.h>

#include <asm/cacheflush.h>
#include <asm/hwcap.h>
const struct _kvm_stats_desc kvm_vcpu_stats_desc[] = {
	KVM_GENERIC_VCPU_STATS(),
	STATS_DESC_COUNTER(VCPU, ecall_exit_stat),
	STATS_DESC_COUNTER(VCPU, wfi_exit_stat),
	STATS_DESC_COUNTER(VCPU, mmio_exit_user),
	STATS_DESC_COUNTER(VCPU, mmio_exit_kernel),
	STATS_DESC_COUNTER(VCPU, csr_exit_user),
	STATS_DESC_COUNTER(VCPU, csr_exit_kernel),
	STATS_DESC_COUNTER(VCPU, signal_exits),
	STATS_DESC_COUNTER(VCPU, exits)
};

const struct kvm_stats_header kvm_vcpu_stats_header = {
	.name_size = KVM_STATS_NAME_SIZE,
	.num_desc = ARRAY_SIZE(kvm_vcpu_stats_desc),
	.id_offset = sizeof(struct kvm_stats_header),
	.desc_offset = sizeof(struct kvm_stats_header) + KVM_STATS_NAME_SIZE,
	.data_offset = sizeof(struct kvm_stats_header) + KVM_STATS_NAME_SIZE +
		       sizeof(kvm_vcpu_stats_desc),
};
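
/* The single-letter base ISA extensions occupy bits 0-25 ('a' through 'z') */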
#define KVM_RISCV_BASE_ISA_MASK		GENMASK(25, 0)

#define KVM_ISA_EXT_ARR(ext)		[KVM_RISCV_ISA_EXT_##ext] = RISCV_ISA_EXT_##ext

/* Mapping between KVM ISA Extension ID & Host ISA extension ID */
static const unsigned long kvm_isa_ext_arr[] = {
	[KVM_RISCV_ISA_EXT_A] = RISCV_ISA_EXT_a,
	[KVM_RISCV_ISA_EXT_C] = RISCV_ISA_EXT_c,
	[KVM_RISCV_ISA_EXT_D] = RISCV_ISA_EXT_d,
	[KVM_RISCV_ISA_EXT_F] = RISCV_ISA_EXT_f,
	[KVM_RISCV_ISA_EXT_H] = RISCV_ISA_EXT_h,
	[KVM_RISCV_ISA_EXT_I] = RISCV_ISA_EXT_i,
	[KVM_RISCV_ISA_EXT_M] = RISCV_ISA_EXT_m,

	KVM_ISA_EXT_ARR(SSTC),
	KVM_ISA_EXT_ARR(SVINVAL),
	KVM_ISA_EXT_ARR(SVPBMT),
	KVM_ISA_EXT_ARR(ZIHINTPAUSE),
	KVM_ISA_EXT_ARR(ZICBOM),
};
static unsigned long kvm_riscv_vcpu_base2isa_ext(unsigned long base_ext)
{
	unsigned long i;

	/* Bound by the array size so a larger KVM_RISCV_ISA_EXT_MAX cannot overrun */
	for (i = 0; i < ARRAY_SIZE(kvm_isa_ext_arr); i++) {
		if (kvm_isa_ext_arr[i] == base_ext)
			return i;
	}

	return KVM_RISCV_ISA_EXT_MAX;
}
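
/*
 * The H-extension is what provides virtualization itself; nested
 * virtualization is not supported, so a guest may never enable it.
 */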
static bool kvm_riscv_vcpu_isa_enable_allowed(unsigned long ext)
{
	switch (ext) {
	case KVM_RISCV_ISA_EXT_H:
		return false;
	default:
		break;
	}

	return true;
}
static bool kvm_riscv_vcpu_isa_disable_allowed(unsigned long ext)
{
	switch (ext) {
	case KVM_RISCV_ISA_EXT_A:
	case KVM_RISCV_ISA_EXT_C:
	case KVM_RISCV_ISA_EXT_I:
	case KVM_RISCV_ISA_EXT_M:
	case KVM_RISCV_ISA_EXT_SSTC:
	case KVM_RISCV_ISA_EXT_SVINVAL:
	case KVM_RISCV_ISA_EXT_ZIHINTPAUSE:
		return false;
	default:
		break;
	}

	return true;
}
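
/*
 * Reload the reset CSR and GPR images, clear pending interrupts, and
 * drain the stale hfence queue so the VCPU restarts from a clean state.
 */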
static void kvm_riscv_reset_vcpu(struct kvm_vcpu *vcpu)
{
	struct kvm_vcpu_csr *csr = &vcpu->arch.guest_csr;
	struct kvm_vcpu_csr *reset_csr = &vcpu->arch.guest_reset_csr;
	struct kvm_cpu_context *cntx = &vcpu->arch.guest_context;
	struct kvm_cpu_context *reset_cntx = &vcpu->arch.guest_reset_context;
	bool loaded;

	/*
	 * Preemption must be disabled here because it races with
	 * kvm_sched_out/kvm_sched_in (called from preempt notifiers)
	 * which also calls vcpu_load/put.
	 */
	get_cpu();
	loaded = (vcpu->cpu != -1);
	if (loaded)
		kvm_arch_vcpu_put(vcpu);

	vcpu->arch.last_exit_cpu = -1;

	memcpy(csr, reset_csr, sizeof(*csr));

	memcpy(cntx, reset_cntx, sizeof(*cntx));

	kvm_riscv_vcpu_fp_reset(vcpu);

	kvm_riscv_vcpu_timer_reset(vcpu);

	WRITE_ONCE(vcpu->arch.irqs_pending, 0);
	WRITE_ONCE(vcpu->arch.irqs_pending_mask, 0);

	vcpu->arch.hfence_head = 0;
	vcpu->arch.hfence_tail = 0;
	memset(vcpu->arch.hfence_queue, 0, sizeof(vcpu->arch.hfence_queue));

	/* Reset the guest CSRs for hotplug usecase */
	if (loaded)
		kvm_arch_vcpu_load(vcpu, smp_processor_id());
	put_cpu();
}
int kvm_arch_vcpu_precreate(struct kvm *kvm, unsigned int id)
{
	return 0;
}
int kvm_arch_vcpu_create(struct kvm_vcpu *vcpu)
{
	struct kvm_cpu_context *cntx;
	struct kvm_vcpu_csr *reset_csr = &vcpu->arch.guest_reset_csr;
	unsigned long host_isa, i;

	/* Mark this VCPU never ran */
	vcpu->arch.ran_atleast_once = false;
	vcpu->arch.mmu_page_cache.gfp_zero = __GFP_ZERO;
	bitmap_zero(vcpu->arch.isa, RISCV_ISA_EXT_MAX);

	/* Setup ISA features available to VCPU */
	for (i = 0; i < ARRAY_SIZE(kvm_isa_ext_arr); i++) {
		host_isa = kvm_isa_ext_arr[i];
		if (__riscv_isa_extension_available(NULL, host_isa) &&
		    kvm_riscv_vcpu_isa_enable_allowed(i))
			set_bit(host_isa, vcpu->arch.isa);
	}

	/* Setup VCPU hfence queue */
	spin_lock_init(&vcpu->arch.hfence_lock);

	/* Setup reset state of shadow SSTATUS and HSTATUS CSRs */
	cntx = &vcpu->arch.guest_reset_context;
	cntx->sstatus = SR_SPP | SR_SPIE;
	cntx->hstatus = 0;
	cntx->hstatus |= HSTATUS_VTW;
	cntx->hstatus |= HSTATUS_SPVP;
	cntx->hstatus |= HSTATUS_SPV;

	/* By default, make CY, TM, and IR counters accessible in VU mode */
	reset_csr->scounteren = 0x7;

	/* Setup VCPU timer */
	kvm_riscv_vcpu_timer_init(vcpu);

	/* Reset VCPU */
	kvm_riscv_reset_vcpu(vcpu);

	return 0;
}
void kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu)
{
	/*
	 * vcpu with id 0 is the designated boot cpu.
	 * Keep all vcpus with non-zero id in power-off state so that
	 * they can be brought up using SBI HSM extension.
	 */
	if (vcpu->vcpu_idx != 0)
		kvm_riscv_vcpu_power_off(vcpu);
}
void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
{
	/* Cleanup VCPU timer */
	kvm_riscv_vcpu_timer_deinit(vcpu);

	/* Free unused pages pre-allocated for G-stage page table mappings */
	kvm_mmu_free_memory_cache(&vcpu->arch.mmu_page_cache);
}
int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu)
{
	return kvm_riscv_vcpu_timer_pending(vcpu);
}

void kvm_arch_vcpu_blocking(struct kvm_vcpu *vcpu)
{
}

void kvm_arch_vcpu_unblocking(struct kvm_vcpu *vcpu)
{
}

int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu)
{
	return (kvm_riscv_vcpu_has_interrupts(vcpu, -1UL) &&
		!vcpu->arch.power_off && !vcpu->arch.pause);
}

int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu)
{
	return kvm_vcpu_exiting_guest_mode(vcpu) == IN_GUEST_MODE;
}

bool kvm_arch_vcpu_in_kernel(struct kvm_vcpu *vcpu)
{
	return (vcpu->arch.guest_context.sstatus & SR_SPP) ? true : false;
}

vm_fault_t kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf)
{
	return VM_FAULT_SIGBUS;
}
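
/*
 * KVM_GET_ONE_REG handler for the CONFIG register space: the base ISA
 * bitmap (single-letter extensions only) and the Zicbom block size.
 */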
static int kvm_riscv_vcpu_get_reg_config(struct kvm_vcpu *vcpu,
					 const struct kvm_one_reg *reg)
{
	unsigned long __user *uaddr =
			(unsigned long __user *)(unsigned long)reg->addr;
	unsigned long reg_num = reg->id & ~(KVM_REG_ARCH_MASK |
					    KVM_REG_SIZE_MASK |
					    KVM_REG_RISCV_CONFIG);
	unsigned long reg_val;

	if (KVM_REG_SIZE(reg->id) != sizeof(unsigned long))
		return -EINVAL;

	switch (reg_num) {
	case KVM_REG_RISCV_CONFIG_REG(isa):
		reg_val = vcpu->arch.isa[0] & KVM_RISCV_BASE_ISA_MASK;
		break;
	case KVM_REG_RISCV_CONFIG_REG(zicbom_block_size):
		if (!riscv_isa_extension_available(vcpu->arch.isa, ZICBOM))
			return -EINVAL;
		reg_val = riscv_cbom_block_size;
		break;
	default:
		return -EINVAL;
	}

	if (copy_to_user(uaddr, &reg_val, KVM_REG_SIZE(reg->id)))
		return -EFAULT;

	return 0;
}
static int kvm_riscv_vcpu_set_reg_config(struct kvm_vcpu *vcpu,
					 const struct kvm_one_reg *reg)
{
	unsigned long __user *uaddr =
			(unsigned long __user *)(unsigned long)reg->addr;
	unsigned long reg_num = reg->id & ~(KVM_REG_ARCH_MASK |
					    KVM_REG_SIZE_MASK |
					    KVM_REG_RISCV_CONFIG);
	unsigned long i, isa_ext, reg_val;

	if (KVM_REG_SIZE(reg->id) != sizeof(unsigned long))
		return -EINVAL;

	if (copy_from_user(&reg_val, uaddr, KVM_REG_SIZE(reg->id)))
		return -EFAULT;

	switch (reg_num) {
	case KVM_REG_RISCV_CONFIG_REG(isa):
		/*
		 * This ONE REG interface is only defined for
		 * single letter extensions.
		 */
		if (fls(reg_val) >= RISCV_ISA_EXT_BASE)
			return -EINVAL;

		if (!vcpu->arch.ran_atleast_once) {
			/* Ignore the enable/disable request for certain extensions */
			for (i = 0; i < RISCV_ISA_EXT_BASE; i++) {
				isa_ext = kvm_riscv_vcpu_base2isa_ext(i);
				if (isa_ext >= KVM_RISCV_ISA_EXT_MAX) {
					reg_val &= ~BIT(i);
					continue;
				}
				if (!kvm_riscv_vcpu_isa_enable_allowed(isa_ext))
					if (reg_val & BIT(i))
						reg_val &= ~BIT(i);
				if (!kvm_riscv_vcpu_isa_disable_allowed(isa_ext))
					if (!(reg_val & BIT(i)))
						reg_val |= BIT(i);
			}
			reg_val &= riscv_isa_extension_base(NULL);
			/* Do not modify anything beyond single letter extensions */
			reg_val = (vcpu->arch.isa[0] & ~KVM_RISCV_BASE_ISA_MASK) |
				  (reg_val & KVM_RISCV_BASE_ISA_MASK);
			vcpu->arch.isa[0] = reg_val;
			kvm_riscv_vcpu_fp_reset(vcpu);
		} else {
			return -EOPNOTSUPP;
		}
		break;
	case KVM_REG_RISCV_CONFIG_REG(zicbom_block_size):
		return -EOPNOTSUPP;
	default:
		return -EINVAL;
	}

	return 0;
}
static int kvm_riscv_vcpu_get_reg_core(struct kvm_vcpu *vcpu,
				       const struct kvm_one_reg *reg)
{
	struct kvm_cpu_context *cntx = &vcpu->arch.guest_context;
	unsigned long __user *uaddr =
			(unsigned long __user *)(unsigned long)reg->addr;
	unsigned long reg_num = reg->id & ~(KVM_REG_ARCH_MASK |
					    KVM_REG_SIZE_MASK |
					    KVM_REG_RISCV_CORE);
	unsigned long reg_val;

	if (KVM_REG_SIZE(reg->id) != sizeof(unsigned long))
		return -EINVAL;
	if (reg_num >= sizeof(struct kvm_riscv_core) / sizeof(unsigned long))
		return -EINVAL;

	if (reg_num == KVM_REG_RISCV_CORE_REG(regs.pc))
		reg_val = cntx->sepc;
	else if (KVM_REG_RISCV_CORE_REG(regs.pc) < reg_num &&
		 reg_num <= KVM_REG_RISCV_CORE_REG(regs.t6))
		reg_val = ((unsigned long *)cntx)[reg_num];
	else if (reg_num == KVM_REG_RISCV_CORE_REG(mode))
		reg_val = (cntx->sstatus & SR_SPP) ?
				KVM_RISCV_MODE_S : KVM_RISCV_MODE_U;
	else
		return -EINVAL;

	if (copy_to_user(uaddr, &reg_val, KVM_REG_SIZE(reg->id)))
		return -EFAULT;

	return 0;
}
static int kvm_riscv_vcpu_set_reg_core(struct kvm_vcpu *vcpu,
				       const struct kvm_one_reg *reg)
{
	struct kvm_cpu_context *cntx = &vcpu->arch.guest_context;
	unsigned long __user *uaddr =
			(unsigned long __user *)(unsigned long)reg->addr;
	unsigned long reg_num = reg->id & ~(KVM_REG_ARCH_MASK |
					    KVM_REG_SIZE_MASK |
					    KVM_REG_RISCV_CORE);
	unsigned long reg_val;

	if (KVM_REG_SIZE(reg->id) != sizeof(unsigned long))
		return -EINVAL;
	if (reg_num >= sizeof(struct kvm_riscv_core) / sizeof(unsigned long))
		return -EINVAL;

	if (copy_from_user(&reg_val, uaddr, KVM_REG_SIZE(reg->id)))
		return -EFAULT;

	if (reg_num == KVM_REG_RISCV_CORE_REG(regs.pc))
		cntx->sepc = reg_val;
	else if (KVM_REG_RISCV_CORE_REG(regs.pc) < reg_num &&
		 reg_num <= KVM_REG_RISCV_CORE_REG(regs.t6))
		((unsigned long *)cntx)[reg_num] = reg_val;
	else if (reg_num == KVM_REG_RISCV_CORE_REG(mode)) {
		if (reg_val == KVM_RISCV_MODE_S)
			cntx->sstatus |= SR_SPP;
		else
			cntx->sstatus &= ~SR_SPP;
	} else
		return -EINVAL;

	return 0;
}
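
/*
 * The sip CSR is virtualized through HVIP: reads first flush pending
 * interrupt updates into HVIP, and both directions translate between
 * the VS-level (sip) and HS-level (hvip) bit positions.
 */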
static int kvm_riscv_vcpu_get_reg_csr(struct kvm_vcpu *vcpu,
				      const struct kvm_one_reg *reg)
{
	struct kvm_vcpu_csr *csr = &vcpu->arch.guest_csr;
	unsigned long __user *uaddr =
			(unsigned long __user *)(unsigned long)reg->addr;
	unsigned long reg_num = reg->id & ~(KVM_REG_ARCH_MASK |
					    KVM_REG_SIZE_MASK |
					    KVM_REG_RISCV_CSR);
	unsigned long reg_val;

	if (KVM_REG_SIZE(reg->id) != sizeof(unsigned long))
		return -EINVAL;
	if (reg_num >= sizeof(struct kvm_riscv_csr) / sizeof(unsigned long))
		return -EINVAL;

	if (reg_num == KVM_REG_RISCV_CSR_REG(sip)) {
		kvm_riscv_vcpu_flush_interrupts(vcpu);
		reg_val = (csr->hvip >> VSIP_TO_HVIP_SHIFT) & VSIP_VALID_MASK;
	} else
		reg_val = ((unsigned long *)csr)[reg_num];

	if (copy_to_user(uaddr, &reg_val, KVM_REG_SIZE(reg->id)))
		return -EFAULT;

	return 0;
}
static int kvm_riscv_vcpu_set_reg_csr(struct kvm_vcpu *vcpu,
				      const struct kvm_one_reg *reg)
{
	struct kvm_vcpu_csr *csr = &vcpu->arch.guest_csr;
	unsigned long __user *uaddr =
			(unsigned long __user *)(unsigned long)reg->addr;
	unsigned long reg_num = reg->id & ~(KVM_REG_ARCH_MASK |
					    KVM_REG_SIZE_MASK |
					    KVM_REG_RISCV_CSR);
	unsigned long reg_val;

	if (KVM_REG_SIZE(reg->id) != sizeof(unsigned long))
		return -EINVAL;
	if (reg_num >= sizeof(struct kvm_riscv_csr) / sizeof(unsigned long))
		return -EINVAL;

	if (copy_from_user(&reg_val, uaddr, KVM_REG_SIZE(reg->id)))
		return -EFAULT;

	if (reg_num == KVM_REG_RISCV_CSR_REG(sip)) {
		reg_val &= VSIP_VALID_MASK;
		reg_val <<= VSIP_TO_HVIP_SHIFT;
	}

	((unsigned long *)csr)[reg_num] = reg_val;

	if (reg_num == KVM_REG_RISCV_CSR_REG(sip))
		WRITE_ONCE(vcpu->arch.irqs_pending_mask, 0);

	return 0;
}
static int kvm_riscv_vcpu_get_reg_isa_ext(struct kvm_vcpu *vcpu,
					  const struct kvm_one_reg *reg)
{
	unsigned long __user *uaddr =
			(unsigned long __user *)(unsigned long)reg->addr;
	unsigned long reg_num = reg->id & ~(KVM_REG_ARCH_MASK |
					    KVM_REG_SIZE_MASK |
					    KVM_REG_RISCV_ISA_EXT);
	unsigned long reg_val = 0;
	unsigned long host_isa_ext;

	if (KVM_REG_SIZE(reg->id) != sizeof(unsigned long))
		return -EINVAL;

	if (reg_num >= KVM_RISCV_ISA_EXT_MAX ||
	    reg_num >= ARRAY_SIZE(kvm_isa_ext_arr))
		return -EINVAL;

	host_isa_ext = kvm_isa_ext_arr[reg_num];
	if (__riscv_isa_extension_available(vcpu->arch.isa, host_isa_ext))
		reg_val = 1; /* Mark the given extension as available */

	if (copy_to_user(uaddr, &reg_val, KVM_REG_SIZE(reg->id)))
		return -EFAULT;

	return 0;
}
static int kvm_riscv_vcpu_set_reg_isa_ext(struct kvm_vcpu *vcpu,
					  const struct kvm_one_reg *reg)
{
	unsigned long __user *uaddr =
			(unsigned long __user *)(unsigned long)reg->addr;
	unsigned long reg_num = reg->id & ~(KVM_REG_ARCH_MASK |
					    KVM_REG_SIZE_MASK |
					    KVM_REG_RISCV_ISA_EXT);
	unsigned long reg_val;
	unsigned long host_isa_ext;

	if (KVM_REG_SIZE(reg->id) != sizeof(unsigned long))
		return -EINVAL;

	if (reg_num >= KVM_RISCV_ISA_EXT_MAX ||
	    reg_num >= ARRAY_SIZE(kvm_isa_ext_arr))
		return -EINVAL;

	if (copy_from_user(&reg_val, uaddr, KVM_REG_SIZE(reg->id)))
		return -EFAULT;

	host_isa_ext = kvm_isa_ext_arr[reg_num];
	if (!__riscv_isa_extension_available(NULL, host_isa_ext))
		return -EOPNOTSUPP;

	if (!vcpu->arch.ran_atleast_once) {
		/*
		 * All multi-letter extensions and a few single-letter
		 * extensions can be disabled.
		 */
		if (reg_val == 1 &&
		    kvm_riscv_vcpu_isa_enable_allowed(reg_num))
			set_bit(host_isa_ext, vcpu->arch.isa);
		else if (!reg_val &&
			 kvm_riscv_vcpu_isa_disable_allowed(reg_num))
			clear_bit(host_isa_ext, vcpu->arch.isa);
		else
			return -EINVAL;
		kvm_riscv_vcpu_fp_reset(vcpu);
	} else {
		return -EOPNOTSUPP;
	}

	return 0;
}
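
/* Dispatch KVM_SET_ONE_REG to the handler for the register space encoded in reg->id */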
static int kvm_riscv_vcpu_set_reg(struct kvm_vcpu *vcpu,
				  const struct kvm_one_reg *reg)
{
	if ((reg->id & KVM_REG_RISCV_TYPE_MASK) == KVM_REG_RISCV_CONFIG)
		return kvm_riscv_vcpu_set_reg_config(vcpu, reg);
	else if ((reg->id & KVM_REG_RISCV_TYPE_MASK) == KVM_REG_RISCV_CORE)
		return kvm_riscv_vcpu_set_reg_core(vcpu, reg);
	else if ((reg->id & KVM_REG_RISCV_TYPE_MASK) == KVM_REG_RISCV_CSR)
		return kvm_riscv_vcpu_set_reg_csr(vcpu, reg);
	else if ((reg->id & KVM_REG_RISCV_TYPE_MASK) == KVM_REG_RISCV_TIMER)
		return kvm_riscv_vcpu_set_reg_timer(vcpu, reg);
	else if ((reg->id & KVM_REG_RISCV_TYPE_MASK) == KVM_REG_RISCV_FP_F)
		return kvm_riscv_vcpu_set_reg_fp(vcpu, reg,
						 KVM_REG_RISCV_FP_F);
	else if ((reg->id & KVM_REG_RISCV_TYPE_MASK) == KVM_REG_RISCV_FP_D)
		return kvm_riscv_vcpu_set_reg_fp(vcpu, reg,
						 KVM_REG_RISCV_FP_D);
	else if ((reg->id & KVM_REG_RISCV_TYPE_MASK) == KVM_REG_RISCV_ISA_EXT)
		return kvm_riscv_vcpu_set_reg_isa_ext(vcpu, reg);

	return -EINVAL;
}
static int kvm_riscv_vcpu_get_reg(struct kvm_vcpu *vcpu,
				  const struct kvm_one_reg *reg)
{
	if ((reg->id & KVM_REG_RISCV_TYPE_MASK) == KVM_REG_RISCV_CONFIG)
		return kvm_riscv_vcpu_get_reg_config(vcpu, reg);
	else if ((reg->id & KVM_REG_RISCV_TYPE_MASK) == KVM_REG_RISCV_CORE)
		return kvm_riscv_vcpu_get_reg_core(vcpu, reg);
	else if ((reg->id & KVM_REG_RISCV_TYPE_MASK) == KVM_REG_RISCV_CSR)
		return kvm_riscv_vcpu_get_reg_csr(vcpu, reg);
	else if ((reg->id & KVM_REG_RISCV_TYPE_MASK) == KVM_REG_RISCV_TIMER)
		return kvm_riscv_vcpu_get_reg_timer(vcpu, reg);
	else if ((reg->id & KVM_REG_RISCV_TYPE_MASK) == KVM_REG_RISCV_FP_F)
		return kvm_riscv_vcpu_get_reg_fp(vcpu, reg,
						 KVM_REG_RISCV_FP_F);
	else if ((reg->id & KVM_REG_RISCV_TYPE_MASK) == KVM_REG_RISCV_FP_D)
		return kvm_riscv_vcpu_get_reg_fp(vcpu, reg,
						 KVM_REG_RISCV_FP_D);
	else if ((reg->id & KVM_REG_RISCV_TYPE_MASK) == KVM_REG_RISCV_ISA_EXT)
		return kvm_riscv_vcpu_get_reg_isa_ext(vcpu, reg);

	return -EINVAL;
}
long kvm_arch_vcpu_async_ioctl(struct file *filp,
			       unsigned int ioctl, unsigned long arg)
{
	struct kvm_vcpu *vcpu = filp->private_data;
	void __user *argp = (void __user *)arg;

	if (ioctl == KVM_INTERRUPT) {
		struct kvm_interrupt irq;

		if (copy_from_user(&irq, argp, sizeof(irq)))
			return -EFAULT;

		if (irq.irq == KVM_INTERRUPT_SET)
			return kvm_riscv_vcpu_set_interrupt(vcpu, IRQ_VS_EXT);
		else
			return kvm_riscv_vcpu_unset_interrupt(vcpu, IRQ_VS_EXT);
	}

	return -ENOIOCTLCMD;
}
long kvm_arch_vcpu_ioctl(struct file *filp,
			 unsigned int ioctl, unsigned long arg)
{
	struct kvm_vcpu *vcpu = filp->private_data;
	void __user *argp = (void __user *)arg;
	long r = -EINVAL;

	switch (ioctl) {
	case KVM_SET_ONE_REG:
	case KVM_GET_ONE_REG: {
		struct kvm_one_reg reg;

		r = -EFAULT;
		if (copy_from_user(&reg, argp, sizeof(reg)))
			break;

		if (ioctl == KVM_SET_ONE_REG)
			r = kvm_riscv_vcpu_set_reg(vcpu, &reg);
		else
			r = kvm_riscv_vcpu_get_reg(vcpu, &reg);
		break;
	}
	default:
		break;
	}

	return r;
}
int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
				  struct kvm_sregs *sregs)
{
	return -EINVAL;
}

int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
				  struct kvm_sregs *sregs)
{
	return -EINVAL;
}

int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
	return -EINVAL;
}

int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
	return -EINVAL;
}

int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
				  struct kvm_translation *tr)
{
	return -EINVAL;
}

int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
	return -EINVAL;
}

int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
	return -EINVAL;
}
void kvm_riscv_vcpu_flush_interrupts(struct kvm_vcpu *vcpu)
{
	struct kvm_vcpu_csr *csr = &vcpu->arch.guest_csr;
	unsigned long mask, val;

	if (READ_ONCE(vcpu->arch.irqs_pending_mask)) {
		mask = xchg_acquire(&vcpu->arch.irqs_pending_mask, 0);
		val = READ_ONCE(vcpu->arch.irqs_pending) & mask;

		csr->hvip &= ~mask;
		csr->hvip |= val;
	}
}
void kvm_riscv_vcpu_sync_interrupts(struct kvm_vcpu *vcpu)
{
	unsigned long hvip;
	struct kvm_vcpu_arch *v = &vcpu->arch;
	struct kvm_vcpu_csr *csr = &vcpu->arch.guest_csr;

	/* Read current HVIP and VSIE CSRs */
	csr->vsie = csr_read(CSR_VSIE);

	/* Sync-up HVIP.VSSIP bit changes done by Guest */
	hvip = csr_read(CSR_HVIP);
	if ((csr->hvip ^ hvip) & (1UL << IRQ_VS_SOFT)) {
		if (hvip & (1UL << IRQ_VS_SOFT)) {
			if (!test_and_set_bit(IRQ_VS_SOFT,
					      &v->irqs_pending_mask))
				set_bit(IRQ_VS_SOFT, &v->irqs_pending);
		} else {
			if (!test_and_set_bit(IRQ_VS_SOFT,
					      &v->irqs_pending_mask))
				clear_bit(IRQ_VS_SOFT, &v->irqs_pending);
		}
	}

	/* Sync-up timer CSRs */
	kvm_riscv_vcpu_timer_sync(vcpu);
}
int kvm_riscv_vcpu_set_interrupt(struct kvm_vcpu *vcpu, unsigned int irq)
{
	if (irq != IRQ_VS_SOFT &&
	    irq != IRQ_VS_TIMER &&
	    irq != IRQ_VS_EXT)
		return -EINVAL;

	set_bit(irq, &vcpu->arch.irqs_pending);
	smp_mb__before_atomic();
	set_bit(irq, &vcpu->arch.irqs_pending_mask);

	kvm_vcpu_kick(vcpu);

	return 0;
}

int kvm_riscv_vcpu_unset_interrupt(struct kvm_vcpu *vcpu, unsigned int irq)
{
	if (irq != IRQ_VS_SOFT &&
	    irq != IRQ_VS_TIMER &&
	    irq != IRQ_VS_EXT)
		return -EINVAL;

	clear_bit(irq, &vcpu->arch.irqs_pending);
	smp_mb__before_atomic();
	set_bit(irq, &vcpu->arch.irqs_pending_mask);

	return 0;
}
bool kvm_riscv_vcpu_has_interrupts(struct kvm_vcpu *vcpu, unsigned long mask)
{
	unsigned long ie = ((vcpu->arch.guest_csr.vsie & VSIP_VALID_MASK)
			    << VSIP_TO_HVIP_SHIFT) & mask;

	return (READ_ONCE(vcpu->arch.irqs_pending) & ie) ? true : false;
}
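
/*
 * A powered-off VCPU parks in KVM_REQ_SLEEP until user-space or the
 * guest's SBI HSM calls power it back on.
 */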
void kvm_riscv_vcpu_power_off(struct kvm_vcpu *vcpu)
{
	vcpu->arch.power_off = true;
	kvm_make_request(KVM_REQ_SLEEP, vcpu);
	kvm_vcpu_kick(vcpu);
}

void kvm_riscv_vcpu_power_on(struct kvm_vcpu *vcpu)
{
	vcpu->arch.power_off = false;
	kvm_vcpu_wake_up(vcpu);
}

int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
				    struct kvm_mp_state *mp_state)
{
	if (vcpu->arch.power_off)
		mp_state->mp_state = KVM_MP_STATE_STOPPED;
	else
		mp_state->mp_state = KVM_MP_STATE_RUNNABLE;

	return 0;
}
int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
				    struct kvm_mp_state *mp_state)
{
	int ret = 0;

	switch (mp_state->mp_state) {
	case KVM_MP_STATE_RUNNABLE:
		vcpu->arch.power_off = false;
		break;
	case KVM_MP_STATE_STOPPED:
		kvm_riscv_vcpu_power_off(vcpu);
		break;
	default:
		ret = -EINVAL;
	}

	return ret;
}
int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
					struct kvm_guest_debug *dbg)
{
	/* TODO: To be implemented later. */
	return -EINVAL;
}
static void kvm_riscv_vcpu_update_config(const unsigned long *isa)
{
	u64 henvcfg = 0;

	if (riscv_isa_extension_available(isa, SVPBMT))
		henvcfg |= ENVCFG_PBMTE;

	if (riscv_isa_extension_available(isa, SSTC))
		henvcfg |= ENVCFG_STCE;

	if (riscv_isa_extension_available(isa, ZICBOM))
		henvcfg |= (ENVCFG_CBIE | ENVCFG_CBCFE);

	csr_write(CSR_HENVCFG, henvcfg);
#ifdef CONFIG_32BIT
	csr_write(CSR_HENVCFGH, henvcfg >> 32);
#endif
}
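
/*
 * Install the guest's VS-level CSR state and FP context on this CPU;
 * kvm_arch_vcpu_put() below saves it all back when the VCPU is descheduled.
 */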
void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
	struct kvm_vcpu_csr *csr = &vcpu->arch.guest_csr;

	csr_write(CSR_VSSTATUS, csr->vsstatus);
	csr_write(CSR_VSIE, csr->vsie);
	csr_write(CSR_VSTVEC, csr->vstvec);
	csr_write(CSR_VSSCRATCH, csr->vsscratch);
	csr_write(CSR_VSEPC, csr->vsepc);
	csr_write(CSR_VSCAUSE, csr->vscause);
	csr_write(CSR_VSTVAL, csr->vstval);
	csr_write(CSR_HVIP, csr->hvip);
	csr_write(CSR_VSATP, csr->vsatp);

	kvm_riscv_vcpu_update_config(vcpu->arch.isa);

	kvm_riscv_gstage_update_hgatp(vcpu);

	kvm_riscv_vcpu_timer_restore(vcpu);

	kvm_riscv_vcpu_host_fp_save(&vcpu->arch.host_context);
	kvm_riscv_vcpu_guest_fp_restore(&vcpu->arch.guest_context,
					vcpu->arch.isa);

	vcpu->cpu = cpu;
}
void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
{
	struct kvm_vcpu_csr *csr = &vcpu->arch.guest_csr;

	vcpu->cpu = -1;

	kvm_riscv_vcpu_guest_fp_save(&vcpu->arch.guest_context,
				     vcpu->arch.isa);
	kvm_riscv_vcpu_host_fp_restore(&vcpu->arch.host_context);

	kvm_riscv_vcpu_timer_save(vcpu);

	csr->vsstatus = csr_read(CSR_VSSTATUS);
	csr->vsie = csr_read(CSR_VSIE);
	csr->vstvec = csr_read(CSR_VSTVEC);
	csr->vsscratch = csr_read(CSR_VSSCRATCH);
	csr->vsepc = csr_read(CSR_VSEPC);
	csr->vscause = csr_read(CSR_VSCAUSE);
	csr->vstval = csr_read(CSR_VSTVAL);
	csr->hvip = csr_read(CSR_HVIP);
	csr->vsatp = csr_read(CSR_VSATP);
}
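
/* Handle all deferred VCPU requests (sleep, reset, fences) before guest entry */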
static void kvm_riscv_check_vcpu_requests(struct kvm_vcpu *vcpu)
{
	struct rcuwait *wait = kvm_arch_vcpu_get_wait(vcpu);

	if (kvm_request_pending(vcpu)) {
		if (kvm_check_request(KVM_REQ_SLEEP, vcpu)) {
			kvm_vcpu_srcu_read_unlock(vcpu);
			rcuwait_wait_event(wait,
				(!vcpu->arch.power_off) && (!vcpu->arch.pause),
				TASK_INTERRUPTIBLE);
			kvm_vcpu_srcu_read_lock(vcpu);

			if (vcpu->arch.power_off || vcpu->arch.pause) {
				/*
				 * Awakened to handle a signal; request that
				 * we sleep again later.
				 */
				kvm_make_request(KVM_REQ_SLEEP, vcpu);
			}
		}

		if (kvm_check_request(KVM_REQ_VCPU_RESET, vcpu))
			kvm_riscv_reset_vcpu(vcpu);

		if (kvm_check_request(KVM_REQ_UPDATE_HGATP, vcpu))
			kvm_riscv_gstage_update_hgatp(vcpu);

		if (kvm_check_request(KVM_REQ_FENCE_I, vcpu))
			kvm_riscv_fence_i_process(vcpu);

		/*
		 * The generic KVM_REQ_TLB_FLUSH is the same as
		 * KVM_REQ_HFENCE_GVMA_VMID_ALL
		 */
		if (kvm_check_request(KVM_REQ_HFENCE_GVMA_VMID_ALL, vcpu))
			kvm_riscv_hfence_gvma_vmid_all_process(vcpu);

		if (kvm_check_request(KVM_REQ_HFENCE_VVMA_ALL, vcpu))
			kvm_riscv_hfence_vvma_all_process(vcpu);

		if (kvm_check_request(KVM_REQ_HFENCE, vcpu))
			kvm_riscv_hfence_process(vcpu);
	}
}
static void kvm_riscv_update_hvip(struct kvm_vcpu *vcpu)
{
	struct kvm_vcpu_csr *csr = &vcpu->arch.guest_csr;

	csr_write(CSR_HVIP, csr->hvip);
}
/*
 * Actually run the vCPU, entering an RCU extended quiescent state (EQS) while
 * the vCPU is running.
 *
 * This must be noinstr as instrumentation may make use of RCU, and this is not
 * safe during the EQS.
 */
static void noinstr kvm_riscv_vcpu_enter_exit(struct kvm_vcpu *vcpu)
{
	guest_state_enter_irqoff();
	__kvm_riscv_switch_to(&vcpu->arch);
	vcpu->arch.last_exit_cpu = vcpu->cpu;
	guest_state_exit_irqoff();
}
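
/*
 * The main KVM_RUN loop: first complete any in-flight MMIO/SBI/CSR exit
 * returned by user-space, then repeatedly enter the guest until an exit
 * must be forwarded to user-space or a signal is pending.
 */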
int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu)
{
	int ret;
	struct kvm_cpu_trap trap;
	struct kvm_run *run = vcpu->run;

	/* Mark this VCPU ran at least once */
	vcpu->arch.ran_atleast_once = true;

	kvm_vcpu_srcu_read_lock(vcpu);

	switch (run->exit_reason) {
	case KVM_EXIT_MMIO:
		/* Process MMIO value returned from user-space */
		ret = kvm_riscv_vcpu_mmio_return(vcpu, vcpu->run);
		break;
	case KVM_EXIT_RISCV_SBI:
		/* Process SBI value returned from user-space */
		ret = kvm_riscv_vcpu_sbi_return(vcpu, vcpu->run);
		break;
	case KVM_EXIT_RISCV_CSR:
		/* Process CSR value returned from user-space */
		ret = kvm_riscv_vcpu_csr_return(vcpu, vcpu->run);
		break;
	default:
		ret = 0;
		break;
	}
	if (ret) {
		kvm_vcpu_srcu_read_unlock(vcpu);
		return ret;
	}

	if (run->immediate_exit) {
		kvm_vcpu_srcu_read_unlock(vcpu);
		return -EINTR;
	}

	vcpu_load(vcpu);

	kvm_sigset_activate(vcpu);

	ret = 1;
	run->exit_reason = KVM_EXIT_UNKNOWN;
	while (ret > 0) {
		/* Check conditions before entering the guest */
		ret = xfer_to_guest_mode_handle_work(vcpu);
		if (ret)
			continue;
		ret = 1;
		kvm_riscv_gstage_vmid_update(vcpu);

		kvm_riscv_check_vcpu_requests(vcpu);

		preempt_disable();

		local_irq_disable();

		/*
		 * Ensure we set mode to IN_GUEST_MODE after we disable
		 * interrupts and before the final VCPU requests check.
		 * See the comment in kvm_vcpu_exiting_guest_mode() and
		 * Documentation/virt/kvm/vcpu-requests.rst
		 */
		vcpu->mode = IN_GUEST_MODE;

		kvm_vcpu_srcu_read_unlock(vcpu);
		smp_mb__after_srcu_read_unlock();

		/*
		 * We might have got VCPU interrupts updated asynchronously
		 * so update it in HW.
		 */
		kvm_riscv_vcpu_flush_interrupts(vcpu);

		/* Update HVIP CSR for current CPU */
		kvm_riscv_update_hvip(vcpu);

		if (ret <= 0 ||
		    kvm_riscv_gstage_vmid_ver_changed(&vcpu->kvm->arch.vmid) ||
		    kvm_request_pending(vcpu) ||
		    xfer_to_guest_mode_work_pending()) {
			vcpu->mode = OUTSIDE_GUEST_MODE;
			local_irq_enable();
			preempt_enable();
			kvm_vcpu_srcu_read_lock(vcpu);
			continue;
		}

		/*
		 * Cleanup stale TLB entries
		 *
		 * Note: This should be done after the G-stage VMID has been
		 * updated using kvm_riscv_gstage_vmid_ver_changed()
		 */
		kvm_riscv_local_tlb_sanitize(vcpu);
		guest_timing_enter_irqoff();

		kvm_riscv_vcpu_enter_exit(vcpu);

		vcpu->mode = OUTSIDE_GUEST_MODE;
		vcpu->stat.exits++;

		/*
		 * Save SCAUSE, STVAL, HTVAL, and HTINST because we might
		 * get an interrupt between __kvm_riscv_switch_to() and
		 * local_irq_enable() which can potentially change CSRs.
		 */
		trap.sepc = vcpu->arch.guest_context.sepc;
		trap.scause = csr_read(CSR_SCAUSE);
		trap.stval = csr_read(CSR_STVAL);
		trap.htval = csr_read(CSR_HTVAL);
		trap.htinst = csr_read(CSR_HTINST);

		/* Syncup interrupts state with HW */
		kvm_riscv_vcpu_sync_interrupts(vcpu);

		/*
		 * We must ensure that any pending interrupts are taken before
		 * we exit guest timing so that timer ticks are accounted as
		 * guest time. Transiently unmask interrupts so that any
		 * pending interrupts are taken.
		 *
		 * There's no barrier which ensures that pending interrupts are
		 * recognised, so we just hope that the CPU takes any pending
		 * interrupts between the enable and disable.
		 */
		local_irq_enable();
		local_irq_disable();

		guest_timing_exit_irqoff();

		local_irq_enable();

		preempt_enable();

		kvm_vcpu_srcu_read_lock(vcpu);

		ret = kvm_riscv_vcpu_exit(vcpu, run, &trap);
	}

	kvm_sigset_deactivate(vcpu);

	vcpu_put(vcpu);

	kvm_vcpu_srcu_read_unlock(vcpu);

	return ret;
}