// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2020-2023 Loongson Technology Corporation Limited
 */

#include <linux/kvm_host.h>
#include <linux/entry-kvm.h>
#include <asm/fpu.h>
#include <asm/loongarch.h>
#include <asm/setup.h>
#include <asm/time.h>

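/*
 * Note: CREATE_TRACE_POINTS must be defined before including the trace
 * header below so that the tracepoint definitions are instantiated in
 * this translation unit.
 */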
#define CREATE_TRACE_POINTS
#include "trace.h"

const struct _kvm_stats_desc kvm_vcpu_stats_desc[] = {
	KVM_GENERIC_VCPU_STATS(),
	STATS_DESC_COUNTER(VCPU, int_exits),
	STATS_DESC_COUNTER(VCPU, idle_exits),
	STATS_DESC_COUNTER(VCPU, cpucfg_exits),
	STATS_DESC_COUNTER(VCPU, signal_exits),
};

const struct kvm_stats_header kvm_vcpu_stats_header = {
	.name_size = KVM_STATS_NAME_SIZE,
	.num_desc = ARRAY_SIZE(kvm_vcpu_stats_desc),
	.id_offset = sizeof(struct kvm_stats_header),
	.desc_offset = sizeof(struct kvm_stats_header) + KVM_STATS_NAME_SIZE,
	.data_offset = sizeof(struct kvm_stats_header) + KVM_STATS_NAME_SIZE +
		       sizeof(kvm_vcpu_stats_desc),
};
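
/*
 * Note: the offsets above describe the layout of the binary stats blob
 * exposed through the KVM stats file descriptor: the header is followed
 * by the id string, then the descriptor array, then the data values.
 */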

/*
 * kvm_check_requests - check and handle pending vCPU requests
 *
 * Return: RESUME_GUEST if we should enter the guest
 *         RESUME_HOST  if we should exit to userspace
 */
static int kvm_check_requests(struct kvm_vcpu *vcpu)
{
	if (!kvm_request_pending(vcpu))
		return RESUME_GUEST;

	if (kvm_check_request(KVM_REQ_TLB_FLUSH, vcpu))
		vcpu->arch.vpid = 0;	/* Drop vpid for this vCPU */

	if (kvm_dirty_ring_check_request(vcpu))
		return RESUME_HOST;

	return RESUME_GUEST;
}

/*
 * Check and handle pending signals and vCPU requests etc.
 * Run with irqs and preemption enabled.
 *
 * Return: RESUME_GUEST if we should enter the guest
 *         RESUME_HOST  if we should exit to userspace
 *         < 0 if we should exit to userspace, where the return value
 *         indicates an error
 */
static int kvm_enter_guest_check(struct kvm_vcpu *vcpu)
{
	int ret;

	/*
	 * Check conditions before entering the guest
	 */
	ret = xfer_to_guest_mode_handle_work(vcpu);
	if (ret < 0)
		return ret;

	ret = kvm_check_requests(vcpu);

	return ret;
}

/*
 * Called with irqs enabled
 *
 * Return: RESUME_GUEST if we should enter the guest, with irqs disabled
 *         Others if we should exit to userspace
 */
static int kvm_pre_enter_guest(struct kvm_vcpu *vcpu)
{
	int ret;

	do {
		ret = kvm_enter_guest_check(vcpu);
		if (ret != RESUME_GUEST)
			break;

		/*
		 * Handle the vcpu timer, deliver pending interrupts and
		 * exceptions, and check the vpid before the vcpu enters
		 * the guest
		 */
		local_irq_disable();
		kvm_acquire_timer(vcpu);
		kvm_deliver_intr(vcpu);
		kvm_deliver_exception(vcpu);
		/* Make sure the vcpu mode has been written */
		smp_store_mb(vcpu->mode, IN_GUEST_MODE);
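		/* Note: make sure this vCPU holds a valid VPID on this physical CPU */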
		kvm_check_vpid(vcpu);
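		/* Note: record the host exception entry so the exit path can restore it */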
		vcpu->arch.host_eentry = csr_read64(LOONGARCH_CSR_EENTRY);
		/* Clear KVM_LARCH_SWCSR_LATEST as the CSRs change while the guest runs */
		vcpu->arch.aux_inuse &= ~KVM_LARCH_SWCSR_LATEST;

		if (kvm_request_pending(vcpu) || xfer_to_guest_mode_work_pending()) {
			/* Make sure the vcpu mode has been written */
			smp_store_mb(vcpu->mode, OUTSIDE_GUEST_MODE);
			local_irq_enable();
			ret = -EAGAIN;
		}
	} while (ret != RESUME_GUEST);

	return ret;
}

/*
 * Return 1 for resume guest and "<= 0" for resume host.
 */
static int kvm_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu)
{
	int ret = RESUME_GUEST;
	unsigned long estat = vcpu->arch.host_estat;
	u32 intr = estat & 0x1fff; /* Ignore NMI */
	u32 ecode = (estat & CSR_ESTAT_EXC) >> CSR_ESTAT_EXC_SHIFT;

	vcpu->mode = OUTSIDE_GUEST_MODE;

	/* Set a default exit reason */
	run->exit_reason = KVM_EXIT_UNKNOWN;

	guest_timing_exit_irqoff();
	guest_state_exit_irqoff();
	local_irq_enable();

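	/*
	 * Note: a non-zero ecode is a guest exception to be handled; ecode 0
	 * with a pending interrupt bit means the exit was caused by a host
	 * interrupt.
	 */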
	trace_kvm_exit(vcpu, ecode);
	if (ecode) {
		ret = kvm_handle_fault(vcpu, ecode);
	} else {
		WARN(!intr, "vm exiting with suspicious irq\n");
		++vcpu->stat.int_exits;
	}

	if (ret == RESUME_GUEST)
		ret = kvm_pre_enter_guest(vcpu);

	if (ret != RESUME_GUEST) {
		local_irq_disable();
		return ret;
	}

	guest_timing_enter_irqoff();
	guest_state_enter_irqoff();
	trace_kvm_reenter(vcpu);

	return RESUME_GUEST;
}

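/*
 * Note: a vCPU is runnable only when it has a pending interrupt and its
 * mp_state is RUNNABLE.
 */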
int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu)
{
	return !!(vcpu->arch.irq_pending) &&
		vcpu->arch.mp_state.mp_state == KVM_MP_STATE_RUNNABLE;
}

int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu)
{
	return kvm_vcpu_exiting_guest_mode(vcpu) == IN_GUEST_MODE;
}

bool kvm_arch_vcpu_in_kernel(struct kvm_vcpu *vcpu)
{
	return false;
}

vm_fault_t kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf)
{
	return VM_FAULT_SIGBUS;
}

int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
				  struct kvm_translation *tr)
{
	return -EINVAL;
}

int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu)
{
	return kvm_pending_timer(vcpu) ||
		kvm_read_hw_gcsr(LOONGARCH_CSR_ESTAT) & (1 << INT_TI);
}

int kvm_arch_vcpu_dump_regs(struct kvm_vcpu *vcpu)
{
	int i;

	kvm_debug("vCPU Register Dump:\n");
	kvm_debug("\tPC = 0x%08lx\n", vcpu->arch.pc);
	kvm_debug("\tExceptions: %08lx\n", vcpu->arch.irq_pending);

	for (i = 0; i < 32; i += 4) {
		kvm_debug("\tGPR%02d: %08lx %08lx %08lx %08lx\n", i,
			  vcpu->arch.gprs[i], vcpu->arch.gprs[i + 1],
			  vcpu->arch.gprs[i + 2], vcpu->arch.gprs[i + 3]);
	}

	kvm_debug("\tCRMD: 0x%08lx, ESTAT: 0x%08lx\n",
		  kvm_read_hw_gcsr(LOONGARCH_CSR_CRMD),
		  kvm_read_hw_gcsr(LOONGARCH_CSR_ESTAT));

	kvm_debug("\tERA: 0x%08lx\n", kvm_read_hw_gcsr(LOONGARCH_CSR_ERA));

	return 0;
}

int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
				    struct kvm_mp_state *mp_state)
{
	*mp_state = vcpu->arch.mp_state;

	return 0;
}

int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
				    struct kvm_mp_state *mp_state)
{
	int ret = 0;

	switch (mp_state->mp_state) {
	case KVM_MP_STATE_RUNNABLE:
		vcpu->arch.mp_state = *mp_state;
		break;
	default:
		ret = -EINVAL;
	}

	return ret;
}

int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
					struct kvm_guest_debug *dbg)
{
	return -EINVAL;
}

/**
 * kvm_migrate_count() - Migrate timer.
 * @vcpu:	Virtual CPU.
 *
 * Migrate hrtimer to the current CPU by cancelling and restarting it
 * if the hrtimer is active.
 *
 * Must be called when the vCPU is migrated to a different CPU, so that
 * the timer can interrupt the guest at the new CPU, and the timer irq can
 * be delivered to the vCPU.
 */
static void kvm_migrate_count(struct kvm_vcpu *vcpu)
{
	if (hrtimer_cancel(&vcpu->arch.swtimer))
		hrtimer_restart(&vcpu->arch.swtimer);
}

static int _kvm_getcsr(struct kvm_vcpu *vcpu, unsigned int id, u64 *val)
{
	unsigned long gintc;
	struct loongarch_csrs *csr = vcpu->arch.csr;

	if (get_gcsr_flag(id) & INVALID_GCSR)
		return -EINVAL;

	if (id == LOONGARCH_CSR_ESTAT) {
		/* ESTAT IP0~IP7 get from GINTC */
		gintc = kvm_read_sw_gcsr(csr, LOONGARCH_CSR_GINTC) & 0xff;
		*val = kvm_read_sw_gcsr(csr, LOONGARCH_CSR_ESTAT) | (gintc << 2);
		return 0;
	}

	/*
	 * Get software CSR state since software state is consistent
	 * with hardware for synchronous ioctl
	 */
	*val = kvm_read_sw_gcsr(csr, id);

	return 0;
}

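/*
 * Note on the ESTAT image: bits 1:0 (software interrupts) live in the
 * guest ESTAT CSR itself, while the hardware interrupt pending bits
 * IP0~IP7 (bits 9:2) are routed through GINTC so that hardware injects
 * them into the guest.
 */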
static int _kvm_setcsr(struct kvm_vcpu *vcpu, unsigned int id, u64 val)
{
	int ret = 0, gintc;
	struct loongarch_csrs *csr = vcpu->arch.csr;

	if (get_gcsr_flag(id) & INVALID_GCSR)
		return -EINVAL;

	if (id == LOONGARCH_CSR_ESTAT) {
		/* ESTAT IP0~IP7 inject through GINTC */
		gintc = (val >> 2) & 0xff;
		kvm_set_sw_gcsr(csr, LOONGARCH_CSR_GINTC, gintc);

		gintc = val & ~(0xffUL << 2);
		kvm_set_sw_gcsr(csr, LOONGARCH_CSR_ESTAT, gintc);

		return ret;
	}

	kvm_write_sw_gcsr(csr, id, val);

	return ret;
}

static int kvm_get_one_reg(struct kvm_vcpu *vcpu,
		const struct kvm_one_reg *reg, u64 *v)
{
	int id, ret = 0;
	u64 type = reg->id & KVM_REG_LOONGARCH_MASK;

	switch (type) {
	case KVM_REG_LOONGARCH_CSR:
		id = KVM_GET_IOC_CSR_IDX(reg->id);
		ret = _kvm_getcsr(vcpu, id, v);
		break;
	case KVM_REG_LOONGARCH_CPUCFG:
		id = KVM_GET_IOC_CPUCFG_IDX(reg->id);
		if (id >= 0 && id < KVM_MAX_CPUCFG_REGS)
			*v = vcpu->arch.cpucfg[id];
		else
			ret = -EINVAL;
		break;
	case KVM_REG_LOONGARCH_KVM:
		switch (reg->id) {
		case KVM_REG_LOONGARCH_COUNTER:
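			/* Guest view of the stable counter: host counter plus the VM-wide offset */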
			*v = drdtime() + vcpu->kvm->arch.time_offset;
			break;
		default:
			ret = -EINVAL;
			break;
		}
		break;
	default:
		ret = -EINVAL;
		break;
	}

	return ret;
}

static int kvm_get_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
{
	int ret = 0;
	u64 v, size = reg->id & KVM_REG_SIZE_MASK;

	switch (size) {
	case KVM_REG_SIZE_U64:
		ret = kvm_get_one_reg(vcpu, reg, &v);
		if (ret)
			return ret;
		ret = put_user(v, (u64 __user *)(long)reg->addr);
		break;
	default:
		ret = -EINVAL;
		break;
	}

	return ret;
}

static int kvm_set_one_reg(struct kvm_vcpu *vcpu,
			const struct kvm_one_reg *reg, u64 v)
{
	int id, ret = 0;
	u64 type = reg->id & KVM_REG_LOONGARCH_MASK;

	switch (type) {
	case KVM_REG_LOONGARCH_CSR:
		id = KVM_GET_IOC_CSR_IDX(reg->id);
		ret = _kvm_setcsr(vcpu, id, v);
		break;
	case KVM_REG_LOONGARCH_CPUCFG:
		id = KVM_GET_IOC_CPUCFG_IDX(reg->id);
		if (id >= 0 && id < KVM_MAX_CPUCFG_REGS)
			vcpu->arch.cpucfg[id] = (u32)v;
		else
			ret = -EINVAL;
		break;
	case KVM_REG_LOONGARCH_KVM:
		switch (reg->id) {
		case KVM_REG_LOONGARCH_COUNTER:
			/*
			 * gftoffset is relative to the board, not a vcpu;
			 * set it only once, from the first vCPU, on an SMP
			 * system
			 */
			if (vcpu->vcpu_id == 0)
				vcpu->kvm->arch.time_offset = (signed long)(v - drdtime());
			break;
		case KVM_REG_LOONGARCH_VCPU_RESET:
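			/* Note: reset drops timer state and all queued/cleared interrupt bookkeeping */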
			kvm_reset_timer(vcpu);
			memset(&vcpu->arch.irq_pending, 0, sizeof(vcpu->arch.irq_pending));
			memset(&vcpu->arch.irq_clear, 0, sizeof(vcpu->arch.irq_clear));
			break;
		default:
			ret = -EINVAL;
			break;
		}
		break;
	default:
		ret = -EINVAL;
		break;
	}

	return ret;
}

static int kvm_set_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
{
	int ret = 0;
	u64 v, size = reg->id & KVM_REG_SIZE_MASK;

	switch (size) {
	case KVM_REG_SIZE_U64:
		ret = get_user(v, (u64 __user *)(long)reg->addr);
		if (ret)
			return ret;
		break;
	default:
		return -EINVAL;
	}

	return kvm_set_one_reg(vcpu, reg, v);
}

int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs)
{
	return -ENOIOCTLCMD;
}

int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs)
{
	return -ENOIOCTLCMD;
}

int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(vcpu->arch.gprs); i++)
		regs->gpr[i] = vcpu->arch.gprs[i];

	regs->pc = vcpu->arch.pc;

	return 0;
}

int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
	int i;

	for (i = 1; i < ARRAY_SIZE(vcpu->arch.gprs); i++)
		vcpu->arch.gprs[i] = regs->gpr[i];

	vcpu->arch.gprs[0] = 0; /* zero is special, and cannot be set. */
	vcpu->arch.pc = regs->pc;

	return 0;
}

static int kvm_vcpu_ioctl_enable_cap(struct kvm_vcpu *vcpu,
				     struct kvm_enable_cap *cap)
{
	/* FPU is enabled by default; LSX/LASX support will come later. */
	return -EINVAL;
}

long kvm_arch_vcpu_ioctl(struct file *filp,
			 unsigned int ioctl, unsigned long arg)
{
	long r;
	void __user *argp = (void __user *)arg;
	struct kvm_vcpu *vcpu = filp->private_data;

	/*
	 * Only software CSR state should be modified here.
	 *
	 * If any hardware CSR register must be modified, a vcpu_load/vcpu_put
	 * pair should be used. Since the CSR registers are owned by this
	 * vcpu, when we switch to another vcpu, that vcpu must reload them.
	 *
	 * If software CSR state is modified, bit KVM_LARCH_HWCSR_USABLE
	 * should be cleared in vcpu->arch.aux_inuse, and vcpu_load will check
	 * the aux_inuse flag and reload the CSR registers from software.
	 */

	switch (ioctl) {
	case KVM_SET_ONE_REG:
	case KVM_GET_ONE_REG: {
		struct kvm_one_reg reg;

		r = -EFAULT;
		if (copy_from_user(&reg, argp, sizeof(reg)))
			break;
		if (ioctl == KVM_SET_ONE_REG) {
			r = kvm_set_reg(vcpu, &reg);
			vcpu->arch.aux_inuse &= ~KVM_LARCH_HWCSR_USABLE;
		} else
			r = kvm_get_reg(vcpu, &reg);
		break;
	}
	case KVM_ENABLE_CAP: {
		struct kvm_enable_cap cap;

		r = -EFAULT;
		if (copy_from_user(&cap, argp, sizeof(cap)))
			break;
		r = kvm_vcpu_ioctl_enable_cap(vcpu, &cap);
		break;
	}
	default:
		r = -ENOIOCTLCMD;
		break;
	}

	return r;
}

int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
	int i;

	fpu->fcc = vcpu->arch.fpu.fcc;
	fpu->fcsr = vcpu->arch.fpu.fcsr;
	for (i = 0; i < NUM_FPU_REGS; i++)
		memcpy(&fpu->fpr[i], &vcpu->arch.fpu.fpr[i], FPU_REG_WIDTH / 64);

	return 0;
}

int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
	int i;

	vcpu->arch.fpu.fcc = fpu->fcc;
	vcpu->arch.fpu.fcsr = fpu->fcsr;
	for (i = 0; i < NUM_FPU_REGS; i++)
		memcpy(&vcpu->arch.fpu.fpr[i], &fpu->fpr[i], FPU_REG_WIDTH / 64);

	return 0;
}

/* Enable FPU and restore context */
void kvm_own_fpu(struct kvm_vcpu *vcpu)
{
	preempt_disable();
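
	/*
	 * Note: preemption stays disabled so the FPU ownership flag and the
	 * hardware FPU state cannot go out of sync here.
	 */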
	/* Enable FPU */
	set_csr_euen(CSR_EUEN_FPEN);

	kvm_restore_fpu(&vcpu->arch.fpu);
	vcpu->arch.aux_inuse |= KVM_LARCH_FPU;
	trace_kvm_aux(vcpu, KVM_TRACE_AUX_RESTORE, KVM_TRACE_AUX_FPU);

	preempt_enable();
}

/* Save context and disable FPU */
void kvm_lose_fpu(struct kvm_vcpu *vcpu)
{
	preempt_disable();

	if (vcpu->arch.aux_inuse & KVM_LARCH_FPU) {
		kvm_save_fpu(&vcpu->arch.fpu);
		vcpu->arch.aux_inuse &= ~KVM_LARCH_FPU;
		trace_kvm_aux(vcpu, KVM_TRACE_AUX_SAVE, KVM_TRACE_AUX_FPU);

		/* Disable FPU */
		clear_csr_euen(CSR_EUEN_FPEN);
	}

	preempt_enable();
}

int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu, struct kvm_interrupt *irq)
{
	int intr = (int)irq->irq;

	if (intr > 0)
		kvm_queue_irq(vcpu, intr);
	else if (intr < 0)
		kvm_dequeue_irq(vcpu, -intr);
	else {
		kvm_err("%s: invalid interrupt ioctl %d\n", __func__, irq->irq);
		return -EINVAL;
	}

	kvm_vcpu_kick(vcpu);

	return 0;
}

long kvm_arch_vcpu_async_ioctl(struct file *filp,
			       unsigned int ioctl, unsigned long arg)
{
	void __user *argp = (void __user *)arg;
	struct kvm_vcpu *vcpu = filp->private_data;

	if (ioctl == KVM_INTERRUPT) {
		struct kvm_interrupt irq;

		if (copy_from_user(&irq, argp, sizeof(irq)))
			return -EFAULT;

		kvm_debug("[%d] %s: irq: %d\n", vcpu->vcpu_id, __func__, irq.irq);

		return kvm_vcpu_ioctl_interrupt(vcpu, &irq);
	}

	return -ENOIOCTLCMD;
}

int kvm_arch_vcpu_precreate(struct kvm *kvm, unsigned int id)
{
	return 0;
}

int kvm_arch_vcpu_create(struct kvm_vcpu *vcpu)
{
	unsigned long timer_hz;
	struct loongarch_csrs *csr;

	vcpu->arch.vpid = 0;

	hrtimer_init(&vcpu->arch.swtimer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS_PINNED);
	vcpu->arch.swtimer.function = kvm_swtimer_wakeup;

	vcpu->arch.handle_exit = kvm_handle_exit;
	vcpu->arch.guest_eentry = (unsigned long)kvm_loongarch_ops->exc_entry;
	vcpu->arch.csr = kzalloc(sizeof(struct loongarch_csrs), GFP_KERNEL);
	if (!vcpu->arch.csr)
		return -ENOMEM;

	/*
	 * All kvm exceptions share one exception entry, and the host <-> guest
	 * switch also switches the ECFG.VS field, so keep the host ECFG.VS
	 * value here.
	 */
	vcpu->arch.host_ecfg = (read_csr_ecfg() & CSR_ECFG_VS);

	/* Init */
	vcpu->arch.last_sched_cpu = -1;

	/*
	 * Initialize guest register state to valid architectural reset state.
	 */
	timer_hz = calc_const_freq();
	kvm_init_timer(vcpu, timer_hz);

	/* Start the guest in direct address (DA) mode, the architectural reset state */
	csr = vcpu->arch.csr;
	kvm_write_sw_gcsr(csr, LOONGARCH_CSR_CRMD, CSR_CRMD_DA);

	/* Set cpuid */
	kvm_write_sw_gcsr(csr, LOONGARCH_CSR_TMID, vcpu->vcpu_id);

	/* Start with no pending virtual guest interrupts */
	csr->csrs[LOONGARCH_CSR_GINTC] = 0;

	return 0;
}

void kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu)
{
}

void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
{
	int cpu;
	struct kvm_context *context;

	hrtimer_cancel(&vcpu->arch.swtimer);
	kvm_mmu_free_memory_cache(&vcpu->arch.mmu_page_cache);
	kfree(vcpu->arch.csr);

	/*
	 * If the vCPU is freed and reused as another vCPU, we don't want the
	 * matching pointer wrongly hanging around in last_vcpu.
	 */
	for_each_possible_cpu(cpu) {
		context = per_cpu_ptr(vcpu->kvm->arch.vmcs, cpu);
		if (context->last_vcpu == vcpu)
			context->last_vcpu = NULL;
	}
}

static int _kvm_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
	bool migrated;
	struct kvm_context *context;
	struct loongarch_csrs *csr = vcpu->arch.csr;

	/*
	 * Have we migrated to a different CPU?
	 * If so, any old guest TLB state may be stale.
	 */
	migrated = (vcpu->arch.last_sched_cpu != cpu);

	/*
	 * Was this the last vCPU to run on this CPU?
	 * If not, any old guest state from this vCPU will have been clobbered.
	 */
	context = per_cpu_ptr(vcpu->kvm->arch.vmcs, cpu);
	if (migrated || (context->last_vcpu != vcpu))
		vcpu->arch.aux_inuse &= ~KVM_LARCH_HWCSR_USABLE;
	context->last_vcpu = vcpu;

	/* Restore timer state regardless */
	kvm_restore_timer(vcpu);

	/* Control guest page CCA attribute */
	change_csr_gcfg(CSR_GCFG_MATC_MASK, CSR_GCFG_MATC_ROOT);

	/* Don't bother restoring registers multiple times unless necessary */
	if (vcpu->arch.aux_inuse & KVM_LARCH_HWCSR_USABLE)
		return 0;

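	/* Note: GCNTC is the guest counter compensation CSR; apply the VM-wide time offset */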
	write_csr_gcntc((ulong)vcpu->kvm->arch.time_offset);

	/* Restore guest CSR registers */
	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_CRMD);
	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_PRMD);
	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_EUEN);
	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_MISC);
	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_ECFG);
	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_ERA);
	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_BADV);
	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_BADI);
	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_EENTRY);
	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_TLBIDX);
	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_TLBEHI);
	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_TLBELO0);
	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_TLBELO1);
	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_ASID);
	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_PGDL);
	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_PGDH);
	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_PWCTL0);
	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_PWCTL1);
	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_STLBPGSIZE);
	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_RVACFG);
	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_CPUID);
	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_KS0);
	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_KS1);
	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_KS2);
	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_KS3);
	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_KS4);
	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_KS5);
	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_KS6);
	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_KS7);
	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_TMID);
	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_CNTC);
	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_TLBRENTRY);
	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_TLBRBADV);
	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_TLBRERA);
	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_TLBRSAVE);
	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_TLBRELO0);
	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_TLBRELO1);
	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_TLBREHI);
	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_TLBRPRMD);
	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_DMWIN0);
	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_DMWIN1);
	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_DMWIN2);
	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_DMWIN3);
	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_LLBCTL);

	/* Restore Root.GINTC from unused Guest.GINTC register */
	write_csr_gintc(csr->csrs[LOONGARCH_CSR_GINTC]);

	/*
	 * We should clear the linked load bit to break interrupted atomics.
	 * This prevents an SC on the next vCPU from succeeding by matching
	 * an LL on the previous vCPU.
	 */
	if (vcpu->kvm->created_vcpus > 1)
		set_gcsr_llbctl(CSR_LLBCTL_WCLLB);

	vcpu->arch.aux_inuse |= KVM_LARCH_HWCSR_USABLE;

	return 0;
}

void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
	unsigned long flags;

	local_irq_save(flags);
	if (vcpu->arch.last_sched_cpu != cpu) {
		kvm_debug("[%d->%d]KVM vCPU[%d] switch\n",
				vcpu->arch.last_sched_cpu, cpu, vcpu->vcpu_id);
		/*
		 * Migrate the timer interrupt to the current CPU so that it
		 * always interrupts the guest and synchronously triggers a
		 * guest timer interrupt.
		 */
		kvm_migrate_count(vcpu);
	}

	/* Restore guest state to registers */
	_kvm_vcpu_load(vcpu, cpu);
	local_irq_restore(flags);
}

static int _kvm_vcpu_put(struct kvm_vcpu *vcpu, int cpu)
{
	struct loongarch_csrs *csr = vcpu->arch.csr;

	kvm_lose_fpu(vcpu);

	/*
	 * Update CSR state from hardware if software CSR state is stale;
	 * most CSR registers are kept unchanged during a process context
	 * switch, except for CSR registers like the remaining timer tick
	 * value and the injected interrupt state.
	 */
	if (vcpu->arch.aux_inuse & KVM_LARCH_SWCSR_LATEST)
		goto out;

	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_CRMD);
	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_PRMD);
	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_EUEN);
	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_MISC);
	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_ECFG);
	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_ERA);
	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_BADV);
	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_BADI);
	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_EENTRY);
	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_TLBIDX);
	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_TLBEHI);
	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_TLBELO0);
	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_TLBELO1);
	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_ASID);
	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_PGDL);
	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_PGDH);
	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_PWCTL0);
	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_PWCTL1);
	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_STLBPGSIZE);
	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_RVACFG);
	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_CPUID);
	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_PRCFG1);
	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_PRCFG2);
	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_PRCFG3);
	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_KS0);
	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_KS1);
	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_KS2);
	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_KS3);
	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_KS4);
	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_KS5);
	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_KS6);
	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_KS7);
	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_TMID);
	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_CNTC);
	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_LLBCTL);
	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_TLBRENTRY);
	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_TLBRBADV);
	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_TLBRERA);
	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_TLBRSAVE);
	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_TLBRELO0);
	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_TLBRELO1);
	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_TLBREHI);
	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_TLBRPRMD);
	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_DMWIN0);
	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_DMWIN1);
	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_DMWIN2);
	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_DMWIN3);

	vcpu->arch.aux_inuse |= KVM_LARCH_SWCSR_LATEST;

out:
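	/* Note: timer tick and injected-interrupt state change even while scheduled out, so always save them */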
	kvm_save_timer(vcpu);
	/* Save Root.GINTC into unused Guest.GINTC register */
	csr->csrs[LOONGARCH_CSR_GINTC] = read_csr_gintc();

	return 0;
}

void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
{
	int cpu;
	unsigned long flags;

	local_irq_save(flags);
	cpu = smp_processor_id();
	vcpu->arch.last_sched_cpu = cpu;

	/* Save guest state in registers */
	_kvm_vcpu_put(vcpu, cpu);
	local_irq_restore(flags);
}

int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu)
{
	int r = -EINTR;
	struct kvm_run *run = vcpu->run;

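	/* Note: finish any MMIO/IOCSR read left over from the previous exit before re-entering */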
	if (vcpu->mmio_needed) {
		if (!vcpu->mmio_is_write)
			kvm_complete_mmio_read(vcpu, run);
		vcpu->mmio_needed = 0;
	}

	if (run->exit_reason == KVM_EXIT_LOONGARCH_IOCSR) {
		if (!run->iocsr_io.is_write)
			kvm_complete_iocsr_read(vcpu, run);
	}

	if (run->immediate_exit)
		return r;

	/* Clear exit_reason */
	run->exit_reason = KVM_EXIT_UNKNOWN;
	lose_fpu(1);
	vcpu_load(vcpu);
	kvm_sigset_activate(vcpu);
	r = kvm_pre_enter_guest(vcpu);
	if (r != RESUME_GUEST)
		goto out;

	guest_timing_enter_irqoff();
	guest_state_enter_irqoff();
	trace_kvm_enter(vcpu);
	r = kvm_loongarch_ops->enter_guest(run, vcpu);

	trace_kvm_out(vcpu);
	/*
	 * Guest exit is already recorded at kvm_handle_exit();
	 * the return value must not be RESUME_GUEST
	 */
	local_irq_enable();

out:
	kvm_sigset_deactivate(vcpu);
	vcpu_put(vcpu);

	return r;
}