 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 *
 * KVM/MIPS: MIPS specific KVM APIs
 *
 * Copyright (C) 2012 MIPS Technologies, Inc. All rights reserved.
 * Authors: Sanjay Lal <sanjayl@kymasys.com>
#include <linux/bitops.h>
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/kdebug.h>
#include <linux/module.h>
#include <linux/uaccess.h>
#include <linux/vmalloc.h>
#include <linux/sched/signal.h>
#include <linux/memblock.h>

#include <asm/cacheflush.h>
#include <asm/mmu_context.h>
#include <asm/pgalloc.h>
#include <asm/pgtable.h>

#include <linux/kvm_host.h>

#include "interrupt.h"

#define CREATE_TRACE_POINTS
#define VECTORSPACING 0x100	/* for EI/VI mode */

#define VCPU_STAT(x) offsetof(struct kvm_vcpu, stat.x)
struct kvm_stats_debugfs_item debugfs_entries[] = {
	{ "wait",	VCPU_STAT(wait_exits),	KVM_STAT_VCPU },
	{ "cache",	VCPU_STAT(cache_exits),	KVM_STAT_VCPU },
	{ "signal",	VCPU_STAT(signal_exits),	KVM_STAT_VCPU },
	{ "interrupt",	VCPU_STAT(int_exits),	KVM_STAT_VCPU },
	{ "cop_unusable",	VCPU_STAT(cop_unusable_exits),	KVM_STAT_VCPU },
	{ "tlbmod",	VCPU_STAT(tlbmod_exits),	KVM_STAT_VCPU },
	{ "tlbmiss_ld",	VCPU_STAT(tlbmiss_ld_exits),	KVM_STAT_VCPU },
	{ "tlbmiss_st",	VCPU_STAT(tlbmiss_st_exits),	KVM_STAT_VCPU },
	{ "addrerr_st",	VCPU_STAT(addrerr_st_exits),	KVM_STAT_VCPU },
	{ "addrerr_ld",	VCPU_STAT(addrerr_ld_exits),	KVM_STAT_VCPU },
	{ "syscall",	VCPU_STAT(syscall_exits),	KVM_STAT_VCPU },
	{ "resvd_inst",	VCPU_STAT(resvd_inst_exits),	KVM_STAT_VCPU },
	{ "break_inst",	VCPU_STAT(break_inst_exits),	KVM_STAT_VCPU },
	{ "trap_inst",	VCPU_STAT(trap_inst_exits),	KVM_STAT_VCPU },
	{ "msa_fpe",	VCPU_STAT(msa_fpe_exits),	KVM_STAT_VCPU },
	{ "fpe",	VCPU_STAT(fpe_exits),	KVM_STAT_VCPU },
	{ "msa_disabled",	VCPU_STAT(msa_disabled_exits),	KVM_STAT_VCPU },
	{ "flush_dcache",	VCPU_STAT(flush_dcache_exits),	KVM_STAT_VCPU },
#ifdef CONFIG_KVM_MIPS_VZ
	{ "vz_gpsi",	VCPU_STAT(vz_gpsi_exits),	KVM_STAT_VCPU },
	{ "vz_gsfc",	VCPU_STAT(vz_gsfc_exits),	KVM_STAT_VCPU },
	{ "vz_hc",	VCPU_STAT(vz_hc_exits),	KVM_STAT_VCPU },
	{ "vz_grr",	VCPU_STAT(vz_grr_exits),	KVM_STAT_VCPU },
	{ "vz_gva",	VCPU_STAT(vz_gva_exits),	KVM_STAT_VCPU },
	{ "vz_ghfc",	VCPU_STAT(vz_ghfc_exits),	KVM_STAT_VCPU },
	{ "vz_gpa",	VCPU_STAT(vz_gpa_exits),	KVM_STAT_VCPU },
	{ "vz_resvd",	VCPU_STAT(vz_resvd_exits),	KVM_STAT_VCPU },
#endif
	{ "halt_successful_poll",	VCPU_STAT(halt_successful_poll),	KVM_STAT_VCPU },
	{ "halt_attempted_poll",	VCPU_STAT(halt_attempted_poll),	KVM_STAT_VCPU },
	{ "halt_poll_invalid",	VCPU_STAT(halt_poll_invalid),	KVM_STAT_VCPU },
	{ "halt_wakeup",	VCPU_STAT(halt_wakeup),	KVM_STAT_VCPU },
};
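/*
 * Each entry above becomes a per-counter file managed by the generic KVM
 * debugfs code, so exit behaviour can be inspected from user space, e.g.
 * (assuming debugfs is mounted at /sys/kernel/debug):
 *
 *	cat /sys/kernel/debug/kvm/wait
 */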
bool kvm_trace_guest_mode_change;

int kvm_guest_mode_change_trace_reg(void)
	kvm_trace_guest_mode_change = true;

void kvm_guest_mode_change_trace_unreg(void)
	kvm_trace_guest_mode_change = false;
/*
 * XXXKYMA: We are simulating a processor that has the WII bit set in
 * Config7, so we are "runnable" if interrupts are pending
 */
int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu)
	return !!(vcpu->arch.pending_exceptions);
bool kvm_arch_vcpu_in_kernel(struct kvm_vcpu *vcpu)

int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu)

int kvm_arch_hardware_enable(void)
	return kvm_mips_callbacks->hardware_enable();

void kvm_arch_hardware_disable(void)
	kvm_mips_callbacks->hardware_disable();

int kvm_arch_hardware_setup(void)

int kvm_arch_check_processor_compat(void)
int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
	case KVM_VM_MIPS_AUTO:
#ifdef CONFIG_KVM_MIPS_VZ

	/* Unsupported KVM type */

	/* Allocate page table to map GPA -> RPA */
	kvm->arch.gpa_mm.pgd = kvm_pgd_alloc();
	if (!kvm->arch.gpa_mm.pgd)
void kvm_mips_free_vcpus(struct kvm *kvm)
	struct kvm_vcpu *vcpu;

	kvm_for_each_vcpu(i, vcpu, kvm) {
		kvm_arch_vcpu_free(vcpu);

	mutex_lock(&kvm->lock);

	for (i = 0; i < atomic_read(&kvm->online_vcpus); i++)
		kvm->vcpus[i] = NULL;

	atomic_set(&kvm->online_vcpus, 0);

	mutex_unlock(&kvm->lock);
static void kvm_mips_free_gpa_pt(struct kvm *kvm)
	/* It should always be safe to remove after flushing the whole range */
	WARN_ON(!kvm_mips_flush_gpa_pt(kvm, 0, ~0));
	pgd_free(NULL, kvm->arch.gpa_mm.pgd);

void kvm_arch_destroy_vm(struct kvm *kvm)
	kvm_mips_free_vcpus(kvm);
	kvm_mips_free_gpa_pt(kvm);
long kvm_arch_dev_ioctl(struct file *filp, unsigned int ioctl,

int kvm_arch_create_memslot(struct kvm *kvm, struct kvm_memory_slot *slot,
			    unsigned long npages)

void kvm_arch_flush_shadow_all(struct kvm *kvm)
	/* Flush whole GPA */
	kvm_mips_flush_gpa_pt(kvm, 0, ~0);

	/* Let implementation do the rest */
	kvm_mips_callbacks->flush_shadow_all(kvm);

void kvm_arch_flush_shadow_memslot(struct kvm *kvm,
				   struct kvm_memory_slot *slot)
	/*
	 * The slot has been made invalid (ready for moving or deletion), so we
	 * need to ensure that it can no longer be accessed by any guest VCPUs.
	 */
	spin_lock(&kvm->mmu_lock);
	/* Flush slot from GPA */
	kvm_mips_flush_gpa_pt(kvm, slot->base_gfn,
			      slot->base_gfn + slot->npages - 1);
	/* Let implementation do the rest */
	kvm_mips_callbacks->flush_shadow_memslot(kvm, slot);
	spin_unlock(&kvm->mmu_lock);
int kvm_arch_prepare_memory_region(struct kvm *kvm,
				   struct kvm_memory_slot *memslot,
				   const struct kvm_userspace_memory_region *mem,
				   enum kvm_mr_change change)

void kvm_arch_commit_memory_region(struct kvm *kvm,
				   const struct kvm_userspace_memory_region *mem,
				   const struct kvm_memory_slot *old,
				   const struct kvm_memory_slot *new,
				   enum kvm_mr_change change)
	kvm_debug("%s: kvm: %p slot: %d, GPA: %llx, size: %llx, QVA: %llx\n",
		  __func__, kvm, mem->slot, mem->guest_phys_addr,
		  mem->memory_size, mem->userspace_addr);

	/*
	 * If dirty page logging is enabled, write protect all pages in the slot
	 * ready for dirty logging.
	 *
	 * There is no need to do this in any of the following cases:
	 * CREATE:	No dirty mappings will already exist.
	 * MOVE/DELETE:	The old mappings will already have been cleaned up by
	 *		kvm_arch_flush_shadow_memslot()
	 */
	if (change == KVM_MR_FLAGS_ONLY &&
	    (!(old->flags & KVM_MEM_LOG_DIRTY_PAGES) &&
	     new->flags & KVM_MEM_LOG_DIRTY_PAGES)) {
		spin_lock(&kvm->mmu_lock);
		/* Write protect GPA page table entries */
		needs_flush = kvm_mips_mkclean_gpa_pt(kvm, new->base_gfn,
					new->base_gfn + new->npages - 1);
		/* Let implementation do the rest */
		kvm_mips_callbacks->flush_shadow_memslot(kvm, new);
		spin_unlock(&kvm->mmu_lock);
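/*
 * A minimal user-space sketch of switching dirty logging on for an
 * existing slot, which is the KVM_MR_FLAGS_ONLY transition handled above
 * (illustrative only; vm_fd, size and hva are assumptions, and error
 * handling is omitted):
 *
 *	struct kvm_userspace_memory_region mem = {
 *		.slot = 0,
 *		.flags = KVM_MEM_LOG_DIRTY_PAGES,
 *		.guest_phys_addr = 0,
 *		.memory_size = size,
 *		.userspace_addr = (__u64)hva,
 *	};
 *
 *	ioctl(vm_fd, KVM_SET_USER_MEMORY_REGION, &mem);
 */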
static inline void dump_handler(const char *symbol, void *start, void *end)
	pr_debug("LEAF(%s)\n", symbol);

	pr_debug("\t.set push\n");
	pr_debug("\t.set noreorder\n");

	for (p = start; p < (u32 *)end; ++p)
		pr_debug("\t.word\t0x%08x\t\t# %p\n", *p, p);

	pr_debug("\t.set\tpop\n");

	pr_debug("\tEND(%s)\n", symbol);
struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm, unsigned int id)
	void *gebase, *p, *handler, *refill_start, *refill_end;

	struct kvm_vcpu *vcpu = kzalloc(sizeof(struct kvm_vcpu), GFP_KERNEL);

	err = kvm_vcpu_init(vcpu, kvm, id);

	kvm_debug("kvm @ %p: create cpu %d at %p\n", kvm, id, vcpu);

	/*
	 * Allocate space for host mode exception handlers that handle
	 */
	if (cpu_has_veic || cpu_has_vint)
		size = 0x200 + VECTORSPACING * 64;

	gebase = kzalloc(ALIGN(size, PAGE_SIZE), GFP_KERNEL);

	kvm_debug("Allocated %d bytes for KVM Exception Handlers @ %p\n",
		  ALIGN(size, PAGE_SIZE), gebase);

	/*
	 * Check new ebase actually fits in CP0_EBase. The lack of a write gate
	 * limits us to the low 512MB of physical address space. If the memory
	 * we allocate is out of range, just give up now.
	 */
	if (!cpu_has_ebase_wg && virt_to_phys(gebase) >= 0x20000000) {
		kvm_err("CP0_EBase.WG required for guest exception base %pK\n",
		goto out_free_gebase;

	vcpu->arch.guest_ebase = gebase;

	/* Build guest exception vectors dynamically in unmapped memory */
	handler = gebase + 0x2000;

	/* TLB refill (or XTLB refill on 64-bit VZ where KX=1) */
	refill_start = gebase;
	if (IS_ENABLED(CONFIG_KVM_MIPS_VZ) && IS_ENABLED(CONFIG_64BIT))
		refill_start += 0x080;
	refill_end = kvm_mips_build_tlb_refill_exception(refill_start, handler);

	/* General Exception Entry point */
	kvm_mips_build_exception(gebase + 0x180, handler);

	/* For vectored interrupts poke the exception code @ all offsets 0-7 */
	for (i = 0; i < 8; i++) {
		kvm_debug("L1 Vectored handler @ %p\n",
			  gebase + 0x200 + (i * VECTORSPACING));
		kvm_mips_build_exception(gebase + 0x200 + i * VECTORSPACING,

	/* General exit handler */
	p = kvm_mips_build_exit(p);

	/* Guest entry routine */
	vcpu->arch.vcpu_run = p;
	p = kvm_mips_build_vcpu_run(p);

	/* Dump the generated code */
	pr_debug("#include <asm/asm.h>\n");
	pr_debug("#include <asm/regdef.h>\n");

	dump_handler("kvm_vcpu_run", vcpu->arch.vcpu_run, p);
	dump_handler("kvm_tlb_refill", refill_start, refill_end);
	dump_handler("kvm_gen_exc", gebase + 0x180, gebase + 0x200);
	dump_handler("kvm_exit", gebase + 0x2000, vcpu->arch.vcpu_run);

	/* Invalidate the icache for these ranges */
	flush_icache_range((unsigned long)gebase,
			   (unsigned long)gebase + ALIGN(size, PAGE_SIZE));

	/*
	 * Allocate comm page for guest kernel, a TLB will be reserved for
	 * mapping GVA @ 0xFFFF8000 to this page
	 */
	vcpu->arch.kseg0_commpage = kzalloc(PAGE_SIZE << 1, GFP_KERNEL);

	if (!vcpu->arch.kseg0_commpage) {
		goto out_free_gebase;

	kvm_debug("Allocated COMM page @ %p\n", vcpu->arch.kseg0_commpage);
	kvm_mips_commpage_init(vcpu);

	vcpu->arch.last_sched_cpu = -1;
	vcpu->arch.last_exec_cpu = -1;

	kvm_vcpu_uninit(vcpu);
void kvm_arch_vcpu_free(struct kvm_vcpu *vcpu)
	hrtimer_cancel(&vcpu->arch.comparecount_timer);

	kvm_vcpu_uninit(vcpu);

	kvm_mips_dump_stats(vcpu);

	kvm_mmu_free_memory_caches(vcpu);
	kfree(vcpu->arch.guest_ebase);
	kfree(vcpu->arch.kseg0_commpage);

void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
	kvm_arch_vcpu_free(vcpu);
int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
					struct kvm_guest_debug *dbg)

int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run)
	kvm_sigset_activate(vcpu);

	if (vcpu->mmio_needed) {
		if (!vcpu->mmio_is_write)
			kvm_mips_complete_mmio_load(vcpu, run);
		vcpu->mmio_needed = 0;

	if (run->immediate_exit)

	guest_enter_irqoff();
	trace_kvm_enter(vcpu);

	/*
	 * Make sure the read of VCPU requests in vcpu_run() callback is not
	 * reordered ahead of the write to vcpu->mode, or we could miss a TLB
	 * flush request while the requester sees the VCPU as outside of guest
	 * mode and not needing an IPI.
	 */
	smp_store_mb(vcpu->mode, IN_GUEST_MODE);

	r = kvm_mips_callbacks->vcpu_run(run, vcpu);

	kvm_sigset_deactivate(vcpu);
int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu,
			     struct kvm_mips_interrupt *irq)
	int intr = (int)irq->irq;
	struct kvm_vcpu *dvcpu = NULL;

	if (intr == 3 || intr == -3 || intr == 4 || intr == -4)
		kvm_debug("%s: CPU: %d, INTR: %d\n", __func__, irq->cpu,

		dvcpu = vcpu->kvm->vcpus[irq->cpu];

	if (intr == 2 || intr == 3 || intr == 4) {
		kvm_mips_callbacks->queue_io_int(dvcpu, irq);
	} else if (intr == -2 || intr == -3 || intr == -4) {
		kvm_mips_callbacks->dequeue_io_int(dvcpu, irq);
		kvm_err("%s: invalid interrupt ioctl (%d:%d)\n", __func__,

	dvcpu->arch.wait = 0;

	if (swq_has_sleeper(&dvcpu->wq))
		swake_up_one(&dvcpu->wq);
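/*
 * User space drives this via the KVM_INTERRUPT vcpu ioctl; a minimal
 * sketch (illustrative only; vcpu_fd is an assumption):
 *
 *	struct kvm_mips_interrupt irq = { .cpu = 0, .irq = 2 };
 *
 *	ioctl(vcpu_fd, KVM_INTERRUPT, &irq);
 *	irq.irq = -2;
 *	ioctl(vcpu_fd, KVM_INTERRUPT, &irq);
 *
 * The first call queues IO interrupt 2, the second dequeues it again,
 * matching the positive/negative intr cases above.
 */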
int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
				    struct kvm_mp_state *mp_state)

int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
				    struct kvm_mp_state *mp_state)

static u64 kvm_mips_get_one_regs[] = {

#ifndef CONFIG_CPU_MIPSR6

static u64 kvm_mips_get_one_regs_fpu[] = {
	KVM_REG_MIPS_FCR_IR,
	KVM_REG_MIPS_FCR_CSR,
};

static u64 kvm_mips_get_one_regs_msa[] = {
	KVM_REG_MIPS_MSA_IR,
	KVM_REG_MIPS_MSA_CSR,
};
static unsigned long kvm_mips_num_regs(struct kvm_vcpu *vcpu)
	ret = ARRAY_SIZE(kvm_mips_get_one_regs);
	if (kvm_mips_guest_can_have_fpu(&vcpu->arch)) {
		ret += ARRAY_SIZE(kvm_mips_get_one_regs_fpu) + 48;

		if (boot_cpu_data.fpu_id & MIPS_FPIR_F64)

	if (kvm_mips_guest_can_have_msa(&vcpu->arch))
		ret += ARRAY_SIZE(kvm_mips_get_one_regs_msa) + 32;
	ret += kvm_mips_callbacks->num_regs(vcpu);

static int kvm_mips_copy_reg_indices(struct kvm_vcpu *vcpu, u64 __user *indices)
	if (copy_to_user(indices, kvm_mips_get_one_regs,
			 sizeof(kvm_mips_get_one_regs)))
	indices += ARRAY_SIZE(kvm_mips_get_one_regs);

	if (kvm_mips_guest_can_have_fpu(&vcpu->arch)) {
		if (copy_to_user(indices, kvm_mips_get_one_regs_fpu,
				 sizeof(kvm_mips_get_one_regs_fpu)))
		indices += ARRAY_SIZE(kvm_mips_get_one_regs_fpu);

		for (i = 0; i < 32; ++i) {
			index = KVM_REG_MIPS_FPR_32(i);
			if (copy_to_user(indices, &index, sizeof(index)))

			/* skip odd doubles if no F64 */
			if (i & 1 && !(boot_cpu_data.fpu_id & MIPS_FPIR_F64))

			index = KVM_REG_MIPS_FPR_64(i);
			if (copy_to_user(indices, &index, sizeof(index)))

	if (kvm_mips_guest_can_have_msa(&vcpu->arch)) {
		if (copy_to_user(indices, kvm_mips_get_one_regs_msa,
				 sizeof(kvm_mips_get_one_regs_msa)))
		indices += ARRAY_SIZE(kvm_mips_get_one_regs_msa);

		for (i = 0; i < 32; ++i) {
			index = KVM_REG_MIPS_VEC_128(i);
			if (copy_to_user(indices, &index, sizeof(index)))

	return kvm_mips_callbacks->copy_reg_indices(vcpu, indices);
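/*
 * Every id handed out above packs architecture, access size and register
 * number into a single u64, which is why kvm_mips_get_reg() and
 * kvm_mips_set_reg() below can dispatch on reg->id alone and pick the
 * user access width from (reg->id & KVM_REG_SIZE_MASK).
 */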
static int kvm_mips_get_reg(struct kvm_vcpu *vcpu,
			    const struct kvm_one_reg *reg)
	struct mips_coproc *cop0 = vcpu->arch.cop0;
	struct mips_fpu_struct *fpu = &vcpu->arch.fpu;

	/* General purpose registers */
	case KVM_REG_MIPS_R0 ... KVM_REG_MIPS_R31:
		v = (long)vcpu->arch.gprs[reg->id - KVM_REG_MIPS_R0];
#ifndef CONFIG_CPU_MIPSR6
	case KVM_REG_MIPS_HI:
		v = (long)vcpu->arch.hi;
	case KVM_REG_MIPS_LO:
		v = (long)vcpu->arch.lo;
#endif
	case KVM_REG_MIPS_PC:
		v = (long)vcpu->arch.pc;

	/* Floating point registers */
	case KVM_REG_MIPS_FPR_32(0) ... KVM_REG_MIPS_FPR_32(31):
		if (!kvm_mips_guest_has_fpu(&vcpu->arch))
		idx = reg->id - KVM_REG_MIPS_FPR_32(0);
		/* Odd singles in top of even double when FR=0 */
		if (kvm_read_c0_guest_status(cop0) & ST0_FR)
			v = get_fpr32(&fpu->fpr[idx], 0);
			v = get_fpr32(&fpu->fpr[idx & ~1], idx & 1);
	case KVM_REG_MIPS_FPR_64(0) ... KVM_REG_MIPS_FPR_64(31):
		if (!kvm_mips_guest_has_fpu(&vcpu->arch))
		idx = reg->id - KVM_REG_MIPS_FPR_64(0);
		/* Can't access odd doubles in FR=0 mode */
		if (idx & 1 && !(kvm_read_c0_guest_status(cop0) & ST0_FR))
		v = get_fpr64(&fpu->fpr[idx], 0);
	case KVM_REG_MIPS_FCR_IR:
		if (!kvm_mips_guest_has_fpu(&vcpu->arch))
		v = boot_cpu_data.fpu_id;
	case KVM_REG_MIPS_FCR_CSR:
		if (!kvm_mips_guest_has_fpu(&vcpu->arch))

	/* MIPS SIMD Architecture (MSA) registers */
	case KVM_REG_MIPS_VEC_128(0) ... KVM_REG_MIPS_VEC_128(31):
		if (!kvm_mips_guest_has_msa(&vcpu->arch))
		/* Can't access MSA registers in FR=0 mode */
		if (!(kvm_read_c0_guest_status(cop0) & ST0_FR))
		idx = reg->id - KVM_REG_MIPS_VEC_128(0);
#ifdef CONFIG_CPU_LITTLE_ENDIAN
		/* least significant byte first */
		vs[0] = get_fpr64(&fpu->fpr[idx], 0);
		vs[1] = get_fpr64(&fpu->fpr[idx], 1);
#else
		/* most significant byte first */
		vs[0] = get_fpr64(&fpu->fpr[idx], 1);
		vs[1] = get_fpr64(&fpu->fpr[idx], 0);
#endif
	case KVM_REG_MIPS_MSA_IR:
		if (!kvm_mips_guest_has_msa(&vcpu->arch))
		v = boot_cpu_data.msa_id;
	case KVM_REG_MIPS_MSA_CSR:
		if (!kvm_mips_guest_has_msa(&vcpu->arch))

	/* registers to be handled specially */
		ret = kvm_mips_callbacks->get_one_reg(vcpu, reg, &v);

	if ((reg->id & KVM_REG_SIZE_MASK) == KVM_REG_SIZE_U64) {
		u64 __user *uaddr64 = (u64 __user *)(long)reg->addr;

		return put_user(v, uaddr64);
	} else if ((reg->id & KVM_REG_SIZE_MASK) == KVM_REG_SIZE_U32) {
		u32 __user *uaddr32 = (u32 __user *)(long)reg->addr;

		return put_user(v32, uaddr32);
	} else if ((reg->id & KVM_REG_SIZE_MASK) == KVM_REG_SIZE_U128) {
		void __user *uaddr = (void __user *)(long)reg->addr;

		return copy_to_user(uaddr, vs, 16) ? -EFAULT : 0;
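/*
 * A minimal user-space sketch of reading one register through this path
 * (illustrative only; vcpu_fd is an assumption):
 *
 *	__u64 pc;
 *	struct kvm_one_reg reg = {
 *		.id = KVM_REG_MIPS_PC,
 *		.addr = (__u64)&pc,
 *	};
 *
 *	ioctl(vcpu_fd, KVM_GET_ONE_REG, &reg);
 *
 * The put_user()/copy_to_user() calls above fill *addr with the width
 * encoded in the id.
 */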
static int kvm_mips_set_reg(struct kvm_vcpu *vcpu,
			    const struct kvm_one_reg *reg)
	struct mips_coproc *cop0 = vcpu->arch.cop0;
	struct mips_fpu_struct *fpu = &vcpu->arch.fpu;

	if ((reg->id & KVM_REG_SIZE_MASK) == KVM_REG_SIZE_U64) {
		u64 __user *uaddr64 = (u64 __user *)(long)reg->addr;

		if (get_user(v, uaddr64) != 0)
	} else if ((reg->id & KVM_REG_SIZE_MASK) == KVM_REG_SIZE_U32) {
		u32 __user *uaddr32 = (u32 __user *)(long)reg->addr;

		if (get_user(v32, uaddr32) != 0)
	} else if ((reg->id & KVM_REG_SIZE_MASK) == KVM_REG_SIZE_U128) {
		void __user *uaddr = (void __user *)(long)reg->addr;

		return copy_from_user(vs, uaddr, 16) ? -EFAULT : 0;

	/* General purpose registers */
	case KVM_REG_MIPS_R0:
		/* Silently ignore requests to set $0 */
	case KVM_REG_MIPS_R1 ... KVM_REG_MIPS_R31:
		vcpu->arch.gprs[reg->id - KVM_REG_MIPS_R0] = v;
#ifndef CONFIG_CPU_MIPSR6
	case KVM_REG_MIPS_HI:
	case KVM_REG_MIPS_LO:
#endif
	case KVM_REG_MIPS_PC:

	/* Floating point registers */
	case KVM_REG_MIPS_FPR_32(0) ... KVM_REG_MIPS_FPR_32(31):
		if (!kvm_mips_guest_has_fpu(&vcpu->arch))
		idx = reg->id - KVM_REG_MIPS_FPR_32(0);
		/* Odd singles in top of even double when FR=0 */
		if (kvm_read_c0_guest_status(cop0) & ST0_FR)
			set_fpr32(&fpu->fpr[idx], 0, v);
			set_fpr32(&fpu->fpr[idx & ~1], idx & 1, v);
	case KVM_REG_MIPS_FPR_64(0) ... KVM_REG_MIPS_FPR_64(31):
		if (!kvm_mips_guest_has_fpu(&vcpu->arch))
		idx = reg->id - KVM_REG_MIPS_FPR_64(0);
		/* Can't access odd doubles in FR=0 mode */
		if (idx & 1 && !(kvm_read_c0_guest_status(cop0) & ST0_FR))
		set_fpr64(&fpu->fpr[idx], 0, v);
	case KVM_REG_MIPS_FCR_IR:
		if (!kvm_mips_guest_has_fpu(&vcpu->arch))
	case KVM_REG_MIPS_FCR_CSR:
		if (!kvm_mips_guest_has_fpu(&vcpu->arch))

	/* MIPS SIMD Architecture (MSA) registers */
	case KVM_REG_MIPS_VEC_128(0) ... KVM_REG_MIPS_VEC_128(31):
		if (!kvm_mips_guest_has_msa(&vcpu->arch))
		idx = reg->id - KVM_REG_MIPS_VEC_128(0);
#ifdef CONFIG_CPU_LITTLE_ENDIAN
		/* least significant byte first */
		set_fpr64(&fpu->fpr[idx], 0, vs[0]);
		set_fpr64(&fpu->fpr[idx], 1, vs[1]);
#else
		/* most significant byte first */
		set_fpr64(&fpu->fpr[idx], 1, vs[0]);
		set_fpr64(&fpu->fpr[idx], 0, vs[1]);
#endif
	case KVM_REG_MIPS_MSA_IR:
		if (!kvm_mips_guest_has_msa(&vcpu->arch))
	case KVM_REG_MIPS_MSA_CSR:
		if (!kvm_mips_guest_has_msa(&vcpu->arch))

	/* registers to be handled specially */
		return kvm_mips_callbacks->set_one_reg(vcpu, reg, v);
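/*
 * The capability plumbing below lets user space opt in to the FPU/MSA
 * register state. A matching sketch (illustrative only; vm_fd and
 * vcpu_fd are assumptions): probe on the VM, then enable per VCPU:
 *
 *	if (ioctl(vm_fd, KVM_CHECK_EXTENSION, KVM_CAP_MIPS_FPU) > 0) {
 *		struct kvm_enable_cap cap = { .cap = KVM_CAP_MIPS_FPU };
 *
 *		ioctl(vcpu_fd, KVM_ENABLE_CAP, &cap);
 *	}
 *
 * kvm_vm_ioctl_check_extension() further down implements the probe side.
 */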
static int kvm_vcpu_ioctl_enable_cap(struct kvm_vcpu *vcpu,
				     struct kvm_enable_cap *cap)
	if (!kvm_vm_ioctl_check_extension(vcpu->kvm, cap->cap))

	case KVM_CAP_MIPS_FPU:
		vcpu->arch.fpu_enabled = true;
	case KVM_CAP_MIPS_MSA:
		vcpu->arch.msa_enabled = true;
long kvm_arch_vcpu_async_ioctl(struct file *filp, unsigned int ioctl,
	struct kvm_vcpu *vcpu = filp->private_data;
	void __user *argp = (void __user *)arg;

	if (ioctl == KVM_INTERRUPT) {
		struct kvm_mips_interrupt irq;

		if (copy_from_user(&irq, argp, sizeof(irq)))
		kvm_debug("[%d] %s: irq: %d\n", vcpu->vcpu_id, __func__,

		return kvm_vcpu_ioctl_interrupt(vcpu, &irq);

long kvm_arch_vcpu_ioctl(struct file *filp, unsigned int ioctl,
	struct kvm_vcpu *vcpu = filp->private_data;
	void __user *argp = (void __user *)arg;

	case KVM_SET_ONE_REG:
	case KVM_GET_ONE_REG: {
		struct kvm_one_reg reg;

		if (copy_from_user(&reg, argp, sizeof(reg)))
		if (ioctl == KVM_SET_ONE_REG)
			r = kvm_mips_set_reg(vcpu, &reg);
			r = kvm_mips_get_reg(vcpu, &reg);
	case KVM_GET_REG_LIST: {
		struct kvm_reg_list __user *user_list = argp;
		struct kvm_reg_list reg_list;

		if (copy_from_user(&reg_list, user_list, sizeof(reg_list)))
		reg_list.n = kvm_mips_num_regs(vcpu);
		if (copy_to_user(user_list, &reg_list, sizeof(reg_list)))

		r = kvm_mips_copy_reg_indices(vcpu, user_list->reg);

	case KVM_ENABLE_CAP: {
		struct kvm_enable_cap cap;

		if (copy_from_user(&cap, argp, sizeof(cap)))
		r = kvm_vcpu_ioctl_enable_cap(vcpu, &cap);
/**
 * kvm_vm_ioctl_get_dirty_log - get and clear the log of dirty pages in a slot
 * @log: slot id and address to which we copy the log
 *
 * Steps 1-4 below provide general overview of dirty page logging. See
 * kvm_get_dirty_log_protect() function description for additional details.
 *
 * We call kvm_get_dirty_log_protect() to handle steps 1-3, upon return we
 * always flush the TLB (step 4) even if previous step failed and the dirty
 * bitmap may be corrupt. Regardless of previous outcome the KVM logging API
 * does not preclude user space from subsequently reading the dirty log.
 * Flushing the TLB ensures writes will be marked dirty for the next log read.
 *
 * 1. Take a snapshot of the bit and clear it if needed.
 * 2. Write protect the corresponding page.
 * 3. Copy the snapshot to the userspace.
 * 4. Flush TLB's if needed.
 */
int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm, struct kvm_dirty_log *log)
	struct kvm_memslots *slots;
	struct kvm_memory_slot *memslot;

	mutex_lock(&kvm->slots_lock);

	r = kvm_get_dirty_log_protect(kvm, log, &flush);

	slots = kvm_memslots(kvm);
	memslot = id_to_memslot(slots, log->slot);

	/* Let implementation handle TLB/GVA invalidation */
	kvm_mips_callbacks->flush_shadow_memslot(kvm, memslot);

	mutex_unlock(&kvm->slots_lock);
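/*
 * A minimal user-space consumer of this ioctl (illustrative only; vm_fd,
 * the slot number and the bitmap sizing are assumptions):
 *
 *	__u64 bitmap[(npages + 63) / 64];
 *	struct kvm_dirty_log log = {
 *		.slot = 0,
 *		.dirty_bitmap = bitmap,
 *	};
 *
 *	ioctl(vm_fd, KVM_GET_DIRTY_LOG, &log);
 *
 * Each set bit marks a guest page written since the previous call.
 */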
int kvm_vm_ioctl_clear_dirty_log(struct kvm *kvm, struct kvm_clear_dirty_log *log)
	struct kvm_memslots *slots;
	struct kvm_memory_slot *memslot;

	mutex_lock(&kvm->slots_lock);

	r = kvm_clear_dirty_log_protect(kvm, log, &flush);

	slots = kvm_memslots(kvm);
	memslot = id_to_memslot(slots, log->slot);

	/* Let implementation handle TLB/GVA invalidation */
	kvm_mips_callbacks->flush_shadow_memslot(kvm, memslot);

	mutex_unlock(&kvm->slots_lock);

long kvm_arch_vm_ioctl(struct file *filp, unsigned int ioctl, unsigned long arg)
int kvm_arch_init(void *opaque)
	if (kvm_mips_callbacks) {
		kvm_err("kvm: module already exists\n");

	return kvm_mips_emulation_init(&kvm_mips_callbacks);

void kvm_arch_exit(void)
	kvm_mips_callbacks = NULL;

int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
				  struct kvm_sregs *sregs)
	return -ENOIOCTLCMD;

int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
				  struct kvm_sregs *sregs)
	return -ENOIOCTLCMD;

void kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu)

int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
	return -ENOIOCTLCMD;

int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
	return -ENOIOCTLCMD;

vm_fault_t kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf)
	return VM_FAULT_SIGBUS;
int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
	case KVM_CAP_ONE_REG:
	case KVM_CAP_ENABLE_CAP:
	case KVM_CAP_READONLY_MEM:
	case KVM_CAP_SYNC_MMU:
	case KVM_CAP_IMMEDIATE_EXIT:

	case KVM_CAP_NR_VCPUS:
		r = num_online_cpus();
	case KVM_CAP_MAX_VCPUS:

	case KVM_CAP_MAX_VCPU_ID:
		r = KVM_MAX_VCPU_ID;
	case KVM_CAP_MIPS_FPU:
		/* We don't handle systems with inconsistent cpu_has_fpu */
		r = !!raw_cpu_has_fpu;
	case KVM_CAP_MIPS_MSA:
		/*
		 * We don't support MSA vector partitioning yet:
		 * 1) It would require explicit support which can't be tested
		 *    yet due to lack of support in current hardware.
		 * 2) It extends the state that would need to be saved/restored
		 *    by e.g. QEMU for migration.
		 *
		 * When vector partitioning hardware becomes available, support
		 * could be added by requiring a flag when enabling
		 * KVM_CAP_MIPS_MSA capability to indicate that userland knows
		 * to save/restore the appropriate extra state.
		 */
		r = cpu_has_msa && !(boot_cpu_data.msa_id & MSA_IR_WRPF);
		r = kvm_mips_callbacks->check_extension(kvm, ext);

int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu)
	return kvm_mips_pending_timer(vcpu) ||
		kvm_read_c0_guest_cause(vcpu->arch.cop0) & C_TI;
int kvm_arch_vcpu_dump_regs(struct kvm_vcpu *vcpu)
	struct mips_coproc *cop0;

	kvm_debug("VCPU Register Dump:\n");
	kvm_debug("\tpc = 0x%08lx\n", vcpu->arch.pc);
	kvm_debug("\texceptions: %08lx\n", vcpu->arch.pending_exceptions);

	for (i = 0; i < 32; i += 4) {
		kvm_debug("\tgpr%02d: %08lx %08lx %08lx %08lx\n", i,
			  vcpu->arch.gprs[i + 1],
			  vcpu->arch.gprs[i + 2], vcpu->arch.gprs[i + 3]);

	kvm_debug("\thi: 0x%08lx\n", vcpu->arch.hi);
	kvm_debug("\tlo: 0x%08lx\n", vcpu->arch.lo);

	cop0 = vcpu->arch.cop0;
	kvm_debug("\tStatus: 0x%08x, Cause: 0x%08x\n",
		  kvm_read_c0_guest_status(cop0),
		  kvm_read_c0_guest_cause(cop0));

	kvm_debug("\tEPC: 0x%08lx\n", kvm_read_c0_guest_epc(cop0));

int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
	for (i = 1; i < ARRAY_SIZE(vcpu->arch.gprs); i++)
		vcpu->arch.gprs[i] = regs->gpr[i];
	vcpu->arch.gprs[0] = 0; /* zero is special, and cannot be set. */
	vcpu->arch.hi = regs->hi;
	vcpu->arch.lo = regs->lo;
	vcpu->arch.pc = regs->pc;

int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
	for (i = 0; i < ARRAY_SIZE(vcpu->arch.gprs); i++)
		regs->gpr[i] = vcpu->arch.gprs[i];

	regs->hi = vcpu->arch.hi;
	regs->lo = vcpu->arch.lo;
	regs->pc = vcpu->arch.pc;
static void kvm_mips_comparecount_func(unsigned long data)
	struct kvm_vcpu *vcpu = (struct kvm_vcpu *)data;

	kvm_mips_callbacks->queue_timer_int(vcpu);

	vcpu->arch.wait = 0;
	if (swq_has_sleeper(&vcpu->wq))
		swake_up_one(&vcpu->wq);

/* low level hrtimer wake routine */
static enum hrtimer_restart kvm_mips_comparecount_wakeup(struct hrtimer *timer)
	struct kvm_vcpu *vcpu;

	vcpu = container_of(timer, struct kvm_vcpu, arch.comparecount_timer);
	kvm_mips_comparecount_func((unsigned long) vcpu);
	return kvm_mips_count_timeout(vcpu);
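/*
 * The restart decision is delegated to kvm_mips_count_timeout(): it
 * returns HRTIMER_RESTART when the emulated CP0_Count timer should keep
 * firing and HRTIMER_NORESTART otherwise (see the count/compare
 * emulation code for the details).
 */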
int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
	err = kvm_mips_callbacks->vcpu_init(vcpu);

	hrtimer_init(&vcpu->arch.comparecount_timer, CLOCK_MONOTONIC,
	vcpu->arch.comparecount_timer.function = kvm_mips_comparecount_wakeup;

void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu)
	kvm_mips_callbacks->vcpu_uninit(vcpu);

int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
				  struct kvm_translation *tr)

/* Initial guest state */
int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
	return kvm_mips_callbacks->vcpu_setup(vcpu);

static void kvm_mips_set_c0_status(void)
	u32 status = read_c0_status();

	write_c0_status(status);
/*
 * Return value is in the form (errcode<<2 | RESUME_FLAG_HOST | RESUME_FLAG_NV)
 */
int kvm_mips_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu)
	u32 cause = vcpu->arch.host_cp0_cause;
	u32 exccode = (cause >> CAUSEB_EXCCODE) & 0x1f;
	u32 __user *opc = (u32 __user *) vcpu->arch.pc;
	unsigned long badvaddr = vcpu->arch.host_cp0_badvaddr;
	enum emulation_result er = EMULATE_DONE;

	int ret = RESUME_GUEST;

	vcpu->mode = OUTSIDE_GUEST_MODE;

	/* re-enable HTW before enabling interrupts */
	if (!IS_ENABLED(CONFIG_KVM_MIPS_VZ))

	/* Set a default exit reason */
	run->exit_reason = KVM_EXIT_UNKNOWN;
	run->ready_for_interrupt_injection = 1;

	/*
	 * Set the appropriate status bits based on host CPU features,
	 * before we hit the scheduler
	 */
	kvm_mips_set_c0_status();

	kvm_debug("kvm_mips_handle_exit: cause: %#x, PC: %p, kvm_run: %p, kvm_vcpu: %p\n",
			cause, opc, run, vcpu);
	trace_kvm_exit(vcpu, exccode);

	if (!IS_ENABLED(CONFIG_KVM_MIPS_VZ)) {
		/*
		 * Do a privilege check, if in UM most of these exit conditions
		 * end up causing an exception to be delivered to the Guest
		 */
		er = kvm_mips_check_privilege(cause, opc, run, vcpu);
		if (er == EMULATE_PRIV_FAIL) {
		} else if (er == EMULATE_FAIL) {
			run->exit_reason = KVM_EXIT_INTERNAL_ERROR;

		kvm_debug("[%d]EXCCODE_INT @ %p\n", vcpu->vcpu_id, opc);

		++vcpu->stat.int_exits;

		kvm_debug("EXCCODE_CPU: @ PC: %p\n", opc);

		++vcpu->stat.cop_unusable_exits;
		ret = kvm_mips_callbacks->handle_cop_unusable(vcpu);
		/* XXXKYMA: Might need to return to user space */
		if (run->exit_reason == KVM_EXIT_IRQ_WINDOW_OPEN)

		++vcpu->stat.tlbmod_exits;
		ret = kvm_mips_callbacks->handle_tlb_mod(vcpu);

		kvm_debug("TLB ST fault: cause %#x, status %#x, PC: %p, BadVaddr: %#lx\n",
			  cause, kvm_read_c0_guest_status(vcpu->arch.cop0), opc,

		++vcpu->stat.tlbmiss_st_exits;
		ret = kvm_mips_callbacks->handle_tlb_st_miss(vcpu);

		kvm_debug("TLB LD fault: cause %#x, PC: %p, BadVaddr: %#lx\n",
			  cause, opc, badvaddr);

		++vcpu->stat.tlbmiss_ld_exits;
		ret = kvm_mips_callbacks->handle_tlb_ld_miss(vcpu);

		++vcpu->stat.addrerr_st_exits;
		ret = kvm_mips_callbacks->handle_addr_err_st(vcpu);

		++vcpu->stat.addrerr_ld_exits;
		ret = kvm_mips_callbacks->handle_addr_err_ld(vcpu);

		++vcpu->stat.syscall_exits;
		ret = kvm_mips_callbacks->handle_syscall(vcpu);

		++vcpu->stat.resvd_inst_exits;
		ret = kvm_mips_callbacks->handle_res_inst(vcpu);

		++vcpu->stat.break_inst_exits;
		ret = kvm_mips_callbacks->handle_break(vcpu);

		++vcpu->stat.trap_inst_exits;
		ret = kvm_mips_callbacks->handle_trap(vcpu);

	case EXCCODE_MSAFPE:
		++vcpu->stat.msa_fpe_exits;
		ret = kvm_mips_callbacks->handle_msa_fpe(vcpu);

		++vcpu->stat.fpe_exits;
		ret = kvm_mips_callbacks->handle_fpe(vcpu);

	case EXCCODE_MSADIS:
		++vcpu->stat.msa_disabled_exits;
		ret = kvm_mips_callbacks->handle_msa_disabled(vcpu);

		/* defer exit accounting to handler */
		ret = kvm_mips_callbacks->handle_guest_exit(vcpu);

		if (cause & CAUSEF_BD)

		kvm_get_badinstr(opc, vcpu, &inst);
		kvm_err("Exception Code: %d, not yet handled, @ PC: %p, inst: 0x%08x BadVaddr: %#lx Status: %#x\n",
			exccode, opc, inst, badvaddr,
			kvm_read_c0_guest_status(vcpu->arch.cop0));
		kvm_arch_vcpu_dump_regs(vcpu);
		run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
	local_irq_disable();

	if (ret == RESUME_GUEST)
		kvm_vz_acquire_htimer(vcpu);

	if (er == EMULATE_DONE && !(ret & RESUME_HOST))
		kvm_mips_deliver_interrupts(vcpu, cause);

	if (!(ret & RESUME_HOST)) {
		/* Only check for signals if not already exiting to userspace */
		if (signal_pending(current)) {
			run->exit_reason = KVM_EXIT_INTR;
			ret = (-EINTR << 2) | RESUME_HOST;
			++vcpu->stat.signal_exits;
			trace_kvm_exit(vcpu, KVM_TRACE_EXIT_SIGNAL);

	if (ret == RESUME_GUEST) {
		trace_kvm_reenter(vcpu);

		/*
		 * Make sure the read of VCPU requests in vcpu_reenter()
		 * callback is not reordered ahead of the write to vcpu->mode,
		 * or we could miss a TLB flush request while the requester sees
		 * the VCPU as outside of guest mode and not needing an IPI.
		 */
		smp_store_mb(vcpu->mode, IN_GUEST_MODE);

		kvm_mips_callbacks->vcpu_reenter(run, vcpu);

		/*
		 * If FPU / MSA are enabled (i.e. the guest's FPU / MSA context
		 * is live), restore FCR31 / MSACSR.
		 *
		 * This should be before returning to the guest exception
		 * vector, as it may well cause an [MSA] FP exception if there
		 * are pending exception bits unmasked. (see
		 * kvm_mips_csr_die_notifier() for how that is handled).
		 */
		if (kvm_mips_guest_has_fpu(&vcpu->arch) &&
		    read_c0_status() & ST0_CU1)
			__kvm_restore_fcsr(&vcpu->arch);

		if (kvm_mips_guest_has_msa(&vcpu->arch) &&
		    read_c0_config5() & MIPS_CONF5_MSAEN)
			__kvm_restore_msacsr(&vcpu->arch);

	/* Disable HTW before returning to guest or host */
	if (!IS_ENABLED(CONFIG_KVM_MIPS_VZ))
/* Enable FPU for guest and restore context */
void kvm_own_fpu(struct kvm_vcpu *vcpu)
	struct mips_coproc *cop0 = vcpu->arch.cop0;
	unsigned int sr, cfg5;

	sr = kvm_read_c0_guest_status(cop0);

	/*
	 * If MSA state is already live, it is undefined how it interacts with
	 * FR=0 FPU state, and we don't want to hit reserved instruction
	 * exceptions trying to save the MSA state later when CU=1 && FR=1, so
	 * play it safe and save it first.
	 *
	 * In theory we shouldn't ever hit this case since kvm_lose_fpu() should
	 * get called when guest CU1 is set, however we can't trust the guest
	 * not to clobber the status register directly via the commpage.
	 */
	if (cpu_has_msa && sr & ST0_CU1 && !(sr & ST0_FR) &&
	    vcpu->arch.aux_inuse & KVM_MIPS_AUX_MSA)

	/*
	 * Enable FPU for guest
	 * We set FR and FRE according to guest context
	 */
	change_c0_status(ST0_CU1 | ST0_FR, sr);

	cfg5 = kvm_read_c0_guest_config5(cop0);
	change_c0_config5(MIPS_CONF5_FRE, cfg5);

	enable_fpu_hazard();

	/* If guest FPU state not active, restore it now */
	if (!(vcpu->arch.aux_inuse & KVM_MIPS_AUX_FPU)) {
		__kvm_restore_fpu(&vcpu->arch);
		vcpu->arch.aux_inuse |= KVM_MIPS_AUX_FPU;
		trace_kvm_aux(vcpu, KVM_TRACE_AUX_RESTORE, KVM_TRACE_AUX_FPU);
		trace_kvm_aux(vcpu, KVM_TRACE_AUX_ENABLE, KVM_TRACE_AUX_FPU);
#ifdef CONFIG_CPU_HAS_MSA
/* Enable MSA for guest and restore context */
void kvm_own_msa(struct kvm_vcpu *vcpu)
	struct mips_coproc *cop0 = vcpu->arch.cop0;
	unsigned int sr, cfg5;

	/*
	 * Enable FPU if enabled in guest, since we're restoring FPU context
	 * anyway. We set FR and FRE according to guest context.
	 */
	if (kvm_mips_guest_has_fpu(&vcpu->arch)) {
		sr = kvm_read_c0_guest_status(cop0);

		/*
		 * If FR=0 FPU state is already live, it is undefined how it
		 * interacts with MSA state, so play it safe and save it first.
		 */
		if (!(sr & ST0_FR) &&
		    (vcpu->arch.aux_inuse & (KVM_MIPS_AUX_FPU |
				KVM_MIPS_AUX_MSA)) == KVM_MIPS_AUX_FPU)

		change_c0_status(ST0_CU1 | ST0_FR, sr);
		if (sr & ST0_CU1 && cpu_has_fre) {
			cfg5 = kvm_read_c0_guest_config5(cop0);
			change_c0_config5(MIPS_CONF5_FRE, cfg5);

	/* Enable MSA for guest */
	set_c0_config5(MIPS_CONF5_MSAEN);
	enable_fpu_hazard();

	switch (vcpu->arch.aux_inuse & (KVM_MIPS_AUX_FPU | KVM_MIPS_AUX_MSA)) {
	case KVM_MIPS_AUX_FPU:
		/*
		 * Guest FPU state already loaded, only restore upper MSA state
		 */
		__kvm_restore_msa_upper(&vcpu->arch);
		vcpu->arch.aux_inuse |= KVM_MIPS_AUX_MSA;
		trace_kvm_aux(vcpu, KVM_TRACE_AUX_RESTORE, KVM_TRACE_AUX_MSA);

		/* Neither FPU nor MSA already active, restore full MSA state */
		__kvm_restore_msa(&vcpu->arch);
		vcpu->arch.aux_inuse |= KVM_MIPS_AUX_MSA;
		if (kvm_mips_guest_has_fpu(&vcpu->arch))
			vcpu->arch.aux_inuse |= KVM_MIPS_AUX_FPU;
		trace_kvm_aux(vcpu, KVM_TRACE_AUX_RESTORE,
			      KVM_TRACE_AUX_FPU_MSA);

	trace_kvm_aux(vcpu, KVM_TRACE_AUX_ENABLE, KVM_TRACE_AUX_MSA);
/* Drop FPU & MSA state without saving it */
void kvm_drop_fpu(struct kvm_vcpu *vcpu)
	if (cpu_has_msa && vcpu->arch.aux_inuse & KVM_MIPS_AUX_MSA) {
		trace_kvm_aux(vcpu, KVM_TRACE_AUX_DISCARD, KVM_TRACE_AUX_MSA);
		vcpu->arch.aux_inuse &= ~KVM_MIPS_AUX_MSA;

	if (vcpu->arch.aux_inuse & KVM_MIPS_AUX_FPU) {
		clear_c0_status(ST0_CU1 | ST0_FR);
		trace_kvm_aux(vcpu, KVM_TRACE_AUX_DISCARD, KVM_TRACE_AUX_FPU);
		vcpu->arch.aux_inuse &= ~KVM_MIPS_AUX_FPU;
/* Save and disable FPU & MSA */
void kvm_lose_fpu(struct kvm_vcpu *vcpu)
	/*
	 * With T&E, FPU & MSA get disabled in root context (hardware) when they
	 * are disabled in guest context (software), but the register state in
	 * the hardware may still be in use.
	 * This is why we explicitly re-enable the hardware before saving.
	 */
	if (cpu_has_msa && vcpu->arch.aux_inuse & KVM_MIPS_AUX_MSA) {
		if (!IS_ENABLED(CONFIG_KVM_MIPS_VZ)) {
			set_c0_config5(MIPS_CONF5_MSAEN);
			enable_fpu_hazard();

		__kvm_save_msa(&vcpu->arch);
		trace_kvm_aux(vcpu, KVM_TRACE_AUX_SAVE, KVM_TRACE_AUX_FPU_MSA);

		/* Disable MSA & FPU */

		if (vcpu->arch.aux_inuse & KVM_MIPS_AUX_FPU) {
			clear_c0_status(ST0_CU1 | ST0_FR);
			disable_fpu_hazard();

		vcpu->arch.aux_inuse &= ~(KVM_MIPS_AUX_FPU | KVM_MIPS_AUX_MSA);
	} else if (vcpu->arch.aux_inuse & KVM_MIPS_AUX_FPU) {
		if (!IS_ENABLED(CONFIG_KVM_MIPS_VZ)) {
			set_c0_status(ST0_CU1);
			enable_fpu_hazard();

		__kvm_save_fpu(&vcpu->arch);
		vcpu->arch.aux_inuse &= ~KVM_MIPS_AUX_FPU;
		trace_kvm_aux(vcpu, KVM_TRACE_AUX_SAVE, KVM_TRACE_AUX_FPU);

		clear_c0_status(ST0_CU1 | ST0_FR);
		disable_fpu_hazard();
/*
 * Step over a specific ctc1 to FCSR and a specific ctcmsa to MSACSR which are
 * used to restore guest FCSR/MSACSR state and may trigger a "harmless" FP/MSAFP
 * exception if cause bits are set in the value being written.
 */
static int kvm_mips_csr_die_notify(struct notifier_block *self,
				   unsigned long cmd, void *ptr)
	struct die_args *args = (struct die_args *)ptr;
	struct pt_regs *regs = args->regs;

	/* Only interested in FPE and MSAFPE */
	if (cmd != DIE_FP && cmd != DIE_MSAFP)

	/* Return immediately if guest context isn't active */
	if (!(current->flags & PF_VCPU))

	/* Should never get here from user mode */
	BUG_ON(user_mode(regs));

	pc = instruction_pointer(regs);

	/* match 2nd instruction in __kvm_restore_fcsr */
	if (pc != (unsigned long)&__kvm_restore_fcsr + 4)

	/* match 2nd/3rd instruction in __kvm_restore_msacsr */
	    pc < (unsigned long)&__kvm_restore_msacsr + 4 ||
	    pc > (unsigned long)&__kvm_restore_msacsr + 8)

	/* Move PC forward a little and continue executing */
	instruction_pointer(regs) += 4;
static struct notifier_block kvm_mips_csr_die_notifier = {
	.notifier_call = kvm_mips_csr_die_notify,

static int __init kvm_mips_init(void)
		pr_warn("KVM does not yet support MMIDs. KVM Disabled\n");

	ret = kvm_mips_entry_setup();

	ret = kvm_init(NULL, sizeof(struct kvm_vcpu), 0, THIS_MODULE);

	register_die_notifier(&kvm_mips_csr_die_notifier);

static void __exit kvm_mips_exit(void)
	unregister_die_notifier(&kvm_mips_csr_die_notifier);

module_init(kvm_mips_init);
module_exit(kvm_mips_exit);

EXPORT_TRACEPOINT_SYMBOL(kvm_exit);