// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2020-2023 Loongson Technology Corporation Limited
 */
#include <linux/err.h>
#include <linux/errno.h>
#include <linux/kvm_host.h>
#include <linux/module.h>
#include <linux/preempt.h>
#include <linux/vmalloc.h>
#include <asm/fpu.h>
#include <asm/inst.h>
#include <asm/loongarch.h>
#include <asm/mmzone.h>
#include <asm/numa.h>
#include <asm/time.h>
#include <asm/tlb.h>
#include <asm/kvm_csr.h>
#include <asm/kvm_vcpu.h>
#include "trace.h"
23 static unsigned long kvm_emu_read_csr(struct kvm_vcpu *vcpu, int csrid)
25 unsigned long val = 0;
26 struct loongarch_csrs *csr = vcpu->arch.csr;
29 * From LoongArch Reference Manual Volume 1 Chapter 4.2.1
30 * For undefined CSR id, return value is 0
32 if (get_gcsr_flag(csrid) & SW_GCSR)
33 val = kvm_read_sw_gcsr(csr, csrid);
35 pr_warn_once("Unsupported csrrd 0x%x with pc %lx\n", csrid, vcpu->arch.pc);
40 static unsigned long kvm_emu_write_csr(struct kvm_vcpu *vcpu, int csrid, unsigned long val)
42 unsigned long old = 0;
43 struct loongarch_csrs *csr = vcpu->arch.csr;
45 if (get_gcsr_flag(csrid) & SW_GCSR) {
46 old = kvm_read_sw_gcsr(csr, csrid);
47 kvm_write_sw_gcsr(csr, csrid, val);
49 pr_warn_once("Unsupported csrwr 0x%x with pc %lx\n", csrid, vcpu->arch.pc);
54 static unsigned long kvm_emu_xchg_csr(struct kvm_vcpu *vcpu, int csrid,
55 unsigned long csr_mask, unsigned long val)
57 unsigned long old = 0;
58 struct loongarch_csrs *csr = vcpu->arch.csr;
60 if (get_gcsr_flag(csrid) & SW_GCSR) {
61 old = kvm_read_sw_gcsr(csr, csrid);
62 val = (old & ~csr_mask) | (val & csr_mask);
63 kvm_write_sw_gcsr(csr, csrid, val);
66 pr_warn_once("Unsupported csrxchg 0x%x with pc %lx\n", csrid, vcpu->arch.pc);
71 static int kvm_handle_csr(struct kvm_vcpu *vcpu, larch_inst inst)
73 unsigned int rd, rj, csrid;
74 unsigned long csr_mask, val = 0;
80 * rj != 0,1 means csrxchg
82 rd = inst.reg2csr_format.rd;
83 rj = inst.reg2csr_format.rj;
84 csrid = inst.reg2csr_format.csr;
88 case 0: /* process csrrd */
89 val = kvm_emu_read_csr(vcpu, csrid);
90 vcpu->arch.gprs[rd] = val;
92 case 1: /* process csrwr */
93 val = vcpu->arch.gprs[rd];
94 val = kvm_emu_write_csr(vcpu, csrid, val);
95 vcpu->arch.gprs[rd] = val;
97 default: /* process csrxchg */
98 val = vcpu->arch.gprs[rd];
99 csr_mask = vcpu->arch.gprs[rj];
100 val = kvm_emu_xchg_csr(vcpu, csrid, csr_mask, val);
101 vcpu->arch.gprs[rd] = val;
107 int kvm_emu_iocsr(larch_inst inst, struct kvm_run *run, struct kvm_vcpu *vcpu)
111 u32 addr, rd, rj, opcode;
114 * Each IOCSR with different opcode
116 rd = inst.reg2_format.rd;
117 rj = inst.reg2_format.rj;
118 opcode = inst.reg2_format.opcode;
119 addr = vcpu->arch.gprs[rj];
120 ret = EMULATE_DO_IOCSR;
121 run->iocsr_io.phys_addr = addr;
122 run->iocsr_io.is_write = 0;
124 /* LoongArch is Little endian */
127 run->iocsr_io.len = 1;
130 run->iocsr_io.len = 2;
133 run->iocsr_io.len = 4;
136 run->iocsr_io.len = 8;
139 run->iocsr_io.len = 1;
140 run->iocsr_io.is_write = 1;
143 run->iocsr_io.len = 2;
144 run->iocsr_io.is_write = 1;
147 run->iocsr_io.len = 4;
148 run->iocsr_io.is_write = 1;
151 run->iocsr_io.len = 8;
152 run->iocsr_io.is_write = 1;
159 if (ret == EMULATE_DO_IOCSR) {
160 if (run->iocsr_io.is_write) {
161 val = vcpu->arch.gprs[rd];
162 memcpy(run->iocsr_io.data, &val, run->iocsr_io.len);
164 vcpu->arch.io_gpr = rd;
170 int kvm_complete_iocsr_read(struct kvm_vcpu *vcpu, struct kvm_run *run)
172 enum emulation_result er = EMULATE_DONE;
173 unsigned long *gpr = &vcpu->arch.gprs[vcpu->arch.io_gpr];
175 switch (run->iocsr_io.len) {
177 *gpr = *(s8 *)run->iocsr_io.data;
180 *gpr = *(s16 *)run->iocsr_io.data;
183 *gpr = *(s32 *)run->iocsr_io.data;
186 *gpr = *(s64 *)run->iocsr_io.data;
189 kvm_err("Bad IOCSR length: %d, addr is 0x%lx\n",
190 run->iocsr_io.len, vcpu->arch.badv);
198 int kvm_emu_idle(struct kvm_vcpu *vcpu)
200 ++vcpu->stat.idle_exits;
201 trace_kvm_exit_idle(vcpu, KVM_TRACE_EXIT_IDLE);
203 if (!kvm_arch_vcpu_runnable(vcpu)) {
205 * Switch to the software timer before halt-polling/blocking as
206 * the guest's timer may be a break event for the vCPU, and the
207 * hypervisor timer runs only when the CPU is in guest mode.
208 * Switch before halt-polling so that KVM recognizes an expired
209 * timer before blocking.
211 kvm_save_timer(vcpu);
212 kvm_vcpu_block(vcpu);
218 static int kvm_trap_handle_gspr(struct kvm_vcpu *vcpu)
222 unsigned long curr_pc;
224 enum emulation_result er = EMULATE_DONE;
225 struct kvm_run *run = vcpu->run;
227 /* Fetch the instruction */
228 inst.word = vcpu->arch.badi;
229 curr_pc = vcpu->arch.pc;
230 update_pc(&vcpu->arch);
232 trace_kvm_exit_gspr(vcpu, inst.word);
234 switch (((inst.word >> 24) & 0xff)) {
235 case 0x0: /* CPUCFG GSPR */
236 if (inst.reg2_format.opcode == 0x1B) {
237 rd = inst.reg2_format.rd;
238 rj = inst.reg2_format.rj;
239 ++vcpu->stat.cpucfg_exits;
240 index = vcpu->arch.gprs[rj];
243 * By LoongArch Reference Manual 2.2.10.5
244 * return value is 0 for undefined cpucfg index
246 if (index < KVM_MAX_CPUCFG_REGS)
247 vcpu->arch.gprs[rd] = vcpu->arch.cpucfg[index];
249 vcpu->arch.gprs[rd] = 0;
252 case 0x4: /* CSR{RD,WR,XCHG} GSPR */
253 er = kvm_handle_csr(vcpu, inst);
255 case 0x6: /* Cache, Idle and IOCSR GSPR */
256 switch (((inst.word >> 22) & 0x3ff)) {
257 case 0x18: /* Cache GSPR */
259 trace_kvm_exit_cache(vcpu, KVM_TRACE_EXIT_CACHE);
261 case 0x19: /* Idle/IOCSR GSPR */
262 switch (((inst.word >> 15) & 0x1ffff)) {
263 case 0xc90: /* IOCSR GSPR */
264 er = kvm_emu_iocsr(inst, run, vcpu);
266 case 0xc91: /* Idle GSPR */
267 er = kvm_emu_idle(vcpu);
284 /* Rollback PC only if emulation was unsuccessful */
285 if (er == EMULATE_FAIL) {
286 kvm_err("[%#lx]%s: unsupported gspr instruction 0x%08x\n",
287 curr_pc, __func__, inst.word);
289 kvm_arch_vcpu_dump_regs(vcpu);
290 vcpu->arch.pc = curr_pc;
298 * 1) Execute CPUCFG instruction;
299 * 2) Execute CACOP/IDLE instructions;
300 * 3) Access to unimplemented CSRs/IOCSRs.
302 static int kvm_handle_gspr(struct kvm_vcpu *vcpu)
304 int ret = RESUME_GUEST;
305 enum emulation_result er = EMULATE_DONE;
307 er = kvm_trap_handle_gspr(vcpu);
309 if (er == EMULATE_DONE) {
311 } else if (er == EMULATE_DO_MMIO) {
312 vcpu->run->exit_reason = KVM_EXIT_MMIO;
314 } else if (er == EMULATE_DO_IOCSR) {
315 vcpu->run->exit_reason = KVM_EXIT_LOONGARCH_IOCSR;
318 kvm_queue_exception(vcpu, EXCCODE_INE, 0);
325 int kvm_emu_mmio_read(struct kvm_vcpu *vcpu, larch_inst inst)
328 unsigned int op8, opcode, rd;
329 struct kvm_run *run = vcpu->run;
331 run->mmio.phys_addr = vcpu->arch.badv;
332 vcpu->mmio_needed = 2; /* signed */
333 op8 = (inst.word >> 24) & 0xff;
334 ret = EMULATE_DO_MMIO;
337 case 0x24 ... 0x27: /* ldptr.w/d process */
338 rd = inst.reg2i14_format.rd;
339 opcode = inst.reg2i14_format.opcode;
352 case 0x28 ... 0x2e: /* ld.b/h/w/d, ld.bu/hu/wu process */
353 rd = inst.reg2i12_format.rd;
354 opcode = inst.reg2i12_format.opcode;
361 vcpu->mmio_needed = 1; /* unsigned */
368 vcpu->mmio_needed = 1; /* unsigned */
375 vcpu->mmio_needed = 1; /* unsigned */
386 case 0x38: /* ldx.b/h/w/d, ldx.bu/hu/wu process */
387 rd = inst.reg3_format.rd;
388 opcode = inst.reg3_format.opcode;
396 vcpu->mmio_needed = 1; /* unsigned */
403 vcpu->mmio_needed = 1; /* unsigned */
410 vcpu->mmio_needed = 1; /* unsigned */
424 if (ret == EMULATE_DO_MMIO) {
425 /* Set for kvm_complete_mmio_read() use */
426 vcpu->arch.io_gpr = rd;
427 run->mmio.is_write = 0;
428 vcpu->mmio_is_write = 0;
430 kvm_err("Read not supported Inst=0x%08x @%lx BadVaddr:%#lx\n",
431 inst.word, vcpu->arch.pc, vcpu->arch.badv);
432 kvm_arch_vcpu_dump_regs(vcpu);
433 vcpu->mmio_needed = 0;
439 int kvm_complete_mmio_read(struct kvm_vcpu *vcpu, struct kvm_run *run)
441 enum emulation_result er = EMULATE_DONE;
442 unsigned long *gpr = &vcpu->arch.gprs[vcpu->arch.io_gpr];
444 /* Update with new PC */
445 update_pc(&vcpu->arch);
446 switch (run->mmio.len) {
448 if (vcpu->mmio_needed == 2)
449 *gpr = *(s8 *)run->mmio.data;
451 *gpr = *(u8 *)run->mmio.data;
454 if (vcpu->mmio_needed == 2)
455 *gpr = *(s16 *)run->mmio.data;
457 *gpr = *(u16 *)run->mmio.data;
460 if (vcpu->mmio_needed == 2)
461 *gpr = *(s32 *)run->mmio.data;
463 *gpr = *(u32 *)run->mmio.data;
466 *gpr = *(s64 *)run->mmio.data;
469 kvm_err("Bad MMIO length: %d, addr is 0x%lx\n",
470 run->mmio.len, vcpu->arch.badv);
478 int kvm_emu_mmio_write(struct kvm_vcpu *vcpu, larch_inst inst)
481 unsigned int rd, op8, opcode;
482 unsigned long curr_pc, rd_val = 0;
483 struct kvm_run *run = vcpu->run;
484 void *data = run->mmio.data;
487 * Update PC and hold onto current PC in case there is
488 * an error and we want to rollback the PC
490 curr_pc = vcpu->arch.pc;
491 update_pc(&vcpu->arch);
493 op8 = (inst.word >> 24) & 0xff;
494 run->mmio.phys_addr = vcpu->arch.badv;
495 ret = EMULATE_DO_MMIO;
497 case 0x24 ... 0x27: /* stptr.w/d process */
498 rd = inst.reg2i14_format.rd;
499 opcode = inst.reg2i14_format.opcode;
504 *(unsigned int *)data = vcpu->arch.gprs[rd];
508 *(unsigned long *)data = vcpu->arch.gprs[rd];
515 case 0x28 ... 0x2e: /* st.b/h/w/d process */
516 rd = inst.reg2i12_format.rd;
517 opcode = inst.reg2i12_format.opcode;
518 rd_val = vcpu->arch.gprs[rd];
523 *(unsigned char *)data = rd_val;
527 *(unsigned short *)data = rd_val;
531 *(unsigned int *)data = rd_val;
535 *(unsigned long *)data = rd_val;
542 case 0x38: /* stx.b/h/w/d process */
543 rd = inst.reg3_format.rd;
544 opcode = inst.reg3_format.opcode;
549 *(unsigned char *)data = vcpu->arch.gprs[rd];
553 *(unsigned short *)data = vcpu->arch.gprs[rd];
557 *(unsigned int *)data = vcpu->arch.gprs[rd];
561 *(unsigned long *)data = vcpu->arch.gprs[rd];
572 if (ret == EMULATE_DO_MMIO) {
573 run->mmio.is_write = 1;
574 vcpu->mmio_needed = 1;
575 vcpu->mmio_is_write = 1;
577 vcpu->arch.pc = curr_pc;
578 kvm_err("Write not supported Inst=0x%08x @%lx BadVaddr:%#lx\n",
579 inst.word, vcpu->arch.pc, vcpu->arch.badv);
580 kvm_arch_vcpu_dump_regs(vcpu);
581 /* Rollback PC if emulation was unsuccessful */
587 static int kvm_handle_rdwr_fault(struct kvm_vcpu *vcpu, bool write)
591 enum emulation_result er = EMULATE_DONE;
592 struct kvm_run *run = vcpu->run;
593 unsigned long badv = vcpu->arch.badv;
595 ret = kvm_handle_mm_fault(vcpu, badv, write);
598 inst.word = vcpu->arch.badi;
600 er = kvm_emu_mmio_write(vcpu, inst);
602 /* A code fetch fault doesn't count as an MMIO */
603 if (kvm_is_ifetch_fault(&vcpu->arch)) {
604 kvm_queue_exception(vcpu, EXCCODE_ADE, EXSUBCODE_ADEF);
608 er = kvm_emu_mmio_read(vcpu, inst);
612 if (er == EMULATE_DONE) {
614 } else if (er == EMULATE_DO_MMIO) {
615 run->exit_reason = KVM_EXIT_MMIO;
618 kvm_queue_exception(vcpu, EXCCODE_ADE, EXSUBCODE_ADEM);
625 static int kvm_handle_read_fault(struct kvm_vcpu *vcpu)
627 return kvm_handle_rdwr_fault(vcpu, false);
630 static int kvm_handle_write_fault(struct kvm_vcpu *vcpu)
632 return kvm_handle_rdwr_fault(vcpu, true);
636 * kvm_handle_fpu_disabled() - Guest used fpu however it is disabled at host
637 * @vcpu: Virtual CPU context.
639 * Handle when the guest attempts to use fpu which hasn't been allowed
640 * by the root context.
642 static int kvm_handle_fpu_disabled(struct kvm_vcpu *vcpu)
644 struct kvm_run *run = vcpu->run;
647 * If guest FPU not present, the FPU operation should have been
648 * treated as a reserved instruction!
649 * If FPU already in use, we shouldn't get this at all.
651 if (WARN_ON(vcpu->arch.aux_inuse & KVM_LARCH_FPU)) {
652 kvm_err("%s internal error\n", __func__);
653 run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
663 * LoongArch KVM callback handling for unimplemented guest exiting
665 static int kvm_fault_ni(struct kvm_vcpu *vcpu)
667 unsigned int ecode, inst;
668 unsigned long estat, badv;
670 /* Fetch the instruction */
671 inst = vcpu->arch.badi;
672 badv = vcpu->arch.badv;
673 estat = vcpu->arch.host_estat;
674 ecode = (estat & CSR_ESTAT_EXC) >> CSR_ESTAT_EXC_SHIFT;
675 kvm_err("ECode: %d PC=%#lx Inst=0x%08x BadVaddr=%#lx ESTAT=%#lx\n",
676 ecode, vcpu->arch.pc, inst, badv, read_gcsr_estat());
677 kvm_arch_vcpu_dump_regs(vcpu);
678 kvm_queue_exception(vcpu, EXCCODE_INE, 0);
683 static exit_handle_fn kvm_fault_tables[EXCCODE_INT_START] = {
684 [0 ... EXCCODE_INT_START - 1] = kvm_fault_ni,
685 [EXCCODE_TLBI] = kvm_handle_read_fault,
686 [EXCCODE_TLBL] = kvm_handle_read_fault,
687 [EXCCODE_TLBS] = kvm_handle_write_fault,
688 [EXCCODE_TLBM] = kvm_handle_write_fault,
689 [EXCCODE_FPDIS] = kvm_handle_fpu_disabled,
690 [EXCCODE_GSPR] = kvm_handle_gspr,
693 int kvm_handle_fault(struct kvm_vcpu *vcpu, int fault)
695 return kvm_fault_tables[fault](vcpu);