// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2019 Western Digital Corporation or its affiliates.
 * Copyright (c) 2022 Ventana Micro Systems Inc.
 */

#include <linux/bitops.h>
#include <linux/kvm_host.h>
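
/*
 * This file implements the trap-and-emulate path for guest instructions:
 * virtual instruction traps (WFI and CSR accesses) and guest aborts on
 * MMIO loads/stores are decoded here and either emulated in-kernel or
 * forwarded to user space.
 */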

#define INSN_OPCODE_MASK	0x007c
#define INSN_OPCODE_SHIFT	2
#define INSN_OPCODE_SYSTEM	28

#define INSN_MASK_WFI		0xffffffff
#define INSN_MATCH_WFI		0x10500073

#define INSN_MATCH_CSRRW	0x1073
#define INSN_MASK_CSRRW		0x707f
#define INSN_MATCH_CSRRS	0x2073
#define INSN_MASK_CSRRS		0x707f
#define INSN_MATCH_CSRRC	0x3073
#define INSN_MASK_CSRRC		0x707f
#define INSN_MATCH_CSRRWI	0x5073
#define INSN_MASK_CSRRWI	0x707f
#define INSN_MATCH_CSRRSI	0x6073
#define INSN_MASK_CSRRSI	0x707f
#define INSN_MATCH_CSRRCI	0x7073
#define INSN_MASK_CSRRCI	0x707f

#define INSN_MATCH_LB		0x3
#define INSN_MASK_LB		0x707f
#define INSN_MATCH_LH		0x1003
#define INSN_MASK_LH		0x707f
#define INSN_MATCH_LW		0x2003
#define INSN_MASK_LW		0x707f
#define INSN_MATCH_LD		0x3003
#define INSN_MASK_LD		0x707f
#define INSN_MATCH_LBU		0x4003
#define INSN_MASK_LBU		0x707f
#define INSN_MATCH_LHU		0x5003
#define INSN_MASK_LHU		0x707f
#define INSN_MATCH_LWU		0x6003
#define INSN_MASK_LWU		0x707f
#define INSN_MATCH_SB		0x23
#define INSN_MASK_SB		0x707f
#define INSN_MATCH_SH		0x1023
#define INSN_MASK_SH		0x707f
#define INSN_MATCH_SW		0x2023
#define INSN_MASK_SW		0x707f
#define INSN_MATCH_SD		0x3023
#define INSN_MASK_SD		0x707f

#define INSN_MATCH_C_LD		0x6000
#define INSN_MASK_C_LD		0xe003
#define INSN_MATCH_C_SD		0xe000
#define INSN_MASK_C_SD		0xe003
#define INSN_MATCH_C_LW		0x4000
#define INSN_MASK_C_LW		0xe003
#define INSN_MATCH_C_SW		0xc000
#define INSN_MASK_C_SW		0xe003
#define INSN_MATCH_C_LDSP	0x6002
#define INSN_MASK_C_LDSP	0xe003
#define INSN_MATCH_C_SDSP	0xe002
#define INSN_MASK_C_SDSP	0xe003
#define INSN_MATCH_C_LWSP	0x4002
#define INSN_MASK_C_LWSP	0xe003
#define INSN_MATCH_C_SWSP	0xc002
#define INSN_MASK_C_SWSP	0xe003
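
/*
 * In the RISC-V ISA, any encoding whose two least-significant bits are
 * both 1 is a standard 32-bit instruction; every other value of bits
 * [1:0] denotes a 16-bit compressed instruction. INSN_IS_16BIT() and
 * INSN_LEN() below rely on exactly this rule.
 */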
#define INSN_16BIT_MASK		0x3

#define INSN_IS_16BIT(insn)	(((insn) & INSN_16BIT_MASK) != INSN_16BIT_MASK)

#define INSN_LEN(insn)		(INSN_IS_16BIT(insn) ? 2 : 4)

#ifdef CONFIG_64BIT
#define LOG_REGBYTES		3
#else
#define LOG_REGBYTES		2
#endif
#define REGBYTES		(1 << LOG_REGBYTES)

#define SH_RD			7
#define SH_RS1			15
#define SH_RS2			20
#define SH_RS2C			2
#define MASK_RX			0x1f

#define RV_X(x, s, n)		(((x) >> (s)) & ((1 << (n)) - 1))
#define RVC_LW_IMM(x)		((RV_X(x, 6, 1) << 2) | \
				 (RV_X(x, 10, 3) << 3) | \
				 (RV_X(x, 5, 1) << 6))
#define RVC_LD_IMM(x)		((RV_X(x, 10, 3) << 3) | \
				 (RV_X(x, 5, 2) << 6))
#define RVC_LWSP_IMM(x)		((RV_X(x, 4, 3) << 2) | \
				 (RV_X(x, 12, 1) << 5) | \
				 (RV_X(x, 2, 2) << 6))
#define RVC_LDSP_IMM(x)		((RV_X(x, 5, 2) << 3) | \
				 (RV_X(x, 12, 1) << 5) | \
				 (RV_X(x, 2, 3) << 6))
#define RVC_SWSP_IMM(x)		((RV_X(x, 9, 4) << 2) | \
				 (RV_X(x, 7, 2) << 6))
#define RVC_SDSP_IMM(x)		((RV_X(x, 10, 3) << 3) | \
				 (RV_X(x, 7, 3) << 6))
#define RVC_RS1S(insn)		(8 + RV_X(insn, SH_RD, 3))
#define RVC_RS2S(insn)		(8 + RV_X(insn, SH_RS2C, 3))
#define RVC_RS2(insn)		RV_X(insn, SH_RS2C, 5)

#define SHIFT_RIGHT(x, y)		\
	((y) < 0 ? ((x) << -(y)) : ((x) >> (y)))

#define REG_MASK			\
	((1 << (5 + LOG_REGBYTES)) - (1 << LOG_REGBYTES))

#define REG_OFFSET(insn, pos)		\
	(SHIFT_RIGHT((insn), (pos) - LOG_REGBYTES) & REG_MASK)

#define REG_PTR(insn, pos, regs)	\
	((ulong *)((ulong)(regs) + REG_OFFSET(insn, pos)))
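
/*
 * Worked example: rd lives in bits [11:7] of an instruction, so
 * REG_OFFSET(insn, SH_RD) evaluates to rd * REGBYTES, i.e. the byte
 * offset of GPR rd within struct kvm_cpu_context (whose layout begins
 * with the x0-x31 register array). REG_PTR() then turns that offset
 * into a pointer usable by the GET_*()/SET_RD() accessors below.
 */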
#define GET_FUNCT3(insn)	(((insn) >> 12) & 7)

#define GET_RS1(insn, regs)	(*REG_PTR(insn, SH_RS1, regs))
#define GET_RS2(insn, regs)	(*REG_PTR(insn, SH_RS2, regs))
#define GET_RS1S(insn, regs)	(*REG_PTR(RVC_RS1S(insn), 0, regs))
#define GET_RS2S(insn, regs)	(*REG_PTR(RVC_RS2S(insn), 0, regs))
#define GET_RS2C(insn, regs)	(*REG_PTR(insn, SH_RS2C, regs))
#define GET_SP(regs)		(*REG_PTR(2, 0, regs))
#define SET_RD(insn, regs, val)	(*REG_PTR(insn, SH_RD, regs) = (val))
#define IMM_I(insn)		((s32)(insn) >> 20)
#define IMM_S(insn)		(((s32)(insn) >> 25 << 5) | \
				 (s32)(((insn) >> 7) & 0x1f))
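
/*
 * Note: IMM_I() and IMM_S() rely on arithmetic right shift of a signed
 * 32-bit value to sign-extend the 12-bit I-type and S-type immediates.
 */
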
struct insn_func {
	unsigned long mask;
	unsigned long match;
	/*
	 * Possible return values are as follows:
	 * 1) Returns < 0 for error case
	 * 2) Returns 0 for exit to user-space
	 * 3) Returns 1 to continue with next sepc
	 * 4) Returns 2 to continue with same sepc
	 * 5) Returns 3 to inject illegal instruction trap and continue
	 * 6) Returns 4 to inject virtual instruction trap and continue
	 *
	 * Use enum kvm_insn_return for return values
	 */
	int (*func)(struct kvm_vcpu *vcpu, struct kvm_run *run, ulong insn);
};
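
/*
 * For reference, enum kvm_insn_return (declared in asm/kvm_vcpu_insn.h)
 * is assumed to mirror the numbering above:
 *   KVM_INSN_EXIT_TO_USER_SPACE = 0, KVM_INSN_CONTINUE_NEXT_SEPC = 1,
 *   KVM_INSN_CONTINUE_SAME_SEPC = 2, KVM_INSN_ILLEGAL_TRAP = 3,
 *   KVM_INSN_VIRTUAL_TRAP = 4.
 */
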
static int truly_illegal_insn(struct kvm_vcpu *vcpu, struct kvm_run *run,
			      ulong insn)
{
	struct kvm_cpu_trap utrap = { 0 };

	/* Redirect trap to Guest VCPU */
	utrap.sepc = vcpu->arch.guest_context.sepc;
	utrap.scause = EXC_INST_ILLEGAL;
	utrap.stval = insn;
	utrap.htval = 0;
	utrap.htinst = 0;
	kvm_riscv_vcpu_trap_redirect(vcpu, &utrap);

	return 1;
}

static int truly_virtual_insn(struct kvm_vcpu *vcpu, struct kvm_run *run,
			      ulong insn)
{
	struct kvm_cpu_trap utrap = { 0 };

	/* Redirect trap to Guest VCPU */
	utrap.sepc = vcpu->arch.guest_context.sepc;
	utrap.scause = EXC_VIRTUAL_INST_FAULT;
	utrap.stval = insn;
	utrap.htval = 0;
	utrap.htinst = 0;
	kvm_riscv_vcpu_trap_redirect(vcpu, &utrap);

	return 1;
}

/**
 * kvm_riscv_vcpu_wfi -- Emulate wait for interrupt (WFI) behaviour
 *
 * @vcpu: The VCPU pointer
 */
void kvm_riscv_vcpu_wfi(struct kvm_vcpu *vcpu)
{
	if (!kvm_arch_vcpu_runnable(vcpu)) {
		kvm_vcpu_srcu_read_unlock(vcpu);
		kvm_vcpu_halt(vcpu);
		kvm_vcpu_srcu_read_lock(vcpu);
	}
}

static int wfi_insn(struct kvm_vcpu *vcpu, struct kvm_run *run, ulong insn)
{
	vcpu->stat.wfi_exit_stat++;
	kvm_riscv_vcpu_wfi(vcpu);
	return KVM_INSN_CONTINUE_NEXT_SEPC;
}
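
/*
 * A csr_func handles the CSR numbers in [base, base + count); csr_insn()
 * scans csr_funcs[] for a matching range before falling back to user
 * space.
 */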
struct csr_func {
	unsigned int base;
	unsigned int count;
	/*
	 * Possible return values are the same as the "func" callback in
	 * "struct insn_func".
	 */
	int (*func)(struct kvm_vcpu *vcpu, unsigned int csr_num,
		    unsigned long *val, unsigned long new_val,
		    unsigned long wr_mask);
};
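
/*
 * The table below is intentionally empty for now: with no in-kernel
 * handlers, every trapped CSR access is forwarded to user space by
 * csr_insn().
 */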
static const struct csr_func csr_funcs[] = { };

/**
 * kvm_riscv_vcpu_csr_return -- Handle CSR read/write after user space
 *				emulation or in-kernel emulation
 *
 * @vcpu: The VCPU pointer
 * @run:  The VCPU run struct containing the CSR data
 *
 * Returns > 0 upon failure and 0 upon success
 */
int kvm_riscv_vcpu_csr_return(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
	ulong insn;

	if (vcpu->arch.csr_decode.return_handled)
		return 0;
	vcpu->arch.csr_decode.return_handled = 1;

	/* Update destination register for CSR reads */
	insn = vcpu->arch.csr_decode.insn;
	if ((insn >> SH_RD) & MASK_RX)
		SET_RD(insn, &vcpu->arch.guest_context,
		       run->riscv_csr.ret_value);

	/* Move to next instruction */
	vcpu->arch.guest_context.sepc += INSN_LEN(insn);

	return 0;
}

static int csr_insn(struct kvm_vcpu *vcpu, struct kvm_run *run, ulong insn)
{
	int i, rc = KVM_INSN_ILLEGAL_TRAP;
	unsigned int csr_num = insn >> SH_RS2;
	unsigned int rs1_num = (insn >> SH_RS1) & MASK_RX;
	ulong rs1_val = GET_RS1(insn, &vcpu->arch.guest_context);
	const struct csr_func *tcfn, *cfn = NULL;
	ulong val = 0, wr_mask = 0, new_val = 0;

	/* Decode the CSR instruction */
	switch (GET_FUNCT3(insn)) {
	case GET_FUNCT3(INSN_MATCH_CSRRW):
		wr_mask = -1UL;
		new_val = rs1_val;
		break;
	case GET_FUNCT3(INSN_MATCH_CSRRS):
		wr_mask = rs1_val;
		new_val = -1UL;
		break;
	case GET_FUNCT3(INSN_MATCH_CSRRC):
		wr_mask = rs1_val;
		new_val = 0;
		break;
	case GET_FUNCT3(INSN_MATCH_CSRRWI):
		wr_mask = -1UL;
		new_val = rs1_num;
		break;
	case GET_FUNCT3(INSN_MATCH_CSRRSI):
		wr_mask = rs1_num;
		new_val = -1UL;
		break;
	case GET_FUNCT3(INSN_MATCH_CSRRCI):
		wr_mask = rs1_num;
		new_val = 0;
		break;
	default:
		return rc;
	}

	/* Save instruction decode info */
	vcpu->arch.csr_decode.insn = insn;
	vcpu->arch.csr_decode.return_handled = 0;

	/* Update CSR details in kvm_run struct */
	run->riscv_csr.csr_num = csr_num;
	run->riscv_csr.new_value = new_val;
	run->riscv_csr.write_mask = wr_mask;
	run->riscv_csr.ret_value = 0;

	/* Find in-kernel CSR function */
	for (i = 0; i < ARRAY_SIZE(csr_funcs); i++) {
		tcfn = &csr_funcs[i];
		if ((tcfn->base <= csr_num) &&
		    (csr_num < (tcfn->base + tcfn->count))) {
			cfn = tcfn;
			break;
		}
	}

	/* First try in-kernel CSR emulation */
	if (cfn && cfn->func) {
		rc = cfn->func(vcpu, csr_num, &val, new_val, wr_mask);
		if (rc > KVM_INSN_EXIT_TO_USER_SPACE) {
			if (rc == KVM_INSN_CONTINUE_NEXT_SEPC) {
				run->riscv_csr.ret_value = val;
				vcpu->stat.csr_exit_kernel++;
				kvm_riscv_vcpu_csr_return(vcpu, run);
				rc = KVM_INSN_CONTINUE_SAME_SEPC;
			}
			return rc;
		}
	}

	/* Exit to user-space for CSR emulation */
	if (rc <= KVM_INSN_EXIT_TO_USER_SPACE) {
		vcpu->stat.csr_exit_user++;
		run->exit_reason = KVM_EXIT_RISCV_CSR;
	}

	return rc;
}

static const struct insn_func system_opcode_funcs[] = {
	{
		.mask  = INSN_MASK_CSRRW,
		.match = INSN_MATCH_CSRRW,
		.func  = csr_insn,
	},
	{
		.mask  = INSN_MASK_CSRRS,
		.match = INSN_MATCH_CSRRS,
		.func  = csr_insn,
	},
	{
		.mask  = INSN_MASK_CSRRC,
		.match = INSN_MATCH_CSRRC,
		.func  = csr_insn,
	},
	{
		.mask  = INSN_MASK_CSRRWI,
		.match = INSN_MATCH_CSRRWI,
		.func  = csr_insn,
	},
	{
		.mask  = INSN_MASK_CSRRSI,
		.match = INSN_MATCH_CSRRSI,
		.func  = csr_insn,
	},
	{
		.mask  = INSN_MASK_CSRRCI,
		.match = INSN_MATCH_CSRRCI,
		.func  = csr_insn,
	},
	{
		.mask  = INSN_MASK_WFI,
		.match = INSN_MATCH_WFI,
		.func  = wfi_insn,
	},
};

static int system_opcode_insn(struct kvm_vcpu *vcpu, struct kvm_run *run,
			      ulong insn)
{
	int i, rc = KVM_INSN_ILLEGAL_TRAP;
	const struct insn_func *ifn;

	for (i = 0; i < ARRAY_SIZE(system_opcode_funcs); i++) {
		ifn = &system_opcode_funcs[i];
		if ((insn & ifn->mask) == ifn->match) {
			rc = ifn->func(vcpu, run, insn);
			break;
		}
	}

	switch (rc) {
	case KVM_INSN_ILLEGAL_TRAP:
		return truly_illegal_insn(vcpu, run, insn);
	case KVM_INSN_VIRTUAL_TRAP:
		return truly_virtual_insn(vcpu, run, insn);
	case KVM_INSN_CONTINUE_NEXT_SEPC:
		vcpu->arch.guest_context.sepc += INSN_LEN(insn);
		break;
	default:
		break;
	}

	return (rc <= 0) ? rc : 1;
}

/**
 * kvm_riscv_vcpu_virtual_insn -- Handle virtual instruction trap
 *
 * @vcpu: The VCPU pointer
 * @run:  The VCPU run struct containing the mmio data
 * @trap: Trap details
 *
 * Returns > 0 to continue run-loop
 * Returns 0 to exit run-loop and handle in user-space.
 * Returns < 0 to report failure and exit run-loop
 */
int kvm_riscv_vcpu_virtual_insn(struct kvm_vcpu *vcpu, struct kvm_run *run,
				struct kvm_cpu_trap *trap)
{
	unsigned long insn = trap->stval;
	struct kvm_cpu_trap utrap = { 0 };
	struct kvm_cpu_context *ct;

	if (unlikely(INSN_IS_16BIT(insn))) {
		if (insn == 0) {
			ct = &vcpu->arch.guest_context;
			insn = kvm_riscv_vcpu_unpriv_read(vcpu, true,
							  ct->sepc,
							  &utrap);
			if (utrap.scause) {
				utrap.sepc = ct->sepc;
				kvm_riscv_vcpu_trap_redirect(vcpu, &utrap);
				return 1;
			}
		}
		if (INSN_IS_16BIT(insn))
			return truly_illegal_insn(vcpu, run, insn);
	}

	switch ((insn & INSN_OPCODE_MASK) >> INSN_OPCODE_SHIFT) {
	case INSN_OPCODE_SYSTEM:
		return system_opcode_insn(vcpu, run, insn);
	default:
		return truly_illegal_insn(vcpu, run, insn);
	}
}

/**
 * kvm_riscv_vcpu_mmio_load -- Emulate MMIO load instruction
 *
 * @vcpu: The VCPU pointer
 * @run:  The VCPU run struct containing the mmio data
 * @fault_addr: Guest physical address to load
 * @htinst: Transformed encoding of the load instruction
 *
 * Returns > 0 to continue run-loop
 * Returns 0 to exit run-loop and handle in user-space.
 * Returns < 0 to report failure and exit run-loop
 */
int kvm_riscv_vcpu_mmio_load(struct kvm_vcpu *vcpu, struct kvm_run *run,
			     unsigned long fault_addr,
			     unsigned long htinst)
{
	u8 data_buf[8];
	unsigned long insn;
	int shift = 0, len = 0, insn_len = 0;
	struct kvm_cpu_trap utrap = { 0 };
	struct kvm_cpu_context *ct = &vcpu->arch.guest_context;

	/* Determine trapped instruction */
	if (htinst & 0x1) {
		/*
		 * Bit[0] == 1 implies trapped instruction value is
		 * transformed instruction or custom instruction.
		 * Setting bits [1:0] makes the value decode as a
		 * regular 32-bit encoding for the macros above.
		 */
		insn = htinst | INSN_16BIT_MASK;
		insn_len = (htinst & BIT(1)) ? INSN_LEN(insn) : 2;
	} else {
		/*
		 * Bit[0] == 0 implies trapped instruction value is
		 * zero or special value.
		 */
		insn = kvm_riscv_vcpu_unpriv_read(vcpu, true, ct->sepc,
						  &utrap);
		if (utrap.scause) {
			/* Redirect trap if we failed to read instruction */
			utrap.sepc = ct->sepc;
			kvm_riscv_vcpu_trap_redirect(vcpu, &utrap);
			return 1;
		}
		insn_len = INSN_LEN(insn);
	}

	/* Decode length of MMIO and shift */
	if ((insn & INSN_MASK_LW) == INSN_MATCH_LW) {
		len = 4;
		shift = 8 * (sizeof(ulong) - len);
	} else if ((insn & INSN_MASK_LB) == INSN_MATCH_LB) {
		len = 1;
		shift = 8 * (sizeof(ulong) - len);
	} else if ((insn & INSN_MASK_LBU) == INSN_MATCH_LBU) {
		len = 1;
		shift = 8 * (sizeof(ulong) - len);
#ifdef CONFIG_64BIT
	} else if ((insn & INSN_MASK_LD) == INSN_MATCH_LD) {
		len = 8;
		shift = 8 * (sizeof(ulong) - len);
	} else if ((insn & INSN_MASK_LWU) == INSN_MATCH_LWU) {
		len = 4;
#endif
	} else if ((insn & INSN_MASK_LH) == INSN_MATCH_LH) {
		len = 2;
		shift = 8 * (sizeof(ulong) - len);
	} else if ((insn & INSN_MASK_LHU) == INSN_MATCH_LHU) {
		len = 2;
#ifdef CONFIG_64BIT
	} else if ((insn & INSN_MASK_C_LD) == INSN_MATCH_C_LD) {
		len = 8;
		shift = 8 * (sizeof(ulong) - len);
		insn = RVC_RS2S(insn) << SH_RD;
	} else if ((insn & INSN_MASK_C_LDSP) == INSN_MATCH_C_LDSP &&
		   ((insn >> SH_RD) & 0x1f)) {
		len = 8;
		shift = 8 * (sizeof(ulong) - len);
#endif
	} else if ((insn & INSN_MASK_C_LW) == INSN_MATCH_C_LW) {
		len = 4;
		shift = 8 * (sizeof(ulong) - len);
		insn = RVC_RS2S(insn) << SH_RD;
	} else if ((insn & INSN_MASK_C_LWSP) == INSN_MATCH_C_LWSP &&
		   ((insn >> SH_RD) & 0x1f)) {
		len = 4;
		shift = 8 * (sizeof(ulong) - len);
	} else {
		return -EOPNOTSUPP;
	}

	/* Fault address should be aligned to length of MMIO */
	if (fault_addr & (len - 1))
		return -EIO;

	/* Save instruction decode info */
	vcpu->arch.mmio_decode.insn = insn;
	vcpu->arch.mmio_decode.insn_len = insn_len;
	vcpu->arch.mmio_decode.shift = shift;
	vcpu->arch.mmio_decode.len = len;
	vcpu->arch.mmio_decode.return_handled = 0;

	/* Update MMIO details in kvm_run struct */
	run->mmio.is_write = false;
	run->mmio.phys_addr = fault_addr;
	run->mmio.len = len;

	/* Try to handle MMIO access in the kernel */
	if (!kvm_io_bus_read(vcpu, KVM_MMIO_BUS, fault_addr, len, data_buf)) {
		/* Successfully handled MMIO access in the kernel so resume */
		memcpy(run->mmio.data, data_buf, len);
		vcpu->stat.mmio_exit_kernel++;
		kvm_riscv_vcpu_mmio_return(vcpu, run);
		return 1;
	}

	/* Exit to userspace for MMIO emulation */
	vcpu->stat.mmio_exit_user++;
	run->exit_reason = KVM_EXIT_MMIO;

	return 0;
}

/**
 * kvm_riscv_vcpu_mmio_store -- Emulate MMIO store instruction
 *
 * @vcpu: The VCPU pointer
 * @run:  The VCPU run struct containing the mmio data
 * @fault_addr: Guest physical address to store
 * @htinst: Transformed encoding of the store instruction
 *
 * Returns > 0 to continue run-loop
 * Returns 0 to exit run-loop and handle in user-space.
 * Returns < 0 to report failure and exit run-loop
 */
int kvm_riscv_vcpu_mmio_store(struct kvm_vcpu *vcpu, struct kvm_run *run,
			      unsigned long fault_addr,
			      unsigned long htinst)
{
	u8 data8;
	u16 data16;
	u32 data32;
	u64 data64;
	ulong data;
	unsigned long insn;
	int len = 0, insn_len = 0;
	struct kvm_cpu_trap utrap = { 0 };
	struct kvm_cpu_context *ct = &vcpu->arch.guest_context;

	/* Determine trapped instruction */
	if (htinst & 0x1) {
		/*
		 * Bit[0] == 1 implies trapped instruction value is
		 * transformed instruction or custom instruction.
		 */
		insn = htinst | INSN_16BIT_MASK;
		insn_len = (htinst & BIT(1)) ? INSN_LEN(insn) : 2;
	} else {
		/*
		 * Bit[0] == 0 implies trapped instruction value is
		 * zero or special value.
		 */
		insn = kvm_riscv_vcpu_unpriv_read(vcpu, true, ct->sepc,
						  &utrap);
		if (utrap.scause) {
			/* Redirect trap if we failed to read instruction */
			utrap.sepc = ct->sepc;
			kvm_riscv_vcpu_trap_redirect(vcpu, &utrap);
			return 1;
		}
		insn_len = INSN_LEN(insn);
	}

	data = GET_RS2(insn, &vcpu->arch.guest_context);
	data8 = data16 = data32 = data64 = data;

	if ((insn & INSN_MASK_SW) == INSN_MATCH_SW) {
		len = 4;
	} else if ((insn & INSN_MASK_SB) == INSN_MATCH_SB) {
		len = 1;
#ifdef CONFIG_64BIT
	} else if ((insn & INSN_MASK_SD) == INSN_MATCH_SD) {
		len = 8;
#endif
	} else if ((insn & INSN_MASK_SH) == INSN_MATCH_SH) {
		len = 2;
#ifdef CONFIG_64BIT
	} else if ((insn & INSN_MASK_C_SD) == INSN_MATCH_C_SD) {
		len = 8;
		data64 = GET_RS2S(insn, &vcpu->arch.guest_context);
	} else if ((insn & INSN_MASK_C_SDSP) == INSN_MATCH_C_SDSP &&
		   ((insn >> SH_RD) & 0x1f)) {
		len = 8;
		data64 = GET_RS2C(insn, &vcpu->arch.guest_context);
#endif
	} else if ((insn & INSN_MASK_C_SW) == INSN_MATCH_C_SW) {
		len = 4;
		data32 = GET_RS2S(insn, &vcpu->arch.guest_context);
	} else if ((insn & INSN_MASK_C_SWSP) == INSN_MATCH_C_SWSP &&
		   ((insn >> SH_RD) & 0x1f)) {
		len = 4;
		data32 = GET_RS2C(insn, &vcpu->arch.guest_context);
	} else {
		return -EOPNOTSUPP;
	}

	/* Fault address should be aligned to length of MMIO */
	if (fault_addr & (len - 1))
		return -EIO;

	/* Save instruction decode info */
	vcpu->arch.mmio_decode.insn = insn;
	vcpu->arch.mmio_decode.insn_len = insn_len;
	vcpu->arch.mmio_decode.shift = 0;
	vcpu->arch.mmio_decode.len = len;
	vcpu->arch.mmio_decode.return_handled = 0;

	/* Copy data to kvm_run instance */
	switch (len) {
	case 1:
		*((u8 *)run->mmio.data) = data8;
		break;
	case 2:
		*((u16 *)run->mmio.data) = data16;
		break;
	case 4:
		*((u32 *)run->mmio.data) = data32;
		break;
	case 8:
		*((u64 *)run->mmio.data) = data64;
		break;
	default:
		return -EOPNOTSUPP;
	}

	/* Update MMIO details in kvm_run struct */
	run->mmio.is_write = true;
	run->mmio.phys_addr = fault_addr;
	run->mmio.len = len;

	/* Try to handle MMIO access in the kernel */
	if (!kvm_io_bus_write(vcpu, KVM_MMIO_BUS,
			      fault_addr, len, run->mmio.data)) {
		/* Successfully handled MMIO access in the kernel so resume */
		vcpu->stat.mmio_exit_kernel++;
		kvm_riscv_vcpu_mmio_return(vcpu, run);
		return 1;
	}

	/* Exit to userspace for MMIO emulation */
	vcpu->stat.mmio_exit_user++;
	run->exit_reason = KVM_EXIT_MMIO;

	return 0;
}

/**
 * kvm_riscv_vcpu_mmio_return -- Handle MMIO loads after user space emulation
 *			     or in-kernel IO emulation
 *
 * @vcpu: The VCPU pointer
 * @run:  The VCPU run struct containing the mmio data
 */
int kvm_riscv_vcpu_mmio_return(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
	u8 data8;
	u16 data16;
	u32 data32;
	u64 data64;
	ulong insn;
	int len, shift;

	if (vcpu->arch.mmio_decode.return_handled)
		return 0;

	vcpu->arch.mmio_decode.return_handled = 1;
	insn = vcpu->arch.mmio_decode.insn;

	if (run->mmio.is_write)
		goto done;

	len = vcpu->arch.mmio_decode.len;
	shift = vcpu->arch.mmio_decode.shift;

	switch (len) {
	case 1:
		data8 = *((u8 *)run->mmio.data);
		SET_RD(insn, &vcpu->arch.guest_context,
			(ulong)data8 << shift >> shift);
		break;
	case 2:
		data16 = *((u16 *)run->mmio.data);
		SET_RD(insn, &vcpu->arch.guest_context,
			(ulong)data16 << shift >> shift);
		break;
	case 4:
		data32 = *((u32 *)run->mmio.data);
		SET_RD(insn, &vcpu->arch.guest_context,
			(ulong)data32 << shift >> shift);
		break;
	case 8:
		data64 = *((u64 *)run->mmio.data);
		SET_RD(insn, &vcpu->arch.guest_context,
			(ulong)data64 << shift >> shift);
		break;
	default:
		return -EOPNOTSUPP;
	}

done:
	/* Move to next instruction */
	vcpu->arch.guest_context.sepc += vcpu->arch.mmio_decode.insn_len;

	return 0;
}