// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright IBM Corp. 2007
 * Copyright 2011 Freescale Semiconductor, Inc.
 *
 * Authors: Hollis Blanchard <hollisb@us.ibm.com>
 */

#include <linux/jiffies.h>
#include <linux/hrtimer.h>
#include <linux/types.h>
#include <linux/string.h>
#include <linux/kvm_host.h>
#include <linux/clockchips.h>

#include <asm/reg.h>
#include <asm/time.h>
#include <asm/byteorder.h>
#include <asm/kvm_ppc.h>
#include <asm/disassemble.h>
#include <asm/ppc-opcode.h>
#include <asm/sstep.h>
#include "timing.h"
#include "trace.h"
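
/*
 * Guard helpers for the optional register facilities: each checks
 * whether the guest currently has the facility enabled in its MSR.
 * If not, it queues the matching "facility unavailable" interrupt
 * for the guest and returns true so the caller can bail out, letting
 * the guest enable the facility and retry the access.
 */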

#ifdef CONFIG_PPC_FPU
static bool kvmppc_check_fp_disabled(struct kvm_vcpu *vcpu)
{
	if (!(kvmppc_get_msr(vcpu) & MSR_FP)) {
		kvmppc_core_queue_fpunavail(vcpu, kvmppc_get_msr(vcpu) & SRR1_PREFIXED);
		return true;
	}

	return false;
}
#endif /* CONFIG_PPC_FPU */

#ifdef CONFIG_VSX
static bool kvmppc_check_vsx_disabled(struct kvm_vcpu *vcpu)
{
	if (!(kvmppc_get_msr(vcpu) & MSR_VSX)) {
		kvmppc_core_queue_vsx_unavail(vcpu, kvmppc_get_msr(vcpu) & SRR1_PREFIXED);
		return true;
	}

	return false;
}
#endif /* CONFIG_VSX */

#ifdef CONFIG_ALTIVEC
static bool kvmppc_check_altivec_disabled(struct kvm_vcpu *vcpu)
{
	if (!(kvmppc_get_msr(vcpu) & MSR_VEC)) {
		kvmppc_core_queue_vec_unavail(vcpu, kvmppc_get_msr(vcpu) & SRR1_PREFIXED);
		return true;
	}

	return false;
}
#endif /* CONFIG_ALTIVEC */

/*
 * XXX to do:
 * lfiwax, lfiwzx
 * vector loads and stores
 *
 * Instructions that trap when used on cache-inhibited mappings
 * are not emulated here: multiple and string instructions,
 * lq/stq, and the load-reserve/store-conditional instructions.
 */
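/*
 * Emulate a single load or store that faulted on an emulated MMIO
 * region. Returns EMULATE_DONE when handled entirely in the kernel,
 * usually EMULATE_DO_MMIO when the access must be completed by
 * userspace via a KVM_EXIT_MMIO exit, or EMULATE_FAIL for
 * unhandled instructions.
 */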
int kvmppc_emulate_loadstore(struct kvm_vcpu *vcpu)
{
	ppc_inst_t inst;
	enum emulation_result emulated = EMULATE_FAIL;
	struct instruction_op op;

	/* this default type might be overwritten by subcategories */
	kvmppc_set_exit_type(vcpu, EMULATED_INST_EXITS);

	emulated = kvmppc_get_last_inst(vcpu, INST_GENERIC, &inst);
	if (emulated != EMULATE_DONE)
		return emulated;
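
	/*
	 * Reset the per-instruction MMIO scratch state. These fields
	 * are consumed once the MMIO round trip completes (see
	 * kvmppc_complete_mmio_load()), so clear them to avoid leaking
	 * state from a previously emulated instruction.
	 */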
	vcpu->arch.mmio_vsx_copy_nums = 0;
	vcpu->arch.mmio_vsx_offset = 0;
	vcpu->arch.mmio_copy_type = KVMPPC_VSX_COPY_NONE;
	vcpu->arch.mmio_sp64_extend = 0;
	vcpu->arch.mmio_sign_extend = 0;
	vcpu->arch.mmio_vmx_copy_nums = 0;
	vcpu->arch.mmio_vmx_offset = 0;
	vcpu->arch.mmio_host_swabbed = 0;

	emulated = EMULATE_FAIL;
	vcpu->arch.regs.msr = kvmppc_get_msr(vcpu);
	kvmhv_nestedv2_reload_ptregs(vcpu, &vcpu->arch.regs);
	if (analyse_instr(&op, &vcpu->arch.regs, inst) == 0) {
		int type = op.type & INSTR_TYPE_MASK;
		int size = GETSIZE(op.type);

		vcpu->mmio_is_write = OP_IS_STORE(type);

		switch (type) {
		case LOAD: {
			int instr_byte_swap = op.type & BYTEREV;

			if (op.type & SIGNEXT)
				emulated = kvmppc_handle_loads(vcpu,
						op.reg, size, !instr_byte_swap);
			else
				emulated = kvmppc_handle_load(vcpu,
						op.reg, size, !instr_byte_swap);

			if ((op.type & UPDATE) && (emulated != EMULATE_FAIL))
				kvmppc_set_gpr(vcpu, op.update_reg, op.ea);

			break;
		}
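		/*
		 * The FP/VMX/VSX load cases below mirror the plain LOAD
		 * case but route the result into the other register files
		 * by or-ing a KVM_MMIO_REG_* base into op.reg.
		 */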
#ifdef CONFIG_PPC_FPU
		case LOAD_FP:
			if (kvmppc_check_fp_disabled(vcpu))
				return EMULATE_DONE;

			if (op.type & FPCONV)
				vcpu->arch.mmio_sp64_extend = 1;

			if (op.type & SIGNEXT)
				emulated = kvmppc_handle_loads(vcpu,
					KVM_MMIO_REG_FPR|op.reg, size, 1);
			else
				emulated = kvmppc_handle_load(vcpu,
					KVM_MMIO_REG_FPR|op.reg, size, 1);

			if ((op.type & UPDATE) && (emulated != EMULATE_FAIL))
				kvmppc_set_gpr(vcpu, op.update_reg, op.ea);

			break;
#endif
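		/*
		 * VMX element accesses are naturally aligned (hardware
		 * drops the low address bits), and mmio_vmx_offset is the
		 * element's index within the 16-byte VMX register, derived
		 * from the guest effective address.
		 */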
#ifdef CONFIG_ALTIVEC
		case LOAD_VMX:
			if (kvmppc_check_altivec_disabled(vcpu))
				return EMULATE_DONE;

			/* Hardware enforces alignment of VMX accesses */
			vcpu->arch.vaddr_accessed &= ~((unsigned long)size - 1);
			vcpu->arch.paddr_accessed &= ~((unsigned long)size - 1);

			if (size == 16) { /* lvx */
				vcpu->arch.mmio_copy_type =
						KVMPPC_VMX_COPY_DWORD;
			} else if (size == 4) { /* lvewx */
				vcpu->arch.mmio_copy_type =
						KVMPPC_VMX_COPY_WORD;
			} else if (size == 2) { /* lvehx */
				vcpu->arch.mmio_copy_type =
						KVMPPC_VMX_COPY_HWORD;
			} else if (size == 1) { /* lvebx */
				vcpu->arch.mmio_copy_type =
						KVMPPC_VMX_COPY_BYTE;
			} else
				break;

			vcpu->arch.mmio_vmx_offset =
				(vcpu->arch.vaddr_accessed & 0xf)/size;

			if (size == 16) {
				vcpu->arch.mmio_vmx_copy_nums = 2;
				emulated = kvmppc_handle_vmx_load(vcpu,
						KVM_MMIO_REG_VMX|op.reg,
						8, 1);
			} else {
				vcpu->arch.mmio_vmx_copy_nums = 1;
				emulated = kvmppc_handle_vmx_load(vcpu,
						KVM_MMIO_REG_VMX|op.reg,
						size, 1);
			}
			break;
#endif
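		/*
		 * VSX accesses are split into mmio_vsx_copy_nums pieces of
		 * io_size_each bytes: e.g. lxvd2x (size 16, element_size 8)
		 * becomes two 8-byte copies, while lxsspx (size 4 <
		 * element_size 8) is a single 4-byte access whose result
		 * is widened via mmio_sp64_extend.
		 */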
#ifdef CONFIG_VSX
		case LOAD_VSX: {
			int io_size_each;

			if (op.vsx_flags & VSX_CHECK_VEC) {
				if (kvmppc_check_altivec_disabled(vcpu))
					return EMULATE_DONE;
			} else {
				if (kvmppc_check_vsx_disabled(vcpu))
					return EMULATE_DONE;
			}

			if (op.vsx_flags & VSX_FPCONV)
				vcpu->arch.mmio_sp64_extend = 1;

			if (op.element_size == 8) {
				if (op.vsx_flags & VSX_SPLAT)
					vcpu->arch.mmio_copy_type =
						KVMPPC_VSX_COPY_DWORD_LOAD_DUMP;
				else
					vcpu->arch.mmio_copy_type =
						KVMPPC_VSX_COPY_DWORD;
			} else if (op.element_size == 4) {
				if (op.vsx_flags & VSX_SPLAT)
					vcpu->arch.mmio_copy_type =
						KVMPPC_VSX_COPY_WORD_LOAD_DUMP;
				else
					vcpu->arch.mmio_copy_type =
						KVMPPC_VSX_COPY_WORD;
			} else
				break;

			if (size < op.element_size) {
				/* precision convert case: lxsspx, etc */
				vcpu->arch.mmio_vsx_copy_nums = 1;
				io_size_each = size;
			} else { /* lxvw4x, lxvd2x, etc */
				vcpu->arch.mmio_vsx_copy_nums =
					size/op.element_size;
				io_size_each = op.element_size;
			}

			emulated = kvmppc_handle_vsx_load(vcpu,
					KVM_MMIO_REG_VSX|op.reg, io_size_each,
					1, op.type & SIGNEXT);
			break;
		}
#endif
		case STORE:
			/* if need byte reverse, op.val has been reversed by
			 * analyse_instr().
			 */
			emulated = kvmppc_handle_store(vcpu, op.val, size, 1);

			if ((op.type & UPDATE) && (emulated != EMULATE_FAIL))
				kvmppc_set_gpr(vcpu, op.update_reg, op.ea);

			break;
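		/*
		 * Unlike loads, the FP/VMX/VSX store cases must read the
		 * guest's current register values, so each first asks the
		 * backend to flush live hardware register state back into
		 * vcpu->arch via the giveup_ext() hook, when one is
		 * provided.
		 */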
#ifdef CONFIG_PPC_FPU
		case STORE_FP:
			if (kvmppc_check_fp_disabled(vcpu))
				return EMULATE_DONE;

			/* The FP registers need to be flushed so that
			 * kvmppc_handle_store() can read actual FP vals
			 * from vcpu->arch.
			 */
			if (vcpu->kvm->arch.kvm_ops->giveup_ext)
				vcpu->kvm->arch.kvm_ops->giveup_ext(vcpu,
						MSR_FP);

			if (op.type & FPCONV)
				vcpu->arch.mmio_sp64_extend = 1;

			emulated = kvmppc_handle_store(vcpu,
					kvmppc_get_fpr(vcpu, op.reg), size, 1);

			if ((op.type & UPDATE) && (emulated != EMULATE_FAIL))
				kvmppc_set_gpr(vcpu, op.update_reg, op.ea);

			break;
#endif
#ifdef CONFIG_ALTIVEC
		case STORE_VMX:
			if (kvmppc_check_altivec_disabled(vcpu))
				return EMULATE_DONE;

			/* Hardware enforces alignment of VMX accesses. */
			vcpu->arch.vaddr_accessed &= ~((unsigned long)size - 1);
			vcpu->arch.paddr_accessed &= ~((unsigned long)size - 1);

			if (vcpu->kvm->arch.kvm_ops->giveup_ext)
				vcpu->kvm->arch.kvm_ops->giveup_ext(vcpu,
						MSR_VEC);
			if (size == 16) { /* stvx */
				vcpu->arch.mmio_copy_type =
						KVMPPC_VMX_COPY_DWORD;
			} else if (size == 4) { /* stvewx */
				vcpu->arch.mmio_copy_type =
						KVMPPC_VMX_COPY_WORD;
			} else if (size == 2) { /* stvehx */
				vcpu->arch.mmio_copy_type =
						KVMPPC_VMX_COPY_HWORD;
			} else if (size == 1) { /* stvebx */
				vcpu->arch.mmio_copy_type =
						KVMPPC_VMX_COPY_BYTE;
			} else
				break;

			vcpu->arch.mmio_vmx_offset =
				(vcpu->arch.vaddr_accessed & 0xf)/size;

			if (size == 16) {
				vcpu->arch.mmio_vmx_copy_nums = 2;
				emulated = kvmppc_handle_vmx_store(vcpu,
						op.reg, 8, 1);
			} else {
				vcpu->arch.mmio_vmx_copy_nums = 1;
				emulated = kvmppc_handle_vmx_store(vcpu,
						op.reg, size, 1);
			}

			break;
#endif
#ifdef CONFIG_VSX
		case STORE_VSX: {
			int io_size_each;

			if (op.vsx_flags & VSX_CHECK_VEC) {
				if (kvmppc_check_altivec_disabled(vcpu))
					return EMULATE_DONE;
			} else {
				if (kvmppc_check_vsx_disabled(vcpu))
					return EMULATE_DONE;
			}

			if (vcpu->kvm->arch.kvm_ops->giveup_ext)
				vcpu->kvm->arch.kvm_ops->giveup_ext(vcpu,
						MSR_VSX);

			if (op.vsx_flags & VSX_FPCONV)
				vcpu->arch.mmio_sp64_extend = 1;

			if (op.element_size == 8)
				vcpu->arch.mmio_copy_type =
						KVMPPC_VSX_COPY_DWORD;
			else if (op.element_size == 4)
				vcpu->arch.mmio_copy_type =
						KVMPPC_VSX_COPY_WORD;
			else
				break;

			if (size < op.element_size) {
				/* precise conversion case, like stxsspx */
				vcpu->arch.mmio_vsx_copy_nums = 1;
				io_size_each = size;
			} else { /* stxvw4x, stxvd2x, etc */
				vcpu->arch.mmio_vsx_copy_nums =
						size/op.element_size;
				io_size_each = op.element_size;
			}

			emulated = kvmppc_handle_vsx_store(vcpu,
					op.reg, io_size_each, 1);
			break;
		}
#endif
		case CACHEOP:
			/* Do nothing. The guest is performing dcbi because
			 * hardware DMA is not snooped by the dcache, but
			 * emulated DMA either goes through the dcache as
			 * normal writes, or the host kernel has handled dcache
			 * coherence.
			 */
			emulated = EMULATE_DONE;
			break;
		default:
			break;
		}
	}
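
	/*
	 * The mark-dirty call below pairs with the reload above: for
	 * nested API v2 guests it flags the pt_regs we may have modified
	 * during emulation (e.g. update-form EAs written to GPRs) so they
	 * are pushed back to the hypervisor with the rest of guest state.
	 */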
	trace_kvm_ppc_instr(ppc_inst_val(inst), kvmppc_get_pc(vcpu), emulated);
	kvmhv_nestedv2_mark_dirty_ptregs(vcpu, &vcpu->arch.regs);

	/* Advance past emulated instruction. */
	if (emulated != EMULATE_FAIL)
		kvmppc_set_pc(vcpu, kvmppc_get_pc(vcpu) + ppc_inst_len(inst));

	return emulated;
}