/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
 *
 * Copyright IBM Corp. 2007
 * Copyright 2011 Freescale Semiconductor, Inc.
 *
 * Authors: Hollis Blanchard <hollisb@us.ibm.com>
 */

#include <linux/jiffies.h>
#include <linux/hrtimer.h>
#include <linux/types.h>
#include <linux/string.h>
#include <linux/kvm_host.h>
#include <linux/clockchips.h>

#include <asm/byteorder.h>
#include <asm/kvm_ppc.h>
#include <asm/disassemble.h>
#include <asm/ppc-opcode.h>
#include <asm/sstep.h>
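/*
 * Local KVM headers; assumed to provide kvmppc_set_exit_type(),
 * EMULATED_INST_EXITS and trace_kvm_ppc_instr() used below.
 */
#include "timing.h"
#include "trace.h"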

/*
 * Facility checks: if the guest has FP/VSX/Altivec disabled in its MSR,
 * queue the corresponding facility-unavailable interrupt and let the guest
 * handle it rather than emulating the access.
 */
#ifdef CONFIG_PPC_FPU
static bool kvmppc_check_fp_disabled(struct kvm_vcpu *vcpu)
{
	if (!(kvmppc_get_msr(vcpu) & MSR_FP)) {
		kvmppc_core_queue_fpunavail(vcpu);
		return true;
	}

	return false;
}
#endif /* CONFIG_PPC_FPU */

#ifdef CONFIG_VSX
static bool kvmppc_check_vsx_disabled(struct kvm_vcpu *vcpu)
{
	if (!(kvmppc_get_msr(vcpu) & MSR_VSX)) {
		kvmppc_core_queue_vsx_unavail(vcpu);
		return true;
	}

	return false;
}
#endif /* CONFIG_VSX */

#ifdef CONFIG_ALTIVEC
static bool kvmppc_check_altivec_disabled(struct kvm_vcpu *vcpu)
{
	if (!(kvmppc_get_msr(vcpu) & MSR_VEC)) {
		kvmppc_core_queue_vec_unavail(vcpu);
		return true;
	}

	return false;
}
#endif /* CONFIG_ALTIVEC */

/*
 * Emulation of cache-inhibited (MMIO) load and store instructions,
 * including FP, vector (VMX) and VSX loads and stores.
 *
 * Instructions that trap when used on cache-inhibited mappings
 * are not emulated here: multiple and string instructions,
 * lq/stq, and the load-reserve/store-conditional instructions.
 */
int kvmppc_emulate_loadstore(struct kvm_vcpu *vcpu)
{
	struct kvm_run *run = vcpu->run;
	u32 inst;
	enum emulation_result emulated = EMULATE_FAIL;
	int advance = 1;
	struct instruction_op op;

	/* this default type might be overwritten by subcategories */
	kvmppc_set_exit_type(vcpu, EMULATED_INST_EXITS);

	emulated = kvmppc_get_last_inst(vcpu, INST_GENERIC, &inst);
	if (emulated != EMULATE_DONE)
		return emulated;
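
	/*
	 * Reset the per-access MMIO emulation state before analysing the
	 * faulting instruction, so that settings left over from a previous
	 * emulated access cannot leak into this one.
	 */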
	/*
	 * if mmio_vsx_tx_sx_enabled == 0, copy data between
	 * VSR[0..31] and memory
	 * if mmio_vsx_tx_sx_enabled == 1, copy data between
	 * VSR[32..63] and memory
	 */
	vcpu->arch.mmio_vsx_copy_nums = 0;
	vcpu->arch.mmio_vsx_offset = 0;
	vcpu->arch.mmio_copy_type = KVMPPC_VSX_COPY_NONE;
	vcpu->arch.mmio_sp64_extend = 0;
	vcpu->arch.mmio_sign_extend = 0;
	vcpu->arch.mmio_vmx_copy_nums = 0;
	vcpu->arch.mmio_vmx_offset = 0;
	vcpu->arch.mmio_host_swabbed = 0;

	emulated = EMULATE_FAIL;
	vcpu->arch.regs.msr = vcpu->arch.shared->msr;
	if (analyse_instr(&op, &vcpu->arch.regs, inst) == 0) {
		int type = op.type & INSTR_TYPE_MASK;
		int size = GETSIZE(op.type);

		switch (type) {
		case LOAD: {
			int instr_byte_swap = op.type & BYTEREV;

			if (op.type & SIGNEXT)
				emulated = kvmppc_handle_loads(run, vcpu,
						op.reg, size, !instr_byte_swap);
			else
				emulated = kvmppc_handle_load(run, vcpu,
						op.reg, size, !instr_byte_swap);

			if ((op.type & UPDATE) && (emulated != EMULATE_FAIL))
				kvmppc_set_gpr(vcpu, op.update_reg, op.ea);

			break;
		}
#ifdef CONFIG_PPC_FPU
		case LOAD_FP:
			if (kvmppc_check_fp_disabled(vcpu))
				return EMULATE_DONE;

			if (op.type & FPCONV)
				vcpu->arch.mmio_sp64_extend = 1;

			if (op.type & SIGNEXT)
				emulated = kvmppc_handle_loads(run, vcpu,
					KVM_MMIO_REG_FPR|op.reg, size, 1);
			else
				emulated = kvmppc_handle_load(run, vcpu,
					KVM_MMIO_REG_FPR|op.reg, size, 1);

			if ((op.type & UPDATE) && (emulated != EMULATE_FAIL))
				kvmppc_set_gpr(vcpu, op.update_reg, op.ea);

			break;
#endif
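
		/*
		 * VMX accesses are emulated element by element:
		 * mmio_copy_type records the element width and
		 * mmio_vmx_copy_nums how many host MMIO transactions are
		 * issued (a 16-byte lvx/stvx is split into two 8-byte
		 * copies).
		 */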
#ifdef CONFIG_ALTIVEC
		case LOAD_VMX:
			if (kvmppc_check_altivec_disabled(vcpu))
				return EMULATE_DONE;

			/* Hardware enforces alignment of VMX accesses */
			vcpu->arch.vaddr_accessed &= ~((unsigned long)size - 1);
			vcpu->arch.paddr_accessed &= ~((unsigned long)size - 1);

			if (size == 16) { /* lvx */
				vcpu->arch.mmio_copy_type =
						KVMPPC_VMX_COPY_DWORD;
			} else if (size == 4) { /* lvewx */
				vcpu->arch.mmio_copy_type =
						KVMPPC_VMX_COPY_WORD;
			} else if (size == 2) { /* lvehx */
				vcpu->arch.mmio_copy_type =
						KVMPPC_VMX_COPY_HWORD;
			} else if (size == 1) { /* lvebx */
				vcpu->arch.mmio_copy_type =
						KVMPPC_VMX_COPY_BYTE;
			} else
				break;

			vcpu->arch.mmio_vmx_offset =
				(vcpu->arch.vaddr_accessed & 0xf)/size;

			if (size == 16) {
				vcpu->arch.mmio_vmx_copy_nums = 2;
				emulated = kvmppc_handle_vmx_load(run,
						vcpu, KVM_MMIO_REG_VMX|op.reg,
						8, 1);
			} else {
				vcpu->arch.mmio_vmx_copy_nums = 1;
				emulated = kvmppc_handle_vmx_load(run, vcpu,
						KVM_MMIO_REG_VMX|op.reg,
						size, 1);
			}

			break;
#endif
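
		/*
		 * VSX accesses copy size/element_size elements of
		 * element_size bytes each; mmio_copy_type selects the
		 * word or doubleword copy routine and, for load-and-splat
		 * forms, the splat variant.
		 */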
#ifdef CONFIG_VSX
		case LOAD_VSX: {
			int io_size_each;

			if (op.vsx_flags & VSX_CHECK_VEC) {
				if (kvmppc_check_altivec_disabled(vcpu))
					return EMULATE_DONE;
			} else {
				if (kvmppc_check_vsx_disabled(vcpu))
					return EMULATE_DONE;
			}

			if (op.vsx_flags & VSX_FPCONV)
				vcpu->arch.mmio_sp64_extend = 1;

			if (op.element_size == 8) {
				if (op.vsx_flags & VSX_SPLAT)
					vcpu->arch.mmio_copy_type =
						KVMPPC_VSX_COPY_DWORD_LOAD_DUMP;
				else
					vcpu->arch.mmio_copy_type =
						KVMPPC_VSX_COPY_DWORD;
			} else if (op.element_size == 4) {
				if (op.vsx_flags & VSX_SPLAT)
					vcpu->arch.mmio_copy_type =
						KVMPPC_VSX_COPY_WORD_LOAD_DUMP;
				else
					vcpu->arch.mmio_copy_type =
						KVMPPC_VSX_COPY_WORD;
			} else
				break;

			if (size < op.element_size) {
				/* precision convert case: lxsspx, etc */
				vcpu->arch.mmio_vsx_copy_nums = 1;
				io_size_each = size;
			} else { /* lxvw4x, lxvd2x, etc */
				vcpu->arch.mmio_vsx_copy_nums =
					size/op.element_size;
				io_size_each = op.element_size;
			}

			emulated = kvmppc_handle_vsx_load(run, vcpu,
					KVM_MMIO_REG_VSX|op.reg, io_size_each,
					1, op.type & SIGNEXT);
			break;
		}
#endif
		case STORE:
			/* if need byte reverse, op.val has been reversed by
			 * analyse_instr().
			 */
			emulated = kvmppc_handle_store(run, vcpu, op.val,
					size, 1);

			if ((op.type & UPDATE) && (emulated != EMULATE_FAIL))
				kvmppc_set_gpr(vcpu, op.update_reg, op.ea);

			break;
#ifdef CONFIG_PPC_FPU
		case STORE_FP:
			if (kvmppc_check_fp_disabled(vcpu))
				return EMULATE_DONE;

			/* The FP registers need to be flushed so that
			 * kvmppc_handle_store() can read actual FP vals
			 * from vcpu->arch.
			 */
			if (vcpu->kvm->arch.kvm_ops->giveup_ext)
				vcpu->kvm->arch.kvm_ops->giveup_ext(vcpu,
						MSR_FP);

			if (op.type & FPCONV)
				vcpu->arch.mmio_sp64_extend = 1;

			emulated = kvmppc_handle_store(run, vcpu,
					VCPU_FPR(vcpu, op.reg), size, 1);

			if ((op.type & UPDATE) && (emulated != EMULATE_FAIL))
				kvmppc_set_gpr(vcpu, op.update_reg, op.ea);

			break;
#endif
#ifdef CONFIG_ALTIVEC
		case STORE_VMX:
			if (kvmppc_check_altivec_disabled(vcpu))
				return EMULATE_DONE;

			/* Hardware enforces alignment of VMX accesses. */
			vcpu->arch.vaddr_accessed &= ~((unsigned long)size - 1);
			vcpu->arch.paddr_accessed &= ~((unsigned long)size - 1);

			if (vcpu->kvm->arch.kvm_ops->giveup_ext)
				vcpu->kvm->arch.kvm_ops->giveup_ext(vcpu,
						MSR_VEC);

			if (size == 16) { /* stvx */
				vcpu->arch.mmio_copy_type =
						KVMPPC_VMX_COPY_DWORD;
			} else if (size == 4) { /* stvewx */
				vcpu->arch.mmio_copy_type =
						KVMPPC_VMX_COPY_WORD;
			} else if (size == 2) { /* stvehx */
				vcpu->arch.mmio_copy_type =
						KVMPPC_VMX_COPY_HWORD;
			} else if (size == 1) { /* stvebx */
				vcpu->arch.mmio_copy_type =
						KVMPPC_VMX_COPY_BYTE;
			} else
				break;

			vcpu->arch.mmio_vmx_offset =
				(vcpu->arch.vaddr_accessed & 0xf)/size;

			if (size == 16) {
				vcpu->arch.mmio_vmx_copy_nums = 2;
				emulated = kvmppc_handle_vmx_store(run,
						vcpu, op.reg, 8, 1);
			} else {
				vcpu->arch.mmio_vmx_copy_nums = 1;
				emulated = kvmppc_handle_vmx_store(run,
						vcpu, op.reg, size, 1);
			}

			break;
#endif
#ifdef CONFIG_VSX
		case STORE_VSX: {
			int io_size_each;

			if (op.vsx_flags & VSX_CHECK_VEC) {
				if (kvmppc_check_altivec_disabled(vcpu))
					return EMULATE_DONE;
			} else {
				if (kvmppc_check_vsx_disabled(vcpu))
					return EMULATE_DONE;
			}

			if (vcpu->kvm->arch.kvm_ops->giveup_ext)
				vcpu->kvm->arch.kvm_ops->giveup_ext(vcpu,
						MSR_VSX);

			if (op.vsx_flags & VSX_FPCONV)
				vcpu->arch.mmio_sp64_extend = 1;

			if (op.element_size == 8)
				vcpu->arch.mmio_copy_type =
						KVMPPC_VSX_COPY_DWORD;
			else if (op.element_size == 4)
				vcpu->arch.mmio_copy_type =
						KVMPPC_VSX_COPY_WORD;
			else
				break;

			if (size < op.element_size) {
				/* precise conversion case, like stxsspx */
				vcpu->arch.mmio_vsx_copy_nums = 1;
				io_size_each = size;
			} else { /* stxvw4x, stxvd2x, etc */
				vcpu->arch.mmio_vsx_copy_nums =
						size/op.element_size;
				io_size_each = op.element_size;
			}

			emulated = kvmppc_handle_vsx_store(run, vcpu,
					op.reg, io_size_each, 1);
			break;
		}
#endif
		case CACHEOP:
			/* Do nothing. The guest is performing dcbi because
			 * hardware DMA is not snooped by the dcache, but
			 * emulated DMA either goes through the dcache as
			 * normal writes, or the host kernel has handled dcache
			 * coherence.
			 */
			emulated = EMULATE_DONE;
			break;
		default:
			break;
		}
	}

	if (emulated == EMULATE_FAIL) {
		advance = 0;
		kvmppc_core_queue_program(vcpu, 0);
	}

	trace_kvm_ppc_instr(inst, kvmppc_get_pc(vcpu), emulated);

	/* Advance past emulated instruction. */
	if (advance)
		kvmppc_set_pc(vcpu, kvmppc_get_pc(vcpu) + 4);

	return emulated;
}