arch/powerpc/kvm/emulate_loadstore.c
// SPDX-License-Identifier: GPL-2.0-only
/*
 *
 * Copyright IBM Corp. 2007
 * Copyright 2011 Freescale Semiconductor, Inc.
 *
 * Authors: Hollis Blanchard <hollisb@us.ibm.com>
 */

#include <linux/jiffies.h>
#include <linux/hrtimer.h>
#include <linux/types.h>
#include <linux/string.h>
#include <linux/kvm_host.h>
#include <linux/clockchips.h>

#include <asm/reg.h>
#include <asm/time.h>
#include <asm/byteorder.h>
#include <asm/kvm_ppc.h>
#include <asm/disassemble.h>
#include <asm/ppc-opcode.h>
#include <asm/sstep.h>
#include "timing.h"
#include "trace.h"

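/*
 * When the guest has a facility disabled in its MSR, the access cannot
 * be emulated directly; instead the corresponding "unavailable"
 * interrupt is queued so the guest enables the facility and retries.
 * SRR1_PREFIXED tells the guest the fault came from a prefixed
 * instruction.
 */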
#ifdef CONFIG_PPC_FPU
static bool kvmppc_check_fp_disabled(struct kvm_vcpu *vcpu)
{
        if (!(kvmppc_get_msr(vcpu) & MSR_FP)) {
                kvmppc_core_queue_fpunavail(vcpu, kvmppc_get_msr(vcpu) & SRR1_PREFIXED);
                return true;
        }

        return false;
}
#endif /* CONFIG_PPC_FPU */

#ifdef CONFIG_VSX
static bool kvmppc_check_vsx_disabled(struct kvm_vcpu *vcpu)
{
        if (!(kvmppc_get_msr(vcpu) & MSR_VSX)) {
                kvmppc_core_queue_vsx_unavail(vcpu, kvmppc_get_msr(vcpu) & SRR1_PREFIXED);
                return true;
        }

        return false;
}
#endif /* CONFIG_VSX */

#ifdef CONFIG_ALTIVEC
static bool kvmppc_check_altivec_disabled(struct kvm_vcpu *vcpu)
{
        if (!(kvmppc_get_msr(vcpu) & MSR_VEC)) {
                kvmppc_core_queue_vec_unavail(vcpu, kvmppc_get_msr(vcpu) & SRR1_PREFIXED);
                return true;
        }

        return false;
}
#endif /* CONFIG_ALTIVEC */

/*
 * XXX to do:
 * lfiwax, lfiwzx
 * vector loads and stores
 *
 * Instructions that trap when used on cache-inhibited mappings
 * are not emulated here: multiple and string instructions,
 * lq/stq, and the load-reserve/store-conditional instructions.
 */
int kvmppc_emulate_loadstore(struct kvm_vcpu *vcpu)
{
        ppc_inst_t inst;
        enum emulation_result emulated = EMULATE_FAIL;
        struct instruction_op op;

        /* this default type might be overwritten by subcategories */
        kvmppc_set_exit_type(vcpu, EMULATED_INST_EXITS);

        emulated = kvmppc_get_last_inst(vcpu, INST_GENERIC, &inst);
        if (emulated != EMULATE_DONE)
                return emulated;

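        /*
         * Reset the per-access MMIO state. These fields tell the MMIO
         * completion path how to place data returned by userspace into
         * the target register (copy type, element offset, sign/FP
         * extension, byte swapping).
         */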
        vcpu->arch.mmio_vsx_copy_nums = 0;
        vcpu->arch.mmio_vsx_offset = 0;
        vcpu->arch.mmio_copy_type = KVMPPC_VSX_COPY_NONE;
        vcpu->arch.mmio_sp64_extend = 0;
        vcpu->arch.mmio_sign_extend = 0;
        vcpu->arch.mmio_vmx_copy_nums = 0;
        vcpu->arch.mmio_vmx_offset = 0;
        vcpu->arch.mmio_host_swabbed = 0;

        emulated = EMULATE_FAIL;
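        /*
         * analyse_instr() (the common instruction analyser from
         * lib/sstep.c) needs the current MSR, e.g. to honour the
         * guest's endianness; under the nested-v2 API the register
         * state is first refreshed from the L0 hypervisor.
         */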
        vcpu->arch.regs.msr = kvmppc_get_msr(vcpu);
        kvmhv_nestedv2_reload_ptregs(vcpu, &vcpu->arch.regs);
        if (analyse_instr(&op, &vcpu->arch.regs, inst) == 0) {
                int type = op.type & INSTR_TYPE_MASK;
                int size = GETSIZE(op.type);

                vcpu->mmio_is_write = OP_IS_STORE(type);

                switch (type) {
                case LOAD:  {
                        int instr_byte_swap = op.type & BYTEREV;

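                        /*
                         * BYTEREV marks byte-reversed forms (lwbrx and
                         * friends): the data is then *not* in the
                         * default guest endianness, hence the negated
                         * flag below.
                         */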
                        if (op.type & SIGNEXT)
                                emulated = kvmppc_handle_loads(vcpu,
                                                op.reg, size, !instr_byte_swap);
                        else
                                emulated = kvmppc_handle_load(vcpu,
                                                op.reg, size, !instr_byte_swap);

                        if ((op.type & UPDATE) && (emulated != EMULATE_FAIL))
                                kvmppc_set_gpr(vcpu, op.update_reg, op.ea);

                        break;
                }
#ifdef CONFIG_PPC_FPU
                case LOAD_FP:
                        if (kvmppc_check_fp_disabled(vcpu))
                                return EMULATE_DONE;

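                        /*
                         * FPCONV marks single-precision forms (e.g.
                         * lfs): the 32-bit value read is widened to
                         * double precision before being placed in the
                         * FPR.
                         */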
                        if (op.type & FPCONV)
                                vcpu->arch.mmio_sp64_extend = 1;

                        if (op.type & SIGNEXT)
                                emulated = kvmppc_handle_loads(vcpu,
                                             KVM_MMIO_REG_FPR|op.reg, size, 1);
                        else
                                emulated = kvmppc_handle_load(vcpu,
                                             KVM_MMIO_REG_FPR|op.reg, size, 1);

                        if ((op.type & UPDATE) && (emulated != EMULATE_FAIL))
                                kvmppc_set_gpr(vcpu, op.update_reg, op.ea);

                        break;
#endif
#ifdef CONFIG_ALTIVEC
                case LOAD_VMX:
                        if (kvmppc_check_altivec_disabled(vcpu))
                                return EMULATE_DONE;

                        /* Hardware enforces alignment of VMX accesses */
                        vcpu->arch.vaddr_accessed &= ~((unsigned long)size - 1);
                        vcpu->arch.paddr_accessed &= ~((unsigned long)size - 1);

                        if (size == 16) { /* lvx */
                                vcpu->arch.mmio_copy_type =
                                                KVMPPC_VMX_COPY_DWORD;
                        } else if (size == 4) { /* lvewx */
                                vcpu->arch.mmio_copy_type =
                                                KVMPPC_VMX_COPY_WORD;
                        } else if (size == 2) { /* lvehx */
                                vcpu->arch.mmio_copy_type =
                                                KVMPPC_VMX_COPY_HWORD;
                        } else if (size == 1) { /* lvebx */
                                vcpu->arch.mmio_copy_type =
                                                KVMPPC_VMX_COPY_BYTE;
                        } else
                                break;

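                        /*
                         * Element index of the access within the
                         * 16-byte quadword; a full 16-byte lvx is
                         * split into two 8-byte host accesses.
                         */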
                        vcpu->arch.mmio_vmx_offset =
                                (vcpu->arch.vaddr_accessed & 0xf)/size;

                        if (size == 16) {
                                vcpu->arch.mmio_vmx_copy_nums = 2;
                                emulated = kvmppc_handle_vmx_load(vcpu,
                                                KVM_MMIO_REG_VMX|op.reg,
                                                8, 1);
                        } else {
                                vcpu->arch.mmio_vmx_copy_nums = 1;
                                emulated = kvmppc_handle_vmx_load(vcpu,
                                                KVM_MMIO_REG_VMX|op.reg,
                                                size, 1);
                        }
                        break;
#endif
#ifdef CONFIG_VSX
                case LOAD_VSX: {
                        int io_size_each;

                        if (op.vsx_flags & VSX_CHECK_VEC) {
                                if (kvmppc_check_altivec_disabled(vcpu))
                                        return EMULATE_DONE;
                        } else {
                                if (kvmppc_check_vsx_disabled(vcpu))
                                        return EMULATE_DONE;
                        }

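                        /*
                         * VSX_FPCONV marks single-precision scalar
                         * forms (lxsspx): widen to double on the way
                         * in. The copy type says how elements are
                         * scattered into the VSR; the _DUMP variants
                         * splat one element to all lanes (lxvdsx and
                         * similar).
                         */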
                        if (op.vsx_flags & VSX_FPCONV)
                                vcpu->arch.mmio_sp64_extend = 1;

                        if (op.element_size == 8) {
                                if (op.vsx_flags & VSX_SPLAT)
                                        vcpu->arch.mmio_copy_type =
                                                KVMPPC_VSX_COPY_DWORD_LOAD_DUMP;
                                else
                                        vcpu->arch.mmio_copy_type =
                                                KVMPPC_VSX_COPY_DWORD;
                        } else if (op.element_size == 4) {
                                if (op.vsx_flags & VSX_SPLAT)
                                        vcpu->arch.mmio_copy_type =
                                                KVMPPC_VSX_COPY_WORD_LOAD_DUMP;
                                else
                                        vcpu->arch.mmio_copy_type =
                                                KVMPPC_VSX_COPY_WORD;
                        } else
                                break;

                        if (size < op.element_size) {
                                /* precision conversion case: lxsspx, etc. */
                                vcpu->arch.mmio_vsx_copy_nums = 1;
                                io_size_each = size;
                        } else { /* lxvw4x, lxvd2x, etc */
                                vcpu->arch.mmio_vsx_copy_nums =
                                        size/op.element_size;
                                io_size_each = op.element_size;
                        }

                        emulated = kvmppc_handle_vsx_load(vcpu,
                                        KVM_MMIO_REG_VSX|op.reg, io_size_each,
                                        1, op.type & SIGNEXT);
                        break;
                }
#endif
                case STORE:
                        /* If the access is byte-reversed, op.val has
                         * already been reversed by analyse_instr().
                         */
                        emulated = kvmppc_handle_store(vcpu, op.val, size, 1);

                        if ((op.type & UPDATE) && (emulated != EMULATE_FAIL))
                                kvmppc_set_gpr(vcpu, op.update_reg, op.ea);

                        break;
#ifdef CONFIG_PPC_FPU
                case STORE_FP:
                        if (kvmppc_check_fp_disabled(vcpu))
                                return EMULATE_DONE;

                        /* The FP registers need to be flushed so that
                         * kvmppc_handle_store() can read the actual FP
                         * values from vcpu->arch.
                         */
                        if (vcpu->kvm->arch.kvm_ops->giveup_ext)
                                vcpu->kvm->arch.kvm_ops->giveup_ext(vcpu,
                                                MSR_FP);

                        if (op.type & FPCONV)
                                vcpu->arch.mmio_sp64_extend = 1;

                        emulated = kvmppc_handle_store(vcpu,
                                        kvmppc_get_fpr(vcpu, op.reg), size, 1);

                        if ((op.type & UPDATE) && (emulated != EMULATE_FAIL))
                                kvmppc_set_gpr(vcpu, op.update_reg, op.ea);

                        break;
#endif
#ifdef CONFIG_ALTIVEC
                case STORE_VMX:
                        if (kvmppc_check_altivec_disabled(vcpu))
                                return EMULATE_DONE;

                        /* Hardware enforces alignment of VMX accesses. */
                        vcpu->arch.vaddr_accessed &= ~((unsigned long)size - 1);
                        vcpu->arch.paddr_accessed &= ~((unsigned long)size - 1);

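                        /*
                         * As for FP stores: flush the vector registers
                         * to vcpu->arch so the store reads the current
                         * source value.
                         */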
                        if (vcpu->kvm->arch.kvm_ops->giveup_ext)
                                vcpu->kvm->arch.kvm_ops->giveup_ext(vcpu,
                                                MSR_VEC);
                        if (size == 16) { /* stvx */
                                vcpu->arch.mmio_copy_type =
                                                KVMPPC_VMX_COPY_DWORD;
                        } else if (size == 4) { /* stvewx */
                                vcpu->arch.mmio_copy_type =
                                                KVMPPC_VMX_COPY_WORD;
                        } else if (size == 2) { /* stvehx */
                                vcpu->arch.mmio_copy_type =
                                                KVMPPC_VMX_COPY_HWORD;
                        } else if (size == 1) { /* stvebx */
                                vcpu->arch.mmio_copy_type =
                                                KVMPPC_VMX_COPY_BYTE;
                        } else
                                break;

                        vcpu->arch.mmio_vmx_offset =
                                (vcpu->arch.vaddr_accessed & 0xf)/size;

                        if (size == 16) {
                                vcpu->arch.mmio_vmx_copy_nums = 2;
                                emulated = kvmppc_handle_vmx_store(vcpu,
                                                op.reg, 8, 1);
                        } else {
                                vcpu->arch.mmio_vmx_copy_nums = 1;
                                emulated = kvmppc_handle_vmx_store(vcpu,
                                                op.reg, size, 1);
                        }

                        break;
#endif
#ifdef CONFIG_VSX
                case STORE_VSX: {
                        int io_size_each;

                        if (op.vsx_flags & VSX_CHECK_VEC) {
                                if (kvmppc_check_altivec_disabled(vcpu))
                                        return EMULATE_DONE;
                        } else {
                                if (kvmppc_check_vsx_disabled(vcpu))
                                        return EMULATE_DONE;
                        }

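                        /* Likewise, flush the VSX registers before reading them. */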
                        if (vcpu->kvm->arch.kvm_ops->giveup_ext)
                                vcpu->kvm->arch.kvm_ops->giveup_ext(vcpu,
                                                MSR_VSX);

                        if (op.vsx_flags & VSX_FPCONV)
                                vcpu->arch.mmio_sp64_extend = 1;

                        if (op.element_size == 8)
                                vcpu->arch.mmio_copy_type =
                                                KVMPPC_VSX_COPY_DWORD;
                        else if (op.element_size == 4)
                                vcpu->arch.mmio_copy_type =
                                                KVMPPC_VSX_COPY_WORD;
                        else
                                break;

                        if (size < op.element_size) {
                                /* precision conversion case: stxsspx, etc. */
                                vcpu->arch.mmio_vsx_copy_nums = 1;
                                io_size_each = size;
                        } else { /* stxvw4x, stxvd2x, etc */
                                vcpu->arch.mmio_vsx_copy_nums =
                                                size/op.element_size;
                                io_size_each = op.element_size;
                        }

                        emulated = kvmppc_handle_vsx_store(vcpu,
                                        op.reg, io_size_each, 1);
                        break;
                }
#endif
                case CACHEOP:
                        /* Do nothing. The guest is performing dcbi because
                         * hardware DMA is not snooped by the dcache, but
                         * emulated DMA either goes through the dcache as
                         * normal writes, or the host kernel has handled dcache
                         * coherence.
                         */
                        emulated = EMULATE_DONE;
                        break;
                default:
                        break;
                }
        }

        trace_kvm_ppc_instr(ppc_inst_val(inst), kvmppc_get_pc(vcpu), emulated);
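        /*
         * Under the nested-v2 API, mark the register state dirty so it
         * is synced back to the L0 hypervisor.
         */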
        kvmhv_nestedv2_mark_dirty_ptregs(vcpu, &vcpu->arch.regs);

        /*
         * Advance past the emulated instruction; ppc_inst_len() is 8
         * for prefixed instructions and 4 otherwise.
         */
        if (emulated != EMULATE_FAIL)
                kvmppc_set_pc(vcpu, kvmppc_get_pc(vcpu) + ppc_inst_len(inst));

        return emulated;
}