// SPDX-License-Identifier: GPL-2.0-only
/*
 *
 * Copyright IBM Corp. 2007
 * Copyright 2011 Freescale Semiconductor, Inc.
 *
 * Authors: Hollis Blanchard <hollisb@us.ibm.com>
 */

#include <linux/jiffies.h>
#include <linux/hrtimer.h>
#include <linux/types.h>
#include <linux/string.h>
#include <linux/kvm_host.h>
#include <linux/clockchips.h>

#include <asm/reg.h>
#include <asm/time.h>
#include <asm/byteorder.h>
#include <asm/kvm_ppc.h>
#include <asm/disassemble.h>
#include <asm/ppc-opcode.h>
#include <asm/sstep.h>
#include "timing.h"
#include "trace.h"

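/*
 * Facility checks: if the guest currently has FP/VSX/Altivec disabled in
 * its MSR, the access must raise the corresponding facility-unavailable
 * interrupt in the guest rather than be emulated here.
 */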
#ifdef CONFIG_PPC_FPU
static bool kvmppc_check_fp_disabled(struct kvm_vcpu *vcpu)
{
        if (!(kvmppc_get_msr(vcpu) & MSR_FP)) {
                kvmppc_core_queue_fpunavail(vcpu, kvmppc_get_msr(vcpu) & SRR1_PREFIXED);
                return true;
        }

        return false;
}
#endif /* CONFIG_PPC_FPU */

#ifdef CONFIG_VSX
static bool kvmppc_check_vsx_disabled(struct kvm_vcpu *vcpu)
{
        if (!(kvmppc_get_msr(vcpu) & MSR_VSX)) {
                kvmppc_core_queue_vsx_unavail(vcpu, kvmppc_get_msr(vcpu) & SRR1_PREFIXED);
                return true;
        }

        return false;
}
#endif /* CONFIG_VSX */

#ifdef CONFIG_ALTIVEC
static bool kvmppc_check_altivec_disabled(struct kvm_vcpu *vcpu)
{
        if (!(kvmppc_get_msr(vcpu) & MSR_VEC)) {
                kvmppc_core_queue_vec_unavail(vcpu, kvmppc_get_msr(vcpu) & SRR1_PREFIXED);
                return true;
        }

        return false;
}
#endif /* CONFIG_ALTIVEC */

/*
 * XXX to do:
 * lfiwax, lfiwzx
 * vector loads and stores
 *
 * Instructions that trap when used on cache-inhibited mappings
 * are not emulated here: multiple and string instructions,
 * lq/stq, and the load-reserve/store-conditional instructions.
 */
int kvmppc_emulate_loadstore(struct kvm_vcpu *vcpu)
{
        ppc_inst_t inst;
        enum emulation_result emulated = EMULATE_FAIL;
        struct instruction_op op;

        /* this default type might be overwritten by subcategories */
        kvmppc_set_exit_type(vcpu, EMULATED_INST_EXITS);

        emulated = kvmppc_get_last_inst(vcpu, INST_GENERIC, &inst);
        if (emulated != EMULATE_DONE)
                return emulated;

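        /* Reset per-access MMIO state left over from a previous emulation. */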
        vcpu->arch.mmio_vsx_copy_nums = 0;
        vcpu->arch.mmio_vsx_offset = 0;
        vcpu->arch.mmio_copy_type = KVMPPC_VSX_COPY_NONE;
        vcpu->arch.mmio_sp64_extend = 0;
        vcpu->arch.mmio_sign_extend = 0;
        vcpu->arch.mmio_vmx_copy_nums = 0;
        vcpu->arch.mmio_vmx_offset = 0;
        vcpu->arch.mmio_host_swabbed = 0;

        emulated = EMULATE_FAIL;
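        /*
         * analyse_instr() consults regs->msr (e.g. for 32-bit-mode effective
         * address truncation), so sync it from the shared area first.
         */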
        vcpu->arch.regs.msr = vcpu->arch.shared->msr;
        if (analyse_instr(&op, &vcpu->arch.regs, inst) == 0) {
                int type = op.type & INSTR_TYPE_MASK;
                int size = GETSIZE(op.type);

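                /* Record the access direction for a possible MMIO exit to userspace. */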
                vcpu->mmio_is_write = OP_IS_STORE(type);

                switch (type) {
                case LOAD: {
                        int instr_byte_swap = op.type & BYTEREV;

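                        /* The last argument of kvmppc_handle_load()/_loads()
                         * is "is_default_endian"; a byte-reversed load
                         * (BYTEREV) inverts it.
                         */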
                        if (op.type & SIGNEXT)
                                emulated = kvmppc_handle_loads(vcpu,
                                                op.reg, size, !instr_byte_swap);
                        else
                                emulated = kvmppc_handle_load(vcpu,
                                                op.reg, size, !instr_byte_swap);

                        if ((op.type & UPDATE) && (emulated != EMULATE_FAIL))
                                kvmppc_set_gpr(vcpu, op.update_reg, op.ea);

                        break;
                }
#ifdef CONFIG_PPC_FPU
                case LOAD_FP:
                        if (kvmppc_check_fp_disabled(vcpu))
                                return EMULATE_DONE;

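                        /* FPCONV: the memory image is single-precision but
                         * the FP register is 64-bit, so the MMIO completion
                         * path must convert (mmio_sp64_extend).
                         */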
                        if (op.type & FPCONV)
                                vcpu->arch.mmio_sp64_extend = 1;

                        if (op.type & SIGNEXT)
                                emulated = kvmppc_handle_loads(vcpu,
                                             KVM_MMIO_REG_FPR|op.reg, size, 1);
                        else
                                emulated = kvmppc_handle_load(vcpu,
                                             KVM_MMIO_REG_FPR|op.reg, size, 1);

                        if ((op.type & UPDATE) && (emulated != EMULATE_FAIL))
                                kvmppc_set_gpr(vcpu, op.update_reg, op.ea);

                        break;
#endif
#ifdef CONFIG_ALTIVEC
                case LOAD_VMX:
                        if (kvmppc_check_altivec_disabled(vcpu))
                                return EMULATE_DONE;

                        /* Hardware enforces alignment of VMX accesses */
                        vcpu->arch.vaddr_accessed &= ~((unsigned long)size - 1);
                        vcpu->arch.paddr_accessed &= ~((unsigned long)size - 1);

                        if (size == 16) { /* lvx */
                                vcpu->arch.mmio_copy_type =
                                                KVMPPC_VMX_COPY_DWORD;
                        } else if (size == 4) { /* lvewx */
                                vcpu->arch.mmio_copy_type =
                                                KVMPPC_VMX_COPY_WORD;
                        } else if (size == 2) { /* lvehx */
                                vcpu->arch.mmio_copy_type =
                                                KVMPPC_VMX_COPY_HWORD;
                        } else if (size == 1) { /* lvebx */
                                vcpu->arch.mmio_copy_type =
                                                KVMPPC_VMX_COPY_BYTE;
                        } else
                                break;

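                        /* Element index within the 16-byte VMX register. */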
                        vcpu->arch.mmio_vmx_offset =
                                (vcpu->arch.vaddr_accessed & 0xf)/size;

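                        /* A 16-byte lvx is wider than one MMIO transaction
                         * can carry, so split it into two 8-byte loads.
                         */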
                        if (size == 16) {
                                vcpu->arch.mmio_vmx_copy_nums = 2;
                                emulated = kvmppc_handle_vmx_load(vcpu,
                                                KVM_MMIO_REG_VMX|op.reg,
                                                8, 1);
                        } else {
                                vcpu->arch.mmio_vmx_copy_nums = 1;
                                emulated = kvmppc_handle_vmx_load(vcpu,
                                                KVM_MMIO_REG_VMX|op.reg,
                                                size, 1);
                        }
                        break;
#endif
#ifdef CONFIG_VSX
                case LOAD_VSX: {
                        int io_size_each;

                        if (op.vsx_flags & VSX_CHECK_VEC) {
                                if (kvmppc_check_altivec_disabled(vcpu))
                                        return EMULATE_DONE;
                        } else {
                                if (kvmppc_check_vsx_disabled(vcpu))
                                        return EMULATE_DONE;
                        }

                        if (op.vsx_flags & VSX_FPCONV)
                                vcpu->arch.mmio_sp64_extend = 1;

                        if (op.element_size == 8) {
                                if (op.vsx_flags & VSX_SPLAT)
                                        vcpu->arch.mmio_copy_type =
                                                KVMPPC_VSX_COPY_DWORD_LOAD_DUMP;
                                else
                                        vcpu->arch.mmio_copy_type =
                                                KVMPPC_VSX_COPY_DWORD;
                        } else if (op.element_size == 4) {
                                if (op.vsx_flags & VSX_SPLAT)
                                        vcpu->arch.mmio_copy_type =
                                                KVMPPC_VSX_COPY_WORD_LOAD_DUMP;
                                else
                                        vcpu->arch.mmio_copy_type =
                                                KVMPPC_VSX_COPY_WORD;
                        } else
                                break;

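                        /* A VSX access may cover several elements; emulate it
                         * as mmio_vsx_copy_nums transactions of io_size_each
                         * bytes.
                         */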
                        if (size < op.element_size) {
                                /* precision conversion case: lxsspx, etc. */
                                vcpu->arch.mmio_vsx_copy_nums = 1;
                                io_size_each = size;
                        } else { /* lxvw4x, lxvd2x, etc. */
                                vcpu->arch.mmio_vsx_copy_nums =
                                        size/op.element_size;
                                io_size_each = op.element_size;
                        }

                        emulated = kvmppc_handle_vsx_load(vcpu,
                                        KVM_MMIO_REG_VSX|op.reg, io_size_each,
                                        1, op.type & SIGNEXT);
                        break;
                }
#endif
                case STORE:
                        /* If byte reversal is needed, op.val has already been
                         * reversed by analyse_instr().
                         */
                        emulated = kvmppc_handle_store(vcpu, op.val, size, 1);

                        if ((op.type & UPDATE) && (emulated != EMULATE_FAIL))
                                kvmppc_set_gpr(vcpu, op.update_reg, op.ea);

                        break;
#ifdef CONFIG_PPC_FPU
                case STORE_FP:
                        if (kvmppc_check_fp_disabled(vcpu))
                                return EMULATE_DONE;

                        /* The FP registers need to be flushed so that
                         * kvmppc_handle_store() can read actual FP vals
                         * from vcpu->arch.
                         */
                        if (vcpu->kvm->arch.kvm_ops->giveup_ext)
                                vcpu->kvm->arch.kvm_ops->giveup_ext(vcpu,
                                                MSR_FP);

                        if (op.type & FPCONV)
                                vcpu->arch.mmio_sp64_extend = 1;

                        emulated = kvmppc_handle_store(vcpu,
                                        VCPU_FPR(vcpu, op.reg), size, 1);

                        if ((op.type & UPDATE) && (emulated != EMULATE_FAIL))
                                kvmppc_set_gpr(vcpu, op.update_reg, op.ea);

                        break;
#endif
#ifdef CONFIG_ALTIVEC
                case STORE_VMX:
                        if (kvmppc_check_altivec_disabled(vcpu))
                                return EMULATE_DONE;

                        /* Hardware enforces alignment of VMX accesses. */
                        vcpu->arch.vaddr_accessed &= ~((unsigned long)size - 1);
                        vcpu->arch.paddr_accessed &= ~((unsigned long)size - 1);

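                        /* As with STORE_FP, flush the VMX registers to
                         * vcpu->arch so the store reads current values.
                         */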
                        if (vcpu->kvm->arch.kvm_ops->giveup_ext)
                                vcpu->kvm->arch.kvm_ops->giveup_ext(vcpu,
                                                MSR_VEC);
                        if (size == 16) { /* stvx */
                                vcpu->arch.mmio_copy_type =
                                                KVMPPC_VMX_COPY_DWORD;
                        } else if (size == 4) { /* stvewx */
                                vcpu->arch.mmio_copy_type =
                                                KVMPPC_VMX_COPY_WORD;
                        } else if (size == 2) { /* stvehx */
                                vcpu->arch.mmio_copy_type =
                                                KVMPPC_VMX_COPY_HWORD;
                        } else if (size == 1) { /* stvebx */
                                vcpu->arch.mmio_copy_type =
                                                KVMPPC_VMX_COPY_BYTE;
                        } else
                                break;

                        vcpu->arch.mmio_vmx_offset =
                                (vcpu->arch.vaddr_accessed & 0xf)/size;

                        if (size == 16) {
                                vcpu->arch.mmio_vmx_copy_nums = 2;
                                emulated = kvmppc_handle_vmx_store(vcpu,
                                                op.reg, 8, 1);
                        } else {
                                vcpu->arch.mmio_vmx_copy_nums = 1;
                                emulated = kvmppc_handle_vmx_store(vcpu,
                                                op.reg, size, 1);
                        }

                        break;
#endif
#ifdef CONFIG_VSX
                case STORE_VSX: {
                        int io_size_each;

                        if (op.vsx_flags & VSX_CHECK_VEC) {
                                if (kvmppc_check_altivec_disabled(vcpu))
                                        return EMULATE_DONE;
                        } else {
                                if (kvmppc_check_vsx_disabled(vcpu))
                                        return EMULATE_DONE;
                        }

                        if (vcpu->kvm->arch.kvm_ops->giveup_ext)
                                vcpu->kvm->arch.kvm_ops->giveup_ext(vcpu,
                                                MSR_VSX);

                        if (op.vsx_flags & VSX_FPCONV)
                                vcpu->arch.mmio_sp64_extend = 1;

                        if (op.element_size == 8)
                                vcpu->arch.mmio_copy_type =
                                                KVMPPC_VSX_COPY_DWORD;
                        else if (op.element_size == 4)
                                vcpu->arch.mmio_copy_type =
                                                KVMPPC_VSX_COPY_WORD;
                        else
                                break;

                        if (size < op.element_size) {
                                /* precision conversion case, like stxsspx */
                                vcpu->arch.mmio_vsx_copy_nums = 1;
                                io_size_each = size;
                        } else { /* stxvw4x, stxvd2x, etc. */
                                vcpu->arch.mmio_vsx_copy_nums =
                                                size/op.element_size;
                                io_size_each = op.element_size;
                        }

                        emulated = kvmppc_handle_vsx_store(vcpu,
                                        op.reg, io_size_each, 1);
                        break;
                }
#endif
                case CACHEOP:
                        /* Do nothing. The guest is performing dcbi because
                         * hardware DMA is not snooped by the dcache, but
                         * emulated DMA either goes through the dcache as
                         * normal writes, or the host kernel has handled dcache
                         * coherence.
                         */
                        emulated = EMULATE_DONE;
                        break;
                default:
                        break;
                }
        }

        trace_kvm_ppc_instr(ppc_inst_val(inst), kvmppc_get_pc(vcpu), emulated);

        /* Advance past the emulated instruction; ppc_inst_len() is 8 for
         * prefixed instructions, else 4.
         */
        if (emulated != EMULATE_FAIL)
                kvmppc_set_pc(vcpu, kvmppc_get_pc(vcpu) + ppc_inst_len(inst));

        return emulated;
}