/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
 *
 * Copyright SUSE Linux Products GmbH 2010
 * Copyright 2010-2011 Freescale Semiconductor, Inc.
 *
 * Authors: Alexander Graf <agraf@suse.de>
 */
#include <asm/ppc_asm.h>
#include <asm/kvm_asm.h>
#include <asm/reg.h>
#include <asm/page.h>
#include <asm/asm-offsets.h>
#include <asm/asm-compat.h>
#define KVM_MAGIC_PAGE		(-4096)
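
/*
 * The magic page is mapped at the top of the guest's effective
 * address space, so KVM_MAGIC_PAGE + offset is a small negative
 * displacement: with a base register of 0 (which d-form loads and
 * stores read as the literal value zero), every field of the
 * shared page is reachable without first loading a pointer.
 */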
#ifdef CONFIG_64BIT
#define LL64(reg, offs, reg2)	ld	reg, (offs)(reg2)
#define STL64(reg, offs, reg2)	std	reg, (offs)(reg2)
#else
#define LL64(reg, offs, reg2)	lwz	reg, (offs + 4)(reg2)
#define STL64(reg, offs, reg2)	stw	reg, (offs + 4)(reg2)
#endif
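
/*
 * The shared-page fields are 64 bits wide and big-endian; on 32-bit
 * kernels LL64/STL64 touch only the low word at offs + 4, which is
 * all a 32-bit guest needs to read or update.
 */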
#define SCRATCH_SAVE						\
	/* Enable critical section. We are critical if		\
	   shared->critical == r1 */				\
	STL64(r1, KVM_MAGIC_PAGE + KVM_MAGIC_CRITICAL, 0);	\
								\
	/* Save state */					\
	PPC_STL	r31, (KVM_MAGIC_PAGE + KVM_MAGIC_SCRATCH1)(0);	\
	PPC_STL	r30, (KVM_MAGIC_PAGE + KVM_MAGIC_SCRATCH2)(0);	\
	mfcr	r31;						\
	stw	r31, (KVM_MAGIC_PAGE + KVM_MAGIC_SCRATCH3)(0);
#define SCRATCH_RESTORE						\
	/* Restore state */					\
	PPC_LL	r31, (KVM_MAGIC_PAGE + KVM_MAGIC_SCRATCH1)(0);	\
	lwz	r30, (KVM_MAGIC_PAGE + KVM_MAGIC_SCRATCH3)(0);	\
	mtcr	r30;						\
	PPC_LL	r30, (KVM_MAGIC_PAGE + KVM_MAGIC_SCRATCH2)(0);	\
								\
	/* Disable critical section. We are critical if		\
	   shared->critical == r1 and r2 is always != r1 */	\
	STL64(r2, KVM_MAGIC_PAGE + KVM_MAGIC_CRITICAL, 0);
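
/*
 * The code between kvm_template_start and kvm_template_end is not
 * executed in place: at boot it is copied into a scratch area and
 * the emulated instruction is replaced with a branch to the copy.
 * The *_offs/*_len words that follow each template tell the
 * patching code (in kvm.c) where to rewrite the register operand,
 * where to insert the original instruction, and where to fix up
 * the branch back to the caller.
 */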
.global kvm_template_start
kvm_template_start:

.global kvm_emulate_mtmsrd
kvm_emulate_mtmsrd:

	SCRATCH_SAVE
	/* Put MSR & ~(MSR_EE|MSR_RI) in r31 */
	LL64(r31, KVM_MAGIC_PAGE + KVM_MAGIC_MSR, 0)
	lis	r30, (~(MSR_EE | MSR_RI))@h
	ori	r30, r30, (~(MSR_EE | MSR_RI))@l
	and	r31, r31, r30
	/* OR the register's (MSR_EE|MSR_RI) on MSR */
kvm_emulate_mtmsrd_reg:
	ori	r30, r0, 0	/* patched slot: r0 stands in for the guest's source register */
	andi.	r30, r30, (MSR_EE|MSR_RI)
	or	r31, r31, r30
	/* Put MSR back into magic page */
	STL64(r31, KVM_MAGIC_PAGE + KVM_MAGIC_MSR, 0)
	/* Check if we have to fetch an interrupt */
	lwz	r31, (KVM_MAGIC_PAGE + KVM_MAGIC_INT)(0)
	cmpwi	r31, 0
	beq+	no_check

	/* Check if we may trigger an interrupt */
	andi.	r30, r30, MSR_EE
	beq	no_check

	SCRATCH_RESTORE

	/* Nag hypervisor with mtmsrd */
kvm_emulate_mtmsrd_orig_ins:
	ori	r0, r0, 0	/* patched slot: replaced with the original mtmsrd */
	b	kvm_emulate_mtmsrd_branch
no_check:

	SCRATCH_RESTORE

	/* Go back to caller */
kvm_emulate_mtmsrd_branch:
	b	.
kvm_emulate_mtmsrd_end:
.global kvm_emulate_mtmsrd_branch_offs
kvm_emulate_mtmsrd_branch_offs:
	.long (kvm_emulate_mtmsrd_branch - kvm_emulate_mtmsrd) / 4

.global kvm_emulate_mtmsrd_reg_offs
kvm_emulate_mtmsrd_reg_offs:
	.long (kvm_emulate_mtmsrd_reg - kvm_emulate_mtmsrd) / 4

.global kvm_emulate_mtmsrd_orig_ins_offs
kvm_emulate_mtmsrd_orig_ins_offs:
	.long (kvm_emulate_mtmsrd_orig_ins - kvm_emulate_mtmsrd) / 4

.global kvm_emulate_mtmsrd_len
kvm_emulate_mtmsrd_len:
	.long (kvm_emulate_mtmsrd_end - kvm_emulate_mtmsrd) / 4
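
/*
 * mtmsr template: MSR_EE and MSR_RI are the only bits a guest may
 * flip without the hypervisor having to know about it, so only a
 * change to the remaining (critical) bits forces the trapping
 * mtmsr below; a pure EE/RI update is folded into the magic page
 * instead.
 */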
#define MSR_SAFE_BITS (MSR_EE | MSR_RI)
#define MSR_CRITICAL_BITS ~MSR_SAFE_BITS
.global kvm_emulate_mtmsr
kvm_emulate_mtmsr:

	SCRATCH_SAVE
	/* Fetch old MSR in r31 */
	LL64(r31, KVM_MAGIC_PAGE + KVM_MAGIC_MSR, 0)
	/* Find the changed bits between old and new MSR */
kvm_emulate_mtmsr_reg1:
	ori	r30, r0, 0	/* patched slot: r0 stands in for the guest's source register */
	xor	r31, r30, r31

	/* Check if we need to really do mtmsr */
	LOAD_REG_IMMEDIATE(r30, MSR_CRITICAL_BITS)
	and.	r31, r31, r30

	/* No critical bits changed? Maybe we can stay in the guest. */
	beq	maybe_stay_in_guest
do_mtmsr:

	SCRATCH_RESTORE

	/* Just fire off the mtmsr if it's critical */
kvm_emulate_mtmsr_orig_ins:
	mtmsr	r0	/* patched slot: replaced with the original mtmsr */
	b	kvm_emulate_mtmsr_branch

maybe_stay_in_guest:
	/* Get the target register in r30 */
kvm_emulate_mtmsr_reg2:
	ori	r30, r0, 0
	/* Put MSR into magic page because we don't call mtmsr */
	STL64(r30, KVM_MAGIC_PAGE + KVM_MAGIC_MSR, 0)
	/* Check if we have to fetch an interrupt */
	lwz	r31, (KVM_MAGIC_PAGE + KVM_MAGIC_INT)(0)
	cmpwi	r31, 0
	beq+	no_mtmsr

	/* Check if we may trigger an interrupt */
	andi.	r31, r30, MSR_EE
	bne	do_mtmsr

no_mtmsr:

	SCRATCH_RESTORE
	/* Go back to caller */
kvm_emulate_mtmsr_branch:
	b	.
kvm_emulate_mtmsr_end:
.global kvm_emulate_mtmsr_branch_offs
kvm_emulate_mtmsr_branch_offs:
	.long (kvm_emulate_mtmsr_branch - kvm_emulate_mtmsr) / 4

.global kvm_emulate_mtmsr_reg1_offs
kvm_emulate_mtmsr_reg1_offs:
	.long (kvm_emulate_mtmsr_reg1 - kvm_emulate_mtmsr) / 4

.global kvm_emulate_mtmsr_reg2_offs
kvm_emulate_mtmsr_reg2_offs:
	.long (kvm_emulate_mtmsr_reg2 - kvm_emulate_mtmsr) / 4

.global kvm_emulate_mtmsr_orig_ins_offs
kvm_emulate_mtmsr_orig_ins_offs:
	.long (kvm_emulate_mtmsr_orig_ins - kvm_emulate_mtmsr) / 4

.global kvm_emulate_mtmsr_len
kvm_emulate_mtmsr_len:
	.long (kvm_emulate_mtmsr_end - kvm_emulate_mtmsr) / 4
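
/*
 * wrtee template (BookE): only MSR[EE] changes, so the new value
 * always goes straight into the magic page; the real wrtee is
 * replayed only when EE is being turned on while an interrupt is
 * already pending.
 */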
/* also used for wrteei 1 */
.global kvm_emulate_wrtee
kvm_emulate_wrtee:

	SCRATCH_SAVE
	/* Fetch old MSR in r31 */
	LL64(r31, KVM_MAGIC_PAGE + KVM_MAGIC_MSR, 0)
	/* Insert new MSR[EE] */
kvm_emulate_wrtee_reg:
	ori	r30, r0, 0	/* patched slot: r0 stands in for the guest's source register */
	rlwimi	r31, r30, 0, MSR_EE
	/*
	 * If MSR[EE] is now set, check for a pending interrupt.
	 * We could skip this if MSR[EE] was already on, but that
	 * should be rare, so don't bother.
	 */
	andi.	r30, r30, MSR_EE
	/* Put MSR into magic page because we don't call wrtee */
	STL64(r31, KVM_MAGIC_PAGE + KVM_MAGIC_MSR, 0)

	beq	no_wrtee
	/* Check if we have to fetch an interrupt */
	lwz	r30, (KVM_MAGIC_PAGE + KVM_MAGIC_INT)(0)
	cmpwi	r30, 0
	bne	do_wrtee

no_wrtee:
	SCRATCH_RESTORE

	/* Go back to caller */
kvm_emulate_wrtee_branch:
	b	.
do_wrtee:
	SCRATCH_RESTORE

	/* Just fire off the wrtee if it's critical */
kvm_emulate_wrtee_orig_ins:
	wrtee	r0	/* patched slot: replaced with the original wrtee */
	b	kvm_emulate_wrtee_branch

kvm_emulate_wrtee_end:
.global kvm_emulate_wrtee_branch_offs
kvm_emulate_wrtee_branch_offs:
	.long (kvm_emulate_wrtee_branch - kvm_emulate_wrtee) / 4

.global kvm_emulate_wrtee_reg_offs
kvm_emulate_wrtee_reg_offs:
	.long (kvm_emulate_wrtee_reg - kvm_emulate_wrtee) / 4

.global kvm_emulate_wrtee_orig_ins_offs
kvm_emulate_wrtee_orig_ins_offs:
	.long (kvm_emulate_wrtee_orig_ins - kvm_emulate_wrtee) / 4

.global kvm_emulate_wrtee_len
kvm_emulate_wrtee_len:
	.long (kvm_emulate_wrtee_end - kvm_emulate_wrtee) / 4
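
/*
 * wrteei 0 only ever clears MSR[EE] and thus can never unmask a
 * pending interrupt, so no interrupt check and no hypervisor round
 * trip are needed.
 */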
.global kvm_emulate_wrteei_0
kvm_emulate_wrteei_0:
	SCRATCH_SAVE
	/* Fetch old MSR in r31 */
	LL64(r31, KVM_MAGIC_PAGE + KVM_MAGIC_MSR, 0)

	/* Remove MSR_EE from old MSR */
	rlwinm	r31, r31, 0, ~MSR_EE
	/* Write new MSR value back */
	STL64(r31, KVM_MAGIC_PAGE + KVM_MAGIC_MSR, 0)

	SCRATCH_RESTORE

	/* Go back to caller */
kvm_emulate_wrteei_0_branch:
	b	.
kvm_emulate_wrteei_0_end:
.global kvm_emulate_wrteei_0_branch_offs
kvm_emulate_wrteei_0_branch_offs:
	.long (kvm_emulate_wrteei_0_branch - kvm_emulate_wrteei_0) / 4

.global kvm_emulate_wrteei_0_len
kvm_emulate_wrteei_0_len:
	.long (kvm_emulate_wrteei_0_end - kvm_emulate_wrteei_0) / 4
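
/*
 * mtsrin template: with translation off (MSR[IR/DR] clear) the
 * segment register update is only mirrored into the magic page's
 * shadow SRs for the hypervisor to pick up later; with translation
 * on, the original mtsrin is replayed so it takes effect
 * immediately.
 */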
.global kvm_emulate_mtsrin
kvm_emulate_mtsrin:

	SCRATCH_SAVE

	LL64(r31, KVM_MAGIC_PAGE + KVM_MAGIC_MSR, 0)
	andi.	r31, r31, MSR_DR | MSR_IR
	beq	kvm_emulate_mtsrin_reg1
	SCRATCH_RESTORE

kvm_emulate_mtsrin_orig_ins:
	nop	/* patched slot: replaced with the original mtsrin */
	b	kvm_emulate_mtsrin_branch
kvm_emulate_mtsrin_reg1:
	/* rX >> 26: the top four bits of rB select the segment
	   register; shifted left by 2 they become the word offset
	   of the shadow SR entry in the magic page */
	rlwinm	r30,r0,6,26,29
kvm_emulate_mtsrin_reg2:
	stw	r0, (KVM_MAGIC_PAGE + KVM_MAGIC_SR)(r30)

	SCRATCH_RESTORE

	/* Go back to caller */
kvm_emulate_mtsrin_branch:
	b	.
kvm_emulate_mtsrin_end:
.global kvm_emulate_mtsrin_branch_offs
kvm_emulate_mtsrin_branch_offs:
	.long (kvm_emulate_mtsrin_branch - kvm_emulate_mtsrin) / 4

.global kvm_emulate_mtsrin_reg1_offs
kvm_emulate_mtsrin_reg1_offs:
	.long (kvm_emulate_mtsrin_reg1 - kvm_emulate_mtsrin) / 4

.global kvm_emulate_mtsrin_reg2_offs
kvm_emulate_mtsrin_reg2_offs:
	.long (kvm_emulate_mtsrin_reg2 - kvm_emulate_mtsrin) / 4

.global kvm_emulate_mtsrin_orig_ins_offs
kvm_emulate_mtsrin_orig_ins_offs:
	.long (kvm_emulate_mtsrin_orig_ins - kvm_emulate_mtsrin) / 4

.global kvm_emulate_mtsrin_len
kvm_emulate_mtsrin_len:
	.long (kvm_emulate_mtsrin_end - kvm_emulate_mtsrin) / 4
.global kvm_template_end
kvm_template_end: