2 * This program is free software; you can redistribute it and/or modify
3 * it under the terms of the GNU General Public License, version 2, as
4 * published by the Free Software Foundation.
6 * This program is distributed in the hope that it will be useful,
7 * but WITHOUT ANY WARRANTY; without even the implied warranty of
8 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
9 * GNU General Public License for more details.
11 * Derived from book3s_hv_rmhandlers.S, which is:
13 * Copyright 2011 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com>
18 #include <asm/ppc_asm.h>
19 #include <asm/asm-offsets.h>
20 #include <asm/export.h>
22 #include <asm/cputable.h>
24 #ifdef CONFIG_PPC_TRANSACTIONAL_MEM
/*
 * Byte offset of checkpointed GPR 'reg' inside the vcpu struct:
 * stride is ULONG_SIZE per register, added to VCPU_GPR_TM (presumably
 * the base of the TM GPR save area in asm-offsets — confirm there).
 */
25 #define VCPU_GPRS_TM(reg) (((reg) * ULONG_SIZE) + VCPU_GPR_TM)
28 * Save transactional state and TM-related registers.
30 * - r3 points to the vcpu struct
31 * - r4 points to the MSR with current TS bits:
32 * (For HV KVM, it is VCPU_MSR ; For PR KVM, it is host MSR).
33 * This can modify all checkpointed registers, but
34 * restores r1, r2 before exit.
/*
 * __kvmppc_save_tm — reclaim the guest's transaction and save the
 * checkpointed state into the vcpu struct.
 *
 * NOTE(review): this excerpt is elided — the embedded line numbers jump
 * (36->38, 59->62, ...), so instructions the sequence clearly relies on
 * (mflr/mfmsr, the FTR_SECTION opener, the treclaim itself, mtmsrd for
 * the RI bit, the .rept around the GPR store) are not visible here.
 * Comments below describe only the visible lines; verify against the
 * full file before changing anything.
 */
36 _GLOBAL(__kvmppc_save_tm)
/* Save LR on the stack (r0 presumably holds LR from an elided mflr). */
38 	std	r0, PPC_LR_STKOFF(r1)
/* Set the TM bit in the MSR image in r8 so TM SPRs are accessible. */
43 	rldimi	r8, r0, MSR_TM_LG, 63-MSR_TM_LG
/* Also enable VEC/VSX so the vector state can be saved below. */
45 	oris	r8, r8, (MSR_VEC | MSR_VSX)@h
/* Extract the MSR_TS field from the guest MSR in r4 (cr0 updated). */
48 	rldicl.	r4, r4, 64 - MSR_TS_S_LG, 62
49 	beq	1f	/* TM not active in guest. */
/* Stash stack pointer and vcpu pointer — treclaim destroys all GPRs. */
51 	std	r1, HSTATE_SCRATCH2(r13)
52 	std	r3, HSTATE_SCRATCH1(r13)
54 #ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
56 	/* Emulation of the treclaim instruction needs TEXASR before treclaim */
/* r6 presumably holds TEXASR from an elided mfspr — confirm. */
58 	std	r6, VCPU_ORIG_TEXASR(r3)
59 END_FTR_SECTION_IFSET(CPU_FTR_P9_TM_HV_ASSIST)
62 	/* Clear the MSR RI since r1, r13 are all going to be foobar. */
/* Failure cause reported in TEXASR by the (elided) treclaim. */
66 	li	r3, TM_CAUSE_KVM_RESCHED
68 	/* All GPRs are volatile at this point. */
71 	/* Temporarily store r13 and r9 so we have some regs to play with */
73 	std	r9, PACATMSCRATCH(r13)
/* Recover the vcpu pointer stashed above into r9. */
75 	ld	r9, HSTATE_SCRATCH1(r13)
77 	/* Get a few more GPRs free. */
78 	std	r29, VCPU_GPRS_TM(29)(r9)
79 	std	r30, VCPU_GPRS_TM(30)(r9)
80 	std	r31, VCPU_GPRS_TM(31)(r9)
82 	/* Save away PPR and DSCR soon so don't run with user values. */
86 #ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
/* Reload the host DSCR saved in the PACA shadow state. */
87 	ld	r29, HSTATE_DSCR(r13)
91 	/* Save all but r9, r13 & r29-r31 */
/* Body of an elided .rept/.endr loop over register numbers. */
94 	.if (reg != 9) && (reg != 13)
95 	std	reg, VCPU_GPRS_TM(reg)(r9)
99 	/* ... now save r13 */
/* r4 presumably holds the original r13 (from an elided move) — confirm. */
101 	std	r4, VCPU_GPRS_TM(13)(r9)
102 	/* ... and save r9 */
/* Original r9 was parked in PACATMSCRATCH above. */
103 	ld	r4, PACATMSCRATCH(r13)
104 	std	r4, VCPU_GPRS_TM(9)(r9)
106 	/* Reload stack pointer and TOC. */
107 	ld	r1, HSTATE_SCRATCH2(r13)
110 	/* Set MSR RI now we have r1 and r13 back. */
114 	/* Save away checkpointed SPRs. */
115 	std	r31, VCPU_PPR_TM(r9)
116 	std	r30, VCPU_DSCR_TM(r9)
/* r5-r11 presumably hold checkpointed LR/CR/CTR/AMR/TAR/XER from
 * elided mfspr/mflr/mfcr instructions — confirm against full file. */
123 	std	r5, VCPU_LR_TM(r9)
124 	stw	r6, VCPU_CR_TM(r9)
125 	std	r7, VCPU_CTR_TM(r9)
126 	std	r8, VCPU_AMR_TM(r9)
127 	std	r10, VCPU_TAR_TM(r9)
128 	std	r11, VCPU_XER_TM(r9)
130 	/* Restore r12 as trap number. */
131 	lwz	r12, VCPU_TRAP(r9)
/* r3 = &vcpu FP / vector save areas for the (elided) store_fp_state /
 * store_vr_state calls. */
134 	addi	r3, r9, VCPU_FPRS_TM
136 	addi	r3, r9, VCPU_VRS_TM
138 	mfspr	r6, SPRN_VRSAVE
139 	stw	r6, VCPU_VRSAVE_TM(r9)
142 	 * We need to save these SPRs after the treclaim so that the software
143 	 * error code is recorded correctly in the TEXASR. Also the user may
144 	 * change these outside of a transaction, so they must always be
147 	mfspr	r7, SPRN_TEXASR
148 	std	r7, VCPU_TEXASR(r9)
/* r5/r6 presumably hold TFHAR/TFIAR from elided mfsprs — confirm. */
152 	std	r5, VCPU_TFHAR(r9)
153 	std	r6, VCPU_TFIAR(r9)
/* Restore LR from the stack; the blr is elided from this excerpt. */
155 	ld	r0, PPC_LR_STKOFF(r1)
160 * _kvmppc_save_tm_pr() is a wrapper around __kvmppc_save_tm(), so that it can
161 * be invoked from C function by PR KVM only.
/*
 * _kvmppc_save_tm_pr — C-callable PR-KVM wrapper around __kvmppc_save_tm.
 * Saves/restores enough host context (MSR, DSCR/CR/TAR per the comments)
 * around the call so C callers are unaffected.
 * NOTE(review): most of the body (the mfmsr, SPR saves/restores and the
 * bl __kvmppc_save_tm itself, embedded lines 171-195 and 201-208) is
 * elided from this excerpt.
 */
163 _GLOBAL(_kvmppc_save_tm_pr)
/* r5 presumably holds LR from an elided mflr; spill it, then make a
 * standard switch frame. */
165 	std	r5, PPC_LR_STKOFF(r1)
166 	stdu	r1, -SWITCH_FRAME_SIZE(r1)
169 	/* save MSR since TM/math bits might be impacted
170 	 * by __kvmppc_save_tm().
175 	/* also save DSCR/CR/TAR so that it can be recovered later */
196 	/* need preserve current MSR's MSR_TS bits */
/* Extract TS from r6 and merge into the MSR image in r5 before the
 * (elided) mtmsrd. */
199 	rldicl	r6, r6, 64 - MSR_TS_S_LG, 62
200 	rldimi	r5, r6, MSR_TS_S_LG, 63 - MSR_TS_T_LG
/* Tear down the frame and reload LR; the blr is elided. */
204 	addi	r1, r1, SWITCH_FRAME_SIZE
205 	ld	r5, PPC_LR_STKOFF(r1)
209 EXPORT_SYMBOL_GPL(_kvmppc_save_tm_pr);
212 * Restore transactional state and TM-related registers.
214 * - r3 points to the vcpu struct.
215 * - r4 is the guest MSR with desired TS bits:
216 * For HV KVM, it is VCPU_MSR
217 * For PR KVM, it is provided by caller
218 * This potentially modifies all checkpointed registers.
219 * It restores r1, r2 from the PACA.
/*
 * __kvmppc_restore_tm — load the guest's checkpointed state and
 * re-checkpoint the transaction (via an elided trechkpt).
 *
 * NOTE(review): this excerpt is elided (embedded numbering jumps:
 * 223->225, 260->265, 278->287, ...). In particular, the setup that
 * establishes r31 as the vcpu pointer (used at embedded lines 265/267
 * and 303-313) is not visible, while embedded lines 270-292 index off
 * r3 — verify against the full file which register is the live vcpu
 * base at each point before modifying this sequence.
 */
221 _GLOBAL(__kvmppc_restore_tm)
/* Save LR (r0 presumably loaded by an elided mflr) on the stack. */
223 	std	r0, PPC_LR_STKOFF(r1)
225 	/* Turn on TM/FP/VSX/VMX so we can restore them. */
231 	oris	r5, r5, (MSR_VEC | MSR_VSX)@h
235 	 * The user may change these outside of a transaction, so they must
236 	 * always be context switched.
/* TFHAR/TFIAR/TEXASR always come from the vcpu struct. */
238 	ld	r5, VCPU_TFHAR(r3)
239 	ld	r6, VCPU_TFIAR(r3)
240 	ld	r7, VCPU_TEXASR(r3)
243 	mtspr	SPRN_TEXASR, r7
/* Test the MSR_TS field of the guest MSR in r4 (r5 presumably holds a
 * copy via an elided mr — confirm). */
246 	rldicl.	r5, r5, 64 - MSR_TS_S_LG, 62
247 	beqlr		/* TM not active in guest */
/* Stash r1 — the checkpointed-GPR load below destroys all GPRs. */
248 	std	r1, HSTATE_SCRATCH2(r13)
250 	/* Make sure the failure summary is set, otherwise we'll program check
251 	 * when we trechkpt. It's possible that this might have been not set
252 	 * on a kvmppc_set_one_reg() call but we shouldn't let this crash the
/* Force TEXASR_FS so the (elided) trechkpt cannot program-check. */
255 	oris	r7, r7, (TEXASR_FS)@h
256 	mtspr	SPRN_TEXASR, r7
259 	 * We need to load up the checkpointed state for the guest.
260 	 * We need to do this early as it will blow away any GPRs, VSRs and
/* r3 = &vcpu FP / vector areas for the (elided) load_fp_state /
 * load_vr_state calls; r31 is the vcpu base here. */
265 	addi	r3, r31, VCPU_FPRS_TM
267 	addi	r3, r31, VCPU_VRS_TM
/* NOTE(review): base register is r3 here although r3 was just
 * repurposed above — presumably restored by an elided instruction;
 * confirm against the full file. */
270 	lwz	r7, VCPU_VRSAVE_TM(r3)
271 	mtspr	SPRN_VRSAVE, r7
/* Stage checkpointed LR/CR/CTR/AMR/TAR/XER; the mtspr/mtlr/mtcr
 * consumers are elided from this excerpt. */
273 	ld	r5, VCPU_LR_TM(r3)
274 	lwz	r6, VCPU_CR_TM(r3)
275 	ld	r7, VCPU_CTR_TM(r3)
276 	ld	r8, VCPU_AMR_TM(r3)
277 	ld	r9, VCPU_TAR_TM(r3)
278 	ld	r10, VCPU_XER_TM(r3)
287 	 * Load up PPR and DSCR values but don't put them in the actual SPRs
288 	 * till the last moment to avoid running with userspace PPR and DSCR for
291 	ld	r29, VCPU_DSCR_TM(r3)
292 	ld	r30, VCPU_PPR_TM(r3)
/* Park the TOC in the PACA — r2 is about to be overwritten. */
294 	std	r2, PACATMSCRATCH(r13)	/* Save TOC */
296 	/* Clear the MSR RI since r1, r13 are all going to be foobar. */
300 	/* Load GPRs r0-r28 */
/* Body of an elided .rept/.endr loop over register numbers. */
303 	ld	reg, VCPU_GPRS_TM(reg)(r31)
310 	/* Load final GPRs */
/* Bare register numbers (29/30/31) because r31, the loop's base, is
 * itself being overwritten last. */
311 	ld	29, VCPU_GPRS_TM(29)(r31)
312 	ld	30, VCPU_GPRS_TM(30)(r31)
313 	ld	31, VCPU_GPRS_TM(31)(r31)
315 	/* TM checkpointed state is now setup. All GPRs are now volatile. */
318 	/* Now let's get back the state we need. */
321 #ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
/* Reload the host DSCR from the PACA shadow state. */
322 	ld	r29, HSTATE_DSCR(r13)
/* Recover stack pointer and TOC stashed before the GPR blow-away. */
325 	ld	r1, HSTATE_SCRATCH2(r13)
326 	ld	r2, PACATMSCRATCH(r13)
328 	/* Set the MSR RI since we have our registers back. */
/* Restore LR from the stack; the blr is elided from this excerpt. */
331 	ld	r0, PPC_LR_STKOFF(r1)
336 * _kvmppc_restore_tm_pr() is a wrapper around __kvmppc_restore_tm(), so that it
337 * can be invoked from C function by PR KVM only.
/*
 * _kvmppc_restore_tm_pr — C-callable PR-KVM wrapper around
 * __kvmppc_restore_tm. Saves/restores host context (MSR, DSCR/CR/TAR
 * per the comments) around the call so C callers are unaffected.
 * NOTE(review): much of the body (embedded lines 346-358 and 360-377)
 * is elided from this excerpt.
 */
339 _GLOBAL(_kvmppc_restore_tm_pr)
/* r5 presumably holds LR from an elided mflr; spill it, then make a
 * standard switch frame. */
341 	std	r5, PPC_LR_STKOFF(r1)
342 	stdu	r1, -SWITCH_FRAME_SIZE(r1)
345 	/* save MSR to avoid TM/math bits change */
349 	/* also save DSCR/CR/TAR so that it can be recovered later */
359 	bl	__kvmppc_restore_tm
370 	/* need preserve current MSR's MSR_TS bits */
/* Extract TS from r6 and merge into the MSR image in r5 before the
 * (elided) mtmsrd. */
373 	rldicl	r6, r6, 64 - MSR_TS_S_LG, 62
374 	rldimi	r5, r6, MSR_TS_S_LG, 63 - MSR_TS_T_LG
/* Tear down the frame and reload LR; the blr is elided. */
378 	addi	r1, r1, SWITCH_FRAME_SIZE
379 	ld	r5, PPC_LR_STKOFF(r1)
383 EXPORT_SYMBOL_GPL(_kvmppc_restore_tm_pr);
384 #endif /* CONFIG_PPC_TRANSACTIONAL_MEM */