/*
 * Copyright 2017 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/kvm_host.h>

#include <asm/kvm_ppc.h>
#include <asm/kvm_book3s.h>
#include <asm/kvm_book3s_64.h>
#include <asm/reg.h>
#include <asm/ppc-opcode.h>

static void emulate_tx_failure(struct kvm_vcpu *vcpu, u64 failure_cause)
{
	u64 texasr, tfiar;
	u64 msr = vcpu->arch.shregs.msr;

	tfiar = vcpu->arch.regs.nip & ~0x3ull;
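	/*
	 * TFIAR records the failing instruction's address; the failure
	 * cause goes in the top byte of TEXASR (bits 0:7 in IBM bit
	 * numbering), with ABORT, FS (failure summary) and EXACT marking
	 * a precise, software-induced failure.
	 */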
	texasr = (failure_cause << 56) | TEXASR_ABORT | TEXASR_FS | TEXASR_EXACT;
	if (MSR_TM_SUSPENDED(vcpu->arch.shregs.msr))
		texasr |= TEXASR_SUSP;
	if (msr & MSR_PR) {
		texasr |= TEXASR_PR;
		tfiar |= 1;
	}
	vcpu->arch.tfiar = tfiar;
	/* Preserve ROT and TL fields of existing TEXASR */
	vcpu->arch.texasr = (vcpu->arch.texasr & 0x3ffffff) | texasr;
}

/*
 * This gets called on a softpatch interrupt on POWER9 DD2.2 processors.
 * We expect a TM-related instruction to be emulated.  The instruction
 * image is in vcpu->arch.emul_inst.  If the guest was in TM suspended or
 * transactional state, the checkpointed state has been reclaimed and is
 * in the vcpu struct.  The CPU is in virtual mode in host context.
 */
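/*
 * For orientation, the assumed dispatch in the HV exit path
 * (book3s_hv.c) looks roughly like this (a sketch, not verbatim):
 *
 *	case BOOK3S_INTERRUPT_HV_SOFTPATCH:
 *		r = kvmhv_p9_tm_emulation(vcpu);
 *		break;
 */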
int kvmhv_p9_tm_emulation(struct kvm_vcpu *vcpu)
{
	u32 instr = vcpu->arch.emul_inst;
	u64 msr = vcpu->arch.shregs.msr;
	u64 newmsr, bescr;
	int ra, rs;

	/*
	 * rfid, rfebb and mtmsrd encode bit 31 = 0, since it is a reserved
	 * bit in those instructions; masking bit 31 out therefore leaves
	 * them unchanged.  For treclaim., tsr. and trechkpt., forms with
	 * bit 31 = 0 are invalid per the ISA, but the P9 UM (section
	 * 4.6.10, Book II Invalid Forms) states explicitly that ignoring
	 * bit 31 is an acceptable way to handle those invalid forms.
	 * Moreover, for emulation purposes both forms (with and without
	 * bit 31 set) can generate a softpatch interrupt, so both are
	 * handled below and behave the same way.
	 */
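	/*
	 * PO_XOP_OPCODE_MASK keeps only the primary opcode (bits 0:5) and
	 * the extended opcode (bits 21:30), dropping the register fields
	 * and bit 31, which implements the "ignore bit 31" policy above.
	 */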
	switch (instr & PO_XOP_OPCODE_MASK) {
	case PPC_INST_RFID:
		/* XXX do we need to check for PR=0 here? */
		newmsr = vcpu->arch.shregs.srr1;
		/* should only get here for Sx -> T1 transition */
		WARN_ON_ONCE(!(MSR_TM_SUSPENDED(msr) &&
			       MSR_TM_TRANSACTIONAL(newmsr) &&
			       (newmsr & MSR_TM)));
		newmsr = sanitize_msr(newmsr);
		vcpu->arch.shregs.msr = newmsr;
		vcpu->arch.cfar = vcpu->arch.regs.nip - 4;
		vcpu->arch.regs.nip = vcpu->arch.shregs.srr0;
		return RESUME_GUEST;

	case PPC_INST_RFEBB:
		if ((msr & MSR_PR) && (vcpu->arch.vcore->pcr & PCR_ARCH_206)) {
			/* generate an illegal instruction interrupt */
			kvmppc_core_queue_program(vcpu, SRR1_PROGILL);
			return RESUME_GUEST;
		}
		/* check EBB facility is available */
		if (!(vcpu->arch.hfscr & HFSCR_EBB)) {
			/* generate an illegal instruction interrupt */
			kvmppc_core_queue_program(vcpu, SRR1_PROGILL);
			return RESUME_GUEST;
		}
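		/*
		 * The top byte of FSCR is the interruption cause (IC)
		 * field; loading it with the facility's _LG value before
		 * queueing the interrupt tells the guest which facility
		 * trapped.  The same pattern recurs for TM below.
		 */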
		if ((msr & MSR_PR) && !(vcpu->arch.fscr & FSCR_EBB)) {
			/* generate a facility unavailable interrupt */
			vcpu->arch.fscr = (vcpu->arch.fscr & ~(0xffull << 56)) |
				((u64)FSCR_EBB_LG << 56);
			kvmppc_book3s_queue_irqprio(vcpu, BOOK3S_INTERRUPT_FAC_UNAVAIL);
			return RESUME_GUEST;
		}
		bescr = vcpu->arch.bescr;
		/* expect to see a S->T transition requested */
		WARN_ON_ONCE(!(MSR_TM_SUSPENDED(msr) &&
			       ((bescr >> 30) & 3) == 2));
		bescr &= ~BESCR_GE;
		if (instr & (1 << 11))
			bescr |= BESCR_GE;
		vcpu->arch.bescr = bescr;
		msr = (msr & ~MSR_TS_MASK) | MSR_TS_T;
		vcpu->arch.shregs.msr = msr;
		vcpu->arch.cfar = vcpu->arch.regs.nip - 4;
		vcpu->arch.regs.nip = vcpu->arch.ebbrr;
		return RESUME_GUEST;

	case PPC_INST_MTMSRD:
		/* XXX do we need to check for PR=0 here? */
		rs = (instr >> 21) & 0x1f;
		newmsr = kvmppc_get_gpr(vcpu, rs);
		/* check this is a Sx -> T1 transition */
		WARN_ON_ONCE(!(MSR_TM_SUSPENDED(msr) &&
			       MSR_TM_TRANSACTIONAL(newmsr) &&
			       (newmsr & MSR_TM)));
		/* mtmsrd doesn't change LE */
		newmsr = (newmsr & ~MSR_LE) | (msr & MSR_LE);
		newmsr = sanitize_msr(newmsr);
		vcpu->arch.shregs.msr = newmsr;
		return RESUME_GUEST;

	/* ignore bit 31, see comment above */
	case (PPC_INST_TSR & PO_XOP_OPCODE_MASK):
		/* check for PR=1 and arch 2.06 bit set in PCR */
		if ((msr & MSR_PR) && (vcpu->arch.vcore->pcr & PCR_ARCH_206)) {
			/* generate an illegal instruction interrupt */
			kvmppc_core_queue_program(vcpu, SRR1_PROGILL);
			return RESUME_GUEST;
		}
		/* check for TM disabled in the HFSCR or MSR */
		if (!(vcpu->arch.hfscr & HFSCR_TM)) {
			/* generate an illegal instruction interrupt */
			kvmppc_core_queue_program(vcpu, SRR1_PROGILL);
			return RESUME_GUEST;
		}
		if (!(msr & MSR_TM)) {
			/* generate a facility unavailable interrupt */
			vcpu->arch.fscr = (vcpu->arch.fscr & ~(0xffull << 56)) |
				((u64)FSCR_TM_LG << 56);
			kvmppc_book3s_queue_irqprio(vcpu,
						BOOK3S_INTERRUPT_FAC_UNAVAIL);
			return RESUME_GUEST;
		}
		/* Set CR0 to indicate previous transactional state */
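		/*
		 * The shift by 29 places the two-bit MSR[TS] field into
		 * CR0 as 0b0TS0, the CR0 result the ISA specifies for
		 * tsr., treclaim. and trechkpt..
		 */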
		vcpu->arch.regs.ccr = (vcpu->arch.regs.ccr & 0x0fffffff) |
			(((msr & MSR_TS_MASK) >> MSR_TS_S_LG) << 29);
		/* L=1 => tresume, L=0 => tsuspend */
		if (instr & (1 << 21)) {
			if (MSR_TM_SUSPENDED(msr))
				msr = (msr & ~MSR_TS_MASK) | MSR_TS_T;
		} else {
			if (MSR_TM_TRANSACTIONAL(msr))
				msr = (msr & ~MSR_TS_MASK) | MSR_TS_S;
		}
		vcpu->arch.shregs.msr = msr;
		return RESUME_GUEST;

	/* ignore bit 31, see comment above */
	case (PPC_INST_TRECLAIM & PO_XOP_OPCODE_MASK):
		/* check for TM disabled in the HFSCR or MSR */
		if (!(vcpu->arch.hfscr & HFSCR_TM)) {
			/* generate an illegal instruction interrupt */
			kvmppc_core_queue_program(vcpu, SRR1_PROGILL);
			return RESUME_GUEST;
		}
		if (!(msr & MSR_TM)) {
			/* generate a facility unavailable interrupt */
			vcpu->arch.fscr = (vcpu->arch.fscr & ~(0xffull << 56)) |
				((u64)FSCR_TM_LG << 56);
			kvmppc_book3s_queue_irqprio(vcpu,
						BOOK3S_INTERRUPT_FAC_UNAVAIL);
			return RESUME_GUEST;
		}
		/* If no transaction active, generate TM bad thing */
		if (!MSR_TM_ACTIVE(msr)) {
			kvmppc_core_queue_program(vcpu, SRR1_PROGTM);
			return RESUME_GUEST;
		}
		/* If failure was not previously recorded, recompute TEXASR */
		if (!(vcpu->arch.orig_texasr & TEXASR_FS)) {
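			/*
			 * treclaim. takes its failure cause from the
			 * bottom byte of (RA); RA = 0 means cause 0.
			 */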
			ra = (instr >> 16) & 0x1f;
			if (ra)
				ra = kvmppc_get_gpr(vcpu, ra) & 0xff;
			emulate_tx_failure(vcpu, ra);
		}

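		/*
		 * treclaim. makes the checkpointed register state the
		 * working state, so copy it out of the vcpu struct.
		 */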
		copy_from_checkpoint(vcpu);

		/* Set CR0 to indicate previous transactional state */
		vcpu->arch.regs.ccr = (vcpu->arch.regs.ccr & 0x0fffffff) |
			(((msr & MSR_TS_MASK) >> MSR_TS_S_LG) << 29);
		vcpu->arch.shregs.msr &= ~MSR_TS_MASK;
		return RESUME_GUEST;

	/* ignore bit 31, see comment above */
	case (PPC_INST_TRECHKPT & PO_XOP_OPCODE_MASK):
		/* XXX do we need to check for PR=0 here? */
		/* check for TM disabled in the HFSCR or MSR */
		if (!(vcpu->arch.hfscr & HFSCR_TM)) {
			/* generate an illegal instruction interrupt */
			kvmppc_core_queue_program(vcpu, SRR1_PROGILL);
			return RESUME_GUEST;
		}
		if (!(msr & MSR_TM)) {
			/* generate a facility unavailable interrupt */
			vcpu->arch.fscr = (vcpu->arch.fscr & ~(0xffull << 56)) |
				((u64)FSCR_TM_LG << 56);
			kvmppc_book3s_queue_irqprio(vcpu,
						BOOK3S_INTERRUPT_FAC_UNAVAIL);
			return RESUME_GUEST;
		}
		/* If transaction active or TEXASR[FS] = 0, bad thing */
		if (MSR_TM_ACTIVE(msr) || !(vcpu->arch.texasr & TEXASR_FS)) {
			kvmppc_core_queue_program(vcpu, SRR1_PROGTM);
			return RESUME_GUEST;
		}

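		/*
		 * trechkpt. establishes the working registers as the new
		 * checkpointed state, so copy them into the vcpu struct.
		 */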
		copy_to_checkpoint(vcpu);

		/* Set CR0 to indicate previous transactional state */
		vcpu->arch.regs.ccr = (vcpu->arch.regs.ccr & 0x0fffffff) |
			(((msr & MSR_TS_MASK) >> MSR_TS_S_LG) << 29);
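		/* trechkpt. leaves the thread in suspended state */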
		vcpu->arch.shregs.msr = msr | MSR_TS_S;
		return RESUME_GUEST;
	}

	/* We didn't recognize the instruction; treat it as illegal */
	kvmppc_core_queue_program(vcpu, SRR1_PROGILL);
	pr_warn_ratelimited("Unrecognized TM-related instruction %#x for emulation\n", instr);

	return RESUME_GUEST;
}