/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
 *
 * Copyright SUSE Linux Products GmbH 2009
 *
 * Authors: Alexander Graf <agraf@suse.de>
 */

#include <asm/kvm_ppc.h>
#include <asm/disassemble.h>
#include <asm/kvm_book3s.h>
#include <asm/reg.h>
#include <asm/switch_to.h>
#include <asm/time.h>
#include <asm/tm.h>
#include "book3s.h"
#include <asm/asm-prototypes.h>

#define OP_19_XOP_RFID		18
#define OP_19_XOP_RFI		50

#define OP_31_XOP_MFMSR		83
#define OP_31_XOP_MTMSR		146
#define OP_31_XOP_MTMSRD	178
#define OP_31_XOP_MTSR		210
#define OP_31_XOP_MTSRIN	242
#define OP_31_XOP_TLBIEL	274
#define OP_31_XOP_TLBIE		306
/* Opcode is officially reserved, reuse it as sc 1 when sc 1 doesn't trap */
#define OP_31_XOP_FAKE_SC1	308
#define OP_31_XOP_SLBMTE	402
#define OP_31_XOP_SLBIE		434
#define OP_31_XOP_SLBIA		498
#define OP_31_XOP_MFSR		595
#define OP_31_XOP_MFSRIN	659
#define OP_31_XOP_DCBA		758
#define OP_31_XOP_SLBMFEV	851
#define OP_31_XOP_EIOIO		854
#define OP_31_XOP_SLBMFEE	915

#define OP_31_XOP_TBEGIN	654
#define OP_31_XOP_TABORT	910

#define OP_31_XOP_TRECLAIM	942
#define OP_31_XOP_TRCHKPT	1006

/* DCBZ is actually 1014, but we patch it to 1010 so we get a trap */
#define OP_31_XOP_DCBZ		1010

#define OP_LFS			48
#define OP_LFD			50
#define OP_STFS			52
#define OP_STFD			54

#define SPRN_GQR0		912
#define SPRN_GQR1		913
#define SPRN_GQR2		914
#define SPRN_GQR3		915
#define SPRN_GQR4		916
#define SPRN_GQR5		917
#define SPRN_GQR6		918
#define SPRN_GQR7		919

/* Book3S_32 defines mfsrin(v) - but that messes up our abstract
 * function pointers, so let's just disable the define. */
#undef mfsrin

enum priv_level {
	PRIV_PROBLEM = 0,
	PRIV_SUPER = 1,
	PRIV_HYPER = 2,
};

static bool spr_allowed(struct kvm_vcpu *vcpu, enum priv_level level)
{
	/* PAPR VMs only access supervisor SPRs */
	if (vcpu->arch.papr_enabled && (level > PRIV_SUPER))
		return false;

	/* Limit user space to its own small SPR set */
	if ((kvmppc_get_msr(vcpu) & MSR_PR) && level > PRIV_PROBLEM)
		return false;

	return true;
}

#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
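/*
 * These two helpers shuttle the register image between the vcpu's live
 * registers and its checkpointed (_tm) copies.
 */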
static inline void kvmppc_copyto_vcpu_tm(struct kvm_vcpu *vcpu)
{
	memcpy(&vcpu->arch.gpr_tm[0], &vcpu->arch.regs.gpr[0],
			sizeof(vcpu->arch.gpr_tm));
	memcpy(&vcpu->arch.fp_tm, &vcpu->arch.fp,
			sizeof(struct thread_fp_state));
	memcpy(&vcpu->arch.vr_tm, &vcpu->arch.vr,
			sizeof(struct thread_vr_state));
	vcpu->arch.ppr_tm = vcpu->arch.ppr;
	vcpu->arch.dscr_tm = vcpu->arch.dscr;
	vcpu->arch.amr_tm = vcpu->arch.amr;
	vcpu->arch.ctr_tm = vcpu->arch.regs.ctr;
	vcpu->arch.tar_tm = vcpu->arch.tar;
	vcpu->arch.lr_tm = vcpu->arch.regs.link;
	vcpu->arch.cr_tm = vcpu->arch.regs.ccr;
	vcpu->arch.xer_tm = vcpu->arch.regs.xer;
	vcpu->arch.vrsave_tm = vcpu->arch.vrsave;
}

static inline void kvmppc_copyfrom_vcpu_tm(struct kvm_vcpu *vcpu)
{
	memcpy(&vcpu->arch.regs.gpr[0], &vcpu->arch.gpr_tm[0],
			sizeof(vcpu->arch.regs.gpr));
	memcpy(&vcpu->arch.fp, &vcpu->arch.fp_tm,
			sizeof(struct thread_fp_state));
	memcpy(&vcpu->arch.vr, &vcpu->arch.vr_tm,
			sizeof(struct thread_vr_state));
	vcpu->arch.ppr = vcpu->arch.ppr_tm;
	vcpu->arch.dscr = vcpu->arch.dscr_tm;
	vcpu->arch.amr = vcpu->arch.amr_tm;
	vcpu->arch.regs.ctr = vcpu->arch.ctr_tm;
	vcpu->arch.tar = vcpu->arch.tar_tm;
	vcpu->arch.regs.link = vcpu->arch.lr_tm;
	vcpu->arch.regs.ccr = vcpu->arch.cr_tm;
	vcpu->arch.regs.xer = vcpu->arch.xer_tm;
	vcpu->arch.vrsave = vcpu->arch.vrsave_tm;
}
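
/*
 * Emulate treclaim.: record the failure cause in TEXASR/TFIAR if no
 * failure has been recorded yet, roll the register state back to the
 * checkpointed values and drop the guest out of transactional state.
 */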
static void kvmppc_emulate_treclaim(struct kvm_vcpu *vcpu, int ra_val)
{
	unsigned long guest_msr = kvmppc_get_msr(vcpu);
	int fc_val = ra_val ? ra_val : 1;
	uint64_t texasr;

	/* CR0 = 0 | MSR[TS] | 0 */
	vcpu->arch.regs.ccr = (vcpu->arch.regs.ccr & ~(CR0_MASK << CR0_SHIFT)) |
		(((guest_msr & MSR_TS_MASK) >> (MSR_TS_S_LG - 1))
		 << CR0_SHIFT);

	preempt_disable();
	texasr = mfspr(SPRN_TEXASR);
	kvmppc_save_tm_pr(vcpu);
	kvmppc_copyfrom_vcpu_tm(vcpu);

	/* failure recording depends on the Failure Summary bit */
	if (!(texasr & TEXASR_FS)) {
		texasr &= ~TEXASR_FC;
		texasr |= ((u64)fc_val << TEXASR_FC_LG) | TEXASR_FS;

		texasr &= ~(TEXASR_PR | TEXASR_HV);
		if (kvmppc_get_msr(vcpu) & MSR_PR)
			texasr |= TEXASR_PR;

		if (kvmppc_get_msr(vcpu) & MSR_HV)
			texasr |= TEXASR_HV;

		vcpu->arch.texasr = texasr;
		vcpu->arch.tfiar = kvmppc_get_pc(vcpu);
		mtspr(SPRN_TEXASR, texasr);
		mtspr(SPRN_TFIAR, vcpu->arch.tfiar);
	}

	/*
	 * treclaim needs to quit to non-transactional state, so clear
	 * MSR[TS] in the guest MSR.
	 */
	guest_msr &= ~(MSR_TS_MASK);
	kvmppc_set_msr(vcpu, guest_msr);

	preempt_enable();

	if (vcpu->arch.shadow_fscr & FSCR_TAR)
		mtspr(SPRN_TAR, vcpu->arch.tar);
}
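
/*
 * Emulate trecheckpoint.: load the checkpointed register state from the
 * vcpu's _tm copies and leave the guest in suspended transactional state.
 */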
static void kvmppc_emulate_trchkpt(struct kvm_vcpu *vcpu)
{
	unsigned long guest_msr = kvmppc_get_msr(vcpu);

	preempt_disable();
	/*
	 * We need to flush the FP/VEC/VSX state to the vcpu save area
	 * before copying it into the checkpointed registers.
	 */
	kvmppc_giveup_ext(vcpu, MSR_VSX);
	kvmppc_giveup_fac(vcpu, FSCR_TAR_LG);
	kvmppc_copyto_vcpu_tm(vcpu);
	kvmppc_save_tm_sprs(vcpu);

	/*
	 * As a result of trecheckpoint, TS must be set to suspended.
	 */
	guest_msr &= ~(MSR_TS_MASK);
	guest_msr |= MSR_TS_S;
	kvmppc_set_msr(vcpu, guest_msr);
	kvmppc_restore_tm_pr(vcpu);
	preempt_enable();
}

/* Emulate tabort., as executed in guest privileged state */
void kvmppc_emulate_tabort(struct kvm_vcpu *vcpu, int ra_val)
{
	/* Currently we only emulate tabort., not the other tabort
	 * variants, since there is no kernel usage of them at guest
	 * privileged-op level so far.
	 */
	unsigned long guest_msr = kvmppc_get_msr(vcpu);
	uint64_t org_texasr;

	preempt_disable();
	org_texasr = mfspr(SPRN_TEXASR);
	tm_abort(ra_val);	/* execute the abort on the host */

	/* CR0 = 0 | MSR[TS] | 0 */
	vcpu->arch.regs.ccr = (vcpu->arch.regs.ccr & ~(CR0_MASK << CR0_SHIFT)) |
		(((guest_msr & MSR_TS_MASK) >> (MSR_TS_S_LG - 1))
		 << CR0_SHIFT);

	vcpu->arch.texasr = mfspr(SPRN_TEXASR);
	/* Failure recording depends on the Failure Summary bit, and
	 * tabort is treated as a nop in non-transactional state.
	 */
	if (!(org_texasr & TEXASR_FS) &&
			MSR_TM_ACTIVE(guest_msr)) {
		vcpu->arch.texasr &= ~(TEXASR_PR | TEXASR_HV);
		if (guest_msr & MSR_PR)
			vcpu->arch.texasr |= TEXASR_PR;

		if (guest_msr & MSR_HV)
			vcpu->arch.texasr |= TEXASR_HV;

		vcpu->arch.tfiar = kvmppc_get_pc(vcpu);
	}
	preempt_enable();
}
#endif /* CONFIG_PPC_TRANSACTIONAL_MEM */

int kvmppc_core_emulate_op_pr(struct kvm_run *run, struct kvm_vcpu *vcpu,
			      unsigned int inst, int *advance)
{
	int emulated = EMULATE_DONE;
	int rt = get_rt(inst);
	int rs = get_rs(inst);
	int ra = get_ra(inst);
	int rb = get_rb(inst);
	u32 inst_sc = 0x44000002;

	switch (get_op(inst)) {
	case 0:
		emulated = EMULATE_FAIL;
		if ((kvmppc_get_msr(vcpu) & MSR_LE) &&
		    (inst == swab32(inst_sc))) {
			/*
			 * This is the byte-reversed syscall instruction of our
			 * hypercall handler. Early versions of LE Linux didn't
			 * swap the instructions correctly and ended up
			 * executing illegal instructions.
			 * Just always fail hypercalls on these broken systems.
			 */
			kvmppc_set_gpr(vcpu, 3, EV_UNIMPLEMENTED);
			kvmppc_set_pc(vcpu, kvmppc_get_pc(vcpu) + 4);
			emulated = EMULATE_DONE;
		}
		break;
	case 19:
		switch (get_xop(inst)) {
		case OP_19_XOP_RFID:
		case OP_19_XOP_RFI: {
			unsigned long srr1 = kvmppc_get_srr1(vcpu);
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
			unsigned long cur_msr = kvmppc_get_msr(vcpu);

			/*
			 * Follow the ISA rules for TM state transitions: when
			 * TM is off in both the current MSR and SRR1 but we
			 * are in suspended state, and the rfid target state
			 * is TM inactive (TS = 00), the transition must be
			 * suppressed, so keep TS set to suspended.
			 */
			if (((cur_msr & MSR_TM) == 0) &&
				((srr1 & MSR_TM) == 0) &&
				MSR_TM_SUSPENDED(cur_msr) &&
				!MSR_TM_ACTIVE(srr1))
				srr1 |= MSR_TS_S;
#endif
			kvmppc_set_pc(vcpu, kvmppc_get_srr0(vcpu));
			kvmppc_set_msr(vcpu, srr1);
			*advance = 0;
			break;
		}

		default:
			emulated = EMULATE_FAIL;
			break;
		}
		break;
	case 31:
		switch (get_xop(inst)) {
		case OP_31_XOP_MFMSR:
			kvmppc_set_gpr(vcpu, rt, kvmppc_get_msr(vcpu));
			break;
		case OP_31_XOP_MTMSRD:
		{
			ulong rs_val = kvmppc_get_gpr(vcpu, rs);
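			/*
			 * mtmsrd with L = 1 (0x10000 in the image) only
			 * updates MSR[EE] and MSR[RI]; with L = 0 the
			 * whole MSR is replaced.
			 */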
			if (inst & 0x10000) {
				ulong new_msr = kvmppc_get_msr(vcpu);
				new_msr &= ~(MSR_RI | MSR_EE);
				new_msr |= rs_val & (MSR_RI | MSR_EE);
				kvmppc_set_msr_fast(vcpu, new_msr);
			} else
				kvmppc_set_msr(vcpu, rs_val);
			break;
		}
		case OP_31_XOP_MTMSR:
			kvmppc_set_msr(vcpu, kvmppc_get_gpr(vcpu, rs));
			break;
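		/*
		 * Segment register accesses are forwarded to the MMU
		 * backend behind vcpu->arch.mmu.
		 */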
		case OP_31_XOP_MFSR:
		{
			int srnum;

			srnum = kvmppc_get_field(inst, 12 + 32, 15 + 32);
			if (vcpu->arch.mmu.mfsrin) {
				u32 sr;
				sr = vcpu->arch.mmu.mfsrin(vcpu, srnum);
				kvmppc_set_gpr(vcpu, rt, sr);
			}
			break;
		}
		case OP_31_XOP_MFSRIN:
		{
			int srnum;

			srnum = (kvmppc_get_gpr(vcpu, rb) >> 28) & 0xf;
			if (vcpu->arch.mmu.mfsrin) {
				u32 sr;
				sr = vcpu->arch.mmu.mfsrin(vcpu, srnum);
				kvmppc_set_gpr(vcpu, rt, sr);
			}
			break;
		}
		case OP_31_XOP_MTSR:
			vcpu->arch.mmu.mtsrin(vcpu,
				(inst >> 16) & 0xf,
				kvmppc_get_gpr(vcpu, rs));
			break;
		case OP_31_XOP_MTSRIN:
			vcpu->arch.mmu.mtsrin(vcpu,
				(kvmppc_get_gpr(vcpu, rb) >> 28) & 0xf,
				kvmppc_get_gpr(vcpu, rs));
			break;
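		/*
		 * Bit 0x00200000 of the instruction image is the "large
		 * page" flag passed down to the MMU backend's tlbie().
		 */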
		case OP_31_XOP_TLBIE:
		case OP_31_XOP_TLBIEL:
		{
			bool large = (inst & 0x00200000) ? true : false;
			ulong addr = kvmppc_get_gpr(vcpu, rb);
			vcpu->arch.mmu.tlbie(vcpu, addr, large);
			break;
		}
#ifdef CONFIG_PPC_BOOK3S_64
		case OP_31_XOP_FAKE_SC1:
		{
			/* SC 1 papr hypercalls */
			ulong cmd = kvmppc_get_gpr(vcpu, 3);
			int i;

			if ((kvmppc_get_msr(vcpu) & MSR_PR) ||
			    !vcpu->arch.papr_enabled) {
				emulated = EMULATE_FAIL;
				break;
			}

			if (kvmppc_h_pr(vcpu, cmd) == EMULATE_DONE)
				break;

			run->papr_hcall.nr = cmd;
			for (i = 0; i < 9; ++i) {
				ulong gpr = kvmppc_get_gpr(vcpu, 4 + i);
				run->papr_hcall.args[i] = gpr;
			}

			run->exit_reason = KVM_EXIT_PAPR_HCALL;
			vcpu->arch.hcall_needed = 1;
			emulated = EMULATE_EXIT_USER;
			break;
		}
#endif
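		/*
		 * eieio needs no work here; the SLB operations below are
		 * forwarded to the MMU backend and fail the emulation when
		 * the backend has no handler.
		 */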
		case OP_31_XOP_EIOIO:
			break;
		case OP_31_XOP_SLBMTE:
			if (!vcpu->arch.mmu.slbmte)
				return EMULATE_FAIL;

			vcpu->arch.mmu.slbmte(vcpu,
					kvmppc_get_gpr(vcpu, rs),
					kvmppc_get_gpr(vcpu, rb));
			break;
		case OP_31_XOP_SLBIE:
			if (!vcpu->arch.mmu.slbie)
				return EMULATE_FAIL;

			vcpu->arch.mmu.slbie(vcpu,
					kvmppc_get_gpr(vcpu, rb));
			break;
		case OP_31_XOP_SLBIA:
			if (!vcpu->arch.mmu.slbia)
				return EMULATE_FAIL;

			vcpu->arch.mmu.slbia(vcpu);
			break;
		case OP_31_XOP_SLBMFEE:
			if (!vcpu->arch.mmu.slbmfee) {
				emulated = EMULATE_FAIL;
			} else {
				ulong t, rb_val;

				rb_val = kvmppc_get_gpr(vcpu, rb);
				t = vcpu->arch.mmu.slbmfee(vcpu, rb_val);
				kvmppc_set_gpr(vcpu, rt, t);
			}
			break;
		case OP_31_XOP_SLBMFEV:
			if (!vcpu->arch.mmu.slbmfev) {
				emulated = EMULATE_FAIL;
			} else {
				ulong t, rb_val;

				rb_val = kvmppc_get_gpr(vcpu, rb);
				t = vcpu->arch.mmu.slbmfev(vcpu, rb_val);
				kvmppc_set_gpr(vcpu, rt, t);
			}
			break;
		case OP_31_XOP_DCBA:
			/* Gets treated as NOP */
			break;
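		/*
		 * The guest's dcbz is patched to this otherwise-reserved
		 * opcode (see OP_31_XOP_DCBZ above) so that it traps: we
		 * emulate it as a 32-byte store of zeros and inject a data
		 * storage interrupt if the store faults.
		 */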
		case OP_31_XOP_DCBZ:
		{
			ulong rb_val = kvmppc_get_gpr(vcpu, rb);
			ulong ra_val = 0;
			ulong addr, vaddr;
			u32 zeros[8] = { 0, 0, 0, 0, 0, 0, 0, 0 };
			u32 dsisr;
			int r;

			if (ra)
				ra_val = kvmppc_get_gpr(vcpu, ra);

			addr = (ra_val + rb_val) & ~31ULL;
			if (!(kvmppc_get_msr(vcpu) & MSR_SF))
				addr &= 0xffffffff;
			vaddr = addr;

			r = kvmppc_st(vcpu, &addr, 32, zeros, true);
			if ((r == -ENOENT) || (r == -EPERM)) {
				*advance = 0;
				kvmppc_set_dar(vcpu, vaddr);
				vcpu->arch.fault_dar = vaddr;

				dsisr = DSISR_ISSTORE;
				if (r == -ENOENT)
					dsisr |= DSISR_NOHPTE;
				else if (r == -EPERM)
					dsisr |= DSISR_PROTFAULT;

				kvmppc_set_dsisr(vcpu, dsisr);
				vcpu->arch.fault_dsisr = dsisr;

				kvmppc_book3s_queue_irqprio(vcpu,
					BOOK3S_INTERRUPT_DATA_STORAGE);
			}

			break;
		}
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
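		/*
		 * tbegin. traps here only when the guest is in privileged
		 * state (problem-state transactions run without trapping).
		 * Emulate it as a transaction that fails immediately with
		 * cause TM_CAUSE_EMULATE | TM_CAUSE_PERSISTENT.
		 */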
		case OP_31_XOP_TBEGIN:
		{
			if (!cpu_has_feature(CPU_FTR_TM))
				break;

			if (!(kvmppc_get_msr(vcpu) & MSR_TM)) {
				kvmppc_trigger_fac_interrupt(vcpu, FSCR_TM_LG);
				emulated = EMULATE_AGAIN;
				break;
			}

			if (!(kvmppc_get_msr(vcpu) & MSR_PR)) {
				preempt_disable();
				vcpu->arch.regs.ccr = (CR0_TBEGIN_FAILURE |
				  (vcpu->arch.regs.ccr & ~(CR0_MASK << CR0_SHIFT)));

				vcpu->arch.texasr = (TEXASR_FS | TEXASR_EXACT |
					(((u64)(TM_CAUSE_EMULATE | TM_CAUSE_PERSISTENT))
						 << TEXASR_FC_LG));

				if ((inst >> 21) & 0x1)
					vcpu->arch.texasr |= TEXASR_ROT;

				if (kvmppc_get_msr(vcpu) & MSR_HV)
					vcpu->arch.texasr |= TEXASR_HV;

				vcpu->arch.tfhar = kvmppc_get_pc(vcpu) + 4;
				vcpu->arch.tfiar = kvmppc_get_pc(vcpu);

				kvmppc_restore_tm_sprs(vcpu);
				preempt_enable();
			} else
				emulated = EMULATE_FAIL;
			break;
		}
		case OP_31_XOP_TABORT:
		{
			ulong guest_msr = kvmppc_get_msr(vcpu);
			unsigned long ra_val = 0;

			if (!cpu_has_feature(CPU_FTR_TM))
				break;

			if (!(kvmppc_get_msr(vcpu) & MSR_TM)) {
				kvmppc_trigger_fac_interrupt(vcpu, FSCR_TM_LG);
				emulated = EMULATE_AGAIN;
				break;
			}

			/* Only emulate for a privileged guest: a problem
			 * state guest can run with TM enabled and we don't
			 * expect to trap here in that case.
			 */
			WARN_ON(guest_msr & MSR_PR);

			if (ra)
				ra_val = kvmppc_get_gpr(vcpu, ra);

			kvmppc_emulate_tabort(vcpu, ra_val);
			break;
		}
		case OP_31_XOP_TRECLAIM:
		{
			ulong guest_msr = kvmppc_get_msr(vcpu);
			unsigned long ra_val = 0;

			if (!cpu_has_feature(CPU_FTR_TM))
				break;

			if (!(kvmppc_get_msr(vcpu) & MSR_TM)) {
				kvmppc_trigger_fac_interrupt(vcpu, FSCR_TM_LG);
				emulated = EMULATE_AGAIN;
				break;
			}

			/* generate interrupts based on priorities */
			if (guest_msr & MSR_PR) {
				/* Privileged Instruction type Program Interrupt */
				kvmppc_core_queue_program(vcpu, SRR1_PROGPRIV);
				emulated = EMULATE_AGAIN;
				break;
			}

			if (!MSR_TM_ACTIVE(guest_msr)) {
				/* TM bad thing interrupt */
				kvmppc_core_queue_program(vcpu, SRR1_PROGTM);
				emulated = EMULATE_AGAIN;
				break;
			}

			if (ra)
				ra_val = kvmppc_get_gpr(vcpu, ra);
			kvmppc_emulate_treclaim(vcpu, ra_val);
			break;
		}
		case OP_31_XOP_TRCHKPT:
		{
			ulong guest_msr = kvmppc_get_msr(vcpu);
			unsigned long texasr;

			if (!cpu_has_feature(CPU_FTR_TM))
				break;

			if (!(kvmppc_get_msr(vcpu) & MSR_TM)) {
				kvmppc_trigger_fac_interrupt(vcpu, FSCR_TM_LG);
				emulated = EMULATE_AGAIN;
				break;
			}

			/* generate interrupt based on priorities */
			if (guest_msr & MSR_PR) {
				/* Privileged Instruction type Program Interrupt */
				kvmppc_core_queue_program(vcpu, SRR1_PROGPRIV);
				emulated = EMULATE_AGAIN;
				break;
			}

			tm_enable();
			texasr = mfspr(SPRN_TEXASR);
			tm_disable();

			if (MSR_TM_ACTIVE(guest_msr) ||
				!(texasr & (TEXASR_FS))) {
				/* TM bad thing interrupt */
				kvmppc_core_queue_program(vcpu, SRR1_PROGTM);
				emulated = EMULATE_AGAIN;
				break;
			}

			kvmppc_emulate_trchkpt(vcpu);
			break;
		}
#endif
		default:
			emulated = EMULATE_FAIL;
		}
		break;
	default:
		emulated = EMULATE_FAIL;
	}

	if (emulated == EMULATE_FAIL)
		emulated = kvmppc_emulate_paired_single(run, vcpu);

	return emulated;
}
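
/*
 * Each BAT is programmed via a pair of SPRs: the upper word carries BEPI,
 * the block-length mask and the Vs/Vp valid bits; the lower word carries
 * BRPN, WIMG and PP. Both halves are mirrored into the shadow BAT along
 * with the raw 64-bit image used for mfspr.
 */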
void kvmppc_set_bat(struct kvm_vcpu *vcpu, struct kvmppc_bat *bat, bool upper,
		    u32 val)
{
	if (upper) {
		/* Upper BAT */
		u32 bl = (val >> 2) & 0x7ff;
		bat->bepi_mask = (~bl << 17);
		bat->bepi = val & 0xfffe0000;
		bat->vs = (val & 2) ? 1 : 0;
		bat->vp = (val & 1) ? 1 : 0;
		bat->raw = (bat->raw & 0xffffffff00000000ULL) | val;
	} else {
		/* Lower BAT */
		bat->brpn = val & 0xfffe0000;
		bat->wimg = (val >> 3) & 0xf;
		bat->pp = val & 3;
		bat->raw = (bat->raw & 0x00000000ffffffffULL) | ((u64)val << 32);
	}
}

static struct kvmppc_bat *kvmppc_find_bat(struct kvm_vcpu *vcpu, int sprn)
{
	struct kvmppc_vcpu_book3s *vcpu_book3s = to_book3s(vcpu);
	struct kvmppc_bat *bat;

	switch (sprn) {
	case SPRN_IBAT0U ... SPRN_IBAT3L:
		bat = &vcpu_book3s->ibat[(sprn - SPRN_IBAT0U) / 2];
		break;
	case SPRN_IBAT4U ... SPRN_IBAT7L:
		bat = &vcpu_book3s->ibat[4 + ((sprn - SPRN_IBAT4U) / 2)];
		break;
	case SPRN_DBAT0U ... SPRN_DBAT3L:
		bat = &vcpu_book3s->dbat[(sprn - SPRN_DBAT0U) / 2];
		break;
	case SPRN_DBAT4U ... SPRN_DBAT7L:
		bat = &vcpu_book3s->dbat[4 + ((sprn - SPRN_DBAT4U) / 2)];
		break;
	default:
		BUG();
	}

	return bat;
}

int kvmppc_core_emulate_mtspr_pr(struct kvm_vcpu *vcpu, int sprn, ulong spr_val)
{
	int emulated = EMULATE_DONE;

	switch (sprn) {
	case SPRN_SDR1:
		if (!spr_allowed(vcpu, PRIV_HYPER))
			goto unprivileged;
		to_book3s(vcpu)->sdr1 = spr_val;
		break;
	case SPRN_DSISR:
		kvmppc_set_dsisr(vcpu, spr_val);
		break;
	case SPRN_DAR:
		kvmppc_set_dar(vcpu, spr_val);
		break;
	case SPRN_HIOR:
		to_book3s(vcpu)->hior = spr_val;
		break;
	case SPRN_IBAT0U ... SPRN_IBAT3L:
	case SPRN_IBAT4U ... SPRN_IBAT7L:
	case SPRN_DBAT0U ... SPRN_DBAT3L:
	case SPRN_DBAT4U ... SPRN_DBAT7L:
	{
		struct kvmppc_bat *bat = kvmppc_find_bat(vcpu, sprn);

		kvmppc_set_bat(vcpu, bat, !(sprn % 2), (u32)spr_val);
		/* BAT writes happen so rarely that we're ok to flush
		 * everything here */
		kvmppc_mmu_pte_flush(vcpu, 0, 0);
		kvmppc_mmu_flush_segments(vcpu);
		break;
	}
	case SPRN_HID0:
		to_book3s(vcpu)->hid[0] = spr_val;
		break;
	case SPRN_HID1:
		to_book3s(vcpu)->hid[1] = spr_val;
		break;
	case SPRN_HID2:
		to_book3s(vcpu)->hid[2] = spr_val;
		break;
	case SPRN_HID2_GEKKO:
		to_book3s(vcpu)->hid[2] = spr_val;
		/* HID2.PSE controls paired single on gekko */
		switch (vcpu->arch.pvr) {
		case 0x00080200:	/* lonestar 2.0 */
		case 0x00088202:	/* lonestar 2.2 */
		case 0x70000100:	/* gekko 1.0 */
		case 0x00080100:	/* gekko 2.0 */
		case 0x00083203:	/* gekko 2.3a */
		case 0x00083213:	/* gekko 2.3b */
		case 0x00083204:	/* gekko 2.4 */
		case 0x00083214:	/* gekko 2.4e (8SE) - retail HW2 */
		case 0x00087200:	/* broadway */
			if (vcpu->arch.hflags & BOOK3S_HFLAG_NATIVE_PS) {
				/* Native paired singles */
			} else if (spr_val & (1 << 29)) { /* HID2.PSE */
				vcpu->arch.hflags |= BOOK3S_HFLAG_PAIRED_SINGLE;
				kvmppc_giveup_ext(vcpu, MSR_FP);
			} else {
				vcpu->arch.hflags &= ~BOOK3S_HFLAG_PAIRED_SINGLE;
			}
			break;
		}
		break;
	case SPRN_HID4:
	case SPRN_HID4_GEKKO:
		to_book3s(vcpu)->hid[4] = spr_val;
		break;
	case SPRN_HID5:
		to_book3s(vcpu)->hid[5] = spr_val;
		/* guest HID5 set can change is_dcbz32 */
		if (vcpu->arch.mmu.is_dcbz32(vcpu) &&
		    (mfmsr() & MSR_HV))
			vcpu->arch.hflags |= BOOK3S_HFLAG_DCBZ32;
		break;
	case SPRN_GQR0:
	case SPRN_GQR1:
	case SPRN_GQR2:
	case SPRN_GQR3:
	case SPRN_GQR4:
	case SPRN_GQR5:
	case SPRN_GQR6:
	case SPRN_GQR7:
		to_book3s(vcpu)->gqr[sprn - SPRN_GQR0] = spr_val;
		break;
#ifdef CONFIG_PPC_BOOK3S_64
	case SPRN_FSCR:
		kvmppc_set_fscr(vcpu, spr_val);
		break;
	case SPRN_BESCR:
		vcpu->arch.bescr = spr_val;
		break;
	case SPRN_EBBHR:
		vcpu->arch.ebbhr = spr_val;
		break;
	case SPRN_EBBRR:
		vcpu->arch.ebbrr = spr_val;
		break;
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
	case SPRN_TFHAR:
	case SPRN_TEXASR:
	case SPRN_TFIAR:
		if (!cpu_has_feature(CPU_FTR_TM))
			break;

		if (!(kvmppc_get_msr(vcpu) & MSR_TM)) {
			kvmppc_trigger_fac_interrupt(vcpu, FSCR_TM_LG);
			emulated = EMULATE_AGAIN;
			break;
		}

		if (MSR_TM_ACTIVE(kvmppc_get_msr(vcpu)) &&
			!((MSR_TM_SUSPENDED(kvmppc_get_msr(vcpu))) &&
					(sprn == SPRN_TFHAR))) {
			/* It is illegal to mtspr() TM regs in any state
			 * other than non-transactional, with the exception
			 * of TFHAR in suspended state.
			 */
			kvmppc_core_queue_program(vcpu, SRR1_PROGTM);
			emulated = EMULATE_AGAIN;
			break;
		}

		tm_enable();
		if (sprn == SPRN_TFHAR)
			mtspr(SPRN_TFHAR, spr_val);
		else if (sprn == SPRN_TEXASR)
			mtspr(SPRN_TEXASR, spr_val);
		else
			mtspr(SPRN_TFIAR, spr_val);
		tm_disable();

		break;
#endif /* CONFIG_PPC_TRANSACTIONAL_MEM */
#endif /* CONFIG_PPC_BOOK3S_64 */
	case SPRN_MMCR0_GEKKO:
	case SPRN_MMCR1_GEKKO:
	case SPRN_PMC1_GEKKO:
	case SPRN_PMC2_GEKKO:
	case SPRN_PMC3_GEKKO:
	case SPRN_PMC4_GEKKO:
	case SPRN_WPAR_GEKKO:
	case SPRN_MSSSR0:
	case SPRN_DABR:
#ifdef CONFIG_PPC_BOOK3S_64
	case SPRN_MMCRS:
	case SPRN_MMCRA:
	case SPRN_MMCR0:
	case SPRN_MMCR1:
	case SPRN_MMCR2:
	case SPRN_UMMCR2:
#endif
		break;
	default:
unprivileged:
		pr_info_ratelimited("KVM: invalid SPR write: %d\n", sprn);
		if (sprn & 0x10) {
			if (kvmppc_get_msr(vcpu) & MSR_PR) {
				kvmppc_core_queue_program(vcpu, SRR1_PROGPRIV);
				emulated = EMULATE_AGAIN;
			}
		} else {
			if ((kvmppc_get_msr(vcpu) & MSR_PR) || sprn == 0) {
				kvmppc_core_queue_program(vcpu, SRR1_PROGILL);
				emulated = EMULATE_AGAIN;
			}
		}
		break;
	}

	return emulated;
}

int kvmppc_core_emulate_mfspr_pr(struct kvm_vcpu *vcpu, int sprn, ulong *spr_val)
{
	int emulated = EMULATE_DONE;

	switch (sprn) {
	case SPRN_IBAT0U ... SPRN_IBAT3L:
	case SPRN_IBAT4U ... SPRN_IBAT7L:
	case SPRN_DBAT0U ... SPRN_DBAT3L:
	case SPRN_DBAT4U ... SPRN_DBAT7L:
	{
		struct kvmppc_bat *bat = kvmppc_find_bat(vcpu, sprn);

		if (sprn % 2)
			*spr_val = bat->raw >> 32;
		else
			*spr_val = bat->raw;

		break;
	}
	case SPRN_SDR1:
		if (!spr_allowed(vcpu, PRIV_HYPER))
			goto unprivileged;
		*spr_val = to_book3s(vcpu)->sdr1;
		break;
	case SPRN_DSISR:
		*spr_val = kvmppc_get_dsisr(vcpu);
		break;
	case SPRN_DAR:
		*spr_val = kvmppc_get_dar(vcpu);
		break;
	case SPRN_HIOR:
		*spr_val = to_book3s(vcpu)->hior;
		break;
	case SPRN_HID0:
		*spr_val = to_book3s(vcpu)->hid[0];
		break;
	case SPRN_HID1:
		*spr_val = to_book3s(vcpu)->hid[1];
		break;
	case SPRN_HID2:
	case SPRN_HID2_GEKKO:
		*spr_val = to_book3s(vcpu)->hid[2];
		break;
	case SPRN_HID4:
	case SPRN_HID4_GEKKO:
		*spr_val = to_book3s(vcpu)->hid[4];
		break;
	case SPRN_HID5:
		*spr_val = to_book3s(vcpu)->hid[5];
		break;
	case SPRN_PURR:
		/*
		 * On vcpu exit we will already have updated purr
		 */
		*spr_val = vcpu->arch.purr;
		break;
	case SPRN_SPURR:
		/*
		 * On vcpu exit we will already have updated spurr
		 */
		*spr_val = vcpu->arch.spurr;
		break;
	case SPRN_VTB:
		*spr_val = to_book3s(vcpu)->vtb;
		break;
	case SPRN_IC:
		*spr_val = vcpu->arch.ic;
		break;
	case SPRN_GQR0:
	case SPRN_GQR1:
	case SPRN_GQR2:
	case SPRN_GQR3:
	case SPRN_GQR4:
	case SPRN_GQR5:
	case SPRN_GQR6:
	case SPRN_GQR7:
		*spr_val = to_book3s(vcpu)->gqr[sprn - SPRN_GQR0];
		break;
#ifdef CONFIG_PPC_BOOK3S_64
	case SPRN_FSCR:
		*spr_val = vcpu->arch.fscr;
		break;
	case SPRN_BESCR:
		*spr_val = vcpu->arch.bescr;
		break;
	case SPRN_EBBHR:
		*spr_val = vcpu->arch.ebbhr;
		break;
	case SPRN_EBBRR:
		*spr_val = vcpu->arch.ebbrr;
		break;
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
	case SPRN_TFHAR:
	case SPRN_TEXASR:
	case SPRN_TFIAR:
		if (!cpu_has_feature(CPU_FTR_TM))
			break;

		if (!(kvmppc_get_msr(vcpu) & MSR_TM)) {
			kvmppc_trigger_fac_interrupt(vcpu, FSCR_TM_LG);
			emulated = EMULATE_AGAIN;
			break;
		}

		tm_enable();
		if (sprn == SPRN_TFHAR)
			*spr_val = mfspr(SPRN_TFHAR);
		else if (sprn == SPRN_TEXASR)
			*spr_val = mfspr(SPRN_TEXASR);
		else if (sprn == SPRN_TFIAR)
			*spr_val = mfspr(SPRN_TFIAR);
		tm_disable();
		break;
#endif /* CONFIG_PPC_TRANSACTIONAL_MEM */
#endif /* CONFIG_PPC_BOOK3S_64 */
	case SPRN_MMCR0_GEKKO:
	case SPRN_MMCR1_GEKKO:
	case SPRN_PMC1_GEKKO:
	case SPRN_PMC2_GEKKO:
	case SPRN_PMC3_GEKKO:
	case SPRN_PMC4_GEKKO:
	case SPRN_WPAR_GEKKO:
	case SPRN_MSSSR0:
#ifdef CONFIG_PPC_BOOK3S_64
	case SPRN_MMCRS:
	case SPRN_MMCRA:
	case SPRN_MMCR0:
	case SPRN_MMCR1:
	case SPRN_MMCR2:
	case SPRN_UMMCR2:
	case SPRN_TIR:
#endif
		*spr_val = 0;
		break;
	default:
unprivileged:
		pr_info_ratelimited("KVM: invalid SPR read: %d\n", sprn);
		if (sprn & 0x10) {
			if (kvmppc_get_msr(vcpu) & MSR_PR) {
				kvmppc_core_queue_program(vcpu, SRR1_PROGPRIV);
				emulated = EMULATE_AGAIN;
			}
		} else {
			if ((kvmppc_get_msr(vcpu) & MSR_PR) || sprn == 0 ||
			    sprn == 4 || sprn == 5 || sprn == 6) {
				kvmppc_core_queue_program(vcpu, SRR1_PROGILL);
				emulated = EMULATE_AGAIN;
			}
		}
		break;
	}

	return emulated;
}
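
/*
 * Helpers for alignment interrupt handling: recreate the DSISR and DAR
 * values the hardware would have reported for the faulting instruction.
 */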
u32 kvmppc_alignment_dsisr(struct kvm_vcpu *vcpu, unsigned int inst)
{
	return make_dsisr(inst);
}

ulong kvmppc_alignment_dar(struct kvm_vcpu *vcpu, unsigned int inst)
{
#ifdef CONFIG_PPC_BOOK3S_64
	/*
	 * Linux's fix_alignment() assumes that DAR is valid, so we can
	 * rely on it here as well.
	 */
	return vcpu->arch.fault_dar;
#else
	ulong dar = 0;
	ulong ra = get_ra(inst);
	ulong rb = get_rb(inst);

	switch (get_op(inst)) {
	case OP_LFS:
	case OP_LFD:
	case OP_STFS:
	case OP_STFD:
		dar = kvmppc_get_gpr(vcpu, ra);
		dar += (s32)((s16)inst);
		break;
	case 31:
		dar = kvmppc_get_gpr(vcpu, ra);
		dar += kvmppc_get_gpr(vcpu, rb);
		break;
	default:
		printk(KERN_INFO "KVM: Unaligned instruction 0x%x\n", inst);
		break;
	}

	return dar;
#endif
}