1 /* SPDX-License-Identifier: GPL-2.0 */
3 * This file contains the 64-bit "server" PowerPC variant
4 * of the low level exception handling including exception
5 * vectors, exception return, part of the slb and stab
6 * handling and other fixed offset specific things.
8 * This file is meant to be #included from head_64.S due to
9 * position dependent assembly.
11 * Most of this originates from head_64.S and thus has the same copyright history.
16 #include <asm/hw_irq.h>
17 #include <asm/exception-64s.h>
18 #include <asm/ptrace.h>
19 #include <asm/cpuidle.h>
20 #include <asm/head-64.h>
21 #include <asm/feature-fixups.h>
24 * There are a few constraints to be concerned with.
25 * - Real mode exceptions code/data must be located at their physical location.
26 * - Virtual mode exceptions must be mapped at their 0xc000... location.
27 * - Fixed location code must not call directly beyond the __end_interrupts
28 * area when built with CONFIG_RELOCATABLE. The LOAD_HANDLER / bctr sequence must be used.
30 * - LOAD_HANDLER targets must be within first 64K of physical 0 / virtual 0xc000000000000000.
32 * - Conditional branch targets must be within +/-32K of caller.
34 * "Virtual exceptions" run with relocation on (MSR_IR=1, MSR_DR=1), and
35 * therefore don't have to run in physically located code or rfid to
36 * virtual mode kernel code. However on relocatable kernels they do have
37 * to branch to KERNELBASE offset because the rest of the kernel (outside
38 * the exception vectors) may be located elsewhere.
40 * Virtual exceptions correspond with physical, except their entry points
41 * are offset by 0xc000000000000000 and also tend to get an added 0x4000
42 * offset applied. Virtual exceptions are enabled with the Alternate
43 * Interrupt Location (AIL) bit set in the LPCR. However this does not
44 * guarantee they will be delivered virtually. Some conditions (see the ISA)
45 * cause exceptions to be delivered in real mode.
47 * It's impossible to receive interrupts below 0x300 via AIL.
49 * KVM: None of the virtual exceptions are from the guest. Anything that
50 * escalated to HV=1 from HV=0 is delivered via real mode handlers.
53 * We lay out physical memory as follows:
54 * 0x0000 - 0x00ff : Secondary processor spin code
55 * 0x0100 - 0x18ff : Real mode pSeries interrupt vectors
56 * 0x1900 - 0x3fff : Real mode trampolines
57 * 0x4000 - 0x58ff : Relon (IR=1,DR=1) mode pSeries interrupt vectors
58 * 0x5900 - 0x6fff : Relon mode trampolines
59 * 0x7000 - 0x7fff : FWNMI data area
60 * 0x8000 - .... : Common interrupt handlers, remaining early
61 * setup code, rest of kernel.
63 * We could reclaim 0x4000-0x42ff for real mode trampolines if the space
64 * is necessary. Until then it's more consistent to explicitly put VIRT_NONE vectors there.
67 OPEN_FIXED_SECTION(real_vectors, 0x0100, 0x1900)
68 OPEN_FIXED_SECTION(real_trampolines, 0x1900, 0x4000)
69 OPEN_FIXED_SECTION(virt_vectors, 0x4000, 0x5900)
70 OPEN_FIXED_SECTION(virt_trampolines, 0x5900, 0x7000)
71 #if defined(CONFIG_PPC_PSERIES) || defined(CONFIG_PPC_POWERNV)
73 * Data area reserved for FWNMI option.
74 * This address (0x7000) is fixed by the RPA.
75 * pseries and powernv need to keep the whole page from
76 * 0x7000 to 0x8000 free for use by the firmware
78 ZERO_FIXED_SECTION(fwnmi_page, 0x7000, 0x8000)
79 OPEN_TEXT_SECTION(0x8000)
#else
81 OPEN_TEXT_SECTION(0x7000)
#endif
84 USE_FIXED_SECTION(real_vectors)
87 * This is the start of the interrupt handlers for pSeries
88 * This code runs with relocation off.
89 * Code from here to __end_interrupts gets copied down to real
90 * address 0x100 when we are running a relocatable kernel.
91 * Therefore any relative branches in this section must only
92 * branch to labels in this section.
94 .globl __start_interrupts
97 /* No virt vectors corresponding with 0x0..0x100 */
98 EXC_VIRT_NONE(0x4000, 0x100)
101 #ifdef CONFIG_PPC_P7_NAP
103 * If running native on arch 2.06 or later, check if we are waking up
104 * from nap/sleep/winkle, and branch to idle handler. This tests SRR1
105 * bits 46:47. A non-0 value indicates that we are coming from a power
106 * saving state. The idle wakeup handler initially runs in real mode,
107 * but we branch to the 0xc000... address so we can turn on relocation with mtmsr.
110 #define IDLETEST(n) \
111 BEGIN_FTR_SECTION ; \
112 mfspr r10,SPRN_SRR1 ; \
113 rlwinm. r10,r10,47-31,30,31 ; \
116 BRANCH_TO_C000(r10, system_reset_idle_common) ; \
119 END_FTR_SECTION_IFSET(CPU_FTR_HVMODE | CPU_FTR_ARCH_206)
121 #define IDLETEST NOTEST
124 EXC_REAL_BEGIN(system_reset, 0x100, 0x100)
127 * MSR_RI is not enabled, because PACA_EXNMI and the NMI stack are
128 * being used, so a nested NMI exception would corrupt it.
130 EXCEPTION_PROLOG_NORI(PACA_EXNMI, system_reset_common, EXC_STD,
133 EXC_REAL_END(system_reset, 0x100, 0x100)
134 EXC_VIRT_NONE(0x4100, 0x100)
135 TRAMP_KVM(PACA_EXNMI, 0x100)
137 #ifdef CONFIG_PPC_P7_NAP
138 EXC_COMMON_BEGIN(system_reset_idle_common)
140 b pnv_powersave_wakeup
144 * Set IRQS_ALL_DISABLED unconditionally so arch_irqs_disabled does
145 * the right thing. We do not want to reconcile because that goes
146 * through irq tracing which we don't want in NMI.
148 * Save PACAIRQHAPPENED because some code will do a hard disable
149 * (e.g., xmon). So we want to restore this back to where it was
150 * when we return. DAR is unused in the stack, so save it there.
152 #define ADD_RECONCILE_NMI \
153 li r10,IRQS_ALL_DISABLED; \
154 stb r10,PACAIRQSOFTMASK(r13); \
155 lbz r10,PACAIRQHAPPENED(r13); \
std r10,_DAR(r1)
158 EXC_COMMON_BEGIN(system_reset_common)
160 * Increment paca->in_nmi then enable MSR_RI. SLB or MCE will be able
161 * to recover, but nested NMI will notice in_nmi and not recover
162 * because of the use of the NMI stack. in_nmi reentrancy is tested in
163 * system_reset_exception.
165 lhz r10,PACA_IN_NMI(r13)
167 sth r10,PACA_IN_NMI(r13)
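/* Switch to the dedicated NMI emergency stack and allocate an interrupt frame. */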
172 ld r1,PACA_NMI_EMERG_SP(r13)
173 subi r1,r1,INT_FRAME_SIZE
174 EXCEPTION_COMMON_NORET_STACK(PACA_EXNMI, 0x100,
175 system_reset, system_reset_exception,
176 ADD_NVGPRS;ADD_RECONCILE_NMI)
178 /* This (and MCE) can be simplified with mtmsrd L=1 */
179 /* Clear MSR_RI before setting SRR0 and SRR1. */
186 * MSR_RI is clear, now we can decrement paca->in_nmi.
188 lhz r10,PACA_IN_NMI(r13)
190 sth r10,PACA_IN_NMI(r13)
193 * Restore soft mask settings.
196 stb r10,PACAIRQHAPPENED(r13)
198 stb r10,PACAIRQSOFTMASK(r13)
201 * Keep the code below in sync with MACHINE_CHECK_HANDLER_WINDUP.
202 * Should share common bits...
205 /* Move original SRR0 and SRR1 into the respective regs */
223 /* restore original r1. */
225 RFI_TO_USER_OR_KERNEL
227 #ifdef CONFIG_PPC_PSERIES
229 * Vectors for the FWNMI option. Share common code.
231 TRAMP_REAL_BEGIN(system_reset_fwnmi)
232 SET_SCRATCH0(r13) /* save r13 */
233 /* See comment at system_reset exception */
234 EXCEPTION_PROLOG_NORI(PACA_EXNMI, system_reset_common, EXC_STD,
236 #endif /* CONFIG_PPC_PSERIES */
239 EXC_REAL_BEGIN(machine_check, 0x200, 0x100)
240 /* This is moved out of line as it can be patched by FW, but
241 * some code path might still want to branch into the original vector.
244 SET_SCRATCH0(r13) /* save r13 */
245 EXCEPTION_PROLOG_0(PACA_EXMC)
247 b machine_check_powernv_early
249 b machine_check_pSeries_0
250 ALT_FTR_SECTION_END_IFSET(CPU_FTR_HVMODE)
251 EXC_REAL_END(machine_check, 0x200, 0x100)
252 EXC_VIRT_NONE(0x4200, 0x100)
253 TRAMP_REAL_BEGIN(machine_check_powernv_early)
255 EXCEPTION_PROLOG_1(PACA_EXMC, NOTEST, 0x200)
260 * Original R9 to R13 is saved on PACA_EXMC
262 * Switch to mc_emergency stack and handle re-entrancy (we limit
263 * the nested MCE up to level 4 to avoid stack overflow).
264 * Save MCE registers srr1, srr0, dar and dsisr and then set ME=1
266 * We use paca->in_mce to check whether this is the first entry or
267 * nested machine check. We increment paca->in_mce to track nested machine checks.
270 * If this is the first entry then set stack pointer to
271 * paca->mc_emergency_sp, otherwise r1 is already pointing to
272 * stack frame on mc_emergency stack.
274 * NOTE: We are here with MSR_ME=0 (off), which means we risk a
275 * checkstop if we get another machine check exception before we do
276 * rfid with MSR_ME=1.
278 * This interrupt can wake directly from idle. If that is the case,
279 * the machine check is handled, then the idle wakeup code is called to return.
282 mr r11,r1 /* Save r1 */
283 lhz r10,PACA_IN_MCE(r13)
284 cmpwi r10,0 /* Are we in nested machine check */
285 bne 0f /* Yes, we are. */
286 /* First machine check entry */
287 ld r1,PACAMCEMERGSP(r13) /* Use MC emergency stack */
288 0: subi r1,r1,INT_FRAME_SIZE /* alloc stack frame */
289 addi r10,r10,1 /* increment paca->in_mce */
290 sth r10,PACA_IN_MCE(r13)
291 /* Limit nested MCE to level 4 to avoid stack overflow */
292 cmpwi r10,MAX_MCE_DEPTH
293 bgt 2f /* Check if we hit limit of 4 */
294 std r11,GPR1(r1) /* Save r1 on the stack. */
295 std r11,0(r1) /* make stack chain pointer */
296 mfspr r11,SPRN_SRR0 /* Save SRR0 */
298 mfspr r11,SPRN_SRR1 /* Save SRR1 */
300 mfspr r11,SPRN_DAR /* Save DAR */
302 mfspr r11,SPRN_DSISR /* Save DSISR */
304 std r9,_CCR(r1) /* Save CR in stackframe */
305 /* Save r9 through r13 from EXMC save area to stack frame. */
306 EXCEPTION_PROLOG_COMMON_2(PACA_EXMC)
307 mfmsr r11 /* get MSR value */
308 ori r11,r11,MSR_ME /* turn on ME bit */
309 ori r11,r11,MSR_RI /* turn on RI bit */
310 LOAD_HANDLER(r12, machine_check_handle_early)
311 1: mtspr SPRN_SRR0,r12
314 b . /* prevent speculative execution */
316 /* Stack overflow. Stay on emergency stack and panic.
317 * Keep the ME bit off while panicking, so that if we hit
318 * another machine check we checkstop.
320 addi r1,r1,INT_FRAME_SIZE /* go back to previous stack frame */
322 LOAD_HANDLER(r12, unrecover_mce)
324 andc r11,r11,r10 /* Turn off MSR_ME */
326 b . /* prevent speculative execution */
327 END_FTR_SECTION_IFSET(CPU_FTR_HVMODE)
329 TRAMP_REAL_BEGIN(machine_check_pSeries)
330 .globl machine_check_fwnmi
332 SET_SCRATCH0(r13) /* save r13 */
333 EXCEPTION_PROLOG_0(PACA_EXMC)
334 machine_check_pSeries_0:
335 EXCEPTION_PROLOG_1(PACA_EXMC, KVMTEST_PR, 0x200)
337 * MSR_RI is not enabled, because PACA_EXMC is being used, so a
338 * nested machine check corrupts it. machine_check_common enables MSR_RI.
341 EXCEPTION_PROLOG_2_NORI(machine_check_common, EXC_STD)
343 TRAMP_KVM_SKIP(PACA_EXMC, 0x200)
345 EXC_COMMON_BEGIN(machine_check_common)
347 * Machine check is different because we use a different
348 * save area: PACA_EXMC instead of PACA_EXGEN.
351 std r10,PACA_EXMC+EX_DAR(r13)
353 stw r10,PACA_EXMC+EX_DSISR(r13)
354 EXCEPTION_PROLOG_COMMON(0x200, PACA_EXMC)
356 RECONCILE_IRQ_STATE(r10, r11)
357 ld r3,PACA_EXMC+EX_DAR(r13)
358 lwz r4,PACA_EXMC+EX_DSISR(r13)
359 /* Enable MSR_RI when finished with PACA_EXMC */
365 addi r3,r1,STACK_FRAME_OVERHEAD
366 bl machine_check_exception
369 #define MACHINE_CHECK_HANDLER_WINDUP \
370 /* Clear MSR_RI before setting SRR0 and SRR1. */\
372 mfmsr r9; /* get MSR value */ \
374 mtmsrd r9,1; /* Clear MSR_RI */ \
375 /* Move original SRR0 and SRR1 into the respective regs */ \
377 mtspr SPRN_SRR1,r9; \
379 mtspr SPRN_SRR0,r3; \
391 /* Decrement paca->in_mce. */ \
392 lhz r12,PACA_IN_MCE(r13); \
394 sth r12,PACA_IN_MCE(r13); \
396 REST_2GPRS(12, r1); \
397 /* restore original r1. */ \
400 #ifdef CONFIG_PPC_P7_NAP
402 * This is an idle wakeup. Low level machine check has already been
403 * done. Queue the event then call the idle code to do the wake up.
405 EXC_COMMON_BEGIN(machine_check_idle_common)
406 bl machine_check_queue_event
409 * We have not used any non-volatile GPRs here, and as a rule
410 * most exception code including machine check does not.
411 * Therefore PACA_NAPSTATELOST does not need to be set. Idle
412 * wakeup will restore volatile registers.
414 * Load the original SRR1 into r3 for pnv_powersave_wakeup_mce.
416 * Then decrement MCE nesting after finishing with the stack.
420 lhz r11,PACA_IN_MCE(r13)
422 sth r11,PACA_IN_MCE(r13)
424 /* Turn off the RI bit because SRR1 is used by idle wakeup code. */
425 /* Recoverability could be improved by reducing the use of SRR1. */
429 b pnv_powersave_wakeup_mce
432 * Handle machine check early in real mode. We come here with
433 * ME=1, MMU (IR=0 and DR=0) off and using MC emergency stack.
435 EXC_COMMON_BEGIN(machine_check_handle_early)
436 std r0,GPR0(r1) /* Save r0 */
437 EXCEPTION_PROLOG_COMMON_3(0x200)
439 addi r3,r1,STACK_FRAME_OVERHEAD
440 bl machine_check_early
441 std r3,RESULT(r1) /* Save result */
444 #ifdef CONFIG_PPC_P7_NAP
446 * Check if thread was in power saving mode. We come here when any
447 * of the following is true:
448 * a. thread wasn't in power saving mode
449 * b. thread was in power saving mode with no state loss,
450 * supervisor state loss or hypervisor state loss.
452 * Go back to nap/sleep/winkle mode again if (b) is true.
455 rlwinm. r11,r12,47-31,30,31
456 bne machine_check_idle_common
457 END_FTR_SECTION_IFSET(CPU_FTR_HVMODE | CPU_FTR_ARCH_206)
461 * Check if we are coming from hypervisor userspace. If yes, then we
462 * continue in the host kernel in V mode to deliver the MC event.
464 rldicl. r11,r12,4,63 /* See if MC hit while in HV mode. */
466 andi. r11,r12,MSR_PR /* See if coming from user. */
467 bne 9f /* continue in V mode if we are. */
470 #ifdef CONFIG_KVM_BOOK3S_64_HANDLER
472 * We are coming from kernel context. Check if we are coming from
473 * guest. If yes, then we can continue. We will fall through
474 * do_kvm_200->kvmppc_interrupt to deliver the MC event to guest.
476 lbz r11,HSTATE_IN_GUEST(r13)
477 cmpwi r11,0 /* Check if coming from guest */
478 bne 9f /* continue if we are. */
481 * At this point we are not sure about what context we come from.
482 * Queue up the MCE event and return from the interrupt.
483 * But before that, check if this is an un-recoverable exception.
484 * If yes, then stay on emergency stack and panic.
488 1: mfspr r11,SPRN_SRR0
489 LOAD_HANDLER(r10,unrecover_mce)
493 * We are going down. But there is a chance that we might get hit by
494 * another MCE during the panic path and run into an unstable state
495 * with no way out. Hence, turn the ME bit off while going down, so that
496 * if another MCE is hit during the panic path, the system will checkstop
497 * and the hypervisor will get restarted cleanly by the SP.
500 andc r10,r10,r3 /* Turn off MSR_ME */
506 * Check if we have successfully handled/recovered from the error; if not,
507 * then stay on the emergency stack and panic.
509 ld r3,RESULT(r1) /* Load result */
510 cmpdi r3,0 /* see if we handled MCE successfully */
512 beq 1b /* if !handled then panic */
514 * Return from MC interrupt.
515 * Queue up the MCE event so that we can log it later, while
516 * returning from kernel or opal call.
518 bl machine_check_queue_event
519 MACHINE_CHECK_HANDLER_WINDUP
520 RFI_TO_USER_OR_KERNEL
522 /* Deliver the machine check to host kernel in V mode. */
526 END_FTR_SECTION_IFSET(CPU_FTR_CFAR)
527 MACHINE_CHECK_HANDLER_WINDUP
528 b machine_check_pSeries
530 EXC_COMMON_BEGIN(unrecover_mce)
531 /* Invoke machine_check_exception to print MCE event and panic. */
532 addi r3,r1,STACK_FRAME_OVERHEAD
533 bl machine_check_exception
535 * We will not reach here. Even if we did, there is no way out. Call
536 * unrecoverable_exception and die.
538 1: addi r3,r1,STACK_FRAME_OVERHEAD
539 bl unrecoverable_exception
543 EXC_REAL_OOL(data_access, 0x300, 0x80)
544 EXC_VIRT(data_access, 0x4300, 0x80, 0x300)
545 TRAMP_KVM_SKIP(PACA_EXGEN, 0x300)
547 EXC_COMMON_BEGIN(data_access_common)
549 * Here r13 points to the paca, r9 contains the saved CR,
550 * SRR0 and SRR1 are saved in r11 and r12,
551 * r9 - r13 are saved in paca->exgen.
554 std r10,PACA_EXGEN+EX_DAR(r13)
556 stw r10,PACA_EXGEN+EX_DSISR(r13)
557 EXCEPTION_PROLOG_COMMON(0x300, PACA_EXGEN)
558 RECONCILE_IRQ_STATE(r10, r11)
560 ld r3,PACA_EXGEN+EX_DAR(r13)
561 lwz r4,PACA_EXGEN+EX_DSISR(r13)
565 BEGIN_MMU_FTR_SECTION
566 b do_hash_page /* Try to handle as hpte fault */
569 ALT_MMU_FTR_SECTION_END_IFCLR(MMU_FTR_TYPE_RADIX)
572 EXC_REAL_BEGIN(data_access_slb, 0x380, 0x80)
574 EXCEPTION_PROLOG_0(PACA_EXSLB)
575 b tramp_data_access_slb
576 EXC_REAL_END(data_access_slb, 0x380, 0x80)
578 TRAMP_REAL_BEGIN(tramp_data_access_slb)
579 EXCEPTION_PROLOG_1(PACA_EXSLB, KVMTEST_PR, 0x380)
580 mr r12,r3 /* save r3 */
584 BRANCH_TO_COMMON(r10, slb_miss_common)
586 EXC_VIRT_BEGIN(data_access_slb, 0x4380, 0x80)
588 EXCEPTION_PROLOG_0(PACA_EXSLB)
589 EXCEPTION_PROLOG_1(PACA_EXSLB, NOTEST, 0x380)
590 mr r12,r3 /* save r3 */
594 BRANCH_TO_COMMON(r10, slb_miss_common)
595 EXC_VIRT_END(data_access_slb, 0x4380, 0x80)
596 TRAMP_KVM_SKIP(PACA_EXSLB, 0x380)
599 EXC_REAL_OOL(instruction_access, 0x400, 0x80)
600 EXC_VIRT(instruction_access, 0x4400, 0x80, 0x400)
601 TRAMP_KVM(PACA_EXGEN, 0x400)
603 EXC_COMMON_BEGIN(instruction_access_common)
604 EXCEPTION_PROLOG_COMMON(0x400, PACA_EXGEN)
605 RECONCILE_IRQ_STATE(r10, r11)
608 andis. r4,r12,DSISR_SRR1_MATCH_64S@h
612 BEGIN_MMU_FTR_SECTION
613 b do_hash_page /* Try to handle as hpte fault */
616 ALT_MMU_FTR_SECTION_END_IFCLR(MMU_FTR_TYPE_RADIX)
619 EXC_REAL_BEGIN(instruction_access_slb, 0x480, 0x80)
621 EXCEPTION_PROLOG_0(PACA_EXSLB)
622 b tramp_instruction_access_slb
623 EXC_REAL_END(instruction_access_slb, 0x480, 0x80)
625 TRAMP_REAL_BEGIN(tramp_instruction_access_slb)
626 EXCEPTION_PROLOG_1(PACA_EXSLB, KVMTEST_PR, 0x480)
627 mr r12,r3 /* save r3 */
628 mfspr r3,SPRN_SRR0 /* SRR0 is faulting address */
631 BRANCH_TO_COMMON(r10, slb_miss_common)
633 EXC_VIRT_BEGIN(instruction_access_slb, 0x4480, 0x80)
635 EXCEPTION_PROLOG_0(PACA_EXSLB)
636 EXCEPTION_PROLOG_1(PACA_EXSLB, NOTEST, 0x480)
637 mr r12,r3 /* save r3 */
638 mfspr r3,SPRN_SRR0 /* SRR0 is faulting address */
641 BRANCH_TO_COMMON(r10, slb_miss_common)
642 EXC_VIRT_END(instruction_access_slb, 0x4480, 0x80)
643 TRAMP_KVM(PACA_EXSLB, 0x480)
647 * This handler is used by the 0x380 and 0x480 SLB miss interrupts, as well as
648 * the virtual mode 0x4380 and 0x4480 interrupts if AIL is enabled.
650 EXC_COMMON_BEGIN(slb_miss_common)
652 * r13 points to the PACA, r9 contains the saved CR,
653 * r12 contains the saved r3,
654 * r11 contains the saved SRR1, SRR0 is still ready for return
655 * r3 has the faulting address
656 * r9 - r13 are saved in paca->exslb.
657 * cr6.eq is set for a D-SLB miss, clear for an I-SLB miss
658 * We assume we aren't going to take any exceptions during this procedure.
662 stw r9,PACA_EXSLB+EX_CCR(r13) /* save CR in exc. frame */
663 std r10,PACA_EXSLB+EX_LR(r13) /* save LR */
665 andi. r9,r11,MSR_PR // Check for exception from userspace
666 cmpdi cr4,r9,MSR_PR // And save the result in CR4 for later
669 * Test MSR_RI before calling slb_allocate_realmode, because the
670 * MSR in r11 gets clobbered. However we still want to allocate
671 * SLB in case MSR_RI=0, to minimise the risk of getting stuck in
672 * recursive SLB faults. So use cr5 for this, which is preserved.
674 andi. r11,r11,MSR_RI /* check for unrecoverable exception */
678 #ifdef CONFIG_PPC_BOOK3S_64
679 BEGIN_MMU_FTR_SECTION
681 END_MMU_FTR_SECTION_IFCLR(MMU_FTR_TYPE_RADIX)
684 ld r10,PACA_EXSLB+EX_LR(r13)
685 lwz r9,PACA_EXSLB+EX_CCR(r13) /* get saved CR */
689 * Large address, check whether we have to allocate new contexts.
693 bne- cr5,2f /* if unrecoverable exception, oops */
695 /* All done -- return from exception. */
697 bne cr4,1f /* returning to kernel */
700 mtcrf 0x08,r9 /* MSR[PR] indication is in cr4 */
701 mtcrf 0x04,r9 /* MSR[RI] indication is in cr5 */
702 mtcrf 0x02,r9 /* I/D indication is in cr6 */
703 mtcrf 0x01,r9 /* slb_allocate uses cr0 and cr7 */
705 RESTORE_CTR(r9, PACA_EXSLB)
706 RESTORE_PPR_PACA(PACA_EXSLB, r9)
708 ld r9,PACA_EXSLB+EX_R9(r13)
709 ld r10,PACA_EXSLB+EX_R10(r13)
710 ld r11,PACA_EXSLB+EX_R11(r13)
711 ld r12,PACA_EXSLB+EX_R12(r13)
712 ld r13,PACA_EXSLB+EX_R13(r13)
714 b . /* prevent speculative execution */
717 mtcrf 0x08,r9 /* MSR[PR] indication is in cr4 */
718 mtcrf 0x04,r9 /* MSR[RI] indication is in cr5 */
719 mtcrf 0x02,r9 /* I/D indication is in cr6 */
720 mtcrf 0x01,r9 /* slb_allocate uses cr0 and cr7 */
722 RESTORE_CTR(r9, PACA_EXSLB)
723 RESTORE_PPR_PACA(PACA_EXSLB, r9)
725 ld r9,PACA_EXSLB+EX_R9(r13)
726 ld r10,PACA_EXSLB+EX_R10(r13)
727 ld r11,PACA_EXSLB+EX_R11(r13)
728 ld r12,PACA_EXSLB+EX_R12(r13)
729 ld r13,PACA_EXSLB+EX_R13(r13)
731 b . /* prevent speculative execution */
734 2: std r3,PACA_EXSLB+EX_DAR(r13)
738 LOAD_HANDLER(r10,unrecov_slb)
745 8: std r3,PACA_EXSLB+EX_DAR(r13)
749 LOAD_HANDLER(r10, large_addr_slb)
756 EXC_COMMON_BEGIN(unrecov_slb)
757 EXCEPTION_PROLOG_COMMON(0x4100, PACA_EXSLB)
758 RECONCILE_IRQ_STATE(r10, r11)
760 1: addi r3,r1,STACK_FRAME_OVERHEAD
761 bl unrecoverable_exception
764 EXC_COMMON_BEGIN(large_addr_slb)
765 EXCEPTION_PROLOG_COMMON(0x380, PACA_EXSLB)
766 RECONCILE_IRQ_STATE(r10, r11)
767 ld r3, PACA_EXSLB+EX_DAR(r13)
770 li r10, 0x481 /* fix trap number for I-SLB miss */
773 addi r3, r1, STACK_FRAME_OVERHEAD
774 bl slb_miss_large_addr
777 EXC_REAL_BEGIN(hardware_interrupt, 0x500, 0x100)
778 .globl hardware_interrupt_hv;
779 hardware_interrupt_hv:
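/*
 * A feature section selects between the HV form (using HSRR0/HSRR1) and
 * the standard form (using SRR0/SRR1) of the external interrupt,
 * depending on CPU_FTR_HVMODE | CPU_FTR_ARCH_206.
 */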
781 MASKABLE_EXCEPTION_HV(0x500, hardware_interrupt_common, IRQS_DISABLED)
783 MASKABLE_EXCEPTION(0x500, hardware_interrupt_common, IRQS_DISABLED)
784 ALT_FTR_SECTION_END_IFSET(CPU_FTR_HVMODE | CPU_FTR_ARCH_206)
785 EXC_REAL_END(hardware_interrupt, 0x500, 0x100)
787 EXC_VIRT_BEGIN(hardware_interrupt, 0x4500, 0x100)
788 .globl hardware_interrupt_relon_hv;
789 hardware_interrupt_relon_hv:
791 MASKABLE_RELON_EXCEPTION_HV(0x500, hardware_interrupt_common,
794 __MASKABLE_RELON_EXCEPTION(0x500, hardware_interrupt_common,
795 EXC_STD, SOFTEN_TEST_PR, IRQS_DISABLED)
796 ALT_FTR_SECTION_END_IFSET(CPU_FTR_HVMODE)
797 EXC_VIRT_END(hardware_interrupt, 0x4500, 0x100)
799 TRAMP_KVM(PACA_EXGEN, 0x500)
800 TRAMP_KVM_HV(PACA_EXGEN, 0x500)
801 EXC_COMMON_ASYNC(hardware_interrupt_common, 0x500, do_IRQ)
804 EXC_REAL(alignment, 0x600, 0x100)
805 EXC_VIRT(alignment, 0x4600, 0x100, 0x600)
806 TRAMP_KVM(PACA_EXGEN, 0x600)
807 EXC_COMMON_BEGIN(alignment_common)
809 std r10,PACA_EXGEN+EX_DAR(r13)
811 stw r10,PACA_EXGEN+EX_DSISR(r13)
812 EXCEPTION_PROLOG_COMMON(0x600, PACA_EXGEN)
813 ld r3,PACA_EXGEN+EX_DAR(r13)
814 lwz r4,PACA_EXGEN+EX_DSISR(r13)
818 RECONCILE_IRQ_STATE(r10, r11)
819 addi r3,r1,STACK_FRAME_OVERHEAD
820 bl alignment_exception
824 EXC_REAL(program_check, 0x700, 0x100)
825 EXC_VIRT(program_check, 0x4700, 0x100, 0x700)
826 TRAMP_KVM(PACA_EXGEN, 0x700)
827 EXC_COMMON_BEGIN(program_check_common)
829 * It's possible to receive a TM Bad Thing type program check with
830 * userspace register values (in particular r1), but with SRR1 reporting
831 * that we came from the kernel. Normally that would confuse the bad
832 * stack logic, and we would report a bad kernel stack pointer. Instead
833 * we switch to the emergency stack if we're taking a TM Bad Thing from the kernel.
836 li r10,MSR_PR /* Build a mask of MSR_PR .. */
837 oris r10,r10,0x200000@h /* .. and SRR1_PROGTM */
838 and r10,r10,r12 /* Mask SRR1 with that. */
839 srdi r10,r10,8 /* Shift it so we can compare */
840 cmpldi r10,(0x200000 >> 8) /* .. with an immediate. */
841 bne 1f /* If != go to normal path. */
843 /* SRR1 had PR=0 and SRR1_PROGTM=1, so use the emergency stack */
844 andi. r10,r12,MSR_PR; /* Set CR0 correctly for label */
845 /* 3 in EXCEPTION_PROLOG_COMMON */
846 mr r10,r1 /* Save r1 */
847 ld r1,PACAEMERGSP(r13) /* Use emergency stack */
848 subi r1,r1,INT_FRAME_SIZE /* alloc stack frame */
849 b 3f /* Jump into the macro !! */
850 1: EXCEPTION_PROLOG_COMMON(0x700, PACA_EXGEN)
852 RECONCILE_IRQ_STATE(r10, r11)
853 addi r3,r1,STACK_FRAME_OVERHEAD
854 bl program_check_exception
858 EXC_REAL(fp_unavailable, 0x800, 0x100)
859 EXC_VIRT(fp_unavailable, 0x4800, 0x100, 0x800)
860 TRAMP_KVM(PACA_EXGEN, 0x800)
861 EXC_COMMON_BEGIN(fp_unavailable_common)
862 EXCEPTION_PROLOG_COMMON(0x800, PACA_EXGEN)
863 bne 1f /* if from user, just load it up */
865 RECONCILE_IRQ_STATE(r10, r11)
866 addi r3,r1,STACK_FRAME_OVERHEAD
867 bl kernel_fp_unavailable_exception
870 #ifdef CONFIG_PPC_TRANSACTIONAL_MEM
872 /* Test if 2 TM state bits are zero. If non-zero (i.e. userspace was in a
873 * transaction), go do TM stuff
875 rldicl. r0, r12, (64-MSR_TS_LG), (64-2)
877 END_FTR_SECTION_IFSET(CPU_FTR_TM)
880 b fast_exception_return
881 #ifdef CONFIG_PPC_TRANSACTIONAL_MEM
882 2: /* User process was in a transaction */
884 RECONCILE_IRQ_STATE(r10, r11)
885 addi r3,r1,STACK_FRAME_OVERHEAD
891 EXC_REAL_OOL_MASKABLE(decrementer, 0x900, 0x80, IRQS_DISABLED)
892 EXC_VIRT_OOL_MASKABLE(decrementer, 0x4900, 0x80, 0x900, IRQS_DISABLED)
893 TRAMP_KVM(PACA_EXGEN, 0x900)
894 EXC_COMMON_ASYNC(decrementer_common, 0x900, timer_interrupt)
897 EXC_REAL_OOL_HV(hdecrementer, 0x980, 0x80)
898 EXC_VIRT_OOL_HV(hdecrementer, 0x4980, 0x80, 0x980)
899 TRAMP_KVM_HV(PACA_EXGEN, 0x980)
900 EXC_COMMON(hdecrementer_common, 0x980, hdec_interrupt)
903 EXC_REAL_MASKABLE(doorbell_super, 0xa00, 0x100, IRQS_DISABLED)
904 EXC_VIRT_MASKABLE(doorbell_super, 0x4a00, 0x100, 0xa00, IRQS_DISABLED)
905 TRAMP_KVM(PACA_EXGEN, 0xa00)
906 #ifdef CONFIG_PPC_DOORBELL
907 EXC_COMMON_ASYNC(doorbell_super_common, 0xa00, doorbell_exception)
909 EXC_COMMON_ASYNC(doorbell_super_common, 0xa00, unknown_exception)
913 EXC_REAL(trap_0b, 0xb00, 0x100)
914 EXC_VIRT(trap_0b, 0x4b00, 0x100, 0xb00)
915 TRAMP_KVM(PACA_EXGEN, 0xb00)
916 EXC_COMMON(trap_0b_common, 0xb00, unknown_exception)
919 * system call / hypercall (0xc00, 0x4c00)
921 * The system call exception is invoked with "sc 0" and does not alter HV bit.
922 * There is support for kernel code to invoke system calls but there are no in-kernel users.
925 * The hypercall is invoked with "sc 1" and sets HV=1.
927 * In HPT, sc 1 always goes to 0xc00 real mode. In RADIX, sc 1 can go to
928 * 0x4c00 virtual mode.
932 * syscall register convention is in Documentation/powerpc/syscall64-abi.txt
934 * For hypercalls, the register convention is as follows:
937 * r3 volatile parameter and return value for status
938 * r4-r10 volatile input and output value
939 * r11 volatile hypercall number and output value
940 * r12 volatile input and output value
941 * r13-r31 nonvolatile
945 * CR0-1 CR5-7 volatile
947 * Other registers nonvolatile
949 * The intersection of volatile registers that don't contain possible
950 * inputs is: cr0, xer, ctr. We may use these as scratch regs upon entry
951 * without saving, though xer is not a good idea to use, as hardware may
952 * interpret some bits so it may be costly to change them.
954 #ifdef CONFIG_KVM_BOOK3S_64_HANDLER
956 * There is a little bit of juggling to get syscall and hcall
957 * working well. Save r13 in ctr to avoid using the SPRG scratch register.
960 * Userspace syscalls have already saved the PPR, hcalls must save
961 * it before setting HMT_MEDIUM.
963 #define SYSCALL_KVMTEST \
966 std r10,PACA_EXGEN+EX_R10(r13); \
967 INTERRUPT_TO_KERNEL; \
968 KVMTEST_PR(0xc00); /* uses r10, branch to do_kvm_0xc00_system_call */ \
973 #define SYSCALL_KVMTEST \
980 #define LOAD_SYSCALL_HANDLER(reg) \
981 __LOAD_HANDLER(reg, system_call_common)
984 * After SYSCALL_KVMTEST, we reach here with PACA in r13, r13 in r9, and HMT_MEDIUM.
987 #define SYSCALL_REAL \
988 mfspr r11,SPRN_SRR0 ; \
989 mfspr r12,SPRN_SRR1 ; \
990 LOAD_SYSCALL_HANDLER(r10) ; \
991 mtspr SPRN_SRR0,r10 ; \
992 ld r10,PACAKMSR(r13) ; \
993 mtspr SPRN_SRR1,r10 ; \
995 b . ; /* prevent speculative execution */
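/*
 * SYSCALL_REAL reaches system_call_common by loading its address into
 * SRR0 and the kernel MSR (PACAKMSR) into SRR1 and returning through
 * them; the SYSCALL_VIRT variants further down stay in virtual mode and
 * either branch directly or go via the CTR when the kernel is relocatable.
 */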
997 #ifdef CONFIG_PPC_FAST_ENDIAN_SWITCH
998 #define SYSCALL_FASTENDIAN_TEST \
1002 END_FTR_SECTION_IFSET(CPU_FTR_REAL_LE) \
1004 #define SYSCALL_FASTENDIAN \
1005 /* Fast LE/BE switch system call */ \
1006 1: mfspr r12,SPRN_SRR1 ; \
1007 xori r12,r12,MSR_LE ; \
1008 mtspr SPRN_SRR1,r12 ; \
1010 RFI_TO_USER ; /* return to userspace */ \
1011 b . ; /* prevent speculative execution */
1013 #define SYSCALL_FASTENDIAN_TEST
1014 #define SYSCALL_FASTENDIAN
1015 #endif /* CONFIG_PPC_FAST_ENDIAN_SWITCH */
1017 #if defined(CONFIG_RELOCATABLE)
1019 * We can't branch directly so we do it via the CTR which
1020 * is volatile across system calls.
1022 #define SYSCALL_VIRT \
1023 LOAD_SYSCALL_HANDLER(r10) ; \
1025 mfspr r11,SPRN_SRR0 ; \
1026 mfspr r12,SPRN_SRR1 ; \
1031 /* We can branch directly */
1032 #define SYSCALL_VIRT \
1033 mfspr r11,SPRN_SRR0 ; \
1034 mfspr r12,SPRN_SRR1 ; \
1036 mtmsrd r10,1 ; /* Set RI (EE=0) */ \
1037 b system_call_common ;
1040 EXC_REAL_BEGIN(system_call, 0xc00, 0x100)
1041 SYSCALL_KVMTEST /* loads PACA into r13, and saves r13 to r9 */
1042 SYSCALL_FASTENDIAN_TEST
1045 EXC_REAL_END(system_call, 0xc00, 0x100)
1047 EXC_VIRT_BEGIN(system_call, 0x4c00, 0x100)
1048 SYSCALL_KVMTEST /* loads PACA into r13, and saves r13 to r9 */
1049 SYSCALL_FASTENDIAN_TEST
1052 EXC_VIRT_END(system_call, 0x4c00, 0x100)
1054 #ifdef CONFIG_KVM_BOOK3S_64_HANDLER
1056 * This is a hcall, so register convention is as above, with these differences:
1060 * orig r10 saved in PACA
1062 TRAMP_KVM_BEGIN(do_kvm_0xc00)
1064 * Save the PPR (on systems that support it) before changing to
1065 * HMT_MEDIUM. That allows the KVM code to save that value into the
1066 * guest state (it is the guest's PPR value).
1068 OPT_GET_SPR(r10, SPRN_PPR, CPU_FTR_HAS_PPR)
1070 OPT_SAVE_REG_TO_PACA(PACA_EXGEN+EX_PPR, r10, CPU_FTR_HAS_PPR)
1073 std r9,PACA_EXGEN+EX_R9(r13)
1075 KVM_HANDLER(PACA_EXGEN, EXC_STD, 0xc00)
1079 EXC_REAL(single_step, 0xd00, 0x100)
1080 EXC_VIRT(single_step, 0x4d00, 0x100, 0xd00)
1081 TRAMP_KVM(PACA_EXGEN, 0xd00)
1082 EXC_COMMON(single_step_common, 0xd00, single_step_exception)
1084 EXC_REAL_OOL_HV(h_data_storage, 0xe00, 0x20)
1085 EXC_VIRT_OOL_HV(h_data_storage, 0x4e00, 0x20, 0xe00)
1086 TRAMP_KVM_HV_SKIP(PACA_EXGEN, 0xe00)
1087 EXC_COMMON_BEGIN(h_data_storage_common)
1089 std r10,PACA_EXGEN+EX_DAR(r13)
1090 mfspr r10,SPRN_HDSISR
1091 stw r10,PACA_EXGEN+EX_DSISR(r13)
1092 EXCEPTION_PROLOG_COMMON(0xe00, PACA_EXGEN)
1094 RECONCILE_IRQ_STATE(r10, r11)
1095 addi r3,r1,STACK_FRAME_OVERHEAD
1096 bl unknown_exception
1100 EXC_REAL_OOL_HV(h_instr_storage, 0xe20, 0x20)
1101 EXC_VIRT_OOL_HV(h_instr_storage, 0x4e20, 0x20, 0xe20)
1102 TRAMP_KVM_HV(PACA_EXGEN, 0xe20)
1103 EXC_COMMON(h_instr_storage_common, 0xe20, unknown_exception)
1106 EXC_REAL_OOL_HV(emulation_assist, 0xe40, 0x20)
1107 EXC_VIRT_OOL_HV(emulation_assist, 0x4e40, 0x20, 0xe40)
1108 TRAMP_KVM_HV(PACA_EXGEN, 0xe40)
1109 EXC_COMMON(emulation_assist_common, 0xe40, emulation_assist_interrupt)
1113 * hmi_exception trampoline is a special case. It jumps to hmi_exception_early
1114 * first, and then eventually from there to the trampoline to get into virtual mode.
1117 __EXC_REAL_OOL_HV_DIRECT(hmi_exception, 0xe60, 0x20, hmi_exception_early)
1118 __TRAMP_REAL_OOL_MASKABLE_HV(hmi_exception, 0xe60, IRQS_DISABLED)
1119 EXC_VIRT_NONE(0x4e60, 0x20)
1120 TRAMP_KVM_HV(PACA_EXGEN, 0xe60)
1121 TRAMP_REAL_BEGIN(hmi_exception_early)
1122 EXCEPTION_PROLOG_1(PACA_EXGEN, KVMTEST_HV, 0xe60)
1123 mr r10,r1 /* Save r1 */
1124 ld r1,PACAEMERGSP(r13) /* Use emergency stack for realmode */
1125 subi r1,r1,INT_FRAME_SIZE /* alloc stack frame */
1126 mfspr r11,SPRN_HSRR0 /* Save HSRR0 */
1127 mfspr r12,SPRN_HSRR1 /* Save HSRR1 */
1128 EXCEPTION_PROLOG_COMMON_1()
1129 EXCEPTION_PROLOG_COMMON_2(PACA_EXGEN)
1130 EXCEPTION_PROLOG_COMMON_3(0xe60)
1131 addi r3,r1,STACK_FRAME_OVERHEAD
1132 BRANCH_LINK_TO_FAR(DOTSYM(hmi_exception_realmode)) /* Function call ABI */
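/*
 * A non-zero return from hmi_exception_realmode means the event still
 * needs to be handled in virtual mode, via hmi_exception_after_realmode
 * below.
 */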
1135 /* Windup the stack. */
1136 /* Move original HSRR0 and HSRR1 into the respective regs */
1156 HRFI_TO_USER_OR_KERNEL
1163 * Go to virtual mode and pull the HMI event information from hardware.
1166 .globl hmi_exception_after_realmode
1167 hmi_exception_after_realmode:
1169 EXCEPTION_PROLOG_0(PACA_EXGEN)
1170 b tramp_real_hmi_exception
1172 EXC_COMMON_BEGIN(hmi_exception_common)
1173 EXCEPTION_COMMON(PACA_EXGEN, 0xe60, hmi_exception_common, handle_hmi_exception,
1174 ret_from_except, FINISH_NAP;ADD_NVGPRS;ADD_RECONCILE;RUNLATCH_ON)
1176 EXC_REAL_OOL_MASKABLE_HV(h_doorbell, 0xe80, 0x20, IRQS_DISABLED)
1177 EXC_VIRT_OOL_MASKABLE_HV(h_doorbell, 0x4e80, 0x20, 0xe80, IRQS_DISABLED)
1178 TRAMP_KVM_HV(PACA_EXGEN, 0xe80)
1179 #ifdef CONFIG_PPC_DOORBELL
1180 EXC_COMMON_ASYNC(h_doorbell_common, 0xe80, doorbell_exception)
1182 EXC_COMMON_ASYNC(h_doorbell_common, 0xe80, unknown_exception)
1186 EXC_REAL_OOL_MASKABLE_HV(h_virt_irq, 0xea0, 0x20, IRQS_DISABLED)
1187 EXC_VIRT_OOL_MASKABLE_HV(h_virt_irq, 0x4ea0, 0x20, 0xea0, IRQS_DISABLED)
1188 TRAMP_KVM_HV(PACA_EXGEN, 0xea0)
1189 EXC_COMMON_ASYNC(h_virt_irq_common, 0xea0, do_IRQ)
1192 EXC_REAL_NONE(0xec0, 0x20)
1193 EXC_VIRT_NONE(0x4ec0, 0x20)
1194 EXC_REAL_NONE(0xee0, 0x20)
1195 EXC_VIRT_NONE(0x4ee0, 0x20)
1198 EXC_REAL_OOL_MASKABLE(performance_monitor, 0xf00, 0x20, IRQS_PMI_DISABLED)
1199 EXC_VIRT_OOL_MASKABLE(performance_monitor, 0x4f00, 0x20, 0xf00, IRQS_PMI_DISABLED)
1200 TRAMP_KVM(PACA_EXGEN, 0xf00)
1201 EXC_COMMON_ASYNC(performance_monitor_common, 0xf00, performance_monitor_exception)
1204 EXC_REAL_OOL(altivec_unavailable, 0xf20, 0x20)
1205 EXC_VIRT_OOL(altivec_unavailable, 0x4f20, 0x20, 0xf20)
1206 TRAMP_KVM(PACA_EXGEN, 0xf20)
1207 EXC_COMMON_BEGIN(altivec_unavailable_common)
1208 EXCEPTION_PROLOG_COMMON(0xf20, PACA_EXGEN)
1209 #ifdef CONFIG_ALTIVEC
1212 #ifdef CONFIG_PPC_TRANSACTIONAL_MEM
1213 BEGIN_FTR_SECTION_NESTED(69)
1214 /* Test if 2 TM state bits are zero. If non-zero (i.e. userspace was in a
1215 * transaction), go do TM stuff
1217 rldicl. r0, r12, (64-MSR_TS_LG), (64-2)
1219 END_FTR_SECTION_NESTED(CPU_FTR_TM, CPU_FTR_TM, 69)
1222 b fast_exception_return
1223 #ifdef CONFIG_PPC_TRANSACTIONAL_MEM
1224 2: /* User process was in a transaction */
1226 RECONCILE_IRQ_STATE(r10, r11)
1227 addi r3,r1,STACK_FRAME_OVERHEAD
1228 bl altivec_unavailable_tm
1232 END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
1235 RECONCILE_IRQ_STATE(r10, r11)
1236 addi r3,r1,STACK_FRAME_OVERHEAD
1237 bl altivec_unavailable_exception
1241 EXC_REAL_OOL(vsx_unavailable, 0xf40, 0x20)
1242 EXC_VIRT_OOL(vsx_unavailable, 0x4f40, 0x20, 0xf40)
1243 TRAMP_KVM(PACA_EXGEN, 0xf40)
1244 EXC_COMMON_BEGIN(vsx_unavailable_common)
1245 EXCEPTION_PROLOG_COMMON(0xf40, PACA_EXGEN)
1249 #ifdef CONFIG_PPC_TRANSACTIONAL_MEM
1250 BEGIN_FTR_SECTION_NESTED(69)
1251 /* Test if 2 TM state bits are zero. If non-zero (i.e. userspace was in a
1252 * transaction), go do TM stuff
1254 rldicl. r0, r12, (64-MSR_TS_LG), (64-2)
1256 END_FTR_SECTION_NESTED(CPU_FTR_TM, CPU_FTR_TM, 69)
1259 #ifdef CONFIG_PPC_TRANSACTIONAL_MEM
1260 2: /* User process was in a transaction */
1262 RECONCILE_IRQ_STATE(r10, r11)
1263 addi r3,r1,STACK_FRAME_OVERHEAD
1264 bl vsx_unavailable_tm
1268 END_FTR_SECTION_IFSET(CPU_FTR_VSX)
1271 RECONCILE_IRQ_STATE(r10, r11)
1272 addi r3,r1,STACK_FRAME_OVERHEAD
1273 bl vsx_unavailable_exception
1277 EXC_REAL_OOL(facility_unavailable, 0xf60, 0x20)
1278 EXC_VIRT_OOL(facility_unavailable, 0x4f60, 0x20, 0xf60)
1279 TRAMP_KVM(PACA_EXGEN, 0xf60)
1280 EXC_COMMON(facility_unavailable_common, 0xf60, facility_unavailable_exception)
1283 EXC_REAL_OOL_HV(h_facility_unavailable, 0xf80, 0x20)
1284 EXC_VIRT_OOL_HV(h_facility_unavailable, 0x4f80, 0x20, 0xf80)
1285 TRAMP_KVM_HV(PACA_EXGEN, 0xf80)
1286 EXC_COMMON(h_facility_unavailable_common, 0xf80, facility_unavailable_exception)
1289 EXC_REAL_NONE(0xfa0, 0x20)
1290 EXC_VIRT_NONE(0x4fa0, 0x20)
1291 EXC_REAL_NONE(0xfc0, 0x20)
1292 EXC_VIRT_NONE(0x4fc0, 0x20)
1293 EXC_REAL_NONE(0xfe0, 0x20)
1294 EXC_VIRT_NONE(0x4fe0, 0x20)
1296 EXC_REAL_NONE(0x1000, 0x100)
1297 EXC_VIRT_NONE(0x5000, 0x100)
1298 EXC_REAL_NONE(0x1100, 0x100)
1299 EXC_VIRT_NONE(0x5100, 0x100)
1301 #ifdef CONFIG_CBE_RAS
1302 EXC_REAL_HV(cbe_system_error, 0x1200, 0x100)
1303 EXC_VIRT_NONE(0x5200, 0x100)
1304 TRAMP_KVM_HV_SKIP(PACA_EXGEN, 0x1200)
1305 EXC_COMMON(cbe_system_error_common, 0x1200, cbe_system_error_exception)
1306 #else /* CONFIG_CBE_RAS */
1307 EXC_REAL_NONE(0x1200, 0x100)
1308 EXC_VIRT_NONE(0x5200, 0x100)
1312 EXC_REAL(instruction_breakpoint, 0x1300, 0x100)
1313 EXC_VIRT(instruction_breakpoint, 0x5300, 0x100, 0x1300)
1314 TRAMP_KVM_SKIP(PACA_EXGEN, 0x1300)
1315 EXC_COMMON(instruction_breakpoint_common, 0x1300, instruction_breakpoint_exception)
1317 EXC_REAL_NONE(0x1400, 0x100)
1318 EXC_VIRT_NONE(0x5400, 0x100)
1320 EXC_REAL_BEGIN(denorm_exception_hv, 0x1500, 0x100)
1321 mtspr SPRN_SPRG_HSCRATCH0,r13
1322 EXCEPTION_PROLOG_0(PACA_EXGEN)
1323 EXCEPTION_PROLOG_1(PACA_EXGEN, NOTEST, 0x1500)
1325 #ifdef CONFIG_PPC_DENORMALISATION
1326 mfspr r10,SPRN_HSRR1
1327 andis. r10,r10,(HSRR1_DENORM)@h /* denorm? */
1332 EXCEPTION_PROLOG_2(denorm_common, EXC_HV)
1333 EXC_REAL_END(denorm_exception_hv, 0x1500, 0x100)
1335 #ifdef CONFIG_PPC_DENORMALISATION
1336 EXC_VIRT_BEGIN(denorm_exception, 0x5500, 0x100)
1337 b exc_real_0x1500_denorm_exception_hv
1338 EXC_VIRT_END(denorm_exception, 0x5500, 0x100)
1340 EXC_VIRT_NONE(0x5500, 0x100)
1343 TRAMP_KVM_HV(PACA_EXGEN, 0x1500)
1345 #ifdef CONFIG_PPC_DENORMALISATION
1346 TRAMP_REAL_BEGIN(denorm_assist)
1349 * To denormalise we need to move a copy of the register to itself.
1350 * For POWER6 do that here for all FP regs.
1353 ori r10,r10,(MSR_FP|MSR_FE0|MSR_FE1)
1354 xori r10,r10,(MSR_FE0|MSR_FE1)
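/* Net effect: MSR_FP is set while FE0/FE1 end up clear, so the FP moves below cannot generate exceptions. */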
1358 #define FMR2(n) fmr (n), (n) ; fmr n+1, n+1
1359 #define FMR4(n) FMR2(n) ; FMR2(n+2)
1360 #define FMR8(n) FMR4(n) ; FMR4(n+4)
1361 #define FMR16(n) FMR8(n) ; FMR8(n+8)
1362 #define FMR32(n) FMR16(n) ; FMR16(n+16)
1367 * To denormalise we need to move a copy of the register to itself.
1368 * For POWER7 do that here for the first 32 VSX registers only.
1371 oris r10,r10,MSR_VSX@h
1375 #define XVCPSGNDP2(n) XVCPSGNDP(n,n,n) ; XVCPSGNDP(n+1,n+1,n+1)
1376 #define XVCPSGNDP4(n) XVCPSGNDP2(n) ; XVCPSGNDP2(n+2)
1377 #define XVCPSGNDP8(n) XVCPSGNDP4(n) ; XVCPSGNDP4(n+4)
1378 #define XVCPSGNDP16(n) XVCPSGNDP8(n) ; XVCPSGNDP8(n+8)
1379 #define XVCPSGNDP32(n) XVCPSGNDP16(n) ; XVCPSGNDP16(n+16)
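/*
 * XVCPSGNDP32(n) thus expands to xvcpsgndp over the 32 consecutive VSX
 * registers n..n+31, copying each register to itself as described above.
 */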
1382 ALT_FTR_SECTION_END_IFCLR(CPU_FTR_ARCH_206)
1386 END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_207S)
1388 * To denormalise we need to move a copy of the register to itself.
1389 * For POWER8 we need to do that for all 64 VSX registers
1393 mfspr r11,SPRN_HSRR0
1395 mtspr SPRN_HSRR0,r11
1397 ld r9,PACA_EXGEN+EX_R9(r13)
1398 RESTORE_PPR_PACA(PACA_EXGEN, r10)
1400 ld r10,PACA_EXGEN+EX_CFAR(r13)
1402 END_FTR_SECTION_IFSET(CPU_FTR_CFAR)
1403 ld r10,PACA_EXGEN+EX_R10(r13)
1404 ld r11,PACA_EXGEN+EX_R11(r13)
1405 ld r12,PACA_EXGEN+EX_R12(r13)
1406 ld r13,PACA_EXGEN+EX_R13(r13)
1411 EXC_COMMON(denorm_common, 0x1500, unknown_exception)
1414 #ifdef CONFIG_CBE_RAS
1415 EXC_REAL_HV(cbe_maintenance, 0x1600, 0x100)
1416 EXC_VIRT_NONE(0x5600, 0x100)
1417 TRAMP_KVM_HV_SKIP(PACA_EXGEN, 0x1600)
1418 EXC_COMMON(cbe_maintenance_common, 0x1600, cbe_maintenance_exception)
1419 #else /* CONFIG_CBE_RAS */
1420 EXC_REAL_NONE(0x1600, 0x100)
1421 EXC_VIRT_NONE(0x5600, 0x100)
1425 EXC_REAL(altivec_assist, 0x1700, 0x100)
1426 EXC_VIRT(altivec_assist, 0x5700, 0x100, 0x1700)
1427 TRAMP_KVM(PACA_EXGEN, 0x1700)
1428 #ifdef CONFIG_ALTIVEC
1429 EXC_COMMON(altivec_assist_common, 0x1700, altivec_assist_exception)
1431 EXC_COMMON(altivec_assist_common, 0x1700, unknown_exception)
1435 #ifdef CONFIG_CBE_RAS
1436 EXC_REAL_HV(cbe_thermal, 0x1800, 0x100)
1437 EXC_VIRT_NONE(0x5800, 0x100)
1438 TRAMP_KVM_HV_SKIP(PACA_EXGEN, 0x1800)
1439 EXC_COMMON(cbe_thermal_common, 0x1800, cbe_thermal_exception)
1440 #else /* CONFIG_CBE_RAS */
1441 EXC_REAL_NONE(0x1800, 0x100)
1442 EXC_VIRT_NONE(0x5800, 0x100)
1445 #ifdef CONFIG_PPC_WATCHDOG
1447 #define MASKED_DEC_HANDLER_LABEL 3f
1449 #define MASKED_DEC_HANDLER(_H) \
1451 std r12,PACA_EXGEN+EX_R12(r13); \
1452 GET_SCRATCH0(r10); \
1453 std r10,PACA_EXGEN+EX_R13(r13); \
1454 EXCEPTION_PROLOG_2(soft_nmi_common, _H)
1457 * Branch to soft_nmi_interrupt using the emergency stack. The emergency
1458 * stack is one that is usable by maskable interrupts so long as MSR_EE
1459 * remains off. It is used for recovery when something has corrupted the
1460 * normal kernel stack, for example. The "soft NMI" must not use the process
1461 * stack because we want irq disabled sections to avoid touching the stack
1462 * at all (other than PMU interrupts), so use the emergency stack for this,
1463 * and run it entirely with interrupts hard disabled.
1465 EXC_COMMON_BEGIN(soft_nmi_common)
1467 ld r1,PACAEMERGSP(r13)
1468 subi r1,r1,INT_FRAME_SIZE
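/*
 * Trap number 0x900 is used because this path is entered from the masked
 * decrementer handler (MASKED_DEC_HANDLER above).
 */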
1469 EXCEPTION_COMMON_NORET_STACK(PACA_EXGEN, 0x900,
1470 system_reset, soft_nmi_interrupt,
1471 ADD_NVGPRS;ADD_RECONCILE)
1474 #else /* CONFIG_PPC_WATCHDOG */
1475 #define MASKED_DEC_HANDLER_LABEL 2f /* normal return */
1476 #define MASKED_DEC_HANDLER(_H)
1477 #endif /* CONFIG_PPC_WATCHDOG */
1480 * An interrupt came in while soft-disabled. We set paca->irq_happened, then:
1481 * - If it was a decrementer interrupt, we bump the dec to max and return.
1482 * - If it was a doorbell we return immediately since doorbells are edge
1483 * triggered and won't automatically refire.
1484 * - If it was a HMI we return immediately since we handled it in realmode
1485 * and it won't refire.
1486 * - Else it is one of PACA_IRQ_MUST_HARD_MASK, so hard disable and return.
1487 * This is called with r10 containing the value to OR to the paca field.
1489 #define MASKED_INTERRUPT(_H) \
1490 masked_##_H##interrupt: \
1491 std r11,PACA_EXGEN+EX_R11(r13); \
1492 lbz r11,PACAIRQHAPPENED(r13); \
1494 stb r11,PACAIRQHAPPENED(r13); \
1495 cmpwi r10,PACA_IRQ_DEC; \
1498 ori r10,r10,0xffff; \
1499 mtspr SPRN_DEC,r10; \
1500 b MASKED_DEC_HANDLER_LABEL; \
1501 1: andi. r10,r10,PACA_IRQ_MUST_HARD_MASK; \
1503 mfspr r10,SPRN_##_H##SRR1; \
1504 xori r10,r10,MSR_EE; /* clear MSR_EE */ \
1505 mtspr SPRN_##_H##SRR1,r10; \
1506 ori r11,r11,PACA_IRQ_HARD_DIS; \
1507 stb r11,PACAIRQHAPPENED(r13); \
1510 std r1,PACAR1(r13); \
1511 ld r9,PACA_EXGEN+EX_R9(r13); \
1512 ld r10,PACA_EXGEN+EX_R10(r13); \
1513 ld r11,PACA_EXGEN+EX_R11(r13); \
1514 /* returns to kernel where r13 must be set up, so don't restore it */ \
1515 ##_H##RFI_TO_KERNEL; \
1517 MASKED_DEC_HANDLER(_H)
1519 TRAMP_REAL_BEGIN(stf_barrier_fallback)
1520 std r9,PACA_EXRFI+EX_R9(r13)
1521 std r10,PACA_EXRFI+EX_R10(r13)
1523 ld r9,PACA_EXRFI+EX_R9(r13)
1524 ld r10,PACA_EXRFI+EX_R10(r13)
1532 /* Clobbers r10, r11, ctr */
1533 .macro L1D_DISPLACEMENT_FLUSH
1534 ld r10,PACA_RFI_FLUSH_FALLBACK_AREA(r13)
1535 ld r11,PACA_L1D_FLUSH_SIZE(r13)
1536 srdi r11,r11,(7 + 3) /* 128 byte lines, unrolled 8x */
1538 DCBT_BOOK3S_STOP_ALL_STREAM_IDS(r11) /* Stop prefetch streams */
1540 /* order ld/st prior to dcbt stop all streams with flushing */
1544 * The load addresses are at staggered offsets within cachelines,
1545 * which suits some pipelines better (on others it should not hurt).
1549 ld r11,(0x80 + 8)*0(r10)
1550 ld r11,(0x80 + 8)*1(r10)
1551 ld r11,(0x80 + 8)*2(r10)
1552 ld r11,(0x80 + 8)*3(r10)
1553 ld r11,(0x80 + 8)*4(r10)
1554 ld r11,(0x80 + 8)*5(r10)
1555 ld r11,(0x80 + 8)*6(r10)
1556 ld r11,(0x80 + 8)*7(r10)
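/*
 * The eight loads above touch eight consecutive 128-byte lines; they are
 * repeated under CTR (PACA_L1D_FLUSH_SIZE / (128 * 8) iterations, per the
 * srdi above) so that the whole fallback area is loaded and the L1 data
 * cache contents are displaced.
 */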
1561 TRAMP_REAL_BEGIN(entry_flush_fallback)
1562 std r9,PACA_EXRFI+EX_R9(r13)
1563 std r10,PACA_EXRFI+EX_R10(r13)
1564 std r11,PACA_EXRFI+EX_R11(r13)
1566 L1D_DISPLACEMENT_FLUSH
1568 ld r9,PACA_EXRFI+EX_R9(r13)
1569 ld r10,PACA_EXRFI+EX_R10(r13)
1570 ld r11,PACA_EXRFI+EX_R11(r13)
1573 TRAMP_REAL_BEGIN(rfi_flush_fallback)
1576 std r1,PACA_EXRFI+EX_R12(r13)
1577 ld r1,PACAKSAVE(r13)
1578 std r9,PACA_EXRFI+EX_R9(r13)
1579 std r10,PACA_EXRFI+EX_R10(r13)
1580 std r11,PACA_EXRFI+EX_R11(r13)
1582 L1D_DISPLACEMENT_FLUSH
1584 ld r9,PACA_EXRFI+EX_R9(r13)
1585 ld r10,PACA_EXRFI+EX_R10(r13)
1586 ld r11,PACA_EXRFI+EX_R11(r13)
1587 ld r1,PACA_EXRFI+EX_R12(r13)
1591 TRAMP_REAL_BEGIN(hrfi_flush_fallback)
1594 std r1,PACA_EXRFI+EX_R12(r13)
1595 ld r1,PACAKSAVE(r13)
1596 std r9,PACA_EXRFI+EX_R9(r13)
1597 std r10,PACA_EXRFI+EX_R10(r13)
1598 std r11,PACA_EXRFI+EX_R11(r13)
1600 L1D_DISPLACEMENT_FLUSH
1602 ld r9,PACA_EXRFI+EX_R9(r13)
1603 ld r10,PACA_EXRFI+EX_R10(r13)
1604 ld r11,PACA_EXRFI+EX_R11(r13)
1605 ld r1,PACA_EXRFI+EX_R12(r13)
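/*
 * do_uaccess_flush is called when leaving a user access window on CPUs
 * that need the L1 data cache flushed at that point; the
 * UACCESS_FLUSH_FIXUP_SECTION below allows the flush to be patched in or
 * out depending on whether the mitigation is required.
 */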
1611 _GLOBAL(do_uaccess_flush)
1612 UACCESS_FLUSH_FIXUP_SECTION
1617 L1D_DISPLACEMENT_FLUSH
1619 _ASM_NOKPROBE_SYMBOL(do_uaccess_flush)
1620 EXPORT_SYMBOL(do_uaccess_flush)
1623 * Real mode exceptions actually use this too, but alternate
1624 * instruction code patches (which end up in the common .text area)
1625 * cannot reach these if they are put there.
1627 USE_FIXED_SECTION(virt_trampolines)
MASKED_INTERRUPT()
MASKED_INTERRUPT(H)
1631 #ifdef CONFIG_KVM_BOOK3S_64_HANDLER
1632 TRAMP_REAL_BEGIN(kvmppc_skip_interrupt)
1634 * Here all GPRs are unchanged from when the interrupt happened
1635 * except for r13, which is saved in SPRG_SCRATCH0.
1637 mfspr r13, SPRN_SRR0
1639 mtspr SPRN_SRR0, r13
1644 TRAMP_REAL_BEGIN(kvmppc_skip_Hinterrupt)
1646 * Here all GPRs are unchanged from when the interrupt happened
1647 * except for r13, which is saved in SPRG_SCRATCH0.
1649 mfspr r13, SPRN_HSRR0
1651 mtspr SPRN_HSRR0, r13
1658 * Ensure that any handlers that get invoked from the exception prologs
1659 * above are below the first 64KB (0x10000) of the kernel image because
1660 * the prologs assemble the addresses of these handlers using the
1661 * LOAD_HANDLER macro, which uses an ori instruction.
1664 /*** Common interrupt handlers ***/
1668 * Relocation-on interrupts: A subset of the interrupts can be delivered
1669 * with IR=1/DR=1, if AIL==2 and MSR.HV won't be changed by delivering
1670 * it. Addresses are the same as the original interrupt addresses, but
1671 * offset by 0xc000000000004000.
1672 * It's impossible to receive interrupts below 0x300 via this mechanism.
1673 * KVM: None of these traps are from the guest; anything that escalated
1674 * to HV=1 from HV=0 is delivered via real mode handlers.
1678 * This uses the standard macro, since the original 0x300 vector
1679 * only has extra guff for STAB-based processors -- which never existed.
1683 EXC_COMMON_BEGIN(ppc64_runlatch_on_trampoline)
1684 b __ppc64_runlatch_on
1686 USE_FIXED_SECTION(virt_trampolines)
1688 * The __end_interrupts marker must be past the out-of-line (OOL)
1689 * handlers, so that they are copied to real address 0x100 when running
1690 * a relocatable kernel. This ensures they can be reached from the short
1691 * trampoline handlers (like 0x4f00, 0x4f20, etc.) which branch
1692 * directly, without using LOAD_HANDLER().
1695 .globl __end_interrupts
1697 DEFINE_FIXED_SYMBOL(__end_interrupts)
1699 #ifdef CONFIG_PPC_970_NAP
1700 EXC_COMMON_BEGIN(power4_fixup_nap)
1702 std r9,TI_LOCAL_FLAGS(r11)
1703 ld r10,_LINK(r1) /* make idle task do the */
1704 std r10,_NIP(r1) /* equivalent of a blr */
1708 CLOSE_FIXED_SECTION(real_vectors);
1709 CLOSE_FIXED_SECTION(real_trampolines);
1710 CLOSE_FIXED_SECTION(virt_vectors);
1711 CLOSE_FIXED_SECTION(virt_trampolines);
1718 .balign IFETCH_ALIGN_BYTES
1720 #ifdef CONFIG_PPC_BOOK3S_64
1721 lis r0,(DSISR_BAD_FAULT_64S | DSISR_DABRMATCH | DSISR_KEYFAULT)@h
1722 ori r0,r0,DSISR_BAD_FAULT_64S@l
1723 and. r0,r4,r0 /* weird error? */
1724 bne- handle_page_fault /* if not, try to insert a HPTE */
1725 CURRENT_THREAD_INFO(r11, r1)
1726 lwz r0,TI_PREEMPT(r11) /* If we're in an "NMI" */
1727 andis. r0,r0,NMI_MASK@h /* (i.e. an irq when soft-disabled) */
1728 bne 77f /* then don't call hash_page now */
1731 * r3 contains the faulting address
1733 * r5 contains the trap number
1736 * at return r3 = 0 for success, 1 for page fault, negative for error
1740 bl __hash_page /* build HPTE if possible */
1741 cmpdi r3,0 /* see if __hash_page succeeded */
1744 beq fast_exc_return_irq /* Return from exception on success */
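/*
 * If __hash_page could not insert the translation, continue below to
 * check for a DABR match and otherwise take the normal page fault path.
 */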
1749 /* Reload DSISR into r4 for the DABR check below */
1751 #endif /* CONFIG_PPC_BOOK3S_64 */
1753 /* Here we have a page fault that hash_page can't handle. */
1755 11: andis. r0,r4,DSISR_DABRMATCH@h
1756 bne- handle_dabr_fault
1759 addi r3,r1,STACK_FRAME_OVERHEAD
1762 beq+ ret_from_except_lite
1765 addi r3,r1,STACK_FRAME_OVERHEAD
1770 /* We have a data breakpoint exception - handle it */
1775 addi r3,r1,STACK_FRAME_OVERHEAD
1778 * do_break() may have changed the NV GPRs while handling a breakpoint.
1779 * If so, we need to restore them with their updated values. Don't use
1780 * ret_from_except_lite here.
1785 #ifdef CONFIG_PPC_BOOK3S_64
1786 /* We have a page fault that hash_page could handle but HV refused the insertion. */
1791 addi r3,r1,STACK_FRAME_OVERHEAD
1798 * We come here as a result of a DSI at a point where we don't want
1799 * to call hash_page, such as when we are accessing memory (possibly
1800 * user memory) inside a PMU interrupt that occurred while interrupts
1801 * were soft-disabled. We want to invoke the exception handler for
1802 * the access, or panic if there isn't a handler.
1806 addi r3,r1,STACK_FRAME_OVERHEAD
1812 * Here we have detected that the kernel stack pointer is bad.
1813 * R9 contains the saved CR, r13 points to the paca,
1814 * r10 contains the (bad) kernel stack pointer,
1815 * r11 and r12 contain the saved SRR0 and SRR1.
1816 * We switch to using an emergency stack, save the registers there,
1817 * and call kernel_bad_stack(), which panics.
1820 ld r1,PACAEMERGSP(r13)
1821 subi r1,r1,64+INT_FRAME_SIZE
1827 mfspr r12,SPRN_DSISR
1853 std r10,ORIG_GPR3(r1)
1854 END_FTR_SECTION_IFSET(CPU_FTR_CFAR)
1857 lhz r12,PACA_TRAP_SAVE(r13)
1859 addi r11,r1,INT_FRAME_SIZE
1864 ld r11,exception_marker@toc(r2)
1866 std r11,STACK_FRAME_OVERHEAD-16(r1)
1867 1: addi r3,r1,STACK_FRAME_OVERHEAD
1870 _ASM_NOKPROBE_SYMBOL(bad_stack);
1873 * When doorbell is triggered from system reset wakeup, the message is
1874 * not cleared, so it would fire again when EE is enabled.
1876 * When coming from local_irq_enable, there may be the same problem if
1877 * we were hard disabled.
1879 * Execute msgclr to clear pending exceptions before handling it.
1881 h_doorbell_common_msgclr:
1882 LOAD_REG_IMMEDIATE(r3, PPC_DBELL_MSGTYPE << (63-36))
1886 doorbell_super_common_msgclr:
1887 LOAD_REG_IMMEDIATE(r3, PPC_DBELL_MSGTYPE << (63-36))
1889 b doorbell_super_common
1892 * Called from arch_local_irq_enable when an interrupt needs
1893 * to be resent. r3 contains 0x500, 0x900, 0xa00 or 0xe80 to indicate
1894 * which kind of interrupt. MSR:EE is already off. We generate a
1895 * stackframe as if a real interrupt had happened.
1897 * Note: While MSR:EE is off, we need to make sure that _MSR
1898 * in the generated frame has EE set to 1 or the exception
1899 * handler will not properly re-enable them.
1901 * Note that we don't specify LR as the NIP (return address) for
1902 * the interrupt because that would unbalance the return branch predictor.
1905 _GLOBAL(__replay_interrupt)
1906 /* We are going to jump to the exception common code which
1907 * will retrieve various register values from the PACA which
1908 * we don't give a damn about, so we don't bother storing them.
1911 LOAD_REG_ADDR(r11, replay_interrupt_return)
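/*
 * The common handlers entered below expect r9 = saved CR, r11 = saved
 * SRR0 and r12 = saved SRR1; replay_interrupt_return is used as the
 * return NIP so the replayed interrupt returns here.
 */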
1915 beq decrementer_common
1918 beq h_virt_irq_common
1920 beq hardware_interrupt_common
1921 ALT_FTR_SECTION_END_IFSET(CPU_FTR_HVMODE | CPU_FTR_ARCH_300)
1923 beq performance_monitor_common
1926 beq h_doorbell_common_msgclr
1928 beq hmi_exception_common
1931 beq doorbell_super_common_msgclr
1932 ALT_FTR_SECTION_END_IFSET(CPU_FTR_HVMODE)
1933 replay_interrupt_return:
1936 _ASM_NOKPROBE_SYMBOL(__replay_interrupt)