 * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 * Rewritten by Cort Dougan (cort@cs.nmt.edu) for PReP
 * Copyright (C) 1996 Cort Dougan <cort@cs.nmt.edu>
 * Adapted for Power Macintosh by Paul Mackerras.
 * Low-level exception handlers and MMU support
 * rewritten by Paul Mackerras.
 * Copyright (C) 1996 Paul Mackerras.
 * MPC8xx modifications Copyright (C) 1997 Dan Malek (dmalek@jlc.net).
 *
 * This file contains the system call entry code, context switch
 * code, and exception/interrupt return code for PowerPC.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
#include <linux/errno.h>
#include <linux/err.h>
#include <asm/unistd.h>
#include <asm/processor.h>
#include <asm/thread_info.h>
#include <asm/code-patching-asm.h>
#include <asm/ppc_asm.h>
#include <asm/asm-offsets.h>
#include <asm/cputable.h>
#include <asm/firmware.h>
#include <asm/ptrace.h>
#include <asm/irqflags.h>
#include <asm/ftrace.h>
#include <asm/hw_irq.h>
#include <asm/context_tracking.h>
#include <asm/barrier.h>
#ifdef CONFIG_PPC_BOOK3S
#include <asm/exception-64s.h>
#include <asm/exception-64e.h>
	.tc sys_call_table[TC],sys_call_table

/* This value is used to mark exception frames on the stack. */
	.tc ID_EXC_MARKER[TC],STACK_FRAME_REGS_MARKER

	.globl system_call_common
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
	extrdi.	r10, r12, 1, (63-MSR_TS_T_LG)	/* transaction active? */
END_FTR_SECTION_IFSET(CPU_FTR_TM)
	addi	r1,r1,-INT_FRAME_SIZE
	beq	2f			/* if from kernel mode */
#ifdef CONFIG_PPC_FSL_BOOK3E
START_BTB_FLUSH_SECTION
	ACCOUNT_CPU_USER_ENTRY(r10, r11)
 * This clears CR0.SO (bit 28), which is the error indication on
 * return from this system call.
	rldimi	r2,r11,28,(63-28)
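/*
 * Illustrative sketch (not code from this file): libc syscall wrappers
 * conventionally test CR0.SO after the sc instruction to tell an error
 * apart from a large valid return value, roughly:
 *
 *	ret = <sc instruction>;
 *	if (CR0.SO)			// set again on error paths below
 *		{ errno = ret; ret = -1; }
 */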
	addi	r9,r1,STACK_FRAME_OVERHEAD
	ld	r11,exception_marker@toc(r2)
	std	r11,-16(r9)		/* "regshere" marker */
#if defined(CONFIG_VIRT_CPU_ACCOUNTING_NATIVE) && defined(CONFIG_PPC_SPLPAR)
	/* if from user, see if there are any DTL entries to process */
	ld	r10,PACALPPACAPTR(r13)	/* get ptr to VPA */
	ld	r11,PACA_DTL_RIDX(r13)	/* get log read index */
	addi	r10,r10,LPPACA_DTLIDX
	LDX_BE	r10,0,r10		/* get log write index */
	bl	accumulate_stolen_time
	addi	r9,r1,STACK_FRAME_OVERHEAD
END_FW_FTR_SECTION_IFSET(FW_FEATURE_SPLPAR)
#endif /* CONFIG_VIRT_CPU_ACCOUNTING_NATIVE && CONFIG_PPC_SPLPAR */
 * A syscall should always be called with interrupts enabled,
 * so we just unconditionally hard-enable here. When some kind
 * of irq tracing is used, we additionally check that condition
#if defined(CONFIG_TRACE_IRQFLAGS) && defined(CONFIG_BUG)
	lbz	r10,PACASOFTIRQEN(r13)
	EMIT_BUG_ENTRY 1b,__FILE__,__LINE__,BUGFLAG_WARNING
#ifdef CONFIG_PPC_BOOK3E
#endif /* CONFIG_PPC_BOOK3E */
	/* We do need to set SOFTE in the stack frame or the return
	 * from interrupt will be painful
	CURRENT_THREAD_INFO(r11, r1)
	andi.	r11,r10,_TIF_SYSCALL_DOTRACE
	bne	syscall_dotrace		/* does not return */
	cmpldi	0,r0,NR_syscalls
system_call:			/* label this so stack traces look sane */
 * Need to vector to the 32-bit or the default sys_call_table here,
 * based on the caller's run-mode / personality.
	ld	r11,SYS_CALL_TABLE@toc(2)
	andi.	r10,r10,_TIF_32BIT
	addi	r11,r11,8	/* use 32-bit syscall entries */
 * Prevent the load of the handler below (based on the user-passed
 * system call number) from being speculatively executed until the test
 * against NR_syscalls and the branch to .Lsyscall_enosys above have
	ldx	r12,r11,r0	/* Fetch system call handler [ptr] */
	bctrl			/* Call handler */
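/*
 * In C terms, the pattern defended against above is the classic
 * bounds-check bypass (sketch only; the barrier instruction itself is
 * elided from this excerpt):
 *
 *	if (r0 < NR_syscalls)			// architectural check
 *		handler = sys_call_table[r0];	// must not execute
 *						// speculatively out of range
 */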
	CURRENT_THREAD_INFO(r12, r1)
#ifdef CONFIG_PPC_BOOK3S
	/* No MSR:RI on BookE */
/*
 * Disable interrupts so current_thread_info()->flags can't change,
 * and so that we don't get interrupted after loading SRR0/1.
 */
#ifdef CONFIG_PPC_BOOK3E
/*
 * For performance reasons we clear RI the same time that we
 * clear EE. We only need to clear RI just before we restore r13
 * below, but batching it with EE saves us one expensive mtmsrd call.
 * We have to be careful to restore RI if we branch anywhere from
 * here (eg syscall_exit_work).
 */
#endif /* CONFIG_PPC_BOOK3E */
	andi.	r0,r9,(_TIF_SYSCALL_DOTRACE|_TIF_SINGLESTEP|_TIF_USER_WORK_MASK|_TIF_PERSYSCALL_MASK)
	bne-	syscall_exit_work
.Lsyscall_error_cont:
	stdcx.	r0,0,r1			/* to clear the reservation */
END_FTR_SECTION_IFCLR(CPU_FTR_STCX_CHECKS_ADDRESS)
	ACCOUNT_CPU_USER_EXIT(r11, r12)
	HMT_MEDIUM_LOW_HAS_PPR
	ld	r13,GPR13(r1)	/* only restore r13 if returning to usermode */
	b	.	/* prevent speculative execution */
	b	.	/* prevent speculative execution */
	oris	r5,r5,0x1000	/* Set SO bit in CR */
	b	.Lsyscall_error_cont
/* Traced system call support */
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	do_syscall_trace_enter

/*
 * We use the return value of do_syscall_trace_enter() as the syscall
 * number. If the syscall was rejected for any reason, do_syscall_trace_enter()
 * returns an invalid syscall number and the test below against
 * NR_syscalls will fail.
 */
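/*
 * Hedged C-level sketch of the contract just described (the exact
 * prototype lives in the ptrace code, not in this file):
 *
 *	r0 = do_syscall_trace_enter(regs);
 *	if ((unsigned long)r0 >= NR_syscalls)	// rejected syscall
 *		return -ENOSYS;			// fails the cmpldi below
 */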
	/* Restore argument registers just clobbered and/or possibly changed. */

	/* Repopulate r9 and r10 for the system_call path */
	addi	r9,r1,STACK_FRAME_OVERHEAD
	CURRENT_THREAD_INFO(r10, r1)

	cmpldi	r0,NR_syscalls

	/* Return code is already in r3 thanks to do_syscall_trace_enter() */
#ifdef CONFIG_PPC_BOOK3S
	mtmsrd	r10,1		/* Restore RI */

	/* If TIF_RESTOREALL is set, don't scribble on either r3 or ccr.
	   If TIF_NOERROR is set, just save r3 as it is. */
	andi.	r0,r9,_TIF_RESTOREALL
0:	cmpld	r3,r11		/* r11 is -MAX_ERRNO */
	andi.	r0,r9,_TIF_NOERROR
	oris	r5,r5,0x1000	/* Set SO bit in CR */
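/*
 * A minimal C sketch of the error convention applied above, assuming
 * the usual IS_ERR_VALUE()-style test:
 *
 *	if ((unsigned long)r3 >= (unsigned long)-MAX_ERRNO &&
 *	    !(flags & _TIF_NOERROR)) {
 *		regs->ccr |= 0x10000000;	// CR0.SO, the oris above
 *		r3 = -r3;			// positive errno for user
 *	}
 */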
2:	andi.	r0,r9,(_TIF_PERSYSCALL_MASK)

	/* Clear per-syscall TIF flags if any are set.  */
	li	r11,_TIF_PERSYSCALL_MASK
	addi	r12,r12,TI_FLAGS
	subi	r12,r12,TI_FLAGS

4:	/* Anything else left to do? */
	SET_DEFAULT_THREAD_PPR(r3, r10)	/* Set thread.ppr = 3 */
	andi.	r0,r9,(_TIF_SYSCALL_DOTRACE|_TIF_SINGLESTEP)
	beq	ret_from_except_lite

	/* Re-enable interrupts */
#ifdef CONFIG_PPC_BOOK3E
#endif /* CONFIG_PPC_BOOK3E */

	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	do_syscall_trace_leave
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
	/* Firstly we need to enable TM in the kernel */
	rldimi	r10, r9, MSR_TM_LG, 63-MSR_TM_LG

	/* tabort, this dooms the transaction, nothing else */
	li	r9, (TM_CAUSE_SYSCALL|TM_CAUSE_PERSISTENT)

/*
 * Return directly to userspace. We have corrupted user register state,
 * but userspace will never see that register state. Execution will
 * resume after the tbegin of the aborted transaction with the
 * checkpointed register state.
 */
	b	.	/* prevent speculative execution */
	/* Save non-volatile GPRs, if not already saved. */

/*
 * The sigsuspend and rt_sigsuspend system calls can call do_signal
 * and thus put the process into the stopped state where we might
 * want to examine its user state with ptrace. Therefore we need
 * to save all the nonvolatile registers (r14 - r31) before calling
 * the C code. Similarly, fork, vfork and clone need the full
 * register state on the stack so that it can be copied to the child.
 */
_GLOBAL(ppc32_swapcontext)
	bl	compat_sys_swapcontext

_GLOBAL(ppc64_swapcontext)

_GLOBAL(ppc_switch_endian)

_GLOBAL(ret_from_fork)

_GLOBAL(ret_from_kernel_thread)

#if defined(_CALL_ELF) && _CALL_ELF == 2
#ifdef CONFIG_PPC_BOOK3S_64

#define FLUSH_COUNT_CACHE	\
	patch_site 1b, patch__call_flush_count_cache

#define BCCTR_FLUSH	.long 0x4c400420

.global flush_count_cache
	/* Save LR into r9 */

	// Flush the link stack

	// If we're just flushing the link stack, return here
	patch_site 3b patch__flush_link_stack_return
	patch_site 2b patch__flush_count_cache_return

#define FLUSH_COUNT_CACHE
#endif /* CONFIG_PPC_BOOK3S_64 */
/*
 * This routine switches between two different tasks. The process
 * state of one is saved on its kernel stack. Then the state
 * of the other is restored from its kernel stack. The memory
 * management hardware is updated to the second process's state.
 * Finally, we can return to the second process, via ret_from_except.
 * On entry, r3 points to the THREAD for the current task, r4
 * points to the THREAD for the new task.
 *
 * Note: there are two ways to get to the "going out" portion
 * of this code; either by coming in via the entry (_switch)
 * or via "fork" which must set up an environment equivalent
 * to the "_switch" path. If you change this you'll have to change
 * the fork code also.
 *
 * The code which creates the new task context is in 'copy_thread'
 * in arch/powerpc/kernel/process.c
 */
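/*
 * Caller-side sketch (assumed to match __switch_to() in
 * arch/powerpc/kernel/process.c; shown for orientation only):
 *
 *	last = _switch(old_thread, new_thread);
 *
 * Both arguments are &task->thread, and the return value identifies
 * the task we switched away from.
 */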
	stdu	r1,-SWITCH_FRAME_SIZE(r1)
	/* r3-r13 are caller saved -- Cort */
	mflr	r20		/* Return to switch caller */
	oris	r0,r0,MSR_VSX@h	/* Disable VSX */
END_FTR_SECTION_IFSET(CPU_FTR_VSX)
#endif /* CONFIG_VSX */
#ifdef CONFIG_ALTIVEC
	oris	r0,r0,MSR_VEC@h	/* Disable altivec */
	mfspr	r24,SPRN_VRSAVE	/* save vrsave register value */
	std	r24,THREAD_VRSAVE(r3)
END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
#endif /* CONFIG_ALTIVEC */
	std	r1,KSP(r3)	/* Set old stack pointer */

#ifdef CONFIG_PPC_BOOK3S_64
	/* Event based branch registers */
	std	r0, THREAD_BESCR(r3)
	std	r0, THREAD_EBBHR(r3)
	std	r0, THREAD_EBBRR(r3)
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)

	/* We need a sync somewhere here to make sure that if the
	 * previous task gets rescheduled on another CPU, it sees all
	 * stores it has performed on this one.
	 */
#endif /* CONFIG_SMP */
 * If we optimise away the clear of the reservation in system
 * calls because we know the CPU tracks the address of the
 * reservation, then we need to clear it here to cover the
 * case that the kernel context switch path has no larx
END_FTR_SECTION_IFSET(CPU_FTR_STCX_CHECKS_ADDRESS)
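/*
 * Illustrative scenario (a generic larx/stcx. pair, not code from this
 * file): a reservation established by the outgoing context, e.g.
 *
 *	ldarx	r10,0,r3	// reservation could survive the switch
 *	...context switch...
 *	stdcx.	r10,0,r3	// might falsely succeed in the new task
 *
 * could let an unrelated store-conditional succeed after the switch,
 * which the clear above prevents.
 */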
#ifdef CONFIG_PPC_BOOK3S
	/* Cancel all explicit user streams as they will have no use after context
	 * switch and will stop the HW from creating streams itself
	 */
	DCBT_STOP_ALL_STREAM_IDS(r6)
	addi	r6,r4,-THREAD	/* Convert THREAD to 'current' */
	std	r6,PACACURRENT(r13)	/* Set new 'current' */

	ld	r8,KSP(r4)	/* new stack pointer */
#ifdef CONFIG_PPC_BOOK3S
	clrrdi	r6,r8,28	/* get its ESID */
	clrrdi	r9,r1,28	/* get current sp ESID */
	clrrdi	r6,r8,40	/* get its 1T ESID */
	clrrdi	r9,r1,40	/* get current sp 1T ESID */
ALT_MMU_FTR_SECTION_END_IFCLR(MMU_FTR_1T_SEGMENT)
	clrldi.	r0,r6,2		/* is new ESID c00000000? */
	cmpd	cr1,r6,r9	/* or is new ESID the same as current ESID? */
	beq	2f		/* if yes, don't slbie it */
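/*
 * Worked example (illustrative): with 256MB segments, clrrdi rD,rS,28
 * computes ESID = addr & ~0xfffffffULL, so two stacks in the same
 * 256MB segment compare equal in cr1 above and the slbie is skipped;
 * the 1T-segment variant does the same with a 40-bit shift.
 */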
	/* Bolt in the new stack SLB entry */
	ld	r7,KSP_VSID(r4)	/* Get new stack's VSID */
	oris	r0,r6,(SLB_ESID_V)@h
	ori	r0,r0,(SLB_NUM_BOLTED-1)@l
	li	r9,MMU_SEGSIZE_1T	/* insert B field */
	oris	r6,r6,(MMU_SEGSIZE_1T << SLBIE_SSIZE_SHIFT)@h
	rldimi	r7,r9,SLB_VSID_SSIZE_SHIFT,0
END_MMU_FTR_SECTION_IFSET(MMU_FTR_1T_SEGMENT)
	/* Update the last bolted SLB. No write barriers are needed
	 * here, provided we only update the current CPU's SLB shadow
	ld	r9,PACA_SLBSHADOWPTR(r13)
	std	r12,SLBSHADOW_STACKESID(r9)	/* Clear ESID */
	li	r12,SLBSHADOW_STACKVSID
	STDX_BE	r7,r12,r9			/* Save VSID */
	li	r12,SLBSHADOW_STACKESID
	STDX_BE	r0,r12,r9			/* Save ESID */

	/* No need to check for MMU_FTR_NO_SLBIE_B here, since when
	 * we have 1TB segments, the only CPUs known to have the erratum
	 * only support less than 1TB of system memory and we'll never
	 * actually hit this code path.
	 */
	slbie	r6		/* Workaround POWER5 < DD2.1 issue */
#endif /* !CONFIG_PPC_BOOK3S */
	CURRENT_THREAD_INFO(r7, r8)	/* base of new stack */
	/* Note: this uses SWITCH_FRAME_SIZE rather than INT_FRAME_SIZE
	   because we don't need to leave the 288-byte ABI gap at the
	   top of the kernel stack. */
	addi	r7,r7,THREAD_SIZE-SWITCH_FRAME_SIZE

	mr	r1,r8		/* start using new stack pointer */
	std	r7,PACAKSAVE(r13)
#ifdef CONFIG_PPC_BOOK3S_64
	/* Event based branch registers */
	ld	r0, THREAD_BESCR(r4)
	ld	r0, THREAD_EBBHR(r4)
	ld	r0, THREAD_EBBRR(r4)
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)

#ifdef CONFIG_ALTIVEC
	ld	r0,THREAD_VRSAVE(r4)
	mtspr	SPRN_VRSAVE,r0		/* if G4, restore VRSAVE reg */
END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
#endif /* CONFIG_ALTIVEC */

	lwz	r6,THREAD_DSCR_INHERIT(r4)
	ld	r0,THREAD_DSCR(r4)
	ld	r0,PACA_DSCR_DEFAULT(r13)
BEGIN_FTR_SECTION_NESTED(70)
	rldimi	r8, r6, FSCR_DSCR_LG, (63 - FSCR_DSCR_LG)
END_FTR_SECTION_NESTED(CPU_FTR_ARCH_207S, CPU_FTR_ARCH_207S, 70)
END_FTR_SECTION_IFSET(CPU_FTR_DSCR)
	/* r3-r13 are destroyed -- Cort */

	/* convert old thread to its task_struct for return value */
	ld	r7,_NIP(r1)	/* Return to _switch caller in new task */
	addi	r1,r1,SWITCH_FRAME_SIZE
_GLOBAL(ret_from_except)
	bne	ret_from_except_lite

_GLOBAL(ret_from_except_lite)
	/*
	 * Disable interrupts so that current_thread_info()->flags
	 * can't change between when we test it and when we return
	 * from the interrupt.
	 */
#ifdef CONFIG_PPC_BOOK3E
	ld	r10,PACAKMSR(r13)	/* Get kernel MSR without EE */
	mtmsrd	r10,1			/* Update machine state */
#endif /* CONFIG_PPC_BOOK3E */

	CURRENT_THREAD_INFO(r9, r1)
#ifdef CONFIG_PPC_BOOK3E
	ld	r10,PACACURRENT(r13)
#endif /* CONFIG_PPC_BOOK3E */
#ifdef CONFIG_PPC_BOOK3E
	lwz	r3,(THREAD+THREAD_DBCR0)(r10)
#endif /* CONFIG_PPC_BOOK3E */
	/* Check current_thread_info()->flags */
	andi.	r0,r4,_TIF_USER_WORK_MASK
#ifdef CONFIG_PPC_BOOK3E
	/*
	 * Check to see if the dbcr0 register is set up to debug.
	 * Use the internal debug mode bit to do this.
	 */
	andis.	r0,r3,DBCR0_IDM@h
	rlwinm	r0,r0,0,~MSR_DE	/* Clear MSR.DE */

1:	andi.	r0,r4,_TIF_NEED_RESCHED
	bl	restore_interrupts
	b	ret_from_except_lite
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
	andi.	r0,r4,_TIF_USER_WORK_MASK & ~_TIF_RESTORE_TM
	bne	3f		/* only restore TM if nothing else to do */
	addi	r3,r1,STACK_FRAME_OVERHEAD

	/*
	 * Use a non-volatile GPR to save and restore our thread_info flags
	 * across the call to restore_interrupts.
	 */
	bl	restore_interrupts
	addi	r3,r1,STACK_FRAME_OVERHEAD
	/* check current_thread_info()->flags for _TIF_EMULATE_STACK_STORE */
	andis.	r8,r4,_TIF_EMULATE_STACK_STORE@h

	addi	r8,r1,INT_FRAME_SIZE	/* Get the kprobed function entry */

	subi	r3,r3,INT_FRAME_SIZE	/* dst: Allocate a trampoline exception frame */
	mr	r4,r1			/* src: current exception frame */
	mr	r1,r3			/* Reroute the trampoline frame to r1 */

	/* Copy from the original to the trampoline. */
	li	r5,INT_FRAME_SIZE/8	/* size: INT_FRAME_SIZE */
	li	r6,0			/* start offset: 0 */

	/* Do real store operation to complete stdu */

	/* Clear _TIF_EMULATE_STACK_STORE flag */
	lis	r11,_TIF_EMULATE_STACK_STORE@h
#ifdef CONFIG_PREEMPT
	/* Check if we need to preempt */
	andi.	r0,r4,_TIF_NEED_RESCHED

	/* Check that preempt_count() == 0 and interrupts are enabled */
	lwz	r8,TI_PREEMPT(r9)
	crandc	eq,cr1*4+eq,eq

	/*
	 * Here we are preempting the current task. We want to make
	 * sure we are soft-disabled first and reconcile irq state.
	 */
	RECONCILE_IRQ_STATE(r3,r4)
1:	bl	preempt_schedule_irq

	/* Re-test flags and loop again if needed */
	CURRENT_THREAD_INFO(r9, r1)
	andi.	r0,r4,_TIF_NEED_RESCHED

	/*
	 * arch_local_irq_restore() from preempt_schedule_irq above may
	 * enable hard interrupts, but we really should disable them again
	 * before we return from the interrupt, so that we don't get
	 * interrupted after loading SRR0/1.
	 */
#ifdef CONFIG_PPC_BOOK3E
	ld	r10,PACAKMSR(r13)	/* Get kernel MSR without EE */
	mtmsrd	r10,1			/* Update machine state */
#endif /* CONFIG_PPC_BOOK3E */
#endif /* CONFIG_PREEMPT */
	.globl	fast_exc_return_irq
/*
 * This is the main kernel exit path. First we check if we
 * are about to re-enable interrupts
 */
	lbz	r6,PACASOFTIRQEN(r13)

	/* We are enabling, were we already enabled? Yes, just return */

 * We are about to soft-enable interrupts (we are hard disabled
 * at this point). We check if there's anything that needs to
	lbz	r0,PACAIRQHAPPENED(r13)
	bne-	restore_check_irq_replay

 * Get here when nothing happened while soft-disabled, just
 * soft-enable and move on. We will hard-enable as a side
	stb	r0,PACASOFTIRQEN(r13);
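/*
 * Hedged C sketch of the lazy-masking decision made above (field
 * names assumed to follow the PACA accessors used here):
 *
 *	if (get_paca()->irq_happened)	// something arrived while masked
 *		goto restore_check_irq_replay;
 *	get_paca()->soft_enabled = 1;	// the PACASOFTIRQEN store above
 */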
 * Final return path. BookE is handled in a different file
#ifdef CONFIG_PPC_BOOK3E
	b	exception_return_book3e

/*
 * Clear the reservation. If we know the CPU tracks the address of
 * the reservation then we can potentially save some cycles and use
 * a larx. On POWER6 and POWER7 this is significantly faster.
 */
	stdcx.	r0,0,r1		/* to clear the reservation */
ALT_FTR_SECTION_END_IFCLR(CPU_FTR_STCX_CHECKS_ADDRESS)
/*
 * Some code paths such as load_up_fpu or altivec return directly
 * here. They run entirely hard disabled and do not alter the
 * interrupt state. They also don't use lwarx/stwcx. and thus
 * are known not to leave dangling reservations.
 */
	.globl	fast_exception_return
fast_exception_return:

	/* Load PPR from thread struct before we clear MSR:RI */
	ld	r2,PACACURRENT(r13)
	ld	r2,TASKTHREADPPR(r2)
END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)
/*
 * Clear RI before restoring r13. If we are returning to
 * userspace and we take an exception after restoring r13,
 * we end up corrupting the userspace r13 value.
 */
	ld	r4,PACAKMSR(r13)	/* Get kernel MSR without EE */
	andc	r4,r4,r0		/* r0 contains MSR_RI here */
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
	std	r3, PACATMSCRATCH(r13)	/* Stash returned-to MSR */

 * r13 is our per-cpu area; only restore it if we are returning to
 * userspace, since the value stored in the stack frame may belong to
	mtspr	SPRN_PPR,r2	/* Restore PPR */
END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)
	ACCOUNT_CPU_USER_EXIT(r2, r4)
	b	.	/* prevent speculative execution */

1:	mtspr	SPRN_SRR1,r3
	b	.	/* prevent speculative execution */
#endif /* CONFIG_PPC_BOOK3E */

/*
 * We are returning to a context with interrupts soft-disabled.
 *
 * However, we may also be about to hard-enable, so we need to
 * make sure that in this case, we also clear PACA_IRQ_HARD_DIS
 * or that bit can get out of sync and bad things will happen
 */
	lbz	r7,PACAIRQHAPPENED(r13)
	rlwinm	r7,r7,0,~PACA_IRQ_HARD_DIS
	stb	r7,PACAIRQHAPPENED(r13)
	stb	r0,PACASOFTIRQEN(r13);
/*
 * Something did happen; check if a re-emit is needed
 * (this also clears paca->irq_happened)
 */
restore_check_irq_replay:
	/* XXX: We could implement a fast path here where we check
	 * for irq_happened being just 0x01, in which case we can
	 * clear it and return. That means that we would potentially
	 * miss a decrementer having wrapped all the way around.
	 *
	 * Still, this might be useful for things like hash_page
	bl	__check_irq_replay
	beq	restore_no_replay

 * We need to re-emit an interrupt. We do so by re-using our
 * existing exception frame. We first change the trap value,
 * but we need to ensure we preserve the low nibble of it

/*
 * Then find the right handler and call it. Interrupts are
 * still soft-disabled and we keep them that way.
 */
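/*
 * For orientation, the trap values compared below follow the server
 * exception vectors: 0x500 external interrupt, 0x900 decrementer,
 * 0xe60 hypervisor maintenance (HMI), 0xa00/0xe80 doorbell. The
 * preserved low nibble carries the frame-state bits of the original
 * trap word (cf. FULL_REGS()).
 */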
	addi	r3,r1,STACK_FRAME_OVERHEAD;
1:	cmpwi	cr0,r3,0xe60
	addi	r3,r1,STACK_FRAME_OVERHEAD;
	bl	handle_hmi_exception
1:	cmpwi	cr0,r3,0x900
	addi	r3,r1,STACK_FRAME_OVERHEAD;
#ifdef CONFIG_PPC_DOORBELL
#ifdef CONFIG_PPC_BOOK3E
ALT_FTR_SECTION_END_IFSET(CPU_FTR_HVMODE)
#endif /* CONFIG_PPC_BOOK3E */
	addi	r3,r1,STACK_FRAME_OVERHEAD;
	bl	doorbell_exception
#endif /* CONFIG_PPC_DOORBELL */
1:	b	ret_from_except /* What else to do here? */

	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	unrecoverable_exception
#ifdef CONFIG_PPC_RTAS
/*
 * On CHRP, the Run-Time Abstraction Services (RTAS) have to be
 * called with the MMU off.
 *
 * In addition, we need to be in 32-bit mode, at least for now.
 *
 * Note: r3 is an input parameter to rtas, so don't trash it...
 */
	stdu	r1,-RTAS_FRAME_SIZE(r1)	/* Save SP and create stack space. */

	/* Because RTAS is running in 32-bit mode, it clobbers the high order half
	 * of all registers that it saves. We therefore save those registers
	 * RTAS might touch to the stack. (r0, r3-r13 are caller saved)
	 */
	SAVE_GPR(2, r1)			/* Save the TOC */
	SAVE_GPR(13, r1)		/* Save paca */
	SAVE_8GPRS(14, r1)		/* Save the non-volatiles */
	SAVE_10GPRS(22, r1)		/* ditto */
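/*
 * Macro semantics, for reference (assumed from asm/ppc_asm.h):
 * SAVE_8GPRS(14, r1) stores r14-r21 and SAVE_10GPRS(22, r1) stores
 * r22-r31, each expanding to std rN,GPR<N>(r1), so together they
 * cover the full non-volatile range r14-r31.
 */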
	/* Temporary workaround to clear CR until RTAS can be modified to

	/* There is no way it is acceptable to get here with interrupts enabled;
	 * check it with the asm equivalent of WARN_ON
	lbz	r0,PACASOFTIRQEN(r13)
	EMIT_BUG_ENTRY 1b,__FILE__,__LINE__,BUGFLAG_WARNING
	/* Hard-disable interrupts */

	/* Unfortunately, the stack pointer and the MSR are also clobbered,
	 * so they are saved in the PACA which allows us to restore
	 * our original state after RTAS returns.
	 */
	std	r6,PACASAVEDMSR(r13)

	/* Setup our real return addr */
	LOAD_REG_ADDR(r4,rtas_return_loc)
	clrldi	r4,r4,2			/* convert to realmode address */

	ori	r0,r0,MSR_EE|MSR_SE|MSR_BE|MSR_RI

	rldicr	r9,r9,MSR_SF_LG,(63-MSR_SF_LG)
	ori	r9,r9,MSR_IR|MSR_DR|MSR_FE0|MSR_FE1|MSR_FP|MSR_RI|MSR_LE

	sync				/* disable interrupts so SRR0/1 */
	mtmsrd	r0			/* don't get trashed */

	LOAD_REG_ADDR(r4, rtas)
	ld	r5,RTASENTRY(r4)	/* get the rtas->entry value */
	ld	r4,RTASBASE(r4)		/* get the rtas->base value */

	b	.	/* prevent speculative execution */
	/* relocation is off at this point */
	clrldi	r4,r4,2			/* convert to realmode address */
	ld	r3,(1f-0b)(r3)		/* get &rtas_restore_regs */

	ld	r1,PACAR1(r4)		/* Restore our SP */
	ld	r4,PACASAVEDMSR(r4)	/* Restore our MSR */

	b	.	/* prevent speculative execution */

1:	.llong	rtas_restore_regs

	/* relocation is on at this point */
	REST_GPR(2, r1)			/* Restore the TOC */
	REST_GPR(13, r1)		/* Restore paca */
	REST_8GPRS(14, r1)		/* Restore the non-volatiles */
	REST_10GPRS(22, r1)		/* ditto */

	addi	r1,r1,RTAS_FRAME_SIZE	/* Unstack our frame */
	ld	r0,16(r1)		/* get return address */

	blr				/* return to caller */

#endif /* CONFIG_PPC_RTAS */
	stdu	r1,-PROM_FRAME_SIZE(r1)	/* Save SP and create stack space */

	/* Because PROM is running in 32-bit mode, it clobbers the high order half
	 * of all registers that it saves. We therefore save those registers
	 * PROM might touch to the stack. (r0, r3-r13 are caller saved)
	 */

	/* Put PROM address in SRR0 */

	/* Setup our trampoline return addr in LR */
	addi	r4,r4,(1f - 0b)

	/* Prepare a 32-bit mode big endian MSR
#ifdef CONFIG_PPC_BOOK3E
	rlwinm	r11,r11,0,1,31
#else /* CONFIG_PPC_BOOK3E */
	LOAD_REG_IMMEDIATE(r12, MSR_SF | MSR_ISF | MSR_LE)
#endif /* CONFIG_PPC_BOOK3E */

1:	/* Return from OF */

	/* Just make sure that r1 top 32 bits didn't get

	/* Restore the MSR (back to 64 bits) */

	/* Restore other registers */

	addi	r1,r1,PROM_FRAME_SIZE
#ifdef CONFIG_FUNCTION_TRACER
#ifdef CONFIG_DYNAMIC_FTRACE
_GLOBAL_TOC(ftrace_caller)
	/* Taken from output of objdump from lib64/glibc */
	subi	r3, r3, MCOUNT_INSN_SIZE
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
.globl ftrace_graph_call
_GLOBAL(ftrace_graph_stub)
_GLOBAL(ftrace_stub)

_GLOBAL_TOC(_mcount)
	/* Taken from output of objdump from lib64/glibc */
	subi	r3, r3, MCOUNT_INSN_SIZE
	LOAD_REG_ADDR(r5,ftrace_trace_function)
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	b	ftrace_graph_caller
_GLOBAL(ftrace_stub)
#endif /* CONFIG_DYNAMIC_FTRACE */
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
_GLOBAL(ftrace_graph_caller)
	/* load r4 with local address */
	subi	r4, r4, MCOUNT_INSN_SIZE

	/* Grab the LR out of the caller's stack frame */
	bl	prepare_ftrace_return

/*
 * prepare_ftrace_return gives us the address we divert to.
 * Change the LR in the caller's stack frame to this.
 */
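/*
 * C-level sketch of the helper's assumed contract (the prototype lives
 * in the powerpc ftrace C code; hedged, not defined in this file):
 *
 *	new_lr = prepare_ftrace_return(parent_lr, self_ip);
 *
 * On return, r3 is expected to hold the diverted address that is then
 * stored back into the caller's stack frame.
 */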
_GLOBAL(return_to_handler)
	/* need to save return values */

	/*
	 * We might be called from a module.
	 * Switch to our TOC to run inside the core kernel.
	 */
	bl	ftrace_return_to_handler

	/* return value has real return address */

	/* Jump back to real return address */
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
#endif /* CONFIG_FUNCTION_TRACER */