#include <asm/asm-offsets.h>
#ifdef CONFIG_PPC_BOOK3S
#include <asm/exception-64s.h>
#else
#include <asm/exception-64e.h>
#endif
#include <asm/feature-fixups.h>
#include <asm/head-64.h>
#include <asm/hw_irq.h>
#include <asm/ppc_asm.h>
#include <asm/ptrace.h>
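
/*
 * With CONFIG_PPC_RFI_SRR_DEBUG, trap and WARN (once per site) if the
 * SRR0/SRR1 (or HSRR0/HSRR1) contents no longer match the NIP and MSR
 * saved in the interrupt frame, which would indicate the cached SRR
 * state went stale before the return from interrupt.
 */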
.macro DEBUG_SRR_VALID srr
#ifdef CONFIG_PPC_RFI_SRR_DEBUG
	EMIT_WARN_ENTRY 100b,__FILE__,__LINE__,(BUGFLAG_WARNING | BUGFLAG_ONCE)
	EMIT_WARN_ENTRY 100b,__FILE__,__LINE__,(BUGFLAG_WARNING | BUGFLAG_ONCE)
	EMIT_WARN_ENTRY 100b,__FILE__,__LINE__,(BUGFLAG_WARNING | BUGFLAG_ONCE)
	EMIT_WARN_ENTRY 100b,__FILE__,__LINE__,(BUGFLAG_WARNING | BUGFLAG_ONCE)
#endif
.endm
#ifdef CONFIG_PPC_BOOK3S
.macro system_call_vectored name trapnr
	.globl system_call_vectored_\name
system_call_vectored_\name:
_ASM_NOKPROBE_SYMBOL(system_call_vectored_\name)
	SCV_INTERRUPT_TO_KERNEL
	std	r11,_NIP(r1)			/* Saved LR is also the next instruction */
	/* Save syscall parameters in r3-r8 */
	/* Zero r9-r12, this should only be required when restoring all GPRs */
	LOAD_REG_IMMEDIATE(r11, STACK_FRAME_REGS_MARKER)
	std	r11,STACK_INT_FRAME_MARKER(r1)	/* "regs" marker */
	/* Calling convention has r3 = regs, r4 = orig r0 */
	addi	r3,r1,STACK_INT_FRAME_REGS
END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)
	/*
	 * scv enters with MSR[EE]=1 and is immediately considered soft-masked.
	 * The entry vector already sets PACAIRQSOFTMASK to IRQS_ALL_DISABLED,
	 * and interrupts may be masked and pending already.
	 * system_call_exception() will call trace_hardirqs_off() which means
	 * interrupts could already have been blocked before trace_hardirqs_off,
	 * but this is the best we can do.
	 */
	/*
	 * Zero user registers to prevent influencing speculative execution
	 * state of kernel code.
	 */
	SANITIZE_SYSCALL_GPRS()
	bl	CFUNC(system_call_exception)
.Lsyscall_vectored_\name\()_exit:
	addi	r4,r1,STACK_INT_FRAME_REGS
	bl	CFUNC(syscall_exit_prepare)
	std	r1,PACA_EXIT_SAVE_R1(r13)	/* save r1 for restart */
.Lsyscall_vectored_\name\()_rst_start:
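	/*
	 * If any interrupt (other than the hard-disable bookkeeping bit)
	 * arrived while we were preparing to exit, take the restart path
	 * so it can be handled before returning to userspace.
	 */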
	lbz	r11,PACAIRQHAPPENED(r13)
	andi.	r11,r11,(~PACA_IRQ_HARD_DIS)@l
	bne-	syscall_vectored_\name\()_restart
	stb	r11,PACAIRQSOFTMASK(r13)
	stb	r11,PACAIRQHAPPENED(r13)	# clear out possible HARD_DIS
	stdcx.	r0,0,r1			/* to clear the reservation */
END_FTR_SECTION_IFCLR(CPU_FTR_STCX_CHECKS_ADDRESS)
END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)
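	/*
	 * With CONFIG_INTERRUPT_SANITIZE_REGISTERS the non-volatile GPRs
	 * were zeroed on kernel entry and must be restored here; otherwise
	 * the macro below expands to nothing.
	 */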
	SANITIZE_RESTORE_NVGPRS()
	bne	.Lsyscall_vectored_\name\()_restore_regs

	/* rfscv returns with LR->NIA and CTR->MSR */

	/*
	 * Could zero these as per ABI, but we may consider a stricter ABI
	 * which preserves these if libc implementations can benefit, so
	 * restore them for now until further measurement is done.
	 */

	/* Zero volatile regs that may contain sensitive kernel data */

	/*
	 * We don't need to restore AMR on the way back to userspace for KUAP.
	 * The value of AMR only matters while we're in the kernel.
	 */
	b	.	/* prevent speculative execution */
.Lsyscall_vectored_\name\()_restore_regs:
	HANDLER_RESTORE_NVGPRS()
.Lsyscall_vectored_\name\()_rst_end:
syscall_vectored_\name\()_restart:
_ASM_NOKPROBE_SYMBOL(syscall_vectored_\name\()_restart)
	ld	r1,PACA_EXIT_SAVE_R1(r13)
	addi	r4,r1,STACK_INT_FRAME_REGS
	li	r11,IRQS_ALL_DISABLED
	stb	r11,PACAIRQSOFTMASK(r13)
	bl	CFUNC(syscall_exit_restart)
	std	r1,PACA_EXIT_SAVE_R1(r13)	/* save r1 for restart */
	b	.Lsyscall_vectored_\name\()_rst_start
1:

SOFT_MASK_TABLE(.Lsyscall_vectored_\name\()_rst_start, 1b)
RESTART_TABLE(.Lsyscall_vectored_\name\()_rst_start, .Lsyscall_vectored_\name\()_rst_end, syscall_vectored_\name\()_restart)
.endm
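
/*
 * SOFT_MASK_TABLE records that the code between rst_start and the 1: label
 * runs soft-masked, and RESTART_TABLE tells the masked-interrupt machinery
 * to resume at the restart entry if an interrupt hits inside that window,
 * rather than returning into a partially torn-down exit sequence.
 */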
system_call_vectored common 0x3000

/*
 * We instantiate another entry copy for the SIGILL variant, with TRAP=0x7ff0
 * which is tested by system_call_exception when r0 is -1 (as set by vector
 * entry code).
 */
system_call_vectored sigill 0x7ff0

#endif /* CONFIG_PPC_BOOK3S */
	.balign IFETCH_ALIGN_BYTES
	.globl system_call_common_real
system_call_common_real:
_ASM_NOKPROBE_SYMBOL(system_call_common_real)
	ld	r10,PACAKMSR(r13)	/* get MSR value for kernel */
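	/*
	 * Real-mode syscall entry: the kernel MSR fetched above is applied
	 * (turning relocation on) so handling continues at the virtual-mode
	 * system_call_common entry below.
	 */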
	.balign IFETCH_ALIGN_BYTES
	.globl system_call_common
system_call_common:
_ASM_NOKPROBE_SYMBOL(system_call_common)
#ifdef CONFIG_PPC_E500
START_BTB_FLUSH_SECTION
	BTB_FLUSH(r10)
END_BTB_FLUSH_SECTION
#endif
	/* Save syscall parameters in r3-r8 */
	/* Zero r9-r12, this should only be required when restoring all GPRs */

	/*
	 * This clears CR0.SO (bit 28), which is the error indication on
	 * return from this system call.
	 */
	rldimi	r12,r11,28,(63-28)
	LOAD_REG_IMMEDIATE(r11, STACK_FRAME_REGS_MARKER)
	std	r11,STACK_INT_FRAME_MARKER(r1)	/* "regs" marker */
	/* Calling convention has r3 = regs, r4 = orig r0 */
	addi	r3,r1,STACK_INT_FRAME_REGS
#ifdef CONFIG_PPC_BOOK3S
	li	r11,1
	stb	r11,PACASRR_VALID(r13)
#endif
	/*
	 * We always enter kernel from userspace with irq soft-mask enabled and
	 * nothing pending. system_call_exception() will call
	 * trace_hardirqs_off().
	 */
	li	r11,IRQS_ALL_DISABLED
	stb	r11,PACAIRQSOFTMASK(r13)
#ifdef CONFIG_PPC_BOOK3S
	li	r12,-1			/* Set MSR_EE and MSR_RI */
#endif
	/*
	 * Zero user registers to prevent influencing speculative execution
	 * state of kernel code.
	 */
	SANITIZE_SYSCALL_GPRS()
	bl	CFUNC(system_call_exception)

	addi	r4,r1,STACK_INT_FRAME_REGS
	bl	CFUNC(syscall_exit_prepare)
	std	r1,PACA_EXIT_SAVE_R1(r13)	/* save r1 for restart */
#ifdef CONFIG_PPC_BOOK3S
.Lsyscall_rst_start:
	lbz	r11,PACAIRQHAPPENED(r13)
	andi.	r11,r11,(~PACA_IRQ_HARD_DIS)@l
	bne-	syscall_restart
#endif
	stb	r11,PACAIRQSOFTMASK(r13)
	stb	r11,PACAIRQHAPPENED(r13)	# clear out possible HARD_DIS
#ifdef CONFIG_PPC_BOOK3S
	lbz	r4,PACASRR_VALID(r13)
	stb	r4,PACASRR_VALID(r13)
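	/*
	 * PACASRR_VALID tracks whether SRR0/SRR1 still hold the saved
	 * NIP/MSR; the (elided) code above only reloads the SRRs from the
	 * stack frame when the flag is clear, then marks them valid again.
	 */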
	stdcx.	r0,0,r1			/* to clear the reservation */
END_FTR_SECTION_IFCLR(CPU_FTR_STCX_CHECKS_ADDRESS)
	SANITIZE_RESTORE_NVGPRS()
	bne	.Lsyscall_restore_regs
	/* Zero volatile regs that may contain sensitive kernel data */
.Lsyscall_restore_regs_cont:
END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)

	/*
	 * We don't need to restore AMR on the way back to userspace for KUAP.
	 * The value of AMR only matters while we're in the kernel.
	 */
	b	.	/* prevent speculative execution */

.Lsyscall_restore_regs:
	HANDLER_RESTORE_NVGPRS()
	b	.Lsyscall_restore_regs_cont
.Lsyscall_rst_end:
#ifdef CONFIG_PPC_BOOK3S
syscall_restart:
_ASM_NOKPROBE_SYMBOL(syscall_restart)
	ld	r1,PACA_EXIT_SAVE_R1(r13)
	addi	r4,r1,STACK_INT_FRAME_REGS
	li	r11,IRQS_ALL_DISABLED
	stb	r11,PACAIRQSOFTMASK(r13)
	bl	CFUNC(syscall_exit_restart)
	std	r1,PACA_EXIT_SAVE_R1(r13)	/* save r1 for restart */
	b	.Lsyscall_rst_start
1:

SOFT_MASK_TABLE(.Lsyscall_rst_start, 1b)
RESTART_TABLE(.Lsyscall_rst_start, .Lsyscall_rst_end, syscall_restart)
#endif
/*
 * If MSR EE/RI was never enabled, IRQs not reconciled, NVGPRs not
 * touched, no exit work created, then this can be used.
 */
	.balign IFETCH_ALIGN_BYTES
	.globl fast_interrupt_return_srr
fast_interrupt_return_srr:
_ASM_NOKPROBE_SYMBOL(fast_interrupt_return_srr)
	kuap_check_amr r3, r4
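	/*
	 * kuap_check_amr above is a debug-only sanity check (active with
	 * CONFIG_PPC_KUAP_DEBUG) that the AMR still has user access locked
	 * out at this point; r3/r4 are merely scratch registers for it.
	 */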
	ld	r5,_MSR(r1)
	andi.	r0,r5,MSR_PR
#ifdef CONFIG_PPC_BOOK3S
	beq	1f
	kuap_user_restore r3, r4
	b	.Lfast_user_interrupt_return_srr
1:	kuap_kernel_restore r3, r4
	andi.	r0,r5,MSR_RI
	li	r3,0	/* 0 return value, no EMULATE_STACK_STORE */
	bne+	.Lfast_kernel_interrupt_return_srr
	addi	r3,r1,STACK_INT_FRAME_REGS
	bl	CFUNC(unrecoverable_exception)
	b	.	/* should not get here */
#else
	bne	.Lfast_user_interrupt_return_srr
	b	.Lfast_kernel_interrupt_return_srr
#endif
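
/*
 * The interrupt return path is generated twice by the macro below: once
 * for SRR0/SRR1 (ordinary interrupts) and, on Book3S, once for
 * HSRR0/HSRR1 (hypervisor interrupts), each with separate user and
 * kernel return flavours.
 */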
.macro interrupt_return_macro srr
	.balign IFETCH_ALIGN_BYTES
	.globl interrupt_return_\srr
interrupt_return_\srr\():
_ASM_NOKPROBE_SYMBOL(interrupt_return_\srr\())
	ld	r4,_MSR(r1)
	andi.	r4,r4,MSR_PR
	beq	interrupt_return_\srr\()_kernel
interrupt_return_\srr\()_user: /* make backtraces match the _kernel variant */
_ASM_NOKPROBE_SYMBOL(interrupt_return_\srr\()_user)
	addi	r3,r1,STACK_INT_FRAME_REGS
	bl	CFUNC(interrupt_exit_user_prepare)
#ifndef CONFIG_INTERRUPT_SANITIZE_REGISTERS
	cmpdi	r3,0
	bne-	.Lrestore_nvgprs_\srr
.Lrestore_nvgprs_\srr\()_cont:
#endif
	std	r1,PACA_EXIT_SAVE_R1(r13)	/* save r1 for restart */
#ifdef CONFIG_PPC_BOOK3S
.Linterrupt_return_\srr\()_user_rst_start:
	lbz	r11,PACAIRQHAPPENED(r13)
	andi.	r11,r11,(~PACA_IRQ_HARD_DIS)@l
	bne-	interrupt_return_\srr\()_user_restart
#endif
	stb	r11,PACAIRQSOFTMASK(r13)
	stb	r11,PACAIRQHAPPENED(r13)	# clear out possible HARD_DIS
.Lfast_user_interrupt_return_\srr\():
	SANITIZE_RESTORE_NVGPRS()
#ifdef CONFIG_PPC_BOOK3S
	.ifc \srr,srr
	lbz	r4,PACASRR_VALID(r13)
	.else
	lbz	r4,PACAHSRR_VALID(r13)
	.endif
#endif
#ifdef CONFIG_PPC_BOOK3S
	stb	r4,PACASRR_VALID(r13)
#endif
#ifdef CONFIG_PPC_BOOK3S
	stb	r4,PACAHSRR_VALID(r13)
#endif
#ifdef CONFIG_PPC_IRQ_SOFT_MASK_DEBUG
	lbz	r4,PACAIRQSOFTMASK(r13)
	tdnei	r4,IRQS_ENABLED
#endif
END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)
	stdcx.	r0,0,r1			/* to clear the reservation */
ALT_FTR_SECTION_END_IFCLR(CPU_FTR_STCX_CHECKS_ADDRESS)
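	/*
	 * The stdcx. above (or its ldarx alternative on CPUs where stcx.
	 * checks the address) disposes of any outstanding reservation, so
	 * a larx begun in the interrupted context cannot pair with a later
	 * stcx. and falsely succeed.
	 */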
	b	.	/* prevent speculative execution */
.Linterrupt_return_\srr\()_user_rst_end:

#ifndef CONFIG_INTERRUPT_SANITIZE_REGISTERS
.Lrestore_nvgprs_\srr\():
	REST_NVGPRS(r1)
	b	.Lrestore_nvgprs_\srr\()_cont
#endif
#ifdef CONFIG_PPC_BOOK3S
interrupt_return_\srr\()_user_restart:
_ASM_NOKPROBE_SYMBOL(interrupt_return_\srr\()_user_restart)
	ld	r1,PACA_EXIT_SAVE_R1(r13)
	addi	r3,r1,STACK_INT_FRAME_REGS
	li	r11,IRQS_ALL_DISABLED
	stb	r11,PACAIRQSOFTMASK(r13)
	bl	CFUNC(interrupt_exit_user_restart)
	std	r1,PACA_EXIT_SAVE_R1(r13)	/* save r1 for restart */
	b	.Linterrupt_return_\srr\()_user_rst_start
1:

SOFT_MASK_TABLE(.Linterrupt_return_\srr\()_user_rst_start, 1b)
RESTART_TABLE(.Linterrupt_return_\srr\()_user_rst_start, .Linterrupt_return_\srr\()_user_rst_end, interrupt_return_\srr\()_user_restart)
#endif
	.balign IFETCH_ALIGN_BYTES
interrupt_return_\srr\()_kernel:
_ASM_NOKPROBE_SYMBOL(interrupt_return_\srr\()_kernel)
	addi	r3,r1,STACK_INT_FRAME_REGS
	bl	CFUNC(interrupt_exit_kernel_prepare)

	std	r1,PACA_EXIT_SAVE_R1(r13)	/* save r1 for restart */
.Linterrupt_return_\srr\()_kernel_rst_start:
	ld	r11,SOFTE(r1)
	cmpwi	r11,IRQS_ENABLED
	stb	r11,PACAIRQSOFTMASK(r13)
	beq	.Linterrupt_return_\srr\()_soft_enabled
	/*
	 * Returning to soft-disabled context.
	 * Check if a MUST_HARD_MASK interrupt has become pending, in which
	 * case we need to disable MSR[EE] in the return context.
	 *
	 * The MSR[EE] check catches among other things the short incoherency
	 * in hard_irq_disable() between clearing MSR[EE] and setting
	 * PACA_IRQ_HARD_DIS.
	 */
	ld	r11,_MSR(r1)
	andi.	r10,r11,MSR_EE
	beq	.Lfast_kernel_interrupt_return_\srr\()	// EE already disabled
	lbz	r11,PACAIRQHAPPENED(r13)
	andi.	r10,r11,PACA_IRQ_MUST_HARD_MASK
	bne	1f	// HARD_MASK is pending
	// No HARD_MASK pending, clear possible HARD_DIS set by interrupt
	andi.	r11,r11,(~PACA_IRQ_HARD_DIS)@l
	stb	r11,PACAIRQHAPPENED(r13)
	b	.Lfast_kernel_interrupt_return_\srr\()

1:	/* Must clear MSR_EE from _MSR */
#ifdef CONFIG_PPC_BOOK3S
	li	r10,0
	/*
	 * Clear valid before changing _MSR: once the saved MSR is modified,
	 * the SRRs no longer match the saved return state and must be
	 * reloaded on the way out.
	 */
	.ifc \srr,srr
	stb	r10,PACASRR_VALID(r13)
	.else
	stb	r10,PACAHSRR_VALID(r13)
	.endif
#endif
	b	.Lfast_kernel_interrupt_return_\srr\()
.Linterrupt_return_\srr\()_soft_enabled:
	/*
	 * In the soft-enabled case, need to double-check that we have no
	 * pending interrupts that might have come in before we reached the
	 * restart section of code, and restart the exit so those can be
	 * handled.
	 *
	 * If there are none, it is possible that the interrupt still
	 * has PACA_IRQ_HARD_DIS set, which needs to be cleared for the
	 * interrupted context. This clear will not clobber a new pending
	 * interrupt coming in, because we're in the restart section, so
	 * such would return to the restart location.
	 */
#ifdef CONFIG_PPC_BOOK3S
	lbz	r11,PACAIRQHAPPENED(r13)
	andi.	r11,r11,(~PACA_IRQ_HARD_DIS)@l
	bne-	interrupt_return_\srr\()_kernel_restart
#endif
	stb	r11,PACAIRQHAPPENED(r13)	// clear the possible HARD_DIS
.Lfast_kernel_interrupt_return_\srr\():
	SANITIZE_RESTORE_NVGPRS()
#ifdef CONFIG_PPC_BOOK3S
	.ifc \srr,srr
	lbz	r4,PACASRR_VALID(r13)
	.else
	lbz	r4,PACAHSRR_VALID(r13)
	.endif
#endif
#ifdef CONFIG_PPC_BOOK3S
	stb	r4,PACASRR_VALID(r13)
#endif
#ifdef CONFIG_PPC_BOOK3S
	stb	r4,PACAHSRR_VALID(r13)
#endif
	stdcx.	r0,0,r1			/* to clear the reservation */
ALT_FTR_SECTION_END_IFCLR(CPU_FTR_STCX_CHECKS_ADDRESS)

	/*
	 * Leaving a stale STACK_FRAME_REGS_MARKER on the stack can confuse
	 * the reliable stack unwinder later on. Clear it.
	 */
	std	r0,STACK_INT_FRAME_MARKER(r1)

	bne-	cr1,1f	/* emulate stack store */

	b	.	/* prevent speculative execution */
	/*
	 * Emulate stack store with update. New r1 value was already calculated
	 * and updated in our interrupt regs by emulate_loadstore, but we can't
	 * store the previous value of r1 to the stack before re-loading our
	 * registers from it, otherwise they could be clobbered. Use
	 * PACA_EXGEN as temporary storage to hold the store data, as
	 * interrupts are disabled here so it won't be clobbered.
	 */
1:
	std	r9,PACA_EXGEN+0(r13)
	addi	r9,r1,INT_FRAME_SIZE	/* get original r1 */
	std	r9,0(r1)	/* perform store component of stdu */
	ld	r9,PACA_EXGEN+0(r13)

	b	.	/* prevent speculative execution */
.Linterrupt_return_\srr\()_kernel_rst_end:

#ifdef CONFIG_PPC_BOOK3S
interrupt_return_\srr\()_kernel_restart:
_ASM_NOKPROBE_SYMBOL(interrupt_return_\srr\()_kernel_restart)
	ld	r1,PACA_EXIT_SAVE_R1(r13)
	addi	r3,r1,STACK_INT_FRAME_REGS
	li	r11,IRQS_ALL_DISABLED
	stb	r11,PACAIRQSOFTMASK(r13)
	bl	CFUNC(interrupt_exit_kernel_restart)
	std	r1,PACA_EXIT_SAVE_R1(r13)	/* save r1 for restart */
	b	.Linterrupt_return_\srr\()_kernel_rst_start
1:

SOFT_MASK_TABLE(.Linterrupt_return_\srr\()_kernel_rst_start, 1b)
RESTART_TABLE(.Linterrupt_return_\srr\()_kernel_rst_start, .Linterrupt_return_\srr\()_kernel_rst_end, interrupt_return_\srr\()_kernel_restart)
#endif

.endm
interrupt_return_macro srr
#ifdef CONFIG_PPC_BOOK3S
interrupt_return_macro hsrr
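
/*
 * __end_soft_masked marks, for the masked-interrupt handlers, the end of
 * the entry/exit text that is treated as implicitly soft-masked, which is
 * what allows the soft-mask and restart tables above to take effect.
 */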
	.globl __end_soft_masked
__end_soft_masked:
DEFINE_FIXED_SYMBOL(__end_soft_masked, text)
#endif /* CONFIG_PPC_BOOK3S */
#ifdef CONFIG_PPC_BOOK3S
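/*
 * Child return path for a fork() that entered the kernel via scv: finish
 * the context switch in schedule_tail(), restore the non-volatile GPRs
 * the child inherited, and return 0 to the child through the scv syscall
 * exit path.
 */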
_GLOBAL(ret_from_fork_scv)
	bl	CFUNC(schedule_tail)
	HANDLER_RESTORE_NVGPRS()
	li	r3,0	/* fork() return value */
	b	.Lsyscall_vectored_common_exit
#endif
_GLOBAL(ret_from_fork)
	bl	CFUNC(schedule_tail)
	HANDLER_RESTORE_NVGPRS()
	li	r3,0	/* fork() return value */
_GLOBAL(ret_from_kernel_user_thread)
	bl	CFUNC(schedule_tail)
#ifdef CONFIG_PPC64_ELF_ABI_V2
	mr	r12,r14
#endif

	/*
	 * It does not matter whether this returns via the scv or sc path
	 * because it returns as execve() and therefore has no calling ABI
	 * (i.e., it sets registers according to the exec()ed entry point).
	 */
_GLOBAL(start_kernel_thread)
	bl	CFUNC(schedule_tail)
#ifdef CONFIG_PPC64_ELF_ABI_V2
	mr	r12,r14
#endif

	/*
	 * This must not return. We actually want to BUG here, not WARN,
	 * because BUG will exit the process, which is what the kernel thread
	 * should have done, which may give some hope of continuing.
	 */
100:	trap
	EMIT_BUG_ENTRY 100b,__FILE__,__LINE__,0