1 #include <asm/asm-offsets.h>
3 #ifdef CONFIG_PPC_BOOK3S
4 #include <asm/exception-64s.h>
6 #include <asm/exception-64e.h>
8 #include <asm/feature-fixups.h>
9 #include <asm/head-64.h>
10 #include <asm/hw_irq.h>
13 #include <asm/ppc_asm.h>
14 #include <asm/ptrace.h>
/*
 * TOC (table-of-contents) entries for the native and compat system call
 * tables, so syscall dispatch code can load the table addresses
 * TOC-relative through r2 (cf. the exception_marker@toc loads below).
 * NOTE(review): the label for the first .tc entry (presumably
 * SYS_CALL_TABLE:) falls in lines elided from this excerpt -- confirm
 * against the full file.
 */
18 .tc sys_call_table[TC],sys_call_table
21 COMPAT_SYS_CALL_TABLE:
22 .tc compat_sys_call_table[TC],compat_sys_call_table
/*
 * DEBUG_SRR_VALID srr:
 * Debug-only sanity macro, compiled in only under CONFIG_PPC_RFI_SRR_DEBUG.
 * Each EMIT_WARN_ENTRY emits a one-shot warning (BUGFLAG_WARNING |
 * BUGFLAG_ONCE) anchored at a backward "100:" label on the checking site.
 * NOTE(review): the actual comparison/trap instructions, the 100: labels,
 * the closing #endif and the .endm are all in lines elided from this
 * excerpt; presumably the checks validate the cached SRR/HSRR values
 * tracked via PACASRR_VALID/PACAHSRR_VALID -- confirm in the full file.
 */
28 .macro DEBUG_SRR_VALID srr
29 #ifdef CONFIG_PPC_RFI_SRR_DEBUG
36 EMIT_WARN_ENTRY 100b,__FILE__,__LINE__,(BUGFLAG_WARNING | BUGFLAG_ONCE)
40 EMIT_WARN_ENTRY 100b,__FILE__,__LINE__,(BUGFLAG_WARNING | BUGFLAG_ONCE)
47 EMIT_WARN_ENTRY 100b,__FILE__,__LINE__,(BUGFLAG_WARNING | BUGFLAG_ONCE)
51 EMIT_WARN_ENTRY 100b,__FILE__,__LINE__,(BUGFLAG_WARNING | BUGFLAG_ONCE)
56 #ifdef CONFIG_PPC_BOOK3S
/*
 * system_call_vectored name trapnr:
 * Entry/exit path for "scv" (vectored) system calls on Book3S.  Builds a
 * pt_regs frame, calls system_call_exception(), then returns to userspace
 * either via the fast path or via a full GPR restore, with a restartable
 * window for interrupts that arrived while soft-masked.
 * NOTE(review): this excerpt elides most of the frame setup and register
 * save/restore instructions (original lines are sampled) -- the comments
 * below describe only what the visible lines establish.
 */
57 .macro system_call_vectored name trapnr
58 .globl system_call_vectored_\name
59 system_call_vectored_\name:
60 _ASM_NOKPROBE_SYMBOL(system_call_vectored_\name)
61 SCV_INTERRUPT_TO_KERNEL
73 /* Can we avoid saving r3-r8 in common case? */
80 /* Zero r9-r12, this should only be required when restoring all GPRs */
/* Plant the "regshere" exception marker just below the pt_regs area. */
94 addi r10,r1,STACK_FRAME_OVERHEAD
95 ld r11,exception_marker@toc(r2)
96 std r11,-16(r10) /* "regshere" marker */
100 END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)
103 * scv enters with MSR[EE]=1 and is immediately considered soft-masked.
104 * The entry vector already sets PACAIRQSOFTMASK to IRQS_ALL_DISABLED,
105 * and interrupts may be masked and pending already.
106 * system_call_exception() will call trace_hardirqs_off() which means
107 * interrupts could already have been blocked before trace_hardirqs_off,
108 * but this is the best we can do.
111 /* Calling convention has r9 = orig r0, r10 = regs */
113 bl system_call_exception
115 .Lsyscall_vectored_\name\()_exit:
116 addi r4,r1,STACK_FRAME_OVERHEAD
118 bl syscall_exit_prepare
119 std r1,PACA_EXIT_SAVE_R1(r13) /* save r1 for restart */
/*
 * Restartable section: if any interrupt other than the hard-disable
 * marker became pending while soft-masked, take the restart path.
 */
120 .Lsyscall_vectored_\name\()_rst_start:
121 lbz r11,PACAIRQHAPPENED(r13)
122 andi. r11,r11,(~PACA_IRQ_HARD_DIS)@l
123 bne- syscall_vectored_\name\()_restart
/* r11 is (re)loaded on elided lines before these stores -- TODO confirm. */
125 stb r11,PACAIRQSOFTMASK(r13)
127 stb r11,PACAIRQHAPPENED(r13) # clear out possible HARD_DIS
134 stdcx. r0,0,r1 /* to clear the reservation */
135 END_FTR_SECTION_IFCLR(CPU_FTR_STCX_CHECKS_ADDRESS)
139 END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)
/* Non-zero condition (set on an elided line) forces the full-restore path. */
142 bne .Lsyscall_vectored_\name\()_restore_regs
144 /* rfscv returns with LR->NIA and CTR->MSR */
148 /* Could zero these as per ABI, but we may consider a stricter ABI
149 * which preserves these if libc implementations can benefit, so
150 * restore them for now until further measurement is done. */
157 /* Zero volatile regs that may contain sensitive kernel data */
165 * We don't need to restore AMR on the way back to userspace for KUAP.
166 * The value of AMR only matters while we're in the kernel.
173 b . /* prevent speculative execution */
175 .Lsyscall_vectored_\name\()_restore_regs:
192 .Lsyscall_vectored_\name\()_rst_end:
/*
 * Restart stub: re-establish r1 from the saved copy, re-disable the irq
 * soft-mask, and loop back into the restartable section.
 */
194 syscall_vectored_\name\()_restart:
195 _ASM_NOKPROBE_SYMBOL(syscall_vectored_\name\()_restart)
197 ld r1,PACA_EXIT_SAVE_R1(r13)
200 addi r4,r1,STACK_FRAME_OVERHEAD
201 li r11,IRQS_ALL_DISABLED
202 stb r11,PACAIRQSOFTMASK(r13)
203 bl syscall_exit_restart
204 std r1,PACA_EXIT_SAVE_R1(r13) /* save r1 for restart */
205 b .Lsyscall_vectored_\name\()_rst_start
/* Tables so the masked-interrupt/restart machinery can find this window. */
208 SOFT_MASK_TABLE(.Lsyscall_vectored_\name\()_rst_start, 1b)
209 RESTART_TABLE(.Lsyscall_vectored_\name\()_rst_start, .Lsyscall_vectored_\name\()_rst_end, syscall_vectored_\name\()_restart)
/* Standard scv entry, trap number 0x3000. */
213 system_call_vectored common 0x3000
216 * We instantiate another entry copy for the SIGILL variant, with TRAP=0x7ff0
217 * which is tested by system_call_exception when r0 is -1 (as set by vector
220 system_call_vectored sigill 0x7ff0
224 * Entered via kernel return set up by kernel/sstep.c, must match entry regs
/*
 * Emulated-scv entry: forces the irq soft-mask to IRQS_ALL_DISABLED (as
 * the real scv entry vector would have done) before joining the common
 * vectored path.
 */
226 .globl system_call_vectored_emulate
227 system_call_vectored_emulate:
228 _ASM_NOKPROBE_SYMBOL(system_call_vectored_emulate)
229 li r10,IRQS_ALL_DISABLED
230 stb r10,PACAIRQSOFTMASK(r13)
231 b system_call_vectored_common
232 #endif /* CONFIG_PPC_BOOK3S */
/*
 * Real-mode sc entry: loads the kernel MSR from the paca; presumably it
 * switches to virtual mode and falls through to system_call_common (the
 * intervening instructions are elided from this excerpt -- confirm).
 */
234 .balign IFETCH_ALIGN_BYTES
235 .globl system_call_common_real
236 system_call_common_real:
237 _ASM_NOKPROBE_SYMBOL(system_call_common_real)
238 ld r10,PACAKMSR(r13) /* get MSR value for kernel */
/*
 * system_call_common:
 * Entry/exit path for "sc" system calls.  Mirrors the scv path above:
 * build the pt_regs frame, call system_call_exception(), exit through
 * syscall_exit_prepare() with a restartable soft-mask window, then
 * return via fast path or full register restore.
 * NOTE(review): most frame setup and register save/restore instructions
 * are elided from this excerpt; comments reflect only the visible lines.
 */
241 .balign IFETCH_ALIGN_BYTES
242 .globl system_call_common
244 _ASM_NOKPROBE_SYMBOL(system_call_common)
253 #ifdef CONFIG_PPC_FSL_BOOK3E
254 START_BTB_FLUSH_SECTION
256 END_BTB_FLUSH_SECTION
261 /* Can we avoid saving r3-r8 in common case? */
268 /* Zero r9-r12, this should only be required when restoring all GPRs */
280 * This clears CR0.SO (bit 28), which is the error indication on
281 * return from this system call.
283 rldimi r12,r11,28,(63-28)
/* Plant the "regshere" exception marker just below the pt_regs area. */
288 addi r10,r1,STACK_FRAME_OVERHEAD
289 ld r11,exception_marker@toc(r2)
290 std r11,-16(r10) /* "regshere" marker */
292 #ifdef CONFIG_PPC_BOOK3S
294 stb r11,PACASRR_VALID(r13)
298 * We always enter kernel from userspace with irq soft-mask enabled and
299 * nothing pending. system_call_exception() will call
300 * trace_hardirqs_off().
302 li r11,IRQS_ALL_DISABLED
303 stb r11,PACAIRQSOFTMASK(r13)
304 #ifdef CONFIG_PPC_BOOK3S
305 li r12,-1 /* Set MSR_EE and MSR_RI */
311 /* Calling convention has r9 = orig r0, r10 = regs */
313 bl system_call_exception
316 addi r4,r1,STACK_FRAME_OVERHEAD
318 bl syscall_exit_prepare
319 std r1,PACA_EXIT_SAVE_R1(r13) /* save r1 for restart */
320 #ifdef CONFIG_PPC_BOOK3S
/*
 * Restartable section: any pending interrupt other than the
 * hard-disable marker diverts to the restart path (branch elided here).
 */
322 lbz r11,PACAIRQHAPPENED(r13)
323 andi. r11,r11,(~PACA_IRQ_HARD_DIS)@l
327 stb r11,PACAIRQSOFTMASK(r13)
329 stb r11,PACAIRQHAPPENED(r13) # clear out possible HARD_DIS
335 #ifdef CONFIG_PPC_BOOK3S
/* Consume the cached-SRR validity flag; cleared again before return. */
336 lbz r4,PACASRR_VALID(r13)
340 stb r4,PACASRR_VALID(r13)
350 stdcx. r0,0,r1 /* to clear the reservation */
351 END_FTR_SECTION_IFCLR(CPU_FTR_STCX_CHECKS_ADDRESS)
354 bne .Lsyscall_restore_regs
355 /* Zero volatile regs that may contain sensitive kernel data */
368 .Lsyscall_restore_regs_cont:
372 END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)
375 * We don't need to restore AMR on the way back to userspace for KUAP.
376 * The value of AMR only matters while we're in the kernel.
383 b . /* prevent speculative execution */
385 .Lsyscall_restore_regs:
393 b .Lsyscall_restore_regs_cont
396 #ifdef CONFIG_PPC_BOOK3S
/*
 * Restart stub: recover r1, re-disable the irq soft-mask, call
 * syscall_exit_restart(), and loop back to the restartable section.
 */
398 _ASM_NOKPROBE_SYMBOL(syscall_restart)
400 ld r1,PACA_EXIT_SAVE_R1(r13)
403 addi r4,r1,STACK_FRAME_OVERHEAD
404 li r11,IRQS_ALL_DISABLED
405 stb r11,PACAIRQSOFTMASK(r13)
406 bl syscall_exit_restart
407 std r1,PACA_EXIT_SAVE_R1(r13) /* save r1 for restart */
408 b .Lsyscall_rst_start
/* Tables so the masked-interrupt/restart machinery can find this window. */
411 SOFT_MASK_TABLE(.Lsyscall_rst_start, 1b)
412 RESTART_TABLE(.Lsyscall_rst_start, .Lsyscall_rst_end, syscall_restart)
/*
 * fast_interrupt_return_srr:
 * Shortcut interrupt return usable only when (per the comment below) the
 * interrupt never enabled MSR EE/RI, did not reconcile IRQs, did not
 * touch NVGPRs, and created no exit work.  Dispatches to the fast user
 * or fast kernel return paths defined in interrupt_return_macro.
 * NOTE(review): the user/kernel discriminating test feeding the "1:"
 * branch and the "bne+" is elided from this excerpt.
 */
416 * If MSR EE/RI was never enabled, IRQs not reconciled, NVGPRs not
417 * touched, no exit work created, then this can be used.
419 .balign IFETCH_ALIGN_BYTES
420 .globl fast_interrupt_return_srr
421 fast_interrupt_return_srr:
422 _ASM_NOKPROBE_SYMBOL(fast_interrupt_return_srr)
423 kuap_check_amr r3, r4
426 #ifdef CONFIG_PPC_BOOK3S
428 kuap_user_restore r3, r4
429 b .Lfast_user_interrupt_return_srr
430 1: kuap_kernel_restore r3, r4
432 li r3,0 /* 0 return value, no EMULATE_STACK_STORE */
/* If recoverable, take the fast kernel path; otherwise die loudly. */
433 bne+ .Lfast_kernel_interrupt_return_srr
434 addi r3,r1,STACK_FRAME_OVERHEAD
435 bl unrecoverable_exception
436 b . /* should not get here */
438 bne .Lfast_user_interrupt_return_srr
439 b .Lfast_kernel_interrupt_return_srr
/*
 * interrupt_return_macro srr:
 * Instantiates the interrupt-return path for one SRR flavour (srr or
 * hsrr): a user-return variant (via interrupt_exit_user_prepare) and a
 * kernel-return variant (via interrupt_exit_kernel_prepare), each with a
 * restartable soft-mask window, fast register-restore exits, and restart
 * stubs registered in SOFT_MASK_TABLE/RESTART_TABLE.
 * NOTE(review): many instructions (register restores, mtspr/rfid-style
 * returns, CR tests) are elided from this excerpt; comments describe
 * only what the visible lines establish.
 */
442 .macro interrupt_return_macro srr
443 .balign IFETCH_ALIGN_BYTES
444 .globl interrupt_return_\srr
445 interrupt_return_\srr\():
446 _ASM_NOKPROBE_SYMBOL(interrupt_return_\srr\())
449 beq interrupt_return_\srr\()_kernel
450 interrupt_return_\srr\()_user: /* make backtraces match the _kernel variant */
451 _ASM_NOKPROBE_SYMBOL(interrupt_return_\srr\()_user)
452 addi r3,r1,STACK_FRAME_OVERHEAD
453 bl interrupt_exit_user_prepare
/* Non-zero return from prepare (elided test) means NVGPRs must be restored. */
455 bne- .Lrestore_nvgprs_\srr
456 .Lrestore_nvgprs_\srr\()_cont:
457 std r1,PACA_EXIT_SAVE_R1(r13) /* save r1 for restart */
458 #ifdef CONFIG_PPC_BOOK3S
/*
 * Restartable section (user return): pending interrupts other than the
 * hard-disable marker divert to the restart stub below.
 */
459 .Linterrupt_return_\srr\()_user_rst_start:
460 lbz r11,PACAIRQHAPPENED(r13)
461 andi. r11,r11,(~PACA_IRQ_HARD_DIS)@l
462 bne- interrupt_return_\srr\()_user_restart
465 stb r11,PACAIRQSOFTMASK(r13)
467 stb r11,PACAIRQHAPPENED(r13) # clear out possible HARD_DIS
469 .Lfast_user_interrupt_return_\srr\():
470 #ifdef CONFIG_PPC_BOOK3S
/* Check/clear the cached SRR (or HSRR) validity flags for this flavour. */
472 lbz r4,PACASRR_VALID(r13)
474 lbz r4,PACAHSRR_VALID(r13)
486 #ifdef CONFIG_PPC_BOOK3S
487 stb r4,PACASRR_VALID(r13)
493 #ifdef CONFIG_PPC_BOOK3S
494 stb r4,PACAHSRR_VALID(r13)
499 #ifdef CONFIG_PPC_IRQ_SOFT_MASK_DEBUG
/* Debug-only: trap if the soft-mask is not IRQS_ENABLED at this point. */
500 lbz r4,PACAIRQSOFTMASK(r13)
501 tdnei r4,IRQS_ENABLED
507 END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)
510 stdcx. r0,0,r1 /* to clear the reservation */
513 ALT_FTR_SECTION_END_IFCLR(CPU_FTR_STCX_CHECKS_ADDRESS)
536 b . /* prevent speculative execution */
537 .Linterrupt_return_\srr\()_user_rst_end:
539 .Lrestore_nvgprs_\srr\():
541 b .Lrestore_nvgprs_\srr\()_cont
543 #ifdef CONFIG_PPC_BOOK3S
/* Restart stub for the user-return window. */
544 interrupt_return_\srr\()_user_restart:
545 _ASM_NOKPROBE_SYMBOL(interrupt_return_\srr\()_user_restart)
547 ld r1,PACA_EXIT_SAVE_R1(r13)
549 addi r3,r1,STACK_FRAME_OVERHEAD
550 li r11,IRQS_ALL_DISABLED
551 stb r11,PACAIRQSOFTMASK(r13)
552 bl interrupt_exit_user_restart
553 std r1,PACA_EXIT_SAVE_R1(r13) /* save r1 for restart */
554 b .Linterrupt_return_\srr\()_user_rst_start
557 SOFT_MASK_TABLE(.Linterrupt_return_\srr\()_user_rst_start, 1b)
558 RESTART_TABLE(.Linterrupt_return_\srr\()_user_rst_start, .Linterrupt_return_\srr\()_user_rst_end, interrupt_return_\srr\()_user_restart)
/* Kernel-return variant. */
561 .balign IFETCH_ALIGN_BYTES
562 interrupt_return_\srr\()_kernel:
563 _ASM_NOKPROBE_SYMBOL(interrupt_return_\srr\()_kernel)
564 addi r3,r1,STACK_FRAME_OVERHEAD
565 bl interrupt_exit_kernel_prepare
567 std r1,PACA_EXIT_SAVE_R1(r13) /* save r1 for restart */
568 .Linterrupt_return_\srr\()_kernel_rst_start:
/* r11 holds the soft-mask state to restore (loaded on an elided line). */
570 cmpwi r11,IRQS_ENABLED
571 stb r11,PACAIRQSOFTMASK(r13)
573 #ifdef CONFIG_PPC_BOOK3S
574 lbz r11,PACAIRQHAPPENED(r13)
575 andi. r11,r11,(~PACA_IRQ_HARD_DIS)@l
576 bne- interrupt_return_\srr\()_kernel_restart
579 stb r11,PACAIRQHAPPENED(r13) # clear out possible HARD_DIS
582 .Lfast_kernel_interrupt_return_\srr\():
584 #ifdef CONFIG_PPC_BOOK3S
/* Check/clear the cached SRR (or HSRR) validity flags for this flavour. */
586 lbz r4,PACASRR_VALID(r13)
588 lbz r4,PACAHSRR_VALID(r13)
600 #ifdef CONFIG_PPC_BOOK3S
601 stb r4,PACASRR_VALID(r13)
607 #ifdef CONFIG_PPC_BOOK3S
608 stb r4,PACAHSRR_VALID(r13)
614 stdcx. r0,0,r1 /* to clear the reservation */
617 ALT_FTR_SECTION_END_IFCLR(CPU_FTR_STCX_CHECKS_ADDRESS)
632 * Leaving a stale exception_marker on the stack can confuse
633 * the reliable stack unwinder later on. Clear it.
635 std r0,STACK_FRAME_OVERHEAD-16(r1)
639 bne- cr1,1f /* emulate stack store */
649 b . /* prevent speculative execution */
652 * Emulate stack store with update. New r1 value was already calculated
653 * and updated in our interrupt regs by emulate_loadstore, but we can't
654 * store the previous value of r1 to the stack before re-loading our
655 * registers from it, otherwise they could be clobbered. Use
656 * PACA_EXGEN as temporary storage to hold the store data, as
657 * interrupts are disabled here so it won't be clobbered.
660 std r9,PACA_EXGEN+0(r13)
661 addi r9,r1,INT_FRAME_SIZE /* get original r1 */
665 std r9,0(r1) /* perform store component of stdu */
666 ld r9,PACA_EXGEN+0(r13)
673 b . /* prevent speculative execution */
674 .Linterrupt_return_\srr\()_kernel_rst_end:
676 #ifdef CONFIG_PPC_BOOK3S
/* Restart stub for the kernel-return window. */
677 interrupt_return_\srr\()_kernel_restart:
678 _ASM_NOKPROBE_SYMBOL(interrupt_return_\srr\()_kernel_restart)
680 ld r1,PACA_EXIT_SAVE_R1(r13)
682 addi r3,r1,STACK_FRAME_OVERHEAD
683 li r11,IRQS_ALL_DISABLED
684 stb r11,PACAIRQSOFTMASK(r13)
685 bl interrupt_exit_kernel_restart
686 std r1,PACA_EXIT_SAVE_R1(r13) /* save r1 for restart */
687 b .Linterrupt_return_\srr\()_kernel_rst_start
690 SOFT_MASK_TABLE(.Linterrupt_return_\srr\()_kernel_rst_start, 1b)
691 RESTART_TABLE(.Linterrupt_return_\srr\()_kernel_rst_start, .Linterrupt_return_\srr\()_kernel_rst_end, interrupt_return_\srr\()_kernel_restart)
/* Instantiate the return path for SRR, and (Book3S only) for HSRR. */
696 interrupt_return_macro srr
697 #ifdef CONFIG_PPC_BOOK3S
698 interrupt_return_macro hsrr
/* Marks the end of the soft-masked text region for the masking tables. */
700 .globl __end_soft_masked
702 DEFINE_FIXED_SYMBOL(__end_soft_masked)
703 #endif /* CONFIG_PPC_BOOK3S */
705 #ifdef CONFIG_PPC_BOOK3S
/*
 * First-return path for a child created on an scv syscall: set the
 * child's fork() return value to 0 and join the common scv exit.
 * NOTE(review): the call made before the li (presumably schedule_tail)
 * is elided from this excerpt.
 */
706 _GLOBAL(ret_from_fork_scv)
709 li r3,0 /* fork() return value */
710 b .Lsyscall_vectored_common_exit
/* Same, for a child created on an "sc" syscall (exit branch elided). */
713 _GLOBAL(ret_from_fork)
716 li r3,0 /* fork() return value */
719 _GLOBAL(ret_from_kernel_thread)
724 #ifdef PPC64_ELF_ABI_v2