/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Low-level exception handling code
 *
 * Copyright (C) 2012 ARM Ltd.
 * Authors:	Catalin Marinas <catalin.marinas@arm.com>
 *		Will Deacon <will.deacon@arm.com>
 */

#include <linux/arm-smccc.h>
#include <linux/init.h>
#include <linux/linkage.h>

#include <asm/alternative.h>
#include <asm/assembler.h>
#include <asm/asm-offsets.h>
#include <asm/asm_pointer_auth.h>
#include <asm/bug.h>
#include <asm/cpufeature.h>
#include <asm/errno.h>
#include <asm/esr.h>
#include <asm/irq.h>
#include <asm/memory.h>
#include <asm/mmu.h>
#include <asm/processor.h>
#include <asm/ptrace.h>
#include <asm/scs.h>
#include <asm/thread_info.h>
#include <asm/asm-uaccess.h>
#include <asm/unistd.h>
/*
 * Context tracking and irqflag tracing need to instrument transitions between
 * user and kernel mode.
 */
	.macro user_exit_irqoff
#if defined(CONFIG_CONTEXT_TRACKING) || defined(CONFIG_TRACE_IRQFLAGS)
	bl	enter_from_user_mode
#endif
	.endm

	.macro user_enter_irqoff
#if defined(CONFIG_CONTEXT_TRACKING) || defined(CONFIG_TRACE_IRQFLAGS)
	bl	exit_to_user_mode
#endif
	.endm

	.macro	clear_gp_regs
	.irp	n,0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29
	mov	x\n, xzr
	.endr
	.endm

	.macro kernel_ventry, el, label, regsize = 64
	.align 7
.Lventry_start\@:
	.if	\el == 0
	/*
	 * This must be the first instruction of the EL0 vector entries. It is
	 * skipped by the trampoline vectors, to trigger the cleanup.
	 */
	b	.Lskip_tramp_vectors_cleanup\@
	.if	\regsize == 64
	mrs	x30, tpidrro_el0
	msr	tpidrro_el0, xzr
	.else
	mov	x30, xzr
	.endif
.Lskip_tramp_vectors_cleanup\@:
	.endif
	sub	sp, sp, #S_FRAME_SIZE
#ifdef CONFIG_VMAP_STACK
	/*
	 * Test whether the SP has overflowed, without corrupting a GPR.
	 * Task and IRQ stacks are aligned so that SP & (1 << THREAD_SHIFT)
	 * should always be zero.
	 */
	add	sp, sp, x0			// sp' = sp + x0
	sub	x0, sp, x0			// x0' = sp' - x0 = (sp + x0) - x0 = sp
	tbnz	x0, #THREAD_SHIFT, 0f
	sub	x0, sp, x0			// x0'' = sp' - x0' = (sp + x0) - sp = x0
	sub	sp, sp, x0			// sp'' = sp' - x0'' = (sp + x0) - x0 = sp
	b	el\()\el\()_\label
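	/*
	 * In rough C terms, the swap dance above is:
	 *	sp += x0;			// sp encodes both values
	 *	x0  = sp - x0;			// x0' = original sp
	 *	if (x0 & (1 << THREAD_SHIFT))
	 *		goto overflow;		// 0: below
	 *	x0  = sp - x0;			// recover original x0
	 *	sp -= x0;			// recover original sp
	 * letting us test the stack pointer without a free scratch register.
	 */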
0:
	/*
	 * Either we've just detected an overflow, or we've taken an exception
	 * while on the overflow stack. Either way, we won't return to
	 * userspace, and can clobber EL0 registers to free up GPRs.
	 */

	/* Stash the original SP (minus S_FRAME_SIZE) in tpidr_el0. */
	msr	tpidr_el0, x0

	/* Recover the original x0 value and stash it in tpidrro_el0 */
	sub	x0, sp, x0
	msr	tpidrro_el0, x0

	/* Switch to the overflow stack */
	adr_this_cpu sp, overflow_stack + OVERFLOW_STACK_SIZE, x0

	/*
	 * Check whether we were already on the overflow stack. This may happen
	 * after panic() re-enables interrupts.
	 */
	mrs	x0, tpidr_el0			// sp of interrupted context
	sub	x0, sp, x0			// delta with top of overflow stack
	tst	x0, #~(OVERFLOW_STACK_SIZE - 1)	// within range?
	b.ne	__bad_stack			// no? -> bad stack pointer
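	/*
	 * In effect, the two instructions above compute
	 *	delta = overflow_stack_top - interrupted_sp
	 * and the tst/b.ne pair rejects any delta outside
	 * [0, OVERFLOW_STACK_SIZE), i.e. any interrupted context that was not
	 * already running on this CPU's overflow stack.
	 */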
	/* We were already on the overflow stack. Restore sp/x0 and carry on. */
	sub	sp, sp, x0
	mrs	x0, tpidrro_el0
#endif
	b	el\()\el\()_\label
.org .Lventry_start\@ + 128	// Did we overflow the ventry slot?
	.endm

	.macro tramp_alias, dst, sym, tmp
	mov_q	\dst, TRAMP_VALIAS
	adr_l	\tmp, \sym
	add	\dst, \dst, \tmp
	adr_l	\tmp, .entry.tramp.text
	sub	\dst, \dst, \tmp
	.endm

	/*
	 * This macro corrupts x0-x3. It is the caller's duty to save/restore
	 * them if required.
	 */
	.macro	apply_ssbd, state, tmp1, tmp2
alternative_cb	spectre_v4_patch_fw_mitigation_enable
	b	.L__asm_ssbd_skip\@		// Patched to NOP
alternative_cb_end
	ldr_this_cpu	\tmp2, arm64_ssbd_callback_required, \tmp1
	cbz	\tmp2,	.L__asm_ssbd_skip\@
	ldr	\tmp2, [tsk, #TSK_TI_FLAGS]
	tbnz	\tmp2, #TIF_SSBD, .L__asm_ssbd_skip\@
	mov	w0, #ARM_SMCCC_ARCH_WORKAROUND_2
	mov	w1, #\state
alternative_cb	smccc_patch_fw_mitigation_conduit
	nop					// Patched to SMC/HVC #0
alternative_cb_end
.L__asm_ssbd_skip\@:
	.endm
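	/*
	 * The firmware call above follows the SMCCC convention: w0 carries the
	 * ARCH_WORKAROUND_2 function ID and w1 the requested state (1 to
	 * enable the SSBD mitigation on kernel entry, 0 to disable it again on
	 * exit to user). The conduit (SMC or HVC) is patched in at boot, so no
	 * runtime branch is needed.
	 */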
	/* Check for MTE asynchronous tag check faults */
	.macro check_mte_async_tcf, tmp, ti_flags
#ifdef CONFIG_ARM64_MTE
	.arch_extension lse
alternative_if_not ARM64_MTE
	b	1f
alternative_else_nop_endif
	mrs_s	\tmp, SYS_TFSRE0_EL1
	tbz	\tmp, #SYS_TFSR_EL1_TF0_SHIFT, 1f
	/* Asynchronous TCF occurred for TTBR0 access, set the TI flag */
	mov	\tmp, #_TIF_MTE_ASYNC_FAULT
	add	\ti_flags, tsk, #TSK_TI_FLAGS
	stset	\tmp, [\ti_flags]
	msr_s	SYS_TFSRE0_EL1, xzr
1:
#endif
	.endm
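	/*
	 * stset above is an LSE atomic (hence the .arch_extension lse): it ORs
	 * \tmp into the flags word with a single atomic store, so the TI flag
	 * can be set here without a load/modify/store race against other flag
	 * updates.
	 */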
	/* Clear the MTE asynchronous tag check faults */
	.macro clear_mte_async_tcf
#ifdef CONFIG_ARM64_MTE
alternative_if ARM64_MTE
	dsb	ish
	msr_s	SYS_TFSRE0_EL1, xzr
alternative_else_nop_endif
#endif
	.endm

	.macro	kernel_entry, el, regsize = 64
	.if	\regsize == 32
	mov	w0, w0				// zero upper 32 bits of x0
	.endif
	stp	x0, x1, [sp, #16 * 0]
	stp	x2, x3, [sp, #16 * 1]
	stp	x4, x5, [sp, #16 * 2]
	stp	x6, x7, [sp, #16 * 3]
	stp	x8, x9, [sp, #16 * 4]
	stp	x10, x11, [sp, #16 * 5]
	stp	x12, x13, [sp, #16 * 6]
	stp	x14, x15, [sp, #16 * 7]
	stp	x16, x17, [sp, #16 * 8]
	stp	x18, x19, [sp, #16 * 9]
	stp	x20, x21, [sp, #16 * 10]
	stp	x22, x23, [sp, #16 * 11]
	stp	x24, x25, [sp, #16 * 12]
	stp	x26, x27, [sp, #16 * 13]
	stp	x28, x29, [sp, #16 * 14]

	.if	\el == 0
	clear_gp_regs
	mrs	x21, sp_el0
	ldr_this_cpu	tsk, __entry_task, x20
	msr	sp_el0, tsk

	/*
	 * Ensure MDSCR_EL1.SS is clear, since we can unmask debug exceptions
	 * when scheduling.
	 */
	ldr	x19, [tsk, #TSK_TI_FLAGS]
	disable_step_tsk x19, x20

	/* Check for asynchronous tag check faults in user space */
	check_mte_async_tcf x22, x23
	apply_ssbd 1, x22, x23

	ptrauth_keys_install_kernel tsk, x20, x22, x23

	scs_load tsk, x20
	.else
	add	x21, sp, #S_FRAME_SIZE
	get_current_task tsk
	/* Save the task's original addr_limit and set USER_DS */
	ldr	x20, [tsk, #TSK_TI_ADDR_LIMIT]
	str	x20, [sp, #S_ORIG_ADDR_LIMIT]
	mov	x20, #USER_DS
	str	x20, [tsk, #TSK_TI_ADDR_LIMIT]
	/* No need to reset PSTATE.UAO, hardware's already set it to 0 for us */
	.endif /* \el == 0 */

	mrs	x22, elr_el1
	mrs	x23, spsr_el1
	stp	lr, x21, [sp, #S_LR]

	/*
	 * In order to be able to dump the contents of struct pt_regs at the
	 * time the exception was taken (in case we attempt to walk the call
	 * stack later), chain it together with the stack frames.
	 */
	.if \el == 0
	stp	xzr, xzr, [sp, #S_STACKFRAME]
	.else
	stp	x29, x22, [sp, #S_STACKFRAME]
	.endif
	add	x29, sp, #S_STACKFRAME
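	/*
	 * The frame record stored above is, roughly:
	 *	struct stackframe {
	 *		u64 fp;		// x29 of the interrupted context, or 0
	 *		u64 lr;		// ELR (interrupted PC), or 0 from EL0
	 *	};
	 * so an unwinder walking x29 naturally steps from the handler's frames
	 * into the interrupted context, terminating at the zeroed EL0 record.
	 */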
#ifdef CONFIG_ARM64_SW_TTBR0_PAN
alternative_if_not ARM64_HAS_PAN
	bl	__swpan_entry_el\el
alternative_else_nop_endif
#endif

	stp	x22, x23, [sp, #S_PC]

	/* Not in a syscall by default (el0_svc overwrites for real syscall) */
	.if	\el == 0
	mov	w21, #NO_SYSCALL
	str	w21, [sp, #S_SYSCALLNO]
	.endif

	/* Save pmr */
alternative_if ARM64_HAS_IRQ_PRIO_MASKING
	mrs_s	x20, SYS_ICC_PMR_EL1
	str	x20, [sp, #S_PMR_SAVE]
	mov	x20, #GIC_PRIO_IRQON | GIC_PRIO_PSR_I_SET
	msr_s	SYS_ICC_PMR_EL1, x20
alternative_else_nop_endif

	/* Re-enable tag checking (TCO set on exception entry) */
#ifdef CONFIG_ARM64_MTE
alternative_if ARM64_MTE
	SET_PSTATE_TCO(0)
alternative_else_nop_endif
#endif

	/*
	 * Registers that may be useful after this macro is invoked:
	 *
	 * x20 - ICC_PMR_EL1
	 * x21 - aborted SP
	 * x22 - aborted PC
	 * x23 - aborted PSTATE
	 */
	.endm

	.macro	kernel_exit, el
	.if	\el != 0
	disable_daif

	/* Restore the task's original addr_limit. */
	ldr	x20, [sp, #S_ORIG_ADDR_LIMIT]
	str	x20, [tsk, #TSK_TI_ADDR_LIMIT]

	/* No need to restore UAO, it will be restored from SPSR_EL1 */
	.endif

	/* Restore pmr */
alternative_if ARM64_HAS_IRQ_PRIO_MASKING
	ldr	x20, [sp, #S_PMR_SAVE]
	msr_s	SYS_ICC_PMR_EL1, x20
	mrs_s	x21, SYS_ICC_CTLR_EL1
	tbz	x21, #6, .L__skip_pmr_sync\@	// Check for ICC_CTLR_EL1.PMHE
	dsb	sy				// Ensure priority change is seen by redistributor
.L__skip_pmr_sync\@:
alternative_else_nop_endif
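	/*
	 * Note on the sequence above: with pseudo-NMIs, interrupt masking
	 * lives in ICC_PMR_EL1 rather than PSTATE.I, so the priority saved by
	 * kernel_entry must be written back before eret. The dsb is only
	 * required when ICC_CTLR_EL1.PMHE is set, i.e. when PMR writes are
	 * also used as a hint by the GIC redistributor.
	 */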
	ldp	x21, x22, [sp, #S_PC]		// load ELR, SPSR

#ifdef CONFIG_ARM64_SW_TTBR0_PAN
alternative_if_not ARM64_HAS_PAN
	bl	__swpan_exit_el\el
alternative_else_nop_endif
#endif

	.if	\el == 0
	ldr	x23, [sp, #S_SP]		// load return stack pointer
	msr	sp_el0, x23
	tst	x22, #PSR_MODE32_BIT		// native task?
	b.eq	3f

#ifdef CONFIG_ARM64_ERRATUM_845719
alternative_if ARM64_WORKAROUND_845719
#ifdef CONFIG_PID_IN_CONTEXTIDR
	mrs	x29, contextidr_el1
	msr	contextidr_el1, x29
#else
	msr	contextidr_el1, xzr
#endif
alternative_else_nop_endif
#endif
3:
	scs_save tsk, x0

	/* No kernel C function calls after this as user keys are set. */
	ptrauth_keys_install_user tsk, x0, x1, x2

	apply_ssbd 0, x0, x1
	.endif

	msr	elr_el1, x21			// set up the return data
	msr	spsr_el1, x22
	ldp	x0, x1, [sp, #16 * 0]
	ldp	x2, x3, [sp, #16 * 1]
	ldp	x4, x5, [sp, #16 * 2]
	ldp	x6, x7, [sp, #16 * 3]
	ldp	x8, x9, [sp, #16 * 4]
	ldp	x10, x11, [sp, #16 * 5]
	ldp	x12, x13, [sp, #16 * 6]
	ldp	x14, x15, [sp, #16 * 7]
	ldp	x16, x17, [sp, #16 * 8]
	ldp	x18, x19, [sp, #16 * 9]
	ldp	x20, x21, [sp, #16 * 10]
	ldp	x22, x23, [sp, #16 * 11]
	ldp	x24, x25, [sp, #16 * 12]
	ldp	x26, x27, [sp, #16 * 13]
	ldp	x28, x29, [sp, #16 * 14]
	.if	\el == 0
alternative_if_not ARM64_UNMAP_KERNEL_AT_EL0
	ldr	lr, [sp, #S_LR]
	add	sp, sp, #S_FRAME_SIZE		// restore sp
	eret
alternative_else_nop_endif
#ifdef CONFIG_UNMAP_KERNEL_AT_EL0
	bne	4f
	msr	far_el1, x29
	tramp_alias	x30, tramp_exit_native, x29
	br	x30
4:
	tramp_alias	x30, tramp_exit_compat, x29
	br	x30
#endif
	.else
	ldr	lr, [sp, #S_LR]
	add	sp, sp, #S_FRAME_SIZE		// restore sp

	/* Ensure any device/NC reads complete */
	alternative_insn nop, "dmb sy", ARM64_WORKAROUND_1508412

	eret
	.endif

	sb
	.endm
#ifdef CONFIG_ARM64_SW_TTBR0_PAN
	/*
	 * Set the TTBR0 PAN bit in SPSR. When the exception is taken from
	 * EL0, there is no need to check the state of TTBR0_EL1 since
	 * accesses are always enabled.
	 * Note that the meaning of this bit differs from the ARMv8.1 PAN
	 * feature as all TTBR0_EL1 accesses are disabled, not just those to
	 * user mappings.
	 */
SYM_CODE_START_LOCAL(__swpan_entry_el1)
	mrs	x21, ttbr0_el1
	tst	x21, #TTBR_ASID_MASK		// Check for the reserved ASID
	orr	x23, x23, #PSR_PAN_BIT		// Set the emulated PAN in the saved SPSR
	b.eq	1f				// TTBR0 access already disabled
	and	x23, x23, #~PSR_PAN_BIT		// Clear the emulated PAN in the saved SPSR
SYM_INNER_LABEL(__swpan_entry_el0, SYM_L_LOCAL)
	__uaccess_ttbr0_disable x21
1:	ret
SYM_CODE_END(__swpan_entry_el1)

	/*
	 * Restore access to TTBR0_EL1. If returning to EL0, no need for SPSR
	 * PAN bit checking.
	 */
SYM_CODE_START_LOCAL(__swpan_exit_el1)
	tbnz	x22, #22, 1f			// Skip re-enabling TTBR0 access if the PSR_PAN_BIT is set
	__uaccess_ttbr0_enable x0, x1
1:	and	x22, x22, #~PSR_PAN_BIT		// ARMv8.0 CPUs do not understand this bit
	ret
SYM_CODE_END(__swpan_exit_el1)

SYM_CODE_START_LOCAL(__swpan_exit_el0)
	__uaccess_ttbr0_enable x0, x1
	/*
	 * Enable errata workarounds only if returning to user. The only
	 * workaround currently required for TTBR0_EL1 changes are for the
	 * Cavium erratum 27456 (broadcast TLBI instructions may cause I-cache
	 * corruption).
	 */
	b	post_ttbr_update_workaround
SYM_CODE_END(__swpan_exit_el0)
#endif
	.macro	irq_stack_entry
	mov	x19, sp			// preserve the original sp
#ifdef CONFIG_SHADOW_CALL_STACK
	mov	x24, scs_sp		// preserve the original shadow stack
#endif

	/*
	 * Compare sp with the base of the task stack.
	 * If the top ~(THREAD_SIZE - 1) bits match, we are on a task stack,
	 * and should switch to the irq stack.
	 */
	ldr	x25, [tsk, TSK_STACK]
	eor	x25, x25, x19
	and	x25, x25, #~(THREAD_SIZE - 1)
	cbnz	x25, 9998f

	ldr_this_cpu x25, irq_stack_ptr, x26
	mov	x26, #IRQ_STACK_SIZE
	add	x26, x25, x26

	/* switch to the irq stack */
	mov	sp, x26

#ifdef CONFIG_SHADOW_CALL_STACK
	/* also switch to the irq shadow stack */
	adr_this_cpu scs_sp, irq_shadow_call_stack, x26
#endif

9998:
	.endm

	/*
	 * The callee-saved regs (x19-x29) should be preserved between
	 * irq_stack_entry and irq_stack_exit, but note that kernel_entry
	 * uses x20-x23 to store data for later use.
	 */
	.macro	irq_stack_exit
	mov	sp, x19
#ifdef CONFIG_SHADOW_CALL_STACK
	mov	scs_sp, x24
#endif
	.endm

/* GPRs used by entry code */
tsk	.req	x28		// current thread_info

/*
 * Interrupt handling.
 */
	.macro	irq_handler, handler:req
	ldr_l	x1, \handler
	mov	x0, sp
	irq_stack_entry
	blr	x1
	irq_stack_exit
	.endm

#ifdef CONFIG_ARM64_PSEUDO_NMI
	/*
	 * Set res to 0 if irqs were unmasked in interrupted context.
	 * Otherwise set res to non-0 value.
	 */
	.macro	test_irqs_unmasked res:req, pmr:req
alternative_if ARM64_HAS_IRQ_PRIO_MASKING
	sub	\res, \pmr, #GIC_PRIO_IRQON
alternative_else
	mov	\res, xzr
alternative_endif
	.endm
#endif
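	/*
	 * In test_irqs_unmasked, the subtraction is the whole test: a \pmr of
	 * GIC_PRIO_IRQON (irqs unmasked) yields 0, any higher masking priority
	 * yields a non-zero delta, so callers can use a plain cbz/cbnz on
	 * \res.
	 */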
	.macro	gic_prio_kentry_setup, tmp:req
#ifdef CONFIG_ARM64_PSEUDO_NMI
	alternative_if ARM64_HAS_IRQ_PRIO_MASKING
	mov	\tmp, #(GIC_PRIO_PSR_I_SET | GIC_PRIO_IRQON)
	msr_s	SYS_ICC_PMR_EL1, \tmp
	alternative_else_nop_endif
#endif
	.endm

	.macro el1_interrupt_handler, handler:req
	gic_prio_irq_setup pmr=x20, tmp=x1
	enable_da_f

	mov	x0, sp
	bl	enter_el1_irq_or_nmi

	irq_handler	\handler

#ifdef CONFIG_PREEMPTION
	ldr	x24, [tsk, #TSK_TI_PREEMPT]	// get preempt count
alternative_if ARM64_HAS_IRQ_PRIO_MASKING
	/*
	 * DA_F were cleared at start of handling. If anything is set in DAIF,
	 * we come back from an NMI, so skip preemption
	 */
	mrs	x0, daif
	orr	x24, x24, x0
alternative_else_nop_endif
	cbnz	x24, 1f				// preempt count != 0 || NMI return path
	bl	arm64_preempt_schedule_irq	// irq en/disable is done inside
1:
#endif

	mov	x0, sp
	bl	exit_el1_irq_or_nmi
	.endm

	.macro el0_interrupt_handler, handler:req
	gic_prio_irq_setup pmr=x20, tmp=x0
	user_exit_irqoff
	enable_da_f

	tbz	x22, #55, 1f
	bl	do_el0_irq_bp_hardening
1:
	irq_handler	\handler
	.endm

	.text

/*
 * Exception vectors.
 */
	.pushsection ".entry.text", "ax"

	.align	11
SYM_CODE_START(vectors)
	kernel_ventry	1, sync_invalid			// Synchronous EL1t
	kernel_ventry	1, irq_invalid			// IRQ EL1t
	kernel_ventry	1, fiq_invalid			// FIQ EL1t
	kernel_ventry	1, error_invalid		// Error EL1t

	kernel_ventry	1, sync				// Synchronous EL1h
	kernel_ventry	1, irq				// IRQ EL1h
	kernel_ventry	1, fiq_invalid			// FIQ EL1h
	kernel_ventry	1, error			// Error EL1h

	kernel_ventry	0, sync				// Synchronous 64-bit EL0
	kernel_ventry	0, irq				// IRQ 64-bit EL0
	kernel_ventry	0, fiq_invalid			// FIQ 64-bit EL0
	kernel_ventry	0, error			// Error 64-bit EL0

#ifdef CONFIG_COMPAT
	kernel_ventry	0, sync_compat, 32		// Synchronous 32-bit EL0
	kernel_ventry	0, irq_compat, 32		// IRQ 32-bit EL0
	kernel_ventry	0, fiq_invalid_compat, 32	// FIQ 32-bit EL0
	kernel_ventry	0, error_compat, 32		// Error 32-bit EL0
#else
	kernel_ventry	0, sync_invalid, 32		// Synchronous 32-bit EL0
	kernel_ventry	0, irq_invalid, 32		// IRQ 32-bit EL0
	kernel_ventry	0, fiq_invalid, 32		// FIQ 32-bit EL0
	kernel_ventry	0, error_invalid, 32		// Error 32-bit EL0
#endif
SYM_CODE_END(vectors)
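/*
 * For reference, the table above follows the fixed architectural layout:
 * four groups of four entries (current EL with SP0, current EL with SPx,
 * lower EL using AArch64, lower EL using AArch32), each entry 0x80 bytes
 * apart, which is why kernel_ventry pads each slot to 128 bytes and the
 * table as a whole is aligned to 2KB (.align 11).
 */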
#ifdef CONFIG_VMAP_STACK
	/*
	 * We detected an overflow in kernel_ventry, which switched to the
	 * overflow stack. Stash the exception regs, and head to our overflow
	 * handler.
	 */
__bad_stack:
	/* Restore the original x0 value */
	mrs	x0, tpidrro_el0

	/*
	 * Store the original GPRs to the new stack. The original SP (minus
	 * S_FRAME_SIZE) was stashed in tpidr_el0 by kernel_ventry.
	 */
	sub	sp, sp, #S_FRAME_SIZE
	kernel_entry 1
	mrs	x0, tpidr_el0
	add	x0, x0, #S_FRAME_SIZE
	str	x0, [sp, #S_SP]

	/* Stash the regs for handle_bad_stack */
	mov	x0, sp

	/* Time to die */
	bl	handle_bad_stack
	ASM_BUG()
#endif /* CONFIG_VMAP_STACK */

/*
 * Invalid mode handlers
 */
	.macro	inv_entry, el, reason, regsize = 64
	kernel_entry \el, \regsize
	mov	x0, sp
	mov	x1, #\reason
	mrs	x2, esr_el1
	bl	bad_mode
	ASM_BUG()
	.endm

SYM_CODE_START_LOCAL(el0_sync_invalid)
	inv_entry 0, BAD_SYNC
SYM_CODE_END(el0_sync_invalid)

SYM_CODE_START_LOCAL(el0_irq_invalid)
	inv_entry 0, BAD_IRQ
SYM_CODE_END(el0_irq_invalid)

SYM_CODE_START_LOCAL(el0_fiq_invalid)
	inv_entry 0, BAD_FIQ
SYM_CODE_END(el0_fiq_invalid)

SYM_CODE_START_LOCAL(el0_error_invalid)
	inv_entry 0, BAD_ERROR
SYM_CODE_END(el0_error_invalid)

#ifdef CONFIG_COMPAT
SYM_CODE_START_LOCAL(el0_fiq_invalid_compat)
	inv_entry 0, BAD_FIQ, 32
SYM_CODE_END(el0_fiq_invalid_compat)
#endif

SYM_CODE_START_LOCAL(el1_sync_invalid)
	inv_entry 1, BAD_SYNC
SYM_CODE_END(el1_sync_invalid)

SYM_CODE_START_LOCAL(el1_irq_invalid)
	inv_entry 1, BAD_IRQ
SYM_CODE_END(el1_irq_invalid)

SYM_CODE_START_LOCAL(el1_fiq_invalid)
	inv_entry 1, BAD_FIQ
SYM_CODE_END(el1_fiq_invalid)

SYM_CODE_START_LOCAL(el1_error_invalid)
	inv_entry 1, BAD_ERROR
SYM_CODE_END(el1_error_invalid)
/*
 * EL1 mode handlers.
 */
	.align	6
SYM_CODE_START_LOCAL_NOALIGN(el1_sync)
	kernel_entry 1
	mov	x0, sp
	bl	el1_sync_handler
	kernel_exit 1
SYM_CODE_END(el1_sync)

	.align	6
SYM_CODE_START_LOCAL_NOALIGN(el1_irq)
	kernel_entry 1
	el1_interrupt_handler handle_arch_irq
	kernel_exit 1
SYM_CODE_END(el1_irq)

/*
 * EL0 mode handlers.
 */
	.align	6
SYM_CODE_START_LOCAL_NOALIGN(el0_sync)
	kernel_entry 0
	mov	x0, sp
	bl	el0_sync_handler
	b	ret_to_user
SYM_CODE_END(el0_sync)

#ifdef CONFIG_COMPAT
	.align	6
SYM_CODE_START_LOCAL_NOALIGN(el0_sync_compat)
	kernel_entry 0, 32
	mov	x0, sp
	bl	el0_sync_compat_handler
	b	ret_to_user
SYM_CODE_END(el0_sync_compat)

	.align	6
SYM_CODE_START_LOCAL_NOALIGN(el0_irq_compat)
	kernel_entry 0, 32
	b	el0_irq_naked
SYM_CODE_END(el0_irq_compat)

SYM_CODE_START_LOCAL_NOALIGN(el0_error_compat)
	kernel_entry 0, 32
	b	el0_error_naked
SYM_CODE_END(el0_error_compat)
#endif

	.align	6
SYM_CODE_START_LOCAL_NOALIGN(el0_irq)
	kernel_entry 0
el0_irq_naked:
	el0_interrupt_handler handle_arch_irq
	b	ret_to_user
SYM_CODE_END(el0_irq)
SYM_CODE_START_LOCAL(el1_error)
	kernel_entry 1
	mrs	x1, esr_el1
	enable_dbg
	mov	x0, sp
	bl	do_serror
	kernel_exit 1
SYM_CODE_END(el1_error)

SYM_CODE_START_LOCAL(el0_error)
	kernel_entry 0
el0_error_naked:
	mrs	x25, esr_el1
	user_exit_irqoff
	enable_dbg
	mov	x0, sp
	mov	x1, x25
	bl	do_serror
	enable_da_f
	b	ret_to_user
SYM_CODE_END(el0_error)
745 * "slow" syscall return path.
747 SYM_CODE_START_LOCAL(ret_to_user)
749 gic_prio_kentry_setup tmp=x3
750 #ifdef CONFIG_TRACE_IRQFLAGS
751 bl trace_hardirqs_off
753 ldr x19, [tsk, #TSK_TI_FLAGS]
754 and x2, x19, #_TIF_WORK_MASK
755 cbnz x2, work_pending
758 /* Ignore asynchronous tag check faults in the uaccess routines */
760 enable_step_tsk x19, x2
761 #ifdef CONFIG_GCC_PLUGIN_STACKLEAK
767 * Ok, we need to do extra processing, enter the slow path.
773 ldr x19, [tsk, #TSK_TI_FLAGS] // re-check for single-step
775 SYM_CODE_END(ret_to_user)
777 .popsection // .entry.text
	// Move from tramp_pg_dir to swapper_pg_dir
	.macro tramp_map_kernel, tmp
	mrs	\tmp, ttbr1_el1
	add	\tmp, \tmp, #(2 * PAGE_SIZE)
	bic	\tmp, \tmp, #USER_ASID_FLAG
	msr	ttbr1_el1, \tmp
#ifdef CONFIG_QCOM_FALKOR_ERRATUM_1003
alternative_if ARM64_WORKAROUND_QCOM_FALKOR_E1003
	/* ASID already in \tmp[63:48] */
	movk	\tmp, #:abs_g2_nc:(TRAMP_VALIAS >> 12)
	movk	\tmp, #:abs_g1_nc:(TRAMP_VALIAS >> 12)
	/* 2MB boundary containing the vectors, so we nobble the walk cache */
	movk	\tmp, #:abs_g0_nc:((TRAMP_VALIAS & ~(SZ_2M - 1)) >> 12)
	isb
	tlbi	vae1, \tmp
	dsb	nsh
alternative_else_nop_endif
#endif /* CONFIG_QCOM_FALKOR_ERRATUM_1003 */
	.endm

	// Move from swapper_pg_dir to tramp_pg_dir
	.macro tramp_unmap_kernel, tmp
	mrs	\tmp, ttbr1_el1
	sub	\tmp, \tmp, #(2 * PAGE_SIZE)
	orr	\tmp, \tmp, #USER_ASID_FLAG
	msr	ttbr1_el1, \tmp
	/*
	 * We avoid running the post_ttbr_update_workaround here because
	 * it's only needed by Cavium ThunderX, which requires KPTI to be
	 * disabled.
	 */
	.endm

	.macro tramp_data_page	dst
	adr_l	\dst, .entry.tramp.text
	sub	\dst, \dst, PAGE_SIZE
	.endm

	.macro tramp_data_read_var	dst, var
#ifdef CONFIG_RANDOMIZE_BASE
	tramp_data_page		\dst
	add	\dst, \dst, #:lo12:__entry_tramp_data_\var
	ldr	\dst, [\dst]
#else
	ldr	\dst, =\var
#endif
	.endm

#define BHB_MITIGATION_NONE	0
#define BHB_MITIGATION_LOOP	1
#define BHB_MITIGATION_FW	2
#define BHB_MITIGATION_INSN	3
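/*
 * Of the flavours above: LOOP runs a branchy loop that evicts prior branch
 * history, FW calls into firmware (the ARCH_WORKAROUND_3 SMCCC call) to do
 * the same, and INSN uses the dedicated clearbhb hint on CPUs that implement
 * it; NONE generates the trampoline with no extra mitigation.
 */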
	.macro tramp_ventry, vector_start, regsize, kpti, bhb
	.align	7
1:
	.if	\regsize == 64
	msr	tpidrro_el0, x30	// Restored in kernel_ventry
	.endif

	.if	\bhb == BHB_MITIGATION_LOOP
	/*
	 * This sequence must appear before the first indirect branch. i.e. the
	 * ret out of tramp_ventry. It appears here because x30 is free.
	 */
	__mitigate_spectre_bhb_loop	x30
	.endif // \bhb == BHB_MITIGATION_LOOP

	.if	\bhb == BHB_MITIGATION_INSN
	clearbhb
	isb
	.endif // \bhb == BHB_MITIGATION_INSN

	.if	\kpti == 1
	/*
	 * Defend against branch aliasing attacks by pushing a dummy
	 * entry onto the return stack and using a RET instruction to
	 * enter the full-fat kernel vectors.
	 */
	bl	2f
	b	.
2:
	tramp_map_kernel	x30
alternative_insn isb, nop, ARM64_WORKAROUND_QCOM_FALKOR_E1003
	tramp_data_read_var	x30, vectors
alternative_if_not ARM64_WORKAROUND_CAVIUM_TX2_219_PRFM
	prfm	plil1strm, [x30, #(1b - \vector_start)]
alternative_else_nop_endif

	msr	vbar_el1, x30
	isb
	.else
	ldr	x30, =vectors
	.endif // \kpti == 1

	.if	\bhb == BHB_MITIGATION_FW
	/*
	 * The firmware sequence must appear before the first indirect branch.
	 * i.e. the ret out of tramp_ventry. But it also needs the stack to be
	 * mapped to save/restore the registers the SMC clobbers.
	 */
	__mitigate_spectre_bhb_fw
	.endif // \bhb == BHB_MITIGATION_FW

	add	x30, x30, #(1b - \vector_start + 4)
	ret
.org 1b + 128	// Did we overflow the ventry slot?
	.endm
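/*
 * The return-address arithmetic works because the trampoline and the real
 * vectors share the same layout: (1b - \vector_start) is this entry's byte
 * offset within its 2KB table, so adding it to the real table's base lands
 * on the matching kernel_ventry slot, and the extra +4 skips that slot's
 * leading "b .Lskip_tramp_vectors_cleanup" instruction.
 */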
	.macro tramp_exit, regsize = 64
	tramp_data_read_var	x30, this_cpu_vector
	this_cpu_offset x29
	ldr	x30, [x30, x29]

	msr	vbar_el1, x30
	ldr	lr, [sp, #S_LR]
	tramp_unmap_kernel	x29
	.if	\regsize == 64
	mrs	x29, far_el1
	.endif
	add	sp, sp, #S_FRAME_SIZE		// restore sp
	eret
	sb
	.endm

	.macro generate_tramp_vector,	kpti, bhb
.Lvector_start\@:
	.space	0x400

	.rept	4
	tramp_ventry	.Lvector_start\@, 64, \kpti, \bhb
	.endr
	.rept	4
	tramp_ventry	.Lvector_start\@, 32, \kpti, \bhb
	.endr
	.endm

#ifdef CONFIG_UNMAP_KERNEL_AT_EL0
/*
 * Exception vectors trampoline.
 * The order must match __bp_harden_el1_vectors and the
 * arm64_bp_harden_el1_vectors enum.
 */
	.pushsection ".entry.tramp.text", "ax"
	.align	11
SYM_CODE_START_NOALIGN(tramp_vectors)
#ifdef CONFIG_MITIGATE_SPECTRE_BRANCH_HISTORY
	generate_tramp_vector	kpti=1, bhb=BHB_MITIGATION_LOOP
	generate_tramp_vector	kpti=1, bhb=BHB_MITIGATION_FW
	generate_tramp_vector	kpti=1, bhb=BHB_MITIGATION_INSN
#endif /* CONFIG_MITIGATE_SPECTRE_BRANCH_HISTORY */
	generate_tramp_vector	kpti=1, bhb=BHB_MITIGATION_NONE
SYM_CODE_END(tramp_vectors)

SYM_CODE_START(tramp_exit_native)
	tramp_exit
SYM_CODE_END(tramp_exit_native)

SYM_CODE_START(tramp_exit_compat)
	tramp_exit	32
SYM_CODE_END(tramp_exit_compat)

	.popsection				// .entry.tramp.text
#ifdef CONFIG_RANDOMIZE_BASE
	.pushsection ".rodata", "a"
	.align PAGE_SHIFT
SYM_DATA_START(__entry_tramp_data_start)
__entry_tramp_data_vectors:
	.quad	vectors
#ifdef CONFIG_ARM_SDE_INTERFACE
__entry_tramp_data___sdei_asm_handler:
	.quad	__sdei_asm_handler
#endif /* CONFIG_ARM_SDE_INTERFACE */
__entry_tramp_data_this_cpu_vector:
	.quad	this_cpu_vector
SYM_DATA_END(__entry_tramp_data_start)
	.popsection				// .rodata
#endif /* CONFIG_RANDOMIZE_BASE */
#endif /* CONFIG_UNMAP_KERNEL_AT_EL0 */
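/*
 * With CONFIG_RANDOMIZE_BASE, the trampoline can't use a literal pool to
 * find these variables: literals would be fixed up with the randomized
 * kernel VA and would leak the kernel's location via the EL0-visible
 * trampoline page. Instead the pointers live in the dedicated data page
 * mapped just below .entry.tramp.text, which tramp_data_read_var
 * dereferences.
 */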
/*
 * Exception vectors for spectre mitigations on entry from EL1 when
 * kpti is not in use.
 */
	.macro generate_el1_vector, bhb
.Lvector_start\@:
	kernel_ventry	1, sync_invalid			// Synchronous EL1t
	kernel_ventry	1, irq_invalid			// IRQ EL1t
	kernel_ventry	1, fiq_invalid			// FIQ EL1t
	kernel_ventry	1, error_invalid		// Error EL1t

	kernel_ventry	1, sync				// Synchronous EL1h
	kernel_ventry	1, irq				// IRQ EL1h
	kernel_ventry	1, fiq_invalid			// FIQ EL1h
	kernel_ventry	1, error			// Error EL1h

	.rept	4
	tramp_ventry	.Lvector_start\@, 64, 0, \bhb
	.endr
	.rept	4
	tramp_ventry	.Lvector_start\@, 32, 0, \bhb
	.endr
	.endm

/* The order must match tramp_vecs and the arm64_bp_harden_el1_vectors enum. */
	.pushsection ".entry.text", "ax"
	.align	11
SYM_CODE_START(__bp_harden_el1_vectors)
#ifdef CONFIG_MITIGATE_SPECTRE_BRANCH_HISTORY
	generate_el1_vector	bhb=BHB_MITIGATION_LOOP
	generate_el1_vector	bhb=BHB_MITIGATION_FW
	generate_el1_vector	bhb=BHB_MITIGATION_INSN
#endif /* CONFIG_MITIGATE_SPECTRE_BRANCH_HISTORY */
SYM_CODE_END(__bp_harden_el1_vectors)
	.popsection
/*
 * Register switch for AArch64. The callee-saved registers need to be saved
 * and restored. On entry:
 * x0 = previous task_struct (must be preserved across the switch)
 * x1 = next task_struct
 * Previous and next are guaranteed not to be the same.
 *
 */
SYM_FUNC_START(cpu_switch_to)
	mov	x10, #THREAD_CPU_CONTEXT
	add	x8, x0, x10
	mov	x9, sp
	stp	x19, x20, [x8], #16		// store callee-saved registers
	stp	x21, x22, [x8], #16
	stp	x23, x24, [x8], #16
	stp	x25, x26, [x8], #16
	stp	x27, x28, [x8], #16
	stp	x29, x9, [x8], #16
	str	lr, [x8]
	add	x8, x1, x10
	ldp	x19, x20, [x8], #16		// restore callee-saved registers
	ldp	x21, x22, [x8], #16
	ldp	x23, x24, [x8], #16
	ldp	x25, x26, [x8], #16
	ldp	x27, x28, [x8], #16
	ldp	x29, x9, [x8], #16
	ldr	lr, [x8]
	mov	sp, x9
	msr	sp_el0, x1
	ptrauth_keys_install_kernel x1, x8, x9, x10
	scs_save x0, x8
	scs_load x1, x8
	ret
SYM_FUNC_END(cpu_switch_to)
NOKPROBE(cpu_switch_to)
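/*
 * In rough C terms, cpu_switch_to above implements:
 *
 *	struct task_struct *cpu_switch_to(struct task_struct *prev,
 *					  struct task_struct *next)
 *	{
 *		save_callee_saved(&prev->thread.cpu_context);	// x19-x29, sp, lr
 *		restore_callee_saved(&next->thread.cpu_context);
 *		current = next;					// via sp_el0
 *		return prev;					// x0 untouched
 *	}
 *
 * Only callee-saved state is touched: everything else was saved by
 * kernel_entry on the way in.
 */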
/*
 * This is how we return from a fork.
 */
SYM_CODE_START(ret_from_fork)
	bl	schedule_tail
	cbz	x19, 1f				// not a kernel thread
	mov	x0, x20
	blr	x19
1:	get_current_task tsk
	b	ret_to_user
SYM_CODE_END(ret_from_fork)
NOKPROBE(ret_from_fork)

#ifdef CONFIG_ARM_SDE_INTERFACE

#include <asm/sdei.h>
#include <uapi/linux/arm_sdei.h>

.macro sdei_handler_exit exit_mode
	/* On success, this call never returns... */
	cmp	\exit_mode, #SDEI_EXIT_SMC
	b.ne	1f
	smc	#0
	b	.
1:	hvc	#0
	b	.
.endm
#ifdef CONFIG_UNMAP_KERNEL_AT_EL0
/*
 * The regular SDEI entry point may have been unmapped along with the rest of
 * the kernel. This trampoline restores the kernel mapping to make the x1 memory
 * argument accessible.
 *
 * This clobbers x4, __sdei_handler() will restore this from firmware's
 * copy.
 */
.ltorg
.pushsection ".entry.tramp.text", "ax"
SYM_CODE_START(__sdei_asm_entry_trampoline)
	mrs	x4, ttbr1_el1
	tbz	x4, #USER_ASID_BIT, 1f

	tramp_map_kernel tmp=x4
	isb
	mov	x4, xzr

	/*
	 * Use reg->interrupted_regs.addr_limit to remember whether to unmap
	 * the kernel on exit.
	 */
1:	str	x4, [x1, #(SDEI_EVENT_INTREGS + S_ORIG_ADDR_LIMIT)]

	tramp_data_read_var	x4, __sdei_asm_handler
	br	x4
SYM_CODE_END(__sdei_asm_entry_trampoline)
NOKPROBE(__sdei_asm_entry_trampoline)

/*
 * Make the exit call and restore the original ttbr1_el1
 *
 * x0 & x1: setup for the exit API call
 * x2: exit_mode
 * x4: struct sdei_registered_event argument from registration time.
 */
SYM_CODE_START(__sdei_asm_exit_trampoline)
	ldr	x4, [x4, #(SDEI_EVENT_INTREGS + S_ORIG_ADDR_LIMIT)]
	cbnz	x4, 1f

	tramp_unmap_kernel	tmp=x4

1:	sdei_handler_exit exit_mode=x2
SYM_CODE_END(__sdei_asm_exit_trampoline)
NOKPROBE(__sdei_asm_exit_trampoline)

.popsection		// .entry.tramp.text
#endif /* CONFIG_UNMAP_KERNEL_AT_EL0 */
/*
 * Software Delegated Exception entry point.
 *
 * x0: Event number
 * x1: struct sdei_registered_event argument from registration time.
 * x2: interrupted PC
 * x3: interrupted PSTATE
 * x4: maybe clobbered by the trampoline
 *
 * Firmware has preserved x0->x17 for us, we must save/restore the rest to
 * follow SMC-CC. We save (or retrieve) all the registers as the handler may
 * want them.
 */
SYM_CODE_START(__sdei_asm_handler)
	stp	x2, x3, [x1, #SDEI_EVENT_INTREGS + S_PC]
	stp	x4, x5, [x1, #SDEI_EVENT_INTREGS + 16 * 2]
	stp	x6, x7, [x1, #SDEI_EVENT_INTREGS + 16 * 3]
	stp	x8, x9, [x1, #SDEI_EVENT_INTREGS + 16 * 4]
	stp	x10, x11, [x1, #SDEI_EVENT_INTREGS + 16 * 5]
	stp	x12, x13, [x1, #SDEI_EVENT_INTREGS + 16 * 6]
	stp	x14, x15, [x1, #SDEI_EVENT_INTREGS + 16 * 7]
	stp	x16, x17, [x1, #SDEI_EVENT_INTREGS + 16 * 8]
	stp	x18, x19, [x1, #SDEI_EVENT_INTREGS + 16 * 9]
	stp	x20, x21, [x1, #SDEI_EVENT_INTREGS + 16 * 10]
	stp	x22, x23, [x1, #SDEI_EVENT_INTREGS + 16 * 11]
	stp	x24, x25, [x1, #SDEI_EVENT_INTREGS + 16 * 12]
	stp	x26, x27, [x1, #SDEI_EVENT_INTREGS + 16 * 13]
	stp	x28, x29, [x1, #SDEI_EVENT_INTREGS + 16 * 14]
	mov	x4, sp
	stp	lr, x4, [x1, #SDEI_EVENT_INTREGS + S_LR]
	mov	x19, x1

	/* Store the registered-event for crash_smp_send_stop() */
	ldrb	w4, [x19, #SDEI_EVENT_PRIORITY]
	cbnz	w4, 1f
	adr_this_cpu dst=x5, sym=sdei_active_normal_event, tmp=x6
	b	2f
1:	adr_this_cpu dst=x5, sym=sdei_active_critical_event, tmp=x6
2:	str	x19, [x5]

#ifdef CONFIG_VMAP_STACK
	/*
	 * entry.S may have been using sp as a scratch register, find whether
	 * this is a normal or critical event and switch to the appropriate
	 * stack for this CPU.
	 */
	cbnz	w4, 1f
	ldr_this_cpu dst=x5, sym=sdei_stack_normal_ptr, tmp=x6
	b	2f
1:	ldr_this_cpu dst=x5, sym=sdei_stack_critical_ptr, tmp=x6
2:	mov	x6, #SDEI_STACK_SIZE
	add	x5, x5, x6
	mov	sp, x5
#endif

#ifdef CONFIG_SHADOW_CALL_STACK
	/* Use a separate shadow call stack for normal and critical events */
	cbnz	w4, 3f
	adr_this_cpu dst=scs_sp, sym=sdei_shadow_call_stack_normal, tmp=x6
	b	4f
3:	adr_this_cpu dst=scs_sp, sym=sdei_shadow_call_stack_critical, tmp=x6
4:
#endif

	/*
	 * We may have interrupted userspace, or a guest, or exit-from or
	 * return-to either of these. We can't trust sp_el0, restore it.
	 */
	mrs	x28, sp_el0
	ldr_this_cpu	dst=x0, sym=__entry_task, tmp=x1
	msr	sp_el0, x0

	/* If we interrupted the kernel point to the previous stack/frame. */
	and	x0, x3, #0xc
	mrs	x1, CurrentEL
	cmp	x0, x1
	csel	x29, x29, xzr, eq	// fp, or zero
	csel	x4, x2, xzr, eq		// elr, or zero

	stp	x29, x4, [sp, #-16]!
	mov	x29, sp

	add	x0, x19, #SDEI_EVENT_INTREGS
	mov	x1, x19
	bl	__sdei_handler

	msr	sp_el0, x28
	/* restore regs >x17 that we clobbered */
	mov	x4, x19		// keep x4 for __sdei_asm_exit_trampoline
	ldp	x28, x29, [x4, #SDEI_EVENT_INTREGS + 16 * 14]
	ldp	x18, x19, [x4, #SDEI_EVENT_INTREGS + 16 * 9]
	ldp	lr, x1, [x4, #SDEI_EVENT_INTREGS + S_LR]
	mov	sp, x1

	mov	x1, x0			// address to complete_and_resume
	/* x0 = (x0 <= 1) ? EVENT_COMPLETE:EVENT_COMPLETE_AND_RESUME */
	cmp	x0, #1
	mov_q	x2, SDEI_1_0_FN_SDEI_EVENT_COMPLETE
	mov_q	x3, SDEI_1_0_FN_SDEI_EVENT_COMPLETE_AND_RESUME
	csel	x0, x2, x3, ls

	ldr_l	x2, sdei_exit_mode

	/* Clear the registered-event seen by crash_smp_send_stop() */
	ldrb	w3, [x4, #SDEI_EVENT_PRIORITY]
	cbnz	w3, 1f
	adr_this_cpu dst=x5, sym=sdei_active_normal_event, tmp=x6
	b	2f
1:	adr_this_cpu dst=x5, sym=sdei_active_critical_event, tmp=x6
2:	str	xzr, [x5]

alternative_if_not ARM64_UNMAP_KERNEL_AT_EL0
	sdei_handler_exit exit_mode=x2
alternative_else_nop_endif

#ifdef CONFIG_UNMAP_KERNEL_AT_EL0
	tramp_alias	dst=x5, sym=__sdei_asm_exit_trampoline, tmp=x3
	br	x5
#endif
SYM_CODE_END(__sdei_asm_handler)
NOKPROBE(__sdei_asm_handler)

SYM_CODE_START(__sdei_handler_abort)
	mov_q	x0, SDEI_1_0_FN_SDEI_EVENT_COMPLETE_AND_RESUME
	adr	x1, 1f
	ldr_l	x2, sdei_exit_mode
	sdei_handler_exit exit_mode=x2
	// exit the handler and jump to the next instruction.
	// Exit will stomp x0-x17, PSTATE, ELR_ELx, and SPSR_ELx.
1:	ret
SYM_CODE_END(__sdei_handler_abort)
NOKPROBE(__sdei_handler_abort)
#endif /* CONFIG_ARM_SDE_INTERFACE */