/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Low-level exception handling code
 *
 * Copyright (C) 2012 ARM Ltd.
 * Authors:	Catalin Marinas <catalin.marinas@arm.com>
 *		Will Deacon <will.deacon@arm.com>
 */
#include <linux/arm-smccc.h>
#include <linux/init.h>
#include <linux/linkage.h>

#include <asm/alternative.h>
#include <asm/assembler.h>
#include <asm/asm-offsets.h>
#include <asm/cpufeature.h>
#include <asm/errno.h>
#include <asm/memory.h>
#include <asm/processor.h>
#include <asm/ptrace.h>
#include <asm/thread_info.h>
#include <asm/asm-uaccess.h>
#include <asm/unistd.h>
/*
 * Context tracking subsystem. Used to instrument transitions
 * between user and kernel mode.
 */
	.macro ct_user_exit_irqoff
#ifdef CONFIG_CONTEXT_TRACKING
	bl	enter_from_user_mode
#endif
	.endm

	.macro ct_user_enter
#ifdef CONFIG_CONTEXT_TRACKING
	bl	context_tracking_user_enter
#endif
	.endm

	.macro	clear_gp_regs
	.irp	n,0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29
	mov	x\n, xzr
	.endr
	.endm
	.macro kernel_ventry, el, label, regsize = 64
	.align 7
.Lventry_start\@:
	.if	\el == 0
	/*
	 * This must be the first instruction of the EL0 vector entries. It is
	 * skipped by the trampoline vectors, to trigger the cleanup.
	 */
	b	.Lskip_tramp_vectors_cleanup\@
	.if	\regsize == 64
	mrs	x30, tpidrro_el0
	msr	tpidrro_el0, xzr
	.else
	mov	x30, xzr
	.endif
.Lskip_tramp_vectors_cleanup\@:
	.endif

	sub	sp, sp, #S_FRAME_SIZE
#ifdef CONFIG_VMAP_STACK
	/*
	 * Test whether the SP has overflowed, without corrupting a GPR.
	 * Task and IRQ stacks are aligned to (1 << THREAD_SHIFT).
	 */
	add	sp, sp, x0			// sp' = sp + x0
	sub	x0, sp, x0			// x0' = sp' - x0 = (sp + x0) - x0 = sp
	tbnz	x0, #THREAD_SHIFT, 0f
	sub	x0, sp, x0			// x0'' = sp' - x0' = (sp + x0) - sp = x0
	sub	sp, sp, x0			// sp'' = sp' - x0 = (sp + x0) - x0 = sp
	b	el\()\el\()_\label
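	/*
	 * Illustration of the trick above: the first add/sub pair swaps
	 * sp and x0 without needing a scratch register, leaving the
	 * original (decremented) sp in x0. Task and IRQ stacks are sized
	 * and aligned so that bit THREAD_SHIFT of any valid sp is clear;
	 * if it is set, sp has run off the bottom of its stack and we
	 * take the overflow path at 0f. Otherwise the second sub pair
	 * undoes the swap and entry continues normally.
	 */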
0:
	/*
	 * Either we've just detected an overflow, or we've taken an exception
	 * while on the overflow stack. Either way, we won't return to
	 * userspace, and can clobber EL0 registers to free up GPRs.
	 */

	/* Stash the original SP (minus S_FRAME_SIZE) in tpidr_el0. */
	msr	tpidr_el0, x0

	/* Recover the original x0 value and stash it in tpidrro_el0 */
	sub	x0, sp, x0
	msr	tpidrro_el0, x0

	/* Switch to the overflow stack */
	adr_this_cpu sp, overflow_stack + OVERFLOW_STACK_SIZE, x0

	/*
	 * Check whether we were already on the overflow stack. This may happen
	 * after panic() re-enables interrupts.
	 */
	mrs	x0, tpidr_el0			// sp of interrupted context
	sub	x0, sp, x0			// delta with top of overflow stack
	tst	x0, #~(OVERFLOW_STACK_SIZE - 1)	// within range?
	b.ne	__bad_stack			// no? -> bad stack pointer

	/* We were already on the overflow stack. Restore sp/x0 and carry on. */
	sub	sp, sp, x0
	mrs	x0, tpidrro_el0
#endif
	b	el\()\el\()_\label
.org .Lventry_start\@ + 128	// Did we overflow the ventry slot?
	.endm
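/*
 * Each architectural vector slot is 0x80 bytes (hence the "+ 128" above);
 * the .org directive turns an entry that outgrows its slot into an
 * assembly-time error instead of a silent overlap with the next vector.
 */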
	.macro tramp_alias, dst, sym, tmp
	mov_q	\dst, TRAMP_VALIAS
	adr_l	\tmp, \sym
	add	\dst, \dst, \tmp
	adr_l	\tmp, .entry.tramp.text
	sub	\dst, \dst, \tmp
	.endm

	// This macro corrupts x0-x3. It is the caller's duty
	// to save/restore them if required.
	.macro apply_ssbd, state, tmp1, tmp2
#ifdef CONFIG_ARM64_SSBD
alternative_cb	arm64_enable_wa2_handling
	b	.L__asm_ssbd_skip\@
alternative_cb_end
	ldr_this_cpu	\tmp2, arm64_ssbd_callback_required, \tmp1
	cbz	\tmp2,	.L__asm_ssbd_skip\@
	ldr	\tmp2, [tsk, #TSK_TI_FLAGS]
	tbnz	\tmp2, #TIF_SSBD, .L__asm_ssbd_skip\@
	mov	w0, #ARM_SMCCC_ARCH_WORKAROUND_2
	mov	w1, #\state
alternative_cb	arm64_update_smccc_conduit
	nop					// Patched to SMC/HVC #0
alternative_cb_end
.L__asm_ssbd_skip\@:
#endif
	.endm
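	/*
	 * Roughly, in C (a sketch, not the kernel's implementation):
	 *
	 *	if (this_cpu(arm64_ssbd_callback_required) &&
	 *	    !(current_thread_info()->flags & (1UL << TIF_SSBD)))
	 *		conduit_call(ARM_SMCCC_ARCH_WORKAROUND_2, state);
	 *
	 * where the conduit (SMC or HVC #0) is patched in at boot by the
	 * arm64_update_smccc_conduit alternative callback.
	 */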
	.macro	kernel_entry, el, regsize = 64
	.if	\regsize == 32
	mov	w0, w0				// zero upper 32 bits of x0
	.endif
	stp	x0, x1, [sp, #16 * 0]
	stp	x2, x3, [sp, #16 * 1]
	stp	x4, x5, [sp, #16 * 2]
	stp	x6, x7, [sp, #16 * 3]
	stp	x8, x9, [sp, #16 * 4]
	stp	x10, x11, [sp, #16 * 5]
	stp	x12, x13, [sp, #16 * 6]
	stp	x14, x15, [sp, #16 * 7]
	stp	x16, x17, [sp, #16 * 8]
	stp	x18, x19, [sp, #16 * 9]
	stp	x20, x21, [sp, #16 * 10]
	stp	x22, x23, [sp, #16 * 11]
	stp	x24, x25, [sp, #16 * 12]
	stp	x26, x27, [sp, #16 * 13]
	stp	x28, x29, [sp, #16 * 14]
	.if	\el == 0
	clear_gp_regs
	mrs	x21, sp_el0
	ldr_this_cpu	tsk, __entry_task, x20	// Ensure MDSCR_EL1.SS is clear,
	ldr	x19, [tsk, #TSK_TI_FLAGS]	// since we can unmask debug
	disable_step_tsk x19, x20		// exceptions when scheduling.

	apply_ssbd 1, x22, x23

	.else
	add	x21, sp, #S_FRAME_SIZE
	get_current_task tsk
	/* Save the task's original addr_limit and set USER_DS */
	ldr	x20, [tsk, #TSK_TI_ADDR_LIMIT]
	str	x20, [sp, #S_ORIG_ADDR_LIMIT]
	mov	x20, #USER_DS
	str	x20, [tsk, #TSK_TI_ADDR_LIMIT]
	/* No need to reset PSTATE.UAO, hardware's already set it to 0 for us */
	.endif /* \el == 0 */
	mrs	x22, elr_el1
	mrs	x23, spsr_el1
	stp	lr, x21, [sp, #S_LR]

	/*
	 * In order to be able to dump the contents of struct pt_regs at the
	 * time the exception was taken (in case we attempt to walk the call
	 * stack later), chain it together with the stack frames.
	 */
	.if \el == 0
	stp	xzr, xzr, [sp, #S_STACKFRAME]
	.else
	stp	x29, x22, [sp, #S_STACKFRAME]
	.endif
	add	x29, sp, #S_STACKFRAME
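	/*
	 * Resulting layout (sketch): x29 now points at the S_STACKFRAME
	 * pair inside pt_regs, which records the interrupted x29 and ELR
	 * (or zeroes when entered from EL0), so a stack walk steps from
	 * the in-kernel frames through the exception frame into the
	 * interrupted context.
	 */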
#ifdef CONFIG_ARM64_SW_TTBR0_PAN
	/*
	 * Set the TTBR0 PAN bit in SPSR. When the exception is taken from
	 * EL0, there is no need to check the state of TTBR0_EL1 since
	 * accesses are always enabled.
	 * Note that the meaning of this bit differs from the ARMv8.1 PAN
	 * feature as all TTBR0_EL1 accesses are disabled, not just those to
	 * user mappings.
	 */
alternative_if ARM64_HAS_PAN
	b	1f				// skip TTBR0 PAN
alternative_else_nop_endif

	.if	\el != 0
	mrs	x21, ttbr0_el1
	tst	x21, #TTBR_ASID_MASK		// Check for the reserved ASID
	orr	x23, x23, #PSR_PAN_BIT		// Set the emulated PAN in the saved SPSR
	b.eq	1f				// TTBR0 access already disabled
	and	x23, x23, #~PSR_PAN_BIT		// Clear the emulated PAN in the saved SPSR
	.endif

	__uaccess_ttbr0_disable x21
1:
#endif
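	/*
	 * Summary of the emulated PAN above (a sketch): the saved SPSR is
	 * marked PSR_PAN_BIT-set unless TTBR0_EL1 was found live (ASID
	 * not the reserved one), and __uaccess_ttbr0_disable then points
	 * TTBR0 at the reserved tables so stray user accesses fault until
	 * uaccess explicitly re-enables them.
	 */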
	stp	x22, x23, [sp, #S_PC]

	/* Not in a syscall by default (el0_svc overwrites for real syscall) */
	.if	\el == 0
	mov	w21, #NO_SYSCALL
	str	w21, [sp, #S_SYSCALLNO]
	.endif

	/*
	 * Set sp_el0 to current thread_info.
	 */
	.if	\el == 0
	msr	sp_el0, tsk
	.endif

	/* Save pmr */
alternative_if ARM64_HAS_IRQ_PRIO_MASKING
	mrs_s	x20, SYS_ICC_PMR_EL1
	str	x20, [sp, #S_PMR_SAVE]
alternative_else_nop_endif

	/*
	 * Registers that may be useful after this macro is invoked:
	 *
	 * x20 - ICC_PMR_EL1
	 * x21 - aborted SP
	 * x22 - aborted PC
	 * x23 - aborted PSTATE
	 */
	.endm
	.macro	kernel_exit, el
	.if	\el != 0
	disable_daif

	/* Restore the task's original addr_limit. */
	ldr	x20, [sp, #S_ORIG_ADDR_LIMIT]
	str	x20, [tsk, #TSK_TI_ADDR_LIMIT]

	/* No need to restore UAO, it will be restored from SPSR_EL1 */
	.endif

	/* Restore pmr */
alternative_if ARM64_HAS_IRQ_PRIO_MASKING
	ldr	x20, [sp, #S_PMR_SAVE]
	msr_s	SYS_ICC_PMR_EL1, x20
	/* Ensure priority change is seen by redistributor */
	dsb	sy
alternative_else_nop_endif
	ldp	x21, x22, [sp, #S_PC]		// load ELR, SPSR
	.if	\el == 0
	ct_user_enter
	.endif

#ifdef CONFIG_ARM64_SW_TTBR0_PAN
	/*
	 * Restore access to TTBR0_EL1. If returning to EL0, no need for SPSR
	 * PAN bit checking.
	 */
alternative_if ARM64_HAS_PAN
	b	2f				// skip TTBR0 PAN
alternative_else_nop_endif

	.if	\el != 0
	tbnz	x22, #22, 1f			// Skip re-enabling TTBR0 access if the PSR_PAN_BIT is set
	.endif

	__uaccess_ttbr0_enable x0, x1

	.if	\el == 0
	/*
	 * Enable errata workarounds only if returning to user. The only
	 * workaround currently required for TTBR0_EL1 changes are for the
	 * Cavium erratum 27456 (broadcast TLBI instructions may cause I-cache
	 * corruption).
	 */
	bl	post_ttbr_update_workaround
	.endif

1:
	.if	\el != 0
	and	x22, x22, #~PSR_PAN_BIT		// ARMv8.0 CPUs do not understand this bit
	.endif
2:
#endif
	.if	\el == 0
	ldr	x23, [sp, #S_SP]		// load return stack pointer
	msr	sp_el0, x23
	tst	x22, #PSR_MODE32_BIT		// native task?
	b.eq	3f

#ifdef CONFIG_ARM64_ERRATUM_845719
alternative_if ARM64_WORKAROUND_845719
#ifdef CONFIG_PID_IN_CONTEXTIDR
	mrs	x29, contextidr_el1
	msr	contextidr_el1, x29
#else
	msr	contextidr_el1, xzr
#endif
alternative_else_nop_endif
#endif
3:
#ifdef CONFIG_ARM64_ERRATUM_1418040
alternative_if_not ARM64_WORKAROUND_1418040
	b	4f
alternative_else_nop_endif
	/*
	 * if (x22.mode32 == cntkctl_el1.el0vcten)
	 *     cntkctl_el1.el0vcten = ~cntkctl_el1.el0vcten
	 */
	mrs	x1, cntkctl_el1
	eon	x0, x1, x22, lsr #3
	tbz	x0, #1, 4f
	eor	x1, x1, #2	// ARCH_TIMER_USR_VCT_ACCESS_EN
	msr	cntkctl_el1, x1
4:
#endif
	.endif
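	/*
	 * How the bit-twiddling above works: PSR_MODE32_BIT is bit 4 of
	 * the saved SPSR, so "x22, lsr #3" lines it up with EL0VCTEN
	 * (bit 1 of CNTKCTL_EL1). eon sets bit 1 of x0 exactly when the
	 * two bits are equal, i.e. when the trap state is wrong for the
	 * task being returned to, and only then is EL0VCTEN toggled.
	 */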
	msr	elr_el1, x21			// set up the return data
	msr	spsr_el1, x22
	ldp	x0, x1, [sp, #16 * 0]
	ldp	x2, x3, [sp, #16 * 1]
	ldp	x4, x5, [sp, #16 * 2]
	ldp	x6, x7, [sp, #16 * 3]
	ldp	x8, x9, [sp, #16 * 4]
	ldp	x10, x11, [sp, #16 * 5]
	ldp	x12, x13, [sp, #16 * 6]
	ldp	x14, x15, [sp, #16 * 7]
	ldp	x16, x17, [sp, #16 * 8]
	ldp	x18, x19, [sp, #16 * 9]
	ldp	x20, x21, [sp, #16 * 10]
	ldp	x22, x23, [sp, #16 * 11]
	ldp	x24, x25, [sp, #16 * 12]
	ldp	x26, x27, [sp, #16 * 13]
	ldp	x28, x29, [sp, #16 * 14]
	.if	\el == 0
alternative_if_not ARM64_UNMAP_KERNEL_AT_EL0
	ldr	lr, [sp, #S_LR]
	add	sp, sp, #S_FRAME_SIZE		// restore sp
	eret
alternative_else_nop_endif
#ifdef CONFIG_UNMAP_KERNEL_AT_EL0
	msr	far_el1, x29
	tst	x22, #PSR_MODE32_BIT		// native task?
	b.ne	4f
	tramp_alias	x30, tramp_exit_native, x29
	br	x30
4:
	tramp_alias	x30, tramp_exit_compat, x29
	br	x30
#endif
	.else
	ldr	lr, [sp, #S_LR]
	add	sp, sp, #S_FRAME_SIZE		// restore sp
	eret
	.endif
	sb
	.endm
	.macro	irq_stack_entry
	mov	x19, sp			// preserve the original sp

	/*
	 * Compare sp with the base of the task stack.
	 * If the top ~(THREAD_SIZE - 1) bits match, we are on a task stack,
	 * and should switch to the irq stack.
	 */
	ldr	x25, [tsk, TSK_STACK]
	eor	x25, x25, x19
	and	x25, x25, #~(THREAD_SIZE - 1)
	cbnz	x25, 9998f

	ldr_this_cpu x25, irq_stack_ptr, x26
	mov	x26, #IRQ_STACK_SIZE
	add	x26, x25, x26

	/* switch to the irq stack */
	mov	sp, x26
9998:
	.endm

	/*
	 * x19 should be preserved between irq_stack_entry and
	 * irq_stack_exit.
	 */
	.macro	irq_stack_exit
	mov	sp, x19
	.endm

/* GPRs used by entry code */
tsk	.req	x28		// current thread_info
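/*
 * Worked example of the irq_stack_entry check above (illustrative):
 * with THREAD_SIZE = 16K, "eor" followed by "and #~(THREAD_SIZE - 1)"
 * clears the offset-within-stack bits, so the result is zero iff sp
 * and the task stack base lie in the same THREAD_SIZE-aligned window,
 * i.e. we are still on the task stack and may switch to the IRQ stack.
 */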
/*
 * Interrupt handling.
 */
	.macro	irq_handler
	ldr_l	x1, handle_arch_irq
	mov	x0, sp
	irq_stack_entry
	blr	x1
	irq_stack_exit
	.endm

#ifdef CONFIG_ARM64_PSEUDO_NMI
	/*
	 * Set res to 0 if irqs were unmasked in interrupted context.
	 * Otherwise set res to non-0 value.
	 */
	.macro	test_irqs_unmasked res:req, pmr:req
alternative_if ARM64_HAS_IRQ_PRIO_MASKING
	sub	\res, \pmr, #GIC_PRIO_IRQON
alternative_else
	mov	\res, xzr
alternative_endif
	.endm
#endif
	.macro	gic_prio_kentry_setup, tmp:req
#ifdef CONFIG_ARM64_PSEUDO_NMI
	alternative_if ARM64_HAS_IRQ_PRIO_MASKING
	mov	\tmp, #(GIC_PRIO_PSR_I_SET | GIC_PRIO_IRQON)
	msr_s	SYS_ICC_PMR_EL1, \tmp
	alternative_else_nop_endif
#endif
	.endm

	.macro	gic_prio_irq_setup, pmr:req, tmp:req
#ifdef CONFIG_ARM64_PSEUDO_NMI
	alternative_if ARM64_HAS_IRQ_PRIO_MASKING
	orr	\tmp, \pmr, #GIC_PRIO_PSR_I_SET
	msr_s	SYS_ICC_PMR_EL1, \tmp
	alternative_else_nop_endif
#endif
	.endm
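/*
 * Background note: with pseudo-NMI, "interrupts off" is expressed by
 * lowering the GIC priority mask (PMR) rather than setting PSTATE.I,
 * so higher-priority (NMI) interrupts can still be delivered. The
 * GIC_PRIO_PSR_I_SET flag records that PSTATE.I is set as well.
 */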
	.pushsection ".entry.text", "ax"

	.align	11
ENTRY(vectors)
	kernel_ventry	1, sync_invalid			// Synchronous EL1t
	kernel_ventry	1, irq_invalid			// IRQ EL1t
	kernel_ventry	1, fiq_invalid			// FIQ EL1t
	kernel_ventry	1, error_invalid		// Error EL1t

	kernel_ventry	1, sync				// Synchronous EL1h
	kernel_ventry	1, irq				// IRQ EL1h
	kernel_ventry	1, fiq_invalid			// FIQ EL1h
	kernel_ventry	1, error			// Error EL1h

	kernel_ventry	0, sync				// Synchronous 64-bit EL0
	kernel_ventry	0, irq				// IRQ 64-bit EL0
	kernel_ventry	0, fiq_invalid			// FIQ 64-bit EL0
	kernel_ventry	0, error			// Error 64-bit EL0

#ifdef CONFIG_COMPAT
	kernel_ventry	0, sync_compat, 32		// Synchronous 32-bit EL0
	kernel_ventry	0, irq_compat, 32		// IRQ 32-bit EL0
	kernel_ventry	0, fiq_invalid_compat, 32	// FIQ 32-bit EL0
	kernel_ventry	0, error_compat, 32		// Error 32-bit EL0
#else
	kernel_ventry	0, sync_invalid, 32		// Synchronous 32-bit EL0
	kernel_ventry	0, irq_invalid, 32		// IRQ 32-bit EL0
	kernel_ventry	0, fiq_invalid, 32		// FIQ 32-bit EL0
	kernel_ventry	0, error_invalid, 32		// Error 32-bit EL0
#endif
END(vectors)
#ifdef CONFIG_VMAP_STACK
	/*
	 * We detected an overflow in kernel_ventry, which switched to the
	 * overflow stack. Stash the exception regs, and head to our overflow
	 * handler.
	 */
__bad_stack:
	/* Restore the original x0 value */
	mrs	x0, tpidrro_el0

	/*
	 * Store the original GPRs to the new stack. The original SP (minus
	 * S_FRAME_SIZE) was stashed in tpidr_el0 by kernel_ventry.
	 */
	sub	sp, sp, #S_FRAME_SIZE
	kernel_entry 1
	mrs	x0, tpidr_el0
	add	x0, x0, #S_FRAME_SIZE
	str	x0, [sp, #S_SP]

	/* Stash the regs for handle_bad_stack */
	mov	x0, sp

	/* Time to die */
	bl	handle_bad_stack
	ASM_BUG()
#endif /* CONFIG_VMAP_STACK */
/*
 * Invalid mode handlers
 */
	.macro	inv_entry, el, reason, regsize = 64
	kernel_entry \el, \regsize
	mov	x0, sp
	mov	x1, #\reason
	mrs	x2, esr_el1
	bl	bad_mode
	ASM_BUG()
	.endm

el0_sync_invalid:
	inv_entry 0, BAD_SYNC
ENDPROC(el0_sync_invalid)

el0_irq_invalid:
	inv_entry 0, BAD_IRQ
ENDPROC(el0_irq_invalid)

el0_fiq_invalid:
	inv_entry 0, BAD_FIQ
ENDPROC(el0_fiq_invalid)

el0_error_invalid:
	inv_entry 0, BAD_ERROR
ENDPROC(el0_error_invalid)

#ifdef CONFIG_COMPAT
el0_fiq_invalid_compat:
	inv_entry 0, BAD_FIQ, 32
ENDPROC(el0_fiq_invalid_compat)
#endif

el1_sync_invalid:
	inv_entry 1, BAD_SYNC
ENDPROC(el1_sync_invalid)

el1_irq_invalid:
	inv_entry 1, BAD_IRQ
ENDPROC(el1_irq_invalid)

el1_fiq_invalid:
	inv_entry 1, BAD_FIQ
ENDPROC(el1_fiq_invalid)

el1_error_invalid:
	inv_entry 1, BAD_ERROR
ENDPROC(el1_error_invalid)
/*
 * EL1 mode handlers.
 */
	.align	6
el1_sync:
	kernel_entry 1
	mrs	x1, esr_el1			// read the syndrome register
	lsr	x24, x1, #ESR_ELx_EC_SHIFT	// exception class
	cmp	x24, #ESR_ELx_EC_DABT_CUR	// data abort in EL1
	b.eq	el1_da
	cmp	x24, #ESR_ELx_EC_IABT_CUR	// instruction abort in EL1
	b.eq	el1_ia
	cmp	x24, #ESR_ELx_EC_SYS64		// configurable trap
	b.eq	el1_undef
	cmp	x24, #ESR_ELx_EC_PC_ALIGN	// pc alignment exception
	b.eq	el1_pc
	cmp	x24, #ESR_ELx_EC_UNKNOWN	// unknown exception in EL1
	b.eq	el1_undef
	cmp	x24, #ESR_ELx_EC_BREAKPT_CUR	// debug exception in EL1
	b.ge	el1_dbg
	b	el1_inv
el1_ia:
	/*
	 * Fall through to the Data abort case
	 */
el1_da:
	/*
	 * Data abort handling
	 */
	mrs	x3, far_el1
	inherit_daif	pstate=x23, tmp=x2
	untagged_addr	x0, x3
	mov	x2, sp				// struct pt_regs
	bl	do_mem_abort

	kernel_exit 1
el1_pc:
	/*
	 * PC alignment exception handling. We don't handle SP alignment faults,
	 * since we will have hit a recursive exception when trying to push the
	 * initial pt_regs.
	 */
	mrs	x0, far_el1
	inherit_daif	pstate=x23, tmp=x2
	mov	x2, sp
	bl	do_sp_pc_abort
	ASM_BUG()
el1_undef:
	/*
	 * Undefined instruction
	 */
	inherit_daif	pstate=x23, tmp=x2
	mov	x0, sp
	bl	do_undefinstr
	kernel_exit 1
el1_dbg:
	/*
	 * Debug exception handling
	 */
	cmp	x24, #ESR_ELx_EC_BRK64		// if BRK64
	cinc	x24, x24, eq			// set bit '0'
	tbz	x24, #0, el1_inv		// EL1 only
	gic_prio_kentry_setup tmp=x3
	mrs	x0, far_el1
	mov	x2, sp				// struct pt_regs
	bl	do_debug_exception
	kernel_exit 1
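	/*
	 * Note on the EC check above: the debug exception classes form a
	 * contiguous range starting at ESR_ELx_EC_BREAKPT_LOW, with the
	 * current-EL variants at odd values. BRK64 is an even EC that
	 * nonetheless targets the current EL, so cinc gives it an odd
	 * value and a single tbz on bit 0 filters out the EL0-only classes.
	 */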
el1_inv:
	// TODO: add support for undefined instructions in kernel mode
	inherit_daif	pstate=x23, tmp=x2
	mov	x0, sp
	mov	x2, x1
	mov	x1, #BAD_SYNC
	bl	bad_mode
	ASM_BUG()
ENDPROC(el1_sync)
	.align	6
el1_irq:
	kernel_entry 1
	gic_prio_irq_setup pmr=x20, tmp=x1
	enable_da_f

#ifdef CONFIG_ARM64_PSEUDO_NMI
	test_irqs_unmasked	res=x0, pmr=x20
	cbz	x0, 1f
	bl	asm_nmi_enter
1:
#endif

#ifdef CONFIG_TRACE_IRQFLAGS
	bl	trace_hardirqs_off
#endif

	irq_handler
#ifdef CONFIG_PREEMPT
	ldr	x24, [tsk, #TSK_TI_PREEMPT]	// get preempt count
alternative_if ARM64_HAS_IRQ_PRIO_MASKING
	/*
	 * DA_F were cleared at start of handling. If anything is set in DAIF,
	 * we come back from an NMI, so skip preemption
	 */
	mrs	x0, daif
	orr	x24, x24, x0
alternative_else_nop_endif
	cbnz	x24, 1f				// preempt count != 0 || NMI return path
	bl	arm64_preempt_schedule_irq	// irq en/disable is done inside
1:
#endif
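	/*
	 * In rough C (a sketch): preemption is attempted only when
	 *
	 *	if (!preempt_count() && !returning_from_nmi)
	 *		arm64_preempt_schedule_irq();
	 *
	 * where returning_from_nmi is inferred from the DAIF bits when
	 * IRQ priority masking is in use.
	 */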
#ifdef CONFIG_ARM64_PSEUDO_NMI
	/*
	 * When using IRQ priority masking, we can get spurious interrupts while
	 * PMR is set to GIC_PRIO_IRQOFF. An NMI might also have occurred in a
	 * section with interrupts disabled. Skip tracing in those cases.
	 */
	test_irqs_unmasked	res=x0, pmr=x20
	cbz	x0, 1f
	bl	asm_nmi_exit
1:
#endif

#ifdef CONFIG_TRACE_IRQFLAGS
#ifdef CONFIG_ARM64_PSEUDO_NMI
	test_irqs_unmasked	res=x0, pmr=x20
	cbnz	x0, 1f
#endif
	bl	trace_hardirqs_on
1:
#endif

	kernel_exit 1
ENDPROC(el1_irq)
/*
 * EL0 mode handlers.
 */
	.align	6
el0_sync:
	kernel_entry 0
	mrs	x25, esr_el1			// read the syndrome register
	lsr	x24, x25, #ESR_ELx_EC_SHIFT	// exception class
	cmp	x24, #ESR_ELx_EC_SVC64		// SVC in 64-bit state
	b.eq	el0_svc
	cmp	x24, #ESR_ELx_EC_DABT_LOW	// data abort in EL0
	b.eq	el0_da
	cmp	x24, #ESR_ELx_EC_IABT_LOW	// instruction abort in EL0
	b.eq	el0_ia
	cmp	x24, #ESR_ELx_EC_FP_ASIMD	// FP/ASIMD access
	b.eq	el0_fpsimd_acc
	cmp	x24, #ESR_ELx_EC_SVE		// SVE access
	b.eq	el0_sve_acc
	cmp	x24, #ESR_ELx_EC_FP_EXC64	// FP/ASIMD exception
	b.eq	el0_fpsimd_exc
	cmp	x24, #ESR_ELx_EC_SYS64		// configurable trap
	ccmp	x24, #ESR_ELx_EC_WFx, #4, ne
	b.eq	el0_sys
	cmp	x24, #ESR_ELx_EC_SP_ALIGN	// stack alignment exception
	b.eq	el0_sp
	cmp	x24, #ESR_ELx_EC_PC_ALIGN	// pc alignment exception
	b.eq	el0_pc
	cmp	x24, #ESR_ELx_EC_UNKNOWN	// unknown exception in EL0
	b.eq	el0_undef
	cmp	x24, #ESR_ELx_EC_BREAKPT_LOW	// debug exception in EL0
	b.ge	el0_dbg
	b	el0_inv
#ifdef CONFIG_COMPAT
	.align	6
el0_sync_compat:
	kernel_entry 0, 32
	mrs	x25, esr_el1			// read the syndrome register
	lsr	x24, x25, #ESR_ELx_EC_SHIFT	// exception class
	cmp	x24, #ESR_ELx_EC_SVC32		// SVC in 32-bit state
	b.eq	el0_svc_compat
	cmp	x24, #ESR_ELx_EC_DABT_LOW	// data abort in EL0
	b.eq	el0_da
	cmp	x24, #ESR_ELx_EC_IABT_LOW	// instruction abort in EL0
	b.eq	el0_ia
	cmp	x24, #ESR_ELx_EC_FP_ASIMD	// FP/ASIMD access
	b.eq	el0_fpsimd_acc
	cmp	x24, #ESR_ELx_EC_FP_EXC32	// FP/ASIMD exception
	b.eq	el0_fpsimd_exc
	cmp	x24, #ESR_ELx_EC_PC_ALIGN	// pc alignment exception
	b.eq	el0_pc
	cmp	x24, #ESR_ELx_EC_UNKNOWN	// unknown exception in EL0
	b.eq	el0_undef
	cmp	x24, #ESR_ELx_EC_CP15_32	// CP15 MRC/MCR trap
	b.eq	el0_cp15
	cmp	x24, #ESR_ELx_EC_CP15_64	// CP15 MRRC/MCRR trap
	b.eq	el0_cp15
	cmp	x24, #ESR_ELx_EC_CP14_MR	// CP14 MRC/MCR trap
	b.eq	el0_undef
	cmp	x24, #ESR_ELx_EC_CP14_LS	// CP14 LDC/STC trap
	b.eq	el0_undef
	cmp	x24, #ESR_ELx_EC_CP14_64	// CP14 MRRC/MCRR trap
	b.eq	el0_undef
	cmp	x24, #ESR_ELx_EC_BREAKPT_LOW	// debug exception in EL0
	b.ge	el0_dbg
	b	el0_inv
el0_svc_compat:
	gic_prio_kentry_setup tmp=x1
	mov	x0, sp
	bl	el0_svc_compat_handler
	b	ret_to_user
	.align	6
el0_irq_compat:
	kernel_entry 0, 32
	b	el0_irq_naked

el0_error_compat:
	kernel_entry 0, 32
	b	el0_error_naked

el0_cp15:
	/*
	 * Trapped CP15 (MRC, MCR, MRRC, MCRR) instructions
	 */
	ct_user_exit_irqoff
	enable_daif
	mov	x0, x25
	mov	x1, sp
	bl	do_cp15instr
	b	ret_to_user
#endif

el0_da:
	/*
	 * Data abort handling
	 */
	mrs	x26, far_el1
	ct_user_exit_irqoff
	enable_daif
	untagged_addr	x0, x26
	mov	x1, x25
	mov	x2, sp
	bl	do_mem_abort
	b	ret_to_user
el0_ia:
	/*
	 * Instruction abort handling
	 */
	mrs	x26, far_el1
	gic_prio_kentry_setup tmp=x0
	ct_user_exit_irqoff
	enable_da_f
#ifdef CONFIG_TRACE_IRQFLAGS
	bl	trace_hardirqs_off
#endif
	mov	x0, x26
	mov	x1, x25
	mov	x2, sp
	bl	do_el0_ia_bp_hardening
	b	ret_to_user
el0_fpsimd_acc:
	/*
	 * Floating Point or Advanced SIMD access
	 */
	ct_user_exit_irqoff
	enable_daif
	mov	x0, x25
	mov	x1, sp
	bl	do_fpsimd_acc
	b	ret_to_user
el0_sve_acc:
	/*
	 * Scalable Vector Extension access
	 */
	ct_user_exit_irqoff
	enable_daif
	mov	x0, x25
	mov	x1, sp
	bl	do_sve_acc
	b	ret_to_user
el0_fpsimd_exc:
	/*
	 * Floating Point, Advanced SIMD or SVE exception
	 */
	ct_user_exit_irqoff
	enable_daif
	mov	x0, x25
	mov	x1, sp
	bl	do_fpsimd_exc
	b	ret_to_user
el0_sp:
	ldr	x26, [sp, #S_SP]
	b	el0_sp_pc
el0_pc:
	mrs	x26, far_el1
el0_sp_pc:
	/*
	 * Stack or PC alignment exception handling
	 */
	gic_prio_kentry_setup tmp=x0
	ct_user_exit_irqoff
	enable_da_f
#ifdef CONFIG_TRACE_IRQFLAGS
	bl	trace_hardirqs_off
#endif
	mov	x0, x26
	mov	x1, x25
	mov	x2, sp
	bl	do_sp_pc_abort
	b	ret_to_user
el0_undef:
	/*
	 * Undefined instruction
	 */
	ct_user_exit_irqoff
	enable_daif
	mov	x0, sp
	bl	do_undefinstr
	b	ret_to_user
el0_sys:
	/*
	 * System instructions, for trapped cache maintenance instructions
	 */
	ct_user_exit_irqoff
	enable_daif
	mov	x0, x25
	mov	x1, sp
	bl	do_sysinstr
	b	ret_to_user
el0_dbg:
	/*
	 * Debug exception handling
	 */
	tbnz	x24, #0, el0_inv		// EL0 only
	mrs	x24, far_el1
	gic_prio_kentry_setup tmp=x3
	ct_user_exit_irqoff
	mov	x0, x24
	mov	x1, x25
	mov	x2, sp
	bl	do_debug_exception
	enable_da_f
	b	ret_to_user
el0_inv:
	ct_user_exit_irqoff
	enable_daif
	mov	x0, sp
	mov	x1, #BAD_SYNC
	mov	x2, x25
	bl	bad_el0_sync
	b	ret_to_user
ENDPROC(el0_sync)
	.align	6
el0_irq:
	kernel_entry 0
el0_irq_naked:
	gic_prio_irq_setup pmr=x20, tmp=x0
	ct_user_exit_irqoff
	enable_da_f

#ifdef CONFIG_TRACE_IRQFLAGS
	bl	trace_hardirqs_off
#endif

#ifdef CONFIG_HARDEN_BRANCH_PREDICTOR
	tbz	x22, #55, 1f
	bl	do_el0_irq_bp_hardening
1:
#endif
	irq_handler

#ifdef CONFIG_TRACE_IRQFLAGS
	bl	trace_hardirqs_on
#endif
	b	ret_to_user
ENDPROC(el0_irq)
el1_error:
	kernel_entry 1
	mrs	x1, esr_el1
	gic_prio_kentry_setup tmp=x2
	enable_dbg
	mov	x0, sp
	bl	do_serror
	kernel_exit 1
ENDPROC(el1_error)

el0_error:
	kernel_entry 0
el0_error_naked:
	mrs	x1, esr_el1
	gic_prio_kentry_setup tmp=x2
	ct_user_exit_irqoff
	enable_dbg
	mov	x0, sp
	bl	do_serror
	enable_da_f
	b	ret_to_user
ENDPROC(el0_error)

/*
 * Ok, we need to do extra processing, enter the slow path.
 */
work_pending:
	mov	x0, sp				// 'regs'
	bl	do_notify_resume
#ifdef CONFIG_TRACE_IRQFLAGS
	bl	trace_hardirqs_on		// enabled while in userspace
#endif
	ldr	x1, [tsk, #TSK_TI_FLAGS]	// re-check for single-step
	b	finish_ret_to_user
/*
 * "slow" syscall return path.
 */
ret_to_user:
	disable_daif
	gic_prio_kentry_setup tmp=x3
	ldr	x1, [tsk, #TSK_TI_FLAGS]
	and	x2, x1, #_TIF_WORK_MASK
	cbnz	x2, work_pending
finish_ret_to_user:
	enable_step_tsk x1, x2
#ifdef CONFIG_GCC_PLUGIN_STACKLEAK
	bl	stackleak_erase
#endif
	kernel_exit 0
ENDPROC(ret_to_user)
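/*
 * Sketch of the return path above: the TIF work flags are checked with
 * interrupts masked; if any _TIF_WORK_MASK bit is set, do_notify_resume()
 * runs (looping internally until the work drains), and the flags are
 * re-read afterwards so enable_step_tsk acts on fresh state before
 * kernel_exit 0.
 */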
/*
 * SVC handler.
 */
	.align	6
el0_svc:
	gic_prio_kentry_setup tmp=x1
	mov	x0, sp
	bl	el0_svc_handler
	b	ret_to_user
ENDPROC(el0_svc)

	.popsection				// .entry.text
	// Move from tramp_pg_dir to swapper_pg_dir
	.macro tramp_map_kernel, tmp
	mrs	\tmp, ttbr1_el1
	add	\tmp, \tmp, #(2 * PAGE_SIZE)
	bic	\tmp, \tmp, #USER_ASID_FLAG
	msr	ttbr1_el1, \tmp
#ifdef CONFIG_QCOM_FALKOR_ERRATUM_1003
alternative_if ARM64_WORKAROUND_QCOM_FALKOR_E1003
	/* ASID already in \tmp[63:48] */
	movk	\tmp, #:abs_g2_nc:(TRAMP_VALIAS >> 12)
	movk	\tmp, #:abs_g1_nc:(TRAMP_VALIAS >> 12)
	/* 2MB boundary containing the vectors, so we nobble the walk cache */
	movk	\tmp, #:abs_g0_nc:((TRAMP_VALIAS & ~(SZ_2M - 1)) >> 12)
	isb
	tlbi	vae1, \tmp
	dsb	nsh
alternative_else_nop_endif
#endif /* CONFIG_QCOM_FALKOR_ERRATUM_1003 */
	.endm
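	/*
	 * Note on the movk sequence above: the three movk instructions
	 * splice the 16-bit groups of (TRAMP_VALIAS >> 12) into \tmp
	 * while leaving the ASID in bits [63:48] intact, building the
	 * "tlbi vae1" argument without needing a scratch register.
	 */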
	// Move from swapper_pg_dir to tramp_pg_dir
	.macro tramp_unmap_kernel, tmp
	mrs	\tmp, ttbr1_el1
	sub	\tmp, \tmp, #(2 * PAGE_SIZE)
	orr	\tmp, \tmp, #USER_ASID_FLAG
	msr	ttbr1_el1, \tmp
	/*
	 * We avoid running the post_ttbr_update_workaround here because
	 * it's only needed by Cavium ThunderX, which requires KPTI to be
	 * disabled.
	 */
	.endm

	.macro tramp_data_page	dst
	adr_l	\dst, .entry.tramp.text
	sub	\dst, \dst, PAGE_SIZE
	.endm
	.macro tramp_data_read_var	dst, var
#ifdef CONFIG_RANDOMIZE_BASE
	tramp_data_page		\dst
	add	\dst, \dst, #:lo12:__entry_tramp_data_\var
	ldr	\dst, [\dst]
#else
	ldr	\dst, =\var
#endif
	.endm
#define BHB_MITIGATION_NONE	0
#define BHB_MITIGATION_LOOP	1
#define BHB_MITIGATION_FW	2
#define BHB_MITIGATION_INSN	3
	.macro tramp_ventry, vector_start, regsize, kpti, bhb
	.align	7
1:
	.if	\regsize == 64
	msr	tpidrro_el0, x30	// Restored in kernel_ventry
	.endif

	.if	\bhb == BHB_MITIGATION_LOOP
	/*
	 * This sequence must appear before the first indirect branch. i.e. the
	 * ret out of tramp_ventry. It appears here because x30 is free.
	 */
	__mitigate_spectre_bhb_loop	x30
	.endif // \bhb == BHB_MITIGATION_LOOP

	.if	\bhb == BHB_MITIGATION_INSN
	clearbhb
	isb
	.endif // \bhb == BHB_MITIGATION_INSN

	.if	\kpti == 1
	/*
	 * Defend against branch aliasing attacks by pushing a dummy
	 * entry onto the return stack and using a RET instruction to
	 * enter the full-fat kernel vectors.
	 */
	bl	2f
	b	.
2:
	tramp_map_kernel	x30
alternative_insn isb, nop, ARM64_WORKAROUND_QCOM_FALKOR_E1003
	tramp_data_read_var	x30, vectors
alternative_if_not ARM64_WORKAROUND_CAVIUM_TX2_219_PRFM
	prfm	plil1strm, [x30, #(1b - \vector_start)]
alternative_else_nop_endif

	msr	vbar_el1, x30
	isb
	.else
	ldr	x30, =vectors
	.endif // \kpti == 1

	.if	\bhb == BHB_MITIGATION_FW
	/*
	 * The firmware sequence must appear before the first indirect branch.
	 * i.e. the ret out of tramp_ventry. But it also needs the stack to be
	 * mapped to save/restore the registers the SMC clobbers.
	 */
	__mitigate_spectre_bhb_fw
	.endif // \bhb == BHB_MITIGATION_FW

	add	x30, x30, #(1b - \vector_start + 4)
	ret
.org 1b + 128	// Did we overflow the ventry slot?
	.endm
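	/*
	 * About the "add x30, x30, #(1b - \vector_start + 4)" above: x30
	 * holds the base of the real vector table at that point, so the
	 * add rebases it to the matching slot, plus 4 bytes to skip the
	 * slot's first instruction, which the trampoline already executed
	 * on entry.
	 */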
	.macro tramp_exit, regsize = 64
	tramp_data_read_var	x30, this_cpu_vector
alternative_if_not ARM64_HAS_VIRT_HOST_EXTN
	mrs	x29, tpidr_el1
alternative_else
	mrs	x29, tpidr_el2
alternative_endif
	ldr	x30, [x30, x29]

	msr	vbar_el1, x30
	ldr	lr, [sp, #S_LR]
	tramp_unmap_kernel	x29
	.if	\regsize == 64
	mrs	x29, far_el1
	.endif
	add	sp, sp, #S_FRAME_SIZE		// restore sp
	eret
	sb
	.endm
	.macro generate_tramp_vector,	kpti, bhb
.Lvector_start\@:
	.space	0x400

	.rept	4
	tramp_ventry	.Lvector_start\@, 64, \kpti, \bhb
	.endr
	.rept	4
	tramp_ventry	.Lvector_start\@, 32, \kpti, \bhb
	.endr
	.endm
#ifdef CONFIG_UNMAP_KERNEL_AT_EL0
/*
 * Exception vectors trampoline.
 * The order must match __bp_harden_el1_vectors and the
 * arm64_bp_harden_el1_vectors enum.
 */
	.pushsection ".entry.tramp.text", "ax"
	.align	11
ENTRY(tramp_vectors)
#ifdef CONFIG_MITIGATE_SPECTRE_BRANCH_HISTORY
	generate_tramp_vector	kpti=1, bhb=BHB_MITIGATION_LOOP
	generate_tramp_vector	kpti=1, bhb=BHB_MITIGATION_FW
	generate_tramp_vector	kpti=1, bhb=BHB_MITIGATION_INSN
#endif /* CONFIG_MITIGATE_SPECTRE_BRANCH_HISTORY */
	generate_tramp_vector	kpti=1, bhb=BHB_MITIGATION_NONE
END(tramp_vectors)

ENTRY(tramp_exit_native)
	tramp_exit
END(tramp_exit_native)

ENTRY(tramp_exit_compat)
	tramp_exit	32
END(tramp_exit_compat)

	.ltorg
	.popsection				// .entry.tramp.text
#ifdef CONFIG_RANDOMIZE_BASE
	.pushsection ".rodata", "a"
	.align PAGE_SHIFT
	.globl	__entry_tramp_data_start
__entry_tramp_data_start:
__entry_tramp_data_vectors:
	.quad	vectors
#ifdef CONFIG_ARM_SDE_INTERFACE
__entry_tramp_data___sdei_asm_handler:
	.quad	__sdei_asm_handler
#endif /* CONFIG_ARM_SDE_INTERFACE */
__entry_tramp_data_this_cpu_vector:
	.quad	this_cpu_vector
	.popsection				// .rodata
#endif /* CONFIG_RANDOMIZE_BASE */
#endif /* CONFIG_UNMAP_KERNEL_AT_EL0 */
/*
 * Exception vectors for spectre mitigations on entry from EL1 when
 * kpti is not in use.
 */
	.macro generate_el1_vector, bhb
.Lvector_start\@:
	kernel_ventry	1, sync_invalid			// Synchronous EL1t
	kernel_ventry	1, irq_invalid			// IRQ EL1t
	kernel_ventry	1, fiq_invalid			// FIQ EL1t
	kernel_ventry	1, error_invalid		// Error EL1t

	kernel_ventry	1, sync				// Synchronous EL1h
	kernel_ventry	1, irq				// IRQ EL1h
	kernel_ventry	1, fiq_invalid			// FIQ EL1h
	kernel_ventry	1, error			// Error EL1h

	.rept	4
	tramp_ventry	.Lvector_start\@, 64, 0, \bhb
	.endr
	.rept	4
	tramp_ventry	.Lvector_start\@, 32, 0, \bhb
	.endr
	.endm

/* The order must match tramp_vecs and the arm64_bp_harden_el1_vectors enum. */
	.pushsection ".entry.text", "ax"
	.align	11
SYM_CODE_START(__bp_harden_el1_vectors)
#ifdef CONFIG_MITIGATE_SPECTRE_BRANCH_HISTORY
	generate_el1_vector	bhb=BHB_MITIGATION_LOOP
	generate_el1_vector	bhb=BHB_MITIGATION_FW
	generate_el1_vector	bhb=BHB_MITIGATION_INSN
#endif /* CONFIG_MITIGATE_SPECTRE_BRANCH_HISTORY */
SYM_CODE_END(__bp_harden_el1_vectors)
	.popsection
/*
 * Register switch for AArch64. The callee-saved registers need to be saved
 * and restored. On entry:
 *   x0 = previous task_struct (must be preserved across the switch)
 *   x1 = next task_struct
 * Previous and next are guaranteed not to be the same.
 *
 */
ENTRY(cpu_switch_to)
	mov	x10, #THREAD_CPU_CONTEXT
	add	x8, x0, x10
	mov	x9, sp
	stp	x19, x20, [x8], #16		// store callee-saved registers
	stp	x21, x22, [x8], #16
	stp	x23, x24, [x8], #16
	stp	x25, x26, [x8], #16
	stp	x27, x28, [x8], #16
	stp	x29, x9, [x8], #16
	str	lr, [x8]
	add	x8, x1, x10
	ldp	x19, x20, [x8], #16		// restore callee-saved registers
	ldp	x21, x22, [x8], #16
	ldp	x23, x24, [x8], #16
	ldp	x25, x26, [x8], #16
	ldp	x27, x28, [x8], #16
	ldp	x29, x9, [x8], #16
	ldr	lr, [x8]
	mov	sp, x9
	msr	sp_el0, x1
	ret
ENDPROC(cpu_switch_to)
NOKPROBE(cpu_switch_to)
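/*
 * For reference (a sketch of the data layout): THREAD_CPU_CONTEXT is the
 * offset of task_struct->thread.cpu_context, whose fields are x19-x28,
 * fp, sp and pc, exactly the set stored and reloaded above. Caller-saved
 * registers are spilled by the compiler around the call to __switch_to().
 */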
/*
 * This is how we return from a fork.
 */
ENTRY(ret_from_fork)
	bl	schedule_tail
	cbz	x19, 1f				// not a kernel thread
	mov	x0, x20
	blr	x19
1:	get_current_task tsk
	b	ret_to_user
ENDPROC(ret_from_fork)
NOKPROBE(ret_from_fork)
#ifdef CONFIG_ARM_SDE_INTERFACE

#include <asm/sdei.h>
#include <uapi/linux/arm_sdei.h>

.macro sdei_handler_exit exit_mode
	/* On success, this call never returns... */
	cmp	\exit_mode, #SDEI_EXIT_SMC
	b.ne	1f
	smc	#0
	b	.
1:	hvc	#0
	b	.
.endm
#ifdef CONFIG_UNMAP_KERNEL_AT_EL0
/*
 * The regular SDEI entry point may have been unmapped along with the rest of
 * the kernel. This trampoline restores the kernel mapping to make the x1 memory
 * argument accessible.
 *
 * This clobbers x4, __sdei_handler() will restore this from firmware's
 * copy.
 */
.pushsection ".entry.tramp.text", "ax"
ENTRY(__sdei_asm_entry_trampoline)
	mrs	x4, ttbr1_el1
	tbz	x4, #USER_ASID_BIT, 1f

	tramp_map_kernel tmp=x4
	isb
	mov	x4, xzr

	/*
	 * Use reg->interrupted_regs.addr_limit to remember whether to unmap
	 * the kernel on exit.
	 */
1:	str	x4, [x1, #(SDEI_EVENT_INTREGS + S_ORIG_ADDR_LIMIT)]

	tramp_data_read_var	x4, __sdei_asm_handler
	br	x4
ENDPROC(__sdei_asm_entry_trampoline)
NOKPROBE(__sdei_asm_entry_trampoline)
/*
 * Make the exit call and restore the original ttbr1_el1
 *
 * x0 & x1: setup for the exit API call
 * x2: exit_mode
 * x4: struct sdei_registered_event argument from registration time.
 */
ENTRY(__sdei_asm_exit_trampoline)
	ldr	x4, [x4, #(SDEI_EVENT_INTREGS + S_ORIG_ADDR_LIMIT)]
	cbnz	x4, 1f

	tramp_unmap_kernel	tmp=x4

1:	sdei_handler_exit exit_mode=x2
ENDPROC(__sdei_asm_exit_trampoline)
NOKPROBE(__sdei_asm_exit_trampoline)
.popsection		// .entry.tramp.text
#endif /* CONFIG_UNMAP_KERNEL_AT_EL0 */

/*
 * Software Delegated Exception entry point.
 *
 * x0: Event number
 * x1: struct sdei_registered_event argument from registration time.
 * x2: interrupted PC
 * x3: interrupted PSTATE
 * x4: maybe clobbered by the trampoline
 *
 * Firmware has preserved x0->x17 for us, we must save/restore the rest to
 * follow SMC-CC. We save (or retrieve) all the registers as the handler may
 * want them.
 */
ENTRY(__sdei_asm_handler)
	stp	x2, x3, [x1, #SDEI_EVENT_INTREGS + S_PC]
	stp	x4, x5, [x1, #SDEI_EVENT_INTREGS + 16 * 2]
	stp	x6, x7, [x1, #SDEI_EVENT_INTREGS + 16 * 3]
	stp	x8, x9, [x1, #SDEI_EVENT_INTREGS + 16 * 4]
	stp	x10, x11, [x1, #SDEI_EVENT_INTREGS + 16 * 5]
	stp	x12, x13, [x1, #SDEI_EVENT_INTREGS + 16 * 6]
	stp	x14, x15, [x1, #SDEI_EVENT_INTREGS + 16 * 7]
	stp	x16, x17, [x1, #SDEI_EVENT_INTREGS + 16 * 8]
	stp	x18, x19, [x1, #SDEI_EVENT_INTREGS + 16 * 9]
	stp	x20, x21, [x1, #SDEI_EVENT_INTREGS + 16 * 10]
	stp	x22, x23, [x1, #SDEI_EVENT_INTREGS + 16 * 11]
	stp	x24, x25, [x1, #SDEI_EVENT_INTREGS + 16 * 12]
	stp	x26, x27, [x1, #SDEI_EVENT_INTREGS + 16 * 13]
	stp	x28, x29, [x1, #SDEI_EVENT_INTREGS + 16 * 14]
	mov	x4, sp
	stp	lr, x4, [x1, #SDEI_EVENT_INTREGS + S_LR]

	mov	x19, x1
#ifdef CONFIG_VMAP_STACK
	/*
	 * entry.S may have been using sp as a scratch register, find whether
	 * this is a normal or critical event and switch to the appropriate
	 * stack for this CPU.
	 */
	ldrb	w4, [x19, #SDEI_EVENT_PRIORITY]
	cbnz	w4, 1f
	ldr_this_cpu dst=x5, sym=sdei_stack_normal_ptr, tmp=x6
	b	2f
1:	ldr_this_cpu dst=x5, sym=sdei_stack_critical_ptr, tmp=x6
2:	mov	x6, #SDEI_STACK_SIZE
	add	x5, x5, x6
	mov	sp, x5
#endif
	/*
	 * We may have interrupted userspace, or a guest, or exit-from or
	 * return-to either of these. We can't trust sp_el0, restore it.
	 */
	mrs	x28, sp_el0
	ldr_this_cpu	dst=x0, sym=__entry_task, tmp=x1
	msr	sp_el0, x0

	/* If we interrupted the kernel point to the previous stack/frame. */
	and	x0, x3, #0xc
	mrs	x1, CurrentEL
	cmp	x0, x1
	csel	x29, x29, xzr, eq	// fp, or zero
	csel	x4, x2, xzr, eq		// elr, or zero

	stp	x29, x4, [sp, #-16]!
	mov	x29, sp

	add	x0, x19, #SDEI_EVENT_INTREGS
	mov	x1, x19
	bl	__sdei_handler
	msr	sp_el0, x28
	/* restore regs >x17 that we clobbered */
	mov	x4, x19		// keep x4 for __sdei_asm_exit_trampoline
	ldp	x28, x29, [x4, #SDEI_EVENT_INTREGS + 16 * 14]
	ldp	x18, x19, [x4, #SDEI_EVENT_INTREGS + 16 * 9]
	ldp	lr, x1, [x4, #SDEI_EVENT_INTREGS + S_LR]
	mov	sp, x1
	mov	x1, x0			// address to complete_and_resume
	/* x0 = (x0 <= 1) ? EVENT_COMPLETE:EVENT_COMPLETE_AND_RESUME */
	cmp	x0, #1
	mov_q	x2, SDEI_1_0_FN_SDEI_EVENT_COMPLETE
	mov_q	x3, SDEI_1_0_FN_SDEI_EVENT_COMPLETE_AND_RESUME
	csel	x0, x2, x3, ls
	ldr_l	x2, sdei_exit_mode

alternative_if_not ARM64_UNMAP_KERNEL_AT_EL0
	sdei_handler_exit exit_mode=x2
alternative_else_nop_endif
#ifdef CONFIG_UNMAP_KERNEL_AT_EL0
	tramp_alias	dst=x5, sym=__sdei_asm_exit_trampoline, tmp=x3
	br	x5
#endif
ENDPROC(__sdei_asm_handler)
NOKPROBE(__sdei_asm_handler)
#endif /* CONFIG_ARM_SDE_INTERFACE */