/*
 * Low-level exception handling code
 *
 * Copyright (C) 2012 ARM Ltd.
 * Authors:	Catalin Marinas <catalin.marinas@arm.com>
 *		Will Deacon <will.deacon@arm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */
#include <linux/arm-smccc.h>
#include <linux/init.h>
#include <linux/linkage.h>

#include <asm/alternative.h>
#include <asm/assembler.h>
#include <asm/asm-offsets.h>
#include <asm/cpufeature.h>
#include <asm/errno.h>
#include <asm/esr.h>
#include <asm/irq.h>
#include <asm/memory.h>
#include <asm/mmu.h>
#include <asm/processor.h>
#include <asm/ptrace.h>
#include <asm/thread_info.h>
#include <asm/asm-uaccess.h>
#include <asm/unistd.h>
/*
 * Context tracking subsystem. Used to instrument transitions
 * between user and kernel mode.
 */
	.macro ct_user_exit, syscall = 0
#ifdef CONFIG_CONTEXT_TRACKING
	bl	context_tracking_user_exit
	.if \syscall == 1
	/*
	 * Save/restore needed during syscalls. Restore syscall arguments from
	 * the values already saved on stack during kernel_entry.
	 */
	ldp	x0, x1, [sp]
	ldp	x2, x3, [sp, #S_X2]
	ldp	x4, x5, [sp, #S_X4]
	ldp	x6, x7, [sp, #S_X6]
	.endif
#endif
	.endm

	.macro ct_user_enter
#ifdef CONFIG_CONTEXT_TRACKING
	bl	context_tracking_user_enter
#endif
	.endm
	.macro kernel_ventry, el, label, regsize = 64
	.align 7
.Lventry_start\@:
	.if	\el == 0
	/*
	 * This must be the first instruction of the EL0 vector entries. It is
	 * skipped by the trampoline vectors, to trigger the cleanup.
	 */
	b	.Lskip_tramp_vectors_cleanup\@
	.if	\regsize == 64
	mrs	x30, tpidrro_el0
	msr	tpidrro_el0, xzr
	.else
	mov	x30, xzr
	.endif
.Lskip_tramp_vectors_cleanup\@:
	.endif
	sub	sp, sp, #S_FRAME_SIZE
#ifdef CONFIG_VMAP_STACK
	/*
	 * Test whether the SP has overflowed, without corrupting a GPR.
	 * Task and IRQ stacks are aligned so that SP & (1 << THREAD_SHIFT)
	 * should always be zero.
	 */
	add	sp, sp, x0			// sp' = sp + x0
	sub	x0, sp, x0			// x0' = sp' - x0 = (sp + x0) - x0 = sp
	tbnz	x0, #THREAD_SHIFT, 0f
	sub	x0, sp, x0			// x0'' = sp' - x0' = (sp + x0) - sp = x0
	sub	sp, sp, x0			// sp'' = sp' - x0 = (sp + x0) - x0 = sp
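	/*
	 * Worked example (illustrative values, assuming THREAD_SHIFT = 14,
	 * i.e. 16K stacks aligned to 2 * THREAD_SIZE): a valid sp such as
	 * 0xffff000008009f00 has bit 14 clear, so the tbnz falls through and
	 * sp/x0 are restored above. A just-overflowed sp sits below the 32K
	 * aligned stack base, e.g. 0xffff000008007f00, which has bit 14 set,
	 * so we take the overflow path with sp + x0 still recoverable.
	 */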
	b	el\()\el\()_\label

0:
	/*
	 * Either we've just detected an overflow, or we've taken an exception
	 * while on the overflow stack. Either way, we won't return to
	 * userspace, and can clobber EL0 registers to free up GPRs.
	 */

	/* Stash the original SP (minus S_FRAME_SIZE) in tpidr_el0. */
	msr	tpidr_el0, x0

	/* Recover the original x0 value and stash it in tpidrro_el0 */
	sub	x0, sp, x0
	msr	tpidrro_el0, x0

	/* Switch to the overflow stack */
	adr_this_cpu sp, overflow_stack + OVERFLOW_STACK_SIZE, x0
	/*
	 * Check whether we were already on the overflow stack. This may happen
	 * after panic() re-enables interrupts.
	 */
	mrs	x0, tpidr_el0			// sp of interrupted context
	sub	x0, sp, x0			// delta with top of overflow stack
	tst	x0, #~(OVERFLOW_STACK_SIZE - 1)	// within range?
	b.ne	__bad_stack			// no? -> bad stack pointer
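	/*
	 * Illustration (assuming the usual OVERFLOW_STACK_SIZE of 4K): the
	 * tst clears Z only if the delta has bits set outside [11:0], i.e.
	 * the interrupted sp was not within one overflow-stack span below
	 * the top, and so cannot have been this CPU's overflow stack.
	 */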
	/* We were already on the overflow stack. Restore sp/x0 and carry on. */
	sub	sp, sp, x0
	mrs	x0, tpidrro_el0
	b	el\()\el\()_\label
#else
	b	el\()\el\()_\label
#endif
.org .Lventry_start\@ + 128		// Did we overflow the ventry slot?
	.endm
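	// Each vector slot is 128 bytes (.align 7), so the .org above turns
	// an entry that outgrows its slot into an assembly-time error.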
	.macro tramp_alias, dst, sym, tmp
	mov_q	\dst, TRAMP_VALIAS
	adr_l	\tmp, \sym
	add	\dst, \dst, \tmp
	adr_l	\tmp, .entry.tramp.text
	sub	\dst, \dst, \tmp
	.endm
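	// Net effect: \dst = TRAMP_VALIAS + (\sym - .entry.tramp.text), i.e.
	// \sym translated to its fixed trampoline alias in the EL0-visible
	// mapping.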
	// This macro corrupts x0-x3. It is the caller's duty
	// to save/restore them if required.
	.macro	apply_ssbd, state, targ, tmp1, tmp2
#ifdef CONFIG_ARM64_SSBD
alternative_cb	arm64_enable_wa2_handling
	b	\targ
alternative_cb_end
	ldr_this_cpu	\tmp2, arm64_ssbd_callback_required, \tmp1
	cbz	\tmp2, \targ
	ldr	\tmp2, [tsk, #TSK_TI_FLAGS]
	tbnz	\tmp2, #TIF_SSBD, \targ
	mov	w0, #ARM_SMCCC_ARCH_WORKAROUND_2
	mov	w1, #\state
alternative_cb	arm64_update_smccc_conduit
	nop					// Patched to SMC/HVC #0
alternative_cb_end
#endif
	.endm
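	/*
	 * For context: the arm64_update_smccc_conduit callback patches the
	 * nop above at boot into "smc #0" or "hvc #0" to match the firmware
	 * conduit, so the w0/w1 pair becomes an SMCCC ARCH_WORKAROUND_2
	 * enable/disable request.
	 */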
	.macro	kernel_entry, el, regsize = 64
	.if	\regsize == 32
	mov	w0, w0				// zero upper 32 bits of x0
	.endif
	stp	x0, x1, [sp, #16 * 0]
	stp	x2, x3, [sp, #16 * 1]
	stp	x4, x5, [sp, #16 * 2]
	stp	x6, x7, [sp, #16 * 3]
	stp	x8, x9, [sp, #16 * 4]
	stp	x10, x11, [sp, #16 * 5]
	stp	x12, x13, [sp, #16 * 6]
	stp	x14, x15, [sp, #16 * 7]
	stp	x16, x17, [sp, #16 * 8]
	stp	x18, x19, [sp, #16 * 9]
	stp	x20, x21, [sp, #16 * 10]
	stp	x22, x23, [sp, #16 * 11]
	stp	x24, x25, [sp, #16 * 12]
	stp	x26, x27, [sp, #16 * 13]
	stp	x28, x29, [sp, #16 * 14]
	.if	\el == 0
	mrs	x21, sp_el0
	ldr_this_cpu	tsk, __entry_task, x20	// Ensure MDSCR_EL1.SS is clear,
	ldr	x19, [tsk, #TSK_TI_FLAGS]	// since we can unmask debug
	disable_step_tsk x19, x20		// exceptions when scheduling.

	apply_ssbd 1, 1f, x22, x23
#ifdef CONFIG_ARM64_SSBD
	ldp	x0, x1, [sp, #16 * 0]
	ldp	x2, x3, [sp, #16 * 1]
#endif
1:
	mov	x29, xzr			// fp pointed to user-space
	.else
	add	x21, sp, #S_FRAME_SIZE
	get_thread_info tsk
	/* Save the task's original addr_limit and set USER_DS */
	ldr	x20, [tsk, #TSK_TI_ADDR_LIMIT]
	str	x20, [sp, #S_ORIG_ADDR_LIMIT]
	mov	x20, #USER_DS
	str	x20, [tsk, #TSK_TI_ADDR_LIMIT]
	/* No need to reset PSTATE.UAO, hardware's already set it to 0 for us */
	.endif /* \el == 0 */
	mrs	x22, elr_el1
	mrs	x23, spsr_el1
	stp	lr, x21, [sp, #S_LR]

	/*
	 * In order to be able to dump the contents of struct pt_regs at the
	 * time the exception was taken (in case we attempt to walk the call
	 * stack later), chain it together with the stack frames.
	 */
	.if \el == 0
	stp	xzr, xzr, [sp, #S_STACKFRAME]
	.else
	stp	x29, x22, [sp, #S_STACKFRAME]
	.endif
	add	x29, sp, #S_STACKFRAME
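	/*
	 * The S_STACKFRAME field thus looks like an ordinary {fp, lr} frame
	 * record: {x29, elr_el1} when the exception came from the kernel, or
	 * {0, 0} to terminate the walk when it came from EL0.
	 */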
#ifdef CONFIG_ARM64_SW_TTBR0_PAN
	/*
	 * Set the TTBR0 PAN bit in SPSR. When the exception is taken from
	 * EL0, there is no need to check the state of TTBR0_EL1 since
	 * accesses are always enabled.
	 * Note that the meaning of this bit differs from the ARMv8.1 PAN
	 * feature as all TTBR0_EL1 accesses are disabled, not just those to
	 * user mappings.
	 */
alternative_if ARM64_HAS_PAN
	b	1f				// skip TTBR0 PAN
alternative_else_nop_endif

	.if	\el != 0
	mrs	x21, ttbr0_el1
	tst	x21, #TTBR_ASID_MASK		// Check for the reserved ASID
	orr	x23, x23, #PSR_PAN_BIT		// Set the emulated PAN in the saved SPSR
	b.eq	1f				// TTBR0 access already disabled
	and	x23, x23, #~PSR_PAN_BIT		// Clear the emulated PAN in the saved SPSR
	.endif

	__uaccess_ttbr0_disable x21
1:
#endif
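	/*
	 * For context: __uaccess_ttbr0_disable points TTBR0_EL1 at the empty
	 * reserved_ttbr0 table with the reserved ASID, so kernel-originated
	 * accesses to user addresses fault until uaccess re-enables TTBR0.
	 */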
	stp	x22, x23, [sp, #S_PC]

	/* Not in a syscall by default (el0_svc overwrites for real syscall) */
	.if	\el == 0
	mov	w21, #NO_SYSCALL
	str	w21, [sp, #S_SYSCALLNO]
	.endif
	/*
	 * Set sp_el0 to current thread_info.
	 */
	.if	\el == 0
	msr	sp_el0, tsk
	.endif

	/*
	 * Registers that may be useful after this macro is invoked:
	 *
	 * x21 - aborted SP
	 * x22 - aborted PC
	 * x23 - aborted PSTATE
	 */
	.endm
	.macro	kernel_exit, el
	.if	\el != 0
	/* Restore the task's original addr_limit. */
	ldr	x20, [sp, #S_ORIG_ADDR_LIMIT]
	str	x20, [tsk, #TSK_TI_ADDR_LIMIT]

	/* No need to restore UAO, it will be restored from SPSR_EL1 */
	.endif

	ldp	x21, x22, [sp, #S_PC]		// load ELR, SPSR
#ifdef CONFIG_ARM64_SW_TTBR0_PAN
	/*
	 * Restore access to TTBR0_EL1. If returning to EL0, no need for SPSR
	 * PAN bit checking.
	 */
alternative_if ARM64_HAS_PAN
	b	2f				// skip TTBR0 PAN
alternative_else_nop_endif

	.if	\el != 0
	tbnz	x22, #22, 1f			// Skip re-enabling TTBR0 access if the PSR_PAN_BIT is set
	.endif

	__uaccess_ttbr0_enable x0, x1
	.if	\el == 0
	/*
	 * Enable errata workarounds only if returning to user. The only
	 * workaround currently required for TTBR0_EL1 changes is for the
	 * Cavium erratum 27456 (broadcast TLBI instructions may cause I-cache
	 * corruption).
	 */
	bl	post_ttbr_update_workaround
	.endif

1:
	.if	\el != 0
	and	x22, x22, #~PSR_PAN_BIT		// ARMv8.0 CPUs do not understand this bit
	.endif
2:
#endif
	.if	\el == 0
	ldr	x23, [sp, #S_SP]		// load return stack pointer
	msr	sp_el0, x23
	tst	x22, #PSR_MODE32_BIT		// native task?
	b.eq	3f
#ifdef CONFIG_ARM64_ERRATUM_845719
alternative_if ARM64_WORKAROUND_845719
#ifdef CONFIG_PID_IN_CONTEXTIDR
	mrs	x29, contextidr_el1
	msr	contextidr_el1, x29
#else
	msr	contextidr_el1, xzr
#endif
alternative_else_nop_endif
#endif
3:
	apply_ssbd 0, 5f, x0, x1
5:
	.endif

	msr	elr_el1, x21			// set up the return data
	msr	spsr_el1, x22
	ldp	x0, x1, [sp, #16 * 0]
	ldp	x2, x3, [sp, #16 * 1]
	ldp	x4, x5, [sp, #16 * 2]
	ldp	x6, x7, [sp, #16 * 3]
	ldp	x8, x9, [sp, #16 * 4]
	ldp	x10, x11, [sp, #16 * 5]
	ldp	x12, x13, [sp, #16 * 6]
	ldp	x14, x15, [sp, #16 * 7]
	ldp	x16, x17, [sp, #16 * 8]
	ldp	x18, x19, [sp, #16 * 9]
	ldp	x20, x21, [sp, #16 * 10]
	ldp	x22, x23, [sp, #16 * 11]
	ldp	x24, x25, [sp, #16 * 12]
	ldp	x26, x27, [sp, #16 * 13]
	ldp	x28, x29, [sp, #16 * 14]
	.if	\el == 0
alternative_if_not ARM64_UNMAP_KERNEL_AT_EL0
	ldr	lr, [sp, #S_LR]
	add	sp, sp, #S_FRAME_SIZE		// restore sp
	eret
alternative_else_nop_endif
#ifdef CONFIG_UNMAP_KERNEL_AT_EL0
	bne	4f
	msr	far_el1, x29
	tramp_alias	x30, tramp_exit_native, x29
	br	x30
4:
	tramp_alias	x30, tramp_exit_compat, x29
	br	x30
#endif
	.else
	ldr	lr, [sp, #S_LR]
	add	sp, sp, #S_FRAME_SIZE		// restore sp
	eret
	.endif
	.endm
	.macro	irq_stack_entry
	mov	x19, sp			// preserve the original sp

	/*
	 * Compare sp with the base of the task stack.
	 * If the top ~(THREAD_SIZE - 1) bits match, we are on a task stack,
	 * and should switch to the irq stack.
	 */
	ldr	x25, [tsk, TSK_STACK]
	eor	x25, x25, x19
	and	x25, x25, #~(THREAD_SIZE - 1)
	cbnz	x25, 9998f

	ldr_this_cpu x25, irq_stack_ptr, x26
	mov	x26, #IRQ_STACK_SIZE
	add	x26, x25, x26

	/* switch to the irq stack */
	mov	sp, x26
9998:
	.endm

	/*
	 * x19 should be preserved between irq_stack_entry and
	 * irq_stack_exit.
	 */
	.macro	irq_stack_exit
	mov	sp, x19
	.endm
/*
 * These are the registers used in the syscall handler, and allow us to
 * have in theory up to 7 arguments to a function - x0 to x6.
 *
 * x7 is reserved for the system call number in 32-bit mode.
 */
wsc_nr	.req	w25		// number of system calls
xsc_nr	.req	x25		// number of system calls (zero-extended)
wscno	.req	w26		// syscall number
xscno	.req	x26		// syscall number (zero-extended)
stbl	.req	x27		// syscall table pointer
tsk	.req	x28		// current thread_info
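// Note that writing a w-view (e.g. "mov wscno, w8") zeroes the upper 32
// bits of the underlying register, so xscno always holds the zero-extended
// syscall number.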
/*
 * Interrupt handling.
 */
	.macro	irq_handler
	ldr_l	x1, handle_arch_irq
	mov	x0, sp
	irq_stack_entry
	blr	x1
	irq_stack_exit
	.endm

	.pushsection ".entry.text", "ax"
	.align	11
ENTRY(vectors)
	kernel_ventry	1, sync_invalid			// Synchronous EL1t
	kernel_ventry	1, irq_invalid			// IRQ EL1t
	kernel_ventry	1, fiq_invalid			// FIQ EL1t
	kernel_ventry	1, error_invalid		// Error EL1t

	kernel_ventry	1, sync				// Synchronous EL1h
	kernel_ventry	1, irq				// IRQ EL1h
	kernel_ventry	1, fiq_invalid			// FIQ EL1h
	kernel_ventry	1, error_invalid		// Error EL1h

	kernel_ventry	0, sync				// Synchronous 64-bit EL0
	kernel_ventry	0, irq				// IRQ 64-bit EL0
	kernel_ventry	0, fiq_invalid			// FIQ 64-bit EL0
	kernel_ventry	0, error_invalid		// Error 64-bit EL0

#ifdef CONFIG_COMPAT
	kernel_ventry	0, sync_compat, 32		// Synchronous 32-bit EL0
	kernel_ventry	0, irq_compat, 32		// IRQ 32-bit EL0
	kernel_ventry	0, fiq_invalid_compat, 32	// FIQ 32-bit EL0
	kernel_ventry	0, error_invalid_compat, 32	// Error 32-bit EL0
#else
	kernel_ventry	0, sync_invalid, 32		// Synchronous 32-bit EL0
	kernel_ventry	0, irq_invalid, 32		// IRQ 32-bit EL0
	kernel_ventry	0, fiq_invalid, 32		// FIQ 32-bit EL0
	kernel_ventry	0, error_invalid, 32		// Error 32-bit EL0
#endif
END(vectors)
#ifdef CONFIG_VMAP_STACK
	/*
	 * We detected an overflow in kernel_ventry, which switched to the
	 * overflow stack. Stash the exception regs, and head to our overflow
	 * handler.
	 */
__bad_stack:
	/* Restore the original x0 value */
	mrs	x0, tpidrro_el0

	/*
	 * Store the original GPRs to the new stack. The original SP (minus
	 * S_FRAME_SIZE) was stashed in tpidr_el0 by kernel_ventry.
	 */
	sub	sp, sp, #S_FRAME_SIZE
	kernel_entry 1
	mrs	x0, tpidr_el0
	add	x0, x0, #S_FRAME_SIZE
	str	x0, [sp, #S_SP]

	/* Stash the regs for handle_bad_stack */
	mov	x0, sp

	bl	handle_bad_stack
	ASM_BUG()
#endif /* CONFIG_VMAP_STACK */
/*
 * Invalid mode handlers
 */
	.macro	inv_entry, el, reason, regsize = 64
	kernel_entry \el, \regsize
	mov	x0, sp
	mov	x1, #\reason
	mrs	x2, esr_el1
	bl	bad_mode
	ASM_BUG()
	.endm

el0_sync_invalid:
	inv_entry 0, BAD_SYNC
ENDPROC(el0_sync_invalid)

el0_irq_invalid:
	inv_entry 0, BAD_IRQ
ENDPROC(el0_irq_invalid)

el0_fiq_invalid:
	inv_entry 0, BAD_FIQ
ENDPROC(el0_fiq_invalid)

el0_error_invalid:
	inv_entry 0, BAD_ERROR
ENDPROC(el0_error_invalid)

el0_fiq_invalid_compat:
	inv_entry 0, BAD_FIQ, 32
ENDPROC(el0_fiq_invalid_compat)

el0_error_invalid_compat:
	inv_entry 0, BAD_ERROR, 32
ENDPROC(el0_error_invalid_compat)

el1_sync_invalid:
	inv_entry 1, BAD_SYNC
ENDPROC(el1_sync_invalid)

el1_irq_invalid:
	inv_entry 1, BAD_IRQ
ENDPROC(el1_irq_invalid)

el1_fiq_invalid:
	inv_entry 1, BAD_FIQ
ENDPROC(el1_fiq_invalid)

el1_error_invalid:
	inv_entry 1, BAD_ERROR
ENDPROC(el1_error_invalid)
/*
 * EL1 mode handlers.
 */
	.align	6
el1_sync:
	kernel_entry 1
	mrs	x1, esr_el1			// read the syndrome register
	lsr	x24, x1, #ESR_ELx_EC_SHIFT	// exception class
	cmp	x24, #ESR_ELx_EC_DABT_CUR	// data abort in EL1
	b.eq	el1_da
	cmp	x24, #ESR_ELx_EC_IABT_CUR	// instruction abort in EL1
	b.eq	el1_ia
	cmp	x24, #ESR_ELx_EC_SYS64		// configurable trap
	b.eq	el1_undef
	cmp	x24, #ESR_ELx_EC_SP_ALIGN	// stack alignment exception
	b.eq	el1_sp_pc
	cmp	x24, #ESR_ELx_EC_PC_ALIGN	// pc alignment exception
	b.eq	el1_sp_pc
	cmp	x24, #ESR_ELx_EC_UNKNOWN	// unknown exception in EL1
	b.eq	el1_undef
	cmp	x24, #ESR_ELx_EC_BREAKPT_CUR	// debug exception in EL1
	b.ge	el1_dbg
	b	el1_inv
el1_ia:
	/*
	 * Fall through to the Data abort case
	 */
el1_da:
	/*
	 * Data abort handling
	 */
	mrs	x3, far_el1
	enable_dbg
	// re-enable interrupts if they were enabled in the aborted context
	tbnz	x23, #7, 1f			// PSR_I_BIT
	enable_irq
1:
	clear_address_tag x0, x3
	mov	x2, sp				// struct pt_regs
	bl	do_mem_abort

	// disable interrupts before pulling preserved data off the stack
	disable_irq
	kernel_exit 1

el1_sp_pc:
	/*
	 * Stack or PC alignment exception handling
	 */

el1_undef:
	/*
	 * Undefined instruction
	 */
el1_dbg:
	/*
	 * Debug exception handling
	 */
	cmp	x24, #ESR_ELx_EC_BRK64		// if BRK64
	cinc	x24, x24, eq			// set bit '0'
	tbz	x24, #0, el1_inv		// EL1 only
	mrs	x0, far_el1
	mov	x2, sp				// struct pt_regs
	bl	do_debug_exception
	kernel_exit 1
el1_inv:
	// TODO: add support for undefined instructions in kernel mode
	.align	6
el1_irq:
	kernel_entry 1
	enable_dbg
#ifdef CONFIG_TRACE_IRQFLAGS
	bl	trace_hardirqs_off
#endif

	irq_handler

#ifdef CONFIG_PREEMPT
	ldr	w24, [tsk, #TSK_TI_PREEMPT]	// get preempt count
	cbnz	w24, 1f				// preempt count != 0
	ldr	x0, [tsk, #TSK_TI_FLAGS]	// get flags
	tbz	x0, #TIF_NEED_RESCHED, 1f	// needs rescheduling?
	bl	el1_preempt
1:
#endif
#ifdef CONFIG_TRACE_IRQFLAGS
	bl	trace_hardirqs_on
#endif
	kernel_exit 1
ENDPROC(el1_irq)

#ifdef CONFIG_PREEMPT
el1_preempt:
	mov	x24, lr
1:	bl	preempt_schedule_irq		// irq en/disable is done inside
	ldr	x0, [tsk, #TSK_TI_FLAGS]	// get new task's TI_FLAGS
	tbnz	x0, #TIF_NEED_RESCHED, 1b	// needs rescheduling?
	ret	x24
#endif
/*
 * EL0 mode handlers.
 */
	.align	6
el0_sync:
	kernel_entry 0
	mrs	x25, esr_el1			// read the syndrome register
	lsr	x24, x25, #ESR_ELx_EC_SHIFT	// exception class
	cmp	x24, #ESR_ELx_EC_SVC64		// SVC in 64-bit state
	b.eq	el0_svc
	cmp	x24, #ESR_ELx_EC_DABT_LOW	// data abort in EL0
	b.eq	el0_da
	cmp	x24, #ESR_ELx_EC_IABT_LOW	// instruction abort in EL0
	b.eq	el0_ia
	cmp	x24, #ESR_ELx_EC_FP_ASIMD	// FP/ASIMD access
	b.eq	el0_fpsimd_acc
	cmp	x24, #ESR_ELx_EC_FP_EXC64	// FP/ASIMD exception
	b.eq	el0_fpsimd_exc
	cmp	x24, #ESR_ELx_EC_SYS64		// configurable trap
	b.eq	el0_sys
	cmp	x24, #ESR_ELx_EC_SP_ALIGN	// stack alignment exception
	b.eq	el0_sp_pc
	cmp	x24, #ESR_ELx_EC_PC_ALIGN	// pc alignment exception
	b.eq	el0_sp_pc
	cmp	x24, #ESR_ELx_EC_UNKNOWN	// unknown exception in EL0
	b.eq	el0_undef
	cmp	x24, #ESR_ELx_EC_BREAKPT_LOW	// debug exception in EL0
	b.ge	el0_dbg
	b	el0_inv
	.align	6
el0_sync_compat:
	kernel_entry 0, 32
	mrs	x25, esr_el1			// read the syndrome register
	lsr	x24, x25, #ESR_ELx_EC_SHIFT	// exception class
	cmp	x24, #ESR_ELx_EC_SVC32		// SVC in 32-bit state
	b.eq	el0_svc_compat
	cmp	x24, #ESR_ELx_EC_DABT_LOW	// data abort in EL0
	b.eq	el0_da
	cmp	x24, #ESR_ELx_EC_IABT_LOW	// instruction abort in EL0
	b.eq	el0_ia
	cmp	x24, #ESR_ELx_EC_FP_ASIMD	// FP/ASIMD access
	b.eq	el0_fpsimd_acc
	cmp	x24, #ESR_ELx_EC_FP_EXC32	// FP/ASIMD exception
	b.eq	el0_fpsimd_exc
	cmp	x24, #ESR_ELx_EC_PC_ALIGN	// pc alignment exception
	b.eq	el0_sp_pc
	cmp	x24, #ESR_ELx_EC_UNKNOWN	// unknown exception in EL0
	b.eq	el0_undef
	cmp	x24, #ESR_ELx_EC_CP15_32	// CP15 MRC/MCR trap
	b.eq	el0_undef
	cmp	x24, #ESR_ELx_EC_CP15_64	// CP15 MRRC/MCRR trap
	b.eq	el0_undef
	cmp	x24, #ESR_ELx_EC_CP14_MR	// CP14 MRC/MCR trap
	b.eq	el0_undef
	cmp	x24, #ESR_ELx_EC_CP14_LS	// CP14 LDC/STC trap
	b.eq	el0_undef
	cmp	x24, #ESR_ELx_EC_CP14_64	// CP14 MRRC/MCRR trap
	b.eq	el0_undef
	cmp	x24, #ESR_ELx_EC_BREAKPT_LOW	// debug exception in EL0
	b.ge	el0_dbg
	b	el0_inv
el0_svc_compat:
	/*
	 * AArch32 syscall handling
	 */
	adrp	stbl, compat_sys_call_table	// load compat syscall table pointer
	mov	wscno, w7			// syscall number in w7 (r7)
	mov	wsc_nr, #__NR_compat_syscalls
	b	el0_svc_naked
el0_da:
	/*
	 * Data abort handling
	 */
	mrs	x26, far_el1
	// enable interrupts before calling the main handler
	enable_dbg_and_irq
	ct_user_exit
	clear_address_tag x0, x26
	mov	x1, x25
	mov	x2, sp
	bl	do_mem_abort
	b	ret_to_user
el0_ia:
	/*
	 * Instruction abort handling
	 */
	mrs	x26, far_el1
	enable_dbg
#ifdef CONFIG_TRACE_IRQFLAGS
	bl	trace_hardirqs_off
#endif
	ct_user_exit
	mov	x0, x26
	mov	x1, x25
	mov	x2, sp
	bl	do_el0_ia_bp_hardening
	b	ret_to_user
el0_fpsimd_acc:
	/*
	 * Floating Point or Advanced SIMD access
	 */

el0_fpsimd_exc:
	/*
	 * Floating Point or Advanced SIMD exception
	 */

el0_sp_pc:
	/*
	 * Stack or PC alignment exception handling
	 */
	mrs	x26, far_el1
	enable_dbg
#ifdef CONFIG_TRACE_IRQFLAGS
	bl	trace_hardirqs_off
#endif
	ct_user_exit
el0_undef:
	/*
	 * Undefined instruction
	 */
	// enable interrupts before calling the main handler
	enable_dbg_and_irq

el0_sys:
	/*
	 * System instructions, for trapped cache maintenance instructions
	 */
el0_dbg:
	/*
	 * Debug exception handling
	 */
	tbnz	x24, #0, el0_inv		// EL0 only
	mrs	x0, far_el1
	mov	x1, x25
	mov	x2, sp
	bl	do_debug_exception
	b	ret_to_user
	.align	6
el0_irq:
	kernel_entry 0
el0_irq_naked:
	enable_dbg
#ifdef CONFIG_TRACE_IRQFLAGS
	bl	trace_hardirqs_off
#endif

	ct_user_exit
#ifdef CONFIG_HARDEN_BRANCH_PREDICTOR
	tbz	x22, #55, 1f
	bl	do_el0_irq_bp_hardening
1:
#endif
	irq_handler

#ifdef CONFIG_TRACE_IRQFLAGS
	bl	trace_hardirqs_on
#endif
	b	ret_to_user
ENDPROC(el0_irq)
/*
 * This is the fast syscall return path. We do as little as possible here,
 * and this includes saving x0 back into the kernel stack.
 */
ret_fast_syscall:
	disable_irq				// disable interrupts
	str	x0, [sp, #S_X0]			// returned x0
	ldr	x1, [tsk, #TSK_TI_FLAGS]	// re-check for syscall tracing
	and	x2, x1, #_TIF_SYSCALL_WORK
	cbnz	x2, ret_fast_syscall_trace
	and	x2, x1, #_TIF_WORK_MASK
	cbnz	x2, work_pending
	enable_step_tsk x1, x2
	kernel_exit 0
ret_fast_syscall_trace:
	enable_irq				// enable interrupts
	b	__sys_trace_return_skipped	// we already saved x0
/*
 * Ok, we need to do extra processing, enter the slow path.
 */
work_pending:
	mov	x0, sp				// 'regs'
	bl	do_notify_resume
#ifdef CONFIG_TRACE_IRQFLAGS
	bl	trace_hardirqs_on		// enabled while in userspace
#endif
	ldr	x1, [tsk, #TSK_TI_FLAGS]	// re-check for single-step
	b	finish_ret_to_user
908 * "slow" syscall return path.
911 disable_irq // disable interrupts
912 ldr x1, [tsk, #TSK_TI_FLAGS]
913 and x2, x1, #_TIF_WORK_MASK
914 cbnz x2, work_pending
916 enable_step_tsk x1, x2
/*
 * SVC handler.
 */
	.align	6
el0_svc:
	adrp	stbl, sys_call_table		// load syscall table pointer
	mov	wscno, w8			// syscall number in w8
	mov	wsc_nr, #__NR_syscalls
el0_svc_naked:					// compat entry point
	stp	x0, xscno, [sp, #S_ORIG_X0]	// save the original x0 and syscall number
	enable_dbg_and_irq
	ct_user_exit 1

	ldr	x16, [tsk, #TSK_TI_FLAGS]	// check for syscall hooks
	tst	x16, #_TIF_SYSCALL_WORK
	b.ne	__sys_trace
	cmp	wscno, wsc_nr			// check upper syscall limit
	b.hs	ni_sys
	mask_nospec64 xscno, xsc_nr, x19	// enforce bounds for syscall number
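	/*
	 * For reference, mask_nospec64 (asm/assembler.h in trees carrying
	 * this Spectre-v1 backport) expands to roughly:
	 *
	 *	sub	\tmp, \idx, \limit	// negative iff idx < limit
	 *	bic	\tmp, \tmp, \idx	// drop the sign bit if idx is huge
	 *	asr	\tmp, \tmp, #63		// all-ones iff idx is in bounds
	 *	and	\idx, \idx, \tmp	// idx in bounds, else 0
	 *	csdb				// speculation barrier
	 *
	 * so even a mispredicted bounds check cannot index past the end of
	 * the syscall table.
	 */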
	ldr	x16, [stbl, xscno, lsl #3]	// address in the syscall table
	blr	x16				// call sys_* routine
	b	ret_fast_syscall
ni_sys:
	mov	x0, sp
	bl	do_ni_syscall
	b	ret_fast_syscall
ENDPROC(el0_svc)
	/*
	 * This is the really slow path. We're going to be doing context
	 * switches, and waiting for our parent to respond.
	 */
__sys_trace:
	cmp	wscno, #NO_SYSCALL		// user-issued syscall(-1)?
	b.ne	1f
	mov	x0, #-ENOSYS			// set default errno if so
	str	x0, [sp, #S_X0]
1:	mov	x0, sp
	bl	syscall_trace_enter
	cmp	w0, #NO_SYSCALL			// skip the syscall?
	b.eq	__sys_trace_return_skipped
	mov	wscno, w0			// syscall number (possibly new)
	mov	x1, sp				// pointer to regs
	cmp	wscno, wsc_nr			// check upper syscall limit
	b.hs	__ni_sys_trace
	ldp	x0, x1, [sp]			// restore the syscall args
	ldp	x2, x3, [sp, #S_X2]
	ldp	x4, x5, [sp, #S_X4]
	ldp	x6, x7, [sp, #S_X6]
	ldr	x16, [stbl, xscno, lsl #3]	// address in the syscall table
	blr	x16				// call sys_* routine

__sys_trace_return:
	str	x0, [sp, #S_X0]			// save returned x0
__sys_trace_return_skipped:
	mov	x0, sp
	bl	syscall_trace_exit
	b	ret_to_user
	.popsection				// .entry.text
	// Move from tramp_pg_dir to swapper_pg_dir
	.macro tramp_map_kernel, tmp
	mrs	\tmp, ttbr1_el1
	sub	\tmp, \tmp, #(SWAPPER_DIR_SIZE + RESERVED_TTBR0_SIZE)
	bic	\tmp, \tmp, #USER_ASID_FLAG
	msr	ttbr1_el1, \tmp
#ifdef CONFIG_QCOM_FALKOR_ERRATUM_1003
alternative_if ARM64_WORKAROUND_QCOM_FALKOR_E1003
	/* ASID already in \tmp[63:48] */
	movk	\tmp, #:abs_g2_nc:(TRAMP_VALIAS >> 12)
	movk	\tmp, #:abs_g1_nc:(TRAMP_VALIAS >> 12)
	/* 2MB boundary containing the vectors, so we nobble the walk cache */
	movk	\tmp, #:abs_g0_nc:((TRAMP_VALIAS & ~(SZ_2M - 1)) >> 12)
	isb
	tlbi	vae1, \tmp
	dsb	nsh
alternative_else_nop_endif
#endif /* CONFIG_QCOM_FALKOR_ERRATUM_1003 */
	.endm
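	/*
	 * For context: the movk trio builds (TRAMP_VALIAS >> 12) in
	 * \tmp[47:0] (the g0 field taken from the 2MB-aligned address),
	 * leaving the ASID set up above in \tmp[63:48] -- the operand
	 * format of "tlbi vae1" -- so the walk-cache entry covering the
	 * trampoline vectors is invalidated.
	 */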
	// Move from swapper_pg_dir to tramp_pg_dir
	.macro tramp_unmap_kernel, tmp
	mrs	\tmp, ttbr1_el1
	add	\tmp, \tmp, #(SWAPPER_DIR_SIZE + RESERVED_TTBR0_SIZE)
	orr	\tmp, \tmp, #USER_ASID_FLAG
	msr	ttbr1_el1, \tmp
	/*
	 * We avoid running the post_ttbr_update_workaround here because
	 * it's only needed by Cavium ThunderX, which requires KPTI to be
	 * disabled.
	 */
	.endm
	.macro tramp_data_page	dst
	adr_l	\dst, .entry.tramp.text
	sub	\dst, \dst, PAGE_SIZE
	.endm

	.macro tramp_data_read_var	dst, var
#ifdef CONFIG_RANDOMIZE_BASE
	tramp_data_page		\dst
	add	\dst, \dst, #:lo12:__entry_tramp_data_\var
	ldr	\dst, [\dst]
#else
	ldr	\dst, =\var
#endif
	.endm
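	/*
	 * With CONFIG_RANDOMIZE_BASE the value is loaded through the aliased
	 * data page sitting immediately below .entry.tramp.text, since a
	 * literal in the user-mapped trampoline text could leak the
	 * randomized kernel address; without KASLR an ordinary literal load
	 * is fine.
	 */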
#define BHB_MITIGATION_NONE	0
#define BHB_MITIGATION_LOOP	1
#define BHB_MITIGATION_FW	2
#define BHB_MITIGATION_INSN	3
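/*
 * Mitigation summary (for orientation): LOOP runs the
 * __mitigate_spectre_bhb_loop branch sequence, FW issues the SMCCC
 * ARCH_WORKAROUND_3 firmware call, and INSN executes the CLRBHB
 * instruction; NONE applies no branch-history clearing.
 */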
	.macro tramp_ventry, vector_start, regsize, kpti, bhb
	.align	7
1:
	.if	\regsize == 64
	msr	tpidrro_el0, x30	// Restored in kernel_ventry
	.endif

	.if	\bhb == BHB_MITIGATION_LOOP
	/*
	 * This sequence must appear before the first indirect branch, i.e. the
	 * ret out of tramp_ventry. It appears here because x30 is free.
	 */
	__mitigate_spectre_bhb_loop	x30
	.endif // \bhb == BHB_MITIGATION_LOOP

	.if	\bhb == BHB_MITIGATION_INSN
	clearbhb
	isb
	.endif // \bhb == BHB_MITIGATION_INSN
	.if	\kpti == 1
	/*
	 * Defend against branch aliasing attacks by pushing a dummy
	 * entry onto the return stack and using a RET instruction to
	 * enter the full-fat kernel vectors.
	 */
	bl	2f
	b	.
2:
	tramp_map_kernel	x30
alternative_insn isb, nop, ARM64_WORKAROUND_QCOM_FALKOR_E1003
	tramp_data_read_var	x30, vectors
	prfm	plil1strm, [x30, #(1b - \vector_start)]
	msr	vbar_el1, x30
	isb
	.else
	ldr	x30, =vectors
	.endif // \kpti == 1
	.if	\bhb == BHB_MITIGATION_FW
	/*
	 * The firmware sequence must appear before the first indirect branch,
	 * i.e. the ret out of tramp_ventry. But it also needs the stack to be
	 * mapped to save/restore the registers the SMC clobbers.
	 */
	__mitigate_spectre_bhb_fw
	.endif // \bhb == BHB_MITIGATION_FW

	add	x30, x30, #(1b - \vector_start + 4)
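	/*
	 * The +4 skips the leading "b .Lskip_tramp_vectors_cleanup" of the
	 * matching EL0 kernel_ventry slot, so the x30/tpidrro_el0 cleanup
	 * sequence at the top of kernel_ventry runs on entry (see the
	 * comment there).
	 */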
	ret
.org 1b + 128	// Did we overflow the ventry slot?
	.endm
	.macro tramp_exit, regsize = 64
	tramp_data_read_var	x30, this_cpu_vector
alternative_if_not ARM64_HAS_VIRT_HOST_EXTN
	mrs	x29, tpidr_el1
alternative_else
	mrs	x29, tpidr_el2
alternative_endif
	ldr	x30, [x30, x29]
	msr	vbar_el1, x30
	ldr	lr, [sp, #S_LR]
	tramp_unmap_kernel	x29
	.if	\regsize == 64
	mrs	x29, far_el1
	.endif
	add	sp, sp, #S_FRAME_SIZE		// restore sp
	eret
	.endm
	.macro	generate_tramp_vector,	kpti, bhb
.Lvector_start\@:
	.space	0x400

	.rept	4
	tramp_ventry	.Lvector_start\@, 64, \kpti, \bhb
	.endr
	.rept	4
	tramp_ventry	.Lvector_start\@, 32, \kpti, \bhb
	.endr
	.endm
#ifdef CONFIG_UNMAP_KERNEL_AT_EL0
/*
 * Exception vectors trampoline.
 * The order must match __bp_harden_el1_vectors and the
 * arm64_bp_harden_el1_vectors enum.
 */
	.pushsection ".entry.tramp.text", "ax"
	.align	11
ENTRY(tramp_vectors)
#ifdef CONFIG_MITIGATE_SPECTRE_BRANCH_HISTORY
	generate_tramp_vector	kpti=1, bhb=BHB_MITIGATION_LOOP
	generate_tramp_vector	kpti=1, bhb=BHB_MITIGATION_FW
	generate_tramp_vector	kpti=1, bhb=BHB_MITIGATION_INSN
#endif /* CONFIG_MITIGATE_SPECTRE_BRANCH_HISTORY */
	generate_tramp_vector	kpti=1, bhb=BHB_MITIGATION_NONE
END(tramp_vectors)
ENTRY(tramp_exit_native)
	tramp_exit
END(tramp_exit_native)

ENTRY(tramp_exit_compat)
	tramp_exit	32
END(tramp_exit_compat)

	.ltorg
	.popsection				// .entry.tramp.text
#ifdef CONFIG_RANDOMIZE_BASE
	.pushsection ".rodata", "a"
	.align PAGE_SHIFT
	.globl	__entry_tramp_data_start
__entry_tramp_data_start:
__entry_tramp_data_vectors:
	.quad	vectors
#ifdef CONFIG_ARM_SDE_INTERFACE
__entry_tramp_data___sdei_asm_trampoline_next_handler:
	.quad	__sdei_asm_handler
#endif /* CONFIG_ARM_SDE_INTERFACE */
__entry_tramp_data_this_cpu_vector:
	.quad	this_cpu_vector
	.popsection				// .rodata
#endif /* CONFIG_RANDOMIZE_BASE */
#endif /* CONFIG_UNMAP_KERNEL_AT_EL0 */
/*
 * Exception vectors for spectre mitigations on entry from EL1 when
 * kpti is not in use.
 */
	.macro generate_el1_vector, bhb
.Lvector_start\@:
	kernel_ventry	1, sync_invalid			// Synchronous EL1t
	kernel_ventry	1, irq_invalid			// IRQ EL1t
	kernel_ventry	1, fiq_invalid			// FIQ EL1t
	kernel_ventry	1, error_invalid		// Error EL1t

	kernel_ventry	1, sync				// Synchronous EL1h
	kernel_ventry	1, irq				// IRQ EL1h
	kernel_ventry	1, fiq_invalid			// FIQ EL1h
	kernel_ventry	1, error_invalid		// Error EL1h

	.rept	4
	tramp_ventry	.Lvector_start\@, 64, 0, \bhb
	.endr
	.rept	4
	tramp_ventry	.Lvector_start\@, 32, 0, \bhb
	.endr
	.endm
/* The order must match tramp_vecs and the arm64_bp_harden_el1_vectors enum. */
	.pushsection ".entry.text", "ax"
	.align	11
ENTRY(__bp_harden_el1_vectors)
#ifdef CONFIG_MITIGATE_SPECTRE_BRANCH_HISTORY
	generate_el1_vector	bhb=BHB_MITIGATION_LOOP
	generate_el1_vector	bhb=BHB_MITIGATION_FW
	generate_el1_vector	bhb=BHB_MITIGATION_INSN
#endif /* CONFIG_MITIGATE_SPECTRE_BRANCH_HISTORY */
END(__bp_harden_el1_vectors)
	.popsection
/*
 * Special system call wrappers.
 */
ENTRY(sys_rt_sigreturn_wrapper)
	mov	x0, sp
	b	sys_rt_sigreturn
ENDPROC(sys_rt_sigreturn_wrapper)
/*
 * Register switch for AArch64. The callee-saved registers need to be saved
 * and restored. On entry:
 *   x0 = previous task_struct (must be preserved across the switch)
 *   x1 = next task_struct
 * Previous and next are guaranteed not to be the same.
 */
ENTRY(cpu_switch_to)
	mov	x10, #THREAD_CPU_CONTEXT
	add	x8, x0, x10
	mov	x9, sp
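	// Layout assumed here: struct cpu_context (thread.cpu_context within
	// task_struct, at offset THREAD_CPU_CONTEXT) holds x19-x28, fp, sp
	// and pc, in that order; lr is stored below as the resume pc.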
	stp	x19, x20, [x8], #16		// store callee-saved registers
	stp	x21, x22, [x8], #16
	stp	x23, x24, [x8], #16
	stp	x25, x26, [x8], #16
	stp	x27, x28, [x8], #16
	stp	x29, x9, [x8], #16
	str	lr, [x8]
	add	x8, x1, x10
	ldp	x19, x20, [x8], #16		// restore callee-saved registers
	ldp	x21, x22, [x8], #16
	ldp	x23, x24, [x8], #16
	ldp	x25, x26, [x8], #16
	ldp	x27, x28, [x8], #16
	ldp	x29, x9, [x8], #16
	ldr	lr, [x8]
	mov	sp, x9
	msr	sp_el0, x1
	ret
ENDPROC(cpu_switch_to)
NOKPROBE(cpu_switch_to)
/*
 * This is how we return from a fork.
 */
ENTRY(ret_from_fork)
	bl	schedule_tail
	cbz	x19, 1f				// not a kernel thread
	mov	x0, x20
	blr	x19
1:	get_thread_info tsk
	b	ret_to_user
ENDPROC(ret_from_fork)
NOKPROBE(ret_from_fork)