// SPDX-License-Identifier: GPL-2.0
/*
 * Exception handling code
 *
 * Copyright (C) 2019 ARM Ltd.
 */

#include <linux/context_tracking.h>
#include <linux/kasan.h>
#include <linux/linkage.h>
#include <linux/lockdep.h>
#include <linux/ptrace.h>
#include <linux/sched.h>
#include <linux/sched/debug.h>
#include <linux/thread_info.h>

#include <asm/cpufeature.h>
#include <asm/daifflags.h>
#include <asm/esr.h>
#include <asm/exception.h>
#include <asm/irq_regs.h>
#include <asm/kprobes.h>
#include <asm/mmu.h>
#include <asm/processor.h>
#include <asm/sdei.h>
#include <asm/stacktrace.h>
#include <asm/sysreg.h>
#include <asm/system_misc.h>

/*
 * Handle IRQ/context state management when entering from kernel mode.
 * Before this function is called it is not safe to call regular kernel code,
 * instrumentable code, or any code which may trigger an exception.
 *
 * This is intended to match the logic in irqentry_enter(), handling the kernel
 * mode transitions only.
 */
static __always_inline void __enter_from_kernel_mode(struct pt_regs *regs)
{
        regs->exit_rcu = false;

        if (!IS_ENABLED(CONFIG_TINY_RCU) && is_idle_task(current)) {
                lockdep_hardirqs_off(CALLER_ADDR0);
                ct_irq_enter();
                trace_hardirqs_off_finish();

                regs->exit_rcu = true;
                return;
        }

        lockdep_hardirqs_off(CALLER_ADDR0);
        rcu_irq_enter_check_tick();
        trace_hardirqs_off_finish();
}

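/*
 * Once the state transition is complete we must also report any pending
 * asynchronous MTE tag check fault recorded in TFSR_EL1, and clear
 * PSTATE.TCO, which the architecture sets on exception entry.
 */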
static void noinstr enter_from_kernel_mode(struct pt_regs *regs)
{
        __enter_from_kernel_mode(regs);
        mte_check_tfsr_entry();
        mte_disable_tco_entry(current);
}

/*
 * Handle IRQ/context state management when exiting to kernel mode.
 * After this function returns it is not safe to call regular kernel code,
 * instrumentable code, or any code which may trigger an exception.
 *
 * This is intended to match the logic in irqentry_exit(), handling the kernel
 * mode transitions only, and with preemption handled elsewhere.
 */
static __always_inline void __exit_to_kernel_mode(struct pt_regs *regs)
{
        lockdep_assert_irqs_disabled();

        if (interrupts_enabled(regs)) {
                if (regs->exit_rcu) {
                        trace_hardirqs_on_prepare();
                        lockdep_hardirqs_on_prepare();
                        ct_irq_exit();
                        lockdep_hardirqs_on(CALLER_ADDR0);
                        return;
                }

                trace_hardirqs_on();
        } else {
                if (regs->exit_rcu)
                        ct_irq_exit();
        }
}

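/*
 * The TFSR check must run first, while it is still safe to call
 * instrumentable code; __exit_to_kernel_mode() tears that state down.
 */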
static void noinstr exit_to_kernel_mode(struct pt_regs *regs)
{
        mte_check_tfsr_exit();
        __exit_to_kernel_mode(regs);
}

/*
 * Handle IRQ/context state management when entering from user mode.
 * Before this function is called it is not safe to call regular kernel code,
 * instrumentable code, or any code which may trigger an exception.
 */
static __always_inline void __enter_from_user_mode(void)
{
        lockdep_hardirqs_off(CALLER_ADDR0);
        CT_WARN_ON(ct_state() != CONTEXT_USER);
        user_exit_irqoff();
        trace_hardirqs_off_finish();
        mte_disable_tco_entry(current);
}

static __always_inline void enter_from_user_mode(struct pt_regs *regs)
{
        __enter_from_user_mode();
}

/*
 * Handle IRQ/context state management when exiting to user mode.
 * After this function returns it is not safe to call regular kernel code,
 * instrumentable code, or any code which may trigger an exception.
 */
static __always_inline void __exit_to_user_mode(void)
{
        trace_hardirqs_on_prepare();
        lockdep_hardirqs_on_prepare();
        user_enter_irqoff();
        lockdep_hardirqs_on(CALLER_ADDR0);
}

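/*
 * All DAIF exceptions are masked before the work flags are read, so that no
 * new work can be queued between checking the flags and returning to EL0.
 */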
static __always_inline void exit_to_user_mode_prepare(struct pt_regs *regs)
{
        unsigned long flags;

        local_daif_mask();

        flags = read_thread_flags();
        if (unlikely(flags & _TIF_WORK_MASK))
                do_notify_resume(regs, flags);

        lockdep_sys_exit();
}

static __always_inline void exit_to_user_mode(struct pt_regs *regs)
{
        exit_to_user_mode_prepare(regs);
        mte_check_tfsr_exit();
        __exit_to_user_mode();
}

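/* Called from the EL0 return path in entry.S before restoring registers. */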
asmlinkage void noinstr asm_exit_to_user_mode(struct pt_regs *regs)
{
        exit_to_user_mode(regs);
}

/*
 * Handle IRQ/context state management when entering an NMI from user/kernel
 * mode. Before this function is called it is not safe to call regular kernel
 * code, instrumentable code, or any code which may trigger an exception.
 */
static void noinstr arm64_enter_nmi(struct pt_regs *regs)
{
        regs->lockdep_hardirqs = lockdep_hardirqs_enabled();

        __nmi_enter();
        lockdep_hardirqs_off(CALLER_ADDR0);
        lockdep_hardirq_enter();
        ct_nmi_enter();

        trace_hardirqs_off_finish();
        ftrace_nmi_enter();
}

/*
 * Handle IRQ/context state management when exiting an NMI from user/kernel
 * mode. After this function returns it is not safe to call regular kernel
 * code, instrumentable code, or any code which may trigger an exception.
 */
static void noinstr arm64_exit_nmi(struct pt_regs *regs)
{
        bool restore = regs->lockdep_hardirqs;

        ftrace_nmi_exit();
        if (restore) {
                trace_hardirqs_on_prepare();
                lockdep_hardirqs_on_prepare();
        }

        ct_nmi_exit();
        lockdep_hardirq_exit();
        if (restore)
                lockdep_hardirqs_on(CALLER_ADDR0);
        __nmi_exit();
}

/*
 * Handle IRQ/context state management when entering a debug exception from
 * kernel mode. Before this function is called it is not safe to call regular
 * kernel code, instrumentable code, or any code which may trigger an exception.
 */
static void noinstr arm64_enter_el1_dbg(struct pt_regs *regs)
{
        regs->lockdep_hardirqs = lockdep_hardirqs_enabled();

        lockdep_hardirqs_off(CALLER_ADDR0);
        ct_nmi_enter();

        trace_hardirqs_off_finish();
}

/*
 * Handle IRQ/context state management when exiting a debug exception from
 * kernel mode. After this function returns it is not safe to call regular
 * kernel code, instrumentable code, or any code which may trigger an exception.
 */
static void noinstr arm64_exit_el1_dbg(struct pt_regs *regs)
{
        bool restore = regs->lockdep_hardirqs;

        if (restore) {
                trace_hardirqs_on_prepare();
                lockdep_hardirqs_on_prepare();
        }

        ct_nmi_exit();
        if (restore)
                lockdep_hardirqs_on(CALLER_ADDR0);
}

#ifdef CONFIG_PREEMPT_DYNAMIC
DEFINE_STATIC_KEY_TRUE(sk_dynamic_irqentry_exit_cond_resched);
#define need_irq_preemption() \
        (static_branch_unlikely(&sk_dynamic_irqentry_exit_cond_resched))
#else
#define need_irq_preemption()   (IS_ENABLED(CONFIG_PREEMPTION))
#endif

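/*
 * With CONFIG_PREEMPT_DYNAMIC the preemption check below is enabled or
 * disabled at boot via the static key above; otherwise it simply follows
 * CONFIG_PREEMPTION.
 */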
static void __sched arm64_preempt_schedule_irq(void)
{
        if (!need_irq_preemption())
                return;

        /*
         * Note: thread_info::preempt_count includes both thread_info::count
         * and thread_info::need_resched, and is not equivalent to
         * preempt_count().
         */
        if (READ_ONCE(current_thread_info()->preempt_count) != 0)
                return;

        /*
         * DAIF.DA are cleared at the start of IRQ/FIQ handling, and when GIC
         * priority masking is used the GIC irqchip driver will clear DAIF.IF
         * using gic_arch_enable_irqs() for normal IRQs. If anything is set in
         * DAIF we must have handled an NMI, so skip preemption.
         */
        if (system_uses_irq_prio_masking() && read_sysreg(daif))
                return;

        /*
         * Preempting a task from an IRQ means we leave copies of PSTATE
         * on the stack. cpufeature's enable calls may modify PSTATE, but
         * resuming one of these preempted tasks would undo those changes.
         *
         * Only allow a task to be preempted once cpufeatures have been
         * enabled.
         */
        if (system_capabilities_finalized())
                preempt_schedule_irq();
}

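/*
 * If the interrupt was taken on a task stack, run the handler on the
 * per-CPU IRQ stack; otherwise we are already off the task stack and can
 * invoke the handler directly.
 */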
static void do_interrupt_handler(struct pt_regs *regs,
                                 void (*handler)(struct pt_regs *))
{
        struct pt_regs *old_regs = set_irq_regs(regs);

        if (on_thread_stack())
                call_on_irq_stack(regs, handler);
        else
                handler(regs);

        set_irq_regs(old_regs);
}

extern void (*handle_arch_irq)(struct pt_regs *);
extern void (*handle_arch_fiq)(struct pt_regs *);

static void noinstr __panic_unhandled(struct pt_regs *regs, const char *vector,
                                      unsigned long esr)
{
        arm64_enter_nmi(regs);

        console_verbose();

        pr_crit("Unhandled %s exception on CPU%d, ESR 0x%016lx -- %s\n",
                vector, smp_processor_id(), esr,
                esr_get_class_string(esr));

        __show_regs(regs);
        panic("Unhandled exception");
}

#define UNHANDLED(el, regsize, vector)                                          \
asmlinkage void noinstr el##_##regsize##_##vector##_handler(struct pt_regs *regs) \
{                                                                               \
        const char *desc = #regsize "-bit " #el " " #vector;                    \
        __panic_unhandled(regs, desc, read_sysreg(esr_el1));                    \
}

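/*
 * Cortex-A76 erratum 1463225: software step around an SVC may prevent
 * interrupts from being recognised. The workaround takes a dummy
 * single-step exception on the syscall entry path; see the handler
 * comments below for the details.
 */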
#ifdef CONFIG_ARM64_ERRATUM_1463225
static DEFINE_PER_CPU(int, __in_cortex_a76_erratum_1463225_wa);

static void cortex_a76_erratum_1463225_svc_handler(void)
{
        u32 reg, val;

        if (!unlikely(test_thread_flag(TIF_SINGLESTEP)))
                return;

        if (!unlikely(this_cpu_has_cap(ARM64_WORKAROUND_1463225)))
                return;

        __this_cpu_write(__in_cortex_a76_erratum_1463225_wa, 1);
        reg = read_sysreg(mdscr_el1);
        val = reg | DBG_MDSCR_SS | DBG_MDSCR_KDE;
        write_sysreg(val, mdscr_el1);
        asm volatile("msr daifclr, #8");
        isb();

        /* We will have taken a single-step exception by this point */

        write_sysreg(reg, mdscr_el1);
        __this_cpu_write(__in_cortex_a76_erratum_1463225_wa, 0);
}

static __always_inline bool
cortex_a76_erratum_1463225_debug_handler(struct pt_regs *regs)
{
        if (!__this_cpu_read(__in_cortex_a76_erratum_1463225_wa))
                return false;

        /*
         * We've taken a dummy step exception from the kernel to ensure
         * that interrupts are re-enabled on the syscall path. Return back
         * to cortex_a76_erratum_1463225_svc_handler() with debug exceptions
         * masked so that we can safely restore the mdscr and get on with
         * handling the syscall.
         */
        regs->pstate |= PSR_D_BIT;
        return true;
}
#else /* CONFIG_ARM64_ERRATUM_1463225 */
static void cortex_a76_erratum_1463225_svc_handler(void) { }
static bool cortex_a76_erratum_1463225_debug_handler(struct pt_regs *regs)
{
        return false;
}
#endif /* CONFIG_ARM64_ERRATUM_1463225 */

/*
 * As per the ABI, exit SME streaming mode and clear the SVE state not
 * shared with FPSIMD on syscall entry.
 */
static inline void fp_user_discard(void)
{
        /*
         * If SME is active then exit streaming mode. If ZA is active
         * then flush the SVE registers but leave userspace access to
         * both SVE and SME enabled, otherwise disable SME for the
         * task and fall through to disabling SVE too. This means
         * that after a syscall we never have any streaming mode
         * register state to track; if this changes, the KVM code will
         * need updating.
         */
        if (system_supports_sme())
                sme_smstop_sm();

        if (!system_supports_sve())
                return;

        if (test_thread_flag(TIF_SVE)) {
                unsigned int sve_vq_minus_one;

                sve_vq_minus_one = sve_vq_from_vl(task_get_sve_vl(current)) - 1;
                sve_flush_live(true, sve_vq_minus_one);
        }
}

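/*
 * The kernel runs on SP_EL1 (EL1h); taking any exception from EL1t would
 * indicate a serious bug, so those vectors simply panic.
 */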
UNHANDLED(el1t, 64, sync)
UNHANDLED(el1t, 64, irq)
UNHANDLED(el1t, 64, fiq)
UNHANDLED(el1t, 64, error)

static void noinstr el1_abort(struct pt_regs *regs, unsigned long esr)
{
        unsigned long far = read_sysreg(far_el1);

        enter_from_kernel_mode(regs);
        local_daif_inherit(regs);
        do_mem_abort(far, esr, regs);
        local_daif_mask();
        exit_to_kernel_mode(regs);
}

static void noinstr el1_pc(struct pt_regs *regs, unsigned long esr)
{
        unsigned long far = read_sysreg(far_el1);

        enter_from_kernel_mode(regs);
        local_daif_inherit(regs);
        do_sp_pc_abort(far, esr, regs);
        local_daif_mask();
        exit_to_kernel_mode(regs);
}

static void noinstr el1_undef(struct pt_regs *regs, unsigned long esr)
{
        enter_from_kernel_mode(regs);
        local_daif_inherit(regs);
        do_el1_undef(regs, esr);
        local_daif_mask();
        exit_to_kernel_mode(regs);
}

static void noinstr el1_bti(struct pt_regs *regs, unsigned long esr)
{
        enter_from_kernel_mode(regs);
        local_daif_inherit(regs);
        do_el1_bti(regs, esr);
        local_daif_mask();
        exit_to_kernel_mode(regs);
}

static void noinstr el1_dbg(struct pt_regs *regs, unsigned long esr)
{
        unsigned long far = read_sysreg(far_el1);

        arm64_enter_el1_dbg(regs);
        if (!cortex_a76_erratum_1463225_debug_handler(regs))
                do_debug_exception(far, esr, regs);
        arm64_exit_el1_dbg(regs);
}

static void noinstr el1_fpac(struct pt_regs *regs, unsigned long esr)
{
        enter_from_kernel_mode(regs);
        local_daif_inherit(regs);
        do_el1_fpac(regs, esr);
        local_daif_mask();
        exit_to_kernel_mode(regs);
}

asmlinkage void noinstr el1h_64_sync_handler(struct pt_regs *regs)
{
        unsigned long esr = read_sysreg(esr_el1);

        switch (ESR_ELx_EC(esr)) {
        case ESR_ELx_EC_DABT_CUR:
        case ESR_ELx_EC_IABT_CUR:
                el1_abort(regs, esr);
                break;
        /*
         * We don't handle ESR_ELx_EC_SP_ALIGN, since we will have hit a
         * recursive exception when trying to push the initial pt_regs.
         */
        case ESR_ELx_EC_PC_ALIGN:
                el1_pc(regs, esr);
                break;
        case ESR_ELx_EC_SYS64:
        case ESR_ELx_EC_UNKNOWN:
                el1_undef(regs, esr);
                break;
        case ESR_ELx_EC_BTI:
                el1_bti(regs, esr);
                break;
        case ESR_ELx_EC_BREAKPT_CUR:
        case ESR_ELx_EC_SOFTSTP_CUR:
        case ESR_ELx_EC_WATCHPT_CUR:
        case ESR_ELx_EC_BRK64:
                el1_dbg(regs, esr);
                break;
        case ESR_ELx_EC_FPAC:
                el1_fpac(regs, esr);
                break;
        default:
                __panic_unhandled(regs, "64-bit el1h sync", esr);
        }
}

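/*
 * Under pseudo-NMI, an interrupt taken while the interrupted context had
 * IRQs masked can only be an NMI, so it follows the NMI entry/exit
 * protocol instead of the normal IRQ one.
 */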
static __always_inline void __el1_pnmi(struct pt_regs *regs,
                                       void (*handler)(struct pt_regs *))
{
        arm64_enter_nmi(regs);
        do_interrupt_handler(regs, handler);
        arm64_exit_nmi(regs);
}

static __always_inline void __el1_irq(struct pt_regs *regs,
                                      void (*handler)(struct pt_regs *))
{
        enter_from_kernel_mode(regs);

        irq_enter_rcu();
        do_interrupt_handler(regs, handler);
        irq_exit_rcu();

        arm64_preempt_schedule_irq();

        exit_to_kernel_mode(regs);
}

static void noinstr el1_interrupt(struct pt_regs *regs,
                                  void (*handler)(struct pt_regs *))
{
        write_sysreg(DAIF_PROCCTX_NOIRQ, daif);

        if (IS_ENABLED(CONFIG_ARM64_PSEUDO_NMI) && !interrupts_enabled(regs))
                __el1_pnmi(regs, handler);
        else
                __el1_irq(regs, handler);
}

asmlinkage void noinstr el1h_64_irq_handler(struct pt_regs *regs)
{
        el1_interrupt(regs, handle_arch_irq);
}

asmlinkage void noinstr el1h_64_fiq_handler(struct pt_regs *regs)
{
        el1_interrupt(regs, handle_arch_fiq);
}

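/*
 * SErrors are NMI-like: they can be taken in regions where normal
 * interrupts are masked, so they use the NMI entry/exit protocol.
 */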
asmlinkage void noinstr el1h_64_error_handler(struct pt_regs *regs)
{
        unsigned long esr = read_sysreg(esr_el1);

        local_daif_restore(DAIF_ERRCTX);
        arm64_enter_nmi(regs);
        do_serror(regs, esr);
        arm64_exit_nmi(regs);
}

static void noinstr el0_da(struct pt_regs *regs, unsigned long esr)
{
        unsigned long far = read_sysreg(far_el1);

        enter_from_user_mode(regs);
        local_daif_restore(DAIF_PROCCTX);
        do_mem_abort(far, esr, regs);
        exit_to_user_mode(regs);
}

static void noinstr el0_ia(struct pt_regs *regs, unsigned long esr)
{
        unsigned long far = read_sysreg(far_el1);

        /*
         * We've taken an instruction abort from userspace and not yet
         * re-enabled IRQs. If the address is a kernel address, apply
         * BP hardening prior to enabling IRQs and preemption.
         */
        if (!is_ttbr0_addr(far))
                arm64_apply_bp_hardening();

        enter_from_user_mode(regs);
        local_daif_restore(DAIF_PROCCTX);
        do_mem_abort(far, esr, regs);
        exit_to_user_mode(regs);
}

static void noinstr el0_fpsimd_acc(struct pt_regs *regs, unsigned long esr)
{
        enter_from_user_mode(regs);
        local_daif_restore(DAIF_PROCCTX);
        do_fpsimd_acc(esr, regs);
        exit_to_user_mode(regs);
}

static void noinstr el0_sve_acc(struct pt_regs *regs, unsigned long esr)
{
        enter_from_user_mode(regs);
        local_daif_restore(DAIF_PROCCTX);
        do_sve_acc(esr, regs);
        exit_to_user_mode(regs);
}

static void noinstr el0_sme_acc(struct pt_regs *regs, unsigned long esr)
{
        enter_from_user_mode(regs);
        local_daif_restore(DAIF_PROCCTX);
        do_sme_acc(esr, regs);
        exit_to_user_mode(regs);
}

static void noinstr el0_fpsimd_exc(struct pt_regs *regs, unsigned long esr)
{
        enter_from_user_mode(regs);
        local_daif_restore(DAIF_PROCCTX);
        do_fpsimd_exc(esr, regs);
        exit_to_user_mode(regs);
}

static void noinstr el0_sys(struct pt_regs *regs, unsigned long esr)
{
        enter_from_user_mode(regs);
        local_daif_restore(DAIF_PROCCTX);
        do_el0_sys(esr, regs);
        exit_to_user_mode(regs);
}

static void noinstr el0_pc(struct pt_regs *regs, unsigned long esr)
{
        unsigned long far = read_sysreg(far_el1);

        if (!is_ttbr0_addr(instruction_pointer(regs)))
                arm64_apply_bp_hardening();

        enter_from_user_mode(regs);
        local_daif_restore(DAIF_PROCCTX);
        do_sp_pc_abort(far, esr, regs);
        exit_to_user_mode(regs);
}

static void noinstr el0_sp(struct pt_regs *regs, unsigned long esr)
{
        enter_from_user_mode(regs);
        local_daif_restore(DAIF_PROCCTX);
        do_sp_pc_abort(regs->sp, esr, regs);
        exit_to_user_mode(regs);
}

static void noinstr el0_undef(struct pt_regs *regs, unsigned long esr)
{
        enter_from_user_mode(regs);
        local_daif_restore(DAIF_PROCCTX);
        do_el0_undef(regs, esr);
        exit_to_user_mode(regs);
}

static void noinstr el0_bti(struct pt_regs *regs)
{
        enter_from_user_mode(regs);
        local_daif_restore(DAIF_PROCCTX);
        do_el0_bti(regs);
        exit_to_user_mode(regs);
}

static void noinstr el0_mops(struct pt_regs *regs, unsigned long esr)
{
        enter_from_user_mode(regs);
        local_daif_restore(DAIF_PROCCTX);
        do_el0_mops(regs, esr);
        exit_to_user_mode(regs);
}

static void noinstr el0_inv(struct pt_regs *regs, unsigned long esr)
{
        enter_from_user_mode(regs);
        local_daif_restore(DAIF_PROCCTX);
        bad_el0_sync(regs, 0, esr);
        exit_to_user_mode(regs);
}

static void noinstr el0_dbg(struct pt_regs *regs, unsigned long esr)
{
        /* Only watchpoints write FAR_EL1, otherwise it's UNKNOWN */
        unsigned long far = read_sysreg(far_el1);

        enter_from_user_mode(regs);
        do_debug_exception(far, esr, regs);
        local_daif_restore(DAIF_PROCCTX);
        exit_to_user_mode(regs);
}

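/*
 * Syscall entry applies the Cortex-A76 erratum workaround and discards
 * transient SVE/SME register state per the ABI before IRQs are unmasked.
 */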
static void noinstr el0_svc(struct pt_regs *regs)
{
        enter_from_user_mode(regs);
        cortex_a76_erratum_1463225_svc_handler();
        fp_user_discard();
        local_daif_restore(DAIF_PROCCTX);
        do_el0_svc(regs);
        exit_to_user_mode(regs);
}

static void noinstr el0_fpac(struct pt_regs *regs, unsigned long esr)
{
        enter_from_user_mode(regs);
        local_daif_restore(DAIF_PROCCTX);
        do_el0_fpac(regs, esr);
        exit_to_user_mode(regs);
}

asmlinkage void noinstr el0t_64_sync_handler(struct pt_regs *regs)
{
        unsigned long esr = read_sysreg(esr_el1);

        switch (ESR_ELx_EC(esr)) {
        case ESR_ELx_EC_SVC64:
                el0_svc(regs);
                break;
        case ESR_ELx_EC_DABT_LOW:
                el0_da(regs, esr);
                break;
        case ESR_ELx_EC_IABT_LOW:
                el0_ia(regs, esr);
                break;
        case ESR_ELx_EC_FP_ASIMD:
                el0_fpsimd_acc(regs, esr);
                break;
        case ESR_ELx_EC_SVE:
                el0_sve_acc(regs, esr);
                break;
        case ESR_ELx_EC_SME:
                el0_sme_acc(regs, esr);
                break;
        case ESR_ELx_EC_FP_EXC64:
                el0_fpsimd_exc(regs, esr);
                break;
        case ESR_ELx_EC_SYS64:
        case ESR_ELx_EC_WFx:
                el0_sys(regs, esr);
                break;
        case ESR_ELx_EC_SP_ALIGN:
                el0_sp(regs, esr);
                break;
        case ESR_ELx_EC_PC_ALIGN:
                el0_pc(regs, esr);
                break;
        case ESR_ELx_EC_UNKNOWN:
                el0_undef(regs, esr);
                break;
        case ESR_ELx_EC_BTI:
                el0_bti(regs);
                break;
        case ESR_ELx_EC_MOPS:
                el0_mops(regs, esr);
                break;
        case ESR_ELx_EC_BREAKPT_LOW:
        case ESR_ELx_EC_SOFTSTP_LOW:
        case ESR_ELx_EC_WATCHPT_LOW:
        case ESR_ELx_EC_BRK64:
                el0_dbg(regs, esr);
                break;
        case ESR_ELx_EC_FPAC:
                el0_fpac(regs, esr);
                break;
        default:
                el0_inv(regs, esr);
        }
}

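/*
 * If the PC we interrupted has bit 55 set it is a TTBR1 (kernel) address,
 * which user code cannot legitimately have been executing from; treat it
 * as potential branch predictor mis-training and apply BP hardening.
 */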
static void noinstr el0_interrupt(struct pt_regs *regs,
                                  void (*handler)(struct pt_regs *))
{
        enter_from_user_mode(regs);

        write_sysreg(DAIF_PROCCTX_NOIRQ, daif);

        if (regs->pc & BIT(55))
                arm64_apply_bp_hardening();

        irq_enter_rcu();
        do_interrupt_handler(regs, handler);
        irq_exit_rcu();

        exit_to_user_mode(regs);
}

static void noinstr __el0_irq_handler_common(struct pt_regs *regs)
{
        el0_interrupt(regs, handle_arch_irq);
}

asmlinkage void noinstr el0t_64_irq_handler(struct pt_regs *regs)
{
        __el0_irq_handler_common(regs);
}

static void noinstr __el0_fiq_handler_common(struct pt_regs *regs)
{
        el0_interrupt(regs, handle_arch_fiq);
}

asmlinkage void noinstr el0t_64_fiq_handler(struct pt_regs *regs)
{
        __el0_fiq_handler_common(regs);
}

static void noinstr __el0_error_handler_common(struct pt_regs *regs)
{
        unsigned long esr = read_sysreg(esr_el1);

        enter_from_user_mode(regs);
        local_daif_restore(DAIF_ERRCTX);
        arm64_enter_nmi(regs);
        do_serror(regs, esr);
        arm64_exit_nmi(regs);
        local_daif_restore(DAIF_PROCCTX);
        exit_to_user_mode(regs);
}

asmlinkage void noinstr el0t_64_error_handler(struct pt_regs *regs)
{
        __el0_error_handler_common(regs);
}

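/*
 * AArch32 (compat) EL0 handlers. Most exception classes reuse the 64-bit
 * el0_* helpers above; only CP15 accesses and the compat syscall entry
 * need dedicated wrappers.
 */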
#ifdef CONFIG_COMPAT
static void noinstr el0_cp15(struct pt_regs *regs, unsigned long esr)
{
        enter_from_user_mode(regs);
        local_daif_restore(DAIF_PROCCTX);
        do_el0_cp15(esr, regs);
        exit_to_user_mode(regs);
}

static void noinstr el0_svc_compat(struct pt_regs *regs)
{
        enter_from_user_mode(regs);
        cortex_a76_erratum_1463225_svc_handler();
        local_daif_restore(DAIF_PROCCTX);
        do_el0_svc_compat(regs);
        exit_to_user_mode(regs);
}

asmlinkage void noinstr el0t_32_sync_handler(struct pt_regs *regs)
{
        unsigned long esr = read_sysreg(esr_el1);

        switch (ESR_ELx_EC(esr)) {
        case ESR_ELx_EC_SVC32:
                el0_svc_compat(regs);
                break;
        case ESR_ELx_EC_DABT_LOW:
                el0_da(regs, esr);
                break;
        case ESR_ELx_EC_IABT_LOW:
                el0_ia(regs, esr);
                break;
        case ESR_ELx_EC_FP_ASIMD:
                el0_fpsimd_acc(regs, esr);
                break;
        case ESR_ELx_EC_FP_EXC32:
                el0_fpsimd_exc(regs, esr);
                break;
        case ESR_ELx_EC_PC_ALIGN:
                el0_pc(regs, esr);
                break;
        case ESR_ELx_EC_UNKNOWN:
        case ESR_ELx_EC_CP14_MR:
        case ESR_ELx_EC_CP14_LS:
        case ESR_ELx_EC_CP14_64:
                el0_undef(regs, esr);
                break;
        case ESR_ELx_EC_CP15_32:
        case ESR_ELx_EC_CP15_64:
                el0_cp15(regs, esr);
                break;
        case ESR_ELx_EC_BREAKPT_LOW:
        case ESR_ELx_EC_SOFTSTP_LOW:
        case ESR_ELx_EC_WATCHPT_LOW:
        case ESR_ELx_EC_BKPT32:
                el0_dbg(regs, esr);
                break;
        default:
                el0_inv(regs, esr);
        }
}

asmlinkage void noinstr el0t_32_irq_handler(struct pt_regs *regs)
{
        __el0_irq_handler_common(regs);
}

asmlinkage void noinstr el0t_32_fiq_handler(struct pt_regs *regs)
{
        __el0_fiq_handler_common(regs);
}

asmlinkage void noinstr el0t_32_error_handler(struct pt_regs *regs)
{
        __el0_error_handler_common(regs);
}

#else /* CONFIG_COMPAT */
UNHANDLED(el0t, 32, sync)
UNHANDLED(el0t, 32, irq)
UNHANDLED(el0t, 32, fiq)
UNHANDLED(el0t, 32, error)
#endif /* CONFIG_COMPAT */

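/*
 * With vmap'd stacks, overflow is detected on exception entry in assembly;
 * the handler then runs on the per-CPU overflow stack and never returns.
 */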
#ifdef CONFIG_VMAP_STACK
asmlinkage void noinstr __noreturn handle_bad_stack(struct pt_regs *regs)
{
        unsigned long esr = read_sysreg(esr_el1);
        unsigned long far = read_sysreg(far_el1);

        arm64_enter_nmi(regs);
        panic_bad_stack(regs, esr, far);
}
#endif /* CONFIG_VMAP_STACK */

#ifdef CONFIG_ARM_SDE_INTERFACE
asmlinkage noinstr unsigned long
__sdei_handler(struct pt_regs *regs, struct sdei_registered_event *arg)
{
        unsigned long ret;

        /*
         * We didn't take an exception to get here, so the HW hasn't
         * set/cleared bits in PSTATE that we may rely on.
         *
         * The original SDEI spec (ARM DEN 0054A) can be read ambiguously as to
         * whether PSTATE bits are inherited unchanged or generated from
         * scratch, and the TF-A implementation always clears PAN and always
         * clears UAO. There are no other known implementations.
         *
         * Subsequent revisions (ARM DEN 0054B) follow the usual rules for how
         * PSTATE is modified upon architectural exceptions, and so PAN is
         * either inherited or set per SCTLR_ELx.SPAN, and UAO is always
         * cleared.
         *
         * We must explicitly reset PAN to the expected state, including
         * clearing it when the host isn't using it, in case a VM had it set.
         */
        if (system_uses_hw_pan())
                set_pstate_pan(1);
        else if (cpu_has_pan())
                set_pstate_pan(0);

        arm64_enter_nmi(regs);
        ret = do_sdei_event(regs, arg);
        arm64_exit_nmi(regs);

        return ret;
}
#endif /* CONFIG_ARM_SDE_INTERFACE */