1 // SPDX-License-Identifier: GPL-2.0
/*
 * Exception handling code
 *
 * Copyright (C) 2019 ARM Ltd.
 */
8 #include <linux/context_tracking.h>
9 #include <linux/ptrace.h>
10 #include <linux/thread_info.h>
12 #include <asm/cpufeature.h>
13 #include <asm/daifflags.h>
15 #include <asm/exception.h>
16 #include <asm/kprobes.h>
18 #include <asm/sysreg.h>
21 * This is intended to match the logic in irqentry_enter(), handling the kernel
22 * mode transitions only.
24 static void noinstr enter_from_kernel_mode(struct pt_regs *regs)
26 regs->exit_rcu = false;
28 if (!IS_ENABLED(CONFIG_TINY_RCU) && is_idle_task(current)) {
29 lockdep_hardirqs_off(CALLER_ADDR0);
31 trace_hardirqs_off_finish();
33 regs->exit_rcu = true;
37 lockdep_hardirqs_off(CALLER_ADDR0);
38 rcu_irq_enter_check_tick();
39 trace_hardirqs_off_finish();
43 * This is intended to match the logic in irqentry_exit(), handling the kernel
44 * mode transitions only, and with preemption handled elsewhere.
46 static void noinstr exit_to_kernel_mode(struct pt_regs *regs)
48 lockdep_assert_irqs_disabled();
50 if (interrupts_enabled(regs)) {
52 trace_hardirqs_on_prepare();
53 lockdep_hardirqs_on_prepare(CALLER_ADDR0);
55 lockdep_hardirqs_on(CALLER_ADDR0);
66 void noinstr arm64_enter_nmi(struct pt_regs *regs)
68 regs->lockdep_hardirqs = lockdep_hardirqs_enabled();
71 lockdep_hardirqs_off(CALLER_ADDR0);
72 lockdep_hardirq_enter();
75 trace_hardirqs_off_finish();
79 void noinstr arm64_exit_nmi(struct pt_regs *regs)
81 bool restore = regs->lockdep_hardirqs;
85 trace_hardirqs_on_prepare();
86 lockdep_hardirqs_on_prepare(CALLER_ADDR0);
90 lockdep_hardirq_exit();
92 lockdep_hardirqs_on(CALLER_ADDR0);
96 asmlinkage void noinstr enter_el1_irq_or_nmi(struct pt_regs *regs)
98 if (IS_ENABLED(CONFIG_ARM64_PSEUDO_NMI) && !interrupts_enabled(regs))
99 arm64_enter_nmi(regs);
101 enter_from_kernel_mode(regs);
104 asmlinkage void noinstr exit_el1_irq_or_nmi(struct pt_regs *regs)
106 if (IS_ENABLED(CONFIG_ARM64_PSEUDO_NMI) && !interrupts_enabled(regs))
107 arm64_exit_nmi(regs);
109 exit_to_kernel_mode(regs);
112 static void noinstr el1_abort(struct pt_regs *regs, unsigned long esr)
114 unsigned long far = read_sysreg(far_el1);
116 enter_from_kernel_mode(regs);
117 local_daif_inherit(regs);
118 far = untagged_addr(far);
119 do_mem_abort(far, esr, regs);
121 exit_to_kernel_mode(regs);
124 static void noinstr el1_pc(struct pt_regs *regs, unsigned long esr)
126 unsigned long far = read_sysreg(far_el1);
128 enter_from_kernel_mode(regs);
129 local_daif_inherit(regs);
130 do_sp_pc_abort(far, esr, regs);
132 exit_to_kernel_mode(regs);
135 static void noinstr el1_undef(struct pt_regs *regs, unsigned long esr)
137 enter_from_kernel_mode(regs);
138 local_daif_inherit(regs);
139 do_el1_undef(regs, esr);
141 exit_to_kernel_mode(regs);
144 static void noinstr el1_bti(struct pt_regs *regs, unsigned long esr)
146 enter_from_kernel_mode(regs);
147 local_daif_inherit(regs);
148 do_el1_bti(regs, esr);
150 exit_to_kernel_mode(regs);
153 static void noinstr el1_inv(struct pt_regs *regs, unsigned long esr)
155 enter_from_kernel_mode(regs);
156 local_daif_inherit(regs);
157 bad_mode(regs, 0, esr);
159 exit_to_kernel_mode(regs);
162 static void noinstr arm64_enter_el1_dbg(struct pt_regs *regs)
164 regs->lockdep_hardirqs = lockdep_hardirqs_enabled();
166 lockdep_hardirqs_off(CALLER_ADDR0);
169 trace_hardirqs_off_finish();
172 static void noinstr arm64_exit_el1_dbg(struct pt_regs *regs)
174 bool restore = regs->lockdep_hardirqs;
177 trace_hardirqs_on_prepare();
178 lockdep_hardirqs_on_prepare(CALLER_ADDR0);
183 lockdep_hardirqs_on(CALLER_ADDR0);
186 static void noinstr el1_dbg(struct pt_regs *regs, unsigned long esr)
188 unsigned long far = read_sysreg(far_el1);
190 arm64_enter_el1_dbg(regs);
191 do_debug_exception(far, esr, regs);
192 arm64_exit_el1_dbg(regs);
195 static void noinstr el1_fpac(struct pt_regs *regs, unsigned long esr)
197 enter_from_kernel_mode(regs);
198 local_daif_inherit(regs);
199 do_el1_fpac(regs, esr);
201 exit_to_kernel_mode(regs);
204 asmlinkage void noinstr el1_sync_handler(struct pt_regs *regs)
206 unsigned long esr = read_sysreg(esr_el1);
208 switch (ESR_ELx_EC(esr)) {
209 case ESR_ELx_EC_DABT_CUR:
210 case ESR_ELx_EC_IABT_CUR:
211 el1_abort(regs, esr);
214 * We don't handle ESR_ELx_EC_SP_ALIGN, since we will have hit a
215 * recursive exception when trying to push the initial pt_regs.
217 case ESR_ELx_EC_PC_ALIGN:
220 case ESR_ELx_EC_SYS64:
221 case ESR_ELx_EC_UNKNOWN:
222 el1_undef(regs, esr);
227 case ESR_ELx_EC_BREAKPT_CUR:
228 case ESR_ELx_EC_SOFTSTP_CUR:
229 case ESR_ELx_EC_WATCHPT_CUR:
230 case ESR_ELx_EC_BRK64:
233 case ESR_ELx_EC_FPAC:
241 asmlinkage void noinstr enter_from_user_mode(void)
243 lockdep_hardirqs_off(CALLER_ADDR0);
244 CT_WARN_ON(ct_state() != CONTEXT_USER);
246 trace_hardirqs_off_finish();
249 asmlinkage void noinstr exit_to_user_mode(void)
251 trace_hardirqs_on_prepare();
252 lockdep_hardirqs_on_prepare(CALLER_ADDR0);
254 lockdep_hardirqs_on(CALLER_ADDR0);
257 static void noinstr el0_da(struct pt_regs *regs, unsigned long esr)
259 unsigned long far = read_sysreg(far_el1);
261 enter_from_user_mode();
262 local_daif_restore(DAIF_PROCCTX);
263 far = untagged_addr(far);
264 do_mem_abort(far, esr, regs);
267 static void noinstr el0_ia(struct pt_regs *regs, unsigned long esr)
269 unsigned long far = read_sysreg(far_el1);
272 * We've taken an instruction abort from userspace and not yet
273 * re-enabled IRQs. If the address is a kernel address, apply
274 * BP hardening prior to enabling IRQs and pre-emption.
276 if (!is_ttbr0_addr(far))
277 arm64_apply_bp_hardening();
279 enter_from_user_mode();
280 local_daif_restore(DAIF_PROCCTX);
281 do_mem_abort(far, esr, regs);
284 static void noinstr el0_fpsimd_acc(struct pt_regs *regs, unsigned long esr)
286 enter_from_user_mode();
287 local_daif_restore(DAIF_PROCCTX);
288 do_fpsimd_acc(esr, regs);
291 static void noinstr el0_sve_acc(struct pt_regs *regs, unsigned long esr)
293 enter_from_user_mode();
294 local_daif_restore(DAIF_PROCCTX);
295 do_sve_acc(esr, regs);
298 static void noinstr el0_fpsimd_exc(struct pt_regs *regs, unsigned long esr)
300 enter_from_user_mode();
301 local_daif_restore(DAIF_PROCCTX);
302 do_fpsimd_exc(esr, regs);
305 static void noinstr el0_sys(struct pt_regs *regs, unsigned long esr)
307 enter_from_user_mode();
308 local_daif_restore(DAIF_PROCCTX);
309 do_el0_sys(esr, regs);
312 static void noinstr el0_pc(struct pt_regs *regs, unsigned long esr)
314 unsigned long far = read_sysreg(far_el1);
316 if (!is_ttbr0_addr(instruction_pointer(regs)))
317 arm64_apply_bp_hardening();
319 enter_from_user_mode();
320 local_daif_restore(DAIF_PROCCTX);
321 do_sp_pc_abort(far, esr, regs);
324 static void noinstr el0_sp(struct pt_regs *regs, unsigned long esr)
326 enter_from_user_mode();
327 local_daif_restore(DAIF_PROCCTX);
328 do_sp_pc_abort(regs->sp, esr, regs);
331 static void noinstr el0_undef(struct pt_regs *regs, unsigned long esr)
333 enter_from_user_mode();
334 local_daif_restore(DAIF_PROCCTX);
335 do_el0_undef(regs, esr);
338 static void noinstr el0_bti(struct pt_regs *regs)
340 enter_from_user_mode();
341 local_daif_restore(DAIF_PROCCTX);
345 static void noinstr el0_inv(struct pt_regs *regs, unsigned long esr)
347 enter_from_user_mode();
348 local_daif_restore(DAIF_PROCCTX);
349 bad_el0_sync(regs, 0, esr);
352 static void noinstr el0_dbg(struct pt_regs *regs, unsigned long esr)
354 /* Only watchpoints write FAR_EL1, otherwise its UNKNOWN */
355 unsigned long far = read_sysreg(far_el1);
357 enter_from_user_mode();
358 do_debug_exception(far, esr, regs);
359 local_daif_restore(DAIF_PROCCTX_NOIRQ);
362 static void noinstr el0_svc(struct pt_regs *regs)
364 enter_from_user_mode();
368 static void noinstr el0_fpac(struct pt_regs *regs, unsigned long esr)
370 enter_from_user_mode();
371 local_daif_restore(DAIF_PROCCTX);
372 do_el0_fpac(regs, esr);
375 asmlinkage void noinstr el0_sync_handler(struct pt_regs *regs)
377 unsigned long esr = read_sysreg(esr_el1);
379 switch (ESR_ELx_EC(esr)) {
380 case ESR_ELx_EC_SVC64:
383 case ESR_ELx_EC_DABT_LOW:
386 case ESR_ELx_EC_IABT_LOW:
389 case ESR_ELx_EC_FP_ASIMD:
390 el0_fpsimd_acc(regs, esr);
393 el0_sve_acc(regs, esr);
395 case ESR_ELx_EC_FP_EXC64:
396 el0_fpsimd_exc(regs, esr);
398 case ESR_ELx_EC_SYS64:
402 case ESR_ELx_EC_SP_ALIGN:
405 case ESR_ELx_EC_PC_ALIGN:
408 case ESR_ELx_EC_UNKNOWN:
409 el0_undef(regs, esr);
414 case ESR_ELx_EC_BREAKPT_LOW:
415 case ESR_ELx_EC_SOFTSTP_LOW:
416 case ESR_ELx_EC_WATCHPT_LOW:
417 case ESR_ELx_EC_BRK64:
420 case ESR_ELx_EC_FPAC:
429 static void noinstr el0_cp15(struct pt_regs *regs, unsigned long esr)
431 enter_from_user_mode();
432 local_daif_restore(DAIF_PROCCTX);
433 do_el0_cp15(esr, regs);
436 static void noinstr el0_svc_compat(struct pt_regs *regs)
438 enter_from_user_mode();
439 do_el0_svc_compat(regs);
442 asmlinkage void noinstr el0_sync_compat_handler(struct pt_regs *regs)
444 unsigned long esr = read_sysreg(esr_el1);
446 switch (ESR_ELx_EC(esr)) {
447 case ESR_ELx_EC_SVC32:
448 el0_svc_compat(regs);
450 case ESR_ELx_EC_DABT_LOW:
453 case ESR_ELx_EC_IABT_LOW:
456 case ESR_ELx_EC_FP_ASIMD:
457 el0_fpsimd_acc(regs, esr);
459 case ESR_ELx_EC_FP_EXC32:
460 el0_fpsimd_exc(regs, esr);
462 case ESR_ELx_EC_PC_ALIGN:
465 case ESR_ELx_EC_UNKNOWN:
466 case ESR_ELx_EC_CP14_MR:
467 case ESR_ELx_EC_CP14_LS:
468 case ESR_ELx_EC_CP14_64:
469 el0_undef(regs, esr);
471 case ESR_ELx_EC_CP15_32:
472 case ESR_ELx_EC_CP15_64:
475 case ESR_ELx_EC_BREAKPT_LOW:
476 case ESR_ELx_EC_SOFTSTP_LOW:
477 case ESR_ELx_EC_WATCHPT_LOW:
478 case ESR_ELx_EC_BKPT32:
485 #endif /* CONFIG_COMPAT */