GNU Linux-libre 4.14.324-gnu1: arch/arm64/kernel/traps.c
/*
 * Based on arch/arm/kernel/traps.c
 *
 * Copyright (C) 1995-2009 Russell King
 * Copyright (C) 2012 ARM Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/bug.h>
#include <linux/signal.h>
#include <linux/personality.h>
#include <linux/kallsyms.h>
#include <linux/spinlock.h>
#include <linux/uaccess.h>
#include <linux/hardirq.h>
#include <linux/kdebug.h>
#include <linux/module.h>
#include <linux/kexec.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/sched/signal.h>
#include <linux/sched/debug.h>
#include <linux/sched/task_stack.h>
#include <linux/sizes.h>
#include <linux/syscalls.h>
#include <linux/mm_types.h>

#include <asm/atomic.h>
#include <asm/bug.h>
#include <asm/cpufeature.h>
#include <asm/debug-monitors.h>
#include <asm/esr.h>
#include <asm/insn.h>
#include <asm/traps.h>
#include <asm/smp.h>
#include <asm/stack_pointer.h>
#include <asm/stacktrace.h>
#include <asm/exception.h>
#include <asm/system_misc.h>
#include <asm/sysreg.h>

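/*
 * Human-readable names for the four exception vector entry types;
 * bad_mode() indexes this array with the 'reason' value passed in from
 * the entry code.
 */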
static const char *handler[] = {
        "Synchronous Abort",
        "IRQ",
        "FIQ",
        "Error"
};

int show_unhandled_signals = 0;

static void dump_backtrace_entry(unsigned long where)
{
        printk(" %pS\n", (void *)where);
}

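/*
 * Print the instruction stream around the faulting PC: the four words
 * before it and the word at the PC itself, the latter in parentheses.
 * Reads go through get_user(), so an unreadable PC is reported as such
 * instead of faulting recursively.
 */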
static void __dump_instr(const char *lvl, struct pt_regs *regs)
{
        unsigned long addr = instruction_pointer(regs);
        char str[sizeof("00000000 ") * 5 + 2 + 1], *p = str;
        int i;

        for (i = -4; i < 1; i++) {
                unsigned int val, bad;

                bad = get_user(val, &((u32 *)addr)[i]);

                if (!bad)
                        p += sprintf(p, i == 0 ? "(%08x) " : "%08x ", val);
                else {
                        p += sprintf(p, "bad PC value");
                        break;
                }
        }
        printk("%sCode: %s\n", lvl, str);
}

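/*
 * For a fault taken from kernel mode the PC is a kernel address, so the
 * addr_limit is temporarily widened with set_fs(KERNEL_DS) to let
 * get_user() read it.
 */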
static void dump_instr(const char *lvl, struct pt_regs *regs)
{
        if (!user_mode(regs)) {
                mm_segment_t fs = get_fs();
                set_fs(KERNEL_DS);
                __dump_instr(lvl, regs);
                set_fs(fs);
        } else {
                __dump_instr(lvl, regs);
        }
}

void dump_backtrace(struct pt_regs *regs, struct task_struct *tsk)
{
        struct stackframe frame;
        int skip = 0;

        pr_debug("%s(regs = %p tsk = %p)\n", __func__, regs, tsk);

        if (regs) {
                if (user_mode(regs))
                        return;
                skip = 1;
        }

        if (!tsk)
                tsk = current;

        if (!try_get_task_stack(tsk))
                return;

        if (tsk == current) {
                frame.fp = (unsigned long)__builtin_frame_address(0);
                frame.pc = (unsigned long)dump_backtrace;
        } else {
                /*
                 * task blocked in __switch_to
                 */
                frame.fp = thread_saved_fp(tsk);
                frame.pc = thread_saved_pc(tsk);
        }
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
        frame.graph = tsk->curr_ret_stack;
#endif

        printk("Call trace:\n");
        do {
                /* skip until specified stack frame */
                if (!skip) {
                        dump_backtrace_entry(frame.pc);
                } else if (frame.fp == regs->regs[29]) {
                        skip = 0;
                        /*
                         * Mostly, this is the case where this function is
                         * called from panic/abort. As the exception handler's
                         * stack frame does not contain the pc at which the
                         * exception was taken, use regs->pc instead.
                         */
                        dump_backtrace_entry(regs->pc);
                }
        } while (!unwind_frame(tsk, &frame));

        put_task_stack(tsk);
}

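/*
 * Architecture hook used by the generic stack-dumping code. The barrier()
 * keeps the compiler from turning the call into a tail call, which
 * presumably keeps this frame visible to the unwinder.
 */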
void show_stack(struct task_struct *tsk, unsigned long *sp)
{
        dump_backtrace(NULL, tsk);
        barrier();
}

#ifdef CONFIG_PREEMPT
#define S_PREEMPT " PREEMPT"
#else
#define S_PREEMPT ""
#endif
#define S_SMP " SMP"

static int __die(const char *str, int err, struct pt_regs *regs)
{
        struct task_struct *tsk = current;
        static int die_counter;
        int ret;

        pr_emerg("Internal error: %s: %x [#%d]" S_PREEMPT S_SMP "\n",
                 str, err, ++die_counter);

        /* trap and error numbers are mostly meaningless on ARM */
        ret = notify_die(DIE_OOPS, str, regs, err, 0, SIGSEGV);
        if (ret == NOTIFY_STOP)
                return ret;

        print_modules();
        pr_emerg("Process %.*s (pid: %d, stack limit = 0x%p)\n",
                 TASK_COMM_LEN, tsk->comm, task_pid_nr(tsk),
                 end_of_stack(tsk));
        show_regs(regs);

        if (!user_mode(regs))
                dump_instr(KERN_EMERG, regs);

        return ret;
}

static DEFINE_RAW_SPINLOCK(die_lock);

/*
 * This function is protected against re-entrancy.
 */
void die(const char *str, struct pt_regs *regs, int err)
{
        int ret;
        unsigned long flags;

        raw_spin_lock_irqsave(&die_lock, flags);

        oops_enter();

        console_verbose();
        bust_spinlocks(1);
        ret = __die(str, err, regs);

        if (regs && kexec_should_crash(current))
                crash_kexec(regs);

        bust_spinlocks(0);
        add_taint(TAINT_DIE, LOCKDEP_NOW_UNRELIABLE);
        oops_exit();

        if (in_interrupt())
                panic("Fatal exception in interrupt");
        if (panic_on_oops)
                panic("Fatal exception");

        raw_spin_unlock_irqrestore(&die_lock, flags);

        if (ret != NOTIFY_STOP)
                make_task_dead(SIGSEGV);
}

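/*
 * Report a fault: deliver the prepared signal for a user-mode fault, or
 * die() if the exception was taken from kernel mode.
 */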
void arm64_notify_die(const char *str, struct pt_regs *regs,
                      struct siginfo *info, int err)
{
        if (user_mode(regs)) {
                current->thread.fault_address = 0;
                current->thread.fault_code = err;
                force_sig_info(info->si_signo, info, current);
        } else {
                die(str, regs, err);
        }
}

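/*
 * Advance the PC past an instruction that has been emulated or is to be
 * skipped, keeping single-step state consistent for user tasks.
 */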
void arm64_skip_faulting_instruction(struct pt_regs *regs, unsigned long size)
{
        regs->pc += size;

        /*
         * If we were single stepping, we want to get the step exception after
         * we return from the trap.
         */
        if (user_mode(regs))
                user_fastforward_single_step(current);
}

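/* Hooks for handling instructions that would otherwise be UNDEFINED at EL0. */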
static LIST_HEAD(undef_hook);
static DEFINE_RAW_SPINLOCK(undef_lock);

void register_undef_hook(struct undef_hook *hook)
{
        unsigned long flags;

        raw_spin_lock_irqsave(&undef_lock, flags);
        list_add(&hook->node, &undef_hook);
        raw_spin_unlock_irqrestore(&undef_lock, flags);
}
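
/*
 * Illustrative sketch only, not part of this file: a user of this API
 * (the deprecated-instruction emulation code, for instance) registers a
 * hook along these lines. my_handler/emulate and the mask/value encodings
 * below are hypothetical placeholders; returning 0 from the handler tells
 * do_undefinstr() the instruction was handled.
 *
 *      static int my_handler(struct pt_regs *regs, u32 instr)
 *      {
 *              emulate(instr);
 *              arm64_skip_faulting_instruction(regs, 4);
 *              return 0;
 *      }
 *
 *      static struct undef_hook my_hook = {
 *              .instr_mask     = 0xffff0000,
 *              .instr_val      = 0x12340000,
 *              .pstate_mask    = COMPAT_PSR_MODE_MASK,
 *              .pstate_val     = COMPAT_PSR_MODE_USR,
 *              .fn             = my_handler,
 *      };
 *
 *      register_undef_hook(&my_hook);
 */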

void unregister_undef_hook(struct undef_hook *hook)
{
        unsigned long flags;

        raw_spin_lock_irqsave(&undef_lock, flags);
        list_del(&hook->node);
        raw_spin_unlock_irqrestore(&undef_lock, flags);
}

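/*
 * Fetch the undefined instruction from user space, handling both halves
 * of a 32-bit Thumb encoding, and offer it to the registered hooks.
 * Returns the hook's result (0 when handled) or 1 when no hook claims it.
 */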
static int call_undef_hook(struct pt_regs *regs)
{
        struct undef_hook *hook;
        unsigned long flags;
        u32 instr;
        int (*fn)(struct pt_regs *regs, u32 instr) = NULL;
        void __user *pc = (void __user *)instruction_pointer(regs);

        if (!user_mode(regs))
                return 1;

        if (compat_thumb_mode(regs)) {
                /* 16-bit Thumb instruction */
                __le16 instr_le;
                if (get_user(instr_le, (__le16 __user *)pc))
                        goto exit;
                instr = le16_to_cpu(instr_le);
                if (aarch32_insn_is_wide(instr)) {
                        u32 instr2;

                        if (get_user(instr_le, (__le16 __user *)(pc + 2)))
                                goto exit;
                        instr2 = le16_to_cpu(instr_le);
                        instr = (instr << 16) | instr2;
                }
        } else {
                /* 32-bit ARM instruction */
                __le32 instr_le;
                if (get_user(instr_le, (__le32 __user *)pc))
                        goto exit;
                instr = le32_to_cpu(instr_le);
        }

        raw_spin_lock_irqsave(&undef_lock, flags);
        list_for_each_entry(hook, &undef_hook, node)
                if ((instr & hook->instr_mask) == hook->instr_val &&
                        (regs->pstate & hook->pstate_mask) == hook->pstate_val)
                        fn = hook->fn;

        raw_spin_unlock_irqrestore(&undef_lock, flags);
exit:
        return fn ? fn(regs, instr) : 1;
}

static void force_signal_inject(int signal, int code, struct pt_regs *regs,
                                unsigned long address)
{
        siginfo_t info;
        void __user *pc = (void __user *)instruction_pointer(regs);
        const char *desc;

        switch (signal) {
        case SIGILL:
                desc = "undefined instruction";
                break;
        case SIGSEGV:
                desc = "illegal memory access";
                break;
        default:
                desc = "bad mode";
                break;
        }

        if (unhandled_signal(current, signal) &&
            show_unhandled_signals_ratelimited()) {
                pr_info("%s[%d]: %s: pc=%p\n",
                        current->comm, task_pid_nr(current), desc, pc);
                dump_instr(KERN_INFO, regs);
        }

        info.si_signo = signal;
        info.si_errno = 0;
        info.si_code  = code;
        info.si_addr  = pc;

        arm64_notify_die(desc, regs, &info, 0);
}

/*
 * Set up process info to signal segmentation fault - called on access error.
 */
void arm64_notify_segfault(struct pt_regs *regs, unsigned long addr)
{
        int code;

        down_read(&current->mm->mmap_sem);
        if (find_vma(current->mm, addr) == NULL)
                code = SEGV_MAPERR;
        else
                code = SEGV_ACCERR;
        up_read(&current->mm->mmap_sem);

        force_signal_inject(SIGSEGV, code, regs, addr);
}

asmlinkage void __exception do_undefinstr(struct pt_regs *regs)
{
        /* check for AArch32 breakpoint instructions */
        if (!aarch32_break_handler(regs))
                return;

        if (call_undef_hook(regs) == 0)
                return;

        force_signal_inject(SIGILL, ILL_ILLOPC, regs, 0);
}

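/*
 * Clear SCTLR_EL1.UCI so that EL0 cache maintenance instructions trap to
 * EL1, where user_cache_maint_handler() emulates them.
 */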
void cpu_enable_cache_maint_trap(const struct arm64_cpu_capabilities *__unused)
{
        config_sctlr_el1(SCTLR_EL1_UCI, 0);
}

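/*
 * Run a cache maintenance instruction on a user address. A fault on the
 * access is caught through the exception table: the fixup at label 3
 * loads -EFAULT into the result and branches back past the instruction.
 */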
#define __user_cache_maint(insn, address, res)                  \
        if (address >= user_addr_max()) {                       \
                res = -EFAULT;                                  \
        } else {                                                \
                uaccess_ttbr0_enable();                         \
                asm volatile (                                  \
                        "1:     " insn ", %1\n"                 \
                        "       mov     %w0, #0\n"              \
                        "2:\n"                                  \
                        "       .pushsection .fixup,\"ax\"\n"   \
                        "       .align  2\n"                    \
                        "3:     mov     %w0, %w2\n"             \
                        "       b       2b\n"                   \
                        "       .popsection\n"                  \
                        _ASM_EXTABLE(1b, 3b)                    \
                        : "=r" (res)                            \
                        : "r" (address), "i" (-EFAULT));        \
                uaccess_ttbr0_disable();                        \
        }

static void user_cache_maint_handler(unsigned int esr, struct pt_regs *regs)
{
        unsigned long address;
        int rt = (esr & ESR_ELx_SYS64_ISS_RT_MASK) >> ESR_ELx_SYS64_ISS_RT_SHIFT;
        int crm = (esr & ESR_ELx_SYS64_ISS_CRM_MASK) >> ESR_ELx_SYS64_ISS_CRM_SHIFT;
        int ret = 0;

        address = untagged_addr(pt_regs_read_reg(regs, rt));

        switch (crm) {
        case ESR_ELx_SYS64_ISS_CRM_DC_CVAU:     /* DC CVAU, gets promoted */
                __user_cache_maint("dc civac", address, ret);
                break;
        case ESR_ELx_SYS64_ISS_CRM_DC_CVAC:     /* DC CVAC, gets promoted */
                __user_cache_maint("dc civac", address, ret);
                break;
        case ESR_ELx_SYS64_ISS_CRM_DC_CVAP:     /* DC CVAP */
                __user_cache_maint("sys 3, c7, c12, 1", address, ret);
                break;
        case ESR_ELx_SYS64_ISS_CRM_DC_CIVAC:    /* DC CIVAC */
                __user_cache_maint("dc civac", address, ret);
                break;
        case ESR_ELx_SYS64_ISS_CRM_IC_IVAU:     /* IC IVAU */
                __user_cache_maint("ic ivau", address, ret);
                break;
        default:
                force_signal_inject(SIGILL, ILL_ILLOPC, regs, 0);
                return;
        }

        if (ret)
                arm64_notify_segfault(regs, address);
        else
                arm64_skip_faulting_instruction(regs, AARCH64_INSN_SIZE);
}

static void ctr_read_handler(unsigned int esr, struct pt_regs *regs)
{
        int rt = (esr & ESR_ELx_SYS64_ISS_RT_MASK) >> ESR_ELx_SYS64_ISS_RT_SHIFT;
        unsigned long val = arm64_ftr_reg_user_value(&arm64_ftr_reg_ctrel0);

        pt_regs_write_reg(regs, rt, val);

        arm64_skip_faulting_instruction(regs, AARCH64_INSN_SIZE);
}

static void cntvct_read_handler(unsigned int esr, struct pt_regs *regs)
{
        int rt = (esr & ESR_ELx_SYS64_ISS_RT_MASK) >> ESR_ELx_SYS64_ISS_RT_SHIFT;

        pt_regs_write_reg(regs, rt, arch_counter_get_cntvct());
        arm64_skip_faulting_instruction(regs, AARCH64_INSN_SIZE);
}

static void cntfrq_read_handler(unsigned int esr, struct pt_regs *regs)
{
        int rt = (esr & ESR_ELx_SYS64_ISS_RT_MASK) >> ESR_ELx_SYS64_ISS_RT_SHIFT;

        pt_regs_write_reg(regs, rt, arch_timer_get_rate());
        arm64_skip_faulting_instruction(regs, AARCH64_INSN_SIZE);
}

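/*
 * Emulation hooks for EL0 system instructions; an ESR matches an entry
 * when (esr & esr_mask) == esr_val.
 */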
struct sys64_hook {
        unsigned int esr_mask;
        unsigned int esr_val;
        void (*handler)(unsigned int esr, struct pt_regs *regs);
};

static struct sys64_hook sys64_hooks[] = {
        {
                .esr_mask = ESR_ELx_SYS64_ISS_EL0_CACHE_OP_MASK,
                .esr_val = ESR_ELx_SYS64_ISS_EL0_CACHE_OP_VAL,
                .handler = user_cache_maint_handler,
        },
        {
                /* Trap read access to CTR_EL0 */
                .esr_mask = ESR_ELx_SYS64_ISS_SYS_OP_MASK,
                .esr_val = ESR_ELx_SYS64_ISS_SYS_CTR_READ,
                .handler = ctr_read_handler,
        },
        {
                /* Trap read access to CNTVCT_EL0 */
                .esr_mask = ESR_ELx_SYS64_ISS_SYS_OP_MASK,
                .esr_val = ESR_ELx_SYS64_ISS_SYS_CNTVCT,
                .handler = cntvct_read_handler,
        },
        {
                /* Trap read access to CNTFRQ_EL0 */
                .esr_mask = ESR_ELx_SYS64_ISS_SYS_OP_MASK,
                .esr_val = ESR_ELx_SYS64_ISS_SYS_CNTFRQ,
                .handler = cntfrq_read_handler,
        },
        {},
};

asmlinkage void __exception do_sysinstr(unsigned int esr, struct pt_regs *regs)
{
        struct sys64_hook *hook;

        for (hook = sys64_hooks; hook->handler; hook++)
                if ((hook->esr_mask & esr) == hook->esr_val) {
                        hook->handler(esr, regs);
                        return;
                }

        /*
         * New SYS instructions may previously have been undefined at EL0. Fall
         * back to our usual undefined instruction handler so that we handle
         * these consistently.
         */
        do_undefinstr(regs);
}

long compat_arm_syscall(struct pt_regs *regs);

asmlinkage long do_ni_syscall(struct pt_regs *regs)
{
#ifdef CONFIG_COMPAT
        long ret;
        if (is_compat_task()) {
                ret = compat_arm_syscall(regs);
                if (ret != -ENOSYS)
                        return ret;
        }
#endif

        return sys_ni_syscall();
}

static const char *esr_class_str[] = {
        [0 ... ESR_ELx_EC_MAX]          = "UNRECOGNIZED EC",
        [ESR_ELx_EC_UNKNOWN]            = "Unknown/Uncategorized",
        [ESR_ELx_EC_WFx]                = "WFI/WFE",
        [ESR_ELx_EC_CP15_32]            = "CP15 MCR/MRC",
        [ESR_ELx_EC_CP15_64]            = "CP15 MCRR/MRRC",
        [ESR_ELx_EC_CP14_MR]            = "CP14 MCR/MRC",
        [ESR_ELx_EC_CP14_LS]            = "CP14 LDC/STC",
        [ESR_ELx_EC_FP_ASIMD]           = "ASIMD",
        [ESR_ELx_EC_CP10_ID]            = "CP10 MRC/VMRS",
        [ESR_ELx_EC_CP14_64]            = "CP14 MCRR/MRRC",
        [ESR_ELx_EC_ILL]                = "PSTATE.IL",
        [ESR_ELx_EC_SVC32]              = "SVC (AArch32)",
        [ESR_ELx_EC_HVC32]              = "HVC (AArch32)",
        [ESR_ELx_EC_SMC32]              = "SMC (AArch32)",
        [ESR_ELx_EC_SVC64]              = "SVC (AArch64)",
        [ESR_ELx_EC_HVC64]              = "HVC (AArch64)",
        [ESR_ELx_EC_SMC64]              = "SMC (AArch64)",
        [ESR_ELx_EC_SYS64]              = "MSR/MRS (AArch64)",
        [ESR_ELx_EC_IMP_DEF]            = "EL3 IMP DEF",
        [ESR_ELx_EC_IABT_LOW]           = "IABT (lower EL)",
        [ESR_ELx_EC_IABT_CUR]           = "IABT (current EL)",
        [ESR_ELx_EC_PC_ALIGN]           = "PC Alignment",
        [ESR_ELx_EC_DABT_LOW]           = "DABT (lower EL)",
        [ESR_ELx_EC_DABT_CUR]           = "DABT (current EL)",
        [ESR_ELx_EC_SP_ALIGN]           = "SP Alignment",
        [ESR_ELx_EC_FP_EXC32]           = "FP (AArch32)",
        [ESR_ELx_EC_FP_EXC64]           = "FP (AArch64)",
        [ESR_ELx_EC_SERROR]             = "SError",
        [ESR_ELx_EC_BREAKPT_LOW]        = "Breakpoint (lower EL)",
        [ESR_ELx_EC_BREAKPT_CUR]        = "Breakpoint (current EL)",
        [ESR_ELx_EC_SOFTSTP_LOW]        = "Software Step (lower EL)",
        [ESR_ELx_EC_SOFTSTP_CUR]        = "Software Step (current EL)",
        [ESR_ELx_EC_WATCHPT_LOW]        = "Watchpoint (lower EL)",
        [ESR_ELx_EC_WATCHPT_CUR]        = "Watchpoint (current EL)",
        [ESR_ELx_EC_BKPT32]             = "BKPT (AArch32)",
        [ESR_ELx_EC_VECTOR32]           = "Vector catch (AArch32)",
        [ESR_ELx_EC_BRK64]              = "BRK (AArch64)",
};

const char *esr_get_class_string(u32 esr)
{
        return esr_class_str[ESR_ELx_EC(esr)];
}

/*
 * bad_mode handles the impossible case in the exception vector. This is always
 * fatal.
 */
asmlinkage void bad_mode(struct pt_regs *regs, int reason, unsigned int esr)
{
        console_verbose();

        pr_crit("Bad mode in %s handler detected on CPU%d, code 0x%08x -- %s\n",
                handler[reason], smp_processor_id(), esr,
                esr_get_class_string(esr));

        local_irq_disable();
        panic("bad mode");
}

/*
 * bad_el0_sync handles unexpected, but potentially recoverable synchronous
 * exceptions taken from EL0. Unlike bad_mode, this returns.
 */
asmlinkage void bad_el0_sync(struct pt_regs *regs, int reason, unsigned int esr)
{
        siginfo_t info;
        void __user *pc = (void __user *)instruction_pointer(regs);
        console_verbose();

        pr_crit("Bad EL0 synchronous exception detected on CPU%d, code 0x%08x -- %s\n",
                smp_processor_id(), esr, esr_get_class_string(esr));
        __show_regs(regs);

        info.si_signo = SIGILL;
        info.si_errno = 0;
        info.si_code  = ILL_ILLOPC;
        info.si_addr  = pc;

        current->thread.fault_address = 0;
        current->thread.fault_code = 0;

        force_sig_info(info.si_signo, &info, current);
}

#ifdef CONFIG_VMAP_STACK

DEFINE_PER_CPU(unsigned long [OVERFLOW_STACK_SIZE/sizeof(long)], overflow_stack)
        __aligned(16);

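/*
 * Called from the entry code when a kernel stack overflow is detected. It
 * runs on the per-CPU overflow stack declared above and does not return.
 */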
asmlinkage void handle_bad_stack(struct pt_regs *regs)
{
        unsigned long tsk_stk = (unsigned long)current->stack;
        unsigned long irq_stk = (unsigned long)this_cpu_read(irq_stack_ptr);
        unsigned long ovf_stk = (unsigned long)this_cpu_ptr(overflow_stack);
        unsigned int esr = read_sysreg(esr_el1);
        unsigned long far = read_sysreg(far_el1);

        console_verbose();
        pr_emerg("Insufficient stack space to handle exception!\n");

        pr_emerg("ESR: 0x%08x -- %s\n", esr, esr_get_class_string(esr));
        pr_emerg("FAR: 0x%016lx\n", far);

        pr_emerg("Task stack:     [0x%016lx..0x%016lx]\n",
                 tsk_stk, tsk_stk + THREAD_SIZE);
        pr_emerg("IRQ stack:      [0x%016lx..0x%016lx]\n",
                 irq_stk, irq_stk + THREAD_SIZE);
        pr_emerg("Overflow stack: [0x%016lx..0x%016lx]\n",
                 ovf_stk, ovf_stk + OVERFLOW_STACK_SIZE);

        __show_regs(regs);

        /*
         * We use nmi_panic to limit the potential for recursive overflows,
         * and to get a better stack trace.
         */
        nmi_panic(NULL, "kernel stack overflow");
        cpu_park_loop();
}
#endif


void __pte_error(const char *file, int line, unsigned long val)
{
        pr_err("%s:%d: bad pte %016lx.\n", file, line, val);
}

void __pmd_error(const char *file, int line, unsigned long val)
{
        pr_err("%s:%d: bad pmd %016lx.\n", file, line, val);
}

void __pud_error(const char *file, int line, unsigned long val)
{
        pr_err("%s:%d: bad pud %016lx.\n", file, line, val);
}

void __pgd_error(const char *file, int line, unsigned long val)
{
        pr_err("%s:%d: bad pgd %016lx.\n", file, line, val);
}

/* GENERIC_BUG traps */

int is_valid_bugaddr(unsigned long addr)
{
        /*
         * bug_handler() is only called for BRK #BUG_BRK_IMM, so the answer
         * is trivial: any spurious instances with no bug table entry will
         * be rejected by report_bug() and passed back to the debug-monitors
         * code, which handles them as a fatal unexpected debug exception.
         */
        return 1;
}

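/*
 * BRK #BUG_BRK_IMM handler: hand the PC to report_bug() to decode the
 * bug table entry. WARNs resume past the BRK; real BUGs die.
 */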
static int bug_handler(struct pt_regs *regs, unsigned int esr)
{
        if (user_mode(regs))
                return DBG_HOOK_ERROR;

        switch (report_bug(regs->pc, regs)) {
        case BUG_TRAP_TYPE_BUG:
                die("Oops - BUG", regs, 0);
                break;

        case BUG_TRAP_TYPE_WARN:
                break;

        default:
                /* unknown/unrecognised bug trap type */
                return DBG_HOOK_ERROR;
        }

        /* If thread survives, skip over the BUG instruction and continue: */
        arm64_skip_faulting_instruction(regs, AARCH64_INSN_SIZE);
        return DBG_HOOK_HANDLED;
}

static struct break_hook bug_break_hook = {
        .esr_val = 0xf2000000 | BUG_BRK_IMM,
        .esr_mask = 0xffffffff,
        .fn = bug_handler,
};

/*
 * Initial handler for AArch64 BRK exceptions.
 * This handler is only used until debug_traps_init().
 */
int __init early_brk64(unsigned long addr, unsigned int esr,
                struct pt_regs *regs)
{
        return bug_handler(regs, esr) != DBG_HOOK_HANDLED;
}

/* This registration must happen early, before debug_traps_init(). */
void __init trap_init(void)
{
        register_break_hook(&bug_break_hook);
}