/*
 * Copyright (C) 1991, 1992 Linus Torvalds
 * Copyright (C) 2000, 2001, 2002 Andi Kleen, SuSE Labs
 */
#include <linux/kallsyms.h>
#include <linux/kprobes.h>
#include <linux/uaccess.h>
#include <linux/utsname.h>
#include <linux/hardirq.h>
#include <linux/kdebug.h>
#include <linux/module.h>
#include <linux/ptrace.h>
#include <linux/sched/debug.h>
#include <linux/sched/task_stack.h>
#include <linux/ftrace.h>
#include <linux/kexec.h>
#include <linux/bug.h>
#include <linux/nmi.h>
#include <linux/sysfs.h>
#include <linux/kasan.h>

#include <asm/cpu_entry_area.h>
#include <asm/stacktrace.h>
#include <asm/unwind.h>
int panic_on_unrecovered_nmi;
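/*
 * Number of bytes of machine code dumped around the faulting instruction in
 * an oops ("Code:" line); tunable with the "code_bytes=" boot parameter, which
 * code_bytes_setup() at the bottom of this file clamps to 8192.
 */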
unsigned int code_bytes = 64;
static int die_counter;
bool in_task_stack(unsigned long *stack, struct task_struct *task,
		   struct stack_info *info)
{
	unsigned long *begin = task_stack_page(task);
	unsigned long *end   = task_stack_page(task) + THREAD_SIZE;

	if (stack < begin || stack >= end)
		return false;

	info->type	= STACK_TYPE_TASK;
	info->begin	= begin;
	info->end	= end;
	info->next_sp	= NULL;

	return true;
}
bool in_entry_stack(unsigned long *stack, struct stack_info *info)
{
	struct entry_stack *ss = cpu_entry_stack(smp_processor_id());

	void *begin = ss;
	void *end = ss + 1;

	if ((void *)stack < begin || (void *)stack >= end)
		return false;

	info->type	= STACK_TYPE_ENTRY;
	info->begin	= begin;
	info->end	= end;
	info->next_sp	= NULL;

	return true;
}
static void printk_stack_address(unsigned long address, int reliable,
				 char *log_lvl)
{
	touch_nmi_watchdog();
	printk("%s %s%pB\n", log_lvl, reliable ? "" : "? ", (void *)address);
}
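/*
 * Print just the hardware iret frame (RIP/CS, RSP/SS, EFLAGS).  This is what
 * gets dumped when an exception hit entry code before a full pt_regs was
 * saved, so only this partial frame can be trusted (see the 'partial' case in
 * show_regs_if_on_stack() below).
 */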
void show_iret_regs(struct pt_regs *regs)
{
	printk(KERN_DEFAULT "RIP: %04x:%pS\n", (int)regs->cs, (void *)regs->ip);
	printk(KERN_DEFAULT "RSP: %04x:%016lx EFLAGS: %08lx", (int)regs->ss,
		regs->sp, regs->flags);
}
static void show_regs_if_on_stack(struct stack_info *info, struct pt_regs *regs,
				  bool partial)
{
	/*
	 * These on_stack() checks aren't strictly necessary: the unwind code
	 * has already validated the 'regs' pointer.  The checks are done for
	 * ordering reasons: if the registers are on the next stack, we don't
	 * want to print them out yet.  Otherwise they'll be shown as part of
	 * the wrong stack.  Later, when show_trace_log_lvl() switches to the
	 * next stack, this function will be called again with the same regs so
	 * they can be printed in the right context.
	 */
	if (!partial && on_stack(info, regs, sizeof(*regs))) {
		__show_regs(regs, 0);

	} else if (partial && on_stack(info, (void *)regs + IRET_FRAME_OFFSET,
				       IRET_FRAME_SIZE)) {
		/*
		 * When an interrupt or exception occurs in entry code, the
		 * full pt_regs might not have been saved yet.  In that case
		 * just print the iret frame.
		 */
		show_iret_regs(regs);
	}
}
void show_trace_log_lvl(struct task_struct *task, struct pt_regs *regs,
			unsigned long *stack, char *log_lvl)
{
	struct unwind_state state;
	struct stack_info stack_info = {0};
	unsigned long visit_mask = 0;
	int graph_idx = 0;
	bool partial = false;

	printk("%sCall Trace:\n", log_lvl);

	unwind_start(&state, task, regs, stack);
	regs = unwind_get_entry_regs(&state, &partial);
	/*
	 * Iterate through the stacks, starting with the current stack pointer.
	 * Each stack has a pointer to the next one.
	 *
	 * x86-64 can have several stacks:
	 * - task stack
	 * - interrupt stack
	 * - HW exception stacks (double fault, nmi, debug, mce)
	 * - entry stack
	 *
	 * x86-32 can have up to four stacks:
	 * - task stack
	 * - softirq stack
	 * - hardirq stack
	 * - entry stack
	 */
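	/*
	 * get_stack_info() below classifies whichever stack 'stack' points
	 * into, filling stack_info.type/begin/end plus the next_sp link that
	 * advances this loop, while visit_mask remembers which stack types
	 * were already dumped so a corrupted chain can't loop forever.
	 * Stacks that have a name from stack_type_name() are bracketed in the
	 * output with "<NAME>" / "</NAME>" markers.
	 */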
	for (stack = stack ?: get_stack_pointer(task, regs);
	     stack;
	     stack = stack_info.next_sp) {
		const char *stack_name;

		stack = PTR_ALIGN(stack, sizeof(long));

		if (get_stack_info(stack, task, &stack_info, &visit_mask)) {
			/*
			 * We weren't on a valid stack.  It's possible that
			 * we overflowed a valid stack into a guard page.
			 * See if the next page up is valid so that we can
			 * generate some kind of backtrace if this happens.
			 */
			stack = (unsigned long *)PAGE_ALIGN((unsigned long)stack);
			if (get_stack_info(stack, task, &stack_info, &visit_mask))
				break;
		}
		stack_name = stack_type_name(stack_info.type);
		if (stack_name)
			printk("%s <%s>\n", log_lvl, stack_name);

		if (regs)
			show_regs_if_on_stack(&stack_info, regs, partial);
		/*
		 * Scan the stack, printing any text addresses we find.  At the
		 * same time, follow proper stack frames with the unwinder.
		 *
		 * Addresses found during the scan which are not reported by
		 * the unwinder are considered to be additional clues which are
		 * sometimes useful for debugging and are prefixed with '?'.
		 * This also serves as a failsafe option in case the unwinder
		 * goes off in the weeds.
		 */
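		/*
		 * Unwinder-confirmed entries are printed bare, e.g.
		 *   "  __schedule+0x2dd/0x7a0" (symbol and offsets illustrative),
		 * while scan-only hits keep the "? " prefix added by
		 * printk_stack_address().
		 */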
		for (; stack < stack_info.end; stack++) {
			unsigned long real_addr;
			int reliable = 0;
			unsigned long addr = READ_ONCE_NOCHECK(*stack);
			unsigned long *ret_addr_p =
				unwind_get_return_address_ptr(&state);

			if (!__kernel_text_address(addr))
				continue;
			/*
			 * Don't print regs->ip again if it was already printed
			 * by show_regs_if_on_stack().
			 */
			if (regs && stack == &regs->ip)
				goto next;

			if (stack == ret_addr_p)
				reliable = 1;
			/*
			 * When function graph tracing is enabled for a
			 * function, its return address on the stack is
			 * replaced with the address of an ftrace handler
			 * (return_to_handler).  In that case, before printing
			 * the "real" address, we want to print the handler
			 * address as an "unreliable" hint that function graph
			 * tracing was involved.
			 */
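			/*
			 * In the dump this shows up as, e.g., a
			 * "? return_to_handler+0x.../0x..." line immediately
			 * followed by the original caller that
			 * ftrace_graph_ret_addr() recovers below.
			 */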
			real_addr = ftrace_graph_ret_addr(task, &graph_idx,
							  addr, stack);
			if (real_addr != addr)
				printk_stack_address(addr, 0, log_lvl);
			printk_stack_address(real_addr, reliable, log_lvl);

			if (!reliable)
				continue;
next:
			/*
			 * Get the next frame from the unwinder.  No need to
			 * check for an error: if anything goes wrong, the rest
			 * of the addresses will just be printed as unreliable.
			 */
			unwind_next_frame(&state);

			/* if the frame has entry regs, print them */
			regs = unwind_get_entry_regs(&state, &partial);
			if (regs)
				show_regs_if_on_stack(&stack_info, regs, partial);
		}
		if (stack_name)
			printk("%s </%s>\n", log_lvl, stack_name);
	}
}
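/*
 * show_stack() is the arch hook used by generic code (e.g. dump_stack(),
 * sysrq-t) to dump the stack of @task, or of the current task when @task is
 * NULL, optionally starting from @sp.
 */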
void show_stack(struct task_struct *task, unsigned long *sp)
{
	task = task ? : current;

	/*
	 * Stack frames below this one aren't interesting.  Don't show them
	 * if we're printing for %current.
	 */
	if (!sp && task == current)
		sp = get_stack_pointer(current, NULL);

	show_trace_log_lvl(task, NULL, sp, KERN_DEFAULT);
}
void show_stack_regs(struct pt_regs *regs)
{
	show_trace_log_lvl(current, regs, NULL, KERN_DEFAULT);
}
static arch_spinlock_t die_lock = __ARCH_SPIN_LOCK_UNLOCKED;
static int die_owner = -1;
static unsigned int die_nest_count;
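/*
 * die_lock serializes oops output between CPUs.  die_owner records which CPU
 * holds it so that a recursive oops on the same CPU doesn't deadlock trying
 * to retake the lock, and die_nest_count tracks that nesting so the lock is
 * only released by the outermost oops_end().
 */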
unsigned long oops_begin(void)
{
	int cpu;
	unsigned long flags;

	/* racy, but better than risking deadlock. */
	raw_local_irq_save(flags);
	cpu = smp_processor_id();
	if (!arch_spin_trylock(&die_lock)) {
		if (cpu == die_owner)
			/* nested oops. should stop eventually */;
		else
			arch_spin_lock(&die_lock);
	}
	die_nest_count++;
	die_owner = cpu;
	return flags;
}
EXPORT_SYMBOL_GPL(oops_begin);
NOKPROBE_SYMBOL(oops_begin);
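/*
 * Implemented in the entry assembly (entry_32.S/entry_64.S): resets the stack
 * pointer to the top of the task stack (we may be on an IST/entry stack or
 * have almost no stack left) and then terminates the task with the given
 * signal, never returning.
 */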
void __noreturn rewind_stack_and_make_dead(int signr);
void oops_end(unsigned long flags, struct pt_regs *regs, int signr)
{
	if (regs && kexec_should_crash(current))
		crash_kexec(regs);

	add_taint(TAINT_DIE, LOCKDEP_NOW_UNRELIABLE);
	die_nest_count--;
	if (!die_nest_count)
		/* Nest count reaches zero, release the lock. */
		arch_spin_unlock(&die_lock);
	raw_local_irq_restore(flags);

	if (!signr)
		return;
	if (in_interrupt())
		panic("Fatal exception in interrupt");
	if (panic_on_oops)
		panic("Fatal exception");
	/*
	 * We're not going to return, but we might be on an IST stack or
	 * have very little stack space left.  Rewind the stack and kill
	 * the task.
	 * Before we rewind the stack, we have to tell KASAN that we're going to
	 * reuse the task stack and that existing poisons are invalid.
	 */
	kasan_unpoison_task_stack(current);
	rewind_stack_and_make_dead(signr);
}
NOKPROBE_SYMBOL(oops_end);
int __die(const char *str, struct pt_regs *regs, long err)
{
#ifdef CONFIG_X86_32
	unsigned short ss;
	unsigned long sp;
#endif
	printk(KERN_DEFAULT
	       "%s: %04lx [#%d]%s%s%s%s%s\n", str, err & 0xffff, ++die_counter,
	       IS_ENABLED(CONFIG_PREEMPT) ? " PREEMPT" : "",
	       IS_ENABLED(CONFIG_SMP) ? " SMP" : "",
	       debug_pagealloc_enabled() ? " DEBUG_PAGEALLOC" : "",
	       IS_ENABLED(CONFIG_KASAN) ? " KASAN" : "",
	       IS_ENABLED(CONFIG_PAGE_TABLE_ISOLATION) ?
	       (boot_cpu_has(X86_FEATURE_PTI) ? " PTI" : " NOPTI") : "");
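	/*
	 * The header above comes out as, for example:
	 *   "general protection fault: 0000 [#1] SMP PTI"
	 * with the bracketed die_counter distinguishing successive oopses.
	 */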
	if (notify_die(DIE_OOPS, str, regs, err,
			current->thread.trap_nr, SIGSEGV) == NOTIFY_STOP)
		return 1;

	show_regs(regs);
#ifdef CONFIG_X86_32
	if (user_mode(regs)) {
		sp = regs->sp;
		ss = regs->ss;
	} else {
		sp = kernel_stack_pointer(regs);
		savesegment(ss, ss);
	}
	printk(KERN_EMERG "EIP: %pS SS:ESP: %04x:%08lx\n",
	       (void *)regs->ip, ss, sp);
#else
	/* Executive summary in case the oops scrolled away */
	printk(KERN_ALERT "RIP: %pS RSP: %016lx\n", (void *)regs->ip, regs->sp);
#endif
	return 0;
}
NOKPROBE_SYMBOL(__die);
/*
 * This is gone through when something in the kernel has done something bad
 * and is about to be terminated:
 */
void die(const char *str, struct pt_regs *regs, long err)
{
	unsigned long flags = oops_begin();
	int sig = SIGSEGV;

	if (__die(str, regs, err))
		sig = 0;
	oops_end(flags, regs, sig);
}
static int __init code_bytes_setup(char *s)
{
	unsigned long val;
	ssize_t ret;

	ret = kstrtoul(s, 0, &val);
	if (ret)
		return ret;

	code_bytes = val;
	if (code_bytes > 8192)
		code_bytes = 8192;

	return 1;
}
__setup("code_bytes=", code_bytes_setup);
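/* e.g. booting with "code_bytes=128" widens the oops "Code:" dump to 128 bytes. */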