// SPDX-License-Identifier: GPL-2.0

/*
 * Stack trace utility functions etc.
 *
 * Copyright 2008 Christoph Hellwig, IBM Corp.
 * Copyright 2018 SUSE Linux GmbH
 * Copyright 2018 Nick Piggin, Michael Ellerman, IBM Corp.
 */
#include <linux/delay.h>
#include <linux/export.h>
#include <linux/kallsyms.h>
#include <linux/module.h>
#include <linux/nmi.h>
#include <linux/sched.h>
#include <linux/sched/debug.h>
#include <linux/sched/task_stack.h>
#include <linux/stacktrace.h>
#include <asm/ptrace.h>
#include <asm/processor.h>
#include <linux/ftrace.h>
#include <asm/kprobes.h>

#include <asm/paca.h>
void __no_sanitize_address arch_stack_walk(stack_trace_consume_fn consume_entry, void *cookie,
					   struct task_struct *task, struct pt_regs *regs)
{
	unsigned long sp;

	if (regs && !consume_entry(cookie, regs->nip))
		return;

	if (regs)
		sp = regs->gpr[1];
	else if (task == current)
		sp = current_stack_frame();
	else
		sp = task->thread.ksp;

	for (;;) {
		unsigned long *stack = (unsigned long *) sp;
		unsigned long newsp, ip;

		if (!validate_sp(sp, task, STACK_FRAME_OVERHEAD))
			return;

		newsp = stack[0];
		ip = stack[STACK_FRAME_LR_SAVE];

		if (!consume_entry(cookie, ip))
			return;

		sp = newsp;
	}
}
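
/*
 * Illustrative only -- a minimal consume_entry callback in the style of
 * the generic code in kernel/stacktrace.c, showing the contract that
 * arch_stack_walk() relies on: the callback returns false to stop the
 * walk early. The struct and function names below are hypothetical and
 * not part of this file.
 */
struct example_trace_cookie {
	unsigned long	*store;	/* caller-provided entry buffer */
	unsigned int	size;	/* capacity of @store */
	unsigned int	len;	/* entries recorded so far */
};

static bool __maybe_unused example_consume_entry(void *cookie, unsigned long addr)
{
	struct example_trace_cookie *c = cookie;

	if (c->len >= c->size)
		return false;	/* buffer full: stop the unwind */
	c->store[c->len++] = addr;
	return true;		/* keep walking */
}
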
/*
 * This function returns an error if it detects any unreliable features of the
 * stack. Otherwise it guarantees that the stack trace is reliable.
 *
 * If the task is not 'current', the caller *must* ensure the task is inactive
 * and its stack is pinned.
 */
int __no_sanitize_address arch_stack_walk_reliable(stack_trace_consume_fn consume_entry,
						   void *cookie, struct task_struct *task)
{
	unsigned long sp;
	unsigned long newsp;
	unsigned long stack_page = (unsigned long)task_stack_page(task);
	unsigned long stack_end;
	int graph_idx = 0;
	bool firstframe;

	stack_end = stack_page + THREAD_SIZE;
	if (!is_idle_task(task)) {
		/*
		 * For user tasks, this is the SP value loaded on
		 * kernel entry, see "PACAKSAVE(r13)" in _switch() and
		 * system_call_common()/EXCEPTION_PROLOG_COMMON().
		 *
		 * Likewise for non-swapper kernel threads,
		 * this also happens to be the top of the stack
		 * as set up by copy_thread().
		 *
		 * Note that stack backlinks are not properly set up by
		 * copy_thread() and thus, a forked task will have
		 * an unreliable stack trace until it's been
		 * _switch()'ed to for the first time.
		 */
		stack_end -= STACK_FRAME_OVERHEAD + sizeof(struct pt_regs);
	} else {
		/*
		 * Idle tasks have a custom stack layout,
		 * c.f. cpu_idle_thread_init().
		 */
		stack_end -= STACK_FRAME_OVERHEAD;
	}

	if (task == current)
		sp = current_stack_frame();
	else
		sp = task->thread.ksp;

	if (sp < stack_page + sizeof(struct thread_struct) ||
	    sp > stack_end - STACK_FRAME_MIN_SIZE) {
		return -EINVAL;
	}
	for (firstframe = true; sp != stack_end;
	     firstframe = false, sp = newsp) {
		unsigned long *stack = (unsigned long *) sp;
		unsigned long ip;

		/* Sanity check: the ABI requires SP to be aligned to 16 bytes. */
		if (sp & 0xF)
			return -EINVAL;

		newsp = stack[0];
		/* Stack grows downwards; the unwinder may only go up. */
		if (newsp <= sp)
			return -EINVAL;

		if (newsp != stack_end &&
		    newsp > stack_end - STACK_FRAME_MIN_SIZE) {
			return -EINVAL; /* invalid backlink, too far up. */
		}

		/*
		 * We can only trust the bottom frame's backlink, the
		 * rest of the frame may be uninitialized, continue to
		 * the next frame.
		 */
		if (firstframe)
			continue;

		/* Mark stacktraces with exception frames as unreliable. */
		if (sp <= stack_end - STACK_INT_FRAME_SIZE &&
		    stack[STACK_FRAME_MARKER] == STACK_FRAME_REGS_MARKER) {
			return -EINVAL;
		}

		/* Examine the saved LR: it must point into kernel code. */
		ip = stack[STACK_FRAME_LR_SAVE];
		if (!__kernel_text_address(ip))
			return -EINVAL;

		/*
		 * FIXME: these tests do not belong in arch-dependent
		 * code, they are generic.
		 */
		ip = ftrace_graph_ret_addr(task, &graph_idx, ip, stack);
#ifdef CONFIG_KPROBES
		/*
		 * Mark stacktraces with kretprobed functions on them
		 * as unreliable.
		 */
		if (ip == (unsigned long)kretprobe_trampoline)
			return -EINVAL;
#endif

		if (!consume_entry(cookie, ip))
			return -EINVAL;
	}
	return 0;
}
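
/*
 * Illustrative only (hypothetical helper, not part of this file):
 * roughly how a caller such as the generic stack_trace_save_tsk_reliable()
 * in kernel/stacktrace.c drives the reliable walker, reusing the example
 * cookie and callback sketched above. A negative return means the trace
 * must not be trusted; livepatch consumers treat that as "fall back to
 * other consistency checks".
 */
static int __maybe_unused example_save_reliable(struct task_struct *tsk,
						unsigned long *store,
						unsigned int size)
{
	struct example_trace_cookie c = {
		.store	= store,
		.size	= size,
	};
	int ret;

	/* The caller must ensure @tsk is inactive, see the comment above. */
	ret = arch_stack_walk_reliable(example_consume_entry, &c, tsk);

	/* On success, report how many entries were recorded. */
	return ret ? ret : c.len;
}
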
#if defined(CONFIG_PPC_BOOK3S_64) && defined(CONFIG_NMI_IPI)
static void handle_backtrace_ipi(struct pt_regs *regs)
{
	nmi_cpu_backtrace(regs);
}
static void raise_backtrace_ipi(cpumask_t *mask)
{
	struct paca_struct *p;
	unsigned int cpu;
	u64 delay_us;

	for_each_cpu(cpu, mask) {
		if (cpu == smp_processor_id()) {
			handle_backtrace_ipi(NULL);
			continue;
		}

		delay_us = 5 * USEC_PER_SEC;

		if (smp_send_safe_nmi_ipi(cpu, handle_backtrace_ipi, delay_us)) {
			// Now wait up to 5s for the other CPU to do its backtrace
			while (cpumask_test_cpu(cpu, mask) && delay_us) {
				udelay(1);
				delay_us--;
			}

			// Other CPU cleared itself from the mask
			if (delay_us)
				continue;
		}

		p = paca_ptrs[cpu];

		cpumask_clear_cpu(cpu, mask);

		pr_warn("CPU %d didn't respond to backtrace IPI, inspecting paca.\n", cpu);
		if (!virt_addr_valid(p)) {
			pr_warn("paca pointer appears corrupt? (%px)\n", p);
			continue;
		}

		pr_warn("irq_soft_mask: 0x%02x in_mce: %d in_nmi: %d",
			p->irq_soft_mask, p->in_mce, p->in_nmi);

		if (virt_addr_valid(p->__current))
			pr_cont(" current: %d (%s)\n", p->__current->pid,
				p->__current->comm);
		else
			pr_cont(" current pointer corrupt? (%px)\n", p->__current);

		pr_warn("Back trace of paca->saved_r1 (0x%016llx) (possibly stale):\n", p->saved_r1);
		show_stack(p->__current, (unsigned long *)p->saved_r1, KERN_WARNING);
	}
}
void arch_trigger_cpumask_backtrace(const cpumask_t *mask, bool exclude_self)
{
	nmi_trigger_cpumask_backtrace(mask, exclude_self, raise_backtrace_ipi);
}
#endif /* defined(CONFIG_PPC_BOOK3S_64) && defined(CONFIG_NMI_IPI) */
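
/*
 * Illustrative only (hypothetical helper, not part of this file): the
 * usual entry point into the NMI backtrace machinery above is
 * trigger_all_cpu_backtrace() from <linux/nmi.h> (used e.g. by the
 * sysrq-l handler), which calls arch_trigger_cpumask_backtrace() with
 * the online CPU mask. It returns false when the architecture provides
 * no arch_trigger_cpumask_backtrace() implementation.
 */
static void __maybe_unused example_dump_all_cpus(void)
{
	/* Backtrace every online CPU, including the caller. */
	if (!trigger_all_cpu_backtrace())
		pr_warn("NMI backtrace not supported on this platform\n");
}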