// SPDX-License-Identifier: GPL-2.0-only
/*
 * Stack tracing support
 *
 * Copyright (C) 2012 ARM Ltd.
 */
#include <linux/kernel.h>
#include <linux/efi.h>
#include <linux/export.h>
#include <linux/filter.h>
#include <linux/ftrace.h>
#include <linux/kprobes.h>
#include <linux/sched.h>
#include <linux/sched/debug.h>
#include <linux/sched/task_stack.h>
#include <linux/stacktrace.h>

#include <asm/efi.h>
#include <asm/irq.h>
#include <asm/stack_pointer.h>
#include <asm/stacktrace.h>
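
/*
 * On arm64, kernel stack traces follow the chain of frame records built per
 * AAPCS64: x29 (fp) points at a {fp, lr} pair, where the saved fp links to
 * the caller's frame record and lr is the address the current function will
 * return to.
 */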
/*
 * Kernel unwind state
 *
 * @common:      Common unwind state.
 * @task:        The task being unwound.
 * @kr_cur:      When KRETPROBES is selected, holds the kretprobe instance
 *               associated with the most recently encountered replacement lr
 *               value.
 */
struct kunwind_state {
	struct unwind_state common;
	struct task_struct *task;
#ifdef CONFIG_KRETPROBES
	struct llist_node *kr_cur;
#endif
};
static __always_inline void
kunwind_init(struct kunwind_state *state,
	     struct task_struct *task)
{
	unwind_init_common(&state->common);
	state->task = task;
}
/*
 * Start an unwind from a pt_regs.
 *
 * The unwind will begin at the PC within the regs.
 *
 * The regs must be on a stack currently owned by the calling task.
 */
static __always_inline void
kunwind_init_from_regs(struct kunwind_state *state,
		       struct pt_regs *regs)
{
	kunwind_init(state, current);

	state->common.fp = regs->regs[29];
	state->common.pc = regs->pc;
}
/*
 * Start an unwind from a caller.
 *
 * The unwind will begin at the caller of whichever function this is inlined
 * into.
 *
 * The function which invokes this must be noinline.
 */
static __always_inline void
kunwind_init_from_caller(struct kunwind_state *state)
{
	kunwind_init(state, current);

	state->common.fp = (unsigned long)__builtin_frame_address(1);
	state->common.pc = (unsigned long)__builtin_return_address(0);
}
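
/*
 * Since this helper is __always_inline, __builtin_frame_address(1) and
 * __builtin_return_address(0) evaluate in the context of the function it is
 * inlined into: they yield that function's caller's frame record and return
 * address. If that function were itself inlined, both would skip an
 * additional level, hence the noinline requirement above.
 */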
/*
 * Start an unwind from a blocked task.
 *
 * The unwind will begin at the blocked task's saved PC (i.e. the caller of
 * cpu_switch_to()).
 *
 * The caller should ensure the task is blocked in cpu_switch_to() for the
 * duration of the unwind, or the unwind will be bogus. It is never valid to
 * call this for the current task.
 */
static __always_inline void
kunwind_init_from_task(struct kunwind_state *state,
		       struct task_struct *task)
{
	kunwind_init(state, task);

	state->common.fp = thread_saved_fp(task);
	state->common.pc = thread_saved_pc(task);
}
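
/*
 * Where the PC is a trampoline installed by the function graph tracer
 * (return_to_handler) or by kretprobes, recover the original return address
 * so that traces report real callers rather than trampolines.
 */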
static __always_inline int
kunwind_recover_return_address(struct kunwind_state *state)
{
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	if (state->task->ret_stack &&
	    (state->common.pc == (unsigned long)return_to_handler)) {
		unsigned long orig_pc;
		orig_pc = ftrace_graph_ret_addr(state->task, NULL,
						state->common.pc,
						(void *)state->common.fp);
		if (WARN_ON_ONCE(state->common.pc == orig_pc))
			return -EINVAL;
		state->common.pc = orig_pc;
	}
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */

#ifdef CONFIG_KRETPROBES
	if (is_kretprobe_trampoline(state->common.pc)) {
		unsigned long orig_pc;
		orig_pc = kretprobe_find_ret_addr(state->task,
						  (void *)state->common.fp,
						  &state->kr_cur);
		state->common.pc = orig_pc;
	}
#endif /* CONFIG_KRETPROBES */

	return 0;
}
/*
 * Unwind from one frame record (A) to the next frame record (B).
 *
 * We terminate early if the location of B indicates a malformed chain of frame
 * records (e.g. a cycle), determined based on the location and fp value of A
 * and the location (but not the fp value) of B.
 */
static __always_inline int
kunwind_next(struct kunwind_state *state)
{
	struct task_struct *tsk = state->task;
	unsigned long fp = state->common.fp;
	int err;

	/* Final frame; nothing to unwind */
	if (fp == (unsigned long)task_pt_regs(tsk)->stackframe)
		return -ENOENT;

	err = unwind_next_frame_record(&state->common);
	if (err)
		return err;

	/* Strip any PAC bits before the PC is matched or reported */
	state->common.pc = ptrauth_strip_kernel_insn_pac(state->common.pc);

	return kunwind_recover_return_address(state);
}
typedef bool (*kunwind_consume_fn)(const struct kunwind_state *state, void *cookie);
static __always_inline void
do_kunwind(struct kunwind_state *state, kunwind_consume_fn consume_state,
	   void *cookie)
{
	if (kunwind_recover_return_address(state))
		return;

	while (1) {
		int ret;

		if (!consume_state(state, cookie))
			break;
		ret = kunwind_next(state);
		if (ret < 0)
			break;
	}
}
/*
 * Per-cpu stacks are only accessible when unwinding the current task in a
 * non-preemptible context.
 */
#define STACKINFO_CPU(name)					\
	({							\
		((task == current) && !preemptible())		\
			? stackinfo_get_##name()		\
			: stackinfo_get_unknown();		\
	})
/*
 * SDEI stacks are only accessible when unwinding the current task in an NMI
 * context.
 */
#define STACKINFO_SDEI(name)					\
	({							\
		((task == current) && in_nmi())			\
			? stackinfo_get_sdei_##name()		\
			: stackinfo_get_unknown();		\
	})
#define STACKINFO_EFI						\
	({							\
		((task == current) && current_in_efi())		\
			? stackinfo_get_efi()			\
			: stackinfo_get_unknown();		\
	})
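
/*
 * The common unwind code validates each new fp against this set of stacks:
 * records must strictly ascend within a stack, and once the unwind leaves a
 * stack it may never return to it. Together these properties rule out cycles
 * and other malformed frame-record chains.
 */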
static __always_inline void
kunwind_stack_walk(kunwind_consume_fn consume_state,
		   void *cookie, struct task_struct *task,
		   struct pt_regs *regs)
{
	struct stack_info stacks[] = {
		stackinfo_get_task(task),
		STACKINFO_CPU(irq),
#if defined(CONFIG_VMAP_STACK)
		STACKINFO_CPU(overflow),
#endif
#if defined(CONFIG_VMAP_STACK) && defined(CONFIG_ARM_SDE_INTERFACE)
		STACKINFO_SDEI(normal),
		STACKINFO_SDEI(critical),
#endif
#ifdef CONFIG_EFI
		STACKINFO_EFI,
#endif
	};
	struct kunwind_state state = {
		.common = {
			.stacks = stacks,
			.nr_stacks = ARRAY_SIZE(stacks),
		},
	};

	if (regs) {
		/* The regs must be on a stack owned by the current task */
		if (task != current)
			return;
		kunwind_init_from_regs(&state, regs);
	} else if (task == current) {
		kunwind_init_from_caller(&state);
	} else {
		kunwind_init_from_task(&state, task);
	}

	do_kunwind(&state, consume_state, cookie);
}
struct kunwind_consume_entry_data {
	stack_trace_consume_fn consume_entry;
	void *cookie;
};

static __always_inline bool
arch_kunwind_consume_entry(const struct kunwind_state *state, void *cookie)
{
	struct kunwind_consume_entry_data *data = cookie;

	return data->consume_entry(data->cookie, state->common.pc);
}
noinline noinstr void arch_stack_walk(stack_trace_consume_fn consume_entry,
				      void *cookie, struct task_struct *task,
				      struct pt_regs *regs)
{
	struct kunwind_consume_entry_data data = {
		.consume_entry = consume_entry,
		.cookie = cookie,
	};

	kunwind_stack_walk(arch_kunwind_consume_entry, &data, task, regs);
}
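
/*
 * Illustrative sketch (hypothetical consumer, not part of this file): under
 * CONFIG_ARCH_STACKWALK, generic helpers such as stack_trace_save() drive
 * this function roughly as:
 *
 *	static bool consume(void *cookie, unsigned long pc)
 *	{
 *		...record pc, or return false to terminate the unwind...
 *		return true;
 *	}
 *
 *	arch_stack_walk(consume, cookie, current, NULL);
 */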
struct bpf_unwind_consume_entry_data {
	bool (*consume_entry)(void *cookie, u64 ip, u64 sp, u64 fp);
	void *cookie;
};

static bool
arch_bpf_unwind_consume_entry(const struct kunwind_state *state, void *cookie)
{
	struct bpf_unwind_consume_entry_data *data = cookie;

	/* sp is unknown to the frame-record unwinder; report it as 0 */
	return data->consume_entry(data->cookie, state->common.pc, 0,
				   state->common.fp);
}
noinline noinstr void arch_bpf_stack_walk(bool (*consume_entry)(void *cookie, u64 ip, u64 sp,
								u64 fp),
					  void *cookie)
{
	struct bpf_unwind_consume_entry_data data = {
		.consume_entry = consume_entry,
		.cookie = cookie,
	};

	kunwind_stack_walk(arch_bpf_unwind_consume_entry, &data, current, NULL);
}
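
/*
 * Note: the above is the arm64 backend for BPF kernel-stack unwinding (e.g.
 * when unwinding for BPF exceptions); it always unwinds the current task
 * from the caller of arch_bpf_stack_walk().
 */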
static bool dump_backtrace_entry(void *arg, unsigned long where)
{
	char *loglvl = arg;
	printk("%s %pSb\n", loglvl, (void *)where);
	return true;
}
void dump_backtrace(struct pt_regs *regs, struct task_struct *tsk,
		    const char *loglvl)
{
	pr_debug("%s(regs = %p tsk = %p)\n", __func__, regs, tsk);

	/* User-mode regs carry no kernel frame records to walk */
	if (regs && user_mode(regs))
		return;

	if (!tsk)
		tsk = current;

	/* Pin the task's stack so it cannot be freed mid-unwind */
	if (!try_get_task_stack(tsk))
		return;

	printk("%sCall trace:\n", loglvl);
	arch_stack_walk(dump_backtrace_entry, (void *)loglvl, tsk, regs);

	put_task_stack(tsk);
}
void show_stack(struct task_struct *tsk, unsigned long *sp, const char *loglvl)
{
	dump_backtrace(NULL, tsk, loglvl);
	barrier();
}