/*
 * Copyright (C) 2008 Steven Rostedt <srostedt@redhat.com>
 */
#include <linux/stacktrace.h>
#include <linux/kallsyms.h>
#include <linux/seq_file.h>
#include <linux/spinlock.h>
#include <linux/uaccess.h>
#include <linux/ftrace.h>
#include <linux/module.h>
#include <linux/sysctl.h>
#include <linux/init.h>

#include <asm/setup.h>
static unsigned long stack_dump_trace[STACK_TRACE_ENTRIES+1] =
	 { [0 ... (STACK_TRACE_ENTRIES)] = ULONG_MAX };
unsigned stack_trace_index[STACK_TRACE_ENTRIES];
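/*
 * stack_dump_trace[] holds the return addresses of the largest stack seen
 * so far, terminated by a ULONG_MAX sentinel; stack_trace_index[] holds,
 * for each of those entries, the distance in bytes from that frame to the
 * top of the stack.
 */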
/*
 * Reserve one entry for the passed in ip. This will allow
 * us to remove most or all of the stack size overhead
 * added by the stack tracer itself.
 */
struct stack_trace stack_trace_max = {
	.max_entries = STACK_TRACE_ENTRIES - 1,
	.entries = &stack_dump_trace[0],
};
unsigned long stack_trace_max_size;
arch_spinlock_t stack_trace_max_lock =
	(arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;

static DEFINE_PER_CPU(int, trace_active);
static DEFINE_MUTEX(stack_sysctl_mutex);
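/*
 * stack_tracer_enabled mirrors the kernel.stack_tracer_enabled sysctl.
 * last_stack_tracer_enabled remembers the previous value so that
 * stack_trace_sysctl() only registers or unregisters the ftrace callback
 * when the setting actually changes.
 */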
int stack_tracer_enabled;
static int last_stack_tracer_enabled;

void stack_trace_print(void)

	pr_emerg("        Depth    Size   Location    (%d entries)\n"
		 "        -----    ----   --------\n",
		 stack_trace_max.nr_entries);
	for (i = 0; i < stack_trace_max.nr_entries; i++) {
		if (stack_dump_trace[i] == ULONG_MAX)
			break;
		if (i+1 == stack_trace_max.nr_entries ||
		    stack_dump_trace[i+1] == ULONG_MAX)
			size = stack_trace_index[i];
		else
			size = stack_trace_index[i] - stack_trace_index[i+1];

		pr_emerg("%3ld) %8d %5d %pS\n", i, stack_trace_index[i],
			 size, (void *)stack_dump_trace[i]);
	}
/*
 * When arch-specific code overrides this function, the following
 * data should be filled up, assuming stack_trace_max_lock is held to
 * prevent concurrent updates.
 *     stack_trace_max
 *     stack_trace_index[]
 *     stack_trace_max_size
 */
check_stack(unsigned long ip, unsigned long *stack)
	unsigned long this_size, flags; unsigned long *p, *top, *start;
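	/*
	 * tracer_frame caches the stack cost added by the stack tracer's
	 * own frames; it is measured once (further down) and subtracted
	 * from every later measurement so the tracer does not inflate the
	 * recorded max.
	 */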
	static int tracer_frame;
	int frame_size = ACCESS_ONCE(tracer_frame);
	this_size = ((unsigned long)stack) & (THREAD_SIZE-1);
	this_size = THREAD_SIZE - this_size;

	/* Remove the frame of the tracer */
	this_size -= frame_size;

	if (this_size <= stack_trace_max_size)
		return;

	/* we do not handle interrupt stacks yet */
	if (!object_is_on_stack(stack))
		return;

	/* Can't do this from NMI context (can cause deadlocks) */
	if (in_nmi())
		return;
	local_irq_save(flags);
	arch_spin_lock(&stack_trace_max_lock);

	/*
	 * RCU may not be watching, make it see us.
	 * The stack trace code uses rcu_sched.
	 */

	/* In case another CPU set the tracer_frame on us */
	if (unlikely(!frame_size))
		this_size -= tracer_frame;
	/* a race could have already updated it */
	if (this_size <= stack_trace_max_size)
		goto out;

	stack_trace_max_size = this_size;

	stack_trace_max.nr_entries = 0;
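	/* skip the first three saved entries, which are the tracer's own frames */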
	stack_trace_max.skip = 3;

	save_stack_trace(&stack_trace_max);
	/* Skip over the overhead of the stack tracer itself */
	for (i = 0; i < stack_trace_max.nr_entries; i++) {
		if (stack_dump_trace[i] == ip)
			break;
	}
	/*
	 * Some archs may not have the passed in ip in the dump.
	 * If that happens, we need to show everything.
	 */
	if (i == stack_trace_max.nr_entries)
		i = 0;

	/*
	 * Now find where in the stack these are.
	 */
	x = 0;
	start = stack;
	top = (unsigned long *)
		(((unsigned long)start & ~(THREAD_SIZE-1)) + THREAD_SIZE);
	/*
	 * Loop through all the entries. One of the entries may
	 * for some reason be missed on the stack, so we may
	 * have to account for them. If they are all there, this
	 * loop will only happen once. This code only takes place
	 * on a new max, so it is far from a fast path.
	 */
	while (i < stack_trace_max.nr_entries) {
		stack_trace_index[x] = this_size;
		p = start;
		for (; p < top && i < stack_trace_max.nr_entries; p++) {
			if (stack_dump_trace[i] == ULONG_MAX)
				break;
			/*
			 * The READ_ONCE_NOCHECK is used to let KASAN know that
			 * this is not a stack-out-of-bounds error.
			 */
			if ((READ_ONCE_NOCHECK(*p)) == stack_dump_trace[i]) {
				stack_dump_trace[x] = stack_dump_trace[i++];
				this_size = stack_trace_index[x++] =
					(top - p) * sizeof(unsigned long);
				/* Start the search from here */
				start = p + 1;
				/*
				 * We do not want to show the overhead
				 * of the stack tracer stack in the
				 * max stack. If we haven't figured
				 * out what that is, then figure it out
				 * now.
				 */
				if (unlikely(!tracer_frame)) {
					tracer_frame = (p - stack) *
						sizeof(unsigned long);
					stack_trace_max_size -= tracer_frame;

	stack_trace_max.nr_entries = x;
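	/* terminate the trimmed trace with the ULONG_MAX end marker */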
	stack_dump_trace[x] = ULONG_MAX;

	if (task_stack_end_corrupted(current)) {
 out:
	arch_spin_unlock(&stack_trace_max_lock);
	local_irq_restore(flags);
/* Some archs may not define MCOUNT_INSN_SIZE */
#ifndef MCOUNT_INSN_SIZE
# define MCOUNT_INSN_SIZE 0
#endif
stack_trace_call(unsigned long ip, unsigned long parent_ip,
		 struct ftrace_ops *op, struct pt_regs *pt_regs)

	preempt_disable_notrace();
	cpu = raw_smp_processor_id();

	/* no atomic needed, we only modify this variable by this cpu */
	if (per_cpu(trace_active, cpu)++ != 0)
		goto out;
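	/*
	 * ip points at the mcount/fentry call site in the traced function;
	 * step past that call so it lines up with the return addresses that
	 * save_stack_trace() records.
	 */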
	ip += MCOUNT_INSN_SIZE;

	check_stack(ip, &stack);
 out:
	per_cpu(trace_active, cpu)--;
	/* prevent recursion in schedule */
	preempt_enable_notrace();
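/*
 * FTRACE_OPS_FL_RECURSION_SAFE tells the ftrace core that this callback
 * does its own recursion protection (the per-cpu trace_active counter),
 * so no extra recursion guard needs to be wrapped around it.
 */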
static struct ftrace_ops trace_ops __read_mostly =
{
	.func = stack_trace_call,
	.flags = FTRACE_OPS_FL_RECURSION_SAFE,
};
stack_max_size_read(struct file *filp, char __user *ubuf,
		    size_t count, loff_t *ppos)
	unsigned long *ptr = filp->private_data;

	r = snprintf(buf, sizeof(buf), "%ld\n", *ptr);

	return simple_read_from_buffer(ubuf, count, ppos, buf, r);

stack_max_size_write(struct file *filp, const char __user *ubuf,
		     size_t count, loff_t *ppos)
	long *ptr = filp->private_data;
	unsigned long val, flags;
	ret = kstrtoul_from_user(ubuf, count, 10, &val);
	if (ret)
		return ret;

	local_irq_save(flags);
	/*
	 * In case we trace inside arch_spin_lock() or after (NMI),
	 * we will cause circular lock, so we also need to increase
	 * the percpu trace_active here.
	 */
	cpu = smp_processor_id();
	per_cpu(trace_active, cpu)++;
	arch_spin_lock(&stack_trace_max_lock);
	*ptr = val;
	arch_spin_unlock(&stack_trace_max_lock);
	per_cpu(trace_active, cpu)--;
	local_irq_restore(flags);
static const struct file_operations stack_max_size_fops = {
	.open = tracing_open_generic,
	.read = stack_max_size_read,
	.write = stack_max_size_write,
	.llseek = default_llseek,
};
__next(struct seq_file *m, loff_t *pos)
	if (n > stack_trace_max.nr_entries || stack_dump_trace[n] == ULONG_MAX)
		return NULL;

	m->private = (void *)n;
t_next(struct seq_file *m, void *v, loff_t *pos)
	return __next(m, pos);
static void *t_start(struct seq_file *m, loff_t *pos)
	cpu = smp_processor_id();
	per_cpu(trace_active, cpu)++;

	arch_spin_lock(&stack_trace_max_lock);

	if (*pos == 0)
		return SEQ_START_TOKEN;

	return __next(m, pos);
static void t_stop(struct seq_file *m, void *p)
	arch_spin_unlock(&stack_trace_max_lock);

	cpu = smp_processor_id();
	per_cpu(trace_active, cpu)--;

static void trace_lookup_stack(struct seq_file *m, long i)
	unsigned long addr = stack_dump_trace[i];

	seq_printf(m, "%pS\n", (void *)addr);
static void print_disabled(struct seq_file *m)
	seq_puts(m, "# Stack tracer disabled\n"
		 "# To enable the stack tracer, either add 'stacktrace' to the\n"
		 "# kernel command line\n"
		 "# or 'echo 1 > /proc/sys/kernel/stack_tracer_enabled'\n");
static int t_show(struct seq_file *m, void *v)
	if (v == SEQ_START_TOKEN) {
		seq_printf(m, "        Depth    Size   Location"
			   "    (%d entries)\n"
			   "        -----    ----   --------\n",
			   stack_trace_max.nr_entries);

		if (!stack_tracer_enabled && !stack_trace_max_size)
			print_disabled(m);

		return 0;
	}
	if (i >= stack_trace_max.nr_entries ||
	    stack_dump_trace[i] == ULONG_MAX)
		return 0;

	if (i+1 == stack_trace_max.nr_entries ||
	    stack_dump_trace[i+1] == ULONG_MAX)
		size = stack_trace_index[i];
	else
		size = stack_trace_index[i] - stack_trace_index[i+1];
	seq_printf(m, "%3ld) %8d %5d ", i, stack_trace_index[i], size);

	trace_lookup_stack(m, i);

static const struct seq_operations stack_trace_seq_ops = {

static int stack_trace_open(struct inode *inode, struct file *file)
	return seq_open(file, &stack_trace_seq_ops);
static const struct file_operations stack_trace_fops = {
	.open = stack_trace_open,
	.release = seq_release,
};
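/*
 * stack_trace_filter reuses the ftrace filter machinery: function names
 * (or glob patterns) written to it are added to trace_ops' filter hash,
 * limiting which functions trigger the stack check in stack_trace_call().
 */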
stack_trace_filter_open(struct inode *inode, struct file *file)
	return ftrace_regex_open(&trace_ops, FTRACE_ITER_FILTER,
				 inode, file);
static const struct file_operations stack_trace_filter_fops = {
	.open = stack_trace_filter_open,
	.write = ftrace_filter_write,
	.llseek = tracing_lseek,
	.release = ftrace_regex_release,
};
stack_trace_sysctl(struct ctl_table *table, int write,
		   void __user *buffer, size_t *lenp,
		   loff_t *ppos)
	mutex_lock(&stack_sysctl_mutex);

	ret = proc_dointvec(table, write, buffer, lenp, ppos);
	if (ret || !write ||
	    (last_stack_tracer_enabled == !!stack_tracer_enabled))
		goto out;

	last_stack_tracer_enabled = !!stack_tracer_enabled;
	if (stack_tracer_enabled)
		register_ftrace_function(&trace_ops);
	else
		unregister_ftrace_function(&trace_ops);

 out:
	mutex_unlock(&stack_sysctl_mutex);
	return ret;
static char stack_trace_filter_buf[COMMAND_LINE_SIZE+1] __initdata;

static __init int enable_stacktrace(char *str)
	if (strncmp(str, "_filter=", 8) == 0)
		strncpy(stack_trace_filter_buf, str+8, COMMAND_LINE_SIZE);
	stack_tracer_enabled = 1;
	last_stack_tracer_enabled = 1;
	return 1;

__setup("stacktrace", enable_stacktrace);
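/*
 * __setup() matches by prefix, so both "stacktrace" and
 * "stacktrace_filter=<function-list>" on the kernel command line end up in
 * enable_stacktrace(); for the latter, str starts with "_filter=".
 */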
static __init int stack_trace_init(void)
	struct dentry *d_tracer;

	d_tracer = tracing_init_dentry();
	if (IS_ERR(d_tracer))
		return 0;
	trace_create_file("stack_max_size", 0644, d_tracer,
			&stack_trace_max_size, &stack_max_size_fops);

	trace_create_file("stack_trace", 0444, d_tracer,
			NULL, &stack_trace_fops);

	trace_create_file("stack_trace_filter", 0444, d_tracer,
			NULL, &stack_trace_filter_fops);
	if (stack_trace_filter_buf[0])
		ftrace_set_early_filter(&trace_ops, stack_trace_filter_buf, 1);

	if (stack_tracer_enabled)
		register_ftrace_function(&trace_ops);

	return 0;
device_initcall(stack_trace_init);
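/*
 * Typical use from userspace, assuming debugfs is mounted at
 * /sys/kernel/debug:
 *
 *   echo 1 > /proc/sys/kernel/stack_tracer_enabled
 *   cat /sys/kernel/debug/tracing/stack_trace        # worst-case stack seen
 *   cat /sys/kernel/debug/tracing/stack_max_size     # its size in bytes
 *   echo 0 > /sys/kernel/debug/tracing/stack_max_size   # reset the max
 *   echo <function> > /sys/kernel/debug/tracing/stack_trace_filter
 */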