// SPDX-License-Identifier: GPL-2.0
/*
 * Performance events callchain code, extracted from core.c:
 *
 *  Copyright (C) 2008 Thomas Gleixner <tglx@linutronix.de>
 *  Copyright (C) 2008-2011 Red Hat, Inc., Ingo Molnar
 *  Copyright (C) 2008-2011 Red Hat, Inc., Peter Zijlstra
 *  Copyright © 2009 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com>
 *
 * For licensing details see kernel-base/COPYING
 */

#include <linux/perf_event.h>
#include <linux/slab.h>
#include <linux/sched/task_stack.h>

#include "internal.h"

struct callchain_cpus_entries {
	struct rcu_head			rcu_head;
	struct perf_callchain_entry	*cpu_entries[0];
};

int sysctl_perf_event_max_stack __read_mostly = PERF_MAX_STACK_DEPTH;
int sysctl_perf_event_max_contexts_per_stack __read_mostly = PERF_MAX_CONTEXTS_PER_STACK;

static inline size_t perf_callchain_entry__sizeof(void)
{
	return (sizeof(struct perf_callchain_entry) +
		sizeof(__u64) * (sysctl_perf_event_max_stack +
				 sysctl_perf_event_max_contexts_per_stack));
}
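
/*
 * Worked example of the sizing above (illustrative, assuming the default
 * limits PERF_MAX_STACK_DEPTH == 127 and PERF_MAX_CONTEXTS_PER_STACK == 8):
 * one entry is the header of struct perf_callchain_entry plus 127 + 8
 * slots of __u64, and every CPU carries PERF_NR_CONTEXTS such entries
 * back to back, one per recursion level. get_callchain_entry() below
 * indexes this per-CPU buffer by rctx * perf_callchain_entry__sizeof().
 */
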
static DEFINE_PER_CPU(int, callchain_recursion[PERF_NR_CONTEXTS]);
static atomic_t nr_callchain_events;
static DEFINE_MUTEX(callchain_mutex);
static struct callchain_cpus_entries *callchain_cpus_entries;

/*
 * Fallback stubs for architectures that do not implement their own
 * callchain unwinders.
 */
__weak void perf_callchain_kernel(struct perf_callchain_entry_ctx *entry,
				  struct pt_regs *regs)
{
}

__weak void perf_callchain_user(struct perf_callchain_entry_ctx *entry,
				struct pt_regs *regs)
{
}

static void release_callchain_buffers_rcu(struct rcu_head *head)
{
	struct callchain_cpus_entries *entries;
	int cpu;

	entries = container_of(head, struct callchain_cpus_entries, rcu_head);

	for_each_possible_cpu(cpu)
		kfree(entries->cpu_entries[cpu]);

	kfree(entries);
}

static void release_callchain_buffers(void)
{
	struct callchain_cpus_entries *entries;

	entries = callchain_cpus_entries;
	RCU_INIT_POINTER(callchain_cpus_entries, NULL);
	call_rcu(&entries->rcu_head, release_callchain_buffers_rcu);
}
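
/*
 * Note on the RCU scheme above: samplers dereference
 * callchain_cpus_entries locklessly, possibly from NMI context, so the
 * buffers are never freed in place. The global pointer is cleared first
 * and the kfree()s are deferred past a grace period via call_rcu(), by
 * which time no reader can still see the old pointer.
 */
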
static int alloc_callchain_buffers(void)
{
	int cpu;
	int size;
	struct callchain_cpus_entries *entries;

	/*
	 * We can't use the percpu allocation API for data that can be
	 * accessed from NMI. Use a temporary manual per cpu allocation
	 * until that gets sorted out.
	 */
	size = offsetof(struct callchain_cpus_entries, cpu_entries[nr_cpu_ids]);

	entries = kzalloc(size, GFP_KERNEL);
	if (!entries)
		return -ENOMEM;

	size = perf_callchain_entry__sizeof() * PERF_NR_CONTEXTS;

	for_each_possible_cpu(cpu) {
		entries->cpu_entries[cpu] = kmalloc_node(size, GFP_KERNEL,
							 cpu_to_node(cpu));
		if (!entries->cpu_entries[cpu])
			goto fail;
	}

	rcu_assign_pointer(callchain_cpus_entries, entries);

	return 0;

fail:
	for_each_possible_cpu(cpu)
		kfree(entries->cpu_entries[cpu]);
	kfree(entries);

	return -ENOMEM;
}

int get_callchain_buffers(int event_max_stack)
{
	int err = 0;
	int count;

	mutex_lock(&callchain_mutex);

	count = atomic_inc_return(&nr_callchain_events);
	if (WARN_ON_ONCE(count < 1)) {
		err = -EINVAL;
		goto exit;
	}

	/*
	 * If requesting per event more than the global cap,
	 * return a different error to help userspace figure
	 * this out.
	 *
	 * And also do it here so that we have &callchain_mutex held.
	 */
	if (event_max_stack > sysctl_perf_event_max_stack) {
		err = -EOVERFLOW;
		goto exit;
	}

	if (count == 1)
		err = alloc_callchain_buffers();
exit:
	if (err)
		atomic_dec(&nr_callchain_events);

	mutex_unlock(&callchain_mutex);

	return err;
}

void put_callchain_buffers(void)
{
	if (atomic_dec_and_mutex_lock(&nr_callchain_events, &callchain_mutex)) {
		release_callchain_buffers();
		mutex_unlock(&callchain_mutex);
	}
}
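
/*
 * Illustrative pairing (a sketch, not a call site in this file): a
 * caller that wants callchains takes a reference while setting up an
 * event and drops it on teardown:
 *
 *	err = get_callchain_buffers(event->attr.sample_max_stack);
 *	if (err)
 *		return err;
 *	...
 *	put_callchain_buffers();
 *
 * The first reference allocates the per-CPU buffers; dropping the last
 * one schedules their RCU release above.
 */
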
static struct perf_callchain_entry *get_callchain_entry(int *rctx)
{
	int cpu;
	struct callchain_cpus_entries *entries;

	*rctx = get_recursion_context(this_cpu_ptr(callchain_recursion));
	if (*rctx == -1)
		return NULL;

	entries = rcu_dereference(callchain_cpus_entries);
	if (!entries)
		return NULL;

	cpu = smp_processor_id();

	return (((void *)entries->cpu_entries[cpu]) +
		(*rctx * perf_callchain_entry__sizeof()));
}

static void
put_callchain_entry(int rctx)
{
	put_recursion_context(this_cpu_ptr(callchain_recursion), rctx);
}
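
/*
 * Why rctx exists: callchain_recursion keeps one busy flag per
 * recursion level (task, softirq, hardirq, NMI), so a sample that
 * interrupts another sample, e.g. an NMI arriving mid-unwind, is
 * steered to its own per-CPU entry instead of scribbling over the one
 * already in use a level below.
 */
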
struct perf_callchain_entry *
perf_callchain(struct perf_event *event, struct pt_regs *regs)
{
	bool kernel = !event->attr.exclude_callchain_kernel;
	bool user   = !event->attr.exclude_callchain_user;
	/* Disallow cross-task user callchains. */
	bool crosstask = event->ctx->task && event->ctx->task != current;
	const u32 max_stack = event->attr.sample_max_stack;

	if (!kernel && !user)
		return NULL;

	return get_perf_callchain(regs, 0, kernel, user, max_stack, crosstask, true);
}

struct perf_callchain_entry *
get_perf_callchain(struct pt_regs *regs, u32 init_nr, bool kernel, bool user,
		   u32 max_stack, bool crosstask, bool add_mark)
{
	struct perf_callchain_entry *entry;
	struct perf_callchain_entry_ctx ctx;
	int rctx;

	entry = get_callchain_entry(&rctx);
	if (rctx == -1)
		return NULL;

	if (!entry)
		goto exit_put;

	ctx.entry          = entry;
	ctx.max_stack      = max_stack;
	ctx.nr             = entry->nr = init_nr;
	ctx.contexts       = 0;
	ctx.contexts_maxed = false;

	if (kernel && !user_mode(regs)) {
		if (add_mark)
			perf_callchain_store_context(&ctx, PERF_CONTEXT_KERNEL);
		perf_callchain_kernel(&ctx, regs);
	}

	if (user) {
		if (!user_mode(regs)) {
			if (current->mm)
				regs = task_pt_regs(current);
			else
				regs = NULL;
		}

		if (regs) {
			mm_segment_t fs;

			if (crosstask)
				goto exit_put;

			if (add_mark)
				perf_callchain_store_context(&ctx, PERF_CONTEXT_USER);

			fs = get_fs();
			set_fs(USER_DS);
			perf_callchain_user(&ctx, regs);
			set_fs(fs);
		}
	}

exit_put:
	put_callchain_entry(rctx);

	return entry;
}
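
/*
 * Shape of a finished entry when add_mark is true (illustrative):
 *
 *	PERF_CONTEXT_KERNEL, ip, ip, ..., PERF_CONTEXT_USER, ip, ip, ...
 *
 * The context markers let consumers split the flat ip[] array back into
 * its kernel and user halves; they are accounted against
 * sysctl_perf_event_max_contexts_per_stack rather than max_stack, which
 * is why perf_callchain_entry__sizeof() reserves room for both.
 */
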
/*
 * Used for sysctl_perf_event_max_stack and
 * sysctl_perf_event_max_contexts_per_stack.
 */
int perf_event_max_stack_handler(struct ctl_table *table, int write,
				 void __user *buffer, size_t *lenp, loff_t *ppos)
{
	int *value = table->data;
	int new_value = *value, ret;
	struct ctl_table new_table = *table;

	new_table.data = &new_value;
	ret = proc_dointvec_minmax(&new_table, write, buffer, lenp, ppos);
	if (ret || !write)
		return ret;

	mutex_lock(&callchain_mutex);
	if (atomic_read(&nr_callchain_events))
		ret = -EBUSY;
	else
		*value = new_value;

	mutex_unlock(&callchain_mutex);

	return ret;
}
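
/*
 * From userspace these limits surface as sysctls, e.g.:
 *
 *	# sysctl kernel.perf_event_max_stack=256
 *
 * A write is refused with -EBUSY while any callchain-using events are
 * alive, because the per-CPU buffers above were sized with the old
 * values and are not reallocated on the fly.
 */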