GNU Linux-libre 4.9.309-gnu1
kernel/events/callchain.c
/*
 * Performance events callchain code, extracted from core.c:
 *
 *  Copyright (C) 2008 Thomas Gleixner <tglx@linutronix.de>
 *  Copyright (C) 2008-2011 Red Hat, Inc., Ingo Molnar
 *  Copyright (C) 2008-2011 Red Hat, Inc., Peter Zijlstra
 *  Copyright © 2009 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com>
 *
 * For licensing details see kernel-base/COPYING
 */

#include <linux/perf_event.h>
#include <linux/slab.h>
#include "internal.h"

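/*
 * Per-CPU storage for callchain entries, indexed by CPU. The whole
 * structure is freed via RCU, so readers that found the pointer under
 * rcu_read_lock() (including NMI context) never see it half torn down.
 */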
struct callchain_cpus_entries {
	struct rcu_head			rcu_head;
	struct perf_callchain_entry	*cpu_entries[0];
};

int sysctl_perf_event_max_stack __read_mostly = PERF_MAX_STACK_DEPTH;
int sysctl_perf_event_max_contexts_per_stack __read_mostly = PERF_MAX_CONTEXTS_PER_STACK;

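/*
 * Size of one callchain entry: the entry header plus one __u64 slot
 * per stack frame and per context marker. For example, with the
 * default limits (PERF_MAX_STACK_DEPTH = 127 and
 * PERF_MAX_CONTEXTS_PER_STACK = 8) each entry carries 135 __u64
 * slots on top of struct perf_callchain_entry.
 */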
static inline size_t perf_callchain_entry__sizeof(void)
{
	return (sizeof(struct perf_callchain_entry) +
		sizeof(__u64) * (sysctl_perf_event_max_stack +
				 sysctl_perf_event_max_contexts_per_stack));
}

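/*
 * One recursion counter per context (task, softirq, hardirq, NMI) and
 * per CPU, used to drop callchains that would otherwise be recorded
 * recursively from the same context.
 */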
static DEFINE_PER_CPU(int, callchain_recursion[PERF_NR_CONTEXTS]);
static atomic_t nr_callchain_events;
static DEFINE_MUTEX(callchain_mutex);
static struct callchain_cpus_entries *callchain_cpus_entries;


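/*
 * Architectures with callchain support override these __weak stubs;
 * the defaults record nothing.
 */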
__weak void perf_callchain_kernel(struct perf_callchain_entry_ctx *entry,
				  struct pt_regs *regs)
{
}

__weak void perf_callchain_user(struct perf_callchain_entry_ctx *entry,
				struct pt_regs *regs)
{
}

static void release_callchain_buffers_rcu(struct rcu_head *head)
{
	struct callchain_cpus_entries *entries;
	int cpu;

	entries = container_of(head, struct callchain_cpus_entries, rcu_head);

	for_each_possible_cpu(cpu)
		kfree(entries->cpu_entries[cpu]);

	kfree(entries);
}

static void release_callchain_buffers(void)
{
	struct callchain_cpus_entries *entries;

	entries = callchain_cpus_entries;
	RCU_INIT_POINTER(callchain_cpus_entries, NULL);
	call_rcu(&entries->rcu_head, release_callchain_buffers_rcu);
}

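/*
 * Allocate the global entries array plus, for each possible CPU, a
 * node-local block large enough for PERF_NR_CONTEXTS entries.
 */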
static int alloc_callchain_buffers(void)
{
	int cpu;
	int size;
	struct callchain_cpus_entries *entries;

	/*
	 * We can't use the percpu allocation API for data that can be
	 * accessed from NMI. Use a temporary manual per cpu allocation
	 * until that gets sorted out.
	 */
	size = offsetof(struct callchain_cpus_entries, cpu_entries[nr_cpu_ids]);

	entries = kzalloc(size, GFP_KERNEL);
	if (!entries)
		return -ENOMEM;

	size = perf_callchain_entry__sizeof() * PERF_NR_CONTEXTS;

	for_each_possible_cpu(cpu) {
		entries->cpu_entries[cpu] = kmalloc_node(size, GFP_KERNEL,
							 cpu_to_node(cpu));
		if (!entries->cpu_entries[cpu])
			goto fail;
	}

	rcu_assign_pointer(callchain_cpus_entries, entries);

	return 0;

fail:
	for_each_possible_cpu(cpu)
		kfree(entries->cpu_entries[cpu]);
	kfree(entries);

	return -ENOMEM;
}

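/*
 * Callers take one reference per callchain-sampling event and drop it
 * with put_callchain_buffers() on teardown. A sketch of the typical
 * caller (modeled on perf_event_alloc(); the error label is
 * hypothetical):
 *
 *	if (event->attr.sample_type & PERF_SAMPLE_CALLCHAIN) {
 *		err = get_callchain_buffers(event->attr.sample_max_stack);
 *		if (err)
 *			goto err;
 *	}
 */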
int get_callchain_buffers(int event_max_stack)
{
	int err = 0;
	int count;

	mutex_lock(&callchain_mutex);

	count = atomic_inc_return(&nr_callchain_events);
	if (WARN_ON_ONCE(count < 1)) {
		err = -EINVAL;
		goto exit;
	}

	/*
	 * If this event requests a deeper stack than the global cap
	 * allows, return a distinct error so userspace can tell the
	 * two failures apart.
	 *
	 * Do the check here so that &callchain_mutex is held.
	 */
	if (event_max_stack > sysctl_perf_event_max_stack) {
		err = -EOVERFLOW;
		goto exit;
	}

	if (count == 1)
		err = alloc_callchain_buffers();
exit:
	if (err)
		atomic_dec(&nr_callchain_events);

	mutex_unlock(&callchain_mutex);

	return err;
}

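/*
 * Drop a reference; atomic_dec_and_mutex_lock() takes callchain_mutex
 * only when the count hits zero, at which point the buffers are freed.
 */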
void put_callchain_buffers(void)
{
	if (atomic_dec_and_mutex_lock(&nr_callchain_events, &callchain_mutex)) {
		release_callchain_buffers();
		mutex_unlock(&callchain_mutex);
	}
}

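/*
 * Claim this CPU's entry for the current context.
 * get_recursion_context() returns an index (task, softirq, hardirq or
 * NMI) or -1 if a callchain is already being recorded in that context
 * on this CPU.
 */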
static struct perf_callchain_entry *get_callchain_entry(int *rctx)
{
	int cpu;
	struct callchain_cpus_entries *entries;

	*rctx = get_recursion_context(this_cpu_ptr(callchain_recursion));
	if (*rctx == -1)
		return NULL;

	entries = rcu_dereference(callchain_cpus_entries);
	if (!entries)
		return NULL;

	cpu = smp_processor_id();

	return (((void *)entries->cpu_entries[cpu]) +
		(*rctx * perf_callchain_entry__sizeof()));
}

static void
put_callchain_entry(int rctx)
{
	put_recursion_context(this_cpu_ptr(callchain_recursion), rctx);
}

struct perf_callchain_entry *
perf_callchain(struct perf_event *event, struct pt_regs *regs)
{
	bool kernel = !event->attr.exclude_callchain_kernel;
	bool user   = !event->attr.exclude_callchain_user;
	/* Disallow cross-task user callchains. */
	bool crosstask = event->ctx->task && event->ctx->task != current;
	const u32 max_stack = event->attr.sample_max_stack;

	if (!kernel && !user)
		return NULL;

	return get_perf_callchain(regs, 0, kernel, user, max_stack, crosstask, true);
}

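/*
 * Record a callchain into a per-CPU entry: the kernel side first (when
 * sampling in kernel mode), then the user side, each preceded by a
 * PERF_CONTEXT_* marker when add_mark is set. The user unwind runs
 * with set_fs(USER_DS) so stack reads are bounded to user addresses.
 */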
struct perf_callchain_entry *
get_perf_callchain(struct pt_regs *regs, u32 init_nr, bool kernel, bool user,
		   u32 max_stack, bool crosstask, bool add_mark)
{
	struct perf_callchain_entry *entry;
	struct perf_callchain_entry_ctx ctx;
	int rctx;

	entry = get_callchain_entry(&rctx);
	if (rctx == -1)
		return NULL;

	if (!entry)
		goto exit_put;

	ctx.entry     = entry;
	ctx.max_stack = max_stack;
	ctx.nr        = entry->nr = init_nr;
	ctx.contexts       = 0;
	ctx.contexts_maxed = false;

	if (kernel && !user_mode(regs)) {
		if (add_mark)
			perf_callchain_store_context(&ctx, PERF_CONTEXT_KERNEL);
		perf_callchain_kernel(&ctx, regs);
	}

	if (user) {
		if (!user_mode(regs)) {
			if (current->mm)
				regs = task_pt_regs(current);
			else
				regs = NULL;
		}

		if (regs) {
			mm_segment_t fs;

			if (crosstask)
				goto exit_put;

			if (add_mark)
				perf_callchain_store_context(&ctx, PERF_CONTEXT_USER);

			fs = get_fs();
			set_fs(USER_DS);
			perf_callchain_user(&ctx, regs);
			set_fs(fs);
		}
	}

exit_put:
	put_callchain_entry(rctx);

	return entry;
}

/*
 * Used for sysctl_perf_event_max_stack and
 * sysctl_perf_event_max_contexts_per_stack: writes are rejected with
 * -EBUSY while any callchain-using events exist, since resizing would
 * require reallocating the per-CPU buffers.
 */
int perf_event_max_stack_handler(struct ctl_table *table, int write,
				 void __user *buffer, size_t *lenp, loff_t *ppos)
{
	int *value = table->data;
	int new_value = *value, ret;
	struct ctl_table new_table = *table;

	new_table.data = &new_value;
	ret = proc_dointvec_minmax(&new_table, write, buffer, lenp, ppos);
	if (ret || !write)
		return ret;

	mutex_lock(&callchain_mutex);
	if (atomic_read(&nr_callchain_events))
		ret = -EBUSY;
	else
		*value = new_value;

	mutex_unlock(&callchain_mutex);

	return ret;
}