// SPDX-License-Identifier: GPL-2.0
/*
 * trace event based perf event profiling/tracing
 *
 * Copyright (C) 2009 Red Hat Inc, Peter Zijlstra
 * Copyright (C) 2009-2010 Frederic Weisbecker <fweisbec@gmail.com>
 */

#include <linux/module.h>
#include <linux/kprobes.h>
#include "trace.h"
#include "trace_probe.h"

static char __percpu *perf_trace_buf[PERF_NR_CONTEXTS];

/*
 * Force it to be aligned to unsigned long to avoid misaligned accesses
 * surprises.
 */
typedef typeof(unsigned long [PERF_MAX_TRACE_SIZE / sizeof(unsigned long)])
	perf_trace_t;

/* Count the events in use (per event id, not per instance) */
static int	total_ref_count;
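
/*
 * Check whether the calling task is allowed to attach a perf event to
 * this trace event.  Raw tracepoint data and function tracing can leak
 * kernel internals, so unprivileged access is restricted unless the
 * event is explicitly marked TRACE_EVENT_FL_CAP_ANY.
 */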
static int perf_trace_event_perm(struct trace_event_call *tp_event,
				 struct perf_event *p_event)
{
	if (tp_event->perf_perm) {
		int ret = tp_event->perf_perm(tp_event, p_event);

		if (ret)
			return ret;
	}

	/*
	 * The parent event was already checked and allowed when it was
	 * created, so allow children without checking.
	 */
	if (p_event->parent)
		return 0;

	/*
	 * It's ok to check current process (owner) permissions in here,
	 * because code below is called only via the perf_event_open syscall.
	 */

	/* The ftrace function trace is allowed only for root. */
	if (ftrace_event_is_function(tp_event)) {
		if (perf_paranoid_tracepoint_raw() && !capable(CAP_SYS_ADMIN))
			return -EPERM;

		if (!is_sampling_event(p_event))
			return 0;

		/*
		 * We don't allow user space callchains for the function trace
		 * event, due to issues with page faults while tracing the page
		 * fault handler and its overall trickiness.
		 */
		if (!p_event->attr.exclude_callchain_user)
			return -EINVAL;

		/*
		 * Same reason to disable user stack dump as for user space
		 * callchains above.
		 */
		if (p_event->attr.sample_type & PERF_SAMPLE_STACK_USER)
			return -EINVAL;
	}

	/* No tracing, just counting, so no obvious leak */
	if (!(p_event->attr.sample_type & PERF_SAMPLE_RAW))
		return 0;

	/* Some events are ok to be traced by non-root users... */
	if (p_event->attach_state == PERF_ATTACH_TASK) {
		if (tp_event->flags & TRACE_EVENT_FL_CAP_ANY)
			return 0;
	}

	/*
	 * ...otherwise raw tracepoint data can be a severe data leak,
	 * so only allow root to have these.
	 */
	if (perf_paranoid_tracepoint_raw() && !capable(CAP_SYS_ADMIN))
		return -EPERM;

	return 0;
}
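
/*
 * First-time registration of a trace event for perf: allocate the
 * per-cpu hlist of attached perf events and, if this is the first trace
 * event used by perf at all, the shared per-context output buffers.
 * Subsequent users of the same event id only bump the refcount.
 */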
static int perf_trace_event_reg(struct trace_event_call *tp_event,
				struct perf_event *p_event)
{
	struct hlist_head __percpu *list;
	int ret = -ENOMEM;
	int cpu;

	p_event->tp_event = tp_event;
	if (tp_event->perf_refcount++ > 0)
		return 0;

	list = alloc_percpu(struct hlist_head);
	if (!list)
		goto fail;

	for_each_possible_cpu(cpu)
		INIT_HLIST_HEAD(per_cpu_ptr(list, cpu));

	tp_event->perf_events = list;

	if (!total_ref_count) {
		char __percpu *buf;
		int i;

		for (i = 0; i < PERF_NR_CONTEXTS; i++) {
			buf = (char __percpu *)alloc_percpu(perf_trace_t);
			if (!buf)
				goto fail;

			perf_trace_buf[i] = buf;
		}
	}

	ret = tp_event->class->reg(tp_event, TRACE_REG_PERF_REGISTER, NULL);
	if (ret)
		goto fail;

	total_ref_count++;
	return 0;

fail:
	if (!total_ref_count) {
		int i;

		for (i = 0; i < PERF_NR_CONTEXTS; i++) {
			free_percpu(perf_trace_buf[i]);
			perf_trace_buf[i] = NULL;
		}
	}

	if (!--tp_event->perf_refcount) {
		free_percpu(tp_event->perf_events);
		tp_event->perf_events = NULL;
	}

	return ret;
}
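
/*
 * Tear down in the reverse order of perf_trace_event_reg(): unregister
 * the callback, wait for in-flight probes to finish via
 * tracepoint_synchronize_unregister(), then free the per-cpu lists and,
 * once the last user is gone, the shared buffers.
 */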
static void perf_trace_event_unreg(struct perf_event *p_event)
{
	struct trace_event_call *tp_event = p_event->tp_event;
	int i;

	if (--tp_event->perf_refcount > 0)
		goto out;

	tp_event->class->reg(tp_event, TRACE_REG_PERF_UNREGISTER, NULL);

	/*
	 * Ensure our callback won't be called anymore. The buffers
	 * will be freed after that.
	 */
	tracepoint_synchronize_unregister();

	free_percpu(tp_event->perf_events);
	tp_event->perf_events = NULL;

	if (!--total_ref_count) {
		for (i = 0; i < PERF_NR_CONTEXTS; i++) {
			free_percpu(perf_trace_buf[i]);
			perf_trace_buf[i] = NULL;
		}
	}
out:
	module_put(tp_event->mod);
}
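
/*
 * TRACE_REG_PERF_OPEN/CLOSE give the event class a chance to do per-event
 * setup and teardown (the function trace event registers its ftrace_ops
 * here, for example); classes that don't care simply return 0 from their
 * ->reg() callback.
 */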
static int perf_trace_event_open(struct perf_event *p_event)
{
	struct trace_event_call *tp_event = p_event->tp_event;
	return tp_event->class->reg(tp_event, TRACE_REG_PERF_OPEN, p_event);
}

static void perf_trace_event_close(struct perf_event *p_event)
{
	struct trace_event_call *tp_event = p_event->tp_event;
	tp_event->class->reg(tp_event, TRACE_REG_PERF_CLOSE, p_event);
}

static int perf_trace_event_init(struct trace_event_call *tp_event,
				 struct perf_event *p_event)
{
	int ret;

	ret = perf_trace_event_perm(tp_event, p_event);
	if (ret)
		return ret;

	ret = perf_trace_event_reg(tp_event, p_event);
	if (ret)
		return ret;

	ret = perf_trace_event_open(p_event);
	if (ret) {
		perf_trace_event_unreg(p_event);
		return ret;
	}

	return 0;
}
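
/*
 * Entry point from the perf core for regular trace events: look up the
 * trace_event_call whose id matches attr.config, pin its module and
 * initialize the event under event_mutex.
 */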
int perf_trace_init(struct perf_event *p_event)
{
	struct trace_event_call *tp_event;
	u64 event_id = p_event->attr.config;
	int ret = -EINVAL;

	mutex_lock(&event_mutex);
	list_for_each_entry(tp_event, &ftrace_events, list) {
		if (tp_event->event.type == event_id &&
		    tp_event->class && tp_event->class->reg &&
		    try_module_get(tp_event->mod)) {
			ret = perf_trace_event_init(tp_event, p_event);
			if (ret)
				module_put(tp_event->mod);
			break;
		}
	}
	mutex_unlock(&event_mutex);

	return ret;
}

void perf_trace_destroy(struct perf_event *p_event)
{
	mutex_lock(&event_mutex);
	perf_trace_event_close(p_event);
	perf_trace_event_unreg(p_event);
	mutex_unlock(&event_mutex);
}
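
/*
 * Kprobe-based perf events use a "local" trace_kprobe that is not visible
 * in tracefs; it exists only for the lifetime of the perf event.
 */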
#ifdef CONFIG_KPROBE_EVENTS
int perf_kprobe_init(struct perf_event *p_event, bool is_retprobe)
{
	int ret;
	char *func = NULL;
	struct trace_event_call *tp_event;

	if (p_event->attr.kprobe_func) {
		func = kzalloc(KSYM_NAME_LEN, GFP_KERNEL);
		if (!func)
			return -ENOMEM;
		ret = strncpy_from_user(
			func, u64_to_user_ptr(p_event->attr.kprobe_func),
			KSYM_NAME_LEN);
		if (ret == KSYM_NAME_LEN)
			ret = -E2BIG;
		if (ret < 0)
			goto out;

		if (func[0] == '\0') {
			kfree(func);
			func = NULL;
		}
	}

	tp_event = create_local_trace_kprobe(
		func, (void *)(unsigned long)(p_event->attr.kprobe_addr),
		p_event->attr.probe_offset, is_retprobe);
	if (IS_ERR(tp_event)) {
		ret = PTR_ERR(tp_event);
		goto out;
	}

	mutex_lock(&event_mutex);
	ret = perf_trace_event_init(tp_event, p_event);
	if (ret)
		destroy_local_trace_kprobe(tp_event);
	mutex_unlock(&event_mutex);
out:
	kfree(func);
	return ret;
}

void perf_kprobe_destroy(struct perf_event *p_event)
{
	mutex_lock(&event_mutex);
	perf_trace_event_close(p_event);
	perf_trace_event_unreg(p_event);
	mutex_unlock(&event_mutex);

	destroy_local_trace_kprobe(p_event->tp_event);
}
#endif /* CONFIG_KPROBE_EVENTS */
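
/*
 * Uprobe-based perf events mirror the kprobe path above, but take the
 * path of the probed binary from user space instead of a kernel symbol.
 */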
#ifdef CONFIG_UPROBE_EVENTS
int perf_uprobe_init(struct perf_event *p_event, bool is_retprobe)
{
	int ret;
	char *path = NULL;
	struct trace_event_call *tp_event;

	if (!p_event->attr.uprobe_path)
		return -EINVAL;

	path = strndup_user(u64_to_user_ptr(p_event->attr.uprobe_path),
			    PATH_MAX);
	if (IS_ERR(path)) {
		ret = PTR_ERR(path);
		return (ret == -EINVAL) ? -E2BIG : ret;
	}
	if (path[0] == '\0') {
		ret = -EINVAL;
		goto out;
	}

	tp_event = create_local_trace_uprobe(
		path, p_event->attr.probe_offset, is_retprobe);
	if (IS_ERR(tp_event)) {
		ret = PTR_ERR(tp_event);
		goto out;
	}

	/*
	 * A local trace_uprobe needs to hold event_mutex to call
	 * uprobe_buffer_enable() and uprobe_buffer_disable();
	 * event_mutex is not required for local trace_kprobes.
	 */
	mutex_lock(&event_mutex);
	ret = perf_trace_event_init(tp_event, p_event);
	if (ret)
		destroy_local_trace_uprobe(tp_event);
	mutex_unlock(&event_mutex);
out:
	kfree(path);
	return ret;
}

void perf_uprobe_destroy(struct perf_event *p_event)
{
	mutex_lock(&event_mutex);
	perf_trace_event_close(p_event);
	perf_trace_event_unreg(p_event);
	mutex_unlock(&event_mutex);
	destroy_local_trace_uprobe(p_event->tp_event);
}
#endif /* CONFIG_UPROBE_EVENTS */
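
/*
 * perf_trace_add()/perf_trace_del() are called by the perf PMU when the
 * event is scheduled in or out on a CPU; by default they just add or
 * remove the event on this CPU's hlist so the tracepoint probe can see it.
 */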
int perf_trace_add(struct perf_event *p_event, int flags)
{
	struct trace_event_call *tp_event = p_event->tp_event;

	if (!(flags & PERF_EF_START))
		p_event->hw.state = PERF_HES_STOPPED;

	/*
	 * If TRACE_REG_PERF_ADD returns false, no custom action was performed
	 * and we need to take the default action of enqueueing our event on
	 * the right per-cpu hlist.
	 */
	if (!tp_event->class->reg(tp_event, TRACE_REG_PERF_ADD, p_event)) {
		struct hlist_head __percpu *pcpu_list;
		struct hlist_head *list;

		pcpu_list = tp_event->perf_events;
		if (WARN_ON_ONCE(!pcpu_list))
			return -EINVAL;

		list = this_cpu_ptr(pcpu_list);
		hlist_add_head_rcu(&p_event->hlist_entry, list);
	}

	return 0;
}

void perf_trace_del(struct perf_event *p_event, int flags)
{
	struct trace_event_call *tp_event = p_event->tp_event;

	/*
	 * If TRACE_REG_PERF_DEL returns false, no custom action was performed
	 * and we need to take the default action of dequeueing our event from
	 * the right per-cpu hlist.
	 */
	if (!tp_event->class->reg(tp_event, TRACE_REG_PERF_DEL, p_event))
		hlist_del_rcu(&p_event->hlist_entry);
}
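
/*
 * Hand out one of the per-context (task/softirq/hardirq/NMI) scratch
 * buffers for building a raw sample.  The recursion context returned in
 * *rctxp must be released again, normally by passing it to
 * perf_trace_buf_submit() or perf_swevent_put_recursion_context().
 */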
void *perf_trace_buf_alloc(int size, struct pt_regs **regs, int *rctxp)
{
	char *raw_data;
	int rctx;

	BUILD_BUG_ON(PERF_MAX_TRACE_SIZE % sizeof(unsigned long));

	if (WARN_ONCE(size > PERF_MAX_TRACE_SIZE,
		      "perf buffer not large enough"))
		return NULL;

	*rctxp = rctx = perf_swevent_get_recursion_context();
	if (rctx < 0)
		return NULL;

	if (regs)
		*regs = this_cpu_ptr(&__perf_regs[rctx]);
	raw_data = this_cpu_ptr(perf_trace_buf[rctx]);

	/* zero the dead bytes from align to not leak stack to user */
	memset(&raw_data[size - sizeof(u64)], 0, sizeof(u64));

	return raw_data;
}
EXPORT_SYMBOL_GPL(perf_trace_buf_alloc);
NOKPROBE_SYMBOL(perf_trace_buf_alloc);

void perf_trace_buf_update(void *record, u16 type)
{
	struct trace_entry *entry = record;
	int pc = preempt_count();
	unsigned long flags;

	local_save_flags(flags);
	tracing_generic_entry_update(entry, flags, pc);
	entry->type = type;
}
NOKPROBE_SYMBOL(perf_trace_buf_update);
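
/*
 * Glue between the function tracer and perf: each perf "function" event
 * owns its own ftrace_ops, and ops->private holds the CPU the event is
 * currently scheduled on (nr_cpu_ids while it is not scheduled anywhere),
 * so the callback below can ignore hits on other CPUs.
 */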
#ifdef CONFIG_FUNCTION_TRACER
static void
perf_ftrace_function_call(unsigned long ip, unsigned long parent_ip,
			  struct ftrace_ops *ops, struct pt_regs *pt_regs)
{
	struct ftrace_entry *entry;
	struct perf_event *event;
	struct hlist_head head;
	struct pt_regs regs;
	int rctx;

	if ((unsigned long)ops->private != smp_processor_id())
		return;

	event = container_of(ops, struct perf_event, ftrace_ops);

	/*
	 * @event->hlist_entry is NULL (per INIT_HLIST_NODE), and all
	 * the perf code does is hlist_for_each_entry_rcu(), so we can
	 * get away with simply setting the @head.first pointer in order
	 * to create a singular list.
	 */
	head.first = &event->hlist_entry;

#define ENTRY_SIZE (ALIGN(sizeof(struct ftrace_entry) + sizeof(u32), \
		    sizeof(u64)) - sizeof(u32))

	BUILD_BUG_ON(ENTRY_SIZE > PERF_MAX_TRACE_SIZE);

	memset(&regs, 0, sizeof(regs));
	perf_fetch_caller_regs(&regs);

	entry = perf_trace_buf_alloc(ENTRY_SIZE, NULL, &rctx);
	if (!entry)
		return;

	entry->ip = ip;
	entry->parent_ip = parent_ip;
	perf_trace_buf_submit(entry, ENTRY_SIZE, rctx, TRACE_FN,
			      1, &regs, &head, NULL);

#undef ENTRY_SIZE
}

static int perf_ftrace_function_register(struct perf_event *event)
{
	struct ftrace_ops *ops = &event->ftrace_ops;

	ops->flags   = FTRACE_OPS_FL_RCU;
	ops->func    = perf_ftrace_function_call;
	ops->private = (void *)(unsigned long)nr_cpu_ids;

	return register_ftrace_function(ops);
}

static int perf_ftrace_function_unregister(struct perf_event *event)
{
	struct ftrace_ops *ops = &event->ftrace_ops;
	int ret = unregister_ftrace_function(ops);

	ftrace_free_filter(ops);
	return ret;
}
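
/*
 * ->reg() callback for the function trace event.  Returning 1 for
 * TRACE_REG_PERF_ADD/DEL tells perf_trace_add()/perf_trace_del() that the
 * per-cpu hlist handling was already taken care of here, by updating
 * ops->private with the current CPU.
 */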
int perf_ftrace_event_register(struct trace_event_call *call,
			       enum trace_reg type, void *data)
{
	struct perf_event *event = data;

	switch (type) {
	case TRACE_REG_REGISTER:
	case TRACE_REG_UNREGISTER:
		return -EINVAL;
	case TRACE_REG_PERF_REGISTER:
	case TRACE_REG_PERF_UNREGISTER:
		return 0;
	case TRACE_REG_PERF_OPEN:
		return perf_ftrace_function_register(data);
	case TRACE_REG_PERF_CLOSE:
		return perf_ftrace_function_unregister(data);
	case TRACE_REG_PERF_ADD:
		event->ftrace_ops.private = (void *)(unsigned long)smp_processor_id();
		return 1;
	case TRACE_REG_PERF_DEL:
		event->ftrace_ops.private = (void *)(unsigned long)nr_cpu_ids;
		return 1;
	}

	return -EINVAL;
}
#endif /* CONFIG_FUNCTION_TRACER */