// SPDX-License-Identifier: GPL-2.0
/*
 * uprobes-based tracing events
 *
 * Copyright (C) IBM Corporation, 2010-2012
 * Author:	Srikar Dronamraju <srikar@linux.vnet.ibm.com>
 */
#define pr_fmt(fmt)	"trace_uprobe: " fmt

#include <linux/bpf-cgroup.h>
#include <linux/security.h>
#include <linux/ctype.h>
#include <linux/module.h>
#include <linux/uaccess.h>
#include <linux/uprobes.h>
#include <linux/namei.h>
#include <linux/string.h>
#include <linux/rculist.h>

#include "trace_dynevent.h"
#include "trace_probe.h"
#include "trace_probe_tmpl.h"

#define UPROBE_EVENT_SYSTEM	"uprobes"
struct uprobe_trace_entry_head {
	struct trace_entry	ent;
	unsigned long		vaddr[];
};

#define SIZEOF_TRACE_ENTRY(is_return)			\
	(sizeof(struct uprobe_trace_entry_head) +	\
	 sizeof(unsigned long) * ((is_return) ? 2 : 1))

#define DATAOF_TRACE_ENTRY(entry, is_return)		\
	((void*)(entry) + SIZEOF_TRACE_ENTRY(is_return))
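/*
 * Layout implied by the two macros above (a sketch): fetched arguments
 * follow the header, and a return probe stores two addresses (function
 * entry and return IP) where an entry probe stores only the probed IP:
 *
 *	entry:	| trace_entry | vaddr[0] = IP                  | args... |
 *	return:	| trace_entry | vaddr[0] = func, vaddr[1] = IP | args... |
 */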
static int trace_uprobe_create(const char *raw_command);
static int trace_uprobe_show(struct seq_file *m, struct dyn_event *ev);
static int trace_uprobe_release(struct dyn_event *ev);
static bool trace_uprobe_is_busy(struct dyn_event *ev);
static bool trace_uprobe_match(const char *system, const char *event,
			       int argc, const char **argv, struct dyn_event *ev);
static struct dyn_event_operations trace_uprobe_ops = {
	.create = trace_uprobe_create,
	.show = trace_uprobe_show,
	.is_busy = trace_uprobe_is_busy,
	.free = trace_uprobe_release,
	.match = trace_uprobe_match,
};

/*
 * uprobe event core functions
 */
struct trace_uprobe {
	struct dyn_event		devent;
	struct uprobe_consumer		consumer;
	struct path			path;
	struct inode			*inode;
	char				*filename;
	unsigned long			offset;
	unsigned long			ref_ctr_offset;
	unsigned long			nhit;
	struct trace_probe		tp;
};
static bool is_trace_uprobe(struct dyn_event *ev)
{
	return ev->ops == &trace_uprobe_ops;
}

static struct trace_uprobe *to_trace_uprobe(struct dyn_event *ev)
{
	return container_of(ev, struct trace_uprobe, devent);
}
/**
 * for_each_trace_uprobe - iterate over the trace_uprobe list
 * @pos:	the struct trace_uprobe * for each entry
 * @dpos:	the struct dyn_event * to use as a loop cursor
 */
#define for_each_trace_uprobe(pos, dpos)	\
	for_each_dyn_event(dpos)		\
		if (is_trace_uprobe(dpos) && (pos = to_trace_uprobe(dpos)))
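/*
 * Example usage (a sketch): this is the pattern find_probe_event() below
 * uses to walk every registered uprobe event:
 *
 *	struct dyn_event *pos;
 *	struct trace_uprobe *tu;
 *
 *	for_each_trace_uprobe(tu, pos)
 *		pr_info("%s\n", trace_probe_name(&tu->tp));
 */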
static int register_uprobe_event(struct trace_uprobe *tu);
static int unregister_uprobe_event(struct trace_uprobe *tu);

struct uprobe_dispatch_data {
	struct trace_uprobe	*tu;
	unsigned long		bp_addr;
};

static int uprobe_dispatcher(struct uprobe_consumer *con, struct pt_regs *regs);
static int uretprobe_dispatcher(struct uprobe_consumer *con,
				unsigned long func, struct pt_regs *regs);
#ifdef CONFIG_STACK_GROWSUP
static unsigned long adjust_stack_addr(unsigned long addr, unsigned int n)
{
	return addr - (n * sizeof(long));
}
#else
static unsigned long adjust_stack_addr(unsigned long addr, unsigned int n)
{
	return addr + (n * sizeof(long));
}
#endif

static unsigned long get_user_stack_nth(struct pt_regs *regs, unsigned int n)
{
	unsigned long ret;
	unsigned long addr = user_stack_pointer(regs);

	addr = adjust_stack_addr(addr, n);

	if (copy_from_user(&ret, (void __force __user *) addr, sizeof(ret)))
		return 0;

	return ret;
}
/*
 * Uprobes-specific fetch functions
 */
static nokprobe_inline int
probe_mem_read(void *dest, void *src, size_t size)
{
	void __user *vaddr = (void __force __user *)src;

	return copy_from_user(dest, vaddr, size) ? -EFAULT : 0;
}

static nokprobe_inline int
probe_mem_read_user(void *dest, void *src, size_t size)
{
	return probe_mem_read(dest, src, size);
}
/*
 * Fetch a null-terminated string. Caller MUST set *(u32 *)dest with max
 * length and relative data location.
 */
static nokprobe_inline int
fetch_store_string(unsigned long addr, void *dest, void *base)
{
	long ret;
	u32 loc = *(u32 *)dest;
	int maxlen = get_loc_len(loc);
	u8 *dst = get_loc_data(dest, base);
	void __user *src = (void __force __user *) addr;

	if (unlikely(!maxlen))
		return -ENOMEM;

	if (addr == FETCH_TOKEN_COMM)
		ret = strlcpy(dst, current->comm, maxlen);
	else
		ret = strncpy_from_user(dst, src, maxlen);
	if (ret >= 0) {
		if (ret == maxlen)
			dst[ret - 1] = '\0';
		else
			/*
			 * Include the terminating null byte. In this case it
			 * was copied by strncpy_from_user but not accounted
			 * for in ret.
			 */
			ret++;
		*(u32 *)dest = make_data_loc(ret, (void *)dst - base);
	}

	return ret;
}
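/*
 * A sketch of the calling contract above, assuming the data_loc helpers
 * from trace_probe.h (length in the upper 16 bits, offset relative to
 * 'base' in the lower 16 bits):
 *
 *	*(u32 *)dest = make_data_loc(maxlen, dst - base);
 *	ret = fetch_store_string(addr, dest, base);
 *	// on success, *(u32 *)dest now holds the actual copied length
 *	// plus the same relative offset
 */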
static nokprobe_inline int
fetch_store_string_user(unsigned long addr, void *dest, void *base)
{
	return fetch_store_string(addr, dest, base);
}

/* Return the length of the string, including the terminating null byte */
static nokprobe_inline int
fetch_store_strlen(unsigned long addr)
{
	int len;
	void __user *vaddr = (void __force __user *) addr;

	if (addr == FETCH_TOKEN_COMM)
		len = strlen(current->comm) + 1;
	else
		len = strnlen_user(vaddr, MAX_STRING_SIZE);

	return (len > MAX_STRING_SIZE) ? 0 : len;
}
static nokprobe_inline int
fetch_store_strlen_user(unsigned long addr)
{
	return fetch_store_strlen(addr);
}

static unsigned long translate_user_vaddr(unsigned long file_offset)
{
	unsigned long base_addr;
	struct uprobe_dispatch_data *udd;

	udd = (void *) current->utask->vaddr;

	base_addr = udd->bp_addr - udd->tu->offset;
	return base_addr + file_offset;
}
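/*
 * Worked example: a probe placed at file offset 0x710 that fires at
 * virtual address 0x7f0123456710 gives base_addr
 * 0x7f0123456710 - 0x710 = 0x7f0123456000, so a fetch arg referring to
 * file offset 0x850 reads from 0x7f0123456850 (addresses illustrative).
 */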
/* Note that we don't verify it, since the code does not come from user space */
static int
process_fetch_insn(struct fetch_insn *code, void *rec, void *dest,
		   void *base)
{
	struct pt_regs *regs = rec;
	unsigned long val;

	/* 1st stage: get value from context */
	switch (code->op) {
	case FETCH_OP_REG:
		val = regs_get_register(regs, code->param);
		break;
	case FETCH_OP_STACK:
		val = get_user_stack_nth(regs, code->param);
		break;
	case FETCH_OP_STACKP:
		val = user_stack_pointer(regs);
		break;
	case FETCH_OP_RETVAL:
		val = regs_return_value(regs);
		break;
	case FETCH_OP_IMM:
		val = code->immediate;
		break;
	case FETCH_OP_COMM:
		val = FETCH_TOKEN_COMM;
		break;
	case FETCH_OP_DATA:
		val = (unsigned long)code->data;
		break;
	case FETCH_OP_FOFFS:
		val = translate_user_vaddr(code->immediate);
		break;
	default:
		return -EILSEQ;
	}
	code++;

	return process_fetch_insn_bottom(code, val, dest, base);
}
NOKPROBE_SYMBOL(process_fetch_insn)
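/*
 * Rough mapping from uprobe_events fetch-arg syntax to the first-stage
 * ops above (a sketch; see Documentation/trace/uprobetracer.rst for the
 * authoritative list):
 *
 *	%ax	-> FETCH_OP_REG		$stack3	-> FETCH_OP_STACK
 *	$stack	-> FETCH_OP_STACKP	$retval	-> FETCH_OP_RETVAL
 *	$comm	-> FETCH_OP_COMM	@+0x10	-> FETCH_OP_FOFFS
 */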
static inline void init_trace_uprobe_filter(struct trace_uprobe_filter *filter)
{
	rwlock_init(&filter->rwlock);
	filter->nr_systemwide = 0;
	INIT_LIST_HEAD(&filter->perf_events);
}

static inline bool uprobe_filter_is_empty(struct trace_uprobe_filter *filter)
{
	return !filter->nr_systemwide && list_empty(&filter->perf_events);
}

static inline bool is_ret_probe(struct trace_uprobe *tu)
{
	return tu->consumer.ret_handler != NULL;
}

static bool trace_uprobe_is_busy(struct dyn_event *ev)
{
	struct trace_uprobe *tu = to_trace_uprobe(ev);

	return trace_probe_is_enabled(&tu->tp);
}
static bool trace_uprobe_match_command_head(struct trace_uprobe *tu,
					    int argc, const char **argv)
{
	char buf[MAX_ARGSTR_LEN + 1];
	int len;

	if (!argc)
		return true;

	len = strlen(tu->filename);
	if (strncmp(tu->filename, argv[0], len) || argv[0][len] != ':')
		return false;

	if (tu->ref_ctr_offset == 0)
		snprintf(buf, sizeof(buf), "0x%0*lx",
				(int)(sizeof(void *) * 2), tu->offset);
	else
		snprintf(buf, sizeof(buf), "0x%0*lx(0x%lx)",
				(int)(sizeof(void *) * 2), tu->offset,
				tu->ref_ctr_offset);
	if (strcmp(buf, &argv[0][len + 1]))
		return false;

	argc--;
	argv++;

	return trace_probe_match_command_args(&tu->tp, argc, argv);
}
static bool trace_uprobe_match(const char *system, const char *event,
			       int argc, const char **argv, struct dyn_event *ev)
{
	struct trace_uprobe *tu = to_trace_uprobe(ev);

	return strcmp(trace_probe_name(&tu->tp), event) == 0 &&
	    (!system || strcmp(trace_probe_group_name(&tu->tp), system) == 0) &&
	    trace_uprobe_match_command_head(tu, argc, argv);
}

static nokprobe_inline struct trace_uprobe *
trace_uprobe_primary_from_call(struct trace_event_call *call)
{
	struct trace_probe *tp;

	tp = trace_probe_primary_from_call(call);
	if (WARN_ON_ONCE(!tp))
		return NULL;

	return container_of(tp, struct trace_uprobe, tp);
}
/*
 * Allocate a new trace_uprobe and initialize it (including uprobes).
 */
static struct trace_uprobe *
alloc_trace_uprobe(const char *group, const char *event, int nargs, bool is_ret)
{
	struct trace_uprobe *tu;
	int ret;

	tu = kzalloc(struct_size(tu, tp.args, nargs), GFP_KERNEL);
	if (!tu)
		return ERR_PTR(-ENOMEM);

	ret = trace_probe_init(&tu->tp, event, group, true);
	if (ret < 0)
		goto error;

	dyn_event_init(&tu->devent, &trace_uprobe_ops);
	tu->consumer.handler = uprobe_dispatcher;
	if (is_ret)
		tu->consumer.ret_handler = uretprobe_dispatcher;
	init_trace_uprobe_filter(tu->tp.event->filter);
	return tu;

error:
	kfree(tu);

	return ERR_PTR(ret);
}

static void free_trace_uprobe(struct trace_uprobe *tu)
{
	if (!tu)
		return;

	path_put(&tu->path);
	trace_probe_cleanup(&tu->tp);
	kfree(tu->filename);
	kfree(tu);
}
static struct trace_uprobe *find_probe_event(const char *event, const char *group)
{
	struct dyn_event *pos;
	struct trace_uprobe *tu;

	for_each_trace_uprobe(tu, pos)
		if (strcmp(trace_probe_name(&tu->tp), event) == 0 &&
		    strcmp(trace_probe_group_name(&tu->tp), group) == 0)
			return tu;

	return NULL;
}

/* Unregister a trace_uprobe and probe_event */
static int unregister_trace_uprobe(struct trace_uprobe *tu)
{
	int ret;

	if (trace_probe_has_sibling(&tu->tp))
		goto unreg;

	/* If there's a reference to the dynamic event */
	if (trace_event_dyn_busy(trace_probe_event_call(&tu->tp)))
		return -EBUSY;

	ret = unregister_uprobe_event(tu);
	if (ret)
		return ret;

unreg:
	dyn_event_remove(&tu->devent);
	trace_probe_unlink(&tu->tp);
	free_trace_uprobe(tu);

	return 0;
}
static bool trace_uprobe_has_same_uprobe(struct trace_uprobe *orig,
					 struct trace_uprobe *comp)
{
	struct trace_probe_event *tpe = orig->tp.event;
	struct inode *comp_inode = d_real_inode(comp->path.dentry);
	int i;

	list_for_each_entry(orig, &tpe->probes, tp.list) {
		if (comp_inode != d_real_inode(orig->path.dentry) ||
		    comp->offset != orig->offset)
			continue;

		/*
		 * trace_probe_compare_arg_type() ensured that nr_args and
		 * each argument name and type are same. Let's compare comm.
		 */
		for (i = 0; i < orig->tp.nr_args; i++) {
			if (strcmp(orig->tp.args[i].comm,
				   comp->tp.args[i].comm))
				break;
		}

		if (i == orig->tp.nr_args)
			return true;
	}

	return false;
}
static int append_trace_uprobe(struct trace_uprobe *tu, struct trace_uprobe *to)
{
	int ret;

	ret = trace_probe_compare_arg_type(&tu->tp, &to->tp);
	if (ret) {
		/* Note that argument starts at index 2 */
		trace_probe_log_set_index(ret + 1);
		trace_probe_log_err(0, DIFF_ARG_TYPE);
		return -EEXIST;
	}
	if (trace_uprobe_has_same_uprobe(to, tu)) {
		trace_probe_log_set_index(0);
		trace_probe_log_err(0, SAME_PROBE);
		return -EEXIST;
	}

	/* Append to existing event */
	ret = trace_probe_append(&tu->tp, &to->tp);
	if (!ret)
		dyn_event_add(&tu->devent, trace_probe_event_call(&tu->tp));

	return ret;
}
/*
 * Uprobes with multiple reference counters are not allowed. I.e.,
 * if inode and offset match, the reference counter offset *must*
 * match as well. Though there is one exception: if the user is
 * replacing an old trace_uprobe with a new one (same group/event),
 * then we allow the same uprobe with a new reference counter as long
 * as the new one does not conflict with any other existing ones.
 */
static int validate_ref_ctr_offset(struct trace_uprobe *new)
{
	struct dyn_event *pos;
	struct trace_uprobe *tmp;
	struct inode *new_inode = d_real_inode(new->path.dentry);

	for_each_trace_uprobe(tmp, pos) {
		if (new_inode == d_real_inode(tmp->path.dentry) &&
		    new->offset == tmp->offset &&
		    new->ref_ctr_offset != tmp->ref_ctr_offset) {
			pr_warn("Reference counter offset mismatch.");
			return -EINVAL;
		}
	}
	return 0;
}
/* Register a trace_uprobe and probe_event */
static int register_trace_uprobe(struct trace_uprobe *tu)
{
	struct trace_uprobe *old_tu;
	int ret;

	mutex_lock(&event_mutex);

	ret = validate_ref_ctr_offset(tu);
	if (ret)
		goto end;

	/* register as an event */
	old_tu = find_probe_event(trace_probe_name(&tu->tp),
				  trace_probe_group_name(&tu->tp));
	if (old_tu) {
		if (is_ret_probe(tu) != is_ret_probe(old_tu)) {
			trace_probe_log_set_index(0);
			trace_probe_log_err(0, DIFF_PROBE_TYPE);
			ret = -EEXIST;
		} else {
			ret = append_trace_uprobe(tu, old_tu);
		}
		goto end;
	}

	ret = register_uprobe_event(tu);
	if (ret) {
		if (ret == -EEXIST) {
			trace_probe_log_set_index(0);
			trace_probe_log_err(0, EVENT_EXIST);
		} else
			pr_warn("Failed to register probe event(%d)\n", ret);
		goto end;
	}

	dyn_event_add(&tu->devent, trace_probe_event_call(&tu->tp));

end:
	mutex_unlock(&event_mutex);

	return ret;
}

/*
 * Argument syntax:
 *  - Add uprobe: p|r[:[GRP/]EVENT] PATH:OFFSET[%return][(REF)] [FETCHARGS]
 */
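/*
 * For example (a sketch; path and offsets are illustrative only):
 *
 *	p:myevent /bin/bash:0x4245c0 arg1=%ax
 *	r:myretevent /bin/bash:0x4245c0 $retval
 *	p /bin/bash:0x4245c0(0x10036f4)		<- with a ref. counter offset
 */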
static int __trace_uprobe_create(int argc, const char **argv)
{
	struct trace_uprobe *tu;
	const char *event = NULL, *group = UPROBE_EVENT_SYSTEM;
	char *arg, *filename, *rctr, *rctr_end, *tmp;
	char buf[MAX_EVENT_NAME_LEN];
	enum probe_print_type ptype;
	struct path path;
	unsigned long offset, ref_ctr_offset;
	bool is_return = false;
	int i, ret;

	ref_ctr_offset = 0;
	switch (argv[0][0]) {
	case 'r':
		is_return = true;
		break;
	case 'p':
		break;
	default:
		return -ECANCELED;
	}

	if (argc < 2)
		return -ECANCELED;

	if (argv[0][1] == ':')
		event = &argv[0][2];

	if (!strchr(argv[1], '/'))
		return -ECANCELED;

	filename = kstrdup(argv[1], GFP_KERNEL);
	if (!filename)
		return -ENOMEM;

	/* Find the last occurrence, in case the path contains ':' too. */
	arg = strrchr(filename, ':');
	if (!arg || !isdigit(arg[1])) {
		kfree(filename);
		return -ECANCELED;
	}
	trace_probe_log_init("trace_uprobe", argc, argv);
	trace_probe_log_set_index(1);	/* filename is the 2nd argument */

	*arg++ = '\0';
	ret = kern_path(filename, LOOKUP_FOLLOW, &path);
	if (ret) {
		trace_probe_log_err(0, FILE_NOT_FOUND);
		kfree(filename);
		trace_probe_log_clear();
		return ret;
	}
	if (!d_is_reg(path.dentry)) {
		trace_probe_log_err(0, NO_REGULAR_FILE);
		ret = -EINVAL;
		goto fail_address_parse;
	}
	/* Parse reference counter offset if specified. */
	rctr = strchr(arg, '(');
	if (rctr) {
		rctr_end = strchr(rctr, ')');
		if (!rctr_end) {
			ret = -EINVAL;
			rctr_end = rctr + strlen(rctr);
			trace_probe_log_err(rctr_end - filename,
					    REFCNT_OPEN_BRACE);
			goto fail_address_parse;
		} else if (rctr_end[1] != '\0') {
			ret = -EINVAL;
			trace_probe_log_err(rctr_end + 1 - filename,
					    BAD_REFCNT_SUFFIX);
			goto fail_address_parse;
		}

		*rctr++ = '\0';
		*rctr_end = '\0';
		ret = kstrtoul(rctr, 0, &ref_ctr_offset);
		if (ret) {
			trace_probe_log_err(rctr - filename, BAD_REFCNT);
			goto fail_address_parse;
		}
	}
	/* Check if there is a %return suffix */
	tmp = strchr(arg, '%');
	if (tmp) {
		if (!strcmp(tmp, "%return")) {
			*tmp = '\0';
			is_return = true;
		} else {
			trace_probe_log_err(tmp - filename, BAD_ADDR_SUFFIX);
			ret = -EINVAL;
			goto fail_address_parse;
		}
	}

	/* Parse uprobe offset. */
	ret = kstrtoul(arg, 0, &offset);
	if (ret) {
		trace_probe_log_err(arg - filename, BAD_UPROBE_OFFS);
		goto fail_address_parse;
	}
	/* setup a probe */
	trace_probe_log_set_index(0);
	if (event) {
		ret = traceprobe_parse_event_name(&event, &group, buf,
						  event - argv[0]);
		if (ret)
			goto fail_address_parse;
	} else {
		char *tail;
		char *ptr;

		tail = kstrdup(kbasename(filename), GFP_KERNEL);
		if (!tail) {
			ret = -ENOMEM;
			goto fail_address_parse;
		}

		ptr = strpbrk(tail, ".-_");
		if (ptr)
			*ptr = '\0';

		snprintf(buf, MAX_EVENT_NAME_LEN, "%c_%s_0x%lx", 'p', tail, offset);
		event = buf;
		kfree(tail);
	}
	argc -= 2;
	argv += 2;

	tu = alloc_trace_uprobe(group, event, argc, is_return);
	if (IS_ERR(tu)) {
		ret = PTR_ERR(tu);
		/* This must return -ENOMEM, otherwise there is a bug */
		WARN_ON_ONCE(ret != -ENOMEM);
		goto fail_address_parse;
	}
	tu->offset = offset;
	tu->ref_ctr_offset = ref_ctr_offset;
	tu->path = path;
	tu->filename = filename;

	/* parse arguments */
	for (i = 0; i < argc && i < MAX_TRACE_ARGS; i++) {
		trace_probe_log_set_index(i + 2);
		ret = traceprobe_parse_probe_arg(&tu->tp, i, argv[i],
					is_return ? TPARG_FL_RETURN : 0);
		if (ret)
			goto error;
	}
	ptype = is_ret_probe(tu) ? PROBE_PRINT_RETURN : PROBE_PRINT_NORMAL;
	ret = traceprobe_set_print_fmt(&tu->tp, ptype);
	if (ret < 0)
		goto error;

	ret = register_trace_uprobe(tu);
	if (!ret)
		goto out;

error:
	free_trace_uprobe(tu);
out:
	trace_probe_log_clear();
	return ret;

fail_address_parse:
	trace_probe_log_clear();
	path_put(&path);
	kfree(filename);

	return ret;
}
int trace_uprobe_create(const char *raw_command)
{
	return trace_probe_create(raw_command, __trace_uprobe_create);
}

static int create_or_delete_trace_uprobe(const char *raw_command)
{
	int ret;

	if (raw_command[0] == '-')
		return dyn_event_release(raw_command, &trace_uprobe_ops);

	ret = trace_uprobe_create(raw_command);
	return ret == -ECANCELED ? -EINVAL : ret;
}

static int trace_uprobe_release(struct dyn_event *ev)
{
	struct trace_uprobe *tu = to_trace_uprobe(ev);

	return unregister_trace_uprobe(tu);
}
/* Probes listing interfaces */
static int trace_uprobe_show(struct seq_file *m, struct dyn_event *ev)
{
	struct trace_uprobe *tu = to_trace_uprobe(ev);
	char c = is_ret_probe(tu) ? 'r' : 'p';
	int i;

	seq_printf(m, "%c:%s/%s %s:0x%0*lx", c, trace_probe_group_name(&tu->tp),
			trace_probe_name(&tu->tp), tu->filename,
			(int)(sizeof(void *) * 2), tu->offset);

	if (tu->ref_ctr_offset)
		seq_printf(m, "(0x%lx)", tu->ref_ctr_offset);

	for (i = 0; i < tu->tp.nr_args; i++)
		seq_printf(m, " %s=%s", tu->tp.args[i].name, tu->tp.args[i].comm);

	seq_putc(m, '\n');
	return 0;
}
static int probes_seq_show(struct seq_file *m, void *v)
{
	struct dyn_event *ev = v;

	if (!is_trace_uprobe(ev))
		return 0;

	return trace_uprobe_show(m, ev);
}

static const struct seq_operations probes_seq_op = {
	.start	= dyn_event_seq_start,
	.next	= dyn_event_seq_next,
	.stop	= dyn_event_seq_stop,
	.show	= probes_seq_show
};
static int probes_open(struct inode *inode, struct file *file)
{
	int ret;

	ret = security_locked_down(LOCKDOWN_TRACEFS);
	if (ret)
		return ret;

	if ((file->f_mode & FMODE_WRITE) && (file->f_flags & O_TRUNC)) {
		ret = dyn_events_release_all(&trace_uprobe_ops);
		if (ret)
			return ret;
	}

	return seq_open(file, &probes_seq_op);
}

static ssize_t probes_write(struct file *file, const char __user *buffer,
			    size_t count, loff_t *ppos)
{
	return trace_parse_run_command(file, buffer, count, ppos,
					create_or_delete_trace_uprobe);
}
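/*
 * Example usage from a shell (a sketch; the offset is illustrative):
 * appending adds an event, a leading '-' removes one, and opening the
 * file with O_TRUNC clears them all:
 *
 *	echo 'p:myevent /bin/bash:0x4245c0' >> uprobe_events
 *	echo '-:myevent' >> uprobe_events
 *	echo > uprobe_events
 */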
static const struct file_operations uprobe_events_ops = {
	.owner		= THIS_MODULE,
	.open		= probes_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release,
	.write		= probes_write,
};
/* Probes profiling interfaces */
static int probes_profile_seq_show(struct seq_file *m, void *v)
{
	struct dyn_event *ev = v;
	struct trace_uprobe *tu;

	if (!is_trace_uprobe(ev))
		return 0;

	tu = to_trace_uprobe(ev);
	seq_printf(m, "  %s %-44s %15lu\n", tu->filename,
			trace_probe_name(&tu->tp), tu->nhit);
	return 0;
}
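/*
 * The format string above yields one line per event, e.g.
 * (illustrative values only):
 *
 *	  /bin/bash p_bash_0x4245c0                                    17
 */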
static const struct seq_operations profile_seq_op = {
	.start	= dyn_event_seq_start,
	.next	= dyn_event_seq_next,
	.stop	= dyn_event_seq_stop,
	.show	= probes_profile_seq_show
};

static int profile_open(struct inode *inode, struct file *file)
{
	int ret;

	ret = security_locked_down(LOCKDOWN_TRACEFS);
	if (ret)
		return ret;

	return seq_open(file, &profile_seq_op);
}

static const struct file_operations uprobe_profile_ops = {
	.owner		= THIS_MODULE,
	.open		= profile_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release,
};
struct uprobe_cpu_buffer {
	struct mutex mutex;
	void *buf;
};
static struct uprobe_cpu_buffer __percpu *uprobe_cpu_buffer;
static int uprobe_buffer_refcnt;
static int uprobe_buffer_init(void)
{
	int cpu, err_cpu;

	uprobe_cpu_buffer = alloc_percpu(struct uprobe_cpu_buffer);
	if (uprobe_cpu_buffer == NULL)
		return -ENOMEM;

	for_each_possible_cpu(cpu) {
		struct page *p = alloc_pages_node(cpu_to_node(cpu),
						  GFP_KERNEL, 0);
		if (p == NULL) {
			err_cpu = cpu;
			goto err;
		}
		per_cpu_ptr(uprobe_cpu_buffer, cpu)->buf = page_address(p);
		mutex_init(&per_cpu_ptr(uprobe_cpu_buffer, cpu)->mutex);
	}

	return 0;

err:
	for_each_possible_cpu(cpu) {
		if (cpu == err_cpu)
			break;
		free_page((unsigned long)per_cpu_ptr(uprobe_cpu_buffer, cpu)->buf);
	}

	free_percpu(uprobe_cpu_buffer);
	return -ENOMEM;
}
static int uprobe_buffer_enable(void)
{
	int ret = 0;

	BUG_ON(!mutex_is_locked(&event_mutex));

	if (uprobe_buffer_refcnt++ == 0) {
		ret = uprobe_buffer_init();
		if (ret < 0)
			uprobe_buffer_refcnt--;
	}

	return ret;
}

static void uprobe_buffer_disable(void)
{
	int cpu;

	BUG_ON(!mutex_is_locked(&event_mutex));

	if (--uprobe_buffer_refcnt == 0) {
		for_each_possible_cpu(cpu)
			free_page((unsigned long)per_cpu_ptr(uprobe_cpu_buffer,
							     cpu)->buf);

		free_percpu(uprobe_cpu_buffer);
		uprobe_cpu_buffer = NULL;
	}
}
static struct uprobe_cpu_buffer *uprobe_buffer_get(void)
{
	struct uprobe_cpu_buffer *ucb;
	int cpu;

	cpu = raw_smp_processor_id();
	ucb = per_cpu_ptr(uprobe_cpu_buffer, cpu);

	/*
	 * Use per-cpu buffers for fastest access, but we might migrate
	 * so the mutex makes sure we have sole access to it.
	 */
	mutex_lock(&ucb->mutex);

	return ucb;
}

static void uprobe_buffer_put(struct uprobe_cpu_buffer *ucb)
{
	mutex_unlock(&ucb->mutex);
}
static void __uprobe_trace_func(struct trace_uprobe *tu,
				unsigned long func, struct pt_regs *regs,
				struct uprobe_cpu_buffer *ucb, int dsize,
				struct trace_event_file *trace_file)
{
	struct uprobe_trace_entry_head *entry;
	struct trace_event_buffer fbuffer;
	void *data;
	int size, esize;
	struct trace_event_call *call = trace_probe_event_call(&tu->tp);

	WARN_ON(call != trace_file->event_call);

	if (WARN_ON_ONCE(tu->tp.size + dsize > PAGE_SIZE))
		return;

	if (trace_trigger_soft_disabled(trace_file))
		return;

	esize = SIZEOF_TRACE_ENTRY(is_ret_probe(tu));
	size = esize + tu->tp.size + dsize;
	entry = trace_event_buffer_reserve(&fbuffer, trace_file, size);
	if (!entry)
		return;

	if (is_ret_probe(tu)) {
		entry->vaddr[0] = func;
		entry->vaddr[1] = instruction_pointer(regs);
		data = DATAOF_TRACE_ENTRY(entry, true);
	} else {
		entry->vaddr[0] = instruction_pointer(regs);
		data = DATAOF_TRACE_ENTRY(entry, false);
	}

	memcpy(data, ucb->buf, tu->tp.size + dsize);

	trace_event_buffer_commit(&fbuffer);
}
/* uprobe handler */
static int uprobe_trace_func(struct trace_uprobe *tu, struct pt_regs *regs,
			     struct uprobe_cpu_buffer *ucb, int dsize)
{
	struct event_file_link *link;

	if (is_ret_probe(tu))
		return 0;

	rcu_read_lock();
	trace_probe_for_each_link_rcu(link, &tu->tp)
		__uprobe_trace_func(tu, 0, regs, ucb, dsize, link->file);
	rcu_read_unlock();

	return 0;
}

static void uretprobe_trace_func(struct trace_uprobe *tu, unsigned long func,
				 struct pt_regs *regs,
				 struct uprobe_cpu_buffer *ucb, int dsize)
{
	struct event_file_link *link;

	rcu_read_lock();
	trace_probe_for_each_link_rcu(link, &tu->tp)
		__uprobe_trace_func(tu, func, regs, ucb, dsize, link->file);
	rcu_read_unlock();
}
/* Event entry printers */
static enum print_line_t
print_uprobe_event(struct trace_iterator *iter, int flags, struct trace_event *event)
{
	struct uprobe_trace_entry_head *entry;
	struct trace_seq *s = &iter->seq;
	struct trace_uprobe *tu;
	u8 *data;

	entry = (struct uprobe_trace_entry_head *)iter->ent;
	tu = trace_uprobe_primary_from_call(
		container_of(event, struct trace_event_call, event));
	if (unlikely(!tu))
		goto out;

	if (is_ret_probe(tu)) {
		trace_seq_printf(s, "%s: (0x%lx <- 0x%lx)",
				 trace_probe_name(&tu->tp),
				 entry->vaddr[1], entry->vaddr[0]);
		data = DATAOF_TRACE_ENTRY(entry, true);
	} else {
		trace_seq_printf(s, "%s: (0x%lx)",
				 trace_probe_name(&tu->tp),
				 entry->vaddr[0]);
		data = DATAOF_TRACE_ENTRY(entry, false);
	}

	if (print_probe_args(s, tu->tp.args, tu->tp.nr_args, data, entry) < 0)
		goto out;

	trace_seq_putc(s, '\n');

 out:
	return trace_handle_return(s);
}
typedef bool (*filter_func_t)(struct uprobe_consumer *self,
				enum uprobe_filter_ctx ctx,
				struct mm_struct *mm);

static int trace_uprobe_enable(struct trace_uprobe *tu, filter_func_t filter)
{
	int ret;

	tu->consumer.filter = filter;
	tu->inode = d_real_inode(tu->path.dentry);

	if (tu->ref_ctr_offset)
		ret = uprobe_register_refctr(tu->inode, tu->offset,
				tu->ref_ctr_offset, &tu->consumer);
	else
		ret = uprobe_register(tu->inode, tu->offset, &tu->consumer);

	if (ret)
		tu->inode = NULL;

	return ret;
}
static void __probe_event_disable(struct trace_probe *tp)
{
	struct trace_uprobe *tu;

	tu = container_of(tp, struct trace_uprobe, tp);
	WARN_ON(!uprobe_filter_is_empty(tu->tp.event->filter));

	list_for_each_entry(tu, trace_probe_probe_list(tp), tp.list) {
		if (!tu->inode)
			continue;

		uprobe_unregister(tu->inode, tu->offset, &tu->consumer);
		tu->inode = NULL;
	}
}
static int probe_event_enable(struct trace_event_call *call,
			struct trace_event_file *file, filter_func_t filter)
{
	struct trace_probe *tp;
	struct trace_uprobe *tu;
	bool enabled;
	int ret;

	tp = trace_probe_primary_from_call(call);
	if (WARN_ON_ONCE(!tp))
		return -ENODEV;
	enabled = trace_probe_is_enabled(tp);

	/* This may also change "enabled" state */
	if (file) {
		if (trace_probe_test_flag(tp, TP_FLAG_PROFILE))
			return -EINTR;

		ret = trace_probe_add_file(tp, file);
		if (ret < 0)
			return ret;
	} else {
		if (trace_probe_test_flag(tp, TP_FLAG_TRACE))
			return -EINTR;

		trace_probe_set_flag(tp, TP_FLAG_PROFILE);
	}

	tu = container_of(tp, struct trace_uprobe, tp);
	WARN_ON(!uprobe_filter_is_empty(tu->tp.event->filter));

	if (enabled)
		return 0;

	ret = uprobe_buffer_enable();
	if (ret)
		goto err_flags;

	list_for_each_entry(tu, trace_probe_probe_list(tp), tp.list) {
		ret = trace_uprobe_enable(tu, filter);
		if (ret) {
			__probe_event_disable(tp);
			goto err_buffer;
		}
	}

	return 0;

 err_buffer:
	uprobe_buffer_disable();

 err_flags:
	if (file)
		trace_probe_remove_file(tp, file);
	else
		trace_probe_clear_flag(tp, TP_FLAG_PROFILE);

	return ret;
}
static void probe_event_disable(struct trace_event_call *call,
				struct trace_event_file *file)
{
	struct trace_probe *tp;

	tp = trace_probe_primary_from_call(call);
	if (WARN_ON_ONCE(!tp))
		return;

	if (!trace_probe_is_enabled(tp))
		return;

	if (file) {
		if (trace_probe_remove_file(tp, file) < 0)
			return;

		if (trace_probe_is_enabled(tp))
			return;
	} else
		trace_probe_clear_flag(tp, TP_FLAG_PROFILE);

	__probe_event_disable(tp);
	uprobe_buffer_disable();
}
static int uprobe_event_define_fields(struct trace_event_call *event_call)
{
	int ret, size;
	struct uprobe_trace_entry_head field;
	struct trace_uprobe *tu;

	tu = trace_uprobe_primary_from_call(event_call);
	if (unlikely(!tu))
		return -ENODEV;

	if (is_ret_probe(tu)) {
		DEFINE_FIELD(unsigned long, vaddr[0], FIELD_STRING_FUNC, 0);
		DEFINE_FIELD(unsigned long, vaddr[1], FIELD_STRING_RETIP, 0);
		size = SIZEOF_TRACE_ENTRY(true);
	} else {
		DEFINE_FIELD(unsigned long, vaddr[0], FIELD_STRING_IP, 0);
		size = SIZEOF_TRACE_ENTRY(false);
	}

	return traceprobe_define_arg_fields(event_call, size, &tu->tp);
}
#ifdef CONFIG_PERF_EVENTS
static bool
__uprobe_perf_filter(struct trace_uprobe_filter *filter, struct mm_struct *mm)
{
	struct perf_event *event;

	if (filter->nr_systemwide)
		return true;

	list_for_each_entry(event, &filter->perf_events, hw.tp_list) {
		if (event->hw.target->mm == mm)
			return true;
	}

	return false;
}

static inline bool
trace_uprobe_filter_event(struct trace_uprobe_filter *filter,
			  struct perf_event *event)
{
	return __uprobe_perf_filter(filter, event->hw.target->mm);
}
static bool trace_uprobe_filter_remove(struct trace_uprobe_filter *filter,
				       struct perf_event *event)
{
	bool done;

	write_lock(&filter->rwlock);
	if (event->hw.target) {
		list_del(&event->hw.tp_list);
		done = filter->nr_systemwide ||
			(event->hw.target->flags & PF_EXITING) ||
			trace_uprobe_filter_event(filter, event);
	} else {
		filter->nr_systemwide--;
		done = filter->nr_systemwide;
	}
	write_unlock(&filter->rwlock);

	return done;
}
/* This returns true if the filter always covers target mm */
static bool trace_uprobe_filter_add(struct trace_uprobe_filter *filter,
				    struct perf_event *event)
{
	bool done;

	write_lock(&filter->rwlock);
	if (event->hw.target) {
		/*
		 * event->parent != NULL means copy_process(), we can avoid
		 * uprobe_apply(). current->mm must be probed and we can rely
		 * on dup_mmap() which preserves the already installed bp's.
		 *
		 * attr.enable_on_exec means that exec/mmap will install the
		 * breakpoints we need.
		 */
		done = filter->nr_systemwide ||
			event->parent || event->attr.enable_on_exec ||
			trace_uprobe_filter_event(filter, event);
		list_add(&event->hw.tp_list, &filter->perf_events);
	} else {
		done = filter->nr_systemwide;
		filter->nr_systemwide++;
	}
	write_unlock(&filter->rwlock);

	return done;
}
static int uprobe_perf_close(struct trace_event_call *call,
			     struct perf_event *event)
{
	struct trace_probe *tp;
	struct trace_uprobe *tu;
	int ret = 0;

	tp = trace_probe_primary_from_call(call);
	if (WARN_ON_ONCE(!tp))
		return -ENODEV;

	tu = container_of(tp, struct trace_uprobe, tp);
	if (trace_uprobe_filter_remove(tu->tp.event->filter, event))
		return 0;

	list_for_each_entry(tu, trace_probe_probe_list(tp), tp.list) {
		ret = uprobe_apply(tu->inode, tu->offset, &tu->consumer, false);
		if (ret)
			break;
	}

	return ret;
}
static int uprobe_perf_open(struct trace_event_call *call,
			    struct perf_event *event)
{
	struct trace_probe *tp;
	struct trace_uprobe *tu;
	int err = 0;

	tp = trace_probe_primary_from_call(call);
	if (WARN_ON_ONCE(!tp))
		return -ENODEV;

	tu = container_of(tp, struct trace_uprobe, tp);
	if (trace_uprobe_filter_add(tu->tp.event->filter, event))
		return 0;

	list_for_each_entry(tu, trace_probe_probe_list(tp), tp.list) {
		err = uprobe_apply(tu->inode, tu->offset, &tu->consumer, true);
		if (err) {
			uprobe_perf_close(call, event);
			break;
		}
	}

	return err;
}
static bool uprobe_perf_filter(struct uprobe_consumer *uc,
				enum uprobe_filter_ctx ctx, struct mm_struct *mm)
{
	struct trace_uprobe_filter *filter;
	struct trace_uprobe *tu;
	bool ret;

	tu = container_of(uc, struct trace_uprobe, consumer);
	filter = tu->tp.event->filter;

	read_lock(&filter->rwlock);
	ret = __uprobe_perf_filter(filter, mm);
	read_unlock(&filter->rwlock);

	return ret;
}
static void __uprobe_perf_func(struct trace_uprobe *tu,
			       unsigned long func, struct pt_regs *regs,
			       struct uprobe_cpu_buffer *ucb, int dsize)
{
	struct trace_event_call *call = trace_probe_event_call(&tu->tp);
	struct uprobe_trace_entry_head *entry;
	struct hlist_head *head;
	void *data;
	int size, esize;
	int rctx;

	if (bpf_prog_array_valid(call)) {
		u32 ret;

		preempt_disable();
		ret = trace_call_bpf(call, regs);
		preempt_enable();
		if (!ret)
			return;
	}

	esize = SIZEOF_TRACE_ENTRY(is_ret_probe(tu));

	size = esize + tu->tp.size + dsize;
	size = ALIGN(size + sizeof(u32), sizeof(u64)) - sizeof(u32);
	if (WARN_ONCE(size > PERF_MAX_TRACE_SIZE, "profile buffer not large enough"))
		return;

	preempt_disable();
	head = this_cpu_ptr(call->perf_events);
	if (hlist_empty(head))
		goto out;

	entry = perf_trace_buf_alloc(size, NULL, &rctx);
	if (!entry)
		goto out;

	if (is_ret_probe(tu)) {
		entry->vaddr[0] = func;
		entry->vaddr[1] = instruction_pointer(regs);
		data = DATAOF_TRACE_ENTRY(entry, true);
	} else {
		entry->vaddr[0] = instruction_pointer(regs);
		data = DATAOF_TRACE_ENTRY(entry, false);
	}

	memcpy(data, ucb->buf, tu->tp.size + dsize);

	if (size - esize > tu->tp.size + dsize) {
		int len = tu->tp.size + dsize;

		memset(data + len, 0, size - esize - len);
	}

	perf_trace_buf_submit(entry, size, rctx, call->event.type, 1, regs,
			      head, NULL);
 out:
	preempt_enable();
}
/* uprobe profile handler */
static int uprobe_perf_func(struct trace_uprobe *tu, struct pt_regs *regs,
			    struct uprobe_cpu_buffer *ucb, int dsize)
{
	if (!uprobe_perf_filter(&tu->consumer, 0, current->mm))
		return UPROBE_HANDLER_REMOVE;

	if (!is_ret_probe(tu))
		__uprobe_perf_func(tu, 0, regs, ucb, dsize);
	return 0;
}

static void uretprobe_perf_func(struct trace_uprobe *tu, unsigned long func,
				struct pt_regs *regs,
				struct uprobe_cpu_buffer *ucb, int dsize)
{
	__uprobe_perf_func(tu, func, regs, ucb, dsize);
}
int bpf_get_uprobe_info(const struct perf_event *event, u32 *fd_type,
			const char **filename, u64 *probe_offset,
			bool perf_type_tracepoint)
{
	const char *pevent = trace_event_name(event->tp_event);
	const char *group = event->tp_event->class->system;
	struct trace_uprobe *tu;

	if (perf_type_tracepoint)
		tu = find_probe_event(pevent, group);
	else
		tu = trace_uprobe_primary_from_call(event->tp_event);
	if (!tu)
		return -EINVAL;

	*fd_type = is_ret_probe(tu) ? BPF_FD_TYPE_URETPROBE
				    : BPF_FD_TYPE_UPROBE;
	*filename = tu->filename;
	*probe_offset = tu->offset;
	return 0;
}
#endif	/* CONFIG_PERF_EVENTS */
static int
trace_uprobe_register(struct trace_event_call *event, enum trace_reg type,
		      void *data)
{
	struct trace_event_file *file = data;

	switch (type) {
	case TRACE_REG_REGISTER:
		return probe_event_enable(event, file, NULL);

	case TRACE_REG_UNREGISTER:
		probe_event_disable(event, file);
		return 0;

#ifdef CONFIG_PERF_EVENTS
	case TRACE_REG_PERF_REGISTER:
		return probe_event_enable(event, NULL, uprobe_perf_filter);

	case TRACE_REG_PERF_UNREGISTER:
		probe_event_disable(event, NULL);
		return 0;

	case TRACE_REG_PERF_OPEN:
		return uprobe_perf_open(event, data);

	case TRACE_REG_PERF_CLOSE:
		return uprobe_perf_close(event, data);

#endif
	default:
		return 0;
	}
}
static int uprobe_dispatcher(struct uprobe_consumer *con, struct pt_regs *regs)
{
	struct trace_uprobe *tu;
	struct uprobe_dispatch_data udd;
	struct uprobe_cpu_buffer *ucb;
	int dsize, esize;
	int ret = 0;

	tu = container_of(con, struct trace_uprobe, consumer);
	tu->nhit++;

	udd.tu = tu;
	udd.bp_addr = instruction_pointer(regs);

	current->utask->vaddr = (unsigned long) &udd;

	if (WARN_ON_ONCE(!uprobe_cpu_buffer))
		return 0;

	dsize = __get_data_size(&tu->tp, regs);
	esize = SIZEOF_TRACE_ENTRY(is_ret_probe(tu));

	ucb = uprobe_buffer_get();
	store_trace_args(ucb->buf, &tu->tp, regs, esize, dsize);

	if (trace_probe_test_flag(&tu->tp, TP_FLAG_TRACE))
		ret |= uprobe_trace_func(tu, regs, ucb, dsize);

#ifdef CONFIG_PERF_EVENTS
	if (trace_probe_test_flag(&tu->tp, TP_FLAG_PROFILE))
		ret |= uprobe_perf_func(tu, regs, ucb, dsize);
#endif
	uprobe_buffer_put(ucb);
	return ret;
}
static int uretprobe_dispatcher(struct uprobe_consumer *con,
				unsigned long func, struct pt_regs *regs)
{
	struct trace_uprobe *tu;
	struct uprobe_dispatch_data udd;
	struct uprobe_cpu_buffer *ucb;
	int dsize, esize;

	tu = container_of(con, struct trace_uprobe, consumer);

	udd.tu = tu;
	udd.bp_addr = func;

	current->utask->vaddr = (unsigned long) &udd;

	if (WARN_ON_ONCE(!uprobe_cpu_buffer))
		return 0;

	dsize = __get_data_size(&tu->tp, regs);
	esize = SIZEOF_TRACE_ENTRY(is_ret_probe(tu));

	ucb = uprobe_buffer_get();
	store_trace_args(ucb->buf, &tu->tp, regs, esize, dsize);

	if (trace_probe_test_flag(&tu->tp, TP_FLAG_TRACE))
		uretprobe_trace_func(tu, func, regs, ucb, dsize);

#ifdef CONFIG_PERF_EVENTS
	if (trace_probe_test_flag(&tu->tp, TP_FLAG_PROFILE))
		uretprobe_perf_func(tu, func, regs, ucb, dsize);
#endif
	uprobe_buffer_put(ucb);
	return 0;
}
static struct trace_event_functions uprobe_funcs = {
	.trace		= print_uprobe_event
};

static struct trace_event_fields uprobe_fields_array[] = {
	{ .type = TRACE_FUNCTION_TYPE,
	  .define_fields = uprobe_event_define_fields },
	{}
};

static inline void init_trace_event_call(struct trace_uprobe *tu)
{
	struct trace_event_call *call = trace_probe_event_call(&tu->tp);
	call->event.funcs = &uprobe_funcs;
	call->class->fields_array = uprobe_fields_array;

	call->flags = TRACE_EVENT_FL_UPROBE | TRACE_EVENT_FL_CAP_ANY;
	call->class->reg = trace_uprobe_register;
}
static int register_uprobe_event(struct trace_uprobe *tu)
{
	init_trace_event_call(tu);

	return trace_probe_register_event_call(&tu->tp);
}

static int unregister_uprobe_event(struct trace_uprobe *tu)
{
	return trace_probe_unregister_event_call(&tu->tp);
}
#ifdef CONFIG_PERF_EVENTS
struct trace_event_call *
create_local_trace_uprobe(char *name, unsigned long offs,
			  unsigned long ref_ctr_offset, bool is_return)
{
	enum probe_print_type ptype;
	struct trace_uprobe *tu;
	struct path path;
	int ret;

	ret = kern_path(name, LOOKUP_FOLLOW, &path);
	if (ret)
		return ERR_PTR(ret);

	if (!d_is_reg(path.dentry)) {
		path_put(&path);
		return ERR_PTR(-EINVAL);
	}

	/*
	 * local trace_uprobes are not added to dyn_event, so they are never
	 * searched in find_probe_event(). Therefore, there is no concern of
	 * a duplicated name "DUMMY_EVENT" here.
	 */
	tu = alloc_trace_uprobe(UPROBE_EVENT_SYSTEM, "DUMMY_EVENT", 0,
				is_return);

	if (IS_ERR(tu)) {
		pr_info("Failed to allocate trace_uprobe.(%d)\n",
			(int)PTR_ERR(tu));
		path_put(&path);
		return ERR_CAST(tu);
	}

	tu->offset = offs;
	tu->path = path;
	tu->ref_ctr_offset = ref_ctr_offset;
	tu->filename = kstrdup(name, GFP_KERNEL);
	if (!tu->filename) {
		ret = -ENOMEM;
		goto error;
	}

	init_trace_event_call(tu);

	ptype = is_ret_probe(tu) ? PROBE_PRINT_RETURN : PROBE_PRINT_NORMAL;
	if (traceprobe_set_print_fmt(&tu->tp, ptype) < 0) {
		ret = -ENOMEM;
		goto error;
	}

	return trace_probe_event_call(&tu->tp);
error:
	free_trace_uprobe(tu);
	return ERR_PTR(ret);
}
void destroy_local_trace_uprobe(struct trace_event_call *event_call)
{
	struct trace_uprobe *tu;

	tu = trace_uprobe_primary_from_call(event_call);

	free_trace_uprobe(tu);
}
#endif /* CONFIG_PERF_EVENTS */
/* Make a trace interface for controlling probe points */
static __init int init_uprobe_trace(void)
{
	int ret;

	ret = dyn_event_register(&trace_uprobe_ops);
	if (ret)
		return ret;

	ret = tracing_init_dentry();
	if (ret)
		return 0;

	trace_create_file("uprobe_events", TRACE_MODE_WRITE, NULL,
			  NULL, &uprobe_events_ops);
	/* Profile interface */
	trace_create_file("uprobe_profile", TRACE_MODE_READ, NULL,
			  NULL, &uprobe_profile_ops);
	return 0;
}

fs_initcall(init_uprobe_trace);