// SPDX-License-Identifier: GPL-2.0
/*
 * uprobes-based tracing events
 *
 * Copyright (C) IBM Corporation, 2010-2012
 * Author:	Srikar Dronamraju <srikar@linux.vnet.ibm.com>
 */
#define pr_fmt(fmt)	"trace_uprobe: " fmt
#include <linux/security.h>
#include <linux/ctype.h>
#include <linux/module.h>
#include <linux/uaccess.h>
#include <linux/uprobes.h>
#include <linux/namei.h>
#include <linux/string.h>
#include <linux/rculist.h>

#include "trace_dynevent.h"
#include "trace_probe.h"
#include "trace_probe_tmpl.h"
#define UPROBE_EVENT_SYSTEM	"uprobes"
struct uprobe_trace_entry_head {
	struct trace_entry	ent;
	unsigned long		vaddr[];
};
#define SIZEOF_TRACE_ENTRY(is_return)			\
	(sizeof(struct uprobe_trace_entry_head) +	\
	 sizeof(unsigned long) * (is_return ? 2 : 1))

#define DATAOF_TRACE_ENTRY(entry, is_return)		\
	((void*)(entry) + SIZEOF_TRACE_ENTRY(is_return))
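/*
 * A trace record thus starts with the common trace_entry header, then one
 * vaddr slot (the probe hit address) for an entry probe, or two slots
 * (function address and return address) for a return probe, followed by
 * the packed fetch-argument data that DATAOF_TRACE_ENTRY() points at.
 */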
static int trace_uprobe_create(int argc, const char **argv);
static int trace_uprobe_show(struct seq_file *m, struct dyn_event *ev);
static int trace_uprobe_release(struct dyn_event *ev);
static bool trace_uprobe_is_busy(struct dyn_event *ev);
static bool trace_uprobe_match(const char *system, const char *event,
			int argc, const char **argv, struct dyn_event *ev);
static struct dyn_event_operations trace_uprobe_ops = {
	.create = trace_uprobe_create,
	.show = trace_uprobe_show,
	.is_busy = trace_uprobe_is_busy,
	.free = trace_uprobe_release,
	.match = trace_uprobe_match,
};
/*
 * uprobe event core functions
 */
struct trace_uprobe {
	struct dyn_event		devent;
	struct uprobe_consumer		consumer;
	struct path			path;
	struct inode			*inode;
	char				*filename;
	unsigned long			offset;
	unsigned long			ref_ctr_offset;
	unsigned long			nhit;
	struct trace_probe		tp;
};
static bool is_trace_uprobe(struct dyn_event *ev)
{
	return ev->ops == &trace_uprobe_ops;
}

static struct trace_uprobe *to_trace_uprobe(struct dyn_event *ev)
{
	return container_of(ev, struct trace_uprobe, devent);
}
/**
 * for_each_trace_uprobe - iterate over the trace_uprobe list
 * @pos:	the struct trace_uprobe * for each entry
 * @dpos:	the struct dyn_event * to use as a loop cursor
 */
#define for_each_trace_uprobe(pos, dpos)	\
	for_each_dyn_event(dpos)		\
		if (is_trace_uprobe(dpos) && (pos = to_trace_uprobe(dpos)))
#define SIZEOF_TRACE_UPROBE(n)				\
	(offsetof(struct trace_uprobe, tp.args) +	\
	(sizeof(struct probe_arg) * (n)))
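/*
 * A trace_uprobe is allocated as a single block: the fixed fields above
 * followed by an array of n probe_arg slots, which is why the size is
 * computed from offsetof(tp.args) rather than sizeof(struct trace_uprobe).
 */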
static int register_uprobe_event(struct trace_uprobe *tu);
static int unregister_uprobe_event(struct trace_uprobe *tu);
struct uprobe_dispatch_data {
	struct trace_uprobe	*tu;
	unsigned long		bp_addr;
};
static int uprobe_dispatcher(struct uprobe_consumer *con, struct pt_regs *regs);
static int uretprobe_dispatcher(struct uprobe_consumer *con,
				unsigned long func, struct pt_regs *regs);
#ifdef CONFIG_STACK_GROWSUP
static unsigned long adjust_stack_addr(unsigned long addr, unsigned int n)
{
	return addr - (n * sizeof(long));
}
#else
static unsigned long adjust_stack_addr(unsigned long addr, unsigned int n)
{
	return addr + (n * sizeof(long));
}
#endif
static unsigned long get_user_stack_nth(struct pt_regs *regs, unsigned int n)
{
	unsigned long ret;
	unsigned long addr = user_stack_pointer(regs);

	addr = adjust_stack_addr(addr, n);

	if (copy_from_user(&ret, (void __force __user *) addr, sizeof(ret)))
		return 0;

	return ret;
}
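/*
 * This backs the $stackN fetch argument: a probe defined with, say,
 * "arg1=$stack2" reads the third word on the user stack at the probe
 * hit, returning 0 if the access faults (as above).
 */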
/*
 * Uprobes-specific fetch functions
 */
static nokprobe_inline int
probe_mem_read(void *dest, void *src, size_t size)
{
	void __user *vaddr = (void __force __user *)src;

	return copy_from_user(dest, vaddr, size) ? -EFAULT : 0;
}

static nokprobe_inline int
probe_mem_read_user(void *dest, void *src, size_t size)
{
	return probe_mem_read(dest, src, size);
}
/*
 * Fetch a null-terminated string. Caller MUST set *(u32 *)dest with max
 * length and relative data location.
 */
static nokprobe_inline int
fetch_store_string(unsigned long addr, void *dest, void *base)
{
	long ret;
	u32 loc = *(u32 *)dest;
	int maxlen  = get_loc_len(loc);
	u8 *dst = get_loc_data(dest, base);
	void __user *src = (void __force __user *) addr;

	if (unlikely(!maxlen))
		return -ENOMEM;

	if (addr == FETCH_TOKEN_COMM)
		ret = strlcpy(dst, current->comm, maxlen);
	else
		ret = strncpy_from_user(dst, src, maxlen);
	if (ret >= 0) {
		if (ret == maxlen)
			dst[ret - 1] = '\0';
		else
			/*
			 * Include the terminating null byte. In this case it
			 * was copied by strncpy_from_user but not accounted
			 * for in ret.
			 */
			ret++;
		*(u32 *)dest = make_data_loc(ret, (void *)dst - base);
	}

	return ret;
}
static nokprobe_inline int
fetch_store_string_user(unsigned long addr, void *dest, void *base)
{
	return fetch_store_string(addr, dest, base);
}
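/*
 * The u32 written back by fetch_store_string() is a "data location"
 * word: the string length in the upper half and the offset of the string
 * data relative to the entry base in the lower half (see make_data_loc()
 * in trace_probe.h), which is how variable-length strings are located
 * inside a fixed-layout trace record.
 */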
/* Return the length of the string -- including the terminating null byte */
static nokprobe_inline int
fetch_store_strlen(unsigned long addr)
{
	int len;
	void __user *vaddr = (void __force __user *) addr;

	if (addr == FETCH_TOKEN_COMM)
		len = strlen(current->comm) + 1;
	else
		len = strnlen_user(vaddr, MAX_STRING_SIZE);

	return (len > MAX_STRING_SIZE) ? 0 : len;
}
static nokprobe_inline int
fetch_store_strlen_user(unsigned long addr)
{
	return fetch_store_strlen(addr);
}
static unsigned long translate_user_vaddr(unsigned long file_offset)
{
	unsigned long base_addr;
	struct uprobe_dispatch_data *udd;

	udd = (void *) current->utask->vaddr;

	base_addr = udd->bp_addr - udd->tu->offset;
	return base_addr + file_offset;
}
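/*
 * This backs the @+OFFSET fetch argument: the probe hit address (bp_addr)
 * corresponds to the probe's file offset within the mapped binary, so
 * subtracting the two yields the load base of the mapping, and an argument
 * such as "@+0x1000" (illustrative) then reads the data mapped at file
 * offset 0x1000 in the current task.
 */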
/* Note that we don't verify it, since the code does not come from user space */
static int
process_fetch_insn(struct fetch_insn *code, void *rec, void *dest,
		   void *base)
{
	struct pt_regs *regs = rec;
	unsigned long val;

	/* 1st stage: get value from context */
	switch (code->op) {
	case FETCH_OP_REG:
		val = regs_get_register(regs, code->param);
		break;
	case FETCH_OP_STACK:
		val = get_user_stack_nth(regs, code->param);
		break;
	case FETCH_OP_STACKP:
		val = user_stack_pointer(regs);
		break;
	case FETCH_OP_RETVAL:
		val = regs_return_value(regs);
		break;
	case FETCH_OP_IMM:
		val = code->immediate;
		break;
	case FETCH_OP_COMM:
		val = FETCH_TOKEN_COMM;
		break;
	case FETCH_OP_DATA:
		val = (unsigned long)code->data;
		break;
	case FETCH_OP_FOFFS:
		val = translate_user_vaddr(code->immediate);
		break;
	default:
		return -EILSEQ;
	}
	code++;

	return process_fetch_insn_bottom(code, val, dest, base);
}
NOKPROBE_SYMBOL(process_fetch_insn)
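/*
 * Argument fetching is a two-stage interpreter: the switch above resolves
 * the initial value from the register set, and process_fetch_insn_bottom()
 * (shared via trace_probe_tmpl.h) then applies any remaining dereference,
 * string-copy, or bitfield operations encoded in the fetch_insn array.
 */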
static inline void init_trace_uprobe_filter(struct trace_uprobe_filter *filter)
{
	rwlock_init(&filter->rwlock);
	filter->nr_systemwide = 0;
	INIT_LIST_HEAD(&filter->perf_events);
}
static inline bool uprobe_filter_is_empty(struct trace_uprobe_filter *filter)
{
	return !filter->nr_systemwide && list_empty(&filter->perf_events);
}

static inline bool is_ret_probe(struct trace_uprobe *tu)
{
	return tu->consumer.ret_handler != NULL;
}
static bool trace_uprobe_is_busy(struct dyn_event *ev)
{
	struct trace_uprobe *tu = to_trace_uprobe(ev);

	return trace_probe_is_enabled(&tu->tp);
}
static bool trace_uprobe_match_command_head(struct trace_uprobe *tu,
					    int argc, const char **argv)
{
	char buf[MAX_ARGSTR_LEN + 1];
	int len;

	if (!argc)
		return true;

	len = strlen(tu->filename);
	if (strncmp(tu->filename, argv[0], len) || argv[0][len] != ':')
		return false;

	if (tu->ref_ctr_offset == 0)
		snprintf(buf, sizeof(buf), "0x%0*lx",
				(int)(sizeof(void *) * 2), tu->offset);
	else
		snprintf(buf, sizeof(buf), "0x%0*lx(0x%lx)",
				(int)(sizeof(void *) * 2), tu->offset,
				tu->ref_ctr_offset);
	if (strcmp(buf, &argv[0][len + 1]))
		return false;

	argc--;
	argv++;

	return trace_probe_match_command_args(&tu->tp, argc, argv);
}
static bool trace_uprobe_match(const char *system, const char *event,
			int argc, const char **argv, struct dyn_event *ev)
{
	struct trace_uprobe *tu = to_trace_uprobe(ev);

	return strcmp(trace_probe_name(&tu->tp), event) == 0 &&
	   (!system || strcmp(trace_probe_group_name(&tu->tp), system) == 0) &&
	   trace_uprobe_match_command_head(tu, argc, argv);
}
static nokprobe_inline struct trace_uprobe *
trace_uprobe_primary_from_call(struct trace_event_call *call)
{
	struct trace_probe *tp;

	tp = trace_probe_primary_from_call(call);
	if (WARN_ON_ONCE(!tp))
		return NULL;

	return container_of(tp, struct trace_uprobe, tp);
}
/*
 * Allocate new trace_uprobe and initialize it (including uprobes).
 */
static struct trace_uprobe *
alloc_trace_uprobe(const char *group, const char *event, int nargs, bool is_ret)
{
	struct trace_uprobe *tu;
	int ret;

	tu = kzalloc(SIZEOF_TRACE_UPROBE(nargs), GFP_KERNEL);
	if (!tu)
		return ERR_PTR(-ENOMEM);

	ret = trace_probe_init(&tu->tp, event, group, true);
	if (ret < 0)
		goto error;

	dyn_event_init(&tu->devent, &trace_uprobe_ops);
	tu->consumer.handler = uprobe_dispatcher;
	if (is_ret)
		tu->consumer.ret_handler = uretprobe_dispatcher;
	init_trace_uprobe_filter(tu->tp.event->filter);

	return tu;

error:
	kfree(tu);

	return ERR_PTR(ret);
}
static void free_trace_uprobe(struct trace_uprobe *tu)
{
	if (!tu)
		return;

	path_put(&tu->path);
	trace_probe_cleanup(&tu->tp);
	kfree(tu->filename);
	kfree(tu);
}
static struct trace_uprobe *find_probe_event(const char *event, const char *group)
{
	struct dyn_event *pos;
	struct trace_uprobe *tu;

	for_each_trace_uprobe(tu, pos)
		if (strcmp(trace_probe_name(&tu->tp), event) == 0 &&
		    strcmp(trace_probe_group_name(&tu->tp), group) == 0)
			return tu;

	return NULL;
}
/* Unregister a trace_uprobe and probe_event */
static int unregister_trace_uprobe(struct trace_uprobe *tu)
{
	int ret;

	if (trace_probe_has_sibling(&tu->tp))
		goto unreg;

	ret = unregister_uprobe_event(tu);
	if (ret)
		return ret;

unreg:
	dyn_event_remove(&tu->devent);
	trace_probe_unlink(&tu->tp);
	free_trace_uprobe(tu);

	return 0;
}
static bool trace_uprobe_has_same_uprobe(struct trace_uprobe *orig,
					 struct trace_uprobe *comp)
{
	struct trace_probe_event *tpe = orig->tp.event;
	struct trace_probe *pos;
	struct inode *comp_inode = d_real_inode(comp->path.dentry);
	int i;

	list_for_each_entry(pos, &tpe->probes, list) {
		orig = container_of(pos, struct trace_uprobe, tp);
		if (comp_inode != d_real_inode(orig->path.dentry) ||
		    comp->offset != orig->offset)
			continue;

		/*
		 * trace_probe_compare_arg_type() ensured that nr_args and
		 * each argument name and type are the same. Let's compare comm.
		 */
		for (i = 0; i < orig->tp.nr_args; i++) {
			if (strcmp(orig->tp.args[i].comm,
				   comp->tp.args[i].comm))
				break;
		}

		if (i == orig->tp.nr_args)
			return true;
	}

	return false;
}
static int append_trace_uprobe(struct trace_uprobe *tu, struct trace_uprobe *to)
{
	int ret;

	ret = trace_probe_compare_arg_type(&tu->tp, &to->tp);
	if (ret) {
		/* Note that arguments start at index 2 in the command line */
		trace_probe_log_set_index(ret + 1);
		trace_probe_log_err(0, DIFF_ARG_TYPE);
		return -EEXIST;
	}
	if (trace_uprobe_has_same_uprobe(to, tu)) {
		trace_probe_log_set_index(0);
		trace_probe_log_err(0, SAME_PROBE);
		return -EEXIST;
	}

	/* Append to existing event */
	ret = trace_probe_append(&tu->tp, &to->tp);
	if (!ret)
		dyn_event_add(&tu->devent);

	return ret;
}
/*
 * A uprobe with multiple reference counters is not allowed; i.e. if the
 * inode and offset match, the reference counter offset *must* match as
 * well. There is one exception: if the user is replacing an old
 * trace_uprobe with a new one (same group/event), we allow the same
 * uprobe with a new reference counter, as long as the new one does not
 * conflict with any other existing ones.
 */
static int validate_ref_ctr_offset(struct trace_uprobe *new)
{
	struct dyn_event *pos;
	struct trace_uprobe *tmp;
	struct inode *new_inode = d_real_inode(new->path.dentry);

	for_each_trace_uprobe(tmp, pos) {
		if (new_inode == d_real_inode(tmp->path.dentry) &&
		    new->offset == tmp->offset &&
		    new->ref_ctr_offset != tmp->ref_ctr_offset) {
			pr_warn("Reference counter offset mismatch.");
			return -EINVAL;
		}
	}

	return 0;
}
/* Register a trace_uprobe and probe_event */
static int register_trace_uprobe(struct trace_uprobe *tu)
{
	struct trace_uprobe *old_tu;
	int ret;

	mutex_lock(&event_mutex);

	ret = validate_ref_ctr_offset(tu);
	if (ret)
		goto end;

	/* register as an event */
	old_tu = find_probe_event(trace_probe_name(&tu->tp),
				  trace_probe_group_name(&tu->tp));
	if (old_tu) {
		if (is_ret_probe(tu) != is_ret_probe(old_tu)) {
			trace_probe_log_set_index(0);
			trace_probe_log_err(0, DIFF_PROBE_TYPE);
			ret = -EEXIST;
		} else {
			ret = append_trace_uprobe(tu, old_tu);
		}
		goto end;
	}

	ret = register_uprobe_event(tu);
	if (ret) {
		if (ret == -EEXIST) {
			trace_probe_log_set_index(0);
			trace_probe_log_err(0, EVENT_EXIST);
		} else
			pr_warn("Failed to register probe event(%d)\n", ret);
		goto end;
	}

	dyn_event_add(&tu->devent);

end:
	mutex_unlock(&event_mutex);

	return ret;
}
/*
 * Argument syntax:
 *  - Add uprobe: p|r[:[GRP/]EVENT] PATH:OFFSET [FETCHARGS]
 */
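/*
 * For example, writing to the uprobe_events tracefs file (the offsets
 * here are illustrative and must match the probed binary):
 *
 *   p:mygrp/my_entry /bin/bash:0x4245c0 %ax $stack0
 *   r:mygrp/my_exit  /bin/bash:0x4245c0 $retval
 */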
static int trace_uprobe_create(int argc, const char **argv)
{
	struct trace_uprobe *tu;
	const char *event = NULL, *group = UPROBE_EVENT_SYSTEM;
	char *arg, *filename, *rctr, *rctr_end, *tmp;
	char buf[MAX_EVENT_NAME_LEN];
	struct path path;
	unsigned long offset, ref_ctr_offset = 0;
	bool is_return = false;
	int i, ret = 0;

	switch (argv[0][0]) {
	case 'r':
		is_return = true;
		break;
	case 'p':
		break;
	default:
		return -ECANCELED;
	}
	if (argc < 2)
		return -ECANCELED;

	if (argv[0][1] == ':')
		event = &argv[0][2];

	if (!strchr(argv[1], '/'))
		return -ECANCELED;

	filename = kstrdup(argv[1], GFP_KERNEL);
	if (!filename)
		return -ENOMEM;

	/* Find the last occurrence, in case the path contains ':' too. */
	arg = strrchr(filename, ':');
	if (!arg || !isdigit(arg[1])) {
		kfree(filename);
		return -ECANCELED;
	}

	trace_probe_log_init("trace_uprobe", argc, argv);
	trace_probe_log_set_index(1);	/* filename is the 2nd argument */

	*arg++ = '\0';
	ret = kern_path(filename, LOOKUP_FOLLOW, &path);
	if (ret) {
		trace_probe_log_err(0, FILE_NOT_FOUND);
		kfree(filename);
		trace_probe_log_clear();
		return ret;
	}
	if (!d_is_reg(path.dentry)) {
		trace_probe_log_err(0, NO_REGULAR_FILE);
		ret = -EINVAL;
		goto fail_address_parse;
	}

	/* Parse reference counter offset if specified. */
	rctr = strchr(arg, '(');
	if (rctr) {
		rctr_end = strchr(rctr, ')');
		if (!rctr_end) {
			ret = -EINVAL;
			rctr_end = rctr + strlen(rctr);
			trace_probe_log_err(rctr_end - filename,
					    REFCNT_OPEN_BRACE);
			goto fail_address_parse;
		} else if (rctr_end[1] != '\0') {
			ret = -EINVAL;
			trace_probe_log_err(rctr_end + 1 - filename,
					    BAD_REFCNT_SUFFIX);
			goto fail_address_parse;
		}

		*rctr++ = '\0';
		*rctr_end = '\0';
		ret = kstrtoul(rctr, 0, &ref_ctr_offset);
		if (ret) {
			trace_probe_log_err(rctr - filename, BAD_REFCNT);
			goto fail_address_parse;
		}
	}

	/* Parse uprobe offset. */
	ret = kstrtoul(arg, 0, &offset);
	if (ret) {
		trace_probe_log_err(arg - filename, BAD_UPROBE_OFFS);
		goto fail_address_parse;
	}

	/* setup a probe */
	trace_probe_log_set_index(0);
	if (event) {
		ret = traceprobe_parse_event_name(&event, &group, buf,
						  event - argv[0]);
		if (ret)
			goto fail_address_parse;
	} else {
		char *tail;
		char *ptr;

		tail = kstrdup(kbasename(filename), GFP_KERNEL);
		if (!tail) {
			ret = -ENOMEM;
			goto fail_address_parse;
		}

		ptr = strpbrk(tail, ".-_");
		if (ptr)
			*ptr = '\0';

		snprintf(buf, MAX_EVENT_NAME_LEN, "%c_%s_0x%lx", 'p', tail, offset);
		event = buf;
		kfree(tail);
	}

	argc -= 2;
	argv += 2;

	tu = alloc_trace_uprobe(group, event, argc, is_return);
	if (IS_ERR(tu)) {
		ret = PTR_ERR(tu);
		/* This must return -ENOMEM otherwise there is a bug */
		WARN_ON_ONCE(ret != -ENOMEM);
		goto fail_address_parse;
	}
	tu->offset = offset;
	tu->ref_ctr_offset = ref_ctr_offset;
	tu->path = path;
	tu->filename = filename;

	/* parse arguments */
	for (i = 0; i < argc && i < MAX_TRACE_ARGS; i++) {
		tmp = kstrdup(argv[i], GFP_KERNEL);
		if (!tmp) {
			ret = -ENOMEM;
			goto error;
		}

		trace_probe_log_set_index(i + 2);
		ret = traceprobe_parse_probe_arg(&tu->tp, i, tmp,
					is_return ? TPARG_FL_RETURN : 0);
		kfree(tmp);
		if (ret)
			goto error;
	}

	ret = traceprobe_set_print_fmt(&tu->tp, is_ret_probe(tu));
	if (ret < 0)
		goto error;

	ret = register_trace_uprobe(tu);
	if (!ret)
		goto out;

error:
	free_trace_uprobe(tu);
out:
	trace_probe_log_clear();
	return ret;

fail_address_parse:
	trace_probe_log_clear();
	path_put(&path);
	kfree(filename);

	return ret;
}
static int create_or_delete_trace_uprobe(int argc, char **argv)
{
	int ret;

	if (argv[0][0] == '-')
		return dyn_event_release(argc, argv, &trace_uprobe_ops);

	ret = trace_uprobe_create(argc, (const char **)argv);
	return ret == -ECANCELED ? -EINVAL : ret;
}
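/*
 * A leading '-' thus deletes an existing probe rather than creating one,
 * e.g. (illustrative): echo '-:mygrp/my_entry' >> uprobe_events.
 */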
static int trace_uprobe_release(struct dyn_event *ev)
{
	struct trace_uprobe *tu = to_trace_uprobe(ev);

	return unregister_trace_uprobe(tu);
}
/* Probes listing interfaces */
static int trace_uprobe_show(struct seq_file *m, struct dyn_event *ev)
{
	struct trace_uprobe *tu = to_trace_uprobe(ev);
	char c = is_ret_probe(tu) ? 'r' : 'p';
	int i;

	seq_printf(m, "%c:%s/%s %s:0x%0*lx", c, trace_probe_group_name(&tu->tp),
			trace_probe_name(&tu->tp), tu->filename,
			(int)(sizeof(void *) * 2), tu->offset);

	if (tu->ref_ctr_offset)
		seq_printf(m, "(0x%lx)", tu->ref_ctr_offset);

	for (i = 0; i < tu->tp.nr_args; i++)
		seq_printf(m, " %s=%s", tu->tp.args[i].name, tu->tp.args[i].comm);

	seq_putc(m, '\n');
	return 0;
}
static int probes_seq_show(struct seq_file *m, void *v)
{
	struct dyn_event *ev = v;

	if (!is_trace_uprobe(ev))
		return 0;

	return trace_uprobe_show(m, ev);
}
static const struct seq_operations probes_seq_op = {
	.start	= dyn_event_seq_start,
	.next	= dyn_event_seq_next,
	.stop	= dyn_event_seq_stop,
	.show	= probes_seq_show
};
static int probes_open(struct inode *inode, struct file *file)
{
	int ret;

	ret = security_locked_down(LOCKDOWN_TRACEFS);
	if (ret)
		return ret;

	if ((file->f_mode & FMODE_WRITE) && (file->f_flags & O_TRUNC)) {
		ret = dyn_events_release_all(&trace_uprobe_ops);
		if (ret)
			return ret;
	}

	return seq_open(file, &probes_seq_op);
}
static ssize_t probes_write(struct file *file, const char __user *buffer,
			    size_t count, loff_t *ppos)
{
	return trace_parse_run_command(file, buffer, count, ppos,
					create_or_delete_trace_uprobe);
}
static const struct file_operations uprobe_events_ops = {
	.owner		= THIS_MODULE,
	.open		= probes_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release,
	.write		= probes_write,
};
/* Probes profiling interfaces */
static int probes_profile_seq_show(struct seq_file *m, void *v)
{
	struct dyn_event *ev = v;
	struct trace_uprobe *tu;

	if (!is_trace_uprobe(ev))
		return 0;

	tu = to_trace_uprobe(ev);
	seq_printf(m, " %s %-44s %15lu\n", tu->filename,
			trace_probe_name(&tu->tp), tu->nhit);
	return 0;
}
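/*
 * Reading uprobe_profile thus yields one line per probe -- the probed
 * file, the event name, and the hit count accumulated in tu->nhit,
 * e.g. (illustrative): " /bin/bash my_entry    1024".
 */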
static const struct seq_operations profile_seq_op = {
	.start	= dyn_event_seq_start,
	.next	= dyn_event_seq_next,
	.stop	= dyn_event_seq_stop,
	.show	= probes_profile_seq_show
};
static int profile_open(struct inode *inode, struct file *file)
{
	int ret;

	ret = security_locked_down(LOCKDOWN_TRACEFS);
	if (ret)
		return ret;

	return seq_open(file, &profile_seq_op);
}
static const struct file_operations uprobe_profile_ops = {
	.owner		= THIS_MODULE,
	.open		= profile_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release,
};
struct uprobe_cpu_buffer {
	struct mutex mutex;
	void *buf;
};
static struct uprobe_cpu_buffer __percpu *uprobe_cpu_buffer;
static int uprobe_buffer_refcnt;
static int uprobe_buffer_init(void)
{
	int cpu, err_cpu;

	uprobe_cpu_buffer = alloc_percpu(struct uprobe_cpu_buffer);
	if (uprobe_cpu_buffer == NULL)
		return -ENOMEM;

	for_each_possible_cpu(cpu) {
		struct page *p = alloc_pages_node(cpu_to_node(cpu),
						  GFP_KERNEL, 0);
		if (p == NULL) {
			err_cpu = cpu;
			goto err;
		}
		per_cpu_ptr(uprobe_cpu_buffer, cpu)->buf = page_address(p);
		mutex_init(&per_cpu_ptr(uprobe_cpu_buffer, cpu)->mutex);
	}

	return 0;

err:
	for_each_possible_cpu(cpu) {
		if (cpu == err_cpu)
			break;
		free_page((unsigned long)per_cpu_ptr(uprobe_cpu_buffer, cpu)->buf);
	}

	free_percpu(uprobe_cpu_buffer);
	return -ENOMEM;
}
static int uprobe_buffer_enable(void)
{
	int ret = 0;

	BUG_ON(!mutex_is_locked(&event_mutex));

	if (uprobe_buffer_refcnt++ == 0) {
		ret = uprobe_buffer_init();
		if (ret < 0)
			uprobe_buffer_refcnt--;
	}

	return ret;
}
static void uprobe_buffer_disable(void)
{
	int cpu;

	BUG_ON(!mutex_is_locked(&event_mutex));

	if (--uprobe_buffer_refcnt == 0) {
		for_each_possible_cpu(cpu)
			free_page((unsigned long)per_cpu_ptr(uprobe_cpu_buffer,
							     cpu)->buf);

		free_percpu(uprobe_cpu_buffer);
		uprobe_cpu_buffer = NULL;
	}
}
static struct uprobe_cpu_buffer *uprobe_buffer_get(void)
{
	struct uprobe_cpu_buffer *ucb;
	int cpu;

	cpu = raw_smp_processor_id();
	ucb = per_cpu_ptr(uprobe_cpu_buffer, cpu);

	/*
	 * Use per-cpu buffers for fastest access, but we might migrate
	 * so the mutex makes sure we have sole access to it.
	 */
	mutex_lock(&ucb->mutex);

	return ucb;
}

static void uprobe_buffer_put(struct uprobe_cpu_buffer *ucb)
{
	mutex_unlock(&ucb->mutex);
}
static void __uprobe_trace_func(struct trace_uprobe *tu,
				unsigned long func, struct pt_regs *regs,
				struct uprobe_cpu_buffer *ucb, int dsize,
				struct trace_event_file *trace_file)
{
	struct uprobe_trace_entry_head *entry;
	struct ring_buffer_event *event;
	struct ring_buffer *buffer;
	void *data;
	int size, esize;
	struct trace_event_call *call = trace_probe_event_call(&tu->tp);

	WARN_ON(call != trace_file->event_call);

	if (WARN_ON_ONCE(tu->tp.size + dsize > PAGE_SIZE))
		return;

	if (trace_trigger_soft_disabled(trace_file))
		return;

	esize = SIZEOF_TRACE_ENTRY(is_ret_probe(tu));
	size = esize + tu->tp.size + dsize;
	event = trace_event_buffer_lock_reserve(&buffer, trace_file,
						call->event.type, size, 0, 0);
	if (!event)
		return;

	entry = ring_buffer_event_data(event);
	if (is_ret_probe(tu)) {
		entry->vaddr[0] = func;
		entry->vaddr[1] = instruction_pointer(regs);
		data = DATAOF_TRACE_ENTRY(entry, true);
	} else {
		entry->vaddr[0] = instruction_pointer(regs);
		data = DATAOF_TRACE_ENTRY(entry, false);
	}

	memcpy(data, ucb->buf, tu->tp.size + dsize);

	event_trigger_unlock_commit(trace_file, buffer, event, entry, 0, 0);
}
/* uprobe handler */
static int uprobe_trace_func(struct trace_uprobe *tu, struct pt_regs *regs,
			     struct uprobe_cpu_buffer *ucb, int dsize)
{
	struct event_file_link *link;

	if (is_ret_probe(tu))
		return 0;

	rcu_read_lock();
	trace_probe_for_each_link_rcu(link, &tu->tp)
		__uprobe_trace_func(tu, 0, regs, ucb, dsize, link->file);
	rcu_read_unlock();

	return 0;
}
static void uretprobe_trace_func(struct trace_uprobe *tu, unsigned long func,
				 struct pt_regs *regs,
				 struct uprobe_cpu_buffer *ucb, int dsize)
{
	struct event_file_link *link;

	rcu_read_lock();
	trace_probe_for_each_link_rcu(link, &tu->tp)
		__uprobe_trace_func(tu, func, regs, ucb, dsize, link->file);
	rcu_read_unlock();
}
/* Event entry printers */
static enum print_line_t
print_uprobe_event(struct trace_iterator *iter, int flags, struct trace_event *event)
{
	struct uprobe_trace_entry_head *entry;
	struct trace_seq *s = &iter->seq;
	struct trace_uprobe *tu;
	u8 *data;

	entry = (struct uprobe_trace_entry_head *)iter->ent;
	tu = trace_uprobe_primary_from_call(
		container_of(event, struct trace_event_call, event));
	if (unlikely(!tu))
		goto out;

	if (is_ret_probe(tu)) {
		trace_seq_printf(s, "%s: (0x%lx <- 0x%lx)",
				 trace_probe_name(&tu->tp),
				 entry->vaddr[1], entry->vaddr[0]);
		data = DATAOF_TRACE_ENTRY(entry, true);
	} else {
		trace_seq_printf(s, "%s: (0x%lx)",
				 trace_probe_name(&tu->tp),
				 entry->vaddr[0]);
		data = DATAOF_TRACE_ENTRY(entry, false);
	}

	if (print_probe_args(s, tu->tp.args, tu->tp.nr_args, data, entry) < 0)
		goto out;

	trace_seq_putc(s, '\n');

 out:
	return trace_handle_return(s);
}
typedef bool (*filter_func_t)(struct uprobe_consumer *self,
				enum uprobe_filter_ctx ctx,
				struct mm_struct *mm);
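/*
 * trace_uprobe_register() passes uprobe_perf_filter here for the perf
 * path and NULL for the ftrace path, so only perf-based probes filter
 * by target mm when the breakpoint is installed.
 */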
static int trace_uprobe_enable(struct trace_uprobe *tu, filter_func_t filter)
{
	int ret;

	tu->consumer.filter = filter;
	tu->inode = d_real_inode(tu->path.dentry);

	if (tu->ref_ctr_offset)
		ret = uprobe_register_refctr(tu->inode, tu->offset,
				tu->ref_ctr_offset, &tu->consumer);
	else
		ret = uprobe_register(tu->inode, tu->offset, &tu->consumer);

	if (ret)
		tu->inode = NULL;

	return ret;
}
static void __probe_event_disable(struct trace_probe *tp)
{
	struct trace_probe *pos;
	struct trace_uprobe *tu;

	tu = container_of(tp, struct trace_uprobe, tp);
	WARN_ON(!uprobe_filter_is_empty(tu->tp.event->filter));

	list_for_each_entry(pos, trace_probe_probe_list(tp), list) {
		tu = container_of(pos, struct trace_uprobe, tp);
		if (!tu->inode)
			continue;

		uprobe_unregister(tu->inode, tu->offset, &tu->consumer);
		tu->inode = NULL;
	}
}
static int probe_event_enable(struct trace_event_call *call,
			struct trace_event_file *file, filter_func_t filter)
{
	struct trace_probe *pos, *tp;
	struct trace_uprobe *tu;
	bool enabled;
	int ret;

	tp = trace_probe_primary_from_call(call);
	if (WARN_ON_ONCE(!tp))
		return -ENODEV;
	enabled = trace_probe_is_enabled(tp);

	/* This may also change "enabled" state */
	if (file) {
		if (trace_probe_test_flag(tp, TP_FLAG_PROFILE))
			return -EINTR;

		ret = trace_probe_add_file(tp, file);
		if (ret < 0)
			return ret;
	} else {
		if (trace_probe_test_flag(tp, TP_FLAG_TRACE))
			return -EINTR;

		trace_probe_set_flag(tp, TP_FLAG_PROFILE);
	}

	tu = container_of(tp, struct trace_uprobe, tp);
	WARN_ON(!uprobe_filter_is_empty(tu->tp.event->filter));

	if (enabled)
		return 0;

	ret = uprobe_buffer_enable();
	if (ret)
		goto err_flags;

	list_for_each_entry(pos, trace_probe_probe_list(tp), list) {
		tu = container_of(pos, struct trace_uprobe, tp);
		ret = trace_uprobe_enable(tu, filter);
		if (ret) {
			__probe_event_disable(tp);
			goto err_buffer;
		}
	}

	return 0;

 err_buffer:
	uprobe_buffer_disable();

 err_flags:
	if (file)
		trace_probe_remove_file(tp, file);
	else
		trace_probe_clear_flag(tp, TP_FLAG_PROFILE);

	return ret;
}
static void probe_event_disable(struct trace_event_call *call,
				struct trace_event_file *file)
{
	struct trace_probe *tp;

	tp = trace_probe_primary_from_call(call);
	if (WARN_ON_ONCE(!tp))
		return;

	if (!trace_probe_is_enabled(tp))
		return;

	if (file) {
		if (trace_probe_remove_file(tp, file) < 0)
			return;

		if (trace_probe_is_enabled(tp))
			return;
	} else
		trace_probe_clear_flag(tp, TP_FLAG_PROFILE);

	__probe_event_disable(tp);
	uprobe_buffer_disable();
}
static int uprobe_event_define_fields(struct trace_event_call *event_call)
{
	int ret, size;
	struct uprobe_trace_entry_head field;
	struct trace_uprobe *tu;

	tu = trace_uprobe_primary_from_call(event_call);
	if (unlikely(!tu))
		return -ENODEV;

	if (is_ret_probe(tu)) {
		DEFINE_FIELD(unsigned long, vaddr[0], FIELD_STRING_FUNC, 0);
		DEFINE_FIELD(unsigned long, vaddr[1], FIELD_STRING_RETIP, 0);
		size = SIZEOF_TRACE_ENTRY(true);
	} else {
		DEFINE_FIELD(unsigned long, vaddr[0], FIELD_STRING_IP, 0);
		size = SIZEOF_TRACE_ENTRY(false);
	}

	return traceprobe_define_arg_fields(event_call, size, &tu->tp);
}
#ifdef CONFIG_PERF_EVENTS
static bool
__uprobe_perf_filter(struct trace_uprobe_filter *filter, struct mm_struct *mm)
{
	struct perf_event *event;

	if (filter->nr_systemwide)
		return true;

	list_for_each_entry(event, &filter->perf_events, hw.tp_list) {
		if (event->hw.target->mm == mm)
			return true;
	}

	return false;
}
static inline bool
trace_uprobe_filter_event(struct trace_uprobe_filter *filter,
			  struct perf_event *event)
{
	return __uprobe_perf_filter(filter, event->hw.target->mm);
}
static bool trace_uprobe_filter_remove(struct trace_uprobe_filter *filter,
				       struct perf_event *event)
{
	bool done;

	write_lock(&filter->rwlock);
	if (event->hw.target) {
		list_del(&event->hw.tp_list);
		done = filter->nr_systemwide ||
			(event->hw.target->flags & PF_EXITING) ||
			trace_uprobe_filter_event(filter, event);
	} else {
		filter->nr_systemwide--;
		done = filter->nr_systemwide;
	}
	write_unlock(&filter->rwlock);

	return done;
}
/* This returns true if the filter always covers target mm */
static bool trace_uprobe_filter_add(struct trace_uprobe_filter *filter,
				    struct perf_event *event)
{
	bool done;

	write_lock(&filter->rwlock);
	if (event->hw.target) {
		/*
		 * event->parent != NULL means copy_process(), we can avoid
		 * uprobe_apply(). current->mm must be probed and we can rely
		 * on dup_mmap() which preserves the already installed bp's.
		 *
		 * attr.enable_on_exec means that exec/mmap will install the
		 * breakpoints we need.
		 */
		done = filter->nr_systemwide ||
			event->parent || event->attr.enable_on_exec ||
			trace_uprobe_filter_event(filter, event);
		list_add(&event->hw.tp_list, &filter->perf_events);
	} else {
		done = filter->nr_systemwide;
		filter->nr_systemwide++;
	}
	write_unlock(&filter->rwlock);

	return done;
}
static int uprobe_perf_close(struct trace_event_call *call,
			     struct perf_event *event)
{
	struct trace_probe *pos, *tp;
	struct trace_uprobe *tu;
	int ret = 0;

	tp = trace_probe_primary_from_call(call);
	if (WARN_ON_ONCE(!tp))
		return -ENODEV;

	tu = container_of(tp, struct trace_uprobe, tp);
	if (trace_uprobe_filter_remove(tu->tp.event->filter, event))
		return 0;

	list_for_each_entry(pos, trace_probe_probe_list(tp), list) {
		tu = container_of(pos, struct trace_uprobe, tp);
		ret = uprobe_apply(tu->inode, tu->offset, &tu->consumer, false);
		if (ret)
			break;
	}

	return ret;
}
static int uprobe_perf_open(struct trace_event_call *call,
			    struct perf_event *event)
{
	struct trace_probe *pos, *tp;
	struct trace_uprobe *tu;
	int err = 0;

	tp = trace_probe_primary_from_call(call);
	if (WARN_ON_ONCE(!tp))
		return -ENODEV;

	tu = container_of(tp, struct trace_uprobe, tp);
	if (trace_uprobe_filter_add(tu->tp.event->filter, event))
		return 0;

	list_for_each_entry(pos, trace_probe_probe_list(tp), list) {
		tu = container_of(pos, struct trace_uprobe, tp);
		err = uprobe_apply(tu->inode, tu->offset, &tu->consumer, true);
		if (err) {
			uprobe_perf_close(call, event);
			break;
		}
	}

	return err;
}
static bool uprobe_perf_filter(struct uprobe_consumer *uc,
				enum uprobe_filter_ctx ctx, struct mm_struct *mm)
{
	struct trace_uprobe_filter *filter;
	struct trace_uprobe *tu;
	bool ret;

	tu = container_of(uc, struct trace_uprobe, consumer);
	filter = tu->tp.event->filter;

	read_lock(&filter->rwlock);
	ret = __uprobe_perf_filter(filter, mm);
	read_unlock(&filter->rwlock);

	return ret;
}
static void __uprobe_perf_func(struct trace_uprobe *tu,
			       unsigned long func, struct pt_regs *regs,
			       struct uprobe_cpu_buffer *ucb, int dsize)
{
	struct trace_event_call *call = trace_probe_event_call(&tu->tp);
	struct uprobe_trace_entry_head *entry;
	struct hlist_head *head;
	void *data;
	int size, esize;
	int rctx;

	if (bpf_prog_array_valid(call) && !trace_call_bpf(call, regs))
		return;

	esize = SIZEOF_TRACE_ENTRY(is_ret_probe(tu));

	size = esize + tu->tp.size + dsize;
	size = ALIGN(size + sizeof(u32), sizeof(u64)) - sizeof(u32);
	if (WARN_ONCE(size > PERF_MAX_TRACE_SIZE, "profile buffer not large enough"))
		return;

	preempt_disable();
	head = this_cpu_ptr(call->perf_events);
	if (hlist_empty(head))
		goto out;

	entry = perf_trace_buf_alloc(size, NULL, &rctx);
	if (!entry)
		goto out;

	if (is_ret_probe(tu)) {
		entry->vaddr[0] = func;
		entry->vaddr[1] = instruction_pointer(regs);
		data = DATAOF_TRACE_ENTRY(entry, true);
	} else {
		entry->vaddr[0] = instruction_pointer(regs);
		data = DATAOF_TRACE_ENTRY(entry, false);
	}

	memcpy(data, ucb->buf, tu->tp.size + dsize);

	if (size - esize > tu->tp.size + dsize) {
		int len = tu->tp.size + dsize;

		memset(data + len, 0, size - esize - len);
	}

	perf_trace_buf_submit(entry, size, rctx, call->event.type, 1, regs,
			      head, NULL);
 out:
	preempt_enable();
}
/* uprobe profile handler */
static int uprobe_perf_func(struct trace_uprobe *tu, struct pt_regs *regs,
			    struct uprobe_cpu_buffer *ucb, int dsize)
{
	if (!uprobe_perf_filter(&tu->consumer, 0, current->mm))
		return UPROBE_HANDLER_REMOVE;

	if (!is_ret_probe(tu))
		__uprobe_perf_func(tu, 0, regs, ucb, dsize);
	return 0;
}
static void uretprobe_perf_func(struct trace_uprobe *tu, unsigned long func,
				struct pt_regs *regs,
				struct uprobe_cpu_buffer *ucb, int dsize)
{
	__uprobe_perf_func(tu, func, regs, ucb, dsize);
}
int bpf_get_uprobe_info(const struct perf_event *event, u32 *fd_type,
			const char **filename, u64 *probe_offset,
			u64 *probe_addr, bool perf_type_tracepoint)
{
	const char *pevent = trace_event_name(event->tp_event);
	const char *group = event->tp_event->class->system;
	struct trace_uprobe *tu;

	if (perf_type_tracepoint)
		tu = find_probe_event(pevent, group);
	else
		tu = trace_uprobe_primary_from_call(event->tp_event);
	if (!tu)
		return -EINVAL;

	*fd_type = is_ret_probe(tu) ? BPF_FD_TYPE_URETPROBE
				    : BPF_FD_TYPE_UPROBE;
	*filename = tu->filename;
	*probe_offset = tu->offset;
	return 0;
}
#endif	/* CONFIG_PERF_EVENTS */
static int
trace_uprobe_register(struct trace_event_call *event, enum trace_reg type,
		      void *data)
{
	struct trace_event_file *file = data;

	switch (type) {
	case TRACE_REG_REGISTER:
		return probe_event_enable(event, file, NULL);

	case TRACE_REG_UNREGISTER:
		probe_event_disable(event, file);
		return 0;

#ifdef CONFIG_PERF_EVENTS
	case TRACE_REG_PERF_REGISTER:
		return probe_event_enable(event, NULL, uprobe_perf_filter);

	case TRACE_REG_PERF_UNREGISTER:
		probe_event_disable(event, NULL);
		return 0;

	case TRACE_REG_PERF_OPEN:
		return uprobe_perf_open(event, data);

	case TRACE_REG_PERF_CLOSE:
		return uprobe_perf_close(event, data);

#endif
	default:
		return 0;
	}
}
static int uprobe_dispatcher(struct uprobe_consumer *con, struct pt_regs *regs)
{
	struct trace_uprobe *tu;
	struct uprobe_dispatch_data udd;
	struct uprobe_cpu_buffer *ucb;
	int dsize, esize;
	int ret = 0;

	tu = container_of(con, struct trace_uprobe, consumer);
	tu->nhit++;

	udd.tu = tu;
	udd.bp_addr = instruction_pointer(regs);

	current->utask->vaddr = (unsigned long) &udd;

	if (WARN_ON_ONCE(!uprobe_cpu_buffer))
		return 0;

	dsize = __get_data_size(&tu->tp, regs);
	esize = SIZEOF_TRACE_ENTRY(is_ret_probe(tu));

	ucb = uprobe_buffer_get();
	store_trace_args(ucb->buf, &tu->tp, regs, esize, dsize);

	if (trace_probe_test_flag(&tu->tp, TP_FLAG_TRACE))
		ret |= uprobe_trace_func(tu, regs, ucb, dsize);

#ifdef CONFIG_PERF_EVENTS
	if (trace_probe_test_flag(&tu->tp, TP_FLAG_PROFILE))
		ret |= uprobe_perf_func(tu, regs, ucb, dsize);
#endif
	uprobe_buffer_put(ucb);
	return ret;
}
static int uretprobe_dispatcher(struct uprobe_consumer *con,
				unsigned long func, struct pt_regs *regs)
{
	struct trace_uprobe *tu;
	struct uprobe_dispatch_data udd;
	struct uprobe_cpu_buffer *ucb;
	int dsize, esize;

	tu = container_of(con, struct trace_uprobe, consumer);

	udd.tu = tu;
	udd.bp_addr = func;

	current->utask->vaddr = (unsigned long) &udd;

	if (WARN_ON_ONCE(!uprobe_cpu_buffer))
		return 0;

	dsize = __get_data_size(&tu->tp, regs);
	esize = SIZEOF_TRACE_ENTRY(is_ret_probe(tu));

	ucb = uprobe_buffer_get();
	store_trace_args(ucb->buf, &tu->tp, regs, esize, dsize);

	if (trace_probe_test_flag(&tu->tp, TP_FLAG_TRACE))
		uretprobe_trace_func(tu, func, regs, ucb, dsize);

#ifdef CONFIG_PERF_EVENTS
	if (trace_probe_test_flag(&tu->tp, TP_FLAG_PROFILE))
		uretprobe_perf_func(tu, func, regs, ucb, dsize);
#endif
	uprobe_buffer_put(ucb);
	return 0;
}
static struct trace_event_functions uprobe_funcs = {
	.trace		= print_uprobe_event
};
static inline void init_trace_event_call(struct trace_uprobe *tu)
{
	struct trace_event_call *call = trace_probe_event_call(&tu->tp);
	call->event.funcs = &uprobe_funcs;
	call->class->define_fields = uprobe_event_define_fields;

	call->flags = TRACE_EVENT_FL_UPROBE | TRACE_EVENT_FL_CAP_ANY;
	call->class->reg = trace_uprobe_register;
}
static int register_uprobe_event(struct trace_uprobe *tu)
{
	init_trace_event_call(tu);

	return trace_probe_register_event_call(&tu->tp);
}

static int unregister_uprobe_event(struct trace_uprobe *tu)
{
	return trace_probe_unregister_event_call(&tu->tp);
}
#ifdef CONFIG_PERF_EVENTS
struct trace_event_call *
create_local_trace_uprobe(char *name, unsigned long offs,
			  unsigned long ref_ctr_offset, bool is_return)
{
	struct trace_uprobe *tu;
	struct path path;
	int ret;

	ret = kern_path(name, LOOKUP_FOLLOW, &path);
	if (ret)
		return ERR_PTR(ret);

	if (!d_is_reg(path.dentry)) {
		path_put(&path);
		return ERR_PTR(-EINVAL);
	}

	/*
	 * local trace_uprobes are not added to dyn_event, so they are never
	 * searched in find_probe_event(). Therefore, there is no concern of
	 * a duplicated name "DUMMY_EVENT" here.
	 */
	tu = alloc_trace_uprobe(UPROBE_EVENT_SYSTEM, "DUMMY_EVENT", 0,
				is_return);
	if (IS_ERR(tu)) {
		pr_info("Failed to allocate trace_uprobe.(%d)\n",
			(int)PTR_ERR(tu));
		path_put(&path);
		return ERR_CAST(tu);
	}

	tu->offset = offs;
	tu->path = path;
	tu->ref_ctr_offset = ref_ctr_offset;
	tu->filename = kstrdup(name, GFP_KERNEL);
	init_trace_event_call(tu);

	if (traceprobe_set_print_fmt(&tu->tp, is_ret_probe(tu)) < 0) {
		ret = -ENOMEM;
		goto error;
	}

	return trace_probe_event_call(&tu->tp);
error:
	free_trace_uprobe(tu);
	return ERR_PTR(ret);
}
void destroy_local_trace_uprobe(struct trace_event_call *event_call)
{
	struct trace_uprobe *tu;

	tu = trace_uprobe_primary_from_call(event_call);

	free_trace_uprobe(tu);
}
#endif	/* CONFIG_PERF_EVENTS */
/* Make a trace interface for controlling probe points */
static __init int init_uprobe_trace(void)
{
	struct dentry *d_tracer;
	int ret;

	ret = dyn_event_register(&trace_uprobe_ops);
	if (ret)
		return ret;

	d_tracer = tracing_init_dentry();
	if (IS_ERR(d_tracer))
		return 0;

	trace_create_file("uprobe_events", 0644, d_tracer,
				    NULL, &uprobe_events_ops);
	/* Profile interface */
	trace_create_file("uprobe_profile", 0444, d_tracer,
				    NULL, &uprobe_profile_ops);

	return 0;
}
fs_initcall(init_uprobe_trace);