GNU Linux-libre 5.4.257-gnu1: kernel/trace/trace_kprobe.c
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Kprobes-based tracing events
4  *
5  * Created by Masami Hiramatsu <mhiramat@redhat.com>
6  *
7  */
8 #define pr_fmt(fmt)     "trace_kprobe: " fmt
9
10 #include <linux/security.h>
11 #include <linux/module.h>
12 #include <linux/uaccess.h>
13 #include <linux/rculist.h>
14 #include <linux/error-injection.h>
15
16 #include <asm/setup.h>  /* for COMMAND_LINE_SIZE */
17
18 #include "trace_dynevent.h"
19 #include "trace_kprobe_selftest.h"
20 #include "trace_probe.h"
21 #include "trace_probe_tmpl.h"
22
23 #define KPROBE_EVENT_SYSTEM "kprobes"
24 #define KRETPROBE_MAXACTIVE_MAX 4096
25 #define MAX_KPROBE_CMDLINE_SIZE 1024
26
27 /* Kprobe early definition from command line */
28 static char kprobe_boot_events_buf[COMMAND_LINE_SIZE] __initdata;
29 static bool kprobe_boot_events_enabled __initdata;
30
31 static int __init set_kprobe_boot_events(char *str)
32 {
33         strlcpy(kprobe_boot_events_buf, str, COMMAND_LINE_SIZE);
34         return 0;
35 }
36 __setup("kprobe_event=", set_kprobe_boot_events);
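/*
 * Illustrative note (not from the original source): based on
 * setup_boot_kprobe_events() further below, the boot parameter is a
 * semicolon-separated list of probe definitions in which commas stand in
 * for spaces. For example (symbol names are examples only):
 *
 *   kprobe_event=p,vfs_read;r,vfs_write
 *
 * is rewritten to "p vfs_read" and "r vfs_write", and each command is fed
 * to trace_run_command() during boot.
 */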
37
38 static int trace_kprobe_create(int argc, const char **argv);
39 static int trace_kprobe_show(struct seq_file *m, struct dyn_event *ev);
40 static int trace_kprobe_release(struct dyn_event *ev);
41 static bool trace_kprobe_is_busy(struct dyn_event *ev);
42 static bool trace_kprobe_match(const char *system, const char *event,
43                         int argc, const char **argv, struct dyn_event *ev);
44
45 static struct dyn_event_operations trace_kprobe_ops = {
46         .create = trace_kprobe_create,
47         .show = trace_kprobe_show,
48         .is_busy = trace_kprobe_is_busy,
49         .free = trace_kprobe_release,
50         .match = trace_kprobe_match,
51 };
52
53 /*
54  * Kprobe event core functions
55  */
56 struct trace_kprobe {
57         struct dyn_event        devent;
58         struct kretprobe        rp;     /* Use rp.kp for kprobe use */
59         unsigned long __percpu *nhit;
60         const char              *symbol;        /* symbol name */
61         struct trace_probe      tp;
62 };
63
64 static bool is_trace_kprobe(struct dyn_event *ev)
65 {
66         return ev->ops == &trace_kprobe_ops;
67 }
68
69 static struct trace_kprobe *to_trace_kprobe(struct dyn_event *ev)
70 {
71         return container_of(ev, struct trace_kprobe, devent);
72 }
73
74 /**
75  * for_each_trace_kprobe - iterate over the trace_kprobe list
76  * @pos:        the struct trace_kprobe * for each entry
77  * @dpos:       the struct dyn_event * to use as a loop cursor
78  */
79 #define for_each_trace_kprobe(pos, dpos)        \
80         for_each_dyn_event(dpos)                \
81                 if (is_trace_kprobe(dpos) && (pos = to_trace_kprobe(dpos)))
82
83 #define SIZEOF_TRACE_KPROBE(n)                          \
84         (offsetof(struct trace_kprobe, tp.args) +       \
85         (sizeof(struct probe_arg) * (n)))
86
87 static nokprobe_inline bool trace_kprobe_is_return(struct trace_kprobe *tk)
88 {
89         return tk->rp.handler != NULL;
90 }
91
92 static nokprobe_inline const char *trace_kprobe_symbol(struct trace_kprobe *tk)
93 {
94         return tk->symbol ? tk->symbol : "unknown";
95 }
96
97 static nokprobe_inline unsigned long trace_kprobe_offset(struct trace_kprobe *tk)
98 {
99         return tk->rp.kp.offset;
100 }
101
102 static nokprobe_inline bool trace_kprobe_has_gone(struct trace_kprobe *tk)
103 {
104         return !!(kprobe_gone(&tk->rp.kp));
105 }
106
107 static nokprobe_inline bool trace_kprobe_within_module(struct trace_kprobe *tk,
108                                                  struct module *mod)
109 {
110         int len = strlen(mod->name);
111         const char *name = trace_kprobe_symbol(tk);
112         return strncmp(mod->name, name, len) == 0 && name[len] == ':';
113 }
114
115 static nokprobe_inline bool trace_kprobe_module_exist(struct trace_kprobe *tk)
116 {
117         char *p;
118         bool ret;
119
120         if (!tk->symbol)
121                 return false;
122         p = strchr(tk->symbol, ':');
123         if (!p)
124                 return true;
125         *p = '\0';
126         mutex_lock(&module_mutex);
127         ret = !!find_module(tk->symbol);
128         mutex_unlock(&module_mutex);
129         *p = ':';
130
131         return ret;
132 }
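/*
 * Illustrative note: a probed symbol may carry a module prefix in the
 * "MOD:SYM" form (see the "[MOD:]KSYM[+OFFS]" syntax in
 * trace_kprobe_create() below), e.g. "ext4:ext4_sync_file", where the
 * module and symbol names here are examples only. The two helpers above
 * key off the ':' separator to match and look up the owning module.
 */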
133
134 static bool trace_kprobe_is_busy(struct dyn_event *ev)
135 {
136         struct trace_kprobe *tk = to_trace_kprobe(ev);
137
138         return trace_probe_is_enabled(&tk->tp);
139 }
140
141 static bool trace_kprobe_match_command_head(struct trace_kprobe *tk,
142                                             int argc, const char **argv)
143 {
144         char buf[MAX_ARGSTR_LEN + 1];
145
146         if (!argc)
147                 return true;
148
149         if (!tk->symbol)
150                 snprintf(buf, sizeof(buf), "0x%p", tk->rp.kp.addr);
151         else if (tk->rp.kp.offset)
152                 snprintf(buf, sizeof(buf), "%s+%u",
153                          trace_kprobe_symbol(tk), tk->rp.kp.offset);
154         else
155                 snprintf(buf, sizeof(buf), "%s", trace_kprobe_symbol(tk));
156         if (strcmp(buf, argv[0]))
157                 return false;
158         argc--; argv++;
159
160         return trace_probe_match_command_args(&tk->tp, argc, argv);
161 }
162
163 static bool trace_kprobe_match(const char *system, const char *event,
164                         int argc, const char **argv, struct dyn_event *ev)
165 {
166         struct trace_kprobe *tk = to_trace_kprobe(ev);
167
168         return strcmp(trace_probe_name(&tk->tp), event) == 0 &&
169             (!system || strcmp(trace_probe_group_name(&tk->tp), system) == 0) &&
170             trace_kprobe_match_command_head(tk, argc, argv);
171 }
172
173 static nokprobe_inline unsigned long trace_kprobe_nhit(struct trace_kprobe *tk)
174 {
175         unsigned long nhit = 0;
176         int cpu;
177
178         for_each_possible_cpu(cpu)
179                 nhit += *per_cpu_ptr(tk->nhit, cpu);
180
181         return nhit;
182 }
183
184 static nokprobe_inline bool trace_kprobe_is_registered(struct trace_kprobe *tk)
185 {
186         return !(list_empty(&tk->rp.kp.list) &&
187                  hlist_unhashed(&tk->rp.kp.hlist));
188 }
189
190 /* Return 0 if it fails to find the symbol address */
191 static nokprobe_inline
192 unsigned long trace_kprobe_address(struct trace_kprobe *tk)
193 {
194         unsigned long addr;
195
196         if (tk->symbol) {
197                 addr = (unsigned long)
198                         kallsyms_lookup_name(trace_kprobe_symbol(tk));
199                 if (addr)
200                         addr += tk->rp.kp.offset;
201         } else {
202                 addr = (unsigned long)tk->rp.kp.addr;
203         }
204         return addr;
205 }
206
207 static nokprobe_inline struct trace_kprobe *
208 trace_kprobe_primary_from_call(struct trace_event_call *call)
209 {
210         struct trace_probe *tp;
211
212         tp = trace_probe_primary_from_call(call);
213         if (WARN_ON_ONCE(!tp))
214                 return NULL;
215
216         return container_of(tp, struct trace_kprobe, tp);
217 }
218
219 bool trace_kprobe_on_func_entry(struct trace_event_call *call)
220 {
221         struct trace_kprobe *tk = trace_kprobe_primary_from_call(call);
222
223         return tk ? (kprobe_on_func_entry(tk->rp.kp.addr,
224                         tk->rp.kp.addr ? NULL : tk->rp.kp.symbol_name,
225                         tk->rp.kp.addr ? 0 : tk->rp.kp.offset) == 0) : false;
226 }
227
228 bool trace_kprobe_error_injectable(struct trace_event_call *call)
229 {
230         struct trace_kprobe *tk = trace_kprobe_primary_from_call(call);
231
232         return tk ? within_error_injection_list(trace_kprobe_address(tk)) :
233                false;
234 }
235
236 static int register_kprobe_event(struct trace_kprobe *tk);
237 static int unregister_kprobe_event(struct trace_kprobe *tk);
238
239 static int kprobe_dispatcher(struct kprobe *kp, struct pt_regs *regs);
240 static int kretprobe_dispatcher(struct kretprobe_instance *ri,
241                                 struct pt_regs *regs);
242
243 static void free_trace_kprobe(struct trace_kprobe *tk)
244 {
245         if (tk) {
246                 trace_probe_cleanup(&tk->tp);
247                 kfree(tk->symbol);
248                 free_percpu(tk->nhit);
249                 kfree(tk);
250         }
251 }
252
253 /*
254  * Allocate new trace_probe and initialize it (including kprobes).
255  */
256 static struct trace_kprobe *alloc_trace_kprobe(const char *group,
257                                              const char *event,
258                                              void *addr,
259                                              const char *symbol,
260                                              unsigned long offs,
261                                              int maxactive,
262                                              int nargs, bool is_return)
263 {
264         struct trace_kprobe *tk;
265         int ret = -ENOMEM;
266
267         tk = kzalloc(SIZEOF_TRACE_KPROBE(nargs), GFP_KERNEL);
268         if (!tk)
269                 return ERR_PTR(ret);
270
271         tk->nhit = alloc_percpu(unsigned long);
272         if (!tk->nhit)
273                 goto error;
274
275         if (symbol) {
276                 tk->symbol = kstrdup(symbol, GFP_KERNEL);
277                 if (!tk->symbol)
278                         goto error;
279                 tk->rp.kp.symbol_name = tk->symbol;
280                 tk->rp.kp.offset = offs;
281         } else
282                 tk->rp.kp.addr = addr;
283
284         if (is_return)
285                 tk->rp.handler = kretprobe_dispatcher;
286         else
287                 tk->rp.kp.pre_handler = kprobe_dispatcher;
288
289         tk->rp.maxactive = maxactive;
290         INIT_HLIST_NODE(&tk->rp.kp.hlist);
291         INIT_LIST_HEAD(&tk->rp.kp.list);
292
293         ret = trace_probe_init(&tk->tp, event, group, false);
294         if (ret < 0)
295                 goto error;
296
297         dyn_event_init(&tk->devent, &trace_kprobe_ops);
298         return tk;
299 error:
300         free_trace_kprobe(tk);
301         return ERR_PTR(ret);
302 }
303
304 static struct trace_kprobe *find_trace_kprobe(const char *event,
305                                               const char *group)
306 {
307         struct dyn_event *pos;
308         struct trace_kprobe *tk;
309
310         for_each_trace_kprobe(tk, pos)
311                 if (strcmp(trace_probe_name(&tk->tp), event) == 0 &&
312                     strcmp(trace_probe_group_name(&tk->tp), group) == 0)
313                         return tk;
314         return NULL;
315 }
316
317 static inline int __enable_trace_kprobe(struct trace_kprobe *tk)
318 {
319         int ret = 0;
320
321         if (trace_kprobe_is_registered(tk) && !trace_kprobe_has_gone(tk)) {
322                 if (trace_kprobe_is_return(tk))
323                         ret = enable_kretprobe(&tk->rp);
324                 else
325                         ret = enable_kprobe(&tk->rp.kp);
326         }
327
328         return ret;
329 }
330
331 static void __disable_trace_kprobe(struct trace_probe *tp)
332 {
333         struct trace_probe *pos;
334         struct trace_kprobe *tk;
335
336         list_for_each_entry(pos, trace_probe_probe_list(tp), list) {
337                 tk = container_of(pos, struct trace_kprobe, tp);
338                 if (!trace_kprobe_is_registered(tk))
339                         continue;
340                 if (trace_kprobe_is_return(tk))
341                         disable_kretprobe(&tk->rp);
342                 else
343                         disable_kprobe(&tk->rp.kp);
344         }
345 }
346
347 /*
348  * Enable trace_probe
349  * If file is NULL, enable the "perf" handler; otherwise enable the "trace" handler.
350  */
351 static int enable_trace_kprobe(struct trace_event_call *call,
352                                 struct trace_event_file *file)
353 {
354         struct trace_probe *pos, *tp;
355         struct trace_kprobe *tk;
356         bool enabled;
357         int ret = 0;
358
359         tp = trace_probe_primary_from_call(call);
360         if (WARN_ON_ONCE(!tp))
361                 return -ENODEV;
362         enabled = trace_probe_is_enabled(tp);
363
364         /* This also changes "enabled" state */
365         if (file) {
366                 ret = trace_probe_add_file(tp, file);
367                 if (ret)
368                         return ret;
369         } else
370                 trace_probe_set_flag(tp, TP_FLAG_PROFILE);
371
372         if (enabled)
373                 return 0;
374
375         list_for_each_entry(pos, trace_probe_probe_list(tp), list) {
376                 tk = container_of(pos, struct trace_kprobe, tp);
377                 if (trace_kprobe_has_gone(tk))
378                         continue;
379                 ret = __enable_trace_kprobe(tk);
380                 if (ret)
381                         break;
382                 enabled = true;
383         }
384
385         if (ret) {
386                 /* Failed to enable one of them. Roll back all */
387                 if (enabled)
388                         __disable_trace_kprobe(tp);
389                 if (file)
390                         trace_probe_remove_file(tp, file);
391                 else
392                         trace_probe_clear_flag(tp, TP_FLAG_PROFILE);
393         }
394
395         return ret;
396 }
397
398 /*
399  * Disable trace_probe
400  * If file is NULL, disable the "perf" handler; otherwise disable the "trace" handler.
401  */
402 static int disable_trace_kprobe(struct trace_event_call *call,
403                                 struct trace_event_file *file)
404 {
405         struct trace_probe *tp;
406
407         tp = trace_probe_primary_from_call(call);
408         if (WARN_ON_ONCE(!tp))
409                 return -ENODEV;
410
411         if (file) {
412                 if (!trace_probe_get_file_link(tp, file))
413                         return -ENOENT;
414                 if (!trace_probe_has_single_file(tp))
415                         goto out;
416                 trace_probe_clear_flag(tp, TP_FLAG_TRACE);
417         } else
418                 trace_probe_clear_flag(tp, TP_FLAG_PROFILE);
419
420         if (!trace_probe_is_enabled(tp))
421                 __disable_trace_kprobe(tp);
422
423  out:
424         if (file)
425                 /*
426                  * Synchronization is done in the function below. For a perf
427                  * event, file == NULL and perf_trace_event_unreg() calls
428                  * tracepoint_synchronize_unregister() to synchronize the
429                  * event itself, so we don't need to care about it here.
430                  */
431                 trace_probe_remove_file(tp, file);
432
433         return 0;
434 }
435
436 #if defined(CONFIG_DYNAMIC_FTRACE) && \
437         !defined(CONFIG_KPROBE_EVENTS_ON_NOTRACE)
438 static bool __within_notrace_func(unsigned long addr)
439 {
440         unsigned long offset, size;
441
442         if (!addr || !kallsyms_lookup_size_offset(addr, &size, &offset))
443                 return false;
444
445         /* Get the entry address of the target function */
446         addr -= offset;
447
448         /*
449          * Since ftrace_location_range() does inclusive range check, we need
450          * to subtract 1 byte from the end address.
451          */
452         return !ftrace_location_range(addr, addr + size - 1);
453 }
454
455 static bool within_notrace_func(struct trace_kprobe *tk)
456 {
457         unsigned long addr = trace_kprobe_address(tk);
458         char symname[KSYM_NAME_LEN], *p;
459
460         if (!__within_notrace_func(addr))
461                 return false;
462
463         /* Check if the address is on a suffixed-symbol */
464         if (!lookup_symbol_name(addr, symname)) {
465                 p = strchr(symname, '.');
466                 if (!p)
467                         return true;
468                 *p = '\0';
469                 addr = (unsigned long)kprobe_lookup_name(symname, 0);
470                 if (addr)
471                         return __within_notrace_func(addr);
472         }
473
474         return true;
475 }
476 #else
477 #define within_notrace_func(tk) (false)
478 #endif
479
480 /* Internal register function - just handle k*probes and flags */
481 static int __register_trace_kprobe(struct trace_kprobe *tk)
482 {
483         int i, ret;
484
485         ret = security_locked_down(LOCKDOWN_KPROBES);
486         if (ret)
487                 return ret;
488
489         if (trace_kprobe_is_registered(tk))
490                 return -EINVAL;
491
492         if (within_notrace_func(tk)) {
493                 pr_warn("Could not probe notrace function %s\n",
494                         trace_kprobe_symbol(tk));
495                 return -EINVAL;
496         }
497
498         for (i = 0; i < tk->tp.nr_args; i++) {
499                 ret = traceprobe_update_arg(&tk->tp.args[i]);
500                 if (ret)
501                         return ret;
502         }
503
504         /* Set/clear disabled flag according to tp->flag */
505         if (trace_probe_is_enabled(&tk->tp))
506                 tk->rp.kp.flags &= ~KPROBE_FLAG_DISABLED;
507         else
508                 tk->rp.kp.flags |= KPROBE_FLAG_DISABLED;
509
510         if (trace_kprobe_is_return(tk))
511                 ret = register_kretprobe(&tk->rp);
512         else
513                 ret = register_kprobe(&tk->rp.kp);
514
515         return ret;
516 }
517
518 /* Internal unregister function - just handle k*probes and flags */
519 static void __unregister_trace_kprobe(struct trace_kprobe *tk)
520 {
521         if (trace_kprobe_is_registered(tk)) {
522                 if (trace_kprobe_is_return(tk))
523                         unregister_kretprobe(&tk->rp);
524                 else
525                         unregister_kprobe(&tk->rp.kp);
526                 /* Cleanup kprobe for reuse and mark it unregistered */
527                 INIT_HLIST_NODE(&tk->rp.kp.hlist);
528                 INIT_LIST_HEAD(&tk->rp.kp.list);
529                 if (tk->rp.kp.symbol_name)
530                         tk->rp.kp.addr = NULL;
531         }
532 }
533
534 /* Unregister a trace_probe and probe_event */
535 static int unregister_trace_kprobe(struct trace_kprobe *tk)
536 {
537         /* If other probes are on the event, just unregister kprobe */
538         if (trace_probe_has_sibling(&tk->tp))
539                 goto unreg;
540
541         /* An enabled event cannot be unregistered */
542         if (trace_probe_is_enabled(&tk->tp))
543                 return -EBUSY;
544
545         /* Will fail if probe is being used by ftrace or perf */
546         if (unregister_kprobe_event(tk))
547                 return -EBUSY;
548
549 unreg:
550         __unregister_trace_kprobe(tk);
551         dyn_event_remove(&tk->devent);
552         trace_probe_unlink(&tk->tp);
553
554         return 0;
555 }
556
557 static bool trace_kprobe_has_same_kprobe(struct trace_kprobe *orig,
558                                          struct trace_kprobe *comp)
559 {
560         struct trace_probe_event *tpe = orig->tp.event;
561         struct trace_probe *pos;
562         int i;
563
564         list_for_each_entry(pos, &tpe->probes, list) {
565                 orig = container_of(pos, struct trace_kprobe, tp);
566                 if (strcmp(trace_kprobe_symbol(orig),
567                            trace_kprobe_symbol(comp)) ||
568                     trace_kprobe_offset(orig) != trace_kprobe_offset(comp))
569                         continue;
570
571                 /*
572                  * trace_probe_compare_arg_type() ensured that nr_args and
573                  * each argument name and type are same. Let's compare comm.
574                  */
575                 for (i = 0; i < orig->tp.nr_args; i++) {
576                         if (strcmp(orig->tp.args[i].comm,
577                                    comp->tp.args[i].comm))
578                                 break;
579                 }
580
581                 if (i == orig->tp.nr_args)
582                         return true;
583         }
584
585         return false;
586 }
587
588 static int append_trace_kprobe(struct trace_kprobe *tk, struct trace_kprobe *to)
589 {
590         int ret;
591
592         ret = trace_probe_compare_arg_type(&tk->tp, &to->tp);
593         if (ret) {
594                 /* Note that arguments start at index 2 */
595                 trace_probe_log_set_index(ret + 1);
596                 trace_probe_log_err(0, DIFF_ARG_TYPE);
597                 return -EEXIST;
598         }
599         if (trace_kprobe_has_same_kprobe(to, tk)) {
600                 trace_probe_log_set_index(0);
601                 trace_probe_log_err(0, SAME_PROBE);
602                 return -EEXIST;
603         }
604
605         /* Append to existing event */
606         ret = trace_probe_append(&tk->tp, &to->tp);
607         if (ret)
608                 return ret;
609
610         /* Register k*probe */
611         ret = __register_trace_kprobe(tk);
612         if (ret == -ENOENT && !trace_kprobe_module_exist(tk)) {
613                 pr_warn("This probe might be able to register after target module is loaded. Continue.\n");
614                 ret = 0;
615         }
616
617         if (ret)
618                 trace_probe_unlink(&tk->tp);
619         else
620                 dyn_event_add(&tk->devent);
621
622         return ret;
623 }
624
625 /* Register a trace_probe and probe_event */
626 static int register_trace_kprobe(struct trace_kprobe *tk)
627 {
628         struct trace_kprobe *old_tk;
629         int ret;
630
631         mutex_lock(&event_mutex);
632
633         old_tk = find_trace_kprobe(trace_probe_name(&tk->tp),
634                                    trace_probe_group_name(&tk->tp));
635         if (old_tk) {
636                 if (trace_kprobe_is_return(tk) != trace_kprobe_is_return(old_tk)) {
637                         trace_probe_log_set_index(0);
638                         trace_probe_log_err(0, DIFF_PROBE_TYPE);
639                         ret = -EEXIST;
640                 } else {
641                         ret = append_trace_kprobe(tk, old_tk);
642                 }
643                 goto end;
644         }
645
646         /* Register new event */
647         ret = register_kprobe_event(tk);
648         if (ret) {
649                 if (ret == -EEXIST) {
650                         trace_probe_log_set_index(0);
651                         trace_probe_log_err(0, EVENT_EXIST);
652                 } else
653                         pr_warn("Failed to register probe event(%d)\n", ret);
654                 goto end;
655         }
656
657         /* Register k*probe */
658         ret = __register_trace_kprobe(tk);
659         if (ret == -ENOENT && !trace_kprobe_module_exist(tk)) {
660                 pr_warn("This probe might be able to register after target module is loaded. Continue.\n");
661                 ret = 0;
662         }
663
664         if (ret < 0)
665                 unregister_kprobe_event(tk);
666         else
667                 dyn_event_add(&tk->devent);
668
669 end:
670         mutex_unlock(&event_mutex);
671         return ret;
672 }
673
674 /* Module notifier call back, checking event on the module */
675 static int trace_kprobe_module_callback(struct notifier_block *nb,
676                                        unsigned long val, void *data)
677 {
678         struct module *mod = data;
679         struct dyn_event *pos;
680         struct trace_kprobe *tk;
681         int ret;
682
683         if (val != MODULE_STATE_COMING)
684                 return NOTIFY_DONE;
685
686         /* Update probes on coming module */
687         mutex_lock(&event_mutex);
688         for_each_trace_kprobe(tk, pos) {
689                 if (trace_kprobe_within_module(tk, mod)) {
690                         /* No need to check busy - this probe should already be gone. */
691                         __unregister_trace_kprobe(tk);
692                         ret = __register_trace_kprobe(tk);
693                         if (ret)
694                                 pr_warn("Failed to re-register probe %s on %s: %d\n",
695                                         trace_probe_name(&tk->tp),
696                                         mod->name, ret);
697                 }
698         }
699         mutex_unlock(&event_mutex);
700
701         return NOTIFY_DONE;
702 }
703
704 static struct notifier_block trace_kprobe_module_nb = {
705         .notifier_call = trace_kprobe_module_callback,
706         .priority = 1   /* Invoked after kprobe module callback */
707 };
708
709 /* Convert ':' and '.' in symbol names into '_' when generating event names */
710 static inline void sanitize_event_name(char *name)
711 {
712         while (*name++ != '\0')
713                 if (*name == ':' || *name == '.')
714                         *name = '_';
715 }
716
717 static int trace_kprobe_create(int argc, const char *argv[])
718 {
719         /*
720          * Argument syntax:
721          *  - Add kprobe:
722          *      p[:[GRP/]EVENT] [MOD:]KSYM[+OFFS]|KADDR [FETCHARGS]
723          *  - Add kretprobe:
724          *      r[MAXACTIVE][:[GRP/]EVENT] [MOD:]KSYM[+0] [FETCHARGS]
725          * Fetch args:
726          *  $retval     : fetch return value
727          *  $stack      : fetch stack address
728          *  $stackN     : fetch Nth of stack (N:0-)
729          *  $comm       : fetch current task comm
730          *  @ADDR       : fetch memory at ADDR (ADDR should be in kernel)
731          *  @SYM[+|-offs] : fetch memory at SYM +|- offs (SYM is a data symbol)
732          *  %REG        : fetch register REG
733          * Dereferencing memory fetch:
734          *  +|-offs(ARG) : fetch memory at ARG +|- offs address.
735          * Alias name of args:
736          *  NAME=FETCHARG : set NAME as alias of FETCHARG.
737          * Type of args:
738          *  FETCHARG:TYPE : use TYPE instead of unsigned long.
739          */
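        /*
         * Illustrative examples of the syntax above (symbol and event names
         * are examples only, not taken from this file); such commands are
         * normally written to the tracefs "kprobe_events" file:
         *
         *   p:myprobe do_sys_open                 probe the entry of do_sys_open
         *   r:myretprobe do_sys_open $retval      probe its return, fetch $retval
         *   p:myprobe2 do_sys_open+4 comm=$comm   probe at KSYM+OFFS, alias an arg
         */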
740         struct trace_kprobe *tk = NULL;
741         int i, len, ret = 0;
742         bool is_return = false;
743         char *symbol = NULL, *tmp = NULL;
744         const char *event = NULL, *group = KPROBE_EVENT_SYSTEM;
745         int maxactive = 0;
746         long offset = 0;
747         void *addr = NULL;
748         char buf[MAX_EVENT_NAME_LEN];
749         unsigned int flags = TPARG_FL_KERNEL;
750
751         switch (argv[0][0]) {
752         case 'r':
753                 is_return = true;
754                 flags |= TPARG_FL_RETURN;
755                 break;
756         case 'p':
757                 break;
758         default:
759                 return -ECANCELED;
760         }
761         if (argc < 2)
762                 return -ECANCELED;
763
764         trace_probe_log_init("trace_kprobe", argc, argv);
765
766         event = strchr(&argv[0][1], ':');
767         if (event)
768                 event++;
769
770         if (isdigit(argv[0][1])) {
771                 if (!is_return) {
772                         trace_probe_log_err(1, MAXACT_NO_KPROBE);
773                         goto parse_error;
774                 }
775                 if (event)
776                         len = event - &argv[0][1] - 1;
777                 else
778                         len = strlen(&argv[0][1]);
779                 if (len > MAX_EVENT_NAME_LEN - 1) {
780                         trace_probe_log_err(1, BAD_MAXACT);
781                         goto parse_error;
782                 }
783                 memcpy(buf, &argv[0][1], len);
784                 buf[len] = '\0';
785                 ret = kstrtouint(buf, 0, &maxactive);
786                 if (ret || !maxactive) {
787                         trace_probe_log_err(1, BAD_MAXACT);
788                         goto parse_error;
789                 }
790                 /* kretprobe instances are iterated over via a list. The
791                  * maximum should stay reasonable.
792                  */
793                 if (maxactive > KRETPROBE_MAXACTIVE_MAX) {
794                         trace_probe_log_err(1, MAXACT_TOO_BIG);
795                         goto parse_error;
796                 }
797         }
798
799         /* Try to parse an address. If that fails, try to read the
800          * input as a symbol. */
801         if (kstrtoul(argv[1], 0, (unsigned long *)&addr)) {
802                 trace_probe_log_set_index(1);
803                 /* Check whether uprobe event specified */
804                 if (strchr(argv[1], '/') && strchr(argv[1], ':')) {
805                         ret = -ECANCELED;
806                         goto error;
807                 }
808                 /* a symbol specified */
809                 symbol = kstrdup(argv[1], GFP_KERNEL);
810                 if (!symbol)
811                         return -ENOMEM;
812                 /* TODO: support .init module functions */
813                 ret = traceprobe_split_symbol_offset(symbol, &offset);
814                 if (ret || offset < 0 || offset > UINT_MAX) {
815                         trace_probe_log_err(0, BAD_PROBE_ADDR);
816                         goto parse_error;
817                 }
818                 ret = kprobe_on_func_entry(NULL, symbol, offset);
819                 if (ret == 0)
820                         flags |= TPARG_FL_FENTRY;
821                 /* Defer the ENOENT case until the kprobe is registered */
822                 if (ret == -EINVAL && is_return) {
823                         trace_probe_log_err(0, BAD_RETPROBE);
824                         goto parse_error;
825                 }
826         }
827
828         trace_probe_log_set_index(0);
829         if (event) {
830                 ret = traceprobe_parse_event_name(&event, &group, buf,
831                                                   event - argv[0]);
832                 if (ret)
833                         goto parse_error;
834         } else {
835                 /* Make a new event name */
836                 if (symbol)
837                         snprintf(buf, MAX_EVENT_NAME_LEN, "%c_%s_%ld",
838                                  is_return ? 'r' : 'p', symbol, offset);
839                 else
840                         snprintf(buf, MAX_EVENT_NAME_LEN, "%c_0x%p",
841                                  is_return ? 'r' : 'p', addr);
842                 sanitize_event_name(buf);
843                 event = buf;
844         }
845
846         /* setup a probe */
847         tk = alloc_trace_kprobe(group, event, addr, symbol, offset, maxactive,
848                                argc - 2, is_return);
849         if (IS_ERR(tk)) {
850                 ret = PTR_ERR(tk);
851                 /* This must return -ENOMEM, else there is a bug */
852                 WARN_ON_ONCE(ret != -ENOMEM);
853                 goto out;       /* We know tk is not allocated */
854         }
855         argc -= 2; argv += 2;
856
857         /* parse arguments */
858         for (i = 0; i < argc && i < MAX_TRACE_ARGS; i++) {
859                 tmp = kstrdup(argv[i], GFP_KERNEL);
860                 if (!tmp) {
861                         ret = -ENOMEM;
862                         goto error;
863                 }
864
865                 trace_probe_log_set_index(i + 2);
866                 ret = traceprobe_parse_probe_arg(&tk->tp, i, tmp, flags);
867                 kfree(tmp);
868                 if (ret)
869                         goto error;     /* This can be -ENOMEM */
870         }
871
872         ret = traceprobe_set_print_fmt(&tk->tp, is_return);
873         if (ret < 0)
874                 goto error;
875
876         ret = register_trace_kprobe(tk);
877         if (ret) {
878                 trace_probe_log_set_index(1);
879                 if (ret == -EILSEQ)
880                         trace_probe_log_err(0, BAD_INSN_BNDRY);
881                 else if (ret == -ENOENT)
882                         trace_probe_log_err(0, BAD_PROBE_ADDR);
883                 else if (ret != -ENOMEM && ret != -EEXIST)
884                         trace_probe_log_err(0, FAIL_REG_PROBE);
885                 goto error;
886         }
887
888 out:
889         trace_probe_log_clear();
890         kfree(symbol);
891         return ret;
892
893 parse_error:
894         ret = -EINVAL;
895 error:
896         free_trace_kprobe(tk);
897         goto out;
898 }
899
900 static int create_or_delete_trace_kprobe(int argc, char **argv)
901 {
902         int ret;
903
904         if (argv[0][0] == '-')
905                 return dyn_event_release(argc, argv, &trace_kprobe_ops);
906
907         ret = trace_kprobe_create(argc, (const char **)argv);
908         return ret == -ECANCELED ? -EINVAL : ret;
909 }
910
911 static int trace_kprobe_release(struct dyn_event *ev)
912 {
913         struct trace_kprobe *tk = to_trace_kprobe(ev);
914         int ret = unregister_trace_kprobe(tk);
915
916         if (!ret)
917                 free_trace_kprobe(tk);
918         return ret;
919 }
920
921 static int trace_kprobe_show(struct seq_file *m, struct dyn_event *ev)
922 {
923         struct trace_kprobe *tk = to_trace_kprobe(ev);
924         int i;
925
926         seq_putc(m, trace_kprobe_is_return(tk) ? 'r' : 'p');
927         if (trace_kprobe_is_return(tk) && tk->rp.maxactive)
928                 seq_printf(m, "%d", tk->rp.maxactive);
929         seq_printf(m, ":%s/%s", trace_probe_group_name(&tk->tp),
930                                 trace_probe_name(&tk->tp));
931
932         if (!tk->symbol)
933                 seq_printf(m, " 0x%p", tk->rp.kp.addr);
934         else if (tk->rp.kp.offset)
935                 seq_printf(m, " %s+%u", trace_kprobe_symbol(tk),
936                            tk->rp.kp.offset);
937         else
938                 seq_printf(m, " %s", trace_kprobe_symbol(tk));
939
940         for (i = 0; i < tk->tp.nr_args; i++)
941                 seq_printf(m, " %s=%s", tk->tp.args[i].name, tk->tp.args[i].comm);
942         seq_putc(m, '\n');
943
944         return 0;
945 }
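/*
 * Illustrative output (event and argument names are examples only): for a
 * probe defined as "p:myprobe do_sys_open comm=$comm", the format above
 * prints
 *
 *   p:kprobes/myprobe do_sys_open comm=$comm
 *
 * when the tracefs "kprobe_events" file is read.
 */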
946
947 static int probes_seq_show(struct seq_file *m, void *v)
948 {
949         struct dyn_event *ev = v;
950
951         if (!is_trace_kprobe(ev))
952                 return 0;
953
954         return trace_kprobe_show(m, ev);
955 }
956
957 static const struct seq_operations probes_seq_op = {
958         .start  = dyn_event_seq_start,
959         .next   = dyn_event_seq_next,
960         .stop   = dyn_event_seq_stop,
961         .show   = probes_seq_show
962 };
963
964 static int probes_open(struct inode *inode, struct file *file)
965 {
966         int ret;
967
968         ret = security_locked_down(LOCKDOWN_TRACEFS);
969         if (ret)
970                 return ret;
971
972         if ((file->f_mode & FMODE_WRITE) && (file->f_flags & O_TRUNC)) {
973                 ret = dyn_events_release_all(&trace_kprobe_ops);
974                 if (ret < 0)
975                         return ret;
976         }
977
978         return seq_open(file, &probes_seq_op);
979 }
980
981 static ssize_t probes_write(struct file *file, const char __user *buffer,
982                             size_t count, loff_t *ppos)
983 {
984         return trace_parse_run_command(file, buffer, count, ppos,
985                                        create_or_delete_trace_kprobe);
986 }
987
988 static const struct file_operations kprobe_events_ops = {
989         .owner          = THIS_MODULE,
990         .open           = probes_open,
991         .read           = seq_read,
992         .llseek         = seq_lseek,
993         .release        = seq_release,
994         .write          = probes_write,
995 };
996
997 /* Probes profiling interfaces */
998 static int probes_profile_seq_show(struct seq_file *m, void *v)
999 {
1000         struct dyn_event *ev = v;
1001         struct trace_kprobe *tk;
1002         unsigned long nmissed;
1003
1004         if (!is_trace_kprobe(ev))
1005                 return 0;
1006
1007         tk = to_trace_kprobe(ev);
1008         nmissed = trace_kprobe_is_return(tk) ?
1009                 tk->rp.kp.nmissed + tk->rp.nmissed : tk->rp.kp.nmissed;
1010         seq_printf(m, "  %-44s %15lu %15lu\n",
1011                    trace_probe_name(&tk->tp),
1012                    trace_kprobe_nhit(tk),
1013                    nmissed);
1014
1015         return 0;
1016 }
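/*
 * Illustrative example (event name and counts are examples only): with the
 * format string above, a line of the "kprobe_profile" file looks like
 *
 *   myprobe                                                  150               0
 *
 * i.e. probe name, hit count (nhit) and missed count (nmissed).
 */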
1017
1018 static const struct seq_operations profile_seq_op = {
1019         .start  = dyn_event_seq_start,
1020         .next   = dyn_event_seq_next,
1021         .stop   = dyn_event_seq_stop,
1022         .show   = probes_profile_seq_show
1023 };
1024
1025 static int profile_open(struct inode *inode, struct file *file)
1026 {
1027         int ret;
1028
1029         ret = security_locked_down(LOCKDOWN_TRACEFS);
1030         if (ret)
1031                 return ret;
1032
1033         return seq_open(file, &profile_seq_op);
1034 }
1035
1036 static const struct file_operations kprobe_profile_ops = {
1037         .owner          = THIS_MODULE,
1038         .open           = profile_open,
1039         .read           = seq_read,
1040         .llseek         = seq_lseek,
1041         .release        = seq_release,
1042 };
1043
1044 /* Kprobe specific fetch functions */
1045
1046 /* Return the length of the string, including the terminating NUL byte */
1047 static nokprobe_inline int
1048 fetch_store_strlen(unsigned long addr)
1049 {
1050         int ret, len = 0;
1051         u8 c;
1052
1053         do {
1054                 ret = probe_kernel_read(&c, (u8 *)addr + len, 1);
1055                 len++;
1056         } while (c && ret == 0 && len < MAX_STRING_SIZE);
1057
1058         return (ret < 0) ? ret : len;
1059 }
1060
1061 /* Return the length of the string, including the terminating NUL byte */
1062 static nokprobe_inline int
1063 fetch_store_strlen_user(unsigned long addr)
1064 {
1065         const void __user *uaddr =  (__force const void __user *)addr;
1066
1067         return strnlen_unsafe_user(uaddr, MAX_STRING_SIZE);
1068 }
1069
1070 /*
1071  * Fetch a null-terminated string. Caller MUST set *(u32 *)dest with max
1072  * length and relative data location.
1073  */
1074 static nokprobe_inline int
1075 fetch_store_string(unsigned long addr, void *dest, void *base)
1076 {
1077         int maxlen = get_loc_len(*(u32 *)dest);
1078         void *__dest;
1079         long ret;
1080
1081         if (unlikely(!maxlen))
1082                 return -ENOMEM;
1083
1084         __dest = get_loc_data(dest, base);
1085
1086         /*
1087          * Try to get string again, since the string can be changed while
1088          * probing.
1089          */
1090         ret = strncpy_from_unsafe(__dest, (void *)addr, maxlen);
1091         if (ret >= 0)
1092                 *(u32 *)dest = make_data_loc(ret, __dest - base);
1093
1094         return ret;
1095 }
1096
1097 /*
1098  * Fetch a null-terminated string from user. Caller MUST set *(u32 *)dest
1099  * with max length and relative data location.
1100  */
1101 static nokprobe_inline int
1102 fetch_store_string_user(unsigned long addr, void *dest, void *base)
1103 {
1104         const void __user *uaddr =  (__force const void __user *)addr;
1105         int maxlen = get_loc_len(*(u32 *)dest);
1106         void *__dest;
1107         long ret;
1108
1109         if (unlikely(!maxlen))
1110                 return -ENOMEM;
1111
1112         __dest = get_loc_data(dest, base);
1113
1114         ret = strncpy_from_unsafe_user(__dest, uaddr, maxlen);
1115         if (ret >= 0)
1116                 *(u32 *)dest = make_data_loc(ret, __dest - base);
1117
1118         return ret;
1119 }
1120
1121 static nokprobe_inline int
1122 probe_mem_read(void *dest, void *src, size_t size)
1123 {
1124         return probe_kernel_read(dest, src, size);
1125 }
1126
1127 static nokprobe_inline int
1128 probe_mem_read_user(void *dest, void *src, size_t size)
1129 {
1130         const void __user *uaddr =  (__force const void __user *)src;
1131
1132         return probe_user_read(dest, uaddr, size);
1133 }
1134
1135 /* Note that we don't verify it, since the code does not come from user space */
1136 static int
1137 process_fetch_insn(struct fetch_insn *code, void *rec, void *dest,
1138                    void *base)
1139 {
1140         struct pt_regs *regs = rec;
1141         unsigned long val;
1142
1143 retry:
1144         /* 1st stage: get value from context */
1145         switch (code->op) {
1146         case FETCH_OP_REG:
1147                 val = regs_get_register(regs, code->param);
1148                 break;
1149         case FETCH_OP_STACK:
1150                 val = regs_get_kernel_stack_nth(regs, code->param);
1151                 break;
1152         case FETCH_OP_STACKP:
1153                 val = kernel_stack_pointer(regs);
1154                 break;
1155         case FETCH_OP_RETVAL:
1156                 val = regs_return_value(regs);
1157                 break;
1158         case FETCH_OP_IMM:
1159                 val = code->immediate;
1160                 break;
1161         case FETCH_OP_COMM:
1162                 val = (unsigned long)current->comm;
1163                 break;
1164         case FETCH_OP_DATA:
1165                 val = (unsigned long)code->data;
1166                 break;
1167 #ifdef CONFIG_HAVE_FUNCTION_ARG_ACCESS_API
1168         case FETCH_OP_ARG:
1169                 val = regs_get_kernel_argument(regs, code->param);
1170                 break;
1171 #endif
1172         case FETCH_NOP_SYMBOL:  /* Ignore a place holder */
1173                 code++;
1174                 goto retry;
1175         default:
1176                 return -EILSEQ;
1177         }
1178         code++;
1179
1180         return process_fetch_insn_bottom(code, val, dest, base);
1181 }
1182 NOKPROBE_SYMBOL(process_fetch_insn)
1183
1184 /* Kprobe handler */
1185 static nokprobe_inline void
1186 __kprobe_trace_func(struct trace_kprobe *tk, struct pt_regs *regs,
1187                     struct trace_event_file *trace_file)
1188 {
1189         struct kprobe_trace_entry_head *entry;
1190         struct ring_buffer_event *event;
1191         struct ring_buffer *buffer;
1192         int size, dsize, pc;
1193         unsigned long irq_flags;
1194         struct trace_event_call *call = trace_probe_event_call(&tk->tp);
1195
1196         WARN_ON(call != trace_file->event_call);
1197
1198         if (trace_trigger_soft_disabled(trace_file))
1199                 return;
1200
1201         local_save_flags(irq_flags);
1202         pc = preempt_count();
1203
1204         dsize = __get_data_size(&tk->tp, regs);
1205         size = sizeof(*entry) + tk->tp.size + dsize;
1206
1207         event = trace_event_buffer_lock_reserve(&buffer, trace_file,
1208                                                 call->event.type,
1209                                                 size, irq_flags, pc);
1210         if (!event)
1211                 return;
1212
1213         entry = ring_buffer_event_data(event);
1214         entry->ip = (unsigned long)tk->rp.kp.addr;
1215         store_trace_args(&entry[1], &tk->tp, regs, sizeof(*entry), dsize);
1216
1217         event_trigger_unlock_commit_regs(trace_file, buffer, event,
1218                                          entry, irq_flags, pc, regs);
1219 }
1220
1221 static void
1222 kprobe_trace_func(struct trace_kprobe *tk, struct pt_regs *regs)
1223 {
1224         struct event_file_link *link;
1225
1226         trace_probe_for_each_link_rcu(link, &tk->tp)
1227                 __kprobe_trace_func(tk, regs, link->file);
1228 }
1229 NOKPROBE_SYMBOL(kprobe_trace_func);
1230
1231 /* Kretprobe handler */
1232 static nokprobe_inline void
1233 __kretprobe_trace_func(struct trace_kprobe *tk, struct kretprobe_instance *ri,
1234                        struct pt_regs *regs,
1235                        struct trace_event_file *trace_file)
1236 {
1237         struct kretprobe_trace_entry_head *entry;
1238         struct ring_buffer_event *event;
1239         struct ring_buffer *buffer;
1240         int size, pc, dsize;
1241         unsigned long irq_flags;
1242         struct trace_event_call *call = trace_probe_event_call(&tk->tp);
1243
1244         WARN_ON(call != trace_file->event_call);
1245
1246         if (trace_trigger_soft_disabled(trace_file))
1247                 return;
1248
1249         local_save_flags(irq_flags);
1250         pc = preempt_count();
1251
1252         dsize = __get_data_size(&tk->tp, regs);
1253         size = sizeof(*entry) + tk->tp.size + dsize;
1254
1255         event = trace_event_buffer_lock_reserve(&buffer, trace_file,
1256                                                 call->event.type,
1257                                                 size, irq_flags, pc);
1258         if (!event)
1259                 return;
1260
1261         entry = ring_buffer_event_data(event);
1262         entry->func = (unsigned long)tk->rp.kp.addr;
1263         entry->ret_ip = (unsigned long)ri->ret_addr;
1264         store_trace_args(&entry[1], &tk->tp, regs, sizeof(*entry), dsize);
1265
1266         event_trigger_unlock_commit_regs(trace_file, buffer, event,
1267                                          entry, irq_flags, pc, regs);
1268 }
1269
1270 static void
1271 kretprobe_trace_func(struct trace_kprobe *tk, struct kretprobe_instance *ri,
1272                      struct pt_regs *regs)
1273 {
1274         struct event_file_link *link;
1275
1276         trace_probe_for_each_link_rcu(link, &tk->tp)
1277                 __kretprobe_trace_func(tk, ri, regs, link->file);
1278 }
1279 NOKPROBE_SYMBOL(kretprobe_trace_func);
1280
1281 /* Event entry printers */
1282 static enum print_line_t
1283 print_kprobe_event(struct trace_iterator *iter, int flags,
1284                    struct trace_event *event)
1285 {
1286         struct kprobe_trace_entry_head *field;
1287         struct trace_seq *s = &iter->seq;
1288         struct trace_probe *tp;
1289
1290         field = (struct kprobe_trace_entry_head *)iter->ent;
1291         tp = trace_probe_primary_from_call(
1292                 container_of(event, struct trace_event_call, event));
1293         if (WARN_ON_ONCE(!tp))
1294                 goto out;
1295
1296         trace_seq_printf(s, "%s: (", trace_probe_name(tp));
1297
1298         if (!seq_print_ip_sym(s, field->ip, flags | TRACE_ITER_SYM_OFFSET))
1299                 goto out;
1300
1301         trace_seq_putc(s, ')');
1302
1303         if (print_probe_args(s, tp->args, tp->nr_args,
1304                              (u8 *)&field[1], field) < 0)
1305                 goto out;
1306
1307         trace_seq_putc(s, '\n');
1308  out:
1309         return trace_handle_return(s);
1310 }
1311
1312 static enum print_line_t
1313 print_kretprobe_event(struct trace_iterator *iter, int flags,
1314                       struct trace_event *event)
1315 {
1316         struct kretprobe_trace_entry_head *field;
1317         struct trace_seq *s = &iter->seq;
1318         struct trace_probe *tp;
1319
1320         field = (struct kretprobe_trace_entry_head *)iter->ent;
1321         tp = trace_probe_primary_from_call(
1322                 container_of(event, struct trace_event_call, event));
1323         if (WARN_ON_ONCE(!tp))
1324                 goto out;
1325
1326         trace_seq_printf(s, "%s: (", trace_probe_name(tp));
1327
1328         if (!seq_print_ip_sym(s, field->ret_ip, flags | TRACE_ITER_SYM_OFFSET))
1329                 goto out;
1330
1331         trace_seq_puts(s, " <- ");
1332
1333         if (!seq_print_ip_sym(s, field->func, flags & ~TRACE_ITER_SYM_OFFSET))
1334                 goto out;
1335
1336         trace_seq_putc(s, ')');
1337
1338         if (print_probe_args(s, tp->args, tp->nr_args,
1339                              (u8 *)&field[1], field) < 0)
1340                 goto out;
1341
1342         trace_seq_putc(s, '\n');
1343
1344  out:
1345         return trace_handle_return(s);
1346 }
1347
1348
1349 static int kprobe_event_define_fields(struct trace_event_call *event_call)
1350 {
1351         int ret;
1352         struct kprobe_trace_entry_head field;
1353         struct trace_probe *tp;
1354
1355         tp = trace_probe_primary_from_call(event_call);
1356         if (WARN_ON_ONCE(!tp))
1357                 return -ENOENT;
1358
1359         DEFINE_FIELD(unsigned long, ip, FIELD_STRING_IP, 0);
1360
1361         return traceprobe_define_arg_fields(event_call, sizeof(field), tp);
1362 }
1363
1364 static int kretprobe_event_define_fields(struct trace_event_call *event_call)
1365 {
1366         int ret;
1367         struct kretprobe_trace_entry_head field;
1368         struct trace_probe *tp;
1369
1370         tp = trace_probe_primary_from_call(event_call);
1371         if (WARN_ON_ONCE(!tp))
1372                 return -ENOENT;
1373
1374         DEFINE_FIELD(unsigned long, func, FIELD_STRING_FUNC, 0);
1375         DEFINE_FIELD(unsigned long, ret_ip, FIELD_STRING_RETIP, 0);
1376
1377         return traceprobe_define_arg_fields(event_call, sizeof(field), tp);
1378 }
1379
1380 #ifdef CONFIG_PERF_EVENTS
1381
1382 /* Kprobe profile handler */
1383 static int
1384 kprobe_perf_func(struct trace_kprobe *tk, struct pt_regs *regs)
1385 {
1386         struct trace_event_call *call = trace_probe_event_call(&tk->tp);
1387         struct kprobe_trace_entry_head *entry;
1388         struct hlist_head *head;
1389         int size, __size, dsize;
1390         int rctx;
1391
1392         if (bpf_prog_array_valid(call)) {
1393                 unsigned long orig_ip = instruction_pointer(regs);
1394                 int ret;
1395
1396                 ret = trace_call_bpf(call, regs);
1397
1398                 /*
1399                  * We need to check and see if we modified the pc of the
1400                  * pt_regs, and if so return 1 so that we don't do the
1401                  * single stepping.
1402                  */
1403                 if (orig_ip != instruction_pointer(regs))
1404                         return 1;
1405                 if (!ret)
1406                         return 0;
1407         }
1408
1409         head = this_cpu_ptr(call->perf_events);
1410         if (hlist_empty(head))
1411                 return 0;
1412
1413         dsize = __get_data_size(&tk->tp, regs);
1414         __size = sizeof(*entry) + tk->tp.size + dsize;
1415         size = ALIGN(__size + sizeof(u32), sizeof(u64));
1416         size -= sizeof(u32);
1417
1418         entry = perf_trace_buf_alloc(size, NULL, &rctx);
1419         if (!entry)
1420                 return 0;
1421
1422         entry->ip = (unsigned long)tk->rp.kp.addr;
1423         memset(&entry[1], 0, dsize);
1424         store_trace_args(&entry[1], &tk->tp, regs, sizeof(*entry), dsize);
1425         perf_trace_buf_submit(entry, size, rctx, call->event.type, 1, regs,
1426                               head, NULL);
1427         return 0;
1428 }
1429 NOKPROBE_SYMBOL(kprobe_perf_func);
1430
1431 /* Kretprobe profile handler */
1432 static void
1433 kretprobe_perf_func(struct trace_kprobe *tk, struct kretprobe_instance *ri,
1434                     struct pt_regs *regs)
1435 {
1436         struct trace_event_call *call = trace_probe_event_call(&tk->tp);
1437         struct kretprobe_trace_entry_head *entry;
1438         struct hlist_head *head;
1439         int size, __size, dsize;
1440         int rctx;
1441
1442         if (bpf_prog_array_valid(call) && !trace_call_bpf(call, regs))
1443                 return;
1444
1445         head = this_cpu_ptr(call->perf_events);
1446         if (hlist_empty(head))
1447                 return;
1448
1449         dsize = __get_data_size(&tk->tp, regs);
1450         __size = sizeof(*entry) + tk->tp.size + dsize;
1451         size = ALIGN(__size + sizeof(u32), sizeof(u64));
1452         size -= sizeof(u32);
1453
1454         entry = perf_trace_buf_alloc(size, NULL, &rctx);
1455         if (!entry)
1456                 return;
1457
1458         entry->func = (unsigned long)tk->rp.kp.addr;
1459         entry->ret_ip = (unsigned long)ri->ret_addr;
1460         store_trace_args(&entry[1], &tk->tp, regs, sizeof(*entry), dsize);
1461         perf_trace_buf_submit(entry, size, rctx, call->event.type, 1, regs,
1462                               head, NULL);
1463 }
1464 NOKPROBE_SYMBOL(kretprobe_perf_func);
1465
1466 int bpf_get_kprobe_info(const struct perf_event *event, u32 *fd_type,
1467                         const char **symbol, u64 *probe_offset,
1468                         u64 *probe_addr, bool perf_type_tracepoint)
1469 {
1470         const char *pevent = trace_event_name(event->tp_event);
1471         const char *group = event->tp_event->class->system;
1472         struct trace_kprobe *tk;
1473
1474         if (perf_type_tracepoint)
1475                 tk = find_trace_kprobe(pevent, group);
1476         else
1477                 tk = trace_kprobe_primary_from_call(event->tp_event);
1478         if (!tk)
1479                 return -EINVAL;
1480
1481         *fd_type = trace_kprobe_is_return(tk) ? BPF_FD_TYPE_KRETPROBE
1482                                               : BPF_FD_TYPE_KPROBE;
1483         if (tk->symbol) {
1484                 *symbol = tk->symbol;
1485                 *probe_offset = tk->rp.kp.offset;
1486                 *probe_addr = 0;
1487         } else {
1488                 *symbol = NULL;
1489                 *probe_offset = 0;
1490                 *probe_addr = (unsigned long)tk->rp.kp.addr;
1491         }
1492         return 0;
1493 }
1494 #endif  /* CONFIG_PERF_EVENTS */
1495
1496 /*
1497  * called by perf_trace_init() or __ftrace_set_clr_event() under event_mutex.
1498  *
1499  * kprobe_trace_self_tests_init() does enable_trace_kprobe/disable_trace_kprobe
1500  * lockless, but we can't race with this __init function.
1501  */
1502 static int kprobe_register(struct trace_event_call *event,
1503                            enum trace_reg type, void *data)
1504 {
1505         struct trace_event_file *file = data;
1506
1507         switch (type) {
1508         case TRACE_REG_REGISTER:
1509                 return enable_trace_kprobe(event, file);
1510         case TRACE_REG_UNREGISTER:
1511                 return disable_trace_kprobe(event, file);
1512
1513 #ifdef CONFIG_PERF_EVENTS
1514         case TRACE_REG_PERF_REGISTER:
1515                 return enable_trace_kprobe(event, NULL);
1516         case TRACE_REG_PERF_UNREGISTER:
1517                 return disable_trace_kprobe(event, NULL);
1518         case TRACE_REG_PERF_OPEN:
1519         case TRACE_REG_PERF_CLOSE:
1520         case TRACE_REG_PERF_ADD:
1521         case TRACE_REG_PERF_DEL:
1522                 return 0;
1523 #endif
1524         }
1525         return 0;
1526 }
1527
1528 static int kprobe_dispatcher(struct kprobe *kp, struct pt_regs *regs)
1529 {
1530         struct trace_kprobe *tk = container_of(kp, struct trace_kprobe, rp.kp);
1531         int ret = 0;
1532
1533         raw_cpu_inc(*tk->nhit);
1534
1535         if (trace_probe_test_flag(&tk->tp, TP_FLAG_TRACE))
1536                 kprobe_trace_func(tk, regs);
1537 #ifdef CONFIG_PERF_EVENTS
1538         if (trace_probe_test_flag(&tk->tp, TP_FLAG_PROFILE))
1539                 ret = kprobe_perf_func(tk, regs);
1540 #endif
1541         return ret;
1542 }
1543 NOKPROBE_SYMBOL(kprobe_dispatcher);
1544
1545 static int
1546 kretprobe_dispatcher(struct kretprobe_instance *ri, struct pt_regs *regs)
1547 {
1548         struct trace_kprobe *tk = container_of(ri->rp, struct trace_kprobe, rp);
1549
1550         raw_cpu_inc(*tk->nhit);
1551
1552         if (trace_probe_test_flag(&tk->tp, TP_FLAG_TRACE))
1553                 kretprobe_trace_func(tk, ri, regs);
1554 #ifdef CONFIG_PERF_EVENTS
1555         if (trace_probe_test_flag(&tk->tp, TP_FLAG_PROFILE))
1556                 kretprobe_perf_func(tk, ri, regs);
1557 #endif
1558         return 0;       /* We don't tweak the kernel, so just return 0 */
1559 }
1560 NOKPROBE_SYMBOL(kretprobe_dispatcher);
1561
1562 static struct trace_event_functions kretprobe_funcs = {
1563         .trace          = print_kretprobe_event
1564 };
1565
1566 static struct trace_event_functions kprobe_funcs = {
1567         .trace          = print_kprobe_event
1568 };
1569
1570 static inline void init_trace_event_call(struct trace_kprobe *tk)
1571 {
1572         struct trace_event_call *call = trace_probe_event_call(&tk->tp);
1573
1574         if (trace_kprobe_is_return(tk)) {
1575                 call->event.funcs = &kretprobe_funcs;
1576                 call->class->define_fields = kretprobe_event_define_fields;
1577         } else {
1578                 call->event.funcs = &kprobe_funcs;
1579                 call->class->define_fields = kprobe_event_define_fields;
1580         }
1581
1582         call->flags = TRACE_EVENT_FL_KPROBE;
1583         call->class->reg = kprobe_register;
1584 }
1585
1586 static int register_kprobe_event(struct trace_kprobe *tk)
1587 {
1588         init_trace_event_call(tk);
1589
1590         return trace_probe_register_event_call(&tk->tp);
1591 }
1592
1593 static int unregister_kprobe_event(struct trace_kprobe *tk)
1594 {
1595         return trace_probe_unregister_event_call(&tk->tp);
1596 }
1597
1598 #ifdef CONFIG_PERF_EVENTS
1599 /* create a trace_kprobe, but don't add it to global lists */
1600 struct trace_event_call *
1601 create_local_trace_kprobe(char *func, void *addr, unsigned long offs,
1602                           bool is_return)
1603 {
1604         struct trace_kprobe *tk;
1605         int ret;
1606         char *event;
1607
1608         /*
1609          * Local trace_kprobes are not added to dyn_event, so they are never
1610          * looked up by find_trace_kprobe(). Therefore, there is no concern
1611          * about duplicate names here.
1612          */
1613         event = func ? func : "DUMMY_EVENT";
1614
1615         tk = alloc_trace_kprobe(KPROBE_EVENT_SYSTEM, event, (void *)addr, func,
1616                                 offs, 0 /* maxactive */, 0 /* nargs */,
1617                                 is_return);
1618
1619         if (IS_ERR(tk)) {
1620                 pr_info("Failed to allocate trace_probe (%d)\n",
1621                         (int)PTR_ERR(tk));
1622                 return ERR_CAST(tk);
1623         }
1624
1625         init_trace_event_call(tk);
1626
1627         if (traceprobe_set_print_fmt(&tk->tp, trace_kprobe_is_return(tk)) < 0) {
1628                 ret = -ENOMEM;
1629                 goto error;
1630         }
1631
1632         ret = __register_trace_kprobe(tk);
1633         if (ret < 0)
1634                 goto error;
1635
1636         return trace_probe_event_call(&tk->tp);
1637 error:
1638         free_trace_kprobe(tk);
1639         return ERR_PTR(ret);
1640 }
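/*
 * A minimal usage sketch for the local (perf-only) interface above, with
 * error handling trimmed; "do_sys_open" is just an illustrative symbol and
 * the surrounding perf plumbing is assumed:
 *
 *	struct trace_event_call *call;
 *
 *	call = create_local_trace_kprobe("do_sys_open", NULL, 0, false);
 *	if (IS_ERR(call))
 *		return PTR_ERR(call);
 *	// ... attach the call to a perf event (e.g. via the perf kprobe PMU) ...
 *	destroy_local_trace_kprobe(call);
 */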
1641
1642 void destroy_local_trace_kprobe(struct trace_event_call *event_call)
1643 {
1644         struct trace_kprobe *tk;
1645
1646         tk = trace_kprobe_primary_from_call(event_call);
1647         if (unlikely(!tk))
1648                 return;
1649
1650         if (trace_probe_is_enabled(&tk->tp)) {
1651                 WARN_ON(1);
1652                 return;
1653         }
1654
1655         __unregister_trace_kprobe(tk);
1656
1657         free_trace_kprobe(tk);
1658 }
1659 #endif /* CONFIG_PERF_EVENTS */
1660
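/*
 * Enable, in the top-level trace instance, every kprobe event that was
 * created from the boot command line.
 */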
1661 static __init void enable_boot_kprobe_events(void)
1662 {
1663         struct trace_array *tr = top_trace_array();
1664         struct trace_event_file *file;
1665         struct trace_kprobe *tk;
1666         struct dyn_event *pos;
1667
1668         mutex_lock(&event_mutex);
1669         for_each_trace_kprobe(tk, pos) {
1670                 list_for_each_entry(file, &tr->events, list)
1671                         if (file->event_call == trace_probe_event_call(&tk->tp))
1672                                 trace_event_enable_disable(file, 1, 0);
1673         }
1674         mutex_unlock(&event_mutex);
1675 }
1676
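/*
 * Parse the "kprobe_event=" boot parameter saved earlier. Individual
 * definitions are separated by ';' and use ',' in place of spaces, so a
 * command line such as (hypothetical probe names, entry plus return probe
 * on do_sys_open):
 *
 *	kprobe_event=p:boot_open,do_sys_open;r:boot_open_ret,do_sys_open,$retval
 *
 * is rewritten and run through the same parser as the kprobe_events
 * tracefs file, and the resulting events are then enabled.
 */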
1677 static __init void setup_boot_kprobe_events(void)
1678 {
1679         char *p, *cmd = kprobe_boot_events_buf;
1680         int ret;
1681
1682         strreplace(kprobe_boot_events_buf, ',', ' ');
1683
1684         while (cmd && *cmd != '\0') {
1685                 p = strchr(cmd, ';');
1686                 if (p)
1687                         *p++ = '\0';
1688
1689                 ret = trace_run_command(cmd, create_or_delete_trace_kprobe);
1690                 if (ret)
1691                         pr_warn("Failed to add event(%d): %s\n", ret, cmd);
1692                 else
1693                         kprobe_boot_events_enabled = true;
1694
1695                 cmd = p;
1696         }
1697
1698         enable_boot_kprobe_events();
1699 }
1700
1701 /* Make a tracefs interface for controlling probe points */
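/*
 * The files created below live under the tracefs mount point (commonly
 * /sys/kernel/tracing). A hedged usage sketch from user space, with
 * "my_open" as a made-up event name:
 *
 *	echo 'p:my_open do_sys_open' >> /sys/kernel/tracing/kprobe_events
 *	cat /sys/kernel/tracing/kprobe_profile
 *
 * "kprobe_events" accepts probe definitions; "kprobe_profile" reports
 * per-probe hit counts (and, for return probes, missed-probe counts).
 */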
1702 static __init int init_kprobe_trace(void)
1703 {
1704         struct dentry *d_tracer;
1705         struct dentry *entry;
1706         int ret;
1707
1708         ret = dyn_event_register(&trace_kprobe_ops);
1709         if (ret)
1710                 return ret;
1711
1712         if (register_module_notifier(&trace_kprobe_module_nb))
1713                 return -EINVAL;
1714
1715         d_tracer = tracing_init_dentry();
1716         if (IS_ERR(d_tracer))
1717                 return 0;
1718
1719         entry = tracefs_create_file("kprobe_events", 0644, d_tracer,
1720                                     NULL, &kprobe_events_ops);
1721
1722         /* Event list interface */
1723         if (!entry)
1724                 pr_warn("Could not create tracefs 'kprobe_events' entry\n");
1725
1726         /* Profile interface */
1727         entry = tracefs_create_file("kprobe_profile", 0444, d_tracer,
1728                                     NULL, &kprobe_profile_ops);
1729
1730         if (!entry)
1731                 pr_warn("Could not create tracefs 'kprobe_profile' entry\n");
1732
1733         setup_boot_kprobe_events();
1734
1735         return 0;
1736 }
1737 fs_initcall(init_kprobe_trace);
1738
1739
1740 #ifdef CONFIG_FTRACE_STARTUP_TEST
1741 static __init struct trace_event_file *
1742 find_trace_probe_file(struct trace_kprobe *tk, struct trace_array *tr)
1743 {
1744         struct trace_event_file *file;
1745
1746         list_for_each_entry(file, &tr->events, list)
1747                 if (file->event_call == trace_probe_event_call(&tk->tp))
1748                         return file;
1749
1750         return NULL;
1751 }
1752
1753 /*
1754  * Nobody but us can call enable_trace_kprobe/disable_trace_kprobe at this
1755  * stage, so we can do this locklessly.
1756  */
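/*
 * The self test below registers an entry probe and a return probe on
 * kprobe_trace_selftest_target(), enables both, calls the target once,
 * checks that each probe fired exactly once, then disables and removes
 * the probes again.
 */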
1757 static __init int kprobe_trace_self_tests_init(void)
1758 {
1759         int ret, warn = 0;
1760         int (*target)(int, int, int, int, int, int);
1761         struct trace_kprobe *tk;
1762         struct trace_event_file *file;
1763
1764         if (tracing_is_disabled())
1765                 return -ENODEV;
1766
1767         if (kprobe_boot_events_enabled) {
1768                 pr_info("Skipping kprobe tests due to kprobe_event on cmdline\n");
1769                 return 0;
1770         }
1771
1772         target = kprobe_trace_selftest_target;
1773
1774         pr_info("Testing kprobe tracing: ");
1775
1776         ret = trace_run_command("p:testprobe kprobe_trace_selftest_target $stack $stack0 +0($stack)",
1777                                 create_or_delete_trace_kprobe);
1778         if (WARN_ON_ONCE(ret)) {
1779                 pr_warn("error on probing function entry.\n");
1780                 warn++;
1781         } else {
1782                 /* Enable trace point */
1783                 tk = find_trace_kprobe("testprobe", KPROBE_EVENT_SYSTEM);
1784                 if (WARN_ON_ONCE(tk == NULL)) {
1785                         pr_warn("error on getting new probe.\n");
1786                         warn++;
1787                 } else {
1788                         file = find_trace_probe_file(tk, top_trace_array());
1789                         if (WARN_ON_ONCE(file == NULL)) {
1790                                 pr_warn("error on getting probe file.\n");
1791                                 warn++;
1792                         } else
1793                                 enable_trace_kprobe(
1794                                         trace_probe_event_call(&tk->tp), file);
1795                 }
1796         }
1797
1798         ret = trace_run_command("r:testprobe2 kprobe_trace_selftest_target $retval",
1799                                 create_or_delete_trace_kprobe);
1800         if (WARN_ON_ONCE(ret)) {
1801                 pr_warn("error on probing function return.\n");
1802                 warn++;
1803         } else {
1804                 /* Enable trace point */
1805                 tk = find_trace_kprobe("testprobe2", KPROBE_EVENT_SYSTEM);
1806                 if (WARN_ON_ONCE(tk == NULL)) {
1807                         pr_warn("error on getting 2nd new probe.\n");
1808                         warn++;
1809                 } else {
1810                         file = find_trace_probe_file(tk, top_trace_array());
1811                         if (WARN_ON_ONCE(file == NULL)) {
1812                                 pr_warn("error on getting probe file.\n");
1813                                 warn++;
1814                         } else
1815                                 enable_trace_kprobe(
1816                                         trace_probe_event_call(&tk->tp), file);
1817                 }
1818         }
1819
1820         if (warn)
1821                 goto end;
1822
1823         ret = target(1, 2, 3, 4, 5, 6);
1824
1825         /*
1826          * No error is expected here; the check only prevents the optimizer
1827          * from removing the call to target(), which would otherwise have no
1828          * side effects and never be performed.
1829          */
1830         if (ret != 21)
1831                 warn++;
1832
1833         /* Disable the trace points before removing them */
1834         tk = find_trace_kprobe("testprobe", KPROBE_EVENT_SYSTEM);
1835         if (WARN_ON_ONCE(tk == NULL)) {
1836                 pr_warn("error on getting test probe.\n");
1837                 warn++;
1838         } else {
1839                 if (trace_kprobe_nhit(tk) != 1) {
1840                         pr_warn("incorrect number of testprobe hits\n");
1841                         warn++;
1842                 }
1843
1844                 file = find_trace_probe_file(tk, top_trace_array());
1845                 if (WARN_ON_ONCE(file == NULL)) {
1846                         pr_warn("error on getting probe file.\n");
1847                         warn++;
1848                 } else
1849                         disable_trace_kprobe(
1850                                 trace_probe_event_call(&tk->tp), file);
1851         }
1852
1853         tk = find_trace_kprobe("testprobe2", KPROBE_EVENT_SYSTEM);
1854         if (WARN_ON_ONCE(tk == NULL)) {
1855                 pr_warn("error on getting 2nd test probe.\n");
1856                 warn++;
1857         } else {
1858                 if (trace_kprobe_nhit(tk) != 1) {
1859                         pr_warn("incorrect number of testprobe2 hits\n");
1860                         warn++;
1861                 }
1862
1863                 file = find_trace_probe_file(tk, top_trace_array());
1864                 if (WARN_ON_ONCE(file == NULL)) {
1865                         pr_warn("error on getting probe file.\n");
1866                         warn++;
1867                 } else
1868                         disable_trace_kprobe(
1869                                 trace_probe_event_call(&tk->tp), file);
1870         }
1871
1872         ret = trace_run_command("-:testprobe", create_or_delete_trace_kprobe);
1873         if (WARN_ON_ONCE(ret)) {
1874                 pr_warn("error on deleting a probe.\n");
1875                 warn++;
1876         }
1877
1878         ret = trace_run_command("-:testprobe2", create_or_delete_trace_kprobe);
1879         if (WARN_ON_ONCE(ret)) {
1880                 pr_warn("error on deleting a probe.\n");
1881                 warn++;
1882         }
1883
1884 end:
1885         ret = dyn_events_release_all(&trace_kprobe_ops);
1886         if (WARN_ON_ONCE(ret)) {
1887                 pr_warn("error on cleaning up probes.\n");
1888                 warn++;
1889         }
1890         /*
1891          * Wait for the optimizer work to finish. Otherwise it might fiddle
1892          * with probes in already freed __init text.
1893          */
1894         wait_for_kprobe_optimizer();
1895         if (warn)
1896                 pr_cont("NG: Some tests have failed. Please check them.\n");
1897         else
1898                 pr_cont("OK\n");
1899         return 0;
1900 }
1901
1902 late_initcall(kprobe_trace_self_tests_init);
1903
1904 #endif