GNU Linux-libre 4.14.259-gnu1
kernel/trace/trace_kprobe.c
/*
 * Kprobes-based tracing events
 *
 * Created by Masami Hiramatsu <mhiramat@redhat.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */
#define pr_fmt(fmt)	"trace_kprobe: " fmt

#include <linux/module.h>
#include <linux/uaccess.h>
#include <linux/rculist.h>

#include "trace_probe.h"

#define KPROBE_EVENT_SYSTEM "kprobes"
#define KRETPROBE_MAXACTIVE_MAX 4096

/*
 * Kprobe event core functions
 */
struct trace_kprobe {
	struct list_head	list;
	struct kretprobe	rp;	/* Use rp.kp for kprobe use */
	unsigned long __percpu *nhit;
	const char		*symbol;	/* symbol name */
	struct trace_probe	tp;
};

#define SIZEOF_TRACE_KPROBE(n)				\
	(offsetof(struct trace_kprobe, tp.args) +	\
	(sizeof(struct probe_arg) * (n)))
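/*
 * Sizing note (illustrative): tp.args is a trailing array, so the
 * allocation size is the offset of tp.args plus n probe_arg slots, e.g.
 *	SIZEOF_TRACE_KPROBE(2) == offsetof(struct trace_kprobe, tp.args)
 *				  + 2 * sizeof(struct probe_arg)
 */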


static nokprobe_inline bool trace_kprobe_is_return(struct trace_kprobe *tk)
{
	return tk->rp.handler != NULL;
}

static nokprobe_inline const char *trace_kprobe_symbol(struct trace_kprobe *tk)
{
	return tk->symbol ? tk->symbol : "unknown";
}

static nokprobe_inline unsigned long trace_kprobe_offset(struct trace_kprobe *tk)
{
	return tk->rp.kp.offset;
}

static nokprobe_inline bool trace_kprobe_has_gone(struct trace_kprobe *tk)
{
	return !!(kprobe_gone(&tk->rp.kp));
}

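/* e.g. a symbol spec of "btrfs:btrfs_sync_file" lies within module "btrfs" */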
static nokprobe_inline bool trace_kprobe_within_module(struct trace_kprobe *tk,
						 struct module *mod)
{
	int len = strlen(mod->name);
	const char *name = trace_kprobe_symbol(tk);
	return strncmp(mod->name, name, len) == 0 && name[len] == ':';
}

static nokprobe_inline bool trace_kprobe_is_on_module(struct trace_kprobe *tk)
{
	return !!strchr(trace_kprobe_symbol(tk), ':');
}

static nokprobe_inline unsigned long trace_kprobe_nhit(struct trace_kprobe *tk)
{
	unsigned long nhit = 0;
	int cpu;

	for_each_possible_cpu(cpu)
		nhit += *per_cpu_ptr(tk->nhit, cpu);

	return nhit;
}

static int register_kprobe_event(struct trace_kprobe *tk);
static int unregister_kprobe_event(struct trace_kprobe *tk);

static DEFINE_MUTEX(probe_lock);
static LIST_HEAD(probe_list);

static int kprobe_dispatcher(struct kprobe *kp, struct pt_regs *regs);
static int kretprobe_dispatcher(struct kretprobe_instance *ri,
				struct pt_regs *regs);

/* Memory fetching by symbol */
struct symbol_cache {
	char		*symbol;
	long		offset;
	unsigned long	addr;
};

unsigned long update_symbol_cache(struct symbol_cache *sc)
{
	sc->addr = (unsigned long)kallsyms_lookup_name(sc->symbol);

	if (sc->addr)
		sc->addr += sc->offset;

	return sc->addr;
}

void free_symbol_cache(struct symbol_cache *sc)
{
	kfree(sc->symbol);
	kfree(sc);
}

struct symbol_cache *alloc_symbol_cache(const char *sym, long offset)
{
	struct symbol_cache *sc;

	if (!sym || strlen(sym) == 0)
		return NULL;

	sc = kzalloc(sizeof(struct symbol_cache), GFP_KERNEL);
	if (!sc)
		return NULL;

	sc->symbol = kstrdup(sym, GFP_KERNEL);
	if (!sc->symbol) {
		kfree(sc);
		return NULL;
	}
	sc->offset = offset;
	update_symbol_cache(sc);

	return sc;
}
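/*
 * Lifecycle sketch (illustrative only; "jiffies" is just an example symbol):
 *
 *	struct symbol_cache *sc = alloc_symbol_cache("jiffies", 0);
 *	if (sc) {
 *		... fetch through sc->addr ...
 *		update_symbol_cache(sc);	// refresh the kallsyms lookup
 *		free_symbol_cache(sc);
 *	}
 */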

/*
 * Kprobes-specific fetch functions
 */
#define DEFINE_FETCH_stack(type)					\
static void FETCH_FUNC_NAME(stack, type)(struct pt_regs *regs,		\
					  void *offset, void *dest)	\
{									\
	*(type *)dest = (type)regs_get_kernel_stack_nth(regs,		\
				(unsigned int)((unsigned long)offset)); \
}									\
NOKPROBE_SYMBOL(FETCH_FUNC_NAME(stack, type));

DEFINE_BASIC_FETCH_FUNCS(stack)
/* No string on the stack entry */
#define fetch_stack_string	NULL
#define fetch_stack_string_size	NULL

#define DEFINE_FETCH_memory(type)					\
static void FETCH_FUNC_NAME(memory, type)(struct pt_regs *regs,	\
					  void *addr, void *dest)	\
{									\
	type retval;							\
	if (probe_kernel_address(addr, retval))				\
		*(type *)dest = 0;					\
	else								\
		*(type *)dest = retval;					\
}									\
NOKPROBE_SYMBOL(FETCH_FUNC_NAME(memory, type));

DEFINE_BASIC_FETCH_FUNCS(memory)
/*
 * Fetch a null-terminated string. Caller MUST set *(u32 *)dest with max
 * length and relative data location.
 */
static void FETCH_FUNC_NAME(memory, string)(struct pt_regs *regs,
					    void *addr, void *dest)
{
	int maxlen = get_rloc_len(*(u32 *)dest);
	u8 *dst = get_rloc_data(dest);
	long ret;

	if (!maxlen)
		return;

	/*
	 * Try to get the string again, since the string can change while
	 * we are probing it.
	 */
	ret = strncpy_from_unsafe(dst, addr, maxlen);

	if (ret < 0) {	/* Failed to fetch string */
		dst[0] = '\0';
		*(u32 *)dest = make_data_rloc(0, get_rloc_offs(*(u32 *)dest));
	} else {
		*(u32 *)dest = make_data_rloc(ret, get_rloc_offs(*(u32 *)dest));
	}
}
NOKPROBE_SYMBOL(FETCH_FUNC_NAME(memory, string));

/* Return the length of the string -- including the terminating null byte */
static void FETCH_FUNC_NAME(memory, string_size)(struct pt_regs *regs,
						 void *addr, void *dest)
{
	mm_segment_t old_fs;
	int ret, len = 0;
	u8 c;

	old_fs = get_fs();
	set_fs(KERNEL_DS);
	pagefault_disable();

	do {
		ret = __copy_from_user_inatomic(&c, (u8 *)addr + len, 1);
		len++;
	} while (c && ret == 0 && len < MAX_STRING_SIZE);

	pagefault_enable();
	set_fs(old_fs);

	if (ret < 0)	/* Failed to check the length */
		*(u32 *)dest = 0;
	else
		*(u32 *)dest = len;
}
NOKPROBE_SYMBOL(FETCH_FUNC_NAME(memory, string_size));

#define DEFINE_FETCH_symbol(type)					\
void FETCH_FUNC_NAME(symbol, type)(struct pt_regs *regs, void *data, void *dest)\
{									\
	struct symbol_cache *sc = data;					\
	if (sc->addr)							\
		fetch_memory_##type(regs, (void *)sc->addr, dest);	\
	else								\
		*(type *)dest = 0;					\
}									\
NOKPROBE_SYMBOL(FETCH_FUNC_NAME(symbol, type));

DEFINE_BASIC_FETCH_FUNCS(symbol)
DEFINE_FETCH_symbol(string)
DEFINE_FETCH_symbol(string_size)

/* kprobes don't support file_offset fetch methods */
#define fetch_file_offset_u8		NULL
#define fetch_file_offset_u16		NULL
#define fetch_file_offset_u32		NULL
#define fetch_file_offset_u64		NULL
#define fetch_file_offset_string	NULL
#define fetch_file_offset_string_size	NULL

/* Fetch type information table */
static const struct fetch_type kprobes_fetch_type_table[] = {
	/* Special types */
	[FETCH_TYPE_STRING] = __ASSIGN_FETCH_TYPE("string", string, string,
					sizeof(u32), 1, "__data_loc char[]"),
	[FETCH_TYPE_STRSIZE] = __ASSIGN_FETCH_TYPE("string_size", u32,
					string_size, sizeof(u32), 0, "u32"),
	/* Basic types */
	ASSIGN_FETCH_TYPE(u8,  u8,  0),
	ASSIGN_FETCH_TYPE(u16, u16, 0),
	ASSIGN_FETCH_TYPE(u32, u32, 0),
	ASSIGN_FETCH_TYPE(u64, u64, 0),
	ASSIGN_FETCH_TYPE(s8,  u8,  1),
	ASSIGN_FETCH_TYPE(s16, u16, 1),
	ASSIGN_FETCH_TYPE(s32, u32, 1),
	ASSIGN_FETCH_TYPE(s64, u64, 1),
	ASSIGN_FETCH_TYPE_ALIAS(x8,  u8,  u8,  0),
	ASSIGN_FETCH_TYPE_ALIAS(x16, u16, u16, 0),
	ASSIGN_FETCH_TYPE_ALIAS(x32, u32, u32, 0),
	ASSIGN_FETCH_TYPE_ALIAS(x64, u64, u64, 0),

	ASSIGN_FETCH_TYPE_END
};
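/*
 * Example (illustrative): an argument spec such as "%ax:u16" or
 * "+0(%si):string" is matched against this table by
 * traceprobe_parse_probe_arg(), which selects the corresponding fetch
 * and print handlers for that type.
 */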

/*
 * Allocate new trace_probe and initialize it (including kprobes).
 */
static struct trace_kprobe *alloc_trace_kprobe(const char *group,
					     const char *event,
					     void *addr,
					     const char *symbol,
					     unsigned long offs,
					     int maxactive,
					     int nargs, bool is_return)
{
	struct trace_kprobe *tk;
	int ret = -ENOMEM;

	tk = kzalloc(SIZEOF_TRACE_KPROBE(nargs), GFP_KERNEL);
	if (!tk)
		return ERR_PTR(ret);

	tk->nhit = alloc_percpu(unsigned long);
	if (!tk->nhit)
		goto error;

	if (symbol) {
		tk->symbol = kstrdup(symbol, GFP_KERNEL);
		if (!tk->symbol)
			goto error;
		tk->rp.kp.symbol_name = tk->symbol;
		tk->rp.kp.offset = offs;
	} else
		tk->rp.kp.addr = addr;

	if (is_return)
		tk->rp.handler = kretprobe_dispatcher;
	else
		tk->rp.kp.pre_handler = kprobe_dispatcher;

	tk->rp.maxactive = maxactive;

	if (!event || !is_good_name(event)) {
		ret = -EINVAL;
		goto error;
	}

	tk->tp.call.class = &tk->tp.class;
	tk->tp.call.name = kstrdup(event, GFP_KERNEL);
	if (!tk->tp.call.name)
		goto error;

	if (!group || !is_good_name(group)) {
		ret = -EINVAL;
		goto error;
	}

	tk->tp.class.system = kstrdup(group, GFP_KERNEL);
	if (!tk->tp.class.system)
		goto error;

	INIT_LIST_HEAD(&tk->list);
	INIT_LIST_HEAD(&tk->tp.files);
	return tk;
error:
	kfree(tk->tp.call.name);
	kfree(tk->symbol);
	free_percpu(tk->nhit);
	kfree(tk);
	return ERR_PTR(ret);
}

static void free_trace_kprobe(struct trace_kprobe *tk)
{
	int i;

	for (i = 0; i < tk->tp.nr_args; i++)
		traceprobe_free_probe_arg(&tk->tp.args[i]);

	kfree(tk->tp.call.class->system);
	kfree(tk->tp.call.name);
	kfree(tk->symbol);
	free_percpu(tk->nhit);
	kfree(tk);
}

static struct trace_kprobe *find_trace_kprobe(const char *event,
					      const char *group)
{
	struct trace_kprobe *tk;

	list_for_each_entry(tk, &probe_list, list)
		if (strcmp(trace_event_name(&tk->tp.call), event) == 0 &&
		    strcmp(tk->tp.call.class->system, group) == 0)
			return tk;
	return NULL;
}

/*
 * Enable trace_probe:
 * if the file is NULL, enable the "perf" handler; otherwise enable
 * the "trace" handler.
 */
static int
enable_trace_kprobe(struct trace_kprobe *tk, struct trace_event_file *file)
{
	struct event_file_link *link = NULL;
	int ret = 0;

	if (file) {
		link = kmalloc(sizeof(*link), GFP_KERNEL);
		if (!link) {
			ret = -ENOMEM;
			goto out;
		}

		link->file = file;
		list_add_tail_rcu(&link->list, &tk->tp.files);

		tk->tp.flags |= TP_FLAG_TRACE;
	} else
		tk->tp.flags |= TP_FLAG_PROFILE;

	if (trace_probe_is_registered(&tk->tp) && !trace_kprobe_has_gone(tk)) {
		if (trace_kprobe_is_return(tk))
			ret = enable_kretprobe(&tk->rp);
		else
			ret = enable_kprobe(&tk->rp.kp);
	}

	if (ret) {
		if (file) {
			/* Only roll the link back if the WARN() did not fire */
			if (!WARN_ON_ONCE(!link))
				list_del_rcu(&link->list);
			kfree(link);
			tk->tp.flags &= ~TP_FLAG_TRACE;
		} else {
			tk->tp.flags &= ~TP_FLAG_PROFILE;
		}
	}
 out:
	return ret;
}

/*
 * Disable trace_probe:
 * if the file is NULL, disable the "perf" handler; otherwise disable
 * the "trace" handler.
 */
static int
disable_trace_kprobe(struct trace_kprobe *tk, struct trace_event_file *file)
{
	struct event_file_link *link = NULL;
	int wait = 0;
	int ret = 0;

	if (file) {
		link = find_event_file_link(&tk->tp, file);
		if (!link) {
			ret = -EINVAL;
			goto out;
		}

		list_del_rcu(&link->list);
		wait = 1;
		if (!list_empty(&tk->tp.files))
			goto out;

		tk->tp.flags &= ~TP_FLAG_TRACE;
	} else
		tk->tp.flags &= ~TP_FLAG_PROFILE;

	if (!trace_probe_is_enabled(&tk->tp) && trace_probe_is_registered(&tk->tp)) {
		if (trace_kprobe_is_return(tk))
			disable_kretprobe(&tk->rp);
		else
			disable_kprobe(&tk->rp.kp);
		wait = 1;
	}
 out:
	if (wait) {
		/*
		 * Synchronize with kprobe_trace_func/kretprobe_trace_func
		 * to ensure the probe is really disabled (all running
		 * handlers have finished). This matters not only for the
		 * kfree() below, but also for the caller:
		 * trace_remove_event_call() relies on it before releasing
		 * event_call related objects, which are accessed by
		 * kprobe_trace_func/kretprobe_trace_func.
		 */
		synchronize_sched();
		kfree(link);	/* Ignored if link == NULL */
	}

	return ret;
}

/* Internal register function - just handle k*probes and flags */
static int __register_trace_kprobe(struct trace_kprobe *tk)
{
	int i, ret;

	if (trace_probe_is_registered(&tk->tp))
		return -EINVAL;

	for (i = 0; i < tk->tp.nr_args; i++)
		traceprobe_update_arg(&tk->tp.args[i]);

	/* Set/clear disabled flag according to tp->flag */
	if (trace_probe_is_enabled(&tk->tp))
		tk->rp.kp.flags &= ~KPROBE_FLAG_DISABLED;
	else
		tk->rp.kp.flags |= KPROBE_FLAG_DISABLED;

	if (trace_kprobe_is_return(tk))
		ret = register_kretprobe(&tk->rp);
	else
		ret = register_kprobe(&tk->rp.kp);

	if (ret == 0)
		tk->tp.flags |= TP_FLAG_REGISTERED;
	else {
		pr_warn("Could not insert probe at %s+%lu: %d\n",
			trace_kprobe_symbol(tk), trace_kprobe_offset(tk), ret);
		if (ret == -ENOENT && trace_kprobe_is_on_module(tk)) {
			pr_warn("This probe might be able to register after target module is loaded. Continue.\n");
			ret = 0;
		} else if (ret == -EILSEQ) {
			pr_warn("Probing address(0x%p) is not an instruction boundary.\n",
				tk->rp.kp.addr);
			ret = -EINVAL;
		}
	}

	return ret;
}

/* Internal unregister function - just handle k*probes and flags */
static void __unregister_trace_kprobe(struct trace_kprobe *tk)
{
	if (trace_probe_is_registered(&tk->tp)) {
		if (trace_kprobe_is_return(tk))
			unregister_kretprobe(&tk->rp);
		else
			unregister_kprobe(&tk->rp.kp);
		tk->tp.flags &= ~TP_FLAG_REGISTERED;
		/* Cleanup kprobe for reuse */
		if (tk->rp.kp.symbol_name)
			tk->rp.kp.addr = NULL;
	}
}

/* Unregister a trace_probe and probe_event: call with probe_lock held */
static int unregister_trace_kprobe(struct trace_kprobe *tk)
{
	/* An enabled event cannot be unregistered */
	if (trace_probe_is_enabled(&tk->tp))
		return -EBUSY;

	/* Will fail if the probe is being used by ftrace or perf */
	if (unregister_kprobe_event(tk))
		return -EBUSY;

	__unregister_trace_kprobe(tk);
	list_del(&tk->list);

	return 0;
}

/* Register a trace_probe and probe_event */
static int register_trace_kprobe(struct trace_kprobe *tk)
{
	struct trace_kprobe *old_tk;
	int ret;

	mutex_lock(&probe_lock);

	/* Delete the old event if one with the same name exists */
	old_tk = find_trace_kprobe(trace_event_name(&tk->tp.call),
			tk->tp.call.class->system);
	if (old_tk) {
		ret = unregister_trace_kprobe(old_tk);
		if (ret < 0)
			goto end;
		free_trace_kprobe(old_tk);
	}

	/* Register new event */
	ret = register_kprobe_event(tk);
	if (ret) {
		pr_warn("Failed to register probe event(%d)\n", ret);
		goto end;
	}

	/* Register k*probe */
	ret = __register_trace_kprobe(tk);
	if (ret < 0)
		unregister_kprobe_event(tk);
	else
		list_add_tail(&tk->list, &probe_list);

end:
	mutex_unlock(&probe_lock);
	return ret;
}

/* Module notifier call back, checking event on the module */
static int trace_kprobe_module_callback(struct notifier_block *nb,
				       unsigned long val, void *data)
{
	struct module *mod = data;
	struct trace_kprobe *tk;
	int ret;

	if (val != MODULE_STATE_COMING)
		return NOTIFY_DONE;

	/* Update probes on coming module */
	mutex_lock(&probe_lock);
	list_for_each_entry(tk, &probe_list, list) {
		if (trace_kprobe_within_module(tk, mod)) {
			/* No need to check busy - this probe should have gone. */
			__unregister_trace_kprobe(tk);
			ret = __register_trace_kprobe(tk);
			if (ret)
				pr_warn("Failed to re-register probe %s on %s: %d\n",
					trace_event_name(&tk->tp.call),
					mod->name, ret);
		}
	}
	mutex_unlock(&probe_lock);

	return NOTIFY_DONE;
}

static struct notifier_block trace_kprobe_module_nb = {
	.notifier_call = trace_kprobe_module_callback,
	.priority = 1	/* Invoked after kprobe module callback */
};

/* Convert certain expected symbols into '_' when generating event names */
static inline void sanitize_event_name(char *name)
{
	while (*name++ != '\0')
		if (*name == ':' || *name == '.')
			*name = '_';
}
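/* e.g. an auto-generated name like "p_vfs_read.isra.1_0" becomes "p_vfs_read_isra_1_0" (illustrative) */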

static int create_trace_kprobe(int argc, char **argv)
{
	/*
	 * Argument syntax:
	 *  - Add kprobe:
	 *      p[:[GRP/]EVENT] [MOD:]KSYM[+OFFS]|KADDR [FETCHARGS]
	 *  - Add kretprobe:
	 *      r[MAXACTIVE][:[GRP/]EVENT] [MOD:]KSYM[+0] [FETCHARGS]
	 * Fetch args:
	 *  $retval	: fetch return value
	 *  $stack	: fetch stack address
	 *  $stackN	: fetch Nth of stack (N:0-)
	 *  $comm	: fetch current task comm
	 *  @ADDR	: fetch memory at ADDR (ADDR should be in kernel)
	 *  @SYM[+|-offs] : fetch memory at SYM +|- offs (SYM is a data symbol)
	 *  %REG	: fetch register REG
	 * Dereferencing memory fetch:
	 *  +|-offs(ARG) : fetch memory at ARG +|- offs address.
	 * Alias name of args:
	 *  NAME=FETCHARG : set NAME as alias of FETCHARG.
	 * Type of args:
	 *  FETCHARG:TYPE : use TYPE instead of unsigned long.
	 */
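	/*
	 * Usage sketch via tracefs (illustrative; the x86-64 register
	 * names below are an assumption for the example):
	 *
	 *   echo 'p:myopen do_sys_open dfd=%di path=+0(%si):string' \
	 *	> /sys/kernel/debug/tracing/kprobe_events
	 *   echo 'r:myret do_sys_open $retval' \
	 *	>> /sys/kernel/debug/tracing/kprobe_events
	 *   echo '-:myopen' >> /sys/kernel/debug/tracing/kprobe_events
	 */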
	struct trace_kprobe *tk;
	int i, ret = 0;
	bool is_return = false, is_delete = false;
	char *symbol = NULL, *event = NULL, *group = NULL;
	int maxactive = 0;
	char *arg;
	long offset = 0;
	void *addr = NULL;
	char buf[MAX_EVENT_NAME_LEN];

	/* argc must be >= 1 */
	if (argv[0][0] == 'p')
		is_return = false;
	else if (argv[0][0] == 'r')
		is_return = true;
	else if (argv[0][0] == '-')
		is_delete = true;
	else {
		pr_info("Probe definition must start with 'p', 'r' or '-'.\n");
		return -EINVAL;
	}

	event = strchr(&argv[0][1], ':');
	if (event) {
		event[0] = '\0';
		event++;
	}
	if (is_return && isdigit(argv[0][1])) {
		ret = kstrtouint(&argv[0][1], 0, &maxactive);
		if (ret) {
			pr_info("Failed to parse maxactive.\n");
			return ret;
		}
		/*
		 * kretprobe instances are iterated over via a list. The
		 * maximum should stay reasonable.
		 */
		if (maxactive > KRETPROBE_MAXACTIVE_MAX) {
			pr_info("Maxactive is too big (%d > %d).\n",
				maxactive, KRETPROBE_MAXACTIVE_MAX);
			return -E2BIG;
		}
	}

	if (event) {
		if (strchr(event, '/')) {
			group = event;
			event = strchr(group, '/') + 1;
			event[-1] = '\0';
			if (strlen(group) == 0) {
				pr_info("Group name is not specified\n");
				return -EINVAL;
			}
		}
		if (strlen(event) == 0) {
			pr_info("Event name is not specified\n");
			return -EINVAL;
		}
	}
	if (!group)
		group = KPROBE_EVENT_SYSTEM;

	if (is_delete) {
		if (!event) {
			pr_info("Delete command needs an event name.\n");
			return -EINVAL;
		}
		mutex_lock(&probe_lock);
		tk = find_trace_kprobe(event, group);
		if (!tk) {
			mutex_unlock(&probe_lock);
			pr_info("Event %s/%s doesn't exist.\n", group, event);
			return -ENOENT;
		}
		/* delete an event */
		ret = unregister_trace_kprobe(tk);
		if (ret == 0)
			free_trace_kprobe(tk);
		mutex_unlock(&probe_lock);
		return ret;
	}

	if (argc < 2) {
		pr_info("Probe point is not specified.\n");
		return -EINVAL;
	}

	/*
	 * Try to parse an address. If that fails, try to read the
	 * input as a symbol.
	 */
	if (kstrtoul(argv[1], 0, (unsigned long *)&addr)) {
		/* a symbol specified */
		symbol = argv[1];
		/* TODO: support .init module functions */
		ret = traceprobe_split_symbol_offset(symbol, &offset);
		if (ret || offset < 0 || offset > UINT_MAX) {
			pr_info("Failed to parse either an address or a symbol.\n");
			return ret;
		}
		if (offset && is_return &&
		    !kprobe_on_func_entry(NULL, symbol, offset)) {
			pr_info("Given offset is not valid for return probe.\n");
			return -EINVAL;
		}
	}
	argc -= 2; argv += 2;

	/* setup a probe */
	if (!event) {
		/* Make a new event name */
		if (symbol)
			snprintf(buf, MAX_EVENT_NAME_LEN, "%c_%s_%ld",
				 is_return ? 'r' : 'p', symbol, offset);
		else
			snprintf(buf, MAX_EVENT_NAME_LEN, "%c_0x%p",
				 is_return ? 'r' : 'p', addr);
		sanitize_event_name(buf);
		event = buf;
	}
	tk = alloc_trace_kprobe(group, event, addr, symbol, offset, maxactive,
			       argc, is_return);
	if (IS_ERR(tk)) {
		pr_info("Failed to allocate trace_probe.(%d)\n",
			(int)PTR_ERR(tk));
		return PTR_ERR(tk);
	}

	/* parse arguments */
	ret = 0;
	for (i = 0; i < argc && i < MAX_TRACE_ARGS; i++) {
		struct probe_arg *parg = &tk->tp.args[i];

		/* Increment count for freeing args in error case */
		tk->tp.nr_args++;

		/* Parse argument name */
		arg = strchr(argv[i], '=');
		if (arg) {
			*arg++ = '\0';
			parg->name = kstrdup(argv[i], GFP_KERNEL);
		} else {
			arg = argv[i];
			/* If argument name is omitted, set "argN" */
			snprintf(buf, MAX_EVENT_NAME_LEN, "arg%d", i + 1);
			parg->name = kstrdup(buf, GFP_KERNEL);
		}

		if (!parg->name) {
			pr_info("Failed to allocate argument[%d] name.\n", i);
			ret = -ENOMEM;
			goto error;
		}

		if (!is_good_name(parg->name)) {
			pr_info("Invalid argument[%d] name: %s\n",
				i, parg->name);
			ret = -EINVAL;
			goto error;
		}

		if (traceprobe_conflict_field_name(parg->name,
						   tk->tp.args, i)) {
			pr_info("Argument[%d] name '%s' conflicts with another field.\n",
				i, argv[i]);
			ret = -EINVAL;
			goto error;
		}

		/* Parse fetch argument */
		ret = traceprobe_parse_probe_arg(arg, &tk->tp.size, parg,
						is_return, true,
						kprobes_fetch_type_table);
		if (ret) {
			pr_info("Parse error at argument[%d]. (%d)\n", i, ret);
			goto error;
		}
	}

	ret = register_trace_kprobe(tk);
	if (ret)
		goto error;
	return 0;

error:
	free_trace_kprobe(tk);
	return ret;
}

static int release_all_trace_kprobes(void)
{
	struct trace_kprobe *tk;
	int ret = 0;

	mutex_lock(&probe_lock);
	/* Ensure no probe is in use. */
	list_for_each_entry(tk, &probe_list, list)
		if (trace_probe_is_enabled(&tk->tp)) {
			ret = -EBUSY;
			goto end;
		}
	/* TODO: Use batch unregistration */
	while (!list_empty(&probe_list)) {
		tk = list_entry(probe_list.next, struct trace_kprobe, list);
		ret = unregister_trace_kprobe(tk);
		if (ret)
			goto end;
		free_trace_kprobe(tk);
	}

end:
	mutex_unlock(&probe_lock);

	return ret;
}

/* Probes listing interfaces */
static void *probes_seq_start(struct seq_file *m, loff_t *pos)
{
	mutex_lock(&probe_lock);
	return seq_list_start(&probe_list, *pos);
}

static void *probes_seq_next(struct seq_file *m, void *v, loff_t *pos)
{
	return seq_list_next(v, &probe_list, pos);
}

static void probes_seq_stop(struct seq_file *m, void *v)
{
	mutex_unlock(&probe_lock);
}

static int probes_seq_show(struct seq_file *m, void *v)
{
	struct trace_kprobe *tk = v;
	int i;

	seq_putc(m, trace_kprobe_is_return(tk) ? 'r' : 'p');
	if (trace_kprobe_is_return(tk) && tk->rp.maxactive)
		seq_printf(m, "%d", tk->rp.maxactive);
	seq_printf(m, ":%s/%s", tk->tp.call.class->system,
			trace_event_name(&tk->tp.call));

	if (!tk->symbol)
		seq_printf(m, " 0x%p", tk->rp.kp.addr);
	else if (tk->rp.kp.offset)
		seq_printf(m, " %s+%u", trace_kprobe_symbol(tk),
			   tk->rp.kp.offset);
	else
		seq_printf(m, " %s", trace_kprobe_symbol(tk));

	for (i = 0; i < tk->tp.nr_args; i++)
		seq_printf(m, " %s=%s", tk->tp.args[i].name, tk->tp.args[i].comm);
	seq_putc(m, '\n');

	return 0;
}

static const struct seq_operations probes_seq_op = {
	.start	= probes_seq_start,
	.next	= probes_seq_next,
	.stop	= probes_seq_stop,
	.show	= probes_seq_show
};

static int probes_open(struct inode *inode, struct file *file)
{
	int ret;

	if ((file->f_mode & FMODE_WRITE) && (file->f_flags & O_TRUNC)) {
		ret = release_all_trace_kprobes();
		if (ret < 0)
			return ret;
	}

	return seq_open(file, &probes_seq_op);
}

static ssize_t probes_write(struct file *file, const char __user *buffer,
			    size_t count, loff_t *ppos)
{
	return traceprobe_probes_write(file, buffer, count, ppos,
			create_trace_kprobe);
}

static const struct file_operations kprobe_events_ops = {
	.owner		= THIS_MODULE,
	.open		= probes_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release,
	.write		= probes_write,
};
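/*
 * Behavior note with an illustrative shell example: opening kprobe_events
 * for writing with O_TRUNC, e.g.
 *	echo > /sys/kernel/debug/tracing/kprobe_events
 * releases every probe via release_all_trace_kprobes() above, and fails
 * with -EBUSY while any probe is still enabled.
 */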

/* Probes profiling interfaces */
static int probes_profile_seq_show(struct seq_file *m, void *v)
{
	struct trace_kprobe *tk = v;

	seq_printf(m, "  %-44s %15lu %15lu\n",
		   trace_event_name(&tk->tp.call),
		   trace_kprobe_nhit(tk),
		   tk->rp.kp.nmissed);

	return 0;
}

static const struct seq_operations profile_seq_op = {
	.start	= probes_seq_start,
	.next	= probes_seq_next,
	.stop	= probes_seq_stop,
	.show	= probes_profile_seq_show
};

static int profile_open(struct inode *inode, struct file *file)
{
	return seq_open(file, &profile_seq_op);
}

static const struct file_operations kprobe_profile_ops = {
	.owner		= THIS_MODULE,
	.open		= profile_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release,
};
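/*
 * Illustrative kprobe_profile output, one line per probe as produced by
 * probes_profile_seq_show() above: event name, hit count, missed count,
 * e.g.
 *	myopen                                                    12               0
 */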

/* Kprobe handler */
static nokprobe_inline void
__kprobe_trace_func(struct trace_kprobe *tk, struct pt_regs *regs,
		    struct trace_event_file *trace_file)
{
	struct kprobe_trace_entry_head *entry;
	struct ring_buffer_event *event;
	struct ring_buffer *buffer;
	int size, dsize, pc;
	unsigned long irq_flags;
	struct trace_event_call *call = &tk->tp.call;

	WARN_ON(call != trace_file->event_call);

	if (trace_trigger_soft_disabled(trace_file))
		return;

	local_save_flags(irq_flags);
	pc = preempt_count();

	dsize = __get_data_size(&tk->tp, regs);
	size = sizeof(*entry) + tk->tp.size + dsize;

	event = trace_event_buffer_lock_reserve(&buffer, trace_file,
						call->event.type,
						size, irq_flags, pc);
	if (!event)
		return;

	entry = ring_buffer_event_data(event);
	entry->ip = (unsigned long)tk->rp.kp.addr;
	store_trace_args(sizeof(*entry), &tk->tp, regs, (u8 *)&entry[1], dsize);

	event_trigger_unlock_commit_regs(trace_file, buffer, event,
					 entry, irq_flags, pc, regs);
}

static void
kprobe_trace_func(struct trace_kprobe *tk, struct pt_regs *regs)
{
	struct event_file_link *link;

	list_for_each_entry_rcu(link, &tk->tp.files, list)
		__kprobe_trace_func(tk, regs, link->file);
}
NOKPROBE_SYMBOL(kprobe_trace_func);

/* Kretprobe handler */
static nokprobe_inline void
__kretprobe_trace_func(struct trace_kprobe *tk, struct kretprobe_instance *ri,
		       struct pt_regs *regs,
		       struct trace_event_file *trace_file)
{
	struct kretprobe_trace_entry_head *entry;
	struct ring_buffer_event *event;
	struct ring_buffer *buffer;
	int size, pc, dsize;
	unsigned long irq_flags;
	struct trace_event_call *call = &tk->tp.call;

	WARN_ON(call != trace_file->event_call);

	if (trace_trigger_soft_disabled(trace_file))
		return;

	local_save_flags(irq_flags);
	pc = preempt_count();

	dsize = __get_data_size(&tk->tp, regs);
	size = sizeof(*entry) + tk->tp.size + dsize;

	event = trace_event_buffer_lock_reserve(&buffer, trace_file,
						call->event.type,
						size, irq_flags, pc);
	if (!event)
		return;

	entry = ring_buffer_event_data(event);
	entry->func = (unsigned long)tk->rp.kp.addr;
	entry->ret_ip = (unsigned long)ri->ret_addr;
	store_trace_args(sizeof(*entry), &tk->tp, regs, (u8 *)&entry[1], dsize);

	event_trigger_unlock_commit_regs(trace_file, buffer, event,
					 entry, irq_flags, pc, regs);
}

static void
kretprobe_trace_func(struct trace_kprobe *tk, struct kretprobe_instance *ri,
		     struct pt_regs *regs)
{
	struct event_file_link *link;

	list_for_each_entry_rcu(link, &tk->tp.files, list)
		__kretprobe_trace_func(tk, ri, regs, link->file);
}
NOKPROBE_SYMBOL(kretprobe_trace_func);

/* Event entry printers */
static enum print_line_t
print_kprobe_event(struct trace_iterator *iter, int flags,
		   struct trace_event *event)
{
	struct kprobe_trace_entry_head *field;
	struct trace_seq *s = &iter->seq;
	struct trace_probe *tp;
	u8 *data;
	int i;

	field = (struct kprobe_trace_entry_head *)iter->ent;
	tp = container_of(event, struct trace_probe, call.event);

	trace_seq_printf(s, "%s: (", trace_event_name(&tp->call));

	if (!seq_print_ip_sym(s, field->ip, flags | TRACE_ITER_SYM_OFFSET))
		goto out;

	trace_seq_putc(s, ')');

	data = (u8 *)&field[1];
	for (i = 0; i < tp->nr_args; i++)
		if (!tp->args[i].type->print(s, tp->args[i].name,
					     data + tp->args[i].offset, field))
			goto out;

	trace_seq_putc(s, '\n');
 out:
	return trace_handle_return(s);
}

static enum print_line_t
print_kretprobe_event(struct trace_iterator *iter, int flags,
		      struct trace_event *event)
{
	struct kretprobe_trace_entry_head *field;
	struct trace_seq *s = &iter->seq;
	struct trace_probe *tp;
	u8 *data;
	int i;

	field = (struct kretprobe_trace_entry_head *)iter->ent;
	tp = container_of(event, struct trace_probe, call.event);

	trace_seq_printf(s, "%s: (", trace_event_name(&tp->call));

	if (!seq_print_ip_sym(s, field->ret_ip, flags | TRACE_ITER_SYM_OFFSET))
		goto out;

	trace_seq_puts(s, " <- ");

	if (!seq_print_ip_sym(s, field->func, flags & ~TRACE_ITER_SYM_OFFSET))
		goto out;

	trace_seq_putc(s, ')');

	data = (u8 *)&field[1];
	for (i = 0; i < tp->nr_args; i++)
		if (!tp->args[i].type->print(s, tp->args[i].name,
					     data + tp->args[i].offset, field))
			goto out;

	trace_seq_putc(s, '\n');

 out:
	return trace_handle_return(s);
}


static int kprobe_event_define_fields(struct trace_event_call *event_call)
{
	int ret, i;
	struct kprobe_trace_entry_head field;
	struct trace_kprobe *tk = (struct trace_kprobe *)event_call->data;

	DEFINE_FIELD(unsigned long, ip, FIELD_STRING_IP, 0);
	/* Set argument names as fields */
	for (i = 0; i < tk->tp.nr_args; i++) {
		struct probe_arg *parg = &tk->tp.args[i];

		ret = trace_define_field(event_call, parg->type->fmttype,
					 parg->name,
					 sizeof(field) + parg->offset,
					 parg->type->size,
					 parg->type->is_signed,
					 FILTER_OTHER);
		if (ret)
			return ret;
	}
	return 0;
}

static int kretprobe_event_define_fields(struct trace_event_call *event_call)
{
	int ret, i;
	struct kretprobe_trace_entry_head field;
	struct trace_kprobe *tk = (struct trace_kprobe *)event_call->data;

	DEFINE_FIELD(unsigned long, func, FIELD_STRING_FUNC, 0);
	DEFINE_FIELD(unsigned long, ret_ip, FIELD_STRING_RETIP, 0);
	/* Set argument names as fields */
	for (i = 0; i < tk->tp.nr_args; i++) {
		struct probe_arg *parg = &tk->tp.args[i];

		ret = trace_define_field(event_call, parg->type->fmttype,
					 parg->name,
					 sizeof(field) + parg->offset,
					 parg->type->size,
					 parg->type->is_signed,
					 FILTER_OTHER);
		if (ret)
			return ret;
	}
	return 0;
}
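/*
 * Illustrative result: for a probe like "p:myopen do_sys_open dfd=%di",
 * the event's tracefs "format" file would list the common fields plus an
 * instruction-pointer field (named per the FIELD_STRING_* macros) and one
 * field per argument, e.g. (offsets/sizes are arch-dependent):
 *	field:unsigned long __probe_ip;	...
 *	field:u64 dfd;	...
 */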

#ifdef CONFIG_PERF_EVENTS

/* Kprobe profile handler */
static void
kprobe_perf_func(struct trace_kprobe *tk, struct pt_regs *regs)
{
	struct trace_event_call *call = &tk->tp.call;
	struct bpf_prog *prog = call->prog;
	struct kprobe_trace_entry_head *entry;
	struct hlist_head *head;
	int size, __size, dsize;
	int rctx;

	if (prog && !trace_call_bpf(prog, regs))
		return;

	head = this_cpu_ptr(call->perf_events);
	if (hlist_empty(head))
		return;

	dsize = __get_data_size(&tk->tp, regs);
	__size = sizeof(*entry) + tk->tp.size + dsize;
	size = ALIGN(__size + sizeof(u32), sizeof(u64));
	size -= sizeof(u32);

	entry = perf_trace_buf_alloc(size, NULL, &rctx);
	if (!entry)
		return;

	entry->ip = (unsigned long)tk->rp.kp.addr;
	memset(&entry[1], 0, dsize);
	store_trace_args(sizeof(*entry), &tk->tp, regs, (u8 *)&entry[1], dsize);
	perf_trace_buf_submit(entry, size, rctx, call->event.type, 1, regs,
			      head, NULL, NULL);
}
NOKPROBE_SYMBOL(kprobe_perf_func);

/* Kretprobe profile handler */
static void
kretprobe_perf_func(struct trace_kprobe *tk, struct kretprobe_instance *ri,
		    struct pt_regs *regs)
{
	struct trace_event_call *call = &tk->tp.call;
	struct bpf_prog *prog = call->prog;
	struct kretprobe_trace_entry_head *entry;
	struct hlist_head *head;
	int size, __size, dsize;
	int rctx;

	if (prog && !trace_call_bpf(prog, regs))
		return;

	head = this_cpu_ptr(call->perf_events);
	if (hlist_empty(head))
		return;

	dsize = __get_data_size(&tk->tp, regs);
	__size = sizeof(*entry) + tk->tp.size + dsize;
	size = ALIGN(__size + sizeof(u32), sizeof(u64));
	size -= sizeof(u32);

	entry = perf_trace_buf_alloc(size, NULL, &rctx);
	if (!entry)
		return;

	entry->func = (unsigned long)tk->rp.kp.addr;
	entry->ret_ip = (unsigned long)ri->ret_addr;
	store_trace_args(sizeof(*entry), &tk->tp, regs, (u8 *)&entry[1], dsize);
	perf_trace_buf_submit(entry, size, rctx, call->event.type, 1, regs,
			      head, NULL, NULL);
}
NOKPROBE_SYMBOL(kretprobe_perf_func);
#endif	/* CONFIG_PERF_EVENTS */
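/*
 * Once registered, a kprobe event is visible to perf as well, e.g.
 * (illustrative):
 *	perf record -e kprobes:myopen -a sleep 1
 * which routes hits through kprobe_perf_func()/kretprobe_perf_func() above.
 */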

/*
 * called by perf_trace_init() or __ftrace_set_clr_event() under event_mutex.
 *
 * kprobe_trace_self_tests_init() does enable_trace_probe/disable_trace_probe
 * lockless, but we can't race with this __init function.
 */
static int kprobe_register(struct trace_event_call *event,
			   enum trace_reg type, void *data)
{
	struct trace_kprobe *tk = (struct trace_kprobe *)event->data;
	struct trace_event_file *file = data;

	switch (type) {
	case TRACE_REG_REGISTER:
		return enable_trace_kprobe(tk, file);
	case TRACE_REG_UNREGISTER:
		return disable_trace_kprobe(tk, file);

#ifdef CONFIG_PERF_EVENTS
	case TRACE_REG_PERF_REGISTER:
		return enable_trace_kprobe(tk, NULL);
	case TRACE_REG_PERF_UNREGISTER:
		return disable_trace_kprobe(tk, NULL);
	case TRACE_REG_PERF_OPEN:
	case TRACE_REG_PERF_CLOSE:
	case TRACE_REG_PERF_ADD:
	case TRACE_REG_PERF_DEL:
		return 0;
#endif
	}
	return 0;
}

static int kprobe_dispatcher(struct kprobe *kp, struct pt_regs *regs)
{
	struct trace_kprobe *tk = container_of(kp, struct trace_kprobe, rp.kp);

	raw_cpu_inc(*tk->nhit);

	if (tk->tp.flags & TP_FLAG_TRACE)
		kprobe_trace_func(tk, regs);
#ifdef CONFIG_PERF_EVENTS
	if (tk->tp.flags & TP_FLAG_PROFILE)
		kprobe_perf_func(tk, regs);
#endif
	return 0;	/* We don't tweak the kernel, so just return 0 */
}
NOKPROBE_SYMBOL(kprobe_dispatcher);

static int
kretprobe_dispatcher(struct kretprobe_instance *ri, struct pt_regs *regs)
{
	struct trace_kprobe *tk = container_of(ri->rp, struct trace_kprobe, rp);

	raw_cpu_inc(*tk->nhit);

	if (tk->tp.flags & TP_FLAG_TRACE)
		kretprobe_trace_func(tk, ri, regs);
#ifdef CONFIG_PERF_EVENTS
	if (tk->tp.flags & TP_FLAG_PROFILE)
		kretprobe_perf_func(tk, ri, regs);
#endif
	return 0;	/* We don't tweak the kernel, so just return 0 */
}
NOKPROBE_SYMBOL(kretprobe_dispatcher);

static struct trace_event_functions kretprobe_funcs = {
	.trace		= print_kretprobe_event
};

static struct trace_event_functions kprobe_funcs = {
	.trace		= print_kprobe_event
};

static int register_kprobe_event(struct trace_kprobe *tk)
{
	struct trace_event_call *call = &tk->tp.call;
	int ret;

	/* Initialize trace_event_call */
	INIT_LIST_HEAD(&call->class->fields);
	if (trace_kprobe_is_return(tk)) {
		call->event.funcs = &kretprobe_funcs;
		call->class->define_fields = kretprobe_event_define_fields;
	} else {
		call->event.funcs = &kprobe_funcs;
		call->class->define_fields = kprobe_event_define_fields;
	}
	if (set_print_fmt(&tk->tp, trace_kprobe_is_return(tk)) < 0)
		return -ENOMEM;
	ret = register_trace_event(&call->event);
	if (!ret) {
		kfree(call->print_fmt);
		return -ENODEV;
	}
	call->flags = TRACE_EVENT_FL_KPROBE;
	call->class->reg = kprobe_register;
	call->data = tk;
	ret = trace_add_event_call(call);
	if (ret) {
		pr_info("Failed to register kprobe event: %s\n",
			trace_event_name(call));
		kfree(call->print_fmt);
		unregister_trace_event(&call->event);
	}
	return ret;
}

static int unregister_kprobe_event(struct trace_kprobe *tk)
{
	int ret;

	/* tp->event is unregistered in trace_remove_event_call() */
	ret = trace_remove_event_call(&tk->tp.call);
	if (!ret)
		kfree(tk->tp.call.print_fmt);
	return ret;
}

/* Make a tracefs interface for controlling probe points */
static __init int init_kprobe_trace(void)
{
	struct dentry *d_tracer;
	struct dentry *entry;

	if (register_module_notifier(&trace_kprobe_module_nb))
		return -EINVAL;

	d_tracer = tracing_init_dentry();
	if (IS_ERR(d_tracer))
		return 0;

	entry = tracefs_create_file("kprobe_events", 0644, d_tracer,
				    NULL, &kprobe_events_ops);

	/* Event list interface */
	if (!entry)
		pr_warn("Could not create tracefs 'kprobe_events' entry\n");

	/* Profile interface */
	entry = tracefs_create_file("kprobe_profile", 0444, d_tracer,
				    NULL, &kprobe_profile_ops);

	if (!entry)
		pr_warn("Could not create tracefs 'kprobe_profile' entry\n");
	return 0;
}
fs_initcall(init_kprobe_trace);


#ifdef CONFIG_FTRACE_STARTUP_TEST
/*
 * The "__used" keeps gcc from removing the function symbol
 * from the kallsyms table. 'noinline' makes sure that there
 * isn't an inlined version used by the test method below
 */
static __used __init noinline int
kprobe_trace_selftest_target(int a1, int a2, int a3, int a4, int a5, int a6)
{
	return a1 + a2 + a3 + a4 + a5 + a6;
}

static __init struct trace_event_file *
find_trace_probe_file(struct trace_kprobe *tk, struct trace_array *tr)
{
	struct trace_event_file *file;

	list_for_each_entry(file, &tr->events, list)
		if (file->event_call == &tk->tp.call)
			return file;

	return NULL;
}

/*
 * Nobody but us can call enable_trace_kprobe/disable_trace_kprobe at this
 * stage, so we can do this lockless.
 */
static __init int kprobe_trace_self_tests_init(void)
{
	int ret, warn = 0;
	int (*target)(int, int, int, int, int, int);
	struct trace_kprobe *tk;
	struct trace_event_file *file;

	if (tracing_is_disabled())
		return -ENODEV;

	target = kprobe_trace_selftest_target;

	pr_info("Testing kprobe tracing: ");

	ret = traceprobe_command("p:testprobe kprobe_trace_selftest_target "
				  "$stack $stack0 +0($stack)",
				  create_trace_kprobe);
	if (WARN_ON_ONCE(ret)) {
		pr_warn("error on probing function entry.\n");
		warn++;
	} else {
		/* Enable trace point */
		tk = find_trace_kprobe("testprobe", KPROBE_EVENT_SYSTEM);
		if (WARN_ON_ONCE(tk == NULL)) {
			pr_warn("error on getting new probe.\n");
			warn++;
		} else {
			file = find_trace_probe_file(tk, top_trace_array());
			if (WARN_ON_ONCE(file == NULL)) {
				pr_warn("error on getting probe file.\n");
				warn++;
			} else
				enable_trace_kprobe(tk, file);
		}
	}

	ret = traceprobe_command("r:testprobe2 kprobe_trace_selftest_target "
				  "$retval", create_trace_kprobe);
	if (WARN_ON_ONCE(ret)) {
		pr_warn("error on probing function return.\n");
		warn++;
	} else {
		/* Enable trace point */
		tk = find_trace_kprobe("testprobe2", KPROBE_EVENT_SYSTEM);
		if (WARN_ON_ONCE(tk == NULL)) {
			pr_warn("error on getting 2nd new probe.\n");
			warn++;
		} else {
			file = find_trace_probe_file(tk, top_trace_array());
			if (WARN_ON_ONCE(file == NULL)) {
				pr_warn("error on getting probe file.\n");
				warn++;
			} else
				enable_trace_kprobe(tk, file);
		}
	}

	if (warn)
		goto end;

	ret = target(1, 2, 3, 4, 5, 6);

	/*
	 * Not expecting an error here, the check is only to prevent the
	 * optimizer from removing the call to target() as otherwise there
	 * are no side-effects and the call is never performed.
	 */
	if (ret != 21)
		warn++;

	/* Disable trace points before removing them */
	tk = find_trace_kprobe("testprobe", KPROBE_EVENT_SYSTEM);
	if (WARN_ON_ONCE(tk == NULL)) {
		pr_warn("error on getting test probe.\n");
		warn++;
	} else {
		if (trace_kprobe_nhit(tk) != 1) {
			pr_warn("incorrect number of testprobe hits\n");
			warn++;
		}

		file = find_trace_probe_file(tk, top_trace_array());
		if (WARN_ON_ONCE(file == NULL)) {
			pr_warn("error on getting probe file.\n");
			warn++;
		} else
			disable_trace_kprobe(tk, file);
	}

	tk = find_trace_kprobe("testprobe2", KPROBE_EVENT_SYSTEM);
	if (WARN_ON_ONCE(tk == NULL)) {
		pr_warn("error on getting 2nd test probe.\n");
		warn++;
	} else {
		if (trace_kprobe_nhit(tk) != 1) {
			pr_warn("incorrect number of testprobe2 hits\n");
			warn++;
		}

		file = find_trace_probe_file(tk, top_trace_array());
		if (WARN_ON_ONCE(file == NULL)) {
			pr_warn("error on getting probe file.\n");
			warn++;
		} else
			disable_trace_kprobe(tk, file);
	}

	ret = traceprobe_command("-:testprobe", create_trace_kprobe);
	if (WARN_ON_ONCE(ret)) {
		pr_warn("error on deleting a probe.\n");
		warn++;
	}

	ret = traceprobe_command("-:testprobe2", create_trace_kprobe);
	if (WARN_ON_ONCE(ret)) {
		pr_warn("error on deleting a probe.\n");
		warn++;
	}

end:
	release_all_trace_kprobes();
	/*
	 * Wait for the optimizer work to finish. Otherwise it might fiddle
	 * with probes in already freed __init text.
	 */
	wait_for_kprobe_optimizer();
	if (warn)
		pr_cont("NG: Some tests failed. Please check them.\n");
	else
		pr_cont("OK\n");
	return 0;
}

late_initcall(kprobe_trace_self_tests_init);

#endif