/*
 * event tracer
 *
 * Copyright (C) 2008 Red Hat Inc, Steven Rostedt <srostedt@redhat.com>
 *
 *  - Added format output of fields of the trace point.
 *    This was based on work by Tom Zanussi <tzanussi@gmail.com>.
 *
 */

#define pr_fmt(fmt) fmt

#include <linux/workqueue.h>
#include <linux/spinlock.h>
#include <linux/kthread.h>
#include <linux/tracefs.h>
#include <linux/uaccess.h>
#include <linux/module.h>
#include <linux/ctype.h>
#include <linux/sort.h>
#include <linux/slab.h>
#include <linux/delay.h>

#include <trace/events/sched.h>

#include <asm/setup.h>

#include "trace_output.h"

#undef TRACE_SYSTEM
#define TRACE_SYSTEM "TRACE_SYSTEM"

DEFINE_MUTEX(event_mutex);

LIST_HEAD(ftrace_events);
static LIST_HEAD(ftrace_generic_fields);
static LIST_HEAD(ftrace_common_fields);

#define GFP_TRACE (GFP_KERNEL | __GFP_ZERO)

static struct kmem_cache *field_cachep;
static struct kmem_cache *file_cachep;

static inline int system_refcount(struct event_subsystem *system)
{
	return system->ref_count;
}

static int system_refcount_inc(struct event_subsystem *system)
{
	return system->ref_count++;
}

static int system_refcount_dec(struct event_subsystem *system)
{
	return --system->ref_count;
}

/* Double loops; do not use break, only gotos work */
#define do_for_each_event_file(tr, file)			\
	list_for_each_entry(tr, &ftrace_trace_arrays, list) {	\
		list_for_each_entry(file, &tr->events, list)

#define do_for_each_event_file_safe(tr, file)			\
	list_for_each_entry(tr, &ftrace_trace_arrays, list) {	\
		struct trace_event_file *___n;			\
		list_for_each_entry_safe(file, ___n, &tr->events, list)

#define while_for_each_event_file()		\
	}

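/*
 * Usage sketch for the helpers above (illustration only; want() is a
 * hypothetical predicate, not part of this file):
 *
 *	do_for_each_event_file(tr, file) {
 *		if (want(file))
 *			goto found;
 *	} while_for_each_event_file();
 */
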
static struct list_head *
trace_get_fields(struct trace_event_call *event_call)
{
	if (!event_call->class->get_fields)
		return &event_call->class->fields;
	return event_call->class->get_fields(event_call);
}

static struct ftrace_event_field *
__find_event_field(struct list_head *head, char *name)
{
	struct ftrace_event_field *field;

	list_for_each_entry(field, head, link) {
		if (!strcmp(field->name, name))
			return field;
	}

	return NULL;
}

struct ftrace_event_field *
trace_find_event_field(struct trace_event_call *call, char *name)
{
	struct ftrace_event_field *field;
	struct list_head *head;

	head = trace_get_fields(call);
	field = __find_event_field(head, name);
	if (field)
		return field;

	field = __find_event_field(&ftrace_generic_fields, name);
	if (field)
		return field;

	return __find_event_field(&ftrace_common_fields, name);
}

static int __trace_define_field(struct list_head *head, const char *type,
				const char *name, int offset, int size,
				int is_signed, int filter_type)
{
	struct ftrace_event_field *field;

	field = kmem_cache_alloc(field_cachep, GFP_TRACE);
	if (!field)
		return -ENOMEM;

	field->name = name;
	field->type = type;

	if (filter_type == FILTER_OTHER)
		field->filter_type = filter_assign_type(type);
	else
		field->filter_type = filter_type;

	field->offset = offset;
	field->size = size;
	field->is_signed = is_signed;

	list_add(&field->link, head);

	return 0;
}

int trace_define_field(struct trace_event_call *call, const char *type,
		       const char *name, int offset, int size, int is_signed,
		       int filter_type)
{
	struct list_head *head;

	if (WARN_ON(!call->class))
		return 0;

	head = trace_get_fields(call);
	return __trace_define_field(head, type, name, offset, size,
				    is_signed, filter_type);
}
EXPORT_SYMBOL_GPL(trace_define_field);

#define __generic_field(type, item, filter_type)			\
	ret = __trace_define_field(&ftrace_generic_fields, #type,	\
				   #item, 0, 0, is_signed_type(type),	\
				   filter_type);			\
	if (ret)							\
		return ret;

#define __common_field(type, item)					\
	ret = __trace_define_field(&ftrace_common_fields, #type,	\
				   "common_" #item,			\
				   offsetof(typeof(ent), item),		\
				   sizeof(ent.item),			\
				   is_signed_type(type), FILTER_OTHER);	\
	if (ret)							\
		return ret;

static int trace_define_generic_fields(void)
{
	int ret;

	__generic_field(int, CPU, FILTER_CPU);
	__generic_field(int, cpu, FILTER_CPU);
	__generic_field(char *, COMM, FILTER_COMM);
	__generic_field(char *, comm, FILTER_COMM);

	return ret;
}

static int trace_define_common_fields(void)
{
	int ret;
	struct trace_entry ent;

	__common_field(unsigned short, type);
	__common_field(unsigned char, flags);
	__common_field(unsigned char, preempt_count);
	__common_field(int, pid);

	return ret;
}

static void trace_destroy_fields(struct trace_event_call *call)
{
	struct ftrace_event_field *field, *next;
	struct list_head *head;

	head = trace_get_fields(call);
	list_for_each_entry_safe(field, next, head, link) {
		list_del(&field->link);
		kmem_cache_free(field_cachep, field);
	}
}

/*
 * run-time version of trace_event_get_offsets_<call>() that returns the last
 * accessible offset of trace fields excluding __dynamic_array bytes
 */
int trace_event_get_offsets(struct trace_event_call *call)
{
	struct ftrace_event_field *tail;
	struct list_head *head;

	head = trace_get_fields(call);
	/*
	 * head->next points to the last field with the largest offset,
	 * since it was added last by trace_define_field()
	 */
	tail = list_first_entry(head, struct ftrace_event_field, link);
	return tail->offset + tail->size;
}
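
/*
 * Example (illustration only): because __trace_define_field() prepends
 * with list_add(), the first list entry is the most recently defined
 * field, i.e. the one at the largest offset.  If that last field is an
 * int at offset 12, trace_event_get_offsets() returns 12 + 4 = 16.
 */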

int trace_event_raw_init(struct trace_event_call *call)
{
	int id;

	id = register_trace_event(&call->event);
	if (!id)
		return -ENODEV;

	return 0;
}
EXPORT_SYMBOL_GPL(trace_event_raw_init);

bool trace_event_ignore_this_pid(struct trace_event_file *trace_file)
{
	struct trace_array *tr = trace_file->tr;
	struct trace_array_cpu *data;
	struct trace_pid_list *pid_list;

	pid_list = rcu_dereference_sched(tr->filtered_pids);
	if (!pid_list)
		return false;

	data = this_cpu_ptr(tr->trace_buffer.data);

	return data->ignore_pid;
}
EXPORT_SYMBOL_GPL(trace_event_ignore_this_pid);

void *trace_event_buffer_reserve(struct trace_event_buffer *fbuffer,
				 struct trace_event_file *trace_file,
				 unsigned long len)
{
	struct trace_event_call *event_call = trace_file->event_call;

	if ((trace_file->flags & EVENT_FILE_FL_PID_FILTER) &&
	    trace_event_ignore_this_pid(trace_file))
		return NULL;

	local_save_flags(fbuffer->flags);
	fbuffer->pc = preempt_count();
	/*
	 * If CONFIG_PREEMPT is enabled, then the tracepoint itself disables
	 * preemption (adding one to the preempt_count). Since we are
	 * interested in the preempt_count at the time the tracepoint was
	 * hit, we need to subtract one to offset the increment.
	 */
	if (IS_ENABLED(CONFIG_PREEMPT))
		fbuffer->pc--;
	fbuffer->trace_file = trace_file;

	fbuffer->event =
		trace_event_buffer_lock_reserve(&fbuffer->buffer, trace_file,
						event_call->event.type, len,
						fbuffer->flags, fbuffer->pc);
	if (!fbuffer->event)
		return NULL;

	fbuffer->entry = ring_buffer_event_data(fbuffer->event);
	return fbuffer->entry;
}
EXPORT_SYMBOL_GPL(trace_event_buffer_reserve);
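
/*
 * Sketch of the reserve/commit pairing as used by the probes generated
 * from TRACE_EVENT() (simplified illustration; the real expansion lives
 * in include/trace/trace_events.h):
 *
 *	struct trace_event_buffer fbuffer;
 *	struct trace_event_raw_<call> *entry;
 *
 *	entry = trace_event_buffer_reserve(&fbuffer, trace_file,
 *					   sizeof(*entry) + extra);
 *	if (!entry)
 *		return;
 *	entry-><field> = <value>;
 *	trace_event_buffer_commit(&fbuffer);
 */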

static DEFINE_SPINLOCK(tracepoint_iter_lock);

static void output_printk(struct trace_event_buffer *fbuffer)
{
	struct trace_event_call *event_call;
	struct trace_event *event;
	unsigned long flags;
	struct trace_iterator *iter = tracepoint_print_iter;

	if (!iter)
		return;

	event_call = fbuffer->trace_file->event_call;
	if (!event_call || !event_call->event.funcs ||
	    !event_call->event.funcs->trace)
		return;

	event = &fbuffer->trace_file->event_call->event;

	spin_lock_irqsave(&tracepoint_iter_lock, flags);
	trace_seq_init(&iter->seq);
	iter->ent = fbuffer->entry;
	event_call->event.funcs->trace(iter, 0, event);
	trace_seq_putc(&iter->seq, 0);
	printk("%s", iter->seq.buffer);

	spin_unlock_irqrestore(&tracepoint_iter_lock, flags);
}

void trace_event_buffer_commit(struct trace_event_buffer *fbuffer)
{
	if (tracepoint_printk)
		output_printk(fbuffer);

	event_trigger_unlock_commit(fbuffer->trace_file, fbuffer->buffer,
				    fbuffer->event, fbuffer->entry,
				    fbuffer->flags, fbuffer->pc);
}
EXPORT_SYMBOL_GPL(trace_event_buffer_commit);

int trace_event_reg(struct trace_event_call *call,
		    enum trace_reg type, void *data)
{
	struct trace_event_file *file = data;

	WARN_ON(!(call->flags & TRACE_EVENT_FL_TRACEPOINT));
	switch (type) {
	case TRACE_REG_REGISTER:
		return tracepoint_probe_register(call->tp,
						 call->class->probe,
						 file);
	case TRACE_REG_UNREGISTER:
		tracepoint_probe_unregister(call->tp,
					    call->class->probe,
					    file);
		return 0;

#ifdef CONFIG_PERF_EVENTS
	case TRACE_REG_PERF_REGISTER:
		return tracepoint_probe_register(call->tp,
						 call->class->perf_probe,
						 call);
	case TRACE_REG_PERF_UNREGISTER:
		tracepoint_probe_unregister(call->tp,
					    call->class->perf_probe,
					    call);
		return 0;
	case TRACE_REG_PERF_OPEN:
	case TRACE_REG_PERF_CLOSE:
	case TRACE_REG_PERF_ADD:
	case TRACE_REG_PERF_DEL:
		return 0;
#endif
	}
	return 0;
}
EXPORT_SYMBOL_GPL(trace_event_reg);

void trace_event_enable_cmd_record(bool enable)
{
	struct trace_event_file *file;
	struct trace_array *tr;

	mutex_lock(&event_mutex);
	do_for_each_event_file(tr, file) {

		if (!(file->flags & EVENT_FILE_FL_ENABLED))
			continue;

		if (enable) {
			tracing_start_cmdline_record();
			set_bit(EVENT_FILE_FL_RECORDED_CMD_BIT, &file->flags);
		} else {
			tracing_stop_cmdline_record();
			clear_bit(EVENT_FILE_FL_RECORDED_CMD_BIT, &file->flags);
		}
	} while_for_each_event_file();
	mutex_unlock(&event_mutex);
}

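/*
 * Summary of the (enable, soft_disable) combinations handled below
 * (informal restatement; see the case comments for the authoritative
 * description):
 *
 *	enable=1, soft_disable=0: really enable the event.
 *	enable=1, soft_disable=1: register the tracepoint, but keep the
 *		event SOFT_DISABLED until something soft enables it.
 *	enable=0, soft_disable=1: drop one sm_ref; when the last one goes,
 *		leave the event in whatever hard state it already had.
 *	enable=0, soft_disable=0: really disable the event, unless it is
 *		in SOFT_MODE, in which case nothing is unregistered.
 */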
static int __ftrace_event_enable_disable(struct trace_event_file *file,
					 int enable, int soft_disable)
{
	struct trace_event_call *call = file->event_call;
	struct trace_array *tr = file->tr;
	unsigned long file_flags = file->flags;
	int ret = 0;
	int disable;

	switch (enable) {
	case 0:
		/*
		 * When soft_disable is set and enable is cleared, the sm_ref
		 * reference counter is decremented. If it reaches 0, we want
		 * to clear the SOFT_DISABLED flag but leave the event in the
		 * state that it was. That is, if the event was enabled and
		 * SOFT_DISABLED isn't set, then do nothing. But if SOFT_DISABLED
		 * is set we do not want the event to be enabled before we
		 * clear the bit.
		 *
		 * When soft_disable is not set but the SOFT_MODE flag is,
		 * we do nothing. Do not disable the tracepoint, otherwise
		 * "soft enable"s (clearing the SOFT_DISABLED bit) won't work.
		 */
		if (soft_disable) {
			if (atomic_dec_return(&file->sm_ref) > 0)
				break;
			disable = file->flags & EVENT_FILE_FL_SOFT_DISABLED;
			clear_bit(EVENT_FILE_FL_SOFT_MODE_BIT, &file->flags);
		} else
			disable = !(file->flags & EVENT_FILE_FL_SOFT_MODE);

		if (disable && (file->flags & EVENT_FILE_FL_ENABLED)) {
			clear_bit(EVENT_FILE_FL_ENABLED_BIT, &file->flags);
			if (file->flags & EVENT_FILE_FL_RECORDED_CMD) {
				tracing_stop_cmdline_record();
				clear_bit(EVENT_FILE_FL_RECORDED_CMD_BIT, &file->flags);
			}
			call->class->reg(call, TRACE_REG_UNREGISTER, file);
		}
		/* If in SOFT_MODE, just set the SOFT_DISABLED_BIT, else clear it */
		if (file->flags & EVENT_FILE_FL_SOFT_MODE)
			set_bit(EVENT_FILE_FL_SOFT_DISABLED_BIT, &file->flags);
		else
			clear_bit(EVENT_FILE_FL_SOFT_DISABLED_BIT, &file->flags);
		break;
	case 1:
		/*
		 * When soft_disable is set and enable is set, we want to
		 * register the tracepoint for the event, but leave the event
		 * as is. That means, if the event was already enabled, we do
		 * nothing (but set SOFT_MODE). If the event is disabled, we
		 * set SOFT_DISABLED before enabling the event tracepoint, so
		 * it still seems to be disabled.
		 */
		if (!soft_disable)
			clear_bit(EVENT_FILE_FL_SOFT_DISABLED_BIT, &file->flags);
		else {
			if (atomic_inc_return(&file->sm_ref) > 1)
				break;
			set_bit(EVENT_FILE_FL_SOFT_MODE_BIT, &file->flags);
		}

		if (!(file->flags & EVENT_FILE_FL_ENABLED)) {

			/* Keep the event disabled, when going to SOFT_MODE. */
			if (soft_disable)
				set_bit(EVENT_FILE_FL_SOFT_DISABLED_BIT, &file->flags);

			if (tr->trace_flags & TRACE_ITER_RECORD_CMD) {
				tracing_start_cmdline_record();
				set_bit(EVENT_FILE_FL_RECORDED_CMD_BIT, &file->flags);
			}
			ret = call->class->reg(call, TRACE_REG_REGISTER, file);
			if (ret) {
				tracing_stop_cmdline_record();
				pr_info("event trace: Could not enable event "
					"%s\n", trace_event_name(call));
				break;
			}
			set_bit(EVENT_FILE_FL_ENABLED_BIT, &file->flags);

			/* WAS_ENABLED gets set but never cleared. */
			call->flags |= TRACE_EVENT_FL_WAS_ENABLED;
		}
		break;
	}

	/* Enable or disable use of trace_buffered_event */
	if ((file_flags & EVENT_FILE_FL_SOFT_DISABLED) !=
	    (file->flags & EVENT_FILE_FL_SOFT_DISABLED)) {
		if (file->flags & EVENT_FILE_FL_SOFT_DISABLED)
			trace_buffered_event_enable();
		else
			trace_buffered_event_disable();
	}

	return ret;
}

int trace_event_enable_disable(struct trace_event_file *file,
			       int enable, int soft_disable)
{
	return __ftrace_event_enable_disable(file, enable, soft_disable);
}

static int ftrace_event_enable_disable(struct trace_event_file *file,
				       int enable)
{
	return __ftrace_event_enable_disable(file, enable, 0);
}

static void ftrace_clear_events(struct trace_array *tr)
{
	struct trace_event_file *file;

	mutex_lock(&event_mutex);
	list_for_each_entry(file, &tr->events, list) {
		ftrace_event_enable_disable(file, 0);
	}
	mutex_unlock(&event_mutex);
}

static void
event_filter_pid_sched_process_exit(void *data, struct task_struct *task)
{
	struct trace_pid_list *pid_list;
	struct trace_array *tr = data;

	pid_list = rcu_dereference_sched(tr->filtered_pids);
	trace_filter_add_remove_task(pid_list, NULL, task);
}

static void
event_filter_pid_sched_process_fork(void *data,
				    struct task_struct *self,
				    struct task_struct *task)
{
	struct trace_pid_list *pid_list;
	struct trace_array *tr = data;

	pid_list = rcu_dereference_sched(tr->filtered_pids);
	trace_filter_add_remove_task(pid_list, self, task);
}

void trace_event_follow_fork(struct trace_array *tr, bool enable)
{
	if (enable) {
		register_trace_prio_sched_process_fork(event_filter_pid_sched_process_fork,
						       tr, INT_MIN);
		register_trace_prio_sched_process_exit(event_filter_pid_sched_process_exit,
						       tr, INT_MAX);
	} else {
		unregister_trace_sched_process_fork(event_filter_pid_sched_process_fork,
						    tr);
		unregister_trace_sched_process_exit(event_filter_pid_sched_process_exit,
						    tr);
	}
}

static void
event_filter_pid_sched_switch_probe_pre(void *data, bool preempt,
		    struct task_struct *prev, struct task_struct *next)
{
	struct trace_array *tr = data;
	struct trace_pid_list *pid_list;

	pid_list = rcu_dereference_sched(tr->filtered_pids);

	this_cpu_write(tr->trace_buffer.data->ignore_pid,
		       trace_ignore_this_task(pid_list, prev) &&
		       trace_ignore_this_task(pid_list, next));
}

static void
event_filter_pid_sched_switch_probe_post(void *data, bool preempt,
		    struct task_struct *prev, struct task_struct *next)
{
	struct trace_array *tr = data;
	struct trace_pid_list *pid_list;

	pid_list = rcu_dereference_sched(tr->filtered_pids);

	this_cpu_write(tr->trace_buffer.data->ignore_pid,
		       trace_ignore_this_task(pid_list, next));
}

static void
event_filter_pid_sched_wakeup_probe_pre(void *data, struct task_struct *task)
{
	struct trace_array *tr = data;
	struct trace_pid_list *pid_list;

	/* Nothing to do if we are already tracing */
	if (!this_cpu_read(tr->trace_buffer.data->ignore_pid))
		return;

	pid_list = rcu_dereference_sched(tr->filtered_pids);

	this_cpu_write(tr->trace_buffer.data->ignore_pid,
		       trace_ignore_this_task(pid_list, task));
}

static void
event_filter_pid_sched_wakeup_probe_post(void *data, struct task_struct *task)
{
	struct trace_array *tr = data;
	struct trace_pid_list *pid_list;

	/* Nothing to do if we are not tracing */
	if (this_cpu_read(tr->trace_buffer.data->ignore_pid))
		return;

	pid_list = rcu_dereference_sched(tr->filtered_pids);

	/* Set tracing if current is enabled */
	this_cpu_write(tr->trace_buffer.data->ignore_pid,
		       trace_ignore_this_task(pid_list, current));
}

static void __ftrace_clear_event_pids(struct trace_array *tr)
{
	struct trace_pid_list *pid_list;
	struct trace_event_file *file;
	int cpu;

	pid_list = rcu_dereference_protected(tr->filtered_pids,
					     lockdep_is_held(&event_mutex));
	if (!pid_list)
		return;

	unregister_trace_sched_switch(event_filter_pid_sched_switch_probe_pre, tr);
	unregister_trace_sched_switch(event_filter_pid_sched_switch_probe_post, tr);

	unregister_trace_sched_wakeup(event_filter_pid_sched_wakeup_probe_pre, tr);
	unregister_trace_sched_wakeup(event_filter_pid_sched_wakeup_probe_post, tr);

	unregister_trace_sched_wakeup_new(event_filter_pid_sched_wakeup_probe_pre, tr);
	unregister_trace_sched_wakeup_new(event_filter_pid_sched_wakeup_probe_post, tr);

	unregister_trace_sched_waking(event_filter_pid_sched_wakeup_probe_pre, tr);
	unregister_trace_sched_waking(event_filter_pid_sched_wakeup_probe_post, tr);

	list_for_each_entry(file, &tr->events, list) {
		clear_bit(EVENT_FILE_FL_PID_FILTER_BIT, &file->flags);
	}

	for_each_possible_cpu(cpu)
		per_cpu_ptr(tr->trace_buffer.data, cpu)->ignore_pid = false;

	rcu_assign_pointer(tr->filtered_pids, NULL);

	/* Wait till all users are no longer using pid filtering */
	synchronize_sched();

	trace_free_pid_list(pid_list);
}

static void ftrace_clear_event_pids(struct trace_array *tr)
{
	mutex_lock(&event_mutex);
	__ftrace_clear_event_pids(tr);
	mutex_unlock(&event_mutex);
}

static void __put_system(struct event_subsystem *system)
{
	struct event_filter *filter = system->filter;

	WARN_ON_ONCE(system_refcount(system) == 0);
	if (system_refcount_dec(system))
		return;

	list_del(&system->list);

	if (filter) {
		kfree(filter->filter_string);
		kfree(filter);
	}
	kfree_const(system->name);
	kfree(system);
}

static void __get_system(struct event_subsystem *system)
{
	WARN_ON_ONCE(system_refcount(system) == 0);
	system_refcount_inc(system);
}

static void __get_system_dir(struct trace_subsystem_dir *dir)
{
	WARN_ON_ONCE(dir->ref_count == 0);
	dir->ref_count++;
	__get_system(dir->subsystem);
}

static void __put_system_dir(struct trace_subsystem_dir *dir)
{
	WARN_ON_ONCE(dir->ref_count == 0);
	/* If the subsystem is about to be freed, the dir must be too */
	WARN_ON_ONCE(system_refcount(dir->subsystem) == 1 && dir->ref_count != 1);

	__put_system(dir->subsystem);
	if (!--dir->ref_count)
		kfree(dir);
}

static void put_system(struct trace_subsystem_dir *dir)
{
	mutex_lock(&event_mutex);
	__put_system_dir(dir);
	mutex_unlock(&event_mutex);
}

static void remove_subsystem(struct trace_subsystem_dir *dir)
{
	if (!dir)
		return;

	if (!--dir->nr_events) {
		tracefs_remove_recursive(dir->entry);
		list_del(&dir->list);
		__put_system_dir(dir);
	}
}

static void remove_event_file_dir(struct trace_event_file *file)
{
	struct dentry *dir = file->dir;
	struct dentry *child;

	if (dir) {
		spin_lock(&dir->d_lock);	/* probably unneeded */
		list_for_each_entry(child, &dir->d_subdirs, d_child) {
			if (d_really_is_positive(child))	/* probably unneeded */
				d_inode(child)->i_private = NULL;
		}
		spin_unlock(&dir->d_lock);

		tracefs_remove_recursive(dir);
	}

	list_del(&file->list);
	remove_subsystem(file->system);
	free_event_filter(file->filter);
	kmem_cache_free(file_cachep, file);
}

/*
 * __ftrace_set_clr_event(NULL, NULL, NULL, set) will set/unset all events.
 */
static int
__ftrace_set_clr_event_nolock(struct trace_array *tr, const char *match,
			      const char *sub, const char *event, int set)
{
	struct trace_event_file *file;
	struct trace_event_call *call;
	const char *name;
	int ret = -EINVAL;

	list_for_each_entry(file, &tr->events, list) {

		call = file->event_call;
		name = trace_event_name(call);

		if (!name || !call->class || !call->class->reg)
			continue;

		if (call->flags & TRACE_EVENT_FL_IGNORE_ENABLE)
			continue;

		if (match &&
		    strcmp(match, name) != 0 &&
		    strcmp(match, call->class->system) != 0)
			continue;

		if (sub && strcmp(sub, call->class->system) != 0)
			continue;

		if (event && strcmp(event, name) != 0)
			continue;

		ftrace_event_enable_disable(file, set);

		ret = 0;
	}

	return ret;
}

static int __ftrace_set_clr_event(struct trace_array *tr, const char *match,
				  const char *sub, const char *event, int set)
{
	int ret;

	mutex_lock(&event_mutex);
	ret = __ftrace_set_clr_event_nolock(tr, match, sub, event, set);
	mutex_unlock(&event_mutex);

	return ret;
}

static int ftrace_set_clr_event(struct trace_array *tr, char *buf, int set)
{
	char *event = NULL, *sub = NULL, *match;
	int ret;

	if (!tr)
		return -ENOENT;
	/*
	 * The buf format can be <subsystem>:<event-name>
	 *  *:<event-name> means any event by that name.
	 *  :<event-name> is the same.
	 *
	 *  <subsystem>:* means all events in that subsystem
	 *  <subsystem>: means the same.
	 *
	 *  <name> (no ':') means all events in a subsystem with
	 *  the name <name> or any event that matches <name>
	 */
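	/*
	 * For example (illustrative): "sched:sched_switch" names one
	 * event, "sched:" all events in the sched subsystem, and
	 * "sched_switch" on its own matches by name across subsystems.
	 */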

	match = strsep(&buf, ":");
	if (buf) {
		sub = match;
		event = buf;
		match = NULL;

		if (!strlen(sub) || strcmp(sub, "*") == 0)
			sub = NULL;
		if (!strlen(event) || strcmp(event, "*") == 0)
			event = NULL;
	}

	ret = __ftrace_set_clr_event(tr, match, sub, event, set);

	/* Put back the colon to allow this to be called again */
	if (buf)
		*(buf - 1) = ':';

	return ret;
}

/**
 * trace_set_clr_event - enable or disable an event
 * @system: system name to match (NULL for any system)
 * @event: event name to match (NULL for all events, within system)
 * @set: 1 to enable, 0 to disable
 *
 * This is a way for other parts of the kernel to enable or disable
 * event recording.
 *
 * Returns 0 on success, -EINVAL if the parameters do not match any
 * registered events.
 */
int trace_set_clr_event(const char *system, const char *event, int set)
{
	struct trace_array *tr = top_trace_array();

	if (!tr)
		return -ENODEV;

	return __ftrace_set_clr_event(tr, NULL, system, event, set);
}
EXPORT_SYMBOL_GPL(trace_set_clr_event);
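
/*
 * Example of an in-kernel caller (illustration only):
 *
 *	trace_set_clr_event("sched", "sched_switch", 1);
 *
 * enables sched:sched_switch in the top-level trace array; passing 0
 * as @set disables it again.
 */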

/* 128 should be much more than enough */
#define EVENT_BUF_SIZE		127

static ssize_t
ftrace_event_write(struct file *file, const char __user *ubuf,
		   size_t cnt, loff_t *ppos)
{
	struct trace_parser parser;
	struct seq_file *m = file->private_data;
	struct trace_array *tr = m->private;
	ssize_t read, ret;

	if (!cnt)
		return 0;

	ret = tracing_update_buffers();
	if (ret < 0)
		return ret;

	if (trace_parser_get_init(&parser, EVENT_BUF_SIZE + 1))
		return -ENOMEM;

	read = trace_get_user(&parser, ubuf, cnt, ppos);

	if (read >= 0 && trace_parser_loaded((&parser))) {
		int set = 1;

		if (*parser.buffer == '!')
			set = 0;

		parser.buffer[parser.idx] = 0;

		ret = ftrace_set_clr_event(tr, parser.buffer + !set, set);
		if (ret)
			goto out_put;
	}

	ret = read;

 out_put:
	trace_parser_put(&parser);

	return ret;
}

static void *
t_next(struct seq_file *m, void *v, loff_t *pos)
{
	struct trace_event_file *file = v;
	struct trace_event_call *call;
	struct trace_array *tr = m->private;

	(*pos)++;

	list_for_each_entry_continue(file, &tr->events, list) {
		call = file->event_call;
		/*
		 * The ftrace subsystem is for showing formats only.
		 * They cannot be enabled or disabled via the event files.
		 */
		if (call->class && call->class->reg &&
		    !(call->flags & TRACE_EVENT_FL_IGNORE_ENABLE))
			return file;
	}

	return NULL;
}

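/*
 * Note on t_start()/s_start() below: they prime the iterator with a
 * pseudo entry computed from the list head itself, so that the
 * list_for_each_entry_continue() in t_next()/s_next() starts at the
 * first real entry on its first call.
 */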
static void *t_start(struct seq_file *m, loff_t *pos)
{
	struct trace_event_file *file;
	struct trace_array *tr = m->private;
	loff_t l;

	mutex_lock(&event_mutex);

	file = list_entry(&tr->events, struct trace_event_file, list);
	for (l = 0; l <= *pos; ) {
		file = t_next(m, file, &l);
		if (!file)
			break;
	}
	return file;
}

static void *
s_next(struct seq_file *m, void *v, loff_t *pos)
{
	struct trace_event_file *file = v;
	struct trace_array *tr = m->private;

	(*pos)++;

	list_for_each_entry_continue(file, &tr->events, list) {
		if (file->flags & EVENT_FILE_FL_ENABLED)
			return file;
	}

	return NULL;
}

static void *s_start(struct seq_file *m, loff_t *pos)
{
	struct trace_event_file *file;
	struct trace_array *tr = m->private;
	loff_t l;

	mutex_lock(&event_mutex);

	file = list_entry(&tr->events, struct trace_event_file, list);
	for (l = 0; l <= *pos; ) {
		file = s_next(m, file, &l);
		if (!file)
			break;
	}
	return file;
}

static int t_show(struct seq_file *m, void *v)
{
	struct trace_event_file *file = v;
	struct trace_event_call *call = file->event_call;

	if (strcmp(call->class->system, TRACE_SYSTEM) != 0)
		seq_printf(m, "%s:", call->class->system);
	seq_printf(m, "%s\n", trace_event_name(call));

	return 0;
}

static void t_stop(struct seq_file *m, void *p)
{
	mutex_unlock(&event_mutex);
}

static void *
p_next(struct seq_file *m, void *v, loff_t *pos)
{
	struct trace_array *tr = m->private;
	struct trace_pid_list *pid_list = rcu_dereference_sched(tr->filtered_pids);

	return trace_pid_next(pid_list, v, pos);
}

static void *p_start(struct seq_file *m, loff_t *pos)
	__acquires(RCU)
{
	struct trace_pid_list *pid_list;
	struct trace_array *tr = m->private;

	/*
	 * Grab the mutex, to keep calls to p_next() having the same
	 * tr->filtered_pids as p_start() has.
	 * If we just passed the tr->filtered_pids around, then RCU would
	 * have been enough, but doing that makes things more complex.
	 */
	mutex_lock(&event_mutex);
	rcu_read_lock_sched();

	pid_list = rcu_dereference_sched(tr->filtered_pids);

	if (!pid_list)
		return NULL;

	return trace_pid_start(pid_list, pos);
}

static void p_stop(struct seq_file *m, void *p)
	__releases(RCU)
{
	rcu_read_unlock_sched();
	mutex_unlock(&event_mutex);
}

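/*
 * The "enable" file reads back as "0" or "1", with a trailing '*'
 * appended when the event is soft disabled or in soft mode, e.g.
 * "0*\n" (illustrative; see the flag checks below).
 */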
static ssize_t
event_enable_read(struct file *filp, char __user *ubuf, size_t cnt,
		  loff_t *ppos)
{
	struct trace_event_file *file;
	unsigned long flags;
	char buf[4] = "0";

	mutex_lock(&event_mutex);
	file = event_file_data(filp);
	if (likely(file))
		flags = file->flags;
	mutex_unlock(&event_mutex);

	if (!file)
		return -ENODEV;

	if (flags & EVENT_FILE_FL_ENABLED &&
	    !(flags & EVENT_FILE_FL_SOFT_DISABLED))
		strcpy(buf, "1");

	if (flags & EVENT_FILE_FL_SOFT_DISABLED ||
	    flags & EVENT_FILE_FL_SOFT_MODE)
		strcat(buf, "*");

	strcat(buf, "\n");

	return simple_read_from_buffer(ubuf, cnt, ppos, buf, strlen(buf));
}

static ssize_t
event_enable_write(struct file *filp, const char __user *ubuf, size_t cnt,
		   loff_t *ppos)
{
	struct trace_event_file *file;
	unsigned long val;
	int ret;

	ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
	if (ret)
		return ret;

	ret = tracing_update_buffers();
	if (ret < 0)
		return ret;

	switch (val) {
	case 0:
	case 1:
		ret = -ENODEV;
		mutex_lock(&event_mutex);
		file = event_file_data(filp);
		if (likely(file))
			ret = ftrace_event_enable_disable(file, val);
		mutex_unlock(&event_mutex);
		break;

	default:
		return -EINVAL;
	}

	*ppos += cnt;

	return ret ? ret : cnt;
}

static ssize_t
system_enable_read(struct file *filp, char __user *ubuf, size_t cnt,
		   loff_t *ppos)
{
	const char set_to_char[4] = { '?', '0', '1', 'X' };
	struct trace_subsystem_dir *dir = filp->private_data;
	struct event_subsystem *system = dir->subsystem;
	struct trace_event_call *call;
	struct trace_event_file *file;
	struct trace_array *tr = dir->tr;
	char buf[2];
	int set = 0;
	int ret;

	mutex_lock(&event_mutex);
	list_for_each_entry(file, &tr->events, list) {
		call = file->event_call;
		if ((call->flags & TRACE_EVENT_FL_IGNORE_ENABLE) ||
		    !trace_event_name(call) || !call->class || !call->class->reg)
			continue;

		if (system && strcmp(call->class->system, system->name) != 0)
			continue;

		/*
		 * We need to find out if all the events are set,
		 * or if all events are cleared, or if we have
		 * a mixture.
		 */
		set |= (1 << !!(file->flags & EVENT_FILE_FL_ENABLED));

		/*
		 * If we have a mixture, no need to look further.
		 */
		if (set == 3)
			break;
	}
	mutex_unlock(&event_mutex);

	buf[0] = set_to_char[set];
	buf[1] = '\n';

	ret = simple_read_from_buffer(ubuf, cnt, ppos, buf, 2);

	return ret;
}

static ssize_t
system_enable_write(struct file *filp, const char __user *ubuf, size_t cnt,
		    loff_t *ppos)
{
	struct trace_subsystem_dir *dir = filp->private_data;
	struct event_subsystem *system = dir->subsystem;
	const char *name = NULL;
	unsigned long val;
	ssize_t ret;

	ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
	if (ret)
		return ret;

	ret = tracing_update_buffers();
	if (ret < 0)
		return ret;

	if (val != 0 && val != 1)
		return -EINVAL;

	/*
	 * Opening of "enable" adds a ref count to system,
	 * so the name is safe to use.
	 */
	if (system)
		name = system->name;

	ret = __ftrace_set_clr_event(dir->tr, NULL, name, NULL, val);
	if (ret)
		goto out;

	ret = cnt;

out:
	*ppos += cnt;

	return ret;
}

enum {
	FORMAT_HEADER		= 1,
	FORMAT_FIELD_SEPERATOR	= 2,
	FORMAT_PRINTFMT		= 3,
};

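/*
 * Walks the "format" file positions: FORMAT_HEADER, then the common
 * fields, a separator, the event's own fields, and finally the print
 * fmt.  Fields were prepended with list_add(), so following ->prev
 * here visits them in the order they were defined.
 */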
static void *f_next(struct seq_file *m, void *v, loff_t *pos)
{
	struct trace_event_call *call = event_file_data(m->private);
	struct list_head *common_head = &ftrace_common_fields;
	struct list_head *head = trace_get_fields(call);
	struct list_head *node = v;

	(*pos)++;

	switch ((unsigned long)v) {
	case FORMAT_HEADER:
		node = common_head;
		break;

	case FORMAT_FIELD_SEPERATOR:
		node = head;
		break;

	case FORMAT_PRINTFMT:
		/* all done */
		return NULL;
	}

	node = node->prev;
	if (node == common_head)
		return (void *)FORMAT_FIELD_SEPERATOR;
	else if (node == head)
		return (void *)FORMAT_PRINTFMT;
	else
		return node;
}

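/*
 * Sample of what f_show() produces in an event's "format" file
 * (abridged illustration):
 *
 *	name: sched_switch
 *	ID: ...
 *	format:
 *		field:unsigned short common_type;	offset:0;	size:2;	signed:0;
 *		...
 *	print fmt: ...
 */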
static int f_show(struct seq_file *m, void *v)
{
	struct trace_event_call *call = event_file_data(m->private);
	struct ftrace_event_field *field;
	const char *array_descriptor;

	switch ((unsigned long)v) {
	case FORMAT_HEADER:
		seq_printf(m, "name: %s\n", trace_event_name(call));
		seq_printf(m, "ID: %d\n", call->event.type);
		seq_puts(m, "format:\n");
		return 0;

	case FORMAT_FIELD_SEPERATOR:
		seq_putc(m, '\n');
		return 0;

	case FORMAT_PRINTFMT:
		seq_printf(m, "\nprint fmt: %s\n",
			   call->print_fmt);
		return 0;
	}

	field = list_entry(v, struct ftrace_event_field, link);
	/*
	 * Smartly shows the array type (except dynamic array).
	 * Normal:
	 *	field:TYPE VAR
	 * If TYPE := TYPE[LEN], it is shown:
	 *	field:TYPE VAR[LEN]
	 */
	array_descriptor = strchr(field->type, '[');

	if (!strncmp(field->type, "__data_loc", 10))
		array_descriptor = NULL;

	if (!array_descriptor)
		seq_printf(m, "\tfield:%s %s;\toffset:%u;\tsize:%u;\tsigned:%d;\n",
			   field->type, field->name, field->offset,
			   field->size, !!field->is_signed);
	else
		seq_printf(m, "\tfield:%.*s %s%s;\toffset:%u;\tsize:%u;\tsigned:%d;\n",
			   (int)(array_descriptor - field->type),
			   field->type, field->name,
			   array_descriptor, field->offset,
			   field->size, !!field->is_signed);

	return 0;
}

static void *f_start(struct seq_file *m, loff_t *pos)
{
	void *p = (void *)FORMAT_HEADER;
	loff_t l = 0;

	/* ->stop() is called even if ->start() fails */
	mutex_lock(&event_mutex);
	if (!event_file_data(m->private))
		return ERR_PTR(-ENODEV);

	while (l < *pos && p)
		p = f_next(m, p, &l);

	return p;
}

static void f_stop(struct seq_file *m, void *p)
{
	mutex_unlock(&event_mutex);
}

static const struct seq_operations trace_format_seq_ops = {
	.start		= f_start,
	.next		= f_next,
	.stop		= f_stop,
	.show		= f_show,
};

static int trace_format_open(struct inode *inode, struct file *file)
{
	struct seq_file *m;
	int ret;

	ret = seq_open(file, &trace_format_seq_ops);
	if (ret < 0)
		return ret;

	m = file->private_data;
	m->private = file;

	return 0;
}

static ssize_t
event_id_read(struct file *filp, char __user *ubuf, size_t cnt, loff_t *ppos)
{
	int id = (long)event_file_data(filp);
	char buf[32];
	int len;

	if (unlikely(!id))
		return -ENODEV;

	len = sprintf(buf, "%d\n", id);

	return simple_read_from_buffer(ubuf, cnt, ppos, buf, len);
}

static ssize_t
event_filter_read(struct file *filp, char __user *ubuf, size_t cnt,
		  loff_t *ppos)
{
	struct trace_event_file *file;
	struct trace_seq *s;
	int r = -ENODEV;

	if (*ppos)
		return 0;

	s = kmalloc(sizeof(*s), GFP_KERNEL);

	if (!s)
		return -ENOMEM;

	trace_seq_init(s);

	mutex_lock(&event_mutex);
	file = event_file_data(filp);
	if (file)
		print_event_filter(file, s);
	mutex_unlock(&event_mutex);

	if (file)
		r = simple_read_from_buffer(ubuf, cnt, ppos,
					    s->buffer, trace_seq_used(s));

	kfree(s);

	return r;
}

static ssize_t
event_filter_write(struct file *filp, const char __user *ubuf, size_t cnt,
		   loff_t *ppos)
{
	struct trace_event_file *file;
	char *buf;
	int err = -ENODEV;

	if (cnt >= PAGE_SIZE)
		return -EINVAL;

	buf = memdup_user_nul(ubuf, cnt);
	if (IS_ERR(buf))
		return PTR_ERR(buf);

	mutex_lock(&event_mutex);
	file = event_file_data(filp);
	if (file)
		err = apply_event_filter(file, buf);
	mutex_unlock(&event_mutex);

	kfree(buf);
	if (err < 0)
		return err;

	*ppos += cnt;

	return cnt;
}

static LIST_HEAD(event_subsystems);

static int subsystem_open(struct inode *inode, struct file *filp)
{
	struct event_subsystem *system = NULL;
	struct trace_subsystem_dir *dir = NULL; /* Initialize for gcc */
	struct trace_array *tr;
	int ret;

	if (tracing_is_disabled())
		return -ENODEV;

	/* Make sure the system still exists */
	mutex_lock(&trace_types_lock);
	mutex_lock(&event_mutex);
	list_for_each_entry(tr, &ftrace_trace_arrays, list) {
		list_for_each_entry(dir, &tr->systems, list) {
			if (dir == inode->i_private) {
				/* Don't open systems with no events */
				if (dir->nr_events) {
					__get_system_dir(dir);
					system = dir->subsystem;
				}
				goto exit_loop;
			}
		}
	}
 exit_loop:
	mutex_unlock(&event_mutex);
	mutex_unlock(&trace_types_lock);

	if (!system)
		return -ENODEV;

	/* Some versions of gcc think dir can be uninitialized here */
	WARN_ON(!dir);

	/* Still need to increment the ref count of the system */
	if (trace_array_get(tr) < 0) {
		put_system(dir);
		return -ENODEV;
	}

	ret = tracing_open_generic(inode, filp);
	if (ret < 0) {
		trace_array_put(tr);
		put_system(dir);
	}

	return ret;
}

static int system_tr_open(struct inode *inode, struct file *filp)
{
	struct trace_subsystem_dir *dir;
	struct trace_array *tr = inode->i_private;
	int ret;

	if (tracing_is_disabled())
		return -ENODEV;

	if (trace_array_get(tr) < 0)
		return -ENODEV;

	/* Make a temporary dir that has no system but points to tr */
	dir = kzalloc(sizeof(*dir), GFP_KERNEL);
	if (!dir) {
		trace_array_put(tr);
		return -ENOMEM;
	}

	dir->tr = tr;

	ret = tracing_open_generic(inode, filp);
	if (ret < 0) {
		trace_array_put(tr);
		kfree(dir);
		return ret;
	}

	filp->private_data = dir;

	return 0;
}

static int subsystem_release(struct inode *inode, struct file *file)
{
	struct trace_subsystem_dir *dir = file->private_data;

	trace_array_put(dir->tr);

	/*
	 * If dir->subsystem is NULL, then this is a temporary
	 * descriptor that was made for a trace_array to enable
	 * all subsystems.
	 */
	if (dir->subsystem)
		put_system(dir);
	else
		kfree(dir);

	return 0;
}

static ssize_t
subsystem_filter_read(struct file *filp, char __user *ubuf, size_t cnt,
		      loff_t *ppos)
{
	struct trace_subsystem_dir *dir = filp->private_data;
	struct event_subsystem *system = dir->subsystem;
	struct trace_seq *s;
	int r;

	if (*ppos)
		return 0;

	s = kmalloc(sizeof(*s), GFP_KERNEL);
	if (!s)
		return -ENOMEM;

	trace_seq_init(s);

	print_subsystem_event_filter(system, s);
	r = simple_read_from_buffer(ubuf, cnt, ppos,
				    s->buffer, trace_seq_used(s));

	kfree(s);

	return r;
}

static ssize_t
subsystem_filter_write(struct file *filp, const char __user *ubuf, size_t cnt,
		       loff_t *ppos)
{
	struct trace_subsystem_dir *dir = filp->private_data;
	char *buf;
	int err;

	if (cnt >= PAGE_SIZE)
		return -EINVAL;

	buf = memdup_user_nul(ubuf, cnt);
	if (IS_ERR(buf))
		return PTR_ERR(buf);

	err = apply_subsystem_event_filter(dir, buf);
	kfree(buf);
	if (err < 0)
		return err;

	*ppos += cnt;

	return cnt;
}

static ssize_t
show_header(struct file *filp, char __user *ubuf, size_t cnt, loff_t *ppos)
{
	int (*func)(struct trace_seq *s) = filp->private_data;
	struct trace_seq *s;
	int r;

	if (*ppos)
		return 0;

	s = kmalloc(sizeof(*s), GFP_KERNEL);
	if (!s)
		return -ENOMEM;

	trace_seq_init(s);

	func(s);
	r = simple_read_from_buffer(ubuf, cnt, ppos,
				    s->buffer, trace_seq_used(s));

	kfree(s);

	return r;
}

static void ignore_task_cpu(void *data)
{
	struct trace_array *tr = data;
	struct trace_pid_list *pid_list;

	/*
	 * This function is called by on_each_cpu() while the
	 * event_mutex is held.
	 */
	pid_list = rcu_dereference_protected(tr->filtered_pids,
					     mutex_is_locked(&event_mutex));

	this_cpu_write(tr->trace_buffer.data->ignore_pid,
		       trace_ignore_this_task(pid_list, current));
}

static ssize_t
ftrace_event_pid_write(struct file *filp, const char __user *ubuf,
		       size_t cnt, loff_t *ppos)
{
	struct seq_file *m = filp->private_data;
	struct trace_array *tr = m->private;
	struct trace_pid_list *filtered_pids = NULL;
	struct trace_pid_list *pid_list;
	struct trace_event_file *file;
	ssize_t ret;

	if (!cnt)
		return 0;

	ret = tracing_update_buffers();
	if (ret < 0)
		return ret;

	mutex_lock(&event_mutex);

	filtered_pids = rcu_dereference_protected(tr->filtered_pids,
					     lockdep_is_held(&event_mutex));

	ret = trace_pid_write(filtered_pids, &pid_list, ubuf, cnt);
	if (ret < 0)
		goto out;

	rcu_assign_pointer(tr->filtered_pids, pid_list);

	list_for_each_entry(file, &tr->events, list) {
		set_bit(EVENT_FILE_FL_PID_FILTER_BIT, &file->flags);
	}

	if (filtered_pids) {
		synchronize_sched();
		trace_free_pid_list(filtered_pids);
	} else if (pid_list) {
		/*
		 * Register a probe that is called before all other probes
		 * to set ignore_pid if next or prev do not match.
		 * Register a probe that is called after all other probes
		 * to only keep ignore_pid set if next pid matches.
		 */
1627                 register_trace_prio_sched_switch(event_filter_pid_sched_switch_probe_pre,
1628                                                  tr, INT_MAX);
1629                 register_trace_prio_sched_switch(event_filter_pid_sched_switch_probe_post,
1630                                                  tr, 0);
1631
1632                 register_trace_prio_sched_wakeup(event_filter_pid_sched_wakeup_probe_pre,
1633                                                  tr, INT_MAX);
1634                 register_trace_prio_sched_wakeup(event_filter_pid_sched_wakeup_probe_post,
1635                                                  tr, 0);
1636
1637                 register_trace_prio_sched_wakeup_new(event_filter_pid_sched_wakeup_probe_pre,
1638                                                      tr, INT_MAX);
1639                 register_trace_prio_sched_wakeup_new(event_filter_pid_sched_wakeup_probe_post,
1640                                                      tr, 0);
1641
1642                 register_trace_prio_sched_waking(event_filter_pid_sched_wakeup_probe_pre,
1643                                                  tr, INT_MAX);
1644                 register_trace_prio_sched_waking(event_filter_pid_sched_wakeup_probe_post,
1645                                                  tr, 0);
1646         }
1647
1648         /*
1649          * Ignoring of pids is done at task switch. But we have to
1650          * check for those tasks that are currently running.
1651          * Always do this in case a pid was appended or removed.
1652          */
1653         on_each_cpu(ignore_task_cpu, tr, 1);
1654
1655  out:
1656         mutex_unlock(&event_mutex);
1657
1658         if (ret > 0)
1659                 *ppos += ret;
1660
1661         return ret;
1662 }
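/*
 * A sketch of the user-space side of the pid filter implemented above
 * (paths assume tracefs is mounted at /sys/kernel/debug/tracing):
 *
 *   # echo 123 456 > set_event_pid    # trace only PIDs 123 and 456
 *   # echo 789 >> set_event_pid       # append another PID to the list
 *   # echo > set_event_pid            # clear the filter (via O_TRUNC)
 */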
1663
1664 static int ftrace_event_avail_open(struct inode *inode, struct file *file);
1665 static int ftrace_event_set_open(struct inode *inode, struct file *file);
1666 static int ftrace_event_set_pid_open(struct inode *inode, struct file *file);
1667 static int ftrace_event_release(struct inode *inode, struct file *file);
1668
1669 static const struct seq_operations show_event_seq_ops = {
1670         .start = t_start,
1671         .next = t_next,
1672         .show = t_show,
1673         .stop = t_stop,
1674 };
1675
1676 static const struct seq_operations show_set_event_seq_ops = {
1677         .start = s_start,
1678         .next = s_next,
1679         .show = t_show,
1680         .stop = t_stop,
1681 };
1682
1683 static const struct seq_operations show_set_pid_seq_ops = {
1684         .start = p_start,
1685         .next = p_next,
1686         .show = trace_pid_show,
1687         .stop = p_stop,
1688 };
1689
1690 static const struct file_operations ftrace_avail_fops = {
1691         .open = ftrace_event_avail_open,
1692         .read = seq_read,
1693         .llseek = seq_lseek,
1694         .release = seq_release,
1695 };
1696
1697 static const struct file_operations ftrace_set_event_fops = {
1698         .open = ftrace_event_set_open,
1699         .read = seq_read,
1700         .write = ftrace_event_write,
1701         .llseek = seq_lseek,
1702         .release = ftrace_event_release,
1703 };
1704
1705 static const struct file_operations ftrace_set_event_pid_fops = {
1706         .open = ftrace_event_set_pid_open,
1707         .read = seq_read,
1708         .write = ftrace_event_pid_write,
1709         .llseek = seq_lseek,
1710         .release = ftrace_event_release,
1711 };
1712
1713 static const struct file_operations ftrace_enable_fops = {
1714         .open = tracing_open_generic,
1715         .read = event_enable_read,
1716         .write = event_enable_write,
1717         .llseek = default_llseek,
1718 };
1719
1720 static const struct file_operations ftrace_event_format_fops = {
1721         .open = trace_format_open,
1722         .read = seq_read,
1723         .llseek = seq_lseek,
1724         .release = seq_release,
1725 };
1726
1727 static const struct file_operations ftrace_event_id_fops = {
1728         .read = event_id_read,
1729         .llseek = default_llseek,
1730 };
1731
1732 static const struct file_operations ftrace_event_filter_fops = {
1733         .open = tracing_open_generic,
1734         .read = event_filter_read,
1735         .write = event_filter_write,
1736         .llseek = default_llseek,
1737 };
1738
1739 static const struct file_operations ftrace_subsystem_filter_fops = {
1740         .open = subsystem_open,
1741         .read = subsystem_filter_read,
1742         .write = subsystem_filter_write,
1743         .llseek = default_llseek,
1744         .release = subsystem_release,
1745 };
1746
1747 static const struct file_operations ftrace_system_enable_fops = {
1748         .open = subsystem_open,
1749         .read = system_enable_read,
1750         .write = system_enable_write,
1751         .llseek = default_llseek,
1752         .release = subsystem_release,
1753 };
1754
1755 static const struct file_operations ftrace_tr_enable_fops = {
1756         .open = system_tr_open,
1757         .read = system_enable_read,
1758         .write = system_enable_write,
1759         .llseek = default_llseek,
1760         .release = subsystem_release,
1761 };
1762
1763 static const struct file_operations ftrace_show_header_fops = {
1764         .open = tracing_open_generic,
1765         .read = show_header,
1766         .llseek = default_llseek,
1767 };
1768
1769 static int
1770 ftrace_event_open(struct inode *inode, struct file *file,
1771                   const struct seq_operations *seq_ops)
1772 {
1773         struct seq_file *m;
1774         int ret;
1775
1776         ret = seq_open(file, seq_ops);
1777         if (ret < 0)
1778                 return ret;
1779         m = file->private_data;
1780         /* copy tr over to seq ops */
1781         m->private = inode->i_private;
1782
1783         return ret;
1784 }
1785
1786 static int ftrace_event_release(struct inode *inode, struct file *file)
1787 {
1788         struct trace_array *tr = inode->i_private;
1789
1790         trace_array_put(tr);
1791
1792         return seq_release(inode, file);
1793 }
1794
1795 static int
1796 ftrace_event_avail_open(struct inode *inode, struct file *file)
1797 {
1798         const struct seq_operations *seq_ops = &show_event_seq_ops;
1799
1800         return ftrace_event_open(inode, file, seq_ops);
1801 }
1802
1803 static int
1804 ftrace_event_set_open(struct inode *inode, struct file *file)
1805 {
1806         const struct seq_operations *seq_ops = &show_set_event_seq_ops;
1807         struct trace_array *tr = inode->i_private;
1808         int ret;
1809
1810         if (trace_array_get(tr) < 0)
1811                 return -ENODEV;
1812
1813         if ((file->f_mode & FMODE_WRITE) &&
1814             (file->f_flags & O_TRUNC))
1815                 ftrace_clear_events(tr);
1816
1817         ret = ftrace_event_open(inode, file, seq_ops);
1818         if (ret < 0)
1819                 trace_array_put(tr);
1820         return ret;
1821 }
1822
1823 static int
1824 ftrace_event_set_pid_open(struct inode *inode, struct file *file)
1825 {
1826         const struct seq_operations *seq_ops = &show_set_pid_seq_ops;
1827         struct trace_array *tr = inode->i_private;
1828         int ret;
1829
1830         if (trace_array_get(tr) < 0)
1831                 return -ENODEV;
1832
1833         if ((file->f_mode & FMODE_WRITE) &&
1834             (file->f_flags & O_TRUNC))
1835                 ftrace_clear_event_pids(tr);
1836
1837         ret = ftrace_event_open(inode, file, seq_ops);
1838         if (ret < 0)
1839                 trace_array_put(tr);
1840         return ret;
1841 }
1842
1843 static struct event_subsystem *
1844 create_new_subsystem(const char *name)
1845 {
1846         struct event_subsystem *system;
1847
1848         /* need to create new entry */
1849         system = kmalloc(sizeof(*system), GFP_KERNEL);
1850         if (!system)
1851                 return NULL;
1852
1853         system->ref_count = 1;
1854
1855         /* Only allocate if dynamic (kprobes and modules) */
1856         system->name = kstrdup_const(name, GFP_KERNEL);
1857         if (!system->name)
1858                 goto out_free;
1859
1860         system->filter = NULL;
1861
1862         system->filter = kzalloc(sizeof(struct event_filter), GFP_KERNEL);
1863         if (!system->filter)
1864                 goto out_free;
1865
1866         list_add(&system->list, &event_subsystems);
1867
1868         return system;
1869
1870  out_free:
1871         kfree_const(system->name);
1872         kfree(system);
1873         return NULL;
1874 }
1875
1876 static struct dentry *
1877 event_subsystem_dir(struct trace_array *tr, const char *name,
1878                     struct trace_event_file *file, struct dentry *parent)
1879 {
1880         struct trace_subsystem_dir *dir;
1881         struct event_subsystem *system;
1882         struct dentry *entry;
1883
1884         /* First see if we already created this dir */
1885         list_for_each_entry(dir, &tr->systems, list) {
1886                 system = dir->subsystem;
1887                 if (strcmp(system->name, name) == 0) {
1888                         dir->nr_events++;
1889                         file->system = dir;
1890                         return dir->entry;
1891                 }
1892         }
1893
1894         /* Now see if the system itself exists. */
1895         list_for_each_entry(system, &event_subsystems, list) {
1896                 if (strcmp(system->name, name) == 0)
1897                         break;
1898         }
1899         /* Reset system variable when not found */
1900         if (&system->list == &event_subsystems)
1901                 system = NULL;
1902
1903         dir = kmalloc(sizeof(*dir), GFP_KERNEL);
1904         if (!dir)
1905                 goto out_fail;
1906
1907         if (!system) {
1908                 system = create_new_subsystem(name);
1909                 if (!system)
1910                         goto out_free;
1911         } else
1912                 __get_system(system);
1913
1914         dir->entry = tracefs_create_dir(name, parent);
1915         if (!dir->entry) {
1916                 pr_warn("Failed to create system directory %s\n", name);
1917                 __put_system(system);
1918                 goto out_free;
1919         }
1920
1921         dir->tr = tr;
1922         dir->ref_count = 1;
1923         dir->nr_events = 1;
1924         dir->subsystem = system;
1925         file->system = dir;
1926
1927         entry = tracefs_create_file("filter", 0644, dir->entry, dir,
1928                                     &ftrace_subsystem_filter_fops);
1929         if (!entry) {
1930                 kfree(system->filter);
1931                 system->filter = NULL;
1932                 pr_warn("Could not create tracefs '%s/filter' entry\n", name);
1933         }
1934
1935         trace_create_file("enable", 0644, dir->entry, dir,
1936                           &ftrace_system_enable_fops);
1937
1938         list_add(&dir->list, &tr->systems);
1939
1940         return dir->entry;
1941
1942  out_free:
1943         kfree(dir);
1944  out_fail:
1945         /* Only print this message if we failed on memory allocation */
1946         if (!dir || !system)
1947                 pr_warn("No memory to create event subsystem %s\n", name);
1948         return NULL;
1949 }
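/*
 * For example, once the first "sched" event is added, the subsystem
 * directory created above is expected to look like:
 *
 *   events/sched/
 *   events/sched/filter
 *   events/sched/enable
 */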
1950
1951 static int
1952 event_create_dir(struct dentry *parent, struct trace_event_file *file)
1953 {
1954         struct trace_event_call *call = file->event_call;
1955         struct trace_array *tr = file->tr;
1956         struct list_head *head;
1957         struct dentry *d_events;
1958         const char *name;
1959         int ret;
1960
1961         /*
1962          * If the trace point header did not define TRACE_SYSTEM
1963          * then the system would be called "TRACE_SYSTEM".
1964          */
1965         if (strcmp(call->class->system, TRACE_SYSTEM) != 0) {
1966                 d_events = event_subsystem_dir(tr, call->class->system, file, parent);
1967                 if (!d_events)
1968                         return -ENOMEM;
1969         } else
1970                 d_events = parent;
1971
1972         name = trace_event_name(call);
1973         file->dir = tracefs_create_dir(name, d_events);
1974         if (!file->dir) {
1975                 pr_warn("Could not create tracefs '%s' directory\n", name);
1976                 return -1;
1977         }
1978
1979         if (call->class->reg && !(call->flags & TRACE_EVENT_FL_IGNORE_ENABLE))
1980                 trace_create_file("enable", 0644, file->dir, file,
1981                                   &ftrace_enable_fops);
1982
1983 #ifdef CONFIG_PERF_EVENTS
1984         if (call->event.type && call->class->reg)
1985                 trace_create_file("id", 0444, file->dir,
1986                                   (void *)(long)call->event.type,
1987                                   &ftrace_event_id_fops);
1988 #endif
1989
1990         /*
1991          * Other events may have the same class. Only update
1992          * the fields if they are not already defined.
1993          */
1994         head = trace_get_fields(call);
1995         if (list_empty(head)) {
1996                 ret = call->class->define_fields(call);
1997                 if (ret < 0) {
1998                         pr_warn("Could not initialize trace point events/%s\n",
1999                                 name);
2000                         return -1;
2001                 }
2002         }
2003         trace_create_file("filter", 0644, file->dir, file,
2004                           &ftrace_event_filter_fops);
2005
2006         /*
2007          * Only event directories that can be enabled should have
2008          * triggers.
2009          */
2010         if (!(call->flags & TRACE_EVENT_FL_IGNORE_ENABLE))
2011                 trace_create_file("trigger", 0644, file->dir, file,
2012                                   &event_trigger_fops);
2013
2014 #ifdef CONFIG_HIST_TRIGGERS
2015         trace_create_file("hist", 0444, file->dir, file,
2016                           &event_hist_fops);
2017 #endif
2018         trace_create_file("format", 0444, file->dir, call,
2019                           &ftrace_event_format_fops);
2020
2021         return 0;
2022 }
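/*
 * The per-event directory built above ends up looking roughly like,
 * e.g. for sched_switch:
 *
 *   events/sched/sched_switch/enable
 *   events/sched/sched_switch/id       (CONFIG_PERF_EVENTS only)
 *   events/sched/sched_switch/filter
 *   events/sched/sched_switch/trigger
 *   events/sched/sched_switch/hist     (CONFIG_HIST_TRIGGERS only)
 *   events/sched/sched_switch/format
 */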
2023
2024 static void remove_event_from_tracers(struct trace_event_call *call)
2025 {
2026         struct trace_event_file *file;
2027         struct trace_array *tr;
2028
2029         do_for_each_event_file_safe(tr, file) {
2030                 if (file->event_call != call)
2031                         continue;
2032
2033                 remove_event_file_dir(file);
2034                 /*
2035                  * The do_for_each_event_file_safe() is
2036                  * a double loop. After finding the call for this
2037                  * trace_array, we use break to jump to the next
2038                  * trace_array.
2039                  */
2040                 break;
2041         } while_for_each_event_file();
2042 }
2043
2044 static void event_remove(struct trace_event_call *call)
2045 {
2046         struct trace_array *tr;
2047         struct trace_event_file *file;
2048
2049         do_for_each_event_file(tr, file) {
2050                 if (file->event_call != call)
2051                         continue;
2052                 ftrace_event_enable_disable(file, 0);
2053                 /*
2054                  * The do_for_each_event_file() is
2055                  * a double loop. After finding the call for this
2056                  * trace_array, we use break to jump to the next
2057                  * trace_array.
2058                  */
2059                 break;
2060         } while_for_each_event_file();
2061
2062         if (call->event.funcs)
2063                 __unregister_trace_event(&call->event);
2064         remove_event_from_tracers(call);
2065         list_del(&call->list);
2066 }
2067
2068 static int event_init(struct trace_event_call *call)
2069 {
2070         int ret = 0;
2071         const char *name;
2072
2073         name = trace_event_name(call);
2074         if (WARN_ON(!name))
2075                 return -EINVAL;
2076
2077         if (call->class->raw_init) {
2078                 ret = call->class->raw_init(call);
2079                 if (ret < 0 && ret != -ENOSYS)
2080                         pr_warn("Could not initialize trace events/%s\n", name);
2081         }
2082
2083         return ret;
2084 }
2085
2086 static int
2087 __register_event(struct trace_event_call *call, struct module *mod)
2088 {
2089         int ret;
2090
2091         ret = event_init(call);
2092         if (ret < 0)
2093                 return ret;
2094
2095         list_add(&call->list, &ftrace_events);
2096         call->mod = mod;
2097
2098         return 0;
2099 }
2100
2101 static char *enum_replace(char *ptr, struct trace_enum_map *map, int len)
2102 {
2103         int rlen;
2104         int elen;
2105
2106         /* Find the length of the enum value as a string */
2107         elen = snprintf(ptr, 0, "%ld", map->enum_value);
2108         /* Make sure there's enough room to replace the string with the value */
2109         if (len < elen)
2110                 return NULL;
2111
2112         snprintf(ptr, elen + 1, "%ld", map->enum_value);
2113
2114         /* Get the length of the rest of the string past the enum name */
2115         rlen = strlen(ptr + len);
2116         memmove(ptr + elen, ptr + len, rlen);
2117         /* Make sure we end the new string */
2118         ptr[elen + rlen] = 0;
2119
2120         return ptr + elen;
2121 }
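/*
 * A worked example with a hypothetical enum EM_MAX of value 3: called
 * with ptr pointing at "EM_MAX" (len == 6) in the fragment
 *
 *   __print_symbolic(REC->mode, { EM_MAX, "max" })
 *
 * the string is rewritten in place to
 *
 *   __print_symbolic(REC->mode, { 3, "max" })
 *
 * and the returned pointer points just past the "3".
 */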
2122
2123 static void update_event_printk(struct trace_event_call *call,
2124                                 struct trace_enum_map *map)
2125 {
2126         char *ptr;
2127         int quote = 0;
2128         int len = strlen(map->enum_string);
2129
2130         for (ptr = call->print_fmt; *ptr; ptr++) {
2131                 if (*ptr == '\\') {
2132                         ptr++;
2133                         /* paranoid */
2134                         if (!*ptr)
2135                                 break;
2136                         continue;
2137                 }
2138                 if (*ptr == '"') {
2139                         quote ^= 1;
2140                         continue;
2141                 }
2142                 if (quote)
2143                         continue;
2144                 if (isdigit(*ptr)) {
2145                         /* skip numbers */
2146                         do {
2147                                 ptr++;
2148                                 /* Check for alpha chars like ULL */
2149                         } while (isalnum(*ptr));
2150                         if (!*ptr)
2151                                 break;
2152                         /*
2153                          * A number must have some kind of delimiter after
2154                          * it, and we can ignore that too.
2155                          */
2156                         continue;
2157                 }
2158                 if (isalpha(*ptr) || *ptr == '_') {
2159                         if (strncmp(map->enum_string, ptr, len) == 0 &&
2160                             !isalnum(ptr[len]) && ptr[len] != '_') {
2161                                 ptr = enum_replace(ptr, map, len);
2162                                 /* Hmm, enum string smaller than value */
2163                                 if (WARN_ON_ONCE(!ptr))
2164                                         return;
2165                                 /*
2166                                  * No need to decrement here, as enum_replace()
2167                  * returns the pointer to the character past
2168                  * the enum, and two enums cannot be placed
2169                                  * back to back without something in between.
2170                                  * We can skip that something in between.
2171                                  */
2172                                 continue;
2173                         }
2174                 skip_more:
2175                         do {
2176                                 ptr++;
2177                         } while (isalnum(*ptr) || *ptr == '_');
2178                         if (!*ptr)
2179                                 break;
2180                         /*
2181                          * If what comes after this variable is a '.' or
2182                          * '->' then we can continue to ignore that string.
2183                          */
2184                         if (*ptr == '.' || (ptr[0] == '-' && ptr[1] == '>')) {
2185                                 ptr += *ptr == '.' ? 1 : 2;
2186                                 if (!*ptr)
2187                                         break;
2188                                 goto skip_more;
2189                         }
2190                         /*
2191                          * Once again, we can skip the delimiter that came
2192                          * after the string.
2193                          */
2194                         continue;
2195                 }
2196         }
2197 }
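/*
 * E.g. while scanning a (hypothetical) print_fmt such as
 *
 *   "mode=%d", REC->mode == EM_MAX ? 10 : REC->mode
 *
 * the quoted format string, the number 10 and the "REC->mode" field
 * accesses are all skipped; only the bare identifier EM_MAX is matched
 * and handed to enum_replace() above.
 */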
2198
2199 void trace_event_enum_update(struct trace_enum_map **map, int len)
2200 {
2201         struct trace_event_call *call, *p;
2202         const char *last_system = NULL;
2203         bool first = false;
2204         int last_i;
2205         int i;
2206
2207         down_write(&trace_event_sem);
2208         list_for_each_entry_safe(call, p, &ftrace_events, list) {
2209                 /* events are usually grouped together by system */
2210                 if (!last_system || call->class->system != last_system) {
2211                         first = true;
2212                         last_i = 0;
2213                         last_system = call->class->system;
2214                 }
2215
2216                 /*
2217                  * Since calls are grouped by systems, the likelihood that the
2218                  * next call in the iteration belongs to the same system as the
2219                  * previous call is high. As an optimization, we skip searching
2220                  * for a map[] that matches the call's system if the last call
2221                  * was from the same system. That's what last_i is for. If the
2222                  * call has the same system as the previous call, then last_i
2223                  * will be the index of the first map[] that has a matching
2224                  * system.
2225                  */
2226                 for (i = last_i; i < len; i++) {
2227                         if (call->class->system == map[i]->system) {
2228                                 /* Save the first system if need be */
2229                                 if (first) {
2230                                         last_i = i;
2231                                         first = false;
2232                                 }
2233                                 update_event_printk(call, map[i]);
2234                         }
2235                 }
2236         }
2237         up_write(&trace_event_sem);
2238 }
2239
2240 static struct trace_event_file *
2241 trace_create_new_event(struct trace_event_call *call,
2242                        struct trace_array *tr)
2243 {
2244         struct trace_pid_list *pid_list;
2245         struct trace_event_file *file;
2246
2247         file = kmem_cache_alloc(file_cachep, GFP_TRACE);
2248         if (!file)
2249                 return NULL;
2250
2251         pid_list = rcu_dereference_protected(tr->filtered_pids,
2252                                              lockdep_is_held(&event_mutex));
2253
2254         if (pid_list)
2255                 file->flags |= EVENT_FILE_FL_PID_FILTER;
2256
2257         file->event_call = call;
2258         file->tr = tr;
2259         atomic_set(&file->sm_ref, 0);
2260         atomic_set(&file->tm_ref, 0);
2261         INIT_LIST_HEAD(&file->triggers);
2262         list_add(&file->list, &tr->events);
2263
2264         return file;
2265 }
2266
2267 /* Add an event to a trace directory */
2268 static int
2269 __trace_add_new_event(struct trace_event_call *call, struct trace_array *tr)
2270 {
2271         struct trace_event_file *file;
2272
2273         file = trace_create_new_event(call, tr);
2274         if (!file)
2275                 return -ENOMEM;
2276
2277         return event_create_dir(tr->event_dir, file);
2278 }
2279
2280 /*
2281  * Just create a descriptor for early init. A descriptor is required
2282  * for enabling events at boot. We want to enable events before
2283  * the filesystem is initialized.
2284  */
2285 static __init int
2286 __trace_early_add_new_event(struct trace_event_call *call,
2287                             struct trace_array *tr)
2288 {
2289         struct trace_event_file *file;
2290
2291         file = trace_create_new_event(call, tr);
2292         if (!file)
2293                 return -ENOMEM;
2294
2295         return 0;
2296 }
2297
2298 struct ftrace_module_file_ops;
2299 static void __add_event_to_tracers(struct trace_event_call *call);
2300
2301 /* Add an additional event_call dynamically */
2302 int trace_add_event_call(struct trace_event_call *call)
2303 {
2304         int ret;
2305         mutex_lock(&trace_types_lock);
2306         mutex_lock(&event_mutex);
2307
2308         ret = __register_event(call, NULL);
2309         if (ret >= 0)
2310                 __add_event_to_tracers(call);
2311
2312         mutex_unlock(&event_mutex);
2313         mutex_unlock(&trace_types_lock);
2314         return ret;
2315 }
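/*
 * A minimal sketch of how a dynamic user, such as the kprobe event
 * code, is expected to use this pair (error handling elided):
 *
 *   ret = trace_add_event_call(call);    - call fully set up first
 *   ...
 *   ret = trace_remove_event_call(call); - fails with -EBUSY while in use
 */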
2316
2317 /*
2318  * Must be called under locking of trace_types_lock, event_mutex and
2319  * trace_event_sem.
2320  */
2321 static void __trace_remove_event_call(struct trace_event_call *call)
2322 {
2323         event_remove(call);
2324         trace_destroy_fields(call);
2325         free_event_filter(call->filter);
2326         call->filter = NULL;
2327 }
2328
2329 static int probe_remove_event_call(struct trace_event_call *call)
2330 {
2331         struct trace_array *tr;
2332         struct trace_event_file *file;
2333
2334 #ifdef CONFIG_PERF_EVENTS
2335         if (call->perf_refcount)
2336                 return -EBUSY;
2337 #endif
2338         do_for_each_event_file(tr, file) {
2339                 if (file->event_call != call)
2340                         continue;
2341                 /*
2342                  * We can't rely on the ftrace_event_enable_disable(enable => 0)
2343                  * that we are going to do; EVENT_FILE_FL_SOFT_MODE can suppress
2344                  * TRACE_REG_UNREGISTER.
2345                  */
2346                 if (file->flags & EVENT_FILE_FL_ENABLED)
2347                         return -EBUSY;
2348                 /*
2349                  * The do_for_each_event_file() is
2350                  * a double loop. After finding the call for this
2351                  * trace_array, we use break to jump to the next
2352                  * trace_array.
2353                  */
2354                 break;
2355         } while_for_each_event_file();
2356
2357         __trace_remove_event_call(call);
2358
2359         return 0;
2360 }
2361
2362 /* Remove an event_call */
2363 int trace_remove_event_call(struct trace_event_call *call)
2364 {
2365         int ret;
2366
2367         mutex_lock(&trace_types_lock);
2368         mutex_lock(&event_mutex);
2369         down_write(&trace_event_sem);
2370         ret = probe_remove_event_call(call);
2371         up_write(&trace_event_sem);
2372         mutex_unlock(&event_mutex);
2373         mutex_unlock(&trace_types_lock);
2374
2375         return ret;
2376 }
2377
2378 #define for_each_event(event, start, end)                       \
2379         for (event = start;                                     \
2380              (unsigned long)event < (unsigned long)end;         \
2381              event++)
2382
2383 #ifdef CONFIG_MODULES
2384
2385 static void trace_module_add_events(struct module *mod)
2386 {
2387         struct trace_event_call **call, **start, **end;
2388
2389         if (!mod->num_trace_events)
2390                 return;
2391
2392         /* Don't add infrastructure for mods without tracepoints */
2393         if (trace_module_has_bad_taint(mod)) {
2394                 pr_err("%s: module has bad taint, not creating trace events\n",
2395                        mod->name);
2396                 return;
2397         }
2398
2399         start = mod->trace_events;
2400         end = mod->trace_events + mod->num_trace_events;
2401
2402         for_each_event(call, start, end) {
2403                 __register_event(*call, mod);
2404                 __add_event_to_tracers(*call);
2405         }
2406 }
2407
2408 static void trace_module_remove_events(struct module *mod)
2409 {
2410         struct trace_event_call *call, *p;
2411         bool clear_trace = false;
2412
2413         down_write(&trace_event_sem);
2414         list_for_each_entry_safe(call, p, &ftrace_events, list) {
2415                 if (call->mod == mod) {
2416                         if (call->flags & TRACE_EVENT_FL_WAS_ENABLED)
2417                                 clear_trace = true;
2418                         __trace_remove_event_call(call);
2419                 }
2420         }
2421         up_write(&trace_event_sem);
2422
2423         /*
2424          * It is safest to reset the ring buffer if the module being unloaded
2425          * registered any events that were used. The only worry is if
2426          * a new module gets loaded, and takes on the same id as the events
2427          * of this module. When printing out the buffer, traced events left
2428          * over from this module may be passed to the new module events and
2429          * unexpected results may occur.
2430          */
2431         if (clear_trace)
2432                 tracing_reset_all_online_cpus();
2433 }
2434
2435 static int trace_module_notify(struct notifier_block *self,
2436                                unsigned long val, void *data)
2437 {
2438         struct module *mod = data;
2439
2440         mutex_lock(&trace_types_lock);
2441         mutex_lock(&event_mutex);
2442         switch (val) {
2443         case MODULE_STATE_COMING:
2444                 trace_module_add_events(mod);
2445                 break;
2446         case MODULE_STATE_GOING:
2447                 trace_module_remove_events(mod);
2448                 break;
2449         }
2450         mutex_unlock(&event_mutex);
2451         mutex_unlock(&trace_types_lock);
2452
2453         return 0;
2454 }
2455
2456 static struct notifier_block trace_module_nb = {
2457         .notifier_call = trace_module_notify,
2458         .priority = 1, /* higher than trace.c module notify */
2459 };
2460 #endif /* CONFIG_MODULES */
2461
2462 /* Create a new event directory structure for a trace directory. */
2463 static void
2464 __trace_add_event_dirs(struct trace_array *tr)
2465 {
2466         struct trace_event_call *call;
2467         int ret;
2468
2469         list_for_each_entry(call, &ftrace_events, list) {
2470                 ret = __trace_add_new_event(call, tr);
2471                 if (ret < 0)
2472                         pr_warn("Could not create directory for event %s\n",
2473                                 trace_event_name(call));
2474         }
2475 }
2476
2477 struct trace_event_file *
2478 find_event_file(struct trace_array *tr, const char *system,  const char *event)
2479 {
2480         struct trace_event_file *file;
2481         struct trace_event_call *call;
2482         const char *name;
2483
2484         list_for_each_entry(file, &tr->events, list) {
2485
2486                 call = file->event_call;
2487                 name = trace_event_name(call);
2488
2489                 if (!name || !call->class || !call->class->reg)
2490                         continue;
2491
2492                 if (call->flags & TRACE_EVENT_FL_IGNORE_ENABLE)
2493                         continue;
2494
2495                 if (strcmp(event, name) == 0 &&
2496                     strcmp(system, call->class->system) == 0)
2497                         return file;
2498         }
2499         return NULL;
2500 }
2501
2502 #ifdef CONFIG_DYNAMIC_FTRACE
2503
2504 /* Avoid typos */
2505 #define ENABLE_EVENT_STR        "enable_event"
2506 #define DISABLE_EVENT_STR       "disable_event"
2507
2508 struct event_probe_data {
2509         struct trace_event_file *file;
2510         unsigned long                   count;
2511         int                             ref;
2512         bool                            enable;
2513 };
2514
2515 static void
2516 event_enable_probe(unsigned long ip, unsigned long parent_ip, void **_data)
2517 {
2518         struct event_probe_data **pdata = (struct event_probe_data **)_data;
2519         struct event_probe_data *data = *pdata;
2520
2521         if (!data)
2522                 return;
2523
2524         if (data->enable)
2525                 clear_bit(EVENT_FILE_FL_SOFT_DISABLED_BIT, &data->file->flags);
2526         else
2527                 set_bit(EVENT_FILE_FL_SOFT_DISABLED_BIT, &data->file->flags);
2528 }
2529
2530 static void
2531 event_enable_count_probe(unsigned long ip, unsigned long parent_ip, void **_data)
2532 {
2533         struct event_probe_data **pdata = (struct event_probe_data **)_data;
2534         struct event_probe_data *data = *pdata;
2535
2536         if (!data)
2537                 return;
2538
2539         if (!data->count)
2540                 return;
2541
2542         /* Skip if the event is in a state we want to switch to */
2543         if (data->enable == !(data->file->flags & EVENT_FILE_FL_SOFT_DISABLED))
2544                 return;
2545
2546         if (data->count != -1)
2547                 (data->count)--;
2548
2549         event_enable_probe(ip, parent_ip, _data);
2550 }
2551
2552 static int
2553 event_enable_print(struct seq_file *m, unsigned long ip,
2554                       struct ftrace_probe_ops *ops, void *_data)
2555 {
2556         struct event_probe_data *data = _data;
2557
2558         seq_printf(m, "%ps:", (void *)ip);
2559
2560         seq_printf(m, "%s:%s:%s",
2561                    data->enable ? ENABLE_EVENT_STR : DISABLE_EVENT_STR,
2562                    data->file->event_call->class->system,
2563                    trace_event_name(data->file->event_call));
2564
2565         if (data->count == -1)
2566                 seq_puts(m, ":unlimited\n");
2567         else
2568                 seq_printf(m, ":count=%ld\n", data->count);
2569
2570         return 0;
2571 }
2572
2573 static int
2574 event_enable_init(struct ftrace_probe_ops *ops, unsigned long ip,
2575                   void **_data)
2576 {
2577         struct event_probe_data **pdata = (struct event_probe_data **)_data;
2578         struct event_probe_data *data = *pdata;
2579
2580         data->ref++;
2581         return 0;
2582 }
2583
2584 static void
2585 event_enable_free(struct ftrace_probe_ops *ops, unsigned long ip,
2586                   void **_data)
2587 {
2588         struct event_probe_data **pdata = (struct event_probe_data **)_data;
2589         struct event_probe_data *data = *pdata;
2590
2591         if (WARN_ON_ONCE(data->ref <= 0))
2592                 return;
2593
2594         data->ref--;
2595         if (!data->ref) {
2596                 /* Remove the SOFT_MODE flag */
2597                 __ftrace_event_enable_disable(data->file, 0, 1);
2598                 module_put(data->file->event_call->mod);
2599                 kfree(data);
2600         }
2601         *pdata = NULL;
2602 }
2603
2604 static struct ftrace_probe_ops event_enable_probe_ops = {
2605         .func                   = event_enable_probe,
2606         .print                  = event_enable_print,
2607         .init                   = event_enable_init,
2608         .free                   = event_enable_free,
2609 };
2610
2611 static struct ftrace_probe_ops event_enable_count_probe_ops = {
2612         .func                   = event_enable_count_probe,
2613         .print                  = event_enable_print,
2614         .init                   = event_enable_init,
2615         .free                   = event_enable_free,
2616 };
2617
2618 static struct ftrace_probe_ops event_disable_probe_ops = {
2619         .func                   = event_enable_probe,
2620         .print                  = event_enable_print,
2621         .init                   = event_enable_init,
2622         .free                   = event_enable_free,
2623 };
2624
2625 static struct ftrace_probe_ops event_disable_count_probe_ops = {
2626         .func                   = event_enable_count_probe,
2627         .print                  = event_enable_print,
2628         .init                   = event_enable_init,
2629         .free                   = event_enable_free,
2630 };
2631
2632 static int
2633 event_enable_func(struct ftrace_hash *hash,
2634                   char *glob, char *cmd, char *param, int enabled)
2635 {
2636         struct trace_array *tr = top_trace_array();
2637         struct trace_event_file *file;
2638         struct ftrace_probe_ops *ops;
2639         struct event_probe_data *data;
2640         const char *system;
2641         const char *event;
2642         char *number;
2643         bool enable;
2644         int ret;
2645
2646         if (!tr)
2647                 return -ENODEV;
2648
2649         /* hash funcs only work with set_ftrace_filter */
2650         if (!enabled || !param)
2651                 return -EINVAL;
2652
2653         system = strsep(&param, ":");
2654         if (!param)
2655                 return -EINVAL;
2656
2657         event = strsep(&param, ":");
2658
2659         mutex_lock(&event_mutex);
2660
2661         ret = -EINVAL;
2662         file = find_event_file(tr, system, event);
2663         if (!file)
2664                 goto out;
2665
2666         enable = strcmp(cmd, ENABLE_EVENT_STR) == 0;
2667
2668         if (enable)
2669                 ops = param ? &event_enable_count_probe_ops : &event_enable_probe_ops;
2670         else
2671                 ops = param ? &event_disable_count_probe_ops : &event_disable_probe_ops;
2672
2673         if (glob[0] == '!') {
2674                 unregister_ftrace_function_probe_func(glob+1, ops);
2675                 ret = 0;
2676                 goto out;
2677         }
2678
2679         ret = -ENOMEM;
2680         data = kzalloc(sizeof(*data), GFP_KERNEL);
2681         if (!data)
2682                 goto out;
2683
2684         data->enable = enable;
2685         data->count = -1;
2686         data->file = file;
2687
2688         if (!param)
2689                 goto out_reg;
2690
2691         number = strsep(&param, ":");
2692
2693         ret = -EINVAL;
2694         if (!strlen(number))
2695                 goto out_free;
2696
2697         /*
2698          * We use the callback data field (which is a pointer)
2699          * as our counter.
2700          */
2701         ret = kstrtoul(number, 0, &data->count);
2702         if (ret)
2703                 goto out_free;
2704
2705  out_reg:
2706         /* Don't let event modules unload while probe registered */
2707         ret = try_module_get(file->event_call->mod);
2708         if (!ret) {
2709                 ret = -EBUSY;
2710                 goto out_free;
2711         }
2712
2713         ret = __ftrace_event_enable_disable(file, 1, 1);
2714         if (ret < 0)
2715                 goto out_put;
2716         ret = register_ftrace_function_probe(glob, ops, data);
2717         /*
2718          * The above returns on success the # of functions enabled,
2719          * but if it didn't find any functions it returns zero.
2720          * Consider no functions a failure too.
2721          */
2722         if (!ret) {
2723                 ret = -ENOENT;
2724                 goto out_disable;
2725         } else if (ret < 0)
2726                 goto out_disable;
2727         /* Just return zero, not the number of enabled functions */
2728         ret = 0;
2729  out:
2730         mutex_unlock(&event_mutex);
2731         return ret;
2732
2733  out_disable:
2734         __ftrace_event_enable_disable(file, 0, 1);
2735  out_put:
2736         module_put(file->event_call->mod);
2737  out_free:
2738         kfree(data);
2739         goto out;
2740 }
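/*
 * Example usage of the enable_event/disable_event function commands
 * handled above, written through set_ftrace_filter:
 *
 *   # echo 'schedule:enable_event:sched:sched_switch:2' > set_ftrace_filter
 *
 * enables the sched:sched_switch event the first two times schedule()
 * is hit, while
 *
 *   # echo '!schedule:enable_event:sched:sched_switch' >> set_ftrace_filter
 *
 * takes the '!' branch above and unregisters the probe again.
 */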
2741
2742 static struct ftrace_func_command event_enable_cmd = {
2743         .name                   = ENABLE_EVENT_STR,
2744         .func                   = event_enable_func,
2745 };
2746
2747 static struct ftrace_func_command event_disable_cmd = {
2748         .name                   = DISABLE_EVENT_STR,
2749         .func                   = event_enable_func,
2750 };
2751
2752 static __init int register_event_cmds(void)
2753 {
2754         int ret;
2755
2756         ret = register_ftrace_command(&event_enable_cmd);
2757         if (WARN_ON(ret < 0))
2758                 return ret;
2759         ret = register_ftrace_command(&event_disable_cmd);
2760         if (WARN_ON(ret < 0))
2761                 unregister_ftrace_command(&event_enable_cmd);
2762         return ret;
2763 }
2764 #else
2765 static inline int register_event_cmds(void) { return 0; }
2766 #endif /* CONFIG_DYNAMIC_FTRACE */
2767
2768 /*
2769  * The top level array has already had its trace_event_file
2770  * descriptors created in order to allow for early events to
2771  * be recorded. This function is called after the tracefs has been
2772  * initialized, and we now have to create the files associated
2773  * to the events.
2774  */
2775 static __init void
2776 __trace_early_add_event_dirs(struct trace_array *tr)
2777 {
2778         struct trace_event_file *file;
2779         int ret;
2780
2781
2782         list_for_each_entry(file, &tr->events, list) {
2783                 ret = event_create_dir(tr->event_dir, file);
2784                 if (ret < 0)
2785                         pr_warn("Could not create directory for event %s\n",
2786                                 trace_event_name(file->event_call));
2787         }
2788 }
2789
2790 /*
2791  * For early boot up, the top trace array needs to have
2792  * a list of events that can be enabled. This must be done before
2793  * the filesystem is set up in order to allow events to be traced
2794  * early.
2795  */
2796 static __init void
2797 __trace_early_add_events(struct trace_array *tr)
2798 {
2799         struct trace_event_call *call;
2800         int ret;
2801
2802         list_for_each_entry(call, &ftrace_events, list) {
2803                 /* Early boot up should not have any modules loaded */
2804                 if (WARN_ON_ONCE(call->mod))
2805                         continue;
2806
2807                 ret = __trace_early_add_new_event(call, tr);
2808                 if (ret < 0)
2809                         pr_warn("Could not create early event %s\n",
2810                                 trace_event_name(call));
2811         }
2812 }
2813
2814 /* Remove the event directory structure for a trace directory. */
2815 static void
2816 __trace_remove_event_dirs(struct trace_array *tr)
2817 {
2818         struct trace_event_file *file, *next;
2819
2820         list_for_each_entry_safe(file, next, &tr->events, list)
2821                 remove_event_file_dir(file);
2822 }
2823
2824 static void __add_event_to_tracers(struct trace_event_call *call)
2825 {
2826         struct trace_array *tr;
2827
2828         list_for_each_entry(tr, &ftrace_trace_arrays, list)
2829                 __trace_add_new_event(call, tr);
2830 }
2831
2832 extern struct trace_event_call *__start_ftrace_events[];
2833 extern struct trace_event_call *__stop_ftrace_events[];
2834
2835 static char bootup_event_buf[COMMAND_LINE_SIZE] __initdata;
2836
2837 static __init int setup_trace_event(char *str)
2838 {
2839         strlcpy(bootup_event_buf, str, COMMAND_LINE_SIZE);
2840         ring_buffer_expanded = true;
2841         tracing_selftest_disabled = true;
2842
2843         return 1;
2844 }
2845 __setup("trace_event=", setup_trace_event);
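/*
 * E.g. booting with
 *
 *   trace_event=sched:sched_switch,irq:irq_handler_entry
 *
 * stashes the list here so those events can be enabled early in boot,
 * before tracefs is initialized.
 */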
2846
2847 /* Expects to have event_mutex held when called */
2848 static int
2849 create_event_toplevel_files(struct dentry *parent, struct trace_array *tr)
2850 {
2851         struct dentry *d_events;
2852         struct dentry *entry;
2853
2854         entry = tracefs_create_file("set_event", 0644, parent,
2855                                     tr, &ftrace_set_event_fops);
2856         if (!entry) {
2857                 pr_warn("Could not create tracefs 'set_event' entry\n");
2858                 return -ENOMEM;
2859         }
2860
2861         d_events = tracefs_create_dir("events", parent);
2862         if (!d_events) {
2863                 pr_warn("Could not create tracefs 'events' directory\n");
2864                 return -ENOMEM;
2865         }
2866
2867         entry = tracefs_create_file("set_event_pid", 0644, parent,
2868                                     tr, &ftrace_set_event_pid_fops);
2869
2870         /* ring buffer internal formats */
2871         trace_create_file("header_page", 0444, d_events,
2872                           ring_buffer_print_page_header,
2873                           &ftrace_show_header_fops);
2874
2875         trace_create_file("header_event", 0444, d_events,
2876                           ring_buffer_print_entry_header,
2877                           &ftrace_show_header_fops);
2878
2879         trace_create_file("enable", 0644, d_events,
2880                           tr, &ftrace_tr_enable_fops);
2881
2882         tr->event_dir = d_events;
2883
2884         return 0;
2885 }
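/*
 * After this runs, the top level of a trace directory holds, e.g.:
 *
 *   set_event
 *   set_event_pid
 *   events/enable
 *   events/header_page
 *   events/header_event
 */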
2886
2887 /**
2888  * event_trace_add_tracer - add an instance of a trace_array to events
2889  * @parent: The parent dentry to place the files/directories for events in
2890  * @tr: The trace array associated with these events
2891  *
2892  * When a new instance is created, it needs to set up its events
2893  * directory, as well as other files associated with events. It also
2894  * creates the event hierarchy in the @parent/events directory.
2895  *
2896  * Returns 0 on success.
2897  */
2898 int event_trace_add_tracer(struct dentry *parent, struct trace_array *tr)
2899 {
2900         int ret;
2901
2902         mutex_lock(&event_mutex);
2903
2904         ret = create_event_toplevel_files(parent, tr);
2905         if (ret)
2906                 goto out_unlock;
2907
2908         down_write(&trace_event_sem);
2909         __trace_add_event_dirs(tr);
2910         up_write(&trace_event_sem);
2911
2912  out_unlock:
2913         mutex_unlock(&event_mutex);
2914
2915         return ret;
2916 }
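/*
 * This is the path taken when a new instance is created from user
 * space, e.g.:
 *
 *   # mkdir /sys/kernel/debug/tracing/instances/foo
 *
 * which populates foo/events/ with the full event hierarchy.
 */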
2917
2918 /*
2919  * The top trace array already had its file descriptors created.
2920  * Now the files themselves need to be created.
2921  */
2922 static __init int
2923 early_event_add_tracer(struct dentry *parent, struct trace_array *tr)
2924 {
2925         int ret;
2926
2927         mutex_lock(&event_mutex);
2928
2929         ret = create_event_toplevel_files(parent, tr);
2930         if (ret)
2931                 goto out_unlock;
2932
2933         down_write(&trace_event_sem);
2934         __trace_early_add_event_dirs(tr);
2935         up_write(&trace_event_sem);
2936
2937  out_unlock:
2938         mutex_unlock(&event_mutex);
2939
2940         return ret;
2941 }
2942
2943 int event_trace_del_tracer(struct trace_array *tr)
2944 {
2945         mutex_lock(&event_mutex);
2946
2947         /* Disable any event triggers and associated soft-disabled events */
2948         clear_event_triggers(tr);
2949
2950         /* Clear the pid list */
2951         __ftrace_clear_event_pids(tr);
2952
2953         /* Disable any running events */
2954         __ftrace_set_clr_event_nolock(tr, NULL, NULL, NULL, 0);
2955
2956         /* Accesses to events are done within rcu_read_lock_sched() */
2957         synchronize_sched();
2958
2959         down_write(&trace_event_sem);
2960         __trace_remove_event_dirs(tr);
2961         tracefs_remove_recursive(tr->event_dir);
2962         up_write(&trace_event_sem);
2963
2964         tr->event_dir = NULL;
2965
2966         mutex_unlock(&event_mutex);
2967
2968         return 0;
2969 }
2970
2971 static __init int event_trace_memsetup(void)
2972 {
2973         field_cachep = KMEM_CACHE(ftrace_event_field, SLAB_PANIC);
2974         file_cachep = KMEM_CACHE(trace_event_file, SLAB_PANIC);
2975         return 0;
2976 }
2977
2978 static __init void
2979 early_enable_events(struct trace_array *tr, bool disable_first)
2980 {
2981         char *buf = bootup_event_buf;
2982         char *token;
2983         int ret;
2984
2985         while (true) {
2986                 token = strsep(&buf, ",");
2987
2988                 if (!token)
2989                         break;
2990
2991                 if (*token) {
2992                         /* Restarting syscalls requires that we stop them first */
2993                         if (disable_first)
2994                                 ftrace_set_clr_event(tr, token, 0);
2995
2996                         ret = ftrace_set_clr_event(tr, token, 1);
2997                         if (ret)
2998                                 pr_warn("Failed to enable trace event: %s\n", token);
2999                 }
3000
3001                 /* Put back the comma to allow this to be called again */
3002                 if (buf)
3003                         *(buf - 1) = ',';
3004         }
3005 }
3006
3007 static __init int event_trace_enable(void)
3008 {
3009         struct trace_array *tr = top_trace_array();
3010         struct trace_event_call **iter, *call;
3011         int ret;
3012
3013         if (!tr)
3014                 return -ENODEV;
3015
3016         for_each_event(iter, __start_ftrace_events, __stop_ftrace_events) {
3017
3018                 call = *iter;
3019                 ret = event_init(call);
3020                 if (!ret)
3021                         list_add(&call->list, &ftrace_events);
3022         }
3023
3024         /*
3025          * We need the top trace array to have a working set of trace
3026          * points at early init, before the debug files and directories
3027          * are created. Create the file entries now, and attach them
3028          * to the actual file dentries later.
3029          */
3030         __trace_early_add_events(tr);
3031
3032         early_enable_events(tr, false);
3033
3034         trace_printk_start_comm();
3035
3036         register_event_cmds();
3037
3038         register_trigger_cmds();
3039
3040         return 0;
3041 }
3042
3043 /*
3044  * event_trace_enable() is called from trace_event_init() first to
3045  * initialize events and perhaps start any events that are on the
3046  * command line. Unfortunately, there are some events that will not
3047  * start this early, like the system call tracepoints that need
3048  * to set the TIF_SYSCALL_TRACEPOINT flag of pid 1. But event_trace_enable()
3049  * is called before pid 1 starts, and this flag is never set, making
3050  * is called before pid 1 starts, so the flag is never set and
3051  * the syscall tracepoints are never reached, even though the events
3052  * are enabled (and do nothing).
3053 static __init int event_trace_enable_again(void)
3054 {
3055         struct trace_array *tr;
3056
3057         tr = top_trace_array();
3058         if (!tr)
3059                 return -ENODEV;
3060
3061         early_enable_events(tr, true);
3062
3063         return 0;
3064 }
3065
3066 early_initcall(event_trace_enable_again);
3067
3068 static __init int event_trace_init(void)
3069 {
3070         struct trace_array *tr;
3071         struct dentry *d_tracer;
3072         struct dentry *entry;
3073         int ret;
3074
3075         tr = top_trace_array();
3076         if (!tr)
3077                 return -ENODEV;
3078
3079         d_tracer = tracing_init_dentry();
3080         if (IS_ERR(d_tracer))
3081                 return 0;
3082
3083         entry = tracefs_create_file("available_events", 0444, d_tracer,
3084                                     tr, &ftrace_avail_fops);
3085         if (!entry)
3086                 pr_warn("Could not create tracefs 'available_events' entry\n");
3087
3088         if (trace_define_generic_fields())
3089                 pr_warn("tracing: Failed to allocate generic fields");
3090
3091         if (trace_define_common_fields())
3092                 pr_warn("tracing: Failed to allocate common fields");
3093
3094         ret = early_event_add_tracer(d_tracer, tr);
3095         if (ret)
3096                 return ret;
3097
3098 #ifdef CONFIG_MODULES
3099         ret = register_module_notifier(&trace_module_nb);
3100         if (ret)
3101                 pr_warn("Failed to register trace events module notifier\n");
3102 #endif
3103         return 0;
3104 }
3105
3106 void __init trace_event_init(void)
3107 {
3108         event_trace_memsetup();
3109         init_ftrace_syscalls();
3110         event_trace_enable();
3111 }
3112
3113 fs_initcall(event_trace_init);
3114
3115 #ifdef CONFIG_FTRACE_STARTUP_TEST
3116
3117 static DEFINE_SPINLOCK(test_spinlock);
3118 static DEFINE_SPINLOCK(test_spinlock_irq);
3119 static DEFINE_MUTEX(test_mutex);
3120
3121 static __init void test_work(struct work_struct *dummy)
3122 {
3123         spin_lock(&test_spinlock);
3124         spin_lock_irq(&test_spinlock_irq);
3125         udelay(1);
3126         spin_unlock_irq(&test_spinlock_irq);
3127         spin_unlock(&test_spinlock);
3128
3129         mutex_lock(&test_mutex);
3130         msleep(1);
3131         mutex_unlock(&test_mutex);
3132 }
3133
3134 static __init int event_test_thread(void *unused)
3135 {
3136         void *test_malloc;
3137
3138         test_malloc = kmalloc(1234, GFP_KERNEL);
3139         if (!test_malloc)
3140                 pr_info("failed to kmalloc\n");
3141
3142         schedule_on_each_cpu(test_work);
3143
3144         kfree(test_malloc);
3145
3146         set_current_state(TASK_INTERRUPTIBLE);
3147         while (!kthread_should_stop()) {
3148                 schedule();
3149                 set_current_state(TASK_INTERRUPTIBLE);
3150         }
3151         __set_current_state(TASK_RUNNING);
3152
3153         return 0;
3154 }
3155
3156 /*
3157  * Do various things that may trigger events.
3158  */
3159 static __init void event_test_stuff(void)
3160 {
3161         struct task_struct *test_thread;
3162
3163         test_thread = kthread_run(event_test_thread, NULL, "test-events");
3164         msleep(1);
3165         kthread_stop(test_thread);
3166 }
3167
3168 /*
3169  * For every trace event defined, we will test each trace point separately,
3170  * and then by groups, and finally all trace points.
3171  */
3172 static __init void event_trace_self_tests(void)
3173 {
3174         struct trace_subsystem_dir *dir;
3175         struct trace_event_file *file;
3176         struct trace_event_call *call;
3177         struct event_subsystem *system;
3178         struct trace_array *tr;
3179         int ret;
3180
3181         tr = top_trace_array();
3182         if (!tr)
3183                 return;
3184
3185         pr_info("Running tests on trace events:\n");
3186
3187         list_for_each_entry(file, &tr->events, list) {
3188
3189                 call = file->event_call;
3190
3191                 /* Only test those that have a probe */
3192                 if (!call->class || !call->class->probe)
3193                         continue;
3194
3195 /*
3196  * Testing syscall events here is pretty useless, but
3197  * we still do it if configured, even though it is time consuming.
3198  * What we really need is a user thread to perform the
3199  * syscalls as we test.
3200  */
3201 #ifndef CONFIG_EVENT_TRACE_TEST_SYSCALLS
3202                 if (call->class->system &&
3203                     strcmp(call->class->system, "syscalls") == 0)
3204                         continue;
3205 #endif
3206
3207                 pr_info("Testing event %s: ", trace_event_name(call));
3208
3209                 /*
3210                  * If an event is already enabled, someone is using
3211                  * it and the self test should not be on.
3212                  */
3213                 if (file->flags & EVENT_FILE_FL_ENABLED) {
3214                         pr_warn("Enabled event during self test!\n");
3215                         WARN_ON_ONCE(1);
3216                         continue;
3217                 }
3218
3219                 ftrace_event_enable_disable(file, 1);
3220                 event_test_stuff();
3221                 ftrace_event_enable_disable(file, 0);
3222
3223                 pr_cont("OK\n");
3224         }
3225
3226         /* Now test at the sub system level */
3227
3228         pr_info("Running tests on trace event systems:\n");
3229
3230         list_for_each_entry(dir, &tr->systems, list) {
3231
3232                 system = dir->subsystem;
3233
3234                 /* the ftrace system is special, skip it */
3235                 if (strcmp(system->name, "ftrace") == 0)
3236                         continue;
3237
3238                 pr_info("Testing event system %s: ", system->name);
3239
3240                 ret = __ftrace_set_clr_event(tr, NULL, system->name, NULL, 1);
3241                 if (WARN_ON_ONCE(ret)) {
3242                         pr_warn("error enabling system %s\n",
3243                                 system->name);
3244                         continue;
3245                 }
3246
3247                 event_test_stuff();
3248
3249                 ret = __ftrace_set_clr_event(tr, NULL, system->name, NULL, 0);
3250                 if (WARN_ON_ONCE(ret)) {
3251                         pr_warn("error disabling system %s\n",
3252                                 system->name);
3253                         continue;
3254                 }
3255
3256                 pr_cont("OK\n");
3257         }
3258
3259         /* Test with all events enabled */
3260
3261         pr_info("Running tests on all trace events:\n");
3262         pr_info("Testing all events: ");
3263
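        /* NULL for match, system and event selects every registered event. */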
3264         ret = __ftrace_set_clr_event(tr, NULL, NULL, NULL, 1);
3265         if (WARN_ON_ONCE(ret)) {
3266                 pr_warn("error enabling all events\n");
3267                 return;
3268         }
3269
3270         event_test_stuff();
3271
3272         /* Disable all events again */
3273         ret = __ftrace_set_clr_event(tr, NULL, NULL, NULL, 0);
3274         if (WARN_ON_ONCE(ret)) {
3275                 pr_warn("error disabling all events\n");
3276                 return;
3277         }
3278
3279         pr_cont("OK\n");
3280 }
3281
3282 #ifdef CONFIG_FUNCTION_TRACER
3283
3284 static DEFINE_PER_CPU(atomic_t, ftrace_test_event_disable);
3285
3286 static struct trace_event_file event_trace_file __initdata;
3287
3288 static void __init
3289 function_test_events_call(unsigned long ip, unsigned long parent_ip,
3290                           struct ftrace_ops *op, struct pt_regs *pt_regs)
3291 {
3292         struct ring_buffer_event *event;
3293         struct ring_buffer *buffer;
3294         struct ftrace_entry *entry;
3295         unsigned long flags;
3296         long disabled;
3297         int cpu;
3298         int pc;
3299
3300         pc = preempt_count();
3301         preempt_disable_notrace();
3302         cpu = raw_smp_processor_id();
3303         disabled = atomic_inc_return(&per_cpu(ftrace_test_event_disable, cpu));
3304
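        /*
         * Per-CPU recursion guard: if this callback fires again on this
         * CPU before the first invocation is done, bail out rather than
         * recurse into the ring buffer code.
         */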
3305         if (disabled != 1)
3306                 goto out;
3307
3308         local_save_flags(flags);
3309
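        /*
         * Reserve space for one function entry; reservation can fail
         * (e.g. the buffer is disabled or full), in which case the
         * sample is silently dropped.
         */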
3310         event = trace_event_buffer_lock_reserve(&buffer, &event_trace_file,
3311                                                 TRACE_FN, sizeof(*entry),
3312                                                 flags, pc);
3313         if (!event)
3314                 goto out;
3315         entry   = ring_buffer_event_data(event);
3316         entry->ip                       = ip;
3317         entry->parent_ip                = parent_ip;
3318
3319         event_trigger_unlock_commit(&event_trace_file, buffer, event,
3320                                     entry, flags, pc);
3321  out:
3322         atomic_dec(&per_cpu(ftrace_test_event_disable, cpu));
3323         preempt_enable_notrace();
3324 }
3325
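/*
 * FTRACE_OPS_FL_RECURSION_SAFE tells ftrace that this callback does its
 * own recursion protection (the per-CPU counter above), so the generic
 * recursion guard can be skipped.
 */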
3326 static struct ftrace_ops trace_ops __initdata =
3327 {
3328         .func = function_test_events_call,
3329         .flags = FTRACE_OPS_FL_RECURSION_SAFE,
3330 };
3331
3332 static __init void event_trace_self_test_with_function(void)
3333 {
3334         int ret;
3335
3336         event_trace_file.tr = top_trace_array();
3337         if (WARN_ON(!event_trace_file.tr))
3338                 return;
3339
3340         ret = register_ftrace_function(&trace_ops);
3341         if (WARN_ON(ret < 0)) {
3342                 pr_info("Failed to enable function tracer for event tests\n");
3343                 return;
3344         }
3345         pr_info("Running tests again, along with the function tracer\n");
3346         event_trace_self_tests();
3347         unregister_ftrace_function(&trace_ops);
3348 }
3349 #else
3350 static __init void event_trace_self_test_with_function(void)
3351 {
3352 }
3353 #endif
3354
3355 static __init int event_trace_self_tests_init(void)
3356 {
3357         if (!tracing_selftest_disabled) {
3358                 event_trace_self_tests();
3359                 event_trace_self_test_with_function();
3360         }
3361
3362         return 0;
3363 }
3364
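/*
 * Run as a late initcall so that the tracing infrastructure and all
 * built-in events are registered before the self tests start.
 */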
3365 late_initcall(event_trace_self_tests_init);
3366
3367 #endif