kernel/trace/trace_events_trigger.c (GNU Linux-libre 4.19.223-gnu1)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * trace_events_trigger - trace event triggers
4  *
5  * Copyright (C) 2013 Tom Zanussi <tom.zanussi@linux.intel.com>
6  */
7
8 #include <linux/module.h>
9 #include <linux/ctype.h>
10 #include <linux/mutex.h>
11 #include <linux/slab.h>
12 #include <linux/rculist.h>
13
14 #include "trace.h"
15
16 static LIST_HEAD(trigger_commands);
17 static DEFINE_MUTEX(trigger_cmd_mutex);
18
19 void trigger_data_free(struct event_trigger_data *data)
20 {
21         if (data->cmd_ops->set_filter)
22                 data->cmd_ops->set_filter(NULL, data, NULL);
23
24         /* make sure current triggers exit before free */
25         tracepoint_synchronize_unregister();
26
27         kfree(data);
28 }
29
30 /**
31  * event_triggers_call - Call triggers associated with a trace event
32  * @file: The trace_event_file associated with the event
33  * @rec: The trace entry for the event, NULL for unconditional invocation
34  *
35  * For each trigger associated with an event, invoke the trigger
36  * function registered with the associated trigger command.  If rec is
37  * non-NULL, it means that the trigger requires further processing and
38  * shouldn't be unconditionally invoked.  If rec is non-NULL and the
39  * trigger has a filter associated with it, rec will be checked against
40  * the filter, and if the record matches, the trigger will be invoked.
41  * If the trigger is a 'post_trigger', meaning it shouldn't be invoked
42  * in any case until the current event is written, the trigger
43  * function isn't invoked but the bit associated with the deferred
44  * trigger is set in the return value.
45  *
49  * Called from tracepoint handlers (with rcu_read_lock_sched() held).
50  *
51  * Return: an enum event_trigger_type value containing a set bit for
52  * any trigger that should be deferred, ETT_NONE if nothing to defer.
53  */
54 enum event_trigger_type
55 event_triggers_call(struct trace_event_file *file, void *rec,
56                     struct ring_buffer_event *event)
57 {
58         struct event_trigger_data *data;
59         enum event_trigger_type tt = ETT_NONE;
60         struct event_filter *filter;
61
62         if (list_empty(&file->triggers))
63                 return tt;
64
65         list_for_each_entry_rcu(data, &file->triggers, list) {
66                 if (data->paused)
67                         continue;
68                 if (!rec) {
69                         data->ops->func(data, rec, event);
70                         continue;
71                 }
72                 filter = rcu_dereference_sched(data->filter);
73                 if (filter && !filter_match_preds(filter, rec))
74                         continue;
75                 if (event_command_post_trigger(data->cmd_ops)) {
76                         tt |= data->cmd_ops->trigger_type;
77                         continue;
78                 }
79                 data->ops->func(data, rec, event);
80         }
81         return tt;
82 }
83 EXPORT_SYMBOL_GPL(event_triggers_call);
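/*
 * Example of the returned mask: if a filtered 'traceoff' trigger and a
 * 'stacktrace' trigger are both attached and their filters match, both
 * commands are post-triggers, so event_triggers_call() returns
 * ETT_TRACE_ONOFF | ETT_STACKTRACE without invoking either trigger; the
 * caller later hands that mask to event_triggers_post_call().
 */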
84
85 /**
86  * event_triggers_post_call - Call 'post_triggers' for a trace event
87  * @file: The trace_event_file associated with the event
88  * @tt: enum event_trigger_type containing a set bit for each trigger to invoke
89  *
90  * For each trigger associated with an event, invoke the trigger
91  * function registered with the associated trigger command, if the
92  * corresponding bit is set in the tt enum passed into this function.
93  * See @event_triggers_call for details on how those bits are set.
94  *
95  * Called from tracepoint handlers (with rcu_read_lock_sched() held).
96  */
97 void
98 event_triggers_post_call(struct trace_event_file *file,
99                          enum event_trigger_type tt)
100 {
101         struct event_trigger_data *data;
102
103         list_for_each_entry_rcu(data, &file->triggers, list) {
104                 if (data->paused)
105                         continue;
106                 if (data->cmd_ops->trigger_type & tt)
107                         data->ops->func(data, NULL, NULL);
108         }
109 }
110 EXPORT_SYMBOL_GPL(event_triggers_post_call);
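/*
 * The two functions above are paired by the event commit path: roughly,
 * __event_trigger_test_discard() (trace.h) calls event_triggers_call()
 * before the event is committed to the ring buffer, and
 * event_trigger_unlock_commit() passes any deferred-trigger bits to
 * event_triggers_post_call() once the commit is done.  A simplified
 * sketch of that caller, for orientation only:
 *
 *	enum event_trigger_type tt = ETT_NONE;
 *
 *	if (!__event_trigger_test_discard(file, buffer, event, entry, &tt))
 *		trace_buffer_unlock_commit(file->tr, buffer, event, irq_flags, pc);
 *	if (tt)
 *		event_triggers_post_call(file, tt);
 */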
111
112 #define SHOW_AVAILABLE_TRIGGERS (void *)(1UL)
113
114 static void *trigger_next(struct seq_file *m, void *t, loff_t *pos)
115 {
116         struct trace_event_file *event_file = event_file_data(m->private);
117
118         if (t == SHOW_AVAILABLE_TRIGGERS) {
119                 (*pos)++;
120                 return NULL;
121         }
122         return seq_list_next(t, &event_file->triggers, pos);
123 }
124
125 static void *trigger_start(struct seq_file *m, loff_t *pos)
126 {
127         struct trace_event_file *event_file;
128
129         /* ->stop() is called even if ->start() fails */
130         mutex_lock(&event_mutex);
131         event_file = event_file_data(m->private);
132         if (unlikely(!event_file))
133                 return ERR_PTR(-ENODEV);
134
135         if (list_empty(&event_file->triggers))
136                 return *pos == 0 ? SHOW_AVAILABLE_TRIGGERS : NULL;
137
138         return seq_list_start(&event_file->triggers, *pos);
139 }
140
141 static void trigger_stop(struct seq_file *m, void *t)
142 {
143         mutex_unlock(&event_mutex);
144 }
145
146 static int trigger_show(struct seq_file *m, void *v)
147 {
148         struct event_trigger_data *data;
149         struct event_command *p;
150
151         if (v == SHOW_AVAILABLE_TRIGGERS) {
152                 seq_puts(m, "# Available triggers:\n");
153                 seq_putc(m, '#');
154                 mutex_lock(&trigger_cmd_mutex);
155                 list_for_each_entry_reverse(p, &trigger_commands, list)
156                         seq_printf(m, " %s", p->name);
157                 seq_putc(m, '\n');
158                 mutex_unlock(&trigger_cmd_mutex);
159                 return 0;
160         }
161
162         data = list_entry(v, struct event_trigger_data, list);
163         data->ops->print(m, data->ops, data);
164
165         return 0;
166 }
167
168 static const struct seq_operations event_triggers_seq_ops = {
169         .start = trigger_start,
170         .next = trigger_next,
171         .stop = trigger_stop,
172         .show = trigger_show,
173 };
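/*
 * Reading an event's 'trigger' file drives the seq_ops above.  With no
 * triggers set, the output looks roughly like this (the command set
 * depends on the kernel config):
 *
 *	# cat /sys/kernel/debug/tracing/events/sched/sched_switch/trigger
 *	# Available triggers:
 *	# traceon traceoff snapshot stacktrace enable_event disable_event
 *
 * With triggers attached, each one is printed on its own line via its
 * ->print() op instead.
 */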
174
175 static int event_trigger_regex_open(struct inode *inode, struct file *file)
176 {
177         int ret = 0;
178
179         mutex_lock(&event_mutex);
180
181         if (unlikely(!event_file_data(file))) {
182                 mutex_unlock(&event_mutex);
183                 return -ENODEV;
184         }
185
186         if ((file->f_mode & FMODE_WRITE) &&
187             (file->f_flags & O_TRUNC)) {
188                 struct trace_event_file *event_file;
189                 struct event_command *p;
190
191                 event_file = event_file_data(file);
192
193                 list_for_each_entry(p, &trigger_commands, list) {
194                         if (p->unreg_all)
195                                 p->unreg_all(event_file);
196                 }
197         }
198
199         if (file->f_mode & FMODE_READ) {
200                 ret = seq_open(file, &event_triggers_seq_ops);
201                 if (!ret) {
202                         struct seq_file *m = file->private_data;
203                         m->private = file;
204                 }
205         }
206
207         mutex_unlock(&event_mutex);
208
209         return ret;
210 }
211
212 static int trigger_process_regex(struct trace_event_file *file, char *buff)
213 {
214         char *command, *next;
215         struct event_command *p;
216         int ret = -EINVAL;
217
218         next = buff = skip_spaces(buff);
219         command = strsep(&next, ": \t");
220         if (next) {
221                 next = skip_spaces(next);
222                 if (!*next)
223                         next = NULL;
224         }
225         command = (command[0] != '!') ? command : command + 1;
226
227         mutex_lock(&trigger_cmd_mutex);
228         list_for_each_entry(p, &trigger_commands, list) {
229                 if (strcmp(p->name, command) == 0) {
230                         ret = p->func(p, file, buff, command, next);
231                         goto out_unlock;
232                 }
233         }
234  out_unlock:
235         mutex_unlock(&trigger_cmd_mutex);
236
237         return ret;
238 }
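/*
 * The strings parsed here are what user space writes to the per-event
 * 'trigger' file, e.g.:
 *
 *	echo 'traceoff:3 if prev_comm == "bash"' > .../sched_switch/trigger
 *	echo '!traceoff' > .../sched_switch/trigger    (removes it again)
 *
 * The leading command name selects the struct event_command, and the
 * rest of the line is handed to its ->func() as the parameter string.
 */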
239
240 static ssize_t event_trigger_regex_write(struct file *file,
241                                          const char __user *ubuf,
242                                          size_t cnt, loff_t *ppos)
243 {
244         struct trace_event_file *event_file;
245         ssize_t ret;
246         char *buf;
247
248         if (!cnt)
249                 return 0;
250
251         if (cnt >= PAGE_SIZE)
252                 return -EINVAL;
253
254         buf = memdup_user_nul(ubuf, cnt);
255         if (IS_ERR(buf))
256                 return PTR_ERR(buf);
257
258         strim(buf);
259
260         mutex_lock(&event_mutex);
261         event_file = event_file_data(file);
262         if (unlikely(!event_file)) {
263                 mutex_unlock(&event_mutex);
264                 kfree(buf);
265                 return -ENODEV;
266         }
267         ret = trigger_process_regex(event_file, buf);
268         mutex_unlock(&event_mutex);
269
270         kfree(buf);
271         if (ret < 0)
272                 goto out;
273
274         *ppos += cnt;
275         ret = cnt;
276  out:
277         return ret;
278 }
279
280 static int event_trigger_regex_release(struct inode *inode, struct file *file)
281 {
282         mutex_lock(&event_mutex);
283
284         if (file->f_mode & FMODE_READ)
285                 seq_release(inode, file);
286
287         mutex_unlock(&event_mutex);
288
289         return 0;
290 }
291
292 static ssize_t
293 event_trigger_write(struct file *filp, const char __user *ubuf,
294                     size_t cnt, loff_t *ppos)
295 {
296         return event_trigger_regex_write(filp, ubuf, cnt, ppos);
297 }
298
299 static int
300 event_trigger_open(struct inode *inode, struct file *filp)
301 {
302         return event_trigger_regex_open(inode, filp);
303 }
304
305 static int
306 event_trigger_release(struct inode *inode, struct file *file)
307 {
308         return event_trigger_regex_release(inode, file);
309 }
310
311 const struct file_operations event_trigger_fops = {
312         .open = event_trigger_open,
313         .read = seq_read,
314         .write = event_trigger_write,
315         .llseek = tracing_lseek,
316         .release = event_trigger_release,
317 };
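/*
 * These fops back the per-event 'trigger' file; in this tree the file is
 * created alongside 'enable', 'filter', etc. when each event's tracefs
 * directory is set up (see event_create_dir() in trace_events.c).
 */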
318
319 /*
320  * Currently we only register event commands from __init, so mark this
321  * __init too.
322  */
323 __init int register_event_command(struct event_command *cmd)
324 {
325         struct event_command *p;
326         int ret = 0;
327
328         mutex_lock(&trigger_cmd_mutex);
329         list_for_each_entry(p, &trigger_commands, list) {
330                 if (strcmp(cmd->name, p->name) == 0) {
331                         ret = -EBUSY;
332                         goto out_unlock;
333                 }
334         }
335         list_add(&cmd->list, &trigger_commands);
336  out_unlock:
337         mutex_unlock(&trigger_cmd_mutex);
338
339         return ret;
340 }
341
342 /*
343  * Currently we only unregister event commands from __init, so mark
344  * this __init too.
345  */
346 __init int unregister_event_command(struct event_command *cmd)
347 {
348         struct event_command *p, *n;
349         int ret = -ENODEV;
350
351         mutex_lock(&trigger_cmd_mutex);
352         list_for_each_entry_safe(p, n, &trigger_commands, list) {
353                 if (strcmp(cmd->name, p->name) == 0) {
354                         ret = 0;
355                         list_del_init(&p->list);
356                         goto out_unlock;
357                 }
358         }
359  out_unlock:
360         mutex_unlock(&trigger_cmd_mutex);
361
362         return ret;
363 }
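/*
 * A new trigger command is normally wired up with a static struct
 * event_command plus a registration call, following the pattern used
 * later in this file.  A minimal sketch (the "foo" names here are
 * illustrative only, not part of this file):
 *
 *	static struct event_command trigger_foo_cmd = {
 *		.name			= "foo",
 *		.trigger_type		= ETT_SNAPSHOT,  (some enum event_trigger_type bit)
 *		.func			= event_trigger_callback,
 *		.reg			= register_trigger,
 *		.unreg			= unregister_trigger,
 *		.get_trigger_ops	= foo_get_trigger_ops,
 *		.set_filter		= set_trigger_filter,
 *	};
 *
 *	ret = register_event_command(&trigger_foo_cmd);
 */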
364
365 /**
366  * event_trigger_print - Generic event_trigger_ops @print implementation
367  * @name: The name of the event trigger
368  * @m: The seq_file being printed to
369  * @data: Trigger-specific data
370  * @filter_str: filter_str to print, if present
371  *
372  * Common implementation for event triggers to print themselves.
373  *
374  * Usually wrapped by a function that simply sets the @name of the
375  * trigger command and then invokes this.
376  *
377  * Return: 0 on success, errno otherwise
378  */
379 static int
380 event_trigger_print(const char *name, struct seq_file *m,
381                     void *data, char *filter_str)
382 {
383         long count = (long)data;
384
385         seq_puts(m, name);
386
387         if (count == -1)
388                 seq_puts(m, ":unlimited");
389         else
390                 seq_printf(m, ":count=%ld", count);
391
392         if (filter_str)
393                 seq_printf(m, " if %s\n", filter_str);
394         else
395                 seq_putc(m, '\n');
396
397         return 0;
398 }
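/*
 * The resulting 'trigger' file lines look like:
 *
 *	traceon:unlimited
 *	traceoff:count=3 if pid == 1
 *
 * i.e. the command name, the remaining count (or "unlimited"), and the
 * filter string if one was given.
 */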
399
400 /**
401  * event_trigger_init - Generic event_trigger_ops @init implementation
402  * @ops: The trigger ops associated with the trigger
403  * @data: Trigger-specific data
404  *
405  * Common implementation of event trigger initialization.
406  *
407  * Usually used directly as the @init method in event trigger
408  * implementations.
409  *
410  * Return: 0 on success, errno otherwise
411  */
412 int event_trigger_init(struct event_trigger_ops *ops,
413                        struct event_trigger_data *data)
414 {
415         data->ref++;
416         return 0;
417 }
418
419 /**
420  * event_trigger_free - Generic event_trigger_ops @free implementation
421  * @ops: The trigger ops associated with the trigger
422  * @data: Trigger-specific data
423  *
424  * Common implementation of event trigger de-initialization.
425  *
426  * Usually used directly as the @free method in event trigger
427  * implementations.
428  */
429 static void
430 event_trigger_free(struct event_trigger_ops *ops,
431                    struct event_trigger_data *data)
432 {
433         if (WARN_ON_ONCE(data->ref <= 0))
434                 return;
435
436         data->ref--;
437         if (!data->ref)
438                 trigger_data_free(data);
439 }
440
441 int trace_event_trigger_enable_disable(struct trace_event_file *file,
442                                        int trigger_enable)
443 {
444         int ret = 0;
445
446         if (trigger_enable) {
447                 if (atomic_inc_return(&file->tm_ref) > 1)
448                         return ret;
449                 set_bit(EVENT_FILE_FL_TRIGGER_MODE_BIT, &file->flags);
450                 ret = trace_event_enable_disable(file, 1, 1);
451         } else {
452                 if (atomic_dec_return(&file->tm_ref) > 0)
453                         return ret;
454                 clear_bit(EVENT_FILE_FL_TRIGGER_MODE_BIT, &file->flags);
455                 ret = trace_event_enable_disable(file, 0, 1);
456         }
457
458         return ret;
459 }
460
461 /**
462  * clear_event_triggers - Clear all triggers associated with a trace array
463  * @tr: The trace array to clear
464  *
465  * For each trigger, the triggering event has its tm_ref decremented
466  * via trace_event_trigger_enable_disable(), and any associated event
467  * (in the case of enable/disable_event triggers) will have its sm_ref
468  * decremented via free()->trace_event_enable_disable().  That
469  * combination effectively reverses the soft-mode/trigger state added
470  * by trigger registration.
471  *
472  * Must be called with event_mutex held.
473  */
474 void
475 clear_event_triggers(struct trace_array *tr)
476 {
477         struct trace_event_file *file;
478
479         list_for_each_entry(file, &tr->events, list) {
480                 struct event_trigger_data *data, *n;
481                 list_for_each_entry_safe(data, n, &file->triggers, list) {
482                         trace_event_trigger_enable_disable(file, 0);
483                         list_del_rcu(&data->list);
484                         if (data->ops->free)
485                                 data->ops->free(data->ops, data);
486                 }
487         }
488 }
489
490 /**
491  * update_cond_flag - Set or reset the TRIGGER_COND bit
492  * @file: The trace_event_file associated with the event
493  *
494  * If an event has triggers and any of those triggers has a filter or
495  * a post_trigger, trigger invocation needs to be deferred until after
496  * the current event has logged its data, and the event should have
497  * its TRIGGER_COND bit set, otherwise the TRIGGER_COND bit should be
498  * cleared.
499  */
500 void update_cond_flag(struct trace_event_file *file)
501 {
502         struct event_trigger_data *data;
503         bool set_cond = false;
504
505         lockdep_assert_held(&event_mutex);
506
507         list_for_each_entry(data, &file->triggers, list) {
508                 if (data->filter || event_command_post_trigger(data->cmd_ops) ||
509                     event_command_needs_rec(data->cmd_ops)) {
510                         set_cond = true;
511                         break;
512                 }
513         }
514
515         if (set_cond)
516                 set_bit(EVENT_FILE_FL_TRIGGER_COND_BIT, &file->flags);
517         else
518                 clear_bit(EVENT_FILE_FL_TRIGGER_COND_BIT, &file->flags);
519 }
520
521 /**
522  * register_trigger - Generic event_command @reg implementation
523  * @glob: The raw string used to register the trigger
524  * @ops: The trigger ops associated with the trigger
525  * @data: Trigger-specific data to associate with the trigger
526  * @file: The trace_event_file associated with the event
527  *
528  * Common implementation for event trigger registration.
529  *
530  * Usually used directly as the @reg method in event command
531  * implementations.
532  *
533  * Return: 0 on success, errno otherwise
534  */
535 static int register_trigger(char *glob, struct event_trigger_ops *ops,
536                             struct event_trigger_data *data,
537                             struct trace_event_file *file)
538 {
539         struct event_trigger_data *test;
540         int ret = 0;
541
542         lockdep_assert_held(&event_mutex);
543
544         list_for_each_entry(test, &file->triggers, list) {
545                 if (test->cmd_ops->trigger_type == data->cmd_ops->trigger_type) {
546                         ret = -EEXIST;
547                         goto out;
548                 }
549         }
550
551         if (data->ops->init) {
552                 ret = data->ops->init(data->ops, data);
553                 if (ret < 0)
554                         goto out;
555         }
556
557         list_add_rcu(&data->list, &file->triggers);
558         ret++;
559
560         update_cond_flag(file);
561         if (trace_event_trigger_enable_disable(file, 1) < 0) {
562                 list_del_rcu(&data->list);
563                 update_cond_flag(file);
564                 ret--;
565         }
566 out:
567         return ret;
568 }
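/*
 * Note the return convention: on success this returns the number of
 * triggers registered (i.e. 1), not 0.  event_trigger_callback() below
 * relies on that and treats a 0 return from ->reg() as "nothing was
 * registered", converting it to -ENOENT.
 */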
569
570 /**
571  * unregister_trigger - Generic event_command @unreg implementation
572  * @glob: The raw string used to register the trigger
573  * @ops: The trigger ops associated with the trigger
574  * @test: Trigger-specific data used to find the trigger to remove
575  * @file: The trace_event_file associated with the event
576  *
577  * Common implementation for event trigger unregistration.
578  *
579  * Usually used directly as the @unreg method in event command
580  * implementations.
581  */
582 static void unregister_trigger(char *glob, struct event_trigger_ops *ops,
583                                struct event_trigger_data *test,
584                                struct trace_event_file *file)
585 {
586         struct event_trigger_data *data;
587         bool unregistered = false;
588
589         lockdep_assert_held(&event_mutex);
590
591         list_for_each_entry(data, &file->triggers, list) {
592                 if (data->cmd_ops->trigger_type == test->cmd_ops->trigger_type) {
593                         unregistered = true;
594                         list_del_rcu(&data->list);
595                         trace_event_trigger_enable_disable(file, 0);
596                         update_cond_flag(file);
597                         break;
598                 }
599         }
600
601         if (unregistered && data->ops->free)
602                 data->ops->free(data->ops, data);
603 }
604
605 /**
606  * event_trigger_callback - Generic event_command @func implementation
607  * @cmd_ops: The command ops, used for trigger registration
608  * @file: The trace_event_file associated with the event
609  * @glob: The raw string used to register the trigger
610  * @cmd: The cmd portion of the string used to register the trigger
611  * @param: The params portion of the string used to register the trigger
612  *
613  * Common implementation for event command parsing and trigger
614  * instantiation.
615  *
616  * Usually used directly as the @func method in event command
617  * implementations.
618  *
619  * Return: 0 on success, errno otherwise
620  */
621 static int
622 event_trigger_callback(struct event_command *cmd_ops,
623                        struct trace_event_file *file,
624                        char *glob, char *cmd, char *param)
625 {
626         struct event_trigger_data *trigger_data;
627         struct event_trigger_ops *trigger_ops;
628         char *trigger = NULL;
629         char *number;
630         int ret;
631
632         /* separate the trigger from the filter (t:n [if filter]) */
633         if (param && isdigit(param[0])) {
634                 trigger = strsep(&param, " \t");
635                 if (param) {
636                         param = skip_spaces(param);
637                         if (!*param)
638                                 param = NULL;
639                 }
640         }
641
642         trigger_ops = cmd_ops->get_trigger_ops(cmd, trigger);
643
644         ret = -ENOMEM;
645         trigger_data = kzalloc(sizeof(*trigger_data), GFP_KERNEL);
646         if (!trigger_data)
647                 goto out;
648
649         trigger_data->count = -1;
650         trigger_data->ops = trigger_ops;
651         trigger_data->cmd_ops = cmd_ops;
652         trigger_data->private_data = file;
653         INIT_LIST_HEAD(&trigger_data->list);
654         INIT_LIST_HEAD(&trigger_data->named_list);
655
656         if (glob[0] == '!') {
657                 cmd_ops->unreg(glob+1, trigger_ops, trigger_data, file);
658                 kfree(trigger_data);
659                 ret = 0;
660                 goto out;
661         }
662
663         if (trigger) {
664                 number = strsep(&trigger, ":");
665
666                 ret = -EINVAL;
667                 if (!strlen(number))
668                         goto out_free;
669
670                 /*
671                  * We use the callback data field (which is a pointer)
672                  * as our counter.
673                  */
674                 ret = kstrtoul(number, 0, &trigger_data->count);
675                 if (ret)
676                         goto out_free;
677         }
678
679         if (!param) /* if param is non-empty, it's supposed to be a filter */
680                 goto out_reg;
681
682         if (!cmd_ops->set_filter)
683                 goto out_reg;
684
685         ret = cmd_ops->set_filter(param, trigger_data, file);
686         if (ret < 0)
687                 goto out_free;
688
689  out_reg:
690         /* Up the trigger_data count to make sure reg doesn't free it on failure */
691         event_trigger_init(trigger_ops, trigger_data);
692         ret = cmd_ops->reg(glob, trigger_ops, trigger_data, file);
693         /*
694          * The above returns on success the # of functions enabled,
695          * but if it didn't find any functions it returns zero.
696          * Consider no functions a failure too.
697          */
698         if (!ret) {
699                 cmd_ops->unreg(glob, trigger_ops, trigger_data, file);
700                 ret = -ENOENT;
701         } else if (ret > 0)
702                 ret = 0;
703
704         /* Down the counter of trigger_data or free it if not used anymore */
705         event_trigger_free(trigger_ops, trigger_data);
706  out:
707         return ret;
708
709  out_free:
710         if (cmd_ops->set_filter)
711                 cmd_ops->set_filter(NULL, trigger_data, NULL);
712         kfree(trigger_data);
713         goto out;
714 }
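/*
 * A concrete walk-through of the parsing above: for
 *
 *	echo 'stacktrace:5 if prev_pid == 0' > .../sched_switch/trigger
 *
 * this is called with glob == cmd == "stacktrace" and
 * param == "5 if prev_pid == 0"; "5" is split off as the count and the
 * remaining "if prev_pid == 0" is handed to ->set_filter().  Prefixing
 * the command with '!' takes the ->unreg() path instead.
 */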
715
716 /**
717  * set_trigger_filter - Generic event_command @set_filter implementation
718  * @filter_str: The filter string for the trigger, NULL to remove filter
719  * @trigger_data: Trigger-specific data
720  * @file: The trace_event_file associated with the event
721  *
722  * Common implementation for event command filter parsing and filter
723  * instantiation.
724  *
725  * Usually used directly as the @set_filter method in event command
726  * implementations.
727  *
728  * Also used to remove a filter (if filter_str = NULL).
729  *
730  * Return: 0 on success, errno otherwise
731  */
732 int set_trigger_filter(char *filter_str,
733                        struct event_trigger_data *trigger_data,
734                        struct trace_event_file *file)
735 {
736         struct event_trigger_data *data = trigger_data;
737         struct event_filter *filter = NULL, *tmp;
738         int ret = -EINVAL;
739         char *s;
740
741         if (!filter_str) /* clear the current filter */
742                 goto assign;
743
744         s = strsep(&filter_str, " \t");
745
746         if (!strlen(s) || strcmp(s, "if") != 0)
747                 goto out;
748
749         if (!filter_str)
750                 goto out;
751
752         /* The filter is for the 'trigger' event, not the triggered event */
753         ret = create_event_filter(file->event_call, filter_str, false, &filter);
754         /*
755          * If create_event_filter() fails, filter still needs to be freed.
756          * Which the calling code will do with data->filter.
757          */
758  assign:
759         tmp = rcu_access_pointer(data->filter);
760
761         rcu_assign_pointer(data->filter, filter);
762
763         if (tmp) {
764                 /* Make sure the call is done with the filter */
765                 tracepoint_synchronize_unregister();
766                 free_event_filter(tmp);
767         }
768
769         kfree(data->filter_str);
770         data->filter_str = NULL;
771
772         if (filter_str) {
773                 data->filter_str = kstrdup(filter_str, GFP_KERNEL);
774                 if (!data->filter_str) {
775                         free_event_filter(rcu_access_pointer(data->filter));
776                         data->filter = NULL;
777                         ret = -ENOMEM;
778                 }
779         }
780  out:
781         return ret;
782 }
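/*
 * The filter string handed in here still starts with the "if" keyword,
 * e.g. 'if pid == 1 && comm != "bash"'; the keyword is stripped above and
 * the remainder is compiled against the triggering event's fields.
 */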
783
784 static LIST_HEAD(named_triggers);
785
786 /**
787  * find_named_trigger - Find the common named trigger associated with @name
788  * @name: The name of the set of named triggers to find the common data for
789  *
790  * Named triggers are sets of triggers that share a common set of
791  * trigger data.  The first named trigger registered with a given name
792  * owns the common trigger data that the others subsequently
793  * registered with the same name will reference.  This function
794  * returns the common trigger data associated with that first
795  * registered instance.
796  *
797  * Return: the common trigger data for the given named trigger on
798  * success, NULL otherwise.
799  */
800 struct event_trigger_data *find_named_trigger(const char *name)
801 {
802         struct event_trigger_data *data;
803
804         if (!name)
805                 return NULL;
806
807         list_for_each_entry(data, &named_triggers, named_list) {
808                 if (data->named_data)
809                         continue;
810                 if (strcmp(data->name, name) == 0)
811                         return data;
812         }
813
814         return NULL;
815 }
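/*
 * Named triggers are currently used by hist triggers: giving several hist
 * triggers the same ':name=foo' parameter makes them share one histogram,
 * and this lookup is how later registrations find the common data owned
 * by the first one (see the hist trigger code in trace_events_hist.c).
 */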
816
817 /**
818  * is_named_trigger - determine if a given trigger is a named trigger
819  * @test: The trigger data to test
820  *
821  * Return: true if 'test' is a named trigger, false otherwise.
822  */
823 bool is_named_trigger(struct event_trigger_data *test)
824 {
825         struct event_trigger_data *data;
826
827         list_for_each_entry(data, &named_triggers, named_list) {
828                 if (test == data)
829                         return true;
830         }
831
832         return false;
833 }
834
835 /**
836  * save_named_trigger - save the trigger in the named trigger list
837  * @name: The name of the named trigger set
838  * @data: The trigger data to save
839  *
840  * Return: 0 if successful, negative error otherwise.
841  */
842 int save_named_trigger(const char *name, struct event_trigger_data *data)
843 {
844         data->name = kstrdup(name, GFP_KERNEL);
845         if (!data->name)
846                 return -ENOMEM;
847
848         list_add(&data->named_list, &named_triggers);
849
850         return 0;
851 }
852
853 /**
854  * del_named_trigger - delete a trigger from the named trigger list
855  * @data: The trigger data to delete
856  */
857 void del_named_trigger(struct event_trigger_data *data)
858 {
859         kfree(data->name);
860         data->name = NULL;
861
862         list_del(&data->named_list);
863 }
864
865 static void __pause_named_trigger(struct event_trigger_data *data, bool pause)
866 {
867         struct event_trigger_data *test;
868
869         list_for_each_entry(test, &named_triggers, named_list) {
870                 if (strcmp(test->name, data->name) == 0) {
871                         if (pause) {
872                                 test->paused_tmp = test->paused;
873                                 test->paused = true;
874                         } else {
875                                 test->paused = test->paused_tmp;
876                         }
877                 }
878         }
879 }
880
881 /**
882  * pause_named_trigger - Pause all named triggers with the same name
883  * @data: The trigger data of a named trigger to pause
884  *
885  * Pauses a named trigger along with all other triggers having the
886  * same name.  Because named triggers share a common set of data,
887  * pausing only one is meaningless, so pausing one named trigger needs
888  * to pause all triggers with the same name.
889  */
890 void pause_named_trigger(struct event_trigger_data *data)
891 {
892         __pause_named_trigger(data, true);
893 }
894
895 /**
896  * unpause_named_trigger - Un-pause all named triggers with the same name
897  * @data: The trigger data of a named trigger to unpause
898  *
899  * Un-pauses a named trigger along with all other triggers having the
900  * same name.  Because named triggers share a common set of data,
901  * unpausing only one is meaningless, so unpausing one named trigger
902  * needs to unpause all triggers with the same name.
903  */
904 void unpause_named_trigger(struct event_trigger_data *data)
905 {
906         __pause_named_trigger(data, false);
907 }
908
909 /**
910  * set_named_trigger_data - Associate common named trigger data
911  * @data: The trigger data to associate with @named_data
912  *
913  * Named triggers are sets of triggers that share a common set of
914  * trigger data.  The first named trigger registered with a given name
915  * owns the common trigger data that the others subsequently
916  * registered with the same name will reference.  This function
917  * associates the common trigger data from the first trigger with the
918  * given trigger.
919  */
920 void set_named_trigger_data(struct event_trigger_data *data,
921                             struct event_trigger_data *named_data)
922 {
923         data->named_data = named_data;
924 }
925
926 struct event_trigger_data *
927 get_named_trigger_data(struct event_trigger_data *data)
928 {
929         return data->named_data;
930 }
931
932 static void
933 traceon_trigger(struct event_trigger_data *data, void *rec,
934                 struct ring_buffer_event *event)
935 {
936         if (tracing_is_on())
937                 return;
938
939         tracing_on();
940 }
941
942 static void
943 traceon_count_trigger(struct event_trigger_data *data, void *rec,
944                       struct ring_buffer_event *event)
945 {
946         if (tracing_is_on())
947                 return;
948
949         if (!data->count)
950                 return;
951
952         if (data->count != -1)
953                 (data->count)--;
954
955         tracing_on();
956 }
957
958 static void
959 traceoff_trigger(struct event_trigger_data *data, void *rec,
960                  struct ring_buffer_event *event)
961 {
962         if (!tracing_is_on())
963                 return;
964
965         tracing_off();
966 }
967
968 static void
969 traceoff_count_trigger(struct event_trigger_data *data, void *rec,
970                        struct ring_buffer_event *event)
971 {
972         if (!tracing_is_on())
973                 return;
974
975         if (!data->count)
976                 return;
977
978         if (data->count != -1)
979                 (data->count)--;
980
981         tracing_off();
982 }
983
984 static int
985 traceon_trigger_print(struct seq_file *m, struct event_trigger_ops *ops,
986                       struct event_trigger_data *data)
987 {
988         return event_trigger_print("traceon", m, (void *)data->count,
989                                    data->filter_str);
990 }
991
992 static int
993 traceoff_trigger_print(struct seq_file *m, struct event_trigger_ops *ops,
994                        struct event_trigger_data *data)
995 {
996         return event_trigger_print("traceoff", m, (void *)data->count,
997                                    data->filter_str);
998 }
999
1000 static struct event_trigger_ops traceon_trigger_ops = {
1001         .func                   = traceon_trigger,
1002         .print                  = traceon_trigger_print,
1003         .init                   = event_trigger_init,
1004         .free                   = event_trigger_free,
1005 };
1006
1007 static struct event_trigger_ops traceon_count_trigger_ops = {
1008         .func                   = traceon_count_trigger,
1009         .print                  = traceon_trigger_print,
1010         .init                   = event_trigger_init,
1011         .free                   = event_trigger_free,
1012 };
1013
1014 static struct event_trigger_ops traceoff_trigger_ops = {
1015         .func                   = traceoff_trigger,
1016         .print                  = traceoff_trigger_print,
1017         .init                   = event_trigger_init,
1018         .free                   = event_trigger_free,
1019 };
1020
1021 static struct event_trigger_ops traceoff_count_trigger_ops = {
1022         .func                   = traceoff_count_trigger,
1023         .print                  = traceoff_trigger_print,
1024         .init                   = event_trigger_init,
1025         .free                   = event_trigger_free,
1026 };
1027
1028 static struct event_trigger_ops *
1029 onoff_get_trigger_ops(char *cmd, char *param)
1030 {
1031         struct event_trigger_ops *ops;
1032
1033         /* we register both traceon and traceoff to this callback */
1034         if (strcmp(cmd, "traceon") == 0)
1035                 ops = param ? &traceon_count_trigger_ops :
1036                         &traceon_trigger_ops;
1037         else
1038                 ops = param ? &traceoff_count_trigger_ops :
1039                         &traceoff_trigger_ops;
1040
1041         return ops;
1042 }
1043
1044 static struct event_command trigger_traceon_cmd = {
1045         .name                   = "traceon",
1046         .trigger_type           = ETT_TRACE_ONOFF,
1047         .func                   = event_trigger_callback,
1048         .reg                    = register_trigger,
1049         .unreg                  = unregister_trigger,
1050         .get_trigger_ops        = onoff_get_trigger_ops,
1051         .set_filter             = set_trigger_filter,
1052 };
1053
1054 static struct event_command trigger_traceoff_cmd = {
1055         .name                   = "traceoff",
1056         .trigger_type           = ETT_TRACE_ONOFF,
1057         .flags                  = EVENT_CMD_FL_POST_TRIGGER,
1058         .func                   = event_trigger_callback,
1059         .reg                    = register_trigger,
1060         .unreg                  = unregister_trigger,
1061         .get_trigger_ops        = onoff_get_trigger_ops,
1062         .set_filter             = set_trigger_filter,
1063 };
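/*
 * Note that 'traceoff' (but not 'traceon') is marked POST_TRIGGER, so it
 * only fires after the triggering event has been written; the event that
 * switched tracing off therefore still shows up in the trace.  Typical use:
 *
 *	echo 'traceoff:1 if prev_comm == "bash"' > .../sched_switch/trigger
 */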
1064
1065 #ifdef CONFIG_TRACER_SNAPSHOT
1066 static void
1067 snapshot_trigger(struct event_trigger_data *data, void *rec,
1068                  struct ring_buffer_event *event)
1069 {
1070         struct trace_event_file *file = data->private_data;
1071
1072         if (file)
1073                 tracing_snapshot_instance(file->tr);
1074         else
1075                 tracing_snapshot();
1076 }
1077
1078 static void
1079 snapshot_count_trigger(struct event_trigger_data *data, void *rec,
1080                        struct ring_buffer_event *event)
1081 {
1082         if (!data->count)
1083                 return;
1084
1085         if (data->count != -1)
1086                 (data->count)--;
1087
1088         snapshot_trigger(data, rec, event);
1089 }
1090
1091 static int
1092 register_snapshot_trigger(char *glob, struct event_trigger_ops *ops,
1093                           struct event_trigger_data *data,
1094                           struct trace_event_file *file)
1095 {
1096         if (tracing_alloc_snapshot_instance(file->tr) != 0)
1097                 return 0;
1098
1099         return register_trigger(glob, ops, data, file);
1100 }
1101
1102 static int
1103 snapshot_trigger_print(struct seq_file *m, struct event_trigger_ops *ops,
1104                        struct event_trigger_data *data)
1105 {
1106         return event_trigger_print("snapshot", m, (void *)data->count,
1107                                    data->filter_str);
1108 }
1109
1110 static struct event_trigger_ops snapshot_trigger_ops = {
1111         .func                   = snapshot_trigger,
1112         .print                  = snapshot_trigger_print,
1113         .init                   = event_trigger_init,
1114         .free                   = event_trigger_free,
1115 };
1116
1117 static struct event_trigger_ops snapshot_count_trigger_ops = {
1118         .func                   = snapshot_count_trigger,
1119         .print                  = snapshot_trigger_print,
1120         .init                   = event_trigger_init,
1121         .free                   = event_trigger_free,
1122 };
1123
1124 static struct event_trigger_ops *
1125 snapshot_get_trigger_ops(char *cmd, char *param)
1126 {
1127         return param ? &snapshot_count_trigger_ops : &snapshot_trigger_ops;
1128 }
1129
1130 static struct event_command trigger_snapshot_cmd = {
1131         .name                   = "snapshot",
1132         .trigger_type           = ETT_SNAPSHOT,
1133         .func                   = event_trigger_callback,
1134         .reg                    = register_snapshot_trigger,
1135         .unreg                  = unregister_trigger,
1136         .get_trigger_ops        = snapshot_get_trigger_ops,
1137         .set_filter             = set_trigger_filter,
1138 };
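/*
 * Example use of the snapshot trigger; note that register_snapshot_trigger()
 * above allocates the instance's snapshot buffer up front, so the capture in
 * snapshot_trigger() itself can run from tracepoint context:
 *
 *	echo 'snapshot:1 if child_comm == "bash"' > .../sched_process_fork/trigger
 *	cat /sys/kernel/debug/tracing/snapshot
 */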
1139
1140 static __init int register_trigger_snapshot_cmd(void)
1141 {
1142         int ret;
1143
1144         ret = register_event_command(&trigger_snapshot_cmd);
1145         WARN_ON(ret < 0);
1146
1147         return ret;
1148 }
1149 #else
1150 static __init int register_trigger_snapshot_cmd(void) { return 0; }
1151 #endif /* CONFIG_TRACER_SNAPSHOT */
1152
1153 #ifdef CONFIG_STACKTRACE
1154 #ifdef CONFIG_UNWINDER_ORC
1155 /* Skip 2:
1156  *   event_triggers_post_call()
1157  *   trace_event_raw_event_xxx()
1158  */
1159 # define STACK_SKIP 2
1160 #else
1161 /*
1162  * Skip 4:
1163  *   stacktrace_trigger()
1164  *   event_triggers_post_call()
1165  *   trace_event_buffer_commit()
1166  *   trace_event_raw_event_xxx()
1167  */
1168 #define STACK_SKIP 4
1169 #endif
1170
1171 static void
1172 stacktrace_trigger(struct event_trigger_data *data, void *rec,
1173                    struct ring_buffer_event *event)
1174 {
1175         trace_dump_stack(STACK_SKIP);
1176 }
1177
1178 static void
1179 stacktrace_count_trigger(struct event_trigger_data *data, void *rec,
1180                          struct ring_buffer_event *event)
1181 {
1182         if (!data->count)
1183                 return;
1184
1185         if (data->count != -1)
1186                 (data->count)--;
1187
1188         stacktrace_trigger(data, rec, event);
1189 }
1190
1191 static int
1192 stacktrace_trigger_print(struct seq_file *m, struct event_trigger_ops *ops,
1193                          struct event_trigger_data *data)
1194 {
1195         return event_trigger_print("stacktrace", m, (void *)data->count,
1196                                    data->filter_str);
1197 }
1198
1199 static struct event_trigger_ops stacktrace_trigger_ops = {
1200         .func                   = stacktrace_trigger,
1201         .print                  = stacktrace_trigger_print,
1202         .init                   = event_trigger_init,
1203         .free                   = event_trigger_free,
1204 };
1205
1206 static struct event_trigger_ops stacktrace_count_trigger_ops = {
1207         .func                   = stacktrace_count_trigger,
1208         .print                  = stacktrace_trigger_print,
1209         .init                   = event_trigger_init,
1210         .free                   = event_trigger_free,
1211 };
1212
1213 static struct event_trigger_ops *
1214 stacktrace_get_trigger_ops(char *cmd, char *param)
1215 {
1216         return param ? &stacktrace_count_trigger_ops : &stacktrace_trigger_ops;
1217 }
1218
1219 static struct event_command trigger_stacktrace_cmd = {
1220         .name                   = "stacktrace",
1221         .trigger_type           = ETT_STACKTRACE,
1222         .flags                  = EVENT_CMD_FL_POST_TRIGGER,
1223         .func                   = event_trigger_callback,
1224         .reg                    = register_trigger,
1225         .unreg                  = unregister_trigger,
1226         .get_trigger_ops        = stacktrace_get_trigger_ops,
1227         .set_filter             = set_trigger_filter,
1228 };
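/*
 * Typical use of the stacktrace trigger, dumping at most five stack traces
 * for matching events:
 *
 *	echo 'stacktrace:5 if prev_comm == "bash"' > .../sched_switch/trigger
 */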
1229
1230 static __init int register_trigger_stacktrace_cmd(void)
1231 {
1232         int ret;
1233
1234         ret = register_event_command(&trigger_stacktrace_cmd);
1235         WARN_ON(ret < 0);
1236
1237         return ret;
1238 }
1239 #else
1240 static __init int register_trigger_stacktrace_cmd(void) { return 0; }
1241 #endif /* CONFIG_STACKTRACE */
1242
1243 static __init void unregister_trigger_traceon_traceoff_cmds(void)
1244 {
1245         unregister_event_command(&trigger_traceon_cmd);
1246         unregister_event_command(&trigger_traceoff_cmd);
1247 }
1248
1249 static void
1250 event_enable_trigger(struct event_trigger_data *data, void *rec,
1251                      struct ring_buffer_event *event)
1252 {
1253         struct enable_trigger_data *enable_data = data->private_data;
1254
1255         if (enable_data->enable)
1256                 clear_bit(EVENT_FILE_FL_SOFT_DISABLED_BIT, &enable_data->file->flags);
1257         else
1258                 set_bit(EVENT_FILE_FL_SOFT_DISABLED_BIT, &enable_data->file->flags);
1259 }
1260
1261 static void
1262 event_enable_count_trigger(struct event_trigger_data *data, void *rec,
1263                            struct ring_buffer_event *event)
1264 {
1265         struct enable_trigger_data *enable_data = data->private_data;
1266
1267         if (!data->count)
1268                 return;
1269
1270         /* Skip if the event is in a state we want to switch to */
1271         if (enable_data->enable == !(enable_data->file->flags & EVENT_FILE_FL_SOFT_DISABLED))
1272                 return;
1273
1274         if (data->count != -1)
1275                 (data->count)--;
1276
1277         event_enable_trigger(data, rec, event);
1278 }
1279
1280 int event_enable_trigger_print(struct seq_file *m,
1281                                struct event_trigger_ops *ops,
1282                                struct event_trigger_data *data)
1283 {
1284         struct enable_trigger_data *enable_data = data->private_data;
1285
1286         seq_printf(m, "%s:%s:%s",
1287                    enable_data->hist ?
1288                    (enable_data->enable ? ENABLE_HIST_STR : DISABLE_HIST_STR) :
1289                    (enable_data->enable ? ENABLE_EVENT_STR : DISABLE_EVENT_STR),
1290                    enable_data->file->event_call->class->system,
1291                    trace_event_name(enable_data->file->event_call));
1292
1293         if (data->count == -1)
1294                 seq_puts(m, ":unlimited");
1295         else
1296                 seq_printf(m, ":count=%ld", data->count);
1297
1298         if (data->filter_str)
1299                 seq_printf(m, " if %s\n", data->filter_str);
1300         else
1301                 seq_putc(m, '\n');
1302
1303         return 0;
1304 }
1305
1306 void event_enable_trigger_free(struct event_trigger_ops *ops,
1307                                struct event_trigger_data *data)
1308 {
1309         struct enable_trigger_data *enable_data = data->private_data;
1310
1311         if (WARN_ON_ONCE(data->ref <= 0))
1312                 return;
1313
1314         data->ref--;
1315         if (!data->ref) {
1316                 /* Remove the SOFT_MODE flag */
1317                 trace_event_enable_disable(enable_data->file, 0, 1);
1318                 module_put(enable_data->file->event_call->mod);
1319                 trigger_data_free(data);
1320                 kfree(enable_data);
1321         }
1322 }
1323
1324 static struct event_trigger_ops event_enable_trigger_ops = {
1325         .func                   = event_enable_trigger,
1326         .print                  = event_enable_trigger_print,
1327         .init                   = event_trigger_init,
1328         .free                   = event_enable_trigger_free,
1329 };
1330
1331 static struct event_trigger_ops event_enable_count_trigger_ops = {
1332         .func                   = event_enable_count_trigger,
1333         .print                  = event_enable_trigger_print,
1334         .init                   = event_trigger_init,
1335         .free                   = event_enable_trigger_free,
1336 };
1337
1338 static struct event_trigger_ops event_disable_trigger_ops = {
1339         .func                   = event_enable_trigger,
1340         .print                  = event_enable_trigger_print,
1341         .init                   = event_trigger_init,
1342         .free                   = event_enable_trigger_free,
1343 };
1344
1345 static struct event_trigger_ops event_disable_count_trigger_ops = {
1346         .func                   = event_enable_count_trigger,
1347         .print                  = event_enable_trigger_print,
1348         .init                   = event_trigger_init,
1349         .free                   = event_enable_trigger_free,
1350 };
1351
1352 int event_enable_trigger_func(struct event_command *cmd_ops,
1353                               struct trace_event_file *file,
1354                               char *glob, char *cmd, char *param)
1355 {
1356         struct trace_event_file *event_enable_file;
1357         struct enable_trigger_data *enable_data;
1358         struct event_trigger_data *trigger_data;
1359         struct event_trigger_ops *trigger_ops;
1360         struct trace_array *tr = file->tr;
1361         const char *system;
1362         const char *event;
1363         bool hist = false;
1364         char *trigger;
1365         char *number;
1366         bool enable;
1367         int ret;
1368
1369         if (!param)
1370                 return -EINVAL;
1371
1372         /* separate the trigger from the filter (s:e:n [if filter]) */
1373         trigger = strsep(&param, " \t");
1374         if (!trigger)
1375                 return -EINVAL;
1376         if (param) {
1377                 param = skip_spaces(param);
1378                 if (!*param)
1379                         param = NULL;
1380         }
1381
1382         system = strsep(&trigger, ":");
1383         if (!trigger)
1384                 return -EINVAL;
1385
1386         event = strsep(&trigger, ":");
1387
1388         ret = -EINVAL;
1389         event_enable_file = find_event_file(tr, system, event);
1390         if (!event_enable_file)
1391                 goto out;
1392
1393 #ifdef CONFIG_HIST_TRIGGERS
1394         hist = ((strcmp(cmd, ENABLE_HIST_STR) == 0) ||
1395                 (strcmp(cmd, DISABLE_HIST_STR) == 0));
1396
1397         enable = ((strcmp(cmd, ENABLE_EVENT_STR) == 0) ||
1398                   (strcmp(cmd, ENABLE_HIST_STR) == 0));
1399 #else
1400         enable = strcmp(cmd, ENABLE_EVENT_STR) == 0;
1401 #endif
1402         trigger_ops = cmd_ops->get_trigger_ops(cmd, trigger);
1403
1404         ret = -ENOMEM;
1405         trigger_data = kzalloc(sizeof(*trigger_data), GFP_KERNEL);
1406         if (!trigger_data)
1407                 goto out;
1408
1409         enable_data = kzalloc(sizeof(*enable_data), GFP_KERNEL);
1410         if (!enable_data) {
1411                 kfree(trigger_data);
1412                 goto out;
1413         }
1414
1415         trigger_data->count = -1;
1416         trigger_data->ops = trigger_ops;
1417         trigger_data->cmd_ops = cmd_ops;
1418         INIT_LIST_HEAD(&trigger_data->list);
1419         RCU_INIT_POINTER(trigger_data->filter, NULL);
1420
1421         enable_data->hist = hist;
1422         enable_data->enable = enable;
1423         enable_data->file = event_enable_file;
1424         trigger_data->private_data = enable_data;
1425
1426         if (glob[0] == '!') {
1427                 cmd_ops->unreg(glob+1, trigger_ops, trigger_data, file);
1428                 kfree(trigger_data);
1429                 kfree(enable_data);
1430                 ret = 0;
1431                 goto out;
1432         }
1433
1434         /* Up the trigger_data count to make sure nothing frees it on failure */
1435         event_trigger_init(trigger_ops, trigger_data);
1436
1437         if (trigger) {
1438                 number = strsep(&trigger, ":");
1439
1440                 ret = -EINVAL;
1441                 if (!strlen(number))
1442                         goto out_free;
1443
1444                 /*
1445                  * We use the callback data field (which is a pointer)
1446                  * as our counter.
1447                  */
1448                 ret = kstrtoul(number, 0, &trigger_data->count);
1449                 if (ret)
1450                         goto out_free;
1451         }
1452
1453         if (!param) /* if param is non-empty, it's supposed to be a filter */
1454                 goto out_reg;
1455
1456         if (!cmd_ops->set_filter)
1457                 goto out_reg;
1458
1459         ret = cmd_ops->set_filter(param, trigger_data, file);
1460         if (ret < 0)
1461                 goto out_free;
1462
1463  out_reg:
1464         /* Don't let event modules unload while probe registered */
1465         ret = try_module_get(event_enable_file->event_call->mod);
1466         if (!ret) {
1467                 ret = -EBUSY;
1468                 goto out_free;
1469         }
1470
1471         ret = trace_event_enable_disable(event_enable_file, 1, 1);
1472         if (ret < 0)
1473                 goto out_put;
1474         ret = cmd_ops->reg(glob, trigger_ops, trigger_data, file);
1475         /*
1476          * The above returns on success the # of functions enabled,
1477          * but if it didn't find any functions it returns zero.
1478          * Consider no functions a failure too.
1479          */
1480         if (!ret) {
1481                 ret = -ENOENT;
1482                 goto out_disable;
1483         } else if (ret < 0)
1484                 goto out_disable;
1485         /* Just return zero, not the number of enabled functions */
1486         ret = 0;
1487         event_trigger_free(trigger_ops, trigger_data);
1488  out:
1489         return ret;
1490
1491  out_disable:
1492         trace_event_enable_disable(event_enable_file, 0, 1);
1493  out_put:
1494         module_put(event_enable_file->event_call->mod);
1495  out_free:
1496         if (cmd_ops->set_filter)
1497                 cmd_ops->set_filter(NULL, trigger_data, NULL);
1498         event_trigger_free(trigger_ops, trigger_data);
1499         kfree(enable_data);
1500         goto out;
1501 }
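/*
 * The command format parsed above is 'enable_event:<system>:<event>[:count]'
 * (likewise disable_event), e.g.:
 *
 *	echo 'enable_event:kmem:kmalloc:1 if nr_rq > 1' > .../block_unplug/trigger
 *
 * which soft-enables kmem:kmalloc the first time a matching block_unplug
 * event fires.
 */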
1502
1503 int event_enable_register_trigger(char *glob,
1504                                   struct event_trigger_ops *ops,
1505                                   struct event_trigger_data *data,
1506                                   struct trace_event_file *file)
1507 {
1508         struct enable_trigger_data *enable_data = data->private_data;
1509         struct enable_trigger_data *test_enable_data;
1510         struct event_trigger_data *test;
1511         int ret = 0;
1512
1513         lockdep_assert_held(&event_mutex);
1514
1515         list_for_each_entry(test, &file->triggers, list) {
1516                 test_enable_data = test->private_data;
1517                 if (test_enable_data &&
1518                     (test->cmd_ops->trigger_type ==
1519                      data->cmd_ops->trigger_type) &&
1520                     (test_enable_data->file == enable_data->file)) {
1521                         ret = -EEXIST;
1522                         goto out;
1523                 }
1524         }
1525
1526         if (data->ops->init) {
1527                 ret = data->ops->init(data->ops, data);
1528                 if (ret < 0)
1529                         goto out;
1530         }
1531
1532         list_add_rcu(&data->list, &file->triggers);
1533         ret++;
1534
1535         update_cond_flag(file);
1536         if (trace_event_trigger_enable_disable(file, 1) < 0) {
1537                 list_del_rcu(&data->list);
1538                 update_cond_flag(file);
1539                 ret--;
1540         }
1541 out:
1542         return ret;
1543 }
1544
1545 void event_enable_unregister_trigger(char *glob,
1546                                      struct event_trigger_ops *ops,
1547                                      struct event_trigger_data *test,
1548                                      struct trace_event_file *file)
1549 {
1550         struct enable_trigger_data *test_enable_data = test->private_data;
1551         struct enable_trigger_data *enable_data;
1552         struct event_trigger_data *data;
1553         bool unregistered = false;
1554
1555         lockdep_assert_held(&event_mutex);
1556
1557         list_for_each_entry(data, &file->triggers, list) {
1558                 enable_data = data->private_data;
1559                 if (enable_data &&
1560                     (data->cmd_ops->trigger_type ==
1561                      test->cmd_ops->trigger_type) &&
1562                     (enable_data->file == test_enable_data->file)) {
1563                         unregistered = true;
1564                         list_del_rcu(&data->list);
1565                         trace_event_trigger_enable_disable(file, 0);
1566                         update_cond_flag(file);
1567                         break;
1568                 }
1569         }
1570
1571         if (unregistered && data->ops->free)
1572                 data->ops->free(data->ops, data);
1573 }
1574
1575 static struct event_trigger_ops *
1576 event_enable_get_trigger_ops(char *cmd, char *param)
1577 {
1578         struct event_trigger_ops *ops;
1579         bool enable;
1580
1581 #ifdef CONFIG_HIST_TRIGGERS
1582         enable = ((strcmp(cmd, ENABLE_EVENT_STR) == 0) ||
1583                   (strcmp(cmd, ENABLE_HIST_STR) == 0));
1584 #else
1585         enable = strcmp(cmd, ENABLE_EVENT_STR) == 0;
1586 #endif
1587         if (enable)
1588                 ops = param ? &event_enable_count_trigger_ops :
1589                         &event_enable_trigger_ops;
1590         else
1591                 ops = param ? &event_disable_count_trigger_ops :
1592                         &event_disable_trigger_ops;
1593
1594         return ops;
1595 }
1596
1597 static struct event_command trigger_enable_cmd = {
1598         .name                   = ENABLE_EVENT_STR,
1599         .trigger_type           = ETT_EVENT_ENABLE,
1600         .func                   = event_enable_trigger_func,
1601         .reg                    = event_enable_register_trigger,
1602         .unreg                  = event_enable_unregister_trigger,
1603         .get_trigger_ops        = event_enable_get_trigger_ops,
1604         .set_filter             = set_trigger_filter,
1605 };
1606
1607 static struct event_command trigger_disable_cmd = {
1608         .name                   = DISABLE_EVENT_STR,
1609         .trigger_type           = ETT_EVENT_ENABLE,
1610         .func                   = event_enable_trigger_func,
1611         .reg                    = event_enable_register_trigger,
1612         .unreg                  = event_enable_unregister_trigger,
1613         .get_trigger_ops        = event_enable_get_trigger_ops,
1614         .set_filter             = set_trigger_filter,
1615 };
1616
1617 static __init void unregister_trigger_enable_disable_cmds(void)
1618 {
1619         unregister_event_command(&trigger_enable_cmd);
1620         unregister_event_command(&trigger_disable_cmd);
1621 }
1622
1623 static __init int register_trigger_enable_disable_cmds(void)
1624 {
1625         int ret;
1626
1627         ret = register_event_command(&trigger_enable_cmd);
1628         if (WARN_ON(ret < 0))
1629                 return ret;
1630         ret = register_event_command(&trigger_disable_cmd);
1631         if (WARN_ON(ret < 0))
1632                 unregister_trigger_enable_disable_cmds();
1633
1634         return ret;
1635 }
1636
1637 static __init int register_trigger_traceon_traceoff_cmds(void)
1638 {
1639         int ret;
1640
1641         ret = register_event_command(&trigger_traceon_cmd);
1642         if (WARN_ON(ret < 0))
1643                 return ret;
1644         ret = register_event_command(&trigger_traceoff_cmd);
1645         if (WARN_ON(ret < 0))
1646                 unregister_trigger_traceon_traceoff_cmds();
1647
1648         return ret;
1649 }
1650
1651 __init int register_trigger_cmds(void)
1652 {
1653         register_trigger_traceon_traceoff_cmds();
1654         register_trigger_snapshot_cmd();
1655         register_trigger_stacktrace_cmd();
1656         register_trigger_enable_disable_cmds();
1657         register_trigger_hist_enable_disable_cmds();
1658         register_trigger_hist_cmd();
1659
1660         return 0;
1661 }