GNU Linux-libre 4.14.294-gnu1
kernel/trace/trace_functions.c
// SPDX-License-Identifier: GPL-2.0
/*
 * ring buffer based function tracer
 *
 * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
 * Copyright (C) 2008 Ingo Molnar <mingo@redhat.com>
 *
 * Based on code from the latency_tracer, that is:
 *
 *  Copyright (C) 2004-2006 Ingo Molnar
 *  Copyright (C) 2004 Nadia Yvette Chambers
 */
#include <linux/ring_buffer.h>
#include <linux/debugfs.h>
#include <linux/uaccess.h>
#include <linux/ftrace.h>
#include <linux/slab.h>
#include <linux/fs.h>

#include "trace.h"

static void tracing_start_function_trace(struct trace_array *tr);
static void tracing_stop_function_trace(struct trace_array *tr);
static void
function_trace_call(unsigned long ip, unsigned long parent_ip,
                    struct ftrace_ops *op, struct pt_regs *pt_regs);
static void
function_stack_trace_call(unsigned long ip, unsigned long parent_ip,
                          struct ftrace_ops *op, struct pt_regs *pt_regs);
static struct tracer_flags func_flags;

/* Our option */
enum {
        TRACE_FUNC_OPT_STACK    = 0x1,
};

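/*
 * Allocate a per-instance ftrace_ops and wire it to this trace_array.
 * The ops and the trace_array point at each other so the callback can
 * find its instance through op->private.
 */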
static int allocate_ftrace_ops(struct trace_array *tr)
{
        struct ftrace_ops *ops;

        ops = kzalloc(sizeof(*ops), GFP_KERNEL);
        if (!ops)
                return -ENOMEM;

        /* Currently only the non-stack version is supported */
        ops->func = function_trace_call;
        ops->flags = FTRACE_OPS_FL_RECURSION_SAFE | FTRACE_OPS_FL_PID;

        tr->ops = ops;
        ops->private = tr;
        return 0;
}

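/*
 * Create the per-instance function filter control files
 * (set_ftrace_filter and set_ftrace_notrace) under @parent.
 */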
int ftrace_create_function_files(struct trace_array *tr,
                                 struct dentry *parent)
{
        int ret;

        /*
         * The top level array uses the "global_ops", and the files are
         * created on boot up.
         */
        if (tr->flags & TRACE_ARRAY_FL_GLOBAL)
                return 0;

        ret = allocate_ftrace_ops(tr);
        if (ret)
                return ret;

        ftrace_create_filter_files(tr->ops, parent);

        return 0;
}

void ftrace_destroy_function_files(struct trace_array *tr)
{
        ftrace_destroy_filter_files(tr->ops);
        kfree(tr->ops);
        tr->ops = NULL;
}

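/*
 * Example usage (a sketch; assumes tracefs is mounted at
 * /sys/kernel/tracing):
 *
 *   echo function > /sys/kernel/tracing/current_tracer
 *   cat /sys/kernel/tracing/trace
 */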
static int function_trace_init(struct trace_array *tr)
{
        ftrace_func_t func;

        /*
         * Instance trace_arrays get their ops allocated at instance
         * creation, unless that allocation failed.
         */
        if (!tr->ops)
                return -ENOMEM;

        /* Currently only the global instance can do stack tracing */
        if (tr->flags & TRACE_ARRAY_FL_GLOBAL &&
            func_flags.val & TRACE_FUNC_OPT_STACK)
                func = function_stack_trace_call;
        else
                func = function_trace_call;

        ftrace_init_array_ops(tr, func);

        tr->trace_buffer.cpu = get_cpu();
        put_cpu();

        tracing_start_cmdline_record();
        tracing_start_function_trace(tr);
        return 0;
}

static void function_trace_reset(struct trace_array *tr)
{
        tracing_stop_function_trace(tr);
        tracing_stop_cmdline_record();
        ftrace_reset_array_ops(tr);
}

static void function_trace_start(struct trace_array *tr)
{
        tracing_reset_online_cpus(&tr->trace_buffer);
}

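/*
 * Per-function callback for the plain function tracer. This runs from
 * the ftrace trampoline for every traced function, so it must guard
 * against recursion (tracing a function the tracer itself uses).
 */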
static void
function_trace_call(unsigned long ip, unsigned long parent_ip,
                    struct ftrace_ops *op, struct pt_regs *pt_regs)
{
        struct trace_array *tr = op->private;
        struct trace_array_cpu *data;
        unsigned long flags;
        int bit;
        int cpu;
        int pc;

        if (unlikely(!tr->function_enabled))
                return;

        pc = preempt_count();
        preempt_disable_notrace();

        bit = trace_test_and_set_recursion(TRACE_FTRACE_START);
        if (bit < 0)
                goto out;

        cpu = smp_processor_id();
        data = per_cpu_ptr(tr->trace_buffer.data, cpu);
        if (!atomic_read(&data->disabled)) {
                local_save_flags(flags);
                trace_function(tr, ip, parent_ip, flags, pc);
        }
        trace_clear_recursion(bit);

 out:
        preempt_enable_notrace();
}

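/*
 * Like function_trace_call(), but also records a stack trace for each
 * traced function. Here IRQs are disabled and the data->disabled
 * counter serves as the reentrancy guard, since the stack unwind may
 * itself hit traced functions.
 */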
static void
function_stack_trace_call(unsigned long ip, unsigned long parent_ip,
                          struct ftrace_ops *op, struct pt_regs *pt_regs)
{
        struct trace_array *tr = op->private;
        struct trace_array_cpu *data;
        unsigned long flags;
        long disabled;
        int cpu;
        int pc;

        if (unlikely(!tr->function_enabled))
                return;

        /*
         * Need to use raw_smp_processor_id(), since this must be
         * called before the recursion protection is set up.
         */
        local_irq_save(flags);
        cpu = raw_smp_processor_id();
        data = per_cpu_ptr(tr->trace_buffer.data, cpu);
        disabled = atomic_inc_return(&data->disabled);

        if (likely(disabled == 1)) {
                pc = preempt_count();
                trace_function(tr, ip, parent_ip, flags, pc);
                /*
                 * Skip over 5 funcs:
                 *    __ftrace_trace_stack,
                 *    __trace_stack,
                 *    function_stack_trace_call,
                 *    ftrace_list_func,
                 *    ftrace_call
                 */
                __trace_stack(tr, flags, 5, pc);
        }

        atomic_dec(&data->disabled);
        local_irq_restore(flags);
}

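/*
 * The func_stack_trace option below is toggled through the options
 * directory, e.g. (a sketch, assuming tracefs at /sys/kernel/tracing):
 *
 *   echo 1 > /sys/kernel/tracing/options/func_stack_trace
 */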
static struct tracer_opt func_opts[] = {
#ifdef CONFIG_STACKTRACE
        { TRACER_OPT(func_stack_trace, TRACE_FUNC_OPT_STACK) },
#endif
        { } /* Always set a last empty entry */
};

static struct tracer_flags func_flags = {
        .val = 0, /* By default: all flags disabled */
        .opts = func_opts
};

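/*
 * function_enabled is cleared around register/unregister so the
 * callback bails out early while the ops are in flux.
 */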
static void tracing_start_function_trace(struct trace_array *tr)
{
        tr->function_enabled = 0;
        register_ftrace_function(tr->ops);
        tr->function_enabled = 1;
}

static void tracing_stop_function_trace(struct trace_array *tr)
{
        tr->function_enabled = 0;
        unregister_ftrace_function(tr->ops);
}

static struct tracer function_trace;

static int
func_set_flag(struct trace_array *tr, u32 old_flags, u32 bit, int set)
{
        switch (bit) {
        case TRACE_FUNC_OPT_STACK:
                /* do nothing if already set */
                if (!!set == !!(func_flags.val & TRACE_FUNC_OPT_STACK))
                        break;

                /* We can change this flag when not running. */
                if (tr->current_trace != &function_trace)
                        break;

                unregister_ftrace_function(tr->ops);

                if (set) {
                        tr->ops->func = function_stack_trace_call;
                        register_ftrace_function(tr->ops);
                } else {
                        tr->ops->func = function_trace_call;
                        register_ftrace_function(tr->ops);
                }

                break;
        default:
                return -EINVAL;
        }

        return 0;
}

static struct tracer function_trace __tracer_data =
{
        .name           = "function",
        .init           = function_trace_init,
        .reset          = function_trace_reset,
        .start          = function_trace_start,
        .flags          = &func_flags,
        .set_flag       = func_set_flag,
        .allow_instances = true,
#ifdef CONFIG_FTRACE_SELFTEST
        .selftest       = trace_selftest_startup_function,
#endif
};

#ifdef CONFIG_DYNAMIC_FTRACE
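/*
 * The probes below implement the traceon/traceoff/stacktrace/dump/
 * cpudump commands that can be attached to functions through
 * set_ftrace_filter. An optional :count limits how many times a
 * probe fires.
 */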
static void update_traceon_count(struct ftrace_probe_ops *ops,
                                 unsigned long ip,
                                 struct trace_array *tr, bool on,
                                 void *data)
{
        struct ftrace_func_mapper *mapper = data;
        long *count;
        long old_count;

        /*
         * Tracing gets disabled (or enabled) once per count.
         * This function can be called at the same time on multiple CPUs.
         * It is fine if both disable (or enable) tracing, as disabling
         * (or enabling) the second time doesn't do anything as the
         * state of the tracer is already disabled (or enabled).
         * What needs to be synchronized in this case is that the count
         * only gets decremented once, even if the tracer is disabled
         * (or enabled) twice, as the second one is really a nop.
         *
         * The memory barriers guarantee that we only decrement the
         * counter once. First the count is read to a local variable
         * and a read barrier is used to make sure that it is loaded
         * before checking if the tracer is in the state we want.
         * If the tracer is not in the state we want, then the count
         * is guaranteed to be the old count.
         *
         * Next the tracer is set to the state we want (disabled or enabled)
         * then a write memory barrier is used to make sure that
         * the new state is visible before changing the counter to
         * the old count minus one. This guarantees that another CPU
         * executing this code will see the new state before seeing
         * the new counter value, and will not decrement the counter
         * again if it already sees the new count.
         *
         * Note, there is no synchronization between this and a user
         * setting the tracing_on file. But we currently don't care
         * about that.
         */
        count = (long *)ftrace_func_mapper_find_ip(mapper, ip);
        old_count = *count;

        if (old_count <= 0)
                return;

        /* Make sure we see count before checking tracing state */
        smp_rmb();

        if (on == !!tracer_tracing_is_on(tr))
                return;

        if (on)
                tracer_tracing_on(tr);
        else
                tracer_tracing_off(tr);

        /* Make sure tracing state is visible before updating count */
        smp_wmb();

        *count = old_count - 1;
}

static void
ftrace_traceon_count(unsigned long ip, unsigned long parent_ip,
                     struct trace_array *tr, struct ftrace_probe_ops *ops,
                     void *data)
{
        update_traceon_count(ops, ip, tr, 1, data);
}

static void
ftrace_traceoff_count(unsigned long ip, unsigned long parent_ip,
                      struct trace_array *tr, struct ftrace_probe_ops *ops,
                      void *data)
{
        update_traceon_count(ops, ip, tr, 0, data);
}

static void
ftrace_traceon(unsigned long ip, unsigned long parent_ip,
               struct trace_array *tr, struct ftrace_probe_ops *ops,
               void *data)
{
        if (tracer_tracing_is_on(tr))
                return;

        tracer_tracing_on(tr);
}

static void
ftrace_traceoff(unsigned long ip, unsigned long parent_ip,
                struct trace_array *tr, struct ftrace_probe_ops *ops,
                void *data)
{
        if (!tracer_tracing_is_on(tr))
                return;

        tracer_tracing_off(tr);
}

/*
 * Skip 4:
 *   ftrace_stacktrace()
 *   function_trace_probe_call()
 *   ftrace_ops_list_func()
 *   ftrace_call()
 */
#define STACK_SKIP 4

static __always_inline void trace_stack(struct trace_array *tr)
{
        unsigned long flags;
        int pc;

        local_save_flags(flags);
        pc = preempt_count();

        __trace_stack(tr, flags, STACK_SKIP, pc);
}

static void
ftrace_stacktrace(unsigned long ip, unsigned long parent_ip,
                  struct trace_array *tr, struct ftrace_probe_ops *ops,
                  void *data)
{
        trace_stack(tr);
}

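/*
 * Note for the loop below: cmpxchg() returns the value that was
 * actually in *count, so the loop retries until this CPU's decrement
 * wins, and the stack trace fires exactly once per successful
 * decrement.
 */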
static void
ftrace_stacktrace_count(unsigned long ip, unsigned long parent_ip,
                        struct trace_array *tr, struct ftrace_probe_ops *ops,
                        void *data)
{
        struct ftrace_func_mapper *mapper = data;
        long *count;
        long old_count;
        long new_count;

        if (!tracing_is_on())
                return;

        /* unlimited? */
        if (!mapper) {
                trace_stack(tr);
                return;
        }

        count = (long *)ftrace_func_mapper_find_ip(mapper, ip);

        /*
         * Stack traces should only execute the number of times the
         * user specified in the counter.
         */
        do {
                old_count = *count;

                if (!old_count)
                        return;

                new_count = old_count - 1;
                new_count = cmpxchg(count, old_count, new_count);
                if (new_count == old_count)
                        trace_stack(tr);

                if (!tracing_is_on())
                        return;

        } while (new_count != old_count);
}

static int update_count(struct ftrace_probe_ops *ops, unsigned long ip,
                        void *data)
{
        struct ftrace_func_mapper *mapper = data;
        long *count = NULL;

        if (mapper)
                count = (long *)ftrace_func_mapper_find_ip(mapper, ip);

        if (count) {
                if (*count <= 0)
                        return 0;
                (*count)--;
        }

        return 1;
}

static void
ftrace_dump_probe(unsigned long ip, unsigned long parent_ip,
                  struct trace_array *tr, struct ftrace_probe_ops *ops,
                  void *data)
{
        if (update_count(ops, ip, data))
                ftrace_dump(DUMP_ALL);
}

/* Only dump the current CPU buffer. */
static void
ftrace_cpudump_probe(unsigned long ip, unsigned long parent_ip,
                     struct trace_array *tr, struct ftrace_probe_ops *ops,
                     void *data)
{
        if (update_count(ops, ip, data))
                ftrace_dump(DUMP_ORIG);
}

static int
ftrace_probe_print(const char *name, struct seq_file *m,
                   unsigned long ip, struct ftrace_probe_ops *ops,
                   void *data)
{
        struct ftrace_func_mapper *mapper = data;
        long *count = NULL;

        seq_printf(m, "%ps:%s", (void *)ip, name);

        if (mapper)
                count = (long *)ftrace_func_mapper_find_ip(mapper, ip);

        if (count)
                seq_printf(m, ":count=%ld\n", *count);
        else
                seq_puts(m, ":unlimited\n");

        return 0;
}

static int
ftrace_traceon_print(struct seq_file *m, unsigned long ip,
                     struct ftrace_probe_ops *ops,
                     void *data)
{
        return ftrace_probe_print("traceon", m, ip, ops, data);
}

static int
ftrace_traceoff_print(struct seq_file *m, unsigned long ip,
                      struct ftrace_probe_ops *ops, void *data)
{
        return ftrace_probe_print("traceoff", m, ip, ops, data);
}

static int
ftrace_stacktrace_print(struct seq_file *m, unsigned long ip,
                        struct ftrace_probe_ops *ops, void *data)
{
        return ftrace_probe_print("stacktrace", m, ip, ops, data);
}

static int
ftrace_dump_print(struct seq_file *m, unsigned long ip,
                  struct ftrace_probe_ops *ops, void *data)
{
        return ftrace_probe_print("dump", m, ip, ops, data);
}

static int
ftrace_cpudump_print(struct seq_file *m, unsigned long ip,
                     struct ftrace_probe_ops *ops, void *data)
{
        return ftrace_probe_print("cpudump", m, ip, ops, data);
}

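/*
 * Lazily allocate the ip -> count mapper the first time a counted
 * probe is attached, then record the initial count for this ip.
 */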
static int
ftrace_count_init(struct ftrace_probe_ops *ops, struct trace_array *tr,
                  unsigned long ip, void *init_data, void **data)
{
        struct ftrace_func_mapper *mapper = *data;

        if (!mapper) {
                mapper = allocate_ftrace_func_mapper();
                if (!mapper)
                        return -ENOMEM;
                *data = mapper;
        }

        return ftrace_func_mapper_add_ip(mapper, ip, init_data);
}

static void
ftrace_count_free(struct ftrace_probe_ops *ops, struct trace_array *tr,
                  unsigned long ip, void *data)
{
        struct ftrace_func_mapper *mapper = data;

        if (!ip) {
                free_ftrace_func_mapper(mapper, NULL);
                return;
        }

        ftrace_func_mapper_remove_ip(mapper, ip);
}

static struct ftrace_probe_ops traceon_count_probe_ops = {
        .func                   = ftrace_traceon_count,
        .print                  = ftrace_traceon_print,
        .init                   = ftrace_count_init,
        .free                   = ftrace_count_free,
};

static struct ftrace_probe_ops traceoff_count_probe_ops = {
        .func                   = ftrace_traceoff_count,
        .print                  = ftrace_traceoff_print,
        .init                   = ftrace_count_init,
        .free                   = ftrace_count_free,
};

static struct ftrace_probe_ops stacktrace_count_probe_ops = {
        .func                   = ftrace_stacktrace_count,
        .print                  = ftrace_stacktrace_print,
        .init                   = ftrace_count_init,
        .free                   = ftrace_count_free,
};

static struct ftrace_probe_ops dump_probe_ops = {
        .func                   = ftrace_dump_probe,
        .print                  = ftrace_dump_print,
        .init                   = ftrace_count_init,
        .free                   = ftrace_count_free,
};

static struct ftrace_probe_ops cpudump_probe_ops = {
        .func                   = ftrace_cpudump_probe,
        .print                  = ftrace_cpudump_print,
};

static struct ftrace_probe_ops traceon_probe_ops = {
        .func                   = ftrace_traceon,
        .print                  = ftrace_traceon_print,
};

static struct ftrace_probe_ops traceoff_probe_ops = {
        .func                   = ftrace_traceoff,
        .print                  = ftrace_traceoff_print,
};

static struct ftrace_probe_ops stacktrace_probe_ops = {
        .func                   = ftrace_stacktrace,
        .print                  = ftrace_stacktrace_print,
};

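/*
 * Common parsing for the probe commands. The expected syntax is
 * <function>:<command>[:<count>], e.g. (a sketch):
 *
 *   echo 'schedule:traceoff:5' > set_ftrace_filter
 *   echo '!schedule:traceoff' > set_ftrace_filter
 */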
static int
ftrace_trace_probe_callback(struct trace_array *tr,
                            struct ftrace_probe_ops *ops,
                            struct ftrace_hash *hash, char *glob,
                            char *cmd, char *param, int enable)
{
        void *count = (void *)-1;
        char *number;
        int ret;

        /* hash funcs only work with set_ftrace_filter */
        if (!enable)
                return -EINVAL;

        if (glob[0] == '!')
                return unregister_ftrace_function_probe_func(glob+1, tr, ops);

        if (!param)
                goto out_reg;

        number = strsep(&param, ":");

        if (!strlen(number))
                goto out_reg;

        /*
         * We use the callback data field (which is a pointer)
         * as our counter.
         */
        ret = kstrtoul(number, 0, (unsigned long *)&count);
        if (ret)
                return ret;

 out_reg:
        ret = register_ftrace_function_probe(glob, tr, ops, count);

        return ret < 0 ? ret : 0;
}

static int
ftrace_trace_onoff_callback(struct trace_array *tr, struct ftrace_hash *hash,
                            char *glob, char *cmd, char *param, int enable)
{
        struct ftrace_probe_ops *ops;

        if (!tr)
                return -ENODEV;

        /* we register both traceon and traceoff to this callback */
        if (strcmp(cmd, "traceon") == 0)
                ops = param ? &traceon_count_probe_ops : &traceon_probe_ops;
        else
                ops = param ? &traceoff_count_probe_ops : &traceoff_probe_ops;

        return ftrace_trace_probe_callback(tr, ops, hash, glob, cmd,
                                           param, enable);
}

static int
ftrace_stacktrace_callback(struct trace_array *tr, struct ftrace_hash *hash,
                           char *glob, char *cmd, char *param, int enable)
{
        struct ftrace_probe_ops *ops;

        if (!tr)
                return -ENODEV;

        ops = param ? &stacktrace_count_probe_ops : &stacktrace_probe_ops;

        return ftrace_trace_probe_callback(tr, ops, hash, glob, cmd,
                                           param, enable);
}

static int
ftrace_dump_callback(struct trace_array *tr, struct ftrace_hash *hash,
                     char *glob, char *cmd, char *param, int enable)
{
        struct ftrace_probe_ops *ops;

        if (!tr)
                return -ENODEV;

        ops = &dump_probe_ops;

        /* Only dump once. */
        return ftrace_trace_probe_callback(tr, ops, hash, glob, cmd,
                                           "1", enable);
}

static int
ftrace_cpudump_callback(struct trace_array *tr, struct ftrace_hash *hash,
                        char *glob, char *cmd, char *param, int enable)
{
        struct ftrace_probe_ops *ops;

        if (!tr)
                return -ENODEV;

        ops = &cpudump_probe_ops;

        /* Only dump once. */
        return ftrace_trace_probe_callback(tr, ops, hash, glob, cmd,
                                           "1", enable);
}

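/*
 * Command registrations tying the names usable in set_ftrace_filter
 * to the callbacks above.
 */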
static struct ftrace_func_command ftrace_traceon_cmd = {
        .name                   = "traceon",
        .func                   = ftrace_trace_onoff_callback,
};

static struct ftrace_func_command ftrace_traceoff_cmd = {
        .name                   = "traceoff",
        .func                   = ftrace_trace_onoff_callback,
};

static struct ftrace_func_command ftrace_stacktrace_cmd = {
        .name                   = "stacktrace",
        .func                   = ftrace_stacktrace_callback,
};

static struct ftrace_func_command ftrace_dump_cmd = {
        .name                   = "dump",
        .func                   = ftrace_dump_callback,
};

static struct ftrace_func_command ftrace_cpudump_cmd = {
        .name                   = "cpudump",
        .func                   = ftrace_cpudump_callback,
};

static int __init init_func_cmd_traceon(void)
{
        int ret;

        ret = register_ftrace_command(&ftrace_traceoff_cmd);
        if (ret)
                return ret;

        ret = register_ftrace_command(&ftrace_traceon_cmd);
        if (ret)
                goto out_free_traceoff;

        ret = register_ftrace_command(&ftrace_stacktrace_cmd);
        if (ret)
                goto out_free_traceon;

        ret = register_ftrace_command(&ftrace_dump_cmd);
        if (ret)
                goto out_free_stacktrace;

        ret = register_ftrace_command(&ftrace_cpudump_cmd);
        if (ret)
                goto out_free_dump;

        return 0;

 out_free_dump:
        unregister_ftrace_command(&ftrace_dump_cmd);
 out_free_stacktrace:
        unregister_ftrace_command(&ftrace_stacktrace_cmd);
 out_free_traceon:
        unregister_ftrace_command(&ftrace_traceon_cmd);
 out_free_traceoff:
        unregister_ftrace_command(&ftrace_traceoff_cmd);

        return ret;
}
#else
static inline int init_func_cmd_traceon(void)
{
        return 0;
}
#endif /* CONFIG_DYNAMIC_FTRACE */

__init int init_function_trace(void)
{
        init_func_cmd_traceon();
        return register_tracer(&function_trace);
}