GNU Linux-libre 5.10.217-gnu1
kernel/trace/trace_irqsoff.c
// SPDX-License-Identifier: GPL-2.0
/*
 * trace irqs off critical timings
 *
 * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
 * Copyright (C) 2008 Ingo Molnar <mingo@redhat.com>
 *
 * From code in the latency_tracer, that is:
 *
 *  Copyright (C) 2004-2006 Ingo Molnar
 *  Copyright (C) 2004 Nadia Yvette Chambers
 */
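
/*
 * Typical usage from userspace, as a sketch (paths assume tracefs is
 * mounted at /sys/kernel/tracing):
 *
 *	echo 0 > tracing_max_latency	# reset the recorded maximum
 *	echo irqsoff > current_tracer	# or preemptoff / preemptirqsoff
 *	... run the workload ...
 *	cat tracing_max_latency		# worst-case latency seen, in usecs
 *	cat trace			# snapshot of that worst-case section
 */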
#include <linux/kallsyms.h>
#include <linux/uaccess.h>
#include <linux/module.h>
#include <linux/ftrace.h>
#include <linux/kprobes.h>

#include "trace.h"

#include <trace/events/preemptirq.h>

#if defined(CONFIG_IRQSOFF_TRACER) || defined(CONFIG_PREEMPT_TRACER)
static struct trace_array               *irqsoff_trace __read_mostly;
static int                              tracer_enabled __read_mostly;

static DEFINE_PER_CPU(int, tracing_cpu);

static DEFINE_RAW_SPINLOCK(max_trace_lock);

enum {
        TRACER_IRQS_OFF         = (1 << 1),
        TRACER_PREEMPT_OFF      = (1 << 2),
};

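/*
 * trace_type selects which critical sections are measured: the irqsoff
 * tracer sets TRACER_IRQS_OFF, preemptoff sets TRACER_PREEMPT_OFF, and
 * preemptirqsoff sets both (see the *_tracer_init() functions below).
 */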
static int trace_type __read_mostly;

static int save_flags;

static void stop_irqsoff_tracer(struct trace_array *tr, int graph);
static int start_irqsoff_tracer(struct trace_array *tr, int graph);

#ifdef CONFIG_PREEMPT_TRACER
static inline int
preempt_trace(int pc)
{
        return ((trace_type & TRACER_PREEMPT_OFF) && pc);
}
#else
# define preempt_trace(pc) (0)
#endif

#ifdef CONFIG_IRQSOFF_TRACER
static inline int
irq_trace(void)
{
        return ((trace_type & TRACER_IRQS_OFF) &&
                irqs_disabled());
}
#else
# define irq_trace() (0)
#endif

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
static int irqsoff_display_graph(struct trace_array *tr, int set);
# define is_graph(tr) ((tr)->trace_flags & TRACE_ITER_DISPLAY_GRAPH)
#else
static inline int irqsoff_display_graph(struct trace_array *tr, int set)
{
        return -EINVAL;
}
# define is_graph(tr) false
#endif

/*
 * Sequence count - we record it when starting a measurement and
 * skip the latency if the sequence has changed - some other section
 * did a maximum and could disturb our measurement with serial console
 * printouts, etc. Truly coinciding maximum latencies should be rare
 * and what happens together happens separately as well, so this doesn't
 * decrease the validity of the maximum found:
 */
static __cacheline_aligned_in_smp       unsigned long max_sequence;

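/*
 * Concretely: start_critical_timing() copies max_sequence into the
 * per-cpu data->critical_sequence, and check_critical_timing() only
 * records the section if the two still match, i.e. no other CPU has
 * updated the maximum in between.
 */
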
#ifdef CONFIG_FUNCTION_TRACER
/*
 * Prologue for the preempt and irqs off function tracers.
 *
 * Returns 1 if it is OK to continue, and data->disabled is
 *            incremented.
 *         0 if the trace is to be ignored, and data->disabled
 *            is kept the same.
 *
 * Note, this function is also used outside this ifdef but
 *  inside the #ifdef of the function graph tracer below.
 *  This is OK, since the function graph tracer is
 *  dependent on the function tracer.
 */
static int func_prolog_dec(struct trace_array *tr,
                           struct trace_array_cpu **data,
                           unsigned long *flags)
{
        long disabled;
        int cpu;

        /*
         * Does not matter if we preempt. We test the flags
         * afterward, to see if irqs are disabled or not.
         * If we preempt and get a false positive, the flags
         * test will fail.
         */
        cpu = raw_smp_processor_id();
        if (likely(!per_cpu(tracing_cpu, cpu)))
                return 0;

        local_save_flags(*flags);
        /*
         * Slight chance to get a false positive on tracing_cpu,
         * although I'm starting to think there isn't a chance.
         * Leave this for now just to be paranoid.
         */
        if (!irqs_disabled_flags(*flags) && !preempt_count())
                return 0;

        *data = per_cpu_ptr(tr->array_buffer.data, cpu);
        disabled = atomic_inc_return(&(*data)->disabled);

        if (likely(disabled == 1))
                return 1;

        atomic_dec(&(*data)->disabled);

        return 0;
}

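/*
 * Every caller pairs a successful func_prolog_dec() with a matching
 * atomic_dec(), following this pattern (see the tracer callbacks
 * below):
 *
 *	if (!func_prolog_dec(tr, &data, &flags))
 *		return;
 *	... record the event into tr ...
 *	atomic_dec(&data->disabled);
 */
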
/*
 * irqsoff uses its own tracer function to keep the overhead down:
 */
static void
irqsoff_tracer_call(unsigned long ip, unsigned long parent_ip,
                    struct ftrace_ops *op, struct pt_regs *pt_regs)
{
        struct trace_array *tr = irqsoff_trace;
        struct trace_array_cpu *data;
        unsigned long flags;

        if (!func_prolog_dec(tr, &data, &flags))
                return;

        trace_function(tr, ip, parent_ip, flags, preempt_count());

        atomic_dec(&data->disabled);
}
#endif /* CONFIG_FUNCTION_TRACER */

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
static int irqsoff_display_graph(struct trace_array *tr, int set)
{
        int cpu;

        if (!(is_graph(tr) ^ set))
                return 0;

        stop_irqsoff_tracer(irqsoff_trace, !set);

        for_each_possible_cpu(cpu)
                per_cpu(tracing_cpu, cpu) = 0;

        tr->max_latency = 0;
        tracing_reset_online_cpus(&irqsoff_trace->array_buffer);

        return start_irqsoff_tracer(irqsoff_trace, set);
}

static int irqsoff_graph_entry(struct ftrace_graph_ent *trace)
{
        struct trace_array *tr = irqsoff_trace;
        struct trace_array_cpu *data;
        unsigned long flags;
        int ret;
        int pc;

        if (ftrace_graph_ignore_func(trace))
                return 0;
        /*
         * Do not trace a function if it's filtered by set_graph_notrace.
         * Make the index of ret stack negative to indicate that it should
         * ignore further functions.  But it needs its own ret stack entry
         * to recover the original index in order to continue tracing after
         * returning from the function.
         */
        if (ftrace_graph_notrace_addr(trace->func))
                return 1;

        if (!func_prolog_dec(tr, &data, &flags))
                return 0;

        pc = preempt_count();
        ret = __trace_graph_entry(tr, trace, flags, pc);
        atomic_dec(&data->disabled);

        return ret;
}

static void irqsoff_graph_return(struct ftrace_graph_ret *trace)
{
        struct trace_array *tr = irqsoff_trace;
        struct trace_array_cpu *data;
        unsigned long flags;
        int pc;

        ftrace_graph_addr_finish(trace);

        if (!func_prolog_dec(tr, &data, &flags))
                return;

        pc = preempt_count();
        __trace_graph_return(tr, trace, flags, pc);
        atomic_dec(&data->disabled);
}

static struct fgraph_ops fgraph_ops = {
        .entryfunc              = &irqsoff_graph_entry,
        .retfunc                = &irqsoff_graph_return,
};

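/*
 * Graph output is toggled at run time through the "display-graph"
 * trace option (TRACE_ITER_DISPLAY_GRAPH); a sketch, assuming tracefs
 * at /sys/kernel/tracing:
 *
 *	echo 1 > options/display-graph
 *
 * The flag change reaches irqsoff_display_graph() above via
 * irqsoff_flag_changed() below.
 */
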
static void irqsoff_trace_open(struct trace_iterator *iter)
{
        if (is_graph(iter->tr))
                graph_trace_open(iter);
        else
                iter->private = NULL;
}

static void irqsoff_trace_close(struct trace_iterator *iter)
{
        if (iter->private)
                graph_trace_close(iter);
}

#define GRAPH_TRACER_FLAGS (TRACE_GRAPH_PRINT_CPU | \
                            TRACE_GRAPH_PRINT_PROC | \
                            TRACE_GRAPH_PRINT_REL_TIME | \
                            TRACE_GRAPH_PRINT_DURATION)

static enum print_line_t irqsoff_print_line(struct trace_iterator *iter)
{
        /*
         * In graph mode call the graph tracer output function,
         * otherwise go with the TRACE_FN event handler
         */
        if (is_graph(iter->tr))
                return print_graph_function_flags(iter, GRAPH_TRACER_FLAGS);

        return TRACE_TYPE_UNHANDLED;
}

static void irqsoff_print_header(struct seq_file *s)
{
        struct trace_array *tr = irqsoff_trace;

        if (is_graph(tr))
                print_graph_headers_flags(s, GRAPH_TRACER_FLAGS);
        else
                trace_default_header(s);
}

static void
__trace_function(struct trace_array *tr,
                 unsigned long ip, unsigned long parent_ip,
                 unsigned long flags, int pc)
{
        if (is_graph(tr))
                trace_graph_function(tr, ip, parent_ip, flags, pc);
        else
                trace_function(tr, ip, parent_ip, flags, pc);
}

#else
#define __trace_function trace_function

static enum print_line_t irqsoff_print_line(struct trace_iterator *iter)
{
        return TRACE_TYPE_UNHANDLED;
}

static void irqsoff_trace_open(struct trace_iterator *iter) { }
static void irqsoff_trace_close(struct trace_iterator *iter) { }

#ifdef CONFIG_FUNCTION_TRACER
static void irqsoff_print_header(struct seq_file *s)
{
        trace_default_header(s);
}
#else
static void irqsoff_print_header(struct seq_file *s)
{
        trace_latency_header(s);
}
#endif /* CONFIG_FUNCTION_TRACER */
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */

/*
 * Should this new latency be reported/recorded?
 */
static bool report_latency(struct trace_array *tr, u64 delta)
{
        if (tracing_thresh) {
                if (delta < tracing_thresh)
                        return false;
        } else {
                if (delta <= tr->max_latency)
                        return false;
        }
        return true;
}

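/*
 * Note the two modes above: with tracing_thresh set (e.g. via
 * "echo 100 > tracing_thresh", in usecs), every section longer than the
 * threshold is reported; otherwise only a new maximum is. The function
 * below re-checks report_latency() under max_trace_lock because another
 * CPU may have recorded a larger maximum in the meantime.
 */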
static void
check_critical_timing(struct trace_array *tr,
                      struct trace_array_cpu *data,
                      unsigned long parent_ip,
                      int cpu)
{
        u64 T0, T1, delta;
        unsigned long flags;
        int pc;

        T0 = data->preempt_timestamp;
        T1 = ftrace_now(cpu);
        delta = T1-T0;

        local_save_flags(flags);

        pc = preempt_count();

        if (!report_latency(tr, delta))
                goto out;

        raw_spin_lock_irqsave(&max_trace_lock, flags);

        /* check if we are still the max latency */
        if (!report_latency(tr, delta))
                goto out_unlock;

        __trace_function(tr, CALLER_ADDR0, parent_ip, flags, pc);
        /* Skip 5 functions to get to the irq/preempt enable function */
        __trace_stack(tr, flags, 5, pc);

        if (data->critical_sequence != max_sequence)
                goto out_unlock;

        data->critical_end = parent_ip;

        if (likely(!is_tracing_stopped())) {
                tr->max_latency = delta;
                update_max_tr_single(tr, current, cpu);
        }

        max_sequence++;

out_unlock:
        raw_spin_unlock_irqrestore(&max_trace_lock, flags);

out:
        data->critical_sequence = max_sequence;
        data->preempt_timestamp = ftrace_now(cpu);
        __trace_function(tr, CALLER_ADDR0, parent_ip, flags, pc);
}

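/*
 * start_critical_timing() and stop_critical_timing() bracket one
 * critical section: start marks the CPU in the per-cpu tracing_cpu
 * flag and timestamps it, stop clears the flag and hands the section
 * to check_critical_timing(). The tracing_cpu flag is also what
 * func_prolog_dec() tests, so functions are only traced while a
 * section is being timed on that CPU.
 */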
static nokprobe_inline void
start_critical_timing(unsigned long ip, unsigned long parent_ip, int pc)
{
        int cpu;
        struct trace_array *tr = irqsoff_trace;
        struct trace_array_cpu *data;
        unsigned long flags;

        if (!tracer_enabled || !tracing_is_enabled())
                return;

        cpu = raw_smp_processor_id();

        if (per_cpu(tracing_cpu, cpu))
                return;

        data = per_cpu_ptr(tr->array_buffer.data, cpu);

        if (unlikely(!data) || atomic_read(&data->disabled))
                return;

        atomic_inc(&data->disabled);

        data->critical_sequence = max_sequence;
        data->preempt_timestamp = ftrace_now(cpu);
        data->critical_start = parent_ip ? : ip;

        local_save_flags(flags);

        __trace_function(tr, ip, parent_ip, flags, pc);

        per_cpu(tracing_cpu, cpu) = 1;

        atomic_dec(&data->disabled);
}

static nokprobe_inline void
stop_critical_timing(unsigned long ip, unsigned long parent_ip, int pc)
{
        int cpu;
        struct trace_array *tr = irqsoff_trace;
        struct trace_array_cpu *data;
        unsigned long flags;

        cpu = raw_smp_processor_id();
        /* Always clear the tracing cpu on stopping the trace */
        if (unlikely(per_cpu(tracing_cpu, cpu)))
                per_cpu(tracing_cpu, cpu) = 0;
        else
                return;

        if (!tracer_enabled || !tracing_is_enabled())
                return;

        data = per_cpu_ptr(tr->array_buffer.data, cpu);

        if (unlikely(!data) ||
            !data->critical_start || atomic_read(&data->disabled))
                return;

        atomic_inc(&data->disabled);

        local_save_flags(flags);
        __trace_function(tr, ip, parent_ip, flags, pc);
        check_critical_timing(tr, data, parent_ip ? : ip, cpu);
        data->critical_start = 0;
        atomic_dec(&data->disabled);
}

/* start and stop critical timings, used to suspend measurement during stoppage (in idle) */
void start_critical_timings(void)
{
        int pc = preempt_count();

        if (preempt_trace(pc) || irq_trace())
                start_critical_timing(CALLER_ADDR0, CALLER_ADDR1, pc);
}
EXPORT_SYMBOL_GPL(start_critical_timings);
NOKPROBE_SYMBOL(start_critical_timings);

void stop_critical_timings(void)
{
        int pc = preempt_count();

        if (preempt_trace(pc) || irq_trace())
                stop_critical_timing(CALLER_ADDR0, CALLER_ADDR1, pc);
}
EXPORT_SYMBOL_GPL(stop_critical_timings);
NOKPROBE_SYMBOL(stop_critical_timings);

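/*
 * A minimal sketch of how the idle path uses the two exports above
 * (mirroring default_idle_call(); arch_cpu_idle() here is illustrative):
 *
 *	stop_critical_timings();	// don't count idle time as latency
 *	arch_cpu_idle();		// CPU may sleep with irqs disabled
 *	start_critical_timings();	// resume measuring on wakeup
 */
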
#ifdef CONFIG_FUNCTION_TRACER
static bool function_enabled;

static int register_irqsoff_function(struct trace_array *tr, int graph, int set)
{
        int ret;

        /* 'set' is set if TRACE_ITER_FUNCTION is about to be set */
        if (function_enabled || (!set && !(tr->trace_flags & TRACE_ITER_FUNCTION)))
                return 0;

        if (graph)
                ret = register_ftrace_graph(&fgraph_ops);
        else
                ret = register_ftrace_function(tr->ops);

        if (!ret)
                function_enabled = true;

        return ret;
}

static void unregister_irqsoff_function(struct trace_array *tr, int graph)
{
        if (!function_enabled)
                return;

        if (graph)
                unregister_ftrace_graph(&fgraph_ops);
        else
                unregister_ftrace_function(tr->ops);

        function_enabled = false;
}

static int irqsoff_function_set(struct trace_array *tr, u32 mask, int set)
{
        if (!(mask & TRACE_ITER_FUNCTION))
                return 0;

        if (set)
                register_irqsoff_function(tr, is_graph(tr), 1);
        else
                unregister_irqsoff_function(tr, is_graph(tr));
        return 1;
}
#else
static int register_irqsoff_function(struct trace_array *tr, int graph, int set)
{
        return 0;
}
static void unregister_irqsoff_function(struct trace_array *tr, int graph) { }
static inline int irqsoff_function_set(struct trace_array *tr, u32 mask, int set)
{
        return 0;
}
#endif /* CONFIG_FUNCTION_TRACER */

static int irqsoff_flag_changed(struct trace_array *tr, u32 mask, int set)
{
        struct tracer *tracer = tr->current_trace;

        if (irqsoff_function_set(tr, mask, set))
                return 0;

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
        if (mask & TRACE_ITER_DISPLAY_GRAPH)
                return irqsoff_display_graph(tr, set);
#endif

        return trace_keep_overwrite(tracer, mask, set);
}

static int start_irqsoff_tracer(struct trace_array *tr, int graph)
{
        int ret;

        ret = register_irqsoff_function(tr, graph, 0);

        if (!ret && tracing_is_enabled())
                tracer_enabled = 1;
        else
                tracer_enabled = 0;

        return ret;
}

static void stop_irqsoff_tracer(struct trace_array *tr, int graph)
{
        tracer_enabled = 0;

        unregister_irqsoff_function(tr, graph);
}

static bool irqsoff_busy;

static int __irqsoff_tracer_init(struct trace_array *tr)
{
        if (irqsoff_busy)
                return -EBUSY;

        save_flags = tr->trace_flags;

        /* non overwrite screws up the latency tracers */
        set_tracer_flag(tr, TRACE_ITER_OVERWRITE, 1);
        set_tracer_flag(tr, TRACE_ITER_LATENCY_FMT, 1);
        /* without pause, we will produce garbage if another latency occurs */
        set_tracer_flag(tr, TRACE_ITER_PAUSE_ON_TRACE, 1);

        tr->max_latency = 0;
        irqsoff_trace = tr;
        /* make sure that the tracer is visible */
        smp_wmb();

        ftrace_init_array_ops(tr, irqsoff_tracer_call);

        /* Only toplevel instance supports graph tracing */
        if (start_irqsoff_tracer(tr, (tr->flags & TRACE_ARRAY_FL_GLOBAL &&
                                      is_graph(tr))))
                printk(KERN_ERR "failed to start irqsoff tracer\n");

        irqsoff_busy = true;
        return 0;
}

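/*
 * Only one tracer from this family can be active at a time:
 * __irqsoff_tracer_init() returns -EBUSY while irqsoff_busy is set, and
 * the reset path below clears it again.
 */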
static void __irqsoff_tracer_reset(struct trace_array *tr)
{
        int lat_flag = save_flags & TRACE_ITER_LATENCY_FMT;
        int overwrite_flag = save_flags & TRACE_ITER_OVERWRITE;
        int pause_flag = save_flags & TRACE_ITER_PAUSE_ON_TRACE;

        stop_irqsoff_tracer(tr, is_graph(tr));

        set_tracer_flag(tr, TRACE_ITER_LATENCY_FMT, lat_flag);
        set_tracer_flag(tr, TRACE_ITER_OVERWRITE, overwrite_flag);
        set_tracer_flag(tr, TRACE_ITER_PAUSE_ON_TRACE, pause_flag);
        ftrace_reset_array_ops(tr);

        irqsoff_busy = false;
}

static void irqsoff_tracer_start(struct trace_array *tr)
{
        tracer_enabled = 1;
}

static void irqsoff_tracer_stop(struct trace_array *tr)
{
        tracer_enabled = 0;
}

#ifdef CONFIG_IRQSOFF_TRACER
/*
 * We are only interested in hardirq on/off events:
 */
void tracer_hardirqs_on(unsigned long a0, unsigned long a1)
{
        unsigned int pc = preempt_count();

        if (!preempt_trace(pc) && irq_trace())
                stop_critical_timing(a0, a1, pc);
}
NOKPROBE_SYMBOL(tracer_hardirqs_on);

void tracer_hardirqs_off(unsigned long a0, unsigned long a1)
{
        unsigned int pc = preempt_count();

        if (!preempt_trace(pc) && irq_trace())
                start_critical_timing(a0, a1, pc);
}
NOKPROBE_SYMBOL(tracer_hardirqs_off);

static int irqsoff_tracer_init(struct trace_array *tr)
{
        trace_type = TRACER_IRQS_OFF;

        return __irqsoff_tracer_init(tr);
}

static void irqsoff_tracer_reset(struct trace_array *tr)
{
        __irqsoff_tracer_reset(tr);
}

static struct tracer irqsoff_tracer __read_mostly =
{
        .name           = "irqsoff",
        .init           = irqsoff_tracer_init,
        .reset          = irqsoff_tracer_reset,
        .start          = irqsoff_tracer_start,
        .stop           = irqsoff_tracer_stop,
        .print_max      = true,
        .print_header   = irqsoff_print_header,
        .print_line     = irqsoff_print_line,
        .flag_changed   = irqsoff_flag_changed,
#ifdef CONFIG_FTRACE_SELFTEST
        .selftest    = trace_selftest_startup_irqsoff,
#endif
        .open           = irqsoff_trace_open,
        .close          = irqsoff_trace_close,
        .allow_instances = true,
        .use_max_tr     = true,
};
#endif /*  CONFIG_IRQSOFF_TRACER */

#ifdef CONFIG_PREEMPT_TRACER
void tracer_preempt_on(unsigned long a0, unsigned long a1)
{
        int pc = preempt_count();

        if (preempt_trace(pc) && !irq_trace())
                stop_critical_timing(a0, a1, pc);
}

void tracer_preempt_off(unsigned long a0, unsigned long a1)
{
        int pc = preempt_count();

        if (preempt_trace(pc) && !irq_trace())
                start_critical_timing(a0, a1, pc);
}

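/*
 * Note the cross-guards in the hooks above: the hardirq hooks bail out
 * when preempt_trace() is also active and the preempt hooks bail out
 * when irq_trace() is, so in the combined preemptirqsoff mode a single
 * overlapping section is not started or stopped twice.
 */
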
static int preemptoff_tracer_init(struct trace_array *tr)
{
        trace_type = TRACER_PREEMPT_OFF;

        return __irqsoff_tracer_init(tr);
}

static void preemptoff_tracer_reset(struct trace_array *tr)
{
        __irqsoff_tracer_reset(tr);
}

static struct tracer preemptoff_tracer __read_mostly =
{
        .name           = "preemptoff",
        .init           = preemptoff_tracer_init,
        .reset          = preemptoff_tracer_reset,
        .start          = irqsoff_tracer_start,
        .stop           = irqsoff_tracer_stop,
        .print_max      = true,
        .print_header   = irqsoff_print_header,
        .print_line     = irqsoff_print_line,
        .flag_changed   = irqsoff_flag_changed,
#ifdef CONFIG_FTRACE_SELFTEST
        .selftest    = trace_selftest_startup_preemptoff,
#endif
        .open           = irqsoff_trace_open,
        .close          = irqsoff_trace_close,
        .allow_instances = true,
        .use_max_tr     = true,
};
#endif /* CONFIG_PREEMPT_TRACER */

#if defined(CONFIG_IRQSOFF_TRACER) && defined(CONFIG_PREEMPT_TRACER)

static int preemptirqsoff_tracer_init(struct trace_array *tr)
{
        trace_type = TRACER_IRQS_OFF | TRACER_PREEMPT_OFF;

        return __irqsoff_tracer_init(tr);
}

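/*
 * With both bits set, timing starts at the outermost disabling of either
 * irqs or preemption and stops at the matching outermost enable, so
 * preemptirqsoff reports the longest stretch during which a task switch
 * could not happen.
 */
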
static void preemptirqsoff_tracer_reset(struct trace_array *tr)
{
        __irqsoff_tracer_reset(tr);
}

static struct tracer preemptirqsoff_tracer __read_mostly =
{
        .name           = "preemptirqsoff",
        .init           = preemptirqsoff_tracer_init,
        .reset          = preemptirqsoff_tracer_reset,
        .start          = irqsoff_tracer_start,
        .stop           = irqsoff_tracer_stop,
        .print_max      = true,
        .print_header   = irqsoff_print_header,
        .print_line     = irqsoff_print_line,
        .flag_changed   = irqsoff_flag_changed,
#ifdef CONFIG_FTRACE_SELFTEST
        .selftest    = trace_selftest_startup_preemptirqsoff,
#endif
        .open           = irqsoff_trace_open,
        .close          = irqsoff_trace_close,
        .allow_instances = true,
        .use_max_tr     = true,
};
#endif

__init static int init_irqsoff_tracer(void)
{
#ifdef CONFIG_IRQSOFF_TRACER
        register_tracer(&irqsoff_tracer);
#endif
#ifdef CONFIG_PREEMPT_TRACER
        register_tracer(&preemptoff_tracer);
#endif
#if defined(CONFIG_IRQSOFF_TRACER) && defined(CONFIG_PREEMPT_TRACER)
        register_tracer(&preemptirqsoff_tracer);
#endif

        return 0;
}
core_initcall(init_irqsoff_tracer);
#endif /* CONFIG_IRQSOFF_TRACER || CONFIG_PREEMPT_TRACER */