// SPDX-License-Identifier: GPL-2.0
/*
 * trace irqs off critical timings
 *
 * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
 * Copyright (C) 2008 Ingo Molnar <mingo@redhat.com>
 *
 * From code in the latency_tracer, that is:
 *
 *  Copyright (C) 2004-2006 Ingo Molnar
 *  Copyright (C) 2004 Nadia Yvette Chambers
 */
#include <linux/kallsyms.h>
#include <linux/uaccess.h>
#include <linux/module.h>
#include <linux/ftrace.h>
#include <linux/kprobes.h>

#include "trace.h"

#include <trace/events/preemptirq.h>
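
/*
 * Example usage from user space (a minimal sketch, assuming tracefs is
 * mounted at /sys/kernel/tracing; the mount point may differ):
 *
 *	echo 0 > tracing_on
 *	echo irqsoff > current_tracer	# or preemptoff / preemptirqsoff
 *	echo 1 > tracing_on
 *	... run the workload of interest ...
 *	echo 0 > tracing_on
 *	cat trace			# worst-case critical section seen
 *
 * tracing_max_latency holds the largest latency found, in microseconds.
 */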
#if defined(CONFIG_IRQSOFF_TRACER) || defined(CONFIG_PREEMPT_TRACER)
static struct trace_array		*irqsoff_trace __read_mostly;
static int				tracer_enabled __read_mostly;

static DEFINE_PER_CPU(int, tracing_cpu);

static DEFINE_RAW_SPINLOCK(max_trace_lock);

enum {
	TRACER_IRQS_OFF		= (1 << 1),
	TRACER_PREEMPT_OFF	= (1 << 2),
};

static int trace_type __read_mostly;

static int save_flags;

static void stop_irqsoff_tracer(struct trace_array *tr, int graph);
static int start_irqsoff_tracer(struct trace_array *tr, int graph);
#ifdef CONFIG_PREEMPT_TRACER
static inline int
preempt_trace(int pc)
{
	return ((trace_type & TRACER_PREEMPT_OFF) && pc);
}
#else
# define preempt_trace(pc) (0)
#endif

#ifdef CONFIG_IRQSOFF_TRACER
static inline int
irq_trace(void)
{
	return ((trace_type & TRACER_IRQS_OFF) &&
		irqs_disabled());
}
#else
# define irq_trace() (0)
#endif

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
static int irqsoff_display_graph(struct trace_array *tr, int set);
# define is_graph(tr) ((tr)->trace_flags & TRACE_ITER_DISPLAY_GRAPH)
#else
static inline int irqsoff_display_graph(struct trace_array *tr, int set)
{
	return -EINVAL;
}
# define is_graph(tr) false
#endif
/*
 * Sequence count - we record it when starting a measurement and
 * skip the latency if the sequence has changed - some other section
 * did a maximum and could disturb our measurement with serial console
 * printouts, etc. Truly coinciding maximum latencies should be rare
 * and what happens together happens separately as well, so this doesn't
 * decrease the validity of the maximum found:
 */
static __cacheline_aligned_in_smp	unsigned long max_sequence;
#ifdef CONFIG_FUNCTION_TRACER
/*
 * Prologue for the preempt and irqs off function tracers.
 *
 * Returns 1 if it is OK to continue, and data->disabled is
 *            incremented.
 *         0 if the trace is to be ignored, and data->disabled
 *            is kept the same.
 *
 * Note, this function is also used outside this ifdef but
 *  inside the #ifdef of the function graph tracer below.
 *  This is OK, since the function graph tracer is
 *  dependent on the function tracer.
 */
static int func_prolog_dec(struct trace_array *tr,
			   struct trace_array_cpu **data,
			   unsigned long *flags)
{
	long disabled;
	int cpu;

	/*
	 * Does not matter if we preempt. We test the flags
	 * afterward, to see if irqs are disabled or not.
	 * If we preempt and get a false positive, the flags
	 * test will fail.
	 */
	cpu = raw_smp_processor_id();
	if (likely(!per_cpu(tracing_cpu, cpu)))
		return 0;

	local_save_flags(*flags);
	/*
	 * Slight chance to get a false positive on tracing_cpu,
	 * although I'm starting to think there isn't a chance.
	 * Leave this for now just to be paranoid.
	 */
	if (!irqs_disabled_flags(*flags) && !preempt_count())
		return 0;

	*data = per_cpu_ptr(tr->trace_buffer.data, cpu);
	disabled = atomic_inc_return(&(*data)->disabled);

	if (likely(disabled == 1))
		return 1;

	atomic_dec(&(*data)->disabled);

	return 0;
}
/*
 * irqsoff uses its own tracer function to keep the overhead down:
 */
static void
irqsoff_tracer_call(unsigned long ip, unsigned long parent_ip,
		    struct ftrace_ops *op, struct pt_regs *pt_regs)
{
	struct trace_array *tr = irqsoff_trace;
	struct trace_array_cpu *data;
	unsigned long flags;

	if (!func_prolog_dec(tr, &data, &flags))
		return;

	trace_function(tr, ip, parent_ip, flags, preempt_count());

	atomic_dec(&data->disabled);
}
#endif /* CONFIG_FUNCTION_TRACER */
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
static int irqsoff_display_graph(struct trace_array *tr, int set)
{
	int cpu;

	if (!(is_graph(tr) ^ set))
		return 0;

	stop_irqsoff_tracer(irqsoff_trace, !set);

	for_each_possible_cpu(cpu)
		per_cpu(tracing_cpu, cpu) = 0;

	tr->max_latency = 0;
	tracing_reset_online_cpus(&irqsoff_trace->trace_buffer);

	return start_irqsoff_tracer(irqsoff_trace, set);
}
static int irqsoff_graph_entry(struct ftrace_graph_ent *trace)
{
	struct trace_array *tr = irqsoff_trace;
	struct trace_array_cpu *data;
	unsigned long flags;
	int ret;
	int pc;

	if (ftrace_graph_ignore_func(trace))
		return 0;
	/*
	 * Do not trace a function if it's filtered by set_graph_notrace.
	 * Make the index of ret stack negative to indicate that it should
	 * ignore further functions.  But it needs its own ret stack entry
	 * to recover the original index in order to continue tracing after
	 * returning from the function.
	 */
	if (ftrace_graph_notrace_addr(trace->func))
		return 1;

	if (!func_prolog_dec(tr, &data, &flags))
		return 0;

	pc = preempt_count();
	ret = __trace_graph_entry(tr, trace, flags, pc);
	atomic_dec(&data->disabled);

	return ret;
}
static void irqsoff_graph_return(struct ftrace_graph_ret *trace)
{
	struct trace_array *tr = irqsoff_trace;
	struct trace_array_cpu *data;
	unsigned long flags;
	int pc;

	ftrace_graph_addr_finish(trace);

	if (!func_prolog_dec(tr, &data, &flags))
		return;

	pc = preempt_count();
	__trace_graph_return(tr, trace, flags, pc);
	atomic_dec(&data->disabled);
}

static struct fgraph_ops fgraph_ops = {
	.entryfunc		= &irqsoff_graph_entry,
	.retfunc		= &irqsoff_graph_return,
};
static void irqsoff_trace_open(struct trace_iterator *iter)
{
	if (is_graph(iter->tr))
		graph_trace_open(iter);
	else
		iter->private = NULL;
}

static void irqsoff_trace_close(struct trace_iterator *iter)
{
	if (iter->private)
		graph_trace_close(iter);
}
#define GRAPH_TRACER_FLAGS (TRACE_GRAPH_PRINT_CPU | \
			    TRACE_GRAPH_PRINT_PROC | \
			    TRACE_GRAPH_PRINT_REL_TIME | \
			    TRACE_GRAPH_PRINT_DURATION)

static enum print_line_t irqsoff_print_line(struct trace_iterator *iter)
{
	/*
	 * In graph mode call the graph tracer output function,
	 * otherwise go with the TRACE_FN event handler
	 */
	if (is_graph(iter->tr))
		return print_graph_function_flags(iter, GRAPH_TRACER_FLAGS);

	return TRACE_TYPE_UNHANDLED;
}
static void irqsoff_print_header(struct seq_file *s)
{
	struct trace_array *tr = irqsoff_trace;

	if (is_graph(tr))
		print_graph_headers_flags(s, GRAPH_TRACER_FLAGS);
	else
		trace_default_header(s);
}

static void
__trace_function(struct trace_array *tr,
		 unsigned long ip, unsigned long parent_ip,
		 unsigned long flags, int pc)
{
	if (is_graph(tr))
		trace_graph_function(tr, ip, parent_ip, flags, pc);
	else
		trace_function(tr, ip, parent_ip, flags, pc);
}

#else
#define __trace_function trace_function
static enum print_line_t irqsoff_print_line(struct trace_iterator *iter)
{
	return TRACE_TYPE_UNHANDLED;
}

static void irqsoff_trace_open(struct trace_iterator *iter) { }
static void irqsoff_trace_close(struct trace_iterator *iter) { }

#ifdef CONFIG_FUNCTION_TRACER
static void irqsoff_print_header(struct seq_file *s)
{
	trace_default_header(s);
}
#else
static void irqsoff_print_header(struct seq_file *s)
{
	trace_latency_header(s);
}
#endif /* CONFIG_FUNCTION_TRACER */
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
/*
 * Should this new latency be reported/recorded?
 */
static bool report_latency(struct trace_array *tr, u64 delta)
{
	if (tracing_thresh) {
		if (delta < tracing_thresh)
			return false;
	} else {
		if (delta <= tr->max_latency)
			return false;
	}
	return true;
}
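
/*
 * Runs when a critical section closes: compute the delta between the
 * timestamp recorded at start_critical_timing() and now, and if
 * report_latency() accepts it as a new maximum, snapshot the trace via
 * update_max_tr_single(). max_trace_lock serializes the update against
 * other CPUs, and max_sequence detects a racing maximum that would
 * invalidate this measurement.
 */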
static void
check_critical_timing(struct trace_array *tr,
		      struct trace_array_cpu *data,
		      unsigned long parent_ip,
		      int cpu)
{
	u64 T0, T1, delta;
	unsigned long flags;
	int pc;

	T0 = data->preempt_timestamp;
	T1 = ftrace_now(cpu);
	delta = T1 - T0;

	local_save_flags(flags);

	pc = preempt_count();

	if (!report_latency(tr, delta))
		goto out;

	raw_spin_lock_irqsave(&max_trace_lock, flags);

	/* check if we are still the max latency */
	if (!report_latency(tr, delta))
		goto out_unlock;

	__trace_function(tr, CALLER_ADDR0, parent_ip, flags, pc);
	/* Skip 5 functions to get to the irq/preempt enable function */
	__trace_stack(tr, flags, 5, pc);

	if (data->critical_sequence != max_sequence)
		goto out_unlock;

	data->critical_end = parent_ip;

	if (likely(!is_tracing_stopped())) {
		tr->max_latency = delta;
		update_max_tr_single(tr, current, cpu);
	}

	max_sequence++;

out_unlock:
	raw_spin_unlock_irqrestore(&max_trace_lock, flags);

out:
	data->critical_sequence = max_sequence;
	data->preempt_timestamp = ftrace_now(cpu);
	__trace_function(tr, CALLER_ADDR0, parent_ip, flags, pc);
}
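
/*
 * Mark the beginning of an irqs/preempt-off critical section on this CPU:
 * record the starting timestamp and ip, and set tracing_cpu so the
 * function tracers above trace only this CPU until the section closes in
 * stop_critical_timing().
 */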
static nokprobe_inline void
start_critical_timing(unsigned long ip, unsigned long parent_ip, int pc)
{
	int cpu;
	struct trace_array *tr = irqsoff_trace;
	struct trace_array_cpu *data;
	unsigned long flags;

	if (!tracer_enabled || !tracing_is_enabled())
		return;

	cpu = raw_smp_processor_id();

	if (per_cpu(tracing_cpu, cpu))
		return;

	data = per_cpu_ptr(tr->trace_buffer.data, cpu);

	if (unlikely(!data) || atomic_read(&data->disabled))
		return;

	atomic_inc(&data->disabled);

	data->critical_sequence = max_sequence;
	data->preempt_timestamp = ftrace_now(cpu);
	data->critical_start = parent_ip ? : ip;

	local_save_flags(flags);

	__trace_function(tr, ip, parent_ip, flags, pc);

	per_cpu(tracing_cpu, cpu) = 1;

	atomic_dec(&data->disabled);
}
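
/*
 * Mark the end of the critical section: clear tracing_cpu first (even if
 * the tracer is disabled), then hand off to check_critical_timing() to
 * decide whether this section is a new maximum worth recording.
 */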
static nokprobe_inline void
stop_critical_timing(unsigned long ip, unsigned long parent_ip, int pc)
{
	int cpu;
	struct trace_array *tr = irqsoff_trace;
	struct trace_array_cpu *data;
	unsigned long flags;

	cpu = raw_smp_processor_id();
	/* Always clear the tracing cpu on stopping the trace */
	if (unlikely(per_cpu(tracing_cpu, cpu)))
		per_cpu(tracing_cpu, cpu) = 0;
	else
		return;

	if (!tracer_enabled || !tracing_is_enabled())
		return;

	data = per_cpu_ptr(tr->trace_buffer.data, cpu);

	if (unlikely(!data) ||
	    !data->critical_start || atomic_read(&data->disabled))
		return;

	atomic_inc(&data->disabled);

	local_save_flags(flags);
	__trace_function(tr, ip, parent_ip, flags, pc);
	check_critical_timing(tr, data, parent_ip ? : ip, cpu);
	data->critical_start = 0;
	atomic_dec(&data->disabled);
}
/* start and stop critical timings, used to exclude sections (such as idle) from the measurement */
void start_critical_timings(void)
{
	int pc = preempt_count();

	if (preempt_trace(pc) || irq_trace())
		start_critical_timing(CALLER_ADDR0, CALLER_ADDR1, pc);
}
EXPORT_SYMBOL_GPL(start_critical_timings);
NOKPROBE_SYMBOL(start_critical_timings);

void stop_critical_timings(void)
{
	int pc = preempt_count();

	if (preempt_trace(pc) || irq_trace())
		stop_critical_timing(CALLER_ADDR0, CALLER_ADDR1, pc);
}
EXPORT_SYMBOL_GPL(stop_critical_timings);
NOKPROBE_SYMBOL(stop_critical_timings);
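
/*
 * A caller that needs to exclude a quiescent region from the measurement
 * brackets it with this pair, roughly (illustrative sketch; see the idle
 * loop for a real call site):
 *
 *	stop_critical_timings();	// close any open critical section
 *	... go idle, irqs off but harmless ...
 *	start_critical_timings();	// begin measuring again
 */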
#ifdef CONFIG_FUNCTION_TRACER
static bool function_enabled;

static int register_irqsoff_function(struct trace_array *tr, int graph, int set)
{
	int ret;

	/* 'set' is set if TRACE_ITER_FUNCTION is about to be set */
	if (function_enabled || (!set && !(tr->trace_flags & TRACE_ITER_FUNCTION)))
		return 0;

	if (graph)
		ret = register_ftrace_graph(&fgraph_ops);
	else
		ret = register_ftrace_function(tr->ops);

	if (!ret)
		function_enabled = true;

	return ret;
}

static void unregister_irqsoff_function(struct trace_array *tr, int graph)
{
	if (!function_enabled)
		return;

	if (graph)
		unregister_ftrace_graph(&fgraph_ops);
	else
		unregister_ftrace_function(tr->ops);

	function_enabled = false;
}
static int irqsoff_function_set(struct trace_array *tr, u32 mask, int set)
{
	if (!(mask & TRACE_ITER_FUNCTION))
		return 0;

	if (set)
		register_irqsoff_function(tr, is_graph(tr), 1);
	else
		unregister_irqsoff_function(tr, is_graph(tr));
	return 1;
}
#else
static int register_irqsoff_function(struct trace_array *tr, int graph, int set)
{
	return 0;
}
static void unregister_irqsoff_function(struct trace_array *tr, int graph) { }
static inline int irqsoff_function_set(struct trace_array *tr, u32 mask, int set)
{
	return 0;
}
#endif /* CONFIG_FUNCTION_TRACER */
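
/*
 * Called when a trace option flag is toggled at runtime: function-tracing
 * toggles are routed to the register/unregister helpers above,
 * display-graph toggles restart the tracer in the requested mode, and
 * everything else falls through to trace_keep_overwrite().
 */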
static int irqsoff_flag_changed(struct trace_array *tr, u32 mask, int set)
{
	struct tracer *tracer = tr->current_trace;

	if (irqsoff_function_set(tr, mask, set))
		return 0;

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	if (mask & TRACE_ITER_DISPLAY_GRAPH)
		return irqsoff_display_graph(tr, set);
#endif

	return trace_keep_overwrite(tracer, mask, set);
}
static int start_irqsoff_tracer(struct trace_array *tr, int graph)
{
	int ret;

	ret = register_irqsoff_function(tr, graph, 0);

	if (!ret && tracing_is_enabled())
		tracer_enabled = 1;
	else
		tracer_enabled = 0;

	return ret;
}

static void stop_irqsoff_tracer(struct trace_array *tr, int graph)
{
	tracer_enabled = 0;

	unregister_irqsoff_function(tr, graph);
}

static bool irqsoff_busy;
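
/*
 * Common init for the irqsoff, preemptoff and preemptirqsoff variants.
 * Only one of them can be active at a time, hence the irqsoff_busy flag;
 * the latency-format and overwrite flags are forced on here and the
 * previous values restored on reset.
 */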
static int __irqsoff_tracer_init(struct trace_array *tr)
{
	if (irqsoff_busy)
		return -EBUSY;

	save_flags = tr->trace_flags;

	/* non overwrite screws up the latency tracers */
	set_tracer_flag(tr, TRACE_ITER_OVERWRITE, 1);
	set_tracer_flag(tr, TRACE_ITER_LATENCY_FMT, 1);

	tr->max_latency = 0;
	irqsoff_trace = tr;
	/* make sure that the tracer is visible */
	smp_wmb();

	ftrace_init_array_ops(tr, irqsoff_tracer_call);

	/* Only toplevel instance supports graph tracing */
	if (start_irqsoff_tracer(tr, (tr->flags & TRACE_ARRAY_FL_GLOBAL &&
				      is_graph(tr))))
		printk(KERN_ERR "failed to start irqsoff tracer\n");

	irqsoff_busy = true;
	return 0;
}
static void __irqsoff_tracer_reset(struct trace_array *tr)
{
	int lat_flag = save_flags & TRACE_ITER_LATENCY_FMT;
	int overwrite_flag = save_flags & TRACE_ITER_OVERWRITE;

	stop_irqsoff_tracer(tr, is_graph(tr));

	set_tracer_flag(tr, TRACE_ITER_LATENCY_FMT, lat_flag);
	set_tracer_flag(tr, TRACE_ITER_OVERWRITE, overwrite_flag);
	ftrace_reset_array_ops(tr);

	irqsoff_busy = false;
}

static void irqsoff_tracer_start(struct trace_array *tr)
{
	tracer_enabled = 1;
}

static void irqsoff_tracer_stop(struct trace_array *tr)
{
	tracer_enabled = 0;
}
#ifdef CONFIG_IRQSOFF_TRACER
/*
 * We are only interested in hardirq on/off events:
 */
void tracer_hardirqs_on(unsigned long a0, unsigned long a1)
{
	unsigned int pc = preempt_count();

	if (!preempt_trace(pc) && irq_trace())
		stop_critical_timing(a0, a1, pc);
}
NOKPROBE_SYMBOL(tracer_hardirqs_on);

void tracer_hardirqs_off(unsigned long a0, unsigned long a1)
{
	unsigned int pc = preempt_count();

	if (!preempt_trace(pc) && irq_trace())
		start_critical_timing(a0, a1, pc);
}
NOKPROBE_SYMBOL(tracer_hardirqs_off);

static int irqsoff_tracer_init(struct trace_array *tr)
{
	trace_type = TRACER_IRQS_OFF;

	return __irqsoff_tracer_init(tr);
}

static void irqsoff_tracer_reset(struct trace_array *tr)
{
	__irqsoff_tracer_reset(tr);
}
static struct tracer irqsoff_tracer __read_mostly =
{
	.name		= "irqsoff",
	.init		= irqsoff_tracer_init,
	.reset		= irqsoff_tracer_reset,
	.start		= irqsoff_tracer_start,
	.stop		= irqsoff_tracer_stop,
	.print_max	= true,
	.print_header	= irqsoff_print_header,
	.print_line	= irqsoff_print_line,
	.flag_changed	= irqsoff_flag_changed,
#ifdef CONFIG_FTRACE_SELFTEST
	.selftest	= trace_selftest_startup_irqsoff,
#endif
	.open		= irqsoff_trace_open,
	.close		= irqsoff_trace_close,
	.allow_instances = true,
	.use_max_tr	= true,
};
#endif /* CONFIG_IRQSOFF_TRACER */
#ifdef CONFIG_PREEMPT_TRACER
void tracer_preempt_on(unsigned long a0, unsigned long a1)
{
	int pc = preempt_count();

	if (preempt_trace(pc) && !irq_trace())
		stop_critical_timing(a0, a1, pc);
}

void tracer_preempt_off(unsigned long a0, unsigned long a1)
{
	int pc = preempt_count();

	if (preempt_trace(pc) && !irq_trace())
		start_critical_timing(a0, a1, pc);
}

static int preemptoff_tracer_init(struct trace_array *tr)
{
	trace_type = TRACER_PREEMPT_OFF;

	return __irqsoff_tracer_init(tr);
}

static void preemptoff_tracer_reset(struct trace_array *tr)
{
	__irqsoff_tracer_reset(tr);
}
static struct tracer preemptoff_tracer __read_mostly =
{
	.name		= "preemptoff",
	.init		= preemptoff_tracer_init,
	.reset		= preemptoff_tracer_reset,
	.start		= irqsoff_tracer_start,
	.stop		= irqsoff_tracer_stop,
	.print_max	= true,
	.print_header	= irqsoff_print_header,
	.print_line	= irqsoff_print_line,
	.flag_changed	= irqsoff_flag_changed,
#ifdef CONFIG_FTRACE_SELFTEST
	.selftest	= trace_selftest_startup_preemptoff,
#endif
	.open		= irqsoff_trace_open,
	.close		= irqsoff_trace_close,
	.allow_instances = true,
	.use_max_tr	= true,
};
#endif /* CONFIG_PREEMPT_TRACER */
#if defined(CONFIG_IRQSOFF_TRACER) && defined(CONFIG_PREEMPT_TRACER)

static int preemptirqsoff_tracer_init(struct trace_array *tr)
{
	trace_type = TRACER_IRQS_OFF | TRACER_PREEMPT_OFF;

	return __irqsoff_tracer_init(tr);
}

static void preemptirqsoff_tracer_reset(struct trace_array *tr)
{
	__irqsoff_tracer_reset(tr);
}
static struct tracer preemptirqsoff_tracer __read_mostly =
{
	.name		= "preemptirqsoff",
	.init		= preemptirqsoff_tracer_init,
	.reset		= preemptirqsoff_tracer_reset,
	.start		= irqsoff_tracer_start,
	.stop		= irqsoff_tracer_stop,
	.print_max	= true,
	.print_header	= irqsoff_print_header,
	.print_line	= irqsoff_print_line,
	.flag_changed	= irqsoff_flag_changed,
#ifdef CONFIG_FTRACE_SELFTEST
	.selftest	= trace_selftest_startup_preemptirqsoff,
#endif
	.open		= irqsoff_trace_open,
	.close		= irqsoff_trace_close,
	.allow_instances = true,
	.use_max_tr	= true,
};
#endif
__init static int init_irqsoff_tracer(void)
{
#ifdef CONFIG_IRQSOFF_TRACER
	register_tracer(&irqsoff_tracer);
#endif
#ifdef CONFIG_PREEMPT_TRACER
	register_tracer(&preemptoff_tracer);
#endif
#if defined(CONFIG_IRQSOFF_TRACER) && defined(CONFIG_PREEMPT_TRACER)
	register_tracer(&preemptirqsoff_tracer);
#endif

	return 0;
}
core_initcall(init_irqsoff_tracer);
#endif /* IRQSOFF_TRACER || PREEMPTOFF_TRACER */