// SPDX-License-Identifier: GPL-2.0
/*
 * trace irqs off critical timings
 *
 * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
 * Copyright (C) 2008 Ingo Molnar <mingo@redhat.com>
 *
 * From code in the latency_tracer, that is:
 *
 *  Copyright (C) 2004-2006 Ingo Molnar
 *  Copyright (C) 2004 Nadia Yvette Chambers
 */
#include <linux/kallsyms.h>
#include <linux/uaccess.h>
#include <linux/module.h>
#include <linux/ftrace.h>

#include "trace.h"

static struct trace_array *irqsoff_trace __read_mostly;
static int tracer_enabled __read_mostly;

static DEFINE_PER_CPU(int, tracing_cpu);
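
/*
 * Protects the max-latency test-and-update in check_critical_timing():
 * two CPUs finishing critical sections concurrently must not both
 * record a new maximum at the same time.
 */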
static DEFINE_RAW_SPINLOCK(max_trace_lock);

enum {
	TRACER_IRQS_OFF		= (1 << 1),
	TRACER_PREEMPT_OFF	= (1 << 2),
};

static int trace_type __read_mostly;

static int save_flags;

static void stop_irqsoff_tracer(struct trace_array *tr, int graph);
static int start_irqsoff_tracer(struct trace_array *tr, int graph);

#ifdef CONFIG_PREEMPT_TRACER
static inline int
preempt_trace(void)
{
	return ((trace_type & TRACER_PREEMPT_OFF) && preempt_count());
}
#else
# define preempt_trace() (0)
#endif

#ifdef CONFIG_IRQSOFF_TRACER
static inline int
irq_trace(void)
{
	return ((trace_type & TRACER_IRQS_OFF) &&
		irqs_disabled());
}
#else
# define irq_trace() (0)
#endif

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
static int irqsoff_display_graph(struct trace_array *tr, int set);
# define is_graph(tr) ((tr)->trace_flags & TRACE_ITER_DISPLAY_GRAPH)
#else
static inline int irqsoff_display_graph(struct trace_array *tr, int set)
{
	return -EINVAL;
}
# define is_graph(tr) false
#endif

/*
 * Sequence count - we record it when starting a measurement and
 * skip the latency if the sequence has changed - some other section
 * did a maximum and could disturb our measurement with serial console
 * printouts, etc. Truly coinciding maximum latencies should be rare
 * and what happens together happens separately as well, so this doesn't
 * decrease the validity of the maximum found:
 */
static __cacheline_aligned_in_smp unsigned long max_sequence;

#ifdef CONFIG_FUNCTION_TRACER
/*
 * Prologue for the preempt and irqs off function tracers.
 *
 * Returns 1 if it is OK to continue, and data->disabled is
 *            incremented.
 *         0 if the trace is to be ignored, and data->disabled
 *            is kept the same.
 *
 * Note, this function is also used outside this ifdef but
 *  inside the #ifdef of the function graph tracer below.
 *  This is OK, since the function graph tracer is
 *  dependent on the function tracer.
 */
static int func_prolog_dec(struct trace_array *tr,
			   struct trace_array_cpu **data,
			   unsigned long *flags)
{
	long disabled;
	int cpu;

	/*
	 * Does not matter if we preempt. We test the flags
	 * afterward, to see if irqs are disabled or not.
	 * If we preempt and get a false positive, the flags
	 * test will fail.
	 */
	cpu = raw_smp_processor_id();
	if (likely(!per_cpu(tracing_cpu, cpu)))
		return 0;

	local_save_flags(*flags);
	/*
	 * Slight chance to get a false positive on tracing_cpu,
	 * although I'm starting to think there isn't a chance.
	 * Leave this for now just to be paranoid.
	 */
	if (!irqs_disabled_flags(*flags) && !preempt_count())
		return 0;

	*data = per_cpu_ptr(tr->trace_buffer.data, cpu);
	disabled = atomic_inc_return(&(*data)->disabled);

	if (likely(disabled == 1))
		return 1;

	atomic_dec(&(*data)->disabled);

	return 0;
}
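
/*
 * On a nonzero return the caller holds a reference on (*data)->disabled
 * and must drop it with atomic_dec() once its event is written.
 */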

/*
 * irqsoff uses its own tracer function to keep the overhead down:
 */
static void
irqsoff_tracer_call(unsigned long ip, unsigned long parent_ip,
		    struct ftrace_ops *op, struct pt_regs *pt_regs)
{
	struct trace_array *tr = irqsoff_trace;
	struct trace_array_cpu *data;
	unsigned long flags;

	if (!func_prolog_dec(tr, &data, &flags))
		return;

	trace_function(tr, ip, parent_ip, flags, preempt_count());

	atomic_dec(&data->disabled);
}
#endif /* CONFIG_FUNCTION_TRACER */

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
static int irqsoff_display_graph(struct trace_array *tr, int set)
{
	int cpu;

	if (!(is_graph(tr) ^ set))
		return 0;

	stop_irqsoff_tracer(irqsoff_trace, !set);

	for_each_possible_cpu(cpu)
		per_cpu(tracing_cpu, cpu) = 0;

	tr->max_latency = 0;
	tracing_reset_online_cpus(&irqsoff_trace->trace_buffer);

	return start_irqsoff_tracer(irqsoff_trace, set);
}
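
/*
 * Entry/return callbacks used when the display-graph option is set:
 * they record graph events only while this CPU is inside a critical
 * section (func_prolog_dec() gates on tracing_cpu).
 */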
static int irqsoff_graph_entry(struct ftrace_graph_ent *trace)
{
	struct trace_array *tr = irqsoff_trace;
	struct trace_array_cpu *data;
	unsigned long flags;
	int ret;
	int pc;

	if (ftrace_graph_ignore_func(trace))
		return 0;
	/*
	 * Do not trace a function if it's filtered by set_graph_notrace.
	 * Make the index of ret stack negative to indicate that it should
	 * ignore further functions. But it needs its own ret stack entry
	 * to recover the original index in order to continue tracing after
	 * returning from the function.
	 */
	if (ftrace_graph_notrace_addr(trace->func))
		return 1;

	if (!func_prolog_dec(tr, &data, &flags))
		return 0;

	pc = preempt_count();
	ret = __trace_graph_entry(tr, trace, flags, pc);
	atomic_dec(&data->disabled);

	return ret;
}

static void irqsoff_graph_return(struct ftrace_graph_ret *trace)
{
	struct trace_array *tr = irqsoff_trace;
	struct trace_array_cpu *data;
	unsigned long flags;
	int pc;

	ftrace_graph_addr_finish(trace);

	if (!func_prolog_dec(tr, &data, &flags))
		return;

	pc = preempt_count();
	__trace_graph_return(tr, trace, flags, pc);
	atomic_dec(&data->disabled);
}

static void irqsoff_trace_open(struct trace_iterator *iter)
{
	if (is_graph(iter->tr))
		graph_trace_open(iter);
	else
		iter->private = NULL;
}

static void irqsoff_trace_close(struct trace_iterator *iter)
{
	if (iter->private)
		graph_trace_close(iter);
}

#define GRAPH_TRACER_FLAGS (TRACE_GRAPH_PRINT_CPU | \
			    TRACE_GRAPH_PRINT_PROC | \
			    TRACE_GRAPH_PRINT_ABS_TIME | \
			    TRACE_GRAPH_PRINT_DURATION)

static enum print_line_t irqsoff_print_line(struct trace_iterator *iter)
{
	/*
	 * In graph mode call the graph tracer output function,
	 * otherwise go with the TRACE_FN event handler
	 */
	if (is_graph(iter->tr))
		return print_graph_function_flags(iter, GRAPH_TRACER_FLAGS);

	return TRACE_TYPE_UNHANDLED;
}

static void irqsoff_print_header(struct seq_file *s)
{
	struct trace_array *tr = irqsoff_trace;

	if (is_graph(tr))
		print_graph_headers_flags(s, GRAPH_TRACER_FLAGS);
	else
		trace_default_header(s);
}

static void
__trace_function(struct trace_array *tr,
		 unsigned long ip, unsigned long parent_ip,
		 unsigned long flags, int pc)
{
	if (is_graph(tr))
		trace_graph_function(tr, ip, parent_ip, flags, pc);
	else
		trace_function(tr, ip, parent_ip, flags, pc);
}

#else
#define __trace_function trace_function

#ifdef CONFIG_FUNCTION_TRACER
static int irqsoff_graph_entry(struct ftrace_graph_ent *trace)
{
	return -1;
}
#endif

static enum print_line_t irqsoff_print_line(struct trace_iterator *iter)
{
	return TRACE_TYPE_UNHANDLED;
}

static void irqsoff_trace_open(struct trace_iterator *iter) { }
static void irqsoff_trace_close(struct trace_iterator *iter) { }

#ifdef CONFIG_FUNCTION_TRACER
static void irqsoff_graph_return(struct ftrace_graph_ret *trace) { }
static void irqsoff_print_header(struct seq_file *s)
{
	trace_default_header(s);
}
#else
static void irqsoff_print_header(struct seq_file *s)
{
	trace_latency_header(s);
}
#endif /* CONFIG_FUNCTION_TRACER */
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */

/*
 * Should this new latency be reported/recorded?
 */
static bool report_latency(struct trace_array *tr, u64 delta)
{
	if (tracing_thresh) {
		if (delta < tracing_thresh)
			return false;
	} else {
		if (delta <= tr->max_latency)
			return false;
	}
	return true;
}
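
/*
 * Called when a critical section ends: compute how long irqs and/or
 * preemption were disabled and, if this is a new maximum (or exceeds
 * tracing_thresh), record the section in the max-latency snapshot.
 */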
static void
check_critical_timing(struct trace_array *tr,
		      struct trace_array_cpu *data,
		      unsigned long parent_ip,
		      int cpu)
{
	u64 T0, T1, delta;
	unsigned long flags;
	int pc;

	T0 = data->preempt_timestamp;
	T1 = ftrace_now(cpu);
	delta = T1 - T0;

	local_save_flags(flags);

	pc = preempt_count();

	if (!report_latency(tr, delta))
		goto out;

	raw_spin_lock_irqsave(&max_trace_lock, flags);

	/* check if we are still the max latency */
	if (!report_latency(tr, delta))
		goto out_unlock;

	__trace_function(tr, CALLER_ADDR0, parent_ip, flags, pc);
	/* Skip 5 functions to get to the irq/preempt enable function */
	__trace_stack(tr, flags, 5, pc);

	if (data->critical_sequence != max_sequence)
		goto out_unlock;

	data->critical_end = parent_ip;

	if (likely(!is_tracing_stopped())) {
		tr->max_latency = delta;
		update_max_tr_single(tr, current, cpu);
	}

	max_sequence++;

out_unlock:
	raw_spin_unlock_irqrestore(&max_trace_lock, flags);

out:
	data->critical_sequence = max_sequence;
	data->preempt_timestamp = ftrace_now(cpu);
	__trace_function(tr, CALLER_ADDR0, parent_ip, flags, pc);
}
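
/*
 * Arm the measurement for this CPU: stamp preempt_timestamp and set
 * tracing_cpu so nested or overlapping starts on the same CPU do not
 * restart the clock.
 */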
static inline void
start_critical_timing(unsigned long ip, unsigned long parent_ip)
{
	int cpu;
	struct trace_array *tr = irqsoff_trace;
	struct trace_array_cpu *data;
	unsigned long flags;

	if (!tracer_enabled || !tracing_is_enabled())
		return;

	cpu = raw_smp_processor_id();

	if (per_cpu(tracing_cpu, cpu))
		return;

	data = per_cpu_ptr(tr->trace_buffer.data, cpu);

	if (unlikely(!data) || atomic_read(&data->disabled))
		return;

	atomic_inc(&data->disabled);

	data->critical_sequence = max_sequence;
	data->preempt_timestamp = ftrace_now(cpu);
	data->critical_start = parent_ip ? : ip;

	local_save_flags(flags);

	__trace_function(tr, ip, parent_ip, flags, preempt_count());

	per_cpu(tracing_cpu, cpu) = 1;

	atomic_dec(&data->disabled);
}

static inline void
stop_critical_timing(unsigned long ip, unsigned long parent_ip)
{
	int cpu;
	struct trace_array *tr = irqsoff_trace;
	struct trace_array_cpu *data;
	unsigned long flags;

	cpu = raw_smp_processor_id();
	/* Always clear the tracing cpu on stopping the trace */
	if (unlikely(per_cpu(tracing_cpu, cpu)))
		per_cpu(tracing_cpu, cpu) = 0;
	else
		return;

	if (!tracer_enabled || !tracing_is_enabled())
		return;

	data = per_cpu_ptr(tr->trace_buffer.data, cpu);

	if (unlikely(!data) ||
	    !data->critical_start || atomic_read(&data->disabled))
		return;

	atomic_inc(&data->disabled);

	local_save_flags(flags);
	__trace_function(tr, ip, parent_ip, flags, preempt_count());
	check_critical_timing(tr, data, parent_ip ? : ip, cpu);
	data->critical_start = 0;
	atomic_dec(&data->disabled);
}

/* start and stop critical timings used for stoppage (in idle) */
void start_critical_timings(void)
{
	if (preempt_trace() || irq_trace())
		start_critical_timing(CALLER_ADDR0, CALLER_ADDR1);
}
EXPORT_SYMBOL_GPL(start_critical_timings);

void stop_critical_timings(void)
{
	if (preempt_trace() || irq_trace())
		stop_critical_timing(CALLER_ADDR0, CALLER_ADDR1);
}
EXPORT_SYMBOL_GPL(stop_critical_timings);
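
/*
 * With CONFIG_PROVE_LOCKING, lockdep owns the trace_hardirqs_* entry
 * points and calls back into time_hardirqs_on()/time_hardirqs_off()
 * here; without it, this file supplies the trace_hardirqs_* (and stub
 * softirq/irqtrace) symbols itself. Note the inversion: hardirqs "on"
 * ends a critical section, hardirqs "off" starts one.
 */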

#ifdef CONFIG_IRQSOFF_TRACER
#ifdef CONFIG_PROVE_LOCKING
void time_hardirqs_on(unsigned long a0, unsigned long a1)
{
	if (!preempt_trace() && irq_trace())
		stop_critical_timing(a0, a1);
}

void time_hardirqs_off(unsigned long a0, unsigned long a1)
{
	if (!preempt_trace() && irq_trace())
		start_critical_timing(a0, a1);
}

#else /* !CONFIG_PROVE_LOCKING */

/*
 * Stubs:
 */

void trace_softirqs_on(unsigned long ip)
{
}

void trace_softirqs_off(unsigned long ip)
{
}

inline void print_irqtrace_events(struct task_struct *curr)
{
}

/*
 * We are only interested in hardirq on/off events:
 */
void trace_hardirqs_on(void)
{
	if (!preempt_trace() && irq_trace())
		stop_critical_timing(CALLER_ADDR0, CALLER_ADDR1);
}
EXPORT_SYMBOL(trace_hardirqs_on);

void trace_hardirqs_off(void)
{
	if (!preempt_trace() && irq_trace())
		start_critical_timing(CALLER_ADDR0, CALLER_ADDR1);
}
EXPORT_SYMBOL(trace_hardirqs_off);

__visible void trace_hardirqs_on_caller(unsigned long caller_addr)
{
	if (!preempt_trace() && irq_trace())
		stop_critical_timing(CALLER_ADDR0, caller_addr);
}
EXPORT_SYMBOL(trace_hardirqs_on_caller);

__visible void trace_hardirqs_off_caller(unsigned long caller_addr)
{
	if (!preempt_trace() && irq_trace())
		start_critical_timing(CALLER_ADDR0, caller_addr);
}
EXPORT_SYMBOL(trace_hardirqs_off_caller);

#endif /* CONFIG_PROVE_LOCKING */
#endif /* CONFIG_IRQSOFF_TRACER */

#ifdef CONFIG_PREEMPT_TRACER
void trace_preempt_on(unsigned long a0, unsigned long a1)
{
	if (preempt_trace() && !irq_trace())
		stop_critical_timing(a0, a1);
}

void trace_preempt_off(unsigned long a0, unsigned long a1)
{
	if (preempt_trace() && !irq_trace())
		start_critical_timing(a0, a1);
}
#endif /* CONFIG_PREEMPT_TRACER */
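
/*
 * Attach/detach the function or function-graph callbacks for this
 * tracer instance, honoring the "function" trace option so function
 * tracing can be toggled while the latency tracer is active.
 */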

#ifdef CONFIG_FUNCTION_TRACER
static bool function_enabled;

static int register_irqsoff_function(struct trace_array *tr, int graph, int set)
{
	int ret;

	/* 'set' is set if TRACE_ITER_FUNCTION is about to be set */
	if (function_enabled || (!set && !(tr->trace_flags & TRACE_ITER_FUNCTION)))
		return 0;

	if (graph)
		ret = register_ftrace_graph(&irqsoff_graph_return,
					    &irqsoff_graph_entry);
	else
		ret = register_ftrace_function(tr->ops);

	if (!ret)
		function_enabled = true;

	return ret;
}

static void unregister_irqsoff_function(struct trace_array *tr, int graph)
{
	if (!function_enabled)
		return;

	if (graph)
		unregister_ftrace_graph();
	else
		unregister_ftrace_function(tr->ops);

	function_enabled = false;
}

static int irqsoff_function_set(struct trace_array *tr, u32 mask, int set)
{
	if (!(mask & TRACE_ITER_FUNCTION))
		return 0;

	if (set)
		register_irqsoff_function(tr, is_graph(tr), 1);
	else
		unregister_irqsoff_function(tr, is_graph(tr));

	return 1;
}
#else
static int register_irqsoff_function(struct trace_array *tr, int graph, int set)
{
	return 0;
}
static void unregister_irqsoff_function(struct trace_array *tr, int graph) { }
static inline int irqsoff_function_set(struct trace_array *tr, u32 mask, int set)
{
	return 0;
}
#endif /* CONFIG_FUNCTION_TRACER */
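
/*
 * Handle runtime trace-option flips: the function option toggles our
 * callbacks, display-graph restarts the tracer in graph mode, and
 * trace_keep_overwrite() refuses to clear TRACE_ITER_OVERWRITE while
 * the tracer is active (non-overwrite breaks the latency tracers).
 */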
static int irqsoff_flag_changed(struct trace_array *tr, u32 mask, int set)
{
	struct tracer *tracer = tr->current_trace;

	if (irqsoff_function_set(tr, mask, set))
		return 0;

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	if (mask & TRACE_ITER_DISPLAY_GRAPH)
		return irqsoff_display_graph(tr, set);
#endif

	return trace_keep_overwrite(tracer, mask, set);
}

static int start_irqsoff_tracer(struct trace_array *tr, int graph)
{
	int ret;

	ret = register_irqsoff_function(tr, graph, 0);

	if (!ret && tracing_is_enabled())
		tracer_enabled = 1;
	else
		tracer_enabled = 0;

	return ret;
}

static void stop_irqsoff_tracer(struct trace_array *tr, int graph)
{
	tracer_enabled = 0;

	unregister_irqsoff_function(tr, graph);
}
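
/*
 * Only one trace instance may use the irqsoff tracers at a time;
 * __irqsoff_tracer_init() returns -EBUSY for a second one.
 */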
static bool irqsoff_busy;

static int __irqsoff_tracer_init(struct trace_array *tr)
{
	if (irqsoff_busy)
		return -EBUSY;

	save_flags = tr->trace_flags;

	/* non overwrite screws up the latency tracers */
	set_tracer_flag(tr, TRACE_ITER_OVERWRITE, 1);
	set_tracer_flag(tr, TRACE_ITER_LATENCY_FMT, 1);

	tr->max_latency = 0;
	irqsoff_trace = tr;
	/* make sure that the tracer is visible */
	smp_wmb();

	ftrace_init_array_ops(tr, irqsoff_tracer_call);

	/* Only toplevel instance supports graph tracing */
	if (start_irqsoff_tracer(tr, (tr->flags & TRACE_ARRAY_FL_GLOBAL &&
				      is_graph(tr))))
		printk(KERN_ERR "failed to start irqsoff tracer\n");

	irqsoff_busy = true;
	return 0;
}

static void irqsoff_tracer_reset(struct trace_array *tr)
{
	int lat_flag = save_flags & TRACE_ITER_LATENCY_FMT;
	int overwrite_flag = save_flags & TRACE_ITER_OVERWRITE;

	stop_irqsoff_tracer(tr, is_graph(tr));

	set_tracer_flag(tr, TRACE_ITER_LATENCY_FMT, lat_flag);
	set_tracer_flag(tr, TRACE_ITER_OVERWRITE, overwrite_flag);
	ftrace_reset_array_ops(tr);

	irqsoff_busy = false;
}

static void irqsoff_tracer_start(struct trace_array *tr)
{
	tracer_enabled = 1;
}

static void irqsoff_tracer_stop(struct trace_array *tr)
{
	tracer_enabled = 0;
}
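
/*
 * Three flavors are registered from the same machinery: "irqsoff"
 * (irqs-off sections), "preemptoff" (preempt-off sections), and
 * "preemptirqsoff" (sections with either disabled), selected via
 * trace_type at init time.
 */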

#ifdef CONFIG_IRQSOFF_TRACER
static int irqsoff_tracer_init(struct trace_array *tr)
{
	trace_type = TRACER_IRQS_OFF;

	return __irqsoff_tracer_init(tr);
}
static struct tracer irqsoff_tracer __read_mostly =
{
	.name		= "irqsoff",
	.init		= irqsoff_tracer_init,
	.reset		= irqsoff_tracer_reset,
	.start		= irqsoff_tracer_start,
	.stop		= irqsoff_tracer_stop,
	.print_max	= true,
	.print_header	= irqsoff_print_header,
	.print_line	= irqsoff_print_line,
	.flag_changed	= irqsoff_flag_changed,
#ifdef CONFIG_FTRACE_SELFTEST
	.selftest	= trace_selftest_startup_irqsoff,
#endif
	.open		= irqsoff_trace_open,
	.close		= irqsoff_trace_close,
	.allow_instances = true,
	.use_max_tr	= true,
};
# define register_irqsoff(trace) register_tracer(&trace)
#else
# define register_irqsoff(trace) do { } while (0)
#endif
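
/*
 * Usage sketch (tracefs is typically mounted at
 * /sys/kernel/debug/tracing; the exact path varies by system):
 *
 *	echo 0 > tracing_max_latency
 *	echo irqsoff > current_tracer
 *	cat trace	# report of the longest irqs-off section seen
 */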

#ifdef CONFIG_PREEMPT_TRACER
static int preemptoff_tracer_init(struct trace_array *tr)
{
	trace_type = TRACER_PREEMPT_OFF;

	return __irqsoff_tracer_init(tr);
}

static struct tracer preemptoff_tracer __read_mostly =
{
	.name		= "preemptoff",
	.init		= preemptoff_tracer_init,
	.reset		= irqsoff_tracer_reset,
	.start		= irqsoff_tracer_start,
	.stop		= irqsoff_tracer_stop,
	.print_max	= true,
	.print_header	= irqsoff_print_header,
	.print_line	= irqsoff_print_line,
	.flag_changed	= irqsoff_flag_changed,
#ifdef CONFIG_FTRACE_SELFTEST
	.selftest	= trace_selftest_startup_preemptoff,
#endif
	.open		= irqsoff_trace_open,
	.close		= irqsoff_trace_close,
	.allow_instances = true,
	.use_max_tr	= true,
};
# define register_preemptoff(trace) register_tracer(&trace)
#else
# define register_preemptoff(trace) do { } while (0)
#endif

#if defined(CONFIG_IRQSOFF_TRACER) && \
	defined(CONFIG_PREEMPT_TRACER)

static int preemptirqsoff_tracer_init(struct trace_array *tr)
{
	trace_type = TRACER_IRQS_OFF | TRACER_PREEMPT_OFF;

	return __irqsoff_tracer_init(tr);
}

static struct tracer preemptirqsoff_tracer __read_mostly =
{
	.name		= "preemptirqsoff",
	.init		= preemptirqsoff_tracer_init,
	.reset		= irqsoff_tracer_reset,
	.start		= irqsoff_tracer_start,
	.stop		= irqsoff_tracer_stop,
	.print_max	= true,
	.print_header	= irqsoff_print_header,
	.print_line	= irqsoff_print_line,
	.flag_changed	= irqsoff_flag_changed,
#ifdef CONFIG_FTRACE_SELFTEST
	.selftest	= trace_selftest_startup_preemptirqsoff,
#endif
	.open		= irqsoff_trace_open,
	.close		= irqsoff_trace_close,
	.allow_instances = true,
	.use_max_tr	= true,
};

# define register_preemptirqsoff(trace) register_tracer(&trace)
#else
# define register_preemptirqsoff(trace) do { } while (0)
#endif

__init static int init_irqsoff_tracer(void)
{
	register_irqsoff(irqsoff_tracer);
	register_preemptoff(preemptoff_tracer);
	register_preemptirqsoff(preemptirqsoff_tracer);

	return 0;
}
core_initcall(init_irqsoff_tracer);