// SPDX-License-Identifier: GPL-2.0
/*
 * trace task wakeup timings
 *
 * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
 * Copyright (C) 2008 Ingo Molnar <mingo@redhat.com>
 *
 * Based on code from the latency_tracer, that is:
 *
 *  Copyright (C) 2004-2006 Ingo Molnar
 *  Copyright (C) 2004 Nadia Yvette Chambers
 */
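/*
 * Example usage from user space (the paths assume tracefs is mounted at
 * /sys/kernel/tracing; older kernels expose it under
 * /sys/kernel/debug/tracing):
 *
 *   echo wakeup_rt > /sys/kernel/tracing/current_tracer
 *   echo 1 > /sys/kernel/tracing/tracing_on
 *   cat /sys/kernel/tracing/tracing_max_latency
 *
 * tracing_max_latency then holds the worst observed delay, in
 * microseconds, between a qualifying task's wakeup and the moment it
 * was scheduled in.
 */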
#include <linux/module.h>
#include <linux/kallsyms.h>
#include <linux/uaccess.h>
#include <linux/ftrace.h>
#include <linux/sched/rt.h>
#include <linux/sched/deadline.h>
#include <trace/events/sched.h>

#include "trace.h"

static struct trace_array *wakeup_trace;
static int __read_mostly tracer_enabled;

static struct task_struct *wakeup_task;
static int wakeup_cpu;
static int wakeup_current_cpu;
static unsigned wakeup_prio = -1;
static int wakeup_rt;
static int wakeup_dl;
static int tracing_dl = 0;

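/*
 * wakeup_lock protects the wakeup_* state above. It is a raw
 * arch_spinlock_t, always taken with interrupts disabled, so that
 * acquiring it cannot itself generate trace events and recurse into
 * the tracer.
 */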
static arch_spinlock_t wakeup_lock =
        (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;

static void wakeup_reset(struct trace_array *tr);
static void __wakeup_reset(struct trace_array *tr);
static int start_func_tracer(struct trace_array *tr, int graph);
static void stop_func_tracer(struct trace_array *tr, int graph);

static int save_flags;

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
# define is_graph(tr) ((tr)->trace_flags & TRACE_ITER_DISPLAY_GRAPH)
#else
# define is_graph(tr) false
#endif

#ifdef CONFIG_FUNCTION_TRACER

static bool function_enabled;

/*
 * Prologue for the wakeup function tracers.
 *
 * Returns 1 if it is OK to continue, in which case preemption has
 * been disabled and data->disabled has been incremented.
 * Returns 0 if the trace is to be ignored; in that case preemption
 * is not disabled and data->disabled is left unchanged.
 *
 * Note, this function is also used outside this ifdef but
 * inside the #ifdef of the function graph tracer below.
 * This is OK, since the function graph tracer is
 * dependent on the function tracer.
 */
static int
func_prolog_preempt_disable(struct trace_array *tr,
                            struct trace_array_cpu **data,
                            int *pc)
{
        long disabled;
        int cpu;

        if (likely(!wakeup_task))
                return 0;

        *pc = preempt_count();
        preempt_disable_notrace();

        cpu = raw_smp_processor_id();
        if (cpu != wakeup_current_cpu)
                goto out_enable;

        *data = per_cpu_ptr(tr->trace_buffer.data, cpu);
        disabled = atomic_inc_return(&(*data)->disabled);
        if (unlikely(disabled != 1))
                goto out;

        return 1;

out:
        atomic_dec(&(*data)->disabled);

out_enable:
        preempt_enable_notrace();
        return 0;
}

#ifdef CONFIG_FUNCTION_GRAPH_TRACER

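/*
 * Called when the TRACE_ITER_DISPLAY_GRAPH flag is toggled: stop the
 * tracer that is currently registered, reset the trace, and restart
 * it in the newly requested (graph or non-graph) mode.
 */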
static int wakeup_display_graph(struct trace_array *tr, int set)
{
        if (!(is_graph(tr) ^ set))
                return 0;

        stop_func_tracer(tr, !set);

        wakeup_reset(wakeup_trace);
        tr->max_latency = 0;

        return start_func_tracer(tr, set);
}

static int wakeup_graph_entry(struct ftrace_graph_ent *trace)
{
        struct trace_array *tr = wakeup_trace;
        struct trace_array_cpu *data;
        unsigned long flags;
        int pc, ret = 0;

        if (ftrace_graph_ignore_func(trace))
                return 0;
        /*
         * Do not trace a function if it's filtered by set_graph_notrace.
         * Make the index of ret stack negative to indicate that it should
         * ignore further functions. But it needs its own ret stack entry
         * to recover the original index in order to continue tracing after
         * returning from the function.
         */
        if (ftrace_graph_notrace_addr(trace->func))
                return 1;

        if (!func_prolog_preempt_disable(tr, &data, &pc))
                return 0;

        local_save_flags(flags);
        ret = __trace_graph_entry(tr, trace, flags, pc);
        atomic_dec(&data->disabled);
        preempt_enable_notrace();

        return ret;
}

static void wakeup_graph_return(struct ftrace_graph_ret *trace)
{
        struct trace_array *tr = wakeup_trace;
        struct trace_array_cpu *data;
        unsigned long flags;
        int pc;

        ftrace_graph_addr_finish(trace);

        if (!func_prolog_preempt_disable(tr, &data, &pc))
                return;

        local_save_flags(flags);
        __trace_graph_return(tr, trace, flags, pc);
        atomic_dec(&data->disabled);

        preempt_enable_notrace();
}

static struct fgraph_ops fgraph_wakeup_ops = {
        .entryfunc = &wakeup_graph_entry,
        .retfunc = &wakeup_graph_return,
};

static void wakeup_trace_open(struct trace_iterator *iter)
{
        if (is_graph(iter->tr))
                graph_trace_open(iter);
        else
                iter->private = NULL;
}

static void wakeup_trace_close(struct trace_iterator *iter)
{
        if (iter->private)
                graph_trace_close(iter);
}

#define GRAPH_TRACER_FLAGS (TRACE_GRAPH_PRINT_PROC | \
                            TRACE_GRAPH_PRINT_CPU | \
                            TRACE_GRAPH_PRINT_REL_TIME | \
                            TRACE_GRAPH_PRINT_DURATION | \
                            TRACE_GRAPH_PRINT_OVERHEAD | \
                            TRACE_GRAPH_PRINT_IRQS)

static enum print_line_t wakeup_print_line(struct trace_iterator *iter)
{
        /*
         * In graph mode call the graph tracer output function,
         * otherwise go with the TRACE_FN event handler
         */
        if (is_graph(iter->tr))
                return print_graph_function_flags(iter, GRAPH_TRACER_FLAGS);

        return TRACE_TYPE_UNHANDLED;
}

static void wakeup_print_header(struct seq_file *s)
{
        if (is_graph(wakeup_trace))
                print_graph_headers_flags(s, GRAPH_TRACER_FLAGS);
        else
                trace_default_header(s);
}
#endif /* else CONFIG_FUNCTION_GRAPH_TRACER */

/*
 * wakeup uses its own tracer function to keep the overhead down:
 */
static void
wakeup_tracer_call(unsigned long ip, unsigned long parent_ip,
                   struct ftrace_ops *op, struct pt_regs *pt_regs)
{
        struct trace_array *tr = wakeup_trace;
        struct trace_array_cpu *data;
        unsigned long flags;
        int pc;

        if (!func_prolog_preempt_disable(tr, &data, &pc))
                return;

        local_irq_save(flags);
        trace_function(tr, ip, parent_ip, flags, pc);
        local_irq_restore(flags);

        atomic_dec(&data->disabled);
        preempt_enable_notrace();
}

static int register_wakeup_function(struct trace_array *tr, int graph, int set)
{
        int ret;

        /* 'set' is set if TRACE_ITER_FUNCTION is about to be set */
        if (function_enabled || (!set && !(tr->trace_flags & TRACE_ITER_FUNCTION)))
                return 0;

        if (graph)
                ret = register_ftrace_graph(&fgraph_wakeup_ops);
        else
                ret = register_ftrace_function(tr->ops);

        if (!ret)
                function_enabled = true;

        return ret;
}

static void unregister_wakeup_function(struct trace_array *tr, int graph)
{
        if (!function_enabled)
                return;

        if (graph)
                unregister_ftrace_graph(&fgraph_wakeup_ops);
        else
                unregister_ftrace_function(tr->ops);

        function_enabled = false;
}

static int wakeup_function_set(struct trace_array *tr, u32 mask, int set)
{
        if (!(mask & TRACE_ITER_FUNCTION))
                return 0;

        if (set)
                register_wakeup_function(tr, is_graph(tr), 1);
        else
                unregister_wakeup_function(tr, is_graph(tr));

        return 1;
}
#else /* CONFIG_FUNCTION_TRACER */
static int register_wakeup_function(struct trace_array *tr, int graph, int set)
{
        return 0;
}
static void unregister_wakeup_function(struct trace_array *tr, int graph) { }
static int wakeup_function_set(struct trace_array *tr, u32 mask, int set)
{
        return 0;
}
#endif /* else CONFIG_FUNCTION_TRACER */

#ifndef CONFIG_FUNCTION_GRAPH_TRACER
static enum print_line_t wakeup_print_line(struct trace_iterator *iter)
{
        return TRACE_TYPE_UNHANDLED;
}

static void wakeup_trace_open(struct trace_iterator *iter) { }
static void wakeup_trace_close(struct trace_iterator *iter) { }

static void wakeup_print_header(struct seq_file *s)
{
        trace_default_header(s);
}
#endif /* !CONFIG_FUNCTION_GRAPH_TRACER */

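/*
 * Record a function entry either as a graph event or as a plain
 * TRACE_FN event, depending on whether graph display is selected.
 */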
static void
__trace_function(struct trace_array *tr,
                 unsigned long ip, unsigned long parent_ip,
                 unsigned long flags, int pc)
{
        if (is_graph(tr))
                trace_graph_function(tr, ip, parent_ip, flags, pc);
        else
                trace_function(tr, ip, parent_ip, flags, pc);
}

static int wakeup_flag_changed(struct trace_array *tr, u32 mask, int set)
{
        struct tracer *tracer = tr->current_trace;

        if (wakeup_function_set(tr, mask, set))
                return 0;

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
        if (mask & TRACE_ITER_DISPLAY_GRAPH)
                return wakeup_display_graph(tr, set);
#endif

        return trace_keep_overwrite(tracer, mask, set);
}

static int start_func_tracer(struct trace_array *tr, int graph)
{
        int ret;

        ret = register_wakeup_function(tr, graph, 0);

        if (!ret && tracing_is_enabled())
                tracer_enabled = 1;
        else
                tracer_enabled = 0;

        return ret;
}

static void stop_func_tracer(struct trace_array *tr, int graph)
{
        tracer_enabled = 0;

        unregister_wakeup_function(tr, graph);
}

/*
 * Should this new latency be reported/recorded?
 */
static bool report_latency(struct trace_array *tr, u64 delta)
{
        if (tracing_thresh) {
                if (delta < tracing_thresh)
                        return false;
        } else {
                if (delta <= tr->max_latency)
                        return false;
        }
        return true;
}

static void
probe_wakeup_migrate_task(void *ignore, struct task_struct *task, int cpu)
{
        if (task != wakeup_task)
                return;

        wakeup_current_cpu = cpu;
}

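/*
 * Open-coded counterparts of the generic sched_switch/sched_wakeup
 * event writers: the wakeup tracer records these entries directly
 * into its own trace buffer.
 */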
static void
tracing_sched_switch_trace(struct trace_array *tr,
                           struct task_struct *prev,
                           struct task_struct *next,
                           unsigned long flags, int pc)
{
        struct trace_event_call *call = &event_context_switch;
        struct ring_buffer *buffer = tr->trace_buffer.buffer;
        struct ring_buffer_event *event;
        struct ctx_switch_entry *entry;

        event = trace_buffer_lock_reserve(buffer, TRACE_CTX,
                                          sizeof(*entry), flags, pc);
        if (!event)
                return;
        entry = ring_buffer_event_data(event);
        entry->prev_pid = prev->pid;
        entry->prev_prio = prev->prio;
        entry->prev_state = task_state_index(prev);
        entry->next_pid = next->pid;
        entry->next_prio = next->prio;
        entry->next_state = task_state_index(next);
        entry->next_cpu = task_cpu(next);

        if (!call_filter_check_discard(call, entry, buffer, event))
                trace_buffer_unlock_commit(tr, buffer, event, flags, pc);
}

static void
tracing_sched_wakeup_trace(struct trace_array *tr,
                           struct task_struct *wakee,
                           struct task_struct *curr,
                           unsigned long flags, int pc)
{
        struct trace_event_call *call = &event_wakeup;
        struct ring_buffer_event *event;
        struct ctx_switch_entry *entry;
        struct ring_buffer *buffer = tr->trace_buffer.buffer;

        event = trace_buffer_lock_reserve(buffer, TRACE_WAKE,
                                          sizeof(*entry), flags, pc);
        if (!event)
                return;
        entry = ring_buffer_event_data(event);
        entry->prev_pid = curr->pid;
        entry->prev_prio = curr->prio;
        entry->prev_state = task_state_index(curr);
        entry->next_pid = wakee->pid;
        entry->next_prio = wakee->prio;
        entry->next_state = task_state_index(wakee);
        entry->next_cpu = task_cpu(wakee);

        if (!call_filter_check_discard(call, entry, buffer, event))
                trace_buffer_unlock_commit(tr, buffer, event, flags, pc);
}

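/*
 * Fires on every context switch. When the task being switched in is
 * the one we are timing, record the latency from its wakeup to now,
 * and snapshot the trace via update_max_tr() if it is a new maximum.
 */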
static void
probe_wakeup_sched_switch(void *ignore, bool preempt,
                          struct task_struct *prev, struct task_struct *next)
{
        struct trace_array_cpu *data;
        u64 T0, T1, delta;
        unsigned long flags;
        long disabled;
        int cpu;
        int pc;

        tracing_record_cmdline(prev);

        if (unlikely(!tracer_enabled))
                return;

        /*
         * When we start a new trace, we set wakeup_task to NULL
         * and then set tracer_enabled = 1. We want to make sure
         * that another CPU does not see the tracer_enabled = 1
         * and the wakeup_task with an older task, that might
         * actually be the same as next.
         */
        smp_rmb();

        if (next != wakeup_task)
                return;

        pc = preempt_count();

        /* disable local data, not wakeup_cpu data */
        cpu = raw_smp_processor_id();
        disabled = atomic_inc_return(&per_cpu_ptr(wakeup_trace->trace_buffer.data, cpu)->disabled);
        if (likely(disabled != 1))
                goto out;

        local_irq_save(flags);
        arch_spin_lock(&wakeup_lock);

        /* We could race with grabbing wakeup_lock */
        if (unlikely(!tracer_enabled || next != wakeup_task))
                goto out_unlock;

        /* The task we are waiting for is waking up */
        data = per_cpu_ptr(wakeup_trace->trace_buffer.data, wakeup_cpu);

        __trace_function(wakeup_trace, CALLER_ADDR0, CALLER_ADDR1, flags, pc);
        tracing_sched_switch_trace(wakeup_trace, prev, next, flags, pc);
        __trace_stack(wakeup_trace, flags, 0, pc);

        T0 = data->preempt_timestamp;
        T1 = ftrace_now(cpu);
        delta = T1 - T0;

        if (!report_latency(wakeup_trace, delta))
                goto out_unlock;

        if (likely(!is_tracing_stopped())) {
                wakeup_trace->max_latency = delta;
                update_max_tr(wakeup_trace, wakeup_task, wakeup_cpu, NULL);
        }

out_unlock:
        __wakeup_reset(wakeup_trace);
        arch_spin_unlock(&wakeup_lock);
        local_irq_restore(flags);
out:
        atomic_dec(&per_cpu_ptr(wakeup_trace->trace_buffer.data, cpu)->disabled);
}

static void __wakeup_reset(struct trace_array *tr)
{
        wakeup_cpu = -1;
        wakeup_prio = -1;
        tracing_dl = 0;

        if (wakeup_task)
                put_task_struct(wakeup_task);

        wakeup_task = NULL;
}

static void wakeup_reset(struct trace_array *tr)
{
        unsigned long flags;

        tracing_reset_online_cpus(&tr->trace_buffer);

        local_irq_save(flags);
        arch_spin_lock(&wakeup_lock);
        __wakeup_reset(tr);
        arch_spin_unlock(&wakeup_lock);
        local_irq_restore(flags);
}

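/*
 * Fires on every wakeup. If the woken task outranks both the current
 * task and whatever task is already being timed (per the class rules
 * documented below), restart the measurement with this task.
 */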
static void
probe_wakeup(void *ignore, struct task_struct *p)
{
        struct trace_array_cpu *data;
        int cpu = smp_processor_id();
        unsigned long flags;
        long disabled;
        int pc;

        if (likely(!tracer_enabled))
                return;

        tracing_record_cmdline(p);
        tracing_record_cmdline(current);

        /*
         * The semantics are as follows:
         *  - the wakeup tracer handles all tasks in the system, independently
         *    of their scheduling class;
         *  - the wakeup_rt tracer handles tasks belonging to the sched_dl and
         *    sched_rt classes;
         *  - wakeup_dl handles tasks belonging to the sched_dl class only.
         */
        if (tracing_dl || (wakeup_dl && !dl_task(p)) ||
            (wakeup_rt && !dl_task(p) && !rt_task(p)) ||
            (!dl_task(p) && (p->prio >= wakeup_prio || p->prio >= current->prio)))
                return;

        pc = preempt_count();
        disabled = atomic_inc_return(&per_cpu_ptr(wakeup_trace->trace_buffer.data, cpu)->disabled);
        if (unlikely(disabled != 1))
                goto out;

        /* interrupts should be off from try_to_wake_up */
        arch_spin_lock(&wakeup_lock);

        /* check for races. */
        if (!tracer_enabled || tracing_dl ||
            (!dl_task(p) && p->prio >= wakeup_prio))
                goto out_locked;

        /* reset the trace */
        __wakeup_reset(wakeup_trace);

        wakeup_cpu = task_cpu(p);
        wakeup_current_cpu = wakeup_cpu;
        wakeup_prio = p->prio;

        /*
         * Once you start tracing a -deadline task, don't bother tracing
         * another task until the first one wakes up.
         */
        if (dl_task(p))
                tracing_dl = 1;
        else
                tracing_dl = 0;

        wakeup_task = get_task_struct(p);

        local_save_flags(flags);

        data = per_cpu_ptr(wakeup_trace->trace_buffer.data, wakeup_cpu);
        data->preempt_timestamp = ftrace_now(cpu);
        tracing_sched_wakeup_trace(wakeup_trace, p, current, flags, pc);
        __trace_stack(wakeup_trace, flags, 0, pc);

        /*
         * We must be careful in using CALLER_ADDR2. But since wake_up
         * is not called by an assembly function (whereas schedule is)
         * it should be safe to use it here.
         */
        __trace_function(wakeup_trace, CALLER_ADDR1, CALLER_ADDR2, flags, pc);

out_locked:
        arch_spin_unlock(&wakeup_lock);
out:
        atomic_dec(&per_cpu_ptr(wakeup_trace->trace_buffer.data, cpu)->disabled);
}

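/*
 * Attach all scheduler tracepoint probes, unregistering the ones
 * already attached if any later registration fails.
 */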
static void start_wakeup_tracer(struct trace_array *tr)
{
        int ret;

        ret = register_trace_sched_wakeup(probe_wakeup, NULL);
        if (ret) {
                pr_info("wakeup trace: Couldn't activate tracepoint"
                        " probe to kernel_sched_wakeup\n");
                return;
        }

        ret = register_trace_sched_wakeup_new(probe_wakeup, NULL);
        if (ret) {
                pr_info("wakeup trace: Couldn't activate tracepoint"
                        " probe to kernel_sched_wakeup_new\n");
                goto fail_deprobe;
        }

        ret = register_trace_sched_switch(probe_wakeup_sched_switch, NULL);
        if (ret) {
                pr_info("sched trace: Couldn't activate tracepoint"
                        " probe to kernel_sched_switch\n");
                goto fail_deprobe_wake_new;
        }

        ret = register_trace_sched_migrate_task(probe_wakeup_migrate_task, NULL);
        if (ret) {
                pr_info("wakeup trace: Couldn't activate tracepoint"
                        " probe to kernel_sched_migrate_task\n");
                goto fail_deprobe_sched_switch;
        }

        wakeup_reset(tr);

        /*
         * Don't let the tracer_enabled = 1 show up before
         * the wakeup_task is reset. This may be overkill since
         * wakeup_reset does a spin_unlock after setting the
         * wakeup_task to NULL, but I want to be safe.
         * This is a slow path anyway.
         */
        smp_wmb();

        if (start_func_tracer(tr, is_graph(tr)))
                printk(KERN_ERR "failed to start wakeup tracer\n");

        return;
fail_deprobe_sched_switch:
        unregister_trace_sched_switch(probe_wakeup_sched_switch, NULL);
fail_deprobe_wake_new:
        unregister_trace_sched_wakeup_new(probe_wakeup, NULL);
fail_deprobe:
        unregister_trace_sched_wakeup(probe_wakeup, NULL);
}

static void stop_wakeup_tracer(struct trace_array *tr)
{
        tracer_enabled = 0;
        stop_func_tracer(tr, is_graph(tr));
        unregister_trace_sched_switch(probe_wakeup_sched_switch, NULL);
        unregister_trace_sched_wakeup_new(probe_wakeup, NULL);
        unregister_trace_sched_wakeup(probe_wakeup, NULL);
        unregister_trace_sched_migrate_task(probe_wakeup_migrate_task, NULL);
}

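/* Only one of the wakeup tracers may be active at a time. */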
static bool wakeup_busy;

static int __wakeup_tracer_init(struct trace_array *tr)
{
        save_flags = tr->trace_flags;

        /* non-overwrite mode screws up the latency tracers */
        set_tracer_flag(tr, TRACE_ITER_OVERWRITE, 1);
        set_tracer_flag(tr, TRACE_ITER_LATENCY_FMT, 1);

        tr->max_latency = 0;
        wakeup_trace = tr;
        ftrace_init_array_ops(tr, wakeup_tracer_call);
        start_wakeup_tracer(tr);

        wakeup_busy = true;
        return 0;
}

static int wakeup_tracer_init(struct trace_array *tr)
{
        if (wakeup_busy)
                return -EBUSY;

        wakeup_dl = 0;
        wakeup_rt = 0;
        return __wakeup_tracer_init(tr);
}

static int wakeup_rt_tracer_init(struct trace_array *tr)
{
        if (wakeup_busy)
                return -EBUSY;

        wakeup_dl = 0;
        wakeup_rt = 1;
        return __wakeup_tracer_init(tr);
}

static int wakeup_dl_tracer_init(struct trace_array *tr)
{
        if (wakeup_busy)
                return -EBUSY;

        wakeup_dl = 1;
        wakeup_rt = 0;
        return __wakeup_tracer_init(tr);
}

static void wakeup_tracer_reset(struct trace_array *tr)
{
        int lat_flag = save_flags & TRACE_ITER_LATENCY_FMT;
        int overwrite_flag = save_flags & TRACE_ITER_OVERWRITE;

        stop_wakeup_tracer(tr);
        /* make sure we put back any tasks we are tracing */
        wakeup_reset(tr);

        set_tracer_flag(tr, TRACE_ITER_LATENCY_FMT, lat_flag);
        set_tracer_flag(tr, TRACE_ITER_OVERWRITE, overwrite_flag);
        ftrace_reset_array_ops(tr);
        wakeup_busy = false;
}

static void wakeup_tracer_start(struct trace_array *tr)
{
        wakeup_reset(tr);
        tracer_enabled = 1;
}

static void wakeup_tracer_stop(struct trace_array *tr)
{
        tracer_enabled = 0;
}

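/*
 * The three tracer definitions below are identical except for their
 * names and init callbacks, which select the scheduling classes that
 * probe_wakeup() will consider.
 */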
static struct tracer wakeup_tracer __read_mostly =
{
        .name = "wakeup",
        .init = wakeup_tracer_init,
        .reset = wakeup_tracer_reset,
        .start = wakeup_tracer_start,
        .stop = wakeup_tracer_stop,
        .print_max = true,
        .print_header = wakeup_print_header,
        .print_line = wakeup_print_line,
        .flag_changed = wakeup_flag_changed,
#ifdef CONFIG_FTRACE_SELFTEST
        .selftest = trace_selftest_startup_wakeup,
#endif
        .open = wakeup_trace_open,
        .close = wakeup_trace_close,
        .allow_instances = true,
        .use_max_tr = true,
};

static struct tracer wakeup_rt_tracer __read_mostly =
{
        .name = "wakeup_rt",
        .init = wakeup_rt_tracer_init,
        .reset = wakeup_tracer_reset,
        .start = wakeup_tracer_start,
        .stop = wakeup_tracer_stop,
        .print_max = true,
        .print_header = wakeup_print_header,
        .print_line = wakeup_print_line,
        .flag_changed = wakeup_flag_changed,
#ifdef CONFIG_FTRACE_SELFTEST
        .selftest = trace_selftest_startup_wakeup,
#endif
        .open = wakeup_trace_open,
        .close = wakeup_trace_close,
        .allow_instances = true,
        .use_max_tr = true,
};

static struct tracer wakeup_dl_tracer __read_mostly =
{
        .name = "wakeup_dl",
        .init = wakeup_dl_tracer_init,
        .reset = wakeup_tracer_reset,
        .start = wakeup_tracer_start,
        .stop = wakeup_tracer_stop,
        .print_max = true,
        .print_header = wakeup_print_header,
        .print_line = wakeup_print_line,
        .flag_changed = wakeup_flag_changed,
#ifdef CONFIG_FTRACE_SELFTEST
        .selftest = trace_selftest_startup_wakeup,
#endif
        .open = wakeup_trace_open,
        .close = wakeup_trace_close,
        .allow_instances = true,
        .use_max_tr = true,
};

__init static int init_wakeup_tracer(void)
{
        int ret;

        ret = register_tracer(&wakeup_tracer);
        if (ret)
                return ret;

        ret = register_tracer(&wakeup_rt_tracer);
        if (ret)
                return ret;

        ret = register_tracer(&wakeup_dl_tracer);
        if (ret)
                return ret;

        return 0;
}
core_initcall(init_wakeup_tracer);