/* Include in trace.c */

#include <linux/stringify.h>
#include <linux/kthread.h>
#include <linux/delay.h>
#include <linux/slab.h>
static inline int trace_valid_entry(struct trace_entry *entry)
{
	switch (entry->type) {
	case TRACE_FN:
	case TRACE_CTX:
	case TRACE_WAKE:
	case TRACE_STACK:
	case TRACE_PRINT:
	case TRACE_BRANCH:
	case TRACE_GRAPH_ENT:
	case TRACE_GRAPH_RET:
		return 1;
	}
	return 0;
}
static int trace_test_buffer_cpu(struct trace_buffer *buf, int cpu)
{
	struct ring_buffer_event *event;
	struct trace_entry *entry;
	unsigned int loops = 0;
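
	/*
	 * ring_buffer_consume() pops the next event from this CPU's
	 * buffer and returns NULL once it is drained; the two NULL
	 * arguments skip the timestamp and lost-event counts, which
	 * this sanity check does not need.
	 */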
	while ((event = ring_buffer_consume(buf->buffer, cpu, NULL, NULL))) {
		entry = ring_buffer_event_data(event);

		/*
		 * The ring buffer holds at most trace_buf_size entries;
		 * if we loop more times than that, something is wrong
		 * with the ring buffer.
		 */
		if (loops++ > trace_buf_size) {
			printk(KERN_CONT ".. bad ring buffer ");
			goto failed;
		}
		if (!trace_valid_entry(entry)) {
			printk(KERN_CONT ".. invalid entry %d ",
				entry->type);
			goto failed;
		}
	}
	return 0;

 failed:
	/* disable tracing */
	tracing_disabled = 1;
	printk(KERN_CONT ".. corrupted trace buffer .. ");
	return -1;
}
/*
 * Test the trace buffer to see if all the elements
 * are still sane.
 */
static int trace_test_buffer(struct trace_buffer *buf, unsigned long *count)
{
	unsigned long flags, cnt = 0;
	int cpu, ret = 0;

	/* Don't allow flipping of max traces now */
	local_irq_save(flags);
	arch_spin_lock(&buf->tr->max_lock);

	cnt = ring_buffer_entries(buf->buffer);

	/*
	 * The trace_test_buffer_cpu runs a while loop to consume all data.
	 * If the calling tracer is broken, and is constantly filling
	 * the buffer, this will run forever, and hard lock the box.
	 * We disable the ring buffer while we do this test to prevent
	 * a hard lock up.
	 */
	tracing_off();
	for_each_possible_cpu(cpu) {
		ret = trace_test_buffer_cpu(buf, cpu);
		if (ret)
			break;
	}
	tracing_on();
	arch_spin_unlock(&buf->tr->max_lock);
	local_irq_restore(flags);

	if (count)
		*count = cnt;

	return ret;
}
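
/*
 * A typical selftest below drives this helper with the same pattern
 * (sketch only; the real callers follow later in this file):
 *
 *	tracer_init(trace, tr);
 *	msleep(100);
 *	tracing_stop();
 *	ret = trace_test_buffer(&tr->trace_buffer, &count);
 *	trace->reset(tr);
 *	tracing_start();
 */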
static inline void warn_failed_init_tracer(struct tracer *trace, int init_ret)
{
	printk(KERN_WARNING "Failed to init %s tracer, init returned %d\n",
	       trace->name, init_ret);
}
#ifdef CONFIG_FUNCTION_TRACER

#ifdef CONFIG_DYNAMIC_FTRACE
static int trace_selftest_test_probe1_cnt;
static void trace_selftest_test_probe1_func(unsigned long ip,
					    unsigned long pip,
					    struct ftrace_ops *op,
					    struct pt_regs *pt_regs)
{
	trace_selftest_test_probe1_cnt++;
}

static int trace_selftest_test_probe2_cnt;
static void trace_selftest_test_probe2_func(unsigned long ip,
					    unsigned long pip,
					    struct ftrace_ops *op,
					    struct pt_regs *pt_regs)
{
	trace_selftest_test_probe2_cnt++;
}

static int trace_selftest_test_probe3_cnt;
static void trace_selftest_test_probe3_func(unsigned long ip,
					    unsigned long pip,
					    struct ftrace_ops *op,
					    struct pt_regs *pt_regs)
{
	trace_selftest_test_probe3_cnt++;
}

static int trace_selftest_test_global_cnt;
static void trace_selftest_test_global_func(unsigned long ip,
					    unsigned long pip,
					    struct ftrace_ops *op,
					    struct pt_regs *pt_regs)
{
	trace_selftest_test_global_cnt++;
}

static int trace_selftest_test_dyn_cnt;
static void trace_selftest_test_dyn_func(unsigned long ip,
					 unsigned long pip,
					 struct ftrace_ops *op,
					 struct pt_regs *pt_regs)
{
	trace_selftest_test_dyn_cnt++;
}
static struct ftrace_ops test_probe1 = {
	.func			= trace_selftest_test_probe1_func,
	.flags			= FTRACE_OPS_FL_RECURSION_SAFE,
};

static struct ftrace_ops test_probe2 = {
	.func			= trace_selftest_test_probe2_func,
	.flags			= FTRACE_OPS_FL_RECURSION_SAFE,
};

static struct ftrace_ops test_probe3 = {
	.func			= trace_selftest_test_probe3_func,
	.flags			= FTRACE_OPS_FL_RECURSION_SAFE,
};
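
/*
 * FTRACE_OPS_FL_RECURSION_SAFE declares that a callback provides its
 * own recursion protection (trivially true for these counters, which
 * call no traceable functions), so ftrace can skip the extra
 * recursion-guarding wrapper. The recursion selftest further below
 * exercises the opposite case.
 */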
static void print_counts(void)
{
	printk("(%d %d %d %d %d) ",
	       trace_selftest_test_probe1_cnt,
	       trace_selftest_test_probe2_cnt,
	       trace_selftest_test_probe3_cnt,
	       trace_selftest_test_global_cnt,
	       trace_selftest_test_dyn_cnt);
}
static void reset_counts(void)
{
	trace_selftest_test_probe1_cnt = 0;
	trace_selftest_test_probe2_cnt = 0;
	trace_selftest_test_probe3_cnt = 0;
	trace_selftest_test_global_cnt = 0;
	trace_selftest_test_dyn_cnt = 0;
}
static int trace_selftest_ops(struct trace_array *tr, int cnt)
{
	int save_ftrace_enabled = ftrace_enabled;
	struct ftrace_ops *dyn_ops;
	char *func1_name;
	char *func2_name;
	int len1;
	int len2;
	int ret = -1;

	printk(KERN_CONT "PASSED\n");
	pr_info("Testing dynamic ftrace ops #%d: ", cnt);

	ftrace_enabled = 1;
	reset_counts();

	/* Handle PPC64 '.' name */
	func1_name = "*" __stringify(DYN_FTRACE_TEST_NAME);
	func2_name = "*" __stringify(DYN_FTRACE_TEST_NAME2);
	len1 = strlen(func1_name);
	len2 = strlen(func2_name);
	/*
	 * Probe 1 will trace function 1.
	 * Probe 2 will trace function 2.
	 * Probe 3 will trace functions 1 and 2.
	 */
	ftrace_set_filter(&test_probe1, func1_name, len1, 1);
	ftrace_set_filter(&test_probe2, func2_name, len2, 1);
	ftrace_set_filter(&test_probe3, func1_name, len1, 1);
	ftrace_set_filter(&test_probe3, func2_name, len2, 0);
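
	/*
	 * The last argument of ftrace_set_filter() is "reset": 1 clears
	 * the ops' existing filter hash before adding the pattern, 0
	 * appends to it. That is why test_probe3 needed two calls above:
	 * the first starts a fresh hash with function 1, the second
	 * adds function 2 to the same hash.
	 */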
	register_ftrace_function(&test_probe1);
	register_ftrace_function(&test_probe2);
	register_ftrace_function(&test_probe3);
	/* First time we are running with main function */
	if (cnt > 1) {
		ftrace_init_array_ops(tr, trace_selftest_test_global_func);
		register_ftrace_function(tr->ops);
	}

	DYN_FTRACE_TEST_NAME();

	print_counts();
	if (trace_selftest_test_probe1_cnt != 1)
		goto out;
	if (trace_selftest_test_probe2_cnt != 0)
		goto out;
	if (trace_selftest_test_probe3_cnt != 1)
		goto out;
	if (cnt > 1) {
		if (trace_selftest_test_global_cnt == 0)
			goto out;
	}

	DYN_FTRACE_TEST_NAME2();

	print_counts();

	if (trace_selftest_test_probe1_cnt != 1)
		goto out;
	if (trace_selftest_test_probe2_cnt != 1)
		goto out;
	if (trace_selftest_test_probe3_cnt != 2)
		goto out;
	/* Add a dynamic probe */
	dyn_ops = kzalloc(sizeof(*dyn_ops), GFP_KERNEL);
	if (!dyn_ops) {
		printk("MEMORY ERROR ");
		goto out;
	}

	dyn_ops->func = trace_selftest_test_dyn_func;

	register_ftrace_function(dyn_ops);

	trace_selftest_test_global_cnt = 0;

	DYN_FTRACE_TEST_NAME();

	print_counts();
	if (trace_selftest_test_probe1_cnt != 2)
		goto out_free;
	if (trace_selftest_test_probe2_cnt != 1)
		goto out_free;
	if (trace_selftest_test_probe3_cnt != 3)
		goto out_free;
	if (cnt > 1) {
		if (trace_selftest_test_global_cnt == 0)
			goto out;
	}
	if (trace_selftest_test_dyn_cnt == 0)
		goto out_free;

	DYN_FTRACE_TEST_NAME2();

	print_counts();

	if (trace_selftest_test_probe1_cnt != 2)
		goto out_free;
	if (trace_selftest_test_probe2_cnt != 2)
		goto out_free;
	if (trace_selftest_test_probe3_cnt != 4)
		goto out_free;
	ret = 0;
 out_free:
	unregister_ftrace_function(dyn_ops);
	kfree(dyn_ops);

 out:
	/* Purposely unregister in the same order */
	unregister_ftrace_function(&test_probe1);
	unregister_ftrace_function(&test_probe2);
	unregister_ftrace_function(&test_probe3);
	if (cnt > 1)
		unregister_ftrace_function(tr->ops);
	ftrace_reset_array_ops(tr);
	/* Make sure everything is off */
	reset_counts();
	DYN_FTRACE_TEST_NAME();
	DYN_FTRACE_TEST_NAME();

	if (trace_selftest_test_probe1_cnt ||
	    trace_selftest_test_probe2_cnt ||
	    trace_selftest_test_probe3_cnt ||
	    trace_selftest_test_global_cnt ||
	    trace_selftest_test_dyn_cnt)
		ret = -1;

	ftrace_enabled = save_ftrace_enabled;

	return ret;
}
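
/*
 * Summary of the count checks above: the first DYN_FTRACE_TEST_NAME()
 * call fires probe1 and probe3 only (1, 0, 1); DYN_FTRACE_TEST_NAME2()
 * brings probe2 and probe3 up (1, 1, 2); after the dynamic ops is
 * registered, a second round of both calls ends at (2, 2, 4).
 */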
/* Test dynamic code modification and ftrace filters */
static int trace_selftest_startup_dynamic_tracing(struct tracer *trace,
						  struct trace_array *tr,
						  int (*func)(void))
{
	int save_ftrace_enabled = ftrace_enabled;
	unsigned long count;
	char *func_name;
	int ret;

	/* The ftrace test PASSED */
	printk(KERN_CONT "PASSED\n");
	pr_info("Testing dynamic ftrace: ");

	/* enable tracing, and record the filter function */
	ftrace_enabled = 1;
	/* passed in by parameter to fool gcc from optimizing */
	func();

	/*
	 * Some archs *cough*PowerPC*cough* add characters to the
	 * start of the function names. We simply put a '*' to
	 * accommodate them.
	 */
	func_name = "*" __stringify(DYN_FTRACE_TEST_NAME);

	/* filter only on our function */
	ftrace_set_global_filter(func_name, strlen(func_name), 1);
	/* enable tracing */
	ret = tracer_init(trace, tr);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		goto out;
	}

	/* Sleep for a 1/10 of a second */
	msleep(100);

	/* we should have nothing in the buffer */
	ret = trace_test_buffer(&tr->trace_buffer, &count);
	if (ret)
		goto out;

	if (count) {
		ret = -1;
		printk(KERN_CONT ".. filter did not filter .. ");
		goto out;
	}
	/* call our function again */
	func();

	/* sleep again */
	msleep(100);

	/* stop the tracing. */
	tracing_stop();
	ftrace_enabled = 0;

	/* check the trace buffer */
	ret = trace_test_buffer(&tr->trace_buffer, &count);

	ftrace_enabled = 1;
	tracing_start();

	/* we should only have one item */
	if (!ret && count != 1) {
		trace->reset(tr);
		printk(KERN_CONT ".. filter failed count=%ld ..", count);
		ret = -1;
		goto out;
	}
	/* Test the ops with global tracing running */
	ret = trace_selftest_ops(tr, 1);
	trace->reset(tr);

 out:
	ftrace_enabled = save_ftrace_enabled;

	/* Enable tracing on all functions again */
	ftrace_set_global_filter(NULL, 0, 1);

	/* Test the ops with global tracing off */
	if (!ret)
		ret = trace_selftest_ops(tr, 2);

	return ret;
}
static int trace_selftest_recursion_cnt;
static void trace_selftest_test_recursion_func(unsigned long ip,
					       unsigned long pip,
					       struct ftrace_ops *op,
					       struct pt_regs *pt_regs)
{
	/*
	 * This function is registered without the recursion safe flag.
	 * The ftrace infrastructure should provide the recursion
	 * protection. If not, this will crash the kernel!
	 */
	if (trace_selftest_recursion_cnt++ > 10)
		return;
	DYN_FTRACE_TEST_NAME();
}
static void trace_selftest_test_recursion_safe_func(unsigned long ip,
						    unsigned long pip,
						    struct ftrace_ops *op,
						    struct pt_regs *pt_regs)
{
	/*
	 * We said we would provide our own recursion. By calling
	 * this function again, we should recurse back into this function
	 * and count again. But this only happens if the arch supports
	 * all of ftrace features and nothing else is using the function
	 * tracing utility.
	 */
	if (trace_selftest_recursion_cnt++)
		return;
	DYN_FTRACE_TEST_NAME();
}
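
/*
 * Expected flow for the recursion-safe probe: the outer invocation
 * bumps the count 0 -> 1 and calls DYN_FTRACE_TEST_NAME() again; the
 * nested invocation bumps it 1 -> 2 and returns early. The check
 * below therefore expects exactly 2 on arches with full support.
 */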
static struct ftrace_ops test_rec_probe = {
	.func			= trace_selftest_test_recursion_func,
};

static struct ftrace_ops test_recsafe_probe = {
	.func			= trace_selftest_test_recursion_safe_func,
	.flags			= FTRACE_OPS_FL_RECURSION_SAFE,
};
static int
trace_selftest_function_recursion(void)
{
	int save_ftrace_enabled = ftrace_enabled;
	char *func_name;
	int len;
	int ret;

	/* The previous test PASSED */
	pr_cont("PASSED\n");
	pr_info("Testing ftrace recursion: ");

	/* enable tracing, and record the filter function */
	ftrace_enabled = 1;

	/* Handle PPC64 '.' name */
	func_name = "*" __stringify(DYN_FTRACE_TEST_NAME);
	len = strlen(func_name);
	ret = ftrace_set_filter(&test_rec_probe, func_name, len, 1);
	if (ret) {
		pr_cont("*Could not set filter* ");
		goto out;
	}

	ret = register_ftrace_function(&test_rec_probe);
	if (ret) {
		pr_cont("*could not register callback* ");
		goto out;
	}

	DYN_FTRACE_TEST_NAME();

	unregister_ftrace_function(&test_rec_probe);

	/*
	 * Recursion allows for transitions between contexts,
	 * and may call the callback twice.
	 */
	if (trace_selftest_recursion_cnt != 1 &&
	    trace_selftest_recursion_cnt != 2) {
		pr_cont("*callback not called once (or twice) (%d)* ",
			trace_selftest_recursion_cnt);
		goto out;
	}

	trace_selftest_recursion_cnt = 1;
	pr_cont("PASSED\n");
	pr_info("Testing ftrace recursion safe: ");

	ret = ftrace_set_filter(&test_recsafe_probe, func_name, len, 1);
	if (ret) {
		pr_cont("*Could not set filter* ");
		goto out;
	}

	ret = register_ftrace_function(&test_recsafe_probe);
	if (ret) {
		pr_cont("*could not register callback* ");
		goto out;
	}

	DYN_FTRACE_TEST_NAME();

	unregister_ftrace_function(&test_recsafe_probe);

	if (trace_selftest_recursion_cnt != 2) {
		pr_cont("*callback not called expected 2 times (%d)* ",
			trace_selftest_recursion_cnt);
		goto out;
	}

	ret = 0;
 out:
	ftrace_enabled = save_ftrace_enabled;

	return ret;
}
#else /* !CONFIG_DYNAMIC_FTRACE */
# define trace_selftest_startup_dynamic_tracing(trace, tr, func) ({ 0; })
# define trace_selftest_function_recursion() ({ 0; })
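/*
 * The ({ 0; }) form is a GNU statement expression; it lets the stubs
 * above sit on the right-hand side of an assignment, e.g.
 *
 *	ret = trace_selftest_startup_dynamic_tracing(trace, tr, func);
 *
 * while compiling away to a constant 0 when CONFIG_DYNAMIC_FTRACE
 * is not set.
 */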
#endif /* CONFIG_DYNAMIC_FTRACE */
static enum {
	TRACE_SELFTEST_REGS_START,
	TRACE_SELFTEST_REGS_FOUND,
	TRACE_SELFTEST_REGS_NOT_FOUND,
} trace_selftest_regs_stat;
static void trace_selftest_test_regs_func(unsigned long ip,
					  unsigned long pip,
					  struct ftrace_ops *op,
					  struct pt_regs *pt_regs)
{
	if (pt_regs)
		trace_selftest_regs_stat = TRACE_SELFTEST_REGS_FOUND;
	else
		trace_selftest_regs_stat = TRACE_SELFTEST_REGS_NOT_FOUND;
}
static struct ftrace_ops test_regs_probe = {
	.func		= trace_selftest_test_regs_func,
	.flags		= FTRACE_OPS_FL_RECURSION_SAFE | FTRACE_OPS_FL_SAVE_REGS,
};
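
/*
 * FTRACE_OPS_FL_SAVE_REGS demands a full pt_regs and makes registration
 * fail on arches without dynamic-ftrace-with-regs support, while
 * FTRACE_OPS_FL_SAVE_REGS_IF_SUPPORTED (added on the fallback path
 * below) asks for regs when available but tolerates a NULL pt_regs.
 */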
static int
trace_selftest_function_regs(void)
{
	int save_ftrace_enabled = ftrace_enabled;
	char *func_name;
	int len;
	int ret;
	int supported = 0;

#ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS
	supported = 1;
#endif

	/* The previous test PASSED */
	pr_cont("PASSED\n");
	pr_info("Testing ftrace regs%s: ",
		!supported ? "(no arch support)" : "");

	/* enable tracing, and record the filter function */
	ftrace_enabled = 1;

	/* Handle PPC64 '.' name */
	func_name = "*" __stringify(DYN_FTRACE_TEST_NAME);
	len = strlen(func_name);
	ret = ftrace_set_filter(&test_regs_probe, func_name, len, 1);
	/*
	 * If DYNAMIC_FTRACE is not set, then we just trace all functions.
	 * This test really doesn't care.
	 */
	if (ret && ret != -ENODEV) {
		pr_cont("*Could not set filter* ");
		goto out;
	}
	ret = register_ftrace_function(&test_regs_probe);
	/*
	 * Now if the arch does not support passing regs, then this should
	 * have failed.
	 */
	if (!supported) {
		if (!ret) {
			pr_cont("*registered save-regs without arch support* ");
			goto out;
		}
		test_regs_probe.flags |= FTRACE_OPS_FL_SAVE_REGS_IF_SUPPORTED;
		ret = register_ftrace_function(&test_regs_probe);
	}
	if (ret) {
		pr_cont("*could not register callback* ");
		goto out;
	}
	DYN_FTRACE_TEST_NAME();

	unregister_ftrace_function(&test_regs_probe);

	ret = -1;
	switch (trace_selftest_regs_stat) {
	case TRACE_SELFTEST_REGS_START:
		pr_cont("*callback never called* ");
		goto out;

	case TRACE_SELFTEST_REGS_FOUND:
		if (supported)
			break;
		pr_cont("*callback received regs without arch support* ");
		goto out;

	case TRACE_SELFTEST_REGS_NOT_FOUND:
		if (!supported)
			break;
		pr_cont("*callback received NULL regs* ");
		goto out;
	}

	ret = 0;
 out:
	ftrace_enabled = save_ftrace_enabled;

	return ret;
}
/*
 * Simple verification test of ftrace function tracer.
 * Enable ftrace, sleep 1/10 second, and then read the trace
 * buffer to see if all is in order.
 */
__init int
trace_selftest_startup_function(struct tracer *trace, struct trace_array *tr)
{
	int save_ftrace_enabled = ftrace_enabled;
	unsigned long count;
	int ret;
#ifdef CONFIG_DYNAMIC_FTRACE
	if (ftrace_filter_param) {
		printk(KERN_CONT " ... kernel command line filter set: force PASS ... ");
		return 0;
	}
#endif

	/* make sure msleep has been recorded */
	msleep(1);

	/* start the tracing */
	ftrace_enabled = 1;
	ret = tracer_init(trace, tr);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		goto out;
	}

	/* Sleep for a 1/10 of a second */
	msleep(100);
	/* stop the tracing. */
	tracing_stop();
	ftrace_enabled = 0;

	/* check the trace buffer */
	ret = trace_test_buffer(&tr->trace_buffer, &count);

	ftrace_enabled = 1;
	trace->reset(tr);
	tracing_start();

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
		goto out;
	}
	ret = trace_selftest_startup_dynamic_tracing(trace, tr,
						     DYN_FTRACE_TEST_NAME);
	if (ret)
		goto out;

	ret = trace_selftest_function_recursion();
	if (ret)
		goto out;

	ret = trace_selftest_function_regs();
 out:
	ftrace_enabled = save_ftrace_enabled;

	/* kill ftrace totally if we failed */
	if (ret)
		ftrace_kill();

	return ret;
}
#endif /* CONFIG_FUNCTION_TRACER */
#ifdef CONFIG_FUNCTION_GRAPH_TRACER

/* Maximum number of functions to trace before diagnosing a hang */
#define GRAPH_MAX_FUNC_TEST	100000000

static unsigned int graph_hang_thresh;
/* Wrap the real function entry probe to avoid possible hanging */
static int trace_graph_entry_watchdog(struct ftrace_graph_ent *trace)
{
	/* This is harmlessly racy, we want to approximately detect a hang */
	if (unlikely(++graph_hang_thresh > GRAPH_MAX_FUNC_TEST)) {
		ftrace_graph_stop();
		printk(KERN_WARNING "BUG: Function graph tracer hang!\n");
		if (ftrace_dump_on_oops) {
			ftrace_dump(DUMP_ALL);
			/* ftrace_dump() disables tracing */
			tracing_on();
		}
		return 0;
	}

	return trace_graph_entry(trace);
}
/*
 * Pretty much the same as for the function tracer, from which this
 * selftest has been borrowed.
 */
__init int
trace_selftest_startup_function_graph(struct tracer *trace,
				      struct trace_array *tr)
{
	int ret;
	unsigned long count;

#ifdef CONFIG_DYNAMIC_FTRACE
	if (ftrace_filter_param) {
		printk(KERN_CONT " ... kernel command line filter set: force PASS ... ");
		return 0;
	}
#endif
	/*
	 * Simulate the init() callback, but attach a watchdog callback
	 * to detect and recover from possible hangs.
	 */
	tracing_reset_online_cpus(&tr->trace_buffer);
	set_graph_array(tr);
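	/*
	 * Note the argument order: register_ftrace_graph() takes the
	 * return probe first and the entry probe second; the watchdog
	 * wrapper above stands in for the normal entry probe.
	 */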
	ret = register_ftrace_graph(&trace_graph_return,
				    &trace_graph_entry_watchdog);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		goto out;
	}
	tracing_start_cmdline_record();
	/* Sleep for a 1/10 of a second */
	msleep(100);

	/* Have we just recovered from a hang? */
	if (graph_hang_thresh > GRAPH_MAX_FUNC_TEST) {
		tracing_selftest_disabled = true;
		ret = -1;
		goto out;
	}

	tracing_stop();
	/* check the trace buffer */
	ret = trace_test_buffer(&tr->trace_buffer, &count);

	trace->reset(tr);
	tracing_start();

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
		goto out;
	}
	/* Don't test dynamic tracing, the function tracer already did */

out:
	/* Stop it if we failed */
	if (ret)
		ftrace_graph_stop();

	return ret;
}
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
#ifdef CONFIG_IRQSOFF_TRACER
int
trace_selftest_startup_irqsoff(struct tracer *trace, struct trace_array *tr)
{
	unsigned long save_max = tr->max_latency;
	unsigned long count;
	int ret;
	/* start the tracing */
	ret = tracer_init(trace, tr);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		return ret;
	}

	/* reset the max latency */
	tr->max_latency = 0;
	/* disable interrupts for a bit */
	local_irq_disable();
	udelay(100);
	local_irq_enable();
	/*
	 * Stop the tracer to avoid a warning subsequent
	 * to buffer flipping failure because tracing_stop()
	 * disables the tr and max buffers, making flipping impossible
	 * in case of parallel max irqs-off latencies.
	 */
	trace->stop(tr);
	/* stop the tracing. */
	tracing_stop();
	/* check both trace buffers */
	ret = trace_test_buffer(&tr->trace_buffer, NULL);
	if (!ret)
		ret = trace_test_buffer(&tr->max_buffer, &count);
	trace->reset(tr);
	tracing_start();

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
	}

	tr->max_latency = save_max;

	return ret;
}
#endif /* CONFIG_IRQSOFF_TRACER */
#ifdef CONFIG_PREEMPT_TRACER
int
trace_selftest_startup_preemptoff(struct tracer *trace, struct trace_array *tr)
{
	unsigned long save_max = tr->max_latency;
	unsigned long count;
	int ret;
	/*
	 * Now that the big kernel lock is no longer preemptible,
	 * and this is called with the BKL held, it will always
	 * fail. If preemption is already disabled, simply
	 * pass the test. When the BKL is removed, or becomes
	 * preemptible again, we will once again test this,
	 * so keep it in.
	 */
	if (preempt_count()) {
		printk(KERN_CONT "can not test ... force ");
		return 0;
	}
	/* start the tracing */
	ret = tracer_init(trace, tr);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		return ret;
	}

	/* reset the max latency */
	tr->max_latency = 0;
	/* disable preemption for a bit */
	preempt_disable();
	udelay(100);
	preempt_enable();
	/*
	 * Stop the tracer to avoid a warning subsequent
	 * to buffer flipping failure because tracing_stop()
	 * disables the tr and max buffers, making flipping impossible
	 * in case of parallel max preempt-off latencies.
	 */
	trace->stop(tr);
	/* stop the tracing. */
	tracing_stop();
	/* check both trace buffers */
	ret = trace_test_buffer(&tr->trace_buffer, NULL);
	if (!ret)
		ret = trace_test_buffer(&tr->max_buffer, &count);
	trace->reset(tr);
	tracing_start();

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
	}

	tr->max_latency = save_max;

	return ret;
}
#endif /* CONFIG_PREEMPT_TRACER */
#if defined(CONFIG_IRQSOFF_TRACER) && defined(CONFIG_PREEMPT_TRACER)
int
trace_selftest_startup_preemptirqsoff(struct tracer *trace, struct trace_array *tr)
{
	unsigned long save_max = tr->max_latency;
	unsigned long count;
	int ret;
	/*
	 * Now that the big kernel lock is no longer preemptible,
	 * and this is called with the BKL held, it will always
	 * fail. If preemption is already disabled, simply
	 * pass the test. When the BKL is removed, or becomes
	 * preemptible again, we will once again test this,
	 * so keep it in.
	 */
	if (preempt_count()) {
		printk(KERN_CONT "can not test ... force ");
		return 0;
	}
	/* start the tracing */
	ret = tracer_init(trace, tr);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		goto out_no_start;
	}

	/* reset the max latency */
	tr->max_latency = 0;

	/* disable preemption and interrupts for a bit */
	preempt_disable();
	local_irq_disable();
	udelay(100);
	preempt_enable();
	/* reverse the order of preempt vs irqs */
	local_irq_enable();
	/*
	 * Stop the tracer to avoid a warning subsequent
	 * to buffer flipping failure because tracing_stop()
	 * disables the tr and max buffers, making flipping impossible
	 * in case of parallel max irqs/preempt-off latencies.
	 */
	trace->stop(tr);
	/* stop the tracing. */
	tracing_stop();
	/* check both trace buffers */
	ret = trace_test_buffer(&tr->trace_buffer, NULL);
	if (ret)
		goto out;

	ret = trace_test_buffer(&tr->max_buffer, &count);
	if (ret)
		goto out;

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
		goto out;
	}
	/* do the test by disabling interrupts first this time */
	tr->max_latency = 0;
	tracing_start();
	trace->start(tr);

	preempt_disable();
	local_irq_disable();
	udelay(100);
	preempt_enable();
	/* reverse the order of preempt vs irqs */
	local_irq_enable();

	trace->stop(tr);
	/* stop the tracing. */
	tracing_stop();
	/* check both trace buffers */
	ret = trace_test_buffer(&tr->trace_buffer, NULL);
	if (ret)
		goto out;

	ret = trace_test_buffer(&tr->max_buffer, &count);

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
		goto out;
	}

 out:
	tracing_start();
 out_no_start:
	trace->reset(tr);
	tr->max_latency = save_max;

	return ret;
}
#endif /* CONFIG_IRQSOFF_TRACER && CONFIG_PREEMPT_TRACER */
#ifdef CONFIG_NOP_TRACER
int
trace_selftest_startup_nop(struct tracer *trace, struct trace_array *tr)
{
	/* What could possibly go wrong? */
	return 0;
}
#endif
#ifdef CONFIG_SCHED_TRACER

struct wakeup_test_data {
	struct completion	is_ready;
	int			go;
};
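
/*
 * The handshake below happens in two rounds: the thread completes
 * is_ready once after switching itself to SCHED_DEADLINE, and again
 * after the test sets ->go and wakes it; the go flag guards the sleep
 * loop against spurious wakeups.
 */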
static int trace_wakeup_test_thread(void *data)
{
	/* Make this a -deadline thread */
	static const struct sched_attr attr = {
		.sched_policy = SCHED_DEADLINE,
		.sched_runtime = 100000ULL,
		.sched_deadline = 10000000ULL,
		.sched_period = 10000000ULL
	};
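	/*
	 * sched_attr times are in nanoseconds: 100us of runtime every
	 * 10ms period. A runnable deadline task preempts any RT or
	 * fair task, which makes this thread a reliable wakeup-latency
	 * target for the tracer under test.
	 */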
	struct wakeup_test_data *x = data;

	sched_setattr(current, &attr);
	/* Let the test know we are running at the new priority */
	complete(&x->is_ready);

	/* now go to sleep and let the test wake us up */
	set_current_state(TASK_INTERRUPTIBLE);
	while (!x->go) {
		schedule();
		set_current_state(TASK_INTERRUPTIBLE);
	}

	complete(&x->is_ready);

	set_current_state(TASK_INTERRUPTIBLE);

	/* we are awake, now wait to disappear */
	while (!kthread_should_stop()) {
		schedule();
		set_current_state(TASK_INTERRUPTIBLE);
	}

	__set_current_state(TASK_RUNNING);

	return 0;
}
int
trace_selftest_startup_wakeup(struct tracer *trace, struct trace_array *tr)
{
	unsigned long save_max = tr->max_latency;
	struct task_struct *p;
	struct wakeup_test_data data;
	unsigned long count;
	int ret;
	memset(&data, 0, sizeof(data));

	init_completion(&data.is_ready);

	/* create a -deadline thread */
	p = kthread_run(trace_wakeup_test_thread, &data, "ftrace-test");
	if (IS_ERR(p)) {
		printk(KERN_CONT "Failed to create ftrace wakeup test thread ");
		return -1;
	}

	/* make sure the thread is running at -deadline policy */
	wait_for_completion(&data.is_ready);
	/* start the tracing */
	ret = tracer_init(trace, tr);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		return ret;
	}

	/* reset the max latency */
	tr->max_latency = 0;
	while (p->on_rq) {
		/*
		 * Sleep to make sure the -deadline thread is asleep too.
		 * On virtual machines we can't rely on timings,
		 * but we want to make sure this test still works.
		 */
		msleep(100);
	}

	init_completion(&data.is_ready);

	data.go = 1;
	/* memory barrier is in the wake_up_process() */
	wake_up_process(p);

	/* Wait for the task to wake up */
	wait_for_completion(&data.is_ready);
	/* stop the tracing. */
	tracing_stop();
	/* check both trace buffers */
	ret = trace_test_buffer(&tr->trace_buffer, NULL);
	if (!ret)
		ret = trace_test_buffer(&tr->max_buffer, &count);

	trace->reset(tr);
	tracing_start();

	tr->max_latency = save_max;
	/* kill the thread */
	kthread_stop(p);

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
	}

	return ret;
}
#endif /* CONFIG_SCHED_TRACER */
#ifdef CONFIG_CONTEXT_SWITCH_TRACER
int
trace_selftest_startup_sched_switch(struct tracer *trace, struct trace_array *tr)
{
	unsigned long count;
	int ret;

	/* start the tracing */
	ret = tracer_init(trace, tr);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		return ret;
	}
	/* Sleep for a 1/10 of a second */
	msleep(100);
	/* stop the tracing. */
	tracing_stop();
	/* check the trace buffer */
	ret = trace_test_buffer(&tr->trace_buffer, &count);
	trace->reset(tr);
	tracing_start();

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
	}

	return ret;
}
#endif /* CONFIG_CONTEXT_SWITCH_TRACER */
#ifdef CONFIG_BRANCH_TRACER
int
trace_selftest_startup_branch(struct tracer *trace, struct trace_array *tr)
{
	unsigned long count;
	int ret;

	/* start the tracing */
	ret = tracer_init(trace, tr);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		return ret;
	}
	/* Sleep for a 1/10 of a second */
	msleep(100);
	/* stop the tracing. */
	tracing_stop();
	/* check the trace buffer */
	ret = trace_test_buffer(&tr->trace_buffer, &count);
	trace->reset(tr);
	tracing_start();

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
	}

	return ret;
}
#endif /* CONFIG_BRANCH_TRACER */