// SPDX-License-Identifier: GPL-2.0
/*
 * ring buffer based function tracer
 *
 * Copyright (C) 2007-2012 Steven Rostedt <srostedt@redhat.com>
 * Copyright (C) 2008 Ingo Molnar <mingo@redhat.com>
 *
 * Originally taken from the RT patch by:
 *    Arnaldo Carvalho de Melo <acme@redhat.com>
 *
 * Based on code from the latency_tracer, that is:
 *  Copyright (C) 2004-2006 Ingo Molnar
 *  Copyright (C) 2004 Nadia Yvette Chambers
 */
#include <linux/ring_buffer.h>
#include <generated/utsrelease.h>
#include <linux/stacktrace.h>
#include <linux/writeback.h>
#include <linux/kallsyms.h>
#include <linux/security.h>
#include <linux/seq_file.h>
#include <linux/irqflags.h>
#include <linux/debugfs.h>
#include <linux/tracefs.h>
#include <linux/pagemap.h>
#include <linux/hardirq.h>
#include <linux/linkage.h>
#include <linux/uaccess.h>
#include <linux/vmalloc.h>
#include <linux/ftrace.h>
#include <linux/module.h>
#include <linux/percpu.h>
#include <linux/splice.h>
#include <linux/kdebug.h>
#include <linux/string.h>
#include <linux/mount.h>
#include <linux/rwsem.h>
#include <linux/slab.h>
#include <linux/ctype.h>
#include <linux/init.h>
#include <linux/panic_notifier.h>
#include <linux/poll.h>
#include <linux/nmi.h>
#include <linux/trace.h>
#include <linux/sched/clock.h>
#include <linux/sched/rt.h>
#include <linux/fsnotify.h>
#include <linux/irq_work.h>
#include <linux/workqueue.h>

#include <asm/setup.h> /* COMMAND_LINE_SIZE */

#include "trace_output.h"
#ifdef CONFIG_FTRACE_STARTUP_TEST
/*
 * We need to change this state when a selftest is running.
 * A selftest will lurk into the ring-buffer to count the
 * entries inserted during the selftest, although some concurrent
 * insertions into the ring-buffer, such as trace_printk, could occur
 * at the same time, giving false positive or negative results.
 */
static bool __read_mostly tracing_selftest_running;

/*
 * If boot-time tracing (including tracers/events via the kernel cmdline)
 * is running, we do not want to run SELFTEST.
 */
bool __read_mostly tracing_selftest_disabled;

void __init disable_tracing_selftest(const char *reason)
{
        if (!tracing_selftest_disabled) {
                tracing_selftest_disabled = true;
                pr_info("Ftrace startup test is disabled due to %s\n", reason);
        }
}
#else
#define tracing_selftest_running        0
#define tracing_selftest_disabled       0
#endif
/* Pipe tracepoints to printk */
static struct trace_iterator *tracepoint_print_iter;
int tracepoint_printk;
static bool tracepoint_printk_stop_on_boot __initdata;
static DEFINE_STATIC_KEY_FALSE(tracepoint_printk_key);

/* For tracers that don't implement custom flags */
static struct tracer_opt dummy_tracer_opt[] = {
        { }
};

static int
dummy_set_flag(struct trace_array *tr, u32 old_flags, u32 bit, int set)
{
        return 0;
}

/*
 * To prevent the comm cache from being overwritten when no
 * tracing is active, only save the comm when a trace event
 * occurred.
 */
static DEFINE_PER_CPU(bool, trace_taskinfo_save);

/*
 * Kill all tracing for good (never come back).
 * It is initialized to 1 but will turn to zero if the initialization
 * of the tracer is successful. But that is the only place that sets
 * this back to zero.
 */
static int tracing_disabled = 1;
cpumask_var_t __read_mostly     tracing_buffer_mask;

/*
 * ftrace_dump_on_oops - variable to dump ftrace buffer on oops
 *
 * If there is an oops (or kernel panic) and the ftrace_dump_on_oops
 * is set, then ftrace_dump is called. This will output the contents
 * of the ftrace buffers to the console. This is very useful for
 * capturing traces that lead to crashes and outputting them to a
 * serial console.
 *
 * It is off by default, but you can enable it either by specifying
 * "ftrace_dump_on_oops" on the kernel command line, or by setting
 * /proc/sys/kernel/ftrace_dump_on_oops.
 * Set it to 1 to dump the buffers of all CPUs.
 * Set it to 2 to dump only the buffer of the CPU that triggered the oops.
 */

enum ftrace_dump_mode ftrace_dump_on_oops;

/* When set, tracing will stop when a WARN*() is hit */
int __disable_trace_on_warning;
#ifdef CONFIG_TRACE_EVAL_MAP_FILE
/* Map of enums to their values, for "eval_map" file */
struct trace_eval_map_head {
        struct module                   *mod;
        unsigned long                   length;
};

union trace_eval_map_item;

struct trace_eval_map_tail {
        /*
         * "end" is first and points to NULL as it must be different
         * from "mod" or "eval_string"
         */
        union trace_eval_map_item       *next;
        const char                      *end;   /* points to NULL */
};

static DEFINE_MUTEX(trace_eval_mutex);

/*
 * The trace_eval_maps are saved in an array with two extra elements,
 * one at the beginning, and one at the end. The beginning item contains
 * the count of the saved maps (head.length), and the module they
 * belong to if not built in (head.mod). The ending item contains a
 * pointer to the next array of saved eval_map items.
 */
union trace_eval_map_item {
        struct trace_eval_map           map;
        struct trace_eval_map_head      head;
        struct trace_eval_map_tail      tail;
};

static union trace_eval_map_item *trace_eval_maps;
#endif /* CONFIG_TRACE_EVAL_MAP_FILE */
int tracing_set_tracer(struct trace_array *tr, const char *buf);
static void ftrace_trace_userstack(struct trace_array *tr,
                                   struct trace_buffer *buffer,
                                   unsigned int trace_ctx);

#define MAX_TRACER_SIZE         100
static char bootup_tracer_buf[MAX_TRACER_SIZE] __initdata;
static char *default_bootup_tracer;

static bool allocate_snapshot;
static bool snapshot_at_boot;

static char boot_instance_info[COMMAND_LINE_SIZE] __initdata;
static int boot_instance_index;

static char boot_snapshot_info[COMMAND_LINE_SIZE] __initdata;
static int boot_snapshot_index;
static int __init set_cmdline_ftrace(char *str)
{
        strscpy(bootup_tracer_buf, str, MAX_TRACER_SIZE);
        default_bootup_tracer = bootup_tracer_buf;
        /* We are using ftrace early, expand it */
        trace_set_ring_buffer_expanded(NULL);
        return 1;
}
__setup("ftrace=", set_cmdline_ftrace);

static int __init set_ftrace_dump_on_oops(char *str)
{
        if (*str++ != '=' || !*str || !strcmp("1", str)) {
                ftrace_dump_on_oops = DUMP_ALL;
                return 1;
        }

        if (!strcmp("orig_cpu", str) || !strcmp("2", str)) {
                ftrace_dump_on_oops = DUMP_ORIG;
                return 1;
        }

        return 0;
}
__setup("ftrace_dump_on_oops", set_ftrace_dump_on_oops);
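
/*
 * Example (illustrative): the dump mode can be chosen on the kernel
 * command line, e.g.:
 *
 *      ftrace_dump_on_oops             (dump the buffers of all CPUs on oops)
 *      ftrace_dump_on_oops=orig_cpu    (dump only the buffer of the oops'ing CPU)
 */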
static int __init stop_trace_on_warning(char *str)
{
        if ((strcmp(str, "=0") != 0 && strcmp(str, "=off") != 0))
                __disable_trace_on_warning = 1;
        return 1;
}
__setup("traceoff_on_warning", stop_trace_on_warning);

static int __init boot_alloc_snapshot(char *str)
{
        char *slot = boot_snapshot_info + boot_snapshot_index;
        int left = sizeof(boot_snapshot_info) - boot_snapshot_index;
        int ret;

        if (str[0] == '=') {
                str++;
                if (strlen(str) >= left)
                        return -1;

                ret = snprintf(slot, left, "%s\t", str);
                boot_snapshot_index += ret;
        }
        allocate_snapshot = true;
        /* We also need the main ring buffer expanded */
        trace_set_ring_buffer_expanded(NULL);
        return 1;
}
__setup("alloc_snapshot", boot_alloc_snapshot);

static int __init boot_snapshot(char *str)
{
        snapshot_at_boot = true;
        boot_alloc_snapshot(str);
        return 1;
}
__setup("ftrace_boot_snapshot", boot_snapshot);

static int __init boot_instance(char *str)
{
        char *slot = boot_instance_info + boot_instance_index;
        int left = sizeof(boot_instance_info) - boot_instance_index;
        int ret;

        if (strlen(str) >= left)
                return -1;

        ret = snprintf(slot, left, "%s\t", str);
        boot_instance_index += ret;

        return 1;
}
__setup("trace_instance=", boot_instance);
static char trace_boot_options_buf[MAX_TRACER_SIZE] __initdata;

static int __init set_trace_boot_options(char *str)
{
        strscpy(trace_boot_options_buf, str, MAX_TRACER_SIZE);
        return 1;
}
__setup("trace_options=", set_trace_boot_options);

static char trace_boot_clock_buf[MAX_TRACER_SIZE] __initdata;
static char *trace_boot_clock __initdata;

static int __init set_trace_boot_clock(char *str)
{
        strscpy(trace_boot_clock_buf, str, MAX_TRACER_SIZE);
        trace_boot_clock = trace_boot_clock_buf;
        return 1;
}
__setup("trace_clock=", set_trace_boot_clock);

static int __init set_tracepoint_printk(char *str)
{
        /* Ignore the "tp_printk_stop_on_boot" param */
        if (*str == '_')
                return 0;

        if ((strcmp(str, "=0") != 0 && strcmp(str, "=off") != 0))
                tracepoint_printk = 1;
        return 1;
}
__setup("tp_printk", set_tracepoint_printk);

static int __init set_tracepoint_printk_stop(char *str)
{
        tracepoint_printk_stop_on_boot = true;
        return 1;
}
__setup("tp_printk_stop_on_boot", set_tracepoint_printk_stop);
unsigned long long ns2usecs(u64 nsec)
{
        nsec += 500;
        do_div(nsec, 1000);
        return nsec;
}

static void
trace_process_export(struct trace_export *export,
                     struct ring_buffer_event *event, int flag)
{
        struct trace_entry *entry;
        unsigned int size = 0;

        if (export->flags & flag) {
                entry = ring_buffer_event_data(event);
                size = ring_buffer_event_length(event);
                export->write(export, entry, size);
        }
}

static DEFINE_MUTEX(ftrace_export_lock);

static struct trace_export __rcu *ftrace_exports_list __read_mostly;

static DEFINE_STATIC_KEY_FALSE(trace_function_exports_enabled);
static DEFINE_STATIC_KEY_FALSE(trace_event_exports_enabled);
static DEFINE_STATIC_KEY_FALSE(trace_marker_exports_enabled);
static inline void ftrace_exports_enable(struct trace_export *export)
{
        if (export->flags & TRACE_EXPORT_FUNCTION)
                static_branch_inc(&trace_function_exports_enabled);

        if (export->flags & TRACE_EXPORT_EVENT)
                static_branch_inc(&trace_event_exports_enabled);

        if (export->flags & TRACE_EXPORT_MARKER)
                static_branch_inc(&trace_marker_exports_enabled);
}

static inline void ftrace_exports_disable(struct trace_export *export)
{
        if (export->flags & TRACE_EXPORT_FUNCTION)
                static_branch_dec(&trace_function_exports_enabled);

        if (export->flags & TRACE_EXPORT_EVENT)
                static_branch_dec(&trace_event_exports_enabled);

        if (export->flags & TRACE_EXPORT_MARKER)
                static_branch_dec(&trace_marker_exports_enabled);
}

static void ftrace_exports(struct ring_buffer_event *event, int flag)
{
        struct trace_export *export;

        preempt_disable_notrace();

        export = rcu_dereference_raw_check(ftrace_exports_list);
        while (export) {
                trace_process_export(export, event, flag);
                export = rcu_dereference_raw_check(export->next);
        }

        preempt_enable_notrace();
}
static void
add_trace_export(struct trace_export **list, struct trace_export *export)
{
        rcu_assign_pointer(export->next, *list);
        /*
         * We are entering export into the list but another
         * CPU might be walking that list. We need to make sure
         * the export->next pointer is valid before another CPU sees
         * the export pointer inserted into the list.
         */
        rcu_assign_pointer(*list, export);
}

static int
rm_trace_export(struct trace_export **list, struct trace_export *export)
{
        struct trace_export **p;

        for (p = list; *p != NULL; p = &(*p)->next)
                if (*p == export)
                        break;

        if (*p != export)
                return -1;

        rcu_assign_pointer(*p, (*p)->next);

        return 0;
}

static void
add_ftrace_export(struct trace_export **list, struct trace_export *export)
{
        ftrace_exports_enable(export);

        add_trace_export(list, export);
}

static int
rm_ftrace_export(struct trace_export **list, struct trace_export *export)
{
        int ret;

        ret = rm_trace_export(list, export);
        ftrace_exports_disable(export);

        return ret;
}
int register_ftrace_export(struct trace_export *export)
{
        if (WARN_ON_ONCE(!export->write))
                return -1;

        mutex_lock(&ftrace_export_lock);

        add_ftrace_export(&ftrace_exports_list, export);

        mutex_unlock(&ftrace_export_lock);

        return 0;
}
EXPORT_SYMBOL_GPL(register_ftrace_export);

int unregister_ftrace_export(struct trace_export *export)
{
        int ret;

        mutex_lock(&ftrace_export_lock);

        ret = rm_ftrace_export(&ftrace_exports_list, export);

        mutex_unlock(&ftrace_export_lock);

        return ret;
}
EXPORT_SYMBOL_GPL(unregister_ftrace_export);
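
/*
 * Illustrative sketch of a trace_export client (the callback and struct
 * names here are hypothetical). An exporter supplies a ->write() callback
 * and a flag mask selecting which records it wants:
 *
 *      static void my_export_write(struct trace_export *export,
 *                                  const void *entry, unsigned int size)
 *      {
 *              // forward the raw trace entry somewhere, e.g. a device
 *      }
 *
 *      static struct trace_export my_export = {
 *              .write  = my_export_write,
 *              .flags  = TRACE_EXPORT_EVENT,
 *      };
 *
 *      register_ftrace_export(&my_export);
 *      ...
 *      unregister_ftrace_export(&my_export);
 */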
/* trace_flags holds trace_options default values */
#define TRACE_DEFAULT_FLAGS                                             \
        (FUNCTION_DEFAULT_FLAGS |                                       \
         TRACE_ITER_PRINT_PARENT | TRACE_ITER_PRINTK |                  \
         TRACE_ITER_ANNOTATE | TRACE_ITER_CONTEXT_INFO |                \
         TRACE_ITER_RECORD_CMD | TRACE_ITER_OVERWRITE |                 \
         TRACE_ITER_IRQ_INFO | TRACE_ITER_MARKERS |                     \
         TRACE_ITER_HASH_PTR)

/* trace_options that are only supported by global_trace */
#define TOP_LEVEL_TRACE_FLAGS (TRACE_ITER_PRINTK |                      \
               TRACE_ITER_PRINTK_MSGONLY | TRACE_ITER_RECORD_CMD)

/* trace_flags that are default zero for instances */
#define ZEROED_TRACE_FLAGS \
        (TRACE_ITER_EVENT_FORK | TRACE_ITER_FUNC_FORK)
/*
 * The global_trace is the descriptor that holds the top-level tracing
 * buffers for the live tracing.
 */
static struct trace_array global_trace = {
        .trace_flags = TRACE_DEFAULT_FLAGS,
};

void trace_set_ring_buffer_expanded(struct trace_array *tr)
{
        if (!tr)
                tr = &global_trace;
        tr->ring_buffer_expanded = true;
}

LIST_HEAD(ftrace_trace_arrays);
int trace_array_get(struct trace_array *this_tr)
{
        struct trace_array *tr;
        int ret = -ENODEV;

        mutex_lock(&trace_types_lock);
        list_for_each_entry(tr, &ftrace_trace_arrays, list) {
                if (tr == this_tr) {
                        tr->ref++;
                        ret = 0;
                        break;
                }
        }
        mutex_unlock(&trace_types_lock);

        return ret;
}

static void __trace_array_put(struct trace_array *this_tr)
{
        WARN_ON(!this_tr->ref);
        this_tr->ref--;
}

/**
 * trace_array_put - Decrement the reference counter for this trace array.
 * @this_tr : pointer to the trace array
 *
 * NOTE: Use this when we no longer need the trace array returned by
 * trace_array_get_by_name(). This ensures the trace array can be later
 * destroyed.
 */
void trace_array_put(struct trace_array *this_tr)
{
        if (!this_tr)
                return;

        mutex_lock(&trace_types_lock);
        __trace_array_put(this_tr);
        mutex_unlock(&trace_types_lock);
}
EXPORT_SYMBOL_GPL(trace_array_put);
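
/*
 * Typical get/put pairing (sketch; the instance name is hypothetical):
 *
 *      struct trace_array *tr;
 *
 *      tr = trace_array_get_by_name("my_instance");
 *      if (tr) {
 *              trace_array_printk(tr, _THIS_IP_, "hello\n");
 *              trace_array_put(tr);
 *      }
 */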
int tracing_check_open_get_tr(struct trace_array *tr)
{
        int ret;

        ret = security_locked_down(LOCKDOWN_TRACEFS);
        if (ret)
                return ret;

        if (tracing_disabled)
                return -ENODEV;

        if (tr && trace_array_get(tr) < 0)
                return -ENODEV;

        return 0;
}

int call_filter_check_discard(struct trace_event_call *call, void *rec,
                              struct trace_buffer *buffer,
                              struct ring_buffer_event *event)
{
        if (unlikely(call->flags & TRACE_EVENT_FL_FILTERED) &&
            !filter_match_preds(call->filter, rec)) {
                __trace_event_discard_commit(buffer, event);
                return 1;
        }

        return 0;
}
/**
 * trace_find_filtered_pid - check if a pid exists in a filtered_pid list
 * @filtered_pids: The list of pids to check
 * @search_pid: The PID to find in @filtered_pids
 *
 * Returns true if @search_pid is found in @filtered_pids, and false otherwise.
 */
bool
trace_find_filtered_pid(struct trace_pid_list *filtered_pids, pid_t search_pid)
{
        return trace_pid_list_is_set(filtered_pids, search_pid);
}

/**
 * trace_ignore_this_task - should a task be ignored for tracing
 * @filtered_pids: The list of pids to check
 * @filtered_no_pids: The list of pids not to be traced
 * @task: The task that should be ignored if not filtered
 *
 * Checks if @task should be traced or not from @filtered_pids.
 * Returns true if @task should *NOT* be traced.
 * Returns false if @task should be traced.
 */
bool
trace_ignore_this_task(struct trace_pid_list *filtered_pids,
                       struct trace_pid_list *filtered_no_pids,
                       struct task_struct *task)
{
        /*
         * If filtered_no_pids is not empty, and the task's pid is listed
         * in filtered_no_pids, then return true.
         * Otherwise, if filtered_pids is empty, that means we can
         * trace all tasks. If it has content, then only trace pids
         * within filtered_pids.
         */

        return (filtered_pids &&
                !trace_find_filtered_pid(filtered_pids, task->pid)) ||
                (filtered_no_pids &&
                 trace_find_filtered_pid(filtered_no_pids, task->pid));
}
/**
 * trace_filter_add_remove_task - Add or remove a task from a pid_list
 * @pid_list: The list to modify
 * @self: The current task for fork or NULL for exit
 * @task: The task to add or remove
 *
 * If adding a task, if @self is defined, the task is only added if @self
 * is also included in @pid_list. This happens on fork and tasks should
 * only be added when the parent is listed. If @self is NULL, then the
 * @task pid will be removed from the list, which would happen on exit
 * of a task.
 */
void trace_filter_add_remove_task(struct trace_pid_list *pid_list,
                                  struct task_struct *self,
                                  struct task_struct *task)
{
        if (!pid_list)
                return;

        /* For forks, we only add if the forking task is listed */
        if (self) {
                if (!trace_find_filtered_pid(pid_list, self->pid))
                        return;
        }

        /* "self" is set for forks, and NULL for exits */
        if (self)
                trace_pid_list_set(pid_list, task->pid);
        else
                trace_pid_list_clear(pid_list, task->pid);
}
/**
 * trace_pid_next - Used for seq_file to get to the next pid of a pid_list
 * @pid_list: The pid list to show
 * @v: The last pid that was shown (+1 the actual pid to let zero be displayed)
 * @pos: The position of the file
 *
 * This is used by the seq_file "next" operation to iterate the pids
 * listed in a trace_pid_list structure.
 *
 * Returns the pid+1 as we want to display pid of zero, but NULL would
 * stop the iteration.
 */
void *trace_pid_next(struct trace_pid_list *pid_list, void *v, loff_t *pos)
{
        long pid = (unsigned long)v;
        unsigned int next;

        (*pos)++;

        /* pid already is +1 of the actual previous bit */
        if (trace_pid_list_next(pid_list, pid, &next) < 0)
                return NULL;

        pid = next;

        /* Return pid + 1 to allow zero to be represented */
        return (void *)(pid + 1);
}
/**
 * trace_pid_start - Used for seq_file to start reading pid lists
 * @pid_list: The pid list to show
 * @pos: The position of the file
 *
 * This is used by the seq_file "start" operation to start the iteration
 * of listing pids.
 *
 * Returns the pid+1 as we want to display pid of zero, but NULL would
 * stop the iteration.
 */
void *trace_pid_start(struct trace_pid_list *pid_list, loff_t *pos)
{
        unsigned long pid;
        unsigned int first;
        loff_t l = 0;

        if (trace_pid_list_first(pid_list, &first) < 0)
                return NULL;

        pid = first;

        /* Return pid + 1 so that zero can be the exit value */
        for (pid++; pid && l < *pos;
             pid = (unsigned long)trace_pid_next(pid_list, (void *)pid, &l))
                ;
        return (void *)pid;
}

/**
 * trace_pid_show - show the current pid in seq_file processing
 * @m: The seq_file structure to write into
 * @v: A void pointer of the pid (+1) value to display
 *
 * Can be directly used by seq_file operations to display the current
 * pid value.
 */
int trace_pid_show(struct seq_file *m, void *v)
{
        unsigned long pid = (unsigned long)v - 1;

        seq_printf(m, "%lu\n", pid);
        return 0;
}
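
/*
 * Sketch of how the trace_pid_* helpers slot into seq_file operations
 * (the ops struct and the start/next/stop wrappers are the caller's;
 * the names here are hypothetical):
 *
 *      static const struct seq_operations my_pid_seq_ops = {
 *              .start  = my_pid_start, // wraps trace_pid_start()
 *              .next   = my_pid_next,  // wraps trace_pid_next()
 *              .stop   = my_pid_stop,
 *              .show   = trace_pid_show,
 *      };
 */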
/* 128 should be much more than enough */
#define PID_BUF_SIZE            127

int trace_pid_write(struct trace_pid_list *filtered_pids,
                    struct trace_pid_list **new_pid_list,
                    const char __user *ubuf, size_t cnt)
{
        struct trace_pid_list *pid_list;
        struct trace_parser parser;
        unsigned long val;
        int nr_pids = 0;
        ssize_t read = 0;
        ssize_t ret;
        loff_t pos;
        pid_t pid;

        if (trace_parser_get_init(&parser, PID_BUF_SIZE + 1))
                return -ENOMEM;

        /*
         * Always recreate a new array. The write is an all or nothing
         * operation. Always create a new array when adding new pids by
         * the user. If the operation fails, then the current list is
         * used.
         */
        pid_list = trace_pid_list_alloc();
        if (!pid_list) {
                trace_parser_put(&parser);
                return -ENOMEM;
        }

        if (filtered_pids) {
                /* copy the current bits to the new max */
                ret = trace_pid_list_first(filtered_pids, &pid);
                while (!ret) {
                        trace_pid_list_set(pid_list, pid);
                        ret = trace_pid_list_next(filtered_pids, pid + 1, &pid);
                        nr_pids++;
                }
        }

        ret = 0;
        while (cnt > 0) {

                pos = 0;

                ret = trace_get_user(&parser, ubuf, cnt, &pos);
                if (ret < 0)
                        break;

                read += ret;
                ubuf += ret;
                cnt -= ret;

                if (!trace_parser_loaded(&parser))
                        break;

                ret = -EINVAL;
                if (kstrtoul(parser.buffer, 0, &val))
                        break;

                pid = (pid_t)val;

                if (trace_pid_list_set(pid_list, pid) < 0) {
                        ret = -1;
                        break;
                }
                nr_pids++;

                trace_parser_clear(&parser);
                ret = 0;
        }
        trace_parser_put(&parser);

        if (ret < 0) {
                trace_pid_list_free(pid_list);
                return ret;
        }

        if (!nr_pids) {
                /* Cleared the list of pids */
                trace_pid_list_free(pid_list);
                pid_list = NULL;
        }

        *new_pid_list = pid_list;

        return read;
}
static u64 buffer_ftrace_now(struct array_buffer *buf, int cpu)
{
        u64 ts;

        /* Early boot up does not have a buffer yet */
        if (!buf->buffer)
                return trace_clock_local();

        ts = ring_buffer_time_stamp(buf->buffer);
        ring_buffer_normalize_time_stamp(buf->buffer, cpu, &ts);

        return ts;
}

u64 ftrace_now(int cpu)
{
        return buffer_ftrace_now(&global_trace.array_buffer, cpu);
}

/**
 * tracing_is_enabled - Show if global_trace has been enabled
 *
 * Shows if the global trace has been enabled or not. It uses the
 * mirror flag "buffer_disabled" to be used in fast paths such as for
 * the irqsoff tracer. But it may be inaccurate due to races. If you
 * need to know the accurate state, use tracing_is_on() which is a little
 * slower, but accurate.
 */
int tracing_is_enabled(void)
{
        /*
         * For quick access (irqsoff uses this in fast path), just
         * return the mirror variable of the state of the ring buffer.
         * It's a little racy, but we don't really care.
         */
        smp_rmb();
        return !global_trace.buffer_disabled;
}
/*
 * trace_buf_size is the size in bytes that is allocated
 * for a buffer. Note, the number of bytes is always rounded
 * to page size.
 *
 * This number is purposely set to a low number of 16384.
 * If the dump on oops happens, it will be much appreciated
 * to not have to wait for all that output. Anyway this can be
 * boot time and run time configurable.
 */
#define TRACE_BUF_SIZE_DEFAULT  1441792UL /* 16384 * 88 (sizeof(entry)) */

static unsigned long            trace_buf_size = TRACE_BUF_SIZE_DEFAULT;

/* trace_types holds a linked list of available tracers. */
static struct tracer            *trace_types __read_mostly;

/*
 * trace_types_lock is used to protect the trace_types list.
 */
DEFINE_MUTEX(trace_types_lock);
/*
 * serialize the access of the ring buffer
 *
 * ring buffer serializes readers, but it is low level protection.
 * The validity of the events (as returned by ring_buffer_peek() etc.)
 * is not protected by the ring buffer.
 *
 * The content of events may become garbage if we allow other processes
 * to consume these events concurrently:
 *   A) the page of the consumed events may become a normal page
 *      (not a reader page) in the ring buffer, and this page will be
 *      rewritten by the events producer.
 *   B) The page of the consumed events may become a page for splice_read,
 *      and this page will be returned to the system.
 *
 * These primitives allow multi process access to different cpu ring buffers
 * concurrently.
 *
 * These primitives don't distinguish read-only and read-consume access.
 * Multiple read-only accesses are also serialized.
 */

#ifdef CONFIG_SMP
static DECLARE_RWSEM(all_cpu_access_lock);
static DEFINE_PER_CPU(struct mutex, cpu_access_lock);
static inline void trace_access_lock(int cpu)
{
        if (cpu == RING_BUFFER_ALL_CPUS) {
                /* gain it for accessing the whole ring buffer. */
                down_write(&all_cpu_access_lock);
        } else {
                /* gain it for accessing a cpu ring buffer. */

                /* First, block other trace_access_lock(RING_BUFFER_ALL_CPUS). */
                down_read(&all_cpu_access_lock);

                /* Second, block other access to this @cpu ring buffer. */
                mutex_lock(&per_cpu(cpu_access_lock, cpu));
        }
}

static inline void trace_access_unlock(int cpu)
{
        if (cpu == RING_BUFFER_ALL_CPUS) {
                up_write(&all_cpu_access_lock);
        } else {
                mutex_unlock(&per_cpu(cpu_access_lock, cpu));
                up_read(&all_cpu_access_lock);
        }
}

static inline void trace_access_lock_init(void)
{
        int cpu;

        for_each_possible_cpu(cpu)
                mutex_init(&per_cpu(cpu_access_lock, cpu));
}
#else

static DEFINE_MUTEX(access_lock);

static inline void trace_access_lock(int cpu)
{
        (void)cpu;
        mutex_lock(&access_lock);
}

static inline void trace_access_unlock(int cpu)
{
        (void)cpu;
        mutex_unlock(&access_lock);
}

static inline void trace_access_lock_init(void)
{
}

#endif
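
/*
 * Readers are expected to bracket ring-buffer consumption with these
 * helpers, e.g. (illustrative sketch):
 *
 *      trace_access_lock(cpu);
 *      ... consume events from the per-cpu (or all-cpu) buffer ...
 *      trace_access_unlock(cpu);
 */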
#ifdef CONFIG_STACKTRACE
static void __ftrace_trace_stack(struct trace_buffer *buffer,
                                 unsigned int trace_ctx,
                                 int skip, struct pt_regs *regs);
static inline void ftrace_trace_stack(struct trace_array *tr,
                                      struct trace_buffer *buffer,
                                      unsigned int trace_ctx,
                                      int skip, struct pt_regs *regs);

#else
static inline void __ftrace_trace_stack(struct trace_buffer *buffer,
                                        unsigned int trace_ctx,
                                        int skip, struct pt_regs *regs)
{
}
static inline void ftrace_trace_stack(struct trace_array *tr,
                                      struct trace_buffer *buffer,
                                      unsigned int trace_ctx,
                                      int skip, struct pt_regs *regs)
{
}

#endif
static __always_inline void
trace_event_setup(struct ring_buffer_event *event,
                  int type, unsigned int trace_ctx)
{
        struct trace_entry *ent = ring_buffer_event_data(event);

        tracing_generic_entry_update(ent, type, trace_ctx);
}

static __always_inline struct ring_buffer_event *
__trace_buffer_lock_reserve(struct trace_buffer *buffer,
                            int type,
                            unsigned long len,
                            unsigned int trace_ctx)
{
        struct ring_buffer_event *event;

        event = ring_buffer_lock_reserve(buffer, len);
        if (event != NULL)
                trace_event_setup(event, type, trace_ctx);

        return event;
}

void tracer_tracing_on(struct trace_array *tr)
{
        if (tr->array_buffer.buffer)
                ring_buffer_record_on(tr->array_buffer.buffer);
        /*
         * This flag is looked at when buffers haven't been allocated
         * yet, or by some tracers (like irqsoff), that just want to
         * know if the ring buffer has been disabled, but it can handle
         * races of where it gets disabled but we still do a record.
         * As the check is in the fast path of the tracers, it is more
         * important to be fast than accurate.
         */
        tr->buffer_disabled = 0;
        /* Make the flag seen by readers */
        smp_wmb();
}

/**
 * tracing_on - enable tracing buffers
 *
 * This function enables tracing buffers that may have been
 * disabled with tracing_off.
 */
void tracing_on(void)
{
        tracer_tracing_on(&global_trace);
}
EXPORT_SYMBOL_GPL(tracing_on);
static __always_inline void
__buffer_unlock_commit(struct trace_buffer *buffer, struct ring_buffer_event *event)
{
        __this_cpu_write(trace_taskinfo_save, true);

        /* If this is the temp buffer, we need to commit fully */
        if (this_cpu_read(trace_buffered_event) == event) {
                /* Length is in event->array[0] */
                ring_buffer_write(buffer, event->array[0], &event->array[1]);
                /* Release the temp buffer */
                this_cpu_dec(trace_buffered_event_cnt);
                /* ring_buffer_unlock_commit() enables preemption */
                preempt_enable_notrace();
        } else
                ring_buffer_unlock_commit(buffer);
}
int __trace_array_puts(struct trace_array *tr, unsigned long ip,
                       const char *str, int size)
{
        struct ring_buffer_event *event;
        struct trace_buffer *buffer;
        struct print_entry *entry;
        unsigned int trace_ctx;
        int alloc;

        if (!(tr->trace_flags & TRACE_ITER_PRINTK))
                return 0;

        if (unlikely(tracing_selftest_running && tr == &global_trace))
                return 0;

        if (unlikely(tracing_disabled))
                return 0;

        alloc = sizeof(*entry) + size + 2; /* possible \n added */

        trace_ctx = tracing_gen_ctx();
        buffer = tr->array_buffer.buffer;
        ring_buffer_nest_start(buffer);
        event = __trace_buffer_lock_reserve(buffer, TRACE_PRINT, alloc,
                                            trace_ctx);
        if (!event) {
                size = 0;
                goto out;
        }

        entry = ring_buffer_event_data(event);
        entry->ip = ip;

        memcpy(&entry->buf, str, size);

        /* Add a newline if necessary */
        if (entry->buf[size - 1] != '\n') {
                entry->buf[size] = '\n';
                entry->buf[size + 1] = '\0';
        } else
                entry->buf[size] = '\0';

        __buffer_unlock_commit(buffer, event);
        ftrace_trace_stack(tr, buffer, trace_ctx, 4, NULL);
 out:
        ring_buffer_nest_end(buffer);
        return size;
}
EXPORT_SYMBOL_GPL(__trace_array_puts);
/**
 * __trace_puts - write a constant string into the trace buffer.
 * @ip:    The address of the caller
 * @str:   The constant string to write
 * @size:  The size of the string.
 */
int __trace_puts(unsigned long ip, const char *str, int size)
{
        return __trace_array_puts(&global_trace, ip, str, size);
}
EXPORT_SYMBOL_GPL(__trace_puts);
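
/*
 * Callers normally reach this through the trace_puts() macro, e.g.
 * (illustrative sketch):
 *
 *      trace_puts("reached the fast path\n");
 *
 * which supplies _THIS_IP_ and the string size on the caller's behalf.
 */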
/**
 * __trace_bputs - write the pointer to a constant string into trace buffer
 * @ip:    The address of the caller
 * @str:   The constant string to write to the buffer to
 */
int __trace_bputs(unsigned long ip, const char *str)
{
        struct ring_buffer_event *event;
        struct trace_buffer *buffer;
        struct bputs_entry *entry;
        unsigned int trace_ctx;
        int size = sizeof(struct bputs_entry);
        int ret = 0;

        if (!(global_trace.trace_flags & TRACE_ITER_PRINTK))
                return 0;

        if (unlikely(tracing_selftest_running || tracing_disabled))
                return 0;

        trace_ctx = tracing_gen_ctx();
        buffer = global_trace.array_buffer.buffer;

        ring_buffer_nest_start(buffer);
        event = __trace_buffer_lock_reserve(buffer, TRACE_BPUTS, size,
                                            trace_ctx);
        if (!event)
                goto out;

        entry = ring_buffer_event_data(event);
        entry->ip  = ip;
        entry->str = str;

        __buffer_unlock_commit(buffer, event);
        ftrace_trace_stack(&global_trace, buffer, trace_ctx, 4, NULL);

        ret = 1;
 out:
        ring_buffer_nest_end(buffer);
        return ret;
}
EXPORT_SYMBOL_GPL(__trace_bputs);
#ifdef CONFIG_TRACER_SNAPSHOT
static void tracing_snapshot_instance_cond(struct trace_array *tr,
                                           void *cond_data)
{
        struct tracer *tracer = tr->current_trace;
        unsigned long flags;

        if (in_nmi()) {
                trace_array_puts(tr, "*** SNAPSHOT CALLED FROM NMI CONTEXT ***\n");
                trace_array_puts(tr, "*** snapshot is being ignored        ***\n");
                return;
        }

        if (!tr->allocated_snapshot) {
                trace_array_puts(tr, "*** SNAPSHOT NOT ALLOCATED ***\n");
                trace_array_puts(tr, "*** stopping trace here!   ***\n");
                tracer_tracing_off(tr);
                return;
        }

        /* Note, snapshot can not be used when the tracer uses it */
        if (tracer->use_max_tr) {
                trace_array_puts(tr, "*** LATENCY TRACER ACTIVE ***\n");
                trace_array_puts(tr, "*** Can not use snapshot (sorry) ***\n");
                return;
        }

        local_irq_save(flags);
        update_max_tr(tr, current, smp_processor_id(), cond_data);
        local_irq_restore(flags);
}

void tracing_snapshot_instance(struct trace_array *tr)
{
        tracing_snapshot_instance_cond(tr, NULL);
}
/**
 * tracing_snapshot - take a snapshot of the current buffer.
 *
 * This causes a swap between the snapshot buffer and the current live
 * tracing buffer. You can use this to take snapshots of the live
 * trace when some condition is triggered, but continue to trace.
 *
 * Note, make sure to allocate the snapshot with either
 * a tracing_snapshot_alloc(), or by doing it manually
 * with: echo 1 > /sys/kernel/tracing/snapshot
 *
 * If the snapshot buffer is not allocated, it will stop tracing.
 * Basically making a permanent snapshot.
 */
void tracing_snapshot(void)
{
        struct trace_array *tr = &global_trace;

        tracing_snapshot_instance(tr);
}
EXPORT_SYMBOL_GPL(tracing_snapshot);
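
/*
 * Typical in-kernel use (illustrative sketch): allocate once from a
 * context that may sleep, then snapshot wherever the condition of
 * interest fires:
 *
 *      tracing_alloc_snapshot();       // sleepable context
 *      ...
 *      tracing_snapshot();             // e.g. from an interrupt handler
 */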
/**
 * tracing_snapshot_cond - conditionally take a snapshot of the current buffer.
 * @tr:         The tracing instance to snapshot
 * @cond_data:  The data to be tested conditionally, and possibly saved
 *
 * This is the same as tracing_snapshot() except that the snapshot is
 * conditional - the snapshot will only happen if the
 * cond_snapshot.update() implementation receiving the cond_data
 * returns true, which means that the trace array's cond_snapshot
 * update() operation used the cond_data to determine whether the
 * snapshot should be taken, and if it was, presumably saved it along
 * with the snapshot.
 */
void tracing_snapshot_cond(struct trace_array *tr, void *cond_data)
{
        tracing_snapshot_instance_cond(tr, cond_data);
}
EXPORT_SYMBOL_GPL(tracing_snapshot_cond);
/**
 * tracing_cond_snapshot_data - get the user data associated with a snapshot
 * @tr:         The tracing instance
 *
 * When the user enables a conditional snapshot using
 * tracing_snapshot_cond_enable(), the user-defined cond_data is saved
 * with the snapshot. This accessor is used to retrieve it.
 *
 * Should not be called from cond_snapshot.update(), since it takes
 * the tr->max_lock lock, which the code calling
 * cond_snapshot.update() has already done.
 *
 * Returns the cond_data associated with the trace array's snapshot.
 */
void *tracing_cond_snapshot_data(struct trace_array *tr)
{
        void *cond_data = NULL;

        local_irq_disable();
        arch_spin_lock(&tr->max_lock);

        if (tr->cond_snapshot)
                cond_data = tr->cond_snapshot->cond_data;

        arch_spin_unlock(&tr->max_lock);
        local_irq_enable();

        return cond_data;
}
EXPORT_SYMBOL_GPL(tracing_cond_snapshot_data);
static int resize_buffer_duplicate_size(struct array_buffer *trace_buf,
                                        struct array_buffer *size_buf, int cpu_id);
static void set_buffer_entries(struct array_buffer *buf, unsigned long val);

int tracing_alloc_snapshot_instance(struct trace_array *tr)
{
        int ret;

        if (!tr->allocated_snapshot) {

                /* allocate spare buffer */
                ret = resize_buffer_duplicate_size(&tr->max_buffer,
                                   &tr->array_buffer, RING_BUFFER_ALL_CPUS);
                if (ret < 0)
                        return ret;

                tr->allocated_snapshot = true;
        }

        return 0;
}

static void free_snapshot(struct trace_array *tr)
{
        /*
         * We don't free the ring buffer. Instead, we resize it, because
         * the max_tr ring buffer has some state (e.g. ring->clock) and
         * we want to preserve it.
         */
        ring_buffer_resize(tr->max_buffer.buffer, 1, RING_BUFFER_ALL_CPUS);
        set_buffer_entries(&tr->max_buffer, 1);
        tracing_reset_online_cpus(&tr->max_buffer);
        tr->allocated_snapshot = false;
}
/**
 * tracing_alloc_snapshot - allocate snapshot buffer.
 *
 * This only allocates the snapshot buffer if it isn't already
 * allocated - it doesn't also take a snapshot.
 *
 * This is meant to be used in cases where the snapshot buffer needs
 * to be set up for events that can't sleep but need to be able to
 * trigger a snapshot.
 */
int tracing_alloc_snapshot(void)
{
        struct trace_array *tr = &global_trace;
        int ret;

        ret = tracing_alloc_snapshot_instance(tr);
        WARN_ON(ret < 0);

        return ret;
}
EXPORT_SYMBOL_GPL(tracing_alloc_snapshot);
/**
 * tracing_snapshot_alloc - allocate and take a snapshot of the current buffer.
 *
 * This is similar to tracing_snapshot(), but it will allocate the
 * snapshot buffer if it isn't already allocated. Use this only
 * where it is safe to sleep, as the allocation may sleep.
 *
 * This causes a swap between the snapshot buffer and the current live
 * tracing buffer. You can use this to take snapshots of the live
 * trace when some condition is triggered, but continue to trace.
 */
void tracing_snapshot_alloc(void)
{
        int ret;

        ret = tracing_alloc_snapshot();
        if (ret < 0)
                return;

        tracing_snapshot();
}
EXPORT_SYMBOL_GPL(tracing_snapshot_alloc);
/**
 * tracing_snapshot_cond_enable - enable conditional snapshot for an instance
 * @tr:         The tracing instance
 * @cond_data:  User data to associate with the snapshot
 * @update:     Implementation of the cond_snapshot update function
 *
 * Check whether the conditional snapshot for the given instance has
 * already been enabled, or if the current tracer is already using a
 * snapshot; if so, return -EBUSY, else create a cond_snapshot and
 * save the cond_data and update function inside.
 *
 * Returns 0 if successful, error otherwise.
 */
int tracing_snapshot_cond_enable(struct trace_array *tr, void *cond_data,
                                 cond_update_fn_t update)
{
        struct cond_snapshot *cond_snapshot;
        int ret = 0;

        cond_snapshot = kzalloc(sizeof(*cond_snapshot), GFP_KERNEL);
        if (!cond_snapshot)
                return -ENOMEM;

        cond_snapshot->cond_data = cond_data;
        cond_snapshot->update = update;

        mutex_lock(&trace_types_lock);

        ret = tracing_alloc_snapshot_instance(tr);
        if (ret)
                goto fail_unlock;

        if (tr->current_trace->use_max_tr) {
                ret = -EBUSY;
                goto fail_unlock;
        }

        /*
         * The cond_snapshot can only change to NULL without the
         * trace_types_lock. We don't care if we race with it going
         * to NULL, but we want to make sure that it's not set to
         * something other than NULL when we get here, which we can
         * do safely with only holding the trace_types_lock and not
         * having to take the max_lock.
         */
        if (tr->cond_snapshot) {
                ret = -EBUSY;
                goto fail_unlock;
        }

        local_irq_disable();
        arch_spin_lock(&tr->max_lock);
        tr->cond_snapshot = cond_snapshot;
        arch_spin_unlock(&tr->max_lock);
        local_irq_enable();

        mutex_unlock(&trace_types_lock);

        return ret;

 fail_unlock:
        mutex_unlock(&trace_types_lock);
        kfree(cond_snapshot);
        return ret;
}
EXPORT_SYMBOL_GPL(tracing_snapshot_cond_enable);
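
/*
 * Sketch of a conditional-snapshot user (the update callback and its
 * policy are hypothetical):
 *
 *      static bool my_update(struct trace_array *tr, void *cond_data)
 *      {
 *              // decide, based on cond_data, whether to take the snapshot
 *              return true;
 *      }
 *
 *      tracing_snapshot_cond_enable(tr, my_data, my_update);
 *      ...
 *      tracing_snapshot_cond(tr, my_data);     // snapshots only if
 *                                              // my_update() returns true
 *      ...
 *      tracing_snapshot_cond_disable(tr);
 */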
/**
 * tracing_snapshot_cond_disable - disable conditional snapshot for an instance
 * @tr:         The tracing instance
 *
 * Check whether the conditional snapshot for the given instance is
 * enabled; if so, free the cond_snapshot associated with it,
 * otherwise return -EINVAL.
 *
 * Returns 0 if successful, error otherwise.
 */
int tracing_snapshot_cond_disable(struct trace_array *tr)
{
        int ret = 0;

        local_irq_disable();
        arch_spin_lock(&tr->max_lock);

        if (!tr->cond_snapshot)
                ret = -EINVAL;
        else {
                kfree(tr->cond_snapshot);
                tr->cond_snapshot = NULL;
        }

        arch_spin_unlock(&tr->max_lock);
        local_irq_enable();

        return ret;
}
EXPORT_SYMBOL_GPL(tracing_snapshot_cond_disable);
#else
void tracing_snapshot(void)
{
        WARN_ONCE(1, "Snapshot feature not enabled, but internal snapshot used");
}
EXPORT_SYMBOL_GPL(tracing_snapshot);
void tracing_snapshot_cond(struct trace_array *tr, void *cond_data)
{
        WARN_ONCE(1, "Snapshot feature not enabled, but internal conditional snapshot used");
}
EXPORT_SYMBOL_GPL(tracing_snapshot_cond);
int tracing_alloc_snapshot(void)
{
        WARN_ONCE(1, "Snapshot feature not enabled, but snapshot allocation used");
        return -ENODEV;
}
EXPORT_SYMBOL_GPL(tracing_alloc_snapshot);
void tracing_snapshot_alloc(void)
{
        /* Give warning */
        tracing_snapshot();
}
EXPORT_SYMBOL_GPL(tracing_snapshot_alloc);
void *tracing_cond_snapshot_data(struct trace_array *tr)
{
        return NULL;
}
EXPORT_SYMBOL_GPL(tracing_cond_snapshot_data);
int tracing_snapshot_cond_enable(struct trace_array *tr, void *cond_data, cond_update_fn_t update)
{
        return -ENODEV;
}
EXPORT_SYMBOL_GPL(tracing_snapshot_cond_enable);
int tracing_snapshot_cond_disable(struct trace_array *tr)
{
        return false;
}
EXPORT_SYMBOL_GPL(tracing_snapshot_cond_disable);
#define free_snapshot(tr)       do { } while (0)
#endif /* CONFIG_TRACER_SNAPSHOT */
void tracer_tracing_off(struct trace_array *tr)
{
        if (tr->array_buffer.buffer)
                ring_buffer_record_off(tr->array_buffer.buffer);
        /*
         * This flag is looked at when buffers haven't been allocated
         * yet, or by some tracers (like irqsoff), that just want to
         * know if the ring buffer has been disabled, but it can handle
         * races of where it gets disabled but we still do a record.
         * As the check is in the fast path of the tracers, it is more
         * important to be fast than accurate.
         */
        tr->buffer_disabled = 1;
        /* Make the flag seen by readers */
        smp_wmb();
}

/**
 * tracing_off - turn off tracing buffers
 *
 * This function stops the tracing buffers from recording data.
 * It does not disable any overhead the tracers themselves may
 * be causing. This function simply causes all recording to
 * the ring buffers to fail.
 */
void tracing_off(void)
{
        tracer_tracing_off(&global_trace);
}
EXPORT_SYMBOL_GPL(tracing_off);
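
/*
 * tracing_on()/tracing_off() are cheap enough to bracket a suspect
 * region (illustrative sketch):
 *
 *      tracing_on();
 *      suspect_code();
 *      tracing_off();  // freeze the buffer right after the event
 */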
void disable_trace_on_warning(void)
{
        if (__disable_trace_on_warning) {
                trace_array_printk_buf(global_trace.array_buffer.buffer, _THIS_IP_,
                        "Disabling tracing due to warning\n");
                tracing_off();
        }
}

/**
 * tracer_tracing_is_on - show real state of ring buffer enabled
 * @tr : the trace array to know if ring buffer is enabled
 *
 * Shows real state of the ring buffer if it is enabled or not.
 */
bool tracer_tracing_is_on(struct trace_array *tr)
{
        if (tr->array_buffer.buffer)
                return ring_buffer_record_is_on(tr->array_buffer.buffer);
        return !tr->buffer_disabled;
}

/**
 * tracing_is_on - show state of ring buffers enabled
 */
int tracing_is_on(void)
{
        return tracer_tracing_is_on(&global_trace);
}
EXPORT_SYMBOL_GPL(tracing_is_on);
static int __init set_buf_size(char *str)
{
        unsigned long buf_size;

        if (!str)
                return 0;
        buf_size = memparse(str, &str);
        /*
         * nr_entries cannot be zero and the startup
         * tests require some buffer space. Therefore
         * ensure we have at least 4096 bytes of buffer.
         */
        trace_buf_size = max(4096UL, buf_size);
        return 1;
}
__setup("trace_buf_size=", set_buf_size);
static int __init set_tracing_thresh(char *str)
{
        unsigned long threshold;
        int ret;

        if (!str)
                return 0;
        ret = kstrtoul(str, 0, &threshold);
        if (ret < 0)
                return 0;
        tracing_thresh = threshold * 1000;
        return 1;
}
__setup("tracing_thresh=", set_tracing_thresh);
unsigned long nsecs_to_usecs(unsigned long nsecs)
{
        return nsecs / 1000;
}

/*
 * TRACE_FLAGS is defined as a tuple matching bit masks with strings.
 * It uses C(a, b) where 'a' is the eval (enum) name and 'b' is the string that
 * matches it. By defining "C(a, b) b", TRACE_FLAGS becomes a list
 * of strings in the order that the evals (enum) were defined.
 */
#undef C
#define C(a, b) b

/* These must match the bit positions in trace_iterator_flags */
static const char *trace_options[] = {
        TRACE_FLAGS
        NULL
};

static struct {
        u64 (*func)(void);
        const char *name;
        int in_ns;              /* is this clock in nanoseconds? */
} trace_clocks[] = {
        { trace_clock_local,            "local",        1 },
        { trace_clock_global,           "global",       1 },
        { trace_clock_counter,          "counter",      0 },
        { trace_clock_jiffies,          "uptime",       0 },
        { trace_clock,                  "perf",         1 },
        { ktime_get_mono_fast_ns,       "mono",         1 },
        { ktime_get_raw_fast_ns,        "mono_raw",     1 },
        { ktime_get_boot_fast_ns,       "boot",         1 },
        { ktime_get_tai_fast_ns,        "tai",          1 },
};
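
/*
 * The active clock is chosen at runtime by name, e.g. (illustrative):
 *
 *      echo mono > /sys/kernel/tracing/trace_clock
 */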
bool trace_clock_in_ns(struct trace_array *tr)
{
        if (trace_clocks[tr->clock_id].in_ns)
                return true;

        return false;
}

/*
 * trace_parser_get_init - gets the buffer for trace parser
 */
int trace_parser_get_init(struct trace_parser *parser, int size)
{
        memset(parser, 0, sizeof(*parser));

        parser->buffer = kmalloc(size, GFP_KERNEL);
        if (!parser->buffer)
                return 1;

        parser->size = size;
        return 0;
}

/*
 * trace_parser_put - frees the buffer for trace parser
 */
void trace_parser_put(struct trace_parser *parser)
{
        kfree(parser->buffer);
        parser->buffer = NULL;
}

/*
 * trace_get_user - reads the user input string separated by space
 * (matched by isspace(ch))
 *
 * For each string found the 'struct trace_parser' is updated,
 * and the function returns.
 *
 * Returns number of bytes read.
 *
 * See kernel/trace/trace.h for 'struct trace_parser' details.
 */
int trace_get_user(struct trace_parser *parser, const char __user *ubuf,
        size_t cnt, loff_t *ppos)
{
        char ch;
        size_t read = 0;
        ssize_t ret;

        if (!*ppos)
                trace_parser_clear(parser);

        ret = get_user(ch, ubuf++);
        if (ret)
                goto out;

        read++;
        cnt--;

        /*
         * The parser is not finished with the last write,
         * continue reading the user input without skipping spaces.
         */
        if (!parser->cont) {
                /* skip white space */
                while (cnt && isspace(ch)) {
                        ret = get_user(ch, ubuf++);
                        if (ret)
                                goto out;
                        read++;
                        cnt--;
                }

                parser->idx = 0;

                /* only spaces were written */
                if (isspace(ch) || !ch) {
                        *ppos += read;
                        ret = read;
                        goto out;
                }
        }

        /* read the non-space input */
        while (cnt && !isspace(ch) && ch) {
                if (parser->idx < parser->size - 1)
                        parser->buffer[parser->idx++] = ch;
                else {
                        ret = -EINVAL;
                        goto out;
                }
                ret = get_user(ch, ubuf++);
                if (ret)
                        goto out;
                read++;
                cnt--;
        }

        /* We either got finished input or we have to wait for another call. */
        if (isspace(ch) || !ch) {
                parser->buffer[parser->idx] = 0;
                parser->cont = false;
        } else if (parser->idx < parser->size - 1) {
                parser->cont = true;
                parser->buffer[parser->idx++] = ch;
                /* Make sure the parsed string always terminates with '\0'. */
                parser->buffer[parser->idx] = 0;
        } else {
                ret = -EINVAL;
                goto out;
        }

        *ppos += read;
        ret = read;

 out:
        return ret;
}
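
/*
 * Sketch of the usual calling pattern from a file write handler (the
 * buffer size macro and token handler are hypothetical):
 *
 *      struct trace_parser parser;
 *      int read;
 *
 *      if (trace_parser_get_init(&parser, MY_BUF_SIZE))
 *              return -ENOMEM;
 *      read = trace_get_user(&parser, ubuf, cnt, ppos);
 *      if (read >= 0 && trace_parser_loaded(&parser))
 *              my_handle_token(parser.buffer);
 *      trace_parser_put(&parser);
 */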
/* TODO add a seq_buf_to_buffer() */
static ssize_t trace_seq_to_buffer(struct trace_seq *s, void *buf, size_t cnt)
{
        int len;

        if (trace_seq_used(s) <= s->readpos)
                return -EBUSY;

        len = trace_seq_used(s) - s->readpos;
        if (cnt > len)
                cnt = len;
        memcpy(buf, s->buffer + s->readpos, cnt);

        s->readpos += cnt;
        return cnt;
}

unsigned long __read_mostly     tracing_thresh;
#ifdef CONFIG_TRACER_MAX_TRACE
static const struct file_operations tracing_max_lat_fops;

#ifdef LATENCY_FS_NOTIFY

static struct workqueue_struct *fsnotify_wq;

static void latency_fsnotify_workfn(struct work_struct *work)
{
        struct trace_array *tr = container_of(work, struct trace_array,
                                              fsnotify_work);
        fsnotify_inode(tr->d_max_latency->d_inode, FS_MODIFY);
}

static void latency_fsnotify_workfn_irq(struct irq_work *iwork)
{
        struct trace_array *tr = container_of(iwork, struct trace_array,
                                              fsnotify_irqwork);
        queue_work(fsnotify_wq, &tr->fsnotify_work);
}

static void trace_create_maxlat_file(struct trace_array *tr,
                                     struct dentry *d_tracer)
{
        INIT_WORK(&tr->fsnotify_work, latency_fsnotify_workfn);
        init_irq_work(&tr->fsnotify_irqwork, latency_fsnotify_workfn_irq);
        tr->d_max_latency = trace_create_file("tracing_max_latency",
                                              TRACE_MODE_WRITE,
                                              d_tracer, tr,
                                              &tracing_max_lat_fops);
}

__init static int latency_fsnotify_init(void)
{
        fsnotify_wq = alloc_workqueue("tr_max_lat_wq",
                                      WQ_UNBOUND | WQ_HIGHPRI, 0);
        if (!fsnotify_wq) {
                pr_err("Unable to allocate tr_max_lat_wq\n");
                return -ENOMEM;
        }
        return 0;
}

late_initcall_sync(latency_fsnotify_init);

void latency_fsnotify(struct trace_array *tr)
{
        if (!fsnotify_wq)
                return;
        /*
         * We cannot call queue_work(&tr->fsnotify_work) from here because it's
         * possible that we are called from __schedule() or do_idle(), which
         * could cause a deadlock.
         */
        irq_work_queue(&tr->fsnotify_irqwork);
}

#else /* !LATENCY_FS_NOTIFY */

#define trace_create_maxlat_file(tr, d_tracer)                          \
        trace_create_file("tracing_max_latency", TRACE_MODE_WRITE,      \
                          d_tracer, tr, &tracing_max_lat_fops)

#endif
/*
 * Copy the new maximum trace into the separate maximum-trace
 * structure. (this way the maximum trace is permanently saved,
 * for later retrieval via /sys/kernel/tracing/tracing_max_latency)
 */
static void
__update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu)
{
        struct array_buffer *trace_buf = &tr->array_buffer;
        struct array_buffer *max_buf = &tr->max_buffer;
        struct trace_array_cpu *data = per_cpu_ptr(trace_buf->data, cpu);
        struct trace_array_cpu *max_data = per_cpu_ptr(max_buf->data, cpu);

        max_buf->cpu = cpu;
        max_buf->time_start = data->preempt_timestamp;

        max_data->saved_latency = tr->max_latency;
        max_data->critical_start = data->critical_start;
        max_data->critical_end = data->critical_end;

        strncpy(max_data->comm, tsk->comm, TASK_COMM_LEN);
        max_data->pid = tsk->pid;
        /*
         * If tsk == current, then use current_uid(), as that does not use
         * RCU. The irq tracer can be called out of RCU scope.
         */
        if (tsk == current)
                max_data->uid = current_uid();
        else
                max_data->uid = task_uid(tsk);

        max_data->nice = tsk->static_prio - 20 - MAX_RT_PRIO;
        max_data->policy = tsk->policy;
        max_data->rt_priority = tsk->rt_priority;

        /* record this task's comm */
        tracing_record_cmdline(tsk);
        latency_fsnotify(tr);
}
/**
 * update_max_tr - snapshot all trace buffers from global_trace to max_tr
 * @tr: tracer
 * @tsk: the task with the latency
 * @cpu: The cpu that initiated the trace.
 * @cond_data: User data associated with a conditional snapshot
 *
 * Flip the buffers between the @tr and the max_tr and record information
 * about which task was the cause of this latency.
 */
void
update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu,
              void *cond_data)
{
        if (tr->stop_count)
                return;

        WARN_ON_ONCE(!irqs_disabled());

        if (!tr->allocated_snapshot) {
                /* Only the nop tracer should hit this when disabling */
                WARN_ON_ONCE(tr->current_trace != &nop_trace);
                return;
        }

        arch_spin_lock(&tr->max_lock);

        /* Inherit the recordable setting from array_buffer */
        if (ring_buffer_record_is_set_on(tr->array_buffer.buffer))
                ring_buffer_record_on(tr->max_buffer.buffer);
        else
                ring_buffer_record_off(tr->max_buffer.buffer);

#ifdef CONFIG_TRACER_SNAPSHOT
        if (tr->cond_snapshot && !tr->cond_snapshot->update(tr, cond_data)) {
                arch_spin_unlock(&tr->max_lock);
                return;
        }
#endif
        swap(tr->array_buffer.buffer, tr->max_buffer.buffer);

        __update_max_tr(tr, tsk, cpu);

        arch_spin_unlock(&tr->max_lock);
}
/**
 * update_max_tr_single - only copy one trace over, and reset the rest
 * @tr: tracer
 * @tsk: task with the latency
 * @cpu: the cpu of the buffer to copy.
 *
 * Flip the trace of a single CPU buffer between the @tr and the max_tr.
 */
void
update_max_tr_single(struct trace_array *tr, struct task_struct *tsk, int cpu)
{
        int ret;

        if (tr->stop_count)
                return;

        WARN_ON_ONCE(!irqs_disabled());
        if (!tr->allocated_snapshot) {
                /* Only the nop tracer should hit this when disabling */
                WARN_ON_ONCE(tr->current_trace != &nop_trace);
                return;
        }

        arch_spin_lock(&tr->max_lock);

        ret = ring_buffer_swap_cpu(tr->max_buffer.buffer, tr->array_buffer.buffer, cpu);

        if (ret == -EBUSY) {
                /*
                 * We failed to swap the buffer due to a commit taking
                 * place on this CPU. We fail to record, but we reset
                 * the max trace buffer (no one writes directly to it)
                 * and flag that it failed.
                 * Another reason is that a resize is in progress.
                 */
                trace_array_printk_buf(tr->max_buffer.buffer, _THIS_IP_,
                        "Failed to swap buffers due to commit or resize in progress\n");
        }

        WARN_ON_ONCE(ret && ret != -EAGAIN && ret != -EBUSY);

        __update_max_tr(tr, tsk, cpu);
        arch_spin_unlock(&tr->max_lock);
}

#endif /* CONFIG_TRACER_MAX_TRACE */
static int wait_on_pipe(struct trace_iterator *iter, int full)
{
        /* Iterators are static, they should be filled or empty */
        if (trace_buffer_iter(iter, iter->cpu_file))
                return 0;

        return ring_buffer_wait(iter->array_buffer->buffer, iter->cpu_file,
                                full);
}

#ifdef CONFIG_FTRACE_STARTUP_TEST
static bool selftests_can_run;

struct trace_selftests {
        struct list_head                list;
        struct tracer                   *type;
};

static LIST_HEAD(postponed_selftests);
static int save_selftest(struct tracer *type)
{
        struct trace_selftests *selftest;

        selftest = kmalloc(sizeof(*selftest), GFP_KERNEL);
        if (!selftest)
                return -ENOMEM;

        selftest->type = type;
        list_add(&selftest->list, &postponed_selftests);
        return 0;
}

static int run_tracer_selftest(struct tracer *type)
{
        struct trace_array *tr = &global_trace;
        struct tracer *saved_tracer = tr->current_trace;
        int ret;

        if (!type->selftest || tracing_selftest_disabled)
                return 0;

        /*
         * If a tracer registers early in boot up (before scheduling is
         * initialized and such), then do not run its selftests yet.
         * Instead, run it a little later in the boot process.
         */
        if (!selftests_can_run)
                return save_selftest(type);

        if (!tracing_is_on()) {
                pr_warn("Selftest for tracer %s skipped due to tracing disabled\n",
                        type->name);
                return 0;
        }

        /*
         * Run a selftest on this tracer.
         * Here we reset the trace buffer, and set the current
         * tracer to be this tracer. The tracer can then run some
         * internal tracing to verify that everything is in order.
         * If we fail, we do not register this tracer.
         */
        tracing_reset_online_cpus(&tr->array_buffer);

        tr->current_trace = type;

#ifdef CONFIG_TRACER_MAX_TRACE
        if (type->use_max_tr) {
                /* If we expanded the buffers, make sure the max is expanded too */
                if (tr->ring_buffer_expanded)
                        ring_buffer_resize(tr->max_buffer.buffer, trace_buf_size,
                                           RING_BUFFER_ALL_CPUS);
                tr->allocated_snapshot = true;
        }
#endif

        /* the test is responsible for initializing and enabling */
        pr_info("Testing tracer %s: ", type->name);
        ret = type->selftest(type, tr);
        /* the test is responsible for resetting too */
        tr->current_trace = saved_tracer;
        if (ret) {
                printk(KERN_CONT "FAILED!\n");
                /* Add the warning after printing 'FAILED' */
                WARN_ON(1);
                return -1;
        }
        /* Only reset on passing, to avoid touching corrupted buffers */
        tracing_reset_online_cpus(&tr->array_buffer);

#ifdef CONFIG_TRACER_MAX_TRACE
        if (type->use_max_tr) {
                tr->allocated_snapshot = false;

                /* Shrink the max buffer again */
                if (tr->ring_buffer_expanded)
                        ring_buffer_resize(tr->max_buffer.buffer, 1,
                                           RING_BUFFER_ALL_CPUS);
        }
#endif

        printk(KERN_CONT "PASSED\n");
        return 0;
}
static int do_run_tracer_selftest(struct tracer *type)
{
        int ret;

        /*
         * Tests can take a long time, especially if they are run one after the
         * other, as does happen during bootup when all the tracers are
         * registered. This could cause the soft lockup watchdog to trigger.
         */
        cond_resched();

        tracing_selftest_running = true;
        ret = run_tracer_selftest(type);
        tracing_selftest_running = false;

        return ret;
}

static __init int init_trace_selftests(void)
{
        struct trace_selftests *p, *n;
        struct tracer *t, **last;
        int ret;

        selftests_can_run = true;

        mutex_lock(&trace_types_lock);

        if (list_empty(&postponed_selftests))
                goto out;

        pr_info("Running postponed tracer tests:\n");

        tracing_selftest_running = true;
        list_for_each_entry_safe(p, n, &postponed_selftests, list) {
                /*
                 * This loop can take minutes when sanitizers are enabled, so
                 * let's make sure we allow RCU processing.
                 */
                cond_resched();
                ret = run_tracer_selftest(p->type);
                /* If the test fails, then warn and remove from available_tracers */
                if (ret < 0) {
                        WARN(1, "tracer: %s failed selftest, disabling\n",
                             p->type->name);
                        last = &trace_types;
                        for (t = trace_types; t; t = t->next) {
                                if (t == p->type) {
                                        *last = t->next;
                                        break;
                                }
                                last = &t->next;
                        }
                }
                list_del(&p->list);
                kfree(p);
        }
        tracing_selftest_running = false;

 out:
        mutex_unlock(&trace_types_lock);

        return 0;
}
core_initcall(init_trace_selftests);
#else
static inline int run_tracer_selftest(struct tracer *type)
{
        return 0;
}
static inline int do_run_tracer_selftest(struct tracer *type)
{
        return 0;
}
#endif /* CONFIG_FTRACE_STARTUP_TEST */
static void add_tracer_options(struct trace_array *tr, struct tracer *t);

static void __init apply_trace_boot_options(void);

/**
 * register_tracer - register a tracer with the ftrace system.
 * @type: the plugin for the tracer
 *
 * Register a new plugin tracer.
 */
int __init register_tracer(struct tracer *type)
{
        struct tracer *t;
        int ret = 0;

        if (!type->name) {
                pr_info("Tracer must have a name\n");
                return -1;
        }

        if (strlen(type->name) >= MAX_TRACER_SIZE) {
                pr_info("Tracer has a name longer than %d\n", MAX_TRACER_SIZE);
                return -1;
        }

        if (security_locked_down(LOCKDOWN_TRACEFS)) {
                pr_warn("Can not register tracer %s due to lockdown\n",
                        type->name);
                return -EPERM;
        }

        mutex_lock(&trace_types_lock);

        for (t = trace_types; t; t = t->next) {
                if (strcmp(type->name, t->name) == 0) {
                        /* already found */
                        pr_info("Tracer %s already registered\n",
                                type->name);
                        ret = -1;
                        goto out;
                }
        }

        if (!type->set_flag)
                type->set_flag = &dummy_set_flag;
        if (!type->flags) {
                /* allocate a dummy tracer_flags */
                type->flags = kmalloc(sizeof(*type->flags), GFP_KERNEL);
                if (!type->flags) {
                        ret = -ENOMEM;
                        goto out;
                }
                type->flags->val = 0;
                type->flags->opts = dummy_tracer_opt;
        } else
                if (!type->flags->opts)
                        type->flags->opts = dummy_tracer_opt;

        /* store the tracer for __set_tracer_option */
        type->flags->trace = type;

        ret = do_run_tracer_selftest(type);
        if (ret < 0)
                goto out;

        type->next = trace_types;
        trace_types = type;
        add_tracer_options(&global_trace, type);

 out:
        mutex_unlock(&trace_types_lock);

        if (ret || !default_bootup_tracer)
                goto out_unlock;

        if (strncmp(default_bootup_tracer, type->name, MAX_TRACER_SIZE))
                goto out_unlock;

        printk(KERN_INFO "Starting tracer '%s'\n", type->name);
        /* Do we want this tracer to start on bootup? */
        tracing_set_tracer(&global_trace, type->name);
        default_bootup_tracer = NULL;

        apply_trace_boot_options();

        /* disable other selftests, since this will break it. */
        disable_tracing_selftest("running a tracer");

 out_unlock:
        return ret;
}
static void tracing_reset_cpu(struct array_buffer *buf, int cpu)
{
        struct trace_buffer *buffer = buf->buffer;

        if (!buffer)
                return;

        ring_buffer_record_disable(buffer);

        /* Make sure all commits have finished */
        synchronize_rcu();
        ring_buffer_reset_cpu(buffer, cpu);

        ring_buffer_record_enable(buffer);
}

void tracing_reset_online_cpus(struct array_buffer *buf)
{
        struct trace_buffer *buffer = buf->buffer;

        if (!buffer)
                return;

        ring_buffer_record_disable(buffer);

        /* Make sure all commits have finished */
        synchronize_rcu();

        buf->time_start = buffer_ftrace_now(buf, buf->cpu);

        ring_buffer_reset_online_cpus(buffer);

        ring_buffer_record_enable(buffer);
}

/* Must have trace_types_lock held */
void tracing_reset_all_online_cpus_unlocked(void)
{
        struct trace_array *tr;

        lockdep_assert_held(&trace_types_lock);

        list_for_each_entry(tr, &ftrace_trace_arrays, list) {
                if (!tr->clear_trace)
                        continue;
                tr->clear_trace = false;
                tracing_reset_online_cpus(&tr->array_buffer);
#ifdef CONFIG_TRACER_MAX_TRACE
                tracing_reset_online_cpus(&tr->max_buffer);
#endif
        }
}

void tracing_reset_all_online_cpus(void)
{
        mutex_lock(&trace_types_lock);
        tracing_reset_all_online_cpus_unlocked();
        mutex_unlock(&trace_types_lock);
}
/*
 * The tgid_map array maps from pid to tgid; i.e. the value stored at index i
 * is the tgid last observed corresponding to pid=i.
 */
static int *tgid_map;

/* The maximum valid index into tgid_map. */
static size_t tgid_map_max;

#define SAVED_CMDLINES_DEFAULT 128
#define NO_CMDLINE_MAP UINT_MAX
/*
 * Preemption must be disabled before acquiring trace_cmdline_lock.
 * The various trace_arrays' max_lock must be acquired in a context
 * where interrupts are disabled.
 */
static arch_spinlock_t trace_cmdline_lock = __ARCH_SPIN_LOCK_UNLOCKED;
struct saved_cmdlines_buffer {
        unsigned map_pid_to_cmdline[PID_MAX_DEFAULT+1];
        unsigned *map_cmdline_to_pid;
        unsigned cmdline_num;
        int cmdline_idx;
        char *saved_cmdlines;
};
static struct saved_cmdlines_buffer *savedcmd;

static inline char *get_saved_cmdlines(int idx)
{
        return &savedcmd->saved_cmdlines[idx * TASK_COMM_LEN];
}

static inline void set_cmdline(int idx, const char *cmdline)
{
        strncpy(get_saved_cmdlines(idx), cmdline, TASK_COMM_LEN);
}

static int allocate_cmdlines_buffer(unsigned int val,
                                    struct saved_cmdlines_buffer *s)
{
        s->map_cmdline_to_pid = kmalloc_array(val,
                                              sizeof(*s->map_cmdline_to_pid),
                                              GFP_KERNEL);
        if (!s->map_cmdline_to_pid)
                return -ENOMEM;

        s->saved_cmdlines = kmalloc_array(TASK_COMM_LEN, val, GFP_KERNEL);
        if (!s->saved_cmdlines) {
                kfree(s->map_cmdline_to_pid);
                return -ENOMEM;
        }

        s->cmdline_idx = 0;
        s->cmdline_num = val;
        memset(&s->map_pid_to_cmdline, NO_CMDLINE_MAP,
               sizeof(s->map_pid_to_cmdline));
        memset(s->map_cmdline_to_pid, NO_CMDLINE_MAP,
               val * sizeof(*s->map_cmdline_to_pid));

        return 0;
}

static int trace_create_savedcmd(void)
{
        int ret;

        savedcmd = kmalloc(sizeof(*savedcmd), GFP_KERNEL);
        if (!savedcmd)
                return -ENOMEM;

        ret = allocate_cmdlines_buffer(SAVED_CMDLINES_DEFAULT, savedcmd);
        if (ret < 0) {
                kfree(savedcmd);
                savedcmd = NULL;
                return -ENOMEM;
        }

        return 0;
}
2358 int is_tracing_stopped(void)
2360 return global_trace.stop_count;
2363 static void tracing_start_tr(struct trace_array *tr)
2365 struct trace_buffer *buffer;
2366 unsigned long flags;
2368 if (tracing_disabled)
2371 raw_spin_lock_irqsave(&tr->start_lock, flags);
2372 if (--tr->stop_count) {
2373 if (WARN_ON_ONCE(tr->stop_count < 0)) {
2374 /* Someone screwed up their debugging */
2380 /* Prevent the buffers from switching */
2381 arch_spin_lock(&tr->max_lock);
2383 buffer = tr->array_buffer.buffer;
2385 ring_buffer_record_enable(buffer);
2387 #ifdef CONFIG_TRACER_MAX_TRACE
2388 buffer = tr->max_buffer.buffer;
2390 ring_buffer_record_enable(buffer);
2393 arch_spin_unlock(&tr->max_lock);
2396 raw_spin_unlock_irqrestore(&tr->start_lock, flags);
2400 * tracing_start - quick start of the tracer
2402 * If tracing is enabled but was stopped by tracing_stop,
2403 * this will start the tracer back up.
2405 void tracing_start(void)
2408 return tracing_start_tr(&global_trace);
2411 static void tracing_stop_tr(struct trace_array *tr)
2413 struct trace_buffer *buffer;
2414 unsigned long flags;
2416 raw_spin_lock_irqsave(&tr->start_lock, flags);
2417 if (tr->stop_count++)
2420 /* Prevent the buffers from switching */
2421 arch_spin_lock(&tr->max_lock);
2423 buffer = tr->array_buffer.buffer;
2425 ring_buffer_record_disable(buffer);
2427 #ifdef CONFIG_TRACER_MAX_TRACE
2428 buffer = tr->max_buffer.buffer;
2430 ring_buffer_record_disable(buffer);
2433 arch_spin_unlock(&tr->max_lock);
2436 raw_spin_unlock_irqrestore(&tr->start_lock, flags);
2440 * tracing_stop - quick stop of the tracer
2442 * Lightweight way to stop tracing. Use in conjunction with tracing_start().
2445 void tracing_stop(void)
2447 return tracing_stop_tr(&global_trace);
2450 static int trace_save_cmdline(struct task_struct *tsk)
2454 /* treat recording of idle task as a success */
2458 tpid = tsk->pid & (PID_MAX_DEFAULT - 1);
2461 * It's not the end of the world if we don't get
2462 * the lock, but we also don't want to spin
2463 * nor do we want to disable interrupts,
2464 * so if we miss here, then better luck next time.
2466 * This is called from within the scheduler and wakeup paths, so
2467 * interrupts had better be disabled and the run-queue lock held.
2469 lockdep_assert_preemption_disabled();
2470 if (!arch_spin_trylock(&trace_cmdline_lock))
2473 idx = savedcmd->map_pid_to_cmdline[tpid];
2474 if (idx == NO_CMDLINE_MAP) {
2475 idx = (savedcmd->cmdline_idx + 1) % savedcmd->cmdline_num;
2477 savedcmd->map_pid_to_cmdline[tpid] = idx;
2478 savedcmd->cmdline_idx = idx;
2481 savedcmd->map_cmdline_to_pid[idx] = tsk->pid;
2482 set_cmdline(idx, tsk->comm);
2484 arch_spin_unlock(&trace_cmdline_lock);
2489 static void __trace_find_cmdline(int pid, char comm[])
2495 strcpy(comm, "<idle>");
2499 if (WARN_ON_ONCE(pid < 0)) {
2500 strcpy(comm, "<XXX>");
2504 tpid = pid & (PID_MAX_DEFAULT - 1);
2505 map = savedcmd->map_pid_to_cmdline[tpid];
2506 if (map != NO_CMDLINE_MAP) {
2507 tpid = savedcmd->map_cmdline_to_pid[map];
2509 strscpy(comm, get_saved_cmdlines(map), TASK_COMM_LEN);
2513 strcpy(comm, "<...>");
2516 void trace_find_cmdline(int pid, char comm[])
2519 arch_spin_lock(&trace_cmdline_lock);
2521 __trace_find_cmdline(pid, comm);
2523 arch_spin_unlock(&trace_cmdline_lock);
2527 static int *trace_find_tgid_ptr(int pid)
2530 * Pairs with the smp_store_release in set_tracer_flag() to ensure that
2531 * if we observe a non-NULL tgid_map then we also observe the correct tgid_map_max.
2534 int *map = smp_load_acquire(&tgid_map);
2536 if (unlikely(!map || pid > tgid_map_max))
2542 int trace_find_tgid(int pid)
2544 int *ptr = trace_find_tgid_ptr(pid);
2546 return ptr ? *ptr : 0;
2549 static int trace_save_tgid(struct task_struct *tsk)
2553 /* treat recording of idle task as a success */
2557 ptr = trace_find_tgid_ptr(tsk->pid);
2565 static bool tracing_record_taskinfo_skip(int flags)
2567 if (unlikely(!(flags & (TRACE_RECORD_CMDLINE | TRACE_RECORD_TGID))))
2569 if (!__this_cpu_read(trace_taskinfo_save))
2575 * tracing_record_taskinfo - record the task info of a task
2577 * @task: task to record
2578 * @flags: TRACE_RECORD_CMDLINE for recording comm
2579 * TRACE_RECORD_TGID for recording tgid
2581 void tracing_record_taskinfo(struct task_struct *task, int flags)
2585 if (tracing_record_taskinfo_skip(flags))
2589 * Record as much task information as possible. If some fail, continue
2590 * to try to record the others.
2592 done = !(flags & TRACE_RECORD_CMDLINE) || trace_save_cmdline(task);
2593 done &= !(flags & TRACE_RECORD_TGID) || trace_save_tgid(task);
2595 /* If recording any information failed, retry again soon. */
2599 __this_cpu_write(trace_taskinfo_save, false);
2603 * tracing_record_taskinfo_sched_switch - record task info for sched_switch
2605 * @prev: previous task during sched_switch
2606 * @next: next task during sched_switch
2607 * @flags: TRACE_RECORD_CMDLINE for recording comm
2608 * TRACE_RECORD_TGID for recording tgid
2610 void tracing_record_taskinfo_sched_switch(struct task_struct *prev,
2611 struct task_struct *next, int flags)
2615 if (tracing_record_taskinfo_skip(flags))
2619 * Record as much task information as possible. If some fail, continue
2620 * to try to record the others.
2622 done = !(flags & TRACE_RECORD_CMDLINE) || trace_save_cmdline(prev);
2623 done &= !(flags & TRACE_RECORD_CMDLINE) || trace_save_cmdline(next);
2624 done &= !(flags & TRACE_RECORD_TGID) || trace_save_tgid(prev);
2625 done &= !(flags & TRACE_RECORD_TGID) || trace_save_tgid(next);
2627 /* If recording any information failed, retry again soon. */
2631 __this_cpu_write(trace_taskinfo_save, false);
2634 /* Helpers to record a specific task information */
2635 void tracing_record_cmdline(struct task_struct *task)
2637 tracing_record_taskinfo(task, TRACE_RECORD_CMDLINE);
2640 void tracing_record_tgid(struct task_struct *task)
2642 tracing_record_taskinfo(task, TRACE_RECORD_TGID);
2646 * Several functions return TRACE_TYPE_PARTIAL_LINE if the trace_seq
2647 * overflowed, and TRACE_TYPE_HANDLED otherwise. This helper function
2648 * simplifies those functions and keeps them in sync.
2650 enum print_line_t trace_handle_return(struct trace_seq *s)
2652 return trace_seq_has_overflowed(s) ?
2653 TRACE_TYPE_PARTIAL_LINE : TRACE_TYPE_HANDLED;
2655 EXPORT_SYMBOL_GPL(trace_handle_return);
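/*
 * Editorial sketch of the intended call pattern in an event output
 * handler (the handler name is hypothetical):
 *
 *	static enum print_line_t my_event_trace(struct trace_iterator *iter,
 *						int flags, struct trace_event *event)
 *	{
 *		trace_seq_printf(&iter->seq, "my_event: ...\n");
 *		return trace_handle_return(&iter->seq);
 *	}
 */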
2657 static unsigned short migration_disable_value(void)
2659 #if defined(CONFIG_SMP)
2660 return current->migration_disabled;
2666 unsigned int tracing_gen_ctx_irq_test(unsigned int irqs_status)
2668 unsigned int trace_flags = irqs_status;
2671 pc = preempt_count();
2674 trace_flags |= TRACE_FLAG_NMI;
2675 if (pc & HARDIRQ_MASK)
2676 trace_flags |= TRACE_FLAG_HARDIRQ;
2677 if (in_serving_softirq())
2678 trace_flags |= TRACE_FLAG_SOFTIRQ;
2679 if (softirq_count() >> (SOFTIRQ_SHIFT + 1))
2680 trace_flags |= TRACE_FLAG_BH_OFF;
2682 if (tif_need_resched())
2683 trace_flags |= TRACE_FLAG_NEED_RESCHED;
2684 if (test_preempt_need_resched())
2685 trace_flags |= TRACE_FLAG_PREEMPT_RESCHED;
2686 return (trace_flags << 16) | (min_t(unsigned int, pc & 0xff, 0xf)) |
2687 (min_t(unsigned int, migration_disable_value(), 0xf)) << 4;
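/*
 * Editorial note on the packed trace_ctx layout produced above:
 *
 *	bits  0- 3: preemption depth (clamped to 15)
 *	bits  4- 7: migrate-disable depth (clamped to 15)
 *	bits 16-31: TRACE_FLAG_* bits (irq status, NMI, hardirq/softirq,
 *		    BH off, need-resched)
 *
 * For example, preempt depth 2 in hard interrupt context yields
 * (TRACE_FLAG_HARDIRQ << 16) | 2.
 */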
2690 struct ring_buffer_event *
2691 trace_buffer_lock_reserve(struct trace_buffer *buffer,
2694 unsigned int trace_ctx)
2696 return __trace_buffer_lock_reserve(buffer, type, len, trace_ctx);
2699 DEFINE_PER_CPU(struct ring_buffer_event *, trace_buffered_event);
2700 DEFINE_PER_CPU(int, trace_buffered_event_cnt);
2701 static int trace_buffered_event_ref;
2704 * trace_buffered_event_enable - enable buffering events
2706 * When events are being filtered, it is quicker to use a temporary
2707 * buffer to write the event data into if there's a likely chance
2708 * that it will not be committed. The discard of the ring buffer
2709 * is not as fast as committing, and is much slower than copying into a temp buffer.
2712 * When an event is to be filtered, allocate per cpu buffers to
2713 * write the event data into, and if the event is filtered and discarded
2714 * it is simply dropped; otherwise, the entire data is committed in one shot.
2717 void trace_buffered_event_enable(void)
2719 struct ring_buffer_event *event;
2723 WARN_ON_ONCE(!mutex_is_locked(&event_mutex));
2725 if (trace_buffered_event_ref++)
2728 for_each_tracing_cpu(cpu) {
2729 page = alloc_pages_node(cpu_to_node(cpu),
2730 GFP_KERNEL | __GFP_NORETRY, 0);
2731 /* This is just an optimization and can handle failures */
2733 pr_err("Failed to allocate event buffer\n");
2737 event = page_address(page);
2738 memset(event, 0, sizeof(*event));
2740 per_cpu(trace_buffered_event, cpu) = event;
2743 if (cpu == smp_processor_id() &&
2744 __this_cpu_read(trace_buffered_event) !=
2745 per_cpu(trace_buffered_event, cpu))
2751 static void enable_trace_buffered_event(void *data)
2753 /* Probably not needed, but do it anyway */
2755 this_cpu_dec(trace_buffered_event_cnt);
2758 static void disable_trace_buffered_event(void *data)
2760 this_cpu_inc(trace_buffered_event_cnt);
2764 * trace_buffered_event_disable - disable buffering events
2766 * When a filter is removed, it is faster to not use the buffered
2767 * events, and to commit directly into the ring buffer. Free up
2768 * the temp buffers when there are no more users. This requires
2769 * special synchronization with current events.
2771 void trace_buffered_event_disable(void)
2775 WARN_ON_ONCE(!mutex_is_locked(&event_mutex));
2777 if (WARN_ON_ONCE(!trace_buffered_event_ref))
2780 if (--trace_buffered_event_ref)
2783 /* For each CPU, set the buffer as used. */
2784 on_each_cpu_mask(tracing_buffer_mask, disable_trace_buffered_event,
2787 /* Wait for all current users to finish */
2790 for_each_tracing_cpu(cpu) {
2791 free_page((unsigned long)per_cpu(trace_buffered_event, cpu));
2792 per_cpu(trace_buffered_event, cpu) = NULL;
2796 * Wait for all CPUs that potentially started checking if they can use
2797 * their event buffer only after the previous synchronize_rcu() call and
2798 * they still read a valid pointer from trace_buffered_event. It must be
2799 * ensured they don't see cleared trace_buffered_event_cnt else they
2800 * could wrongly decide to use the pointed-to buffer which is now freed.
2804 /* For each CPU, relinquish the buffer */
2805 on_each_cpu_mask(tracing_buffer_mask, enable_trace_buffered_event, NULL,
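/*
 * Editorial sketch of how a filter user pairs the two calls above
 * (the surrounding logic is hypothetical):
 *
 *	mutex_lock(&event_mutex);
 *	trace_buffered_event_enable();		// first user allocates buffers
 *	... install or remove event filters ...
 *	trace_buffered_event_disable();		// last user frees them
 *	mutex_unlock(&event_mutex);
 */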
2809 static struct trace_buffer *temp_buffer;
2811 struct ring_buffer_event *
2812 trace_event_buffer_lock_reserve(struct trace_buffer **current_rb,
2813 struct trace_event_file *trace_file,
2814 int type, unsigned long len,
2815 unsigned int trace_ctx)
2817 struct ring_buffer_event *entry;
2818 struct trace_array *tr = trace_file->tr;
2821 *current_rb = tr->array_buffer.buffer;
2823 if (!tr->no_filter_buffering_ref &&
2824 (trace_file->flags & (EVENT_FILE_FL_SOFT_DISABLED | EVENT_FILE_FL_FILTERED))) {
2825 preempt_disable_notrace();
2827 * Filtering is on, so try to use the per cpu buffer first.
2828 * This buffer will simulate a ring_buffer_event,
2829 * where the type_len is zero and the array[0] will
2830 * hold the full length.
2831 * (see include/linux/ring_buffer.h for details on
2832 * how the ring_buffer_event is structured).
2834 * Using a temp buffer during filtering and copying it
2835 * on a matched filter is quicker than writing directly
2836 * into the ring buffer and then discarding it when
2837 * it doesn't match. That is because the discard
2838 * requires several atomic operations to get right.
2839 * Copying on a match and doing nothing on a failed match
2840 * is still quicker than writing directly (no copy on a match)
2841 * but then having to discard out of the ring buffer on a failed match.
2843 if ((entry = __this_cpu_read(trace_buffered_event))) {
2844 int max_len = PAGE_SIZE - struct_size(entry, array, 1);
2846 val = this_cpu_inc_return(trace_buffered_event_cnt);
2849 * Preemption is disabled, but interrupts and NMIs
2850 * can still come in now. If that happens after
2851 * the above increment, then it will have to go
2852 * back to the old method of allocating the event
2853 * on the ring buffer, and if the filter fails, it
2854 * will have to call ring_buffer_discard_commit()
2857 * Need to also check the unlikely case that the
2858 * length is bigger than the temp buffer size.
2859 * If that happens, then the reserve is pretty much
2860 * guaranteed to fail, as the ring buffer currently
2861 * only allows events less than a page. But that may
2862 * change in the future, so let the ring buffer reserve
2863 * handle the failure in that case.
2865 if (val == 1 && likely(len <= max_len)) {
2866 trace_event_setup(entry, type, trace_ctx);
2867 entry->array[0] = len;
2868 /* Return with preemption disabled */
2871 this_cpu_dec(trace_buffered_event_cnt);
2873 /* __trace_buffer_lock_reserve() disables preemption */
2874 preempt_enable_notrace();
2877 entry = __trace_buffer_lock_reserve(*current_rb, type, len,
2880 * If tracing is off, but we have triggers enabled
2881 * we still need to look at the event data. Use the temp_buffer
2882 * to store the trace event for the trigger to use. It's recursive
2883 * safe and will not be recorded anywhere.
2885 if (!entry && trace_file->flags & EVENT_FILE_FL_TRIGGER_COND) {
2886 *current_rb = temp_buffer;
2887 entry = __trace_buffer_lock_reserve(*current_rb, type, len,
2892 EXPORT_SYMBOL_GPL(trace_event_buffer_lock_reserve);
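/*
 * Editorial sketch of the reserve/commit pairing, roughly what the
 * generated trace_event_raw_event_*() helpers do (the entry field and
 * event_type are hypothetical):
 *
 *	fbuffer.event = trace_event_buffer_lock_reserve(&fbuffer.buffer,
 *				trace_file, event_type, sizeof(*entry),
 *				trace_ctx);
 *	if (!fbuffer.event)
 *		return;
 *	entry = ring_buffer_event_data(fbuffer.event);
 *	entry->my_field = val;			// hypothetical field
 *	trace_event_buffer_commit(&fbuffer);
 */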
2894 static DEFINE_RAW_SPINLOCK(tracepoint_iter_lock);
2895 static DEFINE_MUTEX(tracepoint_printk_mutex);
2897 static void output_printk(struct trace_event_buffer *fbuffer)
2899 struct trace_event_call *event_call;
2900 struct trace_event_file *file;
2901 struct trace_event *event;
2902 unsigned long flags;
2903 struct trace_iterator *iter = tracepoint_print_iter;
2905 /* We should never get here if iter is NULL */
2906 if (WARN_ON_ONCE(!iter))
2909 event_call = fbuffer->trace_file->event_call;
2910 if (!event_call || !event_call->event.funcs ||
2911 !event_call->event.funcs->trace)
2914 file = fbuffer->trace_file;
2915 if (test_bit(EVENT_FILE_FL_SOFT_DISABLED_BIT, &file->flags) ||
2916 (unlikely(file->flags & EVENT_FILE_FL_FILTERED) &&
2917 !filter_match_preds(file->filter, fbuffer->entry)))
2920 event = &fbuffer->trace_file->event_call->event;
2922 raw_spin_lock_irqsave(&tracepoint_iter_lock, flags);
2923 trace_seq_init(&iter->seq);
2924 iter->ent = fbuffer->entry;
2925 event_call->event.funcs->trace(iter, 0, event);
2926 trace_seq_putc(&iter->seq, 0);
2927 printk("%s", iter->seq.buffer);
2929 raw_spin_unlock_irqrestore(&tracepoint_iter_lock, flags);
2932 int tracepoint_printk_sysctl(struct ctl_table *table, int write,
2933 void *buffer, size_t *lenp,
2936 int save_tracepoint_printk;
2939 mutex_lock(&tracepoint_printk_mutex);
2940 save_tracepoint_printk = tracepoint_printk;
2942 ret = proc_dointvec(table, write, buffer, lenp, ppos);
2945 * This will force exiting early, as tracepoint_printk
2946 * is always zero when tracepoint_print_iter is not allocated.
2948 if (!tracepoint_print_iter)
2949 tracepoint_printk = 0;
2951 if (save_tracepoint_printk == tracepoint_printk)
2954 if (tracepoint_printk)
2955 static_key_enable(&tracepoint_printk_key.key);
2957 static_key_disable(&tracepoint_printk_key.key);
2960 mutex_unlock(&tracepoint_printk_mutex);
2965 void trace_event_buffer_commit(struct trace_event_buffer *fbuffer)
2967 enum event_trigger_type tt = ETT_NONE;
2968 struct trace_event_file *file = fbuffer->trace_file;
2970 if (__event_trigger_test_discard(file, fbuffer->buffer, fbuffer->event,
2971 fbuffer->entry, &tt))
2974 if (static_key_false(&tracepoint_printk_key.key))
2975 output_printk(fbuffer);
2977 if (static_branch_unlikely(&trace_event_exports_enabled))
2978 ftrace_exports(fbuffer->event, TRACE_EXPORT_EVENT);
2980 trace_buffer_unlock_commit_regs(file->tr, fbuffer->buffer,
2981 fbuffer->event, fbuffer->trace_ctx, fbuffer->regs);
2985 event_triggers_post_call(file, tt);
2988 EXPORT_SYMBOL_GPL(trace_event_buffer_commit);
2993 * trace_buffer_unlock_commit_regs()
2994 * trace_event_buffer_commit()
2995 * trace_event_raw_event_xxx()
2997 # define STACK_SKIP 3
2999 void trace_buffer_unlock_commit_regs(struct trace_array *tr,
3000 struct trace_buffer *buffer,
3001 struct ring_buffer_event *event,
3002 unsigned int trace_ctx,
3003 struct pt_regs *regs)
3005 __buffer_unlock_commit(buffer, event);
3008 * If regs is not set, skip the tracing-internal frames (STACK_SKIP).
3009 * Note, we can still get here via blktrace, wakeup tracer
3010 * and mmiotrace, but that's ok if they lose a function or
3011 * two. They are not that meaningful.
3013 ftrace_trace_stack(tr, buffer, trace_ctx, regs ? 0 : STACK_SKIP, regs);
3014 ftrace_trace_userstack(tr, buffer, trace_ctx);
3018 * Similar to trace_buffer_unlock_commit_regs() but do not dump stack.
3021 trace_buffer_unlock_commit_nostack(struct trace_buffer *buffer,
3022 struct ring_buffer_event *event)
3024 __buffer_unlock_commit(buffer, event);
3028 trace_function(struct trace_array *tr, unsigned long ip, unsigned long
3029 parent_ip, unsigned int trace_ctx)
3031 struct trace_event_call *call = &event_function;
3032 struct trace_buffer *buffer = tr->array_buffer.buffer;
3033 struct ring_buffer_event *event;
3034 struct ftrace_entry *entry;
3036 event = __trace_buffer_lock_reserve(buffer, TRACE_FN, sizeof(*entry),
3040 entry = ring_buffer_event_data(event);
3042 entry->parent_ip = parent_ip;
3044 if (!call_filter_check_discard(call, entry, buffer, event)) {
3045 if (static_branch_unlikely(&trace_function_exports_enabled))
3046 ftrace_exports(event, TRACE_EXPORT_FUNCTION);
3047 __buffer_unlock_commit(buffer, event);
3051 #ifdef CONFIG_STACKTRACE
3053 /* Allow 4 levels of nesting: normal, softirq, irq, NMI */
3054 #define FTRACE_KSTACK_NESTING 4
3056 #define FTRACE_KSTACK_ENTRIES (PAGE_SIZE / FTRACE_KSTACK_NESTING)
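/*
 * Editorial note (assuming 4K pages and 8-byte longs): this gives
 * 4096 / 4 = 1024 entries per nesting level, so each struct
 * ftrace_stacks below costs 4 * 1024 * 8 = 32K of per-CPU memory.
 */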
3058 struct ftrace_stack {
3059 unsigned long calls[FTRACE_KSTACK_ENTRIES];
3063 struct ftrace_stacks {
3064 struct ftrace_stack stacks[FTRACE_KSTACK_NESTING];
3067 static DEFINE_PER_CPU(struct ftrace_stacks, ftrace_stacks);
3068 static DEFINE_PER_CPU(int, ftrace_stack_reserve);
3070 static void __ftrace_trace_stack(struct trace_buffer *buffer,
3071 unsigned int trace_ctx,
3072 int skip, struct pt_regs *regs)
3074 struct trace_event_call *call = &event_kernel_stack;
3075 struct ring_buffer_event *event;
3076 unsigned int size, nr_entries;
3077 struct ftrace_stack *fstack;
3078 struct stack_entry *entry;
3082 * Add one, for this function and the call to save_stack_trace().
3083 * If regs is set, then these functions will not be in the way.
3085 #ifndef CONFIG_UNWINDER_ORC
3090 preempt_disable_notrace();
3092 stackidx = __this_cpu_inc_return(ftrace_stack_reserve) - 1;
3094 /* This should never happen. If it does, yell once and skip */
3095 if (WARN_ON_ONCE(stackidx >= FTRACE_KSTACK_NESTING))
3099 * The above __this_cpu_inc_return() is 'atomic' cpu local. An
3100 * interrupt will either see the value pre increment or post
3101 * increment. If the interrupt happens pre increment it will have
3102 * restored the counter when it returns. We just need a barrier to
3103 * keep gcc from moving things around.
3107 fstack = this_cpu_ptr(ftrace_stacks.stacks) + stackidx;
3108 size = ARRAY_SIZE(fstack->calls);
3111 nr_entries = stack_trace_save_regs(regs, fstack->calls,
3114 nr_entries = stack_trace_save(fstack->calls, size, skip);
3117 event = __trace_buffer_lock_reserve(buffer, TRACE_STACK,
3118 struct_size(entry, caller, nr_entries),
3122 entry = ring_buffer_event_data(event);
3124 entry->size = nr_entries;
3125 memcpy(&entry->caller, fstack->calls,
3126 flex_array_size(entry, caller, nr_entries));
3128 if (!call_filter_check_discard(call, entry, buffer, event))
3129 __buffer_unlock_commit(buffer, event);
3132 /* Again, don't let gcc optimize things here */
3134 __this_cpu_dec(ftrace_stack_reserve);
3135 preempt_enable_notrace();
3139 static inline void ftrace_trace_stack(struct trace_array *tr,
3140 struct trace_buffer *buffer,
3141 unsigned int trace_ctx,
3142 int skip, struct pt_regs *regs)
3144 if (!(tr->trace_flags & TRACE_ITER_STACKTRACE))
3147 __ftrace_trace_stack(buffer, trace_ctx, skip, regs);
3150 void __trace_stack(struct trace_array *tr, unsigned int trace_ctx,
3153 struct trace_buffer *buffer = tr->array_buffer.buffer;
3155 if (rcu_is_watching()) {
3156 __ftrace_trace_stack(buffer, trace_ctx, skip, NULL);
3160 if (WARN_ON_ONCE(IS_ENABLED(CONFIG_GENERIC_ENTRY)))
3164 * When an NMI triggers, RCU is enabled via ct_nmi_enter(),
3165 * but if the above rcu_is_watching() failed, then the NMI
3166 * triggered someplace critical, and ct_irq_enter() should
3167 * not be called from NMI.
3169 if (unlikely(in_nmi()))
3172 ct_irq_enter_irqson();
3173 __ftrace_trace_stack(buffer, trace_ctx, skip, NULL);
3174 ct_irq_exit_irqson();
3178 * trace_dump_stack - record a stack back trace in the trace buffer
3179 * @skip: Number of functions to skip (helper handlers)
3181 void trace_dump_stack(int skip)
3183 if (tracing_disabled || tracing_selftest_running)
3186 #ifndef CONFIG_UNWINDER_ORC
3187 /* Skip 1 to skip this function. */
3190 __ftrace_trace_stack(global_trace.array_buffer.buffer,
3191 tracing_gen_ctx(), skip, NULL);
3193 EXPORT_SYMBOL_GPL(trace_dump_stack);
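/*
 * Editorial usage example (debugging aid; the call site is hypothetical):
 *
 *	static void my_helper(void)
 *	{
 *		trace_dump_stack(1);	// skip my_helper() itself
 *	}
 *
 * The backtrace lands in the trace buffer rather than on the console.
 */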
3195 #ifdef CONFIG_USER_STACKTRACE_SUPPORT
3196 static DEFINE_PER_CPU(int, user_stack_count);
3199 ftrace_trace_userstack(struct trace_array *tr,
3200 struct trace_buffer *buffer, unsigned int trace_ctx)
3202 struct trace_event_call *call = &event_user_stack;
3203 struct ring_buffer_event *event;
3204 struct userstack_entry *entry;
3206 if (!(tr->trace_flags & TRACE_ITER_USERSTACKTRACE))
3210 * NMIs cannot handle page faults, even with fixups.
3211 * Saving the user stack can (and often does) fault.
3213 if (unlikely(in_nmi()))
3217 * prevent recursion, since the user stack tracing may
3218 * trigger other kernel events.
3221 if (__this_cpu_read(user_stack_count))
3224 __this_cpu_inc(user_stack_count);
3226 event = __trace_buffer_lock_reserve(buffer, TRACE_USER_STACK,
3227 sizeof(*entry), trace_ctx);
3229 goto out_drop_count;
3230 entry = ring_buffer_event_data(event);
3232 entry->tgid = current->tgid;
3233 memset(&entry->caller, 0, sizeof(entry->caller));
3235 stack_trace_save_user(entry->caller, FTRACE_STACK_ENTRIES);
3236 if (!call_filter_check_discard(call, entry, buffer, event))
3237 __buffer_unlock_commit(buffer, event);
3240 __this_cpu_dec(user_stack_count);
3244 #else /* CONFIG_USER_STACKTRACE_SUPPORT */
3245 static void ftrace_trace_userstack(struct trace_array *tr,
3246 struct trace_buffer *buffer,
3247 unsigned int trace_ctx)
3250 #endif /* !CONFIG_USER_STACKTRACE_SUPPORT */
3252 #endif /* CONFIG_STACKTRACE */
3255 func_repeats_set_delta_ts(struct func_repeats_entry *entry,
3256 unsigned long long delta)
3258 entry->bottom_delta_ts = delta & U32_MAX;
3259 entry->top_delta_ts = (delta >> 32);
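/*
 * Editorial note: the 64-bit delta is split into two 32-bit halves
 * above; a reader reconstructs it as
 *
 *	delta = ((u64)entry->top_delta_ts << 32) | entry->bottom_delta_ts;
 */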
3262 void trace_last_func_repeats(struct trace_array *tr,
3263 struct trace_func_repeats *last_info,
3264 unsigned int trace_ctx)
3266 struct trace_buffer *buffer = tr->array_buffer.buffer;
3267 struct func_repeats_entry *entry;
3268 struct ring_buffer_event *event;
3271 event = __trace_buffer_lock_reserve(buffer, TRACE_FUNC_REPEATS,
3272 sizeof(*entry), trace_ctx);
3276 delta = ring_buffer_event_time_stamp(buffer, event) -
3277 last_info->ts_last_call;
3279 entry = ring_buffer_event_data(event);
3280 entry->ip = last_info->ip;
3281 entry->parent_ip = last_info->parent_ip;
3282 entry->count = last_info->count;
3283 func_repeats_set_delta_ts(entry, delta);
3285 __buffer_unlock_commit(buffer, event);
3288 /* created for use with alloc_percpu */
3289 struct trace_buffer_struct {
3291 char buffer[4][TRACE_BUF_SIZE];
3294 static struct trace_buffer_struct __percpu *trace_percpu_buffer;
3297 * This allows for lockless recording. If we're nested too deeply, then
3298 * this returns NULL.
3300 static char *get_trace_buf(void)
3302 struct trace_buffer_struct *buffer = this_cpu_ptr(trace_percpu_buffer);
3304 if (!trace_percpu_buffer || buffer->nesting >= 4)
3309 /* Interrupts must see nesting incremented before we use the buffer */
3311 return &buffer->buffer[buffer->nesting - 1][0];
3314 static void put_trace_buf(void)
3316 /* Don't let the decrement of nesting leak before this */
3318 this_cpu_dec(trace_percpu_buffer->nesting);
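/*
 * Editorial sketch of the get/put pairing used by the trace_printk()
 * paths below (the label is hypothetical):
 *
 *	tbuffer = get_trace_buf();
 *	if (!tbuffer)
 *		goto out_nobuf;		// nested more than four deep
 *	len = vscnprintf(tbuffer, TRACE_BUF_SIZE, fmt, args);
 *	...
 *	put_trace_buf();
 */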
3321 static int alloc_percpu_trace_buffer(void)
3323 struct trace_buffer_struct __percpu *buffers;
3325 if (trace_percpu_buffer)
3328 buffers = alloc_percpu(struct trace_buffer_struct);
3329 if (MEM_FAIL(!buffers, "Could not allocate percpu trace_printk buffer"))
3332 trace_percpu_buffer = buffers;
3336 static int buffers_allocated;
3338 void trace_printk_init_buffers(void)
3340 if (buffers_allocated)
3343 if (alloc_percpu_trace_buffer())
3346 /* trace_printk() is for debug use only. Don't use it in production. */
3349 pr_warn("**********************************************************\n");
3350 pr_warn("** NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE **\n");
3352 pr_warn("** trace_printk() being used. Allocating extra memory. **\n");
3354 pr_warn("** This means that this is a DEBUG kernel and it is **\n");
3355 pr_warn("** unsafe for production use. **\n");
3357 pr_warn("** If you see this message and you are not debugging **\n");
3358 pr_warn("** the kernel, report this immediately to your vendor! **\n");
3360 pr_warn("** NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE **\n");
3361 pr_warn("**********************************************************\n");
3363 /* Expand the buffers to set size */
3364 tracing_update_buffers(&global_trace);
3366 buffers_allocated = 1;
3369 * trace_printk_init_buffers() can be called by modules.
3370 * If that happens, then we need to start cmdline recording
3371 * directly here. If the global_trace.buffer is already
3372 * allocated here, then this was called by module code.
3374 if (global_trace.array_buffer.buffer)
3375 tracing_start_cmdline_record();
3377 EXPORT_SYMBOL_GPL(trace_printk_init_buffers);
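/*
 * Editorial example of the debug-only usage this supports (page and
 * order are hypothetical variables; never ship such calls):
 *
 *	trace_printk("page=%p order=%d\n", page, order);
 *
 * The output goes into the ring buffer (readable via the tracefs
 * "trace" file) instead of the printk log.
 */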
3379 void trace_printk_start_comm(void)
3381 /* Start tracing comms if trace printk is set */
3382 if (!buffers_allocated)
3384 tracing_start_cmdline_record();
3387 static void trace_printk_start_stop_comm(int enabled)
3389 if (!buffers_allocated)
3393 tracing_start_cmdline_record();
3395 tracing_stop_cmdline_record();
3399 * trace_vbprintk - write binary msg to tracing buffer
3400 * @ip: The address of the caller
3401 * @fmt: The string format to write to the buffer
3402 * @args: Arguments for @fmt
3404 int trace_vbprintk(unsigned long ip, const char *fmt, va_list args)
3406 struct trace_event_call *call = &event_bprint;
3407 struct ring_buffer_event *event;
3408 struct trace_buffer *buffer;
3409 struct trace_array *tr = &global_trace;
3410 struct bprint_entry *entry;
3411 unsigned int trace_ctx;
3415 if (unlikely(tracing_selftest_running || tracing_disabled))
3418 /* Don't pollute graph traces with trace_vprintk internals */
3419 pause_graph_tracing();
3421 trace_ctx = tracing_gen_ctx();
3422 preempt_disable_notrace();
3424 tbuffer = get_trace_buf();
3430 len = vbin_printf((u32 *)tbuffer, TRACE_BUF_SIZE/sizeof(int), fmt, args);
3432 if (len > TRACE_BUF_SIZE/sizeof(int) || len < 0)
3435 size = sizeof(*entry) + sizeof(u32) * len;
3436 buffer = tr->array_buffer.buffer;
3437 ring_buffer_nest_start(buffer);
3438 event = __trace_buffer_lock_reserve(buffer, TRACE_BPRINT, size,
3442 entry = ring_buffer_event_data(event);
3446 memcpy(entry->buf, tbuffer, sizeof(u32) * len);
3447 if (!call_filter_check_discard(call, entry, buffer, event)) {
3448 __buffer_unlock_commit(buffer, event);
3449 ftrace_trace_stack(tr, buffer, trace_ctx, 6, NULL);
3453 ring_buffer_nest_end(buffer);
3458 preempt_enable_notrace();
3459 unpause_graph_tracing();
3463 EXPORT_SYMBOL_GPL(trace_vbprintk);
3467 __trace_array_vprintk(struct trace_buffer *buffer,
3468 unsigned long ip, const char *fmt, va_list args)
3470 struct trace_event_call *call = &event_print;
3471 struct ring_buffer_event *event;
3473 struct print_entry *entry;
3474 unsigned int trace_ctx;
3477 if (tracing_disabled)
3480 /* Don't pollute graph traces with trace_vprintk internals */
3481 pause_graph_tracing();
3483 trace_ctx = tracing_gen_ctx();
3484 preempt_disable_notrace();
3487 tbuffer = get_trace_buf();
3493 len = vscnprintf(tbuffer, TRACE_BUF_SIZE, fmt, args);
3495 size = sizeof(*entry) + len + 1;
3496 ring_buffer_nest_start(buffer);
3497 event = __trace_buffer_lock_reserve(buffer, TRACE_PRINT, size,
3501 entry = ring_buffer_event_data(event);
3504 memcpy(&entry->buf, tbuffer, len + 1);
3505 if (!call_filter_check_discard(call, entry, buffer, event)) {
3506 __buffer_unlock_commit(buffer, event);
3507 ftrace_trace_stack(&global_trace, buffer, trace_ctx, 6, NULL);
3511 ring_buffer_nest_end(buffer);
3515 preempt_enable_notrace();
3516 unpause_graph_tracing();
3522 int trace_array_vprintk(struct trace_array *tr,
3523 unsigned long ip, const char *fmt, va_list args)
3525 if (tracing_selftest_running && tr == &global_trace)
3528 return __trace_array_vprintk(tr->array_buffer.buffer, ip, fmt, args);
3532 * trace_array_printk - Print a message to a specific instance
3533 * @tr: The instance trace_array descriptor
3534 * @ip: The instruction pointer that this is called from.
3535 * @fmt: The format to print (printf format)
3537 * If a subsystem sets up its own instance, they have the right to
3538 * printk strings into their tracing instance buffer using this
3539 * function. Note, this function will not write into the top level
3540 * buffer (use trace_printk() for that), as writing into the top level
3541 * buffer should only have events that can be individually disabled.
3542 * trace_printk() is only used for debugging a kernel, and should not
3543 * ever be incorporated into normal use.
3545 * trace_array_printk() can be used, as it will not add noise to the
3546 * top level tracing buffer.
3548 * Note, trace_array_init_printk() must be called on @tr before this
3552 int trace_array_printk(struct trace_array *tr,
3553 unsigned long ip, const char *fmt, ...)
3561 /* This is only allowed for created instances */
3562 if (tr == &global_trace)
3565 if (!(tr->trace_flags & TRACE_ITER_PRINTK))
3569 ret = trace_array_vprintk(tr, ip, fmt, ap);
3573 EXPORT_SYMBOL_GPL(trace_array_printk);
3576 * trace_array_init_printk - Initialize buffers for trace_array_printk()
3577 * @tr: The trace array to initialize the buffers for
3579 * As trace_array_printk() only writes into instances, they are OK to
3580 * have in the kernel (unlike trace_printk()). This needs to be called
3581 * before trace_array_printk() can be used on a trace_array.
3583 int trace_array_init_printk(struct trace_array *tr)
3588 /* This is only allowed for created instances */
3589 if (tr == &global_trace)
3592 return alloc_percpu_trace_buffer();
3594 EXPORT_SYMBOL_GPL(trace_array_init_printk);
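/*
 * Editorial sketch of a subsystem printing into its own instance; tr is
 * assumed to have been obtained when the instance was created, and
 * state is a hypothetical variable:
 *
 *	trace_array_init_printk(tr);
 *	trace_array_printk(tr, _THIS_IP_, "state=%d\n", state);
 */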
3597 int trace_array_printk_buf(struct trace_buffer *buffer,
3598 unsigned long ip, const char *fmt, ...)
3603 if (!(global_trace.trace_flags & TRACE_ITER_PRINTK))
3607 ret = __trace_array_vprintk(buffer, ip, fmt, ap);
3613 int trace_vprintk(unsigned long ip, const char *fmt, va_list args)
3615 return trace_array_vprintk(&global_trace, ip, fmt, args);
3617 EXPORT_SYMBOL_GPL(trace_vprintk);
3619 static void trace_iterator_increment(struct trace_iterator *iter)
3621 struct ring_buffer_iter *buf_iter = trace_buffer_iter(iter, iter->cpu);
3625 ring_buffer_iter_advance(buf_iter);
3628 static struct trace_entry *
3629 peek_next_entry(struct trace_iterator *iter, int cpu, u64 *ts,
3630 unsigned long *lost_events)
3632 struct ring_buffer_event *event;
3633 struct ring_buffer_iter *buf_iter = trace_buffer_iter(iter, cpu);
3636 event = ring_buffer_iter_peek(buf_iter, ts);
3638 *lost_events = ring_buffer_iter_dropped(buf_iter) ?
3639 (unsigned long)-1 : 0;
3641 event = ring_buffer_peek(iter->array_buffer->buffer, cpu, ts,
3646 iter->ent_size = ring_buffer_event_length(event);
3647 return ring_buffer_event_data(event);
3653 static struct trace_entry *
3654 __find_next_entry(struct trace_iterator *iter, int *ent_cpu,
3655 unsigned long *missing_events, u64 *ent_ts)
3657 struct trace_buffer *buffer = iter->array_buffer->buffer;
3658 struct trace_entry *ent, *next = NULL;
3659 unsigned long lost_events = 0, next_lost = 0;
3660 int cpu_file = iter->cpu_file;
3661 u64 next_ts = 0, ts;
3667 * If we are in a per_cpu trace file, don't bother iterating over
3668 * all CPUs; peek at that one directly.
3670 if (cpu_file > RING_BUFFER_ALL_CPUS) {
3671 if (ring_buffer_empty_cpu(buffer, cpu_file))
3673 ent = peek_next_entry(iter, cpu_file, ent_ts, missing_events);
3675 *ent_cpu = cpu_file;
3680 for_each_tracing_cpu(cpu) {
3682 if (ring_buffer_empty_cpu(buffer, cpu))
3685 ent = peek_next_entry(iter, cpu, &ts, &lost_events);
3688 * Pick the entry with the smallest timestamp:
3690 if (ent && (!next || ts < next_ts)) {
3694 next_lost = lost_events;
3695 next_size = iter->ent_size;
3699 iter->ent_size = next_size;
3702 *ent_cpu = next_cpu;
3708 *missing_events = next_lost;
3713 #define STATIC_FMT_BUF_SIZE 128
3714 static char static_fmt_buf[STATIC_FMT_BUF_SIZE];
3716 char *trace_iter_expand_format(struct trace_iterator *iter)
3721 * iter->tr is NULL when used with tp_printk, which makes
3722 * this get called where it is not safe to call krealloc().
3724 if (!iter->tr || iter->fmt == static_fmt_buf)
3727 tmp = krealloc(iter->fmt, iter->fmt_size + STATIC_FMT_BUF_SIZE,
3730 iter->fmt_size += STATIC_FMT_BUF_SIZE;
3737 /* Returns true if the string is safe to dereference from an event */
3738 static bool trace_safe_str(struct trace_iterator *iter, const char *str,
3741 unsigned long addr = (unsigned long)str;
3742 struct trace_event *trace_event;
3743 struct trace_event_call *event;
3745 /* Ignore strings with no length */
3749 /* OK if part of the event data */
3750 if ((addr >= (unsigned long)iter->ent) &&
3751 (addr < (unsigned long)iter->ent + iter->ent_size))
3754 /* OK if part of the temp seq buffer */
3755 if ((addr >= (unsigned long)iter->tmp_seq.buffer) &&
3756 (addr < (unsigned long)iter->tmp_seq.buffer + PAGE_SIZE))
3759 /* Core rodata can not be freed */
3760 if (is_kernel_rodata(addr))
3763 if (trace_is_tracepoint_string(str))
3767 * Now this could be a module event, referencing core module
3768 * data, which is OK.
3773 trace_event = ftrace_find_event(iter->ent->type);
3777 event = container_of(trace_event, struct trace_event_call, event);
3778 if ((event->flags & TRACE_EVENT_FL_DYNAMIC) || !event->module)
3781 /* Would rather have rodata, but this will suffice */
3782 if (within_module_core(addr, event->module))
3788 static DEFINE_STATIC_KEY_FALSE(trace_no_verify);
3790 static int test_can_verify_check(const char *fmt, ...)
3797 * The verifier depends on vsnprintf() modifying the va_list passed
3798 * to it, which happens when it is passed by reference. Some
3799 * architectures (like x86_32) pass it by value, which means that
3800 * vsnprintf() does not modify the caller's va_list, and the verifier
3801 * would then need to be able to understand all the values that
3802 * vsnprintf can consume. If it is passed by value, the verifier is disabled.
3806 vsnprintf(buf, 16, "%d", ap);
3807 ret = va_arg(ap, int);
3813 static void test_can_verify(void)
3815 if (!test_can_verify_check("%d %d", 0, 1)) {
3816 pr_info("trace event string verifier disabled\n");
3817 static_branch_inc(&trace_no_verify);
3822 * trace_check_vprintf - Check dereferenced strings while writing to the seq buffer
3823 * @iter: The iterator that holds the seq buffer and the event being printed
3824 * @fmt: The format used to print the event
3825 * @ap: The va_list holding the data to print from @fmt.
3827 * This writes the data into the @iter->seq buffer using the data from
3828 * @fmt and @ap. If the format has a %s, then the source of the string
3829 * is examined to make sure it is safe to print, otherwise it will
3830 * warn and print "[UNSAFE-MEMORY]" in place of the dereferenced string
3833 void trace_check_vprintf(struct trace_iterator *iter, const char *fmt,
3836 const char *p = fmt;
3840 if (WARN_ON_ONCE(!fmt))
3843 if (static_branch_unlikely(&trace_no_verify))
3846 /* Don't bother checking when doing a ftrace_dump() */
3847 if (iter->fmt == static_fmt_buf)
3856 /* We only care about %s and variants */
3857 for (i = 0; p[i]; i++) {
3858 if (i + 1 >= iter->fmt_size) {
3860 * If we can't expand the copy buffer, just print it.
3863 if (!trace_iter_expand_format(iter))
3867 if (p[i] == '\\' && p[i+1]) {
3872 /* Need to test cases like %08.*s */
3873 for (j = 1; p[i+j]; j++) {
3874 if (isdigit(p[i+j]) ||
3877 if (p[i+j] == '*') {
3889 /* If no %s found then just print normally */
3893 /* Copy up to the %s, and print that */
3894 strncpy(iter->fmt, p, i);
3895 iter->fmt[i] = '\0';
3896 trace_seq_vprintf(&iter->seq, iter->fmt, ap);
3899 * If iter->seq is full, the above call no longer guarantees
3900 * that ap is in sync with fmt processing, and further calls
3901 * to va_arg() can return wrong positional arguments.
3903 * Ensure that ap is no longer used in this case.
3905 if (iter->seq.full) {
3911 len = va_arg(ap, int);
3913 /* The ap now points to the string data of the %s */
3914 str = va_arg(ap, const char *);
3917 * If you hit this warning, it is likely that the
3918 * trace event in question used %s on a string that
3919 * was saved at the time of the event, but may not be
3920 * around when the trace is read. Use __string(),
3921 * __assign_str() and __get_str() helpers in the TRACE_EVENT()
3922 * instead. See samples/trace_events/trace-events-sample.h
3925 if (WARN_ONCE(!trace_safe_str(iter, str, star, len),
3926 "fmt: '%s' current_buffer: '%s'",
3927 fmt, seq_buf_str(&iter->seq.seq))) {
3930 /* Try to safely read the string */
3932 if (len + 1 > iter->fmt_size)
3933 len = iter->fmt_size - 1;
3936 ret = copy_from_kernel_nofault(iter->fmt, str, len);
3940 ret = strncpy_from_kernel_nofault(iter->fmt, str,
3944 trace_seq_printf(&iter->seq, "(0x%px)", str);
3946 trace_seq_printf(&iter->seq, "(0x%px:%s)",
3948 str = "[UNSAFE-MEMORY]";
3949 strcpy(iter->fmt, "%s");
3951 strncpy(iter->fmt, p + i, j + 1);
3952 iter->fmt[j+1] = '\0';
3955 trace_seq_printf(&iter->seq, iter->fmt, len, str);
3957 trace_seq_printf(&iter->seq, iter->fmt, str);
3963 trace_seq_vprintf(&iter->seq, p, ap);
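/*
 * Editorial walk-through (the format string is hypothetical): for
 * fmt = "comm=%s pid=%d", the loop above copies and prints "comm=",
 * pulls the string pointer with va_arg(), vets it with
 * trace_safe_str(), and prints either the string or "[UNSAFE-MEMORY]";
 * the trailing " pid=%d" holds no further %s, so it is flushed by the
 * final trace_seq_vprintf() above.
 */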
3966 const char *trace_event_format(struct trace_iterator *iter, const char *fmt)
3968 const char *p, *new_fmt;
3971 if (WARN_ON_ONCE(!fmt))
3974 if (!iter->tr || iter->tr->trace_flags & TRACE_ITER_HASH_PTR)
3978 new_fmt = q = iter->fmt;
3980 if (unlikely(q - new_fmt + 3 > iter->fmt_size)) {
3981 if (!trace_iter_expand_format(iter))
3984 q += iter->fmt - new_fmt;
3985 new_fmt = iter->fmt;
3990 /* Replace %p with %px */
3994 } else if (p[0] == 'p' && !isalnum(p[1])) {
4005 #define STATIC_TEMP_BUF_SIZE 128
4006 static char static_temp_buf[STATIC_TEMP_BUF_SIZE] __aligned(4);
4008 /* Find the next real entry, without updating the iterator itself */
4009 struct trace_entry *trace_find_next_entry(struct trace_iterator *iter,
4010 int *ent_cpu, u64 *ent_ts)
4012 /* __find_next_entry will reset ent_size */
4013 int ent_size = iter->ent_size;
4014 struct trace_entry *entry;
4017 * If called from ftrace_dump(), then the iter->temp buffer
4018 * will be the static_temp_buf and not created from kmalloc.
4019 * If the entry size is greater than the buffer, we can
4020 * not save it. Just return NULL in that case. This is only
4021 * used to add markers when two consecutive events' time
4022 * stamps have a large delta. See trace_print_lat_context()
4024 if (iter->temp == static_temp_buf &&
4025 STATIC_TEMP_BUF_SIZE < ent_size)
4029 * The __find_next_entry() may call peek_next_entry(), which may
4030 * call ring_buffer_peek() that may make the contents of iter->ent
4031 * undefined. Need to copy iter->ent now.
4033 if (iter->ent && iter->ent != iter->temp) {
4034 if ((!iter->temp || iter->temp_size < iter->ent_size) &&
4035 !WARN_ON_ONCE(iter->temp == static_temp_buf)) {
4037 temp = kmalloc(iter->ent_size, GFP_KERNEL);
4042 iter->temp_size = iter->ent_size;
4044 memcpy(iter->temp, iter->ent, iter->ent_size);
4045 iter->ent = iter->temp;
4047 entry = __find_next_entry(iter, ent_cpu, NULL, ent_ts);
4048 /* Put back the original ent_size */
4049 iter->ent_size = ent_size;
4054 /* Find the next real entry, and increment the iterator to the next entry */
4055 void *trace_find_next_entry_inc(struct trace_iterator *iter)
4057 iter->ent = __find_next_entry(iter, &iter->cpu,
4058 &iter->lost_events, &iter->ts);
4061 trace_iterator_increment(iter);
4063 return iter->ent ? iter : NULL;
4066 static void trace_consume(struct trace_iterator *iter)
4068 ring_buffer_consume(iter->array_buffer->buffer, iter->cpu, &iter->ts,
4069 &iter->lost_events);
4072 static void *s_next(struct seq_file *m, void *v, loff_t *pos)
4074 struct trace_iterator *iter = m->private;
4078 WARN_ON_ONCE(iter->leftover);
4082 /* can't go backwards */
4087 ent = trace_find_next_entry_inc(iter);
4091 while (ent && iter->idx < i)
4092 ent = trace_find_next_entry_inc(iter);
4099 void tracing_iter_reset(struct trace_iterator *iter, int cpu)
4101 struct ring_buffer_iter *buf_iter;
4102 unsigned long entries = 0;
4105 per_cpu_ptr(iter->array_buffer->data, cpu)->skipped_entries = 0;
4107 buf_iter = trace_buffer_iter(iter, cpu);
4111 ring_buffer_iter_reset(buf_iter);
4114 * We could have the case with the max latency tracers
4115 * that a reset never took place on a cpu. This is evident
4116 * by the timestamp being before the start of the buffer.
4118 while (ring_buffer_iter_peek(buf_iter, &ts)) {
4119 if (ts >= iter->array_buffer->time_start)
4122 ring_buffer_iter_advance(buf_iter);
4125 per_cpu_ptr(iter->array_buffer->data, cpu)->skipped_entries = entries;
4129 * The current tracer is copied into the iterator to avoid global locking all over the place.
4132 static void *s_start(struct seq_file *m, loff_t *pos)
4134 struct trace_iterator *iter = m->private;
4135 struct trace_array *tr = iter->tr;
4136 int cpu_file = iter->cpu_file;
4141 mutex_lock(&trace_types_lock);
4142 if (unlikely(tr->current_trace != iter->trace)) {
4143 /* Close iter->trace before switching to the new current tracer */
4144 if (iter->trace->close)
4145 iter->trace->close(iter);
4146 iter->trace = tr->current_trace;
4147 /* Reopen the new current tracer */
4148 if (iter->trace->open)
4149 iter->trace->open(iter);
4151 mutex_unlock(&trace_types_lock);
4153 #ifdef CONFIG_TRACER_MAX_TRACE
4154 if (iter->snapshot && iter->trace->use_max_tr)
4155 return ERR_PTR(-EBUSY);
4158 if (*pos != iter->pos) {
4163 if (cpu_file == RING_BUFFER_ALL_CPUS) {
4164 for_each_tracing_cpu(cpu)
4165 tracing_iter_reset(iter, cpu);
4167 tracing_iter_reset(iter, cpu_file);
4170 for (p = iter; p && l < *pos; p = s_next(m, p, &l))
4175 * If we overflowed the seq_file before, then we want
4176 * to just reuse the trace_seq buffer again.
4182 p = s_next(m, p, &l);
4186 trace_event_read_lock();
4187 trace_access_lock(cpu_file);
4191 static void s_stop(struct seq_file *m, void *p)
4193 struct trace_iterator *iter = m->private;
4195 #ifdef CONFIG_TRACER_MAX_TRACE
4196 if (iter->snapshot && iter->trace->use_max_tr)
4200 trace_access_unlock(iter->cpu_file);
4201 trace_event_read_unlock();
4205 get_total_entries_cpu(struct array_buffer *buf, unsigned long *total,
4206 unsigned long *entries, int cpu)
4208 unsigned long count;
4210 count = ring_buffer_entries_cpu(buf->buffer, cpu);
4212 * If this buffer has skipped entries, then we hold all
4213 * entries for the trace and we need to ignore the
4214 * ones before the time stamp.
4216 if (per_cpu_ptr(buf->data, cpu)->skipped_entries) {
4217 count -= per_cpu_ptr(buf->data, cpu)->skipped_entries;
4218 /* total is the same as the entries */
4222 ring_buffer_overrun_cpu(buf->buffer, cpu);
4227 get_total_entries(struct array_buffer *buf,
4228 unsigned long *total, unsigned long *entries)
4236 for_each_tracing_cpu(cpu) {
4237 get_total_entries_cpu(buf, &t, &e, cpu);
4243 unsigned long trace_total_entries_cpu(struct trace_array *tr, int cpu)
4245 unsigned long total, entries;
4250 get_total_entries_cpu(&tr->array_buffer, &total, &entries, cpu);
4255 unsigned long trace_total_entries(struct trace_array *tr)
4257 unsigned long total, entries;
4262 get_total_entries(&tr->array_buffer, &total, &entries);
4267 static void print_lat_help_header(struct seq_file *m)
4269 seq_puts(m, "# _------=> CPU# \n"
4270 "# / _-----=> irqs-off/BH-disabled\n"
4271 "# | / _----=> need-resched \n"
4272 "# || / _---=> hardirq/softirq \n"
4273 "# ||| / _--=> preempt-depth \n"
4274 "# |||| / _-=> migrate-disable \n"
4275 "# ||||| / delay \n"
4276 "# cmd pid |||||| time | caller \n"
4277 "# \\ / |||||| \\ | / \n");
4280 static void print_event_info(struct array_buffer *buf, struct seq_file *m)
4282 unsigned long total;
4283 unsigned long entries;
4285 get_total_entries(buf, &total, &entries);
4286 seq_printf(m, "# entries-in-buffer/entries-written: %lu/%lu #P:%d\n",
4287 entries, total, num_online_cpus());
4291 static void print_func_help_header(struct array_buffer *buf, struct seq_file *m,
4294 bool tgid = flags & TRACE_ITER_RECORD_TGID;
4296 print_event_info(buf, m);
4298 seq_printf(m, "# TASK-PID %s CPU# TIMESTAMP FUNCTION\n", tgid ? " TGID " : "");
4299 seq_printf(m, "# | | %s | | |\n", tgid ? " | " : "");
4302 static void print_func_help_header_irq(struct array_buffer *buf, struct seq_file *m,
4305 bool tgid = flags & TRACE_ITER_RECORD_TGID;
4306 static const char space[] = " ";
4307 int prec = tgid ? 12 : 2;
4309 print_event_info(buf, m);
4311 seq_printf(m, "# %.*s _-----=> irqs-off/BH-disabled\n", prec, space);
4312 seq_printf(m, "# %.*s / _----=> need-resched\n", prec, space);
4313 seq_printf(m, "# %.*s| / _---=> hardirq/softirq\n", prec, space);
4314 seq_printf(m, "# %.*s|| / _--=> preempt-depth\n", prec, space);
4315 seq_printf(m, "# %.*s||| / _-=> migrate-disable\n", prec, space);
4316 seq_printf(m, "# %.*s|||| / delay\n", prec, space);
4317 seq_printf(m, "# TASK-PID %.*s CPU# ||||| TIMESTAMP FUNCTION\n", prec, " TGID ");
4318 seq_printf(m, "# | | %.*s | ||||| | |\n", prec, " | ");
4322 print_trace_header(struct seq_file *m, struct trace_iterator *iter)
4324 unsigned long sym_flags = (global_trace.trace_flags & TRACE_ITER_SYM_MASK);
4325 struct array_buffer *buf = iter->array_buffer;
4326 struct trace_array_cpu *data = per_cpu_ptr(buf->data, buf->cpu);
4327 struct tracer *type = iter->trace;
4328 unsigned long entries;
4329 unsigned long total;
4330 const char *name = type->name;
4332 get_total_entries(buf, &total, &entries);
4334 seq_printf(m, "# %s latency trace v1.1.5 on %s\n",
4336 seq_puts(m, "# -----------------------------------"
4337 "---------------------------------\n");
4338 seq_printf(m, "# latency: %lu us, #%lu/%lu, CPU#%d |"
4339 " (M:%s VP:%d, KP:%d, SP:%d HP:%d",
4340 nsecs_to_usecs(data->saved_latency),
4344 preempt_model_none() ? "server" :
4345 preempt_model_voluntary() ? "desktop" :
4346 preempt_model_full() ? "preempt" :
4347 preempt_model_rt() ? "preempt_rt" :
4349 /* These are reserved for later use */
4352 seq_printf(m, " #P:%d)\n", num_online_cpus());
4356 seq_puts(m, "# -----------------\n");
4357 seq_printf(m, "# | task: %.16s-%d "
4358 "(uid:%d nice:%ld policy:%ld rt_prio:%ld)\n",
4359 data->comm, data->pid,
4360 from_kuid_munged(seq_user_ns(m), data->uid), data->nice,
4361 data->policy, data->rt_priority);
4362 seq_puts(m, "# -----------------\n");
4364 if (data->critical_start) {
4365 seq_puts(m, "# => started at: ");
4366 seq_print_ip_sym(&iter->seq, data->critical_start, sym_flags);
4367 trace_print_seq(m, &iter->seq);
4368 seq_puts(m, "\n# => ended at: ");
4369 seq_print_ip_sym(&iter->seq, data->critical_end, sym_flags);
4370 trace_print_seq(m, &iter->seq);
4371 seq_puts(m, "\n#\n");
4377 static void test_cpu_buff_start(struct trace_iterator *iter)
4379 struct trace_seq *s = &iter->seq;
4380 struct trace_array *tr = iter->tr;
4382 if (!(tr->trace_flags & TRACE_ITER_ANNOTATE))
4385 if (!(iter->iter_flags & TRACE_FILE_ANNOTATE))
4388 if (cpumask_available(iter->started) &&
4389 cpumask_test_cpu(iter->cpu, iter->started))
4392 if (per_cpu_ptr(iter->array_buffer->data, iter->cpu)->skipped_entries)
4395 if (cpumask_available(iter->started))
4396 cpumask_set_cpu(iter->cpu, iter->started);
4398 /* Don't print started cpu buffer for the first entry of the trace */
4400 trace_seq_printf(s, "##### CPU %u buffer started ####\n",
4404 static enum print_line_t print_trace_fmt(struct trace_iterator *iter)
4406 struct trace_array *tr = iter->tr;
4407 struct trace_seq *s = &iter->seq;
4408 unsigned long sym_flags = (tr->trace_flags & TRACE_ITER_SYM_MASK);
4409 struct trace_entry *entry;
4410 struct trace_event *event;
4414 test_cpu_buff_start(iter);
4416 event = ftrace_find_event(entry->type);
4418 if (tr->trace_flags & TRACE_ITER_CONTEXT_INFO) {
4419 if (iter->iter_flags & TRACE_FILE_LAT_FMT)
4420 trace_print_lat_context(iter);
4422 trace_print_context(iter);
4425 if (trace_seq_has_overflowed(s))
4426 return TRACE_TYPE_PARTIAL_LINE;
4429 if (tr->trace_flags & TRACE_ITER_FIELDS)
4430 return print_event_fields(iter, event);
4431 return event->funcs->trace(iter, sym_flags, event);
4434 trace_seq_printf(s, "Unknown type %d\n", entry->type);
4436 return trace_handle_return(s);
4439 static enum print_line_t print_raw_fmt(struct trace_iterator *iter)
4441 struct trace_array *tr = iter->tr;
4442 struct trace_seq *s = &iter->seq;
4443 struct trace_entry *entry;
4444 struct trace_event *event;
4448 if (tr->trace_flags & TRACE_ITER_CONTEXT_INFO)
4449 trace_seq_printf(s, "%d %d %llu ",
4450 entry->pid, iter->cpu, iter->ts);
4452 if (trace_seq_has_overflowed(s))
4453 return TRACE_TYPE_PARTIAL_LINE;
4455 event = ftrace_find_event(entry->type);
4457 return event->funcs->raw(iter, 0, event);
4459 trace_seq_printf(s, "%d ?\n", entry->type);
4461 return trace_handle_return(s);
4464 static enum print_line_t print_hex_fmt(struct trace_iterator *iter)
4466 struct trace_array *tr = iter->tr;
4467 struct trace_seq *s = &iter->seq;
4468 unsigned char newline = '\n';
4469 struct trace_entry *entry;
4470 struct trace_event *event;
4474 if (tr->trace_flags & TRACE_ITER_CONTEXT_INFO) {
4475 SEQ_PUT_HEX_FIELD(s, entry->pid);
4476 SEQ_PUT_HEX_FIELD(s, iter->cpu);
4477 SEQ_PUT_HEX_FIELD(s, iter->ts);
4478 if (trace_seq_has_overflowed(s))
4479 return TRACE_TYPE_PARTIAL_LINE;
4482 event = ftrace_find_event(entry->type);
4484 enum print_line_t ret = event->funcs->hex(iter, 0, event);
4485 if (ret != TRACE_TYPE_HANDLED)
4489 SEQ_PUT_FIELD(s, newline);
4491 return trace_handle_return(s);
4494 static enum print_line_t print_bin_fmt(struct trace_iterator *iter)
4496 struct trace_array *tr = iter->tr;
4497 struct trace_seq *s = &iter->seq;
4498 struct trace_entry *entry;
4499 struct trace_event *event;
4503 if (tr->trace_flags & TRACE_ITER_CONTEXT_INFO) {
4504 SEQ_PUT_FIELD(s, entry->pid);
4505 SEQ_PUT_FIELD(s, iter->cpu);
4506 SEQ_PUT_FIELD(s, iter->ts);
4507 if (trace_seq_has_overflowed(s))
4508 return TRACE_TYPE_PARTIAL_LINE;
4511 event = ftrace_find_event(entry->type);
4512 return event ? event->funcs->binary(iter, 0, event) :
4516 int trace_empty(struct trace_iterator *iter)
4518 struct ring_buffer_iter *buf_iter;
4521 /* If we are looking at one CPU buffer, only check that one */
4522 if (iter->cpu_file != RING_BUFFER_ALL_CPUS) {
4523 cpu = iter->cpu_file;
4524 buf_iter = trace_buffer_iter(iter, cpu);
4526 if (!ring_buffer_iter_empty(buf_iter))
4529 if (!ring_buffer_empty_cpu(iter->array_buffer->buffer, cpu))
4535 for_each_tracing_cpu(cpu) {
4536 buf_iter = trace_buffer_iter(iter, cpu);
4538 if (!ring_buffer_iter_empty(buf_iter))
4541 if (!ring_buffer_empty_cpu(iter->array_buffer->buffer, cpu))
4549 /* Called with trace_event_read_lock() held. */
4550 enum print_line_t print_trace_line(struct trace_iterator *iter)
4552 struct trace_array *tr = iter->tr;
4553 unsigned long trace_flags = tr->trace_flags;
4554 enum print_line_t ret;
4556 if (iter->lost_events) {
4557 if (iter->lost_events == (unsigned long)-1)
4558 trace_seq_printf(&iter->seq, "CPU:%d [LOST EVENTS]\n",
4561 trace_seq_printf(&iter->seq, "CPU:%d [LOST %lu EVENTS]\n",
4562 iter->cpu, iter->lost_events);
4563 if (trace_seq_has_overflowed(&iter->seq))
4564 return TRACE_TYPE_PARTIAL_LINE;
4567 if (iter->trace && iter->trace->print_line) {
4568 ret = iter->trace->print_line(iter);
4569 if (ret != TRACE_TYPE_UNHANDLED)
4573 if (iter->ent->type == TRACE_BPUTS &&
4574 trace_flags & TRACE_ITER_PRINTK &&
4575 trace_flags & TRACE_ITER_PRINTK_MSGONLY)
4576 return trace_print_bputs_msg_only(iter);
4578 if (iter->ent->type == TRACE_BPRINT &&
4579 trace_flags & TRACE_ITER_PRINTK &&
4580 trace_flags & TRACE_ITER_PRINTK_MSGONLY)
4581 return trace_print_bprintk_msg_only(iter);
4583 if (iter->ent->type == TRACE_PRINT &&
4584 trace_flags & TRACE_ITER_PRINTK &&
4585 trace_flags & TRACE_ITER_PRINTK_MSGONLY)
4586 return trace_print_printk_msg_only(iter);
4588 if (trace_flags & TRACE_ITER_BIN)
4589 return print_bin_fmt(iter);
4591 if (trace_flags & TRACE_ITER_HEX)
4592 return print_hex_fmt(iter);
4594 if (trace_flags & TRACE_ITER_RAW)
4595 return print_raw_fmt(iter);
4597 return print_trace_fmt(iter);
4600 void trace_latency_header(struct seq_file *m)
4602 struct trace_iterator *iter = m->private;
4603 struct trace_array *tr = iter->tr;
4605 /* print nothing if the buffers are empty */
4606 if (trace_empty(iter))
4609 if (iter->iter_flags & TRACE_FILE_LAT_FMT)
4610 print_trace_header(m, iter);
4612 if (!(tr->trace_flags & TRACE_ITER_VERBOSE))
4613 print_lat_help_header(m);
4616 void trace_default_header(struct seq_file *m)
4618 struct trace_iterator *iter = m->private;
4619 struct trace_array *tr = iter->tr;
4620 unsigned long trace_flags = tr->trace_flags;
4622 if (!(trace_flags & TRACE_ITER_CONTEXT_INFO))
4625 if (iter->iter_flags & TRACE_FILE_LAT_FMT) {
4626 /* print nothing if the buffers are empty */
4627 if (trace_empty(iter))
4629 print_trace_header(m, iter);
4630 if (!(trace_flags & TRACE_ITER_VERBOSE))
4631 print_lat_help_header(m);
4633 if (!(trace_flags & TRACE_ITER_VERBOSE)) {
4634 if (trace_flags & TRACE_ITER_IRQ_INFO)
4635 print_func_help_header_irq(iter->array_buffer,
4638 print_func_help_header(iter->array_buffer, m,
4644 static void test_ftrace_alive(struct seq_file *m)
4646 if (!ftrace_is_dead())
4648 seq_puts(m, "# WARNING: FUNCTION TRACING IS CORRUPTED\n"
4649 "# MAY BE MISSING FUNCTION EVENTS\n");
4652 #ifdef CONFIG_TRACER_MAX_TRACE
4653 static void show_snapshot_main_help(struct seq_file *m)
4655 seq_puts(m, "# echo 0 > snapshot : Clears and frees snapshot buffer\n"
4656 "# echo 1 > snapshot : Allocates snapshot buffer, if not already allocated.\n"
4657 "# Takes a snapshot of the main buffer.\n"
4658 "# echo 2 > snapshot : Clears snapshot buffer (but does not allocate or free)\n"
4659 "# (Doesn't have to be '2' works with any number that\n"
4660 "# is not a '0' or '1')\n");
4663 static void show_snapshot_percpu_help(struct seq_file *m)
4665 seq_puts(m, "# echo 0 > snapshot : Invalid for per_cpu snapshot file.\n");
4666 #ifdef CONFIG_RING_BUFFER_ALLOW_SWAP
4667 seq_puts(m, "# echo 1 > snapshot : Allocates snapshot buffer, if not already allocated.\n"
4668 "# Takes a snapshot of the main buffer for this cpu.\n");
4670 seq_puts(m, "# echo 1 > snapshot : Not supported with this kernel.\n"
4671 "# Must use main snapshot file to allocate.\n");
4673 seq_puts(m, "# echo 2 > snapshot : Clears this cpu's snapshot buffer (but does not allocate)\n"
4674 "# (Doesn't have to be '2' works with any number that\n"
4675 "# is not a '0' or '1')\n");
4678 static void print_snapshot_help(struct seq_file *m, struct trace_iterator *iter)
4680 if (iter->tr->allocated_snapshot)
4681 seq_puts(m, "#\n# * Snapshot is allocated *\n#\n");
4683 seq_puts(m, "#\n# * Snapshot is freed *\n#\n");
4685 seq_puts(m, "# Snapshot commands:\n");
4686 if (iter->cpu_file == RING_BUFFER_ALL_CPUS)
4687 show_snapshot_main_help(m);
4689 show_snapshot_percpu_help(m);
4692 /* Should never be called */
4693 static inline void print_snapshot_help(struct seq_file *m, struct trace_iterator *iter) { }
4696 static int s_show(struct seq_file *m, void *v)
4698 struct trace_iterator *iter = v;
4701 if (iter->ent == NULL) {
4703 seq_printf(m, "# tracer: %s\n", iter->trace->name);
4705 test_ftrace_alive(m);
4707 if (iter->snapshot && trace_empty(iter))
4708 print_snapshot_help(m, iter);
4709 else if (iter->trace && iter->trace->print_header)
4710 iter->trace->print_header(m);
4712 trace_default_header(m);
4714 } else if (iter->leftover) {
4716 * If we filled the seq_file buffer earlier, we
4717 * want to just show it now.
4719 ret = trace_print_seq(m, &iter->seq);
4721 /* ret should this time be zero, but you never know */
4722 iter->leftover = ret;
4725 ret = print_trace_line(iter);
4726 if (ret == TRACE_TYPE_PARTIAL_LINE) {
4728 trace_seq_puts(&iter->seq, "[LINE TOO BIG]\n");
4730 ret = trace_print_seq(m, &iter->seq);
4732 * If we overflow the seq_file buffer, then it will
4733 * ask us for this data again at start up.
4735 * ret is 0 if seq_file write succeeded.
4738 iter->leftover = ret;
4745 * Should be used after trace_array_get(), trace_types_lock
4746 * ensures that i_cdev was already initialized.
4748 static inline int tracing_get_cpu(struct inode *inode)
4750 if (inode->i_cdev) /* See trace_create_cpu_file() */
4751 return (long)inode->i_cdev - 1;
4752 return RING_BUFFER_ALL_CPUS;
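/*
 * Editorial note: trace_create_cpu_file() stores cpu + 1 in i_cdev so
 * that CPU 0 is distinguishable from "no per-cpu file" (NULL); the
 * "- 1" above undoes that bias.
 */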
4755 static const struct seq_operations tracer_seq_ops = {
4763 * Note, as iter itself can be allocated and freed in different
4764 * ways, this function is only used to free its content, and not
4765 * the iterator itself. The only requirement for all the allocations
4766 * is that they zero all fields (kzalloc), as freeing works with
4767 * either allocated content or NULL.
4769 static void free_trace_iter_content(struct trace_iterator *iter)
4771 /* The fmt is either NULL, allocated or points to static_fmt_buf */
4772 if (iter->fmt != static_fmt_buf)
4776 kfree(iter->buffer_iter);
4777 mutex_destroy(&iter->mutex);
4778 free_cpumask_var(iter->started);
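/*
 * Illustration of the convention described above (a sketch, not extra
 * code in this file): because every allocator of a trace_iterator zeroes
 * it, each field here is either NULL or a live allocation, so the
 * unconditional kfree()/free_cpumask_var() calls are always safe:
 *
 *   iter = __seq_open_private(file, &tracer_seq_ops, sizeof(*iter));
 *   ...                              // may leave some fields NULL
 *   free_trace_iter_content(iter);   // frees only what was set
 */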
4781 static struct trace_iterator *
4782 __tracing_open(struct inode *inode, struct file *file, bool snapshot)
4784 struct trace_array *tr = inode->i_private;
4785 struct trace_iterator *iter;
4788 if (tracing_disabled)
4789 return ERR_PTR(-ENODEV);
4791 iter = __seq_open_private(file, &tracer_seq_ops, sizeof(*iter));
4793 return ERR_PTR(-ENOMEM);
4795 iter->buffer_iter = kcalloc(nr_cpu_ids, sizeof(*iter->buffer_iter),
4797 if (!iter->buffer_iter)
4801 * trace_find_next_entry() may need to save off iter->ent.
4802 * It will place it into the iter->temp buffer. As most
4803 * events are less than 128, allocate a buffer of that size.
4804 * If one is greater, then trace_find_next_entry() will
4805 * allocate a new buffer to adjust for the bigger iter->ent.
4806 * It's not critical if it fails to get allocated here.
4808 iter->temp = kmalloc(128, GFP_KERNEL);
4810 iter->temp_size = 128;
4813 * trace_event_printf() may need to modify the given format
4814 * string to replace %p with %px so that it shows the real address
4815 * instead of a hash value. However, that is only needed for event
4816 * tracing; other tracers may not need it. Defer the allocation
4817 * until it is needed.
4822 mutex_lock(&trace_types_lock);
4823 iter->trace = tr->current_trace;
4825 if (!zalloc_cpumask_var(&iter->started, GFP_KERNEL))
4830 #ifdef CONFIG_TRACER_MAX_TRACE
4831 /* Currently only the top directory has a snapshot */
4832 if (tr->current_trace->print_max || snapshot)
4833 iter->array_buffer = &tr->max_buffer;
4836 iter->array_buffer = &tr->array_buffer;
4837 iter->snapshot = snapshot;
4839 iter->cpu_file = tracing_get_cpu(inode);
4840 mutex_init(&iter->mutex);
4842 /* Notify the tracer early; before we stop tracing. */
4843 if (iter->trace->open)
4844 iter->trace->open(iter);
4846 /* Annotate start of buffers if we had overruns */
4847 if (ring_buffer_overruns(iter->array_buffer->buffer))
4848 iter->iter_flags |= TRACE_FILE_ANNOTATE;
4850 /* Output in nanoseconds only if we are using a clock in nanoseconds. */
4851 if (trace_clocks[tr->clock_id].in_ns)
4852 iter->iter_flags |= TRACE_FILE_TIME_IN_NS;
4855 * If pause-on-trace is enabled, then stop the trace while
4856 * dumping, unless this is the "snapshot" file
4858 if (!iter->snapshot && (tr->trace_flags & TRACE_ITER_PAUSE_ON_TRACE))
4859 tracing_stop_tr(tr);
4861 if (iter->cpu_file == RING_BUFFER_ALL_CPUS) {
4862 for_each_tracing_cpu(cpu) {
4863 iter->buffer_iter[cpu] =
4864 ring_buffer_read_prepare(iter->array_buffer->buffer,
4867 ring_buffer_read_prepare_sync();
4868 for_each_tracing_cpu(cpu) {
4869 ring_buffer_read_start(iter->buffer_iter[cpu]);
4870 tracing_iter_reset(iter, cpu);
4873 cpu = iter->cpu_file;
4874 iter->buffer_iter[cpu] =
4875 ring_buffer_read_prepare(iter->array_buffer->buffer,
4877 ring_buffer_read_prepare_sync();
4878 ring_buffer_read_start(iter->buffer_iter[cpu]);
4879 tracing_iter_reset(iter, cpu);
4882 mutex_unlock(&trace_types_lock);
4887 mutex_unlock(&trace_types_lock);
4888 free_trace_iter_content(iter);
4890 seq_release_private(inode, file);
4891 return ERR_PTR(-ENOMEM);
4894 int tracing_open_generic(struct inode *inode, struct file *filp)
4898 ret = tracing_check_open_get_tr(NULL);
4902 filp->private_data = inode->i_private;
4906 bool tracing_is_disabled(void)
4908 return tracing_disabled ? true : false;
4912 * Open and update trace_array ref count.
4913 * Must have the current trace_array passed to it.
4915 int tracing_open_generic_tr(struct inode *inode, struct file *filp)
4917 struct trace_array *tr = inode->i_private;
4920 ret = tracing_check_open_get_tr(tr);
4924 filp->private_data = inode->i_private;
4930 * The private pointer of the inode is the trace_event_file.
4931 * Update the tr ref count associated to it.
4933 int tracing_open_file_tr(struct inode *inode, struct file *filp)
4935 struct trace_event_file *file = inode->i_private;
4938 ret = tracing_check_open_get_tr(file->tr);
4942 mutex_lock(&event_mutex);
4944 /* Fail if the file is marked for removal */
4945 if (file->flags & EVENT_FILE_FL_FREED) {
4946 trace_array_put(file->tr);
4949 event_file_get(file);
4952 mutex_unlock(&event_mutex);
4956 filp->private_data = inode->i_private;
4961 int tracing_release_file_tr(struct inode *inode, struct file *filp)
4963 struct trace_event_file *file = inode->i_private;
4965 trace_array_put(file->tr);
4966 event_file_put(file);
4971 int tracing_single_release_file_tr(struct inode *inode, struct file *filp)
4973 tracing_release_file_tr(inode, filp);
4974 return single_release(inode, filp);
4977 static int tracing_mark_open(struct inode *inode, struct file *filp)
4979 stream_open(inode, filp);
4980 return tracing_open_generic_tr(inode, filp);
4983 static int tracing_release(struct inode *inode, struct file *file)
4985 struct trace_array *tr = inode->i_private;
4986 struct seq_file *m = file->private_data;
4987 struct trace_iterator *iter;
4990 if (!(file->f_mode & FMODE_READ)) {
4991 trace_array_put(tr);
4995 /* Writes do not use seq_file */
4997 mutex_lock(&trace_types_lock);
4999 for_each_tracing_cpu(cpu) {
5000 if (iter->buffer_iter[cpu])
5001 ring_buffer_read_finish(iter->buffer_iter[cpu]);
5004 if (iter->trace && iter->trace->close)
5005 iter->trace->close(iter);
5007 if (!iter->snapshot && tr->stop_count)
5008 /* reenable tracing if it was previously enabled */
5009 tracing_start_tr(tr);
5011 __trace_array_put(tr);
5013 mutex_unlock(&trace_types_lock);
5015 free_trace_iter_content(iter);
5016 seq_release_private(inode, file);
5021 static int tracing_release_generic_tr(struct inode *inode, struct file *file)
5023 struct trace_array *tr = inode->i_private;
5025 trace_array_put(tr);
5029 static int tracing_single_release_tr(struct inode *inode, struct file *file)
5031 struct trace_array *tr = inode->i_private;
5033 trace_array_put(tr);
5035 return single_release(inode, file);
5038 static int tracing_open(struct inode *inode, struct file *file)
5040 struct trace_array *tr = inode->i_private;
5041 struct trace_iterator *iter;
5044 ret = tracing_check_open_get_tr(tr);
5048 /* If this file was open for write, then erase contents */
5049 if ((file->f_mode & FMODE_WRITE) && (file->f_flags & O_TRUNC)) {
5050 int cpu = tracing_get_cpu(inode);
5051 struct array_buffer *trace_buf = &tr->array_buffer;
5053 #ifdef CONFIG_TRACER_MAX_TRACE
5054 if (tr->current_trace->print_max)
5055 trace_buf = &tr->max_buffer;
5058 if (cpu == RING_BUFFER_ALL_CPUS)
5059 tracing_reset_online_cpus(trace_buf);
5061 tracing_reset_cpu(trace_buf, cpu);
5064 if (file->f_mode & FMODE_READ) {
5065 iter = __tracing_open(inode, file, false);
5067 ret = PTR_ERR(iter);
5068 else if (tr->trace_flags & TRACE_ITER_LATENCY_FMT)
5069 iter->iter_flags |= TRACE_FILE_LAT_FMT;
5073 trace_array_put(tr);
5079 * Some tracers are not suitable for instance buffers.
5080 * A tracer is always available for the global array (toplevel)
5081 * or if it explicitly states that it is.
5084 trace_ok_for_array(struct tracer *t, struct trace_array *tr)
5086 return (tr->flags & TRACE_ARRAY_FL_GLOBAL) || t->allow_instances;
5089 /* Find the next tracer that this trace array may use */
5090 static struct tracer *
5091 get_tracer_for_array(struct trace_array *tr, struct tracer *t)
5093 while (t && !trace_ok_for_array(t, tr))
5100 t_next(struct seq_file *m, void *v, loff_t *pos)
5102 struct trace_array *tr = m->private;
5103 struct tracer *t = v;
5108 t = get_tracer_for_array(tr, t->next);
5113 static void *t_start(struct seq_file *m, loff_t *pos)
5115 struct trace_array *tr = m->private;
5119 mutex_lock(&trace_types_lock);
5121 t = get_tracer_for_array(tr, trace_types);
5122 for (; t && l < *pos; t = t_next(m, t, &l))
5128 static void t_stop(struct seq_file *m, void *p)
5130 mutex_unlock(&trace_types_lock);
5133 static int t_show(struct seq_file *m, void *v)
5135 struct tracer *t = v;
5140 seq_puts(m, t->name);
5149 static const struct seq_operations show_traces_seq_ops = {
5156 static int show_traces_open(struct inode *inode, struct file *file)
5158 struct trace_array *tr = inode->i_private;
5162 ret = tracing_check_open_get_tr(tr);
5166 ret = seq_open(file, &show_traces_seq_ops);
5168 trace_array_put(tr);
5172 m = file->private_data;
5178 static int show_traces_release(struct inode *inode, struct file *file)
5180 struct trace_array *tr = inode->i_private;
5182 trace_array_put(tr);
5183 return seq_release(inode, file);
5187 tracing_write_stub(struct file *filp, const char __user *ubuf,
5188 size_t count, loff_t *ppos)
5193 loff_t tracing_lseek(struct file *file, loff_t offset, int whence)
5197 if (file->f_mode & FMODE_READ)
5198 ret = seq_lseek(file, offset, whence);
5200 file->f_pos = ret = 0;
5205 static const struct file_operations tracing_fops = {
5206 .open = tracing_open,
5208 .read_iter = seq_read_iter,
5209 .splice_read = copy_splice_read,
5210 .write = tracing_write_stub,
5211 .llseek = tracing_lseek,
5212 .release = tracing_release,
5215 static const struct file_operations show_traces_fops = {
5216 .open = show_traces_open,
5218 .llseek = seq_lseek,
5219 .release = show_traces_release,
5223 tracing_cpumask_read(struct file *filp, char __user *ubuf,
5224 size_t count, loff_t *ppos)
5226 struct trace_array *tr = file_inode(filp)->i_private;
5230 len = snprintf(NULL, 0, "%*pb\n",
5231 cpumask_pr_args(tr->tracing_cpumask)) + 1;
5232 mask_str = kmalloc(len, GFP_KERNEL);
5236 len = snprintf(mask_str, len, "%*pb\n",
5237 cpumask_pr_args(tr->tracing_cpumask));
5242 count = simple_read_from_buffer(ubuf, count, ppos, mask_str, len);
5250 int tracing_set_cpumask(struct trace_array *tr,
5251 cpumask_var_t tracing_cpumask_new)
5258 local_irq_disable();
5259 arch_spin_lock(&tr->max_lock);
5260 for_each_tracing_cpu(cpu) {
5262 * Increase/decrease the disabled counter if we are
5263 * about to flip a bit in the cpumask:
5265 if (cpumask_test_cpu(cpu, tr->tracing_cpumask) &&
5266 !cpumask_test_cpu(cpu, tracing_cpumask_new)) {
5267 atomic_inc(&per_cpu_ptr(tr->array_buffer.data, cpu)->disabled);
5268 ring_buffer_record_disable_cpu(tr->array_buffer.buffer, cpu);
5269 #ifdef CONFIG_TRACER_MAX_TRACE
5270 ring_buffer_record_disable_cpu(tr->max_buffer.buffer, cpu);
5273 if (!cpumask_test_cpu(cpu, tr->tracing_cpumask) &&
5274 cpumask_test_cpu(cpu, tracing_cpumask_new)) {
5275 atomic_dec(&per_cpu_ptr(tr->array_buffer.data, cpu)->disabled);
5276 ring_buffer_record_enable_cpu(tr->array_buffer.buffer, cpu);
5277 #ifdef CONFIG_TRACER_MAX_TRACE
5278 ring_buffer_record_enable_cpu(tr->max_buffer.buffer, cpu);
5282 arch_spin_unlock(&tr->max_lock);
5285 cpumask_copy(tr->tracing_cpumask, tracing_cpumask_new);
5291 tracing_cpumask_write(struct file *filp, const char __user *ubuf,
5292 size_t count, loff_t *ppos)
5294 struct trace_array *tr = file_inode(filp)->i_private;
5295 cpumask_var_t tracing_cpumask_new;
5298 if (!zalloc_cpumask_var(&tracing_cpumask_new, GFP_KERNEL))
5301 err = cpumask_parse_user(ubuf, count, tracing_cpumask_new);
5305 err = tracing_set_cpumask(tr, tracing_cpumask_new);
5309 free_cpumask_var(tracing_cpumask_new);
5314 free_cpumask_var(tracing_cpumask_new);
5319 static const struct file_operations tracing_cpumask_fops = {
5320 .open = tracing_open_generic_tr,
5321 .read = tracing_cpumask_read,
5322 .write = tracing_cpumask_write,
5323 .release = tracing_release_generic_tr,
5324 .llseek = generic_file_llseek,
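/*
 * Usage sketch for the tracing_cpumask file handled above; the mask is
 * written and read as a hex cpumask (cpumask_parse_user() on write,
 * "%*pb" on read):
 *
 *   # echo 3 > /sys/kernel/tracing/tracing_cpumask    trace CPUs 0-1 only
 *   # cat /sys/kernel/tracing/tracing_cpumask
 */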
5327 static int tracing_trace_options_show(struct seq_file *m, void *v)
5329 struct tracer_opt *trace_opts;
5330 struct trace_array *tr = m->private;
5334 mutex_lock(&trace_types_lock);
5335 tracer_flags = tr->current_trace->flags->val;
5336 trace_opts = tr->current_trace->flags->opts;
5338 for (i = 0; trace_options[i]; i++) {
5339 if (tr->trace_flags & (1 << i))
5340 seq_printf(m, "%s\n", trace_options[i]);
5342 seq_printf(m, "no%s\n", trace_options[i]);
5345 for (i = 0; trace_opts[i].name; i++) {
5346 if (tracer_flags & trace_opts[i].bit)
5347 seq_printf(m, "%s\n", trace_opts[i].name);
5349 seq_printf(m, "no%s\n", trace_opts[i].name);
5351 mutex_unlock(&trace_types_lock);
5356 static int __set_tracer_option(struct trace_array *tr,
5357 struct tracer_flags *tracer_flags,
5358 struct tracer_opt *opts, int neg)
5360 struct tracer *trace = tracer_flags->trace;
5363 ret = trace->set_flag(tr, tracer_flags->val, opts->bit, !neg);
5368 tracer_flags->val &= ~opts->bit;
5370 tracer_flags->val |= opts->bit;
5374 /* Try to assign a tracer specific option */
5375 static int set_tracer_option(struct trace_array *tr, char *cmp, int neg)
5377 struct tracer *trace = tr->current_trace;
5378 struct tracer_flags *tracer_flags = trace->flags;
5379 struct tracer_opt *opts = NULL;
5382 for (i = 0; tracer_flags->opts[i].name; i++) {
5383 opts = &tracer_flags->opts[i];
5385 if (strcmp(cmp, opts->name) == 0)
5386 return __set_tracer_option(tr, trace->flags, opts, neg);
5392 /* Some tracers require overwrite to stay enabled */
5393 int trace_keep_overwrite(struct tracer *tracer, u32 mask, int set)
5395 if (tracer->enabled && (mask & TRACE_ITER_OVERWRITE) && !set)
5401 int set_tracer_flag(struct trace_array *tr, unsigned int mask, int enabled)
5405 if ((mask == TRACE_ITER_RECORD_TGID) ||
5406 (mask == TRACE_ITER_RECORD_CMD))
5407 lockdep_assert_held(&event_mutex);
5409 /* do nothing if flag is already set */
5410 if (!!(tr->trace_flags & mask) == !!enabled)
5413 /* Give the tracer a chance to approve the change */
5414 if (tr->current_trace->flag_changed)
5415 if (tr->current_trace->flag_changed(tr, mask, !!enabled))
5419 tr->trace_flags |= mask;
5421 tr->trace_flags &= ~mask;
5423 if (mask == TRACE_ITER_RECORD_CMD)
5424 trace_event_enable_cmd_record(enabled);
5426 if (mask == TRACE_ITER_RECORD_TGID) {
5428 tgid_map_max = pid_max;
5429 map = kvcalloc(tgid_map_max + 1, sizeof(*tgid_map),
5433 * Pairs with smp_load_acquire() in
5434 * trace_find_tgid_ptr() to ensure that if it observes
5435 * the tgid_map we just allocated then it also observes
5436 * the corresponding tgid_map_max value.
5438 smp_store_release(&tgid_map, map);
5441 tr->trace_flags &= ~TRACE_ITER_RECORD_TGID;
5445 trace_event_enable_tgid_record(enabled);
5448 if (mask == TRACE_ITER_EVENT_FORK)
5449 trace_event_follow_fork(tr, enabled);
5451 if (mask == TRACE_ITER_FUNC_FORK)
5452 ftrace_pid_follow_fork(tr, enabled);
5454 if (mask == TRACE_ITER_OVERWRITE) {
5455 ring_buffer_change_overwrite(tr->array_buffer.buffer, enabled);
5456 #ifdef CONFIG_TRACER_MAX_TRACE
5457 ring_buffer_change_overwrite(tr->max_buffer.buffer, enabled);
5461 if (mask == TRACE_ITER_PRINTK) {
5462 trace_printk_start_stop_comm(enabled);
5463 trace_printk_control(enabled);
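/*
 * Reader-side sketch of the smp_store_release() pairing used in the
 * RECORD_TGID branch above (modeled on trace_find_tgid_ptr(); an
 * illustration, not a copy of that function):
 *
 *   int *map = smp_load_acquire(&tgid_map);   // pairs with the release
 *   if (map && pid <= tgid_map_max)           // max is valid only after map
 *           tgid = map[pid];
 */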
5469 int trace_set_options(struct trace_array *tr, char *option)
5474 size_t orig_len = strlen(option);
5477 cmp = strstrip(option);
5479 len = str_has_prefix(cmp, "no");
5485 mutex_lock(&event_mutex);
5486 mutex_lock(&trace_types_lock);
5488 ret = match_string(trace_options, -1, cmp);
5489 /* If no option could be set, test the specific tracer options */
5491 ret = set_tracer_option(tr, cmp, neg);
5493 ret = set_tracer_flag(tr, 1 << ret, !neg);
5495 mutex_unlock(&trace_types_lock);
5496 mutex_unlock(&event_mutex);
5499 * If the first trailing whitespace is replaced with '\0' by strstrip,
5500 * turn it back into a space.
5502 if (orig_len > strlen(option))
5503 option[strlen(option)] = ' ';
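/*
 * Example of input accepted by trace_set_options() (mirrors the
 * trace_options file described in the readme text below; the option
 * names are examples from the global trace_options list):
 *
 *   # echo print-parent > trace_options      set a flag
 *   # echo nosym-offset > trace_options      "no" prefix clears a flag
 */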
5508 static void __init apply_trace_boot_options(void)
5510 char *buf = trace_boot_options_buf;
5514 option = strsep(&buf, ",");
5520 trace_set_options(&global_trace, option);
5522 /* Put back the comma to allow this to be called again */
5529 tracing_trace_options_write(struct file *filp, const char __user *ubuf,
5530 size_t cnt, loff_t *ppos)
5532 struct seq_file *m = filp->private_data;
5533 struct trace_array *tr = m->private;
5537 if (cnt >= sizeof(buf))
5540 if (copy_from_user(buf, ubuf, cnt))
5545 ret = trace_set_options(tr, buf);
5554 static int tracing_trace_options_open(struct inode *inode, struct file *file)
5556 struct trace_array *tr = inode->i_private;
5559 ret = tracing_check_open_get_tr(tr);
5563 ret = single_open(file, tracing_trace_options_show, inode->i_private);
5565 trace_array_put(tr);
5570 static const struct file_operations tracing_iter_fops = {
5571 .open = tracing_trace_options_open,
5573 .llseek = seq_lseek,
5574 .release = tracing_single_release_tr,
5575 .write = tracing_trace_options_write,
5578 static const char readme_msg[] =
5579 "tracing mini-HOWTO:\n\n"
5580 "# echo 0 > tracing_on : quick way to disable tracing\n"
5581 "# echo 1 > tracing_on : quick way to re-enable tracing\n\n"
5582 " Important files:\n"
5583 " trace\t\t\t- The static contents of the buffer\n"
5584 "\t\t\t To clear the buffer write into this file: echo > trace\n"
5585 " trace_pipe\t\t- A consuming read to see the contents of the buffer\n"
5586 " current_tracer\t- function and latency tracers\n"
5587 " available_tracers\t- list of configured tracers for current_tracer\n"
5588 " error_log\t- error log for failed commands (that support it)\n"
5589 " buffer_size_kb\t- view and modify size of per cpu buffer\n"
5590 " buffer_total_size_kb - view total size of all cpu buffers\n\n"
5591 " trace_clock\t\t- change the clock used to order events\n"
5592 " local: Per cpu clock but may not be synced across CPUs\n"
5593 " global: Synced across CPUs but slows tracing down.\n"
5594 " counter: Not a clock, but just an increment\n"
5595 " uptime: Jiffy counter from time of boot\n"
5596 " perf: Same clock that perf events use\n"
5597 #ifdef CONFIG_X86_64
5598 " x86-tsc: TSC cycle counter\n"
5600 "\n timestamp_mode\t- view the mode used to timestamp events\n"
5601 " delta: Delta difference against a buffer-wide timestamp\n"
5602 " absolute: Absolute (standalone) timestamp\n"
5603 "\n trace_marker\t\t- Writes into this file are written into the kernel buffer\n"
5604 "\n trace_marker_raw\t\t- Writes into this file write binary data into the kernel buffer\n"
5605 " tracing_cpumask\t- Limit which CPUs to trace\n"
5606 " instances\t\t- Make sub-buffers with: mkdir instances/foo\n"
5607 "\t\t\t Remove sub-buffer with rmdir\n"
5608 " trace_options\t\t- Set format or modify how tracing happens\n"
5609 "\t\t\t Disable an option by prefixing 'no' to the\n"
5610 "\t\t\t option name\n"
5611 " saved_cmdlines_size\t- echo the number of comm-pid entries to store in here\n"
5612 #ifdef CONFIG_DYNAMIC_FTRACE
5613 "\n available_filter_functions - list of functions that can be filtered on\n"
5614 " set_ftrace_filter\t- echo function name in here to only trace these\n"
5615 "\t\t\t functions\n"
5616 "\t accepts: func_full_name or glob-matching-pattern\n"
5617 "\t modules: Can select a group via module\n"
5618 "\t Format: :mod:<module-name>\n"
5619 "\t example: echo :mod:ext3 > set_ftrace_filter\n"
5620 "\t triggers: a command to perform when function is hit\n"
5621 "\t Format: <function>:<trigger>[:count]\n"
5622 "\t trigger: traceon, traceoff\n"
5623 "\t\t enable_event:<system>:<event>\n"
5624 "\t\t disable_event:<system>:<event>\n"
5625 #ifdef CONFIG_STACKTRACE
5628 #ifdef CONFIG_TRACER_SNAPSHOT
5633 "\t example: echo do_fault:traceoff > set_ftrace_filter\n"
5634 "\t echo do_trap:traceoff:3 > set_ftrace_filter\n"
5635 "\t The first one will disable tracing every time do_fault is hit\n"
5636 "\t The second will disable tracing at most 3 times when do_trap is hit\n"
5637 "\t The first time do_trap is hit and it disables tracing, the\n"
5638 "\t counter will decrement to 2. If tracing is already disabled,\n"
5639 "\t the counter will not decrement. It only decrements when the\n"
5640 "\t trigger did work\n"
5641 "\t To remove trigger without count:\n"
5642 "\t echo '!<function>:<trigger> > set_ftrace_filter\n"
5643 "\t To remove trigger with a count:\n"
5644 "\t echo '!<function>:<trigger>:0 > set_ftrace_filter\n"
5645 " set_ftrace_notrace\t- echo function name in here to never trace.\n"
5646 "\t accepts: func_full_name, *func_end, func_begin*, *func_middle*\n"
5647 "\t modules: Can select a group via module command :mod:\n"
5648 "\t Does not accept triggers\n"
5649 #endif /* CONFIG_DYNAMIC_FTRACE */
5650 #ifdef CONFIG_FUNCTION_TRACER
5651 " set_ftrace_pid\t- Write pid(s) to only function trace those pids\n"
5653 " set_ftrace_notrace_pid\t- Write pid(s) to not function trace those pids\n"
5656 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
5657 " set_graph_function\t- Trace the nested calls of a function (function_graph)\n"
5658 " set_graph_notrace\t- Do not trace the nested calls of a function (function_graph)\n"
5659 " max_graph_depth\t- Trace a limited depth of nested calls (0 is unlimited)\n"
5661 #ifdef CONFIG_TRACER_SNAPSHOT
5662 "\n snapshot\t\t- Like 'trace' but shows the content of the static\n"
5663 "\t\t\t snapshot buffer. Read the contents for more\n"
5664 "\t\t\t information\n"
5666 #ifdef CONFIG_STACK_TRACER
5667 " stack_trace\t\t- Shows the max stack trace when active\n"
5668 " stack_max_size\t- Shows current max stack size that was traced\n"
5669 "\t\t\t Write into this file to reset the max size (trigger a\n"
5670 "\t\t\t new trace)\n"
5671 #ifdef CONFIG_DYNAMIC_FTRACE
5672 " stack_trace_filter\t- Like set_ftrace_filter but limits what stack_trace\n"
5675 #endif /* CONFIG_STACK_TRACER */
5676 #ifdef CONFIG_DYNAMIC_EVENTS
5677 " dynamic_events\t\t- Create/append/remove/show the generic dynamic events\n"
5678 "\t\t\t Write into this file to define/undefine new trace events.\n"
5680 #ifdef CONFIG_KPROBE_EVENTS
5681 " kprobe_events\t\t- Create/append/remove/show the kernel dynamic events\n"
5682 "\t\t\t Write into this file to define/undefine new trace events.\n"
5684 #ifdef CONFIG_UPROBE_EVENTS
5685 " uprobe_events\t\t- Create/append/remove/show the userspace dynamic events\n"
5686 "\t\t\t Write into this file to define/undefine new trace events.\n"
5688 #if defined(CONFIG_KPROBE_EVENTS) || defined(CONFIG_UPROBE_EVENTS) || \
5689 defined(CONFIG_FPROBE_EVENTS)
5690 "\t accepts: event-definitions (one definition per line)\n"
5691 #if defined(CONFIG_KPROBE_EVENTS) || defined(CONFIG_UPROBE_EVENTS)
5692 "\t Format: p[:[<group>/][<event>]] <place> [<args>]\n"
5693 "\t r[maxactive][:[<group>/][<event>]] <place> [<args>]\n"
5695 #ifdef CONFIG_FPROBE_EVENTS
5696 "\t f[:[<group>/][<event>]] <func-name>[%return] [<args>]\n"
5697 "\t t[:[<group>/][<event>]] <tracepoint> [<args>]\n"
5699 #ifdef CONFIG_HIST_TRIGGERS
5700 "\t s:[synthetic/]<event> <field> [<field>]\n"
5702 "\t e[:[<group>/][<event>]] <attached-group>.<attached-event> [<args>] [if <filter>]\n"
5703 "\t -:[<group>/][<event>]\n"
5704 #ifdef CONFIG_KPROBE_EVENTS
5705 "\t place: [<module>:]<symbol>[+<offset>]|<memaddr>\n"
5706 "\t place (kretprobe): [<module>:]<symbol>[+<offset>]%return|<memaddr>\n"
5708 #ifdef CONFIG_UPROBE_EVENTS
5709 " place (uprobe): <path>:<offset>[%return][(ref_ctr_offset)]\n"
5711 "\t args: <name>=fetcharg[:type]\n"
5712 "\t fetcharg: (%<register>|$<efield>), @<address>, @<symbol>[+|-<offset>],\n"
5713 #ifdef CONFIG_HAVE_FUNCTION_ARG_ACCESS_API
5714 #ifdef CONFIG_PROBE_EVENTS_BTF_ARGS
5715 "\t $stack<index>, $stack, $retval, $comm, $arg<N>,\n"
5716 "\t <argname>[->field[->field|.field...]],\n"
5718 "\t $stack<index>, $stack, $retval, $comm, $arg<N>,\n"
5721 "\t $stack<index>, $stack, $retval, $comm,\n"
5723 "\t +|-[u]<offset>(<fetcharg>), \\imm-value, \\\"imm-string\"\n"
5724 "\t type: s8/16/32/64, u8/16/32/64, x8/16/32/64, char, string, symbol,\n"
5725 "\t b<bit-width>@<bit-offset>/<container-size>, ustring,\n"
5726 "\t symstr, <type>\\[<array-size>\\]\n"
5727 #ifdef CONFIG_HIST_TRIGGERS
5728 "\t field: <stype> <name>;\n"
5729 "\t stype: u8/u16/u32/u64, s8/s16/s32/s64, pid_t,\n"
5730 "\t [unsigned] char/int/long\n"
5732 "\t efield: For event probes ('e' types), the field is one of the fields\n"
5733 "\t of the <attached-group>/<attached-event>.\n"
5735 " events/\t\t- Directory containing all trace event subsystems:\n"
5736 " enable\t\t- Write 0/1 to enable/disable tracing of all events\n"
5737 " events/<system>/\t- Directory containing all trace events for <system>:\n"
5738 " enable\t\t- Write 0/1 to enable/disable tracing of all <system>\n"
5740 " filter\t\t- If set, only events passing filter are traced\n"
5741 " events/<system>/<event>/\t- Directory containing control files for\n"
5743 " enable\t\t- Write 0/1 to enable/disable tracing of <event>\n"
5744 " filter\t\t- If set, only events passing filter are traced\n"
5745 " trigger\t\t- If set, a command to perform when event is hit\n"
5746 "\t Format: <trigger>[:count][if <filter>]\n"
5747 "\t trigger: traceon, traceoff\n"
5748 "\t enable_event:<system>:<event>\n"
5749 "\t disable_event:<system>:<event>\n"
5750 #ifdef CONFIG_HIST_TRIGGERS
5751 "\t enable_hist:<system>:<event>\n"
5752 "\t disable_hist:<system>:<event>\n"
5754 #ifdef CONFIG_STACKTRACE
5757 #ifdef CONFIG_TRACER_SNAPSHOT
5760 #ifdef CONFIG_HIST_TRIGGERS
5761 "\t\t hist (see below)\n"
5763 "\t example: echo traceoff > events/block/block_unplug/trigger\n"
5764 "\t echo traceoff:3 > events/block/block_unplug/trigger\n"
5765 "\t echo 'enable_event:kmem:kmalloc:3 if nr_rq > 1' > \\\n"
5766 "\t events/block/block_unplug/trigger\n"
5767 "\t The first disables tracing every time block_unplug is hit.\n"
5768 "\t The second disables tracing the first 3 times block_unplug is hit.\n"
5769 "\t The third enables the kmalloc event the first 3 times block_unplug\n"
5770 "\t is hit and has a value greater than 1 for the 'nr_rq' event field.\n"
5771 "\t Like function triggers, the counter is only decremented if it\n"
5772 "\t enabled or disabled tracing.\n"
5773 "\t To remove a trigger without a count:\n"
5774 "\t echo '!<trigger> > <system>/<event>/trigger\n"
5775 "\t To remove a trigger with a count:\n"
5776 "\t echo '!<trigger>:0 > <system>/<event>/trigger\n"
5777 "\t Filters can be ignored when removing a trigger.\n"
5778 #ifdef CONFIG_HIST_TRIGGERS
5779 " hist trigger\t- If set, event hits are aggregated into a hash table\n"
5780 "\t Format: hist:keys=<field1[,field2,...]>\n"
5781 "\t [:<var1>=<field|var_ref|numeric_literal>[,<var2>=...]]\n"
5782 "\t [:values=<field1[,field2,...]>]\n"
5783 "\t [:sort=<field1[,field2,...]>]\n"
5784 "\t [:size=#entries]\n"
5785 "\t [:pause][:continue][:clear]\n"
5786 "\t [:name=histname1]\n"
5787 "\t [:nohitcount]\n"
5788 "\t [:<handler>.<action>]\n"
5789 "\t [if <filter>]\n\n"
5790 "\t Note, special fields can be used as well:\n"
5791 "\t common_timestamp - to record current timestamp\n"
5792 "\t common_cpu - to record the CPU the event happened on\n"
5794 "\t A hist trigger variable can be:\n"
5795 "\t - a reference to a field e.g. x=current_timestamp,\n"
5796 "\t - a reference to another variable e.g. y=$x,\n"
5797 "\t - a numeric literal: e.g. ms_per_sec=1000,\n"
5798 "\t - an arithmetic expression: e.g. time_secs=current_timestamp/1000\n"
5800 "\t hist trigger arithmetic expressions support addition(+), subtraction(-),\n"
5801 "\t multiplication(*) and division(/) operators. An operand can be either a\n"
5802 "\t variable reference, field or numeric literal.\n"
5804 "\t When a matching event is hit, an entry is added to a hash\n"
5805 "\t table using the key(s) and value(s) named, and the value of a\n"
5806 "\t sum called 'hitcount' is incremented. Keys and values\n"
5807 "\t correspond to fields in the event's format description. Keys\n"
5808 "\t can be any field, or the special string 'common_stacktrace'.\n"
5809 "\t Compound keys consisting of up to two fields can be specified\n"
5810 "\t by the 'keys' keyword. Values must correspond to numeric\n"
5811 "\t fields. Sort keys consisting of up to two fields can be\n"
5812 "\t specified using the 'sort' keyword. The sort direction can\n"
5813 "\t be modified by appending '.descending' or '.ascending' to a\n"
5814 "\t sort field. The 'size' parameter can be used to specify more\n"
5815 "\t or fewer than the default 2048 entries for the hashtable size.\n"
5816 "\t If a hist trigger is given a name using the 'name' parameter,\n"
5817 "\t its histogram data will be shared with other triggers of the\n"
5818 "\t same name, and trigger hits will update this common data.\n\n"
5819 "\t Reading the 'hist' file for the event will dump the hash\n"
5820 "\t table in its entirety to stdout. If there are multiple hist\n"
5821 "\t triggers attached to an event, there will be a table for each\n"
5822 "\t trigger in the output. The table displayed for a named\n"
5823 "\t trigger will be the same as any other instance having the\n"
5824 "\t same name. The default format used to display a given field\n"
5825 "\t can be modified by appending any of the following modifiers\n"
5826 "\t to the field name, as applicable:\n\n"
5827 "\t .hex display a number as a hex value\n"
5828 "\t .sym display an address as a symbol\n"
5829 "\t .sym-offset display an address as a symbol and offset\n"
5830 "\t .execname display a common_pid as a program name\n"
5831 "\t .syscall display a syscall id as a syscall name\n"
5832 "\t .log2 display log2 value rather than raw number\n"
5833 "\t .buckets=size display values in groups of size rather than raw number\n"
5834 "\t .usecs display a common_timestamp in microseconds\n"
5835 "\t .percent display a number as a percentage value\n"
5836 "\t .graph display a bar-graph of a value\n\n"
5837 "\t The 'pause' parameter can be used to pause an existing hist\n"
5838 "\t trigger or to start a hist trigger but not log any events\n"
5839 "\t until told to do so. 'continue' can be used to start or\n"
5840 "\t restart a paused hist trigger.\n\n"
5841 "\t The 'clear' parameter will clear the contents of a running\n"
5842 "\t hist trigger and leave its current paused/active state\n"
5844 "\t The 'nohitcount' (or NOHC) parameter will suppress display of\n"
5845 "\t raw hitcount in the histogram.\n\n"
5846 "\t The enable_hist and disable_hist triggers can be used to\n"
5847 "\t have one event conditionally start and stop another event's\n"
5848 "\t already-attached hist trigger. The syntax is analogous to\n"
5849 "\t the enable_event and disable_event triggers.\n\n"
5850 "\t Hist trigger handlers and actions are executed whenever a\n"
5851 "\t histogram entry is added or updated. They take the form:\n\n"
5852 "\t <handler>.<action>\n\n"
5853 "\t The available handlers are:\n\n"
5854 "\t onmatch(matching.event) - invoke on addition or update\n"
5855 "\t onmax(var) - invoke if var exceeds current max\n"
5856 "\t onchange(var) - invoke action if var changes\n\n"
5857 "\t The available actions are:\n\n"
5858 "\t trace(<synthetic_event>,param list) - generate synthetic event\n"
5859 "\t save(field,...) - save current event fields\n"
5860 #ifdef CONFIG_TRACER_SNAPSHOT
5861 "\t snapshot() - snapshot the trace buffer\n\n"
5863 #ifdef CONFIG_SYNTH_EVENTS
5864 " events/synthetic_events\t- Create/append/remove/show synthetic events\n"
5865 "\t Write into this file to define/undefine new synthetic events.\n"
5866 "\t example: echo 'myevent u64 lat; char name[]; long[] stack' >> synthetic_events\n"
5872 tracing_readme_read(struct file *filp, char __user *ubuf,
5873 size_t cnt, loff_t *ppos)
5875 return simple_read_from_buffer(ubuf, cnt, ppos,
5876 readme_msg, strlen(readme_msg));
5879 static const struct file_operations tracing_readme_fops = {
5880 .open = tracing_open_generic,
5881 .read = tracing_readme_read,
5882 .llseek = generic_file_llseek,
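/*
 * The readme_msg text above is exported to userspace; a quick way to
 * view it (assuming the usual "README" tracefs entry):
 *
 *   # cat /sys/kernel/tracing/README
 */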
5885 static void *saved_tgids_next(struct seq_file *m, void *v, loff_t *pos)
5889 return trace_find_tgid_ptr(pid);
5892 static void *saved_tgids_start(struct seq_file *m, loff_t *pos)
5896 return trace_find_tgid_ptr(pid);
5899 static void saved_tgids_stop(struct seq_file *m, void *v)
5903 static int saved_tgids_show(struct seq_file *m, void *v)
5905 int *entry = (int *)v;
5906 int pid = entry - tgid_map;
5912 seq_printf(m, "%d %d\n", pid, tgid);
5916 static const struct seq_operations tracing_saved_tgids_seq_ops = {
5917 .start = saved_tgids_start,
5918 .stop = saved_tgids_stop,
5919 .next = saved_tgids_next,
5920 .show = saved_tgids_show,
5923 static int tracing_saved_tgids_open(struct inode *inode, struct file *filp)
5927 ret = tracing_check_open_get_tr(NULL);
5931 return seq_open(filp, &tracing_saved_tgids_seq_ops);
5935 static const struct file_operations tracing_saved_tgids_fops = {
5936 .open = tracing_saved_tgids_open,
5938 .llseek = seq_lseek,
5939 .release = seq_release,
5942 static void *saved_cmdlines_next(struct seq_file *m, void *v, loff_t *pos)
5944 unsigned int *ptr = v;
5946 if (*pos || m->count)
5951 for (; ptr < &savedcmd->map_cmdline_to_pid[savedcmd->cmdline_num];
5953 if (*ptr == -1 || *ptr == NO_CMDLINE_MAP)
5962 static void *saved_cmdlines_start(struct seq_file *m, loff_t *pos)
5968 arch_spin_lock(&trace_cmdline_lock);
5970 v = &savedcmd->map_cmdline_to_pid[0];
5972 v = saved_cmdlines_next(m, v, &l);
5980 static void saved_cmdlines_stop(struct seq_file *m, void *v)
5982 arch_spin_unlock(&trace_cmdline_lock);
5986 static int saved_cmdlines_show(struct seq_file *m, void *v)
5988 char buf[TASK_COMM_LEN];
5989 unsigned int *pid = v;
5991 __trace_find_cmdline(*pid, buf);
5992 seq_printf(m, "%d %s\n", *pid, buf);
5996 static const struct seq_operations tracing_saved_cmdlines_seq_ops = {
5997 .start = saved_cmdlines_start,
5998 .next = saved_cmdlines_next,
5999 .stop = saved_cmdlines_stop,
6000 .show = saved_cmdlines_show,
6003 static int tracing_saved_cmdlines_open(struct inode *inode, struct file *filp)
6007 ret = tracing_check_open_get_tr(NULL);
6011 return seq_open(filp, &tracing_saved_cmdlines_seq_ops);
6014 static const struct file_operations tracing_saved_cmdlines_fops = {
6015 .open = tracing_saved_cmdlines_open,
6017 .llseek = seq_lseek,
6018 .release = seq_release,
6022 tracing_saved_cmdlines_size_read(struct file *filp, char __user *ubuf,
6023 size_t cnt, loff_t *ppos)
6029 arch_spin_lock(&trace_cmdline_lock);
6030 r = scnprintf(buf, sizeof(buf), "%u\n", savedcmd->cmdline_num);
6031 arch_spin_unlock(&trace_cmdline_lock);
6034 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
6037 static void free_saved_cmdlines_buffer(struct saved_cmdlines_buffer *s)
6039 kfree(s->saved_cmdlines);
6040 kfree(s->map_cmdline_to_pid);
6044 static int tracing_resize_saved_cmdlines(unsigned int val)
6046 struct saved_cmdlines_buffer *s, *savedcmd_temp;
6048 s = kmalloc(sizeof(*s), GFP_KERNEL);
6052 if (allocate_cmdlines_buffer(val, s) < 0) {
6058 arch_spin_lock(&trace_cmdline_lock);
6059 savedcmd_temp = savedcmd;
6061 arch_spin_unlock(&trace_cmdline_lock);
6063 free_saved_cmdlines_buffer(savedcmd_temp);
6069 tracing_saved_cmdlines_size_write(struct file *filp, const char __user *ubuf,
6070 size_t cnt, loff_t *ppos)
6075 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
6079 /* must have at least 1 entry and at most PID_MAX_DEFAULT entries */
6080 if (!val || val > PID_MAX_DEFAULT)
6083 ret = tracing_resize_saved_cmdlines((unsigned int)val);
6092 static const struct file_operations tracing_saved_cmdlines_size_fops = {
6093 .open = tracing_open_generic,
6094 .read = tracing_saved_cmdlines_size_read,
6095 .write = tracing_saved_cmdlines_size_write,
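/*
 * Usage sketch for saved_cmdlines_size, with the bounds enforced by the
 * write handler above (at least 1, at most PID_MAX_DEFAULT):
 *
 *   # echo 1024 > /sys/kernel/tracing/saved_cmdlines_size
 *   # cat /sys/kernel/tracing/saved_cmdlines_size
 */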
6098 #ifdef CONFIG_TRACE_EVAL_MAP_FILE
6099 static union trace_eval_map_item *
6100 update_eval_map(union trace_eval_map_item *ptr)
6102 if (!ptr->map.eval_string) {
6103 if (ptr->tail.next) {
6104 ptr = ptr->tail.next;
6105 /* Set ptr to the next real item (skip head) */
6113 static void *eval_map_next(struct seq_file *m, void *v, loff_t *pos)
6115 union trace_eval_map_item *ptr = v;
6118 * Paranoid! If ptr points to end, we don't want to increment past it.
6119 * This really should never happen.
6122 ptr = update_eval_map(ptr);
6123 if (WARN_ON_ONCE(!ptr))
6127 ptr = update_eval_map(ptr);
6132 static void *eval_map_start(struct seq_file *m, loff_t *pos)
6134 union trace_eval_map_item *v;
6137 mutex_lock(&trace_eval_mutex);
6139 v = trace_eval_maps;
6143 while (v && l < *pos) {
6144 v = eval_map_next(m, v, &l);
6150 static void eval_map_stop(struct seq_file *m, void *v)
6152 mutex_unlock(&trace_eval_mutex);
6155 static int eval_map_show(struct seq_file *m, void *v)
6157 union trace_eval_map_item *ptr = v;
6159 seq_printf(m, "%s %ld (%s)\n",
6160 ptr->map.eval_string, ptr->map.eval_value,
6166 static const struct seq_operations tracing_eval_map_seq_ops = {
6167 .start = eval_map_start,
6168 .next = eval_map_next,
6169 .stop = eval_map_stop,
6170 .show = eval_map_show,
6173 static int tracing_eval_map_open(struct inode *inode, struct file *filp)
6177 ret = tracing_check_open_get_tr(NULL);
6181 return seq_open(filp, &tracing_eval_map_seq_ops);
6184 static const struct file_operations tracing_eval_map_fops = {
6185 .open = tracing_eval_map_open,
6187 .llseek = seq_lseek,
6188 .release = seq_release,
6191 static inline union trace_eval_map_item *
6192 trace_eval_jmp_to_tail(union trace_eval_map_item *ptr)
6194 /* Return tail of array given the head */
6195 return ptr + ptr->head.length + 1;
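/*
 * Layout sketch of one eval-map block as built below: a head item, then
 * the map entries, then a tail item, which is why the tail lives at
 * ptr + ptr->head.length + 1:
 *
 *   [ head: mod, length ][ map[0] ] ... [ map[length - 1] ][ tail: next ]
 */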
6199 trace_insert_eval_map_file(struct module *mod, struct trace_eval_map **start,
6202 struct trace_eval_map **stop;
6203 struct trace_eval_map **map;
6204 union trace_eval_map_item *map_array;
6205 union trace_eval_map_item *ptr;
6210 * The trace_eval_maps contains the map plus a head and tail item,
6211 * where the head holds the module and length of array, and the
6212 * tail holds a pointer to the next list.
6214 map_array = kmalloc_array(len + 2, sizeof(*map_array), GFP_KERNEL);
6216 pr_warn("Unable to allocate trace eval mapping\n");
6220 mutex_lock(&trace_eval_mutex);
6222 if (!trace_eval_maps)
6223 trace_eval_maps = map_array;
6225 ptr = trace_eval_maps;
6227 ptr = trace_eval_jmp_to_tail(ptr);
6228 if (!ptr->tail.next)
6230 ptr = ptr->tail.next;
6233 ptr->tail.next = map_array;
6235 map_array->head.mod = mod;
6236 map_array->head.length = len;
6239 for (map = start; (unsigned long)map < (unsigned long)stop; map++) {
6240 map_array->map = **map;
6243 memset(map_array, 0, sizeof(*map_array));
6245 mutex_unlock(&trace_eval_mutex);
6248 static void trace_create_eval_file(struct dentry *d_tracer)
6250 trace_create_file("eval_map", TRACE_MODE_READ, d_tracer,
6251 NULL, &tracing_eval_map_fops);
6254 #else /* CONFIG_TRACE_EVAL_MAP_FILE */
6255 static inline void trace_create_eval_file(struct dentry *d_tracer) { }
6256 static inline void trace_insert_eval_map_file(struct module *mod,
6257 struct trace_eval_map **start, int len) { }
6258 #endif /* !CONFIG_TRACE_EVAL_MAP_FILE */
6260 static void trace_insert_eval_map(struct module *mod,
6261 struct trace_eval_map **start, int len)
6263 struct trace_eval_map **map;
6270 trace_event_eval_update(map, len);
6272 trace_insert_eval_map_file(mod, start, len);
6276 tracing_set_trace_read(struct file *filp, char __user *ubuf,
6277 size_t cnt, loff_t *ppos)
6279 struct trace_array *tr = filp->private_data;
6280 char buf[MAX_TRACER_SIZE+2];
6283 mutex_lock(&trace_types_lock);
6284 r = sprintf(buf, "%s\n", tr->current_trace->name);
6285 mutex_unlock(&trace_types_lock);
6287 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
6290 int tracer_init(struct tracer *t, struct trace_array *tr)
6292 tracing_reset_online_cpus(&tr->array_buffer);
6296 static void set_buffer_entries(struct array_buffer *buf, unsigned long val)
6300 for_each_tracing_cpu(cpu)
6301 per_cpu_ptr(buf->data, cpu)->entries = val;
6304 static void update_buffer_entries(struct array_buffer *buf, int cpu)
6306 if (cpu == RING_BUFFER_ALL_CPUS) {
6307 set_buffer_entries(buf, ring_buffer_size(buf->buffer, 0));
6309 per_cpu_ptr(buf->data, cpu)->entries = ring_buffer_size(buf->buffer, cpu);
6313 #ifdef CONFIG_TRACER_MAX_TRACE
6314 /* resize @trace_buf's buffer to the size of @size_buf's entries */
6315 static int resize_buffer_duplicate_size(struct array_buffer *trace_buf,
6316 struct array_buffer *size_buf, int cpu_id)
6320 if (cpu_id == RING_BUFFER_ALL_CPUS) {
6321 for_each_tracing_cpu(cpu) {
6322 ret = ring_buffer_resize(trace_buf->buffer,
6323 per_cpu_ptr(size_buf->data, cpu)->entries, cpu);
6326 per_cpu_ptr(trace_buf->data, cpu)->entries =
6327 per_cpu_ptr(size_buf->data, cpu)->entries;
6330 ret = ring_buffer_resize(trace_buf->buffer,
6331 per_cpu_ptr(size_buf->data, cpu_id)->entries, cpu_id);
6333 per_cpu_ptr(trace_buf->data, cpu_id)->entries =
6334 per_cpu_ptr(size_buf->data, cpu_id)->entries;
6339 #endif /* CONFIG_TRACER_MAX_TRACE */
6341 static int __tracing_resize_ring_buffer(struct trace_array *tr,
6342 unsigned long size, int cpu)
6347 * If kernel or user changes the size of the ring buffer
6348 * we use the size that was given, and we can forget about
6349 * expanding it later.
6351 trace_set_ring_buffer_expanded(tr);
6353 /* May be called before buffers are initialized */
6354 if (!tr->array_buffer.buffer)
6357 /* Do not allow tracing while resizing ring buffer */
6358 tracing_stop_tr(tr);
6360 ret = ring_buffer_resize(tr->array_buffer.buffer, size, cpu);
6364 #ifdef CONFIG_TRACER_MAX_TRACE
6365 if (!tr->allocated_snapshot)
6368 ret = ring_buffer_resize(tr->max_buffer.buffer, size, cpu);
6370 int r = resize_buffer_duplicate_size(&tr->array_buffer,
6371 &tr->array_buffer, cpu);
6374 * AARGH! We are left with different
6375 * size max buffer!!!!
6376 * The max buffer is our "snapshot" buffer.
6377 * When a tracer needs a snapshot (one of the
6378 * latency tracers), it swaps the max buffer
6379 * with the saved snapshot. We succeeded in updating
6380 * the size of the main buffer, but failed to
6381 * update the size of the max buffer. But when we tried
6382 * to reset the main buffer to the original size, we
6383 * failed there too. This is very unlikely to
6384 * happen, but if it does, warn and kill all
6388 tracing_disabled = 1;
6393 update_buffer_entries(&tr->max_buffer, cpu);
6396 #endif /* CONFIG_TRACER_MAX_TRACE */
6398 update_buffer_entries(&tr->array_buffer, cpu);
6400 tracing_start_tr(tr);
6404 ssize_t tracing_resize_ring_buffer(struct trace_array *tr,
6405 unsigned long size, int cpu_id)
6409 mutex_lock(&trace_types_lock);
6411 if (cpu_id != RING_BUFFER_ALL_CPUS) {
6412 /* make sure this cpu is enabled in the mask */
6413 if (!cpumask_test_cpu(cpu_id, tracing_buffer_mask)) {
6419 ret = __tracing_resize_ring_buffer(tr, size, cpu_id);
6424 mutex_unlock(&trace_types_lock);
6431 * tracing_update_buffers - used by tracing facility to expand ring buffers
6432 * @tr: The tracing instance
6434 * To save on memory when tracing is never used on a system with it
6435 * configured in, the ring buffers are set to a minimum size. But once
6436 * a user starts to use the tracing facility, they need to grow
6437 * to their default size.
6439 * This function is to be called when a tracer is about to be used.
6441 int tracing_update_buffers(struct trace_array *tr)
6445 mutex_lock(&trace_types_lock);
6446 if (!tr->ring_buffer_expanded)
6447 ret = __tracing_resize_ring_buffer(tr, trace_buf_size,
6448 RING_BUFFER_ALL_CPUS);
6449 mutex_unlock(&trace_types_lock);
6454 struct trace_option_dentry;
6457 create_trace_option_files(struct trace_array *tr, struct tracer *tracer);
6460 * Used to clear out the tracer before deletion of an instance.
6461 * Must have trace_types_lock held.
6463 static void tracing_set_nop(struct trace_array *tr)
6465 if (tr->current_trace == &nop_trace)
6468 tr->current_trace->enabled--;
6470 if (tr->current_trace->reset)
6471 tr->current_trace->reset(tr);
6473 tr->current_trace = &nop_trace;
6476 static bool tracer_options_updated;
6478 static void add_tracer_options(struct trace_array *tr, struct tracer *t)
6480 /* Only enable if the directory has been created already. */
6484 /* Only create trace option files after update_tracer_options finishes */
6485 if (!tracer_options_updated)
6488 create_trace_option_files(tr, t);
6491 int tracing_set_tracer(struct trace_array *tr, const char *buf)
6494 #ifdef CONFIG_TRACER_MAX_TRACE
6499 mutex_lock(&trace_types_lock);
6501 if (!tr->ring_buffer_expanded) {
6502 ret = __tracing_resize_ring_buffer(tr, trace_buf_size,
6503 RING_BUFFER_ALL_CPUS);
6509 for (t = trace_types; t; t = t->next) {
6510 if (strcmp(t->name, buf) == 0)
6517 if (t == tr->current_trace)
6520 #ifdef CONFIG_TRACER_SNAPSHOT
6521 if (t->use_max_tr) {
6522 local_irq_disable();
6523 arch_spin_lock(&tr->max_lock);
6524 if (tr->cond_snapshot)
6526 arch_spin_unlock(&tr->max_lock);
6532 /* Some tracers won't work on kernel command line */
6533 if (system_state < SYSTEM_RUNNING && t->noboot) {
6534 pr_warn("Tracer '%s' is not allowed on command line, ignored\n",
6539 /* Some tracers are only allowed for the top level buffer */
6540 if (!trace_ok_for_array(t, tr)) {
6545 /* If trace pipe files are being read, we can't change the tracer */
6546 if (tr->trace_ref) {
6551 trace_branch_disable();
6553 tr->current_trace->enabled--;
6555 if (tr->current_trace->reset)
6556 tr->current_trace->reset(tr);
6558 #ifdef CONFIG_TRACER_MAX_TRACE
6559 had_max_tr = tr->current_trace->use_max_tr;
6561 /* Current trace needs to be nop_trace before synchronize_rcu */
6562 tr->current_trace = &nop_trace;
6564 if (had_max_tr && !t->use_max_tr) {
6566 * We need to make sure that the update_max_tr sees that
6567 * current_trace changed to nop_trace to keep it from
6568 * swapping the buffers after we resize it.
6569 * The update_max_tr is called from interrupts disabled
6570 * so a synchronize_rcu() is sufficient.
6576 if (t->use_max_tr && !tr->allocated_snapshot) {
6577 ret = tracing_alloc_snapshot_instance(tr);
6582 tr->current_trace = &nop_trace;
6586 ret = tracer_init(t, tr);
6591 tr->current_trace = t;
6592 tr->current_trace->enabled++;
6593 trace_branch_enable(tr);
6595 mutex_unlock(&trace_types_lock);
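/*
 * Typical userspace interaction with tracing_set_tracer() (a sketch;
 * the available tracer names depend on the kernel configuration):
 *
 *   # cat /sys/kernel/tracing/available_tracers
 *   # echo function_graph > /sys/kernel/tracing/current_tracer
 */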
6601 tracing_set_trace_write(struct file *filp, const char __user *ubuf,
6602 size_t cnt, loff_t *ppos)
6604 struct trace_array *tr = filp->private_data;
6605 char buf[MAX_TRACER_SIZE+1];
6612 if (cnt > MAX_TRACER_SIZE)
6613 cnt = MAX_TRACER_SIZE;
6615 if (copy_from_user(buf, ubuf, cnt))
6622 err = tracing_set_tracer(tr, name);
6632 tracing_nsecs_read(unsigned long *ptr, char __user *ubuf,
6633 size_t cnt, loff_t *ppos)
6638 r = snprintf(buf, sizeof(buf), "%ld\n",
6639 *ptr == (unsigned long)-1 ? -1 : nsecs_to_usecs(*ptr));
6640 if (r > sizeof(buf))
6642 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
6646 tracing_nsecs_write(unsigned long *ptr, const char __user *ubuf,
6647 size_t cnt, loff_t *ppos)
6652 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
6662 tracing_thresh_read(struct file *filp, char __user *ubuf,
6663 size_t cnt, loff_t *ppos)
6665 return tracing_nsecs_read(&tracing_thresh, ubuf, cnt, ppos);
6669 tracing_thresh_write(struct file *filp, const char __user *ubuf,
6670 size_t cnt, loff_t *ppos)
6672 struct trace_array *tr = filp->private_data;
6675 mutex_lock(&trace_types_lock);
6676 ret = tracing_nsecs_write(&tracing_thresh, ubuf, cnt, ppos);
6680 if (tr->current_trace->update_thresh) {
6681 ret = tr->current_trace->update_thresh(tr);
6688 mutex_unlock(&trace_types_lock);
6693 #ifdef CONFIG_TRACER_MAX_TRACE
6696 tracing_max_lat_read(struct file *filp, char __user *ubuf,
6697 size_t cnt, loff_t *ppos)
6699 struct trace_array *tr = filp->private_data;
6701 return tracing_nsecs_read(&tr->max_latency, ubuf, cnt, ppos);
6705 tracing_max_lat_write(struct file *filp, const char __user *ubuf,
6706 size_t cnt, loff_t *ppos)
6708 struct trace_array *tr = filp->private_data;
6710 return tracing_nsecs_write(&tr->max_latency, ubuf, cnt, ppos);
6715 static int open_pipe_on_cpu(struct trace_array *tr, int cpu)
6717 if (cpu == RING_BUFFER_ALL_CPUS) {
6718 if (cpumask_empty(tr->pipe_cpumask)) {
6719 cpumask_setall(tr->pipe_cpumask);
6722 } else if (!cpumask_test_cpu(cpu, tr->pipe_cpumask)) {
6723 cpumask_set_cpu(cpu, tr->pipe_cpumask);
6729 static void close_pipe_on_cpu(struct trace_array *tr, int cpu)
6731 if (cpu == RING_BUFFER_ALL_CPUS) {
6732 WARN_ON(!cpumask_full(tr->pipe_cpumask));
6733 cpumask_clear(tr->pipe_cpumask);
6735 WARN_ON(!cpumask_test_cpu(cpu, tr->pipe_cpumask));
6736 cpumask_clear_cpu(cpu, tr->pipe_cpumask);
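/*
 * The pair of helpers above makes trace_pipe readers exclusive per CPU:
 * the top-level trace_pipe claims every CPU, a per-CPU trace_pipe claims
 * only its own bit, and a conflicting open fails (sketch):
 *
 *   # cat /sys/kernel/tracing/trace_pipe &
 *   # cat /sys/kernel/tracing/per_cpu/cpu0/trace_pipe    second open fails
 */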
6740 static int tracing_open_pipe(struct inode *inode, struct file *filp)
6742 struct trace_array *tr = inode->i_private;
6743 struct trace_iterator *iter;
6747 ret = tracing_check_open_get_tr(tr);
6751 mutex_lock(&trace_types_lock);
6752 cpu = tracing_get_cpu(inode);
6753 ret = open_pipe_on_cpu(tr, cpu);
6755 goto fail_pipe_on_cpu;
6757 /* create a buffer to store the information to pass to userspace */
6758 iter = kzalloc(sizeof(*iter), GFP_KERNEL);
6761 goto fail_alloc_iter;
6764 trace_seq_init(&iter->seq);
6765 iter->trace = tr->current_trace;
6767 if (!alloc_cpumask_var(&iter->started, GFP_KERNEL)) {
6772 /* trace pipe does not show start of buffer */
6773 cpumask_setall(iter->started);
6775 if (tr->trace_flags & TRACE_ITER_LATENCY_FMT)
6776 iter->iter_flags |= TRACE_FILE_LAT_FMT;
6778 /* Output in nanoseconds only if we are using a clock in nanoseconds. */
6779 if (trace_clocks[tr->clock_id].in_ns)
6780 iter->iter_flags |= TRACE_FILE_TIME_IN_NS;
6783 iter->array_buffer = &tr->array_buffer;
6784 iter->cpu_file = cpu;
6785 mutex_init(&iter->mutex);
6786 filp->private_data = iter;
6788 if (iter->trace->pipe_open)
6789 iter->trace->pipe_open(iter);
6791 nonseekable_open(inode, filp);
6795 mutex_unlock(&trace_types_lock);
6801 close_pipe_on_cpu(tr, cpu);
6803 __trace_array_put(tr);
6804 mutex_unlock(&trace_types_lock);
6808 static int tracing_release_pipe(struct inode *inode, struct file *file)
6810 struct trace_iterator *iter = file->private_data;
6811 struct trace_array *tr = inode->i_private;
6813 mutex_lock(&trace_types_lock);
6817 if (iter->trace->pipe_close)
6818 iter->trace->pipe_close(iter);
6819 close_pipe_on_cpu(tr, iter->cpu_file);
6820 mutex_unlock(&trace_types_lock);
6822 free_trace_iter_content(iter);
6825 trace_array_put(tr);
6831 trace_poll(struct trace_iterator *iter, struct file *filp, poll_table *poll_table)
6833 struct trace_array *tr = iter->tr;
6835 /* Iterators are static, they should be filled or empty */
6836 if (trace_buffer_iter(iter, iter->cpu_file))
6837 return EPOLLIN | EPOLLRDNORM;
6839 if (tr->trace_flags & TRACE_ITER_BLOCK)
6841 * Always select as readable when in blocking mode
6843 return EPOLLIN | EPOLLRDNORM;
6845 return ring_buffer_poll_wait(iter->array_buffer->buffer, iter->cpu_file,
6846 filp, poll_table, iter->tr->buffer_percent);
6850 tracing_poll_pipe(struct file *filp, poll_table *poll_table)
6852 struct trace_iterator *iter = filp->private_data;
6854 return trace_poll(iter, filp, poll_table);
6857 /* Must be called with iter->mutex held. */
6858 static int tracing_wait_pipe(struct file *filp)
6860 struct trace_iterator *iter = filp->private_data;
6863 while (trace_empty(iter)) {
6865 if ((filp->f_flags & O_NONBLOCK)) {
6870 * We block until we read something and tracing is disabled.
6871 * We still block if tracing is disabled, but we have never
6872 * read anything. This allows a user to cat this file, and
6873 * then enable tracing. But after we have read something,
6874 * we give an EOF when tracing is again disabled.
6876 * iter->pos will be 0 if we haven't read anything.
6878 if (!tracer_tracing_is_on(iter->tr) && iter->pos)
6881 mutex_unlock(&iter->mutex);
6883 ret = wait_on_pipe(iter, 0);
6885 mutex_lock(&iter->mutex);
6898 tracing_read_pipe(struct file *filp, char __user *ubuf,
6899 size_t cnt, loff_t *ppos)
6901 struct trace_iterator *iter = filp->private_data;
6905 * Avoid more than one consumer on a single file descriptor
6906 * This is just a matter of trace coherency; the ring buffer itself
6909 mutex_lock(&iter->mutex);
6911 /* return any leftover data */
6912 sret = trace_seq_to_user(&iter->seq, ubuf, cnt);
6916 trace_seq_init(&iter->seq);
6918 if (iter->trace->read) {
6919 sret = iter->trace->read(iter, filp, ubuf, cnt, ppos);
6925 sret = tracing_wait_pipe(filp);
6929 /* stop when tracing is finished */
6930 if (trace_empty(iter)) {
6935 if (cnt >= PAGE_SIZE)
6936 cnt = PAGE_SIZE - 1;
6938 /* reset all but tr, trace, and overruns */
6939 trace_iterator_reset(iter);
6940 cpumask_clear(iter->started);
6941 trace_seq_init(&iter->seq);
6943 trace_event_read_lock();
6944 trace_access_lock(iter->cpu_file);
6945 while (trace_find_next_entry_inc(iter) != NULL) {
6946 enum print_line_t ret;
6947 int save_len = iter->seq.seq.len;
6949 ret = print_trace_line(iter);
6950 if (ret == TRACE_TYPE_PARTIAL_LINE) {
6952 * If one print_trace_line() fills the entire trace_seq in one shot,
6953 * trace_seq_to_user() will return -EBUSY because save_len == 0.
6954 * In this case, we need to consume it; otherwise, the loop will peek
6955 * this event next time, resulting in an infinite loop.
6957 if (save_len == 0) {
6959 trace_seq_puts(&iter->seq, "[LINE TOO BIG]\n");
6960 trace_consume(iter);
6964 /* In other cases, don't print partial lines */
6965 iter->seq.seq.len = save_len;
6968 if (ret != TRACE_TYPE_NO_CONSUME)
6969 trace_consume(iter);
6971 if (trace_seq_used(&iter->seq) >= cnt)
6975 * Setting the full flag means we reached the trace_seq buffer
6976 * size, and we should have left via the partial-output condition above.
6977 * One of the trace_seq_* functions is not used properly.
6979 WARN_ONCE(iter->seq.full, "full flag set for trace type %d",
6982 trace_access_unlock(iter->cpu_file);
6983 trace_event_read_unlock();
6985 /* Now copy what we have to the user */
6986 sret = trace_seq_to_user(&iter->seq, ubuf, cnt);
6987 if (iter->seq.readpos >= trace_seq_used(&iter->seq))
6988 trace_seq_init(&iter->seq);
6991 * If there was nothing to send to user, in spite of consuming trace
6992 * entries, go back to wait for more entries.
6998 mutex_unlock(&iter->mutex);
7003 static void tracing_spd_release_pipe(struct splice_pipe_desc *spd,
7006 __free_page(spd->pages[idx]);
7010 tracing_fill_pipe_page(size_t rem, struct trace_iterator *iter)
7016 /* Seq buffer is page-sized, exactly what we need. */
7018 save_len = iter->seq.seq.len;
7019 ret = print_trace_line(iter);
7021 if (trace_seq_has_overflowed(&iter->seq)) {
7022 iter->seq.seq.len = save_len;
7027 * This should not be hit, because it should only
7028 * be set if the iter->seq overflowed. But check it
7029 * anyway to be safe.
7031 if (ret == TRACE_TYPE_PARTIAL_LINE) {
7032 iter->seq.seq.len = save_len;
7036 count = trace_seq_used(&iter->seq) - save_len;
7039 iter->seq.seq.len = save_len;
7043 if (ret != TRACE_TYPE_NO_CONSUME)
7044 trace_consume(iter);
7046 if (!trace_find_next_entry_inc(iter)) {
7056 static ssize_t tracing_splice_read_pipe(struct file *filp,
7058 struct pipe_inode_info *pipe,
7062 struct page *pages_def[PIPE_DEF_BUFFERS];
7063 struct partial_page partial_def[PIPE_DEF_BUFFERS];
7064 struct trace_iterator *iter = filp->private_data;
7065 struct splice_pipe_desc spd = {
7067 .partial = partial_def,
7068 .nr_pages = 0, /* This gets updated below. */
7069 .nr_pages_max = PIPE_DEF_BUFFERS,
7070 .ops = &default_pipe_buf_ops,
7071 .spd_release = tracing_spd_release_pipe,
7077 if (splice_grow_spd(pipe, &spd))
7080 mutex_lock(&iter->mutex);
7082 if (iter->trace->splice_read) {
7083 ret = iter->trace->splice_read(iter, filp,
7084 ppos, pipe, len, flags);
7089 ret = tracing_wait_pipe(filp);
7093 if (!iter->ent && !trace_find_next_entry_inc(iter)) {
7098 trace_event_read_lock();
7099 trace_access_lock(iter->cpu_file);
7101 /* Fill as many pages as possible. */
7102 for (i = 0, rem = len; i < spd.nr_pages_max && rem; i++) {
7103 spd.pages[i] = alloc_page(GFP_KERNEL);
7107 rem = tracing_fill_pipe_page(rem, iter);
7109 /* Copy the data into the page, so we can start over. */
7110 ret = trace_seq_to_buffer(&iter->seq,
7111 page_address(spd.pages[i]),
7112 trace_seq_used(&iter->seq));
7114 __free_page(spd.pages[i]);
7117 spd.partial[i].offset = 0;
7118 spd.partial[i].len = trace_seq_used(&iter->seq);
7120 trace_seq_init(&iter->seq);
7123 trace_access_unlock(iter->cpu_file);
7124 trace_event_read_unlock();
7125 mutex_unlock(&iter->mutex);
7130 ret = splice_to_pipe(pipe, &spd);
7134 splice_shrink_spd(&spd);
7138 mutex_unlock(&iter->mutex);
7143 tracing_entries_read(struct file *filp, char __user *ubuf,
7144 size_t cnt, loff_t *ppos)
7146 struct inode *inode = file_inode(filp);
7147 struct trace_array *tr = inode->i_private;
7148 int cpu = tracing_get_cpu(inode);
7153 mutex_lock(&trace_types_lock);
7155 if (cpu == RING_BUFFER_ALL_CPUS) {
7156 int cpu, buf_size_same;
7161 /* check if all cpu sizes are same */
7162 for_each_tracing_cpu(cpu) {
7163 /* fill in the size from first enabled cpu */
7165 size = per_cpu_ptr(tr->array_buffer.data, cpu)->entries;
7166 if (size != per_cpu_ptr(tr->array_buffer.data, cpu)->entries) {
7172 if (buf_size_same) {
7173 if (!tr->ring_buffer_expanded)
7174 r = sprintf(buf, "%lu (expanded: %lu)\n",
7176 trace_buf_size >> 10);
7178 r = sprintf(buf, "%lu\n", size >> 10);
7180 r = sprintf(buf, "X\n");
7182 r = sprintf(buf, "%lu\n", per_cpu_ptr(tr->array_buffer.data, cpu)->entries >> 10);
7184 mutex_unlock(&trace_types_lock);
7186 ret = simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
7191 tracing_entries_write(struct file *filp, const char __user *ubuf,
7192 size_t cnt, loff_t *ppos)
7194 struct inode *inode = file_inode(filp);
7195 struct trace_array *tr = inode->i_private;
7199 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
7203 /* must have at least 1 entry */
7207 /* value is in KB */
7209 ret = tracing_resize_ring_buffer(tr, val, tracing_get_cpu(inode));
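/*
 * A usage sketch from user space (assuming tracefs is mounted at
 * /sys/kernel/tracing; the sizes are illustrative):
 *
 *	# resize every per-CPU buffer to 4 MB
 *	echo 4096 > /sys/kernel/tracing/buffer_size_kb
 *
 *	# resize only CPU 1's buffer
 *	echo 4096 > /sys/kernel/tracing/per_cpu/cpu1/buffer_size_kb
 */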
7219 tracing_total_entries_read(struct file *filp, char __user *ubuf,
7220 size_t cnt, loff_t *ppos)
7222 struct trace_array *tr = filp->private_data;
7225 unsigned long size = 0, expanded_size = 0;
7227 mutex_lock(&trace_types_lock);
7228 for_each_tracing_cpu(cpu) {
7229 size += per_cpu_ptr(tr->array_buffer.data, cpu)->entries >> 10;
7230 if (!tr->ring_buffer_expanded)
7231 expanded_size += trace_buf_size >> 10;
7233 if (tr->ring_buffer_expanded)
7234 r = sprintf(buf, "%lu\n", size);
7236 r = sprintf(buf, "%lu (expanded: %lu)\n", size, expanded_size);
7237 mutex_unlock(&trace_types_lock);
7239 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
7243 tracing_free_buffer_write(struct file *filp, const char __user *ubuf,
7244 size_t cnt, loff_t *ppos)
7247	 * There is no need to read what the user has written; this function
7248	 * only exists to make sure that no error occurs when "echo" is used
7257 tracing_free_buffer_release(struct inode *inode, struct file *filp)
7259 struct trace_array *tr = inode->i_private;
7261	/* disable tracing? */
7262 if (tr->trace_flags & TRACE_ITER_STOP_ON_FREE)
7263 tracer_tracing_off(tr);
7264 /* resize the ring buffer to 0 */
7265 tracing_resize_ring_buffer(tr, 0, RING_BUFFER_ALL_CPUS);
7267 trace_array_put(tr);
7273 tracing_mark_write(struct file *filp, const char __user *ubuf,
7274 size_t cnt, loff_t *fpos)
7276 struct trace_array *tr = filp->private_data;
7277 struct ring_buffer_event *event;
7278 enum event_trigger_type tt = ETT_NONE;
7279 struct trace_buffer *buffer;
7280 struct print_entry *entry;
7285 /* Used in tracing_mark_raw_write() as well */
7286 #define FAULTED_STR "<faulted>"
7287 #define FAULTED_SIZE (sizeof(FAULTED_STR) - 1) /* '\0' is already accounted for */
7289 if (tracing_disabled)
7292 if (!(tr->trace_flags & TRACE_ITER_MARKERS))
7295 if (cnt > TRACE_BUF_SIZE)
7296 cnt = TRACE_BUF_SIZE;
7298 BUILD_BUG_ON(TRACE_BUF_SIZE >= PAGE_SIZE);
7300 size = sizeof(*entry) + cnt + 2; /* add '\0' and possible '\n' */
7302 /* If less than "<faulted>", then make sure we can still add that */
7303 if (cnt < FAULTED_SIZE)
7304 size += FAULTED_SIZE - cnt;
7306 buffer = tr->array_buffer.buffer;
7307 event = __trace_buffer_lock_reserve(buffer, TRACE_PRINT, size,
7309 if (unlikely(!event))
7310 /* Ring buffer disabled, return as if not open for write */
7313 entry = ring_buffer_event_data(event);
7314 entry->ip = _THIS_IP_;
7316 len = __copy_from_user_inatomic(&entry->buf, ubuf, cnt);
7318 memcpy(&entry->buf, FAULTED_STR, FAULTED_SIZE);
7324 if (tr->trace_marker_file && !list_empty(&tr->trace_marker_file->triggers)) {
7325 /* do not add \n before testing triggers, but add \0 */
7326 entry->buf[cnt] = '\0';
7327 tt = event_triggers_call(tr->trace_marker_file, buffer, entry, event);
7330 if (entry->buf[cnt - 1] != '\n') {
7331 entry->buf[cnt] = '\n';
7332 entry->buf[cnt + 1] = '\0';
7334 entry->buf[cnt] = '\0';
7336 if (static_branch_unlikely(&trace_marker_exports_enabled))
7337 ftrace_exports(event, TRACE_EXPORT_MARKER);
7338 __buffer_unlock_commit(buffer, event);
7341 event_triggers_post_call(tr->trace_marker_file, tt);
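/*
 * For example (a sketch; assumes tracefs is mounted at /sys/kernel/tracing):
 *
 *	echo "hello world" > /sys/kernel/tracing/trace_marker
 *
 * shows up in the trace output as a print event, e.g.:
 *
 *	bash-1234  [002] .....  510.206109: tracing_mark_write: hello world
 */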
7346 /* Limit it for now to 3K (including tag) */
7347 #define RAW_DATA_MAX_SIZE (1024*3)
7350 tracing_mark_raw_write(struct file *filp, const char __user *ubuf,
7351 size_t cnt, loff_t *fpos)
7353 struct trace_array *tr = filp->private_data;
7354 struct ring_buffer_event *event;
7355 struct trace_buffer *buffer;
7356 struct raw_data_entry *entry;
7361 #define FAULT_SIZE_ID (FAULTED_SIZE + sizeof(int))
7363 if (tracing_disabled)
7366 if (!(tr->trace_flags & TRACE_ITER_MARKERS))
7369 /* The marker must at least have a tag id */
7370 if (cnt < sizeof(unsigned int) || cnt > RAW_DATA_MAX_SIZE)
7373 if (cnt > TRACE_BUF_SIZE)
7374 cnt = TRACE_BUF_SIZE;
7376 BUILD_BUG_ON(TRACE_BUF_SIZE >= PAGE_SIZE);
7378 size = sizeof(*entry) + cnt;
7379 if (cnt < FAULT_SIZE_ID)
7380 size += FAULT_SIZE_ID - cnt;
7382 buffer = tr->array_buffer.buffer;
7383 event = __trace_buffer_lock_reserve(buffer, TRACE_RAW_DATA, size,
7386 /* Ring buffer disabled, return as if not open for write */
7389 entry = ring_buffer_event_data(event);
7391 len = __copy_from_user_inatomic(&entry->id, ubuf, cnt);
7394 memcpy(&entry->buf, FAULTED_STR, FAULTED_SIZE);
7399 __buffer_unlock_commit(buffer, event);
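/*
 * A minimal user-space sketch of writing a raw marker. The first
 * sizeof(int) bytes are consumed as the tag id (the value 42 below is
 * arbitrary), the rest is opaque payload:
 *
 *	#include <fcntl.h>
 *	#include <string.h>
 *	#include <unistd.h>
 *
 *	int main(void)
 *	{
 *		char buf[sizeof(int) + 8];
 *		int id = 42, fd;
 *
 *		memcpy(buf, &id, sizeof(id));
 *		memcpy(buf + sizeof(id), "rawdata", 8);
 *		fd = open("/sys/kernel/tracing/trace_marker_raw", O_WRONLY);
 *		if (fd < 0)
 *			return 1;
 *		write(fd, buf, sizeof(buf));
 *		close(fd);
 *		return 0;
 *	}
 */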
7404 static int tracing_clock_show(struct seq_file *m, void *v)
7406 struct trace_array *tr = m->private;
7409 for (i = 0; i < ARRAY_SIZE(trace_clocks); i++)
7411 "%s%s%s%s", i ? " " : "",
7412 i == tr->clock_id ? "[" : "", trace_clocks[i].name,
7413 i == tr->clock_id ? "]" : "");
7419 int tracing_set_clock(struct trace_array *tr, const char *clockstr)
7423 for (i = 0; i < ARRAY_SIZE(trace_clocks); i++) {
7424 if (strcmp(trace_clocks[i].name, clockstr) == 0)
7427 if (i == ARRAY_SIZE(trace_clocks))
7430 mutex_lock(&trace_types_lock);
7434 ring_buffer_set_clock(tr->array_buffer.buffer, trace_clocks[i].func);
7437 * New clock may not be consistent with the previous clock.
7438 * Reset the buffer so that it doesn't have incomparable timestamps.
7440 tracing_reset_online_cpus(&tr->array_buffer);
7442 #ifdef CONFIG_TRACER_MAX_TRACE
7443 if (tr->max_buffer.buffer)
7444 ring_buffer_set_clock(tr->max_buffer.buffer, trace_clocks[i].func);
7445 tracing_reset_online_cpus(&tr->max_buffer);
7448 mutex_unlock(&trace_types_lock);
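/*
 * Reading trace_clock lists the available clocks with the current one
 * bracketed; writing a name switches clocks and, as noted above, resets
 * the buffers. A sketch (the clock list varies by architecture):
 *
 *	# cat /sys/kernel/tracing/trace_clock
 *	[local] global counter uptime perf mono mono_raw boot x86-tsc
 *	# echo global > /sys/kernel/tracing/trace_clock
 */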
7453 static ssize_t tracing_clock_write(struct file *filp, const char __user *ubuf,
7454 size_t cnt, loff_t *fpos)
7456 struct seq_file *m = filp->private_data;
7457 struct trace_array *tr = m->private;
7459 const char *clockstr;
7462 if (cnt >= sizeof(buf))
7465 if (copy_from_user(buf, ubuf, cnt))
7470 clockstr = strstrip(buf);
7472 ret = tracing_set_clock(tr, clockstr);
7481 static int tracing_clock_open(struct inode *inode, struct file *file)
7483 struct trace_array *tr = inode->i_private;
7486 ret = tracing_check_open_get_tr(tr);
7490 ret = single_open(file, tracing_clock_show, inode->i_private);
7492 trace_array_put(tr);
7497 static int tracing_time_stamp_mode_show(struct seq_file *m, void *v)
7499 struct trace_array *tr = m->private;
7501 mutex_lock(&trace_types_lock);
7503 if (ring_buffer_time_stamp_abs(tr->array_buffer.buffer))
7504 seq_puts(m, "delta [absolute]\n");
7506 seq_puts(m, "[delta] absolute\n");
7508 mutex_unlock(&trace_types_lock);
7513 static int tracing_time_stamp_mode_open(struct inode *inode, struct file *file)
7515 struct trace_array *tr = inode->i_private;
7518 ret = tracing_check_open_get_tr(tr);
7522 ret = single_open(file, tracing_time_stamp_mode_show, inode->i_private);
7524 trace_array_put(tr);
7529 u64 tracing_event_time_stamp(struct trace_buffer *buffer, struct ring_buffer_event *rbe)
7531 if (rbe == this_cpu_read(trace_buffered_event))
7532 return ring_buffer_time_stamp(buffer);
7534 return ring_buffer_event_time_stamp(buffer, rbe);
7538 * Enable or disable use of the per-CPU trace_buffered_event when possible.
7540 int tracing_set_filter_buffering(struct trace_array *tr, bool set)
7544 mutex_lock(&trace_types_lock);
7546 if (set && tr->no_filter_buffering_ref++)
7550 if (WARN_ON_ONCE(!tr->no_filter_buffering_ref)) {
7555 --tr->no_filter_buffering_ref;
7558 mutex_unlock(&trace_types_lock);
7563 struct ftrace_buffer_info {
7564 struct trace_iterator iter;
7566 unsigned int spare_cpu;
7570 #ifdef CONFIG_TRACER_SNAPSHOT
7571 static int tracing_snapshot_open(struct inode *inode, struct file *file)
7573 struct trace_array *tr = inode->i_private;
7574 struct trace_iterator *iter;
7578 ret = tracing_check_open_get_tr(tr);
7582 if (file->f_mode & FMODE_READ) {
7583 iter = __tracing_open(inode, file, true);
7585 ret = PTR_ERR(iter);
7587 /* Writes still need the seq_file to hold the private data */
7589 m = kzalloc(sizeof(*m), GFP_KERNEL);
7592 iter = kzalloc(sizeof(*iter), GFP_KERNEL);
7600 iter->array_buffer = &tr->max_buffer;
7601 iter->cpu_file = tracing_get_cpu(inode);
7603 file->private_data = m;
7607 trace_array_put(tr);
7612 static void tracing_swap_cpu_buffer(void *tr)
7614 update_max_tr_single((struct trace_array *)tr, current, smp_processor_id());
7618 tracing_snapshot_write(struct file *filp, const char __user *ubuf, size_t cnt,
7621 struct seq_file *m = filp->private_data;
7622 struct trace_iterator *iter = m->private;
7623 struct trace_array *tr = iter->tr;
7627 ret = tracing_update_buffers(tr);
7631 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
7635 mutex_lock(&trace_types_lock);
7637 if (tr->current_trace->use_max_tr) {
7642 local_irq_disable();
7643 arch_spin_lock(&tr->max_lock);
7644 if (tr->cond_snapshot)
7646 arch_spin_unlock(&tr->max_lock);
7653 if (iter->cpu_file != RING_BUFFER_ALL_CPUS) {
7657 if (tr->allocated_snapshot)
7661 /* Only allow per-cpu swap if the ring buffer supports it */
7662 #ifndef CONFIG_RING_BUFFER_ALLOW_SWAP
7663 if (iter->cpu_file != RING_BUFFER_ALL_CPUS) {
7668 if (tr->allocated_snapshot)
7669 ret = resize_buffer_duplicate_size(&tr->max_buffer,
7670 &tr->array_buffer, iter->cpu_file);
7672 ret = tracing_alloc_snapshot_instance(tr);
7675 /* Now, we're going to swap */
7676 if (iter->cpu_file == RING_BUFFER_ALL_CPUS) {
7677 local_irq_disable();
7678 update_max_tr(tr, current, smp_processor_id(), NULL);
7681 smp_call_function_single(iter->cpu_file, tracing_swap_cpu_buffer,
7686 if (tr->allocated_snapshot) {
7687 if (iter->cpu_file == RING_BUFFER_ALL_CPUS)
7688 tracing_reset_online_cpus(&tr->max_buffer);
7690 tracing_reset_cpu(&tr->max_buffer, iter->cpu_file);
7700 mutex_unlock(&trace_types_lock);
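/*
 * The accepted values follow the documented snapshot semantics
 * (a sketch):
 *
 *	echo 0 > snapshot	# clear and free the snapshot buffer
 *	echo 1 > snapshot	# allocate (if needed) and take a snapshot
 *	echo 2 > snapshot	# clear the snapshot contents, keep the buffer
 */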
7704 static int tracing_snapshot_release(struct inode *inode, struct file *file)
7706 struct seq_file *m = file->private_data;
7709 ret = tracing_release(inode, file);
7711 if (file->f_mode & FMODE_READ)
7714 /* If write only, the seq_file is just a stub */
7722 static int tracing_buffers_open(struct inode *inode, struct file *filp);
7723 static ssize_t tracing_buffers_read(struct file *filp, char __user *ubuf,
7724 size_t count, loff_t *ppos);
7725 static int tracing_buffers_release(struct inode *inode, struct file *file);
7726 static ssize_t tracing_buffers_splice_read(struct file *file, loff_t *ppos,
7727 struct pipe_inode_info *pipe, size_t len, unsigned int flags);
7729 static int snapshot_raw_open(struct inode *inode, struct file *filp)
7731 struct ftrace_buffer_info *info;
7734 /* The following checks for tracefs lockdown */
7735 ret = tracing_buffers_open(inode, filp);
7739 info = filp->private_data;
7741 if (info->iter.trace->use_max_tr) {
7742 tracing_buffers_release(inode, filp);
7746 info->iter.snapshot = true;
7747 info->iter.array_buffer = &info->iter.tr->max_buffer;
7752 #endif /* CONFIG_TRACER_SNAPSHOT */
7755 static const struct file_operations tracing_thresh_fops = {
7756 .open = tracing_open_generic,
7757 .read = tracing_thresh_read,
7758 .write = tracing_thresh_write,
7759 .llseek = generic_file_llseek,
7762 #ifdef CONFIG_TRACER_MAX_TRACE
7763 static const struct file_operations tracing_max_lat_fops = {
7764 .open = tracing_open_generic_tr,
7765 .read = tracing_max_lat_read,
7766 .write = tracing_max_lat_write,
7767 .llseek = generic_file_llseek,
7768 .release = tracing_release_generic_tr,
7772 static const struct file_operations set_tracer_fops = {
7773 .open = tracing_open_generic_tr,
7774 .read = tracing_set_trace_read,
7775 .write = tracing_set_trace_write,
7776 .llseek = generic_file_llseek,
7777 .release = tracing_release_generic_tr,
7780 static const struct file_operations tracing_pipe_fops = {
7781 .open = tracing_open_pipe,
7782 .poll = tracing_poll_pipe,
7783 .read = tracing_read_pipe,
7784 .splice_read = tracing_splice_read_pipe,
7785 .release = tracing_release_pipe,
7786 .llseek = no_llseek,
7789 static const struct file_operations tracing_entries_fops = {
7790 .open = tracing_open_generic_tr,
7791 .read = tracing_entries_read,
7792 .write = tracing_entries_write,
7793 .llseek = generic_file_llseek,
7794 .release = tracing_release_generic_tr,
7797 static const struct file_operations tracing_total_entries_fops = {
7798 .open = tracing_open_generic_tr,
7799 .read = tracing_total_entries_read,
7800 .llseek = generic_file_llseek,
7801 .release = tracing_release_generic_tr,
7804 static const struct file_operations tracing_free_buffer_fops = {
7805 .open = tracing_open_generic_tr,
7806 .write = tracing_free_buffer_write,
7807 .release = tracing_free_buffer_release,
7810 static const struct file_operations tracing_mark_fops = {
7811 .open = tracing_mark_open,
7812 .write = tracing_mark_write,
7813 .release = tracing_release_generic_tr,
7816 static const struct file_operations tracing_mark_raw_fops = {
7817 .open = tracing_mark_open,
7818 .write = tracing_mark_raw_write,
7819 .release = tracing_release_generic_tr,
7822 static const struct file_operations trace_clock_fops = {
7823 .open = tracing_clock_open,
7825 .llseek = seq_lseek,
7826 .release = tracing_single_release_tr,
7827 .write = tracing_clock_write,
7830 static const struct file_operations trace_time_stamp_mode_fops = {
7831 .open = tracing_time_stamp_mode_open,
7833 .llseek = seq_lseek,
7834 .release = tracing_single_release_tr,
7837 #ifdef CONFIG_TRACER_SNAPSHOT
7838 static const struct file_operations snapshot_fops = {
7839 .open = tracing_snapshot_open,
7841 .write = tracing_snapshot_write,
7842 .llseek = tracing_lseek,
7843 .release = tracing_snapshot_release,
7846 static const struct file_operations snapshot_raw_fops = {
7847 .open = snapshot_raw_open,
7848 .read = tracing_buffers_read,
7849 .release = tracing_buffers_release,
7850 .splice_read = tracing_buffers_splice_read,
7851 .llseek = no_llseek,
7854 #endif /* CONFIG_TRACER_SNAPSHOT */
7857 * trace_min_max_write - Write a u64 value to a trace_min_max_param struct
7858 * @filp: The active open file structure
7859 * @ubuf: The userspace provided buffer to read the value from
7860 * @cnt: The maximum number of bytes to read
7861 * @ppos: The current "file" position
7863 * This function implements the write interface for a struct trace_min_max_param.
7864 * The filp->private_data must point to a trace_min_max_param structure that
7865 * defines where to write the value, the min and the max acceptable values,
7866 * and a lock to protect the write.
7869 trace_min_max_write(struct file *filp, const char __user *ubuf, size_t cnt, loff_t *ppos)
7871 struct trace_min_max_param *param = filp->private_data;
7878 err = kstrtoull_from_user(ubuf, cnt, 10, &val);
7883 mutex_lock(param->lock);
7885 if (param->min && val < *param->min)
7888 if (param->max && val > *param->max)
7895 mutex_unlock(param->lock);
7904 * trace_min_max_read - Read a u64 value from a trace_min_max_param struct
7905 * @filp: The active open file structure
7906 * @ubuf: The userspace provided buffer to read value into
7907 * @cnt: The maximum number of bytes to read
7908 * @ppos: The current "file" position
7910 * This function implements the read interface for a struct trace_min_max_param.
7911 * The filp->private_data must point to a trace_min_max_param struct with valid data.
7915 trace_min_max_read(struct file *filp, char __user *ubuf, size_t cnt, loff_t *ppos)
7917 struct trace_min_max_param *param = filp->private_data;
7918 char buf[U64_STR_SIZE];
7927 if (cnt > sizeof(buf))
7930 len = snprintf(buf, sizeof(buf), "%llu\n", val);
7932 return simple_read_from_buffer(ubuf, cnt, ppos, buf, len);
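/*
 * A hypothetical sketch of wiring a clamped u64 knob through these
 * handlers; the "example" identifiers are illustrative, and the field
 * names assume struct trace_min_max_param as declared in trace.h:
 *
 *	static u64 example_val;
 *	static u64 example_min = 1;
 *	static u64 example_max = 100;
 *	static DEFINE_MUTEX(example_lock);
 *
 *	static struct trace_min_max_param example_param = {
 *		.lock	= &example_lock,
 *		.val	= &example_val,
 *		.min	= &example_min,
 *		.max	= &example_max,
 *	};
 *
 *	trace_create_file("example", TRACE_MODE_WRITE, parent,
 *			  &example_param, &trace_min_max_fops);
 */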
7935 const struct file_operations trace_min_max_fops = {
7936 .open = tracing_open_generic,
7937 .read = trace_min_max_read,
7938 .write = trace_min_max_write,
7941 #define TRACING_LOG_ERRS_MAX 8
7942 #define TRACING_LOG_LOC_MAX 128
7944 #define CMD_PREFIX " Command: "
7947 const char **errs; /* ptr to loc-specific array of err strings */
7948 u8 type; /* index into errs -> specific err string */
7949 u16 pos; /* caret position */
7953 struct tracing_log_err {
7954 struct list_head list;
7955 struct err_info info;
7956 char loc[TRACING_LOG_LOC_MAX]; /* err location */
7957 char *cmd; /* what caused err */
7960 static DEFINE_MUTEX(tracing_err_log_lock);
7962 static struct tracing_log_err *alloc_tracing_log_err(int len)
7964 struct tracing_log_err *err;
7966 err = kzalloc(sizeof(*err), GFP_KERNEL);
7968 return ERR_PTR(-ENOMEM);
7970 err->cmd = kzalloc(len, GFP_KERNEL);
7973 return ERR_PTR(-ENOMEM);
7979 static void free_tracing_log_err(struct tracing_log_err *err)
7985 static struct tracing_log_err *get_tracing_log_err(struct trace_array *tr,
7988 struct tracing_log_err *err;
7991 if (tr->n_err_log_entries < TRACING_LOG_ERRS_MAX) {
7992 err = alloc_tracing_log_err(len);
7993 if (PTR_ERR(err) != -ENOMEM)
7994 tr->n_err_log_entries++;
7998 cmd = kzalloc(len, GFP_KERNEL);
8000 return ERR_PTR(-ENOMEM);
8001 err = list_first_entry(&tr->err_log, struct tracing_log_err, list);
8004 list_del(&err->list);
8010 * err_pos - find the position of a string within a command for error careting
8011 * @cmd: The tracing command that caused the error
8012 * @str: The string to position the caret at within @cmd
8014 * Finds the position of the first occurrence of @str within @cmd. The
8015 * return value can be passed to tracing_log_err() for caret placement
8018 * Returns the index within @cmd of the first occurrence of @str or 0
8019 * if @str was not found.
8021 unsigned int err_pos(char *cmd, const char *str)
8025 if (WARN_ON(!strlen(cmd)))
8028 found = strstr(cmd, str);
8036 * tracing_log_err - write an error to the tracing error log
8037 * @tr: The associated trace array for the error (NULL for top level array)
8038 * @loc: A string describing where the error occurred
8039 * @cmd: The tracing command that caused the error
8040 * @errs: The array of loc-specific static error strings
8041 * @type: The index into errs[], which produces the specific static err string
8042 * @pos: The position the caret should be placed in the cmd
8044 * Writes an error into tracing/error_log of the form:
8046 * <loc>: error: <text>
8050 * tracing/error_log is a small log file containing the last
8051 * TRACING_LOG_ERRS_MAX errors (8). Memory for errors isn't allocated
8052 * unless there has been a tracing error, and the error log can be
8053 * cleared and have its memory freed by writing the empty string in
8054 * truncation mode to it, i.e. echo > tracing/error_log.
8056 * NOTE: the @errs array along with the @type param are used to
8057 * produce a static error string - this string is not copied and saved
8058 * when the error is logged - only a pointer to it is saved. See
8059 * existing callers for examples of how static strings are typically
8060 * defined for use with tracing_log_err().
8062 void tracing_log_err(struct trace_array *tr,
8063 const char *loc, const char *cmd,
8064 const char **errs, u8 type, u16 pos)
8066 struct tracing_log_err *err;
8072 len += sizeof(CMD_PREFIX) + 2 * sizeof("\n") + strlen(cmd) + 1;
8074 mutex_lock(&tracing_err_log_lock);
8075 err = get_tracing_log_err(tr, len);
8076 if (PTR_ERR(err) == -ENOMEM) {
8077 mutex_unlock(&tracing_err_log_lock);
8081 snprintf(err->loc, TRACING_LOG_LOC_MAX, "%s: error: ", loc);
8082 snprintf(err->cmd, len, "\n" CMD_PREFIX "%s\n", cmd);
8084 err->info.errs = errs;
8085 err->info.type = type;
8086 err->info.pos = pos;
8087 err->info.ts = local_clock();
8089 list_add_tail(&err->list, &tr->err_log);
8090 mutex_unlock(&tracing_err_log_lock);
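/*
 * The resulting error_log contents look like this (a sketch; the
 * timestamp, command, and caret position are illustrative):
 *
 *	# cat /sys/kernel/tracing/error_log
 *	[ 8881.973595] hist:sched:sched_wakeup: error: Couldn't find field
 *	  Command: hist:keys=pid:ts0=common_timestamp.usecs
 *	                          ^
 */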
8093 static void clear_tracing_err_log(struct trace_array *tr)
8095 struct tracing_log_err *err, *next;
8097 mutex_lock(&tracing_err_log_lock);
8098 list_for_each_entry_safe(err, next, &tr->err_log, list) {
8099 list_del(&err->list);
8100 free_tracing_log_err(err);
8103 tr->n_err_log_entries = 0;
8104 mutex_unlock(&tracing_err_log_lock);
8107 static void *tracing_err_log_seq_start(struct seq_file *m, loff_t *pos)
8109 struct trace_array *tr = m->private;
8111 mutex_lock(&tracing_err_log_lock);
8113 return seq_list_start(&tr->err_log, *pos);
8116 static void *tracing_err_log_seq_next(struct seq_file *m, void *v, loff_t *pos)
8118 struct trace_array *tr = m->private;
8120 return seq_list_next(v, &tr->err_log, pos);
8123 static void tracing_err_log_seq_stop(struct seq_file *m, void *v)
8125 mutex_unlock(&tracing_err_log_lock);
8128 static void tracing_err_log_show_pos(struct seq_file *m, u16 pos)
8132 for (i = 0; i < sizeof(CMD_PREFIX) - 1; i++)
8134 for (i = 0; i < pos; i++)
8139 static int tracing_err_log_seq_show(struct seq_file *m, void *v)
8141 struct tracing_log_err *err = v;
8144 const char *err_text = err->info.errs[err->info.type];
8145 u64 sec = err->info.ts;
8148 nsec = do_div(sec, NSEC_PER_SEC);
8149 seq_printf(m, "[%5llu.%06u] %s%s", sec, nsec / 1000,
8150 err->loc, err_text);
8151 seq_printf(m, "%s", err->cmd);
8152 tracing_err_log_show_pos(m, err->info.pos);
8158 static const struct seq_operations tracing_err_log_seq_ops = {
8159 .start = tracing_err_log_seq_start,
8160 .next = tracing_err_log_seq_next,
8161 .stop = tracing_err_log_seq_stop,
8162 .show = tracing_err_log_seq_show
8165 static int tracing_err_log_open(struct inode *inode, struct file *file)
8167 struct trace_array *tr = inode->i_private;
8170 ret = tracing_check_open_get_tr(tr);
8174 /* If this file was opened for write, then erase contents */
8175 if ((file->f_mode & FMODE_WRITE) && (file->f_flags & O_TRUNC))
8176 clear_tracing_err_log(tr);
8178 if (file->f_mode & FMODE_READ) {
8179 ret = seq_open(file, &tracing_err_log_seq_ops);
8181 struct seq_file *m = file->private_data;
8184 trace_array_put(tr);
8190 static ssize_t tracing_err_log_write(struct file *file,
8191 const char __user *buffer,
8192 size_t count, loff_t *ppos)
8197 static int tracing_err_log_release(struct inode *inode, struct file *file)
8199 struct trace_array *tr = inode->i_private;
8201 trace_array_put(tr);
8203 if (file->f_mode & FMODE_READ)
8204 seq_release(inode, file);
8209 static const struct file_operations tracing_err_log_fops = {
8210 .open = tracing_err_log_open,
8211 .write = tracing_err_log_write,
8213 .llseek = tracing_lseek,
8214 .release = tracing_err_log_release,
8217 static int tracing_buffers_open(struct inode *inode, struct file *filp)
8219 struct trace_array *tr = inode->i_private;
8220 struct ftrace_buffer_info *info;
8223 ret = tracing_check_open_get_tr(tr);
8227 info = kvzalloc(sizeof(*info), GFP_KERNEL);
8229 trace_array_put(tr);
8233 mutex_lock(&trace_types_lock);
8236 info->iter.cpu_file = tracing_get_cpu(inode);
8237 info->iter.trace = tr->current_trace;
8238 info->iter.array_buffer = &tr->array_buffer;
8240 /* Force reading ring buffer for first read */
8241 info->read = (unsigned int)-1;
8243 filp->private_data = info;
8247 mutex_unlock(&trace_types_lock);
8249 ret = nonseekable_open(inode, filp);
8251 trace_array_put(tr);
8257 tracing_buffers_poll(struct file *filp, poll_table *poll_table)
8259 struct ftrace_buffer_info *info = filp->private_data;
8260 struct trace_iterator *iter = &info->iter;
8262 return trace_poll(iter, filp, poll_table);
8266 tracing_buffers_read(struct file *filp, char __user *ubuf,
8267 size_t count, loff_t *ppos)
8269 struct ftrace_buffer_info *info = filp->private_data;
8270 struct trace_iterator *iter = &info->iter;
8277 #ifdef CONFIG_TRACER_MAX_TRACE
8278 if (iter->snapshot && iter->tr->current_trace->use_max_tr)
8283 info->spare = ring_buffer_alloc_read_page(iter->array_buffer->buffer,
8285 if (IS_ERR(info->spare)) {
8286 ret = PTR_ERR(info->spare);
8289 info->spare_cpu = iter->cpu_file;
8295 /* Do we have previous read data to read? */
8296 if (info->read < PAGE_SIZE)
8300 trace_access_lock(iter->cpu_file);
8301 ret = ring_buffer_read_page(iter->array_buffer->buffer,
8305 trace_access_unlock(iter->cpu_file);
8308 if (trace_empty(iter)) {
8309 if ((filp->f_flags & O_NONBLOCK))
8312 ret = wait_on_pipe(iter, 0);
8323 size = PAGE_SIZE - info->read;
8327 ret = copy_to_user(ubuf, info->spare + info->read, size);
8339 static int tracing_buffers_release(struct inode *inode, struct file *file)
8341 struct ftrace_buffer_info *info = file->private_data;
8342 struct trace_iterator *iter = &info->iter;
8344 mutex_lock(&trace_types_lock);
8346 iter->tr->trace_ref--;
8348 __trace_array_put(iter->tr);
8351 /* Make sure the waiters see the new wait_index */
8354 ring_buffer_wake_waiters(iter->array_buffer->buffer, iter->cpu_file);
8357 ring_buffer_free_read_page(iter->array_buffer->buffer,
8358 info->spare_cpu, info->spare);
8361 mutex_unlock(&trace_types_lock);
8367 struct trace_buffer *buffer;
8370 refcount_t refcount;
8373 static void buffer_ref_release(struct buffer_ref *ref)
8375 if (!refcount_dec_and_test(&ref->refcount))
8377 ring_buffer_free_read_page(ref->buffer, ref->cpu, ref->page);
8381 static void buffer_pipe_buf_release(struct pipe_inode_info *pipe,
8382 struct pipe_buffer *buf)
8384 struct buffer_ref *ref = (struct buffer_ref *)buf->private;
8386 buffer_ref_release(ref);
8390 static bool buffer_pipe_buf_get(struct pipe_inode_info *pipe,
8391 struct pipe_buffer *buf)
8393 struct buffer_ref *ref = (struct buffer_ref *)buf->private;
8395 if (refcount_read(&ref->refcount) > INT_MAX/2)
8398 refcount_inc(&ref->refcount);
8402 /* Pipe buffer operations for a buffer. */
8403 static const struct pipe_buf_operations buffer_pipe_buf_ops = {
8404 .release = buffer_pipe_buf_release,
8405 .get = buffer_pipe_buf_get,
8409 * Callback from splice_to_pipe(), if we need to release some pages
8410 * at the end of the spd in case we errored out while filling the pipe.
8412 static void buffer_spd_release(struct splice_pipe_desc *spd, unsigned int i)
8414 struct buffer_ref *ref =
8415 (struct buffer_ref *)spd->partial[i].private;
8417 buffer_ref_release(ref);
8418 spd->partial[i].private = 0;
8422 tracing_buffers_splice_read(struct file *file, loff_t *ppos,
8423 struct pipe_inode_info *pipe, size_t len,
8426 struct ftrace_buffer_info *info = file->private_data;
8427 struct trace_iterator *iter = &info->iter;
8428 struct partial_page partial_def[PIPE_DEF_BUFFERS];
8429 struct page *pages_def[PIPE_DEF_BUFFERS];
8430 struct splice_pipe_desc spd = {
8432 .partial = partial_def,
8433 .nr_pages_max = PIPE_DEF_BUFFERS,
8434 .ops = &buffer_pipe_buf_ops,
8435 .spd_release = buffer_spd_release,
8437 struct buffer_ref *ref;
8441 #ifdef CONFIG_TRACER_MAX_TRACE
8442 if (iter->snapshot && iter->tr->current_trace->use_max_tr)
8446 if (*ppos & (PAGE_SIZE - 1))
8449 if (len & (PAGE_SIZE - 1)) {
8450 if (len < PAGE_SIZE)
8455 if (splice_grow_spd(pipe, &spd))
8459 trace_access_lock(iter->cpu_file);
8460 entries = ring_buffer_entries_cpu(iter->array_buffer->buffer, iter->cpu_file);
8462 for (i = 0; i < spd.nr_pages_max && len && entries; i++, len -= PAGE_SIZE) {
8466 ref = kzalloc(sizeof(*ref), GFP_KERNEL);
8472 refcount_set(&ref->refcount, 1);
8473 ref->buffer = iter->array_buffer->buffer;
8474 ref->page = ring_buffer_alloc_read_page(ref->buffer, iter->cpu_file);
8475 if (IS_ERR(ref->page)) {
8476 ret = PTR_ERR(ref->page);
8481 ref->cpu = iter->cpu_file;
8483 r = ring_buffer_read_page(ref->buffer, &ref->page,
8484 len, iter->cpu_file, 1);
8486 ring_buffer_free_read_page(ref->buffer, ref->cpu,
8492 page = virt_to_page(ref->page);
8494 spd.pages[i] = page;
8495 spd.partial[i].len = PAGE_SIZE;
8496 spd.partial[i].offset = 0;
8497 spd.partial[i].private = (unsigned long)ref;
8501 entries = ring_buffer_entries_cpu(iter->array_buffer->buffer, iter->cpu_file);
8504 trace_access_unlock(iter->cpu_file);
8507 /* did we read anything? */
8508 if (!spd.nr_pages) {
8515 if ((file->f_flags & O_NONBLOCK) || (flags & SPLICE_F_NONBLOCK))
8518 wait_index = READ_ONCE(iter->wait_index);
8520 ret = wait_on_pipe(iter, iter->tr->buffer_percent);
8524 /* No need to wait after waking up when tracing is off */
8525 if (!tracer_tracing_is_on(iter->tr))
8528 /* Make sure we see the new wait_index */
8530 if (wait_index != iter->wait_index)
8536 ret = splice_to_pipe(pipe, &spd);
8538 splice_shrink_spd(&spd);
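/*
 * A minimal user-space sketch of draining trace_pipe_raw with splice().
 * Since splice() needs a pipe on one side, the data is bounced through
 * an anonymous pipe (assumes tracefs at /sys/kernel/tracing and a 4 KB
 * page size, matching the alignment checks above):
 *
 *	#define _GNU_SOURCE
 *	#include <fcntl.h>
 *	#include <unistd.h>
 *
 *	int main(void)
 *	{
 *		int p[2], in, out;
 *		ssize_t n;
 *
 *		in = open("/sys/kernel/tracing/per_cpu/cpu0/trace_pipe_raw",
 *			  O_RDONLY | O_NONBLOCK);
 *		out = open("cpu0.raw", O_WRONLY | O_CREAT | O_TRUNC, 0644);
 *		if (in < 0 || out < 0 || pipe(p) < 0)
 *			return 1;
 *		while ((n = splice(in, NULL, p[1], NULL, 4096, 0)) > 0)
 *			splice(p[0], NULL, out, NULL, n, 0);
 *		return 0;
 *	}
 */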
8543 /* An ioctl call with cmd 0 to the ring buffer file will wake up all waiters */
8544 static long tracing_buffers_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
8546 struct ftrace_buffer_info *info = file->private_data;
8547 struct trace_iterator *iter = &info->iter;
8550 return -ENOIOCTLCMD;
8552 mutex_lock(&trace_types_lock);
8555 /* Make sure the waiters see the new wait_index */
8558 ring_buffer_wake_waiters(iter->array_buffer->buffer, iter->cpu_file);
8560 mutex_unlock(&trace_types_lock);
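/*
 * For example (a sketch), a control thread holding any fd opened on
 * trace_pipe_raw can break readers out of a blocked read or splice:
 *
 *	ioctl(fd, 0);
 */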
8564 static const struct file_operations tracing_buffers_fops = {
8565 .open = tracing_buffers_open,
8566 .read = tracing_buffers_read,
8567 .poll = tracing_buffers_poll,
8568 .release = tracing_buffers_release,
8569 .splice_read = tracing_buffers_splice_read,
8570 .unlocked_ioctl = tracing_buffers_ioctl,
8571 .llseek = no_llseek,
8575 tracing_stats_read(struct file *filp, char __user *ubuf,
8576 size_t count, loff_t *ppos)
8578 struct inode *inode = file_inode(filp);
8579 struct trace_array *tr = inode->i_private;
8580 struct array_buffer *trace_buf = &tr->array_buffer;
8581 int cpu = tracing_get_cpu(inode);
8582 struct trace_seq *s;
8584 unsigned long long t;
8585 unsigned long usec_rem;
8587 s = kmalloc(sizeof(*s), GFP_KERNEL);
8593 cnt = ring_buffer_entries_cpu(trace_buf->buffer, cpu);
8594 trace_seq_printf(s, "entries: %ld\n", cnt);
8596 cnt = ring_buffer_overrun_cpu(trace_buf->buffer, cpu);
8597 trace_seq_printf(s, "overrun: %ld\n", cnt);
8599 cnt = ring_buffer_commit_overrun_cpu(trace_buf->buffer, cpu);
8600 trace_seq_printf(s, "commit overrun: %ld\n", cnt);
8602 cnt = ring_buffer_bytes_cpu(trace_buf->buffer, cpu);
8603 trace_seq_printf(s, "bytes: %ld\n", cnt);
8605 if (trace_clocks[tr->clock_id].in_ns) {
8606 /* local or global for trace_clock */
8607 t = ns2usecs(ring_buffer_oldest_event_ts(trace_buf->buffer, cpu));
8608 usec_rem = do_div(t, USEC_PER_SEC);
8609 trace_seq_printf(s, "oldest event ts: %5llu.%06lu\n",
8612 t = ns2usecs(ring_buffer_time_stamp(trace_buf->buffer));
8613 usec_rem = do_div(t, USEC_PER_SEC);
8614 trace_seq_printf(s, "now ts: %5llu.%06lu\n", t, usec_rem);
8616 /* counter or tsc mode for trace_clock */
8617 trace_seq_printf(s, "oldest event ts: %llu\n",
8618 ring_buffer_oldest_event_ts(trace_buf->buffer, cpu));
8620 trace_seq_printf(s, "now ts: %llu\n",
8621 ring_buffer_time_stamp(trace_buf->buffer));
8624 cnt = ring_buffer_dropped_events_cpu(trace_buf->buffer, cpu);
8625 trace_seq_printf(s, "dropped events: %ld\n", cnt);
8627 cnt = ring_buffer_read_events_cpu(trace_buf->buffer, cpu);
8628 trace_seq_printf(s, "read events: %ld\n", cnt);
8630 count = simple_read_from_buffer(ubuf, count, ppos,
8631 s->buffer, trace_seq_used(s));
8638 static const struct file_operations tracing_stats_fops = {
8639 .open = tracing_open_generic_tr,
8640 .read = tracing_stats_read,
8641 .llseek = generic_file_llseek,
8642 .release = tracing_release_generic_tr,
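/*
 * Example per-CPU stats output (a sketch; the values are illustrative):
 *
 *	# cat /sys/kernel/tracing/per_cpu/cpu0/stats
 *	entries: 129
 *	overrun: 0
 *	commit overrun: 0
 *	bytes: 6856
 *	oldest event ts:  1134.468343
 *	now ts:  1146.291548
 *	dropped events: 0
 *	read events: 0
 */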
8645 #ifdef CONFIG_DYNAMIC_FTRACE
8648 tracing_read_dyn_info(struct file *filp, char __user *ubuf,
8649 size_t cnt, loff_t *ppos)
8655 /* 256 should be plenty to hold the amount needed */
8656 buf = kmalloc(256, GFP_KERNEL);
8660 r = scnprintf(buf, 256, "%ld pages:%ld groups: %ld\n",
8661 ftrace_update_tot_cnt,
8662 ftrace_number_of_pages,
8663 ftrace_number_of_groups);
8665 ret = simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
8670 static const struct file_operations tracing_dyn_info_fops = {
8671 .open = tracing_open_generic,
8672 .read = tracing_read_dyn_info,
8673 .llseek = generic_file_llseek,
8675 #endif /* CONFIG_DYNAMIC_FTRACE */
8677 #if defined(CONFIG_TRACER_SNAPSHOT) && defined(CONFIG_DYNAMIC_FTRACE)
8679 ftrace_snapshot(unsigned long ip, unsigned long parent_ip,
8680 struct trace_array *tr, struct ftrace_probe_ops *ops,
8683 tracing_snapshot_instance(tr);
8687 ftrace_count_snapshot(unsigned long ip, unsigned long parent_ip,
8688 struct trace_array *tr, struct ftrace_probe_ops *ops,
8691 struct ftrace_func_mapper *mapper = data;
8695 count = (long *)ftrace_func_mapper_find_ip(mapper, ip);
8705 tracing_snapshot_instance(tr);
8709 ftrace_snapshot_print(struct seq_file *m, unsigned long ip,
8710 struct ftrace_probe_ops *ops, void *data)
8712 struct ftrace_func_mapper *mapper = data;
8715 seq_printf(m, "%ps:", (void *)ip);
8717 seq_puts(m, "snapshot");
8720 count = (long *)ftrace_func_mapper_find_ip(mapper, ip);
8723 seq_printf(m, ":count=%ld\n", *count);
8725 seq_puts(m, ":unlimited\n");
8731 ftrace_snapshot_init(struct ftrace_probe_ops *ops, struct trace_array *tr,
8732 unsigned long ip, void *init_data, void **data)
8734 struct ftrace_func_mapper *mapper = *data;
8737 mapper = allocate_ftrace_func_mapper();
8743 return ftrace_func_mapper_add_ip(mapper, ip, init_data);
8747 ftrace_snapshot_free(struct ftrace_probe_ops *ops, struct trace_array *tr,
8748 unsigned long ip, void *data)
8750 struct ftrace_func_mapper *mapper = data;
8755 free_ftrace_func_mapper(mapper, NULL);
8759 ftrace_func_mapper_remove_ip(mapper, ip);
8762 static struct ftrace_probe_ops snapshot_probe_ops = {
8763 .func = ftrace_snapshot,
8764 .print = ftrace_snapshot_print,
8767 static struct ftrace_probe_ops snapshot_count_probe_ops = {
8768 .func = ftrace_count_snapshot,
8769 .print = ftrace_snapshot_print,
8770 .init = ftrace_snapshot_init,
8771 .free = ftrace_snapshot_free,
8775 ftrace_trace_snapshot_callback(struct trace_array *tr, struct ftrace_hash *hash,
8776 char *glob, char *cmd, char *param, int enable)
8778 struct ftrace_probe_ops *ops;
8779 void *count = (void *)-1;
8786 /* hash funcs only work with set_ftrace_filter */
8790 ops = param ? &snapshot_count_probe_ops : &snapshot_probe_ops;
8793 return unregister_ftrace_function_probe_func(glob+1, tr, ops);
8798 number = strsep(¶m, ":");
8800 if (!strlen(number))
8804	 * We use the callback data field (which is a pointer) as our counter.
8807 ret = kstrtoul(number, 0, (unsigned long *)&count);
8812 ret = tracing_alloc_snapshot_instance(tr);
8816 ret = register_ftrace_function_probe(glob, tr, ops, count);
8819 return ret < 0 ? ret : 0;
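/*
 * This implements the "snapshot" command of set_ftrace_filter, e.g.
 * (a sketch; the function name is illustrative):
 *
 *	echo 'schedule:snapshot' > set_ftrace_filter	# every call
 *	echo 'schedule:snapshot:5' > set_ftrace_filter	# first 5 calls
 *	echo '!schedule:snapshot' >> set_ftrace_filter	# remove the probe
 */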
8822 static struct ftrace_func_command ftrace_snapshot_cmd = {
8824 .func = ftrace_trace_snapshot_callback,
8827 static __init int register_snapshot_cmd(void)
8829 return register_ftrace_command(&ftrace_snapshot_cmd);
8832 static inline __init int register_snapshot_cmd(void) { return 0; }
8833 #endif /* defined(CONFIG_TRACER_SNAPSHOT) && defined(CONFIG_DYNAMIC_FTRACE) */
8835 static struct dentry *tracing_get_dentry(struct trace_array *tr)
8837 if (WARN_ON(!tr->dir))
8838 return ERR_PTR(-ENODEV);
8840 /* Top directory uses NULL as the parent */
8841 if (tr->flags & TRACE_ARRAY_FL_GLOBAL)
8844 /* All sub buffers have a descriptor */
8848 static struct dentry *tracing_dentry_percpu(struct trace_array *tr, int cpu)
8850 struct dentry *d_tracer;
8853 return tr->percpu_dir;
8855 d_tracer = tracing_get_dentry(tr);
8856 if (IS_ERR(d_tracer))
8859 tr->percpu_dir = tracefs_create_dir("per_cpu", d_tracer);
8861 MEM_FAIL(!tr->percpu_dir,
8862 "Could not create tracefs directory 'per_cpu/%d'\n", cpu);
8864 return tr->percpu_dir;
8867 static struct dentry *
8868 trace_create_cpu_file(const char *name, umode_t mode, struct dentry *parent,
8869 void *data, long cpu, const struct file_operations *fops)
8871 struct dentry *ret = trace_create_file(name, mode, parent, data, fops);
8873 if (ret) /* See tracing_get_cpu() */
8874 d_inode(ret)->i_cdev = (void *)(cpu + 1);
8879 tracing_init_tracefs_percpu(struct trace_array *tr, long cpu)
8881 struct dentry *d_percpu = tracing_dentry_percpu(tr, cpu);
8882 struct dentry *d_cpu;
8883 char cpu_dir[30]; /* 30 characters should be more than enough */
8888 snprintf(cpu_dir, 30, "cpu%ld", cpu);
8889 d_cpu = tracefs_create_dir(cpu_dir, d_percpu);
8891 pr_warn("Could not create tracefs '%s' entry\n", cpu_dir);
8895 /* per cpu trace_pipe */
8896 trace_create_cpu_file("trace_pipe", TRACE_MODE_READ, d_cpu,
8897 tr, cpu, &tracing_pipe_fops);
8900 trace_create_cpu_file("trace", TRACE_MODE_WRITE, d_cpu,
8901 tr, cpu, &tracing_fops);
8903 trace_create_cpu_file("trace_pipe_raw", TRACE_MODE_READ, d_cpu,
8904 tr, cpu, &tracing_buffers_fops);
8906 trace_create_cpu_file("stats", TRACE_MODE_READ, d_cpu,
8907 tr, cpu, &tracing_stats_fops);
8909 trace_create_cpu_file("buffer_size_kb", TRACE_MODE_READ, d_cpu,
8910 tr, cpu, &tracing_entries_fops);
8912 #ifdef CONFIG_TRACER_SNAPSHOT
8913 trace_create_cpu_file("snapshot", TRACE_MODE_WRITE, d_cpu,
8914 tr, cpu, &snapshot_fops);
8916 trace_create_cpu_file("snapshot_raw", TRACE_MODE_READ, d_cpu,
8917 tr, cpu, &snapshot_raw_fops);
8921 #ifdef CONFIG_FTRACE_SELFTEST
8922 /* Let selftest have access to static functions in this file */
8923 #include "trace_selftest.c"
8927 trace_options_read(struct file *filp, char __user *ubuf, size_t cnt,
8930 struct trace_option_dentry *topt = filp->private_data;
8933 if (topt->flags->val & topt->opt->bit)
8938 return simple_read_from_buffer(ubuf, cnt, ppos, buf, 2);
8942 trace_options_write(struct file *filp, const char __user *ubuf, size_t cnt,
8945 struct trace_option_dentry *topt = filp->private_data;
8949 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
8953 if (val != 0 && val != 1)
8956 if (!!(topt->flags->val & topt->opt->bit) != val) {
8957 mutex_lock(&trace_types_lock);
8958 ret = __set_tracer_option(topt->tr, topt->flags,
8960 mutex_unlock(&trace_types_lock);
8970 static int tracing_open_options(struct inode *inode, struct file *filp)
8972 struct trace_option_dentry *topt = inode->i_private;
8975 ret = tracing_check_open_get_tr(topt->tr);
8979 filp->private_data = inode->i_private;
8983 static int tracing_release_options(struct inode *inode, struct file *file)
8985 struct trace_option_dentry *topt = file->private_data;
8987 trace_array_put(topt->tr);
8991 static const struct file_operations trace_options_fops = {
8992 .open = tracing_open_options,
8993 .read = trace_options_read,
8994 .write = trace_options_write,
8995 .llseek = generic_file_llseek,
8996 .release = tracing_release_options,
9000 * In order to pass in both the trace_array descriptor as well as the index
9001 * to the flag that the trace option file represents, the trace_array
9002 * has a character array of trace_flags_index[], which holds the index
9003 * of the bit for the flag it represents. index[0] == 0, index[1] == 1, etc.
9004 * The address of this character array is passed to the flag option file
9005 * read/write callbacks.
9007 * In order to extract both the index and the trace_array descriptor,
9008 * get_tr_index() uses the following algorithm.
9012 * As the pointer itself contains the address of the index (remember index[1] == 1).
9015 * Then, to get the trace_array descriptor, we subtract that index
9016 * from the ptr, which gets us to the start of the index array itself:
9018 * ptr - idx == &index[0]
9020 * Then a simple container_of() from that pointer gets us to the
9021 * trace_array descriptor.
9023 static void get_tr_index(void *data, struct trace_array **ptr,
9024 unsigned int *pindex)
9026 *pindex = *(unsigned char *)data;
9028 *ptr = container_of(data - *pindex, struct trace_array,
9033 trace_options_core_read(struct file *filp, char __user *ubuf, size_t cnt,
9036 void *tr_index = filp->private_data;
9037 struct trace_array *tr;
9041 get_tr_index(tr_index, &tr, &index);
9043 if (tr->trace_flags & (1 << index))
9048 return simple_read_from_buffer(ubuf, cnt, ppos, buf, 2);
9052 trace_options_core_write(struct file *filp, const char __user *ubuf, size_t cnt,
9055 void *tr_index = filp->private_data;
9056 struct trace_array *tr;
9061 get_tr_index(tr_index, &tr, &index);
9063 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
9067 if (val != 0 && val != 1)
9070 mutex_lock(&event_mutex);
9071 mutex_lock(&trace_types_lock);
9072 ret = set_tracer_flag(tr, 1 << index, val);
9073 mutex_unlock(&trace_types_lock);
9074 mutex_unlock(&event_mutex);
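/*
 * For example (a sketch; the option name is illustrative):
 *
 *	echo 1 > /sys/kernel/tracing/options/sym-offset
 *	echo 0 > /sys/kernel/tracing/options/sym-offset
 */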
9084 static const struct file_operations trace_options_core_fops = {
9085 .open = tracing_open_generic,
9086 .read = trace_options_core_read,
9087 .write = trace_options_core_write,
9088 .llseek = generic_file_llseek,
9091 struct dentry *trace_create_file(const char *name,
9093 struct dentry *parent,
9095 const struct file_operations *fops)
9099 ret = tracefs_create_file(name, mode, parent, data, fops);
9101 pr_warn("Could not create tracefs '%s' entry\n", name);
9107 static struct dentry *trace_options_init_dentry(struct trace_array *tr)
9109 struct dentry *d_tracer;
9114 d_tracer = tracing_get_dentry(tr);
9115 if (IS_ERR(d_tracer))
9118 tr->options = tracefs_create_dir("options", d_tracer);
9120 pr_warn("Could not create tracefs directory 'options'\n");
9128 create_trace_option_file(struct trace_array *tr,
9129 struct trace_option_dentry *topt,
9130 struct tracer_flags *flags,
9131 struct tracer_opt *opt)
9133 struct dentry *t_options;
9135 t_options = trace_options_init_dentry(tr);
9139 topt->flags = flags;
9143 topt->entry = trace_create_file(opt->name, TRACE_MODE_WRITE,
9144 t_options, topt, &trace_options_fops);
9149 create_trace_option_files(struct trace_array *tr, struct tracer *tracer)
9151 struct trace_option_dentry *topts;
9152 struct trace_options *tr_topts;
9153 struct tracer_flags *flags;
9154 struct tracer_opt *opts;
9161 flags = tracer->flags;
9163 if (!flags || !flags->opts)
9167 * If this is an instance, only create flags for tracers
9168 * the instance may have.
9170 if (!trace_ok_for_array(tracer, tr))
9173 for (i = 0; i < tr->nr_topts; i++) {
9174		/* Make sure there are no duplicate flags. */
9175 if (WARN_ON_ONCE(tr->topts[i].tracer->flags == tracer->flags))
9181 for (cnt = 0; opts[cnt].name; cnt++)
9184 topts = kcalloc(cnt + 1, sizeof(*topts), GFP_KERNEL);
9188 tr_topts = krealloc(tr->topts, sizeof(*tr->topts) * (tr->nr_topts + 1),
9195 tr->topts = tr_topts;
9196 tr->topts[tr->nr_topts].tracer = tracer;
9197 tr->topts[tr->nr_topts].topts = topts;
9200 for (cnt = 0; opts[cnt].name; cnt++) {
9201 create_trace_option_file(tr, &topts[cnt], flags,
9203 MEM_FAIL(topts[cnt].entry == NULL,
9204 "Failed to create trace option: %s",
9209 static struct dentry *
9210 create_trace_option_core_file(struct trace_array *tr,
9211 const char *option, long index)
9213 struct dentry *t_options;
9215 t_options = trace_options_init_dentry(tr);
9219 return trace_create_file(option, TRACE_MODE_WRITE, t_options,
9220 (void *)&tr->trace_flags_index[index],
9221 &trace_options_core_fops);
9224 static void create_trace_options_dir(struct trace_array *tr)
9226 struct dentry *t_options;
9227 bool top_level = tr == &global_trace;
9230 t_options = trace_options_init_dentry(tr);
9234 for (i = 0; trace_options[i]; i++) {
9236 !((1 << i) & TOP_LEVEL_TRACE_FLAGS))
9237 create_trace_option_core_file(tr, trace_options[i], i);
9242 rb_simple_read(struct file *filp, char __user *ubuf,
9243 size_t cnt, loff_t *ppos)
9245 struct trace_array *tr = filp->private_data;
9249 r = tracer_tracing_is_on(tr);
9250 r = sprintf(buf, "%d\n", r);
9252 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
9256 rb_simple_write(struct file *filp, const char __user *ubuf,
9257 size_t cnt, loff_t *ppos)
9259 struct trace_array *tr = filp->private_data;
9260 struct trace_buffer *buffer = tr->array_buffer.buffer;
9264 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
9269 mutex_lock(&trace_types_lock);
9270 if (!!val == tracer_tracing_is_on(tr)) {
9271 val = 0; /* do nothing */
9273 tracer_tracing_on(tr);
9274 if (tr->current_trace->start)
9275 tr->current_trace->start(tr);
9277 tracer_tracing_off(tr);
9278 if (tr->current_trace->stop)
9279 tr->current_trace->stop(tr);
9280 /* Wake up any waiters */
9281 ring_buffer_wake_waiters(buffer, RING_BUFFER_ALL_CPUS);
9283 mutex_unlock(&trace_types_lock);
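/*
 * The tracing_on file toggles recording without tearing down the
 * current tracer, e.g. (a sketch):
 *
 *	echo 0 > /sys/kernel/tracing/tracing_on	# pause recording
 *	echo 1 > /sys/kernel/tracing/tracing_on	# resume recording
 */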
9291 static const struct file_operations rb_simple_fops = {
9292 .open = tracing_open_generic_tr,
9293 .read = rb_simple_read,
9294 .write = rb_simple_write,
9295 .release = tracing_release_generic_tr,
9296 .llseek = default_llseek,
9300 buffer_percent_read(struct file *filp, char __user *ubuf,
9301 size_t cnt, loff_t *ppos)
9303 struct trace_array *tr = filp->private_data;
9307 r = tr->buffer_percent;
9308 r = sprintf(buf, "%d\n", r);
9310 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
9314 buffer_percent_write(struct file *filp, const char __user *ubuf,
9315 size_t cnt, loff_t *ppos)
9317 struct trace_array *tr = filp->private_data;
9321 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
9328 tr->buffer_percent = val;
9335 static const struct file_operations buffer_percent_fops = {
9336 .open = tracing_open_generic_tr,
9337 .read = buffer_percent_read,
9338 .write = buffer_percent_write,
9339 .release = tracing_release_generic_tr,
9340 .llseek = default_llseek,
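/*
 * buffer_percent controls when blocked readers are woken: 0 wakes them
 * as soon as any data is available, 100 only once the buffer is full,
 * and 50 is the default set below in init_tracer_tracefs(). A sketch:
 *
 *	echo 0 > /sys/kernel/tracing/buffer_percent
 */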
9343 static struct dentry *trace_instance_dir;
9346 init_tracer_tracefs(struct trace_array *tr, struct dentry *d_tracer);
9349 allocate_trace_buffer(struct trace_array *tr, struct array_buffer *buf, int size)
9351 enum ring_buffer_flags rb_flags;
9353 rb_flags = tr->trace_flags & TRACE_ITER_OVERWRITE ? RB_FL_OVERWRITE : 0;
9357 buf->buffer = ring_buffer_alloc(size, rb_flags);
9361 buf->data = alloc_percpu(struct trace_array_cpu);
9363 ring_buffer_free(buf->buffer);
9368 /* Allocate the first page for all buffers */
9369 set_buffer_entries(&tr->array_buffer,
9370 ring_buffer_size(tr->array_buffer.buffer, 0));
9375 static void free_trace_buffer(struct array_buffer *buf)
9378 ring_buffer_free(buf->buffer);
9380 free_percpu(buf->data);
9385 static int allocate_trace_buffers(struct trace_array *tr, int size)
9389 ret = allocate_trace_buffer(tr, &tr->array_buffer, size);
9393 #ifdef CONFIG_TRACER_MAX_TRACE
9394 ret = allocate_trace_buffer(tr, &tr->max_buffer,
9395 allocate_snapshot ? size : 1);
9396 if (MEM_FAIL(ret, "Failed to allocate trace buffer\n")) {
9397 free_trace_buffer(&tr->array_buffer);
9400 tr->allocated_snapshot = allocate_snapshot;
9402 allocate_snapshot = false;
9408 static void free_trace_buffers(struct trace_array *tr)
9413 free_trace_buffer(&tr->array_buffer);
9415 #ifdef CONFIG_TRACER_MAX_TRACE
9416 free_trace_buffer(&tr->max_buffer);
9420 static void init_trace_flags_index(struct trace_array *tr)
9424 /* Used by the trace options files */
9425 for (i = 0; i < TRACE_FLAGS_MAX_SIZE; i++)
9426 tr->trace_flags_index[i] = i;
9429 static void __update_tracer_options(struct trace_array *tr)
9433 for (t = trace_types; t; t = t->next)
9434 add_tracer_options(tr, t);
9437 static void update_tracer_options(struct trace_array *tr)
9439 mutex_lock(&trace_types_lock);
9440 tracer_options_updated = true;
9441 __update_tracer_options(tr);
9442 mutex_unlock(&trace_types_lock);
9445 /* Must have trace_types_lock held */
9446 struct trace_array *trace_array_find(const char *instance)
9448 struct trace_array *tr, *found = NULL;
9450 list_for_each_entry(tr, &ftrace_trace_arrays, list) {
9451 if (tr->name && strcmp(tr->name, instance) == 0) {
9460 struct trace_array *trace_array_find_get(const char *instance)
9462 struct trace_array *tr;
9464 mutex_lock(&trace_types_lock);
9465 tr = trace_array_find(instance);
9468 mutex_unlock(&trace_types_lock);
9473 static int trace_array_create_dir(struct trace_array *tr)
9477 tr->dir = tracefs_create_dir(tr->name, trace_instance_dir);
9481 ret = event_trace_add_tracer(tr->dir, tr);
9483 tracefs_remove(tr->dir);
9487 init_tracer_tracefs(tr, tr->dir);
9488 __update_tracer_options(tr);
9493 static struct trace_array *trace_array_create(const char *name)
9495 struct trace_array *tr;
9499 tr = kzalloc(sizeof(*tr), GFP_KERNEL);
9501 return ERR_PTR(ret);
9503 tr->name = kstrdup(name, GFP_KERNEL);
9507 if (!alloc_cpumask_var(&tr->tracing_cpumask, GFP_KERNEL))
9510 if (!zalloc_cpumask_var(&tr->pipe_cpumask, GFP_KERNEL))
9513 tr->trace_flags = global_trace.trace_flags & ~ZEROED_TRACE_FLAGS;
9515 cpumask_copy(tr->tracing_cpumask, cpu_all_mask);
9517 raw_spin_lock_init(&tr->start_lock);
9519 tr->max_lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
9521 tr->current_trace = &nop_trace;
9523 INIT_LIST_HEAD(&tr->systems);
9524 INIT_LIST_HEAD(&tr->events);
9525 INIT_LIST_HEAD(&tr->hist_vars);
9526 INIT_LIST_HEAD(&tr->err_log);
9528 if (allocate_trace_buffers(tr, trace_buf_size) < 0)
9531	/* The ring buffer is expanded by default */
9532 trace_set_ring_buffer_expanded(tr);
9534 if (ftrace_allocate_ftrace_ops(tr) < 0)
9537 ftrace_init_trace_array(tr);
9539 init_trace_flags_index(tr);
9541 if (trace_instance_dir) {
9542 ret = trace_array_create_dir(tr);
9546 __trace_early_add_events(tr);
9548 list_add(&tr->list, &ftrace_trace_arrays);
9555 ftrace_free_ftrace_ops(tr);
9556 free_trace_buffers(tr);
9557 free_cpumask_var(tr->pipe_cpumask);
9558 free_cpumask_var(tr->tracing_cpumask);
9562 return ERR_PTR(ret);
9565 static int instance_mkdir(const char *name)
9567 struct trace_array *tr;
9570 mutex_lock(&event_mutex);
9571 mutex_lock(&trace_types_lock);
9574 if (trace_array_find(name))
9577 tr = trace_array_create(name);
9579 ret = PTR_ERR_OR_ZERO(tr);
9582 mutex_unlock(&trace_types_lock);
9583 mutex_unlock(&event_mutex);
9588 * trace_array_get_by_name - Create/Lookup a trace array, given its name.
9589 * @name: The name of the trace array to be looked up/created.
9591 * Returns pointer to trace array with given name.
9592 * NULL, if it cannot be created.
9594 * NOTE: This function increments the reference counter associated with the
9595 * trace array returned. This makes sure it cannot be freed while in use.
9596 * Use trace_array_put() once the trace array is no longer needed.
9597 * If the trace_array is to be freed, trace_array_destroy() needs to
9598 * be called after the trace_array_put(), or simply let user space delete
9599 * it from the tracefs instances directory. But until the
9600 * trace_array_put() is called, user space can not delete it.
9603 struct trace_array *trace_array_get_by_name(const char *name)
9605 struct trace_array *tr;
9607 mutex_lock(&event_mutex);
9608 mutex_lock(&trace_types_lock);
9610 list_for_each_entry(tr, &ftrace_trace_arrays, list) {
9611 if (tr->name && strcmp(tr->name, name) == 0)
9615 tr = trace_array_create(name);
9623 mutex_unlock(&trace_types_lock);
9624 mutex_unlock(&event_mutex);
9627 EXPORT_SYMBOL_GPL(trace_array_get_by_name);
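/*
 * A minimal in-kernel usage sketch (hypothetical module code; the
 * instance name "example" is illustrative):
 *
 *	struct trace_array *tr;
 *
 *	tr = trace_array_get_by_name("example");
 *	if (!tr)
 *		return -ENODEV;
 *	// e.g. trace_array_set_clr_event(tr, "sched", NULL, true);
 *	trace_array_put(tr);
 */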
9629 static int __remove_instance(struct trace_array *tr)
9633 /* Reference counter for a newly created trace array = 1. */
9634 if (tr->ref > 1 || (tr->current_trace && tr->trace_ref))
9637 list_del(&tr->list);
9639 /* Disable all the flags that were enabled coming in */
9640 for (i = 0; i < TRACE_FLAGS_MAX_SIZE; i++) {
9641 if ((1 << i) & ZEROED_TRACE_FLAGS)
9642 set_tracer_flag(tr, 1 << i, 0);
9645 tracing_set_nop(tr);
9646 clear_ftrace_function_probes(tr);
9647 event_trace_del_tracer(tr);
9648 ftrace_clear_pids(tr);
9649 ftrace_destroy_function_files(tr);
9650 tracefs_remove(tr->dir);
9651 free_percpu(tr->last_func_repeats);
9652 free_trace_buffers(tr);
9653 clear_tracing_err_log(tr);
9655 for (i = 0; i < tr->nr_topts; i++) {
9656 kfree(tr->topts[i].topts);
9660 free_cpumask_var(tr->pipe_cpumask);
9661 free_cpumask_var(tr->tracing_cpumask);
9668 int trace_array_destroy(struct trace_array *this_tr)
9670 struct trace_array *tr;
9676 mutex_lock(&event_mutex);
9677 mutex_lock(&trace_types_lock);
9681	/* Make sure the trace array exists before destroying it. */
9682 list_for_each_entry(tr, &ftrace_trace_arrays, list) {
9683 if (tr == this_tr) {
9684 ret = __remove_instance(tr);
9689 mutex_unlock(&trace_types_lock);
9690 mutex_unlock(&event_mutex);
9694 EXPORT_SYMBOL_GPL(trace_array_destroy);
9696 static int instance_rmdir(const char *name)
9698 struct trace_array *tr;
9701 mutex_lock(&event_mutex);
9702 mutex_lock(&trace_types_lock);
9705 tr = trace_array_find(name);
9707 ret = __remove_instance(tr);
9709 mutex_unlock(&trace_types_lock);
9710 mutex_unlock(&event_mutex);
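/*
 * From user space the same paths are exercised with plain mkdir/rmdir
 * (a sketch):
 *
 *	mkdir /sys/kernel/tracing/instances/foo
 *	rmdir /sys/kernel/tracing/instances/foo	# -EBUSY while still in use
 */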
9715 static __init void create_trace_instances(struct dentry *d_tracer)
9717 struct trace_array *tr;
9719 trace_instance_dir = tracefs_create_instance_dir("instances", d_tracer,
9722 if (MEM_FAIL(!trace_instance_dir, "Failed to create instances directory\n"))
9725 mutex_lock(&event_mutex);
9726 mutex_lock(&trace_types_lock);
9728 list_for_each_entry(tr, &ftrace_trace_arrays, list) {
9731 if (MEM_FAIL(trace_array_create_dir(tr) < 0,
9732 "Failed to create instance directory\n"))
9736 mutex_unlock(&trace_types_lock);
9737 mutex_unlock(&event_mutex);
9741 init_tracer_tracefs(struct trace_array *tr, struct dentry *d_tracer)
9745 trace_create_file("available_tracers", TRACE_MODE_READ, d_tracer,
9746 tr, &show_traces_fops);
9748 trace_create_file("current_tracer", TRACE_MODE_WRITE, d_tracer,
9749 tr, &set_tracer_fops);
9751 trace_create_file("tracing_cpumask", TRACE_MODE_WRITE, d_tracer,
9752 tr, &tracing_cpumask_fops);
9754 trace_create_file("trace_options", TRACE_MODE_WRITE, d_tracer,
9755 tr, &tracing_iter_fops);
9757 trace_create_file("trace", TRACE_MODE_WRITE, d_tracer,
9760 trace_create_file("trace_pipe", TRACE_MODE_READ, d_tracer,
9761 tr, &tracing_pipe_fops);
9763 trace_create_file("buffer_size_kb", TRACE_MODE_WRITE, d_tracer,
9764 tr, &tracing_entries_fops);
9766 trace_create_file("buffer_total_size_kb", TRACE_MODE_READ, d_tracer,
9767 tr, &tracing_total_entries_fops);
9769 trace_create_file("free_buffer", 0200, d_tracer,
9770 tr, &tracing_free_buffer_fops);
9772 trace_create_file("trace_marker", 0220, d_tracer,
9773 tr, &tracing_mark_fops);
9775 tr->trace_marker_file = __find_event_file(tr, "ftrace", "print");
9777 trace_create_file("trace_marker_raw", 0220, d_tracer,
9778 tr, &tracing_mark_raw_fops);
9780 trace_create_file("trace_clock", TRACE_MODE_WRITE, d_tracer, tr,
9783 trace_create_file("tracing_on", TRACE_MODE_WRITE, d_tracer,
9784 tr, &rb_simple_fops);
9786 trace_create_file("timestamp_mode", TRACE_MODE_READ, d_tracer, tr,
9787 &trace_time_stamp_mode_fops);
9789 tr->buffer_percent = 50;
9791 trace_create_file("buffer_percent", TRACE_MODE_WRITE, d_tracer,
9792 tr, &buffer_percent_fops);
9794 create_trace_options_dir(tr);
9796 #ifdef CONFIG_TRACER_MAX_TRACE
9797 trace_create_maxlat_file(tr, d_tracer);
9800 if (ftrace_create_function_files(tr, d_tracer))
9801 MEM_FAIL(1, "Could not allocate function filter files");
9803 #ifdef CONFIG_TRACER_SNAPSHOT
9804 trace_create_file("snapshot", TRACE_MODE_WRITE, d_tracer,
9805 tr, &snapshot_fops);
9808 trace_create_file("error_log", TRACE_MODE_WRITE, d_tracer,
9809 tr, &tracing_err_log_fops);
9811 for_each_tracing_cpu(cpu)
9812 tracing_init_tracefs_percpu(tr, cpu);
9814 ftrace_init_tracefs(tr, d_tracer);
9817 static struct vfsmount *trace_automount(struct dentry *mntpt, void *ignore)
9819 struct vfsmount *mnt;
9820 struct file_system_type *type;
9823 * To maintain backward compatibility for tools that mount
9824 * debugfs to get to the tracing facility, tracefs is automatically
9825 * mounted to the debugfs/tracing directory.
9827 type = get_fs_type("tracefs");
9830 mnt = vfs_submount(mntpt, type, "tracefs", NULL);
9831 put_filesystem(type);
9840 * tracing_init_dentry - initialize top level trace array
9842 * This is called when creating files or directories in the tracing
9843 * directory. It is called via fs_initcall() by any of the boot up code
9844 * and returns 0 once the top level tracing directory is available.
9846 int tracing_init_dentry(void)
9848 struct trace_array *tr = &global_trace;
9850 if (security_locked_down(LOCKDOWN_TRACEFS)) {
9851 pr_warn("Tracing disabled due to lockdown\n");
9855 /* The top level trace array uses NULL as parent */
9859 if (WARN_ON(!tracefs_initialized()))
9863 * As there may still be users that expect the tracing
9864 * files to exist in debugfs/tracing, we must automount
9865 * the tracefs file system there, so older tools still
9866 * work with the newer kernel.
9868 tr->dir = debugfs_create_automount("tracing", NULL,
9869 trace_automount, NULL);
9874 extern struct trace_eval_map *__start_ftrace_eval_maps[];
9875 extern struct trace_eval_map *__stop_ftrace_eval_maps[];
9877 static struct workqueue_struct *eval_map_wq __initdata;
9878 static struct work_struct eval_map_work __initdata;
9879 static struct work_struct tracerfs_init_work __initdata;
9881 static void __init eval_map_work_func(struct work_struct *work)
9885 len = __stop_ftrace_eval_maps - __start_ftrace_eval_maps;
9886 trace_insert_eval_map(NULL, __start_ftrace_eval_maps, len);
9889 static int __init trace_eval_init(void)
9891 INIT_WORK(&eval_map_work, eval_map_work_func);
9893 eval_map_wq = alloc_workqueue("eval_map_wq", WQ_UNBOUND, 0);
9895 pr_err("Unable to allocate eval_map_wq\n");
9897 eval_map_work_func(&eval_map_work);
9901 queue_work(eval_map_wq, &eval_map_work);
9905 subsys_initcall(trace_eval_init);
9907 static int __init trace_eval_sync(void)
9909 /* Make sure the eval map updates are finished */
9911 destroy_workqueue(eval_map_wq);
9915 late_initcall_sync(trace_eval_sync);
#ifdef CONFIG_MODULES
static void trace_module_add_evals(struct module *mod)
{
	if (!mod->num_trace_evals)
		return;

	/*
	 * Modules with bad taint do not have events created, do
	 * not bother with enums either.
	 */
	if (trace_module_has_bad_taint(mod))
		return;

	trace_insert_eval_map(mod, mod->trace_evals, mod->num_trace_evals);
}

#ifdef CONFIG_TRACE_EVAL_MAP_FILE
static void trace_module_remove_evals(struct module *mod)
{
	union trace_eval_map_item *map;
	union trace_eval_map_item **last = &trace_eval_maps;

	if (!mod->num_trace_evals)
		return;

	mutex_lock(&trace_eval_mutex);

	map = trace_eval_maps;

	while (map) {
		if (map->head.mod == mod)
			break;
		map = trace_eval_jmp_to_tail(map);
		last = &map->tail.next;
		map = map->tail.next;
	}
	if (!map)
		goto out;

	*last = trace_eval_jmp_to_tail(map)->tail.next;
	kfree(map);
 out:
	mutex_unlock(&trace_eval_mutex);
}
#else
static inline void trace_module_remove_evals(struct module *mod) { }
#endif /* CONFIG_TRACE_EVAL_MAP_FILE */

static int trace_module_notify(struct notifier_block *self,
			       unsigned long val, void *data)
{
	struct module *mod = data;

	switch (val) {
	case MODULE_STATE_COMING:
		trace_module_add_evals(mod);
		break;
	case MODULE_STATE_GOING:
		trace_module_remove_evals(mod);
		break;
	}

	return NOTIFY_OK;
}

static struct notifier_block trace_module_nb = {
	.notifier_call = trace_module_notify,
	.priority = 0,
};
#endif /* CONFIG_MODULES */

static __init void tracer_init_tracefs_work_func(struct work_struct *work)
{

	event_trace_init();

	init_tracer_tracefs(&global_trace, NULL);
	ftrace_init_tracefs_toplevel(&global_trace, NULL);

	trace_create_file("tracing_thresh", TRACE_MODE_WRITE, NULL,
			  &global_trace, &tracing_thresh_fops);

	trace_create_file("README", TRACE_MODE_READ, NULL,
			  NULL, &tracing_readme_fops);

	trace_create_file("saved_cmdlines", TRACE_MODE_READ, NULL,
			  NULL, &tracing_saved_cmdlines_fops);

	trace_create_file("saved_cmdlines_size", TRACE_MODE_WRITE, NULL,
			  NULL, &tracing_saved_cmdlines_size_fops);

	trace_create_file("saved_tgids", TRACE_MODE_READ, NULL,
			  NULL, &tracing_saved_tgids_fops);

	trace_create_eval_file(NULL);

#ifdef CONFIG_MODULES
	register_module_notifier(&trace_module_nb);
#endif

#ifdef CONFIG_DYNAMIC_FTRACE
	trace_create_file("dyn_ftrace_total_info", TRACE_MODE_READ, NULL,
			  NULL, &tracing_dyn_info_fops);
#endif

	create_trace_instances(NULL);

	update_tracer_options(&global_trace);
}

static __init int tracer_init_tracefs(void)
{
	int ret;

	trace_access_lock_init();

	ret = tracing_init_dentry();
	if (ret)
		return 0;

	if (eval_map_wq) {
		INIT_WORK(&tracerfs_init_work, tracer_init_tracefs_work_func);
		queue_work(eval_map_wq, &tracerfs_init_work);
	} else {
		tracer_init_tracefs_work_func(NULL);
	}

	rv_init_interface();

	return 0;
}

fs_initcall(tracer_init_tracefs);

static int trace_die_panic_handler(struct notifier_block *self,
				unsigned long ev, void *unused);

static struct notifier_block trace_panic_notifier = {
	.notifier_call = trace_die_panic_handler,
	.priority = INT_MAX - 1,
};

static struct notifier_block trace_die_notifier = {
	.notifier_call = trace_die_panic_handler,
	.priority = INT_MAX - 1,
};

/*
 * The idea is to execute the following die/panic callback early, in order
 * to avoid showing irrelevant information in the trace (like other panic
 * notifier functions); we are the 2nd to run, after hung_task/rcu_stall
 * warnings get disabled (to prevent potential log flooding).
 */
static int trace_die_panic_handler(struct notifier_block *self,
				unsigned long ev, void *unused)
{
	if (!ftrace_dump_on_oops)
		return NOTIFY_DONE;

	/* The die notifier requires DIE_OOPS to trigger */
	if (self == &trace_die_notifier && ev != DIE_OOPS)
		return NOTIFY_DONE;

	ftrace_dump(ftrace_dump_on_oops);

	return NOTIFY_DONE;
}
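
/*
 * Usage note (a sketch): this path is normally armed from the kernel
 * command line, e.g. booting with
 *
 *	ftrace_dump_on_oops
 *
 * or at runtime via the sysctl:
 *
 *	echo 1 > /proc/sys/kernel/ftrace_dump_on_oops
 */
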
/*
 * printk is set to max of 1024, we really don't need it that big.
 * Nothing should be printing 1000 characters anyway.
 */
#define TRACE_MAX_PRINT		1000

/*
 * Define here KERN_TRACE so that we have one place to modify
 * it if we decide to change what log level the ftrace dump
 * should be at.
 */
#define KERN_TRACE		KERN_EMERG

void
trace_printk_seq(struct trace_seq *s)
{
	/* Probably should print a warning here. */
	if (s->seq.len >= TRACE_MAX_PRINT)
		s->seq.len = TRACE_MAX_PRINT;

	/*
	 * More paranoid code. Although the buffer size is set to
	 * PAGE_SIZE, and TRACE_MAX_PRINT is 1000, this is just
	 * an extra layer of protection.
	 */
	if (WARN_ON_ONCE(s->seq.len >= s->seq.size))
		s->seq.len = s->seq.size - 1;

	/* should be zero terminated, but we are paranoid */
	s->buffer[s->seq.len] = 0;

	printk(KERN_TRACE "%s", s->buffer);

	trace_seq_init(s);
}

void trace_init_global_iter(struct trace_iterator *iter)
{
	iter->tr = &global_trace;
	iter->trace = iter->tr->current_trace;
	iter->cpu_file = RING_BUFFER_ALL_CPUS;
	iter->array_buffer = &global_trace.array_buffer;

	if (iter->trace && iter->trace->open)
		iter->trace->open(iter);

	/* Annotate start of buffers if we had overruns */
	if (ring_buffer_overruns(iter->array_buffer->buffer))
		iter->iter_flags |= TRACE_FILE_ANNOTATE;

	/* Output in nanoseconds only if we are using a clock in nanoseconds. */
	if (trace_clocks[iter->tr->clock_id].in_ns)
		iter->iter_flags |= TRACE_FILE_TIME_IN_NS;

	/* Can not use kmalloc for iter.temp and iter.fmt */
	iter->temp = static_temp_buf;
	iter->temp_size = STATIC_TEMP_BUF_SIZE;
	iter->fmt = static_fmt_buf;
	iter->fmt_size = STATIC_FMT_BUF_SIZE;
}

void ftrace_dump(enum ftrace_dump_mode oops_dump_mode)
{
	/* use static because iter can be a bit big for the stack */
	static struct trace_iterator iter;
	static atomic_t dump_running;
	struct trace_array *tr = &global_trace;
	unsigned int old_userobj;
	unsigned long flags;
	int cnt = 0, cpu;

	/* Only allow one dump user at a time. */
	if (atomic_inc_return(&dump_running) != 1) {
		atomic_dec(&dump_running);
		return;
	}

	/*
	 * Always turn off tracing when we dump.
	 * We don't need to show trace output of what happens
	 * between multiple crashes.
	 *
	 * If the user does a sysrq-z, then they can re-enable
	 * tracing with echo 1 > tracing_on.
	 */
	tracing_off();

	local_irq_save(flags);

	/* Simulate the iterator */
	trace_init_global_iter(&iter);

	for_each_tracing_cpu(cpu) {
		atomic_inc(&per_cpu_ptr(iter.array_buffer->data, cpu)->disabled);
	}

	old_userobj = tr->trace_flags & TRACE_ITER_SYM_USEROBJ;

	/* don't look at user memory in panic mode */
	tr->trace_flags &= ~TRACE_ITER_SYM_USEROBJ;

	switch (oops_dump_mode) {
	case DUMP_ALL:
		iter.cpu_file = RING_BUFFER_ALL_CPUS;
		break;
	case DUMP_ORIG:
		iter.cpu_file = raw_smp_processor_id();
		break;
	case DUMP_NONE:
		goto out_enable;
	default:
		printk(KERN_TRACE "Bad dumping mode, switching to all CPUs dump\n");
		iter.cpu_file = RING_BUFFER_ALL_CPUS;
	}

	printk(KERN_TRACE "Dumping ftrace buffer:\n");

	/* Did function tracer already get disabled? */
	if (ftrace_is_dead()) {
		printk("# WARNING: FUNCTION TRACING IS CORRUPTED\n");
		printk("#          MAY BE MISSING FUNCTION EVENTS\n");
	}

	/*
	 * We need to stop all tracing on all CPUS to read
	 * the next buffer. This is a bit expensive, but is
	 * not done often. We fill all that we can read,
	 * and then release the locks again.
	 */

	while (!trace_empty(&iter)) {

		if (!cnt)
			printk(KERN_TRACE "---------------------------------\n");

		cnt++;

		trace_iterator_reset(&iter);
		iter.iter_flags |= TRACE_FILE_LAT_FMT;

		if (trace_find_next_entry_inc(&iter) != NULL) {
			int ret;

			ret = print_trace_line(&iter);
			if (ret != TRACE_TYPE_NO_CONSUME)
				trace_consume(&iter);
		}
		touch_nmi_watchdog();

		trace_printk_seq(&iter.seq);
	}

	if (!cnt)
		printk(KERN_TRACE "   (ftrace buffer empty)\n");
	else
		printk(KERN_TRACE "---------------------------------\n");

 out_enable:
	tr->trace_flags |= old_userobj;

	for_each_tracing_cpu(cpu) {
		atomic_dec(&per_cpu_ptr(iter.array_buffer->data, cpu)->disabled);
	}
	atomic_dec(&dump_running);
	local_irq_restore(flags);
}
EXPORT_SYMBOL_GPL(ftrace_dump);
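
/*
 * Illustrative call (a sketch, mirroring what the sysrq-z handler does):
 * dump every CPU's buffer from the current context:
 *
 *	ftrace_dump(DUMP_ALL);
 *
 * DUMP_ORIG instead limits output to the CPU that triggered the dump.
 */
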
#define WRITE_BUFSIZE  4096

ssize_t trace_parse_run_command(struct file *file, const char __user *buffer,
				size_t count, loff_t *ppos,
				int (*createfn)(const char *))
{
	char *kbuf, *buf, *tmp;
	int ret = 0;
	size_t done = 0;
	size_t size;

	kbuf = kmalloc(WRITE_BUFSIZE, GFP_KERNEL);
	if (!kbuf)
		return -ENOMEM;

	while (done < count) {
		size = count - done;

		if (size >= WRITE_BUFSIZE)
			size = WRITE_BUFSIZE - 1;

		if (copy_from_user(kbuf, buffer + done, size)) {
			ret = -EFAULT;
			goto out;
		}
		kbuf[size] = '\0';
		buf = kbuf;
		do {
			tmp = strchr(buf, '\n');
			if (tmp) {
				*tmp = '\0';
				size = tmp - buf + 1;
			} else {
				size = strlen(buf);
				if (done + size < count) {
					if (buf != kbuf)
						break;
					/* This can accept WRITE_BUFSIZE - 2 ('\n' + '\0') */
					pr_warn("Line length is too long: Should be less than %d\n",
						WRITE_BUFSIZE - 2);
					ret = -EINVAL;
					goto out;
				}
			}
			done += size;

			/* Remove comments */
			tmp = strchr(buf, '#');

			if (tmp)
				*tmp = '\0';

			ret = createfn(buf);
			if (ret)
				goto out;
			buf += size;

		} while (done < count);
	}
	ret = done;

out:
	kfree(kbuf);

	return ret;
}
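
/*
 * Illustrative createfn (my_create_cmd is a hypothetical helper): each
 * complete, comment-stripped line of a user write is handed to the
 * callback, so a .write handler built on this helper could look like:
 *
 *	static int my_create_cmd(const char *cmd)
 *	{
 *		pr_info("parsed command: %s\n", cmd);
 *		return 0;
 *	}
 *
 *	return trace_parse_run_command(file, ubuf, cnt, ppos, my_create_cmd);
 *
 * The kprobe_events and uprobe_events write paths use this helper in
 * exactly this way.
 */
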
#ifdef CONFIG_TRACER_MAX_TRACE
__init static bool tr_needs_alloc_snapshot(const char *name)
{
	char *test;
	int len = strlen(name);
	bool ret;

	if (!boot_snapshot_index)
		return false;

	if (strncmp(name, boot_snapshot_info, len) == 0 &&
	    boot_snapshot_info[len] == '\t')
		return true;

	test = kmalloc(strlen(name) + 3, GFP_KERNEL);
	if (!test)
		return false;

	sprintf(test, "\t%s\t", name);
	/* the name needs a snapshot if it appears in the tab-separated list */
	ret = strstr(boot_snapshot_info, test) != NULL;
	kfree(test);
	return ret;
}

__init static void do_allocate_snapshot(const char *name)
{
	if (!tr_needs_alloc_snapshot(name))
		return;

	/*
	 * When allocate_snapshot is set, the next call to
	 * allocate_trace_buffers() (called by trace_array_get_by_name())
	 * will allocate the snapshot buffer. That will also clear
	 * this flag.
	 */
	allocate_snapshot = true;
}
#else
static inline void do_allocate_snapshot(const char *name) { }
#endif
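
/*
 * Example command line this honors (illustrative): booting with
 *
 *	trace_instance=foo ftrace_boot_snapshot=foo
 *
 * makes "foo" appear (tab-terminated) in boot_snapshot_info, so that
 * instance gets a snapshot buffer allocated along with its ring buffer.
 */
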
__init static void enable_instances(void)
{
	struct trace_array *tr;
	char *curr_str;
	char *str;
	char *tok;

	/* A tab is always appended */
	boot_instance_info[boot_instance_index - 1] = '\0';
	str = boot_instance_info;

	while ((curr_str = strsep(&str, "\t"))) {

		tok = strsep(&curr_str, ",");

		if (IS_ENABLED(CONFIG_TRACER_MAX_TRACE))
			do_allocate_snapshot(tok);

		tr = trace_array_get_by_name(tok);
		if (!tr) {
			pr_warn("Failed to create instance buffer %s\n", curr_str);
			continue;
		}
		/* Allow user space to delete it */
		trace_array_put(tr);

		while ((tok = strsep(&curr_str, ","))) {
			early_enable_events(tr, tok, true);
		}
	}
}
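
/*
 * Example of the boot_instance_info format parsed above (illustrative):
 *
 *	trace_instance=foo,sched:sched_switch,irq:irq_handler_entry
 *
 * creates instance "foo" and enables the listed events within it;
 * multiple trace_instance= options are concatenated with tabs.
 */
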
__init static int tracer_alloc_buffers(void)
{
	int ring_buf_size;
	int ret = -ENOMEM;


	if (security_locked_down(LOCKDOWN_TRACEFS)) {
		pr_warn("Tracing disabled due to lockdown\n");
		return -EPERM;
	}

	/*
	 * Make sure we don't accidentally add more trace options
	 * than we have bits for.
	 */
	BUILD_BUG_ON(TRACE_ITER_LAST_BIT > TRACE_FLAGS_MAX_SIZE);

	if (!alloc_cpumask_var(&tracing_buffer_mask, GFP_KERNEL))
		goto out;

	if (!alloc_cpumask_var(&global_trace.tracing_cpumask, GFP_KERNEL))
		goto out_free_buffer_mask;

	/* Only allocate trace_printk buffers if a trace_printk exists */
	if (&__stop___trace_bprintk_fmt != &__start___trace_bprintk_fmt)
		/* Must be called before global_trace.buffer is allocated */
		trace_printk_init_buffers();

	/* To save memory, keep the ring buffer size to its minimum */
	if (global_trace.ring_buffer_expanded)
		ring_buf_size = trace_buf_size;
	else
		ring_buf_size = 1;

	cpumask_copy(tracing_buffer_mask, cpu_possible_mask);
	cpumask_copy(global_trace.tracing_cpumask, cpu_all_mask);

	raw_spin_lock_init(&global_trace.start_lock);

	/*
	 * The prepare callbacks allocates some memory for the ring buffer. We
	 * don't free the buffer if the CPU goes down. If we were to free
	 * the buffer, then the user would lose any trace that was in the
	 * buffer. The memory will be removed once the "instance" is removed.
	 */
	ret = cpuhp_setup_state_multi(CPUHP_TRACE_RB_PREPARE,
				      "trace/RB:prepare", trace_rb_cpu_prepare,
				      NULL);
	if (ret < 0)
		goto out_free_cpumask;
	/* Used for event triggers */
	ret = -ENOMEM;
	temp_buffer = ring_buffer_alloc(PAGE_SIZE, RB_FL_OVERWRITE);
	if (!temp_buffer)
		goto out_rm_hp_state;

	if (trace_create_savedcmd() < 0)
		goto out_free_temp_buffer;

	if (!zalloc_cpumask_var(&global_trace.pipe_cpumask, GFP_KERNEL))
		goto out_free_savedcmd;

	/* TODO: make the number of buffers hot pluggable with CPUS */
	if (allocate_trace_buffers(&global_trace, ring_buf_size) < 0) {
		MEM_FAIL(1, "tracer: failed to allocate ring buffer!\n");
		goto out_free_pipe_cpumask;
	}
	if (global_trace.buffer_disabled)
		tracing_off();

	if (trace_boot_clock) {
		ret = tracing_set_clock(&global_trace, trace_boot_clock);
		if (ret < 0)
			pr_warn("Trace clock %s not defined, going back to default\n",
				trace_boot_clock);
	}

	/*
	 * register_tracer() might reference current_trace, so it
	 * needs to be set before we register anything. This is
	 * just a bootstrap of current_trace anyway.
	 */
	global_trace.current_trace = &nop_trace;

	global_trace.max_lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;

	ftrace_init_global_array_ops(&global_trace);

	init_trace_flags_index(&global_trace);

	register_tracer(&nop_trace);

	/* Function tracing may start here (via kernel command line) */
	init_function_trace();

	/* All seems OK, enable tracing */
	tracing_disabled = 0;

	atomic_notifier_chain_register(&panic_notifier_list,
				       &trace_panic_notifier);

	register_die_notifier(&trace_die_notifier);

	global_trace.flags = TRACE_ARRAY_FL_GLOBAL;

	INIT_LIST_HEAD(&global_trace.systems);
	INIT_LIST_HEAD(&global_trace.events);
	INIT_LIST_HEAD(&global_trace.hist_vars);
	INIT_LIST_HEAD(&global_trace.err_log);
	list_add(&global_trace.list, &ftrace_trace_arrays);

	apply_trace_boot_options();

	register_snapshot_cmd();

	test_can_verify();

	return 0;

out_free_pipe_cpumask:
	free_cpumask_var(global_trace.pipe_cpumask);
out_free_savedcmd:
	free_saved_cmdlines_buffer(savedcmd);
out_free_temp_buffer:
	ring_buffer_free(temp_buffer);
out_rm_hp_state:
	cpuhp_remove_multi_state(CPUHP_TRACE_RB_PREPARE);
out_free_cpumask:
	free_cpumask_var(global_trace.tracing_cpumask);
out_free_buffer_mask:
	free_cpumask_var(tracing_buffer_mask);
out:
	return ret;
}
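
/*
 * Note on the unwind order above (descriptive): the out_* labels release
 * resources in the reverse order of their allocation, so a failure at any
 * step frees exactly what was set up before it and nothing more.
 */
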
void __init ftrace_boot_snapshot(void)
{
#ifdef CONFIG_TRACER_MAX_TRACE
	struct trace_array *tr;

	if (!snapshot_at_boot)
		return;

	list_for_each_entry(tr, &ftrace_trace_arrays, list) {
		if (!tr->allocated_snapshot)
			continue;

		tracing_snapshot_instance(tr);
		trace_array_puts(tr, "** Boot snapshot taken **\n");
	}
#endif
}

void __init early_trace_init(void)
{
	if (tracepoint_printk) {
		tracepoint_print_iter =
			kzalloc(sizeof(*tracepoint_print_iter), GFP_KERNEL);
		if (MEM_FAIL(!tracepoint_print_iter,
			     "Failed to allocate trace iterator\n"))
			tracepoint_printk = 0;
		else
			static_key_enable(&tracepoint_printk_key.key);
	}
	tracer_alloc_buffers();

	init_events();
}

void __init trace_init(void)
{
	trace_event_init();

	if (boot_instance_index)
		enable_instances();
}

__init static void clear_boot_tracer(void)
{
	/*
	 * The default tracer at boot buffer is an init section.
	 * This function is called at late_initcall time. If we did not
	 * find the boot tracer, then clear it out, to prevent
	 * later registration from accessing the buffer that is
	 * about to be freed.
	 */
	if (!default_bootup_tracer)
		return;

	printk(KERN_INFO "ftrace bootup tracer '%s' not registered.\n",
	       default_bootup_tracer);
	default_bootup_tracer = NULL;
}

#ifdef CONFIG_HAVE_UNSTABLE_SCHED_CLOCK
__init static void tracing_set_default_clock(void)
{
	/* sched_clock_stable() is determined in late_initcall */
	if (!trace_boot_clock && !sched_clock_stable()) {
		if (security_locked_down(LOCKDOWN_TRACEFS)) {
			pr_warn("Can not set tracing clock due to lockdown\n");
			return;
		}

		printk(KERN_WARNING
		       "Unstable clock detected, switching default tracing clock to \"global\"\n"
		       "If you want to keep using the local clock, then add:\n"
		       "  \"trace_clock=local\"\n"
		       "on the kernel command line\n");
		tracing_set_clock(&global_trace, "global");
	}
}
#else
static inline void tracing_set_default_clock(void) { }
#endif

__init static int late_trace_init(void)
{
	if (tracepoint_printk && tracepoint_printk_stop_on_boot) {
		static_key_disable(&tracepoint_printk_key.key);
		tracepoint_printk = 0;
	}

	tracing_set_default_clock();
	clear_boot_tracer();
	return 0;
}

late_initcall_sync(late_trace_init);