1 // SPDX-License-Identifier: GPL-2.0
3 * ring buffer based function tracer
5 * Copyright (C) 2007-2012 Steven Rostedt <srostedt@redhat.com>
6 * Copyright (C) 2008 Ingo Molnar <mingo@redhat.com>
8 * Originally taken from the RT patch by:
9 * Arnaldo Carvalho de Melo <acme@redhat.com>
11 * Based on code from the latency_tracer, that is:
12 * Copyright (C) 2004-2006 Ingo Molnar
13 * Copyright (C) 2004 Nadia Yvette Chambers
15 #include <linux/ring_buffer.h>
16 #include <generated/utsrelease.h>
17 #include <linux/stacktrace.h>
18 #include <linux/writeback.h>
19 #include <linux/kallsyms.h>
20 #include <linux/security.h>
21 #include <linux/seq_file.h>
22 #include <linux/notifier.h>
23 #include <linux/irqflags.h>
24 #include <linux/debugfs.h>
25 #include <linux/tracefs.h>
26 #include <linux/pagemap.h>
27 #include <linux/hardirq.h>
28 #include <linux/linkage.h>
29 #include <linux/uaccess.h>
30 #include <linux/vmalloc.h>
31 #include <linux/ftrace.h>
32 #include <linux/module.h>
33 #include <linux/percpu.h>
34 #include <linux/splice.h>
35 #include <linux/kdebug.h>
36 #include <linux/string.h>
37 #include <linux/mount.h>
38 #include <linux/rwsem.h>
39 #include <linux/slab.h>
40 #include <linux/ctype.h>
41 #include <linux/init.h>
42 #include <linux/kmemleak.h>
43 #include <linux/poll.h>
44 #include <linux/nmi.h>
46 #include <linux/trace.h>
47 #include <linux/sched/clock.h>
48 #include <linux/sched/rt.h>
49 #include <linux/fsnotify.h>
50 #include <linux/irq_work.h>
51 #include <linux/workqueue.h>
54 #include "trace_output.h"
57 * On boot up, the ring buffer is set to the minimum size, so that
58 * we do not waste memory on systems that are not using tracing.
60 bool ring_buffer_expanded;
63 * We need to change this state when a selftest is running.
64 * A selftest will look into the ring-buffer to count the
65 * entries inserted during the selftest, although some concurrent
66 * insertions into the ring-buffer, such as trace_printk, could occur
67 * at the same time, giving false positive or negative results.
69 static bool __read_mostly tracing_selftest_running;
72 * If boot-time tracing including tracers/events via kernel cmdline
73 * is running, we do not want to run SELFTEST.
75 bool __read_mostly tracing_selftest_disabled;
77 #ifdef CONFIG_FTRACE_STARTUP_TEST
78 void __init disable_tracing_selftest(const char *reason)
80 if (!tracing_selftest_disabled) {
81 tracing_selftest_disabled = true;
82 pr_info("Ftrace startup test is disabled due to %s\n", reason);
87 /* Pipe tracepoints to printk */
88 struct trace_iterator *tracepoint_print_iter;
89 int tracepoint_printk;
90 static DEFINE_STATIC_KEY_FALSE(tracepoint_printk_key);
92 /* For tracers that don't implement custom flags */
93 static struct tracer_opt dummy_tracer_opt[] = {
98 dummy_set_flag(struct trace_array *tr, u32 old_flags, u32 bit, int set)
104 * To prevent the comm cache from being overwritten when no
105 * tracing is active, only save the comm when a trace event occurred.
108 static DEFINE_PER_CPU(bool, trace_taskinfo_save);
111 * Kill all tracing for good (never come back).
112 * It is initialized to 1 but will turn to zero if the initialization
113 * of the tracer is successful. But that is the only place that sets this back to zero.
116 static int tracing_disabled = 1;
118 cpumask_var_t __read_mostly tracing_buffer_mask;
121 * ftrace_dump_on_oops - variable to dump ftrace buffer on oops
123 * If there is an oops (or kernel panic) and the ftrace_dump_on_oops
124 * is set, then ftrace_dump is called. This will output the contents
125 * of the ftrace buffers to the console. This is very useful for
126 * capturing traces that lead to crashes and outputting them to a serial console.
129 * It is off by default, but you can enable it either by specifying
130 * "ftrace_dump_on_oops" on the kernel command line, or by setting
131 * /proc/sys/kernel/ftrace_dump_on_oops.
132 * Set it to 1 to dump the buffers of all CPUs.
133 * Set it to 2 to dump only the buffer of the CPU that triggered the oops.
136 enum ftrace_dump_mode ftrace_dump_on_oops;
138 /* When set, tracing will stop when a WARN*() is hit */
139 int __disable_trace_on_warning;
141 #ifdef CONFIG_TRACE_EVAL_MAP_FILE
142 /* Map of enums to their values, for "eval_map" file */
143 struct trace_eval_map_head {
145 unsigned long length;
148 union trace_eval_map_item;
150 struct trace_eval_map_tail {
152 * "end" is first and points to NULL, as it must be different
153 * from "mod" or "eval_string"
155 union trace_eval_map_item *next;
156 const char *end; /* points to NULL */
159 static DEFINE_MUTEX(trace_eval_mutex);
162 * The trace_eval_maps are saved in an array with two extra elements,
163 * one at the beginning, and one at the end. The beginning item contains
164 * the count of the saved maps (head.length), and the module they
165 * belong to if not built in (head.mod). The ending item contains a
166 * pointer to the next array of saved eval_map items.
168 union trace_eval_map_item {
169 struct trace_eval_map map;
170 struct trace_eval_map_head head;
171 struct trace_eval_map_tail tail;
174 static union trace_eval_map_item *trace_eval_maps;
175 #endif /* CONFIG_TRACE_EVAL_MAP_FILE */
177 int tracing_set_tracer(struct trace_array *tr, const char *buf);
178 static void ftrace_trace_userstack(struct trace_array *tr,
179 struct trace_buffer *buffer,
180 unsigned long flags, int pc);
182 #define MAX_TRACER_SIZE 100
183 static char bootup_tracer_buf[MAX_TRACER_SIZE] __initdata;
184 static char *default_bootup_tracer;
186 static bool allocate_snapshot;
188 static int __init set_cmdline_ftrace(char *str)
190 strlcpy(bootup_tracer_buf, str, MAX_TRACER_SIZE);
191 default_bootup_tracer = bootup_tracer_buf;
192 /* We are using ftrace early, expand it */
193 ring_buffer_expanded = true;
196 __setup("ftrace=", set_cmdline_ftrace);
198 static int __init set_ftrace_dump_on_oops(char *str)
200 if (*str++ != '=' || !*str) {
201 ftrace_dump_on_oops = DUMP_ALL;
205 if (!strcmp("orig_cpu", str)) {
206 ftrace_dump_on_oops = DUMP_ORIG;
212 __setup("ftrace_dump_on_oops", set_ftrace_dump_on_oops);
214 static int __init stop_trace_on_warning(char *str)
216 if ((strcmp(str, "=0") != 0 && strcmp(str, "=off") != 0))
217 __disable_trace_on_warning = 1;
220 __setup("traceoff_on_warning", stop_trace_on_warning);
222 static int __init boot_alloc_snapshot(char *str)
224 allocate_snapshot = true;
225 /* We also need the main ring buffer expanded */
226 ring_buffer_expanded = true;
229 __setup("alloc_snapshot", boot_alloc_snapshot);
232 static char trace_boot_options_buf[MAX_TRACER_SIZE] __initdata;
234 static int __init set_trace_boot_options(char *str)
236 strlcpy(trace_boot_options_buf, str, MAX_TRACER_SIZE);
239 __setup("trace_options=", set_trace_boot_options);
241 static char trace_boot_clock_buf[MAX_TRACER_SIZE] __initdata;
242 static char *trace_boot_clock __initdata;
244 static int __init set_trace_boot_clock(char *str)
246 strlcpy(trace_boot_clock_buf, str, MAX_TRACER_SIZE);
247 trace_boot_clock = trace_boot_clock_buf;
250 __setup("trace_clock=", set_trace_boot_clock);
252 static int __init set_tracepoint_printk(char *str)
254 /* Ignore the "tp_printk_stop_on_boot" param */
258 if ((strcmp(str, "=0") != 0 && strcmp(str, "=off") != 0))
259 tracepoint_printk = 1;
262 __setup("tp_printk", set_tracepoint_printk);
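/*
 * Illustrative example (not from this file): the boot parameters handled
 * above can be combined on the kernel command line. The tracer and option
 * values below are arbitrary placeholders; only the parameter names
 * ("ftrace=", "ftrace_dump_on_oops", "traceoff_on_warning", "alloc_snapshot",
 * "trace_options=", "trace_clock=" and "tp_printk") come from the __setup()
 * handlers in this section.
 *
 *	ftrace=function_graph trace_options=sym-addr trace_clock=global \
 *	alloc_snapshot traceoff_on_warning tp_printk ftrace_dump_on_oops=orig_cpu
 */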
264 unsigned long long ns2usecs(u64 nsec)
272 trace_process_export(struct trace_export *export,
273 struct ring_buffer_event *event, int flag)
275 struct trace_entry *entry;
276 unsigned int size = 0;
278 if (export->flags & flag) {
279 entry = ring_buffer_event_data(event);
280 size = ring_buffer_event_length(event);
281 export->write(export, entry, size);
285 static DEFINE_MUTEX(ftrace_export_lock);
287 static struct trace_export __rcu *ftrace_exports_list __read_mostly;
289 static DEFINE_STATIC_KEY_FALSE(trace_function_exports_enabled);
290 static DEFINE_STATIC_KEY_FALSE(trace_event_exports_enabled);
291 static DEFINE_STATIC_KEY_FALSE(trace_marker_exports_enabled);
293 static inline void ftrace_exports_enable(struct trace_export *export)
295 if (export->flags & TRACE_EXPORT_FUNCTION)
296 static_branch_inc(&trace_function_exports_enabled);
298 if (export->flags & TRACE_EXPORT_EVENT)
299 static_branch_inc(&trace_event_exports_enabled);
301 if (export->flags & TRACE_EXPORT_MARKER)
302 static_branch_inc(&trace_marker_exports_enabled);
305 static inline void ftrace_exports_disable(struct trace_export *export)
307 if (export->flags & TRACE_EXPORT_FUNCTION)
308 static_branch_dec(&trace_function_exports_enabled);
310 if (export->flags & TRACE_EXPORT_EVENT)
311 static_branch_dec(&trace_event_exports_enabled);
313 if (export->flags & TRACE_EXPORT_MARKER)
314 static_branch_dec(&trace_marker_exports_enabled);
317 static void ftrace_exports(struct ring_buffer_event *event, int flag)
319 struct trace_export *export;
321 preempt_disable_notrace();
323 export = rcu_dereference_raw_check(ftrace_exports_list);
325 trace_process_export(export, event, flag);
326 export = rcu_dereference_raw_check(export->next);
329 preempt_enable_notrace();
333 add_trace_export(struct trace_export **list, struct trace_export *export)
335 rcu_assign_pointer(export->next, *list);
337 * We are entering export into the list but another
338 * CPU might be walking that list. We need to make sure
339 * the export->next pointer is valid before another CPU sees
340 * the export pointer included into the list.
342 rcu_assign_pointer(*list, export);
346 rm_trace_export(struct trace_export **list, struct trace_export *export)
348 struct trace_export **p;
350 for (p = list; *p != NULL; p = &(*p)->next)
357 rcu_assign_pointer(*p, (*p)->next);
363 add_ftrace_export(struct trace_export **list, struct trace_export *export)
365 ftrace_exports_enable(export);
367 add_trace_export(list, export);
371 rm_ftrace_export(struct trace_export **list, struct trace_export *export)
375 ret = rm_trace_export(list, export);
376 ftrace_exports_disable(export);
381 int register_ftrace_export(struct trace_export *export)
383 if (WARN_ON_ONCE(!export->write))
386 mutex_lock(&ftrace_export_lock);
388 add_ftrace_export(&ftrace_exports_list, export);
390 mutex_unlock(&ftrace_export_lock);
394 EXPORT_SYMBOL_GPL(register_ftrace_export);
396 int unregister_ftrace_export(struct trace_export *export)
400 mutex_lock(&ftrace_export_lock);
402 ret = rm_ftrace_export(&ftrace_exports_list, export);
404 mutex_unlock(&ftrace_export_lock);
408 EXPORT_SYMBOL_GPL(unregister_ftrace_export);
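/*
 * Usage sketch (illustrative, not part of this file): a consumer can forward
 * trace data by registering a struct trace_export. The my_export_write()
 * callback and the pr_debug() sink are hypothetical; register_ftrace_export(),
 * unregister_ftrace_export() and the TRACE_EXPORT_FUNCTION flag are the
 * interface used here.
 *
 *	static void my_export_write(struct trace_export *export,
 *				    const void *entry, unsigned int len)
 *	{
 *		pr_debug("exporting %u bytes of trace data\n", len);
 *	}
 *
 *	static struct trace_export my_export = {
 *		.write = my_export_write,
 *		.flags = TRACE_EXPORT_FUNCTION,
 *	};
 *
 *	register_ftrace_export(&my_export);
 *	...
 *	unregister_ftrace_export(&my_export);
 */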
410 /* trace_flags holds trace_options default values */
411 #define TRACE_DEFAULT_FLAGS \
412 (FUNCTION_DEFAULT_FLAGS | \
413 TRACE_ITER_PRINT_PARENT | TRACE_ITER_PRINTK | \
414 TRACE_ITER_ANNOTATE | TRACE_ITER_CONTEXT_INFO | \
415 TRACE_ITER_RECORD_CMD | TRACE_ITER_OVERWRITE | \
416 TRACE_ITER_IRQ_INFO | TRACE_ITER_MARKERS)
418 /* trace_options that are only supported by global_trace */
419 #define TOP_LEVEL_TRACE_FLAGS (TRACE_ITER_PRINTK | \
420 TRACE_ITER_PRINTK_MSGONLY | TRACE_ITER_RECORD_CMD)
422 /* trace_flags that are default zero for instances */
423 #define ZEROED_TRACE_FLAGS \
424 (TRACE_ITER_EVENT_FORK | TRACE_ITER_FUNC_FORK)
427 * The global_trace is the descriptor that holds the top-level tracing
428 * buffers for the live tracing.
430 static struct trace_array global_trace = {
431 .trace_flags = TRACE_DEFAULT_FLAGS,
434 LIST_HEAD(ftrace_trace_arrays);
436 int trace_array_get(struct trace_array *this_tr)
438 struct trace_array *tr;
441 mutex_lock(&trace_types_lock);
442 list_for_each_entry(tr, &ftrace_trace_arrays, list) {
449 mutex_unlock(&trace_types_lock);
454 static void __trace_array_put(struct trace_array *this_tr)
456 WARN_ON(!this_tr->ref);
461 * trace_array_put - Decrement the reference counter for this trace array.
463 * NOTE: Use this when we no longer need the trace array returned by
464 * trace_array_get_by_name(). This ensures the trace array can be later destroyed.
468 void trace_array_put(struct trace_array *this_tr)
473 mutex_lock(&trace_types_lock);
474 __trace_array_put(this_tr);
475 mutex_unlock(&trace_types_lock);
477 EXPORT_SYMBOL_GPL(trace_array_put);
479 int tracing_check_open_get_tr(struct trace_array *tr)
483 ret = security_locked_down(LOCKDOWN_TRACEFS);
487 if (tracing_disabled)
490 if (tr && trace_array_get(tr) < 0)
496 int call_filter_check_discard(struct trace_event_call *call, void *rec,
497 struct trace_buffer *buffer,
498 struct ring_buffer_event *event)
500 if (unlikely(call->flags & TRACE_EVENT_FL_FILTERED) &&
501 !filter_match_preds(call->filter, rec)) {
502 __trace_event_discard_commit(buffer, event);
509 void trace_free_pid_list(struct trace_pid_list *pid_list)
511 vfree(pid_list->pids);
516 * trace_find_filtered_pid - check if a pid exists in a filtered_pid list
517 * @filtered_pids: The list of pids to check
518 * @search_pid: The PID to find in @filtered_pids
520 * Returns true if @search_pid is found in @filtered_pids, and false otherwise.
523 trace_find_filtered_pid(struct trace_pid_list *filtered_pids, pid_t search_pid)
526 * If pid_max changed after filtered_pids was created, we
527 * by default ignore all pids greater than the previous pid_max.
529 if (search_pid >= filtered_pids->pid_max)
532 return test_bit(search_pid, filtered_pids->pids);
536 * trace_ignore_this_task - should a task be ignored for tracing
537 * @filtered_pids: The list of pids to check
538 * @task: The task that should be ignored if not filtered
540 * Checks if @task should be traced or not from @filtered_pids.
541 * Returns true if @task should *NOT* be traced.
542 * Returns false if @task should be traced.
545 trace_ignore_this_task(struct trace_pid_list *filtered_pids,
546 struct trace_pid_list *filtered_no_pids,
547 struct task_struct *task)
550 * If filtered_no_pids is not empty, and the task's pid is listed
551 * in filtered_no_pids, then return true.
552 * Otherwise, if filtered_pids is empty, that means we can
553 * trace all tasks. If it has content, then only trace pids
554 * within filtered_pids.
557 return (filtered_pids &&
558 !trace_find_filtered_pid(filtered_pids, task->pid)) ||
560 trace_find_filtered_pid(filtered_no_pids, task->pid));
564 * trace_filter_add_remove_task - Add or remove a task from a pid_list
565 * @pid_list: The list to modify
566 * @self: The current task for fork or NULL for exit
567 * @task: The task to add or remove
569 * If adding a task, if @self is defined, the task is only added if @self
570 * is also included in @pid_list. This happens on fork and tasks should
571 * only be added when the parent is listed. If @self is NULL, then the
572 * @task pid will be removed from the list, which would happen on exit of a task.
575 void trace_filter_add_remove_task(struct trace_pid_list *pid_list,
576 struct task_struct *self,
577 struct task_struct *task)
582 /* For forks, we only add if the forking task is listed */
584 if (!trace_find_filtered_pid(pid_list, self->pid))
588 /* Sorry, but we don't support pid_max changing after setting */
589 if (task->pid >= pid_list->pid_max)
592 /* "self" is set for forks, and NULL for exits */
594 set_bit(task->pid, pid_list->pids);
596 clear_bit(task->pid, pid_list->pids);
600 * trace_pid_next - Used for seq_file to get to the next pid of a pid_list
601 * @pid_list: The pid list to show
602 * @v: The last pid that was shown (+1 the actual pid to let zero be displayed)
603 * @pos: The position of the file
605 * This is used by the seq_file "next" operation to iterate the pids
606 * listed in a trace_pid_list structure.
608 * Returns the pid+1 as we want to display pid of zero, but NULL would
609 * stop the iteration.
611 void *trace_pid_next(struct trace_pid_list *pid_list, void *v, loff_t *pos)
613 unsigned long pid = (unsigned long)v;
617 /* pid already is +1 of the actual previous bit */
618 pid = find_next_bit(pid_list->pids, pid_list->pid_max, pid);
620 /* Return pid + 1 to allow zero to be represented */
621 if (pid < pid_list->pid_max)
622 return (void *)(pid + 1);
628 * trace_pid_start - Used for seq_file to start reading pid lists
629 * @pid_list: The pid list to show
630 * @pos: The position of the file
632 * This is used by seq_file "start" operation to start the iteration
635 * Returns the pid+1 as we want to display pid of zero, but NULL would
636 * stop the iteration.
638 void *trace_pid_start(struct trace_pid_list *pid_list, loff_t *pos)
643 pid = find_first_bit(pid_list->pids, pid_list->pid_max);
644 if (pid >= pid_list->pid_max)
647 /* Return pid + 1 so that zero can be the exit value */
648 for (pid++; pid && l < *pos;
649 pid = (unsigned long)trace_pid_next(pid_list, (void *)pid, &l))
655 * trace_pid_show - show the current pid in seq_file processing
656 * @m: The seq_file structure to write into
657 * @v: A void pointer of the pid (+1) value to display
659 * Can be directly used by seq_file operations to display the current
662 int trace_pid_show(struct seq_file *m, void *v)
664 unsigned long pid = (unsigned long)v - 1;
666 seq_printf(m, "%lu\n", pid);
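/*
 * Usage sketch (illustrative): the three helpers above are meant to back a
 * seq_file iterator over a trace_pid_list. The wrapper names, the stop
 * callback and the way the pid_list is obtained are hypothetical;
 * trace_pid_start(), trace_pid_next() and trace_pid_show() are the helpers
 * defined here.
 *
 *	static void *my_pids_start(struct seq_file *m, loff_t *pos)
 *	{
 *		struct trace_pid_list *pid_list = m->private;
 *
 *		return trace_pid_start(pid_list, pos);
 *	}
 *
 *	static void *my_pids_next(struct seq_file *m, void *v, loff_t *pos)
 *	{
 *		return trace_pid_next(m->private, v, pos);
 *	}
 *
 *	static const struct seq_operations my_pids_seq_ops = {
 *		.start = my_pids_start,
 *		.next  = my_pids_next,
 *		.stop  = my_pids_stop,	(release any locks/RCU taken in start)
 *		.show  = trace_pid_show,
 *	};
 */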
670 /* 128 should be much more than enough */
671 #define PID_BUF_SIZE 127
673 int trace_pid_write(struct trace_pid_list *filtered_pids,
674 struct trace_pid_list **new_pid_list,
675 const char __user *ubuf, size_t cnt)
677 struct trace_pid_list *pid_list;
678 struct trace_parser parser;
686 if (trace_parser_get_init(&parser, PID_BUF_SIZE + 1))
690 * Always recreate a new array. The write is an all or nothing
691 * operation. Always create a new array when adding new pids by
692 * the user. If the operation fails, then the current list is not modified.
695 pid_list = kmalloc(sizeof(*pid_list), GFP_KERNEL);
697 trace_parser_put(&parser);
701 pid_list->pid_max = READ_ONCE(pid_max);
703 /* Only truncating will shrink pid_max */
704 if (filtered_pids && filtered_pids->pid_max > pid_list->pid_max)
705 pid_list->pid_max = filtered_pids->pid_max;
707 pid_list->pids = vzalloc((pid_list->pid_max + 7) >> 3);
708 if (!pid_list->pids) {
709 trace_parser_put(&parser);
715 /* copy the current bits to the new max */
716 for_each_set_bit(pid, filtered_pids->pids,
717 filtered_pids->pid_max) {
718 set_bit(pid, pid_list->pids);
727 ret = trace_get_user(&parser, ubuf, cnt, &pos);
728 if (ret < 0 || !trace_parser_loaded(&parser))
736 if (kstrtoul(parser.buffer, 0, &val))
738 if (val >= pid_list->pid_max)
743 set_bit(pid, pid_list->pids);
746 trace_parser_clear(&parser);
749 trace_parser_put(&parser);
752 trace_free_pid_list(pid_list);
757 /* Cleared the list of pids */
758 trace_free_pid_list(pid_list);
763 *new_pid_list = pid_list;
768 static u64 buffer_ftrace_now(struct array_buffer *buf, int cpu)
772 /* Early boot up does not have a buffer yet */
774 return trace_clock_local();
776 ts = ring_buffer_time_stamp(buf->buffer, cpu);
777 ring_buffer_normalize_time_stamp(buf->buffer, cpu, &ts);
782 u64 ftrace_now(int cpu)
784 return buffer_ftrace_now(&global_trace.array_buffer, cpu);
788 * tracing_is_enabled - Show if global_trace has been disabled
790 * Shows if the global trace has been enabled or not. It uses the
791 * mirror flag "buffer_disabled" to be used in fast paths such as for
792 * the irqsoff tracer. But it may be inaccurate due to races. If you
793 * need to know the accurate state, use tracing_is_on() which is a little
794 * slower, but accurate.
796 int tracing_is_enabled(void)
799 * For quick access (irqsoff uses this in fast path), just
800 * return the mirror variable of the state of the ring buffer.
801 * It's a little racy, but we don't really care.
804 return !global_trace.buffer_disabled;
808 * trace_buf_size is the size in bytes that is allocated
809 * for a buffer. Note, the number of bytes is always rounded to page size.
812 * This number is purposely set to a low number of 16384.
813 * If a dump on oops happens, it will be much appreciated
814 * not to have to wait for all that output. Anyway, this is
815 * configurable at both boot time and run time.
817 #define TRACE_BUF_SIZE_DEFAULT 1441792UL /* 16384 * 88 (sizeof(entry)) */
819 static unsigned long trace_buf_size = TRACE_BUF_SIZE_DEFAULT;
821 /* trace_types holds a linked list of available tracers. */
822 static struct tracer *trace_types __read_mostly;
825 * trace_types_lock is used to protect the trace_types list.
827 DEFINE_MUTEX(trace_types_lock);
830 * serialize the access of the ring buffer
832 * The ring buffer serializes readers, but that is only low-level protection.
833 * The validity of the events (which are returned by ring_buffer_peek() etc.)
834 * is not protected by the ring buffer.
836 * The content of events may become garbage if we allow other processes to
837 * consume these events concurrently:
838 * A) the page of the consumed events may become a normal page
839 * (not reader page) in the ring buffer, and this page will be rewritten
840 * by the events producer.
841 * B) The page of the consumed events may become a page for splice_read,
842 * and this page will be returned to the system.
844 * These primitives allow multi-process access to different cpu ring buffers
847 * These primitives don't distinguish read-only and read-consume access.
848 * Multiple read-only accesses are also serialized.
852 static DECLARE_RWSEM(all_cpu_access_lock);
853 static DEFINE_PER_CPU(struct mutex, cpu_access_lock);
855 static inline void trace_access_lock(int cpu)
857 if (cpu == RING_BUFFER_ALL_CPUS) {
858 /* gain it for accessing the whole ring buffer. */
859 down_write(&all_cpu_access_lock);
861 /* gain it for accessing a cpu ring buffer. */
863 /* Firstly block other trace_access_lock(RING_BUFFER_ALL_CPUS). */
864 down_read(&all_cpu_access_lock);
866 /* Secondly block other access to this @cpu ring buffer. */
867 mutex_lock(&per_cpu(cpu_access_lock, cpu));
871 static inline void trace_access_unlock(int cpu)
873 if (cpu == RING_BUFFER_ALL_CPUS) {
874 up_write(&all_cpu_access_lock);
876 mutex_unlock(&per_cpu(cpu_access_lock, cpu));
877 up_read(&all_cpu_access_lock);
881 static inline void trace_access_lock_init(void)
885 for_each_possible_cpu(cpu)
886 mutex_init(&per_cpu(cpu_access_lock, cpu));
891 static DEFINE_MUTEX(access_lock);
893 static inline void trace_access_lock(int cpu)
896 mutex_lock(&access_lock);
899 static inline void trace_access_unlock(int cpu)
902 mutex_unlock(&access_lock);
905 static inline void trace_access_lock_init(void)
911 #ifdef CONFIG_STACKTRACE
912 static void __ftrace_trace_stack(struct trace_buffer *buffer,
914 int skip, int pc, struct pt_regs *regs);
915 static inline void ftrace_trace_stack(struct trace_array *tr,
916 struct trace_buffer *buffer,
918 int skip, int pc, struct pt_regs *regs);
921 static inline void __ftrace_trace_stack(struct trace_buffer *buffer,
923 int skip, int pc, struct pt_regs *regs)
926 static inline void ftrace_trace_stack(struct trace_array *tr,
927 struct trace_buffer *buffer,
929 int skip, int pc, struct pt_regs *regs)
935 static __always_inline void
936 trace_event_setup(struct ring_buffer_event *event,
937 int type, unsigned long flags, int pc)
939 struct trace_entry *ent = ring_buffer_event_data(event);
941 tracing_generic_entry_update(ent, type, flags, pc);
944 static __always_inline struct ring_buffer_event *
945 __trace_buffer_lock_reserve(struct trace_buffer *buffer,
948 unsigned long flags, int pc)
950 struct ring_buffer_event *event;
952 event = ring_buffer_lock_reserve(buffer, len);
954 trace_event_setup(event, type, flags, pc);
959 void tracer_tracing_on(struct trace_array *tr)
961 if (tr->array_buffer.buffer)
962 ring_buffer_record_on(tr->array_buffer.buffer);
964 * This flag is looked at when buffers haven't been allocated
965 * yet, or by some tracers (like irqsoff), that just want to
966 * know if the ring buffer has been disabled, but it can handle
967 * races where it gets disabled but we still do a record.
968 * As the check is in the fast path of the tracers, it is more
969 * important to be fast than accurate.
971 tr->buffer_disabled = 0;
972 /* Make the flag seen by readers */
977 * tracing_on - enable tracing buffers
979 * This function enables tracing buffers that may have been
980 * disabled with tracing_off.
982 void tracing_on(void)
984 tracer_tracing_on(&global_trace);
986 EXPORT_SYMBOL_GPL(tracing_on);
989 static __always_inline void
990 __buffer_unlock_commit(struct trace_buffer *buffer, struct ring_buffer_event *event)
992 __this_cpu_write(trace_taskinfo_save, true);
994 /* If this is the temp buffer, we need to commit fully */
995 if (this_cpu_read(trace_buffered_event) == event) {
996 /* Length is in event->array[0] */
997 ring_buffer_write(buffer, event->array[0], &event->array[1]);
998 /* Release the temp buffer */
999 this_cpu_dec(trace_buffered_event_cnt);
1001 ring_buffer_unlock_commit(buffer, event);
1005 * __trace_puts - write a constant string into the trace buffer.
1006 * @ip: The address of the caller
1007 * @str: The constant string to write
1008 * @size: The size of the string.
1010 int __trace_puts(unsigned long ip, const char *str, int size)
1012 struct ring_buffer_event *event;
1013 struct trace_buffer *buffer;
1014 struct print_entry *entry;
1015 unsigned long irq_flags;
1019 if (!(global_trace.trace_flags & TRACE_ITER_PRINTK))
1022 pc = preempt_count();
1024 if (unlikely(tracing_selftest_running || tracing_disabled))
1027 alloc = sizeof(*entry) + size + 2; /* possible \n added */
1029 local_save_flags(irq_flags);
1030 buffer = global_trace.array_buffer.buffer;
1031 ring_buffer_nest_start(buffer);
1032 event = __trace_buffer_lock_reserve(buffer, TRACE_PRINT, alloc,
1039 entry = ring_buffer_event_data(event);
1042 memcpy(&entry->buf, str, size);
1044 /* Add a newline if necessary */
1045 if (entry->buf[size - 1] != '\n') {
1046 entry->buf[size] = '\n';
1047 entry->buf[size + 1] = '\0';
1049 entry->buf[size] = '\0';
1051 __buffer_unlock_commit(buffer, event);
1052 ftrace_trace_stack(&global_trace, buffer, irq_flags, 4, pc, NULL);
1054 ring_buffer_nest_end(buffer);
1057 EXPORT_SYMBOL_GPL(__trace_puts);
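/*
 * Example (illustrative): __trace_puts() is normally reached through the
 * trace_puts() macro, which supplies the caller's _THIS_IP_ and, for
 * non-constant strings, the string length; constant strings may instead be
 * routed to __trace_bputs() below. A debugging call site simply does:
 *
 *	trace_puts("reached the slow path\n");
 */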
1060 * __trace_bputs - write the pointer to a constant string into the trace buffer
1061 * @ip: The address of the caller
1062 * @str: The constant string to write to the buffer to
1064 int __trace_bputs(unsigned long ip, const char *str)
1066 struct ring_buffer_event *event;
1067 struct trace_buffer *buffer;
1068 struct bputs_entry *entry;
1069 unsigned long irq_flags;
1070 int size = sizeof(struct bputs_entry);
1074 if (!(global_trace.trace_flags & TRACE_ITER_PRINTK))
1077 pc = preempt_count();
1079 if (unlikely(tracing_selftest_running || tracing_disabled))
1082 local_save_flags(irq_flags);
1083 buffer = global_trace.array_buffer.buffer;
1085 ring_buffer_nest_start(buffer);
1086 event = __trace_buffer_lock_reserve(buffer, TRACE_BPUTS, size,
1091 entry = ring_buffer_event_data(event);
1095 __buffer_unlock_commit(buffer, event);
1096 ftrace_trace_stack(&global_trace, buffer, irq_flags, 4, pc, NULL);
1100 ring_buffer_nest_end(buffer);
1103 EXPORT_SYMBOL_GPL(__trace_bputs);
1105 #ifdef CONFIG_TRACER_SNAPSHOT
1106 static void tracing_snapshot_instance_cond(struct trace_array *tr,
1109 struct tracer *tracer = tr->current_trace;
1110 unsigned long flags;
1113 internal_trace_puts("*** SNAPSHOT CALLED FROM NMI CONTEXT ***\n");
1114 internal_trace_puts("*** snapshot is being ignored ***\n");
1118 if (!tr->allocated_snapshot) {
1119 internal_trace_puts("*** SNAPSHOT NOT ALLOCATED ***\n");
1120 internal_trace_puts("*** stopping trace here! ***\n");
1125 /* Note, snapshot can not be used when the tracer uses it */
1126 if (tracer->use_max_tr) {
1127 internal_trace_puts("*** LATENCY TRACER ACTIVE ***\n");
1128 internal_trace_puts("*** Can not use snapshot (sorry) ***\n");
1132 local_irq_save(flags);
1133 update_max_tr(tr, current, smp_processor_id(), cond_data);
1134 local_irq_restore(flags);
1137 void tracing_snapshot_instance(struct trace_array *tr)
1139 tracing_snapshot_instance_cond(tr, NULL);
1143 * tracing_snapshot - take a snapshot of the current buffer.
1145 * This causes a swap between the snapshot buffer and the current live
1146 * tracing buffer. You can use this to take snapshots of the live
1147 * trace when some condition is triggered, but continue to trace.
1149 * Note, make sure to allocate the snapshot either with
1150 * tracing_snapshot_alloc(), or by doing it manually
1151 * with: echo 1 > /sys/kernel/debug/tracing/snapshot
1153 * If the snapshot buffer is not allocated, this will stop tracing,
1154 * basically making a permanent snapshot.
1156 void tracing_snapshot(void)
1158 struct trace_array *tr = &global_trace;
1160 tracing_snapshot_instance(tr);
1162 EXPORT_SYMBOL_GPL(tracing_snapshot);
1165 * tracing_snapshot_cond - conditionally take a snapshot of the current buffer.
1166 * @tr: The tracing instance to snapshot
1167 * @cond_data: The data to be tested conditionally, and possibly saved
1169 * This is the same as tracing_snapshot() except that the snapshot is
1170 * conditional - the snapshot will only happen if the
1171 * cond_snapshot.update() implementation receiving the cond_data
1172 * returns true, which means that the trace array's cond_snapshot
1173 * update() operation used the cond_data to determine whether the
1174 * snapshot should be taken, and if it was, presumably saved it along
1175 * with the snapshot.
1177 void tracing_snapshot_cond(struct trace_array *tr, void *cond_data)
1179 tracing_snapshot_instance_cond(tr, cond_data);
1181 EXPORT_SYMBOL_GPL(tracing_snapshot_cond);
1184 * tracing_snapshot_cond_data - get the user data associated with a snapshot
1185 * @tr: The tracing instance
1187 * When the user enables a conditional snapshot using
1188 * tracing_snapshot_cond_enable(), the user-defined cond_data is saved
1189 * with the snapshot. This accessor is used to retrieve it.
1191 * Should not be called from cond_snapshot.update(), since it takes
1192 * the tr->max_lock lock, which the code calling
1193 * cond_snapshot.update() has already done.
1195 * Returns the cond_data associated with the trace array's snapshot.
1197 void *tracing_cond_snapshot_data(struct trace_array *tr)
1199 void *cond_data = NULL;
1201 local_irq_disable();
1202 arch_spin_lock(&tr->max_lock);
1204 if (tr->cond_snapshot)
1205 cond_data = tr->cond_snapshot->cond_data;
1207 arch_spin_unlock(&tr->max_lock);
1212 EXPORT_SYMBOL_GPL(tracing_cond_snapshot_data);
1214 static int resize_buffer_duplicate_size(struct array_buffer *trace_buf,
1215 struct array_buffer *size_buf, int cpu_id);
1216 static void set_buffer_entries(struct array_buffer *buf, unsigned long val);
1218 int tracing_alloc_snapshot_instance(struct trace_array *tr)
1222 if (!tr->allocated_snapshot) {
1224 /* allocate spare buffer */
1225 ret = resize_buffer_duplicate_size(&tr->max_buffer,
1226 &tr->array_buffer, RING_BUFFER_ALL_CPUS);
1230 tr->allocated_snapshot = true;
1236 static void free_snapshot(struct trace_array *tr)
1239 * We don't free the ring buffer; instead, we resize it because
1240 * the max_tr ring buffer has some state (e.g. ring->clock) and
1241 * we want to preserve it.
1243 ring_buffer_resize(tr->max_buffer.buffer, 1, RING_BUFFER_ALL_CPUS);
1244 set_buffer_entries(&tr->max_buffer, 1);
1245 tracing_reset_online_cpus(&tr->max_buffer);
1246 tr->allocated_snapshot = false;
1250 * tracing_alloc_snapshot - allocate snapshot buffer.
1252 * This only allocates the snapshot buffer if it isn't already
1253 * allocated - it doesn't also take a snapshot.
1255 * This is meant to be used in cases where the snapshot buffer needs
1256 * to be set up for events that can't sleep but need to be able to
1257 * trigger a snapshot.
1259 int tracing_alloc_snapshot(void)
1261 struct trace_array *tr = &global_trace;
1264 ret = tracing_alloc_snapshot_instance(tr);
1269 EXPORT_SYMBOL_GPL(tracing_alloc_snapshot);
1272 * tracing_snapshot_alloc - allocate and take a snapshot of the current buffer.
1274 * This is similar to tracing_snapshot(), but it will allocate the
1275 * snapshot buffer if it isn't already allocated. Use this only
1276 * where it is safe to sleep, as the allocation may sleep.
1278 * This causes a swap between the snapshot buffer and the current live
1279 * tracing buffer. You can use this to take snapshots of the live
1280 * trace when some condition is triggered, but continue to trace.
1282 void tracing_snapshot_alloc(void)
1286 ret = tracing_alloc_snapshot();
1292 EXPORT_SYMBOL_GPL(tracing_snapshot_alloc);
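/*
 * Usage sketch (illustrative): a debugging hook can allocate the snapshot
 * buffer once in a sleepable context and then take snapshots when an
 * interesting condition hits (NMI context is rejected by the code above).
 * The condition below is a placeholder; tracing_snapshot_alloc() and
 * tracing_snapshot() are the exported entry points defined here.
 *
 *	// during setup, where sleeping is allowed
 *	tracing_snapshot_alloc();
 *
 *	// later, in the code being debugged
 *	if (suspicious_condition)
 *		tracing_snapshot();
 */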
1295 * tracing_snapshot_cond_enable - enable conditional snapshot for an instance
1296 * @tr: The tracing instance
1297 * @cond_data: User data to associate with the snapshot
1298 * @update: Implementation of the cond_snapshot update function
1300 * Check whether the conditional snapshot for the given instance has
1301 * already been enabled, or if the current tracer is already using a
1302 * snapshot; if so, return -EBUSY, else create a cond_snapshot and
1303 * save the cond_data and update function inside.
1305 * Returns 0 if successful, error otherwise.
1307 int tracing_snapshot_cond_enable(struct trace_array *tr, void *cond_data,
1308 cond_update_fn_t update)
1310 struct cond_snapshot *cond_snapshot;
1313 cond_snapshot = kzalloc(sizeof(*cond_snapshot), GFP_KERNEL);
1317 cond_snapshot->cond_data = cond_data;
1318 cond_snapshot->update = update;
1320 mutex_lock(&trace_types_lock);
1322 ret = tracing_alloc_snapshot_instance(tr);
1326 if (tr->current_trace->use_max_tr) {
1332 * The cond_snapshot can only change to NULL without the
1333 * trace_types_lock. We don't care if we race with it going
1334 * to NULL, but we want to make sure that it's not set to
1335 * something other than NULL when we get here, which we can
1336 * do safely with only holding the trace_types_lock and not
1337 * having to take the max_lock.
1339 if (tr->cond_snapshot) {
1344 local_irq_disable();
1345 arch_spin_lock(&tr->max_lock);
1346 tr->cond_snapshot = cond_snapshot;
1347 arch_spin_unlock(&tr->max_lock);
1350 mutex_unlock(&trace_types_lock);
1355 mutex_unlock(&trace_types_lock);
1356 kfree(cond_snapshot);
1359 EXPORT_SYMBOL_GPL(tracing_snapshot_cond_enable);
1362 * tracing_snapshot_cond_disable - disable conditional snapshot for an instance
1363 * @tr: The tracing instance
1365 * Check whether the conditional snapshot for the given instance is
1366 * enabled; if so, free the cond_snapshot associated with it,
1367 * otherwise return -EINVAL.
1369 * Returns 0 if successful, error otherwise.
1371 int tracing_snapshot_cond_disable(struct trace_array *tr)
1375 local_irq_disable();
1376 arch_spin_lock(&tr->max_lock);
1378 if (!tr->cond_snapshot)
1381 kfree(tr->cond_snapshot);
1382 tr->cond_snapshot = NULL;
1385 arch_spin_unlock(&tr->max_lock);
1390 EXPORT_SYMBOL_GPL(tracing_snapshot_cond_disable);
1392 void tracing_snapshot(void)
1394 WARN_ONCE(1, "Snapshot feature not enabled, but internal snapshot used");
1396 EXPORT_SYMBOL_GPL(tracing_snapshot);
1397 void tracing_snapshot_cond(struct trace_array *tr, void *cond_data)
1399 WARN_ONCE(1, "Snapshot feature not enabled, but internal conditional snapshot used");
1401 EXPORT_SYMBOL_GPL(tracing_snapshot_cond);
1402 int tracing_alloc_snapshot(void)
1404 WARN_ONCE(1, "Snapshot feature not enabled, but snapshot allocation used");
1407 EXPORT_SYMBOL_GPL(tracing_alloc_snapshot);
1408 void tracing_snapshot_alloc(void)
1413 EXPORT_SYMBOL_GPL(tracing_snapshot_alloc);
1414 void *tracing_cond_snapshot_data(struct trace_array *tr)
1418 EXPORT_SYMBOL_GPL(tracing_cond_snapshot_data);
1419 int tracing_snapshot_cond_enable(struct trace_array *tr, void *cond_data, cond_update_fn_t update)
1423 EXPORT_SYMBOL_GPL(tracing_snapshot_cond_enable);
1424 int tracing_snapshot_cond_disable(struct trace_array *tr)
1428 EXPORT_SYMBOL_GPL(tracing_snapshot_cond_disable);
1429 #endif /* CONFIG_TRACER_SNAPSHOT */
1431 void tracer_tracing_off(struct trace_array *tr)
1433 if (tr->array_buffer.buffer)
1434 ring_buffer_record_off(tr->array_buffer.buffer);
1436 * This flag is looked at when buffers haven't been allocated
1437 * yet, or by some tracers (like irqsoff), that just want to
1438 * know if the ring buffer has been disabled, but it can handle
1439 * races where it gets disabled but we still do a record.
1440 * As the check is in the fast path of the tracers, it is more
1441 * important to be fast than accurate.
1443 tr->buffer_disabled = 1;
1444 /* Make the flag seen by readers */
1449 * tracing_off - turn off tracing buffers
1451 * This function stops the tracing buffers from recording data.
1452 * It does not disable any overhead the tracers themselves may
1453 * be causing. This function simply causes all recording to
1454 * the ring buffers to fail.
1456 void tracing_off(void)
1458 tracer_tracing_off(&global_trace);
1460 EXPORT_SYMBOL_GPL(tracing_off);
1462 void disable_trace_on_warning(void)
1464 if (__disable_trace_on_warning) {
1465 trace_array_printk_buf(global_trace.array_buffer.buffer, _THIS_IP_,
1466 "Disabling tracing due to warning\n");
1472 * tracer_tracing_is_on - show real state of ring buffer enabled
1473 * @tr : the trace array to know if ring buffer is enabled
1475 * Shows real state of the ring buffer if it is enabled or not.
1477 bool tracer_tracing_is_on(struct trace_array *tr)
1479 if (tr->array_buffer.buffer)
1480 return ring_buffer_record_is_on(tr->array_buffer.buffer);
1481 return !tr->buffer_disabled;
1485 * tracing_is_on - show state of ring buffers enabled
1487 int tracing_is_on(void)
1489 return tracer_tracing_is_on(&global_trace);
1491 EXPORT_SYMBOL_GPL(tracing_is_on);
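/*
 * Example (illustrative): tracing_off()/tracing_on() are often used to
 * freeze the ring buffer the moment a bug is detected, so that the trace
 * leading up to it is preserved. The corruption check is a placeholder;
 * the three calls are the exported helpers defined above.
 *
 *	if (data_is_corrupted(obj)) {
 *		tracing_off();
 *		pr_err("corruption detected, tracing stopped (on=%d)\n",
 *		       tracing_is_on());
 *	}
 */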
1493 static int __init set_buf_size(char *str)
1495 unsigned long buf_size;
1499 buf_size = memparse(str, &str);
1501 * nr_entries can not be zero and the startup
1502 * tests require some buffer space. Therefore
1503 * ensure we have at least 4096 bytes of buffer.
1505 trace_buf_size = max(4096UL, buf_size);
1508 __setup("trace_buf_size=", set_buf_size);
1510 static int __init set_tracing_thresh(char *str)
1512 unsigned long threshold;
1517 ret = kstrtoul(str, 0, &threshold);
1520 tracing_thresh = threshold * 1000;
1523 __setup("tracing_thresh=", set_tracing_thresh);
1525 unsigned long nsecs_to_usecs(unsigned long nsecs)
1527 return nsecs / 1000;
1531 * TRACE_FLAGS is defined as a tuple matching bit masks with strings.
1532 * It uses C(a, b) where 'a' is the eval (enum) name and 'b' is the string that
1533 * matches it. By defining "C(a, b) b", TRACE_FLAGS becomes a list
1534 * of strings in the order that the evals (enum) were defined.
1539 /* These must match the bit positions in trace_iterator_flags */
1540 static const char *trace_options[] = {
1548 int in_ns; /* is this clock in nanoseconds? */
1549 } trace_clocks[] = {
1550 { trace_clock_local, "local", 1 },
1551 { trace_clock_global, "global", 1 },
1552 { trace_clock_counter, "counter", 0 },
1553 { trace_clock_jiffies, "uptime", 0 },
1554 { trace_clock, "perf", 1 },
1555 { ktime_get_mono_fast_ns, "mono", 1 },
1556 { ktime_get_raw_fast_ns, "mono_raw", 1 },
1557 { ktime_get_boot_fast_ns, "boot", 1 },
1561 bool trace_clock_in_ns(struct trace_array *tr)
1563 if (trace_clocks[tr->clock_id].in_ns)
1570 * trace_parser_get_init - gets the buffer for trace parser
1572 int trace_parser_get_init(struct trace_parser *parser, int size)
1574 memset(parser, 0, sizeof(*parser));
1576 parser->buffer = kmalloc(size, GFP_KERNEL);
1577 if (!parser->buffer)
1580 parser->size = size;
1585 * trace_parser_put - frees the buffer for trace parser
1587 void trace_parser_put(struct trace_parser *parser)
1589 kfree(parser->buffer);
1590 parser->buffer = NULL;
1594 * trace_get_user - reads the user input string separated by space
1595 * (matched by isspace(ch))
1597 * For each string found the 'struct trace_parser' is updated,
1598 * and the function returns.
1600 * Returns number of bytes read.
1602 * See kernel/trace/trace.h for 'struct trace_parser' details.
1604 int trace_get_user(struct trace_parser *parser, const char __user *ubuf,
1605 size_t cnt, loff_t *ppos)
1612 trace_parser_clear(parser);
1614 ret = get_user(ch, ubuf++);
1622 * The parser is not finished with the last write,
1623 * continue reading the user input without skipping spaces.
1625 if (!parser->cont) {
1626 /* skip white space */
1627 while (cnt && isspace(ch)) {
1628 ret = get_user(ch, ubuf++);
1637 /* only spaces were written */
1638 if (isspace(ch) || !ch) {
1645 /* read the non-space input */
1646 while (cnt && !isspace(ch) && ch) {
1647 if (parser->idx < parser->size - 1)
1648 parser->buffer[parser->idx++] = ch;
1653 ret = get_user(ch, ubuf++);
1660 /* We either got finished input or we have to wait for another call. */
1661 if (isspace(ch) || !ch) {
1662 parser->buffer[parser->idx] = 0;
1663 parser->cont = false;
1664 } else if (parser->idx < parser->size - 1) {
1665 parser->cont = true;
1666 parser->buffer[parser->idx++] = ch;
1667 /* Make sure the parsed string always terminates with '\0'. */
1668 parser->buffer[parser->idx] = 0;
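/*
 * Usage sketch (illustrative): trace_get_user() is the workhorse behind
 * write handlers that accept whitespace-separated tokens, as in
 * trace_pid_write() above. A minimal consumer, with hypothetical names and
 * ubuf/cnt taken to be the write handler's arguments, could look like:
 *
 *	struct trace_parser parser;
 *	unsigned long val;
 *	loff_t pos = 0;
 *	int ret;
 *
 *	if (trace_parser_get_init(&parser, 64))
 *		return -ENOMEM;
 *
 *	while (cnt > 0) {
 *		ret = trace_get_user(&parser, ubuf, cnt, &pos);
 *		if (ret < 0 || !trace_parser_loaded(&parser))
 *			break;
 *		if (!kstrtoul(parser.buffer, 0, &val))
 *			consume_value(val);	// hypothetical sink
 *		ubuf += ret;
 *		cnt -= ret;
 *		trace_parser_clear(&parser);
 *	}
 *	trace_parser_put(&parser);
 */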
1681 /* TODO add a seq_buf_to_buffer() */
1682 static ssize_t trace_seq_to_buffer(struct trace_seq *s, void *buf, size_t cnt)
1686 if (trace_seq_used(s) <= s->seq.readpos)
1689 len = trace_seq_used(s) - s->seq.readpos;
1692 memcpy(buf, s->buffer + s->seq.readpos, cnt);
1694 s->seq.readpos += cnt;
1698 unsigned long __read_mostly tracing_thresh;
1699 static const struct file_operations tracing_max_lat_fops;
1701 #if (defined(CONFIG_TRACER_MAX_TRACE) || defined(CONFIG_HWLAT_TRACER)) && \
1702 defined(CONFIG_FSNOTIFY)
1704 static struct workqueue_struct *fsnotify_wq;
1706 static void latency_fsnotify_workfn(struct work_struct *work)
1708 struct trace_array *tr = container_of(work, struct trace_array,
1710 fsnotify_inode(tr->d_max_latency->d_inode, FS_MODIFY);
1713 static void latency_fsnotify_workfn_irq(struct irq_work *iwork)
1715 struct trace_array *tr = container_of(iwork, struct trace_array,
1717 queue_work(fsnotify_wq, &tr->fsnotify_work);
1720 static void trace_create_maxlat_file(struct trace_array *tr,
1721 struct dentry *d_tracer)
1723 INIT_WORK(&tr->fsnotify_work, latency_fsnotify_workfn);
1724 init_irq_work(&tr->fsnotify_irqwork, latency_fsnotify_workfn_irq);
1725 tr->d_max_latency = trace_create_file("tracing_max_latency", 0644,
1726 d_tracer, &tr->max_latency,
1727 &tracing_max_lat_fops);
1730 __init static int latency_fsnotify_init(void)
1732 fsnotify_wq = alloc_workqueue("tr_max_lat_wq",
1733 WQ_UNBOUND | WQ_HIGHPRI, 0);
1735 pr_err("Unable to allocate tr_max_lat_wq\n");
1741 late_initcall_sync(latency_fsnotify_init);
1743 void latency_fsnotify(struct trace_array *tr)
1748 * We cannot call queue_work(&tr->fsnotify_work) from here because it's
1749 * possible that we are called from __schedule() or do_idle(), which
1750 * could cause a deadlock.
1752 irq_work_queue(&tr->fsnotify_irqwork);
1756 * (defined(CONFIG_TRACER_MAX_TRACE) || defined(CONFIG_HWLAT_TRACER)) && \
1757 * defined(CONFIG_FSNOTIFY)
1761 #define trace_create_maxlat_file(tr, d_tracer) \
1762 trace_create_file("tracing_max_latency", 0644, d_tracer, \
1763 &tr->max_latency, &tracing_max_lat_fops)
1767 #ifdef CONFIG_TRACER_MAX_TRACE
1769 * Copy the new maximum trace into the separate maximum-trace
1770 * structure. (this way the maximum trace is permanently saved,
1771 * for later retrieval via /sys/kernel/tracing/tracing_max_latency)
1774 __update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu)
1776 struct array_buffer *trace_buf = &tr->array_buffer;
1777 struct array_buffer *max_buf = &tr->max_buffer;
1778 struct trace_array_cpu *data = per_cpu_ptr(trace_buf->data, cpu);
1779 struct trace_array_cpu *max_data = per_cpu_ptr(max_buf->data, cpu);
1782 max_buf->time_start = data->preempt_timestamp;
1784 max_data->saved_latency = tr->max_latency;
1785 max_data->critical_start = data->critical_start;
1786 max_data->critical_end = data->critical_end;
1788 strncpy(max_data->comm, tsk->comm, TASK_COMM_LEN);
1789 max_data->pid = tsk->pid;
1791 * If tsk == current, then use current_uid(), as that does not use
1792 * RCU. The irq tracer can be called out of RCU scope.
1795 max_data->uid = current_uid();
1797 max_data->uid = task_uid(tsk);
1799 max_data->nice = tsk->static_prio - 20 - MAX_RT_PRIO;
1800 max_data->policy = tsk->policy;
1801 max_data->rt_priority = tsk->rt_priority;
1803 /* record this tasks comm */
1804 tracing_record_cmdline(tsk);
1805 latency_fsnotify(tr);
1809 * update_max_tr - snapshot all trace buffers from global_trace to max_tr
1811 * @tsk: the task with the latency
1812 * @cpu: The cpu that initiated the trace.
1813 * @cond_data: User data associated with a conditional snapshot
1815 * Flip the buffers between the @tr and the max_tr and record information
1816 * about which task was the cause of this latency.
1819 update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu,
1825 WARN_ON_ONCE(!irqs_disabled());
1827 if (!tr->allocated_snapshot) {
1828 /* Only the nop tracer should hit this when disabling */
1829 WARN_ON_ONCE(tr->current_trace != &nop_trace);
1833 arch_spin_lock(&tr->max_lock);
1835 /* Inherit the recordable setting from array_buffer */
1836 if (ring_buffer_record_is_set_on(tr->array_buffer.buffer))
1837 ring_buffer_record_on(tr->max_buffer.buffer);
1839 ring_buffer_record_off(tr->max_buffer.buffer);
1841 #ifdef CONFIG_TRACER_SNAPSHOT
1842 if (tr->cond_snapshot && !tr->cond_snapshot->update(tr, cond_data))
1845 swap(tr->array_buffer.buffer, tr->max_buffer.buffer);
1847 __update_max_tr(tr, tsk, cpu);
1850 arch_spin_unlock(&tr->max_lock);
1854 * update_max_tr_single - only copy one trace over, and reset the rest
1856 * @tsk: task with the latency
1857 * @cpu: the cpu of the buffer to copy.
1859 * Flip the trace of a single CPU buffer between the @tr and the max_tr.
1862 update_max_tr_single(struct trace_array *tr, struct task_struct *tsk, int cpu)
1869 WARN_ON_ONCE(!irqs_disabled());
1870 if (!tr->allocated_snapshot) {
1871 /* Only the nop tracer should hit this when disabling */
1872 WARN_ON_ONCE(tr->current_trace != &nop_trace);
1876 arch_spin_lock(&tr->max_lock);
1878 ret = ring_buffer_swap_cpu(tr->max_buffer.buffer, tr->array_buffer.buffer, cpu);
1880 if (ret == -EBUSY) {
1882 * We failed to swap the buffer due to a commit taking
1883 * place on this CPU. We fail to record, but we reset
1884 * the max trace buffer (no one writes directly to it)
1885 * and flag that it failed.
1886 * Another reason is that a resize is in progress.
1888 trace_array_printk_buf(tr->max_buffer.buffer, _THIS_IP_,
1889 "Failed to swap buffers due to commit or resize in progress\n");
1892 WARN_ON_ONCE(ret && ret != -EAGAIN && ret != -EBUSY);
1894 __update_max_tr(tr, tsk, cpu);
1895 arch_spin_unlock(&tr->max_lock);
1897 /* Any waiters on the old snapshot buffer need to wake up */
1898 ring_buffer_wake_waiters(tr->array_buffer.buffer, RING_BUFFER_ALL_CPUS);
1900 #endif /* CONFIG_TRACER_MAX_TRACE */
1902 static int wait_on_pipe(struct trace_iterator *iter, int full)
1906 /* Iterators are static, they should be filled or empty */
1907 if (trace_buffer_iter(iter, iter->cpu_file))
1910 ret = ring_buffer_wait(iter->array_buffer->buffer, iter->cpu_file, full);
1912 #ifdef CONFIG_TRACER_MAX_TRACE
1914 * Make sure this is still the snapshot buffer, as if a snapshot were
1915 * to happen, this would now be the main buffer.
1918 iter->array_buffer = &iter->tr->max_buffer;
1923 #ifdef CONFIG_FTRACE_STARTUP_TEST
1924 static bool selftests_can_run;
1926 struct trace_selftests {
1927 struct list_head list;
1928 struct tracer *type;
1931 static LIST_HEAD(postponed_selftests);
1933 static int save_selftest(struct tracer *type)
1935 struct trace_selftests *selftest;
1937 selftest = kmalloc(sizeof(*selftest), GFP_KERNEL);
1941 selftest->type = type;
1942 list_add(&selftest->list, &postponed_selftests);
1946 static int run_tracer_selftest(struct tracer *type)
1948 struct trace_array *tr = &global_trace;
1949 struct tracer *saved_tracer = tr->current_trace;
1952 if (!type->selftest || tracing_selftest_disabled)
1956 * If a tracer registers early in boot up (before scheduling is
1957 * initialized and such), then do not run its selftests yet.
1958 * Instead, run it a little later in the boot process.
1960 if (!selftests_can_run)
1961 return save_selftest(type);
1964 * Run a selftest on this tracer.
1965 * Here we reset the trace buffer, and set the current
1966 * tracer to be this tracer. The tracer can then run some
1967 * internal tracing to verify that everything is in order.
1968 * If we fail, we do not register this tracer.
1970 tracing_reset_online_cpus(&tr->array_buffer);
1972 tr->current_trace = type;
1974 #ifdef CONFIG_TRACER_MAX_TRACE
1975 if (type->use_max_tr) {
1976 /* If we expanded the buffers, make sure the max is expanded too */
1977 if (ring_buffer_expanded)
1978 ring_buffer_resize(tr->max_buffer.buffer, trace_buf_size,
1979 RING_BUFFER_ALL_CPUS);
1980 tr->allocated_snapshot = true;
1984 /* the test is responsible for initializing and enabling */
1985 pr_info("Testing tracer %s: ", type->name);
1986 ret = type->selftest(type, tr);
1987 /* the test is responsible for resetting too */
1988 tr->current_trace = saved_tracer;
1990 printk(KERN_CONT "FAILED!\n");
1991 /* Add the warning after printing 'FAILED' */
1995 /* Only reset on passing, to avoid touching corrupted buffers */
1996 tracing_reset_online_cpus(&tr->array_buffer);
1998 #ifdef CONFIG_TRACER_MAX_TRACE
1999 if (type->use_max_tr) {
2000 tr->allocated_snapshot = false;
2002 /* Shrink the max buffer again */
2003 if (ring_buffer_expanded)
2004 ring_buffer_resize(tr->max_buffer.buffer, 1,
2005 RING_BUFFER_ALL_CPUS);
2009 printk(KERN_CONT "PASSED\n");
2013 static __init int init_trace_selftests(void)
2015 struct trace_selftests *p, *n;
2016 struct tracer *t, **last;
2019 selftests_can_run = true;
2021 mutex_lock(&trace_types_lock);
2023 if (list_empty(&postponed_selftests))
2026 pr_info("Running postponed tracer tests:\n");
2028 tracing_selftest_running = true;
2029 list_for_each_entry_safe(p, n, &postponed_selftests, list) {
2030 /* This loop can take minutes when sanitizers are enabled, so
2031 * let's make sure we allow RCU processing.
2034 ret = run_tracer_selftest(p->type);
2035 /* If the test fails, then warn and remove from available_tracers */
2037 WARN(1, "tracer: %s failed selftest, disabling\n",
2039 last = &trace_types;
2040 for (t = trace_types; t; t = t->next) {
2051 tracing_selftest_running = false;
2054 mutex_unlock(&trace_types_lock);
2058 core_initcall(init_trace_selftests);
2060 static inline int run_tracer_selftest(struct tracer *type)
2064 #endif /* CONFIG_FTRACE_STARTUP_TEST */
2066 static void add_tracer_options(struct trace_array *tr, struct tracer *t);
2068 static void __init apply_trace_boot_options(void);
2071 * register_tracer - register a tracer with the ftrace system.
2072 * @type: the plugin for the tracer
2074 * Register a new plugin tracer.
2076 int __init register_tracer(struct tracer *type)
2082 pr_info("Tracer must have a name\n");
2086 if (strlen(type->name) >= MAX_TRACER_SIZE) {
2087 pr_info("Tracer has a name longer than %d\n", MAX_TRACER_SIZE);
2091 if (security_locked_down(LOCKDOWN_TRACEFS)) {
2092 pr_warn("Can not register tracer %s due to lockdown\n",
2097 mutex_lock(&trace_types_lock);
2099 tracing_selftest_running = true;
2101 for (t = trace_types; t; t = t->next) {
2102 if (strcmp(type->name, t->name) == 0) {
2104 pr_info("Tracer %s already registered\n",
2111 if (!type->set_flag)
2112 type->set_flag = &dummy_set_flag;
2114 /* allocate a dummy tracer_flags */
2115 type->flags = kmalloc(sizeof(*type->flags), GFP_KERNEL);
2120 type->flags->val = 0;
2121 type->flags->opts = dummy_tracer_opt;
2123 if (!type->flags->opts)
2124 type->flags->opts = dummy_tracer_opt;
2126 /* store the tracer for __set_tracer_option */
2127 type->flags->trace = type;
2129 ret = run_tracer_selftest(type);
2133 type->next = trace_types;
2135 add_tracer_options(&global_trace, type);
2138 tracing_selftest_running = false;
2139 mutex_unlock(&trace_types_lock);
2141 if (ret || !default_bootup_tracer)
2144 if (strncmp(default_bootup_tracer, type->name, MAX_TRACER_SIZE))
2147 printk(KERN_INFO "Starting tracer '%s'\n", type->name);
2148 /* Do we want this tracer to start on bootup? */
2149 tracing_set_tracer(&global_trace, type->name);
2150 default_bootup_tracer = NULL;
2152 apply_trace_boot_options();
2154 /* disable other selftests, since this will break them. */
2155 disable_tracing_selftest("running a tracer");
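/*
 * Registration sketch (illustrative): a minimal tracer plugin registers
 * itself from an __init call, as the built-in tracers do. The names and
 * callbacks below are hypothetical placeholders; only the struct tracer
 * fields and register_tracer() itself are the real interface.
 *
 *	static int my_tracer_init(struct trace_array *tr)
 *	{
 *		return 0;
 *	}
 *
 *	static void my_tracer_reset(struct trace_array *tr)
 *	{
 *	}
 *
 *	static struct tracer my_tracer __read_mostly = {
 *		.name  = "my_tracer",
 *		.init  = my_tracer_init,
 *		.reset = my_tracer_reset,
 *	};
 *
 *	static __init int my_tracer_setup(void)
 *	{
 *		return register_tracer(&my_tracer);
 *	}
 *	core_initcall(my_tracer_setup);
 */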
2161 static void tracing_reset_cpu(struct array_buffer *buf, int cpu)
2163 struct trace_buffer *buffer = buf->buffer;
2168 ring_buffer_record_disable(buffer);
2170 /* Make sure all commits have finished */
2172 ring_buffer_reset_cpu(buffer, cpu);
2174 ring_buffer_record_enable(buffer);
2177 void tracing_reset_online_cpus(struct array_buffer *buf)
2179 struct trace_buffer *buffer = buf->buffer;
2184 ring_buffer_record_disable(buffer);
2186 /* Make sure all commits have finished */
2189 buf->time_start = buffer_ftrace_now(buf, buf->cpu);
2191 ring_buffer_reset_online_cpus(buffer);
2193 ring_buffer_record_enable(buffer);
2196 /* Must have trace_types_lock held */
2197 void tracing_reset_all_online_cpus_unlocked(void)
2199 struct trace_array *tr;
2201 lockdep_assert_held(&trace_types_lock);
2203 list_for_each_entry(tr, &ftrace_trace_arrays, list) {
2204 if (!tr->clear_trace)
2206 tr->clear_trace = false;
2207 tracing_reset_online_cpus(&tr->array_buffer);
2208 #ifdef CONFIG_TRACER_MAX_TRACE
2209 tracing_reset_online_cpus(&tr->max_buffer);
2214 void tracing_reset_all_online_cpus(void)
2216 mutex_lock(&trace_types_lock);
2217 tracing_reset_all_online_cpus_unlocked();
2218 mutex_unlock(&trace_types_lock);
2222 * The tgid_map array maps from pid to tgid; i.e. the value stored at index i
2223 * is the tgid last observed corresponding to pid=i.
2225 static int *tgid_map;
2227 /* The maximum valid index into tgid_map. */
2228 static size_t tgid_map_max;
2230 #define SAVED_CMDLINES_DEFAULT 128
2231 #define NO_CMDLINE_MAP UINT_MAX
2233 * Preemption must be disabled before acquiring trace_cmdline_lock.
2234 * The various trace_arrays' max_lock must be acquired in a context
2235 * where interrupts are disabled.
2237 static arch_spinlock_t trace_cmdline_lock = __ARCH_SPIN_LOCK_UNLOCKED;
2238 struct saved_cmdlines_buffer {
2239 unsigned map_pid_to_cmdline[PID_MAX_DEFAULT+1];
2240 unsigned *map_cmdline_to_pid;
2241 unsigned cmdline_num;
2243 char saved_cmdlines[];
2245 static struct saved_cmdlines_buffer *savedcmd;
2247 static inline char *get_saved_cmdlines(int idx)
2249 return &savedcmd->saved_cmdlines[idx * TASK_COMM_LEN];
2252 static inline void set_cmdline(int idx, const char *cmdline)
2254 strncpy(get_saved_cmdlines(idx), cmdline, TASK_COMM_LEN);
2257 static void free_saved_cmdlines_buffer(struct saved_cmdlines_buffer *s)
2259 int order = get_order(sizeof(*s) + s->cmdline_num * TASK_COMM_LEN);
2261 kfree(s->map_cmdline_to_pid);
2263 free_pages((unsigned long)s, order);
2266 static struct saved_cmdlines_buffer *allocate_cmdlines_buffer(unsigned int val)
2268 struct saved_cmdlines_buffer *s;
2270 int orig_size, size;
2273 /* Figure out how much is needed to hold the given number of cmdlines */
2274 orig_size = sizeof(*s) + val * TASK_COMM_LEN;
2275 order = get_order(orig_size);
2276 size = 1 << (order + PAGE_SHIFT);
2277 page = alloc_pages(GFP_KERNEL, order);
2281 s = page_address(page);
2282 kmemleak_alloc(s, size, 1, GFP_KERNEL);
2283 memset(s, 0, sizeof(*s));
2285 /* Round up to actual allocation */
2286 val = (size - sizeof(*s)) / TASK_COMM_LEN;
2287 s->cmdline_num = val;
2289 s->map_cmdline_to_pid = kmalloc_array(val,
2290 sizeof(*s->map_cmdline_to_pid),
2292 if (!s->map_cmdline_to_pid) {
2293 free_saved_cmdlines_buffer(s);
2298 memset(&s->map_pid_to_cmdline, NO_CMDLINE_MAP,
2299 sizeof(s->map_pid_to_cmdline));
2300 memset(s->map_cmdline_to_pid, NO_CMDLINE_MAP,
2301 val * sizeof(*s->map_cmdline_to_pid));
2306 static int trace_create_savedcmd(void)
2308 savedcmd = allocate_cmdlines_buffer(SAVED_CMDLINES_DEFAULT);
2310 return savedcmd ? 0 : -ENOMEM;
2313 int is_tracing_stopped(void)
2315 return global_trace.stop_count;
2318 static void tracing_start_tr(struct trace_array *tr)
2320 struct trace_buffer *buffer;
2321 unsigned long flags;
2323 if (tracing_disabled)
2326 raw_spin_lock_irqsave(&tr->start_lock, flags);
2327 if (--tr->stop_count) {
2328 if (WARN_ON_ONCE(tr->stop_count < 0)) {
2329 /* Someone screwed up their debugging */
2335 /* Prevent the buffers from switching */
2336 arch_spin_lock(&tr->max_lock);
2338 buffer = tr->array_buffer.buffer;
2340 ring_buffer_record_enable(buffer);
2342 #ifdef CONFIG_TRACER_MAX_TRACE
2343 buffer = tr->max_buffer.buffer;
2345 ring_buffer_record_enable(buffer);
2348 arch_spin_unlock(&tr->max_lock);
2351 raw_spin_unlock_irqrestore(&tr->start_lock, flags);
2355 * tracing_start - quick start of the tracer
2357 * If tracing is enabled but was stopped by tracing_stop,
2358 * this will start the tracer back up.
2360 void tracing_start(void)
2363 return tracing_start_tr(&global_trace);
2366 static void tracing_stop_tr(struct trace_array *tr)
2368 struct trace_buffer *buffer;
2369 unsigned long flags;
2371 raw_spin_lock_irqsave(&tr->start_lock, flags);
2372 if (tr->stop_count++)
2375 /* Prevent the buffers from switching */
2376 arch_spin_lock(&tr->max_lock);
2378 buffer = tr->array_buffer.buffer;
2380 ring_buffer_record_disable(buffer);
2382 #ifdef CONFIG_TRACER_MAX_TRACE
2383 buffer = tr->max_buffer.buffer;
2385 ring_buffer_record_disable(buffer);
2388 arch_spin_unlock(&tr->max_lock);
2391 raw_spin_unlock_irqrestore(&tr->start_lock, flags);
2395 * tracing_stop - quick stop of the tracer
2397  * Lightweight way to stop tracing. Use in conjunction with tracing_start().
2400 void tracing_stop(void)
2402 return tracing_stop_tr(&global_trace);
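/*
 * A minimal usage sketch (illustration only): callers pair the two calls,
 * and pairs may nest because tracing_stop_tr()/tracing_start_tr() count
 * via tr->stop_count:
 *
 *	tracing_stop();
 *	...	inspect or copy the trace buffers
 *	tracing_start();
 */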
2405 static int trace_save_cmdline(struct task_struct *tsk)
2409 /* treat recording of idle task as a success */
2413 tpid = tsk->pid & (PID_MAX_DEFAULT - 1);
2416 * It's not the end of the world if we don't get
2417 * the lock, but we also don't want to spin
2418 * nor do we want to disable interrupts,
2419 * so if we miss here, then better luck next time.
2421  * This is called from within the scheduler and the wakeup path, so
2422  * interrupts had better be disabled and the run queue lock had better be held.
2424 lockdep_assert_preemption_disabled();
2425 if (!arch_spin_trylock(&trace_cmdline_lock))
2428 idx = savedcmd->map_pid_to_cmdline[tpid];
2429 if (idx == NO_CMDLINE_MAP) {
2430 idx = (savedcmd->cmdline_idx + 1) % savedcmd->cmdline_num;
2432 savedcmd->map_pid_to_cmdline[tpid] = idx;
2433 savedcmd->cmdline_idx = idx;
2436 savedcmd->map_cmdline_to_pid[idx] = tsk->pid;
2437 set_cmdline(idx, tsk->comm);
2439 arch_spin_unlock(&trace_cmdline_lock);
2444 static void __trace_find_cmdline(int pid, char comm[])
2450 strcpy(comm, "<idle>");
2454 if (WARN_ON_ONCE(pid < 0)) {
2455 strcpy(comm, "<XXX>");
2459 tpid = pid & (PID_MAX_DEFAULT - 1);
2460 map = savedcmd->map_pid_to_cmdline[tpid];
2461 if (map != NO_CMDLINE_MAP) {
2462 tpid = savedcmd->map_cmdline_to_pid[map];
2464 strlcpy(comm, get_saved_cmdlines(map), TASK_COMM_LEN);
2468 strcpy(comm, "<...>");
2471 void trace_find_cmdline(int pid, char comm[])
2474 arch_spin_lock(&trace_cmdline_lock);
2476 __trace_find_cmdline(pid, comm);
2478 arch_spin_unlock(&trace_cmdline_lock);
2482 static int *trace_find_tgid_ptr(int pid)
2485 * Pairs with the smp_store_release in set_tracer_flag() to ensure that
2486 * if we observe a non-NULL tgid_map then we also observe the correct
2489 int *map = smp_load_acquire(&tgid_map);
2491 if (unlikely(!map || pid > tgid_map_max))
2497 int trace_find_tgid(int pid)
2499 int *ptr = trace_find_tgid_ptr(pid);
2501 return ptr ? *ptr : 0;
2504 static int trace_save_tgid(struct task_struct *tsk)
2508 /* treat recording of idle task as a success */
2512 ptr = trace_find_tgid_ptr(tsk->pid);
2520 static bool tracing_record_taskinfo_skip(int flags)
2522 if (unlikely(!(flags & (TRACE_RECORD_CMDLINE | TRACE_RECORD_TGID))))
2524 if (!__this_cpu_read(trace_taskinfo_save))
2530 * tracing_record_taskinfo - record the task info of a task
2532 * @task: task to record
2533 * @flags: TRACE_RECORD_CMDLINE for recording comm
2534 * TRACE_RECORD_TGID for recording tgid
2536 void tracing_record_taskinfo(struct task_struct *task, int flags)
2540 if (tracing_record_taskinfo_skip(flags))
2544 * Record as much task information as possible. If some fail, continue
2545 * to try to record the others.
2547 done = !(flags & TRACE_RECORD_CMDLINE) || trace_save_cmdline(task);
2548 done &= !(flags & TRACE_RECORD_TGID) || trace_save_tgid(task);
2550 /* If recording any information failed, retry again soon. */
2554 __this_cpu_write(trace_taskinfo_save, false);
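/*
 * For illustration only, a caller wanting both pieces of information
 * recorded for the current task would do:
 *
 *	tracing_record_taskinfo(current, TRACE_RECORD_CMDLINE |
 *					 TRACE_RECORD_TGID);
 *
 * tracing_record_cmdline() and tracing_record_tgid() below are thin
 * wrappers passing a single flag each.
 */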
2558 * tracing_record_taskinfo_sched_switch - record task info for sched_switch
2560 * @prev: previous task during sched_switch
2561 * @next: next task during sched_switch
2562 * @flags: TRACE_RECORD_CMDLINE for recording comm
2563 * TRACE_RECORD_TGID for recording tgid
2565 void tracing_record_taskinfo_sched_switch(struct task_struct *prev,
2566 struct task_struct *next, int flags)
2570 if (tracing_record_taskinfo_skip(flags))
2574 * Record as much task information as possible. If some fail, continue
2575 * to try to record the others.
2577 done = !(flags & TRACE_RECORD_CMDLINE) || trace_save_cmdline(prev);
2578 done &= !(flags & TRACE_RECORD_CMDLINE) || trace_save_cmdline(next);
2579 done &= !(flags & TRACE_RECORD_TGID) || trace_save_tgid(prev);
2580 done &= !(flags & TRACE_RECORD_TGID) || trace_save_tgid(next);
2582 /* If recording any information failed, retry again soon. */
2586 __this_cpu_write(trace_taskinfo_save, false);
2589 /* Helpers to record a specific task information */
2590 void tracing_record_cmdline(struct task_struct *task)
2592 tracing_record_taskinfo(task, TRACE_RECORD_CMDLINE);
2595 void tracing_record_tgid(struct task_struct *task)
2597 tracing_record_taskinfo(task, TRACE_RECORD_TGID);
2601 * Several functions return TRACE_TYPE_PARTIAL_LINE if the trace_seq
2602 * overflowed, and TRACE_TYPE_HANDLED otherwise. This helper function
2603 * simplifies those functions and keeps them in sync.
2605 enum print_line_t trace_handle_return(struct trace_seq *s)
2607 return trace_seq_has_overflowed(s) ?
2608 TRACE_TYPE_PARTIAL_LINE : TRACE_TYPE_HANDLED;
2610 EXPORT_SYMBOL_GPL(trace_handle_return);
2613 tracing_generic_entry_update(struct trace_entry *entry, unsigned short type,
2614 unsigned long flags, int pc)
2616 struct task_struct *tsk = current;
2618 entry->preempt_count = pc & 0xff;
2619 entry->pid = (tsk) ? tsk->pid : 0;
2622 #ifdef CONFIG_TRACE_IRQFLAGS_SUPPORT
2623 (irqs_disabled_flags(flags) ? TRACE_FLAG_IRQS_OFF : 0) |
2625 TRACE_FLAG_IRQS_NOSUPPORT |
2627 ((pc & NMI_MASK ) ? TRACE_FLAG_NMI : 0) |
2628 ((pc & HARDIRQ_MASK) ? TRACE_FLAG_HARDIRQ : 0) |
2629 ((pc & SOFTIRQ_OFFSET) ? TRACE_FLAG_SOFTIRQ : 0) |
2630 (tif_need_resched() ? TRACE_FLAG_NEED_RESCHED : 0) |
2631 (test_preempt_need_resched() ? TRACE_FLAG_PREEMPT_RESCHED : 0);
2633 EXPORT_SYMBOL_GPL(tracing_generic_entry_update);
2635 struct ring_buffer_event *
2636 trace_buffer_lock_reserve(struct trace_buffer *buffer,
2639 unsigned long flags, int pc)
2641 return __trace_buffer_lock_reserve(buffer, type, len, flags, pc);
2644 DEFINE_PER_CPU(struct ring_buffer_event *, trace_buffered_event);
2645 DEFINE_PER_CPU(int, trace_buffered_event_cnt);
2646 static int trace_buffered_event_ref;
2649 * trace_buffered_event_enable - enable buffering events
2651 * When events are being filtered, it is quicker to use a temporary
2652 * buffer to write the event data into if there's a likely chance
2653 * that it will not be committed. The discard of the ring buffer
2654  * is not as fast as committing, and is much slower than copying into the temp buffer.
2657 * When an event is to be filtered, allocate per cpu buffers to
2658 * write the event data into, and if the event is filtered and discarded
2659  * it is simply dropped; otherwise, the entire data is committed to the ring buffer.
2662 void trace_buffered_event_enable(void)
2664 struct ring_buffer_event *event;
2668 WARN_ON_ONCE(!mutex_is_locked(&event_mutex));
2670 if (trace_buffered_event_ref++)
2673 for_each_tracing_cpu(cpu) {
2674 page = alloc_pages_node(cpu_to_node(cpu),
2675 GFP_KERNEL | __GFP_NORETRY, 0);
2676 /* This is just an optimization and can handle failures */
2678 pr_err("Failed to allocate event buffer\n");
2682 event = page_address(page);
2683 memset(event, 0, sizeof(*event));
2685 per_cpu(trace_buffered_event, cpu) = event;
2688 if (cpu == smp_processor_id() &&
2689 __this_cpu_read(trace_buffered_event) !=
2690 per_cpu(trace_buffered_event, cpu))
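/*
 * A rough sketch of the expected pairing (illustration only; the real
 * callers sit in the event filtering code and hold event_mutex, as the
 * WARN_ON_ONCE() above asserts):
 *
 *	mutex_lock(&event_mutex);
 *	trace_buffered_event_enable();
 *	...	install or update the filter
 *	trace_buffered_event_disable();
 *	mutex_unlock(&event_mutex);
 *
 * trace_buffered_event_ref makes nested enable/disable pairs safe.
 */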
2696 static void enable_trace_buffered_event(void *data)
2698 /* Probably not needed, but do it anyway */
2700 this_cpu_dec(trace_buffered_event_cnt);
2703 static void disable_trace_buffered_event(void *data)
2705 this_cpu_inc(trace_buffered_event_cnt);
2709 * trace_buffered_event_disable - disable buffering events
2711 * When a filter is removed, it is faster to not use the buffered
2712 * events, and to commit directly into the ring buffer. Free up
2713 * the temp buffers when there are no more users. This requires
2714 * special synchronization with current events.
2716 void trace_buffered_event_disable(void)
2720 WARN_ON_ONCE(!mutex_is_locked(&event_mutex));
2722 if (WARN_ON_ONCE(!trace_buffered_event_ref))
2725 if (--trace_buffered_event_ref)
2728 /* For each CPU, set the buffer as used. */
2729 on_each_cpu_mask(tracing_buffer_mask, disable_trace_buffered_event,
2732 /* Wait for all current users to finish */
2735 for_each_tracing_cpu(cpu) {
2736 free_page((unsigned long)per_cpu(trace_buffered_event, cpu));
2737 per_cpu(trace_buffered_event, cpu) = NULL;
2741 * Wait for all CPUs that potentially started checking if they can use
2742 * their event buffer only after the previous synchronize_rcu() call and
2743  * they still read a valid pointer from trace_buffered_event. It must be
2744  * ensured that they don't see a cleared trace_buffered_event_cnt, else they
2745  * could wrongly decide to use the pointed-to buffer which is now freed.
2749 /* For each CPU, relinquish the buffer */
2750 on_each_cpu_mask(tracing_buffer_mask, enable_trace_buffered_event, NULL,
2754 static struct trace_buffer *temp_buffer;
2756 struct ring_buffer_event *
2757 trace_event_buffer_lock_reserve(struct trace_buffer **current_rb,
2758 struct trace_event_file *trace_file,
2759 int type, unsigned long len,
2760 unsigned long flags, int pc)
2762 struct ring_buffer_event *entry;
2765 *current_rb = trace_file->tr->array_buffer.buffer;
2767 if (!ring_buffer_time_stamp_abs(*current_rb) && (trace_file->flags &
2768 (EVENT_FILE_FL_SOFT_DISABLED | EVENT_FILE_FL_FILTERED)) &&
2769 (entry = this_cpu_read(trace_buffered_event))) {
2770 /* Try to use the per cpu buffer first */
2771 val = this_cpu_inc_return(trace_buffered_event_cnt);
2772 if ((len < (PAGE_SIZE - sizeof(*entry) - sizeof(entry->array[0]))) && val == 1) {
2773 trace_event_setup(entry, type, flags, pc);
2774 entry->array[0] = len;
2777 this_cpu_dec(trace_buffered_event_cnt);
2780 entry = __trace_buffer_lock_reserve(*current_rb,
2781 type, len, flags, pc);
2783 * If tracing is off, but we have triggers enabled
2784 * we still need to look at the event data. Use the temp_buffer
2785  * to store the trace event for the trigger to use. It's recursion
2786  * safe and will not be recorded anywhere.
2788 if (!entry && trace_file->flags & EVENT_FILE_FL_TRIGGER_COND) {
2789 *current_rb = temp_buffer;
2790 entry = __trace_buffer_lock_reserve(*current_rb,
2791 type, len, flags, pc);
2795 EXPORT_SYMBOL_GPL(trace_event_buffer_lock_reserve);
2797 static DEFINE_RAW_SPINLOCK(tracepoint_iter_lock);
2798 static DEFINE_MUTEX(tracepoint_printk_mutex);
2800 static void output_printk(struct trace_event_buffer *fbuffer)
2802 struct trace_event_call *event_call;
2803 struct trace_event_file *file;
2804 struct trace_event *event;
2805 unsigned long flags;
2806 struct trace_iterator *iter = tracepoint_print_iter;
2808 /* We should never get here if iter is NULL */
2809 if (WARN_ON_ONCE(!iter))
2812 event_call = fbuffer->trace_file->event_call;
2813 if (!event_call || !event_call->event.funcs ||
2814 !event_call->event.funcs->trace)
2817 file = fbuffer->trace_file;
2818 if (test_bit(EVENT_FILE_FL_SOFT_DISABLED_BIT, &file->flags) ||
2819 (unlikely(file->flags & EVENT_FILE_FL_FILTERED) &&
2820 !filter_match_preds(file->filter, fbuffer->entry)))
2823 event = &fbuffer->trace_file->event_call->event;
2825 raw_spin_lock_irqsave(&tracepoint_iter_lock, flags);
2826 trace_seq_init(&iter->seq);
2827 iter->ent = fbuffer->entry;
2828 event_call->event.funcs->trace(iter, 0, event);
2829 trace_seq_putc(&iter->seq, 0);
2830 printk("%s", iter->seq.buffer);
2832 raw_spin_unlock_irqrestore(&tracepoint_iter_lock, flags);
2835 int tracepoint_printk_sysctl(struct ctl_table *table, int write,
2836 void *buffer, size_t *lenp,
2839 int save_tracepoint_printk;
2842 mutex_lock(&tracepoint_printk_mutex);
2843 save_tracepoint_printk = tracepoint_printk;
2845 ret = proc_dointvec(table, write, buffer, lenp, ppos);
2848 * This will force exiting early, as tracepoint_printk
2849  * is always zero when tracepoint_print_iter is not allocated
2851 if (!tracepoint_print_iter)
2852 tracepoint_printk = 0;
2854 if (save_tracepoint_printk == tracepoint_printk)
2857 if (tracepoint_printk)
2858 static_key_enable(&tracepoint_printk_key.key);
2860 static_key_disable(&tracepoint_printk_key.key);
2863 mutex_unlock(&tracepoint_printk_mutex);
2868 void trace_event_buffer_commit(struct trace_event_buffer *fbuffer)
2870 if (static_key_false(&tracepoint_printk_key.key))
2871 output_printk(fbuffer);
2873 if (static_branch_unlikely(&trace_event_exports_enabled))
2874 ftrace_exports(fbuffer->event, TRACE_EXPORT_EVENT);
2875 event_trigger_unlock_commit_regs(fbuffer->trace_file, fbuffer->buffer,
2876 fbuffer->event, fbuffer->entry,
2877 fbuffer->flags, fbuffer->pc, fbuffer->regs);
2879 EXPORT_SYMBOL_GPL(trace_event_buffer_commit);
2884 * trace_buffer_unlock_commit_regs()
2885 * trace_event_buffer_commit()
2886 * trace_event_raw_event_xxx()
2888 # define STACK_SKIP 3
2890 void trace_buffer_unlock_commit_regs(struct trace_array *tr,
2891 struct trace_buffer *buffer,
2892 struct ring_buffer_event *event,
2893 unsigned long flags, int pc,
2894 struct pt_regs *regs)
2896 __buffer_unlock_commit(buffer, event);
2899 * If regs is not set, then skip the necessary functions.
2900 * Note, we can still get here via blktrace, wakeup tracer
2901 * and mmiotrace, but that's ok if they lose a function or
2902 * two. They are not that meaningful.
2904 ftrace_trace_stack(tr, buffer, flags, regs ? 0 : STACK_SKIP, pc, regs);
2905 ftrace_trace_userstack(tr, buffer, flags, pc);
2909 * Similar to trace_buffer_unlock_commit_regs() but do not dump stack.
2912 trace_buffer_unlock_commit_nostack(struct trace_buffer *buffer,
2913 struct ring_buffer_event *event)
2915 __buffer_unlock_commit(buffer, event);
2919 trace_function(struct trace_array *tr,
2920 unsigned long ip, unsigned long parent_ip, unsigned long flags,
2923 struct trace_event_call *call = &event_function;
2924 struct trace_buffer *buffer = tr->array_buffer.buffer;
2925 struct ring_buffer_event *event;
2926 struct ftrace_entry *entry;
2928 event = __trace_buffer_lock_reserve(buffer, TRACE_FN, sizeof(*entry),
2932 entry = ring_buffer_event_data(event);
2934 entry->parent_ip = parent_ip;
2936 if (!call_filter_check_discard(call, entry, buffer, event)) {
2937 if (static_branch_unlikely(&trace_function_exports_enabled))
2938 ftrace_exports(event, TRACE_EXPORT_FUNCTION);
2939 __buffer_unlock_commit(buffer, event);
2943 #ifdef CONFIG_STACKTRACE
2945 /* Allow 4 levels of nesting: normal, softirq, irq, NMI */
2946 #define FTRACE_KSTACK_NESTING 4
2948 #define FTRACE_KSTACK_ENTRIES (PAGE_SIZE / FTRACE_KSTACK_NESTING)
2950 struct ftrace_stack {
2951 unsigned long calls[FTRACE_KSTACK_ENTRIES];
2955 struct ftrace_stacks {
2956 struct ftrace_stack stacks[FTRACE_KSTACK_NESTING];
2959 static DEFINE_PER_CPU(struct ftrace_stacks, ftrace_stacks);
2960 static DEFINE_PER_CPU(int, ftrace_stack_reserve);
2962 static void __ftrace_trace_stack(struct trace_buffer *buffer,
2963 unsigned long flags,
2964 int skip, int pc, struct pt_regs *regs)
2966 struct trace_event_call *call = &event_kernel_stack;
2967 struct ring_buffer_event *event;
2968 unsigned int size, nr_entries;
2969 struct ftrace_stack *fstack;
2970 struct stack_entry *entry;
2974  * Add one, for this function and the call to save_stack_trace().
2975 * If regs is set, then these functions will not be in the way.
2977 #ifndef CONFIG_UNWINDER_ORC
2982 preempt_disable_notrace();
2984 stackidx = __this_cpu_inc_return(ftrace_stack_reserve) - 1;
2986 /* This should never happen. If it does, yell once and skip */
2987 if (WARN_ON_ONCE(stackidx >= FTRACE_KSTACK_NESTING))
2991 * The above __this_cpu_inc_return() is 'atomic' cpu local. An
2992 * interrupt will either see the value pre increment or post
2993 * increment. If the interrupt happens pre increment it will have
2994 * restored the counter when it returns. We just need a barrier to
2995 * keep gcc from moving things around.
2999 fstack = this_cpu_ptr(ftrace_stacks.stacks) + stackidx;
3000 size = ARRAY_SIZE(fstack->calls);
3003 nr_entries = stack_trace_save_regs(regs, fstack->calls,
3006 nr_entries = stack_trace_save(fstack->calls, size, skip);
3009 size = nr_entries * sizeof(unsigned long);
3010 event = __trace_buffer_lock_reserve(buffer, TRACE_STACK,
3011 (sizeof(*entry) - sizeof(entry->caller)) + size,
3015 entry = ring_buffer_event_data(event);
3017 memcpy(&entry->caller, fstack->calls, size);
3018 entry->size = nr_entries;
3020 if (!call_filter_check_discard(call, entry, buffer, event))
3021 __buffer_unlock_commit(buffer, event);
3024 /* Again, don't let gcc optimize things here */
3026 __this_cpu_dec(ftrace_stack_reserve);
3027 preempt_enable_notrace();
3031 static inline void ftrace_trace_stack(struct trace_array *tr,
3032 struct trace_buffer *buffer,
3033 unsigned long flags,
3034 int skip, int pc, struct pt_regs *regs)
3036 if (!(tr->trace_flags & TRACE_ITER_STACKTRACE))
3039 __ftrace_trace_stack(buffer, flags, skip, pc, regs);
3042 void __trace_stack(struct trace_array *tr, unsigned long flags, int skip,
3045 struct trace_buffer *buffer = tr->array_buffer.buffer;
3047 if (rcu_is_watching()) {
3048 __ftrace_trace_stack(buffer, flags, skip, pc, NULL);
3053 * When an NMI triggers, RCU is enabled via rcu_nmi_enter(),
3054 * but if the above rcu_is_watching() failed, then the NMI
3055 * triggered someplace critical, and rcu_irq_enter() should
3056 * not be called from NMI.
3058 if (unlikely(in_nmi()))
3061 rcu_irq_enter_irqson();
3062 __ftrace_trace_stack(buffer, flags, skip, pc, NULL);
3063 rcu_irq_exit_irqson();
3067 * trace_dump_stack - record a stack back trace in the trace buffer
3068 * @skip: Number of functions to skip (helper handlers)
3070 void trace_dump_stack(int skip)
3072 unsigned long flags;
3074 if (tracing_disabled || tracing_selftest_running)
3077 local_save_flags(flags);
3079 #ifndef CONFIG_UNWINDER_ORC
3080 /* Skip 1 to skip this function. */
3083 __ftrace_trace_stack(global_trace.array_buffer.buffer,
3084 flags, skip, preempt_count(), NULL);
3086 EXPORT_SYMBOL_GPL(trace_dump_stack);
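/*
 * Minimal usage sketch (illustration only): code that wants the current
 * call chain recorded in the trace buffer, without stopping the machine,
 * can simply do
 *
 *	trace_dump_stack(0);
 *
 * A non-zero @skip drops that many helper frames from the top of the
 * recorded stack.
 */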
3088 #ifdef CONFIG_USER_STACKTRACE_SUPPORT
3089 static DEFINE_PER_CPU(int, user_stack_count);
3092 ftrace_trace_userstack(struct trace_array *tr,
3093 struct trace_buffer *buffer, unsigned long flags, int pc)
3095 struct trace_event_call *call = &event_user_stack;
3096 struct ring_buffer_event *event;
3097 struct userstack_entry *entry;
3099 if (!(tr->trace_flags & TRACE_ITER_USERSTACKTRACE))
3103  * NMIs can not handle page faults, even with fixups.
3104  * Saving the user stack can (and often does) fault.
3106 if (unlikely(in_nmi()))
3110 * prevent recursion, since the user stack tracing may
3111 * trigger other kernel events.
3114 if (__this_cpu_read(user_stack_count))
3117 __this_cpu_inc(user_stack_count);
3119 event = __trace_buffer_lock_reserve(buffer, TRACE_USER_STACK,
3120 sizeof(*entry), flags, pc);
3122 goto out_drop_count;
3123 entry = ring_buffer_event_data(event);
3125 entry->tgid = current->tgid;
3126 memset(&entry->caller, 0, sizeof(entry->caller));
3128 stack_trace_save_user(entry->caller, FTRACE_STACK_ENTRIES);
3129 if (!call_filter_check_discard(call, entry, buffer, event))
3130 __buffer_unlock_commit(buffer, event);
3133 __this_cpu_dec(user_stack_count);
3137 #else /* CONFIG_USER_STACKTRACE_SUPPORT */
3138 static void ftrace_trace_userstack(struct trace_array *tr,
3139 struct trace_buffer *buffer,
3140 unsigned long flags, int pc)
3143 #endif /* !CONFIG_USER_STACKTRACE_SUPPORT */
3145 #endif /* CONFIG_STACKTRACE */
3147 /* created for use with alloc_percpu */
3148 struct trace_buffer_struct {
3150 char buffer[4][TRACE_BUF_SIZE];
3153 static struct trace_buffer_struct __percpu *trace_percpu_buffer;
3156  * This allows for lockless recording. If we're nested too deeply, then
3157 * this returns NULL.
3159 static char *get_trace_buf(void)
3161 struct trace_buffer_struct *buffer = this_cpu_ptr(trace_percpu_buffer);
3163 if (!trace_percpu_buffer || buffer->nesting >= 4)
3168 /* Interrupts must see nesting incremented before we use the buffer */
3170 return &buffer->buffer[buffer->nesting - 1][0];
3173 static void put_trace_buf(void)
3175 /* Don't let the decrement of nesting leak before this */
3177 this_cpu_dec(trace_percpu_buffer->nesting);
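/*
 * The expected calling pattern, roughly (illustration only; trace_vbprintk()
 * and __trace_array_vprintk() below are the real users):
 *
 *	preempt_disable_notrace();
 *	tbuffer = get_trace_buf();
 *	if (!tbuffer)
 *		goto out_nobuffer;
 *	...	format the message into tbuffer
 *	put_trace_buf();
 * out_nobuffer:
 *	preempt_enable_notrace();
 */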
3180 static int alloc_percpu_trace_buffer(void)
3182 struct trace_buffer_struct __percpu *buffers;
3184 if (trace_percpu_buffer)
3187 buffers = alloc_percpu(struct trace_buffer_struct);
3188 if (MEM_FAIL(!buffers, "Could not allocate percpu trace_printk buffer"))
3191 trace_percpu_buffer = buffers;
3195 static int buffers_allocated;
3197 void trace_printk_init_buffers(void)
3199 if (buffers_allocated)
3202 if (alloc_percpu_trace_buffer())
3205 /* trace_printk() is for debug use only. Don't use it in production. */
3208 pr_warn("**********************************************************\n");
3209 pr_warn("** NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE **\n");
3211 pr_warn("** trace_printk() being used. Allocating extra memory. **\n");
3213 pr_warn("** This means that this is a DEBUG kernel and it is **\n");
3214 pr_warn("** unsafe for production use. **\n");
3216 pr_warn("** If you see this message and you are not debugging **\n");
3217 pr_warn("** the kernel, report this immediately to your vendor! **\n");
3219 pr_warn("** NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE **\n");
3220 pr_warn("**********************************************************\n");
3222 /* Expand the buffers to set size */
3223 tracing_update_buffers();
3225 buffers_allocated = 1;
3228 * trace_printk_init_buffers() can be called by modules.
3229 * If that happens, then we need to start cmdline recording
3230 * directly here. If the global_trace.buffer is already
3231 * allocated here, then this was called by module code.
3233 if (global_trace.array_buffer.buffer)
3234 tracing_start_cmdline_record();
3236 EXPORT_SYMBOL_GPL(trace_printk_init_buffers);
3238 void trace_printk_start_comm(void)
3240 /* Start tracing comms if trace printk is set */
3241 if (!buffers_allocated)
3243 tracing_start_cmdline_record();
3246 static void trace_printk_start_stop_comm(int enabled)
3248 if (!buffers_allocated)
3252 tracing_start_cmdline_record();
3254 tracing_stop_cmdline_record();
3258 * trace_vbprintk - write binary msg to tracing buffer
3259 * @ip: The address of the caller
3260 * @fmt: The string format to write to the buffer
3261 * @args: Arguments for @fmt
3263 int trace_vbprintk(unsigned long ip, const char *fmt, va_list args)
3265 struct trace_event_call *call = &event_bprint;
3266 struct ring_buffer_event *event;
3267 struct trace_buffer *buffer;
3268 struct trace_array *tr = &global_trace;
3269 struct bprint_entry *entry;
3270 unsigned long flags;
3272 int len = 0, size, pc;
3274 if (unlikely(tracing_selftest_running || tracing_disabled))
3277 /* Don't pollute graph traces with trace_vprintk internals */
3278 pause_graph_tracing();
3280 pc = preempt_count();
3281 preempt_disable_notrace();
3283 tbuffer = get_trace_buf();
3289 len = vbin_printf((u32 *)tbuffer, TRACE_BUF_SIZE/sizeof(int), fmt, args);
3291 if (len > TRACE_BUF_SIZE/sizeof(int) || len < 0)
3294 local_save_flags(flags);
3295 size = sizeof(*entry) + sizeof(u32) * len;
3296 buffer = tr->array_buffer.buffer;
3297 ring_buffer_nest_start(buffer);
3298 event = __trace_buffer_lock_reserve(buffer, TRACE_BPRINT, size,
3302 entry = ring_buffer_event_data(event);
3306 memcpy(entry->buf, tbuffer, sizeof(u32) * len);
3307 if (!call_filter_check_discard(call, entry, buffer, event)) {
3308 __buffer_unlock_commit(buffer, event);
3309 ftrace_trace_stack(tr, buffer, flags, 6, pc, NULL);
3313 ring_buffer_nest_end(buffer);
3318 preempt_enable_notrace();
3319 unpause_graph_tracing();
3323 EXPORT_SYMBOL_GPL(trace_vbprintk);
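/*
 * A hedged sketch of how a varargs front end drives trace_vbprintk()
 * (illustration only; "example_bprintk" is a hypothetical name, the
 * in-tree wrappers in trace_printk.c follow this shape):
 *
 *	static __printf(2, 3)
 *	int example_bprintk(unsigned long ip, const char *fmt, ...)
 *	{
 *		va_list ap;
 *		int ret;
 *
 *		va_start(ap, fmt);
 *		ret = trace_vbprintk(ip, fmt, ap);
 *		va_end(ap);
 *		return ret;
 *	}
 */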
3327 __trace_array_vprintk(struct trace_buffer *buffer,
3328 unsigned long ip, const char *fmt, va_list args)
3330 struct trace_event_call *call = &event_print;
3331 struct ring_buffer_event *event;
3332 int len = 0, size, pc;
3333 struct print_entry *entry;
3334 unsigned long flags;
3337 if (tracing_disabled || tracing_selftest_running)
3340 /* Don't pollute graph traces with trace_vprintk internals */
3341 pause_graph_tracing();
3343 pc = preempt_count();
3344 preempt_disable_notrace();
3347 tbuffer = get_trace_buf();
3353 len = vscnprintf(tbuffer, TRACE_BUF_SIZE, fmt, args);
3355 local_save_flags(flags);
3356 size = sizeof(*entry) + len + 1;
3357 ring_buffer_nest_start(buffer);
3358 event = __trace_buffer_lock_reserve(buffer, TRACE_PRINT, size,
3362 entry = ring_buffer_event_data(event);
3365 memcpy(&entry->buf, tbuffer, len + 1);
3366 if (!call_filter_check_discard(call, entry, buffer, event)) {
3367 __buffer_unlock_commit(buffer, event);
3368 ftrace_trace_stack(&global_trace, buffer, flags, 6, pc, NULL);
3372 ring_buffer_nest_end(buffer);
3376 preempt_enable_notrace();
3377 unpause_graph_tracing();
3383 int trace_array_vprintk(struct trace_array *tr,
3384 unsigned long ip, const char *fmt, va_list args)
3386 return __trace_array_vprintk(tr->array_buffer.buffer, ip, fmt, args);
3390 * trace_array_printk - Print a message to a specific instance
3391 * @tr: The instance trace_array descriptor
3392 * @ip: The instruction pointer that this is called from.
3393 * @fmt: The format to print (printf format)
3395 * If a subsystem sets up its own instance, they have the right to
3396 * printk strings into their tracing instance buffer using this
3397 * function. Note, this function will not write into the top level
3398  * buffer (use trace_printk() for that), as the top level buffer
3399  * should only contain events that can be individually disabled.
3400  * trace_printk() is only used for debugging a kernel, and should never
3401  * be incorporated into normal use.
3403 * trace_array_printk() can be used, as it will not add noise to the
3404 * top level tracing buffer.
3406  * Note, trace_array_init_printk() must be called on @tr before this can be used.
3410 int trace_array_printk(struct trace_array *tr,
3411 unsigned long ip, const char *fmt, ...)
3419 /* This is only allowed for created instances */
3420 if (tr == &global_trace)
3423 if (!(tr->trace_flags & TRACE_ITER_PRINTK))
3427 ret = trace_array_vprintk(tr, ip, fmt, ap);
3431 EXPORT_SYMBOL_GPL(trace_array_printk);
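/*
 * A minimal sketch of the intended use from a subsystem with its own
 * instance (illustration only; the instance name "example" is hypothetical):
 *
 *	struct trace_array *tr = trace_array_get_by_name("example");
 *
 *	if (tr && !trace_array_init_printk(tr))
 *		trace_array_printk(tr, _THIS_IP_, "state=%d\n", state);
 *
 * trace_array_get_by_name() finds or creates the instance and takes a
 * reference that is eventually dropped with trace_array_put().
 */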
3434 * trace_array_init_printk - Initialize buffers for trace_array_printk()
3435 * @tr: The trace array to initialize the buffers for
3437 * As trace_array_printk() only writes into instances, they are OK to
3438 * have in the kernel (unlike trace_printk()). This needs to be called
3439 * before trace_array_printk() can be used on a trace_array.
3441 int trace_array_init_printk(struct trace_array *tr)
3446 /* This is only allowed for created instances */
3447 if (tr == &global_trace)
3450 return alloc_percpu_trace_buffer();
3452 EXPORT_SYMBOL_GPL(trace_array_init_printk);
3455 int trace_array_printk_buf(struct trace_buffer *buffer,
3456 unsigned long ip, const char *fmt, ...)
3461 if (!(global_trace.trace_flags & TRACE_ITER_PRINTK))
3465 ret = __trace_array_vprintk(buffer, ip, fmt, ap);
3471 int trace_vprintk(unsigned long ip, const char *fmt, va_list args)
3473 return trace_array_vprintk(&global_trace, ip, fmt, args);
3475 EXPORT_SYMBOL_GPL(trace_vprintk);
3477 static void trace_iterator_increment(struct trace_iterator *iter)
3479 struct ring_buffer_iter *buf_iter = trace_buffer_iter(iter, iter->cpu);
3483 ring_buffer_iter_advance(buf_iter);
3486 static struct trace_entry *
3487 peek_next_entry(struct trace_iterator *iter, int cpu, u64 *ts,
3488 unsigned long *lost_events)
3490 struct ring_buffer_event *event;
3491 struct ring_buffer_iter *buf_iter = trace_buffer_iter(iter, cpu);
3494 event = ring_buffer_iter_peek(buf_iter, ts);
3496 *lost_events = ring_buffer_iter_dropped(buf_iter) ?
3497 (unsigned long)-1 : 0;
3499 event = ring_buffer_peek(iter->array_buffer->buffer, cpu, ts,
3504 iter->ent_size = ring_buffer_event_length(event);
3505 return ring_buffer_event_data(event);
3511 static struct trace_entry *
3512 __find_next_entry(struct trace_iterator *iter, int *ent_cpu,
3513 unsigned long *missing_events, u64 *ent_ts)
3515 struct trace_buffer *buffer = iter->array_buffer->buffer;
3516 struct trace_entry *ent, *next = NULL;
3517 unsigned long lost_events = 0, next_lost = 0;
3518 int cpu_file = iter->cpu_file;
3519 u64 next_ts = 0, ts;
3525  * If we are in a per_cpu trace file, don't bother iterating over
3526  * all CPUs; peek at that one directly.
3528 if (cpu_file > RING_BUFFER_ALL_CPUS) {
3529 if (ring_buffer_empty_cpu(buffer, cpu_file))
3531 ent = peek_next_entry(iter, cpu_file, ent_ts, missing_events);
3533 *ent_cpu = cpu_file;
3538 for_each_tracing_cpu(cpu) {
3540 if (ring_buffer_empty_cpu(buffer, cpu))
3543 ent = peek_next_entry(iter, cpu, &ts, &lost_events);
3546 * Pick the entry with the smallest timestamp:
3548 if (ent && (!next || ts < next_ts)) {
3552 next_lost = lost_events;
3553 next_size = iter->ent_size;
3557 iter->ent_size = next_size;
3560 *ent_cpu = next_cpu;
3566 *missing_events = next_lost;
3571 #define STATIC_FMT_BUF_SIZE 128
3572 static char static_fmt_buf[STATIC_FMT_BUF_SIZE];
3574 static char *trace_iter_expand_format(struct trace_iterator *iter)
3578 if (iter->fmt == static_fmt_buf)
3581 tmp = krealloc(iter->fmt, iter->fmt_size + STATIC_FMT_BUF_SIZE,
3584 iter->fmt_size += STATIC_FMT_BUF_SIZE;
3591 const char *trace_event_format(struct trace_iterator *iter, const char *fmt)
3593 const char *p, *new_fmt;
3596 if (WARN_ON_ONCE(!fmt))
3600 new_fmt = q = iter->fmt;
3602 if (unlikely(q - new_fmt + 3 > iter->fmt_size)) {
3603 if (!trace_iter_expand_format(iter))
3606 q += iter->fmt - new_fmt;
3607 new_fmt = iter->fmt;
3612 /* Replace %p with %px */
3616 } else if (p[0] == 'p' && !isalnum(p[1])) {
3627 #define STATIC_TEMP_BUF_SIZE 128
3628 static char static_temp_buf[STATIC_TEMP_BUF_SIZE] __aligned(4);
3630 /* Find the next real entry, without updating the iterator itself */
3631 struct trace_entry *trace_find_next_entry(struct trace_iterator *iter,
3632 int *ent_cpu, u64 *ent_ts)
3634 /* __find_next_entry will reset ent_size */
3635 int ent_size = iter->ent_size;
3636 struct trace_entry *entry;
3639 * If called from ftrace_dump(), then the iter->temp buffer
3640 * will be the static_temp_buf and not created from kmalloc.
3641 * If the entry size is greater than the buffer, we can
3642 * not save it. Just return NULL in that case. This is only
3643 * used to add markers when two consecutive events' time
3644 * stamps have a large delta. See trace_print_lat_context()
3646 if (iter->temp == static_temp_buf &&
3647 STATIC_TEMP_BUF_SIZE < ent_size)
3651 * The __find_next_entry() may call peek_next_entry(), which may
3652 * call ring_buffer_peek() that may make the contents of iter->ent
3653 * undefined. Need to copy iter->ent now.
3655 if (iter->ent && iter->ent != iter->temp) {
3656 if ((!iter->temp || iter->temp_size < iter->ent_size) &&
3657 !WARN_ON_ONCE(iter->temp == static_temp_buf)) {
3659 temp = kmalloc(iter->ent_size, GFP_KERNEL);
3664 iter->temp_size = iter->ent_size;
3666 memcpy(iter->temp, iter->ent, iter->ent_size);
3667 iter->ent = iter->temp;
3669 entry = __find_next_entry(iter, ent_cpu, NULL, ent_ts);
3670 /* Put back the original ent_size */
3671 iter->ent_size = ent_size;
3676 /* Find the next real entry, and increment the iterator to the next entry */
3677 void *trace_find_next_entry_inc(struct trace_iterator *iter)
3679 iter->ent = __find_next_entry(iter, &iter->cpu,
3680 &iter->lost_events, &iter->ts);
3683 trace_iterator_increment(iter);
3685 return iter->ent ? iter : NULL;
3688 static void trace_consume(struct trace_iterator *iter)
3690 ring_buffer_consume(iter->array_buffer->buffer, iter->cpu, &iter->ts,
3691 &iter->lost_events);
3694 static void *s_next(struct seq_file *m, void *v, loff_t *pos)
3696 struct trace_iterator *iter = m->private;
3700 WARN_ON_ONCE(iter->leftover);
3704 /* can't go backwards */
3709 ent = trace_find_next_entry_inc(iter);
3713 while (ent && iter->idx < i)
3714 ent = trace_find_next_entry_inc(iter);
3721 void tracing_iter_reset(struct trace_iterator *iter, int cpu)
3723 struct ring_buffer_iter *buf_iter;
3724 unsigned long entries = 0;
3727 per_cpu_ptr(iter->array_buffer->data, cpu)->skipped_entries = 0;
3729 buf_iter = trace_buffer_iter(iter, cpu);
3733 ring_buffer_iter_reset(buf_iter);
3736 * We could have the case with the max latency tracers
3737 * that a reset never took place on a cpu. This is evident
3738 * by the timestamp being before the start of the buffer.
3740 while (ring_buffer_iter_peek(buf_iter, &ts)) {
3741 if (ts >= iter->array_buffer->time_start)
3744 ring_buffer_iter_advance(buf_iter);
3747 per_cpu_ptr(iter->array_buffer->data, cpu)->skipped_entries = entries;
3751  * The current tracer is copied to avoid taking a global lock all around.
3754 static void *s_start(struct seq_file *m, loff_t *pos)
3756 struct trace_iterator *iter = m->private;
3757 struct trace_array *tr = iter->tr;
3758 int cpu_file = iter->cpu_file;
3764 * copy the tracer to avoid using a global lock all around.
3765 * iter->trace is a copy of current_trace, the pointer to the
3766 * name may be used instead of a strcmp(), as iter->trace->name
3767 * will point to the same string as current_trace->name.
3769 mutex_lock(&trace_types_lock);
3770 if (unlikely(tr->current_trace && iter->trace->name != tr->current_trace->name)) {
3771 /* Close iter->trace before switching to the new current tracer */
3772 if (iter->trace->close)
3773 iter->trace->close(iter);
3774 *iter->trace = *tr->current_trace;
3775 /* Reopen the new current tracer */
3776 if (iter->trace->open)
3777 iter->trace->open(iter);
3779 mutex_unlock(&trace_types_lock);
3781 #ifdef CONFIG_TRACER_MAX_TRACE
3782 if (iter->snapshot && iter->trace->use_max_tr)
3783 return ERR_PTR(-EBUSY);
3786 if (*pos != iter->pos) {
3791 if (cpu_file == RING_BUFFER_ALL_CPUS) {
3792 for_each_tracing_cpu(cpu)
3793 tracing_iter_reset(iter, cpu);
3795 tracing_iter_reset(iter, cpu_file);
3798 for (p = iter; p && l < *pos; p = s_next(m, p, &l))
3803 * If we overflowed the seq_file before, then we want
3804 * to just reuse the trace_seq buffer again.
3810 p = s_next(m, p, &l);
3814 trace_event_read_lock();
3815 trace_access_lock(cpu_file);
3819 static void s_stop(struct seq_file *m, void *p)
3821 struct trace_iterator *iter = m->private;
3823 #ifdef CONFIG_TRACER_MAX_TRACE
3824 if (iter->snapshot && iter->trace->use_max_tr)
3828 trace_access_unlock(iter->cpu_file);
3829 trace_event_read_unlock();
3833 get_total_entries_cpu(struct array_buffer *buf, unsigned long *total,
3834 unsigned long *entries, int cpu)
3836 unsigned long count;
3838 count = ring_buffer_entries_cpu(buf->buffer, cpu);
3840 * If this buffer has skipped entries, then we hold all
3841 * entries for the trace and we need to ignore the
3842 * ones before the time stamp.
3844 if (per_cpu_ptr(buf->data, cpu)->skipped_entries) {
3845 count -= per_cpu_ptr(buf->data, cpu)->skipped_entries;
3846 /* total is the same as the entries */
3850 ring_buffer_overrun_cpu(buf->buffer, cpu);
3855 get_total_entries(struct array_buffer *buf,
3856 unsigned long *total, unsigned long *entries)
3864 for_each_tracing_cpu(cpu) {
3865 get_total_entries_cpu(buf, &t, &e, cpu);
3871 unsigned long trace_total_entries_cpu(struct trace_array *tr, int cpu)
3873 unsigned long total, entries;
3878 get_total_entries_cpu(&tr->array_buffer, &total, &entries, cpu);
3883 unsigned long trace_total_entries(struct trace_array *tr)
3885 unsigned long total, entries;
3890 get_total_entries(&tr->array_buffer, &total, &entries);
3895 static void print_lat_help_header(struct seq_file *m)
3897 seq_puts(m, "# _------=> CPU# \n"
3898 "# / _-----=> irqs-off \n"
3899 "# | / _----=> need-resched \n"
3900 "# || / _---=> hardirq/softirq \n"
3901 "# ||| / _--=> preempt-depth \n"
3903 "# cmd pid ||||| time | caller \n"
3904 "# \\ / ||||| \\ | / \n");
3907 static void print_event_info(struct array_buffer *buf, struct seq_file *m)
3909 unsigned long total;
3910 unsigned long entries;
3912 get_total_entries(buf, &total, &entries);
3913 seq_printf(m, "# entries-in-buffer/entries-written: %lu/%lu #P:%d\n",
3914 entries, total, num_online_cpus());
3918 static void print_func_help_header(struct array_buffer *buf, struct seq_file *m,
3921 bool tgid = flags & TRACE_ITER_RECORD_TGID;
3923 print_event_info(buf, m);
3925 seq_printf(m, "# TASK-PID %s CPU# TIMESTAMP FUNCTION\n", tgid ? " TGID " : "");
3926 seq_printf(m, "# | | %s | | |\n", tgid ? " | " : "");
3929 static void print_func_help_header_irq(struct array_buffer *buf, struct seq_file *m,
3932 bool tgid = flags & TRACE_ITER_RECORD_TGID;
3933 const char *space = " ";
3934 int prec = tgid ? 12 : 2;
3936 print_event_info(buf, m);
3938 seq_printf(m, "# %.*s _-----=> irqs-off\n", prec, space);
3939 seq_printf(m, "# %.*s / _----=> need-resched\n", prec, space);
3940 seq_printf(m, "# %.*s| / _---=> hardirq/softirq\n", prec, space);
3941 seq_printf(m, "# %.*s|| / _--=> preempt-depth\n", prec, space);
3942 seq_printf(m, "# %.*s||| / delay\n", prec, space);
3943 seq_printf(m, "# TASK-PID %.*s CPU# |||| TIMESTAMP FUNCTION\n", prec, " TGID ");
3944 seq_printf(m, "# | | %.*s | |||| | |\n", prec, " | ");
3948 print_trace_header(struct seq_file *m, struct trace_iterator *iter)
3950 unsigned long sym_flags = (global_trace.trace_flags & TRACE_ITER_SYM_MASK);
3951 struct array_buffer *buf = iter->array_buffer;
3952 struct trace_array_cpu *data = per_cpu_ptr(buf->data, buf->cpu);
3953 struct tracer *type = iter->trace;
3954 unsigned long entries;
3955 unsigned long total;
3956 const char *name = "preemption";
3960 get_total_entries(buf, &total, &entries);
3962 seq_printf(m, "# %s latency trace v1.1.5 on %s\n",
3964 seq_puts(m, "# -----------------------------------"
3965 "---------------------------------\n");
3966 seq_printf(m, "# latency: %lu us, #%lu/%lu, CPU#%d |"
3967 " (M:%s VP:%d, KP:%d, SP:%d HP:%d",
3968 nsecs_to_usecs(data->saved_latency),
3972 #if defined(CONFIG_PREEMPT_NONE)
3974 #elif defined(CONFIG_PREEMPT_VOLUNTARY)
3976 #elif defined(CONFIG_PREEMPT)
3978 #elif defined(CONFIG_PREEMPT_RT)
3983 /* These are reserved for later use */
3986 seq_printf(m, " #P:%d)\n", num_online_cpus());
3990 seq_puts(m, "# -----------------\n");
3991 seq_printf(m, "# | task: %.16s-%d "
3992 "(uid:%d nice:%ld policy:%ld rt_prio:%ld)\n",
3993 data->comm, data->pid,
3994 from_kuid_munged(seq_user_ns(m), data->uid), data->nice,
3995 data->policy, data->rt_priority);
3996 seq_puts(m, "# -----------------\n");
3998 if (data->critical_start) {
3999 seq_puts(m, "# => started at: ");
4000 seq_print_ip_sym(&iter->seq, data->critical_start, sym_flags);
4001 trace_print_seq(m, &iter->seq);
4002 seq_puts(m, "\n# => ended at: ");
4003 seq_print_ip_sym(&iter->seq, data->critical_end, sym_flags);
4004 trace_print_seq(m, &iter->seq);
4005 seq_puts(m, "\n#\n");
4011 static void test_cpu_buff_start(struct trace_iterator *iter)
4013 struct trace_seq *s = &iter->seq;
4014 struct trace_array *tr = iter->tr;
4016 if (!(tr->trace_flags & TRACE_ITER_ANNOTATE))
4019 if (!(iter->iter_flags & TRACE_FILE_ANNOTATE))
4022 if (cpumask_available(iter->started) &&
4023 cpumask_test_cpu(iter->cpu, iter->started))
4026 if (per_cpu_ptr(iter->array_buffer->data, iter->cpu)->skipped_entries)
4029 if (cpumask_available(iter->started))
4030 cpumask_set_cpu(iter->cpu, iter->started);
4032 /* Don't print started cpu buffer for the first entry of the trace */
4034 trace_seq_printf(s, "##### CPU %u buffer started ####\n",
4038 static enum print_line_t print_trace_fmt(struct trace_iterator *iter)
4040 struct trace_array *tr = iter->tr;
4041 struct trace_seq *s = &iter->seq;
4042 unsigned long sym_flags = (tr->trace_flags & TRACE_ITER_SYM_MASK);
4043 struct trace_entry *entry;
4044 struct trace_event *event;
4048 test_cpu_buff_start(iter);
4050 event = ftrace_find_event(entry->type);
4052 if (tr->trace_flags & TRACE_ITER_CONTEXT_INFO) {
4053 if (iter->iter_flags & TRACE_FILE_LAT_FMT)
4054 trace_print_lat_context(iter);
4056 trace_print_context(iter);
4059 if (trace_seq_has_overflowed(s))
4060 return TRACE_TYPE_PARTIAL_LINE;
4063 return event->funcs->trace(iter, sym_flags, event);
4065 trace_seq_printf(s, "Unknown type %d\n", entry->type);
4067 return trace_handle_return(s);
4070 static enum print_line_t print_raw_fmt(struct trace_iterator *iter)
4072 struct trace_array *tr = iter->tr;
4073 struct trace_seq *s = &iter->seq;
4074 struct trace_entry *entry;
4075 struct trace_event *event;
4079 if (tr->trace_flags & TRACE_ITER_CONTEXT_INFO)
4080 trace_seq_printf(s, "%d %d %llu ",
4081 entry->pid, iter->cpu, iter->ts);
4083 if (trace_seq_has_overflowed(s))
4084 return TRACE_TYPE_PARTIAL_LINE;
4086 event = ftrace_find_event(entry->type);
4088 return event->funcs->raw(iter, 0, event);
4090 trace_seq_printf(s, "%d ?\n", entry->type);
4092 return trace_handle_return(s);
4095 static enum print_line_t print_hex_fmt(struct trace_iterator *iter)
4097 struct trace_array *tr = iter->tr;
4098 struct trace_seq *s = &iter->seq;
4099 unsigned char newline = '\n';
4100 struct trace_entry *entry;
4101 struct trace_event *event;
4105 if (tr->trace_flags & TRACE_ITER_CONTEXT_INFO) {
4106 SEQ_PUT_HEX_FIELD(s, entry->pid);
4107 SEQ_PUT_HEX_FIELD(s, iter->cpu);
4108 SEQ_PUT_HEX_FIELD(s, iter->ts);
4109 if (trace_seq_has_overflowed(s))
4110 return TRACE_TYPE_PARTIAL_LINE;
4113 event = ftrace_find_event(entry->type);
4115 enum print_line_t ret = event->funcs->hex(iter, 0, event);
4116 if (ret != TRACE_TYPE_HANDLED)
4120 SEQ_PUT_FIELD(s, newline);
4122 return trace_handle_return(s);
4125 static enum print_line_t print_bin_fmt(struct trace_iterator *iter)
4127 struct trace_array *tr = iter->tr;
4128 struct trace_seq *s = &iter->seq;
4129 struct trace_entry *entry;
4130 struct trace_event *event;
4134 if (tr->trace_flags & TRACE_ITER_CONTEXT_INFO) {
4135 SEQ_PUT_FIELD(s, entry->pid);
4136 SEQ_PUT_FIELD(s, iter->cpu);
4137 SEQ_PUT_FIELD(s, iter->ts);
4138 if (trace_seq_has_overflowed(s))
4139 return TRACE_TYPE_PARTIAL_LINE;
4142 event = ftrace_find_event(entry->type);
4143 return event ? event->funcs->binary(iter, 0, event) :
4147 int trace_empty(struct trace_iterator *iter)
4149 struct ring_buffer_iter *buf_iter;
4152 /* If we are looking at one CPU buffer, only check that one */
4153 if (iter->cpu_file != RING_BUFFER_ALL_CPUS) {
4154 cpu = iter->cpu_file;
4155 buf_iter = trace_buffer_iter(iter, cpu);
4157 if (!ring_buffer_iter_empty(buf_iter))
4160 if (!ring_buffer_empty_cpu(iter->array_buffer->buffer, cpu))
4166 for_each_tracing_cpu(cpu) {
4167 buf_iter = trace_buffer_iter(iter, cpu);
4169 if (!ring_buffer_iter_empty(buf_iter))
4172 if (!ring_buffer_empty_cpu(iter->array_buffer->buffer, cpu))
4180 /* Called with trace_event_read_lock() held. */
4181 enum print_line_t print_trace_line(struct trace_iterator *iter)
4183 struct trace_array *tr = iter->tr;
4184 unsigned long trace_flags = tr->trace_flags;
4185 enum print_line_t ret;
4187 if (iter->lost_events) {
4188 if (iter->lost_events == (unsigned long)-1)
4189 trace_seq_printf(&iter->seq, "CPU:%d [LOST EVENTS]\n",
4192 trace_seq_printf(&iter->seq, "CPU:%d [LOST %lu EVENTS]\n",
4193 iter->cpu, iter->lost_events);
4194 if (trace_seq_has_overflowed(&iter->seq))
4195 return TRACE_TYPE_PARTIAL_LINE;
4198 if (iter->trace && iter->trace->print_line) {
4199 ret = iter->trace->print_line(iter);
4200 if (ret != TRACE_TYPE_UNHANDLED)
4204 if (iter->ent->type == TRACE_BPUTS &&
4205 trace_flags & TRACE_ITER_PRINTK &&
4206 trace_flags & TRACE_ITER_PRINTK_MSGONLY)
4207 return trace_print_bputs_msg_only(iter);
4209 if (iter->ent->type == TRACE_BPRINT &&
4210 trace_flags & TRACE_ITER_PRINTK &&
4211 trace_flags & TRACE_ITER_PRINTK_MSGONLY)
4212 return trace_print_bprintk_msg_only(iter);
4214 if (iter->ent->type == TRACE_PRINT &&
4215 trace_flags & TRACE_ITER_PRINTK &&
4216 trace_flags & TRACE_ITER_PRINTK_MSGONLY)
4217 return trace_print_printk_msg_only(iter);
4219 if (trace_flags & TRACE_ITER_BIN)
4220 return print_bin_fmt(iter);
4222 if (trace_flags & TRACE_ITER_HEX)
4223 return print_hex_fmt(iter);
4225 if (trace_flags & TRACE_ITER_RAW)
4226 return print_raw_fmt(iter);
4228 return print_trace_fmt(iter);
4231 void trace_latency_header(struct seq_file *m)
4233 struct trace_iterator *iter = m->private;
4234 struct trace_array *tr = iter->tr;
4236 /* print nothing if the buffers are empty */
4237 if (trace_empty(iter))
4240 if (iter->iter_flags & TRACE_FILE_LAT_FMT)
4241 print_trace_header(m, iter);
4243 if (!(tr->trace_flags & TRACE_ITER_VERBOSE))
4244 print_lat_help_header(m);
4247 void trace_default_header(struct seq_file *m)
4249 struct trace_iterator *iter = m->private;
4250 struct trace_array *tr = iter->tr;
4251 unsigned long trace_flags = tr->trace_flags;
4253 if (!(trace_flags & TRACE_ITER_CONTEXT_INFO))
4256 if (iter->iter_flags & TRACE_FILE_LAT_FMT) {
4257 /* print nothing if the buffers are empty */
4258 if (trace_empty(iter))
4260 print_trace_header(m, iter);
4261 if (!(trace_flags & TRACE_ITER_VERBOSE))
4262 print_lat_help_header(m);
4264 if (!(trace_flags & TRACE_ITER_VERBOSE)) {
4265 if (trace_flags & TRACE_ITER_IRQ_INFO)
4266 print_func_help_header_irq(iter->array_buffer,
4269 print_func_help_header(iter->array_buffer, m,
4275 static void test_ftrace_alive(struct seq_file *m)
4277 if (!ftrace_is_dead())
4279 seq_puts(m, "# WARNING: FUNCTION TRACING IS CORRUPTED\n"
4280 "# MAY BE MISSING FUNCTION EVENTS\n");
4283 #ifdef CONFIG_TRACER_MAX_TRACE
4284 static void show_snapshot_main_help(struct seq_file *m)
4286 seq_puts(m, "# echo 0 > snapshot : Clears and frees snapshot buffer\n"
4287 "# echo 1 > snapshot : Allocates snapshot buffer, if not already allocated.\n"
4288 "# Takes a snapshot of the main buffer.\n"
4289 "# echo 2 > snapshot : Clears snapshot buffer (but does not allocate or free)\n"
4290 "# (Doesn't have to be '2' works with any number that\n"
4291 "# is not a '0' or '1')\n");
4294 static void show_snapshot_percpu_help(struct seq_file *m)
4296 seq_puts(m, "# echo 0 > snapshot : Invalid for per_cpu snapshot file.\n");
4297 #ifdef CONFIG_RING_BUFFER_ALLOW_SWAP
4298 seq_puts(m, "# echo 1 > snapshot : Allocates snapshot buffer, if not already allocated.\n"
4299 "# Takes a snapshot of the main buffer for this cpu.\n");
4301 seq_puts(m, "# echo 1 > snapshot : Not supported with this kernel.\n"
4302 "# Must use main snapshot file to allocate.\n");
4304 seq_puts(m, "# echo 2 > snapshot : Clears this cpu's snapshot buffer (but does not allocate)\n"
4305 "# (Doesn't have to be '2' works with any number that\n"
4306 "# is not a '0' or '1')\n");
4309 static void print_snapshot_help(struct seq_file *m, struct trace_iterator *iter)
4311 if (iter->tr->allocated_snapshot)
4312 seq_puts(m, "#\n# * Snapshot is allocated *\n#\n");
4314 seq_puts(m, "#\n# * Snapshot is freed *\n#\n");
4316 seq_puts(m, "# Snapshot commands:\n");
4317 if (iter->cpu_file == RING_BUFFER_ALL_CPUS)
4318 show_snapshot_main_help(m);
4320 show_snapshot_percpu_help(m);
4323 /* Should never be called */
4324 static inline void print_snapshot_help(struct seq_file *m, struct trace_iterator *iter) { }
4327 static int s_show(struct seq_file *m, void *v)
4329 struct trace_iterator *iter = v;
4332 if (iter->ent == NULL) {
4334 seq_printf(m, "# tracer: %s\n", iter->trace->name);
4336 test_ftrace_alive(m);
4338 if (iter->snapshot && trace_empty(iter))
4339 print_snapshot_help(m, iter);
4340 else if (iter->trace && iter->trace->print_header)
4341 iter->trace->print_header(m);
4343 trace_default_header(m);
4345 } else if (iter->leftover) {
4347 * If we filled the seq_file buffer earlier, we
4348 * want to just show it now.
4350 ret = trace_print_seq(m, &iter->seq);
4352 /* ret should this time be zero, but you never know */
4353 iter->leftover = ret;
4356 ret = print_trace_line(iter);
4357 if (ret == TRACE_TYPE_PARTIAL_LINE) {
4359 trace_seq_puts(&iter->seq, "[LINE TOO BIG]\n");
4361 ret = trace_print_seq(m, &iter->seq);
4363 * If we overflow the seq_file buffer, then it will
4364 * ask us for this data again at start up.
4366 * ret is 0 if seq_file write succeeded.
4369 iter->leftover = ret;
4376 * Should be used after trace_array_get(), trace_types_lock
4377 * ensures that i_cdev was already initialized.
4379 static inline int tracing_get_cpu(struct inode *inode)
4381 if (inode->i_cdev) /* See trace_create_cpu_file() */
4382 return (long)inode->i_cdev - 1;
4383 return RING_BUFFER_ALL_CPUS;
4386 static const struct seq_operations tracer_seq_ops = {
4393 static struct trace_iterator *
4394 __tracing_open(struct inode *inode, struct file *file, bool snapshot)
4396 struct trace_array *tr = inode->i_private;
4397 struct trace_iterator *iter;
4400 if (tracing_disabled)
4401 return ERR_PTR(-ENODEV);
4403 iter = __seq_open_private(file, &tracer_seq_ops, sizeof(*iter));
4405 return ERR_PTR(-ENOMEM);
4407 iter->buffer_iter = kcalloc(nr_cpu_ids, sizeof(*iter->buffer_iter),
4409 if (!iter->buffer_iter)
4413 * trace_find_next_entry() may need to save off iter->ent.
4414 * It will place it into the iter->temp buffer. As most
4415 	 * events are less than 128 bytes, allocate a buffer of that size.
4416 * If one is greater, then trace_find_next_entry() will
4417 * allocate a new buffer to adjust for the bigger iter->ent.
4418 * It's not critical if it fails to get allocated here.
4420 iter->temp = kmalloc(128, GFP_KERNEL);
4422 iter->temp_size = 128;
4425 	 * trace_event_printf() may need to modify the given format
4426 	 * string to replace %p with %px so that it shows the real address
4427 	 * instead of a hashed value. However, that is only needed for event
4428 	 * tracing; other tracers may not need it. Defer the allocation
4429 * until it is needed.
4435 * We make a copy of the current tracer to avoid concurrent
4436 * changes on it while we are reading.
4438 mutex_lock(&trace_types_lock);
4439 iter->trace = kzalloc(sizeof(*iter->trace), GFP_KERNEL);
4443 *iter->trace = *tr->current_trace;
4445 if (!zalloc_cpumask_var(&iter->started, GFP_KERNEL))
4450 #ifdef CONFIG_TRACER_MAX_TRACE
4451 /* Currently only the top directory has a snapshot */
4452 if (tr->current_trace->print_max || snapshot)
4453 iter->array_buffer = &tr->max_buffer;
4456 iter->array_buffer = &tr->array_buffer;
4457 iter->snapshot = snapshot;
4459 iter->cpu_file = tracing_get_cpu(inode);
4460 mutex_init(&iter->mutex);
4462 /* Notify the tracer early; before we stop tracing. */
4463 if (iter->trace->open)
4464 iter->trace->open(iter);
4466 /* Annotate start of buffers if we had overruns */
4467 if (ring_buffer_overruns(iter->array_buffer->buffer))
4468 iter->iter_flags |= TRACE_FILE_ANNOTATE;
4470 /* Output in nanoseconds only if we are using a clock in nanoseconds. */
4471 if (trace_clocks[tr->clock_id].in_ns)
4472 iter->iter_flags |= TRACE_FILE_TIME_IN_NS;
4475 * If pause-on-trace is enabled, then stop the trace while
4476 * dumping, unless this is the "snapshot" file
4478 if (!iter->snapshot && (tr->trace_flags & TRACE_ITER_PAUSE_ON_TRACE))
4479 tracing_stop_tr(tr);
4481 if (iter->cpu_file == RING_BUFFER_ALL_CPUS) {
4482 for_each_tracing_cpu(cpu) {
4483 iter->buffer_iter[cpu] =
4484 ring_buffer_read_prepare(iter->array_buffer->buffer,
4487 ring_buffer_read_prepare_sync();
4488 for_each_tracing_cpu(cpu) {
4489 ring_buffer_read_start(iter->buffer_iter[cpu]);
4490 tracing_iter_reset(iter, cpu);
4493 cpu = iter->cpu_file;
4494 iter->buffer_iter[cpu] =
4495 ring_buffer_read_prepare(iter->array_buffer->buffer,
4497 ring_buffer_read_prepare_sync();
4498 ring_buffer_read_start(iter->buffer_iter[cpu]);
4499 tracing_iter_reset(iter, cpu);
4502 mutex_unlock(&trace_types_lock);
4507 mutex_unlock(&trace_types_lock);
4510 kfree(iter->buffer_iter);
4512 seq_release_private(inode, file);
4513 return ERR_PTR(-ENOMEM);
4516 int tracing_open_generic(struct inode *inode, struct file *filp)
4520 ret = tracing_check_open_get_tr(NULL);
4524 filp->private_data = inode->i_private;
4528 bool tracing_is_disabled(void)
4530 	return (tracing_disabled) ? true : false;
4534 * Open and update trace_array ref count.
4535 * Must have the current trace_array passed to it.
4537 int tracing_open_generic_tr(struct inode *inode, struct file *filp)
4539 struct trace_array *tr = inode->i_private;
4542 ret = tracing_check_open_get_tr(tr);
4546 filp->private_data = inode->i_private;
4552 * The private pointer of the inode is the trace_event_file.
4553 * Update the tr ref count associated to it.
4555 int tracing_open_file_tr(struct inode *inode, struct file *filp)
4557 struct trace_event_file *file = inode->i_private;
4560 ret = tracing_check_open_get_tr(file->tr);
4564 mutex_lock(&event_mutex);
4566 /* Fail if the file is marked for removal */
4567 if (file->flags & EVENT_FILE_FL_FREED) {
4568 trace_array_put(file->tr);
4571 event_file_get(file);
4574 mutex_unlock(&event_mutex);
4578 filp->private_data = inode->i_private;
4583 int tracing_release_file_tr(struct inode *inode, struct file *filp)
4585 struct trace_event_file *file = inode->i_private;
4587 trace_array_put(file->tr);
4588 event_file_put(file);
4593 static int tracing_release(struct inode *inode, struct file *file)
4595 struct trace_array *tr = inode->i_private;
4596 struct seq_file *m = file->private_data;
4597 struct trace_iterator *iter;
4600 if (!(file->f_mode & FMODE_READ)) {
4601 trace_array_put(tr);
4605 /* Writes do not use seq_file */
4607 mutex_lock(&trace_types_lock);
4609 for_each_tracing_cpu(cpu) {
4610 if (iter->buffer_iter[cpu])
4611 ring_buffer_read_finish(iter->buffer_iter[cpu]);
4614 if (iter->trace && iter->trace->close)
4615 iter->trace->close(iter);
4617 if (!iter->snapshot && tr->stop_count)
4618 /* reenable tracing if it was previously enabled */
4619 tracing_start_tr(tr);
4621 __trace_array_put(tr);
4623 mutex_unlock(&trace_types_lock);
4625 mutex_destroy(&iter->mutex);
4626 free_cpumask_var(iter->started);
4630 kfree(iter->buffer_iter);
4631 seq_release_private(inode, file);
4636 static int tracing_release_generic_tr(struct inode *inode, struct file *file)
4638 struct trace_array *tr = inode->i_private;
4640 trace_array_put(tr);
4644 static int tracing_single_release_tr(struct inode *inode, struct file *file)
4646 struct trace_array *tr = inode->i_private;
4648 trace_array_put(tr);
4650 return single_release(inode, file);
4653 static int tracing_open(struct inode *inode, struct file *file)
4655 struct trace_array *tr = inode->i_private;
4656 struct trace_iterator *iter;
4659 ret = tracing_check_open_get_tr(tr);
4663 /* If this file was open for write, then erase contents */
4664 if ((file->f_mode & FMODE_WRITE) && (file->f_flags & O_TRUNC)) {
4665 int cpu = tracing_get_cpu(inode);
4666 struct array_buffer *trace_buf = &tr->array_buffer;
4668 #ifdef CONFIG_TRACER_MAX_TRACE
4669 if (tr->current_trace->print_max)
4670 trace_buf = &tr->max_buffer;
4673 if (cpu == RING_BUFFER_ALL_CPUS)
4674 tracing_reset_online_cpus(trace_buf);
4676 tracing_reset_cpu(trace_buf, cpu);
4679 if (file->f_mode & FMODE_READ) {
4680 iter = __tracing_open(inode, file, false);
4682 ret = PTR_ERR(iter);
4683 else if (tr->trace_flags & TRACE_ITER_LATENCY_FMT)
4684 iter->iter_flags |= TRACE_FILE_LAT_FMT;
4688 trace_array_put(tr);
4694 * Some tracers are not suitable for instance buffers.
4695 * A tracer is always available for the global array (toplevel)
4696 * or if it explicitly states that it is.
4699 trace_ok_for_array(struct tracer *t, struct trace_array *tr)
4701 return (tr->flags & TRACE_ARRAY_FL_GLOBAL) || t->allow_instances;
4704 /* Find the next tracer that this trace array may use */
4705 static struct tracer *
4706 get_tracer_for_array(struct trace_array *tr, struct tracer *t)
4708 while (t && !trace_ok_for_array(t, tr))
4715 t_next(struct seq_file *m, void *v, loff_t *pos)
4717 struct trace_array *tr = m->private;
4718 struct tracer *t = v;
4723 t = get_tracer_for_array(tr, t->next);
4728 static void *t_start(struct seq_file *m, loff_t *pos)
4730 struct trace_array *tr = m->private;
4734 mutex_lock(&trace_types_lock);
4736 t = get_tracer_for_array(tr, trace_types);
4737 for (; t && l < *pos; t = t_next(m, t, &l))
4743 static void t_stop(struct seq_file *m, void *p)
4745 mutex_unlock(&trace_types_lock);
4748 static int t_show(struct seq_file *m, void *v)
4750 struct tracer *t = v;
4755 seq_puts(m, t->name);
4764 static const struct seq_operations show_traces_seq_ops = {
4771 static int show_traces_open(struct inode *inode, struct file *file)
4773 struct trace_array *tr = inode->i_private;
4777 ret = tracing_check_open_get_tr(tr);
4781 ret = seq_open(file, &show_traces_seq_ops);
4783 trace_array_put(tr);
4787 m = file->private_data;
4793 static int show_traces_release(struct inode *inode, struct file *file)
4795 struct trace_array *tr = inode->i_private;
4797 trace_array_put(tr);
4798 return seq_release(inode, file);
4802 tracing_write_stub(struct file *filp, const char __user *ubuf,
4803 size_t count, loff_t *ppos)
4808 loff_t tracing_lseek(struct file *file, loff_t offset, int whence)
4812 if (file->f_mode & FMODE_READ)
4813 ret = seq_lseek(file, offset, whence);
4815 file->f_pos = ret = 0;
4820 static const struct file_operations tracing_fops = {
4821 .open = tracing_open,
4823 .read_iter = seq_read_iter,
4824 .splice_read = generic_file_splice_read,
4825 .write = tracing_write_stub,
4826 .llseek = tracing_lseek,
4827 .release = tracing_release,
4830 static const struct file_operations show_traces_fops = {
4831 .open = show_traces_open,
4833 .llseek = seq_lseek,
4834 .release = show_traces_release,
4838 tracing_cpumask_read(struct file *filp, char __user *ubuf,
4839 size_t count, loff_t *ppos)
4841 struct trace_array *tr = file_inode(filp)->i_private;
4845 len = snprintf(NULL, 0, "%*pb\n",
4846 cpumask_pr_args(tr->tracing_cpumask)) + 1;
4847 mask_str = kmalloc(len, GFP_KERNEL);
4851 len = snprintf(mask_str, len, "%*pb\n",
4852 cpumask_pr_args(tr->tracing_cpumask));
4857 count = simple_read_from_buffer(ubuf, count, ppos, mask_str, len);
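/*
 * tracing_set_cpumask - limit tracing to the CPUs in @tracing_cpumask_new.
 * For every CPU whose bit changes, the per-CPU "disabled" count is bumped
 * or dropped and ring buffer recording is disabled or re-enabled, all under
 * tr->max_lock with interrupts off so a max-latency swap cannot race with
 * the update.
 */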
4865 int tracing_set_cpumask(struct trace_array *tr,
4866 cpumask_var_t tracing_cpumask_new)
4873 local_irq_disable();
4874 arch_spin_lock(&tr->max_lock);
4875 for_each_tracing_cpu(cpu) {
4877 * Increase/decrease the disabled counter if we are
4878 * about to flip a bit in the cpumask:
4880 if (cpumask_test_cpu(cpu, tr->tracing_cpumask) &&
4881 !cpumask_test_cpu(cpu, tracing_cpumask_new)) {
4882 atomic_inc(&per_cpu_ptr(tr->array_buffer.data, cpu)->disabled);
4883 ring_buffer_record_disable_cpu(tr->array_buffer.buffer, cpu);
4884 #ifdef CONFIG_TRACER_MAX_TRACE
4885 ring_buffer_record_disable_cpu(tr->max_buffer.buffer, cpu);
4888 if (!cpumask_test_cpu(cpu, tr->tracing_cpumask) &&
4889 cpumask_test_cpu(cpu, tracing_cpumask_new)) {
4890 atomic_dec(&per_cpu_ptr(tr->array_buffer.data, cpu)->disabled);
4891 ring_buffer_record_enable_cpu(tr->array_buffer.buffer, cpu);
4892 #ifdef CONFIG_TRACER_MAX_TRACE
4893 ring_buffer_record_enable_cpu(tr->max_buffer.buffer, cpu);
4897 arch_spin_unlock(&tr->max_lock);
4900 cpumask_copy(tr->tracing_cpumask, tracing_cpumask_new);
4906 tracing_cpumask_write(struct file *filp, const char __user *ubuf,
4907 size_t count, loff_t *ppos)
4909 struct trace_array *tr = file_inode(filp)->i_private;
4910 cpumask_var_t tracing_cpumask_new;
4913 if (!alloc_cpumask_var(&tracing_cpumask_new, GFP_KERNEL))
4916 err = cpumask_parse_user(ubuf, count, tracing_cpumask_new);
4920 err = tracing_set_cpumask(tr, tracing_cpumask_new);
4924 free_cpumask_var(tracing_cpumask_new);
4929 free_cpumask_var(tracing_cpumask_new);
4934 static const struct file_operations tracing_cpumask_fops = {
4935 .open = tracing_open_generic_tr,
4936 .read = tracing_cpumask_read,
4937 .write = tracing_cpumask_write,
4938 .release = tracing_release_generic_tr,
4939 .llseek = generic_file_llseek,
4942 static int tracing_trace_options_show(struct seq_file *m, void *v)
4944 struct tracer_opt *trace_opts;
4945 struct trace_array *tr = m->private;
4949 mutex_lock(&trace_types_lock);
4950 tracer_flags = tr->current_trace->flags->val;
4951 trace_opts = tr->current_trace->flags->opts;
4953 for (i = 0; trace_options[i]; i++) {
4954 if (tr->trace_flags & (1 << i))
4955 seq_printf(m, "%s\n", trace_options[i]);
4957 seq_printf(m, "no%s\n", trace_options[i]);
4960 for (i = 0; trace_opts[i].name; i++) {
4961 if (tracer_flags & trace_opts[i].bit)
4962 seq_printf(m, "%s\n", trace_opts[i].name);
4964 seq_printf(m, "no%s\n", trace_opts[i].name);
4966 mutex_unlock(&trace_types_lock);
4971 static int __set_tracer_option(struct trace_array *tr,
4972 struct tracer_flags *tracer_flags,
4973 struct tracer_opt *opts, int neg)
4975 struct tracer *trace = tracer_flags->trace;
4978 ret = trace->set_flag(tr, tracer_flags->val, opts->bit, !neg);
4983 tracer_flags->val &= ~opts->bit;
4985 tracer_flags->val |= opts->bit;
4989 /* Try to assign a tracer specific option */
4990 static int set_tracer_option(struct trace_array *tr, char *cmp, int neg)
4992 struct tracer *trace = tr->current_trace;
4993 struct tracer_flags *tracer_flags = trace->flags;
4994 struct tracer_opt *opts = NULL;
4997 for (i = 0; tracer_flags->opts[i].name; i++) {
4998 opts = &tracer_flags->opts[i];
5000 if (strcmp(cmp, opts->name) == 0)
5001 return __set_tracer_option(tr, trace->flags, opts, neg);
5007 /* Some tracers require overwrite to stay enabled */
5008 int trace_keep_overwrite(struct tracer *tracer, u32 mask, int set)
5010 if (tracer->enabled && (mask & TRACE_ITER_OVERWRITE) && !set)
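/*
 * set_tracer_flag - set or clear a single TRACE_ITER_* flag on @tr.
 * The current tracer can veto the change through ->flag_changed. Flags
 * such as RECORD_CMD, RECORD_TGID, EVENT_FORK, FUNC_FORK, OVERWRITE and
 * PRINTK need extra work beyond flipping the bit, handled below.
 */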
5016 int set_tracer_flag(struct trace_array *tr, unsigned int mask, int enabled)
5020 if ((mask == TRACE_ITER_RECORD_TGID) ||
5021 (mask == TRACE_ITER_RECORD_CMD))
5022 lockdep_assert_held(&event_mutex);
5024 /* do nothing if flag is already set */
5025 if (!!(tr->trace_flags & mask) == !!enabled)
5028 /* Give the tracer a chance to approve the change */
5029 if (tr->current_trace->flag_changed)
5030 if (tr->current_trace->flag_changed(tr, mask, !!enabled))
5034 tr->trace_flags |= mask;
5036 tr->trace_flags &= ~mask;
5038 if (mask == TRACE_ITER_RECORD_CMD)
5039 trace_event_enable_cmd_record(enabled);
5041 if (mask == TRACE_ITER_RECORD_TGID) {
5043 tgid_map_max = pid_max;
5044 map = kvcalloc(tgid_map_max + 1, sizeof(*tgid_map),
5048 * Pairs with smp_load_acquire() in
5049 * trace_find_tgid_ptr() to ensure that if it observes
5050 * the tgid_map we just allocated then it also observes
5051 * the corresponding tgid_map_max value.
5053 smp_store_release(&tgid_map, map);
5056 tr->trace_flags &= ~TRACE_ITER_RECORD_TGID;
5060 trace_event_enable_tgid_record(enabled);
5063 if (mask == TRACE_ITER_EVENT_FORK)
5064 trace_event_follow_fork(tr, enabled);
5066 if (mask == TRACE_ITER_FUNC_FORK)
5067 ftrace_pid_follow_fork(tr, enabled);
5069 if (mask == TRACE_ITER_OVERWRITE) {
5070 ring_buffer_change_overwrite(tr->array_buffer.buffer, enabled);
5071 #ifdef CONFIG_TRACER_MAX_TRACE
5072 ring_buffer_change_overwrite(tr->max_buffer.buffer, enabled);
5076 if (mask == TRACE_ITER_PRINTK) {
5077 trace_printk_start_stop_comm(enabled);
5078 trace_printk_control(enabled);
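/*
 * trace_set_options - apply a single option string, e.g. "overwrite" or
 * "nooverwrite". A leading "no" clears the flag. Generic trace_options
 * are matched first; unknown names are offered to the current tracer's
 * private flags via set_tracer_option().
 */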
5084 int trace_set_options(struct trace_array *tr, char *option)
5089 size_t orig_len = strlen(option);
5092 cmp = strstrip(option);
5094 len = str_has_prefix(cmp, "no");
5100 mutex_lock(&event_mutex);
5101 mutex_lock(&trace_types_lock);
5103 ret = match_string(trace_options, -1, cmp);
5104 /* If no option could be set, test the specific tracer options */
5106 ret = set_tracer_option(tr, cmp, neg);
5108 ret = set_tracer_flag(tr, 1 << ret, !neg);
5110 mutex_unlock(&trace_types_lock);
5111 mutex_unlock(&event_mutex);
5114 * If the first trailing whitespace is replaced with '\0' by strstrip,
5115 * turn it back into a space.
5117 if (orig_len > strlen(option))
5118 option[strlen(option)] = ' ';
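/*
 * Apply the comma-separated option list from the "trace_options=" boot
 * parameter to the global trace array, one option at a time.
 */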
5123 static void __init apply_trace_boot_options(void)
5125 char *buf = trace_boot_options_buf;
5129 option = strsep(&buf, ",");
5135 trace_set_options(&global_trace, option);
5137 /* Put back the comma to allow this to be called again */
5144 tracing_trace_options_write(struct file *filp, const char __user *ubuf,
5145 size_t cnt, loff_t *ppos)
5147 struct seq_file *m = filp->private_data;
5148 struct trace_array *tr = m->private;
5152 if (cnt >= sizeof(buf))
5155 if (copy_from_user(buf, ubuf, cnt))
5160 ret = trace_set_options(tr, buf);
5169 static int tracing_trace_options_open(struct inode *inode, struct file *file)
5171 struct trace_array *tr = inode->i_private;
5174 ret = tracing_check_open_get_tr(tr);
5178 ret = single_open(file, tracing_trace_options_show, inode->i_private);
5180 trace_array_put(tr);
5185 static const struct file_operations tracing_iter_fops = {
5186 .open = tracing_trace_options_open,
5188 .llseek = seq_lseek,
5189 .release = tracing_single_release_tr,
5190 .write = tracing_trace_options_write,
5193 static const char readme_msg[] =
5194 "tracing mini-HOWTO:\n\n"
5195 "# echo 0 > tracing_on : quick way to disable tracing\n"
5196 "# echo 1 > tracing_on : quick way to re-enable tracing\n\n"
5197 " Important files:\n"
5198 " trace\t\t\t- The static contents of the buffer\n"
5199 "\t\t\t To clear the buffer write into this file: echo > trace\n"
5200 " trace_pipe\t\t- A consuming read to see the contents of the buffer\n"
5201 " current_tracer\t- function and latency tracers\n"
5202 " available_tracers\t- list of configured tracers for current_tracer\n"
5203 " error_log\t- error log for failed commands (that support it)\n"
5204 " buffer_size_kb\t- view and modify size of per cpu buffer\n"
5205 " buffer_total_size_kb - view total size of all cpu buffers\n\n"
5206 " trace_clock\t\t-change the clock used to order events\n"
5207 " local: Per cpu clock but may not be synced across CPUs\n"
5208 " global: Synced across CPUs but slows tracing down.\n"
5209 " counter: Not a clock, but just an increment\n"
5210 " uptime: Jiffy counter from time of boot\n"
5211 " perf: Same clock that perf events use\n"
5212 #ifdef CONFIG_X86_64
5213 " x86-tsc: TSC cycle counter\n"
5215 "\n timestamp_mode\t-view the mode used to timestamp events\n"
5216 " delta: Delta difference against a buffer-wide timestamp\n"
5217 " absolute: Absolute (standalone) timestamp\n"
5218 "\n trace_marker\t\t- Writes into this file writes into the kernel buffer\n"
5219 "\n trace_marker_raw\t\t- Writes into this file writes binary data into the kernel buffer\n"
5220 " tracing_cpumask\t- Limit which CPUs to trace\n"
5221 " instances\t\t- Make sub-buffers with: mkdir instances/foo\n"
5222 "\t\t\t Remove sub-buffer with rmdir\n"
5223 " trace_options\t\t- Set format or modify how tracing happens\n"
5224 "\t\t\t Disable an option by prefixing 'no' to the\n"
5225 "\t\t\t option name\n"
5226 " saved_cmdlines_size\t- echo command number in here to store comm-pid list\n"
5227 #ifdef CONFIG_DYNAMIC_FTRACE
5228 "\n available_filter_functions - list of functions that can be filtered on\n"
5229 " set_ftrace_filter\t- echo function name in here to only trace these\n"
5230 "\t\t\t functions\n"
5231 "\t accepts: func_full_name or glob-matching-pattern\n"
5232 "\t modules: Can select a group via module\n"
5233 "\t Format: :mod:<module-name>\n"
5234 "\t example: echo :mod:ext3 > set_ftrace_filter\n"
5235 "\t triggers: a command to perform when function is hit\n"
5236 "\t Format: <function>:<trigger>[:count]\n"
5237 "\t trigger: traceon, traceoff\n"
5238 "\t\t enable_event:<system>:<event>\n"
5239 "\t\t disable_event:<system>:<event>\n"
5240 #ifdef CONFIG_STACKTRACE
5243 #ifdef CONFIG_TRACER_SNAPSHOT
5248 "\t example: echo do_fault:traceoff > set_ftrace_filter\n"
5249 "\t echo do_trap:traceoff:3 > set_ftrace_filter\n"
5250 "\t The first one will disable tracing every time do_fault is hit\n"
5251 "\t The second will disable tracing at most 3 times when do_trap is hit\n"
5252 "\t The first time do trap is hit and it disables tracing, the\n"
5253 "\t counter will decrement to 2. If tracing is already disabled,\n"
5254 "\t the counter will not decrement. It only decrements when the\n"
5255 "\t trigger did work\n"
5256 "\t To remove trigger without count:\n"
5257 "\t echo '!<function>:<trigger> > set_ftrace_filter\n"
5258 "\t To remove trigger with a count:\n"
5259 "\t echo '!<function>:<trigger>:0 > set_ftrace_filter\n"
5260 " set_ftrace_notrace\t- echo function name in here to never trace.\n"
5261 "\t accepts: func_full_name, *func_end, func_begin*, *func_middle*\n"
5262 "\t modules: Can select a group via module command :mod:\n"
5263 "\t Does not accept triggers\n"
5264 #endif /* CONFIG_DYNAMIC_FTRACE */
5265 #ifdef CONFIG_FUNCTION_TRACER
5266 " set_ftrace_pid\t- Write pid(s) to only function trace those pids\n"
5268 " set_ftrace_notrace_pid\t- Write pid(s) to not function trace those pids\n"
5271 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
5272 " set_graph_function\t- Trace the nested calls of a function (function_graph)\n"
5273 " set_graph_notrace\t- Do not trace the nested calls of a function (function_graph)\n"
5274 " max_graph_depth\t- Trace a limited depth of nested calls (0 is unlimited)\n"
5276 #ifdef CONFIG_TRACER_SNAPSHOT
5277 "\n snapshot\t\t- Like 'trace' but shows the content of the static\n"
5278 "\t\t\t snapshot buffer. Read the contents for more\n"
5279 "\t\t\t information\n"
5281 #ifdef CONFIG_STACK_TRACER
5282 " stack_trace\t\t- Shows the max stack trace when active\n"
5283 " stack_max_size\t- Shows current max stack size that was traced\n"
5284 "\t\t\t Write into this file to reset the max size (trigger a\n"
5285 "\t\t\t new trace)\n"
5286 #ifdef CONFIG_DYNAMIC_FTRACE
5287 " stack_trace_filter\t- Like set_ftrace_filter but limits what stack_trace\n"
5290 #endif /* CONFIG_STACK_TRACER */
5291 #ifdef CONFIG_DYNAMIC_EVENTS
5292 " dynamic_events\t\t- Create/append/remove/show the generic dynamic events\n"
5293 "\t\t\t Write into this file to define/undefine new trace events.\n"
5295 #ifdef CONFIG_KPROBE_EVENTS
5296 " kprobe_events\t\t- Create/append/remove/show the kernel dynamic events\n"
5297 "\t\t\t Write into this file to define/undefine new trace events.\n"
5299 #ifdef CONFIG_UPROBE_EVENTS
5300 " uprobe_events\t\t- Create/append/remove/show the userspace dynamic events\n"
5301 "\t\t\t Write into this file to define/undefine new trace events.\n"
5303 #if defined(CONFIG_KPROBE_EVENTS) || defined(CONFIG_UPROBE_EVENTS)
5304 "\t accepts: event-definitions (one definition per line)\n"
5305 "\t Format: p[:[<group>/]<event>] <place> [<args>]\n"
5306 "\t r[maxactive][:[<group>/]<event>] <place> [<args>]\n"
5307 #ifdef CONFIG_HIST_TRIGGERS
5308 "\t s:[synthetic/]<event> <field> [<field>]\n"
5310 "\t -:[<group>/]<event>\n"
5311 #ifdef CONFIG_KPROBE_EVENTS
5312 "\t place: [<module>:]<symbol>[+<offset>]|<memaddr>\n"
5313 "place (kretprobe): [<module>:]<symbol>[+<offset>]%return|<memaddr>\n"
5315 #ifdef CONFIG_UPROBE_EVENTS
5316 " place (uprobe): <path>:<offset>[%return][(ref_ctr_offset)]\n"
5318 "\t args: <name>=fetcharg[:type]\n"
5319 "\t fetcharg: %<register>, @<address>, @<symbol>[+|-<offset>],\n"
5320 #ifdef CONFIG_HAVE_FUNCTION_ARG_ACCESS_API
5321 "\t $stack<index>, $stack, $retval, $comm, $arg<N>,\n"
5323 "\t $stack<index>, $stack, $retval, $comm,\n"
5325 "\t +|-[u]<offset>(<fetcharg>), \\imm-value, \\\"imm-string\"\n"
5326 "\t type: s8/16/32/64, u8/16/32/64, x8/16/32/64, string, symbol,\n"
5327 "\t b<bit-width>@<bit-offset>/<container-size>, ustring,\n"
5328 "\t <type>\\[<array-size>\\]\n"
5329 #ifdef CONFIG_HIST_TRIGGERS
5330 "\t field: <stype> <name>;\n"
5331 "\t stype: u8/u16/u32/u64, s8/s16/s32/s64, pid_t,\n"
5332 "\t [unsigned] char/int/long\n"
5335 " events/\t\t- Directory containing all trace event subsystems:\n"
5336 " enable\t\t- Write 0/1 to enable/disable tracing of all events\n"
5337 " events/<system>/\t- Directory containing all trace events for <system>:\n"
5338 " enable\t\t- Write 0/1 to enable/disable tracing of all <system>\n"
5340 " filter\t\t- If set, only events passing filter are traced\n"
5341 " events/<system>/<event>/\t- Directory containing control files for\n"
5343 " enable\t\t- Write 0/1 to enable/disable tracing of <event>\n"
5344 " filter\t\t- If set, only events passing filter are traced\n"
5345 " trigger\t\t- If set, a command to perform when event is hit\n"
5346 "\t Format: <trigger>[:count][if <filter>]\n"
5347 "\t trigger: traceon, traceoff\n"
5348 "\t enable_event:<system>:<event>\n"
5349 "\t disable_event:<system>:<event>\n"
5350 #ifdef CONFIG_HIST_TRIGGERS
5351 "\t enable_hist:<system>:<event>\n"
5352 "\t disable_hist:<system>:<event>\n"
5354 #ifdef CONFIG_STACKTRACE
5357 #ifdef CONFIG_TRACER_SNAPSHOT
5360 #ifdef CONFIG_HIST_TRIGGERS
5361 "\t\t hist (see below)\n"
5363 "\t example: echo traceoff > events/block/block_unplug/trigger\n"
5364 "\t echo traceoff:3 > events/block/block_unplug/trigger\n"
5365 "\t echo 'enable_event:kmem:kmalloc:3 if nr_rq > 1' > \\\n"
5366 "\t events/block/block_unplug/trigger\n"
5367 "\t The first disables tracing every time block_unplug is hit.\n"
5368 "\t The second disables tracing the first 3 times block_unplug is hit.\n"
5369 "\t The third enables the kmalloc event the first 3 times block_unplug\n"
5370 "\t is hit and has value of greater than 1 for the 'nr_rq' event field.\n"
5371 "\t Like function triggers, the counter is only decremented if it\n"
5372 "\t enabled or disabled tracing.\n"
5373 "\t To remove a trigger without a count:\n"
5374 "\t echo '!<trigger> > <system>/<event>/trigger\n"
5375 "\t To remove a trigger with a count:\n"
5376 "\t echo '!<trigger>:0 > <system>/<event>/trigger\n"
5377 "\t Filters can be ignored when removing a trigger.\n"
5378 #ifdef CONFIG_HIST_TRIGGERS
5379 " hist trigger\t- If set, event hits are aggregated into a hash table\n"
5380 "\t Format: hist:keys=<field1[,field2,...]>\n"
5381 "\t [:values=<field1[,field2,...]>]\n"
5382 "\t [:sort=<field1[,field2,...]>]\n"
5383 "\t [:size=#entries]\n"
5384 "\t [:pause][:continue][:clear]\n"
5385 "\t [:name=histname1]\n"
5386 "\t [:<handler>.<action>]\n"
5387 "\t [if <filter>]\n\n"
5388 "\t Note, special fields can be used as well:\n"
5389 "\t common_timestamp - to record current timestamp\n"
5390 "\t common_cpu - to record the CPU the event happened on\n"
5392 "\t When a matching event is hit, an entry is added to a hash\n"
5393 "\t table using the key(s) and value(s) named, and the value of a\n"
5394 "\t sum called 'hitcount' is incremented. Keys and values\n"
5395 "\t correspond to fields in the event's format description. Keys\n"
5396 "\t can be any field, or the special string 'stacktrace'.\n"
5397 "\t Compound keys consisting of up to two fields can be specified\n"
5398 "\t by the 'keys' keyword. Values must correspond to numeric\n"
5399 "\t fields. Sort keys consisting of up to two fields can be\n"
5400 "\t specified using the 'sort' keyword. The sort direction can\n"
5401 "\t be modified by appending '.descending' or '.ascending' to a\n"
5402 "\t sort field. The 'size' parameter can be used to specify more\n"
5403 "\t or fewer than the default 2048 entries for the hashtable size.\n"
5404 "\t If a hist trigger is given a name using the 'name' parameter,\n"
5405 "\t its histogram data will be shared with other triggers of the\n"
5406 "\t same name, and trigger hits will update this common data.\n\n"
5407 "\t Reading the 'hist' file for the event will dump the hash\n"
5408 "\t table in its entirety to stdout. If there are multiple hist\n"
5409 "\t triggers attached to an event, there will be a table for each\n"
5410 "\t trigger in the output. The table displayed for a named\n"
5411 "\t trigger will be the same as any other instance having the\n"
5412 "\t same name. The default format used to display a given field\n"
5413 "\t can be modified by appending any of the following modifiers\n"
5414 "\t to the field name, as applicable:\n\n"
5415 "\t .hex display a number as a hex value\n"
5416 "\t .sym display an address as a symbol\n"
5417 "\t .sym-offset display an address as a symbol and offset\n"
5418 "\t .execname display a common_pid as a program name\n"
5419 "\t .syscall display a syscall id as a syscall name\n"
5420 "\t .log2 display log2 value rather than raw number\n"
5421 "\t .usecs display a common_timestamp in microseconds\n\n"
5422 "\t The 'pause' parameter can be used to pause an existing hist\n"
5423 "\t trigger or to start a hist trigger but not log any events\n"
5424 "\t until told to do so. 'continue' can be used to start or\n"
5425 "\t restart a paused hist trigger.\n\n"
5426 "\t The 'clear' parameter will clear the contents of a running\n"
5427 "\t hist trigger and leave its current paused/active state\n"
5429 "\t The enable_hist and disable_hist triggers can be used to\n"
5430 "\t have one event conditionally start and stop another event's\n"
5431 "\t already-attached hist trigger. The syntax is analogous to\n"
5432 "\t the enable_event and disable_event triggers.\n\n"
5433 "\t Hist trigger handlers and actions are executed whenever a\n"
5434 "\t a histogram entry is added or updated. They take the form:\n\n"
5435 "\t <handler>.<action>\n\n"
5436 "\t The available handlers are:\n\n"
5437 "\t onmatch(matching.event) - invoke on addition or update\n"
5438 "\t onmax(var) - invoke if var exceeds current max\n"
5439 "\t onchange(var) - invoke action if var changes\n\n"
5440 "\t The available actions are:\n\n"
5441 "\t trace(<synthetic_event>,param list) - generate synthetic event\n"
5442 "\t save(field,...) - save current event fields\n"
5443 #ifdef CONFIG_TRACER_SNAPSHOT
5444 "\t snapshot() - snapshot the trace buffer\n\n"
5446 #ifdef CONFIG_SYNTH_EVENTS
5447 " events/synthetic_events\t- Create/append/remove/show synthetic events\n"
5448 "\t Write into this file to define/undefine new synthetic events.\n"
5449 "\t example: echo 'myevent u64 lat; char name[]' >> synthetic_events\n"
5455 tracing_readme_read(struct file *filp, char __user *ubuf,
5456 size_t cnt, loff_t *ppos)
5458 return simple_read_from_buffer(ubuf, cnt, ppos,
5459 readme_msg, strlen(readme_msg));
5462 static const struct file_operations tracing_readme_fops = {
5463 .open = tracing_open_generic,
5464 .read = tracing_readme_read,
5465 .llseek = generic_file_llseek,
5468 static void *saved_tgids_next(struct seq_file *m, void *v, loff_t *pos)
5472 return trace_find_tgid_ptr(pid);
5475 static void *saved_tgids_start(struct seq_file *m, loff_t *pos)
5479 return trace_find_tgid_ptr(pid);
5482 static void saved_tgids_stop(struct seq_file *m, void *v)
5486 static int saved_tgids_show(struct seq_file *m, void *v)
5488 int *entry = (int *)v;
5489 int pid = entry - tgid_map;
5495 seq_printf(m, "%d %d\n", pid, tgid);
5499 static const struct seq_operations tracing_saved_tgids_seq_ops = {
5500 .start = saved_tgids_start,
5501 .stop = saved_tgids_stop,
5502 .next = saved_tgids_next,
5503 .show = saved_tgids_show,
5506 static int tracing_saved_tgids_open(struct inode *inode, struct file *filp)
5510 ret = tracing_check_open_get_tr(NULL);
5514 return seq_open(filp, &tracing_saved_tgids_seq_ops);
5518 static const struct file_operations tracing_saved_tgids_fops = {
5519 .open = tracing_saved_tgids_open,
5521 .llseek = seq_lseek,
5522 .release = seq_release,
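/*
 * seq_file iterator over the saved cmdlines: walk map_cmdline_to_pid and
 * skip slots that hold no PID (-1 or NO_CMDLINE_MAP).
 */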
5525 static void *saved_cmdlines_next(struct seq_file *m, void *v, loff_t *pos)
5527 unsigned int *ptr = v;
5529 if (*pos || m->count)
5534 for (; ptr < &savedcmd->map_cmdline_to_pid[savedcmd->cmdline_num];
5536 if (*ptr == -1 || *ptr == NO_CMDLINE_MAP)
5545 static void *saved_cmdlines_start(struct seq_file *m, loff_t *pos)
5551 arch_spin_lock(&trace_cmdline_lock);
5553 v = &savedcmd->map_cmdline_to_pid[0];
5555 v = saved_cmdlines_next(m, v, &l);
5563 static void saved_cmdlines_stop(struct seq_file *m, void *v)
5565 arch_spin_unlock(&trace_cmdline_lock);
5569 static int saved_cmdlines_show(struct seq_file *m, void *v)
5571 char buf[TASK_COMM_LEN];
5572 unsigned int *pid = v;
5574 __trace_find_cmdline(*pid, buf);
5575 seq_printf(m, "%d %s\n", *pid, buf);
5579 static const struct seq_operations tracing_saved_cmdlines_seq_ops = {
5580 .start = saved_cmdlines_start,
5581 .next = saved_cmdlines_next,
5582 .stop = saved_cmdlines_stop,
5583 .show = saved_cmdlines_show,
5586 static int tracing_saved_cmdlines_open(struct inode *inode, struct file *filp)
5590 ret = tracing_check_open_get_tr(NULL);
5594 return seq_open(filp, &tracing_saved_cmdlines_seq_ops);
5597 static const struct file_operations tracing_saved_cmdlines_fops = {
5598 .open = tracing_saved_cmdlines_open,
5600 .llseek = seq_lseek,
5601 .release = seq_release,
5605 tracing_saved_cmdlines_size_read(struct file *filp, char __user *ubuf,
5606 size_t cnt, loff_t *ppos)
5612 arch_spin_lock(&trace_cmdline_lock);
5613 r = scnprintf(buf, sizeof(buf), "%u\n", savedcmd->cmdline_num);
5614 arch_spin_unlock(&trace_cmdline_lock);
5617 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
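/*
 * Allocate a new saved_cmdlines buffer with @val entries, swap it in
 * under trace_cmdline_lock, and free the old buffer outside the lock.
 */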
5620 static int tracing_resize_saved_cmdlines(unsigned int val)
5622 struct saved_cmdlines_buffer *s, *savedcmd_temp;
5624 s = allocate_cmdlines_buffer(val);
5629 arch_spin_lock(&trace_cmdline_lock);
5630 savedcmd_temp = savedcmd;
5632 arch_spin_unlock(&trace_cmdline_lock);
5634 free_saved_cmdlines_buffer(savedcmd_temp);
5640 tracing_saved_cmdlines_size_write(struct file *filp, const char __user *ubuf,
5641 size_t cnt, loff_t *ppos)
5646 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
5650 /* must have at least 1 entry and at most PID_MAX_DEFAULT entries */
5651 if (!val || val > PID_MAX_DEFAULT)
5654 ret = tracing_resize_saved_cmdlines((unsigned int)val);
5663 static const struct file_operations tracing_saved_cmdlines_size_fops = {
5664 .open = tracing_open_generic,
5665 .read = tracing_saved_cmdlines_size_read,
5666 .write = tracing_saved_cmdlines_size_write,
5669 #ifdef CONFIG_TRACE_EVAL_MAP_FILE
5670 static union trace_eval_map_item *
5671 update_eval_map(union trace_eval_map_item *ptr)
5673 if (!ptr->map.eval_string) {
5674 if (ptr->tail.next) {
5675 ptr = ptr->tail.next;
5676 /* Set ptr to the next real item (skip head) */
5684 static void *eval_map_next(struct seq_file *m, void *v, loff_t *pos)
5686 union trace_eval_map_item *ptr = v;
5689 * Paranoid! If ptr points to end, we don't want to increment past it.
5690 * This really should never happen.
5693 ptr = update_eval_map(ptr);
5694 if (WARN_ON_ONCE(!ptr))
5698 ptr = update_eval_map(ptr);
5703 static void *eval_map_start(struct seq_file *m, loff_t *pos)
5705 union trace_eval_map_item *v;
5708 mutex_lock(&trace_eval_mutex);
5710 v = trace_eval_maps;
5714 while (v && l < *pos) {
5715 v = eval_map_next(m, v, &l);
5721 static void eval_map_stop(struct seq_file *m, void *v)
5723 mutex_unlock(&trace_eval_mutex);
5726 static int eval_map_show(struct seq_file *m, void *v)
5728 union trace_eval_map_item *ptr = v;
5730 seq_printf(m, "%s %ld (%s)\n",
5731 ptr->map.eval_string, ptr->map.eval_value,
5737 static const struct seq_operations tracing_eval_map_seq_ops = {
5738 .start = eval_map_start,
5739 .next = eval_map_next,
5740 .stop = eval_map_stop,
5741 .show = eval_map_show,
5744 static int tracing_eval_map_open(struct inode *inode, struct file *filp)
5748 ret = tracing_check_open_get_tr(NULL);
5752 return seq_open(filp, &tracing_eval_map_seq_ops);
5755 static const struct file_operations tracing_eval_map_fops = {
5756 .open = tracing_eval_map_open,
5758 .llseek = seq_lseek,
5759 .release = seq_release,
5762 static inline union trace_eval_map_item *
5763 trace_eval_jmp_to_tail(union trace_eval_map_item *ptr)
5765 /* Return tail of array given the head */
5766 return ptr + ptr->head.length + 1;
5770 trace_insert_eval_map_file(struct module *mod, struct trace_eval_map **start,
5773 struct trace_eval_map **stop;
5774 struct trace_eval_map **map;
5775 union trace_eval_map_item *map_array;
5776 union trace_eval_map_item *ptr;
5781 * The trace_eval_maps contains the map plus a head and tail item,
5782 * where the head holds the module and length of array, and the
5783 * tail holds a pointer to the next list.
5785 map_array = kmalloc_array(len + 2, sizeof(*map_array), GFP_KERNEL);
5787 pr_warn("Unable to allocate trace eval mapping\n");
5791 mutex_lock(&trace_eval_mutex);
5793 if (!trace_eval_maps)
5794 trace_eval_maps = map_array;
5796 ptr = trace_eval_maps;
5798 ptr = trace_eval_jmp_to_tail(ptr);
5799 if (!ptr->tail.next)
5801 ptr = ptr->tail.next;
5804 ptr->tail.next = map_array;
5806 map_array->head.mod = mod;
5807 map_array->head.length = len;
5810 for (map = start; (unsigned long)map < (unsigned long)stop; map++) {
5811 map_array->map = **map;
5814 memset(map_array, 0, sizeof(*map_array));
5816 mutex_unlock(&trace_eval_mutex);
5819 static void trace_create_eval_file(struct dentry *d_tracer)
5821 trace_create_file("eval_map", 0444, d_tracer,
5822 NULL, &tracing_eval_map_fops);
5825 #else /* CONFIG_TRACE_EVAL_MAP_FILE */
5826 static inline void trace_create_eval_file(struct dentry *d_tracer) { }
5827 static inline void trace_insert_eval_map_file(struct module *mod,
5828 struct trace_eval_map **start, int len) { }
5829 #endif /* !CONFIG_TRACE_EVAL_MAP_FILE */
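/*
 * Register a module's eval (enum) maps: update the event print format
 * conversions, and add the maps to the eval_map file list when
 * CONFIG_TRACE_EVAL_MAP_FILE is enabled.
 */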
5831 static void trace_insert_eval_map(struct module *mod,
5832 struct trace_eval_map **start, int len)
5834 struct trace_eval_map **map;
5841 trace_event_eval_update(map, len);
5843 trace_insert_eval_map_file(mod, start, len);
5847 tracing_set_trace_read(struct file *filp, char __user *ubuf,
5848 size_t cnt, loff_t *ppos)
5850 struct trace_array *tr = filp->private_data;
5851 char buf[MAX_TRACER_SIZE+2];
5854 mutex_lock(&trace_types_lock);
5855 r = sprintf(buf, "%s\n", tr->current_trace->name);
5856 mutex_unlock(&trace_types_lock);
5858 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
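/* Prepare @tr for a new tracer: reset the online per-CPU buffers first. */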
5861 int tracer_init(struct tracer *t, struct trace_array *tr)
5863 tracing_reset_online_cpus(&tr->array_buffer);
5867 static void set_buffer_entries(struct array_buffer *buf, unsigned long val)
5871 for_each_tracing_cpu(cpu)
5872 per_cpu_ptr(buf->data, cpu)->entries = val;
5875 static void update_buffer_entries(struct array_buffer *buf, int cpu)
5877 if (cpu == RING_BUFFER_ALL_CPUS) {
5878 set_buffer_entries(buf, ring_buffer_size(buf->buffer, 0));
5880 per_cpu_ptr(buf->data, cpu)->entries = ring_buffer_size(buf->buffer, cpu);
5884 #ifdef CONFIG_TRACER_MAX_TRACE
5885 /* resize @trace_buf's buffer to the size of @size_buf's entries */
5886 static int resize_buffer_duplicate_size(struct array_buffer *trace_buf,
5887 struct array_buffer *size_buf, int cpu_id)
5891 if (cpu_id == RING_BUFFER_ALL_CPUS) {
5892 for_each_tracing_cpu(cpu) {
5893 ret = ring_buffer_resize(trace_buf->buffer,
5894 per_cpu_ptr(size_buf->data, cpu)->entries, cpu);
5897 per_cpu_ptr(trace_buf->data, cpu)->entries =
5898 per_cpu_ptr(size_buf->data, cpu)->entries;
5901 ret = ring_buffer_resize(trace_buf->buffer,
5902 per_cpu_ptr(size_buf->data, cpu_id)->entries, cpu_id);
5904 per_cpu_ptr(trace_buf->data, cpu_id)->entries =
5905 per_cpu_ptr(size_buf->data, cpu_id)->entries;
5910 #endif /* CONFIG_TRACER_MAX_TRACE */
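/*
 * Resize the buffer for one CPU, or all of them for RING_BUFFER_ALL_CPUS.
 * Tracing is stopped across the resize, and the max/snapshot buffer is
 * kept the same size as the main buffer when it is allocated.
 */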
5912 static int __tracing_resize_ring_buffer(struct trace_array *tr,
5913 unsigned long size, int cpu)
5918 * If kernel or user changes the size of the ring buffer
5919 * we use the size that was given, and we can forget about
5920 * expanding it later.
5922 ring_buffer_expanded = true;
5924 /* May be called before buffers are initialized */
5925 if (!tr->array_buffer.buffer)
5928 /* Do not allow tracing while resizing ring buffer */
5929 tracing_stop_tr(tr);
5931 ret = ring_buffer_resize(tr->array_buffer.buffer, size, cpu);
5935 #ifdef CONFIG_TRACER_MAX_TRACE
5936 if (!tr->allocated_snapshot)
5939 ret = ring_buffer_resize(tr->max_buffer.buffer, size, cpu);
5941 int r = resize_buffer_duplicate_size(&tr->array_buffer,
5942 &tr->array_buffer, cpu);
5945 * AARGH! We are left with different
5946 * size max buffer!!!!
5947 * The max buffer is our "snapshot" buffer.
5948 * When a tracer needs a snapshot (one of the
5949 * latency tracers), it swaps the max buffer
5950 * with the saved snapshot. We succeeded in updating
5951 * the size of the main buffer, but failed to
5952 * update the size of the max buffer. Then, when we tried
5953 * to reset the main buffer to the original size, we
5954 * failed there too. This is very unlikely to
5955 * happen, but if it does, warn and kill all tracing.
5959 tracing_disabled = 1;
5964 update_buffer_entries(&tr->max_buffer, cpu);
5967 #endif /* CONFIG_TRACER_MAX_TRACE */
5969 update_buffer_entries(&tr->array_buffer, cpu);
5971 tracing_start_tr(tr);
5975 ssize_t tracing_resize_ring_buffer(struct trace_array *tr,
5976 unsigned long size, int cpu_id)
5980 mutex_lock(&trace_types_lock);
5982 if (cpu_id != RING_BUFFER_ALL_CPUS) {
5983 /* make sure, this cpu is enabled in the mask */
5984 if (!cpumask_test_cpu(cpu_id, tracing_buffer_mask)) {
5990 ret = __tracing_resize_ring_buffer(tr, size, cpu_id);
5995 mutex_unlock(&trace_types_lock);
6002 * tracing_update_buffers - used by tracing facility to expand ring buffers
6004 * To save memory when tracing is never used on a system that has it
6005 * configured in, the ring buffers are set to a minimum size. Once
6006 * a user starts to use the tracing facility, they need to grow
6007 * to their default size.
6009 * This function is to be called when a tracer is about to be used.
6011 int tracing_update_buffers(void)
6015 mutex_lock(&trace_types_lock);
6016 if (!ring_buffer_expanded)
6017 ret = __tracing_resize_ring_buffer(&global_trace, trace_buf_size,
6018 RING_BUFFER_ALL_CPUS);
6019 mutex_unlock(&trace_types_lock);
6024 struct trace_option_dentry;
6027 create_trace_option_files(struct trace_array *tr, struct tracer *tracer);
6030 * Used to clear out the tracer before deletion of an instance.
6031 * Must have trace_types_lock held.
6033 static void tracing_set_nop(struct trace_array *tr)
6035 if (tr->current_trace == &nop_trace)
6038 tr->current_trace->enabled--;
6040 if (tr->current_trace->reset)
6041 tr->current_trace->reset(tr);
6043 tr->current_trace = &nop_trace;
6046 static bool tracer_options_updated;
6048 static void add_tracer_options(struct trace_array *tr, struct tracer *t)
6050 /* Only enable if the directory has been created already. */
6054 /* Only create trace option files after update_tracer_options has finished */
6055 if (!tracer_options_updated)
6058 create_trace_option_files(tr, t);
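/*
 * tracing_set_tracer - switch @tr to the tracer named @buf: look the name
 * up, check that the tracer may run here, shut down the current tracer,
 * allocate the snapshot buffer if the new one needs it, then init it.
 */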
6061 int tracing_set_tracer(struct trace_array *tr, const char *buf)
6064 #ifdef CONFIG_TRACER_MAX_TRACE
6069 mutex_lock(&trace_types_lock);
6071 if (!ring_buffer_expanded) {
6072 ret = __tracing_resize_ring_buffer(tr, trace_buf_size,
6073 RING_BUFFER_ALL_CPUS);
6079 for (t = trace_types; t; t = t->next) {
6080 if (strcmp(t->name, buf) == 0)
6087 if (t == tr->current_trace)
6090 #ifdef CONFIG_TRACER_SNAPSHOT
6091 if (t->use_max_tr) {
6092 local_irq_disable();
6093 arch_spin_lock(&tr->max_lock);
6094 if (tr->cond_snapshot)
6096 arch_spin_unlock(&tr->max_lock);
6102 /* Some tracers won't work on kernel command line */
6103 if (system_state < SYSTEM_RUNNING && t->noboot) {
6104 pr_warn("Tracer '%s' is not allowed on command line, ignored\n",
6109 /* Some tracers are only allowed for the top level buffer */
6110 if (!trace_ok_for_array(t, tr)) {
6115 /* If trace pipe files are being read, we can't change the tracer */
6116 if (tr->trace_ref) {
6121 trace_branch_disable();
6123 tr->current_trace->enabled--;
6125 if (tr->current_trace->reset)
6126 tr->current_trace->reset(tr);
6128 #ifdef CONFIG_TRACER_MAX_TRACE
6129 had_max_tr = tr->current_trace->use_max_tr;
6131 /* Current trace needs to be nop_trace before synchronize_rcu */
6132 tr->current_trace = &nop_trace;
6134 if (had_max_tr && !t->use_max_tr) {
6136 * We need to make sure that update_max_tr() sees that
6137 * current_trace changed to nop_trace to keep it from
6138 * swapping the buffers after we resize it.
6139 * update_max_tr() is called with interrupts disabled,
6140 * so a synchronize_rcu() is sufficient.
6146 if (t->use_max_tr && !tr->allocated_snapshot) {
6147 ret = tracing_alloc_snapshot_instance(tr);
6152 tr->current_trace = &nop_trace;
6156 ret = tracer_init(t, tr);
6161 tr->current_trace = t;
6162 tr->current_trace->enabled++;
6163 trace_branch_enable(tr);
6165 mutex_unlock(&trace_types_lock);
6171 tracing_set_trace_write(struct file *filp, const char __user *ubuf,
6172 size_t cnt, loff_t *ppos)
6174 struct trace_array *tr = filp->private_data;
6175 char buf[MAX_TRACER_SIZE+1];
6182 if (cnt > MAX_TRACER_SIZE)
6183 cnt = MAX_TRACER_SIZE;
6185 if (copy_from_user(buf, ubuf, cnt))
6190 /* strip ending whitespace. */
6191 for (i = cnt - 1; i > 0 && isspace(buf[i]); i--)
6194 err = tracing_set_tracer(tr, buf);
6204 tracing_nsecs_read(unsigned long *ptr, char __user *ubuf,
6205 size_t cnt, loff_t *ppos)
6210 r = snprintf(buf, sizeof(buf), "%ld\n",
6211 *ptr == (unsigned long)-1 ? -1 : nsecs_to_usecs(*ptr));
6212 if (r > sizeof(buf))
6214 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
6218 tracing_nsecs_write(unsigned long *ptr, const char __user *ubuf,
6219 size_t cnt, loff_t *ppos)
6224 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
6234 tracing_thresh_read(struct file *filp, char __user *ubuf,
6235 size_t cnt, loff_t *ppos)
6237 return tracing_nsecs_read(&tracing_thresh, ubuf, cnt, ppos);
6241 tracing_thresh_write(struct file *filp, const char __user *ubuf,
6242 size_t cnt, loff_t *ppos)
6244 struct trace_array *tr = filp->private_data;
6247 mutex_lock(&trace_types_lock);
6248 ret = tracing_nsecs_write(&tracing_thresh, ubuf, cnt, ppos);
6252 if (tr->current_trace->update_thresh) {
6253 ret = tr->current_trace->update_thresh(tr);
6260 mutex_unlock(&trace_types_lock);
6265 #if defined(CONFIG_TRACER_MAX_TRACE) || defined(CONFIG_HWLAT_TRACER)
6268 tracing_max_lat_read(struct file *filp, char __user *ubuf,
6269 size_t cnt, loff_t *ppos)
6271 return tracing_nsecs_read(filp->private_data, ubuf, cnt, ppos);
6275 tracing_max_lat_write(struct file *filp, const char __user *ubuf,
6276 size_t cnt, loff_t *ppos)
6278 return tracing_nsecs_write(filp->private_data, ubuf, cnt, ppos);
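/*
 * Only one trace_pipe reader is allowed per CPU, or a single reader for
 * all CPUs. tr->pipe_cpumask tracks which CPUs already have an open pipe,
 * and the open is refused when the requested CPU is taken.
 */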
6283 static int open_pipe_on_cpu(struct trace_array *tr, int cpu)
6285 if (cpu == RING_BUFFER_ALL_CPUS) {
6286 if (cpumask_empty(tr->pipe_cpumask)) {
6287 cpumask_setall(tr->pipe_cpumask);
6290 } else if (!cpumask_test_cpu(cpu, tr->pipe_cpumask)) {
6291 cpumask_set_cpu(cpu, tr->pipe_cpumask);
6297 static void close_pipe_on_cpu(struct trace_array *tr, int cpu)
6299 if (cpu == RING_BUFFER_ALL_CPUS) {
6300 WARN_ON(!cpumask_full(tr->pipe_cpumask));
6301 cpumask_clear(tr->pipe_cpumask);
6303 WARN_ON(!cpumask_test_cpu(cpu, tr->pipe_cpumask));
6304 cpumask_clear_cpu(cpu, tr->pipe_cpumask);
6308 static int tracing_open_pipe(struct inode *inode, struct file *filp)
6310 struct trace_array *tr = inode->i_private;
6311 struct trace_iterator *iter;
6315 ret = tracing_check_open_get_tr(tr);
6319 mutex_lock(&trace_types_lock);
6320 cpu = tracing_get_cpu(inode);
6321 ret = open_pipe_on_cpu(tr, cpu);
6323 goto fail_pipe_on_cpu;
6325 /* create a buffer to store the information to pass to userspace */
6326 iter = kzalloc(sizeof(*iter), GFP_KERNEL);
6329 goto fail_alloc_iter;
6332 trace_seq_init(&iter->seq);
6333 iter->trace = tr->current_trace;
6335 if (!alloc_cpumask_var(&iter->started, GFP_KERNEL)) {
6340 /* trace pipe does not show start of buffer */
6341 cpumask_setall(iter->started);
6343 if (tr->trace_flags & TRACE_ITER_LATENCY_FMT)
6344 iter->iter_flags |= TRACE_FILE_LAT_FMT;
6346 /* Output in nanoseconds only if we are using a clock in nanoseconds. */
6347 if (trace_clocks[tr->clock_id].in_ns)
6348 iter->iter_flags |= TRACE_FILE_TIME_IN_NS;
6351 iter->array_buffer = &tr->array_buffer;
6352 iter->cpu_file = cpu;
6353 mutex_init(&iter->mutex);
6354 filp->private_data = iter;
6356 if (iter->trace->pipe_open)
6357 iter->trace->pipe_open(iter);
6359 nonseekable_open(inode, filp);
6363 mutex_unlock(&trace_types_lock);
6369 close_pipe_on_cpu(tr, cpu);
6371 __trace_array_put(tr);
6372 mutex_unlock(&trace_types_lock);
6376 static int tracing_release_pipe(struct inode *inode, struct file *file)
6378 struct trace_iterator *iter = file->private_data;
6379 struct trace_array *tr = inode->i_private;
6381 mutex_lock(&trace_types_lock);
6385 if (iter->trace->pipe_close)
6386 iter->trace->pipe_close(iter);
6387 close_pipe_on_cpu(tr, iter->cpu_file);
6388 mutex_unlock(&trace_types_lock);
6390 free_cpumask_var(iter->started);
6392 mutex_destroy(&iter->mutex);
6395 trace_array_put(tr);
6401 trace_poll(struct trace_iterator *iter, struct file *filp, poll_table *poll_table)
6403 struct trace_array *tr = iter->tr;
6405 /* Iterators are static, they should be filled or empty */
6406 if (trace_buffer_iter(iter, iter->cpu_file))
6407 return EPOLLIN | EPOLLRDNORM;
6409 if (tr->trace_flags & TRACE_ITER_BLOCK)
6411 * Always select as readable when in blocking mode
6413 return EPOLLIN | EPOLLRDNORM;
6415 return ring_buffer_poll_wait(iter->array_buffer->buffer, iter->cpu_file,
6416 filp, poll_table, iter->tr->buffer_percent);
6420 tracing_poll_pipe(struct file *filp, poll_table *poll_table)
6422 struct trace_iterator *iter = filp->private_data;
6424 return trace_poll(iter, filp, poll_table);
6427 /* Must be called with iter->mutex held. */
6428 static int tracing_wait_pipe(struct file *filp)
6430 struct trace_iterator *iter = filp->private_data;
6433 while (trace_empty(iter)) {
6435 if ((filp->f_flags & O_NONBLOCK)) {
6440 * We block until we read something and tracing is disabled.
6441 * We still block if tracing is disabled, but we have never
6442 * read anything. This allows a user to cat this file, and
6443 * then enable tracing. But after we have read something,
6444 * we give an EOF when tracing is again disabled.
6446 * iter->pos will be 0 if we haven't read anything.
6448 if (!tracer_tracing_is_on(iter->tr) && iter->pos)
6451 mutex_unlock(&iter->mutex);
6453 ret = wait_on_pipe(iter, 0);
6455 mutex_lock(&iter->mutex);
6468 tracing_read_pipe(struct file *filp, char __user *ubuf,
6469 size_t cnt, loff_t *ppos)
6471 struct trace_iterator *iter = filp->private_data;
6475 * Avoid more than one consumer on a single file descriptor
6476 * This is just a matter of trace coherency, the ring buffer itself is protected.
6479 mutex_lock(&iter->mutex);
6481 /* return any leftover data */
6482 sret = trace_seq_to_user(&iter->seq, ubuf, cnt);
6486 trace_seq_init(&iter->seq);
6488 if (iter->trace->read) {
6489 sret = iter->trace->read(iter, filp, ubuf, cnt, ppos);
6495 sret = tracing_wait_pipe(filp);
6499 /* stop when tracing is finished */
6500 if (trace_empty(iter)) {
6505 if (cnt >= PAGE_SIZE)
6506 cnt = PAGE_SIZE - 1;
6508 /* reset all but tr, trace, and overruns */
6509 memset(&iter->seq, 0,
6510 sizeof(struct trace_iterator) -
6511 offsetof(struct trace_iterator, seq));
6512 cpumask_clear(iter->started);
6513 trace_seq_init(&iter->seq);
6516 trace_event_read_lock();
6517 trace_access_lock(iter->cpu_file);
6518 while (trace_find_next_entry_inc(iter) != NULL) {
6519 enum print_line_t ret;
6520 int save_len = iter->seq.seq.len;
6522 ret = print_trace_line(iter);
6523 if (ret == TRACE_TYPE_PARTIAL_LINE) {
6525 * If one print_trace_line() fills the entire trace_seq in one shot,
6526 * trace_seq_to_user() will return -EBUSY because save_len == 0.
6527 * In this case, we need to consume it, otherwise the loop will peek
6528 * this event next time, resulting in an infinite loop.
6530 if (save_len == 0) {
6532 trace_seq_puts(&iter->seq, "[LINE TOO BIG]\n");
6533 trace_consume(iter);
6537 /* In other cases, don't print partial lines */
6538 iter->seq.seq.len = save_len;
6541 if (ret != TRACE_TYPE_NO_CONSUME)
6542 trace_consume(iter);
6544 if (trace_seq_used(&iter->seq) >= cnt)
6548 * Setting the full flag means we reached the trace_seq buffer
6549 * size and should have left via the partial-output condition above.
6550 * One of the trace_seq_* functions is not being used properly.
6552 WARN_ONCE(iter->seq.full, "full flag set for trace type %d",
6555 trace_access_unlock(iter->cpu_file);
6556 trace_event_read_unlock();
6558 /* Now copy what we have to the user */
6559 sret = trace_seq_to_user(&iter->seq, ubuf, cnt);
6560 if (iter->seq.seq.readpos >= trace_seq_used(&iter->seq))
6561 trace_seq_init(&iter->seq);
6564 * If there was nothing to send to user, in spite of consuming trace
6565 * entries, go back to wait for more entries.
6571 mutex_unlock(&iter->mutex);
6576 static void tracing_spd_release_pipe(struct splice_pipe_desc *spd,
6579 __free_page(spd->pages[idx]);
6583 tracing_fill_pipe_page(size_t rem, struct trace_iterator *iter)
6589 /* Seq buffer is page-sized, exactly what we need. */
6591 save_len = iter->seq.seq.len;
6592 ret = print_trace_line(iter);
6594 if (trace_seq_has_overflowed(&iter->seq)) {
6595 iter->seq.seq.len = save_len;
6600 * This should not be hit, because it should only
6601 * be set if the iter->seq overflowed. But check it
6602 * anyway to be safe.
6604 if (ret == TRACE_TYPE_PARTIAL_LINE) {
6605 iter->seq.seq.len = save_len;
6609 count = trace_seq_used(&iter->seq) - save_len;
6612 iter->seq.seq.len = save_len;
6616 if (ret != TRACE_TYPE_NO_CONSUME)
6617 trace_consume(iter);
6619 if (!trace_find_next_entry_inc(iter)) {
6629 static ssize_t tracing_splice_read_pipe(struct file *filp,
6631 struct pipe_inode_info *pipe,
6635 struct page *pages_def[PIPE_DEF_BUFFERS];
6636 struct partial_page partial_def[PIPE_DEF_BUFFERS];
6637 struct trace_iterator *iter = filp->private_data;
6638 struct splice_pipe_desc spd = {
6640 .partial = partial_def,
6641 .nr_pages = 0, /* This gets updated below. */
6642 .nr_pages_max = PIPE_DEF_BUFFERS,
6643 .ops = &default_pipe_buf_ops,
6644 .spd_release = tracing_spd_release_pipe,
6650 if (splice_grow_spd(pipe, &spd))
6653 mutex_lock(&iter->mutex);
6655 if (iter->trace->splice_read) {
6656 ret = iter->trace->splice_read(iter, filp,
6657 ppos, pipe, len, flags);
6662 ret = tracing_wait_pipe(filp);
6666 if (!iter->ent && !trace_find_next_entry_inc(iter)) {
6671 trace_event_read_lock();
6672 trace_access_lock(iter->cpu_file);
6674 /* Fill as many pages as possible. */
6675 for (i = 0, rem = len; i < spd.nr_pages_max && rem; i++) {
6676 spd.pages[i] = alloc_page(GFP_KERNEL);
6680 rem = tracing_fill_pipe_page(rem, iter);
6682 /* Copy the data into the page, so we can start over. */
6683 ret = trace_seq_to_buffer(&iter->seq,
6684 page_address(spd.pages[i]),
6685 trace_seq_used(&iter->seq));
6687 __free_page(spd.pages[i]);
6690 spd.partial[i].offset = 0;
6691 spd.partial[i].len = trace_seq_used(&iter->seq);
6693 trace_seq_init(&iter->seq);
6696 trace_access_unlock(iter->cpu_file);
6697 trace_event_read_unlock();
6698 mutex_unlock(&iter->mutex);
6703 ret = splice_to_pipe(pipe, &spd);
6707 splice_shrink_spd(&spd);
6711 mutex_unlock(&iter->mutex);
6716 tracing_entries_read(struct file *filp, char __user *ubuf,
6717 size_t cnt, loff_t *ppos)
6719 struct inode *inode = file_inode(filp);
6720 struct trace_array *tr = inode->i_private;
6721 int cpu = tracing_get_cpu(inode);
6726 mutex_lock(&trace_types_lock);
6728 if (cpu == RING_BUFFER_ALL_CPUS) {
6729 int cpu, buf_size_same;
6734 /* check if all cpu sizes are same */
6735 for_each_tracing_cpu(cpu) {
6736 /* fill in the size from first enabled cpu */
6738 size = per_cpu_ptr(tr->array_buffer.data, cpu)->entries;
6739 if (size != per_cpu_ptr(tr->array_buffer.data, cpu)->entries) {
6745 if (buf_size_same) {
6746 if (!ring_buffer_expanded)
6747 r = sprintf(buf, "%lu (expanded: %lu)\n",
6749 trace_buf_size >> 10);
6751 r = sprintf(buf, "%lu\n", size >> 10);
6753 r = sprintf(buf, "X\n");
6755 r = sprintf(buf, "%lu\n", per_cpu_ptr(tr->array_buffer.data, cpu)->entries >> 10);
6757 mutex_unlock(&trace_types_lock);
6759 ret = simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
6764 tracing_entries_write(struct file *filp, const char __user *ubuf,
6765 size_t cnt, loff_t *ppos)
6767 struct inode *inode = file_inode(filp);
6768 struct trace_array *tr = inode->i_private;
6772 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
6776 /* must have at least 1 entry */
6780 /* value is in KB */
6782 ret = tracing_resize_ring_buffer(tr, val, tracing_get_cpu(inode));
6792 tracing_total_entries_read(struct file *filp, char __user *ubuf,
6793 size_t cnt, loff_t *ppos)
6795 struct trace_array *tr = filp->private_data;
6798 unsigned long size = 0, expanded_size = 0;
6800 mutex_lock(&trace_types_lock);
6801 for_each_tracing_cpu(cpu) {
6802 size += per_cpu_ptr(tr->array_buffer.data, cpu)->entries >> 10;
6803 if (!ring_buffer_expanded)
6804 expanded_size += trace_buf_size >> 10;
6806 if (ring_buffer_expanded)
6807 r = sprintf(buf, "%lu\n", size);
6809 r = sprintf(buf, "%lu (expanded: %lu)\n", size, expanded_size);
6810 mutex_unlock(&trace_types_lock);
6812 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
6816 tracing_free_buffer_write(struct file *filp, const char __user *ubuf,
6817 size_t cnt, loff_t *ppos)
6820 * There is no need to read what the user has written; this function
6821 * just makes sure that there is no error when "echo" is used
6830 tracing_free_buffer_release(struct inode *inode, struct file *filp)
6832 struct trace_array *tr = inode->i_private;
6834 /* disable tracing ? */
6835 if (tr->trace_flags & TRACE_ITER_STOP_ON_FREE)
6836 tracer_tracing_off(tr);
6837 /* resize the ring buffer to 0 */
6838 tracing_resize_ring_buffer(tr, 0, RING_BUFFER_ALL_CPUS);
6840 trace_array_put(tr);
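/*
 * trace_marker write handler: copy the user string into a TRACE_PRINT
 * event in the ring buffer, terminate it with a newline and NUL, run any
 * triggers attached to the trace_marker event, and export the event when
 * marker exports are enabled.
 */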
6846 tracing_mark_write(struct file *filp, const char __user *ubuf,
6847 size_t cnt, loff_t *fpos)
6849 struct trace_array *tr = filp->private_data;
6850 struct ring_buffer_event *event;
6851 enum event_trigger_type tt = ETT_NONE;
6852 struct trace_buffer *buffer;
6853 struct print_entry *entry;
6854 unsigned long irq_flags;
6859 /* Used in tracing_mark_raw_write() as well */
6860 #define FAULTED_STR "<faulted>"
6861 #define FAULTED_SIZE (sizeof(FAULTED_STR) - 1) /* '\0' is already accounted for */
6863 if (tracing_disabled)
6866 if (!(tr->trace_flags & TRACE_ITER_MARKERS))
6869 if (cnt > TRACE_BUF_SIZE)
6870 cnt = TRACE_BUF_SIZE;
6872 BUILD_BUG_ON(TRACE_BUF_SIZE >= PAGE_SIZE);
6874 local_save_flags(irq_flags);
6875 size = sizeof(*entry) + cnt + 2; /* add '\0' and possible '\n' */
6877 /* If less than "<faulted>", then make sure we can still add that */
6878 if (cnt < FAULTED_SIZE)
6879 size += FAULTED_SIZE - cnt;
6881 buffer = tr->array_buffer.buffer;
6882 event = __trace_buffer_lock_reserve(buffer, TRACE_PRINT, size,
6883 irq_flags, preempt_count());
6884 if (unlikely(!event))
6885 /* Ring buffer disabled, return as if not open for write */
6888 entry = ring_buffer_event_data(event);
6889 entry->ip = _THIS_IP_;
6891 len = __copy_from_user_inatomic(&entry->buf, ubuf, cnt);
6893 memcpy(&entry->buf, FAULTED_STR, FAULTED_SIZE);
6899 if (tr->trace_marker_file && !list_empty(&tr->trace_marker_file->triggers)) {
6900 /* do not add \n before testing triggers, but add \0 */
6901 entry->buf[cnt] = '\0';
6902 tt = event_triggers_call(tr->trace_marker_file, entry, event);
6905 if (entry->buf[cnt - 1] != '\n') {
6906 entry->buf[cnt] = '\n';
6907 entry->buf[cnt + 1] = '\0';
6909 entry->buf[cnt] = '\0';
6911 if (static_branch_unlikely(&trace_marker_exports_enabled))
6912 ftrace_exports(event, TRACE_EXPORT_MARKER);
6913 __buffer_unlock_commit(buffer, event);
6916 event_triggers_post_call(tr->trace_marker_file, tt);
6924 /* Limit it for now to 3K (including tag) */
6925 #define RAW_DATA_MAX_SIZE (1024*3)
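/*
 * trace_marker_raw write handler: like tracing_mark_write() but the
 * payload is binary - a leading tag id followed by raw data - stored in a
 * TRACE_RAW_DATA event and capped at RAW_DATA_MAX_SIZE.
 */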
6928 tracing_mark_raw_write(struct file *filp, const char __user *ubuf,
6929 size_t cnt, loff_t *fpos)
6931 struct trace_array *tr = filp->private_data;
6932 struct ring_buffer_event *event;
6933 struct trace_buffer *buffer;
6934 struct raw_data_entry *entry;
6935 unsigned long irq_flags;
6940 #define FAULT_SIZE_ID (FAULTED_SIZE + sizeof(int))
6942 if (tracing_disabled)
6945 if (!(tr->trace_flags & TRACE_ITER_MARKERS))
6948 /* The marker must at least have a tag id */
6949 if (cnt < sizeof(unsigned int) || cnt > RAW_DATA_MAX_SIZE)
6952 if (cnt > TRACE_BUF_SIZE)
6953 cnt = TRACE_BUF_SIZE;
6955 BUILD_BUG_ON(TRACE_BUF_SIZE >= PAGE_SIZE);
6957 local_save_flags(irq_flags);
6958 size = sizeof(*entry) + cnt;
6959 if (cnt < FAULT_SIZE_ID)
6960 size += FAULT_SIZE_ID - cnt;
6962 buffer = tr->array_buffer.buffer;
6963 event = __trace_buffer_lock_reserve(buffer, TRACE_RAW_DATA, size,
6964 irq_flags, preempt_count());
6966 /* Ring buffer disabled, return as if not open for write */
6969 entry = ring_buffer_event_data(event);
6971 len = __copy_from_user_inatomic(&entry->id, ubuf, cnt);
6974 memcpy(&entry->buf, FAULTED_STR, FAULTED_SIZE);
6979 __buffer_unlock_commit(buffer, event);
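/* Show the available trace clocks; the one currently in use is bracketed. */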
6987 static int tracing_clock_show(struct seq_file *m, void *v)
6989 struct trace_array *tr = m->private;
6992 for (i = 0; i < ARRAY_SIZE(trace_clocks); i++)
6994 "%s%s%s%s", i ? " " : "",
6995 i == tr->clock_id ? "[" : "", trace_clocks[i].name,
6996 i == tr->clock_id ? "]" : "");
7002 int tracing_set_clock(struct trace_array *tr, const char *clockstr)
7006 for (i = 0; i < ARRAY_SIZE(trace_clocks); i++) {
7007 if (strcmp(trace_clocks[i].name, clockstr) == 0)
7010 if (i == ARRAY_SIZE(trace_clocks))
7013 mutex_lock(&trace_types_lock);
7017 ring_buffer_set_clock(tr->array_buffer.buffer, trace_clocks[i].func);
7020 * New clock may not be consistent with the previous clock.
7021 * Reset the buffer so that it doesn't have incomparable timestamps.
7023 tracing_reset_online_cpus(&tr->array_buffer);
7025 #ifdef CONFIG_TRACER_MAX_TRACE
7026 if (tr->max_buffer.buffer)
7027 ring_buffer_set_clock(tr->max_buffer.buffer, trace_clocks[i].func);
7028 tracing_reset_online_cpus(&tr->max_buffer);
7031 mutex_unlock(&trace_types_lock);
7036 static ssize_t tracing_clock_write(struct file *filp, const char __user *ubuf,
7037 size_t cnt, loff_t *fpos)
7039 struct seq_file *m = filp->private_data;
7040 struct trace_array *tr = m->private;
7042 const char *clockstr;
7045 if (cnt >= sizeof(buf))
7048 if (copy_from_user(buf, ubuf, cnt))
7053 clockstr = strstrip(buf);
7055 ret = tracing_set_clock(tr, clockstr);
7064 static int tracing_clock_open(struct inode *inode, struct file *file)
7066 struct trace_array *tr = inode->i_private;
7069 ret = tracing_check_open_get_tr(tr);
7073 ret = single_open(file, tracing_clock_show, inode->i_private);
7075 trace_array_put(tr);
7080 static int tracing_time_stamp_mode_show(struct seq_file *m, void *v)
7082 struct trace_array *tr = m->private;
7084 mutex_lock(&trace_types_lock);
7086 if (ring_buffer_time_stamp_abs(tr->array_buffer.buffer))
7087 seq_puts(m, "delta [absolute]\n");
7089 seq_puts(m, "[delta] absolute\n");
7091 mutex_unlock(&trace_types_lock);
7096 static int tracing_time_stamp_mode_open(struct inode *inode, struct file *file)
7098 struct trace_array *tr = inode->i_private;
7101 ret = tracing_check_open_get_tr(tr);
7105 ret = single_open(file, tracing_time_stamp_mode_show, inode->i_private);
7107 trace_array_put(tr);
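/*
 * Enable or disable absolute (non-delta) timestamps on @tr's buffers.
 * A reference count lets several users request absolute timestamps
 * without interfering with one another.
 */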
7112 int tracing_set_time_stamp_abs(struct trace_array *tr, bool abs)
7116 mutex_lock(&trace_types_lock);
7118 if (abs && tr->time_stamp_abs_ref++)
7122 if (WARN_ON_ONCE(!tr->time_stamp_abs_ref)) {
7127 if (--tr->time_stamp_abs_ref)
7131 ring_buffer_set_time_stamp_abs(tr->array_buffer.buffer, abs);
7133 #ifdef CONFIG_TRACER_MAX_TRACE
7134 if (tr->max_buffer.buffer)
7135 ring_buffer_set_time_stamp_abs(tr->max_buffer.buffer, abs);
7138 mutex_unlock(&trace_types_lock);
7143 struct ftrace_buffer_info {
7144 struct trace_iterator iter;
7146 unsigned int spare_cpu;
7150 #ifdef CONFIG_TRACER_SNAPSHOT
7151 static int tracing_snapshot_open(struct inode *inode, struct file *file)
7153 struct trace_array *tr = inode->i_private;
7154 struct trace_iterator *iter;
7158 ret = tracing_check_open_get_tr(tr);
7162 if (file->f_mode & FMODE_READ) {
7163 iter = __tracing_open(inode, file, true);
7165 ret = PTR_ERR(iter);
7167 /* Writes still need the seq_file to hold the private data */
7169 m = kzalloc(sizeof(*m), GFP_KERNEL);
7172 iter = kzalloc(sizeof(*iter), GFP_KERNEL);
7180 iter->array_buffer = &tr->max_buffer;
7181 iter->cpu_file = tracing_get_cpu(inode);
7183 file->private_data = m;
7187 trace_array_put(tr);
7192 static void tracing_swap_cpu_buffer(void *tr)
7194 update_max_tr_single((struct trace_array *)tr, current, smp_processor_id());
7198 tracing_snapshot_write(struct file *filp, const char __user *ubuf, size_t cnt,
7201 struct seq_file *m = filp->private_data;
7202 struct trace_iterator *iter = m->private;
7203 struct trace_array *tr = iter->tr;
7207 ret = tracing_update_buffers();
7211 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
7215 mutex_lock(&trace_types_lock);
7217 if (tr->current_trace->use_max_tr) {
7222 local_irq_disable();
7223 arch_spin_lock(&tr->max_lock);
7224 if (tr->cond_snapshot)
7226 arch_spin_unlock(&tr->max_lock);
7233 if (iter->cpu_file != RING_BUFFER_ALL_CPUS) {
7237 if (tr->allocated_snapshot)
7241 /* Only allow per-cpu swap if the ring buffer supports it */
7242 #ifndef CONFIG_RING_BUFFER_ALLOW_SWAP
7243 if (iter->cpu_file != RING_BUFFER_ALL_CPUS) {
7248 if (tr->allocated_snapshot)
7249 ret = resize_buffer_duplicate_size(&tr->max_buffer,
7250 &tr->array_buffer, iter->cpu_file);
7252 ret = tracing_alloc_snapshot_instance(tr);
7255 /* Now, we're going to swap */
7256 if (iter->cpu_file == RING_BUFFER_ALL_CPUS) {
7257 local_irq_disable();
7258 update_max_tr(tr, current, smp_processor_id(), NULL);
7261 smp_call_function_single(iter->cpu_file, tracing_swap_cpu_buffer,
7266 if (tr->allocated_snapshot) {
7267 if (iter->cpu_file == RING_BUFFER_ALL_CPUS)
7268 tracing_reset_online_cpus(&tr->max_buffer);
7270 tracing_reset_cpu(&tr->max_buffer, iter->cpu_file);
7280 mutex_unlock(&trace_types_lock);
7284 static int tracing_snapshot_release(struct inode *inode, struct file *file)
7286 struct seq_file *m = file->private_data;
7289 ret = tracing_release(inode, file);
7291 if (file->f_mode & FMODE_READ)
7294 /* If write only, the seq_file is just a stub */
7302 static int tracing_buffers_open(struct inode *inode, struct file *filp);
7303 static ssize_t tracing_buffers_read(struct file *filp, char __user *ubuf,
7304 size_t count, loff_t *ppos);
7305 static int tracing_buffers_release(struct inode *inode, struct file *file);
7306 static ssize_t tracing_buffers_splice_read(struct file *file, loff_t *ppos,
7307 struct pipe_inode_info *pipe, size_t len, unsigned int flags);
7309 static int snapshot_raw_open(struct inode *inode, struct file *filp)
7311 struct ftrace_buffer_info *info;
7314 /* The following checks for tracefs lockdown */
7315 ret = tracing_buffers_open(inode, filp);
7319 info = filp->private_data;
7321 if (info->iter.trace->use_max_tr) {
7322 tracing_buffers_release(inode, filp);
7326 info->iter.snapshot = true;
7327 info->iter.array_buffer = &info->iter.tr->max_buffer;
7332 #endif /* CONFIG_TRACER_SNAPSHOT */
7335 static const struct file_operations tracing_thresh_fops = {
7336 .open = tracing_open_generic,
7337 .read = tracing_thresh_read,
7338 .write = tracing_thresh_write,
7339 .llseek = generic_file_llseek,
7342 #if defined(CONFIG_TRACER_MAX_TRACE) || defined(CONFIG_HWLAT_TRACER)
7343 static const struct file_operations tracing_max_lat_fops = {
7344 .open = tracing_open_generic,
7345 .read = tracing_max_lat_read,
7346 .write = tracing_max_lat_write,
7347 .llseek = generic_file_llseek,
7351 static const struct file_operations set_tracer_fops = {
7352 .open = tracing_open_generic_tr,
7353 .read = tracing_set_trace_read,
7354 .write = tracing_set_trace_write,
7355 .llseek = generic_file_llseek,
7356 .release = tracing_release_generic_tr,
7359 static const struct file_operations tracing_pipe_fops = {
7360 .open = tracing_open_pipe,
7361 .poll = tracing_poll_pipe,
7362 .read = tracing_read_pipe,
7363 .splice_read = tracing_splice_read_pipe,
7364 .release = tracing_release_pipe,
7365 .llseek = no_llseek,
7368 static const struct file_operations tracing_entries_fops = {
7369 .open = tracing_open_generic_tr,
7370 .read = tracing_entries_read,
7371 .write = tracing_entries_write,
7372 .llseek = generic_file_llseek,
7373 .release = tracing_release_generic_tr,
7376 static const struct file_operations tracing_total_entries_fops = {
7377 .open = tracing_open_generic_tr,
7378 .read = tracing_total_entries_read,
7379 .llseek = generic_file_llseek,
7380 .release = tracing_release_generic_tr,
7383 static const struct file_operations tracing_free_buffer_fops = {
7384 .open = tracing_open_generic_tr,
7385 .write = tracing_free_buffer_write,
7386 .release = tracing_free_buffer_release,
7389 static const struct file_operations tracing_mark_fops = {
7390 .open = tracing_open_generic_tr,
7391 .write = tracing_mark_write,
7392 .llseek = generic_file_llseek,
7393 .release = tracing_release_generic_tr,
7396 static const struct file_operations tracing_mark_raw_fops = {
7397 .open = tracing_open_generic_tr,
7398 .write = tracing_mark_raw_write,
7399 .llseek = generic_file_llseek,
7400 .release = tracing_release_generic_tr,
7403 static const struct file_operations trace_clock_fops = {
7404 .open = tracing_clock_open,
7406 .llseek = seq_lseek,
7407 .release = tracing_single_release_tr,
7408 .write = tracing_clock_write,
7411 static const struct file_operations trace_time_stamp_mode_fops = {
7412 .open = tracing_time_stamp_mode_open,
7414 .llseek = seq_lseek,
7415 .release = tracing_single_release_tr,
7418 #ifdef CONFIG_TRACER_SNAPSHOT
7419 static const struct file_operations snapshot_fops = {
7420 .open = tracing_snapshot_open,
7422 .write = tracing_snapshot_write,
7423 .llseek = tracing_lseek,
7424 .release = tracing_snapshot_release,
7427 static const struct file_operations snapshot_raw_fops = {
7428 .open = snapshot_raw_open,
7429 .read = tracing_buffers_read,
7430 .release = tracing_buffers_release,
7431 .splice_read = tracing_buffers_splice_read,
7432 .llseek = no_llseek,
7435 #endif /* CONFIG_TRACER_SNAPSHOT */
7437 #define TRACING_LOG_ERRS_MAX 8
7438 #define TRACING_LOG_LOC_MAX 128
7440 #define CMD_PREFIX " Command: "
7443 const char **errs; /* ptr to loc-specific array of err strings */
7444 u8 type; /* index into errs -> specific err string */
7445 u8 pos; /* MAX_FILTER_STR_VAL = 256 */
7449 struct tracing_log_err {
7450 struct list_head list;
7451 struct err_info info;
7452 char loc[TRACING_LOG_LOC_MAX]; /* err location */
7453 char cmd[MAX_FILTER_STR_VAL]; /* what caused err */
7456 static DEFINE_MUTEX(tracing_err_log_lock);
7458 static struct tracing_log_err *get_tracing_log_err(struct trace_array *tr)
7460 struct tracing_log_err *err;
7462 if (tr->n_err_log_entries < TRACING_LOG_ERRS_MAX) {
7463 err = kzalloc(sizeof(*err), GFP_KERNEL);
7465 err = ERR_PTR(-ENOMEM);
7467 tr->n_err_log_entries++;
7472 err = list_first_entry(&tr->err_log, struct tracing_log_err, list);
7473 list_del(&err->list);
7479 * err_pos - find the position of a string within a command for error careting
7480 * @cmd: The tracing command that caused the error
7481 * @str: The string to position the caret at within @cmd
7483 * Finds the position of the first occurrence of @str within @cmd. The
7484 * return value can be passed to tracing_log_err() for caret placement
7487 * Returns the index within @cmd of the first occurrence of @str or 0
7488 * if @str was not found.
7490 unsigned int err_pos(char *cmd, const char *str)
7494 if (WARN_ON(!strlen(cmd)))
7497 found = strstr(cmd, str);
7505 * tracing_log_err - write an error to the tracing error log
7506 * @tr: The associated trace array for the error (NULL for top level array)
7507 * @loc: A string describing where the error occurred
7508 * @cmd: The tracing command that caused the error
7509 * @errs: The array of loc-specific static error strings
7510 * @type: The index into errs[], which produces the specific static err string
7511 * @pos: The position the caret should be placed in the cmd
7513 * Writes an error into tracing/error_log of the form:
7515 * <loc>: error: <text>
7519 * tracing/error_log is a small log file containing the last
7520 * TRACING_LOG_ERRS_MAX errors (8). Memory for errors isn't allocated
7521 * unless there has been a tracing error, and the error log can be
7522 * cleared and have its memory freed by writing the empty string in
7523 * truncation mode to it i.e. echo > tracing/error_log.
7525 * NOTE: the @errs array along with the @type param are used to
7526 * produce a static error string - this string is not copied and saved
7527 * when the error is logged - only a pointer to it is saved. See
7528 * existing callers for examples of how static strings are typically
7529 * defined for use with tracing_log_err().
7531 void tracing_log_err(struct trace_array *tr,
7532 const char *loc, const char *cmd,
7533 const char **errs, u8 type, u8 pos)
7535 struct tracing_log_err *err;
7540 mutex_lock(&tracing_err_log_lock);
7541 err = get_tracing_log_err(tr);
7542 if (PTR_ERR(err) == -ENOMEM) {
7543 mutex_unlock(&tracing_err_log_lock);
7547 snprintf(err->loc, TRACING_LOG_LOC_MAX, "%s: error: ", loc);
7548 snprintf(err->cmd, MAX_FILTER_STR_VAL, "\n" CMD_PREFIX "%s\n", cmd);
7550 err->info.errs = errs;
7551 err->info.type = type;
7552 err->info.pos = pos;
7553 err->info.ts = local_clock();
7555 list_add_tail(&err->list, &tr->err_log);
7556 mutex_unlock(&tracing_err_log_lock);
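/*
 * Illustrative only (a hedged sketch, not part of the original file): how a
 * tracing command parser typically reports a parse error through err_pos()
 * and tracing_log_err().  The error table and helper below are hypothetical;
 * see existing callers (e.g. the hist trigger code) for the real patterns.
 *
 *	static const char *foo_cmd_errs[] = {
 *		"Duplicate field name",		// FOO_ERR_DUPLICATE (hypothetical)
 *		"Field not found",		// FOO_ERR_NO_FIELD  (hypothetical)
 *	};
 *
 *	static void foo_report_err(struct trace_array *tr, char *cmd,
 *				   const char *bad_tok, u8 type)
 *	{
 *		// Caret goes under the first occurrence of @bad_tok in @cmd.
 *		tracing_log_err(tr, "foo: parse error", cmd,
 *				foo_cmd_errs, type, err_pos(cmd, bad_tok));
 *	}
 *
 * A resulting tracing/error_log entry then looks roughly like:
 *
 *	[   12.345678] foo: parse error: Field not found
 *	  Command: keys=common_pid,bogus_field
 *	                           ^
 */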
7559 static void clear_tracing_err_log(struct trace_array *tr)
7561 struct tracing_log_err *err, *next;
7563 mutex_lock(&tracing_err_log_lock);
7564 list_for_each_entry_safe(err, next, &tr->err_log, list) {
7565 list_del(&err->list);
7569 tr->n_err_log_entries = 0;
7570 mutex_unlock(&tracing_err_log_lock);
7573 static void *tracing_err_log_seq_start(struct seq_file *m, loff_t *pos)
7575 struct trace_array *tr = m->private;
7577 mutex_lock(&tracing_err_log_lock);
7579 return seq_list_start(&tr->err_log, *pos);
7582 static void *tracing_err_log_seq_next(struct seq_file *m, void *v, loff_t *pos)
7584 struct trace_array *tr = m->private;
7586 return seq_list_next(v, &tr->err_log, pos);
7589 static void tracing_err_log_seq_stop(struct seq_file *m, void *v)
7591 mutex_unlock(&tracing_err_log_lock);
7594 static void tracing_err_log_show_pos(struct seq_file *m, u8 pos)
7598 for (i = 0; i < sizeof(CMD_PREFIX) - 1; i++)
7600 for (i = 0; i < pos; i++)
7605 static int tracing_err_log_seq_show(struct seq_file *m, void *v)
7607 struct tracing_log_err *err = v;
7610 const char *err_text = err->info.errs[err->info.type];
7611 u64 sec = err->info.ts;
7614 nsec = do_div(sec, NSEC_PER_SEC);
7615 seq_printf(m, "[%5llu.%06u] %s%s", sec, nsec / 1000,
7616 err->loc, err_text);
7617 seq_printf(m, "%s", err->cmd);
7618 tracing_err_log_show_pos(m, err->info.pos);
7624 static const struct seq_operations tracing_err_log_seq_ops = {
7625 .start = tracing_err_log_seq_start,
7626 .next = tracing_err_log_seq_next,
7627 .stop = tracing_err_log_seq_stop,
7628 .show = tracing_err_log_seq_show
7631 static int tracing_err_log_open(struct inode *inode, struct file *file)
7633 struct trace_array *tr = inode->i_private;
7636 ret = tracing_check_open_get_tr(tr);
7640 /* If this file was opened for write, then erase contents */
7641 if ((file->f_mode & FMODE_WRITE) && (file->f_flags & O_TRUNC))
7642 clear_tracing_err_log(tr);
7644 if (file->f_mode & FMODE_READ) {
7645 ret = seq_open(file, &tracing_err_log_seq_ops);
7647 struct seq_file *m = file->private_data;
7650 trace_array_put(tr);
7656 static ssize_t tracing_err_log_write(struct file *file,
7657 const char __user *buffer,
7658 size_t count, loff_t *ppos)
7663 static int tracing_err_log_release(struct inode *inode, struct file *file)
7665 struct trace_array *tr = inode->i_private;
7667 trace_array_put(tr);
7669 if (file->f_mode & FMODE_READ)
7670 seq_release(inode, file);
7675 static const struct file_operations tracing_err_log_fops = {
7676 .open = tracing_err_log_open,
7677 .write = tracing_err_log_write,
7679 .llseek = tracing_lseek,
7680 .release = tracing_err_log_release,
7683 static int tracing_buffers_open(struct inode *inode, struct file *filp)
7685 struct trace_array *tr = inode->i_private;
7686 struct ftrace_buffer_info *info;
7689 ret = tracing_check_open_get_tr(tr);
7693 info = kvzalloc(sizeof(*info), GFP_KERNEL);
7695 trace_array_put(tr);
7699 mutex_lock(&trace_types_lock);
7702 info->iter.cpu_file = tracing_get_cpu(inode);
7703 info->iter.trace = tr->current_trace;
7704 info->iter.array_buffer = &tr->array_buffer;
7706 /* Force reading ring buffer for first read */
7707 info->read = (unsigned int)-1;
7709 filp->private_data = info;
7713 mutex_unlock(&trace_types_lock);
7715 ret = nonseekable_open(inode, filp);
7717 trace_array_put(tr);
7723 tracing_buffers_poll(struct file *filp, poll_table *poll_table)
7725 struct ftrace_buffer_info *info = filp->private_data;
7726 struct trace_iterator *iter = &info->iter;
7728 return trace_poll(iter, filp, poll_table);
7732 tracing_buffers_read(struct file *filp, char __user *ubuf,
7733 size_t count, loff_t *ppos)
7735 struct ftrace_buffer_info *info = filp->private_data;
7736 struct trace_iterator *iter = &info->iter;
7743 #ifdef CONFIG_TRACER_MAX_TRACE
7744 if (iter->snapshot && iter->tr->current_trace->use_max_tr)
7749 info->spare = ring_buffer_alloc_read_page(iter->array_buffer->buffer,
7751 if (IS_ERR(info->spare)) {
7752 ret = PTR_ERR(info->spare);
7755 info->spare_cpu = iter->cpu_file;
7761 /* Do we have previous read data to read? */
7762 if (info->read < PAGE_SIZE)
7766 trace_access_lock(iter->cpu_file);
7767 ret = ring_buffer_read_page(iter->array_buffer->buffer,
7771 trace_access_unlock(iter->cpu_file);
7774 if (trace_empty(iter)) {
7775 if ((filp->f_flags & O_NONBLOCK))
7778 ret = wait_on_pipe(iter, 0);
7789 size = PAGE_SIZE - info->read;
7793 ret = copy_to_user(ubuf, info->spare + info->read, size);
7805 static int tracing_buffers_release(struct inode *inode, struct file *file)
7807 struct ftrace_buffer_info *info = file->private_data;
7808 struct trace_iterator *iter = &info->iter;
7810 mutex_lock(&trace_types_lock);
7812 iter->tr->trace_ref--;
7814 __trace_array_put(iter->tr);
7817 ring_buffer_free_read_page(iter->array_buffer->buffer,
7818 info->spare_cpu, info->spare);
7821 mutex_unlock(&trace_types_lock);
7827 struct trace_buffer *buffer;
7830 refcount_t refcount;
7833 static void buffer_ref_release(struct buffer_ref *ref)
7835 if (!refcount_dec_and_test(&ref->refcount))
7837 ring_buffer_free_read_page(ref->buffer, ref->cpu, ref->page);
7841 static void buffer_pipe_buf_release(struct pipe_inode_info *pipe,
7842 struct pipe_buffer *buf)
7844 struct buffer_ref *ref = (struct buffer_ref *)buf->private;
7846 buffer_ref_release(ref);
7850 static bool buffer_pipe_buf_get(struct pipe_inode_info *pipe,
7851 struct pipe_buffer *buf)
7853 struct buffer_ref *ref = (struct buffer_ref *)buf->private;
7855 if (refcount_read(&ref->refcount) > INT_MAX/2)
7858 refcount_inc(&ref->refcount);
7862 /* Pipe buffer operations for a buffer. */
7863 static const struct pipe_buf_operations buffer_pipe_buf_ops = {
7864 .release = buffer_pipe_buf_release,
7865 .get = buffer_pipe_buf_get,
7869 * Callback from splice_to_pipe(), if we need to release some pages
7870 * at the end of the spd in case we errored out while filling the pipe.
7872 static void buffer_spd_release(struct splice_pipe_desc *spd, unsigned int i)
7874 struct buffer_ref *ref =
7875 (struct buffer_ref *)spd->partial[i].private;
7877 buffer_ref_release(ref);
7878 spd->partial[i].private = 0;
7882 tracing_buffers_splice_read(struct file *file, loff_t *ppos,
7883 struct pipe_inode_info *pipe, size_t len,
7886 struct ftrace_buffer_info *info = file->private_data;
7887 struct trace_iterator *iter = &info->iter;
7888 struct partial_page partial_def[PIPE_DEF_BUFFERS];
7889 struct page *pages_def[PIPE_DEF_BUFFERS];
7890 struct splice_pipe_desc spd = {
7892 .partial = partial_def,
7893 .nr_pages_max = PIPE_DEF_BUFFERS,
7894 .ops = &buffer_pipe_buf_ops,
7895 .spd_release = buffer_spd_release,
7897 struct buffer_ref *ref;
7901 #ifdef CONFIG_TRACER_MAX_TRACE
7902 if (iter->snapshot && iter->tr->current_trace->use_max_tr)
7906 if (*ppos & (PAGE_SIZE - 1))
7909 if (len & (PAGE_SIZE - 1)) {
7910 if (len < PAGE_SIZE)
7915 if (splice_grow_spd(pipe, &spd))
7919 trace_access_lock(iter->cpu_file);
7920 entries = ring_buffer_entries_cpu(iter->array_buffer->buffer, iter->cpu_file);
7922 for (i = 0; i < spd.nr_pages_max && len && entries; i++, len -= PAGE_SIZE) {
7926 ref = kzalloc(sizeof(*ref), GFP_KERNEL);
7932 refcount_set(&ref->refcount, 1);
7933 ref->buffer = iter->array_buffer->buffer;
7934 ref->page = ring_buffer_alloc_read_page(ref->buffer, iter->cpu_file);
7935 if (IS_ERR(ref->page)) {
7936 ret = PTR_ERR(ref->page);
7941 ref->cpu = iter->cpu_file;
7943 r = ring_buffer_read_page(ref->buffer, &ref->page,
7944 len, iter->cpu_file, 1);
7946 ring_buffer_free_read_page(ref->buffer, ref->cpu,
7952 page = virt_to_page(ref->page);
7954 spd.pages[i] = page;
7955 spd.partial[i].len = PAGE_SIZE;
7956 spd.partial[i].offset = 0;
7957 spd.partial[i].private = (unsigned long)ref;
7961 entries = ring_buffer_entries_cpu(iter->array_buffer->buffer, iter->cpu_file);
7964 trace_access_unlock(iter->cpu_file);
7967 /* did we read anything? */
7968 if (!spd.nr_pages) {
7973 if ((file->f_flags & O_NONBLOCK) || (flags & SPLICE_F_NONBLOCK))
7976 ret = wait_on_pipe(iter, iter->snapshot ? 0 : iter->tr->buffer_percent);
7983 ret = splice_to_pipe(pipe, &spd);
7985 splice_shrink_spd(&spd);
7990 static const struct file_operations tracing_buffers_fops = {
7991 .open = tracing_buffers_open,
7992 .read = tracing_buffers_read,
7993 .poll = tracing_buffers_poll,
7994 .release = tracing_buffers_release,
7995 .splice_read = tracing_buffers_splice_read,
7996 .llseek = no_llseek,
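/*
 * Illustrative only (a hedged user-space sketch): the .splice_read callback
 * above is what serves zero-copy reads of per_cpu/cpuN/trace_pipe_raw.
 * splice(2) needs one end of each transfer to be a pipe, so whole
 * ring-buffer pages are first spliced into a pipe and then on to the
 * destination.  Paths and error handling are simplified.
 *
 *	int raw = open("/sys/kernel/tracing/per_cpu/cpu0/trace_pipe_raw",
 *		       O_RDONLY);
 *	int out = open("cpu0.raw", O_WRONLY | O_CREAT | O_TRUNC, 0644);
 *	int p[2];
 *
 *	pipe(p);
 *	for (;;) {
 *		// Pull whole ring-buffer pages into the pipe...
 *		ssize_t n = splice(raw, NULL, p[1], NULL, 64 * 4096, 0);
 *		if (n <= 0)
 *			break;
 *		// ...then push them on to the output file.
 *		splice(p[0], NULL, out, NULL, n, 0);
 *	}
 */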
8000 tracing_stats_read(struct file *filp, char __user *ubuf,
8001 size_t count, loff_t *ppos)
8003 struct inode *inode = file_inode(filp);
8004 struct trace_array *tr = inode->i_private;
8005 struct array_buffer *trace_buf = &tr->array_buffer;
8006 int cpu = tracing_get_cpu(inode);
8007 struct trace_seq *s;
8009 unsigned long long t;
8010 unsigned long usec_rem;
8012 s = kmalloc(sizeof(*s), GFP_KERNEL);
8018 cnt = ring_buffer_entries_cpu(trace_buf->buffer, cpu);
8019 trace_seq_printf(s, "entries: %ld\n", cnt);
8021 cnt = ring_buffer_overrun_cpu(trace_buf->buffer, cpu);
8022 trace_seq_printf(s, "overrun: %ld\n", cnt);
8024 cnt = ring_buffer_commit_overrun_cpu(trace_buf->buffer, cpu);
8025 trace_seq_printf(s, "commit overrun: %ld\n", cnt);
8027 cnt = ring_buffer_bytes_cpu(trace_buf->buffer, cpu);
8028 trace_seq_printf(s, "bytes: %ld\n", cnt);
8030 if (trace_clocks[tr->clock_id].in_ns) {
8031 /* local or global for trace_clock */
8032 t = ns2usecs(ring_buffer_oldest_event_ts(trace_buf->buffer, cpu));
8033 usec_rem = do_div(t, USEC_PER_SEC);
8034 trace_seq_printf(s, "oldest event ts: %5llu.%06lu\n",
8037 t = ns2usecs(ring_buffer_time_stamp(trace_buf->buffer, cpu));
8038 usec_rem = do_div(t, USEC_PER_SEC);
8039 trace_seq_printf(s, "now ts: %5llu.%06lu\n", t, usec_rem);
8041 /* counter or tsc mode for trace_clock */
8042 trace_seq_printf(s, "oldest event ts: %llu\n",
8043 ring_buffer_oldest_event_ts(trace_buf->buffer, cpu));
8045 trace_seq_printf(s, "now ts: %llu\n",
8046 ring_buffer_time_stamp(trace_buf->buffer, cpu));
8049 cnt = ring_buffer_dropped_events_cpu(trace_buf->buffer, cpu);
8050 trace_seq_printf(s, "dropped events: %ld\n", cnt);
8052 cnt = ring_buffer_read_events_cpu(trace_buf->buffer, cpu);
8053 trace_seq_printf(s, "read events: %ld\n", cnt);
8055 count = simple_read_from_buffer(ubuf, count, ppos,
8056 s->buffer, trace_seq_used(s));
8063 static const struct file_operations tracing_stats_fops = {
8064 .open = tracing_open_generic_tr,
8065 .read = tracing_stats_read,
8066 .llseek = generic_file_llseek,
8067 .release = tracing_release_generic_tr,
8070 #ifdef CONFIG_DYNAMIC_FTRACE
8073 tracing_read_dyn_info(struct file *filp, char __user *ubuf,
8074 size_t cnt, loff_t *ppos)
8080 /* 256 should be plenty to hold the amount needed */
8081 buf = kmalloc(256, GFP_KERNEL);
8085 r = scnprintf(buf, 256, "%ld pages:%ld groups: %ld\n",
8086 ftrace_update_tot_cnt,
8087 ftrace_number_of_pages,
8088 ftrace_number_of_groups);
8090 ret = simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
8095 static const struct file_operations tracing_dyn_info_fops = {
8096 .open = tracing_open_generic,
8097 .read = tracing_read_dyn_info,
8098 .llseek = generic_file_llseek,
8100 #endif /* CONFIG_DYNAMIC_FTRACE */
8102 #if defined(CONFIG_TRACER_SNAPSHOT) && defined(CONFIG_DYNAMIC_FTRACE)
8104 ftrace_snapshot(unsigned long ip, unsigned long parent_ip,
8105 struct trace_array *tr, struct ftrace_probe_ops *ops,
8108 tracing_snapshot_instance(tr);
8112 ftrace_count_snapshot(unsigned long ip, unsigned long parent_ip,
8113 struct trace_array *tr, struct ftrace_probe_ops *ops,
8116 struct ftrace_func_mapper *mapper = data;
8120 count = (long *)ftrace_func_mapper_find_ip(mapper, ip);
8130 tracing_snapshot_instance(tr);
8134 ftrace_snapshot_print(struct seq_file *m, unsigned long ip,
8135 struct ftrace_probe_ops *ops, void *data)
8137 struct ftrace_func_mapper *mapper = data;
8140 seq_printf(m, "%ps:", (void *)ip);
8142 seq_puts(m, "snapshot");
8145 count = (long *)ftrace_func_mapper_find_ip(mapper, ip);
8148 seq_printf(m, ":count=%ld\n", *count);
8150 seq_puts(m, ":unlimited\n");
8156 ftrace_snapshot_init(struct ftrace_probe_ops *ops, struct trace_array *tr,
8157 unsigned long ip, void *init_data, void **data)
8159 struct ftrace_func_mapper *mapper = *data;
8162 mapper = allocate_ftrace_func_mapper();
8168 return ftrace_func_mapper_add_ip(mapper, ip, init_data);
8172 ftrace_snapshot_free(struct ftrace_probe_ops *ops, struct trace_array *tr,
8173 unsigned long ip, void *data)
8175 struct ftrace_func_mapper *mapper = data;
8180 free_ftrace_func_mapper(mapper, NULL);
8184 ftrace_func_mapper_remove_ip(mapper, ip);
8187 static struct ftrace_probe_ops snapshot_probe_ops = {
8188 .func = ftrace_snapshot,
8189 .print = ftrace_snapshot_print,
8192 static struct ftrace_probe_ops snapshot_count_probe_ops = {
8193 .func = ftrace_count_snapshot,
8194 .print = ftrace_snapshot_print,
8195 .init = ftrace_snapshot_init,
8196 .free = ftrace_snapshot_free,
8200 ftrace_trace_snapshot_callback(struct trace_array *tr, struct ftrace_hash *hash,
8201 char *glob, char *cmd, char *param, int enable)
8203 struct ftrace_probe_ops *ops;
8204 void *count = (void *)-1;
8211 /* hash funcs only work with set_ftrace_filter */
8215 ops = param ? &snapshot_count_probe_ops : &snapshot_probe_ops;
8218 return unregister_ftrace_function_probe_func(glob+1, tr, ops);
8223 number = strsep(&param, ":");
8225 if (!strlen(number))
8229 * We use the callback data field (which is a pointer)
8232 ret = kstrtoul(number, 0, (unsigned long *)&count);
8237 ret = tracing_alloc_snapshot_instance(tr);
8241 ret = register_ftrace_function_probe(glob, tr, ops, count);
8244 return ret < 0 ? ret : 0;
8247 static struct ftrace_func_command ftrace_snapshot_cmd = {
8249 .func = ftrace_trace_snapshot_callback,
8252 static __init int register_snapshot_cmd(void)
8254 return register_ftrace_command(&ftrace_snapshot_cmd);
8257 static inline __init int register_snapshot_cmd(void) { return 0; }
8258 #endif /* defined(CONFIG_TRACER_SNAPSHOT) && defined(CONFIG_DYNAMIC_FTRACE) */
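/*
 * Illustrative only: the "snapshot" function command registered above is
 * driven from user space through set_ftrace_filter (paths assume tracefs is
 * mounted at /sys/kernel/tracing), e.g.:
 *
 *	# take a snapshot every time the function is hit
 *	echo 'do_sys_open:snapshot' > /sys/kernel/tracing/set_ftrace_filter
 *
 *	# only trigger for the first 5 hits
 *	echo 'do_sys_open:snapshot:5' > /sys/kernel/tracing/set_ftrace_filter
 *
 *	# remove the probe again
 *	echo '!do_sys_open:snapshot' > /sys/kernel/tracing/set_ftrace_filter
 */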
8260 static struct dentry *tracing_get_dentry(struct trace_array *tr)
8262 if (WARN_ON(!tr->dir))
8263 return ERR_PTR(-ENODEV);
8265 /* Top directory uses NULL as the parent */
8266 if (tr->flags & TRACE_ARRAY_FL_GLOBAL)
8269 /* All sub buffers have a descriptor */
8273 static struct dentry *tracing_dentry_percpu(struct trace_array *tr, int cpu)
8275 struct dentry *d_tracer;
8278 return tr->percpu_dir;
8280 d_tracer = tracing_get_dentry(tr);
8281 if (IS_ERR(d_tracer))
8284 tr->percpu_dir = tracefs_create_dir("per_cpu", d_tracer);
8286 MEM_FAIL(!tr->percpu_dir,
8287 "Could not create tracefs directory 'per_cpu/%d'\n", cpu);
8289 return tr->percpu_dir;
8292 static struct dentry *
8293 trace_create_cpu_file(const char *name, umode_t mode, struct dentry *parent,
8294 void *data, long cpu, const struct file_operations *fops)
8296 struct dentry *ret = trace_create_file(name, mode, parent, data, fops);
8298 if (ret) /* See tracing_get_cpu() */
8299 d_inode(ret)->i_cdev = (void *)(cpu + 1);
8304 tracing_init_tracefs_percpu(struct trace_array *tr, long cpu)
8306 struct dentry *d_percpu = tracing_dentry_percpu(tr, cpu);
8307 struct dentry *d_cpu;
8308 char cpu_dir[30]; /* 30 characters should be more than enough */
8313 snprintf(cpu_dir, 30, "cpu%ld", cpu);
8314 d_cpu = tracefs_create_dir(cpu_dir, d_percpu);
8316 pr_warn("Could not create tracefs '%s' entry\n", cpu_dir);
8320 /* per cpu trace_pipe */
8321 trace_create_cpu_file("trace_pipe", 0444, d_cpu,
8322 tr, cpu, &tracing_pipe_fops);
8325 trace_create_cpu_file("trace", 0644, d_cpu,
8326 tr, cpu, &tracing_fops);
8328 trace_create_cpu_file("trace_pipe_raw", 0444, d_cpu,
8329 tr, cpu, &tracing_buffers_fops);
8331 trace_create_cpu_file("stats", 0444, d_cpu,
8332 tr, cpu, &tracing_stats_fops);
8334 trace_create_cpu_file("buffer_size_kb", 0444, d_cpu,
8335 tr, cpu, &tracing_entries_fops);
8337 #ifdef CONFIG_TRACER_SNAPSHOT
8338 trace_create_cpu_file("snapshot", 0644, d_cpu,
8339 tr, cpu, &snapshot_fops);
8341 trace_create_cpu_file("snapshot_raw", 0444, d_cpu,
8342 tr, cpu, &snapshot_raw_fops);
8346 #ifdef CONFIG_FTRACE_SELFTEST
8347 /* Let selftest have access to static functions in this file */
8348 #include "trace_selftest.c"
8352 trace_options_read(struct file *filp, char __user *ubuf, size_t cnt,
8355 struct trace_option_dentry *topt = filp->private_data;
8358 if (topt->flags->val & topt->opt->bit)
8363 return simple_read_from_buffer(ubuf, cnt, ppos, buf, 2);
8367 trace_options_write(struct file *filp, const char __user *ubuf, size_t cnt,
8370 struct trace_option_dentry *topt = filp->private_data;
8374 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
8378 if (val != 0 && val != 1)
8381 if (!!(topt->flags->val & topt->opt->bit) != val) {
8382 mutex_lock(&trace_types_lock);
8383 ret = __set_tracer_option(topt->tr, topt->flags,
8385 mutex_unlock(&trace_types_lock);
8395 static int tracing_open_options(struct inode *inode, struct file *filp)
8397 struct trace_option_dentry *topt = inode->i_private;
8400 ret = tracing_check_open_get_tr(topt->tr);
8404 filp->private_data = inode->i_private;
8408 static int tracing_release_options(struct inode *inode, struct file *file)
8410 struct trace_option_dentry *topt = file->private_data;
8412 trace_array_put(topt->tr);
8416 static const struct file_operations trace_options_fops = {
8417 .open = tracing_open_options,
8418 .read = trace_options_read,
8419 .write = trace_options_write,
8420 .llseek = generic_file_llseek,
8421 .release = tracing_release_options,
8425 * In order to pass in both the trace_array descriptor as well as the index
8426 * to the flag that the trace option file represents, the trace_array
8427 * has a character array of trace_flags_index[], which holds the index
8428 * of the bit for the flag it represents. index[0] == 0, index[1] == 1, etc.
8429 * The address of this character array is passed to the flag option file
8430 * read/write callbacks.
8432 * In order to extract both the index and the trace_array descriptor,
8433 * get_tr_index() uses the following algorithm.
8437 * As the pointer itself contains the address of the index (remember
8440 * Then to get the trace_array descriptor, by subtracting that index
8441 * from the ptr, we get to the start of the index itself.
8443 * ptr - idx == &index[0]
8445 * Then a simple container_of() from that pointer gets us to the
8446 * trace_array descriptor.
8448 static void get_tr_index(void *data, struct trace_array **ptr,
8449 unsigned int *pindex)
8451 *pindex = *(unsigned char *)data;
8453 *ptr = container_of(data - *pindex, struct trace_array,
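/*
 * Illustrative only: a worked example of the scheme described above.  If
 * tr->trace_flags_index[] starts at address A, the option file for flag
 * bit 5 is created with data == &tr->trace_flags_index[5] == A + 5, and
 * that slot holds the value 5.  get_tr_index() then recovers:
 *
 *	index = *(unsigned char *)data;	// == 5
 *	base  = data - index;		// == A == &tr->trace_flags_index[0]
 *	tr    = container_of(base, struct trace_array, trace_flags_index);
 */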
8458 trace_options_core_read(struct file *filp, char __user *ubuf, size_t cnt,
8461 void *tr_index = filp->private_data;
8462 struct trace_array *tr;
8466 get_tr_index(tr_index, &tr, &index);
8468 if (tr->trace_flags & (1 << index))
8473 return simple_read_from_buffer(ubuf, cnt, ppos, buf, 2);
8477 trace_options_core_write(struct file *filp, const char __user *ubuf, size_t cnt,
8480 void *tr_index = filp->private_data;
8481 struct trace_array *tr;
8486 get_tr_index(tr_index, &tr, &index);
8488 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
8492 if (val != 0 && val != 1)
8495 mutex_lock(&event_mutex);
8496 mutex_lock(&trace_types_lock);
8497 ret = set_tracer_flag(tr, 1 << index, val);
8498 mutex_unlock(&trace_types_lock);
8499 mutex_unlock(&event_mutex);
8509 static const struct file_operations trace_options_core_fops = {
8510 .open = tracing_open_generic,
8511 .read = trace_options_core_read,
8512 .write = trace_options_core_write,
8513 .llseek = generic_file_llseek,
8516 struct dentry *trace_create_file(const char *name,
8518 struct dentry *parent,
8520 const struct file_operations *fops)
8524 ret = tracefs_create_file(name, mode, parent, data, fops);
8526 pr_warn("Could not create tracefs '%s' entry\n", name);
8532 static struct dentry *trace_options_init_dentry(struct trace_array *tr)
8534 struct dentry *d_tracer;
8539 d_tracer = tracing_get_dentry(tr);
8540 if (IS_ERR(d_tracer))
8543 tr->options = tracefs_create_dir("options", d_tracer);
8545 pr_warn("Could not create tracefs directory 'options'\n");
8553 create_trace_option_file(struct trace_array *tr,
8554 struct trace_option_dentry *topt,
8555 struct tracer_flags *flags,
8556 struct tracer_opt *opt)
8558 struct dentry *t_options;
8560 t_options = trace_options_init_dentry(tr);
8564 topt->flags = flags;
8568 topt->entry = trace_create_file(opt->name, 0644, t_options, topt,
8569 &trace_options_fops);
8574 create_trace_option_files(struct trace_array *tr, struct tracer *tracer)
8576 struct trace_option_dentry *topts;
8577 struct trace_options *tr_topts;
8578 struct tracer_flags *flags;
8579 struct tracer_opt *opts;
8586 flags = tracer->flags;
8588 if (!flags || !flags->opts)
8592 * If this is an instance, only create flags for tracers
8593 * the instance may have.
8595 if (!trace_ok_for_array(tracer, tr))
8598 for (i = 0; i < tr->nr_topts; i++) {
8599 /* Make sure there are no duplicate flags. */
8600 if (WARN_ON_ONCE(tr->topts[i].tracer->flags == tracer->flags))
8606 for (cnt = 0; opts[cnt].name; cnt++)
8609 topts = kcalloc(cnt + 1, sizeof(*topts), GFP_KERNEL);
8613 tr_topts = krealloc(tr->topts, sizeof(*tr->topts) * (tr->nr_topts + 1),
8620 tr->topts = tr_topts;
8621 tr->topts[tr->nr_topts].tracer = tracer;
8622 tr->topts[tr->nr_topts].topts = topts;
8625 for (cnt = 0; opts[cnt].name; cnt++) {
8626 create_trace_option_file(tr, &topts[cnt], flags,
8628 MEM_FAIL(topts[cnt].entry == NULL,
8629 "Failed to create trace option: %s",
8634 static struct dentry *
8635 create_trace_option_core_file(struct trace_array *tr,
8636 const char *option, long index)
8638 struct dentry *t_options;
8640 t_options = trace_options_init_dentry(tr);
8644 return trace_create_file(option, 0644, t_options,
8645 (void *)&tr->trace_flags_index[index],
8646 &trace_options_core_fops);
8649 static void create_trace_options_dir(struct trace_array *tr)
8651 struct dentry *t_options;
8652 bool top_level = tr == &global_trace;
8655 t_options = trace_options_init_dentry(tr);
8659 for (i = 0; trace_options[i]; i++) {
8661 !((1 << i) & TOP_LEVEL_TRACE_FLAGS))
8662 create_trace_option_core_file(tr, trace_options[i], i);
8667 rb_simple_read(struct file *filp, char __user *ubuf,
8668 size_t cnt, loff_t *ppos)
8670 struct trace_array *tr = filp->private_data;
8674 r = tracer_tracing_is_on(tr);
8675 r = sprintf(buf, "%d\n", r);
8677 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
8681 rb_simple_write(struct file *filp, const char __user *ubuf,
8682 size_t cnt, loff_t *ppos)
8684 struct trace_array *tr = filp->private_data;
8685 struct trace_buffer *buffer = tr->array_buffer.buffer;
8689 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
8694 mutex_lock(&trace_types_lock);
8695 if (!!val == tracer_tracing_is_on(tr)) {
8696 val = 0; /* do nothing */
8698 tracer_tracing_on(tr);
8699 if (tr->current_trace->start)
8700 tr->current_trace->start(tr);
8702 tracer_tracing_off(tr);
8703 if (tr->current_trace->stop)
8704 tr->current_trace->stop(tr);
8706 mutex_unlock(&trace_types_lock);
8714 static const struct file_operations rb_simple_fops = {
8715 .open = tracing_open_generic_tr,
8716 .read = rb_simple_read,
8717 .write = rb_simple_write,
8718 .release = tracing_release_generic_tr,
8719 .llseek = default_llseek,
8723 buffer_percent_read(struct file *filp, char __user *ubuf,
8724 size_t cnt, loff_t *ppos)
8726 struct trace_array *tr = filp->private_data;
8730 r = tr->buffer_percent;
8731 r = sprintf(buf, "%d\n", r);
8733 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
8737 buffer_percent_write(struct file *filp, const char __user *ubuf,
8738 size_t cnt, loff_t *ppos)
8740 struct trace_array *tr = filp->private_data;
8744 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
8751 tr->buffer_percent = val;
8758 static const struct file_operations buffer_percent_fops = {
8759 .open = tracing_open_generic_tr,
8760 .read = buffer_percent_read,
8761 .write = buffer_percent_write,
8762 .release = tracing_release_generic_tr,
8763 .llseek = default_llseek,
8766 static struct dentry *trace_instance_dir;
8769 init_tracer_tracefs(struct trace_array *tr, struct dentry *d_tracer);
8772 allocate_trace_buffer(struct trace_array *tr, struct array_buffer *buf, int size)
8774 enum ring_buffer_flags rb_flags;
8776 rb_flags = tr->trace_flags & TRACE_ITER_OVERWRITE ? RB_FL_OVERWRITE : 0;
8780 buf->buffer = ring_buffer_alloc(size, rb_flags);
8784 buf->data = alloc_percpu(struct trace_array_cpu);
8786 ring_buffer_free(buf->buffer);
8791 /* Allocate the first page for all buffers */
8792 set_buffer_entries(&tr->array_buffer,
8793 ring_buffer_size(tr->array_buffer.buffer, 0));
8798 static int allocate_trace_buffers(struct trace_array *tr, int size)
8802 ret = allocate_trace_buffer(tr, &tr->array_buffer, size);
8806 #ifdef CONFIG_TRACER_MAX_TRACE
8807 ret = allocate_trace_buffer(tr, &tr->max_buffer,
8808 allocate_snapshot ? size : 1);
8809 if (MEM_FAIL(ret, "Failed to allocate trace buffer\n")) {
8810 ring_buffer_free(tr->array_buffer.buffer);
8811 tr->array_buffer.buffer = NULL;
8812 free_percpu(tr->array_buffer.data);
8813 tr->array_buffer.data = NULL;
8816 tr->allocated_snapshot = allocate_snapshot;
8819 * Only the top level trace array gets its snapshot allocated
8820 * from the kernel command line.
8822 allocate_snapshot = false;
8828 static void free_trace_buffer(struct array_buffer *buf)
8831 ring_buffer_free(buf->buffer);
8833 free_percpu(buf->data);
8838 static void free_trace_buffers(struct trace_array *tr)
8843 free_trace_buffer(&tr->array_buffer);
8845 #ifdef CONFIG_TRACER_MAX_TRACE
8846 free_trace_buffer(&tr->max_buffer);
8850 static void init_trace_flags_index(struct trace_array *tr)
8854 /* Used by the trace options files */
8855 for (i = 0; i < TRACE_FLAGS_MAX_SIZE; i++)
8856 tr->trace_flags_index[i] = i;
8859 static void __update_tracer_options(struct trace_array *tr)
8863 for (t = trace_types; t; t = t->next)
8864 add_tracer_options(tr, t);
8867 static void update_tracer_options(struct trace_array *tr)
8869 mutex_lock(&trace_types_lock);
8870 tracer_options_updated = true;
8871 __update_tracer_options(tr);
8872 mutex_unlock(&trace_types_lock);
8875 /* Must have trace_types_lock held */
8876 struct trace_array *trace_array_find(const char *instance)
8878 struct trace_array *tr, *found = NULL;
8880 list_for_each_entry(tr, &ftrace_trace_arrays, list) {
8881 if (tr->name && strcmp(tr->name, instance) == 0) {
8890 struct trace_array *trace_array_find_get(const char *instance)
8892 struct trace_array *tr;
8894 mutex_lock(&trace_types_lock);
8895 tr = trace_array_find(instance);
8898 mutex_unlock(&trace_types_lock);
8903 static int trace_array_create_dir(struct trace_array *tr)
8907 tr->dir = tracefs_create_dir(tr->name, trace_instance_dir);
8911 ret = event_trace_add_tracer(tr->dir, tr);
8913 tracefs_remove(tr->dir);
8917 init_tracer_tracefs(tr, tr->dir);
8918 __update_tracer_options(tr);
8923 static struct trace_array *trace_array_create(const char *name)
8925 struct trace_array *tr;
8929 tr = kzalloc(sizeof(*tr), GFP_KERNEL);
8931 return ERR_PTR(ret);
8933 tr->name = kstrdup(name, GFP_KERNEL);
8937 if (!alloc_cpumask_var(&tr->tracing_cpumask, GFP_KERNEL))
8940 if (!zalloc_cpumask_var(&tr->pipe_cpumask, GFP_KERNEL))
8943 tr->trace_flags = global_trace.trace_flags & ~ZEROED_TRACE_FLAGS;
8945 cpumask_copy(tr->tracing_cpumask, cpu_all_mask);
8947 raw_spin_lock_init(&tr->start_lock);
8949 tr->max_lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
8951 tr->current_trace = &nop_trace;
8953 INIT_LIST_HEAD(&tr->systems);
8954 INIT_LIST_HEAD(&tr->events);
8955 INIT_LIST_HEAD(&tr->hist_vars);
8956 INIT_LIST_HEAD(&tr->err_log);
8958 if (allocate_trace_buffers(tr, trace_buf_size) < 0)
8961 if (ftrace_allocate_ftrace_ops(tr) < 0)
8964 ftrace_init_trace_array(tr);
8966 init_trace_flags_index(tr);
8968 if (trace_instance_dir) {
8969 ret = trace_array_create_dir(tr);
8973 __trace_early_add_events(tr);
8975 list_add(&tr->list, &ftrace_trace_arrays);
8982 ftrace_free_ftrace_ops(tr);
8983 free_trace_buffers(tr);
8984 free_cpumask_var(tr->pipe_cpumask);
8985 free_cpumask_var(tr->tracing_cpumask);
8989 return ERR_PTR(ret);
8992 static int instance_mkdir(const char *name)
8994 struct trace_array *tr;
8997 mutex_lock(&event_mutex);
8998 mutex_lock(&trace_types_lock);
9001 if (trace_array_find(name))
9004 tr = trace_array_create(name);
9006 ret = PTR_ERR_OR_ZERO(tr);
9009 mutex_unlock(&trace_types_lock);
9010 mutex_unlock(&event_mutex);
9015 * trace_array_get_by_name - Create/Lookup a trace array, given its name.
9016 * @name: The name of the trace array to be looked up/created.
9018 * Returns pointer to trace array with given name.
9019 * NULL, if it cannot be created.
9021 * NOTE: This function increments the reference counter associated with the
9022 * trace array returned. This makes sure it cannot be freed while in use.
9023 * Use trace_array_put() once the trace array is no longer needed.
9024 * If the trace_array is to be freed, trace_array_destroy() needs to
9025 * be called after the trace_array_put(), or simply let user space delete
9026 * it from the tracefs instances directory. But until the
9027 * trace_array_put() is called, user space can not delete it.
9030 struct trace_array *trace_array_get_by_name(const char *name)
9032 struct trace_array *tr;
9034 mutex_lock(&event_mutex);
9035 mutex_lock(&trace_types_lock);
9037 list_for_each_entry(tr, &ftrace_trace_arrays, list) {
9038 if (tr->name && strcmp(tr->name, name) == 0)
9042 tr = trace_array_create(name);
9050 mutex_unlock(&trace_types_lock);
9051 mutex_unlock(&event_mutex);
9054 EXPORT_SYMBOL_GPL(trace_array_get_by_name);
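/*
 * Illustrative only (a hedged sketch): typical use of the instance API from
 * kernel code, following the reference-counting rules documented above.
 * The instance name is made up.
 *
 *	struct trace_array *tr;
 *
 *	tr = trace_array_get_by_name("my_instance");
 *	if (!tr)
 *		return -ENODEV;
 *
 *	// ... trace into the instance, enable events on it, etc. ...
 *
 *	trace_array_put(tr);		// drop the reference we hold
 *	trace_array_destroy(tr);	// optionally remove the instance too
 */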
9056 static int __remove_instance(struct trace_array *tr)
9060 /* Reference counter for a newly created trace array = 1. */
9061 if (tr->ref > 1 || (tr->current_trace && tr->trace_ref))
9064 list_del(&tr->list);
9066 /* Disable all the flags that were enabled coming in */
9067 for (i = 0; i < TRACE_FLAGS_MAX_SIZE; i++) {
9068 if ((1 << i) & ZEROED_TRACE_FLAGS)
9069 set_tracer_flag(tr, 1 << i, 0);
9072 tracing_set_nop(tr);
9073 clear_ftrace_function_probes(tr);
9074 event_trace_del_tracer(tr);
9075 ftrace_clear_pids(tr);
9076 ftrace_destroy_function_files(tr);
9077 tracefs_remove(tr->dir);
9078 free_trace_buffers(tr);
9079 clear_tracing_err_log(tr);
9081 for (i = 0; i < tr->nr_topts; i++) {
9082 kfree(tr->topts[i].topts);
9086 free_cpumask_var(tr->pipe_cpumask);
9087 free_cpumask_var(tr->tracing_cpumask);
9094 int trace_array_destroy(struct trace_array *this_tr)
9096 struct trace_array *tr;
9102 mutex_lock(&event_mutex);
9103 mutex_lock(&trace_types_lock);
9107 /* Make sure the trace array exists before destroying it. */
9108 list_for_each_entry(tr, &ftrace_trace_arrays, list) {
9109 if (tr == this_tr) {
9110 ret = __remove_instance(tr);
9115 mutex_unlock(&trace_types_lock);
9116 mutex_unlock(&event_mutex);
9120 EXPORT_SYMBOL_GPL(trace_array_destroy);
9122 static int instance_rmdir(const char *name)
9124 struct trace_array *tr;
9127 mutex_lock(&event_mutex);
9128 mutex_lock(&trace_types_lock);
9131 tr = trace_array_find(name);
9133 ret = __remove_instance(tr);
9135 mutex_unlock(&trace_types_lock);
9136 mutex_unlock(&event_mutex);
9141 static __init void create_trace_instances(struct dentry *d_tracer)
9143 struct trace_array *tr;
9145 trace_instance_dir = tracefs_create_instance_dir("instances", d_tracer,
9148 if (MEM_FAIL(!trace_instance_dir, "Failed to create instances directory\n"))
9151 mutex_lock(&event_mutex);
9152 mutex_lock(&trace_types_lock);
9154 list_for_each_entry(tr, &ftrace_trace_arrays, list) {
9157 if (MEM_FAIL(trace_array_create_dir(tr) < 0,
9158 "Failed to create instance directory\n"))
9162 mutex_unlock(&trace_types_lock);
9163 mutex_unlock(&event_mutex);
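/*
 * Illustrative only: from user space the "instances" directory created
 * above is driven with plain mkdir/rmdir (assuming tracefs is mounted at
 * /sys/kernel/tracing):
 *
 *	mkdir /sys/kernel/tracing/instances/foo	  # -> instance_mkdir()
 *	rmdir /sys/kernel/tracing/instances/foo	  # -> instance_rmdir()
 */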
9167 init_tracer_tracefs(struct trace_array *tr, struct dentry *d_tracer)
9169 struct trace_event_file *file;
9172 trace_create_file("available_tracers", 0444, d_tracer,
9173 tr, &show_traces_fops);
9175 trace_create_file("current_tracer", 0644, d_tracer,
9176 tr, &set_tracer_fops);
9178 trace_create_file("tracing_cpumask", 0644, d_tracer,
9179 tr, &tracing_cpumask_fops);
9181 trace_create_file("trace_options", 0644, d_tracer,
9182 tr, &tracing_iter_fops);
9184 trace_create_file("trace", 0644, d_tracer,
9187 trace_create_file("trace_pipe", 0444, d_tracer,
9188 tr, &tracing_pipe_fops);
9190 trace_create_file("buffer_size_kb", 0644, d_tracer,
9191 tr, &tracing_entries_fops);
9193 trace_create_file("buffer_total_size_kb", 0444, d_tracer,
9194 tr, &tracing_total_entries_fops);
9196 trace_create_file("free_buffer", 0200, d_tracer,
9197 tr, &tracing_free_buffer_fops);
9199 trace_create_file("trace_marker", 0220, d_tracer,
9200 tr, &tracing_mark_fops);
9202 file = __find_event_file(tr, "ftrace", "print");
9203 if (file && file->dir)
9204 trace_create_file("trigger", 0644, file->dir, file,
9205 &event_trigger_fops);
9206 tr->trace_marker_file = file;
9208 trace_create_file("trace_marker_raw", 0220, d_tracer,
9209 tr, &tracing_mark_raw_fops);
9211 trace_create_file("trace_clock", 0644, d_tracer, tr,
9214 trace_create_file("tracing_on", 0644, d_tracer,
9215 tr, &rb_simple_fops);
9217 trace_create_file("timestamp_mode", 0444, d_tracer, tr,
9218 &trace_time_stamp_mode_fops);
9220 tr->buffer_percent = 50;
9222 trace_create_file("buffer_percent", 0444, d_tracer,
9223 tr, &buffer_percent_fops);
9225 create_trace_options_dir(tr);
9227 #if defined(CONFIG_TRACER_MAX_TRACE) || defined(CONFIG_HWLAT_TRACER)
9228 trace_create_maxlat_file(tr, d_tracer);
9231 if (ftrace_create_function_files(tr, d_tracer))
9232 MEM_FAIL(1, "Could not allocate function filter files");
9234 #ifdef CONFIG_TRACER_SNAPSHOT
9235 trace_create_file("snapshot", 0644, d_tracer,
9236 tr, &snapshot_fops);
9239 trace_create_file("error_log", 0644, d_tracer,
9240 tr, &tracing_err_log_fops);
9242 for_each_tracing_cpu(cpu)
9243 tracing_init_tracefs_percpu(tr, cpu);
9245 ftrace_init_tracefs(tr, d_tracer);
9248 static struct vfsmount *trace_automount(struct dentry *mntpt, void *ingore)
9250 struct vfsmount *mnt;
9251 struct file_system_type *type;
9254 * To maintain backward compatibility for tools that mount
9255 * debugfs to get to the tracing facility, tracefs is automatically
9256 * mounted to the debugfs/tracing directory.
9258 type = get_fs_type("tracefs");
9261 mnt = vfs_submount(mntpt, type, "tracefs", NULL);
9262 put_filesystem(type);
9271 * tracing_init_dentry - initialize top level trace array
9273 * This is called when creating files or directories in the tracing
9274 * directory. It is called via fs_initcall() by any of the boot up code
9275 * and expects to return the dentry of the top level tracing directory.
9277 int tracing_init_dentry(void)
9279 struct trace_array *tr = &global_trace;
9281 if (security_locked_down(LOCKDOWN_TRACEFS)) {
9282 pr_warn("Tracing disabled due to lockdown\n");
9286 /* The top level trace array uses NULL as parent */
9290 if (WARN_ON(!tracefs_initialized()))
9294 * As there may still be users that expect the tracing
9295 * files to exist in debugfs/tracing, we must automount
9296 * the tracefs file system there, so older tools still
9297 * work with the newer kernel.
9299 tr->dir = debugfs_create_automount("tracing", NULL,
9300 trace_automount, NULL);
9305 extern struct trace_eval_map *__start_ftrace_eval_maps[];
9306 extern struct trace_eval_map *__stop_ftrace_eval_maps[];
9308 static void __init trace_eval_init(void)
9312 len = __stop_ftrace_eval_maps - __start_ftrace_eval_maps;
9313 trace_insert_eval_map(NULL, __start_ftrace_eval_maps, len);
9316 #ifdef CONFIG_MODULES
9317 static void trace_module_add_evals(struct module *mod)
9319 if (!mod->num_trace_evals)
9323 * Modules with bad taint do not have events created, do
9324 * not bother with enums either.
9326 if (trace_module_has_bad_taint(mod))
9329 trace_insert_eval_map(mod, mod->trace_evals, mod->num_trace_evals);
9332 #ifdef CONFIG_TRACE_EVAL_MAP_FILE
9333 static void trace_module_remove_evals(struct module *mod)
9335 union trace_eval_map_item *map;
9336 union trace_eval_map_item **last = &trace_eval_maps;
9338 if (!mod->num_trace_evals)
9341 mutex_lock(&trace_eval_mutex);
9343 map = trace_eval_maps;
9346 if (map->head.mod == mod)
9348 map = trace_eval_jmp_to_tail(map);
9349 last = &map->tail.next;
9350 map = map->tail.next;
9355 *last = trace_eval_jmp_to_tail(map)->tail.next;
9358 mutex_unlock(&trace_eval_mutex);
9361 static inline void trace_module_remove_evals(struct module *mod) { }
9362 #endif /* CONFIG_TRACE_EVAL_MAP_FILE */
9364 static int trace_module_notify(struct notifier_block *self,
9365 unsigned long val, void *data)
9367 struct module *mod = data;
9370 case MODULE_STATE_COMING:
9371 trace_module_add_evals(mod);
9373 case MODULE_STATE_GOING:
9374 trace_module_remove_evals(mod);
9381 static struct notifier_block trace_module_nb = {
9382 .notifier_call = trace_module_notify,
9385 #endif /* CONFIG_MODULES */
9387 static __init int tracer_init_tracefs(void)
9391 trace_access_lock_init();
9393 ret = tracing_init_dentry();
9399 init_tracer_tracefs(&global_trace, NULL);
9400 ftrace_init_tracefs_toplevel(&global_trace, NULL);
9402 trace_create_file("tracing_thresh", 0644, NULL,
9403 &global_trace, &tracing_thresh_fops);
9405 trace_create_file("README", 0444, NULL,
9406 NULL, &tracing_readme_fops);
9408 trace_create_file("saved_cmdlines", 0444, NULL,
9409 NULL, &tracing_saved_cmdlines_fops);
9411 trace_create_file("saved_cmdlines_size", 0644, NULL,
9412 NULL, &tracing_saved_cmdlines_size_fops);
9414 trace_create_file("saved_tgids", 0444, NULL,
9415 NULL, &tracing_saved_tgids_fops);
9419 trace_create_eval_file(NULL);
9421 #ifdef CONFIG_MODULES
9422 register_module_notifier(&trace_module_nb);
9425 #ifdef CONFIG_DYNAMIC_FTRACE
9426 trace_create_file("dyn_ftrace_total_info", 0444, NULL,
9427 NULL, &tracing_dyn_info_fops);
9430 create_trace_instances(NULL);
9432 update_tracer_options(&global_trace);
9437 static int trace_panic_handler(struct notifier_block *this,
9438 unsigned long event, void *unused)
9440 if (ftrace_dump_on_oops)
9441 ftrace_dump(ftrace_dump_on_oops);
9445 static struct notifier_block trace_panic_notifier = {
9446 .notifier_call = trace_panic_handler,
9448 .priority = 150 /* priority: INT_MAX >= x >= 0 */
9451 static int trace_die_handler(struct notifier_block *self,
9457 if (ftrace_dump_on_oops)
9458 ftrace_dump(ftrace_dump_on_oops);
9466 static struct notifier_block trace_die_notifier = {
9467 .notifier_call = trace_die_handler,
9472 * printk is capped at a max of 1024 bytes; we really don't need it that big.
9473 * Nothing should be printing 1000 characters anyway.
9475 #define TRACE_MAX_PRINT 1000
9478 * Define here KERN_TRACE so that we have one place to modify
9479 * it if we decide to change what log level the ftrace dump
9482 #define KERN_TRACE KERN_EMERG
9485 trace_printk_seq(struct trace_seq *s)
9487 /* Probably should print a warning here. */
9488 if (s->seq.len >= TRACE_MAX_PRINT)
9489 s->seq.len = TRACE_MAX_PRINT;
9492 * More paranoid code. Although the buffer size is set to
9493 * PAGE_SIZE, and TRACE_MAX_PRINT is 1000, this is just
9494 * an extra layer of protection.
9496 if (WARN_ON_ONCE(s->seq.len >= s->seq.size))
9497 s->seq.len = s->seq.size - 1;
9499 /* should be zero ended, but we are paranoid. */
9500 s->buffer[s->seq.len] = 0;
9502 printk(KERN_TRACE "%s", s->buffer);
9507 void trace_init_global_iter(struct trace_iterator *iter)
9509 iter->tr = &global_trace;
9510 iter->trace = iter->tr->current_trace;
9511 iter->cpu_file = RING_BUFFER_ALL_CPUS;
9512 iter->array_buffer = &global_trace.array_buffer;
9514 if (iter->trace && iter->trace->open)
9515 iter->trace->open(iter);
9517 /* Annotate start of buffers if we had overruns */
9518 if (ring_buffer_overruns(iter->array_buffer->buffer))
9519 iter->iter_flags |= TRACE_FILE_ANNOTATE;
9521 /* Output in nanoseconds only if we are using a clock in nanoseconds. */
9522 if (trace_clocks[iter->tr->clock_id].in_ns)
9523 iter->iter_flags |= TRACE_FILE_TIME_IN_NS;
9525 /* Can not use kmalloc for iter.temp and iter.fmt */
9526 iter->temp = static_temp_buf;
9527 iter->temp_size = STATIC_TEMP_BUF_SIZE;
9528 iter->fmt = static_fmt_buf;
9529 iter->fmt_size = STATIC_FMT_BUF_SIZE;
9532 void ftrace_dump(enum ftrace_dump_mode oops_dump_mode)
9534 /* use static because iter can be a bit big for the stack */
9535 static struct trace_iterator iter;
9536 static atomic_t dump_running;
9537 struct trace_array *tr = &global_trace;
9538 unsigned int old_userobj;
9539 unsigned long flags;
9542 /* Only allow one dump user at a time. */
9543 if (atomic_inc_return(&dump_running) != 1) {
9544 atomic_dec(&dump_running);
9549 * Always turn off tracing when we dump.
9550 * We don't need to show trace output of what happens
9551 * between multiple crashes.
9553 * If the user does a sysrq-z, then they can re-enable
9554 * tracing with echo 1 > tracing_on.
9558 local_irq_save(flags);
9559 printk_nmi_direct_enter();
9561 /* Simulate the iterator */
9562 trace_init_global_iter(&iter);
9564 for_each_tracing_cpu(cpu) {
9565 atomic_inc(&per_cpu_ptr(iter.array_buffer->data, cpu)->disabled);
9568 old_userobj = tr->trace_flags & TRACE_ITER_SYM_USEROBJ;
9570 /* don't look at user memory in panic mode */
9571 tr->trace_flags &= ~TRACE_ITER_SYM_USEROBJ;
9573 switch (oops_dump_mode) {
9575 iter.cpu_file = RING_BUFFER_ALL_CPUS;
9578 iter.cpu_file = raw_smp_processor_id();
9583 printk(KERN_TRACE "Bad dumping mode, switching to all CPUs dump\n");
9584 iter.cpu_file = RING_BUFFER_ALL_CPUS;
9587 printk(KERN_TRACE "Dumping ftrace buffer:\n");
9589 /* Did function tracer already get disabled? */
9590 if (ftrace_is_dead()) {
9591 printk("# WARNING: FUNCTION TRACING IS CORRUPTED\n");
9592 printk("# MAY BE MISSING FUNCTION EVENTS\n");
9596 * We need to stop all tracing on all CPUs to read
9597 * the next buffer. This is a bit expensive, but is
9598 * not done often. We read everything we can,
9599 * and then release the locks again.
9602 while (!trace_empty(&iter)) {
9605 printk(KERN_TRACE "---------------------------------\n");
9609 trace_iterator_reset(&iter);
9610 iter.iter_flags |= TRACE_FILE_LAT_FMT;
9612 if (trace_find_next_entry_inc(&iter) != NULL) {
9615 ret = print_trace_line(&iter);
9616 if (ret != TRACE_TYPE_NO_CONSUME)
9617 trace_consume(&iter);
9619 touch_nmi_watchdog();
9621 trace_printk_seq(&iter.seq);
9625 printk(KERN_TRACE " (ftrace buffer empty)\n");
9627 printk(KERN_TRACE "---------------------------------\n");
9630 tr->trace_flags |= old_userobj;
9632 for_each_tracing_cpu(cpu) {
9633 atomic_dec(&per_cpu_ptr(iter.array_buffer->data, cpu)->disabled);
9635 atomic_dec(&dump_running);
9636 printk_nmi_direct_exit();
9637 local_irq_restore(flags);
9639 EXPORT_SYMBOL_GPL(ftrace_dump);
9641 int trace_run_command(const char *buf, int (*createfn)(int, char **))
9648 argv = argv_split(GFP_KERNEL, buf, &argc);
9653 ret = createfn(argc, argv);
9660 #define WRITE_BUFSIZE 4096
9662 ssize_t trace_parse_run_command(struct file *file, const char __user *buffer,
9663 size_t count, loff_t *ppos,
9664 int (*createfn)(int, char **))
9666 char *kbuf, *buf, *tmp;
9671 kbuf = kmalloc(WRITE_BUFSIZE, GFP_KERNEL);
9675 while (done < count) {
9676 size = count - done;
9678 if (size >= WRITE_BUFSIZE)
9679 size = WRITE_BUFSIZE - 1;
9681 if (copy_from_user(kbuf, buffer + done, size)) {
9688 tmp = strchr(buf, '\n');
9691 size = tmp - buf + 1;
9694 if (done + size < count) {
9697 /* This can accept WRITE_BUFSIZE - 2 ('\n' + '\0') */
9698 pr_warn("Line length is too long: Should be less than %d\n",
9706 /* Remove comments */
9707 tmp = strchr(buf, '#');
9712 ret = trace_run_command(buf, createfn);
9717 } while (done < count);
9727 __init static int tracer_alloc_buffers(void)
9733 if (security_locked_down(LOCKDOWN_TRACEFS)) {
9734 pr_warn("Tracing disabled due to lockdown\n");
9739 * Make sure we don't accidentally add more trace options
9740 * than we have bits for.
9742 BUILD_BUG_ON(TRACE_ITER_LAST_BIT > TRACE_FLAGS_MAX_SIZE);
9744 if (!alloc_cpumask_var(&tracing_buffer_mask, GFP_KERNEL))
9747 if (!alloc_cpumask_var(&global_trace.tracing_cpumask, GFP_KERNEL))
9748 goto out_free_buffer_mask;
9750 /* Only allocate trace_printk buffers if a trace_printk exists */
9751 if (&__stop___trace_bprintk_fmt != &__start___trace_bprintk_fmt)
9752 /* Must be called before global_trace.buffer is allocated */
9753 trace_printk_init_buffers();
9755 /* To save memory, keep the ring buffer size to its minimum */
9756 if (ring_buffer_expanded)
9757 ring_buf_size = trace_buf_size;
9761 cpumask_copy(tracing_buffer_mask, cpu_possible_mask);
9762 cpumask_copy(global_trace.tracing_cpumask, cpu_all_mask);
9764 raw_spin_lock_init(&global_trace.start_lock);
9767 * The prepare callbacks allocates some memory for the ring buffer. We
9768 * don't free the buffer if the CPU goes down. If we were to free
9769 * the buffer, then the user would lose any trace that was in the
9770 * buffer. The memory will be removed once the "instance" is removed.
9772 ret = cpuhp_setup_state_multi(CPUHP_TRACE_RB_PREPARE,
9773 "trace/RB:preapre", trace_rb_cpu_prepare,
9776 goto out_free_cpumask;
9777 /* Used for event triggers */
9779 temp_buffer = ring_buffer_alloc(PAGE_SIZE, RB_FL_OVERWRITE);
9781 goto out_rm_hp_state;
9783 if (trace_create_savedcmd() < 0)
9784 goto out_free_temp_buffer;
9786 if (!zalloc_cpumask_var(&global_trace.pipe_cpumask, GFP_KERNEL))
9787 goto out_free_savedcmd;
9789 /* TODO: make the number of buffers hot pluggable with CPUS */
9790 if (allocate_trace_buffers(&global_trace, ring_buf_size) < 0) {
9791 MEM_FAIL(1, "tracer: failed to allocate ring buffer!\n");
9792 goto out_free_pipe_cpumask;
9794 if (global_trace.buffer_disabled)
9797 if (trace_boot_clock) {
9798 ret = tracing_set_clock(&global_trace, trace_boot_clock);
9800 pr_warn("Trace clock %s not defined, going back to default\n",
9805 * register_tracer() might reference current_trace, so it
9806 * needs to be set before we register anything. This is
9807 * just a bootstrap of current_trace anyway.
9809 global_trace.current_trace = &nop_trace;
9811 global_trace.max_lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
9813 ftrace_init_global_array_ops(&global_trace);
9815 init_trace_flags_index(&global_trace);
9817 register_tracer(&nop_trace);
9819 /* Function tracing may start here (via kernel command line) */
9820 init_function_trace();
9822 /* All seems OK, enable tracing */
9823 tracing_disabled = 0;
9825 atomic_notifier_chain_register(&panic_notifier_list,
9826 &trace_panic_notifier);
9828 register_die_notifier(&trace_die_notifier);
9830 global_trace.flags = TRACE_ARRAY_FL_GLOBAL;
9832 INIT_LIST_HEAD(&global_trace.systems);
9833 INIT_LIST_HEAD(&global_trace.events);
9834 INIT_LIST_HEAD(&global_trace.hist_vars);
9835 INIT_LIST_HEAD(&global_trace.err_log);
9836 list_add(&global_trace.list, &ftrace_trace_arrays);
9838 apply_trace_boot_options();
9840 register_snapshot_cmd();
9844 out_free_pipe_cpumask:
9845 free_cpumask_var(global_trace.pipe_cpumask);
9847 free_saved_cmdlines_buffer(savedcmd);
9848 out_free_temp_buffer:
9849 ring_buffer_free(temp_buffer);
9851 cpuhp_remove_multi_state(CPUHP_TRACE_RB_PREPARE);
9853 free_cpumask_var(global_trace.tracing_cpumask);
9854 out_free_buffer_mask:
9855 free_cpumask_var(tracing_buffer_mask);
9860 void __init early_trace_init(void)
9862 if (tracepoint_printk) {
9863 tracepoint_print_iter =
9864 kmalloc(sizeof(*tracepoint_print_iter), GFP_KERNEL);
9865 if (MEM_FAIL(!tracepoint_print_iter,
9866 "Failed to allocate trace iterator\n"))
9867 tracepoint_printk = 0;
9869 static_key_enable(&tracepoint_printk_key.key);
9871 tracer_alloc_buffers();
9876 void __init trace_init(void)
9881 __init static int clear_boot_tracer(void)
9884 * The default bootup tracer string lives in an init section.
9885 * This function is called at late_initcall time. If the boot
9886 * tracer was never registered, clear it out, to prevent a
9887 * later registration from accessing the buffer that is
9888 * about to be freed.
9890 if (!default_bootup_tracer)
9893 printk(KERN_INFO "ftrace bootup tracer '%s' not registered.\n",
9894 default_bootup_tracer);
9895 default_bootup_tracer = NULL;
9900 fs_initcall(tracer_init_tracefs);
9901 late_initcall_sync(clear_boot_tracer);
9903 #ifdef CONFIG_HAVE_UNSTABLE_SCHED_CLOCK
9904 __init static int tracing_set_default_clock(void)
9906 /* sched_clock_stable() is determined in late_initcall */
9907 if (!trace_boot_clock && !sched_clock_stable()) {
9908 if (security_locked_down(LOCKDOWN_TRACEFS)) {
9909 pr_warn("Can not set tracing clock due to lockdown\n");
9914 "Unstable clock detected, switching default tracing clock to \"global\"\n"
9915 "If you want to keep using the local clock, then add:\n"
9916 " \"trace_clock=local\"\n"
9917 "on the kernel command line\n");
9918 tracing_set_clock(&global_trace, "global");
9923 late_initcall_sync(tracing_set_default_clock);