2 * ring buffer based function tracer
4 * Copyright (C) 2007-2012 Steven Rostedt <srostedt@redhat.com>
5 * Copyright (C) 2008 Ingo Molnar <mingo@redhat.com>
7 * Originally taken from the RT patch by:
8 * Arnaldo Carvalho de Melo <acme@redhat.com>
10 * Based on code from the latency_tracer, that is:
11 * Copyright (C) 2004-2006 Ingo Molnar
12 * Copyright (C) 2004 Nadia Yvette Chambers
14 #include <linux/ring_buffer.h>
15 #include <generated/utsrelease.h>
16 #include <linux/stacktrace.h>
17 #include <linux/writeback.h>
18 #include <linux/kallsyms.h>
19 #include <linux/seq_file.h>
20 #include <linux/notifier.h>
21 #include <linux/irqflags.h>
22 #include <linux/debugfs.h>
23 #include <linux/tracefs.h>
24 #include <linux/pagemap.h>
25 #include <linux/hardirq.h>
26 #include <linux/linkage.h>
27 #include <linux/uaccess.h>
28 #include <linux/vmalloc.h>
29 #include <linux/ftrace.h>
30 #include <linux/module.h>
31 #include <linux/percpu.h>
32 #include <linux/splice.h>
33 #include <linux/kdebug.h>
34 #include <linux/string.h>
35 #include <linux/mount.h>
36 #include <linux/rwsem.h>
37 #include <linux/slab.h>
38 #include <linux/ctype.h>
39 #include <linux/init.h>
40 #include <linux/poll.h>
41 #include <linux/nmi.h>
43 #include <linux/sched/rt.h>
46 #include "trace_output.h"
49 * On boot up, the ring buffer is set to the minimum size, so that
50 * we do not waste memory on systems that are not using tracing.
52 bool ring_buffer_expanded;
55 * We need to change this state when a selftest is running.
56 * A selftest will look into the ring buffer to count the
57 * entries inserted during the selftest, although some concurrent
58 * insertions into the ring buffer, such as trace_printk(), could occur
59 * at the same time, giving false positive or negative results.
61 static bool __read_mostly tracing_selftest_running;
64 * If a tracer is running, we do not want to run SELFTEST.
66 bool __read_mostly tracing_selftest_disabled;
68 /* Pipe tracepoints to printk */
69 struct trace_iterator *tracepoint_print_iter;
70 int tracepoint_printk;
72 /* For tracers that don't implement custom flags */
73 static struct tracer_opt dummy_tracer_opt[] = {
78 dummy_set_flag(struct trace_array *tr, u32 old_flags, u32 bit, int set)
84 * To prevent the comm cache from being overwritten when no
85 * tracing is active, only save the comm when a trace event
88 static DEFINE_PER_CPU(bool, trace_cmdline_save);
91 * Kill all tracing for good (never come back).
92 * It is initialized to 1 but will turn to zero if the initialization
93 * of the tracer is successful. But that is the only place that sets
96 static int tracing_disabled = 1;
98 cpumask_var_t __read_mostly tracing_buffer_mask;
101 * ftrace_dump_on_oops - variable to dump ftrace buffer on oops
103 * If there is an oops (or kernel panic) and the ftrace_dump_on_oops
104 * is set, then ftrace_dump is called. This will output the contents
105 * of the ftrace buffers to the console. This is very useful for
106 * capturing traces that lead to crashes and outputting them to a
109 * It is off by default, but you can enable it either by specifying
110 * "ftrace_dump_on_oops" on the kernel command line, or by setting
111 * /proc/sys/kernel/ftrace_dump_on_oops
112 * Set 1 if you want to dump buffers of all CPUs
113 * Set 2 if you want to dump the buffer of the CPU that triggered oops
116 enum ftrace_dump_mode ftrace_dump_on_oops;
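/*
 * Illustrative sketch, not part of this file: besides the command line and
 * /proc knobs described above, code that has already detected a fatal state
 * can dump the ftrace buffers directly with ftrace_dump(). The function
 * below is hypothetical; ftrace_dump() and the DUMP_* modes are the real API.
 */
static inline void example_dump_trace_on_fatal_error(void)
{
	/* behave like ftrace_dump_on_oops=orig_cpu: dump only this CPU's buffer */
	ftrace_dump(DUMP_ORIG);
}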
118 /* When set, tracing will stop when a WARN*() is hit */
119 int __disable_trace_on_warning;
121 #ifdef CONFIG_TRACE_ENUM_MAP_FILE
122 /* Map of enums to their values, for "enum_map" file */
123 struct trace_enum_map_head {
125 unsigned long length;
128 union trace_enum_map_item;
130 struct trace_enum_map_tail {
132 * "end" is first and points to NULL as it must be different
133 * than "mod" or "enum_string"
135 union trace_enum_map_item *next;
136 const char *end; /* points to NULL */
139 static DEFINE_MUTEX(trace_enum_mutex);
142 * The trace_enum_maps are saved in an array with two extra elements,
143 * one at the beginning, and one at the end. The beginning item contains
144 * the count of the saved maps (head.length), and the module they
145 * belong to if not built in (head.mod). The ending item contains a
146 * pointer to the next array of saved enum_map items.
148 union trace_enum_map_item {
149 struct trace_enum_map map;
150 struct trace_enum_map_head head;
151 struct trace_enum_map_tail tail;
154 static union trace_enum_map_item *trace_enum_maps;
155 #endif /* CONFIG_TRACE_ENUM_MAP_FILE */
157 static int tracing_set_tracer(struct trace_array *tr, const char *buf);
159 #define MAX_TRACER_SIZE 100
160 static char bootup_tracer_buf[MAX_TRACER_SIZE] __initdata;
161 static char *default_bootup_tracer;
163 static bool allocate_snapshot;
165 static int __init set_cmdline_ftrace(char *str)
167 strlcpy(bootup_tracer_buf, str, MAX_TRACER_SIZE);
168 default_bootup_tracer = bootup_tracer_buf;
169 /* We are using ftrace early, expand it */
170 ring_buffer_expanded = true;
173 __setup("ftrace=", set_cmdline_ftrace);
175 static int __init set_ftrace_dump_on_oops(char *str)
177 if (*str++ != '=' || !*str) {
178 ftrace_dump_on_oops = DUMP_ALL;
182 if (!strcmp("orig_cpu", str)) {
183 ftrace_dump_on_oops = DUMP_ORIG;
189 __setup("ftrace_dump_on_oops", set_ftrace_dump_on_oops);
191 static int __init stop_trace_on_warning(char *str)
193 if ((strcmp(str, "=0") != 0 && strcmp(str, "=off") != 0))
194 __disable_trace_on_warning = 1;
197 __setup("traceoff_on_warning", stop_trace_on_warning);
199 static int __init boot_alloc_snapshot(char *str)
201 allocate_snapshot = true;
202 /* We also need the main ring buffer expanded */
203 ring_buffer_expanded = true;
206 __setup("alloc_snapshot", boot_alloc_snapshot);
209 static char trace_boot_options_buf[MAX_TRACER_SIZE] __initdata;
211 static int __init set_trace_boot_options(char *str)
213 strlcpy(trace_boot_options_buf, str, MAX_TRACER_SIZE);
216 __setup("trace_options=", set_trace_boot_options);
218 static char trace_boot_clock_buf[MAX_TRACER_SIZE] __initdata;
219 static char *trace_boot_clock __initdata;
221 static int __init set_trace_boot_clock(char *str)
223 strlcpy(trace_boot_clock_buf, str, MAX_TRACER_SIZE);
224 trace_boot_clock = trace_boot_clock_buf;
227 __setup("trace_clock=", set_trace_boot_clock);
229 static int __init set_tracepoint_printk(char *str)
231 if ((strcmp(str, "=0") != 0 && strcmp(str, "=off") != 0))
232 tracepoint_printk = 1;
235 __setup("tp_printk", set_tracepoint_printk);
237 unsigned long long ns2usecs(cycle_t nsec)
244 /* trace_flags holds trace_options default values */
245 #define TRACE_DEFAULT_FLAGS \
246 (FUNCTION_DEFAULT_FLAGS | \
247 TRACE_ITER_PRINT_PARENT | TRACE_ITER_PRINTK | \
248 TRACE_ITER_ANNOTATE | TRACE_ITER_CONTEXT_INFO | \
249 TRACE_ITER_RECORD_CMD | TRACE_ITER_OVERWRITE | \
250 TRACE_ITER_IRQ_INFO | TRACE_ITER_MARKERS)
252 /* trace_options that are only supported by global_trace */
253 #define TOP_LEVEL_TRACE_FLAGS (TRACE_ITER_PRINTK | \
254 TRACE_ITER_PRINTK_MSGONLY | TRACE_ITER_RECORD_CMD)
256 /* trace_flags that are default zero for instances */
257 #define ZEROED_TRACE_FLAGS \
258 TRACE_ITER_EVENT_FORK
261 * The global_trace is the descriptor that holds the tracing
262 * buffers for the live tracing. For each CPU, it contains
263 * a linked list of pages that will store trace entries. The
264 * page descriptor of the pages in memory is used to hold
265 * the linked list by linking the lru item in the page descriptor
266 * to each of the pages in the buffer per CPU.
268 * For each active CPU there is a data field that holds the
269 * pages for the buffer for that CPU. Each CPU has the same number
270 * of pages allocated for its buffer.
272 static struct trace_array global_trace = {
273 .trace_flags = TRACE_DEFAULT_FLAGS,
276 LIST_HEAD(ftrace_trace_arrays);
278 int trace_array_get(struct trace_array *this_tr)
280 struct trace_array *tr;
283 mutex_lock(&trace_types_lock);
284 list_for_each_entry(tr, &ftrace_trace_arrays, list) {
291 mutex_unlock(&trace_types_lock);
296 static void __trace_array_put(struct trace_array *this_tr)
298 WARN_ON(!this_tr->ref);
302 void trace_array_put(struct trace_array *this_tr)
304 mutex_lock(&trace_types_lock);
305 __trace_array_put(this_tr);
306 mutex_unlock(&trace_types_lock);
309 int call_filter_check_discard(struct trace_event_call *call, void *rec,
310 struct ring_buffer *buffer,
311 struct ring_buffer_event *event)
313 if (unlikely(call->flags & TRACE_EVENT_FL_FILTERED) &&
314 !filter_match_preds(call->filter, rec)) {
315 __trace_event_discard_commit(buffer, event);
322 void trace_free_pid_list(struct trace_pid_list *pid_list)
324 vfree(pid_list->pids);
329 * trace_find_filtered_pid - check if a pid exists in a filtered_pid list
330 * @filtered_pids: The list of pids to check
331 * @search_pid: The PID to find in @filtered_pids
333 * Returns true if @search_pid is found in @filtered_pids, and false otherwise.
336 trace_find_filtered_pid(struct trace_pid_list *filtered_pids, pid_t search_pid)
339 * If pid_max changed after filtered_pids was created, we
340 * by default ignore all pids greater than the previous pid_max.
342 if (search_pid >= filtered_pids->pid_max)
345 return test_bit(search_pid, filtered_pids->pids);
349 * trace_ignore_this_task - should a task be ignored for tracing
350 * @filtered_pids: The list of pids to check
351 * @task: The task that should be ignored if not filtered
353 * Checks if @task should be traced or not from @filtered_pids.
354 * Returns true if @task should *NOT* be traced.
355 * Returns false if @task should be traced.
358 trace_ignore_this_task(struct trace_pid_list *filtered_pids, struct task_struct *task)
361 * Return false, because if filtered_pids does not exist,
362 * all pids are good to trace.
367 return !trace_find_filtered_pid(filtered_pids, task->pid);
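/*
 * Illustrative sketch, not part of this file: a typical caller consults the
 * filter before recording anything for a task. The function below is
 * hypothetical; trace_ignore_this_task() is the real helper.
 */
static void example_maybe_trace_task(struct trace_pid_list *pid_list,
				     struct task_struct *task)
{
	if (trace_ignore_this_task(pid_list, task))
		return;		/* task is filtered out, record nothing */

	/* ... record an event for @task here ... */
}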
371 * trace_filter_add_remove_task - Add or remove a task from a pid_list
372 * @pid_list: The list to modify
373 * @self: The current task for fork or NULL for exit
374 * @task: The task to add or remove
376 * If adding a task, if @self is defined, the task is only added if @self
377 * is also included in @pid_list. This happens on fork and tasks should
378 * only be added when the parent is listed. If @self is NULL, then the
379 * @task pid will be removed from the list, which would happen on exit
382 void trace_filter_add_remove_task(struct trace_pid_list *pid_list,
383 struct task_struct *self,
384 struct task_struct *task)
389 /* For forks, we only add if the forking task is listed */
391 if (!trace_find_filtered_pid(pid_list, self->pid))
395 /* Sorry, but we don't support pid_max changing after setting */
396 if (task->pid >= pid_list->pid_max)
399 /* "self" is set for forks, and NULL for exits */
401 set_bit(task->pid, pid_list->pids);
403 clear_bit(task->pid, pid_list->pids);
407 * trace_pid_next - Used for seq_file to get to the next pid of a pid_list
408 * @pid_list: The pid list to show
409 * @v: The last pid that was shown (+1 the actual pid to let zero be displayed)
410 * @pos: The position of the file
412 * This is used by the seq_file "next" operation to iterate the pids
413 * listed in a trace_pid_list structure.
415 * Returns the pid+1 as we want to display pid of zero, but NULL would
416 * stop the iteration.
418 void *trace_pid_next(struct trace_pid_list *pid_list, void *v, loff_t *pos)
420 unsigned long pid = (unsigned long)v;
424 /* pid already is +1 of the actual previous bit */
425 pid = find_next_bit(pid_list->pids, pid_list->pid_max, pid);
427 /* Return pid + 1 to allow zero to be represented */
428 if (pid < pid_list->pid_max)
429 return (void *)(pid + 1);
435 * trace_pid_start - Used for seq_file to start reading pid lists
436 * @pid_list: The pid list to show
437 * @pos: The position of the file
439 * This is used by seq_file "start" operation to start the iteration
442 * Returns the pid+1 as we want to display pid of zero, but NULL would
443 * stop the iteration.
445 void *trace_pid_start(struct trace_pid_list *pid_list, loff_t *pos)
450 pid = find_first_bit(pid_list->pids, pid_list->pid_max);
451 if (pid >= pid_list->pid_max)
454 /* Return pid + 1 so that zero can be the exit value */
455 for (pid++; pid && l < *pos;
456 pid = (unsigned long)trace_pid_next(pid_list, (void *)pid, &l))
462 * trace_pid_show - show the current pid in seq_file processing
463 * @m: The seq_file structure to write into
464 * @v: A void pointer of the pid (+1) value to display
466 * Can be directly used by seq_file operations to display the current
469 int trace_pid_show(struct seq_file *m, void *v)
471 unsigned long pid = (unsigned long)v - 1;
473 seq_printf(m, "%lu\n", pid);
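/*
 * Illustrative sketch, not part of this file: trace_pid_start(),
 * trace_pid_next() and trace_pid_show() are meant to be glued into a
 * seq_file. The wrappers and ops below are hypothetical; a real user
 * would look up its own trace_pid_list (typically under RCU) in start/next.
 */
static void *example_pid_seq_start(struct seq_file *m, loff_t *pos)
{
	struct trace_pid_list *pid_list = m->private;	/* assumed to be set at open */

	return trace_pid_start(pid_list, pos);
}

static void *example_pid_seq_next(struct seq_file *m, void *v, loff_t *pos)
{
	struct trace_pid_list *pid_list = m->private;

	return trace_pid_next(pid_list, v, pos);
}

static void example_pid_seq_stop(struct seq_file *m, void *v)
{
}

static const struct seq_operations example_pid_seq_ops = {
	.start	= example_pid_seq_start,
	.next	= example_pid_seq_next,
	.stop	= example_pid_seq_stop,
	.show	= trace_pid_show,
};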
477 /* 128 should be much more than enough */
478 #define PID_BUF_SIZE 127
480 int trace_pid_write(struct trace_pid_list *filtered_pids,
481 struct trace_pid_list **new_pid_list,
482 const char __user *ubuf, size_t cnt)
484 struct trace_pid_list *pid_list;
485 struct trace_parser parser;
493 if (trace_parser_get_init(&parser, PID_BUF_SIZE + 1))
497 * Always recreate a new array. The write is an all or nothing
498 * operation. Always create a new array when adding new pids by
499 * the user. If the operation fails, then the current list is
502 pid_list = kmalloc(sizeof(*pid_list), GFP_KERNEL);
504 trace_parser_put(&parser);
508 pid_list->pid_max = READ_ONCE(pid_max);
510 /* Only truncating will shrink pid_max */
511 if (filtered_pids && filtered_pids->pid_max > pid_list->pid_max)
512 pid_list->pid_max = filtered_pids->pid_max;
514 pid_list->pids = vzalloc((pid_list->pid_max + 7) >> 3);
515 if (!pid_list->pids) {
516 trace_parser_put(&parser);
522 /* copy the current bits to the new max */
523 for_each_set_bit(pid, filtered_pids->pids,
524 filtered_pids->pid_max) {
525 set_bit(pid, pid_list->pids);
534 ret = trace_get_user(&parser, ubuf, cnt, &pos);
535 if (ret < 0 || !trace_parser_loaded(&parser))
542 parser.buffer[parser.idx] = 0;
545 if (kstrtoul(parser.buffer, 0, &val))
547 if (val >= pid_list->pid_max)
552 set_bit(pid, pid_list->pids);
555 trace_parser_clear(&parser);
558 trace_parser_put(&parser);
561 trace_free_pid_list(pid_list);
566 /* Cleared the list of pids */
567 trace_free_pid_list(pid_list);
572 *new_pid_list = pid_list;
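/*
 * Illustrative sketch, not part of this file: a "set_*_pid" style tracefs
 * write handler hands the user buffer straight to trace_pid_write() and then
 * publishes the new list. Everything named "example" is hypothetical, and
 * the locking/RCU publication of the new list is deliberately elided.
 */
static ssize_t example_pid_file_write(struct file *filp, const char __user *ubuf,
				      size_t cnt, loff_t *ppos)
{
	struct trace_pid_list *filtered_pids = NULL;	/* current list, if any */
	struct trace_pid_list *new_pids;
	int ret;

	ret = trace_pid_write(filtered_pids, &new_pids, ubuf, cnt);
	if (ret < 0)
		return ret;

	/* ... rcu_assign_pointer() the new list and free the old one here ... */

	*ppos += ret;
	return ret;
}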
577 static cycle_t buffer_ftrace_now(struct trace_buffer *buf, int cpu)
581 /* Early boot up does not have a buffer yet */
583 return trace_clock_local();
585 ts = ring_buffer_time_stamp(buf->buffer, cpu);
586 ring_buffer_normalize_time_stamp(buf->buffer, cpu, &ts);
591 cycle_t ftrace_now(int cpu)
593 return buffer_ftrace_now(&global_trace.trace_buffer, cpu);
597 * tracing_is_enabled - Show if global_trace has been disabled
599 * Shows if the global trace has been enabled or not. It uses the
600 * mirror flag "buffer_disabled" so it can be used in fast paths such as for
601 * the irqsoff tracer. But it may be inaccurate due to races. If you
602 * need to know the accurate state, use tracing_is_on() which is a little
603 * slower, but accurate.
605 int tracing_is_enabled(void)
608 * For quick access (irqsoff uses this in fast path), just
609 * return the mirror variable of the state of the ring buffer.
610 * It's a little racy, but we don't really care.
613 return !global_trace.buffer_disabled;
617 * trace_buf_size is the size in bytes that is allocated
618 * for a buffer. Note, the number of bytes is always rounded
621 * This number is purposely set to a low number of 16384.
622 * If the dump on oops happens, it will be much appreciated
623 * to not have to wait for all that output. Anyway, this is
624 * configurable at both boot time and run time.
626 #define TRACE_BUF_SIZE_DEFAULT 1441792UL /* 16384 * 88 (sizeof(entry)) */
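/*
 * Worked check of the comment above (added for clarity): 16384 entries at
 * roughly 88 bytes each is 16384 * 88 = 1441792 bytes, which matches
 * TRACE_BUF_SIZE_DEFAULT.
 */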
628 static unsigned long trace_buf_size = TRACE_BUF_SIZE_DEFAULT;
630 /* trace_types holds a linked list of available tracers. */
631 static struct tracer *trace_types __read_mostly;
634 * trace_types_lock is used to protect the trace_types list.
636 DEFINE_MUTEX(trace_types_lock);
639 * serialize the access of the ring buffer
641 * The ring buffer serializes readers, but that is only low-level protection.
642 * The validity of events (which are returned by ring_buffer_peek() etc.)
643 * is not protected by the ring buffer.
645 * The content of events may become garbage if we allow other processes to
646 * consume these events concurrently:
647 * A) the page of the consumed events may become a normal page
648 * (not a reader page) in the ring buffer, and this page will be rewritten
649 * by the event producer.
650 * B) the page of the consumed events may become a page for splice_read,
651 * and this page will be returned to the system.
653 * These primitives allow multi-process access to different cpu ring buffers
656 * These primitives don't distinguish read-only and read-consume access.
657 * Multiple read-only accesses are also serialized.
661 static DECLARE_RWSEM(all_cpu_access_lock);
662 static DEFINE_PER_CPU(struct mutex, cpu_access_lock);
664 static inline void trace_access_lock(int cpu)
666 if (cpu == RING_BUFFER_ALL_CPUS) {
667 /* gain it for accessing the whole ring buffer. */
668 down_write(&all_cpu_access_lock);
670 /* gain it for accessing a cpu ring buffer. */
672 /* Firstly block other trace_access_lock(RING_BUFFER_ALL_CPUS). */
673 down_read(&all_cpu_access_lock);
675 /* Secondly block other access to this @cpu ring buffer. */
676 mutex_lock(&per_cpu(cpu_access_lock, cpu));
680 static inline void trace_access_unlock(int cpu)
682 if (cpu == RING_BUFFER_ALL_CPUS) {
683 up_write(&all_cpu_access_lock);
685 mutex_unlock(&per_cpu(cpu_access_lock, cpu));
686 up_read(&all_cpu_access_lock);
690 static inline void trace_access_lock_init(void)
694 for_each_possible_cpu(cpu)
695 mutex_init(&per_cpu(cpu_access_lock, cpu));
700 static DEFINE_MUTEX(access_lock);
702 static inline void trace_access_lock(int cpu)
705 mutex_lock(&access_lock);
708 static inline void trace_access_unlock(int cpu)
711 mutex_unlock(&access_lock);
714 static inline void trace_access_lock_init(void)
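/*
 * Illustrative sketch, not part of this file: a consuming reader for one CPU
 * (or RING_BUFFER_ALL_CPUS) brackets its ring-buffer access with the helpers
 * above. The function and the "consume events" step are hypothetical
 * placeholders.
 */
static void example_consume_cpu_events(int cpu)
{
	trace_access_lock(cpu);

	/* ... consume or splice the events of @cpu's ring buffer here ... */

	trace_access_unlock(cpu);
}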
720 #ifdef CONFIG_STACKTRACE
721 static void __ftrace_trace_stack(struct ring_buffer *buffer,
723 int skip, int pc, struct pt_regs *regs);
724 static inline void ftrace_trace_stack(struct trace_array *tr,
725 struct ring_buffer *buffer,
727 int skip, int pc, struct pt_regs *regs);
730 static inline void __ftrace_trace_stack(struct ring_buffer *buffer,
732 int skip, int pc, struct pt_regs *regs)
735 static inline void ftrace_trace_stack(struct trace_array *tr,
736 struct ring_buffer *buffer,
738 int skip, int pc, struct pt_regs *regs)
744 static void tracer_tracing_on(struct trace_array *tr)
746 if (tr->trace_buffer.buffer)
747 ring_buffer_record_on(tr->trace_buffer.buffer);
749 * This flag is looked at when buffers haven't been allocated
750 * yet, or by some tracers (like irqsoff), that just want to
751 * know if the ring buffer has been disabled, but it can handle
752 * races of where it gets disabled but we still do a record.
753 * As the check is in the fast path of the tracers, it is more
754 * important to be fast than accurate.
756 tr->buffer_disabled = 0;
757 /* Make the flag seen by readers */
762 * tracing_on - enable tracing buffers
764 * This function enables tracing buffers that may have been
765 * disabled with tracing_off.
767 void tracing_on(void)
769 tracer_tracing_on(&global_trace);
771 EXPORT_SYMBOL_GPL(tracing_on);
774 * __trace_puts - write a constant string into the trace buffer.
775 * @ip: The address of the caller
776 * @str: The constant string to write
777 * @size: The size of the string.
779 int __trace_puts(unsigned long ip, const char *str, int size)
781 struct ring_buffer_event *event;
782 struct ring_buffer *buffer;
783 struct print_entry *entry;
784 unsigned long irq_flags;
788 if (!(global_trace.trace_flags & TRACE_ITER_PRINTK))
791 pc = preempt_count();
793 if (unlikely(tracing_selftest_running || tracing_disabled))
796 alloc = sizeof(*entry) + size + 2; /* possible \n added */
798 local_save_flags(irq_flags);
799 buffer = global_trace.trace_buffer.buffer;
800 event = trace_buffer_lock_reserve(buffer, TRACE_PRINT, alloc,
805 entry = ring_buffer_event_data(event);
808 memcpy(&entry->buf, str, size);
810 /* Add a newline if necessary */
811 if (entry->buf[size - 1] != '\n') {
812 entry->buf[size] = '\n';
813 entry->buf[size + 1] = '\0';
815 entry->buf[size] = '\0';
817 __buffer_unlock_commit(buffer, event);
818 ftrace_trace_stack(&global_trace, buffer, irq_flags, 4, pc, NULL);
822 EXPORT_SYMBOL_GPL(__trace_puts);
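/*
 * Illustrative sketch, not part of this file: callers normally reach
 * __trace_puts() through the trace_puts() macro (linux/kernel.h), which
 * supplies _THIS_IP_ and the string length. The function below is a
 * hypothetical debug helper.
 */
static inline void example_mark_trace(void)
{
	trace_puts("example: reached the interesting spot\n");
}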
825 * __trace_bputs - write the pointer to a constant string into trace buffer
826 * @ip: The address of the caller
827 * @str: The constant string to write to the buffer
829 int __trace_bputs(unsigned long ip, const char *str)
831 struct ring_buffer_event *event;
832 struct ring_buffer *buffer;
833 struct bputs_entry *entry;
834 unsigned long irq_flags;
835 int size = sizeof(struct bputs_entry);
838 if (!(global_trace.trace_flags & TRACE_ITER_PRINTK))
841 pc = preempt_count();
843 if (unlikely(tracing_selftest_running || tracing_disabled))
846 local_save_flags(irq_flags);
847 buffer = global_trace.trace_buffer.buffer;
848 event = trace_buffer_lock_reserve(buffer, TRACE_BPUTS, size,
853 entry = ring_buffer_event_data(event);
857 __buffer_unlock_commit(buffer, event);
858 ftrace_trace_stack(&global_trace, buffer, irq_flags, 4, pc, NULL);
862 EXPORT_SYMBOL_GPL(__trace_bputs);
864 #ifdef CONFIG_TRACER_SNAPSHOT
866 * tracing_snapshot - take a snapshot of the current buffer.
868 * This causes a swap between the snapshot buffer and the current live
869 * tracing buffer. You can use this to take snapshots of the live
870 * trace when some condition is triggered, but continue to trace.
872 * Note, make sure to allocate the snapshot with either
873 * a tracing_snapshot_alloc(), or by doing it manually
874 * with: echo 1 > /sys/kernel/debug/tracing/snapshot
876 * If the snapshot buffer is not allocated, it will stop tracing.
877 * Basically making a permanent snapshot.
879 void tracing_snapshot(void)
881 struct trace_array *tr = &global_trace;
882 struct tracer *tracer = tr->current_trace;
886 internal_trace_puts("*** SNAPSHOT CALLED FROM NMI CONTEXT ***\n");
887 internal_trace_puts("*** snapshot is being ignored ***\n");
891 if (!tr->allocated_snapshot) {
892 internal_trace_puts("*** SNAPSHOT NOT ALLOCATED ***\n");
893 internal_trace_puts("*** stopping trace here! ***\n");
898 /* Note, snapshot can not be used when the tracer uses it */
899 if (tracer->use_max_tr) {
900 internal_trace_puts("*** LATENCY TRACER ACTIVE ***\n");
901 internal_trace_puts("*** Can not use snapshot (sorry) ***\n");
905 local_irq_save(flags);
906 update_max_tr(tr, current, smp_processor_id());
907 local_irq_restore(flags);
909 EXPORT_SYMBOL_GPL(tracing_snapshot);
911 static int resize_buffer_duplicate_size(struct trace_buffer *trace_buf,
912 struct trace_buffer *size_buf, int cpu_id);
913 static void set_buffer_entries(struct trace_buffer *buf, unsigned long val);
915 static int alloc_snapshot(struct trace_array *tr)
919 if (!tr->allocated_snapshot) {
921 /* allocate spare buffer */
922 ret = resize_buffer_duplicate_size(&tr->max_buffer,
923 &tr->trace_buffer, RING_BUFFER_ALL_CPUS);
927 tr->allocated_snapshot = true;
933 static void free_snapshot(struct trace_array *tr)
936 * We don't free the ring buffer; instead, we resize it because
937 * the max_tr ring buffer has some state (e.g. ring->clock) and
938 * we want to preserve it.
940 ring_buffer_resize(tr->max_buffer.buffer, 1, RING_BUFFER_ALL_CPUS);
941 set_buffer_entries(&tr->max_buffer, 1);
942 tracing_reset_online_cpus(&tr->max_buffer);
943 tr->allocated_snapshot = false;
947 * tracing_alloc_snapshot - allocate snapshot buffer.
949 * This only allocates the snapshot buffer if it isn't already
950 * allocated - it doesn't also take a snapshot.
952 * This is meant to be used in cases where the snapshot buffer needs
953 * to be set up for events that can't sleep but need to be able to
954 * trigger a snapshot.
956 int tracing_alloc_snapshot(void)
958 struct trace_array *tr = &global_trace;
961 ret = alloc_snapshot(tr);
966 EXPORT_SYMBOL_GPL(tracing_alloc_snapshot);
969 * tracing_snapshot_alloc - allocate and take a snapshot of the current buffer.
971 * This is similar to tracing_snapshot(), but it will allocate the
972 * snapshot buffer if it isn't already allocated. Use this only
973 * where it is safe to sleep, as the allocation may sleep.
975 * This causes a swap between the snapshot buffer and the current live
976 * tracing buffer. You can use this to take snapshots of the live
977 * trace when some condition is triggered, but continue to trace.
979 void tracing_snapshot_alloc(void)
983 ret = tracing_alloc_snapshot();
989 EXPORT_SYMBOL_GPL(tracing_snapshot_alloc);
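/*
 * Illustrative sketch, not part of this file: a debugging module typically
 * allocates the snapshot buffer once from a context that may sleep, and later
 * calls tracing_snapshot() at the (possibly atomic) spot where the
 * interesting condition is detected. The functions below are hypothetical.
 */
static int example_snapshot_setup(void)
{
	/* may sleep: call from module init or similar */
	return tracing_alloc_snapshot();
}

static void example_on_condition(void)
{
	/* safe in atomic context once the buffer is allocated */
	tracing_snapshot();
}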
991 void tracing_snapshot(void)
993 WARN_ONCE(1, "Snapshot feature not enabled, but internal snapshot used");
995 EXPORT_SYMBOL_GPL(tracing_snapshot);
996 int tracing_alloc_snapshot(void)
998 WARN_ONCE(1, "Snapshot feature not enabled, but snapshot allocation used");
1001 EXPORT_SYMBOL_GPL(tracing_alloc_snapshot);
1002 void tracing_snapshot_alloc(void)
1007 EXPORT_SYMBOL_GPL(tracing_snapshot_alloc);
1008 #endif /* CONFIG_TRACER_SNAPSHOT */
1010 static void tracer_tracing_off(struct trace_array *tr)
1012 if (tr->trace_buffer.buffer)
1013 ring_buffer_record_off(tr->trace_buffer.buffer);
1015 * This flag is looked at when buffers haven't been allocated
1016 * yet, or by some tracers (like irqsoff), that just want to
1017 * know if the ring buffer has been disabled, but it can handle
1018 * races of where it gets disabled but we still do a record.
1019 * As the check is in the fast path of the tracers, it is more
1020 * important to be fast than accurate.
1022 tr->buffer_disabled = 1;
1023 /* Make the flag seen by readers */
1028 * tracing_off - turn off tracing buffers
1030 * This function stops the tracing buffers from recording data.
1031 * It does not disable any overhead the tracers themselves may
1032 * be causing. This function simply causes all recording to
1033 * the ring buffers to fail.
1035 void tracing_off(void)
1037 tracer_tracing_off(&global_trace);
1039 EXPORT_SYMBOL_GPL(tracing_off);
1041 void disable_trace_on_warning(void)
1043 if (__disable_trace_on_warning)
1048 * tracer_tracing_is_on - show real state of ring buffer enabled
1049 * @tr: the trace array to know if ring buffer is enabled
1051 * Shows real state of the ring buffer if it is enabled or not.
1053 int tracer_tracing_is_on(struct trace_array *tr)
1055 if (tr->trace_buffer.buffer)
1056 return ring_buffer_record_is_on(tr->trace_buffer.buffer);
1057 return !tr->buffer_disabled;
1061 * tracing_is_on - show state of ring buffers enabled
1063 int tracing_is_on(void)
1065 return tracer_tracing_is_on(&global_trace);
1067 EXPORT_SYMBOL_GPL(tracing_is_on);
1069 static int __init set_buf_size(char *str)
1071 unsigned long buf_size;
1075 buf_size = memparse(str, &str);
1076 /* nr_entries can not be zero */
1079 trace_buf_size = buf_size;
1082 __setup("trace_buf_size=", set_buf_size);
1084 static int __init set_tracing_thresh(char *str)
1086 unsigned long threshold;
1091 ret = kstrtoul(str, 0, &threshold);
1094 tracing_thresh = threshold * 1000;
1097 __setup("tracing_thresh=", set_tracing_thresh);
1099 unsigned long nsecs_to_usecs(unsigned long nsecs)
1101 return nsecs / 1000;
1105 * TRACE_FLAGS is defined as a tuple matching bit masks with strings.
1106 * It uses C(a, b) where 'a' is the enum name and 'b' is the string that
1107 * matches it. By defining "C(a, b) b", TRACE_FLAGS becomes a list
1108 * of strings in the order that the enums were defined.
1113 /* These must match the bit positions in trace_iterator_flags */
1114 static const char *trace_options[] = {
1122 int in_ns; /* is this clock in nanoseconds? */
1123 } trace_clocks[] = {
1124 { trace_clock_local, "local", 1 },
1125 { trace_clock_global, "global", 1 },
1126 { trace_clock_counter, "counter", 0 },
1127 { trace_clock_jiffies, "uptime", 0 },
1128 { trace_clock, "perf", 1 },
1129 { ktime_get_mono_fast_ns, "mono", 1 },
1130 { ktime_get_raw_fast_ns, "mono_raw", 1 },
1135 * trace_parser_get_init - gets the buffer for trace parser
1137 int trace_parser_get_init(struct trace_parser *parser, int size)
1139 memset(parser, 0, sizeof(*parser));
1141 parser->buffer = kmalloc(size, GFP_KERNEL);
1142 if (!parser->buffer)
1145 parser->size = size;
1150 * trace_parser_put - frees the buffer for trace parser
1152 void trace_parser_put(struct trace_parser *parser)
1154 kfree(parser->buffer);
1158 * trace_get_user - reads the user input string separated by space
1159 * (matched by isspace(ch))
1161 * For each string found the 'struct trace_parser' is updated,
1162 * and the function returns.
1164 * Returns number of bytes read.
1166 * See kernel/trace/trace.h for 'struct trace_parser' details.
1168 int trace_get_user(struct trace_parser *parser, const char __user *ubuf,
1169 size_t cnt, loff_t *ppos)
1176 trace_parser_clear(parser);
1178 ret = get_user(ch, ubuf++);
1186 * The parser is not finished with the last write,
1187 * continue reading the user input without skipping spaces.
1189 if (!parser->cont) {
1190 /* skip white space */
1191 while (cnt && isspace(ch)) {
1192 ret = get_user(ch, ubuf++);
1199 /* only spaces were written */
1209 /* read the non-space input */
1210 while (cnt && !isspace(ch)) {
1211 if (parser->idx < parser->size - 1)
1212 parser->buffer[parser->idx++] = ch;
1217 ret = get_user(ch, ubuf++);
1224 /* We either got finished input or we have to wait for another call. */
1226 parser->buffer[parser->idx] = 0;
1227 parser->cont = false;
1228 } else if (parser->idx < parser->size - 1) {
1229 parser->cont = true;
1230 parser->buffer[parser->idx++] = ch;
1243 /* TODO add a seq_buf_to_buffer() */
1244 static ssize_t trace_seq_to_buffer(struct trace_seq *s, void *buf, size_t cnt)
1248 if (trace_seq_used(s) <= s->seq.readpos)
1251 len = trace_seq_used(s) - s->seq.readpos;
1254 memcpy(buf, s->buffer + s->seq.readpos, cnt);
1256 s->seq.readpos += cnt;
1260 unsigned long __read_mostly tracing_thresh;
1262 #ifdef CONFIG_TRACER_MAX_TRACE
1264 * Copy the new maximum trace into the separate maximum-trace
1265 * structure. (this way the maximum trace is permanently saved,
1266 * for later retrieval via /sys/kernel/debug/tracing/latency_trace)
1269 __update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu)
1271 struct trace_buffer *trace_buf = &tr->trace_buffer;
1272 struct trace_buffer *max_buf = &tr->max_buffer;
1273 struct trace_array_cpu *data = per_cpu_ptr(trace_buf->data, cpu);
1274 struct trace_array_cpu *max_data = per_cpu_ptr(max_buf->data, cpu);
1277 max_buf->time_start = data->preempt_timestamp;
1279 max_data->saved_latency = tr->max_latency;
1280 max_data->critical_start = data->critical_start;
1281 max_data->critical_end = data->critical_end;
1283 memcpy(max_data->comm, tsk->comm, TASK_COMM_LEN);
1284 max_data->pid = tsk->pid;
1286 * If tsk == current, then use current_uid(), as that does not use
1287 * RCU. The irq tracer can be called out of RCU scope.
1290 max_data->uid = current_uid();
1292 max_data->uid = task_uid(tsk);
1294 max_data->nice = tsk->static_prio - 20 - MAX_RT_PRIO;
1295 max_data->policy = tsk->policy;
1296 max_data->rt_priority = tsk->rt_priority;
1298 /* record this task's comm */
1299 tracing_record_cmdline(tsk);
1303 * update_max_tr - snapshot all trace buffers from global_trace to max_tr
1305 * @tsk: the task with the latency
1306 * @cpu: The cpu that initiated the trace.
1308 * Flip the buffers between the @tr and the max_tr and record information
1309 * about which task was the cause of this latency.
1312 update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu)
1314 struct ring_buffer *buf;
1319 WARN_ON_ONCE(!irqs_disabled());
1321 if (!tr->allocated_snapshot) {
1322 /* Only the nop tracer should hit this when disabling */
1323 WARN_ON_ONCE(tr->current_trace != &nop_trace);
1327 arch_spin_lock(&tr->max_lock);
1329 /* Inherit the recordable setting from trace_buffer */
1330 if (ring_buffer_record_is_set_on(tr->trace_buffer.buffer))
1331 ring_buffer_record_on(tr->max_buffer.buffer);
1333 ring_buffer_record_off(tr->max_buffer.buffer);
1335 buf = tr->trace_buffer.buffer;
1336 tr->trace_buffer.buffer = tr->max_buffer.buffer;
1337 tr->max_buffer.buffer = buf;
1339 __update_max_tr(tr, tsk, cpu);
1340 arch_spin_unlock(&tr->max_lock);
1344 * update_max_tr_single - only copy one trace over, and reset the rest
1346 * @tsk: task with the latency
1347 * @cpu: the cpu of the buffer to copy.
1349 * Flip the trace of a single CPU buffer between the @tr and the max_tr.
1352 update_max_tr_single(struct trace_array *tr, struct task_struct *tsk, int cpu)
1359 WARN_ON_ONCE(!irqs_disabled());
1360 if (!tr->allocated_snapshot) {
1361 /* Only the nop tracer should hit this when disabling */
1362 WARN_ON_ONCE(tr->current_trace != &nop_trace);
1366 arch_spin_lock(&tr->max_lock);
1368 ret = ring_buffer_swap_cpu(tr->max_buffer.buffer, tr->trace_buffer.buffer, cpu);
1370 if (ret == -EBUSY) {
1372 * We failed to swap the buffer due to a commit taking
1373 * place on this CPU. We fail to record, but we reset
1374 * the max trace buffer (no one writes directly to it)
1375 * and flag that it failed.
1377 trace_array_printk_buf(tr->max_buffer.buffer, _THIS_IP_,
1378 "Failed to swap buffers due to commit in progress\n");
1381 WARN_ON_ONCE(ret && ret != -EAGAIN && ret != -EBUSY);
1383 __update_max_tr(tr, tsk, cpu);
1384 arch_spin_unlock(&tr->max_lock);
1386 #endif /* CONFIG_TRACER_MAX_TRACE */
1388 static int wait_on_pipe(struct trace_iterator *iter, bool full)
1390 /* Iterators are static, they should be filled or empty */
1391 if (trace_buffer_iter(iter, iter->cpu_file))
1394 return ring_buffer_wait(iter->trace_buffer->buffer, iter->cpu_file,
1398 #ifdef CONFIG_FTRACE_STARTUP_TEST
1399 static int run_tracer_selftest(struct tracer *type)
1401 struct trace_array *tr = &global_trace;
1402 struct tracer *saved_tracer = tr->current_trace;
1405 if (!type->selftest || tracing_selftest_disabled)
1409 * Run a selftest on this tracer.
1410 * Here we reset the trace buffer, and set the current
1411 * tracer to be this tracer. The tracer can then run some
1412 * internal tracing to verify that everything is in order.
1413 * If we fail, we do not register this tracer.
1415 tracing_reset_online_cpus(&tr->trace_buffer);
1417 tr->current_trace = type;
1419 #ifdef CONFIG_TRACER_MAX_TRACE
1420 if (type->use_max_tr) {
1421 /* If we expanded the buffers, make sure the max is expanded too */
1422 if (ring_buffer_expanded)
1423 ring_buffer_resize(tr->max_buffer.buffer, trace_buf_size,
1424 RING_BUFFER_ALL_CPUS);
1425 tr->allocated_snapshot = true;
1429 /* the test is responsible for initializing and enabling */
1430 pr_info("Testing tracer %s: ", type->name);
1431 ret = type->selftest(type, tr);
1432 /* the test is responsible for resetting too */
1433 tr->current_trace = saved_tracer;
1435 printk(KERN_CONT "FAILED!\n");
1436 /* Add the warning after printing 'FAILED' */
1440 /* Only reset on passing, to avoid touching corrupted buffers */
1441 tracing_reset_online_cpus(&tr->trace_buffer);
1443 #ifdef CONFIG_TRACER_MAX_TRACE
1444 if (type->use_max_tr) {
1445 tr->allocated_snapshot = false;
1447 /* Shrink the max buffer again */
1448 if (ring_buffer_expanded)
1449 ring_buffer_resize(tr->max_buffer.buffer, 1,
1450 RING_BUFFER_ALL_CPUS);
1454 printk(KERN_CONT "PASSED\n");
1458 static inline int run_tracer_selftest(struct tracer *type)
1462 #endif /* CONFIG_FTRACE_STARTUP_TEST */
1464 static void add_tracer_options(struct trace_array *tr, struct tracer *t);
1466 static void __init apply_trace_boot_options(void);
1469 * register_tracer - register a tracer with the ftrace system.
1470 * @type: the plugin for the tracer
1472 * Register a new plugin tracer.
1474 int __init register_tracer(struct tracer *type)
1480 pr_info("Tracer must have a name\n");
1484 if (strlen(type->name) >= MAX_TRACER_SIZE) {
1485 pr_info("Tracer has a name longer than %d\n", MAX_TRACER_SIZE);
1489 mutex_lock(&trace_types_lock);
1491 tracing_selftest_running = true;
1493 for (t = trace_types; t; t = t->next) {
1494 if (strcmp(type->name, t->name) == 0) {
1496 pr_info("Tracer %s already registered\n",
1503 if (!type->set_flag)
1504 type->set_flag = &dummy_set_flag;
1506 /* allocate a dummy tracer_flags */
1507 type->flags = kmalloc(sizeof(*type->flags), GFP_KERNEL);
1512 type->flags->val = 0;
1513 type->flags->opts = dummy_tracer_opt;
1515 if (!type->flags->opts)
1516 type->flags->opts = dummy_tracer_opt;
1518 /* store the tracer for __set_tracer_option */
1519 type->flags->trace = type;
1521 ret = run_tracer_selftest(type);
1525 type->next = trace_types;
1527 add_tracer_options(&global_trace, type);
1530 tracing_selftest_running = false;
1531 mutex_unlock(&trace_types_lock);
1533 if (ret || !default_bootup_tracer)
1536 if (strncmp(default_bootup_tracer, type->name, MAX_TRACER_SIZE))
1539 printk(KERN_INFO "Starting tracer '%s'\n", type->name);
1540 /* Do we want this tracer to start on bootup? */
1541 tracing_set_tracer(&global_trace, type->name);
1542 default_bootup_tracer = NULL;
1544 apply_trace_boot_options();
1546 /* disable other selftests, since this will break it. */
1547 tracing_selftest_disabled = true;
1548 #ifdef CONFIG_FTRACE_STARTUP_TEST
1549 printk(KERN_INFO "Disabling FTRACE selftests due to running tracer '%s'\n",
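/*
 * Illustrative sketch, not part of this file: a minimal tracer only needs a
 * name plus init/reset callbacks before it can be handed to register_tracer()
 * from an __init path. The "example" tracer below is hypothetical.
 */
static int example_tracer_init(struct trace_array *tr)
{
	return 0;
}

static void example_tracer_reset(struct trace_array *tr)
{
}

static struct tracer example_tracer __read_mostly = {
	.name	= "example",
	.init	= example_tracer_init,
	.reset	= example_tracer_reset,
};

static int __init example_tracer_register(void)
{
	return register_tracer(&example_tracer);
}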
1557 void tracing_reset(struct trace_buffer *buf, int cpu)
1559 struct ring_buffer *buffer = buf->buffer;
1564 ring_buffer_record_disable(buffer);
1566 /* Make sure all commits have finished */
1567 synchronize_sched();
1568 ring_buffer_reset_cpu(buffer, cpu);
1570 ring_buffer_record_enable(buffer);
1573 void tracing_reset_online_cpus(struct trace_buffer *buf)
1575 struct ring_buffer *buffer = buf->buffer;
1581 ring_buffer_record_disable(buffer);
1583 /* Make sure all commits have finished */
1584 synchronize_sched();
1586 buf->time_start = buffer_ftrace_now(buf, buf->cpu);
1588 for_each_online_cpu(cpu)
1589 ring_buffer_reset_cpu(buffer, cpu);
1591 ring_buffer_record_enable(buffer);
1594 /* Must have trace_types_lock held */
1595 void tracing_reset_all_online_cpus(void)
1597 struct trace_array *tr;
1599 list_for_each_entry(tr, &ftrace_trace_arrays, list) {
1600 tracing_reset_online_cpus(&tr->trace_buffer);
1601 #ifdef CONFIG_TRACER_MAX_TRACE
1602 tracing_reset_online_cpus(&tr->max_buffer);
1607 #define SAVED_CMDLINES_DEFAULT 128
1608 #define NO_CMDLINE_MAP UINT_MAX
1609 static arch_spinlock_t trace_cmdline_lock = __ARCH_SPIN_LOCK_UNLOCKED;
1610 struct saved_cmdlines_buffer {
1611 unsigned map_pid_to_cmdline[PID_MAX_DEFAULT+1];
1612 unsigned *map_cmdline_to_pid;
1613 unsigned cmdline_num;
1615 char *saved_cmdlines;
1617 static struct saved_cmdlines_buffer *savedcmd;
1619 static inline char *get_saved_cmdlines(int idx)
1621 return &savedcmd->saved_cmdlines[idx * TASK_COMM_LEN];
1624 static inline void set_cmdline(int idx, const char *cmdline)
1626 memcpy(get_saved_cmdlines(idx), cmdline, TASK_COMM_LEN);
1629 static int allocate_cmdlines_buffer(unsigned int val,
1630 struct saved_cmdlines_buffer *s)
1632 s->map_cmdline_to_pid = kmalloc(val * sizeof(*s->map_cmdline_to_pid),
1634 if (!s->map_cmdline_to_pid)
1637 s->saved_cmdlines = kmalloc(val * TASK_COMM_LEN, GFP_KERNEL);
1638 if (!s->saved_cmdlines) {
1639 kfree(s->map_cmdline_to_pid);
1644 s->cmdline_num = val;
1645 memset(&s->map_pid_to_cmdline, NO_CMDLINE_MAP,
1646 sizeof(s->map_pid_to_cmdline));
1647 memset(s->map_cmdline_to_pid, NO_CMDLINE_MAP,
1648 val * sizeof(*s->map_cmdline_to_pid));
1653 static int trace_create_savedcmd(void)
1657 savedcmd = kmalloc(sizeof(*savedcmd), GFP_KERNEL);
1661 ret = allocate_cmdlines_buffer(SAVED_CMDLINES_DEFAULT, savedcmd);
1671 int is_tracing_stopped(void)
1673 return global_trace.stop_count;
1677 * tracing_start - quick start of the tracer
1679 * If tracing is enabled but was stopped by tracing_stop,
1680 * this will start the tracer back up.
1682 void tracing_start(void)
1684 struct ring_buffer *buffer;
1685 unsigned long flags;
1687 if (tracing_disabled)
1690 raw_spin_lock_irqsave(&global_trace.start_lock, flags);
1691 if (--global_trace.stop_count) {
1692 if (global_trace.stop_count < 0) {
1693 /* Someone screwed up their debugging */
1695 global_trace.stop_count = 0;
1700 /* Prevent the buffers from switching */
1701 arch_spin_lock(&global_trace.max_lock);
1703 buffer = global_trace.trace_buffer.buffer;
1705 ring_buffer_record_enable(buffer);
1707 #ifdef CONFIG_TRACER_MAX_TRACE
1708 buffer = global_trace.max_buffer.buffer;
1710 ring_buffer_record_enable(buffer);
1713 arch_spin_unlock(&global_trace.max_lock);
1716 raw_spin_unlock_irqrestore(&global_trace.start_lock, flags);
1719 static void tracing_start_tr(struct trace_array *tr)
1721 struct ring_buffer *buffer;
1722 unsigned long flags;
1724 if (tracing_disabled)
1727 /* If global, we need to also start the max tracer */
1728 if (tr->flags & TRACE_ARRAY_FL_GLOBAL)
1729 return tracing_start();
1731 raw_spin_lock_irqsave(&tr->start_lock, flags);
1733 if (--tr->stop_count) {
1734 if (tr->stop_count < 0) {
1735 /* Someone screwed up their debugging */
1742 buffer = tr->trace_buffer.buffer;
1744 ring_buffer_record_enable(buffer);
1747 raw_spin_unlock_irqrestore(&tr->start_lock, flags);
1751 * tracing_stop - quick stop of the tracer
1753 * Light weight way to stop tracing. Use in conjunction with
1756 void tracing_stop(void)
1758 struct ring_buffer *buffer;
1759 unsigned long flags;
1761 raw_spin_lock_irqsave(&global_trace.start_lock, flags);
1762 if (global_trace.stop_count++)
1765 /* Prevent the buffers from switching */
1766 arch_spin_lock(&global_trace.max_lock);
1768 buffer = global_trace.trace_buffer.buffer;
1770 ring_buffer_record_disable(buffer);
1772 #ifdef CONFIG_TRACER_MAX_TRACE
1773 buffer = global_trace.max_buffer.buffer;
1775 ring_buffer_record_disable(buffer);
1778 arch_spin_unlock(&global_trace.max_lock);
1781 raw_spin_unlock_irqrestore(&global_trace.start_lock, flags);
1784 static void tracing_stop_tr(struct trace_array *tr)
1786 struct ring_buffer *buffer;
1787 unsigned long flags;
1789 /* If global, we need to also stop the max tracer */
1790 if (tr->flags & TRACE_ARRAY_FL_GLOBAL)
1791 return tracing_stop();
1793 raw_spin_lock_irqsave(&tr->start_lock, flags);
1794 if (tr->stop_count++)
1797 buffer = tr->trace_buffer.buffer;
1799 ring_buffer_record_disable(buffer);
1802 raw_spin_unlock_irqrestore(&tr->start_lock, flags);
1805 void trace_stop_cmdline_recording(void);
1807 static int trace_save_cmdline(struct task_struct *tsk)
1811 /* treat recording of idle task as a success */
1815 tpid = tsk->pid & (PID_MAX_DEFAULT - 1);
1818 * It's not the end of the world if we don't get
1819 * the lock, but we also don't want to spin
1820 * nor do we want to disable interrupts,
1821 * so if we miss here, then better luck next time.
1823 if (!arch_spin_trylock(&trace_cmdline_lock))
1826 idx = savedcmd->map_pid_to_cmdline[tpid];
1827 if (idx == NO_CMDLINE_MAP) {
1828 idx = (savedcmd->cmdline_idx + 1) % savedcmd->cmdline_num;
1830 savedcmd->map_pid_to_cmdline[tpid] = idx;
1831 savedcmd->cmdline_idx = idx;
1834 savedcmd->map_cmdline_to_pid[idx] = tsk->pid;
1835 set_cmdline(idx, tsk->comm);
1837 arch_spin_unlock(&trace_cmdline_lock);
1842 static void __trace_find_cmdline(int pid, char comm[])
1848 strcpy(comm, "<idle>");
1852 if (WARN_ON_ONCE(pid < 0)) {
1853 strcpy(comm, "<XXX>");
1857 tpid = pid & (PID_MAX_DEFAULT - 1);
1858 map = savedcmd->map_pid_to_cmdline[tpid];
1859 if (map != NO_CMDLINE_MAP) {
1860 tpid = savedcmd->map_cmdline_to_pid[map];
1862 strlcpy(comm, get_saved_cmdlines(map), TASK_COMM_LEN);
1866 strcpy(comm, "<...>");
1869 void trace_find_cmdline(int pid, char comm[])
1872 arch_spin_lock(&trace_cmdline_lock);
1874 __trace_find_cmdline(pid, comm);
1876 arch_spin_unlock(&trace_cmdline_lock);
1880 void tracing_record_cmdline(struct task_struct *tsk)
1882 if (!__this_cpu_read(trace_cmdline_save))
1885 if (trace_save_cmdline(tsk))
1886 __this_cpu_write(trace_cmdline_save, false);
1890 tracing_generic_entry_update(struct trace_entry *entry, unsigned long flags,
1893 struct task_struct *tsk = current;
1895 entry->preempt_count = pc & 0xff;
1896 entry->pid = (tsk) ? tsk->pid : 0;
1898 #ifdef CONFIG_TRACE_IRQFLAGS_SUPPORT
1899 (irqs_disabled_flags(flags) ? TRACE_FLAG_IRQS_OFF : 0) |
1901 TRACE_FLAG_IRQS_NOSUPPORT |
1903 ((pc & NMI_MASK ) ? TRACE_FLAG_NMI : 0) |
1904 ((pc & HARDIRQ_MASK) ? TRACE_FLAG_HARDIRQ : 0) |
1905 ((pc & SOFTIRQ_OFFSET) ? TRACE_FLAG_SOFTIRQ : 0) |
1906 (tif_need_resched() ? TRACE_FLAG_NEED_RESCHED : 0) |
1907 (test_preempt_need_resched() ? TRACE_FLAG_PREEMPT_RESCHED : 0);
1909 EXPORT_SYMBOL_GPL(tracing_generic_entry_update);
1911 static __always_inline void
1912 trace_event_setup(struct ring_buffer_event *event,
1913 int type, unsigned long flags, int pc)
1915 struct trace_entry *ent = ring_buffer_event_data(event);
1917 tracing_generic_entry_update(ent, flags, pc);
1921 struct ring_buffer_event *
1922 trace_buffer_lock_reserve(struct ring_buffer *buffer,
1925 unsigned long flags, int pc)
1927 struct ring_buffer_event *event;
1929 event = ring_buffer_lock_reserve(buffer, len);
1931 trace_event_setup(event, type, flags, pc);
1936 DEFINE_PER_CPU(struct ring_buffer_event *, trace_buffered_event);
1937 DEFINE_PER_CPU(int, trace_buffered_event_cnt);
1938 static int trace_buffered_event_ref;
1941 * trace_buffered_event_enable - enable buffering events
1943 * When events are being filtered, it is quicker to use a temporary
1944 * buffer to write the event data into if there's a likely chance
1945 * that it will not be committed. The discard of the ring buffer
1946 * is not as fast as committing, and is much slower than copying
1949 * When an event is to be filtered, allocate per cpu buffers to
1950 * write the event data into, and if the event is filtered and discarded
1951 * it is simply dropped; otherwise, the entire data is committed
1954 void trace_buffered_event_enable(void)
1956 struct ring_buffer_event *event;
1960 WARN_ON_ONCE(!mutex_is_locked(&event_mutex));
1962 if (trace_buffered_event_ref++)
1965 for_each_tracing_cpu(cpu) {
1966 page = alloc_pages_node(cpu_to_node(cpu),
1967 GFP_KERNEL | __GFP_NORETRY, 0);
1971 event = page_address(page);
1972 memset(event, 0, sizeof(*event));
1974 per_cpu(trace_buffered_event, cpu) = event;
1977 if (cpu == smp_processor_id() &&
1978 this_cpu_read(trace_buffered_event) !=
1979 per_cpu(trace_buffered_event, cpu))
1986 trace_buffered_event_disable();
1989 static void enable_trace_buffered_event(void *data)
1991 /* Probably not needed, but do it anyway */
1993 this_cpu_dec(trace_buffered_event_cnt);
1996 static void disable_trace_buffered_event(void *data)
1998 this_cpu_inc(trace_buffered_event_cnt);
2002 * trace_buffered_event_disable - disable buffering events
2004 * When a filter is removed, it is faster to not use the buffered
2005 * events, and to commit directly into the ring buffer. Free up
2006 * the temp buffers when there are no more users. This requires
2007 * special synchronization with current events.
2009 void trace_buffered_event_disable(void)
2013 WARN_ON_ONCE(!mutex_is_locked(&event_mutex));
2015 if (WARN_ON_ONCE(!trace_buffered_event_ref))
2018 if (--trace_buffered_event_ref)
2022 /* For each CPU, set the buffer as used. */
2023 smp_call_function_many(tracing_buffer_mask,
2024 disable_trace_buffered_event, NULL, 1);
2027 /* Wait for all current users to finish */
2028 synchronize_sched();
2030 for_each_tracing_cpu(cpu) {
2031 free_page((unsigned long)per_cpu(trace_buffered_event, cpu));
2032 per_cpu(trace_buffered_event, cpu) = NULL;
2035 * Make sure trace_buffered_event is NULL before clearing
2036 * trace_buffered_event_cnt.
2041 /* Do the work on each cpu */
2042 smp_call_function_many(tracing_buffer_mask,
2043 enable_trace_buffered_event, NULL, 1);
2048 __buffer_unlock_commit(struct ring_buffer *buffer, struct ring_buffer_event *event)
2050 __this_cpu_write(trace_cmdline_save, true);
2052 /* If this is the temp buffer, we need to commit fully */
2053 if (this_cpu_read(trace_buffered_event) == event) {
2054 /* Length is in event->array[0] */
2055 ring_buffer_write(buffer, event->array[0], &event->array[1]);
2056 /* Release the temp buffer */
2057 this_cpu_dec(trace_buffered_event_cnt);
2059 ring_buffer_unlock_commit(buffer, event);
2062 static struct ring_buffer *temp_buffer;
2064 struct ring_buffer_event *
2065 trace_event_buffer_lock_reserve(struct ring_buffer **current_rb,
2066 struct trace_event_file *trace_file,
2067 int type, unsigned long len,
2068 unsigned long flags, int pc)
2070 struct ring_buffer_event *entry;
2073 *current_rb = trace_file->tr->trace_buffer.buffer;
2075 if ((trace_file->flags &
2076 (EVENT_FILE_FL_SOFT_DISABLED | EVENT_FILE_FL_FILTERED)) &&
2077 (entry = this_cpu_read(trace_buffered_event))) {
2078 /* Try to use the per cpu buffer first */
2079 val = this_cpu_inc_return(trace_buffered_event_cnt);
2080 if ((len < (PAGE_SIZE - sizeof(*entry) - sizeof(entry->array[0]))) && val == 1) {
2081 trace_event_setup(entry, type, flags, pc);
2082 entry->array[0] = len;
2085 this_cpu_dec(trace_buffered_event_cnt);
2088 entry = trace_buffer_lock_reserve(*current_rb,
2089 type, len, flags, pc);
2091 * If tracing is off, but we have triggers enabled,
2092 * we still need to look at the event data. Use the temp_buffer
2093 * to store the trace event for the trigger to use. It's recursion
2094 * safe and will not be recorded anywhere.
2096 if (!entry && trace_file->flags & EVENT_FILE_FL_TRIGGER_COND) {
2097 *current_rb = temp_buffer;
2098 entry = trace_buffer_lock_reserve(*current_rb,
2099 type, len, flags, pc);
2103 EXPORT_SYMBOL_GPL(trace_event_buffer_lock_reserve);
2105 void trace_buffer_unlock_commit_regs(struct trace_array *tr,
2106 struct ring_buffer *buffer,
2107 struct ring_buffer_event *event,
2108 unsigned long flags, int pc,
2109 struct pt_regs *regs)
2111 __buffer_unlock_commit(buffer, event);
2114 * If regs is not set, then skip the following callers:
2115 * trace_buffer_unlock_commit_regs
2116 * event_trigger_unlock_commit
2117 * trace_event_buffer_commit
2118 * trace_event_raw_event_sched_switch
2119 * Note, we can still get here via blktrace, wakeup tracer
2120 * and mmiotrace, but that's ok if they lose a function or
2121 * two. They are not that meaningful.
2123 ftrace_trace_stack(tr, buffer, flags, regs ? 0 : 4, pc, regs);
2124 ftrace_trace_userstack(tr, buffer, flags, pc);
2128 trace_function(struct trace_array *tr,
2129 unsigned long ip, unsigned long parent_ip, unsigned long flags,
2132 struct trace_event_call *call = &event_function;
2133 struct ring_buffer *buffer = tr->trace_buffer.buffer;
2134 struct ring_buffer_event *event;
2135 struct ftrace_entry *entry;
2137 event = trace_buffer_lock_reserve(buffer, TRACE_FN, sizeof(*entry),
2141 entry = ring_buffer_event_data(event);
2143 entry->parent_ip = parent_ip;
2145 if (!call_filter_check_discard(call, entry, buffer, event))
2146 __buffer_unlock_commit(buffer, event);
2149 #ifdef CONFIG_STACKTRACE
2151 #define FTRACE_STACK_MAX_ENTRIES (PAGE_SIZE / sizeof(unsigned long))
2152 struct ftrace_stack {
2153 unsigned long calls[FTRACE_STACK_MAX_ENTRIES];
2156 static DEFINE_PER_CPU(struct ftrace_stack, ftrace_stack);
2157 static DEFINE_PER_CPU(int, ftrace_stack_reserve);
2159 static void __ftrace_trace_stack(struct ring_buffer *buffer,
2160 unsigned long flags,
2161 int skip, int pc, struct pt_regs *regs)
2163 struct trace_event_call *call = &event_kernel_stack;
2164 struct ring_buffer_event *event;
2165 struct stack_entry *entry;
2166 struct stack_trace trace;
2168 int size = FTRACE_STACK_ENTRIES;
2170 trace.nr_entries = 0;
2174 * Add two, for this function and the call to save_stack_trace()
2175 * If regs is set, then these functions will not be in the way.
2181 * Since events can happen in NMIs there's no safe way to
2182 * use the per cpu ftrace_stacks. We reserve it and if an interrupt
2183 * or NMI comes in, it will just have to use the default
2184 * FTRACE_STACK_SIZE.
2186 preempt_disable_notrace();
2188 use_stack = __this_cpu_inc_return(ftrace_stack_reserve);
2190 * We don't need any atomic variables, just a barrier.
2191 * If an interrupt comes in, we don't care, because it would
2192 * have exited and put the counter back to what we want.
2193 * We just need a barrier to keep gcc from moving things
2197 if (use_stack == 1) {
2198 trace.entries = this_cpu_ptr(ftrace_stack.calls);
2199 trace.max_entries = FTRACE_STACK_MAX_ENTRIES;
2202 save_stack_trace_regs(regs, &trace);
2204 save_stack_trace(&trace);
2206 if (trace.nr_entries > size)
2207 size = trace.nr_entries;
2209 /* From now on, use_stack is a boolean */
2212 size *= sizeof(unsigned long);
2214 event = trace_buffer_lock_reserve(buffer, TRACE_STACK,
2215 (sizeof(*entry) - sizeof(entry->caller)) + size,
2219 entry = ring_buffer_event_data(event);
2221 memset(&entry->caller, 0, size);
2224 memcpy(&entry->caller, trace.entries,
2225 trace.nr_entries * sizeof(unsigned long));
2227 trace.max_entries = FTRACE_STACK_ENTRIES;
2228 trace.entries = entry->caller;
2230 save_stack_trace_regs(regs, &trace);
2232 save_stack_trace(&trace);
2235 entry->size = trace.nr_entries;
2237 if (!call_filter_check_discard(call, entry, buffer, event))
2238 __buffer_unlock_commit(buffer, event);
2241 /* Again, don't let gcc optimize things here */
2243 __this_cpu_dec(ftrace_stack_reserve);
2244 preempt_enable_notrace();
2248 static inline void ftrace_trace_stack(struct trace_array *tr,
2249 struct ring_buffer *buffer,
2250 unsigned long flags,
2251 int skip, int pc, struct pt_regs *regs)
2253 if (!(tr->trace_flags & TRACE_ITER_STACKTRACE))
2256 __ftrace_trace_stack(buffer, flags, skip, pc, regs);
2259 void __trace_stack(struct trace_array *tr, unsigned long flags, int skip,
2262 __ftrace_trace_stack(tr->trace_buffer.buffer, flags, skip, pc, NULL);
2266 * trace_dump_stack - record a stack back trace in the trace buffer
2267 * @skip: Number of functions to skip (helper handlers)
2269 void trace_dump_stack(int skip)
2271 unsigned long flags;
2273 if (tracing_disabled || tracing_selftest_running)
2276 local_save_flags(flags);
2279 * Skip 3 more, seems to get us at the caller of
2283 __ftrace_trace_stack(global_trace.trace_buffer.buffer,
2284 flags, skip, preempt_count(), NULL);
2287 static DEFINE_PER_CPU(int, user_stack_count);
2290 ftrace_trace_userstack(struct trace_array *tr,
2291 struct ring_buffer *buffer, unsigned long flags, int pc)
2293 struct trace_event_call *call = &event_user_stack;
2294 struct ring_buffer_event *event;
2295 struct userstack_entry *entry;
2296 struct stack_trace trace;
2298 if (!(tr->trace_flags & TRACE_ITER_USERSTACKTRACE))
2302 * NMIs can not handle page faults, even with fix ups.
2303 * The save user stack can (and often does) fault.
2305 if (unlikely(in_nmi()))
2309 * prevent recursion, since the user stack tracing may
2310 * trigger other kernel events.
2313 if (__this_cpu_read(user_stack_count))
2316 __this_cpu_inc(user_stack_count);
2318 event = trace_buffer_lock_reserve(buffer, TRACE_USER_STACK,
2319 sizeof(*entry), flags, pc);
2321 goto out_drop_count;
2322 entry = ring_buffer_event_data(event);
2324 entry->tgid = current->tgid;
2325 memset(&entry->caller, 0, sizeof(entry->caller));
2327 trace.nr_entries = 0;
2328 trace.max_entries = FTRACE_STACK_ENTRIES;
2330 trace.entries = entry->caller;
2332 save_stack_trace_user(&trace);
2333 if (!call_filter_check_discard(call, entry, buffer, event))
2334 __buffer_unlock_commit(buffer, event);
2337 __this_cpu_dec(user_stack_count);
2343 static void __trace_userstack(struct trace_array *tr, unsigned long flags)
2345 ftrace_trace_userstack(tr, flags, preempt_count());
2349 #endif /* CONFIG_STACKTRACE */
2351 /* created for use with alloc_percpu */
2352 struct trace_buffer_struct {
2354 char buffer[4][TRACE_BUF_SIZE];
2357 static struct trace_buffer_struct *trace_percpu_buffer;
2360 * This allows for lockless recording. If we're nested too deeply, then
2361 * this returns NULL.
2363 static char *get_trace_buf(void)
2365 struct trace_buffer_struct *buffer = this_cpu_ptr(trace_percpu_buffer);
2367 if (!buffer || buffer->nesting >= 4)
2372 /* Interrupts must see nesting incremented before we use the buffer */
2374 return &buffer->buffer[buffer->nesting - 1][0];
2377 static void put_trace_buf(void)
2379 /* Don't let the decrement of nesting leak before this */
2381 this_cpu_dec(trace_percpu_buffer->nesting);
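/*
 * Illustrative sketch, not part of this file: users of the per-cpu printk
 * buffer pair get_trace_buf() with put_trace_buf() and stay preempt-disabled
 * in between, which is how trace_vbprintk()/__trace_array_vprintk() below use
 * it. The formatting step here is a hypothetical placeholder.
 */
static void example_use_trace_buf(void)
{
	char *tbuffer;

	preempt_disable_notrace();

	tbuffer = get_trace_buf();
	if (!tbuffer)
		goto out;

	/* ... format at most TRACE_BUF_SIZE bytes into tbuffer ... */

	put_trace_buf();
out:
	preempt_enable_notrace();
}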
2384 static int alloc_percpu_trace_buffer(void)
2386 struct trace_buffer_struct *buffers;
2388 buffers = alloc_percpu(struct trace_buffer_struct);
2389 if (WARN(!buffers, "Could not allocate percpu trace_printk buffer"))
2392 trace_percpu_buffer = buffers;
2396 static int buffers_allocated;
2398 void trace_printk_init_buffers(void)
2400 if (buffers_allocated)
2403 if (alloc_percpu_trace_buffer())
2406 /* trace_printk() is for debug use only. Don't use it in production. */
2409 pr_warn("**********************************************************\n");
2410 pr_warn("** NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE **\n");
2412 pr_warn("** trace_printk() being used. Allocating extra memory. **\n");
2414 pr_warn("** This means that this is a DEBUG kernel and it is **\n");
2415 pr_warn("** unsafe for production use. **\n");
2417 pr_warn("** If you see this message and you are not debugging **\n");
2418 pr_warn("** the kernel, report this immediately to your vendor! **\n");
2420 pr_warn("** NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE **\n");
2421 pr_warn("**********************************************************\n");
2423 /* Expand the buffers to set size */
2424 tracing_update_buffers();
2426 buffers_allocated = 1;
2429 * trace_printk_init_buffers() can be called by modules.
2430 * If that happens, then we need to start cmdline recording
2431 * directly here. If the global_trace.buffer is already
2432 * allocated here, then this was called by module code.
2434 if (global_trace.trace_buffer.buffer)
2435 tracing_start_cmdline_record();
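/*
 * Illustrative usage: a debugging patch typically sprinkles calls such as
 *
 *	trace_printk("queue depth %d on cpu %d\n", depth, cpu);
 *
 * into the code under test ("depth" and "cpu" are placeholder variables).
 * Any use of trace_printk() in built-in or module code causes these
 * buffers to be allocated, which is when the banner above is printed.
 */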
2438 void trace_printk_start_comm(void)
2440 /* Start tracing comms if trace printk is set */
2441 if (!buffers_allocated)
2443 tracing_start_cmdline_record();
2446 static void trace_printk_start_stop_comm(int enabled)
2448 if (!buffers_allocated)
2452 tracing_start_cmdline_record();
2454 tracing_stop_cmdline_record();
2458 * trace_vbprintk - write a binary message to the tracing buffer
2461 int trace_vbprintk(unsigned long ip, const char *fmt, va_list args)
2463 struct trace_event_call *call = &event_bprint;
2464 struct ring_buffer_event *event;
2465 struct ring_buffer *buffer;
2466 struct trace_array *tr = &global_trace;
2467 struct bprint_entry *entry;
2468 unsigned long flags;
2470 int len = 0, size, pc;
2472 if (unlikely(tracing_selftest_running || tracing_disabled))
2475 /* Don't pollute graph traces with trace_vprintk internals */
2476 pause_graph_tracing();
2478 pc = preempt_count();
2479 preempt_disable_notrace();
2481 tbuffer = get_trace_buf();
2487 len = vbin_printf((u32 *)tbuffer, TRACE_BUF_SIZE/sizeof(int), fmt, args);
2489 if (len > TRACE_BUF_SIZE/sizeof(int) || len < 0)
2492 local_save_flags(flags);
2493 size = sizeof(*entry) + sizeof(u32) * len;
2494 buffer = tr->trace_buffer.buffer;
2495 event = trace_buffer_lock_reserve(buffer, TRACE_BPRINT, size,
2499 entry = ring_buffer_event_data(event);
2503 memcpy(entry->buf, tbuffer, sizeof(u32) * len);
2504 if (!call_filter_check_discard(call, entry, buffer, event)) {
2505 __buffer_unlock_commit(buffer, event);
2506 ftrace_trace_stack(tr, buffer, flags, 6, pc, NULL);
2513 preempt_enable_notrace();
2514 unpause_graph_tracing();
2518 EXPORT_SYMBOL_GPL(trace_vbprintk);
2522 __trace_array_vprintk(struct ring_buffer *buffer,
2523 unsigned long ip, const char *fmt, va_list args)
2525 struct trace_event_call *call = &event_print;
2526 struct ring_buffer_event *event;
2527 int len = 0, size, pc;
2528 struct print_entry *entry;
2529 unsigned long flags;
2532 if (tracing_disabled || tracing_selftest_running)
2535 /* Don't pollute graph traces with trace_vprintk internals */
2536 pause_graph_tracing();
2538 pc = preempt_count();
2539 preempt_disable_notrace();
2542 tbuffer = get_trace_buf();
2548 len = vscnprintf(tbuffer, TRACE_BUF_SIZE, fmt, args);
2550 local_save_flags(flags);
2551 size = sizeof(*entry) + len + 1;
2552 event = trace_buffer_lock_reserve(buffer, TRACE_PRINT, size,
2556 entry = ring_buffer_event_data(event);
2559 memcpy(&entry->buf, tbuffer, len + 1);
2560 if (!call_filter_check_discard(call, entry, buffer, event)) {
2561 __buffer_unlock_commit(buffer, event);
2562 ftrace_trace_stack(&global_trace, buffer, flags, 6, pc, NULL);
2569 preempt_enable_notrace();
2570 unpause_graph_tracing();
2576 int trace_array_vprintk(struct trace_array *tr,
2577 unsigned long ip, const char *fmt, va_list args)
2579 return __trace_array_vprintk(tr->trace_buffer.buffer, ip, fmt, args);
2583 int trace_array_printk(struct trace_array *tr,
2584 unsigned long ip, const char *fmt, ...)
2589 if (!(global_trace.trace_flags & TRACE_ITER_PRINTK))
2596 ret = trace_array_vprintk(tr, ip, fmt, ap);
2602 int trace_array_printk_buf(struct ring_buffer *buffer,
2603 unsigned long ip, const char *fmt, ...)
2608 if (!(global_trace.trace_flags & TRACE_ITER_PRINTK))
2612 ret = __trace_array_vprintk(buffer, ip, fmt, ap);
2618 int trace_vprintk(unsigned long ip, const char *fmt, va_list args)
2620 return trace_array_vprintk(&global_trace, ip, fmt, args);
2622 EXPORT_SYMBOL_GPL(trace_vprintk);
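/*
 * Illustrative sketch: code that owns a private trace instance can log
 * into that instance's buffer (rather than the global one) with
 *
 *	trace_array_printk(tr, _THIS_IP_, "resetting %s\n", name);
 *
 * where tr is the instance's trace_array and "name" is a placeholder.
 * trace_printk() itself ends up in trace_vprintk()/trace_vbprintk()
 * against &global_trace.
 */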
2624 static void trace_iterator_increment(struct trace_iterator *iter)
2626 struct ring_buffer_iter *buf_iter = trace_buffer_iter(iter, iter->cpu);
2630 ring_buffer_read(buf_iter, NULL);
2633 static struct trace_entry *
2634 peek_next_entry(struct trace_iterator *iter, int cpu, u64 *ts,
2635 unsigned long *lost_events)
2637 struct ring_buffer_event *event;
2638 struct ring_buffer_iter *buf_iter = trace_buffer_iter(iter, cpu);
2641 event = ring_buffer_iter_peek(buf_iter, ts);
2643 event = ring_buffer_peek(iter->trace_buffer->buffer, cpu, ts,
2647 iter->ent_size = ring_buffer_event_length(event);
2648 return ring_buffer_event_data(event);
2654 static struct trace_entry *
2655 __find_next_entry(struct trace_iterator *iter, int *ent_cpu,
2656 unsigned long *missing_events, u64 *ent_ts)
2658 struct ring_buffer *buffer = iter->trace_buffer->buffer;
2659 struct trace_entry *ent, *next = NULL;
2660 unsigned long lost_events = 0, next_lost = 0;
2661 int cpu_file = iter->cpu_file;
2662 u64 next_ts = 0, ts;
2668 * If we are in a per_cpu trace file, don't bother iterating over
2669 * all CPUs; peek directly.
2671 if (cpu_file > RING_BUFFER_ALL_CPUS) {
2672 if (ring_buffer_empty_cpu(buffer, cpu_file))
2674 ent = peek_next_entry(iter, cpu_file, ent_ts, missing_events);
2676 *ent_cpu = cpu_file;
2681 for_each_tracing_cpu(cpu) {
2683 if (ring_buffer_empty_cpu(buffer, cpu))
2686 ent = peek_next_entry(iter, cpu, &ts, &lost_events);
2689 * Pick the entry with the smallest timestamp:
2691 if (ent && (!next || ts < next_ts)) {
2695 next_lost = lost_events;
2696 next_size = iter->ent_size;
2700 iter->ent_size = next_size;
2703 *ent_cpu = next_cpu;
2709 *missing_events = next_lost;
2714 /* Find the next real entry, without updating the iterator itself */
2715 struct trace_entry *trace_find_next_entry(struct trace_iterator *iter,
2716 int *ent_cpu, u64 *ent_ts)
2718 return __find_next_entry(iter, ent_cpu, NULL, ent_ts);
2721 /* Find the next real entry, and increment the iterator to the next entry */
2722 void *trace_find_next_entry_inc(struct trace_iterator *iter)
2724 iter->ent = __find_next_entry(iter, &iter->cpu,
2725 &iter->lost_events, &iter->ts);
2728 trace_iterator_increment(iter);
2730 return iter->ent ? iter : NULL;
2733 static void trace_consume(struct trace_iterator *iter)
2735 ring_buffer_consume(iter->trace_buffer->buffer, iter->cpu, &iter->ts,
2736 &iter->lost_events);
2739 static void *s_next(struct seq_file *m, void *v, loff_t *pos)
2741 struct trace_iterator *iter = m->private;
2745 WARN_ON_ONCE(iter->leftover);
2749 /* can't go backwards */
2754 ent = trace_find_next_entry_inc(iter);
2758 while (ent && iter->idx < i)
2759 ent = trace_find_next_entry_inc(iter);
2766 void tracing_iter_reset(struct trace_iterator *iter, int cpu)
2768 struct ring_buffer_event *event;
2769 struct ring_buffer_iter *buf_iter;
2770 unsigned long entries = 0;
2773 per_cpu_ptr(iter->trace_buffer->data, cpu)->skipped_entries = 0;
2775 buf_iter = trace_buffer_iter(iter, cpu);
2779 ring_buffer_iter_reset(buf_iter);
2782 * With the max latency tracers, we could have the case
2783 * that a reset never took place on a cpu. This is evidenced
2784 * by the timestamp being before the start of the buffer.
2786 while ((event = ring_buffer_iter_peek(buf_iter, &ts))) {
2787 if (ts >= iter->trace_buffer->time_start)
2790 ring_buffer_read(buf_iter, NULL);
2793 per_cpu_ptr(iter->trace_buffer->data, cpu)->skipped_entries = entries;
2797 * The current tracer is copied to avoid taking a global lock all around.
2800 static void *s_start(struct seq_file *m, loff_t *pos)
2802 struct trace_iterator *iter = m->private;
2803 struct trace_array *tr = iter->tr;
2804 int cpu_file = iter->cpu_file;
2810 * copy the tracer to avoid using a global lock all around.
2811 * iter->trace is a copy of current_trace, the pointer to the
2812 * name may be used instead of a strcmp(), as iter->trace->name
2813 * will point to the same string as current_trace->name.
2815 mutex_lock(&trace_types_lock);
2816 if (unlikely(tr->current_trace && iter->trace->name != tr->current_trace->name))
2817 *iter->trace = *tr->current_trace;
2818 mutex_unlock(&trace_types_lock);
2820 #ifdef CONFIG_TRACER_MAX_TRACE
2821 if (iter->snapshot && iter->trace->use_max_tr)
2822 return ERR_PTR(-EBUSY);
2825 if (*pos != iter->pos) {
2830 if (cpu_file == RING_BUFFER_ALL_CPUS) {
2831 for_each_tracing_cpu(cpu)
2832 tracing_iter_reset(iter, cpu);
2834 tracing_iter_reset(iter, cpu_file);
2837 for (p = iter; p && l < *pos; p = s_next(m, p, &l))
2842 * If we overflowed the seq_file before, then we want
2843 * to just reuse the trace_seq buffer again.
2849 p = s_next(m, p, &l);
2853 trace_event_read_lock();
2854 trace_access_lock(cpu_file);
2858 static void s_stop(struct seq_file *m, void *p)
2860 struct trace_iterator *iter = m->private;
2862 #ifdef CONFIG_TRACER_MAX_TRACE
2863 if (iter->snapshot && iter->trace->use_max_tr)
2867 trace_access_unlock(iter->cpu_file);
2868 trace_event_read_unlock();
2872 get_total_entries(struct trace_buffer *buf,
2873 unsigned long *total, unsigned long *entries)
2875 unsigned long count;
2881 for_each_tracing_cpu(cpu) {
2882 count = ring_buffer_entries_cpu(buf->buffer, cpu);
2884 * If this buffer has skipped entries, then we hold all
2885 * entries for the trace and we need to ignore the
2886 * ones before the time stamp.
2888 if (per_cpu_ptr(buf->data, cpu)->skipped_entries) {
2889 count -= per_cpu_ptr(buf->data, cpu)->skipped_entries;
2890 /* total is the same as the entries */
2894 ring_buffer_overrun_cpu(buf->buffer, cpu);
2899 static void print_lat_help_header(struct seq_file *m)
2901 seq_puts(m, "# _------=> CPU# \n"
2902 "# / _-----=> irqs-off \n"
2903 "# | / _----=> need-resched \n"
2904 "# || / _---=> hardirq/softirq \n"
2905 "# ||| / _--=> preempt-depth \n"
2907 "# cmd pid ||||| time | caller \n"
2908 "# \\ / ||||| \\ | / \n");
2911 static void print_event_info(struct trace_buffer *buf, struct seq_file *m)
2913 unsigned long total;
2914 unsigned long entries;
2916 get_total_entries(buf, &total, &entries);
2917 seq_printf(m, "# entries-in-buffer/entries-written: %lu/%lu #P:%d\n",
2918 entries, total, num_online_cpus());
2922 static void print_func_help_header(struct trace_buffer *buf, struct seq_file *m)
2924 print_event_info(buf, m);
2925 seq_puts(m, "# TASK-PID CPU# TIMESTAMP FUNCTION\n"
2929 static void print_func_help_header_irq(struct trace_buffer *buf, struct seq_file *m)
2931 print_event_info(buf, m);
2932 seq_puts(m, "# _-----=> irqs-off\n"
2933 "# / _----=> need-resched\n"
2934 "# | / _---=> hardirq/softirq\n"
2935 "# || / _--=> preempt-depth\n"
2937 "# TASK-PID CPU# |||| TIMESTAMP FUNCTION\n"
2938 "# | | | |||| | |\n");
2942 print_trace_header(struct seq_file *m, struct trace_iterator *iter)
2944 unsigned long sym_flags = (global_trace.trace_flags & TRACE_ITER_SYM_MASK);
2945 struct trace_buffer *buf = iter->trace_buffer;
2946 struct trace_array_cpu *data = per_cpu_ptr(buf->data, buf->cpu);
2947 struct tracer *type = iter->trace;
2948 unsigned long entries;
2949 unsigned long total;
2950 const char *name = "preemption";
2954 get_total_entries(buf, &total, &entries);
2956 seq_printf(m, "# %s latency trace v1.1.5 on %s\n",
2958 seq_puts(m, "# -----------------------------------"
2959 "---------------------------------\n");
2960 seq_printf(m, "# latency: %lu us, #%lu/%lu, CPU#%d |"
2961 " (M:%s VP:%d, KP:%d, SP:%d HP:%d",
2962 nsecs_to_usecs(data->saved_latency),
2966 #if defined(CONFIG_PREEMPT_NONE)
2968 #elif defined(CONFIG_PREEMPT_VOLUNTARY)
2970 #elif defined(CONFIG_PREEMPT)
2975 /* These are reserved for later use */
2978 seq_printf(m, " #P:%d)\n", num_online_cpus());
2982 seq_puts(m, "# -----------------\n");
2983 seq_printf(m, "# | task: %.16s-%d "
2984 "(uid:%d nice:%ld policy:%ld rt_prio:%ld)\n",
2985 data->comm, data->pid,
2986 from_kuid_munged(seq_user_ns(m), data->uid), data->nice,
2987 data->policy, data->rt_priority);
2988 seq_puts(m, "# -----------------\n");
2990 if (data->critical_start) {
2991 seq_puts(m, "# => started at: ");
2992 seq_print_ip_sym(&iter->seq, data->critical_start, sym_flags);
2993 trace_print_seq(m, &iter->seq);
2994 seq_puts(m, "\n# => ended at: ");
2995 seq_print_ip_sym(&iter->seq, data->critical_end, sym_flags);
2996 trace_print_seq(m, &iter->seq);
2997 seq_puts(m, "\n#\n");
3003 static void test_cpu_buff_start(struct trace_iterator *iter)
3005 struct trace_seq *s = &iter->seq;
3006 struct trace_array *tr = iter->tr;
3008 if (!(tr->trace_flags & TRACE_ITER_ANNOTATE))
3011 if (!(iter->iter_flags & TRACE_FILE_ANNOTATE))
3014 if (cpumask_available(iter->started) &&
3015 cpumask_test_cpu(iter->cpu, iter->started))
3018 if (per_cpu_ptr(iter->trace_buffer->data, iter->cpu)->skipped_entries)
3021 if (cpumask_available(iter->started))
3022 cpumask_set_cpu(iter->cpu, iter->started);
3024 /* Don't print started cpu buffer for the first entry of the trace */
3026 trace_seq_printf(s, "##### CPU %u buffer started ####\n",
3030 static enum print_line_t print_trace_fmt(struct trace_iterator *iter)
3032 struct trace_array *tr = iter->tr;
3033 struct trace_seq *s = &iter->seq;
3034 unsigned long sym_flags = (tr->trace_flags & TRACE_ITER_SYM_MASK);
3035 struct trace_entry *entry;
3036 struct trace_event *event;
3040 test_cpu_buff_start(iter);
3042 event = ftrace_find_event(entry->type);
3044 if (tr->trace_flags & TRACE_ITER_CONTEXT_INFO) {
3045 if (iter->iter_flags & TRACE_FILE_LAT_FMT)
3046 trace_print_lat_context(iter);
3048 trace_print_context(iter);
3051 if (trace_seq_has_overflowed(s))
3052 return TRACE_TYPE_PARTIAL_LINE;
3055 return event->funcs->trace(iter, sym_flags, event);
3057 trace_seq_printf(s, "Unknown type %d\n", entry->type);
3059 return trace_handle_return(s);
3062 static enum print_line_t print_raw_fmt(struct trace_iterator *iter)
3064 struct trace_array *tr = iter->tr;
3065 struct trace_seq *s = &iter->seq;
3066 struct trace_entry *entry;
3067 struct trace_event *event;
3071 if (tr->trace_flags & TRACE_ITER_CONTEXT_INFO)
3072 trace_seq_printf(s, "%d %d %llu ",
3073 entry->pid, iter->cpu, iter->ts);
3075 if (trace_seq_has_overflowed(s))
3076 return TRACE_TYPE_PARTIAL_LINE;
3078 event = ftrace_find_event(entry->type);
3080 return event->funcs->raw(iter, 0, event);
3082 trace_seq_printf(s, "%d ?\n", entry->type);
3084 return trace_handle_return(s);
3087 static enum print_line_t print_hex_fmt(struct trace_iterator *iter)
3089 struct trace_array *tr = iter->tr;
3090 struct trace_seq *s = &iter->seq;
3091 unsigned char newline = '\n';
3092 struct trace_entry *entry;
3093 struct trace_event *event;
3097 if (tr->trace_flags & TRACE_ITER_CONTEXT_INFO) {
3098 SEQ_PUT_HEX_FIELD(s, entry->pid);
3099 SEQ_PUT_HEX_FIELD(s, iter->cpu);
3100 SEQ_PUT_HEX_FIELD(s, iter->ts);
3101 if (trace_seq_has_overflowed(s))
3102 return TRACE_TYPE_PARTIAL_LINE;
3105 event = ftrace_find_event(entry->type);
3107 enum print_line_t ret = event->funcs->hex(iter, 0, event);
3108 if (ret != TRACE_TYPE_HANDLED)
3112 SEQ_PUT_FIELD(s, newline);
3114 return trace_handle_return(s);
3117 static enum print_line_t print_bin_fmt(struct trace_iterator *iter)
3119 struct trace_array *tr = iter->tr;
3120 struct trace_seq *s = &iter->seq;
3121 struct trace_entry *entry;
3122 struct trace_event *event;
3126 if (tr->trace_flags & TRACE_ITER_CONTEXT_INFO) {
3127 SEQ_PUT_FIELD(s, entry->pid);
3128 SEQ_PUT_FIELD(s, iter->cpu);
3129 SEQ_PUT_FIELD(s, iter->ts);
3130 if (trace_seq_has_overflowed(s))
3131 return TRACE_TYPE_PARTIAL_LINE;
3134 event = ftrace_find_event(entry->type);
3135 return event ? event->funcs->binary(iter, 0, event) :
3139 int trace_empty(struct trace_iterator *iter)
3141 struct ring_buffer_iter *buf_iter;
3144 /* If we are looking at one CPU buffer, only check that one */
3145 if (iter->cpu_file != RING_BUFFER_ALL_CPUS) {
3146 cpu = iter->cpu_file;
3147 buf_iter = trace_buffer_iter(iter, cpu);
3149 if (!ring_buffer_iter_empty(buf_iter))
3152 if (!ring_buffer_empty_cpu(iter->trace_buffer->buffer, cpu))
3158 for_each_tracing_cpu(cpu) {
3159 buf_iter = trace_buffer_iter(iter, cpu);
3161 if (!ring_buffer_iter_empty(buf_iter))
3164 if (!ring_buffer_empty_cpu(iter->trace_buffer->buffer, cpu))
3172 /* Called with trace_event_read_lock() held. */
3173 enum print_line_t print_trace_line(struct trace_iterator *iter)
3175 struct trace_array *tr = iter->tr;
3176 unsigned long trace_flags = tr->trace_flags;
3177 enum print_line_t ret;
3179 if (iter->lost_events) {
3180 trace_seq_printf(&iter->seq, "CPU:%d [LOST %lu EVENTS]\n",
3181 iter->cpu, iter->lost_events);
3182 if (trace_seq_has_overflowed(&iter->seq))
3183 return TRACE_TYPE_PARTIAL_LINE;
3186 if (iter->trace && iter->trace->print_line) {
3187 ret = iter->trace->print_line(iter);
3188 if (ret != TRACE_TYPE_UNHANDLED)
3192 if (iter->ent->type == TRACE_BPUTS &&
3193 trace_flags & TRACE_ITER_PRINTK &&
3194 trace_flags & TRACE_ITER_PRINTK_MSGONLY)
3195 return trace_print_bputs_msg_only(iter);
3197 if (iter->ent->type == TRACE_BPRINT &&
3198 trace_flags & TRACE_ITER_PRINTK &&
3199 trace_flags & TRACE_ITER_PRINTK_MSGONLY)
3200 return trace_print_bprintk_msg_only(iter);
3202 if (iter->ent->type == TRACE_PRINT &&
3203 trace_flags & TRACE_ITER_PRINTK &&
3204 trace_flags & TRACE_ITER_PRINTK_MSGONLY)
3205 return trace_print_printk_msg_only(iter);
3207 if (trace_flags & TRACE_ITER_BIN)
3208 return print_bin_fmt(iter);
3210 if (trace_flags & TRACE_ITER_HEX)
3211 return print_hex_fmt(iter);
3213 if (trace_flags & TRACE_ITER_RAW)
3214 return print_raw_fmt(iter);
3216 return print_trace_fmt(iter);
3219 void trace_latency_header(struct seq_file *m)
3221 struct trace_iterator *iter = m->private;
3222 struct trace_array *tr = iter->tr;
3224 /* print nothing if the buffers are empty */
3225 if (trace_empty(iter))
3228 if (iter->iter_flags & TRACE_FILE_LAT_FMT)
3229 print_trace_header(m, iter);
3231 if (!(tr->trace_flags & TRACE_ITER_VERBOSE))
3232 print_lat_help_header(m);
3235 void trace_default_header(struct seq_file *m)
3237 struct trace_iterator *iter = m->private;
3238 struct trace_array *tr = iter->tr;
3239 unsigned long trace_flags = tr->trace_flags;
3241 if (!(trace_flags & TRACE_ITER_CONTEXT_INFO))
3244 if (iter->iter_flags & TRACE_FILE_LAT_FMT) {
3245 /* print nothing if the buffers are empty */
3246 if (trace_empty(iter))
3248 print_trace_header(m, iter);
3249 if (!(trace_flags & TRACE_ITER_VERBOSE))
3250 print_lat_help_header(m);
3252 if (!(trace_flags & TRACE_ITER_VERBOSE)) {
3253 if (trace_flags & TRACE_ITER_IRQ_INFO)
3254 print_func_help_header_irq(iter->trace_buffer, m);
3256 print_func_help_header(iter->trace_buffer, m);
3261 static void test_ftrace_alive(struct seq_file *m)
3263 if (!ftrace_is_dead())
3265 seq_puts(m, "# WARNING: FUNCTION TRACING IS CORRUPTED\n"
3266 "# MAY BE MISSING FUNCTION EVENTS\n");
3269 #ifdef CONFIG_TRACER_MAX_TRACE
3270 static void show_snapshot_main_help(struct seq_file *m)
3272 seq_puts(m, "# echo 0 > snapshot : Clears and frees snapshot buffer\n"
3273 "# echo 1 > snapshot : Allocates snapshot buffer, if not already allocated.\n"
3274 "# Takes a snapshot of the main buffer.\n"
3275 "# echo 2 > snapshot : Clears snapshot buffer (but does not allocate or free)\n"
3276 "# (Doesn't have to be '2' works with any number that\n"
3277 "# is not a '0' or '1')\n");
3280 static void show_snapshot_percpu_help(struct seq_file *m)
3282 seq_puts(m, "# echo 0 > snapshot : Invalid for per_cpu snapshot file.\n");
3283 #ifdef CONFIG_RING_BUFFER_ALLOW_SWAP
3284 seq_puts(m, "# echo 1 > snapshot : Allocates snapshot buffer, if not already allocated.\n"
3285 "# Takes a snapshot of the main buffer for this cpu.\n");
3287 seq_puts(m, "# echo 1 > snapshot : Not supported with this kernel.\n"
3288 "# Must use main snapshot file to allocate.\n");
3290 seq_puts(m, "# echo 2 > snapshot : Clears this cpu's snapshot buffer (but does not allocate)\n"
3291 "# (Doesn't have to be '2' works with any number that\n"
3292 "# is not a '0' or '1')\n");
3295 static void print_snapshot_help(struct seq_file *m, struct trace_iterator *iter)
3297 if (iter->tr->allocated_snapshot)
3298 seq_puts(m, "#\n# * Snapshot is allocated *\n#\n");
3300 seq_puts(m, "#\n# * Snapshot is freed *\n#\n");
3302 seq_puts(m, "# Snapshot commands:\n");
3303 if (iter->cpu_file == RING_BUFFER_ALL_CPUS)
3304 show_snapshot_main_help(m);
3306 show_snapshot_percpu_help(m);
3309 /* Should never be called */
3310 static inline void print_snapshot_help(struct seq_file *m, struct trace_iterator *iter) { }
3313 static int s_show(struct seq_file *m, void *v)
3315 struct trace_iterator *iter = v;
3318 if (iter->ent == NULL) {
3320 seq_printf(m, "# tracer: %s\n", iter->trace->name);
3322 test_ftrace_alive(m);
3324 if (iter->snapshot && trace_empty(iter))
3325 print_snapshot_help(m, iter);
3326 else if (iter->trace && iter->trace->print_header)
3327 iter->trace->print_header(m);
3329 trace_default_header(m);
3331 } else if (iter->leftover) {
3333 * If we filled the seq_file buffer earlier, we
3334 * want to just show it now.
3336 ret = trace_print_seq(m, &iter->seq);
3338 /* ret should this time be zero, but you never know */
3339 iter->leftover = ret;
3342 print_trace_line(iter);
3343 ret = trace_print_seq(m, &iter->seq);
3345 * If we overflow the seq_file buffer, then it will
3346 * ask us for this data again at start up.
3348 * ret is 0 if seq_file write succeeded.
3351 iter->leftover = ret;
3358 * Should be used after trace_array_get(), trace_types_lock
3359 * ensures that i_cdev was already initialized.
3361 static inline int tracing_get_cpu(struct inode *inode)
3363 if (inode->i_cdev) /* See trace_create_cpu_file() */
3364 return (long)inode->i_cdev - 1;
3365 return RING_BUFFER_ALL_CPUS;
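/*
 * Note on the encoding used above: the matching trace_create_cpu_file()
 * stores cpu + 1 in i_cdev, so a NULL i_cdev means the file is not
 * per-cpu (RING_BUFFER_ALL_CPUS) and a per-cpu file for CPU n decodes
 * as (long)i_cdev - 1 == n.
 */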
3368 static const struct seq_operations tracer_seq_ops = {
3375 static struct trace_iterator *
3376 __tracing_open(struct inode *inode, struct file *file, bool snapshot)
3378 struct trace_array *tr = inode->i_private;
3379 struct trace_iterator *iter;
3382 if (tracing_disabled)
3383 return ERR_PTR(-ENODEV);
3385 iter = __seq_open_private(file, &tracer_seq_ops, sizeof(*iter));
3387 return ERR_PTR(-ENOMEM);
3389 iter->buffer_iter = kcalloc(nr_cpu_ids, sizeof(*iter->buffer_iter),
3391 if (!iter->buffer_iter)
3395 * We make a copy of the current tracer to avoid concurrent
3396 * changes on it while we are reading.
3398 mutex_lock(&trace_types_lock);
3399 iter->trace = kzalloc(sizeof(*iter->trace), GFP_KERNEL);
3403 *iter->trace = *tr->current_trace;
3405 if (!zalloc_cpumask_var(&iter->started, GFP_KERNEL))
3410 #ifdef CONFIG_TRACER_MAX_TRACE
3411 /* Currently only the top directory has a snapshot */
3412 if (tr->current_trace->print_max || snapshot)
3413 iter->trace_buffer = &tr->max_buffer;
3416 iter->trace_buffer = &tr->trace_buffer;
3417 iter->snapshot = snapshot;
3419 iter->cpu_file = tracing_get_cpu(inode);
3420 mutex_init(&iter->mutex);
3422 /* Notify the tracer early; before we stop tracing. */
3423 if (iter->trace && iter->trace->open)
3424 iter->trace->open(iter);
3426 /* Annotate start of buffers if we had overruns */
3427 if (ring_buffer_overruns(iter->trace_buffer->buffer))
3428 iter->iter_flags |= TRACE_FILE_ANNOTATE;
3430 /* Output in nanoseconds only if we are using a clock in nanoseconds. */
3431 if (trace_clocks[tr->clock_id].in_ns)
3432 iter->iter_flags |= TRACE_FILE_TIME_IN_NS;
3434 /* stop the trace while dumping if we are not opening "snapshot" */
3435 if (!iter->snapshot)
3436 tracing_stop_tr(tr);
3438 if (iter->cpu_file == RING_BUFFER_ALL_CPUS) {
3439 for_each_tracing_cpu(cpu) {
3440 iter->buffer_iter[cpu] =
3441 ring_buffer_read_prepare(iter->trace_buffer->buffer,
3444 ring_buffer_read_prepare_sync();
3445 for_each_tracing_cpu(cpu) {
3446 ring_buffer_read_start(iter->buffer_iter[cpu]);
3447 tracing_iter_reset(iter, cpu);
3450 cpu = iter->cpu_file;
3451 iter->buffer_iter[cpu] =
3452 ring_buffer_read_prepare(iter->trace_buffer->buffer,
3454 ring_buffer_read_prepare_sync();
3455 ring_buffer_read_start(iter->buffer_iter[cpu]);
3456 tracing_iter_reset(iter, cpu);
3459 mutex_unlock(&trace_types_lock);
3464 mutex_unlock(&trace_types_lock);
3466 kfree(iter->buffer_iter);
3468 seq_release_private(inode, file);
3469 return ERR_PTR(-ENOMEM);
3472 int tracing_open_generic(struct inode *inode, struct file *filp)
3474 if (tracing_disabled)
3477 filp->private_data = inode->i_private;
3481 bool tracing_is_disabled(void)
3483 return (tracing_disabled) ? true : false;
3487 * Open and update trace_array ref count.
3488 * Must have the current trace_array passed to it.
3490 static int tracing_open_generic_tr(struct inode *inode, struct file *filp)
3492 struct trace_array *tr = inode->i_private;
3494 if (tracing_disabled)
3497 if (trace_array_get(tr) < 0)
3500 filp->private_data = inode->i_private;
3505 static int tracing_release(struct inode *inode, struct file *file)
3507 struct trace_array *tr = inode->i_private;
3508 struct seq_file *m = file->private_data;
3509 struct trace_iterator *iter;
3512 if (!(file->f_mode & FMODE_READ)) {
3513 trace_array_put(tr);
3517 /* Writes do not use seq_file */
3519 mutex_lock(&trace_types_lock);
3521 for_each_tracing_cpu(cpu) {
3522 if (iter->buffer_iter[cpu])
3523 ring_buffer_read_finish(iter->buffer_iter[cpu]);
3526 if (iter->trace && iter->trace->close)
3527 iter->trace->close(iter);
3529 if (!iter->snapshot)
3530 /* reenable tracing if it was previously enabled */
3531 tracing_start_tr(tr);
3533 __trace_array_put(tr);
3535 mutex_unlock(&trace_types_lock);
3537 mutex_destroy(&iter->mutex);
3538 free_cpumask_var(iter->started);
3540 kfree(iter->buffer_iter);
3541 seq_release_private(inode, file);
3546 static int tracing_release_generic_tr(struct inode *inode, struct file *file)
3548 struct trace_array *tr = inode->i_private;
3550 trace_array_put(tr);
3554 static int tracing_single_release_tr(struct inode *inode, struct file *file)
3556 struct trace_array *tr = inode->i_private;
3558 trace_array_put(tr);
3560 return single_release(inode, file);
3563 static int tracing_open(struct inode *inode, struct file *file)
3565 struct trace_array *tr = inode->i_private;
3566 struct trace_iterator *iter;
3569 if (trace_array_get(tr) < 0)
3572 /* If this file was open for write, then erase contents */
3573 if ((file->f_mode & FMODE_WRITE) && (file->f_flags & O_TRUNC)) {
3574 int cpu = tracing_get_cpu(inode);
3575 struct trace_buffer *trace_buf = &tr->trace_buffer;
3577 #ifdef CONFIG_TRACER_MAX_TRACE
3578 if (tr->current_trace->print_max)
3579 trace_buf = &tr->max_buffer;
3582 if (cpu == RING_BUFFER_ALL_CPUS)
3583 tracing_reset_online_cpus(trace_buf);
3585 tracing_reset(trace_buf, cpu);
3588 if (file->f_mode & FMODE_READ) {
3589 iter = __tracing_open(inode, file, false);
3591 ret = PTR_ERR(iter);
3592 else if (tr->trace_flags & TRACE_ITER_LATENCY_FMT)
3593 iter->iter_flags |= TRACE_FILE_LAT_FMT;
3597 trace_array_put(tr);
3603 * Some tracers are not suitable for instance buffers.
3604 * A tracer is always available for the global array (toplevel)
3605 * or if it explicitly states that it is.
3608 trace_ok_for_array(struct tracer *t, struct trace_array *tr)
3610 return (tr->flags & TRACE_ARRAY_FL_GLOBAL) || t->allow_instances;
3613 /* Find the next tracer that this trace array may use */
3614 static struct tracer *
3615 get_tracer_for_array(struct trace_array *tr, struct tracer *t)
3617 while (t && !trace_ok_for_array(t, tr))
3624 t_next(struct seq_file *m, void *v, loff_t *pos)
3626 struct trace_array *tr = m->private;
3627 struct tracer *t = v;
3632 t = get_tracer_for_array(tr, t->next);
3637 static void *t_start(struct seq_file *m, loff_t *pos)
3639 struct trace_array *tr = m->private;
3643 mutex_lock(&trace_types_lock);
3645 t = get_tracer_for_array(tr, trace_types);
3646 for (; t && l < *pos; t = t_next(m, t, &l))
3652 static void t_stop(struct seq_file *m, void *p)
3654 mutex_unlock(&trace_types_lock);
3657 static int t_show(struct seq_file *m, void *v)
3659 struct tracer *t = v;
3664 seq_puts(m, t->name);
3673 static const struct seq_operations show_traces_seq_ops = {
3680 static int show_traces_open(struct inode *inode, struct file *file)
3682 struct trace_array *tr = inode->i_private;
3686 if (tracing_disabled)
3689 if (trace_array_get(tr) < 0)
3692 ret = seq_open(file, &show_traces_seq_ops);
3694 trace_array_put(tr);
3698 m = file->private_data;
3704 static int show_traces_release(struct inode *inode, struct file *file)
3706 struct trace_array *tr = inode->i_private;
3708 trace_array_put(tr);
3709 return seq_release(inode, file);
3713 tracing_write_stub(struct file *filp, const char __user *ubuf,
3714 size_t count, loff_t *ppos)
3719 loff_t tracing_lseek(struct file *file, loff_t offset, int whence)
3723 if (file->f_mode & FMODE_READ)
3724 ret = seq_lseek(file, offset, whence);
3726 file->f_pos = ret = 0;
3731 static const struct file_operations tracing_fops = {
3732 .open = tracing_open,
3734 .write = tracing_write_stub,
3735 .llseek = tracing_lseek,
3736 .release = tracing_release,
3739 static const struct file_operations show_traces_fops = {
3740 .open = show_traces_open,
3742 .llseek = seq_lseek,
3743 .release = show_traces_release,
3747 tracing_cpumask_read(struct file *filp, char __user *ubuf,
3748 size_t count, loff_t *ppos)
3750 struct trace_array *tr = file_inode(filp)->i_private;
3754 len = snprintf(NULL, 0, "%*pb\n",
3755 cpumask_pr_args(tr->tracing_cpumask)) + 1;
3756 mask_str = kmalloc(len, GFP_KERNEL);
3760 len = snprintf(mask_str, len, "%*pb\n",
3761 cpumask_pr_args(tr->tracing_cpumask));
3766 count = simple_read_from_buffer(ubuf, count, ppos, mask_str, len);
3775 tracing_cpumask_write(struct file *filp, const char __user *ubuf,
3776 size_t count, loff_t *ppos)
3778 struct trace_array *tr = file_inode(filp)->i_private;
3779 cpumask_var_t tracing_cpumask_new;
3782 if (!alloc_cpumask_var(&tracing_cpumask_new, GFP_KERNEL))
3785 err = cpumask_parse_user(ubuf, count, tracing_cpumask_new);
3789 local_irq_disable();
3790 arch_spin_lock(&tr->max_lock);
3791 for_each_tracing_cpu(cpu) {
3793 * Increase/decrease the disabled counter if we are
3794 * about to flip a bit in the cpumask:
3796 if (cpumask_test_cpu(cpu, tr->tracing_cpumask) &&
3797 !cpumask_test_cpu(cpu, tracing_cpumask_new)) {
3798 atomic_inc(&per_cpu_ptr(tr->trace_buffer.data, cpu)->disabled);
3799 ring_buffer_record_disable_cpu(tr->trace_buffer.buffer, cpu);
3801 if (!cpumask_test_cpu(cpu, tr->tracing_cpumask) &&
3802 cpumask_test_cpu(cpu, tracing_cpumask_new)) {
3803 atomic_dec(&per_cpu_ptr(tr->trace_buffer.data, cpu)->disabled);
3804 ring_buffer_record_enable_cpu(tr->trace_buffer.buffer, cpu);
3807 arch_spin_unlock(&tr->max_lock);
3810 cpumask_copy(tr->tracing_cpumask, tracing_cpumask_new);
3811 free_cpumask_var(tracing_cpumask_new);
3816 free_cpumask_var(tracing_cpumask_new);
3821 static const struct file_operations tracing_cpumask_fops = {
3822 .open = tracing_open_generic_tr,
3823 .read = tracing_cpumask_read,
3824 .write = tracing_cpumask_write,
3825 .release = tracing_release_generic_tr,
3826 .llseek = generic_file_llseek,
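/*
 * Illustrative usage from user space (cpumask_parse_user() expects a
 * hex mask), e.g. on a 4-CPU system:
 *
 *	# cat tracing_cpumask
 *	f
 *	# echo 3 > tracing_cpumask	(trace CPUs 0-1 only)
 *
 * Clearing a CPU's bit disables recording into that CPU's ring buffer;
 * setting it again re-enables recording.
 */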
3829 static int tracing_trace_options_show(struct seq_file *m, void *v)
3831 struct tracer_opt *trace_opts;
3832 struct trace_array *tr = m->private;
3836 mutex_lock(&trace_types_lock);
3837 tracer_flags = tr->current_trace->flags->val;
3838 trace_opts = tr->current_trace->flags->opts;
3840 for (i = 0; trace_options[i]; i++) {
3841 if (tr->trace_flags & (1 << i))
3842 seq_printf(m, "%s\n", trace_options[i]);
3844 seq_printf(m, "no%s\n", trace_options[i]);
3847 for (i = 0; trace_opts[i].name; i++) {
3848 if (tracer_flags & trace_opts[i].bit)
3849 seq_printf(m, "%s\n", trace_opts[i].name);
3851 seq_printf(m, "no%s\n", trace_opts[i].name);
3853 mutex_unlock(&trace_types_lock);
3858 static int __set_tracer_option(struct trace_array *tr,
3859 struct tracer_flags *tracer_flags,
3860 struct tracer_opt *opts, int neg)
3862 struct tracer *trace = tracer_flags->trace;
3865 ret = trace->set_flag(tr, tracer_flags->val, opts->bit, !neg);
3870 tracer_flags->val &= ~opts->bit;
3872 tracer_flags->val |= opts->bit;
3876 /* Try to assign a tracer specific option */
3877 static int set_tracer_option(struct trace_array *tr, char *cmp, int neg)
3879 struct tracer *trace = tr->current_trace;
3880 struct tracer_flags *tracer_flags = trace->flags;
3881 struct tracer_opt *opts = NULL;
3884 for (i = 0; tracer_flags->opts[i].name; i++) {
3885 opts = &tracer_flags->opts[i];
3887 if (strcmp(cmp, opts->name) == 0)
3888 return __set_tracer_option(tr, trace->flags, opts, neg);
3894 /* Some tracers require overwrite to stay enabled */
3895 int trace_keep_overwrite(struct tracer *tracer, u32 mask, int set)
3897 if (tracer->enabled && (mask & TRACE_ITER_OVERWRITE) && !set)
3903 int set_tracer_flag(struct trace_array *tr, unsigned int mask, int enabled)
3905 /* do nothing if flag is already set */
3906 if (!!(tr->trace_flags & mask) == !!enabled)
3909 /* Give the tracer a chance to approve the change */
3910 if (tr->current_trace->flag_changed)
3911 if (tr->current_trace->flag_changed(tr, mask, !!enabled))
3915 tr->trace_flags |= mask;
3917 tr->trace_flags &= ~mask;
3919 if (mask == TRACE_ITER_RECORD_CMD)
3920 trace_event_enable_cmd_record(enabled);
3922 if (mask == TRACE_ITER_EVENT_FORK)
3923 trace_event_follow_fork(tr, enabled);
3925 if (mask == TRACE_ITER_OVERWRITE) {
3926 ring_buffer_change_overwrite(tr->trace_buffer.buffer, enabled);
3927 #ifdef CONFIG_TRACER_MAX_TRACE
3928 ring_buffer_change_overwrite(tr->max_buffer.buffer, enabled);
3932 if (mask == TRACE_ITER_PRINTK) {
3933 trace_printk_start_stop_comm(enabled);
3934 trace_printk_control(enabled);
3940 static int trace_set_options(struct trace_array *tr, char *option)
3946 size_t orig_len = strlen(option);
3948 cmp = strstrip(option);
3950 if (strncmp(cmp, "no", 2) == 0) {
3955 mutex_lock(&trace_types_lock);
3957 for (i = 0; trace_options[i]; i++) {
3958 if (strcmp(cmp, trace_options[i]) == 0) {
3959 ret = set_tracer_flag(tr, 1 << i, !neg);
3964 /* If no option could be set, test the specific tracer options */
3965 if (!trace_options[i])
3966 ret = set_tracer_option(tr, cmp, neg);
3968 mutex_unlock(&trace_types_lock);
3971 * If the first trailing whitespace is replaced with '\0' by strstrip,
3972 * turn it back into a space.
3974 if (orig_len > strlen(option))
3975 option[strlen(option)] = ' ';
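/*
 * Illustrative usage: the same parser backs the trace_options file and
 * the trace_options= boot parameter handled below, e.g.
 *
 *	# echo noprint-parent > trace_options
 *	# echo print-parent > trace_options
 *
 * Tracer-specific flags are only tried when the name does not match a
 * core trace_options[] entry (see set_tracer_option() above).
 */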
3980 static void __init apply_trace_boot_options(void)
3982 char *buf = trace_boot_options_buf;
3986 option = strsep(&buf, ",");
3992 trace_set_options(&global_trace, option);
3994 /* Put back the comma to allow this to be called again */
4001 tracing_trace_options_write(struct file *filp, const char __user *ubuf,
4002 size_t cnt, loff_t *ppos)
4004 struct seq_file *m = filp->private_data;
4005 struct trace_array *tr = m->private;
4009 if (cnt >= sizeof(buf))
4012 if (copy_from_user(buf, ubuf, cnt))
4017 ret = trace_set_options(tr, buf);
4026 static int tracing_trace_options_open(struct inode *inode, struct file *file)
4028 struct trace_array *tr = inode->i_private;
4031 if (tracing_disabled)
4034 if (trace_array_get(tr) < 0)
4037 ret = single_open(file, tracing_trace_options_show, inode->i_private);
4039 trace_array_put(tr);
4044 static const struct file_operations tracing_iter_fops = {
4045 .open = tracing_trace_options_open,
4047 .llseek = seq_lseek,
4048 .release = tracing_single_release_tr,
4049 .write = tracing_trace_options_write,
4052 static const char readme_msg[] =
4053 "tracing mini-HOWTO:\n\n"
4054 "# echo 0 > tracing_on : quick way to disable tracing\n"
4055 "# echo 1 > tracing_on : quick way to re-enable tracing\n\n"
4056 " Important files:\n"
4057 " trace\t\t\t- The static contents of the buffer\n"
4058 "\t\t\t To clear the buffer write into this file: echo > trace\n"
4059 " trace_pipe\t\t- A consuming read to see the contents of the buffer\n"
4060 " current_tracer\t- function and latency tracers\n"
4061 " available_tracers\t- list of configured tracers for current_tracer\n"
4062 " buffer_size_kb\t- view and modify size of per cpu buffer\n"
4063 " buffer_total_size_kb - view total size of all cpu buffers\n\n"
4064 " trace_clock\t\t-change the clock used to order events\n"
4065 " local: Per cpu clock but may not be synced across CPUs\n"
4066 " global: Synced across CPUs but slows tracing down.\n"
4067 " counter: Not a clock, but just an increment\n"
4068 " uptime: Jiffy counter from time of boot\n"
4069 " perf: Same clock that perf events use\n"
4070 #ifdef CONFIG_X86_64
4071 " x86-tsc: TSC cycle counter\n"
4073 "\n trace_marker\t\t- Writes into this file writes into the kernel buffer\n"
4074 " tracing_cpumask\t- Limit which CPUs to trace\n"
4075 " instances\t\t- Make sub-buffers with: mkdir instances/foo\n"
4076 "\t\t\t Remove sub-buffer with rmdir\n"
4077 " trace_options\t\t- Set format or modify how tracing happens\n"
4078 "\t\t\t Disable an option by adding a suffix 'no' to the\n"
4079 "\t\t\t option name\n"
4080 " saved_cmdlines_size\t- echo command number in here to store comm-pid list\n"
4081 #ifdef CONFIG_DYNAMIC_FTRACE
4082 "\n available_filter_functions - list of functions that can be filtered on\n"
4083 " set_ftrace_filter\t- echo function name in here to only trace these\n"
4084 "\t\t\t functions\n"
4085 "\t accepts: func_full_name, *func_end, func_begin*, *func_middle*\n"
4086 "\t modules: Can select a group via module\n"
4087 "\t Format: :mod:<module-name>\n"
4088 "\t example: echo :mod:ext3 > set_ftrace_filter\n"
4089 "\t triggers: a command to perform when function is hit\n"
4090 "\t Format: <function>:<trigger>[:count]\n"
4091 "\t trigger: traceon, traceoff\n"
4092 "\t\t enable_event:<system>:<event>\n"
4093 "\t\t disable_event:<system>:<event>\n"
4094 #ifdef CONFIG_STACKTRACE
4097 #ifdef CONFIG_TRACER_SNAPSHOT
4102 "\t example: echo do_fault:traceoff > set_ftrace_filter\n"
4103 "\t echo do_trap:traceoff:3 > set_ftrace_filter\n"
4104 "\t The first one will disable tracing every time do_fault is hit\n"
4105 "\t The second will disable tracing at most 3 times when do_trap is hit\n"
4106 "\t The first time do trap is hit and it disables tracing, the\n"
4107 "\t counter will decrement to 2. If tracing is already disabled,\n"
4108 "\t the counter will not decrement. It only decrements when the\n"
4109 "\t trigger did work\n"
4110 "\t To remove trigger without count:\n"
4111 "\t echo '!<function>:<trigger> > set_ftrace_filter\n"
4112 "\t To remove trigger with a count:\n"
4113 "\t echo '!<function>:<trigger>:0 > set_ftrace_filter\n"
4114 " set_ftrace_notrace\t- echo function name in here to never trace.\n"
4115 "\t accepts: func_full_name, *func_end, func_begin*, *func_middle*\n"
4116 "\t modules: Can select a group via module command :mod:\n"
4117 "\t Does not accept triggers\n"
4118 #endif /* CONFIG_DYNAMIC_FTRACE */
4119 #ifdef CONFIG_FUNCTION_TRACER
4120 " set_ftrace_pid\t- Write pid(s) to only function trace those pids\n"
4123 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
4124 " set_graph_function\t- Trace the nested calls of a function (function_graph)\n"
4125 " set_graph_notrace\t- Do not trace the nested calls of a function (function_graph)\n"
4126 " max_graph_depth\t- Trace a limited depth of nested calls (0 is unlimited)\n"
4128 #ifdef CONFIG_TRACER_SNAPSHOT
4129 "\n snapshot\t\t- Like 'trace' but shows the content of the static\n"
4130 "\t\t\t snapshot buffer. Read the contents for more\n"
4131 "\t\t\t information\n"
4133 #ifdef CONFIG_STACK_TRACER
4134 " stack_trace\t\t- Shows the max stack trace when active\n"
4135 " stack_max_size\t- Shows current max stack size that was traced\n"
4136 "\t\t\t Write into this file to reset the max size (trigger a\n"
4137 "\t\t\t new trace)\n"
4138 #ifdef CONFIG_DYNAMIC_FTRACE
4139 " stack_trace_filter\t- Like set_ftrace_filter but limits what stack_trace\n"
4142 #endif /* CONFIG_STACK_TRACER */
4143 #ifdef CONFIG_KPROBE_EVENT
4144 " kprobe_events\t\t- Add/remove/show the kernel dynamic events\n"
4145 "\t\t\t Write into this file to define/undefine new trace events.\n"
4147 #ifdef CONFIG_UPROBE_EVENT
4148 " uprobe_events\t\t- Add/remove/show the userspace dynamic events\n"
4149 "\t\t\t Write into this file to define/undefine new trace events.\n"
4151 #if defined(CONFIG_KPROBE_EVENT) || defined(CONFIG_UPROBE_EVENT)
4152 "\t accepts: event-definitions (one definition per line)\n"
4153 "\t Format: p|r[:[<group>/]<event>] <place> [<args>]\n"
4154 "\t -:[<group>/]<event>\n"
4155 #ifdef CONFIG_KPROBE_EVENT
4156 "\t place: [<module>:]<symbol>[+<offset>]|<memaddr>\n"
4158 #ifdef CONFIG_UPROBE_EVENT
4159 "\t place: <path>:<offset>\n"
4161 "\t args: <name>=fetcharg[:type]\n"
4162 "\t fetcharg: %<register>, @<address>, @<symbol>[+|-<offset>],\n"
4163 "\t $stack<index>, $stack, $retval, $comm\n"
4164 "\t type: s8/16/32/64, u8/16/32/64, x8/16/32/64, string,\n"
4165 "\t b<bit-width>@<bit-offset>/<container-size>\n"
4167 " events/\t\t- Directory containing all trace event subsystems:\n"
4168 " enable\t\t- Write 0/1 to enable/disable tracing of all events\n"
4169 " events/<system>/\t- Directory containing all trace events for <system>:\n"
4170 " enable\t\t- Write 0/1 to enable/disable tracing of all <system>\n"
4172 " filter\t\t- If set, only events passing filter are traced\n"
4173 " events/<system>/<event>/\t- Directory containing control files for\n"
4175 " enable\t\t- Write 0/1 to enable/disable tracing of <event>\n"
4176 " filter\t\t- If set, only events passing filter are traced\n"
4177 " trigger\t\t- If set, a command to perform when event is hit\n"
4178 "\t Format: <trigger>[:count][if <filter>]\n"
4179 "\t trigger: traceon, traceoff\n"
4180 "\t enable_event:<system>:<event>\n"
4181 "\t disable_event:<system>:<event>\n"
4182 #ifdef CONFIG_HIST_TRIGGERS
4183 "\t enable_hist:<system>:<event>\n"
4184 "\t disable_hist:<system>:<event>\n"
4186 #ifdef CONFIG_STACKTRACE
4189 #ifdef CONFIG_TRACER_SNAPSHOT
4192 #ifdef CONFIG_HIST_TRIGGERS
4193 "\t\t hist (see below)\n"
4195 "\t example: echo traceoff > events/block/block_unplug/trigger\n"
4196 "\t echo traceoff:3 > events/block/block_unplug/trigger\n"
4197 "\t echo 'enable_event:kmem:kmalloc:3 if nr_rq > 1' > \\\n"
4198 "\t events/block/block_unplug/trigger\n"
4199 "\t The first disables tracing every time block_unplug is hit.\n"
4200 "\t The second disables tracing the first 3 times block_unplug is hit.\n"
4201 "\t The third enables the kmalloc event the first 3 times block_unplug\n"
4202 "\t is hit and has value of greater than 1 for the 'nr_rq' event field.\n"
4203 "\t Like function triggers, the counter is only decremented if it\n"
4204 "\t enabled or disabled tracing.\n"
4205 "\t To remove a trigger without a count:\n"
4206 "\t echo '!<trigger> > <system>/<event>/trigger\n"
4207 "\t To remove a trigger with a count:\n"
4208 "\t echo '!<trigger>:0 > <system>/<event>/trigger\n"
4209 "\t Filters can be ignored when removing a trigger.\n"
4210 #ifdef CONFIG_HIST_TRIGGERS
4211 " hist trigger\t- If set, event hits are aggregated into a hash table\n"
4212 "\t Format: hist:keys=<field1[,field2,...]>\n"
4213 "\t [:values=<field1[,field2,...]>]\n"
4214 "\t [:sort=<field1[,field2,...]>]\n"
4215 "\t [:size=#entries]\n"
4216 "\t [:pause][:continue][:clear]\n"
4217 "\t [:name=histname1]\n"
4218 "\t [if <filter>]\n\n"
4219 "\t When a matching event is hit, an entry is added to a hash\n"
4220 "\t table using the key(s) and value(s) named, and the value of a\n"
4221 "\t sum called 'hitcount' is incremented. Keys and values\n"
4222 "\t correspond to fields in the event's format description. Keys\n"
4223 "\t can be any field, or the special string 'stacktrace'.\n"
4224 "\t Compound keys consisting of up to two fields can be specified\n"
4225 "\t by the 'keys' keyword. Values must correspond to numeric\n"
4226 "\t fields. Sort keys consisting of up to two fields can be\n"
4227 "\t specified using the 'sort' keyword. The sort direction can\n"
4228 "\t be modified by appending '.descending' or '.ascending' to a\n"
4229 "\t sort field. The 'size' parameter can be used to specify more\n"
4230 "\t or fewer than the default 2048 entries for the hashtable size.\n"
4231 "\t If a hist trigger is given a name using the 'name' parameter,\n"
4232 "\t its histogram data will be shared with other triggers of the\n"
4233 "\t same name, and trigger hits will update this common data.\n\n"
4234 "\t Reading the 'hist' file for the event will dump the hash\n"
4235 "\t table in its entirety to stdout. If there are multiple hist\n"
4236 "\t triggers attached to an event, there will be a table for each\n"
4237 "\t trigger in the output. The table displayed for a named\n"
4238 "\t trigger will be the same as any other instance having the\n"
4239 "\t same name. The default format used to display a given field\n"
4240 "\t can be modified by appending any of the following modifiers\n"
4241 "\t to the field name, as applicable:\n\n"
4242 "\t .hex display a number as a hex value\n"
4243 "\t .sym display an address as a symbol\n"
4244 "\t .sym-offset display an address as a symbol and offset\n"
4245 "\t .execname display a common_pid as a program name\n"
4246 "\t .syscall display a syscall id as a syscall name\n\n"
4247 "\t .log2 display log2 value rather than raw number\n\n"
4248 "\t The 'pause' parameter can be used to pause an existing hist\n"
4249 "\t trigger or to start a hist trigger but not log any events\n"
4250 "\t until told to do so. 'continue' can be used to start or\n"
4251 "\t restart a paused hist trigger.\n\n"
4252 "\t The 'clear' parameter will clear the contents of a running\n"
4253 "\t hist trigger and leave its current paused/active state\n"
4255 "\t The enable_hist and disable_hist triggers can be used to\n"
4256 "\t have one event conditionally start and stop another event's\n"
4257 "\t already-attached hist trigger. The syntax is analagous to\n"
4258 "\t the enable_event and disable_event triggers.\n"
4263 tracing_readme_read(struct file *filp, char __user *ubuf,
4264 size_t cnt, loff_t *ppos)
4266 return simple_read_from_buffer(ubuf, cnt, ppos,
4267 readme_msg, strlen(readme_msg));
4270 static const struct file_operations tracing_readme_fops = {
4271 .open = tracing_open_generic,
4272 .read = tracing_readme_read,
4273 .llseek = generic_file_llseek,
4276 static void *saved_cmdlines_next(struct seq_file *m, void *v, loff_t *pos)
4278 unsigned int *ptr = v;
4280 if (*pos || m->count)
4285 for (; ptr < &savedcmd->map_cmdline_to_pid[savedcmd->cmdline_num];
4287 if (*ptr == -1 || *ptr == NO_CMDLINE_MAP)
4296 static void *saved_cmdlines_start(struct seq_file *m, loff_t *pos)
4302 arch_spin_lock(&trace_cmdline_lock);
4304 v = &savedcmd->map_cmdline_to_pid[0];
4306 v = saved_cmdlines_next(m, v, &l);
4314 static void saved_cmdlines_stop(struct seq_file *m, void *v)
4316 arch_spin_unlock(&trace_cmdline_lock);
4320 static int saved_cmdlines_show(struct seq_file *m, void *v)
4322 char buf[TASK_COMM_LEN];
4323 unsigned int *pid = v;
4325 __trace_find_cmdline(*pid, buf);
4326 seq_printf(m, "%d %s\n", *pid, buf);
4330 static const struct seq_operations tracing_saved_cmdlines_seq_ops = {
4331 .start = saved_cmdlines_start,
4332 .next = saved_cmdlines_next,
4333 .stop = saved_cmdlines_stop,
4334 .show = saved_cmdlines_show,
4337 static int tracing_saved_cmdlines_open(struct inode *inode, struct file *filp)
4339 if (tracing_disabled)
4342 return seq_open(filp, &tracing_saved_cmdlines_seq_ops);
4345 static const struct file_operations tracing_saved_cmdlines_fops = {
4346 .open = tracing_saved_cmdlines_open,
4348 .llseek = seq_lseek,
4349 .release = seq_release,
4353 tracing_saved_cmdlines_size_read(struct file *filp, char __user *ubuf,
4354 size_t cnt, loff_t *ppos)
4359 arch_spin_lock(&trace_cmdline_lock);
4360 r = scnprintf(buf, sizeof(buf), "%u\n", savedcmd->cmdline_num);
4361 arch_spin_unlock(&trace_cmdline_lock);
4363 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
4366 static void free_saved_cmdlines_buffer(struct saved_cmdlines_buffer *s)
4368 kfree(s->saved_cmdlines);
4369 kfree(s->map_cmdline_to_pid);
4373 static int tracing_resize_saved_cmdlines(unsigned int val)
4375 struct saved_cmdlines_buffer *s, *savedcmd_temp;
4377 s = kmalloc(sizeof(*s), GFP_KERNEL);
4381 if (allocate_cmdlines_buffer(val, s) < 0) {
4386 arch_spin_lock(&trace_cmdline_lock);
4387 savedcmd_temp = savedcmd;
4389 arch_spin_unlock(&trace_cmdline_lock);
4390 free_saved_cmdlines_buffer(savedcmd_temp);
4396 tracing_saved_cmdlines_size_write(struct file *filp, const char __user *ubuf,
4397 size_t cnt, loff_t *ppos)
4402 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
4406 /* must have at least 1 entry and at most PID_MAX_DEFAULT */
4407 if (!val || val > PID_MAX_DEFAULT)
4410 ret = tracing_resize_saved_cmdlines((unsigned int)val);
4419 static const struct file_operations tracing_saved_cmdlines_size_fops = {
4420 .open = tracing_open_generic,
4421 .read = tracing_saved_cmdlines_size_read,
4422 .write = tracing_saved_cmdlines_size_write,
4425 #ifdef CONFIG_TRACE_ENUM_MAP_FILE
4426 static union trace_enum_map_item *
4427 update_enum_map(union trace_enum_map_item *ptr)
4429 if (!ptr->map.enum_string) {
4430 if (ptr->tail.next) {
4431 ptr = ptr->tail.next;
4432 /* Set ptr to the next real item (skip head) */
4440 static void *enum_map_next(struct seq_file *m, void *v, loff_t *pos)
4442 union trace_enum_map_item *ptr = v;
4445 * Paranoid! If ptr points to end, we don't want to increment past it.
4446 * This really should never happen.
4448 ptr = update_enum_map(ptr);
4449 if (WARN_ON_ONCE(!ptr))
4456 ptr = update_enum_map(ptr);
4461 static void *enum_map_start(struct seq_file *m, loff_t *pos)
4463 union trace_enum_map_item *v;
4466 mutex_lock(&trace_enum_mutex);
4468 v = trace_enum_maps;
4472 while (v && l < *pos) {
4473 v = enum_map_next(m, v, &l);
4479 static void enum_map_stop(struct seq_file *m, void *v)
4481 mutex_unlock(&trace_enum_mutex);
4484 static int enum_map_show(struct seq_file *m, void *v)
4486 union trace_enum_map_item *ptr = v;
4488 seq_printf(m, "%s %ld (%s)\n",
4489 ptr->map.enum_string, ptr->map.enum_value,
4495 static const struct seq_operations tracing_enum_map_seq_ops = {
4496 .start = enum_map_start,
4497 .next = enum_map_next,
4498 .stop = enum_map_stop,
4499 .show = enum_map_show,
4502 static int tracing_enum_map_open(struct inode *inode, struct file *filp)
4504 if (tracing_disabled)
4507 return seq_open(filp, &tracing_enum_map_seq_ops);
4510 static const struct file_operations tracing_enum_map_fops = {
4511 .open = tracing_enum_map_open,
4513 .llseek = seq_lseek,
4514 .release = seq_release,
4517 static inline union trace_enum_map_item *
4518 trace_enum_jmp_to_tail(union trace_enum_map_item *ptr)
4520 /* Return tail of array given the head */
4521 return ptr + ptr->head.length + 1;
4525 trace_insert_enum_map_file(struct module *mod, struct trace_enum_map **start,
4528 struct trace_enum_map **stop;
4529 struct trace_enum_map **map;
4530 union trace_enum_map_item *map_array;
4531 union trace_enum_map_item *ptr;
4536 * The trace_enum_maps contains the map plus a head and tail item,
4537 * where the head holds the module and length of array, and the
4538 * tail holds a pointer to the next list.
4540 map_array = kmalloc(sizeof(*map_array) * (len + 2), GFP_KERNEL);
4542 pr_warn("Unable to allocate trace enum mapping\n");
4546 mutex_lock(&trace_enum_mutex);
4548 if (!trace_enum_maps)
4549 trace_enum_maps = map_array;
4551 ptr = trace_enum_maps;
4553 ptr = trace_enum_jmp_to_tail(ptr);
4554 if (!ptr->tail.next)
4556 ptr = ptr->tail.next;
4559 ptr->tail.next = map_array;
4561 map_array->head.mod = mod;
4562 map_array->head.length = len;
4565 for (map = start; (unsigned long)map < (unsigned long)stop; map++) {
4566 map_array->map = **map;
4569 memset(map_array, 0, sizeof(*map_array));
4571 mutex_unlock(&trace_enum_mutex);
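/*
 * Illustrative layout of one block built above for a module (or the
 * core image) contributing N enum mappings:
 *
 *	map_array[0]		head: { .mod = mod, .length = N }
 *	map_array[1 .. N]	map:  one trace_enum_map copy each
 *	map_array[N + 1]	tail: { .next = next block or NULL }
 *
 * trace_enum_jmp_to_tail() uses head.length to hop from a block's head
 * straight to its tail when chaining a new block onto the list.
 */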
4574 static void trace_create_enum_file(struct dentry *d_tracer)
4576 trace_create_file("enum_map", 0444, d_tracer,
4577 NULL, &tracing_enum_map_fops);
4580 #else /* CONFIG_TRACE_ENUM_MAP_FILE */
4581 static inline void trace_create_enum_file(struct dentry *d_tracer) { }
4582 static inline void trace_insert_enum_map_file(struct module *mod,
4583 struct trace_enum_map **start, int len) { }
4584 #endif /* !CONFIG_TRACE_ENUM_MAP_FILE */
4586 static void trace_insert_enum_map(struct module *mod,
4587 struct trace_enum_map **start, int len)
4589 struct trace_enum_map **map;
4596 trace_event_enum_update(map, len);
4598 trace_insert_enum_map_file(mod, start, len);
4602 tracing_set_trace_read(struct file *filp, char __user *ubuf,
4603 size_t cnt, loff_t *ppos)
4605 struct trace_array *tr = filp->private_data;
4606 char buf[MAX_TRACER_SIZE+2];
4609 mutex_lock(&trace_types_lock);
4610 r = sprintf(buf, "%s\n", tr->current_trace->name);
4611 mutex_unlock(&trace_types_lock);
4613 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
4616 int tracer_init(struct tracer *t, struct trace_array *tr)
4618 tracing_reset_online_cpus(&tr->trace_buffer);
4622 static void set_buffer_entries(struct trace_buffer *buf, unsigned long val)
4626 for_each_tracing_cpu(cpu)
4627 per_cpu_ptr(buf->data, cpu)->entries = val;
4630 #ifdef CONFIG_TRACER_MAX_TRACE
4631 /* resize @trace_buf's buffer to the size of @size_buf's entries */
4632 static int resize_buffer_duplicate_size(struct trace_buffer *trace_buf,
4633 struct trace_buffer *size_buf, int cpu_id)
4637 if (cpu_id == RING_BUFFER_ALL_CPUS) {
4638 for_each_tracing_cpu(cpu) {
4639 ret = ring_buffer_resize(trace_buf->buffer,
4640 per_cpu_ptr(size_buf->data, cpu)->entries, cpu);
4643 per_cpu_ptr(trace_buf->data, cpu)->entries =
4644 per_cpu_ptr(size_buf->data, cpu)->entries;
4647 ret = ring_buffer_resize(trace_buf->buffer,
4648 per_cpu_ptr(size_buf->data, cpu_id)->entries, cpu_id);
4650 per_cpu_ptr(trace_buf->data, cpu_id)->entries =
4651 per_cpu_ptr(size_buf->data, cpu_id)->entries;
4656 #endif /* CONFIG_TRACER_MAX_TRACE */
4658 static int __tracing_resize_ring_buffer(struct trace_array *tr,
4659 unsigned long size, int cpu)
4664 * If kernel or user changes the size of the ring buffer
4665 * we use the size that was given, and we can forget about
4666 * expanding it later.
4668 ring_buffer_expanded = true;
4670 /* May be called before buffers are initialized */
4671 if (!tr->trace_buffer.buffer)
4674 ret = ring_buffer_resize(tr->trace_buffer.buffer, size, cpu);
4678 #ifdef CONFIG_TRACER_MAX_TRACE
4679 if (!(tr->flags & TRACE_ARRAY_FL_GLOBAL) ||
4680 !tr->current_trace->use_max_tr)
4683 ret = ring_buffer_resize(tr->max_buffer.buffer, size, cpu);
4685 int r = resize_buffer_duplicate_size(&tr->trace_buffer,
4686 &tr->trace_buffer, cpu);
2689 * AARGH! We are left with a differently
2690 * sized max buffer!
2691 * The max buffer is our "snapshot" buffer.
2692 * When a tracer needs a snapshot (one of the
2693 * latency tracers), it swaps the max buffer
2694 * with the saved snapshot. We succeeded in
2695 * updating the size of the main buffer, but failed to
2696 * update the size of the max buffer. Then, when we tried
2697 * to reset the main buffer to the original size, we
2698 * failed there too. This is very unlikely to
2699 * happen, but if it does, warn and kill all tracing.
4703 tracing_disabled = 1;
4708 if (cpu == RING_BUFFER_ALL_CPUS)
4709 set_buffer_entries(&tr->max_buffer, size);
4711 per_cpu_ptr(tr->max_buffer.data, cpu)->entries = size;
4714 #endif /* CONFIG_TRACER_MAX_TRACE */
4716 if (cpu == RING_BUFFER_ALL_CPUS)
4717 set_buffer_entries(&tr->trace_buffer, size);
4719 per_cpu_ptr(tr->trace_buffer.data, cpu)->entries = size;
4724 static ssize_t tracing_resize_ring_buffer(struct trace_array *tr,
4725 unsigned long size, int cpu_id)
4729 mutex_lock(&trace_types_lock);
4731 if (cpu_id != RING_BUFFER_ALL_CPUS) {
4732 /* make sure this CPU is enabled in the mask */
4733 if (!cpumask_test_cpu(cpu_id, tracing_buffer_mask)) {
4739 ret = __tracing_resize_ring_buffer(tr, size, cpu_id);
4744 mutex_unlock(&trace_types_lock);
4751 * tracing_update_buffers - used by tracing facility to expand ring buffers
4753 * To save memory when tracing is never used on a system where it is
4754 * configured in, the ring buffers are set to a minimum size. Once
4755 * a user starts to use the tracing facility, they need to grow
4756 * to their default size.
4758 * This function is to be called when a tracer is about to be used.
4760 int tracing_update_buffers(void)
4764 mutex_lock(&trace_types_lock);
4765 if (!ring_buffer_expanded)
4766 ret = __tracing_resize_ring_buffer(&global_trace, trace_buf_size,
4767 RING_BUFFER_ALL_CPUS);
4768 mutex_unlock(&trace_types_lock);
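/*
 * Illustrative call-site sketch: anything about to start generating
 * events should expand the buffers first and back off on failure:
 *
 *	ret = tracing_update_buffers();
 *	if (ret < 0)
 *		return ret;
 *	... enable the tracer or event ...
 */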
4773 struct trace_option_dentry;
4776 create_trace_option_files(struct trace_array *tr, struct tracer *tracer);
4779 * Used to clear out the tracer before deletion of an instance.
4780 * Must have trace_types_lock held.
4782 static void tracing_set_nop(struct trace_array *tr)
4784 if (tr->current_trace == &nop_trace)
4787 tr->current_trace->enabled--;
4789 if (tr->current_trace->reset)
4790 tr->current_trace->reset(tr);
4792 tr->current_trace = &nop_trace;
4795 static void add_tracer_options(struct trace_array *tr, struct tracer *t)
4797 /* Only enable if the directory has been created already. */
4801 create_trace_option_files(tr, t);
4804 static int tracing_set_tracer(struct trace_array *tr, const char *buf)
4807 #ifdef CONFIG_TRACER_MAX_TRACE
4812 mutex_lock(&trace_types_lock);
4814 if (!ring_buffer_expanded) {
4815 ret = __tracing_resize_ring_buffer(tr, trace_buf_size,
4816 RING_BUFFER_ALL_CPUS);
4822 for (t = trace_types; t; t = t->next) {
4823 if (strcmp(t->name, buf) == 0)
4830 if (t == tr->current_trace)
4833 /* Some tracers are only allowed for the top level buffer */
4834 if (!trace_ok_for_array(t, tr)) {
4839 /* If trace pipe files are being read, we can't change the tracer */
4840 if (tr->current_trace->ref) {
4845 trace_branch_disable();
4847 tr->current_trace->enabled--;
4849 if (tr->current_trace->reset)
4850 tr->current_trace->reset(tr);
4852 /* Current trace needs to be nop_trace before synchronize_sched */
4853 tr->current_trace = &nop_trace;
4855 #ifdef CONFIG_TRACER_MAX_TRACE
4856 had_max_tr = tr->allocated_snapshot;
4858 if (had_max_tr && !t->use_max_tr) {
4860 * We need to make sure that the update_max_tr sees that
4861 * current_trace changed to nop_trace to keep it from
4862 * swapping the buffers after we resize it.
4863 * update_max_tr() is called with interrupts disabled,
4864 * so a synchronize_sched() is sufficient.
4866 synchronize_sched();
4871 #ifdef CONFIG_TRACER_MAX_TRACE
4872 if (t->use_max_tr && !had_max_tr) {
4873 ret = alloc_snapshot(tr);
4880 ret = tracer_init(t, tr);
4885 tr->current_trace = t;
4886 tr->current_trace->enabled++;
4887 trace_branch_enable(tr);
4889 mutex_unlock(&trace_types_lock);
4895 tracing_set_trace_write(struct file *filp, const char __user *ubuf,
4896 size_t cnt, loff_t *ppos)
4898 struct trace_array *tr = filp->private_data;
4899 char buf[MAX_TRACER_SIZE+1];
4906 if (cnt > MAX_TRACER_SIZE)
4907 cnt = MAX_TRACER_SIZE;
4909 if (copy_from_user(buf, ubuf, cnt))
4914 /* strip trailing whitespace. */
4915 for (i = cnt - 1; i > 0 && isspace(buf[i]); i--)
4918 err = tracing_set_tracer(tr, buf);
4928 tracing_nsecs_read(unsigned long *ptr, char __user *ubuf,
4929 size_t cnt, loff_t *ppos)
4934 r = snprintf(buf, sizeof(buf), "%ld\n",
4935 *ptr == (unsigned long)-1 ? -1 : nsecs_to_usecs(*ptr));
4936 if (r > sizeof(buf))
4938 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
4942 tracing_nsecs_write(unsigned long *ptr, const char __user *ubuf,
4943 size_t cnt, loff_t *ppos)
4948 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
4958 tracing_thresh_read(struct file *filp, char __user *ubuf,
4959 size_t cnt, loff_t *ppos)
4961 return tracing_nsecs_read(&tracing_thresh, ubuf, cnt, ppos);
4965 tracing_thresh_write(struct file *filp, const char __user *ubuf,
4966 size_t cnt, loff_t *ppos)
4968 struct trace_array *tr = filp->private_data;
4971 mutex_lock(&trace_types_lock);
4972 ret = tracing_nsecs_write(&tracing_thresh, ubuf, cnt, ppos);
4976 if (tr->current_trace->update_thresh) {
4977 ret = tr->current_trace->update_thresh(tr);
4984 mutex_unlock(&trace_types_lock);
4989 #if defined(CONFIG_TRACER_MAX_TRACE) || defined(CONFIG_HWLAT_TRACER)
4992 tracing_max_lat_read(struct file *filp, char __user *ubuf,
4993 size_t cnt, loff_t *ppos)
4995 return tracing_nsecs_read(filp->private_data, ubuf, cnt, ppos);
4999 tracing_max_lat_write(struct file *filp, const char __user *ubuf,
5000 size_t cnt, loff_t *ppos)
5002 return tracing_nsecs_write(filp->private_data, ubuf, cnt, ppos);
5007 static int tracing_open_pipe(struct inode *inode, struct file *filp)
5009 struct trace_array *tr = inode->i_private;
5010 struct trace_iterator *iter;
5013 if (tracing_disabled)
5016 if (trace_array_get(tr) < 0)
5019 mutex_lock(&trace_types_lock);
5021 /* create a buffer to store the information to pass to userspace */
5022 iter = kzalloc(sizeof(*iter), GFP_KERNEL);
5025 __trace_array_put(tr);
5029 trace_seq_init(&iter->seq);
5030 iter->trace = tr->current_trace;
5032 if (!alloc_cpumask_var(&iter->started, GFP_KERNEL)) {
5037 /* trace pipe does not show start of buffer */
5038 cpumask_setall(iter->started);
5040 if (tr->trace_flags & TRACE_ITER_LATENCY_FMT)
5041 iter->iter_flags |= TRACE_FILE_LAT_FMT;
5043 /* Output in nanoseconds only if we are using a clock in nanoseconds. */
5044 if (trace_clocks[tr->clock_id].in_ns)
5045 iter->iter_flags |= TRACE_FILE_TIME_IN_NS;
5048 iter->trace_buffer = &tr->trace_buffer;
5049 iter->cpu_file = tracing_get_cpu(inode);
5050 mutex_init(&iter->mutex);
5051 filp->private_data = iter;
5053 if (iter->trace->pipe_open)
5054 iter->trace->pipe_open(iter);
5056 nonseekable_open(inode, filp);
5058 tr->current_trace->ref++;
5060 mutex_unlock(&trace_types_lock);
5065 __trace_array_put(tr);
5066 mutex_unlock(&trace_types_lock);
5070 static int tracing_release_pipe(struct inode *inode, struct file *file)
5072 struct trace_iterator *iter = file->private_data;
5073 struct trace_array *tr = inode->i_private;
5075 mutex_lock(&trace_types_lock);
5077 tr->current_trace->ref--;
5079 if (iter->trace->pipe_close)
5080 iter->trace->pipe_close(iter);
5082 mutex_unlock(&trace_types_lock);
5084 free_cpumask_var(iter->started);
5085 mutex_destroy(&iter->mutex);
5088 trace_array_put(tr);
5094 trace_poll(struct trace_iterator *iter, struct file *filp, poll_table *poll_table)
5096 struct trace_array *tr = iter->tr;
5098 /* Iterators are static, they should be filled or empty */
5099 if (trace_buffer_iter(iter, iter->cpu_file))
5100 return POLLIN | POLLRDNORM;
5102 if (tr->trace_flags & TRACE_ITER_BLOCK)
5104 * Always select as readable when in blocking mode
5106 return POLLIN | POLLRDNORM;
5108 return ring_buffer_poll_wait(iter->trace_buffer->buffer, iter->cpu_file,
5113 tracing_poll_pipe(struct file *filp, poll_table *poll_table)
5115 struct trace_iterator *iter = filp->private_data;
5117 return trace_poll(iter, filp, poll_table);
5120 /* Must be called with iter->mutex held. */
5121 static int tracing_wait_pipe(struct file *filp)
5123 struct trace_iterator *iter = filp->private_data;
5126 while (trace_empty(iter)) {
5128 if ((filp->f_flags & O_NONBLOCK)) {
5133 * We block until we read something and tracing is disabled.
5134 * We keep blocking if tracing is disabled but we have not yet
5135 * read anything. This allows a user to cat this file and
5136 * then enable tracing. But after we have read something,
5137 * we give an EOF when tracing is disabled again.
5139 * iter->pos will be 0 if we haven't read anything.
5141 if (!tracer_tracing_is_on(iter->tr) && iter->pos)
5144 mutex_unlock(&iter->mutex);
5146 ret = wait_on_pipe(iter, false);
5148 mutex_lock(&iter->mutex);
5161 tracing_read_pipe(struct file *filp, char __user *ubuf,
5162 size_t cnt, loff_t *ppos)
5164 struct trace_iterator *iter = filp->private_data;
5168 * Avoid more than one consumer on a single file descriptor.
5169 * This is just a matter of trace coherency; the ring buffer itself
5172 mutex_lock(&iter->mutex);
5174 /* return any leftover data */
5175 sret = trace_seq_to_user(&iter->seq, ubuf, cnt);
5179 trace_seq_init(&iter->seq);
5181 if (iter->trace->read) {
5182 sret = iter->trace->read(iter, filp, ubuf, cnt, ppos);
5188 sret = tracing_wait_pipe(filp);
5192 /* stop when tracing is finished */
5193 if (trace_empty(iter)) {
5198 if (cnt >= PAGE_SIZE)
5199 cnt = PAGE_SIZE - 1;
5201 /* reset all but tr, trace, and overruns */
5202 memset(&iter->seq, 0,
5203 sizeof(struct trace_iterator) -
5204 offsetof(struct trace_iterator, seq));
5205 cpumask_clear(iter->started);
5206 trace_seq_init(&iter->seq);
5209 trace_event_read_lock();
5210 trace_access_lock(iter->cpu_file);
5211 while (trace_find_next_entry_inc(iter) != NULL) {
5212 enum print_line_t ret;
5213 int save_len = iter->seq.seq.len;
5215 ret = print_trace_line(iter);
5216 if (ret == TRACE_TYPE_PARTIAL_LINE) {
5217 /* don't print partial lines */
5218 iter->seq.seq.len = save_len;
5221 if (ret != TRACE_TYPE_NO_CONSUME)
5222 trace_consume(iter);
5224 if (trace_seq_used(&iter->seq) >= cnt)
5228 * Setting the full flag means we reached the trace_seq buffer
5229 * size and we should have left via the partial-output condition above.
5230 * One of the trace_seq_* functions is not being used properly.
5232 WARN_ONCE(iter->seq.full, "full flag set for trace type %d",
5235 trace_access_unlock(iter->cpu_file);
5236 trace_event_read_unlock();
5238 /* Now copy what we have to the user */
5239 sret = trace_seq_to_user(&iter->seq, ubuf, cnt);
5240 if (iter->seq.seq.readpos >= trace_seq_used(&iter->seq))
5241 trace_seq_init(&iter->seq);
5244 * If there was nothing to send to user, in spite of consuming trace
5245 * entries, go back to wait for more entries.
5251 mutex_unlock(&iter->mutex);
5256 static void tracing_spd_release_pipe(struct splice_pipe_desc *spd,
5259 __free_page(spd->pages[idx]);
5262 static const struct pipe_buf_operations tracing_pipe_buf_ops = {
5264 .confirm = generic_pipe_buf_confirm,
5265 .release = generic_pipe_buf_release,
5266 .steal = generic_pipe_buf_steal,
5267 .get = generic_pipe_buf_get,
5271 tracing_fill_pipe_page(size_t rem, struct trace_iterator *iter)
5277 /* Seq buffer is page-sized, exactly what we need. */
5279 save_len = iter->seq.seq.len;
5280 ret = print_trace_line(iter);
5282 if (trace_seq_has_overflowed(&iter->seq)) {
5283 iter->seq.seq.len = save_len;
5288 * This should not be hit, because it should only
5289 * be set if the iter->seq overflowed. But check it
5290 * anyway to be safe.
5292 if (ret == TRACE_TYPE_PARTIAL_LINE) {
5293 iter->seq.seq.len = save_len;
5297 count = trace_seq_used(&iter->seq) - save_len;
5300 iter->seq.seq.len = save_len;
5304 if (ret != TRACE_TYPE_NO_CONSUME)
5305 trace_consume(iter);
5307 if (!trace_find_next_entry_inc(iter)) {
5317 static ssize_t tracing_splice_read_pipe(struct file *filp,
5319 struct pipe_inode_info *pipe,
5323 struct page *pages_def[PIPE_DEF_BUFFERS];
5324 struct partial_page partial_def[PIPE_DEF_BUFFERS];
5325 struct trace_iterator *iter = filp->private_data;
5326 struct splice_pipe_desc spd = {
5328 .partial = partial_def,
5329 .nr_pages = 0, /* This gets updated below. */
5330 .nr_pages_max = PIPE_DEF_BUFFERS,
5332 .ops = &tracing_pipe_buf_ops,
5333 .spd_release = tracing_spd_release_pipe,
5339 if (splice_grow_spd(pipe, &spd))
5342 mutex_lock(&iter->mutex);
5344 if (iter->trace->splice_read) {
5345 ret = iter->trace->splice_read(iter, filp,
5346 ppos, pipe, len, flags);
5351 ret = tracing_wait_pipe(filp);
5355 if (!iter->ent && !trace_find_next_entry_inc(iter)) {
5360 trace_event_read_lock();
5361 trace_access_lock(iter->cpu_file);
5363 /* Fill as many pages as possible. */
5364 for (i = 0, rem = len; i < spd.nr_pages_max && rem; i++) {
5365 spd.pages[i] = alloc_page(GFP_KERNEL);
5369 rem = tracing_fill_pipe_page(rem, iter);
5371 /* Copy the data into the page, so we can start over. */
5372 ret = trace_seq_to_buffer(&iter->seq,
5373 page_address(spd.pages[i]),
5374 trace_seq_used(&iter->seq));
5376 __free_page(spd.pages[i]);
5379 spd.partial[i].offset = 0;
5380 spd.partial[i].len = trace_seq_used(&iter->seq);
5382 trace_seq_init(&iter->seq);
5385 trace_access_unlock(iter->cpu_file);
5386 trace_event_read_unlock();
5387 mutex_unlock(&iter->mutex);
5392 ret = splice_to_pipe(pipe, &spd);
5396 splice_shrink_spd(&spd);
5400 mutex_unlock(&iter->mutex);
5405 tracing_entries_read(struct file *filp, char __user *ubuf,
5406 size_t cnt, loff_t *ppos)
5408 struct inode *inode = file_inode(filp);
5409 struct trace_array *tr = inode->i_private;
5410 int cpu = tracing_get_cpu(inode);
5415 mutex_lock(&trace_types_lock);
5417 if (cpu == RING_BUFFER_ALL_CPUS) {
5418 int cpu, buf_size_same;
5423 /* check if all CPU sizes are the same */
5424 for_each_tracing_cpu(cpu) {
5425 /* fill in the size from the first enabled CPU */
5427 size = per_cpu_ptr(tr->trace_buffer.data, cpu)->entries;
5428 if (size != per_cpu_ptr(tr->trace_buffer.data, cpu)->entries) {
5434 if (buf_size_same) {
5435 if (!ring_buffer_expanded)
5436 r = sprintf(buf, "%lu (expanded: %lu)\n",
5438 trace_buf_size >> 10);
5440 r = sprintf(buf, "%lu\n", size >> 10);
5442 r = sprintf(buf, "X\n");
5444 r = sprintf(buf, "%lu\n", per_cpu_ptr(tr->trace_buffer.data, cpu)->entries >> 10);
5446 mutex_unlock(&trace_types_lock);
5448 ret = simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
5453 tracing_entries_write(struct file *filp, const char __user *ubuf,
5454 size_t cnt, loff_t *ppos)
5456 struct inode *inode = file_inode(filp);
5457 struct trace_array *tr = inode->i_private;
5461 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
5465 /* must have at least 1 entry */
5469 /* value is in KB */
5471 ret = tracing_resize_ring_buffer(tr, val, tracing_get_cpu(inode));
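/*
 * Example usage (a sketch; assumes tracefs is mounted at
 * /sys/kernel/tracing): the value written is in KB, and the per-cpu
 * variant of the file resizes only that CPU's buffer, while the
 * top-level file resizes all of them.
 *
 *   echo 4096 > /sys/kernel/tracing/buffer_size_kb
 *   echo 4096 > /sys/kernel/tracing/per_cpu/cpu0/buffer_size_kb
 */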
5481 tracing_total_entries_read(struct file *filp, char __user *ubuf,
5482 size_t cnt, loff_t *ppos)
5484 struct trace_array *tr = filp->private_data;
5487 unsigned long size = 0, expanded_size = 0;
5489 mutex_lock(&trace_types_lock);
5490 for_each_tracing_cpu(cpu) {
5491 size += per_cpu_ptr(tr->trace_buffer.data, cpu)->entries >> 10;
5492 if (!ring_buffer_expanded)
5493 expanded_size += trace_buf_size >> 10;
5495 if (ring_buffer_expanded)
5496 r = sprintf(buf, "%lu\n", size);
5498 r = sprintf(buf, "%lu (expanded: %lu)\n", size, expanded_size);
5499 mutex_unlock(&trace_types_lock);
5501 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
5505 tracing_free_buffer_write(struct file *filp, const char __user *ubuf,
5506 size_t cnt, loff_t *ppos)
5509 * There is no need to read what the user has written; this function
5510 * is just to make sure that there is no error when "echo" is used
5519 tracing_free_buffer_release(struct inode *inode, struct file *filp)
5521 struct trace_array *tr = inode->i_private;
5523 /* disable tracing ? */
5524 if (tr->trace_flags & TRACE_ITER_STOP_ON_FREE)
5525 tracer_tracing_off(tr);
5526 /* resize the ring buffer to 0 */
5527 tracing_resize_ring_buffer(tr, 0, RING_BUFFER_ALL_CPUS);
5529 trace_array_put(tr);
5535 tracing_mark_write(struct file *filp, const char __user *ubuf,
5536 size_t cnt, loff_t *fpos)
5538 unsigned long addr = (unsigned long)ubuf;
5539 struct trace_array *tr = filp->private_data;
5540 struct ring_buffer_event *event;
5541 struct ring_buffer *buffer;
5542 struct print_entry *entry;
5543 unsigned long irq_flags;
5544 struct page *pages[2];
5554 if (tracing_disabled)
5557 if (!(tr->trace_flags & TRACE_ITER_MARKERS))
5560 if (cnt > TRACE_BUF_SIZE)
5561 cnt = TRACE_BUF_SIZE;
5564 * Userspace is injecting traces into the kernel trace buffer.
5565 * We want to be as non-intrusive as possible.
5566 * To do so, we do not want to allocate any special buffers
5567 * or take any locks, but instead write the userspace data
5568 * straight into the ring buffer.
5570 * First we need to pin the userspace buffer into memory.
5571 * Most likely it already is, because userspace just referenced it,
5572 * but there's no guarantee that it is. By using get_user_pages_fast()
5573 * and kmap_atomic/kunmap_atomic() we can get access to the
5574 * pages directly. We then write the data directly into the
5577 BUILD_BUG_ON(TRACE_BUF_SIZE >= PAGE_SIZE);
5579 /* check if we cross pages */
5580 if ((addr & PAGE_MASK) != ((addr + cnt) & PAGE_MASK))
5583 offset = addr & (PAGE_SIZE - 1);
5586 ret = get_user_pages_fast(addr, nr_pages, 0, pages);
5587 if (ret < nr_pages) {
5589 put_page(pages[ret]);
5594 for (i = 0; i < nr_pages; i++)
5595 map_page[i] = kmap_atomic(pages[i]);
5597 local_save_flags(irq_flags);
5598 size = sizeof(*entry) + cnt + 2; /* possible \n added */
5599 buffer = tr->trace_buffer.buffer;
5600 event = trace_buffer_lock_reserve(buffer, TRACE_PRINT, size,
5601 irq_flags, preempt_count());
5603 /* Ring buffer disabled, return as if not open for write */
5608 entry = ring_buffer_event_data(event);
5609 entry->ip = _THIS_IP_;
5611 if (nr_pages == 2) {
5612 len = PAGE_SIZE - offset;
5613 memcpy(&entry->buf, map_page[0] + offset, len);
5614 memcpy(&entry->buf[len], map_page[1], cnt - len);
5616 memcpy(&entry->buf, map_page[0] + offset, cnt);
5618 if (entry->buf[cnt - 1] != '\n') {
5619 entry->buf[cnt] = '\n';
5620 entry->buf[cnt + 1] = '\0';
5622 entry->buf[cnt] = '\0';
5624 __buffer_unlock_commit(buffer, event);
5631 for (i = nr_pages - 1; i >= 0; i--) {
5632 kunmap_atomic(map_page[i]);
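/*
 * Example usage (a sketch; assumes tracefs is mounted at
 * /sys/kernel/tracing): userspace injects a marker with a plain
 * write() and the text shows up as a print event in the trace output.
 *
 *   int fd = open("/sys/kernel/tracing/trace_marker", O_WRONLY);
 *   write(fd, "hello from userspace\n", 21);
 *   close(fd);
 */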
5639 static int tracing_clock_show(struct seq_file *m, void *v)
5641 struct trace_array *tr = m->private;
5644 for (i = 0; i < ARRAY_SIZE(trace_clocks); i++)
5646 "%s%s%s%s", i ? " " : "",
5647 i == tr->clock_id ? "[" : "", trace_clocks[i].name,
5648 i == tr->clock_id ? "]" : "");
5654 static int tracing_set_clock(struct trace_array *tr, const char *clockstr)
5658 for (i = 0; i < ARRAY_SIZE(trace_clocks); i++) {
5659 if (strcmp(trace_clocks[i].name, clockstr) == 0)
5662 if (i == ARRAY_SIZE(trace_clocks))
5665 mutex_lock(&trace_types_lock);
5669 ring_buffer_set_clock(tr->trace_buffer.buffer, trace_clocks[i].func);
5672 * New clock may not be consistent with the previous clock.
5673 * Reset the buffer so that it doesn't have incomparable timestamps.
5675 tracing_reset_online_cpus(&tr->trace_buffer);
5677 #ifdef CONFIG_TRACER_MAX_TRACE
5678 if (tr->max_buffer.buffer)
5679 ring_buffer_set_clock(tr->max_buffer.buffer, trace_clocks[i].func);
5680 tracing_reset_online_cpus(&tr->max_buffer);
5683 mutex_unlock(&trace_types_lock);
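/*
 * Example usage (a sketch; assumes tracefs is mounted at
 * /sys/kernel/tracing): reading trace_clock lists the available clocks
 * with the current one in brackets, and writing a clock name switches
 * to it (resetting the buffers, as noted above).
 *
 *   cat /sys/kernel/tracing/trace_clock
 *   echo global > /sys/kernel/tracing/trace_clock
 */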
5688 static ssize_t tracing_clock_write(struct file *filp, const char __user *ubuf,
5689 size_t cnt, loff_t *fpos)
5691 struct seq_file *m = filp->private_data;
5692 struct trace_array *tr = m->private;
5694 const char *clockstr;
5697 if (cnt >= sizeof(buf))
5700 if (copy_from_user(buf, ubuf, cnt))
5705 clockstr = strstrip(buf);
5707 ret = tracing_set_clock(tr, clockstr);
5716 static int tracing_clock_open(struct inode *inode, struct file *file)
5718 struct trace_array *tr = inode->i_private;
5721 if (tracing_disabled)
5724 if (trace_array_get(tr))
5727 ret = single_open(file, tracing_clock_show, inode->i_private);
5729 trace_array_put(tr);
5734 struct ftrace_buffer_info {
5735 struct trace_iterator iter;
5740 #ifdef CONFIG_TRACER_SNAPSHOT
5741 static int tracing_snapshot_open(struct inode *inode, struct file *file)
5743 struct trace_array *tr = inode->i_private;
5744 struct trace_iterator *iter;
5748 if (trace_array_get(tr) < 0)
5751 if (file->f_mode & FMODE_READ) {
5752 iter = __tracing_open(inode, file, true);
5754 ret = PTR_ERR(iter);
5756 /* Writes still need the seq_file to hold the private data */
5758 m = kzalloc(sizeof(*m), GFP_KERNEL);
5761 iter = kzalloc(sizeof(*iter), GFP_KERNEL);
5769 iter->trace_buffer = &tr->max_buffer;
5770 iter->cpu_file = tracing_get_cpu(inode);
5772 file->private_data = m;
5776 trace_array_put(tr);
5782 tracing_snapshot_write(struct file *filp, const char __user *ubuf, size_t cnt,
5785 struct seq_file *m = filp->private_data;
5786 struct trace_iterator *iter = m->private;
5787 struct trace_array *tr = iter->tr;
5791 ret = tracing_update_buffers();
5795 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
5799 mutex_lock(&trace_types_lock);
5801 if (tr->current_trace->use_max_tr) {
5808 if (iter->cpu_file != RING_BUFFER_ALL_CPUS) {
5812 if (tr->allocated_snapshot)
5816 /* Only allow per-cpu swap if the ring buffer supports it */
5817 #ifndef CONFIG_RING_BUFFER_ALLOW_SWAP
5818 if (iter->cpu_file != RING_BUFFER_ALL_CPUS) {
5823 if (!tr->allocated_snapshot)
5824 ret = resize_buffer_duplicate_size(&tr->max_buffer,
5825 &tr->trace_buffer, iter->cpu_file);
5827 ret = alloc_snapshot(tr);
5832 local_irq_disable();
5833 /* Now, we're going to swap */
5834 if (iter->cpu_file == RING_BUFFER_ALL_CPUS)
5835 update_max_tr(tr, current, smp_processor_id());
5837 update_max_tr_single(tr, current, iter->cpu_file);
5841 if (tr->allocated_snapshot) {
5842 if (iter->cpu_file == RING_BUFFER_ALL_CPUS)
5843 tracing_reset_online_cpus(&tr->max_buffer);
5845 tracing_reset(&tr->max_buffer, iter->cpu_file);
5855 mutex_unlock(&trace_types_lock);
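/*
 * Example usage (a sketch; assumes tracefs is mounted at
 * /sys/kernel/tracing, and that the written value selects the action as
 * documented for the snapshot file): 1 takes a snapshot (allocating the
 * max buffer if needed), 0 frees it, and 2 clears it without freeing.
 *
 *   echo 1 > /sys/kernel/tracing/snapshot
 *   cat /sys/kernel/tracing/snapshot
 *   echo 0 > /sys/kernel/tracing/snapshot
 */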
5859 static int tracing_snapshot_release(struct inode *inode, struct file *file)
5861 struct seq_file *m = file->private_data;
5864 ret = tracing_release(inode, file);
5866 if (file->f_mode & FMODE_READ)
5869 /* If write only, the seq_file is just a stub */
5877 static int tracing_buffers_open(struct inode *inode, struct file *filp);
5878 static ssize_t tracing_buffers_read(struct file *filp, char __user *ubuf,
5879 size_t count, loff_t *ppos);
5880 static int tracing_buffers_release(struct inode *inode, struct file *file);
5881 static ssize_t tracing_buffers_splice_read(struct file *file, loff_t *ppos,
5882 struct pipe_inode_info *pipe, size_t len, unsigned int flags);
5884 static int snapshot_raw_open(struct inode *inode, struct file *filp)
5886 struct ftrace_buffer_info *info;
5889 ret = tracing_buffers_open(inode, filp);
5893 info = filp->private_data;
5895 if (info->iter.trace->use_max_tr) {
5896 tracing_buffers_release(inode, filp);
5900 info->iter.snapshot = true;
5901 info->iter.trace_buffer = &info->iter.tr->max_buffer;
5906 #endif /* CONFIG_TRACER_SNAPSHOT */
5909 static const struct file_operations tracing_thresh_fops = {
5910 .open = tracing_open_generic,
5911 .read = tracing_thresh_read,
5912 .write = tracing_thresh_write,
5913 .llseek = generic_file_llseek,
5916 #if defined(CONFIG_TRACER_MAX_TRACE) || defined(CONFIG_HWLAT_TRACER)
5917 static const struct file_operations tracing_max_lat_fops = {
5918 .open = tracing_open_generic,
5919 .read = tracing_max_lat_read,
5920 .write = tracing_max_lat_write,
5921 .llseek = generic_file_llseek,
5925 static const struct file_operations set_tracer_fops = {
5926 .open = tracing_open_generic,
5927 .read = tracing_set_trace_read,
5928 .write = tracing_set_trace_write,
5929 .llseek = generic_file_llseek,
5932 static const struct file_operations tracing_pipe_fops = {
5933 .open = tracing_open_pipe,
5934 .poll = tracing_poll_pipe,
5935 .read = tracing_read_pipe,
5936 .splice_read = tracing_splice_read_pipe,
5937 .release = tracing_release_pipe,
5938 .llseek = no_llseek,
5941 static const struct file_operations tracing_entries_fops = {
5942 .open = tracing_open_generic_tr,
5943 .read = tracing_entries_read,
5944 .write = tracing_entries_write,
5945 .llseek = generic_file_llseek,
5946 .release = tracing_release_generic_tr,
5949 static const struct file_operations tracing_total_entries_fops = {
5950 .open = tracing_open_generic_tr,
5951 .read = tracing_total_entries_read,
5952 .llseek = generic_file_llseek,
5953 .release = tracing_release_generic_tr,
5956 static const struct file_operations tracing_free_buffer_fops = {
5957 .open = tracing_open_generic_tr,
5958 .write = tracing_free_buffer_write,
5959 .release = tracing_free_buffer_release,
5962 static const struct file_operations tracing_mark_fops = {
5963 .open = tracing_open_generic_tr,
5964 .write = tracing_mark_write,
5965 .llseek = generic_file_llseek,
5966 .release = tracing_release_generic_tr,
5969 static const struct file_operations trace_clock_fops = {
5970 .open = tracing_clock_open,
5972 .llseek = seq_lseek,
5973 .release = tracing_single_release_tr,
5974 .write = tracing_clock_write,
5977 #ifdef CONFIG_TRACER_SNAPSHOT
5978 static const struct file_operations snapshot_fops = {
5979 .open = tracing_snapshot_open,
5981 .write = tracing_snapshot_write,
5982 .llseek = tracing_lseek,
5983 .release = tracing_snapshot_release,
5986 static const struct file_operations snapshot_raw_fops = {
5987 .open = snapshot_raw_open,
5988 .read = tracing_buffers_read,
5989 .release = tracing_buffers_release,
5990 .splice_read = tracing_buffers_splice_read,
5991 .llseek = no_llseek,
5994 #endif /* CONFIG_TRACER_SNAPSHOT */
5996 static int tracing_buffers_open(struct inode *inode, struct file *filp)
5998 struct trace_array *tr = inode->i_private;
5999 struct ftrace_buffer_info *info;
6002 if (tracing_disabled)
6005 if (trace_array_get(tr) < 0)
6008 info = kzalloc(sizeof(*info), GFP_KERNEL);
6010 trace_array_put(tr);
6014 mutex_lock(&trace_types_lock);
6017 info->iter.cpu_file = tracing_get_cpu(inode);
6018 info->iter.trace = tr->current_trace;
6019 info->iter.trace_buffer = &tr->trace_buffer;
6021 /* Force reading ring buffer for first read */
6022 info->read = (unsigned int)-1;
6024 filp->private_data = info;
6026 tr->current_trace->ref++;
6028 mutex_unlock(&trace_types_lock);
6030 ret = nonseekable_open(inode, filp);
6032 trace_array_put(tr);
6038 tracing_buffers_poll(struct file *filp, poll_table *poll_table)
6040 struct ftrace_buffer_info *info = filp->private_data;
6041 struct trace_iterator *iter = &info->iter;
6043 return trace_poll(iter, filp, poll_table);
6047 tracing_buffers_read(struct file *filp, char __user *ubuf,
6048 size_t count, loff_t *ppos)
6050 struct ftrace_buffer_info *info = filp->private_data;
6051 struct trace_iterator *iter = &info->iter;
6058 #ifdef CONFIG_TRACER_MAX_TRACE
6059 if (iter->snapshot && iter->tr->current_trace->use_max_tr)
6064 info->spare = ring_buffer_alloc_read_page(iter->trace_buffer->buffer,
6069 /* Do we have previous read data to read? */
6070 if (info->read < PAGE_SIZE)
6074 trace_access_lock(iter->cpu_file);
6075 ret = ring_buffer_read_page(iter->trace_buffer->buffer,
6079 trace_access_unlock(iter->cpu_file);
6082 if (trace_empty(iter)) {
6083 if ((filp->f_flags & O_NONBLOCK))
6086 ret = wait_on_pipe(iter, false);
6097 size = PAGE_SIZE - info->read;
6101 ret = copy_to_user(ubuf, info->spare + info->read, size);
6113 static int tracing_buffers_release(struct inode *inode, struct file *file)
6115 struct ftrace_buffer_info *info = file->private_data;
6116 struct trace_iterator *iter = &info->iter;
6118 mutex_lock(&trace_types_lock);
6120 iter->tr->current_trace->ref--;
6122 __trace_array_put(iter->tr);
6125 ring_buffer_free_read_page(iter->trace_buffer->buffer, info->spare);
6128 mutex_unlock(&trace_types_lock);
6134 struct ring_buffer *buffer;
6139 static void buffer_pipe_buf_release(struct pipe_inode_info *pipe,
6140 struct pipe_buffer *buf)
6142 struct buffer_ref *ref = (struct buffer_ref *)buf->private;
6147 ring_buffer_free_read_page(ref->buffer, ref->page);
6152 static bool buffer_pipe_buf_get(struct pipe_inode_info *pipe,
6153 struct pipe_buffer *buf)
6155 struct buffer_ref *ref = (struct buffer_ref *)buf->private;
6157 if (ref->ref > INT_MAX/2)
6164 /* Pipe buffer operations for a buffer. */
6165 static const struct pipe_buf_operations buffer_pipe_buf_ops = {
6167 .confirm = generic_pipe_buf_confirm,
6168 .release = buffer_pipe_buf_release,
6169 .steal = generic_pipe_buf_steal,
6170 .get = buffer_pipe_buf_get,
6174 * Callback from splice_to_pipe(), used when we need to release some pages
6175 * at the end of the spd in case we errored out while filling the pipe.
6177 static void buffer_spd_release(struct splice_pipe_desc *spd, unsigned int i)
6179 struct buffer_ref *ref =
6180 (struct buffer_ref *)spd->partial[i].private;
6185 ring_buffer_free_read_page(ref->buffer, ref->page);
6187 spd->partial[i].private = 0;
6191 tracing_buffers_splice_read(struct file *file, loff_t *ppos,
6192 struct pipe_inode_info *pipe, size_t len,
6195 struct ftrace_buffer_info *info = file->private_data;
6196 struct trace_iterator *iter = &info->iter;
6197 struct partial_page partial_def[PIPE_DEF_BUFFERS];
6198 struct page *pages_def[PIPE_DEF_BUFFERS];
6199 struct splice_pipe_desc spd = {
6201 .partial = partial_def,
6202 .nr_pages_max = PIPE_DEF_BUFFERS,
6204 .ops = &buffer_pipe_buf_ops,
6205 .spd_release = buffer_spd_release,
6207 struct buffer_ref *ref;
6211 #ifdef CONFIG_TRACER_MAX_TRACE
6212 if (iter->snapshot && iter->tr->current_trace->use_max_tr)
6216 if (*ppos & (PAGE_SIZE - 1))
6219 if (len & (PAGE_SIZE - 1)) {
6220 if (len < PAGE_SIZE)
6225 if (splice_grow_spd(pipe, &spd))
6229 trace_access_lock(iter->cpu_file);
6230 entries = ring_buffer_entries_cpu(iter->trace_buffer->buffer, iter->cpu_file);
6232 for (i = 0; i < spd.nr_pages_max && len && entries; i++, len -= PAGE_SIZE) {
6236 ref = kzalloc(sizeof(*ref), GFP_KERNEL);
6243 ref->buffer = iter->trace_buffer->buffer;
6244 ref->page = ring_buffer_alloc_read_page(ref->buffer, iter->cpu_file);
6251 r = ring_buffer_read_page(ref->buffer, &ref->page,
6252 len, iter->cpu_file, 1);
6254 ring_buffer_free_read_page(ref->buffer, ref->page);
6259 page = virt_to_page(ref->page);
6261 spd.pages[i] = page;
6262 spd.partial[i].len = PAGE_SIZE;
6263 spd.partial[i].offset = 0;
6264 spd.partial[i].private = (unsigned long)ref;
6268 entries = ring_buffer_entries_cpu(iter->trace_buffer->buffer, iter->cpu_file);
6271 trace_access_unlock(iter->cpu_file);
6274 /* did we read anything? */
6275 if (!spd.nr_pages) {
6280 if ((file->f_flags & O_NONBLOCK) || (flags & SPLICE_F_NONBLOCK))
6283 ret = wait_on_pipe(iter, true);
6290 ret = splice_to_pipe(pipe, &spd);
6292 splice_shrink_spd(&spd);
6297 static const struct file_operations tracing_buffers_fops = {
6298 .open = tracing_buffers_open,
6299 .read = tracing_buffers_read,
6300 .poll = tracing_buffers_poll,
6301 .release = tracing_buffers_release,
6302 .splice_read = tracing_buffers_splice_read,
6303 .llseek = no_llseek,
6307 tracing_stats_read(struct file *filp, char __user *ubuf,
6308 size_t count, loff_t *ppos)
6310 struct inode *inode = file_inode(filp);
6311 struct trace_array *tr = inode->i_private;
6312 struct trace_buffer *trace_buf = &tr->trace_buffer;
6313 int cpu = tracing_get_cpu(inode);
6314 struct trace_seq *s;
6316 unsigned long long t;
6317 unsigned long usec_rem;
6319 s = kmalloc(sizeof(*s), GFP_KERNEL);
6325 cnt = ring_buffer_entries_cpu(trace_buf->buffer, cpu);
6326 trace_seq_printf(s, "entries: %ld\n", cnt);
6328 cnt = ring_buffer_overrun_cpu(trace_buf->buffer, cpu);
6329 trace_seq_printf(s, "overrun: %ld\n", cnt);
6331 cnt = ring_buffer_commit_overrun_cpu(trace_buf->buffer, cpu);
6332 trace_seq_printf(s, "commit overrun: %ld\n", cnt);
6334 cnt = ring_buffer_bytes_cpu(trace_buf->buffer, cpu);
6335 trace_seq_printf(s, "bytes: %ld\n", cnt);
6337 if (trace_clocks[tr->clock_id].in_ns) {
6338 /* local or global for trace_clock */
6339 t = ns2usecs(ring_buffer_oldest_event_ts(trace_buf->buffer, cpu));
6340 usec_rem = do_div(t, USEC_PER_SEC);
6341 trace_seq_printf(s, "oldest event ts: %5llu.%06lu\n",
6344 t = ns2usecs(ring_buffer_time_stamp(trace_buf->buffer, cpu));
6345 usec_rem = do_div(t, USEC_PER_SEC);
6346 trace_seq_printf(s, "now ts: %5llu.%06lu\n", t, usec_rem);
6348 /* counter or tsc mode for trace_clock */
6349 trace_seq_printf(s, "oldest event ts: %llu\n",
6350 ring_buffer_oldest_event_ts(trace_buf->buffer, cpu));
6352 trace_seq_printf(s, "now ts: %llu\n",
6353 ring_buffer_time_stamp(trace_buf->buffer, cpu));
6356 cnt = ring_buffer_dropped_events_cpu(trace_buf->buffer, cpu);
6357 trace_seq_printf(s, "dropped events: %ld\n", cnt);
6359 cnt = ring_buffer_read_events_cpu(trace_buf->buffer, cpu);
6360 trace_seq_printf(s, "read events: %ld\n", cnt);
6362 count = simple_read_from_buffer(ubuf, count, ppos,
6363 s->buffer, trace_seq_used(s));
6370 static const struct file_operations tracing_stats_fops = {
6371 .open = tracing_open_generic_tr,
6372 .read = tracing_stats_read,
6373 .llseek = generic_file_llseek,
6374 .release = tracing_release_generic_tr,
6377 #ifdef CONFIG_DYNAMIC_FTRACE
6379 int __weak ftrace_arch_read_dyn_info(char *buf, int size)
6385 tracing_read_dyn_info(struct file *filp, char __user *ubuf,
6386 size_t cnt, loff_t *ppos)
6388 static char ftrace_dyn_info_buffer[1024];
6389 static DEFINE_MUTEX(dyn_info_mutex);
6390 unsigned long *p = filp->private_data;
6391 char *buf = ftrace_dyn_info_buffer;
6392 int size = ARRAY_SIZE(ftrace_dyn_info_buffer);
6395 mutex_lock(&dyn_info_mutex);
6396 r = sprintf(buf, "%ld ", *p);
6398 r += ftrace_arch_read_dyn_info(buf+r, (size-1)-r);
6401 r = simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
6403 mutex_unlock(&dyn_info_mutex);
6408 static const struct file_operations tracing_dyn_info_fops = {
6409 .open = tracing_open_generic,
6410 .read = tracing_read_dyn_info,
6411 .llseek = generic_file_llseek,
6413 #endif /* CONFIG_DYNAMIC_FTRACE */
6415 #if defined(CONFIG_TRACER_SNAPSHOT) && defined(CONFIG_DYNAMIC_FTRACE)
6417 ftrace_snapshot(unsigned long ip, unsigned long parent_ip, void **data)
6423 ftrace_count_snapshot(unsigned long ip, unsigned long parent_ip, void **data)
6425 unsigned long *count = (long *)data;
6437 ftrace_snapshot_print(struct seq_file *m, unsigned long ip,
6438 struct ftrace_probe_ops *ops, void *data)
6440 long count = (long)data;
6442 seq_printf(m, "%ps:", (void *)ip);
6444 seq_puts(m, "snapshot");
6447 seq_puts(m, ":unlimited\n");
6449 seq_printf(m, ":count=%ld\n", count);
6454 static struct ftrace_probe_ops snapshot_probe_ops = {
6455 .func = ftrace_snapshot,
6456 .print = ftrace_snapshot_print,
6459 static struct ftrace_probe_ops snapshot_count_probe_ops = {
6460 .func = ftrace_count_snapshot,
6461 .print = ftrace_snapshot_print,
6465 ftrace_trace_snapshot_callback(struct ftrace_hash *hash,
6466 char *glob, char *cmd, char *param, int enable)
6468 struct ftrace_probe_ops *ops;
6469 void *count = (void *)-1;
6473 /* hash funcs only work with set_ftrace_filter */
6477 ops = param ? &snapshot_count_probe_ops : &snapshot_probe_ops;
6479 if (glob[0] == '!') {
6480 unregister_ftrace_function_probe_func(glob+1, ops);
6487 number = strsep(&param, ":");
6489 if (!strlen(number))
6493 * We use the callback data field (which is a pointer)
6496 ret = kstrtoul(number, 0, (unsigned long *)&count);
6501 ret = alloc_snapshot(&global_trace);
6505 ret = register_ftrace_function_probe(glob, ops, count);
6508 return ret < 0 ? ret : 0;
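/*
 * Example usage (a sketch; assumes tracefs is mounted at
 * /sys/kernel/tracing): the command is attached to functions through
 * set_ftrace_filter with an optional trigger count, and removed with a
 * leading '!'.
 *
 *   echo 'schedule:snapshot:1' > /sys/kernel/tracing/set_ftrace_filter
 *   echo '!schedule:snapshot' > /sys/kernel/tracing/set_ftrace_filter
 */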
6511 static struct ftrace_func_command ftrace_snapshot_cmd = {
6513 .func = ftrace_trace_snapshot_callback,
6516 static __init int register_snapshot_cmd(void)
6518 return register_ftrace_command(&ftrace_snapshot_cmd);
6521 static inline __init int register_snapshot_cmd(void) { return 0; }
6522 #endif /* defined(CONFIG_TRACER_SNAPSHOT) && defined(CONFIG_DYNAMIC_FTRACE) */
6524 static struct dentry *tracing_get_dentry(struct trace_array *tr)
6526 if (WARN_ON(!tr->dir))
6527 return ERR_PTR(-ENODEV);
6529 /* Top directory uses NULL as the parent */
6530 if (tr->flags & TRACE_ARRAY_FL_GLOBAL)
6533 /* All sub buffers have a descriptor */
6537 static struct dentry *tracing_dentry_percpu(struct trace_array *tr, int cpu)
6539 struct dentry *d_tracer;
6542 return tr->percpu_dir;
6544 d_tracer = tracing_get_dentry(tr);
6545 if (IS_ERR(d_tracer))
6548 tr->percpu_dir = tracefs_create_dir("per_cpu", d_tracer);
6550 WARN_ONCE(!tr->percpu_dir,
6551 "Could not create tracefs directory 'per_cpu/%d'\n", cpu);
6553 return tr->percpu_dir;
6556 static struct dentry *
6557 trace_create_cpu_file(const char *name, umode_t mode, struct dentry *parent,
6558 void *data, long cpu, const struct file_operations *fops)
6560 struct dentry *ret = trace_create_file(name, mode, parent, data, fops);
6562 if (ret) /* See tracing_get_cpu() */
6563 d_inode(ret)->i_cdev = (void *)(cpu + 1);
6568 tracing_init_tracefs_percpu(struct trace_array *tr, long cpu)
6570 struct dentry *d_percpu = tracing_dentry_percpu(tr, cpu);
6571 struct dentry *d_cpu;
6572 char cpu_dir[30]; /* 30 characters should be more than enough */
6577 snprintf(cpu_dir, 30, "cpu%ld", cpu);
6578 d_cpu = tracefs_create_dir(cpu_dir, d_percpu);
6580 pr_warn("Could not create tracefs '%s' entry\n", cpu_dir);
6584 /* per cpu trace_pipe */
6585 trace_create_cpu_file("trace_pipe", 0444, d_cpu,
6586 tr, cpu, &tracing_pipe_fops);
6589 trace_create_cpu_file("trace", 0644, d_cpu,
6590 tr, cpu, &tracing_fops);
6592 trace_create_cpu_file("trace_pipe_raw", 0444, d_cpu,
6593 tr, cpu, &tracing_buffers_fops);
6595 trace_create_cpu_file("stats", 0444, d_cpu,
6596 tr, cpu, &tracing_stats_fops);
6598 trace_create_cpu_file("buffer_size_kb", 0444, d_cpu,
6599 tr, cpu, &tracing_entries_fops);
6601 #ifdef CONFIG_TRACER_SNAPSHOT
6602 trace_create_cpu_file("snapshot", 0644, d_cpu,
6603 tr, cpu, &snapshot_fops);
6605 trace_create_cpu_file("snapshot_raw", 0444, d_cpu,
6606 tr, cpu, &snapshot_raw_fops);
6610 #ifdef CONFIG_FTRACE_SELFTEST
6611 /* Let selftest have access to static functions in this file */
6612 #include "trace_selftest.c"
6616 trace_options_read(struct file *filp, char __user *ubuf, size_t cnt,
6619 struct trace_option_dentry *topt = filp->private_data;
6622 if (topt->flags->val & topt->opt->bit)
6627 return simple_read_from_buffer(ubuf, cnt, ppos, buf, 2);
6631 trace_options_write(struct file *filp, const char __user *ubuf, size_t cnt,
6634 struct trace_option_dentry *topt = filp->private_data;
6638 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
6642 if (val != 0 && val != 1)
6645 if (!!(topt->flags->val & topt->opt->bit) != val) {
6646 mutex_lock(&trace_types_lock);
6647 ret = __set_tracer_option(topt->tr, topt->flags,
6649 mutex_unlock(&trace_types_lock);
6660 static const struct file_operations trace_options_fops = {
6661 .open = tracing_open_generic,
6662 .read = trace_options_read,
6663 .write = trace_options_write,
6664 .llseek = generic_file_llseek,
6668 * In order to pass in both the trace_array descriptor and the index
6669 * to the flag that the trace option file represents, the trace_array
6670 * has a character array of trace_flags_index[], which holds the index
6671 * of the bit for the flag it represents. index[0] == 0, index[1] == 1, etc.
6672 * The address of this character array is passed to the flag option file
6673 * read/write callbacks.
6675 * In order to extract both the index and the trace_array descriptor,
6676 * get_tr_index() uses the following algorithm.
6680 * As the pointer itself contains the address of the index (remember
6683 * Then, to get the trace_array descriptor, we subtract that index
6684 * from the ptr, which gets us to the start of the index itself.
6686 * ptr - idx == &index[0]
6688 * Then a simple container_of() from that pointer gets us to the
6689 * trace_array descriptor.
6691 static void get_tr_index(void *data, struct trace_array **ptr,
6692 unsigned int *pindex)
6694 *pindex = *(unsigned char *)data;
6696 *ptr = container_of(data - *pindex, struct trace_array,
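/*
 * Worked example (a sketch): for the option file of the flag at index 3,
 *
 *   data == &tr->trace_flags_index[3];
 *   *pindex == 3;
 *   data - *pindex == &tr->trace_flags_index[0];
 *   container_of(data - *pindex, struct trace_array, trace_flags_index) == tr;
 *
 * so the single pointer handed to the read/write callbacks yields both
 * the trace_array and the flag's bit index.
 */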
6701 trace_options_core_read(struct file *filp, char __user *ubuf, size_t cnt,
6704 void *tr_index = filp->private_data;
6705 struct trace_array *tr;
6709 get_tr_index(tr_index, &tr, &index);
6711 if (tr->trace_flags & (1 << index))
6716 return simple_read_from_buffer(ubuf, cnt, ppos, buf, 2);
6720 trace_options_core_write(struct file *filp, const char __user *ubuf, size_t cnt,
6723 void *tr_index = filp->private_data;
6724 struct trace_array *tr;
6729 get_tr_index(tr_index, &tr, &index);
6731 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
6735 if (val != 0 && val != 1)
6738 mutex_lock(&trace_types_lock);
6739 ret = set_tracer_flag(tr, 1 << index, val);
6740 mutex_unlock(&trace_types_lock);
6750 static const struct file_operations trace_options_core_fops = {
6751 .open = tracing_open_generic,
6752 .read = trace_options_core_read,
6753 .write = trace_options_core_write,
6754 .llseek = generic_file_llseek,
6757 struct dentry *trace_create_file(const char *name,
6759 struct dentry *parent,
6761 const struct file_operations *fops)
6765 ret = tracefs_create_file(name, mode, parent, data, fops);
6767 pr_warn("Could not create tracefs '%s' entry\n", name);
6773 static struct dentry *trace_options_init_dentry(struct trace_array *tr)
6775 struct dentry *d_tracer;
6780 d_tracer = tracing_get_dentry(tr);
6781 if (IS_ERR(d_tracer))
6784 tr->options = tracefs_create_dir("options", d_tracer);
6786 pr_warn("Could not create tracefs directory 'options'\n");
6794 create_trace_option_file(struct trace_array *tr,
6795 struct trace_option_dentry *topt,
6796 struct tracer_flags *flags,
6797 struct tracer_opt *opt)
6799 struct dentry *t_options;
6801 t_options = trace_options_init_dentry(tr);
6805 topt->flags = flags;
6809 topt->entry = trace_create_file(opt->name, 0644, t_options, topt,
6810 &trace_options_fops);
6815 create_trace_option_files(struct trace_array *tr, struct tracer *tracer)
6817 struct trace_option_dentry *topts;
6818 struct trace_options *tr_topts;
6819 struct tracer_flags *flags;
6820 struct tracer_opt *opts;
6827 flags = tracer->flags;
6829 if (!flags || !flags->opts)
6833 * If this is an instance, only create flags for tracers
6834 * the instance may have.
6836 if (!trace_ok_for_array(tracer, tr))
6839 for (i = 0; i < tr->nr_topts; i++) {
6840 /* Make sure there are no duplicate flags. */
6841 if (WARN_ON_ONCE(tr->topts[i].tracer->flags == tracer->flags))
6847 for (cnt = 0; opts[cnt].name; cnt++)
6850 topts = kcalloc(cnt + 1, sizeof(*topts), GFP_KERNEL);
6854 tr_topts = krealloc(tr->topts, sizeof(*tr->topts) * (tr->nr_topts + 1),
6861 tr->topts = tr_topts;
6862 tr->topts[tr->nr_topts].tracer = tracer;
6863 tr->topts[tr->nr_topts].topts = topts;
6866 for (cnt = 0; opts[cnt].name; cnt++) {
6867 create_trace_option_file(tr, &topts[cnt], flags,
6869 WARN_ONCE(topts[cnt].entry == NULL,
6870 "Failed to create trace option: %s",
6875 static struct dentry *
6876 create_trace_option_core_file(struct trace_array *tr,
6877 const char *option, long index)
6879 struct dentry *t_options;
6881 t_options = trace_options_init_dentry(tr);
6885 return trace_create_file(option, 0644, t_options,
6886 (void *)&tr->trace_flags_index[index],
6887 &trace_options_core_fops);
6890 static void create_trace_options_dir(struct trace_array *tr)
6892 struct dentry *t_options;
6893 bool top_level = tr == &global_trace;
6896 t_options = trace_options_init_dentry(tr);
6900 for (i = 0; trace_options[i]; i++) {
6902 !((1 << i) & TOP_LEVEL_TRACE_FLAGS))
6903 create_trace_option_core_file(tr, trace_options[i], i);
6908 rb_simple_read(struct file *filp, char __user *ubuf,
6909 size_t cnt, loff_t *ppos)
6911 struct trace_array *tr = filp->private_data;
6915 r = tracer_tracing_is_on(tr);
6916 r = sprintf(buf, "%d\n", r);
6918 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
6922 rb_simple_write(struct file *filp, const char __user *ubuf,
6923 size_t cnt, loff_t *ppos)
6925 struct trace_array *tr = filp->private_data;
6926 struct ring_buffer *buffer = tr->trace_buffer.buffer;
6930 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
6935 mutex_lock(&trace_types_lock);
6936 if (!!val == tracer_tracing_is_on(tr)) {
6937 val = 0; /* do nothing */
6939 tracer_tracing_on(tr);
6940 if (tr->current_trace->start)
6941 tr->current_trace->start(tr);
6943 tracer_tracing_off(tr);
6944 if (tr->current_trace->stop)
6945 tr->current_trace->stop(tr);
6947 mutex_unlock(&trace_types_lock);
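/*
 * Example usage (a sketch; assumes tracefs is mounted at
 * /sys/kernel/tracing): tracing_on pauses and resumes recording into the
 * ring buffer without tearing down the current tracer.
 *
 *   echo 0 > /sys/kernel/tracing/tracing_on
 *   echo 1 > /sys/kernel/tracing/tracing_on
 *   cat /sys/kernel/tracing/tracing_on
 */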
6955 static const struct file_operations rb_simple_fops = {
6956 .open = tracing_open_generic_tr,
6957 .read = rb_simple_read,
6958 .write = rb_simple_write,
6959 .release = tracing_release_generic_tr,
6960 .llseek = default_llseek,
6963 struct dentry *trace_instance_dir;
6966 init_tracer_tracefs(struct trace_array *tr, struct dentry *d_tracer);
6969 allocate_trace_buffer(struct trace_array *tr, struct trace_buffer *buf, int size)
6971 enum ring_buffer_flags rb_flags;
6973 rb_flags = tr->trace_flags & TRACE_ITER_OVERWRITE ? RB_FL_OVERWRITE : 0;
6977 buf->buffer = ring_buffer_alloc(size, rb_flags);
6981 buf->data = alloc_percpu(struct trace_array_cpu);
6983 ring_buffer_free(buf->buffer);
6988 /* Allocate the first page for all buffers */
6989 set_buffer_entries(&tr->trace_buffer,
6990 ring_buffer_size(tr->trace_buffer.buffer, 0));
6995 static int allocate_trace_buffers(struct trace_array *tr, int size)
6999 ret = allocate_trace_buffer(tr, &tr->trace_buffer, size);
7003 #ifdef CONFIG_TRACER_MAX_TRACE
7004 ret = allocate_trace_buffer(tr, &tr->max_buffer,
7005 allocate_snapshot ? size : 1);
7007 ring_buffer_free(tr->trace_buffer.buffer);
7008 tr->trace_buffer.buffer = NULL;
7009 free_percpu(tr->trace_buffer.data);
7010 tr->trace_buffer.data = NULL;
7013 tr->allocated_snapshot = allocate_snapshot;
7016 * Only the top level trace array gets its snapshot allocated
7017 * from the kernel command line.
7019 allocate_snapshot = false;
7023 * Because of some magic with the way alloc_percpu() works on
7024 * x86_64, we need to synchronize the pgd of all the tables,
7025 * otherwise the trace events that happen in x86_64 page fault
7026 * handlers can't cope with the chance that
7027 * alloc_percpu()'d memory might be touched in the page fault trace
7028 * event. Oh, and we need to audit all other alloc_percpu() and vmalloc()
7029 * calls in tracing, because something might get triggered within a
7030 * page fault trace event!
7032 vmalloc_sync_mappings();
7037 static void free_trace_buffer(struct trace_buffer *buf)
7040 ring_buffer_free(buf->buffer);
7042 free_percpu(buf->data);
7047 static void free_trace_buffers(struct trace_array *tr)
7052 free_trace_buffer(&tr->trace_buffer);
7054 #ifdef CONFIG_TRACER_MAX_TRACE
7055 free_trace_buffer(&tr->max_buffer);
7059 static void init_trace_flags_index(struct trace_array *tr)
7063 /* Used by the trace options files */
7064 for (i = 0; i < TRACE_FLAGS_MAX_SIZE; i++)
7065 tr->trace_flags_index[i] = i;
7068 static void __update_tracer_options(struct trace_array *tr)
7072 for (t = trace_types; t; t = t->next)
7073 add_tracer_options(tr, t);
7076 static void update_tracer_options(struct trace_array *tr)
7078 mutex_lock(&trace_types_lock);
7079 __update_tracer_options(tr);
7080 mutex_unlock(&trace_types_lock);
7083 static int instance_mkdir(const char *name)
7085 struct trace_array *tr;
7088 mutex_lock(&trace_types_lock);
7091 list_for_each_entry(tr, &ftrace_trace_arrays, list) {
7092 if (tr->name && strcmp(tr->name, name) == 0)
7097 tr = kzalloc(sizeof(*tr), GFP_KERNEL);
7101 tr->name = kstrdup(name, GFP_KERNEL);
7105 if (!alloc_cpumask_var(&tr->tracing_cpumask, GFP_KERNEL))
7108 tr->trace_flags = global_trace.trace_flags & ~ZEROED_TRACE_FLAGS;
7110 cpumask_copy(tr->tracing_cpumask, cpu_all_mask);
7112 raw_spin_lock_init(&tr->start_lock);
7114 tr->max_lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
7116 tr->current_trace = &nop_trace;
7118 INIT_LIST_HEAD(&tr->systems);
7119 INIT_LIST_HEAD(&tr->events);
7121 if (allocate_trace_buffers(tr, trace_buf_size) < 0)
7124 tr->dir = tracefs_create_dir(name, trace_instance_dir);
7128 ret = event_trace_add_tracer(tr->dir, tr);
7130 tracefs_remove_recursive(tr->dir);
7134 init_tracer_tracefs(tr, tr->dir);
7135 init_trace_flags_index(tr);
7136 __update_tracer_options(tr);
7138 list_add(&tr->list, &ftrace_trace_arrays);
7140 mutex_unlock(&trace_types_lock);
7145 free_trace_buffers(tr);
7146 free_cpumask_var(tr->tracing_cpumask);
7151 mutex_unlock(&trace_types_lock);
7157 static int instance_rmdir(const char *name)
7159 struct trace_array *tr;
7164 mutex_lock(&trace_types_lock);
7167 list_for_each_entry(tr, &ftrace_trace_arrays, list) {
7168 if (tr->name && strcmp(tr->name, name) == 0) {
7177 if (tr->ref || (tr->current_trace && tr->current_trace->ref))
7180 list_del(&tr->list);
7182 /* Disable all the flags that were enabled coming in */
7183 for (i = 0; i < TRACE_FLAGS_MAX_SIZE; i++) {
7184 if ((1 << i) & ZEROED_TRACE_FLAGS)
7185 set_tracer_flag(tr, 1 << i, 0);
7188 tracing_set_nop(tr);
7189 event_trace_del_tracer(tr);
7190 ftrace_clear_pids(tr);
7191 ftrace_destroy_function_files(tr);
7192 tracefs_remove_recursive(tr->dir);
7193 free_trace_buffers(tr);
7195 for (i = 0; i < tr->nr_topts; i++) {
7196 kfree(tr->topts[i].topts);
7200 free_cpumask_var(tr->tracing_cpumask);
7207 mutex_unlock(&trace_types_lock);
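/*
 * Example usage (a sketch; assumes tracefs is mounted at
 * /sys/kernel/tracing): instances are created and removed with plain
 * mkdir/rmdir, each with its own buffers and tracer selection; rmdir
 * fails while files in the instance are still in use.
 *
 *   mkdir /sys/kernel/tracing/instances/foo
 *   echo function > /sys/kernel/tracing/instances/foo/current_tracer
 *   rmdir /sys/kernel/tracing/instances/foo
 */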
7212 static __init void create_trace_instances(struct dentry *d_tracer)
7214 trace_instance_dir = tracefs_create_instance_dir("instances", d_tracer,
7217 if (WARN_ON(!trace_instance_dir))
7222 init_tracer_tracefs(struct trace_array *tr, struct dentry *d_tracer)
7226 trace_create_file("available_tracers", 0444, d_tracer,
7227 tr, &show_traces_fops);
7229 trace_create_file("current_tracer", 0644, d_tracer,
7230 tr, &set_tracer_fops);
7232 trace_create_file("tracing_cpumask", 0644, d_tracer,
7233 tr, &tracing_cpumask_fops);
7235 trace_create_file("trace_options", 0644, d_tracer,
7236 tr, &tracing_iter_fops);
7238 trace_create_file("trace", 0644, d_tracer,
7241 trace_create_file("trace_pipe", 0444, d_tracer,
7242 tr, &tracing_pipe_fops);
7244 trace_create_file("buffer_size_kb", 0644, d_tracer,
7245 tr, &tracing_entries_fops);
7247 trace_create_file("buffer_total_size_kb", 0444, d_tracer,
7248 tr, &tracing_total_entries_fops);
7250 trace_create_file("free_buffer", 0200, d_tracer,
7251 tr, &tracing_free_buffer_fops);
7253 trace_create_file("trace_marker", 0220, d_tracer,
7254 tr, &tracing_mark_fops);
7256 trace_create_file("trace_clock", 0644, d_tracer, tr,
7259 trace_create_file("tracing_on", 0644, d_tracer,
7260 tr, &rb_simple_fops);
7262 create_trace_options_dir(tr);
7264 #if defined(CONFIG_TRACER_MAX_TRACE) || defined(CONFIG_HWLAT_TRACER)
7265 trace_create_file("tracing_max_latency", 0644, d_tracer,
7266 &tr->max_latency, &tracing_max_lat_fops);
7269 if (ftrace_create_function_files(tr, d_tracer))
7270 WARN(1, "Could not allocate function filter files");
7272 #ifdef CONFIG_TRACER_SNAPSHOT
7273 trace_create_file("snapshot", 0644, d_tracer,
7274 tr, &snapshot_fops);
7277 for_each_tracing_cpu(cpu)
7278 tracing_init_tracefs_percpu(tr, cpu);
7280 ftrace_init_tracefs(tr, d_tracer);
7283 static struct vfsmount *trace_automount(struct dentry *mntpt, void *ingore)
7285 struct vfsmount *mnt;
7286 struct file_system_type *type;
7289 * To maintain backward compatibility for tools that mount
7290 * debugfs to get to the tracing facility, tracefs is automatically
7291 * mounted to the debugfs/tracing directory.
7293 type = get_fs_type("tracefs");
7296 mnt = vfs_submount(mntpt, type, "tracefs", NULL);
7297 put_filesystem(type);
7306 * tracing_init_dentry - initialize top level trace array
7308 * This is called when creating files or directories in the tracing
7309 * directory. It is called via fs_initcall() by any of the boot-up code
7310 * and is expected to return the dentry of the top level tracing directory.
7312 struct dentry *tracing_init_dentry(void)
7314 struct trace_array *tr = &global_trace;
7316 /* The top level trace array uses NULL as parent */
7320 if (WARN_ON(!tracefs_initialized()) ||
7321 (IS_ENABLED(CONFIG_DEBUG_FS) &&
7322 WARN_ON(!debugfs_initialized())))
7323 return ERR_PTR(-ENODEV);
7326 * As there may still be users that expect the tracing
7327 * files to exist in debugfs/tracing, we must automount
7328 * the tracefs file system there, so older tools still
7329 * work with the newer kernel.
7331 tr->dir = debugfs_create_automount("tracing", NULL,
7332 trace_automount, NULL);
7334 pr_warn_once("Could not create debugfs directory 'tracing'\n");
7335 return ERR_PTR(-ENOMEM);
7341 extern struct trace_enum_map *__start_ftrace_enum_maps[];
7342 extern struct trace_enum_map *__stop_ftrace_enum_maps[];
7344 static void __init trace_enum_init(void)
7348 len = __stop_ftrace_enum_maps - __start_ftrace_enum_maps;
7349 trace_insert_enum_map(NULL, __start_ftrace_enum_maps, len);
7352 #ifdef CONFIG_MODULES
7353 static void trace_module_add_enums(struct module *mod)
7355 if (!mod->num_trace_enums)
7359 * Modules with bad taint do not have events created; do
7360 * not bother with enums either.
7362 if (trace_module_has_bad_taint(mod))
7365 trace_insert_enum_map(mod, mod->trace_enums, mod->num_trace_enums);
7368 #ifdef CONFIG_TRACE_ENUM_MAP_FILE
7369 static void trace_module_remove_enums(struct module *mod)
7371 union trace_enum_map_item *map;
7372 union trace_enum_map_item **last = &trace_enum_maps;
7374 if (!mod->num_trace_enums)
7377 mutex_lock(&trace_enum_mutex);
7379 map = trace_enum_maps;
7382 if (map->head.mod == mod)
7384 map = trace_enum_jmp_to_tail(map);
7385 last = &map->tail.next;
7386 map = map->tail.next;
7391 *last = trace_enum_jmp_to_tail(map)->tail.next;
7394 mutex_unlock(&trace_enum_mutex);
7397 static inline void trace_module_remove_enums(struct module *mod) { }
7398 #endif /* CONFIG_TRACE_ENUM_MAP_FILE */
7400 static int trace_module_notify(struct notifier_block *self,
7401 unsigned long val, void *data)
7403 struct module *mod = data;
7406 case MODULE_STATE_COMING:
7407 trace_module_add_enums(mod);
7409 case MODULE_STATE_GOING:
7410 trace_module_remove_enums(mod);
7417 static struct notifier_block trace_module_nb = {
7418 .notifier_call = trace_module_notify,
7421 #endif /* CONFIG_MODULES */
7423 static __init int tracer_init_tracefs(void)
7425 struct dentry *d_tracer;
7427 trace_access_lock_init();
7429 d_tracer = tracing_init_dentry();
7430 if (IS_ERR(d_tracer))
7433 init_tracer_tracefs(&global_trace, d_tracer);
7434 ftrace_init_tracefs_toplevel(&global_trace, d_tracer);
7436 trace_create_file("tracing_thresh", 0644, d_tracer,
7437 &global_trace, &tracing_thresh_fops);
7439 trace_create_file("README", 0444, d_tracer,
7440 NULL, &tracing_readme_fops);
7442 trace_create_file("saved_cmdlines", 0444, d_tracer,
7443 NULL, &tracing_saved_cmdlines_fops);
7445 trace_create_file("saved_cmdlines_size", 0644, d_tracer,
7446 NULL, &tracing_saved_cmdlines_size_fops);
7450 trace_create_enum_file(d_tracer);
7452 #ifdef CONFIG_MODULES
7453 register_module_notifier(&trace_module_nb);
7456 #ifdef CONFIG_DYNAMIC_FTRACE
7457 trace_create_file("dyn_ftrace_total_info", 0444, d_tracer,
7458 &ftrace_update_tot_cnt, &tracing_dyn_info_fops);
7461 create_trace_instances(d_tracer);
7463 update_tracer_options(&global_trace);
7468 static int trace_panic_handler(struct notifier_block *this,
7469 unsigned long event, void *unused)
7471 if (ftrace_dump_on_oops)
7472 ftrace_dump(ftrace_dump_on_oops);
7476 static struct notifier_block trace_panic_notifier = {
7477 .notifier_call = trace_panic_handler,
7479 .priority = 150 /* priority: INT_MAX >= x >= 0 */
7482 static int trace_die_handler(struct notifier_block *self,
7488 if (ftrace_dump_on_oops)
7489 ftrace_dump(ftrace_dump_on_oops);
7497 static struct notifier_block trace_die_notifier = {
7498 .notifier_call = trace_die_handler,
7503 * printk is set to a max of 1024; we really don't need it that big.
7504 * Nothing should be printing 1000 characters anyway.
7506 #define TRACE_MAX_PRINT 1000
7509 * Define here KERN_TRACE so that we have one place to modify
7510 * it if we decide to change what log level the ftrace dump
7513 #define KERN_TRACE KERN_EMERG
7516 trace_printk_seq(struct trace_seq *s)
7518 /* Probably should print a warning here. */
7519 if (s->seq.len >= TRACE_MAX_PRINT)
7520 s->seq.len = TRACE_MAX_PRINT;
7523 * More paranoid code. Although the buffer size is set to
7524 * PAGE_SIZE, and TRACE_MAX_PRINT is 1000, this is just
7525 * an extra layer of protection.
7527 if (WARN_ON_ONCE(s->seq.len >= s->seq.size))
7528 s->seq.len = s->seq.size - 1;
7530 /* should be zero-terminated, but we are paranoid. */
7531 s->buffer[s->seq.len] = 0;
7533 printk(KERN_TRACE "%s", s->buffer);
7538 void trace_init_global_iter(struct trace_iterator *iter)
7540 iter->tr = &global_trace;
7541 iter->trace = iter->tr->current_trace;
7542 iter->cpu_file = RING_BUFFER_ALL_CPUS;
7543 iter->trace_buffer = &global_trace.trace_buffer;
7545 if (iter->trace && iter->trace->open)
7546 iter->trace->open(iter);
7548 /* Annotate start of buffers if we had overruns */
7549 if (ring_buffer_overruns(iter->trace_buffer->buffer))
7550 iter->iter_flags |= TRACE_FILE_ANNOTATE;
7552 /* Output in nanoseconds only if we are using a clock in nanoseconds. */
7553 if (trace_clocks[iter->tr->clock_id].in_ns)
7554 iter->iter_flags |= TRACE_FILE_TIME_IN_NS;
7557 void ftrace_dump(enum ftrace_dump_mode oops_dump_mode)
7559 /* use static because iter can be a bit big for the stack */
7560 static struct trace_iterator iter;
7561 static atomic_t dump_running;
7562 struct trace_array *tr = &global_trace;
7563 unsigned int old_userobj;
7564 unsigned long flags;
7567 /* Only allow one dump user at a time. */
7568 if (atomic_inc_return(&dump_running) != 1) {
7569 atomic_dec(&dump_running);
7574 * Always turn off tracing when we dump.
7575 * We don't need to show trace output of what happens
7576 * between multiple crashes.
7578 * If the user does a sysrq-z, then they can re-enable
7579 * tracing with echo 1 > tracing_on.
7583 local_irq_save(flags);
7585 /* Simulate the iterator */
7586 trace_init_global_iter(&iter);
7588 for_each_tracing_cpu(cpu) {
7589 atomic_inc(&per_cpu_ptr(iter.trace_buffer->data, cpu)->disabled);
7592 old_userobj = tr->trace_flags & TRACE_ITER_SYM_USEROBJ;
7594 /* don't look at user memory in panic mode */
7595 tr->trace_flags &= ~TRACE_ITER_SYM_USEROBJ;
7597 switch (oops_dump_mode) {
7599 iter.cpu_file = RING_BUFFER_ALL_CPUS;
7602 iter.cpu_file = raw_smp_processor_id();
7607 printk(KERN_TRACE "Bad dumping mode, switching to all CPUs dump\n");
7608 iter.cpu_file = RING_BUFFER_ALL_CPUS;
7611 printk(KERN_TRACE "Dumping ftrace buffer:\n");
7613 /* Did function tracer already get disabled? */
7614 if (ftrace_is_dead()) {
7615 printk("# WARNING: FUNCTION TRACING IS CORRUPTED\n");
7616 printk("# MAY BE MISSING FUNCTION EVENTS\n");
7620	 * We need to stop all tracing on all CPUs to read the
7621	 * next buffer. This is a bit expensive, but it is
7622	 * not done often. We read everything we can,
7623	 * and then release the locks again.
7626 while (!trace_empty(&iter)) {
7629 printk(KERN_TRACE "---------------------------------\n");
7633 trace_iterator_reset(&iter);
7634 iter.iter_flags |= TRACE_FILE_LAT_FMT;
7636 if (trace_find_next_entry_inc(&iter) != NULL) {
7639 ret = print_trace_line(&iter);
7640 if (ret != TRACE_TYPE_NO_CONSUME)
7641 trace_consume(&iter);
7643 touch_nmi_watchdog();
7645 trace_printk_seq(&iter.seq);
7649 printk(KERN_TRACE " (ftrace buffer empty)\n");
7651 printk(KERN_TRACE "---------------------------------\n");
7654 tr->trace_flags |= old_userobj;
7656 for_each_tracing_cpu(cpu) {
7657 atomic_dec(&per_cpu_ptr(iter.trace_buffer->data, cpu)->disabled);
7659 atomic_dec(&dump_running);
7660 local_irq_restore(flags);
7662 EXPORT_SYMBOL_GPL(ftrace_dump);
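
/*
 * Illustrative example only (not part of this file): a subsystem that wants
 * the trace buffers on the console when it hits a fatal state could do
 * something like the following, where fatal_condition is a made-up
 * placeholder:
 *
 *	if (WARN_ON(fatal_condition))
 *		ftrace_dump(DUMP_ALL);
 *
 * Passing DUMP_ORIG instead limits the dump to the CPU that made the call.
 */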
7664 __init static int tracer_alloc_buffers(void)
7670	 * Make sure we don't accidentally add more trace options
7671 * than we have bits for.
7673 BUILD_BUG_ON(TRACE_ITER_LAST_BIT > TRACE_FLAGS_MAX_SIZE);
7675 if (!alloc_cpumask_var(&tracing_buffer_mask, GFP_KERNEL))
7678 if (!alloc_cpumask_var(&global_trace.tracing_cpumask, GFP_KERNEL))
7679 goto out_free_buffer_mask;
7681 /* Only allocate trace_printk buffers if a trace_printk exists */
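	/*
	 * (__start/__stop___trace_bprintk_fmt bracket the section that holds
	 * trace_printk() format strings, so comparing the two tells us
	 * whether any trace_printk() call was built into the kernel.)
	 */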
7682 if (&__stop___trace_bprintk_fmt != &__start___trace_bprintk_fmt)
7683 /* Must be called before global_trace.buffer is allocated */
7684 trace_printk_init_buffers();
7686 /* To save memory, keep the ring buffer size at its minimum */
7687 if (ring_buffer_expanded)
7688 ring_buf_size = trace_buf_size;
7692 cpumask_copy(tracing_buffer_mask, cpu_possible_mask);
7693 cpumask_copy(global_trace.tracing_cpumask, cpu_all_mask);
7695 raw_spin_lock_init(&global_trace.start_lock);
7697 /* Used for event triggers */
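	/*
	 * (It holds a copy of an event when output to the main buffer is
	 * disabled but triggers attached to the event still need to see
	 * the event data.)
	 */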
7698 temp_buffer = ring_buffer_alloc(PAGE_SIZE, RB_FL_OVERWRITE);
7700 goto out_free_cpumask;
7702 if (trace_create_savedcmd() < 0)
7703 goto out_free_temp_buffer;
7705 /* TODO: make the number of buffers hot pluggable with CPUs */
7706 if (allocate_trace_buffers(&global_trace, ring_buf_size) < 0) {
7707 printk(KERN_ERR "tracer: failed to allocate ring buffer!\n");
7709 goto out_free_savedcmd;
7712 if (global_trace.buffer_disabled)
7715 if (trace_boot_clock) {
7716 ret = tracing_set_clock(&global_trace, trace_boot_clock);
7718 pr_warn("Trace clock %s not defined, going back to default\n",
7723 * register_tracer() might reference current_trace, so it
7724 * needs to be set before we register anything. This is
7725 * just a bootstrap of current_trace anyway.
7727 global_trace.current_trace = &nop_trace;
7729 global_trace.max_lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
7731 ftrace_init_global_array_ops(&global_trace);
7733 init_trace_flags_index(&global_trace);
7735 register_tracer(&nop_trace);
7737 /* All seems OK, enable tracing */
7738 tracing_disabled = 0;
7740 atomic_notifier_chain_register(&panic_notifier_list,
7741 &trace_panic_notifier);
7743 register_die_notifier(&trace_die_notifier);
7745 global_trace.flags = TRACE_ARRAY_FL_GLOBAL;
7747 INIT_LIST_HEAD(&global_trace.systems);
7748 INIT_LIST_HEAD(&global_trace.events);
7749 list_add(&global_trace.list, &ftrace_trace_arrays);
7751 apply_trace_boot_options();
7753 register_snapshot_cmd();
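
	/* Error paths below unwind the allocations in reverse order. */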
7758 free_saved_cmdlines_buffer(savedcmd);
7759 out_free_temp_buffer:
7760 ring_buffer_free(temp_buffer);
7762 free_cpumask_var(global_trace.tracing_cpumask);
7763 out_free_buffer_mask:
7764 free_cpumask_var(tracing_buffer_mask);
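
/*
 * trace_init() is called early from start_kernel(), which is how
 * tracer_alloc_buffers() above ends up running long before any initcall.
 */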
7769 void __init trace_init(void)
7771 if (tracepoint_printk) {
7772 tracepoint_print_iter =
7773 kmalloc(sizeof(*tracepoint_print_iter), GFP_KERNEL);
7774 if (WARN_ON(!tracepoint_print_iter))
7775 tracepoint_printk = 0;
7777 tracer_alloc_buffers();
7781 __init static int clear_boot_tracer(void)
7784	 * The default bootup tracer name is kept in an init section.
7785	 * This function is called from a late initcall. If the boot
7786	 * tracer was never registered, clear the pointer out, to prevent
7787	 * a later registration from accessing the init memory that is
7788	 * about to be freed.
7790 if (!default_bootup_tracer)
7793 printk(KERN_INFO "ftrace bootup tracer '%s' not registered.\n",
7794 default_bootup_tracer);
7795 default_bootup_tracer = NULL;
7800 fs_initcall(tracer_init_tracefs);
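
/*
 * clear_boot_tracer() must run only after every builtin tracer has had a
 * chance to register, hence late_initcall_sync() rather than an earlier
 * initcall level.
 */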
7801 late_initcall_sync(clear_boot_tracer);