1 // SPDX-License-Identifier: GPL-2.0
3 * ring buffer based function tracer
5 * Copyright (C) 2007-2012 Steven Rostedt <srostedt@redhat.com>
6 * Copyright (C) 2008 Ingo Molnar <mingo@redhat.com>
8 * Originally taken from the RT patch by:
9 * Arnaldo Carvalho de Melo <acme@redhat.com>
11 * Based on code from the latency_tracer, that is:
12 * Copyright (C) 2004-2006 Ingo Molnar
13 * Copyright (C) 2004 Nadia Yvette Chambers
15 #include <linux/ring_buffer.h>
16 #include <generated/utsrelease.h>
17 #include <linux/stacktrace.h>
18 #include <linux/writeback.h>
19 #include <linux/kallsyms.h>
20 #include <linux/seq_file.h>
21 #include <linux/notifier.h>
22 #include <linux/irqflags.h>
23 #include <linux/debugfs.h>
24 #include <linux/tracefs.h>
25 #include <linux/pagemap.h>
26 #include <linux/hardirq.h>
27 #include <linux/linkage.h>
28 #include <linux/uaccess.h>
29 #include <linux/vmalloc.h>
30 #include <linux/ftrace.h>
31 #include <linux/module.h>
32 #include <linux/percpu.h>
33 #include <linux/splice.h>
34 #include <linux/kdebug.h>
35 #include <linux/string.h>
36 #include <linux/mount.h>
37 #include <linux/rwsem.h>
38 #include <linux/slab.h>
39 #include <linux/ctype.h>
40 #include <linux/init.h>
41 #include <linux/poll.h>
42 #include <linux/nmi.h>
44 #include <linux/trace.h>
45 #include <linux/sched/clock.h>
46 #include <linux/sched/rt.h>
49 #include "trace_output.h"
52 * On boot up, the ring buffer is set to the minimum size, so that
53 * we do not waste memory on systems that are not using tracing.
55 bool ring_buffer_expanded;
58 * We need to change this state when a selftest is running.
59 * A selftest will look into the ring-buffer to count the
60 * entries inserted during the selftest, although some concurrent
61 * insertions into the ring-buffer, such as trace_printk, could occur
62 * at the same time, giving false positive or negative results.
64 static bool __read_mostly tracing_selftest_running;
67 * If a tracer is running, we do not want to run SELFTEST.
69 bool __read_mostly tracing_selftest_disabled;
71 /* Pipe tracepoints to printk */
72 struct trace_iterator *tracepoint_print_iter;
73 int tracepoint_printk;
74 static DEFINE_STATIC_KEY_FALSE(tracepoint_printk_key);
76 /* For tracers that don't implement custom flags */
77 static struct tracer_opt dummy_tracer_opt[] = {
82 dummy_set_flag(struct trace_array *tr, u32 old_flags, u32 bit, int set)
88 * To prevent the comm cache from being overwritten when no
89 * tracing is active, only save the comm when a trace event occurred.
92 static DEFINE_PER_CPU(bool, trace_taskinfo_save);
95 * Kill all tracing for good (never come back).
96 * It is initialized to 1 but will turn to zero if the initialization
97 * of the tracer is successful. But that is the only place that sets this back to zero.
100 static int tracing_disabled = 1;
102 cpumask_var_t __read_mostly tracing_buffer_mask;
105 * ftrace_dump_on_oops - variable to dump ftrace buffer on oops
107 * If there is an oops (or kernel panic) and the ftrace_dump_on_oops
108 * is set, then ftrace_dump is called. This will output the contents
109 * of the ftrace buffers to the console. This is very useful for
110 * capturing traces that lead to crashes and outputting them to a serial console.
113 * It is off by default, but you can enable it either by specifying
114 * "ftrace_dump_on_oops" on the kernel command line, or by setting
115 * /proc/sys/kernel/ftrace_dump_on_oops
116 * Set it to 1 to dump the buffers of all CPUs
117 * Set it to 2 to dump only the buffer of the CPU that triggered the oops
120 enum ftrace_dump_mode ftrace_dump_on_oops;
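
/*
 * Illustrative sketch (not part of this file's logic, guarded out of the
 * build): a debug path could dump the ftrace buffers directly, mirroring
 * what the oops path does when ftrace_dump_on_oops is set. The function
 * name below is hypothetical; ftrace_dump() and the DUMP_* modes are the
 * real API.
 */
#if 0
static void example_dump_on_fatal_error(bool all_cpus)
{
	/* DUMP_ALL dumps every CPU buffer, DUMP_ORIG only the current one */
	ftrace_dump(all_cpus ? DUMP_ALL : DUMP_ORIG);
}
#endif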
122 /* When set, tracing will stop when a WARN*() is hit */
123 int __disable_trace_on_warning;
125 #ifdef CONFIG_TRACE_EVAL_MAP_FILE
126 /* Map of enums to their values, for "eval_map" file */
127 struct trace_eval_map_head {
129 unsigned long length;
132 union trace_eval_map_item;
134 struct trace_eval_map_tail {
136 * "end" is first and points to NULL as it must be different
137 * from "mod" or "eval_string"
139 union trace_eval_map_item *next;
140 const char *end; /* points to NULL */
143 static DEFINE_MUTEX(trace_eval_mutex);
146 * The trace_eval_maps are saved in an array with two extra elements,
147 * one at the beginning, and one at the end. The beginning item contains
148 * the count of the saved maps (head.length), and the module they
149 * belong to if not built in (head.mod). The ending item contains a
150 * pointer to the next array of saved eval_map items.
152 union trace_eval_map_item {
153 struct trace_eval_map map;
154 struct trace_eval_map_head head;
155 struct trace_eval_map_tail tail;
158 static union trace_eval_map_item *trace_eval_maps;
159 #endif /* CONFIG_TRACE_EVAL_MAP_FILE */
161 static int tracing_set_tracer(struct trace_array *tr, const char *buf);
163 #define MAX_TRACER_SIZE 100
164 static char bootup_tracer_buf[MAX_TRACER_SIZE] __initdata;
165 static char *default_bootup_tracer;
167 static bool allocate_snapshot;
169 static int __init set_cmdline_ftrace(char *str)
171 strlcpy(bootup_tracer_buf, str, MAX_TRACER_SIZE);
172 default_bootup_tracer = bootup_tracer_buf;
173 /* We are using ftrace early, expand it */
174 ring_buffer_expanded = true;
177 __setup("ftrace=", set_cmdline_ftrace);
179 static int __init set_ftrace_dump_on_oops(char *str)
181 if (*str++ != '=' || !*str) {
182 ftrace_dump_on_oops = DUMP_ALL;
186 if (!strcmp("orig_cpu", str)) {
187 ftrace_dump_on_oops = DUMP_ORIG;
193 __setup("ftrace_dump_on_oops", set_ftrace_dump_on_oops);
195 static int __init stop_trace_on_warning(char *str)
197 if ((strcmp(str, "=0") != 0 && strcmp(str, "=off") != 0))
198 __disable_trace_on_warning = 1;
201 __setup("traceoff_on_warning", stop_trace_on_warning);
203 static int __init boot_alloc_snapshot(char *str)
205 allocate_snapshot = true;
206 /* We also need the main ring buffer expanded */
207 ring_buffer_expanded = true;
210 __setup("alloc_snapshot", boot_alloc_snapshot);
213 static char trace_boot_options_buf[MAX_TRACER_SIZE] __initdata;
215 static int __init set_trace_boot_options(char *str)
217 strlcpy(trace_boot_options_buf, str, MAX_TRACER_SIZE);
220 __setup("trace_options=", set_trace_boot_options);
222 static char trace_boot_clock_buf[MAX_TRACER_SIZE] __initdata;
223 static char *trace_boot_clock __initdata;
225 static int __init set_trace_boot_clock(char *str)
227 strlcpy(trace_boot_clock_buf, str, MAX_TRACER_SIZE);
228 trace_boot_clock = trace_boot_clock_buf;
231 __setup("trace_clock=", set_trace_boot_clock);
233 static int __init set_tracepoint_printk(char *str)
235 if ((strcmp(str, "=0") != 0 && strcmp(str, "=off") != 0))
236 tracepoint_printk = 1;
239 __setup("tp_printk", set_tracepoint_printk);
241 unsigned long long ns2usecs(u64 nsec)
248 /* trace_flags holds trace_options default values */
249 #define TRACE_DEFAULT_FLAGS \
250 (FUNCTION_DEFAULT_FLAGS | \
251 TRACE_ITER_PRINT_PARENT | TRACE_ITER_PRINTK | \
252 TRACE_ITER_ANNOTATE | TRACE_ITER_CONTEXT_INFO | \
253 TRACE_ITER_RECORD_CMD | TRACE_ITER_OVERWRITE | \
254 TRACE_ITER_IRQ_INFO | TRACE_ITER_MARKERS)
256 /* trace_options that are only supported by global_trace */
257 #define TOP_LEVEL_TRACE_FLAGS (TRACE_ITER_PRINTK | \
258 TRACE_ITER_PRINTK_MSGONLY | TRACE_ITER_RECORD_CMD)
260 /* trace_flags that are default zero for instances */
261 #define ZEROED_TRACE_FLAGS \
262 (TRACE_ITER_EVENT_FORK | TRACE_ITER_FUNC_FORK)
265 * The global_trace is the descriptor that holds the top-level tracing
266 * buffers for the live tracing.
268 static struct trace_array global_trace = {
269 .trace_flags = TRACE_DEFAULT_FLAGS,
272 LIST_HEAD(ftrace_trace_arrays);
274 int trace_array_get(struct trace_array *this_tr)
276 struct trace_array *tr;
279 mutex_lock(&trace_types_lock);
280 list_for_each_entry(tr, &ftrace_trace_arrays, list) {
287 mutex_unlock(&trace_types_lock);
292 static void __trace_array_put(struct trace_array *this_tr)
294 WARN_ON(!this_tr->ref);
298 void trace_array_put(struct trace_array *this_tr)
300 mutex_lock(&trace_types_lock);
301 __trace_array_put(this_tr);
302 mutex_unlock(&trace_types_lock);
305 int call_filter_check_discard(struct trace_event_call *call, void *rec,
306 struct ring_buffer *buffer,
307 struct ring_buffer_event *event)
309 if (unlikely(call->flags & TRACE_EVENT_FL_FILTERED) &&
310 !filter_match_preds(call->filter, rec)) {
311 __trace_event_discard_commit(buffer, event);
318 void trace_free_pid_list(struct trace_pid_list *pid_list)
320 vfree(pid_list->pids);
325 * trace_find_filtered_pid - check if a pid exists in a filtered_pid list
326 * @filtered_pids: The list of pids to check
327 * @search_pid: The PID to find in @filtered_pids
329 * Returns true if @search_pid is found in @filtered_pids, and false otherwise.
332 trace_find_filtered_pid(struct trace_pid_list *filtered_pids, pid_t search_pid)
335 * If pid_max changed after filtered_pids was created, we
336 * by default ignore all pids greater than the previous pid_max.
338 if (search_pid >= filtered_pids->pid_max)
341 return test_bit(search_pid, filtered_pids->pids);
345 * trace_ignore_this_task - should a task be ignored for tracing
346 * @filtered_pids: The list of pids to check
347 * @task: The task that should be ignored if not filtered
349 * Checks if @task should be traced or not from @filtered_pids.
350 * Returns true if @task should *NOT* be traced.
351 * Returns false if @task should be traced.
354 trace_ignore_this_task(struct trace_pid_list *filtered_pids, struct task_struct *task)
357 * Return false, because if filtered_pids does not exist,
358 * all pids are good to trace.
363 return !trace_find_filtered_pid(filtered_pids, task->pid);
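
/*
 * Minimal usage sketch (hypothetical caller, guarded out of the build):
 * a per-event hook would typically look up the pid list and skip the
 * event when trace_ignore_this_task() returns true. The helper name and
 * the way the pid list is obtained are illustrative only; the pattern
 * mirrors how the event subsystem uses these filters.
 */
#if 0
static bool example_should_trace_current(struct trace_pid_list *filtered_pids)
{
	/* No list means no filtering: trace everything */
	if (!filtered_pids)
		return true;

	return !trace_ignore_this_task(filtered_pids, current);
}
#endif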
367 * trace_pid_filter_add_remove_task - Add or remove a task from a pid_list
368 * @pid_list: The list to modify
369 * @self: The current task for fork or NULL for exit
370 * @task: The task to add or remove
372 * If adding a task and @self is defined, the task is only added if @self
373 * is also included in @pid_list. This happens on fork, where tasks should
374 * only be added when the parent is listed. If @self is NULL, then the
375 * @task pid will be removed from the list, which would happen on exit of a task.
378 void trace_filter_add_remove_task(struct trace_pid_list *pid_list,
379 struct task_struct *self,
380 struct task_struct *task)
385 /* For forks, we only add if the forking task is listed */
387 if (!trace_find_filtered_pid(pid_list, self->pid))
391 /* Sorry, but we don't support pid_max changing after setting */
392 if (task->pid >= pid_list->pid_max)
395 /* "self" is set for forks, and NULL for exits */
397 set_bit(task->pid, pid_list->pids);
399 clear_bit(task->pid, pid_list->pids);
403 * trace_pid_next - Used for seq_file to get to the next pid of a pid_list
404 * @pid_list: The pid list to show
405 * @v: The last pid that was shown (+1 of the actual pid, so that zero can be displayed)
406 * @pos: The position of the file
408 * This is used by the seq_file "next" operation to iterate the pids
409 * listed in a trace_pid_list structure.
411 * Returns the pid+1 as we want to display pid of zero, but NULL would
412 * stop the iteration.
414 void *trace_pid_next(struct trace_pid_list *pid_list, void *v, loff_t *pos)
416 unsigned long pid = (unsigned long)v;
420 /* pid already is +1 of the actual previous bit */
421 pid = find_next_bit(pid_list->pids, pid_list->pid_max, pid);
423 /* Return pid + 1 to allow zero to be represented */
424 if (pid < pid_list->pid_max)
425 return (void *)(pid + 1);
431 * trace_pid_start - Used for seq_file to start reading pid lists
432 * @pid_list: The pid list to show
433 * @pos: The position of the file
435 * This is used by the seq_file "start" operation to start the iteration of listing pids.
438 * Returns the pid+1 as we want to display pid of zero, but NULL would
439 * stop the iteration.
441 void *trace_pid_start(struct trace_pid_list *pid_list, loff_t *pos)
446 pid = find_first_bit(pid_list->pids, pid_list->pid_max);
447 if (pid >= pid_list->pid_max)
450 /* Return pid + 1 so that zero can be the exit value */
451 for (pid++; pid && l < *pos;
452 pid = (unsigned long)trace_pid_next(pid_list, (void *)pid, &l))
458 * trace_pid_show - show the current pid in seq_file processing
459 * @m: The seq_file structure to write into
460 * @v: A void pointer of the pid (+1) value to display
462 * Can be directly used by seq_file operations to display the current pid value.
465 int trace_pid_show(struct seq_file *m, void *v)
467 unsigned long pid = (unsigned long)v - 1;
469 seq_printf(m, "%lu\n", pid);
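
/*
 * Sketch of how these helpers plug into a seq_file (guarded out of the
 * build). The wrapper and structure names are hypothetical, and the
 * assumption that the pid list is reachable from the seq_file private
 * data is for illustration; the real pid-filter files add locking and
 * RCU dereferencing around this same pattern.
 */
#if 0
static void *example_pid_seq_start(struct seq_file *m, loff_t *pos)
{
	struct trace_pid_list *pid_list = m->private;

	return trace_pid_start(pid_list, pos);
}

static void *example_pid_seq_next(struct seq_file *m, void *v, loff_t *pos)
{
	struct trace_pid_list *pid_list = m->private;

	return trace_pid_next(pid_list, v, pos);
}

static void example_pid_seq_stop(struct seq_file *m, void *v)
{
}

static const struct seq_operations example_pid_seq_ops = {
	.start	= example_pid_seq_start,
	.next	= example_pid_seq_next,
	.stop	= example_pid_seq_stop,
	.show	= trace_pid_show,
};
#endif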
473 /* 128 should be much more than enough */
474 #define PID_BUF_SIZE 127
476 int trace_pid_write(struct trace_pid_list *filtered_pids,
477 struct trace_pid_list **new_pid_list,
478 const char __user *ubuf, size_t cnt)
480 struct trace_pid_list *pid_list;
481 struct trace_parser parser;
489 if (trace_parser_get_init(&parser, PID_BUF_SIZE + 1))
493 * Always recreate a new array. The write is an all-or-nothing
494 * operation. Always create a new array when adding new pids by
495 * the user. If the operation fails, then the current list is used.
498 pid_list = kmalloc(sizeof(*pid_list), GFP_KERNEL);
500 trace_parser_put(&parser);
504 pid_list->pid_max = READ_ONCE(pid_max);
506 /* Only truncating will shrink pid_max */
507 if (filtered_pids && filtered_pids->pid_max > pid_list->pid_max)
508 pid_list->pid_max = filtered_pids->pid_max;
510 pid_list->pids = vzalloc((pid_list->pid_max + 7) >> 3);
511 if (!pid_list->pids) {
512 trace_parser_put(&parser);
518 /* copy the current bits to the new max */
519 for_each_set_bit(pid, filtered_pids->pids,
520 filtered_pids->pid_max) {
521 set_bit(pid, pid_list->pids);
530 ret = trace_get_user(&parser, ubuf, cnt, &pos);
531 if (ret < 0 || !trace_parser_loaded(&parser))
539 if (kstrtoul(parser.buffer, 0, &val))
541 if (val >= pid_list->pid_max)
546 set_bit(pid, pid_list->pids);
549 trace_parser_clear(&parser);
552 trace_parser_put(&parser);
555 trace_free_pid_list(pid_list);
560 /* Cleared the list of pids */
561 trace_free_pid_list(pid_list);
566 *new_pid_list = pid_list;
571 static u64 buffer_ftrace_now(struct trace_buffer *buf, int cpu)
575 /* Early boot up does not have a buffer yet */
577 return trace_clock_local();
579 ts = ring_buffer_time_stamp(buf->buffer, cpu);
580 ring_buffer_normalize_time_stamp(buf->buffer, cpu, &ts);
585 u64 ftrace_now(int cpu)
587 return buffer_ftrace_now(&global_trace.trace_buffer, cpu);
591 * tracing_is_enabled - Show if global_trace has been disabled
593 * Shows if the global trace has been enabled or not. It uses the
594 * mirror flag "buffer_disabled", which can be used in fast paths such as by
595 * the irqsoff tracer. But it may be inaccurate due to races. If you
596 * need to know the accurate state, use tracing_is_on() which is a little
597 * slower, but accurate.
599 int tracing_is_enabled(void)
602 * For quick access (irqsoff uses this in fast path), just
603 * return the mirror variable of the state of the ring buffer.
604 * It's a little racy, but we don't really care.
607 return !global_trace.buffer_disabled;
611 * trace_buf_size is the size in bytes that is allocated
612 * for a buffer. Note, the number of bytes is always rounded to page size.
615 * This number is purposely set to a low number of 16384.
616 * If the dump on oops happens, it will be much appreciated
617 * to not have to wait for all that output. Anyway, this is
618 * configurable at both boot time and run time.
620 #define TRACE_BUF_SIZE_DEFAULT 1441792UL /* 16384 * 88 (sizeof(entry)) */
622 static unsigned long trace_buf_size = TRACE_BUF_SIZE_DEFAULT;
624 /* trace_types holds a link list of available tracers. */
625 static struct tracer *trace_types __read_mostly;
628 * trace_types_lock is used to protect the trace_types list.
630 DEFINE_MUTEX(trace_types_lock);
633 * serialize access to the ring buffer
635 * The ring buffer serializes readers, but it is only low-level protection.
636 * The validity of the events (which are returned by ring_buffer_peek() etc.)
637 * is not protected by the ring buffer.
639 * The content of events may become garbage if we allow another process to consume
640 * these events concurrently:
641 * A) the page of the consumed events may become a normal page
642 * (not a reader page) in the ring buffer, and this page will be rewritten
643 * by the event producer.
644 * B) the page of the consumed events may become a page for splice_read,
645 * and this page will be returned to the system.
647 * These primitives allow multi-process access to different cpu ring buffers concurrently.
650 * These primitives don't distinguish read-only and read-consume access.
651 * Multiple read-only accesses are also serialized.
655 static DECLARE_RWSEM(all_cpu_access_lock);
656 static DEFINE_PER_CPU(struct mutex, cpu_access_lock);
658 static inline void trace_access_lock(int cpu)
660 if (cpu == RING_BUFFER_ALL_CPUS) {
661 /* gain it for accessing the whole ring buffer. */
662 down_write(&all_cpu_access_lock);
664 /* gain it for accessing a cpu ring buffer. */
666 /* Firstly block other trace_access_lock(RING_BUFFER_ALL_CPUS). */
667 down_read(&all_cpu_access_lock);
669 /* Secondly block other access to this @cpu ring buffer. */
670 mutex_lock(&per_cpu(cpu_access_lock, cpu));
674 static inline void trace_access_unlock(int cpu)
676 if (cpu == RING_BUFFER_ALL_CPUS) {
677 up_write(&all_cpu_access_lock);
679 mutex_unlock(&per_cpu(cpu_access_lock, cpu));
680 up_read(&all_cpu_access_lock);
684 static inline void trace_access_lock_init(void)
688 for_each_possible_cpu(cpu)
689 mutex_init(&per_cpu(cpu_access_lock, cpu));
694 static DEFINE_MUTEX(access_lock);
696 static inline void trace_access_lock(int cpu)
699 mutex_lock(&access_lock);
702 static inline void trace_access_unlock(int cpu)
705 mutex_unlock(&access_lock);
708 static inline void trace_access_lock_init(void)
714 #ifdef CONFIG_STACKTRACE
715 static void __ftrace_trace_stack(struct ring_buffer *buffer,
717 int skip, int pc, struct pt_regs *regs);
718 static inline void ftrace_trace_stack(struct trace_array *tr,
719 struct ring_buffer *buffer,
721 int skip, int pc, struct pt_regs *regs);
724 static inline void __ftrace_trace_stack(struct ring_buffer *buffer,
726 int skip, int pc, struct pt_regs *regs)
729 static inline void ftrace_trace_stack(struct trace_array *tr,
730 struct ring_buffer *buffer,
732 int skip, int pc, struct pt_regs *regs)
738 static __always_inline void
739 trace_event_setup(struct ring_buffer_event *event,
740 int type, unsigned long flags, int pc)
742 struct trace_entry *ent = ring_buffer_event_data(event);
744 tracing_generic_entry_update(ent, flags, pc);
748 static __always_inline struct ring_buffer_event *
749 __trace_buffer_lock_reserve(struct ring_buffer *buffer,
752 unsigned long flags, int pc)
754 struct ring_buffer_event *event;
756 event = ring_buffer_lock_reserve(buffer, len);
758 trace_event_setup(event, type, flags, pc);
763 void tracer_tracing_on(struct trace_array *tr)
765 if (tr->trace_buffer.buffer)
766 ring_buffer_record_on(tr->trace_buffer.buffer);
768 * This flag is looked at when buffers haven't been allocated
769 * yet, or by some tracers (like irqsoff) that just want to
770 * know if the ring buffer has been disabled, and that can handle
771 * races where it gets disabled while we still do a record.
772 * As the check is in the fast path of the tracers, it is more
773 * important to be fast than accurate.
775 tr->buffer_disabled = 0;
776 /* Make the flag seen by readers */
781 * tracing_on - enable tracing buffers
783 * This function enables tracing buffers that may have been
784 * disabled with tracing_off.
786 void tracing_on(void)
788 tracer_tracing_on(&global_trace);
790 EXPORT_SYMBOL_GPL(tracing_on);
793 static __always_inline void
794 __buffer_unlock_commit(struct ring_buffer *buffer, struct ring_buffer_event *event)
796 __this_cpu_write(trace_taskinfo_save, true);
798 /* If this is the temp buffer, we need to commit fully */
799 if (this_cpu_read(trace_buffered_event) == event) {
800 /* Length is in event->array[0] */
801 ring_buffer_write(buffer, event->array[0], &event->array[1]);
802 /* Release the temp buffer */
803 this_cpu_dec(trace_buffered_event_cnt);
805 ring_buffer_unlock_commit(buffer, event);
809 * __trace_puts - write a constant string into the trace buffer.
810 * @ip: The address of the caller
811 * @str: The constant string to write
812 * @size: The size of the string.
814 int __trace_puts(unsigned long ip, const char *str, int size)
816 struct ring_buffer_event *event;
817 struct ring_buffer *buffer;
818 struct print_entry *entry;
819 unsigned long irq_flags;
823 if (!(global_trace.trace_flags & TRACE_ITER_PRINTK))
826 pc = preempt_count();
828 if (unlikely(tracing_selftest_running || tracing_disabled))
831 alloc = sizeof(*entry) + size + 2; /* possible \n added */
833 local_save_flags(irq_flags);
834 buffer = global_trace.trace_buffer.buffer;
835 event = __trace_buffer_lock_reserve(buffer, TRACE_PRINT, alloc,
840 entry = ring_buffer_event_data(event);
843 memcpy(&entry->buf, str, size);
845 /* Add a newline if necessary */
846 if (entry->buf[size - 1] != '\n') {
847 entry->buf[size] = '\n';
848 entry->buf[size + 1] = '\0';
850 entry->buf[size] = '\0';
852 __buffer_unlock_commit(buffer, event);
853 ftrace_trace_stack(&global_trace, buffer, irq_flags, 4, pc, NULL);
857 EXPORT_SYMBOL_GPL(__trace_puts);
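
/*
 * Usage sketch (not part of this file, guarded out of the build):
 * trace_puts() and trace_printk() are the normal entry points that end
 * up in __trace_puts()/__trace_bputs(); a driver can drop markers into
 * the ring buffer like this. The function below is hypothetical.
 */
#if 0
static void example_mark_trace(int value)
{
	trace_puts("example: entering slow path\n");
	trace_printk("example: value=%d\n", value);
}
#endif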
860 * __trace_bputs - write the pointer to a constant string into trace buffer
861 * @ip: The address of the caller
862 * @str: The constant string to write to the buffer to
864 int __trace_bputs(unsigned long ip, const char *str)
866 struct ring_buffer_event *event;
867 struct ring_buffer *buffer;
868 struct bputs_entry *entry;
869 unsigned long irq_flags;
870 int size = sizeof(struct bputs_entry);
873 if (!(global_trace.trace_flags & TRACE_ITER_PRINTK))
876 pc = preempt_count();
878 if (unlikely(tracing_selftest_running || tracing_disabled))
881 local_save_flags(irq_flags);
882 buffer = global_trace.trace_buffer.buffer;
883 event = __trace_buffer_lock_reserve(buffer, TRACE_BPUTS, size,
888 entry = ring_buffer_event_data(event);
892 __buffer_unlock_commit(buffer, event);
893 ftrace_trace_stack(&global_trace, buffer, irq_flags, 4, pc, NULL);
897 EXPORT_SYMBOL_GPL(__trace_bputs);
899 #ifdef CONFIG_TRACER_SNAPSHOT
900 void tracing_snapshot_instance(struct trace_array *tr)
902 struct tracer *tracer = tr->current_trace;
906 internal_trace_puts("*** SNAPSHOT CALLED FROM NMI CONTEXT ***\n");
907 internal_trace_puts("*** snapshot is being ignored ***\n");
911 if (!tr->allocated_snapshot) {
912 internal_trace_puts("*** SNAPSHOT NOT ALLOCATED ***\n");
913 internal_trace_puts("*** stopping trace here! ***\n");
918 /* Note, snapshot can not be used when the tracer uses it */
919 if (tracer->use_max_tr) {
920 internal_trace_puts("*** LATENCY TRACER ACTIVE ***\n");
921 internal_trace_puts("*** Can not use snapshot (sorry) ***\n");
925 local_irq_save(flags);
926 update_max_tr(tr, current, smp_processor_id());
927 local_irq_restore(flags);
931 * tracing_snapshot - take a snapshot of the current buffer.
933 * This causes a swap between the snapshot buffer and the current live
934 * tracing buffer. You can use this to take snapshots of the live
935 * trace when some condition is triggered, but continue to trace.
937 * Note, make sure to allocate the snapshot with either
938 * a tracing_snapshot_alloc(), or by doing it manually
939 * with: echo 1 > /sys/kernel/debug/tracing/snapshot
941 * If the snapshot buffer is not allocated, it will stop tracing.
942 * Basically making a permanent snapshot.
944 void tracing_snapshot(void)
946 struct trace_array *tr = &global_trace;
948 tracing_snapshot_instance(tr);
950 EXPORT_SYMBOL_GPL(tracing_snapshot);
952 static int resize_buffer_duplicate_size(struct trace_buffer *trace_buf,
953 struct trace_buffer *size_buf, int cpu_id);
954 static void set_buffer_entries(struct trace_buffer *buf, unsigned long val);
956 int tracing_alloc_snapshot_instance(struct trace_array *tr)
960 if (!tr->allocated_snapshot) {
962 /* allocate spare buffer */
963 ret = resize_buffer_duplicate_size(&tr->max_buffer,
964 &tr->trace_buffer, RING_BUFFER_ALL_CPUS);
968 tr->allocated_snapshot = true;
974 static void free_snapshot(struct trace_array *tr)
977 * We don't free the ring buffer; instead, we resize it because
978 * the max_tr ring buffer has some state (e.g. ring->clock) and
979 * we want to preserve it.
981 ring_buffer_resize(tr->max_buffer.buffer, 1, RING_BUFFER_ALL_CPUS);
982 set_buffer_entries(&tr->max_buffer, 1);
983 tracing_reset_online_cpus(&tr->max_buffer);
984 tr->allocated_snapshot = false;
988 * tracing_alloc_snapshot - allocate snapshot buffer.
990 * This only allocates the snapshot buffer if it isn't already
991 * allocated - it doesn't also take a snapshot.
993 * This is meant to be used in cases where the snapshot buffer needs
994 * to be set up for events that can't sleep but need to be able to
995 * trigger a snapshot.
997 int tracing_alloc_snapshot(void)
999 struct trace_array *tr = &global_trace;
1002 ret = tracing_alloc_snapshot_instance(tr);
1007 EXPORT_SYMBOL_GPL(tracing_alloc_snapshot);
1010 * tracing_snapshot_alloc - allocate and take a snapshot of the current buffer.
1012 * This is similar to tracing_snapshot(), but it will allocate the
1013 * snapshot buffer if it isn't already allocated. Use this only
1014 * where it is safe to sleep, as the allocation may sleep.
1016 * This causes a swap between the snapshot buffer and the current live
1017 * tracing buffer. You can use this to take snapshots of the live
1018 * trace when some condition is triggered, but continue to trace.
1020 void tracing_snapshot_alloc(void)
1024 ret = tracing_alloc_snapshot();
1030 EXPORT_SYMBOL_GPL(tracing_snapshot_alloc);
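
/*
 * Usage sketch (hypothetical caller, guarded out of the build): allocate
 * the snapshot buffer once from sleepable context, then take snapshots
 * when the condition of interest fires without stopping the live trace.
 */
#if 0
static int example_snapshot_setup(void)
{
	/* May sleep: only call from process context */
	return tracing_alloc_snapshot();
}

static void example_snapshot_on_condition(bool condition)
{
	if (condition)
		tracing_snapshot();	/* safe in atomic context once allocated */
}
#endif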
1032 void tracing_snapshot(void)
1034 WARN_ONCE(1, "Snapshot feature not enabled, but internal snapshot used");
1036 EXPORT_SYMBOL_GPL(tracing_snapshot);
1037 int tracing_alloc_snapshot(void)
1039 WARN_ONCE(1, "Snapshot feature not enabled, but snapshot allocation used");
1042 EXPORT_SYMBOL_GPL(tracing_alloc_snapshot);
1043 void tracing_snapshot_alloc(void)
1048 EXPORT_SYMBOL_GPL(tracing_snapshot_alloc);
1049 #endif /* CONFIG_TRACER_SNAPSHOT */
1051 void tracer_tracing_off(struct trace_array *tr)
1053 if (tr->trace_buffer.buffer)
1054 ring_buffer_record_off(tr->trace_buffer.buffer);
1056 * This flag is looked at when buffers haven't been allocated
1057 * yet, or by some tracers (like irqsoff) that just want to
1058 * know if the ring buffer has been disabled, and that can handle
1059 * races where it gets disabled while we still do a record.
1060 * As the check is in the fast path of the tracers, it is more
1061 * important to be fast than accurate.
1063 tr->buffer_disabled = 1;
1064 /* Make the flag seen by readers */
1069 * tracing_off - turn off tracing buffers
1071 * This function stops the tracing buffers from recording data.
1072 * It does not disable any overhead the tracers themselves may
1073 * be causing. This function simply causes all recording to
1074 * the ring buffers to fail.
1076 void tracing_off(void)
1078 tracer_tracing_off(&global_trace);
1080 EXPORT_SYMBOL_GPL(tracing_off);
1082 void disable_trace_on_warning(void)
1084 if (__disable_trace_on_warning)
1089 * tracer_tracing_is_on - show real state of ring buffer enabled
1090 * @tr : the trace array to know if ring buffer is enabled
1092 * Shows real state of the ring buffer if it is enabled or not.
1094 bool tracer_tracing_is_on(struct trace_array *tr)
1096 if (tr->trace_buffer.buffer)
1097 return ring_buffer_record_is_on(tr->trace_buffer.buffer);
1098 return !tr->buffer_disabled;
1102 * tracing_is_on - show state of ring buffers enabled
1104 int tracing_is_on(void)
1106 return tracer_tracing_is_on(&global_trace);
1108 EXPORT_SYMBOL_GPL(tracing_is_on);
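
/*
 * Usage sketch (hypothetical, guarded out of the build): freeze the ring
 * buffers right after a suspicious event so the trace leading up to it
 * is preserved, and let an operator re-enable them with tracing_on()
 * later.
 */
#if 0
static void example_freeze_trace_on_error(int err)
{
	if (err && tracing_is_on()) {
		trace_printk("example: error %d, freezing trace\n", err);
		tracing_off();
	}
}
#endif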
1110 static int __init set_buf_size(char *str)
1112 unsigned long buf_size;
1116 buf_size = memparse(str, &str);
1117 /* nr_entries can not be zero */
1120 trace_buf_size = buf_size;
1123 __setup("trace_buf_size=", set_buf_size);
1125 static int __init set_tracing_thresh(char *str)
1127 unsigned long threshold;
1132 ret = kstrtoul(str, 0, &threshold);
1135 tracing_thresh = threshold * 1000;
1138 __setup("tracing_thresh=", set_tracing_thresh);
1140 unsigned long nsecs_to_usecs(unsigned long nsecs)
1142 return nsecs / 1000;
1146 * TRACE_FLAGS is defined as a tuple matching bit masks with strings.
1147 * It uses C(a, b) where 'a' is the eval (enum) name and 'b' is the string that
1148 * matches it. By defining "C(a, b) b", TRACE_FLAGS becomes a list
1149 * of strings in the order that the evals (enum) were defined.
1154 /* These must match the bit positions in trace_iterator_flags */
1155 static const char *trace_options[] = {
1163 int in_ns; /* is this clock in nanoseconds? */
1164 } trace_clocks[] = {
1165 { trace_clock_local, "local", 1 },
1166 { trace_clock_global, "global", 1 },
1167 { trace_clock_counter, "counter", 0 },
1168 { trace_clock_jiffies, "uptime", 0 },
1169 { trace_clock, "perf", 1 },
1170 { ktime_get_mono_fast_ns, "mono", 1 },
1171 { ktime_get_raw_fast_ns, "mono_raw", 1 },
1172 { ktime_get_boot_fast_ns, "boot", 1 },
1176 bool trace_clock_in_ns(struct trace_array *tr)
1178 if (trace_clocks[tr->clock_id].in_ns)
1185 * trace_parser_get_init - gets the buffer for trace parser
1187 int trace_parser_get_init(struct trace_parser *parser, int size)
1189 memset(parser, 0, sizeof(*parser));
1191 parser->buffer = kmalloc(size, GFP_KERNEL);
1192 if (!parser->buffer)
1195 parser->size = size;
1200 * trace_parser_put - frees the buffer for trace parser
1202 void trace_parser_put(struct trace_parser *parser)
1204 kfree(parser->buffer);
1205 parser->buffer = NULL;
1209 * trace_get_user - reads the user input string separated by space
1210 * (matched by isspace(ch))
1212 * For each string found the 'struct trace_parser' is updated,
1213 * and the function returns.
1215 * Returns number of bytes read.
1217 * See kernel/trace/trace.h for 'struct trace_parser' details.
1219 int trace_get_user(struct trace_parser *parser, const char __user *ubuf,
1220 size_t cnt, loff_t *ppos)
1227 trace_parser_clear(parser);
1229 ret = get_user(ch, ubuf++);
1237 * The parser is not finished with the last write,
1238 * continue reading the user input without skipping spaces.
1240 if (!parser->cont) {
1241 /* skip white space */
1242 while (cnt && isspace(ch)) {
1243 ret = get_user(ch, ubuf++);
1252 /* only spaces were written */
1253 if (isspace(ch) || !ch) {
1260 /* read the non-space input */
1261 while (cnt && !isspace(ch) && ch) {
1262 if (parser->idx < parser->size - 1)
1263 parser->buffer[parser->idx++] = ch;
1268 ret = get_user(ch, ubuf++);
1275 /* We either got finished input or we have to wait for another call. */
1276 if (isspace(ch) || !ch) {
1277 parser->buffer[parser->idx] = 0;
1278 parser->cont = false;
1279 } else if (parser->idx < parser->size - 1) {
1280 parser->cont = true;
1281 parser->buffer[parser->idx++] = ch;
1282 /* Make sure the parsed string always terminates with '\0'. */
1283 parser->buffer[parser->idx] = 0;
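
/*
 * Sketch of a typical consumer (hypothetical file write handler, guarded
 * out of the build): read one whitespace-separated token per call and
 * hand it to some setter. This is the pattern the pid/option files in
 * this file follow; error handling is reduced to the essentials.
 */
#if 0
static ssize_t example_token_write(struct file *filp, const char __user *ubuf,
				   size_t cnt, loff_t *ppos)
{
	struct trace_parser parser;
	int read;

	if (trace_parser_get_init(&parser, 64))
		return -ENOMEM;

	read = trace_get_user(&parser, ubuf, cnt, ppos);
	if (read > 0 && trace_parser_loaded(&parser)) {
		/* parser.buffer now holds one NUL-terminated token */
		pr_debug("example: token '%s'\n", parser.buffer);
	}

	trace_parser_put(&parser);
	return read;
}
#endif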
1296 /* TODO add a seq_buf_to_buffer() */
1297 static ssize_t trace_seq_to_buffer(struct trace_seq *s, void *buf, size_t cnt)
1301 if (trace_seq_used(s) <= s->seq.readpos)
1304 len = trace_seq_used(s) - s->seq.readpos;
1307 memcpy(buf, s->buffer + s->seq.readpos, cnt);
1309 s->seq.readpos += cnt;
1313 unsigned long __read_mostly tracing_thresh;
1315 #ifdef CONFIG_TRACER_MAX_TRACE
1317 * Copy the new maximum trace into the separate maximum-trace
1318 * structure. (this way the maximum trace is permanently saved,
1319 * for later retrieval via /sys/kernel/tracing/tracing_max_latency)
1322 __update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu)
1324 struct trace_buffer *trace_buf = &tr->trace_buffer;
1325 struct trace_buffer *max_buf = &tr->max_buffer;
1326 struct trace_array_cpu *data = per_cpu_ptr(trace_buf->data, cpu);
1327 struct trace_array_cpu *max_data = per_cpu_ptr(max_buf->data, cpu);
1330 max_buf->time_start = data->preempt_timestamp;
1332 max_data->saved_latency = tr->max_latency;
1333 max_data->critical_start = data->critical_start;
1334 max_data->critical_end = data->critical_end;
1336 memcpy(max_data->comm, tsk->comm, TASK_COMM_LEN);
1337 max_data->pid = tsk->pid;
1339 * If tsk == current, then use current_uid(), as that does not use
1340 * RCU. The irq tracer can be called out of RCU scope.
1343 max_data->uid = current_uid();
1345 max_data->uid = task_uid(tsk);
1347 max_data->nice = tsk->static_prio - 20 - MAX_RT_PRIO;
1348 max_data->policy = tsk->policy;
1349 max_data->rt_priority = tsk->rt_priority;
1351 /* record this tasks comm */
1352 tracing_record_cmdline(tsk);
1356 * update_max_tr - snapshot all trace buffers from global_trace to max_tr
1358 * @tsk: the task with the latency
1359 * @cpu: The cpu that initiated the trace.
1361 * Flip the buffers between the @tr and the max_tr and record information
1362 * about which task was the cause of this latency.
1365 update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu)
1370 WARN_ON_ONCE(!irqs_disabled());
1372 if (!tr->allocated_snapshot) {
1373 /* Only the nop tracer should hit this when disabling */
1374 WARN_ON_ONCE(tr->current_trace != &nop_trace);
1378 arch_spin_lock(&tr->max_lock);
1380 /* Inherit the recordable setting from trace_buffer */
1381 if (ring_buffer_record_is_set_on(tr->trace_buffer.buffer))
1382 ring_buffer_record_on(tr->max_buffer.buffer);
1384 ring_buffer_record_off(tr->max_buffer.buffer);
1386 swap(tr->trace_buffer.buffer, tr->max_buffer.buffer);
1388 __update_max_tr(tr, tsk, cpu);
1389 arch_spin_unlock(&tr->max_lock);
1393 * update_max_tr_single - only copy one trace over, and reset the rest
1395 * @tsk - task with the latency
1396 * @cpu - the cpu of the buffer to copy.
1398 * Flip the trace of a single CPU buffer between the @tr and the max_tr.
1401 update_max_tr_single(struct trace_array *tr, struct task_struct *tsk, int cpu)
1408 WARN_ON_ONCE(!irqs_disabled());
1409 if (!tr->allocated_snapshot) {
1410 /* Only the nop tracer should hit this when disabling */
1411 WARN_ON_ONCE(tr->current_trace != &nop_trace);
1415 arch_spin_lock(&tr->max_lock);
1417 ret = ring_buffer_swap_cpu(tr->max_buffer.buffer, tr->trace_buffer.buffer, cpu);
1419 if (ret == -EBUSY) {
1421 * We failed to swap the buffer due to a commit taking
1422 * place on this CPU. We fail to record, but we reset
1423 * the max trace buffer (no one writes directly to it)
1424 * and flag that it failed.
1426 trace_array_printk_buf(tr->max_buffer.buffer, _THIS_IP_,
1427 "Failed to swap buffers due to commit in progress\n");
1430 WARN_ON_ONCE(ret && ret != -EAGAIN && ret != -EBUSY);
1432 __update_max_tr(tr, tsk, cpu);
1433 arch_spin_unlock(&tr->max_lock);
1435 #endif /* CONFIG_TRACER_MAX_TRACE */
1437 static int wait_on_pipe(struct trace_iterator *iter, bool full)
1439 /* Iterators are static, they should be filled or empty */
1440 if (trace_buffer_iter(iter, iter->cpu_file))
1443 return ring_buffer_wait(iter->trace_buffer->buffer, iter->cpu_file,
1447 #ifdef CONFIG_FTRACE_STARTUP_TEST
1448 static bool selftests_can_run;
1450 struct trace_selftests {
1451 struct list_head list;
1452 struct tracer *type;
1455 static LIST_HEAD(postponed_selftests);
1457 static int save_selftest(struct tracer *type)
1459 struct trace_selftests *selftest;
1461 selftest = kmalloc(sizeof(*selftest), GFP_KERNEL);
1465 selftest->type = type;
1466 list_add(&selftest->list, &postponed_selftests);
1470 static int run_tracer_selftest(struct tracer *type)
1472 struct trace_array *tr = &global_trace;
1473 struct tracer *saved_tracer = tr->current_trace;
1476 if (!type->selftest || tracing_selftest_disabled)
1480 * If a tracer registers early in boot up (before scheduling is
1481 * initialized and such), then do not run its selftests yet.
1482 * Instead, run it a little later in the boot process.
1484 if (!selftests_can_run)
1485 return save_selftest(type);
1488 * Run a selftest on this tracer.
1489 * Here we reset the trace buffer, and set the current
1490 * tracer to be this tracer. The tracer can then run some
1491 * internal tracing to verify that everything is in order.
1492 * If we fail, we do not register this tracer.
1494 tracing_reset_online_cpus(&tr->trace_buffer);
1496 tr->current_trace = type;
1498 #ifdef CONFIG_TRACER_MAX_TRACE
1499 if (type->use_max_tr) {
1500 /* If we expanded the buffers, make sure the max is expanded too */
1501 if (ring_buffer_expanded)
1502 ring_buffer_resize(tr->max_buffer.buffer, trace_buf_size,
1503 RING_BUFFER_ALL_CPUS);
1504 tr->allocated_snapshot = true;
1508 /* the test is responsible for initializing and enabling */
1509 pr_info("Testing tracer %s: ", type->name);
1510 ret = type->selftest(type, tr);
1511 /* the test is responsible for resetting too */
1512 tr->current_trace = saved_tracer;
1514 printk(KERN_CONT "FAILED!\n");
1515 /* Add the warning after printing 'FAILED' */
1519 /* Only reset on passing, to avoid touching corrupted buffers */
1520 tracing_reset_online_cpus(&tr->trace_buffer);
1522 #ifdef CONFIG_TRACER_MAX_TRACE
1523 if (type->use_max_tr) {
1524 tr->allocated_snapshot = false;
1526 /* Shrink the max buffer again */
1527 if (ring_buffer_expanded)
1528 ring_buffer_resize(tr->max_buffer.buffer, 1,
1529 RING_BUFFER_ALL_CPUS);
1533 printk(KERN_CONT "PASSED\n");
1537 static __init int init_trace_selftests(void)
1539 struct trace_selftests *p, *n;
1540 struct tracer *t, **last;
1543 selftests_can_run = true;
1545 mutex_lock(&trace_types_lock);
1547 if (list_empty(&postponed_selftests))
1550 pr_info("Running postponed tracer tests:\n");
1552 tracing_selftest_running = true;
1553 list_for_each_entry_safe(p, n, &postponed_selftests, list) {
1554 ret = run_tracer_selftest(p->type);
1555 /* If the test fails, then warn and remove from available_tracers */
1557 WARN(1, "tracer: %s failed selftest, disabling\n",
1559 last = &trace_types;
1560 for (t = trace_types; t; t = t->next) {
1571 tracing_selftest_running = false;
1574 mutex_unlock(&trace_types_lock);
1578 core_initcall(init_trace_selftests);
1580 static inline int run_tracer_selftest(struct tracer *type)
1584 #endif /* CONFIG_FTRACE_STARTUP_TEST */
1586 static void add_tracer_options(struct trace_array *tr, struct tracer *t);
1588 static void __init apply_trace_boot_options(void);
1591 * register_tracer - register a tracer with the ftrace system.
1592 * @type - the plugin for the tracer
1594 * Register a new plugin tracer.
1596 int __init register_tracer(struct tracer *type)
1602 pr_info("Tracer must have a name\n");
1606 if (strlen(type->name) >= MAX_TRACER_SIZE) {
1607 pr_info("Tracer has a name longer than %d\n", MAX_TRACER_SIZE);
1611 mutex_lock(&trace_types_lock);
1613 tracing_selftest_running = true;
1615 for (t = trace_types; t; t = t->next) {
1616 if (strcmp(type->name, t->name) == 0) {
1618 pr_info("Tracer %s already registered\n",
1625 if (!type->set_flag)
1626 type->set_flag = &dummy_set_flag;
1628 /* allocate a dummy tracer_flags */
1629 type->flags = kmalloc(sizeof(*type->flags), GFP_KERNEL);
1634 type->flags->val = 0;
1635 type->flags->opts = dummy_tracer_opt;
1637 if (!type->flags->opts)
1638 type->flags->opts = dummy_tracer_opt;
1640 /* store the tracer for __set_tracer_option */
1641 type->flags->trace = type;
1643 ret = run_tracer_selftest(type);
1647 type->next = trace_types;
1649 add_tracer_options(&global_trace, type);
1652 tracing_selftest_running = false;
1653 mutex_unlock(&trace_types_lock);
1655 if (ret || !default_bootup_tracer)
1658 if (strncmp(default_bootup_tracer, type->name, MAX_TRACER_SIZE))
1661 printk(KERN_INFO "Starting tracer '%s'\n", type->name);
1662 /* Do we want this tracer to start on bootup? */
1663 tracing_set_tracer(&global_trace, type->name);
1664 default_bootup_tracer = NULL;
1666 apply_trace_boot_options();
1668 /* disable other selftests, since this will break it. */
1669 tracing_selftest_disabled = true;
1670 #ifdef CONFIG_FTRACE_STARTUP_TEST
1671 printk(KERN_INFO "Disabling FTRACE selftests due to running tracer '%s'\n",
1679 void tracing_reset(struct trace_buffer *buf, int cpu)
1681 struct ring_buffer *buffer = buf->buffer;
1686 ring_buffer_record_disable(buffer);
1688 /* Make sure all commits have finished */
1689 synchronize_sched();
1690 ring_buffer_reset_cpu(buffer, cpu);
1692 ring_buffer_record_enable(buffer);
1695 void tracing_reset_online_cpus(struct trace_buffer *buf)
1697 struct ring_buffer *buffer = buf->buffer;
1703 ring_buffer_record_disable(buffer);
1705 /* Make sure all commits have finished */
1706 synchronize_sched();
1708 buf->time_start = buffer_ftrace_now(buf, buf->cpu);
1710 for_each_online_cpu(cpu)
1711 ring_buffer_reset_cpu(buffer, cpu);
1713 ring_buffer_record_enable(buffer);
1716 /* Must have trace_types_lock held */
1717 void tracing_reset_all_online_cpus(void)
1719 struct trace_array *tr;
1721 list_for_each_entry(tr, &ftrace_trace_arrays, list) {
1722 if (!tr->clear_trace)
1724 tr->clear_trace = false;
1725 tracing_reset_online_cpus(&tr->trace_buffer);
1726 #ifdef CONFIG_TRACER_MAX_TRACE
1727 tracing_reset_online_cpus(&tr->max_buffer);
1733 * The tgid_map array maps from pid to tgid; i.e. the value stored at index i
1734 * is the tgid last observed corresponding to pid=i.
1736 static int *tgid_map;
1738 /* The maximum valid index into tgid_map. */
1739 static size_t tgid_map_max;
1741 #define SAVED_CMDLINES_DEFAULT 128
1742 #define NO_CMDLINE_MAP UINT_MAX
1743 static arch_spinlock_t trace_cmdline_lock = __ARCH_SPIN_LOCK_UNLOCKED;
1744 struct saved_cmdlines_buffer {
1745 unsigned map_pid_to_cmdline[PID_MAX_DEFAULT+1];
1746 unsigned *map_cmdline_to_pid;
1747 unsigned cmdline_num;
1749 char *saved_cmdlines;
1751 static struct saved_cmdlines_buffer *savedcmd;
1753 static inline char *get_saved_cmdlines(int idx)
1755 return &savedcmd->saved_cmdlines[idx * TASK_COMM_LEN];
1758 static inline void set_cmdline(int idx, const char *cmdline)
1760 memcpy(get_saved_cmdlines(idx), cmdline, TASK_COMM_LEN);
1763 static int allocate_cmdlines_buffer(unsigned int val,
1764 struct saved_cmdlines_buffer *s)
1766 s->map_cmdline_to_pid = kmalloc_array(val,
1767 sizeof(*s->map_cmdline_to_pid),
1769 if (!s->map_cmdline_to_pid)
1772 s->saved_cmdlines = kmalloc_array(TASK_COMM_LEN, val, GFP_KERNEL);
1773 if (!s->saved_cmdlines) {
1774 kfree(s->map_cmdline_to_pid);
1779 s->cmdline_num = val;
1780 memset(&s->map_pid_to_cmdline, NO_CMDLINE_MAP,
1781 sizeof(s->map_pid_to_cmdline));
1782 memset(s->map_cmdline_to_pid, NO_CMDLINE_MAP,
1783 val * sizeof(*s->map_cmdline_to_pid));
1788 static int trace_create_savedcmd(void)
1792 savedcmd = kmalloc(sizeof(*savedcmd), GFP_KERNEL);
1796 ret = allocate_cmdlines_buffer(SAVED_CMDLINES_DEFAULT, savedcmd);
1806 int is_tracing_stopped(void)
1808 return global_trace.stop_count;
1812 * tracing_start - quick start of the tracer
1814 * If tracing is enabled but was stopped by tracing_stop,
1815 * this will start the tracer back up.
1817 void tracing_start(void)
1819 struct ring_buffer *buffer;
1820 unsigned long flags;
1822 if (tracing_disabled)
1825 raw_spin_lock_irqsave(&global_trace.start_lock, flags);
1826 if (--global_trace.stop_count) {
1827 if (global_trace.stop_count < 0) {
1828 /* Someone screwed up their debugging */
1830 global_trace.stop_count = 0;
1835 /* Prevent the buffers from switching */
1836 arch_spin_lock(&global_trace.max_lock);
1838 buffer = global_trace.trace_buffer.buffer;
1840 ring_buffer_record_enable(buffer);
1842 #ifdef CONFIG_TRACER_MAX_TRACE
1843 buffer = global_trace.max_buffer.buffer;
1845 ring_buffer_record_enable(buffer);
1848 arch_spin_unlock(&global_trace.max_lock);
1851 raw_spin_unlock_irqrestore(&global_trace.start_lock, flags);
1854 static void tracing_start_tr(struct trace_array *tr)
1856 struct ring_buffer *buffer;
1857 unsigned long flags;
1859 if (tracing_disabled)
1862 /* If global, we need to also start the max tracer */
1863 if (tr->flags & TRACE_ARRAY_FL_GLOBAL)
1864 return tracing_start();
1866 raw_spin_lock_irqsave(&tr->start_lock, flags);
1868 if (--tr->stop_count) {
1869 if (tr->stop_count < 0) {
1870 /* Someone screwed up their debugging */
1877 buffer = tr->trace_buffer.buffer;
1879 ring_buffer_record_enable(buffer);
1882 raw_spin_unlock_irqrestore(&tr->start_lock, flags);
1886 * tracing_stop - quick stop of the tracer
1888 * Lightweight way to stop tracing. Use in conjunction with tracing_start().
1891 void tracing_stop(void)
1893 struct ring_buffer *buffer;
1894 unsigned long flags;
1896 raw_spin_lock_irqsave(&global_trace.start_lock, flags);
1897 if (global_trace.stop_count++)
1900 /* Prevent the buffers from switching */
1901 arch_spin_lock(&global_trace.max_lock);
1903 buffer = global_trace.trace_buffer.buffer;
1905 ring_buffer_record_disable(buffer);
1907 #ifdef CONFIG_TRACER_MAX_TRACE
1908 buffer = global_trace.max_buffer.buffer;
1910 ring_buffer_record_disable(buffer);
1913 arch_spin_unlock(&global_trace.max_lock);
1916 raw_spin_unlock_irqrestore(&global_trace.start_lock, flags);
1919 static void tracing_stop_tr(struct trace_array *tr)
1921 struct ring_buffer *buffer;
1922 unsigned long flags;
1924 /* If global, we need to also stop the max tracer */
1925 if (tr->flags & TRACE_ARRAY_FL_GLOBAL)
1926 return tracing_stop();
1928 raw_spin_lock_irqsave(&tr->start_lock, flags);
1929 if (tr->stop_count++)
1932 buffer = tr->trace_buffer.buffer;
1934 ring_buffer_record_disable(buffer);
1937 raw_spin_unlock_irqrestore(&tr->start_lock, flags);
1940 static int trace_save_cmdline(struct task_struct *tsk)
1944 /* treat recording of idle task as a success */
1948 tpid = tsk->pid & (PID_MAX_DEFAULT - 1);
1951 * It's not the end of the world if we don't get
1952 * the lock, but we also don't want to spin
1953 * nor do we want to disable interrupts,
1954 * so if we miss here, then better luck next time.
1956 if (!arch_spin_trylock(&trace_cmdline_lock))
1959 idx = savedcmd->map_pid_to_cmdline[tpid];
1960 if (idx == NO_CMDLINE_MAP) {
1961 idx = (savedcmd->cmdline_idx + 1) % savedcmd->cmdline_num;
1963 savedcmd->map_pid_to_cmdline[tpid] = idx;
1964 savedcmd->cmdline_idx = idx;
1967 savedcmd->map_cmdline_to_pid[idx] = tsk->pid;
1968 set_cmdline(idx, tsk->comm);
1970 arch_spin_unlock(&trace_cmdline_lock);
1975 static void __trace_find_cmdline(int pid, char comm[])
1981 strcpy(comm, "<idle>");
1985 if (WARN_ON_ONCE(pid < 0)) {
1986 strcpy(comm, "<XXX>");
1990 tpid = pid & (PID_MAX_DEFAULT - 1);
1991 map = savedcmd->map_pid_to_cmdline[tpid];
1992 if (map != NO_CMDLINE_MAP) {
1993 tpid = savedcmd->map_cmdline_to_pid[map];
1995 strlcpy(comm, get_saved_cmdlines(map), TASK_COMM_LEN);
1999 strcpy(comm, "<...>");
2002 void trace_find_cmdline(int pid, char comm[])
2005 arch_spin_lock(&trace_cmdline_lock);
2007 __trace_find_cmdline(pid, comm);
2009 arch_spin_unlock(&trace_cmdline_lock);
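
/*
 * Usage sketch (hypothetical, guarded out of the build): output code
 * resolves a recorded pid back to a command name like this.
 */
#if 0
static void example_print_comm(struct seq_file *m, int pid)
{
	char comm[TASK_COMM_LEN];

	trace_find_cmdline(pid, comm);
	seq_printf(m, "%s-%d\n", comm, pid);
}
#endif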
2013 static int *trace_find_tgid_ptr(int pid)
2016 * Pairs with the smp_store_release in set_tracer_flag() to ensure that
2017 * if we observe a non-NULL tgid_map then we also observe the correct tgid_map_max.
2020 int *map = smp_load_acquire(&tgid_map);
2022 if (unlikely(!map || pid > tgid_map_max))
2028 int trace_find_tgid(int pid)
2030 int *ptr = trace_find_tgid_ptr(pid);
2032 return ptr ? *ptr : 0;
2035 static int trace_save_tgid(struct task_struct *tsk)
2039 /* treat recording of idle task as a success */
2043 ptr = trace_find_tgid_ptr(tsk->pid);
2051 static bool tracing_record_taskinfo_skip(int flags)
2053 if (unlikely(!(flags & (TRACE_RECORD_CMDLINE | TRACE_RECORD_TGID))))
2055 if (!__this_cpu_read(trace_taskinfo_save))
2061 * tracing_record_taskinfo - record the task info of a task
2063 * @task - task to record
2064 * @flags - TRACE_RECORD_CMDLINE for recording comm
2065 * - TRACE_RECORD_TGID for recording tgid
2067 void tracing_record_taskinfo(struct task_struct *task, int flags)
2071 if (tracing_record_taskinfo_skip(flags))
2075 * Record as much task information as possible. If some fail, continue
2076 * to try to record the others.
2078 done = !(flags & TRACE_RECORD_CMDLINE) || trace_save_cmdline(task);
2079 done &= !(flags & TRACE_RECORD_TGID) || trace_save_tgid(task);
2081 /* If recording any information failed, retry again soon. */
2085 __this_cpu_write(trace_taskinfo_save, false);
2089 * tracing_record_taskinfo_sched_switch - record task info for sched_switch
2091 * @prev - previous task during sched_switch
2092 * @next - next task during sched_switch
2093 * @flags - TRACE_RECORD_CMDLINE for recording comm
2094 * TRACE_RECORD_TGID for recording tgid
2096 void tracing_record_taskinfo_sched_switch(struct task_struct *prev,
2097 struct task_struct *next, int flags)
2101 if (tracing_record_taskinfo_skip(flags))
2105 * Record as much task information as possible. If some fail, continue
2106 * to try to record the others.
2108 done = !(flags & TRACE_RECORD_CMDLINE) || trace_save_cmdline(prev);
2109 done &= !(flags & TRACE_RECORD_CMDLINE) || trace_save_cmdline(next);
2110 done &= !(flags & TRACE_RECORD_TGID) || trace_save_tgid(prev);
2111 done &= !(flags & TRACE_RECORD_TGID) || trace_save_tgid(next);
2113 /* If recording any information failed, retry again soon. */
2117 __this_cpu_write(trace_taskinfo_save, false);
2120 /* Helpers to record a specific task information */
2121 void tracing_record_cmdline(struct task_struct *task)
2123 tracing_record_taskinfo(task, TRACE_RECORD_CMDLINE);
2126 void tracing_record_tgid(struct task_struct *task)
2128 tracing_record_taskinfo(task, TRACE_RECORD_TGID);
2132 * Several functions return TRACE_TYPE_PARTIAL_LINE if the trace_seq
2133 * overflowed, and TRACE_TYPE_HANDLED otherwise. This helper function
2134 * simplifies those functions and keeps them in sync.
2136 enum print_line_t trace_handle_return(struct trace_seq *s)
2138 return trace_seq_has_overflowed(s) ?
2139 TRACE_TYPE_PARTIAL_LINE : TRACE_TYPE_HANDLED;
2141 EXPORT_SYMBOL_GPL(trace_handle_return);
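
/*
 * Usage sketch (hypothetical event output callback, guarded out of the
 * build): format the entry into the iterator's trace_seq and let
 * trace_handle_return() collapse the overflow check into one call.
 */
#if 0
static enum print_line_t example_event_output(struct trace_iterator *iter,
					      int flags, struct trace_event *event)
{
	trace_seq_printf(&iter->seq, "example event on cpu %d\n", iter->cpu);

	return trace_handle_return(&iter->seq);
}
#endif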
2144 tracing_generic_entry_update(struct trace_entry *entry, unsigned long flags,
2147 struct task_struct *tsk = current;
2149 entry->preempt_count = pc & 0xff;
2150 entry->pid = (tsk) ? tsk->pid : 0;
2152 #ifdef CONFIG_TRACE_IRQFLAGS_SUPPORT
2153 (irqs_disabled_flags(flags) ? TRACE_FLAG_IRQS_OFF : 0) |
2155 TRACE_FLAG_IRQS_NOSUPPORT |
2157 ((pc & NMI_MASK ) ? TRACE_FLAG_NMI : 0) |
2158 ((pc & HARDIRQ_MASK) ? TRACE_FLAG_HARDIRQ : 0) |
2159 ((pc & SOFTIRQ_OFFSET) ? TRACE_FLAG_SOFTIRQ : 0) |
2160 (tif_need_resched() ? TRACE_FLAG_NEED_RESCHED : 0) |
2161 (test_preempt_need_resched() ? TRACE_FLAG_PREEMPT_RESCHED : 0);
2163 EXPORT_SYMBOL_GPL(tracing_generic_entry_update);
2165 struct ring_buffer_event *
2166 trace_buffer_lock_reserve(struct ring_buffer *buffer,
2169 unsigned long flags, int pc)
2171 return __trace_buffer_lock_reserve(buffer, type, len, flags, pc);
2174 DEFINE_PER_CPU(struct ring_buffer_event *, trace_buffered_event);
2175 DEFINE_PER_CPU(int, trace_buffered_event_cnt);
2176 static int trace_buffered_event_ref;
2179 * trace_buffered_event_enable - enable buffering events
2181 * When events are being filtered, it is quicker to use a temporary
2182 * buffer to write the event data into if there's a likely chance
2183 * that it will not be committed. The discard of the ring buffer
2184 * is not as fast as committing, and is much slower than copying to the temp buffer.
2187 * When an event is to be filtered, allocate per cpu buffers to
2188 * write the event data into, and if the event is filtered and discarded
2189 * it is simply dropped; otherwise, the entire data is to be committed and not copied.
2192 void trace_buffered_event_enable(void)
2194 struct ring_buffer_event *event;
2198 WARN_ON_ONCE(!mutex_is_locked(&event_mutex));
2200 if (trace_buffered_event_ref++)
2203 for_each_tracing_cpu(cpu) {
2204 page = alloc_pages_node(cpu_to_node(cpu),
2205 GFP_KERNEL | __GFP_NORETRY, 0);
2209 event = page_address(page);
2210 memset(event, 0, sizeof(*event));
2212 per_cpu(trace_buffered_event, cpu) = event;
2215 if (cpu == smp_processor_id() &&
2216 this_cpu_read(trace_buffered_event) !=
2217 per_cpu(trace_buffered_event, cpu))
2224 trace_buffered_event_disable();
2227 static void enable_trace_buffered_event(void *data)
2229 /* Probably not needed, but do it anyway */
2231 this_cpu_dec(trace_buffered_event_cnt);
2234 static void disable_trace_buffered_event(void *data)
2236 this_cpu_inc(trace_buffered_event_cnt);
2240 * trace_buffered_event_disable - disable buffering events
2242 * When a filter is removed, it is faster to not use the buffered
2243 * events, and to commit directly into the ring buffer. Free up
2244 * the temp buffers when there are no more users. This requires
2245 * special synchronization with current events.
2247 void trace_buffered_event_disable(void)
2251 WARN_ON_ONCE(!mutex_is_locked(&event_mutex));
2253 if (WARN_ON_ONCE(!trace_buffered_event_ref))
2256 if (--trace_buffered_event_ref)
2260 /* For each CPU, set the buffer as used. */
2261 smp_call_function_many(tracing_buffer_mask,
2262 disable_trace_buffered_event, NULL, 1);
2265 /* Wait for all current users to finish */
2266 synchronize_sched();
2268 for_each_tracing_cpu(cpu) {
2269 free_page((unsigned long)per_cpu(trace_buffered_event, cpu));
2270 per_cpu(trace_buffered_event, cpu) = NULL;
2273 * Make sure trace_buffered_event is NULL before clearing
2274 * trace_buffered_event_cnt.
2279 /* Do the work on each cpu */
2280 smp_call_function_many(tracing_buffer_mask,
2281 enable_trace_buffered_event, NULL, 1);
2285 static struct ring_buffer *temp_buffer;
2287 struct ring_buffer_event *
2288 trace_event_buffer_lock_reserve(struct ring_buffer **current_rb,
2289 struct trace_event_file *trace_file,
2290 int type, unsigned long len,
2291 unsigned long flags, int pc)
2293 struct ring_buffer_event *entry;
2296 *current_rb = trace_file->tr->trace_buffer.buffer;
2298 if (!ring_buffer_time_stamp_abs(*current_rb) && (trace_file->flags &
2299 (EVENT_FILE_FL_SOFT_DISABLED | EVENT_FILE_FL_FILTERED)) &&
2300 (entry = this_cpu_read(trace_buffered_event))) {
2301 /* Try to use the per cpu buffer first */
2302 val = this_cpu_inc_return(trace_buffered_event_cnt);
2303 if ((len < (PAGE_SIZE - sizeof(*entry) - sizeof(entry->array[0]))) && val == 1) {
2304 trace_event_setup(entry, type, flags, pc);
2305 entry->array[0] = len;
2308 this_cpu_dec(trace_buffered_event_cnt);
2311 entry = __trace_buffer_lock_reserve(*current_rb,
2312 type, len, flags, pc);
2314 * If tracing is off, but we have triggers enabled,
2315 * we still need to look at the event data. Use the temp_buffer
2316 * to store the trace event for the trigger to use. It's recursion
2317 * safe and will not be recorded anywhere.
2319 if (!entry && trace_file->flags & EVENT_FILE_FL_TRIGGER_COND) {
2320 *current_rb = temp_buffer;
2321 entry = __trace_buffer_lock_reserve(*current_rb,
2322 type, len, flags, pc);
2326 EXPORT_SYMBOL_GPL(trace_event_buffer_lock_reserve);
2328 static DEFINE_SPINLOCK(tracepoint_iter_lock);
2329 static DEFINE_MUTEX(tracepoint_printk_mutex);
2331 static void output_printk(struct trace_event_buffer *fbuffer)
2333 struct trace_event_call *event_call;
2334 struct trace_event *event;
2335 unsigned long flags;
2336 struct trace_iterator *iter = tracepoint_print_iter;
2338 /* We should never get here if iter is NULL */
2339 if (WARN_ON_ONCE(!iter))
2342 event_call = fbuffer->trace_file->event_call;
2343 if (!event_call || !event_call->event.funcs ||
2344 !event_call->event.funcs->trace)
2347 event = &fbuffer->trace_file->event_call->event;
2349 spin_lock_irqsave(&tracepoint_iter_lock, flags);
2350 trace_seq_init(&iter->seq);
2351 iter->ent = fbuffer->entry;
2352 event_call->event.funcs->trace(iter, 0, event);
2353 trace_seq_putc(&iter->seq, 0);
2354 printk("%s", iter->seq.buffer);
2356 spin_unlock_irqrestore(&tracepoint_iter_lock, flags);
2359 int tracepoint_printk_sysctl(struct ctl_table *table, int write,
2360 void __user *buffer, size_t *lenp,
2363 int save_tracepoint_printk;
2366 mutex_lock(&tracepoint_printk_mutex);
2367 save_tracepoint_printk = tracepoint_printk;
2369 ret = proc_dointvec(table, write, buffer, lenp, ppos);
2372 * This will force exiting early, as tracepoint_printk
2373 * is always zero when tracepoint_print_iter is not allocated
2375 if (!tracepoint_print_iter)
2376 tracepoint_printk = 0;
2378 if (save_tracepoint_printk == tracepoint_printk)
2381 if (tracepoint_printk)
2382 static_key_enable(&tracepoint_printk_key.key);
2384 static_key_disable(&tracepoint_printk_key.key);
2387 mutex_unlock(&tracepoint_printk_mutex);
2392 void trace_event_buffer_commit(struct trace_event_buffer *fbuffer)
2394 if (static_key_false(&tracepoint_printk_key.key))
2395 output_printk(fbuffer);
2397 event_trigger_unlock_commit(fbuffer->trace_file, fbuffer->buffer,
2398 fbuffer->event, fbuffer->entry,
2399 fbuffer->flags, fbuffer->pc);
2401 EXPORT_SYMBOL_GPL(trace_event_buffer_commit);
2406 * trace_buffer_unlock_commit_regs()
2407 * trace_event_buffer_commit()
2408 * trace_event_raw_event_xxx()
2410 # define STACK_SKIP 3
2412 void trace_buffer_unlock_commit_regs(struct trace_array *tr,
2413 struct ring_buffer *buffer,
2414 struct ring_buffer_event *event,
2415 unsigned long flags, int pc,
2416 struct pt_regs *regs)
2418 __buffer_unlock_commit(buffer, event);
2421 * If regs is not set, then skip the necessary functions.
2422 * Note, we can still get here via blktrace, wakeup tracer
2423 * and mmiotrace, but that's ok if they lose a function or
2424 * two. They are not that meaningful.
2426 ftrace_trace_stack(tr, buffer, flags, regs ? 0 : STACK_SKIP, pc, regs);
2427 ftrace_trace_userstack(tr, buffer, flags, pc);
2431 * Similar to trace_buffer_unlock_commit_regs() but do not dump stack.
2434 trace_buffer_unlock_commit_nostack(struct ring_buffer *buffer,
2435 struct ring_buffer_event *event)
2437 __buffer_unlock_commit(buffer, event);
2441 trace_process_export(struct trace_export *export,
2442 struct ring_buffer_event *event)
2444 struct trace_entry *entry;
2445 unsigned int size = 0;
2447 entry = ring_buffer_event_data(event);
2448 size = ring_buffer_event_length(event);
2449 export->write(export, entry, size);
2452 static DEFINE_MUTEX(ftrace_export_lock);
2454 static struct trace_export __rcu *ftrace_exports_list __read_mostly;
2456 static DEFINE_STATIC_KEY_FALSE(ftrace_exports_enabled);
2458 static inline void ftrace_exports_enable(void)
2460 static_branch_enable(&ftrace_exports_enabled);
2463 static inline void ftrace_exports_disable(void)
2465 static_branch_disable(&ftrace_exports_enabled);
2468 void ftrace_exports(struct ring_buffer_event *event)
2470 struct trace_export *export;
2472 preempt_disable_notrace();
2474 export = rcu_dereference_raw_notrace(ftrace_exports_list);
2476 trace_process_export(export, event);
2477 export = rcu_dereference_raw_notrace(export->next);
2480 preempt_enable_notrace();
2484 add_trace_export(struct trace_export **list, struct trace_export *export)
2486 rcu_assign_pointer(export->next, *list);
2488	 * We are inserting the export into the list, but another
2489	 * CPU might be walking that list. We need to make sure
2490	 * the export->next pointer is valid before another CPU sees
2491	 * the export pointer included in the list.
2493 rcu_assign_pointer(*list, export);
2497 rm_trace_export(struct trace_export **list, struct trace_export *export)
2499 struct trace_export **p;
2501 for (p = list; *p != NULL; p = &(*p)->next)
2508 rcu_assign_pointer(*p, (*p)->next);
2514 add_ftrace_export(struct trace_export **list, struct trace_export *export)
2517 ftrace_exports_enable();
2519 add_trace_export(list, export);
2523 rm_ftrace_export(struct trace_export **list, struct trace_export *export)
2527 ret = rm_trace_export(list, export);
2529 ftrace_exports_disable();
2534 int register_ftrace_export(struct trace_export *export)
2536 if (WARN_ON_ONCE(!export->write))
2539 mutex_lock(&ftrace_export_lock);
2541 add_ftrace_export(&ftrace_exports_list, export);
2543 mutex_unlock(&ftrace_export_lock);
2547 EXPORT_SYMBOL_GPL(register_ftrace_export);
2549 int unregister_ftrace_export(struct trace_export *export)
2553 mutex_lock(&ftrace_export_lock);
2555 ret = rm_ftrace_export(&ftrace_exports_list, export);
2557 mutex_unlock(&ftrace_export_lock);
2561 EXPORT_SYMBOL_GPL(unregister_ftrace_export);
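/*
 * Usage sketch (editorial example, not part of the original source): a
 * minimal trace_export user that receives every committed ftrace event
 * via the ->write() hook walked by ftrace_exports() above. The callback
 * and variable names are made up; the struct layout is assumed to follow
 * include/linux/trace.h.
 *
 *	static void example_export_write(struct trace_export *export,
 *					 const void *entry, unsigned int size)
 *	{
 *		pr_debug("exporting %u byte trace entry\n", size);
 *	}
 *
 *	static struct trace_export example_export = {
 *		.write = example_export_write,
 *	};
 *
 *	register_ftrace_export(&example_export);
 *	...
 *	unregister_ftrace_export(&example_export);
 */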
2564 trace_function(struct trace_array *tr,
2565 unsigned long ip, unsigned long parent_ip, unsigned long flags,
2568 struct trace_event_call *call = &event_function;
2569 struct ring_buffer *buffer = tr->trace_buffer.buffer;
2570 struct ring_buffer_event *event;
2571 struct ftrace_entry *entry;
2573 event = __trace_buffer_lock_reserve(buffer, TRACE_FN, sizeof(*entry),
2577 entry = ring_buffer_event_data(event);
2579 entry->parent_ip = parent_ip;
2581 if (!call_filter_check_discard(call, entry, buffer, event)) {
2582 if (static_branch_unlikely(&ftrace_exports_enabled))
2583 ftrace_exports(event);
2584 __buffer_unlock_commit(buffer, event);
2588 #ifdef CONFIG_STACKTRACE
2590 #define FTRACE_STACK_MAX_ENTRIES (PAGE_SIZE / sizeof(unsigned long))
2591 struct ftrace_stack {
2592 unsigned long calls[FTRACE_STACK_MAX_ENTRIES];
2595 static DEFINE_PER_CPU(struct ftrace_stack, ftrace_stack);
2596 static DEFINE_PER_CPU(int, ftrace_stack_reserve);
2598 static void __ftrace_trace_stack(struct ring_buffer *buffer,
2599 unsigned long flags,
2600 int skip, int pc, struct pt_regs *regs)
2602 struct trace_event_call *call = &event_kernel_stack;
2603 struct ring_buffer_event *event;
2604 struct stack_entry *entry;
2605 struct stack_trace trace;
2607 int size = FTRACE_STACK_ENTRIES;
2609 trace.nr_entries = 0;
2613	 * Add one, for this function and the call to save_stack_trace().
2614 * If regs is set, then these functions will not be in the way.
2616 #ifndef CONFIG_UNWINDER_ORC
2622 * Since events can happen in NMIs there's no safe way to
2623 * use the per cpu ftrace_stacks. We reserve it and if an interrupt
2624 * or NMI comes in, it will just have to use the default
2625 * FTRACE_STACK_SIZE.
2627 preempt_disable_notrace();
2629 use_stack = __this_cpu_inc_return(ftrace_stack_reserve);
2631 * We don't need any atomic variables, just a barrier.
2632 * If an interrupt comes in, we don't care, because it would
2633 * have exited and put the counter back to what we want.
2634 * We just need a barrier to keep gcc from moving things
2638 if (use_stack == 1) {
2639 trace.entries = this_cpu_ptr(ftrace_stack.calls);
2640 trace.max_entries = FTRACE_STACK_MAX_ENTRIES;
2643 save_stack_trace_regs(regs, &trace);
2645 save_stack_trace(&trace);
2647 if (trace.nr_entries > size)
2648 size = trace.nr_entries;
2650 /* From now on, use_stack is a boolean */
2653 size *= sizeof(unsigned long);
2655 event = __trace_buffer_lock_reserve(buffer, TRACE_STACK,
2656 (sizeof(*entry) - sizeof(entry->caller)) + size,
2660 entry = ring_buffer_event_data(event);
2662 memset(&entry->caller, 0, size);
2665 memcpy(&entry->caller, trace.entries,
2666 trace.nr_entries * sizeof(unsigned long));
2668 trace.max_entries = FTRACE_STACK_ENTRIES;
2669 trace.entries = entry->caller;
2671 save_stack_trace_regs(regs, &trace);
2673 save_stack_trace(&trace);
2676 entry->size = trace.nr_entries;
2678 if (!call_filter_check_discard(call, entry, buffer, event))
2679 __buffer_unlock_commit(buffer, event);
2682 /* Again, don't let gcc optimize things here */
2684 __this_cpu_dec(ftrace_stack_reserve);
2685 preempt_enable_notrace();
2689 static inline void ftrace_trace_stack(struct trace_array *tr,
2690 struct ring_buffer *buffer,
2691 unsigned long flags,
2692 int skip, int pc, struct pt_regs *regs)
2694 if (!(tr->trace_flags & TRACE_ITER_STACKTRACE))
2697 __ftrace_trace_stack(buffer, flags, skip, pc, regs);
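/*
 * Usage sketch (editorial example, not part of the original source): the
 * TRACE_ITER_STACKTRACE check above corresponds to the "stacktrace"
 * trace option:
 *
 *	# echo 1 > /sys/kernel/tracing/options/stacktrace
 *
 * after which events committed through this path are followed by a
 * kernel stack trace entry in the ring buffer.
 */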
2700 void __trace_stack(struct trace_array *tr, unsigned long flags, int skip,
2703 struct ring_buffer *buffer = tr->trace_buffer.buffer;
2705 if (rcu_is_watching()) {
2706 __ftrace_trace_stack(buffer, flags, skip, pc, NULL);
2711 * When an NMI triggers, RCU is enabled via rcu_nmi_enter(),
2712 * but if the above rcu_is_watching() failed, then the NMI
2713 * triggered someplace critical, and rcu_irq_enter() should
2714 * not be called from NMI.
2716 if (unlikely(in_nmi()))
2719 rcu_irq_enter_irqson();
2720 __ftrace_trace_stack(buffer, flags, skip, pc, NULL);
2721 rcu_irq_exit_irqson();
2725 * trace_dump_stack - record a stack back trace in the trace buffer
2726 * @skip: Number of functions to skip (helper handlers)
2728 void trace_dump_stack(int skip)
2730 unsigned long flags;
2732 if (tracing_disabled || tracing_selftest_running)
2735 local_save_flags(flags);
2737 #ifndef CONFIG_UNWINDER_ORC
2738 /* Skip 1 to skip this function. */
2741 __ftrace_trace_stack(global_trace.trace_buffer.buffer,
2742 flags, skip, preempt_count(), NULL);
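/*
 * Usage sketch (editorial example, not part of the original source):
 * kernel code can record a backtrace in the ring buffer instead of the
 * console, much like dump_stack():
 *
 *	trace_dump_stack(0);
 *
 * The result shows up when reading the "trace" file in tracefs.
 */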
2745 static DEFINE_PER_CPU(int, user_stack_count);
2748 ftrace_trace_userstack(struct trace_array *tr,
2749 struct ring_buffer *buffer, unsigned long flags, int pc)
2751 struct trace_event_call *call = &event_user_stack;
2752 struct ring_buffer_event *event;
2753 struct userstack_entry *entry;
2754 struct stack_trace trace;
2756 if (!(tr->trace_flags & TRACE_ITER_USERSTACKTRACE))
2760	 * NMIs can not handle page faults, even with fixups.
2761	 * Saving the user stack can (and often does) fault.
2763 if (unlikely(in_nmi()))
2767 * prevent recursion, since the user stack tracing may
2768 * trigger other kernel events.
2771 if (__this_cpu_read(user_stack_count))
2774 __this_cpu_inc(user_stack_count);
2776 event = __trace_buffer_lock_reserve(buffer, TRACE_USER_STACK,
2777 sizeof(*entry), flags, pc);
2779 goto out_drop_count;
2780 entry = ring_buffer_event_data(event);
2782 entry->tgid = current->tgid;
2783 memset(&entry->caller, 0, sizeof(entry->caller));
2785 trace.nr_entries = 0;
2786 trace.max_entries = FTRACE_STACK_ENTRIES;
2788 trace.entries = entry->caller;
2790 save_stack_trace_user(&trace);
2791 if (!call_filter_check_discard(call, entry, buffer, event))
2792 __buffer_unlock_commit(buffer, event);
2795 __this_cpu_dec(user_stack_count);
2801 static void __trace_userstack(struct trace_array *tr, unsigned long flags)
2803 ftrace_trace_userstack(tr, flags, preempt_count());
2807 #endif /* CONFIG_STACKTRACE */
2809 /* created for use with alloc_percpu */
2810 struct trace_buffer_struct {
2812 char buffer[4][TRACE_BUF_SIZE];
2815 static struct trace_buffer_struct *trace_percpu_buffer;
2818	 * This allows for lockless recording. If we're nested too deeply, then
2819 * this returns NULL.
2821 static char *get_trace_buf(void)
2823 struct trace_buffer_struct *buffer = this_cpu_ptr(trace_percpu_buffer);
2825 if (!buffer || buffer->nesting >= 4)
2830 /* Interrupts must see nesting incremented before we use the buffer */
2832 return &buffer->buffer[buffer->nesting - 1][0];
2835 static void put_trace_buf(void)
2837 /* Don't let the decrement of nesting leak before this */
2839 this_cpu_dec(trace_percpu_buffer->nesting);
2842 static int alloc_percpu_trace_buffer(void)
2844 struct trace_buffer_struct *buffers;
2846 buffers = alloc_percpu(struct trace_buffer_struct);
2847 if (WARN(!buffers, "Could not allocate percpu trace_printk buffer"))
2850 trace_percpu_buffer = buffers;
2854 static int buffers_allocated;
2856 void trace_printk_init_buffers(void)
2858 if (buffers_allocated)
2861 if (alloc_percpu_trace_buffer())
2864 /* trace_printk() is for debug use only. Don't use it in production. */
2867 pr_warn("**********************************************************\n");
2868 pr_warn("** NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE **\n");
2870 pr_warn("** trace_printk() being used. Allocating extra memory. **\n");
2872 pr_warn("** This means that this is a DEBUG kernel and it is **\n");
2873 pr_warn("** unsafe for production use. **\n");
2875 pr_warn("** If you see this message and you are not debugging **\n");
2876 pr_warn("** the kernel, report this immediately to your vendor! **\n");
2878 pr_warn("** NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE **\n");
2879 pr_warn("**********************************************************\n");
2881 /* Expand the buffers to set size */
2882 tracing_update_buffers();
2884 buffers_allocated = 1;
2887 * trace_printk_init_buffers() can be called by modules.
2888 * If that happens, then we need to start cmdline recording
2889 * directly here. If the global_trace.buffer is already
2890 * allocated here, then this was called by module code.
2892 if (global_trace.trace_buffer.buffer)
2893 tracing_start_cmdline_record();
2896 void trace_printk_start_comm(void)
2898 /* Start tracing comms if trace printk is set */
2899 if (!buffers_allocated)
2901 tracing_start_cmdline_record();
2904 static void trace_printk_start_stop_comm(int enabled)
2906 if (!buffers_allocated)
2910 tracing_start_cmdline_record();
2912 tracing_stop_cmdline_record();
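/*
 * Usage sketch (editorial example, not part of the original source):
 * trace_printk() is the debug-only helper that these per-cpu buffers
 * back; the format arguments here are made up:
 *
 *	trace_printk("queue %d handled %u packets\n", qid, count);
 *
 * Output goes to the ring buffer (the "trace" file) rather than the
 * console, which is what makes it cheap enough for hot paths.
 */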
2916 * trace_vbprintk - write binary msg to tracing buffer
2919 int trace_vbprintk(unsigned long ip, const char *fmt, va_list args)
2921 struct trace_event_call *call = &event_bprint;
2922 struct ring_buffer_event *event;
2923 struct ring_buffer *buffer;
2924 struct trace_array *tr = &global_trace;
2925 struct bprint_entry *entry;
2926 unsigned long flags;
2928 int len = 0, size, pc;
2930 if (unlikely(tracing_selftest_running || tracing_disabled))
2933 /* Don't pollute graph traces with trace_vprintk internals */
2934 pause_graph_tracing();
2936 pc = preempt_count();
2937 preempt_disable_notrace();
2939 tbuffer = get_trace_buf();
2945 len = vbin_printf((u32 *)tbuffer, TRACE_BUF_SIZE/sizeof(int), fmt, args);
2947 if (len > TRACE_BUF_SIZE/sizeof(int) || len < 0)
2950 local_save_flags(flags);
2951 size = sizeof(*entry) + sizeof(u32) * len;
2952 buffer = tr->trace_buffer.buffer;
2953 event = __trace_buffer_lock_reserve(buffer, TRACE_BPRINT, size,
2957 entry = ring_buffer_event_data(event);
2961 memcpy(entry->buf, tbuffer, sizeof(u32) * len);
2962 if (!call_filter_check_discard(call, entry, buffer, event)) {
2963 __buffer_unlock_commit(buffer, event);
2964 ftrace_trace_stack(tr, buffer, flags, 6, pc, NULL);
2971 preempt_enable_notrace();
2972 unpause_graph_tracing();
2976 EXPORT_SYMBOL_GPL(trace_vbprintk);
2980 __trace_array_vprintk(struct ring_buffer *buffer,
2981 unsigned long ip, const char *fmt, va_list args)
2983 struct trace_event_call *call = &event_print;
2984 struct ring_buffer_event *event;
2985 int len = 0, size, pc;
2986 struct print_entry *entry;
2987 unsigned long flags;
2990 if (tracing_disabled || tracing_selftest_running)
2993 /* Don't pollute graph traces with trace_vprintk internals */
2994 pause_graph_tracing();
2996 pc = preempt_count();
2997 preempt_disable_notrace();
3000 tbuffer = get_trace_buf();
3006 len = vscnprintf(tbuffer, TRACE_BUF_SIZE, fmt, args);
3008 local_save_flags(flags);
3009 size = sizeof(*entry) + len + 1;
3010 event = __trace_buffer_lock_reserve(buffer, TRACE_PRINT, size,
3014 entry = ring_buffer_event_data(event);
3017 memcpy(&entry->buf, tbuffer, len + 1);
3018 if (!call_filter_check_discard(call, entry, buffer, event)) {
3019 __buffer_unlock_commit(buffer, event);
3020 ftrace_trace_stack(&global_trace, buffer, flags, 6, pc, NULL);
3027 preempt_enable_notrace();
3028 unpause_graph_tracing();
3034 int trace_array_vprintk(struct trace_array *tr,
3035 unsigned long ip, const char *fmt, va_list args)
3037 return __trace_array_vprintk(tr->trace_buffer.buffer, ip, fmt, args);
3041 int trace_array_printk(struct trace_array *tr,
3042 unsigned long ip, const char *fmt, ...)
3047 if (!(global_trace.trace_flags & TRACE_ITER_PRINTK))
3054 ret = trace_array_vprintk(tr, ip, fmt, ap);
3060 int trace_array_printk_buf(struct ring_buffer *buffer,
3061 unsigned long ip, const char *fmt, ...)
3066 if (!(global_trace.trace_flags & TRACE_ITER_PRINTK))
3070 ret = __trace_array_vprintk(buffer, ip, fmt, ap);
3076 int trace_vprintk(unsigned long ip, const char *fmt, va_list args)
3078 return trace_array_vprintk(&global_trace, ip, fmt, args);
3080 EXPORT_SYMBOL_GPL(trace_vprintk);
3082 static void trace_iterator_increment(struct trace_iterator *iter)
3084 struct ring_buffer_iter *buf_iter = trace_buffer_iter(iter, iter->cpu);
3088 ring_buffer_read(buf_iter, NULL);
3091 static struct trace_entry *
3092 peek_next_entry(struct trace_iterator *iter, int cpu, u64 *ts,
3093 unsigned long *lost_events)
3095 struct ring_buffer_event *event;
3096 struct ring_buffer_iter *buf_iter = trace_buffer_iter(iter, cpu);
3099 event = ring_buffer_iter_peek(buf_iter, ts);
3101 event = ring_buffer_peek(iter->trace_buffer->buffer, cpu, ts,
3105 iter->ent_size = ring_buffer_event_length(event);
3106 return ring_buffer_event_data(event);
3112 static struct trace_entry *
3113 __find_next_entry(struct trace_iterator *iter, int *ent_cpu,
3114 unsigned long *missing_events, u64 *ent_ts)
3116 struct ring_buffer *buffer = iter->trace_buffer->buffer;
3117 struct trace_entry *ent, *next = NULL;
3118 unsigned long lost_events = 0, next_lost = 0;
3119 int cpu_file = iter->cpu_file;
3120 u64 next_ts = 0, ts;
3126	 * If we are in a per_cpu trace file, don't bother iterating over
3127	 * all CPUs; peek at that one directly.
3129 if (cpu_file > RING_BUFFER_ALL_CPUS) {
3130 if (ring_buffer_empty_cpu(buffer, cpu_file))
3132 ent = peek_next_entry(iter, cpu_file, ent_ts, missing_events);
3134 *ent_cpu = cpu_file;
3139 for_each_tracing_cpu(cpu) {
3141 if (ring_buffer_empty_cpu(buffer, cpu))
3144 ent = peek_next_entry(iter, cpu, &ts, &lost_events);
3147 * Pick the entry with the smallest timestamp:
3149 if (ent && (!next || ts < next_ts)) {
3153 next_lost = lost_events;
3154 next_size = iter->ent_size;
3158 iter->ent_size = next_size;
3161 *ent_cpu = next_cpu;
3167 *missing_events = next_lost;
3172 /* Find the next real entry, without updating the iterator itself */
3173 struct trace_entry *trace_find_next_entry(struct trace_iterator *iter,
3174 int *ent_cpu, u64 *ent_ts)
3176 return __find_next_entry(iter, ent_cpu, NULL, ent_ts);
3179 /* Find the next real entry, and increment the iterator to the next entry */
3180 void *trace_find_next_entry_inc(struct trace_iterator *iter)
3182 iter->ent = __find_next_entry(iter, &iter->cpu,
3183 &iter->lost_events, &iter->ts);
3186 trace_iterator_increment(iter);
3188 return iter->ent ? iter : NULL;
3191 static void trace_consume(struct trace_iterator *iter)
3193 ring_buffer_consume(iter->trace_buffer->buffer, iter->cpu, &iter->ts,
3194 &iter->lost_events);
3197 static void *s_next(struct seq_file *m, void *v, loff_t *pos)
3199 struct trace_iterator *iter = m->private;
3203 WARN_ON_ONCE(iter->leftover);
3207 /* can't go backwards */
3212 ent = trace_find_next_entry_inc(iter);
3216 while (ent && iter->idx < i)
3217 ent = trace_find_next_entry_inc(iter);
3224 void tracing_iter_reset(struct trace_iterator *iter, int cpu)
3226 struct ring_buffer_event *event;
3227 struct ring_buffer_iter *buf_iter;
3228 unsigned long entries = 0;
3231 per_cpu_ptr(iter->trace_buffer->data, cpu)->skipped_entries = 0;
3233 buf_iter = trace_buffer_iter(iter, cpu);
3237 ring_buffer_iter_reset(buf_iter);
3240 * We could have the case with the max latency tracers
3241 * that a reset never took place on a cpu. This is evident
3242 * by the timestamp being before the start of the buffer.
3244 while ((event = ring_buffer_iter_peek(buf_iter, &ts))) {
3245 if (ts >= iter->trace_buffer->time_start)
3248 ring_buffer_read(buf_iter, NULL);
3251 per_cpu_ptr(iter->trace_buffer->data, cpu)->skipped_entries = entries;
3255 * The current tracer is copied to avoid taking a global lock all around.
3258 static void *s_start(struct seq_file *m, loff_t *pos)
3260 struct trace_iterator *iter = m->private;
3261 struct trace_array *tr = iter->tr;
3262 int cpu_file = iter->cpu_file;
3268 * copy the tracer to avoid using a global lock all around.
3269 * iter->trace is a copy of current_trace, the pointer to the
3270 * name may be used instead of a strcmp(), as iter->trace->name
3271 * will point to the same string as current_trace->name.
3273 mutex_lock(&trace_types_lock);
3274 if (unlikely(tr->current_trace && iter->trace->name != tr->current_trace->name))
3275 *iter->trace = *tr->current_trace;
3276 mutex_unlock(&trace_types_lock);
3278 #ifdef CONFIG_TRACER_MAX_TRACE
3279 if (iter->snapshot && iter->trace->use_max_tr)
3280 return ERR_PTR(-EBUSY);
3283 if (*pos != iter->pos) {
3288 if (cpu_file == RING_BUFFER_ALL_CPUS) {
3289 for_each_tracing_cpu(cpu)
3290 tracing_iter_reset(iter, cpu);
3292 tracing_iter_reset(iter, cpu_file);
3295 for (p = iter; p && l < *pos; p = s_next(m, p, &l))
3300 * If we overflowed the seq_file before, then we want
3301 * to just reuse the trace_seq buffer again.
3307 p = s_next(m, p, &l);
3311 trace_event_read_lock();
3312 trace_access_lock(cpu_file);
3316 static void s_stop(struct seq_file *m, void *p)
3318 struct trace_iterator *iter = m->private;
3320 #ifdef CONFIG_TRACER_MAX_TRACE
3321 if (iter->snapshot && iter->trace->use_max_tr)
3325 trace_access_unlock(iter->cpu_file);
3326 trace_event_read_unlock();
3330 get_total_entries(struct trace_buffer *buf,
3331 unsigned long *total, unsigned long *entries)
3333 unsigned long count;
3339 for_each_tracing_cpu(cpu) {
3340 count = ring_buffer_entries_cpu(buf->buffer, cpu);
3342 * If this buffer has skipped entries, then we hold all
3343 * entries for the trace and we need to ignore the
3344 * ones before the time stamp.
3346 if (per_cpu_ptr(buf->data, cpu)->skipped_entries) {
3347 count -= per_cpu_ptr(buf->data, cpu)->skipped_entries;
3348 /* total is the same as the entries */
3352 ring_buffer_overrun_cpu(buf->buffer, cpu);
3357 static void print_lat_help_header(struct seq_file *m)
3359 seq_puts(m, "# _------=> CPU# \n"
3360 "# / _-----=> irqs-off \n"
3361 "# | / _----=> need-resched \n"
3362 "# || / _---=> hardirq/softirq \n"
3363 "# ||| / _--=> preempt-depth \n"
3365 "# cmd pid ||||| time | caller \n"
3366 "# \\ / ||||| \\ | / \n");
3369 static void print_event_info(struct trace_buffer *buf, struct seq_file *m)
3371 unsigned long total;
3372 unsigned long entries;
3374 get_total_entries(buf, &total, &entries);
3375 seq_printf(m, "# entries-in-buffer/entries-written: %lu/%lu #P:%d\n",
3376 entries, total, num_online_cpus());
3380 static void print_func_help_header(struct trace_buffer *buf, struct seq_file *m,
3383 bool tgid = flags & TRACE_ITER_RECORD_TGID;
3385 print_event_info(buf, m);
3387 seq_printf(m, "# TASK-PID %s CPU# TIMESTAMP FUNCTION\n", tgid ? "TGID " : "");
3388 seq_printf(m, "# | | %s | | |\n", tgid ? " | " : "");
3391 static void print_func_help_header_irq(struct trace_buffer *buf, struct seq_file *m,
3394 bool tgid = flags & TRACE_ITER_RECORD_TGID;
3395 const char tgid_space[] = " ";
3396 const char space[] = " ";
3398 print_event_info(buf, m);
3400 seq_printf(m, "# %s _-----=> irqs-off\n",
3401 tgid ? tgid_space : space);
3402 seq_printf(m, "# %s / _----=> need-resched\n",
3403 tgid ? tgid_space : space);
3404 seq_printf(m, "# %s| / _---=> hardirq/softirq\n",
3405 tgid ? tgid_space : space);
3406 seq_printf(m, "# %s|| / _--=> preempt-depth\n",
3407 tgid ? tgid_space : space);
3408 seq_printf(m, "# %s||| / delay\n",
3409 tgid ? tgid_space : space);
3410 seq_printf(m, "# TASK-PID %sCPU# |||| TIMESTAMP FUNCTION\n",
3411 tgid ? " TGID " : space);
3412 seq_printf(m, "# | | %s | |||| | |\n",
3413 tgid ? " | " : space);
3417 print_trace_header(struct seq_file *m, struct trace_iterator *iter)
3419 unsigned long sym_flags = (global_trace.trace_flags & TRACE_ITER_SYM_MASK);
3420 struct trace_buffer *buf = iter->trace_buffer;
3421 struct trace_array_cpu *data = per_cpu_ptr(buf->data, buf->cpu);
3422 struct tracer *type = iter->trace;
3423 unsigned long entries;
3424 unsigned long total;
3425 const char *name = "preemption";
3429 get_total_entries(buf, &total, &entries);
3431 seq_printf(m, "# %s latency trace v1.1.5 on %s\n",
3433 seq_puts(m, "# -----------------------------------"
3434 "---------------------------------\n");
3435 seq_printf(m, "# latency: %lu us, #%lu/%lu, CPU#%d |"
3436 " (M:%s VP:%d, KP:%d, SP:%d HP:%d",
3437 nsecs_to_usecs(data->saved_latency),
3441 #if defined(CONFIG_PREEMPT_NONE)
3443 #elif defined(CONFIG_PREEMPT_VOLUNTARY)
3445 #elif defined(CONFIG_PREEMPT)
3450 /* These are reserved for later use */
3453 seq_printf(m, " #P:%d)\n", num_online_cpus());
3457 seq_puts(m, "# -----------------\n");
3458 seq_printf(m, "# | task: %.16s-%d "
3459 "(uid:%d nice:%ld policy:%ld rt_prio:%ld)\n",
3460 data->comm, data->pid,
3461 from_kuid_munged(seq_user_ns(m), data->uid), data->nice,
3462 data->policy, data->rt_priority);
3463 seq_puts(m, "# -----------------\n");
3465 if (data->critical_start) {
3466 seq_puts(m, "# => started at: ");
3467 seq_print_ip_sym(&iter->seq, data->critical_start, sym_flags);
3468 trace_print_seq(m, &iter->seq);
3469 seq_puts(m, "\n# => ended at: ");
3470 seq_print_ip_sym(&iter->seq, data->critical_end, sym_flags);
3471 trace_print_seq(m, &iter->seq);
3472 seq_puts(m, "\n#\n");
3478 static void test_cpu_buff_start(struct trace_iterator *iter)
3480 struct trace_seq *s = &iter->seq;
3481 struct trace_array *tr = iter->tr;
3483 if (!(tr->trace_flags & TRACE_ITER_ANNOTATE))
3486 if (!(iter->iter_flags & TRACE_FILE_ANNOTATE))
3489 if (cpumask_available(iter->started) &&
3490 cpumask_test_cpu(iter->cpu, iter->started))
3493 if (per_cpu_ptr(iter->trace_buffer->data, iter->cpu)->skipped_entries)
3496 if (cpumask_available(iter->started))
3497 cpumask_set_cpu(iter->cpu, iter->started);
3499 /* Don't print started cpu buffer for the first entry of the trace */
3501 trace_seq_printf(s, "##### CPU %u buffer started ####\n",
3505 static enum print_line_t print_trace_fmt(struct trace_iterator *iter)
3507 struct trace_array *tr = iter->tr;
3508 struct trace_seq *s = &iter->seq;
3509 unsigned long sym_flags = (tr->trace_flags & TRACE_ITER_SYM_MASK);
3510 struct trace_entry *entry;
3511 struct trace_event *event;
3515 test_cpu_buff_start(iter);
3517 event = ftrace_find_event(entry->type);
3519 if (tr->trace_flags & TRACE_ITER_CONTEXT_INFO) {
3520 if (iter->iter_flags & TRACE_FILE_LAT_FMT)
3521 trace_print_lat_context(iter);
3523 trace_print_context(iter);
3526 if (trace_seq_has_overflowed(s))
3527 return TRACE_TYPE_PARTIAL_LINE;
3530 return event->funcs->trace(iter, sym_flags, event);
3532 trace_seq_printf(s, "Unknown type %d\n", entry->type);
3534 return trace_handle_return(s);
3537 static enum print_line_t print_raw_fmt(struct trace_iterator *iter)
3539 struct trace_array *tr = iter->tr;
3540 struct trace_seq *s = &iter->seq;
3541 struct trace_entry *entry;
3542 struct trace_event *event;
3546 if (tr->trace_flags & TRACE_ITER_CONTEXT_INFO)
3547 trace_seq_printf(s, "%d %d %llu ",
3548 entry->pid, iter->cpu, iter->ts);
3550 if (trace_seq_has_overflowed(s))
3551 return TRACE_TYPE_PARTIAL_LINE;
3553 event = ftrace_find_event(entry->type);
3555 return event->funcs->raw(iter, 0, event);
3557 trace_seq_printf(s, "%d ?\n", entry->type);
3559 return trace_handle_return(s);
3562 static enum print_line_t print_hex_fmt(struct trace_iterator *iter)
3564 struct trace_array *tr = iter->tr;
3565 struct trace_seq *s = &iter->seq;
3566 unsigned char newline = '\n';
3567 struct trace_entry *entry;
3568 struct trace_event *event;
3572 if (tr->trace_flags & TRACE_ITER_CONTEXT_INFO) {
3573 SEQ_PUT_HEX_FIELD(s, entry->pid);
3574 SEQ_PUT_HEX_FIELD(s, iter->cpu);
3575 SEQ_PUT_HEX_FIELD(s, iter->ts);
3576 if (trace_seq_has_overflowed(s))
3577 return TRACE_TYPE_PARTIAL_LINE;
3580 event = ftrace_find_event(entry->type);
3582 enum print_line_t ret = event->funcs->hex(iter, 0, event);
3583 if (ret != TRACE_TYPE_HANDLED)
3587 SEQ_PUT_FIELD(s, newline);
3589 return trace_handle_return(s);
3592 static enum print_line_t print_bin_fmt(struct trace_iterator *iter)
3594 struct trace_array *tr = iter->tr;
3595 struct trace_seq *s = &iter->seq;
3596 struct trace_entry *entry;
3597 struct trace_event *event;
3601 if (tr->trace_flags & TRACE_ITER_CONTEXT_INFO) {
3602 SEQ_PUT_FIELD(s, entry->pid);
3603 SEQ_PUT_FIELD(s, iter->cpu);
3604 SEQ_PUT_FIELD(s, iter->ts);
3605 if (trace_seq_has_overflowed(s))
3606 return TRACE_TYPE_PARTIAL_LINE;
3609 event = ftrace_find_event(entry->type);
3610 return event ? event->funcs->binary(iter, 0, event) :
3614 int trace_empty(struct trace_iterator *iter)
3616 struct ring_buffer_iter *buf_iter;
3619 /* If we are looking at one CPU buffer, only check that one */
3620 if (iter->cpu_file != RING_BUFFER_ALL_CPUS) {
3621 cpu = iter->cpu_file;
3622 buf_iter = trace_buffer_iter(iter, cpu);
3624 if (!ring_buffer_iter_empty(buf_iter))
3627 if (!ring_buffer_empty_cpu(iter->trace_buffer->buffer, cpu))
3633 for_each_tracing_cpu(cpu) {
3634 buf_iter = trace_buffer_iter(iter, cpu);
3636 if (!ring_buffer_iter_empty(buf_iter))
3639 if (!ring_buffer_empty_cpu(iter->trace_buffer->buffer, cpu))
3647 /* Called with trace_event_read_lock() held. */
3648 enum print_line_t print_trace_line(struct trace_iterator *iter)
3650 struct trace_array *tr = iter->tr;
3651 unsigned long trace_flags = tr->trace_flags;
3652 enum print_line_t ret;
3654 if (iter->lost_events) {
3655 trace_seq_printf(&iter->seq, "CPU:%d [LOST %lu EVENTS]\n",
3656 iter->cpu, iter->lost_events);
3657 if (trace_seq_has_overflowed(&iter->seq))
3658 return TRACE_TYPE_PARTIAL_LINE;
3661 if (iter->trace && iter->trace->print_line) {
3662 ret = iter->trace->print_line(iter);
3663 if (ret != TRACE_TYPE_UNHANDLED)
3667 if (iter->ent->type == TRACE_BPUTS &&
3668 trace_flags & TRACE_ITER_PRINTK &&
3669 trace_flags & TRACE_ITER_PRINTK_MSGONLY)
3670 return trace_print_bputs_msg_only(iter);
3672 if (iter->ent->type == TRACE_BPRINT &&
3673 trace_flags & TRACE_ITER_PRINTK &&
3674 trace_flags & TRACE_ITER_PRINTK_MSGONLY)
3675 return trace_print_bprintk_msg_only(iter);
3677 if (iter->ent->type == TRACE_PRINT &&
3678 trace_flags & TRACE_ITER_PRINTK &&
3679 trace_flags & TRACE_ITER_PRINTK_MSGONLY)
3680 return trace_print_printk_msg_only(iter);
3682 if (trace_flags & TRACE_ITER_BIN)
3683 return print_bin_fmt(iter);
3685 if (trace_flags & TRACE_ITER_HEX)
3686 return print_hex_fmt(iter);
3688 if (trace_flags & TRACE_ITER_RAW)
3689 return print_raw_fmt(iter);
3691 return print_trace_fmt(iter);
3694 void trace_latency_header(struct seq_file *m)
3696 struct trace_iterator *iter = m->private;
3697 struct trace_array *tr = iter->tr;
3699 /* print nothing if the buffers are empty */
3700 if (trace_empty(iter))
3703 if (iter->iter_flags & TRACE_FILE_LAT_FMT)
3704 print_trace_header(m, iter);
3706 if (!(tr->trace_flags & TRACE_ITER_VERBOSE))
3707 print_lat_help_header(m);
3710 void trace_default_header(struct seq_file *m)
3712 struct trace_iterator *iter = m->private;
3713 struct trace_array *tr = iter->tr;
3714 unsigned long trace_flags = tr->trace_flags;
3716 if (!(trace_flags & TRACE_ITER_CONTEXT_INFO))
3719 if (iter->iter_flags & TRACE_FILE_LAT_FMT) {
3720 /* print nothing if the buffers are empty */
3721 if (trace_empty(iter))
3723 print_trace_header(m, iter);
3724 if (!(trace_flags & TRACE_ITER_VERBOSE))
3725 print_lat_help_header(m);
3727 if (!(trace_flags & TRACE_ITER_VERBOSE)) {
3728 if (trace_flags & TRACE_ITER_IRQ_INFO)
3729 print_func_help_header_irq(iter->trace_buffer,
3732 print_func_help_header(iter->trace_buffer, m,
3738 static void test_ftrace_alive(struct seq_file *m)
3740 if (!ftrace_is_dead())
3742 seq_puts(m, "# WARNING: FUNCTION TRACING IS CORRUPTED\n"
3743 "# MAY BE MISSING FUNCTION EVENTS\n");
3746 #ifdef CONFIG_TRACER_MAX_TRACE
3747 static void show_snapshot_main_help(struct seq_file *m)
3749 seq_puts(m, "# echo 0 > snapshot : Clears and frees snapshot buffer\n"
3750 "# echo 1 > snapshot : Allocates snapshot buffer, if not already allocated.\n"
3751 "# Takes a snapshot of the main buffer.\n"
3752 "# echo 2 > snapshot : Clears snapshot buffer (but does not allocate or free)\n"
3753 "# (Doesn't have to be '2' works with any number that\n"
3754 "# is not a '0' or '1')\n");
3757 static void show_snapshot_percpu_help(struct seq_file *m)
3759 seq_puts(m, "# echo 0 > snapshot : Invalid for per_cpu snapshot file.\n");
3760 #ifdef CONFIG_RING_BUFFER_ALLOW_SWAP
3761 seq_puts(m, "# echo 1 > snapshot : Allocates snapshot buffer, if not already allocated.\n"
3762 "# Takes a snapshot of the main buffer for this cpu.\n");
3764 seq_puts(m, "# echo 1 > snapshot : Not supported with this kernel.\n"
3765 "# Must use main snapshot file to allocate.\n");
3767 seq_puts(m, "# echo 2 > snapshot : Clears this cpu's snapshot buffer (but does not allocate)\n"
3768 "# (Doesn't have to be '2' works with any number that\n"
3769 "# is not a '0' or '1')\n");
3772 static void print_snapshot_help(struct seq_file *m, struct trace_iterator *iter)
3774 if (iter->tr->allocated_snapshot)
3775 seq_puts(m, "#\n# * Snapshot is allocated *\n#\n");
3777 seq_puts(m, "#\n# * Snapshot is freed *\n#\n");
3779 seq_puts(m, "# Snapshot commands:\n");
3780 if (iter->cpu_file == RING_BUFFER_ALL_CPUS)
3781 show_snapshot_main_help(m);
3783 show_snapshot_percpu_help(m);
3786 /* Should never be called */
3787 static inline void print_snapshot_help(struct seq_file *m, struct trace_iterator *iter) { }
3790 static int s_show(struct seq_file *m, void *v)
3792 struct trace_iterator *iter = v;
3795 if (iter->ent == NULL) {
3797 seq_printf(m, "# tracer: %s\n", iter->trace->name);
3799 test_ftrace_alive(m);
3801 if (iter->snapshot && trace_empty(iter))
3802 print_snapshot_help(m, iter);
3803 else if (iter->trace && iter->trace->print_header)
3804 iter->trace->print_header(m);
3806 trace_default_header(m);
3808 } else if (iter->leftover) {
3810 * If we filled the seq_file buffer earlier, we
3811 * want to just show it now.
3813 ret = trace_print_seq(m, &iter->seq);
3815 /* ret should this time be zero, but you never know */
3816 iter->leftover = ret;
3819 print_trace_line(iter);
3820 ret = trace_print_seq(m, &iter->seq);
3822 * If we overflow the seq_file buffer, then it will
3823 * ask us for this data again at start up.
3825 * ret is 0 if seq_file write succeeded.
3828 iter->leftover = ret;
3835 * Should be used after trace_array_get(), trace_types_lock
3836 * ensures that i_cdev was already initialized.
3838 static inline int tracing_get_cpu(struct inode *inode)
3840 if (inode->i_cdev) /* See trace_create_cpu_file() */
3841 return (long)inode->i_cdev - 1;
3842 return RING_BUFFER_ALL_CPUS;
3845 static const struct seq_operations tracer_seq_ops = {
3852 static struct trace_iterator *
3853 __tracing_open(struct inode *inode, struct file *file, bool snapshot)
3855 struct trace_array *tr = inode->i_private;
3856 struct trace_iterator *iter;
3859 if (tracing_disabled)
3860 return ERR_PTR(-ENODEV);
3862 iter = __seq_open_private(file, &tracer_seq_ops, sizeof(*iter));
3864 return ERR_PTR(-ENOMEM);
3866 iter->buffer_iter = kcalloc(nr_cpu_ids, sizeof(*iter->buffer_iter),
3868 if (!iter->buffer_iter)
3872 * We make a copy of the current tracer to avoid concurrent
3873 * changes on it while we are reading.
3875 mutex_lock(&trace_types_lock);
3876 iter->trace = kzalloc(sizeof(*iter->trace), GFP_KERNEL);
3880 *iter->trace = *tr->current_trace;
3882 if (!zalloc_cpumask_var(&iter->started, GFP_KERNEL))
3887 #ifdef CONFIG_TRACER_MAX_TRACE
3888 /* Currently only the top directory has a snapshot */
3889 if (tr->current_trace->print_max || snapshot)
3890 iter->trace_buffer = &tr->max_buffer;
3893 iter->trace_buffer = &tr->trace_buffer;
3894 iter->snapshot = snapshot;
3896 iter->cpu_file = tracing_get_cpu(inode);
3897 mutex_init(&iter->mutex);
3899 /* Notify the tracer early; before we stop tracing. */
3900 if (iter->trace && iter->trace->open)
3901 iter->trace->open(iter);
3903 /* Annotate start of buffers if we had overruns */
3904 if (ring_buffer_overruns(iter->trace_buffer->buffer))
3905 iter->iter_flags |= TRACE_FILE_ANNOTATE;
3907 /* Output in nanoseconds only if we are using a clock in nanoseconds. */
3908 if (trace_clocks[tr->clock_id].in_ns)
3909 iter->iter_flags |= TRACE_FILE_TIME_IN_NS;
3911 /* stop the trace while dumping if we are not opening "snapshot" */
3912 if (!iter->snapshot)
3913 tracing_stop_tr(tr);
3915 if (iter->cpu_file == RING_BUFFER_ALL_CPUS) {
3916 for_each_tracing_cpu(cpu) {
3917 iter->buffer_iter[cpu] =
3918 ring_buffer_read_prepare(iter->trace_buffer->buffer,
3921 ring_buffer_read_prepare_sync();
3922 for_each_tracing_cpu(cpu) {
3923 ring_buffer_read_start(iter->buffer_iter[cpu]);
3924 tracing_iter_reset(iter, cpu);
3927 cpu = iter->cpu_file;
3928 iter->buffer_iter[cpu] =
3929 ring_buffer_read_prepare(iter->trace_buffer->buffer,
3931 ring_buffer_read_prepare_sync();
3932 ring_buffer_read_start(iter->buffer_iter[cpu]);
3933 tracing_iter_reset(iter, cpu);
3936 mutex_unlock(&trace_types_lock);
3941 mutex_unlock(&trace_types_lock);
3943 kfree(iter->buffer_iter);
3945 seq_release_private(inode, file);
3946 return ERR_PTR(-ENOMEM);
3949 int tracing_open_generic(struct inode *inode, struct file *filp)
3951 if (tracing_disabled)
3954 filp->private_data = inode->i_private;
3958 bool tracing_is_disabled(void)
3960	return (tracing_disabled) ? true : false;
3964 * Open and update trace_array ref count.
3965 * Must have the current trace_array passed to it.
3967 static int tracing_open_generic_tr(struct inode *inode, struct file *filp)
3969 struct trace_array *tr = inode->i_private;
3971 if (tracing_disabled)
3974 if (trace_array_get(tr) < 0)
3977 filp->private_data = inode->i_private;
3982 static int tracing_release(struct inode *inode, struct file *file)
3984 struct trace_array *tr = inode->i_private;
3985 struct seq_file *m = file->private_data;
3986 struct trace_iterator *iter;
3989 if (!(file->f_mode & FMODE_READ)) {
3990 trace_array_put(tr);
3994 /* Writes do not use seq_file */
3996 mutex_lock(&trace_types_lock);
3998 for_each_tracing_cpu(cpu) {
3999 if (iter->buffer_iter[cpu])
4000 ring_buffer_read_finish(iter->buffer_iter[cpu]);
4003 if (iter->trace && iter->trace->close)
4004 iter->trace->close(iter);
4006 if (!iter->snapshot)
4007 /* reenable tracing if it was previously enabled */
4008 tracing_start_tr(tr);
4010 __trace_array_put(tr);
4012 mutex_unlock(&trace_types_lock);
4014 mutex_destroy(&iter->mutex);
4015 free_cpumask_var(iter->started);
4017 kfree(iter->buffer_iter);
4018 seq_release_private(inode, file);
4023 static int tracing_release_generic_tr(struct inode *inode, struct file *file)
4025 struct trace_array *tr = inode->i_private;
4027 trace_array_put(tr);
4031 static int tracing_single_release_tr(struct inode *inode, struct file *file)
4033 struct trace_array *tr = inode->i_private;
4035 trace_array_put(tr);
4037 return single_release(inode, file);
4040 static int tracing_open(struct inode *inode, struct file *file)
4042 struct trace_array *tr = inode->i_private;
4043 struct trace_iterator *iter;
4046 if (trace_array_get(tr) < 0)
4049 /* If this file was open for write, then erase contents */
4050 if ((file->f_mode & FMODE_WRITE) && (file->f_flags & O_TRUNC)) {
4051 int cpu = tracing_get_cpu(inode);
4052 struct trace_buffer *trace_buf = &tr->trace_buffer;
4054 #ifdef CONFIG_TRACER_MAX_TRACE
4055 if (tr->current_trace->print_max)
4056 trace_buf = &tr->max_buffer;
4059 if (cpu == RING_BUFFER_ALL_CPUS)
4060 tracing_reset_online_cpus(trace_buf);
4062 tracing_reset(trace_buf, cpu);
4065 if (file->f_mode & FMODE_READ) {
4066 iter = __tracing_open(inode, file, false);
4068 ret = PTR_ERR(iter);
4069 else if (tr->trace_flags & TRACE_ITER_LATENCY_FMT)
4070 iter->iter_flags |= TRACE_FILE_LAT_FMT;
4074 trace_array_put(tr);
4080 * Some tracers are not suitable for instance buffers.
4081 * A tracer is always available for the global array (toplevel)
4082 * or if it explicitly states that it is.
4085 trace_ok_for_array(struct tracer *t, struct trace_array *tr)
4087 return (tr->flags & TRACE_ARRAY_FL_GLOBAL) || t->allow_instances;
4090 /* Find the next tracer that this trace array may use */
4091 static struct tracer *
4092 get_tracer_for_array(struct trace_array *tr, struct tracer *t)
4094 while (t && !trace_ok_for_array(t, tr))
4101 t_next(struct seq_file *m, void *v, loff_t *pos)
4103 struct trace_array *tr = m->private;
4104 struct tracer *t = v;
4109 t = get_tracer_for_array(tr, t->next);
4114 static void *t_start(struct seq_file *m, loff_t *pos)
4116 struct trace_array *tr = m->private;
4120 mutex_lock(&trace_types_lock);
4122 t = get_tracer_for_array(tr, trace_types);
4123 for (; t && l < *pos; t = t_next(m, t, &l))
4129 static void t_stop(struct seq_file *m, void *p)
4131 mutex_unlock(&trace_types_lock);
4134 static int t_show(struct seq_file *m, void *v)
4136 struct tracer *t = v;
4141 seq_puts(m, t->name);
4150 static const struct seq_operations show_traces_seq_ops = {
4157 static int show_traces_open(struct inode *inode, struct file *file)
4159 struct trace_array *tr = inode->i_private;
4163 if (tracing_disabled)
4166 if (trace_array_get(tr) < 0)
4169 ret = seq_open(file, &show_traces_seq_ops);
4171 trace_array_put(tr);
4175 m = file->private_data;
4181 static int show_traces_release(struct inode *inode, struct file *file)
4183 struct trace_array *tr = inode->i_private;
4185 trace_array_put(tr);
4186 return seq_release(inode, file);
4190 tracing_write_stub(struct file *filp, const char __user *ubuf,
4191 size_t count, loff_t *ppos)
4196 loff_t tracing_lseek(struct file *file, loff_t offset, int whence)
4200 if (file->f_mode & FMODE_READ)
4201 ret = seq_lseek(file, offset, whence);
4203 file->f_pos = ret = 0;
4208 static const struct file_operations tracing_fops = {
4209 .open = tracing_open,
4211 .write = tracing_write_stub,
4212 .llseek = tracing_lseek,
4213 .release = tracing_release,
4216 static const struct file_operations show_traces_fops = {
4217 .open = show_traces_open,
4219 .llseek = seq_lseek,
4220 .release = show_traces_release,
4224 tracing_cpumask_read(struct file *filp, char __user *ubuf,
4225 size_t count, loff_t *ppos)
4227 struct trace_array *tr = file_inode(filp)->i_private;
4231 len = snprintf(NULL, 0, "%*pb\n",
4232 cpumask_pr_args(tr->tracing_cpumask)) + 1;
4233 mask_str = kmalloc(len, GFP_KERNEL);
4237 len = snprintf(mask_str, len, "%*pb\n",
4238 cpumask_pr_args(tr->tracing_cpumask));
4243 count = simple_read_from_buffer(ubuf, count, ppos, mask_str, len);
4252 tracing_cpumask_write(struct file *filp, const char __user *ubuf,
4253 size_t count, loff_t *ppos)
4255 struct trace_array *tr = file_inode(filp)->i_private;
4256 cpumask_var_t tracing_cpumask_new;
4259 if (!alloc_cpumask_var(&tracing_cpumask_new, GFP_KERNEL))
4262 err = cpumask_parse_user(ubuf, count, tracing_cpumask_new);
4266 local_irq_disable();
4267 arch_spin_lock(&tr->max_lock);
4268 for_each_tracing_cpu(cpu) {
4270 * Increase/decrease the disabled counter if we are
4271 * about to flip a bit in the cpumask:
4273 if (cpumask_test_cpu(cpu, tr->tracing_cpumask) &&
4274 !cpumask_test_cpu(cpu, tracing_cpumask_new)) {
4275 atomic_inc(&per_cpu_ptr(tr->trace_buffer.data, cpu)->disabled);
4276 ring_buffer_record_disable_cpu(tr->trace_buffer.buffer, cpu);
4278 if (!cpumask_test_cpu(cpu, tr->tracing_cpumask) &&
4279 cpumask_test_cpu(cpu, tracing_cpumask_new)) {
4280 atomic_dec(&per_cpu_ptr(tr->trace_buffer.data, cpu)->disabled);
4281 ring_buffer_record_enable_cpu(tr->trace_buffer.buffer, cpu);
4284 arch_spin_unlock(&tr->max_lock);
4287 cpumask_copy(tr->tracing_cpumask, tracing_cpumask_new);
4288 free_cpumask_var(tracing_cpumask_new);
4293 free_cpumask_var(tracing_cpumask_new);
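/*
 * Usage sketch (editorial example, not part of the original source): the
 * cpumask is written as a hex mask, e.g. to restrict tracing to CPUs 0
 * and 1:
 *
 *	# echo 3 > /sys/kernel/tracing/tracing_cpumask
 *
 * CPUs whose bit is cleared have recording on their per-cpu ring buffer
 * disabled by the loop above.
 */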
4298 static const struct file_operations tracing_cpumask_fops = {
4299 .open = tracing_open_generic_tr,
4300 .read = tracing_cpumask_read,
4301 .write = tracing_cpumask_write,
4302 .release = tracing_release_generic_tr,
4303 .llseek = generic_file_llseek,
4306 static int tracing_trace_options_show(struct seq_file *m, void *v)
4308 struct tracer_opt *trace_opts;
4309 struct trace_array *tr = m->private;
4313 mutex_lock(&trace_types_lock);
4314 tracer_flags = tr->current_trace->flags->val;
4315 trace_opts = tr->current_trace->flags->opts;
4317 for (i = 0; trace_options[i]; i++) {
4318 if (tr->trace_flags & (1 << i))
4319 seq_printf(m, "%s\n", trace_options[i]);
4321 seq_printf(m, "no%s\n", trace_options[i]);
4324 for (i = 0; trace_opts[i].name; i++) {
4325 if (tracer_flags & trace_opts[i].bit)
4326 seq_printf(m, "%s\n", trace_opts[i].name);
4328 seq_printf(m, "no%s\n", trace_opts[i].name);
4330 mutex_unlock(&trace_types_lock);
4335 static int __set_tracer_option(struct trace_array *tr,
4336 struct tracer_flags *tracer_flags,
4337 struct tracer_opt *opts, int neg)
4339 struct tracer *trace = tracer_flags->trace;
4342 ret = trace->set_flag(tr, tracer_flags->val, opts->bit, !neg);
4347 tracer_flags->val &= ~opts->bit;
4349 tracer_flags->val |= opts->bit;
4353 /* Try to assign a tracer specific option */
4354 static int set_tracer_option(struct trace_array *tr, char *cmp, int neg)
4356 struct tracer *trace = tr->current_trace;
4357 struct tracer_flags *tracer_flags = trace->flags;
4358 struct tracer_opt *opts = NULL;
4361 for (i = 0; tracer_flags->opts[i].name; i++) {
4362 opts = &tracer_flags->opts[i];
4364 if (strcmp(cmp, opts->name) == 0)
4365 return __set_tracer_option(tr, trace->flags, opts, neg);
4371 /* Some tracers require overwrite to stay enabled */
4372 int trace_keep_overwrite(struct tracer *tracer, u32 mask, int set)
4374 if (tracer->enabled && (mask & TRACE_ITER_OVERWRITE) && !set)
4380 int set_tracer_flag(struct trace_array *tr, unsigned int mask, int enabled)
4384 if ((mask == TRACE_ITER_RECORD_TGID) ||
4385 (mask == TRACE_ITER_RECORD_CMD))
4386 lockdep_assert_held(&event_mutex);
4388 /* do nothing if flag is already set */
4389 if (!!(tr->trace_flags & mask) == !!enabled)
4392 /* Give the tracer a chance to approve the change */
4393 if (tr->current_trace->flag_changed)
4394 if (tr->current_trace->flag_changed(tr, mask, !!enabled))
4398 tr->trace_flags |= mask;
4400 tr->trace_flags &= ~mask;
4402 if (mask == TRACE_ITER_RECORD_CMD)
4403 trace_event_enable_cmd_record(enabled);
4405 if (mask == TRACE_ITER_RECORD_TGID) {
4407 tgid_map_max = pid_max;
4408 map = kvcalloc(tgid_map_max + 1, sizeof(*tgid_map),
4412 * Pairs with smp_load_acquire() in
4413 * trace_find_tgid_ptr() to ensure that if it observes
4414 * the tgid_map we just allocated then it also observes
4415 * the corresponding tgid_map_max value.
4417 smp_store_release(&tgid_map, map);
4420 tr->trace_flags &= ~TRACE_ITER_RECORD_TGID;
4424 trace_event_enable_tgid_record(enabled);
4427 if (mask == TRACE_ITER_EVENT_FORK)
4428 trace_event_follow_fork(tr, enabled);
4430 if (mask == TRACE_ITER_FUNC_FORK)
4431 ftrace_pid_follow_fork(tr, enabled);
4433 if (mask == TRACE_ITER_OVERWRITE) {
4434 ring_buffer_change_overwrite(tr->trace_buffer.buffer, enabled);
4435 #ifdef CONFIG_TRACER_MAX_TRACE
4436 ring_buffer_change_overwrite(tr->max_buffer.buffer, enabled);
4440 if (mask == TRACE_ITER_PRINTK) {
4441 trace_printk_start_stop_comm(enabled);
4442 trace_printk_control(enabled);
4448 static int trace_set_options(struct trace_array *tr, char *option)
4453 size_t orig_len = strlen(option);
4455 cmp = strstrip(option);
4457 if (strncmp(cmp, "no", 2) == 0) {
4462 mutex_lock(&event_mutex);
4463 mutex_lock(&trace_types_lock);
4465 ret = match_string(trace_options, -1, cmp);
4466 /* If no option could be set, test the specific tracer options */
4468 ret = set_tracer_option(tr, cmp, neg);
4470 ret = set_tracer_flag(tr, 1 << ret, !neg);
4472 mutex_unlock(&trace_types_lock);
4473 mutex_unlock(&event_mutex);
4476 * If the first trailing whitespace is replaced with '\0' by strstrip,
4477 * turn it back into a space.
4479 if (orig_len > strlen(option))
4480 option[strlen(option)] = ' ';
4485 static void __init apply_trace_boot_options(void)
4487 char *buf = trace_boot_options_buf;
4491 option = strsep(&buf, ",");
4497 trace_set_options(&global_trace, option);
4499 /* Put back the comma to allow this to be called again */
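/*
 * Usage sketch (editorial example, not part of the original source): the
 * same option strings parsed by trace_set_options() can be given at boot,
 * comma separated:
 *
 *	trace_options=stacktrace,noirq-info
 *
 * or toggled at run time through tracefs:
 *
 *	# echo nostacktrace > /sys/kernel/tracing/trace_options
 */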
4506 tracing_trace_options_write(struct file *filp, const char __user *ubuf,
4507 size_t cnt, loff_t *ppos)
4509 struct seq_file *m = filp->private_data;
4510 struct trace_array *tr = m->private;
4514 if (cnt >= sizeof(buf))
4517 if (copy_from_user(buf, ubuf, cnt))
4522 ret = trace_set_options(tr, buf);
4531 static int tracing_trace_options_open(struct inode *inode, struct file *file)
4533 struct trace_array *tr = inode->i_private;
4536 if (tracing_disabled)
4539 if (trace_array_get(tr) < 0)
4542 ret = single_open(file, tracing_trace_options_show, inode->i_private);
4544 trace_array_put(tr);
4549 static const struct file_operations tracing_iter_fops = {
4550 .open = tracing_trace_options_open,
4552 .llseek = seq_lseek,
4553 .release = tracing_single_release_tr,
4554 .write = tracing_trace_options_write,
4557 static const char readme_msg[] =
4558 "tracing mini-HOWTO:\n\n"
4559 "# echo 0 > tracing_on : quick way to disable tracing\n"
4560 "# echo 1 > tracing_on : quick way to re-enable tracing\n\n"
4561 " Important files:\n"
4562 " trace\t\t\t- The static contents of the buffer\n"
4563 "\t\t\t To clear the buffer write into this file: echo > trace\n"
4564 " trace_pipe\t\t- A consuming read to see the contents of the buffer\n"
4565 " current_tracer\t- function and latency tracers\n"
4566 " available_tracers\t- list of configured tracers for current_tracer\n"
4567 " buffer_size_kb\t- view and modify size of per cpu buffer\n"
4568 " buffer_total_size_kb - view total size of all cpu buffers\n\n"
4569 " trace_clock\t\t-change the clock used to order events\n"
4570 " local: Per cpu clock but may not be synced across CPUs\n"
4571 " global: Synced across CPUs but slows tracing down.\n"
4572 " counter: Not a clock, but just an increment\n"
4573 " uptime: Jiffy counter from time of boot\n"
4574 " perf: Same clock that perf events use\n"
4575 #ifdef CONFIG_X86_64
4576 " x86-tsc: TSC cycle counter\n"
4578 "\n timestamp_mode\t-view the mode used to timestamp events\n"
4579 " delta: Delta difference against a buffer-wide timestamp\n"
4580 " absolute: Absolute (standalone) timestamp\n"
4581 "\n trace_marker\t\t- Writes into this file writes into the kernel buffer\n"
4582 "\n trace_marker_raw\t\t- Writes into this file writes binary data into the kernel buffer\n"
4583 " tracing_cpumask\t- Limit which CPUs to trace\n"
4584 " instances\t\t- Make sub-buffers with: mkdir instances/foo\n"
4585 "\t\t\t Remove sub-buffer with rmdir\n"
4586 " trace_options\t\t- Set format or modify how tracing happens\n"
4587 "\t\t\t Disable an option by adding a suffix 'no' to the\n"
4588 "\t\t\t option name\n"
4589 " saved_cmdlines_size\t- echo command number in here to store comm-pid list\n"
4590 #ifdef CONFIG_DYNAMIC_FTRACE
4591 "\n available_filter_functions - list of functions that can be filtered on\n"
4592 " set_ftrace_filter\t- echo function name in here to only trace these\n"
4593 "\t\t\t functions\n"
4594 "\t accepts: func_full_name or glob-matching-pattern\n"
4595 "\t modules: Can select a group via module\n"
4596 "\t Format: :mod:<module-name>\n"
4597 "\t example: echo :mod:ext3 > set_ftrace_filter\n"
4598 "\t triggers: a command to perform when function is hit\n"
4599 "\t Format: <function>:<trigger>[:count]\n"
4600 "\t trigger: traceon, traceoff\n"
4601 "\t\t enable_event:<system>:<event>\n"
4602 "\t\t disable_event:<system>:<event>\n"
4603 #ifdef CONFIG_STACKTRACE
4606 #ifdef CONFIG_TRACER_SNAPSHOT
4611 "\t example: echo do_fault:traceoff > set_ftrace_filter\n"
4612 "\t echo do_trap:traceoff:3 > set_ftrace_filter\n"
4613 "\t The first one will disable tracing every time do_fault is hit\n"
4614 "\t The second will disable tracing at most 3 times when do_trap is hit\n"
4615 "\t The first time do trap is hit and it disables tracing, the\n"
4616 "\t counter will decrement to 2. If tracing is already disabled,\n"
4617 "\t the counter will not decrement. It only decrements when the\n"
4618 "\t trigger did work\n"
4619 "\t To remove trigger without count:\n"
4620 "\t echo '!<function>:<trigger> > set_ftrace_filter\n"
4621 "\t To remove trigger with a count:\n"
4622 "\t echo '!<function>:<trigger>:0 > set_ftrace_filter\n"
4623 " set_ftrace_notrace\t- echo function name in here to never trace.\n"
4624 "\t accepts: func_full_name, *func_end, func_begin*, *func_middle*\n"
4625 "\t modules: Can select a group via module command :mod:\n"
4626 "\t Does not accept triggers\n"
4627 #endif /* CONFIG_DYNAMIC_FTRACE */
4628 #ifdef CONFIG_FUNCTION_TRACER
4629 " set_ftrace_pid\t- Write pid(s) to only function trace those pids\n"
4632 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
4633 " set_graph_function\t- Trace the nested calls of a function (function_graph)\n"
4634 " set_graph_notrace\t- Do not trace the nested calls of a function (function_graph)\n"
4635 " max_graph_depth\t- Trace a limited depth of nested calls (0 is unlimited)\n"
4637 #ifdef CONFIG_TRACER_SNAPSHOT
4638 "\n snapshot\t\t- Like 'trace' but shows the content of the static\n"
4639 "\t\t\t snapshot buffer. Read the contents for more\n"
4640 "\t\t\t information\n"
4642 #ifdef CONFIG_STACK_TRACER
4643 " stack_trace\t\t- Shows the max stack trace when active\n"
4644 " stack_max_size\t- Shows current max stack size that was traced\n"
4645 "\t\t\t Write into this file to reset the max size (trigger a\n"
4646 "\t\t\t new trace)\n"
4647 #ifdef CONFIG_DYNAMIC_FTRACE
4648 " stack_trace_filter\t- Like set_ftrace_filter but limits what stack_trace\n"
4651 #endif /* CONFIG_STACK_TRACER */
4652 #ifdef CONFIG_KPROBE_EVENTS
4653 " kprobe_events\t\t- Add/remove/show the kernel dynamic events\n"
4654 "\t\t\t Write into this file to define/undefine new trace events.\n"
4656 #ifdef CONFIG_UPROBE_EVENTS
4657 " uprobe_events\t\t- Add/remove/show the userspace dynamic events\n"
4658 "\t\t\t Write into this file to define/undefine new trace events.\n"
4660 #if defined(CONFIG_KPROBE_EVENTS) || defined(CONFIG_UPROBE_EVENTS)
4661 "\t accepts: event-definitions (one definition per line)\n"
4662 "\t Format: p[:[<group>/]<event>] <place> [<args>]\n"
4663 "\t r[maxactive][:[<group>/]<event>] <place> [<args>]\n"
4664 "\t -:[<group>/]<event>\n"
4665 #ifdef CONFIG_KPROBE_EVENTS
4666 "\t place: [<module>:]<symbol>[+<offset>]|<memaddr>\n"
4667 "place (kretprobe): [<module>:]<symbol>[+<offset>]|<memaddr>\n"
4669 #ifdef CONFIG_UPROBE_EVENTS
4670 "\t place: <path>:<offset>\n"
4672 "\t args: <name>=fetcharg[:type]\n"
4673 "\t fetcharg: %<register>, @<address>, @<symbol>[+|-<offset>],\n"
4674 "\t $stack<index>, $stack, $retval, $comm\n"
4675 "\t type: s8/16/32/64, u8/16/32/64, x8/16/32/64, string,\n"
4676 "\t b<bit-width>@<bit-offset>/<container-size>\n"
4678 " events/\t\t- Directory containing all trace event subsystems:\n"
4679 " enable\t\t- Write 0/1 to enable/disable tracing of all events\n"
4680 " events/<system>/\t- Directory containing all trace events for <system>:\n"
4681 " enable\t\t- Write 0/1 to enable/disable tracing of all <system>\n"
4683 " filter\t\t- If set, only events passing filter are traced\n"
4684 " events/<system>/<event>/\t- Directory containing control files for\n"
4686 " enable\t\t- Write 0/1 to enable/disable tracing of <event>\n"
4687 " filter\t\t- If set, only events passing filter are traced\n"
4688 " trigger\t\t- If set, a command to perform when event is hit\n"
4689 "\t Format: <trigger>[:count][if <filter>]\n"
4690 "\t trigger: traceon, traceoff\n"
4691 "\t enable_event:<system>:<event>\n"
4692 "\t disable_event:<system>:<event>\n"
4693 #ifdef CONFIG_HIST_TRIGGERS
4694 "\t enable_hist:<system>:<event>\n"
4695 "\t disable_hist:<system>:<event>\n"
4697 #ifdef CONFIG_STACKTRACE
4700 #ifdef CONFIG_TRACER_SNAPSHOT
4703 #ifdef CONFIG_HIST_TRIGGERS
4704 "\t\t hist (see below)\n"
4706 "\t example: echo traceoff > events/block/block_unplug/trigger\n"
4707 "\t echo traceoff:3 > events/block/block_unplug/trigger\n"
4708 "\t echo 'enable_event:kmem:kmalloc:3 if nr_rq > 1' > \\\n"
4709 "\t events/block/block_unplug/trigger\n"
4710 "\t The first disables tracing every time block_unplug is hit.\n"
4711 "\t The second disables tracing the first 3 times block_unplug is hit.\n"
4712 "\t The third enables the kmalloc event the first 3 times block_unplug\n"
4713 "\t is hit and has value of greater than 1 for the 'nr_rq' event field.\n"
4714 "\t Like function triggers, the counter is only decremented if it\n"
4715 "\t enabled or disabled tracing.\n"
4716 "\t To remove a trigger without a count:\n"
4717 "\t echo '!<trigger> > <system>/<event>/trigger\n"
4718 "\t To remove a trigger with a count:\n"
4719 "\t echo '!<trigger>:0 > <system>/<event>/trigger\n"
4720 "\t Filters can be ignored when removing a trigger.\n"
4721 #ifdef CONFIG_HIST_TRIGGERS
4722 " hist trigger\t- If set, event hits are aggregated into a hash table\n"
4723 "\t Format: hist:keys=<field1[,field2,...]>\n"
4724 "\t [:values=<field1[,field2,...]>]\n"
4725 "\t [:sort=<field1[,field2,...]>]\n"
4726 "\t [:size=#entries]\n"
4727 "\t [:pause][:continue][:clear]\n"
4728 "\t [:name=histname1]\n"
4729 "\t [if <filter>]\n\n"
4730 "\t Note, special fields can be used as well:\n"
4731 "\t common_timestamp - to record current timestamp\n"
4732 "\t common_cpu - to record the CPU the event happened on\n"
4734 "\t When a matching event is hit, an entry is added to a hash\n"
4735 "\t table using the key(s) and value(s) named, and the value of a\n"
4736 "\t sum called 'hitcount' is incremented. Keys and values\n"
4737 "\t correspond to fields in the event's format description. Keys\n"
4738 "\t can be any field, or the special string 'stacktrace'.\n"
4739 "\t Compound keys consisting of up to two fields can be specified\n"
4740 "\t by the 'keys' keyword. Values must correspond to numeric\n"
4741 "\t fields. Sort keys consisting of up to two fields can be\n"
4742 "\t specified using the 'sort' keyword. The sort direction can\n"
4743 "\t be modified by appending '.descending' or '.ascending' to a\n"
4744 "\t sort field. The 'size' parameter can be used to specify more\n"
4745 "\t or fewer than the default 2048 entries for the hashtable size.\n"
4746 "\t If a hist trigger is given a name using the 'name' parameter,\n"
4747 "\t its histogram data will be shared with other triggers of the\n"
4748 "\t same name, and trigger hits will update this common data.\n\n"
4749 "\t Reading the 'hist' file for the event will dump the hash\n"
4750 "\t table in its entirety to stdout. If there are multiple hist\n"
4751 "\t triggers attached to an event, there will be a table for each\n"
4752 "\t trigger in the output. The table displayed for a named\n"
4753 "\t trigger will be the same as any other instance having the\n"
4754 "\t same name. The default format used to display a given field\n"
4755 "\t can be modified by appending any of the following modifiers\n"
4756 "\t to the field name, as applicable:\n\n"
4757 "\t .hex display a number as a hex value\n"
4758 "\t .sym display an address as a symbol\n"
4759 "\t .sym-offset display an address as a symbol and offset\n"
4760 "\t .execname display a common_pid as a program name\n"
4761 "\t .syscall display a syscall id as a syscall name\n"
4762 "\t .log2 display log2 value rather than raw number\n"
4763 "\t .usecs display a common_timestamp in microseconds\n\n"
4764 "\t The 'pause' parameter can be used to pause an existing hist\n"
4765 "\t trigger or to start a hist trigger but not log any events\n"
4766 "\t until told to do so. 'continue' can be used to start or\n"
4767 "\t restart a paused hist trigger.\n\n"
4768 "\t The 'clear' parameter will clear the contents of a running\n"
4769 "\t hist trigger and leave its current paused/active state\n"
4771 "\t The enable_hist and disable_hist triggers can be used to\n"
4772 "\t have one event conditionally start and stop another event's\n"
4773 "\t    already-attached hist trigger. The syntax is analogous to\n"
4774 "\t the enable_event and disable_event triggers.\n"
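/*
 * Illustrative use of the hist trigger syntax described above (paths are
 * relative to the tracefs directory; the kmem:kmalloc event and its
 * call_site/bytes_req fields are only an example):
 *
 *   # echo 'hist:keys=call_site.sym:values=bytes_req' > \
 *           events/kmem/kmalloc/trigger
 *   # cat events/kmem/kmalloc/hist
 */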
4779 tracing_readme_read(struct file *filp, char __user *ubuf,
4780 size_t cnt, loff_t *ppos)
4782 return simple_read_from_buffer(ubuf, cnt, ppos,
4783 readme_msg, strlen(readme_msg));
4786 static const struct file_operations tracing_readme_fops = {
4787 .open = tracing_open_generic,
4788 .read = tracing_readme_read,
4789 .llseek = generic_file_llseek,
4792 static void *saved_tgids_next(struct seq_file *m, void *v, loff_t *pos)
4796 return trace_find_tgid_ptr(pid);
4799 static void *saved_tgids_start(struct seq_file *m, loff_t *pos)
4803 return trace_find_tgid_ptr(pid);
4806 static void saved_tgids_stop(struct seq_file *m, void *v)
4810 static int saved_tgids_show(struct seq_file *m, void *v)
4812 int *entry = (int *)v;
4813 int pid = entry - tgid_map;
4819 seq_printf(m, "%d %d\n", pid, tgid);
4823 static const struct seq_operations tracing_saved_tgids_seq_ops = {
4824 .start = saved_tgids_start,
4825 .stop = saved_tgids_stop,
4826 .next = saved_tgids_next,
4827 .show = saved_tgids_show,
4830 static int tracing_saved_tgids_open(struct inode *inode, struct file *filp)
4832 if (tracing_disabled)
4835 return seq_open(filp, &tracing_saved_tgids_seq_ops);
4839 static const struct file_operations tracing_saved_tgids_fops = {
4840 .open = tracing_saved_tgids_open,
4842 .llseek = seq_lseek,
4843 .release = seq_release,
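/*
 * Example (illustrative): the "saved_tgids" file is only populated while
 * the "record-tgid" trace option is set, and each line printed by
 * saved_tgids_show() is "<pid> <tgid>":
 *
 *   # echo 1 > options/record-tgid
 *   # cat saved_tgids
 */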
4846 static void *saved_cmdlines_next(struct seq_file *m, void *v, loff_t *pos)
4848 unsigned int *ptr = v;
4850 if (*pos || m->count)
4855 for (; ptr < &savedcmd->map_cmdline_to_pid[savedcmd->cmdline_num];
4857 if (*ptr == -1 || *ptr == NO_CMDLINE_MAP)
4866 static void *saved_cmdlines_start(struct seq_file *m, loff_t *pos)
4872 arch_spin_lock(&trace_cmdline_lock);
4874 v = &savedcmd->map_cmdline_to_pid[0];
4876 v = saved_cmdlines_next(m, v, &l);
4884 static void saved_cmdlines_stop(struct seq_file *m, void *v)
4886 arch_spin_unlock(&trace_cmdline_lock);
4890 static int saved_cmdlines_show(struct seq_file *m, void *v)
4892 char buf[TASK_COMM_LEN];
4893 unsigned int *pid = v;
4895 __trace_find_cmdline(*pid, buf);
4896 seq_printf(m, "%d %s\n", *pid, buf);
4900 static const struct seq_operations tracing_saved_cmdlines_seq_ops = {
4901 .start = saved_cmdlines_start,
4902 .next = saved_cmdlines_next,
4903 .stop = saved_cmdlines_stop,
4904 .show = saved_cmdlines_show,
4907 static int tracing_saved_cmdlines_open(struct inode *inode, struct file *filp)
4909 if (tracing_disabled)
4912 return seq_open(filp, &tracing_saved_cmdlines_seq_ops);
4915 static const struct file_operations tracing_saved_cmdlines_fops = {
4916 .open = tracing_saved_cmdlines_open,
4918 .llseek = seq_lseek,
4919 .release = seq_release,
4923 tracing_saved_cmdlines_size_read(struct file *filp, char __user *ubuf,
4924 size_t cnt, loff_t *ppos)
4929 arch_spin_lock(&trace_cmdline_lock);
4930 r = scnprintf(buf, sizeof(buf), "%u\n", savedcmd->cmdline_num);
4931 arch_spin_unlock(&trace_cmdline_lock);
4933 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
4936 static void free_saved_cmdlines_buffer(struct saved_cmdlines_buffer *s)
4938 kfree(s->saved_cmdlines);
4939 kfree(s->map_cmdline_to_pid);
4943 static int tracing_resize_saved_cmdlines(unsigned int val)
4945 struct saved_cmdlines_buffer *s, *savedcmd_temp;
4947 s = kmalloc(sizeof(*s), GFP_KERNEL);
4951 if (allocate_cmdlines_buffer(val, s) < 0) {
4956 arch_spin_lock(&trace_cmdline_lock);
4957 savedcmd_temp = savedcmd;
4959 arch_spin_unlock(&trace_cmdline_lock);
4960 free_saved_cmdlines_buffer(savedcmd_temp);
4966 tracing_saved_cmdlines_size_write(struct file *filp, const char __user *ubuf,
4967 size_t cnt, loff_t *ppos)
4972 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
4976 	/* must have at least 1 entry and at most PID_MAX_DEFAULT entries */
4977 if (!val || val > PID_MAX_DEFAULT)
4980 ret = tracing_resize_saved_cmdlines((unsigned int)val);
4989 static const struct file_operations tracing_saved_cmdlines_size_fops = {
4990 .open = tracing_open_generic,
4991 .read = tracing_saved_cmdlines_size_read,
4992 .write = tracing_saved_cmdlines_size_write,
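/*
 * Example (illustrative): resizing the pid->comm cache that backs the
 * "saved_cmdlines" file; reading the size file returns the current
 * number of entries:
 *
 *   # echo 1024 > saved_cmdlines_size
 *   # cat saved_cmdlines_size
 *   1024
 */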
4995 #ifdef CONFIG_TRACE_EVAL_MAP_FILE
4996 static union trace_eval_map_item *
4997 update_eval_map(union trace_eval_map_item *ptr)
4999 if (!ptr->map.eval_string) {
5000 if (ptr->tail.next) {
5001 ptr = ptr->tail.next;
5002 /* Set ptr to the next real item (skip head) */
5010 static void *eval_map_next(struct seq_file *m, void *v, loff_t *pos)
5012 union trace_eval_map_item *ptr = v;
5015 * Paranoid! If ptr points to end, we don't want to increment past it.
5016 * This really should never happen.
5018 ptr = update_eval_map(ptr);
5019 if (WARN_ON_ONCE(!ptr))
5026 ptr = update_eval_map(ptr);
5031 static void *eval_map_start(struct seq_file *m, loff_t *pos)
5033 union trace_eval_map_item *v;
5036 mutex_lock(&trace_eval_mutex);
5038 v = trace_eval_maps;
5042 while (v && l < *pos) {
5043 v = eval_map_next(m, v, &l);
5049 static void eval_map_stop(struct seq_file *m, void *v)
5051 mutex_unlock(&trace_eval_mutex);
5054 static int eval_map_show(struct seq_file *m, void *v)
5056 union trace_eval_map_item *ptr = v;
5058 seq_printf(m, "%s %ld (%s)\n",
5059 ptr->map.eval_string, ptr->map.eval_value,
5065 static const struct seq_operations tracing_eval_map_seq_ops = {
5066 .start = eval_map_start,
5067 .next = eval_map_next,
5068 .stop = eval_map_stop,
5069 .show = eval_map_show,
5072 static int tracing_eval_map_open(struct inode *inode, struct file *filp)
5074 if (tracing_disabled)
5077 return seq_open(filp, &tracing_eval_map_seq_ops);
5080 static const struct file_operations tracing_eval_map_fops = {
5081 .open = tracing_eval_map_open,
5083 .llseek = seq_lseek,
5084 .release = seq_release,
5087 static inline union trace_eval_map_item *
5088 trace_eval_jmp_to_tail(union trace_eval_map_item *ptr)
5090 /* Return tail of array given the head */
5091 return ptr + ptr->head.length + 1;
5095 trace_insert_eval_map_file(struct module *mod, struct trace_eval_map **start,
5098 struct trace_eval_map **stop;
5099 struct trace_eval_map **map;
5100 union trace_eval_map_item *map_array;
5101 union trace_eval_map_item *ptr;
5106 * The trace_eval_maps contains the map plus a head and tail item,
5107 * where the head holds the module and length of array, and the
5108 * tail holds a pointer to the next list.
5110 map_array = kmalloc_array(len + 2, sizeof(*map_array), GFP_KERNEL);
5112 pr_warn("Unable to allocate trace eval mapping\n");
5116 mutex_lock(&trace_eval_mutex);
5118 if (!trace_eval_maps)
5119 trace_eval_maps = map_array;
5121 ptr = trace_eval_maps;
5123 ptr = trace_eval_jmp_to_tail(ptr);
5124 if (!ptr->tail.next)
5126 ptr = ptr->tail.next;
5129 ptr->tail.next = map_array;
5131 map_array->head.mod = mod;
5132 map_array->head.length = len;
5135 for (map = start; (unsigned long)map < (unsigned long)stop; map++) {
5136 map_array->map = **map;
5139 memset(map_array, 0, sizeof(*map_array));
5141 mutex_unlock(&trace_eval_mutex);
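/*
 * Sketch of the array built by trace_insert_eval_map_file() above, for one
 * module contributing "len" maps (matching the head/tail comment inside
 * that function):
 *
 *   map_array[0]         head: { .mod = mod, .length = len }
 *   map_array[1..len]    one trace_eval_map copied per entry
 *   map_array[len + 1]   tail: { .next = next module's array, or zeroed }
 *
 * trace_eval_jmp_to_tail() relies on this layout: ptr + head.length + 1.
 */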
5144 static void trace_create_eval_file(struct dentry *d_tracer)
5146 trace_create_file("eval_map", 0444, d_tracer,
5147 NULL, &tracing_eval_map_fops);
5150 #else /* CONFIG_TRACE_EVAL_MAP_FILE */
5151 static inline void trace_create_eval_file(struct dentry *d_tracer) { }
5152 static inline void trace_insert_eval_map_file(struct module *mod,
5153 struct trace_eval_map **start, int len) { }
5154 #endif /* !CONFIG_TRACE_EVAL_MAP_FILE */
5156 static void trace_insert_eval_map(struct module *mod,
5157 struct trace_eval_map **start, int len)
5159 struct trace_eval_map **map;
5166 trace_event_eval_update(map, len);
5168 trace_insert_eval_map_file(mod, start, len);
5172 tracing_set_trace_read(struct file *filp, char __user *ubuf,
5173 size_t cnt, loff_t *ppos)
5175 struct trace_array *tr = filp->private_data;
5176 char buf[MAX_TRACER_SIZE+2];
5179 mutex_lock(&trace_types_lock);
5180 r = sprintf(buf, "%s\n", tr->current_trace->name);
5181 mutex_unlock(&trace_types_lock);
5183 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
5186 int tracer_init(struct tracer *t, struct trace_array *tr)
5188 tracing_reset_online_cpus(&tr->trace_buffer);
5192 static void set_buffer_entries(struct trace_buffer *buf, unsigned long val)
5196 for_each_tracing_cpu(cpu)
5197 per_cpu_ptr(buf->data, cpu)->entries = val;
5200 #ifdef CONFIG_TRACER_MAX_TRACE
5201 /* resize @trace_buf's buffer to the size of @size_buf's entries */
5202 static int resize_buffer_duplicate_size(struct trace_buffer *trace_buf,
5203 struct trace_buffer *size_buf, int cpu_id)
5207 if (cpu_id == RING_BUFFER_ALL_CPUS) {
5208 for_each_tracing_cpu(cpu) {
5209 ret = ring_buffer_resize(trace_buf->buffer,
5210 per_cpu_ptr(size_buf->data, cpu)->entries, cpu);
5213 per_cpu_ptr(trace_buf->data, cpu)->entries =
5214 per_cpu_ptr(size_buf->data, cpu)->entries;
5217 ret = ring_buffer_resize(trace_buf->buffer,
5218 per_cpu_ptr(size_buf->data, cpu_id)->entries, cpu_id);
5220 per_cpu_ptr(trace_buf->data, cpu_id)->entries =
5221 per_cpu_ptr(size_buf->data, cpu_id)->entries;
5226 #endif /* CONFIG_TRACER_MAX_TRACE */
5228 static int __tracing_resize_ring_buffer(struct trace_array *tr,
5229 unsigned long size, int cpu)
5234 * If kernel or user changes the size of the ring buffer
5235 * we use the size that was given, and we can forget about
5236 * expanding it later.
5238 ring_buffer_expanded = true;
5240 /* May be called before buffers are initialized */
5241 if (!tr->trace_buffer.buffer)
5244 ret = ring_buffer_resize(tr->trace_buffer.buffer, size, cpu);
5248 #ifdef CONFIG_TRACER_MAX_TRACE
5249 if (!(tr->flags & TRACE_ARRAY_FL_GLOBAL) ||
5250 !tr->current_trace->use_max_tr)
5253 ret = ring_buffer_resize(tr->max_buffer.buffer, size, cpu);
5255 int r = resize_buffer_duplicate_size(&tr->trace_buffer,
5256 &tr->trace_buffer, cpu);
5259 			 * AARGH! We are left with a different
5260 			 * sized max buffer!!!!
5261 			 * The max buffer is our "snapshot" buffer.
5262 			 * When a tracer needs a snapshot (one of the
5263 			 * latency tracers), it swaps the max buffer
5264 			 * with the saved snapshot. We succeeded in
5265 			 * updating the size of the main buffer, but failed
5266 			 * to update the size of the max buffer. And when we
5267 			 * tried to reset the main buffer to the original
5268 			 * size, we failed there too. This is very unlikely
5269 			 * to happen, but if it does, warn and kill all
5273 tracing_disabled = 1;
5278 if (cpu == RING_BUFFER_ALL_CPUS)
5279 set_buffer_entries(&tr->max_buffer, size);
5281 per_cpu_ptr(tr->max_buffer.data, cpu)->entries = size;
5284 #endif /* CONFIG_TRACER_MAX_TRACE */
5286 if (cpu == RING_BUFFER_ALL_CPUS)
5287 set_buffer_entries(&tr->trace_buffer, size);
5289 per_cpu_ptr(tr->trace_buffer.data, cpu)->entries = size;
5294 static ssize_t tracing_resize_ring_buffer(struct trace_array *tr,
5295 unsigned long size, int cpu_id)
5299 mutex_lock(&trace_types_lock);
5301 if (cpu_id != RING_BUFFER_ALL_CPUS) {
5302 /* make sure, this cpu is enabled in the mask */
5303 if (!cpumask_test_cpu(cpu_id, tracing_buffer_mask)) {
5309 ret = __tracing_resize_ring_buffer(tr, size, cpu_id);
5314 mutex_unlock(&trace_types_lock);
5321 * tracing_update_buffers - used by tracing facility to expand ring buffers
5323  * To save memory on systems where tracing is configured in but never
5324  * used, the ring buffers are set to a minimum size. But once a user
5325  * starts to use the tracing facility, they need to grow
5326  * to their default size.
5328 * This function is to be called when a tracer is about to be used.
5330 int tracing_update_buffers(void)
5334 mutex_lock(&trace_types_lock);
5335 if (!ring_buffer_expanded)
5336 ret = __tracing_resize_ring_buffer(&global_trace, trace_buf_size,
5337 RING_BUFFER_ALL_CPUS);
5338 mutex_unlock(&trace_types_lock);
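/*
 * Example (illustrative): until tracing_update_buffers() has expanded the
 * buffers, buffer_size_kb reports both the boot-time minimal size and the
 * size it will grow to, e.g. something like:
 *
 *   # cat buffer_size_kb
 *   7 (expanded: 1408)
 *
 * Enabling a tracer or writing to buffer_size_kb triggers the expansion.
 */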
5343 struct trace_option_dentry;
5346 create_trace_option_files(struct trace_array *tr, struct tracer *tracer);
5349 * Used to clear out the tracer before deletion of an instance.
5350 * Must have trace_types_lock held.
5352 static void tracing_set_nop(struct trace_array *tr)
5354 if (tr->current_trace == &nop_trace)
5357 tr->current_trace->enabled--;
5359 if (tr->current_trace->reset)
5360 tr->current_trace->reset(tr);
5362 tr->current_trace = &nop_trace;
5365 static void add_tracer_options(struct trace_array *tr, struct tracer *t)
5367 /* Only enable if the directory has been created already. */
5371 create_trace_option_files(tr, t);
5374 static int tracing_set_tracer(struct trace_array *tr, const char *buf)
5377 #ifdef CONFIG_TRACER_MAX_TRACE
5382 mutex_lock(&trace_types_lock);
5384 if (!ring_buffer_expanded) {
5385 ret = __tracing_resize_ring_buffer(tr, trace_buf_size,
5386 RING_BUFFER_ALL_CPUS);
5392 for (t = trace_types; t; t = t->next) {
5393 if (strcmp(t->name, buf) == 0)
5400 if (t == tr->current_trace)
5403 /* Some tracers won't work on kernel command line */
5404 if (system_state < SYSTEM_RUNNING && t->noboot) {
5405 pr_warn("Tracer '%s' is not allowed on command line, ignored\n",
5410 /* Some tracers are only allowed for the top level buffer */
5411 if (!trace_ok_for_array(t, tr)) {
5416 /* If trace pipe files are being read, we can't change the tracer */
5417 if (tr->current_trace->ref) {
5422 trace_branch_disable();
5424 tr->current_trace->enabled--;
5426 if (tr->current_trace->reset)
5427 tr->current_trace->reset(tr);
5429 /* Current trace needs to be nop_trace before synchronize_sched */
5430 tr->current_trace = &nop_trace;
5432 #ifdef CONFIG_TRACER_MAX_TRACE
5433 had_max_tr = tr->allocated_snapshot;
5435 if (had_max_tr && !t->use_max_tr) {
5437 * We need to make sure that the update_max_tr sees that
5438 * current_trace changed to nop_trace to keep it from
5439 * swapping the buffers after we resize it.
5440 		 * The update_max_tr is called with interrupts disabled,
5441 		 * so a synchronize_sched() is sufficient.
5443 synchronize_sched();
5448 #ifdef CONFIG_TRACER_MAX_TRACE
5449 if (t->use_max_tr && !had_max_tr) {
5450 ret = tracing_alloc_snapshot_instance(tr);
5457 ret = tracer_init(t, tr);
5462 tr->current_trace = t;
5463 tr->current_trace->enabled++;
5464 trace_branch_enable(tr);
5466 mutex_unlock(&trace_types_lock);
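/*
 * Example (illustrative): tracers are selected by writing their name to
 * "current_tracer"; "nop" removes the current tracer:
 *
 *   # echo function > current_tracer
 *   # echo nop > current_tracer
 */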
5472 tracing_set_trace_write(struct file *filp, const char __user *ubuf,
5473 size_t cnt, loff_t *ppos)
5475 struct trace_array *tr = filp->private_data;
5476 char buf[MAX_TRACER_SIZE+1];
5483 if (cnt > MAX_TRACER_SIZE)
5484 cnt = MAX_TRACER_SIZE;
5486 if (copy_from_user(buf, ubuf, cnt))
5491 /* strip ending whitespace. */
5492 for (i = cnt - 1; i > 0 && isspace(buf[i]); i--)
5495 err = tracing_set_tracer(tr, buf);
5505 tracing_nsecs_read(unsigned long *ptr, char __user *ubuf,
5506 size_t cnt, loff_t *ppos)
5511 r = snprintf(buf, sizeof(buf), "%ld\n",
5512 *ptr == (unsigned long)-1 ? -1 : nsecs_to_usecs(*ptr));
5513 if (r > sizeof(buf))
5515 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
5519 tracing_nsecs_write(unsigned long *ptr, const char __user *ubuf,
5520 size_t cnt, loff_t *ppos)
5525 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
5535 tracing_thresh_read(struct file *filp, char __user *ubuf,
5536 size_t cnt, loff_t *ppos)
5538 return tracing_nsecs_read(&tracing_thresh, ubuf, cnt, ppos);
5542 tracing_thresh_write(struct file *filp, const char __user *ubuf,
5543 size_t cnt, loff_t *ppos)
5545 struct trace_array *tr = filp->private_data;
5548 mutex_lock(&trace_types_lock);
5549 ret = tracing_nsecs_write(&tracing_thresh, ubuf, cnt, ppos);
5553 if (tr->current_trace->update_thresh) {
5554 ret = tr->current_trace->update_thresh(tr);
5561 mutex_unlock(&trace_types_lock);
5566 #if defined(CONFIG_TRACER_MAX_TRACE) || defined(CONFIG_HWLAT_TRACER)
5569 tracing_max_lat_read(struct file *filp, char __user *ubuf,
5570 size_t cnt, loff_t *ppos)
5572 return tracing_nsecs_read(filp->private_data, ubuf, cnt, ppos);
5576 tracing_max_lat_write(struct file *filp, const char __user *ubuf,
5577 size_t cnt, loff_t *ppos)
5579 return tracing_nsecs_write(filp->private_data, ubuf, cnt, ppos);
5584 static int tracing_open_pipe(struct inode *inode, struct file *filp)
5586 struct trace_array *tr = inode->i_private;
5587 struct trace_iterator *iter;
5590 if (tracing_disabled)
5593 if (trace_array_get(tr) < 0)
5596 mutex_lock(&trace_types_lock);
5598 /* create a buffer to store the information to pass to userspace */
5599 iter = kzalloc(sizeof(*iter), GFP_KERNEL);
5602 __trace_array_put(tr);
5606 trace_seq_init(&iter->seq);
5607 iter->trace = tr->current_trace;
5609 if (!alloc_cpumask_var(&iter->started, GFP_KERNEL)) {
5614 /* trace pipe does not show start of buffer */
5615 cpumask_setall(iter->started);
5617 if (tr->trace_flags & TRACE_ITER_LATENCY_FMT)
5618 iter->iter_flags |= TRACE_FILE_LAT_FMT;
5620 /* Output in nanoseconds only if we are using a clock in nanoseconds. */
5621 if (trace_clocks[tr->clock_id].in_ns)
5622 iter->iter_flags |= TRACE_FILE_TIME_IN_NS;
5625 iter->trace_buffer = &tr->trace_buffer;
5626 iter->cpu_file = tracing_get_cpu(inode);
5627 mutex_init(&iter->mutex);
5628 filp->private_data = iter;
5630 if (iter->trace->pipe_open)
5631 iter->trace->pipe_open(iter);
5633 nonseekable_open(inode, filp);
5635 tr->current_trace->ref++;
5637 mutex_unlock(&trace_types_lock);
5642 __trace_array_put(tr);
5643 mutex_unlock(&trace_types_lock);
5647 static int tracing_release_pipe(struct inode *inode, struct file *file)
5649 struct trace_iterator *iter = file->private_data;
5650 struct trace_array *tr = inode->i_private;
5652 mutex_lock(&trace_types_lock);
5654 tr->current_trace->ref--;
5656 if (iter->trace->pipe_close)
5657 iter->trace->pipe_close(iter);
5659 mutex_unlock(&trace_types_lock);
5661 free_cpumask_var(iter->started);
5662 mutex_destroy(&iter->mutex);
5665 trace_array_put(tr);
5671 trace_poll(struct trace_iterator *iter, struct file *filp, poll_table *poll_table)
5673 struct trace_array *tr = iter->tr;
5675 /* Iterators are static, they should be filled or empty */
5676 if (trace_buffer_iter(iter, iter->cpu_file))
5677 return EPOLLIN | EPOLLRDNORM;
5679 if (tr->trace_flags & TRACE_ITER_BLOCK)
5681 * Always select as readable when in blocking mode
5683 return EPOLLIN | EPOLLRDNORM;
5685 return ring_buffer_poll_wait(iter->trace_buffer->buffer, iter->cpu_file,
5690 tracing_poll_pipe(struct file *filp, poll_table *poll_table)
5692 struct trace_iterator *iter = filp->private_data;
5694 return trace_poll(iter, filp, poll_table);
5697 /* Must be called with iter->mutex held. */
5698 static int tracing_wait_pipe(struct file *filp)
5700 struct trace_iterator *iter = filp->private_data;
5703 while (trace_empty(iter)) {
5705 if ((filp->f_flags & O_NONBLOCK)) {
5710 		 * We return EOF only after we have read something and tracing
5711 		 * has been disabled. We keep blocking if tracing is disabled
5712 		 * but we have never read anything. This allows a user to cat
5713 		 * this file, and then enable tracing. But after we have read
5714 		 * something, we give an EOF when tracing is disabled again.
5716 * iter->pos will be 0 if we haven't read anything.
5718 if (!tracer_tracing_is_on(iter->tr) && iter->pos)
5721 mutex_unlock(&iter->mutex);
5723 ret = wait_on_pipe(iter, false);
5725 mutex_lock(&iter->mutex);
5738 tracing_read_pipe(struct file *filp, char __user *ubuf,
5739 size_t cnt, loff_t *ppos)
5741 struct trace_iterator *iter = filp->private_data;
5745 	 * Avoid more than one consumer on a single file descriptor.
5746 	 * This is just a matter of trace coherency, the ring buffer itself
5749 mutex_lock(&iter->mutex);
5751 /* return any leftover data */
5752 sret = trace_seq_to_user(&iter->seq, ubuf, cnt);
5756 trace_seq_init(&iter->seq);
5758 if (iter->trace->read) {
5759 sret = iter->trace->read(iter, filp, ubuf, cnt, ppos);
5765 sret = tracing_wait_pipe(filp);
5769 /* stop when tracing is finished */
5770 if (trace_empty(iter)) {
5775 if (cnt >= PAGE_SIZE)
5776 cnt = PAGE_SIZE - 1;
5778 /* reset all but tr, trace, and overruns */
5779 memset(&iter->seq, 0,
5780 sizeof(struct trace_iterator) -
5781 offsetof(struct trace_iterator, seq));
5782 cpumask_clear(iter->started);
5783 trace_seq_init(&iter->seq);
5786 trace_event_read_lock();
5787 trace_access_lock(iter->cpu_file);
5788 while (trace_find_next_entry_inc(iter) != NULL) {
5789 enum print_line_t ret;
5790 int save_len = iter->seq.seq.len;
5792 ret = print_trace_line(iter);
5793 if (ret == TRACE_TYPE_PARTIAL_LINE) {
5794 /* don't print partial lines */
5795 iter->seq.seq.len = save_len;
5798 if (ret != TRACE_TYPE_NO_CONSUME)
5799 trace_consume(iter);
5801 if (trace_seq_used(&iter->seq) >= cnt)
5805 		 * Setting the full flag means we reached the trace_seq buffer
5806 		 * size and should have left via the partial-line condition
5807 		 * above. One of the trace_seq_* functions is not being used properly.
5809 WARN_ONCE(iter->seq.full, "full flag set for trace type %d",
5812 trace_access_unlock(iter->cpu_file);
5813 trace_event_read_unlock();
5815 /* Now copy what we have to the user */
5816 sret = trace_seq_to_user(&iter->seq, ubuf, cnt);
5817 if (iter->seq.seq.readpos >= trace_seq_used(&iter->seq))
5818 trace_seq_init(&iter->seq);
5821 * If there was nothing to send to user, in spite of consuming trace
5822 * entries, go back to wait for more entries.
5828 mutex_unlock(&iter->mutex);
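/*
 * Example (illustrative): unlike "trace", reading "trace_pipe" consumes the
 * entries it returns and blocks while the buffer is empty (unless opened
 * O_NONBLOCK), so it can be left running while events stream in:
 *
 *   # cat trace_pipe > /tmp/trace.log &
 *   # echo 1 > events/sched/sched_switch/enable
 */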
5833 static void tracing_spd_release_pipe(struct splice_pipe_desc *spd,
5836 __free_page(spd->pages[idx]);
5839 static const struct pipe_buf_operations tracing_pipe_buf_ops = {
5841 .confirm = generic_pipe_buf_confirm,
5842 .release = generic_pipe_buf_release,
5843 .steal = generic_pipe_buf_steal,
5844 .get = generic_pipe_buf_get,
5848 tracing_fill_pipe_page(size_t rem, struct trace_iterator *iter)
5854 /* Seq buffer is page-sized, exactly what we need. */
5856 save_len = iter->seq.seq.len;
5857 ret = print_trace_line(iter);
5859 if (trace_seq_has_overflowed(&iter->seq)) {
5860 iter->seq.seq.len = save_len;
5865 * This should not be hit, because it should only
5866 * be set if the iter->seq overflowed. But check it
5867 * anyway to be safe.
5869 if (ret == TRACE_TYPE_PARTIAL_LINE) {
5870 iter->seq.seq.len = save_len;
5874 count = trace_seq_used(&iter->seq) - save_len;
5877 iter->seq.seq.len = save_len;
5881 if (ret != TRACE_TYPE_NO_CONSUME)
5882 trace_consume(iter);
5884 if (!trace_find_next_entry_inc(iter)) {
5894 static ssize_t tracing_splice_read_pipe(struct file *filp,
5896 struct pipe_inode_info *pipe,
5900 struct page *pages_def[PIPE_DEF_BUFFERS];
5901 struct partial_page partial_def[PIPE_DEF_BUFFERS];
5902 struct trace_iterator *iter = filp->private_data;
5903 struct splice_pipe_desc spd = {
5905 .partial = partial_def,
5906 .nr_pages = 0, /* This gets updated below. */
5907 .nr_pages_max = PIPE_DEF_BUFFERS,
5908 .ops = &tracing_pipe_buf_ops,
5909 .spd_release = tracing_spd_release_pipe,
5915 if (splice_grow_spd(pipe, &spd))
5918 mutex_lock(&iter->mutex);
5920 if (iter->trace->splice_read) {
5921 ret = iter->trace->splice_read(iter, filp,
5922 ppos, pipe, len, flags);
5927 ret = tracing_wait_pipe(filp);
5931 if (!iter->ent && !trace_find_next_entry_inc(iter)) {
5936 trace_event_read_lock();
5937 trace_access_lock(iter->cpu_file);
5939 /* Fill as many pages as possible. */
5940 for (i = 0, rem = len; i < spd.nr_pages_max && rem; i++) {
5941 spd.pages[i] = alloc_page(GFP_KERNEL);
5945 rem = tracing_fill_pipe_page(rem, iter);
5947 /* Copy the data into the page, so we can start over. */
5948 ret = trace_seq_to_buffer(&iter->seq,
5949 page_address(spd.pages[i]),
5950 trace_seq_used(&iter->seq));
5952 __free_page(spd.pages[i]);
5955 spd.partial[i].offset = 0;
5956 spd.partial[i].len = trace_seq_used(&iter->seq);
5958 trace_seq_init(&iter->seq);
5961 trace_access_unlock(iter->cpu_file);
5962 trace_event_read_unlock();
5963 mutex_unlock(&iter->mutex);
5968 ret = splice_to_pipe(pipe, &spd);
5972 splice_shrink_spd(&spd);
5976 mutex_unlock(&iter->mutex);
5981 tracing_entries_read(struct file *filp, char __user *ubuf,
5982 size_t cnt, loff_t *ppos)
5984 struct inode *inode = file_inode(filp);
5985 struct trace_array *tr = inode->i_private;
5986 int cpu = tracing_get_cpu(inode);
5991 mutex_lock(&trace_types_lock);
5993 if (cpu == RING_BUFFER_ALL_CPUS) {
5994 int cpu, buf_size_same;
5999 /* check if all cpu sizes are same */
6000 for_each_tracing_cpu(cpu) {
6001 /* fill in the size from first enabled cpu */
6003 size = per_cpu_ptr(tr->trace_buffer.data, cpu)->entries;
6004 if (size != per_cpu_ptr(tr->trace_buffer.data, cpu)->entries) {
6010 if (buf_size_same) {
6011 if (!ring_buffer_expanded)
6012 r = sprintf(buf, "%lu (expanded: %lu)\n",
6014 trace_buf_size >> 10);
6016 r = sprintf(buf, "%lu\n", size >> 10);
6018 r = sprintf(buf, "X\n");
6020 r = sprintf(buf, "%lu\n", per_cpu_ptr(tr->trace_buffer.data, cpu)->entries >> 10);
6022 mutex_unlock(&trace_types_lock);
6024 ret = simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
6029 tracing_entries_write(struct file *filp, const char __user *ubuf,
6030 size_t cnt, loff_t *ppos)
6032 struct inode *inode = file_inode(filp);
6033 struct trace_array *tr = inode->i_private;
6037 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
6041 /* must have at least 1 entry */
6045 /* value is in KB */
6047 ret = tracing_resize_ring_buffer(tr, val, tracing_get_cpu(inode));
6057 tracing_total_entries_read(struct file *filp, char __user *ubuf,
6058 size_t cnt, loff_t *ppos)
6060 struct trace_array *tr = filp->private_data;
6063 unsigned long size = 0, expanded_size = 0;
6065 mutex_lock(&trace_types_lock);
6066 for_each_tracing_cpu(cpu) {
6067 size += per_cpu_ptr(tr->trace_buffer.data, cpu)->entries >> 10;
6068 if (!ring_buffer_expanded)
6069 expanded_size += trace_buf_size >> 10;
6071 if (ring_buffer_expanded)
6072 r = sprintf(buf, "%lu\n", size);
6074 r = sprintf(buf, "%lu (expanded: %lu)\n", size, expanded_size);
6075 mutex_unlock(&trace_types_lock);
6077 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
6081 tracing_free_buffer_write(struct file *filp, const char __user *ubuf,
6082 size_t cnt, loff_t *ppos)
6085 	 * There is no need to read what the user has written; this function
6086 	 * exists only so that using "echo" on this file does not return an error
6095 tracing_free_buffer_release(struct inode *inode, struct file *filp)
6097 struct trace_array *tr = inode->i_private;
6099 /* disable tracing ? */
6100 if (tr->trace_flags & TRACE_ITER_STOP_ON_FREE)
6101 tracer_tracing_off(tr);
6102 /* resize the ring buffer to 0 */
6103 tracing_resize_ring_buffer(tr, 0, RING_BUFFER_ALL_CPUS);
6105 trace_array_put(tr);
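/*
 * Example (illustrative): keep "free_buffer" open while tracing; when the
 * file descriptor is closed (even if the tracing task is killed), the ring
 * buffer is shrunk back down, and with the "disable_on_free" trace option
 * set, tracing is stopped as well:
 *
 *   # exec 7> free_buffer
 *   ... perform tracing ...
 *   # exec 7>&-
 */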
6111 tracing_mark_write(struct file *filp, const char __user *ubuf,
6112 size_t cnt, loff_t *fpos)
6114 struct trace_array *tr = filp->private_data;
6115 struct ring_buffer_event *event;
6116 enum event_trigger_type tt = ETT_NONE;
6117 struct ring_buffer *buffer;
6118 struct print_entry *entry;
6119 unsigned long irq_flags;
6120 const char faulted[] = "<faulted>";
6125 /* Used in tracing_mark_raw_write() as well */
6126 #define FAULTED_SIZE (sizeof(faulted) - 1) /* '\0' is already accounted for */
6128 if (tracing_disabled)
6131 if (!(tr->trace_flags & TRACE_ITER_MARKERS))
6134 if (cnt > TRACE_BUF_SIZE)
6135 cnt = TRACE_BUF_SIZE;
6137 BUILD_BUG_ON(TRACE_BUF_SIZE >= PAGE_SIZE);
6139 local_save_flags(irq_flags);
6140 size = sizeof(*entry) + cnt + 2; /* add '\0' and possible '\n' */
6142 /* If less than "<faulted>", then make sure we can still add that */
6143 if (cnt < FAULTED_SIZE)
6144 size += FAULTED_SIZE - cnt;
6146 buffer = tr->trace_buffer.buffer;
6147 event = __trace_buffer_lock_reserve(buffer, TRACE_PRINT, size,
6148 irq_flags, preempt_count());
6149 if (unlikely(!event))
6150 /* Ring buffer disabled, return as if not open for write */
6153 entry = ring_buffer_event_data(event);
6154 entry->ip = _THIS_IP_;
6156 len = __copy_from_user_inatomic(&entry->buf, ubuf, cnt);
6158 memcpy(&entry->buf, faulted, FAULTED_SIZE);
6165 if (tr->trace_marker_file && !list_empty(&tr->trace_marker_file->triggers)) {
6166 /* do not add \n before testing triggers, but add \0 */
6167 entry->buf[cnt] = '\0';
6168 tt = event_triggers_call(tr->trace_marker_file, entry, event);
6171 if (entry->buf[cnt - 1] != '\n') {
6172 entry->buf[cnt] = '\n';
6173 entry->buf[cnt + 1] = '\0';
6175 entry->buf[cnt] = '\0';
6177 __buffer_unlock_commit(buffer, event);
6180 event_triggers_post_call(tr->trace_marker_file, tt);
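/*
 * Example (illustrative): userspace can annotate the trace by writing to
 * "trace_marker"; the text shows up in the trace output as a print event:
 *
 *   # echo "hello from userspace" > trace_marker
 *   # grep tracing_mark_write trace
 */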
6188 /* Limit it for now to 3K (including tag) */
6189 #define RAW_DATA_MAX_SIZE (1024*3)
6192 tracing_mark_raw_write(struct file *filp, const char __user *ubuf,
6193 size_t cnt, loff_t *fpos)
6195 struct trace_array *tr = filp->private_data;
6196 struct ring_buffer_event *event;
6197 struct ring_buffer *buffer;
6198 struct raw_data_entry *entry;
6199 const char faulted[] = "<faulted>";
6200 unsigned long irq_flags;
6205 #define FAULT_SIZE_ID (FAULTED_SIZE + sizeof(int))
6207 if (tracing_disabled)
6210 if (!(tr->trace_flags & TRACE_ITER_MARKERS))
6213 /* The marker must at least have a tag id */
6214 if (cnt < sizeof(unsigned int) || cnt > RAW_DATA_MAX_SIZE)
6217 if (cnt > TRACE_BUF_SIZE)
6218 cnt = TRACE_BUF_SIZE;
6220 BUILD_BUG_ON(TRACE_BUF_SIZE >= PAGE_SIZE);
6222 local_save_flags(irq_flags);
6223 size = sizeof(*entry) + cnt;
6224 if (cnt < FAULT_SIZE_ID)
6225 size += FAULT_SIZE_ID - cnt;
6227 buffer = tr->trace_buffer.buffer;
6228 event = __trace_buffer_lock_reserve(buffer, TRACE_RAW_DATA, size,
6229 irq_flags, preempt_count());
6231 /* Ring buffer disabled, return as if not open for write */
6234 entry = ring_buffer_event_data(event);
6236 len = __copy_from_user_inatomic(&entry->id, ubuf, cnt);
6239 memcpy(&entry->buf, faulted, FAULTED_SIZE);
6244 __buffer_unlock_commit(buffer, event);
6252 static int tracing_clock_show(struct seq_file *m, void *v)
6254 struct trace_array *tr = m->private;
6257 for (i = 0; i < ARRAY_SIZE(trace_clocks); i++)
6259 "%s%s%s%s", i ? " " : "",
6260 i == tr->clock_id ? "[" : "", trace_clocks[i].name,
6261 i == tr->clock_id ? "]" : "");
6267 int tracing_set_clock(struct trace_array *tr, const char *clockstr)
6271 for (i = 0; i < ARRAY_SIZE(trace_clocks); i++) {
6272 if (strcmp(trace_clocks[i].name, clockstr) == 0)
6275 if (i == ARRAY_SIZE(trace_clocks))
6278 mutex_lock(&trace_types_lock);
6282 ring_buffer_set_clock(tr->trace_buffer.buffer, trace_clocks[i].func);
6285 * New clock may not be consistent with the previous clock.
6286 * Reset the buffer so that it doesn't have incomparable timestamps.
6288 tracing_reset_online_cpus(&tr->trace_buffer);
6290 #ifdef CONFIG_TRACER_MAX_TRACE
6291 if (tr->max_buffer.buffer)
6292 ring_buffer_set_clock(tr->max_buffer.buffer, trace_clocks[i].func);
6293 tracing_reset_online_cpus(&tr->max_buffer);
6296 mutex_unlock(&trace_types_lock);
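/*
 * Example (illustrative): the available clocks are listed by reading
 * "trace_clock" (the one in brackets is active); switching clocks resets
 * the buffer, as noted above:
 *
 *   # cat trace_clock
 *   [local] global counter uptime perf mono ...
 *   # echo global > trace_clock
 */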
6301 static ssize_t tracing_clock_write(struct file *filp, const char __user *ubuf,
6302 size_t cnt, loff_t *fpos)
6304 struct seq_file *m = filp->private_data;
6305 struct trace_array *tr = m->private;
6307 const char *clockstr;
6310 if (cnt >= sizeof(buf))
6313 if (copy_from_user(buf, ubuf, cnt))
6318 clockstr = strstrip(buf);
6320 ret = tracing_set_clock(tr, clockstr);
6329 static int tracing_clock_open(struct inode *inode, struct file *file)
6331 struct trace_array *tr = inode->i_private;
6334 if (tracing_disabled)
6337 if (trace_array_get(tr))
6340 ret = single_open(file, tracing_clock_show, inode->i_private);
6342 trace_array_put(tr);
6347 static int tracing_time_stamp_mode_show(struct seq_file *m, void *v)
6349 struct trace_array *tr = m->private;
6351 mutex_lock(&trace_types_lock);
6353 if (ring_buffer_time_stamp_abs(tr->trace_buffer.buffer))
6354 seq_puts(m, "delta [absolute]\n");
6356 seq_puts(m, "[delta] absolute\n");
6358 mutex_unlock(&trace_types_lock);
6363 static int tracing_time_stamp_mode_open(struct inode *inode, struct file *file)
6365 struct trace_array *tr = inode->i_private;
6368 if (tracing_disabled)
6371 if (trace_array_get(tr))
6374 ret = single_open(file, tracing_time_stamp_mode_show, inode->i_private);
6376 trace_array_put(tr);
6381 int tracing_set_time_stamp_abs(struct trace_array *tr, bool abs)
6385 mutex_lock(&trace_types_lock);
6387 if (abs && tr->time_stamp_abs_ref++)
6391 if (WARN_ON_ONCE(!tr->time_stamp_abs_ref)) {
6396 if (--tr->time_stamp_abs_ref)
6400 ring_buffer_set_time_stamp_abs(tr->trace_buffer.buffer, abs);
6402 #ifdef CONFIG_TRACER_MAX_TRACE
6403 if (tr->max_buffer.buffer)
6404 ring_buffer_set_time_stamp_abs(tr->max_buffer.buffer, abs);
6407 mutex_unlock(&trace_types_lock);
6412 struct ftrace_buffer_info {
6413 struct trace_iterator iter;
6415 unsigned int spare_cpu;
6419 #ifdef CONFIG_TRACER_SNAPSHOT
6420 static int tracing_snapshot_open(struct inode *inode, struct file *file)
6422 struct trace_array *tr = inode->i_private;
6423 struct trace_iterator *iter;
6427 if (trace_array_get(tr) < 0)
6430 if (file->f_mode & FMODE_READ) {
6431 iter = __tracing_open(inode, file, true);
6433 ret = PTR_ERR(iter);
6435 /* Writes still need the seq_file to hold the private data */
6437 m = kzalloc(sizeof(*m), GFP_KERNEL);
6440 iter = kzalloc(sizeof(*iter), GFP_KERNEL);
6448 iter->trace_buffer = &tr->max_buffer;
6449 iter->cpu_file = tracing_get_cpu(inode);
6451 file->private_data = m;
6455 trace_array_put(tr);
6461 tracing_snapshot_write(struct file *filp, const char __user *ubuf, size_t cnt,
6464 struct seq_file *m = filp->private_data;
6465 struct trace_iterator *iter = m->private;
6466 struct trace_array *tr = iter->tr;
6470 ret = tracing_update_buffers();
6474 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
6478 mutex_lock(&trace_types_lock);
6480 if (tr->current_trace->use_max_tr) {
6487 if (iter->cpu_file != RING_BUFFER_ALL_CPUS) {
6491 if (tr->allocated_snapshot)
6495 /* Only allow per-cpu swap if the ring buffer supports it */
6496 #ifndef CONFIG_RING_BUFFER_ALLOW_SWAP
6497 if (iter->cpu_file != RING_BUFFER_ALL_CPUS) {
6502 if (tr->allocated_snapshot)
6503 ret = resize_buffer_duplicate_size(&tr->max_buffer,
6504 &tr->trace_buffer, iter->cpu_file);
6506 ret = tracing_alloc_snapshot_instance(tr);
6509 local_irq_disable();
6510 /* Now, we're going to swap */
6511 if (iter->cpu_file == RING_BUFFER_ALL_CPUS)
6512 update_max_tr(tr, current, smp_processor_id());
6514 update_max_tr_single(tr, current, iter->cpu_file);
6518 if (tr->allocated_snapshot) {
6519 if (iter->cpu_file == RING_BUFFER_ALL_CPUS)
6520 tracing_reset_online_cpus(&tr->max_buffer);
6522 tracing_reset(&tr->max_buffer, iter->cpu_file);
6532 mutex_unlock(&trace_types_lock);
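/*
 * Example (illustrative): "snapshot" swaps the live buffer with the spare
 * max buffer; writing 1 allocates the spare (if needed) and takes a
 * snapshot, reading shows it, and writing 0 frees the spare:
 *
 *   # echo 1 > snapshot
 *   # cat snapshot
 *   # echo 0 > snapshot
 */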
6536 static int tracing_snapshot_release(struct inode *inode, struct file *file)
6538 struct seq_file *m = file->private_data;
6541 ret = tracing_release(inode, file);
6543 if (file->f_mode & FMODE_READ)
6546 /* If write only, the seq_file is just a stub */
6554 static int tracing_buffers_open(struct inode *inode, struct file *filp);
6555 static ssize_t tracing_buffers_read(struct file *filp, char __user *ubuf,
6556 size_t count, loff_t *ppos);
6557 static int tracing_buffers_release(struct inode *inode, struct file *file);
6558 static ssize_t tracing_buffers_splice_read(struct file *file, loff_t *ppos,
6559 struct pipe_inode_info *pipe, size_t len, unsigned int flags);
6561 static int snapshot_raw_open(struct inode *inode, struct file *filp)
6563 struct ftrace_buffer_info *info;
6566 ret = tracing_buffers_open(inode, filp);
6570 info = filp->private_data;
6572 if (info->iter.trace->use_max_tr) {
6573 tracing_buffers_release(inode, filp);
6577 info->iter.snapshot = true;
6578 info->iter.trace_buffer = &info->iter.tr->max_buffer;
6583 #endif /* CONFIG_TRACER_SNAPSHOT */
6586 static const struct file_operations tracing_thresh_fops = {
6587 .open = tracing_open_generic,
6588 .read = tracing_thresh_read,
6589 .write = tracing_thresh_write,
6590 .llseek = generic_file_llseek,
6593 #if defined(CONFIG_TRACER_MAX_TRACE) || defined(CONFIG_HWLAT_TRACER)
6594 static const struct file_operations tracing_max_lat_fops = {
6595 .open = tracing_open_generic,
6596 .read = tracing_max_lat_read,
6597 .write = tracing_max_lat_write,
6598 .llseek = generic_file_llseek,
6602 static const struct file_operations set_tracer_fops = {
6603 .open = tracing_open_generic,
6604 .read = tracing_set_trace_read,
6605 .write = tracing_set_trace_write,
6606 .llseek = generic_file_llseek,
6609 static const struct file_operations tracing_pipe_fops = {
6610 .open = tracing_open_pipe,
6611 .poll = tracing_poll_pipe,
6612 .read = tracing_read_pipe,
6613 .splice_read = tracing_splice_read_pipe,
6614 .release = tracing_release_pipe,
6615 .llseek = no_llseek,
6618 static const struct file_operations tracing_entries_fops = {
6619 .open = tracing_open_generic_tr,
6620 .read = tracing_entries_read,
6621 .write = tracing_entries_write,
6622 .llseek = generic_file_llseek,
6623 .release = tracing_release_generic_tr,
6626 static const struct file_operations tracing_total_entries_fops = {
6627 .open = tracing_open_generic_tr,
6628 .read = tracing_total_entries_read,
6629 .llseek = generic_file_llseek,
6630 .release = tracing_release_generic_tr,
6633 static const struct file_operations tracing_free_buffer_fops = {
6634 .open = tracing_open_generic_tr,
6635 .write = tracing_free_buffer_write,
6636 .release = tracing_free_buffer_release,
6639 static const struct file_operations tracing_mark_fops = {
6640 .open = tracing_open_generic_tr,
6641 .write = tracing_mark_write,
6642 .llseek = generic_file_llseek,
6643 .release = tracing_release_generic_tr,
6646 static const struct file_operations tracing_mark_raw_fops = {
6647 .open = tracing_open_generic_tr,
6648 .write = tracing_mark_raw_write,
6649 .llseek = generic_file_llseek,
6650 .release = tracing_release_generic_tr,
6653 static const struct file_operations trace_clock_fops = {
6654 .open = tracing_clock_open,
6656 .llseek = seq_lseek,
6657 .release = tracing_single_release_tr,
6658 .write = tracing_clock_write,
6661 static const struct file_operations trace_time_stamp_mode_fops = {
6662 .open = tracing_time_stamp_mode_open,
6664 .llseek = seq_lseek,
6665 .release = tracing_single_release_tr,
6668 #ifdef CONFIG_TRACER_SNAPSHOT
6669 static const struct file_operations snapshot_fops = {
6670 .open = tracing_snapshot_open,
6672 .write = tracing_snapshot_write,
6673 .llseek = tracing_lseek,
6674 .release = tracing_snapshot_release,
6677 static const struct file_operations snapshot_raw_fops = {
6678 .open = snapshot_raw_open,
6679 .read = tracing_buffers_read,
6680 .release = tracing_buffers_release,
6681 .splice_read = tracing_buffers_splice_read,
6682 .llseek = no_llseek,
6685 #endif /* CONFIG_TRACER_SNAPSHOT */
6687 static int tracing_buffers_open(struct inode *inode, struct file *filp)
6689 struct trace_array *tr = inode->i_private;
6690 struct ftrace_buffer_info *info;
6693 if (tracing_disabled)
6696 if (trace_array_get(tr) < 0)
6699 info = kzalloc(sizeof(*info), GFP_KERNEL);
6701 trace_array_put(tr);
6705 mutex_lock(&trace_types_lock);
6708 info->iter.cpu_file = tracing_get_cpu(inode);
6709 info->iter.trace = tr->current_trace;
6710 info->iter.trace_buffer = &tr->trace_buffer;
6712 /* Force reading ring buffer for first read */
6713 info->read = (unsigned int)-1;
6715 filp->private_data = info;
6717 tr->current_trace->ref++;
6719 mutex_unlock(&trace_types_lock);
6721 ret = nonseekable_open(inode, filp);
6723 trace_array_put(tr);
6729 tracing_buffers_poll(struct file *filp, poll_table *poll_table)
6731 struct ftrace_buffer_info *info = filp->private_data;
6732 struct trace_iterator *iter = &info->iter;
6734 return trace_poll(iter, filp, poll_table);
6738 tracing_buffers_read(struct file *filp, char __user *ubuf,
6739 size_t count, loff_t *ppos)
6741 struct ftrace_buffer_info *info = filp->private_data;
6742 struct trace_iterator *iter = &info->iter;
6749 #ifdef CONFIG_TRACER_MAX_TRACE
6750 if (iter->snapshot && iter->tr->current_trace->use_max_tr)
6755 info->spare = ring_buffer_alloc_read_page(iter->trace_buffer->buffer,
6757 if (IS_ERR(info->spare)) {
6758 ret = PTR_ERR(info->spare);
6761 info->spare_cpu = iter->cpu_file;
6767 /* Do we have previous read data to read? */
6768 if (info->read < PAGE_SIZE)
6772 trace_access_lock(iter->cpu_file);
6773 ret = ring_buffer_read_page(iter->trace_buffer->buffer,
6777 trace_access_unlock(iter->cpu_file);
6780 if (trace_empty(iter)) {
6781 if ((filp->f_flags & O_NONBLOCK))
6784 ret = wait_on_pipe(iter, false);
6795 size = PAGE_SIZE - info->read;
6799 ret = copy_to_user(ubuf, info->spare + info->read, size);
6811 static int tracing_buffers_release(struct inode *inode, struct file *file)
6813 struct ftrace_buffer_info *info = file->private_data;
6814 struct trace_iterator *iter = &info->iter;
6816 mutex_lock(&trace_types_lock);
6818 iter->tr->current_trace->ref--;
6820 __trace_array_put(iter->tr);
6823 ring_buffer_free_read_page(iter->trace_buffer->buffer,
6824 info->spare_cpu, info->spare);
6827 mutex_unlock(&trace_types_lock);
6833 struct ring_buffer *buffer;
6836 refcount_t refcount;
6839 static void buffer_ref_release(struct buffer_ref *ref)
6841 if (!refcount_dec_and_test(&ref->refcount))
6843 ring_buffer_free_read_page(ref->buffer, ref->cpu, ref->page);
6847 static void buffer_pipe_buf_release(struct pipe_inode_info *pipe,
6848 struct pipe_buffer *buf)
6850 struct buffer_ref *ref = (struct buffer_ref *)buf->private;
6852 buffer_ref_release(ref);
6856 static bool buffer_pipe_buf_get(struct pipe_inode_info *pipe,
6857 struct pipe_buffer *buf)
6859 struct buffer_ref *ref = (struct buffer_ref *)buf->private;
6861 if (refcount_read(&ref->refcount) > INT_MAX/2)
6864 refcount_inc(&ref->refcount);
6868 /* Pipe buffer operations for a buffer. */
6869 static const struct pipe_buf_operations buffer_pipe_buf_ops = {
6871 .confirm = generic_pipe_buf_confirm,
6872 .release = buffer_pipe_buf_release,
6873 .steal = generic_pipe_buf_nosteal,
6874 .get = buffer_pipe_buf_get,
6878  * Callback from splice_to_pipe(), if we need to release some pages
6879  * at the end of the spd in case we errored out while filling the pipe.
6881 static void buffer_spd_release(struct splice_pipe_desc *spd, unsigned int i)
6883 struct buffer_ref *ref =
6884 (struct buffer_ref *)spd->partial[i].private;
6886 buffer_ref_release(ref);
6887 spd->partial[i].private = 0;
6891 tracing_buffers_splice_read(struct file *file, loff_t *ppos,
6892 struct pipe_inode_info *pipe, size_t len,
6895 struct ftrace_buffer_info *info = file->private_data;
6896 struct trace_iterator *iter = &info->iter;
6897 struct partial_page partial_def[PIPE_DEF_BUFFERS];
6898 struct page *pages_def[PIPE_DEF_BUFFERS];
6899 struct splice_pipe_desc spd = {
6901 .partial = partial_def,
6902 .nr_pages_max = PIPE_DEF_BUFFERS,
6903 .ops = &buffer_pipe_buf_ops,
6904 .spd_release = buffer_spd_release,
6906 struct buffer_ref *ref;
6910 #ifdef CONFIG_TRACER_MAX_TRACE
6911 if (iter->snapshot && iter->tr->current_trace->use_max_tr)
6915 if (*ppos & (PAGE_SIZE - 1))
6918 if (len & (PAGE_SIZE - 1)) {
6919 if (len < PAGE_SIZE)
6924 if (splice_grow_spd(pipe, &spd))
6928 trace_access_lock(iter->cpu_file);
6929 entries = ring_buffer_entries_cpu(iter->trace_buffer->buffer, iter->cpu_file);
6931 for (i = 0; i < spd.nr_pages_max && len && entries; i++, len -= PAGE_SIZE) {
6935 ref = kzalloc(sizeof(*ref), GFP_KERNEL);
6941 refcount_set(&ref->refcount, 1);
6942 ref->buffer = iter->trace_buffer->buffer;
6943 ref->page = ring_buffer_alloc_read_page(ref->buffer, iter->cpu_file);
6944 if (IS_ERR(ref->page)) {
6945 ret = PTR_ERR(ref->page);
6950 ref->cpu = iter->cpu_file;
6952 r = ring_buffer_read_page(ref->buffer, &ref->page,
6953 len, iter->cpu_file, 1);
6955 ring_buffer_free_read_page(ref->buffer, ref->cpu,
6961 page = virt_to_page(ref->page);
6963 spd.pages[i] = page;
6964 spd.partial[i].len = PAGE_SIZE;
6965 spd.partial[i].offset = 0;
6966 spd.partial[i].private = (unsigned long)ref;
6970 entries = ring_buffer_entries_cpu(iter->trace_buffer->buffer, iter->cpu_file);
6973 trace_access_unlock(iter->cpu_file);
6976 /* did we read anything? */
6977 if (!spd.nr_pages) {
6982 if ((file->f_flags & O_NONBLOCK) || (flags & SPLICE_F_NONBLOCK))
6985 ret = wait_on_pipe(iter, true);
6992 ret = splice_to_pipe(pipe, &spd);
6994 splice_shrink_spd(&spd);
6999 static const struct file_operations tracing_buffers_fops = {
7000 .open = tracing_buffers_open,
7001 .read = tracing_buffers_read,
7002 .poll = tracing_buffers_poll,
7003 .release = tracing_buffers_release,
7004 .splice_read = tracing_buffers_splice_read,
7005 .llseek = no_llseek,
7009 tracing_stats_read(struct file *filp, char __user *ubuf,
7010 size_t count, loff_t *ppos)
7012 struct inode *inode = file_inode(filp);
7013 struct trace_array *tr = inode->i_private;
7014 struct trace_buffer *trace_buf = &tr->trace_buffer;
7015 int cpu = tracing_get_cpu(inode);
7016 struct trace_seq *s;
7018 unsigned long long t;
7019 unsigned long usec_rem;
7021 s = kmalloc(sizeof(*s), GFP_KERNEL);
7027 cnt = ring_buffer_entries_cpu(trace_buf->buffer, cpu);
7028 trace_seq_printf(s, "entries: %ld\n", cnt);
7030 cnt = ring_buffer_overrun_cpu(trace_buf->buffer, cpu);
7031 trace_seq_printf(s, "overrun: %ld\n", cnt);
7033 cnt = ring_buffer_commit_overrun_cpu(trace_buf->buffer, cpu);
7034 trace_seq_printf(s, "commit overrun: %ld\n", cnt);
7036 cnt = ring_buffer_bytes_cpu(trace_buf->buffer, cpu);
7037 trace_seq_printf(s, "bytes: %ld\n", cnt);
7039 if (trace_clocks[tr->clock_id].in_ns) {
7040 /* local or global for trace_clock */
7041 t = ns2usecs(ring_buffer_oldest_event_ts(trace_buf->buffer, cpu));
7042 usec_rem = do_div(t, USEC_PER_SEC);
7043 trace_seq_printf(s, "oldest event ts: %5llu.%06lu\n",
7046 t = ns2usecs(ring_buffer_time_stamp(trace_buf->buffer, cpu));
7047 usec_rem = do_div(t, USEC_PER_SEC);
7048 trace_seq_printf(s, "now ts: %5llu.%06lu\n", t, usec_rem);
7050 /* counter or tsc mode for trace_clock */
7051 trace_seq_printf(s, "oldest event ts: %llu\n",
7052 ring_buffer_oldest_event_ts(trace_buf->buffer, cpu));
7054 trace_seq_printf(s, "now ts: %llu\n",
7055 ring_buffer_time_stamp(trace_buf->buffer, cpu));
7058 cnt = ring_buffer_dropped_events_cpu(trace_buf->buffer, cpu);
7059 trace_seq_printf(s, "dropped events: %ld\n", cnt);
7061 cnt = ring_buffer_read_events_cpu(trace_buf->buffer, cpu);
7062 trace_seq_printf(s, "read events: %ld\n", cnt);
7064 count = simple_read_from_buffer(ubuf, count, ppos,
7065 s->buffer, trace_seq_used(s));
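/*
 * Example (illustrative): the per-cpu "stats" file prints the counters
 * assembled above, e.g.:
 *
 *   # cat per_cpu/cpu0/stats
 *   entries: 42
 *   overrun: 0
 *   commit overrun: 0
 *   bytes: 2080
 *   ...
 */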
7072 static const struct file_operations tracing_stats_fops = {
7073 .open = tracing_open_generic_tr,
7074 .read = tracing_stats_read,
7075 .llseek = generic_file_llseek,
7076 .release = tracing_release_generic_tr,
7079 #ifdef CONFIG_DYNAMIC_FTRACE
7082 tracing_read_dyn_info(struct file *filp, char __user *ubuf,
7083 size_t cnt, loff_t *ppos)
7085 unsigned long *p = filp->private_data;
7086 char buf[64]; /* Not too big for a shallow stack */
7089 r = scnprintf(buf, 63, "%ld", *p);
7092 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
7095 static const struct file_operations tracing_dyn_info_fops = {
7096 .open = tracing_open_generic,
7097 .read = tracing_read_dyn_info,
7098 .llseek = generic_file_llseek,
7100 #endif /* CONFIG_DYNAMIC_FTRACE */
7102 #if defined(CONFIG_TRACER_SNAPSHOT) && defined(CONFIG_DYNAMIC_FTRACE)
7104 ftrace_snapshot(unsigned long ip, unsigned long parent_ip,
7105 struct trace_array *tr, struct ftrace_probe_ops *ops,
7108 tracing_snapshot_instance(tr);
7112 ftrace_count_snapshot(unsigned long ip, unsigned long parent_ip,
7113 struct trace_array *tr, struct ftrace_probe_ops *ops,
7116 struct ftrace_func_mapper *mapper = data;
7120 count = (long *)ftrace_func_mapper_find_ip(mapper, ip);
7130 tracing_snapshot_instance(tr);
7134 ftrace_snapshot_print(struct seq_file *m, unsigned long ip,
7135 struct ftrace_probe_ops *ops, void *data)
7137 struct ftrace_func_mapper *mapper = data;
7140 seq_printf(m, "%ps:", (void *)ip);
7142 seq_puts(m, "snapshot");
7145 count = (long *)ftrace_func_mapper_find_ip(mapper, ip);
7148 seq_printf(m, ":count=%ld\n", *count);
7150 seq_puts(m, ":unlimited\n");
7156 ftrace_snapshot_init(struct ftrace_probe_ops *ops, struct trace_array *tr,
7157 unsigned long ip, void *init_data, void **data)
7159 struct ftrace_func_mapper *mapper = *data;
7162 mapper = allocate_ftrace_func_mapper();
7168 return ftrace_func_mapper_add_ip(mapper, ip, init_data);
7172 ftrace_snapshot_free(struct ftrace_probe_ops *ops, struct trace_array *tr,
7173 unsigned long ip, void *data)
7175 struct ftrace_func_mapper *mapper = data;
7180 free_ftrace_func_mapper(mapper, NULL);
7184 ftrace_func_mapper_remove_ip(mapper, ip);
7187 static struct ftrace_probe_ops snapshot_probe_ops = {
7188 .func = ftrace_snapshot,
7189 .print = ftrace_snapshot_print,
7192 static struct ftrace_probe_ops snapshot_count_probe_ops = {
7193 .func = ftrace_count_snapshot,
7194 .print = ftrace_snapshot_print,
7195 .init = ftrace_snapshot_init,
7196 .free = ftrace_snapshot_free,
7200 ftrace_trace_snapshot_callback(struct trace_array *tr, struct ftrace_hash *hash,
7201 char *glob, char *cmd, char *param, int enable)
7203 struct ftrace_probe_ops *ops;
7204 void *count = (void *)-1;
7211 /* hash funcs only work with set_ftrace_filter */
7215 ops = param ? &snapshot_count_probe_ops : &snapshot_probe_ops;
7218 return unregister_ftrace_function_probe_func(glob+1, tr, ops);
7223 number = strsep(¶m, ":");
7225 if (!strlen(number))
7229 * We use the callback data field (which is a pointer)
7232 ret = kstrtoul(number, 0, (unsigned long *)&count);
7237 ret = tracing_alloc_snapshot_instance(tr);
7241 ret = register_ftrace_function_probe(glob, tr, ops, count);
7244 return ret < 0 ? ret : 0;
7247 static struct ftrace_func_command ftrace_snapshot_cmd = {
7249 .func = ftrace_trace_snapshot_callback,
7252 static __init int register_snapshot_cmd(void)
7254 return register_ftrace_command(&ftrace_snapshot_cmd);
7257 static inline __init int register_snapshot_cmd(void) { return 0; }
7258 #endif /* defined(CONFIG_TRACER_SNAPSHOT) && defined(CONFIG_DYNAMIC_FTRACE) */
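/*
 * Example (illustrative): with CONFIG_DYNAMIC_FTRACE, the command
 * registered above is used through set_ftrace_filter; the optional count
 * limits how many snapshots are taken, and a leading '!' removes the
 * probe (kfree here is only an example function):
 *
 *   # echo 'kfree:snapshot:3' > set_ftrace_filter
 *   # echo '!kfree:snapshot' >> set_ftrace_filter
 */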
7260 static struct dentry *tracing_get_dentry(struct trace_array *tr)
7262 if (WARN_ON(!tr->dir))
7263 return ERR_PTR(-ENODEV);
7265 /* Top directory uses NULL as the parent */
7266 if (tr->flags & TRACE_ARRAY_FL_GLOBAL)
7269 /* All sub buffers have a descriptor */
7273 static struct dentry *tracing_dentry_percpu(struct trace_array *tr, int cpu)
7275 struct dentry *d_tracer;
7278 return tr->percpu_dir;
7280 d_tracer = tracing_get_dentry(tr);
7281 if (IS_ERR(d_tracer))
7284 tr->percpu_dir = tracefs_create_dir("per_cpu", d_tracer);
7286 WARN_ONCE(!tr->percpu_dir,
7287 "Could not create tracefs directory 'per_cpu/%d'\n", cpu);
7289 return tr->percpu_dir;
7292 static struct dentry *
7293 trace_create_cpu_file(const char *name, umode_t mode, struct dentry *parent,
7294 void *data, long cpu, const struct file_operations *fops)
7296 struct dentry *ret = trace_create_file(name, mode, parent, data, fops);
7298 if (ret) /* See tracing_get_cpu() */
7299 d_inode(ret)->i_cdev = (void *)(cpu + 1);
7304 tracing_init_tracefs_percpu(struct trace_array *tr, long cpu)
7306 struct dentry *d_percpu = tracing_dentry_percpu(tr, cpu);
7307 struct dentry *d_cpu;
7308 char cpu_dir[30]; /* 30 characters should be more than enough */
7313 snprintf(cpu_dir, 30, "cpu%ld", cpu);
7314 d_cpu = tracefs_create_dir(cpu_dir, d_percpu);
7316 pr_warn("Could not create tracefs '%s' entry\n", cpu_dir);
7320 /* per cpu trace_pipe */
7321 trace_create_cpu_file("trace_pipe", 0444, d_cpu,
7322 tr, cpu, &tracing_pipe_fops);
7325 trace_create_cpu_file("trace", 0644, d_cpu,
7326 tr, cpu, &tracing_fops);
7328 trace_create_cpu_file("trace_pipe_raw", 0444, d_cpu,
7329 tr, cpu, &tracing_buffers_fops);
7331 trace_create_cpu_file("stats", 0444, d_cpu,
7332 tr, cpu, &tracing_stats_fops);
7334 trace_create_cpu_file("buffer_size_kb", 0444, d_cpu,
7335 tr, cpu, &tracing_entries_fops);
7337 #ifdef CONFIG_TRACER_SNAPSHOT
7338 trace_create_cpu_file("snapshot", 0644, d_cpu,
7339 tr, cpu, &snapshot_fops);
7341 trace_create_cpu_file("snapshot_raw", 0444, d_cpu,
7342 tr, cpu, &snapshot_raw_fops);
7346 #ifdef CONFIG_FTRACE_SELFTEST
7347 /* Let selftest have access to static functions in this file */
7348 #include "trace_selftest.c"
7352 trace_options_read(struct file *filp, char __user *ubuf, size_t cnt,
7355 struct trace_option_dentry *topt = filp->private_data;
7358 if (topt->flags->val & topt->opt->bit)
7363 return simple_read_from_buffer(ubuf, cnt, ppos, buf, 2);
7367 trace_options_write(struct file *filp, const char __user *ubuf, size_t cnt,
7370 struct trace_option_dentry *topt = filp->private_data;
7374 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
7378 if (val != 0 && val != 1)
7381 if (!!(topt->flags->val & topt->opt->bit) != val) {
7382 mutex_lock(&trace_types_lock);
7383 ret = __set_tracer_option(topt->tr, topt->flags,
7385 mutex_unlock(&trace_types_lock);
7396 static const struct file_operations trace_options_fops = {
7397 .open = tracing_open_generic,
7398 .read = trace_options_read,
7399 .write = trace_options_write,
7400 .llseek = generic_file_llseek,
7404 * In order to pass in both the trace_array descriptor as well as the index
7405 * to the flag that the trace option file represents, the trace_array
7406 * has a character array of trace_flags_index[], which holds the index
7407 * of the bit for the flag it represents. index[0] == 0, index[1] == 1, etc.
7408 * The address of this character array is passed to the flag option file
7409 * read/write callbacks.
7411 * In order to extract both the index and the trace_array descriptor,
7412 * get_tr_index() uses the following algorithm.
7416 * As the pointer itself contains the address of the index (remember
7419  * Then, to get the trace_array descriptor, we subtract that index
7420  * from the ptr, which gets us to the start of the index array itself:
7422 * ptr - idx == &index[0]
7424 * Then a simple container_of() from that pointer gets us to the
7425 * trace_array descriptor.
7427 static void get_tr_index(void *data, struct trace_array **ptr,
7428 unsigned int *pindex)
7430 *pindex = *(unsigned char *)data;
7432 *ptr = container_of(data - *pindex, struct trace_array,
7437 trace_options_core_read(struct file *filp, char __user *ubuf, size_t cnt,
7440 void *tr_index = filp->private_data;
7441 struct trace_array *tr;
7445 get_tr_index(tr_index, &tr, &index);
7447 if (tr->trace_flags & (1 << index))
7452 return simple_read_from_buffer(ubuf, cnt, ppos, buf, 2);
7456 trace_options_core_write(struct file *filp, const char __user *ubuf, size_t cnt,
7459 void *tr_index = filp->private_data;
7460 struct trace_array *tr;
7465 get_tr_index(tr_index, &tr, &index);
7467 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
7471 if (val != 0 && val != 1)
7474 mutex_lock(&event_mutex);
7475 mutex_lock(&trace_types_lock);
7476 ret = set_tracer_flag(tr, 1 << index, val);
7477 mutex_unlock(&trace_types_lock);
7478 mutex_unlock(&event_mutex);
7488 static const struct file_operations trace_options_core_fops = {
7489 .open = tracing_open_generic,
7490 .read = trace_options_core_read,
7491 .write = trace_options_core_write,
7492 .llseek = generic_file_llseek,
7495 struct dentry *trace_create_file(const char *name,
7497 struct dentry *parent,
7499 const struct file_operations *fops)
7503 ret = tracefs_create_file(name, mode, parent, data, fops);
7505 pr_warn("Could not create tracefs '%s' entry\n", name);
7511 static struct dentry *trace_options_init_dentry(struct trace_array *tr)
7513 struct dentry *d_tracer;
7518 d_tracer = tracing_get_dentry(tr);
7519 if (IS_ERR(d_tracer))
7522 tr->options = tracefs_create_dir("options", d_tracer);
7524 pr_warn("Could not create tracefs directory 'options'\n");
7532 create_trace_option_file(struct trace_array *tr,
7533 struct trace_option_dentry *topt,
7534 struct tracer_flags *flags,
7535 struct tracer_opt *opt)
7537 struct dentry *t_options;
7539 t_options = trace_options_init_dentry(tr);
7543 topt->flags = flags;
7547 topt->entry = trace_create_file(opt->name, 0644, t_options, topt,
7548 &trace_options_fops);
7553 create_trace_option_files(struct trace_array *tr, struct tracer *tracer)
7555 struct trace_option_dentry *topts;
7556 struct trace_options *tr_topts;
7557 struct tracer_flags *flags;
7558 struct tracer_opt *opts;
7565 flags = tracer->flags;
7567 if (!flags || !flags->opts)
7571 * If this is an instance, only create flags for tracers
7572 * the instance may have.
7574 if (!trace_ok_for_array(tracer, tr))
7577 for (i = 0; i < tr->nr_topts; i++) {
7578 /* Make sure there's no duplicate flags. */
7579 if (WARN_ON_ONCE(tr->topts[i].tracer->flags == tracer->flags))
7585 for (cnt = 0; opts[cnt].name; cnt++)
7588 topts = kcalloc(cnt + 1, sizeof(*topts), GFP_KERNEL);
7592 tr_topts = krealloc(tr->topts, sizeof(*tr->topts) * (tr->nr_topts + 1),
7599 tr->topts = tr_topts;
7600 tr->topts[tr->nr_topts].tracer = tracer;
7601 tr->topts[tr->nr_topts].topts = topts;
7604 for (cnt = 0; opts[cnt].name; cnt++) {
7605 create_trace_option_file(tr, &topts[cnt], flags,
7607 WARN_ONCE(topts[cnt].entry == NULL,
7608 "Failed to create trace option: %s",
7613 static struct dentry *
7614 create_trace_option_core_file(struct trace_array *tr,
7615 const char *option, long index)
7617 struct dentry *t_options;
7619 t_options = trace_options_init_dentry(tr);
7623 return trace_create_file(option, 0644, t_options,
7624 (void *)&tr->trace_flags_index[index],
7625 &trace_options_core_fops);
7628 static void create_trace_options_dir(struct trace_array *tr)
7630 struct dentry *t_options;
7631 bool top_level = tr == &global_trace;
7634 t_options = trace_options_init_dentry(tr);
7638 for (i = 0; trace_options[i]; i++) {
7640 !((1 << i) & TOP_LEVEL_TRACE_FLAGS))
7641 create_trace_option_core_file(tr, trace_options[i], i);
7646 rb_simple_read(struct file *filp, char __user *ubuf,
7647 size_t cnt, loff_t *ppos)
7649 struct trace_array *tr = filp->private_data;
7653 r = tracer_tracing_is_on(tr);
7654 r = sprintf(buf, "%d\n", r);
7656 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
7660 rb_simple_write(struct file *filp, const char __user *ubuf,
7661 size_t cnt, loff_t *ppos)
7663 struct trace_array *tr = filp->private_data;
7664 struct ring_buffer *buffer = tr->trace_buffer.buffer;
7668 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
7673 mutex_lock(&trace_types_lock);
7674 if (!!val == tracer_tracing_is_on(tr)) {
7675 val = 0; /* do nothing */
7677 tracer_tracing_on(tr);
7678 if (tr->current_trace->start)
7679 tr->current_trace->start(tr);
7681 tracer_tracing_off(tr);
7682 if (tr->current_trace->stop)
7683 tr->current_trace->stop(tr);
7685 mutex_unlock(&trace_types_lock);
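/*
 * Usage sketch: rb_simple_fops below backs the "tracing_on" file created
 * in init_tracer_tracefs(). Assuming tracefs is mounted at
 * /sys/kernel/tracing:
 *
 *	echo 0 > /sys/kernel/tracing/tracing_on	   # stop recording
 *	echo 1 > /sys/kernel/tracing/tracing_on	   # resume recording
 *
 * Writing the value that is already in effect takes the "do nothing"
 * branch above, so the current tracer's start()/stop() callbacks only
 * run on a real state change.
 */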
7693 static const struct file_operations rb_simple_fops = {
7694 .open = tracing_open_generic_tr,
7695 .read = rb_simple_read,
7696 .write = rb_simple_write,
7697 .release = tracing_release_generic_tr,
7698 .llseek = default_llseek,
7701 struct dentry *trace_instance_dir;
7704 init_tracer_tracefs(struct trace_array *tr, struct dentry *d_tracer);
7707 allocate_trace_buffer(struct trace_array *tr, struct trace_buffer *buf, int size)
7709 enum ring_buffer_flags rb_flags;
7711 rb_flags = tr->trace_flags & TRACE_ITER_OVERWRITE ? RB_FL_OVERWRITE : 0;
7715 buf->buffer = ring_buffer_alloc(size, rb_flags);
7719 buf->data = alloc_percpu(struct trace_array_cpu);
7721 ring_buffer_free(buf->buffer);
7726 /* Allocate the first page for all buffers */
7727 set_buffer_entries(&tr->trace_buffer,
7728 ring_buffer_size(tr->trace_buffer.buffer, 0));
7733 static int allocate_trace_buffers(struct trace_array *tr, int size)
7737 ret = allocate_trace_buffer(tr, &tr->trace_buffer, size);
7741 #ifdef CONFIG_TRACER_MAX_TRACE
7742 ret = allocate_trace_buffer(tr, &tr->max_buffer,
7743 allocate_snapshot ? size : 1);
7745 ring_buffer_free(tr->trace_buffer.buffer);
7746 tr->trace_buffer.buffer = NULL;
7747 free_percpu(tr->trace_buffer.data);
7748 tr->trace_buffer.data = NULL;
7751 tr->allocated_snapshot = allocate_snapshot;
7754 * Only the top level trace array gets its snapshot allocated
7755 * from the kernel command line.
7757 allocate_snapshot = false;
7761 * Because of some magic with the way alloc_percpu() works on
7762 * x86_64, we need to synchronize the pgd of all the page tables;
7763 * otherwise the trace events that fire in the x86_64 page fault
7764 * handlers can fault again when they touch alloc_percpu()'d memory
7765 * and end up recursing into the page fault trace event. We also
7766 * need to audit all other alloc_percpu() and vmalloc() calls in
7767 * tracing, because something might get triggered within a
7768 * page fault trace event!
7770 vmalloc_sync_mappings();
7775 static void free_trace_buffer(struct trace_buffer *buf)
7778 ring_buffer_free(buf->buffer);
7780 free_percpu(buf->data);
7785 static void free_trace_buffers(struct trace_array *tr)
7790 free_trace_buffer(&tr->trace_buffer);
7792 #ifdef CONFIG_TRACER_MAX_TRACE
7793 free_trace_buffer(&tr->max_buffer);
7797 static void init_trace_flags_index(struct trace_array *tr)
7801 /* Used by the trace options files */
7802 for (i = 0; i < TRACE_FLAGS_MAX_SIZE; i++)
7803 tr->trace_flags_index[i] = i;
7806 static void __update_tracer_options(struct trace_array *tr)
7810 for (t = trace_types; t; t = t->next)
7811 add_tracer_options(tr, t);
7814 static void update_tracer_options(struct trace_array *tr)
7816 mutex_lock(&trace_types_lock);
7817 __update_tracer_options(tr);
7818 mutex_unlock(&trace_types_lock);
7821 static int instance_mkdir(const char *name)
7823 struct trace_array *tr;
7826 mutex_lock(&event_mutex);
7827 mutex_lock(&trace_types_lock);
7830 list_for_each_entry(tr, &ftrace_trace_arrays, list) {
7831 if (tr->name && strcmp(tr->name, name) == 0)
7836 tr = kzalloc(sizeof(*tr), GFP_KERNEL);
7840 tr->name = kstrdup(name, GFP_KERNEL);
7844 if (!alloc_cpumask_var(&tr->tracing_cpumask, GFP_KERNEL))
7847 tr->trace_flags = global_trace.trace_flags & ~ZEROED_TRACE_FLAGS;
7849 cpumask_copy(tr->tracing_cpumask, cpu_all_mask);
7851 raw_spin_lock_init(&tr->start_lock);
7853 tr->max_lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
7855 tr->current_trace = &nop_trace;
7857 INIT_LIST_HEAD(&tr->systems);
7858 INIT_LIST_HEAD(&tr->events);
7859 INIT_LIST_HEAD(&tr->hist_vars);
7861 if (allocate_trace_buffers(tr, trace_buf_size) < 0)
7864 tr->dir = tracefs_create_dir(name, trace_instance_dir);
7868 ret = event_trace_add_tracer(tr->dir, tr);
7870 tracefs_remove_recursive(tr->dir);
7874 ftrace_init_trace_array(tr);
7876 init_tracer_tracefs(tr, tr->dir);
7877 init_trace_flags_index(tr);
7878 __update_tracer_options(tr);
7880 list_add(&tr->list, &ftrace_trace_arrays);
7882 mutex_unlock(&trace_types_lock);
7883 mutex_unlock(&event_mutex);
7888 free_trace_buffers(tr);
7889 free_cpumask_var(tr->tracing_cpumask);
7894 mutex_unlock(&trace_types_lock);
7895 mutex_unlock(&event_mutex);
7901 static int instance_rmdir(const char *name)
7903 struct trace_array *tr;
7908 mutex_lock(&event_mutex);
7909 mutex_lock(&trace_types_lock);
7912 list_for_each_entry(tr, &ftrace_trace_arrays, list) {
7913 if (tr->name && strcmp(tr->name, name) == 0) {
7922 if (tr->ref || (tr->current_trace && tr->current_trace->ref))
7925 list_del(&tr->list);
7927 /* Disable all the flags that were enabled coming in */
7928 for (i = 0; i < TRACE_FLAGS_MAX_SIZE; i++) {
7929 if ((1 << i) & ZEROED_TRACE_FLAGS)
7930 set_tracer_flag(tr, 1 << i, 0);
7933 tracing_set_nop(tr);
7934 clear_ftrace_function_probes(tr);
7935 event_trace_del_tracer(tr);
7936 ftrace_clear_pids(tr);
7937 ftrace_destroy_function_files(tr);
7938 tracefs_remove_recursive(tr->dir);
7939 free_trace_buffers(tr);
7941 for (i = 0; i < tr->nr_topts; i++) {
7942 kfree(tr->topts[i].topts);
7946 free_cpumask_var(tr->tracing_cpumask);
7953 mutex_unlock(&trace_types_lock);
7954 mutex_unlock(&event_mutex);
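/*
 * Usage sketch: instance_mkdir()/instance_rmdir() are hooked up by
 * create_trace_instances() below, so instances are managed with plain
 * mkdir/rmdir (assumes tracefs mounted at /sys/kernel/tracing; "foo" is
 * just an example name):
 *
 *	mkdir /sys/kernel/tracing/instances/foo
 *	echo function > /sys/kernel/tracing/instances/foo/current_tracer
 *	rmdir /sys/kernel/tracing/instances/foo
 *
 * The rmdir fails with -EBUSY while the instance is still referenced
 * (the tr->ref check in instance_rmdir() above).
 */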
7959 static __init void create_trace_instances(struct dentry *d_tracer)
7961 trace_instance_dir = tracefs_create_instance_dir("instances", d_tracer,
7964 if (WARN_ON(!trace_instance_dir))
7969 init_tracer_tracefs(struct trace_array *tr, struct dentry *d_tracer)
7971 struct trace_event_file *file;
7974 trace_create_file("available_tracers", 0444, d_tracer,
7975 tr, &show_traces_fops);
7977 trace_create_file("current_tracer", 0644, d_tracer,
7978 tr, &set_tracer_fops);
7980 trace_create_file("tracing_cpumask", 0644, d_tracer,
7981 tr, &tracing_cpumask_fops);
7983 trace_create_file("trace_options", 0644, d_tracer,
7984 tr, &tracing_iter_fops);
7986 trace_create_file("trace", 0644, d_tracer,
7989 trace_create_file("trace_pipe", 0444, d_tracer,
7990 tr, &tracing_pipe_fops);
7992 trace_create_file("buffer_size_kb", 0644, d_tracer,
7993 tr, &tracing_entries_fops);
7995 trace_create_file("buffer_total_size_kb", 0444, d_tracer,
7996 tr, &tracing_total_entries_fops);
7998 trace_create_file("free_buffer", 0200, d_tracer,
7999 tr, &tracing_free_buffer_fops);
8001 trace_create_file("trace_marker", 0220, d_tracer,
8002 tr, &tracing_mark_fops);
8004 file = __find_event_file(tr, "ftrace", "print");
8005 if (file && file->dir)
8006 trace_create_file("trigger", 0644, file->dir, file,
8007 &event_trigger_fops);
8008 tr->trace_marker_file = file;
8010 trace_create_file("trace_marker_raw", 0220, d_tracer,
8011 tr, &tracing_mark_raw_fops);
8013 trace_create_file("trace_clock", 0644, d_tracer, tr,
8016 trace_create_file("tracing_on", 0644, d_tracer,
8017 tr, &rb_simple_fops);
8019 trace_create_file("timestamp_mode", 0444, d_tracer, tr,
8020 &trace_time_stamp_mode_fops);
8022 create_trace_options_dir(tr);
8024 #if defined(CONFIG_TRACER_MAX_TRACE) || defined(CONFIG_HWLAT_TRACER)
8025 trace_create_file("tracing_max_latency", 0644, d_tracer,
8026 &tr->max_latency, &tracing_max_lat_fops);
8029 if (ftrace_create_function_files(tr, d_tracer))
8030 WARN(1, "Could not allocate function filter files");
8032 #ifdef CONFIG_TRACER_SNAPSHOT
8033 trace_create_file("snapshot", 0644, d_tracer,
8034 tr, &snapshot_fops);
8037 for_each_tracing_cpu(cpu)
8038 tracing_init_tracefs_percpu(tr, cpu);
8040 ftrace_init_tracefs(tr, d_tracer);
8043 static struct vfsmount *trace_automount(struct dentry *mntpt, void *ingore)
8045 struct vfsmount *mnt;
8046 struct file_system_type *type;
8049 * To maintain backward compatibility for tools that mount
8050 * debugfs to get to the tracing facility, tracefs is automatically
8051 * mounted to the debugfs/tracing directory.
8053 type = get_fs_type("tracefs");
8056 mnt = vfs_submount(mntpt, type, "tracefs", NULL);
8057 put_filesystem(type);
8066 * tracing_init_dentry - initialize top level trace array
8068 * This is called when creating files or directories in the tracing
8069 * directory. It is called via fs_initcall() by any of the boot up code
8070 * and expects to return the dentry of the top level tracing directory.
8072 struct dentry *tracing_init_dentry(void)
8074 struct trace_array *tr = &global_trace;
8076 /* The top level trace array uses NULL as parent */
8080 if (WARN_ON(!tracefs_initialized()) ||
8081 (IS_ENABLED(CONFIG_DEBUG_FS) &&
8082 WARN_ON(!debugfs_initialized())))
8083 return ERR_PTR(-ENODEV);
8086 * As there may still be users that expect the tracing
8087 * files to exist in debugfs/tracing, we must automount
8088 * the tracefs file system there, so older tools still
8089 * work with the newer kernel.
8091 tr->dir = debugfs_create_automount("tracing", NULL,
8092 trace_automount, NULL);
8094 pr_warn_once("Could not create debugfs directory 'tracing'\n");
8095 return ERR_PTR(-ENOMEM);
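/*
 * Usage note: with the automount above, the same files are reachable both
 * at the native tracefs mount point, e.g.
 *
 *	mount -t tracefs nodev /sys/kernel/tracing
 *
 * and, for older tools, under /sys/kernel/debug/tracing, where tracefs is
 * mounted automatically on first access.
 */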
8101 extern struct trace_eval_map *__start_ftrace_eval_maps[];
8102 extern struct trace_eval_map *__stop_ftrace_eval_maps[];
8104 static void __init trace_eval_init(void)
8108 len = __stop_ftrace_eval_maps - __start_ftrace_eval_maps;
8109 trace_insert_eval_map(NULL, __start_ftrace_eval_maps, len);
8112 #ifdef CONFIG_MODULES
8113 static void trace_module_add_evals(struct module *mod)
8115 if (!mod->num_trace_evals)
8119 * Modules with bad taint do not have events created, do
8120 * not bother with enums either.
8122 if (trace_module_has_bad_taint(mod))
8125 trace_insert_eval_map(mod, mod->trace_evals, mod->num_trace_evals);
8128 #ifdef CONFIG_TRACE_EVAL_MAP_FILE
8129 static void trace_module_remove_evals(struct module *mod)
8131 union trace_eval_map_item *map;
8132 union trace_eval_map_item **last = &trace_eval_maps;
8134 if (!mod->num_trace_evals)
8137 mutex_lock(&trace_eval_mutex);
8139 map = trace_eval_maps;
8142 if (map->head.mod == mod)
8144 map = trace_eval_jmp_to_tail(map);
8145 last = &map->tail.next;
8146 map = map->tail.next;
8151 *last = trace_eval_jmp_to_tail(map)->tail.next;
8154 mutex_unlock(&trace_eval_mutex);
8157 static inline void trace_module_remove_evals(struct module *mod) { }
8158 #endif /* CONFIG_TRACE_EVAL_MAP_FILE */
8160 static int trace_module_notify(struct notifier_block *self,
8161 unsigned long val, void *data)
8163 struct module *mod = data;
8166 case MODULE_STATE_COMING:
8167 trace_module_add_evals(mod);
8169 case MODULE_STATE_GOING:
8170 trace_module_remove_evals(mod);
8177 static struct notifier_block trace_module_nb = {
8178 .notifier_call = trace_module_notify,
8181 #endif /* CONFIG_MODULES */
8183 static __init int tracer_init_tracefs(void)
8185 struct dentry *d_tracer;
8187 trace_access_lock_init();
8189 d_tracer = tracing_init_dentry();
8190 if (IS_ERR(d_tracer))
8195 init_tracer_tracefs(&global_trace, d_tracer);
8196 ftrace_init_tracefs_toplevel(&global_trace, d_tracer);
8198 trace_create_file("tracing_thresh", 0644, d_tracer,
8199 &global_trace, &tracing_thresh_fops);
8201 trace_create_file("README", 0444, d_tracer,
8202 NULL, &tracing_readme_fops);
8204 trace_create_file("saved_cmdlines", 0444, d_tracer,
8205 NULL, &tracing_saved_cmdlines_fops);
8207 trace_create_file("saved_cmdlines_size", 0644, d_tracer,
8208 NULL, &tracing_saved_cmdlines_size_fops);
8210 trace_create_file("saved_tgids", 0444, d_tracer,
8211 NULL, &tracing_saved_tgids_fops);
8215 trace_create_eval_file(d_tracer);
8217 #ifdef CONFIG_MODULES
8218 register_module_notifier(&trace_module_nb);
8221 #ifdef CONFIG_DYNAMIC_FTRACE
8222 trace_create_file("dyn_ftrace_total_info", 0444, d_tracer,
8223 &ftrace_update_tot_cnt, &tracing_dyn_info_fops);
8226 create_trace_instances(d_tracer);
8228 update_tracer_options(&global_trace);
8233 static int trace_panic_handler(struct notifier_block *this,
8234 unsigned long event, void *unused)
8236 if (ftrace_dump_on_oops)
8237 ftrace_dump(ftrace_dump_on_oops);
8241 static struct notifier_block trace_panic_notifier = {
8242 .notifier_call = trace_panic_handler,
8244 .priority = 150 /* priority: INT_MAX >= x >= 0 */
8247 static int trace_die_handler(struct notifier_block *self,
8253 if (ftrace_dump_on_oops)
8254 ftrace_dump(ftrace_dump_on_oops);
8262 static struct notifier_block trace_die_notifier = {
8263 .notifier_call = trace_die_handler,
8268 * printk is limited to a maximum of 1024 characters; we really don't
8269 * need it that big. Nothing should be printing 1000 characters anyway.
8271 #define TRACE_MAX_PRINT 1000
8274 * Define here KERN_TRACE so that we have one place to modify
8275 * it if we decide to change what log level the ftrace dump
8278 #define KERN_TRACE KERN_EMERG
8281 trace_printk_seq(struct trace_seq *s)
8283 /* Probably should print a warning here. */
8284 if (s->seq.len >= TRACE_MAX_PRINT)
8285 s->seq.len = TRACE_MAX_PRINT;
8288 * More paranoid code. Although the buffer size is set to
8289 * PAGE_SIZE, and TRACE_MAX_PRINT is 1000, this is just
8290 * an extra layer of protection.
8292 if (WARN_ON_ONCE(s->seq.len >= s->seq.size))
8293 s->seq.len = s->seq.size - 1;
8295 /* should already be zero terminated, but we are paranoid. */
8296 s->buffer[s->seq.len] = 0;
8298 printk(KERN_TRACE "%s", s->buffer);
8303 void trace_init_global_iter(struct trace_iterator *iter)
8305 iter->tr = &global_trace;
8306 iter->trace = iter->tr->current_trace;
8307 iter->cpu_file = RING_BUFFER_ALL_CPUS;
8308 iter->trace_buffer = &global_trace.trace_buffer;
8310 if (iter->trace && iter->trace->open)
8311 iter->trace->open(iter);
8313 /* Annotate start of buffers if we had overruns */
8314 if (ring_buffer_overruns(iter->trace_buffer->buffer))
8315 iter->iter_flags |= TRACE_FILE_ANNOTATE;
8317 /* Output in nanoseconds only if we are using a clock in nanoseconds. */
8318 if (trace_clocks[iter->tr->clock_id].in_ns)
8319 iter->iter_flags |= TRACE_FILE_TIME_IN_NS;
8322 void ftrace_dump(enum ftrace_dump_mode oops_dump_mode)
8324 /* use static because iter can be a bit big for the stack */
8325 static struct trace_iterator iter;
8326 static atomic_t dump_running;
8327 struct trace_array *tr = &global_trace;
8328 unsigned int old_userobj;
8329 unsigned long flags;
8332 /* Only allow one dump user at a time. */
8333 if (atomic_inc_return(&dump_running) != 1) {
8334 atomic_dec(&dump_running);
8339 * Always turn off tracing when we dump.
8340 * We don't need to show trace output of what happens
8341 * between multiple crashes.
8343 * If the user does a sysrq-z, then they can re-enable
8344 * tracing with echo 1 > tracing_on.
8348 local_irq_save(flags);
8349 printk_nmi_direct_enter();
8351 /* Simulate the iterator */
8352 trace_init_global_iter(&iter);
8354 for_each_tracing_cpu(cpu) {
8355 atomic_inc(&per_cpu_ptr(iter.trace_buffer->data, cpu)->disabled);
8358 old_userobj = tr->trace_flags & TRACE_ITER_SYM_USEROBJ;
8360 /* don't look at user memory in panic mode */
8361 tr->trace_flags &= ~TRACE_ITER_SYM_USEROBJ;
8363 switch (oops_dump_mode) {
8365 iter.cpu_file = RING_BUFFER_ALL_CPUS;
8368 iter.cpu_file = raw_smp_processor_id();
8373 printk(KERN_TRACE "Bad dumping mode, switching to all CPUs dump\n");
8374 iter.cpu_file = RING_BUFFER_ALL_CPUS;
8377 printk(KERN_TRACE "Dumping ftrace buffer:\n");
8379 /* Did function tracer already get disabled? */
8380 if (ftrace_is_dead()) {
8381 printk("# WARNING: FUNCTION TRACING IS CORRUPTED\n");
8382 printk("# MAY BE MISSING FUNCTION EVENTS\n");
8386 * We need to stop all tracing on all CPUs to read
8387 * the next buffer. This is a bit expensive, but is
8388 * not done often. We fill all that we can read,
8389 * and then release the locks again.
8392 while (!trace_empty(&iter)) {
8395 printk(KERN_TRACE "---------------------------------\n");
8399 trace_iterator_reset(&iter);
8400 iter.iter_flags |= TRACE_FILE_LAT_FMT;
8402 if (trace_find_next_entry_inc(&iter) != NULL) {
8405 ret = print_trace_line(&iter);
8406 if (ret != TRACE_TYPE_NO_CONSUME)
8407 trace_consume(&iter);
8409 touch_nmi_watchdog();
8411 trace_printk_seq(&iter.seq);
8415 printk(KERN_TRACE " (ftrace buffer empty)\n");
8417 printk(KERN_TRACE "---------------------------------\n");
8420 tr->trace_flags |= old_userobj;
8422 for_each_tracing_cpu(cpu) {
8423 atomic_dec(&per_cpu_ptr(iter.trace_buffer->data, cpu)->disabled);
8425 atomic_dec(&dump_running);
8426 printk_nmi_direct_exit();
8427 local_irq_restore(flags);
8429 EXPORT_SYMBOL_GPL(ftrace_dump);
8431 int trace_run_command(const char *buf, int (*createfn)(int, char **))
8438 argv = argv_split(GFP_KERNEL, buf, &argc);
8443 ret = createfn(argc, argv);
8450 #define WRITE_BUFSIZE 4096
8452 ssize_t trace_parse_run_command(struct file *file, const char __user *buffer,
8453 size_t count, loff_t *ppos,
8454 int (*createfn)(int, char **))
8456 char *kbuf, *buf, *tmp;
8461 kbuf = kmalloc(WRITE_BUFSIZE, GFP_KERNEL);
8465 while (done < count) {
8466 size = count - done;
8468 if (size >= WRITE_BUFSIZE)
8469 size = WRITE_BUFSIZE - 1;
8471 if (copy_from_user(kbuf, buffer + done, size)) {
8478 tmp = strchr(buf, '\n');
8481 size = tmp - buf + 1;
8484 if (done + size < count) {
8487 /* This can accept WRITE_BUFSIZE - 2 ('\n' + '\0') */
8488 pr_warn("Line length is too long: Should be less than %d\n",
8496 /* Remove comments */
8497 tmp = strchr(buf, '#');
8502 ret = trace_run_command(buf, createfn);
8507 } while (done < count);
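/*
 * Usage sketch: trace_parse_run_command() is the generic write handler
 * used by command-style tracefs files such as kprobe_events. Each
 * newline-terminated line (at most WRITE_BUFSIZE - 2 characters) is
 * split into argv[] and passed to createfn(), and a '#' starts a comment
 * that is stripped before parsing. For example, writing the following to
 * kprobe_events results in two createfn() calls, one per line
 * ("myprobe" and do_sys_open are only example names):
 *
 *	p:myprobe do_sys_open		# add a probe
 *	-:myprobe			# and remove it again
 */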
8517 __init static int tracer_alloc_buffers(void)
8523 * Make sure we don't accidentally add more trace options
8524 * than we have bits for.
8526 BUILD_BUG_ON(TRACE_ITER_LAST_BIT > TRACE_FLAGS_MAX_SIZE);
8528 if (!alloc_cpumask_var(&tracing_buffer_mask, GFP_KERNEL))
8531 if (!alloc_cpumask_var(&global_trace.tracing_cpumask, GFP_KERNEL))
8532 goto out_free_buffer_mask;
8534 /* Only allocate trace_printk buffers if a trace_printk exists */
8535 if (&__stop___trace_bprintk_fmt != &__start___trace_bprintk_fmt)
8536 /* Must be called before global_trace.buffer is allocated */
8537 trace_printk_init_buffers();
8539 /* To save memory, keep the ring buffer size to its minimum */
8540 if (ring_buffer_expanded)
8541 ring_buf_size = trace_buf_size;
8545 cpumask_copy(tracing_buffer_mask, cpu_possible_mask);
8546 cpumask_copy(global_trace.tracing_cpumask, cpu_all_mask);
8548 raw_spin_lock_init(&global_trace.start_lock);
8551 * The prepare callbacks allocate some memory for the ring buffer. We
8552 * don't free the buffer if the CPU goes down. If we were to free
8553 * the buffer, then the user would lose any trace that was in the
8554 * buffer. The memory will be removed once the "instance" is removed.
8556 ret = cpuhp_setup_state_multi(CPUHP_TRACE_RB_PREPARE,
8557 "trace/RB:preapre", trace_rb_cpu_prepare,
8560 goto out_free_cpumask;
8561 /* Used for event triggers */
8563 temp_buffer = ring_buffer_alloc(PAGE_SIZE, RB_FL_OVERWRITE);
8565 goto out_rm_hp_state;
8567 if (trace_create_savedcmd() < 0)
8568 goto out_free_temp_buffer;
8570 /* TODO: make the number of buffers hot pluggable with CPUS */
8571 if (allocate_trace_buffers(&global_trace, ring_buf_size) < 0) {
8572 printk(KERN_ERR "tracer: failed to allocate ring buffer!\n");
8574 goto out_free_savedcmd;
8577 if (global_trace.buffer_disabled)
8580 if (trace_boot_clock) {
8581 ret = tracing_set_clock(&global_trace, trace_boot_clock);
8583 pr_warn("Trace clock %s not defined, going back to default\n",
8588 * register_tracer() might reference current_trace, so it
8589 * needs to be set before we register anything. This is
8590 * just a bootstrap of current_trace anyway.
8592 global_trace.current_trace = &nop_trace;
8594 global_trace.max_lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
8596 ftrace_init_global_array_ops(&global_trace);
8598 init_trace_flags_index(&global_trace);
8600 register_tracer(&nop_trace);
8602 /* Function tracing may start here (via kernel command line) */
8603 init_function_trace();
8605 /* All seems OK, enable tracing */
8606 tracing_disabled = 0;
8608 atomic_notifier_chain_register(&panic_notifier_list,
8609 &trace_panic_notifier);
8611 register_die_notifier(&trace_die_notifier);
8613 global_trace.flags = TRACE_ARRAY_FL_GLOBAL;
8615 INIT_LIST_HEAD(&global_trace.systems);
8616 INIT_LIST_HEAD(&global_trace.events);
8617 INIT_LIST_HEAD(&global_trace.hist_vars);
8618 list_add(&global_trace.list, &ftrace_trace_arrays);
8620 apply_trace_boot_options();
8622 register_snapshot_cmd();
8627 free_saved_cmdlines_buffer(savedcmd);
8628 out_free_temp_buffer:
8629 ring_buffer_free(temp_buffer);
8631 cpuhp_remove_multi_state(CPUHP_TRACE_RB_PREPARE);
8633 free_cpumask_var(global_trace.tracing_cpumask);
8634 out_free_buffer_mask:
8635 free_cpumask_var(tracing_buffer_mask);
8640 void __init early_trace_init(void)
8642 if (tracepoint_printk) {
8643 tracepoint_print_iter =
8644 kmalloc(sizeof(*tracepoint_print_iter), GFP_KERNEL);
8645 if (WARN_ON(!tracepoint_print_iter))
8646 tracepoint_printk = 0;
8648 static_key_enable(&tracepoint_printk_key.key);
8650 tracer_alloc_buffers();
8653 void __init trace_init(void)
8658 __init static int clear_boot_tracer(void)
8661 * The default bootup tracer name lives in an init section
8662 * buffer. This function is called in lateinit. If we did not
8663 * find the boot tracer, then clear it out, to prevent
8664 * later registration from accessing the buffer that is
8665 * about to be freed.
8667 if (!default_bootup_tracer)
8670 printk(KERN_INFO "ftrace bootup tracer '%s' not registered.\n",
8671 default_bootup_tracer);
8672 default_bootup_tracer = NULL;
8677 fs_initcall(tracer_init_tracefs);
8678 late_initcall_sync(clear_boot_tracer);
8680 #ifdef CONFIG_HAVE_UNSTABLE_SCHED_CLOCK
8681 __init static int tracing_set_default_clock(void)
8683 /* sched_clock_stable() is determined in late_initcall */
8684 if (!trace_boot_clock && !sched_clock_stable()) {
8686 "Unstable clock detected, switching default tracing clock to \"global\"\n"
8687 "If you want to keep using the local clock, then add:\n"
8688 " \"trace_clock=local\"\n"
8689 "on the kernel command line\n");
8690 tracing_set_clock(&global_trace, "global");
8695 late_initcall_sync(tracing_set_default_clock);