// SPDX-License-Identifier: GPL-2.0
/*
 * tracing clocks
 *
 *  Copyright (C) 2009 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
 *
 * Implements 3 trace clock variants, with differing scalability/precision
 * tradeoffs:
 *
 *  -  local: CPU-local trace clock
 *  - medium: scalable global clock with some jitter
 *  - global: globally monotonic, serialized clock
 *
 * Tracer plugins will choose a default from these clocks.
 */
#include <linux/spinlock.h>
#include <linux/irqflags.h>
#include <linux/hardirq.h>
#include <linux/module.h>
#include <linux/percpu.h>
#include <linux/sched.h>
#include <linux/sched/clock.h>
#include <linux/ktime.h>
#include <linux/trace_clock.h>

/*
 * trace_clock_local(): the simplest and least coherent tracing clock.
 *
 * Useful for tracing that does not cross to other CPUs nor
 * does it go through idle events.
 */
u64 notrace trace_clock_local(void)
{
	u64 clock;

	/*
	 * sched_clock() is an architecture implemented, fast, scalable,
	 * lockless clock. It is not guaranteed to be coherent across
	 * CPUs, nor across CPU idle events.
	 */
	preempt_disable_notrace();
	clock = sched_clock();
	preempt_enable_notrace();

	return clock;
}
EXPORT_SYMBOL_GPL(trace_clock_local);
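
/*
 * Illustrative sketch, not part of the original file: measuring a
 * CPU-local duration with trace_clock_local(). The helper name
 * trace_clock_example_delta() is hypothetical. Because this clock is
 * not coherent across CPUs, both reads must happen on the same CPU
 * (hence the preemption-disabled region) for the delta to be
 * meaningful.
 */
static u64 __maybe_unused trace_clock_example_delta(void (*fn)(void))
{
	u64 start, delta;

	preempt_disable_notrace();	/* stay on one CPU for both reads */
	start = trace_clock_local();
	fn();				/* the work being timed */
	delta = trace_clock_local() - start;
	preempt_enable_notrace();

	return delta;			/* same unit as sched_clock(), ~ns */
}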

/*
 * trace_clock(): 'between' trace clock. Not completely serialized,
 * but not completely incorrect when crossing CPUs either.
 *
 * This is based on cpu_clock(), which will allow at most ~1 jiffy of
 * jitter between CPUs. So it's a pretty scalable clock, but there
 * can be offsets in the trace data.
 */
u64 notrace trace_clock(void)
{
	return local_clock();
}
EXPORT_SYMBOL_GPL(trace_clock);
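
/*
 * Illustrative sketch, not part of the original file: the header
 * comment says tracer plugins choose a default from these clocks; one
 * natural shape for that is a function pointer, since the
 * trace_clock*() variants share the u64 (*)(void) signature. The names
 * example_clock_fn and example_take_timestamp() are hypothetical.
 */
static u64 (*example_clock_fn)(void) __maybe_unused = trace_clock;

static u64 __maybe_unused example_take_timestamp(void)
{
	/* One indirect call per event, whichever clock was selected. */
	return example_clock_fn();
}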

/*
 * trace_clock_jiffies(): Simply use jiffies as a clock counter.
 * Note that this use of jiffies_64 is not completely safe on
 * 32-bit systems. But the window is tiny, and the effect if
 * we are affected is that we will have an obviously bogus
 * timestamp on a trace event - i.e. not life threatening.
 */
u64 notrace trace_clock_jiffies(void)
{
	return jiffies_64_to_clock_t(jiffies_64 - INITIAL_JIFFIES);
}
EXPORT_SYMBOL_GPL(trace_clock_jiffies);
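
/*
 * Illustrative sketch, not part of the original file: if the torn-read
 * window mentioned above were a concern, a 32-bit system could pay for
 * the seqlock-protected accessor get_jiffies_64() (from
 * <linux/jiffies.h>) instead of reading jiffies_64 directly. The
 * helper name trace_clock_jiffies_safe() is hypothetical.
 */
static u64 __maybe_unused trace_clock_jiffies_safe(void)
{
	/* get_jiffies_64() retries under jiffies_lock until consistent. */
	return jiffies_64_to_clock_t(get_jiffies_64() - INITIAL_JIFFIES);
}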

/*
 * trace_clock_global(): special globally coherent trace clock
 *
 * It has higher overhead than the other trace clocks but is still
 * an order of magnitude faster than GTOD derived hardware clocks.
 *
 * Used by plugins that need globally coherent timestamps.
 */

/* keep prev_time and lock in the same cacheline. */
static struct {
	u64		prev_time;
	arch_spinlock_t	lock;
} trace_clock_struct ____cacheline_aligned_in_smp =
	{
		.lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED,
	};

u64 notrace trace_clock_global(void)
{
	unsigned long flags;
	int this_cpu;
	u64 now, prev_time;

	local_irq_save(flags);

	this_cpu = raw_smp_processor_id();

	/*
	 * The global clock "guarantees" that the events are ordered
	 * between CPUs. But if two events on two different CPUs call
	 * trace_clock_global at roughly the same time, it really does
	 * not matter which one gets the earlier time. Just make sure
	 * that the same CPU will always show a monotonic clock.
	 *
	 * Use a read memory barrier to get the latest written
	 * time that was recorded.
	 */
	smp_rmb();
	prev_time = READ_ONCE(trace_clock_struct.prev_time);
	now = sched_clock_cpu(this_cpu);

	/* Make sure that now is always greater than or equal to prev_time */
	if ((s64)(now - prev_time) < 0)
		now = prev_time;

	/*
	 * If in an NMI context then don't risk lockups and simply return
	 * the current time.
	 */
	if (unlikely(in_nmi()))
		goto out;

	/* Tracing can cause strange recursion, always use a try lock */
	if (arch_spin_trylock(&trace_clock_struct.lock)) {
		/* Reread prev_time in case it was already updated */
		prev_time = READ_ONCE(trace_clock_struct.prev_time);
		if ((s64)(now - prev_time) < 0)
			now = prev_time;

		trace_clock_struct.prev_time = now;

		/* The unlock acts as the wmb for the above rmb */
		arch_spin_unlock(&trace_clock_struct.lock);
	}
 out:
	local_irq_restore(flags);

	return now;
}
EXPORT_SYMBOL_GPL(trace_clock_global);
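
/*
 * Illustrative sketch, not part of the original file: the
 * (s64)(now - prev_time) < 0 test used above is the standard wrap-safe
 * way to compare free-running 64-bit counters. The helper name
 * example_time_before() is hypothetical.
 */
static inline bool __maybe_unused example_time_before(u64 a, u64 b)
{
	/*
	 * Unsigned subtraction followed by a signed test tolerates
	 * counter wraparound: it answers "is a earlier than b?" correctly
	 * as long as the two samples are within 2^63 ticks of each other.
	 */
	return (s64)(a - b) < 0;
}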

static atomic64_t trace_counter;

/*
 * trace_clock_counter(): simply an atomic counter.
 * Use this counter for cases where you do not care about timings,
 * but are interested in strict ordering.
 */
u64 notrace trace_clock_counter(void)
{
	return atomic64_add_return(1, &trace_counter);
}
EXPORT_SYMBOL_GPL(trace_clock_counter);
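
/*
 * Illustrative sketch, not part of the original file: stamping a
 * hypothetical event with trace_clock_counter(). Each call returns a
 * unique value, totally ordered across all CPUs, so comparing stamps
 * orders the events; the values carry no time information. struct
 * example_event and example_stamp_event() are hypothetical names.
 */
struct example_event {
	u64	seq;		/* ordering stamp, not a timestamp */
};

static void __maybe_unused example_stamp_event(struct example_event *ev)
{
	ev->seq = trace_clock_counter();
}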