/*
 * Copyright 2006 Andi Kleen, SUSE Labs.
 * Subject to the GNU Public License, v.2
 *
 * Fast user context implementation of clock_gettime, gettimeofday, and time.
 *
 * 32 Bit compat layer by Stefani Seibold <stefani@seibold.net>
 *  sponsored by Rohde & Schwarz GmbH & Co. KG Munich/Germany
 *
 * The code should have no internal unresolved relocations.
 * Check with readelf after changing.
 */
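
/*
 * For example (illustrative; the exact image name and path depend on
 * the build):
 *
 *	readelf -r arch/x86/entry/vdso/vdso.so.dbg
 *
 * should report no relocation entries for the built vDSO image.
 */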

#include <uapi/linux/time.h>
#include <asm/vgtod.h>
#include <asm/vvar.h>
#include <asm/unistd.h>
#include <asm/msr.h>
#include <linux/math64.h>
#include <linux/time.h>

#define gtod (&VVAR(vsyscall_gtod_data))

extern int __vdso_clock_gettime(clockid_t clock, struct timespec *ts);
extern int __vdso_gettimeofday(struct timeval *tv, struct timezone *tz);
extern time_t __vdso_time(time_t *t);

#ifdef CONFIG_PARAVIRT_CLOCK
extern u8 pvclock_page
	__attribute__((visibility("hidden")));
#endif

#ifndef BUILD_VDSO32

#include <linux/kernel.h>
#include <asm/vsyscall.h>
#include <asm/fixmap.h>
#include <asm/pvclock.h>

notrace static long vdso_fallback_gettime(long clock, struct timespec *ts)
{
	long ret;

	asm ("syscall" : "=a" (ret), "=m" (*ts) :
	     "0" (__NR_clock_gettime), "D" (clock), "S" (ts) :
	     "memory", "rcx", "r11");
	return ret;
}

notrace static long vdso_fallback_gtod(struct timeval *tv, struct timezone *tz)
{
	long ret;

	asm ("syscall" : "=a" (ret), "=m" (*tv), "=m" (*tz) :
	     "0" (__NR_gettimeofday), "D" (tv), "S" (tz) :
	     "memory", "rcx", "r11");
	return ret;
}

#ifdef CONFIG_PARAVIRT_CLOCK

static notrace const struct pvclock_vsyscall_time_info *get_pvti0(void)
{
	return (const struct pvclock_vsyscall_time_info *)&pvclock_page;
}

static notrace cycle_t vread_pvclock(int *mode)
{
	const struct pvclock_vcpu_time_info *pvti = &get_pvti0()->pvti;
	cycle_t ret;
	u64 tsc, pvti_tsc;
	u64 last, delta, pvti_system_time;
	u32 version, pvti_tsc_to_system_mul, pvti_tsc_shift;

	/*
	 * Note: The kernel and hypervisor must guarantee that cpu ID
	 * number maps 1:1 to per-CPU pvclock time info.
	 *
	 * Because the hypervisor is entirely unaware of guest userspace
	 * preemption, it cannot guarantee that per-CPU pvclock time
	 * info is updated if the underlying CPU changes or that the
	 * version is increased whenever the underlying CPU changes.
	 *
	 * On KVM, we are guaranteed that pvti updates for any vCPU are
	 * atomic as seen by *all* vCPUs.  This is an even stronger
	 * guarantee than we get with a normal seqlock.
	 *
	 * On Xen, we don't appear to have that guarantee, but Xen still
	 * supplies a valid seqlock using the version field.
	 *
	 * We only do pvclock vdso timing at all if
	 * PVCLOCK_TSC_STABLE_BIT is set, and we interpret that bit to
	 * mean that all vCPUs have matching pvti and that the TSC is
	 * synced, so we can just look at vCPU 0's pvti.
	 */

	if (unlikely(!(pvti->flags & PVCLOCK_TSC_STABLE_BIT))) {
		*mode = VCLOCK_NONE;
		return 0;
	}

	do {
		version = pvti->version;

		/* This is also a read barrier, so we'll read version first. */
		tsc = rdtsc_ordered();

		pvti_tsc_to_system_mul = pvti->tsc_to_system_mul;
		pvti_tsc_shift = pvti->tsc_shift;
		pvti_system_time = pvti->system_time;
		pvti_tsc = pvti->tsc_timestamp;

		/* Make sure that the version double-check is last. */
		smp_rmb();
	} while (unlikely((version & 1) || version != pvti->version));

	delta = tsc - pvti_tsc;
	ret = pvti_system_time +
		pvclock_scale_delta(delta, pvti_tsc_to_system_mul,
				    pvti_tsc_shift);
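
	/*
	 * Roughly, pvclock_scale_delta() computes
	 * (delta << tsc_shift) * tsc_to_system_mul >> 32 (a negative
	 * tsc_shift shifts right instead), i.e. it converts the TSC
	 * delta to nanoseconds using the hypervisor-supplied scale
	 * factors.
	 */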

	/* refer to tsc.c read_tsc() comment for rationale */
	last = gtod->cycle_last;

	if (likely(ret >= last))
		return ret;

	return last;
}
#endif

#else

notrace static long vdso_fallback_gettime(long clock, struct timespec *ts)
{
	long ret;

	asm (
		"mov %%ebx, %%edx \n"
		"mov %[clock], %%ebx \n"
		"call __kernel_vsyscall \n"
		"mov %%edx, %%ebx \n"
		: "=a" (ret), "=m" (*ts)
		: "0" (__NR_clock_gettime), [clock] "g" (clock), "c" (ts)
		: "memory", "edx");
	return ret;
}

notrace static long vdso_fallback_gtod(struct timeval *tv, struct timezone *tz)
{
	long ret;

	asm (
		"mov %%ebx, %%edx \n"
		"mov %[tv], %%ebx \n"
		"call __kernel_vsyscall \n"
		"mov %%edx, %%ebx \n"
		: "=a" (ret), "=m" (*tv), "=m" (*tz)
		: "0" (__NR_gettimeofday), [tv] "g" (tv), "c" (tz)
		: "memory", "edx");
	return ret;
}

#ifdef CONFIG_PARAVIRT_CLOCK
/* pvclock is not usable from the 32-bit vDSO; force a syscall fallback. */
static notrace cycle_t vread_pvclock(int *mode)
{
	*mode = VCLOCK_NONE;
	return 0;
}
#endif

#endif

notrace static cycle_t vread_tsc(void)
{
	cycle_t ret = (cycle_t)rdtsc_ordered();
	u64 last = gtod->cycle_last;

	if (likely(ret >= last))
		return ret;

	/*
	 * GCC likes to generate cmov here, but this branch is extremely
	 * predictable (it's just a function of time and the likely is
	 * very likely) and there's a data dependence, so force GCC
	 * to generate a branch instead.  I don't barrier() because
	 * we don't actually need a barrier, and if this function
	 * ever gets inlined it will generate worse code.
	 */
	asm volatile ("");
	return last;
}

notrace static inline u64 vgetsns(int *mode)
{
	u64 v;
	cycles_t cycles;

	if (gtod->vclock_mode == VCLOCK_TSC)
		cycles = vread_tsc();
#ifdef CONFIG_PARAVIRT_CLOCK
	else if (gtod->vclock_mode == VCLOCK_PVCLOCK)
		cycles = vread_pvclock(mode);
#endif
	else
		return 0;
	v = (cycles - gtod->cycle_last) & gtod->mask;
	return v * gtod->mult;
}
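
/*
 * Note that vgetsns() returns shifted nanoseconds: the product above is
 * in units of 2^-shift ns, and the do_* readers below shift the
 * accumulated value right by gtod->shift to get real nanoseconds.
 */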

/* Code size doesn't matter (vdso is 4k anyway) and this is faster. */
notrace static int __always_inline do_realtime(struct timespec *ts)
{
	unsigned long seq;
	u64 ns;
	int mode;

	do {
		seq = gtod_read_begin(gtod);
		mode = gtod->vclock_mode;
		ts->tv_sec = gtod->wall_time_sec;
		ns = gtod->wall_time_snsec;
		ns += vgetsns(&mode);
		ns >>= gtod->shift;
	} while (unlikely(gtod_read_retry(gtod, seq)));

	ts->tv_sec += __iter_div_u64_rem(ns, NSEC_PER_SEC, &ns);
	ts->tv_nsec = ns;

	return mode;
}

notrace static int __always_inline do_monotonic(struct timespec *ts)
{
	unsigned long seq;
	u64 ns;
	int mode;

	do {
		seq = gtod_read_begin(gtod);
		mode = gtod->vclock_mode;
		ts->tv_sec = gtod->monotonic_time_sec;
		ns = gtod->monotonic_time_snsec;
		ns += vgetsns(&mode);
		ns >>= gtod->shift;
	} while (unlikely(gtod_read_retry(gtod, seq)));

	ts->tv_sec += __iter_div_u64_rem(ns, NSEC_PER_SEC, &ns);
	ts->tv_nsec = ns;

	return mode;
}

notrace static void do_realtime_coarse(struct timespec *ts)
{
	unsigned long seq;

	do {
		seq = gtod_read_begin(gtod);
		ts->tv_sec = gtod->wall_time_coarse_sec;
		ts->tv_nsec = gtod->wall_time_coarse_nsec;
	} while (unlikely(gtod_read_retry(gtod, seq)));
}

notrace static void do_monotonic_coarse(struct timespec *ts)
{
	unsigned long seq;

	do {
		seq = gtod_read_begin(gtod);
		ts->tv_sec = gtod->monotonic_time_coarse_sec;
		ts->tv_nsec = gtod->monotonic_time_coarse_nsec;
	} while (unlikely(gtod_read_retry(gtod, seq)));
}

notrace int __vdso_clock_gettime(clockid_t clock, struct timespec *ts)
{
	switch (clock) {
	case CLOCK_REALTIME:
		if (do_realtime(ts) == VCLOCK_NONE)
			goto fallback;
		break;
	case CLOCK_MONOTONIC:
		if (do_monotonic(ts) == VCLOCK_NONE)
			goto fallback;
		break;
	case CLOCK_REALTIME_COARSE:
		do_realtime_coarse(ts);
		break;
	case CLOCK_MONOTONIC_COARSE:
		do_monotonic_coarse(ts);
		break;
	default:
		goto fallback;
	}

	return 0;
fallback:
	return vdso_fallback_gettime(clock, ts);
}
int clock_gettime(clockid_t, struct timespec *)
	__attribute__((weak, alias("__vdso_clock_gettime")));

notrace int __vdso_gettimeofday(struct timeval *tv, struct timezone *tz)
{
	if (likely(tv != NULL)) {
		if (unlikely(do_realtime((struct timespec *)tv) == VCLOCK_NONE))
			return vdso_fallback_gtod(tv, tz);
		/* do_realtime() wrote nanoseconds; convert to microseconds. */
		tv->tv_usec /= 1000;
	}
	if (unlikely(tz != NULL)) {
		tz->tz_minuteswest = gtod->tz_minuteswest;
		tz->tz_dsttime = gtod->tz_dsttime;
	}

	return 0;
}
int gettimeofday(struct timeval *, struct timezone *)
	__attribute__((weak, alias("__vdso_gettimeofday")));

/*
 * This will break when the xtime seconds get inaccurate, but that is
 * unlikely.
 */
notrace time_t __vdso_time(time_t *t)
{
	/* This is atomic on x86 so we don't need any locks. */
	time_t result = ACCESS_ONCE(gtod->wall_time_sec);

	if (t)
		*t = result;
	return result;
}
time_t time(time_t *t)
	__attribute__((weak, alias("__vdso_time")));
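
/*
 * Usage sketch (illustrative, not part of the vDSO): a normal program
 * reaches these entry points through the libc wrappers, which glibc
 * resolves to the __vdso_* symbols exported by this object, so the
 * common case never enters the kernel:
 *
 *	#include <stdio.h>
 *	#include <time.h>
 *
 *	int main(void)
 *	{
 *		struct timespec ts;
 *
 *		clock_gettime(CLOCK_MONOTONIC, &ts);	// no syscall on the fast path
 *		printf("%ld.%09ld\n", (long)ts.tv_sec, ts.tv_nsec);
 *		return 0;
 *	}
 */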