/*
 * Time of day based timer functions.
 *
 * Copyright IBM Corp. 1999, 2008
 * Author(s): Hartmut Penner (hp@de.ibm.com),
 *	      Martin Schwidefsky (schwidefsky@de.ibm.com),
 *	      Denis Joseph Barrow (djbarrow@de.ibm.com,barrow_dj@yahoo.com)
 *
 * Derived from "arch/i386/kernel/time.c"
 *    Copyright (C) 1991, 1992, 1995 Linus Torvalds
 */

#define KMSG_COMPONENT "time"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
#include <linux/kernel_stat.h>
#include <linux/errno.h>
#include <linux/export.h>
#include <linux/sched.h>
#include <linux/sched/clock.h>
#include <linux/kernel.h>
#include <linux/param.h>
#include <linux/string.h>
#include <linux/interrupt.h>
#include <linux/cpu.h>
#include <linux/stop_machine.h>
#include <linux/time.h>
#include <linux/device.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/smp.h>
#include <linux/types.h>
#include <linux/profile.h>
#include <linux/timex.h>
#include <linux/notifier.h>
#include <linux/timekeeper_internal.h>
#include <linux/clockchips.h>
#include <linux/gfp.h>
#include <linux/kprobes.h>
#include <linux/uaccess.h>
#include <asm/facility.h>
#include <asm/delay.h>
#include <asm/div64.h>
#include <asm/irq_regs.h>
#include <asm/vtimer.h>
unsigned char tod_clock_base[16] __aligned(8) = {
	/* Force to data section. */
	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff
};
EXPORT_SYMBOL_GPL(tod_clock_base);

u64 clock_comparator_max = -1ULL;
EXPORT_SYMBOL_GPL(clock_comparator_max);

static DEFINE_PER_CPU(struct clock_event_device, comparators);

ATOMIC_NOTIFIER_HEAD(s390_epoch_delta_notifier);
EXPORT_SYMBOL(s390_epoch_delta_notifier);

unsigned char ptff_function_mask[16];

static unsigned long long lpar_offset;
static unsigned long long initial_leap_seconds;
static unsigned long long tod_steering_end;
static long long tod_steering_delta;
/*
 * Get time offsets with PTFF
 */
void __init time_early_init(void)
{
	struct ptff_qto qto;
	struct ptff_qui qui;

	/* Initialize TOD steering parameters */
	tod_steering_end = *(unsigned long long *) &tod_clock_base[1];
	vdso_data->ts_end = tod_steering_end;

	if (!test_facility(28))
		return;

	ptff(&ptff_function_mask, sizeof(ptff_function_mask), PTFF_QAF);

	/* get LPAR offset */
	if (ptff_query(PTFF_QTO) && ptff(&qto, sizeof(qto), PTFF_QTO) == 0)
		lpar_offset = qto.tod_epoch_difference;

	/* get initial leap seconds */
	if (ptff_query(PTFF_QUI) && ptff(&qui, sizeof(qui), PTFF_QUI) == 0)
		initial_leap_seconds = (unsigned long long)
			((long) qui.old_leap * 4096000000L);
}
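/*
 * For illustration, assuming the architected TOD resolution of 1/4096
 * microsecond per clock unit: one second corresponds to
 * 1,000,000 us * 4096 = 4,096,000,000 TOD units, which is where the
 * 4096000000L factor above comes from. A reported value of, say, 27 leap
 * seconds would therefore be stored as 27 * 4096000000 TOD units in
 * initial_leap_seconds.
 */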
/*
 * Scheduler clock - returns current time in nanosec units.
 */
unsigned long long notrace sched_clock(void)
{
	return tod_to_ns(get_tod_clock_monotonic());
}
NOKPROBE_SYMBOL(sched_clock);

/*
 * Monotonic_clock - returns # of nanoseconds passed since time_init()
 */
unsigned long long monotonic_clock(void)
{
	return sched_clock();
}
EXPORT_SYMBOL(monotonic_clock);
static void ext_to_timespec64(unsigned char *clk, struct timespec64 *xt)
{
	unsigned long long high, low, rem, sec, nsec;

	/* Split extended TOD clock to micro-seconds and sub-micro-seconds */
	high = (*(unsigned long long *) clk) >> 4;
	low = (*(unsigned long long *)&clk[7]) << 4;
	/* Calculate seconds and nano-seconds */
	sec = high;
	rem = do_div(sec, 1000000);
	nsec = (((low >> 32) + (rem << 32)) * 1000) >> 32;

	xt->tv_sec = sec;
	xt->tv_nsec = nsec;
}
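/*
 * Rough sketch of the fixed point step above: "high" holds whole
 * microseconds since the TOD epoch and "low" the sub-microsecond bits in
 * the upper half of the word. do_div() leaves the microseconds of the
 * current second in "rem", so ((low >> 32) + (rem << 32)) is a 32.32 fixed
 * point microsecond value; multiplying by 1000 and shifting right by 32
 * converts it to integer nanoseconds.
 */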
void clock_comparator_work(void)
{
	struct clock_event_device *cd;

	S390_lowcore.clock_comparator = clock_comparator_max;
	cd = this_cpu_ptr(&comparators);
	cd->event_handler(cd);
}

static int s390_next_event(unsigned long delta,
			   struct clock_event_device *evt)
{
	S390_lowcore.clock_comparator = get_tod_clock() + delta;
	set_clock_comparator(S390_lowcore.clock_comparator);
	return 0;
}
/*
 * Set up lowcore and control register of the current cpu to
 * enable TOD clock and clock comparator interrupts.
 */
void init_cpu_timer(void)
{
	struct clock_event_device *cd;
	int cpu;

	S390_lowcore.clock_comparator = clock_comparator_max;
	set_clock_comparator(S390_lowcore.clock_comparator);

	cpu = smp_processor_id();
	cd = &per_cpu(comparators, cpu);
	cd->name = "comparator";
	cd->features = CLOCK_EVT_FEAT_ONESHOT;
	cd->min_delta_ns = 1;
	cd->min_delta_ticks = 1;
	cd->max_delta_ns = LONG_MAX;
	cd->max_delta_ticks = ULONG_MAX;
	cd->cpumask = cpumask_of(cpu);
	cd->set_next_event = s390_next_event;

	clockevents_register_device(cd);

	/* Enable clock comparator timer interrupt. */
	__ctl_set_bit(0, 11);

	/* Always allow the timing alert external interrupt. */
	__ctl_set_bit(0, 4);
}
static void clock_comparator_interrupt(struct ext_code ext_code,
				       unsigned int param32,
				       unsigned long param64)
{
	inc_irq_stat(IRQEXT_CLK);
	if (S390_lowcore.clock_comparator == clock_comparator_max)
		set_clock_comparator(S390_lowcore.clock_comparator);
}

static void stp_timing_alert(struct stp_irq_parm *);

static void timing_alert_interrupt(struct ext_code ext_code,
				   unsigned int param32, unsigned long param64)
{
	inc_irq_stat(IRQEXT_TLA);
	if (param32 & 0x00038000)
		stp_timing_alert((struct stp_irq_parm *) &param32);
}

static void stp_reset(void);
void read_persistent_clock64(struct timespec64 *ts)
{
	unsigned char clk[STORE_CLOCK_EXT_SIZE];
	__u64 delta;

	delta = initial_leap_seconds + TOD_UNIX_EPOCH;
	get_tod_clock_ext(clk);
	*(__u64 *) &clk[1] -= delta;
	if (*(__u64 *) &clk[1] > delta)
		clk[0]--;
	ext_to_timespec64(clk, ts);
}

void read_boot_clock64(struct timespec64 *ts)
{
	unsigned char clk[STORE_CLOCK_EXT_SIZE];
	__u64 delta;

	delta = initial_leap_seconds + TOD_UNIX_EPOCH;
	memcpy(clk, tod_clock_base, 16);
	*(__u64 *) &clk[1] -= delta;
	if (*(__u64 *) &clk[1] > delta)
		clk[0]--;
	ext_to_timespec64(clk, ts);
}
static u64 read_tod_clock(struct clocksource *cs)
{
	unsigned long long now, adj;

	preempt_disable(); /* protect from changes to steering parameters */
	now = get_tod_clock();
	adj = tod_steering_end - now;
	if (unlikely((s64) adj >= 0))
		/*
		 * manually steer by 1 cycle every 2^16 cycles. This
		 * corresponds to shifting the tod delta by 15. 1s is
		 * therefore steered in ~9h. The adjust will decrease
		 * over time, until it finally reaches 0.
		 */
		now += (tod_steering_delta < 0) ? (adj >> 15) : -(adj >> 15);
	preempt_enable();
	return now;
}
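/*
 * Rough numbers for the steering rate: with the shift by 15, the applied
 * correction shrinks by one TOD unit for every 2^15 TOD units of elapsed
 * time, i.e. the clock is slewed by about 1/32768 (~30.5 ppm). A full
 * 1 second offset is therefore absorbed after roughly 2^15 seconds, which
 * is the "~9h" mentioned in the comment above.
 */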
static struct clocksource clocksource_tod = {
	.read	= read_tod_clock,
	.flags	= CLOCK_SOURCE_IS_CONTINUOUS,
};

struct clocksource * __init clocksource_default_clock(void)
{
	return &clocksource_tod;
}
void update_vsyscall(struct timekeeper *tk)
{
	u64 nsecps;

	if (tk->tkr_mono.clock != &clocksource_tod)
		return;

	/* Make userspace gettimeofday spin until we're done. */
	++vdso_data->tb_update_count;
	smp_wmb();
	vdso_data->xtime_tod_stamp = tk->tkr_mono.cycle_last;
	vdso_data->xtime_clock_sec = tk->xtime_sec;
	vdso_data->xtime_clock_nsec = tk->tkr_mono.xtime_nsec;
	vdso_data->wtom_clock_sec =
		tk->xtime_sec + tk->wall_to_monotonic.tv_sec;
	vdso_data->wtom_clock_nsec = tk->tkr_mono.xtime_nsec +
		((u64) tk->wall_to_monotonic.tv_nsec << tk->tkr_mono.shift);
	nsecps = (u64) NSEC_PER_SEC << tk->tkr_mono.shift;
	while (vdso_data->wtom_clock_nsec >= nsecps) {
		vdso_data->wtom_clock_nsec -= nsecps;
		vdso_data->wtom_clock_sec++;
	}

	vdso_data->xtime_coarse_sec = tk->xtime_sec;
	vdso_data->xtime_coarse_nsec =
		(long)(tk->tkr_mono.xtime_nsec >> tk->tkr_mono.shift);
	vdso_data->wtom_coarse_sec =
		vdso_data->xtime_coarse_sec + tk->wall_to_monotonic.tv_sec;
	vdso_data->wtom_coarse_nsec =
		vdso_data->xtime_coarse_nsec + tk->wall_to_monotonic.tv_nsec;
	while (vdso_data->wtom_coarse_nsec >= NSEC_PER_SEC) {
		vdso_data->wtom_coarse_nsec -= NSEC_PER_SEC;
		vdso_data->wtom_coarse_sec++;
	}

	vdso_data->tk_mult = tk->tkr_mono.mult;
	vdso_data->tk_shift = tk->tkr_mono.shift;
	smp_wmb();
	++vdso_data->tb_update_count;
}

extern struct timezone sys_tz;

void update_vsyscall_tz(void)
{
	vdso_data->tz_minuteswest = sys_tz.tz_minuteswest;
	vdso_data->tz_dsttime = sys_tz.tz_dsttime;
}
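/*
 * The two tb_update_count increments around the stores above act like a
 * seqlock: the count is odd while an update is in flight, so a vdso reader
 * that samples the counter before and after reading the data can retry
 * when the two samples differ or are odd. That is what "make userspace
 * gettimeofday spin" refers to.
 */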
/*
 * Initialize the TOD clock and the CPU timer of
 * the boot cpu.
 */
void __init time_init(void)
{
	/* Reset time synchronization interfaces. */
	stp_reset();

	/* request the clock comparator external interrupt */
	if (register_external_irq(EXT_IRQ_CLK_COMP, clock_comparator_interrupt))
		panic("Couldn't request external interrupt 0x1004");

	/* request the timing alert external interrupt */
	if (register_external_irq(EXT_IRQ_TIMING_ALERT, timing_alert_interrupt))
		panic("Couldn't request external interrupt 0x1406");

	if (__clocksource_register(&clocksource_tod) != 0)
		panic("Could not register TOD clock source");

	/* Enable TOD clock interrupts on the boot cpu. */
	init_cpu_timer();

	/* Enable cpu timer interrupts on the boot cpu. */
	vtime_init();
}

static DEFINE_PER_CPU(atomic_t, clock_sync_word);
static DEFINE_MUTEX(clock_sync_mutex);
static unsigned long clock_sync_flags;

#define CLOCK_SYNC_HAS_STP		0
#define CLOCK_SYNC_STP			1
#define CLOCK_SYNC_STPINFO_VALID	2
/*
 * The get_clock function for the physical clock. It will get the current
 * TOD clock, subtract the LPAR offset and write the result to *clock.
 * The function returns 0 if the clock is in sync with the external time
 * source. If the clock mode is local it will return -EOPNOTSUPP and
 * -EAGAIN if the clock is not in sync with the external reference.
 */
int get_phys_clock(unsigned long *clock)
{
	atomic_t *sw_ptr;
	unsigned int sw0, sw1;

	sw_ptr = &get_cpu_var(clock_sync_word);
	sw0 = atomic_read(sw_ptr);
	*clock = get_tod_clock() - lpar_offset;
	sw1 = atomic_read(sw_ptr);
	put_cpu_var(clock_sync_word);
	if (sw0 == sw1 && (sw0 & 0x80000000U))
		/* Success: time is in sync. */
		return 0;
	if (!test_bit(CLOCK_SYNC_HAS_STP, &clock_sync_flags))
		return -EOPNOTSUPP;
	if (!test_bit(CLOCK_SYNC_STP, &clock_sync_flags))
		return -EACCES;
	return -EAGAIN;
}
EXPORT_SYMBOL(get_phys_clock);
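/*
 * A minimal usage sketch (hypothetical caller, for illustration only).
 * The per-cpu clock_sync_word keeps the "in sync" state in bit 2^31 and a
 * sequence count in the lower bits; reading it before and after the TOD
 * read lets get_phys_clock() detect a sync event that raced with it:
 *
 *	unsigned long tod;
 *	int rc;
 *
 *	rc = get_phys_clock(&tod);
 *	if (rc == 0)
 *		use tod: it is the LPAR-offset-corrected physical clock
 *	else if (rc == -EAGAIN)
 *		the clock is currently not in sync, try again later
 */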
/*
 * Make get_phys_clock() return -EAGAIN.
 */
static void disable_sync_clock(void *dummy)
{
	atomic_t *sw_ptr = this_cpu_ptr(&clock_sync_word);

	/*
	 * Clear the in-sync bit 2^31. All get_phys_clock calls will
	 * fail until the sync bit is turned back on. In addition
	 * increase the "sequence" counter to avoid the race of an
	 * stp event and the complete recovery against get_phys_clock.
	 */
	atomic_andnot(0x80000000, sw_ptr);
	atomic_inc(sw_ptr);
}

/*
 * Make get_phys_clock() return 0 again.
 * Needs to be called from a context disabled for preemption.
 */
static void enable_sync_clock(void)
{
	atomic_t *sw_ptr = this_cpu_ptr(&clock_sync_word);

	atomic_or(0x80000000, sw_ptr);
}

/*
 * Function to check if the clock is in sync.
 */
static inline int check_sync_clock(void)
{
	atomic_t *sw_ptr;
	int rc;

	sw_ptr = &get_cpu_var(clock_sync_word);
	rc = (atomic_read(sw_ptr) & 0x80000000U) != 0;
	put_cpu_var(clock_sync_word);
	return rc;
}
/*
 * Apply clock delta to the global data structures.
 * This is called once on the CPU that performed the clock sync.
 */
static void clock_sync_global(unsigned long long delta)
{
	unsigned long now, adj;
	struct ptff_qto qto;

	/* Fixup the monotonic sched clock. */
	*(unsigned long long *) &tod_clock_base[1] += delta;
	if (*(unsigned long long *) &tod_clock_base[1] < delta)
		/* Epoch overflow */
		tod_clock_base[0]++;
	/* Adjust TOD steering parameters. */
	vdso_data->tb_update_count++;
	now = get_tod_clock();
	adj = tod_steering_end - now;
	if (unlikely((s64) adj >= 0))
		/* Calculate how much of the old adjustment is left. */
		tod_steering_delta = (tod_steering_delta < 0) ?
			-(adj >> 15) : (adj >> 15);
	tod_steering_delta += delta;
	if ((abs(tod_steering_delta) >> 48) != 0)
		panic("TOD clock sync offset %lli is too large to drift\n",
		      tod_steering_delta);
	tod_steering_end = now + (abs(tod_steering_delta) << 15);
	vdso_data->ts_dir = (tod_steering_delta < 0) ? 0 : 1;
	vdso_data->ts_end = tod_steering_end;
	vdso_data->tb_update_count++;
	/* Update LPAR offset. */
	if (ptff_query(PTFF_QTO) && ptff(&qto, sizeof(qto), PTFF_QTO) == 0)
		lpar_offset = qto.tod_epoch_difference;
	/* Call the TOD clock change notifier. */
	atomic_notifier_call_chain(&s390_epoch_delta_notifier, 0, &delta);
}
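/*
 * For scale: tod_steering_delta is kept in TOD clock units, so a value with
 * bits at or above 2^48 corresponds to an offset of at least
 * 2^48 / 4096000000 seconds, i.e. roughly 68719 seconds or about 19 hours.
 * Shifting such a delta left by 15 for tod_steering_end would already reach
 * bit 63 of the clock value, so offsets of that size cannot be steered out
 * gradually and the code panics instead.
 */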
/*
 * Apply clock delta to the per-CPU data structures of this CPU.
 * This is called for each online CPU after the call to clock_sync_global.
 */
static void clock_sync_local(unsigned long long delta)
{
	/* Add the delta to the clock comparator. */
	if (S390_lowcore.clock_comparator != clock_comparator_max) {
		S390_lowcore.clock_comparator += delta;
		set_clock_comparator(S390_lowcore.clock_comparator);
	}
	/* Adjust the last_update_clock time-stamp. */
	S390_lowcore.last_update_clock += delta;
}
/* Single threaded workqueue used for stp sync events */
static struct workqueue_struct *time_sync_wq;

static void __init time_init_wq(void)
{
	if (time_sync_wq)
		return;
	time_sync_wq = create_singlethread_workqueue("timesync");
}

struct clock_sync_data {
	atomic_t cpus;
	int in_sync;
	unsigned long long clock_delta;
};

/*
 * Server Time Protocol (STP) code.
 */
static bool stp_online;
static struct stp_sstpi stp_info;
static void *stp_page;

static void stp_work_fn(struct work_struct *work);
static DEFINE_MUTEX(stp_work_mutex);
static DECLARE_WORK(stp_work, stp_work_fn);
static struct timer_list stp_timer;

static int __init early_parse_stp(char *p)
{
	return kstrtobool(p, &stp_online);
}
early_param("stp", early_parse_stp);
/*
 * Reset STP attachment.
 */
static void __init stp_reset(void)
{
	int rc;

	stp_page = (void *) get_zeroed_page(GFP_ATOMIC);
	rc = chsc_sstpc(stp_page, STP_OP_CTRL, 0x0000, NULL);
	if (rc == 0)
		set_bit(CLOCK_SYNC_HAS_STP, &clock_sync_flags);
	else if (stp_online) {
		pr_warn("The real or virtual hardware system does not provide an STP interface\n");
		free_page((unsigned long) stp_page);
		stp_page = NULL;
		stp_online = false;
	}
}

static void stp_timeout(unsigned long dummy)
{
	queue_work(time_sync_wq, &stp_work);
}

static int __init stp_init(void)
{
	if (!test_bit(CLOCK_SYNC_HAS_STP, &clock_sync_flags))
		return 0;
	setup_timer(&stp_timer, stp_timeout, 0UL);
	time_init_wq();
	if (!stp_online)
		return 0;
	queue_work(time_sync_wq, &stp_work);
	return 0;
}

arch_initcall(stp_init);
/*
 * STP timing alert. There are three causes:
 * 1) timing status change
 * 2) link availability change
 * 3) time control parameter change
 * In all three cases we are only interested in the clock source state.
 * If an STP clock source is now available use it.
 */
static void stp_timing_alert(struct stp_irq_parm *intparm)
{
	if (intparm->tsc || intparm->lac || intparm->tcpc)
		queue_work(time_sync_wq, &stp_work);
}

/*
 * STP sync check machine check. This is called when the timing state
 * changes from the synchronized state to the unsynchronized state.
 * After an STP sync check the clock is not in sync. The machine check
 * is broadcast to all cpus at the same time.
 */
int stp_sync_check(void)
{
	disable_sync_clock(NULL);
	return 1;
}

/*
 * STP island condition machine check. This is called when an attached
 * server attempts to communicate over an STP link and the servers
 * have matching CTN ids and have a valid stratum-1 configuration
 * but the configurations do not match.
 */
int stp_island_check(void)
{
	disable_sync_clock(NULL);
	return 1;
}

void stp_queue_work(void)
{
	queue_work(time_sync_wq, &stp_work);
}
static int __store_stpinfo(void)
{
	int rc = chsc_sstpi(stp_page, &stp_info, sizeof(struct stp_sstpi));

	if (rc)
		clear_bit(CLOCK_SYNC_STPINFO_VALID, &clock_sync_flags);
	else
		set_bit(CLOCK_SYNC_STPINFO_VALID, &clock_sync_flags);
	return rc;
}

static int stpinfo_valid(void)
{
	return stp_online && test_bit(CLOCK_SYNC_STPINFO_VALID, &clock_sync_flags);
}
static int stp_sync_clock(void *data)
{
	struct clock_sync_data *sync = data;
	unsigned long long clock_delta;
	static int first;
	int rc;

	enable_sync_clock();
	if (xchg(&first, 1) == 0) {
		/* Wait until all other cpus entered the sync function. */
		while (atomic_read(&sync->cpus) != 0)
			cpu_relax();
		rc = 0;
		if (stp_info.todoff[0] || stp_info.todoff[1] ||
		    stp_info.todoff[2] || stp_info.todoff[3] ||
		    stp_info.tmd != 2) {
			rc = chsc_sstpc(stp_page, STP_OP_SYNC, 0,
					&clock_delta);
			if (rc == 0) {
				sync->clock_delta = clock_delta;
				clock_sync_global(clock_delta);
				rc = __store_stpinfo();
				if (rc == 0 && stp_info.tmd != 2)
					rc = -EAGAIN;
			}
		}
		sync->in_sync = rc ? -EAGAIN : 1;
		xchg(&first, 0);
	} else {
		atomic_dec(&sync->cpus);
		/* Wait for in_sync to be set. */
		while (READ_ONCE(sync->in_sync) == 0)
			__udelay(1);
	}
	if (sync->in_sync != 1)
		/* Didn't work. Clear per-cpu in sync bit again. */
		disable_sync_clock(NULL);
	/* Apply clock delta to per-CPU fields of this CPU. */
	clock_sync_local(sync->clock_delta);

	return 0;
}
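/*
 * Summary of the rendezvous above: stp_sync_clock() runs on every online
 * CPU under stop_machine. Exactly one CPU wins the xchg() on "first" and
 * acts as the master: it waits for sync->cpus to drop to zero, performs
 * the STP sync and publishes the outcome in sync->in_sync. Every other CPU
 * decrements sync->cpus and busy-waits on sync->in_sync; all CPUs then
 * apply the delta to their own lowcore fields via clock_sync_local().
 */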
/*
 * STP work. Check for the STP state and take over the clock
 * synchronization if the STP clock source is usable.
 */
static void stp_work_fn(struct work_struct *work)
{
	struct clock_sync_data stp_sync;
	int rc;

	/* prevent multiple execution. */
	mutex_lock(&stp_work_mutex);

	if (!stp_online) {
		chsc_sstpc(stp_page, STP_OP_CTRL, 0x0000, NULL);
		del_timer_sync(&stp_timer);
		goto out_unlock;
	}

	rc = chsc_sstpc(stp_page, STP_OP_CTRL, 0xb0e0, NULL);
	if (rc)
		goto out_unlock;

	rc = __store_stpinfo();
	if (rc || stp_info.c == 0)
		goto out_unlock;

	/* Skip synchronization if the clock is already in sync. */
	if (check_sync_clock())
		goto out_unlock;

	memset(&stp_sync, 0, sizeof(stp_sync));
	cpus_read_lock();
	atomic_set(&stp_sync.cpus, num_online_cpus() - 1);
	stop_machine_cpuslocked(stp_sync_clock, &stp_sync, cpu_online_mask);
	cpus_read_unlock();

	if (!check_sync_clock())
		/*
		 * There is a usable clock but the synchronization failed.
		 * Retry after a second.
		 */
		mod_timer(&stp_timer, jiffies + HZ);

out_unlock:
	mutex_unlock(&stp_work_mutex);
}
/*
 * STP subsys sysfs interface functions
 */
static struct bus_type stp_subsys = {
	.name		= "stp",
	.dev_name	= "stp",
};

static ssize_t stp_ctn_id_show(struct device *dev,
			       struct device_attribute *attr,
			       char *buf)
{
	ssize_t ret = -ENODATA;

	mutex_lock(&stp_work_mutex);
	if (stpinfo_valid())
		ret = sprintf(buf, "%016llx\n",
			      *(unsigned long long *) stp_info.ctnid);
	mutex_unlock(&stp_work_mutex);
	return ret;
}

static DEVICE_ATTR(ctn_id, 0400, stp_ctn_id_show, NULL);
static ssize_t stp_ctn_type_show(struct device *dev,
				 struct device_attribute *attr,
				 char *buf)
{
	ssize_t ret = -ENODATA;

	mutex_lock(&stp_work_mutex);
	if (stpinfo_valid())
		ret = sprintf(buf, "%i\n", stp_info.ctn);
	mutex_unlock(&stp_work_mutex);
	return ret;
}

static DEVICE_ATTR(ctn_type, 0400, stp_ctn_type_show, NULL);
static ssize_t stp_dst_offset_show(struct device *dev,
				   struct device_attribute *attr,
				   char *buf)
{
	ssize_t ret = -ENODATA;

	mutex_lock(&stp_work_mutex);
	if (stpinfo_valid() && (stp_info.vbits & 0x2000))
		ret = sprintf(buf, "%i\n", (int)(s16) stp_info.dsto);
	mutex_unlock(&stp_work_mutex);
	return ret;
}

static DEVICE_ATTR(dst_offset, 0400, stp_dst_offset_show, NULL);
static ssize_t stp_leap_seconds_show(struct device *dev,
				     struct device_attribute *attr,
				     char *buf)
{
	ssize_t ret = -ENODATA;

	mutex_lock(&stp_work_mutex);
	if (stpinfo_valid() && (stp_info.vbits & 0x8000))
		ret = sprintf(buf, "%i\n", (int)(s16) stp_info.leaps);
	mutex_unlock(&stp_work_mutex);
	return ret;
}

static DEVICE_ATTR(leap_seconds, 0400, stp_leap_seconds_show, NULL);
static ssize_t stp_stratum_show(struct device *dev,
				struct device_attribute *attr,
				char *buf)
{
	ssize_t ret = -ENODATA;

	mutex_lock(&stp_work_mutex);
	if (stpinfo_valid())
		ret = sprintf(buf, "%i\n", (int)(s16) stp_info.stratum);
	mutex_unlock(&stp_work_mutex);
	return ret;
}

static DEVICE_ATTR(stratum, 0400, stp_stratum_show, NULL);
static ssize_t stp_time_offset_show(struct device *dev,
				    struct device_attribute *attr,
				    char *buf)
{
	ssize_t ret = -ENODATA;

	mutex_lock(&stp_work_mutex);
	if (stpinfo_valid() && (stp_info.vbits & 0x0800))
		ret = sprintf(buf, "%i\n", (int) stp_info.tto);
	mutex_unlock(&stp_work_mutex);
	return ret;
}

static DEVICE_ATTR(time_offset, 0400, stp_time_offset_show, NULL);
static ssize_t stp_time_zone_offset_show(struct device *dev,
					 struct device_attribute *attr,
					 char *buf)
{
	ssize_t ret = -ENODATA;

	mutex_lock(&stp_work_mutex);
	if (stpinfo_valid() && (stp_info.vbits & 0x4000))
		ret = sprintf(buf, "%i\n", (int)(s16) stp_info.tzo);
	mutex_unlock(&stp_work_mutex);
	return ret;
}

static DEVICE_ATTR(time_zone_offset, 0400,
		   stp_time_zone_offset_show, NULL);
static ssize_t stp_timing_mode_show(struct device *dev,
				    struct device_attribute *attr,
				    char *buf)
{
	ssize_t ret = -ENODATA;

	mutex_lock(&stp_work_mutex);
	if (stpinfo_valid())
		ret = sprintf(buf, "%i\n", stp_info.tmd);
	mutex_unlock(&stp_work_mutex);
	return ret;
}

static DEVICE_ATTR(timing_mode, 0400, stp_timing_mode_show, NULL);
static ssize_t stp_timing_state_show(struct device *dev,
				     struct device_attribute *attr,
				     char *buf)
{
	ssize_t ret = -ENODATA;

	mutex_lock(&stp_work_mutex);
	if (stpinfo_valid())
		ret = sprintf(buf, "%i\n", stp_info.tst);
	mutex_unlock(&stp_work_mutex);
	return ret;
}

static DEVICE_ATTR(timing_state, 0400, stp_timing_state_show, NULL);
static ssize_t stp_online_show(struct device *dev,
			       struct device_attribute *attr,
			       char *buf)
{
	return sprintf(buf, "%i\n", stp_online);
}

static ssize_t stp_online_store(struct device *dev,
				struct device_attribute *attr,
				const char *buf, size_t count)
{
	unsigned int value;

	value = simple_strtoul(buf, NULL, 0);
	if (value != 0 && value != 1)
		return -EINVAL;
	if (!test_bit(CLOCK_SYNC_HAS_STP, &clock_sync_flags))
		return -EOPNOTSUPP;
	mutex_lock(&clock_sync_mutex);
	stp_online = value;
	if (stp_online)
		set_bit(CLOCK_SYNC_STP, &clock_sync_flags);
	else
		clear_bit(CLOCK_SYNC_STP, &clock_sync_flags);
	queue_work(time_sync_wq, &stp_work);
	mutex_unlock(&clock_sync_mutex);
	return count;
}
/*
 * Can't use DEVICE_ATTR because the attribute should be named
 * stp/online but dev_attr_online already exists in this file ..
 */
static struct device_attribute dev_attr_stp_online = {
	.attr = { .name = "online", .mode = 0600 },
	.show	= stp_online_show,
	.store	= stp_online_store,
};

static struct device_attribute *stp_attributes[] = {
	&dev_attr_ctn_id,
	&dev_attr_ctn_type,
	&dev_attr_dst_offset,
	&dev_attr_leap_seconds,
	&dev_attr_stp_online,
	&dev_attr_stratum,
	&dev_attr_time_offset,
	&dev_attr_time_zone_offset,
	&dev_attr_timing_mode,
	&dev_attr_timing_state,
	NULL
};
static int __init stp_init_sysfs(void)
{
	struct device_attribute **attr;
	int rc;

	rc = subsys_system_register(&stp_subsys, NULL);
	if (rc)
		goto out;
	for (attr = stp_attributes; *attr; attr++) {
		rc = device_create_file(stp_subsys.dev_root, *attr);
		if (rc)
			goto out_unregister;
	}
	return 0;
out_unregister:
	for (; attr >= stp_attributes; attr--)
		device_remove_file(stp_subsys.dev_root, *attr);
	bus_unregister(&stp_subsys);
out:
	return rc;
}

device_initcall(stp_init_sysfs);