// SPDX-License-Identifier: GPL-2.0
/*
 *    Time of day based timer functions.
 *
 *  S390 version
 *    Copyright IBM Corp. 1999, 2008
 *    Author(s): Hartmut Penner (hp@de.ibm.com),
 *               Martin Schwidefsky (schwidefsky@de.ibm.com),
 *               Denis Joseph Barrow (djbarrow@de.ibm.com,barrow_dj@yahoo.com)
 *
 *  Derived from "arch/i386/kernel/time.c"
 *    Copyright (C) 1991, 1992, 1995  Linus Torvalds
 */

#define KMSG_COMPONENT "time"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <linux/kernel_stat.h>
#include <linux/errno.h>
#include <linux/export.h>
#include <linux/sched.h>
#include <linux/sched/clock.h>
#include <linux/kernel.h>
#include <linux/param.h>
#include <linux/string.h>
#include <linux/mm.h>
#include <linux/interrupt.h>
#include <linux/cpu.h>
#include <linux/stop_machine.h>
#include <linux/time.h>
#include <linux/device.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/smp.h>
#include <linux/types.h>
#include <linux/profile.h>
#include <linux/timex.h>
#include <linux/notifier.h>
#include <linux/timekeeper_internal.h>
#include <linux/clockchips.h>
#include <linux/gfp.h>
#include <linux/kprobes.h>
#include <linux/uaccess.h>
#include <vdso/vsyscall.h>
#include <vdso/clocksource.h>
#include <vdso/helpers.h>
#include <asm/facility.h>
#include <asm/delay.h>
#include <asm/div64.h>
#include <asm/vdso.h>
#include <asm/irq.h>
#include <asm/irq_regs.h>
#include <asm/vtimer.h>
#include <asm/stp.h>
#include <asm/cio.h>
#include "entry.h"

union tod_clock tod_clock_base __section(".data");
EXPORT_SYMBOL_GPL(tod_clock_base);

u64 clock_comparator_max = -1ULL;
EXPORT_SYMBOL_GPL(clock_comparator_max);

static DEFINE_PER_CPU(struct clock_event_device, comparators);

ATOMIC_NOTIFIER_HEAD(s390_epoch_delta_notifier);
EXPORT_SYMBOL(s390_epoch_delta_notifier);

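/*
 * Usage sketch (not part of this file): a consumer such as KVM can
 * subscribe to TOD epoch changes. clock_sync_global() invokes the chain
 * with a pointer to the unsigned long clock delta as the data argument;
 * the callback and notifier_block names below are hypothetical.
 *
 *	static int my_epoch_cb(struct notifier_block *nb,
 *			       unsigned long action, void *data)
 *	{
 *		unsigned long delta = *(unsigned long *) data;
 *
 *		adjust_my_timestamps(delta);
 *		return NOTIFY_OK;
 *	}
 *	static struct notifier_block my_epoch_nb = {
 *		.notifier_call = my_epoch_cb,
 *	};
 *
 *	atomic_notifier_chain_register(&s390_epoch_delta_notifier, &my_epoch_nb);
 */
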
unsigned char ptff_function_mask[16];

static unsigned long lpar_offset;
static unsigned long initial_leap_seconds;
static unsigned long tod_steering_end;
static long tod_steering_delta;

/*
 * Get time offsets with PTFF
 */
void __init time_early_init(void)
{
	struct ptff_qto qto;
	struct ptff_qui qui;

	/* Initialize TOD steering parameters */
	tod_steering_end = tod_clock_base.tod;
	vdso_data->arch_data.tod_steering_end = tod_steering_end;

	if (!test_facility(28))
		return;

	ptff(&ptff_function_mask, sizeof(ptff_function_mask), PTFF_QAF);

	/* get LPAR offset */
	if (ptff_query(PTFF_QTO) && ptff(&qto, sizeof(qto), PTFF_QTO) == 0)
		lpar_offset = qto.tod_epoch_difference;

	/* get initial leap seconds */
	if (ptff_query(PTFF_QUI) && ptff(&qui, sizeof(qui), PTFF_QUI) == 0)
		initial_leap_seconds = (unsigned long)
			((long) qui.old_leap * 4096000000L);
}

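/*
 * Note on the scaling above: qui.old_leap is a count of seconds, and one
 * second corresponds to 4096000000 TOD clock units, since TOD bit 51
 * increments every microsecond (1 us = 4096 units of bit 63).
 */
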
/*
 * Scheduler clock - returns current time in nanosec units.
 */
unsigned long long notrace sched_clock(void)
{
	return tod_to_ns(get_tod_clock_monotonic());
}
NOKPROBE_SYMBOL(sched_clock);

static void ext_to_timespec64(union tod_clock *clk, struct timespec64 *xt)
{
	unsigned long rem, sec, nsec;

	sec = clk->us;
	rem = do_div(sec, 1000000);
	nsec = ((clk->sus + (rem << 12)) * 125) >> 9;
	xt->tv_sec = sec;
	xt->tv_nsec = nsec;
}

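/*
 * Conversion math above: clk->us holds the microsecond part of the
 * extended TOD value, which do_div() splits into full seconds and a
 * microsecond remainder. The sub-microsecond part clk->sus (units of
 * 1/4096 us) plus the remainder shifted into the same scale is turned
 * into nanoseconds by multiplying with 1000/4096, written as (* 125) >> 9.
 */
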
void clock_comparator_work(void)
{
	struct clock_event_device *cd;

	S390_lowcore.clock_comparator = clock_comparator_max;
	cd = this_cpu_ptr(&comparators);
	cd->event_handler(cd);
}

static int s390_next_event(unsigned long delta,
			   struct clock_event_device *evt)
{
	S390_lowcore.clock_comparator = get_tod_clock() + delta;
	set_clock_comparator(S390_lowcore.clock_comparator);
	return 0;
}

/*
 * Set up lowcore and control register of the current cpu to
 * enable TOD clock and clock comparator interrupts.
 */
void init_cpu_timer(void)
{
	struct clock_event_device *cd;
	int cpu;

	S390_lowcore.clock_comparator = clock_comparator_max;
	set_clock_comparator(S390_lowcore.clock_comparator);

	cpu = smp_processor_id();
	cd = &per_cpu(comparators, cpu);
	cd->name		= "comparator";
	cd->features		= CLOCK_EVT_FEAT_ONESHOT;
	cd->mult		= 16777;
	cd->shift		= 12;
	cd->min_delta_ns	= 1;
	cd->min_delta_ticks	= 1;
	cd->max_delta_ns	= LONG_MAX;
	cd->max_delta_ticks	= ULONG_MAX;
	cd->rating		= 400;
	cd->cpumask		= cpumask_of(cpu);
	cd->set_next_event	= s390_next_event;

	clockevents_register_device(cd);

	/* Enable clock comparator timer interrupt. */
	__ctl_set_bit(0, 11);

	/* Always allow the timing alert external interrupt. */
	__ctl_set_bit(0, 4);
}

static void clock_comparator_interrupt(struct ext_code ext_code,
				       unsigned int param32,
				       unsigned long param64)
{
	inc_irq_stat(IRQEXT_CLK);
	if (S390_lowcore.clock_comparator == clock_comparator_max)
		set_clock_comparator(S390_lowcore.clock_comparator);
}

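/*
 * The clock comparator keeps its external interrupt condition pending
 * as long as the comparator value is less than the TOD clock; when no
 * timer is armed, rewriting the (maximum) lowcore value to the hardware
 * comparator silences the pending condition.
 */
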
static void stp_timing_alert(struct stp_irq_parm *);

static void timing_alert_interrupt(struct ext_code ext_code,
				   unsigned int param32, unsigned long param64)
{
	inc_irq_stat(IRQEXT_TLA);
	if (param32 & 0x00038000)
		stp_timing_alert((struct stp_irq_parm *) &param32);
}

static void stp_reset(void);

void read_persistent_clock64(struct timespec64 *ts)
{
	union tod_clock clk;
	u64 delta;

	delta = initial_leap_seconds + TOD_UNIX_EPOCH;
	store_tod_clock_ext(&clk);
	clk.eitod -= delta;
	ext_to_timespec64(&clk, ts);
}

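/*
 * TOD_UNIX_EPOCH is the TOD clock value at the Unix epoch: the TOD
 * clock counts from 1900-01-01, so subtracting it (together with the
 * accumulated leap seconds) converts the hardware value to Unix time.
 */
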
void __init read_persistent_wall_and_boot_offset(struct timespec64 *wall_time,
						 struct timespec64 *boot_offset)
{
	struct timespec64 boot_time;
	union tod_clock clk;
	u64 delta;

	delta = initial_leap_seconds + TOD_UNIX_EPOCH;
	clk = tod_clock_base;
	clk.eitod -= delta;
	ext_to_timespec64(&clk, &boot_time);

	read_persistent_clock64(wall_time);
	*boot_offset = timespec64_sub(*wall_time, boot_time);
}

static u64 read_tod_clock(struct clocksource *cs)
{
	unsigned long now, adj;

	preempt_disable(); /* protect from changes to steering parameters */
	now = get_tod_clock();
	adj = tod_steering_end - now;
	if (unlikely((s64) adj > 0))
		/*
		 * manually steer by 1 cycle every 2^16 cycles. This
		 * corresponds to shifting the tod delta by 15. 1s is
		 * therefore steered in ~9h. The adjust will decrease
		 * over time, until it finally reaches 0.
		 */
		now += (tod_steering_delta < 0) ? (adj >> 15) : -(adj >> 15);
	preempt_enable();
	return now;
}

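/*
 * Worked example: after a sync that moved the clock by one second
 * (4096000000 TOD units), tod_steering_end is set to
 * now + (4096000000 << 15). On each read the remaining distance adj is
 * divided by 2^15, so the reported time is offset by at most one second
 * and the offset shrinks linearly to zero over 4096000000 * 2^15 clock
 * units, i.e. roughly nine hours, matching the comment above.
 */
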
static struct clocksource clocksource_tod = {
	.name		= "tod",
	.rating		= 400,
	.read		= read_tod_clock,
	.mask		= CLOCKSOURCE_MASK(64),
	.mult		= 1000,
	.shift		= 12,
	.flags		= CLOCK_SOURCE_IS_CONTINUOUS,
	.vdso_clock_mode = VDSO_CLOCKMODE_TOD,
};

struct clocksource * __init clocksource_default_clock(void)
{
	return &clocksource_tod;
}

/*
 * Initialize the TOD clock and the CPU timer of
 * the boot cpu.
 */
void __init time_init(void)
{
	/* Reset time synchronization interfaces. */
	stp_reset();

	/* request the clock comparator external interrupt */
	if (register_external_irq(EXT_IRQ_CLK_COMP, clock_comparator_interrupt))
		panic("Couldn't request external interrupt 0x1004");

	/* request the timing alert external interrupt */
	if (register_external_irq(EXT_IRQ_TIMING_ALERT, timing_alert_interrupt))
		panic("Couldn't request external interrupt 0x1406");

	if (__clocksource_register(&clocksource_tod) != 0)
		panic("Could not register TOD clock source");

	/* Enable TOD clock interrupts on the boot cpu. */
	init_cpu_timer();

	/* Enable cpu timer interrupts on the boot cpu. */
	vtime_init();
}

static DEFINE_PER_CPU(atomic_t, clock_sync_word);
static DEFINE_MUTEX(stp_mutex);
static unsigned long clock_sync_flags;

#define CLOCK_SYNC_HAS_STP		0
#define CLOCK_SYNC_STP			1
#define CLOCK_SYNC_STPINFO_VALID	2

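/*
 * clock_sync_flags bits: CLOCK_SYNC_HAS_STP is set once stp_reset()
 * detects a usable STP interface, CLOCK_SYNC_STP tracks whether the
 * user enabled STP synchronization, and CLOCK_SYNC_STPINFO_VALID says
 * whether the cached stp_info was filled by a successful chsc_sstpi().
 */
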
/*
 * The get_clock function for the physical clock. It will get the current
 * TOD clock, subtract the LPAR offset and write the result to *clock.
 * The function returns 0 if the clock is in sync with the external time
 * source. If the clock mode is local it returns -EOPNOTSUPP; if STP is
 * not enabled it returns -EACCES; and if the clock is simply not (yet)
 * in sync with the external reference it returns -EAGAIN.
 */
int get_phys_clock(unsigned long *clock)
{
	atomic_t *sw_ptr;
	unsigned int sw0, sw1;

	sw_ptr = &get_cpu_var(clock_sync_word);
	sw0 = atomic_read(sw_ptr);
	*clock = get_tod_clock() - lpar_offset;
	sw1 = atomic_read(sw_ptr);
	put_cpu_var(clock_sync_word);
	if (sw0 == sw1 && (sw0 & 0x80000000U))
		/* Success: time is in sync. */
		return 0;
	if (!test_bit(CLOCK_SYNC_HAS_STP, &clock_sync_flags))
		return -EOPNOTSUPP;
	if (!test_bit(CLOCK_SYNC_STP, &clock_sync_flags))
		return -EACCES;
	return -EAGAIN;
}
EXPORT_SYMBOL(get_phys_clock);

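/*
 * Usage sketch (hypothetical caller): -EAGAIN only means that a clock
 * sync is in flight or has invalidated the in-sync bit, so callers that
 * need a timestamp typically retry:
 *
 *	unsigned long tod;
 *	int rc;
 *
 *	do {
 *		rc = get_phys_clock(&tod);
 *	} while (rc == -EAGAIN);
 */
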
/*
 * Make get_phys_clock() return -EAGAIN.
 */
static void disable_sync_clock(void *dummy)
{
	atomic_t *sw_ptr = this_cpu_ptr(&clock_sync_word);

	/*
	 * Clear the in-sync bit 2^31. All get_phys_clock calls will
	 * fail until the sync bit is turned back on. In addition
	 * increase the "sequence" counter to avoid the race of an
	 * stp event and the complete recovery against get_phys_clock.
	 */
	atomic_andnot(0x80000000, sw_ptr);
	atomic_inc(sw_ptr);
}

/*
 * Make get_phys_clock() return 0 again.
 * Needs to be called from a context disabled for preemption.
 */
static void enable_sync_clock(void)
{
	atomic_t *sw_ptr = this_cpu_ptr(&clock_sync_word);
	atomic_or(0x80000000, sw_ptr);
}

/*
 * Function to check if the clock is in sync.
 */
static inline int check_sync_clock(void)
{
	atomic_t *sw_ptr;
	int rc;

	sw_ptr = &get_cpu_var(clock_sync_word);
	rc = (atomic_read(sw_ptr) & 0x80000000U) != 0;
	put_cpu_var(clock_sync_word);
	return rc;
}

/*
 * Apply clock delta to the global data structures.
 * This is called once on the CPU that performed the clock sync.
 */
static void clock_sync_global(unsigned long delta)
{
	unsigned long now, adj;
	struct ptff_qto qto;

	/* Fixup the monotonic sched clock. */
	tod_clock_base.eitod += delta;
	/* Adjust TOD steering parameters. */
	now = get_tod_clock();
	adj = tod_steering_end - now;
	if (unlikely((s64) adj >= 0))
		/* Calculate how much of the old adjustment is left. */
		tod_steering_delta = (tod_steering_delta < 0) ?
			-(adj >> 15) : (adj >> 15);
	tod_steering_delta += delta;
	if ((abs(tod_steering_delta) >> 48) != 0)
		panic("TOD clock sync offset %li is too large to drift\n",
		      tod_steering_delta);
	tod_steering_end = now + (abs(tod_steering_delta) << 15);
	vdso_data->arch_data.tod_steering_end = tod_steering_end;

	/* Update LPAR offset. */
	if (ptff_query(PTFF_QTO) && ptff(&qto, sizeof(qto), PTFF_QTO) == 0)
		lpar_offset = qto.tod_epoch_difference;
	/* Call the TOD clock change notifier. */
	atomic_notifier_call_chain(&s390_epoch_delta_notifier, 0, &delta);
}

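/*
 * The 48-bit limit above is presumably chosen so that the shift by 15
 * when computing tod_steering_end cannot overflow 64 bits; a delta of
 * 2^48 TOD clock units already corresponds to about 19 hours, far more
 * than any sane sync adjustment.
 */
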
/*
 * Apply clock delta to the per-CPU data structures of this CPU.
 * This is called for each online CPU after the call to clock_sync_global.
 */
static void clock_sync_local(unsigned long delta)
{
	/* Add the delta to the clock comparator. */
	if (S390_lowcore.clock_comparator != clock_comparator_max) {
		S390_lowcore.clock_comparator += delta;
		set_clock_comparator(S390_lowcore.clock_comparator);
	}
	/* Adjust the last_update_clock time-stamp. */
	S390_lowcore.last_update_clock += delta;
}

/* Single threaded workqueue used for stp sync events */
static struct workqueue_struct *time_sync_wq;

static void __init time_init_wq(void)
{
	if (time_sync_wq)
		return;
	time_sync_wq = create_singlethread_workqueue("timesync");
}

struct clock_sync_data {
	atomic_t cpus;
	int in_sync;
	unsigned long clock_delta;
};

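/*
 * cpus counts the CPUs that still have to enter stp_sync_clock(); the
 * CPU elected to do the sync waits for it to drop to zero. in_sync
 * starts at 0 and is set to 1 on success or -EAGAIN on failure, which
 * releases the other CPUs from their busy wait.
 */
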
/*
 * Server Time Protocol (STP) code.
 */
static bool stp_online;
static struct stp_sstpi stp_info;
static void *stp_page;

static void stp_work_fn(struct work_struct *work);
static DECLARE_WORK(stp_work, stp_work_fn);
static struct timer_list stp_timer;

static int __init early_parse_stp(char *p)
{
	return kstrtobool(p, &stp_online);
}
early_param("stp", early_parse_stp);

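/*
 * Example: booting with "stp=1" (or any other value kstrtobool accepts,
 * e.g. "on"/"off") pre-sets stp_online before stp_init() queues the
 * first synchronization work.
 */
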
/*
 * Reset STP attachment.
 */
static void __init stp_reset(void)
{
	int rc;

	stp_page = (void *) get_zeroed_page(GFP_ATOMIC);
	rc = chsc_sstpc(stp_page, STP_OP_CTRL, 0x0000, NULL);
	if (rc == 0)
		set_bit(CLOCK_SYNC_HAS_STP, &clock_sync_flags);
	else if (stp_online) {
		pr_warn("The real or virtual hardware system does not provide an STP interface\n");
		free_page((unsigned long) stp_page);
		stp_page = NULL;
		stp_online = false;
	}
}

static void stp_timeout(struct timer_list *unused)
{
	queue_work(time_sync_wq, &stp_work);
}

static int __init stp_init(void)
{
	if (!test_bit(CLOCK_SYNC_HAS_STP, &clock_sync_flags))
		return 0;
	timer_setup(&stp_timer, stp_timeout, 0);
	time_init_wq();
	if (!stp_online)
		return 0;
	queue_work(time_sync_wq, &stp_work);
	return 0;
}

arch_initcall(stp_init);

/*
 * STP timing alert. There are three causes:
 * 1) timing status change
 * 2) link availability change
 * 3) time control parameter change
 * In all three cases we are only interested in the clock source state.
 * If an STP clock source is now available use it.
 */
static void stp_timing_alert(struct stp_irq_parm *intparm)
{
	if (intparm->tsc || intparm->lac || intparm->tcpc)
		queue_work(time_sync_wq, &stp_work);
}

/*
 * STP sync check machine check. This is called when the timing state
 * changes from the synchronized state to the unsynchronized state.
 * After an STP sync check the clock is not in sync. The machine check
 * is broadcast to all cpus at the same time.
 */
int stp_sync_check(void)
{
	disable_sync_clock(NULL);
	return 1;
}

/*
 * STP island condition machine check. This is called when an attached
 * server attempts to communicate over an STP link and the servers
 * have matching CTN ids and have a valid stratum-1 configuration
 * but the configurations do not match.
 */
int stp_island_check(void)
{
	disable_sync_clock(NULL);
	return 1;
}

void stp_queue_work(void)
{
	queue_work(time_sync_wq, &stp_work);
}

static int __store_stpinfo(void)
{
	int rc = chsc_sstpi(stp_page, &stp_info, sizeof(struct stp_sstpi));

	if (rc)
		clear_bit(CLOCK_SYNC_STPINFO_VALID, &clock_sync_flags);
	else
		set_bit(CLOCK_SYNC_STPINFO_VALID, &clock_sync_flags);
	return rc;
}

static int stpinfo_valid(void)
{
	return stp_online && test_bit(CLOCK_SYNC_STPINFO_VALID, &clock_sync_flags);
}

static int stp_sync_clock(void *data)
{
	struct clock_sync_data *sync = data;
	u64 clock_delta, flags;
	static int first;
	int rc;

	enable_sync_clock();
	if (xchg(&first, 1) == 0) {
		/* Wait until all other cpus entered the sync function. */
		while (atomic_read(&sync->cpus) != 0)
			cpu_relax();
		rc = 0;
		if (stp_info.todoff[0] || stp_info.todoff[1] ||
		    stp_info.todoff[2] || stp_info.todoff[3] ||
		    stp_info.tmd != 2) {
			flags = vdso_update_begin();
			rc = chsc_sstpc(stp_page, STP_OP_SYNC, 0,
					&clock_delta);
			if (rc == 0) {
				sync->clock_delta = clock_delta;
				clock_sync_global(clock_delta);
				rc = __store_stpinfo();
				if (rc == 0 && stp_info.tmd != 2)
					rc = -EAGAIN;
			}
			vdso_update_end(flags);
		}
		sync->in_sync = rc ? -EAGAIN : 1;
		xchg(&first, 0);
	} else {
		/* Slave */
		atomic_dec(&sync->cpus);
		/* Wait for in_sync to be set. */
		while (READ_ONCE(sync->in_sync) == 0)
			__udelay(1);
	}
	if (sync->in_sync != 1)
		/* Didn't work. Clear per-cpu in sync bit again. */
		disable_sync_clock(NULL);
	/* Apply clock delta to per-CPU fields of this CPU. */
	clock_sync_local(sync->clock_delta);

	return 0;
}

static int stp_clear_leap(void)
{
	struct __kernel_timex txc;
	int ret;

	memset(&txc, 0, sizeof(txc));

	ret = do_adjtimex(&txc);
	if (ret < 0)
		return ret;

	txc.modes = ADJ_STATUS;
	txc.status &= ~(STA_INS|STA_DEL);
	return do_adjtimex(&txc);
}

static void stp_check_leap(void)
{
	struct stp_stzi stzi;
	struct stp_lsoib *lsoib = &stzi.lsoib;
	struct __kernel_timex txc;
	int64_t timediff;
	int leapdiff, ret;

	if (!stp_info.lu || !check_sync_clock()) {
		/*
		 * Either a scheduled leap second was removed by the operator,
		 * or STP is out of sync. In both cases, clear the leap second
		 * kernel flags.
		 */
		if (stp_clear_leap() < 0)
			pr_err("failed to clear leap second flags\n");
		return;
	}

	if (chsc_stzi(stp_page, &stzi, sizeof(stzi))) {
		pr_err("stzi failed\n");
		return;
	}

	timediff = tod_to_ns(lsoib->nlsout - get_tod_clock()) / NSEC_PER_SEC;
	leapdiff = lsoib->nlso - lsoib->also;

	if (leapdiff != 1 && leapdiff != -1) {
		pr_err("Cannot schedule %d leap seconds\n", leapdiff);
		return;
	}

	if (timediff < 0) {
		if (stp_clear_leap() < 0)
			pr_err("failed to clear leap second flags\n");
	} else if (timediff < 7200) {
		memset(&txc, 0, sizeof(txc));
		ret = do_adjtimex(&txc);
		if (ret < 0)
			return;

		txc.modes = ADJ_STATUS;
		if (leapdiff > 0)
			txc.status |= STA_INS;
		else
			txc.status |= STA_DEL;
		ret = do_adjtimex(&txc);
		if (ret < 0)
			pr_err("failed to set leap second flags\n");
		/* arm Timer to clear leap second flags */
		mod_timer(&stp_timer, jiffies + msecs_to_jiffies(14400 * MSEC_PER_SEC));
	} else {
		/* The day the leap second is scheduled for hasn't been reached. Retry
		 * in one hour.
		 */
		mod_timer(&stp_timer, jiffies + msecs_to_jiffies(3600 * MSEC_PER_SEC));
	}
}

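/*
 * Timing of the leap second handling above: the STA_INS/STA_DEL flag is
 * only armed within 7200 seconds (two hours) of the event; stp_timer
 * then fires 14400 seconds (four hours) later to clear the flags again.
 * If the event is still more than two hours away, the check is simply
 * retried every hour.
 */
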
/*
 * STP work. Check for the STP state and take over the clock
 * synchronization if the STP clock source is usable.
 */
static void stp_work_fn(struct work_struct *work)
{
	struct clock_sync_data stp_sync;
	int rc;

	/* prevent multiple execution. */
	mutex_lock(&stp_mutex);

	if (!stp_online) {
		chsc_sstpc(stp_page, STP_OP_CTRL, 0x0000, NULL);
		del_timer_sync(&stp_timer);
		goto out_unlock;
	}

	rc = chsc_sstpc(stp_page, STP_OP_CTRL, 0xf0e0, NULL);
	if (rc)
		goto out_unlock;

	rc = __store_stpinfo();
	if (rc || stp_info.c == 0)
		goto out_unlock;

	/* Skip synchronization if the clock is already in sync. */
	if (!check_sync_clock()) {
		memset(&stp_sync, 0, sizeof(stp_sync));
		cpus_read_lock();
		atomic_set(&stp_sync.cpus, num_online_cpus() - 1);
		stop_machine_cpuslocked(stp_sync_clock, &stp_sync, cpu_online_mask);
		cpus_read_unlock();
	}

	if (!check_sync_clock())
		/*
		 * There is a usable clock but the synchronization failed.
		 * Retry after a second.
		 */
		mod_timer(&stp_timer, jiffies + msecs_to_jiffies(MSEC_PER_SEC));
	else if (stp_info.lu)
		stp_check_leap();

out_unlock:
	mutex_unlock(&stp_mutex);
}

/*
 * STP subsys sysfs interface functions
 */
static struct bus_type stp_subsys = {
	.name		= "stp",
	.dev_name	= "stp",
};

static ssize_t ctn_id_show(struct device *dev,
			   struct device_attribute *attr,
			   char *buf)
{
	ssize_t ret = -ENODATA;

	mutex_lock(&stp_mutex);
	if (stpinfo_valid())
		ret = sprintf(buf, "%016lx\n",
			      *(unsigned long *) stp_info.ctnid);
	mutex_unlock(&stp_mutex);
	return ret;
}

static DEVICE_ATTR_RO(ctn_id);

static ssize_t ctn_type_show(struct device *dev,
			     struct device_attribute *attr,
			     char *buf)
{
	ssize_t ret = -ENODATA;

	mutex_lock(&stp_mutex);
	if (stpinfo_valid())
		ret = sprintf(buf, "%i\n", stp_info.ctn);
	mutex_unlock(&stp_mutex);
	return ret;
}

static DEVICE_ATTR_RO(ctn_type);

static ssize_t dst_offset_show(struct device *dev,
			       struct device_attribute *attr,
			       char *buf)
{
	ssize_t ret = -ENODATA;

	mutex_lock(&stp_mutex);
	if (stpinfo_valid() && (stp_info.vbits & 0x2000))
		ret = sprintf(buf, "%i\n", (int)(s16) stp_info.dsto);
	mutex_unlock(&stp_mutex);
	return ret;
}

static DEVICE_ATTR_RO(dst_offset);

static ssize_t leap_seconds_show(struct device *dev,
				 struct device_attribute *attr,
				 char *buf)
{
	ssize_t ret = -ENODATA;

	mutex_lock(&stp_mutex);
	if (stpinfo_valid() && (stp_info.vbits & 0x8000))
		ret = sprintf(buf, "%i\n", (int)(s16) stp_info.leaps);
	mutex_unlock(&stp_mutex);
	return ret;
}

static DEVICE_ATTR_RO(leap_seconds);

static ssize_t leap_seconds_scheduled_show(struct device *dev,
					   struct device_attribute *attr,
					   char *buf)
{
	struct stp_stzi stzi;
	ssize_t ret;

	mutex_lock(&stp_mutex);
	if (!stpinfo_valid() || !(stp_info.vbits & 0x8000) || !stp_info.lu) {
		mutex_unlock(&stp_mutex);
		return -ENODATA;
	}

	ret = chsc_stzi(stp_page, &stzi, sizeof(stzi));
	mutex_unlock(&stp_mutex);
	if (ret < 0)
		return ret;

	if (!stzi.lsoib.p)
		return sprintf(buf, "0,0\n");

	return sprintf(buf, "%lu,%d\n",
		       tod_to_ns(stzi.lsoib.nlsout - TOD_UNIX_EPOCH) / NSEC_PER_SEC,
		       stzi.lsoib.nlso - stzi.lsoib.also);
}

static DEVICE_ATTR_RO(leap_seconds_scheduled);

static ssize_t stratum_show(struct device *dev,
			    struct device_attribute *attr,
			    char *buf)
{
	ssize_t ret = -ENODATA;

	mutex_lock(&stp_mutex);
	if (stpinfo_valid())
		ret = sprintf(buf, "%i\n", (int)(s16) stp_info.stratum);
	mutex_unlock(&stp_mutex);
	return ret;
}

static DEVICE_ATTR_RO(stratum);

static ssize_t time_offset_show(struct device *dev,
				struct device_attribute *attr,
				char *buf)
{
	ssize_t ret = -ENODATA;

	mutex_lock(&stp_mutex);
	if (stpinfo_valid() && (stp_info.vbits & 0x0800))
		ret = sprintf(buf, "%i\n", (int) stp_info.tto);
	mutex_unlock(&stp_mutex);
	return ret;
}

static DEVICE_ATTR_RO(time_offset);

static ssize_t time_zone_offset_show(struct device *dev,
				     struct device_attribute *attr,
				     char *buf)
{
	ssize_t ret = -ENODATA;

	mutex_lock(&stp_mutex);
	if (stpinfo_valid() && (stp_info.vbits & 0x4000))
		ret = sprintf(buf, "%i\n", (int)(s16) stp_info.tzo);
	mutex_unlock(&stp_mutex);
	return ret;
}

static DEVICE_ATTR_RO(time_zone_offset);

static ssize_t timing_mode_show(struct device *dev,
				struct device_attribute *attr,
				char *buf)
{
	ssize_t ret = -ENODATA;

	mutex_lock(&stp_mutex);
	if (stpinfo_valid())
		ret = sprintf(buf, "%i\n", stp_info.tmd);
	mutex_unlock(&stp_mutex);
	return ret;
}

static DEVICE_ATTR_RO(timing_mode);

static ssize_t timing_state_show(struct device *dev,
				 struct device_attribute *attr,
				 char *buf)
{
	ssize_t ret = -ENODATA;

	mutex_lock(&stp_mutex);
	if (stpinfo_valid())
		ret = sprintf(buf, "%i\n", stp_info.tst);
	mutex_unlock(&stp_mutex);
	return ret;
}

static DEVICE_ATTR_RO(timing_state);

static ssize_t online_show(struct device *dev,
			   struct device_attribute *attr,
			   char *buf)
{
	return sprintf(buf, "%i\n", stp_online);
}

static ssize_t online_store(struct device *dev,
			    struct device_attribute *attr,
			    const char *buf, size_t count)
{
	unsigned int value;

	value = simple_strtoul(buf, NULL, 0);
	if (value != 0 && value != 1)
		return -EINVAL;
	if (!test_bit(CLOCK_SYNC_HAS_STP, &clock_sync_flags))
		return -EOPNOTSUPP;
	mutex_lock(&stp_mutex);
	stp_online = value;
	if (stp_online)
		set_bit(CLOCK_SYNC_STP, &clock_sync_flags);
	else
		clear_bit(CLOCK_SYNC_STP, &clock_sync_flags);
	queue_work(time_sync_wq, &stp_work);
	mutex_unlock(&stp_mutex);
	return count;
}

/*
 * Can't use DEVICE_ATTR because the attribute should be named
 * stp/online but dev_attr_online already exists in this file ..
 */
static DEVICE_ATTR_RW(online);

static struct attribute *stp_dev_attrs[] = {
	&dev_attr_ctn_id.attr,
	&dev_attr_ctn_type.attr,
	&dev_attr_dst_offset.attr,
	&dev_attr_leap_seconds.attr,
	&dev_attr_online.attr,
	&dev_attr_leap_seconds_scheduled.attr,
	&dev_attr_stratum.attr,
	&dev_attr_time_offset.attr,
	&dev_attr_time_zone_offset.attr,
	&dev_attr_timing_mode.attr,
	&dev_attr_timing_state.attr,
	NULL
};
ATTRIBUTE_GROUPS(stp_dev);

static int __init stp_init_sysfs(void)
{
	return subsys_system_register(&stp_subsys, stp_dev_groups);
}

device_initcall(stp_init_sysfs);