1 // SPDX-License-Identifier: GPL-2.0+
3 * 2002-10-15 Posix Clocks & timers
4 * by George Anzinger george@mvista.com
5 * Copyright (C) 2002 2003 by MontaVista Software.
7 * 2004-06-01 Fix CLOCK_REALTIME clock/timer TIMER_ABSTIME bug.
8 * Copyright (C) 2004 Boris Hu
10 * These are all the functions necessary to implement POSIX clocks & timers
13 #include <linux/interrupt.h>
14 #include <linux/slab.h>
15 #include <linux/time.h>
16 #include <linux/mutex.h>
17 #include <linux/sched/task.h>
19 #include <linux/uaccess.h>
20 #include <linux/list.h>
21 #include <linux/init.h>
22 #include <linux/compiler.h>
23 #include <linux/hash.h>
24 #include <linux/posix-clock.h>
25 #include <linux/posix-timers.h>
26 #include <linux/syscalls.h>
27 #include <linux/wait.h>
28 #include <linux/workqueue.h>
29 #include <linux/export.h>
30 #include <linux/hashtable.h>
31 #include <linux/compat.h>
32 #include <linux/nospec.h>
33 #include <linux/time_namespace.h>
35 #include "timekeeping.h"
36 #include "posix-timers.h"
38 static struct kmem_cache *posix_timers_cache;
41 * Timers are managed in a hash table for lockless lookup. The hash key is
42 * constructed from current::signal and the timer ID and the timer is
43 * matched against current::signal and the timer ID when walking the hash bucket list.
46 * This allows checkpoint/restore to reconstruct the exact timer IDs for a process.
49 static DEFINE_HASHTABLE(posix_timers_hashtable, 9);
50 static DEFINE_SPINLOCK(hash_lock);
52 static const struct k_clock * const posix_clocks[];
53 static const struct k_clock *clockid_to_kclock(const clockid_t id);
54 static const struct k_clock clock_realtime, clock_monotonic;
57 * we assume that the new SIGEV_THREAD_ID shares no bits with the other
58 * SIGEV values. Here we emit a compile-time error if this assumption fails.
60 #if SIGEV_THREAD_ID != (SIGEV_THREAD_ID & \
61 ~(SIGEV_SIGNAL | SIGEV_NONE | SIGEV_THREAD))
62 #error "SIGEV_THREAD_ID must not share bits with other SIGEV values!"
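/*
 * For reference, the UAPI defines SIGEV_SIGNAL = 0, SIGEV_NONE = 1,
 * SIGEV_THREAD = 2 and SIGEV_THREAD_ID = 4, so the THREAD_ID bit is
 * indeed disjoint from the other notification modes.
 */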
66 * CLOCKs: The POSIX standard calls for a couple of clocks and allows us
67 * to implement others. This structure defines the various clocks and allows the possibility of adding others.
70 * FUNCTIONS: The CLOCKs structure defines possible functions to
71 * handle various clock functions.
73 * The standard POSIX timer management code assumes the
74 * following: 1.) The k_itimer struct (sched.h) is used for
75 * the timer. 2.) The list, it_lock, it_clock, it_id and
76 * it_pid fields are not modified by timer code.
78 * Permissions: It is assumed that the clock_settime() function defined
79 * for each clock will take care of permission checks. Some
80 * clocks may be settable by any user (i.e. local process
81 * clocks), others not. Currently the only settable clock we
82 * have is CLOCK_REALTIME and its high res counterpart, both of
83 * which we beg off on and pass to do_sys_settimeofday().
85 static struct k_itimer *__lock_timer(timer_t timer_id, unsigned long *flags);
87 #define lock_timer(tid, flags) \
88 ({ struct k_itimer *__timr; \
89 __cond_lock(&__timr->it_lock, __timr = __lock_timer(tid, flags)); \
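/*
 * __cond_lock() is a sparse annotation: it tells the checker that
 * __timr->it_lock is acquired only when __lock_timer() returns a timer,
 * so lock_timer()/unlock_timer() pairs do not trigger context imbalance
 * warnings. In a regular build it simply evaluates the wrapped expression.
 */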
93 static int hash(struct signal_struct *sig, unsigned int nr)
95 return hash_32(hash32_ptr(sig) ^ nr, HASH_BITS(posix_timers_hashtable));
98 static struct k_itimer *__posix_timers_find(struct hlist_head *head,
99 struct signal_struct *sig,
102 struct k_itimer *timer;
104 hlist_for_each_entry_rcu(timer, head, t_hash, lockdep_is_held(&hash_lock)) {
105 /* timer->it_signal can be set concurrently */
106 if ((READ_ONCE(timer->it_signal) == sig) && (timer->it_id == id))
112 static struct k_itimer *posix_timer_by_id(timer_t id)
114 struct signal_struct *sig = current->signal;
115 struct hlist_head *head = &posix_timers_hashtable[hash(sig, id)];
117 return __posix_timers_find(head, sig, id);
120 static int posix_timer_add(struct k_itimer *timer)
122 struct signal_struct *sig = current->signal;
123 struct hlist_head *head;
124 unsigned int cnt, id;
127 * FIXME: Replace this by a per signal struct xarray once there is
128 * a plan to handle the resulting CRIU regression gracefully.
130 for (cnt = 0; cnt <= INT_MAX; cnt++) {
131 spin_lock(&hash_lock);
132 id = sig->next_posix_timer_id;
134 /* Write the next ID back. Clamp it to the positive space */
135 sig->next_posix_timer_id = (id + 1) & INT_MAX;
137 head = &posix_timers_hashtable[hash(sig, id)];
138 if (!__posix_timers_find(head, sig, id)) {
139 hlist_add_head_rcu(&timer->t_hash, head);
140 spin_unlock(&hash_lock);
143 spin_unlock(&hash_lock);
145 /* POSIX return code when no timer ID could be allocated */
149 static inline void unlock_timer(struct k_itimer *timr, unsigned long flags)
151 spin_unlock_irqrestore(&timr->it_lock, flags);
154 /* Get clock_realtime */
155 static int posix_get_realtime_timespec(clockid_t which_clock, struct timespec64 *tp)
157 ktime_get_real_ts64(tp);
161 static ktime_t posix_get_realtime_ktime(clockid_t which_clock)
163 return ktime_get_real();
166 /* Set clock_realtime */
167 static int posix_clock_realtime_set(const clockid_t which_clock,
168 const struct timespec64 *tp)
170 return do_sys_settimeofday64(tp, NULL);
173 static int posix_clock_realtime_adj(const clockid_t which_clock,
174 struct __kernel_timex *t)
176 return do_adjtimex(t);
180 * Get monotonic time for posix timers
182 static int posix_get_monotonic_timespec(clockid_t which_clock, struct timespec64 *tp)
185 timens_add_monotonic(tp);
189 static ktime_t posix_get_monotonic_ktime(clockid_t which_clock)
195 * Get monotonic-raw time for posix timers
197 static int posix_get_monotonic_raw(clockid_t which_clock, struct timespec64 *tp)
199 ktime_get_raw_ts64(tp);
200 timens_add_monotonic(tp);
205 static int posix_get_realtime_coarse(clockid_t which_clock, struct timespec64 *tp)
207 ktime_get_coarse_real_ts64(tp);
211 static int posix_get_monotonic_coarse(clockid_t which_clock,
212 struct timespec64 *tp)
214 ktime_get_coarse_ts64(tp);
215 timens_add_monotonic(tp);
219 static int posix_get_coarse_res(const clockid_t which_clock, struct timespec64 *tp)
221 *tp = ktime_to_timespec64(KTIME_LOW_RES);
225 static int posix_get_boottime_timespec(const clockid_t which_clock, struct timespec64 *tp)
227 ktime_get_boottime_ts64(tp);
228 timens_add_boottime(tp);
232 static ktime_t posix_get_boottime_ktime(const clockid_t which_clock)
234 return ktime_get_boottime();
237 static int posix_get_tai_timespec(clockid_t which_clock, struct timespec64 *tp)
239 ktime_get_clocktai_ts64(tp);
243 static ktime_t posix_get_tai_ktime(clockid_t which_clock)
245 return ktime_get_clocktai();
248 static int posix_get_hrtimer_res(clockid_t which_clock, struct timespec64 *tp)
251 tp->tv_nsec = hrtimer_resolution;
256 * Initialize everything, well, just everything in Posix clocks/timers ;)
258 static __init int init_posix_timers(void)
260 posix_timers_cache = kmem_cache_create("posix_timers_cache",
261 sizeof(struct k_itimer), 0,
262 SLAB_PANIC | SLAB_ACCOUNT, NULL);
265 __initcall(init_posix_timers);
268 * The siginfo si_overrun field and the return value of timer_getoverrun(2)
269 * are of type int. Clamp the overrun value to INT_MAX
271 static inline int timer_overrun_to_int(struct k_itimer *timr, int baseval)
273 s64 sum = timr->it_overrun_last + (s64)baseval;
275 return sum > (s64)INT_MAX ? INT_MAX : (int)sum;
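/*
 * Worked example of the clamping above: with it_overrun_last already at
 * INT_MAX (2147483647) and a pending si_overrun of 3, the 64-bit sum is
 * 2147483650, which exceeds INT_MAX and is therefore clamped, so
 * timer_getoverrun(2) reports INT_MAX instead of a wrapped negative value.
 */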
278 static void common_hrtimer_rearm(struct k_itimer *timr)
280 struct hrtimer *timer = &timr->it.real.timer;
282 timr->it_overrun += hrtimer_forward(timer, timer->base->get_time(),
284 hrtimer_restart(timer);
288 * This function is exported for use by the signal delivery code. It is
289 * called just prior to the info block being released and passes that
290 * block to us. Its function is to update the overrun entry AND to
291 * restart the timer. It should only be called if the timer is to be
292 * restarted (i.e. we have flagged this in the sys_private entry of the info block).
295 * To protect against the timer going away while the interrupt is queued,
296 * we require that the it_requeue_pending flag be set.
298 void posixtimer_rearm(struct kernel_siginfo *info)
300 struct k_itimer *timr;
303 timr = lock_timer(info->si_tid, &flags);
307 if (timr->it_interval && timr->it_requeue_pending == info->si_sys_private) {
308 timr->kclock->timer_rearm(timr);
311 timr->it_overrun_last = timr->it_overrun;
312 timr->it_overrun = -1LL;
313 ++timr->it_requeue_pending;
315 info->si_overrun = timer_overrun_to_int(timr, info->si_overrun);
318 unlock_timer(timr, flags);
321 int posix_timer_event(struct k_itimer *timr, int si_private)
326 * FIXME: if ->sigq is queued we can race with
327 * dequeue_signal()->posixtimer_rearm().
329 * If dequeue_signal() sees the "right" value of
330 * si_sys_private it calls posixtimer_rearm().
331 * We re-queue ->sigq and drop ->it_lock().
332 * posixtimer_rearm() locks the timer
333 * and re-schedules it while ->sigq is pending.
334 * Not really bad, but not what we want.
336 timr->sigq->info.si_sys_private = si_private;
338 type = !(timr->it_sigev_notify & SIGEV_THREAD_ID) ? PIDTYPE_TGID : PIDTYPE_PID;
339 ret = send_sigqueue(timr->sigq, timr->it_pid, type);
340 /* If we failed to send the signal the timer stops. */
345 * This function gets called when a POSIX.1b interval timer expires. It
346 * is used as a callback from the kernel internal timer. The
347 * run_timer_list code ALWAYS calls with interrupts on.
349 * This code is for CLOCK_REALTIME* and CLOCK_MONOTONIC* timers.
351 static enum hrtimer_restart posix_timer_fn(struct hrtimer *timer)
353 struct k_itimer *timr;
356 enum hrtimer_restart ret = HRTIMER_NORESTART;
358 timr = container_of(timer, struct k_itimer, it.real.timer);
359 spin_lock_irqsave(&timr->it_lock, flags);
362 if (timr->it_interval != 0)
363 si_private = ++timr->it_requeue_pending;
365 if (posix_timer_event(timr, si_private)) {
367 * The signal was not sent because the signal is ignored (SIG_IGN),
368 * so we will not get a callback to restart it AND
369 * it should be restarted.
371 if (timr->it_interval != 0) {
372 ktime_t now = hrtimer_cb_get_time(timer);
375 * FIXME: What we really want, is to stop this
376 * timer completely and restart it in case the
377 * SIG_IGN is removed. This is a non trivial
378 * change which involves sighand locking
379 * (sigh !), which we don't want to do late in
382 * For now we just let timers with an interval
383 * less than a jiffie expire every jiffie to
384 * avoid softirq starvation in case of SIG_IGN
385 * and a very small interval, which would put
386 * the timer right back on the softirq pending
387 * list. By moving now ahead of time we trick
388 * hrtimer_forward() to expire the timer
389 * later, while we still maintain the overrun
390 * accuracy, but have some inconsistency in
391 * the timer_gettime() case. This is at least
392 * better than a starved softirq. A more
393 * complex fix which solves also another related
394 * inconsistency is already in the pipeline.
396 #ifdef CONFIG_HIGH_RES_TIMERS
398 ktime_t kj = NSEC_PER_SEC / HZ;
400 if (timr->it_interval < kj)
401 now = ktime_add(now, kj);
404 timr->it_overrun += hrtimer_forward(timer, now,
406 ret = HRTIMER_RESTART;
407 ++timr->it_requeue_pending;
412 unlock_timer(timr, flags);
416 static struct pid *good_sigevent(sigevent_t * event)
418 struct pid *pid = task_tgid(current);
419 struct task_struct *rtn;
421 switch (event->sigev_notify) {
422 case SIGEV_SIGNAL | SIGEV_THREAD_ID:
423 pid = find_vpid(event->sigev_notify_thread_id);
424 rtn = pid_task(pid, PIDTYPE_PID);
425 if (!rtn || !same_thread_group(rtn, current))
430 if (event->sigev_signo <= 0 || event->sigev_signo > SIGRTMAX)
440 static struct k_itimer * alloc_posix_timer(void)
442 struct k_itimer *tmr;
443 tmr = kmem_cache_zalloc(posix_timers_cache, GFP_KERNEL);
446 if (unlikely(!(tmr->sigq = sigqueue_alloc()))) {
447 kmem_cache_free(posix_timers_cache, tmr);
450 clear_siginfo(&tmr->sigq->info);
454 static void k_itimer_rcu_free(struct rcu_head *head)
456 struct k_itimer *tmr = container_of(head, struct k_itimer, rcu);
458 kmem_cache_free(posix_timers_cache, tmr);
461 static void posix_timer_free(struct k_itimer *tmr)
463 put_pid(tmr->it_pid);
464 sigqueue_free(tmr->sigq);
465 call_rcu(&tmr->rcu, k_itimer_rcu_free);
468 static void posix_timer_unhash_and_free(struct k_itimer *tmr)
470 spin_lock(&hash_lock);
471 hlist_del_rcu(&tmr->t_hash);
472 spin_unlock(&hash_lock);
473 posix_timer_free(tmr);
476 static int common_timer_create(struct k_itimer *new_timer)
478 hrtimer_init(&new_timer->it.real.timer, new_timer->it_clock, 0);
482 /* Create a POSIX.1b interval timer. */
483 static int do_timer_create(clockid_t which_clock, struct sigevent *event,
484 timer_t __user *created_timer_id)
486 const struct k_clock *kc = clockid_to_kclock(which_clock);
487 struct k_itimer *new_timer;
488 int error, new_timer_id;
492 if (!kc->timer_create)
495 new_timer = alloc_posix_timer();
496 if (unlikely(!new_timer))
499 spin_lock_init(&new_timer->it_lock);
502 * Add the timer to the hash table. The timer is not yet valid
503 * because new_timer::it_signal is still NULL. The timer id is also
504 * not yet visible to user space.
506 new_timer_id = posix_timer_add(new_timer);
507 if (new_timer_id < 0) {
508 posix_timer_free(new_timer);
512 new_timer->it_id = (timer_t) new_timer_id;
513 new_timer->it_clock = which_clock;
514 new_timer->kclock = kc;
515 new_timer->it_overrun = -1LL;
519 new_timer->it_pid = get_pid(good_sigevent(event));
521 if (!new_timer->it_pid) {
525 new_timer->it_sigev_notify = event->sigev_notify;
526 new_timer->sigq->info.si_signo = event->sigev_signo;
527 new_timer->sigq->info.si_value = event->sigev_value;
529 new_timer->it_sigev_notify = SIGEV_SIGNAL;
530 new_timer->sigq->info.si_signo = SIGALRM;
531 memset(&new_timer->sigq->info.si_value, 0, sizeof(sigval_t));
532 new_timer->sigq->info.si_value.sival_int = new_timer->it_id;
533 new_timer->it_pid = get_pid(task_tgid(current));
536 new_timer->sigq->info.si_tid = new_timer->it_id;
537 new_timer->sigq->info.si_code = SI_TIMER;
539 if (copy_to_user(created_timer_id,
540 &new_timer_id, sizeof (new_timer_id))) {
545 error = kc->timer_create(new_timer);
549 spin_lock_irq(&current->sighand->siglock);
550 /* This makes the timer valid in the hash table */
551 WRITE_ONCE(new_timer->it_signal, current->signal);
552 list_add(&new_timer->list, &current->signal->posix_timers);
553 spin_unlock_irq(&current->sighand->siglock);
557 * In the case of the timer belonging to another task, after
558 * the task is unlocked, the timer is owned by the other task
559 * and may cease to exist at any time. Don't use or modify
560 * new_timer after the unlock call.
563 posix_timer_unhash_and_free(new_timer);
567 SYSCALL_DEFINE3(timer_create, const clockid_t, which_clock,
568 struct sigevent __user *, timer_event_spec,
569 timer_t __user *, created_timer_id)
571 if (timer_event_spec) {
574 if (copy_from_user(&event, timer_event_spec, sizeof (event)))
576 return do_timer_create(which_clock, &event, created_timer_id);
578 return do_timer_create(which_clock, NULL, created_timer_id);
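/*
 * Illustrative user-space sketch (an assumed example, not kernel code)
 * exercising the sigevent handling above: with SIGEV_THREAD_ID the signal
 * is delivered to one specific thread of the caller's thread group, while
 * a NULL sigevent falls back to SIGEV_SIGNAL/SIGALRM as set up in
 * do_timer_create(). Older libcs may require a manual
 * "#define sigev_notify_thread_id _sigev_un._tid".
 *
 *	#define _GNU_SOURCE
 *	#include <signal.h>
 *	#include <time.h>
 *	#include <unistd.h>
 *	#include <sys/syscall.h>
 *
 *	static int create_thread_timer(timer_t *id)
 *	{
 *		struct sigevent sev = {
 *			.sigev_notify		= SIGEV_THREAD_ID,
 *			.sigev_signo		= SIGRTMIN,
 *			.sigev_notify_thread_id	= syscall(SYS_gettid),
 *		};
 *
 *		return timer_create(CLOCK_MONOTONIC, &sev, id);
 *	}
 */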
582 COMPAT_SYSCALL_DEFINE3(timer_create, clockid_t, which_clock,
583 struct compat_sigevent __user *, timer_event_spec,
584 timer_t __user *, created_timer_id)
586 if (timer_event_spec) {
589 if (get_compat_sigevent(&event, timer_event_spec))
591 return do_timer_create(which_clock, &event, created_timer_id);
593 return do_timer_create(which_clock, NULL, created_timer_id);
597 static struct k_itimer *__lock_timer(timer_t timer_id, unsigned long *flags)
599 struct k_itimer *timr;
602 * timer_t could be any type >= int and we want to make sure any
603 * @timer_id outside positive int range fails lookup.
605 if ((unsigned long long)timer_id > INT_MAX)
609 * The hash lookup and the timers are RCU protected.
611 * Timers are added to the hash in invalid state where
612 * timr::it_signal == NULL. timer::it_signal is only set after the
613 * rest of the initialization succeeded.
615 * Timer destruction happens in steps:
616 * 1) Set timr::it_signal to NULL with timr::it_lock held
617 * 2) Release timr::it_lock
618 * 3) Remove from the hash under hash_lock
619 * 4) Call RCU for removal after the grace period
621 * Holding rcu_read_lock() across the lookup ensures that
622 * the timer cannot be freed.
624 * The lookup validates locklessly that timr::it_signal ==
625 * current::it_signal and timr::it_id == @timer_id. timr::it_id
626 * can't change, but timr::it_signal becomes NULL during destruction.
630 timr = posix_timer_by_id(timer_id);
632 spin_lock_irqsave(&timr->it_lock, *flags);
634 * Validate under timr::it_lock that timr::it_signal is
635 * still valid. Pairs with #1 above.
637 if (timr->it_signal == current->signal) {
641 spin_unlock_irqrestore(&timr->it_lock, *flags);
648 static ktime_t common_hrtimer_remaining(struct k_itimer *timr, ktime_t now)
650 struct hrtimer *timer = &timr->it.real.timer;
652 return __hrtimer_expires_remaining_adjusted(timer, now);
655 static s64 common_hrtimer_forward(struct k_itimer *timr, ktime_t now)
657 struct hrtimer *timer = &timr->it.real.timer;
659 return hrtimer_forward(timer, now, timr->it_interval);
663 * Get the time remaining on a POSIX.1b interval timer.
665 * Two issues to handle here:
667 * 1) The timer has a requeue pending. The return value must appear as
668 * if the timer has been requeued right now.
670 * 2) The timer is a SIGEV_NONE timer. These timers are never enqueued
671 * into the hrtimer queue and therefore never expired. Emulate expiry
672 * here taking #1 into account.
674 void common_timer_get(struct k_itimer *timr, struct itimerspec64 *cur_setting)
676 const struct k_clock *kc = timr->kclock;
677 ktime_t now, remaining, iv;
680 sig_none = timr->it_sigev_notify == SIGEV_NONE;
681 iv = timr->it_interval;
683 /* interval timer ? */
685 cur_setting->it_interval = ktime_to_timespec64(iv);
686 } else if (!timr->it_active) {
688 * SIGEV_NONE oneshot timers are never queued and therefore
689 * timr->it_active is always false. The check below
690 * vs. remaining time will handle this case.
692 * For all other timers there is nothing to update here, so return.
699 now = kc->clock_get_ktime(timr->it_clock);
702 * If this is an interval timer and either has requeue pending or
703 * is a SIGEV_NONE timer move the expiry time forward by intervals,
704 * so expiry is > now.
706 if (iv && (timr->it_requeue_pending & REQUEUE_PENDING || sig_none))
707 timr->it_overrun += kc->timer_forward(timr, now);
709 remaining = kc->timer_remaining(timr, now);
711 * As @now is retrieved before a possible timer_forward() and
712 * cannot be reevaluated by the compiler @remaining is based on the
713 * same @now value. Therefore @remaining is consistent vs. @now.
715 * Consequently all interval timers, i.e. @iv > 0, cannot have a
716 * remaining time <= 0 because timer_forward() guarantees to move
717 * them forward so that the next timer expiry is > @now.
719 if (remaining <= 0) {
721 * A single shot SIGEV_NONE timer must return 0, when it is
722 * expired! Timers which have a real signal delivery mode
723 * must return a remaining time greater than 0 because the
724 * signal has not yet been delivered.
727 cur_setting->it_value.tv_nsec = 1;
729 cur_setting->it_value = ktime_to_timespec64(remaining);
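/*
 * Illustrative user-space view of the logic above (assumed example): an
 * expired one-shot SIGEV_NONE timer reads back with it_value == 0, while
 * an expired timer armed for signal delivery reports a 1 ns remainder
 * because the signal has not been delivered yet.
 *
 *	struct itimerspec cur;
 *
 *	if (timer_gettime(id, &cur) == 0 &&
 *	    cur.it_value.tv_sec == 0 && cur.it_value.tv_nsec == 0) {
 *		// the one-shot SIGEV_NONE timer has expired
 *	}
 */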
733 static int do_timer_gettime(timer_t timer_id, struct itimerspec64 *setting)
735 struct k_itimer *timr;
736 const struct k_clock *kc;
740 timr = lock_timer(timer_id, &flags);
744 memset(setting, 0, sizeof(*setting));
746 if (WARN_ON_ONCE(!kc || !kc->timer_get))
749 kc->timer_get(timr, setting);
751 unlock_timer(timr, flags);
755 /* Get the time remaining on a POSIX.1b interval timer. */
756 SYSCALL_DEFINE2(timer_gettime, timer_t, timer_id,
757 struct __kernel_itimerspec __user *, setting)
759 struct itimerspec64 cur_setting;
761 int ret = do_timer_gettime(timer_id, &cur_setting);
763 if (put_itimerspec64(&cur_setting, setting))
769 #ifdef CONFIG_COMPAT_32BIT_TIME
771 SYSCALL_DEFINE2(timer_gettime32, timer_t, timer_id,
772 struct old_itimerspec32 __user *, setting)
774 struct itimerspec64 cur_setting;
776 int ret = do_timer_gettime(timer_id, &cur_setting);
778 if (put_old_itimerspec32(&cur_setting, setting))
787 * Get the number of overruns of a POSIX.1b interval timer. This is to
788 * be the overrun of the timer last delivered. At the same time we are
789 * accumulating overruns on the next timer. The overrun is frozen when
790 * the signal is delivered, either at the notify time (if the info block
791 * is not queued) or at the actual delivery time (as we are informed by
792 * the callback to posixtimer_rearm()). So all we need to do is
793 * to pick up the frozen overrun.
795 SYSCALL_DEFINE1(timer_getoverrun, timer_t, timer_id)
797 struct k_itimer *timr;
801 timr = lock_timer(timer_id, &flags);
805 overrun = timer_overrun_to_int(timr, 0);
806 unlock_timer(timr, flags);
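/*
 * Illustrative numbers for the semantics above (assumed example): a
 * periodic timer with a 1 ms interval whose signal handler is blocked for
 * roughly 5 ms misses four further expiries while the original signal is
 * pending; timer_getoverrun(2) invoked from the handler then returns 4,
 * i.e. the overrun frozen at delivery time.
 */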
811 static void common_hrtimer_arm(struct k_itimer *timr, ktime_t expires,
812 bool absolute, bool sigev_none)
814 struct hrtimer *timer = &timr->it.real.timer;
815 enum hrtimer_mode mode;
817 mode = absolute ? HRTIMER_MODE_ABS : HRTIMER_MODE_REL;
819 * Posix magic: Relative CLOCK_REALTIME timers are not affected by
820 * clock modifications, so they become CLOCK_MONOTONIC based under the
821 * hood. See hrtimer_init(). Update timr->kclock, so the generic
822 * functions which use timr->kclock->clock_get_*() work.
824 * Note: it_clock stays unmodified, because the next timer_set() might
825 * use ABSTIME, so it needs to switch back.
827 if (timr->it_clock == CLOCK_REALTIME)
828 timr->kclock = absolute ? &clock_realtime : &clock_monotonic;
830 hrtimer_init(&timr->it.real.timer, timr->it_clock, mode);
831 timr->it.real.timer.function = posix_timer_fn;
834 expires = ktime_add_safe(expires, timer->base->get_time());
835 hrtimer_set_expires(timer, expires);
838 hrtimer_start_expires(timer, HRTIMER_MODE_ABS);
841 static int common_hrtimer_try_to_cancel(struct k_itimer *timr)
843 return hrtimer_try_to_cancel(&timr->it.real.timer);
846 static void common_timer_wait_running(struct k_itimer *timer)
848 hrtimer_cancel_wait_running(&timer->it.real.timer);
852 * On PREEMPT_RT this prevents priority inversion and a potential livelock
853 * against the ksoftirqd thread in case that ksoftirqd gets preempted while
854 * executing a hrtimer callback.
856 * See the comments in hrtimer_cancel_wait_running(). For PREEMPT_RT=n this
857 * just results in a cpu_relax().
859 * For POSIX CPU timers with CONFIG_POSIX_CPU_TIMERS_TASK_WORK=n this is
860 * just a cpu_relax(). With CONFIG_POSIX_CPU_TIMERS_TASK_WORK=y this
861 * prevents spinning on an eventually scheduled out task and a livelock
862 * when the task which tries to delete or disarm the timer has preempted
863 * the task which runs the expiry in task work context.
865 static struct k_itimer *timer_wait_running(struct k_itimer *timer,
866 unsigned long *flags)
868 const struct k_clock *kc = READ_ONCE(timer->kclock);
869 timer_t timer_id = READ_ONCE(timer->it_id);
871 /* Prevent kfree(timer) after dropping the lock */
873 unlock_timer(timer, *flags);
876 * kc->timer_wait_running() might drop RCU lock. So @timer
877 * cannot be touched anymore after the function returns!
879 if (!WARN_ON_ONCE(!kc->timer_wait_running))
880 kc->timer_wait_running(timer);
883 /* Relock the timer. It might no longer be hashed. */
884 return lock_timer(timer_id, flags);
887 /* Set a POSIX.1b interval timer. */
888 int common_timer_set(struct k_itimer *timr, int flags,
889 struct itimerspec64 *new_setting,
890 struct itimerspec64 *old_setting)
892 const struct k_clock *kc = timr->kclock;
897 common_timer_get(timr, old_setting);
899 /* Prevent rearming by clearing the interval */
900 timr->it_interval = 0;
902 * Careful here. On SMP systems the timer expiry function could be
903 * active and spinning on timr->it_lock.
905 if (kc->timer_try_to_cancel(timr) < 0)
909 timr->it_requeue_pending = (timr->it_requeue_pending + 2) &
911 timr->it_overrun_last = 0;
913 /* Switch off the timer when it_value is zero */
914 if (!new_setting->it_value.tv_sec && !new_setting->it_value.tv_nsec)
917 timr->it_interval = timespec64_to_ktime(new_setting->it_interval);
918 expires = timespec64_to_ktime(new_setting->it_value);
919 if (flags & TIMER_ABSTIME)
920 expires = timens_ktime_to_host(timr->it_clock, expires);
921 sigev_none = timr->it_sigev_notify == SIGEV_NONE;
923 kc->timer_arm(timr, expires, flags & TIMER_ABSTIME, sigev_none);
924 timr->it_active = !sigev_none;
928 static int do_timer_settime(timer_t timer_id, int tmr_flags,
929 struct itimerspec64 *new_spec64,
930 struct itimerspec64 *old_spec64)
932 const struct k_clock *kc;
933 struct k_itimer *timr;
937 if (!timespec64_valid(&new_spec64->it_interval) ||
938 !timespec64_valid(&new_spec64->it_value))
942 memset(old_spec64, 0, sizeof(*old_spec64));
944 timr = lock_timer(timer_id, &flags);
950 if (WARN_ON_ONCE(!kc || !kc->timer_set))
953 error = kc->timer_set(timr, tmr_flags, new_spec64, old_spec64);
955 if (error == TIMER_RETRY) {
956 // We already got the old time...
958 /* Unlocks and relocks the timer if it still exists */
959 timr = timer_wait_running(timr, &flags);
962 unlock_timer(timr, flags);
967 /* Set a POSIX.1b interval timer */
968 SYSCALL_DEFINE4(timer_settime, timer_t, timer_id, int, flags,
969 const struct __kernel_itimerspec __user *, new_setting,
970 struct __kernel_itimerspec __user *, old_setting)
972 struct itimerspec64 new_spec, old_spec;
973 struct itimerspec64 *rtn = old_setting ? &old_spec : NULL;
979 if (get_itimerspec64(&new_spec, new_setting))
982 error = do_timer_settime(timer_id, flags, &new_spec, rtn);
983 if (!error && old_setting) {
984 if (put_itimerspec64(&old_spec, old_setting))
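/*
 * Illustrative user-space sketch (assumed example): arming a 100 ms
 * periodic timer and disarming it again. As in common_timer_set(), a
 * non-zero it_interval makes the timer periodic and an all-zero it_value
 * switches it off.
 *
 *	struct itimerspec its = {
 *		.it_value    = { .tv_sec = 0, .tv_nsec = 100 * 1000 * 1000 },
 *		.it_interval = { .tv_sec = 0, .tv_nsec = 100 * 1000 * 1000 },
 *	};
 *
 *	timer_settime(id, 0, &its, NULL);	// arm, relative to now
 *	...
 *	its.it_value.tv_sec  = 0;
 *	its.it_value.tv_nsec = 0;
 *	timer_settime(id, 0, &its, NULL);	// disarm
 */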
990 #ifdef CONFIG_COMPAT_32BIT_TIME
991 SYSCALL_DEFINE4(timer_settime32, timer_t, timer_id, int, flags,
992 struct old_itimerspec32 __user *, new,
993 struct old_itimerspec32 __user *, old)
995 struct itimerspec64 new_spec, old_spec;
996 struct itimerspec64 *rtn = old ? &old_spec : NULL;
1001 if (get_old_itimerspec32(&new_spec, new))
1004 error = do_timer_settime(timer_id, flags, &new_spec, rtn);
1005 if (!error && old) {
1006 if (put_old_itimerspec32(&old_spec, old))
1013 int common_timer_del(struct k_itimer *timer)
1015 const struct k_clock *kc = timer->kclock;
1017 timer->it_interval = 0;
1018 if (kc->timer_try_to_cancel(timer) < 0)
1020 timer->it_active = 0;
1024 static inline int timer_delete_hook(struct k_itimer *timer)
1026 const struct k_clock *kc = timer->kclock;
1028 if (WARN_ON_ONCE(!kc || !kc->timer_del))
1030 return kc->timer_del(timer);
1033 /* Delete a POSIX.1b interval timer. */
1034 SYSCALL_DEFINE1(timer_delete, timer_t, timer_id)
1036 struct k_itimer *timer;
1037 unsigned long flags;
1039 timer = lock_timer(timer_id, &flags);
1045 if (unlikely(timer_delete_hook(timer) == TIMER_RETRY)) {
1046 /* Unlocks and relocks the timer if it still exists */
1047 timer = timer_wait_running(timer, &flags);
1051 spin_lock(&current->sighand->siglock);
1052 list_del(&timer->list);
1053 spin_unlock(&current->sighand->siglock);
1055 * A concurrent lookup could check timer::it_signal locklessly. It
1056 * will reevaluate with timer::it_lock held and observe the NULL.
1058 WRITE_ONCE(timer->it_signal, NULL);
1060 unlock_timer(timer, flags);
1061 posix_timer_unhash_and_free(timer);
1066 * Delete a timer if it is armed, remove it from the hash and schedule it for RCU freeing.
1069 static void itimer_delete(struct k_itimer *timer)
1071 unsigned long flags;
1074 * irqsave is required to make timer_wait_running() work.
1076 spin_lock_irqsave(&timer->it_lock, flags);
1080 * Even if the timer is no longer accessible from other tasks
1081 * it still might be armed and queued in the underlying timer
1082 * mechanism. Worse, that timer mechanism might run the expiry
1083 * function concurrently.
1085 if (timer_delete_hook(timer) == TIMER_RETRY) {
1087 * The timer is expiring concurrently; prevent livelocks
1088 * and pointless spinning on RT.
1090 * timer_wait_running() drops timer::it_lock, which opens
1091 * the possibility for another task to delete the timer.
1093 * That's not possible here because this is invoked from
1094 * do_exit() only for the last thread of the thread group.
1095 * So no other task can access and delete that timer.
1097 if (WARN_ON_ONCE(timer_wait_running(timer, &flags) != timer))
1102 list_del(&timer->list);
1105 * Setting timer::it_signal to NULL is technically not required
1106 * here as nothing can access the timer anymore legitimately via
1107 * the hash table. Set it to NULL nevertheless so that all deletion
1108 * paths are consistent.
1110 WRITE_ONCE(timer->it_signal, NULL);
1112 spin_unlock_irqrestore(&timer->it_lock, flags);
1113 posix_timer_unhash_and_free(timer);
1117 * Invoked from do_exit() when the last thread of a thread group exits.
1118 * At that point no other task can access the timers of the dying task anymore.
1121 void exit_itimers(struct task_struct *tsk)
1123 struct list_head timers;
1124 struct k_itimer *tmr;
1126 if (list_empty(&tsk->signal->posix_timers))
1129 /* Protect against concurrent read via /proc/$PID/timers */
1130 spin_lock_irq(&tsk->sighand->siglock);
1131 list_replace_init(&tsk->signal->posix_timers, &timers);
1132 spin_unlock_irq(&tsk->sighand->siglock);
1134 /* The timers are no longer accessible via tsk::signal */
1135 while (!list_empty(&timers)) {
1136 tmr = list_first_entry(&timers, struct k_itimer, list);
1141 SYSCALL_DEFINE2(clock_settime, const clockid_t, which_clock,
1142 const struct __kernel_timespec __user *, tp)
1144 const struct k_clock *kc = clockid_to_kclock(which_clock);
1145 struct timespec64 new_tp;
1147 if (!kc || !kc->clock_set)
1150 if (get_timespec64(&new_tp, tp))
1153 return kc->clock_set(which_clock, &new_tp);
1156 SYSCALL_DEFINE2(clock_gettime, const clockid_t, which_clock,
1157 struct __kernel_timespec __user *, tp)
1159 const struct k_clock *kc = clockid_to_kclock(which_clock);
1160 struct timespec64 kernel_tp;
1166 error = kc->clock_get_timespec(which_clock, &kernel_tp);
1168 if (!error && put_timespec64(&kernel_tp, tp))
1174 int do_clock_adjtime(const clockid_t which_clock, struct __kernel_timex * ktx)
1176 const struct k_clock *kc = clockid_to_kclock(which_clock);
1183 return kc->clock_adj(which_clock, ktx);
1186 SYSCALL_DEFINE2(clock_adjtime, const clockid_t, which_clock,
1187 struct __kernel_timex __user *, utx)
1189 struct __kernel_timex ktx;
1192 if (copy_from_user(&ktx, utx, sizeof(ktx)))
1195 err = do_clock_adjtime(which_clock, &ktx);
1197 if (err >= 0 && copy_to_user(utx, &ktx, sizeof(ktx)))
1204 * sys_clock_getres - Get the resolution of a clock
1205 * @which_clock: The clock to get the resolution for
1206 * @tp: Pointer to a user space timespec64 for storage
1210 * "The clock_getres() function shall return the resolution of any
1211 * clock. Clock resolutions are implementation-defined and cannot be set by
1212 * a process. If the argument res is not NULL, the resolution of the
1213 * specified clock shall be stored in the location pointed to by res. If
1214 * res is NULL, the clock resolution is not returned. If the time argument
1215 * of clock_settime() is not a multiple of res, then the value is truncated
1216 * to a multiple of res."
1218 * Due to the various hardware constraints the real resolution can vary
1219 * wildly and even change during runtime when the underlying devices are
1220 * replaced. The kernel also can use hardware devices with different
1221 * resolutions for reading the time and for arming timers.
1223 * The kernel therefore deviates from the POSIX spec in various aspects:
1225 * 1) The resolution returned to user space
1227 * For CLOCK_REALTIME, CLOCK_MONOTONIC, CLOCK_BOOTTIME, CLOCK_TAI,
1228 * CLOCK_REALTIME_ALARM, CLOCK_BOOTTIME_ALARM and CLOCK_MONOTONIC_RAW
1229 * the kernel differentiates only two cases:
1231 * I) Low resolution mode:
1233 * When high resolution timers are disabled at compile or runtime
1234 * the resolution returned is nanoseconds per tick, which represents
1235 * the precision at which timers expire.
1237 * II) High resolution mode:
1239 * When high resolution timers are enabled the resolution returned
1240 * is always one nanosecond independent of the actual resolution of
1241 * the underlying hardware devices.
1243 * For CLOCK_*_ALARM the actual resolution depends on system
1244 * state. When the system is running the resolution is the same as the
1245 * resolution of the other clocks. During suspend the actual
1246 * resolution is the resolution of the underlying RTC device which
1247 * might be way less precise than the clockevent device used during running state.
1250 * For CLOCK_REALTIME_COARSE and CLOCK_MONOTONIC_COARSE the resolution
1251 * returned is always nanoseconds per tick.
1253 * For CLOCK_PROCESS_CPUTIME and CLOCK_THREAD_CPUTIME the resolution
1254 * returned is always one nanosecond under the assumption that the
1255 * underlying scheduler clock has a better resolution than nanoseconds
1258 * For dynamic POSIX clocks (PTP devices) the resolution returned is
1259 * always one nanosecond.
1261 * 2) Effect on sys_clock_settime()
1263 * The kernel does not truncate the time which is handed in to
1264 * sys_clock_settime(). The kernel internal timekeeping is always using
1265 * nanoseconds precision independent of the clocksource device which is
1266 * used to read the time from. The resolution of that device only
1267 * affects the precision of the time returned by sys_clock_gettime().
1270 * 0 Success. @tp contains the resolution
1271 * -EINVAL @which_clock is not a valid clock ID
1272 * -EFAULT Copying the resolution to @tp faulted
1273 * -ENODEV Dynamic POSIX clock is not backed by a device
1274 * -EOPNOTSUPP Dynamic POSIX clock does not support getres()
1276 SYSCALL_DEFINE2(clock_getres, const clockid_t, which_clock,
1277 struct __kernel_timespec __user *, tp)
1279 const struct k_clock *kc = clockid_to_kclock(which_clock);
1280 struct timespec64 rtn_tp;
1286 error = kc->clock_getres(which_clock, &rtn_tp);
1288 if (!error && tp && put_timespec64(&rtn_tp, tp))
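/*
 * Illustrative user-space sketch (assumed example, typical values): with
 * high resolution timers enabled the hrtimer based clocks report 1 ns,
 * while the coarse clocks report nanoseconds per tick as described above.
 *
 *	#include <time.h>
 *
 *	struct timespec res;
 *
 *	clock_getres(CLOCK_MONOTONIC, &res);		// typically { 0, 1 }
 *	clock_getres(CLOCK_MONOTONIC_COARSE, &res);	// { 0, NSEC_PER_SEC / HZ }, e.g. 4 ms at HZ=250
 */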
1294 #ifdef CONFIG_COMPAT_32BIT_TIME
1296 SYSCALL_DEFINE2(clock_settime32, clockid_t, which_clock,
1297 struct old_timespec32 __user *, tp)
1299 const struct k_clock *kc = clockid_to_kclock(which_clock);
1300 struct timespec64 ts;
1302 if (!kc || !kc->clock_set)
1305 if (get_old_timespec32(&ts, tp))
1308 return kc->clock_set(which_clock, &ts);
1311 SYSCALL_DEFINE2(clock_gettime32, clockid_t, which_clock,
1312 struct old_timespec32 __user *, tp)
1314 const struct k_clock *kc = clockid_to_kclock(which_clock);
1315 struct timespec64 ts;
1321 err = kc->clock_get_timespec(which_clock, &ts);
1323 if (!err && put_old_timespec32(&ts, tp))
1329 SYSCALL_DEFINE2(clock_adjtime32, clockid_t, which_clock,
1330 struct old_timex32 __user *, utp)
1332 struct __kernel_timex ktx;
1335 err = get_old_timex32(&ktx, utp);
1339 err = do_clock_adjtime(which_clock, &ktx);
1341 if (err >= 0 && put_old_timex32(utp, &ktx))
1347 SYSCALL_DEFINE2(clock_getres_time32, clockid_t, which_clock,
1348 struct old_timespec32 __user *, tp)
1350 const struct k_clock *kc = clockid_to_kclock(which_clock);
1351 struct timespec64 ts;
1357 err = kc->clock_getres(which_clock, &ts);
1358 if (!err && tp && put_old_timespec32(&ts, tp))
1367 * nanosleep for monotonic and realtime clocks
1369 static int common_nsleep(const clockid_t which_clock, int flags,
1370 const struct timespec64 *rqtp)
1372 ktime_t texp = timespec64_to_ktime(*rqtp);
1374 return hrtimer_nanosleep(texp, flags & TIMER_ABSTIME ?
1375 HRTIMER_MODE_ABS : HRTIMER_MODE_REL,
1379 static int common_nsleep_timens(const clockid_t which_clock, int flags,
1380 const struct timespec64 *rqtp)
1382 ktime_t texp = timespec64_to_ktime(*rqtp);
1384 if (flags & TIMER_ABSTIME)
1385 texp = timens_ktime_to_host(which_clock, texp);
1387 return hrtimer_nanosleep(texp, flags & TIMER_ABSTIME ?
1388 HRTIMER_MODE_ABS : HRTIMER_MODE_REL,
1392 SYSCALL_DEFINE4(clock_nanosleep, const clockid_t, which_clock, int, flags,
1393 const struct __kernel_timespec __user *, rqtp,
1394 struct __kernel_timespec __user *, rmtp)
1396 const struct k_clock *kc = clockid_to_kclock(which_clock);
1397 struct timespec64 t;
1404 if (get_timespec64(&t, rqtp))
1407 if (!timespec64_valid(&t))
1409 if (flags & TIMER_ABSTIME)
1411 current->restart_block.fn = do_no_restart_syscall;
1412 current->restart_block.nanosleep.type = rmtp ? TT_NATIVE : TT_NONE;
1413 current->restart_block.nanosleep.rmtp = rmtp;
1415 return kc->nsleep(which_clock, flags, &t);
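/*
 * Illustrative user-space sketch (assumed example): sleeping until an
 * absolute CLOCK_MONOTONIC deadline. With TIMER_ABSTIME the request names
 * an absolute expiry, so retrying after a signal does not stretch the
 * total sleep time. Note that clock_nanosleep() returns the error number
 * directly instead of setting errno.
 *
 *	#include <time.h>
 *	#include <errno.h>
 *
 *	struct timespec deadline;
 *
 *	clock_gettime(CLOCK_MONOTONIC, &deadline);
 *	deadline.tv_sec += 2;
 *	while (clock_nanosleep(CLOCK_MONOTONIC, TIMER_ABSTIME,
 *			       &deadline, NULL) == EINTR)
 *		;	// interrupted by a signal, retry until the deadline
 */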
1418 #ifdef CONFIG_COMPAT_32BIT_TIME
1420 SYSCALL_DEFINE4(clock_nanosleep_time32, clockid_t, which_clock, int, flags,
1421 struct old_timespec32 __user *, rqtp,
1422 struct old_timespec32 __user *, rmtp)
1424 const struct k_clock *kc = clockid_to_kclock(which_clock);
1425 struct timespec64 t;
1432 if (get_old_timespec32(&t, rqtp))
1435 if (!timespec64_valid(&t))
1437 if (flags & TIMER_ABSTIME)
1439 current->restart_block.fn = do_no_restart_syscall;
1440 current->restart_block.nanosleep.type = rmtp ? TT_COMPAT : TT_NONE;
1441 current->restart_block.nanosleep.compat_rmtp = rmtp;
1443 return kc->nsleep(which_clock, flags, &t);
1448 static const struct k_clock clock_realtime = {
1449 .clock_getres = posix_get_hrtimer_res,
1450 .clock_get_timespec = posix_get_realtime_timespec,
1451 .clock_get_ktime = posix_get_realtime_ktime,
1452 .clock_set = posix_clock_realtime_set,
1453 .clock_adj = posix_clock_realtime_adj,
1454 .nsleep = common_nsleep,
1455 .timer_create = common_timer_create,
1456 .timer_set = common_timer_set,
1457 .timer_get = common_timer_get,
1458 .timer_del = common_timer_del,
1459 .timer_rearm = common_hrtimer_rearm,
1460 .timer_forward = common_hrtimer_forward,
1461 .timer_remaining = common_hrtimer_remaining,
1462 .timer_try_to_cancel = common_hrtimer_try_to_cancel,
1463 .timer_wait_running = common_timer_wait_running,
1464 .timer_arm = common_hrtimer_arm,
1467 static const struct k_clock clock_monotonic = {
1468 .clock_getres = posix_get_hrtimer_res,
1469 .clock_get_timespec = posix_get_monotonic_timespec,
1470 .clock_get_ktime = posix_get_monotonic_ktime,
1471 .nsleep = common_nsleep_timens,
1472 .timer_create = common_timer_create,
1473 .timer_set = common_timer_set,
1474 .timer_get = common_timer_get,
1475 .timer_del = common_timer_del,
1476 .timer_rearm = common_hrtimer_rearm,
1477 .timer_forward = common_hrtimer_forward,
1478 .timer_remaining = common_hrtimer_remaining,
1479 .timer_try_to_cancel = common_hrtimer_try_to_cancel,
1480 .timer_wait_running = common_timer_wait_running,
1481 .timer_arm = common_hrtimer_arm,
1484 static const struct k_clock clock_monotonic_raw = {
1485 .clock_getres = posix_get_hrtimer_res,
1486 .clock_get_timespec = posix_get_monotonic_raw,
1489 static const struct k_clock clock_realtime_coarse = {
1490 .clock_getres = posix_get_coarse_res,
1491 .clock_get_timespec = posix_get_realtime_coarse,
1494 static const struct k_clock clock_monotonic_coarse = {
1495 .clock_getres = posix_get_coarse_res,
1496 .clock_get_timespec = posix_get_monotonic_coarse,
1499 static const struct k_clock clock_tai = {
1500 .clock_getres = posix_get_hrtimer_res,
1501 .clock_get_ktime = posix_get_tai_ktime,
1502 .clock_get_timespec = posix_get_tai_timespec,
1503 .nsleep = common_nsleep,
1504 .timer_create = common_timer_create,
1505 .timer_set = common_timer_set,
1506 .timer_get = common_timer_get,
1507 .timer_del = common_timer_del,
1508 .timer_rearm = common_hrtimer_rearm,
1509 .timer_forward = common_hrtimer_forward,
1510 .timer_remaining = common_hrtimer_remaining,
1511 .timer_try_to_cancel = common_hrtimer_try_to_cancel,
1512 .timer_wait_running = common_timer_wait_running,
1513 .timer_arm = common_hrtimer_arm,
1516 static const struct k_clock clock_boottime = {
1517 .clock_getres = posix_get_hrtimer_res,
1518 .clock_get_ktime = posix_get_boottime_ktime,
1519 .clock_get_timespec = posix_get_boottime_timespec,
1520 .nsleep = common_nsleep_timens,
1521 .timer_create = common_timer_create,
1522 .timer_set = common_timer_set,
1523 .timer_get = common_timer_get,
1524 .timer_del = common_timer_del,
1525 .timer_rearm = common_hrtimer_rearm,
1526 .timer_forward = common_hrtimer_forward,
1527 .timer_remaining = common_hrtimer_remaining,
1528 .timer_try_to_cancel = common_hrtimer_try_to_cancel,
1529 .timer_wait_running = common_timer_wait_running,
1530 .timer_arm = common_hrtimer_arm,
1533 static const struct k_clock * const posix_clocks[] = {
1534 [CLOCK_REALTIME] = &clock_realtime,
1535 [CLOCK_MONOTONIC] = &clock_monotonic,
1536 [CLOCK_PROCESS_CPUTIME_ID] = &clock_process,
1537 [CLOCK_THREAD_CPUTIME_ID] = &clock_thread,
1538 [CLOCK_MONOTONIC_RAW] = &clock_monotonic_raw,
1539 [CLOCK_REALTIME_COARSE] = &clock_realtime_coarse,
1540 [CLOCK_MONOTONIC_COARSE] = &clock_monotonic_coarse,
1541 [CLOCK_BOOTTIME] = &clock_boottime,
1542 [CLOCK_REALTIME_ALARM] = &alarm_clock,
1543 [CLOCK_BOOTTIME_ALARM] = &alarm_clock,
1544 [CLOCK_TAI] = &clock_tai,
1547 static const struct k_clock *clockid_to_kclock(const clockid_t id)
1552 return (id & CLOCKFD_MASK) == CLOCKFD ?
1553 &clock_posix_dynamic : &clock_posix_cpu;
1556 if (id >= ARRAY_SIZE(posix_clocks))
1559 return posix_clocks[array_index_nospec(idx, ARRAY_SIZE(posix_clocks))];
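/*
 * Illustrative user-space sketch (assumed example): dynamic POSIX clocks,
 * e.g. PTP hardware clocks, are addressed by folding an open file
 * descriptor into a negative clockid. The CLOCKFD check above then routes
 * such ids to clock_posix_dynamic. The encoding below follows the PTP
 * documentation.
 *
 *	#include <fcntl.h>
 *	#include <time.h>
 *
 *	#define CLOCKFD			3
 *	#define FD_TO_CLOCKID(fd)	((~(clockid_t) (fd) << 3) | CLOCKFD)
 *
 *	int fd = open("/dev/ptp0", O_RDWR);
 *	clockid_t clkid = FD_TO_CLOCKID(fd);
 *	struct timespec ts;
 *
 *	clock_gettime(clkid, &ts);
 */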