/*
 * linux/kernel/posix-timers.c
 *
 * 2002-10-15  Posix Clocks & timers
 *                           by George Anzinger george@mvista.com
 *                           Copyright (C) 2002 2003 by MontaVista Software.
 *
 * 2004-06-01  Fix CLOCK_REALTIME clock/timer TIMER_ABSTIME bug.
 *                           Copyright (C) 2004 Boris Hu
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or (at
 * your option) any later version.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 *
 * MontaVista Software | 1237 East Arques Avenue | Sunnyvale | CA 94085 | USA
 */
/* These are all the functions necessary to implement
 * POSIX clocks & timers
 */
#include <linux/interrupt.h>
#include <linux/slab.h>
#include <linux/time.h>
#include <linux/mutex.h>
#include <linux/sched/task.h>

#include <linux/uaccess.h>
#include <linux/list.h>
#include <linux/init.h>
#include <linux/compiler.h>
#include <linux/hash.h>
#include <linux/posix-clock.h>
#include <linux/posix-timers.h>
#include <linux/syscalls.h>
#include <linux/wait.h>
#include <linux/workqueue.h>
#include <linux/export.h>
#include <linux/hashtable.h>
#include <linux/compat.h>

#include "timekeeping.h"
#include "posix-timers.h"
/*
 * Management arrays for POSIX timers. Timers are now kept in a static hash
 * table with 512 entries.
 * Timer IDs are allocated by a local routine, which selects the proper hash
 * head by a key constructed from the current->signal address and a per
 * signal_struct counter. This keeps timer IDs unique per process, but they
 * can now collide between processes.
 */
/*
 * Let's keep our timers in a slab cache :-)
 */
static struct kmem_cache *posix_timers_cache;

static DEFINE_HASHTABLE(posix_timers_hashtable, 9);
static DEFINE_SPINLOCK(hash_lock);

static const struct k_clock * const posix_clocks[];
static const struct k_clock *clockid_to_kclock(const clockid_t id);
static const struct k_clock clock_realtime, clock_monotonic;
/*
 * We assume that the new SIGEV_THREAD_ID shares no bits with the other
 * SIGEV values. Here we put out a compile-time error if this assumption
 * fails.
 */
#if SIGEV_THREAD_ID != (SIGEV_THREAD_ID & \
			~(SIGEV_SIGNAL | SIGEV_NONE | SIGEV_THREAD))
#error "SIGEV_THREAD_ID must not share bits with other SIGEV values!"
#endif
/*
 * parisc wants ENOTSUP instead of EOPNOTSUPP
 */
#ifndef ENOTSUP
# define ENANOSLEEP_NOTSUP EOPNOTSUPP
#else
# define ENANOSLEEP_NOTSUP ENOTSUP
#endif
/*
 * The timer ID is turned into a timer address by posix_timer_by_id().
 * Verifying a valid ID consists of:
 *
 * a) checking that the lookup returns a timer,
 * b) checking that the timer ID matches the one in the timer itself, and
 * c) checking that the timer owner is in the caller's thread group.
 */
/*
 * CLOCKs: The POSIX standard calls for a couple of clocks and allows us
 *	    to implement others. This structure defines the various
 *	    clocks.
 *
 * RESOLUTION: Clock resolution is used to round up timer and interval
 *	    times, NOT to report clock times, which are reported with as
 *	    much resolution as the system can muster. In some cases this
 *	    resolution may depend on the underlying clock hardware and
 *	    may not be quantifiable until run time, and only then is the
 *	    necessary code written. The standard says we should say
 *	    something about this issue in the documentation...
 *
 * FUNCTIONS: The CLOCKs structure defines possible functions to
 *	    handle various clock functions.
 *
 *	    The standard POSIX timer management code assumes the
 *	    following: 1.) The k_itimer struct (sched.h) is used for
 *	    the timer. 2.) The list, it_lock, it_clock, it_id and
 *	    it_pid fields are not modified by timer code.
 *
 * Permissions: It is assumed that the clock_settime() function defined
 *	    for each clock will take care of permission checks. Some
 *	    clocks may be settable by any user (i.e. local process
 *	    clocks) and others not. Currently the only settable clock we
 *	    have is CLOCK_REALTIME and its high-res counterpart, both of
 *	    which we beg off on and pass to do_sys_settimeofday().
 */
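
/*
 * The RESOLUTION rules above are visible to userspace through
 * clock_getres(). Illustrative sketch (userspace, not part of this file;
 * link with -lrt on older C libraries):
 *
 *	#include <stdio.h>
 *	#include <time.h>
 *
 *	int main(void)
 *	{
 *		struct timespec res;
 *
 *		// Typically 1 ns with high resolution timers enabled,
 *		// otherwise the low-res tick period (KTIME_LOW_RES).
 *		if (clock_getres(CLOCK_MONOTONIC, &res) == 0)
 *			printf("%ld s %ld ns\n", (long)res.tv_sec, res.tv_nsec);
 *		return 0;
 *	}
 */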
static struct k_itimer *__lock_timer(timer_t timer_id, unsigned long *flags);

#define lock_timer(tid, flags)						   \
({	struct k_itimer *__timr;					   \
	__cond_lock(&__timr->it_lock, __timr = __lock_timer(tid, flags)); \
	__timr;								   \
})
static int hash(struct signal_struct *sig, unsigned int nr)
{
	return hash_32(hash32_ptr(sig) ^ nr, HASH_BITS(posix_timers_hashtable));
}
static struct k_itimer *__posix_timers_find(struct hlist_head *head,
					    struct signal_struct *sig,
					    timer_t id)
{
	struct k_itimer *timer;

	hlist_for_each_entry_rcu(timer, head, t_hash) {
		if ((timer->it_signal == sig) && (timer->it_id == id))
			return timer;
	}
	return NULL;
}
static struct k_itimer *posix_timer_by_id(timer_t id)
{
	struct signal_struct *sig = current->signal;
	struct hlist_head *head = &posix_timers_hashtable[hash(sig, id)];

	return __posix_timers_find(head, sig, id);
}
static int posix_timer_add(struct k_itimer *timer)
{
	struct signal_struct *sig = current->signal;
	int first_free_id = sig->posix_timer_id;
	struct hlist_head *head;
	int ret = -ENOENT;

	do {
		spin_lock(&hash_lock);
		head = &posix_timers_hashtable[hash(sig, sig->posix_timer_id)];
		if (!__posix_timers_find(head, sig, sig->posix_timer_id)) {
			hlist_add_head_rcu(&timer->t_hash, head);
			ret = sig->posix_timer_id;
		}
		if (++sig->posix_timer_id < 0)
			sig->posix_timer_id = 0;
		if ((sig->posix_timer_id == first_free_id) && (ret == -ENOENT))
			/* Loop over all possible ids completed */
			ret = -EAGAIN;
		spin_unlock(&hash_lock);
	} while (ret == -ENOENT);
	return ret;
}
static inline void unlock_timer(struct k_itimer *timr, unsigned long flags)
{
	spin_unlock_irqrestore(&timr->it_lock, flags);
}
/* Get clock_realtime */
static int posix_clock_realtime_get(clockid_t which_clock, struct timespec64 *tp)
{
	ktime_get_real_ts64(tp);
	return 0;
}

/* Set clock_realtime */
static int posix_clock_realtime_set(const clockid_t which_clock,
				    const struct timespec64 *tp)
{
	return do_sys_settimeofday64(tp, NULL);
}

static int posix_clock_realtime_adj(const clockid_t which_clock,
				    struct timex *t)
{
	return do_adjtimex(t);
}

/*
 * Get monotonic time for posix timers
 */
static int posix_ktime_get_ts(clockid_t which_clock, struct timespec64 *tp)
{
	ktime_get_ts64(tp);
	return 0;
}

/*
 * Get monotonic-raw time for posix timers
 */
static int posix_get_monotonic_raw(clockid_t which_clock, struct timespec64 *tp)
{
	getrawmonotonic64(tp);
	return 0;
}

static int posix_get_realtime_coarse(clockid_t which_clock, struct timespec64 *tp)
{
	*tp = current_kernel_time64();
	return 0;
}

static int posix_get_monotonic_coarse(clockid_t which_clock,
				      struct timespec64 *tp)
{
	*tp = get_monotonic_coarse64();
	return 0;
}

static int posix_get_coarse_res(const clockid_t which_clock, struct timespec64 *tp)
{
	*tp = ktime_to_timespec64(KTIME_LOW_RES);
	return 0;
}

static int posix_get_boottime(const clockid_t which_clock, struct timespec64 *tp)
{
	get_monotonic_boottime64(tp);
	return 0;
}

static int posix_get_tai(clockid_t which_clock, struct timespec64 *tp)
{
	timekeeping_clocktai64(tp);
	return 0;
}

static int posix_get_hrtimer_res(clockid_t which_clock, struct timespec64 *tp)
{
	tp->tv_sec = 0;
	tp->tv_nsec = hrtimer_resolution;
	return 0;
}
/*
 * Initialize everything, well, just everything in Posix clocks/timers ;)
 */
static __init int init_posix_timers(void)
{
	posix_timers_cache = kmem_cache_create("posix_timers_cache",
					sizeof (struct k_itimer), 0, SLAB_PANIC,
					NULL);
	return 0;
}
__initcall(init_posix_timers);
static void common_hrtimer_rearm(struct k_itimer *timr)
{
	struct hrtimer *timer = &timr->it.real.timer;

	if (!timr->it_interval)
		return;

	timr->it_overrun += (unsigned int) hrtimer_forward(timer,
						timer->base->get_time(),
						timr->it_interval);
	hrtimer_restart(timer);
}
/*
 * This function is exported for use by the signal delivery code.  It is
 * called just prior to the info block being released and passes that
 * block to us.  Its function is to update the overrun entry AND to
 * restart the timer.  It should only be called if the timer is to be
 * restarted (i.e. we have flagged this in the sys_private entry of the
 * info block).
 *
 * To protect against the timer going away while the interrupt is queued,
 * we require that the it_requeue_pending flag be set.
 */
void posixtimer_rearm(struct siginfo *info)
{
	struct k_itimer *timr;
	unsigned long flags;

	timr = lock_timer(info->si_tid, &flags);
	if (!timr)
		return;

	if (timr->it_requeue_pending == info->si_sys_private) {
		timr->kclock->timer_rearm(timr);

		timr->it_active = 1;
		timr->it_overrun_last = timr->it_overrun;
		timr->it_overrun = -1;
		++timr->it_requeue_pending;

		info->si_overrun += timr->it_overrun_last;
	}

	unlock_timer(timr, flags);
}
int posix_timer_event(struct k_itimer *timr, int si_private)
{
	struct task_struct *task;
	int shared, ret = -1;
	/*
	 * FIXME: if ->sigq is queued we can race with
	 * dequeue_signal()->posixtimer_rearm().
	 *
	 * If dequeue_signal() sees the "right" value of
	 * si_sys_private it calls posixtimer_rearm().
	 * We re-queue ->sigq and drop ->it_lock().
	 * posixtimer_rearm() locks the timer
	 * and re-schedules it while ->sigq is pending.
	 * Not really bad, but not what we want.
	 */
	timr->sigq->info.si_sys_private = si_private;

	rcu_read_lock();
	task = pid_task(timr->it_pid, PIDTYPE_PID);
	if (task) {
		shared = !(timr->it_sigev_notify & SIGEV_THREAD_ID);
		ret = send_sigqueue(timr->sigq, task, shared);
	}
	rcu_read_unlock();
	/* If we failed to send the signal the timer stops. */
	return ret > 0;
}
/*
 * This function gets called when a POSIX.1b interval timer expires.  It
 * is used as a callback from the kernel internal timer.  The
 * run_timer_list code ALWAYS calls with interrupts on.
 *
 * This code is for CLOCK_REALTIME* and CLOCK_MONOTONIC* timers.
 */
static enum hrtimer_restart posix_timer_fn(struct hrtimer *timer)
{
	struct k_itimer *timr;
	unsigned long flags;
	int si_private = 0;
	enum hrtimer_restart ret = HRTIMER_NORESTART;

	timr = container_of(timer, struct k_itimer, it.real.timer);
	spin_lock_irqsave(&timr->it_lock, flags);

	timr->it_active = 0;
	if (timr->it_interval != 0)
		si_private = ++timr->it_requeue_pending;

	if (posix_timer_event(timr, si_private)) {
		/*
		 * The signal was not sent because it is ignored;
		 * we will not get a callback to restart it AND
		 * it should be restarted.
		 */
		if (timr->it_interval != 0) {
			ktime_t now = hrtimer_cb_get_time(timer);

			/*
			 * FIXME: What we really want is to stop this
			 * timer completely and restart it in case the
			 * SIG_IGN is removed. This is a non-trivial
			 * change which involves sighand locking
			 * (sigh !), which we don't want to do late in
			 * the release cycle.
			 *
			 * For now we just let timers with an interval
			 * less than a jiffie expire every jiffie to
			 * avoid softirq starvation in case of SIG_IGN
			 * and a very small interval, which would put
			 * the timer right back on the softirq pending
			 * list. By moving now ahead of time we trick
			 * hrtimer_forward() into expiring the timer
			 * later, while we still maintain the overrun
			 * accuracy, but have some inconsistency in
			 * the timer_gettime() case. This is at least
			 * better than a starved softirq. A more
			 * complex fix which also solves another related
			 * inconsistency is already in the pipeline.
			 */
#ifdef CONFIG_HIGH_RES_TIMERS
			{
				ktime_t kj = NSEC_PER_SEC / HZ;

				if (timr->it_interval < kj)
					now = ktime_add(now, kj);
			}
#endif
			timr->it_overrun += (unsigned int)
				hrtimer_forward(timer, now,
						timr->it_interval);
			ret = HRTIMER_RESTART;
			++timr->it_requeue_pending;
			timr->it_active = 1;
		}
	}

	unlock_timer(timr, flags);
	return ret;
}
static struct pid *good_sigevent(sigevent_t *event)
{
	struct task_struct *rtn = current->group_leader;

	if ((event->sigev_notify & SIGEV_THREAD_ID) &&
	    (!(rtn = find_task_by_vpid(event->sigev_notify_thread_id)) ||
	     !same_thread_group(rtn, current) ||
	     (event->sigev_notify & ~SIGEV_THREAD_ID) != SIGEV_SIGNAL))
		return NULL;

	if (((event->sigev_notify & ~SIGEV_THREAD_ID) != SIGEV_NONE) &&
	    ((event->sigev_signo <= 0) || (event->sigev_signo > SIGRTMAX)))
		return NULL;

	return task_pid(rtn);
}
static struct k_itimer *alloc_posix_timer(void)
{
	struct k_itimer *tmr;

	tmr = kmem_cache_zalloc(posix_timers_cache, GFP_KERNEL);
	if (!tmr)
		return tmr;
	if (unlikely(!(tmr->sigq = sigqueue_alloc()))) {
		kmem_cache_free(posix_timers_cache, tmr);
		return NULL;
	}
	memset(&tmr->sigq->info, 0, sizeof(siginfo_t));
	return tmr;
}
static void k_itimer_rcu_free(struct rcu_head *head)
{
	struct k_itimer *tmr = container_of(head, struct k_itimer, it.rcu);

	kmem_cache_free(posix_timers_cache, tmr);
}
#define IT_ID_SET	1
#define IT_ID_NOT_SET	0
static void release_posix_timer(struct k_itimer *tmr, int it_id_set)
{
	if (it_id_set) {
		unsigned long flags;

		spin_lock_irqsave(&hash_lock, flags);
		hlist_del_rcu(&tmr->t_hash);
		spin_unlock_irqrestore(&hash_lock, flags);
	}
	put_pid(tmr->it_pid);
	sigqueue_free(tmr->sigq);
	call_rcu(&tmr->it.rcu, k_itimer_rcu_free);
}
static int common_timer_create(struct k_itimer *new_timer)
{
	hrtimer_init(&new_timer->it.real.timer, new_timer->it_clock, 0);
	return 0;
}
/* Create a POSIX.1b interval timer. */

SYSCALL_DEFINE3(timer_create, const clockid_t, which_clock,
		struct sigevent __user *, timer_event_spec,
		timer_t __user *, created_timer_id)
{
	const struct k_clock *kc = clockid_to_kclock(which_clock);
	struct k_itimer *new_timer;
	int error, new_timer_id;
	sigevent_t event;
	int it_id_set = IT_ID_NOT_SET;

	if (!kc)
		return -EINVAL;
	if (!kc->timer_create)
		return -EOPNOTSUPP;

	new_timer = alloc_posix_timer();
	if (unlikely(!new_timer))
		return -EAGAIN;

	spin_lock_init(&new_timer->it_lock);
	new_timer_id = posix_timer_add(new_timer);
	if (new_timer_id < 0) {
		error = new_timer_id;
		goto out;
	}

	it_id_set = IT_ID_SET;
	new_timer->it_id = (timer_t) new_timer_id;
	new_timer->it_clock = which_clock;
	new_timer->kclock = kc;
	new_timer->it_overrun = -1;

	if (timer_event_spec) {
		if (copy_from_user(&event, timer_event_spec, sizeof (event))) {
			error = -EFAULT;
			goto out;
		}
		rcu_read_lock();
		new_timer->it_pid = get_pid(good_sigevent(&event));
		rcu_read_unlock();
		if (!new_timer->it_pid) {
			error = -EINVAL;
			goto out;
		}
	} else {
		memset(&event.sigev_value, 0, sizeof(event.sigev_value));
		event.sigev_notify = SIGEV_SIGNAL;
		event.sigev_signo = SIGALRM;
		event.sigev_value.sival_int = new_timer->it_id;
		new_timer->it_pid = get_pid(task_tgid(current));
	}

	new_timer->it_sigev_notify     = event.sigev_notify;
	new_timer->sigq->info.si_signo = event.sigev_signo;
	new_timer->sigq->info.si_value = event.sigev_value;
	new_timer->sigq->info.si_tid   = new_timer->it_id;
	new_timer->sigq->info.si_code  = SI_TIMER;

	if (copy_to_user(created_timer_id,
			 &new_timer_id, sizeof (new_timer_id))) {
		error = -EFAULT;
		goto out;
	}

	error = kc->timer_create(new_timer);
	if (error)
		goto out;

	spin_lock_irq(&current->sighand->siglock);
	new_timer->it_signal = current->signal;
	list_add(&new_timer->list, &current->signal->posix_timers);
	spin_unlock_irq(&current->sighand->siglock);

	return 0;
	/*
	 * In the case of the timer belonging to another task, after
	 * the task is unlocked, the timer is owned by the other task
	 * and may cease to exist at any time.  Don't use or modify
	 * new_timer after the unlock call.
	 */
out:
	release_posix_timer(new_timer, it_id_set);
	return error;
}
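
/*
 * Illustrative userspace counterpart of the syscall above (a sketch, not
 * part of this file; error handling trimmed, link with -lrt on older C
 * libraries). Passing a NULL sigevent instead of &sev selects the legacy
 * default set up in the else branch above: SIGEV_SIGNAL, SIGALRM and
 * sival_int equal to the timer id.
 *
 *	#include <signal.h>
 *	#include <stdio.h>
 *	#include <string.h>
 *	#include <time.h>
 *
 *	static timer_t make_timer(void)
 *	{
 *		struct sigevent sev;
 *		timer_t tid = 0;
 *
 *		memset(&sev, 0, sizeof(sev));
 *		sev.sigev_notify = SIGEV_SIGNAL;
 *		sev.sigev_signo = SIGRTMIN;	// real-time signals queue
 *		sev.sigev_value.sival_int = 42;	// arbitrary cookie
 *		if (timer_create(CLOCK_MONOTONIC, &sev, &tid))
 *			perror("timer_create");
 *		return tid;
 *	}
 */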
/*
 * Locking issues: We need to protect the result of the id look up until
 * we get the timer locked down so it is not deleted under us.  The
 * removal is done under the hash spinlock, so we use that here to bridge
 * the find to the timer lock.  To avoid a deadlock, the timer id MUST
 * be released without holding the timer lock.
 */
static struct k_itimer *__lock_timer(timer_t timer_id, unsigned long *flags)
{
	struct k_itimer *timr;

	/*
	 * timer_t could be any type >= int and we want to make sure any
	 * @timer_id outside positive int range fails lookup.
	 */
	if ((unsigned long long)timer_id > INT_MAX)
		return NULL;

	rcu_read_lock();
	timr = posix_timer_by_id(timer_id);
	if (timr) {
		spin_lock_irqsave(&timr->it_lock, *flags);
		if (timr->it_signal == current->signal) {
			rcu_read_unlock();
			return timr;
		}
		spin_unlock_irqrestore(&timr->it_lock, *flags);
	}
	rcu_read_unlock();

	return NULL;
}
static ktime_t common_hrtimer_remaining(struct k_itimer *timr, ktime_t now)
{
	struct hrtimer *timer = &timr->it.real.timer;

	return __hrtimer_expires_remaining_adjusted(timer, now);
}

static int common_hrtimer_forward(struct k_itimer *timr, ktime_t now)
{
	struct hrtimer *timer = &timr->it.real.timer;

	return (int)hrtimer_forward(timer, now, timr->it_interval);
}
/*
 * Get the time remaining on a POSIX.1b interval timer.  This function
 * is ALWAYS called with spin_lock_irq on the timer, thus it must not
 * mess with irq.
 *
 * We have a couple of messes to clean up here.  First there is the case
 * of a timer that has a requeue pending.  These timers should appear to
 * be in the timer list with an expiry as if we were to requeue them
 * now.
 *
 * The second issue is the SIGEV_NONE timer which may be active but is
 * not really ever put in the timer list (to save system resources).
 * This timer may be expired, and if so, we will do it here.  Otherwise
 * it is the same as a requeue pending timer with respect to what we
 * should report.
 */
void common_timer_get(struct k_itimer *timr, struct itimerspec64 *cur_setting)
{
	const struct k_clock *kc = timr->kclock;
	ktime_t now, remaining, iv;
	struct timespec64 ts64;
	bool sig_none;

	sig_none = (timr->it_sigev_notify & ~SIGEV_THREAD_ID) == SIGEV_NONE;
	iv = timr->it_interval;

	/* interval timer ? */
	if (iv) {
		cur_setting->it_interval = ktime_to_timespec64(iv);
	} else if (!timr->it_active) {
		/*
		 * SIGEV_NONE oneshot timers are never queued. Check them
		 * below.
		 */
		if (!sig_none)
			return;
	}

	/*
	 * The timespec64 based conversion is suboptimal, but it's not
	 * worth implementing yet another callback.
	 */
	kc->clock_get(timr->it_clock, &ts64);
	now = timespec64_to_ktime(ts64);

	/*
	 * When a requeue is pending or this is a SIGEV_NONE timer move the
	 * expiry time forward by intervals, so expiry is > now.
	 */
	if (iv && (timr->it_requeue_pending & REQUEUE_PENDING || sig_none))
		timr->it_overrun += kc->timer_forward(timr, now);

	remaining = kc->timer_remaining(timr, now);
	/* Return 0 only when the timer is expired and not pending */
	if (remaining <= 0) {
		/*
		 * A single shot SIGEV_NONE timer must return 0, when
		 * it is expired!
		 */
		if (!sig_none)
			cur_setting->it_value.tv_nsec = 1;
	} else {
		cur_setting->it_value = ktime_to_timespec64(remaining);
	}
}
/* Get the time remaining on a POSIX.1b interval timer. */
SYSCALL_DEFINE2(timer_gettime, timer_t, timer_id,
		struct itimerspec __user *, setting)
{
	struct itimerspec64 cur_setting64;
	struct itimerspec cur_setting;
	struct k_itimer *timr;
	const struct k_clock *kc;
	unsigned long flags;
	int ret = 0;

	timr = lock_timer(timer_id, &flags);
	if (!timr)
		return -EINVAL;

	memset(&cur_setting64, 0, sizeof(cur_setting64));
	kc = timr->kclock;
	if (WARN_ON_ONCE(!kc || !kc->timer_get))
		ret = -EINVAL;
	else
		kc->timer_get(timr, &cur_setting64);

	unlock_timer(timr, flags);

	cur_setting = itimerspec64_to_itimerspec(&cur_setting64);
	if (!ret && copy_to_user(setting, &cur_setting, sizeof (cur_setting)))
		return -EFAULT;

	return ret;
}
/*
 * Get the number of overruns of a POSIX.1b interval timer.  This is the
 * overrun count of the timer signal last delivered.  At the same time we
 * are accumulating overruns on the next timer.  The overrun is frozen when
 * the signal is delivered, either at the notify time (if the info block
 * is not queued) or at the actual delivery time (as we are informed by
 * the call back to posixtimer_rearm()).  So all we need to do is
 * to pick up the frozen overrun.
 */
SYSCALL_DEFINE1(timer_getoverrun, timer_t, timer_id)
{
	struct k_itimer *timr;
	int overrun;
	unsigned long flags;

	timr = lock_timer(timer_id, &flags);
	if (!timr)
		return -EINVAL;

	overrun = timr->it_overrun_last;
	unlock_timer(timr, flags);

	return overrun;
}
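
/*
 * Illustrative userspace sketch (not part of this file; 'tid' is a
 * hypothetical timer id from timer_create()): the count is frozen per
 * delivered signal, so it reports how many extra expirations were merged
 * into the last signal; it does not accumulate across deliveries.
 *
 *	#include <stdio.h>
 *	#include <time.h>
 *
 *	static void report_missed(timer_t tid)
 *	{
 *		int ovr = timer_getoverrun(tid);
 *
 *		if (ovr > 0)
 *			printf("merged %d extra expirations\n", ovr);
 *	}
 */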
static void common_hrtimer_arm(struct k_itimer *timr, ktime_t expires,
			       bool absolute, bool sigev_none)
{
	struct hrtimer *timer = &timr->it.real.timer;
	enum hrtimer_mode mode;

	mode = absolute ? HRTIMER_MODE_ABS : HRTIMER_MODE_REL;
	/*
	 * Posix magic: Relative CLOCK_REALTIME timers are not affected by
	 * clock modifications, so they become CLOCK_MONOTONIC based under the
	 * hood. See hrtimer_init(). Update timr->kclock, so the generic
	 * functions which use timr->kclock->clock_get() work.
	 *
	 * Note: it_clock stays unmodified, because the next timer_set() might
	 * use ABSTIME, so it needs to switch back.
	 */
	if (timr->it_clock == CLOCK_REALTIME)
		timr->kclock = absolute ? &clock_realtime : &clock_monotonic;

	hrtimer_init(&timr->it.real.timer, timr->it_clock, mode);
	timr->it.real.timer.function = posix_timer_fn;

	if (!absolute)
		expires = ktime_add_safe(expires, timer->base->get_time());
	hrtimer_set_expires(timer, expires);

	if (!sigev_none)
		hrtimer_start_expires(timer, HRTIMER_MODE_ABS);
}
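
/*
 * The CLOCK_REALTIME special case above is observable from userspace.
 * Illustrative sketch (not part of this file; 'tid' is a hypothetical
 * CLOCK_REALTIME timer and 'abs_deadline' a hypothetical wall-clock
 * itimerspec):
 *
 *	struct itimerspec rel = { .it_value = { .tv_sec = 5 } };
 *
 *	// Relative arm: runs CLOCK_MONOTONIC under the hood, so it still
 *	// fires ~5 s from now even if clock_settime() steps the wall clock.
 *	timer_settime(tid, 0, &rel, NULL);
 *
 *	// Absolute arm: stays CLOCK_REALTIME based; stepping the clock
 *	// past the deadline makes it fire immediately.
 *	timer_settime(tid, TIMER_ABSTIME, &abs_deadline, NULL);
 */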
static int common_hrtimer_try_to_cancel(struct k_itimer *timr)
{
	return hrtimer_try_to_cancel(&timr->it.real.timer);
}
/* Set a POSIX.1b interval timer. */
int common_timer_set(struct k_itimer *timr, int flags,
		     struct itimerspec64 *new_setting,
		     struct itimerspec64 *old_setting)
{
	const struct k_clock *kc = timr->kclock;
	bool sigev_none;
	ktime_t expires;

	if (old_setting)
		common_timer_get(timr, old_setting);

	/* Prevent rearming by clearing the interval */
	timr->it_interval = 0;
	/*
	 * Careful here. On SMP systems the timer expiry function could be
	 * active and spinning on timr->it_lock.
	 */
	if (kc->timer_try_to_cancel(timr) < 0)
		return TIMER_RETRY;

	timr->it_active = 0;
	timr->it_requeue_pending = (timr->it_requeue_pending + 2) &
		~REQUEUE_PENDING;
	timr->it_overrun_last = 0;

	/* Switch off the timer when it_value is zero */
	if (!new_setting->it_value.tv_sec && !new_setting->it_value.tv_nsec)
		return 0;

	timr->it_interval = timespec64_to_ktime(new_setting->it_interval);
	expires = timespec64_to_ktime(new_setting->it_value);
	sigev_none = (timr->it_sigev_notify & ~SIGEV_THREAD_ID) == SIGEV_NONE;

	kc->timer_arm(timr, expires, flags & TIMER_ABSTIME, sigev_none);
	timr->it_active = !sigev_none;
	return 0;
}
/* Set a POSIX.1b interval timer */
SYSCALL_DEFINE4(timer_settime, timer_t, timer_id, int, flags,
		const struct itimerspec __user *, new_setting,
		struct itimerspec __user *, old_setting)
{
	struct itimerspec64 new_spec64, old_spec64;
	struct itimerspec64 *rtn = old_setting ? &old_spec64 : NULL;
	struct itimerspec new_spec, old_spec;
	struct k_itimer *timr;
	unsigned long flag;
	const struct k_clock *kc;
	int error = 0;

	if (!new_setting)
		return -EINVAL;

	if (copy_from_user(&new_spec, new_setting, sizeof (new_spec)))
		return -EFAULT;
	new_spec64 = itimerspec_to_itimerspec64(&new_spec);

	if (!timespec64_valid(&new_spec64.it_interval) ||
	    !timespec64_valid(&new_spec64.it_value))
		return -EINVAL;
	if (rtn)
		memset(rtn, 0, sizeof(*rtn));
retry:
	timr = lock_timer(timer_id, &flag);
	if (!timr)
		return -EINVAL;

	kc = timr->kclock;
	if (WARN_ON_ONCE(!kc || !kc->timer_set))
		error = -EINVAL;
	else
		error = kc->timer_set(timr, flags, &new_spec64, rtn);

	unlock_timer(timr, flag);
	if (error == TIMER_RETRY) {
		rtn = NULL;	// We already got the old time...
		goto retry;
	}

	old_spec = itimerspec64_to_itimerspec(&old_spec64);
	if (old_setting && !error &&
	    copy_to_user(old_setting, &old_spec, sizeof (old_spec)))
		error = -EFAULT;

	return error;
}
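
/*
 * Illustrative userspace counterpart (a sketch, not part of this file;
 * 'tid' is a hypothetical timer id): arm a periodic timer, then disarm
 * it with an all-zero it_value, which takes the "switch off" path in
 * common_timer_set() above.
 *
 *	struct itimerspec period = {
 *		.it_value    = { .tv_nsec = 1000000 },	// first expiry in 1 ms
 *		.it_interval = { .tv_nsec = 1000000 },	// then every 1 ms
 *	};
 *	struct itimerspec off = { 0 };
 *
 *	timer_settime(tid, 0, &period, NULL);	// relative arm
 *	timer_settime(tid, 0, &off, NULL);	// it_value == 0: disarm
 */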
int common_timer_del(struct k_itimer *timer)
{
	const struct k_clock *kc = timer->kclock;

	timer->it_interval = 0;
	if (kc->timer_try_to_cancel(timer) < 0)
		return TIMER_RETRY;
	timer->it_active = 0;
	return 0;
}

static inline int timer_delete_hook(struct k_itimer *timer)
{
	const struct k_clock *kc = timer->kclock;

	if (WARN_ON_ONCE(!kc || !kc->timer_del))
		return -EINVAL;
	return kc->timer_del(timer);
}
/* Delete a POSIX.1b interval timer. */
SYSCALL_DEFINE1(timer_delete, timer_t, timer_id)
{
	struct k_itimer *timer;
	unsigned long flags;

retry_delete:
	timer = lock_timer(timer_id, &flags);
	if (!timer)
		return -EINVAL;

	if (timer_delete_hook(timer) == TIMER_RETRY) {
		unlock_timer(timer, flags);
		goto retry_delete;
	}

	spin_lock(&current->sighand->siglock);
	list_del(&timer->list);
	spin_unlock(&current->sighand->siglock);
	/*
	 * This keeps any tasks waiting on the spin lock from thinking
	 * they got something (see the lock code above).
	 */
	timer->it_signal = NULL;

	unlock_timer(timer, flags);
	release_posix_timer(timer, IT_ID_SET);
	return 0;
}
/*
 * Delete a timer owned by the process; used by exit_itimers().
 */
static void itimer_delete(struct k_itimer *timer)
{
	unsigned long flags;

retry_delete:
	spin_lock_irqsave(&timer->it_lock, flags);

	if (timer_delete_hook(timer) == TIMER_RETRY) {
		unlock_timer(timer, flags);
		goto retry_delete;
	}
	list_del(&timer->list);
	/*
	 * This keeps any tasks waiting on the spin lock from thinking
	 * they got something (see the lock code above).
	 */
	timer->it_signal = NULL;

	unlock_timer(timer, flags);
	release_posix_timer(timer, IT_ID_SET);
}
/*
 * This is called by do_exit or de_thread, only when there are no more
 * references to the shared signal_struct.
 */
void exit_itimers(struct signal_struct *sig)
{
	struct k_itimer *tmr;

	while (!list_empty(&sig->posix_timers)) {
		tmr = list_entry(sig->posix_timers.next, struct k_itimer, list);
		itimer_delete(tmr);
	}
}
SYSCALL_DEFINE2(clock_settime, const clockid_t, which_clock,
		const struct timespec __user *, tp)
{
	const struct k_clock *kc = clockid_to_kclock(which_clock);
	struct timespec64 new_tp64;
	struct timespec new_tp;

	if (!kc || !kc->clock_set)
		return -EINVAL;

	if (copy_from_user(&new_tp, tp, sizeof (*tp)))
		return -EFAULT;
	new_tp64 = timespec_to_timespec64(new_tp);

	return kc->clock_set(which_clock, &new_tp64);
}
SYSCALL_DEFINE2(clock_gettime, const clockid_t, which_clock,
		struct timespec __user *, tp)
{
	const struct k_clock *kc = clockid_to_kclock(which_clock);
	struct timespec64 kernel_tp64;
	struct timespec kernel_tp;
	int error;

	if (!kc)
		return -EINVAL;

	error = kc->clock_get(which_clock, &kernel_tp64);
	kernel_tp = timespec64_to_timespec(kernel_tp64);

	if (!error && copy_to_user(tp, &kernel_tp, sizeof (kernel_tp)))
		error = -EFAULT;

	return error;
}
SYSCALL_DEFINE2(clock_adjtime, const clockid_t, which_clock,
		struct timex __user *, utx)
{
	const struct k_clock *kc = clockid_to_kclock(which_clock);
	struct timex ktx;
	int err;

	if (!kc)
		return -EINVAL;
	if (!kc->clock_adj)
		return -EOPNOTSUPP;

	if (copy_from_user(&ktx, utx, sizeof(ktx)))
		return -EFAULT;

	err = kc->clock_adj(which_clock, &ktx);

	if (err >= 0 && copy_to_user(utx, &ktx, sizeof(ktx)))
		return -EFAULT;

	return err;
}
SYSCALL_DEFINE2(clock_getres, const clockid_t, which_clock,
		struct timespec __user *, tp)
{
	const struct k_clock *kc = clockid_to_kclock(which_clock);
	struct timespec64 rtn_tp64;
	struct timespec rtn_tp;
	int error;

	if (!kc)
		return -EINVAL;

	error = kc->clock_getres(which_clock, &rtn_tp64);
	rtn_tp = timespec64_to_timespec(rtn_tp64);

	if (!error && tp && copy_to_user(tp, &rtn_tp, sizeof (rtn_tp)))
		error = -EFAULT;

	return error;
}
/*
 * nanosleep for monotonic and realtime clocks
 */
static int common_nsleep(const clockid_t which_clock, int flags,
			 struct timespec64 *tsave)
{
	return hrtimer_nanosleep(tsave, flags & TIMER_ABSTIME ?
				 HRTIMER_MODE_ABS : HRTIMER_MODE_REL,
				 which_clock);
}
SYSCALL_DEFINE4(clock_nanosleep, const clockid_t, which_clock, int, flags,
		const struct timespec __user *, rqtp,
		struct timespec __user *, rmtp)
{
	const struct k_clock *kc = clockid_to_kclock(which_clock);
	struct timespec64 t64;
	struct timespec t;

	if (!kc)
		return -EINVAL;
	if (!kc->nsleep)
		return -ENANOSLEEP_NOTSUP;

	if (copy_from_user(&t, rqtp, sizeof (struct timespec)))
		return -EFAULT;

	t64 = timespec_to_timespec64(t);
	if (!timespec64_valid(&t64))
		return -EINVAL;
	if (flags & TIMER_ABSTIME)
		rmtp = NULL;
	current->restart_block.nanosleep.type = rmtp ? TT_NATIVE : TT_NONE;
	current->restart_block.nanosleep.rmtp = rmtp;

	return kc->nsleep(which_clock, flags, &t64);
}
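
/*
 * Illustrative userspace sketch (not part of this file): an absolute
 * sleep against a fixed deadline. As the code above shows, TIMER_ABSTIME
 * makes rmtp irrelevant, so a signal can be answered by retrying with the
 * unchanged deadline and no drift accumulates.
 *
 *	#include <errno.h>
 *	#include <time.h>
 *
 *	struct timespec deadline;
 *
 *	clock_gettime(CLOCK_MONOTONIC, &deadline);
 *	deadline.tv_sec += 2;	// wake exactly 2 s from now
 *	while (clock_nanosleep(CLOCK_MONOTONIC, TIMER_ABSTIME,
 *			       &deadline, NULL) == EINTR)
 *		;		// interrupted: same absolute target
 */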
#ifdef CONFIG_COMPAT
COMPAT_SYSCALL_DEFINE4(clock_nanosleep, clockid_t, which_clock, int, flags,
		       struct compat_timespec __user *, rqtp,
		       struct compat_timespec __user *, rmtp)
{
	const struct k_clock *kc = clockid_to_kclock(which_clock);
	struct timespec64 t64;
	struct timespec t;

	if (!kc)
		return -EINVAL;
	if (!kc->nsleep)
		return -ENANOSLEEP_NOTSUP;

	if (compat_get_timespec(&t, rqtp))
		return -EFAULT;

	t64 = timespec_to_timespec64(t);
	if (!timespec64_valid(&t64))
		return -EINVAL;
	if (flags & TIMER_ABSTIME)
		rmtp = NULL;
	current->restart_block.nanosleep.type = rmtp ? TT_COMPAT : TT_NONE;
	current->restart_block.nanosleep.compat_rmtp = rmtp;

	return kc->nsleep(which_clock, flags, &t64);
}
#endif
static const struct k_clock clock_realtime = {
	.clock_getres		= posix_get_hrtimer_res,
	.clock_get		= posix_clock_realtime_get,
	.clock_set		= posix_clock_realtime_set,
	.clock_adj		= posix_clock_realtime_adj,
	.nsleep			= common_nsleep,
	.nsleep_restart		= hrtimer_nanosleep_restart,
	.timer_create		= common_timer_create,
	.timer_set		= common_timer_set,
	.timer_get		= common_timer_get,
	.timer_del		= common_timer_del,
	.timer_rearm		= common_hrtimer_rearm,
	.timer_forward		= common_hrtimer_forward,
	.timer_remaining	= common_hrtimer_remaining,
	.timer_try_to_cancel	= common_hrtimer_try_to_cancel,
	.timer_arm		= common_hrtimer_arm,
};
static const struct k_clock clock_monotonic = {
	.clock_getres		= posix_get_hrtimer_res,
	.clock_get		= posix_ktime_get_ts,
	.nsleep			= common_nsleep,
	.nsleep_restart		= hrtimer_nanosleep_restart,
	.timer_create		= common_timer_create,
	.timer_set		= common_timer_set,
	.timer_get		= common_timer_get,
	.timer_del		= common_timer_del,
	.timer_rearm		= common_hrtimer_rearm,
	.timer_forward		= common_hrtimer_forward,
	.timer_remaining	= common_hrtimer_remaining,
	.timer_try_to_cancel	= common_hrtimer_try_to_cancel,
	.timer_arm		= common_hrtimer_arm,
};
static const struct k_clock clock_monotonic_raw = {
	.clock_getres		= posix_get_hrtimer_res,
	.clock_get		= posix_get_monotonic_raw,
};

static const struct k_clock clock_realtime_coarse = {
	.clock_getres		= posix_get_coarse_res,
	.clock_get		= posix_get_realtime_coarse,
};

static const struct k_clock clock_monotonic_coarse = {
	.clock_getres		= posix_get_coarse_res,
	.clock_get		= posix_get_monotonic_coarse,
};
static const struct k_clock clock_tai = {
	.clock_getres		= posix_get_hrtimer_res,
	.clock_get		= posix_get_tai,
	.nsleep			= common_nsleep,
	.nsleep_restart		= hrtimer_nanosleep_restart,
	.timer_create		= common_timer_create,
	.timer_set		= common_timer_set,
	.timer_get		= common_timer_get,
	.timer_del		= common_timer_del,
	.timer_rearm		= common_hrtimer_rearm,
	.timer_forward		= common_hrtimer_forward,
	.timer_remaining	= common_hrtimer_remaining,
	.timer_try_to_cancel	= common_hrtimer_try_to_cancel,
	.timer_arm		= common_hrtimer_arm,
};

static const struct k_clock clock_boottime = {
	.clock_getres		= posix_get_hrtimer_res,
	.clock_get		= posix_get_boottime,
	.nsleep			= common_nsleep,
	.nsleep_restart		= hrtimer_nanosleep_restart,
	.timer_create		= common_timer_create,
	.timer_set		= common_timer_set,
	.timer_get		= common_timer_get,
	.timer_del		= common_timer_del,
	.timer_rearm		= common_hrtimer_rearm,
	.timer_forward		= common_hrtimer_forward,
	.timer_remaining	= common_hrtimer_remaining,
	.timer_try_to_cancel	= common_hrtimer_try_to_cancel,
	.timer_arm		= common_hrtimer_arm,
};
static const struct k_clock * const posix_clocks[] = {
	[CLOCK_REALTIME]		= &clock_realtime,
	[CLOCK_MONOTONIC]		= &clock_monotonic,
	[CLOCK_PROCESS_CPUTIME_ID]	= &clock_process,
	[CLOCK_THREAD_CPUTIME_ID]	= &clock_thread,
	[CLOCK_MONOTONIC_RAW]		= &clock_monotonic_raw,
	[CLOCK_REALTIME_COARSE]		= &clock_realtime_coarse,
	[CLOCK_MONOTONIC_COARSE]	= &clock_monotonic_coarse,
	[CLOCK_BOOTTIME]		= &clock_boottime,
	[CLOCK_REALTIME_ALARM]		= &alarm_clock,
	[CLOCK_BOOTTIME_ALARM]		= &alarm_clock,
	[CLOCK_TAI]			= &clock_tai,
};
static const struct k_clock *clockid_to_kclock(const clockid_t id)
{
	if (id < 0)
		return (id & CLOCKFD_MASK) == CLOCKFD ?
			&clock_posix_dynamic : &clock_posix_cpu;

	if (id >= ARRAY_SIZE(posix_clocks) || !posix_clocks[id])
		return NULL;
	return posix_clocks[id];
}
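
/*
 * Negative clockids are routed above to clock_posix_cpu or, when they
 * carry the CLOCKFD marker, to clock_posix_dynamic. For dynamic clocks
 * the id encodes an open file descriptor of a posix_clock character
 * device. Illustrative userspace sketch (not part of this file; the
 * macro mirrors FD_TO_CLOCKID as used e.g. by the kernel's PTP test
 * program):
 *
 *	#include <fcntl.h>
 *	#include <time.h>
 *
 *	#define CLOCKFD			3
 *	#define FD_TO_CLOCKID(fd)	((~(clockid_t)(fd) << 3) | CLOCKFD)
 *
 *	int fd = open("/dev/ptp0", O_RDWR);	// a posix_clock device
 *	struct timespec ts;
 *
 *	if (fd >= 0)
 *		clock_gettime(FD_TO_CLOCKID(fd), &ts);
 */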