/*
 * linux/kernel/posix-timers.c
 *
 * 2002-10-15  Posix Clocks & timers
 *             by George Anzinger george@mvista.com
 *             Copyright (C) 2002 2003 by MontaVista Software.
 *
 * 2004-06-01  Fix CLOCK_REALTIME clock/timer TIMER_ABSTIME bug.
 *             Copyright (C) 2004 Boris Hu
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or (at
 * your option) any later version.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 *
 * MontaVista Software | 1237 East Arques Avenue | Sunnyvale | CA 94085 | USA
 */
/* These are all the functions necessary to implement
 * POSIX clocks & timers
 */
#include <linux/interrupt.h>
#include <linux/slab.h>
#include <linux/time.h>
#include <linux/mutex.h>
#include <linux/sched/task.h>

#include <linux/uaccess.h>
#include <linux/list.h>
#include <linux/init.h>
#include <linux/compiler.h>
#include <linux/hash.h>
#include <linux/posix-clock.h>
#include <linux/posix-timers.h>
#include <linux/syscalls.h>
#include <linux/wait.h>
#include <linux/workqueue.h>
#include <linux/export.h>
#include <linux/hashtable.h>
#include <linux/compat.h>
#include <linux/nospec.h>

#include "timekeeping.h"
#include "posix-timers.h"
/*
 * Management arrays for POSIX timers. Timers are kept in a static hash
 * table with 512 entries.
 * Timer ids are allocated by a local routine, which selects the proper
 * hash head by a key constructed from the current->signal address and a
 * per signal struct counter. This keeps timer ids unique per process,
 * but they can intersect between processes.
 */
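/*
 * Illustration of the id scheme above (hypothetical numbers, not from a
 * real run): two processes can both be handed timer id 0, because the
 * bucket index mixes the signal_struct pointer into the key:
 *
 *	hash(sigA, 0) == hash_32(hash32_ptr(sigA) ^ 0, 9)  ->  e.g. bucket 137
 *	hash(sigB, 0) == hash_32(hash32_ptr(sigB) ^ 0, 9)  ->  e.g. bucket 401
 *
 * Lookups always start from current->signal, so even colliding buckets
 * cannot return another process' timer.
 */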
/*
 * Let's keep our timers in a slab cache :-)
 */
static struct kmem_cache *posix_timers_cache;

static DEFINE_HASHTABLE(posix_timers_hashtable, 9);
static DEFINE_SPINLOCK(hash_lock);
static const struct k_clock * const posix_clocks[];
static const struct k_clock *clockid_to_kclock(const clockid_t id);
static const struct k_clock clock_realtime, clock_monotonic;
/*
 * We assume that the new SIGEV_THREAD_ID shares no bits with the other
 * SIGEV values. Here we put out an error if this assumption fails.
 */
#if SIGEV_THREAD_ID != (SIGEV_THREAD_ID & \
			~(SIGEV_SIGNAL | SIGEV_NONE | SIGEV_THREAD))
#error "SIGEV_THREAD_ID must not share bit with other SIGEV values!"
#endif
/*
 * parisc wants ENOTSUP instead of EOPNOTSUPP
 */
#ifndef ENOTSUP
# define ENANOSLEEP_NOTSUP EOPNOTSUPP
#else
# define ENANOSLEEP_NOTSUP ENOTSUP
#endif
/*
 * The timer ID is turned into a timer address by posix_timer_by_id().
 * Verifying a valid ID consists of:
 *
 * a) checking that the lookup returns a timer rather than NULL.
 * b) checking that the timer id matches the one in the timer itself.
 * c) that the timer owner is in the caller's thread group.
 */
/*
 * CLOCKs: The POSIX standard calls for a couple of clocks and allows us
 *	    to implement others. This structure defines the various clocks
 *	    and allows the possibility of adding others.
 *
 * RESOLUTION: Clock resolution is used to round up timer and interval
 *	    times, NOT to report clock times, which are reported with as
 *	    much resolution as the system can muster. In some cases this
 *	    resolution may depend on the underlying clock hardware and
 *	    may not be quantifiable until run time, and only then is the
 *	    necessary code written. The standard says we should say
 *	    something about this issue in the documentation...
 *
 * FUNCTIONS: The CLOCKs structure defines possible functions to
 *	    handle various clock functions.
 *
 *	    The standard POSIX timer management code assumes the
 *	    following: 1.) The k_itimer struct (sched.h) is used for
 *	    the timer. 2.) The list, it_lock, it_clock, it_id and
 *	    it_pid fields are not modified by timer code.
 *
 * Permissions: It is assumed that the clock_settime() function defined
 *	    for each clock will take care of permission checks. Some
 *	    clocks may be settable by any user (i.e. local process
 *	    clocks), others not. Currently the only settable clock we
 *	    have is CLOCK_REALTIME and its high resolution counterpart,
 *	    both of which we beg off on and pass to do_sys_settimeofday().
 */
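/*
 * A rough sketch of the interface described above (hypothetical clock
 * name, for illustration only): a clock that can merely be read fills in
 * just the two read callbacks, while full timer support additionally
 * wires the timer_* hooks to the common_* helpers further down:
 *
 *	static const struct k_clock clock_foo = {
 *		.clock_getres	= posix_get_hrtimer_res,
 *		.clock_get	= posix_clock_realtime_get,
 *	};
 *
 * See clock_monotonic_raw (read-only) and clock_realtime (full timer
 * support) near the end of this file for the real instances.
 */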
static struct k_itimer *__lock_timer(timer_t timer_id, unsigned long *flags);

#define lock_timer(tid, flags)						   \
({	struct k_itimer *__timr;					   \
	__cond_lock(&__timr->it_lock, __timr = __lock_timer(tid, flags)); \
	__timr;								   \
})
static int hash(struct signal_struct *sig, unsigned int nr)
{
	return hash_32(hash32_ptr(sig) ^ nr, HASH_BITS(posix_timers_hashtable));
}
static struct k_itimer *__posix_timers_find(struct hlist_head *head,
					    struct signal_struct *sig,
					    timer_t id)
{
	struct k_itimer *timer;

	hlist_for_each_entry_rcu(timer, head, t_hash) {
		if ((timer->it_signal == sig) && (timer->it_id == id))
			return timer;
	}
	return NULL;
}
static struct k_itimer *posix_timer_by_id(timer_t id)
{
	struct signal_struct *sig = current->signal;
	struct hlist_head *head = &posix_timers_hashtable[hash(sig, id)];

	return __posix_timers_find(head, sig, id);
}
static int posix_timer_add(struct k_itimer *timer)
{
	struct signal_struct *sig = current->signal;
	int first_free_id = sig->posix_timer_id;
	struct hlist_head *head;
	int ret = -ENOENT;

	do {
		spin_lock(&hash_lock);
		head = &posix_timers_hashtable[hash(sig, sig->posix_timer_id)];
		if (!__posix_timers_find(head, sig, sig->posix_timer_id)) {
			hlist_add_head_rcu(&timer->t_hash, head);
			ret = sig->posix_timer_id;
		}
		if (++sig->posix_timer_id < 0)
			sig->posix_timer_id = 0;
		if ((sig->posix_timer_id == first_free_id) && (ret == -ENOENT))
			/* Loop over all possible ids completed */
			ret = -EAGAIN;
		spin_unlock(&hash_lock);
	} while (ret == -ENOENT);

	return ret;
}
static inline void unlock_timer(struct k_itimer *timr, unsigned long flags)
{
	spin_unlock_irqrestore(&timr->it_lock, flags);
}
/* Get clock_realtime */
static int posix_clock_realtime_get(clockid_t which_clock, struct timespec64 *tp)
{
	ktime_get_real_ts64(tp);
	return 0;
}
/* Set clock_realtime */
static int posix_clock_realtime_set(const clockid_t which_clock,
				    const struct timespec64 *tp)
{
	return do_sys_settimeofday64(tp, NULL);
}
static int posix_clock_realtime_adj(const clockid_t which_clock,
				    struct timex *t)
{
	return do_adjtimex(t);
}
/*
 * Get monotonic time for posix timers
 */
static int posix_ktime_get_ts(clockid_t which_clock, struct timespec64 *tp)
{
	ktime_get_ts64(tp);
	return 0;
}
/*
 * Get monotonic-raw time for posix timers
 */
static int posix_get_monotonic_raw(clockid_t which_clock, struct timespec64 *tp)
{
	getrawmonotonic64(tp);
	return 0;
}
static int posix_get_realtime_coarse(clockid_t which_clock, struct timespec64 *tp)
{
	*tp = current_kernel_time64();
	return 0;
}
static int posix_get_monotonic_coarse(clockid_t which_clock,
				      struct timespec64 *tp)
{
	*tp = get_monotonic_coarse64();
	return 0;
}
static int posix_get_coarse_res(const clockid_t which_clock, struct timespec64 *tp)
{
	*tp = ktime_to_timespec64(KTIME_LOW_RES);
	return 0;
}
static int posix_get_tai(clockid_t which_clock, struct timespec64 *tp)
{
	timekeeping_clocktai64(tp);
	return 0;
}
static int posix_get_monotonic_active(clockid_t which_clock,
				      struct timespec64 *tp)
{
	ktime_get_active_ts64(tp);
	return 0;
}
static int posix_get_hrtimer_res(clockid_t which_clock, struct timespec64 *tp)
{
	tp->tv_sec = 0;
	tp->tv_nsec = hrtimer_resolution;
	return 0;
}
/*
 * Initialize everything, well, just everything in Posix clocks/timers ;)
 */
static __init int init_posix_timers(void)
{
	posix_timers_cache = kmem_cache_create("posix_timers_cache",
					sizeof (struct k_itimer), 0, SLAB_PANIC,
					NULL);
	return 0;
}
__initcall(init_posix_timers);
static void common_hrtimer_rearm(struct k_itimer *timr)
{
	struct hrtimer *timer = &timr->it.real.timer;

	if (!timr->it_interval)
		return;

	timr->it_overrun += (unsigned int) hrtimer_forward(timer,
						timer->base->get_time(),
						timr->it_interval);
	hrtimer_restart(timer);
}
/*
 * This function is exported for use by the signal deliver code. It is
 * called just prior to the info block being released and passes that
 * block to us. Its function is to update the overrun entry AND to
 * restart the timer. It should only be called if the timer is to be
 * restarted (i.e. we have flagged this in the sys_private entry of the
 * info block).
 *
 * To protect against the timer going away while the interrupt is queued,
 * we require that the it_requeue_pending flag be set.
 */
void posixtimer_rearm(struct siginfo *info)
{
	struct k_itimer *timr;
	unsigned long flags;

	timr = lock_timer(info->si_tid, &flags);
	if (!timr)
		return;

	if (timr->it_requeue_pending == info->si_sys_private) {
		timr->kclock->timer_rearm(timr);

		timr->it_active = 1;
		timr->it_overrun_last = timr->it_overrun;
		timr->it_overrun = -1;
		++timr->it_requeue_pending;

		info->si_overrun += timr->it_overrun_last;
	}

	unlock_timer(timr, flags);
}
int posix_timer_event(struct k_itimer *timr, int si_private)
{
	struct task_struct *task;
	int shared, ret = -1;
	/*
	 * FIXME: if ->sigq is queued we can race with
	 * dequeue_signal()->posixtimer_rearm().
	 *
	 * If dequeue_signal() sees the "right" value of
	 * si_sys_private it calls posixtimer_rearm().
	 * We re-queue ->sigq and drop ->it_lock().
	 *
	 * posixtimer_rearm() locks the timer
	 * and re-schedules it while ->sigq is pending.
	 * Not really bad, but not what we want.
	 */
	timr->sigq->info.si_sys_private = si_private;

	rcu_read_lock();
	task = pid_task(timr->it_pid, PIDTYPE_PID);
	if (task) {
		shared = !(timr->it_sigev_notify & SIGEV_THREAD_ID);
		ret = send_sigqueue(timr->sigq, task, shared);
	}
	rcu_read_unlock();
	/* If we failed to send the signal the timer stops. */
	return ret > 0;
}
/*
 * This function gets called when a POSIX.1b interval timer expires. It
 * is used as the callback of the kernel internal hrtimer.
 *
 * This code is for CLOCK_REALTIME* and CLOCK_MONOTONIC* timers.
 */
static enum hrtimer_restart posix_timer_fn(struct hrtimer *timer)
{
	struct k_itimer *timr;
	unsigned long flags;
	int si_private = 0;
	enum hrtimer_restart ret = HRTIMER_NORESTART;

	timr = container_of(timer, struct k_itimer, it.real.timer);
	spin_lock_irqsave(&timr->it_lock, flags);

	timr->it_active = 0;
	if (timr->it_interval != 0)
		si_private = ++timr->it_requeue_pending;

	if (posix_timer_event(timr, si_private)) {
		/*
		 * The signal was not sent because of SIG_IGN. We will not
		 * get a callback to restart it AND it should be restarted.
		 */
		if (timr->it_interval != 0) {
			ktime_t now = hrtimer_cb_get_time(timer);

			/*
			 * FIXME: What we really want, is to stop this
			 * timer completely and restart it in case the
			 * SIG_IGN is removed. This is a non trivial
			 * change which involves sighand locking
			 * (sigh !), which we don't want to do late in
			 * the release cycle.
			 *
			 * For now we just let timers with an interval
			 * less than a jiffie expire every jiffie to
			 * avoid softirq starvation in case of SIG_IGN
			 * and a very small interval, which would put
			 * the timer right back on the softirq pending
			 * list. By moving now ahead of time we trick
			 * hrtimer_forward() to expire the timer
			 * later, while we still maintain the overrun
			 * accuracy, but have some inconsistency in
			 * the timer_gettime() case. This is at least
			 * better than a starved softirq. A more
			 * complex fix which also solves another related
			 * inconsistency is already in the pipeline.
			 */
#ifdef CONFIG_HIGH_RES_TIMERS
			{
				ktime_t kj = NSEC_PER_SEC / HZ;

				if (timr->it_interval < kj)
					now = ktime_add(now, kj);
			}
#endif
			timr->it_overrun += (unsigned int)
				hrtimer_forward(timer, now,
						timr->it_interval);
			ret = HRTIMER_RESTART;
			++timr->it_requeue_pending;
			timr->it_active = 1;
		}
	}

	unlock_timer(timr, flags);
	return ret;
}
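/*
 * A worked example of the SIG_IGN rescheduling trick above (numbers are
 * illustrative): with HZ=250 a jiffie is 4ms, so kj = NSEC_PER_SEC / HZ =
 * 4,000,000ns. An ignored timer with a 1ms interval would otherwise be
 * forwarded to the immediate past and expire again right away. Moving
 * 'now' 4ms ahead makes hrtimer_forward() program the next expiry at
 * least a jiffie away, trading a small timer_gettime() inaccuracy for not
 * starving the softirq.
 */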
static struct pid *good_sigevent(sigevent_t * event)
{
	struct task_struct *rtn = current->group_leader;

	switch (event->sigev_notify) {
	case SIGEV_SIGNAL | SIGEV_THREAD_ID:
		rtn = find_task_by_vpid(event->sigev_notify_thread_id);
		if (!rtn || !same_thread_group(rtn, current))
			return NULL;
		/* FALLTHRU */
	case SIGEV_SIGNAL:
	case SIGEV_THREAD:
		if (event->sigev_signo <= 0 || event->sigev_signo > SIGRTMAX)
			return NULL;
		/* FALLTHRU */
	case SIGEV_NONE:
		return task_pid(rtn);
	default:
		return NULL;
	}
}
static struct k_itimer * alloc_posix_timer(void)
{
	struct k_itimer *tmr;
	tmr = kmem_cache_zalloc(posix_timers_cache, GFP_KERNEL);
	if (!tmr)
		return tmr;
	if (unlikely(!(tmr->sigq = sigqueue_alloc()))) {
		kmem_cache_free(posix_timers_cache, tmr);
		return NULL;
	}
	clear_siginfo(&tmr->sigq->info);
	return tmr;
}
static void k_itimer_rcu_free(struct rcu_head *head)
{
	struct k_itimer *tmr = container_of(head, struct k_itimer, it.rcu);

	kmem_cache_free(posix_timers_cache, tmr);
}
#define IT_ID_SET	1
#define IT_ID_NOT_SET	0
static void release_posix_timer(struct k_itimer *tmr, int it_id_set)
{
	if (it_id_set) {
		unsigned long flags;
		spin_lock_irqsave(&hash_lock, flags);
		hlist_del_rcu(&tmr->t_hash);
		spin_unlock_irqrestore(&hash_lock, flags);
	}
	put_pid(tmr->it_pid);
	sigqueue_free(tmr->sigq);
	call_rcu(&tmr->it.rcu, k_itimer_rcu_free);
}
static int common_timer_create(struct k_itimer *new_timer)
{
	hrtimer_init(&new_timer->it.real.timer, new_timer->it_clock, 0);
	return 0;
}
/* Create a POSIX.1b interval timer. */
static int do_timer_create(clockid_t which_clock, struct sigevent *event,
			   timer_t __user *created_timer_id)
{
	const struct k_clock *kc = clockid_to_kclock(which_clock);
	struct k_itimer *new_timer;
	int error, new_timer_id;
	int it_id_set = IT_ID_NOT_SET;

	if (!kc)
		return -EINVAL;
	if (!kc->timer_create)
		return -EOPNOTSUPP;

	new_timer = alloc_posix_timer();
	if (unlikely(!new_timer))
		return -EAGAIN;

	spin_lock_init(&new_timer->it_lock);
	new_timer_id = posix_timer_add(new_timer);
	if (new_timer_id < 0) {
		error = new_timer_id;
		goto out;
	}

	it_id_set = IT_ID_SET;
	new_timer->it_id = (timer_t) new_timer_id;
	new_timer->it_clock = which_clock;
	new_timer->kclock = kc;
	new_timer->it_overrun = -1;

	if (event) {
		rcu_read_lock();
		new_timer->it_pid = get_pid(good_sigevent(event));
		rcu_read_unlock();
		if (!new_timer->it_pid) {
			error = -EINVAL;
			goto out;
		}
		new_timer->it_sigev_notify     = event->sigev_notify;
		new_timer->sigq->info.si_signo = event->sigev_signo;
		new_timer->sigq->info.si_value = event->sigev_value;
	} else {
		new_timer->it_sigev_notify     = SIGEV_SIGNAL;
		new_timer->sigq->info.si_signo = SIGALRM;
		memset(&new_timer->sigq->info.si_value, 0, sizeof(sigval_t));
		new_timer->sigq->info.si_value.sival_int = new_timer->it_id;
		new_timer->it_pid = get_pid(task_tgid(current));
	}

	new_timer->sigq->info.si_tid   = new_timer->it_id;
	new_timer->sigq->info.si_code  = SI_TIMER;

	if (copy_to_user(created_timer_id,
			 &new_timer_id, sizeof (new_timer_id))) {
		error = -EFAULT;
		goto out;
	}

	error = kc->timer_create(new_timer);
	if (error)
		goto out;

	spin_lock_irq(&current->sighand->siglock);
	new_timer->it_signal = current->signal;
	list_add(&new_timer->list, &current->signal->posix_timers);
	spin_unlock_irq(&current->sighand->siglock);

	return 0;
	/*
	 * In the case of the timer belonging to another task, after
	 * the task is unlocked, the timer is owned by the other task
	 * and may cease to exist at any time. Don't use or modify
	 * new_timer after the unlock call.
	 */
out:
	release_posix_timer(new_timer, it_id_set);
	return error;
}
SYSCALL_DEFINE3(timer_create, const clockid_t, which_clock,
		struct sigevent __user *, timer_event_spec,
		timer_t __user *, created_timer_id)
{
	if (timer_event_spec) {
		sigevent_t event;

		if (copy_from_user(&event, timer_event_spec, sizeof (event)))
			return -EFAULT;
		return do_timer_create(which_clock, &event, created_timer_id);
	}
	return do_timer_create(which_clock, NULL, created_timer_id);
}
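/*
 * Userspace view of the default case above (illustrative only): passing
 * timer_event_spec == NULL behaves as if the caller had supplied
 *
 *	struct sigevent ev = {
 *		.sigev_notify = SIGEV_SIGNAL,
 *		.sigev_signo  = SIGALRM,
 *		.sigev_value.sival_int = <the new timer id>,
 *	};
 *
 * which mirrors the defaults filled in by do_timer_create().
 */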
#ifdef CONFIG_COMPAT
COMPAT_SYSCALL_DEFINE3(timer_create, clockid_t, which_clock,
		       struct compat_sigevent __user *, timer_event_spec,
		       timer_t __user *, created_timer_id)
{
	if (timer_event_spec) {
		sigevent_t event;

		if (get_compat_sigevent(&event, timer_event_spec))
			return -EFAULT;
		return do_timer_create(which_clock, &event, created_timer_id);
	}
	return do_timer_create(which_clock, NULL, created_timer_id);
}
#endif
/*
 * Locking issues: We need to protect the result of the id lookup until
 * we get the timer locked down so it is not deleted under us. The
 * removal is done under the hash lock, so we use that here to bridge
 * the find to the timer lock. To avoid a deadlock, the timer id MUST
 * be released without holding the timer lock.
 */
static struct k_itimer *__lock_timer(timer_t timer_id, unsigned long *flags)
{
	struct k_itimer *timr;

	/*
	 * timer_t could be any type >= int and we want to make sure any
	 * @timer_id outside positive int range fails lookup.
	 */
	if ((unsigned long long)timer_id > INT_MAX)
		return NULL;

	rcu_read_lock();
	timr = posix_timer_by_id(timer_id);
	if (timr) {
		spin_lock_irqsave(&timr->it_lock, *flags);
		if (timr->it_signal == current->signal) {
			rcu_read_unlock();
			return timr;
		}
		spin_unlock_irqrestore(&timr->it_lock, *flags);
	}
	rcu_read_unlock();

	return NULL;
}
static ktime_t common_hrtimer_remaining(struct k_itimer *timr, ktime_t now)
{
	struct hrtimer *timer = &timr->it.real.timer;

	return __hrtimer_expires_remaining_adjusted(timer, now);
}
static int common_hrtimer_forward(struct k_itimer *timr, ktime_t now)
{
	struct hrtimer *timer = &timr->it.real.timer;

	return (int)hrtimer_forward(timer, now, timr->it_interval);
}
/*
 * Get the time remaining on a POSIX.1b interval timer. This function
 * is ALWAYS called with spin_lock_irq on the timer, thus it must not
 * mess with irq.
 *
 * We have a couple of messes to clean up here. First there is the case
 * of a timer that has a requeue pending. These timers should appear to
 * be in the timer list with an expiry as if we were to requeue them
 * now.
 *
 * The second issue is the SIGEV_NONE timer which may be active but is
 * not really ever put in the timer list (to save system resources).
 * This timer may be expired, and if so, we will do it here. Otherwise
 * it is the same as a requeue pending timer with regard to what we
 * should report.
 */
void common_timer_get(struct k_itimer *timr, struct itimerspec64 *cur_setting)
{
	const struct k_clock *kc = timr->kclock;
	ktime_t now, remaining, iv;
	struct timespec64 ts64;
	bool sig_none;

	sig_none = timr->it_sigev_notify == SIGEV_NONE;
	iv = timr->it_interval;

	/* interval timer ? */
	if (iv) {
		cur_setting->it_interval = ktime_to_timespec64(iv);
	} else if (!timr->it_active) {
		/*
		 * SIGEV_NONE oneshot timers are never queued. Check them
		 * below.
		 */
		if (!sig_none)
			return;
	}

	/*
	 * The timespec64 based conversion is suboptimal, but it's not
	 * worth implementing yet another callback.
	 */
	kc->clock_get(timr->it_clock, &ts64);
	now = timespec64_to_ktime(ts64);

	/*
	 * When a requeue is pending or this is a SIGEV_NONE timer move the
	 * expiry time forward by intervals, so expiry is > now.
	 */
	if (iv && (timr->it_requeue_pending & REQUEUE_PENDING || sig_none))
		timr->it_overrun += kc->timer_forward(timr, now);

	remaining = kc->timer_remaining(timr, now);
	/* Return 0 only, when the timer is expired and not pending */
	if (remaining <= 0) {
		/*
		 * A single shot SIGEV_NONE timer must return 0, when
		 * it is expired !
		 */
		if (!sig_none)
			cur_setting->it_value.tv_nsec = 1;
	} else {
		cur_setting->it_value = ktime_to_timespec64(remaining);
	}
}
/* Get the time remaining on a POSIX.1b interval timer. */
static int do_timer_gettime(timer_t timer_id, struct itimerspec64 *setting)
{
	struct k_itimer *timr;
	const struct k_clock *kc;
	unsigned long flags;
	int ret = 0;

	timr = lock_timer(timer_id, &flags);
	if (!timr)
		return -EINVAL;

	memset(setting, 0, sizeof(*setting));
	kc = timr->kclock;
	if (WARN_ON_ONCE(!kc || !kc->timer_get))
		ret = -EINVAL;
	else
		kc->timer_get(timr, setting);

	unlock_timer(timr, flags);
	return ret;
}
/* Get the time remaining on a POSIX.1b interval timer. */
SYSCALL_DEFINE2(timer_gettime, timer_t, timer_id,
		struct itimerspec __user *, setting)
{
	struct itimerspec64 cur_setting;

	int ret = do_timer_gettime(timer_id, &cur_setting);
	if (!ret) {
		if (put_itimerspec64(&cur_setting, setting))
			ret = -EFAULT;
	}
	return ret;
}
#ifdef CONFIG_COMPAT
COMPAT_SYSCALL_DEFINE2(timer_gettime, timer_t, timer_id,
		       struct compat_itimerspec __user *, setting)
{
	struct itimerspec64 cur_setting;

	int ret = do_timer_gettime(timer_id, &cur_setting);
	if (!ret) {
		if (put_compat_itimerspec64(&cur_setting, setting))
			ret = -EFAULT;
	}
	return ret;
}
#endif
/*
 * Get the number of overruns of a POSIX.1b interval timer. This is to
 * be the overrun of the timer last delivered. At the same time we are
 * accumulating overruns on the next timer. The overrun is frozen when
 * the signal is delivered, either at the notify time (if the info block
 * is not queued) or at the actual delivery time (as we are informed by
 * the callback to posixtimer_rearm()). So all we need to do is
 * to pick up the frozen overrun.
 */
SYSCALL_DEFINE1(timer_getoverrun, timer_t, timer_id)
{
	struct k_itimer *timr;
	int overrun;
	unsigned long flags;

	timr = lock_timer(timer_id, &flags);
	if (!timr)
		return -EINVAL;

	overrun = timr->it_overrun_last;
	unlock_timer(timr, flags);

	return overrun;
}
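/*
 * Worked example (illustrative): a timer with a 10ms interval whose
 * signal delivery is held up for ~35ms expires three more times before
 * the queued signal is finally delivered. That count of 3 is frozen in
 * it_overrun_last at delivery time, and timer_getoverrun() keeps
 * reporting 3 until the next signal is delivered.
 */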
static void common_hrtimer_arm(struct k_itimer *timr, ktime_t expires,
			       bool absolute, bool sigev_none)
{
	struct hrtimer *timer = &timr->it.real.timer;
	enum hrtimer_mode mode;

	mode = absolute ? HRTIMER_MODE_ABS : HRTIMER_MODE_REL;
	/*
	 * Posix magic: Relative CLOCK_REALTIME timers are not affected by
	 * clock modifications, so they become CLOCK_MONOTONIC based under the
	 * hood. See hrtimer_init(). Update timr->kclock, so the generic
	 * functions which use timr->kclock->clock_get() work.
	 *
	 * Note: it_clock stays unmodified, because the next timer_set() might
	 * use ABSTIME, so it needs to switch back.
	 */
	if (timr->it_clock == CLOCK_REALTIME)
		timr->kclock = absolute ? &clock_realtime : &clock_monotonic;

	hrtimer_init(&timr->it.real.timer, timr->it_clock, mode);
	timr->it.real.timer.function = posix_timer_fn;

	if (!absolute)
		expires = ktime_add_safe(expires, timer->base->get_time());
	hrtimer_set_expires(timer, expires);

	if (!sigev_none)
		hrtimer_start_expires(timer, HRTIMER_MODE_ABS);
}
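/*
 * Example of the "Posix magic" above (illustrative): a relative 5 second
 * CLOCK_REALTIME timer still fires roughly 5 seconds after being armed
 * even if clock_settime() jumps the wall clock forward an hour in the
 * meantime, because it is queued on the monotonic base. An absolute
 * timer armed with TIMER_ABSTIME stays on the realtime base and fires
 * immediately after such a jump past its expiry time.
 */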
static int common_hrtimer_try_to_cancel(struct k_itimer *timr)
{
	return hrtimer_try_to_cancel(&timr->it.real.timer);
}
/* Set a POSIX.1b interval timer. */
int common_timer_set(struct k_itimer *timr, int flags,
		     struct itimerspec64 *new_setting,
		     struct itimerspec64 *old_setting)
{
	const struct k_clock *kc = timr->kclock;
	bool sigev_none;
	ktime_t expires;

	if (old_setting)
		common_timer_get(timr, old_setting);

	/* Prevent rearming by clearing the interval */
	timr->it_interval = 0;
	/*
	 * Careful here. On SMP systems the timer expiry function could be
	 * active and spinning on timr->it_lock.
	 */
	if (kc->timer_try_to_cancel(timr) < 0)
		return TIMER_RETRY;

	timr->it_active = 0;
	timr->it_requeue_pending = (timr->it_requeue_pending + 2) &
		~REQUEUE_PENDING;
	timr->it_overrun_last = 0;

	/* Switch off the timer when it_value is zero */
	if (!new_setting->it_value.tv_sec && !new_setting->it_value.tv_nsec)
		return 0;

	timr->it_interval = timespec64_to_ktime(new_setting->it_interval);
	expires = timespec64_to_ktime(new_setting->it_value);
	sigev_none = timr->it_sigev_notify == SIGEV_NONE;

	kc->timer_arm(timr, expires, flags & TIMER_ABSTIME, sigev_none);
	timr->it_active = !sigev_none;
	return 0;
}
static int do_timer_settime(timer_t timer_id, int flags,
			    struct itimerspec64 *new_spec64,
			    struct itimerspec64 *old_spec64)
{
	const struct k_clock *kc;
	struct k_itimer *timr;
	unsigned long flag;
	int error = 0;

	if (!timespec64_valid(&new_spec64->it_interval) ||
	    !timespec64_valid(&new_spec64->it_value))
		return -EINVAL;

	if (old_spec64)
		memset(old_spec64, 0, sizeof(*old_spec64));
retry:
	timr = lock_timer(timer_id, &flag);
	if (!timr)
		return -EINVAL;

	kc = timr->kclock;
	if (WARN_ON_ONCE(!kc || !kc->timer_set))
		error = -EINVAL;
	else
		error = kc->timer_set(timr, flags, new_spec64, old_spec64);

	unlock_timer(timr, flag);
	if (error == TIMER_RETRY) {
		old_spec64 = NULL;	// We already got the old time...
		goto retry;
	}

	return error;
}
/* Set a POSIX.1b interval timer */
SYSCALL_DEFINE4(timer_settime, timer_t, timer_id, int, flags,
		const struct itimerspec __user *, new_setting,
		struct itimerspec __user *, old_setting)
{
	struct itimerspec64 new_spec, old_spec;
	struct itimerspec64 *rtn = old_setting ? &old_spec : NULL;
	int error = 0;

	if (!new_setting)
		return -EINVAL;

	if (get_itimerspec64(&new_spec, new_setting))
		return -EFAULT;

	error = do_timer_settime(timer_id, flags, &new_spec, rtn);
	if (!error && old_setting) {
		if (put_itimerspec64(&old_spec, old_setting))
			error = -EFAULT;
	}
	return error;
}
#ifdef CONFIG_COMPAT
COMPAT_SYSCALL_DEFINE4(timer_settime, timer_t, timer_id, int, flags,
		       struct compat_itimerspec __user *, new,
		       struct compat_itimerspec __user *, old)
{
	struct itimerspec64 new_spec, old_spec;
	struct itimerspec64 *rtn = old ? &old_spec : NULL;
	int error = 0;

	if (!new)
		return -EINVAL;
	if (get_compat_itimerspec64(&new_spec, new))
		return -EFAULT;

	error = do_timer_settime(timer_id, flags, &new_spec, rtn);
	if (!error && old) {
		if (put_compat_itimerspec64(&old_spec, old))
			error = -EFAULT;
	}
	return error;
}
#endif
int common_timer_del(struct k_itimer *timer)
{
	const struct k_clock *kc = timer->kclock;

	timer->it_interval = 0;
	if (kc->timer_try_to_cancel(timer) < 0)
		return TIMER_RETRY;
	timer->it_active = 0;
	return 0;
}
static inline int timer_delete_hook(struct k_itimer *timer)
{
	const struct k_clock *kc = timer->kclock;

	if (WARN_ON_ONCE(!kc || !kc->timer_del))
		return -EINVAL;
	return kc->timer_del(timer);
}
/* Delete a POSIX.1b interval timer. */
SYSCALL_DEFINE1(timer_delete, timer_t, timer_id)
{
	struct k_itimer *timer;
	unsigned long flags;

retry_delete:
	timer = lock_timer(timer_id, &flags);
	if (!timer)
		return -EINVAL;

	if (timer_delete_hook(timer) == TIMER_RETRY) {
		unlock_timer(timer, flags);
		goto retry_delete;
	}

	spin_lock(&current->sighand->siglock);
	list_del(&timer->list);
	spin_unlock(&current->sighand->siglock);
	/*
	 * This keeps any tasks waiting on the spin lock from thinking
	 * they got something (see the lock code above).
	 */
	timer->it_signal = NULL;

	unlock_timer(timer, flags);
	release_posix_timer(timer, IT_ID_SET);
	return 0;
}
/*
 * Delete a timer owned by the exiting process; used by exit_itimers().
 */
static void itimer_delete(struct k_itimer *timer)
{
	unsigned long flags;

retry_delete:
	spin_lock_irqsave(&timer->it_lock, flags);

	if (timer_delete_hook(timer) == TIMER_RETRY) {
		unlock_timer(timer, flags);
		goto retry_delete;
	}
	list_del(&timer->list);
	/*
	 * This keeps any tasks waiting on the spin lock from thinking
	 * they got something (see the lock code above).
	 */
	timer->it_signal = NULL;

	unlock_timer(timer, flags);
	release_posix_timer(timer, IT_ID_SET);
}
/*
 * This is called by do_exit or de_thread, only when there are no more
 * references to the shared signal_struct.
 */
void exit_itimers(struct signal_struct *sig)
{
	struct k_itimer *tmr;

	while (!list_empty(&sig->posix_timers)) {
		tmr = list_entry(sig->posix_timers.next, struct k_itimer, list);
		itimer_delete(tmr);
	}
}
SYSCALL_DEFINE2(clock_settime, const clockid_t, which_clock,
		const struct timespec __user *, tp)
{
	const struct k_clock *kc = clockid_to_kclock(which_clock);
	struct timespec64 new_tp;

	if (!kc || !kc->clock_set)
		return -EINVAL;

	if (get_timespec64(&new_tp, tp))
		return -EFAULT;

	return kc->clock_set(which_clock, &new_tp);
}
SYSCALL_DEFINE2(clock_gettime, const clockid_t, which_clock,
		struct timespec __user *, tp)
{
	const struct k_clock *kc = clockid_to_kclock(which_clock);
	struct timespec64 kernel_tp;
	int error;

	if (!kc)
		return -EINVAL;

	error = kc->clock_get(which_clock, &kernel_tp);

	if (!error && put_timespec64(&kernel_tp, tp))
		error = -EFAULT;

	return error;
}
SYSCALL_DEFINE2(clock_adjtime, const clockid_t, which_clock,
		struct timex __user *, utx)
{
	const struct k_clock *kc = clockid_to_kclock(which_clock);
	struct timex ktx;
	int err;

	if (!kc)
		return -EINVAL;
	if (!kc->clock_adj)
		return -EOPNOTSUPP;

	if (copy_from_user(&ktx, utx, sizeof(ktx)))
		return -EFAULT;

	err = kc->clock_adj(which_clock, &ktx);

	if (err >= 0 && copy_to_user(utx, &ktx, sizeof(ktx)))
		return -EFAULT;

	return err;
}
SYSCALL_DEFINE2(clock_getres, const clockid_t, which_clock,
		struct timespec __user *, tp)
{
	const struct k_clock *kc = clockid_to_kclock(which_clock);
	struct timespec64 rtn_tp;
	int error;

	if (!kc)
		return -EINVAL;

	error = kc->clock_getres(which_clock, &rtn_tp);

	if (!error && tp && put_timespec64(&rtn_tp, tp))
		error = -EFAULT;

	return error;
}
#ifdef CONFIG_COMPAT

COMPAT_SYSCALL_DEFINE2(clock_settime, clockid_t, which_clock,
		       struct compat_timespec __user *, tp)
{
	const struct k_clock *kc = clockid_to_kclock(which_clock);
	struct timespec64 ts;

	if (!kc || !kc->clock_set)
		return -EINVAL;

	if (compat_get_timespec64(&ts, tp))
		return -EFAULT;

	return kc->clock_set(which_clock, &ts);
}
COMPAT_SYSCALL_DEFINE2(clock_gettime, clockid_t, which_clock,
		       struct compat_timespec __user *, tp)
{
	const struct k_clock *kc = clockid_to_kclock(which_clock);
	struct timespec64 ts;
	int err;

	if (!kc)
		return -EINVAL;

	err = kc->clock_get(which_clock, &ts);

	if (!err && compat_put_timespec64(&ts, tp))
		err = -EFAULT;

	return err;
}
COMPAT_SYSCALL_DEFINE2(clock_adjtime, clockid_t, which_clock,
		       struct compat_timex __user *, utp)
{
	const struct k_clock *kc = clockid_to_kclock(which_clock);
	struct timex ktx;
	int err;

	if (!kc)
		return -EINVAL;
	if (!kc->clock_adj)
		return -EOPNOTSUPP;

	err = compat_get_timex(&ktx, utp);
	if (err)
		return err;

	err = kc->clock_adj(which_clock, &ktx);

	if (err >= 0)
		err = compat_put_timex(utp, &ktx);

	return err;
}
COMPAT_SYSCALL_DEFINE2(clock_getres, clockid_t, which_clock,
		       struct compat_timespec __user *, tp)
{
	const struct k_clock *kc = clockid_to_kclock(which_clock);
	struct timespec64 ts;
	int err;

	if (!kc)
		return -EINVAL;

	err = kc->clock_getres(which_clock, &ts);
	if (!err && tp && compat_put_timespec64(&ts, tp))
		return -EFAULT;

	return err;
}

#endif
/*
 * nanosleep for monotonic and realtime clocks
 */
static int common_nsleep(const clockid_t which_clock, int flags,
			 const struct timespec64 *rqtp)
{
	return hrtimer_nanosleep(rqtp, flags & TIMER_ABSTIME ?
				 HRTIMER_MODE_ABS : HRTIMER_MODE_REL,
				 which_clock);
}
SYSCALL_DEFINE4(clock_nanosleep, const clockid_t, which_clock, int, flags,
		const struct timespec __user *, rqtp,
		struct timespec __user *, rmtp)
{
	const struct k_clock *kc = clockid_to_kclock(which_clock);
	struct timespec64 t;

	if (!kc)
		return -EINVAL;
	if (!kc->nsleep)
		return -ENANOSLEEP_NOTSUP;

	if (get_timespec64(&t, rqtp))
		return -EFAULT;

	if (!timespec64_valid(&t))
		return -EINVAL;
	if (flags & TIMER_ABSTIME)
		rmtp = NULL;
	current->restart_block.nanosleep.type = rmtp ? TT_NATIVE : TT_NONE;
	current->restart_block.nanosleep.rmtp = rmtp;

	return kc->nsleep(which_clock, flags, &t);
}
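/*
 * Note on the TIMER_ABSTIME handling above (illustrative): for an
 * absolute sleep such as
 *
 *	clock_nanosleep(CLOCK_MONOTONIC, TIMER_ABSTIME, &deadline, NULL);
 *
 * rmtp is cleared because the remaining time is meaningless for absolute
 * sleeps; a caller interrupted by a signal simply retries with the same
 * deadline.
 */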
#ifdef CONFIG_COMPAT
COMPAT_SYSCALL_DEFINE4(clock_nanosleep, clockid_t, which_clock, int, flags,
		       struct compat_timespec __user *, rqtp,
		       struct compat_timespec __user *, rmtp)
{
	const struct k_clock *kc = clockid_to_kclock(which_clock);
	struct timespec64 t;

	if (!kc)
		return -EINVAL;
	if (!kc->nsleep)
		return -ENANOSLEEP_NOTSUP;

	if (compat_get_timespec64(&t, rqtp))
		return -EFAULT;

	if (!timespec64_valid(&t))
		return -EINVAL;
	if (flags & TIMER_ABSTIME)
		rmtp = NULL;
	current->restart_block.nanosleep.type = rmtp ? TT_COMPAT : TT_NONE;
	current->restart_block.nanosleep.compat_rmtp = rmtp;

	return kc->nsleep(which_clock, flags, &t);
}
#endif
static const struct k_clock clock_realtime = {
	.clock_getres		= posix_get_hrtimer_res,
	.clock_get		= posix_clock_realtime_get,
	.clock_set		= posix_clock_realtime_set,
	.clock_adj		= posix_clock_realtime_adj,
	.nsleep			= common_nsleep,
	.timer_create		= common_timer_create,
	.timer_set		= common_timer_set,
	.timer_get		= common_timer_get,
	.timer_del		= common_timer_del,
	.timer_rearm		= common_hrtimer_rearm,
	.timer_forward		= common_hrtimer_forward,
	.timer_remaining	= common_hrtimer_remaining,
	.timer_try_to_cancel	= common_hrtimer_try_to_cancel,
	.timer_arm		= common_hrtimer_arm,
};
static const struct k_clock clock_monotonic = {
	.clock_getres		= posix_get_hrtimer_res,
	.clock_get		= posix_ktime_get_ts,
	.nsleep			= common_nsleep,
	.timer_create		= common_timer_create,
	.timer_set		= common_timer_set,
	.timer_get		= common_timer_get,
	.timer_del		= common_timer_del,
	.timer_rearm		= common_hrtimer_rearm,
	.timer_forward		= common_hrtimer_forward,
	.timer_remaining	= common_hrtimer_remaining,
	.timer_try_to_cancel	= common_hrtimer_try_to_cancel,
	.timer_arm		= common_hrtimer_arm,
};
static const struct k_clock clock_monotonic_raw = {
	.clock_getres		= posix_get_hrtimer_res,
	.clock_get		= posix_get_monotonic_raw,
};
static const struct k_clock clock_realtime_coarse = {
	.clock_getres		= posix_get_coarse_res,
	.clock_get		= posix_get_realtime_coarse,
};
static const struct k_clock clock_monotonic_coarse = {
	.clock_getres		= posix_get_coarse_res,
	.clock_get		= posix_get_monotonic_coarse,
};
static const struct k_clock clock_tai = {
	.clock_getres		= posix_get_hrtimer_res,
	.clock_get		= posix_get_tai,
	.nsleep			= common_nsleep,
	.timer_create		= common_timer_create,
	.timer_set		= common_timer_set,
	.timer_get		= common_timer_get,
	.timer_del		= common_timer_del,
	.timer_rearm		= common_hrtimer_rearm,
	.timer_forward		= common_hrtimer_forward,
	.timer_remaining	= common_hrtimer_remaining,
	.timer_try_to_cancel	= common_hrtimer_try_to_cancel,
	.timer_arm		= common_hrtimer_arm,
};
static const struct k_clock clock_monotonic_active = {
	.clock_getres		= posix_get_hrtimer_res,
	.clock_get		= posix_get_monotonic_active,
};
static const struct k_clock * const posix_clocks[] = {
	[CLOCK_REALTIME]		= &clock_realtime,
	[CLOCK_MONOTONIC]		= &clock_monotonic,
	[CLOCK_PROCESS_CPUTIME_ID]	= &clock_process,
	[CLOCK_THREAD_CPUTIME_ID]	= &clock_thread,
	[CLOCK_MONOTONIC_RAW]		= &clock_monotonic_raw,
	[CLOCK_REALTIME_COARSE]		= &clock_realtime_coarse,
	[CLOCK_MONOTONIC_COARSE]	= &clock_monotonic_coarse,
	[CLOCK_BOOTTIME]		= &clock_monotonic,
	[CLOCK_REALTIME_ALARM]		= &alarm_clock,
	[CLOCK_BOOTTIME_ALARM]		= &alarm_clock,
	[CLOCK_TAI]			= &clock_tai,
	[CLOCK_MONOTONIC_ACTIVE]	= &clock_monotonic_active,
};
static const struct k_clock *clockid_to_kclock(const clockid_t id)
{
	clockid_t idx = id;

	if (id < 0) {
		return (id & CLOCKFD_MASK) == CLOCKFD ?
			&clock_posix_dynamic : &clock_posix_cpu;
	}

	if (id >= ARRAY_SIZE(posix_clocks))
		return NULL;

	return posix_clocks[array_index_nospec(idx, ARRAY_SIZE(posix_clocks))];
}
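/*
 * Negative clock ids above are multiplexed (sketch only, see posix-timers.h
 * and posix-clock.h for the authoritative encoding): CPU clocks encode a
 * pid and a clock type in the low bits, e.g. MAKE_PROCESS_CPUCLOCK(pid,
 * CPUCLOCK_SCHED), while dynamic clocks carry a character-device fd via
 * FD_TO_CLOCKID(fd), which sets CLOCKFD in the low bits and is routed to
 * clock_posix_dynamic.
 */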