// SPDX-License-Identifier: GPL-2.0
/*
 *  Copyright (C) 1991, 1992  Linus Torvalds
 *
 *  This file contains the interface functions for the various time related
 *  system calls: time, stime, gettimeofday, settimeofday, adjtime
 *
 * Modification history:
 *
 * 1993-09-02	Philip Gladstone
 *	Created file with time related functions from sched/core.c and adjtimex()
 * 1993-10-08	Torsten Duwe
 *	adjtime interface update and CMOS clock write code
 * 1995-08-13	Torsten Duwe
 *	kernel PLL updated to 1994-12-13 specs (rfc-1589)
 * 1999-01-16	Ulrich Windl
 *	Introduced error checking for many cases in adjtimex().
 *	Updated NTP code according to technical memorandum Jan '96
 *	"A Kernel Model for Precision Timekeeping" by Dave Mills
 *	Allow time_constant larger than MAXTC(6) for NTP v4 (MAXTC == 10)
 *	(Even though the technical memorandum forbids it)
 * 2004-07-14	Christoph Lameter
 *	Added getnstimeofday to allow the posix timer functions to return
 *	with nanosecond accuracy
 */

#include <linux/export.h>
#include <linux/kernel.h>
#include <linux/timex.h>
#include <linux/capability.h>
#include <linux/timekeeper_internal.h>
#include <linux/errno.h>
#include <linux/syscalls.h>
#include <linux/security.h>
#include <linux/fs.h>
#include <linux/math64.h>
#include <linux/ptrace.h>

#include <linux/uaccess.h>
#include <linux/compat.h>
#include <asm/unistd.h>

#include <generated/timeconst.h>
#include "timekeeping.h"

/*
 * The timezone where the local system is located. Used as a default by some
 * programs that obtain this value by using gettimeofday.
 */
struct timezone sys_tz;

EXPORT_SYMBOL(sys_tz);

#ifdef __ARCH_WANT_SYS_TIME

/*
 * sys_time() can be implemented in user-level using
 * sys_gettimeofday(). Is this for backwards compatibility? If so,
 * why not move it into the appropriate arch directory (for those
 * architectures that need it)?
 */
SYSCALL_DEFINE1(time, __kernel_old_time_t __user *, tloc)
{
	__kernel_old_time_t i = (__kernel_old_time_t)ktime_get_real_seconds();

	if (tloc) {
		if (put_user(i, tloc))
			return -EFAULT;
	}
	force_successful_syscall_return();
	return i;
}

/*
 * sys_stime() can be implemented in user-level using
 * sys_settimeofday(). Is this for backwards compatibility? If so,
 * why not move it into the appropriate arch directory (for those
 * architectures that need it)?
 */

SYSCALL_DEFINE1(stime, __kernel_old_time_t __user *, tptr)
{
	struct timespec64 tv;
	int err;

	if (get_user(tv.tv_sec, tptr))
		return -EFAULT;

	tv.tv_nsec = 0;

	err = security_settime64(&tv, NULL);
	if (err)
		return err;

	do_settimeofday64(&tv);
	return 0;
}

#endif /* __ARCH_WANT_SYS_TIME */

#ifdef CONFIG_COMPAT_32BIT_TIME
#ifdef __ARCH_WANT_SYS_TIME32

/* old_time32_t is a 32 bit "long" and needs to get converted. */
SYSCALL_DEFINE1(time32, old_time32_t __user *, tloc)
{
	old_time32_t i;

	i = (old_time32_t)ktime_get_real_seconds();

	if (tloc) {
		if (put_user(i, tloc))
			return -EFAULT;
	}
	force_successful_syscall_return();
	return i;
}

SYSCALL_DEFINE1(stime32, old_time32_t __user *, tptr)
{
	struct timespec64 tv;
	int err;

	if (get_user(tv.tv_sec, tptr))
		return -EFAULT;

	tv.tv_nsec = 0;

	err = security_settime64(&tv, NULL);
	if (err)
		return err;

	do_settimeofday64(&tv);
	return 0;
}

#endif /* __ARCH_WANT_SYS_TIME32 */
#endif /* CONFIG_COMPAT_32BIT_TIME */

SYSCALL_DEFINE2(gettimeofday, struct __kernel_old_timeval __user *, tv,
		struct timezone __user *, tz)
{
	if (likely(tv != NULL)) {
		struct timespec64 ts;

		ktime_get_real_ts64(&ts);
		if (put_user(ts.tv_sec, &tv->tv_sec) ||
		    put_user(ts.tv_nsec / 1000, &tv->tv_usec))
			return -EFAULT;
	}
	if (unlikely(tz != NULL)) {
		if (copy_to_user(tz, &sys_tz, sizeof(sys_tz)))
			return -EFAULT;
	}
	return 0;
}

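/*
 * Usage sketch (illustrative only; userspace code via the libc wrapper,
 * not part of this file). Note that ts.tv_nsec / 1000 above truncates the
 * nanosecond clock reading, so sub-microsecond precision is dropped,
 * never rounded up:
 *
 *	struct timeval now;
 *
 *	if (gettimeofday(&now, NULL) == 0)
 *		printf("%lld.%06ld\n", (long long)now.tv_sec,
 *		       (long)now.tv_usec);
 */
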
/*
 * In case for some reason the CMOS clock has not already been running
 * in UTC, but in some local time: The first time we set the timezone,
 * we will warp the clock so that it is ticking UTC time instead of
 * local time. Presumably, if someone is setting the timezone then we
 * are running in an environment where the programs understand
 * timezones. This should be done at boot time in the /etc/rc script,
 * as soon as possible, so that the clock can be set right. Otherwise,
 * various programs will get confused when the clock gets warped.
 */
int do_sys_settimeofday64(const struct timespec64 *tv, const struct timezone *tz)
{
	static int firsttime = 1;
	int error = 0;

	if (tv && !timespec64_valid_settod(tv))
		return -EINVAL;

	error = security_settime64(tv, tz);
	if (error)
		return error;

	if (tz) {
		/* Verify we're within the +-15 hrs range */
		if (tz->tz_minuteswest > 15*60 || tz->tz_minuteswest < -15*60)
			return -EINVAL;

		sys_tz = *tz;
		update_vsyscall_tz();
		if (firsttime) {
			firsttime = 0;
			if (!tv)
				timekeeping_warp_clock();
		}
	}
	if (tv)
		return do_settimeofday64(tv);
	return 0;
}

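/*
 * Illustrative example (not from the original source): a boot script that
 * calls settimeofday(NULL, &tz) with tz.tz_minuteswest == -60 and no time
 * value hits the one-shot timekeeping_warp_clock() path above: assuming
 * the hardware clock held local time (here UTC+1), the system clock is
 * shifted by tz_minuteswest minutes, i.e. one hour back, so it ticks UTC
 * from then on. A later timezone-only call merely updates sys_tz and does
 * not warp again.
 */
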
SYSCALL_DEFINE2(settimeofday, struct __kernel_old_timeval __user *, tv,
		struct timezone __user *, tz)
{
	struct timespec64 new_ts;
	struct timezone new_tz;

	if (tv) {
		if (get_user(new_ts.tv_sec, &tv->tv_sec) ||
		    get_user(new_ts.tv_nsec, &tv->tv_usec))
			return -EFAULT;

		if (new_ts.tv_nsec > USEC_PER_SEC || new_ts.tv_nsec < 0)
			return -EINVAL;

		new_ts.tv_nsec *= NSEC_PER_USEC;
	}
	if (tz) {
		if (copy_from_user(&new_tz, tz, sizeof(*tz)))
			return -EFAULT;
	}

	return do_sys_settimeofday64(tv ? &new_ts : NULL, tz ? &new_tz : NULL);
}

#ifdef CONFIG_COMPAT
COMPAT_SYSCALL_DEFINE2(gettimeofday, struct old_timeval32 __user *, tv,
		       struct timezone __user *, tz)
{
	if (tv) {
		struct timespec64 ts;

		ktime_get_real_ts64(&ts);
		if (put_user(ts.tv_sec, &tv->tv_sec) ||
		    put_user(ts.tv_nsec / 1000, &tv->tv_usec))
			return -EFAULT;
	}
	if (tz) {
		if (copy_to_user(tz, &sys_tz, sizeof(sys_tz)))
			return -EFAULT;
	}

	return 0;
}

COMPAT_SYSCALL_DEFINE2(settimeofday, struct old_timeval32 __user *, tv,
		       struct timezone __user *, tz)
{
	struct timespec64 new_ts;
	struct timezone new_tz;

	if (tv) {
		if (get_user(new_ts.tv_sec, &tv->tv_sec) ||
		    get_user(new_ts.tv_nsec, &tv->tv_usec))
			return -EFAULT;

		if (new_ts.tv_nsec > USEC_PER_SEC || new_ts.tv_nsec < 0)
			return -EINVAL;

		new_ts.tv_nsec *= NSEC_PER_USEC;
	}
	if (tz) {
		if (copy_from_user(&new_tz, tz, sizeof(*tz)))
			return -EFAULT;
	}

	return do_sys_settimeofday64(tv ? &new_ts : NULL, tz ? &new_tz : NULL);
}
#endif /* CONFIG_COMPAT */

SYSCALL_DEFINE1(adjtimex, struct __kernel_timex __user *, txc_p)
{
	struct __kernel_timex txc;	/* Local copy of parameter */
	int ret;

	/* Copy the user data space into the kernel copy
	 * structure. But bear in mind that the structures
	 * may change.
	 */
	if (copy_from_user(&txc, txc_p, sizeof(struct __kernel_timex)))
		return -EFAULT;
	ret = do_adjtimex(&txc);
	return copy_to_user(txc_p, &txc, sizeof(struct __kernel_timex)) ? -EFAULT : ret;
}

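/*
 * Userspace usage sketch (illustrative only; assumes the glibc adjtimex()
 * wrapper and <sys/timex.h>, not code from this file). A read-only query
 * passes modes == 0 and gets back the current offset/frequency state
 * together with the clock state as the return value:
 *
 *	struct timex tx = { .modes = 0 };
 *	int state = adjtimex(&tx);
 *
 *	if (state >= 0)
 *		printf("freq=%ld offset=%ld state=%d\n",
 *		       tx.freq, tx.offset, state);
 */
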
#ifdef CONFIG_COMPAT_32BIT_TIME
int get_old_timex32(struct __kernel_timex *txc, const struct old_timex32 __user *utp)
{
	struct old_timex32 tx32;

	memset(txc, 0, sizeof(struct __kernel_timex));
	if (copy_from_user(&tx32, utp, sizeof(struct old_timex32)))
		return -EFAULT;

	txc->modes = tx32.modes;
	txc->offset = tx32.offset;
	txc->freq = tx32.freq;
	txc->maxerror = tx32.maxerror;
	txc->esterror = tx32.esterror;
	txc->status = tx32.status;
	txc->constant = tx32.constant;
	txc->precision = tx32.precision;
	txc->tolerance = tx32.tolerance;
	txc->time.tv_sec = tx32.time.tv_sec;
	txc->time.tv_usec = tx32.time.tv_usec;
	txc->tick = tx32.tick;
	txc->ppsfreq = tx32.ppsfreq;
	txc->jitter = tx32.jitter;
	txc->shift = tx32.shift;
	txc->stabil = tx32.stabil;
	txc->jitcnt = tx32.jitcnt;
	txc->calcnt = tx32.calcnt;
	txc->errcnt = tx32.errcnt;
	txc->stbcnt = tx32.stbcnt;

	return 0;
}

int put_old_timex32(struct old_timex32 __user *utp, const struct __kernel_timex *txc)
{
	struct old_timex32 tx32;

	memset(&tx32, 0, sizeof(struct old_timex32));
	tx32.modes = txc->modes;
	tx32.offset = txc->offset;
	tx32.freq = txc->freq;
	tx32.maxerror = txc->maxerror;
	tx32.esterror = txc->esterror;
	tx32.status = txc->status;
	tx32.constant = txc->constant;
	tx32.precision = txc->precision;
	tx32.tolerance = txc->tolerance;
	tx32.time.tv_sec = txc->time.tv_sec;
	tx32.time.tv_usec = txc->time.tv_usec;
	tx32.tick = txc->tick;
	tx32.ppsfreq = txc->ppsfreq;
	tx32.jitter = txc->jitter;
	tx32.shift = txc->shift;
	tx32.stabil = txc->stabil;
	tx32.jitcnt = txc->jitcnt;
	tx32.calcnt = txc->calcnt;
	tx32.errcnt = txc->errcnt;
	tx32.stbcnt = txc->stbcnt;

	if (copy_to_user(utp, &tx32, sizeof(struct old_timex32)))
		return -EFAULT;
	return 0;
}

SYSCALL_DEFINE1(adjtimex_time32, struct old_timex32 __user *, utp)
{
	struct __kernel_timex txc;
	int err, ret;

	err = get_old_timex32(&txc, utp);
	if (err)
		return err;

	ret = do_adjtimex(&txc);

	err = put_old_timex32(utp, &txc);
	if (err)
		return err;

	return ret;
}
#endif /* CONFIG_COMPAT_32BIT_TIME */

/*
 * Convert jiffies to milliseconds and back.
 *
 * Avoid unnecessary multiplications/divisions in the
 * two most common HZ cases:
 */
unsigned int jiffies_to_msecs(const unsigned long j)
{
#if HZ <= MSEC_PER_SEC && !(MSEC_PER_SEC % HZ)
	return (MSEC_PER_SEC / HZ) * j;
#elif HZ > MSEC_PER_SEC && !(HZ % MSEC_PER_SEC)
	return (j + (HZ / MSEC_PER_SEC) - 1) / (HZ / MSEC_PER_SEC);
#else
# if BITS_PER_LONG == 32
	return (HZ_TO_MSEC_MUL32 * j + (1ULL << HZ_TO_MSEC_SHR32) - 1) >>
	       HZ_TO_MSEC_SHR32;
# else
	return DIV_ROUND_UP(j * HZ_TO_MSEC_NUM, HZ_TO_MSEC_DEN);
# endif
#endif
}
EXPORT_SYMBOL(jiffies_to_msecs);

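/*
 * Worked example (illustrative): with HZ == 250 the first branch applies
 * and jiffies_to_msecs(3) == (1000 / 250) * 3 == 12 ms; with a
 * hypothetical HZ == 3000 the second branch rounds up, so
 * jiffies_to_msecs(4) == (4 + 3 - 1) / 3 == 2 ms rather than truncating
 * 1.33 ms down to 1 ms.
 */
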
unsigned int jiffies_to_usecs(const unsigned long j)
{
	/*
	 * HZ usually doesn't go much beyond MSEC_PER_SEC.
	 * jiffies_to_usecs() and usecs_to_jiffies() depend on that.
	 */
	BUILD_BUG_ON(HZ > USEC_PER_SEC);

#if !(USEC_PER_SEC % HZ)
	return (USEC_PER_SEC / HZ) * j;
#else
# if BITS_PER_LONG == 32
	return (HZ_TO_USEC_MUL32 * j) >> HZ_TO_USEC_SHR32;
# else
	return (j * HZ_TO_USEC_NUM) / HZ_TO_USEC_DEN;
# endif
#endif
}
EXPORT_SYMBOL(jiffies_to_usecs);

/**
 * mktime64 - Converts date to seconds.
 * Converts Gregorian date to seconds since 1970-01-01 00:00:00.
 * Assumes input in normal date format, i.e. 1980-12-31 23:59:59
 * => year=1980, mon=12, day=31, hour=23, min=59, sec=59.
 *
 * [For the Julian calendar (which was used in Russia before 1917,
 * Britain & colonies before 1752, anywhere else before 1582,
 * and is still in use by some communities) leave out the
 * -year/100+year/400 terms, and add 10.]
 *
 * This algorithm was first published by Gauss (I think).
 *
 * A leap second can be indicated by calling this function with sec as
 * 60 (allowable under ISO 8601). The leap second is treated the same
 * as the following second since they don't exist in UNIX time.
 *
 * An encoding of midnight at the end of the day as 24:00:00 - ie. midnight
 * tomorrow - (allowable under ISO 8601) is supported.
 */
time64_t mktime64(const unsigned int year0, const unsigned int mon0,
		const unsigned int day, const unsigned int hour,
		const unsigned int min, const unsigned int sec)
{
	unsigned int mon = mon0, year = year0;

	/* 1..12 -> 11,12,1..10 */
	if (0 >= (int) (mon -= 2)) {
		mon += 12;	/* Puts Feb last since it has leap day */
		year -= 1;
	}

	return ((((time64_t)
		  (year/4 - year/100 + year/400 + 367*mon/12 + day) +
		  year*365 - 719499
	    )*24 + hour /* now have hours - midnight tomorrow handled here */
	  )*60 + min /* now have minutes */
	)*60 + sec; /* finally seconds */
}
EXPORT_SYMBOL(mktime64);

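/*
 * Worked example (illustrative): mktime64(2000, 3, 1, 0, 0, 0). March
 * keeps mon = 1 after the shift (February is moved to the end of the
 * "year"), so the day count is
 *
 *	2000/4 - 2000/100 + 2000/400 + 367*1/12 + 1 + 2000*365 - 719499
 *	= 500 - 20 + 5 + 30 + 1 + 730000 - 719499 = 11017 days,
 *
 * and 11017 * 86400 == 951868800, the epoch time of 2000-03-01 00:00:00.
 */
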
struct __kernel_old_timeval ns_to_kernel_old_timeval(const s64 nsec)
{
	struct timespec64 ts = ns_to_timespec64(nsec);
	struct __kernel_old_timeval tv;

	tv.tv_sec = ts.tv_sec;
	tv.tv_usec = (suseconds_t)ts.tv_nsec / 1000;

	return tv;
}
EXPORT_SYMBOL(ns_to_kernel_old_timeval);

/**
 * set_normalized_timespec64 - set timespec sec and nsec parts and normalize
 *
 * @ts:		pointer to timespec variable to be set
 * @sec:	seconds to set
 * @nsec:	nanoseconds to set
 *
 * Set seconds and nanoseconds field of a timespec variable and
 * normalize to the timespec storage format.
 *
 * Note: The tv_nsec part is always in the range of
 *	0 <= tv_nsec < NSEC_PER_SEC.
 * For negative values only the tv_sec field is negative!
 */
void set_normalized_timespec64(struct timespec64 *ts, time64_t sec, s64 nsec)
{
	while (nsec >= NSEC_PER_SEC) {
		/*
		 * The following asm() prevents the compiler from
		 * optimising this loop into a modulo operation. See
		 * also __iter_div_u64_rem() in include/linux/time.h
		 */
		asm("" : "+rm"(nsec));
		nsec -= NSEC_PER_SEC;
		++sec;
	}
	while (nsec < 0) {
		asm("" : "+rm"(nsec));
		nsec += NSEC_PER_SEC;
		--sec;
	}
	ts->tv_sec = sec;
	ts->tv_nsec = nsec;
}
EXPORT_SYMBOL(set_normalized_timespec64);

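/*
 * Example (illustrative): set_normalized_timespec64(&ts, 5, -1) runs the
 * negative loop once and stores { .tv_sec = 4, .tv_nsec = 999999999 };
 * likewise a carry such as (5, NSEC_PER_SEC + 1) normalizes to
 * { .tv_sec = 6, .tv_nsec = 1 }.
 */
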
/**
 * ns_to_timespec64 - Convert nanoseconds to timespec64
 * @nsec:	the nanoseconds value to be converted
 *
 * Returns the timespec64 representation of the nsec parameter.
 */
struct timespec64 ns_to_timespec64(const s64 nsec)
{
	struct timespec64 ts = { 0, 0 };
	u32 rem;

	if (likely(nsec > 0)) {
		ts.tv_sec = div_u64_rem(nsec, NSEC_PER_SEC, &rem);
		ts.tv_nsec = rem;
	} else if (nsec < 0) {
		/*
		 * With negative times, tv_sec points to the earlier
		 * second, and tv_nsec counts the nanoseconds since
		 * then, so tv_nsec is always a positive number.
		 */
		ts.tv_sec = -div_u64_rem(-nsec - 1, NSEC_PER_SEC, &rem) - 1;
		ts.tv_nsec = NSEC_PER_SEC - rem - 1;
	}

	return ts;
}
EXPORT_SYMBOL(ns_to_timespec64);

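/*
 * Example (illustrative): ns_to_timespec64(-1) takes the negative branch:
 * -(-1) - 1 == 0, so div_u64_rem() yields 0 with remainder 0, giving
 * { .tv_sec = -1, .tv_nsec = NSEC_PER_SEC - 0 - 1 = 999999999 }, i.e. one
 * nanosecond before the epoch expressed with a non-negative tv_nsec.
 */
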
/**
 * msecs_to_jiffies: - convert milliseconds to jiffies
 * @m:	time in milliseconds
 *
 * conversion is done as follows:
 *
 * - negative values mean 'infinite timeout' (MAX_JIFFY_OFFSET)
 *
 * - 'too large' values [that would result in larger than
 *   MAX_JIFFY_OFFSET values] mean 'infinite timeout' too.
 *
 * - all other values are converted to jiffies by either multiplying
 *   the input value by a factor or dividing it with a factor and
 *   handling any 32-bit overflows.
 *   for the details see __msecs_to_jiffies()
 *
 * msecs_to_jiffies() checks for the passed in value being a constant
 * via __builtin_constant_p() allowing gcc to eliminate most of the
 * code, __msecs_to_jiffies() is called if the value passed does not
 * allow constant folding and the actual conversion must be done at
 * runtime.
 * the _msecs_to_jiffies helpers are the HZ dependent conversion
 * routines found in include/linux/jiffies.h
 */
unsigned long __msecs_to_jiffies(const unsigned int m)
{
	/*
	 * Negative value, means infinite timeout:
	 */
	if ((int)m < 0)
		return MAX_JIFFY_OFFSET;
	return _msecs_to_jiffies(m);
}
EXPORT_SYMBOL(__msecs_to_jiffies);

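/*
 * Example (illustrative): the parameter is unsigned, so "negative" means
 * the caller passed a value with the top bit set, e.g. msecs_to_jiffies(-1)
 * arrives as 0xffffffff, (int)m < 0 is true, and the timeout is clamped to
 * MAX_JIFFY_OFFSET (effectively "wait forever").
 */
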
unsigned long __usecs_to_jiffies(const unsigned int u)
{
	if (u > jiffies_to_usecs(MAX_JIFFY_OFFSET))
		return MAX_JIFFY_OFFSET;
	return _usecs_to_jiffies(u);
}
EXPORT_SYMBOL(__usecs_to_jiffies);

/*
 * The TICK_NSEC - 1 rounds up the value to the next resolution. Note
 * that a remainder subtract here would not do the right thing as the
 * resolution values don't fall on second boundaries. I.e. the line:
 * nsec -= nsec % TICK_NSEC; is NOT a correct resolution rounding.
 * Note that due to the small error in the multiplier here, this
 * rounding is incorrect for sufficiently large values of tv_nsec, but
 * well formed timespecs should have tv_nsec < NSEC_PER_SEC, so we're
 * OK.
 *
 * Rather, we just shift the bits off the right.
 *
 * The >> (NSEC_JIFFIE_SC - SEC_JIFFIE_SC) converts the scaled nsec
 * value to a scaled second value.
 */
unsigned long
timespec64_to_jiffies(const struct timespec64 *value)
{
	u64 sec = value->tv_sec;
	long nsec = value->tv_nsec + TICK_NSEC - 1;

	if (sec >= MAX_SEC_IN_JIFFIES) {
		sec = MAX_SEC_IN_JIFFIES;
		nsec = 0;
	}
	return ((sec * SEC_CONVERSION) +
		(((u64)nsec * NSEC_CONVERSION) >>
		 (NSEC_JIFFIE_SC - SEC_JIFFIE_SC))) >> SEC_JIFFIE_SC;
}
EXPORT_SYMBOL(timespec64_to_jiffies);

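/*
 * Example (illustrative): the TICK_NSEC - 1 bias rounds partial ticks up.
 * With HZ == 1000 (TICK_NSEC == 1000000), a timespec64 of
 * { .tv_sec = 0, .tv_nsec = 1 } becomes nsec = 1000000 and converts to
 * one jiffy, guaranteeing a requested delay is never shortened.
 */
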
void
jiffies_to_timespec64(const unsigned long jiffies, struct timespec64 *value)
{
	/*
	 * Convert jiffies to nanoseconds and separate with
	 * one divide.
	 */
	u32 rem;

	value->tv_sec = div_u64_rem((u64)jiffies * TICK_NSEC,
				    NSEC_PER_SEC, &rem);
	value->tv_nsec = rem;
}
EXPORT_SYMBOL(jiffies_to_timespec64);

/*
 * Convert jiffies/jiffies_64 to clock_t and back.
 */
clock_t jiffies_to_clock_t(unsigned long x)
{
#if (TICK_NSEC % (NSEC_PER_SEC / USER_HZ)) == 0
# if HZ < USER_HZ
	return x * (USER_HZ / HZ);
# else
	return x / (HZ / USER_HZ);
# endif
#else
	return div_u64((u64)x * TICK_NSEC, NSEC_PER_SEC / USER_HZ);
#endif
}
EXPORT_SYMBOL(jiffies_to_clock_t);

unsigned long clock_t_to_jiffies(unsigned long x)
{
#if (HZ % USER_HZ) == 0
	if (x >= ~0UL / (HZ / USER_HZ))
		return ~0UL;
	return x * (HZ / USER_HZ);
#else
	/* Don't worry about loss of precision here .. */
	if (x >= ~0UL / HZ * USER_HZ)
		return ~0UL;

	/* .. but do try to contain it here */
	return div_u64((u64)x * HZ, USER_HZ);
#endif
}
EXPORT_SYMBOL(clock_t_to_jiffies);

u64 jiffies_64_to_clock_t(u64 x)
{
#if (TICK_NSEC % (NSEC_PER_SEC / USER_HZ)) == 0
# if HZ < USER_HZ
	x = div_u64(x * USER_HZ, HZ);
# elif HZ > USER_HZ
	x = div_u64(x, HZ / USER_HZ);
# else
	/* Nothing to do */
# endif
#else
	/*
	 * There are better ways that don't overflow early,
	 * but even this doesn't overflow in hundreds of years
	 * of 64-bit time_t, but do the cheap thing anyway.
	 */
	x = div_u64(x * TICK_NSEC, (NSEC_PER_SEC / USER_HZ));
#endif
	return x;
}
EXPORT_SYMBOL(jiffies_64_to_clock_t);

u64 nsec_to_clock_t(u64 x)
{
#if (NSEC_PER_SEC % USER_HZ) == 0
	return div_u64(x, NSEC_PER_SEC / USER_HZ);
#elif (USER_HZ % 512) == 0
	return div_u64(x * USER_HZ / 512, NSEC_PER_SEC / 512);
#else
	/*
	 * max relative error 5.7e-8 (1.8s per year) for USER_HZ <= 1024,
	 * overflow after 64.99 years.
	 * exact for HZ=60, 72, 90, 120, 144, 180, 300, 600, 900, ...
	 */
	return div_u64(x * 9, (9ull * NSEC_PER_SEC + (USER_HZ / 2)) / USER_HZ);
#endif
}

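/*
 * Example (illustrative): with the usual USER_HZ == 100 the first branch
 * applies and nsec_to_clock_t(2500000000) == 2500000000 / 10000000 == 250
 * clock ticks, i.e. 2.5 seconds at 100 ticks per second.
 */
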
u64 jiffies64_to_nsecs(u64 j)
{
#if !(NSEC_PER_SEC % HZ)
	return (NSEC_PER_SEC / HZ) * j;
#else
	return div_u64(j * HZ_TO_NSEC_NUM, HZ_TO_NSEC_DEN);
#endif
}
EXPORT_SYMBOL(jiffies64_to_nsecs);

u64 jiffies64_to_msecs(const u64 j)
{
#if HZ <= MSEC_PER_SEC && !(MSEC_PER_SEC % HZ)
	return (MSEC_PER_SEC / HZ) * j;
#else
	return div_u64(j * HZ_TO_MSEC_NUM, HZ_TO_MSEC_DEN);
#endif
}
EXPORT_SYMBOL(jiffies64_to_msecs);

/**
 * nsecs_to_jiffies64 - Convert nsecs in u64 to jiffies64
 *
 * @n:	nsecs in u64
 *
 * Unlike {m,u}secs_to_jiffies, type of input is not unsigned int but u64.
 * And this doesn't return MAX_JIFFY_OFFSET since this function is designed
 * for scheduler, not for use in device drivers to calculate timeout value.
 *
 * note:
 *   NSEC_PER_SEC = 10^9 = (5^9 * 2^9) = (1953125 * 512)
 *   ULLONG_MAX ns = 18446744073.709551615 secs = about 584 years
 */
u64 nsecs_to_jiffies64(u64 n)
{
#if (NSEC_PER_SEC % HZ) == 0
	/* Common case, HZ = 100, 128, 200, 250, 256, 500, 512, 1000 etc. */
	return div_u64(n, NSEC_PER_SEC / HZ);
#elif (HZ % 512) == 0
	/* overflow after 292 years if HZ = 1024 */
	return div_u64(n * HZ / 512, NSEC_PER_SEC / 512);
#else
	/*
	 * Generic case - optimized for cases where HZ is a multiple of 3.
	 * overflow after 64.99 years, exact for HZ = 60, 72, 90, 120 etc.
	 */
	return div_u64(n * 9, (9ull * NSEC_PER_SEC + HZ / 2) / HZ);
#endif
}
EXPORT_SYMBOL(nsecs_to_jiffies64);

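/*
 * Example (illustrative): with HZ == 1000 the first branch divides by
 * NSEC_PER_SEC / HZ == 1000000, so nsecs_to_jiffies64(5000000) == 5
 * jiffies; a value below one tick, e.g. 999999 ns, truncates to 0.
 */
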
/**
 * nsecs_to_jiffies - Convert nsecs in u64 to jiffies
 *
 * @n:	nsecs in u64
 *
 * Unlike {m,u}secs_to_jiffies, type of input is not unsigned int but u64.
 * And this doesn't return MAX_JIFFY_OFFSET since this function is designed
 * for scheduler, not for use in device drivers to calculate timeout value.
 *
 * note:
 *   NSEC_PER_SEC = 10^9 = (5^9 * 2^9) = (1953125 * 512)
 *   ULLONG_MAX ns = 18446744073.709551615 secs = about 584 years
 */
unsigned long nsecs_to_jiffies(u64 n)
{
	return (unsigned long)nsecs_to_jiffies64(n);
}
EXPORT_SYMBOL_GPL(nsecs_to_jiffies);

/*
 * Add two timespec64 values and do a safety check for overflow.
 * It's assumed that both values are valid (>= 0).
 * And, each timespec64 is in normalized form.
 */
struct timespec64 timespec64_add_safe(const struct timespec64 lhs,
				const struct timespec64 rhs)
{
	struct timespec64 res;

	set_normalized_timespec64(&res, (timeu64_t) lhs.tv_sec + rhs.tv_sec,
				lhs.tv_nsec + rhs.tv_nsec);

	if (unlikely(res.tv_sec < lhs.tv_sec || res.tv_sec < rhs.tv_sec)) {
		res.tv_sec = TIME64_MAX;
		res.tv_nsec = 0;
	}

	return res;
}

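/*
 * Example (illustrative): adding { .tv_sec = TIME64_MAX } and
 * { .tv_sec = 1 } overflows the 64-bit sum, so the resulting tv_sec wraps
 * negative and compares below both operands; the check above catches this
 * and saturates the result to { TIME64_MAX, 0 } instead of returning a
 * wrapped-around time.
 */
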
int get_timespec64(struct timespec64 *ts,
		   const struct __kernel_timespec __user *uts)
{
	struct __kernel_timespec kts;
	int ret;

	ret = copy_from_user(&kts, uts, sizeof(kts));
	if (ret)
		return -EFAULT;

	ts->tv_sec = kts.tv_sec;

	/* Zero out the padding in compat mode */
	if (in_compat_syscall())
		kts.tv_nsec &= 0xFFFFFFFFUL;

	/* In 32-bit mode, this drops the padding */
	ts->tv_nsec = kts.tv_nsec;

	return 0;
}
EXPORT_SYMBOL_GPL(get_timespec64);

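/*
 * Example (illustrative): a compat (32-bit) task's struct __kernel_timespec
 * carries 32 bits of padding next to tv_nsec. If the task leaves garbage
 * there, say kts.tv_nsec reads as 0xdeadbeef00000001 in the kernel, the
 * mask above reduces it to 0x00000001 so only the genuine low 32 bits
 * reach ts->tv_nsec.
 */
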
int put_timespec64(const struct timespec64 *ts,
		   struct __kernel_timespec __user *uts)
{
	struct __kernel_timespec kts = {
		.tv_sec = ts->tv_sec,
		.tv_nsec = ts->tv_nsec
	};

	return copy_to_user(uts, &kts, sizeof(kts)) ? -EFAULT : 0;
}
EXPORT_SYMBOL_GPL(put_timespec64);

static int __get_old_timespec32(struct timespec64 *ts64,
				const struct old_timespec32 __user *cts)
{
	struct old_timespec32 ts;
	int ret;

	ret = copy_from_user(&ts, cts, sizeof(ts));
	if (ret)
		return -EFAULT;

	ts64->tv_sec = ts.tv_sec;
	ts64->tv_nsec = ts.tv_nsec;

	return 0;
}

static int __put_old_timespec32(const struct timespec64 *ts64,
				struct old_timespec32 __user *cts)
{
	struct old_timespec32 ts = {
		.tv_sec = ts64->tv_sec,
		.tv_nsec = ts64->tv_nsec
	};
	return copy_to_user(cts, &ts, sizeof(ts)) ? -EFAULT : 0;
}

int get_old_timespec32(struct timespec64 *ts, const void __user *uts)
{
	if (COMPAT_USE_64BIT_TIME)
		return copy_from_user(ts, uts, sizeof(*ts)) ? -EFAULT : 0;
	else
		return __get_old_timespec32(ts, uts);
}
EXPORT_SYMBOL_GPL(get_old_timespec32);

int put_old_timespec32(const struct timespec64 *ts, void __user *uts)
{
	if (COMPAT_USE_64BIT_TIME)
		return copy_to_user(uts, ts, sizeof(*ts)) ? -EFAULT : 0;
	else
		return __put_old_timespec32(ts, uts);
}
EXPORT_SYMBOL_GPL(put_old_timespec32);

int get_itimerspec64(struct itimerspec64 *it,
			const struct __kernel_itimerspec __user *uit)
{
	int ret;

	ret = get_timespec64(&it->it_interval, &uit->it_interval);
	if (ret)
		return ret;

	ret = get_timespec64(&it->it_value, &uit->it_value);

	return ret;
}
EXPORT_SYMBOL_GPL(get_itimerspec64);

int put_itimerspec64(const struct itimerspec64 *it,
			struct __kernel_itimerspec __user *uit)
{
	int ret;

	ret = put_timespec64(&it->it_interval, &uit->it_interval);
	if (ret)
		return ret;

	ret = put_timespec64(&it->it_value, &uit->it_value);

	return ret;
}
EXPORT_SYMBOL_GPL(put_itimerspec64);

int get_old_itimerspec32(struct itimerspec64 *its,
			const struct old_itimerspec32 __user *uits)
{
	if (__get_old_timespec32(&its->it_interval, &uits->it_interval) ||
	    __get_old_timespec32(&its->it_value, &uits->it_value))
		return -EFAULT;
	return 0;
}
EXPORT_SYMBOL_GPL(get_old_itimerspec32);

int put_old_itimerspec32(const struct itimerspec64 *its,
			struct old_itimerspec32 __user *uits)
{
	if (__put_old_timespec32(&its->it_interval, &uits->it_interval) ||
	    __put_old_timespec32(&its->it_value, &uits->it_value))
		return -EFAULT;
	return 0;
}
EXPORT_SYMBOL_GPL(put_old_itimerspec32);