1 /* SPDX-License-Identifier: GPL-2.0 */
3 * Copyright (C) 2018 ARM Limited
5 #ifndef __ASM_VDSO_GETTIMEOFDAY_H
6 #define __ASM_VDSO_GETTIMEOFDAY_H
10 #include <asm/barrier.h>
11 #include <asm/unistd.h>
12 #include <asm/errno.h>
14 #include <asm/vdso/compat_barrier.h>
16 #define VDSO_HAS_CLOCK_GETRES 1
18 #define BUILD_VDSO32 1
/*
 * Syscall fallback for gettimeofday() when the vDSO fast path cannot be
 * used.  Arguments are pinned to r0/r1 and the syscall number to r7,
 * matching the AArch32 (compat) svc calling convention; the kernel's
 * return value comes back in r0, declared below as "ret".
 * NOTE(review): the asm volatile("svc ...") statement itself is not
 * visible in this excerpt -- the operand list below belongs to it.
 */
20 static __always_inline
21 int gettimeofday_fallback(struct __kernel_old_timeval *_tv,
24 register struct timezone *tz asm("r1") = _tz;
25 register struct __kernel_old_timeval *tv asm("r0") = _tv;
/* "ret" deliberately aliases r0: the syscall return overwrites arg 0. */
26 register long ret asm ("r0");
27 register long nr asm("r7") = __NR_compat_gettimeofday;
32 : "r" (tv), "r" (tz), "r" (nr)
/*
 * Syscall fallback for clock_gettime() with a 64-bit time representation
 * (struct __kernel_timespec), issued via the AArch32 compat syscall
 * clock_gettime64 so the result is Y2038-safe.  Same r0/r1/r7 register
 * pinning as gettimeofday_fallback(); return value arrives in r0.
 */
38 static __always_inline
39 long clock_gettime_fallback(clockid_t _clkid, struct __kernel_timespec *_ts)
41 register struct __kernel_timespec *ts asm("r1") = _ts;
42 register clockid_t clkid asm("r0") = _clkid;
/* "ret" aliases r0: the syscall return overwrites the clkid register. */
43 register long ret asm ("r0");
44 register long nr asm("r7") = __NR_compat_clock_gettime64;
49 : "r" (clkid), "r" (ts), "r" (nr)
/*
 * Legacy 32-bit clock_gettime() fallback using struct old_timespec32 and
 * the original (non-time64) AArch32 clock_gettime syscall.  Kept for the
 * compat vDSO entry points that still expose the 32-bit time ABI.
 */
55 static __always_inline
56 long clock_gettime32_fallback(clockid_t _clkid, struct old_timespec32 *_ts)
58 register struct old_timespec32 *ts asm("r1") = _ts;
59 register clockid_t clkid asm("r0") = _clkid;
/* "ret" aliases r0: the syscall return overwrites the clkid register. */
60 register long ret asm ("r0");
61 register long nr asm("r7") = __NR_compat_clock_gettime;
66 : "r" (clkid), "r" (ts), "r" (nr)
/*
 * Syscall fallback for clock_getres() with a 64-bit time representation,
 * issued via the AArch32 compat clock_getres_time64 syscall.  Register
 * pinning mirrors the other fallbacks above (args in r0/r1, nr in r7,
 * result in r0).
 */
72 static __always_inline
73 int clock_getres_fallback(clockid_t _clkid, struct __kernel_timespec *_ts)
75 register struct __kernel_timespec *ts asm("r1") = _ts;
76 register clockid_t clkid asm("r0") = _clkid;
/* "ret" aliases r0: the syscall return overwrites the clkid register. */
77 register long ret asm ("r0");
78 register long nr asm("r7") = __NR_compat_clock_getres_time64;
83 : "r" (clkid), "r" (ts), "r" (nr)
/*
 * Legacy 32-bit clock_getres() fallback (struct old_timespec32, original
 * AArch32 clock_getres syscall), the 32-bit-ABI counterpart of
 * clock_getres_fallback() above.
 */
89 static __always_inline
90 int clock_getres32_fallback(clockid_t _clkid, struct old_timespec32 *_ts)
92 register struct old_timespec32 *ts asm("r1") = _ts;
93 register clockid_t clkid asm("r0") = _clkid;
/* "ret" aliases r0: the syscall return overwrites the clkid register. */
94 register long ret asm ("r0");
95 register long nr asm("r7") = __NR_compat_clock_getres;
100 : "r" (clkid), "r" (ts), "r" (nr)
/*
 * Read the raw hardware counter backing the vDSO clock.  The mrrc below
 * is the AArch32 CP15 64-bit read of the virtual counter (CNTVCT).
 * NOTE(review): the declaration of "res", the surrounding isb() barriers
 * and the return statement are not visible in this excerpt.
 */
106 static __always_inline u64 __arch_get_hw_counter(s32 clock_mode)
111 * Core checks for mode already, so this raced against a concurrent
112 * update. Return something. Core will do another round and then
113 * see the mode change and fallback to the syscall.
/* Only the arch timer is a valid compat vDSO clocksource; anything else
 * means we lost a race with a clocksource change (see comment above). */
115 if (clock_mode != VDSO_CLOCKMODE_ARCHTIMER)
119 * This isb() is required to prevent that the counter value
/* 64-bit CNTVCT read: %Q0/%R0 are the low/high halves of "res". */
123 asm volatile("mrrc p15, 1, %Q0, %R0, c14" : "=r" (res));
125 * This isb() is required to prevent that the seq lock is
/*
 * Return a pointer to the vDSO data page (_vdso_data), laundering the
 * address through inline asm so the compiler emits a single relocation
 * rather than one per access -- rationale in the comment below.
 */
133 static __always_inline const struct vdso_data *__arch_get_vdso_data(void)
135 const struct vdso_data *ret;
138 * This simply puts &_vdso_data into ret. The reason why we don't use
139 * `ret = _vdso_data` is that the compiler tends to optimise this in a
140 * very suboptimal way: instead of keeping &_vdso_data in a register,
141 * it goes through a relocation almost every time _vdso_data must be
142 * accessed (even in subfunctions). This is both time and space
143 * consuming: each relocation uses a word in the code section, and it
144 * has to be loaded at runtime.
146 * This trick hides the assignment from the compiler. Since it cannot
147 * track where the pointer comes from, it will only use one relocation
148 * where __arch_get_vdso_data() is called, and then keep the result in
151 asm volatile("mov %0, %1" : "=r"(ret) : "r"(_vdso_data));
156 #ifdef CONFIG_TIME_NS
/*
 * Time-namespace variant of __arch_get_vdso_data(): returns the
 * per-namespace data page (_timens_data) using the same asm trick to
 * limit the number of relocations.  Only built when CONFIG_TIME_NS=y.
 */
157 static __always_inline const struct vdso_data *__arch_get_timens_vdso_data(void)
159 const struct vdso_data *ret;
161 /* See __arch_get_vdso_data(). */
162 asm volatile("mov %0, %1" : "=r"(ret) : "r"(_timens_data));
/*
 * Override of the generic vDSO clocksource check: the compat vDSO can
 * only service requests when the arch timer is the active clocksource;
 * every other mode must fall back to a syscall.  The self-referential
 * #define tells the generic code this arch-specific version exists.
 */
168 static inline bool vdso_clocksource_ok(const struct vdso_data *vd)
170 return vd->clock_mode == VDSO_CLOCKMODE_ARCHTIMER;
172 #define vdso_clocksource_ok vdso_clocksource_ok
174 #endif /* !__ASSEMBLY__ */
176 #endif /* __ASM_VDSO_GETTIMEOFDAY_H */