Merge branch 'work.fdpic' of git://git.kernel.org/pub/scm/linux/kernel/git/viro/vfs
[linux-2.6-microblaze.git] / arch / arm64 / include / asm / arch_timer.h
1 /* SPDX-License-Identifier: GPL-2.0-only */
2 /*
3  * arch/arm64/include/asm/arch_timer.h
4  *
5  * Copyright (C) 2012 ARM Ltd.
6  * Author: Marc Zyngier <marc.zyngier@arm.com>
7  */
8 #ifndef __ASM_ARCH_TIMER_H
9 #define __ASM_ARCH_TIMER_H
10
11 #include <asm/barrier.h>
12 #include <asm/hwcap.h>
13 #include <asm/sysreg.h>
14
15 #include <linux/bug.h>
16 #include <linux/init.h>
17 #include <linux/jump_label.h>
18 #include <linux/smp.h>
19 #include <linux/types.h>
20
21 #include <clocksource/arm_arch_timer.h>
22
#if IS_ENABLED(CONFIG_ARM_ARCH_TIMER_OOL_WORKAROUND)
/*
 * True if the current CPU has a registered erratum workaround that
 * provides hook 'h' (a member of struct arch_timer_erratum_workaround).
 */
#define has_erratum_handler(h)                                          \
	({                                                              \
		const struct arch_timer_erratum_workaround *__wa;       \
		__wa = __this_cpu_read(timer_unstable_counter_workaround); \
		(__wa && __wa->h);                                      \
	})

/*
 * Pick the workaround hook 'h' for this CPU when one is registered,
 * otherwise fall back to the plain arch_timer_##h accessor below.
 * Callers must have preemption disabled so the per-CPU lookup and the
 * subsequent call happen on the same CPU.
 */
#define erratum_handler(h)                                              \
	({                                                              \
		const struct arch_timer_erratum_workaround *__wa;       \
		__wa = __this_cpu_read(timer_unstable_counter_workaround); \
		(__wa && __wa->h) ? __wa->h : arch_timer_##h;           \
	})

#else
/* No out-of-line workarounds configured: always use the plain accessors. */
#define has_erratum_handler(h)                     false
#define erratum_handler(h)                         (arch_timer_##h)
#endif
42
/*
 * How an arch_timer_erratum_workaround's 'id' field is matched against
 * the running system.
 */
enum arch_timer_erratum_match_type {
	ate_match_dt,			/* match against a device-tree property */
	ate_match_local_cap_id,		/* match against a local CPU capability id */
	ate_match_acpi_oem_info,	/* match against ACPI OEM table info */
};
48
49 struct clock_event_device;
50
/*
 * Per-erratum override table. Any non-NULL accessor hook replaces the
 * plain sysreg accessor of the same name via erratum_handler().
 */
struct arch_timer_erratum_workaround {
	enum arch_timer_erratum_match_type match_type;	/* how 'id' is interpreted */
	const void *id;			/* DT compatible / capability id / OEM info */
	const char *desc;		/* human-readable erratum description */
	u32 (*read_cntp_tval_el0)(void);	/* override: phys timer TVAL read */
	u32 (*read_cntv_tval_el0)(void);	/* override: virt timer TVAL read */
	u64 (*read_cntpct_el0)(void);		/* override: physical counter read */
	u64 (*read_cntvct_el0)(void);		/* override: virtual counter read */
	int (*set_next_event_phys)(unsigned long, struct clock_event_device *);
	int (*set_next_event_virt)(unsigned long, struct clock_event_device *);
	bool disable_compat_vdso;	/* true: compat vDSO must not read the counter */
};
63
64 DECLARE_PER_CPU(const struct arch_timer_erratum_workaround *,
65                 timer_unstable_counter_workaround);
66
67 /* inline sysreg accessors that make erratum_handler() work */
68 static inline notrace u32 arch_timer_read_cntp_tval_el0(void)
69 {
70         return read_sysreg(cntp_tval_el0);
71 }
72
73 static inline notrace u32 arch_timer_read_cntv_tval_el0(void)
74 {
75         return read_sysreg(cntv_tval_el0);
76 }
77
78 static inline notrace u64 arch_timer_read_cntpct_el0(void)
79 {
80         return read_sysreg(cntpct_el0);
81 }
82
83 static inline notrace u64 arch_timer_read_cntvct_el0(void)
84 {
85         return read_sysreg(cntvct_el0);
86 }
87
/*
 * Read timer register 'reg' through any per-CPU erratum workaround hook.
 * Preemption is disabled around the lookup + read so both happen on the
 * same CPU; notrace variants keep this usable from tracing paths.
 */
#define arch_timer_reg_read_stable(reg)                                 \
	({                                                              \
		u64 _val;                                               \
									\
		preempt_disable_notrace();                              \
		_val = erratum_handler(read_ ## reg)();                 \
		preempt_enable_notrace();                               \
									\
		_val;                                                   \
	})
98
99 /*
100  * These register accessors are marked inline so the compiler can
101  * nicely work out which register we want, and chuck away the rest of
102  * the code.
103  */
104 static __always_inline
105 void arch_timer_reg_write_cp15(int access, enum arch_timer_reg reg, u32 val)
106 {
107         if (access == ARCH_TIMER_PHYS_ACCESS) {
108                 switch (reg) {
109                 case ARCH_TIMER_REG_CTRL:
110                         write_sysreg(val, cntp_ctl_el0);
111                         break;
112                 case ARCH_TIMER_REG_TVAL:
113                         write_sysreg(val, cntp_tval_el0);
114                         break;
115                 }
116         } else if (access == ARCH_TIMER_VIRT_ACCESS) {
117                 switch (reg) {
118                 case ARCH_TIMER_REG_CTRL:
119                         write_sysreg(val, cntv_ctl_el0);
120                         break;
121                 case ARCH_TIMER_REG_TVAL:
122                         write_sysreg(val, cntv_tval_el0);
123                         break;
124                 }
125         }
126
127         isb();
128 }
129
130 static __always_inline
131 u32 arch_timer_reg_read_cp15(int access, enum arch_timer_reg reg)
132 {
133         if (access == ARCH_TIMER_PHYS_ACCESS) {
134                 switch (reg) {
135                 case ARCH_TIMER_REG_CTRL:
136                         return read_sysreg(cntp_ctl_el0);
137                 case ARCH_TIMER_REG_TVAL:
138                         return arch_timer_reg_read_stable(cntp_tval_el0);
139                 }
140         } else if (access == ARCH_TIMER_VIRT_ACCESS) {
141                 switch (reg) {
142                 case ARCH_TIMER_REG_CTRL:
143                         return read_sysreg(cntv_ctl_el0);
144                 case ARCH_TIMER_REG_TVAL:
145                         return arch_timer_reg_read_stable(cntv_tval_el0);
146                 }
147         }
148
149         BUG();
150 }
151
152 static inline u32 arch_timer_get_cntfrq(void)
153 {
154         return read_sysreg(cntfrq_el0);
155 }
156
157 static inline u32 arch_timer_get_cntkctl(void)
158 {
159         return read_sysreg(cntkctl_el1);
160 }
161
/*
 * Write CNTKCTL_EL1 and synchronize so the new EL0 access/event-stream
 * controls take effect before any subsequent instruction.
 */
static inline void arch_timer_set_cntkctl(u32 cntkctl)
{
	write_sysreg(cntkctl, cntkctl_el1);
	isb();
}
167
168 /*
169  * Ensure that reads of the counter are treated the same as memory reads
170  * for the purposes of ordering by subsequent memory barriers.
171  *
172  * This insanity brought to you by speculative system register reads,
173  * out-of-order memory accesses, sequence locks and Thomas Gleixner.
174  *
175  * http://lists.infradead.org/pipermail/linux-arm-kernel/2019-February/631195.html
176  */
/*
 * eor %0,%1,%1 yields zero but creates a register dependency on the
 * counter value; adding it to sp and issuing a dummy load through the
 * result turns that dependency into a memory access, so later barriers
 * order the counter read like an ordinary load.
 */
#define arch_counter_enforce_ordering(val) do {                         \
	u64 tmp, _val = (val);                                          \
									\
	asm volatile(                                                   \
	"	eor	%0, %1, %1\n"                                   \
	"	add	%0, sp, %0\n"                                   \
	"	ldr	xzr, [%0]"                                      \
	: "=r" (tmp) : "r" (_val));                                     \
} while (0)
186
187 static __always_inline u64 __arch_counter_get_cntpct_stable(void)
188 {
189         u64 cnt;
190
191         isb();
192         cnt = arch_timer_reg_read_stable(cntpct_el0);
193         arch_counter_enforce_ordering(cnt);
194         return cnt;
195 }
196
197 static __always_inline u64 __arch_counter_get_cntpct(void)
198 {
199         u64 cnt;
200
201         isb();
202         cnt = read_sysreg(cntpct_el0);
203         arch_counter_enforce_ordering(cnt);
204         return cnt;
205 }
206
207 static __always_inline u64 __arch_counter_get_cntvct_stable(void)
208 {
209         u64 cnt;
210
211         isb();
212         cnt = arch_timer_reg_read_stable(cntvct_el0);
213         arch_counter_enforce_ordering(cnt);
214         return cnt;
215 }
216
217 static __always_inline u64 __arch_counter_get_cntvct(void)
218 {
219         u64 cnt;
220
221         isb();
222         cnt = read_sysreg(cntvct_el0);
223         arch_counter_enforce_ordering(cnt);
224         return cnt;
225 }
226
227 #undef arch_counter_enforce_ordering
228
/* Nothing arch-specific to set up on arm64; always succeeds. */
static inline int arch_timer_arch_init(void)
{
	return 0;
}
233
/*
 * Advertise the timer event stream to userspace: set the EVTSTRM CPU
 * feature and, when 32-bit tasks are supported, mirror it into the
 * compat ELF hwcaps.
 */
static inline void arch_timer_set_evtstrm_feature(void)
{
	cpu_set_named_feature(EVTSTRM);
#ifdef CONFIG_COMPAT
	compat_elf_hwcap |= COMPAT_HWCAP_EVTSTRM;
#endif
}
241
242 static inline bool arch_timer_have_evtstrm_feature(void)
243 {
244         return cpu_have_named_feature(EVTSTRM);
245 }
246 #endif