arm64: atomics: lse: Dereference matching size
[linux-2.6-microblaze.git] / arch / arm64 / include / asm / arch_timer.h
1 /* SPDX-License-Identifier: GPL-2.0-only */
2 /*
3  * arch/arm64/include/asm/arch_timer.h
4  *
5  * Copyright (C) 2012 ARM Ltd.
6  * Author: Marc Zyngier <marc.zyngier@arm.com>
7  */
8 #ifndef __ASM_ARCH_TIMER_H
9 #define __ASM_ARCH_TIMER_H
10
11 #include <asm/barrier.h>
12 #include <asm/hwcap.h>
13 #include <asm/sysreg.h>
14
15 #include <linux/bug.h>
16 #include <linux/init.h>
17 #include <linux/jump_label.h>
18 #include <linux/smp.h>
19 #include <linux/types.h>
20
21 #include <clocksource/arm_arch_timer.h>
22
#if IS_ENABLED(CONFIG_ARM_ARCH_TIMER_OOL_WORKAROUND)
/*
 * true if the current CPU has an out-of-line erratum workaround installed
 * that overrides accessor @h (@h names a callback member of
 * struct arch_timer_erratum_workaround, e.g. read_cntvct_el0).
 */
#define has_erratum_handler(h)                                          \
        ({                                                              \
                const struct arch_timer_erratum_workaround *__wa;       \
                __wa = __this_cpu_read(timer_unstable_counter_workaround); \
                (__wa && __wa->h);                                      \
        })

/*
 * Select the accessor for @h: the per-cpu workaround override when one is
 * installed (an isb() is issued before yielding the override pointer), or
 * the default arch_timer_<h> implementation otherwise.  Callers must have
 * preemption disabled so the per-cpu lookup and the subsequent call happen
 * on the same CPU.
 */
#define erratum_handler(h)                                              \
        ({                                                              \
                const struct arch_timer_erratum_workaround *__wa;       \
                __wa = __this_cpu_read(timer_unstable_counter_workaround); \
                (__wa && __wa->h) ? ({ isb(); __wa->h;}) : arch_timer_##h; \
        })

#else
/* No out-of-line workarounds configured: always use the default accessors. */
#define has_erratum_handler(h)                     false
#define erratum_handler(h)                         (arch_timer_##h)
#endif
42
/*
 * How a struct arch_timer_erratum_workaround's @id field is matched
 * against the running system.
 */
enum arch_timer_erratum_match_type {
        ate_match_dt,                   /* match against device-tree data */
        ate_match_local_cap_id,         /* match against a local CPU capability id */
        ate_match_acpi_oem_info,        /* match against ACPI OEM information */
};
48
struct clock_event_device;

/*
 * Descriptor for one counter/timer erratum workaround.  When installed in
 * the per-cpu timer_unstable_counter_workaround pointer, its non-NULL
 * callbacks replace the corresponding default arch_timer_* accessors.
 */
struct arch_timer_erratum_workaround {
        enum arch_timer_erratum_match_type match_type; /* how @id is interpreted */
        const void *id;         /* match data; concrete type depends on match_type */
        const char *desc;       /* human-readable description of the erratum */
        u64 (*read_cntpct_el0)(void);   /* override: read physical counter */
        u64 (*read_cntvct_el0)(void);   /* override: read virtual counter */
        int (*set_next_event_phys)(unsigned long, struct clock_event_device *);
        int (*set_next_event_virt)(unsigned long, struct clock_event_device *);
        bool disable_compat_vdso;       /* NOTE(review): presumably disables compat vDSO counter access — confirm against vdso code */
};

/* Per-cpu pointer to the active workaround (NULL when none applies). */
DECLARE_PER_CPU(const struct arch_timer_erratum_workaround *,
                timer_unstable_counter_workaround);
64
/*
 * Default (no-erratum) read of the physical counter.
 *
 * Without FEAT_ECV: "isb; mrs cntpct_el0" — the barrier keeps the counter
 * read from being speculated ahead of prior instructions.  When
 * ARM64_HAS_ECV is detected, the alternative patches in a read of the
 * self-synchronising CNTPCTSS_EL0 view, which needs no barrier (the isb
 * slot becomes a nop).
 */
static inline notrace u64 arch_timer_read_cntpct_el0(void)
{
        u64 cnt;

        asm volatile(ALTERNATIVE("isb\n mrs %0, cntpct_el0",
                                 "nop\n" __mrs_s("%0", SYS_CNTPCTSS_EL0),
                                 ARM64_HAS_ECV)
                     : "=r" (cnt));

        return cnt;
}
76
/*
 * Default (no-erratum) read of the virtual counter.  Same structure as
 * arch_timer_read_cntpct_el0(): isb+mrs normally, or the barrier-free
 * self-synchronising CNTVCTSS_EL0 view when ARM64_HAS_ECV is present.
 */
static inline notrace u64 arch_timer_read_cntvct_el0(void)
{
        u64 cnt;

        asm volatile(ALTERNATIVE("isb\n mrs %0, cntvct_el0",
                                 "nop\n" __mrs_s("%0", SYS_CNTVCTSS_EL0),
                                 ARM64_HAS_ECV)
                     : "=r" (cnt));

        return cnt;
}
88
/*
 * Read counter @reg (cntpct_el0 or cntvct_el0) through the erratum-aware
 * accessor selected by erratum_handler().  Preemption is disabled around
 * the lookup+read so both happen on the same CPU; the _notrace variants
 * keep this usable from tracing-sensitive paths.
 */
#define arch_timer_reg_read_stable(reg)                                 \
        ({                                                              \
                u64 _val;                                               \
                                                                        \
                preempt_disable_notrace();                              \
                _val = erratum_handler(read_ ## reg)();                 \
                preempt_enable_notrace();                               \
                                                                        \
                _val;                                                   \
        })
99
100 /*
101  * These register accessors are marked inline so the compiler can
102  * nicely work out which register we want, and chuck away the rest of
103  * the code.
104  */
/*
 * Write @val to the CTRL or CVAL register of the physical or virtual
 * timer.  Both @access and @reg must be compile-time constants: any
 * combination other than {PHYS,VIRT} x {CTRL,CVAL} is rejected at build
 * time via BUILD_BUG().  CTRL writes are followed by an isb() so that
 * enabling/disabling the timer takes effect before subsequent
 * instructions; CVAL writes need no barrier here.
 */
static __always_inline
void arch_timer_reg_write_cp15(int access, enum arch_timer_reg reg, u64 val)
{
        if (access == ARCH_TIMER_PHYS_ACCESS) {
                switch (reg) {
                case ARCH_TIMER_REG_CTRL:
                        write_sysreg(val, cntp_ctl_el0);
                        isb();
                        break;
                case ARCH_TIMER_REG_CVAL:
                        write_sysreg(val, cntp_cval_el0);
                        break;
                default:
                        BUILD_BUG();
                }
        } else if (access == ARCH_TIMER_VIRT_ACCESS) {
                switch (reg) {
                case ARCH_TIMER_REG_CTRL:
                        write_sysreg(val, cntv_ctl_el0);
                        isb();
                        break;
                case ARCH_TIMER_REG_CVAL:
                        write_sysreg(val, cntv_cval_el0);
                        break;
                default:
                        BUILD_BUG();
                }
        } else {
                /* Unknown access type: must be caught at compile time. */
                BUILD_BUG();
        }
}
136
/*
 * Read a timer register.  Only CTRL is readable through this interface;
 * any other @access/@reg combination fails the build via BUILD_BUG().
 * The trailing unreachable() tells the compiler no fall-through return
 * is possible, silencing the missing-return warning.
 */
static __always_inline
u64 arch_timer_reg_read_cp15(int access, enum arch_timer_reg reg)
{
        if (access == ARCH_TIMER_PHYS_ACCESS) {
                switch (reg) {
                case ARCH_TIMER_REG_CTRL:
                        return read_sysreg(cntp_ctl_el0);
                default:
                        BUILD_BUG();
                }
        } else if (access == ARCH_TIMER_VIRT_ACCESS) {
                switch (reg) {
                case ARCH_TIMER_REG_CTRL:
                        return read_sysreg(cntv_ctl_el0);
                default:
                        BUILD_BUG();
                }
        }

        BUILD_BUG();
        unreachable();
}
159
160 static inline u32 arch_timer_get_cntfrq(void)
161 {
162         return read_sysreg(cntfrq_el0);
163 }
164
165 static inline u32 arch_timer_get_cntkctl(void)
166 {
167         return read_sysreg(cntkctl_el1);
168 }
169
/*
 * Write @cntkctl to CNTKCTL_EL1 and synchronize with an isb() so the new
 * control settings take effect before subsequent instructions.
 */
static inline void arch_timer_set_cntkctl(u32 cntkctl)
{
        write_sysreg(cntkctl, cntkctl_el1);
        isb();
}
175
176 static __always_inline u64 __arch_counter_get_cntpct_stable(void)
177 {
178         u64 cnt;
179
180         cnt = arch_timer_reg_read_stable(cntpct_el0);
181         arch_counter_enforce_ordering(cnt);
182         return cnt;
183 }
184
/*
 * Fast-path read of the physical counter, bypassing erratum handling.
 * isb+mrs normally; the self-synchronising CNTPCTSS_EL0 view (no barrier
 * needed) when ARM64_HAS_ECV is present.  The result is ordered against
 * subsequent operations via arch_counter_enforce_ordering().
 */
static __always_inline u64 __arch_counter_get_cntpct(void)
{
        u64 cnt;

        asm volatile(ALTERNATIVE("isb\n mrs %0, cntpct_el0",
                                 "nop\n" __mrs_s("%0", SYS_CNTPCTSS_EL0),
                                 ARM64_HAS_ECV)
                     : "=r" (cnt));
        arch_counter_enforce_ordering(cnt);
        return cnt;
}
196
197 static __always_inline u64 __arch_counter_get_cntvct_stable(void)
198 {
199         u64 cnt;
200
201         cnt = arch_timer_reg_read_stable(cntvct_el0);
202         arch_counter_enforce_ordering(cnt);
203         return cnt;
204 }
205
/*
 * Fast-path read of the virtual counter, bypassing erratum handling.
 * isb+mrs normally; the self-synchronising CNTVCTSS_EL0 view (no barrier
 * needed) when ARM64_HAS_ECV is present.  The result is ordered against
 * subsequent operations via arch_counter_enforce_ordering().
 */
static __always_inline u64 __arch_counter_get_cntvct(void)
{
        u64 cnt;

        asm volatile(ALTERNATIVE("isb\n mrs %0, cntvct_el0",
                                 "nop\n" __mrs_s("%0", SYS_CNTVCTSS_EL0),
                                 ARM64_HAS_ECV)
                     : "=r" (cnt));
        arch_counter_enforce_ordering(cnt);
        return cnt;
}
217
/*
 * Architecture hook for timer initialisation.  Nothing arch-specific is
 * required on arm64, so this always succeeds.
 */
static inline int arch_timer_arch_init(void)
{
        return 0;
}
222
/*
 * Advertise the event-stream capability to userspace: set the native
 * EVTSTRM hwcap and, when AArch32 compat support is built in, the
 * corresponding compat hwcap bit as well.
 */
static inline void arch_timer_set_evtstrm_feature(void)
{
        cpu_set_named_feature(EVTSTRM);
#ifdef CONFIG_COMPAT
        compat_elf_hwcap |= COMPAT_HWCAP_EVTSTRM;
#endif
}
230
231 static inline bool arch_timer_have_evtstrm_feature(void)
232 {
233         return cpu_have_named_feature(EVTSTRM);
234 }
235 #endif