/*
 *  linux/drivers/clocksource/arm_arch_timer.c
 *
 *  Copyright (C) 2011 ARM Ltd.
 *  All Rights Reserved
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#define pr_fmt(fmt)     "arch_timer: " fmt

#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/device.h>
#include <linux/smp.h>
#include <linux/cpu.h>
#include <linux/cpu_pm.h>
#include <linux/clockchips.h>
#include <linux/clocksource.h>
#include <linux/interrupt.h>
#include <linux/of_irq.h>
#include <linux/of_address.h>
#include <linux/io.h>
#include <linux/slab.h>
#include <linux/sched/clock.h>
#include <linux/sched_clock.h>
#include <linux/acpi.h>

#include <asm/arch_timer.h>
#include <asm/virt.h>

#include <clocksource/arm_arch_timer.h>

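/* Registers in the CNTCTLBase frame of the memory-mapped timer */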
#define CNTTIDR         0x08
#define CNTTIDR_VIRT(n) (BIT(1) << ((n) * 4))

#define CNTACR(n)       (0x40 + ((n) * 4))
#define CNTACR_RPCT     BIT(0)
#define CNTACR_RVCT     BIT(1)
#define CNTACR_RFRQ     BIT(2)
#define CNTACR_RVOFF    BIT(3)
#define CNTACR_RWVT     BIT(4)
#define CNTACR_RWPT     BIT(5)

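/* Registers in each CNTBaseN timer frame */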
#define CNTVCT_LO       0x08
#define CNTVCT_HI       0x0c
#define CNTFRQ          0x10
#define CNTP_TVAL       0x28
#define CNTP_CTL        0x2c
#define CNTV_TVAL       0x38
#define CNTV_CTL        0x3c

static unsigned arch_timers_present __initdata;

static void __iomem *arch_counter_base;

struct arch_timer {
        void __iomem *base;
        struct clock_event_device evt;
};

#define to_arch_timer(e) container_of(e, struct arch_timer, evt)

static u32 arch_timer_rate;
static int arch_timer_ppi[ARCH_TIMER_MAX_TIMER_PPI];

static struct clock_event_device __percpu *arch_timer_evt;

static enum arch_timer_ppi_nr arch_timer_uses_ppi = ARCH_TIMER_VIRT_PPI;
static bool arch_timer_c3stop;
static bool arch_timer_mem_use_virtual;
static bool arch_counter_suspend_stop;
static bool vdso_default = true;

static cpumask_t evtstrm_available = CPU_MASK_NONE;
static bool evtstrm_enable = IS_ENABLED(CONFIG_ARM_ARCH_TIMER_EVTSTREAM);

static int __init early_evtstrm_cfg(char *buf)
{
        return strtobool(buf, &evtstrm_enable);
}
early_param("clocksource.arm_arch_timer.evtstrm", early_evtstrm_cfg);

/*
 * Architected system timer support.
 */

static __always_inline
void arch_timer_reg_write(int access, enum arch_timer_reg reg, u32 val,
                          struct clock_event_device *clk)
{
        if (access == ARCH_TIMER_MEM_PHYS_ACCESS) {
                struct arch_timer *timer = to_arch_timer(clk);
                switch (reg) {
                case ARCH_TIMER_REG_CTRL:
                        writel_relaxed(val, timer->base + CNTP_CTL);
                        break;
                case ARCH_TIMER_REG_TVAL:
                        writel_relaxed(val, timer->base + CNTP_TVAL);
                        break;
                }
        } else if (access == ARCH_TIMER_MEM_VIRT_ACCESS) {
                struct arch_timer *timer = to_arch_timer(clk);
                switch (reg) {
                case ARCH_TIMER_REG_CTRL:
                        writel_relaxed(val, timer->base + CNTV_CTL);
                        break;
                case ARCH_TIMER_REG_TVAL:
                        writel_relaxed(val, timer->base + CNTV_TVAL);
                        break;
                }
        } else {
                arch_timer_reg_write_cp15(access, reg, val);
        }
}

static __always_inline
u32 arch_timer_reg_read(int access, enum arch_timer_reg reg,
                        struct clock_event_device *clk)
{
        u32 val;

        if (access == ARCH_TIMER_MEM_PHYS_ACCESS) {
                struct arch_timer *timer = to_arch_timer(clk);
                switch (reg) {
                case ARCH_TIMER_REG_CTRL:
                        val = readl_relaxed(timer->base + CNTP_CTL);
                        break;
                case ARCH_TIMER_REG_TVAL:
                        val = readl_relaxed(timer->base + CNTP_TVAL);
                        break;
                }
        } else if (access == ARCH_TIMER_MEM_VIRT_ACCESS) {
                struct arch_timer *timer = to_arch_timer(clk);
                switch (reg) {
                case ARCH_TIMER_REG_CTRL:
                        val = readl_relaxed(timer->base + CNTV_CTL);
                        break;
                case ARCH_TIMER_REG_TVAL:
                        val = readl_relaxed(timer->base + CNTV_TVAL);
                        break;
                }
        } else {
                val = arch_timer_reg_read_cp15(access, reg);
        }

        return val;
}

static u64 arch_counter_get_cntpct_stable(void)
{
        return __arch_counter_get_cntpct_stable();
}

static u64 arch_counter_get_cntpct(void)
{
        return __arch_counter_get_cntpct();
}

static u64 arch_counter_get_cntvct_stable(void)
{
        return __arch_counter_get_cntvct_stable();
}

static u64 arch_counter_get_cntvct(void)
{
        return __arch_counter_get_cntvct();
}

/*
 * Default to cp15 based access because arm64 uses this function for
 * sched_clock() before DT is probed and the cp15 method is guaranteed
 * to exist on arm64. arm doesn't use this before DT is probed so even
 * if we don't have the cp15 accessors we won't have a problem.
 */
u64 (*arch_timer_read_counter)(void) = arch_counter_get_cntvct;
EXPORT_SYMBOL_GPL(arch_timer_read_counter);

static u64 arch_counter_read(struct clocksource *cs)
{
        return arch_timer_read_counter();
}

static u64 arch_counter_read_cc(const struct cyclecounter *cc)
{
        return arch_timer_read_counter();
}

static struct clocksource clocksource_counter = {
        .name   = "arch_sys_counter",
        .rating = 400,
        .read   = arch_counter_read,
        .mask   = CLOCKSOURCE_MASK(56),
        .flags  = CLOCK_SOURCE_IS_CONTINUOUS,
};

static struct cyclecounter cyclecounter __ro_after_init = {
        .read   = arch_counter_read_cc,
        .mask   = CLOCKSOURCE_MASK(56),
};

struct ate_acpi_oem_info {
        char oem_id[ACPI_OEM_ID_SIZE + 1];
        char oem_table_id[ACPI_OEM_TABLE_ID_SIZE + 1];
        u32 oem_revision;
};

#ifdef CONFIG_FSL_ERRATUM_A008585
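/*
 * Freescale/NXP erratum A-008585: the generic timer may occasionally
 * return an erroneous counter/timer value. Re-read the register until
 * two consecutive reads agree.
 */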
/*
 * The number of retries is an arbitrary value well beyond the highest number
 * of iterations the loop has been observed to take.
 */
#define __fsl_a008585_read_reg(reg) ({                  \
        u64 _old, _new;                                 \
        int _retries = 200;                             \
                                                        \
        do {                                            \
                _old = read_sysreg(reg);                \
                _new = read_sysreg(reg);                \
                _retries--;                             \
        } while (unlikely(_old != _new) && _retries);   \
                                                        \
        WARN_ON_ONCE(!_retries);                        \
        _new;                                           \
})

static u32 notrace fsl_a008585_read_cntp_tval_el0(void)
{
        return __fsl_a008585_read_reg(cntp_tval_el0);
}

static u32 notrace fsl_a008585_read_cntv_tval_el0(void)
{
        return __fsl_a008585_read_reg(cntv_tval_el0);
}

static u64 notrace fsl_a008585_read_cntpct_el0(void)
{
        return __fsl_a008585_read_reg(cntpct_el0);
}

static u64 notrace fsl_a008585_read_cntvct_el0(void)
{
        return __fsl_a008585_read_reg(cntvct_el0);
}
#endif

#ifdef CONFIG_HISILICON_ERRATUM_161010101
/*
 * Verifying that the value of the second read is larger than the first by
 * less than 32 is the only way to confirm the value is correct, so shift
 * out the lower 5 bits to check whether the difference exceeds 32.
 * Theoretically the erratum should not occur more than twice in succession
 * when reading the system counter, but some interrupts may lead to more
 * than two read errors and trigger the warning, so set the number of
 * retries far beyond the number of iterations the loop has been observed
 * to take.
 */
#define __hisi_161010101_read_reg(reg) ({                       \
        u64 _old, _new;                                         \
        int _retries = 50;                                      \
                                                                \
        do {                                                    \
                _old = read_sysreg(reg);                        \
                _new = read_sysreg(reg);                        \
                _retries--;                                     \
        } while (unlikely((_new - _old) >> 5) && _retries);     \
                                                                \
        WARN_ON_ONCE(!_retries);                                \
        _new;                                                   \
})

static u32 notrace hisi_161010101_read_cntp_tval_el0(void)
{
        return __hisi_161010101_read_reg(cntp_tval_el0);
}

static u32 notrace hisi_161010101_read_cntv_tval_el0(void)
{
        return __hisi_161010101_read_reg(cntv_tval_el0);
}

static u64 notrace hisi_161010101_read_cntpct_el0(void)
{
        return __hisi_161010101_read_reg(cntpct_el0);
}

static u64 notrace hisi_161010101_read_cntvct_el0(void)
{
        return __hisi_161010101_read_reg(cntvct_el0);
}

static struct ate_acpi_oem_info hisi_161010101_oem_info[] = {
        /*
         * Note that trailing spaces are required to properly match
         * the OEM table information.
         */
        {
                .oem_id         = "HISI  ",
                .oem_table_id   = "HIP05   ",
                .oem_revision   = 0,
        },
        {
                .oem_id         = "HISI  ",
                .oem_table_id   = "HIP06   ",
                .oem_revision   = 0,
        },
        {
                .oem_id         = "HISI  ",
                .oem_table_id   = "HIP07   ",
                .oem_revision   = 0,
        },
        { /* Sentinel indicating the end of the OEM array */ },
};
#endif

#ifdef CONFIG_ARM64_ERRATUM_858921
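/*
 * ARM erratum 858921 (Cortex-A73): a counter read can return a wrong
 * value when the low 32 bits roll over. Read the counter twice: if
 * bit 32 differs between the reads, a rollover happened in between and
 * the first value is used, otherwise the second.
 */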
static u64 notrace arm64_858921_read_cntpct_el0(void)
{
        u64 old, new;

        old = read_sysreg(cntpct_el0);
        new = read_sysreg(cntpct_el0);
        return (((old ^ new) >> 32) & 1) ? old : new;
}

static u64 notrace arm64_858921_read_cntvct_el0(void)
{
        u64 old, new;

        old = read_sysreg(cntvct_el0);
        new = read_sysreg(cntvct_el0);
        return (((old ^ new) >> 32) & 1) ? old : new;
}
#endif

#ifdef CONFIG_SUN50I_ERRATUM_UNKNOWN1
/*
 * The low bits of the counter registers are indeterminate while bit 10 or
 * greater is rolling over. Since the counter value can jump both backward
 * (7ff -> 000 -> 800) and forward (7ff -> fff -> 800), ignore register values
 * with all ones or all zeros in the low bits. Bound the loop by the maximum
 * number of CPU cycles in 3 consecutive 24 MHz counter periods.
 */
#define __sun50i_a64_read_reg(reg) ({                                   \
        u64 _val;                                                       \
        int _retries = 150;                                             \
                                                                        \
        do {                                                            \
                _val = read_sysreg(reg);                                \
                _retries--;                                             \
        } while (((_val + 1) & GENMASK(9, 0)) <= 1 && _retries);        \
                                                                        \
        WARN_ON_ONCE(!_retries);                                        \
        _val;                                                           \
})

static u64 notrace sun50i_a64_read_cntpct_el0(void)
{
        return __sun50i_a64_read_reg(cntpct_el0);
}

static u64 notrace sun50i_a64_read_cntvct_el0(void)
{
        return __sun50i_a64_read_reg(cntvct_el0);
}

static u32 notrace sun50i_a64_read_cntp_tval_el0(void)
{
        return read_sysreg(cntp_cval_el0) - sun50i_a64_read_cntpct_el0();
}

static u32 notrace sun50i_a64_read_cntv_tval_el0(void)
{
        return read_sysreg(cntv_cval_el0) - sun50i_a64_read_cntvct_el0();
}
#endif

#ifdef CONFIG_ARM_ARCH_TIMER_OOL_WORKAROUND
DEFINE_PER_CPU(const struct arch_timer_erratum_workaround *, timer_unstable_counter_workaround);
EXPORT_SYMBOL_GPL(timer_unstable_counter_workaround);

static atomic_t timer_unstable_counter_workaround_in_use = ATOMIC_INIT(0);

static void erratum_set_next_event_tval_generic(const int access, unsigned long evt,
                                                struct clock_event_device *clk)
{
        unsigned long ctrl;
        u64 cval;

        ctrl = arch_timer_reg_read(access, ARCH_TIMER_REG_CTRL, clk);
        ctrl |= ARCH_TIMER_CTRL_ENABLE;
        ctrl &= ~ARCH_TIMER_CTRL_IT_MASK;

        if (access == ARCH_TIMER_PHYS_ACCESS) {
                cval = evt + arch_counter_get_cntpct();
                write_sysreg(cval, cntp_cval_el0);
        } else {
                cval = evt + arch_counter_get_cntvct();
                write_sysreg(cval, cntv_cval_el0);
        }

        arch_timer_reg_write(access, ARCH_TIMER_REG_CTRL, ctrl, clk);
}

static __maybe_unused int erratum_set_next_event_tval_virt(unsigned long evt,
                                            struct clock_event_device *clk)
{
        erratum_set_next_event_tval_generic(ARCH_TIMER_VIRT_ACCESS, evt, clk);
        return 0;
}

static __maybe_unused int erratum_set_next_event_tval_phys(unsigned long evt,
                                            struct clock_event_device *clk)
{
        erratum_set_next_event_tval_generic(ARCH_TIMER_PHYS_ACCESS, evt, clk);
        return 0;
}

static const struct arch_timer_erratum_workaround ool_workarounds[] = {
#ifdef CONFIG_FSL_ERRATUM_A008585
        {
                .match_type = ate_match_dt,
                .id = "fsl,erratum-a008585",
                .desc = "Freescale erratum a008585",
                .read_cntp_tval_el0 = fsl_a008585_read_cntp_tval_el0,
                .read_cntv_tval_el0 = fsl_a008585_read_cntv_tval_el0,
                .read_cntpct_el0 = fsl_a008585_read_cntpct_el0,
                .read_cntvct_el0 = fsl_a008585_read_cntvct_el0,
                .set_next_event_phys = erratum_set_next_event_tval_phys,
                .set_next_event_virt = erratum_set_next_event_tval_virt,
        },
#endif
#ifdef CONFIG_HISILICON_ERRATUM_161010101
        {
                .match_type = ate_match_dt,
                .id = "hisilicon,erratum-161010101",
                .desc = "HiSilicon erratum 161010101",
                .read_cntp_tval_el0 = hisi_161010101_read_cntp_tval_el0,
                .read_cntv_tval_el0 = hisi_161010101_read_cntv_tval_el0,
                .read_cntpct_el0 = hisi_161010101_read_cntpct_el0,
                .read_cntvct_el0 = hisi_161010101_read_cntvct_el0,
                .set_next_event_phys = erratum_set_next_event_tval_phys,
                .set_next_event_virt = erratum_set_next_event_tval_virt,
        },
        {
                .match_type = ate_match_acpi_oem_info,
                .id = hisi_161010101_oem_info,
                .desc = "HiSilicon erratum 161010101",
                .read_cntp_tval_el0 = hisi_161010101_read_cntp_tval_el0,
                .read_cntv_tval_el0 = hisi_161010101_read_cntv_tval_el0,
                .read_cntpct_el0 = hisi_161010101_read_cntpct_el0,
                .read_cntvct_el0 = hisi_161010101_read_cntvct_el0,
                .set_next_event_phys = erratum_set_next_event_tval_phys,
                .set_next_event_virt = erratum_set_next_event_tval_virt,
        },
#endif
#ifdef CONFIG_ARM64_ERRATUM_858921
        {
                .match_type = ate_match_local_cap_id,
                .id = (void *)ARM64_WORKAROUND_858921,
                .desc = "ARM erratum 858921",
                .read_cntpct_el0 = arm64_858921_read_cntpct_el0,
                .read_cntvct_el0 = arm64_858921_read_cntvct_el0,
        },
#endif
#ifdef CONFIG_SUN50I_ERRATUM_UNKNOWN1
        {
                .match_type = ate_match_dt,
                .id = "allwinner,erratum-unknown1",
                .desc = "Allwinner erratum UNKNOWN1",
                .read_cntp_tval_el0 = sun50i_a64_read_cntp_tval_el0,
                .read_cntv_tval_el0 = sun50i_a64_read_cntv_tval_el0,
                .read_cntpct_el0 = sun50i_a64_read_cntpct_el0,
                .read_cntvct_el0 = sun50i_a64_read_cntvct_el0,
                .set_next_event_phys = erratum_set_next_event_tval_phys,
                .set_next_event_virt = erratum_set_next_event_tval_virt,
        },
#endif
};

typedef bool (*ate_match_fn_t)(const struct arch_timer_erratum_workaround *,
                               const void *);

static
bool arch_timer_check_dt_erratum(const struct arch_timer_erratum_workaround *wa,
                                 const void *arg)
{
        const struct device_node *np = arg;

        return of_property_read_bool(np, wa->id);
}

static
bool arch_timer_check_local_cap_erratum(const struct arch_timer_erratum_workaround *wa,
                                        const void *arg)
{
        return this_cpu_has_cap((uintptr_t)wa->id);
}

static
bool arch_timer_check_acpi_oem_erratum(const struct arch_timer_erratum_workaround *wa,
                                       const void *arg)
{
        static const struct ate_acpi_oem_info empty_oem_info = {};
        const struct ate_acpi_oem_info *info = wa->id;
        const struct acpi_table_header *table = arg;

        /* Iterate over the ACPI OEM info array, looking for a match */
        while (memcmp(info, &empty_oem_info, sizeof(*info))) {
                if (!memcmp(info->oem_id, table->oem_id, ACPI_OEM_ID_SIZE) &&
                    !memcmp(info->oem_table_id, table->oem_table_id, ACPI_OEM_TABLE_ID_SIZE) &&
                    info->oem_revision == table->oem_revision)
                        return true;

                info++;
        }

        return false;
}

static const struct arch_timer_erratum_workaround *
arch_timer_iterate_errata(enum arch_timer_erratum_match_type type,
                          ate_match_fn_t match_fn,
                          void *arg)
{
        int i;

        for (i = 0; i < ARRAY_SIZE(ool_workarounds); i++) {
                if (ool_workarounds[i].match_type != type)
                        continue;

                if (match_fn(&ool_workarounds[i], arg))
                        return &ool_workarounds[i];
        }

        return NULL;
}

static
void arch_timer_enable_workaround(const struct arch_timer_erratum_workaround *wa,
                                  bool local)
{
        int i;

        if (local) {
                __this_cpu_write(timer_unstable_counter_workaround, wa);
        } else {
                for_each_possible_cpu(i)
                        per_cpu(timer_unstable_counter_workaround, i) = wa;
        }

        if (wa->read_cntvct_el0 || wa->read_cntpct_el0)
                atomic_set(&timer_unstable_counter_workaround_in_use, 1);

        /*
         * Don't use the vdso fastpath if errata require using the
         * out-of-line counter accessor. We may change our mind pretty
         * late in the game (with a per-CPU erratum, for example), so
         * change both the default value and the vdso itself.
         */
        if (wa->read_cntvct_el0) {
                clocksource_counter.archdata.vdso_direct = false;
                vdso_default = false;
        }
}

static void arch_timer_check_ool_workaround(enum arch_timer_erratum_match_type type,
                                            void *arg)
{
        const struct arch_timer_erratum_workaround *wa, *__wa;
        ate_match_fn_t match_fn = NULL;
        bool local = false;

        switch (type) {
        case ate_match_dt:
                match_fn = arch_timer_check_dt_erratum;
                break;
        case ate_match_local_cap_id:
                match_fn = arch_timer_check_local_cap_erratum;
                local = true;
                break;
        case ate_match_acpi_oem_info:
                match_fn = arch_timer_check_acpi_oem_erratum;
                break;
        default:
                WARN_ON(1);
                return;
        }

        wa = arch_timer_iterate_errata(type, match_fn, arg);
        if (!wa)
                return;

        __wa = __this_cpu_read(timer_unstable_counter_workaround);
        if (__wa && wa != __wa)
                pr_warn("Can't enable workaround for %s (clashes with %s)\n",
                        wa->desc, __wa->desc);

        if (__wa)
                return;

        arch_timer_enable_workaround(wa, local);
        pr_info("Enabling %s workaround for %s\n",
                local ? "local" : "global", wa->desc);
}

static bool arch_timer_this_cpu_has_cntvct_wa(void)
{
        return has_erratum_handler(read_cntvct_el0);
}

static bool arch_timer_counter_has_wa(void)
{
        return atomic_read(&timer_unstable_counter_workaround_in_use);
}
#else
#define arch_timer_check_ool_workaround(t,a)            do { } while(0)
#define arch_timer_this_cpu_has_cntvct_wa()             ({false;})
#define arch_timer_counter_has_wa()                     ({false;})
#endif /* CONFIG_ARM_ARCH_TIMER_OOL_WORKAROUND */

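/*
 * The timer interrupt is level-triggered while ISTATUS is set, so mask
 * it (IMASK) before calling the event handler; programming the next
 * event unmasks it again.
 */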
static __always_inline irqreturn_t timer_handler(const int access,
                                        struct clock_event_device *evt)
{
        unsigned long ctrl;

        ctrl = arch_timer_reg_read(access, ARCH_TIMER_REG_CTRL, evt);
        if (ctrl & ARCH_TIMER_CTRL_IT_STAT) {
                ctrl |= ARCH_TIMER_CTRL_IT_MASK;
                arch_timer_reg_write(access, ARCH_TIMER_REG_CTRL, ctrl, evt);
                evt->event_handler(evt);
                return IRQ_HANDLED;
        }

        return IRQ_NONE;
}

static irqreturn_t arch_timer_handler_virt(int irq, void *dev_id)
{
        struct clock_event_device *evt = dev_id;

        return timer_handler(ARCH_TIMER_VIRT_ACCESS, evt);
}

static irqreturn_t arch_timer_handler_phys(int irq, void *dev_id)
{
        struct clock_event_device *evt = dev_id;

        return timer_handler(ARCH_TIMER_PHYS_ACCESS, evt);
}

static irqreturn_t arch_timer_handler_phys_mem(int irq, void *dev_id)
{
        struct clock_event_device *evt = dev_id;

        return timer_handler(ARCH_TIMER_MEM_PHYS_ACCESS, evt);
}

static irqreturn_t arch_timer_handler_virt_mem(int irq, void *dev_id)
{
        struct clock_event_device *evt = dev_id;

        return timer_handler(ARCH_TIMER_MEM_VIRT_ACCESS, evt);
}

static __always_inline int timer_shutdown(const int access,
                                          struct clock_event_device *clk)
{
        unsigned long ctrl;

        ctrl = arch_timer_reg_read(access, ARCH_TIMER_REG_CTRL, clk);
        ctrl &= ~ARCH_TIMER_CTRL_ENABLE;
        arch_timer_reg_write(access, ARCH_TIMER_REG_CTRL, ctrl, clk);

        return 0;
}

static int arch_timer_shutdown_virt(struct clock_event_device *clk)
{
        return timer_shutdown(ARCH_TIMER_VIRT_ACCESS, clk);
}

static int arch_timer_shutdown_phys(struct clock_event_device *clk)
{
        return timer_shutdown(ARCH_TIMER_PHYS_ACCESS, clk);
}

static int arch_timer_shutdown_virt_mem(struct clock_event_device *clk)
{
        return timer_shutdown(ARCH_TIMER_MEM_VIRT_ACCESS, clk);
}

static int arch_timer_shutdown_phys_mem(struct clock_event_device *clk)
{
        return timer_shutdown(ARCH_TIMER_MEM_PHYS_ACCESS, clk);
}

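/*
 * The new downcount is written to TVAL before the CTRL write that
 * (re)enables the timer and clears the interrupt mask, so the interrupt
 * is only unmasked once the new deadline is in place.
 */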
static __always_inline void set_next_event(const int access, unsigned long evt,
                                           struct clock_event_device *clk)
{
        unsigned long ctrl;
        ctrl = arch_timer_reg_read(access, ARCH_TIMER_REG_CTRL, clk);
        ctrl |= ARCH_TIMER_CTRL_ENABLE;
        ctrl &= ~ARCH_TIMER_CTRL_IT_MASK;
        arch_timer_reg_write(access, ARCH_TIMER_REG_TVAL, evt, clk);
        arch_timer_reg_write(access, ARCH_TIMER_REG_CTRL, ctrl, clk);
}

static int arch_timer_set_next_event_virt(unsigned long evt,
                                          struct clock_event_device *clk)
{
        set_next_event(ARCH_TIMER_VIRT_ACCESS, evt, clk);
        return 0;
}

static int arch_timer_set_next_event_phys(unsigned long evt,
                                          struct clock_event_device *clk)
{
        set_next_event(ARCH_TIMER_PHYS_ACCESS, evt, clk);
        return 0;
}

static int arch_timer_set_next_event_virt_mem(unsigned long evt,
                                              struct clock_event_device *clk)
{
        set_next_event(ARCH_TIMER_MEM_VIRT_ACCESS, evt, clk);
        return 0;
}

static int arch_timer_set_next_event_phys_mem(unsigned long evt,
                                              struct clock_event_device *clk)
{
        set_next_event(ARCH_TIMER_MEM_PHYS_ACCESS, evt, clk);
        return 0;
}

static void __arch_timer_setup(unsigned type,
                               struct clock_event_device *clk)
{
        clk->features = CLOCK_EVT_FEAT_ONESHOT;

        if (type == ARCH_TIMER_TYPE_CP15) {
                typeof(clk->set_next_event) sne;

                arch_timer_check_ool_workaround(ate_match_local_cap_id, NULL);

                if (arch_timer_c3stop)
                        clk->features |= CLOCK_EVT_FEAT_C3STOP;
                clk->name = "arch_sys_timer";
                clk->rating = 450;
                clk->cpumask = cpumask_of(smp_processor_id());
                clk->irq = arch_timer_ppi[arch_timer_uses_ppi];
                switch (arch_timer_uses_ppi) {
                case ARCH_TIMER_VIRT_PPI:
                        clk->set_state_shutdown = arch_timer_shutdown_virt;
                        clk->set_state_oneshot_stopped = arch_timer_shutdown_virt;
                        sne = erratum_handler(set_next_event_virt);
                        break;
                case ARCH_TIMER_PHYS_SECURE_PPI:
                case ARCH_TIMER_PHYS_NONSECURE_PPI:
                case ARCH_TIMER_HYP_PPI:
                        clk->set_state_shutdown = arch_timer_shutdown_phys;
                        clk->set_state_oneshot_stopped = arch_timer_shutdown_phys;
                        sne = erratum_handler(set_next_event_phys);
                        break;
                default:
                        BUG();
                }

                clk->set_next_event = sne;
        } else {
                clk->features |= CLOCK_EVT_FEAT_DYNIRQ;
                clk->name = "arch_mem_timer";
                clk->rating = 400;
                clk->cpumask = cpu_possible_mask;
                if (arch_timer_mem_use_virtual) {
                        clk->set_state_shutdown = arch_timer_shutdown_virt_mem;
                        clk->set_state_oneshot_stopped = arch_timer_shutdown_virt_mem;
                        clk->set_next_event =
                                arch_timer_set_next_event_virt_mem;
                } else {
                        clk->set_state_shutdown = arch_timer_shutdown_phys_mem;
                        clk->set_state_oneshot_stopped = arch_timer_shutdown_phys_mem;
                        clk->set_next_event =
                                arch_timer_set_next_event_phys_mem;
                }
        }

        clk->set_state_shutdown(clk);

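        /*
         * Max delta 0x7fffffff: TVAL is effectively a signed 32-bit
         * down-counter. The 0xf minimum looks like a small, arbitrary
         * safety margin.
         */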
        clockevents_config_and_register(clk, arch_timer_rate, 0xf, 0x7fffffff);
}

static void arch_timer_evtstrm_enable(int divider)
{
        u32 cntkctl = arch_timer_get_cntkctl();

        cntkctl &= ~ARCH_TIMER_EVT_TRIGGER_MASK;
        /* Set the divider and enable virtual event stream */
        cntkctl |= (divider << ARCH_TIMER_EVT_TRIGGER_SHIFT)
                        | ARCH_TIMER_VIRT_EVT_EN;
        arch_timer_set_cntkctl(cntkctl);
#ifdef CONFIG_ARM64
        cpu_set_named_feature(EVTSTRM);
#else
        elf_hwcap |= HWCAP_EVTSTRM;
#endif
#ifdef CONFIG_COMPAT
        compat_elf_hwcap |= COMPAT_HWCAP_EVTSTRM;
#endif
        cpumask_set_cpu(smp_processor_id(), &evtstrm_available);
}

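/*
 * The event stream generates periodic wake-up events (used e.g. for
 * WFE-based delay loops) from a transition of a selected counter bit.
 * The EVNTI field in CNTKCTL is four bits wide, hence the divider
 * position is capped at 15.
 */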
static void arch_timer_configure_evtstream(void)
{
        int evt_stream_div, pos;

        /* Find the closest power of two to the divisor */
        evt_stream_div = arch_timer_rate / ARCH_TIMER_EVT_STREAM_FREQ;
        pos = fls(evt_stream_div);
        if (pos > 1 && !(evt_stream_div & (1 << (pos - 2))))
                pos--;
        /* enable event stream */
        arch_timer_evtstrm_enable(min(pos, 15));
}

static void arch_counter_set_user_access(void)
{
        u32 cntkctl = arch_timer_get_cntkctl();

        /* Disable user access to the timers and both counters */
        /* Also disable virtual event stream */
        cntkctl &= ~(ARCH_TIMER_USR_PT_ACCESS_EN
                        | ARCH_TIMER_USR_VT_ACCESS_EN
                        | ARCH_TIMER_USR_VCT_ACCESS_EN
                        | ARCH_TIMER_VIRT_EVT_EN
                        | ARCH_TIMER_USR_PCT_ACCESS_EN);

        /*
         * Enable user access to the virtual counter if it doesn't
         * need to be worked around. The vdso may already have been
         * disabled, though.
         */
        if (arch_timer_this_cpu_has_cntvct_wa())
                pr_info("CPU%d: Trapping CNTVCT access\n", smp_processor_id());
        else
                cntkctl |= ARCH_TIMER_USR_VCT_ACCESS_EN;

        arch_timer_set_cntkctl(cntkctl);
}

static bool arch_timer_has_nonsecure_ppi(void)
{
        return (arch_timer_uses_ppi == ARCH_TIMER_PHYS_SECURE_PPI &&
                arch_timer_ppi[ARCH_TIMER_PHYS_NONSECURE_PPI]);
}

static u32 check_ppi_trigger(int irq)
{
        u32 flags = irq_get_trigger_type(irq);

        if (flags != IRQF_TRIGGER_HIGH && flags != IRQF_TRIGGER_LOW) {
                pr_warn("WARNING: Invalid trigger for IRQ%d, assuming level low\n", irq);
                pr_warn("WARNING: Please fix your firmware\n");
                flags = IRQF_TRIGGER_LOW;
        }

        return flags;
}

static int arch_timer_starting_cpu(unsigned int cpu)
{
        struct clock_event_device *clk = this_cpu_ptr(arch_timer_evt);
        u32 flags;

        __arch_timer_setup(ARCH_TIMER_TYPE_CP15, clk);

        flags = check_ppi_trigger(arch_timer_ppi[arch_timer_uses_ppi]);
        enable_percpu_irq(arch_timer_ppi[arch_timer_uses_ppi], flags);

        if (arch_timer_has_nonsecure_ppi()) {
                flags = check_ppi_trigger(arch_timer_ppi[ARCH_TIMER_PHYS_NONSECURE_PPI]);
                enable_percpu_irq(arch_timer_ppi[ARCH_TIMER_PHYS_NONSECURE_PPI],
                                  flags);
        }

        arch_counter_set_user_access();
        if (evtstrm_enable)
                arch_timer_configure_evtstream();

        return 0;
}

/*
 * For historical reasons, when probing with DT we use whichever (non-zero)
 * rate was probed first, and don't verify that others match. If the first node
 * probed has a clock-frequency property, this overrides the HW register.
 */
static void arch_timer_of_configure_rate(u32 rate, struct device_node *np)
{
        /* Who has more than one independent system counter? */
        if (arch_timer_rate)
                return;

        if (of_property_read_u32(np, "clock-frequency", &arch_timer_rate))
                arch_timer_rate = rate;

        /* Check the timer frequency. */
        if (arch_timer_rate == 0)
                pr_warn("frequency not available\n");
}

static void arch_timer_banner(unsigned type)
{
        pr_info("%s%s%s timer(s) running at %lu.%02luMHz (%s%s%s).\n",
                type & ARCH_TIMER_TYPE_CP15 ? "cp15" : "",
                type == (ARCH_TIMER_TYPE_CP15 | ARCH_TIMER_TYPE_MEM) ?
                        " and " : "",
                type & ARCH_TIMER_TYPE_MEM ? "mmio" : "",
                (unsigned long)arch_timer_rate / 1000000,
                (unsigned long)(arch_timer_rate / 10000) % 100,
                type & ARCH_TIMER_TYPE_CP15 ?
                        (arch_timer_uses_ppi == ARCH_TIMER_VIRT_PPI) ? "virt" : "phys" :
                        "",
                type == (ARCH_TIMER_TYPE_CP15 | ARCH_TIMER_TYPE_MEM) ? "/" : "",
                type & ARCH_TIMER_TYPE_MEM ?
                        arch_timer_mem_use_virtual ? "virt" : "phys" :
                        "");
}

u32 arch_timer_get_rate(void)
{
        return arch_timer_rate;
}

bool arch_timer_evtstrm_available(void)
{
        /*
         * We might get called from a preemptible context. This is fine
         * because availability of the event stream should always be the
         * same for a preemptible context and a context where we might
         * resume a task.
         */
        return cpumask_test_cpu(raw_smp_processor_id(), &evtstrm_available);
}

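/*
 * The memory-mapped counter is exposed as a pair of 32-bit registers.
 * Read high-low-high and retry until both high words match, so that a
 * carry between the two accesses cannot yield a torn 64-bit value.
 */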
static u64 arch_counter_get_cntvct_mem(void)
{
        u32 vct_lo, vct_hi, tmp_hi;

        do {
                vct_hi = readl_relaxed(arch_counter_base + CNTVCT_HI);
                vct_lo = readl_relaxed(arch_counter_base + CNTVCT_LO);
                tmp_hi = readl_relaxed(arch_counter_base + CNTVCT_HI);
        } while (vct_hi != tmp_hi);

        return ((u64) vct_hi << 32) | vct_lo;
}

static struct arch_timer_kvm_info arch_timer_kvm_info;

struct arch_timer_kvm_info *arch_timer_get_kvm_info(void)
{
        return &arch_timer_kvm_info;
}

static void __init arch_counter_register(unsigned type)
{
        u64 start_count;

        /* Register the CP15 based counter if we have one */
        if (type & ARCH_TIMER_TYPE_CP15) {
                u64 (*rd)(void);

                if ((IS_ENABLED(CONFIG_ARM64) && !is_hyp_mode_available()) ||
                    arch_timer_uses_ppi == ARCH_TIMER_VIRT_PPI) {
                        if (arch_timer_counter_has_wa())
                                rd = arch_counter_get_cntvct_stable;
                        else
                                rd = arch_counter_get_cntvct;
                } else {
                        if (arch_timer_counter_has_wa())
                                rd = arch_counter_get_cntpct_stable;
                        else
                                rd = arch_counter_get_cntpct;
                }

                arch_timer_read_counter = rd;
                clocksource_counter.archdata.vdso_direct = vdso_default;
        } else {
                arch_timer_read_counter = arch_counter_get_cntvct_mem;
        }

        if (!arch_counter_suspend_stop)
                clocksource_counter.flags |= CLOCK_SOURCE_SUSPEND_NONSTOP;
        start_count = arch_timer_read_counter();
        clocksource_register_hz(&clocksource_counter, arch_timer_rate);
        cyclecounter.mult = clocksource_counter.mult;
        cyclecounter.shift = clocksource_counter.shift;
        timecounter_init(&arch_timer_kvm_info.timecounter,
                         &cyclecounter, start_count);

        /* 56 bits minimum, so we assume worst case rollover */
        sched_clock_register(arch_timer_read_counter, 56, arch_timer_rate);
}

static void arch_timer_stop(struct clock_event_device *clk)
{
        pr_debug("disable IRQ%d cpu #%d\n", clk->irq, smp_processor_id());

        disable_percpu_irq(arch_timer_ppi[arch_timer_uses_ppi]);
        if (arch_timer_has_nonsecure_ppi())
                disable_percpu_irq(arch_timer_ppi[ARCH_TIMER_PHYS_NONSECURE_PPI]);

        clk->set_state_shutdown(clk);
}

static int arch_timer_dying_cpu(unsigned int cpu)
{
        struct clock_event_device *clk = this_cpu_ptr(arch_timer_evt);

        cpumask_clear_cpu(smp_processor_id(), &evtstrm_available);

        arch_timer_stop(clk);
        return 0;
}

#ifdef CONFIG_CPU_PM
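/*
 * CNTKCTL may be lost when a CPU enters a deep low-power state, so save
 * it on CPU_PM_ENTER and restore it on exit; the event stream is only
 * re-advertised if it was enabled in the first place.
 */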
static DEFINE_PER_CPU(unsigned long, saved_cntkctl);
static int arch_timer_cpu_pm_notify(struct notifier_block *self,
                                    unsigned long action, void *hcpu)
{
        if (action == CPU_PM_ENTER) {
                __this_cpu_write(saved_cntkctl, arch_timer_get_cntkctl());

                cpumask_clear_cpu(smp_processor_id(), &evtstrm_available);
        } else if (action == CPU_PM_ENTER_FAILED || action == CPU_PM_EXIT) {
                arch_timer_set_cntkctl(__this_cpu_read(saved_cntkctl));

#ifdef CONFIG_ARM64
                if (cpu_have_named_feature(EVTSTRM))
#else
                if (elf_hwcap & HWCAP_EVTSTRM)
#endif
                        cpumask_set_cpu(smp_processor_id(), &evtstrm_available);
        }
        return NOTIFY_OK;
}

static struct notifier_block arch_timer_cpu_pm_notifier = {
        .notifier_call = arch_timer_cpu_pm_notify,
};

static int __init arch_timer_cpu_pm_init(void)
{
        return cpu_pm_register_notifier(&arch_timer_cpu_pm_notifier);
}

static void __init arch_timer_cpu_pm_deinit(void)
{
        WARN_ON(cpu_pm_unregister_notifier(&arch_timer_cpu_pm_notifier));
}

#else
static int __init arch_timer_cpu_pm_init(void)
{
        return 0;
}

static void __init arch_timer_cpu_pm_deinit(void)
{
}
#endif

static int __init arch_timer_register(void)
{
        int err;
        int ppi;

        arch_timer_evt = alloc_percpu(struct clock_event_device);
        if (!arch_timer_evt) {
                err = -ENOMEM;
                goto out;
        }

        ppi = arch_timer_ppi[arch_timer_uses_ppi];
        switch (arch_timer_uses_ppi) {
        case ARCH_TIMER_VIRT_PPI:
                err = request_percpu_irq(ppi, arch_timer_handler_virt,
                                         "arch_timer", arch_timer_evt);
                break;
        case ARCH_TIMER_PHYS_SECURE_PPI:
        case ARCH_TIMER_PHYS_NONSECURE_PPI:
                err = request_percpu_irq(ppi, arch_timer_handler_phys,
                                         "arch_timer", arch_timer_evt);
                if (!err && arch_timer_has_nonsecure_ppi()) {
                        ppi = arch_timer_ppi[ARCH_TIMER_PHYS_NONSECURE_PPI];
                        err = request_percpu_irq(ppi, arch_timer_handler_phys,
                                                 "arch_timer", arch_timer_evt);
                        if (err)
                                free_percpu_irq(arch_timer_ppi[ARCH_TIMER_PHYS_SECURE_PPI],
                                                arch_timer_evt);
                }
                break;
        case ARCH_TIMER_HYP_PPI:
                err = request_percpu_irq(ppi, arch_timer_handler_phys,
                                         "arch_timer", arch_timer_evt);
                break;
        default:
                BUG();
        }

        if (err) {
                pr_err("can't register interrupt %d (%d)\n", ppi, err);
                goto out_free;
        }

        err = arch_timer_cpu_pm_init();
        if (err)
                goto out_unreg_notify;

        /* Register and immediately configure the timer on the boot CPU */
        err = cpuhp_setup_state(CPUHP_AP_ARM_ARCH_TIMER_STARTING,
                                "clockevents/arm/arch_timer:starting",
                                arch_timer_starting_cpu, arch_timer_dying_cpu);
        if (err)
                goto out_unreg_cpupm;
        return 0;

out_unreg_cpupm:
        arch_timer_cpu_pm_deinit();

out_unreg_notify:
        free_percpu_irq(arch_timer_ppi[arch_timer_uses_ppi], arch_timer_evt);
        if (arch_timer_has_nonsecure_ppi())
                free_percpu_irq(arch_timer_ppi[ARCH_TIMER_PHYS_NONSECURE_PPI],
                                arch_timer_evt);

out_free:
        free_percpu(arch_timer_evt);
out:
        return err;
}

static int __init arch_timer_mem_register(void __iomem *base, unsigned int irq)
{
        int ret;
        irq_handler_t func;
        struct arch_timer *t;

        t = kzalloc(sizeof(*t), GFP_KERNEL);
        if (!t)
                return -ENOMEM;

        t->base = base;
        t->evt.irq = irq;
        __arch_timer_setup(ARCH_TIMER_TYPE_MEM, &t->evt);

        if (arch_timer_mem_use_virtual)
                func = arch_timer_handler_virt_mem;
        else
                func = arch_timer_handler_phys_mem;

        ret = request_irq(irq, func, IRQF_TIMER, "arch_mem_timer", &t->evt);
        if (ret) {
                pr_err("Failed to request mem timer irq\n");
                kfree(t);
        }

        return ret;
}

static const struct of_device_id arch_timer_of_match[] __initconst = {
        { .compatible   = "arm,armv7-timer",    },
        { .compatible   = "arm,armv8-timer",    },
        {},
};

static const struct of_device_id arch_timer_mem_of_match[] __initconst = {
        { .compatible   = "arm,armv7-timer-mem", },
        {},
};

static bool __init arch_timer_needs_of_probing(void)
{
        struct device_node *dn;
        bool needs_probing = false;
        unsigned int mask = ARCH_TIMER_TYPE_CP15 | ARCH_TIMER_TYPE_MEM;

        /* We have two timers, and both device-tree nodes are probed. */
        if ((arch_timers_present & mask) == mask)
                return false;

        /*
         * Only one type of timer is probed; check whether we have another
         * type of timer node in the device tree.
         */
        if (arch_timers_present & ARCH_TIMER_TYPE_CP15)
                dn = of_find_matching_node(NULL, arch_timer_mem_of_match);
        else
                dn = of_find_matching_node(NULL, arch_timer_of_match);

        if (dn && of_device_is_available(dn))
                needs_probing = true;

        of_node_put(dn);

        return needs_probing;
}

static int __init arch_timer_common_init(void)
{
        arch_timer_banner(arch_timers_present);
        arch_counter_register(arch_timers_present);
        return arch_timer_arch_init();
}

/**
 * arch_timer_select_ppi() - Select suitable PPI for the current system.
 *
 * If HYP mode is available, we know that the physical timer
 * has been configured to be accessible from PL1. Use it, so
 * that a guest can use the virtual timer instead.
 *
 * On ARMv8.1 with VH extensions, the kernel runs in HYP. VHE
 * accesses to CNTP_*_EL1 registers are silently redirected to
 * their CNTHP_*_EL2 counterparts, and use a different PPI
 * number.
 *
 * If no interrupt is provided for the virtual timer, we'll have to
 * stick to the physical timer. It'd better be accessible...
 * For arm64 we never use the secure interrupt.
 *
 * Return: a suitable PPI type for the current system.
 */
static enum arch_timer_ppi_nr __init arch_timer_select_ppi(void)
{
        if (is_kernel_in_hyp_mode())
                return ARCH_TIMER_HYP_PPI;

        if (!is_hyp_mode_available() && arch_timer_ppi[ARCH_TIMER_VIRT_PPI])
                return ARCH_TIMER_VIRT_PPI;

        if (IS_ENABLED(CONFIG_ARM64))
                return ARCH_TIMER_PHYS_NONSECURE_PPI;

        return ARCH_TIMER_PHYS_SECURE_PPI;
}

static void __init arch_timer_populate_kvm_info(void)
{
        arch_timer_kvm_info.virtual_irq = arch_timer_ppi[ARCH_TIMER_VIRT_PPI];
        if (is_kernel_in_hyp_mode())
                arch_timer_kvm_info.physical_irq = arch_timer_ppi[ARCH_TIMER_PHYS_NONSECURE_PPI];
}

static int __init arch_timer_of_init(struct device_node *np)
{
        int i, ret;
        u32 rate;

        if (arch_timers_present & ARCH_TIMER_TYPE_CP15) {
                pr_warn("multiple nodes in dt, skipping\n");
                return 0;
        }

        arch_timers_present |= ARCH_TIMER_TYPE_CP15;
        for (i = ARCH_TIMER_PHYS_SECURE_PPI; i < ARCH_TIMER_MAX_TIMER_PPI; i++)
                arch_timer_ppi[i] = irq_of_parse_and_map(np, i);

        arch_timer_populate_kvm_info();

        rate = arch_timer_get_cntfrq();
        arch_timer_of_configure_rate(rate, np);

        arch_timer_c3stop = !of_property_read_bool(np, "always-on");

        /* Check for globally applicable workarounds */
        arch_timer_check_ool_workaround(ate_match_dt, np);

        /*
         * If we cannot rely on firmware initializing the timer registers then
         * we should use the physical timers instead.
         */
        if (IS_ENABLED(CONFIG_ARM) &&
            of_property_read_bool(np, "arm,cpu-registers-not-fw-configured"))
                arch_timer_uses_ppi = ARCH_TIMER_PHYS_SECURE_PPI;
        else
                arch_timer_uses_ppi = arch_timer_select_ppi();

        if (!arch_timer_ppi[arch_timer_uses_ppi]) {
                pr_err("No interrupt available, giving up\n");
                return -EINVAL;
        }

        /* On some systems, the counter stops ticking when in suspend. */
        arch_counter_suspend_stop = of_property_read_bool(np,
                                                         "arm,no-tick-in-suspend");

        ret = arch_timer_register();
        if (ret)
                return ret;

        if (arch_timer_needs_of_probing())
                return 0;

        return arch_timer_common_init();
}
TIMER_OF_DECLARE(armv7_arch_timer, "arm,armv7-timer", arch_timer_of_init);
TIMER_OF_DECLARE(armv8_arch_timer, "arm,armv8-timer", arch_timer_of_init);

static u32 __init
arch_timer_mem_frame_get_cntfrq(struct arch_timer_mem_frame *frame)
{
        void __iomem *base;
        u32 rate;

        base = ioremap(frame->cntbase, frame->size);
        if (!base) {
                pr_err("Unable to map frame @ %pa\n", &frame->cntbase);
                return 0;
        }

        rate = readl_relaxed(base + CNTFRQ);

        iounmap(base);

        return rate;
}

static struct arch_timer_mem_frame * __init
arch_timer_mem_find_best_frame(struct arch_timer_mem *timer_mem)
{
        struct arch_timer_mem_frame *frame, *best_frame = NULL;
        void __iomem *cntctlbase;
        u32 cnttidr;
        int i;

        cntctlbase = ioremap(timer_mem->cntctlbase, timer_mem->size);
        if (!cntctlbase) {
                pr_err("Can't map CNTCTLBase @ %pa\n",
                        &timer_mem->cntctlbase);
                return NULL;
        }

        cnttidr = readl_relaxed(cntctlbase + CNTTIDR);

        /*
         * Try to find a virtual capable frame. Otherwise fall back to a
         * physical capable frame.
         */
        for (i = 0; i < ARCH_TIMER_MEM_MAX_FRAMES; i++) {
                u32 cntacr = CNTACR_RFRQ | CNTACR_RWPT | CNTACR_RPCT |
                             CNTACR_RWVT | CNTACR_RVOFF | CNTACR_RVCT;

                frame = &timer_mem->frame[i];
                if (!frame->valid)
                        continue;

                /* Try enabling everything, and see what sticks */
                writel_relaxed(cntacr, cntctlbase + CNTACR(i));
                cntacr = readl_relaxed(cntctlbase + CNTACR(i));

                if ((cnttidr & CNTTIDR_VIRT(i)) &&
                    !(~cntacr & (CNTACR_RWVT | CNTACR_RVCT))) {
                        best_frame = frame;
                        arch_timer_mem_use_virtual = true;
                        break;
                }

                if (~cntacr & (CNTACR_RWPT | CNTACR_RPCT))
                        continue;

                best_frame = frame;
        }

        iounmap(cntctlbase);

        return best_frame;
}

static int __init
arch_timer_mem_frame_register(struct arch_timer_mem_frame *frame)
{
        void __iomem *base;
        int ret, irq = 0;

        if (arch_timer_mem_use_virtual)
                irq = frame->virt_irq;
        else
                irq = frame->phys_irq;

        if (!irq) {
                pr_err("Frame missing %s irq.\n",
                       arch_timer_mem_use_virtual ? "virt" : "phys");
                return -EINVAL;
        }

        if (!request_mem_region(frame->cntbase, frame->size,
                                "arch_mem_timer"))
                return -EBUSY;

        base = ioremap(frame->cntbase, frame->size);
        if (!base) {
                pr_err("Can't map frame's registers\n");
                return -ENXIO;
        }

        ret = arch_timer_mem_register(base, irq);
        if (ret) {
                iounmap(base);
                return ret;
        }

        arch_counter_base = base;
        arch_timers_present |= ARCH_TIMER_TYPE_MEM;

        return 0;
}

static int __init arch_timer_mem_of_init(struct device_node *np)
{
        struct arch_timer_mem *timer_mem;
        struct arch_timer_mem_frame *frame;
        struct device_node *frame_node;
        struct resource res;
        int ret = -EINVAL;
        u32 rate;

        timer_mem = kzalloc(sizeof(*timer_mem), GFP_KERNEL);
        if (!timer_mem)
                return -ENOMEM;

        if (of_address_to_resource(np, 0, &res))
                goto out;
        timer_mem->cntctlbase = res.start;
        timer_mem->size = resource_size(&res);

        for_each_available_child_of_node(np, frame_node) {
                u32 n;
                struct arch_timer_mem_frame *frame;

                if (of_property_read_u32(frame_node, "frame-number", &n)) {
                        pr_err(FW_BUG "Missing frame-number.\n");
                        of_node_put(frame_node);
                        goto out;
                }
                if (n >= ARCH_TIMER_MEM_MAX_FRAMES) {
                        pr_err(FW_BUG "Wrong frame-number, only 0-%u are permitted.\n",
                               ARCH_TIMER_MEM_MAX_FRAMES - 1);
                        of_node_put(frame_node);
                        goto out;
                }
                frame = &timer_mem->frame[n];

                if (frame->valid) {
                        pr_err(FW_BUG "Duplicated frame-number.\n");
                        of_node_put(frame_node);
                        goto out;
                }

                if (of_address_to_resource(frame_node, 0, &res)) {
                        of_node_put(frame_node);
                        goto out;
                }
                frame->cntbase = res.start;
                frame->size = resource_size(&res);

                frame->virt_irq = irq_of_parse_and_map(frame_node,
                                                       ARCH_TIMER_VIRT_SPI);
                frame->phys_irq = irq_of_parse_and_map(frame_node,
                                                       ARCH_TIMER_PHYS_SPI);

                frame->valid = true;
        }

        frame = arch_timer_mem_find_best_frame(timer_mem);
        if (!frame) {
                pr_err("Unable to find a suitable frame in timer @ %pa\n",
                        &timer_mem->cntctlbase);
                ret = -EINVAL;
                goto out;
        }

        rate = arch_timer_mem_frame_get_cntfrq(frame);
        arch_timer_of_configure_rate(rate, np);

        ret = arch_timer_mem_frame_register(frame);
        if (!ret && !arch_timer_needs_of_probing())
                ret = arch_timer_common_init();
out:
        kfree(timer_mem);
        return ret;
}
TIMER_OF_DECLARE(armv7_arch_timer_mem, "arm,armv7-timer-mem",
                       arch_timer_mem_of_init);

#ifdef CONFIG_ACPI_GTDT
static int __init
arch_timer_mem_verify_cntfrq(struct arch_timer_mem *timer_mem)
{
        struct arch_timer_mem_frame *frame;
        u32 rate;
        int i;

        for (i = 0; i < ARCH_TIMER_MEM_MAX_FRAMES; i++) {
                frame = &timer_mem->frame[i];

                if (!frame->valid)
                        continue;

                rate = arch_timer_mem_frame_get_cntfrq(frame);
                if (rate == arch_timer_rate)
                        continue;

                pr_err(FW_BUG "CNTFRQ mismatch: frame @ %pa: (0x%08lx), CPU: (0x%08lx)\n",
                        &frame->cntbase,
                        (unsigned long)rate, (unsigned long)arch_timer_rate);

                return -EINVAL;
        }

        return 0;
}

static int __init arch_timer_mem_acpi_init(int platform_timer_count)
{
        struct arch_timer_mem *timers, *timer;
        struct arch_timer_mem_frame *frame, *best_frame = NULL;
        int timer_count, i, ret = 0;

        timers = kcalloc(platform_timer_count, sizeof(*timers),
                            GFP_KERNEL);
        if (!timers)
                return -ENOMEM;

        ret = acpi_arch_timer_mem_init(timers, &timer_count);
        if (ret || !timer_count)
                goto out;

        /*
         * While unlikely, it's theoretically possible that none of the frames
         * in a timer expose the combination of features we want.
         */
        for (i = 0; i < timer_count; i++) {
                timer = &timers[i];

                frame = arch_timer_mem_find_best_frame(timer);
                if (!best_frame)
                        best_frame = frame;

                ret = arch_timer_mem_verify_cntfrq(timer);
                if (ret) {
                        pr_err("Disabling MMIO timers due to CNTFRQ mismatch\n");
                        goto out;
                }

                if (!best_frame) /* implies !frame */
                        /*
                         * Only complain about missing suitable frames if we
                         * haven't already found one in a previous iteration.
                         */
                        pr_err("Unable to find a suitable frame in timer @ %pa\n",
                                &timer->cntctlbase);
        }

        if (best_frame)
                ret = arch_timer_mem_frame_register(best_frame);
out:
        kfree(timers);
        return ret;
}

/* Initialize per-processor generic timer and memory-mapped timer (if present) */
static int __init arch_timer_acpi_init(struct acpi_table_header *table)
{
        int ret, platform_timer_count;

        if (arch_timers_present & ARCH_TIMER_TYPE_CP15) {
                pr_warn("already initialized, skipping\n");
                return -EINVAL;
        }

        arch_timers_present |= ARCH_TIMER_TYPE_CP15;

        ret = acpi_gtdt_init(table, &platform_timer_count);
        if (ret) {
                pr_err("Failed to init GTDT table.\n");
                return ret;
        }

        arch_timer_ppi[ARCH_TIMER_PHYS_NONSECURE_PPI] =
                acpi_gtdt_map_ppi(ARCH_TIMER_PHYS_NONSECURE_PPI);

        arch_timer_ppi[ARCH_TIMER_VIRT_PPI] =
                acpi_gtdt_map_ppi(ARCH_TIMER_VIRT_PPI);

        arch_timer_ppi[ARCH_TIMER_HYP_PPI] =
                acpi_gtdt_map_ppi(ARCH_TIMER_HYP_PPI);

        arch_timer_populate_kvm_info();

        /*
         * When probing via ACPI, we have no mechanism to override the sysreg
         * CNTFRQ value. This *must* be correct.
         */
        arch_timer_rate = arch_timer_get_cntfrq();
        if (!arch_timer_rate) {
                pr_err(FW_BUG "frequency not available.\n");
                return -EINVAL;
        }

        arch_timer_uses_ppi = arch_timer_select_ppi();
        if (!arch_timer_ppi[arch_timer_uses_ppi]) {
                pr_err("No interrupt available, giving up\n");
                return -EINVAL;
        }

        /* Always-on capability */
        arch_timer_c3stop = acpi_gtdt_c3stop(arch_timer_uses_ppi);

        /* Check for globally applicable workarounds */
        arch_timer_check_ool_workaround(ate_match_acpi_oem_info, table);

        ret = arch_timer_register();
        if (ret)
                return ret;

        if (platform_timer_count &&
            arch_timer_mem_acpi_init(platform_timer_count))
                pr_err("Failed to initialize memory-mapped timer.\n");

        return arch_timer_common_init();
}
TIMER_ACPI_DECLARE(arch_timer, ACPI_SIG_GTDT, arch_timer_acpi_init);
#endif