drivers/clocksource/timer-mediatek.c
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Mediatek SoCs General-Purpose Timer handling.
 *
 * Copyright (C) 2014 Matthias Brugger
 *
 * Matthias Brugger <matthias.bgg@gmail.com>
 */

#define pr_fmt(fmt)     KBUILD_MODNAME ": " fmt

#include <linux/clockchips.h>
#include <linux/clocksource.h>
#include <linux/interrupt.h>
#include <linux/irqreturn.h>
#include <linux/sched_clock.h>
#include <linux/slab.h>
#include "timer-of.h"

#define TIMER_CLK_EVT           (1)
#define TIMER_CLK_SRC           (2)

#define TIMER_SYNC_TICKS        (3)

/* cpux mcusys wrapper */
#define CPUX_CON_REG            0x0
#define CPUX_IDX_REG            0x4

/* cpux */
#define CPUX_IDX_GLOBAL_CTRL    0x0
 #define CPUX_ENABLE            BIT(0)
 #define CPUX_CLK_DIV_MASK      GENMASK(10, 8)
 #define CPUX_CLK_DIV1          BIT(8)
 #define CPUX_CLK_DIV2          BIT(9)
 #define CPUX_CLK_DIV4          BIT(10)
#define CPUX_IDX_GLOBAL_IRQ     0x30

/* gpt */
#define GPT_IRQ_EN_REG          0x00
#define GPT_IRQ_ENABLE(val)     BIT((val) - 1)
#define GPT_IRQ_ACK_REG         0x08
#define GPT_IRQ_ACK(val)        BIT((val) - 1)

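/* Each GPT instance exposes CON, CLK, CNT and CMP registers at a 0x10 stride */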
#define GPT_CTRL_REG(val)       (0x10 * (val))
#define GPT_CTRL_OP(val)        (((val) & 0x3) << 4)
#define GPT_CTRL_OP_ONESHOT     (0)
#define GPT_CTRL_OP_REPEAT      (1)
#define GPT_CTRL_OP_FREERUN     (3)
#define GPT_CTRL_CLEAR          (2)
#define GPT_CTRL_ENABLE         (1)
#define GPT_CTRL_DISABLE        (0)

#define GPT_CLK_REG(val)        (0x04 + (0x10 * (val)))
#define GPT_CLK_SRC(val)        (((val) & 0x1) << 4)
#define GPT_CLK_SRC_SYS13M      (0)
#define GPT_CLK_SRC_RTC32K      (1)
#define GPT_CLK_DIV1            (0x0)
#define GPT_CLK_DIV2            (0x1)

#define GPT_CNT_REG(val)        (0x08 + (0x10 * (val)))
#define GPT_CMP_REG(val)        (0x0C + (0x10 * (val)))

/* system timer */
#define SYST_BASE               (0x40)

#define SYST_CON                (SYST_BASE + 0x0)
#define SYST_VAL                (SYST_BASE + 0x4)

#define SYST_CON_REG(to)        (timer_of_base(to) + SYST_CON)
#define SYST_VAL_REG(to)        (timer_of_base(to) + SYST_VAL)

/*
 * SYST_CON_EN: Clock enable. Shall be set to:
 *   - Start the timer countdown.
 *   - Allow the timeout ticks to be updated.
 *   - Allow changing the interrupt status, e.g. clearing a pending irq.
 *
 * SYST_CON_IRQ_EN: Set to enable the interrupt.
 *
 * SYST_CON_IRQ_CLR: Set to clear the interrupt.
 */
#define SYST_CON_EN              BIT(0)
#define SYST_CON_IRQ_EN          BIT(1)
#define SYST_CON_IRQ_CLR         BIT(4)

static void __iomem *gpt_sched_reg __read_mostly;

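/*
 * CPUX registers are accessed indirectly through the mcusys wrapper:
 * the register index is written to CPUX_IDX_REG, then the data is
 * read from or written to CPUX_CON_REG.
 */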
static u32 mtk_cpux_readl(u32 reg_idx, struct timer_of *to)
{
        writel(reg_idx, timer_of_base(to) + CPUX_IDX_REG);
        return readl(timer_of_base(to) + CPUX_CON_REG);
}

static void mtk_cpux_writel(u32 val, u32 reg_idx, struct timer_of *to)
{
        writel(reg_idx, timer_of_base(to) + CPUX_IDX_REG);
        writel(val, timer_of_base(to) + CPUX_CON_REG);
}

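/* Enable or disable the CPUXGPT interrupt for every possible CPU */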
static void mtk_cpux_set_irq(struct timer_of *to, bool enable)
{
        const unsigned long *irq_mask = cpumask_bits(cpu_possible_mask);
        u32 val;

        val = mtk_cpux_readl(CPUX_IDX_GLOBAL_IRQ, to);

        if (enable)
                val |= *irq_mask;
        else
                val &= ~(*irq_mask);

        mtk_cpux_writel(val, CPUX_IDX_GLOBAL_IRQ, to);
}

static int mtk_cpux_clkevt_shutdown(struct clock_event_device *clkevt)
{
        /* Mask the IRQ */
        mtk_cpux_set_irq(to_timer_of(clkevt), false);

        /*
         * Disabling the CPUXGPT timer will crash the platform, especially
         * if Trusted Firmware is using it (usually, for sleep states),
         * so we only mask the IRQ and call it a day.
         */
        return 0;
}

static int mtk_cpux_clkevt_resume(struct clock_event_device *clkevt)
{
        mtk_cpux_set_irq(to_timer_of(clkevt), true);
        return 0;
}

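/*
 * Acknowledge a SYST interrupt: the first write drops SYST_CON_IRQ_EN
 * while keeping the clock enabled, the second one sets SYST_CON_IRQ_CLR
 * (SYST_CON_EN must stay set for the status change to take effect).
 */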
static void mtk_syst_ack_irq(struct timer_of *to)
{
        /* Clear and disable interrupt */
        writel(SYST_CON_EN, SYST_CON_REG(to));
        writel(SYST_CON_IRQ_CLR | SYST_CON_EN, SYST_CON_REG(to));
}

static irqreturn_t mtk_syst_handler(int irq, void *dev_id)
{
        struct clock_event_device *clkevt = dev_id;
        struct timer_of *to = to_timer_of(clkevt);

        mtk_syst_ack_irq(to);
        clkevt->event_handler(clkevt);

        return IRQ_HANDLED;
}

static int mtk_syst_clkevt_next_event(unsigned long ticks,
                                      struct clock_event_device *clkevt)
{
        struct timer_of *to = to_timer_of(clkevt);

        /* Enable clock to allow timeout tick update later */
        writel(SYST_CON_EN, SYST_CON_REG(to));

        /*
         * Write new timeout ticks. Timer shall start countdown
         * after timeout ticks are updated.
         */
        writel(ticks, SYST_VAL_REG(to));

        /* Enable interrupt */
        writel(SYST_CON_EN | SYST_CON_IRQ_EN, SYST_CON_REG(to));

        return 0;
}

static int mtk_syst_clkevt_shutdown(struct clock_event_device *clkevt)
{
        /* Clear any irq */
        mtk_syst_ack_irq(to_timer_of(clkevt));

        /* Disable timer */
        writel(0, SYST_CON_REG(to_timer_of(clkevt)));

        return 0;
}

static int mtk_syst_clkevt_resume(struct clock_event_device *clkevt)
{
        return mtk_syst_clkevt_shutdown(clkevt);
}

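/* Nothing to configure here: the timer is programmed in set_next_event() */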
static int mtk_syst_clkevt_oneshot(struct clock_event_device *clkevt)
{
        return 0;
}

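/* sched_clock reads the free-running GPT source counter directly */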
static u64 notrace mtk_gpt_read_sched_clock(void)
{
        return readl_relaxed(gpt_sched_reg);
}

static void mtk_gpt_clkevt_time_stop(struct timer_of *to, u8 timer)
{
        u32 val;

        val = readl(timer_of_base(to) + GPT_CTRL_REG(timer));
        writel(val & ~GPT_CTRL_ENABLE, timer_of_base(to) +
               GPT_CTRL_REG(timer));
}

static void mtk_gpt_clkevt_time_setup(struct timer_of *to,
                                      unsigned long delay, u8 timer)
{
        writel(delay, timer_of_base(to) + GPT_CMP_REG(timer));
}

static void mtk_gpt_clkevt_time_start(struct timer_of *to,
                                      bool periodic, u8 timer)
{
        u32 val;

        /* Acknowledge interrupt */
        writel(GPT_IRQ_ACK(timer), timer_of_base(to) + GPT_IRQ_ACK_REG);

        val = readl(timer_of_base(to) + GPT_CTRL_REG(timer));

        /* Clear 2 bit timer operation mode field */
        val &= ~GPT_CTRL_OP(0x3);

        if (periodic)
                val |= GPT_CTRL_OP(GPT_CTRL_OP_REPEAT);
        else
                val |= GPT_CTRL_OP(GPT_CTRL_OP_ONESHOT);

        writel(val | GPT_CTRL_ENABLE | GPT_CTRL_CLEAR,
               timer_of_base(to) + GPT_CTRL_REG(timer));
}

static int mtk_gpt_clkevt_shutdown(struct clock_event_device *clk)
{
        mtk_gpt_clkevt_time_stop(to_timer_of(clk), TIMER_CLK_EVT);

        return 0;
}

static int mtk_gpt_clkevt_set_periodic(struct clock_event_device *clk)
{
        struct timer_of *to = to_timer_of(clk);

        mtk_gpt_clkevt_time_stop(to, TIMER_CLK_EVT);
        mtk_gpt_clkevt_time_setup(to, to->of_clk.period, TIMER_CLK_EVT);
        mtk_gpt_clkevt_time_start(to, true, TIMER_CLK_EVT);

        return 0;
}

static int mtk_gpt_clkevt_next_event(unsigned long event,
                                     struct clock_event_device *clk)
{
        struct timer_of *to = to_timer_of(clk);

        mtk_gpt_clkevt_time_stop(to, TIMER_CLK_EVT);
        mtk_gpt_clkevt_time_setup(to, event, TIMER_CLK_EVT);
        mtk_gpt_clkevt_time_start(to, false, TIMER_CLK_EVT);

        return 0;
}

static irqreturn_t mtk_gpt_interrupt(int irq, void *dev_id)
{
        struct clock_event_device *clkevt = (struct clock_event_device *)dev_id;
        struct timer_of *to = to_timer_of(clkevt);

        /* Acknowledge the clockevent timer irq */
        writel(GPT_IRQ_ACK(TIMER_CLK_EVT), timer_of_base(to) + GPT_IRQ_ACK_REG);
        clkevt->event_handler(clkevt);

        return IRQ_HANDLED;
}

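/*
 * Put a GPT in a known state: disable and clear it, select the 13 MHz
 * system clock without division, zero the compare value, then re-enable
 * it in the requested operation mode.
 */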
static void
__init mtk_gpt_setup(struct timer_of *to, u8 timer, u8 option)
{
        writel(GPT_CTRL_CLEAR | GPT_CTRL_DISABLE,
               timer_of_base(to) + GPT_CTRL_REG(timer));

        writel(GPT_CLK_SRC(GPT_CLK_SRC_SYS13M) | GPT_CLK_DIV1,
               timer_of_base(to) + GPT_CLK_REG(timer));

        writel(0x0, timer_of_base(to) + GPT_CMP_REG(timer));

        writel(GPT_CTRL_OP(option) | GPT_CTRL_ENABLE,
               timer_of_base(to) + GPT_CTRL_REG(timer));
}

static void mtk_gpt_enable_irq(struct timer_of *to, u8 timer)
{
        u32 val;

        /* Disable all interrupts */
        writel(0x0, timer_of_base(to) + GPT_IRQ_EN_REG);

        /* Acknowledge all spurious pending interrupts */
        writel(0x3f, timer_of_base(to) + GPT_IRQ_ACK_REG);

        val = readl(timer_of_base(to) + GPT_IRQ_EN_REG);
        writel(val | GPT_IRQ_ENABLE(timer),
               timer_of_base(to) + GPT_IRQ_EN_REG);
}

static void mtk_gpt_resume(struct clock_event_device *clk)
{
        struct timer_of *to = to_timer_of(clk);

        mtk_gpt_enable_irq(to, TIMER_CLK_EVT);
}

static void mtk_gpt_suspend(struct clock_event_device *clk)
{
        struct timer_of *to = to_timer_of(clk);

        /* Disable all interrupts */
        writel(0x0, timer_of_base(to) + GPT_IRQ_EN_REG);

        /*
         * This is called with interrupts disabled, so we need to ack
         * any pending interrupt here; otherwise, for example, ATF may
         * prevent the suspend from completing.
         */
        writel(0x3f, timer_of_base(to) + GPT_IRQ_ACK_REG);
}

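/* timer_of instance shared by the SYST and GPT init paths */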
static struct timer_of to = {
        .flags = TIMER_OF_IRQ | TIMER_OF_BASE | TIMER_OF_CLOCK,

        .clkevt = {
                .name = "mtk-clkevt",
                .rating = 300,
                .cpumask = cpu_possible_mask,
        },

        .of_irq = {
                .flags = IRQF_TIMER | IRQF_IRQPOLL,
        },
};

static int __init mtk_cpux_init(struct device_node *node)
{
        static struct timer_of to_cpux;
        u32 freq, val;
        int ret;

        /*
         * There are per-cpu interrupts for the CPUX General Purpose Timer
         * but since this timer feeds the AArch64 System Timer we can rely
         * on the CPU timer PPIs as well, so we don't declare TIMER_OF_IRQ.
         */
        to_cpux.flags = TIMER_OF_BASE | TIMER_OF_CLOCK;
        to_cpux.clkevt.name = "mtk-cpuxgpt";
        to_cpux.clkevt.rating = 10;
        to_cpux.clkevt.cpumask = cpu_possible_mask;
        to_cpux.clkevt.set_state_shutdown = mtk_cpux_clkevt_shutdown;
        to_cpux.clkevt.tick_resume = mtk_cpux_clkevt_resume;

        /* If this fails, bad things are about to happen... */
        ret = timer_of_init(node, &to_cpux);
        if (ret) {
                WARN(1, "Cannot start CPUX timers.\n");
                return ret;
        }

        /*
         * Check whether we're given a clock with the right frequency for
         * this timer; otherwise warn but keep going with the setup anyway,
         * as that makes it possible to still boot the kernel, even though
         * it may not work correctly (random lockups, etc).
         * The reason behind this is that having an early UART may not be
         * possible for everyone and this gives a chance to retrieve kmsg
         * for debugging even on consumer devices.
         */
        freq = timer_of_rate(&to_cpux);
        if (freq > 13000000)
                WARN(1, "Requested unsupported timer frequency %u\n", freq);

        /* The clock input is 26 MHz; set DIV2 to get a 13 MHz clock */
        val = mtk_cpux_readl(CPUX_IDX_GLOBAL_CTRL, &to_cpux);
        val &= ~CPUX_CLK_DIV_MASK;
        val |= CPUX_CLK_DIV2;
        mtk_cpux_writel(val, CPUX_IDX_GLOBAL_CTRL, &to_cpux);

        /* Enable all CPUXGPT timers */
        val = mtk_cpux_readl(CPUX_IDX_GLOBAL_CTRL, &to_cpux);
        mtk_cpux_writel(val | CPUX_ENABLE, CPUX_IDX_GLOBAL_CTRL, &to_cpux);

        clockevents_config_and_register(&to_cpux.clkevt, timer_of_rate(&to_cpux),
                                        TIMER_SYNC_TICKS, 0xffffffff);

        return 0;
}

static int __init mtk_syst_init(struct device_node *node)
{
        int ret;

        to.clkevt.features = CLOCK_EVT_FEAT_DYNIRQ | CLOCK_EVT_FEAT_ONESHOT;
        to.clkevt.set_state_shutdown = mtk_syst_clkevt_shutdown;
        to.clkevt.set_state_oneshot = mtk_syst_clkevt_oneshot;
        to.clkevt.tick_resume = mtk_syst_clkevt_resume;
        to.clkevt.set_next_event = mtk_syst_clkevt_next_event;
        to.of_irq.handler = mtk_syst_handler;

        ret = timer_of_init(node, &to);
        if (ret)
                return ret;

        clockevents_config_and_register(&to.clkevt, timer_of_rate(&to),
                                        TIMER_SYNC_TICKS, 0xffffffff);

        return 0;
}

static int __init mtk_gpt_init(struct device_node *node)
{
        int ret;

        to.clkevt.features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT;
        to.clkevt.set_state_shutdown = mtk_gpt_clkevt_shutdown;
        to.clkevt.set_state_periodic = mtk_gpt_clkevt_set_periodic;
        to.clkevt.set_state_oneshot = mtk_gpt_clkevt_shutdown;
        to.clkevt.tick_resume = mtk_gpt_clkevt_shutdown;
        to.clkevt.set_next_event = mtk_gpt_clkevt_next_event;
        to.clkevt.suspend = mtk_gpt_suspend;
        to.clkevt.resume = mtk_gpt_resume;
        to.of_irq.handler = mtk_gpt_interrupt;

        ret = timer_of_init(node, &to);
        if (ret)
                return ret;

        /* Configure clock source */
        mtk_gpt_setup(&to, TIMER_CLK_SRC, GPT_CTRL_OP_FREERUN);
        clocksource_mmio_init(timer_of_base(&to) + GPT_CNT_REG(TIMER_CLK_SRC),
                              node->name, timer_of_rate(&to), 300, 32,
                              clocksource_mmio_readl_up);
        gpt_sched_reg = timer_of_base(&to) + GPT_CNT_REG(TIMER_CLK_SRC);
        sched_clock_register(mtk_gpt_read_sched_clock, 32, timer_of_rate(&to));

        /* Configure clock event */
        mtk_gpt_setup(&to, TIMER_CLK_EVT, GPT_CTRL_OP_REPEAT);
        clockevents_config_and_register(&to.clkevt, timer_of_rate(&to),
                                        TIMER_SYNC_TICKS, 0xffffffff);

        mtk_gpt_enable_irq(&to, TIMER_CLK_EVT);

        return 0;
}
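/* Match the DT compatible to the right timer flavour: GPT, SYST or CPUX */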
TIMER_OF_DECLARE(mtk_mt6577, "mediatek,mt6577-timer", mtk_gpt_init);
TIMER_OF_DECLARE(mtk_mt6765, "mediatek,mt6765-timer", mtk_syst_init);
TIMER_OF_DECLARE(mtk_mt6795, "mediatek,mt6795-systimer", mtk_cpux_init);