2 * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License version 2 as
6 * published by the Free Software Foundation.
9 * -sched_clock( ) no longer jiffies based. Uses the same clocksource
12 * Rajeshwarr/Vineetg: Mar 2008
13 * -Implemented CONFIG_GENERIC_TIME (rather deleted arch specific code)
14 * for arch independent gettimeofday()
15 * -Implemented CONFIG_GENERIC_CLOCKEVENTS as base for hrtimers
17 * Vineetg: Mar 2008: Forked off from time.c which now is time-jiff.c
20 /* ARC700 has two 32bit independent prog Timers: TIMER0 and TIMER1
21 * Each can be programmed to go from @count to @limit and optionally
22 * interrupt when that happens.
23 * A write to Control Register clears the Interrupt
25 * We've designated TIMER0 for events (clockevents)
26 * while TIMER1 for free running (clocksource)
28 * Newer ARC700 cores have 64bit clk fetching RTSC insn, preferred over TIMER1
29 * which however is currently broken
32 #include <linux/interrupt.h>
33 #include <linux/clk.h>
34 #include <linux/clk-provider.h>
35 #include <linux/clocksource.h>
36 #include <linux/clockchips.h>
37 #include <linux/cpu.h>
39 #include <linux/of_irq.h>
42 #include <soc/arc/timers.h>
43 #include <soc/arc/mcip.h>
46 static unsigned long arc_timer_freq;
48 static int noinline arc_get_timer_clk(struct device_node *node)
53 clk = of_clk_get(node, 0);
55 pr_err("timer missing clk");
59 ret = clk_prepare_enable(clk);
61 pr_err("Couldn't enable parent clk\n");
65 arc_timer_freq = clk_get_rate(clk);
70 /********** Clock Source Device *********/
72 #ifdef CONFIG_ARC_TIMERS_64BIT
74 static cycle_t arc_read_gfrc(struct clocksource *cs)
79 local_irq_save(flags);
81 __mcip_cmd(CMD_GFRC_READ_LO, 0);
82 l = read_aux_reg(ARC_REG_MCIP_READBACK);
84 __mcip_cmd(CMD_GFRC_READ_HI, 0);
85 h = read_aux_reg(ARC_REG_MCIP_READBACK);
87 local_irq_restore(flags);
89 return (((cycle_t)h) << 32) | l;
92 static struct clocksource arc_counter_gfrc = {
93 .name = "ARConnect GFRC",
95 .read = arc_read_gfrc,
96 .mask = CLOCKSOURCE_MASK(64),
97 .flags = CLOCK_SOURCE_IS_CONTINUOUS,
100 static int __init arc_cs_setup_gfrc(struct device_node *node)
105 READ_BCR(ARC_REG_MCIP_BCR, mp);
107 pr_warn("Global-64-bit-Ctr clocksource not detected");
111 ret = arc_get_timer_clk(node);
115 return clocksource_register_hz(&arc_counter_gfrc, arc_timer_freq);
117 CLOCKSOURCE_OF_DECLARE(arc_gfrc, "snps,archs-timer-gfrc", arc_cs_setup_gfrc);
119 #define AUX_RTC_CTRL 0x103
120 #define AUX_RTC_LOW 0x104
121 #define AUX_RTC_HIGH 0x105
123 static cycle_t arc_read_rtc(struct clocksource *cs)
125 unsigned long status;
129 * hardware has an internal state machine which tracks readout of
130 * low/high and updates the CTRL.status if
131 * - interrupt/exception taken between the two reads
132 * - high increments after low has been read
135 l = read_aux_reg(AUX_RTC_LOW);
136 h = read_aux_reg(AUX_RTC_HIGH);
137 status = read_aux_reg(AUX_RTC_CTRL);
138 } while (!(status & _BITUL(31)));
140 return (((cycle_t)h) << 32) | l;
143 static struct clocksource arc_counter_rtc = {
146 .read = arc_read_rtc,
147 .mask = CLOCKSOURCE_MASK(64),
148 .flags = CLOCK_SOURCE_IS_CONTINUOUS,
151 static int __init arc_cs_setup_rtc(struct device_node *node)
153 struct bcr_timer timer;
156 READ_BCR(ARC_REG_TIMERS_BCR, timer);
158 pr_warn("Local-64-bit-Ctr clocksource not detected");
162 /* Local to CPU hence not usable in SMP */
163 if (IS_ENABLED(CONFIG_SMP)) {
164 pr_warn("Local-64-bit-Ctr not usable in SMP");
168 ret = arc_get_timer_clk(node);
172 write_aux_reg(AUX_RTC_CTRL, 1);
174 return clocksource_register_hz(&arc_counter_rtc, arc_timer_freq);
176 CLOCKSOURCE_OF_DECLARE(arc_rtc, "snps,archs-timer-rtc", arc_cs_setup_rtc);
181 * 32bit TIMER1 to keep counting monotonically and wraparound
184 static cycle_t arc_read_timer1(struct clocksource *cs)
186 return (cycle_t) read_aux_reg(ARC_REG_TIMER1_CNT);
189 static struct clocksource arc_counter_timer1 = {
190 .name = "ARC Timer1",
192 .read = arc_read_timer1,
193 .mask = CLOCKSOURCE_MASK(32),
194 .flags = CLOCK_SOURCE_IS_CONTINUOUS,
197 static int __init arc_cs_setup_timer1(struct device_node *node)
201 /* Local to CPU hence not usable in SMP */
202 if (IS_ENABLED(CONFIG_SMP))
205 ret = arc_get_timer_clk(node);
209 write_aux_reg(ARC_REG_TIMER1_LIMIT, ARC_TIMERN_MAX);
210 write_aux_reg(ARC_REG_TIMER1_CNT, 0);
211 write_aux_reg(ARC_REG_TIMER1_CTRL, TIMER_CTRL_NH);
213 return clocksource_register_hz(&arc_counter_timer1, arc_timer_freq);
216 /********** Clock Event Device *********/
218 static int arc_timer_irq;
221 * Arm the timer to interrupt after @cycles
222 * The distinction for oneshot/periodic is done in arc_event_timer_ack() below
224 static void arc_timer_event_setup(unsigned int cycles)
226 write_aux_reg(ARC_REG_TIMER0_LIMIT, cycles);
227 write_aux_reg(ARC_REG_TIMER0_CNT, 0); /* start from 0 */
229 write_aux_reg(ARC_REG_TIMER0_CTRL, TIMER_CTRL_IE | TIMER_CTRL_NH);
/* clockevent callback: program the next oneshot expiry @delta cycles away */
static int arc_clkevent_set_next_event(unsigned long delta,
				       struct clock_event_device *dev)
{
	arc_timer_event_setup(delta);
	return 0;
}
240 static int arc_clkevent_set_periodic(struct clock_event_device *dev)
243 * At X Hz, 1 sec = 1000ms -> X cycles;
244 * 10ms -> X / 100 cycles
246 arc_timer_event_setup(arc_timer_freq / HZ);
250 static DEFINE_PER_CPU(struct clock_event_device, arc_clockevent_device) = {
251 .name = "ARC Timer0",
252 .features = CLOCK_EVT_FEAT_ONESHOT |
253 CLOCK_EVT_FEAT_PERIODIC,
255 .set_next_event = arc_clkevent_set_next_event,
256 .set_state_periodic = arc_clkevent_set_periodic,
259 static irqreturn_t timer_irq_handler(int irq, void *dev_id)
262 * Note that generic IRQ core could have passed @evt for @dev_id if
263 * irq_set_chip_and_handler() asked for handle_percpu_devid_irq()
265 struct clock_event_device *evt = this_cpu_ptr(&arc_clockevent_device);
266 int irq_reenable = clockevent_state_periodic(evt);
269 * Any write to CTRL reg ACks the interrupt, we rewrite the
270 * Count when [N]ot [H]alted bit.
271 * And re-arm it if perioid by [I]nterrupt [E]nable bit
273 write_aux_reg(ARC_REG_TIMER0_CTRL, irq_reenable | TIMER_CTRL_NH);
275 evt->event_handler(evt);
281 static int arc_timer_starting_cpu(unsigned int cpu)
283 struct clock_event_device *evt = this_cpu_ptr(&arc_clockevent_device);
285 evt->cpumask = cpumask_of(smp_processor_id());
287 clockevents_config_and_register(evt, arc_timer_freq, 0, ARC_TIMERN_MAX);
288 enable_percpu_irq(arc_timer_irq, 0);
292 static int arc_timer_dying_cpu(unsigned int cpu)
294 disable_percpu_irq(arc_timer_irq);
299 * clockevent setup for boot CPU
301 static int __init arc_clockevent_setup(struct device_node *node)
303 struct clock_event_device *evt = this_cpu_ptr(&arc_clockevent_device);
306 arc_timer_irq = irq_of_parse_and_map(node, 0);
307 if (arc_timer_irq <= 0) {
308 pr_err("clockevent: missing irq");
312 ret = arc_get_timer_clk(node);
314 pr_err("clockevent: missing clk");
318 /* Needs apriori irq_set_percpu_devid() done in intc map function */
319 ret = request_percpu_irq(arc_timer_irq, timer_irq_handler,
320 "Timer0 (per-cpu-tick)", evt);
322 pr_err("clockevent: unable to request irq\n");
326 ret = cpuhp_setup_state(CPUHP_AP_ARC_TIMER_STARTING,
327 "AP_ARC_TIMER_STARTING",
328 arc_timer_starting_cpu,
329 arc_timer_dying_cpu);
331 pr_err("Failed to setup hotplug state");
337 static int __init arc_of_timer_init(struct device_node *np)
339 static int init_count = 0;
344 ret = arc_clockevent_setup(np);
346 ret = arc_cs_setup_timer1(np);
351 CLOCKSOURCE_OF_DECLARE(arc_clkevt, "snps,arc-timer", arc_of_timer_init);