// SPDX-License-Identifier: GPL-2.0
/*
 * RZ/G2L Clock Pulse Generator
 *
 * Copyright (C) 2021 Renesas Electronics Corp.
 *
 * Based on renesas-cpg-mssr.c
 *
 * Copyright (C) 2015 Glider bvba
 * Copyright (C) 2013 Ideas On Board SPRL
 * Copyright (C) 2015 Renesas Electronics Corp.
 */
#include <linux/bitfield.h>
#include <linux/clk.h>
#include <linux/clk-provider.h>
#include <linux/clk/renesas.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/init.h>
#include <linux/iopoll.h>
#include <linux/mod_devicetable.h>
#include <linux/module.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/pm_clock.h>
#include <linux/pm_domain.h>
#include <linux/reset-controller.h>
#include <linux/slab.h>
#include <linux/units.h>

#include <dt-bindings/clock/renesas-cpg-mssr.h>

#include "rzg2l-cpg.h"
/*
 * Extraction dropped the preprocessor guard: two conflicting WARN_DEBUG
 * definitions were left back to back. Restore the DEBUG-only variant.
 */
#ifdef DEBUG
#define WARN_DEBUG(x)	WARN_ON(x)
#else
#define WARN_DEBUG(x)	do { } while (0)
#endif

/* Field extraction helpers for the packed core-clock "conf" word */
#define GET_SHIFT(val)		((val >> 12) & 0xff)
#define GET_WIDTH(val)		((val >> 8) & 0xf)

/* SAM PLL register field accessors (CLK1/CLK2 registers) */
#define KDIV(val)		((s16)FIELD_GET(GENMASK(31, 16), val))
#define MDIV(val)		FIELD_GET(GENMASK(15, 6), val)
#define PDIV(val)		FIELD_GET(GENMASK(5, 0), val)
#define SDIV(val)		FIELD_GET(GENMASK(2, 0), val)

/* Module clock ON/MON and reset/monitor register offsets */
#define CLK_ON_R(reg)		(reg)
#define CLK_MON_R(reg)		(0x180 + (reg))
#define CLK_RST_R(reg)		(reg)
#define CLK_MRST_R(reg)		(0x180 + (reg))

#define GET_REG_OFFSET(val)		((val >> 20) & 0xfff)
#define GET_REG_SAMPLL_CLK1(val)	((val >> 22) & 0xfff)
#define GET_REG_SAMPLL_CLK2(val)	((val >> 12) & 0xfff)

/* Maximum supported VCLK (dot clock) frequency in Hz */
#define MAX_VCLK_FREQ		(148500000)
64 struct rzg2l_cpg_priv *priv;
67 #define to_sd_hw_data(_hw) container_of(_hw, struct sd_hw_data, hw)
69 struct rzg2l_pll5_param {
78 struct rzg2l_pll5_mux_dsi_div_param {
85 * struct rzg2l_cpg_priv - Clock Pulse Generator Private Data
87 * @rcdev: Reset controller entity
89 * @base: CPG register block base address
90 * @rmw_lock: protects register accesses
91 * @clks: Array containing all Core and Module Clocks
92 * @num_core_clks: Number of Core Clocks in clks[]
93 * @num_mod_clks: Number of Module Clocks in clks[]
94 * @num_resets: Number of Module Resets in info->resets[]
95 * @last_dt_core_clk: ID of the last Core Clock exported to DT
96 * @info: Pointer to platform data
98 * @mux_dsi_div_params: pll5 mux and dsi div parameters
100 struct rzg2l_cpg_priv {
101 struct reset_controller_dev rcdev;
107 unsigned int num_core_clks;
108 unsigned int num_mod_clks;
109 unsigned int num_resets;
110 unsigned int last_dt_core_clk;
112 const struct rzg2l_cpg_info *info;
114 struct generic_pm_domain genpd;
116 struct rzg2l_pll5_mux_dsi_div_param mux_dsi_div_params;
/* devm action: remove the OF clock provider registered in probe (data = np) */
static void rzg2l_cpg_del_clk_provider(void *data)
{
	of_clk_del_provider(data);
}
124 static struct clk * __init
125 rzg2l_cpg_div_clk_register(const struct cpg_core_clk *core,
128 struct rzg2l_cpg_priv *priv)
130 struct device *dev = priv->dev;
131 const struct clk *parent;
132 const char *parent_name;
133 struct clk_hw *clk_hw;
135 parent = clks[core->parent & 0xffff];
137 return ERR_CAST(parent);
139 parent_name = __clk_get_name(parent);
142 clk_hw = clk_hw_register_divider_table(dev, core->name,
144 base + GET_REG_OFFSET(core->conf),
145 GET_SHIFT(core->conf),
146 GET_WIDTH(core->conf),
151 clk_hw = clk_hw_register_divider(dev, core->name,
153 base + GET_REG_OFFSET(core->conf),
154 GET_SHIFT(core->conf),
155 GET_WIDTH(core->conf),
156 core->flag, &priv->rmw_lock);
159 return ERR_CAST(clk_hw);
164 static struct clk * __init
165 rzg2l_cpg_mux_clk_register(const struct cpg_core_clk *core,
167 struct rzg2l_cpg_priv *priv)
169 const struct clk_hw *clk_hw;
171 clk_hw = devm_clk_hw_register_mux(priv->dev, core->name,
172 core->parent_names, core->num_parents,
174 base + GET_REG_OFFSET(core->conf),
175 GET_SHIFT(core->conf),
176 GET_WIDTH(core->conf),
177 core->mux_flags, &priv->rmw_lock);
179 return ERR_CAST(clk_hw);
184 static int rzg2l_cpg_sd_clk_mux_set_parent(struct clk_hw *hw, u8 index)
186 struct sd_hw_data *hwdata = to_sd_hw_data(hw);
187 struct rzg2l_cpg_priv *priv = hwdata->priv;
188 u32 off = GET_REG_OFFSET(hwdata->conf);
189 u32 shift = GET_SHIFT(hwdata->conf);
190 const u32 clk_src_266 = 2;
191 u32 msk, val, bitmask;
196 * As per the HW manual, we should not directly switch from 533 MHz to
197 * 400 MHz and vice versa. To change the setting from 2’b01 (533 MHz)
198 * to 2’b10 (400 MHz) or vice versa, Switch to 2’b11 (266 MHz) first,
199 * and then switch to the target setting (2’b01 (533 MHz) or 2’b10
201 * Setting a value of '0' to the SEL_SDHI0_SET or SEL_SDHI1_SET clock
202 * switching register is prohibited.
203 * The clock mux has 3 input clocks(533 MHz, 400 MHz, and 266 MHz), and
204 * the index to value mapping is done by adding 1 to the index.
206 bitmask = (GENMASK(GET_WIDTH(hwdata->conf) - 1, 0) << shift) << 16;
207 msk = off ? CPG_CLKSTATUS_SELSDHI1_STS : CPG_CLKSTATUS_SELSDHI0_STS;
208 spin_lock_irqsave(&priv->rmw_lock, flags);
209 if (index != clk_src_266) {
210 writel(bitmask | ((clk_src_266 + 1) << shift), priv->base + off);
212 ret = readl_poll_timeout_atomic(priv->base + CPG_CLKSTATUS, val,
214 CPG_SDHI_CLK_SWITCH_STATUS_TIMEOUT_US);
219 writel(bitmask | ((index + 1) << shift), priv->base + off);
221 ret = readl_poll_timeout_atomic(priv->base + CPG_CLKSTATUS, val,
223 CPG_SDHI_CLK_SWITCH_STATUS_TIMEOUT_US);
225 spin_unlock_irqrestore(&priv->rmw_lock, flags);
228 dev_err(priv->dev, "failed to switch clk source\n");
233 static u8 rzg2l_cpg_sd_clk_mux_get_parent(struct clk_hw *hw)
235 struct sd_hw_data *hwdata = to_sd_hw_data(hw);
236 struct rzg2l_cpg_priv *priv = hwdata->priv;
237 u32 val = readl(priv->base + GET_REG_OFFSET(hwdata->conf));
239 val >>= GET_SHIFT(hwdata->conf);
240 val &= GENMASK(GET_WIDTH(hwdata->conf) - 1, 0);
242 return val ? val - 1 : 0;
245 static const struct clk_ops rzg2l_cpg_sd_clk_mux_ops = {
246 .determine_rate = __clk_mux_determine_rate_closest,
247 .set_parent = rzg2l_cpg_sd_clk_mux_set_parent,
248 .get_parent = rzg2l_cpg_sd_clk_mux_get_parent,
251 static struct clk * __init
252 rzg2l_cpg_sd_mux_clk_register(const struct cpg_core_clk *core,
254 struct rzg2l_cpg_priv *priv)
256 struct sd_hw_data *clk_hw_data;
257 struct clk_init_data init;
258 struct clk_hw *clk_hw;
261 clk_hw_data = devm_kzalloc(priv->dev, sizeof(*clk_hw_data), GFP_KERNEL);
263 return ERR_PTR(-ENOMEM);
265 clk_hw_data->priv = priv;
266 clk_hw_data->conf = core->conf;
268 init.name = core->name;
269 init.ops = &rzg2l_cpg_sd_clk_mux_ops;
271 init.num_parents = core->num_parents;
272 init.parent_names = core->parent_names;
274 clk_hw = &clk_hw_data->hw;
275 clk_hw->init = &init;
277 ret = devm_clk_hw_register(priv->dev, clk_hw);
285 rzg2l_cpg_get_foutpostdiv_rate(struct rzg2l_pll5_param *params,
288 unsigned long foutpostdiv_rate;
290 params->pl5_intin = rate / MEGA;
291 params->pl5_fracin = div_u64(((u64)rate % MEGA) << 24, MEGA);
292 params->pl5_refdiv = 2;
293 params->pl5_postdiv1 = 1;
294 params->pl5_postdiv2 = 1;
295 params->pl5_spread = 0x16;
298 EXTAL_FREQ_IN_MEGA_HZ * MEGA / params->pl5_refdiv *
299 ((((params->pl5_intin << 24) + params->pl5_fracin)) >> 24) /
300 (params->pl5_postdiv1 * params->pl5_postdiv2);
302 return foutpostdiv_rate;
305 struct dsi_div_hw_data {
309 struct rzg2l_cpg_priv *priv;
312 #define to_dsi_div_hw_data(_hw) container_of(_hw, struct dsi_div_hw_data, hw)
314 static unsigned long rzg2l_cpg_dsi_div_recalc_rate(struct clk_hw *hw,
315 unsigned long parent_rate)
317 struct dsi_div_hw_data *dsi_div = to_dsi_div_hw_data(hw);
318 unsigned long rate = dsi_div->rate;
326 static unsigned long rzg2l_cpg_get_vclk_parent_rate(struct clk_hw *hw,
329 struct dsi_div_hw_data *dsi_div = to_dsi_div_hw_data(hw);
330 struct rzg2l_cpg_priv *priv = dsi_div->priv;
331 struct rzg2l_pll5_param params;
332 unsigned long parent_rate;
334 parent_rate = rzg2l_cpg_get_foutpostdiv_rate(¶ms, rate);
336 if (priv->mux_dsi_div_params.clksrc)
342 static int rzg2l_cpg_dsi_div_determine_rate(struct clk_hw *hw,
343 struct clk_rate_request *req)
345 if (req->rate > MAX_VCLK_FREQ)
346 req->rate = MAX_VCLK_FREQ;
348 req->best_parent_rate = rzg2l_cpg_get_vclk_parent_rate(hw, req->rate);
353 static int rzg2l_cpg_dsi_div_set_rate(struct clk_hw *hw,
355 unsigned long parent_rate)
357 struct dsi_div_hw_data *dsi_div = to_dsi_div_hw_data(hw);
358 struct rzg2l_cpg_priv *priv = dsi_div->priv;
361 * MUX -->DIV_DSI_{A,B} -->M3 -->VCLK
363 * Based on the dot clock, the DSI divider clock sets the divider value,
364 * calculates the pll parameters for generating FOUTPOSTDIV and the clk
365 * source for the MUX and propagates that info to the parents.
368 if (!rate || rate > MAX_VCLK_FREQ)
371 dsi_div->rate = rate;
372 writel(CPG_PL5_SDIV_DIV_DSI_A_WEN | CPG_PL5_SDIV_DIV_DSI_B_WEN |
373 (priv->mux_dsi_div_params.dsi_div_a << 0) |
374 (priv->mux_dsi_div_params.dsi_div_b << 8),
375 priv->base + CPG_PL5_SDIV);
380 static const struct clk_ops rzg2l_cpg_dsi_div_ops = {
381 .recalc_rate = rzg2l_cpg_dsi_div_recalc_rate,
382 .determine_rate = rzg2l_cpg_dsi_div_determine_rate,
383 .set_rate = rzg2l_cpg_dsi_div_set_rate,
386 static struct clk * __init
387 rzg2l_cpg_dsi_div_clk_register(const struct cpg_core_clk *core,
389 struct rzg2l_cpg_priv *priv)
391 struct dsi_div_hw_data *clk_hw_data;
392 const struct clk *parent;
393 const char *parent_name;
394 struct clk_init_data init;
395 struct clk_hw *clk_hw;
398 parent = clks[core->parent & 0xffff];
400 return ERR_CAST(parent);
402 clk_hw_data = devm_kzalloc(priv->dev, sizeof(*clk_hw_data), GFP_KERNEL);
404 return ERR_PTR(-ENOMEM);
406 clk_hw_data->priv = priv;
408 parent_name = __clk_get_name(parent);
409 init.name = core->name;
410 init.ops = &rzg2l_cpg_dsi_div_ops;
411 init.flags = CLK_SET_RATE_PARENT;
412 init.parent_names = &parent_name;
413 init.num_parents = 1;
415 clk_hw = &clk_hw_data->hw;
416 clk_hw->init = &init;
418 ret = devm_clk_hw_register(priv->dev, clk_hw);
425 struct pll5_mux_hw_data {
429 struct rzg2l_cpg_priv *priv;
432 #define to_pll5_mux_hw_data(_hw) container_of(_hw, struct pll5_mux_hw_data, hw)
434 static int rzg2l_cpg_pll5_4_clk_mux_determine_rate(struct clk_hw *hw,
435 struct clk_rate_request *req)
437 struct clk_hw *parent;
438 struct pll5_mux_hw_data *hwdata = to_pll5_mux_hw_data(hw);
439 struct rzg2l_cpg_priv *priv = hwdata->priv;
441 parent = clk_hw_get_parent_by_index(hw, priv->mux_dsi_div_params.clksrc);
442 req->best_parent_hw = parent;
443 req->best_parent_rate = req->rate;
448 static int rzg2l_cpg_pll5_4_clk_mux_set_parent(struct clk_hw *hw, u8 index)
450 struct pll5_mux_hw_data *hwdata = to_pll5_mux_hw_data(hw);
451 struct rzg2l_cpg_priv *priv = hwdata->priv;
455 * | | -->MUX -->DIV_DSIA_B -->M3 -->VCLK
458 * Based on the dot clock, the DSI divider clock calculates the parent
459 * rate and clk source for the MUX. It propagates that info to
460 * pll5_4_clk_mux which sets the clock source for DSI divider clock.
463 writel(CPG_OTHERFUNC1_REG_RES0_ON_WEN | index,
464 priv->base + CPG_OTHERFUNC1_REG);
469 static u8 rzg2l_cpg_pll5_4_clk_mux_get_parent(struct clk_hw *hw)
471 struct pll5_mux_hw_data *hwdata = to_pll5_mux_hw_data(hw);
472 struct rzg2l_cpg_priv *priv = hwdata->priv;
474 return readl(priv->base + GET_REG_OFFSET(hwdata->conf));
477 static const struct clk_ops rzg2l_cpg_pll5_4_clk_mux_ops = {
478 .determine_rate = rzg2l_cpg_pll5_4_clk_mux_determine_rate,
479 .set_parent = rzg2l_cpg_pll5_4_clk_mux_set_parent,
480 .get_parent = rzg2l_cpg_pll5_4_clk_mux_get_parent,
483 static struct clk * __init
484 rzg2l_cpg_pll5_4_mux_clk_register(const struct cpg_core_clk *core,
485 struct rzg2l_cpg_priv *priv)
487 struct pll5_mux_hw_data *clk_hw_data;
488 struct clk_init_data init;
489 struct clk_hw *clk_hw;
492 clk_hw_data = devm_kzalloc(priv->dev, sizeof(*clk_hw_data), GFP_KERNEL);
494 return ERR_PTR(-ENOMEM);
496 clk_hw_data->priv = priv;
497 clk_hw_data->conf = core->conf;
499 init.name = core->name;
500 init.ops = &rzg2l_cpg_pll5_4_clk_mux_ops;
501 init.flags = CLK_SET_RATE_PARENT;
502 init.num_parents = core->num_parents;
503 init.parent_names = core->parent_names;
505 clk_hw = &clk_hw_data->hw;
506 clk_hw->init = &init;
508 ret = devm_clk_hw_register(priv->dev, clk_hw);
518 unsigned long foutpostdiv_rate;
519 struct rzg2l_cpg_priv *priv;
522 #define to_sipll5(_hw) container_of(_hw, struct sipll5, hw)
524 static unsigned long rzg2l_cpg_get_vclk_rate(struct clk_hw *hw,
527 struct sipll5 *sipll5 = to_sipll5(hw);
528 struct rzg2l_cpg_priv *priv = sipll5->priv;
531 vclk = rate / ((1 << priv->mux_dsi_div_params.dsi_div_a) *
532 (priv->mux_dsi_div_params.dsi_div_b + 1));
534 if (priv->mux_dsi_div_params.clksrc)
540 static unsigned long rzg2l_cpg_sipll5_recalc_rate(struct clk_hw *hw,
541 unsigned long parent_rate)
543 struct sipll5 *sipll5 = to_sipll5(hw);
544 unsigned long pll5_rate = sipll5->foutpostdiv_rate;
547 pll5_rate = parent_rate;
/* Accept any requested rate unchanged; set_rate validates it. */
static long rzg2l_cpg_sipll5_round_rate(struct clk_hw *hw,
					unsigned long rate,
					unsigned long *parent_rate)
{
	return rate;
}
559 static int rzg2l_cpg_sipll5_set_rate(struct clk_hw *hw,
561 unsigned long parent_rate)
563 struct sipll5 *sipll5 = to_sipll5(hw);
564 struct rzg2l_cpg_priv *priv = sipll5->priv;
565 struct rzg2l_pll5_param params;
566 unsigned long vclk_rate;
571 * OSC --> PLL5 --> FOUTPOSTDIV-->|
572 * | | -->MUX -->DIV_DSIA_B -->M3 -->VCLK
575 * Based on the dot clock, the DSI divider clock calculates the parent
576 * rate and the pll5 parameters for generating FOUTPOSTDIV. It propagates
577 * that info to sipll5 which sets parameters for generating FOUTPOSTDIV.
579 * OSC --> PLL5 --> FOUTPOSTDIV
585 vclk_rate = rzg2l_cpg_get_vclk_rate(hw, rate);
586 sipll5->foutpostdiv_rate =
587 rzg2l_cpg_get_foutpostdiv_rate(¶ms, vclk_rate);
589 /* Put PLL5 into standby mode */
590 writel(CPG_SIPLL5_STBY_RESETB_WEN, priv->base + CPG_SIPLL5_STBY);
591 ret = readl_poll_timeout(priv->base + CPG_SIPLL5_MON, val,
592 !(val & CPG_SIPLL5_MON_PLL5_LOCK), 100, 250000);
594 dev_err(priv->dev, "failed to release pll5 lock");
598 /* Output clock setting 1 */
599 writel((params.pl5_postdiv1 << 0) | (params.pl5_postdiv2 << 4) |
600 (params.pl5_refdiv << 8), priv->base + CPG_SIPLL5_CLK1);
602 /* Output clock setting, SSCG modulation value setting 3 */
603 writel((params.pl5_fracin << 8), priv->base + CPG_SIPLL5_CLK3);
605 /* Output clock setting 4 */
606 writel(CPG_SIPLL5_CLK4_RESV_LSB | (params.pl5_intin << 16),
607 priv->base + CPG_SIPLL5_CLK4);
609 /* Output clock setting 5 */
610 writel(params.pl5_spread, priv->base + CPG_SIPLL5_CLK5);
612 /* PLL normal mode setting */
613 writel(CPG_SIPLL5_STBY_DOWNSPREAD_WEN | CPG_SIPLL5_STBY_SSCG_EN_WEN |
614 CPG_SIPLL5_STBY_RESETB_WEN | CPG_SIPLL5_STBY_RESETB,
615 priv->base + CPG_SIPLL5_STBY);
617 /* PLL normal mode transition, output clock stability check */
618 ret = readl_poll_timeout(priv->base + CPG_SIPLL5_MON, val,
619 (val & CPG_SIPLL5_MON_PLL5_LOCK), 100, 250000);
621 dev_err(priv->dev, "failed to lock pll5");
628 static const struct clk_ops rzg2l_cpg_sipll5_ops = {
629 .recalc_rate = rzg2l_cpg_sipll5_recalc_rate,
630 .round_rate = rzg2l_cpg_sipll5_round_rate,
631 .set_rate = rzg2l_cpg_sipll5_set_rate,
634 static struct clk * __init
635 rzg2l_cpg_sipll5_register(const struct cpg_core_clk *core,
637 struct rzg2l_cpg_priv *priv)
639 const struct clk *parent;
640 struct clk_init_data init;
641 const char *parent_name;
642 struct sipll5 *sipll5;
643 struct clk_hw *clk_hw;
646 parent = clks[core->parent & 0xffff];
648 return ERR_CAST(parent);
650 sipll5 = devm_kzalloc(priv->dev, sizeof(*sipll5), GFP_KERNEL);
652 return ERR_PTR(-ENOMEM);
654 init.name = core->name;
655 parent_name = __clk_get_name(parent);
656 init.ops = &rzg2l_cpg_sipll5_ops;
658 init.parent_names = &parent_name;
659 init.num_parents = 1;
661 sipll5->hw.init = &init;
662 sipll5->conf = core->conf;
665 writel(CPG_SIPLL5_STBY_SSCG_EN_WEN | CPG_SIPLL5_STBY_RESETB_WEN |
666 CPG_SIPLL5_STBY_RESETB, priv->base + CPG_SIPLL5_STBY);
668 clk_hw = &sipll5->hw;
669 clk_hw->init = &init;
671 ret = devm_clk_hw_register(priv->dev, clk_hw);
675 priv->mux_dsi_div_params.clksrc = 1; /* Use clk src 1 for DSI */
676 priv->mux_dsi_div_params.dsi_div_a = 1; /* Divided by 2 */
677 priv->mux_dsi_div_params.dsi_div_b = 2; /* Divided by 3 */
687 struct rzg2l_cpg_priv *priv;
690 #define to_pll(_hw) container_of(_hw, struct pll_clk, hw)
692 static unsigned long rzg2l_cpg_pll_clk_recalc_rate(struct clk_hw *hw,
693 unsigned long parent_rate)
695 struct pll_clk *pll_clk = to_pll(hw);
696 struct rzg2l_cpg_priv *priv = pll_clk->priv;
697 unsigned int val1, val2;
700 if (pll_clk->type != CLK_TYPE_SAM_PLL)
703 val1 = readl(priv->base + GET_REG_SAMPLL_CLK1(pll_clk->conf));
704 val2 = readl(priv->base + GET_REG_SAMPLL_CLK2(pll_clk->conf));
706 rate = mul_u64_u32_shr(parent_rate, (MDIV(val1) << 16) + KDIV(val1),
709 return DIV_ROUND_CLOSEST_ULL(rate, PDIV(val1));
712 static const struct clk_ops rzg2l_cpg_pll_ops = {
713 .recalc_rate = rzg2l_cpg_pll_clk_recalc_rate,
716 static struct clk * __init
717 rzg2l_cpg_pll_clk_register(const struct cpg_core_clk *core,
720 struct rzg2l_cpg_priv *priv)
722 struct device *dev = priv->dev;
723 const struct clk *parent;
724 struct clk_init_data init;
725 const char *parent_name;
726 struct pll_clk *pll_clk;
728 parent = clks[core->parent & 0xffff];
730 return ERR_CAST(parent);
732 pll_clk = devm_kzalloc(dev, sizeof(*pll_clk), GFP_KERNEL);
734 return ERR_PTR(-ENOMEM);
736 parent_name = __clk_get_name(parent);
737 init.name = core->name;
738 init.ops = &rzg2l_cpg_pll_ops;
740 init.parent_names = &parent_name;
741 init.num_parents = 1;
743 pll_clk->hw.init = &init;
744 pll_clk->conf = core->conf;
745 pll_clk->base = base;
746 pll_clk->priv = priv;
747 pll_clk->type = core->type;
749 return clk_register(NULL, &pll_clk->hw);
753 *rzg2l_cpg_clk_src_twocell_get(struct of_phandle_args *clkspec,
756 unsigned int clkidx = clkspec->args[1];
757 struct rzg2l_cpg_priv *priv = data;
758 struct device *dev = priv->dev;
762 switch (clkspec->args[0]) {
765 if (clkidx > priv->last_dt_core_clk) {
766 dev_err(dev, "Invalid %s clock index %u\n", type, clkidx);
767 return ERR_PTR(-EINVAL);
769 clk = priv->clks[clkidx];
774 if (clkidx >= priv->num_mod_clks) {
775 dev_err(dev, "Invalid %s clock index %u\n", type,
777 return ERR_PTR(-EINVAL);
779 clk = priv->clks[priv->num_core_clks + clkidx];
783 dev_err(dev, "Invalid CPG clock type %u\n", clkspec->args[0]);
784 return ERR_PTR(-EINVAL);
788 dev_err(dev, "Cannot get %s clock %u: %ld", type, clkidx,
791 dev_dbg(dev, "clock (%u, %u) is %pC at %lu Hz\n",
792 clkspec->args[0], clkspec->args[1], clk,
798 rzg2l_cpg_register_core_clk(const struct cpg_core_clk *core,
799 const struct rzg2l_cpg_info *info,
800 struct rzg2l_cpg_priv *priv)
802 struct clk *clk = ERR_PTR(-EOPNOTSUPP), *parent;
803 struct device *dev = priv->dev;
804 unsigned int id = core->id, div = core->div;
805 const char *parent_name;
807 WARN_DEBUG(id >= priv->num_core_clks);
808 WARN_DEBUG(PTR_ERR(priv->clks[id]) != -ENOENT);
811 /* Skip NULLified clock */
815 switch (core->type) {
817 clk = of_clk_get_by_name(priv->dev->of_node, core->name);
820 WARN_DEBUG(core->parent >= priv->num_core_clks);
821 parent = priv->clks[core->parent];
822 if (IS_ERR(parent)) {
827 parent_name = __clk_get_name(parent);
828 clk = clk_register_fixed_factor(NULL, core->name,
829 parent_name, CLK_SET_RATE_PARENT,
832 case CLK_TYPE_SAM_PLL:
833 clk = rzg2l_cpg_pll_clk_register(core, priv->clks,
836 case CLK_TYPE_SIPLL5:
837 clk = rzg2l_cpg_sipll5_register(core, priv->clks, priv);
840 clk = rzg2l_cpg_div_clk_register(core, priv->clks,
844 clk = rzg2l_cpg_mux_clk_register(core, priv->base, priv);
846 case CLK_TYPE_SD_MUX:
847 clk = rzg2l_cpg_sd_mux_clk_register(core, priv->base, priv);
849 case CLK_TYPE_PLL5_4_MUX:
850 clk = rzg2l_cpg_pll5_4_mux_clk_register(core, priv);
852 case CLK_TYPE_DSI_DIV:
853 clk = rzg2l_cpg_dsi_div_clk_register(core, priv->clks, priv);
859 if (IS_ERR_OR_NULL(clk))
862 dev_dbg(dev, "Core clock %pC at %lu Hz\n", clk, clk_get_rate(clk));
863 priv->clks[id] = clk;
867 dev_err(dev, "Failed to register %s clock %s: %ld\n", "core",
868 core->name, PTR_ERR(clk));
872 * struct mstp_clock - MSTP gating clock
874 * @hw: handle between common and hardware-specific interfaces
875 * @off: register offset
877 * @enabled: soft state of the clock, if it is coupled with another clock
878 * @priv: CPG/MSTP private data
879 * @sibling: pointer to the other coupled clock
886 struct rzg2l_cpg_priv *priv;
887 struct mstp_clock *sibling;
890 #define to_mod_clock(_hw) container_of(_hw, struct mstp_clock, hw)
892 static int rzg2l_mod_clock_endisable(struct clk_hw *hw, bool enable)
894 struct mstp_clock *clock = to_mod_clock(hw);
895 struct rzg2l_cpg_priv *priv = clock->priv;
896 unsigned int reg = clock->off;
897 struct device *dev = priv->dev;
898 u32 bitmask = BIT(clock->bit);
903 dev_dbg(dev, "%pC does not support ON/OFF\n", hw->clk);
907 dev_dbg(dev, "CLK_ON %u/%pC %s\n", CLK_ON_R(reg), hw->clk,
908 enable ? "ON" : "OFF");
910 value = bitmask << 16;
914 writel(value, priv->base + CLK_ON_R(reg));
919 if (!priv->info->has_clk_mon_regs)
922 error = readl_poll_timeout_atomic(priv->base + CLK_MON_R(reg), value,
923 value & bitmask, 0, 10);
925 dev_err(dev, "Failed to enable CLK_ON %p\n",
926 priv->base + CLK_ON_R(reg));
931 static int rzg2l_mod_clock_enable(struct clk_hw *hw)
933 struct mstp_clock *clock = to_mod_clock(hw);
935 if (clock->sibling) {
936 struct rzg2l_cpg_priv *priv = clock->priv;
940 spin_lock_irqsave(&priv->rmw_lock, flags);
941 enabled = clock->sibling->enabled;
942 clock->enabled = true;
943 spin_unlock_irqrestore(&priv->rmw_lock, flags);
948 return rzg2l_mod_clock_endisable(hw, true);
951 static void rzg2l_mod_clock_disable(struct clk_hw *hw)
953 struct mstp_clock *clock = to_mod_clock(hw);
955 if (clock->sibling) {
956 struct rzg2l_cpg_priv *priv = clock->priv;
960 spin_lock_irqsave(&priv->rmw_lock, flags);
961 enabled = clock->sibling->enabled;
962 clock->enabled = false;
963 spin_unlock_irqrestore(&priv->rmw_lock, flags);
968 rzg2l_mod_clock_endisable(hw, false);
971 static int rzg2l_mod_clock_is_enabled(struct clk_hw *hw)
973 struct mstp_clock *clock = to_mod_clock(hw);
974 struct rzg2l_cpg_priv *priv = clock->priv;
975 u32 bitmask = BIT(clock->bit);
979 dev_dbg(priv->dev, "%pC does not support ON/OFF\n", hw->clk);
984 return clock->enabled;
986 if (priv->info->has_clk_mon_regs)
987 value = readl(priv->base + CLK_MON_R(clock->off));
989 value = readl(priv->base + clock->off);
991 return value & bitmask;
994 static const struct clk_ops rzg2l_mod_clock_ops = {
995 .enable = rzg2l_mod_clock_enable,
996 .disable = rzg2l_mod_clock_disable,
997 .is_enabled = rzg2l_mod_clock_is_enabled,
1000 static struct mstp_clock
1001 *rzg2l_mod_clock_get_sibling(struct mstp_clock *clock,
1002 struct rzg2l_cpg_priv *priv)
1007 for (i = 0; i < priv->num_mod_clks; i++) {
1008 struct mstp_clock *clk;
1010 if (priv->clks[priv->num_core_clks + i] == ERR_PTR(-ENOENT))
1013 hw = __clk_get_hw(priv->clks[priv->num_core_clks + i]);
1014 clk = to_mod_clock(hw);
1015 if (clock->off == clk->off && clock->bit == clk->bit)
1023 rzg2l_cpg_register_mod_clk(const struct rzg2l_mod_clk *mod,
1024 const struct rzg2l_cpg_info *info,
1025 struct rzg2l_cpg_priv *priv)
1027 struct mstp_clock *clock = NULL;
1028 struct device *dev = priv->dev;
1029 unsigned int id = mod->id;
1030 struct clk_init_data init;
1031 struct clk *parent, *clk;
1032 const char *parent_name;
1035 WARN_DEBUG(id < priv->num_core_clks);
1036 WARN_DEBUG(id >= priv->num_core_clks + priv->num_mod_clks);
1037 WARN_DEBUG(mod->parent >= priv->num_core_clks + priv->num_mod_clks);
1038 WARN_DEBUG(PTR_ERR(priv->clks[id]) != -ENOENT);
1041 /* Skip NULLified clock */
1045 parent = priv->clks[mod->parent];
1046 if (IS_ERR(parent)) {
1051 clock = devm_kzalloc(dev, sizeof(*clock), GFP_KERNEL);
1053 clk = ERR_PTR(-ENOMEM);
1057 init.name = mod->name;
1058 init.ops = &rzg2l_mod_clock_ops;
1059 init.flags = CLK_SET_RATE_PARENT;
1060 for (i = 0; i < info->num_crit_mod_clks; i++)
1061 if (id == info->crit_mod_clks[i]) {
1062 dev_dbg(dev, "CPG %s setting CLK_IS_CRITICAL\n",
1064 init.flags |= CLK_IS_CRITICAL;
1068 parent_name = __clk_get_name(parent);
1069 init.parent_names = &parent_name;
1070 init.num_parents = 1;
1072 clock->off = mod->off;
1073 clock->bit = mod->bit;
1075 clock->hw.init = &init;
1077 clk = clk_register(NULL, &clock->hw);
1081 dev_dbg(dev, "Module clock %pC at %lu Hz\n", clk, clk_get_rate(clk));
1082 priv->clks[id] = clk;
1084 if (mod->is_coupled) {
1085 struct mstp_clock *sibling;
1087 clock->enabled = rzg2l_mod_clock_is_enabled(&clock->hw);
1088 sibling = rzg2l_mod_clock_get_sibling(clock, priv);
1090 clock->sibling = sibling;
1091 sibling->sibling = clock;
1098 dev_err(dev, "Failed to register %s clock %s: %ld\n", "module",
1099 mod->name, PTR_ERR(clk));
1102 #define rcdev_to_priv(x) container_of(x, struct rzg2l_cpg_priv, rcdev)
1104 static int rzg2l_cpg_reset(struct reset_controller_dev *rcdev,
1107 struct rzg2l_cpg_priv *priv = rcdev_to_priv(rcdev);
1108 const struct rzg2l_cpg_info *info = priv->info;
1109 unsigned int reg = info->resets[id].off;
1110 u32 dis = BIT(info->resets[id].bit);
1113 dev_dbg(rcdev->dev, "reset id:%ld offset:0x%x\n", id, CLK_RST_R(reg));
1116 writel(we, priv->base + CLK_RST_R(reg));
1118 /* Wait for at least one cycle of the RCLK clock (@ ca. 32 kHz) */
1121 /* Release module from reset state */
1122 writel(we | dis, priv->base + CLK_RST_R(reg));
1127 static int rzg2l_cpg_assert(struct reset_controller_dev *rcdev,
1130 struct rzg2l_cpg_priv *priv = rcdev_to_priv(rcdev);
1131 const struct rzg2l_cpg_info *info = priv->info;
1132 unsigned int reg = info->resets[id].off;
1133 u32 value = BIT(info->resets[id].bit) << 16;
1135 dev_dbg(rcdev->dev, "assert id:%ld offset:0x%x\n", id, CLK_RST_R(reg));
1137 writel(value, priv->base + CLK_RST_R(reg));
1141 static int rzg2l_cpg_deassert(struct reset_controller_dev *rcdev,
1144 struct rzg2l_cpg_priv *priv = rcdev_to_priv(rcdev);
1145 const struct rzg2l_cpg_info *info = priv->info;
1146 unsigned int reg = info->resets[id].off;
1147 u32 dis = BIT(info->resets[id].bit);
1148 u32 value = (dis << 16) | dis;
1150 dev_dbg(rcdev->dev, "deassert id:%ld offset:0x%x\n", id,
1153 writel(value, priv->base + CLK_RST_R(reg));
1157 static int rzg2l_cpg_status(struct reset_controller_dev *rcdev,
1160 struct rzg2l_cpg_priv *priv = rcdev_to_priv(rcdev);
1161 const struct rzg2l_cpg_info *info = priv->info;
1162 unsigned int reg = info->resets[id].off;
1163 u32 bitmask = BIT(info->resets[id].bit);
1164 s8 monbit = info->resets[id].monbit;
1166 if (info->has_clk_mon_regs) {
1167 return !!(readl(priv->base + CLK_MRST_R(reg)) & bitmask);
1168 } else if (monbit >= 0) {
1169 u32 monbitmask = BIT(monbit);
1171 return !!(readl(priv->base + CPG_RST_MON) & monbitmask);
1176 static const struct reset_control_ops rzg2l_cpg_reset_ops = {
1177 .reset = rzg2l_cpg_reset,
1178 .assert = rzg2l_cpg_assert,
1179 .deassert = rzg2l_cpg_deassert,
1180 .status = rzg2l_cpg_status,
1183 static int rzg2l_cpg_reset_xlate(struct reset_controller_dev *rcdev,
1184 const struct of_phandle_args *reset_spec)
1186 struct rzg2l_cpg_priv *priv = rcdev_to_priv(rcdev);
1187 const struct rzg2l_cpg_info *info = priv->info;
1188 unsigned int id = reset_spec->args[0];
1190 if (id >= rcdev->nr_resets || !info->resets[id].off) {
1191 dev_err(rcdev->dev, "Invalid reset index %u\n", id);
1198 static int rzg2l_cpg_reset_controller_register(struct rzg2l_cpg_priv *priv)
1200 priv->rcdev.ops = &rzg2l_cpg_reset_ops;
1201 priv->rcdev.of_node = priv->dev->of_node;
1202 priv->rcdev.dev = priv->dev;
1203 priv->rcdev.of_reset_n_cells = 1;
1204 priv->rcdev.of_xlate = rzg2l_cpg_reset_xlate;
1205 priv->rcdev.nr_resets = priv->num_resets;
1207 return devm_reset_controller_register(priv->dev, &priv->rcdev);
1210 static bool rzg2l_cpg_is_pm_clk(struct rzg2l_cpg_priv *priv,
1211 const struct of_phandle_args *clkspec)
1213 const struct rzg2l_cpg_info *info = priv->info;
1217 if (clkspec->args_count != 2)
1220 if (clkspec->args[0] != CPG_MOD)
1223 id = clkspec->args[1] + info->num_total_core_clks;
1224 for (i = 0; i < info->num_no_pm_mod_clks; i++) {
1225 if (info->no_pm_mod_clks[i] == id)
1232 static int rzg2l_cpg_attach_dev(struct generic_pm_domain *domain, struct device *dev)
1234 struct rzg2l_cpg_priv *priv = container_of(domain, struct rzg2l_cpg_priv, genpd);
1235 struct device_node *np = dev->of_node;
1236 struct of_phandle_args clkspec;
1242 while (!of_parse_phandle_with_args(np, "clocks", "#clock-cells", i,
1244 if (rzg2l_cpg_is_pm_clk(priv, &clkspec)) {
1247 error = pm_clk_create(dev);
1249 of_node_put(clkspec.np);
1253 clk = of_clk_get_from_provider(&clkspec);
1254 of_node_put(clkspec.np);
1256 error = PTR_ERR(clk);
1260 error = pm_clk_add_clk(dev, clk);
1262 dev_err(dev, "pm_clk_add_clk failed %d\n",
1267 of_node_put(clkspec.np);
1278 pm_clk_destroy(dev);
/* genpd detach callback: drop the pm_clk list created in attach, if any. */
static void rzg2l_cpg_detach_dev(struct generic_pm_domain *unused, struct device *dev)
{
	if (!pm_clk_no_clocks(dev))
		pm_clk_destroy(dev);
}
/* devm action: tear down the PM domain registered in rzg2l_cpg_add_clk_domain(). */
static void rzg2l_cpg_genpd_remove(void *data)
{
	pm_genpd_remove(data);
}
1294 static int __init rzg2l_cpg_add_clk_domain(struct rzg2l_cpg_priv *priv)
1296 struct device *dev = priv->dev;
1297 struct device_node *np = dev->of_node;
1298 struct generic_pm_domain *genpd = &priv->genpd;
1301 genpd->name = np->name;
1302 genpd->flags = GENPD_FLAG_PM_CLK | GENPD_FLAG_ALWAYS_ON |
1303 GENPD_FLAG_ACTIVE_WAKEUP;
1304 genpd->attach_dev = rzg2l_cpg_attach_dev;
1305 genpd->detach_dev = rzg2l_cpg_detach_dev;
1306 ret = pm_genpd_init(genpd, &pm_domain_always_on_gov, false);
1310 ret = devm_add_action_or_reset(dev, rzg2l_cpg_genpd_remove, genpd);
1314 return of_genpd_add_provider_simple(np, genpd);
1317 static int __init rzg2l_cpg_probe(struct platform_device *pdev)
1319 struct device *dev = &pdev->dev;
1320 struct device_node *np = dev->of_node;
1321 const struct rzg2l_cpg_info *info;
1322 struct rzg2l_cpg_priv *priv;
1323 unsigned int nclks, i;
1327 info = of_device_get_match_data(dev);
1329 priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
1335 spin_lock_init(&priv->rmw_lock);
1337 priv->base = devm_platform_ioremap_resource(pdev, 0);
1338 if (IS_ERR(priv->base))
1339 return PTR_ERR(priv->base);
1341 nclks = info->num_total_core_clks + info->num_hw_mod_clks;
1342 clks = devm_kmalloc_array(dev, nclks, sizeof(*clks), GFP_KERNEL);
1346 dev_set_drvdata(dev, priv);
1348 priv->num_core_clks = info->num_total_core_clks;
1349 priv->num_mod_clks = info->num_hw_mod_clks;
1350 priv->num_resets = info->num_resets;
1351 priv->last_dt_core_clk = info->last_dt_core_clk;
1353 for (i = 0; i < nclks; i++)
1354 clks[i] = ERR_PTR(-ENOENT);
1356 for (i = 0; i < info->num_core_clks; i++)
1357 rzg2l_cpg_register_core_clk(&info->core_clks[i], info, priv);
1359 for (i = 0; i < info->num_mod_clks; i++)
1360 rzg2l_cpg_register_mod_clk(&info->mod_clks[i], info, priv);
1362 error = of_clk_add_provider(np, rzg2l_cpg_clk_src_twocell_get, priv);
1366 error = devm_add_action_or_reset(dev, rzg2l_cpg_del_clk_provider, np);
1370 error = rzg2l_cpg_add_clk_domain(priv);
1374 error = rzg2l_cpg_reset_controller_register(priv);
1381 static const struct of_device_id rzg2l_cpg_match[] = {
1382 #ifdef CONFIG_CLK_R9A07G043
1384 .compatible = "renesas,r9a07g043-cpg",
1385 .data = &r9a07g043_cpg_info,
1388 #ifdef CONFIG_CLK_R9A07G044
1390 .compatible = "renesas,r9a07g044-cpg",
1391 .data = &r9a07g044_cpg_info,
1394 #ifdef CONFIG_CLK_R9A07G054
1396 .compatible = "renesas,r9a07g054-cpg",
1397 .data = &r9a07g054_cpg_info,
1400 #ifdef CONFIG_CLK_R9A09G011
1402 .compatible = "renesas,r9a09g011-cpg",
1403 .data = &r9a09g011_cpg_info,
1409 static struct platform_driver rzg2l_cpg_driver = {
1411 .name = "rzg2l-cpg",
1412 .of_match_table = rzg2l_cpg_match,
1416 static int __init rzg2l_cpg_init(void)
1418 return platform_driver_probe(&rzg2l_cpg_driver, rzg2l_cpg_probe);
1421 subsys_initcall(rzg2l_cpg_init);
1423 MODULE_DESCRIPTION("Renesas RZ/G2L CPG Driver");