1 // SPDX-License-Identifier: GPL-2.0
2 /*
3 * RZ/G2L Clock Pulse Generator
4 *
5 * Copyright (C) 2021 Renesas Electronics Corp.
6 *
7 * Based on renesas-cpg-mssr.c
8 *
9 * Copyright (C) 2015 Glider bvba
10 * Copyright (C) 2013 Ideas On Board SPRL
11 * Copyright (C) 2015 Renesas Electronics Corp.
12 */
14 #include <linux/bitfield.h>
15 #include <linux/clk.h>
16 #include <linux/clk-provider.h>
17 #include <linux/clk/renesas.h>
18 #include <linux/delay.h>
19 #include <linux/device.h>
20 #include <linux/init.h>
21 #include <linux/iopoll.h>
22 #include <linux/mod_devicetable.h>
23 #include <linux/module.h>
25 #include <linux/platform_device.h>
26 #include <linux/pm_clock.h>
27 #include <linux/pm_domain.h>
28 #include <linux/reset-controller.h>
29 #include <linux/slab.h>
30 #include <linux/units.h>
32 #include <dt-bindings/clock/renesas-cpg-mssr.h>
34 #include "rzg2l-cpg.h"
/*
 * Debug-only assertion helper: expands to WARN_ON() in DEBUG builds and to
 * a no-op otherwise.  The extract had both definitions back to back (a
 * macro redefinition); the conditional guard they belong under is restored.
 */
#ifdef DEBUG
#define WARN_DEBUG(x)	WARN_ON(x)
#else
#define WARN_DEBUG(x)	do { } while (0)
#endif
/*
 * Field extraction helpers: a core clock's register offset, bit shift and
 * field width are packed into one "conf" word.  Every macro argument is now
 * parenthesized so that expression arguments (ternaries, additions) expand
 * with the intended precedence — the originals expanded `val` bare.
 */
#define GET_SHIFT(val)		(((val) >> 12) & 0xff)
#define GET_WIDTH(val)		(((val) >> 8) & 0xf)

/* SAM PLL register field accessors (FIELD_GET parenthesizes internally). */
#define KDIV(val)		FIELD_GET(GENMASK(31, 16), val)
#define MDIV(val)		FIELD_GET(GENMASK(15, 6), val)
#define PDIV(val)		FIELD_GET(GENMASK(5, 0), val)
#define SDIV(val)		FIELD_GET(GENMASK(2, 0), val)

/* Clock ON/reset registers and their monitor counterparts (+0x180). */
#define CLK_ON_R(reg)		(reg)
#define CLK_MON_R(reg)		(0x180 + (reg))
#define CLK_RST_R(reg)		(reg)
#define CLK_MRST_R(reg)		(0x180 + (reg))

#define GET_REG_OFFSET(val)		(((val) >> 20) & 0xfff)
#define GET_REG_SAMPLL_CLK1(val)	(((val) >> 22) & 0xfff)
#define GET_REG_SAMPLL_CLK2(val)	(((val) >> 12) & 0xfff)

/* Maximum allowed VCLK (dot clock) frequency in Hz. */
#define MAX_VCLK_FREQ		(148500000)
/*
 * NOTE(review): this extract is missing lines (the embedded upstream line
 * numbers jump), so the struct bodies below are incomplete fragments —
 * confirm the elided members against the full source.
 */
/* Tail of struct sd_hw_data: back-pointer to the CPG private data. */
64 struct rzg2l_cpg_priv *priv;
67 #define to_sd_hw_data(_hw)	container_of(_hw, struct sd_hw_data, hw)
/* PLL5 configuration parameters computed per target rate (body elided). */
69 struct rzg2l_pll5_param {
/* PLL5 mux selection and DSI divider settings (body elided). */
78 struct rzg2l_pll5_mux_dsi_div_param {
85 * struct rzg2l_cpg_priv - Clock Pulse Generator Private Data
87 * @rcdev: Reset controller entity
89 * @base: CPG register block base address
90 * @rmw_lock: protects register accesses
91 * @clks: Array containing all Core and Module Clocks
92 * @num_core_clks: Number of Core Clocks in clks[]
93 * @num_mod_clks: Number of Module Clocks in clks[]
94 * @num_resets: Number of Module Resets in info->resets[]
95 * @last_dt_core_clk: ID of the last Core Clock exported to DT
96 * @info: Pointer to platform data
98 * @mux_dsi_div_params: pll5 mux and dsi div parameters
100 struct rzg2l_cpg_priv {
101 struct reset_controller_dev rcdev;
107 unsigned int num_core_clks;
108 unsigned int num_mod_clks;
109 unsigned int num_resets;
110 unsigned int last_dt_core_clk;
112 const struct rzg2l_cpg_info *info;
/* PM domain exposed for the clocks this CPG gates. */
114 struct generic_pm_domain genpd;
116 struct rzg2l_pll5_mux_dsi_div_param mux_dsi_div_params;
/*
 * devm action callback: unregister the OF clock provider registered in
 * probe.  @data is the provider's device_node.
 */
static void rzg2l_cpg_del_clk_provider(void *data)
{
	of_clk_del_provider(data);
}
/*
 * Register a core divider clock described by @core.  The divider field's
 * register offset/shift/width are unpacked from core->conf; a divider
 * table is used when one is provided, otherwise a plain divider.
 * NOTE(review): lines are elided in this extract (IS_ERR checks, table
 * branch condition, closing braces) — see the embedded numbering gaps.
 */
124 static struct clk * __init
125 rzg2l_cpg_div_clk_register(const struct cpg_core_clk *core,
128 struct rzg2l_cpg_priv *priv)
130 struct device *dev = priv->dev;
131 const struct clk *parent;
132 const char *parent_name;
133 struct clk_hw *clk_hw;
/* Parent is resolved from the priv->clks array by its packed index. */
135 parent = clks[core->parent & 0xffff];
137 return ERR_CAST(parent);
139 parent_name = __clk_get_name(parent);
/* Table-based divider path. */
142 clk_hw = clk_hw_register_divider_table(dev, core->name,
144 base + GET_REG_OFFSET(core->conf),
145 GET_SHIFT(core->conf),
146 GET_WIDTH(core->conf),
/* Plain divider path. */
151 clk_hw = clk_hw_register_divider(dev, core->name,
153 base + GET_REG_OFFSET(core->conf),
154 GET_SHIFT(core->conf),
155 GET_WIDTH(core->conf),
156 core->flag, &priv->rmw_lock);
159 return ERR_CAST(clk_hw);
/*
 * Register a simple core mux clock; parent list and flags come straight
 * from the static core-clock description.
 */
164 static struct clk * __init
165 rzg2l_cpg_mux_clk_register(const struct cpg_core_clk *core,
167 struct rzg2l_cpg_priv *priv)
169 const struct clk_hw *clk_hw;
171 clk_hw = devm_clk_hw_register_mux(priv->dev, core->name,
172 core->parent_names, core->num_parents,
174 base + GET_REG_OFFSET(core->conf),
175 GET_SHIFT(core->conf),
176 GET_WIDTH(core->conf),
177 core->mux_flags, &priv->rmw_lock);
179 return ERR_CAST(clk_hw);
/*
 * Switch the SDHI clock mux to parent @index, honouring the HW-manual
 * restriction that 533 MHz <-> 400 MHz transitions must pass through the
 * 266 MHz setting first.  Register writes use the upper-16-bit
 * write-enable mask convention; completion is polled via CPG_CLKSTATUS.
 * NOTE(review): intermediate lines (declarations of flags/ret, poll
 * conditions, error gotos, closing braces) are elided in this extract.
 */
184 static int rzg2l_cpg_sd_clk_mux_set_parent(struct clk_hw *hw, u8 index)
186 struct sd_hw_data *hwdata = to_sd_hw_data(hw);
187 struct rzg2l_cpg_priv *priv = hwdata->priv;
188 u32 off = GET_REG_OFFSET(hwdata->conf);
189 u32 shift = GET_SHIFT(hwdata->conf);
190 const u32 clk_src_266 = 2;
191 u32 msk, val, bitmask;
196 * As per the HW manual, we should not directly switch from 533 MHz to
197 * 400 MHz and vice versa. To change the setting from 2’b01 (533 MHz)
198 * to 2’b10 (400 MHz) or vice versa, Switch to 2’b11 (266 MHz) first,
199 * and then switch to the target setting (2’b01 (533 MHz) or 2’b10
201 * Setting a value of '0' to the SEL_SDHI0_SET or SEL_SDHI1_SET clock
202 * switching register is prohibited.
203 * The clock mux has 3 input clocks(533 MHz, 400 MHz, and 266 MHz), and
204 * the index to value mapping is done by adding 1 to the index.
/* Write-enable mask (field bits shifted into the upper half-word). */
206 bitmask = (GENMASK(GET_WIDTH(hwdata->conf) - 1, 0) << shift) << 16;
207 msk = off ? CPG_CLKSTATUS_SELSDHI1_STS : CPG_CLKSTATUS_SELSDHI0_STS;
208 spin_lock_irqsave(&priv->rmw_lock, flags);
209 if (index != clk_src_266) {
/* First hop: select 266 MHz and wait for the switch to complete. */
210 writel(bitmask | ((clk_src_266 + 1) << shift), priv->base + off);
212 ret = readl_poll_timeout_atomic(priv->base + CPG_CLKSTATUS, val,
214 CPG_SDHI_CLK_SWITCH_STATUS_TIMEOUT_US);
/* Final hop: select the requested source (hardware value = index + 1). */
219 writel(bitmask | ((index + 1) << shift), priv->base + off);
221 ret = readl_poll_timeout_atomic(priv->base + CPG_CLKSTATUS, val,
223 CPG_SDHI_CLK_SWITCH_STATUS_TIMEOUT_US);
225 spin_unlock_irqrestore(&priv->rmw_lock, flags);
228 dev_err(priv->dev, "failed to switch clk source\n");
233 static u8 rzg2l_cpg_sd_clk_mux_get_parent(struct clk_hw *hw)
235 struct sd_hw_data *hwdata = to_sd_hw_data(hw);
236 struct rzg2l_cpg_priv *priv = hwdata->priv;
237 u32 val = readl(priv->base + GET_REG_OFFSET(hwdata->conf));
239 val >>= GET_SHIFT(hwdata->conf);
240 val &= GENMASK(GET_WIDTH(hwdata->conf) - 1, 0);
244 /* Prohibited clk source, change it to 533 MHz(reset value) */
245 rzg2l_cpg_sd_clk_mux_set_parent(hw, 0);
251 static const struct clk_ops rzg2l_cpg_sd_clk_mux_ops = {
252 .determine_rate = __clk_mux_determine_rate_closest,
253 .set_parent = rzg2l_cpg_sd_clk_mux_set_parent,
254 .get_parent = rzg2l_cpg_sd_clk_mux_get_parent,
/*
 * Register the custom SDHI mux clock (uses rzg2l_cpg_sd_clk_mux_ops so the
 * 266 MHz intermediate-hop rule is enforced on parent changes).
 * NOTE(review): NULL checks, init.flags assignment and the error/return
 * tail are elided in this extract.
 */
257 static struct clk * __init
258 rzg2l_cpg_sd_mux_clk_register(const struct cpg_core_clk *core,
260 struct rzg2l_cpg_priv *priv)
262 struct sd_hw_data *clk_hw_data;
263 struct clk_init_data init;
264 struct clk_hw *clk_hw;
267 clk_hw_data = devm_kzalloc(priv->dev, sizeof(*clk_hw_data), GFP_KERNEL);
269 return ERR_PTR(-ENOMEM);
271 clk_hw_data->priv = priv;
272 clk_hw_data->conf = core->conf;
274 init.name = core->name;
275 init.ops = &rzg2l_cpg_sd_clk_mux_ops;
277 init.num_parents = core->num_parents;
278 init.parent_names = core->parent_names;
280 clk_hw = &clk_hw_data->hw;
281 clk_hw->init = &init;
283 ret = devm_clk_hw_register(priv->dev, clk_hw);
291 rzg2l_cpg_get_foutpostdiv_rate(struct rzg2l_pll5_param *params,
294 unsigned long foutpostdiv_rate;
296 params->pl5_intin = rate / MEGA;
297 params->pl5_fracin = div_u64(((u64)rate % MEGA) << 24, MEGA);
298 params->pl5_refdiv = 2;
299 params->pl5_postdiv1 = 1;
300 params->pl5_postdiv2 = 1;
301 params->pl5_spread = 0x16;
304 EXTAL_FREQ_IN_MEGA_HZ * MEGA / params->pl5_refdiv *
305 ((((params->pl5_intin << 24) + params->pl5_fracin)) >> 24) /
306 (params->pl5_postdiv1 * params->pl5_postdiv2);
308 return foutpostdiv_rate;
/*
 * DSI divider clock unit.  Fixed here: the call argument `&params` had been
 * HTML-entity-mangled to `&para;ms` (a pilcrow) — restored to `&params`.
 * NOTE(review): other lines (braces, return statements, clksrc branch
 * bodies) are elided in this extract, per the embedded numbering gaps.
 */
/* Tail of struct dsi_div_hw_data: back-pointer to CPG private data. */
311 struct dsi_div_hw_data {
315 struct rzg2l_cpg_priv *priv;
318 #define to_dsi_div_hw_data(_hw)	container_of(_hw, struct dsi_div_hw_data, hw)
/* recalc_rate: report the cached divider output rate. */
320 static unsigned long rzg2l_cpg_dsi_div_recalc_rate(struct clk_hw *hw,
321 unsigned long parent_rate)
323 struct dsi_div_hw_data *dsi_div = to_dsi_div_hw_data(hw);
324 unsigned long rate = dsi_div->rate;
/* Compute the FOUTPOSTDIV parent rate needed to produce @rate on VCLK. */
332 static unsigned long rzg2l_cpg_get_vclk_parent_rate(struct clk_hw *hw,
335 struct dsi_div_hw_data *dsi_div = to_dsi_div_hw_data(hw);
336 struct rzg2l_cpg_priv *priv = dsi_div->priv;
337 struct rzg2l_pll5_param params;
338 unsigned long parent_rate;
340 parent_rate = rzg2l_cpg_get_foutpostdiv_rate(&params, rate);
342 if (priv->mux_dsi_div_params.clksrc)
/* determine_rate: clamp to MAX_VCLK_FREQ, then derive the parent rate. */
348 static int rzg2l_cpg_dsi_div_determine_rate(struct clk_hw *hw,
349 struct clk_rate_request *req)
351 if (req->rate > MAX_VCLK_FREQ)
352 req->rate = MAX_VCLK_FREQ;
354 req->best_parent_rate = rzg2l_cpg_get_vclk_parent_rate(hw, req->rate);
/* set_rate: program DIV_DSI_A/B (write-enable bits + divider fields). */
359 static int rzg2l_cpg_dsi_div_set_rate(struct clk_hw *hw,
361 unsigned long parent_rate)
363 struct dsi_div_hw_data *dsi_div = to_dsi_div_hw_data(hw);
364 struct rzg2l_cpg_priv *priv = dsi_div->priv;
367 * MUX -->DIV_DSI_{A,B} -->M3 -->VCLK
369 * Based on the dot clock, the DSI divider clock sets the divider value,
370 * calculates the pll parameters for generating FOUTPOSTDIV and the clk
371 * source for the MUX and propagates that info to the parents.
374 if (!rate || rate > MAX_VCLK_FREQ)
377 dsi_div->rate = rate;
378 writel(CPG_PL5_SDIV_DIV_DSI_A_WEN | CPG_PL5_SDIV_DIV_DSI_B_WEN |
379 (priv->mux_dsi_div_params.dsi_div_a << 0) |
380 (priv->mux_dsi_div_params.dsi_div_b << 8),
381 priv->base + CPG_PL5_SDIV);
386 static const struct clk_ops rzg2l_cpg_dsi_div_ops = {
387 .recalc_rate = rzg2l_cpg_dsi_div_recalc_rate,
388 .determine_rate = rzg2l_cpg_dsi_div_determine_rate,
389 .set_rate = rzg2l_cpg_dsi_div_set_rate,
/*
 * Register the DSI divider clock, parented on the PLL5 output resolved
 * from priv->clks.  CLK_SET_RATE_PARENT lets set_rate propagate upward.
 * NOTE(review): IS_ERR/NULL checks and the return tail are elided here.
 */
392 static struct clk * __init
393 rzg2l_cpg_dsi_div_clk_register(const struct cpg_core_clk *core,
395 struct rzg2l_cpg_priv *priv)
397 struct dsi_div_hw_data *clk_hw_data;
398 const struct clk *parent;
399 const char *parent_name;
400 struct clk_init_data init;
401 struct clk_hw *clk_hw;
404 parent = clks[core->parent & 0xffff];
406 return ERR_CAST(parent);
408 clk_hw_data = devm_kzalloc(priv->dev, sizeof(*clk_hw_data), GFP_KERNEL);
410 return ERR_PTR(-ENOMEM);
412 clk_hw_data->priv = priv;
414 parent_name = __clk_get_name(parent);
415 init.name = core->name;
416 init.ops = &rzg2l_cpg_dsi_div_ops;
417 init.flags = CLK_SET_RATE_PARENT;
418 init.parent_names = &parent_name;
419 init.num_parents = 1;
421 clk_hw = &clk_hw_data->hw;
422 clk_hw->init = &init;
424 ret = devm_clk_hw_register(priv->dev, clk_hw);
/*
 * PLL5/4 source mux: the DSI divider decides the clock source and stores
 * it in priv->mux_dsi_div_params.clksrc; this mux applies it.
 * NOTE(review): braces/returns are elided in this extract.
 */
431 struct pll5_mux_hw_data {
435 struct rzg2l_cpg_priv *priv;
438 #define to_pll5_mux_hw_data(_hw)	container_of(_hw, struct pll5_mux_hw_data, hw)
/* determine_rate: force the parent chosen by mux_dsi_div_params.clksrc. */
440 static int rzg2l_cpg_pll5_4_clk_mux_determine_rate(struct clk_hw *hw,
441 struct clk_rate_request *req)
443 struct clk_hw *parent;
444 struct pll5_mux_hw_data *hwdata = to_pll5_mux_hw_data(hw);
445 struct rzg2l_cpg_priv *priv = hwdata->priv;
447 parent = clk_hw_get_parent_by_index(hw, priv->mux_dsi_div_params.clksrc);
448 req->best_parent_hw = parent;
449 req->best_parent_rate = req->rate;
/* set_parent: program OTHERFUNC1 with its write-enable bit. */
454 static int rzg2l_cpg_pll5_4_clk_mux_set_parent(struct clk_hw *hw, u8 index)
456 struct pll5_mux_hw_data *hwdata = to_pll5_mux_hw_data(hw);
457 struct rzg2l_cpg_priv *priv = hwdata->priv;
461 * | | -->MUX -->DIV_DSIA_B -->M3 -->VCLK
464 * Based on the dot clock, the DSI divider clock calculates the parent
465 * rate and clk source for the MUX. It propagates that info to
466 * pll5_4_clk_mux which sets the clock source for DSI divider clock.
469 writel(CPG_OTHERFUNC1_REG_RES0_ON_WEN | index,
470 priv->base + CPG_OTHERFUNC1_REG);
/* get_parent: read the raw selection register back. */
475 static u8 rzg2l_cpg_pll5_4_clk_mux_get_parent(struct clk_hw *hw)
477 struct pll5_mux_hw_data *hwdata = to_pll5_mux_hw_data(hw);
478 struct rzg2l_cpg_priv *priv = hwdata->priv;
480 return readl(priv->base + GET_REG_OFFSET(hwdata->conf));
483 static const struct clk_ops rzg2l_cpg_pll5_4_clk_mux_ops = {
484 .determine_rate = rzg2l_cpg_pll5_4_clk_mux_determine_rate,
485 .set_parent = rzg2l_cpg_pll5_4_clk_mux_set_parent,
486 .get_parent = rzg2l_cpg_pll5_4_clk_mux_get_parent,
/*
 * Register the PLL5/4 mux clock with the custom ops above.
 * NOTE(review): NULL checks and the return tail are elided here.
 */
489 static struct clk * __init
490 rzg2l_cpg_pll5_4_mux_clk_register(const struct cpg_core_clk *core,
491 struct rzg2l_cpg_priv *priv)
493 struct pll5_mux_hw_data *clk_hw_data;
494 struct clk_init_data init;
495 struct clk_hw *clk_hw;
498 clk_hw_data = devm_kzalloc(priv->dev, sizeof(*clk_hw_data), GFP_KERNEL);
500 return ERR_PTR(-ENOMEM);
502 clk_hw_data->priv = priv;
503 clk_hw_data->conf = core->conf;
505 init.name = core->name;
506 init.ops = &rzg2l_cpg_pll5_4_clk_mux_ops;
507 init.flags = CLK_SET_RATE_PARENT;
508 init.num_parents = core->num_parents;
509 init.parent_names = core->parent_names;
511 clk_hw = &clk_hw_data->hw;
512 clk_hw->init = &init;
514 ret = devm_clk_hw_register(priv->dev, clk_hw);
/* Tail of struct sipll5: cached FOUTPOSTDIV rate and CPG back-pointer. */
524 unsigned long foutpostdiv_rate;
525 struct rzg2l_cpg_priv *priv;
528 #define to_sipll5(_hw) container_of(_hw, struct sipll5, hw)
/*
 * Derive the VCLK rate from a FOUTPOSTDIV rate using the cached DSI
 * divider settings (div_a is a power-of-two shift, div_b is divider-1).
 * NOTE(review): the clksrc branch body and returns are elided here.
 */
530 static unsigned long rzg2l_cpg_get_vclk_rate(struct clk_hw *hw,
533 struct sipll5 *sipll5 = to_sipll5(hw);
534 struct rzg2l_cpg_priv *priv = sipll5->priv;
537 vclk = rate / ((1 << priv->mux_dsi_div_params.dsi_div_a) *
538 (priv->mux_dsi_div_params.dsi_div_b + 1));
540 if (priv->mux_dsi_div_params.clksrc)
/* recalc_rate: report cached FOUTPOSTDIV, falling back to parent rate. */
546 static unsigned long rzg2l_cpg_sipll5_recalc_rate(struct clk_hw *hw,
547 unsigned long parent_rate)
549 struct sipll5 *sipll5 = to_sipll5(hw);
550 unsigned long pll5_rate = sipll5->foutpostdiv_rate;
553 pll5_rate = parent_rate;
/* round_rate: body elided in this extract. */
558 static long rzg2l_cpg_sipll5_round_rate(struct clk_hw *hw,
560 unsigned long *parent_rate)
/*
 * Reprogram PLL5: put it in standby, write the CLK1..CLK5 parameter
 * registers computed by rzg2l_cpg_get_foutpostdiv_rate(), then return it
 * to normal mode and poll for lock.  Fixed here: the call argument
 * `&params` had been HTML-entity-mangled to `&para;ms` — restored.
 * NOTE(review): declarations of ret/val/rate, error returns and braces
 * are elided in this extract, per the embedded numbering gaps.
 */
565 static int rzg2l_cpg_sipll5_set_rate(struct clk_hw *hw,
567 unsigned long parent_rate)
569 struct sipll5 *sipll5 = to_sipll5(hw);
570 struct rzg2l_cpg_priv *priv = sipll5->priv;
571 struct rzg2l_pll5_param params;
572 unsigned long vclk_rate;
577 * OSC --> PLL5 --> FOUTPOSTDIV-->|
578 * | | -->MUX -->DIV_DSIA_B -->M3 -->VCLK
581 * Based on the dot clock, the DSI divider clock calculates the parent
582 * rate and the pll5 parameters for generating FOUTPOSTDIV. It propagates
583 * that info to sipll5 which sets parameters for generating FOUTPOSTDIV.
585 * OSC --> PLL5 --> FOUTPOSTDIV
591 vclk_rate = rzg2l_cpg_get_vclk_rate(hw, rate);
592 sipll5->foutpostdiv_rate =
593 rzg2l_cpg_get_foutpostdiv_rate(&params, vclk_rate);
595 /* Put PLL5 into standby mode */
596 writel(CPG_SIPLL5_STBY_RESETB_WEN, priv->base + CPG_SIPLL5_STBY);
597 ret = readl_poll_timeout(priv->base + CPG_SIPLL5_MON, val,
598 !(val & CPG_SIPLL5_MON_PLL5_LOCK), 100, 250000);
600 dev_err(priv->dev, "failed to release pll5 lock");
604 /* Output clock setting 1 */
605 writel((params.pl5_postdiv1 << 0) | (params.pl5_postdiv2 << 4) |
606 (params.pl5_refdiv << 8), priv->base + CPG_SIPLL5_CLK1);
608 /* Output clock setting, SSCG modulation value setting 3 */
609 writel((params.pl5_fracin << 8), priv->base + CPG_SIPLL5_CLK3);
611 /* Output clock setting 4 */
612 writel(CPG_SIPLL5_CLK4_RESV_LSB | (params.pl5_intin << 16),
613 priv->base + CPG_SIPLL5_CLK4);
615 /* Output clock setting 5 */
616 writel(params.pl5_spread, priv->base + CPG_SIPLL5_CLK5);
618 /* PLL normal mode setting */
619 writel(CPG_SIPLL5_STBY_DOWNSPREAD_WEN | CPG_SIPLL5_STBY_SSCG_EN_WEN |
620 CPG_SIPLL5_STBY_RESETB_WEN | CPG_SIPLL5_STBY_RESETB,
621 priv->base + CPG_SIPLL5_STBY);
623 /* PLL normal mode transition, output clock stability check */
624 ret = readl_poll_timeout(priv->base + CPG_SIPLL5_MON, val,
625 (val & CPG_SIPLL5_MON_PLL5_LOCK), 100, 250000);
627 dev_err(priv->dev, "failed to lock pll5");
/* clk_ops for the SIPLL5 clock. */
634 static const struct clk_ops rzg2l_cpg_sipll5_ops = {
635 .recalc_rate = rzg2l_cpg_sipll5_recalc_rate,
636 .round_rate = rzg2l_cpg_sipll5_round_rate,
637 .set_rate = rzg2l_cpg_sipll5_set_rate,
/*
 * Register the SIPLL5 clock, enable SSCG/deassert reset, and seed the
 * default mux/divider parameters used by the DSI clock chain.
 * NOTE(review): IS_ERR/NULL checks and returns are elided here.
 */
640 static struct clk * __init
641 rzg2l_cpg_sipll5_register(const struct cpg_core_clk *core,
643 struct rzg2l_cpg_priv *priv)
645 const struct clk *parent;
646 struct clk_init_data init;
647 const char *parent_name;
648 struct sipll5 *sipll5;
649 struct clk_hw *clk_hw;
652 parent = clks[core->parent & 0xffff];
654 return ERR_CAST(parent);
656 sipll5 = devm_kzalloc(priv->dev, sizeof(*sipll5), GFP_KERNEL);
658 return ERR_PTR(-ENOMEM);
660 init.name = core->name;
661 parent_name = __clk_get_name(parent);
662 init.ops = &rzg2l_cpg_sipll5_ops;
664 init.parent_names = &parent_name;
665 init.num_parents = 1;
667 sipll5->hw.init = &init;
668 sipll5->conf = core->conf;
/* Take PLL5 out of reset with SSCG write-enable bits set. */
671 writel(CPG_SIPLL5_STBY_SSCG_EN_WEN | CPG_SIPLL5_STBY_RESETB_WEN |
672 CPG_SIPLL5_STBY_RESETB, priv->base + CPG_SIPLL5_STBY);
674 clk_hw = &sipll5->hw;
675 clk_hw->init = &init;
677 ret = devm_clk_hw_register(priv->dev, clk_hw);
/* Defaults consumed by the mux and DSI divider clocks. */
681 priv->mux_dsi_div_params.clksrc = 1; /* Use clk src 1 for DSI */
682 priv->mux_dsi_div_params.dsi_div_a = 1; /* Divided by 2 */
683 priv->mux_dsi_div_params.dsi_div_b = 2; /* Divided by 3 */
693 struct rzg2l_cpg_priv *priv;
696 #define to_pll(_hw) container_of(_hw, struct pll_clk, hw)
698 static unsigned long rzg2l_cpg_pll_clk_recalc_rate(struct clk_hw *hw,
699 unsigned long parent_rate)
701 struct pll_clk *pll_clk = to_pll(hw);
702 struct rzg2l_cpg_priv *priv = pll_clk->priv;
703 unsigned int val1, val2;
704 unsigned int mult = 1;
705 unsigned int div = 1;
707 if (pll_clk->type != CLK_TYPE_SAM_PLL)
710 val1 = readl(priv->base + GET_REG_SAMPLL_CLK1(pll_clk->conf));
711 val2 = readl(priv->base + GET_REG_SAMPLL_CLK2(pll_clk->conf));
712 mult = MDIV(val1) + KDIV(val1) / 65536;
713 div = PDIV(val1) << SDIV(val2);
715 return DIV_ROUND_CLOSEST_ULL((u64)parent_rate * mult, div);
718 static const struct clk_ops rzg2l_cpg_pll_ops = {
719 .recalc_rate = rzg2l_cpg_pll_clk_recalc_rate,
/*
 * Register a SAM PLL core clock; all hardware description (conf, type,
 * register base) is copied into the per-clock pll_clk instance.
 * NOTE(review): IS_ERR/NULL checks are elided in this extract.
 */
722 static struct clk * __init
723 rzg2l_cpg_pll_clk_register(const struct cpg_core_clk *core,
726 struct rzg2l_cpg_priv *priv)
728 struct device *dev = priv->dev;
729 const struct clk *parent;
730 struct clk_init_data init;
731 const char *parent_name;
732 struct pll_clk *pll_clk;
734 parent = clks[core->parent & 0xffff];
736 return ERR_CAST(parent);
738 pll_clk = devm_kzalloc(dev, sizeof(*pll_clk), GFP_KERNEL);
740 return ERR_PTR(-ENOMEM);
742 parent_name = __clk_get_name(parent);
743 init.name = core->name;
744 init.ops = &rzg2l_cpg_pll_ops;
746 init.parent_names = &parent_name;
747 init.num_parents = 1;
749 pll_clk->hw.init = &init;
750 pll_clk->conf = core->conf;
751 pll_clk->base = base;
752 pll_clk->priv = priv;
753 pll_clk->type = core->type;
755 return clk_register(NULL, &pll_clk->hw);
/*
 * Two-cell OF clock lookup: args[0] selects CPG_CORE vs CPG_MOD space,
 * args[1] is the index; bounds-checked against the priv counters.
 * NOTE(review): the return type line, case labels and closing braces are
 * elided in this extract.
 */
759 *rzg2l_cpg_clk_src_twocell_get(struct of_phandle_args *clkspec,
762 unsigned int clkidx = clkspec->args[1];
763 struct rzg2l_cpg_priv *priv = data;
764 struct device *dev = priv->dev;
768 switch (clkspec->args[0]) {
/* Core clock branch: index must not exceed the last DT-exported core clk. */
771 if (clkidx > priv->last_dt_core_clk) {
772 dev_err(dev, "Invalid %s clock index %u\n", type, clkidx);
773 return ERR_PTR(-EINVAL);
775 clk = priv->clks[clkidx];
/* Module clock branch: module clocks follow the core clocks in clks[]. */
780 if (clkidx >= priv->num_mod_clks) {
781 dev_err(dev, "Invalid %s clock index %u\n", type,
783 return ERR_PTR(-EINVAL);
785 clk = priv->clks[priv->num_core_clks + clkidx];
789 dev_err(dev, "Invalid CPG clock type %u\n", clkspec->args[0]);
790 return ERR_PTR(-EINVAL);
794 dev_err(dev, "Cannot get %s clock %u: %ld", type, clkidx,
797 dev_dbg(dev, "clock (%u, %u) is %pC at %lu Hz\n",
798 clkspec->args[0], clkspec->args[1], clk,
/*
 * Register one core clock, dispatching on core->type to the matching
 * helper, and store the result into priv->clks[id].
 * NOTE(review): return type, several case labels, break statements and
 * the fail path are elided in this extract.
 */
804 rzg2l_cpg_register_core_clk(const struct cpg_core_clk *core,
805 const struct rzg2l_cpg_info *info,
806 struct rzg2l_cpg_priv *priv)
808 struct clk *clk = ERR_PTR(-EOPNOTSUPP), *parent;
809 struct device *dev = priv->dev;
810 unsigned int id = core->id, div = core->div;
811 const char *parent_name;
813 WARN_DEBUG(id >= priv->num_core_clks);
814 WARN_DEBUG(PTR_ERR(priv->clks[id]) != -ENOENT);
817 /* Skip NULLified clock */
821 switch (core->type) {
/* External input: taken directly from the device tree. */
823 clk = of_clk_get_by_name(priv->dev->of_node, core->name);
/* Fixed-factor clock derived from another core clock. */
826 WARN_DEBUG(core->parent >= priv->num_core_clks);
827 parent = priv->clks[core->parent];
828 if (IS_ERR(parent)) {
833 parent_name = __clk_get_name(parent);
834 clk = clk_register_fixed_factor(NULL, core->name,
835 parent_name, CLK_SET_RATE_PARENT,
838 case CLK_TYPE_SAM_PLL:
839 clk = rzg2l_cpg_pll_clk_register(core, priv->clks,
842 case CLK_TYPE_SIPLL5:
843 clk = rzg2l_cpg_sipll5_register(core, priv->clks, priv);
846 clk = rzg2l_cpg_div_clk_register(core, priv->clks,
850 clk = rzg2l_cpg_mux_clk_register(core, priv->base, priv);
852 case CLK_TYPE_SD_MUX:
853 clk = rzg2l_cpg_sd_mux_clk_register(core, priv->base, priv);
855 case CLK_TYPE_PLL5_4_MUX:
856 clk = rzg2l_cpg_pll5_4_mux_clk_register(core, priv);
858 case CLK_TYPE_DSI_DIV:
859 clk = rzg2l_cpg_dsi_div_clk_register(core, priv->clks, priv);
865 if (IS_ERR_OR_NULL(clk))
868 dev_dbg(dev, "Core clock %pC at %lu Hz\n", clk, clk_get_rate(clk));
869 priv->clks[id] = clk;
/* fail path */
873 dev_err(dev, "Failed to register %s clock %s: %ld\n", "core",
874 core->name, PTR_ERR(clk));
878 * struct mstp_clock - MSTP gating clock
880 * @hw: handle between common and hardware-specific interfaces
881 * @off: register offset
883 * @enabled: soft state of the clock, if it is coupled with another clock
884 * @priv: CPG/MSTP private data
885 * @sibling: pointer to the other coupled clock
/* Tail of struct mstp_clock (hw/off/bit/enabled members elided here). */
892 struct rzg2l_cpg_priv *priv;
893 struct mstp_clock *sibling;
896 #define to_mod_clock(_hw) container_of(_hw, struct mstp_clock, hw)
/*
 * Gate/ungate a module clock via CLK_ON: the field's bits go in the upper
 * half-word as write-enable, the value in the lower; on enable, completion
 * is polled through the CLK_MON register where the SoC has one.
 * NOTE(review): flags/value declarations, the !clock->off early return,
 * and several braces are elided in this extract.
 */
898 static int rzg2l_mod_clock_endisable(struct clk_hw *hw, bool enable)
900 struct mstp_clock *clock = to_mod_clock(hw);
901 struct rzg2l_cpg_priv *priv = clock->priv;
902 unsigned int reg = clock->off;
903 struct device *dev = priv->dev;
905 u32 bitmask = BIT(clock->bit);
910 dev_dbg(dev, "%pC does not support ON/OFF\n", hw->clk);
914 dev_dbg(dev, "CLK_ON %u/%pC %s\n", CLK_ON_R(reg), hw->clk,
915 enable ? "ON" : "OFF");
916 spin_lock_irqsave(&priv->rmw_lock, flags);
918 value = bitmask << 16;
921 writel(value, priv->base + CLK_ON_R(reg));
923 spin_unlock_irqrestore(&priv->rmw_lock, flags);
928 if (!priv->info->has_clk_mon_regs)
931 error = readl_poll_timeout_atomic(priv->base + CLK_MON_R(reg), value,
932 value & bitmask, 0, 10);
934 dev_err(dev, "Failed to enable CLK_ON %p\n",
935 priv->base + CLK_ON_R(reg));
940 static int rzg2l_mod_clock_enable(struct clk_hw *hw)
942 struct mstp_clock *clock = to_mod_clock(hw);
944 if (clock->sibling) {
945 struct rzg2l_cpg_priv *priv = clock->priv;
949 spin_lock_irqsave(&priv->rmw_lock, flags);
950 enabled = clock->sibling->enabled;
951 clock->enabled = true;
952 spin_unlock_irqrestore(&priv->rmw_lock, flags);
957 return rzg2l_mod_clock_endisable(hw, true);
960 static void rzg2l_mod_clock_disable(struct clk_hw *hw)
962 struct mstp_clock *clock = to_mod_clock(hw);
964 if (clock->sibling) {
965 struct rzg2l_cpg_priv *priv = clock->priv;
969 spin_lock_irqsave(&priv->rmw_lock, flags);
970 enabled = clock->sibling->enabled;
971 clock->enabled = false;
972 spin_unlock_irqrestore(&priv->rmw_lock, flags);
977 rzg2l_mod_clock_endisable(hw, false);
980 static int rzg2l_mod_clock_is_enabled(struct clk_hw *hw)
982 struct mstp_clock *clock = to_mod_clock(hw);
983 struct rzg2l_cpg_priv *priv = clock->priv;
984 u32 bitmask = BIT(clock->bit);
988 dev_dbg(priv->dev, "%pC does not support ON/OFF\n", hw->clk);
993 return clock->enabled;
995 if (priv->info->has_clk_mon_regs)
996 value = readl(priv->base + CLK_MON_R(clock->off));
998 value = readl(priv->base + clock->off);
1000 return value & bitmask;
1003 static const struct clk_ops rzg2l_mod_clock_ops = {
1004 .enable = rzg2l_mod_clock_enable,
1005 .disable = rzg2l_mod_clock_disable,
1006 .is_enabled = rzg2l_mod_clock_is_enabled,
/*
 * Find the other half of a coupled clock pair: the registered module clock
 * sharing the same CLK_ON register offset and bit.
 * NOTE(review): loop-local declarations, 'continue', and return lines are
 * elided in this extract.
 */
1009 static struct mstp_clock
1010 *rzg2l_mod_clock_get_sibling(struct mstp_clock *clock,
1011 struct rzg2l_cpg_priv *priv)
1016 for (i = 0; i < priv->num_mod_clks; i++) {
1017 struct mstp_clock *clk;
1019 if (priv->clks[priv->num_core_clks + i] == ERR_PTR(-ENOENT))
1022 hw = __clk_get_hw(priv->clks[priv->num_core_clks + i]);
1023 clk = to_mod_clock(hw);
1024 if (clock->off == clk->off && clock->bit == clk->bit)
/*
 * Register one MSTP module clock, marking critical clocks and wiring up
 * coupled-clock siblings; the result is stored in priv->clks[id].
 * NOTE(review): return type, skip/fail branches and braces are elided.
 */
1032 rzg2l_cpg_register_mod_clk(const struct rzg2l_mod_clk *mod,
1033 const struct rzg2l_cpg_info *info,
1034 struct rzg2l_cpg_priv *priv)
1036 struct mstp_clock *clock = NULL;
1037 struct device *dev = priv->dev;
1038 unsigned int id = mod->id;
1039 struct clk_init_data init;
1040 struct clk *parent, *clk;
1041 const char *parent_name;
1044 WARN_DEBUG(id < priv->num_core_clks);
1045 WARN_DEBUG(id >= priv->num_core_clks + priv->num_mod_clks);
1046 WARN_DEBUG(mod->parent >= priv->num_core_clks + priv->num_mod_clks);
1047 WARN_DEBUG(PTR_ERR(priv->clks[id]) != -ENOENT);
1050 /* Skip NULLified clock */
1054 parent = priv->clks[mod->parent];
1055 if (IS_ERR(parent)) {
1060 clock = devm_kzalloc(dev, sizeof(*clock), GFP_KERNEL);
1062 clk = ERR_PTR(-ENOMEM);
1066 init.name = mod->name;
1067 init.ops = &rzg2l_mod_clock_ops;
1068 init.flags = CLK_SET_RATE_PARENT;
/* Clocks on the critical list must never be gated by the framework. */
1069 for (i = 0; i < info->num_crit_mod_clks; i++)
1070 if (id == info->crit_mod_clks[i]) {
1071 dev_dbg(dev, "CPG %s setting CLK_IS_CRITICAL\n",
1073 init.flags |= CLK_IS_CRITICAL;
1077 parent_name = __clk_get_name(parent);
1078 init.parent_names = &parent_name;
1079 init.num_parents = 1;
1081 clock->off = mod->off;
1082 clock->bit = mod->bit;
1084 clock->hw.init = &init;
1086 clk = clk_register(NULL, &clock->hw);
1090 dev_dbg(dev, "Module clock %pC at %lu Hz\n", clk, clk_get_rate(clk));
1091 priv->clks[id] = clk;
/* Coupled clocks: seed soft state and cross-link the sibling pair. */
1093 if (mod->is_coupled) {
1094 struct mstp_clock *sibling;
1096 clock->enabled = rzg2l_mod_clock_is_enabled(&clock->hw);
1097 sibling = rzg2l_mod_clock_get_sibling(clock, priv);
1099 clock->sibling = sibling;
1100 sibling->sibling = clock;
1107 dev_err(dev, "Failed to register %s clock %s: %ld\n", "module",
1108 mod->name, PTR_ERR(clk));
1111 #define rcdev_to_priv(x) container_of(x, struct rzg2l_cpg_priv, rcdev)
1113 static int rzg2l_cpg_reset(struct reset_controller_dev *rcdev,
1116 struct rzg2l_cpg_priv *priv = rcdev_to_priv(rcdev);
1117 const struct rzg2l_cpg_info *info = priv->info;
1118 unsigned int reg = info->resets[id].off;
1119 u32 dis = BIT(info->resets[id].bit);
1122 dev_dbg(rcdev->dev, "reset id:%ld offset:0x%x\n", id, CLK_RST_R(reg));
1125 writel(we, priv->base + CLK_RST_R(reg));
1127 /* Wait for at least one cycle of the RCLK clock (@ ca. 32 kHz) */
1130 /* Release module from reset state */
1131 writel(we | dis, priv->base + CLK_RST_R(reg));
1136 static int rzg2l_cpg_assert(struct reset_controller_dev *rcdev,
1139 struct rzg2l_cpg_priv *priv = rcdev_to_priv(rcdev);
1140 const struct rzg2l_cpg_info *info = priv->info;
1141 unsigned int reg = info->resets[id].off;
1142 u32 value = BIT(info->resets[id].bit) << 16;
1144 dev_dbg(rcdev->dev, "assert id:%ld offset:0x%x\n", id, CLK_RST_R(reg));
1146 writel(value, priv->base + CLK_RST_R(reg));
1150 static int rzg2l_cpg_deassert(struct reset_controller_dev *rcdev,
1153 struct rzg2l_cpg_priv *priv = rcdev_to_priv(rcdev);
1154 const struct rzg2l_cpg_info *info = priv->info;
1155 unsigned int reg = info->resets[id].off;
1156 u32 dis = BIT(info->resets[id].bit);
1157 u32 value = (dis << 16) | dis;
1159 dev_dbg(rcdev->dev, "deassert id:%ld offset:0x%x\n", id,
1162 writel(value, priv->base + CLK_RST_R(reg));
1166 static int rzg2l_cpg_status(struct reset_controller_dev *rcdev,
1169 struct rzg2l_cpg_priv *priv = rcdev_to_priv(rcdev);
1170 const struct rzg2l_cpg_info *info = priv->info;
1171 unsigned int reg = info->resets[id].off;
1172 u32 bitmask = BIT(info->resets[id].bit);
1173 s8 monbit = info->resets[id].monbit;
1175 if (info->has_clk_mon_regs) {
1176 return !!(readl(priv->base + CLK_MRST_R(reg)) & bitmask);
1177 } else if (monbit >= 0) {
1178 u32 monbitmask = BIT(monbit);
1180 return !!(readl(priv->base + CPG_RST_MON) & monbitmask);
1185 static const struct reset_control_ops rzg2l_cpg_reset_ops = {
1186 .reset = rzg2l_cpg_reset,
1187 .assert = rzg2l_cpg_assert,
1188 .deassert = rzg2l_cpg_deassert,
1189 .status = rzg2l_cpg_status,
/*
 * Translate a one-cell reset specifier, rejecting indices that are out of
 * range or map to an unimplemented (off == 0) reset.
 * NOTE(review): the -EINVAL and id return lines are elided here.
 */
1192 static int rzg2l_cpg_reset_xlate(struct reset_controller_dev *rcdev,
1193 const struct of_phandle_args *reset_spec)
1195 struct rzg2l_cpg_priv *priv = rcdev_to_priv(rcdev);
1196 const struct rzg2l_cpg_info *info = priv->info;
1197 unsigned int id = reset_spec->args[0];
1199 if (id >= rcdev->nr_resets || !info->resets[id].off) {
1200 dev_err(rcdev->dev, "Invalid reset index %u\n", id);
/* Populate and register the reset controller embedded in priv. */
1207 static int rzg2l_cpg_reset_controller_register(struct rzg2l_cpg_priv *priv)
1209 priv->rcdev.ops = &rzg2l_cpg_reset_ops;
1210 priv->rcdev.of_node = priv->dev->of_node;
1211 priv->rcdev.dev = priv->dev;
1212 priv->rcdev.of_reset_n_cells = 1;
1213 priv->rcdev.of_xlate = rzg2l_cpg_reset_xlate;
1214 priv->rcdev.nr_resets = priv->num_resets;
1216 return devm_reset_controller_register(priv->dev, &priv->rcdev);
1219 static bool rzg2l_cpg_is_pm_clk(struct rzg2l_cpg_priv *priv,
1220 const struct of_phandle_args *clkspec)
1222 const struct rzg2l_cpg_info *info = priv->info;
1226 if (clkspec->args_count != 2)
1229 if (clkspec->args[0] != CPG_MOD)
1232 id = clkspec->args[1] + info->num_total_core_clks;
1233 for (i = 0; i < info->num_no_pm_mod_clks; i++) {
1234 if (info->no_pm_mod_clks[i] == id)
/*
 * genpd attach callback: walk the consumer's "clocks" property and add
 * every PM-managed CPG clock to its pm_clk list.
 * NOTE(review): loop counter, error paths/gotos and braces are elided in
 * this extract.
 */
1241 static int rzg2l_cpg_attach_dev(struct generic_pm_domain *domain, struct device *dev)
1243 struct rzg2l_cpg_priv *priv = container_of(domain, struct rzg2l_cpg_priv, genpd);
1244 struct device_node *np = dev->of_node;
1245 struct of_phandle_args clkspec;
1251 while (!of_parse_phandle_with_args(np, "clocks", "#clock-cells", i,
1253 if (rzg2l_cpg_is_pm_clk(priv, &clkspec)) {
1256 error = pm_clk_create(dev);
1258 of_node_put(clkspec.np);
1262 clk = of_clk_get_from_provider(&clkspec);
1263 of_node_put(clkspec.np);
1265 error = PTR_ERR(clk);
1269 error = pm_clk_add_clk(dev, clk);
1271 dev_err(dev, "pm_clk_add_clk failed %d\n",
1276 of_node_put(clkspec.np);
/* error unwind */
1287 pm_clk_destroy(dev);
/* genpd detach callback: tear down the pm_clk list if one was created. */
1292 static void rzg2l_cpg_detach_dev(struct generic_pm_domain *unused, struct device *dev)
1294 if (!pm_clk_no_clocks(dev))
1295 pm_clk_destroy(dev);
/* devm action callback: remove the genpd registered below. */
1298 static void rzg2l_cpg_genpd_remove(void *data)
1300 pm_genpd_remove(data);
/*
 * Create and register the always-on PM-clock domain for this CPG and
 * expose it as a simple genpd provider.
 */
1303 static int __init rzg2l_cpg_add_clk_domain(struct rzg2l_cpg_priv *priv)
1305 struct device *dev = priv->dev;
1306 struct device_node *np = dev->of_node;
1307 struct generic_pm_domain *genpd = &priv->genpd;
1310 genpd->name = np->name;
1311 genpd->flags = GENPD_FLAG_PM_CLK | GENPD_FLAG_ALWAYS_ON |
1312 GENPD_FLAG_ACTIVE_WAKEUP;
1313 genpd->attach_dev = rzg2l_cpg_attach_dev;
1314 genpd->detach_dev = rzg2l_cpg_detach_dev;
1315 ret = pm_genpd_init(genpd, &pm_domain_always_on_gov, false);
1319 ret = devm_add_action_or_reset(dev, rzg2l_cpg_genpd_remove, genpd);
1323 return of_genpd_add_provider_simple(np, genpd);
/*
 * Probe: map registers, allocate the clks[] array (pre-filled with
 * -ENOENT), register all core and module clocks, then the OF clock
 * provider, the PM clock domain and the reset controller.
 * NOTE(review): priv->dev/clks assignments, NULL checks, error returns
 * and braces are elided in this extract.
 */
1326 static int __init rzg2l_cpg_probe(struct platform_device *pdev)
1328 struct device *dev = &pdev->dev;
1329 struct device_node *np = dev->of_node;
1330 const struct rzg2l_cpg_info *info;
1331 struct rzg2l_cpg_priv *priv;
1332 unsigned int nclks, i;
1336 info = of_device_get_match_data(dev);
1338 priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
1344 spin_lock_init(&priv->rmw_lock);
1346 priv->base = devm_platform_ioremap_resource(pdev, 0);
1347 if (IS_ERR(priv->base))
1348 return PTR_ERR(priv->base);
1350 nclks = info->num_total_core_clks + info->num_hw_mod_clks;
1351 clks = devm_kmalloc_array(dev, nclks, sizeof(*clks), GFP_KERNEL);
1355 dev_set_drvdata(dev, priv);
1357 priv->num_core_clks = info->num_total_core_clks;
1358 priv->num_mod_clks = info->num_hw_mod_clks;
1359 priv->num_resets = info->num_resets;
1360 priv->last_dt_core_clk = info->last_dt_core_clk;
/* Mark every slot absent until the corresponding clock is registered. */
1362 for (i = 0; i < nclks; i++)
1363 clks[i] = ERR_PTR(-ENOENT);
1365 for (i = 0; i < info->num_core_clks; i++)
1366 rzg2l_cpg_register_core_clk(&info->core_clks[i], info, priv);
1368 for (i = 0; i < info->num_mod_clks; i++)
1369 rzg2l_cpg_register_mod_clk(&info->mod_clks[i], info, priv);
1371 error = of_clk_add_provider(np, rzg2l_cpg_clk_src_twocell_get, priv);
1375 error = devm_add_action_or_reset(dev, rzg2l_cpg_del_clk_provider, np);
1379 error = rzg2l_cpg_add_clk_domain(priv);
1383 error = rzg2l_cpg_reset_controller_register(priv);
/* Per-SoC match table; entries compiled in via the CLK_R9A* configs. */
1390 static const struct of_device_id rzg2l_cpg_match[] = {
1391 #ifdef CONFIG_CLK_R9A07G043
1393 .compatible = "renesas,r9a07g043-cpg",
1394 .data = &r9a07g043_cpg_info,
1397 #ifdef CONFIG_CLK_R9A07G044
1399 .compatible = "renesas,r9a07g044-cpg",
1400 .data = &r9a07g044_cpg_info,
1403 #ifdef CONFIG_CLK_R9A07G054
1405 .compatible = "renesas,r9a07g054-cpg",
1406 .data = &r9a07g054_cpg_info,
1409 #ifdef CONFIG_CLK_R9A09G011
1411 .compatible = "renesas,r9a09g011-cpg",
1412 .data = &r9a09g011_cpg_info,
1418 static struct platform_driver rzg2l_cpg_driver = {
1420 .name = "rzg2l-cpg",
1421 .of_match_table = rzg2l_cpg_match,
/* Registered at subsys_initcall so consumers probing later find clocks. */
1425 static int __init rzg2l_cpg_init(void)
1427 return platform_driver_probe(&rzg2l_cpg_driver, rzg2l_cpg_probe);
1430 subsys_initcall(rzg2l_cpg_init);
1432 MODULE_DESCRIPTION("Renesas RZ/G2L CPG Driver");