clk: renesas: rzg2l: Lock around writes to mux register
drivers/clk/renesas/rzg2l-cpg.c

// SPDX-License-Identifier: GPL-2.0
/*
 * RZ/G2L Clock Pulse Generator
 *
 * Copyright (C) 2021 Renesas Electronics Corp.
 *
 * Based on renesas-cpg-mssr.c
 *
 * Copyright (C) 2015 Glider bvba
 * Copyright (C) 2013 Ideas On Board SPRL
 * Copyright (C) 2015 Renesas Electronics Corp.
 */

#include <linux/bitfield.h>
#include <linux/clk.h>
#include <linux/clk-provider.h>
#include <linux/clk/renesas.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/init.h>
#include <linux/iopoll.h>
#include <linux/mod_devicetable.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/pm_clock.h>
#include <linux/pm_domain.h>
#include <linux/reset-controller.h>
#include <linux/slab.h>
#include <linux/units.h>

#include <dt-bindings/clock/renesas-cpg-mssr.h>

#include "rzg2l-cpg.h"

#ifdef DEBUG
#define WARN_DEBUG(x)   WARN_ON(x)
#else
#define WARN_DEBUG(x)   do { } while (0)
#endif

#define GET_SHIFT(val)          ((val >> 12) & 0xff)
#define GET_WIDTH(val)          ((val >> 8) & 0xf)

#define KDIV(val)               FIELD_GET(GENMASK(31, 16), val)
#define MDIV(val)               FIELD_GET(GENMASK(15, 6), val)
#define PDIV(val)               FIELD_GET(GENMASK(5, 0), val)
#define SDIV(val)               FIELD_GET(GENMASK(2, 0), val)

#define CLK_ON_R(reg)           (reg)
#define CLK_MON_R(reg)          (0x180 + (reg))
#define CLK_RST_R(reg)          (reg)
#define CLK_MRST_R(reg)         (0x180 + (reg))

#define GET_REG_OFFSET(val)             ((val >> 20) & 0xfff)
#define GET_REG_SAMPLL_CLK1(val)        ((val >> 22) & 0xfff)
#define GET_REG_SAMPLL_CLK2(val)        ((val >> 12) & 0xfff)
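
/*
 * Core clocks describe their register layout in a packed ->conf word:
 * GET_REG_OFFSET()/GET_SHIFT()/GET_WIDTH() extract the register offset
 * (bits [31:20]), bit shift ([19:12]) and field width ([11:8]), while the
 * SAMPLL variants extract the two register offsets packed for
 * CLK_TYPE_SAM_PLL clocks. The per-SoC tables are assumed to build these
 * words with the corresponding *_PACK() helpers in rzg2l-cpg.h.
 */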

#define MAX_VCLK_FREQ           (148500000)

struct sd_hw_data {
        struct clk_hw hw;
        u32 conf;
        struct rzg2l_cpg_priv *priv;
};

#define to_sd_hw_data(_hw)      container_of(_hw, struct sd_hw_data, hw)

struct rzg2l_pll5_param {
        u32 pl5_fracin;
        u8 pl5_refdiv;
        u8 pl5_intin;
        u8 pl5_postdiv1;
        u8 pl5_postdiv2;
        u8 pl5_spread;
};

struct rzg2l_pll5_mux_dsi_div_param {
        u8 clksrc;
        u8 dsi_div_a;
        u8 dsi_div_b;
};

/**
 * struct rzg2l_cpg_priv - Clock Pulse Generator Private Data
 *
 * @rcdev: Reset controller entity
 * @dev: CPG device
 * @base: CPG register block base address
 * @rmw_lock: protects register accesses
 * @clks: Array containing all Core and Module Clocks
 * @num_core_clks: Number of Core Clocks in clks[]
 * @num_mod_clks: Number of Module Clocks in clks[]
 * @num_resets: Number of Module Resets in info->resets[]
 * @last_dt_core_clk: ID of the last Core Clock exported to DT
 * @info: Pointer to platform data
 * @genpd: PM domain
 * @mux_dsi_div_params: pll5 mux and dsi div parameters
 */
struct rzg2l_cpg_priv {
        struct reset_controller_dev rcdev;
        struct device *dev;
        void __iomem *base;
        spinlock_t rmw_lock;

        struct clk **clks;
        unsigned int num_core_clks;
        unsigned int num_mod_clks;
        unsigned int num_resets;
        unsigned int last_dt_core_clk;

        const struct rzg2l_cpg_info *info;

        struct generic_pm_domain genpd;

        struct rzg2l_pll5_mux_dsi_div_param mux_dsi_div_params;
};

static void rzg2l_cpg_del_clk_provider(void *data)
{
        of_clk_del_provider(data);
}

static struct clk * __init
rzg2l_cpg_div_clk_register(const struct cpg_core_clk *core,
                           struct clk **clks,
                           void __iomem *base,
                           struct rzg2l_cpg_priv *priv)
{
        struct device *dev = priv->dev;
        const struct clk *parent;
        const char *parent_name;
        struct clk_hw *clk_hw;

        parent = clks[core->parent & 0xffff];
        if (IS_ERR(parent))
                return ERR_CAST(parent);

        parent_name = __clk_get_name(parent);

        if (core->dtable)
                clk_hw = clk_hw_register_divider_table(dev, core->name,
                                                       parent_name, 0,
                                                       base + GET_REG_OFFSET(core->conf),
                                                       GET_SHIFT(core->conf),
                                                       GET_WIDTH(core->conf),
                                                       core->flag,
                                                       core->dtable,
                                                       &priv->rmw_lock);
        else
                clk_hw = clk_hw_register_divider(dev, core->name,
                                                 parent_name, 0,
                                                 base + GET_REG_OFFSET(core->conf),
                                                 GET_SHIFT(core->conf),
                                                 GET_WIDTH(core->conf),
                                                 core->flag, &priv->rmw_lock);

        if (IS_ERR(clk_hw))
                return ERR_CAST(clk_hw);

        return clk_hw->clk;
}

static struct clk * __init
rzg2l_cpg_mux_clk_register(const struct cpg_core_clk *core,
                           void __iomem *base,
                           struct rzg2l_cpg_priv *priv)
{
        const struct clk_hw *clk_hw;

        clk_hw = devm_clk_hw_register_mux(priv->dev, core->name,
                                          core->parent_names, core->num_parents,
                                          core->flag,
                                          base + GET_REG_OFFSET(core->conf),
                                          GET_SHIFT(core->conf),
                                          GET_WIDTH(core->conf),
                                          core->mux_flags, &priv->rmw_lock);
        if (IS_ERR(clk_hw))
                return ERR_CAST(clk_hw);

        return clk_hw->clk;
}

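/*
 * Note: like the other CPG switching registers, SEL_SDHI0/1_SET use the
 * upper 16 bits as write-enable bits for the corresponding data bits, which
 * is why 'bitmask' below is the field mask shifted up by 16 and OR'ed into
 * every write. The write plus CPG_CLKSTATUS poll sequence is done under
 * priv->rmw_lock so that concurrent callers cannot interleave with the
 * mandatory intermediate switch to 266 MHz.
 */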
static int rzg2l_cpg_sd_clk_mux_set_parent(struct clk_hw *hw, u8 index)
{
        struct sd_hw_data *hwdata = to_sd_hw_data(hw);
        struct rzg2l_cpg_priv *priv = hwdata->priv;
        u32 off = GET_REG_OFFSET(hwdata->conf);
        u32 shift = GET_SHIFT(hwdata->conf);
        const u32 clk_src_266 = 2;
        u32 msk, val, bitmask;
        unsigned long flags;
        int ret;

        /*
         * As per the HW manual, we must not switch directly between 533 MHz
         * and 400 MHz. To change the setting from 2'b01 (533 MHz) to 2'b10
         * (400 MHz) or vice versa, switch to 2'b11 (266 MHz) first, and then
         * switch to the target setting (2'b01 (533 MHz) or 2'b10 (400 MHz)).
         * Writing a value of '0' to the SEL_SDHI0_SET or SEL_SDHI1_SET clock
         * switching register is prohibited.
         * The clock mux has 3 input clocks (533 MHz, 400 MHz, and 266 MHz),
         * and the index-to-value mapping is done by adding 1 to the index.
         */
        bitmask = (GENMASK(GET_WIDTH(hwdata->conf) - 1, 0) << shift) << 16;
        msk = off ? CPG_CLKSTATUS_SELSDHI1_STS : CPG_CLKSTATUS_SELSDHI0_STS;
        spin_lock_irqsave(&priv->rmw_lock, flags);
        if (index != clk_src_266) {
                writel(bitmask | ((clk_src_266 + 1) << shift), priv->base + off);

                ret = readl_poll_timeout_atomic(priv->base + CPG_CLKSTATUS, val,
                                                !(val & msk), 10,
                                                CPG_SDHI_CLK_SWITCH_STATUS_TIMEOUT_US);
                if (ret)
                        goto unlock;
        }

        writel(bitmask | ((index + 1) << shift), priv->base + off);

        ret = readl_poll_timeout_atomic(priv->base + CPG_CLKSTATUS, val,
                                        !(val & msk), 10,
                                        CPG_SDHI_CLK_SWITCH_STATUS_TIMEOUT_US);
unlock:
        spin_unlock_irqrestore(&priv->rmw_lock, flags);

        if (ret)
                dev_err(priv->dev, "failed to switch clk source\n");

        return ret;
}

static u8 rzg2l_cpg_sd_clk_mux_get_parent(struct clk_hw *hw)
{
        struct sd_hw_data *hwdata = to_sd_hw_data(hw);
        struct rzg2l_cpg_priv *priv = hwdata->priv;
        u32 val = readl(priv->base + GET_REG_OFFSET(hwdata->conf));

        val >>= GET_SHIFT(hwdata->conf);
        val &= GENMASK(GET_WIDTH(hwdata->conf) - 1, 0);
        if (val) {
                val--;
        } else {
                /* Prohibited clk source, change it to 533 MHz (reset value) */
                rzg2l_cpg_sd_clk_mux_set_parent(hw, 0);
        }

        return val;
}

static const struct clk_ops rzg2l_cpg_sd_clk_mux_ops = {
        .determine_rate = __clk_mux_determine_rate_closest,
        .set_parent     = rzg2l_cpg_sd_clk_mux_set_parent,
        .get_parent     = rzg2l_cpg_sd_clk_mux_get_parent,
};

static struct clk * __init
rzg2l_cpg_sd_mux_clk_register(const struct cpg_core_clk *core,
                              void __iomem *base,
                              struct rzg2l_cpg_priv *priv)
{
        struct sd_hw_data *clk_hw_data;
        struct clk_init_data init;
        struct clk_hw *clk_hw;
        int ret;

        clk_hw_data = devm_kzalloc(priv->dev, sizeof(*clk_hw_data), GFP_KERNEL);
        if (!clk_hw_data)
                return ERR_PTR(-ENOMEM);

        clk_hw_data->priv = priv;
        clk_hw_data->conf = core->conf;

        init.name = core->name;
        init.ops = &rzg2l_cpg_sd_clk_mux_ops;
        init.flags = 0;
        init.num_parents = core->num_parents;
        init.parent_names = core->parent_names;

        clk_hw = &clk_hw_data->hw;
        clk_hw->init = &init;

        ret = devm_clk_hw_register(priv->dev, clk_hw);
        if (ret)
                return ERR_PTR(ret);

        return clk_hw->clk;
}

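/*
 * Fill in fixed PLL5 parameters for the requested rate (intin = integer MHz
 * part, fracin = 24-bit fractional part) and return the FOUTPOSTDIV rate
 * they produce, i.e. (EXTAL / refdiv) * (intin + fracin / 2^24) /
 * (postdiv1 * postdiv2), evaluated with the integer arithmetic written out
 * below.
 */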
static unsigned long
rzg2l_cpg_get_foutpostdiv_rate(struct rzg2l_pll5_param *params,
                               unsigned long rate)
{
        unsigned long foutpostdiv_rate;

        params->pl5_intin = rate / MEGA;
        params->pl5_fracin = div_u64(((u64)rate % MEGA) << 24, MEGA);
        params->pl5_refdiv = 2;
        params->pl5_postdiv1 = 1;
        params->pl5_postdiv2 = 1;
        params->pl5_spread = 0x16;

        foutpostdiv_rate =
                EXTAL_FREQ_IN_MEGA_HZ * MEGA / params->pl5_refdiv *
                ((((params->pl5_intin << 24) + params->pl5_fracin)) >> 24) /
                (params->pl5_postdiv1 * params->pl5_postdiv2);

        return foutpostdiv_rate;
}

struct dsi_div_hw_data {
        struct clk_hw hw;
        u32 conf;
        unsigned long rate;
        struct rzg2l_cpg_priv *priv;
};

#define to_dsi_div_hw_data(_hw) container_of(_hw, struct dsi_div_hw_data, hw)

static unsigned long rzg2l_cpg_dsi_div_recalc_rate(struct clk_hw *hw,
                                                   unsigned long parent_rate)
{
        struct dsi_div_hw_data *dsi_div = to_dsi_div_hw_data(hw);
        unsigned long rate = dsi_div->rate;

        if (!rate)
                rate = parent_rate;

        return rate;
}

static unsigned long rzg2l_cpg_get_vclk_parent_rate(struct clk_hw *hw,
                                                    unsigned long rate)
{
        struct dsi_div_hw_data *dsi_div = to_dsi_div_hw_data(hw);
        struct rzg2l_cpg_priv *priv = dsi_div->priv;
        struct rzg2l_pll5_param params;
        unsigned long parent_rate;

        parent_rate = rzg2l_cpg_get_foutpostdiv_rate(&params, rate);

        if (priv->mux_dsi_div_params.clksrc)
                parent_rate /= 2;

        return parent_rate;
}

static int rzg2l_cpg_dsi_div_determine_rate(struct clk_hw *hw,
                                            struct clk_rate_request *req)
{
        if (req->rate > MAX_VCLK_FREQ)
                req->rate = MAX_VCLK_FREQ;

        req->best_parent_rate = rzg2l_cpg_get_vclk_parent_rate(hw, req->rate);

        return 0;
}

static int rzg2l_cpg_dsi_div_set_rate(struct clk_hw *hw,
                                      unsigned long rate,
                                      unsigned long parent_rate)
{
        struct dsi_div_hw_data *dsi_div = to_dsi_div_hw_data(hw);
        struct rzg2l_cpg_priv *priv = dsi_div->priv;

        /*
         * MUX -->DIV_DSI_{A,B} -->M3 -->VCLK
         *
         * Based on the dot clock, the DSI divider clock sets the divider
         * value, calculates the PLL parameters for generating FOUTPOSTDIV
         * and the clock source for the MUX, and propagates that information
         * to the parents.
         */

        if (!rate || rate > MAX_VCLK_FREQ)
                return -EINVAL;

        dsi_div->rate = rate;
        writel(CPG_PL5_SDIV_DIV_DSI_A_WEN | CPG_PL5_SDIV_DIV_DSI_B_WEN |
               (priv->mux_dsi_div_params.dsi_div_a << 0) |
               (priv->mux_dsi_div_params.dsi_div_b << 8),
               priv->base + CPG_PL5_SDIV);

        return 0;
}

static const struct clk_ops rzg2l_cpg_dsi_div_ops = {
        .recalc_rate = rzg2l_cpg_dsi_div_recalc_rate,
        .determine_rate = rzg2l_cpg_dsi_div_determine_rate,
        .set_rate = rzg2l_cpg_dsi_div_set_rate,
};

static struct clk * __init
rzg2l_cpg_dsi_div_clk_register(const struct cpg_core_clk *core,
                               struct clk **clks,
                               struct rzg2l_cpg_priv *priv)
{
        struct dsi_div_hw_data *clk_hw_data;
        const struct clk *parent;
        const char *parent_name;
        struct clk_init_data init;
        struct clk_hw *clk_hw;
        int ret;

        parent = clks[core->parent & 0xffff];
        if (IS_ERR(parent))
                return ERR_CAST(parent);

        clk_hw_data = devm_kzalloc(priv->dev, sizeof(*clk_hw_data), GFP_KERNEL);
        if (!clk_hw_data)
                return ERR_PTR(-ENOMEM);

        clk_hw_data->priv = priv;

        parent_name = __clk_get_name(parent);
        init.name = core->name;
        init.ops = &rzg2l_cpg_dsi_div_ops;
        init.flags = CLK_SET_RATE_PARENT;
        init.parent_names = &parent_name;
        init.num_parents = 1;

        clk_hw = &clk_hw_data->hw;
        clk_hw->init = &init;

        ret = devm_clk_hw_register(priv->dev, clk_hw);
        if (ret)
                return ERR_PTR(ret);

        return clk_hw->clk;
}

struct pll5_mux_hw_data {
        struct clk_hw hw;
        u32 conf;
        unsigned long rate;
        struct rzg2l_cpg_priv *priv;
};

#define to_pll5_mux_hw_data(_hw)        container_of(_hw, struct pll5_mux_hw_data, hw)

static int rzg2l_cpg_pll5_4_clk_mux_determine_rate(struct clk_hw *hw,
                                                   struct clk_rate_request *req)
{
        struct clk_hw *parent;
        struct pll5_mux_hw_data *hwdata = to_pll5_mux_hw_data(hw);
        struct rzg2l_cpg_priv *priv = hwdata->priv;

        parent = clk_hw_get_parent_by_index(hw, priv->mux_dsi_div_params.clksrc);
        req->best_parent_hw = parent;
        req->best_parent_rate = req->rate;

        return 0;
}

static int rzg2l_cpg_pll5_4_clk_mux_set_parent(struct clk_hw *hw, u8 index)
{
        struct pll5_mux_hw_data *hwdata = to_pll5_mux_hw_data(hw);
        struct rzg2l_cpg_priv *priv = hwdata->priv;

        /*
         * FOUTPOSTDIV--->|
         *  |             | -->MUX -->DIV_DSIA_B -->M3 -->VCLK
         *  |--FOUT1PH0-->|
         *
         * Based on the dot clock, the DSI divider clock calculates the parent
         * rate and clk source for the MUX. It propagates that info to
         * pll5_4_clk_mux which sets the clock source for DSI divider clock.
         */

        writel(CPG_OTHERFUNC1_REG_RES0_ON_WEN | index,
               priv->base + CPG_OTHERFUNC1_REG);

        return 0;
}

static u8 rzg2l_cpg_pll5_4_clk_mux_get_parent(struct clk_hw *hw)
{
        struct pll5_mux_hw_data *hwdata = to_pll5_mux_hw_data(hw);
        struct rzg2l_cpg_priv *priv = hwdata->priv;

        return readl(priv->base + GET_REG_OFFSET(hwdata->conf));
}

static const struct clk_ops rzg2l_cpg_pll5_4_clk_mux_ops = {
        .determine_rate = rzg2l_cpg_pll5_4_clk_mux_determine_rate,
        .set_parent     = rzg2l_cpg_pll5_4_clk_mux_set_parent,
        .get_parent     = rzg2l_cpg_pll5_4_clk_mux_get_parent,
};

static struct clk * __init
rzg2l_cpg_pll5_4_mux_clk_register(const struct cpg_core_clk *core,
                                  struct rzg2l_cpg_priv *priv)
{
        struct pll5_mux_hw_data *clk_hw_data;
        struct clk_init_data init;
        struct clk_hw *clk_hw;
        int ret;

        clk_hw_data = devm_kzalloc(priv->dev, sizeof(*clk_hw_data), GFP_KERNEL);
        if (!clk_hw_data)
                return ERR_PTR(-ENOMEM);

        clk_hw_data->priv = priv;
        clk_hw_data->conf = core->conf;

        init.name = core->name;
        init.ops = &rzg2l_cpg_pll5_4_clk_mux_ops;
        init.flags = CLK_SET_RATE_PARENT;
        init.num_parents = core->num_parents;
        init.parent_names = core->parent_names;

        clk_hw = &clk_hw_data->hw;
        clk_hw->init = &init;

        ret = devm_clk_hw_register(priv->dev, clk_hw);
        if (ret)
                return ERR_PTR(ret);

        return clk_hw->clk;
}

struct sipll5 {
        struct clk_hw hw;
        u32 conf;
        unsigned long foutpostdiv_rate;
        struct rzg2l_cpg_priv *priv;
};

#define to_sipll5(_hw)  container_of(_hw, struct sipll5, hw)

static unsigned long rzg2l_cpg_get_vclk_rate(struct clk_hw *hw,
                                             unsigned long rate)
{
        struct sipll5 *sipll5 = to_sipll5(hw);
        struct rzg2l_cpg_priv *priv = sipll5->priv;
        unsigned long vclk;

        vclk = rate / ((1 << priv->mux_dsi_div_params.dsi_div_a) *
                       (priv->mux_dsi_div_params.dsi_div_b + 1));

        if (priv->mux_dsi_div_params.clksrc)
                vclk /= 2;

        return vclk;
}

static unsigned long rzg2l_cpg_sipll5_recalc_rate(struct clk_hw *hw,
                                                  unsigned long parent_rate)
{
        struct sipll5 *sipll5 = to_sipll5(hw);
        unsigned long pll5_rate = sipll5->foutpostdiv_rate;

        if (!pll5_rate)
                pll5_rate = parent_rate;

        return pll5_rate;
}

static long rzg2l_cpg_sipll5_round_rate(struct clk_hw *hw,
                                        unsigned long rate,
                                        unsigned long *parent_rate)
{
        return rate;
}

static int rzg2l_cpg_sipll5_set_rate(struct clk_hw *hw,
                                     unsigned long rate,
                                     unsigned long parent_rate)
{
        struct sipll5 *sipll5 = to_sipll5(hw);
        struct rzg2l_cpg_priv *priv = sipll5->priv;
        struct rzg2l_pll5_param params;
        unsigned long vclk_rate;
        int ret;
        u32 val;

        /*
         *  OSC --> PLL5 --> FOUTPOSTDIV-->|
         *                   |             | -->MUX -->DIV_DSIA_B -->M3 -->VCLK
         *                   |--FOUT1PH0-->|
         *
         * Based on the dot clock, the DSI divider clock calculates the parent
         * rate and the pll5 parameters for generating FOUTPOSTDIV. It propagates
         * that info to sipll5 which sets parameters for generating FOUTPOSTDIV.
         *
         * OSC --> PLL5 --> FOUTPOSTDIV
         */

        if (!rate)
                return -EINVAL;

        vclk_rate = rzg2l_cpg_get_vclk_rate(hw, rate);
        sipll5->foutpostdiv_rate =
                rzg2l_cpg_get_foutpostdiv_rate(&params, vclk_rate);

        /* Put PLL5 into standby mode */
        writel(CPG_SIPLL5_STBY_RESETB_WEN, priv->base + CPG_SIPLL5_STBY);
        ret = readl_poll_timeout(priv->base + CPG_SIPLL5_MON, val,
                                 !(val & CPG_SIPLL5_MON_PLL5_LOCK), 100, 250000);
        if (ret) {
                dev_err(priv->dev, "failed to release pll5 lock");
                return ret;
        }

        /* Output clock setting 1 */
        writel((params.pl5_postdiv1 << 0) | (params.pl5_postdiv2 << 4) |
               (params.pl5_refdiv << 8), priv->base + CPG_SIPLL5_CLK1);

        /* Output clock setting, SSCG modulation value setting 3 */
        writel((params.pl5_fracin << 8), priv->base + CPG_SIPLL5_CLK3);

        /* Output clock setting 4 */
        writel(CPG_SIPLL5_CLK4_RESV_LSB | (params.pl5_intin << 16),
               priv->base + CPG_SIPLL5_CLK4);

        /* Output clock setting 5 */
        writel(params.pl5_spread, priv->base + CPG_SIPLL5_CLK5);

        /* PLL normal mode setting */
        writel(CPG_SIPLL5_STBY_DOWNSPREAD_WEN | CPG_SIPLL5_STBY_SSCG_EN_WEN |
               CPG_SIPLL5_STBY_RESETB_WEN | CPG_SIPLL5_STBY_RESETB,
               priv->base + CPG_SIPLL5_STBY);

        /* PLL normal mode transition, output clock stability check */
        ret = readl_poll_timeout(priv->base + CPG_SIPLL5_MON, val,
                                 (val & CPG_SIPLL5_MON_PLL5_LOCK), 100, 250000);
        if (ret) {
                dev_err(priv->dev, "failed to lock pll5");
                return ret;
        }

        return 0;
}

static const struct clk_ops rzg2l_cpg_sipll5_ops = {
        .recalc_rate = rzg2l_cpg_sipll5_recalc_rate,
        .round_rate = rzg2l_cpg_sipll5_round_rate,
        .set_rate = rzg2l_cpg_sipll5_set_rate,
};

static struct clk * __init
rzg2l_cpg_sipll5_register(const struct cpg_core_clk *core,
                          struct clk **clks,
                          struct rzg2l_cpg_priv *priv)
{
        const struct clk *parent;
        struct clk_init_data init;
        const char *parent_name;
        struct sipll5 *sipll5;
        struct clk_hw *clk_hw;
        int ret;

        parent = clks[core->parent & 0xffff];
        if (IS_ERR(parent))
                return ERR_CAST(parent);

        sipll5 = devm_kzalloc(priv->dev, sizeof(*sipll5), GFP_KERNEL);
        if (!sipll5)
                return ERR_PTR(-ENOMEM);

        init.name = core->name;
        parent_name = __clk_get_name(parent);
        init.ops = &rzg2l_cpg_sipll5_ops;
        init.flags = 0;
        init.parent_names = &parent_name;
        init.num_parents = 1;

        sipll5->hw.init = &init;
        sipll5->conf = core->conf;
        sipll5->priv = priv;

        writel(CPG_SIPLL5_STBY_SSCG_EN_WEN | CPG_SIPLL5_STBY_RESETB_WEN |
               CPG_SIPLL5_STBY_RESETB, priv->base + CPG_SIPLL5_STBY);

        clk_hw = &sipll5->hw;
        clk_hw->init = &init;

        ret = devm_clk_hw_register(priv->dev, clk_hw);
        if (ret)
                return ERR_PTR(ret);

        priv->mux_dsi_div_params.clksrc = 1; /* Use clk src 1 for DSI */
        priv->mux_dsi_div_params.dsi_div_a = 1; /* Divided by 2 */
        priv->mux_dsi_div_params.dsi_div_b = 2; /* Divided by 3 */

        return clk_hw->clk;
}

struct pll_clk {
        struct clk_hw hw;
        unsigned int conf;
        unsigned int type;
        void __iomem *base;
        struct rzg2l_cpg_priv *priv;
};

#define to_pll(_hw)     container_of(_hw, struct pll_clk, hw)

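/*
 * For CLK_TYPE_SAM_PLL clocks the rate is read back from the two SAMPLL
 * registers as parent_rate * (M + K / 65536) / (P * 2^S), using the integer
 * arithmetic below; all other types simply follow their parent.
 */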
static unsigned long rzg2l_cpg_pll_clk_recalc_rate(struct clk_hw *hw,
                                                   unsigned long parent_rate)
{
        struct pll_clk *pll_clk = to_pll(hw);
        struct rzg2l_cpg_priv *priv = pll_clk->priv;
        unsigned int val1, val2;
        unsigned int mult = 1;
        unsigned int div = 1;

        if (pll_clk->type != CLK_TYPE_SAM_PLL)
                return parent_rate;

        val1 = readl(priv->base + GET_REG_SAMPLL_CLK1(pll_clk->conf));
        val2 = readl(priv->base + GET_REG_SAMPLL_CLK2(pll_clk->conf));
        mult = MDIV(val1) + KDIV(val1) / 65536;
        div = PDIV(val1) << SDIV(val2);

        return DIV_ROUND_CLOSEST_ULL((u64)parent_rate * mult, div);
}

static const struct clk_ops rzg2l_cpg_pll_ops = {
        .recalc_rate = rzg2l_cpg_pll_clk_recalc_rate,
};

static struct clk * __init
rzg2l_cpg_pll_clk_register(const struct cpg_core_clk *core,
                           struct clk **clks,
                           void __iomem *base,
                           struct rzg2l_cpg_priv *priv)
{
        struct device *dev = priv->dev;
        const struct clk *parent;
        struct clk_init_data init;
        const char *parent_name;
        struct pll_clk *pll_clk;

        parent = clks[core->parent & 0xffff];
        if (IS_ERR(parent))
                return ERR_CAST(parent);

        pll_clk = devm_kzalloc(dev, sizeof(*pll_clk), GFP_KERNEL);
        if (!pll_clk)
                return ERR_PTR(-ENOMEM);

        parent_name = __clk_get_name(parent);
        init.name = core->name;
        init.ops = &rzg2l_cpg_pll_ops;
        init.flags = 0;
        init.parent_names = &parent_name;
        init.num_parents = 1;

        pll_clk->hw.init = &init;
        pll_clk->conf = core->conf;
        pll_clk->base = base;
        pll_clk->priv = priv;
        pll_clk->type = core->type;

        return clk_register(NULL, &pll_clk->hw);
}

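/*
 * DT consumers use two-cell clock specifiers: the first cell selects
 * CPG_CORE or CPG_MOD (from dt-bindings/clock/renesas-cpg-mssr.h), the
 * second is the index within that group.
 */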
static struct clk
*rzg2l_cpg_clk_src_twocell_get(struct of_phandle_args *clkspec,
                               void *data)
{
        unsigned int clkidx = clkspec->args[1];
        struct rzg2l_cpg_priv *priv = data;
        struct device *dev = priv->dev;
        const char *type;
        struct clk *clk;

        switch (clkspec->args[0]) {
        case CPG_CORE:
                type = "core";
                if (clkidx > priv->last_dt_core_clk) {
                        dev_err(dev, "Invalid %s clock index %u\n", type, clkidx);
                        return ERR_PTR(-EINVAL);
                }
                clk = priv->clks[clkidx];
                break;

        case CPG_MOD:
                type = "module";
                if (clkidx >= priv->num_mod_clks) {
                        dev_err(dev, "Invalid %s clock index %u\n", type,
                                clkidx);
                        return ERR_PTR(-EINVAL);
                }
                clk = priv->clks[priv->num_core_clks + clkidx];
                break;

        default:
                dev_err(dev, "Invalid CPG clock type %u\n", clkspec->args[0]);
                return ERR_PTR(-EINVAL);
        }

        if (IS_ERR(clk))
                dev_err(dev, "Cannot get %s clock %u: %ld", type, clkidx,
                        PTR_ERR(clk));
        else
                dev_dbg(dev, "clock (%u, %u) is %pC at %lu Hz\n",
                        clkspec->args[0], clkspec->args[1], clk,
                        clk_get_rate(clk));
        return clk;
}

static void __init
rzg2l_cpg_register_core_clk(const struct cpg_core_clk *core,
                            const struct rzg2l_cpg_info *info,
                            struct rzg2l_cpg_priv *priv)
{
        struct clk *clk = ERR_PTR(-EOPNOTSUPP), *parent;
        struct device *dev = priv->dev;
        unsigned int id = core->id, div = core->div;
        const char *parent_name;

        WARN_DEBUG(id >= priv->num_core_clks);
        WARN_DEBUG(PTR_ERR(priv->clks[id]) != -ENOENT);

        if (!core->name) {
                /* Skip NULLified clock */
                return;
        }

        switch (core->type) {
        case CLK_TYPE_IN:
                clk = of_clk_get_by_name(priv->dev->of_node, core->name);
                break;
        case CLK_TYPE_FF:
                WARN_DEBUG(core->parent >= priv->num_core_clks);
                parent = priv->clks[core->parent];
                if (IS_ERR(parent)) {
                        clk = parent;
                        goto fail;
                }

                parent_name = __clk_get_name(parent);
                clk = clk_register_fixed_factor(NULL, core->name,
                                                parent_name, CLK_SET_RATE_PARENT,
                                                core->mult, div);
                break;
        case CLK_TYPE_SAM_PLL:
                clk = rzg2l_cpg_pll_clk_register(core, priv->clks,
                                                 priv->base, priv);
                break;
        case CLK_TYPE_SIPLL5:
                clk = rzg2l_cpg_sipll5_register(core, priv->clks, priv);
                break;
        case CLK_TYPE_DIV:
                clk = rzg2l_cpg_div_clk_register(core, priv->clks,
                                                 priv->base, priv);
                break;
        case CLK_TYPE_MUX:
                clk = rzg2l_cpg_mux_clk_register(core, priv->base, priv);
                break;
        case CLK_TYPE_SD_MUX:
                clk = rzg2l_cpg_sd_mux_clk_register(core, priv->base, priv);
                break;
        case CLK_TYPE_PLL5_4_MUX:
                clk = rzg2l_cpg_pll5_4_mux_clk_register(core, priv);
                break;
        case CLK_TYPE_DSI_DIV:
                clk = rzg2l_cpg_dsi_div_clk_register(core, priv->clks, priv);
                break;
        default:
                goto fail;
        }

        if (IS_ERR_OR_NULL(clk))
                goto fail;

        dev_dbg(dev, "Core clock %pC at %lu Hz\n", clk, clk_get_rate(clk));
        priv->clks[id] = clk;
        return;

fail:
        dev_err(dev, "Failed to register %s clock %s: %ld\n", "core",
                core->name, PTR_ERR(clk));
}

/**
 * struct mstp_clock - MSTP gating clock
 *
 * @hw: handle between common and hardware-specific interfaces
 * @off: register offset
 * @bit: ON/MON bit
 * @enabled: soft state of the clock, if it is coupled with another clock
 * @priv: CPG/MSTP private data
 * @sibling: pointer to the other coupled clock
 */
struct mstp_clock {
        struct clk_hw hw;
        u16 off;
        u8 bit;
        bool enabled;
        struct rzg2l_cpg_priv *priv;
        struct mstp_clock *sibling;
};

#define to_mod_clock(_hw) container_of(_hw, struct mstp_clock, hw)

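/*
 * CLK_ON registers take the upper 16 bits as write-enable bits for the
 * corresponding gate bits in the lower 16 bits, so a single writel() updates
 * one module without a read-modify-write. On SoCs with clock monitor
 * registers, CLK_MON (CLK_ON + 0x180) reports the state actually reached by
 * the hardware.
 */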
static int rzg2l_mod_clock_endisable(struct clk_hw *hw, bool enable)
{
        struct mstp_clock *clock = to_mod_clock(hw);
        struct rzg2l_cpg_priv *priv = clock->priv;
        unsigned int reg = clock->off;
        struct device *dev = priv->dev;
        unsigned long flags;
        u32 bitmask = BIT(clock->bit);
        u32 value;
        int error;

        if (!clock->off) {
                dev_dbg(dev, "%pC does not support ON/OFF\n",  hw->clk);
                return 0;
        }

        dev_dbg(dev, "CLK_ON %u/%pC %s\n", CLK_ON_R(reg), hw->clk,
                enable ? "ON" : "OFF");
        spin_lock_irqsave(&priv->rmw_lock, flags);

        value = bitmask << 16;
        if (enable)
                value |= bitmask;
        writel(value, priv->base + CLK_ON_R(reg));

        spin_unlock_irqrestore(&priv->rmw_lock, flags);

        if (!enable)
                return 0;

        if (!priv->info->has_clk_mon_regs)
                return 0;

        error = readl_poll_timeout_atomic(priv->base + CLK_MON_R(reg), value,
                                          value & bitmask, 0, 10);
        if (error)
                dev_err(dev, "Failed to enable CLK_ON %p\n",
                        priv->base + CLK_ON_R(reg));

        return error;
}

static int rzg2l_mod_clock_enable(struct clk_hw *hw)
{
        struct mstp_clock *clock = to_mod_clock(hw);

        if (clock->sibling) {
                struct rzg2l_cpg_priv *priv = clock->priv;
                unsigned long flags;
                bool enabled;

                spin_lock_irqsave(&priv->rmw_lock, flags);
                enabled = clock->sibling->enabled;
                clock->enabled = true;
                spin_unlock_irqrestore(&priv->rmw_lock, flags);
                if (enabled)
                        return 0;
        }

        return rzg2l_mod_clock_endisable(hw, true);
}

static void rzg2l_mod_clock_disable(struct clk_hw *hw)
{
        struct mstp_clock *clock = to_mod_clock(hw);

        if (clock->sibling) {
                struct rzg2l_cpg_priv *priv = clock->priv;
                unsigned long flags;
                bool enabled;

                spin_lock_irqsave(&priv->rmw_lock, flags);
                enabled = clock->sibling->enabled;
                clock->enabled = false;
                spin_unlock_irqrestore(&priv->rmw_lock, flags);
                if (enabled)
                        return;
        }

        rzg2l_mod_clock_endisable(hw, false);
}

static int rzg2l_mod_clock_is_enabled(struct clk_hw *hw)
{
        struct mstp_clock *clock = to_mod_clock(hw);
        struct rzg2l_cpg_priv *priv = clock->priv;
        u32 bitmask = BIT(clock->bit);
        u32 value;

        if (!clock->off) {
                dev_dbg(priv->dev, "%pC does not support ON/OFF\n",  hw->clk);
                return 1;
        }

        if (clock->sibling)
                return clock->enabled;

        if (priv->info->has_clk_mon_regs)
                value = readl(priv->base + CLK_MON_R(clock->off));
        else
                value = readl(priv->base + clock->off);

        return value & bitmask;
}

static const struct clk_ops rzg2l_mod_clock_ops = {
        .enable = rzg2l_mod_clock_enable,
        .disable = rzg2l_mod_clock_disable,
        .is_enabled = rzg2l_mod_clock_is_enabled,
};

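/*
 * Coupled module clocks share one ON/MON bit. enable/disable above only
 * touch the hardware when the soft 'enabled' state of the sibling says the
 * shared gate really needs to change; the lookup below pairs up the two clk
 * entries that map to the same register bit.
 */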
static struct mstp_clock
*rzg2l_mod_clock_get_sibling(struct mstp_clock *clock,
                             struct rzg2l_cpg_priv *priv)
{
        struct clk_hw *hw;
        unsigned int i;

        for (i = 0; i < priv->num_mod_clks; i++) {
                struct mstp_clock *clk;

                if (priv->clks[priv->num_core_clks + i] == ERR_PTR(-ENOENT))
                        continue;

                hw = __clk_get_hw(priv->clks[priv->num_core_clks + i]);
                clk = to_mod_clock(hw);
                if (clock->off == clk->off && clock->bit == clk->bit)
                        return clk;
        }

        return NULL;
}

static void __init
rzg2l_cpg_register_mod_clk(const struct rzg2l_mod_clk *mod,
                           const struct rzg2l_cpg_info *info,
                           struct rzg2l_cpg_priv *priv)
{
        struct mstp_clock *clock = NULL;
        struct device *dev = priv->dev;
        unsigned int id = mod->id;
        struct clk_init_data init;
        struct clk *parent, *clk;
        const char *parent_name;
        unsigned int i;

        WARN_DEBUG(id < priv->num_core_clks);
        WARN_DEBUG(id >= priv->num_core_clks + priv->num_mod_clks);
        WARN_DEBUG(mod->parent >= priv->num_core_clks + priv->num_mod_clks);
        WARN_DEBUG(PTR_ERR(priv->clks[id]) != -ENOENT);

        if (!mod->name) {
                /* Skip NULLified clock */
                return;
        }

        parent = priv->clks[mod->parent];
        if (IS_ERR(parent)) {
                clk = parent;
                goto fail;
        }

        clock = devm_kzalloc(dev, sizeof(*clock), GFP_KERNEL);
        if (!clock) {
                clk = ERR_PTR(-ENOMEM);
                goto fail;
        }

        init.name = mod->name;
        init.ops = &rzg2l_mod_clock_ops;
        init.flags = CLK_SET_RATE_PARENT;
        for (i = 0; i < info->num_crit_mod_clks; i++)
                if (id == info->crit_mod_clks[i]) {
                        dev_dbg(dev, "CPG %s setting CLK_IS_CRITICAL\n",
                                mod->name);
                        init.flags |= CLK_IS_CRITICAL;
                        break;
                }

        parent_name = __clk_get_name(parent);
        init.parent_names = &parent_name;
        init.num_parents = 1;

        clock->off = mod->off;
        clock->bit = mod->bit;
        clock->priv = priv;
        clock->hw.init = &init;

        clk = clk_register(NULL, &clock->hw);
        if (IS_ERR(clk))
                goto fail;

        dev_dbg(dev, "Module clock %pC at %lu Hz\n", clk, clk_get_rate(clk));
        priv->clks[id] = clk;

        if (mod->is_coupled) {
                struct mstp_clock *sibling;

                clock->enabled = rzg2l_mod_clock_is_enabled(&clock->hw);
                sibling = rzg2l_mod_clock_get_sibling(clock, priv);
                if (sibling) {
                        clock->sibling = sibling;
                        sibling->sibling = clock;
                }
        }

        return;

fail:
        dev_err(dev, "Failed to register %s clock %s: %ld\n", "module",
                mod->name, PTR_ERR(clk));
}

#define rcdev_to_priv(x)        container_of(x, struct rzg2l_cpg_priv, rcdev)

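/*
 * The CLK_RST registers mirror the CLK_ON layout: the upper 16 bits are
 * write-enable bits and the reset bits are active-low, so writing only the
 * write-enable bit asserts the reset and writing write-enable plus data
 * releases it. CLK_MRST (offset +0x180) or CPG_RST_MON monitor the reset
 * state where available.
 */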
static int rzg2l_cpg_reset(struct reset_controller_dev *rcdev,
                           unsigned long id)
{
        struct rzg2l_cpg_priv *priv = rcdev_to_priv(rcdev);
        const struct rzg2l_cpg_info *info = priv->info;
        unsigned int reg = info->resets[id].off;
        u32 dis = BIT(info->resets[id].bit);
        u32 we = dis << 16;

        dev_dbg(rcdev->dev, "reset id:%ld offset:0x%x\n", id, CLK_RST_R(reg));

        /* Reset module */
        writel(we, priv->base + CLK_RST_R(reg));

        /* Wait for at least one cycle of the RCLK clock (@ ca. 32 kHz) */
        udelay(35);

        /* Release module from reset state */
        writel(we | dis, priv->base + CLK_RST_R(reg));

        return 0;
}

static int rzg2l_cpg_assert(struct reset_controller_dev *rcdev,
                            unsigned long id)
{
        struct rzg2l_cpg_priv *priv = rcdev_to_priv(rcdev);
        const struct rzg2l_cpg_info *info = priv->info;
        unsigned int reg = info->resets[id].off;
        u32 value = BIT(info->resets[id].bit) << 16;

        dev_dbg(rcdev->dev, "assert id:%ld offset:0x%x\n", id, CLK_RST_R(reg));

        writel(value, priv->base + CLK_RST_R(reg));
        return 0;
}

static int rzg2l_cpg_deassert(struct reset_controller_dev *rcdev,
                              unsigned long id)
{
        struct rzg2l_cpg_priv *priv = rcdev_to_priv(rcdev);
        const struct rzg2l_cpg_info *info = priv->info;
        unsigned int reg = info->resets[id].off;
        u32 dis = BIT(info->resets[id].bit);
        u32 value = (dis << 16) | dis;

        dev_dbg(rcdev->dev, "deassert id:%ld offset:0x%x\n", id,
                CLK_RST_R(reg));

        writel(value, priv->base + CLK_RST_R(reg));
        return 0;
}

static int rzg2l_cpg_status(struct reset_controller_dev *rcdev,
                            unsigned long id)
{
        struct rzg2l_cpg_priv *priv = rcdev_to_priv(rcdev);
        const struct rzg2l_cpg_info *info = priv->info;
        unsigned int reg = info->resets[id].off;
        u32 bitmask = BIT(info->resets[id].bit);
        s8 monbit = info->resets[id].monbit;

        if (info->has_clk_mon_regs) {
                return !!(readl(priv->base + CLK_MRST_R(reg)) & bitmask);
        } else if (monbit >= 0) {
                u32 monbitmask = BIT(monbit);

                return !!(readl(priv->base + CPG_RST_MON) & monbitmask);
        }
        return -ENOTSUPP;
}

static const struct reset_control_ops rzg2l_cpg_reset_ops = {
        .reset = rzg2l_cpg_reset,
        .assert = rzg2l_cpg_assert,
        .deassert = rzg2l_cpg_deassert,
        .status = rzg2l_cpg_status,
};

static int rzg2l_cpg_reset_xlate(struct reset_controller_dev *rcdev,
                                 const struct of_phandle_args *reset_spec)
{
        struct rzg2l_cpg_priv *priv = rcdev_to_priv(rcdev);
        const struct rzg2l_cpg_info *info = priv->info;
        unsigned int id = reset_spec->args[0];

        if (id >= rcdev->nr_resets || !info->resets[id].off) {
                dev_err(rcdev->dev, "Invalid reset index %u\n", id);
                return -EINVAL;
        }

        return id;
}

static int rzg2l_cpg_reset_controller_register(struct rzg2l_cpg_priv *priv)
{
        priv->rcdev.ops = &rzg2l_cpg_reset_ops;
        priv->rcdev.of_node = priv->dev->of_node;
        priv->rcdev.dev = priv->dev;
        priv->rcdev.of_reset_n_cells = 1;
        priv->rcdev.of_xlate = rzg2l_cpg_reset_xlate;
        priv->rcdev.nr_resets = priv->num_resets;

        return devm_reset_controller_register(priv->dev, &priv->rcdev);
}

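/*
 * Devices attached to the (always-on) CPG clock domain get every module
 * clock from their "clocks" property added to their PM clock list, except
 * clocks listed in the per-SoC no_pm_mod_clks[] table; core clocks are not
 * PM-managed here.
 */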
static bool rzg2l_cpg_is_pm_clk(struct rzg2l_cpg_priv *priv,
                                const struct of_phandle_args *clkspec)
{
        const struct rzg2l_cpg_info *info = priv->info;
        unsigned int id;
        unsigned int i;

        if (clkspec->args_count != 2)
                return false;

        if (clkspec->args[0] != CPG_MOD)
                return false;

        id = clkspec->args[1] + info->num_total_core_clks;
        for (i = 0; i < info->num_no_pm_mod_clks; i++) {
                if (info->no_pm_mod_clks[i] == id)
                        return false;
        }

        return true;
}

static int rzg2l_cpg_attach_dev(struct generic_pm_domain *domain, struct device *dev)
{
        struct rzg2l_cpg_priv *priv = container_of(domain, struct rzg2l_cpg_priv, genpd);
        struct device_node *np = dev->of_node;
        struct of_phandle_args clkspec;
        bool once = true;
        struct clk *clk;
        int error;
        int i = 0;

        while (!of_parse_phandle_with_args(np, "clocks", "#clock-cells", i,
                                           &clkspec)) {
                if (rzg2l_cpg_is_pm_clk(priv, &clkspec)) {
                        if (once) {
                                once = false;
                                error = pm_clk_create(dev);
                                if (error) {
                                        of_node_put(clkspec.np);
                                        goto err;
                                }
                        }
                        clk = of_clk_get_from_provider(&clkspec);
                        of_node_put(clkspec.np);
                        if (IS_ERR(clk)) {
                                error = PTR_ERR(clk);
                                goto fail_destroy;
                        }

                        error = pm_clk_add_clk(dev, clk);
                        if (error) {
                                dev_err(dev, "pm_clk_add_clk failed %d\n",
                                        error);
                                goto fail_put;
                        }
                } else {
                        of_node_put(clkspec.np);
                }
                i++;
        }

        return 0;

fail_put:
        clk_put(clk);

fail_destroy:
        pm_clk_destroy(dev);
err:
        return error;
}

static void rzg2l_cpg_detach_dev(struct generic_pm_domain *unused, struct device *dev)
{
        if (!pm_clk_no_clocks(dev))
                pm_clk_destroy(dev);
}

static void rzg2l_cpg_genpd_remove(void *data)
{
        pm_genpd_remove(data);
}

static int __init rzg2l_cpg_add_clk_domain(struct rzg2l_cpg_priv *priv)
{
        struct device *dev = priv->dev;
        struct device_node *np = dev->of_node;
        struct generic_pm_domain *genpd = &priv->genpd;
        int ret;

        genpd->name = np->name;
        genpd->flags = GENPD_FLAG_PM_CLK | GENPD_FLAG_ALWAYS_ON |
                       GENPD_FLAG_ACTIVE_WAKEUP;
        genpd->attach_dev = rzg2l_cpg_attach_dev;
        genpd->detach_dev = rzg2l_cpg_detach_dev;
        ret = pm_genpd_init(genpd, &pm_domain_always_on_gov, false);
        if (ret)
                return ret;

        ret = devm_add_action_or_reset(dev, rzg2l_cpg_genpd_remove, genpd);
        if (ret)
                return ret;

        return of_genpd_add_provider_simple(np, genpd);
}

static int __init rzg2l_cpg_probe(struct platform_device *pdev)
{
        struct device *dev = &pdev->dev;
        struct device_node *np = dev->of_node;
        const struct rzg2l_cpg_info *info;
        struct rzg2l_cpg_priv *priv;
        unsigned int nclks, i;
        struct clk **clks;
        int error;

        info = of_device_get_match_data(dev);

        priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
        if (!priv)
                return -ENOMEM;

        priv->dev = dev;
        priv->info = info;
        spin_lock_init(&priv->rmw_lock);

        priv->base = devm_platform_ioremap_resource(pdev, 0);
        if (IS_ERR(priv->base))
                return PTR_ERR(priv->base);

        nclks = info->num_total_core_clks + info->num_hw_mod_clks;
        clks = devm_kmalloc_array(dev, nclks, sizeof(*clks), GFP_KERNEL);
        if (!clks)
                return -ENOMEM;

        dev_set_drvdata(dev, priv);
        priv->clks = clks;
        priv->num_core_clks = info->num_total_core_clks;
        priv->num_mod_clks = info->num_hw_mod_clks;
        priv->num_resets = info->num_resets;
        priv->last_dt_core_clk = info->last_dt_core_clk;

        for (i = 0; i < nclks; i++)
                clks[i] = ERR_PTR(-ENOENT);

        for (i = 0; i < info->num_core_clks; i++)
                rzg2l_cpg_register_core_clk(&info->core_clks[i], info, priv);

        for (i = 0; i < info->num_mod_clks; i++)
                rzg2l_cpg_register_mod_clk(&info->mod_clks[i], info, priv);

        error = of_clk_add_provider(np, rzg2l_cpg_clk_src_twocell_get, priv);
        if (error)
                return error;

        error = devm_add_action_or_reset(dev, rzg2l_cpg_del_clk_provider, np);
        if (error)
                return error;

        error = rzg2l_cpg_add_clk_domain(priv);
        if (error)
                return error;

        error = rzg2l_cpg_reset_controller_register(priv);
        if (error)
                return error;

        return 0;
}

static const struct of_device_id rzg2l_cpg_match[] = {
#ifdef CONFIG_CLK_R9A07G043
        {
                .compatible = "renesas,r9a07g043-cpg",
                .data = &r9a07g043_cpg_info,
        },
#endif
#ifdef CONFIG_CLK_R9A07G044
        {
                .compatible = "renesas,r9a07g044-cpg",
                .data = &r9a07g044_cpg_info,
        },
#endif
#ifdef CONFIG_CLK_R9A07G054
        {
                .compatible = "renesas,r9a07g054-cpg",
                .data = &r9a07g054_cpg_info,
        },
#endif
#ifdef CONFIG_CLK_R9A09G011
        {
                .compatible = "renesas,r9a09g011-cpg",
                .data = &r9a09g011_cpg_info,
        },
#endif
        { /* sentinel */ }
};

static struct platform_driver rzg2l_cpg_driver = {
        .driver         = {
                .name   = "rzg2l-cpg",
                .of_match_table = rzg2l_cpg_match,
        },
};

static int __init rzg2l_cpg_init(void)
{
        return platform_driver_probe(&rzg2l_cpg_driver, rzg2l_cpg_probe);
}

subsys_initcall(rzg2l_cpg_init);

MODULE_DESCRIPTION("Renesas RZ/G2L CPG Driver");