// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Ingenic SoC CGU driver
 *
 * Copyright (c) 2013-2015 Imagination Technologies
 * Author: Paul Burton <paul.burton@mips.com>
 */

#include <linux/bitops.h>
#include <linux/clk.h>
#include <linux/clk-provider.h>
#include <linux/clkdev.h>
#include <linux/delay.h>
#include <linux/io.h>
#include <linux/iopoll.h>
#include <linux/math64.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/time.h>

#include "cgu.h"

#define MHZ (1000 * 1000)

static inline const struct ingenic_cgu_clk_info *
to_clk_info(struct ingenic_clk *clk)
{
        return &clk->cgu->clock_info[clk->idx];
}

/**
 * ingenic_cgu_gate_get() - get the value of a clock gate register bit
 * @cgu: reference to the CGU whose registers should be read
 * @info: info struct describing the gate bit
 *
 * Retrieves the state of the clock gate bit described by info. The
 * caller must hold cgu->lock.
 *
 * Return: true if the gate bit is set, else false.
 */
static inline bool
ingenic_cgu_gate_get(struct ingenic_cgu *cgu,
                     const struct ingenic_cgu_gate_info *info)
{
        return !!(readl(cgu->base + info->reg) & BIT(info->bit))
                ^ info->clear_to_gate;
}

/**
 * ingenic_cgu_gate_set() - set the value of a clock gate register bit
 * @cgu: reference to the CGU whose registers should be modified
 * @info: info struct describing the gate bit
 * @val: true to gate the clock, false to ungate it
 *
 * Sets the given gate bit in order to gate or ungate a clock.
 *
 * The caller must hold cgu->lock.
 */
static inline void
ingenic_cgu_gate_set(struct ingenic_cgu *cgu,
                     const struct ingenic_cgu_gate_info *info, bool val)
{
        u32 clkgr = readl(cgu->base + info->reg);

        if (val ^ info->clear_to_gate)
                clkgr |= BIT(info->bit);
        else
                clkgr &= ~BIT(info->bit);

        writel(clkgr, cgu->base + info->reg);
}
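
/*
 * Note on the gate polarity handled above: because of the XOR with
 * clear_to_gate, either bit sense can describe a gate. As an illustrative
 * example, for a gate described with clear_to_gate = false:
 *
 *      ingenic_cgu_gate_set(cgu, &clk_info->gate, true);   -> sets the bit
 *      ingenic_cgu_gate_set(cgu, &clk_info->gate, false);  -> clears the bit
 *
 * while for clear_to_gate = true the same two calls have the opposite effect
 * on the register bit. The caller must hold cgu->lock in both cases.
 */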

/*
 * PLL operations
 */

static unsigned long
ingenic_pll_recalc_rate(struct clk_hw *hw, unsigned long parent_rate)
{
        struct ingenic_clk *ingenic_clk = to_ingenic_clk(hw);
        const struct ingenic_cgu_clk_info *clk_info = to_clk_info(ingenic_clk);
        struct ingenic_cgu *cgu = ingenic_clk->cgu;
        const struct ingenic_cgu_pll_info *pll_info;
        unsigned m, n, od_enc, od;
        bool bypass;
        u32 ctl;

        BUG_ON(clk_info->type != CGU_CLK_PLL);
        pll_info = &clk_info->pll;

        ctl = readl(cgu->base + pll_info->reg);

        m = (ctl >> pll_info->m_shift) & GENMASK(pll_info->m_bits - 1, 0);
        m += pll_info->m_offset;
        n = (ctl >> pll_info->n_shift) & GENMASK(pll_info->n_bits - 1, 0);
        n += pll_info->n_offset;
        od_enc = ctl >> pll_info->od_shift;
        od_enc &= GENMASK(pll_info->od_bits - 1, 0);

        ctl = readl(cgu->base + pll_info->bypass_reg);

        bypass = !pll_info->no_bypass_bit &&
                 !!(ctl & BIT(pll_info->bypass_bit));

        if (bypass)
                return parent_rate;

        for (od = 0; od < pll_info->od_max; od++) {
                if (pll_info->od_encoding[od] == od_enc)
                        break;
        }
        BUG_ON(od == pll_info->od_max);
        od++;

        return div_u64((u64)parent_rate * m * pll_info->rate_multiplier,
                n * od);
}
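
/*
 * Worked example of the recalculation above (register field values are
 * hypothetical): with parent_rate = 12 MHz, m = 50, n = 1, od = 2,
 * rate_multiplier = 1 and the bypass bit clear, the PLL output is
 * 12 MHz * 50 * 1 / (1 * 2) = 300 MHz.
 */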

static unsigned long
ingenic_pll_calc(const struct ingenic_cgu_clk_info *clk_info,
                 unsigned long rate, unsigned long parent_rate,
                 unsigned *pm, unsigned *pn, unsigned *pod)
{
        const struct ingenic_cgu_pll_info *pll_info;
        unsigned m, n, od;

        pll_info = &clk_info->pll;
        od = 1;

        /*
         * The frequency after the input divider must be between 10 and 50 MHz.
         * The highest divider yields the best resolution.
         */
        n = parent_rate / (10 * MHZ);
        n = min_t(unsigned, n, 1 << clk_info->pll.n_bits);
        n = max_t(unsigned, n, pll_info->n_offset);

        m = (rate / MHZ) * od * n / (parent_rate / MHZ);
        m = min_t(unsigned, m, 1 << clk_info->pll.m_bits);
        m = max_t(unsigned, m, pll_info->m_offset);

        if (pm)
                *pm = m;
        if (pn)
                *pn = n;
        if (pod)
                *pod = od;

        return div_u64((u64)parent_rate * m * pll_info->rate_multiplier,
                n * od);
}
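
/*
 * Illustrative example of the parameter selection above, assuming a 48 MHz
 * parent and rate_multiplier = 1: n = 48 MHz / 10 MHz = 4 puts the
 * post-divider frequency at 12 MHz, inside the 10-50 MHz window, and a
 * 600 MHz request then gives m = (600 * 1 * 4) / 48 = 50, so the function
 * returns 48 MHz * 50 / (4 * 1) = 600 MHz exactly.
 */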

static long
ingenic_pll_round_rate(struct clk_hw *hw, unsigned long req_rate,
                       unsigned long *prate)
{
        struct ingenic_clk *ingenic_clk = to_ingenic_clk(hw);
        const struct ingenic_cgu_clk_info *clk_info = to_clk_info(ingenic_clk);

        return ingenic_pll_calc(clk_info, req_rate, *prate, NULL, NULL, NULL);
}

static inline int ingenic_pll_check_stable(struct ingenic_cgu *cgu,
                                           const struct ingenic_cgu_pll_info *pll_info)
{
        u32 ctl;

        return readl_poll_timeout(cgu->base + pll_info->reg, ctl,
                                  ctl & BIT(pll_info->stable_bit),
                                  0, 100 * USEC_PER_MSEC);
}

static int
ingenic_pll_set_rate(struct clk_hw *hw, unsigned long req_rate,
                     unsigned long parent_rate)
{
        struct ingenic_clk *ingenic_clk = to_ingenic_clk(hw);
        struct ingenic_cgu *cgu = ingenic_clk->cgu;
        const struct ingenic_cgu_clk_info *clk_info = to_clk_info(ingenic_clk);
        const struct ingenic_cgu_pll_info *pll_info = &clk_info->pll;
        unsigned long rate, flags;
        unsigned int m, n, od;
        int ret = 0;
        u32 ctl;

        rate = ingenic_pll_calc(clk_info, req_rate, parent_rate,
                               &m, &n, &od);
        if (rate != req_rate)
                pr_info("ingenic-cgu: request '%s' rate %luHz, actual %luHz\n",
                        clk_info->name, req_rate, rate);

        spin_lock_irqsave(&cgu->lock, flags);
        ctl = readl(cgu->base + pll_info->reg);

        ctl &= ~(GENMASK(pll_info->m_bits - 1, 0) << pll_info->m_shift);
        ctl |= (m - pll_info->m_offset) << pll_info->m_shift;

        ctl &= ~(GENMASK(pll_info->n_bits - 1, 0) << pll_info->n_shift);
        ctl |= (n - pll_info->n_offset) << pll_info->n_shift;

        ctl &= ~(GENMASK(pll_info->od_bits - 1, 0) << pll_info->od_shift);
        ctl |= pll_info->od_encoding[od - 1] << pll_info->od_shift;

        writel(ctl, cgu->base + pll_info->reg);

        /* If the PLL is enabled, verify that it's stable */
        if (ctl & BIT(pll_info->enable_bit))
                ret = ingenic_pll_check_stable(cgu, pll_info);

        spin_unlock_irqrestore(&cgu->lock, flags);

        return ret;
}

static int ingenic_pll_enable(struct clk_hw *hw)
{
        struct ingenic_clk *ingenic_clk = to_ingenic_clk(hw);
        struct ingenic_cgu *cgu = ingenic_clk->cgu;
        const struct ingenic_cgu_clk_info *clk_info = to_clk_info(ingenic_clk);
        const struct ingenic_cgu_pll_info *pll_info = &clk_info->pll;
        unsigned long flags;
        int ret;
        u32 ctl;

        spin_lock_irqsave(&cgu->lock, flags);
        ctl = readl(cgu->base + pll_info->bypass_reg);

        ctl &= ~BIT(pll_info->bypass_bit);

        writel(ctl, cgu->base + pll_info->bypass_reg);

        ctl = readl(cgu->base + pll_info->reg);

        ctl |= BIT(pll_info->enable_bit);

        writel(ctl, cgu->base + pll_info->reg);

        ret = ingenic_pll_check_stable(cgu, pll_info);
        spin_unlock_irqrestore(&cgu->lock, flags);

        return ret;
}

static void ingenic_pll_disable(struct clk_hw *hw)
{
        struct ingenic_clk *ingenic_clk = to_ingenic_clk(hw);
        struct ingenic_cgu *cgu = ingenic_clk->cgu;
        const struct ingenic_cgu_clk_info *clk_info = to_clk_info(ingenic_clk);
        const struct ingenic_cgu_pll_info *pll_info = &clk_info->pll;
        unsigned long flags;
        u32 ctl;

        spin_lock_irqsave(&cgu->lock, flags);
        ctl = readl(cgu->base + pll_info->reg);

        ctl &= ~BIT(pll_info->enable_bit);

        writel(ctl, cgu->base + pll_info->reg);
        spin_unlock_irqrestore(&cgu->lock, flags);
}

static int ingenic_pll_is_enabled(struct clk_hw *hw)
{
        struct ingenic_clk *ingenic_clk = to_ingenic_clk(hw);
        struct ingenic_cgu *cgu = ingenic_clk->cgu;
        const struct ingenic_cgu_clk_info *clk_info = to_clk_info(ingenic_clk);
        const struct ingenic_cgu_pll_info *pll_info = &clk_info->pll;
        u32 ctl;

        ctl = readl(cgu->base + pll_info->reg);

        return !!(ctl & BIT(pll_info->enable_bit));
}

static const struct clk_ops ingenic_pll_ops = {
        .recalc_rate = ingenic_pll_recalc_rate,
        .round_rate = ingenic_pll_round_rate,
        .set_rate = ingenic_pll_set_rate,

        .enable = ingenic_pll_enable,
        .disable = ingenic_pll_disable,
        .is_enabled = ingenic_pll_is_enabled,
};
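
/*
 * Sketch of how a PLL is described to the ops above (every value below is
 * hypothetical; real tables live in the per-SoC CGU drivers). A clock_info
 * entry of type CGU_CLK_PLL supplies the register layout that
 * ingenic_pll_recalc_rate() and friends decode:
 *
 *      [CLK_PLL0] = {
 *              .name = "pll0", .type = CGU_CLK_PLL,
 *              .parents = { CLK_EXT, -1, -1, -1 },
 *              .pll = {
 *                      .reg = 0x10,
 *                      .rate_multiplier = 1,
 *                      .m_shift = 24, .m_bits = 7, .m_offset = 1,
 *                      .n_shift = 18, .n_bits = 5, .n_offset = 1,
 *                      .od_shift = 16, .od_bits = 2, .od_max = 4,
 *                      .od_encoding = pll_od_encoding,
 *                      .bypass_reg = 0x10, .bypass_bit = 9,
 *                      .enable_bit = 8, .stable_bit = 10,
 *              },
 *      },
 *
 * where CLK_PLL0, CLK_EXT and pll_od_encoding would be defined by that
 * SoC driver.
 */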

/*
 * Operations for all non-PLL clocks
 */

static u8 ingenic_clk_get_parent(struct clk_hw *hw)
{
        struct ingenic_clk *ingenic_clk = to_ingenic_clk(hw);
        const struct ingenic_cgu_clk_info *clk_info = to_clk_info(ingenic_clk);
        struct ingenic_cgu *cgu = ingenic_clk->cgu;
        u32 reg;
        u8 i, hw_idx, idx = 0;

        if (clk_info->type & CGU_CLK_MUX) {
                reg = readl(cgu->base + clk_info->mux.reg);
                hw_idx = (reg >> clk_info->mux.shift) &
                         GENMASK(clk_info->mux.bits - 1, 0);

                /*
                 * Convert the hardware index to the parent index by skipping
                 * over any -1's in the parents array.
                 */
                for (i = 0; i < hw_idx; i++) {
                        if (clk_info->parents[i] != -1)
                                idx++;
                }
        }

        return idx;
}
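
/*
 * Example of the index conversion above (the parents array is hypothetical):
 * with clk_info->parents = { CLK_A, -1, CLK_B, CLK_C }, a mux field reading
 * 2 maps to parent index 1, since the -1 slot is skipped.
 * ingenic_clk_set_parent() below performs the inverse mapping, turning
 * parent index 1 back into hardware index 2.
 */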

static int ingenic_clk_set_parent(struct clk_hw *hw, u8 idx)
{
        struct ingenic_clk *ingenic_clk = to_ingenic_clk(hw);
        const struct ingenic_cgu_clk_info *clk_info = to_clk_info(ingenic_clk);
        struct ingenic_cgu *cgu = ingenic_clk->cgu;
        unsigned long flags;
        u8 curr_idx, hw_idx, num_poss;
        u32 reg, mask;

        if (clk_info->type & CGU_CLK_MUX) {
                /*
                 * Convert the parent index to the hardware index by adding
                 * 1 for any -1 in the parents array preceding the given
                 * index. That is, we want the index of the idx'th entry in
                 * clk_info->parents which does not equal -1.
                 */
                hw_idx = curr_idx = 0;
                num_poss = 1 << clk_info->mux.bits;
                for (; hw_idx < num_poss; hw_idx++) {
                        if (clk_info->parents[hw_idx] == -1)
                                continue;
                        if (curr_idx == idx)
                                break;
                        curr_idx++;
                }

                /* idx should always be a valid parent */
                BUG_ON(curr_idx != idx);

                mask = GENMASK(clk_info->mux.bits - 1, 0);
                mask <<= clk_info->mux.shift;

                spin_lock_irqsave(&cgu->lock, flags);

                /* write the register */
                reg = readl(cgu->base + clk_info->mux.reg);
                reg &= ~mask;
                reg |= hw_idx << clk_info->mux.shift;
                writel(reg, cgu->base + clk_info->mux.reg);

                spin_unlock_irqrestore(&cgu->lock, flags);
                return 0;
        }

        return idx ? -EINVAL : 0;
}

static unsigned long
ingenic_clk_recalc_rate(struct clk_hw *hw, unsigned long parent_rate)
{
        struct ingenic_clk *ingenic_clk = to_ingenic_clk(hw);
        const struct ingenic_cgu_clk_info *clk_info = to_clk_info(ingenic_clk);
        struct ingenic_cgu *cgu = ingenic_clk->cgu;
        unsigned long rate = parent_rate;
        u32 div_reg, div;

        if (clk_info->type & CGU_CLK_DIV) {
                div_reg = readl(cgu->base + clk_info->div.reg);
                div = (div_reg >> clk_info->div.shift) &
                      GENMASK(clk_info->div.bits - 1, 0);

                if (clk_info->div.div_table)
                        div = clk_info->div.div_table[div];
                else
                        div = (div + 1) * clk_info->div.div;

                rate /= div;
        } else if (clk_info->type & CGU_CLK_FIXDIV) {
                rate /= clk_info->fixdiv.div;
        }

        return rate;
}
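
/*
 * Illustrative example of the divider decode above: without a div_table, a
 * raw field value of 3 with clk_info->div.div = 1 means div = (3 + 1) * 1
 * = 4, so a 200 MHz parent yields 50 MHz. With a div_table, the raw field
 * value is instead used as an index into that table.
 */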

static unsigned int
ingenic_clk_calc_hw_div(const struct ingenic_cgu_clk_info *clk_info,
                        unsigned int div)
{
        unsigned int i;

        for (i = 0; i < (1 << clk_info->div.bits)
                                && clk_info->div.div_table[i]; i++) {
                if (clk_info->div.div_table[i] >= div)
                        return i;
        }

        return i - 1;
}

static unsigned
ingenic_clk_calc_div(const struct ingenic_cgu_clk_info *clk_info,
                     unsigned long parent_rate, unsigned long req_rate)
{
        unsigned int div, hw_div;

        /* calculate the divider */
        div = DIV_ROUND_UP(parent_rate, req_rate);

        if (clk_info->div.div_table) {
                hw_div = ingenic_clk_calc_hw_div(clk_info, div);

                return clk_info->div.div_table[hw_div];
        }

        /* Impose hardware constraints */
        div = min_t(unsigned, div, 1 << clk_info->div.bits);
        div = max_t(unsigned, div, 1);

        /*
         * If the divider value itself must be divided before being written to
         * the divider register, we must ensure we don't have any bits set that
         * would be lost as a result of doing so.
         */
        div /= clk_info->div.div;
        div *= clk_info->div.div;

        return div;
}
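
/*
 * Worked example of the constraint above (values are illustrative):
 * requesting 20 MHz from a 100 MHz parent gives div = DIV_ROUND_UP(100, 20)
 * = 5. If clk_info->div.div is 2 the hardware stores div / 2, so div is
 * rounded down to 5 / 2 * 2 = 4 and the closest achievable rate becomes
 * 25 MHz.
 */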

static long
ingenic_clk_round_rate(struct clk_hw *hw, unsigned long req_rate,
                       unsigned long *parent_rate)
{
        struct ingenic_clk *ingenic_clk = to_ingenic_clk(hw);
        const struct ingenic_cgu_clk_info *clk_info = to_clk_info(ingenic_clk);
        unsigned int div = 1;

        if (clk_info->type & CGU_CLK_DIV)
                div = ingenic_clk_calc_div(clk_info, *parent_rate, req_rate);
        else if (clk_info->type & CGU_CLK_FIXDIV)
                div = clk_info->fixdiv.div;

        return DIV_ROUND_UP(*parent_rate, div);
}

static inline int ingenic_clk_check_stable(struct ingenic_cgu *cgu,
                                           const struct ingenic_cgu_clk_info *clk_info)
{
        u32 reg;

        return readl_poll_timeout(cgu->base + clk_info->div.reg, reg,
                                  !(reg & BIT(clk_info->div.busy_bit)),
                                  0, 100 * USEC_PER_MSEC);
}

static int
ingenic_clk_set_rate(struct clk_hw *hw, unsigned long req_rate,
                     unsigned long parent_rate)
{
        struct ingenic_clk *ingenic_clk = to_ingenic_clk(hw);
        const struct ingenic_cgu_clk_info *clk_info = to_clk_info(ingenic_clk);
        struct ingenic_cgu *cgu = ingenic_clk->cgu;
        unsigned long rate, flags;
        unsigned int hw_div, div;
        u32 reg, mask;
        int ret = 0;

        if (clk_info->type & CGU_CLK_DIV) {
                div = ingenic_clk_calc_div(clk_info, parent_rate, req_rate);
                rate = DIV_ROUND_UP(parent_rate, div);

                if (rate != req_rate)
                        return -EINVAL;

                if (clk_info->div.div_table)
                        hw_div = ingenic_clk_calc_hw_div(clk_info, div);
                else
                        hw_div = ((div / clk_info->div.div) - 1);

                spin_lock_irqsave(&cgu->lock, flags);
                reg = readl(cgu->base + clk_info->div.reg);

                /* update the divider */
                mask = GENMASK(clk_info->div.bits - 1, 0);
                reg &= ~(mask << clk_info->div.shift);
                reg |= hw_div << clk_info->div.shift;

                /* clear the stop bit */
                if (clk_info->div.stop_bit != -1)
                        reg &= ~BIT(clk_info->div.stop_bit);

                /* set the change enable bit */
                if (clk_info->div.ce_bit != -1)
                        reg |= BIT(clk_info->div.ce_bit);

                /* update the hardware */
                writel(reg, cgu->base + clk_info->div.reg);

                /* wait for the change to take effect */
                if (clk_info->div.busy_bit != -1)
                        ret = ingenic_clk_check_stable(cgu, clk_info);

                spin_unlock_irqrestore(&cgu->lock, flags);
                return ret;
        }

        return -EINVAL;
}

static int ingenic_clk_enable(struct clk_hw *hw)
{
        struct ingenic_clk *ingenic_clk = to_ingenic_clk(hw);
        const struct ingenic_cgu_clk_info *clk_info = to_clk_info(ingenic_clk);
        struct ingenic_cgu *cgu = ingenic_clk->cgu;
        unsigned long flags;

        if (clk_info->type & CGU_CLK_GATE) {
                /* ungate the clock */
                spin_lock_irqsave(&cgu->lock, flags);
                ingenic_cgu_gate_set(cgu, &clk_info->gate, false);
                spin_unlock_irqrestore(&cgu->lock, flags);

                if (clk_info->gate.delay_us)
                        udelay(clk_info->gate.delay_us);
        }

        return 0;
}

static void ingenic_clk_disable(struct clk_hw *hw)
{
        struct ingenic_clk *ingenic_clk = to_ingenic_clk(hw);
        const struct ingenic_cgu_clk_info *clk_info = to_clk_info(ingenic_clk);
        struct ingenic_cgu *cgu = ingenic_clk->cgu;
        unsigned long flags;

        if (clk_info->type & CGU_CLK_GATE) {
                /* gate the clock */
                spin_lock_irqsave(&cgu->lock, flags);
                ingenic_cgu_gate_set(cgu, &clk_info->gate, true);
                spin_unlock_irqrestore(&cgu->lock, flags);
        }
}

static int ingenic_clk_is_enabled(struct clk_hw *hw)
{
        struct ingenic_clk *ingenic_clk = to_ingenic_clk(hw);
        const struct ingenic_cgu_clk_info *clk_info = to_clk_info(ingenic_clk);
        struct ingenic_cgu *cgu = ingenic_clk->cgu;
        int enabled = 1;

        if (clk_info->type & CGU_CLK_GATE)
                enabled = !ingenic_cgu_gate_get(cgu, &clk_info->gate);

        return enabled;
}

static const struct clk_ops ingenic_clk_ops = {
        .get_parent = ingenic_clk_get_parent,
        .set_parent = ingenic_clk_set_parent,

        .recalc_rate = ingenic_clk_recalc_rate,
        .round_rate = ingenic_clk_round_rate,
        .set_rate = ingenic_clk_set_rate,

        .enable = ingenic_clk_enable,
        .disable = ingenic_clk_disable,
        .is_enabled = ingenic_clk_is_enabled,
};
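
/*
 * Consumer-side sketch (device and clock names are illustrative): requests
 * made through the standard clk API land in the ops tables above, e.g.
 *
 *      struct clk *clk = devm_clk_get(dev, "mmc");
 *
 *      if (IS_ERR(clk))
 *              return PTR_ERR(clk);
 *      clk_set_rate(clk, 48000000);    (ends up in round_rate()/set_rate())
 *      clk_prepare_enable(clk);        (ends up in enable())
 */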

/*
 * Setup functions.
 */

static int ingenic_register_clock(struct ingenic_cgu *cgu, unsigned idx)
{
        const struct ingenic_cgu_clk_info *clk_info = &cgu->clock_info[idx];
        struct clk_init_data clk_init;
        struct ingenic_clk *ingenic_clk = NULL;
        struct clk *clk, *parent;
        const char *parent_names[4];
        unsigned caps, i, num_possible;
        int err = -EINVAL;

        BUILD_BUG_ON(ARRAY_SIZE(clk_info->parents) > ARRAY_SIZE(parent_names));

        if (clk_info->type == CGU_CLK_EXT) {
                clk = of_clk_get_by_name(cgu->np, clk_info->name);
                if (IS_ERR(clk)) {
                        pr_err("%s: no external clock '%s' provided\n",
                               __func__, clk_info->name);
                        err = -ENODEV;
                        goto out;
                }
                err = clk_register_clkdev(clk, clk_info->name, NULL);
                if (err) {
                        clk_put(clk);
                        goto out;
                }
                cgu->clocks.clks[idx] = clk;
                return 0;
        }

        if (!clk_info->type) {
                pr_err("%s: no clock type specified for '%s'\n", __func__,
                       clk_info->name);
                goto out;
        }

        ingenic_clk = kzalloc(sizeof(*ingenic_clk), GFP_KERNEL);
        if (!ingenic_clk) {
                err = -ENOMEM;
                goto out;
        }

        ingenic_clk->hw.init = &clk_init;
        ingenic_clk->cgu = cgu;
        ingenic_clk->idx = idx;

        clk_init.name = clk_info->name;
        clk_init.flags = 0;
        clk_init.parent_names = parent_names;

        caps = clk_info->type;

        if (caps & (CGU_CLK_MUX | CGU_CLK_CUSTOM)) {
                clk_init.num_parents = 0;

                if (caps & CGU_CLK_MUX)
                        num_possible = 1 << clk_info->mux.bits;
                else
                        num_possible = ARRAY_SIZE(clk_info->parents);

                for (i = 0; i < num_possible; i++) {
                        if (clk_info->parents[i] == -1)
                                continue;

                        parent = cgu->clocks.clks[clk_info->parents[i]];
                        parent_names[clk_init.num_parents] =
                                __clk_get_name(parent);
                        clk_init.num_parents++;
                }

                BUG_ON(!clk_init.num_parents);
                BUG_ON(clk_init.num_parents > ARRAY_SIZE(parent_names));
        } else {
                BUG_ON(clk_info->parents[0] == -1);
                clk_init.num_parents = 1;
                parent = cgu->clocks.clks[clk_info->parents[0]];
                parent_names[0] = __clk_get_name(parent);
        }

        if (caps & CGU_CLK_CUSTOM) {
                clk_init.ops = clk_info->custom.clk_ops;

                caps &= ~CGU_CLK_CUSTOM;

                if (caps) {
                        pr_err("%s: custom clock may not be combined with type 0x%x\n",
                               __func__, caps);
                        goto out;
                }
        } else if (caps & CGU_CLK_PLL) {
                clk_init.ops = &ingenic_pll_ops;

                caps &= ~CGU_CLK_PLL;

                if (caps) {
                        pr_err("%s: PLL may not be combined with type 0x%x\n",
                               __func__, caps);
                        goto out;
                }
        } else {
                clk_init.ops = &ingenic_clk_ops;
        }

        /* nothing to do for gates or fixed dividers */
        caps &= ~(CGU_CLK_GATE | CGU_CLK_FIXDIV);

        if (caps & CGU_CLK_MUX) {
                if (!(caps & CGU_CLK_MUX_GLITCHFREE))
                        clk_init.flags |= CLK_SET_PARENT_GATE;

                caps &= ~(CGU_CLK_MUX | CGU_CLK_MUX_GLITCHFREE);
        }

        if (caps & CGU_CLK_DIV) {
                caps &= ~CGU_CLK_DIV;
        } else {
                /* pass rate changes to the parent clock */
                clk_init.flags |= CLK_SET_RATE_PARENT;
        }

        if (caps) {
                pr_err("%s: unknown clock type 0x%x\n", __func__, caps);
                goto out;
        }

        clk = clk_register(NULL, &ingenic_clk->hw);
        if (IS_ERR(clk)) {
                pr_err("%s: failed to register clock '%s'\n", __func__,
                       clk_info->name);
                err = PTR_ERR(clk);
                goto out;
        }

        err = clk_register_clkdev(clk, clk_info->name, NULL);
        if (err)
                goto out;

        cgu->clocks.clks[idx] = clk;
out:
        if (err)
                kfree(ingenic_clk);
        return err;
}

struct ingenic_cgu *
ingenic_cgu_new(const struct ingenic_cgu_clk_info *clock_info,
                unsigned num_clocks, struct device_node *np)
{
        struct ingenic_cgu *cgu;

        cgu = kzalloc(sizeof(*cgu), GFP_KERNEL);
        if (!cgu)
                goto err_out;

        cgu->base = of_iomap(np, 0);
        if (!cgu->base) {
                pr_err("%s: failed to map CGU registers\n", __func__);
                goto err_out_free;
        }

        cgu->np = np;
        cgu->clock_info = clock_info;
        cgu->clocks.clk_num = num_clocks;

        spin_lock_init(&cgu->lock);

        return cgu;

err_out_free:
        kfree(cgu);
err_out:
        return NULL;
}

int ingenic_cgu_register_clocks(struct ingenic_cgu *cgu)
{
        unsigned i;
        int err;

        cgu->clocks.clks = kcalloc(cgu->clocks.clk_num, sizeof(struct clk *),
                                   GFP_KERNEL);
        if (!cgu->clocks.clks) {
                err = -ENOMEM;
                goto err_out;
        }

        for (i = 0; i < cgu->clocks.clk_num; i++) {
                err = ingenic_register_clock(cgu, i);
                if (err)
                        goto err_out_unregister;
        }

        err = of_clk_add_provider(cgu->np, of_clk_src_onecell_get,
                                  &cgu->clocks);
        if (err)
                goto err_out_unregister;

        return 0;

err_out_unregister:
        for (i = 0; i < cgu->clocks.clk_num; i++) {
                if (!cgu->clocks.clks[i])
                        continue;
                if (cgu->clock_info[i].type & CGU_CLK_EXT)
                        clk_put(cgu->clocks.clks[i]);
                else
                        clk_unregister(cgu->clocks.clks[i]);
        }
        kfree(cgu->clocks.clks);
err_out:
        return err;
}
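
/*
 * Provider-side sketch (the clock table, compatible string and function
 * names are illustrative): a SoC-specific CGU driver builds its clock_info
 * table and then calls the two entry points above from an of_clk init hook,
 * along the lines of:
 *
 *      static void __init example_cgu_init(struct device_node *np)
 *      {
 *              struct ingenic_cgu *cgu;
 *
 *              cgu = ingenic_cgu_new(example_cgu_clocks,
 *                                    ARRAY_SIZE(example_cgu_clocks), np);
 *              if (!cgu) {
 *                      pr_err("%s: failed to initialise CGU\n", __func__);
 *                      return;
 *              }
 *
 *              if (ingenic_cgu_register_clocks(cgu))
 *                      pr_err("%s: failed to register CGU clocks\n", __func__);
 *      }
 *      CLK_OF_DECLARE_DRIVER(example_cgu, "ingenic,example-cgu",
 *                            example_cgu_init);
 */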