// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Ingenic SoC CGU driver
 *
 * Copyright (c) 2013-2015 Imagination Technologies
 * Author: Paul Burton <paul.burton@mips.com>
 */

#include <linux/bitops.h>
#include <linux/clk.h>
#include <linux/clk-provider.h>
#include <linux/clkdev.h>
#include <linux/delay.h>
#include <linux/io.h>
#include <linux/iopoll.h>
#include <linux/math64.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/time.h>

#include "cgu.h"

#define MHZ (1000 * 1000)

static inline const struct ingenic_cgu_clk_info *
to_clk_info(struct ingenic_clk *clk)
{
	return &clk->cgu->clock_info[clk->idx];
}

/**
 * ingenic_cgu_gate_get() - get the state of a clock gate bit
 * @cgu: reference to the CGU whose registers should be read
 * @info: info struct describing the gate bit
 *
 * Retrieves the state of the clock gate bit described by info. The
 * caller must hold cgu->lock.
 *
 * Return: true if the clock is gated (taking clear_to_gate polarity into
 * account), otherwise false.
 */
static inline bool
ingenic_cgu_gate_get(struct ingenic_cgu *cgu,
		     const struct ingenic_cgu_gate_info *info)
{
	return !!(readl(cgu->base + info->reg) & BIT(info->bit))
		^ info->clear_to_gate;
}

/**
 * ingenic_cgu_gate_set() - set the value of clock gate register bit
 * @cgu: reference to the CGU whose registers should be modified
 * @info: info struct describing the gate bit
 * @val: non-zero to gate a clock, otherwise zero
 *
 * Sets the given gate bit in order to gate or ungate a clock.
 *
 * The caller must hold cgu->lock.
 */
static inline void
ingenic_cgu_gate_set(struct ingenic_cgu *cgu,
		     const struct ingenic_cgu_gate_info *info, bool val)
{
	u32 clkgr = readl(cgu->base + info->reg);

	if (val ^ info->clear_to_gate)
		clkgr |= BIT(info->bit);
	else
		clkgr &= ~BIT(info->bit);

	writel(clkgr, cgu->base + info->reg);
}

/*
 * PLL operations
 */

static unsigned long
ingenic_pll_recalc_rate(struct clk_hw *hw, unsigned long parent_rate)
{
	struct ingenic_clk *ingenic_clk = to_ingenic_clk(hw);
	const struct ingenic_cgu_clk_info *clk_info = to_clk_info(ingenic_clk);
	struct ingenic_cgu *cgu = ingenic_clk->cgu;
	const struct ingenic_cgu_pll_info *pll_info;
	unsigned m, n, od_enc, od;
	bool bypass;
	u32 ctl;

	BUG_ON(clk_info->type != CGU_CLK_PLL);
	pll_info = &clk_info->pll;

	ctl = readl(cgu->base + pll_info->reg);

	m = (ctl >> pll_info->m_shift) & GENMASK(pll_info->m_bits - 1, 0);
	m += pll_info->m_offset;
	n = (ctl >> pll_info->n_shift) & GENMASK(pll_info->n_bits - 1, 0);
	n += pll_info->n_offset;
	od_enc = ctl >> pll_info->od_shift;
	od_enc &= GENMASK(pll_info->od_bits - 1, 0);

	if (pll_info->bypass_bit >= 0) {
		ctl = readl(cgu->base + pll_info->bypass_reg);

		bypass = !!(ctl & BIT(pll_info->bypass_bit));

		if (bypass)
			return parent_rate;
	}

	for (od = 0; od < pll_info->od_max; od++) {
		if (pll_info->od_encoding[od] == od_enc)
			break;
	}
	BUG_ON(od == pll_info->od_max);
	od++;
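
	/*
	 * The PLL output runs at (parent / N) * M / OD, scaled by the
	 * SoC-specific rate_multiplier.
	 */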
	return div_u64((u64)parent_rate * m * pll_info->rate_multiplier,
		n * od);
}

static void
ingenic_pll_calc_m_n_od(const struct ingenic_cgu_pll_info *pll_info,
			unsigned long rate, unsigned long parent_rate,
			unsigned int *pm, unsigned int *pn, unsigned int *pod)
{
	unsigned int m, n, od = 1;

	/*
	 * The frequency after the input divider must be between 10 and 50 MHz.
	 * The highest divider yields the best resolution.
	 */
	n = parent_rate / (10 * MHZ);
	n = min_t(unsigned int, n, 1 << pll_info->n_bits);
	n = max_t(unsigned int, n, pll_info->n_offset);

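	/*
	 * Choose the feedback multiplier so that (parent / n) * m
	 * approximates the requested rate; od is fixed at 1 here.
	 */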
	m = (rate / MHZ) * od * n / (parent_rate / MHZ);
	m = min_t(unsigned int, m, 1 << pll_info->m_bits);
	m = max_t(unsigned int, m, pll_info->m_offset);

	*pm = m;
	*pn = n;
	*pod = od;
}

static unsigned long
ingenic_pll_calc(const struct ingenic_cgu_clk_info *clk_info,
		 unsigned long rate, unsigned long parent_rate,
		 unsigned int *pm, unsigned int *pn, unsigned int *pod)
{
	const struct ingenic_cgu_pll_info *pll_info = &clk_info->pll;
	unsigned int m, n, od;

	if (pll_info->calc_m_n_od)
		(*pll_info->calc_m_n_od)(pll_info, rate, parent_rate, &m, &n, &od);
	else
		ingenic_pll_calc_m_n_od(pll_info, rate, parent_rate, &m, &n, &od);

	if (pm)
		*pm = m;
	if (pn)
		*pn = n;
	if (pod)
		*pod = od;

	return div_u64((u64)parent_rate * m * pll_info->rate_multiplier,
		n * od);
}

static long
ingenic_pll_round_rate(struct clk_hw *hw, unsigned long req_rate,
		       unsigned long *prate)
{
	struct ingenic_clk *ingenic_clk = to_ingenic_clk(hw);
	const struct ingenic_cgu_clk_info *clk_info = to_clk_info(ingenic_clk);

	return ingenic_pll_calc(clk_info, req_rate, *prate, NULL, NULL, NULL);
}

static inline int ingenic_pll_check_stable(struct ingenic_cgu *cgu,
					   const struct ingenic_cgu_pll_info *pll_info)
{
	u32 ctl;

	return readl_poll_timeout(cgu->base + pll_info->reg, ctl,
				  ctl & BIT(pll_info->stable_bit),
				  0, 100 * USEC_PER_MSEC);
}

static int
ingenic_pll_set_rate(struct clk_hw *hw, unsigned long req_rate,
		     unsigned long parent_rate)
{
	struct ingenic_clk *ingenic_clk = to_ingenic_clk(hw);
	struct ingenic_cgu *cgu = ingenic_clk->cgu;
	const struct ingenic_cgu_clk_info *clk_info = to_clk_info(ingenic_clk);
	const struct ingenic_cgu_pll_info *pll_info = &clk_info->pll;
	unsigned long rate, flags;
	unsigned int m, n, od;
	int ret = 0;
	u32 ctl;

	rate = ingenic_pll_calc(clk_info, req_rate, parent_rate,
			       &m, &n, &od);
	if (rate != req_rate)
		pr_info("ingenic-cgu: request '%s' rate %luHz, actual %luHz\n",
			clk_info->name, req_rate, rate);

	spin_lock_irqsave(&cgu->lock, flags);
	ctl = readl(cgu->base + pll_info->reg);

	ctl &= ~(GENMASK(pll_info->m_bits - 1, 0) << pll_info->m_shift);
	ctl |= (m - pll_info->m_offset) << pll_info->m_shift;

	ctl &= ~(GENMASK(pll_info->n_bits - 1, 0) << pll_info->n_shift);
	ctl |= (n - pll_info->n_offset) << pll_info->n_shift;

	ctl &= ~(GENMASK(pll_info->od_bits - 1, 0) << pll_info->od_shift);
	ctl |= pll_info->od_encoding[od - 1] << pll_info->od_shift;

	writel(ctl, cgu->base + pll_info->reg);

	/* If the PLL is enabled, verify that it's stable */
	if (ctl & BIT(pll_info->enable_bit))
		ret = ingenic_pll_check_stable(cgu, pll_info);

	spin_unlock_irqrestore(&cgu->lock, flags);

	return ret;
}

static int ingenic_pll_enable(struct clk_hw *hw)
{
	struct ingenic_clk *ingenic_clk = to_ingenic_clk(hw);
	struct ingenic_cgu *cgu = ingenic_clk->cgu;
	const struct ingenic_cgu_clk_info *clk_info = to_clk_info(ingenic_clk);
	const struct ingenic_cgu_pll_info *pll_info = &clk_info->pll;
	unsigned long flags;
	int ret;
	u32 ctl;

	spin_lock_irqsave(&cgu->lock, flags);
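	/* Take the PLL out of bypass first, if a bypass bit is implemented. */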
	if (pll_info->bypass_bit >= 0) {
		ctl = readl(cgu->base + pll_info->bypass_reg);

		ctl &= ~BIT(pll_info->bypass_bit);

		writel(ctl, cgu->base + pll_info->bypass_reg);
	}

	ctl = readl(cgu->base + pll_info->reg);

	ctl |= BIT(pll_info->enable_bit);

	writel(ctl, cgu->base + pll_info->reg);

	ret = ingenic_pll_check_stable(cgu, pll_info);
	spin_unlock_irqrestore(&cgu->lock, flags);

	return ret;
}

static void ingenic_pll_disable(struct clk_hw *hw)
{
	struct ingenic_clk *ingenic_clk = to_ingenic_clk(hw);
	struct ingenic_cgu *cgu = ingenic_clk->cgu;
	const struct ingenic_cgu_clk_info *clk_info = to_clk_info(ingenic_clk);
	const struct ingenic_cgu_pll_info *pll_info = &clk_info->pll;
	unsigned long flags;
	u32 ctl;

	spin_lock_irqsave(&cgu->lock, flags);
	ctl = readl(cgu->base + pll_info->reg);

	ctl &= ~BIT(pll_info->enable_bit);

	writel(ctl, cgu->base + pll_info->reg);
	spin_unlock_irqrestore(&cgu->lock, flags);
}

static int ingenic_pll_is_enabled(struct clk_hw *hw)
{
	struct ingenic_clk *ingenic_clk = to_ingenic_clk(hw);
	struct ingenic_cgu *cgu = ingenic_clk->cgu;
	const struct ingenic_cgu_clk_info *clk_info = to_clk_info(ingenic_clk);
	const struct ingenic_cgu_pll_info *pll_info = &clk_info->pll;
	u32 ctl;

	ctl = readl(cgu->base + pll_info->reg);

	return !!(ctl & BIT(pll_info->enable_bit));
}

static const struct clk_ops ingenic_pll_ops = {
	.recalc_rate = ingenic_pll_recalc_rate,
	.round_rate = ingenic_pll_round_rate,
	.set_rate = ingenic_pll_set_rate,

	.enable = ingenic_pll_enable,
	.disable = ingenic_pll_disable,
	.is_enabled = ingenic_pll_is_enabled,
};

/*
 * Operations for all non-PLL clocks
 */

static u8 ingenic_clk_get_parent(struct clk_hw *hw)
{
	struct ingenic_clk *ingenic_clk = to_ingenic_clk(hw);
	const struct ingenic_cgu_clk_info *clk_info = to_clk_info(ingenic_clk);
	struct ingenic_cgu *cgu = ingenic_clk->cgu;
	u32 reg;
	u8 i, hw_idx, idx = 0;

	if (clk_info->type & CGU_CLK_MUX) {
		reg = readl(cgu->base + clk_info->mux.reg);
		hw_idx = (reg >> clk_info->mux.shift) &
			 GENMASK(clk_info->mux.bits - 1, 0);

		/*
		 * Convert the hardware index to the parent index by skipping
		 * over any -1's in the parents array.
		 */
		for (i = 0; i < hw_idx; i++) {
			if (clk_info->parents[i] != -1)
				idx++;
		}
	}

	return idx;
}

static int ingenic_clk_set_parent(struct clk_hw *hw, u8 idx)
{
	struct ingenic_clk *ingenic_clk = to_ingenic_clk(hw);
	const struct ingenic_cgu_clk_info *clk_info = to_clk_info(ingenic_clk);
	struct ingenic_cgu *cgu = ingenic_clk->cgu;
	unsigned long flags;
	u8 curr_idx, hw_idx, num_poss;
	u32 reg, mask;

	if (clk_info->type & CGU_CLK_MUX) {
		/*
		 * Convert the parent index to the hardware index by adding
		 * 1 for any -1 in the parents array preceding the given
		 * index. That is, we want the index of the idx'th entry in
		 * clk_info->parents which does not equal -1.
		 */
		hw_idx = curr_idx = 0;
		num_poss = 1 << clk_info->mux.bits;
		for (; hw_idx < num_poss; hw_idx++) {
			if (clk_info->parents[hw_idx] == -1)
				continue;
			if (curr_idx == idx)
				break;
			curr_idx++;
		}

		/* idx should always be a valid parent */
		BUG_ON(curr_idx != idx);

		mask = GENMASK(clk_info->mux.bits - 1, 0);
		mask <<= clk_info->mux.shift;

		spin_lock_irqsave(&cgu->lock, flags);

		/* write the register */
		reg = readl(cgu->base + clk_info->mux.reg);
		reg &= ~mask;
		reg |= hw_idx << clk_info->mux.shift;
		writel(reg, cgu->base + clk_info->mux.reg);

		spin_unlock_irqrestore(&cgu->lock, flags);
		return 0;
	}

	return idx ? -EINVAL : 0;
}

static unsigned long
ingenic_clk_recalc_rate(struct clk_hw *hw, unsigned long parent_rate)
{
	struct ingenic_clk *ingenic_clk = to_ingenic_clk(hw);
	const struct ingenic_cgu_clk_info *clk_info = to_clk_info(ingenic_clk);
	struct ingenic_cgu *cgu = ingenic_clk->cgu;
	unsigned long rate = parent_rate;
	u32 div_reg, div;
	u8 parent;

	if (clk_info->type & CGU_CLK_DIV) {
		parent = ingenic_clk_get_parent(hw);

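		/*
		 * Parents listed in bypass_mask feed the output directly,
		 * skipping the divider.
		 */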
		if (!(clk_info->div.bypass_mask & BIT(parent))) {
			div_reg = readl(cgu->base + clk_info->div.reg);
			div = (div_reg >> clk_info->div.shift) &
			      GENMASK(clk_info->div.bits - 1, 0);

			if (clk_info->div.div_table)
				div = clk_info->div.div_table[div];
			else
				div = (div + 1) * clk_info->div.div;

			rate /= div;
		}
	} else if (clk_info->type & CGU_CLK_FIXDIV) {
		rate /= clk_info->fixdiv.div;
	}

	return rate;
}

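/*
 * Find the hardware encoding for a requested divider value: the index of the
 * smallest entry in the divider table that is at least div (index 0 if no
 * entry is large enough).
 */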
static unsigned int
ingenic_clk_calc_hw_div(const struct ingenic_cgu_clk_info *clk_info,
			unsigned int div)
{
	unsigned int i, best_i = 0, best = (unsigned int)-1;

	for (i = 0; i < (1 << clk_info->div.bits)
				&& clk_info->div.div_table[i]; i++) {
		if (clk_info->div.div_table[i] >= div &&
		    clk_info->div.div_table[i] < best) {
			best = clk_info->div.div_table[i];
			best_i = i;

			if (div == best)
				break;
		}
	}

	return best_i;
}

static unsigned
ingenic_clk_calc_div(struct clk_hw *hw,
		     const struct ingenic_cgu_clk_info *clk_info,
		     unsigned long parent_rate, unsigned long req_rate)
{
	unsigned int div, hw_div;
	u8 parent;

	parent = ingenic_clk_get_parent(hw);
	if (clk_info->div.bypass_mask & BIT(parent))
		return 1;

	/* calculate the divide */
	div = DIV_ROUND_UP(parent_rate, req_rate);

	if (clk_info->div.div_table) {
		hw_div = ingenic_clk_calc_hw_div(clk_info, div);

		return clk_info->div.div_table[hw_div];
	}

	/* Impose hardware constraints */
	div = clamp_t(unsigned int, div, clk_info->div.div,
		      clk_info->div.div << clk_info->div.bits);

	/*
	 * If the divider value itself must be divided before being written to
	 * the divider register, we must ensure we don't have any bits set that
	 * would be lost as a result of doing so.
	 */
	div = DIV_ROUND_UP(div, clk_info->div.div);
	div *= clk_info->div.div;

	return div;
}

static long
ingenic_clk_round_rate(struct clk_hw *hw, unsigned long req_rate,
		       unsigned long *parent_rate)
{
	struct ingenic_clk *ingenic_clk = to_ingenic_clk(hw);
	const struct ingenic_cgu_clk_info *clk_info = to_clk_info(ingenic_clk);
	unsigned int div = 1;

	if (clk_info->type & CGU_CLK_DIV)
		div = ingenic_clk_calc_div(hw, clk_info, *parent_rate, req_rate);
	else if (clk_info->type & CGU_CLK_FIXDIV)
		div = clk_info->fixdiv.div;
	else if (clk_hw_can_set_rate_parent(hw))
		*parent_rate = req_rate;

	return DIV_ROUND_UP(*parent_rate, div);
}

static inline int ingenic_clk_check_stable(struct ingenic_cgu *cgu,
					   const struct ingenic_cgu_clk_info *clk_info)
{
	u32 reg;

	return readl_poll_timeout(cgu->base + clk_info->div.reg, reg,
				  !(reg & BIT(clk_info->div.busy_bit)),
				  0, 100 * USEC_PER_MSEC);
}

static int
ingenic_clk_set_rate(struct clk_hw *hw, unsigned long req_rate,
		     unsigned long parent_rate)
{
	struct ingenic_clk *ingenic_clk = to_ingenic_clk(hw);
	const struct ingenic_cgu_clk_info *clk_info = to_clk_info(ingenic_clk);
	struct ingenic_cgu *cgu = ingenic_clk->cgu;
	unsigned long rate, flags;
	unsigned int hw_div, div;
	u32 reg, mask;
	int ret = 0;

	if (clk_info->type & CGU_CLK_DIV) {
		div = ingenic_clk_calc_div(hw, clk_info, parent_rate, req_rate);
		rate = DIV_ROUND_UP(parent_rate, div);

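		/*
		 * Only rates that the divider can produce exactly (i.e. those
		 * returned by round_rate) are accepted.
		 */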
		if (rate != req_rate)
			return -EINVAL;

		if (clk_info->div.div_table)
			hw_div = ingenic_clk_calc_hw_div(clk_info, div);
		else
			hw_div = ((div / clk_info->div.div) - 1);

		spin_lock_irqsave(&cgu->lock, flags);
		reg = readl(cgu->base + clk_info->div.reg);

		/* update the divide */
		mask = GENMASK(clk_info->div.bits - 1, 0);
		reg &= ~(mask << clk_info->div.shift);
		reg |= hw_div << clk_info->div.shift;

		/* clear the stop bit */
		if (clk_info->div.stop_bit != -1)
			reg &= ~BIT(clk_info->div.stop_bit);

		/* set the change enable bit */
		if (clk_info->div.ce_bit != -1)
			reg |= BIT(clk_info->div.ce_bit);

		/* update the hardware */
		writel(reg, cgu->base + clk_info->div.reg);

		/* wait for the change to take effect */
		if (clk_info->div.busy_bit != -1)
			ret = ingenic_clk_check_stable(cgu, clk_info);

		spin_unlock_irqrestore(&cgu->lock, flags);
		return ret;
	}

	return -EINVAL;
}

static int ingenic_clk_enable(struct clk_hw *hw)
{
	struct ingenic_clk *ingenic_clk = to_ingenic_clk(hw);
	const struct ingenic_cgu_clk_info *clk_info = to_clk_info(ingenic_clk);
	struct ingenic_cgu *cgu = ingenic_clk->cgu;
	unsigned long flags;

	if (clk_info->type & CGU_CLK_GATE) {
		/* ungate the clock */
		spin_lock_irqsave(&cgu->lock, flags);
		ingenic_cgu_gate_set(cgu, &clk_info->gate, false);
		spin_unlock_irqrestore(&cgu->lock, flags);

		if (clk_info->gate.delay_us)
			udelay(clk_info->gate.delay_us);
	}

	return 0;
}

static void ingenic_clk_disable(struct clk_hw *hw)
{
	struct ingenic_clk *ingenic_clk = to_ingenic_clk(hw);
	const struct ingenic_cgu_clk_info *clk_info = to_clk_info(ingenic_clk);
	struct ingenic_cgu *cgu = ingenic_clk->cgu;
	unsigned long flags;

	if (clk_info->type & CGU_CLK_GATE) {
		/* gate the clock */
		spin_lock_irqsave(&cgu->lock, flags);
		ingenic_cgu_gate_set(cgu, &clk_info->gate, true);
		spin_unlock_irqrestore(&cgu->lock, flags);
	}
}

static int ingenic_clk_is_enabled(struct clk_hw *hw)
{
	struct ingenic_clk *ingenic_clk = to_ingenic_clk(hw);
	const struct ingenic_cgu_clk_info *clk_info = to_clk_info(ingenic_clk);
	struct ingenic_cgu *cgu = ingenic_clk->cgu;
	int enabled = 1;

	if (clk_info->type & CGU_CLK_GATE)
		enabled = !ingenic_cgu_gate_get(cgu, &clk_info->gate);

	return enabled;
}

static const struct clk_ops ingenic_clk_ops = {
	.get_parent = ingenic_clk_get_parent,
	.set_parent = ingenic_clk_set_parent,

	.recalc_rate = ingenic_clk_recalc_rate,
	.round_rate = ingenic_clk_round_rate,
	.set_rate = ingenic_clk_set_rate,

	.enable = ingenic_clk_enable,
	.disable = ingenic_clk_disable,
	.is_enabled = ingenic_clk_is_enabled,
};

/*
 * Setup functions.
 */

static int ingenic_register_clock(struct ingenic_cgu *cgu, unsigned idx)
{
	const struct ingenic_cgu_clk_info *clk_info = &cgu->clock_info[idx];
	struct clk_init_data clk_init;
	struct ingenic_clk *ingenic_clk = NULL;
	struct clk *clk, *parent;
	const char *parent_names[4];
	unsigned caps, i, num_possible;
	int err = -EINVAL;

	BUILD_BUG_ON(ARRAY_SIZE(clk_info->parents) > ARRAY_SIZE(parent_names));

	if (clk_info->type == CGU_CLK_EXT) {
		clk = of_clk_get_by_name(cgu->np, clk_info->name);
		if (IS_ERR(clk)) {
			pr_err("%s: no external clock '%s' provided\n",
			       __func__, clk_info->name);
			err = -ENODEV;
			goto out;
		}
		err = clk_register_clkdev(clk, clk_info->name, NULL);
		if (err) {
			clk_put(clk);
			goto out;
		}
		cgu->clocks.clks[idx] = clk;
		return 0;
	}

	if (!clk_info->type) {
		pr_err("%s: no clock type specified for '%s'\n", __func__,
		       clk_info->name);
		goto out;
	}

	ingenic_clk = kzalloc(sizeof(*ingenic_clk), GFP_KERNEL);
	if (!ingenic_clk) {
		err = -ENOMEM;
		goto out;
	}

	ingenic_clk->hw.init = &clk_init;
	ingenic_clk->cgu = cgu;
	ingenic_clk->idx = idx;

	clk_init.name = clk_info->name;
	clk_init.flags = 0;
	clk_init.parent_names = parent_names;

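	/*
	 * Work through the capability bits, clearing each one as it is
	 * handled; any bits still set at the end indicate an unsupported
	 * combination.
	 */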
	caps = clk_info->type;

	if (caps & CGU_CLK_DIV) {
		caps &= ~CGU_CLK_DIV;
	} else if (!(caps & CGU_CLK_CUSTOM)) {
		/* pass rate changes to the parent clock */
		clk_init.flags |= CLK_SET_RATE_PARENT;
	}

	if (caps & (CGU_CLK_MUX | CGU_CLK_CUSTOM)) {
		clk_init.num_parents = 0;

		if (caps & CGU_CLK_MUX)
			num_possible = 1 << clk_info->mux.bits;
		else
			num_possible = ARRAY_SIZE(clk_info->parents);

		for (i = 0; i < num_possible; i++) {
			if (clk_info->parents[i] == -1)
				continue;

			parent = cgu->clocks.clks[clk_info->parents[i]];
			parent_names[clk_init.num_parents] =
				__clk_get_name(parent);
			clk_init.num_parents++;
		}

		BUG_ON(!clk_init.num_parents);
		BUG_ON(clk_init.num_parents > ARRAY_SIZE(parent_names));
	} else {
		BUG_ON(clk_info->parents[0] == -1);
		clk_init.num_parents = 1;
		parent = cgu->clocks.clks[clk_info->parents[0]];
		parent_names[0] = __clk_get_name(parent);
	}

	if (caps & CGU_CLK_CUSTOM) {
		clk_init.ops = clk_info->custom.clk_ops;

		caps &= ~CGU_CLK_CUSTOM;

		if (caps) {
			pr_err("%s: custom clock may not be combined with type 0x%x\n",
			       __func__, caps);
			goto out;
		}
	} else if (caps & CGU_CLK_PLL) {
		clk_init.ops = &ingenic_pll_ops;

		caps &= ~CGU_CLK_PLL;

		if (caps) {
			pr_err("%s: PLL may not be combined with type 0x%x\n",
			       __func__, caps);
			goto out;
		}
	} else {
		clk_init.ops = &ingenic_clk_ops;
	}

	/* nothing to do for gates or fixed dividers */
	caps &= ~(CGU_CLK_GATE | CGU_CLK_FIXDIV);

	if (caps & CGU_CLK_MUX) {
		if (!(caps & CGU_CLK_MUX_GLITCHFREE))
			clk_init.flags |= CLK_SET_PARENT_GATE;

		caps &= ~(CGU_CLK_MUX | CGU_CLK_MUX_GLITCHFREE);
	}

	if (caps) {
		pr_err("%s: unknown clock type 0x%x\n", __func__, caps);
		goto out;
	}

	clk = clk_register(NULL, &ingenic_clk->hw);
	if (IS_ERR(clk)) {
		pr_err("%s: failed to register clock '%s'\n", __func__,
		       clk_info->name);
		err = PTR_ERR(clk);
		goto out;
	}

	err = clk_register_clkdev(clk, clk_info->name, NULL);
	if (err)
		goto out;

	cgu->clocks.clks[idx] = clk;
out:
	if (err)
		kfree(ingenic_clk);
	return err;
}

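/*
 * Illustrative sketch (not taken from this file): a SoC-specific CGU driver
 * typically builds its clock_info table and then, from an early OF init
 * callback, does roughly:
 *
 *	cgu = ingenic_cgu_new(my_cgu_clocks, ARRAY_SIZE(my_cgu_clocks), np);
 *	if (!cgu)
 *		return;
 *	if (ingenic_cgu_register_clocks(cgu))
 *		pr_err("failed to register CGU clocks\n");
 *
 * where my_cgu_clocks is a hypothetical array of ingenic_cgu_clk_info.
 */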
struct ingenic_cgu *
ingenic_cgu_new(const struct ingenic_cgu_clk_info *clock_info,
		unsigned num_clocks, struct device_node *np)
{
	struct ingenic_cgu *cgu;

	cgu = kzalloc(sizeof(*cgu), GFP_KERNEL);
	if (!cgu)
		goto err_out;

	cgu->base = of_iomap(np, 0);
	if (!cgu->base) {
		pr_err("%s: failed to map CGU registers\n", __func__);
		goto err_out_free;
	}

	cgu->np = np;
	cgu->clock_info = clock_info;
	cgu->clocks.clk_num = num_clocks;

	spin_lock_init(&cgu->lock);

	return cgu;

err_out_free:
	kfree(cgu);
err_out:
	return NULL;
}

int ingenic_cgu_register_clocks(struct ingenic_cgu *cgu)
{
	unsigned i;
	int err;

	cgu->clocks.clks = kcalloc(cgu->clocks.clk_num, sizeof(struct clk *),
				   GFP_KERNEL);
	if (!cgu->clocks.clks) {
		err = -ENOMEM;
		goto err_out;
	}

	for (i = 0; i < cgu->clocks.clk_num; i++) {
		err = ingenic_register_clock(cgu, i);
		if (err)
			goto err_out_unregister;
	}

	err = of_clk_add_provider(cgu->np, of_clk_src_onecell_get,
				  &cgu->clocks);
	if (err)
		goto err_out_unregister;

	return 0;

err_out_unregister:
	for (i = 0; i < cgu->clocks.clk_num; i++) {
		if (!cgu->clocks.clks[i])
			continue;
		if (cgu->clock_info[i].type & CGU_CLK_EXT)
			clk_put(cgu->clocks.clks[i]);
		else
			clk_unregister(cgu->clocks.clks[i]);
	}
	kfree(cgu->clocks.clks);
err_out:
	return err;
}