1 // SPDX-License-Identifier: GPL-2.0-or-later
3 * Ingenic SoC CGU driver
5 * Copyright (c) 2013-2015 Imagination Technologies
6 * Author: Paul Burton <paul.burton@mips.com>
9 #include <linux/bitops.h>
10 #include <linux/clk.h>
11 #include <linux/clk-provider.h>
12 #include <linux/clkdev.h>
13 #include <linux/delay.h>
15 #include <linux/iopoll.h>
16 #include <linux/math64.h>
18 #include <linux/of_address.h>
19 #include <linux/slab.h>
20 #include <linux/spinlock.h>
21 #include <linux/time.h>
25 #define MHZ (1000 * 1000)
27 static inline const struct ingenic_cgu_clk_info *
28 to_clk_info(struct ingenic_clk *clk)
30 return &clk->cgu->clock_info[clk->idx];
34 * ingenic_cgu_gate_get() - get the value of clock gate register bit
35 * @cgu: reference to the CGU whose registers should be read
36 * @info: info struct describing the gate bit
38 * Retrieves the state of the clock gate bit described by info. The
39 * caller must hold cgu->lock.
41 * Return: true if the gate bit is set, else false.
44 ingenic_cgu_gate_get(struct ingenic_cgu *cgu,
45 const struct ingenic_cgu_gate_info *info)
47 return !!(readl(cgu->base + info->reg) & BIT(info->bit))
48 ^ info->clear_to_gate;
52 * ingenic_cgu_gate_set() - set the value of clock gate register bit
53 * @cgu: reference to the CGU whose registers should be modified
54 * @info: info struct describing the gate bit
55 * @val: non-zero to gate a clock, otherwise zero
57 * Sets the given gate bit in order to gate or ungate a clock.
59 * The caller must hold cgu->lock.
62 ingenic_cgu_gate_set(struct ingenic_cgu *cgu,
63 const struct ingenic_cgu_gate_info *info, bool val)
65 u32 clkgr = readl(cgu->base + info->reg);
67 if (val ^ info->clear_to_gate)
68 clkgr |= BIT(info->bit);
70 clkgr &= ~BIT(info->bit);
72 writel(clkgr, cgu->base + info->reg);
80 ingenic_pll_recalc_rate(struct clk_hw *hw, unsigned long parent_rate)
82 struct ingenic_clk *ingenic_clk = to_ingenic_clk(hw);
83 const struct ingenic_cgu_clk_info *clk_info = to_clk_info(ingenic_clk);
84 struct ingenic_cgu *cgu = ingenic_clk->cgu;
85 const struct ingenic_cgu_pll_info *pll_info;
86 unsigned m, n, od_enc, od;
90 BUG_ON(clk_info->type != CGU_CLK_PLL);
91 pll_info = &clk_info->pll;
93 ctl = readl(cgu->base + pll_info->reg);
95 m = (ctl >> pll_info->m_shift) & GENMASK(pll_info->m_bits - 1, 0);
96 m += pll_info->m_offset;
97 n = (ctl >> pll_info->n_shift) & GENMASK(pll_info->n_bits - 1, 0);
98 n += pll_info->n_offset;
99 od_enc = ctl >> pll_info->od_shift;
100 od_enc &= GENMASK(pll_info->od_bits - 1, 0);
102 if (pll_info->bypass_bit >= 0) {
103 ctl = readl(cgu->base + pll_info->bypass_reg);
105 bypass = !!(ctl & BIT(pll_info->bypass_bit));
111 for (od = 0; od < pll_info->od_max; od++) {
112 if (pll_info->od_encoding[od] == od_enc)
115 BUG_ON(od == pll_info->od_max);
118 return div_u64((u64)parent_rate * m * pll_info->rate_multiplier,
123 ingenic_pll_calc_m_n_od(const struct ingenic_cgu_pll_info *pll_info,
124 unsigned long rate, unsigned long parent_rate,
125 unsigned int *pm, unsigned int *pn, unsigned int *pod)
127 unsigned int m, n, od = 1;
130 * The frequency after the input divider must be between 10 and 50 MHz.
131 * The highest divider yields the best resolution.
133 n = parent_rate / (10 * MHZ);
134 n = min_t(unsigned int, n, 1 << pll_info->n_bits);
135 n = max_t(unsigned int, n, pll_info->n_offset);
137 m = (rate / MHZ) * od * n / (parent_rate / MHZ);
138 m = min_t(unsigned int, m, 1 << pll_info->m_bits);
139 m = max_t(unsigned int, m, pll_info->m_offset);
147 ingenic_pll_calc(const struct ingenic_cgu_clk_info *clk_info,
148 unsigned long rate, unsigned long parent_rate,
149 unsigned int *pm, unsigned int *pn, unsigned int *pod)
151 const struct ingenic_cgu_pll_info *pll_info = &clk_info->pll;
152 unsigned int m, n, od;
154 if (pll_info->calc_m_n_od)
155 (*pll_info->calc_m_n_od)(pll_info, rate, parent_rate, &m, &n, &od);
157 ingenic_pll_calc_m_n_od(pll_info, rate, parent_rate, &m, &n, &od);
166 return div_u64((u64)parent_rate * m * pll_info->rate_multiplier,
171 ingenic_pll_round_rate(struct clk_hw *hw, unsigned long req_rate,
172 unsigned long *prate)
174 struct ingenic_clk *ingenic_clk = to_ingenic_clk(hw);
175 const struct ingenic_cgu_clk_info *clk_info = to_clk_info(ingenic_clk);
177 return ingenic_pll_calc(clk_info, req_rate, *prate, NULL, NULL, NULL);
180 static inline int ingenic_pll_check_stable(struct ingenic_cgu *cgu,
181 const struct ingenic_cgu_pll_info *pll_info)
185 return readl_poll_timeout(cgu->base + pll_info->reg, ctl,
186 ctl & BIT(pll_info->stable_bit),
187 0, 100 * USEC_PER_MSEC);
191 ingenic_pll_set_rate(struct clk_hw *hw, unsigned long req_rate,
192 unsigned long parent_rate)
194 struct ingenic_clk *ingenic_clk = to_ingenic_clk(hw);
195 struct ingenic_cgu *cgu = ingenic_clk->cgu;
196 const struct ingenic_cgu_clk_info *clk_info = to_clk_info(ingenic_clk);
197 const struct ingenic_cgu_pll_info *pll_info = &clk_info->pll;
198 unsigned long rate, flags;
199 unsigned int m, n, od;
203 rate = ingenic_pll_calc(clk_info, req_rate, parent_rate,
205 if (rate != req_rate)
206 pr_info("ingenic-cgu: request '%s' rate %luHz, actual %luHz\n",
207 clk_info->name, req_rate, rate);
209 spin_lock_irqsave(&cgu->lock, flags);
210 ctl = readl(cgu->base + pll_info->reg);
212 ctl &= ~(GENMASK(pll_info->m_bits - 1, 0) << pll_info->m_shift);
213 ctl |= (m - pll_info->m_offset) << pll_info->m_shift;
215 ctl &= ~(GENMASK(pll_info->n_bits - 1, 0) << pll_info->n_shift);
216 ctl |= (n - pll_info->n_offset) << pll_info->n_shift;
218 ctl &= ~(GENMASK(pll_info->od_bits - 1, 0) << pll_info->od_shift);
219 ctl |= pll_info->od_encoding[od - 1] << pll_info->od_shift;
221 writel(ctl, cgu->base + pll_info->reg);
223 /* If the PLL is enabled, verify that it's stable */
224 if (ctl & BIT(pll_info->enable_bit))
225 ret = ingenic_pll_check_stable(cgu, pll_info);
227 spin_unlock_irqrestore(&cgu->lock, flags);
232 static int ingenic_pll_enable(struct clk_hw *hw)
234 struct ingenic_clk *ingenic_clk = to_ingenic_clk(hw);
235 struct ingenic_cgu *cgu = ingenic_clk->cgu;
236 const struct ingenic_cgu_clk_info *clk_info = to_clk_info(ingenic_clk);
237 const struct ingenic_cgu_pll_info *pll_info = &clk_info->pll;
242 spin_lock_irqsave(&cgu->lock, flags);
243 if (pll_info->bypass_bit >= 0) {
244 ctl = readl(cgu->base + pll_info->bypass_reg);
246 ctl &= ~BIT(pll_info->bypass_bit);
248 writel(ctl, cgu->base + pll_info->bypass_reg);
251 ctl = readl(cgu->base + pll_info->reg);
253 ctl |= BIT(pll_info->enable_bit);
255 writel(ctl, cgu->base + pll_info->reg);
257 ret = ingenic_pll_check_stable(cgu, pll_info);
258 spin_unlock_irqrestore(&cgu->lock, flags);
263 static void ingenic_pll_disable(struct clk_hw *hw)
265 struct ingenic_clk *ingenic_clk = to_ingenic_clk(hw);
266 struct ingenic_cgu *cgu = ingenic_clk->cgu;
267 const struct ingenic_cgu_clk_info *clk_info = to_clk_info(ingenic_clk);
268 const struct ingenic_cgu_pll_info *pll_info = &clk_info->pll;
272 spin_lock_irqsave(&cgu->lock, flags);
273 ctl = readl(cgu->base + pll_info->reg);
275 ctl &= ~BIT(pll_info->enable_bit);
277 writel(ctl, cgu->base + pll_info->reg);
278 spin_unlock_irqrestore(&cgu->lock, flags);
281 static int ingenic_pll_is_enabled(struct clk_hw *hw)
283 struct ingenic_clk *ingenic_clk = to_ingenic_clk(hw);
284 struct ingenic_cgu *cgu = ingenic_clk->cgu;
285 const struct ingenic_cgu_clk_info *clk_info = to_clk_info(ingenic_clk);
286 const struct ingenic_cgu_pll_info *pll_info = &clk_info->pll;
289 ctl = readl(cgu->base + pll_info->reg);
291 return !!(ctl & BIT(pll_info->enable_bit));
294 static const struct clk_ops ingenic_pll_ops = {
295 .recalc_rate = ingenic_pll_recalc_rate,
296 .round_rate = ingenic_pll_round_rate,
297 .set_rate = ingenic_pll_set_rate,
299 .enable = ingenic_pll_enable,
300 .disable = ingenic_pll_disable,
301 .is_enabled = ingenic_pll_is_enabled,
305 * Operations for all non-PLL clocks
308 static u8 ingenic_clk_get_parent(struct clk_hw *hw)
310 struct ingenic_clk *ingenic_clk = to_ingenic_clk(hw);
311 const struct ingenic_cgu_clk_info *clk_info = to_clk_info(ingenic_clk);
312 struct ingenic_cgu *cgu = ingenic_clk->cgu;
314 u8 i, hw_idx, idx = 0;
316 if (clk_info->type & CGU_CLK_MUX) {
317 reg = readl(cgu->base + clk_info->mux.reg);
318 hw_idx = (reg >> clk_info->mux.shift) &
319 GENMASK(clk_info->mux.bits - 1, 0);
322 * Convert the hardware index to the parent index by skipping
323 * over any -1's in the parents array.
325 for (i = 0; i < hw_idx; i++) {
326 if (clk_info->parents[i] != -1)
334 static int ingenic_clk_set_parent(struct clk_hw *hw, u8 idx)
336 struct ingenic_clk *ingenic_clk = to_ingenic_clk(hw);
337 const struct ingenic_cgu_clk_info *clk_info = to_clk_info(ingenic_clk);
338 struct ingenic_cgu *cgu = ingenic_clk->cgu;
340 u8 curr_idx, hw_idx, num_poss;
343 if (clk_info->type & CGU_CLK_MUX) {
345 * Convert the parent index to the hardware index by adding
346 * 1 for any -1 in the parents array preceding the given
347 * index. That is, we want the index of idx'th entry in
348 * clk_info->parents which does not equal -1.
350 hw_idx = curr_idx = 0;
351 num_poss = 1 << clk_info->mux.bits;
352 for (; hw_idx < num_poss; hw_idx++) {
353 if (clk_info->parents[hw_idx] == -1)
360 /* idx should always be a valid parent */
361 BUG_ON(curr_idx != idx);
363 mask = GENMASK(clk_info->mux.bits - 1, 0);
364 mask <<= clk_info->mux.shift;
366 spin_lock_irqsave(&cgu->lock, flags);
368 /* write the register */
369 reg = readl(cgu->base + clk_info->mux.reg);
371 reg |= hw_idx << clk_info->mux.shift;
372 writel(reg, cgu->base + clk_info->mux.reg);
374 spin_unlock_irqrestore(&cgu->lock, flags);
378 return idx ? -EINVAL : 0;
382 ingenic_clk_recalc_rate(struct clk_hw *hw, unsigned long parent_rate)
384 struct ingenic_clk *ingenic_clk = to_ingenic_clk(hw);
385 const struct ingenic_cgu_clk_info *clk_info = to_clk_info(ingenic_clk);
386 struct ingenic_cgu *cgu = ingenic_clk->cgu;
387 unsigned long rate = parent_rate;
391 if (clk_info->type & CGU_CLK_DIV) {
392 parent = ingenic_clk_get_parent(hw);
394 if (!(clk_info->div.bypass_mask & BIT(parent))) {
395 div_reg = readl(cgu->base + clk_info->div.reg);
396 div = (div_reg >> clk_info->div.shift) &
397 GENMASK(clk_info->div.bits - 1, 0);
399 if (clk_info->div.div_table)
400 div = clk_info->div.div_table[div];
402 div = (div + 1) * clk_info->div.div;
406 } else if (clk_info->type & CGU_CLK_FIXDIV) {
407 rate /= clk_info->fixdiv.div;
414 ingenic_clk_calc_hw_div(const struct ingenic_cgu_clk_info *clk_info,
417 unsigned int i, best_i = 0, best = (unsigned int)-1;
419 for (i = 0; i < (1 << clk_info->div.bits)
420 && clk_info->div.div_table[i]; i++) {
421 if (clk_info->div.div_table[i] >= div &&
422 clk_info->div.div_table[i] < best) {
423 best = clk_info->div.div_table[i];
435 ingenic_clk_calc_div(struct clk_hw *hw,
436 const struct ingenic_cgu_clk_info *clk_info,
437 unsigned long parent_rate, unsigned long req_rate)
439 unsigned int div, hw_div;
442 parent = ingenic_clk_get_parent(hw);
443 if (clk_info->div.bypass_mask & BIT(parent))
446 /* calculate the divide */
447 div = DIV_ROUND_UP(parent_rate, req_rate);
449 if (clk_info->div.div_table) {
450 hw_div = ingenic_clk_calc_hw_div(clk_info, div);
452 return clk_info->div.div_table[hw_div];
455 /* Impose hardware constraints */
456 div = clamp_t(unsigned int, div, clk_info->div.div,
457 clk_info->div.div << clk_info->div.bits);
460 * If the divider value itself must be divided before being written to
461 * the divider register, we must ensure we don't have any bits set that
462 * would be lost as a result of doing so.
464 div = DIV_ROUND_UP(div, clk_info->div.div);
465 div *= clk_info->div.div;
471 ingenic_clk_round_rate(struct clk_hw *hw, unsigned long req_rate,
472 unsigned long *parent_rate)
474 struct ingenic_clk *ingenic_clk = to_ingenic_clk(hw);
475 const struct ingenic_cgu_clk_info *clk_info = to_clk_info(ingenic_clk);
476 unsigned int div = 1;
478 if (clk_info->type & CGU_CLK_DIV)
479 div = ingenic_clk_calc_div(hw, clk_info, *parent_rate, req_rate);
480 else if (clk_info->type & CGU_CLK_FIXDIV)
481 div = clk_info->fixdiv.div;
482 else if (clk_hw_can_set_rate_parent(hw))
483 *parent_rate = req_rate;
485 return DIV_ROUND_UP(*parent_rate, div);
488 static inline int ingenic_clk_check_stable(struct ingenic_cgu *cgu,
489 const struct ingenic_cgu_clk_info *clk_info)
493 return readl_poll_timeout(cgu->base + clk_info->div.reg, reg,
494 !(reg & BIT(clk_info->div.busy_bit)),
495 0, 100 * USEC_PER_MSEC);
499 ingenic_clk_set_rate(struct clk_hw *hw, unsigned long req_rate,
500 unsigned long parent_rate)
502 struct ingenic_clk *ingenic_clk = to_ingenic_clk(hw);
503 const struct ingenic_cgu_clk_info *clk_info = to_clk_info(ingenic_clk);
504 struct ingenic_cgu *cgu = ingenic_clk->cgu;
505 unsigned long rate, flags;
506 unsigned int hw_div, div;
510 if (clk_info->type & CGU_CLK_DIV) {
511 div = ingenic_clk_calc_div(hw, clk_info, parent_rate, req_rate);
512 rate = DIV_ROUND_UP(parent_rate, div);
514 if (rate != req_rate)
517 if (clk_info->div.div_table)
518 hw_div = ingenic_clk_calc_hw_div(clk_info, div);
520 hw_div = ((div / clk_info->div.div) - 1);
522 spin_lock_irqsave(&cgu->lock, flags);
523 reg = readl(cgu->base + clk_info->div.reg);
525 /* update the divide */
526 mask = GENMASK(clk_info->div.bits - 1, 0);
527 reg &= ~(mask << clk_info->div.shift);
528 reg |= hw_div << clk_info->div.shift;
530 /* clear the stop bit */
531 if (clk_info->div.stop_bit != -1)
532 reg &= ~BIT(clk_info->div.stop_bit);
534 /* set the change enable bit */
535 if (clk_info->div.ce_bit != -1)
536 reg |= BIT(clk_info->div.ce_bit);
538 /* update the hardware */
539 writel(reg, cgu->base + clk_info->div.reg);
541 /* wait for the change to take effect */
542 if (clk_info->div.busy_bit != -1)
543 ret = ingenic_clk_check_stable(cgu, clk_info);
545 spin_unlock_irqrestore(&cgu->lock, flags);
552 static int ingenic_clk_enable(struct clk_hw *hw)
554 struct ingenic_clk *ingenic_clk = to_ingenic_clk(hw);
555 const struct ingenic_cgu_clk_info *clk_info = to_clk_info(ingenic_clk);
556 struct ingenic_cgu *cgu = ingenic_clk->cgu;
559 if (clk_info->type & CGU_CLK_GATE) {
560 /* ungate the clock */
561 spin_lock_irqsave(&cgu->lock, flags);
562 ingenic_cgu_gate_set(cgu, &clk_info->gate, false);
563 spin_unlock_irqrestore(&cgu->lock, flags);
565 if (clk_info->gate.delay_us)
566 udelay(clk_info->gate.delay_us);
572 static void ingenic_clk_disable(struct clk_hw *hw)
574 struct ingenic_clk *ingenic_clk = to_ingenic_clk(hw);
575 const struct ingenic_cgu_clk_info *clk_info = to_clk_info(ingenic_clk);
576 struct ingenic_cgu *cgu = ingenic_clk->cgu;
579 if (clk_info->type & CGU_CLK_GATE) {
581 spin_lock_irqsave(&cgu->lock, flags);
582 ingenic_cgu_gate_set(cgu, &clk_info->gate, true);
583 spin_unlock_irqrestore(&cgu->lock, flags);
587 static int ingenic_clk_is_enabled(struct clk_hw *hw)
589 struct ingenic_clk *ingenic_clk = to_ingenic_clk(hw);
590 const struct ingenic_cgu_clk_info *clk_info = to_clk_info(ingenic_clk);
591 struct ingenic_cgu *cgu = ingenic_clk->cgu;
594 if (clk_info->type & CGU_CLK_GATE)
595 enabled = !ingenic_cgu_gate_get(cgu, &clk_info->gate);
600 static const struct clk_ops ingenic_clk_ops = {
601 .get_parent = ingenic_clk_get_parent,
602 .set_parent = ingenic_clk_set_parent,
604 .recalc_rate = ingenic_clk_recalc_rate,
605 .round_rate = ingenic_clk_round_rate,
606 .set_rate = ingenic_clk_set_rate,
608 .enable = ingenic_clk_enable,
609 .disable = ingenic_clk_disable,
610 .is_enabled = ingenic_clk_is_enabled,
617 static int ingenic_register_clock(struct ingenic_cgu *cgu, unsigned idx)
619 const struct ingenic_cgu_clk_info *clk_info = &cgu->clock_info[idx];
620 struct clk_init_data clk_init;
621 struct ingenic_clk *ingenic_clk = NULL;
622 struct clk *clk, *parent;
623 const char *parent_names[4];
624 unsigned caps, i, num_possible;
627 BUILD_BUG_ON(ARRAY_SIZE(clk_info->parents) > ARRAY_SIZE(parent_names));
629 if (clk_info->type == CGU_CLK_EXT) {
630 clk = of_clk_get_by_name(cgu->np, clk_info->name);
632 pr_err("%s: no external clock '%s' provided\n",
633 __func__, clk_info->name);
637 err = clk_register_clkdev(clk, clk_info->name, NULL);
642 cgu->clocks.clks[idx] = clk;
646 if (!clk_info->type) {
647 pr_err("%s: no clock type specified for '%s'\n", __func__,
652 ingenic_clk = kzalloc(sizeof(*ingenic_clk), GFP_KERNEL);
658 ingenic_clk->hw.init = &clk_init;
659 ingenic_clk->cgu = cgu;
660 ingenic_clk->idx = idx;
662 clk_init.name = clk_info->name;
664 clk_init.parent_names = parent_names;
666 caps = clk_info->type;
668 if (caps & CGU_CLK_DIV) {
669 caps &= ~CGU_CLK_DIV;
670 } else if (!(caps & CGU_CLK_CUSTOM)) {
671 /* pass rate changes to the parent clock */
672 clk_init.flags |= CLK_SET_RATE_PARENT;
675 if (caps & (CGU_CLK_MUX | CGU_CLK_CUSTOM)) {
676 clk_init.num_parents = 0;
678 if (caps & CGU_CLK_MUX)
679 num_possible = 1 << clk_info->mux.bits;
681 num_possible = ARRAY_SIZE(clk_info->parents);
683 for (i = 0; i < num_possible; i++) {
684 if (clk_info->parents[i] == -1)
687 parent = cgu->clocks.clks[clk_info->parents[i]];
688 parent_names[clk_init.num_parents] =
689 __clk_get_name(parent);
690 clk_init.num_parents++;
693 BUG_ON(!clk_init.num_parents);
694 BUG_ON(clk_init.num_parents > ARRAY_SIZE(parent_names));
696 BUG_ON(clk_info->parents[0] == -1);
697 clk_init.num_parents = 1;
698 parent = cgu->clocks.clks[clk_info->parents[0]];
699 parent_names[0] = __clk_get_name(parent);
702 if (caps & CGU_CLK_CUSTOM) {
703 clk_init.ops = clk_info->custom.clk_ops;
705 caps &= ~CGU_CLK_CUSTOM;
708 pr_err("%s: custom clock may not be combined with type 0x%x\n",
712 } else if (caps & CGU_CLK_PLL) {
713 clk_init.ops = &ingenic_pll_ops;
715 caps &= ~CGU_CLK_PLL;
718 pr_err("%s: PLL may not be combined with type 0x%x\n",
723 clk_init.ops = &ingenic_clk_ops;
726 /* nothing to do for gates or fixed dividers */
727 caps &= ~(CGU_CLK_GATE | CGU_CLK_FIXDIV);
729 if (caps & CGU_CLK_MUX) {
730 if (!(caps & CGU_CLK_MUX_GLITCHFREE))
731 clk_init.flags |= CLK_SET_PARENT_GATE;
733 caps &= ~(CGU_CLK_MUX | CGU_CLK_MUX_GLITCHFREE);
737 pr_err("%s: unknown clock type 0x%x\n", __func__, caps);
741 clk = clk_register(NULL, &ingenic_clk->hw);
743 pr_err("%s: failed to register clock '%s'\n", __func__,
749 err = clk_register_clkdev(clk, clk_info->name, NULL);
753 cgu->clocks.clks[idx] = clk;
761 ingenic_cgu_new(const struct ingenic_cgu_clk_info *clock_info,
762 unsigned num_clocks, struct device_node *np)
764 struct ingenic_cgu *cgu;
766 cgu = kzalloc(sizeof(*cgu), GFP_KERNEL);
770 cgu->base = of_iomap(np, 0);
772 pr_err("%s: failed to map CGU registers\n", __func__);
777 cgu->clock_info = clock_info;
778 cgu->clocks.clk_num = num_clocks;
780 spin_lock_init(&cgu->lock);
/*
 * Register every clock in the CGU's table and expose them as a DT clock
 * provider. On failure, registered clocks are unwound: external clocks
 * are clk_put(), locally-registered ones are clk_unregister()ed.
 *
 * NOTE(review): this chunk is truncated — the function's tail (error
 * label(s), final return and closing brace) lies beyond the visible
 * span, so the code below is left untouched.
 */
790 int ingenic_cgu_register_clocks(struct ingenic_cgu *cgu)
795 cgu->clocks.clks = kcalloc(cgu->clocks.clk_num, sizeof(struct clk *),
797 if (!cgu->clocks.clks) {
802 for (i = 0; i < cgu->clocks.clk_num; i++) {
803 err = ingenic_register_clock(cgu, i);
805 goto err_out_unregister;
808 err = of_clk_add_provider(cgu->np, of_clk_src_onecell_get,
811 goto err_out_unregister;
/* unwind path: release whatever was registered before the failure */
816 for (i = 0; i < cgu->clocks.clk_num; i++) {
817 if (!cgu->clocks.clks[i])
819 if (cgu->clock_info[i].type & CGU_CLK_EXT)
820 clk_put(cgu->clocks.clks[i]);
822 clk_unregister(cgu->clocks.clks[i]);
824 kfree(cgu->clocks.clks);