1 // SPDX-License-Identifier: GPL-2.0
5 * Exposes all configurable internal clock sources to the clk framework.
8 * - Root source, usually 12MHz supplied by an external crystal
9 * - 3 PLLs which generate multiples of root rate [AUX, CPU, AUX2]
12 * - 6 clock dividers with:
13 * * selectable source [one of the PLLs],
14 * * output divided between [2 .. 512 in steps of 2] (!Au1300)
15 * or [1 .. 256 in steps of 1] (Au1300),
16 * * can be enabled individually.
18 * - up to 6 "internal" (fixed) consumers which:
19 * * take either AUXPLL or one of the above 6 dividers as input,
20 * * divide this input by 1, 2, or 4 (and 3 on Au1300).
21 * * can be disabled separately.
24 * - sysbus clock: CPU core clock (CPUPLL) divided by 2, 3 or 4.
25 * depends on board design and should be set by bootloader, read-only.
26 * - peripheral clock: half the rate of sysbus clock, source for a lot
27 * of peripheral blocks, read-only.
28 * - memory clock: clk rate to main memory chips, depends on board
29 * design and is read-only.
30 * - lrclk: the static bus clock signal for synchronous operation.
31 * depends on board design, must be set by bootloader,
32 * but may be required to correctly configure devices attached to
33 * the static bus. The Au1000/1500/1100 manuals call it LCLK, on
34 * later models it's called RCLK.
37 #include <linux/init.h>
39 #include <linux/clk.h>
40 #include <linux/clk-provider.h>
41 #include <linux/clkdev.h>
42 #include <linux/slab.h>
43 #include <linux/spinlock.h>
44 #include <linux/types.h>
45 #include <asm/mach-au1x00/au1000.h>
47 /* Base clock: 12MHz is the default in all databooks, and I haven't
48 * found any board yet which uses a different rate.
/* NOTE(review): this file appears to be a partial extraction — gaps in the
 * embedded line numbers show that braces, comment terminators and some
 * statements are missing. Only comments are added below; code is untouched.
 */
50 #define ALCHEMY_ROOTCLK_RATE 12000000
/* Per-SoC name tables for the "internal" clock consumers, indexed by their
 * position in the SYS_CLKSRC mux. A NULL slot presumably means that mux
 * line has no usable consumer on that chip — TODO confirm vs. databooks.
 */
53 * the internal sources which can be driven by the PLLs and dividers.
54 * Names taken from the databooks, refer to them for more information,
55 * especially which ones share a clock line.
57 static const char * const alchemy_au1300_intclknames[] = {
58 "lcd_intclk", "gpemgp_clk", "maempe_clk", "maebsa_clk",
62 static const char * const alchemy_au1200_intclknames[] = {
63 "lcd_intclk", NULL, NULL, NULL, "EXTCLK0", "EXTCLK1"
66 static const char * const alchemy_au1550_intclknames[] = {
67 "usb_clk", "psc0_intclk", "psc1_intclk", "pci_clko",
71 static const char * const alchemy_au1100_intclknames[] = {
72 "usb_clk", "lcd_intclk", NULL, "i2s_clk", "EXTCLK0", "EXTCLK1"
75 static const char * const alchemy_au1500_intclknames[] = {
76 NULL, "usbd_clk", "usbh_clk", "pci_clko", "EXTCLK0", "EXTCLK1"
79 static const char * const alchemy_au1000_intclknames[] = {
80 "irda_clk", "usbd_clk", "usbh_clk", "i2s_clk", "EXTCLK0",
84 /* aliases for a few on-chip sources which are either shared
85 * or have gone through name changes.
/* Entries are { alias-name, base-name, cpu-type }; alchemy_clk_init()
 * registers each matching entry with clk_add_alias() so drivers can look
 * the clock up under either name.
 */
87 static struct clk_aliastable {
91 } alchemy_clk_aliases[] __initdata = {
92 { "usbh_clk", "usb_clk", ALCHEMY_CPU_AU1100 },
93 { "usbd_clk", "usb_clk", ALCHEMY_CPU_AU1100 },
94 { "irda_clk", "usb_clk", ALCHEMY_CPU_AU1100 },
95 { "usbh_clk", "usb_clk", ALCHEMY_CPU_AU1550 },
96 { "usbd_clk", "usb_clk", ALCHEMY_CPU_AU1550 },
97 { "psc2_intclk", "usb_clk", ALCHEMY_CPU_AU1550 },
98 { "psc3_intclk", "EXTCLK0", ALCHEMY_CPU_AU1550 },
99 { "psc0_intclk", "EXTCLK0", ALCHEMY_CPU_AU1200 },
100 { "psc1_intclk", "EXTCLK1", ALCHEMY_CPU_AU1200 },
101 { "psc0_intclk", "EXTCLK0", ALCHEMY_CPU_AU1300 },
102 { "psc2_intclk", "EXTCLK0", ALCHEMY_CPU_AU1300 },
103 { "psc1_intclk", "EXTCLK1", ALCHEMY_CPU_AU1300 },
104 { "psc3_intclk", "EXTCLK1", ALCHEMY_CPU_AU1300 },
/* Map a physical address to an uncached KSEG1 virtual address. */
109 #define IOMEM(x) ((void __iomem *)(KSEG1ADDR(CPHYSADDR(x))))
111 /* access locks to SYS_FREQCTRL0/1 and SYS_CLKSRC registers */
112 static spinlock_t alchemy_clk_fg0_lock;
113 static spinlock_t alchemy_clk_fg1_lock;
114 static DEFINE_SPINLOCK(alchemy_clk_csrc_lock);
116 /* CPU Core clock *****************************************************/
/* Core clock rate = SYS_CPUPLL multiplier * parent (root crystal) rate.
 * The multiplier field is 7 bits wide on Au1300; pre-Au1300 parts use
 * fewer bits, hence the extra mask on the older-CPU path below.
 * NOTE(review): several statements of this function (including the
 * write-only-PLL fallback value) are missing from this extraction.
 */
118 static unsigned long alchemy_clk_cpu_recalc(struct clk_hw *hw,
119 unsigned long parent_rate)
124 * On early Au1000, sys_cpupll was write-only. Since these
125 * silicon versions of Au1000 are not sold, we don't bend
126 * over backwards trying to determine the frequency.
128 if (unlikely(au1xxx_cpu_has_pll_wo()))
131 t = alchemy_rdsys(AU1000_SYS_CPUPLL) & 0x7f;
132 if (alchemy_get_cputype() < ALCHEMY_CPU_AU1300)
/* Preset loops-per-jiffy from the core clock so the delay calibration
 * at boot can be skipped: lpj = coreclock / (2 * HZ).
 */
140 void __init alchemy_set_lpj(void)
142 preset_lpj = alchemy_clk_cpu_recalc(NULL, ALCHEMY_ROOTCLK_RATE);
143 preset_lpj /= 2 * HZ;
/* The CPU PLL is treated as read-only here: only recalc_rate is provided. */
146 static const struct clk_ops alchemy_clkops_cpu = {
147 .recalc_rate = alchemy_clk_cpu_recalc,
/* Register the CPU core clock with the root crystal as its sole parent.
 * Returns the registered struct clk, or ERR_PTR(-ENOMEM) on allocation
 * failure.
 */
150 static struct clk __init *alchemy_clk_setup_cpu(const char *parent_name,
153 struct clk_init_data id;
157 h = kzalloc(sizeof(*h), GFP_KERNEL);
159 return ERR_PTR(-ENOMEM);
161 id.name = ALCHEMY_CPU_CLK;
162 id.parent_names = &parent_name;
165 id.ops = &alchemy_clkops_cpu;
168 clk = clk_register(NULL, h);
170 pr_err("failed to register clock\n");
177 /* AUXPLLs ************************************************************/
/* One instance per AUXPLL; Au1300 has a second PLL (AUXPLL2). */
179 struct alchemy_auxpll_clk {
181 unsigned long reg; /* au1300 has also AUXPLL2 */
182 int maxmult; /* max multiplier */
184 #define to_auxpll_clk(x) container_of(x, struct alchemy_auxpll_clk, hw)
/* AUXPLL output = (register multiplier & 0xff) * parent (crystal) rate. */
186 static unsigned long alchemy_clk_aux_recalc(struct clk_hw *hw,
187 unsigned long parent_rate)
189 struct alchemy_auxpll_clk *a = to_auxpll_clk(hw);
191 return (alchemy_rdsys(a->reg) & 0xff) * parent_rate;
/* Program the AUXPLL multiplier. Valid multipliers are 0 (off) or
 * 7..maxmult (7 * 12MHz = 84MHz minimum).
 * NOTE(review): the division of 'd' by parent_rate appears to be on a
 * line missing from this extraction — confirm before relying on 'd'.
 */
194 static int alchemy_clk_aux_setr(struct clk_hw *hw,
196 unsigned long parent_rate)
198 struct alchemy_auxpll_clk *a = to_auxpll_clk(hw);
199 unsigned long d = rate;
206 /* minimum is 84MHz, max is 756-1032 depending on variant */
207 if (((d < 7) && (d != 0)) || (d > a->maxmult))
210 alchemy_wrsys(d, a->reg);
/* Round a requested rate to an achievable multiple of the parent rate,
 * clamped to the 7..maxmult multiplier range.
 */
214 static long alchemy_clk_aux_roundr(struct clk_hw *hw,
216 unsigned long *parent_rate)
218 struct alchemy_auxpll_clk *a = to_auxpll_clk(hw);
221 if (!rate || !*parent_rate)
224 mult = rate / (*parent_rate);
226 if (mult && (mult < 7))
228 if (mult > a->maxmult)
231 return (*parent_rate) * mult;
234 static const struct clk_ops alchemy_clkops_aux = {
235 .recalc_rate = alchemy_clk_aux_recalc,
236 .set_rate = alchemy_clk_aux_setr,
237 .round_rate = alchemy_clk_aux_roundr,
/* Allocate, register and clkdev-alias one AUXPLL clock.
 * CLK_GET_RATE_NOCACHE: rate is re-read from hardware on every query
 * since the bootloader/firmware may change the register behind our back.
 */
240 static struct clk __init *alchemy_clk_setup_aux(const char *parent_name,
241 char *name, int maxmult,
244 struct clk_init_data id;
246 struct alchemy_auxpll_clk *a;
248 a = kzalloc(sizeof(*a), GFP_KERNEL);
250 return ERR_PTR(-ENOMEM);
253 id.parent_names = &parent_name;
255 id.flags = CLK_GET_RATE_NOCACHE;
256 id.ops = &alchemy_clkops_aux;
259 a->maxmult = maxmult;
262 c = clk_register(NULL, &a->hw);
264 clk_register_clkdev(c, name, NULL);
271 /* sysbus_clk *********************************************************/
/* sysbus = CPU core clock / (SYS_POWERCTRL[1:0] + 2), i.e. /2, /3 or /4.
 * Board-dependent, set by the bootloader; registered as a read-only
 * fixed-factor clock.
 */
273 static struct clk __init *alchemy_clk_setup_sysbus(const char *pn)
275 unsigned long v = (alchemy_rdsys(AU1000_SYS_POWERCTRL) & 3) + 2;
278 c = clk_register_fixed_factor(NULL, ALCHEMY_SYSBUS_CLK,
281 clk_register_clkdev(c, ALCHEMY_SYSBUS_CLK, NULL);
285 /* Peripheral Clock ***************************************************/
287 static struct clk __init *alchemy_clk_setup_periph(const char *pn)
289 /* Peripheral clock runs at half the rate of sysbus clk */
292 c = clk_register_fixed_factor(NULL, ALCHEMY_PERIPH_CLK,
295 clk_register_clkdev(c, ALCHEMY_PERIPH_CLK, NULL);
299 /* mem clock **********************************************************/
/* Memory clock is sysbus either 1:1 or 1:2, chosen by a strapping bit in
 * MEM_SDCONFIGB (bit 15 on Au1550/Au1200, bit 31 on Au1300); the older
 * SDR parts below presumably use a fixed divisor — the corresponding
 * lines are missing from this extraction, confirm against mainline.
 */
301 static struct clk __init *alchemy_clk_setup_mem(const char *pn, int ct)
303 void __iomem *addr = IOMEM(AU1000_MEM_PHYS_ADDR);
309 case ALCHEMY_CPU_AU1550:
310 case ALCHEMY_CPU_AU1200:
311 v = __raw_readl(addr + AU1550_MEM_SDCONFIGB);
312 div = (v & (1 << 15)) ? 1 : 2;
314 case ALCHEMY_CPU_AU1300:
315 v = __raw_readl(addr + AU1550_MEM_SDCONFIGB);
316 div = (v & (1 << 31)) ? 1 : 2;
318 case ALCHEMY_CPU_AU1000:
319 case ALCHEMY_CPU_AU1500:
320 case ALCHEMY_CPU_AU1100:
326 c = clk_register_fixed_factor(NULL, ALCHEMY_MEM_CLK, pn,
329 clk_register_clkdev(c, ALCHEMY_MEM_CLK, NULL);
333 /* lrclk: external synchronous static bus clock ***********************/
335 static struct clk __init *alchemy_clk_setup_lrclk(const char *pn, int t)
337 /* Au1000, Au1500: MEM_STCFG0[11]: If bit is set, lrclk=pclk/5,
338 * otherwise lrclk=pclk/4.
339 * All other variants: MEM_STCFG0[15:13] = divisor.
340 * L/RCLK = periph_clk / (divisor + 1)
341 * On Au1000, Au1500, Au1100 it's called LCLK,
342 * on later models it's called RCLK, but it's the same thing.
345 unsigned long v = alchemy_rdsmem(AU1000_MEM_STCFG0);
348 case ALCHEMY_CPU_AU1000:
349 case ALCHEMY_CPU_AU1500:
350 v = 4 + ((v >> 11) & 1);
352 default: /* all other models */
353 v = ((v >> 13) & 7) + 1;
355 c = clk_register_fixed_factor(NULL, ALCHEMY_LR_CLK,
358 clk_register_clkdev(c, ALCHEMY_LR_CLK, NULL);
362 /* Clock dividers and muxes *******************************************/
364 /* data for fgen and csrc mux-dividers */
365 struct alchemy_fgcs_clk {
367 spinlock_t *reglock; /* register lock */
368 unsigned long reg; /* SYS_FREQCTRL0/1 */
369 int shift; /* offset in register */
370 int parent; /* parent before disable [Au1300] */
371 int isen; /* is it enabled? */
372 int *dt; /* dividertable for csrc */
374 #define to_fgcs_clk(x) container_of(x, struct alchemy_fgcs_clk, hw)
/* Compute the best register divider value for rate given prate.
 * scale: divider granularity (2 = even divisors only, pre-Au1300;
 *        1 = any divisor, Au1300). maxdiv: largest supported divisor.
 * Optionally returns the achieved rate via *rv.
 * NOTE(review): several statements are missing from this extraction;
 * the visible lines only show the round-to-scale and register-encode
 * steps (register value = div/scale - 1).
 */
376 static long alchemy_calc_div(unsigned long rate, unsigned long prate,
377 int scale, int maxdiv, unsigned long *rv)
382 if ((prate / div1) > rate)
385 if (scale == 2) { /* only div-by-multiple-of-2 possible */
387 div1++; /* stay <=prate */
390 div2 = (div1 / scale) - 1; /* value to write to register */
397 div1 = ((div2 + 1) * scale);
/* Shared determine_rate for fgen and csrc clocks: scan all possible
 * parents (up to 7 mux inputs) for the divided rate closest to, but not
 * above, the requested rate; if no enabled parent matches exactly, try
 * asking an idle parent to change its own rate first.
 */
401 static int alchemy_clk_fgcs_detr(struct clk_hw *hw,
402 struct clk_rate_request *req,
403 int scale, int maxdiv)
405 struct clk_hw *pc, *bpc, *free;
406 long tdv, tpr, pr, nr, br, bpr, diff, lastdiff;
415 /* look at the rates each enabled parent supplies and select
416 * the one that gets closest to but not over the requested rate.
418 for (j = 0; j < 7; j++) {
419 pc = clk_hw_get_parent_by_index(hw, j);
423 /* if this parent is currently unused, remember it.
424 * XXX: we would actually want clk_has_active_children()
425 * but this is a good-enough approximation for now.
427 if (!clk_hw_is_prepared(pc)) {
432 pr = clk_hw_get_rate(pc);
436 /* what can hardware actually provide */
437 tdv = alchemy_calc_div(req->rate, pr, scale, maxdiv, NULL);
439 diff = req->rate - nr;
443 if (diff < lastdiff) {
453 /* if we couldn't get the exact rate we wanted from the enabled
454 * parents, maybe we can tell an available disabled/inactive one
455 * to give us a rate we can divide down to the requested rate.
457 if (lastdiff && free) {
458 for (j = (maxdiv == 4) ? 1 : scale; j <= maxdiv; j += scale) {
462 pr = clk_hw_round_rate(free, tpr);
464 tdv = alchemy_calc_div(req->rate, pr, scale, maxdiv,
467 diff = req->rate - nr;
470 if (diff < lastdiff) {
/* Hand the winning parent and its rate back to the clk core. */
484 req->best_parent_rate = bpr;
485 req->best_parent_hw = bpc;
/* fgen v1 register field layout (per clock, at c->shift within
 * SYS_FREQCTRL0/1, as evidenced by the shifts used below):
 *   bit 0: parent mux select (CPU PLL / AUXPLL)
 *   bit 1: enable
 *   bits 2..9: divider, output = parent / ((field + 1) * 2)
 * All read-modify-write accesses are serialized by c->reglock.
 */
491 static int alchemy_clk_fgv1_en(struct clk_hw *hw)
493 struct alchemy_fgcs_clk *c = to_fgcs_clk(hw);
494 unsigned long v, flags;
496 spin_lock_irqsave(c->reglock, flags);
497 v = alchemy_rdsys(c->reg);
498 v |= (1 << 1) << c->shift;
499 alchemy_wrsys(v, c->reg);
500 spin_unlock_irqrestore(c->reglock, flags);
/* Report the state of the enable bit (shift + 1). */
505 static int alchemy_clk_fgv1_isen(struct clk_hw *hw)
507 struct alchemy_fgcs_clk *c = to_fgcs_clk(hw);
508 unsigned long v = alchemy_rdsys(c->reg) >> (c->shift + 1);
/* Disable: clear the enable bit; the mux/divider fields are preserved. */
513 static void alchemy_clk_fgv1_dis(struct clk_hw *hw)
515 struct alchemy_fgcs_clk *c = to_fgcs_clk(hw);
516 unsigned long v, flags;
518 spin_lock_irqsave(c->reglock, flags);
519 v = alchemy_rdsys(c->reg);
520 v &= ~((1 << 1) << c->shift);
521 alchemy_wrsys(v, c->reg);
522 spin_unlock_irqrestore(c->reglock, flags);
/* Select parent: a single mux bit at c->shift (index 0 or 1). */
525 static int alchemy_clk_fgv1_setp(struct clk_hw *hw, u8 index)
527 struct alchemy_fgcs_clk *c = to_fgcs_clk(hw);
528 unsigned long v, flags;
530 spin_lock_irqsave(c->reglock, flags);
531 v = alchemy_rdsys(c->reg);
533 v |= (1 << c->shift);
535 v &= ~(1 << c->shift);
536 alchemy_wrsys(v, c->reg);
537 spin_unlock_irqrestore(c->reglock, flags);
542 static u8 alchemy_clk_fgv1_getp(struct clk_hw *hw)
544 struct alchemy_fgcs_clk *c = to_fgcs_clk(hw);
546 return (alchemy_rdsys(c->reg) >> c->shift) & 1;
/* Program the divider; v1 hardware only supports even divisors 2..512,
 * so a target above parent/2 is unreachable and rejected up front.
 */
549 static int alchemy_clk_fgv1_setr(struct clk_hw *hw, unsigned long rate,
550 unsigned long parent_rate)
552 struct alchemy_fgcs_clk *c = to_fgcs_clk(hw);
553 unsigned long div, v, flags, ret;
554 int sh = c->shift + 2;
556 if (!rate || !parent_rate || rate > (parent_rate / 2))
558 ret = alchemy_calc_div(rate, parent_rate, 2, 512, &div);
559 spin_lock_irqsave(c->reglock, flags);
560 v = alchemy_rdsys(c->reg);
563 alchemy_wrsys(v, c->reg);
564 spin_unlock_irqrestore(c->reglock, flags);
/* rate = parent / ((divider-field + 1) * 2). */
569 static unsigned long alchemy_clk_fgv1_recalc(struct clk_hw *hw,
570 unsigned long parent_rate)
572 struct alchemy_fgcs_clk *c = to_fgcs_clk(hw);
573 unsigned long v = alchemy_rdsys(c->reg) >> (c->shift + 2);
575 v = ((v & 0xff) + 1) * 2;
576 return parent_rate / v;
579 static int alchemy_clk_fgv1_detr(struct clk_hw *hw,
580 struct clk_rate_request *req)
582 return alchemy_clk_fgcs_detr(hw, req, 2, 512);
585 /* Au1000, Au1100, Au15x0, Au12x0 */
586 static const struct clk_ops alchemy_clkops_fgenv1 = {
587 .recalc_rate = alchemy_clk_fgv1_recalc,
588 .determine_rate = alchemy_clk_fgv1_detr,
589 .set_rate = alchemy_clk_fgv1_setr,
590 .set_parent = alchemy_clk_fgv1_setp,
591 .get_parent = alchemy_clk_fgv1_getp,
592 .enable = alchemy_clk_fgv1_en,
593 .disable = alchemy_clk_fgv1_dis,
594 .is_enabled = alchemy_clk_fgv1_isen,
/* fgen v2 (Au1300): the 2-bit mux field at c->shift doubles as the
 * enable control — value 0 means "disabled", 1..3 select a parent.
 * c->parent caches the last mux value so enable() can restore it.
 * Caller must hold c->reglock.
 */
597 static void __alchemy_clk_fgv2_en(struct alchemy_fgcs_clk *c)
599 unsigned long v = alchemy_rdsys(c->reg);
601 v &= ~(3 << c->shift);
602 v |= (c->parent & 3) << c->shift;
603 alchemy_wrsys(v, c->reg);
607 static int alchemy_clk_fgv2_en(struct clk_hw *hw)
609 struct alchemy_fgcs_clk *c = to_fgcs_clk(hw);
612 /* enable by setting the previous parent clock */
613 spin_lock_irqsave(c->reglock, flags);
614 __alchemy_clk_fgv2_en(c);
615 spin_unlock_irqrestore(c->reglock, flags);
/* Enabled iff the mux field is non-zero. */
620 static int alchemy_clk_fgv2_isen(struct clk_hw *hw)
622 struct alchemy_fgcs_clk *c = to_fgcs_clk(hw);
624 return ((alchemy_rdsys(c->reg) >> c->shift) & 3) != 0;
627 static void alchemy_clk_fgv2_dis(struct clk_hw *hw)
629 struct alchemy_fgcs_clk *c = to_fgcs_clk(hw);
630 unsigned long v, flags;
632 spin_lock_irqsave(c->reglock, flags);
633 v = alchemy_rdsys(c->reg);
634 v &= ~(3 << c->shift); /* set input mux to "disabled" state */
635 alchemy_wrsys(v, c->reg);
637 spin_unlock_irqrestore(c->reglock, flags);
/* Parent index is 0-based for the clk core but 1-based in hardware
 * (0 = disabled), hence the +1 here and the cached c->parent.
 */
640 static int alchemy_clk_fgv2_setp(struct clk_hw *hw, u8 index)
642 struct alchemy_fgcs_clk *c = to_fgcs_clk(hw);
645 spin_lock_irqsave(c->reglock, flags);
646 c->parent = index + 1; /* value to write to register */
648 __alchemy_clk_fgv2_en(c);
649 spin_unlock_irqrestore(c->reglock, flags);
654 static u8 alchemy_clk_fgv2_getp(struct clk_hw *hw)
656 struct alchemy_fgcs_clk *c = to_fgcs_clk(hw);
657 unsigned long flags, v;
659 spin_lock_irqsave(c->reglock, flags);
661 spin_unlock_irqrestore(c->reglock, flags);
665 /* fg0-2 and fg4-6 share a "scale"-bit. With this bit cleared, the
666 * dividers behave exactly as on previous models (dividers are multiples
667 * of 2); with the bit set, dividers are multiples of 1, halving their
668 * range, but making them also much more flexible.
670 static int alchemy_clk_fgv2_setr(struct clk_hw *hw, unsigned long rate,
671 unsigned long parent_rate)
673 struct alchemy_fgcs_clk *c = to_fgcs_clk(hw);
674 int sh = c->shift + 2;
675 unsigned long div, v, flags, ret;
677 if (!rate || !parent_rate || rate > parent_rate)
680 v = alchemy_rdsys(c->reg) & (1 << 30); /* test "scale" bit */
681 ret = alchemy_calc_div(rate, parent_rate, v ? 1 : 2,
682 v ? 256 : 512, &div);
684 spin_lock_irqsave(c->reglock, flags);
685 v = alchemy_rdsys(c->reg);
687 v |= (div & 0xff) << sh;
688 alchemy_wrsys(v, c->reg);
689 spin_unlock_irqrestore(c->reglock, flags);
/* rate = parent / (field + 1), additionally halved when the shared
 * scale bit (bit 30) is clear (legacy divide-by-multiples-of-2 mode).
 */
694 static unsigned long alchemy_clk_fgv2_recalc(struct clk_hw *hw,
695 unsigned long parent_rate)
697 struct alchemy_fgcs_clk *c = to_fgcs_clk(hw);
698 int sh = c->shift + 2;
701 v = alchemy_rdsys(c->reg);
702 t = parent_rate / (((v >> sh) & 0xff) + 1);
703 if ((v & (1 << 30)) == 0) /* test scale bit */
/* Pick scale/maxdiv according to the current state of the scale bit. */
709 static int alchemy_clk_fgv2_detr(struct clk_hw *hw,
710 struct clk_rate_request *req)
712 struct alchemy_fgcs_clk *c = to_fgcs_clk(hw);
715 if (alchemy_rdsys(c->reg) & (1 << 30)) {
723 return alchemy_clk_fgcs_detr(hw, req, scale, maxdiv);
726 /* Au1300 larger input mux, no separate disable bit, flexible divider */
727 static const struct clk_ops alchemy_clkops_fgenv2 = {
728 .recalc_rate = alchemy_clk_fgv2_recalc,
729 .determine_rate = alchemy_clk_fgv2_detr,
730 .set_rate = alchemy_clk_fgv2_setr,
731 .set_parent = alchemy_clk_fgv2_setp,
732 .get_parent = alchemy_clk_fgv2_getp,
733 .enable = alchemy_clk_fgv2_en,
734 .disable = alchemy_clk_fgv2_dis,
735 .is_enabled = alchemy_clk_fgv2_isen,
/* Possible fgen parents: v1 has 2 inputs, v2 (Au1300) has 3. */
738 static const char * const alchemy_clk_fgv1_parents[] = {
739 ALCHEMY_CPU_CLK, ALCHEMY_AUXPLL_CLK
742 static const char * const alchemy_clk_fgv2_parents[] = {
743 ALCHEMY_AUXPLL2_CLK, ALCHEMY_CPU_CLK, ALCHEMY_AUXPLL_CLK
746 static const char * const alchemy_clk_fgen_names[] = {
747 ALCHEMY_FG0_CLK, ALCHEMY_FG1_CLK, ALCHEMY_FG2_CLK,
748 ALCHEMY_FG3_CLK, ALCHEMY_FG4_CLK, ALCHEMY_FG5_CLK };
/* Register the 6 frequency-generator (divider) clocks.
 * fg0-2 live in SYS_FREQCTRL0, fg3-5 in SYS_FREQCTRL1, each occupying a
 * 10-bit field (hence shift = 10 * (i mod 3)); each register gets its
 * own spinlock. Ops variant is selected by CPU type (v1 pre-Au1300,
 * v2 on Au1300). Returns 0 on success, negative errno otherwise.
 */
750 static int __init alchemy_clk_init_fgens(int ctype)
753 struct clk_init_data id;
754 struct alchemy_fgcs_clk *a;
759 case ALCHEMY_CPU_AU1000...ALCHEMY_CPU_AU1200:
760 id.ops = &alchemy_clkops_fgenv1;
761 id.parent_names = alchemy_clk_fgv1_parents;
764 case ALCHEMY_CPU_AU1300:
765 id.ops = &alchemy_clkops_fgenv2;
766 id.parent_names = alchemy_clk_fgv2_parents;
772 id.flags = CLK_SET_RATE_PARENT | CLK_GET_RATE_NOCACHE;
/* NOTE(review): sibling alchemy_clk_setup_imux() uses the overflow-safe
 * kcalloc(6, sizeof(*a), ...) idiom for the same allocation; this open
 * -coded multiply should be made consistent with it.
 */
774 a = kzalloc((sizeof(*a)) * 6, GFP_KERNEL);
778 spin_lock_init(&alchemy_clk_fg0_lock);
779 spin_lock_init(&alchemy_clk_fg1_lock);
781 for (i = 0; i < 6; i++) {
782 id.name = alchemy_clk_fgen_names[i];
783 a->shift = 10 * (i < 3 ? i : i - 3);
785 a->reg = AU1000_SYS_FREQCTRL1;
786 a->reglock = &alchemy_clk_fg1_lock;
788 a->reg = AU1000_SYS_FREQCTRL0;
789 a->reglock = &alchemy_clk_fg0_lock;
792 /* default to first parent if bootloader has set
793 * the mux to disabled state.
795 if (ctype == ALCHEMY_CPU_AU1300) {
796 v = alchemy_rdsys(a->reg);
797 a->parent = (v >> a->shift) & 3;
806 c = clk_register(NULL, &a->hw);
810 clk_register_clkdev(c, id.name, NULL);
817 /* internal sources muxes *********************************************/
/* SYS_CLKSRC per-consumer field layout (as evidenced by the shifts and
 * masks used below): bits [1:0] at c->shift select a divider via the
 * per-SoC table c->dt; bits [4:2] select the source (0 = disabled,
 * 1 = AUXPLL, 2..7 = fgen0-5).
 */
819 static int alchemy_clk_csrc_isen(struct clk_hw *hw)
821 struct alchemy_fgcs_clk *c = to_fgcs_clk(hw);
822 unsigned long v = alchemy_rdsys(c->reg);
824 return (((v >> c->shift) >> 2) & 7) != 0;
/* Restore the cached source selection; caller holds c->reglock. */
827 static void __alchemy_clk_csrc_en(struct alchemy_fgcs_clk *c)
829 unsigned long v = alchemy_rdsys(c->reg);
831 v &= ~((7 << 2) << c->shift);
832 v |= ((c->parent & 7) << 2) << c->shift;
833 alchemy_wrsys(v, c->reg);
837 static int alchemy_clk_csrc_en(struct clk_hw *hw)
839 struct alchemy_fgcs_clk *c = to_fgcs_clk(hw);
842 /* enable by setting the previous parent clock */
843 spin_lock_irqsave(c->reglock, flags);
844 __alchemy_clk_csrc_en(c);
845 spin_unlock_irqrestore(c->reglock, flags);
850 static void alchemy_clk_csrc_dis(struct clk_hw *hw)
852 struct alchemy_fgcs_clk *c = to_fgcs_clk(hw);
853 unsigned long v, flags;
855 spin_lock_irqsave(c->reglock, flags);
856 v = alchemy_rdsys(c->reg);
/* NOTE(review): enable/isen treat the source field as 3 bits (mask 7),
 * but this clears only 2 bits (mask 3), which would leave bit 4 set for
 * sources 4..7 — confirm against the SYS_CLKSRC register layout.
 */
857 v &= ~((3 << 2) << c->shift); /* mux to "disabled" state */
858 alchemy_wrsys(v, c->reg);
860 spin_unlock_irqrestore(c->reglock, flags);
/* Parent index is 0-based for the clk core, 1-based in hardware
 * (0 = disabled), hence the +1/-1 in setp/getp.
 */
863 static int alchemy_clk_csrc_setp(struct clk_hw *hw, u8 index)
865 struct alchemy_fgcs_clk *c = to_fgcs_clk(hw);
868 spin_lock_irqsave(c->reglock, flags);
869 c->parent = index + 1; /* value to write to register */
871 __alchemy_clk_csrc_en(c);
872 spin_unlock_irqrestore(c->reglock, flags);
877 static u8 alchemy_clk_csrc_getp(struct clk_hw *hw)
879 struct alchemy_fgcs_clk *c = to_fgcs_clk(hw);
881 return c->parent - 1;
/* rate = parent / dt[divider-field]; dt maps the 2-bit field to an
 * actual divisor (tables alchemy_csrc_dt1/dt2 below).
 */
884 static unsigned long alchemy_clk_csrc_recalc(struct clk_hw *hw,
885 unsigned long parent_rate)
887 struct alchemy_fgcs_clk *c = to_fgcs_clk(hw);
888 unsigned long v = (alchemy_rdsys(c->reg) >> c->shift) & 3;
890 return parent_rate / c->dt[v];
/* Find the divider-table entry matching the (rounded) requested divisor
 * and program its index; /3 is only valid on Au1300 (dt[2] == 3).
 */
893 static int alchemy_clk_csrc_setr(struct clk_hw *hw, unsigned long rate,
894 unsigned long parent_rate)
896 struct alchemy_fgcs_clk *c = to_fgcs_clk(hw);
897 unsigned long d, v, flags;
900 if (!rate || !parent_rate || rate > parent_rate)
903 d = (parent_rate + (rate / 2)) / rate;
906 if ((d == 3) && (c->dt[2] != 3))
909 for (i = 0; i < 4; i++)
914 return -EINVAL; /* oops */
916 spin_lock_irqsave(c->reglock, flags);
917 v = alchemy_rdsys(c->reg);
918 v &= ~(3 << c->shift);
919 v |= (i & 3) << c->shift;
920 alchemy_wrsys(v, c->reg);
921 spin_unlock_irqrestore(c->reglock, flags);
926 static int alchemy_clk_csrc_detr(struct clk_hw *hw,
927 struct clk_rate_request *req)
929 struct alchemy_fgcs_clk *c = to_fgcs_clk(hw);
930 int scale = c->dt[2] == 3 ? 1 : 2; /* au1300 check */
932 return alchemy_clk_fgcs_detr(hw, req, scale, 4);
935 static const struct clk_ops alchemy_clkops_csrc = {
936 .recalc_rate = alchemy_clk_csrc_recalc,
937 .determine_rate = alchemy_clk_csrc_detr,
938 .set_rate = alchemy_clk_csrc_setr,
939 .set_parent = alchemy_clk_csrc_setp,
940 .get_parent = alchemy_clk_csrc_getp,
941 .enable = alchemy_clk_csrc_en,
942 .disable = alchemy_clk_csrc_dis,
943 .is_enabled = alchemy_clk_csrc_isen,
946 static const char * const alchemy_clk_csrc_parents[] = {
947 /* disabled at index 0 */ ALCHEMY_AUXPLL_CLK,
948 ALCHEMY_FG0_CLK, ALCHEMY_FG1_CLK, ALCHEMY_FG2_CLK,
949 ALCHEMY_FG3_CLK, ALCHEMY_FG4_CLK, ALCHEMY_FG5_CLK
/* Divider tables: 2-bit field index -> divisor. Au1300 (dt2) supports /3. */
953 static int alchemy_csrc_dt1[] = { 1, 4, 1, 2 }; /* rest */
954 static int alchemy_csrc_dt2[] = { 1, 4, 3, 2 }; /* Au1300 */
/* Register the 6 internal-source mux/divider clocks in SYS_CLKSRC.
 * Per-SoC consumer names come from the alchemy_*_intclknames tables and
 * the divider table (dt1/dt2) is picked by CPU type. Returns 0 on
 * success, negative errno otherwise.
 */
956 static int __init alchemy_clk_setup_imux(int ctype)
958 struct alchemy_fgcs_clk *a;
959 const char * const *names;
960 struct clk_init_data id;
965 id.ops = &alchemy_clkops_csrc;
966 id.parent_names = alchemy_clk_csrc_parents;
968 id.flags = CLK_SET_RATE_PARENT | CLK_GET_RATE_NOCACHE;
970 dt = alchemy_csrc_dt1;
972 case ALCHEMY_CPU_AU1000:
973 names = alchemy_au1000_intclknames;
975 case ALCHEMY_CPU_AU1500:
976 names = alchemy_au1500_intclknames;
978 case ALCHEMY_CPU_AU1100:
979 names = alchemy_au1100_intclknames;
981 case ALCHEMY_CPU_AU1550:
982 names = alchemy_au1550_intclknames;
984 case ALCHEMY_CPU_AU1200:
985 names = alchemy_au1200_intclknames;
987 case ALCHEMY_CPU_AU1300:
988 dt = alchemy_csrc_dt2;
989 names = alchemy_au1300_intclknames;
995 a = kcalloc(6, sizeof(*a), GFP_KERNEL);
1001 for (i = 0; i < 6; i++) {
1007 a->reg = AU1000_SYS_CLKSRC;
1008 a->reglock = &alchemy_clk_csrc_lock;
1011 /* default to first parent clock if mux is initially
1012 * set to disabled state.
1014 v = alchemy_rdsys(a->reg);
1015 a->parent = ((v >> a->shift) >> 2) & 7;
1023 c = clk_register(NULL, &a->hw);
1027 clk_register_clkdev(c, id.name, NULL);
1036 /**********************************************************************/
/* Build the whole Alchemy clock tree, bottom-up: root crystal -> CPU PLL
 * -> AUXPLL(s) -> sysbus/periph/mem/lrclk fixed factors -> 6 frequency
 * generators -> 6 internal-source muxes, then register the per-SoC clock
 * name aliases. Runs at postcore_initcall time so clocks exist before
 * device drivers probe.
 */
1045 static int __init alchemy_clk_init(void)
1047 int ctype = alchemy_get_cputype(), ret, i;
1048 struct clk_aliastable *t = alchemy_clk_aliases;
1051 /* Root of the Alchemy clock tree: external 12MHz crystal osc */
1052 c = clk_register_fixed_rate(NULL, ALCHEMY_ROOT_CLK, NULL,
1053 0, ALCHEMY_ROOTCLK_RATE);
1056 /* CPU core clock */
1057 c = alchemy_clk_setup_cpu(ALCHEMY_ROOT_CLK, ctype);
/* max multiplier: 84 * 12MHz = 1008MHz (Au1300), 63 * 12MHz = 756MHz */
1060 /* AUXPLLs: max 1GHz on Au1300, 756MHz on older models */
1061 i = (ctype == ALCHEMY_CPU_AU1300) ? 84 : 63;
1062 c = alchemy_clk_setup_aux(ALCHEMY_ROOT_CLK, ALCHEMY_AUXPLL_CLK,
1063 i, AU1000_SYS_AUXPLL);
1066 if (ctype == ALCHEMY_CPU_AU1300) {
1067 c = alchemy_clk_setup_aux(ALCHEMY_ROOT_CLK,
1068 ALCHEMY_AUXPLL2_CLK, i,
1069 AU1300_SYS_AUXPLL2);
1073 /* sysbus clock: cpu core clock divided by 2, 3 or 4 */
1074 c = alchemy_clk_setup_sysbus(ALCHEMY_CPU_CLK);
1077 /* peripheral clock: runs at half rate of sysbus clk */
1078 c = alchemy_clk_setup_periph(ALCHEMY_SYSBUS_CLK);
1081 /* SDR/DDR memory clock */
1082 c = alchemy_clk_setup_mem(ALCHEMY_SYSBUS_CLK, ctype);
1085 /* L/RCLK: external static bus clock for synchronous mode */
1086 c = alchemy_clk_setup_lrclk(ALCHEMY_PERIPH_CLK, ctype);
1089 /* Frequency dividers 0-5 */
1090 ret = alchemy_clk_init_fgens(ctype);
1096 /* dividing muxes for internal sources */
1097 ret = alchemy_clk_setup_imux(ctype);
1103 /* set up aliases drivers might look for */
1105 if (t->cputype == ctype)
1106 clk_add_alias(t->alias, NULL, t->base, NULL);
1110 pr_info("Alchemy clocktree installed\n");
1116 postcore_initcall(alchemy_clk_init);