// SPDX-License-Identifier: GPL-2.0+
/*
 * Marvell Armada 37xx SoC Peripheral clocks
 *
 * Copyright (C) 2016 Marvell
 *
 * Gregory CLEMENT <gregory.clement@free-electrons.com>
 *
 * Most of the peripheral clocks can be modelled like this:
 *             _____    _______    _______
 * TBG-A-P  --|     |  |       |  |       |   ______
 * TBG-B-P  --| Mux |--| /div1 |--| /div2 |--| Gate |--> perip_clk
 * TBG-A-S  --|     |  |       |  |       |  |______|
 * TBG-B-S  --|_____|  |_______|  |_______|
 *
 * However, some clocks may use only one or two of these blocks, or use
 * the xtal clock as parent.
 */

#include <linux/clk-provider.h>
#include <linux/mfd/syscon.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>
#include <linux/slab.h>

#define TBG_SEL		0x0
#define DIV_SEL0	0x4
#define DIV_SEL1	0x8
#define DIV_SEL2	0xC
#define CLK_SEL		0x10
#define CLK_DIS		0x14

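/*
 * North Bridge PM registers, accessed through the
 * "marvell,armada-3700-nb-pm" syscon, describing the CPU DVFS load
 * levels (L0..L3) and the TBG divider/selector programmed for each of
 * them.
 */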
#define ARMADA_37XX_DVFS_LOAD_1		1
#define LOAD_LEVEL_NR			4

#define ARMADA_37XX_NB_L0L1		0x18
#define ARMADA_37XX_NB_L2L3		0x1C
#define ARMADA_37XX_NB_TBG_DIV_OFF	13
#define ARMADA_37XX_NB_TBG_DIV_MASK	0x7
#define ARMADA_37XX_NB_CLK_SEL_OFF	11
#define ARMADA_37XX_NB_CLK_SEL_MASK	0x1
#define ARMADA_37XX_NB_TBG_SEL_OFF	9
#define ARMADA_37XX_NB_TBG_SEL_MASK	0x3
#define ARMADA_37XX_NB_CONFIG_SHIFT	16
#define ARMADA_37XX_NB_DYN_MOD		0x24
#define ARMADA_37XX_NB_DFS_EN		31
#define ARMADA_37XX_NB_CPU_LOAD		0x30
#define ARMADA_37XX_NB_CPU_LOAD_MASK	0x3
#define ARMADA_37XX_DVFS_LOAD_0		0
#define ARMADA_37XX_DVFS_LOAD_1		1
#define ARMADA_37XX_DVFS_LOAD_2		2
#define ARMADA_37XX_DVFS_LOAD_3		3

struct clk_periph_driver_data {
	struct clk_hw_onecell_data *hw_data;
	spinlock_t lock;
};

struct clk_double_div {
	struct clk_hw hw;
	void __iomem *reg1;
	u8 shift1;
	void __iomem *reg2;
	u8 shift2;
};

struct clk_pm_cpu {
	struct clk_hw hw;
	void __iomem *reg_mux;
	u8 shift_mux;
	u32 mask_mux;
	void __iomem *reg_div;
	u8 shift_div;
	struct regmap *nb_pm_base;
	unsigned long l1_expiration;
};

#define to_clk_double_div(_hw) container_of(_hw, struct clk_double_div, hw)
#define to_clk_pm_cpu(_hw) container_of(_hw, struct clk_pm_cpu, hw)

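/*
 * Static description of one peripheral clock: its name, parent list and
 * the pre-declared mux/rate/gate (or combined mux+rate) building blocks
 * that implement it.
 */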
struct clk_periph_data {
	const char *name;
	const char * const *parent_names;
	int num_parents;
	struct clk_hw *mux_hw;
	struct clk_hw *rate_hw;
	struct clk_hw *gate_hw;
	struct clk_hw *muxrate_hw;
	bool is_double_div;
};

static const struct clk_div_table clk_table6[] = {
	{ .val = 1, .div = 1, },
	{ .val = 2, .div = 2, },
	{ .val = 3, .div = 3, },
	{ .val = 4, .div = 4, },
	{ .val = 5, .div = 5, },
	{ .val = 6, .div = 6, },
	{ .val = 0, .div = 0, }, /* last entry */
};

static const struct clk_div_table clk_table1[] = {
	{ .val = 0, .div = 1, },
	{ .val = 1, .div = 2, },
	{ .val = 0, .div = 0, }, /* last entry */
};

static const struct clk_div_table clk_table2[] = {
	{ .val = 0, .div = 2, },
	{ .val = 1, .div = 4, },
	{ .val = 0, .div = 0, }, /* last entry */
};

static const struct clk_ops clk_double_div_ops;
static const struct clk_ops clk_pm_cpu_ops;

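/*
 * The PERIPH_* macros below declare the static mux, divider and gate
 * building blocks. Their .reg fields initially hold register offsets
 * (TBG_SEL, DIV_SELx, CLK_DIS) and are rebased onto the ioremapped
 * register block in armada_3700_add_composite_clk().
 */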
#define PERIPH_GATE(_name, _bit)			\
struct clk_gate gate_##_name = {			\
	.reg = (void *)CLK_DIS,				\
	.bit_idx = _bit,				\
	.hw.init = &(struct clk_init_data){		\
		.ops =  &clk_gate_ops,			\
	}						\
};

#define PERIPH_MUX(_name, _shift)			\
struct clk_mux mux_##_name = {				\
	.reg = (void *)TBG_SEL,				\
	.shift = _shift,				\
	.mask = 3,					\
	.hw.init = &(struct clk_init_data){		\
		.ops =  &clk_mux_ro_ops,		\
	}						\
};

#define PERIPH_DOUBLEDIV(_name, _reg1, _reg2, _shift1, _shift2)	\
struct clk_double_div rate_##_name = {		\
	.reg1 = (void *)_reg1,			\
	.reg2 = (void *)_reg2,			\
	.shift1 = _shift1,			\
	.shift2 = _shift2,			\
	.hw.init = &(struct clk_init_data){	\
		.ops =  &clk_double_div_ops,	\
	}					\
};

#define PERIPH_DIV(_name, _reg, _shift, _table)	\
struct clk_divider rate_##_name = {		\
	.reg = (void *)_reg,			\
	.table = _table,			\
	.shift = _shift,			\
	.hw.init = &(struct clk_init_data){	\
		.ops =  &clk_divider_ro_ops,	\
	}					\
};

#define PERIPH_PM_CPU(_name, _shift1, _reg, _shift2)	\
struct clk_pm_cpu muxrate_##_name = {		\
	.reg_mux = (void *)TBG_SEL,		\
	.mask_mux = 3,				\
	.shift_mux = _shift1,			\
	.reg_div = (void *)_reg,		\
	.shift_div = _shift2,			\
	.hw.init = &(struct clk_init_data){	\
		.ops =  &clk_pm_cpu_ops,	\
	}					\
};

#define PERIPH_CLK_FULL_DD(_name, _bit, _shift, _reg1, _reg2, _shift1, _shift2) \
static PERIPH_GATE(_name, _bit);			\
static PERIPH_MUX(_name, _shift);			\
static PERIPH_DOUBLEDIV(_name, _reg1, _reg2, _shift1, _shift2);

#define PERIPH_CLK_FULL(_name, _bit, _shift, _reg, _shift1, _table)	\
static PERIPH_GATE(_name, _bit);			\
static PERIPH_MUX(_name, _shift);			\
static PERIPH_DIV(_name, _reg, _shift1, _table);

#define PERIPH_CLK_GATE_DIV(_name, _bit, _reg, _shift, _table)	\
static PERIPH_GATE(_name, _bit);			\
static PERIPH_DIV(_name, _reg, _shift, _table);

#define PERIPH_CLK_MUX_DD(_name, _shift, _reg1, _reg2, _shift1, _shift2)	\
static PERIPH_MUX(_name, _shift);			\
static PERIPH_DOUBLEDIV(_name, _reg1, _reg2, _shift1, _shift2);

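/*
 * The REF_CLK_* macros build the clk_periph_data entries consumed by the
 * probe code. Clocks with a mux always select between the four TBG
 * outputs; gate-only clocks name their single parent explicitly.
 */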
#define REF_CLK_FULL(_name)				\
	{ .name = #_name,				\
	  .parent_names = (const char *[]){ "TBG-A-P",	\
	      "TBG-B-P", "TBG-A-S", "TBG-B-S"},		\
	  .num_parents = 4,				\
	  .mux_hw = &mux_##_name.hw,			\
	  .gate_hw = &gate_##_name.hw,			\
	  .rate_hw = &rate_##_name.hw,			\
	}

#define REF_CLK_FULL_DD(_name)				\
	{ .name = #_name,				\
	  .parent_names = (const char *[]){ "TBG-A-P",	\
	      "TBG-B-P", "TBG-A-S", "TBG-B-S"},		\
	  .num_parents = 4,				\
	  .mux_hw = &mux_##_name.hw,			\
	  .gate_hw = &gate_##_name.hw,			\
	  .rate_hw = &rate_##_name.hw,			\
	  .is_double_div = true,			\
	}

#define REF_CLK_GATE(_name, _parent_name)		\
	{ .name = #_name,				\
	  .parent_names = (const char *[]){ _parent_name},	\
	  .num_parents = 1,				\
	  .gate_hw = &gate_##_name.hw,			\
	}

#define REF_CLK_GATE_DIV(_name, _parent_name)		\
	{ .name = #_name,				\
	  .parent_names = (const char *[]){ _parent_name},	\
	  .num_parents = 1,				\
	  .gate_hw = &gate_##_name.hw,			\
	  .rate_hw = &rate_##_name.hw,			\
	}

#define REF_CLK_PM_CPU(_name)				\
	{ .name = #_name,				\
	  .parent_names = (const char *[]){ "TBG-A-P",	\
	      "TBG-B-P", "TBG-A-S", "TBG-B-S"},		\
	  .num_parents = 4,				\
	  .muxrate_hw = &muxrate_##_name.hw,		\
	}

#define REF_CLK_MUX_DD(_name)				\
	{ .name = #_name,				\
	  .parent_names = (const char *[]){ "TBG-A-P",	\
	      "TBG-B-P", "TBG-A-S", "TBG-B-S"},		\
	  .num_parents = 4,				\
	  .mux_hw = &mux_##_name.hw,			\
	  .rate_hw = &rate_##_name.hw,			\
	  .is_double_div = true,			\
	}

/* NB periph clocks */
PERIPH_CLK_FULL_DD(mmc, 2, 0, DIV_SEL2, DIV_SEL2, 16, 13);
PERIPH_CLK_FULL_DD(sata_host, 3, 2, DIV_SEL2, DIV_SEL2, 10, 7);
PERIPH_CLK_FULL_DD(sec_at, 6, 4, DIV_SEL1, DIV_SEL1, 3, 0);
PERIPH_CLK_FULL_DD(sec_dap, 7, 6, DIV_SEL1, DIV_SEL1, 9, 6);
PERIPH_CLK_FULL_DD(tscem, 8, 8, DIV_SEL1, DIV_SEL1, 15, 12);
PERIPH_CLK_FULL(tscem_tmx, 10, 10, DIV_SEL1, 18, clk_table6);
static PERIPH_GATE(avs, 11);
PERIPH_CLK_FULL_DD(pwm, 13, 14, DIV_SEL0, DIV_SEL0, 3, 0);
PERIPH_CLK_FULL_DD(sqf, 12, 12, DIV_SEL1, DIV_SEL1, 27, 24);
static PERIPH_GATE(i2c_2, 16);
static PERIPH_GATE(i2c_1, 17);
PERIPH_CLK_GATE_DIV(ddr_phy, 19, DIV_SEL0, 18, clk_table2);
PERIPH_CLK_FULL_DD(ddr_fclk, 21, 16, DIV_SEL0, DIV_SEL0, 15, 12);
PERIPH_CLK_FULL(trace, 22, 18, DIV_SEL0, 20, clk_table6);
PERIPH_CLK_FULL(counter, 23, 20, DIV_SEL0, 23, clk_table6);
PERIPH_CLK_FULL_DD(eip97, 24, 24, DIV_SEL2, DIV_SEL2, 22, 19);
static PERIPH_PM_CPU(cpu, 22, DIV_SEL0, 28);

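/*
 * The order of the entries below defines the clock indices exposed to
 * DT through the one-cell provider registered at probe time.
 */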
static struct clk_periph_data data_nb[] = {
	REF_CLK_FULL_DD(mmc),
	REF_CLK_FULL_DD(sata_host),
	REF_CLK_FULL_DD(sec_at),
	REF_CLK_FULL_DD(sec_dap),
	REF_CLK_FULL_DD(tscem),
	REF_CLK_FULL(tscem_tmx),
	REF_CLK_GATE(avs, "xtal"),
	REF_CLK_FULL_DD(sqf),
	REF_CLK_FULL_DD(pwm),
	REF_CLK_GATE(i2c_2, "xtal"),
	REF_CLK_GATE(i2c_1, "xtal"),
	REF_CLK_GATE_DIV(ddr_phy, "TBG-A-S"),
	REF_CLK_FULL_DD(ddr_fclk),
	REF_CLK_FULL(trace),
	REF_CLK_FULL(counter),
	REF_CLK_FULL_DD(eip97),
	REF_CLK_PM_CPU(cpu),
	{ },
};

/* SB periph clocks */
PERIPH_CLK_MUX_DD(gbe_50, 6, DIV_SEL2, DIV_SEL2, 6, 9);
PERIPH_CLK_MUX_DD(gbe_core, 8, DIV_SEL1, DIV_SEL1, 18, 21);
PERIPH_CLK_MUX_DD(gbe_125, 10, DIV_SEL1, DIV_SEL1, 6, 9);
static PERIPH_GATE(gbe1_50, 0);
static PERIPH_GATE(gbe0_50, 1);
static PERIPH_GATE(gbe1_125, 2);
static PERIPH_GATE(gbe0_125, 3);
PERIPH_CLK_GATE_DIV(gbe1_core, 4, DIV_SEL1, 13, clk_table1);
PERIPH_CLK_GATE_DIV(gbe0_core, 5, DIV_SEL1, 14, clk_table1);
PERIPH_CLK_GATE_DIV(gbe_bm, 12, DIV_SEL1, 0, clk_table1);
PERIPH_CLK_FULL_DD(sdio, 11, 14, DIV_SEL0, DIV_SEL0, 3, 6);
PERIPH_CLK_FULL_DD(usb32_usb2_sys, 16, 16, DIV_SEL0, DIV_SEL0, 9, 12);
PERIPH_CLK_FULL_DD(usb32_ss_sys, 17, 18, DIV_SEL0, DIV_SEL0, 15, 18);

static struct clk_periph_data data_sb[] = {
	REF_CLK_MUX_DD(gbe_50),
	REF_CLK_MUX_DD(gbe_core),
	REF_CLK_MUX_DD(gbe_125),
	REF_CLK_GATE(gbe1_50, "gbe_50"),
	REF_CLK_GATE(gbe0_50, "gbe_50"),
	REF_CLK_GATE(gbe1_125, "gbe_125"),
	REF_CLK_GATE(gbe0_125, "gbe_125"),
	REF_CLK_GATE_DIV(gbe1_core, "gbe_core"),
	REF_CLK_GATE_DIV(gbe0_core, "gbe_core"),
	REF_CLK_GATE_DIV(gbe_bm, "gbe_core"),
	REF_CLK_FULL_DD(sdio),
	REF_CLK_FULL_DD(usb32_usb2_sys),
	REF_CLK_FULL_DD(usb32_ss_sys),
	{ },
};

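/*
 * Read one 3-bit divider field from a DIV_SELx register. A raw value of
 * 0 or 1 is treated as divide-by-1 so callers never divide by zero.
 */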
static unsigned int get_div(void __iomem *reg, int shift)
{
	u32 val;

	val = (readl(reg) >> shift) & 0x7;
	if (val > 1)
		return val;
	return 1;
}

static unsigned long clk_double_div_recalc_rate(struct clk_hw *hw,
						unsigned long parent_rate)
{
	struct clk_double_div *double_div = to_clk_double_div(hw);
	unsigned int div;

	div = get_div(double_div->reg1, double_div->shift1);
	div *= get_div(double_div->reg2, double_div->shift2);

	return DIV_ROUND_UP_ULL((u64)parent_rate, div);
}

static const struct clk_ops clk_double_div_ops = {
	.recalc_rate = clk_double_div_recalc_rate,
};

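/*
 * Map a DVFS load level to the register and bit offset holding its TBG
 * divider/selector: L0/L1 live in ARMADA_37XX_NB_L0L1 and L2/L3 in
 * ARMADA_37XX_NB_L2L3, with L0 and L2 stored in the upper 16 bits.
 */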
static void armada_3700_pm_dvfs_update_regs(unsigned int load_level,
					    unsigned int *reg,
					    unsigned int *offset)
{
	if (load_level <= ARMADA_37XX_DVFS_LOAD_1)
		*reg = ARMADA_37XX_NB_L0L1;
	else
		*reg = ARMADA_37XX_NB_L2L3;

	if (load_level == ARMADA_37XX_DVFS_LOAD_0 ||
	    load_level == ARMADA_37XX_DVFS_LOAD_2)
		*offset += ARMADA_37XX_NB_CONFIG_SHIFT;
}

static bool armada_3700_pm_dvfs_is_enabled(struct regmap *base)
{
	unsigned int val, reg = ARMADA_37XX_NB_DYN_MOD;

	if (IS_ERR(base))
		return false;

	regmap_read(base, reg, &val);

	return !!(val & BIT(ARMADA_37XX_NB_DFS_EN));
}

static unsigned int armada_3700_pm_dvfs_get_cpu_div(struct regmap *base)
{
	unsigned int reg = ARMADA_37XX_NB_CPU_LOAD;
	unsigned int offset = ARMADA_37XX_NB_TBG_DIV_OFF;
	unsigned int load_level, div;

	/*
	 * This function is always called after
	 * armada_3700_pm_dvfs_is_enabled, so there is no need to check
	 * again whether the base is valid.
	 */
	regmap_read(base, reg, &load_level);

	/*
	 * The register, and the offset within it, that must be read to
	 * get the current divider depend on the load level.
	 */
	load_level &= ARMADA_37XX_NB_CPU_LOAD_MASK;
	armada_3700_pm_dvfs_update_regs(load_level, &reg, &offset);

	regmap_read(base, reg, &div);

	return (div >> offset) & ARMADA_37XX_NB_TBG_DIV_MASK;
}

static unsigned int armada_3700_pm_dvfs_get_cpu_parent(struct regmap *base)
{
	unsigned int reg = ARMADA_37XX_NB_CPU_LOAD;
	unsigned int offset = ARMADA_37XX_NB_TBG_SEL_OFF;
	unsigned int load_level, sel;

	/*
	 * This function is always called after
	 * armada_3700_pm_dvfs_is_enabled, so there is no need to check
	 * again whether the base is valid.
	 */
	regmap_read(base, reg, &load_level);

	/*
	 * The register, and the offset within it, that must be read to
	 * get the current parent selector depend on the load level.
	 */
	load_level &= ARMADA_37XX_NB_CPU_LOAD_MASK;
	armada_3700_pm_dvfs_update_regs(load_level, &reg, &offset);

	regmap_read(base, reg, &sel);

	return (sel >> offset) & ARMADA_37XX_NB_TBG_SEL_MASK;
}

static u8 clk_pm_cpu_get_parent(struct clk_hw *hw)
{
	struct clk_pm_cpu *pm_cpu = to_clk_pm_cpu(hw);
	u32 val;

	if (armada_3700_pm_dvfs_is_enabled(pm_cpu->nb_pm_base)) {
		val = armada_3700_pm_dvfs_get_cpu_parent(pm_cpu->nb_pm_base);
	} else {
		val = readl(pm_cpu->reg_mux) >> pm_cpu->shift_mux;
		val &= pm_cpu->mask_mux;
	}

	return val;
}

static unsigned long clk_pm_cpu_recalc_rate(struct clk_hw *hw,
					    unsigned long parent_rate)
{
	struct clk_pm_cpu *pm_cpu = to_clk_pm_cpu(hw);
	unsigned int div;

	if (armada_3700_pm_dvfs_is_enabled(pm_cpu->nb_pm_base))
		div = armada_3700_pm_dvfs_get_cpu_div(pm_cpu->nb_pm_base);
	else
		div = get_div(pm_cpu->reg_div, pm_cpu->shift_div);

	return DIV_ROUND_UP_ULL((u64)parent_rate, div);
}

static long clk_pm_cpu_round_rate(struct clk_hw *hw, unsigned long rate,
				  unsigned long *parent_rate)
{
	struct clk_pm_cpu *pm_cpu = to_clk_pm_cpu(hw);
	struct regmap *base = pm_cpu->nb_pm_base;
	unsigned int div = *parent_rate / rate;
	unsigned int load_level;

	/* only available when DVFS is enabled */
	if (!armada_3700_pm_dvfs_is_enabled(base))
		return -EINVAL;

	for (load_level = 0; load_level < LOAD_LEVEL_NR; load_level++) {
		unsigned int reg, val, offset = ARMADA_37XX_NB_TBG_DIV_OFF;

		armada_3700_pm_dvfs_update_regs(load_level, &reg, &offset);

		regmap_read(base, reg, &val);

		val >>= offset;
		val &= ARMADA_37XX_NB_TBG_DIV_MASK;

		if (val == div)
			/*
			 * We found a load level matching the target
			 * divider, so this rate can be reached.
			 */
			return *parent_rate / div;
	}

	/* We didn't find any valid divider */
	return -EINVAL;
}

/*
 * Workaround when the base CPU frequency is 1000 or 1200 MHz.
 *
 * Switching the CPU from the L2 or L3 frequencies (250/300 or 200 MHz
 * respectively) to the L0 frequency (1/1.2 GHz) requires a significant
 * amount of time to let VDD stabilize to the appropriate
 * voltage. This amount of time is large enough that it cannot be
 * covered by the hardware countdown register. Due to this, the CPU
 * might start operating at L0 before the voltage is stabilized,
 * leading to CPU stalls.
 *
 * To work around this problem, we prevent switching directly from the
 * L2/L3 frequencies to the L0 frequency, and instead switch to the L1
 * frequency in-between. The sequence therefore becomes:
 * 1. First switch from L2/L3 (200/250/300 MHz) to L1 (500/600 MHz)
 * 2. Sleep 20ms to let the VDD voltage stabilize
 * 3. Then switch from L1 (500/600 MHz) to L0 (1000/1200 MHz).
 */
static void clk_pm_cpu_set_rate_wa(struct clk_pm_cpu *pm_cpu,
				   unsigned int new_level, unsigned long rate,
				   struct regmap *base)
{
	unsigned int cur_level;

	regmap_read(base, ARMADA_37XX_NB_CPU_LOAD, &cur_level);
	cur_level &= ARMADA_37XX_NB_CPU_LOAD_MASK;

	if (cur_level == new_level)
		return;

	/*
	 * System wants to go to L1 on its own. If we are going from L2/L3,
	 * remember when the 20ms will expire. If coming from L0, set the
	 * value so that the next switch to L0 won't have to wait.
	 */
	if (new_level == ARMADA_37XX_DVFS_LOAD_1) {
		if (cur_level == ARMADA_37XX_DVFS_LOAD_0)
			pm_cpu->l1_expiration = jiffies;
		else
			pm_cpu->l1_expiration = jiffies + msecs_to_jiffies(20);
		return;
	}

	/*
	 * If we are setting to L2/L3, just invalidate the L1 expiration
	 * time, sleeping is not needed.
	 */
	if (rate < 1000*1000*1000)
		goto invalidate_l1_exp;

	/*
	 * We are going to L0 with rate >= 1GHz. Check whether we have been
	 * at L1 for long enough. If not, go to L1 for 20ms.
	 */
	if (pm_cpu->l1_expiration && jiffies >= pm_cpu->l1_expiration)
		goto invalidate_l1_exp;

	regmap_update_bits(base, ARMADA_37XX_NB_CPU_LOAD,
			   ARMADA_37XX_NB_CPU_LOAD_MASK,
			   ARMADA_37XX_DVFS_LOAD_1);
	msleep(20);

invalidate_l1_exp:
	pm_cpu->l1_expiration = 0;
}

static int clk_pm_cpu_set_rate(struct clk_hw *hw, unsigned long rate,
			       unsigned long parent_rate)
{
	struct clk_pm_cpu *pm_cpu = to_clk_pm_cpu(hw);
	struct regmap *base = pm_cpu->nb_pm_base;
	unsigned int div = parent_rate / rate;
	unsigned int load_level;

	/* only available when DVFS is enabled */
	if (!armada_3700_pm_dvfs_is_enabled(base))
		return -EINVAL;

	for (load_level = 0; load_level < LOAD_LEVEL_NR; load_level++) {
		unsigned int reg, mask, val,
			offset = ARMADA_37XX_NB_TBG_DIV_OFF;

		armada_3700_pm_dvfs_update_regs(load_level, &reg, &offset);

		regmap_read(base, reg, &val);
		val >>= offset;
		val &= ARMADA_37XX_NB_TBG_DIV_MASK;

		if (val == div) {
			/*
			 * We found a load level matching the target
			 * divider, switch to this load level and
			 * return.
			 */
			reg = ARMADA_37XX_NB_CPU_LOAD;
			mask = ARMADA_37XX_NB_CPU_LOAD_MASK;

			/* Apply workaround when base CPU frequency is 1000 or 1200 MHz */
			if (parent_rate >= 1000*1000*1000)
				clk_pm_cpu_set_rate_wa(pm_cpu, load_level, rate, base);

			regmap_update_bits(base, reg, mask, load_level);

			return rate;
		}
	}

	/* We didn't find any valid divider */
	return -EINVAL;
}

static const struct clk_ops clk_pm_cpu_ops = {
	.get_parent = clk_pm_cpu_get_parent,
	.round_rate = clk_pm_cpu_round_rate,
	.set_rate = clk_pm_cpu_set_rate,
	.recalc_rate = clk_pm_cpu_recalc_rate,
};

static const struct of_device_id armada_3700_periph_clock_of_match[] = {
	{ .compatible = "marvell,armada-3700-periph-clock-nb",
	  .data = data_nb, },
	{ .compatible = "marvell,armada-3700-periph-clock-sb",
	  .data = data_sb, },
	{ }
};

static int armada_3700_add_composite_clk(const struct clk_periph_data *data,
					 void __iomem *reg, spinlock_t *lock,
					 struct device *dev, struct clk_hw **hw)
{
	const struct clk_ops *mux_ops = NULL, *gate_ops = NULL,
		*rate_ops = NULL;
	struct clk_hw *mux_hw = NULL, *gate_hw = NULL, *rate_hw = NULL;

	if (data->mux_hw) {
		struct clk_mux *mux;

		mux_hw = data->mux_hw;
		mux = to_clk_mux(mux_hw);
		mux->lock = lock;
		mux_ops = mux_hw->init->ops;
		mux->reg = reg + (u64)mux->reg;
	}

	if (data->gate_hw) {
		struct clk_gate *gate;

		gate_hw = data->gate_hw;
		gate = to_clk_gate(gate_hw);
		gate->lock = lock;
		gate_ops = gate_hw->init->ops;
		gate->reg = reg + (u64)gate->reg;
		gate->flags = CLK_GATE_SET_TO_DISABLE;
	}

	if (data->rate_hw) {
		rate_hw = data->rate_hw;
		rate_ops = rate_hw->init->ops;
		if (data->is_double_div) {
			struct clk_double_div *rate;

			rate = to_clk_double_div(rate_hw);
			rate->reg1 = reg + (u64)rate->reg1;
			rate->reg2 = reg + (u64)rate->reg2;
		} else {
			struct clk_divider *rate = to_clk_divider(rate_hw);
			const struct clk_div_table *clkt;
			int table_size = 0;

			rate->reg = reg + (u64)rate->reg;
			for (clkt = rate->table; clkt->div; clkt++)
				table_size++;
			rate->width = order_base_2(table_size);
			rate->lock = lock;
		}
	}

	if (data->muxrate_hw) {
		struct clk_pm_cpu *pmcpu_clk;
		struct clk_hw *muxrate_hw = data->muxrate_hw;
		struct regmap *map;

		pmcpu_clk = to_clk_pm_cpu(muxrate_hw);
		pmcpu_clk->reg_mux = reg + (u64)pmcpu_clk->reg_mux;
		pmcpu_clk->reg_div = reg + (u64)pmcpu_clk->reg_div;

		mux_hw = muxrate_hw;
		rate_hw = muxrate_hw;
		mux_ops = muxrate_hw->init->ops;
		rate_ops = muxrate_hw->init->ops;

		map = syscon_regmap_lookup_by_compatible(
				"marvell,armada-3700-nb-pm");
		pmcpu_clk->nb_pm_base = map;
	}

	*hw = clk_hw_register_composite(dev, data->name, data->parent_names,
					data->num_parents, mux_hw,
					mux_ops, rate_hw, rate_ops,
					gate_hw, gate_ops, CLK_IGNORE_UNUSED);

	return PTR_ERR_OR_ZERO(*hw);
}

static int armada_3700_periph_clock_probe(struct platform_device *pdev)
{
	struct clk_periph_driver_data *driver_data;
	struct device_node *np = pdev->dev.of_node;
	const struct clk_periph_data *data;
	struct device *dev = &pdev->dev;
	int num_periph = 0, i, ret;
	struct resource *res;
	void __iomem *reg;

	data = of_device_get_match_data(dev);
	if (!data)
		return -ENODEV;

	while (data[num_periph].name)
		num_periph++;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	reg = devm_ioremap_resource(dev, res);
	if (IS_ERR(reg))
		return PTR_ERR(reg);

	driver_data = devm_kzalloc(dev, sizeof(*driver_data), GFP_KERNEL);
	if (!driver_data)
		return -ENOMEM;

	driver_data->hw_data = devm_kzalloc(dev,
					    struct_size(driver_data->hw_data,
							hws, num_periph),
					    GFP_KERNEL);
	if (!driver_data->hw_data)
		return -ENOMEM;
	driver_data->hw_data->num = num_periph;

	spin_lock_init(&driver_data->lock);

	for (i = 0; i < num_periph; i++) {
		struct clk_hw **hw = &driver_data->hw_data->hws[i];

		if (armada_3700_add_composite_clk(&data[i], reg,
						  &driver_data->lock, dev, hw))
			dev_err(dev, "Can't register periph clock %s\n",
				data[i].name);
	}

	ret = of_clk_add_hw_provider(np, of_clk_hw_onecell_get,
				     driver_data->hw_data);
	if (ret) {
		for (i = 0; i < num_periph; i++)
			clk_hw_unregister(driver_data->hw_data->hws[i]);
		return ret;
	}

	platform_set_drvdata(pdev, driver_data);

	return 0;
}

static int armada_3700_periph_clock_remove(struct platform_device *pdev)
{
	struct clk_periph_driver_data *data = platform_get_drvdata(pdev);
	struct clk_hw_onecell_data *hw_data = data->hw_data;
	int i;

	of_clk_del_provider(pdev->dev.of_node);

	for (i = 0; i < hw_data->num; i++)
		clk_hw_unregister(hw_data->hws[i]);

	return 0;
}

static struct platform_driver armada_3700_periph_clock_driver = {
	.probe = armada_3700_periph_clock_probe,
	.remove = armada_3700_periph_clock_remove,
	.driver = {
		.name = "marvell-armada-3700-periph-clock",
		.of_match_table = armada_3700_periph_clock_of_match,
	},
};

builtin_platform_driver(armada_3700_periph_clock_driver);