// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2020 BAIKAL ELECTRONICS, JSC
 *
 * Authors:
 *   Serge Semin <Sergey.Semin@baikalelectronics.ru>
 *   Dmitry Dunaev <dmitry.dunaev@baikalelectronics.ru>
 *
 * Baikal-T1 CCU Dividers interface driver
 */
12 #define pr_fmt(fmt) "bt1-ccu-div: " fmt
#include <linux/kernel.h>
#include <linux/printk.h>
#include <linux/bits.h>
#include <linux/bitfield.h>
#include <linux/slab.h>
#include <linux/clk-provider.h>
#include <linux/of.h>
#include <linux/spinlock.h>
#include <linux/regmap.h>
#include <linux/delay.h>
#include <linux/time64.h>
#include <linux/debugfs.h>
/* Divider Control register offset (from the divider block base). */
#define CCU_DIV_CTL 0x00
/* Divider output enable flag. */
#define CCU_DIV_CTL_EN BIT(0)
/* Reset-domain trigger flag (see ccu_div_reset_domain()). */
#define CCU_DIV_CTL_RST BIT(1)
/* Latch the CLKDIV field into the divider hardware (starts lock sequence). */
#define CCU_DIV_CTL_SET_CLKDIV BIT(2)
/* LSB position of the variable-width CLKDIV field. */
#define CCU_DIV_CTL_CLKDIV_FLD 4
#define CCU_DIV_CTL_CLKDIV_MASK(_width) \
GENMASK((_width) + CCU_DIV_CTL_CLKDIV_FLD - 1, CCU_DIV_CTL_CLKDIV_FLD)
/* Divider lock-status flag; its bit position differs between CCU blocks. */
#define CCU_DIV_CTL_LOCK_SHIFTED BIT(27)
/* Gate the reference clock buffer (bit set == buffer gated/disabled). */
#define CCU_DIV_CTL_GATE_REF_BUF BIT(28)
#define CCU_DIV_CTL_LOCK_NORMAL BIT(31)
/* Post-reset settle delay and lock-status poll retry budget. */
#define CCU_DIV_RST_DELAY_US 1
#define CCU_DIV_LOCK_CHECK_RETRIES 50
/* Raw CLKDIV value range representable by a mask of the given width. */
#define CCU_DIV_CLKDIV_MIN 0
#define CCU_DIV_CLKDIV_MAX(_mask) \
((_mask) >> CCU_DIV_CTL_CLKDIV_FLD)
48 * Use the next two methods until there are generic field setter and
49 * getter available with non-constant mask support.
51 static inline u32 ccu_div_get(u32 mask, u32 val)
53 return (val & mask) >> CCU_DIV_CTL_CLKDIV_FLD;
56 static inline u32 ccu_div_prep(u32 mask, u32 val)
58 return (val << CCU_DIV_CTL_CLKDIV_FLD) & mask;
61 static inline unsigned long ccu_div_lock_delay_ns(unsigned long ref_clk,
64 u64 ns = 4ULL * (div ?: 1) * NSEC_PER_SEC;
/* Divider output frequency; a zero divider means bypass (divide by one). */
static inline unsigned long ccu_div_calc_freq(unsigned long ref_clk,
					      unsigned long div)
{
	return ref_clk / (div ?: 1);
}
77 static int ccu_div_var_update_clkdiv(struct ccu_div *div,
78 unsigned long parent_rate,
79 unsigned long divider)
86 nd = ccu_div_lock_delay_ns(parent_rate, divider);
88 if (div->features & CCU_DIV_LOCK_SHIFTED)
89 lock = CCU_DIV_CTL_LOCK_SHIFTED;
91 lock = CCU_DIV_CTL_LOCK_NORMAL;
93 regmap_update_bits(div->sys_regs, div->reg_ctl,
94 CCU_DIV_CTL_SET_CLKDIV, CCU_DIV_CTL_SET_CLKDIV);
97 * Until there is nsec-version of readl_poll_timeout() is available
98 * we have to implement the next polling loop.
100 count = CCU_DIV_LOCK_CHECK_RETRIES;
103 regmap_read(div->sys_regs, div->reg_ctl, &val);
111 static int ccu_div_var_enable(struct clk_hw *hw)
113 struct clk_hw *parent_hw = clk_hw_get_parent(hw);
114 struct ccu_div *div = to_ccu_div(hw);
120 pr_err("Can't enable '%s' with no parent", clk_hw_get_name(hw));
124 regmap_read(div->sys_regs, div->reg_ctl, &val);
125 if (val & CCU_DIV_CTL_EN)
128 spin_lock_irqsave(&div->lock, flags);
129 ret = ccu_div_var_update_clkdiv(div, clk_hw_get_rate(parent_hw),
130 ccu_div_get(div->mask, val));
132 regmap_update_bits(div->sys_regs, div->reg_ctl,
133 CCU_DIV_CTL_EN, CCU_DIV_CTL_EN);
134 spin_unlock_irqrestore(&div->lock, flags);
136 pr_err("Divider '%s' lock timed out\n", clk_hw_get_name(hw));
141 static int ccu_div_gate_enable(struct clk_hw *hw)
143 struct ccu_div *div = to_ccu_div(hw);
146 spin_lock_irqsave(&div->lock, flags);
147 regmap_update_bits(div->sys_regs, div->reg_ctl,
148 CCU_DIV_CTL_EN, CCU_DIV_CTL_EN);
149 spin_unlock_irqrestore(&div->lock, flags);
154 static void ccu_div_gate_disable(struct clk_hw *hw)
156 struct ccu_div *div = to_ccu_div(hw);
159 spin_lock_irqsave(&div->lock, flags);
160 regmap_update_bits(div->sys_regs, div->reg_ctl, CCU_DIV_CTL_EN, 0);
161 spin_unlock_irqrestore(&div->lock, flags);
164 static int ccu_div_gate_is_enabled(struct clk_hw *hw)
166 struct ccu_div *div = to_ccu_div(hw);
169 regmap_read(div->sys_regs, div->reg_ctl, &val);
171 return !!(val & CCU_DIV_CTL_EN);
174 static int ccu_div_buf_enable(struct clk_hw *hw)
176 struct ccu_div *div = to_ccu_div(hw);
179 spin_lock_irqsave(&div->lock, flags);
180 regmap_update_bits(div->sys_regs, div->reg_ctl,
181 CCU_DIV_CTL_GATE_REF_BUF, 0);
182 spin_unlock_irqrestore(&div->lock, flags);
187 static void ccu_div_buf_disable(struct clk_hw *hw)
189 struct ccu_div *div = to_ccu_div(hw);
192 spin_lock_irqsave(&div->lock, flags);
193 regmap_update_bits(div->sys_regs, div->reg_ctl,
194 CCU_DIV_CTL_GATE_REF_BUF, CCU_DIV_CTL_GATE_REF_BUF);
195 spin_unlock_irqrestore(&div->lock, flags);
198 static int ccu_div_buf_is_enabled(struct clk_hw *hw)
200 struct ccu_div *div = to_ccu_div(hw);
203 regmap_read(div->sys_regs, div->reg_ctl, &val);
205 return !(val & CCU_DIV_CTL_GATE_REF_BUF);
208 static unsigned long ccu_div_var_recalc_rate(struct clk_hw *hw,
209 unsigned long parent_rate)
211 struct ccu_div *div = to_ccu_div(hw);
212 unsigned long divider;
215 regmap_read(div->sys_regs, div->reg_ctl, &val);
216 divider = ccu_div_get(div->mask, val);
218 return ccu_div_calc_freq(parent_rate, divider);
221 static inline unsigned long ccu_div_var_calc_divider(unsigned long rate,
222 unsigned long parent_rate,
225 unsigned long divider;
227 divider = parent_rate / rate;
228 return clamp_t(unsigned long, divider, CCU_DIV_CLKDIV_MIN,
229 CCU_DIV_CLKDIV_MAX(mask));
232 static long ccu_div_var_round_rate(struct clk_hw *hw, unsigned long rate,
233 unsigned long *parent_rate)
235 struct ccu_div *div = to_ccu_div(hw);
236 unsigned long divider;
238 divider = ccu_div_var_calc_divider(rate, *parent_rate, div->mask);
240 return ccu_div_calc_freq(*parent_rate, divider);
244 * This method is used for the clock divider blocks, which support the
245 * on-the-fly rate change. So due to lacking the EN bit functionality
246 * they can't be gated before the rate adjustment.
248 static int ccu_div_var_set_rate_slow(struct clk_hw *hw, unsigned long rate,
249 unsigned long parent_rate)
251 struct ccu_div *div = to_ccu_div(hw);
252 unsigned long flags, divider;
256 divider = ccu_div_var_calc_divider(rate, parent_rate, div->mask);
257 if (divider == 1 && div->features & CCU_DIV_SKIP_ONE) {
259 } else if (div->features & CCU_DIV_SKIP_ONE_TO_THREE) {
260 if (divider == 1 || divider == 2)
262 else if (divider == 3)
266 val = ccu_div_prep(div->mask, divider);
268 spin_lock_irqsave(&div->lock, flags);
269 regmap_update_bits(div->sys_regs, div->reg_ctl, div->mask, val);
270 ret = ccu_div_var_update_clkdiv(div, parent_rate, divider);
271 spin_unlock_irqrestore(&div->lock, flags);
273 pr_err("Divider '%s' lock timed out\n", clk_hw_get_name(hw));
279 * This method is used for the clock divider blocks, which don't support
280 * the on-the-fly rate change.
282 static int ccu_div_var_set_rate_fast(struct clk_hw *hw, unsigned long rate,
283 unsigned long parent_rate)
285 struct ccu_div *div = to_ccu_div(hw);
286 unsigned long flags, divider;
289 divider = ccu_div_var_calc_divider(rate, parent_rate, div->mask);
290 val = ccu_div_prep(div->mask, divider);
293 * Also disable the clock divider block if it was enabled by default
294 * or by the bootloader.
296 spin_lock_irqsave(&div->lock, flags);
297 regmap_update_bits(div->sys_regs, div->reg_ctl,
298 div->mask | CCU_DIV_CTL_EN, val);
299 spin_unlock_irqrestore(&div->lock, flags);
304 static unsigned long ccu_div_fixed_recalc_rate(struct clk_hw *hw,
305 unsigned long parent_rate)
307 struct ccu_div *div = to_ccu_div(hw);
309 return ccu_div_calc_freq(parent_rate, div->divider);
312 static long ccu_div_fixed_round_rate(struct clk_hw *hw, unsigned long rate,
313 unsigned long *parent_rate)
315 struct ccu_div *div = to_ccu_div(hw);
317 return ccu_div_calc_freq(*parent_rate, div->divider);
/* Fixed dividers can't change their ratio: accept the request as a no-op. */
static int ccu_div_fixed_set_rate(struct clk_hw *hw, unsigned long rate,
				  unsigned long parent_rate)
{
	return 0;
}
326 int ccu_div_reset_domain(struct ccu_div *div)
330 if (!div || !(div->features & CCU_DIV_RESET_DOMAIN))
333 spin_lock_irqsave(&div->lock, flags);
334 regmap_update_bits(div->sys_regs, div->reg_ctl,
335 CCU_DIV_CTL_RST, CCU_DIV_CTL_RST);
336 spin_unlock_irqrestore(&div->lock, flags);
338 /* The next delay must be enough to cover all the resets. */
339 udelay(CCU_DIV_RST_DELAY_US);
344 #ifdef CONFIG_DEBUG_FS
346 struct ccu_div_dbgfs_bit {
352 #define CCU_DIV_DBGFS_BIT_ATTR(_name, _mask) { \
357 static const struct ccu_div_dbgfs_bit ccu_div_bits[] = {
358 CCU_DIV_DBGFS_BIT_ATTR("div_en", CCU_DIV_CTL_EN),
359 CCU_DIV_DBGFS_BIT_ATTR("div_rst", CCU_DIV_CTL_RST),
360 CCU_DIV_DBGFS_BIT_ATTR("div_bypass", CCU_DIV_CTL_SET_CLKDIV),
361 CCU_DIV_DBGFS_BIT_ATTR("div_buf", CCU_DIV_CTL_GATE_REF_BUF),
362 CCU_DIV_DBGFS_BIT_ATTR("div_lock", CCU_DIV_CTL_LOCK_NORMAL)
365 #define CCU_DIV_DBGFS_BIT_NUM ARRAY_SIZE(ccu_div_bits)
/*
 * It can be dangerous to change the Divider settings behind clock framework
 * back, therefore we don't provide any kernel config based compile time option
 * for this feature to enable.
 */
#undef CCU_DIV_ALLOW_WRITE_DEBUGFS
#ifdef CCU_DIV_ALLOW_WRITE_DEBUGFS

/* Set (val != 0) or clear a single CTL flag from debugfs. */
static int ccu_div_dbgfs_bit_set(void *priv, u64 val)
{
	const struct ccu_div_dbgfs_bit *bit = priv;
	struct ccu_div *div = bit->div;
	unsigned long flags;

	spin_lock_irqsave(&div->lock, flags);
	regmap_update_bits(div->sys_regs, div->reg_ctl,
			   bit->mask, val ? bit->mask : 0);
	spin_unlock_irqrestore(&div->lock, flags);

	return 0;
}

/* Write a raw CLKDIV value from debugfs, clamped to the valid range. */
static int ccu_div_dbgfs_var_clkdiv_set(void *priv, u64 val)
{
	struct ccu_div *div = priv;
	unsigned long flags;
	u32 data;

	val = clamp_t(u64, val, CCU_DIV_CLKDIV_MIN,
		      CCU_DIV_CLKDIV_MAX(div->mask));
	data = ccu_div_prep(div->mask, val);

	spin_lock_irqsave(&div->lock, flags);
	regmap_update_bits(div->sys_regs, div->reg_ctl, div->mask, data);
	spin_unlock_irqrestore(&div->lock, flags);

	return 0;
}

#define ccu_div_dbgfs_mode 0644

#else /* !CCU_DIV_ALLOW_WRITE_DEBUGFS */

#define ccu_div_dbgfs_bit_set NULL
#define ccu_div_dbgfs_var_clkdiv_set NULL
#define ccu_div_dbgfs_mode 0444

#endif /* !CCU_DIV_ALLOW_WRITE_DEBUGFS */
416 static int ccu_div_dbgfs_bit_get(void *priv, u64 *val)
418 const struct ccu_div_dbgfs_bit *bit = priv;
419 struct ccu_div *div = bit->div;
422 regmap_read(div->sys_regs, div->reg_ctl, &data);
423 *val = !!(data & bit->mask);
427 DEFINE_DEBUGFS_ATTRIBUTE(ccu_div_dbgfs_bit_fops,
428 ccu_div_dbgfs_bit_get, ccu_div_dbgfs_bit_set, "%llu\n");
430 static int ccu_div_dbgfs_var_clkdiv_get(void *priv, u64 *val)
432 struct ccu_div *div = priv;
435 regmap_read(div->sys_regs, div->reg_ctl, &data);
436 *val = ccu_div_get(div->mask, data);
440 DEFINE_DEBUGFS_ATTRIBUTE(ccu_div_dbgfs_var_clkdiv_fops,
441 ccu_div_dbgfs_var_clkdiv_get, ccu_div_dbgfs_var_clkdiv_set, "%llu\n");
443 static int ccu_div_dbgfs_fixed_clkdiv_get(void *priv, u64 *val)
445 struct ccu_div *div = priv;
451 DEFINE_DEBUGFS_ATTRIBUTE(ccu_div_dbgfs_fixed_clkdiv_fops,
452 ccu_div_dbgfs_fixed_clkdiv_get, NULL, "%llu\n");
454 static void ccu_div_var_debug_init(struct clk_hw *hw, struct dentry *dentry)
456 struct ccu_div *div = to_ccu_div(hw);
457 struct ccu_div_dbgfs_bit *bits;
458 int didx, bidx, num = 2;
461 num += !!(div->flags & CLK_SET_RATE_GATE) +
462 !!(div->features & CCU_DIV_RESET_DOMAIN);
464 bits = kcalloc(num, sizeof(*bits), GFP_KERNEL);
468 for (didx = 0, bidx = 0; bidx < CCU_DIV_DBGFS_BIT_NUM; ++bidx) {
469 name = ccu_div_bits[bidx].name;
470 if (!(div->flags & CLK_SET_RATE_GATE) &&
471 !strcmp("div_en", name)) {
475 if (!(div->features & CCU_DIV_RESET_DOMAIN) &&
476 !strcmp("div_rst", name)) {
480 if (!strcmp("div_buf", name))
483 bits[didx] = ccu_div_bits[bidx];
484 bits[didx].div = div;
486 if (div->features & CCU_DIV_LOCK_SHIFTED &&
487 !strcmp("div_lock", name)) {
488 bits[didx].mask = CCU_DIV_CTL_LOCK_SHIFTED;
491 debugfs_create_file_unsafe(bits[didx].name, ccu_div_dbgfs_mode,
493 &ccu_div_dbgfs_bit_fops);
497 debugfs_create_file_unsafe("div_clkdiv", ccu_div_dbgfs_mode, dentry,
498 div, &ccu_div_dbgfs_var_clkdiv_fops);
501 static void ccu_div_gate_debug_init(struct clk_hw *hw, struct dentry *dentry)
503 struct ccu_div *div = to_ccu_div(hw);
504 struct ccu_div_dbgfs_bit *bit;
506 bit = kmalloc(sizeof(*bit), GFP_KERNEL);
510 *bit = ccu_div_bits[0];
512 debugfs_create_file_unsafe(bit->name, ccu_div_dbgfs_mode, dentry, bit,
513 &ccu_div_dbgfs_bit_fops);
515 debugfs_create_file_unsafe("div_clkdiv", 0400, dentry, div,
516 &ccu_div_dbgfs_fixed_clkdiv_fops);
519 static void ccu_div_buf_debug_init(struct clk_hw *hw, struct dentry *dentry)
521 struct ccu_div *div = to_ccu_div(hw);
522 struct ccu_div_dbgfs_bit *bit;
524 bit = kmalloc(sizeof(*bit), GFP_KERNEL);
528 *bit = ccu_div_bits[3];
530 debugfs_create_file_unsafe(bit->name, ccu_div_dbgfs_mode, dentry, bit,
531 &ccu_div_dbgfs_bit_fops);
534 static void ccu_div_fixed_debug_init(struct clk_hw *hw, struct dentry *dentry)
536 struct ccu_div *div = to_ccu_div(hw);
538 debugfs_create_file_unsafe("div_clkdiv", 0400, dentry, div,
539 &ccu_div_dbgfs_fixed_clkdiv_fops);
#else /* !CONFIG_DEBUG_FS */

/* No debugfs support: the clk framework accepts NULL .debug_init callbacks. */
#define ccu_div_var_debug_init NULL
#define ccu_div_gate_debug_init NULL
#define ccu_div_buf_debug_init NULL
#define ccu_div_fixed_debug_init NULL

#endif /* !CONFIG_DEBUG_FS */
551 static const struct clk_ops ccu_div_var_gate_to_set_ops = {
552 .enable = ccu_div_var_enable,
553 .disable = ccu_div_gate_disable,
554 .is_enabled = ccu_div_gate_is_enabled,
555 .recalc_rate = ccu_div_var_recalc_rate,
556 .round_rate = ccu_div_var_round_rate,
557 .set_rate = ccu_div_var_set_rate_fast,
558 .debug_init = ccu_div_var_debug_init
561 static const struct clk_ops ccu_div_var_nogate_ops = {
562 .recalc_rate = ccu_div_var_recalc_rate,
563 .round_rate = ccu_div_var_round_rate,
564 .set_rate = ccu_div_var_set_rate_slow,
565 .debug_init = ccu_div_var_debug_init
568 static const struct clk_ops ccu_div_gate_ops = {
569 .enable = ccu_div_gate_enable,
570 .disable = ccu_div_gate_disable,
571 .is_enabled = ccu_div_gate_is_enabled,
572 .recalc_rate = ccu_div_fixed_recalc_rate,
573 .round_rate = ccu_div_fixed_round_rate,
574 .set_rate = ccu_div_fixed_set_rate,
575 .debug_init = ccu_div_gate_debug_init
578 static const struct clk_ops ccu_div_buf_ops = {
579 .enable = ccu_div_buf_enable,
580 .disable = ccu_div_buf_disable,
581 .is_enabled = ccu_div_buf_is_enabled,
582 .debug_init = ccu_div_buf_debug_init
585 static const struct clk_ops ccu_div_fixed_ops = {
586 .recalc_rate = ccu_div_fixed_recalc_rate,
587 .round_rate = ccu_div_fixed_round_rate,
588 .set_rate = ccu_div_fixed_set_rate,
589 .debug_init = ccu_div_fixed_debug_init
592 struct ccu_div *ccu_div_hw_register(const struct ccu_div_init_data *div_init)
594 struct clk_parent_data parent_data = { };
595 struct clk_init_data hw_init = { };
600 return ERR_PTR(-EINVAL);
602 div = kzalloc(sizeof(*div), GFP_KERNEL);
604 return ERR_PTR(-ENOMEM);
607 * Note since Baikal-T1 System Controller registers are MMIO-backed
608 * we won't check the regmap IO operations return status, because it
609 * must be zero anyway.
611 div->hw.init = &hw_init;
612 div->id = div_init->id;
613 div->reg_ctl = div_init->base + CCU_DIV_CTL;
614 div->sys_regs = div_init->sys_regs;
615 div->flags = div_init->flags;
616 div->features = div_init->features;
617 spin_lock_init(&div->lock);
619 hw_init.name = div_init->name;
620 hw_init.flags = div_init->flags;
622 if (div_init->type == CCU_DIV_VAR) {
623 if (hw_init.flags & CLK_SET_RATE_GATE)
624 hw_init.ops = &ccu_div_var_gate_to_set_ops;
626 hw_init.ops = &ccu_div_var_nogate_ops;
627 div->mask = CCU_DIV_CTL_CLKDIV_MASK(div_init->width);
628 } else if (div_init->type == CCU_DIV_GATE) {
629 hw_init.ops = &ccu_div_gate_ops;
630 div->divider = div_init->divider;
631 } else if (div_init->type == CCU_DIV_BUF) {
632 hw_init.ops = &ccu_div_buf_ops;
633 } else if (div_init->type == CCU_DIV_FIXED) {
634 hw_init.ops = &ccu_div_fixed_ops;
635 div->divider = div_init->divider;
641 if (!div_init->parent_name) {
645 parent_data.fw_name = div_init->parent_name;
646 parent_data.name = div_init->parent_name;
647 hw_init.parent_data = &parent_data;
648 hw_init.num_parents = 1;
650 ret = of_clk_hw_register(div_init->np, &div->hw);
662 void ccu_div_hw_unregister(struct ccu_div *div)
664 clk_hw_unregister(&div->hw);