// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2013, 2018, The Linux Foundation. All rights reserved.
 */

#include <linux/kernel.h>
#include <linux/bitops.h>
#include <linux/err.h>
#include <linux/bug.h>
#include <linux/export.h>
#include <linux/clk-provider.h>
#include <linux/delay.h>
#include <linux/rational.h>
#include <linux/regmap.h>
#include <linux/math64.h>
#include <linux/minmax.h>
#include <linux/slab.h>

#include <asm/div64.h>

#include "clk-rcg.h"
#include "common.h"
#define CMD_REG			0x0
#define CMD_UPDATE		BIT(0)
#define CMD_ROOT_EN		BIT(1)
#define CMD_DIRTY_CFG		BIT(4)
#define CMD_DIRTY_N		BIT(5)
#define CMD_DIRTY_M		BIT(6)
#define CMD_DIRTY_D		BIT(7)
#define CMD_ROOT_OFF		BIT(31)

#define CFG_REG			0x4
#define CFG_SRC_DIV_SHIFT	0
#define CFG_SRC_SEL_SHIFT	8
#define CFG_SRC_SEL_MASK	(0x7 << CFG_SRC_SEL_SHIFT)
#define CFG_MODE_SHIFT		12
#define CFG_MODE_MASK		(0x3 << CFG_MODE_SHIFT)
#define CFG_MODE_DUAL_EDGE	(0x2 << CFG_MODE_SHIFT)
#define CFG_HW_CLK_CTRL_MASK	BIT(20)

#define M_REG			0x8
#define N_REG			0xc
#define D_REG			0x10

#define RCG_CFG_OFFSET(rcg)	((rcg)->cmd_rcgr + (rcg)->cfg_off + CFG_REG)
#define RCG_M_OFFSET(rcg)	((rcg)->cmd_rcgr + (rcg)->cfg_off + M_REG)
#define RCG_N_OFFSET(rcg)	((rcg)->cmd_rcgr + (rcg)->cfg_off + N_REG)
#define RCG_D_OFFSET(rcg)	((rcg)->cmd_rcgr + (rcg)->cfg_off + D_REG)
/* Dynamic Frequency Scaling */
#define MAX_PERF_LEVEL		8
#define SE_CMD_DFSR_OFFSET	0x14
#define SE_CMD_DFS_EN		BIT(0)
#define SE_PERF_DFSR(level)	(0x1c + 0x4 * (level))
#define SE_PERF_M_DFSR(level)	(0x5c + 0x4 * (level))
#define SE_PERF_N_DFSR(level)	(0x9c + 0x4 * (level))
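/*
 * Worked example (illustrative, not from the original source): for perf
 * level 2 the per-level registers sit at cmd_rcgr + 0x24 (SE_PERF_DFSR),
 * cmd_rcgr + 0x64 (SE_PERF_M_DFSR) and cmd_rcgr + 0xa4 (SE_PERF_N_DFSR),
 * i.e. each macro indexes a dense bank of eight 32-bit registers starting
 * at the base offsets above.
 */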
enum freq_policy {
	FLOOR,
	CEIL,
};

static int clk_rcg2_is_enabled(struct clk_hw *hw)
{
	struct clk_rcg2 *rcg = to_clk_rcg2(hw);
	u32 cmd;
	int ret;

	ret = regmap_read(rcg->clkr.regmap, rcg->cmd_rcgr + CMD_REG, &cmd);
	if (ret)
		return ret;

	return (cmd & CMD_ROOT_OFF) == 0;
}
static u8 __clk_rcg2_get_parent(struct clk_hw *hw, u32 cfg)
{
	struct clk_rcg2 *rcg = to_clk_rcg2(hw);
	int num_parents = clk_hw_get_num_parents(hw);
	int i;

	cfg &= CFG_SRC_SEL_MASK;
	cfg >>= CFG_SRC_SEL_SHIFT;

	for (i = 0; i < num_parents; i++)
		if (cfg == rcg->parent_map[i].cfg)
			return i;

	pr_debug("%s: Clock %s has invalid parent, using default.\n",
		 __func__, clk_hw_get_name(hw));

	return 0;
}
static u8 clk_rcg2_get_parent(struct clk_hw *hw)
{
	struct clk_rcg2 *rcg = to_clk_rcg2(hw);
	u32 cfg;
	int ret;

	ret = regmap_read(rcg->clkr.regmap, RCG_CFG_OFFSET(rcg), &cfg);
	if (ret) {
		pr_debug("%s: Unable to read CFG register for %s\n",
			 __func__, clk_hw_get_name(hw));
		return 0;
	}

	return __clk_rcg2_get_parent(hw, cfg);
}
static int update_config(struct clk_rcg2 *rcg)
{
	int count, ret;
	u32 cmd;
	struct clk_hw *hw = &rcg->clkr.hw;
	const char *name = clk_hw_get_name(hw);

	ret = regmap_update_bits(rcg->clkr.regmap, rcg->cmd_rcgr + CMD_REG,
				 CMD_UPDATE, CMD_UPDATE);
	if (ret)
		return ret;

	/* Wait for update to take effect */
	for (count = 500; count > 0; count--) {
		ret = regmap_read(rcg->clkr.regmap, rcg->cmd_rcgr + CMD_REG, &cmd);
		if (ret)
			return ret;
		if (!(cmd & CMD_UPDATE))
			return 0;
		udelay(1);
	}

	WARN(1, "%s: rcg didn't update its configuration.", name);
	return -EBUSY;
}
static int clk_rcg2_set_parent(struct clk_hw *hw, u8 index)
{
	struct clk_rcg2 *rcg = to_clk_rcg2(hw);
	int ret;
	u32 cfg = rcg->parent_map[index].cfg << CFG_SRC_SEL_SHIFT;

	ret = regmap_update_bits(rcg->clkr.regmap, RCG_CFG_OFFSET(rcg),
				 CFG_SRC_SEL_MASK, cfg);
	if (ret)
		return ret;

	return update_config(rcg);
}
/*
 * Calculate m/n:d rate
 *
 *          parent_rate     m
 *   rate = ----------- x  ---
 *            hid_div       n
 */
static unsigned long
calc_rate(unsigned long rate, u32 m, u32 n, u32 mode, u32 hid_div)
{
	if (hid_div)
		rate = mult_frac(rate, 2, hid_div + 1);

	if (mode)
		rate = mult_frac(rate, m, n);

	return rate;
}
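/*
 * Worked example (illustrative): with parent_rate = 19200000, a hid_div
 * field of 3 (the half-integer encoding of a divide-by-2) and M/N mode
 * with m = 1, n = 4, calc_rate() computes 19200000 * 2 / (3 + 1) =
 * 9600000, then 9600000 * 1 / 4 = 2400000 Hz.
 */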
static unsigned long
__clk_rcg2_recalc_rate(struct clk_hw *hw, unsigned long parent_rate, u32 cfg)
{
	struct clk_rcg2 *rcg = to_clk_rcg2(hw);
	u32 hid_div, m = 0, n = 0, mode = 0, mask;

	if (rcg->mnd_width) {
		mask = BIT(rcg->mnd_width) - 1;
		regmap_read(rcg->clkr.regmap, RCG_M_OFFSET(rcg), &m);
		m &= mask;
		regmap_read(rcg->clkr.regmap, RCG_N_OFFSET(rcg), &n);
		n = ~n;
		n &= mask;
		n += m;
		mode = cfg & CFG_MODE_MASK;
		mode >>= CFG_MODE_SHIFT;
	}

	mask = BIT(rcg->hid_width) - 1;
	hid_div = cfg >> CFG_SRC_DIV_SHIFT;
	hid_div &= mask;

	return calc_rate(parent_rate, m, n, mode, hid_div);
}
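/*
 * Note on the N decode above (illustrative example): hardware stores
 * ~(n - m) in the N register, so with mnd_width = 8, m = 1 and n = 4 the
 * register field reads back 0xfc and (~0xfc & 0xff) + 1 recovers n = 4.
 */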
static unsigned long
clk_rcg2_recalc_rate(struct clk_hw *hw, unsigned long parent_rate)
{
	struct clk_rcg2 *rcg = to_clk_rcg2(hw);
	u32 cfg;

	regmap_read(rcg->clkr.regmap, RCG_CFG_OFFSET(rcg), &cfg);

	return __clk_rcg2_recalc_rate(hw, parent_rate, cfg);
}
static int _freq_tbl_determine_rate(struct clk_hw *hw, const struct freq_tbl *f,
				    struct clk_rate_request *req,
				    enum freq_policy policy)
{
	unsigned long clk_flags, rate = req->rate;
	struct clk_hw *p;
	struct clk_rcg2 *rcg = to_clk_rcg2(hw);
	int index;

	switch (policy) {
	case FLOOR:
		f = qcom_find_freq_floor(f, rate);
		break;
	case CEIL:
		f = qcom_find_freq(f, rate);
		break;
	default:
		return -EINVAL;
	}

	if (!f)
		return -EINVAL;

	index = qcom_find_src_index(hw, rcg->parent_map, f->src);
	if (index < 0)
		return index;

	clk_flags = clk_hw_get_flags(hw);
	p = clk_hw_get_parent_by_index(hw, index);
	if (!p)
		return -EINVAL;

	if (clk_flags & CLK_SET_RATE_PARENT) {
		rate = f->freq;
		if (f->pre_div) {
			if (!rate)
				rate = req->rate;
			rate /= 2;
			rate *= f->pre_div + 1;
		}

		if (f->n) {
			u64 tmp = rate;

			tmp = tmp * f->n;
			do_div(tmp, f->m);
			rate = tmp;
		}
	} else {
		rate = clk_hw_get_rate(p);
	}
	req->best_parent_hw = p;
	req->best_parent_rate = rate;
	req->rate = f->freq;

	return 0;
}
static int clk_rcg2_determine_rate(struct clk_hw *hw,
				   struct clk_rate_request *req)
{
	struct clk_rcg2 *rcg = to_clk_rcg2(hw);

	return _freq_tbl_determine_rate(hw, rcg->freq_tbl, req, CEIL);
}
static int clk_rcg2_determine_floor_rate(struct clk_hw *hw,
					 struct clk_rate_request *req)
{
	struct clk_rcg2 *rcg = to_clk_rcg2(hw);

	return _freq_tbl_determine_rate(hw, rcg->freq_tbl, req, FLOOR);
}
static int __clk_rcg2_configure(struct clk_rcg2 *rcg, const struct freq_tbl *f,
				u32 *_cfg)
{
	u32 cfg, mask, d_val, not2d_val, n_minus_m;
	struct clk_hw *hw = &rcg->clkr.hw;
	int ret, index = qcom_find_src_index(hw, rcg->parent_map, f->src);

	if (index < 0)
		return index;

	if (rcg->mnd_width && f->n) {
		mask = BIT(rcg->mnd_width) - 1;
		ret = regmap_update_bits(rcg->clkr.regmap,
				RCG_M_OFFSET(rcg), mask, f->m);
		if (ret)
			return ret;

		ret = regmap_update_bits(rcg->clkr.regmap,
				RCG_N_OFFSET(rcg), mask, ~(f->n - f->m));
		if (ret)
			return ret;

		/* Calculate 2d value */
		d_val = f->n;

		n_minus_m = f->n - f->m;
		n_minus_m *= 2;

		d_val = clamp_t(u32, d_val, f->m, n_minus_m);
		not2d_val = ~d_val & mask;

		ret = regmap_update_bits(rcg->clkr.regmap,
				RCG_D_OFFSET(rcg), mask, not2d_val);
		if (ret)
			return ret;
	}

	mask = BIT(rcg->hid_width) - 1;
	mask |= CFG_SRC_SEL_MASK | CFG_MODE_MASK | CFG_HW_CLK_CTRL_MASK;
	cfg = f->pre_div << CFG_SRC_DIV_SHIFT;
	cfg |= rcg->parent_map[index].cfg << CFG_SRC_SEL_SHIFT;
	if (rcg->mnd_width && f->n && (f->m != f->n))
		cfg |= CFG_MODE_DUAL_EDGE;
	if (rcg->hw_clk_ctrl)
		cfg |= CFG_HW_CLK_CTRL_MASK;

	*_cfg &= ~mask;
	*_cfg |= cfg;

	return 0;
}
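/*
 * Worked example (illustrative): for f->m = 1, f->n = 4 and mnd_width = 8,
 * N is programmed with ~(4 - 1) = 0xfc in its low byte, d_val starts at
 * n = 4 and is clamped to [m, 2 * (n - m)] = [1, 6], so D receives
 * not2d_val = ~4 & 0xff = 0xfb, giving a 50 % duty cycle at the M/N output.
 */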
static int clk_rcg2_configure(struct clk_rcg2 *rcg, const struct freq_tbl *f)
{
	u32 cfg;
	int ret;

	ret = regmap_read(rcg->clkr.regmap, RCG_CFG_OFFSET(rcg), &cfg);
	if (ret)
		return ret;

	ret = __clk_rcg2_configure(rcg, f, &cfg);
	if (ret)
		return ret;

	ret = regmap_write(rcg->clkr.regmap, RCG_CFG_OFFSET(rcg), cfg);
	if (ret)
		return ret;

	return update_config(rcg);
}
static int __clk_rcg2_set_rate(struct clk_hw *hw, unsigned long rate,
			       enum freq_policy policy)
{
	struct clk_rcg2 *rcg = to_clk_rcg2(hw);
	const struct freq_tbl *f;

	switch (policy) {
	case FLOOR:
		f = qcom_find_freq_floor(rcg->freq_tbl, rate);
		break;
	case CEIL:
		f = qcom_find_freq(rcg->freq_tbl, rate);
		break;
	default:
		return -EINVAL;
	}

	if (!f)
		return -EINVAL;

	return clk_rcg2_configure(rcg, f);
}
static int clk_rcg2_set_rate(struct clk_hw *hw, unsigned long rate,
			     unsigned long parent_rate)
{
	return __clk_rcg2_set_rate(hw, rate, CEIL);
}

static int clk_rcg2_set_floor_rate(struct clk_hw *hw, unsigned long rate,
				   unsigned long parent_rate)
{
	return __clk_rcg2_set_rate(hw, rate, FLOOR);
}

static int clk_rcg2_set_rate_and_parent(struct clk_hw *hw,
		unsigned long rate, unsigned long parent_rate, u8 index)
{
	return __clk_rcg2_set_rate(hw, rate, CEIL);
}

static int clk_rcg2_set_floor_rate_and_parent(struct clk_hw *hw,
		unsigned long rate, unsigned long parent_rate, u8 index)
{
	return __clk_rcg2_set_rate(hw, rate, FLOOR);
}
static int clk_rcg2_get_duty_cycle(struct clk_hw *hw, struct clk_duty *duty)
{
	struct clk_rcg2 *rcg = to_clk_rcg2(hw);
	u32 notn_m, n, m, d, not2d, mask;

	if (!rcg->mnd_width) {
		/* 50 % duty-cycle for Non-MND RCGs */
		duty->num = 1;
		duty->den = 2;
		return 0;
	}

	regmap_read(rcg->clkr.regmap, RCG_D_OFFSET(rcg), &not2d);
	regmap_read(rcg->clkr.regmap, RCG_M_OFFSET(rcg), &m);
	regmap_read(rcg->clkr.regmap, RCG_N_OFFSET(rcg), &notn_m);

	if (!not2d && !m && !notn_m) {
		/* 50 % duty-cycle always */
		duty->num = 1;
		duty->den = 2;
		return 0;
	}

	mask = BIT(rcg->mnd_width) - 1;

	d = ~(not2d) & mask;
	d = DIV_ROUND_CLOSEST(d, 2);

	n = (~(notn_m) + m) & mask;

	duty->num = d;
	duty->den = n;

	return 0;
}
static int clk_rcg2_set_duty_cycle(struct clk_hw *hw, struct clk_duty *duty)
{
	struct clk_rcg2 *rcg = to_clk_rcg2(hw);
	u32 notn_m, n, m, d, not2d, mask, duty_per, cfg;
	int ret;

	/* Duty-cycle cannot be modified for non-MND RCGs */
	if (!rcg->mnd_width)
		return -EINVAL;

	mask = BIT(rcg->mnd_width) - 1;

	regmap_read(rcg->clkr.regmap, RCG_N_OFFSET(rcg), &notn_m);
	regmap_read(rcg->clkr.regmap, RCG_M_OFFSET(rcg), &m);
	regmap_read(rcg->clkr.regmap, RCG_CFG_OFFSET(rcg), &cfg);

	/* Duty-cycle cannot be modified if MND divider is in bypass mode. */
	if (!(cfg & CFG_MODE_MASK))
		return -EINVAL;

	n = (~(notn_m) + m) & mask;

	duty_per = (duty->num * 100) / duty->den;

	/* Calculate 2d value */
	d = DIV_ROUND_CLOSEST(n * duty_per * 2, 100);

	/*
	 * Check bit widths of 2d. If D is too big reduce duty cycle.
	 * Also make sure it is never zero.
	 */
	d = clamp_val(d, 1, mask);

	if ((d / 2) > (n - m))
		d = (n - m) * 2;
	else if ((d / 2) < (m / 2))
		d = m;

	not2d = ~d & mask;

	ret = regmap_update_bits(rcg->clkr.regmap, RCG_D_OFFSET(rcg), mask,
				 not2d);
	if (ret)
		return ret;

	return update_config(rcg);
}
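/*
 * Worked example (illustrative): with n = 10, m = 1 and a requested duty
 * cycle of 30/100, duty_per = 30 and d = DIV_ROUND_CLOSEST(10 * 30 * 2,
 * 100) = 6; d / 2 = 3 fits within the checks above, so D is programmed
 * with ~6 and clk_rcg2_get_duty_cycle() would read back 3/10.
 */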
const struct clk_ops clk_rcg2_ops = {
	.is_enabled = clk_rcg2_is_enabled,
	.get_parent = clk_rcg2_get_parent,
	.set_parent = clk_rcg2_set_parent,
	.recalc_rate = clk_rcg2_recalc_rate,
	.determine_rate = clk_rcg2_determine_rate,
	.set_rate = clk_rcg2_set_rate,
	.set_rate_and_parent = clk_rcg2_set_rate_and_parent,
	.get_duty_cycle = clk_rcg2_get_duty_cycle,
	.set_duty_cycle = clk_rcg2_set_duty_cycle,
};
EXPORT_SYMBOL_GPL(clk_rcg2_ops);
const struct clk_ops clk_rcg2_floor_ops = {
	.is_enabled = clk_rcg2_is_enabled,
	.get_parent = clk_rcg2_get_parent,
	.set_parent = clk_rcg2_set_parent,
	.recalc_rate = clk_rcg2_recalc_rate,
	.determine_rate = clk_rcg2_determine_floor_rate,
	.set_rate = clk_rcg2_set_floor_rate,
	.set_rate_and_parent = clk_rcg2_set_floor_rate_and_parent,
	.get_duty_cycle = clk_rcg2_get_duty_cycle,
	.set_duty_cycle = clk_rcg2_set_duty_cycle,
};
EXPORT_SYMBOL_GPL(clk_rcg2_floor_ops);
const struct clk_ops clk_rcg2_mux_closest_ops = {
	.determine_rate = __clk_mux_determine_rate_closest,
	.get_parent = clk_rcg2_get_parent,
	.set_parent = clk_rcg2_set_parent,
};
EXPORT_SYMBOL_GPL(clk_rcg2_mux_closest_ops);
struct frac_entry {
	int num;
	int den;
};

static const struct frac_entry frac_table_675m[] = {	/* link rate of 270M */
	{ 52, 295 },	/* 119 M */
	{ 11, 57 },	/* 130.25 M */
	{ 63, 307 },	/* 138.50 M */
	{ 11, 50 },	/* 148.50 M */
	{ 47, 206 },	/* 154 M */
	{ 31, 100 },	/* 205.25 M */
	{ 107, 269 },	/* 268.50 M */
	{ },
};

static struct frac_entry frac_table_810m[] = { /* Link rate of 162M */
	{ 31, 211 },	/* 119 M */
	{ 32, 199 },	/* 130.25 M */
	{ 63, 307 },	/* 138.50 M */
	{ 11, 60 },	/* 148.50 M */
	{ 50, 263 },	/* 154 M */
	{ 31, 120 },	/* 205.25 M */
	{ 119, 359 },	/* 268.50 M */
	{ },
};
static int clk_edp_pixel_set_rate(struct clk_hw *hw, unsigned long rate,
				  unsigned long parent_rate)
{
	struct clk_rcg2 *rcg = to_clk_rcg2(hw);
	struct freq_tbl f = *rcg->freq_tbl;
	const struct frac_entry *frac;
	int delta = 100000;
	s64 src_rate = parent_rate;
	s64 request;
	u32 mask = BIT(rcg->hid_width) - 1;
	u32 hid_div;

	if (src_rate == 810000000)
		frac = frac_table_810m;
	else
		frac = frac_table_675m;

	for (; frac->num; frac++) {
		request = rate;
		request *= frac->den;
		request = div_s64(request, frac->num);
		if ((src_rate < (request - delta)) ||
		    (src_rate > (request + delta)))
			continue;

		regmap_read(rcg->clkr.regmap, rcg->cmd_rcgr + CFG_REG,
				&hid_div);
		f.pre_div = hid_div;
		f.pre_div >>= CFG_SRC_DIV_SHIFT;
		f.pre_div &= mask;
		f.m = frac->num;
		f.n = frac->den;

		return clk_rcg2_configure(rcg, &f);
	}

	return -EINVAL;
}
static int clk_edp_pixel_set_rate_and_parent(struct clk_hw *hw,
		unsigned long rate, unsigned long parent_rate, u8 index)
{
	/* Parent index is set statically in frequency table */
	return clk_edp_pixel_set_rate(hw, rate, parent_rate);
}
static int clk_edp_pixel_determine_rate(struct clk_hw *hw,
					struct clk_rate_request *req)
{
	struct clk_rcg2 *rcg = to_clk_rcg2(hw);
	const struct freq_tbl *f = rcg->freq_tbl;
	const struct frac_entry *frac;
	int delta = 100000;
	s64 request;
	u32 mask = BIT(rcg->hid_width) - 1;
	u32 hid_div;
	int index = qcom_find_src_index(hw, rcg->parent_map, f->src);

	/* Force the correct parent */
	req->best_parent_hw = clk_hw_get_parent_by_index(hw, index);
	req->best_parent_rate = clk_hw_get_rate(req->best_parent_hw);

	if (req->best_parent_rate == 810000000)
		frac = frac_table_810m;
	else
		frac = frac_table_675m;

	for (; frac->num; frac++) {
		request = req->rate;
		request *= frac->den;
		request = div_s64(request, frac->num);
		if ((req->best_parent_rate < (request - delta)) ||
		    (req->best_parent_rate > (request + delta)))
			continue;

		regmap_read(rcg->clkr.regmap, rcg->cmd_rcgr + CFG_REG,
				&hid_div);
		hid_div >>= CFG_SRC_DIV_SHIFT;
		hid_div &= mask;

		req->rate = calc_rate(req->best_parent_rate,
				      frac->num, frac->den,
				      !!frac->den, hid_div);
		return 0;
	}

	return -EINVAL;
}
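/*
 * Worked example (illustrative): with an 810 MHz link parent and a
 * requested 148.5 MHz pixel rate, the { 11, 60 } table entry matches
 * exactly (148500000 * 60 / 11 = 810000000), so req->rate becomes
 * calc_rate(810 MHz, 11, 60, 1, hid_div).
 */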
const struct clk_ops clk_edp_pixel_ops = {
	.is_enabled = clk_rcg2_is_enabled,
	.get_parent = clk_rcg2_get_parent,
	.set_parent = clk_rcg2_set_parent,
	.recalc_rate = clk_rcg2_recalc_rate,
	.set_rate = clk_edp_pixel_set_rate,
	.set_rate_and_parent = clk_edp_pixel_set_rate_and_parent,
	.determine_rate = clk_edp_pixel_determine_rate,
};
EXPORT_SYMBOL_GPL(clk_edp_pixel_ops);
static int clk_byte_determine_rate(struct clk_hw *hw,
				   struct clk_rate_request *req)
{
	struct clk_rcg2 *rcg = to_clk_rcg2(hw);
	const struct freq_tbl *f = rcg->freq_tbl;
	int index = qcom_find_src_index(hw, rcg->parent_map, f->src);
	unsigned long parent_rate, div;
	u32 mask = BIT(rcg->hid_width) - 1;
	struct clk_hw *p;

	if (req->rate == 0)
		return -EINVAL;

	req->best_parent_hw = p = clk_hw_get_parent_by_index(hw, index);
	req->best_parent_rate = parent_rate = clk_hw_round_rate(p, req->rate);

	div = DIV_ROUND_UP((2 * parent_rate), req->rate) - 1;
	div = min_t(u32, div, mask);

	req->rate = calc_rate(parent_rate, 0, 0, 0, div);

	return 0;
}
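/*
 * Worked example (illustrative): for a 600 MHz parent and a 150 MHz byte
 * clock request, div = DIV_ROUND_UP(1200000000, 150000000) - 1 = 7, and
 * calc_rate() yields 600000000 * 2 / (7 + 1) = 150000000 Hz.
 */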
static int clk_byte_set_rate(struct clk_hw *hw, unsigned long rate,
			     unsigned long parent_rate)
{
	struct clk_rcg2 *rcg = to_clk_rcg2(hw);
	struct freq_tbl f = *rcg->freq_tbl;
	unsigned long div;
	u32 mask = BIT(rcg->hid_width) - 1;

	div = DIV_ROUND_UP((2 * parent_rate), rate) - 1;
	div = min_t(u32, div, mask);

	f.pre_div = div;

	return clk_rcg2_configure(rcg, &f);
}
static int clk_byte_set_rate_and_parent(struct clk_hw *hw,
		unsigned long rate, unsigned long parent_rate, u8 index)
{
	/* Parent index is set statically in frequency table */
	return clk_byte_set_rate(hw, rate, parent_rate);
}
const struct clk_ops clk_byte_ops = {
	.is_enabled = clk_rcg2_is_enabled,
	.get_parent = clk_rcg2_get_parent,
	.set_parent = clk_rcg2_set_parent,
	.recalc_rate = clk_rcg2_recalc_rate,
	.set_rate = clk_byte_set_rate,
	.set_rate_and_parent = clk_byte_set_rate_and_parent,
	.determine_rate = clk_byte_determine_rate,
};
EXPORT_SYMBOL_GPL(clk_byte_ops);
static int clk_byte2_determine_rate(struct clk_hw *hw,
				    struct clk_rate_request *req)
{
	struct clk_rcg2 *rcg = to_clk_rcg2(hw);
	unsigned long parent_rate, div;
	u32 mask = BIT(rcg->hid_width) - 1;
	struct clk_hw *p;
	unsigned long rate = req->rate;

	if (rate == 0)
		return -EINVAL;

	p = req->best_parent_hw;
	req->best_parent_rate = parent_rate = clk_hw_round_rate(p, rate);

	div = DIV_ROUND_UP((2 * parent_rate), rate) - 1;
	div = min_t(u32, div, mask);

	req->rate = calc_rate(parent_rate, 0, 0, 0, div);

	return 0;
}
static int clk_byte2_set_rate(struct clk_hw *hw, unsigned long rate,
			      unsigned long parent_rate)
{
	struct clk_rcg2 *rcg = to_clk_rcg2(hw);
	struct freq_tbl f = { 0 };
	unsigned long div;
	int i, num_parents = clk_hw_get_num_parents(hw);
	u32 mask = BIT(rcg->hid_width) - 1;
	u32 cfg;

	div = DIV_ROUND_UP((2 * parent_rate), rate) - 1;
	div = min_t(u32, div, mask);

	f.pre_div = div;

	regmap_read(rcg->clkr.regmap, rcg->cmd_rcgr + CFG_REG, &cfg);
	cfg &= CFG_SRC_SEL_MASK;
	cfg >>= CFG_SRC_SEL_SHIFT;

	for (i = 0; i < num_parents; i++) {
		if (cfg == rcg->parent_map[i].cfg) {
			f.src = rcg->parent_map[i].src;
			return clk_rcg2_configure(rcg, &f);
		}
	}

	return -EINVAL;
}
static int clk_byte2_set_rate_and_parent(struct clk_hw *hw,
		unsigned long rate, unsigned long parent_rate, u8 index)
{
	/* Read the hardware to determine parent during set_rate */
	return clk_byte2_set_rate(hw, rate, parent_rate);
}
const struct clk_ops clk_byte2_ops = {
	.is_enabled = clk_rcg2_is_enabled,
	.get_parent = clk_rcg2_get_parent,
	.set_parent = clk_rcg2_set_parent,
	.recalc_rate = clk_rcg2_recalc_rate,
	.set_rate = clk_byte2_set_rate,
	.set_rate_and_parent = clk_byte2_set_rate_and_parent,
	.determine_rate = clk_byte2_determine_rate,
};
EXPORT_SYMBOL_GPL(clk_byte2_ops);
static const struct frac_entry frac_table_pixel[] = {
	{ 3, 8 },
	{ 2, 9 },
	{ 4, 9 },
	{ 1, 1 },
	{ 2, 3 },
	{ },
};
static int clk_pixel_determine_rate(struct clk_hw *hw,
				    struct clk_rate_request *req)
{
	unsigned long request, src_rate;
	int delta = 100000;
	const struct frac_entry *frac = frac_table_pixel;

	for (; frac->num; frac++) {
		request = (req->rate * frac->den) / frac->num;

		src_rate = clk_hw_round_rate(req->best_parent_hw, request);
		if ((src_rate < (request - delta)) ||
		    (src_rate > (request + delta)))
			continue;

		req->best_parent_rate = src_rate;
		req->rate = (src_rate * frac->num) / frac->den;
		return 0;
	}

	return -EINVAL;
}
static int clk_pixel_set_rate(struct clk_hw *hw, unsigned long rate,
			      unsigned long parent_rate)
{
	struct clk_rcg2 *rcg = to_clk_rcg2(hw);
	struct freq_tbl f = { 0 };
	const struct frac_entry *frac = frac_table_pixel;
	unsigned long request;
	int delta = 100000;
	u32 mask = BIT(rcg->hid_width) - 1;
	u32 hid_div, cfg;
	int i, num_parents = clk_hw_get_num_parents(hw);

	regmap_read(rcg->clkr.regmap, rcg->cmd_rcgr + CFG_REG, &cfg);
	cfg &= CFG_SRC_SEL_MASK;
	cfg >>= CFG_SRC_SEL_SHIFT;

	for (i = 0; i < num_parents; i++)
		if (cfg == rcg->parent_map[i].cfg) {
			f.src = rcg->parent_map[i].src;
			break;
		}

	for (; frac->num; frac++) {
		request = (rate * frac->den) / frac->num;

		if ((parent_rate < (request - delta)) ||
		    (parent_rate > (request + delta)))
			continue;

		regmap_read(rcg->clkr.regmap, rcg->cmd_rcgr + CFG_REG,
				&hid_div);
		f.pre_div = hid_div;
		f.pre_div >>= CFG_SRC_DIV_SHIFT;
		f.pre_div &= mask;
		f.m = frac->num;
		f.n = frac->den;

		return clk_rcg2_configure(rcg, &f);
	}

	return -EINVAL;
}
static int clk_pixel_set_rate_and_parent(struct clk_hw *hw, unsigned long rate,
		unsigned long parent_rate, u8 index)
{
	return clk_pixel_set_rate(hw, rate, parent_rate);
}
const struct clk_ops clk_pixel_ops = {
	.is_enabled = clk_rcg2_is_enabled,
	.get_parent = clk_rcg2_get_parent,
	.set_parent = clk_rcg2_set_parent,
	.recalc_rate = clk_rcg2_recalc_rate,
	.set_rate = clk_pixel_set_rate,
	.set_rate_and_parent = clk_pixel_set_rate_and_parent,
	.determine_rate = clk_pixel_determine_rate,
};
EXPORT_SYMBOL_GPL(clk_pixel_ops);
static int clk_gfx3d_determine_rate(struct clk_hw *hw,
				    struct clk_rate_request *req)
{
	struct clk_rate_request parent_req = { .min_rate = 0, .max_rate = ULONG_MAX };
	struct clk_rcg2_gfx3d *cgfx = to_clk_rcg2_gfx3d(hw);
	struct clk_hw *xo, *p0, *p1, *p2;
	unsigned long p0_rate;
	u8 mux_div = cgfx->div;
	int ret;

	p0 = cgfx->hws[0];
	p1 = cgfx->hws[1];
	p2 = cgfx->hws[2];
	/*
	 * This function does ping-pong the RCG between PLLs: if we don't
	 * have at least one fixed PLL and two variable ones,
	 * then it's not going to work correctly.
	 */
	if (WARN_ON(!p0 || !p1 || !p2))
		return -EINVAL;

	xo = clk_hw_get_parent_by_index(hw, 0);
	if (req->rate == clk_hw_get_rate(xo)) {
		req->best_parent_hw = xo;
		return 0;
	}

	if (mux_div == 0)
		mux_div = 1;

	parent_req.rate = req->rate * mux_div;

	/* This has to be a fixed rate PLL */
	p0_rate = clk_hw_get_rate(p0);

	if (parent_req.rate == p0_rate) {
		req->rate = req->best_parent_rate = p0_rate;
		req->best_parent_hw = p0;
		return 0;
	}

	if (req->best_parent_hw == p0) {
		/* Are we going back to a previously used rate? */
		if (clk_hw_get_rate(p2) == parent_req.rate)
			req->best_parent_hw = p2;
		else
			req->best_parent_hw = p1;
	} else if (req->best_parent_hw == p2) {
		req->best_parent_hw = p1;
	} else {
		req->best_parent_hw = p2;
	}

	clk_hw_get_rate_range(req->best_parent_hw,
			      &parent_req.min_rate, &parent_req.max_rate);

	if (req->min_rate > parent_req.min_rate)
		parent_req.min_rate = req->min_rate;

	if (req->max_rate < parent_req.max_rate)
		parent_req.max_rate = req->max_rate;

	ret = __clk_determine_rate(req->best_parent_hw, &parent_req);
	if (ret)
		return ret;

	req->rate = req->best_parent_rate = parent_req.rate;
	req->rate /= mux_div;

	return 0;
}
static int clk_gfx3d_set_rate_and_parent(struct clk_hw *hw, unsigned long rate,
		unsigned long parent_rate, u8 index)
{
	struct clk_rcg2_gfx3d *cgfx = to_clk_rcg2_gfx3d(hw);
	struct clk_rcg2 *rcg = &cgfx->rcg;
	u32 cfg;
	int ret;

	cfg = rcg->parent_map[index].cfg << CFG_SRC_SEL_SHIFT;
	/* On some targets, the GFX3D RCG may need to divide PLL frequency */
	if (cgfx->div > 1)
		cfg |= ((2 * cgfx->div) - 1) << CFG_SRC_DIV_SHIFT;

	ret = regmap_write(rcg->clkr.regmap, rcg->cmd_rcgr + CFG_REG, cfg);
	if (ret)
		return ret;

	return update_config(rcg);
}
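/*
 * Worked example (illustrative): with cgfx->div = 2, the CFG divider field
 * is written as (2 * 2) - 1 = 3, the RCG's half-integer encoding for a
 * divide-by-2, so the GPU clock runs at half the selected PLL rate.
 */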
static int clk_gfx3d_set_rate(struct clk_hw *hw, unsigned long rate,
			      unsigned long parent_rate)
{
	/*
	 * We should never get here; clk_gfx3d_determine_rate() should always
	 * make us use a different parent than what we're currently using, so
	 * clk_gfx3d_set_rate_and_parent() should always be called.
	 */
	return 0;
}
const struct clk_ops clk_gfx3d_ops = {
	.is_enabled = clk_rcg2_is_enabled,
	.get_parent = clk_rcg2_get_parent,
	.set_parent = clk_rcg2_set_parent,
	.recalc_rate = clk_rcg2_recalc_rate,
	.set_rate = clk_gfx3d_set_rate,
	.set_rate_and_parent = clk_gfx3d_set_rate_and_parent,
	.determine_rate = clk_gfx3d_determine_rate,
};
EXPORT_SYMBOL_GPL(clk_gfx3d_ops);
static int clk_rcg2_set_force_enable(struct clk_hw *hw)
{
	struct clk_rcg2 *rcg = to_clk_rcg2(hw);
	const char *name = clk_hw_get_name(hw);
	int ret, count;

	ret = regmap_update_bits(rcg->clkr.regmap, rcg->cmd_rcgr + CMD_REG,
				 CMD_ROOT_EN, CMD_ROOT_EN);
	if (ret)
		return ret;

	/* wait for RCG to turn ON */
	for (count = 500; count > 0; count--) {
		if (clk_rcg2_is_enabled(hw))
			return 0;

		udelay(1);
	}

	pr_err("%s: RCG did not turn on\n", name);
	return -ETIMEDOUT;
}
static int clk_rcg2_clear_force_enable(struct clk_hw *hw)
{
	struct clk_rcg2 *rcg = to_clk_rcg2(hw);

	return regmap_update_bits(rcg->clkr.regmap, rcg->cmd_rcgr + CMD_REG,
				  CMD_ROOT_EN, 0);
}
static int
clk_rcg2_shared_force_enable_clear(struct clk_hw *hw, const struct freq_tbl *f)
{
	struct clk_rcg2 *rcg = to_clk_rcg2(hw);
	int ret;

	ret = clk_rcg2_set_force_enable(hw);
	if (ret)
		return ret;

	ret = clk_rcg2_configure(rcg, f);
	if (ret)
		return ret;

	return clk_rcg2_clear_force_enable(hw);
}
static int clk_rcg2_shared_set_rate(struct clk_hw *hw, unsigned long rate,
				    unsigned long parent_rate)
{
	struct clk_rcg2 *rcg = to_clk_rcg2(hw);
	const struct freq_tbl *f;

	f = qcom_find_freq(rcg->freq_tbl, rate);
	if (!f)
		return -EINVAL;

	/*
	 * In case clock is disabled, update the M, N and D registers, cache
	 * the CFG value in parked_cfg and don't hit the update bit of CMD
	 * register.
	 */
	if (!clk_hw_is_enabled(hw))
		return __clk_rcg2_configure(rcg, f, &rcg->parked_cfg);

	return clk_rcg2_shared_force_enable_clear(hw, f);
}
static int clk_rcg2_shared_set_rate_and_parent(struct clk_hw *hw,
		unsigned long rate, unsigned long parent_rate, u8 index)
{
	return clk_rcg2_shared_set_rate(hw, rate, parent_rate);
}
static int clk_rcg2_shared_enable(struct clk_hw *hw)
{
	struct clk_rcg2 *rcg = to_clk_rcg2(hw);
	int ret;

	/*
	 * Set the update bit because required configuration has already
	 * been written in clk_rcg2_shared_set_rate()
	 */
	ret = clk_rcg2_set_force_enable(hw);
	if (ret)
		return ret;

	/* Write back the stored configuration corresponding to current rate */
	ret = regmap_write(rcg->clkr.regmap, rcg->cmd_rcgr + CFG_REG, rcg->parked_cfg);
	if (ret)
		return ret;

	ret = update_config(rcg);
	if (ret)
		return ret;

	return clk_rcg2_clear_force_enable(hw);
}
static void clk_rcg2_shared_disable(struct clk_hw *hw)
{
	struct clk_rcg2 *rcg = to_clk_rcg2(hw);

	/*
	 * Store current configuration as switching to safe source would clear
	 * the SRC and DIV of CFG register
	 */
	regmap_read(rcg->clkr.regmap, rcg->cmd_rcgr + CFG_REG, &rcg->parked_cfg);

	/*
	 * Park the RCG at a safe configuration - sourced off of safe source.
	 * Force enable and disable the RCG while configuring it to safeguard
	 * against any update signal coming from the downstream clock.
	 * The current parent is still prepared and enabled at this point, and
	 * the safe source is always on while application processor subsystem
	 * is online. Therefore, the RCG can safely switch its parent.
	 */
	clk_rcg2_set_force_enable(hw);

	regmap_write(rcg->clkr.regmap, rcg->cmd_rcgr + CFG_REG,
		     rcg->safe_src_index << CFG_SRC_SEL_SHIFT);

	update_config(rcg);

	clk_rcg2_clear_force_enable(hw);
}
static u8 clk_rcg2_shared_get_parent(struct clk_hw *hw)
{
	struct clk_rcg2 *rcg = to_clk_rcg2(hw);

	/* If the shared rcg is parked use the cached cfg instead */
	if (!clk_hw_is_enabled(hw))
		return __clk_rcg2_get_parent(hw, rcg->parked_cfg);

	return clk_rcg2_get_parent(hw);
}
static int clk_rcg2_shared_set_parent(struct clk_hw *hw, u8 index)
{
	struct clk_rcg2 *rcg = to_clk_rcg2(hw);

	/* If the shared rcg is parked only update the cached cfg */
	if (!clk_hw_is_enabled(hw)) {
		rcg->parked_cfg &= ~CFG_SRC_SEL_MASK;
		rcg->parked_cfg |= rcg->parent_map[index].cfg << CFG_SRC_SEL_SHIFT;

		return 0;
	}

	return clk_rcg2_set_parent(hw, index);
}
static unsigned long
clk_rcg2_shared_recalc_rate(struct clk_hw *hw, unsigned long parent_rate)
{
	struct clk_rcg2 *rcg = to_clk_rcg2(hw);

	/* If the shared rcg is parked use the cached cfg instead */
	if (!clk_hw_is_enabled(hw))
		return __clk_rcg2_recalc_rate(hw, parent_rate, rcg->parked_cfg);

	return clk_rcg2_recalc_rate(hw, parent_rate);
}
const struct clk_ops clk_rcg2_shared_ops = {
	.enable = clk_rcg2_shared_enable,
	.disable = clk_rcg2_shared_disable,
	.get_parent = clk_rcg2_shared_get_parent,
	.set_parent = clk_rcg2_shared_set_parent,
	.recalc_rate = clk_rcg2_shared_recalc_rate,
	.determine_rate = clk_rcg2_determine_rate,
	.set_rate = clk_rcg2_shared_set_rate,
	.set_rate_and_parent = clk_rcg2_shared_set_rate_and_parent,
};
EXPORT_SYMBOL_GPL(clk_rcg2_shared_ops);
/* Common APIs to be used for DFS based RCGR */
static void clk_rcg2_dfs_populate_freq(struct clk_hw *hw, unsigned int l,
				       struct freq_tbl *f)
{
	struct clk_rcg2 *rcg = to_clk_rcg2(hw);
	struct clk_hw *p;
	unsigned long prate = 0;
	u32 val, mask, cfg, mode, src;
	int i, num_parents;

	regmap_read(rcg->clkr.regmap, rcg->cmd_rcgr + SE_PERF_DFSR(l), &cfg);

	mask = BIT(rcg->hid_width) - 1;
	f->pre_div = 1;
	if (cfg & mask)
		f->pre_div = cfg & mask;

	src = cfg & CFG_SRC_SEL_MASK;
	src >>= CFG_SRC_SEL_SHIFT;

	num_parents = clk_hw_get_num_parents(hw);
	for (i = 0; i < num_parents; i++) {
		if (src == rcg->parent_map[i].cfg) {
			f->src = rcg->parent_map[i].src;
			p = clk_hw_get_parent_by_index(&rcg->clkr.hw, i);
			prate = clk_hw_get_rate(p);
		}
	}

	mode = cfg & CFG_MODE_MASK;
	mode >>= CFG_MODE_SHIFT;
	if (mode) {
		mask = BIT(rcg->mnd_width) - 1;
		regmap_read(rcg->clkr.regmap, rcg->cmd_rcgr + SE_PERF_M_DFSR(l),
			    &val);
		val &= mask;
		f->m = val;

		regmap_read(rcg->clkr.regmap, rcg->cmd_rcgr + SE_PERF_N_DFSR(l),
			    &val);
		val = ~val;
		val &= mask;
		val += f->m;
		f->n = val;
	}

	f->freq = calc_rate(prate, f->m, f->n, mode, f->pre_div);
}
static int clk_rcg2_dfs_populate_freq_table(struct clk_rcg2 *rcg)
{
	struct freq_tbl *freq_tbl;
	int i;

	/* Allocate space for 1 extra since table is NULL terminated */
	freq_tbl = kcalloc(MAX_PERF_LEVEL + 1, sizeof(*freq_tbl), GFP_KERNEL);
	if (!freq_tbl)
		return -ENOMEM;
	rcg->freq_tbl = freq_tbl;

	for (i = 0; i < MAX_PERF_LEVEL; i++)
		clk_rcg2_dfs_populate_freq(&rcg->clkr.hw, i, freq_tbl + i);

	return 0;
}
static int clk_rcg2_dfs_determine_rate(struct clk_hw *hw,
				       struct clk_rate_request *req)
{
	struct clk_rcg2 *rcg = to_clk_rcg2(hw);
	int ret;

	if (!rcg->freq_tbl) {
		ret = clk_rcg2_dfs_populate_freq_table(rcg);
		if (ret) {
			pr_err("Failed to update DFS tables for %s\n",
			       clk_hw_get_name(hw));
			return ret;
		}
	}

	return clk_rcg2_determine_rate(hw, req);
}
static unsigned long
clk_rcg2_dfs_recalc_rate(struct clk_hw *hw, unsigned long parent_rate)
{
	struct clk_rcg2 *rcg = to_clk_rcg2(hw);
	u32 level, mask, cfg, m = 0, n = 0, mode, pre_div;

	regmap_read(rcg->clkr.regmap,
		    rcg->cmd_rcgr + SE_CMD_DFSR_OFFSET, &level);
	level &= GENMASK(4, 1);
	level >>= 1;

	if (rcg->freq_tbl)
		return rcg->freq_tbl[level].freq;

	/*
	 * Assume that parent_rate is actually the parent because
	 * we can't do any better at figuring it out when the table
	 * hasn't been populated yet. We only populate the table
	 * in determine_rate because we can't guarantee the parents
	 * will be registered with the framework until then.
	 */
	regmap_read(rcg->clkr.regmap, rcg->cmd_rcgr + SE_PERF_DFSR(level),
		    &cfg);

	mask = BIT(rcg->hid_width) - 1;
	pre_div = 1;
	if (cfg & mask)
		pre_div = cfg & mask;

	mode = cfg & CFG_MODE_MASK;
	mode >>= CFG_MODE_SHIFT;
	if (mode) {
		mask = BIT(rcg->mnd_width) - 1;
		regmap_read(rcg->clkr.regmap,
			    rcg->cmd_rcgr + SE_PERF_M_DFSR(level), &m);
		m &= mask;

		regmap_read(rcg->clkr.regmap,
			    rcg->cmd_rcgr + SE_PERF_N_DFSR(level), &n);
		n = ~n;
		n &= mask;
		n += m;
	}

	return calc_rate(parent_rate, m, n, mode, pre_div);
}
static const struct clk_ops clk_rcg2_dfs_ops = {
	.is_enabled = clk_rcg2_is_enabled,
	.get_parent = clk_rcg2_get_parent,
	.determine_rate = clk_rcg2_dfs_determine_rate,
	.recalc_rate = clk_rcg2_dfs_recalc_rate,
};
static int clk_rcg2_enable_dfs(const struct clk_rcg_dfs_data *data,
			       struct regmap *regmap)
{
	struct clk_rcg2 *rcg = data->rcg;
	struct clk_init_data *init = data->init;
	u32 val;
	int ret;

	ret = regmap_read(regmap, rcg->cmd_rcgr + SE_CMD_DFSR_OFFSET, &val);
	if (ret)
		return -EINVAL;

	if (!(val & SE_CMD_DFS_EN))
		return 0;

	/*
	 * Rate changes with consumer writing a register in
	 * their own I/O region
	 */
	init->flags |= CLK_GET_RATE_NOCACHE;
	init->ops = &clk_rcg2_dfs_ops;

	rcg->freq_tbl = NULL;

	return 0;
}
int qcom_cc_register_rcg_dfs(struct regmap *regmap,
			     const struct clk_rcg_dfs_data *rcgs, size_t len)
{
	int i, ret;

	for (i = 0; i < len; i++) {
		ret = clk_rcg2_enable_dfs(&rcgs[i], regmap);
		if (ret)
			return ret;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(qcom_cc_register_rcg_dfs);
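/*
 * Typical usage (illustrative sketch, not from the original source): a
 * clock-controller driver collects its DFS-capable RCGs with
 * DEFINE_RCG_DFS() from clk-rcg.h and calls qcom_cc_register_rcg_dfs()
 * during probe, before registering the clocks, so any RCG with
 * SE_CMD_DFS_EN set is switched over to clk_rcg2_dfs_ops.
 */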
static int clk_rcg2_dp_set_rate(struct clk_hw *hw, unsigned long rate,
				unsigned long parent_rate)
{
	struct clk_rcg2 *rcg = to_clk_rcg2(hw);
	struct freq_tbl f = { 0 };
	u32 mask = BIT(rcg->hid_width) - 1;
	u32 hid_div, cfg;
	int i, num_parents = clk_hw_get_num_parents(hw);
	unsigned long num, den;

	rational_best_approximation(parent_rate, rate,
			GENMASK(rcg->mnd_width - 1, 0),
			GENMASK(rcg->mnd_width - 1, 0), &den, &num);

	if (!num || !den)
		return -EINVAL;

	regmap_read(rcg->clkr.regmap, rcg->cmd_rcgr + CFG_REG, &cfg);
	hid_div = cfg;
	cfg &= CFG_SRC_SEL_MASK;
	cfg >>= CFG_SRC_SEL_SHIFT;

	for (i = 0; i < num_parents; i++) {
		if (cfg == rcg->parent_map[i].cfg) {
			f.src = rcg->parent_map[i].src;
			break;
		}
	}

	f.pre_div = hid_div;
	f.pre_div >>= CFG_SRC_DIV_SHIFT;
	f.pre_div &= mask;

	if (num == den) {
		f.m = 0;
		f.n = 0;
	} else {
		f.m = num;
		f.n = den;
	}

	return clk_rcg2_configure(rcg, &f);
}
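/*
 * Worked example (illustrative): for parent_rate = 810 MHz and
 * rate = 148.5 MHz, rational_best_approximation() returns den = 60 and
 * num = 11 (parent/rate = 60/11), so the M/N counter is programmed with
 * m = 11, n = 60 and the RCG outputs 810000000 * 11 / 60 = 148500000 Hz.
 */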
static int clk_rcg2_dp_set_rate_and_parent(struct clk_hw *hw,
		unsigned long rate, unsigned long parent_rate, u8 index)
{
	return clk_rcg2_dp_set_rate(hw, rate, parent_rate);
}
static int clk_rcg2_dp_determine_rate(struct clk_hw *hw,
				      struct clk_rate_request *req)
{
	struct clk_rcg2 *rcg = to_clk_rcg2(hw);
	unsigned long num, den;
	u64 tmp;

	/* Parent rate is a fixed phy link rate */
	rational_best_approximation(req->best_parent_rate, req->rate,
			GENMASK(rcg->mnd_width - 1, 0),
			GENMASK(rcg->mnd_width - 1, 0), &den, &num);

	if (!num || !den)
		return -EINVAL;

	tmp = req->best_parent_rate * num;
	do_div(tmp, den);
	req->rate = tmp;

	return 0;
}
const struct clk_ops clk_dp_ops = {
	.is_enabled = clk_rcg2_is_enabled,
	.get_parent = clk_rcg2_get_parent,
	.set_parent = clk_rcg2_set_parent,
	.recalc_rate = clk_rcg2_recalc_rate,
	.set_rate = clk_rcg2_dp_set_rate,
	.set_rate_and_parent = clk_rcg2_dp_set_rate_and_parent,
	.determine_rate = clk_rcg2_dp_determine_rate,
};
EXPORT_SYMBOL_GPL(clk_dp_ops);