/* GNU Linux-libre 6.8.9-gnu — releases.git: drivers/clk/renesas/rzg2l-cpg.c */
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * RZ/G2L Clock Pulse Generator
4  *
5  * Copyright (C) 2021 Renesas Electronics Corp.
6  *
7  * Based on renesas-cpg-mssr.c
8  *
9  * Copyright (C) 2015 Glider bvba
10  * Copyright (C) 2013 Ideas On Board SPRL
11  * Copyright (C) 2015 Renesas Electronics Corp.
12  */
13
14 #include <linux/bitfield.h>
15 #include <linux/clk.h>
16 #include <linux/clk-provider.h>
17 #include <linux/clk/renesas.h>
18 #include <linux/delay.h>
19 #include <linux/device.h>
20 #include <linux/init.h>
21 #include <linux/iopoll.h>
22 #include <linux/mod_devicetable.h>
23 #include <linux/module.h>
24 #include <linux/of.h>
25 #include <linux/platform_device.h>
26 #include <linux/pm_clock.h>
27 #include <linux/pm_domain.h>
28 #include <linux/reset-controller.h>
29 #include <linux/slab.h>
30 #include <linux/units.h>
31
32 #include <dt-bindings/clock/renesas-cpg-mssr.h>
33
34 #include "rzg2l-cpg.h"
35
36 #ifdef DEBUG
37 #define WARN_DEBUG(x)   WARN_ON(x)
38 #else
39 #define WARN_DEBUG(x)   do { } while (0)
40 #endif
41
/*
 * Each clock's @conf word packs its register offset, bit shift and field
 * width; these helpers extract the shift and width fields. Arguments are
 * fully parenthesized so complex expressions expand safely.
 */
#define GET_SHIFT(val)          (((val) >> 12) & 0xff)
#define GET_WIDTH(val)          (((val) >> 8) & 0xf)
44
/* SAMPLL divider fields extracted from a CLK1/CLK2 register value. */
#define KDIV(val)               ((s16)FIELD_GET(GENMASK(31, 16), val))
#define MDIV(val)               FIELD_GET(GENMASK(15, 6), val)
#define PDIV(val)               FIELD_GET(GENMASK(5, 0), val)
#define SDIV(val)               FIELD_GET(GENMASK(2, 0), val)

/* RZ/G3S PLL divider fields packed into a single PLL register. */
#define RZG3S_DIV_P             GENMASK(28, 26)
#define RZG3S_DIV_M             GENMASK(25, 22)
#define RZG3S_DIV_NI            GENMASK(21, 13)
#define RZG3S_DIV_NF            GENMASK(12, 1)
54
55 #define CLK_ON_R(reg)           (reg)
56 #define CLK_MON_R(reg)          (0x180 + (reg))
57 #define CLK_RST_R(reg)          (reg)
58 #define CLK_MRST_R(reg)         (0x180 + (reg))
59
/*
 * Extract register offsets packed into a clock's @conf value.
 * Arguments are fully parenthesized so complex expressions expand safely.
 */
#define GET_REG_OFFSET(val)             (((val) >> 20) & 0xfff)
#define GET_REG_SAMPLL_CLK1(val)        (((val) >> 22) & 0xfff)
#define GET_REG_SAMPLL_CLK2(val)        (((val) >> 12) & 0xfff)
63
64 #define CPG_WEN_BIT             BIT(16)
65
66 #define MAX_VCLK_FREQ           (148500000)
67
/**
 * struct clk_hw_data - clock hardware data
 * @hw: clock hw
 * @conf: clock configuration (register offset, shift, width)
 * @sconf: clock status configuration (register offset, shift, width)
 * @priv: CPG private data structure
 */
struct clk_hw_data {
        struct clk_hw hw;
        u32 conf;
        u32 sconf;
        struct rzg2l_cpg_priv *priv;
};

/* Map an embedded &struct clk_hw back to its wrapping &struct clk_hw_data. */
#define to_clk_hw_data(_hw)     container_of(_hw, struct clk_hw_data, hw)
83
/**
 * struct sd_mux_hw_data - SD MUX clock hardware data
 * @hw_data: clock hw data
 * @mtable: clock mux table (parent index -> register selector value mapping)
 */
struct sd_mux_hw_data {
        struct clk_hw_data hw_data;
        const u32 *mtable;
};

/* Map an embedded &struct clk_hw_data back to its &struct sd_mux_hw_data. */
#define to_sd_mux_hw_data(_hw)  container_of(_hw, struct sd_mux_hw_data, hw_data)
95
/**
 * struct div_hw_data - divider clock hardware data
 * @hw_data: clock hw data
 * @dtable: pointer to divider table
 * @invalid_rate: invalid rate for divider (parent rate at which div=1 is illegal)
 * @max_rate: maximum rate for divider
 * @width: divider register field width, in bits
 */
struct div_hw_data {
        struct clk_hw_data hw_data;
        const struct clk_div_table *dtable;
        unsigned long invalid_rate;
        unsigned long max_rate;
        u32 width;
};

/* Map an embedded &struct clk_hw_data back to its &struct div_hw_data. */
#define to_div_hw_data(_hw)     container_of(_hw, struct div_hw_data, hw_data)
113
/**
 * struct rzg2l_pll5_param - PLL5 parameters used to generate FOUTPOSTDIV
 * @pl5_fracin: fractional part of the multiplication ratio (24-bit fixed point)
 * @pl5_refdiv: reference clock divider
 * @pl5_intin: integer part of the multiplication ratio
 * @pl5_postdiv1: first post divider
 * @pl5_postdiv2: second post divider
 * @pl5_spread: SSCG modulation spread setting
 */
struct rzg2l_pll5_param {
        u32 pl5_fracin;
        u8 pl5_refdiv;
        u8 pl5_intin;
        u8 pl5_postdiv1;
        u8 pl5_postdiv2;
        u8 pl5_spread;
};

/**
 * struct rzg2l_pll5_mux_dsi_div_param - PLL5 mux and DSI divider settings
 * @clksrc: mux source selection; when non-zero the parent rate is halved
 *          (see rzg2l_cpg_get_vclk_parent_rate())
 * @dsi_div_a: DSI divider A (power-of-two divider exponent, see
 *             rzg2l_cpg_get_vclk_rate())
 * @dsi_div_b: DSI divider B (divides by @dsi_div_b + 1)
 */
struct rzg2l_pll5_mux_dsi_div_param {
        u8 clksrc;
        u8 dsi_div_a;
        u8 dsi_div_b;
};
128
/**
 * struct rzg2l_cpg_priv - Clock Pulse Generator Private Data
 *
 * @rcdev: Reset controller entity
 * @dev: CPG device
 * @base: CPG register block base address
 * @rmw_lock: protects read-modify-write register accesses (WEN write +
 *            status poll sequences)
 * @clks: Array containing all Core and Module Clocks
 * @num_core_clks: Number of Core Clocks in clks[]
 * @num_mod_clks: Number of Module Clocks in clks[]
 * @num_resets: Number of Module Resets in info->resets[]
 * @last_dt_core_clk: ID of the last Core Clock exported to DT
 * @info: Pointer to platform data
 * @genpd: PM domain
 * @mux_dsi_div_params: pll5 mux and dsi div parameters
 */
struct rzg2l_cpg_priv {
        struct reset_controller_dev rcdev;
        struct device *dev;
        void __iomem *base;
        spinlock_t rmw_lock;

        struct clk **clks;
        unsigned int num_core_clks;
        unsigned int num_mod_clks;
        unsigned int num_resets;
        unsigned int last_dt_core_clk;

        const struct rzg2l_cpg_info *info;

        struct generic_pm_domain genpd;

        struct rzg2l_pll5_mux_dsi_div_param mux_dsi_div_params;
};
163
/* devm action callback: remove the OF clock provider registered for @data. */
static void rzg2l_cpg_del_clk_provider(void *data)
{
        struct device_node *np = data;

        of_clk_del_provider(np);
}
168
/*
 * Poll the status register described by @conf until the hardware clears the
 * status bits, i.e. until the previously requested clock update completes.
 * Must be called in atomic context. Returns 0 on success or -ETIMEDOUT.
 */
static int rzg2l_cpg_wait_clk_update_done(void __iomem *base, u32 conf)
{
        /* Mask of this clock's status bits within its monitor register. */
        u32 bitmask = GENMASK(GET_WIDTH(conf) - 1, 0) << GET_SHIFT(conf);
        u32 off = GET_REG_OFFSET(conf);
        u32 val;

        /* 10us poll step, 200us total timeout. */
        return readl_poll_timeout_atomic(base + off, val, !(val & bitmask), 10, 200);
}
178
/*
 * Pre-rate-change notifier for the SD clock muxes: the hardware forbids a
 * direct switch between the 533 MHz and 400 MHz sources, so park the mux on
 * the 266 MHz source before any rate change that does not already target
 * 266 MHz. Returns a notifier status via notifier_from_errno().
 */
int rzg2l_cpg_sd_clk_mux_notifier(struct notifier_block *nb, unsigned long event,
                                  void *data)
{
        struct clk_notifier_data *cnd = data;
        struct clk_hw *hw = __clk_get_hw(cnd->clk);
        struct clk_hw_data *clk_hw_data = to_clk_hw_data(hw);
        struct rzg2l_cpg_priv *priv = clk_hw_data->priv;
        u32 off = GET_REG_OFFSET(clk_hw_data->conf);
        u32 shift = GET_SHIFT(clk_hw_data->conf);
        const u32 clk_src_266 = 3;
        unsigned long flags;
        int ret;

        /* Nothing to do when the new rate is already the safe 266 MHz one. */
        if (event != PRE_RATE_CHANGE || (cnd->new_rate / MEGA == 266))
                return NOTIFY_DONE;

        spin_lock_irqsave(&priv->rmw_lock, flags);

        /*
         * As per the HW manual, we should not directly switch from 533 MHz to
         * 400 MHz and vice versa. To change the setting from 2'b01 (533 MHz)
         * to 2'b10 (400 MHz) or vice versa, Switch to 2'b11 (266 MHz) first,
         * and then switch to the target setting (2'b01 (533 MHz) or 2'b10
         * (400 MHz)).
         * Setting a value of '0' to the SEL_SDHI0_SET or SEL_SDHI1_SET clock
         * switching register is prohibited.
         * The clock mux has 3 input clocks(533 MHz, 400 MHz, and 266 MHz), and
         * the index to value mapping is done by adding 1 to the index.
         */

        writel((CPG_WEN_BIT | clk_src_266) << shift, priv->base + off);

        /* Wait for the update done. */
        ret = rzg2l_cpg_wait_clk_update_done(priv->base, clk_hw_data->sconf);

        spin_unlock_irqrestore(&priv->rmw_lock, flags);

        if (ret)
                dev_err(priv->dev, "failed to switch to safe clk source\n");

        return notifier_from_errno(ret);
}
221
/*
 * Pre-rate-change notifier for RZ/G3S dividers with an @invalid_rate: when
 * the parent is about to run at a rate at which a 1:1 divider setting is
 * illegal, downgrade the hardware divider first. Returns a notifier status
 * via notifier_from_errno().
 */
int rzg3s_cpg_div_clk_notifier(struct notifier_block *nb, unsigned long event,
                               void *data)
{
        struct clk_notifier_data *cnd = data;
        struct clk_hw *hw = __clk_get_hw(cnd->clk);
        struct clk_hw_data *clk_hw_data = to_clk_hw_data(hw);
        struct div_hw_data *div_hw_data = to_div_hw_data(clk_hw_data);
        struct rzg2l_cpg_priv *priv = clk_hw_data->priv;
        u32 off = GET_REG_OFFSET(clk_hw_data->conf);
        u32 shift = GET_SHIFT(clk_hw_data->conf);
        unsigned long flags;
        int ret = 0;
        u32 val;

        /* Only act when the new parent rate is a multiple of the invalid rate. */
        if (event != PRE_RATE_CHANGE || !div_hw_data->invalid_rate ||
            div_hw_data->invalid_rate % cnd->new_rate)
                return NOTIFY_DONE;

        spin_lock_irqsave(&priv->rmw_lock, flags);

        /* Read the current divider field. */
        val = readl(priv->base + off);
        val >>= shift;
        val &= GENMASK(GET_WIDTH(clk_hw_data->conf) - 1, 0);

        /*
         * There are different constraints for the user of this notifiers as follows:
         * 1/ SD div cannot be 1 (val == 0) if parent rate is 800MHz
         * 2/ OCTA / SPI div cannot be 1 (val == 0) if parent rate is 400MHz
         * As SD can have only one parent having 800MHz and OCTA div can have
         * only one parent having 400MHz we took into account the parent rate
         * at the beginning of function (by checking invalid_rate % new_rate).
         * Now it is time to check the hardware divider and update it accordingly.
         */
        if (!val) {
                writel((CPG_WEN_BIT | 1) << shift, priv->base + off);
                /* Wait for the update done. */
                ret = rzg2l_cpg_wait_clk_update_done(priv->base, clk_hw_data->sconf);
        }

        spin_unlock_irqrestore(&priv->rmw_lock, flags);

        if (ret)
                dev_err(priv->dev, "Failed to downgrade the div\n");

        return notifier_from_errno(ret);
}
268
/*
 * Attach the optional per-clock notifier callback described by @core to the
 * freshly registered clock @hw. Returns 0 when no notifier is requested or
 * on success, -ENOMEM or the clk_notifier_register() error otherwise.
 */
static int rzg2l_register_notifier(struct clk_hw *hw, const struct cpg_core_clk *core,
                                   struct rzg2l_cpg_priv *priv)
{
        struct notifier_block *nb;

        if (!core->notifier)
                return 0;

        nb = devm_kzalloc(priv->dev, sizeof(*nb), GFP_KERNEL);
        if (!nb)
                return -ENOMEM;

        nb->notifier_call = core->notifier;

        return clk_notifier_register(hw->clk, nb);
}
285
286 static unsigned long rzg3s_div_clk_recalc_rate(struct clk_hw *hw,
287                                                unsigned long parent_rate)
288 {
289         struct clk_hw_data *clk_hw_data = to_clk_hw_data(hw);
290         struct div_hw_data *div_hw_data = to_div_hw_data(clk_hw_data);
291         struct rzg2l_cpg_priv *priv = clk_hw_data->priv;
292         u32 val;
293
294         val = readl(priv->base + GET_REG_OFFSET(clk_hw_data->conf));
295         val >>= GET_SHIFT(clk_hw_data->conf);
296         val &= GENMASK(GET_WIDTH(clk_hw_data->conf) - 1, 0);
297
298         return divider_recalc_rate(hw, parent_rate, val, div_hw_data->dtable,
299                                    CLK_DIVIDER_ROUND_CLOSEST, div_hw_data->width);
300 }
301
302 static int rzg3s_div_clk_determine_rate(struct clk_hw *hw, struct clk_rate_request *req)
303 {
304         struct clk_hw_data *clk_hw_data = to_clk_hw_data(hw);
305         struct div_hw_data *div_hw_data = to_div_hw_data(clk_hw_data);
306
307         if (div_hw_data->max_rate && req->rate > div_hw_data->max_rate)
308                 req->rate = div_hw_data->max_rate;
309
310         return divider_determine_rate(hw, req, div_hw_data->dtable, div_hw_data->width,
311                                       CLK_DIVIDER_ROUND_CLOSEST);
312 }
313
314 static int rzg3s_div_clk_set_rate(struct clk_hw *hw, unsigned long rate,
315                                   unsigned long parent_rate)
316 {
317         struct clk_hw_data *clk_hw_data = to_clk_hw_data(hw);
318         struct div_hw_data *div_hw_data = to_div_hw_data(clk_hw_data);
319         struct rzg2l_cpg_priv *priv = clk_hw_data->priv;
320         u32 off = GET_REG_OFFSET(clk_hw_data->conf);
321         u32 shift = GET_SHIFT(clk_hw_data->conf);
322         unsigned long flags;
323         u32 val;
324         int ret;
325
326         val = divider_get_val(rate, parent_rate, div_hw_data->dtable, div_hw_data->width,
327                               CLK_DIVIDER_ROUND_CLOSEST);
328
329         spin_lock_irqsave(&priv->rmw_lock, flags);
330         writel((CPG_WEN_BIT | val) << shift, priv->base + off);
331         /* Wait for the update done. */
332         ret = rzg2l_cpg_wait_clk_update_done(priv->base, clk_hw_data->sconf);
333         spin_unlock_irqrestore(&priv->rmw_lock, flags);
334
335         return ret;
336 }
337
/* Clock operations for the RZ/G3S read/write dividers above. */
static const struct clk_ops rzg3s_div_clk_ops = {
        .recalc_rate = rzg3s_div_clk_recalc_rate,
        .determine_rate = rzg3s_div_clk_determine_rate,
        .set_rate = rzg3s_div_clk_set_rate,
};
343
/*
 * Register one RZ/G3S divider core clock described by @core.
 * @base is unused here (accesses go through @priv->base) but keeps the
 * signature parallel with the other registration helpers.
 * Returns the struct clk or an ERR_PTR() on failure.
 */
static struct clk * __init
rzg3s_cpg_div_clk_register(const struct cpg_core_clk *core, struct clk **clks,
                           void __iomem *base, struct rzg2l_cpg_priv *priv)
{
        struct div_hw_data *div_hw_data;
        struct clk_init_data init = {};
        const struct clk_div_table *clkt;
        struct clk_hw *clk_hw;
        const struct clk *parent;
        const char *parent_name;
        u32 max = 0;
        int ret;

        /* Low 16 bits of @core->parent index into the clks[] array. */
        parent = clks[core->parent & 0xffff];
        if (IS_ERR(parent))
                return ERR_CAST(parent);

        parent_name = __clk_get_name(parent);

        div_hw_data = devm_kzalloc(priv->dev, sizeof(*div_hw_data), GFP_KERNEL);
        if (!div_hw_data)
                return ERR_PTR(-ENOMEM);

        init.name = core->name;
        init.flags = core->flag;
        init.ops = &rzg3s_div_clk_ops;
        init.parent_names = &parent_name;
        init.num_parents = 1;

        /* Get the maximum divider to retrieve div width. */
        for (clkt = core->dtable; clkt->div; clkt++) {
                if (max < clkt->div)
                        max = clkt->div;
        }

        div_hw_data->hw_data.priv = priv;
        div_hw_data->hw_data.conf = core->conf;
        div_hw_data->hw_data.sconf = core->sconf;
        div_hw_data->dtable = core->dtable;
        div_hw_data->invalid_rate = core->invalid_rate;
        div_hw_data->max_rate = core->max_rate;
        /* Field width derived from the largest divider value in the table. */
        div_hw_data->width = fls(max) - 1;

        clk_hw = &div_hw_data->hw_data.hw;
        clk_hw->init = &init;

        ret = devm_clk_hw_register(priv->dev, clk_hw);
        if (ret)
                return ERR_PTR(ret);

        ret = rzg2l_register_notifier(clk_hw, core, priv);
        if (ret) {
                dev_err(priv->dev, "Failed to register notifier for %s\n",
                        core->name);
                return ERR_PTR(ret);
        }

        return clk_hw->clk;
}
403
/*
 * Register a core divider clock via the generic CCF divider helpers, using
 * a divider table when @core->dtable is set, the default encoding otherwise.
 * Returns the struct clk or an ERR_PTR() on failure.
 */
static struct clk * __init
rzg2l_cpg_div_clk_register(const struct cpg_core_clk *core,
                           struct clk **clks,
                           void __iomem *base,
                           struct rzg2l_cpg_priv *priv)
{
        struct device *dev = priv->dev;
        const struct clk *parent;
        const char *parent_name;
        struct clk_hw *clk_hw;

        /* Low 16 bits of @core->parent index into the clks[] array. */
        parent = clks[core->parent & 0xffff];
        if (IS_ERR(parent))
                return ERR_CAST(parent);

        parent_name = __clk_get_name(parent);

        if (core->dtable)
                clk_hw = clk_hw_register_divider_table(dev, core->name,
                                                       parent_name, 0,
                                                       base + GET_REG_OFFSET(core->conf),
                                                       GET_SHIFT(core->conf),
                                                       GET_WIDTH(core->conf),
                                                       core->flag,
                                                       core->dtable,
                                                       &priv->rmw_lock);
        else
                clk_hw = clk_hw_register_divider(dev, core->name,
                                                 parent_name, 0,
                                                 base + GET_REG_OFFSET(core->conf),
                                                 GET_SHIFT(core->conf),
                                                 GET_WIDTH(core->conf),
                                                 core->flag, &priv->rmw_lock);

        if (IS_ERR(clk_hw))
                return ERR_CAST(clk_hw);

        return clk_hw->clk;
}
443
444 static struct clk * __init
445 rzg2l_cpg_mux_clk_register(const struct cpg_core_clk *core,
446                            void __iomem *base,
447                            struct rzg2l_cpg_priv *priv)
448 {
449         const struct clk_hw *clk_hw;
450
451         clk_hw = devm_clk_hw_register_mux(priv->dev, core->name,
452                                           core->parent_names, core->num_parents,
453                                           core->flag,
454                                           base + GET_REG_OFFSET(core->conf),
455                                           GET_SHIFT(core->conf),
456                                           GET_WIDTH(core->conf),
457                                           core->mux_flags, &priv->rmw_lock);
458         if (IS_ERR(clk_hw))
459                 return ERR_CAST(clk_hw);
460
461         return clk_hw->clk;
462 }
463
464 static int rzg2l_cpg_sd_clk_mux_set_parent(struct clk_hw *hw, u8 index)
465 {
466         struct clk_hw_data *clk_hw_data = to_clk_hw_data(hw);
467         struct sd_mux_hw_data *sd_mux_hw_data = to_sd_mux_hw_data(clk_hw_data);
468         struct rzg2l_cpg_priv *priv = clk_hw_data->priv;
469         u32 off = GET_REG_OFFSET(clk_hw_data->conf);
470         u32 shift = GET_SHIFT(clk_hw_data->conf);
471         unsigned long flags;
472         u32 val;
473         int ret;
474
475         val = clk_mux_index_to_val(sd_mux_hw_data->mtable, CLK_MUX_ROUND_CLOSEST, index);
476
477         spin_lock_irqsave(&priv->rmw_lock, flags);
478
479         writel((CPG_WEN_BIT | val) << shift, priv->base + off);
480
481         /* Wait for the update done. */
482         ret = rzg2l_cpg_wait_clk_update_done(priv->base, clk_hw_data->sconf);
483
484         spin_unlock_irqrestore(&priv->rmw_lock, flags);
485
486         if (ret)
487                 dev_err(priv->dev, "Failed to switch parent\n");
488
489         return ret;
490 }
491
492 static u8 rzg2l_cpg_sd_clk_mux_get_parent(struct clk_hw *hw)
493 {
494         struct clk_hw_data *clk_hw_data = to_clk_hw_data(hw);
495         struct sd_mux_hw_data *sd_mux_hw_data = to_sd_mux_hw_data(clk_hw_data);
496         struct rzg2l_cpg_priv *priv = clk_hw_data->priv;
497         u32 val;
498
499         val = readl(priv->base + GET_REG_OFFSET(clk_hw_data->conf));
500         val >>= GET_SHIFT(clk_hw_data->conf);
501         val &= GENMASK(GET_WIDTH(clk_hw_data->conf) - 1, 0);
502
503         return clk_mux_val_to_index(hw, sd_mux_hw_data->mtable, CLK_MUX_ROUND_CLOSEST, val);
504 }
505
/* SD mux ops: reparenting uses the WEN write + status-poll protocol above. */
static const struct clk_ops rzg2l_cpg_sd_clk_mux_ops = {
        .determine_rate = __clk_mux_determine_rate_closest,
        .set_parent     = rzg2l_cpg_sd_clk_mux_set_parent,
        .get_parent     = rzg2l_cpg_sd_clk_mux_get_parent,
};
511
512 static struct clk * __init
513 rzg2l_cpg_sd_mux_clk_register(const struct cpg_core_clk *core,
514                               void __iomem *base,
515                               struct rzg2l_cpg_priv *priv)
516 {
517         struct sd_mux_hw_data *sd_mux_hw_data;
518         struct clk_init_data init;
519         struct clk_hw *clk_hw;
520         int ret;
521
522         sd_mux_hw_data = devm_kzalloc(priv->dev, sizeof(*sd_mux_hw_data), GFP_KERNEL);
523         if (!sd_mux_hw_data)
524                 return ERR_PTR(-ENOMEM);
525
526         sd_mux_hw_data->hw_data.priv = priv;
527         sd_mux_hw_data->hw_data.conf = core->conf;
528         sd_mux_hw_data->hw_data.sconf = core->sconf;
529         sd_mux_hw_data->mtable = core->mtable;
530
531         init.name = core->name;
532         init.ops = &rzg2l_cpg_sd_clk_mux_ops;
533         init.flags = core->flag;
534         init.num_parents = core->num_parents;
535         init.parent_names = core->parent_names;
536
537         clk_hw = &sd_mux_hw_data->hw_data.hw;
538         clk_hw->init = &init;
539
540         ret = devm_clk_hw_register(priv->dev, clk_hw);
541         if (ret)
542                 return ERR_PTR(ret);
543
544         ret = rzg2l_register_notifier(clk_hw, core, priv);
545         if (ret) {
546                 dev_err(priv->dev, "Failed to register notifier for %s\n",
547                         core->name);
548                 return ERR_PTR(ret);
549         }
550
551         return clk_hw->clk;
552 }
553
/*
 * Compute the FOUTPOSTDIV rate for the requested @rate and fill @params with
 * the PLL5 settings that produce it:
 *
 *   foutpostdiv = EXTAL / refdiv * (intin + fracin / 2^24)
 *                 / (postdiv1 * postdiv2)
 *
 * refdiv, postdiv1, postdiv2 and the SSCG spread are fixed; intin/fracin
 * carry the target rate in MHz as a 24-bit fixed-point value.
 */
static unsigned long
rzg2l_cpg_get_foutpostdiv_rate(struct rzg2l_pll5_param *params,
                               unsigned long rate)
{
        unsigned long foutpostdiv_rate;

        /* NOTE(review): pl5_intin is u8, so a rate of 256 MHz or more would
         * truncate here -- confirm the expected VCLK/FOUTPOSTDIV range. */
        params->pl5_intin = rate / MEGA;
        params->pl5_fracin = div_u64(((u64)rate % MEGA) << 24, MEGA);
        params->pl5_refdiv = 2;
        params->pl5_postdiv1 = 1;
        params->pl5_postdiv2 = 1;
        params->pl5_spread = 0x16;

        foutpostdiv_rate =
                EXTAL_FREQ_IN_MEGA_HZ * MEGA / params->pl5_refdiv *
                ((((params->pl5_intin << 24) + params->pl5_fracin)) >> 24) /
                (params->pl5_postdiv1 * params->pl5_postdiv2);

        return foutpostdiv_rate;
}
574
/**
 * struct dsi_div_hw_data - DSI divider clock hardware data
 * @hw: clock hw
 * @conf: clock configuration
 * @rate: rate cached by rzg2l_cpg_dsi_div_set_rate() (0 until first set)
 * @priv: CPG private data structure
 */
struct dsi_div_hw_data {
        struct clk_hw hw;
        u32 conf;
        unsigned long rate;
        struct rzg2l_cpg_priv *priv;
};

/* Map an embedded &struct clk_hw back to its &struct dsi_div_hw_data. */
#define to_dsi_div_hw_data(_hw) container_of(_hw, struct dsi_div_hw_data, hw)
583
584 static unsigned long rzg2l_cpg_dsi_div_recalc_rate(struct clk_hw *hw,
585                                                    unsigned long parent_rate)
586 {
587         struct dsi_div_hw_data *dsi_div = to_dsi_div_hw_data(hw);
588         unsigned long rate = dsi_div->rate;
589
590         if (!rate)
591                 rate = parent_rate;
592
593         return rate;
594 }
595
596 static unsigned long rzg2l_cpg_get_vclk_parent_rate(struct clk_hw *hw,
597                                                     unsigned long rate)
598 {
599         struct dsi_div_hw_data *dsi_div = to_dsi_div_hw_data(hw);
600         struct rzg2l_cpg_priv *priv = dsi_div->priv;
601         struct rzg2l_pll5_param params;
602         unsigned long parent_rate;
603
604         parent_rate = rzg2l_cpg_get_foutpostdiv_rate(&params, rate);
605
606         if (priv->mux_dsi_div_params.clksrc)
607                 parent_rate /= 2;
608
609         return parent_rate;
610 }
611
612 static int rzg2l_cpg_dsi_div_determine_rate(struct clk_hw *hw,
613                                             struct clk_rate_request *req)
614 {
615         if (req->rate > MAX_VCLK_FREQ)
616                 req->rate = MAX_VCLK_FREQ;
617
618         req->best_parent_rate = rzg2l_cpg_get_vclk_parent_rate(hw, req->rate);
619
620         return 0;
621 }
622
/*
 * Program the DSI A/B dividers for @rate and cache the rate for
 * recalc_rate(). Returns 0 on success, -EINVAL for a zero rate or one
 * above MAX_VCLK_FREQ.
 */
static int rzg2l_cpg_dsi_div_set_rate(struct clk_hw *hw,
                                      unsigned long rate,
                                      unsigned long parent_rate)
{
        struct dsi_div_hw_data *dsi_div = to_dsi_div_hw_data(hw);
        struct rzg2l_cpg_priv *priv = dsi_div->priv;

        /*
         * MUX -->DIV_DSI_{A,B} -->M3 -->VCLK
         *
         * Based on the dot clock, the DSI divider clock sets the divider value,
         * calculates the pll parameters for generating FOUTPOSTDIV and the clk
         * source for the MUX and propagates that info to the parents.
         */

        if (!rate || rate > MAX_VCLK_FREQ)
                return -EINVAL;

        dsi_div->rate = rate;
        /* Write both divider fields, each guarded by its write-enable bit. */
        writel(CPG_PL5_SDIV_DIV_DSI_A_WEN | CPG_PL5_SDIV_DIV_DSI_B_WEN |
               (priv->mux_dsi_div_params.dsi_div_a << 0) |
               (priv->mux_dsi_div_params.dsi_div_b << 8),
               priv->base + CPG_PL5_SDIV);

        return 0;
}
649
/* Clock operations for the DSI divider clock above. */
static const struct clk_ops rzg2l_cpg_dsi_div_ops = {
        .recalc_rate = rzg2l_cpg_dsi_div_recalc_rate,
        .determine_rate = rzg2l_cpg_dsi_div_determine_rate,
        .set_rate = rzg2l_cpg_dsi_div_set_rate,
};
655
656 static struct clk * __init
657 rzg2l_cpg_dsi_div_clk_register(const struct cpg_core_clk *core,
658                                struct clk **clks,
659                                struct rzg2l_cpg_priv *priv)
660 {
661         struct dsi_div_hw_data *clk_hw_data;
662         const struct clk *parent;
663         const char *parent_name;
664         struct clk_init_data init;
665         struct clk_hw *clk_hw;
666         int ret;
667
668         parent = clks[core->parent & 0xffff];
669         if (IS_ERR(parent))
670                 return ERR_CAST(parent);
671
672         clk_hw_data = devm_kzalloc(priv->dev, sizeof(*clk_hw_data), GFP_KERNEL);
673         if (!clk_hw_data)
674                 return ERR_PTR(-ENOMEM);
675
676         clk_hw_data->priv = priv;
677
678         parent_name = __clk_get_name(parent);
679         init.name = core->name;
680         init.ops = &rzg2l_cpg_dsi_div_ops;
681         init.flags = CLK_SET_RATE_PARENT;
682         init.parent_names = &parent_name;
683         init.num_parents = 1;
684
685         clk_hw = &clk_hw_data->hw;
686         clk_hw->init = &init;
687
688         ret = devm_clk_hw_register(priv->dev, clk_hw);
689         if (ret)
690                 return ERR_PTR(ret);
691
692         return clk_hw->clk;
693 }
694
/**
 * struct pll5_mux_hw_data - PLL5 mux clock hardware data
 * @hw: clock hw
 * @conf: clock configuration (register offset)
 * @rate: cached rate (not read by the pll5_4 mux ops in this file)
 * @priv: CPG private data structure
 */
struct pll5_mux_hw_data {
        struct clk_hw hw;
        u32 conf;
        unsigned long rate;
        struct rzg2l_cpg_priv *priv;
};

/* Map an embedded &struct clk_hw back to its &struct pll5_mux_hw_data. */
#define to_pll5_mux_hw_data(_hw)        container_of(_hw, struct pll5_mux_hw_data, hw)
703
704 static int rzg2l_cpg_pll5_4_clk_mux_determine_rate(struct clk_hw *hw,
705                                                    struct clk_rate_request *req)
706 {
707         struct clk_hw *parent;
708         struct pll5_mux_hw_data *hwdata = to_pll5_mux_hw_data(hw);
709         struct rzg2l_cpg_priv *priv = hwdata->priv;
710
711         parent = clk_hw_get_parent_by_index(hw, priv->mux_dsi_div_params.clksrc);
712         req->best_parent_hw = parent;
713         req->best_parent_rate = req->rate;
714
715         return 0;
716 }
717
/*
 * Select the PLL5 mux source @index by writing CPG_OTHERFUNC1_REG with its
 * write-enable bit. Always returns 0.
 */
static int rzg2l_cpg_pll5_4_clk_mux_set_parent(struct clk_hw *hw, u8 index)
{
        struct pll5_mux_hw_data *hwdata = to_pll5_mux_hw_data(hw);
        struct rzg2l_cpg_priv *priv = hwdata->priv;

        /*
         * FOUTPOSTDIV--->|
         *  |             | -->MUX -->DIV_DSIA_B -->M3 -->VCLK
         *  |--FOUT1PH0-->|
         *
         * Based on the dot clock, the DSI divider clock calculates the parent
         * rate and clk source for the MUX. It propagates that info to
         * pll5_4_clk_mux which sets the clock source for DSI divider clock.
         */

        writel(CPG_OTHERFUNC1_REG_RES0_ON_WEN | index,
               priv->base + CPG_OTHERFUNC1_REG);

        return 0;
}
738
/* Read back the currently selected PLL5 mux source. */
static u8 rzg2l_cpg_pll5_4_clk_mux_get_parent(struct clk_hw *hw)
{
        struct pll5_mux_hw_data *hwdata = to_pll5_mux_hw_data(hw);
        struct rzg2l_cpg_priv *priv = hwdata->priv;

        /*
         * NOTE(review): the register value is returned unmasked; this relies
         * on every bit other than the selector (e.g. the write-enable bit)
         * reading back as 0 -- confirm against the CPG_OTHERFUNC1_REG layout.
         */
        return readl(priv->base + GET_REG_OFFSET(hwdata->conf));
}
746
/* Clock operations for the PLL5 FOUTPOSTDIV/FOUT1PH0 mux. */
static const struct clk_ops rzg2l_cpg_pll5_4_clk_mux_ops = {
        .determine_rate = rzg2l_cpg_pll5_4_clk_mux_determine_rate,
        .set_parent     = rzg2l_cpg_pll5_4_clk_mux_set_parent,
        .get_parent     = rzg2l_cpg_pll5_4_clk_mux_get_parent,
};
752
753 static struct clk * __init
754 rzg2l_cpg_pll5_4_mux_clk_register(const struct cpg_core_clk *core,
755                                   struct rzg2l_cpg_priv *priv)
756 {
757         struct pll5_mux_hw_data *clk_hw_data;
758         struct clk_init_data init;
759         struct clk_hw *clk_hw;
760         int ret;
761
762         clk_hw_data = devm_kzalloc(priv->dev, sizeof(*clk_hw_data), GFP_KERNEL);
763         if (!clk_hw_data)
764                 return ERR_PTR(-ENOMEM);
765
766         clk_hw_data->priv = priv;
767         clk_hw_data->conf = core->conf;
768
769         init.name = core->name;
770         init.ops = &rzg2l_cpg_pll5_4_clk_mux_ops;
771         init.flags = CLK_SET_RATE_PARENT;
772         init.num_parents = core->num_parents;
773         init.parent_names = core->parent_names;
774
775         clk_hw = &clk_hw_data->hw;
776         clk_hw->init = &init;
777
778         ret = devm_clk_hw_register(priv->dev, clk_hw);
779         if (ret)
780                 return ERR_PTR(ret);
781
782         return clk_hw->clk;
783 }
784
/**
 * struct sipll5 - SIPLL5 clock hardware data
 * @hw: clock hw
 * @conf: clock configuration
 * @foutpostdiv_rate: FOUTPOSTDIV rate cached by set_rate (0 until first set)
 * @priv: CPG private data structure
 */
struct sipll5 {
        struct clk_hw hw;
        u32 conf;
        unsigned long foutpostdiv_rate;
        struct rzg2l_cpg_priv *priv;
};

/* Map an embedded &struct clk_hw back to its &struct sipll5. */
#define to_sipll5(_hw)  container_of(_hw, struct sipll5, hw)
793
794 static unsigned long rzg2l_cpg_get_vclk_rate(struct clk_hw *hw,
795                                              unsigned long rate)
796 {
797         struct sipll5 *sipll5 = to_sipll5(hw);
798         struct rzg2l_cpg_priv *priv = sipll5->priv;
799         unsigned long vclk;
800
801         vclk = rate / ((1 << priv->mux_dsi_div_params.dsi_div_a) *
802                        (priv->mux_dsi_div_params.dsi_div_b + 1));
803
804         if (priv->mux_dsi_div_params.clksrc)
805                 vclk /= 2;
806
807         return vclk;
808 }
809
810 static unsigned long rzg2l_cpg_sipll5_recalc_rate(struct clk_hw *hw,
811                                                   unsigned long parent_rate)
812 {
813         struct sipll5 *sipll5 = to_sipll5(hw);
814         unsigned long pll5_rate = sipll5->foutpostdiv_rate;
815
816         if (!pll5_rate)
817                 pll5_rate = parent_rate;
818
819         return pll5_rate;
820 }
821
/*
 * Accept any requested rate unmodified; set_rate() computes the actual PLL
 * parameters (and rejects a zero rate).
 */
static long rzg2l_cpg_sipll5_round_rate(struct clk_hw *hw,
                                        unsigned long rate,
                                        unsigned long *parent_rate)
{
        return rate;
}
828
/*
 * Reprogram SIPLL5 for the requested FOUTPOSTDIV @rate: put the PLL into
 * standby, write the divider/fraction parameters computed by
 * rzg2l_cpg_get_foutpostdiv_rate(), then bring it back up and wait for lock.
 * Returns 0 on success or the readl_poll_timeout() error (-ETIMEDOUT).
 */
static int rzg2l_cpg_sipll5_set_rate(struct clk_hw *hw,
                                     unsigned long rate,
                                     unsigned long parent_rate)
{
        struct sipll5 *sipll5 = to_sipll5(hw);
        struct rzg2l_cpg_priv *priv = sipll5->priv;
        struct rzg2l_pll5_param params;
        unsigned long vclk_rate;
        int ret;
        u32 val;

        /*
         *  OSC --> PLL5 --> FOUTPOSTDIV-->|
         *                   |             | -->MUX -->DIV_DSIA_B -->M3 -->VCLK
         *                   |--FOUT1PH0-->|
         *
         * Based on the dot clock, the DSI divider clock calculates the parent
         * rate and the pll5 parameters for generating FOUTPOSTDIV. It propagates
         * that info to sipll5 which sets parameters for generating FOUTPOSTDIV.
         *
         * OSC --> PLL5 --> FOUTPOSTDIV
         */

        if (!rate)
                return -EINVAL;

        /* Cache the resulting rate for recalc_rate(). */
        vclk_rate = rzg2l_cpg_get_vclk_rate(hw, rate);
        sipll5->foutpostdiv_rate =
                rzg2l_cpg_get_foutpostdiv_rate(&params, vclk_rate);

        /* Put PLL5 into standby mode */
        writel(CPG_SIPLL5_STBY_RESETB_WEN, priv->base + CPG_SIPLL5_STBY);
        ret = readl_poll_timeout(priv->base + CPG_SIPLL5_MON, val,
                                 !(val & CPG_SIPLL5_MON_PLL5_LOCK), 100, 250000);
        if (ret) {
                dev_err(priv->dev, "failed to release pll5 lock");
                return ret;
        }

        /* Output clock setting 1 */
        writel((params.pl5_postdiv1 << 0) | (params.pl5_postdiv2 << 4) |
               (params.pl5_refdiv << 8), priv->base + CPG_SIPLL5_CLK1);

        /* Output clock setting, SSCG modulation value setting 3 */
        writel((params.pl5_fracin << 8), priv->base + CPG_SIPLL5_CLK3);

        /* Output clock setting 4 */
        writel(CPG_SIPLL5_CLK4_RESV_LSB | (params.pl5_intin << 16),
               priv->base + CPG_SIPLL5_CLK4);

        /* Output clock setting 5 */
        writel(params.pl5_spread, priv->base + CPG_SIPLL5_CLK5);

        /* PLL normal mode setting */
        writel(CPG_SIPLL5_STBY_DOWNSPREAD_WEN | CPG_SIPLL5_STBY_SSCG_EN_WEN |
               CPG_SIPLL5_STBY_RESETB_WEN | CPG_SIPLL5_STBY_RESETB,
               priv->base + CPG_SIPLL5_STBY);

        /* PLL normal mode transition, output clock stability check */
        ret = readl_poll_timeout(priv->base + CPG_SIPLL5_MON, val,
                                 (val & CPG_SIPLL5_MON_PLL5_LOCK), 100, 250000);
        if (ret) {
                dev_err(priv->dev, "failed to lock pll5");
                return ret;
        }

        return 0;
}
897
/* clk_ops for the SIPLL5 clock; rate is programmed by the DSI pipeline. */
static const struct clk_ops rzg2l_cpg_sipll5_ops = {
	.recalc_rate = rzg2l_cpg_sipll5_recalc_rate,
	.round_rate = rzg2l_cpg_sipll5_round_rate,
	.set_rate = rzg2l_cpg_sipll5_set_rate,
};
903
904 static struct clk * __init
905 rzg2l_cpg_sipll5_register(const struct cpg_core_clk *core,
906                           struct clk **clks,
907                           struct rzg2l_cpg_priv *priv)
908 {
909         const struct clk *parent;
910         struct clk_init_data init;
911         const char *parent_name;
912         struct sipll5 *sipll5;
913         struct clk_hw *clk_hw;
914         int ret;
915
916         parent = clks[core->parent & 0xffff];
917         if (IS_ERR(parent))
918                 return ERR_CAST(parent);
919
920         sipll5 = devm_kzalloc(priv->dev, sizeof(*sipll5), GFP_KERNEL);
921         if (!sipll5)
922                 return ERR_PTR(-ENOMEM);
923
924         init.name = core->name;
925         parent_name = __clk_get_name(parent);
926         init.ops = &rzg2l_cpg_sipll5_ops;
927         init.flags = 0;
928         init.parent_names = &parent_name;
929         init.num_parents = 1;
930
931         sipll5->hw.init = &init;
932         sipll5->conf = core->conf;
933         sipll5->priv = priv;
934
935         writel(CPG_SIPLL5_STBY_SSCG_EN_WEN | CPG_SIPLL5_STBY_RESETB_WEN |
936                CPG_SIPLL5_STBY_RESETB, priv->base + CPG_SIPLL5_STBY);
937
938         clk_hw = &sipll5->hw;
939         clk_hw->init = &init;
940
941         ret = devm_clk_hw_register(priv->dev, clk_hw);
942         if (ret)
943                 return ERR_PTR(ret);
944
945         priv->mux_dsi_div_params.clksrc = 1; /* Use clk src 1 for DSI */
946         priv->mux_dsi_div_params.dsi_div_a = 1; /* Divided by 2 */
947         priv->mux_dsi_div_params.dsi_div_b = 2; /* Divided by 3 */
948
949         return clk_hw->clk;
950 }
951
/**
 * struct pll_clk - PLL clock
 *
 * @hw: handle between common and hardware-specific interfaces
 * @conf: packed register offsets, decoded via GET_REG_SAMPLL_CLK1/CLK2
 * @type: clock type (CLK_TYPE_SAM_PLL or CLK_TYPE_G3S_PLL)
 * @base: CPG register block base address
 * @priv: CPG private data
 */
struct pll_clk {
	struct clk_hw hw;
	unsigned int conf;
	unsigned int type;
	void __iomem *base;
	struct rzg2l_cpg_priv *priv;
};

/* Map a clk_hw embedded in a struct pll_clk back to its container. */
#define to_pll(_hw)	container_of(_hw, struct pll_clk, hw)
961
/*
 * Compute the SAM PLL output rate from the hardware settings:
 *
 *   rate = parent_rate * (M + K / 65536) / (P * 2^S)
 *
 * M, K (signed 16-bit fractional correction) and P come from CLK1, S from
 * CLK2 (see the KDIV/MDIV/PDIV/SDIV field macros). The multiplier is
 * assembled in 16.16 fixed point, so the shift is 16 + S.
 */
static unsigned long rzg2l_cpg_pll_clk_recalc_rate(struct clk_hw *hw,
						   unsigned long parent_rate)
{
	struct pll_clk *pll_clk = to_pll(hw);
	struct rzg2l_cpg_priv *priv = pll_clk->priv;
	unsigned int val1, val2;
	u64 rate;

	/* Only SAM PLLs are backed by these registers. */
	if (pll_clk->type != CLK_TYPE_SAM_PLL)
		return parent_rate;

	val1 = readl(priv->base + GET_REG_SAMPLL_CLK1(pll_clk->conf));
	val2 = readl(priv->base + GET_REG_SAMPLL_CLK2(pll_clk->conf));

	rate = mul_u64_u32_shr(parent_rate, (MDIV(val1) << 16) + KDIV(val1),
			       16 + SDIV(val2));

	return DIV_ROUND_CLOSEST_ULL(rate, PDIV(val1));
}
981
/* Read-only clk_ops for RZ/G2L SAM PLLs (rate fixed by boot firmware). */
static const struct clk_ops rzg2l_cpg_pll_ops = {
	.recalc_rate = rzg2l_cpg_pll_clk_recalc_rate,
};
985
/*
 * Compute the RZ/G3S PLL output rate from the hardware settings:
 *
 *   rate = parent_rate * (NI + NF / 4096) / (M * P)
 *
 * NI/NF/M/P come from the CLK1 register (RZG3S_DIV_* fields). The
 * multiplier is assembled in 4096ths (12-bit fraction), hence the
 * shift by 12.
 */
static unsigned long rzg3s_cpg_pll_clk_recalc_rate(struct clk_hw *hw,
						   unsigned long parent_rate)
{
	struct pll_clk *pll_clk = to_pll(hw);
	struct rzg2l_cpg_priv *priv = pll_clk->priv;
	u32 nir, nfr, mr, pr, val;
	u64 rate;

	/* Only RZ/G3S PLLs are backed by these registers. */
	if (pll_clk->type != CLK_TYPE_G3S_PLL)
		return parent_rate;

	val = readl(priv->base + GET_REG_SAMPLL_CLK1(pll_clk->conf));

	pr = 1 << FIELD_GET(RZG3S_DIV_P, val);
	/* Hardware interprets values higher than 8 as p = 16. */
	if (pr > 8)
		pr = 16;

	mr  = FIELD_GET(RZG3S_DIV_M, val) + 1;
	nir = FIELD_GET(RZG3S_DIV_NI, val) + 1;
	nfr = FIELD_GET(RZG3S_DIV_NF, val);

	rate = mul_u64_u32_shr(parent_rate, 4096 * nir + nfr, 12);

	return DIV_ROUND_CLOSEST_ULL(rate, (mr * pr));
}
1012
/* Read-only clk_ops for RZ/G3S PLLs (rate fixed by boot firmware). */
static const struct clk_ops rzg3s_cpg_pll_ops = {
	.recalc_rate = rzg3s_cpg_pll_clk_recalc_rate,
};
1016
1017 static struct clk * __init
1018 rzg2l_cpg_pll_clk_register(const struct cpg_core_clk *core,
1019                            struct clk **clks,
1020                            void __iomem *base,
1021                            struct rzg2l_cpg_priv *priv,
1022                            const struct clk_ops *ops)
1023 {
1024         struct device *dev = priv->dev;
1025         const struct clk *parent;
1026         struct clk_init_data init;
1027         const char *parent_name;
1028         struct pll_clk *pll_clk;
1029
1030         parent = clks[core->parent & 0xffff];
1031         if (IS_ERR(parent))
1032                 return ERR_CAST(parent);
1033
1034         pll_clk = devm_kzalloc(dev, sizeof(*pll_clk), GFP_KERNEL);
1035         if (!pll_clk)
1036                 return ERR_PTR(-ENOMEM);
1037
1038         parent_name = __clk_get_name(parent);
1039         init.name = core->name;
1040         init.ops = ops;
1041         init.flags = 0;
1042         init.parent_names = &parent_name;
1043         init.num_parents = 1;
1044
1045         pll_clk->hw.init = &init;
1046         pll_clk->conf = core->conf;
1047         pll_clk->base = base;
1048         pll_clk->priv = priv;
1049         pll_clk->type = core->type;
1050
1051         return clk_register(NULL, &pll_clk->hw);
1052 }
1053
1054 static struct clk
1055 *rzg2l_cpg_clk_src_twocell_get(struct of_phandle_args *clkspec,
1056                                void *data)
1057 {
1058         unsigned int clkidx = clkspec->args[1];
1059         struct rzg2l_cpg_priv *priv = data;
1060         struct device *dev = priv->dev;
1061         const char *type;
1062         struct clk *clk;
1063
1064         switch (clkspec->args[0]) {
1065         case CPG_CORE:
1066                 type = "core";
1067                 if (clkidx > priv->last_dt_core_clk) {
1068                         dev_err(dev, "Invalid %s clock index %u\n", type, clkidx);
1069                         return ERR_PTR(-EINVAL);
1070                 }
1071                 clk = priv->clks[clkidx];
1072                 break;
1073
1074         case CPG_MOD:
1075                 type = "module";
1076                 if (clkidx >= priv->num_mod_clks) {
1077                         dev_err(dev, "Invalid %s clock index %u\n", type,
1078                                 clkidx);
1079                         return ERR_PTR(-EINVAL);
1080                 }
1081                 clk = priv->clks[priv->num_core_clks + clkidx];
1082                 break;
1083
1084         default:
1085                 dev_err(dev, "Invalid CPG clock type %u\n", clkspec->args[0]);
1086                 return ERR_PTR(-EINVAL);
1087         }
1088
1089         if (IS_ERR(clk))
1090                 dev_err(dev, "Cannot get %s clock %u: %ld", type, clkidx,
1091                         PTR_ERR(clk));
1092         else
1093                 dev_dbg(dev, "clock (%u, %u) is %pC at %lu Hz\n",
1094                         clkspec->args[0], clkspec->args[1], clk,
1095                         clk_get_rate(clk));
1096         return clk;
1097 }
1098
/*
 * Register one core clock described by @core, dispatching on its type to
 * the matching registration helper, and store the result in priv->clks[].
 * Registration failures are logged but not fatal: the slot keeps its
 * previous ERR_PTR so later lookups report the error.
 */
static void __init
rzg2l_cpg_register_core_clk(const struct cpg_core_clk *core,
			    const struct rzg2l_cpg_info *info,
			    struct rzg2l_cpg_priv *priv)
{
	struct clk *clk = ERR_PTR(-EOPNOTSUPP), *parent;
	struct device *dev = priv->dev;
	unsigned int id = core->id, div = core->div;
	const char *parent_name;

	WARN_DEBUG(id >= priv->num_core_clks);
	WARN_DEBUG(PTR_ERR(priv->clks[id]) != -ENOENT);

	if (!core->name) {
		/* Skip NULLified clock */
		return;
	}

	switch (core->type) {
	case CLK_TYPE_IN:
		/* External input: resolved by name from the device tree. */
		clk = of_clk_get_by_name(priv->dev->of_node, core->name);
		break;
	case CLK_TYPE_FF:
		/* Fixed factor clock: parent must already be registered. */
		WARN_DEBUG(core->parent >= priv->num_core_clks);
		parent = priv->clks[core->parent];
		if (IS_ERR(parent)) {
			clk = parent;
			goto fail;
		}

		parent_name = __clk_get_name(parent);
		clk = clk_register_fixed_factor(NULL, core->name,
						parent_name, CLK_SET_RATE_PARENT,
						core->mult, div);
		break;
	case CLK_TYPE_SAM_PLL:
		clk = rzg2l_cpg_pll_clk_register(core, priv->clks, priv->base, priv,
						 &rzg2l_cpg_pll_ops);
		break;
	case CLK_TYPE_G3S_PLL:
		clk = rzg2l_cpg_pll_clk_register(core, priv->clks, priv->base, priv,
						 &rzg3s_cpg_pll_ops);
		break;
	case CLK_TYPE_SIPLL5:
		clk = rzg2l_cpg_sipll5_register(core, priv->clks, priv);
		break;
	case CLK_TYPE_DIV:
		clk = rzg2l_cpg_div_clk_register(core, priv->clks,
						 priv->base, priv);
		break;
	case CLK_TYPE_G3S_DIV:
		clk = rzg3s_cpg_div_clk_register(core, priv->clks, priv->base, priv);
		break;
	case CLK_TYPE_MUX:
		clk = rzg2l_cpg_mux_clk_register(core, priv->base, priv);
		break;
	case CLK_TYPE_SD_MUX:
		clk = rzg2l_cpg_sd_mux_clk_register(core, priv->base, priv);
		break;
	case CLK_TYPE_PLL5_4_MUX:
		clk = rzg2l_cpg_pll5_4_mux_clk_register(core, priv);
		break;
	case CLK_TYPE_DSI_DIV:
		clk = rzg2l_cpg_dsi_div_clk_register(core, priv->clks, priv);
		break;
	default:
		goto fail;
	}

	if (IS_ERR_OR_NULL(clk))
		goto fail;

	dev_dbg(dev, "Core clock %pC at %lu Hz\n", clk, clk_get_rate(clk));
	priv->clks[id] = clk;
	return;

fail:
	dev_err(dev, "Failed to register %s clock %s: %ld\n", "core",
		core->name, PTR_ERR(clk));
}
1179
/**
 * struct mstp_clock - MSTP gating clock
 *
 * @hw: handle between common and hardware-specific interfaces
 * @off: register offset (0 means the clock has no ON/OFF control)
 * @bit: ON/MON bit
 * @enabled: soft state of the clock, if it is coupled with another clock
 * @priv: CPG/MSTP private data
 * @sibling: pointer to the other coupled clock
 */
struct mstp_clock {
	struct clk_hw hw;
	u16 off;
	u8 bit;
	bool enabled;
	struct rzg2l_cpg_priv *priv;
	struct mstp_clock *sibling;
};

/* Map a clk_hw embedded in a struct mstp_clock back to its container. */
#define to_mod_clock(_hw) container_of(_hw, struct mstp_clock, hw)
1200
/*
 * Gate or ungate a module clock. The CLK_ON registers use a write-enable
 * scheme: the upper 16 bits select which bits are written, the lower 16
 * carry the value. On enable, when the SoC has monitor registers, poll
 * the matching CLK_MON bit until the clock is reported running.
 *
 * Returns 0 on success (or when the clock has no ON/OFF control), or the
 * poll-timeout error on a failed enable.
 */
static int rzg2l_mod_clock_endisable(struct clk_hw *hw, bool enable)
{
	struct mstp_clock *clock = to_mod_clock(hw);
	struct rzg2l_cpg_priv *priv = clock->priv;
	unsigned int reg = clock->off;
	struct device *dev = priv->dev;
	u32 bitmask = BIT(clock->bit);
	u32 value;
	int error;

	if (!clock->off) {
		dev_dbg(dev, "%pC does not support ON/OFF\n",  hw->clk);
		return 0;
	}

	dev_dbg(dev, "CLK_ON 0x%x/%pC %s\n", CLK_ON_R(reg), hw->clk,
		enable ? "ON" : "OFF");

	/* Write-enable mask in the upper half word, value in the lower. */
	value = bitmask << 16;
	if (enable)
		value |= bitmask;

	writel(value, priv->base + CLK_ON_R(reg));

	if (!enable)
		return 0;

	if (!priv->info->has_clk_mon_regs)
		return 0;

	error = readl_poll_timeout_atomic(priv->base + CLK_MON_R(reg), value,
					  value & bitmask, 0, 10);
	if (error)
		dev_err(dev, "Failed to enable CLK_ON %p\n",
			priv->base + CLK_ON_R(reg));

	return error;
}
1239
1240 static int rzg2l_mod_clock_enable(struct clk_hw *hw)
1241 {
1242         struct mstp_clock *clock = to_mod_clock(hw);
1243
1244         if (clock->sibling) {
1245                 struct rzg2l_cpg_priv *priv = clock->priv;
1246                 unsigned long flags;
1247                 bool enabled;
1248
1249                 spin_lock_irqsave(&priv->rmw_lock, flags);
1250                 enabled = clock->sibling->enabled;
1251                 clock->enabled = true;
1252                 spin_unlock_irqrestore(&priv->rmw_lock, flags);
1253                 if (enabled)
1254                         return 0;
1255         }
1256
1257         return rzg2l_mod_clock_endisable(hw, true);
1258 }
1259
1260 static void rzg2l_mod_clock_disable(struct clk_hw *hw)
1261 {
1262         struct mstp_clock *clock = to_mod_clock(hw);
1263
1264         if (clock->sibling) {
1265                 struct rzg2l_cpg_priv *priv = clock->priv;
1266                 unsigned long flags;
1267                 bool enabled;
1268
1269                 spin_lock_irqsave(&priv->rmw_lock, flags);
1270                 enabled = clock->sibling->enabled;
1271                 clock->enabled = false;
1272                 spin_unlock_irqrestore(&priv->rmw_lock, flags);
1273                 if (enabled)
1274                         return;
1275         }
1276
1277         rzg2l_mod_clock_endisable(hw, false);
1278 }
1279
1280 static int rzg2l_mod_clock_is_enabled(struct clk_hw *hw)
1281 {
1282         struct mstp_clock *clock = to_mod_clock(hw);
1283         struct rzg2l_cpg_priv *priv = clock->priv;
1284         u32 bitmask = BIT(clock->bit);
1285         u32 value;
1286
1287         if (!clock->off) {
1288                 dev_dbg(priv->dev, "%pC does not support ON/OFF\n",  hw->clk);
1289                 return 1;
1290         }
1291
1292         if (clock->sibling)
1293                 return clock->enabled;
1294
1295         if (priv->info->has_clk_mon_regs)
1296                 value = readl(priv->base + CLK_MON_R(clock->off));
1297         else
1298                 value = readl(priv->base + clock->off);
1299
1300         return value & bitmask;
1301 }
1302
/* clk_ops for MSTP module gate clocks. */
static const struct clk_ops rzg2l_mod_clock_ops = {
	.enable = rzg2l_mod_clock_enable,
	.disable = rzg2l_mod_clock_disable,
	.is_enabled = rzg2l_mod_clock_is_enabled,
};
1308
/*
 * Find the first registered module clock sharing @clock's ON register
 * offset and bit (i.e. its coupled sibling).
 *
 * NOTE(review): since priv->clks[] already contains @clock itself when
 * this is called, the first coupled clock to register matches itself;
 * that transient self-link is overwritten once the real sibling
 * registers and links both ways — confirm registration order guarantees
 * this before relying on it elsewhere.
 */
static struct mstp_clock
*rzg2l_mod_clock_get_sibling(struct mstp_clock *clock,
			     struct rzg2l_cpg_priv *priv)
{
	struct clk_hw *hw;
	unsigned int i;

	for (i = 0; i < priv->num_mod_clks; i++) {
		struct mstp_clock *clk;

		/* Slot still holds the "not registered" placeholder. */
		if (priv->clks[priv->num_core_clks + i] == ERR_PTR(-ENOENT))
			continue;

		hw = __clk_get_hw(priv->clks[priv->num_core_clks + i]);
		clk = to_mod_clock(hw);
		if (clock->off == clk->off && clock->bit == clk->bit)
			return clk;
	}

	return NULL;
}
1330
/*
 * Register one MSTP module clock and store it in priv->clks[]. Critical
 * clocks (listed in info->crit_mod_clks) get CLK_IS_CRITICAL so the clk
 * core never gates them. Coupled clocks are cross-linked with their
 * sibling so the shared hardware gate is refcounted in software.
 * Failures are logged but not fatal.
 */
static void __init
rzg2l_cpg_register_mod_clk(const struct rzg2l_mod_clk *mod,
			   const struct rzg2l_cpg_info *info,
			   struct rzg2l_cpg_priv *priv)
{
	struct mstp_clock *clock = NULL;
	struct device *dev = priv->dev;
	unsigned int id = mod->id;
	struct clk_init_data init;
	struct clk *parent, *clk;
	const char *parent_name;
	unsigned int i;

	WARN_DEBUG(id < priv->num_core_clks);
	WARN_DEBUG(id >= priv->num_core_clks + priv->num_mod_clks);
	WARN_DEBUG(mod->parent >= priv->num_core_clks + priv->num_mod_clks);
	WARN_DEBUG(PTR_ERR(priv->clks[id]) != -ENOENT);

	if (!mod->name) {
		/* Skip NULLified clock */
		return;
	}

	parent = priv->clks[mod->parent];
	if (IS_ERR(parent)) {
		clk = parent;
		goto fail;
	}

	clock = devm_kzalloc(dev, sizeof(*clock), GFP_KERNEL);
	if (!clock) {
		clk = ERR_PTR(-ENOMEM);
		goto fail;
	}

	init.name = mod->name;
	init.ops = &rzg2l_mod_clock_ops;
	init.flags = CLK_SET_RATE_PARENT;
	/* Mark clocks the SoC cannot run without as critical. */
	for (i = 0; i < info->num_crit_mod_clks; i++)
		if (id == info->crit_mod_clks[i]) {
			dev_dbg(dev, "CPG %s setting CLK_IS_CRITICAL\n",
				mod->name);
			init.flags |= CLK_IS_CRITICAL;
			break;
		}

	parent_name = __clk_get_name(parent);
	init.parent_names = &parent_name;
	init.num_parents = 1;

	clock->off = mod->off;
	clock->bit = mod->bit;
	clock->priv = priv;
	clock->hw.init = &init;

	clk = clk_register(NULL, &clock->hw);
	if (IS_ERR(clk))
		goto fail;

	dev_dbg(dev, "Module clock %pC at %lu Hz\n", clk, clk_get_rate(clk));
	priv->clks[id] = clk;

	if (mod->is_coupled) {
		struct mstp_clock *sibling;

		/* Seed the soft state from hardware, then cross-link. */
		clock->enabled = rzg2l_mod_clock_is_enabled(&clock->hw);
		sibling = rzg2l_mod_clock_get_sibling(clock, priv);
		if (sibling) {
			clock->sibling = sibling;
			sibling->sibling = clock;
		}
	}

	return;

fail:
	dev_err(dev, "Failed to register %s clock %s: %ld\n", "module",
		mod->name, PTR_ERR(clk));
}
1410
/* Map a reset_controller_dev back to its enclosing rzg2l_cpg_priv. */
#define rcdev_to_priv(x)	container_of(x, struct rzg2l_cpg_priv, rcdev)

/*
 * Assert a reset line. The reset registers use the same write-enable
 * scheme as CLK_ON (mask in the upper half word). Completion is confirmed
 * via the per-register monitor, the shared CPG_RST_MON register, or — when
 * neither exists — a fixed delay of one RCLK cycle.
 */
static int rzg2l_cpg_assert(struct reset_controller_dev *rcdev,
			    unsigned long id)
{
	struct rzg2l_cpg_priv *priv = rcdev_to_priv(rcdev);
	const struct rzg2l_cpg_info *info = priv->info;
	unsigned int reg = info->resets[id].off;
	u32 mask = BIT(info->resets[id].bit);
	s8 monbit = info->resets[id].monbit;
	u32 value = mask << 16;

	dev_dbg(rcdev->dev, "assert id:%ld offset:0x%x\n", id, CLK_RST_R(reg));

	writel(value, priv->base + CLK_RST_R(reg));

	if (info->has_clk_mon_regs) {
		reg = CLK_MRST_R(reg);
	} else if (monbit >= 0) {
		reg = CPG_RST_MON;
		mask = BIT(monbit);
	} else {
		/* Wait for at least one cycle of the RCLK clock (@ ca. 32 kHz) */
		udelay(35);
		return 0;
	}

	/* Monitor bit set means the reset is asserted. */
	return readl_poll_timeout_atomic(priv->base + reg, value,
					 value & mask, 10, 200);
}
1441
/*
 * Deassert a reset line: write the bit with its write-enable set, then
 * wait for the monitor bit to clear (or a fixed RCLK-cycle delay when no
 * monitor exists). Mirror image of rzg2l_cpg_assert().
 */
static int rzg2l_cpg_deassert(struct reset_controller_dev *rcdev,
			      unsigned long id)
{
	struct rzg2l_cpg_priv *priv = rcdev_to_priv(rcdev);
	const struct rzg2l_cpg_info *info = priv->info;
	unsigned int reg = info->resets[id].off;
	u32 mask = BIT(info->resets[id].bit);
	s8 monbit = info->resets[id].monbit;
	u32 value = (mask << 16) | mask;

	dev_dbg(rcdev->dev, "deassert id:%ld offset:0x%x\n", id,
		CLK_RST_R(reg));

	writel(value, priv->base + CLK_RST_R(reg));

	if (info->has_clk_mon_regs) {
		reg = CLK_MRST_R(reg);
	} else if (monbit >= 0) {
		reg = CPG_RST_MON;
		mask = BIT(monbit);
	} else {
		/* Wait for at least one cycle of the RCLK clock (@ ca. 32 kHz) */
		udelay(35);
		return 0;
	}

	/* Monitor bit cleared means the reset is released. */
	return readl_poll_timeout_atomic(priv->base + reg, value,
					 !(value & mask), 10, 200);
}
1471
/* Pulse a reset line: assert it, then deassert it if the assert succeeded. */
static int rzg2l_cpg_reset(struct reset_controller_dev *rcdev,
			   unsigned long id)
{
	int ret = rzg2l_cpg_assert(rcdev, id);

	return ret ? ret : rzg2l_cpg_deassert(rcdev, id);
}
1483
1484 static int rzg2l_cpg_status(struct reset_controller_dev *rcdev,
1485                             unsigned long id)
1486 {
1487         struct rzg2l_cpg_priv *priv = rcdev_to_priv(rcdev);
1488         const struct rzg2l_cpg_info *info = priv->info;
1489         s8 monbit = info->resets[id].monbit;
1490         unsigned int reg;
1491         u32 bitmask;
1492
1493         if (info->has_clk_mon_regs) {
1494                 reg = CLK_MRST_R(info->resets[id].off);
1495                 bitmask = BIT(info->resets[id].bit);
1496         } else if (monbit >= 0) {
1497                 reg = CPG_RST_MON;
1498                 bitmask = BIT(monbit);
1499         } else {
1500                 return -ENOTSUPP;
1501         }
1502
1503         return !!(readl(priv->base + reg) & bitmask);
1504 }
1505
/* reset_control_ops backed by the CLK_RST / CPG_RST_MON registers. */
static const struct reset_control_ops rzg2l_cpg_reset_ops = {
	.reset = rzg2l_cpg_reset,
	.assert = rzg2l_cpg_assert,
	.deassert = rzg2l_cpg_deassert,
	.status = rzg2l_cpg_status,
};
1512
1513 static int rzg2l_cpg_reset_xlate(struct reset_controller_dev *rcdev,
1514                                  const struct of_phandle_args *reset_spec)
1515 {
1516         struct rzg2l_cpg_priv *priv = rcdev_to_priv(rcdev);
1517         const struct rzg2l_cpg_info *info = priv->info;
1518         unsigned int id = reset_spec->args[0];
1519
1520         if (id >= rcdev->nr_resets || !info->resets[id].off) {
1521                 dev_err(rcdev->dev, "Invalid reset index %u\n", id);
1522                 return -EINVAL;
1523         }
1524
1525         return id;
1526 }
1527
1528 static int rzg2l_cpg_reset_controller_register(struct rzg2l_cpg_priv *priv)
1529 {
1530         priv->rcdev.ops = &rzg2l_cpg_reset_ops;
1531         priv->rcdev.of_node = priv->dev->of_node;
1532         priv->rcdev.dev = priv->dev;
1533         priv->rcdev.of_reset_n_cells = 1;
1534         priv->rcdev.of_xlate = rzg2l_cpg_reset_xlate;
1535         priv->rcdev.nr_resets = priv->num_resets;
1536
1537         return devm_reset_controller_register(priv->dev, &priv->rcdev);
1538 }
1539
1540 static bool rzg2l_cpg_is_pm_clk(struct rzg2l_cpg_priv *priv,
1541                                 const struct of_phandle_args *clkspec)
1542 {
1543         const struct rzg2l_cpg_info *info = priv->info;
1544         unsigned int id;
1545         unsigned int i;
1546
1547         if (clkspec->args_count != 2)
1548                 return false;
1549
1550         if (clkspec->args[0] != CPG_MOD)
1551                 return false;
1552
1553         id = clkspec->args[1] + info->num_total_core_clks;
1554         for (i = 0; i < info->num_no_pm_mod_clks; i++) {
1555                 if (info->no_pm_mod_clks[i] == id)
1556                         return false;
1557         }
1558
1559         return true;
1560 }
1561
/*
 * genpd attach callback: walk the consumer's "clocks" DT property and add
 * every PM-managed CPG module clock to the device's pm_clk list. The
 * pm_clk list is created lazily on the first qualifying clock. On error
 * the partially-built list is torn down via the goto ladder.
 */
static int rzg2l_cpg_attach_dev(struct generic_pm_domain *domain, struct device *dev)
{
	struct rzg2l_cpg_priv *priv = container_of(domain, struct rzg2l_cpg_priv, genpd);
	struct device_node *np = dev->of_node;
	struct of_phandle_args clkspec;
	bool once = true;
	struct clk *clk;
	int error;
	int i = 0;

	while (!of_parse_phandle_with_args(np, "clocks", "#clock-cells", i,
					   &clkspec)) {
		if (rzg2l_cpg_is_pm_clk(priv, &clkspec)) {
			if (once) {
				/* Create the pm_clk list only when needed. */
				once = false;
				error = pm_clk_create(dev);
				if (error) {
					of_node_put(clkspec.np);
					goto err;
				}
			}
			clk = of_clk_get_from_provider(&clkspec);
			of_node_put(clkspec.np);
			if (IS_ERR(clk)) {
				error = PTR_ERR(clk);
				goto fail_destroy;
			}

			error = pm_clk_add_clk(dev, clk);
			if (error) {
				dev_err(dev, "pm_clk_add_clk failed %d\n",
					error);
				goto fail_put;
			}
		} else {
			of_node_put(clkspec.np);
		}
		i++;
	}

	return 0;

fail_put:
	clk_put(clk);

fail_destroy:
	pm_clk_destroy(dev);
err:
	return error;
}
1612
/* genpd detach callback: drop the pm_clk list if attach created one. */
static void rzg2l_cpg_detach_dev(struct generic_pm_domain *unused, struct device *dev)
{
	if (!pm_clk_no_clocks(dev))
		pm_clk_destroy(dev);
}
1618
/* devm action: unregister the genpd on driver teardown. */
static void rzg2l_cpg_genpd_remove(void *data)
{
	pm_genpd_remove(data);
}
1623
/*
 * Initialize the always-on PM clock domain and expose it as a simple
 * genpd provider for the CPG node. Removal is tied to the device via a
 * devm action. Returns 0 or a negative errno.
 */
static int __init rzg2l_cpg_add_clk_domain(struct rzg2l_cpg_priv *priv)
{
	struct device *dev = priv->dev;
	struct device_node *np = dev->of_node;
	struct generic_pm_domain *genpd = &priv->genpd;
	int ret;

	genpd->name = np->name;
	genpd->flags = GENPD_FLAG_PM_CLK | GENPD_FLAG_ALWAYS_ON |
		       GENPD_FLAG_ACTIVE_WAKEUP;
	genpd->attach_dev = rzg2l_cpg_attach_dev;
	genpd->detach_dev = rzg2l_cpg_detach_dev;
	ret = pm_genpd_init(genpd, &pm_domain_always_on_gov, false);
	if (ret)
		return ret;

	ret = devm_add_action_or_reset(dev, rzg2l_cpg_genpd_remove, genpd);
	if (ret)
		return ret;

	return of_genpd_add_provider_simple(np, genpd);
}
1646
/*
 * Probe: map the CPG registers, allocate the clock array (seeded with
 * -ENOENT placeholders), register all core and module clocks, then expose
 * the DT clock provider, the PM clock domain and the reset controller.
 * All resources are devm-managed.
 */
static int __init rzg2l_cpg_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct device_node *np = dev->of_node;
	const struct rzg2l_cpg_info *info;
	struct rzg2l_cpg_priv *priv;
	unsigned int nclks, i;
	struct clk **clks;
	int error;

	info = of_device_get_match_data(dev);

	priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	priv->dev = dev;
	priv->info = info;
	spin_lock_init(&priv->rmw_lock);

	priv->base = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(priv->base))
		return PTR_ERR(priv->base);

	/* Core clocks first, module clocks after them in one flat array. */
	nclks = info->num_total_core_clks + info->num_hw_mod_clks;
	clks = devm_kmalloc_array(dev, nclks, sizeof(*clks), GFP_KERNEL);
	if (!clks)
		return -ENOMEM;

	dev_set_drvdata(dev, priv);
	priv->clks = clks;
	priv->num_core_clks = info->num_total_core_clks;
	priv->num_mod_clks = info->num_hw_mod_clks;
	priv->num_resets = info->num_resets;
	priv->last_dt_core_clk = info->last_dt_core_clk;

	/* Unregistered slots report -ENOENT on lookup. */
	for (i = 0; i < nclks; i++)
		clks[i] = ERR_PTR(-ENOENT);

	for (i = 0; i < info->num_core_clks; i++)
		rzg2l_cpg_register_core_clk(&info->core_clks[i], info, priv);

	for (i = 0; i < info->num_mod_clks; i++)
		rzg2l_cpg_register_mod_clk(&info->mod_clks[i], info, priv);

	error = of_clk_add_provider(np, rzg2l_cpg_clk_src_twocell_get, priv);
	if (error)
		return error;

	error = devm_add_action_or_reset(dev, rzg2l_cpg_del_clk_provider, np);
	if (error)
		return error;

	error = rzg2l_cpg_add_clk_domain(priv);
	if (error)
		return error;

	error = rzg2l_cpg_reset_controller_register(priv);
	if (error)
		return error;

	return 0;
}
1710
/* Supported SoCs; each entry is compiled in only when its clock driver is. */
static const struct of_device_id rzg2l_cpg_match[] = {
#ifdef CONFIG_CLK_R9A07G043
	{
		.compatible = "renesas,r9a07g043-cpg",
		.data = &r9a07g043_cpg_info,
	},
#endif
#ifdef CONFIG_CLK_R9A07G044
	{
		.compatible = "renesas,r9a07g044-cpg",
		.data = &r9a07g044_cpg_info,
	},
#endif
#ifdef CONFIG_CLK_R9A07G054
	{
		.compatible = "renesas,r9a07g054-cpg",
		.data = &r9a07g054_cpg_info,
	},
#endif
#ifdef CONFIG_CLK_R9A08G045
	{
		.compatible = "renesas,r9a08g045-cpg",
		.data = &r9a08g045_cpg_info,
	},
#endif
#ifdef CONFIG_CLK_R9A09G011
	{
		.compatible = "renesas,r9a09g011-cpg",
		.data = &r9a09g011_cpg_info,
	},
#endif
	{ /* sentinel */ }
};
1744
/* Platform driver; probe is bound via platform_driver_probe() below. */
static struct platform_driver rzg2l_cpg_driver = {
	.driver		= {
		.name	= "rzg2l-cpg",
		.of_match_table = rzg2l_cpg_match,
	},
};
1751
static int __init rzg2l_cpg_init(void)
{
	/* platform_driver_probe() lets the __init probe be discarded. */
	return platform_driver_probe(&rzg2l_cpg_driver, rzg2l_cpg_probe);
}

/* Register early: most peripherals depend on CPG clocks and resets. */
subsys_initcall(rzg2l_cpg_init);

MODULE_DESCRIPTION("Renesas RZ/G2L CPG Driver");