GNU Linux-libre 5.10.217-gnu1
[releases.git] / drivers / clocksource / sh_cmt.c
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * SuperH Timer Support - CMT
4  *
5  *  Copyright (C) 2008 Magnus Damm
6  */
7
8 #include <linux/clk.h>
9 #include <linux/clockchips.h>
10 #include <linux/clocksource.h>
11 #include <linux/delay.h>
12 #include <linux/err.h>
13 #include <linux/init.h>
14 #include <linux/interrupt.h>
15 #include <linux/io.h>
16 #include <linux/iopoll.h>
17 #include <linux/ioport.h>
18 #include <linux/irq.h>
19 #include <linux/module.h>
20 #include <linux/of.h>
21 #include <linux/of_device.h>
22 #include <linux/platform_device.h>
23 #include <linux/pm_domain.h>
24 #include <linux/pm_runtime.h>
25 #include <linux/sh_timer.h>
26 #include <linux/slab.h>
27 #include <linux/spinlock.h>
28
29 #ifdef CONFIG_SUPERH
30 #include <asm/platform_early.h>
31 #endif
32
33 struct sh_cmt_device;
34
35 /*
36  * The CMT comes in 5 different identified flavours, depending not only on the
37  * SoC but also on the particular instance. The following table lists the main
38  * characteristics of those flavours.
39  *
40  *                      16B     32B     32B-F   48B     R-Car Gen2
41  * -----------------------------------------------------------------------------
42  * Channels             2       1/4     1       6       2/8
43  * Control Width        16      16      16      16      32
44  * Counter Width        16      32      32      32/48   32/48
45  * Shared Start/Stop    Y       Y       Y       Y       N
46  *
47  * The r8a73a4 / R-Car Gen2 version has a per-channel start/stop register
48  * located in the channel registers block. All other versions have a shared
49  * start/stop register located in the global space.
50  *
51  * Channels are indexed from 0 to N-1 in the documentation. The channel index
52  * infers the start/stop bit position in the control register and the channel
53  * registers block address. Some CMT instances have a subset of channels
54  * available, in which case the index in the documentation doesn't match the
55  * "real" index as implemented in hardware. This is for instance the case with
56  * CMT0 on r8a7740, which is a 32-bit variant with a single channel numbered 0
57  * in the documentation but using start/stop bit 5 and having its registers
58  * block at 0x60.
59  *
60  * Similarly CMT0 on r8a73a4, r8a7790 and r8a7791, while implementing 32-bit
61  * channels only, is a 48-bit gen2 CMT with the 48-bit channels unavailable.
62  */
63
/* The identified CMT flavours; see the characteristics table above. */
enum sh_cmt_model {
	SH_CMT_16BIT,		/* 16-bit counter */
	SH_CMT_32BIT,		/* 32-bit counter, 16-bit control */
	SH_CMT_48BIT,		/* 32/48-bit counters, shared start/stop */
	SH_CMT0_RCAR_GEN2,	/* R-Car Gen2 CMT0, per-channel start/stop */
	SH_CMT1_RCAR_GEN2,	/* R-Car Gen2 CMT1, per-channel start/stop */
};
71
/*
 * Static description of one CMT flavour: counter/control widths, the
 * CMCSR overflow/clear bit layout, and accessors matching the register
 * width of that flavour.
 */
struct sh_cmt_info {
	enum sh_cmt_model model;

	unsigned int channels_mask;	/* bitmask of channels present in hardware */

	unsigned long width; /* 16 or 32 bit version of hardware block */
	u32 overflow_bit;	/* CMF bit in CMCSR for this flavour */
	u32 clear_bits;		/* mask ANDed into CMCSR to ack interrupts */

	/* callbacks for CMSTR and CMCSR access */
	u32 (*read_control)(void __iomem *base, unsigned long offs);
	void (*write_control)(void __iomem *base, unsigned long offs,
			      u32 value);

	/* callbacks for CMCNT and CMCOR access */
	u32 (*read_count)(void __iomem *base, unsigned long offs);
	void (*write_count)(void __iomem *base, unsigned long offs, u32 value);
};
90
/*
 * Per-channel state.  A channel may be registered as a clock event
 * device, a clocksource, or both; which roles are active is tracked in
 * the FLAG_* bits of @flags.
 */
struct sh_cmt_channel {
	struct sh_cmt_device *cmt;	/* parent device */

	unsigned int index;	/* Index in the documentation */
	unsigned int hwidx;	/* Real hardware index */

	void __iomem *iostart;	/* per-channel start/stop block (R-Car Gen2 only) */
	void __iomem *ioctrl;	/* channel control register block (CMCSR/CMCNT/CMCOR) */

	unsigned int timer_bit;	/* start/stop bit position in CMSTR */
	unsigned long flags;	/* FLAG_* private flags, see below */
	u32 match_value;	/* match value currently programmed in CMCOR */
	u32 next_match_value;	/* match value to program on next reprogram */
	u32 max_match_value;	/* all-ones value for the counter width */
	raw_spinlock_t lock;	/* protects flags and the match values */
	struct clock_event_device ced;
	struct clocksource cs;
	u64 total_cycles;	/* cycles accumulated by the ISR for clocksource reads */
	bool cs_enabled;	/* clocksource currently enabled */
};
111
/*
 * Per-instance device state, shared by all channels of one CMT block.
 */
struct sh_cmt_device {
	struct platform_device *pdev;

	const struct sh_cmt_info *info;	/* flavour description */

	void __iomem *mapbase;		/* base of the mapped register block */
	struct clk *clk;		/* functional clock */
	unsigned long rate;		/* counter input rate in Hz */
	unsigned int reg_delay;		/* delay (us) after register writes */

	raw_spinlock_t lock; /* Protect the shared start/stop register */

	struct sh_cmt_channel *channels;
	unsigned int num_channels;	/* number of channels in @channels */
	unsigned int hw_channels;	/* bitmask of hardware channels in use */

	bool has_clockevent;
	bool has_clocksource;
};
131
/* CMCSR bits for the 16-bit CMT flavour */
#define SH_CMT16_CMCSR_CMF		(1 << 7)	/* compare match flag */
#define SH_CMT16_CMCSR_CMIE		(1 << 6)	/* compare match interrupt enable */
#define SH_CMT16_CMCSR_CKS8		(0 << 0)	/* CKS field: clock select encodings */
#define SH_CMT16_CMCSR_CKS32		(1 << 0)
#define SH_CMT16_CMCSR_CKS128		(2 << 0)
#define SH_CMT16_CMCSR_CKS512		(3 << 0)
#define SH_CMT16_CMCSR_CKS_MASK		(3 << 0)
139
/* CMCSR bits for the 32-bit CMT flavours */
#define SH_CMT32_CMCSR_CMF		(1 << 15)	/* compare match flag */
#define SH_CMT32_CMCSR_OVF		(1 << 14)	/* overflow flag */
#define SH_CMT32_CMCSR_WRFLG		(1 << 13)	/* CMCNT write in progress (polled before writes) */
#define SH_CMT32_CMCSR_STTF		(1 << 12)
#define SH_CMT32_CMCSR_STPF		(1 << 11)
#define SH_CMT32_CMCSR_SSIE		(1 << 10)
#define SH_CMT32_CMCSR_CMS		(1 << 9)
#define SH_CMT32_CMCSR_CMM		(1 << 8)	/* compare match mode */
#define SH_CMT32_CMCSR_CMTOUT_IE	(1 << 7)
#define SH_CMT32_CMCSR_CMR_NONE		(0 << 4)	/* CMR field: compare match request type */
#define SH_CMT32_CMCSR_CMR_DMA		(1 << 4)
#define SH_CMT32_CMCSR_CMR_IRQ		(2 << 4)
#define SH_CMT32_CMCSR_CMR_MASK		(3 << 4)
#define SH_CMT32_CMCSR_DBGIVD		(1 << 3)
#define SH_CMT32_CMCSR_CKS_RCLK8	(4 << 0)	/* CKS field: clock select encodings */
#define SH_CMT32_CMCSR_CKS_RCLK32	(5 << 0)
#define SH_CMT32_CMCSR_CKS_RCLK128	(6 << 0)
#define SH_CMT32_CMCSR_CKS_RCLK1	(7 << 0)
#define SH_CMT32_CMCSR_CKS_MASK		(7 << 0)
159
160 static u32 sh_cmt_read16(void __iomem *base, unsigned long offs)
161 {
162         return ioread16(base + (offs << 1));
163 }
164
165 static u32 sh_cmt_read32(void __iomem *base, unsigned long offs)
166 {
167         return ioread32(base + (offs << 2));
168 }
169
170 static void sh_cmt_write16(void __iomem *base, unsigned long offs, u32 value)
171 {
172         iowrite16(value, base + (offs << 1));
173 }
174
175 static void sh_cmt_write32(void __iomem *base, unsigned long offs, u32 value)
176 {
177         iowrite32(value, base + (offs << 2));
178 }
179
/* Per-flavour characteristics and accessors (see the table above). */
static const struct sh_cmt_info sh_cmt_info[] = {
	[SH_CMT_16BIT] = {
		.model = SH_CMT_16BIT,
		.width = 16,
		.overflow_bit = SH_CMT16_CMCSR_CMF,
		.clear_bits = ~SH_CMT16_CMCSR_CMF,
		.read_control = sh_cmt_read16,
		.write_control = sh_cmt_write16,
		.read_count = sh_cmt_read16,
		.write_count = sh_cmt_write16,
	},
	[SH_CMT_32BIT] = {
		.model = SH_CMT_32BIT,
		.width = 32,
		.overflow_bit = SH_CMT32_CMCSR_CMF,
		.clear_bits = ~(SH_CMT32_CMCSR_CMF | SH_CMT32_CMCSR_OVF),
		/* 16-bit control registers, 32-bit counter registers */
		.read_control = sh_cmt_read16,
		.write_control = sh_cmt_write16,
		.read_count = sh_cmt_read32,
		.write_count = sh_cmt_write32,
	},
	[SH_CMT_48BIT] = {
		.model = SH_CMT_48BIT,
		.channels_mask = 0x3f,	/* channels 0-5 */
		.width = 32,
		.overflow_bit = SH_CMT32_CMCSR_CMF,
		.clear_bits = ~(SH_CMT32_CMCSR_CMF | SH_CMT32_CMCSR_OVF),
		.read_control = sh_cmt_read32,
		.write_control = sh_cmt_write32,
		.read_count = sh_cmt_read32,
		.write_count = sh_cmt_write32,
	},
	[SH_CMT0_RCAR_GEN2] = {
		.model = SH_CMT0_RCAR_GEN2,
		.channels_mask = 0x60,	/* channels 5 and 6 */
		.width = 32,
		.overflow_bit = SH_CMT32_CMCSR_CMF,
		.clear_bits = ~(SH_CMT32_CMCSR_CMF | SH_CMT32_CMCSR_OVF),
		.read_control = sh_cmt_read32,
		.write_control = sh_cmt_write32,
		.read_count = sh_cmt_read32,
		.write_count = sh_cmt_write32,
	},
	[SH_CMT1_RCAR_GEN2] = {
		.model = SH_CMT1_RCAR_GEN2,
		.channels_mask = 0xff,	/* channels 0-7 */
		.width = 32,
		.overflow_bit = SH_CMT32_CMCSR_CMF,
		.clear_bits = ~(SH_CMT32_CMCSR_CMF | SH_CMT32_CMCSR_OVF),
		.read_control = sh_cmt_read32,
		.write_control = sh_cmt_write32,
		.read_count = sh_cmt_read32,
		.write_count = sh_cmt_write32,
	},
};
235
/*
 * Channel register indices, scaled by the access width inside the
 * read/write callbacks (sh_cmt_read16/32 etc.).
 */
#define CMCSR 0 /* channel register */
#define CMCNT 1 /* channel register */
#define CMCOR 2 /* channel register */

#define CMCLKE	0x1000	/* CLK Enable Register (R-Car Gen2) */
241
242 static inline u32 sh_cmt_read_cmstr(struct sh_cmt_channel *ch)
243 {
244         if (ch->iostart)
245                 return ch->cmt->info->read_control(ch->iostart, 0);
246         else
247                 return ch->cmt->info->read_control(ch->cmt->mapbase, 0);
248 }
249
250 static inline void sh_cmt_write_cmstr(struct sh_cmt_channel *ch, u32 value)
251 {
252         u32 old_value = sh_cmt_read_cmstr(ch);
253
254         if (value != old_value) {
255                 if (ch->iostart) {
256                         ch->cmt->info->write_control(ch->iostart, 0, value);
257                         udelay(ch->cmt->reg_delay);
258                 } else {
259                         ch->cmt->info->write_control(ch->cmt->mapbase, 0, value);
260                         udelay(ch->cmt->reg_delay);
261                 }
262         }
263 }
264
/* Read the channel control/status register (CMCSR). */
static inline u32 sh_cmt_read_cmcsr(struct sh_cmt_channel *ch)
{
	return ch->cmt->info->read_control(ch->ioctrl, CMCSR);
}
269
270 static inline void sh_cmt_write_cmcsr(struct sh_cmt_channel *ch, u32 value)
271 {
272         u32 old_value = sh_cmt_read_cmcsr(ch);
273
274         if (value != old_value) {
275                 ch->cmt->info->write_control(ch->ioctrl, CMCSR, value);
276                 udelay(ch->cmt->reg_delay);
277         }
278 }
279
/* Read the free-running counter register (CMCNT). */
static inline u32 sh_cmt_read_cmcnt(struct sh_cmt_channel *ch)
{
	return ch->cmt->info->read_count(ch->ioctrl, CMCNT);
}
284
/*
 * Write the counter register (CMCNT).
 *
 * On the 32-bit flavours the WRFLG bit in CMCSR signals a previous CMCNT
 * write still in flight, so poll until it clears before issuing the new
 * write.  Returns 0 on success or a negative error if the poll times out.
 */
static inline int sh_cmt_write_cmcnt(struct sh_cmt_channel *ch, u32 value)
{
	/* Tests showed that we need to wait 3 clocks here */
	/* NOTE(review): the 3/2 scaling presumably means reg_delay covers 2
	 * clocks — confirm against where reg_delay is computed. */
	unsigned int cmcnt_delay = DIV_ROUND_UP(3 * ch->cmt->reg_delay, 2);
	u32 reg;

	if (ch->cmt->info->model > SH_CMT_16BIT) {
		/* Wait for any in-flight CMCNT write to complete. */
		int ret = read_poll_timeout_atomic(sh_cmt_read_cmcsr, reg,
						   !(reg & SH_CMT32_CMCSR_WRFLG),
						   1, cmcnt_delay, false, ch);
		if (ret < 0)
			return ret;
	}

	ch->cmt->info->write_count(ch->ioctrl, CMCNT, value);
	udelay(cmcnt_delay);
	return 0;
}
303
304 static inline void sh_cmt_write_cmcor(struct sh_cmt_channel *ch, u32 value)
305 {
306         u32 old_value = ch->cmt->info->read_count(ch->ioctrl, CMCOR);
307
308         if (value != old_value) {
309                 ch->cmt->info->write_count(ch->ioctrl, CMCOR, value);
310                 udelay(ch->cmt->reg_delay);
311         }
312 }
313
/*
 * Read a stable counter value together with the overflow state.
 *
 * The counter is sampled three times, bracketed by two reads of the
 * overflow bit; the loop retries until the overflow state is unchanged
 * across the samples and the three values are mutually consistent, i.e.
 * the counter did not wrap in the middle of the reads.
 *
 * *has_wrapped is set to the (non-zero) overflow bit if the counter has
 * wrapped.  Returns the middle sample.
 */
static u32 sh_cmt_get_counter(struct sh_cmt_channel *ch, u32 *has_wrapped)
{
	u32 v1, v2, v3;
	u32 o1, o2;

	o1 = sh_cmt_read_cmcsr(ch) & ch->cmt->info->overflow_bit;

	/* Make sure the timer value is stable. Stolen from acpi_pm.c */
	do {
		o2 = o1;
		v1 = sh_cmt_read_cmcnt(ch);
		v2 = sh_cmt_read_cmcnt(ch);
		v3 = sh_cmt_read_cmcnt(ch);
		o1 = sh_cmt_read_cmcsr(ch) & ch->cmt->info->overflow_bit;
	} while (unlikely((o1 != o2) || (v1 > v2 && v1 < v3)
			  || (v2 > v3 && v2 < v1) || (v3 > v1 && v3 < v2)));

	*has_wrapped = o1;
	return v2;
}
334
335 static void sh_cmt_start_stop_ch(struct sh_cmt_channel *ch, int start)
336 {
337         unsigned long flags;
338         u32 value;
339
340         /* start stop register shared by multiple timer channels */
341         raw_spin_lock_irqsave(&ch->cmt->lock, flags);
342         value = sh_cmt_read_cmstr(ch);
343
344         if (start)
345                 value |= 1 << ch->timer_bit;
346         else
347                 value &= ~(1 << ch->timer_bit);
348
349         sh_cmt_write_cmstr(ch, value);
350         raw_spin_unlock_irqrestore(&ch->cmt->lock, flags);
351 }
352
/*
 * Power up and start a channel: take a runtime-PM reference, enable the
 * functional clock, program periodic compare-match mode with the maximum
 * timeout, clear the counter and finally set the start bit.
 *
 * Returns 0 on success, the clk_enable() error, or -ETIMEDOUT if the
 * counter cannot be cleared.
 */
static int sh_cmt_enable(struct sh_cmt_channel *ch)
{
	int ret;

	pm_runtime_get_sync(&ch->cmt->pdev->dev);
	dev_pm_syscore_device(&ch->cmt->pdev->dev, true);

	/* enable clock */
	ret = clk_enable(ch->cmt->clk);
	if (ret) {
		dev_err(&ch->cmt->pdev->dev, "ch%u: cannot enable clock\n",
			ch->index);
		goto err0;
	}

	/* make sure channel is disabled */
	sh_cmt_start_stop_ch(ch, 0);

	/* configure channel, periodic mode and maximum timeout */
	if (ch->cmt->info->width == 16) {
		sh_cmt_write_cmcsr(ch, SH_CMT16_CMCSR_CMIE |
				   SH_CMT16_CMCSR_CKS512);
	} else {
		sh_cmt_write_cmcsr(ch, SH_CMT32_CMCSR_CMM |
				   SH_CMT32_CMCSR_CMTOUT_IE |
				   SH_CMT32_CMCSR_CMR_IRQ |
				   SH_CMT32_CMCSR_CKS_RCLK8);
	}

	sh_cmt_write_cmcor(ch, 0xffffffff);
	ret = sh_cmt_write_cmcnt(ch, 0);

	/* The write may time out, or the counter may read back non-zero;
	 * both cases are reported as -ETIMEDOUT. */
	if (ret || sh_cmt_read_cmcnt(ch)) {
		dev_err(&ch->cmt->pdev->dev, "ch%u: cannot clear CMCNT\n",
			ch->index);
		ret = -ETIMEDOUT;
		goto err1;
	}

	/* enable channel */
	sh_cmt_start_stop_ch(ch, 1);
	return 0;
 err1:
	/* stop clock */
	clk_disable(ch->cmt->clk);

 err0:
	return ret;
}
402
/*
 * Stop a channel and release the resources taken by sh_cmt_enable():
 * clear the start bit, mask interrupts, gate the clock and drop the
 * runtime-PM reference.
 */
static void sh_cmt_disable(struct sh_cmt_channel *ch)
{
	/* disable channel */
	sh_cmt_start_stop_ch(ch, 0);

	/* disable interrupts in CMT block */
	sh_cmt_write_cmcsr(ch, 0);

	/* stop clock */
	clk_disable(ch->cmt->clk);

	dev_pm_syscore_device(&ch->cmt->pdev->dev, false);
	pm_runtime_put(&ch->cmt->pdev->dev);
}
417
/* private flags, stored in sh_cmt_channel::flags */
#define FLAG_CLOCKEVENT (1 << 0)	/* channel acts as clock event device */
#define FLAG_CLOCKSOURCE (1 << 1)	/* channel acts as clocksource */
#define FLAG_REPROGRAM (1 << 2)		/* hardware must be reprogrammed */
#define FLAG_SKIPEVENT (1 << 3)		/* drop the next event, ISR handles it */
#define FLAG_IRQCONTEXT (1 << 4)	/* currently inside sh_cmt_interrupt() */
424
/*
 * Program the compare match register to next_match_value, relative to
 * the current counter value (or absolute when @absolute is set), while
 * coping with races against the counter reaching a match:
 *
 *  - if the counter wrapped before we even start, defer entirely to the
 *    interrupt handler (FLAG_SKIPEVENT);
 *  - if the programmed match turns out to be in the past without a wrap,
 *    back off exponentially (delay) and retry.
 *
 * Called with ch->lock held or from the interrupt handler.
 */
static void sh_cmt_clock_event_program_verify(struct sh_cmt_channel *ch,
					      int absolute)
{
	u32 value = ch->next_match_value;
	u32 new_match;
	u32 delay = 0;
	u32 now = 0;
	u32 has_wrapped;

	now = sh_cmt_get_counter(ch, &has_wrapped);
	ch->flags |= FLAG_REPROGRAM; /* force reprogram */

	if (has_wrapped) {
		/* we're competing with the interrupt handler.
		 *  -> let the interrupt handler reprogram the timer.
		 *  -> interrupt number two handles the event.
		 */
		ch->flags |= FLAG_SKIPEVENT;
		return;
	}

	if (absolute)
		now = 0;

	do {
		/* reprogram the timer hardware,
		 * but don't save the new match value yet.
		 */
		new_match = now + value + delay;
		if (new_match > ch->max_match_value)
			new_match = ch->max_match_value;

		sh_cmt_write_cmcor(ch, new_match);

		now = sh_cmt_get_counter(ch, &has_wrapped);
		if (has_wrapped && (new_match > ch->match_value)) {
			/* we are changing to a greater match value,
			 * so this wrap must be caused by the counter
			 * matching the old value.
			 * -> first interrupt reprograms the timer.
			 * -> interrupt number two handles the event.
			 */
			ch->flags |= FLAG_SKIPEVENT;
			break;
		}

		if (has_wrapped) {
			/* we are changing to a smaller match value,
			 * so the wrap must be caused by the counter
			 * matching the new value.
			 * -> save programmed match value.
			 * -> let isr handle the event.
			 */
			ch->match_value = new_match;
			break;
		}

		/* be safe: verify hardware settings */
		if (now < new_match) {
			/* timer value is below match value, all good.
			 * this makes sure we won't miss any match events.
			 * -> save programmed match value.
			 * -> let isr handle the event.
			 */
			ch->match_value = new_match;
			break;
		}

		/* the counter has reached a value greater
		 * than our new match value. and since the
		 * has_wrapped flag isn't set we must have
		 * programmed a too close event.
		 * -> increase delay and retry.
		 */
		if (delay)
			delay <<= 1;
		else
			delay = 1;

		/* delay can only be 0 here if it wrapped around to zero;
		 * warn and give up retrying. */
		if (!delay)
			dev_warn(&ch->cmt->pdev->dev, "ch%u: too long delay\n",
				 ch->index);

	} while (delay);
}
510
/*
 * Set the next match delta and reprogram the hardware.  An out-of-range
 * delta is only warned about; the value is programmed anyway (the
 * program_verify loop clamps to max_match_value).  Caller holds ch->lock.
 */
static void __sh_cmt_set_next(struct sh_cmt_channel *ch, unsigned long delta)
{
	if (delta > ch->max_match_value)
		dev_warn(&ch->cmt->pdev->dev, "ch%u: delta out of range\n",
			 ch->index);

	ch->next_match_value = delta;
	sh_cmt_clock_event_program_verify(ch, 0);
}
520
521 static void sh_cmt_set_next(struct sh_cmt_channel *ch, unsigned long delta)
522 {
523         unsigned long flags;
524
525         raw_spin_lock_irqsave(&ch->lock, flags);
526         __sh_cmt_set_next(ch, delta);
527         raw_spin_unlock_irqrestore(&ch->lock, flags);
528 }
529
/*
 * Compare match interrupt handler.
 *
 * Acknowledges the interrupt, accumulates cycles for the clocksource if
 * enabled, delivers the clock event (unless FLAG_SKIPEVENT asked us to
 * drop it), and reprograms the hardware when FLAG_REPROGRAM is set.
 * FLAG_IRQCONTEXT is held across the event handler so that a nested
 * set_next_event() call only records next_match_value and lets this
 * function do the actual reprogramming on the way out.
 */
static irqreturn_t sh_cmt_interrupt(int irq, void *dev_id)
{
	struct sh_cmt_channel *ch = dev_id;

	/* clear flags */
	sh_cmt_write_cmcsr(ch, sh_cmt_read_cmcsr(ch) &
			   ch->cmt->info->clear_bits);

	/* update clock source counter to begin with if enabled
	 * the wrap flag should be cleared by the timer specific
	 * isr before we end up here.
	 */
	if (ch->flags & FLAG_CLOCKSOURCE)
		ch->total_cycles += ch->match_value + 1;

	if (!(ch->flags & FLAG_REPROGRAM))
		ch->next_match_value = ch->max_match_value;

	ch->flags |= FLAG_IRQCONTEXT;

	if (ch->flags & FLAG_CLOCKEVENT) {
		if (!(ch->flags & FLAG_SKIPEVENT)) {
			if (clockevent_state_oneshot(&ch->ced)) {
				/* one-shot: arm a reprogram so a nested
				 * set_next_event() takes effect below. */
				ch->next_match_value = ch->max_match_value;
				ch->flags |= FLAG_REPROGRAM;
			}

			ch->ced.event_handler(&ch->ced);
		}
	}

	ch->flags &= ~FLAG_SKIPEVENT;

	if (ch->flags & FLAG_REPROGRAM) {
		ch->flags &= ~FLAG_REPROGRAM;
		sh_cmt_clock_event_program_verify(ch, 1);

		if (ch->flags & FLAG_CLOCKEVENT)
			if ((clockevent_state_shutdown(&ch->ced))
			    || (ch->match_value == ch->next_match_value))
				ch->flags &= ~FLAG_REPROGRAM;
	}

	ch->flags &= ~FLAG_IRQCONTEXT;

	return IRQ_HANDLED;
}
577
/*
 * Mark the channel as in use for @flag (FLAG_CLOCKEVENT or
 * FLAG_CLOCKSOURCE), powering up the hardware on the first user.
 * Returns 0 or the sh_cmt_enable() error.
 */
static int sh_cmt_start(struct sh_cmt_channel *ch, unsigned long flag)
{
	int ret = 0;
	unsigned long flags;

	raw_spin_lock_irqsave(&ch->lock, flags);

	/* Only enable the hardware when no role was active yet. */
	if (!(ch->flags & (FLAG_CLOCKEVENT | FLAG_CLOCKSOURCE)))
		ret = sh_cmt_enable(ch);

	if (ret)
		goto out;
	ch->flags |= flag;

	/* setup timeout if no clockevent */
	if (ch->cmt->num_channels == 1 &&
	    flag == FLAG_CLOCKSOURCE && (!(ch->flags & FLAG_CLOCKEVENT)))
		__sh_cmt_set_next(ch, ch->max_match_value);
 out:
	raw_spin_unlock_irqrestore(&ch->lock, flags);

	return ret;
}
601
/*
 * Drop the channel's @flag role, powering down the hardware when the
 * last role goes away.  If the clocksource remains active after the
 * clockevent is removed, the timeout is stretched back to maximum.
 */
static void sh_cmt_stop(struct sh_cmt_channel *ch, unsigned long flag)
{
	unsigned long flags;
	unsigned long f;

	raw_spin_lock_irqsave(&ch->lock, flags);

	/* remember whether any role was active before clearing @flag */
	f = ch->flags & (FLAG_CLOCKEVENT | FLAG_CLOCKSOURCE);
	ch->flags &= ~flag;

	if (f && !(ch->flags & (FLAG_CLOCKEVENT | FLAG_CLOCKSOURCE)))
		sh_cmt_disable(ch);

	/* adjust the timeout to maximum if only clocksource left */
	if ((flag == FLAG_CLOCKEVENT) && (ch->flags & FLAG_CLOCKSOURCE))
		__sh_cmt_set_next(ch, ch->max_match_value);

	raw_spin_unlock_irqrestore(&ch->lock, flags);
}
621
/* Retrieve the channel that embeds the given clocksource. */
static struct sh_cmt_channel *cs_to_sh_cmt(struct clocksource *cs)
{
	return container_of(cs, struct sh_cmt_channel, cs);
}
626
/*
 * Clocksource read callback.
 *
 * With a single channel the counter is shared with clockevents and gets
 * reprogrammed, so the value returned is the software accumulator
 * (total_cycles, advanced by the ISR on each match) plus the current raw
 * count, compensated for a wrap the ISR has not processed yet.  With
 * multiple channels the raw counter is returned directly.
 */
static u64 sh_cmt_clocksource_read(struct clocksource *cs)
{
	struct sh_cmt_channel *ch = cs_to_sh_cmt(cs);
	u32 has_wrapped;

	if (ch->cmt->num_channels == 1) {
		unsigned long flags;
		u64 value;
		u32 raw;

		raw_spin_lock_irqsave(&ch->lock, flags);
		value = ch->total_cycles;
		raw = sh_cmt_get_counter(ch, &has_wrapped);

		if (unlikely(has_wrapped))
			raw += ch->match_value + 1;
		raw_spin_unlock_irqrestore(&ch->lock, flags);

		return value + raw;
	}

	return sh_cmt_get_counter(ch, &has_wrapped);
}
650
651 static int sh_cmt_clocksource_enable(struct clocksource *cs)
652 {
653         int ret;
654         struct sh_cmt_channel *ch = cs_to_sh_cmt(cs);
655
656         WARN_ON(ch->cs_enabled);
657
658         ch->total_cycles = 0;
659
660         ret = sh_cmt_start(ch, FLAG_CLOCKSOURCE);
661         if (!ret)
662                 ch->cs_enabled = true;
663
664         return ret;
665 }
666
/* Clocksource disable callback: stop the channel's clocksource role. */
static void sh_cmt_clocksource_disable(struct clocksource *cs)
{
	struct sh_cmt_channel *ch = cs_to_sh_cmt(cs);

	WARN_ON(!ch->cs_enabled);

	sh_cmt_stop(ch, FLAG_CLOCKSOURCE);
	ch->cs_enabled = false;
}
676
677 static void sh_cmt_clocksource_suspend(struct clocksource *cs)
678 {
679         struct sh_cmt_channel *ch = cs_to_sh_cmt(cs);
680
681         if (!ch->cs_enabled)
682                 return;
683
684         sh_cmt_stop(ch, FLAG_CLOCKSOURCE);
685         pm_genpd_syscore_poweroff(&ch->cmt->pdev->dev);
686 }
687
688 static void sh_cmt_clocksource_resume(struct clocksource *cs)
689 {
690         struct sh_cmt_channel *ch = cs_to_sh_cmt(cs);
691
692         if (!ch->cs_enabled)
693                 return;
694
695         pm_genpd_syscore_poweron(&ch->cmt->pdev->dev);
696         sh_cmt_start(ch, FLAG_CLOCKSOURCE);
697 }
698
699 static int sh_cmt_register_clocksource(struct sh_cmt_channel *ch,
700                                        const char *name)
701 {
702         struct clocksource *cs = &ch->cs;
703
704         cs->name = name;
705         cs->rating = 125;
706         cs->read = sh_cmt_clocksource_read;
707         cs->enable = sh_cmt_clocksource_enable;
708         cs->disable = sh_cmt_clocksource_disable;
709         cs->suspend = sh_cmt_clocksource_suspend;
710         cs->resume = sh_cmt_clocksource_resume;
711         cs->mask = CLOCKSOURCE_MASK(ch->cmt->info->width);
712         cs->flags = CLOCK_SOURCE_IS_CONTINUOUS;
713
714         dev_info(&ch->cmt->pdev->dev, "ch%u: used as clock source\n",
715                  ch->index);
716
717         clocksource_register_hz(cs, ch->cmt->rate);
718         return 0;
719 }
720
/* Retrieve the channel that embeds the given clock event device. */
static struct sh_cmt_channel *ced_to_sh_cmt(struct clock_event_device *ced)
{
	return container_of(ced, struct sh_cmt_channel, ced);
}
725
726 static void sh_cmt_clock_event_start(struct sh_cmt_channel *ch, int periodic)
727 {
728         sh_cmt_start(ch, FLAG_CLOCKEVENT);
729
730         if (periodic)
731                 sh_cmt_set_next(ch, ((ch->cmt->rate + HZ/2) / HZ) - 1);
732         else
733                 sh_cmt_set_next(ch, ch->max_match_value);
734 }
735
/* clockevents shutdown callback: drop the channel's clockevent role. */
static int sh_cmt_clock_event_shutdown(struct clock_event_device *ced)
{
	struct sh_cmt_channel *ch = ced_to_sh_cmt(ced);

	sh_cmt_stop(ch, FLAG_CLOCKEVENT);
	return 0;
}
743
744 static int sh_cmt_clock_event_set_state(struct clock_event_device *ced,
745                                         int periodic)
746 {
747         struct sh_cmt_channel *ch = ced_to_sh_cmt(ced);
748
749         /* deal with old setting first */
750         if (clockevent_state_oneshot(ced) || clockevent_state_periodic(ced))
751                 sh_cmt_stop(ch, FLAG_CLOCKEVENT);
752
753         dev_info(&ch->cmt->pdev->dev, "ch%u: used for %s clock events\n",
754                  ch->index, periodic ? "periodic" : "oneshot");
755         sh_cmt_clock_event_start(ch, periodic);
756         return 0;
757 }
758
/* clockevents set_state_oneshot callback. */
static int sh_cmt_clock_event_set_oneshot(struct clock_event_device *ced)
{
	return sh_cmt_clock_event_set_state(ced, 0);
}
763
/* clockevents set_state_periodic callback. */
static int sh_cmt_clock_event_set_periodic(struct clock_event_device *ced)
{
	return sh_cmt_clock_event_set_state(ced, 1);
}
768
/*
 * clockevents set_next_event callback (one-shot only).
 *
 * When called from inside the interrupt handler (FLAG_IRQCONTEXT) only
 * next_match_value is recorded; the ISR reprograms the hardware on its
 * way out via the FLAG_REPROGRAM path.  Otherwise the hardware is
 * reprogrammed immediately under the channel lock.
 * NOTE(review): the "- 1" presumably accounts for the match firing when
 * the counter reaches the value — confirm against the CMT manual.
 */
static int sh_cmt_clock_event_next(unsigned long delta,
				   struct clock_event_device *ced)
{
	struct sh_cmt_channel *ch = ced_to_sh_cmt(ced);

	BUG_ON(!clockevent_state_oneshot(ced));
	if (likely(ch->flags & FLAG_IRQCONTEXT))
		ch->next_match_value = delta - 1;
	else
		sh_cmt_set_next(ch, delta - 1);

	return 0;
}
782
783 static void sh_cmt_clock_event_suspend(struct clock_event_device *ced)
784 {
785         struct sh_cmt_channel *ch = ced_to_sh_cmt(ced);
786
787         pm_genpd_syscore_poweroff(&ch->cmt->pdev->dev);
788         clk_unprepare(ch->cmt->clk);
789 }
790
791 static void sh_cmt_clock_event_resume(struct clock_event_device *ced)
792 {
793         struct sh_cmt_channel *ch = ced_to_sh_cmt(ced);
794
795         clk_prepare(ch->cmt->clk);
796         pm_genpd_syscore_poweron(&ch->cmt->pdev->dev);
797 }
798
/*
 * Register the channel as a clock event device: request its interrupt
 * and fill in the clock_event_device callbacks and delta limits.
 *
 * Returns 0 on success or a negative error from platform_get_irq() /
 * request_irq().
 */
static int sh_cmt_register_clockevent(struct sh_cmt_channel *ch,
				      const char *name)
{
	struct clock_event_device *ced = &ch->ced;
	int irq;
	int ret;

	irq = platform_get_irq(ch->cmt->pdev, ch->index);
	if (irq < 0)
		return irq;

	ret = request_irq(irq, sh_cmt_interrupt,
			  IRQF_TIMER | IRQF_IRQPOLL | IRQF_NOBALANCING,
			  dev_name(&ch->cmt->pdev->dev), ch);
	if (ret) {
		dev_err(&ch->cmt->pdev->dev, "ch%u: failed to request irq %d\n",
			ch->index, irq);
		return ret;
	}

	ced->name = name;
	ced->features = CLOCK_EVT_FEAT_PERIODIC;
	ced->features |= CLOCK_EVT_FEAT_ONESHOT;
	ced->rating = 125;
	ced->cpumask = cpu_possible_mask;
	ced->set_next_event = sh_cmt_clock_event_next;
	ced->set_state_shutdown = sh_cmt_clock_event_shutdown;
	ced->set_state_periodic = sh_cmt_clock_event_set_periodic;
	ced->set_state_oneshot = sh_cmt_clock_event_set_oneshot;
	ced->suspend = sh_cmt_clock_event_suspend;
	ced->resume = sh_cmt_clock_event_resume;

	/* TODO: calculate good shift from rate and counter bit width */
	ced->shift = 32;
	ced->mult = div_sc(ch->cmt->rate, NSEC_PER_SEC, ced->shift);
	/* max delta is bounded by the counter width, min delta by 0x1f ticks */
	ced->max_delta_ns = clockevent_delta2ns(ch->max_match_value, ced);
	ced->max_delta_ticks = ch->max_match_value;
	ced->min_delta_ns = clockevent_delta2ns(0x1f, ced);
	ced->min_delta_ticks = 0x1f;

	dev_info(&ch->cmt->pdev->dev, "ch%u: used for clock events\n",
		 ch->index);
	clockevents_register_device(ced);

	return 0;
}
845
846 static int sh_cmt_register(struct sh_cmt_channel *ch, const char *name,
847                            bool clockevent, bool clocksource)
848 {
849         int ret;
850
851         if (clockevent) {
852                 ch->cmt->has_clockevent = true;
853                 ret = sh_cmt_register_clockevent(ch, name);
854                 if (ret < 0)
855                         return ret;
856         }
857
858         if (clocksource) {
859                 ch->cmt->has_clocksource = true;
860                 sh_cmt_register_clocksource(ch, name);
861         }
862
863         return 0;
864 }
865
/*
 * Initialize one channel: compute the register block addresses for the
 * detected CMT model, derive the counter width limit, and register the
 * channel in the requested roles.
 *
 * Returns 0 on success (or for unused channels) or the registration
 * error.
 */
static int sh_cmt_setup_channel(struct sh_cmt_channel *ch, unsigned int index,
				unsigned int hwidx, bool clockevent,
				bool clocksource, struct sh_cmt_device *cmt)
{
	u32 value;
	int ret;

	/* Skip unused channels. */
	if (!clockevent && !clocksource)
		return 0;

	ch->cmt = cmt;
	ch->index = index;
	ch->hwidx = hwidx;
	ch->timer_bit = hwidx;

	/*
	 * Compute the address of the channel control register block. For the
	 * timers with a per-channel start/stop register, compute its address
	 * as well.
	 */
	switch (cmt->info->model) {
	case SH_CMT_16BIT:
		/* 16-bit: 6-byte channel stride, control block at offset 2 */
		ch->ioctrl = cmt->mapbase + 2 + ch->hwidx * 6;
		break;
	case SH_CMT_32BIT:
	case SH_CMT_48BIT:
		/* 32/48-bit: 16-byte channel stride starting at 0x10 */
		ch->ioctrl = cmt->mapbase + 0x10 + ch->hwidx * 0x10;
		break;
	case SH_CMT0_RCAR_GEN2:
	case SH_CMT1_RCAR_GEN2:
		/* Gen2: 0x100-byte per-channel block with its own start/stop
		 * register, so the CMSTR bit position is always 0. */
		ch->iostart = cmt->mapbase + ch->hwidx * 0x100;
		ch->ioctrl = ch->iostart + 0x10;
		ch->timer_bit = 0;

		/* Enable the clock supply to the channel */
		value = ioread32(cmt->mapbase + CMCLKE);
		value |= BIT(hwidx);
		iowrite32(value, cmt->mapbase + CMCLKE);
		break;
	}

	/* All-ones limit for the counter width, avoiding a 32-bit shift UB. */
	if (cmt->info->width == (sizeof(ch->max_match_value) * 8))
		ch->max_match_value = ~0;
	else
		ch->max_match_value = (1 << cmt->info->width) - 1;

	ch->match_value = ch->max_match_value;
	raw_spin_lock_init(&ch->lock);

	ret = sh_cmt_register(ch, dev_name(&cmt->pdev->dev),
			      clockevent, clocksource);
	if (ret) {
		dev_err(&cmt->pdev->dev, "ch%u: registration failed\n",
			ch->index);
		return ret;
	}
	ch->cs_enabled = false;

	return 0;
}
927
928 static int sh_cmt_map_memory(struct sh_cmt_device *cmt)
929 {
930         struct resource *mem;
931
932         mem = platform_get_resource(cmt->pdev, IORESOURCE_MEM, 0);
933         if (!mem) {
934                 dev_err(&cmt->pdev->dev, "failed to get I/O memory\n");
935                 return -ENXIO;
936         }
937
938         cmt->mapbase = ioremap(mem->start, resource_size(mem));
939         if (cmt->mapbase == NULL) {
940                 dev_err(&cmt->pdev->dev, "failed to remap I/O memory\n");
941                 return -ENXIO;
942         }
943
944         return 0;
945 }
946
947 static const struct platform_device_id sh_cmt_id_table[] = {
948         { "sh-cmt-16", (kernel_ulong_t)&sh_cmt_info[SH_CMT_16BIT] },
949         { "sh-cmt-32", (kernel_ulong_t)&sh_cmt_info[SH_CMT_32BIT] },
950         { }
951 };
952 MODULE_DEVICE_TABLE(platform, sh_cmt_id_table);
953
954 static const struct of_device_id sh_cmt_of_table[] __maybe_unused = {
955         {
956                 /* deprecated, preserved for backward compatibility */
957                 .compatible = "renesas,cmt-48",
958                 .data = &sh_cmt_info[SH_CMT_48BIT]
959         },
960         {
961                 /* deprecated, preserved for backward compatibility */
962                 .compatible = "renesas,cmt-48-gen2",
963                 .data = &sh_cmt_info[SH_CMT0_RCAR_GEN2]
964         },
965         {
966                 .compatible = "renesas,r8a7740-cmt1",
967                 .data = &sh_cmt_info[SH_CMT_48BIT]
968         },
969         {
970                 .compatible = "renesas,sh73a0-cmt1",
971                 .data = &sh_cmt_info[SH_CMT_48BIT]
972         },
973         {
974                 .compatible = "renesas,rcar-gen2-cmt0",
975                 .data = &sh_cmt_info[SH_CMT0_RCAR_GEN2]
976         },
977         {
978                 .compatible = "renesas,rcar-gen2-cmt1",
979                 .data = &sh_cmt_info[SH_CMT1_RCAR_GEN2]
980         },
981         {
982                 .compatible = "renesas,rcar-gen3-cmt0",
983                 .data = &sh_cmt_info[SH_CMT0_RCAR_GEN2]
984         },
985         {
986                 .compatible = "renesas,rcar-gen3-cmt1",
987                 .data = &sh_cmt_info[SH_CMT1_RCAR_GEN2]
988         },
989         { }
990 };
991 MODULE_DEVICE_TABLE(of, sh_cmt_of_table);
992
993 static int sh_cmt_setup(struct sh_cmt_device *cmt, struct platform_device *pdev)
994 {
995         unsigned int mask, i;
996         unsigned long rate;
997         int ret;
998
999         cmt->pdev = pdev;
1000         raw_spin_lock_init(&cmt->lock);
1001
1002         if (IS_ENABLED(CONFIG_OF) && pdev->dev.of_node) {
1003                 cmt->info = of_device_get_match_data(&pdev->dev);
1004                 cmt->hw_channels = cmt->info->channels_mask;
1005         } else if (pdev->dev.platform_data) {
1006                 struct sh_timer_config *cfg = pdev->dev.platform_data;
1007                 const struct platform_device_id *id = pdev->id_entry;
1008
1009                 cmt->info = (const struct sh_cmt_info *)id->driver_data;
1010                 cmt->hw_channels = cfg->channels_mask;
1011         } else {
1012                 dev_err(&cmt->pdev->dev, "missing platform data\n");
1013                 return -ENXIO;
1014         }
1015
1016         /* Get hold of clock. */
1017         cmt->clk = clk_get(&cmt->pdev->dev, "fck");
1018         if (IS_ERR(cmt->clk)) {
1019                 dev_err(&cmt->pdev->dev, "cannot get clock\n");
1020                 return PTR_ERR(cmt->clk);
1021         }
1022
1023         ret = clk_prepare(cmt->clk);
1024         if (ret < 0)
1025                 goto err_clk_put;
1026
1027         /* Determine clock rate. */
1028         ret = clk_enable(cmt->clk);
1029         if (ret < 0)
1030                 goto err_clk_unprepare;
1031
1032         rate = clk_get_rate(cmt->clk);
1033         if (!rate) {
1034                 ret = -EINVAL;
1035                 goto err_clk_disable;
1036         }
1037
1038         /* We shall wait 2 input clks after register writes */
1039         if (cmt->info->model >= SH_CMT_48BIT)
1040                 cmt->reg_delay = DIV_ROUND_UP(2UL * USEC_PER_SEC, rate);
1041         cmt->rate = rate / (cmt->info->width == 16 ? 512 : 8);
1042
1043         /* Map the memory resource(s). */
1044         ret = sh_cmt_map_memory(cmt);
1045         if (ret < 0)
1046                 goto err_clk_disable;
1047
1048         /* Allocate and setup the channels. */
1049         cmt->num_channels = hweight8(cmt->hw_channels);
1050         cmt->channels = kcalloc(cmt->num_channels, sizeof(*cmt->channels),
1051                                 GFP_KERNEL);
1052         if (cmt->channels == NULL) {
1053                 ret = -ENOMEM;
1054                 goto err_unmap;
1055         }
1056
1057         /*
1058          * Use the first channel as a clock event device and the second channel
1059          * as a clock source. If only one channel is available use it for both.
1060          */
1061         for (i = 0, mask = cmt->hw_channels; i < cmt->num_channels; ++i) {
1062                 unsigned int hwidx = ffs(mask) - 1;
1063                 bool clocksource = i == 1 || cmt->num_channels == 1;
1064                 bool clockevent = i == 0;
1065
1066                 ret = sh_cmt_setup_channel(&cmt->channels[i], i, hwidx,
1067                                            clockevent, clocksource, cmt);
1068                 if (ret < 0)
1069                         goto err_unmap;
1070
1071                 mask &= ~(1 << hwidx);
1072         }
1073
1074         clk_disable(cmt->clk);
1075
1076         platform_set_drvdata(pdev, cmt);
1077
1078         return 0;
1079
1080 err_unmap:
1081         kfree(cmt->channels);
1082         iounmap(cmt->mapbase);
1083 err_clk_disable:
1084         clk_disable(cmt->clk);
1085 err_clk_unprepare:
1086         clk_unprepare(cmt->clk);
1087 err_clk_put:
1088         clk_put(cmt->clk);
1089         return ret;
1090 }
1091
1092 static int sh_cmt_probe(struct platform_device *pdev)
1093 {
1094         struct sh_cmt_device *cmt = platform_get_drvdata(pdev);
1095         int ret;
1096
1097         if (!is_sh_early_platform_device(pdev)) {
1098                 pm_runtime_set_active(&pdev->dev);
1099                 pm_runtime_enable(&pdev->dev);
1100         }
1101
1102         if (cmt) {
1103                 dev_info(&pdev->dev, "kept as earlytimer\n");
1104                 goto out;
1105         }
1106
1107         cmt = kzalloc(sizeof(*cmt), GFP_KERNEL);
1108         if (cmt == NULL)
1109                 return -ENOMEM;
1110
1111         ret = sh_cmt_setup(cmt, pdev);
1112         if (ret) {
1113                 kfree(cmt);
1114                 pm_runtime_idle(&pdev->dev);
1115                 return ret;
1116         }
1117         if (is_sh_early_platform_device(pdev))
1118                 return 0;
1119
1120  out:
1121         if (cmt->has_clockevent || cmt->has_clocksource)
1122                 pm_runtime_irq_safe(&pdev->dev);
1123         else
1124                 pm_runtime_idle(&pdev->dev);
1125
1126         return 0;
1127 }
1128
1129 static int sh_cmt_remove(struct platform_device *pdev)
1130 {
1131         return -EBUSY; /* cannot unregister clockevent and clocksource */
1132 }
1133
1134 static struct platform_driver sh_cmt_device_driver = {
1135         .probe          = sh_cmt_probe,
1136         .remove         = sh_cmt_remove,
1137         .driver         = {
1138                 .name   = "sh_cmt",
1139                 .of_match_table = of_match_ptr(sh_cmt_of_table),
1140         },
1141         .id_table       = sh_cmt_id_table,
1142 };
1143
1144 static int __init sh_cmt_init(void)
1145 {
1146         return platform_driver_register(&sh_cmt_device_driver);
1147 }
1148
1149 static void __exit sh_cmt_exit(void)
1150 {
1151         platform_driver_unregister(&sh_cmt_device_driver);
1152 }
1153
1154 #ifdef CONFIG_SUPERH
1155 sh_early_platform_init("earlytimer", &sh_cmt_device_driver);
1156 #endif
1157
1158 subsys_initcall(sh_cmt_init);
1159 module_exit(sh_cmt_exit);
1160
1161 MODULE_AUTHOR("Magnus Damm");
1162 MODULE_DESCRIPTION("SuperH CMT Timer Driver");
1163 MODULE_LICENSE("GPL v2");