GNU Linux-libre 4.9.318-gnu1
[releases.git] / drivers / clocksource / arm_arch_timer.c
1 /*
2  *  linux/drivers/clocksource/arm_arch_timer.c
3  *
4  *  Copyright (C) 2011 ARM Ltd.
5  *  All Rights Reserved
6  *
7  * This program is free software; you can redistribute it and/or modify
8  * it under the terms of the GNU General Public License version 2 as
9  * published by the Free Software Foundation.
10  */
11
12 #define pr_fmt(fmt)     "arm_arch_timer: " fmt
13
14 #include <linux/init.h>
15 #include <linux/kernel.h>
16 #include <linux/device.h>
17 #include <linux/smp.h>
18 #include <linux/cpu.h>
19 #include <linux/cpu_pm.h>
20 #include <linux/clockchips.h>
21 #include <linux/clocksource.h>
22 #include <linux/interrupt.h>
23 #include <linux/of_irq.h>
24 #include <linux/of_address.h>
25 #include <linux/io.h>
26 #include <linux/slab.h>
27 #include <linux/sched_clock.h>
28 #include <linux/acpi.h>
29
30 #include <asm/arch_timer.h>
31 #include <asm/virt.h>
32
33 #include <clocksource/arm_arch_timer.h>
34
/* CNTCTLBase frame registers of the memory-mapped timer. */
#define CNTTIDR		0x08
#define CNTTIDR_VIRT(n)	(BIT(1) << ((n) * 4))	/* frame n implements a virtual timer */

/* Per-frame access control register and its permission bits. */
#define CNTACR(n)	(0x40 + ((n) * 4))
#define CNTACR_RPCT	BIT(0)	/* read physical counter */
#define CNTACR_RVCT	BIT(1)	/* read virtual counter */
#define CNTACR_RFRQ	BIT(2)	/* read frequency register */
#define CNTACR_RVOFF	BIT(3)	/* read virtual offset */
#define CNTACR_RWVT	BIT(4)	/* read/write virtual timer registers */
#define CNTACR_RWPT	BIT(5)	/* read/write physical timer registers */

/* CNTBase (per-frame) register offsets. */
#define CNTVCT_LO	0x08
#define CNTVCT_HI	0x0c
#define CNTFRQ		0x10
#define CNTP_TVAL	0x28
#define CNTP_CTL	0x2c
#define CNTV_TVAL	0x38
#define CNTV_CTL	0x3c

/* Which timer flavours have been discovered during early probing. */
#define ARCH_CP15_TIMER	BIT(0)
#define ARCH_MEM_TIMER	BIT(1)
static unsigned arch_timers_present __initdata;
57
/* MMIO base used by arch_counter_get_cntvct_mem(). */
static void __iomem *arch_counter_base;

/* A memory-mapped timer frame: its MMIO base plus its clockevent. */
struct arch_timer {
	void __iomem *base;
	struct clock_event_device evt;
};

#define to_arch_timer(e) container_of(e, struct arch_timer, evt)

/* Counter frequency in Hz; detected once and shared by all timers. */
static u32 arch_timer_rate;

/* Indices into arch_timer_ppi[] for the per-CPU timer interrupts. */
enum ppi_nr {
	PHYS_SECURE_PPI,
	PHYS_NONSECURE_PPI,
	VIRT_PPI,
	HYP_PPI,
	MAX_TIMER_PPI
};

static int arch_timer_ppi[MAX_TIMER_PPI];

static struct clock_event_device __percpu *arch_timer_evt;

/* Which PPI (and therefore which CP15 timer) the clockevent uses. */
static enum ppi_nr arch_timer_uses_ppi = VIRT_PPI;
static bool arch_timer_c3stop;		/* sets CLOCK_EVT_FEAT_C3STOP when true */
static bool arch_timer_mem_use_virtual;	/* MMIO timer: virtual vs physical regs */
static bool arch_counter_suspend_stop;	/* counter does not tick across suspend */

static bool evtstrm_enable = IS_ENABLED(CONFIG_ARM_ARCH_TIMER_EVTSTREAM);

/* Parse the "clocksource.arm_arch_timer.evtstrm=" boot parameter. */
static int __init early_evtstrm_cfg(char *buf)
{
	return strtobool(buf, &evtstrm_enable);
}
early_param("clocksource.arm_arch_timer.evtstrm", early_evtstrm_cfg);
93
/*
 * Architected system timer support.
 */

#ifdef CONFIG_FSL_ERRATUM_A008585
/*
 * The number of retries is an arbitrary value well beyond the highest number
 * of iterations the loop has been observed to take.
 */
/*
 * Erratum A008585: a single read of the counter/timer registers may return
 * a wrong value.  Re-read until two back-to-back reads agree (or the retry
 * budget is exhausted, which is reported once via WARN_ON_ONCE).
 */
#define __fsl_a008585_read_reg(reg) ({			\
	u64 _old, _new;					\
	int _retries = 200;				\
							\
	do {						\
		_old = read_sysreg(reg);		\
		_new = read_sysreg(reg);		\
		_retries--;				\
	} while (unlikely(_old != _new) && _retries);	\
							\
	WARN_ON_ONCE(!_retries);			\
	_new;						\
})

/* Stable read of CNTP_TVAL_EL0 under erratum A008585. */
static u32 notrace fsl_a008585_read_cntp_tval_el0(void)
{
	return __fsl_a008585_read_reg(cntp_tval_el0);
}

/* Stable read of CNTV_TVAL_EL0 under erratum A008585. */
static u32 notrace fsl_a008585_read_cntv_tval_el0(void)
{
	return __fsl_a008585_read_reg(cntv_tval_el0);
}

/* Stable read of CNTVCT_EL0 under erratum A008585. */
static u64 notrace fsl_a008585_read_cntvct_el0(void)
{
	return __fsl_a008585_read_reg(cntvct_el0);
}
#endif
132
#ifdef CONFIG_ARM64_ERRATUM_1188873
/*
 * Out-of-line CNTVCT accessor for erratum 1188873.  The body is a plain
 * read; NOTE(review): the actual corrective handling presumably happens
 * elsewhere (e.g. trap/ISA level) — confirm against the erratum notice.
 */
static u64 notrace arm64_1188873_read_cntvct_el0(void)
{
	return read_sysreg(cntvct_el0);
}
#endif
139
140 #ifdef CONFIG_ARM_ARCH_TIMER_OOL_WORKAROUND
141 const struct arch_timer_erratum_workaround *timer_unstable_counter_workaround = NULL;
142 EXPORT_SYMBOL_GPL(timer_unstable_counter_workaround);
143
144 DEFINE_STATIC_KEY_FALSE(arch_timer_read_ool_enabled);
145 EXPORT_SYMBOL_GPL(arch_timer_read_ool_enabled);
146
147 static const struct arch_timer_erratum_workaround ool_workarounds[] = {
148 #ifdef CONFIG_FSL_ERRATUM_A008585
149         {
150                 .match_type = ate_match_dt,
151                 .id = "fsl,erratum-a008585",
152                 .desc = "Freescale erratum a005858",
153                 .read_cntp_tval_el0 = fsl_a008585_read_cntp_tval_el0,
154                 .read_cntv_tval_el0 = fsl_a008585_read_cntv_tval_el0,
155                 .read_cntvct_el0 = fsl_a008585_read_cntvct_el0,
156         },
157 #endif
158 #ifdef CONFIG_ARM64_ERRATUM_1188873
159         {
160                 .match_type = ate_match_local_cap_id,
161                 .id = (void *)ARM64_WORKAROUND_1188873,
162                 .desc = "ARM erratum 1188873",
163                 .read_cntvct_el0 = arm64_1188873_read_cntvct_el0,
164         },
165 #endif
166 };
167
/* Predicate deciding whether workaround @wa applies; @arg is match-specific. */
typedef bool (*ate_match_fn_t)(const struct arch_timer_erratum_workaround *,
			       const void *);

/* DT match: @arg is the timer node; @wa->id is a boolean DT property name. */
static
bool arch_timer_check_dt_erratum(const struct arch_timer_erratum_workaround *wa,
				 const void *arg)
{
	const struct device_node *np = arg;

	return of_property_read_bool(np, wa->id);
}

/* CPU-capability match: @wa->id encodes the capability number; @arg unused. */
static
bool arch_timer_check_local_cap_erratum(const struct arch_timer_erratum_workaround *wa,
					const void *arg)
{
	return this_cpu_has_cap((uintptr_t)wa->id);
}
186
187 static const struct arch_timer_erratum_workaround *
188 arch_timer_iterate_errata(enum arch_timer_erratum_match_type type,
189                           ate_match_fn_t match_fn,
190                           void *arg)
191 {
192         int i;
193
194         for (i = 0; i < ARRAY_SIZE(ool_workarounds); i++) {
195                 if (ool_workarounds[i].match_type != type)
196                         continue;
197
198                 if (match_fn(&ool_workarounds[i], arg))
199                         return &ool_workarounds[i];
200         }
201
202         return NULL;
203 }
204
/* Install @wa and flip the static key so readers take the OOL accessors. */
static
void arch_timer_enable_workaround(const struct arch_timer_erratum_workaround *wa)
{
	timer_unstable_counter_workaround = wa;
	static_branch_enable(&arch_timer_read_ool_enabled);
}
211
212 static void arch_timer_check_ool_workaround(enum arch_timer_erratum_match_type type,
213                                             void *arg)
214 {
215         const struct arch_timer_erratum_workaround *wa;
216         ate_match_fn_t match_fn = NULL;
217         bool local = false;
218
219         switch (type) {
220         case ate_match_dt:
221                 match_fn = arch_timer_check_dt_erratum;
222                 break;
223         case ate_match_local_cap_id:
224                 match_fn = arch_timer_check_local_cap_erratum;
225                 local = true;
226                 break;
227         default:
228                 WARN_ON(1);
229                 return;
230         }
231
232         wa = arch_timer_iterate_errata(type, match_fn, arg);
233         if (!wa)
234                 return;
235
236         if (needs_unstable_timer_counter_workaround()) {
237                 if (wa != timer_unstable_counter_workaround)
238                         pr_warn("Can't enable workaround for %s (clashes with %s\n)",
239                                 wa->desc,
240                                 timer_unstable_counter_workaround->desc);
241                 return;
242         }
243
244         arch_timer_enable_workaround(wa);
245         pr_info("Enabling %s workaround for %s\n",
246                 local ? "local" : "global", wa->desc);
247 }
248
249 #else
250 #define arch_timer_check_ool_workaround(t,a)            do { } while(0)
251 #endif /* CONFIG_ARM_ARCH_TIMER_OOL_WORKAROUND */
252
/*
 * Write a timer control/compare register, dispatching on access type:
 * MMIO physical frame, MMIO virtual frame, or the CP15/sysreg interface.
 */
static __always_inline
void arch_timer_reg_write(int access, enum arch_timer_reg reg, u32 val,
			  struct clock_event_device *clk)
{
	if (access == ARCH_TIMER_MEM_PHYS_ACCESS) {
		struct arch_timer *timer = to_arch_timer(clk);
		switch (reg) {
		case ARCH_TIMER_REG_CTRL:
			writel_relaxed(val, timer->base + CNTP_CTL);
			break;
		case ARCH_TIMER_REG_TVAL:
			writel_relaxed(val, timer->base + CNTP_TVAL);
			break;
		}
	} else if (access == ARCH_TIMER_MEM_VIRT_ACCESS) {
		struct arch_timer *timer = to_arch_timer(clk);
		switch (reg) {
		case ARCH_TIMER_REG_CTRL:
			writel_relaxed(val, timer->base + CNTV_CTL);
			break;
		case ARCH_TIMER_REG_TVAL:
			writel_relaxed(val, timer->base + CNTV_TVAL);
			break;
		}
	} else {
		/* CP15/system-register timers */
		arch_timer_reg_write_cp15(access, reg, val);
	}
}

/*
 * Read a timer control/compare register; dispatch mirrors
 * arch_timer_reg_write() above.
 */
static __always_inline
u32 arch_timer_reg_read(int access, enum arch_timer_reg reg,
			struct clock_event_device *clk)
{
	u32 val;

	if (access == ARCH_TIMER_MEM_PHYS_ACCESS) {
		struct arch_timer *timer = to_arch_timer(clk);
		switch (reg) {
		case ARCH_TIMER_REG_CTRL:
			val = readl_relaxed(timer->base + CNTP_CTL);
			break;
		case ARCH_TIMER_REG_TVAL:
			val = readl_relaxed(timer->base + CNTP_TVAL);
			break;
		}
	} else if (access == ARCH_TIMER_MEM_VIRT_ACCESS) {
		struct arch_timer *timer = to_arch_timer(clk);
		switch (reg) {
		case ARCH_TIMER_REG_CTRL:
			val = readl_relaxed(timer->base + CNTV_CTL);
			break;
		case ARCH_TIMER_REG_TVAL:
			val = readl_relaxed(timer->base + CNTV_TVAL);
			break;
		}
	} else {
		val = arch_timer_reg_read_cp15(access, reg);
	}

	return val;
}

/*
 * Common interrupt path: if the timer's interrupt-status bit is set,
 * mask the interrupt and hand off to the clockevent handler; otherwise
 * the IRQ was not ours.
 */
static __always_inline irqreturn_t timer_handler(const int access,
					struct clock_event_device *evt)
{
	unsigned long ctrl;

	ctrl = arch_timer_reg_read(access, ARCH_TIMER_REG_CTRL, evt);
	if (ctrl & ARCH_TIMER_CTRL_IT_STAT) {
		/* Mask until the next set_next_event() re-enables delivery. */
		ctrl |= ARCH_TIMER_CTRL_IT_MASK;
		arch_timer_reg_write(access, ARCH_TIMER_REG_CTRL, ctrl, evt);
		evt->event_handler(evt);
		return IRQ_HANDLED;
	}

	return IRQ_NONE;
}
330
331 static irqreturn_t arch_timer_handler_virt(int irq, void *dev_id)
332 {
333         struct clock_event_device *evt = dev_id;
334
335         return timer_handler(ARCH_TIMER_VIRT_ACCESS, evt);
336 }
337
338 static irqreturn_t arch_timer_handler_phys(int irq, void *dev_id)
339 {
340         struct clock_event_device *evt = dev_id;
341
342         return timer_handler(ARCH_TIMER_PHYS_ACCESS, evt);
343 }
344
345 static irqreturn_t arch_timer_handler_phys_mem(int irq, void *dev_id)
346 {
347         struct clock_event_device *evt = dev_id;
348
349         return timer_handler(ARCH_TIMER_MEM_PHYS_ACCESS, evt);
350 }
351
352 static irqreturn_t arch_timer_handler_virt_mem(int irq, void *dev_id)
353 {
354         struct clock_event_device *evt = dev_id;
355
356         return timer_handler(ARCH_TIMER_MEM_VIRT_ACCESS, evt);
357 }
358
/* Clear the enable bit so the timer stops firing; always returns 0. */
static __always_inline int timer_shutdown(const int access,
					  struct clock_event_device *clk)
{
	unsigned long ctrl;

	ctrl = arch_timer_reg_read(access, ARCH_TIMER_REG_CTRL, clk);
	ctrl &= ~ARCH_TIMER_CTRL_ENABLE;
	arch_timer_reg_write(access, ARCH_TIMER_REG_CTRL, ctrl, clk);

	return 0;
}

/* set_state_shutdown callbacks, one per timer flavour. */
static int arch_timer_shutdown_virt(struct clock_event_device *clk)
{
	return timer_shutdown(ARCH_TIMER_VIRT_ACCESS, clk);
}

static int arch_timer_shutdown_phys(struct clock_event_device *clk)
{
	return timer_shutdown(ARCH_TIMER_PHYS_ACCESS, clk);
}

static int arch_timer_shutdown_virt_mem(struct clock_event_device *clk)
{
	return timer_shutdown(ARCH_TIMER_MEM_VIRT_ACCESS, clk);
}

static int arch_timer_shutdown_phys_mem(struct clock_event_device *clk)
{
	return timer_shutdown(ARCH_TIMER_MEM_PHYS_ACCESS, clk);
}

/* Program TVAL with the delta, then enable and unmask in one CTRL write. */
static __always_inline void set_next_event(const int access, unsigned long evt,
					   struct clock_event_device *clk)
{
	unsigned long ctrl;
	ctrl = arch_timer_reg_read(access, ARCH_TIMER_REG_CTRL, clk);
	ctrl |= ARCH_TIMER_CTRL_ENABLE;
	ctrl &= ~ARCH_TIMER_CTRL_IT_MASK;
	arch_timer_reg_write(access, ARCH_TIMER_REG_TVAL, evt, clk);
	arch_timer_reg_write(access, ARCH_TIMER_REG_CTRL, ctrl, clk);
}
401
#ifdef CONFIG_ARM_ARCH_TIMER_OOL_WORKAROUND
/*
 * Erratum-safe event programming: instead of writing the (erratum-prone)
 * TVAL register, compute an absolute compare value from the current count
 * and write CVAL directly.
 */
static __always_inline void erratum_set_next_event_generic(const int access,
		unsigned long evt, struct clock_event_device *clk)
{
	unsigned long ctrl;
	u64 cval = evt + arch_counter_get_cntvct();

	ctrl = arch_timer_reg_read(access, ARCH_TIMER_REG_CTRL, clk);
	ctrl |= ARCH_TIMER_CTRL_ENABLE;
	ctrl &= ~ARCH_TIMER_CTRL_IT_MASK;

	if (access == ARCH_TIMER_PHYS_ACCESS)
		write_sysreg(cval, cntp_cval_el0);
	else if (access == ARCH_TIMER_VIRT_ACCESS)
		write_sysreg(cval, cntv_cval_el0);

	arch_timer_reg_write(access, ARCH_TIMER_REG_CTRL, ctrl, clk);
}

static int erratum_set_next_event_virt(unsigned long evt,
					   struct clock_event_device *clk)
{
	erratum_set_next_event_generic(ARCH_TIMER_VIRT_ACCESS, evt, clk);
	return 0;
}

static int erratum_set_next_event_phys(unsigned long evt,
					   struct clock_event_device *clk)
{
	erratum_set_next_event_generic(ARCH_TIMER_PHYS_ACCESS, evt, clk);
	return 0;
}
#endif /* CONFIG_ARM_ARCH_TIMER_OOL_WORKAROUND */

/* Regular set_next_event callbacks, one per timer flavour. */
static int arch_timer_set_next_event_virt(unsigned long evt,
					  struct clock_event_device *clk)
{
	set_next_event(ARCH_TIMER_VIRT_ACCESS, evt, clk);
	return 0;
}

static int arch_timer_set_next_event_phys(unsigned long evt,
					  struct clock_event_device *clk)
{
	set_next_event(ARCH_TIMER_PHYS_ACCESS, evt, clk);
	return 0;
}

static int arch_timer_set_next_event_virt_mem(unsigned long evt,
					      struct clock_event_device *clk)
{
	set_next_event(ARCH_TIMER_MEM_VIRT_ACCESS, evt, clk);
	return 0;
}

static int arch_timer_set_next_event_phys_mem(unsigned long evt,
					      struct clock_event_device *clk)
{
	set_next_event(ARCH_TIMER_MEM_PHYS_ACCESS, evt, clk);
	return 0;
}

/*
 * If an OOL workaround is active, replace the clockevent's set_next_event
 * with the CVAL-based erratum-safe variant.  No-op when the workaround
 * infrastructure is compiled out.
 */
static void erratum_workaround_set_sne(struct clock_event_device *clk)
{
#ifdef CONFIG_ARM_ARCH_TIMER_OOL_WORKAROUND
	if (!static_branch_unlikely(&arch_timer_read_ool_enabled))
		return;

	if (arch_timer_uses_ppi == VIRT_PPI)
		clk->set_next_event = erratum_set_next_event_virt;
	else
		clk->set_next_event = erratum_set_next_event_phys;
#endif
}
476
/*
 * Populate and register one clockevent device, either for the per-CPU
 * CP15 timer (type == ARCH_CP15_TIMER) or for a memory-mapped frame.
 * Runs on the CPU the clockevent will serve (CP15 case).
 */
static void __arch_timer_setup(unsigned type,
			       struct clock_event_device *clk)
{
	clk->features = CLOCK_EVT_FEAT_ONESHOT;

	if (type == ARCH_CP15_TIMER) {
		if (arch_timer_c3stop)
			clk->features |= CLOCK_EVT_FEAT_C3STOP;
		clk->name = "arch_sys_timer";
		clk->rating = 450;
		clk->cpumask = cpumask_of(smp_processor_id());
		clk->irq = arch_timer_ppi[arch_timer_uses_ppi];
		/* Pick callbacks matching the PPI (and thus timer) in use. */
		switch (arch_timer_uses_ppi) {
		case VIRT_PPI:
			clk->set_state_shutdown = arch_timer_shutdown_virt;
			clk->set_state_oneshot_stopped = arch_timer_shutdown_virt;
			clk->set_next_event = arch_timer_set_next_event_virt;
			break;
		case PHYS_SECURE_PPI:
		case PHYS_NONSECURE_PPI:
		case HYP_PPI:
			clk->set_state_shutdown = arch_timer_shutdown_phys;
			clk->set_state_oneshot_stopped = arch_timer_shutdown_phys;
			clk->set_next_event = arch_timer_set_next_event_phys;
			break;
		default:
			BUG();
		}

		/* Per-CPU errata may override set_next_event below. */
		arch_timer_check_ool_workaround(ate_match_local_cap_id, NULL);

		erratum_workaround_set_sne(clk);
	} else {
		clk->features |= CLOCK_EVT_FEAT_DYNIRQ;
		clk->name = "arch_mem_timer";
		clk->rating = 400;
		clk->cpumask = cpu_all_mask;
		if (arch_timer_mem_use_virtual) {
			clk->set_state_shutdown = arch_timer_shutdown_virt_mem;
			clk->set_state_oneshot_stopped = arch_timer_shutdown_virt_mem;
			clk->set_next_event =
				arch_timer_set_next_event_virt_mem;
		} else {
			clk->set_state_shutdown = arch_timer_shutdown_phys_mem;
			clk->set_state_oneshot_stopped = arch_timer_shutdown_phys_mem;
			clk->set_next_event =
				arch_timer_set_next_event_phys_mem;
		}
	}

	/* Start from a known-disabled state before registration. */
	clk->set_state_shutdown(clk);

	clockevents_config_and_register(clk, arch_timer_rate, 0xf, 0x7fffffff);
}
531
/*
 * Enable the virtual counter event stream with the given trigger-bit
 * divider and advertise the feature via the ELF hwcaps.
 */
static void arch_timer_evtstrm_enable(int divider)
{
	u32 cntkctl = arch_timer_get_cntkctl();

	cntkctl &= ~ARCH_TIMER_EVT_TRIGGER_MASK;
	/* Set the divider and enable virtual event stream */
	cntkctl |= (divider << ARCH_TIMER_EVT_TRIGGER_SHIFT)
			| ARCH_TIMER_VIRT_EVT_EN;
	arch_timer_set_cntkctl(cntkctl);
	elf_hwcap |= HWCAP_EVTSTRM;
#ifdef CONFIG_COMPAT
	compat_elf_hwcap |= COMPAT_HWCAP_EVTSTRM;
#endif
}

/*
 * Pick the counter trigger bit that yields an event stream closest to
 * ARCH_TIMER_EVT_STREAM_FREQ, then enable the stream.
 */
static void arch_timer_configure_evtstream(void)
{
	int evt_stream_div, lsb;

	/*
	 * As the event stream can at most be generated at half the frequency
	 * of the counter, use half the frequency when computing the divider.
	 */
	evt_stream_div = arch_timer_rate / ARCH_TIMER_EVT_STREAM_FREQ / 2;

	/*
	 * Find the closest power of two to the divisor. If the adjacent bit
	 * of lsb (last set bit, starts from 0) is set, then we use (lsb + 1).
	 */
	lsb = fls(evt_stream_div) - 1;
	if (lsb > 0 && (evt_stream_div & BIT(lsb - 1)))
		lsb++;

	/* enable event stream; trigger bit is clamped to the 0..15 range */
	arch_timer_evtstrm_enable(max(0, min(lsb, 15)));
}

/*
 * Configure CNTKCTL so userspace can read only the virtual counter;
 * timer registers, the physical counter and the event stream stay
 * kernel-only.
 */
static void arch_counter_set_user_access(void)
{
	u32 cntkctl = arch_timer_get_cntkctl();

	/* Disable user access to the timers and the physical counter */
	/* Also disable virtual event stream */
	cntkctl &= ~(ARCH_TIMER_USR_PT_ACCESS_EN
			| ARCH_TIMER_USR_VT_ACCESS_EN
			| ARCH_TIMER_VIRT_EVT_EN
			| ARCH_TIMER_USR_PCT_ACCESS_EN);

	/* Enable user access to the virtual counter */
	cntkctl |= ARCH_TIMER_USR_VCT_ACCESS_EN;

	arch_timer_set_cntkctl(cntkctl);
}
585
586 static bool arch_timer_has_nonsecure_ppi(void)
587 {
588         return (arch_timer_uses_ppi == PHYS_SECURE_PPI &&
589                 arch_timer_ppi[PHYS_NONSECURE_PPI]);
590 }
591
592 static u32 check_ppi_trigger(int irq)
593 {
594         u32 flags = irq_get_trigger_type(irq);
595
596         if (flags != IRQF_TRIGGER_HIGH && flags != IRQF_TRIGGER_LOW) {
597                 pr_warn("WARNING: Invalid trigger for IRQ%d, assuming level low\n", irq);
598                 pr_warn("WARNING: Please fix your firmware\n");
599                 flags = IRQF_TRIGGER_LOW;
600         }
601
602         return flags;
603 }
604
/*
 * CPU hotplug "starting" callback: set up this CPU's clockevent, enable
 * its timer PPI(s), lock down user access and (optionally) the event
 * stream.  Runs on the incoming CPU with interrupts disabled.
 */
static int arch_timer_starting_cpu(unsigned int cpu)
{
	struct clock_event_device *clk = this_cpu_ptr(arch_timer_evt);
	u32 flags;

	__arch_timer_setup(ARCH_CP15_TIMER, clk);

	flags = check_ppi_trigger(arch_timer_ppi[arch_timer_uses_ppi]);
	enable_percpu_irq(arch_timer_ppi[arch_timer_uses_ppi], flags);

	/* Secure PHYS configuration may carry a second, non-secure PPI. */
	if (arch_timer_has_nonsecure_ppi()) {
		flags = check_ppi_trigger(arch_timer_ppi[PHYS_NONSECURE_PPI]);
		enable_percpu_irq(arch_timer_ppi[PHYS_NONSECURE_PPI], flags);
	}

	arch_counter_set_user_access();
	if (evtstrm_enable)
		arch_timer_configure_evtstream();

	return 0;
}

/*
 * Determine arch_timer_rate once: from DT "clock-frequency" if present
 * (and ACPI is disabled), otherwise from the CNTFRQ register of the MMIO
 * frame (@cntbase) or the CP15 interface.
 */
static void
arch_timer_detect_rate(void __iomem *cntbase, struct device_node *np)
{
	/* Who has more than one independent system counter? */
	if (arch_timer_rate)
		return;

	/*
	 * Try to determine the frequency from the device tree or CNTFRQ,
	 * if ACPI is enabled, get the frequency from CNTFRQ ONLY.
	 */
	if (!acpi_disabled ||
	    of_property_read_u32(np, "clock-frequency", &arch_timer_rate)) {
		if (cntbase)
			arch_timer_rate = readl_relaxed(cntbase + CNTFRQ);
		else
			arch_timer_rate = arch_timer_get_cntfrq();
	}

	/* Check the timer frequency. */
	if (arch_timer_rate == 0)
		pr_warn("Architected timer frequency not available\n");
}

/* One-line boot summary of which timers were found and their rate. */
static void arch_timer_banner(unsigned type)
{
	pr_info("Architected %s%s%s timer(s) running at %lu.%02luMHz (%s%s%s).\n",
		     type & ARCH_CP15_TIMER ? "cp15" : "",
		     type == (ARCH_CP15_TIMER | ARCH_MEM_TIMER) ?  " and " : "",
		     type & ARCH_MEM_TIMER ? "mmio" : "",
		     (unsigned long)arch_timer_rate / 1000000,
		     (unsigned long)(arch_timer_rate / 10000) % 100,
		     type & ARCH_CP15_TIMER ?
		     (arch_timer_uses_ppi == VIRT_PPI) ? "virt" : "phys" :
			"",
		     type == (ARCH_CP15_TIMER | ARCH_MEM_TIMER) ?  "/" : "",
		     type & ARCH_MEM_TIMER ?
			arch_timer_mem_use_virtual ? "virt" : "phys" :
			"");
}
667
/* Exported accessor for the detected counter frequency (Hz). */
u32 arch_timer_get_rate(void)
{
	return arch_timer_rate;
}

/*
 * 64-bit MMIO counter read: re-read the high word until it is stable
 * around the low-word read, so a carry between the two 32-bit halves
 * cannot produce a torn value.
 */
static u64 arch_counter_get_cntvct_mem(void)
{
	u32 vct_lo, vct_hi, tmp_hi;

	do {
		vct_hi = readl_relaxed(arch_counter_base + CNTVCT_HI);
		vct_lo = readl_relaxed(arch_counter_base + CNTVCT_LO);
		tmp_hi = readl_relaxed(arch_counter_base + CNTVCT_HI);
	} while (vct_hi != tmp_hi);

	return ((u64) vct_hi << 32) | vct_lo;
}

/*
 * Default to cp15 based access because arm64 uses this function for
 * sched_clock() before DT is probed and the cp15 method is guaranteed
 * to exist on arm64. arm doesn't use this before DT is probed so even
 * if we don't have the cp15 accessors we won't have a problem.
 */
u64 (*arch_timer_read_counter)(void) = arch_counter_get_cntvct;

/* clocksource read callback: indirect through the chosen accessor. */
static cycle_t arch_counter_read(struct clocksource *cs)
{
	return arch_timer_read_counter();
}

/* cyclecounter read callback (used by the KVM timecounter below). */
static cycle_t arch_counter_read_cc(const struct cyclecounter *cc)
{
	return arch_timer_read_counter();
}

static struct clocksource clocksource_counter = {
	.name	= "arch_sys_counter",
	.rating	= 400,
	.read	= arch_counter_read,
	/* Architected counter is at least 56 bits wide. */
	.mask	= CLOCKSOURCE_MASK(56),
	.flags	= CLOCK_SOURCE_IS_CONTINUOUS,
};

static struct cyclecounter cyclecounter = {
	.read	= arch_counter_read_cc,
	.mask	= CLOCKSOURCE_MASK(56),
};

static struct arch_timer_kvm_info arch_timer_kvm_info;

/* Hand KVM the timecounter and virtual-timer IRQ info gathered here. */
struct arch_timer_kvm_info *arch_timer_get_kvm_info(void)
{
	return &arch_timer_kvm_info;
}
723
/*
 * Register the system counter as clocksource, sched_clock and KVM
 * timecounter, choosing the best available access method for @type.
 */
static void __init arch_counter_register(unsigned type)
{
	u64 start_count;

	/* Register the CP15 based counter if we have one */
	if (type & ARCH_CP15_TIMER) {
		/* arm64 always reads CNTVCT; 32-bit arm only when on VIRT_PPI. */
		if (IS_ENABLED(CONFIG_ARM64) || arch_timer_uses_ppi == VIRT_PPI)
			arch_timer_read_counter = arch_counter_get_cntvct;
		else
			arch_timer_read_counter = arch_counter_get_cntpct;

		clocksource_counter.archdata.vdso_direct = true;

#ifdef CONFIG_ARM_ARCH_TIMER_OOL_WORKAROUND
		/*
		 * Don't use the vdso fastpath if errata require using
		 * the out-of-line counter accessor.
		 */
		if (static_branch_unlikely(&arch_timer_read_ool_enabled))
			clocksource_counter.archdata.vdso_direct = false;
#endif
	} else {
		/* MMIO-only system: fall back to the frame-based reader. */
		arch_timer_read_counter = arch_counter_get_cntvct_mem;
	}

	if (!arch_counter_suspend_stop)
		clocksource_counter.flags |= CLOCK_SOURCE_SUSPEND_NONSTOP;
	start_count = arch_timer_read_counter();
	clocksource_register_hz(&clocksource_counter, arch_timer_rate);
	cyclecounter.mult = clocksource_counter.mult;
	cyclecounter.shift = clocksource_counter.shift;
	timecounter_init(&arch_timer_kvm_info.timecounter,
			 &cyclecounter, start_count);

	/* 56 bits minimum, so we assume worst case rollover */
	sched_clock_register(arch_timer_read_counter, 56, arch_timer_rate);
}
761
/* Disable this CPU's timer PPI(s) and shut the clockevent down. */
static void arch_timer_stop(struct clock_event_device *clk)
{
	pr_debug("arch_timer_teardown disable IRQ%d cpu #%d\n",
		 clk->irq, smp_processor_id());

	disable_percpu_irq(arch_timer_ppi[arch_timer_uses_ppi]);
	if (arch_timer_has_nonsecure_ppi())
		disable_percpu_irq(arch_timer_ppi[PHYS_NONSECURE_PPI]);

	clk->set_state_shutdown(clk);
}

/* CPU hotplug "dying" callback: tear down the outgoing CPU's timer. */
static int arch_timer_dying_cpu(unsigned int cpu)
{
	struct clock_event_device *clk = this_cpu_ptr(arch_timer_evt);

	arch_timer_stop(clk);
	return 0;
}

#ifdef CONFIG_CPU_PM
/* CNTKCTL saved on CPU_PM_ENTER and restored on exit/failed-entry. */
static unsigned int saved_cntkctl;
static int arch_timer_cpu_pm_notify(struct notifier_block *self,
				    unsigned long action, void *hcpu)
{
	if (action == CPU_PM_ENTER)
		saved_cntkctl = arch_timer_get_cntkctl();
	else if (action == CPU_PM_ENTER_FAILED || action == CPU_PM_EXIT)
		arch_timer_set_cntkctl(saved_cntkctl);
	return NOTIFY_OK;
}

static struct notifier_block arch_timer_cpu_pm_notifier = {
	.notifier_call = arch_timer_cpu_pm_notify,
};

static int __init arch_timer_cpu_pm_init(void)
{
	return cpu_pm_register_notifier(&arch_timer_cpu_pm_notifier);
}

static void __init arch_timer_cpu_pm_deinit(void)
{
	WARN_ON(cpu_pm_unregister_notifier(&arch_timer_cpu_pm_notifier));
}

#else
/* !CONFIG_CPU_PM: nothing to save/restore around low-power states. */
static int __init arch_timer_cpu_pm_init(void)
{
	return 0;
}

static void __init arch_timer_cpu_pm_deinit(void)
{
}
#endif
818
819 static int __init arch_timer_register(void)
820 {
821         int err;
822         int ppi;
823
824         arch_timer_evt = alloc_percpu(struct clock_event_device);
825         if (!arch_timer_evt) {
826                 err = -ENOMEM;
827                 goto out;
828         }
829
830         ppi = arch_timer_ppi[arch_timer_uses_ppi];
831         switch (arch_timer_uses_ppi) {
832         case VIRT_PPI:
833                 err = request_percpu_irq(ppi, arch_timer_handler_virt,
834                                          "arch_timer", arch_timer_evt);
835                 break;
836         case PHYS_SECURE_PPI:
837         case PHYS_NONSECURE_PPI:
838                 err = request_percpu_irq(ppi, arch_timer_handler_phys,
839                                          "arch_timer", arch_timer_evt);
840                 if (!err && arch_timer_ppi[PHYS_NONSECURE_PPI]) {
841                         ppi = arch_timer_ppi[PHYS_NONSECURE_PPI];
842                         err = request_percpu_irq(ppi, arch_timer_handler_phys,
843                                                  "arch_timer", arch_timer_evt);
844                         if (err)
845                                 free_percpu_irq(arch_timer_ppi[PHYS_SECURE_PPI],
846                                                 arch_timer_evt);
847                 }
848                 break;
849         case HYP_PPI:
850                 err = request_percpu_irq(ppi, arch_timer_handler_phys,
851                                          "arch_timer", arch_timer_evt);
852                 break;
853         default:
854                 BUG();
855         }
856
857         if (err) {
858                 pr_err("arch_timer: can't register interrupt %d (%d)\n",
859                        ppi, err);
860                 goto out_free;
861         }
862
863         err = arch_timer_cpu_pm_init();
864         if (err)
865                 goto out_unreg_notify;
866
867
868         /* Register and immediately configure the timer on the boot CPU */
869         err = cpuhp_setup_state(CPUHP_AP_ARM_ARCH_TIMER_STARTING,
870                                 "AP_ARM_ARCH_TIMER_STARTING",
871                                 arch_timer_starting_cpu, arch_timer_dying_cpu);
872         if (err)
873                 goto out_unreg_cpupm;
874         return 0;
875
876 out_unreg_cpupm:
877         arch_timer_cpu_pm_deinit();
878
879 out_unreg_notify:
880         free_percpu_irq(arch_timer_ppi[arch_timer_uses_ppi], arch_timer_evt);
881         if (arch_timer_has_nonsecure_ppi())
882                 free_percpu_irq(arch_timer_ppi[PHYS_NONSECURE_PPI],
883                                 arch_timer_evt);
884
885 out_free:
886         free_percpu(arch_timer_evt);
887 out:
888         return err;
889 }
890
891 static int __init arch_timer_mem_register(void __iomem *base, unsigned int irq)
892 {
893         int ret;
894         irq_handler_t func;
895         struct arch_timer *t;
896
897         t = kzalloc(sizeof(*t), GFP_KERNEL);
898         if (!t)
899                 return -ENOMEM;
900
901         t->base = base;
902         t->evt.irq = irq;
903         __arch_timer_setup(ARCH_MEM_TIMER, &t->evt);
904
905         if (arch_timer_mem_use_virtual)
906                 func = arch_timer_handler_virt_mem;
907         else
908                 func = arch_timer_handler_phys_mem;
909
910         ret = request_irq(irq, func, IRQF_TIMER, "arch_mem_timer", &t->evt);
911         if (ret) {
912                 pr_err("arch_timer: Failed to request mem timer irq\n");
913                 kfree(t);
914         }
915
916         return ret;
917 }
918
/* DT compatibles for the per-cpu (CP15-accessed) architected timer */
static const struct of_device_id arch_timer_of_match[] __initconst = {
	{ .compatible	= "arm,armv7-timer",	},
	{ .compatible	= "arm,armv8-timer",	},
	{},
};
924
/* DT compatible for the memory-mapped timer frames (CNTCTLBase block) */
static const struct of_device_id arch_timer_mem_of_match[] __initconst = {
	{ .compatible	= "arm,armv7-timer-mem", },
	{},
};
929
930 static bool __init
931 arch_timer_needs_probing(int type, const struct of_device_id *matches)
932 {
933         struct device_node *dn;
934         bool needs_probing = false;
935
936         dn = of_find_matching_node(NULL, matches);
937         if (dn && of_device_is_available(dn) && !(arch_timers_present & type))
938                 needs_probing = true;
939         of_node_put(dn);
940
941         return needs_probing;
942 }
943
944 static int __init arch_timer_common_init(void)
945 {
946         unsigned mask = ARCH_CP15_TIMER | ARCH_MEM_TIMER;
947
948         /* Wait until both nodes are probed if we have two timers */
949         if ((arch_timers_present & mask) != mask) {
950                 if (arch_timer_needs_probing(ARCH_MEM_TIMER, arch_timer_mem_of_match))
951                         return 0;
952                 if (arch_timer_needs_probing(ARCH_CP15_TIMER, arch_timer_of_match))
953                         return 0;
954         }
955
956         arch_timer_banner(arch_timers_present);
957         arch_counter_register(arch_timers_present);
958         return arch_timer_arch_init();
959 }
960
/*
 * Common timer init entered from both the DT and ACPI probe paths after
 * arch_timer_ppi[] has been populated: choose which PPI the CP15 timer
 * will use, register the per-cpu event device plus the counter, and
 * publish the virtual timer irq for KVM.  Returns 0 or a negative errno.
 */
static int __init arch_timer_init(void)
{
	int ret;
	/*
	 * If HYP mode is available, we know that the physical timer
	 * has been configured to be accessible from PL1. Use it, so
	 * that a guest can use the virtual timer instead.
	 *
	 * If no interrupt provided for virtual timer, we'll have to
	 * stick to the physical timer. It'd better be accessible...
	 *
	 * On ARMv8.1 with VH extensions, the kernel runs in HYP. VHE
	 * accesses to CNTP_*_EL1 registers are silently redirected to
	 * their CNTHP_*_EL2 counterparts, and use a different PPI
	 * number.
	 */
	if (is_hyp_mode_available() || !arch_timer_ppi[VIRT_PPI]) {
		bool has_ppi;

		if (is_kernel_in_hyp_mode()) {
			/* VHE: physical timer accesses hit the EL2 timer */
			arch_timer_uses_ppi = HYP_PPI;
			has_ppi = !!arch_timer_ppi[HYP_PPI];
		} else {
			arch_timer_uses_ppi = PHYS_SECURE_PPI;
			/* Either the secure or non-secure PHYS PPI will do */
			has_ppi = (!!arch_timer_ppi[PHYS_SECURE_PPI] ||
				   !!arch_timer_ppi[PHYS_NONSECURE_PPI]);
		}

		if (!has_ppi) {
			pr_warn("arch_timer: No interrupt available, giving up\n");
			return -EINVAL;
		}
	}

	ret = arch_timer_register();
	if (ret)
		return ret;

	ret = arch_timer_common_init();
	if (ret)
		return ret;

	/* Tell KVM which PPI to forward as the guest's virtual timer irq */
	arch_timer_kvm_info.virtual_irq = arch_timer_ppi[VIRT_PPI];
	
	return 0;
}
1007
/*
 * DT probe for the per-cpu architected timer ("arm,armv7-timer" /
 * "arm,armv8-timer"): map the four PPIs, read the rate and the relevant
 * properties from @np, then hand off to the common arch_timer_init().
 * A second matching node is ignored with a warning.
 */
static int __init arch_timer_of_init(struct device_node *np)
{
	int i;

	if (arch_timers_present & ARCH_CP15_TIMER) {
		pr_warn("arch_timer: multiple nodes in dt, skipping\n");
		return 0;
	}

	arch_timers_present |= ARCH_CP15_TIMER;
	/* interrupts = <sec-phys nonsec-phys virt hyp> in DT order */
	for (i = PHYS_SECURE_PPI; i < MAX_TIMER_PPI; i++)
		arch_timer_ppi[i] = irq_of_parse_and_map(np, i);

	arch_timer_detect_rate(NULL, np);

	arch_timer_c3stop = !of_property_read_bool(np, "always-on");

	/* Check for globally applicable workarounds */
	arch_timer_check_ool_workaround(ate_match_dt, np);

	/*
	 * If we cannot rely on firmware initializing the timer registers then
	 * we should use the physical timers instead.
	 */
	if (IS_ENABLED(CONFIG_ARM) &&
	    of_property_read_bool(np, "arm,cpu-registers-not-fw-configured"))
		arch_timer_uses_ppi = PHYS_SECURE_PPI;

	/* On some systems, the counter stops ticking when in suspend. */
	arch_counter_suspend_stop = of_property_read_bool(np,
							 "arm,no-tick-in-suspend");

	return arch_timer_init();
}
1042 CLOCKSOURCE_OF_DECLARE(armv7_arch_timer, "arm,armv7-timer", arch_timer_of_init);
1043 CLOCKSOURCE_OF_DECLARE(armv8_arch_timer, "arm,armv8-timer", arch_timer_of_init);
1044
1045 static int __init arch_timer_mem_init(struct device_node *np)
1046 {
1047         struct device_node *frame, *best_frame = NULL;
1048         void __iomem *cntctlbase, *base;
1049         unsigned int irq, ret = -EINVAL;
1050         u32 cnttidr;
1051
1052         arch_timers_present |= ARCH_MEM_TIMER;
1053         cntctlbase = of_iomap(np, 0);
1054         if (!cntctlbase) {
1055                 pr_err("arch_timer: Can't find CNTCTLBase\n");
1056                 return -ENXIO;
1057         }
1058
1059         cnttidr = readl_relaxed(cntctlbase + CNTTIDR);
1060
1061         /*
1062          * Try to find a virtual capable frame. Otherwise fall back to a
1063          * physical capable frame.
1064          */
1065         for_each_available_child_of_node(np, frame) {
1066                 int n;
1067                 u32 cntacr;
1068
1069                 if (of_property_read_u32(frame, "frame-number", &n)) {
1070                         pr_err("arch_timer: Missing frame-number\n");
1071                         of_node_put(frame);
1072                         goto out;
1073                 }
1074
1075                 /* Try enabling everything, and see what sticks */
1076                 cntacr = CNTACR_RFRQ | CNTACR_RWPT | CNTACR_RPCT |
1077                          CNTACR_RWVT | CNTACR_RVOFF | CNTACR_RVCT;
1078                 writel_relaxed(cntacr, cntctlbase + CNTACR(n));
1079                 cntacr = readl_relaxed(cntctlbase + CNTACR(n));
1080
1081                 if ((cnttidr & CNTTIDR_VIRT(n)) &&
1082                     !(~cntacr & (CNTACR_RWVT | CNTACR_RVCT))) {
1083                         of_node_put(best_frame);
1084                         best_frame = frame;
1085                         arch_timer_mem_use_virtual = true;
1086                         break;
1087                 }
1088
1089                 if (~cntacr & (CNTACR_RWPT | CNTACR_RPCT))
1090                         continue;
1091
1092                 of_node_put(best_frame);
1093                 best_frame = of_node_get(frame);
1094         }
1095
1096         ret= -ENXIO;
1097         base = arch_counter_base = of_iomap(best_frame, 0);
1098         if (!base) {
1099                 pr_err("arch_timer: Can't map frame's registers\n");
1100                 goto out;
1101         }
1102
1103         if (arch_timer_mem_use_virtual)
1104                 irq = irq_of_parse_and_map(best_frame, 1);
1105         else
1106                 irq = irq_of_parse_and_map(best_frame, 0);
1107
1108         ret = -EINVAL;
1109         if (!irq) {
1110                 pr_err("arch_timer: Frame missing %s irq",
1111                        arch_timer_mem_use_virtual ? "virt" : "phys");
1112                 goto out;
1113         }
1114
1115         arch_timer_detect_rate(base, np);
1116         ret = arch_timer_mem_register(base, irq);
1117         if (ret)
1118                 goto out;
1119
1120         return arch_timer_common_init();
1121 out:
1122         iounmap(cntctlbase);
1123         of_node_put(best_frame);
1124         return ret;
1125 }
1126 CLOCKSOURCE_OF_DECLARE(armv7_arch_timer_mem, "arm,armv7-timer-mem",
1127                        arch_timer_mem_init);
1128
1129 #ifdef CONFIG_ACPI
1130 static int __init map_generic_timer_interrupt(u32 interrupt, u32 flags)
1131 {
1132         int trigger, polarity;
1133
1134         if (!interrupt)
1135                 return 0;
1136
1137         trigger = (flags & ACPI_GTDT_INTERRUPT_MODE) ? ACPI_EDGE_SENSITIVE
1138                         : ACPI_LEVEL_SENSITIVE;
1139
1140         polarity = (flags & ACPI_GTDT_INTERRUPT_POLARITY) ? ACPI_ACTIVE_LOW
1141                         : ACPI_ACTIVE_HIGH;
1142
1143         return acpi_register_gsi(NULL, interrupt, trigger, polarity);
1144 }
1145
1146 /* Initialize per-processor generic timer */
1147 static int __init arch_timer_acpi_init(struct acpi_table_header *table)
1148 {
1149         struct acpi_table_gtdt *gtdt;
1150
1151         if (arch_timers_present & ARCH_CP15_TIMER) {
1152                 pr_warn("arch_timer: already initialized, skipping\n");
1153                 return -EINVAL;
1154         }
1155
1156         gtdt = container_of(table, struct acpi_table_gtdt, header);
1157
1158         arch_timers_present |= ARCH_CP15_TIMER;
1159
1160         arch_timer_ppi[PHYS_SECURE_PPI] =
1161                 map_generic_timer_interrupt(gtdt->secure_el1_interrupt,
1162                 gtdt->secure_el1_flags);
1163
1164         arch_timer_ppi[PHYS_NONSECURE_PPI] =
1165                 map_generic_timer_interrupt(gtdt->non_secure_el1_interrupt,
1166                 gtdt->non_secure_el1_flags);
1167
1168         arch_timer_ppi[VIRT_PPI] =
1169                 map_generic_timer_interrupt(gtdt->virtual_timer_interrupt,
1170                 gtdt->virtual_timer_flags);
1171
1172         arch_timer_ppi[HYP_PPI] =
1173                 map_generic_timer_interrupt(gtdt->non_secure_el2_interrupt,
1174                 gtdt->non_secure_el2_flags);
1175
1176         /* Get the frequency from CNTFRQ */
1177         arch_timer_detect_rate(NULL, NULL);
1178
1179         /* Always-on capability */
1180         arch_timer_c3stop = !(gtdt->non_secure_el1_flags & ACPI_GTDT_ALWAYS_ON);
1181
1182         arch_timer_init();
1183         return 0;
1184 }
1185 CLOCKSOURCE_ACPI_DECLARE(arch_timer, ACPI_SIG_GTDT, arch_timer_acpi_init);
1186 #endif