/* GNU Linux-libre 4.19.314-gnu1 — releases.git: arch/arm/mach-imx/mmdc.c */
1 /*
2  * Copyright 2017 NXP
3  * Copyright 2011,2016 Freescale Semiconductor, Inc.
4  * Copyright 2011 Linaro Ltd.
5  *
6  * The code contained herein is licensed under the GNU General Public
7  * License. You may obtain a copy of the GNU General Public License
8  * Version 2 or later at the following locations:
9  *
10  * http://www.opensource.org/licenses/gpl-license.html
11  * http://www.gnu.org/copyleft/gpl.html
12  */
13
14 #include <linux/clk.h>
15 #include <linux/hrtimer.h>
16 #include <linux/init.h>
17 #include <linux/interrupt.h>
18 #include <linux/io.h>
19 #include <linux/module.h>
20 #include <linux/of.h>
21 #include <linux/of_address.h>
22 #include <linux/of_device.h>
23 #include <linux/perf_event.h>
24 #include <linux/slab.h>
25
26 #include "common.h"
27
/* Power-saving status/control register and bit positions within it */
#define MMDC_MAPSR              0x404
#define BP_MMDC_MAPSR_PSD       0
#define BP_MMDC_MAPSR_PSS       4

/* Misc register: DDR type field (mask and shift) */
#define MMDC_MDMISC             0x18
#define BM_MMDC_MDMISC_DDR_TYPE 0x18
#define BP_MMDC_MDMISC_DDR_TYPE 0x3

/* Profiling counter indices, used directly as perf event config values */
#define TOTAL_CYCLES            0x0
#define BUSY_CYCLES             0x1
#define READ_ACCESSES           0x2
#define WRITE_ACCESSES          0x3
#define READ_BYTES              0x4
#define WRITE_BYTES             0x5

/* Enables, resets, freezes, overflow profiling*/
#define DBG_DIS                 0x0
#define DBG_EN                  0x1
#define DBG_RST                 0x2
#define PRF_FRZ                 0x4
#define CYC_OVF                 0x8
#define PROFILE_SEL             0x10

/* Profiling control and per-counter status registers */
#define MMDC_MADPCR0    0x410
#define MMDC_MADPCR1    0x414
#define MMDC_MADPSR0    0x418
#define MMDC_MADPSR1    0x41C
#define MMDC_MADPSR2    0x420
#define MMDC_MADPSR3    0x424
#define MMDC_MADPSR4    0x428
#define MMDC_MADPSR5    0x42C

#define MMDC_NUM_COUNTERS       6

#define MMDC_FLAG_PROFILE_SEL   0x1
#define MMDC_PRF_AXI_ID_CLEAR   0x0

#define to_mmdc_pmu(p) container_of(p, struct mmdc_pmu, pmu)
66
/* DDR type read from MDMISC at probe; exposed via imx_mmdc_get_ddr_type(). */
static int ddr_type;

/* Per-SoC quirk flags (MMDC_FLAG_*). */
struct fsl_mmdc_devtype_data {
	unsigned int flags;
};

/* i.MX6Q: no profiling-select support */
static const struct fsl_mmdc_devtype_data imx6q_data = {
};

/* i.MX6QP: supports the PROFILE_SEL bit in MADPCR0 */
static const struct fsl_mmdc_devtype_data imx6qp_data = {
	.flags = MMDC_FLAG_PROFILE_SEL,
};
79
/* Device-tree match table; .data selects the per-SoC quirk flags. */
static const struct of_device_id imx_mmdc_dt_ids[] = {
	{ .compatible = "fsl,imx6q-mmdc", .data = (void *)&imx6q_data},
	{ .compatible = "fsl,imx6qp-mmdc", .data = (void *)&imx6qp_data},
	{ /* sentinel */ }
};
85
#ifdef CONFIG_PERF_EVENTS

/* Dynamic cpuhp state shared by all MMDC instances; 0 until first probe. */
static enum cpuhp_state cpuhp_mmdc_state;
/* Allocates the per-instance ids used for the "mmdc%d" PMU names. */
static DEFINE_IDA(mmdc_ida);

/* sysfs event descriptions; event numbers match the counter indices above. */
PMU_EVENT_ATTR_STRING(total-cycles, mmdc_pmu_total_cycles, "event=0x00")
PMU_EVENT_ATTR_STRING(busy-cycles, mmdc_pmu_busy_cycles, "event=0x01")
PMU_EVENT_ATTR_STRING(read-accesses, mmdc_pmu_read_accesses, "event=0x02")
PMU_EVENT_ATTR_STRING(write-accesses, mmdc_pmu_write_accesses, "event=0x03")
PMU_EVENT_ATTR_STRING(read-bytes, mmdc_pmu_read_bytes, "event=0x04")
PMU_EVENT_ATTR_STRING(read-bytes.unit, mmdc_pmu_read_bytes_unit, "MB");
PMU_EVENT_ATTR_STRING(read-bytes.scale, mmdc_pmu_read_bytes_scale, "0.000001");
PMU_EVENT_ATTR_STRING(write-bytes, mmdc_pmu_write_bytes, "event=0x05")
PMU_EVENT_ATTR_STRING(write-bytes.unit, mmdc_pmu_write_bytes_unit, "MB");
PMU_EVENT_ATTR_STRING(write-bytes.scale, mmdc_pmu_write_bytes_scale, "0.000001");
101
/* Per-MMDC-instance PMU state. */
struct mmdc_pmu {
	struct pmu pmu;			/* core perf PMU, embedded first for to_mmdc_pmu() */
	void __iomem *mmdc_base;	/* mapped MMDC register block */
	cpumask_t cpu;			/* single CPU this instance is bound to */
	struct hrtimer hrtimer;		/* polling timer; MMDC has no overflow IRQ */
	unsigned int active_events;	/* number of currently installed events */
	int id;				/* instance id from mmdc_ida */
	struct device *dev;
	struct perf_event *mmdc_events[MMDC_NUM_COUNTERS]; /* one slot per fixed counter */
	struct hlist_node node;		/* cpuhp multi-instance link */
	struct fsl_mmdc_devtype_data *devtype_data;
	struct clk *mmdc_ipg_clk;
};

/*
 * Polling period is set to one second, overflow of total-cycles (the fastest
 * increasing counter) takes ten seconds so one second is safe
 */
static unsigned int mmdc_pmu_poll_period_us = 1000000;

module_param_named(pmu_pmu_poll_period_us, mmdc_pmu_poll_period_us, uint,
		S_IRUGO | S_IWUSR);
124
125 static ktime_t mmdc_pmu_timer_period(void)
126 {
127         return ns_to_ktime((u64)mmdc_pmu_poll_period_us * 1000);
128 }
129
130 static ssize_t mmdc_pmu_cpumask_show(struct device *dev,
131                 struct device_attribute *attr, char *buf)
132 {
133         struct mmdc_pmu *pmu_mmdc = dev_get_drvdata(dev);
134
135         return cpumap_print_to_pagebuf(true, buf, &pmu_mmdc->cpu);
136 }
137
/* Read-only "cpumask" attribute in the PMU's sysfs directory. */
static struct device_attribute mmdc_pmu_cpumask_attr =
	__ATTR(cpumask, S_IRUGO, mmdc_pmu_cpumask_show, NULL);

static struct attribute *mmdc_pmu_cpumask_attrs[] = {
	&mmdc_pmu_cpumask_attr.attr,
	NULL,
};

static struct attribute_group mmdc_pmu_cpumask_attr_group = {
	.attrs = mmdc_pmu_cpumask_attrs,
};

/* "events" directory: the six fixed counters plus unit/scale for bytes. */
static struct attribute *mmdc_pmu_events_attrs[] = {
	&mmdc_pmu_total_cycles.attr.attr,
	&mmdc_pmu_busy_cycles.attr.attr,
	&mmdc_pmu_read_accesses.attr.attr,
	&mmdc_pmu_write_accesses.attr.attr,
	&mmdc_pmu_read_bytes.attr.attr,
	&mmdc_pmu_read_bytes_unit.attr.attr,
	&mmdc_pmu_read_bytes_scale.attr.attr,
	&mmdc_pmu_write_bytes.attr.attr,
	&mmdc_pmu_write_bytes_unit.attr.attr,
	&mmdc_pmu_write_bytes_scale.attr.attr,
	NULL,
};

static struct attribute_group mmdc_pmu_events_attr_group = {
	.name = "events",
	.attrs = mmdc_pmu_events_attrs,
};

/* "format" directory: config encodes the event, config1 the AXI id filter. */
PMU_FORMAT_ATTR(event, "config:0-63");
PMU_FORMAT_ATTR(axi_id, "config1:0-63");

static struct attribute *mmdc_pmu_format_attrs[] = {
	&format_attr_event.attr,
	&format_attr_axi_id.attr,
	NULL,
};

static struct attribute_group mmdc_pmu_format_attr_group = {
	.name = "format",
	.attrs = mmdc_pmu_format_attrs,
};

static const struct attribute_group *attr_groups[] = {
	&mmdc_pmu_events_attr_group,
	&mmdc_pmu_format_attr_group,
	&mmdc_pmu_cpumask_attr_group,
	NULL,
};
189
190 static u32 mmdc_pmu_read_counter(struct mmdc_pmu *pmu_mmdc, int cfg)
191 {
192         void __iomem *mmdc_base, *reg;
193
194         mmdc_base = pmu_mmdc->mmdc_base;
195
196         switch (cfg) {
197         case TOTAL_CYCLES:
198                 reg = mmdc_base + MMDC_MADPSR0;
199                 break;
200         case BUSY_CYCLES:
201                 reg = mmdc_base + MMDC_MADPSR1;
202                 break;
203         case READ_ACCESSES:
204                 reg = mmdc_base + MMDC_MADPSR2;
205                 break;
206         case WRITE_ACCESSES:
207                 reg = mmdc_base + MMDC_MADPSR3;
208                 break;
209         case READ_BYTES:
210                 reg = mmdc_base + MMDC_MADPSR4;
211                 break;
212         case WRITE_BYTES:
213                 reg = mmdc_base + MMDC_MADPSR5;
214                 break;
215         default:
216                 return WARN_ONCE(1,
217                         "invalid configuration %d for mmdc counter", cfg);
218         }
219         return readl(reg);
220 }
221
222 static int mmdc_pmu_offline_cpu(unsigned int cpu, struct hlist_node *node)
223 {
224         struct mmdc_pmu *pmu_mmdc = hlist_entry_safe(node, struct mmdc_pmu, node);
225         int target;
226
227         if (!cpumask_test_and_clear_cpu(cpu, &pmu_mmdc->cpu))
228                 return 0;
229
230         target = cpumask_any_but(cpu_online_mask, cpu);
231         if (target >= nr_cpu_ids)
232                 return 0;
233
234         perf_pmu_migrate_context(&pmu_mmdc->pmu, cpu, target);
235         cpumask_set_cpu(target, &pmu_mmdc->cpu);
236
237         return 0;
238 }
239
240 static bool mmdc_pmu_group_event_is_valid(struct perf_event *event,
241                                           struct pmu *pmu,
242                                           unsigned long *used_counters)
243 {
244         int cfg = event->attr.config;
245
246         if (is_software_event(event))
247                 return true;
248
249         if (event->pmu != pmu)
250                 return false;
251
252         return !test_and_set_bit(cfg, used_counters);
253 }
254
255 /*
256  * Each event has a single fixed-purpose counter, so we can only have a
257  * single active event for each at any point in time. Here we just check
258  * for duplicates, and rely on mmdc_pmu_event_init to verify that the HW
259  * event numbers are valid.
260  */
261 static bool mmdc_pmu_group_is_valid(struct perf_event *event)
262 {
263         struct pmu *pmu = event->pmu;
264         struct perf_event *leader = event->group_leader;
265         struct perf_event *sibling;
266         unsigned long counter_mask = 0;
267
268         set_bit(leader->attr.config, &counter_mask);
269
270         if (event != leader) {
271                 if (!mmdc_pmu_group_event_is_valid(event, pmu, &counter_mask))
272                         return false;
273         }
274
275         for_each_sibling_event(sibling, leader) {
276                 if (!mmdc_pmu_group_event_is_valid(sibling, pmu, &counter_mask))
277                         return false;
278         }
279
280         return true;
281 }
282
283 static int mmdc_pmu_event_init(struct perf_event *event)
284 {
285         struct mmdc_pmu *pmu_mmdc = to_mmdc_pmu(event->pmu);
286         int cfg = event->attr.config;
287
288         if (event->attr.type != event->pmu->type)
289                 return -ENOENT;
290
291         if (is_sampling_event(event) || event->attach_state & PERF_ATTACH_TASK)
292                 return -EOPNOTSUPP;
293
294         if (event->cpu < 0) {
295                 dev_warn(pmu_mmdc->dev, "Can't provide per-task data!\n");
296                 return -EOPNOTSUPP;
297         }
298
299         if (event->attr.exclude_user            ||
300                         event->attr.exclude_kernel      ||
301                         event->attr.exclude_hv          ||
302                         event->attr.exclude_idle        ||
303                         event->attr.exclude_host        ||
304                         event->attr.exclude_guest       ||
305                         event->attr.sample_period)
306                 return -EINVAL;
307
308         if (cfg < 0 || cfg >= MMDC_NUM_COUNTERS)
309                 return -EINVAL;
310
311         if (!mmdc_pmu_group_is_valid(event))
312                 return -EINVAL;
313
314         event->cpu = cpumask_first(&pmu_mmdc->cpu);
315         return 0;
316 }
317
/*
 * Fold the counter's current raw value into event->count.  The cmpxchg
 * loop guards hwc->prev_count against a concurrent updater (e.g. the
 * polling hrtimer racing with a perf read).
 */
static void mmdc_pmu_event_update(struct perf_event *event)
{
	struct mmdc_pmu *pmu_mmdc = to_mmdc_pmu(event->pmu);
	struct hw_perf_event *hwc = &event->hw;
	u64 delta, prev_raw_count, new_raw_count;

	do {
		prev_raw_count = local64_read(&hwc->prev_count);
		new_raw_count = mmdc_pmu_read_counter(pmu_mmdc,
						      event->attr.config);
	} while (local64_cmpxchg(&hwc->prev_count, prev_raw_count,
		new_raw_count) != prev_raw_count);

	/* Counters are 32 bit wide; the mask handles wrap-around. */
	delta = (new_raw_count - prev_raw_count) & 0xFFFFFFFF;

	local64_add(delta, &event->count);
}
335
/*
 * Start counting: arm the polling timer, reset the profiling counters,
 * program the AXI id filter and enable profiling.  The register write
 * order (RST before EN) matters.
 */
static void mmdc_pmu_event_start(struct perf_event *event, int flags)
{
	struct mmdc_pmu *pmu_mmdc = to_mmdc_pmu(event->pmu);
	struct hw_perf_event *hwc = &event->hw;
	void __iomem *mmdc_base, *reg;
	u32 val;

	mmdc_base = pmu_mmdc->mmdc_base;
	reg = mmdc_base + MMDC_MADPCR0;

	/*
	 * hrtimer is required because mmdc does not provide an interrupt so
	 * polling is necessary
	 */
	hrtimer_start(&pmu_mmdc->hrtimer, mmdc_pmu_timer_period(),
			HRTIMER_MODE_REL_PINNED);

	/* Counters restart from zero after the reset below. */
	local64_set(&hwc->prev_count, 0);

	writel(DBG_RST, reg);

	/*
	 * Write the AXI id parameter to MADPCR1.
	 */
	val = event->attr.config1;
	reg = mmdc_base + MMDC_MADPCR1;
	writel(val, reg);

	/* Enable profiling; i.MX6QP additionally needs PROFILE_SEL set. */
	reg = mmdc_base + MMDC_MADPCR0;
	val = DBG_EN;
	if (pmu_mmdc->devtype_data->flags & MMDC_FLAG_PROFILE_SEL)
		val |= PROFILE_SEL;

	writel(val, reg);
}
371
372 static int mmdc_pmu_event_add(struct perf_event *event, int flags)
373 {
374         struct mmdc_pmu *pmu_mmdc = to_mmdc_pmu(event->pmu);
375         struct hw_perf_event *hwc = &event->hw;
376
377         int cfg = event->attr.config;
378
379         if (flags & PERF_EF_START)
380                 mmdc_pmu_event_start(event, flags);
381
382         if (pmu_mmdc->mmdc_events[cfg] != NULL)
383                 return -EAGAIN;
384
385         pmu_mmdc->mmdc_events[cfg] = event;
386         pmu_mmdc->active_events++;
387
388         local64_set(&hwc->prev_count, mmdc_pmu_read_counter(pmu_mmdc, cfg));
389
390         return 0;
391 }
392
/*
 * Stop counting: freeze the profiling counters, clear the AXI id filter,
 * then fold the final counter values into the event.
 */
static void mmdc_pmu_event_stop(struct perf_event *event, int flags)
{
	struct mmdc_pmu *pmu_mmdc = to_mmdc_pmu(event->pmu);
	void __iomem *mmdc_base, *reg;

	mmdc_base = pmu_mmdc->mmdc_base;
	reg = mmdc_base + MMDC_MADPCR0;

	/* Freeze first so the final read below is stable. */
	writel(PRF_FRZ, reg);

	reg = mmdc_base + MMDC_MADPCR1;
	writel(MMDC_PRF_AXI_ID_CLEAR, reg);

	mmdc_pmu_event_update(event);
}
408
/*
 * Release the event's counter slot; stop polling when the last event goes
 * away, then freeze the hardware and record the final count.
 */
static void mmdc_pmu_event_del(struct perf_event *event, int flags)
{
	struct mmdc_pmu *pmu_mmdc = to_mmdc_pmu(event->pmu);
	int cfg = event->attr.config;

	/* Clear the slot first so the hrtimer no longer updates this event. */
	pmu_mmdc->mmdc_events[cfg] = NULL;
	pmu_mmdc->active_events--;

	if (pmu_mmdc->active_events == 0)
		hrtimer_cancel(&pmu_mmdc->hrtimer);

	mmdc_pmu_event_stop(event, PERF_EF_UPDATE);
}
422
423 static void mmdc_pmu_overflow_handler(struct mmdc_pmu *pmu_mmdc)
424 {
425         int i;
426
427         for (i = 0; i < MMDC_NUM_COUNTERS; i++) {
428                 struct perf_event *event = pmu_mmdc->mmdc_events[i];
429
430                 if (event)
431                         mmdc_pmu_event_update(event);
432         }
433 }
434
435 static enum hrtimer_restart mmdc_pmu_timer_handler(struct hrtimer *hrtimer)
436 {
437         struct mmdc_pmu *pmu_mmdc = container_of(hrtimer, struct mmdc_pmu,
438                         hrtimer);
439
440         mmdc_pmu_overflow_handler(pmu_mmdc);
441         hrtimer_forward_now(hrtimer, mmdc_pmu_timer_period());
442
443         return HRTIMER_RESTART;
444 }
445
/*
 * Initialize one mmdc_pmu instance.  The compound-literal assignment
 * deliberately zeroes every field not listed (cpumask, hrtimer, event
 * slots, ...).  Returns the allocated instance id, or a negative errno
 * from ida_simple_get() on failure.
 */
static int mmdc_pmu_init(struct mmdc_pmu *pmu_mmdc,
		void __iomem *mmdc_base, struct device *dev)
{
	*pmu_mmdc = (struct mmdc_pmu) {
		.pmu = (struct pmu) {
			.task_ctx_nr    = perf_invalid_context,
			.attr_groups    = attr_groups,
			.event_init     = mmdc_pmu_event_init,
			.add            = mmdc_pmu_event_add,
			.del            = mmdc_pmu_event_del,
			.start          = mmdc_pmu_event_start,
			.stop           = mmdc_pmu_event_stop,
			.read           = mmdc_pmu_event_update,
		},
		.mmdc_base = mmdc_base,
		.dev = dev,
		.active_events = 0,
	};

	pmu_mmdc->id = ida_simple_get(&mmdc_ida, 0, 0, GFP_KERNEL);

	return pmu_mmdc->id;
}
469
/*
 * Driver removal: tear down in reverse order of imx_mmdc_perf_init() /
 * imx_mmdc_probe() — release the id, drop the cpuhp instance, unregister
 * the PMU, then unmap the registers and disable the clock.
 */
static int imx_mmdc_remove(struct platform_device *pdev)
{
	struct mmdc_pmu *pmu_mmdc = platform_get_drvdata(pdev);

	ida_simple_remove(&mmdc_ida, pmu_mmdc->id);
	cpuhp_state_remove_instance_nocalls(cpuhp_mmdc_state, &pmu_mmdc->node);
	perf_pmu_unregister(&pmu_mmdc->pmu);
	iounmap(pmu_mmdc->mmdc_base);
	clk_disable_unprepare(pmu_mmdc->mmdc_ipg_clk);
	kfree(pmu_mmdc);
	return 0;
}
482
/*
 * Register the perf PMU for one MMDC instance: allocate state, set up the
 * (shared) cpuhp callback on first use, pick an instance id/name, bind the
 * instance to the probing CPU and register with the perf core.  Error paths
 * unwind via the goto labels in reverse acquisition order.
 */
static int imx_mmdc_perf_init(struct platform_device *pdev, void __iomem *mmdc_base,
			      struct clk *mmdc_ipg_clk)
{
	struct mmdc_pmu *pmu_mmdc;
	char *name;
	int ret;
	const struct of_device_id *of_id =
		of_match_device(imx_mmdc_dt_ids, &pdev->dev);

	pmu_mmdc = kzalloc(sizeof(*pmu_mmdc), GFP_KERNEL);
	if (!pmu_mmdc) {
		pr_err("failed to allocate PMU device!\n");
		return -ENOMEM;
	}

	/* The first instance registers the hotplug state */
	if (!cpuhp_mmdc_state) {
		ret = cpuhp_setup_state_multi(CPUHP_AP_ONLINE_DYN,
					      "perf/arm/mmdc:online", NULL,
					      mmdc_pmu_offline_cpu);
		if (ret < 0) {
			pr_err("cpuhp_setup_state_multi failed\n");
			goto pmu_free;
		}
		cpuhp_mmdc_state = ret;
	}

	/* On success ret is the instance id used in the PMU name below. */
	ret = mmdc_pmu_init(pmu_mmdc, mmdc_base, &pdev->dev);
	if (ret < 0)
		goto  pmu_free;

	name = devm_kasprintf(&pdev->dev,
				GFP_KERNEL, "mmdc%d", ret);
	if (!name) {
		ret = -ENOMEM;
		goto pmu_release_id;
	}

	pmu_mmdc->mmdc_ipg_clk = mmdc_ipg_clk;
	pmu_mmdc->devtype_data = (struct fsl_mmdc_devtype_data *)of_id->data;

	hrtimer_init(&pmu_mmdc->hrtimer, CLOCK_MONOTONIC,
			HRTIMER_MODE_REL);
	pmu_mmdc->hrtimer.function = mmdc_pmu_timer_handler;

	/* Bind the instance to the CPU running probe. */
	cpumask_set_cpu(raw_smp_processor_id(), &pmu_mmdc->cpu);

	/* Register the pmu instance for cpu hotplug */
	cpuhp_state_add_instance_nocalls(cpuhp_mmdc_state, &pmu_mmdc->node);

	ret = perf_pmu_register(&(pmu_mmdc->pmu), name, -1);
	if (ret)
		goto pmu_register_err;

	platform_set_drvdata(pdev, pmu_mmdc);
	return 0;

pmu_register_err:
	pr_warn("MMDC Perf PMU failed (%d), disabled\n", ret);
	cpuhp_state_remove_instance_nocalls(cpuhp_mmdc_state, &pmu_mmdc->node);
	hrtimer_cancel(&pmu_mmdc->hrtimer);
pmu_release_id:
	ida_simple_remove(&mmdc_ida, pmu_mmdc->id);
pmu_free:
	kfree(pmu_mmdc);
	return ret;
}
550
#else
/* Stubs when CONFIG_PERF_EVENTS is disabled: no remove hook, init is a no-op. */
#define imx_mmdc_remove NULL
#define imx_mmdc_perf_init(pdev, mmdc_base, mmdc_ipg_clk) 0
#endif
555
556 static int imx_mmdc_probe(struct platform_device *pdev)
557 {
558         struct device_node *np = pdev->dev.of_node;
559         void __iomem *mmdc_base, *reg;
560         struct clk *mmdc_ipg_clk;
561         u32 val;
562         int err;
563
564         /* the ipg clock is optional */
565         mmdc_ipg_clk = devm_clk_get(&pdev->dev, NULL);
566         if (IS_ERR(mmdc_ipg_clk))
567                 mmdc_ipg_clk = NULL;
568
569         err = clk_prepare_enable(mmdc_ipg_clk);
570         if (err) {
571                 dev_err(&pdev->dev, "Unable to enable mmdc ipg clock.\n");
572                 return err;
573         }
574
575         mmdc_base = of_iomap(np, 0);
576         WARN_ON(!mmdc_base);
577
578         reg = mmdc_base + MMDC_MDMISC;
579         /* Get ddr type */
580         val = readl_relaxed(reg);
581         ddr_type = (val & BM_MMDC_MDMISC_DDR_TYPE) >>
582                  BP_MMDC_MDMISC_DDR_TYPE;
583
584         reg = mmdc_base + MMDC_MAPSR;
585
586         /* Enable automatic power saving */
587         val = readl_relaxed(reg);
588         val &= ~(1 << BP_MMDC_MAPSR_PSD);
589         writel_relaxed(val, reg);
590
591         err = imx_mmdc_perf_init(pdev, mmdc_base, mmdc_ipg_clk);
592         if (err) {
593                 iounmap(mmdc_base);
594                 clk_disable_unprepare(mmdc_ipg_clk);
595         }
596
597         return err;
598 }
599
/* Return the DDR type cached from MDMISC during probe (see common.h). */
int imx_mmdc_get_ddr_type(void)
{
	return ddr_type;
}
604
static struct platform_driver imx_mmdc_driver = {
	.driver         = {
		.name   = "imx-mmdc",
		.of_match_table = imx_mmdc_dt_ids,
	},
	.probe          = imx_mmdc_probe,
	/* NULL when CONFIG_PERF_EVENTS is disabled (see stub above). */
	.remove         = imx_mmdc_remove,
};

static int __init imx_mmdc_init(void)
{
	return platform_driver_register(&imx_mmdc_driver);
}
/* postcore: other drivers may query the DDR type early during boot. */
postcore_initcall(imx_mmdc_init);