// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright 2017 NXP
 * Copyright 2011,2016 Freescale Semiconductor, Inc.
 * Copyright 2011 Linaro Ltd.
 */

#include <linux/clk.h>
#include <linux/hrtimer.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_device.h>
#include <linux/perf_event.h>
#include <linux/slab.h>

#include "common.h"

#define MMDC_MAPSR		0x404
#define BP_MMDC_MAPSR_PSD	0
#define BP_MMDC_MAPSR_PSS	4

#define MMDC_MDMISC		0x18
#define BM_MMDC_MDMISC_DDR_TYPE	0x18
#define BP_MMDC_MDMISC_DDR_TYPE	0x3

#define TOTAL_CYCLES		0x0
#define BUSY_CYCLES		0x1
#define READ_ACCESSES		0x2
#define WRITE_ACCESSES		0x3
#define READ_BYTES		0x4
#define WRITE_BYTES		0x5

/* Enables, resets, freezes, overflow profiling */
#define DBG_DIS			0x0
#define DBG_EN			0x1
#define DBG_RST			0x2
#define PRF_FRZ			0x4
#define CYC_OVF			0x8
#define PROFILE_SEL		0x10

#define MMDC_MADPCR0	0x410
#define MMDC_MADPCR1	0x414
#define MMDC_MADPSR0	0x418
#define MMDC_MADPSR1	0x41C
#define MMDC_MADPSR2	0x420
#define MMDC_MADPSR3	0x424
#define MMDC_MADPSR4	0x428
#define MMDC_MADPSR5	0x42C

#define MMDC_NUM_COUNTERS	6

#define MMDC_FLAG_PROFILE_SEL	0x1
#define MMDC_PRF_AXI_ID_CLEAR	0x0

#define to_mmdc_pmu(p) container_of(p, struct mmdc_pmu, pmu)

static int ddr_type;

struct fsl_mmdc_devtype_data {
	unsigned int flags;
};

static const struct fsl_mmdc_devtype_data imx6q_data = {
};

static const struct fsl_mmdc_devtype_data imx6qp_data = {
	.flags = MMDC_FLAG_PROFILE_SEL,
};

static const struct of_device_id imx_mmdc_dt_ids[] = {
	{ .compatible = "fsl,imx6q-mmdc", .data = (void *)&imx6q_data},
	{ .compatible = "fsl,imx6qp-mmdc", .data = (void *)&imx6qp_data},
	{ /* sentinel */ }
};

#ifdef CONFIG_PERF_EVENTS

static enum cpuhp_state cpuhp_mmdc_state;
static DEFINE_IDA(mmdc_ida);

PMU_EVENT_ATTR_STRING(total-cycles, mmdc_pmu_total_cycles, "event=0x00")
PMU_EVENT_ATTR_STRING(busy-cycles, mmdc_pmu_busy_cycles, "event=0x01")
PMU_EVENT_ATTR_STRING(read-accesses, mmdc_pmu_read_accesses, "event=0x02")
PMU_EVENT_ATTR_STRING(write-accesses, mmdc_pmu_write_accesses, "event=0x03")
PMU_EVENT_ATTR_STRING(read-bytes, mmdc_pmu_read_bytes, "event=0x04")
PMU_EVENT_ATTR_STRING(read-bytes.unit, mmdc_pmu_read_bytes_unit, "MB");
PMU_EVENT_ATTR_STRING(read-bytes.scale, mmdc_pmu_read_bytes_scale, "0.000001");
PMU_EVENT_ATTR_STRING(write-bytes, mmdc_pmu_write_bytes, "event=0x05")
PMU_EVENT_ATTR_STRING(write-bytes.unit, mmdc_pmu_write_bytes_unit, "MB");
PMU_EVENT_ATTR_STRING(write-bytes.scale, mmdc_pmu_write_bytes_scale, "0.000001");
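
/*
 * Illustration: the strings above are exported through sysfs, so the
 * events can be listed and counted from userspace (the first controller
 * registers as "mmdc", later ones as "mmdc<N>"; exact paths depend on
 * the running kernel). The .unit/.scale pairs make perf print the byte
 * counters as megabytes. For example:
 *
 *   ls /sys/bus/event_source/devices/mmdc/events/
 *   perf stat -a -e mmdc/total-cycles/,mmdc/read-bytes/ sleep 1
 */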

struct mmdc_pmu {
	struct pmu pmu;
	void __iomem *mmdc_base;
	cpumask_t cpu;
	struct hrtimer hrtimer;
	unsigned int active_events;
	struct device *dev;
	struct perf_event *mmdc_events[MMDC_NUM_COUNTERS];
	struct hlist_node node;
	struct fsl_mmdc_devtype_data *devtype_data;
	struct clk *mmdc_ipg_clk;
};

/*
 * Polling period is set to one second, overflow of total-cycles (the fastest
 * increasing counter) takes ten seconds, so one second is safe.
 */
static unsigned int mmdc_pmu_poll_period_us = 1000000;

module_param_named(pmu_pmu_poll_period_us, mmdc_pmu_poll_period_us, uint,
		S_IRUGO | S_IWUSR);
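
/*
 * Worked numbers behind the comment above, assuming a 528 MHz MMDC clock
 * (typical for i.MX6Q): the 32-bit total-cycles counter wraps after
 * 2^32 / 528000000 ~= 8.1 seconds, so polling once per second samples
 * each counter roughly eight times per wrap period.
 */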

static ktime_t mmdc_pmu_timer_period(void)
{
	return ns_to_ktime((u64)mmdc_pmu_poll_period_us * 1000);
}

static ssize_t mmdc_pmu_cpumask_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct mmdc_pmu *pmu_mmdc = dev_get_drvdata(dev);

	return cpumap_print_to_pagebuf(true, buf, &pmu_mmdc->cpu);
}

static struct device_attribute mmdc_pmu_cpumask_attr =
	__ATTR(cpumask, S_IRUGO, mmdc_pmu_cpumask_show, NULL);

static struct attribute *mmdc_pmu_cpumask_attrs[] = {
	&mmdc_pmu_cpumask_attr.attr,
	NULL,
};

static struct attribute_group mmdc_pmu_cpumask_attr_group = {
	.attrs = mmdc_pmu_cpumask_attrs,
};

static struct attribute *mmdc_pmu_events_attrs[] = {
	&mmdc_pmu_total_cycles.attr.attr,
	&mmdc_pmu_busy_cycles.attr.attr,
	&mmdc_pmu_read_accesses.attr.attr,
	&mmdc_pmu_write_accesses.attr.attr,
	&mmdc_pmu_read_bytes.attr.attr,
	&mmdc_pmu_read_bytes_unit.attr.attr,
	&mmdc_pmu_read_bytes_scale.attr.attr,
	&mmdc_pmu_write_bytes.attr.attr,
	&mmdc_pmu_write_bytes_unit.attr.attr,
	&mmdc_pmu_write_bytes_scale.attr.attr,
	NULL,
};

static struct attribute_group mmdc_pmu_events_attr_group = {
	.name = "events",
	.attrs = mmdc_pmu_events_attrs,
};

PMU_FORMAT_ATTR(event, "config:0-63");
PMU_FORMAT_ATTR(axi_id, "config1:0-63");

static struct attribute *mmdc_pmu_format_attrs[] = {
	&format_attr_event.attr,
	&format_attr_axi_id.attr,
	NULL,
};

static struct attribute_group mmdc_pmu_format_attr_group = {
	.name = "format",
	.attrs = mmdc_pmu_format_attrs,
};

static const struct attribute_group *attr_groups[] = {
	&mmdc_pmu_events_attr_group,
	&mmdc_pmu_format_attr_group,
	&mmdc_pmu_cpumask_attr_group,
	NULL,
};

static u32 mmdc_pmu_read_counter(struct mmdc_pmu *pmu_mmdc, int cfg)
{
	void __iomem *mmdc_base, *reg;

	mmdc_base = pmu_mmdc->mmdc_base;

	switch (cfg) {
	case TOTAL_CYCLES:
		reg = mmdc_base + MMDC_MADPSR0;
		break;
	case BUSY_CYCLES:
		reg = mmdc_base + MMDC_MADPSR1;
		break;
	case READ_ACCESSES:
		reg = mmdc_base + MMDC_MADPSR2;
		break;
	case WRITE_ACCESSES:
		reg = mmdc_base + MMDC_MADPSR3;
		break;
	case READ_BYTES:
		reg = mmdc_base + MMDC_MADPSR4;
		break;
	case WRITE_BYTES:
		reg = mmdc_base + MMDC_MADPSR5;
		break;
	default:
		return WARN_ONCE(1,
			"invalid configuration %d for mmdc counter", cfg);
	}
	return readl(reg);
}

static int mmdc_pmu_offline_cpu(unsigned int cpu, struct hlist_node *node)
{
	struct mmdc_pmu *pmu_mmdc = hlist_entry_safe(node, struct mmdc_pmu, node);
	int target;

	if (!cpumask_test_and_clear_cpu(cpu, &pmu_mmdc->cpu))
		return 0;

	target = cpumask_any_but(cpu_online_mask, cpu);
	if (target >= nr_cpu_ids)
		return 0;

	perf_pmu_migrate_context(&pmu_mmdc->pmu, cpu, target);
	cpumask_set_cpu(target, &pmu_mmdc->cpu);

	return 0;
}

static bool mmdc_pmu_group_event_is_valid(struct perf_event *event,
					  struct pmu *pmu,
					  unsigned long *used_counters)
{
	int cfg = event->attr.config;

	if (is_software_event(event))
		return true;

	if (event->pmu != pmu)
		return false;

	return !test_and_set_bit(cfg, used_counters);
}

/*
 * Each event has a single fixed-purpose counter, so we can only have a
 * single active event for each at any point in time. Here we just check
 * for duplicates, and rely on mmdc_pmu_event_init to verify that the HW
 * event numbers are valid.
 */
static bool mmdc_pmu_group_is_valid(struct perf_event *event)
{
	struct pmu *pmu = event->pmu;
	struct perf_event *leader = event->group_leader;
	struct perf_event *sibling;
	unsigned long counter_mask = 0;

	set_bit(leader->attr.config, &counter_mask);

	if (event != leader) {
		if (!mmdc_pmu_group_event_is_valid(event, pmu, &counter_mask))
			return false;
	}

	for_each_sibling_event(sibling, leader) {
		if (!mmdc_pmu_group_event_is_valid(sibling, pmu, &counter_mask))
			return false;
	}

	return true;
}
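
/*
 * Illustrative example of the policy above: a group that pairs two
 * different fixed-purpose counters is accepted,
 *
 *   perf stat -a -e '{mmdc/total-cycles/,mmdc/busy-cycles/}' sleep 1
 *
 * while a group that requests the same event twice fails the
 * test_and_set_bit() duplicate check in mmdc_pmu_group_event_is_valid().
 */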

static int mmdc_pmu_event_init(struct perf_event *event)
{
	struct mmdc_pmu *pmu_mmdc = to_mmdc_pmu(event->pmu);
	int cfg = event->attr.config;

	if (event->attr.type != event->pmu->type)
		return -ENOENT;

	if (is_sampling_event(event) || event->attach_state & PERF_ATTACH_TASK)
		return -EOPNOTSUPP;

	if (event->cpu < 0) {
		dev_warn(pmu_mmdc->dev, "Can't provide per-task data!\n");
		return -EOPNOTSUPP;
	}

	if (event->attr.sample_period)
		return -EINVAL;

	if (cfg < 0 || cfg >= MMDC_NUM_COUNTERS)
		return -EINVAL;

	if (!mmdc_pmu_group_is_valid(event))
		return -EINVAL;

	event->cpu = cpumask_first(&pmu_mmdc->cpu);

	return 0;
}

static void mmdc_pmu_event_update(struct perf_event *event)
{
	struct mmdc_pmu *pmu_mmdc = to_mmdc_pmu(event->pmu);
	struct hw_perf_event *hwc = &event->hw;
	u64 delta, prev_raw_count, new_raw_count;

	do {
		prev_raw_count = local64_read(&hwc->prev_count);
		new_raw_count = mmdc_pmu_read_counter(pmu_mmdc,
						      event->attr.config);
	} while (local64_cmpxchg(&hwc->prev_count, prev_raw_count,
		new_raw_count) != prev_raw_count);

	delta = (new_raw_count - prev_raw_count) & 0xFFFFFFFF;

	local64_add(delta, &event->count);
}
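
/*
 * The profiling counters are 32 bits wide, so masking the delta above to
 * 32 bits makes wrap-around come out right: for example,
 * prev_raw_count = 0xfffffff0 and new_raw_count = 0x00000010 give
 * (new - prev) & 0xFFFFFFFF = 0x20, i.e. 32 elapsed units.
 */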

static void mmdc_pmu_event_start(struct perf_event *event, int flags)
{
	struct mmdc_pmu *pmu_mmdc = to_mmdc_pmu(event->pmu);
	struct hw_perf_event *hwc = &event->hw;
	void __iomem *mmdc_base, *reg;
	u32 val;

	mmdc_base = pmu_mmdc->mmdc_base;
	reg = mmdc_base + MMDC_MADPCR0;

	/*
	 * hrtimer is required because mmdc does not provide an interrupt so
	 * polling is necessary
	 */
	hrtimer_start(&pmu_mmdc->hrtimer, mmdc_pmu_timer_period(),
			HRTIMER_MODE_REL_PINNED);

	local64_set(&hwc->prev_count, 0);

	writel(DBG_RST, reg);

	/*
	 * Write the AXI id parameter to MADPCR1.
	 */
	val = event->attr.config1;
	reg = mmdc_base + MMDC_MADPCR1;
	writel(val, reg);

	reg = mmdc_base + MMDC_MADPCR0;
	val = DBG_EN;
	if (pmu_mmdc->devtype_data->flags & MMDC_FLAG_PROFILE_SEL)
		val |= PROFILE_SEL;

	writel(val, reg);
}

static int mmdc_pmu_event_add(struct perf_event *event, int flags)
{
	struct mmdc_pmu *pmu_mmdc = to_mmdc_pmu(event->pmu);
	struct hw_perf_event *hwc = &event->hw;

	int cfg = event->attr.config;

	if (flags & PERF_EF_START)
		mmdc_pmu_event_start(event, flags);

	if (pmu_mmdc->mmdc_events[cfg] != NULL)
		return -EAGAIN;

	pmu_mmdc->mmdc_events[cfg] = event;
	pmu_mmdc->active_events++;

	local64_set(&hwc->prev_count, mmdc_pmu_read_counter(pmu_mmdc, cfg));

	return 0;
}

static void mmdc_pmu_event_stop(struct perf_event *event, int flags)
{
	struct mmdc_pmu *pmu_mmdc = to_mmdc_pmu(event->pmu);
	void __iomem *mmdc_base, *reg;

	mmdc_base = pmu_mmdc->mmdc_base;
	reg = mmdc_base + MMDC_MADPCR0;

	writel(PRF_FRZ, reg);

	reg = mmdc_base + MMDC_MADPCR1;
	writel(MMDC_PRF_AXI_ID_CLEAR, reg);

	mmdc_pmu_event_update(event);
}

static void mmdc_pmu_event_del(struct perf_event *event, int flags)
{
	struct mmdc_pmu *pmu_mmdc = to_mmdc_pmu(event->pmu);
	int cfg = event->attr.config;

	pmu_mmdc->mmdc_events[cfg] = NULL;
	pmu_mmdc->active_events--;

	if (pmu_mmdc->active_events == 0)
		hrtimer_cancel(&pmu_mmdc->hrtimer);

	mmdc_pmu_event_stop(event, PERF_EF_UPDATE);
}

static void mmdc_pmu_overflow_handler(struct mmdc_pmu *pmu_mmdc)
{
	int i;

	for (i = 0; i < MMDC_NUM_COUNTERS; i++) {
		struct perf_event *event = pmu_mmdc->mmdc_events[i];

		if (event)
			mmdc_pmu_event_update(event);
	}
}

static enum hrtimer_restart mmdc_pmu_timer_handler(struct hrtimer *hrtimer)
{
	struct mmdc_pmu *pmu_mmdc = container_of(hrtimer, struct mmdc_pmu,
			hrtimer);

	mmdc_pmu_overflow_handler(pmu_mmdc);
	hrtimer_forward_now(hrtimer, mmdc_pmu_timer_period());

	return HRTIMER_RESTART;
}

static int mmdc_pmu_init(struct mmdc_pmu *pmu_mmdc,
		void __iomem *mmdc_base, struct device *dev)
{
	int mmdc_num;

	*pmu_mmdc = (struct mmdc_pmu) {
		.pmu = (struct pmu) {
			.task_ctx_nr	= perf_invalid_context,
			.attr_groups	= attr_groups,
			.event_init	= mmdc_pmu_event_init,
			.add		= mmdc_pmu_event_add,
			.del		= mmdc_pmu_event_del,
			.start		= mmdc_pmu_event_start,
			.stop		= mmdc_pmu_event_stop,
			.read		= mmdc_pmu_event_update,
			.capabilities	= PERF_PMU_CAP_NO_EXCLUDE,
		},
		.mmdc_base = mmdc_base,
		.dev = dev,
		.active_events = 0,
	};

	mmdc_num = ida_simple_get(&mmdc_ida, 0, 0, GFP_KERNEL);

	return mmdc_num;
}

static int imx_mmdc_remove(struct platform_device *pdev)
{
	struct mmdc_pmu *pmu_mmdc = platform_get_drvdata(pdev);

	cpuhp_state_remove_instance_nocalls(cpuhp_mmdc_state, &pmu_mmdc->node);
	perf_pmu_unregister(&pmu_mmdc->pmu);
	iounmap(pmu_mmdc->mmdc_base);
	clk_disable_unprepare(pmu_mmdc->mmdc_ipg_clk);
	kfree(pmu_mmdc);
	return 0;
}

static int imx_mmdc_perf_init(struct platform_device *pdev, void __iomem *mmdc_base,
			      struct clk *mmdc_ipg_clk)
{
	struct mmdc_pmu *pmu_mmdc;
	char *name;
	int mmdc_num;
	int ret;
	const struct of_device_id *of_id =
		of_match_device(imx_mmdc_dt_ids, &pdev->dev);

	pmu_mmdc = kzalloc(sizeof(*pmu_mmdc), GFP_KERNEL);
	if (!pmu_mmdc) {
		pr_err("failed to allocate PMU device!\n");
		return -ENOMEM;
	}

	/* The first instance registers the hotplug state */
	if (!cpuhp_mmdc_state) {
		ret = cpuhp_setup_state_multi(CPUHP_AP_ONLINE_DYN,
					      "perf/arm/mmdc:online", NULL,
					      mmdc_pmu_offline_cpu);
		if (ret < 0) {
			pr_err("cpuhp_setup_state_multi failed\n");
			goto pmu_free;
		}
		cpuhp_mmdc_state = ret;
	}

	mmdc_num = mmdc_pmu_init(pmu_mmdc, mmdc_base, &pdev->dev);
	pmu_mmdc->mmdc_ipg_clk = mmdc_ipg_clk;
	if (mmdc_num == 0)
		name = "mmdc";
	else
		name = devm_kasprintf(&pdev->dev,
				GFP_KERNEL, "mmdc%d", mmdc_num);

	pmu_mmdc->devtype_data = (struct fsl_mmdc_devtype_data *)of_id->data;

	hrtimer_init(&pmu_mmdc->hrtimer, CLOCK_MONOTONIC,
			HRTIMER_MODE_REL);
	pmu_mmdc->hrtimer.function = mmdc_pmu_timer_handler;

	cpumask_set_cpu(raw_smp_processor_id(), &pmu_mmdc->cpu);

	/* Register the pmu instance for cpu hotplug */
	cpuhp_state_add_instance_nocalls(cpuhp_mmdc_state, &pmu_mmdc->node);

	ret = perf_pmu_register(&(pmu_mmdc->pmu), name, -1);
	if (ret)
		goto pmu_register_err;

	platform_set_drvdata(pdev, pmu_mmdc);
	return 0;

pmu_register_err:
	pr_warn("MMDC Perf PMU failed (%d), disabled\n", ret);
	cpuhp_state_remove_instance_nocalls(cpuhp_mmdc_state, &pmu_mmdc->node);
	hrtimer_cancel(&pmu_mmdc->hrtimer);
pmu_free:
	kfree(pmu_mmdc);
	return ret;
}

#else
#define imx_mmdc_remove NULL
#define imx_mmdc_perf_init(pdev, mmdc_base, mmdc_ipg_clk) 0
#endif

static int imx_mmdc_probe(struct platform_device *pdev)
{
	struct device_node *np = pdev->dev.of_node;
	void __iomem *mmdc_base, *reg;
	struct clk *mmdc_ipg_clk;
	u32 val;
	int err;

	/* the ipg clock is optional */
	mmdc_ipg_clk = devm_clk_get(&pdev->dev, NULL);
	if (IS_ERR(mmdc_ipg_clk))
		mmdc_ipg_clk = NULL;

	err = clk_prepare_enable(mmdc_ipg_clk);
	if (err) {
		dev_err(&pdev->dev, "Unable to enable mmdc ipg clock.\n");
		return err;
	}

	mmdc_base = of_iomap(np, 0);
	WARN_ON(!mmdc_base);

	reg = mmdc_base + MMDC_MDMISC;
	/* Get ddr type */
	val = readl_relaxed(reg);
	ddr_type = (val & BM_MMDC_MDMISC_DDR_TYPE) >>
		 BP_MMDC_MDMISC_DDR_TYPE;

	reg = mmdc_base + MMDC_MAPSR;

	/* Enable automatic power saving */
	val = readl_relaxed(reg);
	val &= ~(1 << BP_MMDC_MAPSR_PSD);
	writel_relaxed(val, reg);

	err = imx_mmdc_perf_init(pdev, mmdc_base, mmdc_ipg_clk);
	if (err) {
		iounmap(mmdc_base);
		clk_disable_unprepare(mmdc_ipg_clk);
	}

	return err;
}
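
/*
 * Illustrative device tree node that this driver binds against (address
 * and size as on i.MX6Q, where MMDC0 sits at 0x021b0000; see the
 * fsl,imx6q-mmdc binding for the authoritative example):
 *
 *   mmdc0: memory-controller@21b0000 {
 *           compatible = "fsl,imx6q-mmdc";
 *           reg = <0x021b0000 0x4000>;
 *   };
 */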

int imx_mmdc_get_ddr_type(void)
{
	return ddr_type;
}

static struct platform_driver imx_mmdc_driver = {
	.driver		= {
		.name	= "imx-mmdc",
		.of_match_table = imx_mmdc_dt_ids,
	},
	.probe		= imx_mmdc_probe,
	.remove		= imx_mmdc_remove,
};

static int __init imx_mmdc_init(void)
{
	return platform_driver_register(&imx_mmdc_driver);
}
postcore_initcall(imx_mmdc_init);