// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright 2016 Freescale Semiconductor, Inc.
 */

#include <linux/bitfield.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_device.h>
#include <linux/of_irq.h>
#include <linux/perf_event.h>
#include <linux/slab.h>

#define COUNTER_CNTL		0x0
#define COUNTER_READ		0x20

#define COUNTER_DPCR1		0x30

#define CNTL_OVER		0x1
#define CNTL_CLEAR		0x2
#define CNTL_EN			0x4
#define CNTL_EN_MASK		0xFFFFFFFB
#define CNTL_CLEAR_MASK		0xFFFFFFFD
#define CNTL_OVER_MASK		0xFFFFFFFE

#define CNTL_CSV_SHIFT		24
#define CNTL_CSV_MASK		(0xFFU << CNTL_CSV_SHIFT)

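/*
 * Summary of the per-counter CNTL bit layout as implied by the masks above
 * (inferred from this file, not taken from a reference manual):
 * bit 0 = OVER(flow), bit 1 = CLEAR, bit 2 = EN(able),
 * bits 31:24 = CSV (the event ID selected for the counter).
 */
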
#define EVENT_CYCLES_ID		0
#define EVENT_CYCLES_COUNTER	0
#define NUM_COUNTERS		4

#define AXI_MASKING_REVERT	0xffff0000	/* AXI_MASKING(MSB 16bits) + AXI_ID(LSB 16bits) */

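/*
 * Worked example (added for illustration): with the format attributes below,
 * axi_mask = 0xffff and axi_id = 0x001a are packed as config1 = 0xffff001a;
 * the XOR with AXI_MASKING_REVERT in ddr_perf_event_add() inverts the mask
 * half, giving 0x0000001a, which is what gets written to COUNTER_DPCR1.
 */
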
#define to_ddr_pmu(p)		container_of(p, struct ddr_pmu, pmu)

#define DDR_PERF_DEV_NAME	"imx8_ddr"
#define DDR_CPUHP_CB_NAME	DDR_PERF_DEV_NAME "_perf_pmu"

static DEFINE_IDA(ddr_ida);

/* DDR Perf hardware feature */
#define DDR_CAP_AXI_ID_FILTER		0x1	/* support AXI ID filter */
#define DDR_CAP_AXI_ID_FILTER_ENHANCED	0x3	/* support enhanced AXI ID filter */

struct fsl_ddr_devtype_data {
	unsigned int quirks; /* quirks needed for different DDR Perf core */
};

static const struct fsl_ddr_devtype_data imx8_devtype_data;

static const struct fsl_ddr_devtype_data imx8m_devtype_data = {
	.quirks = DDR_CAP_AXI_ID_FILTER,
};

static const struct fsl_ddr_devtype_data imx8mp_devtype_data = {
	.quirks = DDR_CAP_AXI_ID_FILTER_ENHANCED,
};

static const struct of_device_id imx_ddr_pmu_dt_ids[] = {
	{ .compatible = "fsl,imx8-ddr-pmu", .data = &imx8_devtype_data},
	{ .compatible = "fsl,imx8m-ddr-pmu", .data = &imx8m_devtype_data},
	{ .compatible = "fsl,imx8mp-ddr-pmu", .data = &imx8mp_devtype_data},
	{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, imx_ddr_pmu_dt_ids);

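/*
 * Illustrative devicetree usage only; the node name, unit address, register
 * window and interrupt below are placeholders, not taken from a real board
 * DTS:
 *
 *	ddr-pmu@<addr> {
 *		compatible = "fsl,imx8m-ddr-pmu";
 *		reg = <...>;
 *		interrupts = <...>;
 *	};
 */
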
struct ddr_pmu {
	struct pmu pmu;
	void __iomem *base;
	unsigned int cpu;
	struct hlist_node node;
	struct device *dev;
	struct perf_event *events[NUM_COUNTERS];
	int active_events;
	int active_counter;
	enum cpuhp_state cpuhp_state;
	const struct fsl_ddr_devtype_data *devtype_data;
	int irq;
	int id;
};

enum ddr_perf_filter_capabilities {
	PERF_CAP_AXI_ID_FILTER = 0,
	PERF_CAP_AXI_ID_FILTER_ENHANCED,
	PERF_CAP_AXI_ID_FEAT_MAX,
};

static u32 ddr_perf_filter_cap_get(struct ddr_pmu *pmu, int cap)
{
	u32 quirks = pmu->devtype_data->quirks;

	switch (cap) {
	case PERF_CAP_AXI_ID_FILTER:
		return !!(quirks & DDR_CAP_AXI_ID_FILTER);
	case PERF_CAP_AXI_ID_FILTER_ENHANCED:
		quirks &= DDR_CAP_AXI_ID_FILTER_ENHANCED;
		return quirks == DDR_CAP_AXI_ID_FILTER_ENHANCED;
	default:
		WARN(1, "unknown filter cap %d\n", cap);
	}

	return 0;
}

static ssize_t ddr_perf_filter_cap_show(struct device *dev,
					struct device_attribute *attr,
					char *buf)
{
	struct ddr_pmu *pmu = dev_get_drvdata(dev);
	struct dev_ext_attribute *ea =
		container_of(attr, struct dev_ext_attribute, attr);
	int cap = (long)ea->var;

	return snprintf(buf, PAGE_SIZE, "%u\n",
			ddr_perf_filter_cap_get(pmu, cap));
}

#define PERF_EXT_ATTR_ENTRY(_name, _func, _var)			\
	(&((struct dev_ext_attribute[]) {				\
		{ __ATTR(_name, 0444, _func, NULL), (void *)_var }	\
	})[0].attr.attr)

#define PERF_FILTER_EXT_ATTR_ENTRY(_name, _var)			\
	PERF_EXT_ATTR_ENTRY(_name, ddr_perf_filter_cap_show, _var)

static struct attribute *ddr_perf_filter_cap_attr[] = {
	PERF_FILTER_EXT_ATTR_ENTRY(filter, PERF_CAP_AXI_ID_FILTER),
	PERF_FILTER_EXT_ATTR_ENTRY(enhanced_filter, PERF_CAP_AXI_ID_FILTER_ENHANCED),
	NULL,
};

static struct attribute_group ddr_perf_filter_cap_attr_group = {
	.name = "caps",
	.attrs = ddr_perf_filter_cap_attr,
};

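/*
 * With the "caps" group name above, these attributes are expected to show up
 * as /sys/bus/event_source/devices/imx8_ddr<N>/caps/filter and
 * .../caps/enhanced_filter, each reading 0 or 1 (a description of the usual
 * perf sysfs layout, not verified against a running system).
 */
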
static ssize_t ddr_perf_cpumask_show(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	struct ddr_pmu *pmu = dev_get_drvdata(dev);

	return cpumap_print_to_pagebuf(true, buf, cpumask_of(pmu->cpu));
}

static struct device_attribute ddr_perf_cpumask_attr =
	__ATTR(cpumask, 0444, ddr_perf_cpumask_show, NULL);

static struct attribute *ddr_perf_cpumask_attrs[] = {
	&ddr_perf_cpumask_attr.attr,
	NULL,
};

static struct attribute_group ddr_perf_cpumask_attr_group = {
	.attrs = ddr_perf_cpumask_attrs,
};

static ssize_t
ddr_pmu_event_show(struct device *dev, struct device_attribute *attr,
		   char *page)
{
	struct perf_pmu_events_attr *pmu_attr;

	pmu_attr = container_of(attr, struct perf_pmu_events_attr, attr);
	return sprintf(page, "event=0x%02llx\n", pmu_attr->id);
}

#define IMX8_DDR_PMU_EVENT_ATTR(_name, _id)				\
	(&((struct perf_pmu_events_attr[]) {				\
		{ .attr = __ATTR(_name, 0444, ddr_pmu_event_show, NULL),\
		  .id = _id, }						\
	})[0].attr.attr)

static struct attribute *ddr_perf_events_attrs[] = {
	IMX8_DDR_PMU_EVENT_ATTR(cycles, EVENT_CYCLES_ID),
	IMX8_DDR_PMU_EVENT_ATTR(selfresh, 0x01),
	IMX8_DDR_PMU_EVENT_ATTR(read-accesses, 0x04),
	IMX8_DDR_PMU_EVENT_ATTR(write-accesses, 0x05),
	IMX8_DDR_PMU_EVENT_ATTR(read-queue-depth, 0x08),
	IMX8_DDR_PMU_EVENT_ATTR(write-queue-depth, 0x09),
	IMX8_DDR_PMU_EVENT_ATTR(lp-read-credit-cnt, 0x10),
	IMX8_DDR_PMU_EVENT_ATTR(hp-read-credit-cnt, 0x11),
	IMX8_DDR_PMU_EVENT_ATTR(write-credit-cnt, 0x12),
	IMX8_DDR_PMU_EVENT_ATTR(read-command, 0x20),
	IMX8_DDR_PMU_EVENT_ATTR(write-command, 0x21),
	IMX8_DDR_PMU_EVENT_ATTR(read-modify-write-command, 0x22),
	IMX8_DDR_PMU_EVENT_ATTR(hp-read, 0x23),
	IMX8_DDR_PMU_EVENT_ATTR(hp-req-nocredit, 0x24),
	IMX8_DDR_PMU_EVENT_ATTR(hp-xact-credit, 0x25),
	IMX8_DDR_PMU_EVENT_ATTR(lp-req-nocredit, 0x26),
	IMX8_DDR_PMU_EVENT_ATTR(lp-xact-credit, 0x27),
	IMX8_DDR_PMU_EVENT_ATTR(wr-xact-credit, 0x29),
	IMX8_DDR_PMU_EVENT_ATTR(read-cycles, 0x2a),
	IMX8_DDR_PMU_EVENT_ATTR(write-cycles, 0x2b),
	IMX8_DDR_PMU_EVENT_ATTR(read-write-transition, 0x30),
	IMX8_DDR_PMU_EVENT_ATTR(precharge, 0x31),
	IMX8_DDR_PMU_EVENT_ATTR(activate, 0x32),
	IMX8_DDR_PMU_EVENT_ATTR(load-mode, 0x33),
	IMX8_DDR_PMU_EVENT_ATTR(perf-mwr, 0x34),
	IMX8_DDR_PMU_EVENT_ATTR(read, 0x35),
	IMX8_DDR_PMU_EVENT_ATTR(read-activate, 0x36),
	IMX8_DDR_PMU_EVENT_ATTR(refresh, 0x37),
	IMX8_DDR_PMU_EVENT_ATTR(write, 0x38),
	IMX8_DDR_PMU_EVENT_ATTR(raw-hazard, 0x39),
	IMX8_DDR_PMU_EVENT_ATTR(axid-read, 0x41),
	IMX8_DDR_PMU_EVENT_ATTR(axid-write, 0x42),
	NULL,
};

static struct attribute_group ddr_perf_events_attr_group = {
	.name = "events",
	.attrs = ddr_perf_events_attrs,
};

PMU_FORMAT_ATTR(event, "config:0-7");
PMU_FORMAT_ATTR(axi_id, "config1:0-15");
PMU_FORMAT_ATTR(axi_mask, "config1:16-31");

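/*
 * Illustrative perf tool usage, assuming the instance registered as
 * "imx8_ddr0" (the numeric suffix comes from ddr_ida in ddr_perf_init()):
 *
 *	perf stat -a -e imx8_ddr0/read-cycles/ -- sleep 1
 *	perf stat -a -e imx8_ddr0/axid-read,axi_mask=0xffff,axi_id=0x1a/ -- sleep 1
 *
 * The axi_mask/axi_id fields are only honoured on parts with the AXI ID
 * filter quirk set in their devtype data.
 */
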
static struct attribute *ddr_perf_format_attrs[] = {
	&format_attr_event.attr,
	&format_attr_axi_id.attr,
	&format_attr_axi_mask.attr,
	NULL,
};

static struct attribute_group ddr_perf_format_attr_group = {
	.name = "format",
	.attrs = ddr_perf_format_attrs,
};

static const struct attribute_group *attr_groups[] = {
	&ddr_perf_events_attr_group,
	&ddr_perf_format_attr_group,
	&ddr_perf_cpumask_attr_group,
	&ddr_perf_filter_cap_attr_group,
	NULL,
};

static bool ddr_perf_is_filtered(struct perf_event *event)
{
	/* 0x41/0x42 are the axid-read/axid-write event IDs listed above */
	return event->attr.config == 0x41 || event->attr.config == 0x42;
}

static u32 ddr_perf_filter_val(struct perf_event *event)
{
	return event->attr.config1;
}

static bool ddr_perf_filters_compatible(struct perf_event *a,
					struct perf_event *b)
{
	if (!ddr_perf_is_filtered(a))
		return true;
	if (!ddr_perf_is_filtered(b))
		return true;
	return ddr_perf_filter_val(a) == ddr_perf_filter_val(b);
}

static bool ddr_perf_is_enhanced_filtered(struct perf_event *event)
{
	unsigned int filt;
	struct ddr_pmu *pmu = to_ddr_pmu(event->pmu);

	filt = pmu->devtype_data->quirks & DDR_CAP_AXI_ID_FILTER_ENHANCED;
	return (filt == DDR_CAP_AXI_ID_FILTER_ENHANCED) &&
		ddr_perf_is_filtered(event);
}

static u32 ddr_perf_alloc_counter(struct ddr_pmu *pmu, int event)
{
	int i;

	/*
	 * Always map the cycle event to counter 0: the cycles counter
	 * is dedicated to the cycle event and cannot be used for any
	 * other event.
	 */
	if (event == EVENT_CYCLES_ID) {
		if (pmu->events[EVENT_CYCLES_COUNTER] == NULL)
			return EVENT_CYCLES_COUNTER;
		else
			return -ENOENT;
	}

	for (i = 1; i < NUM_COUNTERS; i++) {
		if (pmu->events[i] == NULL)
			return i;
	}

	return -ENOENT;
}

static void ddr_perf_free_counter(struct ddr_pmu *pmu, int counter)
{
	pmu->events[counter] = NULL;
}

static u32 ddr_perf_read_counter(struct ddr_pmu *pmu, int counter)
{
	struct perf_event *event = pmu->events[counter];
	void __iomem *base = pmu->base;

	/*
	 * Return bytes instead of bursts from DDR transactions for the
	 * axid-read and axid-write events if the PMU core supports the
	 * enhanced filter.
	 */
	base += ddr_perf_is_enhanced_filtered(event) ? COUNTER_DPCR1 :
						       COUNTER_READ;
	return readl_relaxed(base + counter * 4);
}

static int ddr_perf_event_init(struct perf_event *event)
{
	struct ddr_pmu *pmu = to_ddr_pmu(event->pmu);
	struct hw_perf_event *hwc = &event->hw;
	struct perf_event *sibling;

	if (event->attr.type != event->pmu->type)
		return -ENOENT;

	if (is_sampling_event(event) || event->attach_state & PERF_ATTACH_TASK)
		return -EOPNOTSUPP;

	if (event->cpu < 0) {
		dev_warn(pmu->dev, "Can't provide per-task data!\n");
		return -EOPNOTSUPP;
	}

	/*
	 * We must NOT create groups containing mixed PMUs, although software
	 * events are acceptable (for example to create a CCN group
	 * periodically read when a hrtimer aka cpu-clock leader triggers).
	 */
	if (event->group_leader->pmu != event->pmu &&
			!is_software_event(event->group_leader))
		return -EINVAL;

	if (pmu->devtype_data->quirks & DDR_CAP_AXI_ID_FILTER) {
		if (!ddr_perf_filters_compatible(event, event->group_leader))
			return -EINVAL;
		for_each_sibling_event(sibling, event->group_leader) {
			if (!ddr_perf_filters_compatible(event, sibling))
				return -EINVAL;
		}
	}

	for_each_sibling_event(sibling, event->group_leader) {
		if (sibling->pmu != event->pmu &&
				!is_software_event(sibling))
			return -EINVAL;
	}

	event->cpu = pmu->cpu;
	hwc->idx = -1;

	return 0;
}

static void ddr_perf_event_update(struct perf_event *event)
{
	struct ddr_pmu *pmu = to_ddr_pmu(event->pmu);
	struct hw_perf_event *hwc = &event->hw;
	u64 delta, prev_raw_count, new_raw_count;
	int counter = hwc->idx;

	do {
		prev_raw_count = local64_read(&hwc->prev_count);
		new_raw_count = ddr_perf_read_counter(pmu, counter);
	} while (local64_cmpxchg(&hwc->prev_count, prev_raw_count,
			new_raw_count) != prev_raw_count);

	/* the hardware counters are 32 bit, so mask the delta accordingly */
	delta = (new_raw_count - prev_raw_count) & 0xFFFFFFFF;

	local64_add(delta, &event->count);
}

static void ddr_perf_counter_enable(struct ddr_pmu *pmu, int config,
				    int counter, bool enable)
{
	u8 reg = counter * 4 + COUNTER_CNTL;
	int val;

	if (enable) {
		/*
		 * The cycle counter is special: clearing it requires
		 * writing 0 and then 1 to the CLEAR bit. Other counters
		 * only need a 0 written to the CLEAR bit; the hardware
		 * sets it back to 1. The enable flow below is harmless
		 * for all counters.
		 */
		writel(0, pmu->base + reg);
		val = CNTL_EN | CNTL_CLEAR;
		val |= FIELD_PREP(CNTL_CSV_MASK, config);
		writel(val, pmu->base + reg);
	} else {
		/* Disable counter */
		val = readl_relaxed(pmu->base + reg) & CNTL_EN_MASK;
		writel(val, pmu->base + reg);
	}
}

static void ddr_perf_event_start(struct perf_event *event, int flags)
{
	struct ddr_pmu *pmu = to_ddr_pmu(event->pmu);
	struct hw_perf_event *hwc = &event->hw;
	int counter = hwc->idx;

	local64_set(&hwc->prev_count, 0);

	ddr_perf_counter_enable(pmu, event->attr.config, counter, true);

	if (!pmu->active_counter++)
		ddr_perf_counter_enable(pmu, EVENT_CYCLES_ID,
			EVENT_CYCLES_COUNTER, true);

	hwc->state = 0;
}

static int ddr_perf_event_add(struct perf_event *event, int flags)
{
	struct ddr_pmu *pmu = to_ddr_pmu(event->pmu);
	struct hw_perf_event *hwc = &event->hw;
	int counter;
	int cfg = event->attr.config;
	int cfg1 = event->attr.config1;

	if (pmu->devtype_data->quirks & DDR_CAP_AXI_ID_FILTER) {
		int i;

		for (i = 1; i < NUM_COUNTERS; i++) {
			if (pmu->events[i] &&
			    !ddr_perf_filters_compatible(event, pmu->events[i]))
				return -EINVAL;
		}

		if (ddr_perf_is_filtered(event)) {
			/* revert axi id masking(axi_mask) value */
			cfg1 ^= AXI_MASKING_REVERT;
			writel(cfg1, pmu->base + COUNTER_DPCR1);
		}
	}

	counter = ddr_perf_alloc_counter(pmu, cfg);
	if (counter < 0) {
		dev_dbg(pmu->dev, "There are not enough counters\n");
		return -EOPNOTSUPP;
	}

	pmu->events[counter] = event;
	pmu->active_events++;
	hwc->idx = counter;

	hwc->state |= PERF_HES_STOPPED;

	if (flags & PERF_EF_START)
		ddr_perf_event_start(event, flags);

	return 0;
}

static void ddr_perf_event_stop(struct perf_event *event, int flags)
{
	struct ddr_pmu *pmu = to_ddr_pmu(event->pmu);
	struct hw_perf_event *hwc = &event->hw;
	int counter = hwc->idx;

	ddr_perf_counter_enable(pmu, event->attr.config, counter, false);
	ddr_perf_event_update(event);

	if (!--pmu->active_counter)
		ddr_perf_counter_enable(pmu, EVENT_CYCLES_ID,
			EVENT_CYCLES_COUNTER, false);

	hwc->state |= PERF_HES_STOPPED;
}

static void ddr_perf_event_del(struct perf_event *event, int flags)
{
	struct ddr_pmu *pmu = to_ddr_pmu(event->pmu);
	struct hw_perf_event *hwc = &event->hw;
	int counter = hwc->idx;

	ddr_perf_event_stop(event, PERF_EF_UPDATE);

	ddr_perf_free_counter(pmu, counter);
	pmu->active_events--;
	hwc->idx = -1;
}

static void ddr_perf_pmu_enable(struct pmu *pmu)
{
}

static void ddr_perf_pmu_disable(struct pmu *pmu)
{
}

static int ddr_perf_init(struct ddr_pmu *pmu, void __iomem *base,
			 struct device *dev)
{
	*pmu = (struct ddr_pmu) {
		.pmu = (struct pmu) {
			.module	      = THIS_MODULE,
			.capabilities = PERF_PMU_CAP_NO_EXCLUDE,
			.task_ctx_nr = perf_invalid_context,
			.attr_groups = attr_groups,
			.event_init  = ddr_perf_event_init,
			.add	     = ddr_perf_event_add,
			.del	     = ddr_perf_event_del,
			.start	     = ddr_perf_event_start,
			.stop	     = ddr_perf_event_stop,
			.read	     = ddr_perf_event_update,
			.pmu_enable  = ddr_perf_pmu_enable,
			.pmu_disable = ddr_perf_pmu_disable,
		},
		.base = base,
		.dev = dev,
	};

	pmu->id = ida_simple_get(&ddr_ida, 0, 0, GFP_KERNEL);
	return pmu->id;
}

static irqreturn_t ddr_perf_irq_handler(int irq, void *p)
{
	int i;
	struct ddr_pmu *pmu = (struct ddr_pmu *) p;
	struct perf_event *event, *cycle_event = NULL;

	/* all counters will stop if the cycle counter is disabled */
	ddr_perf_counter_enable(pmu,
			      EVENT_CYCLES_ID,
			      EVENT_CYCLES_COUNTER,
			      false);
	/*
	 * When the cycle counter overflows, all counters are stopped,
	 * and an IRQ is raised. If any other counter overflows, it
	 * continues counting, and no IRQ is raised.
	 *
	 * Cycles occur at least 4 times as often as other events, so we
	 * can update all events on a cycle counter overflow and not
	 * lose events.
	 */
	for (i = 0; i < NUM_COUNTERS; i++) {
		if (!pmu->events[i])
			continue;

		event = pmu->events[i];

		ddr_perf_event_update(event);

		if (event->hw.idx == EVENT_CYCLES_COUNTER)
			cycle_event = event;
	}

	ddr_perf_counter_enable(pmu,
			      EVENT_CYCLES_ID,
			      EVENT_CYCLES_COUNTER,
			      true);
	if (cycle_event)
		ddr_perf_event_update(cycle_event);

	return IRQ_HANDLED;
}

static int ddr_perf_offline_cpu(unsigned int cpu, struct hlist_node *node)
{
	struct ddr_pmu *pmu = hlist_entry_safe(node, struct ddr_pmu, node);
	int target;

	if (cpu != pmu->cpu)
		return 0;

	target = cpumask_any_but(cpu_online_mask, cpu);
	if (target >= nr_cpu_ids)
		return 0;

	perf_pmu_migrate_context(&pmu->pmu, cpu, target);
	pmu->cpu = target;

	WARN_ON(irq_set_affinity_hint(pmu->irq, cpumask_of(pmu->cpu)));

	return 0;
}

static int ddr_perf_probe(struct platform_device *pdev)
{
	struct ddr_pmu *pmu;
	struct device_node *np;
	void __iomem *base;
	char *name;
	int num;
	int ret;
	int irq;

	base = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(base))
		return PTR_ERR(base);

	np = pdev->dev.of_node;

	pmu = devm_kzalloc(&pdev->dev, sizeof(*pmu), GFP_KERNEL);
	if (!pmu)
		return -ENOMEM;

	num = ddr_perf_init(pmu, base, &pdev->dev);

	platform_set_drvdata(pdev, pmu);

	name = devm_kasprintf(&pdev->dev, GFP_KERNEL, DDR_PERF_DEV_NAME "%d",
			      num);
	if (!name) {
		ret = -ENOMEM;
		goto cpuhp_state_err;
	}

	pmu->devtype_data = of_device_get_match_data(&pdev->dev);

	pmu->cpu = raw_smp_processor_id();
	ret = cpuhp_setup_state_multi(CPUHP_AP_ONLINE_DYN,
				      DDR_CPUHP_CB_NAME,
				      NULL,
				      ddr_perf_offline_cpu);
	if (ret < 0) {
		dev_err(&pdev->dev, "cpuhp_setup_state_multi failed\n");
		goto cpuhp_state_err;
	}

	pmu->cpuhp_state = ret;

	/* Register the pmu instance for cpu hotplug */
	ret = cpuhp_state_add_instance_nocalls(pmu->cpuhp_state, &pmu->node);
	if (ret) {
		dev_err(&pdev->dev, "Error %d registering hotplug\n", ret);
		goto cpuhp_instance_err;
	}

	/* Request irq */
	irq = of_irq_get(np, 0);
	if (irq < 0) {
		dev_err(&pdev->dev, "Failed to get irq: %d", irq);
		ret = irq;
		goto ddr_perf_err;
	}

	ret = devm_request_irq(&pdev->dev, irq,
			       ddr_perf_irq_handler,
			       IRQF_NOBALANCING | IRQF_NO_THREAD,
			       DDR_CPUHP_CB_NAME,
			       pmu);
	if (ret < 0) {
		dev_err(&pdev->dev, "Request irq failed: %d", ret);
		goto ddr_perf_err;
	}

	pmu->irq = irq;
	ret = irq_set_affinity_hint(pmu->irq, cpumask_of(pmu->cpu));
	if (ret) {
		dev_err(pmu->dev, "Failed to set interrupt affinity!\n");
		goto ddr_perf_err;
	}

	ret = perf_pmu_register(&pmu->pmu, name, -1);
	if (ret)
		goto ddr_perf_err;

	return 0;

ddr_perf_err:
	cpuhp_state_remove_instance_nocalls(pmu->cpuhp_state, &pmu->node);
cpuhp_instance_err:
	cpuhp_remove_multi_state(pmu->cpuhp_state);
cpuhp_state_err:
	ida_simple_remove(&ddr_ida, pmu->id);
	dev_warn(&pdev->dev, "i.MX8 DDR Perf PMU failed (%d), disabled\n", ret);
	return ret;
}

static int ddr_perf_remove(struct platform_device *pdev)
{
	struct ddr_pmu *pmu = platform_get_drvdata(pdev);

	cpuhp_state_remove_instance_nocalls(pmu->cpuhp_state, &pmu->node);
	cpuhp_remove_multi_state(pmu->cpuhp_state);
	irq_set_affinity_hint(pmu->irq, NULL);

	perf_pmu_unregister(&pmu->pmu);

	ida_simple_remove(&ddr_ida, pmu->id);
	return 0;
}

static struct platform_driver imx_ddr_pmu_driver = {
	.driver         = {
		.name   = "imx-ddr-pmu",
		.of_match_table = imx_ddr_pmu_dt_ids,
		.suppress_bind_attrs = true,
	},
	.probe          = ddr_perf_probe,
	.remove         = ddr_perf_remove,
};

module_platform_driver(imx_ddr_pmu_driver);

MODULE_LICENSE("GPL v2");