// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2022 Amlogic, Inc. All rights reserved.
 */
6 #include <linux/bitfield.h>
7 #include <linux/init.h>
8 #include <linux/irqreturn.h>
9 #include <linux/kernel.h>
10 #include <linux/module.h>
12 #include <linux/perf_event.h>
13 #include <linux/platform_device.h>
14 #include <linux/printk.h>
15 #include <linux/sysfs.h>
16 #include <linux/types.h>
18 #include <soc/amlogic/meson_ddr_pmu.h>
23 struct dmc_counter counters; /* save counters from hw */
27 struct hlist_node node;
28 enum cpuhp_state cpuhp_state;
29 int cpu; /* for cpu hotplug */
#define DDR_PERF_DEV_NAME		"meson_ddr_bw"
#define MAX_AXI_PORTS_OF_CHANNEL	4	/* A DMC channel can monitor max 4 axi ports */

/* Recover the containing ddr_pmu from either embedded member. */
#define to_ddr_pmu(p)		container_of(p, struct ddr_pmu, pmu)
#define dmc_info_to_pmu(p)	container_of(p, struct ddr_pmu, info)
38 static void dmc_pmu_enable(struct ddr_pmu *pmu)
40 if (!pmu->pmu_enabled)
41 pmu->info.hw_info->enable(&pmu->info);
43 pmu->pmu_enabled = true;
46 static void dmc_pmu_disable(struct ddr_pmu *pmu)
49 pmu->info.hw_info->disable(&pmu->info);
51 pmu->pmu_enabled = false;
54 static void meson_ddr_set_axi_filter(struct perf_event *event, u8 axi_id)
56 struct ddr_pmu *pmu = to_ddr_pmu(event->pmu);
59 if (event->attr.config > ALL_CHAN_COUNTER_ID &&
60 event->attr.config < COUNTER_MAX_ID) {
61 chann = event->attr.config - CHAN1_COUNTER_ID;
63 pmu->info.hw_info->set_axi_filter(&pmu->info, axi_id, chann);
67 static void ddr_cnt_addition(struct dmc_counter *sum,
68 struct dmc_counter *add1,
69 struct dmc_counter *add2,
75 sum->all_cnt = add1->all_cnt + add2->all_cnt;
76 sum->all_req = add1->all_req + add2->all_req;
77 for (i = 0; i < chann_nr; i++) {
78 cnt1 = add1->channel_cnt[i];
79 cnt2 = add2->channel_cnt[i];
81 sum->channel_cnt[i] = cnt1 + cnt2;
85 static void meson_ddr_perf_event_update(struct perf_event *event)
87 struct ddr_pmu *pmu = to_ddr_pmu(event->pmu);
88 u64 new_raw_count = 0;
89 struct dmc_counter dc = {0}, sum_dc = {0};
91 int chann_nr = pmu->info.hw_info->chann_nr;
93 /* get the remain counters in register. */
94 pmu->info.hw_info->get_counters(&pmu->info, &dc);
96 ddr_cnt_addition(&sum_dc, &pmu->counters, &dc, chann_nr);
98 switch (event->attr.config) {
99 case ALL_CHAN_COUNTER_ID:
100 new_raw_count = sum_dc.all_cnt;
102 case CHAN1_COUNTER_ID:
103 case CHAN2_COUNTER_ID:
104 case CHAN3_COUNTER_ID:
105 case CHAN4_COUNTER_ID:
106 case CHAN5_COUNTER_ID:
107 case CHAN6_COUNTER_ID:
108 case CHAN7_COUNTER_ID:
109 case CHAN8_COUNTER_ID:
110 idx = event->attr.config - CHAN1_COUNTER_ID;
111 new_raw_count = sum_dc.channel_cnt[idx];
115 local64_set(&event->count, new_raw_count);
118 static int meson_ddr_perf_event_init(struct perf_event *event)
120 struct ddr_pmu *pmu = to_ddr_pmu(event->pmu);
121 u64 config1 = event->attr.config1;
122 u64 config2 = event->attr.config2;
124 if (event->attr.type != event->pmu->type)
127 if (is_sampling_event(event) || event->attach_state & PERF_ATTACH_TASK)
133 /* check if the number of parameters is too much */
134 if (event->attr.config != ALL_CHAN_COUNTER_ID &&
135 hweight64(config1) + hweight64(config2) > MAX_AXI_PORTS_OF_CHANNEL)
138 event->cpu = pmu->cpu;
143 static void meson_ddr_perf_event_start(struct perf_event *event, int flags)
145 struct ddr_pmu *pmu = to_ddr_pmu(event->pmu);
147 memset(&pmu->counters, 0, sizeof(pmu->counters));
151 static int meson_ddr_perf_event_add(struct perf_event *event, int flags)
153 u64 config1 = event->attr.config1;
154 u64 config2 = event->attr.config2;
158 (const unsigned long *)&config1,
159 BITS_PER_TYPE(config1))
160 meson_ddr_set_axi_filter(event, i);
163 (const unsigned long *)&config2,
164 BITS_PER_TYPE(config2))
165 meson_ddr_set_axi_filter(event, i + 64);
167 if (flags & PERF_EF_START)
168 meson_ddr_perf_event_start(event, flags);
173 static void meson_ddr_perf_event_stop(struct perf_event *event, int flags)
175 struct ddr_pmu *pmu = to_ddr_pmu(event->pmu);
177 if (flags & PERF_EF_UPDATE)
178 meson_ddr_perf_event_update(event);
180 dmc_pmu_disable(pmu);
183 static void meson_ddr_perf_event_del(struct perf_event *event, int flags)
185 meson_ddr_perf_event_stop(event, PERF_EF_UPDATE);
188 static ssize_t meson_ddr_perf_cpumask_show(struct device *dev,
189 struct device_attribute *attr,
192 struct ddr_pmu *pmu = dev_get_drvdata(dev);
194 return cpumap_print_to_pagebuf(true, buf, cpumask_of(pmu->cpu));
197 static struct device_attribute meson_ddr_perf_cpumask_attr =
198 __ATTR(cpumask, 0444, meson_ddr_perf_cpumask_show, NULL);
200 static struct attribute *meson_ddr_perf_cpumask_attrs[] = {
201 &meson_ddr_perf_cpumask_attr.attr,
205 static const struct attribute_group ddr_perf_cpumask_attr_group = {
206 .attrs = meson_ddr_perf_cpumask_attrs,
210 pmu_event_show(struct device *dev, struct device_attribute *attr,
213 struct perf_pmu_events_attr *pmu_attr;
215 pmu_attr = container_of(attr, struct perf_pmu_events_attr, attr);
216 return sysfs_emit(page, "event=0x%02llx\n", pmu_attr->id);
220 event_show_unit(struct device *dev, struct device_attribute *attr,
223 return sysfs_emit(page, "MB\n");
227 event_show_scale(struct device *dev, struct device_attribute *attr,
230 /* one count = 16byte = 1.52587890625e-05 MB */
231 return sysfs_emit(page, "1.52587890625e-05\n");
/* Initializer for a perf_pmu_events_attr entry (event id + show hook). */
#define AML_DDR_PMU_EVENT_ATTR(_name, _id)				\
{									\
	.attr = __ATTR(_name, 0444, pmu_event_show, NULL),		\
	.id = _id,							\
}

/* Initializers for the companion <event>.unit / <event>.scale attrs. */
#define AML_DDR_PMU_EVENT_UNIT_ATTR(_name)				\
	__ATTR(_name.unit, 0444, event_show_unit, NULL)

#define AML_DDR_PMU_EVENT_SCALE_ATTR(_name)				\
	__ATTR(_name.scale, 0444, event_show_scale, NULL)
246 static struct device_attribute event_unit_attrs[] = {
247 AML_DDR_PMU_EVENT_UNIT_ATTR(total_rw_bytes),
248 AML_DDR_PMU_EVENT_UNIT_ATTR(chan_1_rw_bytes),
249 AML_DDR_PMU_EVENT_UNIT_ATTR(chan_2_rw_bytes),
250 AML_DDR_PMU_EVENT_UNIT_ATTR(chan_3_rw_bytes),
251 AML_DDR_PMU_EVENT_UNIT_ATTR(chan_4_rw_bytes),
252 AML_DDR_PMU_EVENT_UNIT_ATTR(chan_5_rw_bytes),
253 AML_DDR_PMU_EVENT_UNIT_ATTR(chan_6_rw_bytes),
254 AML_DDR_PMU_EVENT_UNIT_ATTR(chan_7_rw_bytes),
255 AML_DDR_PMU_EVENT_UNIT_ATTR(chan_8_rw_bytes),
258 static struct device_attribute event_scale_attrs[] = {
259 AML_DDR_PMU_EVENT_SCALE_ATTR(total_rw_bytes),
260 AML_DDR_PMU_EVENT_SCALE_ATTR(chan_1_rw_bytes),
261 AML_DDR_PMU_EVENT_SCALE_ATTR(chan_2_rw_bytes),
262 AML_DDR_PMU_EVENT_SCALE_ATTR(chan_3_rw_bytes),
263 AML_DDR_PMU_EVENT_SCALE_ATTR(chan_4_rw_bytes),
264 AML_DDR_PMU_EVENT_SCALE_ATTR(chan_5_rw_bytes),
265 AML_DDR_PMU_EVENT_SCALE_ATTR(chan_6_rw_bytes),
266 AML_DDR_PMU_EVENT_SCALE_ATTR(chan_7_rw_bytes),
267 AML_DDR_PMU_EVENT_SCALE_ATTR(chan_8_rw_bytes),
270 static struct perf_pmu_events_attr event_attrs[] = {
271 AML_DDR_PMU_EVENT_ATTR(total_rw_bytes, ALL_CHAN_COUNTER_ID),
272 AML_DDR_PMU_EVENT_ATTR(chan_1_rw_bytes, CHAN1_COUNTER_ID),
273 AML_DDR_PMU_EVENT_ATTR(chan_2_rw_bytes, CHAN2_COUNTER_ID),
274 AML_DDR_PMU_EVENT_ATTR(chan_3_rw_bytes, CHAN3_COUNTER_ID),
275 AML_DDR_PMU_EVENT_ATTR(chan_4_rw_bytes, CHAN4_COUNTER_ID),
276 AML_DDR_PMU_EVENT_ATTR(chan_5_rw_bytes, CHAN5_COUNTER_ID),
277 AML_DDR_PMU_EVENT_ATTR(chan_6_rw_bytes, CHAN6_COUNTER_ID),
278 AML_DDR_PMU_EVENT_ATTR(chan_7_rw_bytes, CHAN7_COUNTER_ID),
279 AML_DDR_PMU_EVENT_ATTR(chan_8_rw_bytes, CHAN8_COUNTER_ID),
282 /* three attrs are combined an event */
283 static struct attribute *ddr_perf_events_attrs[COUNTER_MAX_ID * 3];
285 static struct attribute_group ddr_perf_events_attr_group = {
287 .attrs = ddr_perf_events_attrs,
290 static umode_t meson_ddr_perf_format_attr_visible(struct kobject *kobj,
291 struct attribute *attr,
294 struct pmu *pmu = dev_get_drvdata(kobj_to_dev(kobj));
295 struct ddr_pmu *ddr_pmu = to_ddr_pmu(pmu);
296 const u64 *capability = ddr_pmu->info.hw_info->capability;
297 struct device_attribute *dev_attr;
299 char value[20]; // config1:xxx, 20 is enough
301 dev_attr = container_of(attr, struct device_attribute, attr);
302 dev_attr->show(NULL, NULL, value);
304 if (sscanf(value, "config1:%d", &id) == 1)
305 return capability[0] & (1ULL << id) ? attr->mode : 0;
307 if (sscanf(value, "config2:%d", &id) == 1)
308 return capability[1] & (1ULL << id) ? attr->mode : 0;
313 static struct attribute_group ddr_perf_format_attr_group = {
315 .is_visible = meson_ddr_perf_format_attr_visible,
318 static ssize_t meson_ddr_perf_identifier_show(struct device *dev,
319 struct device_attribute *attr,
322 struct ddr_pmu *pmu = dev_get_drvdata(dev);
324 return sysfs_emit(page, "%s\n", pmu->name);
327 static struct device_attribute meson_ddr_perf_identifier_attr =
328 __ATTR(identifier, 0444, meson_ddr_perf_identifier_show, NULL);
330 static struct attribute *meson_ddr_perf_identifier_attrs[] = {
331 &meson_ddr_perf_identifier_attr.attr,
335 static const struct attribute_group ddr_perf_identifier_attr_group = {
336 .attrs = meson_ddr_perf_identifier_attrs,
339 static const struct attribute_group *attr_groups[] = {
340 &ddr_perf_events_attr_group,
341 &ddr_perf_format_attr_group,
342 &ddr_perf_cpumask_attr_group,
343 &ddr_perf_identifier_attr_group,
347 static irqreturn_t dmc_irq_handler(int irq, void *dev_id)
349 struct dmc_info *info = dev_id;
351 struct dmc_counter counters, *sum_cnter;
354 pmu = dmc_info_to_pmu(info);
356 if (info->hw_info->irq_handler(info, &counters) != 0)
359 sum_cnter = &pmu->counters;
360 sum_cnter->all_cnt += counters.all_cnt;
361 sum_cnter->all_req += counters.all_req;
363 for (i = 0; i < pmu->info.hw_info->chann_nr; i++)
364 sum_cnter->channel_cnt[i] += counters.channel_cnt[i];
366 if (pmu->pmu_enabled)
368 * the timer interrupt only supprt
369 * one shot mode, we have to re-enable
370 * it in ISR to support continue mode.
372 info->hw_info->enable(info);
374 dev_dbg(pmu->dev, "counts: %llu %llu %llu, %llu, %llu, %llu\t\t"
375 "sum: %llu %llu %llu, %llu, %llu, %llu\n",
378 counters.channel_cnt[0],
379 counters.channel_cnt[1],
380 counters.channel_cnt[2],
381 counters.channel_cnt[3],
383 pmu->counters.all_req,
384 pmu->counters.all_cnt,
385 pmu->counters.channel_cnt[0],
386 pmu->counters.channel_cnt[1],
387 pmu->counters.channel_cnt[2],
388 pmu->counters.channel_cnt[3]);
393 static int ddr_perf_offline_cpu(unsigned int cpu, struct hlist_node *node)
395 struct ddr_pmu *pmu = hlist_entry_safe(node, struct ddr_pmu, node);
401 target = cpumask_any_but(cpu_online_mask, cpu);
402 if (target >= nr_cpu_ids)
405 perf_pmu_migrate_context(&pmu->pmu, cpu, target);
408 WARN_ON(irq_set_affinity(pmu->info.irq_num, cpumask_of(pmu->cpu)));
413 static void fill_event_attr(struct ddr_pmu *pmu)
416 struct attribute **dst = ddr_perf_events_attrs;
421 /* fill ALL_CHAN_COUNTER_ID event */
422 dst[j++] = &event_attrs[k].attr.attr;
423 dst[j++] = &event_unit_attrs[k].attr;
424 dst[j++] = &event_scale_attrs[k].attr;
428 /* fill each channel event */
429 for (i = 0; i < pmu->info.hw_info->chann_nr; i++, k++) {
430 dst[j++] = &event_attrs[k].attr.attr;
431 dst[j++] = &event_unit_attrs[k].attr;
432 dst[j++] = &event_scale_attrs[k].attr;
435 dst[j] = NULL; /* mark end */
438 static void fmt_attr_fill(struct attribute **fmt_attr)
440 ddr_perf_format_attr_group.attrs = fmt_attr;
443 static int ddr_pmu_parse_dt(struct platform_device *pdev,
444 struct dmc_info *info)
449 info->hw_info = of_device_get_match_data(&pdev->dev);
451 for (i = 0; i < info->hw_info->dmc_nr; i++) {
452 /* resource 0 for ddr register base */
453 base = devm_platform_ioremap_resource(pdev, i);
455 return PTR_ERR(base);
457 info->ddr_reg[i] = base;
460 /* resource i for pll register base */
461 base = devm_platform_ioremap_resource(pdev, i);
463 return PTR_ERR(base);
465 info->pll_reg = base;
467 ret = platform_get_irq(pdev, 0);
473 ret = devm_request_irq(&pdev->dev, info->irq_num, dmc_irq_handler,
474 IRQF_NOBALANCING, dev_name(&pdev->dev),
482 int meson_ddr_pmu_create(struct platform_device *pdev)
488 pmu = devm_kzalloc(&pdev->dev, sizeof(struct ddr_pmu), GFP_KERNEL);
492 *pmu = (struct ddr_pmu) {
494 .module = THIS_MODULE,
495 .capabilities = PERF_PMU_CAP_NO_EXCLUDE,
496 .task_ctx_nr = perf_invalid_context,
497 .attr_groups = attr_groups,
498 .event_init = meson_ddr_perf_event_init,
499 .add = meson_ddr_perf_event_add,
500 .del = meson_ddr_perf_event_del,
501 .start = meson_ddr_perf_event_start,
502 .stop = meson_ddr_perf_event_stop,
503 .read = meson_ddr_perf_event_update,
507 ret = ddr_pmu_parse_dt(pdev, &pmu->info);
511 fmt_attr_fill(pmu->info.hw_info->fmt_attr);
513 pmu->cpu = smp_processor_id();
515 name = devm_kasprintf(&pdev->dev, GFP_KERNEL, DDR_PERF_DEV_NAME);
519 ret = cpuhp_setup_state_multi(CPUHP_AP_ONLINE_DYN, name, NULL,
520 ddr_perf_offline_cpu);
524 pmu->cpuhp_state = ret;
526 /* Register the pmu instance for cpu hotplug */
527 ret = cpuhp_state_add_instance_nocalls(pmu->cpuhp_state, &pmu->node);
529 goto cpuhp_instance_err;
531 fill_event_attr(pmu);
533 ret = perf_pmu_register(&pmu->pmu, name, -1);
535 goto pmu_register_err;
538 pmu->dev = &pdev->dev;
539 pmu->pmu_enabled = false;
541 platform_set_drvdata(pdev, pmu);
546 cpuhp_state_remove_instance_nocalls(pmu->cpuhp_state, &pmu->node);
549 cpuhp_remove_state(pmu->cpuhp_state);
554 int meson_ddr_pmu_remove(struct platform_device *pdev)
556 struct ddr_pmu *pmu = platform_get_drvdata(pdev);
558 perf_pmu_unregister(&pmu->pmu);
559 cpuhp_state_remove_instance_nocalls(pmu->cpuhp_state, &pmu->node);
560 cpuhp_remove_state(pmu->cpuhp_state);