// SPDX-License-Identifier: GPL-2.0-only
/*
 * HiSilicon SoC Hardware event counters support
 *
 * Copyright (C) 2017 HiSilicon Limited
 * Author: Anurup M <anurup.m@huawei.com>
 *         Shaokun Zhang <zhangshaokun@hisilicon.com>
 *
 * This code is based on the uncore PMUs like arm-cci and arm-ccn.
 */

#include <linux/bitmap.h>
#include <linux/bitops.h>
#include <linux/bug.h>
#include <linux/err.h>
#include <linux/errno.h>
#include <linux/interrupt.h>

#include <asm/cputype.h>
#include <asm/local64.h>

#include "hisi_uncore_pmu.h"

#define HISI_MAX_PERIOD(nr)	(GENMASK_ULL((nr) - 1, 0))

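/*
 * For example, HISI_MAX_PERIOD(32) == GENMASK_ULL(31, 0) == 0xffffffff,
 * the mask used in hisi_uncore_pmu_event_update() to wrap counter deltas
 * to the hardware counter width.
 */
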
/*
 * PMU format attributes
 */
ssize_t hisi_format_sysfs_show(struct device *dev,
			       struct device_attribute *attr, char *buf)
{
	struct dev_ext_attribute *eattr;

	eattr = container_of(attr, struct dev_ext_attribute, attr);

	return sysfs_emit(buf, "%s\n", (char *)eattr->var);
}
EXPORT_SYMBOL_GPL(hisi_format_sysfs_show);

/*
 * PMU event attributes
 */
ssize_t hisi_event_sysfs_show(struct device *dev,
			      struct device_attribute *attr, char *page)
{
	struct dev_ext_attribute *eattr;

	eattr = container_of(attr, struct dev_ext_attribute, attr);

	return sysfs_emit(page, "config=0x%lx\n", (unsigned long)eattr->var);
}
EXPORT_SYMBOL_GPL(hisi_event_sysfs_show);

/*
 * sysfs cpumask attributes. For an uncore PMU, there is only a single CPU
 * to show.
 */
ssize_t hisi_cpumask_sysfs_show(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	struct hisi_pmu *hisi_pmu = to_hisi_pmu(dev_get_drvdata(dev));

	return sysfs_emit(buf, "%d\n", hisi_pmu->on_cpu);
}
EXPORT_SYMBOL_GPL(hisi_cpumask_sysfs_show);

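/*
 * Illustrative usage (the PMU name is hypothetical): reading
 * /sys/bus/event_source/devices/hisi_sccl1_ddrc0/cpumask prints the
 * single CPU this PMU counts on, e.g. "0".
 */
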
static bool hisi_validate_event_group(struct perf_event *event)
{
	struct perf_event *sibling, *leader = event->group_leader;
	struct hisi_pmu *hisi_pmu = to_hisi_pmu(event->pmu);
	/* Include count for the event */
	int counters = 1;

	if (!is_software_event(leader)) {
		/*
		 * We must NOT create groups containing mixed PMUs, although
		 * software events are acceptable
		 */
		if (leader->pmu != event->pmu)
			return false;

		/* Increment counter for the leader */
		if (leader != event)
			counters++;
	}

	for_each_sibling_event(sibling, event->group_leader) {
		if (is_software_event(sibling))
			continue;
		if (sibling->pmu != event->pmu)
			return false;
		/* Increment counter for each sibling */
		counters++;
	}

	/* The group cannot count more events than there are HW counters */
	return counters <= hisi_pmu->num_counters;
}

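/*
 * For example, with a hypothetical PMU exposing 4 counters, a group of one
 * hardware leader and three hardware siblings (counters == 4) is accepted,
 * while a fourth hardware sibling would make the group invalid.
 */
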
int hisi_uncore_pmu_get_event_idx(struct perf_event *event)
{
	struct hisi_pmu *hisi_pmu = to_hisi_pmu(event->pmu);
	unsigned long *used_mask = hisi_pmu->pmu_events.used_mask;
	u32 num_counters = hisi_pmu->num_counters;
	int idx;

	idx = find_first_zero_bit(used_mask, num_counters);
	if (idx == num_counters)
		return -EAGAIN;

	set_bit(idx, used_mask);

	return idx;
}
EXPORT_SYMBOL_GPL(hisi_uncore_pmu_get_event_idx);

ssize_t hisi_uncore_pmu_identifier_attr_show(struct device *dev,
					     struct device_attribute *attr,
					     char *page)
{
	struct hisi_pmu *hisi_pmu = to_hisi_pmu(dev_get_drvdata(dev));

	return sysfs_emit(page, "0x%08x\n", hisi_pmu->identifier);
}
EXPORT_SYMBOL_GPL(hisi_uncore_pmu_identifier_attr_show);

static void hisi_uncore_pmu_clear_event_idx(struct hisi_pmu *hisi_pmu, int idx)
{
	clear_bit(idx, hisi_pmu->pmu_events.used_mask);
}

static irqreturn_t hisi_uncore_pmu_isr(int irq, void *data)
{
	struct hisi_pmu *hisi_pmu = data;
	struct perf_event *event;
	unsigned long overflown;
	int idx;

	overflown = hisi_pmu->ops->get_int_status(hisi_pmu);
	if (!overflown)
		return IRQ_NONE;

	/*
	 * Find the counter index which overflowed if the bit was set
	 * and handle it.
	 */
	for_each_set_bit(idx, &overflown, hisi_pmu->num_counters) {
		/* Write 1 to clear the IRQ status flag */
		hisi_pmu->ops->clear_int_status(hisi_pmu, idx);
		/* Get the corresponding event struct */
		event = hisi_pmu->pmu_events.hw_events[idx];
		if (!event)
			continue;

		hisi_uncore_pmu_event_update(event);
		hisi_uncore_pmu_set_event_period(event);
	}

	return IRQ_HANDLED;
}

int hisi_uncore_pmu_init_irq(struct hisi_pmu *hisi_pmu,
			     struct platform_device *pdev)
{
	int irq, ret;

	irq = platform_get_irq(pdev, 0);
	if (irq < 0)
		return irq;

	ret = devm_request_irq(&pdev->dev, irq, hisi_uncore_pmu_isr,
			       IRQF_NOBALANCING | IRQF_NO_THREAD,
			       dev_name(&pdev->dev), hisi_pmu);
	if (ret < 0) {
		dev_err(&pdev->dev,
			"Fail to request IRQ: %d ret: %d.\n", irq, ret);
		return ret;
	}

	hisi_pmu->irq = irq;

	return 0;
}
EXPORT_SYMBOL_GPL(hisi_uncore_pmu_init_irq);

int hisi_uncore_pmu_event_init(struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;
	struct hisi_pmu *hisi_pmu;

	if (event->attr.type != event->pmu->type)
		return -ENOENT;

	/*
	 * We do not support sampling as the counters are all
	 * shared by all CPU cores in a CPU die (SCCL). Also we
	 * do not support attaching to a task (per-process mode).
	 */
	if (is_sampling_event(event) || event->attach_state & PERF_ATTACH_TASK)
		return -EOPNOTSUPP;

	/*
	 * The uncore counters are not specific to any CPU, so they cannot
	 * support per-task counting.
	 */
	if (event->cpu < 0)
		return -EINVAL;

	/*
	 * Validate that the events in the group do not exceed the
	 * available counters in hardware.
	 */
	if (!hisi_validate_event_group(event))
		return -EINVAL;

	hisi_pmu = to_hisi_pmu(event->pmu);
	if (event->attr.config > hisi_pmu->check_event)
		return -EINVAL;

	if (hisi_pmu->on_cpu == -1)
		return -EINVAL;

	/*
	 * We don't assign an index until we actually place the event onto
	 * hardware. Use -1 to signify that we haven't decided where to put it
	 * yet.
	 */
	hwc->idx		= -1;
	hwc->config_base	= event->attr.config;

	if (hisi_pmu->ops->check_filter && hisi_pmu->ops->check_filter(event))
		return -EINVAL;

	/* Enforce the same CPU for all events in this PMU */
	event->cpu = hisi_pmu->on_cpu;

	return 0;
}
EXPORT_SYMBOL_GPL(hisi_uncore_pmu_event_init);

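/*
 * Illustrative usage (PMU and event names are hypothetical): a counting
 * invocation such as "perf stat -a -e hisi_sccl1_ddrc0/flux_wr/" passes
 * the checks above, while sampling ("perf record") or per-task attach
 * fails them with -EOPNOTSUPP or -EINVAL.
 */
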
/*
 * Set the counter to count the event that we're interested in,
 * and enable interrupt and counter.
 */
static void hisi_uncore_pmu_enable_event(struct perf_event *event)
{
	struct hisi_pmu *hisi_pmu = to_hisi_pmu(event->pmu);
	struct hw_perf_event *hwc = &event->hw;

	hisi_pmu->ops->write_evtype(hisi_pmu, hwc->idx,
				    HISI_GET_EVENTID(event));

	if (hisi_pmu->ops->enable_filter)
		hisi_pmu->ops->enable_filter(event);

	hisi_pmu->ops->enable_counter_int(hisi_pmu, hwc);
	hisi_pmu->ops->enable_counter(hisi_pmu, hwc);
}

/*
 * Disable counter and interrupt.
 */
static void hisi_uncore_pmu_disable_event(struct perf_event *event)
{
	struct hisi_pmu *hisi_pmu = to_hisi_pmu(event->pmu);
	struct hw_perf_event *hwc = &event->hw;

	hisi_pmu->ops->disable_counter(hisi_pmu, hwc);
	hisi_pmu->ops->disable_counter_int(hisi_pmu, hwc);

	if (hisi_pmu->ops->disable_filter)
		hisi_pmu->ops->disable_filter(event);
}

void hisi_uncore_pmu_set_event_period(struct perf_event *event)
{
	struct hisi_pmu *hisi_pmu = to_hisi_pmu(event->pmu);
	struct hw_perf_event *hwc = &event->hw;
	/*
	 * The HiSilicon PMU counters support 32 bits or 48 bits, depending on
	 * the PMU. We reduce it to 2^(counter_bits - 1) to account for the
	 * extreme interrupt latency. So we could hopefully handle the overflow
	 * interrupt before another 2^(counter_bits - 1) events occur and the
	 * counter overtakes its previous value.
	 */
	u64 val = BIT_ULL(hisi_pmu->counter_bits - 1);

	local64_set(&hwc->prev_count, val);
	/* Write start value to the hardware event counter */
	hisi_pmu->ops->write_counter(hisi_pmu, hwc, val);
}
EXPORT_SYMBOL_GPL(hisi_uncore_pmu_set_event_period);

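/*
 * Worked example: for a 32-bit counter, val = BIT_ULL(31) = 0x80000000,
 * so the counter starts at half its range and can count 2^31 events
 * before wrapping and raising the overflow interrupt.
 */
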
void hisi_uncore_pmu_event_update(struct perf_event *event)
{
	struct hisi_pmu *hisi_pmu = to_hisi_pmu(event->pmu);
	struct hw_perf_event *hwc = &event->hw;
	u64 delta, prev_raw_count, new_raw_count;

	do {
		/* Read the count from the counter register */
		new_raw_count = hisi_pmu->ops->read_counter(hisi_pmu, hwc);
		prev_raw_count = local64_read(&hwc->prev_count);
	} while (local64_cmpxchg(&hwc->prev_count, prev_raw_count,
				 new_raw_count) != prev_raw_count);
	/*
	 * Compute the delta and mask it to the counter width so a wrap of
	 * the hardware counter is handled correctly.
	 */
	delta = (new_raw_count - prev_raw_count) &
		HISI_MAX_PERIOD(hisi_pmu->counter_bits);
	local64_add(delta, &event->count);
}
EXPORT_SYMBOL_GPL(hisi_uncore_pmu_event_update);

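/*
 * Illustrative wraparound case, assuming a 32-bit counter: with
 * prev_raw_count = 0xfffffff0 and new_raw_count = 0x10, the u64
 * subtraction above gives 0xffffffff00000020, and masking with
 * HISI_MAX_PERIOD(32) yields the correct delta of 0x20.
 */
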
void hisi_uncore_pmu_start(struct perf_event *event, int flags)
{
	struct hisi_pmu *hisi_pmu = to_hisi_pmu(event->pmu);
	struct hw_perf_event *hwc = &event->hw;

	if (WARN_ON_ONCE(!(hwc->state & PERF_HES_STOPPED)))
		return;

	WARN_ON_ONCE(!(hwc->state & PERF_HES_UPTODATE));
	hwc->state = 0;
	hisi_uncore_pmu_set_event_period(event);

	if (flags & PERF_EF_RELOAD) {
		u64 prev_raw_count = local64_read(&hwc->prev_count);

		hisi_pmu->ops->write_counter(hisi_pmu, hwc, prev_raw_count);
	}

	hisi_uncore_pmu_enable_event(event);
	perf_event_update_userpage(event);
}
EXPORT_SYMBOL_GPL(hisi_uncore_pmu_start);

void hisi_uncore_pmu_stop(struct perf_event *event, int flags)
{
	struct hw_perf_event *hwc = &event->hw;

	hisi_uncore_pmu_disable_event(event);
	WARN_ON_ONCE(hwc->state & PERF_HES_STOPPED);
	hwc->state |= PERF_HES_STOPPED;

	if (hwc->state & PERF_HES_UPTODATE)
		return;

	/* Read hardware counter and update the perf counter statistics */
	hisi_uncore_pmu_event_update(event);
	hwc->state |= PERF_HES_UPTODATE;
}
EXPORT_SYMBOL_GPL(hisi_uncore_pmu_stop);

int hisi_uncore_pmu_add(struct perf_event *event, int flags)
{
	struct hisi_pmu *hisi_pmu = to_hisi_pmu(event->pmu);
	struct hw_perf_event *hwc = &event->hw;
	int idx;

	hwc->state = PERF_HES_STOPPED | PERF_HES_UPTODATE;

	/* Get an available counter index for counting */
	idx = hisi_pmu->ops->get_event_idx(event);
	if (idx < 0)
		return idx;

	event->hw.idx = idx;
	hisi_pmu->pmu_events.hw_events[idx] = event;

	if (flags & PERF_EF_START)
		hisi_uncore_pmu_start(event, PERF_EF_RELOAD);

	return 0;
}
EXPORT_SYMBOL_GPL(hisi_uncore_pmu_add);

void hisi_uncore_pmu_del(struct perf_event *event, int flags)
{
	struct hisi_pmu *hisi_pmu = to_hisi_pmu(event->pmu);
	struct hw_perf_event *hwc = &event->hw;

	hisi_uncore_pmu_stop(event, PERF_EF_UPDATE);
	hisi_uncore_pmu_clear_event_idx(hisi_pmu, hwc->idx);
	perf_event_update_userpage(event);
	hisi_pmu->pmu_events.hw_events[hwc->idx] = NULL;
}
EXPORT_SYMBOL_GPL(hisi_uncore_pmu_del);

void hisi_uncore_pmu_read(struct perf_event *event)
{
	/* Read hardware counter and update the perf counter statistics */
	hisi_uncore_pmu_event_update(event);
}
EXPORT_SYMBOL_GPL(hisi_uncore_pmu_read);

void hisi_uncore_pmu_enable(struct pmu *pmu)
{
	struct hisi_pmu *hisi_pmu = to_hisi_pmu(pmu);
	bool enabled = !bitmap_empty(hisi_pmu->pmu_events.used_mask,
				     hisi_pmu->num_counters);

	if (!enabled)
		return;

	hisi_pmu->ops->start_counters(hisi_pmu);
}
EXPORT_SYMBOL_GPL(hisi_uncore_pmu_enable);

void hisi_uncore_pmu_disable(struct pmu *pmu)
{
	struct hisi_pmu *hisi_pmu = to_hisi_pmu(pmu);

	hisi_pmu->ops->stop_counters(hisi_pmu);
}
EXPORT_SYMBOL_GPL(hisi_uncore_pmu_disable);

/*
 * The Super CPU Cluster (SCCL) and CPU Cluster (CCL) IDs can be
 * determined from the MPIDR_EL1, but the encoding varies by CPU:
 *
 * - For MT variants of TSV110:
 *   SCCL is Aff2[7:3], CCL is Aff2[2:0]
 *
 * - For other MT parts:
 *   SCCL is Aff3[7:0], CCL is Aff2[7:0]
 *
 * - For non-MT parts:
 *   SCCL is Aff2[7:0], CCL is Aff1[7:0]
 */
static void hisi_read_sccl_and_ccl_id(int *scclp, int *cclp)
{
	u64 mpidr = read_cpuid_mpidr();
	int aff3 = MPIDR_AFFINITY_LEVEL(mpidr, 3);
	int aff2 = MPIDR_AFFINITY_LEVEL(mpidr, 2);
	int aff1 = MPIDR_AFFINITY_LEVEL(mpidr, 1);
	bool mt = mpidr & MPIDR_MT_BITMASK;
	int sccl, ccl;

	if (mt && read_cpuid_part_number() == HISI_CPU_PART_TSV110) {
		sccl = aff2 >> 3;
		ccl = aff2 & 0x7;
	} else if (mt) {
		sccl = aff3;
		ccl = aff2;
	} else {
		sccl = aff2;
		ccl = aff1;
	}

	if (scclp)
		*scclp = sccl;
	if (cclp)
		*cclp = ccl;
}

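/*
 * Illustrative decode, assuming a hypothetical MT TSV110 CPU with
 * Aff2 = 0x2b: SCCL = 0x2b >> 3 = 5 and CCL = 0x2b & 0x7 = 3.
 */
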
/*
 * Check whether the CPU is associated with this uncore PMU
 */
static bool hisi_pmu_cpu_is_associated_pmu(struct hisi_pmu *hisi_pmu)
{
	int sccl_id, ccl_id;

	/* If SCCL_ID is -1, the PMU is in a SICL and has no CPU affinity */
	if (hisi_pmu->sccl_id == -1)
		return true;

	if (hisi_pmu->ccl_id == -1) {
		/* If CCL_ID is -1, the PMU only shares the same SCCL */
		hisi_read_sccl_and_ccl_id(&sccl_id, NULL);

		return sccl_id == hisi_pmu->sccl_id;
	}

	hisi_read_sccl_and_ccl_id(&sccl_id, &ccl_id);

	return sccl_id == hisi_pmu->sccl_id && ccl_id == hisi_pmu->ccl_id;
}

int hisi_uncore_pmu_online_cpu(unsigned int cpu, struct hlist_node *node)
{
	struct hisi_pmu *hisi_pmu = hlist_entry_safe(node, struct hisi_pmu,
						     node);

	if (!hisi_pmu_cpu_is_associated_pmu(hisi_pmu))
		return 0;

	cpumask_set_cpu(cpu, &hisi_pmu->associated_cpus);

	/* If another CPU is already managing this PMU, simply return. */
	if (hisi_pmu->on_cpu != -1)
		return 0;

	/* Use this CPU in cpumask for event counting */
	hisi_pmu->on_cpu = cpu;

	/* The overflow interrupt should also be handled on the same CPU */
	WARN_ON(irq_set_affinity(hisi_pmu->irq, cpumask_of(cpu)));

	return 0;
}
EXPORT_SYMBOL_GPL(hisi_uncore_pmu_online_cpu);

int hisi_uncore_pmu_offline_cpu(unsigned int cpu, struct hlist_node *node)
{
	struct hisi_pmu *hisi_pmu = hlist_entry_safe(node, struct hisi_pmu,
						     node);
	cpumask_t pmu_online_cpus;
	unsigned int target;

	if (!cpumask_test_and_clear_cpu(cpu, &hisi_pmu->associated_cpus))
		return 0;

	/* Nothing to do if this CPU doesn't own the PMU */
	if (hisi_pmu->on_cpu != cpu)
		return 0;

	/* Give up ownership of the PMU */
	hisi_pmu->on_cpu = -1;

	/* Choose a new CPU to migrate ownership of the PMU to */
	cpumask_and(&pmu_online_cpus, &hisi_pmu->associated_cpus,
		    cpu_online_mask);
	target = cpumask_any_but(&pmu_online_cpus, cpu);
	if (target >= nr_cpu_ids)
		return 0;

	perf_pmu_migrate_context(&hisi_pmu->pmu, cpu, target);
	/* Use this CPU for event counting */
	hisi_pmu->on_cpu = target;
	WARN_ON(irq_set_affinity(hisi_pmu->irq, cpumask_of(target)));

	return 0;
}
EXPORT_SYMBOL_GPL(hisi_uncore_pmu_offline_cpu);

void hisi_pmu_init(struct hisi_pmu *hisi_pmu, struct module *module)
{
	struct pmu *pmu = &hisi_pmu->pmu;

	pmu->module		= module;
	pmu->task_ctx_nr	= perf_invalid_context;
	pmu->event_init		= hisi_uncore_pmu_event_init;
	pmu->pmu_enable		= hisi_uncore_pmu_enable;
	pmu->pmu_disable	= hisi_uncore_pmu_disable;
	pmu->add		= hisi_uncore_pmu_add;
	pmu->del		= hisi_uncore_pmu_del;
	pmu->start		= hisi_uncore_pmu_start;
	pmu->stop		= hisi_uncore_pmu_stop;
	pmu->read		= hisi_uncore_pmu_read;
	pmu->attr_groups	= hisi_pmu->pmu_events.attr_groups;
	pmu->capabilities	= PERF_PMU_CAP_NO_EXCLUDE;
}
EXPORT_SYMBOL_GPL(hisi_pmu_init);

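/*
 * Illustrative usage sketch ("ddrc_pmu" and "name" are placeholders, not
 * part of this file): a device driver fills in the common callbacks via
 * hisi_pmu_init() and then registers the PMU with the perf core, e.g.:
 *
 *	hisi_pmu_init(ddrc_pmu, THIS_MODULE);
 *	ret = perf_pmu_register(&ddrc_pmu->pmu, name, -1);
 */
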
MODULE_LICENSE("GPL v2");