// SPDX-License-Identifier: GPL-2.0
/*
 * Linux performance counter support for LoongArch.
 *
 * Copyright (C) 2022 Loongson Technology Corporation Limited
 *
 * Derived from MIPS:
 * Copyright (C) 2010 MIPS Technologies, Inc.
 * Copyright (C) 2011 Cavium Networks, Inc.
 * Author: Deng-Cheng Zhu
 */

#include <linux/cpumask.h>
#include <linux/interrupt.h>
#include <linux/smp.h>
#include <linux/kernel.h>
#include <linux/perf_event.h>
#include <linux/uaccess.h>
#include <linux/sched/task_stack.h>

#include <asm/irq.h>
#include <asm/irq_regs.h>
#include <asm/stacktrace.h>
#include <asm/unwind.h>

/*
 * Get the return address for a single stackframe and return a pointer to the
 * next frame tail.
 */
static unsigned long
user_backtrace(struct perf_callchain_entry_ctx *entry, unsigned long fp)
{
	int err;
	unsigned long __user *user_frame_tail;
	struct stack_frame buftail;

	user_frame_tail = (unsigned long __user *)(fp - sizeof(struct stack_frame));

	/* Also check accessibility of one struct frame_tail beyond */
	if (!access_ok(user_frame_tail, sizeof(buftail)))
		return 0;

	pagefault_disable();
	err = __copy_from_user_inatomic(&buftail, user_frame_tail, sizeof(buftail));
	pagefault_enable();

	if (err || (unsigned long)user_frame_tail >= buftail.fp)
		return 0;

	perf_callchain_store(entry, buftail.ra);

	return buftail.fp;
}
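
/*
 * User call chains are walked through saved frame pointers: start from the
 * frame pointer register ($r22/$fp) in pt_regs and follow each (ra, fp)
 * record while the frame stays 16-byte aligned and entry->max_stack is not
 * exceeded.
 */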

void perf_callchain_user(struct perf_callchain_entry_ctx *entry,
			 struct pt_regs *regs)
{
	unsigned long fp;

	if (perf_guest_state()) {
		/* We don't support guest os callchain now */
		return;
	}

	perf_callchain_store(entry, regs->csr_era);

	fp = regs->regs[22];

	while (entry->nr < entry->max_stack && fp && !((unsigned long)fp & 0xf))
		fp = user_backtrace(entry, fp);
}

void perf_callchain_kernel(struct perf_callchain_entry_ctx *entry,
			   struct pt_regs *regs)
{
	struct unwind_state state;
	unsigned long addr;

	for (unwind_start(&state, current, regs);
	     !unwind_done(&state); unwind_next_frame(&state)) {
		addr = unwind_get_return_address(&state);
		if (!addr || perf_callchain_store(entry, addr))
			return;
	}
}

#define LOONGARCH_MAX_HWEVENTS 32

struct cpu_hw_events {
	/* Array of events on this cpu. */
	struct perf_event	*events[LOONGARCH_MAX_HWEVENTS];

	/*
	 * Set the bit (indexed by the counter number) when the counter
	 * is used for an event.
	 */
	unsigned long		used_mask[BITS_TO_LONGS(LOONGARCH_MAX_HWEVENTS)];

	/*
	 * Software copy of the control register for each performance counter.
	 */
	unsigned int		saved_ctrl[LOONGARCH_MAX_HWEVENTS];
};
static DEFINE_PER_CPU(struct cpu_hw_events, cpu_hw_events) = {
	.saved_ctrl = {0},
};

/* The description of LoongArch performance events. */
struct loongarch_perf_event {
	unsigned int event_id;
};

static struct loongarch_perf_event raw_event;
static DEFINE_MUTEX(raw_event_mutex);

#define C(x) PERF_COUNT_HW_CACHE_##x
#define HW_OP_UNSUPPORTED		0xffffffff
#define CACHE_OP_UNSUPPORTED		0xffffffff

#define PERF_MAP_ALL_UNSUPPORTED					\
	[0 ... PERF_COUNT_HW_MAX - 1] = {HW_OP_UNSUPPORTED}

#define PERF_CACHE_MAP_ALL_UNSUPPORTED					\
[0 ... C(MAX) - 1] = {							\
	[0 ... C(OP_MAX) - 1] = {					\
		[0 ... C(RESULT_MAX) - 1] = {CACHE_OP_UNSUPPORTED},	\
	},								\
}

struct loongarch_pmu {
	u64		max_period;
	u64		valid_count;
	u64		overflow;
	const char	*name;
	unsigned int	num_counters;
	u64		(*read_counter)(unsigned int idx);
	void		(*write_counter)(unsigned int idx, u64 val);
	const struct loongarch_perf_event *(*map_raw_event)(u64 config);
	const struct loongarch_perf_event (*general_event_map)[PERF_COUNT_HW_MAX];
	const struct loongarch_perf_event (*cache_event_map)
				[PERF_COUNT_HW_CACHE_MAX]
				[PERF_COUNT_HW_CACHE_OP_MAX]
				[PERF_COUNT_HW_CACHE_RESULT_MAX];
};

static struct loongarch_pmu loongarch_pmu;

#define M_PERFCTL_EVENT(event)	(event & CSR_PERFCTRL_EVENT)

#define M_PERFCTL_COUNT_EVENT_WHENEVER	(CSR_PERFCTRL_PLV0 |	\
					CSR_PERFCTRL_PLV1 |	\
					CSR_PERFCTRL_PLV2 |	\
					CSR_PERFCTRL_PLV3 |	\
					CSR_PERFCTRL_IE)

#define M_PERFCTL_CONFIG_MASK		0x1f0000
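
/*
 * 0x1f0000 keeps only the PLV0-PLV3 privilege-level enables and the IE
 * (interrupt enable) bit of the PERFCTRL CSR; the event number itself is
 * masked separately via M_PERFCTL_EVENT().
 */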

static void pause_local_counters(void);
static void resume_local_counters(void);

static u64 loongarch_pmu_read_counter(unsigned int idx)
{
	u64 val = -1;

	switch (idx) {
	case 0: val = read_csr_perfcntr0(); break;
	case 1: val = read_csr_perfcntr1(); break;
	case 2: val = read_csr_perfcntr2(); break;
	case 3: val = read_csr_perfcntr3(); break;
	default:
		WARN_ONCE(1, "Invalid performance counter number (%d)\n", idx);
		return 0;
	}

	return val;
}

static void loongarch_pmu_write_counter(unsigned int idx, u64 val)
{
	switch (idx) {
	case 0: write_csr_perfcntr0(val); return;
	case 1: write_csr_perfcntr1(val); return;
	case 2: write_csr_perfcntr2(val); return;
	case 3: write_csr_perfcntr3(val); return;
	default:
		WARN_ONCE(1, "Invalid performance counter number (%d)\n", idx);
		return;
	}
}

static unsigned int loongarch_pmu_read_control(unsigned int idx)
{
	unsigned int val = -1;

	switch (idx) {
	case 0: val = read_csr_perfctrl0(); break;
	case 1: val = read_csr_perfctrl1(); break;
	case 2: val = read_csr_perfctrl2(); break;
	case 3: val = read_csr_perfctrl3(); break;
	default:
		WARN_ONCE(1, "Invalid performance counter number (%d)\n", idx);
		return 0;
	}

	return val;
}

static void loongarch_pmu_write_control(unsigned int idx, unsigned int val)
{
	switch (idx) {
	case 0: write_csr_perfctrl0(val); return;
	case 1: write_csr_perfctrl1(val); return;
	case 2: write_csr_perfctrl2(val); return;
	case 3: write_csr_perfctrl3(val); return;
	default:
		WARN_ONCE(1, "Invalid performance counter number (%d)\n", idx);
		return;
	}
}

static int loongarch_pmu_alloc_counter(struct cpu_hw_events *cpuc, struct hw_perf_event *hwc)
{
	int i;

	for (i = 0; i < loongarch_pmu.num_counters; i++) {
		if (!test_and_set_bit(i, cpuc->used_mask))
			return i;
	}

	return -EAGAIN;
}

static void loongarch_pmu_enable_event(struct hw_perf_event *evt, int idx)
{
	unsigned int cpu;
	struct perf_event *event = container_of(evt, struct perf_event, hw);
	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);

	WARN_ON(idx < 0 || idx >= loongarch_pmu.num_counters);

	/* Make sure interrupt enabled. */
	cpuc->saved_ctrl[idx] = M_PERFCTL_EVENT(evt->event_base) |
		(evt->config_base & M_PERFCTL_CONFIG_MASK) | CSR_PERFCTRL_IE;

	cpu = (event->cpu >= 0) ? event->cpu : smp_processor_id();

	/*
	 * We do not actually let the counter run. Leave it until start().
	 */
	pr_debug("Enabling perf counter for CPU%d\n", cpu);
}

static void loongarch_pmu_disable_event(int idx)
{
	unsigned long flags;
	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);

	WARN_ON(idx < 0 || idx >= loongarch_pmu.num_counters);

	local_irq_save(flags);
	cpuc->saved_ctrl[idx] = loongarch_pmu_read_control(idx) &
		~M_PERFCTL_COUNT_EVENT_WHENEVER;
	loongarch_pmu_write_control(idx, cpuc->saved_ctrl[idx]);
	local_irq_restore(flags);
}
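
/*
 * The counters count up and flag an overflow once bit 63 becomes set, so
 * the counter is preloaded with (overflow - left): after "left" more events
 * it reaches the threshold that pmu_handle_irq() tests for.
 */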

static int loongarch_pmu_event_set_period(struct perf_event *event,
				    struct hw_perf_event *hwc,
				    int idx)
{
	int ret = 0;
	u64 left = local64_read(&hwc->period_left);
	u64 period = hwc->sample_period;

	if (unlikely((left + period) & (1ULL << 63))) {
		/* left underflowed by more than period. */
		left = period;
		local64_set(&hwc->period_left, left);
		hwc->last_period = period;
		ret = 1;
	} else if (unlikely((left + period) <= period)) {
		/* left underflowed by less than period. */
		left += period;
		local64_set(&hwc->period_left, left);
		hwc->last_period = period;
		ret = 1;
	}

	if (left > loongarch_pmu.max_period) {
		left = loongarch_pmu.max_period;
		local64_set(&hwc->period_left, left);
	}

	local64_set(&hwc->prev_count, loongarch_pmu.overflow - left);

	loongarch_pmu.write_counter(idx, loongarch_pmu.overflow - left);

	perf_event_update_userpage(event);

	return ret;
}
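
/*
 * Fold the current hardware count into event->count. The cmpxchg retry
 * loop makes the update safe against the overflow interrupt updating
 * prev_count for the same counter concurrently.
 */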

static void loongarch_pmu_event_update(struct perf_event *event,
				 struct hw_perf_event *hwc,
				 int idx)
{
	u64 delta;
	u64 prev_raw_count, new_raw_count;

again:
	prev_raw_count = local64_read(&hwc->prev_count);
	new_raw_count = loongarch_pmu.read_counter(idx);

	if (local64_cmpxchg(&hwc->prev_count, prev_raw_count,
			    new_raw_count) != prev_raw_count)
		goto again;

	delta = new_raw_count - prev_raw_count;

	local64_add(delta, &event->count);
	local64_sub(delta, &hwc->period_left);
}

static void loongarch_pmu_start(struct perf_event *event, int flags)
{
	struct hw_perf_event *hwc = &event->hw;

	if (flags & PERF_EF_RELOAD)
		WARN_ON_ONCE(!(hwc->state & PERF_HES_UPTODATE));

	hwc->state = 0;

	/* Set the period for the event. */
	loongarch_pmu_event_set_period(event, hwc, hwc->idx);

	/* Enable the event. */
	loongarch_pmu_enable_event(hwc, hwc->idx);
}

static void loongarch_pmu_stop(struct perf_event *event, int flags)
{
	struct hw_perf_event *hwc = &event->hw;

	if (!(hwc->state & PERF_HES_STOPPED)) {
		/* We are working on a local event. */
		loongarch_pmu_disable_event(hwc->idx);
		barrier();
		loongarch_pmu_event_update(event, hwc, hwc->idx);
		hwc->state |= PERF_HES_STOPPED | PERF_HES_UPTODATE;
	}
}

static int loongarch_pmu_add(struct perf_event *event, int flags)
{
	int idx, err = 0;
	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
	struct hw_perf_event *hwc = &event->hw;

	perf_pmu_disable(event->pmu);

	/* To look for a free counter for this event. */
	idx = loongarch_pmu_alloc_counter(cpuc, hwc);
	if (idx < 0) {
		err = idx;
		goto out;
	}

	/*
	 * If there is an event in the counter we are going to use then
	 * make sure it is disabled.
	 */
	event->hw.idx = idx;
	loongarch_pmu_disable_event(idx);
	cpuc->events[idx] = event;

	hwc->state = PERF_HES_STOPPED | PERF_HES_UPTODATE;
	if (flags & PERF_EF_START)
		loongarch_pmu_start(event, PERF_EF_RELOAD);

	/* Propagate our changes to the userspace mapping. */
	perf_event_update_userpage(event);

out:
	perf_pmu_enable(event->pmu);

	return err;
}

static void loongarch_pmu_del(struct perf_event *event, int flags)
{
	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
	struct hw_perf_event *hwc = &event->hw;
	int idx = hwc->idx;

	WARN_ON(idx < 0 || idx >= loongarch_pmu.num_counters);

	loongarch_pmu_stop(event, PERF_EF_UPDATE);
	cpuc->events[idx] = NULL;
	clear_bit(idx, cpuc->used_mask);

	perf_event_update_userpage(event);
}

static void loongarch_pmu_read(struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;

	/* Don't read disabled counters! */
	if (hwc->idx < 0)
		return;

	loongarch_pmu_event_update(event, hwc, hwc->idx);
}

static void loongarch_pmu_enable(struct pmu *pmu)
{
	resume_local_counters();
}

static void loongarch_pmu_disable(struct pmu *pmu)
{
	pause_local_counters();
}

static DEFINE_MUTEX(pmu_reserve_mutex);
static atomic_t active_events = ATOMIC_INIT(0);

static int get_pmc_irq(void)
{
	struct irq_domain *d = irq_find_matching_fwnode(cpuintc_handle, DOMAIN_BUS_ANY);

	if (d)
		return irq_create_mapping(d, INT_PCOV);

	return -EINVAL;
}

static void reset_counters(void *arg);
static int __hw_perf_event_init(struct perf_event *event);

static void hw_perf_event_destroy(struct perf_event *event)
{
	if (atomic_dec_and_mutex_lock(&active_events, &pmu_reserve_mutex)) {
		on_each_cpu(reset_counters, NULL, 1);
		free_irq(get_pmc_irq(), &loongarch_pmu);
		mutex_unlock(&pmu_reserve_mutex);
	}
}

static void handle_associated_event(struct cpu_hw_events *cpuc, int idx,
			struct perf_sample_data *data, struct pt_regs *regs)
{
	struct perf_event *event = cpuc->events[idx];
	struct hw_perf_event *hwc = &event->hw;

	loongarch_pmu_event_update(event, hwc, idx);
	data->period = event->hw.last_period;
	if (!loongarch_pmu_event_set_period(event, hwc, idx))
		return;

	if (perf_event_overflow(event, data, regs))
		loongarch_pmu_disable_event(idx);
}
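
/*
 * The overflow interrupt is shared by all counters on the CPU: scan every
 * counter that is in use and treat a set bit 63 as an overflow of that
 * counter.
 */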

static irqreturn_t pmu_handle_irq(int irq, void *dev)
{
	int n;
	int handled = IRQ_NONE;
	uint64_t counter;
	struct pt_regs *regs;
	struct perf_sample_data data;
	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);

	/*
	 * First we pause the local counters, so that when we are locked
	 * here, the counters are all paused. When it gets locked due to
	 * perf_disable(), the timer interrupt handler will be delayed.
	 *
	 * See also loongarch_pmu_start().
	 */
	pause_local_counters();

	regs = get_irq_regs();

	perf_sample_data_init(&data, 0, 0);

	for (n = 0; n < loongarch_pmu.num_counters; n++) {
		if (test_bit(n, cpuc->used_mask)) {
			counter = loongarch_pmu.read_counter(n);
			if (counter & loongarch_pmu.overflow) {
				handle_associated_event(cpuc, n, &data, regs);
				handled = IRQ_HANDLED;
			}
		}
	}

	resume_local_counters();

	/*
	 * Do all the work for the pending perf events. We can do this
	 * in here because the performance counter interrupt is a regular
	 * interrupt, not NMI.
	 */
	if (handled == IRQ_HANDLED)
		irq_work_run();

	return handled;
}

static int loongarch_pmu_event_init(struct perf_event *event)
{
	int r, irq;
	unsigned long flags;

	/* does not support taken branch sampling */
	if (has_branch_stack(event))
		return -EOPNOTSUPP;

	switch (event->attr.type) {
	case PERF_TYPE_RAW:
	case PERF_TYPE_HARDWARE:
	case PERF_TYPE_HW_CACHE:
		break;

	default:
		/* Init it to avoid false validate_group */
		event->hw.event_base = 0xffffffff;
		return -ENOENT;
	}

	if (event->cpu >= 0 && !cpu_online(event->cpu))
		return -ENODEV;

	irq = get_pmc_irq();
	flags = IRQF_PERCPU | IRQF_NOBALANCING | IRQF_NO_THREAD | IRQF_NO_SUSPEND | IRQF_SHARED;
	if (!atomic_inc_not_zero(&active_events)) {
		mutex_lock(&pmu_reserve_mutex);
		if (atomic_read(&active_events) == 0) {
			r = request_irq(irq, pmu_handle_irq, flags, "Perf_PMU", &loongarch_pmu);
			if (r < 0) {
				mutex_unlock(&pmu_reserve_mutex);
				pr_warn("PMU IRQ request failed\n");
				return -ENODEV;
			}
		}
		atomic_inc(&active_events);
		mutex_unlock(&pmu_reserve_mutex);
	}

	return __hw_perf_event_init(event);
}

static struct pmu pmu = {
	.pmu_enable	= loongarch_pmu_enable,
	.pmu_disable	= loongarch_pmu_disable,
	.event_init	= loongarch_pmu_event_init,
	.add		= loongarch_pmu_add,
	.del		= loongarch_pmu_del,
	.start		= loongarch_pmu_start,
	.stop		= loongarch_pmu_stop,
	.read		= loongarch_pmu_read,
};

static unsigned int loongarch_pmu_perf_event_encode(const struct loongarch_perf_event *pev)
{
	return M_PERFCTL_EVENT(pev->event_id);
}

static const struct loongarch_perf_event *loongarch_pmu_map_general_event(int idx)
{
	const struct loongarch_perf_event *pev;

	pev = &(*loongarch_pmu.general_event_map)[idx];

	if (pev->event_id == HW_OP_UNSUPPORTED)
		return ERR_PTR(-ENOENT);

	return pev;
}
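
/*
 * The generic cache event config is packed as
 * (result << 16) | (op << 8) | type; decode each byte and reject anything
 * outside the generic enums before indexing the cache map.
 */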

static const struct loongarch_perf_event *loongarch_pmu_map_cache_event(u64 config)
{
	unsigned int cache_type, cache_op, cache_result;
	const struct loongarch_perf_event *pev;

	cache_type = (config >> 0) & 0xff;
	if (cache_type >= PERF_COUNT_HW_CACHE_MAX)
		return ERR_PTR(-EINVAL);

	cache_op = (config >> 8) & 0xff;
	if (cache_op >= PERF_COUNT_HW_CACHE_OP_MAX)
		return ERR_PTR(-EINVAL);

	cache_result = (config >> 16) & 0xff;
	if (cache_result >= PERF_COUNT_HW_CACHE_RESULT_MAX)
		return ERR_PTR(-EINVAL);

	pev = &((*loongarch_pmu.cache_event_map)
					[cache_type]
					[cache_op]
					[cache_result]);

	if (pev->event_id == CACHE_OP_UNSUPPORTED)
		return ERR_PTR(-ENOENT);

	return pev;
}
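
/*
 * Dry-run counter allocation for the whole group on a zeroed fake
 * cpu_hw_events, so that leader, siblings and the new event are only
 * accepted if they can all be scheduled on the available counters at once.
 */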

static int validate_group(struct perf_event *event)
{
	struct cpu_hw_events fake_cpuc;
	struct perf_event *sibling, *leader = event->group_leader;

	memset(&fake_cpuc, 0, sizeof(fake_cpuc));

	if (loongarch_pmu_alloc_counter(&fake_cpuc, &leader->hw) < 0)
		return -EINVAL;

	for_each_sibling_event(sibling, leader) {
		if (loongarch_pmu_alloc_counter(&fake_cpuc, &sibling->hw) < 0)
			return -EINVAL;
	}

	if (loongarch_pmu_alloc_counter(&fake_cpuc, &event->hw) < 0)
		return -EINVAL;

	return 0;
}

static void reset_counters(void *arg)
{
	int n;
	int counters = loongarch_pmu.num_counters;

	for (n = 0; n < counters; n++) {
		loongarch_pmu_write_control(n, 0);
		loongarch_pmu.write_counter(n, 0);
	}
}

static const struct loongarch_perf_event loongson_event_map[PERF_COUNT_HW_MAX] = {
	PERF_MAP_ALL_UNSUPPORTED,
	[PERF_COUNT_HW_CPU_CYCLES] = { 0x00 },
	[PERF_COUNT_HW_INSTRUCTIONS] = { 0x01 },
	[PERF_COUNT_HW_CACHE_REFERENCES] = { 0x08 },
	[PERF_COUNT_HW_CACHE_MISSES] = { 0x09 },
	[PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = { 0x02 },
	[PERF_COUNT_HW_BRANCH_MISSES] = { 0x03 },
};

static const struct loongarch_perf_event loongson_cache_map
				[PERF_COUNT_HW_CACHE_MAX]
				[PERF_COUNT_HW_CACHE_OP_MAX]
				[PERF_COUNT_HW_CACHE_RESULT_MAX] = {
PERF_CACHE_MAP_ALL_UNSUPPORTED,
[C(L1D)] = {
	/*
	 * Like some other architectures (e.g. ARM), the performance
	 * counters don't differentiate between read and write
	 * accesses/misses, so this isn't strictly correct, but it's the
	 * best we can do. Writes and reads get combined.
	 */
	[C(OP_READ)] = {
		[C(RESULT_ACCESS)]	= { 0x8 },
		[C(RESULT_MISS)]	= { 0x9 },
	},
	[C(OP_WRITE)] = {
		[C(RESULT_ACCESS)]	= { 0x8 },
		[C(RESULT_MISS)]	= { 0x9 },
	},
	[C(OP_PREFETCH)] = {
		[C(RESULT_ACCESS)]	= { 0xaa },
		[C(RESULT_MISS)]	= { 0xa9 },
	},
},
[C(L1I)] = {
	[C(OP_READ)] = {
		[C(RESULT_ACCESS)]	= { 0x6 },
		[C(RESULT_MISS)]	= { 0x7 },
	},
},
[C(LL)] = {
	[C(OP_READ)] = {
		[C(RESULT_ACCESS)]	= { 0xc },
		[C(RESULT_MISS)]	= { 0xd },
	},
	[C(OP_WRITE)] = {
		[C(RESULT_ACCESS)]	= { 0xc },
		[C(RESULT_MISS)]	= { 0xd },
	},
},
[C(ITLB)] = {
	[C(OP_READ)] = {
		[C(RESULT_MISS)]	= { 0x3b },
	},
},
[C(DTLB)] = {
	[C(OP_READ)] = {
		[C(RESULT_ACCESS)]	= { 0x4 },
		[C(RESULT_MISS)]	= { 0x3c },
	},
	[C(OP_WRITE)] = {
		[C(RESULT_ACCESS)]	= { 0x4 },
		[C(RESULT_MISS)]	= { 0x3c },
	},
},
[C(BPU)] = {
	/* Using the same code for *HW_BRANCH* */
	[C(OP_READ)] = {
		[C(RESULT_ACCESS)]	= { 0x02 },
		[C(RESULT_MISS)]	= { 0x03 },
	},
},
};

static int __hw_perf_event_init(struct perf_event *event)
{
	int err;
	struct hw_perf_event *hwc = &event->hw;
	struct perf_event_attr *attr = &event->attr;
	const struct loongarch_perf_event *pev;

	/* Returning LoongArch event descriptor for generic perf event. */
	if (PERF_TYPE_HARDWARE == event->attr.type) {
		if (event->attr.config >= PERF_COUNT_HW_MAX)
			return -EINVAL;
		pev = loongarch_pmu_map_general_event(event->attr.config);
	} else if (PERF_TYPE_HW_CACHE == event->attr.type) {
		pev = loongarch_pmu_map_cache_event(event->attr.config);
	} else if (PERF_TYPE_RAW == event->attr.type) {
		/* We are working on the global raw event. */
		mutex_lock(&raw_event_mutex);
		pev = loongarch_pmu.map_raw_event(event->attr.config);
	} else {
		/* The event type is not (yet) supported. */
		return -EOPNOTSUPP;
	}

	if (IS_ERR(pev)) {
		if (PERF_TYPE_RAW == event->attr.type)
			mutex_unlock(&raw_event_mutex);
		return PTR_ERR(pev);
	}

	/*
	 * We allow max flexibility on how each individual counter shared
	 * by the single CPU operates (the mode exclusion and the range).
	 */
	hwc->config_base = CSR_PERFCTRL_IE;

	hwc->event_base = loongarch_pmu_perf_event_encode(pev);
	if (PERF_TYPE_RAW == event->attr.type)
		mutex_unlock(&raw_event_mutex);

	if (!attr->exclude_user) {
		hwc->config_base |= CSR_PERFCTRL_PLV3;
		hwc->config_base |= CSR_PERFCTRL_PLV2;
	}
	if (!attr->exclude_kernel) {
		hwc->config_base |= CSR_PERFCTRL_PLV0;
	}
	if (!attr->exclude_hv) {
		hwc->config_base |= CSR_PERFCTRL_PLV1;
	}

	hwc->config_base &= M_PERFCTL_CONFIG_MASK;
	/*
	 * The event can belong to another cpu. We do not assign a local
	 * counter for it for now.
	 */
	hwc->idx = -1;
	hwc->config = 0;

	if (!hwc->sample_period) {
		hwc->sample_period  = loongarch_pmu.max_period;
		hwc->last_period    = hwc->sample_period;
		local64_set(&hwc->period_left, hwc->sample_period);
	}

	err = 0;
	if (event->group_leader != event)
		err = validate_group(event);

	event->destroy = hw_perf_event_destroy;

	if (err)
		event->destroy(event);

	return err;
}
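
/*
 * pause_local_counters()/resume_local_counters() stop and restart counting
 * on the local CPU by clearing and restoring the PLV/IE bits of every
 * control register, using the saved_ctrl[] software copies.
 */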

static void pause_local_counters(void)
{
	unsigned long flags;
	int ctr = loongarch_pmu.num_counters;
	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);

	local_irq_save(flags);
	do {
		ctr--;
		cpuc->saved_ctrl[ctr] = loongarch_pmu_read_control(ctr);
		loongarch_pmu_write_control(ctr, cpuc->saved_ctrl[ctr] &
					 ~M_PERFCTL_COUNT_EVENT_WHENEVER);
	} while (ctr > 0);
	local_irq_restore(flags);
}

static void resume_local_counters(void)
{
	int ctr = loongarch_pmu.num_counters;
	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);

	do {
		ctr--;
		loongarch_pmu_write_control(ctr, cpuc->saved_ctrl[ctr]);
	} while (ctr > 0);
}
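
/*
 * Raw events all share the single global raw_event descriptor, which is
 * why __hw_perf_event_init() holds raw_event_mutex from the mapping until
 * the event id has been copied into hwc->event_base.
 */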

static const struct loongarch_perf_event *loongarch_pmu_map_raw_event(u64 config)
{
	raw_event.event_id = M_PERFCTL_EVENT(config);

	return &raw_event;
}
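
/*
 * CPUCFG6.PMNUM holds the number of implemented counters minus one. Bit 63
 * of each 64-bit counter doubles as the overflow flag tested in
 * pmu_handle_irq(), which is why max_period and valid_count stop at 2^63 - 1.
 */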

static int __init init_hw_perf_events(void)
{
	int counters;

	if (!cpu_has_pmp)
		return -ENODEV;

	pr_info("Performance counters: ");
	counters = ((read_cpucfg(LOONGARCH_CPUCFG6) & CPUCFG6_PMNUM) >> 4) + 1;

	loongarch_pmu.num_counters = counters;
	loongarch_pmu.max_period = (1ULL << 63) - 1;
	loongarch_pmu.valid_count = (1ULL << 63) - 1;
	loongarch_pmu.overflow = 1ULL << 63;
	loongarch_pmu.name = "loongarch/loongson64";
	loongarch_pmu.read_counter = loongarch_pmu_read_counter;
	loongarch_pmu.write_counter = loongarch_pmu_write_counter;
	loongarch_pmu.map_raw_event = loongarch_pmu_map_raw_event;
	loongarch_pmu.general_event_map = &loongson_event_map;
	loongarch_pmu.cache_event_map = &loongson_cache_map;

	on_each_cpu(reset_counters, NULL, 1);

	pr_cont("%s PMU enabled, %d %d-bit counters available to each CPU.\n",
			loongarch_pmu.name, counters, 64);

	perf_pmu_register(&pmu, "cpu", PERF_TYPE_RAW);

	return 0;
}
early_initcall(init_hw_perf_events);