// SPDX-License-Identifier: GPL-2.0-only
/*
 * ARMv8 PMUv3 Performance Events handling code.
 *
 * Copyright (C) 2012 ARM Limited
 * Author: Will Deacon <will.deacon@arm.com>
 *
 * This code is based heavily on the ARMv7 perf event code.
 */
#include <asm/irq_regs.h>
#include <asm/perf_event.h>
#include <asm/virt.h>

#include <clocksource/arm_arch_timer.h>

#include <linux/acpi.h>
#include <linux/bitfield.h>
#include <linux/clocksource.h>
#include <linux/of.h>
#include <linux/perf/arm_pmu.h>
#include <linux/perf/arm_pmuv3.h>
#include <linux/platform_device.h>
#include <linux/sched_clock.h>
#include <linux/smp.h>
#include <linux/nmi.h>
28 #include <asm/arm_pmuv3.h>
30 /* ARMv8 Cortex-A53 specific event types. */
31 #define ARMV8_A53_PERFCTR_PREF_LINEFILL 0xC2
33 /* ARMv8 Cavium ThunderX specific event types. */
34 #define ARMV8_THUNDER_PERFCTR_L1D_CACHE_MISS_ST 0xE9
35 #define ARMV8_THUNDER_PERFCTR_L1D_CACHE_PREF_ACCESS 0xEA
36 #define ARMV8_THUNDER_PERFCTR_L1D_CACHE_PREF_MISS 0xEB
37 #define ARMV8_THUNDER_PERFCTR_L1I_CACHE_PREF_ACCESS 0xEC
38 #define ARMV8_THUNDER_PERFCTR_L1I_CACHE_PREF_MISS 0xED
/*
 * ARMv8 architecturally defined events. Not all of these may be supported
 * on any given implementation; unsupported events will be disabled at
 * run-time based on the PMCEID registers.
 */
45 static const unsigned armv8_pmuv3_perf_map[PERF_COUNT_HW_MAX] = {
46 PERF_MAP_ALL_UNSUPPORTED,
47 [PERF_COUNT_HW_CPU_CYCLES] = ARMV8_PMUV3_PERFCTR_CPU_CYCLES,
48 [PERF_COUNT_HW_INSTRUCTIONS] = ARMV8_PMUV3_PERFCTR_INST_RETIRED,
49 [PERF_COUNT_HW_CACHE_REFERENCES] = ARMV8_PMUV3_PERFCTR_L1D_CACHE,
50 [PERF_COUNT_HW_CACHE_MISSES] = ARMV8_PMUV3_PERFCTR_L1D_CACHE_REFILL,
51 [PERF_COUNT_HW_BRANCH_MISSES] = ARMV8_PMUV3_PERFCTR_BR_MIS_PRED,
52 [PERF_COUNT_HW_BUS_CYCLES] = ARMV8_PMUV3_PERFCTR_BUS_CYCLES,
53 [PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] = ARMV8_PMUV3_PERFCTR_STALL_FRONTEND,
54 [PERF_COUNT_HW_STALLED_CYCLES_BACKEND] = ARMV8_PMUV3_PERFCTR_STALL_BACKEND,
57 static const unsigned armv8_pmuv3_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
58 [PERF_COUNT_HW_CACHE_OP_MAX]
59 [PERF_COUNT_HW_CACHE_RESULT_MAX] = {
60 PERF_CACHE_MAP_ALL_UNSUPPORTED,
62 [C(L1D)][C(OP_READ)][C(RESULT_ACCESS)] = ARMV8_PMUV3_PERFCTR_L1D_CACHE,
63 [C(L1D)][C(OP_READ)][C(RESULT_MISS)] = ARMV8_PMUV3_PERFCTR_L1D_CACHE_REFILL,
65 [C(L1I)][C(OP_READ)][C(RESULT_ACCESS)] = ARMV8_PMUV3_PERFCTR_L1I_CACHE,
66 [C(L1I)][C(OP_READ)][C(RESULT_MISS)] = ARMV8_PMUV3_PERFCTR_L1I_CACHE_REFILL,
68 [C(DTLB)][C(OP_READ)][C(RESULT_MISS)] = ARMV8_PMUV3_PERFCTR_L1D_TLB_REFILL,
69 [C(DTLB)][C(OP_READ)][C(RESULT_ACCESS)] = ARMV8_PMUV3_PERFCTR_L1D_TLB,
71 [C(ITLB)][C(OP_READ)][C(RESULT_MISS)] = ARMV8_PMUV3_PERFCTR_L1I_TLB_REFILL,
72 [C(ITLB)][C(OP_READ)][C(RESULT_ACCESS)] = ARMV8_PMUV3_PERFCTR_L1I_TLB,
74 [C(LL)][C(OP_READ)][C(RESULT_MISS)] = ARMV8_PMUV3_PERFCTR_LL_CACHE_MISS_RD,
75 [C(LL)][C(OP_READ)][C(RESULT_ACCESS)] = ARMV8_PMUV3_PERFCTR_LL_CACHE_RD,
77 [C(BPU)][C(OP_READ)][C(RESULT_ACCESS)] = ARMV8_PMUV3_PERFCTR_BR_PRED,
78 [C(BPU)][C(OP_READ)][C(RESULT_MISS)] = ARMV8_PMUV3_PERFCTR_BR_MIS_PRED,
81 static const unsigned armv8_a53_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
82 [PERF_COUNT_HW_CACHE_OP_MAX]
83 [PERF_COUNT_HW_CACHE_RESULT_MAX] = {
84 PERF_CACHE_MAP_ALL_UNSUPPORTED,
86 [C(L1D)][C(OP_PREFETCH)][C(RESULT_MISS)] = ARMV8_A53_PERFCTR_PREF_LINEFILL,
88 [C(NODE)][C(OP_READ)][C(RESULT_ACCESS)] = ARMV8_IMPDEF_PERFCTR_BUS_ACCESS_RD,
89 [C(NODE)][C(OP_WRITE)][C(RESULT_ACCESS)] = ARMV8_IMPDEF_PERFCTR_BUS_ACCESS_WR,
92 static const unsigned armv8_a57_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
93 [PERF_COUNT_HW_CACHE_OP_MAX]
94 [PERF_COUNT_HW_CACHE_RESULT_MAX] = {
95 PERF_CACHE_MAP_ALL_UNSUPPORTED,
97 [C(L1D)][C(OP_READ)][C(RESULT_ACCESS)] = ARMV8_IMPDEF_PERFCTR_L1D_CACHE_RD,
98 [C(L1D)][C(OP_READ)][C(RESULT_MISS)] = ARMV8_IMPDEF_PERFCTR_L1D_CACHE_REFILL_RD,
99 [C(L1D)][C(OP_WRITE)][C(RESULT_ACCESS)] = ARMV8_IMPDEF_PERFCTR_L1D_CACHE_WR,
100 [C(L1D)][C(OP_WRITE)][C(RESULT_MISS)] = ARMV8_IMPDEF_PERFCTR_L1D_CACHE_REFILL_WR,
102 [C(DTLB)][C(OP_READ)][C(RESULT_MISS)] = ARMV8_IMPDEF_PERFCTR_L1D_TLB_REFILL_RD,
103 [C(DTLB)][C(OP_WRITE)][C(RESULT_MISS)] = ARMV8_IMPDEF_PERFCTR_L1D_TLB_REFILL_WR,
105 [C(NODE)][C(OP_READ)][C(RESULT_ACCESS)] = ARMV8_IMPDEF_PERFCTR_BUS_ACCESS_RD,
106 [C(NODE)][C(OP_WRITE)][C(RESULT_ACCESS)] = ARMV8_IMPDEF_PERFCTR_BUS_ACCESS_WR,
109 static const unsigned armv8_a73_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
110 [PERF_COUNT_HW_CACHE_OP_MAX]
111 [PERF_COUNT_HW_CACHE_RESULT_MAX] = {
112 PERF_CACHE_MAP_ALL_UNSUPPORTED,
114 [C(L1D)][C(OP_READ)][C(RESULT_ACCESS)] = ARMV8_IMPDEF_PERFCTR_L1D_CACHE_RD,
115 [C(L1D)][C(OP_WRITE)][C(RESULT_ACCESS)] = ARMV8_IMPDEF_PERFCTR_L1D_CACHE_WR,
118 static const unsigned armv8_thunder_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
119 [PERF_COUNT_HW_CACHE_OP_MAX]
120 [PERF_COUNT_HW_CACHE_RESULT_MAX] = {
121 PERF_CACHE_MAP_ALL_UNSUPPORTED,
123 [C(L1D)][C(OP_READ)][C(RESULT_ACCESS)] = ARMV8_IMPDEF_PERFCTR_L1D_CACHE_RD,
124 [C(L1D)][C(OP_READ)][C(RESULT_MISS)] = ARMV8_IMPDEF_PERFCTR_L1D_CACHE_REFILL_RD,
125 [C(L1D)][C(OP_WRITE)][C(RESULT_ACCESS)] = ARMV8_IMPDEF_PERFCTR_L1D_CACHE_WR,
126 [C(L1D)][C(OP_WRITE)][C(RESULT_MISS)] = ARMV8_THUNDER_PERFCTR_L1D_CACHE_MISS_ST,
127 [C(L1D)][C(OP_PREFETCH)][C(RESULT_ACCESS)] = ARMV8_THUNDER_PERFCTR_L1D_CACHE_PREF_ACCESS,
128 [C(L1D)][C(OP_PREFETCH)][C(RESULT_MISS)] = ARMV8_THUNDER_PERFCTR_L1D_CACHE_PREF_MISS,
130 [C(L1I)][C(OP_PREFETCH)][C(RESULT_ACCESS)] = ARMV8_THUNDER_PERFCTR_L1I_CACHE_PREF_ACCESS,
131 [C(L1I)][C(OP_PREFETCH)][C(RESULT_MISS)] = ARMV8_THUNDER_PERFCTR_L1I_CACHE_PREF_MISS,
133 [C(DTLB)][C(OP_READ)][C(RESULT_ACCESS)] = ARMV8_IMPDEF_PERFCTR_L1D_TLB_RD,
134 [C(DTLB)][C(OP_READ)][C(RESULT_MISS)] = ARMV8_IMPDEF_PERFCTR_L1D_TLB_REFILL_RD,
135 [C(DTLB)][C(OP_WRITE)][C(RESULT_ACCESS)] = ARMV8_IMPDEF_PERFCTR_L1D_TLB_WR,
136 [C(DTLB)][C(OP_WRITE)][C(RESULT_MISS)] = ARMV8_IMPDEF_PERFCTR_L1D_TLB_REFILL_WR,
139 static const unsigned armv8_vulcan_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
140 [PERF_COUNT_HW_CACHE_OP_MAX]
141 [PERF_COUNT_HW_CACHE_RESULT_MAX] = {
142 PERF_CACHE_MAP_ALL_UNSUPPORTED,
144 [C(L1D)][C(OP_READ)][C(RESULT_ACCESS)] = ARMV8_IMPDEF_PERFCTR_L1D_CACHE_RD,
145 [C(L1D)][C(OP_READ)][C(RESULT_MISS)] = ARMV8_IMPDEF_PERFCTR_L1D_CACHE_REFILL_RD,
146 [C(L1D)][C(OP_WRITE)][C(RESULT_ACCESS)] = ARMV8_IMPDEF_PERFCTR_L1D_CACHE_WR,
147 [C(L1D)][C(OP_WRITE)][C(RESULT_MISS)] = ARMV8_IMPDEF_PERFCTR_L1D_CACHE_REFILL_WR,
149 [C(DTLB)][C(OP_READ)][C(RESULT_ACCESS)] = ARMV8_IMPDEF_PERFCTR_L1D_TLB_RD,
150 [C(DTLB)][C(OP_WRITE)][C(RESULT_ACCESS)] = ARMV8_IMPDEF_PERFCTR_L1D_TLB_WR,
151 [C(DTLB)][C(OP_READ)][C(RESULT_MISS)] = ARMV8_IMPDEF_PERFCTR_L1D_TLB_REFILL_RD,
152 [C(DTLB)][C(OP_WRITE)][C(RESULT_MISS)] = ARMV8_IMPDEF_PERFCTR_L1D_TLB_REFILL_WR,
154 [C(NODE)][C(OP_READ)][C(RESULT_ACCESS)] = ARMV8_IMPDEF_PERFCTR_BUS_ACCESS_RD,
155 [C(NODE)][C(OP_WRITE)][C(RESULT_ACCESS)] = ARMV8_IMPDEF_PERFCTR_BUS_ACCESS_WR,
static ssize_t
armv8pmu_events_sysfs_show(struct device *dev,
			   struct device_attribute *attr, char *page)
{
	struct perf_pmu_events_attr *pmu_attr;

	pmu_attr = container_of(attr, struct perf_pmu_events_attr, attr);

	return sprintf(page, "event=0x%04llx\n", pmu_attr->id);
}
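/*
 * Illustrative sysfs output (the PMU instance name below is an assumption):
 * "cat /sys/bus/event_source/devices/armv8_pmuv3_0/events/cpu_cycles" prints
 * "event=0x0011", the ARMV8_PMUV3_PERFCTR_CPU_CYCLES encoding.
 */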
169 #define ARMV8_EVENT_ATTR(name, config) \
170 PMU_EVENT_ATTR_ID(name, armv8pmu_events_sysfs_show, config)
172 static struct attribute *armv8_pmuv3_event_attrs[] = {
	/*
	 * Don't expose the sw_incr event in /sys. It's not usable as writes to
	 * PMSWINC_EL0 will trap as PMUSERENR.{SW,EN}=={0,0} and event rotation
	 * means we don't have a fixed event<->counter relationship regardless.
	 */
178 ARMV8_EVENT_ATTR(l1i_cache_refill, ARMV8_PMUV3_PERFCTR_L1I_CACHE_REFILL),
179 ARMV8_EVENT_ATTR(l1i_tlb_refill, ARMV8_PMUV3_PERFCTR_L1I_TLB_REFILL),
180 ARMV8_EVENT_ATTR(l1d_cache_refill, ARMV8_PMUV3_PERFCTR_L1D_CACHE_REFILL),
181 ARMV8_EVENT_ATTR(l1d_cache, ARMV8_PMUV3_PERFCTR_L1D_CACHE),
182 ARMV8_EVENT_ATTR(l1d_tlb_refill, ARMV8_PMUV3_PERFCTR_L1D_TLB_REFILL),
183 ARMV8_EVENT_ATTR(ld_retired, ARMV8_PMUV3_PERFCTR_LD_RETIRED),
184 ARMV8_EVENT_ATTR(st_retired, ARMV8_PMUV3_PERFCTR_ST_RETIRED),
185 ARMV8_EVENT_ATTR(inst_retired, ARMV8_PMUV3_PERFCTR_INST_RETIRED),
186 ARMV8_EVENT_ATTR(exc_taken, ARMV8_PMUV3_PERFCTR_EXC_TAKEN),
187 ARMV8_EVENT_ATTR(exc_return, ARMV8_PMUV3_PERFCTR_EXC_RETURN),
188 ARMV8_EVENT_ATTR(cid_write_retired, ARMV8_PMUV3_PERFCTR_CID_WRITE_RETIRED),
189 ARMV8_EVENT_ATTR(pc_write_retired, ARMV8_PMUV3_PERFCTR_PC_WRITE_RETIRED),
190 ARMV8_EVENT_ATTR(br_immed_retired, ARMV8_PMUV3_PERFCTR_BR_IMMED_RETIRED),
191 ARMV8_EVENT_ATTR(br_return_retired, ARMV8_PMUV3_PERFCTR_BR_RETURN_RETIRED),
192 ARMV8_EVENT_ATTR(unaligned_ldst_retired, ARMV8_PMUV3_PERFCTR_UNALIGNED_LDST_RETIRED),
193 ARMV8_EVENT_ATTR(br_mis_pred, ARMV8_PMUV3_PERFCTR_BR_MIS_PRED),
194 ARMV8_EVENT_ATTR(cpu_cycles, ARMV8_PMUV3_PERFCTR_CPU_CYCLES),
195 ARMV8_EVENT_ATTR(br_pred, ARMV8_PMUV3_PERFCTR_BR_PRED),
196 ARMV8_EVENT_ATTR(mem_access, ARMV8_PMUV3_PERFCTR_MEM_ACCESS),
197 ARMV8_EVENT_ATTR(l1i_cache, ARMV8_PMUV3_PERFCTR_L1I_CACHE),
198 ARMV8_EVENT_ATTR(l1d_cache_wb, ARMV8_PMUV3_PERFCTR_L1D_CACHE_WB),
199 ARMV8_EVENT_ATTR(l2d_cache, ARMV8_PMUV3_PERFCTR_L2D_CACHE),
200 ARMV8_EVENT_ATTR(l2d_cache_refill, ARMV8_PMUV3_PERFCTR_L2D_CACHE_REFILL),
201 ARMV8_EVENT_ATTR(l2d_cache_wb, ARMV8_PMUV3_PERFCTR_L2D_CACHE_WB),
202 ARMV8_EVENT_ATTR(bus_access, ARMV8_PMUV3_PERFCTR_BUS_ACCESS),
203 ARMV8_EVENT_ATTR(memory_error, ARMV8_PMUV3_PERFCTR_MEMORY_ERROR),
204 ARMV8_EVENT_ATTR(inst_spec, ARMV8_PMUV3_PERFCTR_INST_SPEC),
205 ARMV8_EVENT_ATTR(ttbr_write_retired, ARMV8_PMUV3_PERFCTR_TTBR_WRITE_RETIRED),
206 ARMV8_EVENT_ATTR(bus_cycles, ARMV8_PMUV3_PERFCTR_BUS_CYCLES),
207 /* Don't expose the chain event in /sys, since it's useless in isolation */
208 ARMV8_EVENT_ATTR(l1d_cache_allocate, ARMV8_PMUV3_PERFCTR_L1D_CACHE_ALLOCATE),
209 ARMV8_EVENT_ATTR(l2d_cache_allocate, ARMV8_PMUV3_PERFCTR_L2D_CACHE_ALLOCATE),
210 ARMV8_EVENT_ATTR(br_retired, ARMV8_PMUV3_PERFCTR_BR_RETIRED),
211 ARMV8_EVENT_ATTR(br_mis_pred_retired, ARMV8_PMUV3_PERFCTR_BR_MIS_PRED_RETIRED),
212 ARMV8_EVENT_ATTR(stall_frontend, ARMV8_PMUV3_PERFCTR_STALL_FRONTEND),
213 ARMV8_EVENT_ATTR(stall_backend, ARMV8_PMUV3_PERFCTR_STALL_BACKEND),
214 ARMV8_EVENT_ATTR(l1d_tlb, ARMV8_PMUV3_PERFCTR_L1D_TLB),
215 ARMV8_EVENT_ATTR(l1i_tlb, ARMV8_PMUV3_PERFCTR_L1I_TLB),
216 ARMV8_EVENT_ATTR(l2i_cache, ARMV8_PMUV3_PERFCTR_L2I_CACHE),
217 ARMV8_EVENT_ATTR(l2i_cache_refill, ARMV8_PMUV3_PERFCTR_L2I_CACHE_REFILL),
218 ARMV8_EVENT_ATTR(l3d_cache_allocate, ARMV8_PMUV3_PERFCTR_L3D_CACHE_ALLOCATE),
219 ARMV8_EVENT_ATTR(l3d_cache_refill, ARMV8_PMUV3_PERFCTR_L3D_CACHE_REFILL),
220 ARMV8_EVENT_ATTR(l3d_cache, ARMV8_PMUV3_PERFCTR_L3D_CACHE),
221 ARMV8_EVENT_ATTR(l3d_cache_wb, ARMV8_PMUV3_PERFCTR_L3D_CACHE_WB),
222 ARMV8_EVENT_ATTR(l2d_tlb_refill, ARMV8_PMUV3_PERFCTR_L2D_TLB_REFILL),
223 ARMV8_EVENT_ATTR(l2i_tlb_refill, ARMV8_PMUV3_PERFCTR_L2I_TLB_REFILL),
224 ARMV8_EVENT_ATTR(l2d_tlb, ARMV8_PMUV3_PERFCTR_L2D_TLB),
225 ARMV8_EVENT_ATTR(l2i_tlb, ARMV8_PMUV3_PERFCTR_L2I_TLB),
226 ARMV8_EVENT_ATTR(remote_access, ARMV8_PMUV3_PERFCTR_REMOTE_ACCESS),
227 ARMV8_EVENT_ATTR(ll_cache, ARMV8_PMUV3_PERFCTR_LL_CACHE),
228 ARMV8_EVENT_ATTR(ll_cache_miss, ARMV8_PMUV3_PERFCTR_LL_CACHE_MISS),
229 ARMV8_EVENT_ATTR(dtlb_walk, ARMV8_PMUV3_PERFCTR_DTLB_WALK),
230 ARMV8_EVENT_ATTR(itlb_walk, ARMV8_PMUV3_PERFCTR_ITLB_WALK),
231 ARMV8_EVENT_ATTR(ll_cache_rd, ARMV8_PMUV3_PERFCTR_LL_CACHE_RD),
232 ARMV8_EVENT_ATTR(ll_cache_miss_rd, ARMV8_PMUV3_PERFCTR_LL_CACHE_MISS_RD),
233 ARMV8_EVENT_ATTR(remote_access_rd, ARMV8_PMUV3_PERFCTR_REMOTE_ACCESS_RD),
234 ARMV8_EVENT_ATTR(l1d_cache_lmiss_rd, ARMV8_PMUV3_PERFCTR_L1D_CACHE_LMISS_RD),
235 ARMV8_EVENT_ATTR(op_retired, ARMV8_PMUV3_PERFCTR_OP_RETIRED),
236 ARMV8_EVENT_ATTR(op_spec, ARMV8_PMUV3_PERFCTR_OP_SPEC),
237 ARMV8_EVENT_ATTR(stall, ARMV8_PMUV3_PERFCTR_STALL),
238 ARMV8_EVENT_ATTR(stall_slot_backend, ARMV8_PMUV3_PERFCTR_STALL_SLOT_BACKEND),
239 ARMV8_EVENT_ATTR(stall_slot_frontend, ARMV8_PMUV3_PERFCTR_STALL_SLOT_FRONTEND),
240 ARMV8_EVENT_ATTR(stall_slot, ARMV8_PMUV3_PERFCTR_STALL_SLOT),
241 ARMV8_EVENT_ATTR(sample_pop, ARMV8_SPE_PERFCTR_SAMPLE_POP),
242 ARMV8_EVENT_ATTR(sample_feed, ARMV8_SPE_PERFCTR_SAMPLE_FEED),
243 ARMV8_EVENT_ATTR(sample_filtrate, ARMV8_SPE_PERFCTR_SAMPLE_FILTRATE),
244 ARMV8_EVENT_ATTR(sample_collision, ARMV8_SPE_PERFCTR_SAMPLE_COLLISION),
245 ARMV8_EVENT_ATTR(cnt_cycles, ARMV8_AMU_PERFCTR_CNT_CYCLES),
246 ARMV8_EVENT_ATTR(stall_backend_mem, ARMV8_AMU_PERFCTR_STALL_BACKEND_MEM),
247 ARMV8_EVENT_ATTR(l1i_cache_lmiss, ARMV8_PMUV3_PERFCTR_L1I_CACHE_LMISS),
248 ARMV8_EVENT_ATTR(l2d_cache_lmiss_rd, ARMV8_PMUV3_PERFCTR_L2D_CACHE_LMISS_RD),
249 ARMV8_EVENT_ATTR(l2i_cache_lmiss, ARMV8_PMUV3_PERFCTR_L2I_CACHE_LMISS),
250 ARMV8_EVENT_ATTR(l3d_cache_lmiss_rd, ARMV8_PMUV3_PERFCTR_L3D_CACHE_LMISS_RD),
251 ARMV8_EVENT_ATTR(trb_wrap, ARMV8_PMUV3_PERFCTR_TRB_WRAP),
252 ARMV8_EVENT_ATTR(trb_trig, ARMV8_PMUV3_PERFCTR_TRB_TRIG),
253 ARMV8_EVENT_ATTR(trcextout0, ARMV8_PMUV3_PERFCTR_TRCEXTOUT0),
254 ARMV8_EVENT_ATTR(trcextout1, ARMV8_PMUV3_PERFCTR_TRCEXTOUT1),
255 ARMV8_EVENT_ATTR(trcextout2, ARMV8_PMUV3_PERFCTR_TRCEXTOUT2),
256 ARMV8_EVENT_ATTR(trcextout3, ARMV8_PMUV3_PERFCTR_TRCEXTOUT3),
257 ARMV8_EVENT_ATTR(cti_trigout4, ARMV8_PMUV3_PERFCTR_CTI_TRIGOUT4),
258 ARMV8_EVENT_ATTR(cti_trigout5, ARMV8_PMUV3_PERFCTR_CTI_TRIGOUT5),
259 ARMV8_EVENT_ATTR(cti_trigout6, ARMV8_PMUV3_PERFCTR_CTI_TRIGOUT6),
260 ARMV8_EVENT_ATTR(cti_trigout7, ARMV8_PMUV3_PERFCTR_CTI_TRIGOUT7),
261 ARMV8_EVENT_ATTR(ldst_align_lat, ARMV8_PMUV3_PERFCTR_LDST_ALIGN_LAT),
262 ARMV8_EVENT_ATTR(ld_align_lat, ARMV8_PMUV3_PERFCTR_LD_ALIGN_LAT),
263 ARMV8_EVENT_ATTR(st_align_lat, ARMV8_PMUV3_PERFCTR_ST_ALIGN_LAT),
264 ARMV8_EVENT_ATTR(mem_access_checked, ARMV8_MTE_PERFCTR_MEM_ACCESS_CHECKED),
265 ARMV8_EVENT_ATTR(mem_access_checked_rd, ARMV8_MTE_PERFCTR_MEM_ACCESS_CHECKED_RD),
266 ARMV8_EVENT_ATTR(mem_access_checked_wr, ARMV8_MTE_PERFCTR_MEM_ACCESS_CHECKED_WR),
static umode_t
armv8pmu_event_attr_is_visible(struct kobject *kobj,
			       struct attribute *attr, int unused)
{
	struct device *dev = kobj_to_dev(kobj);
	struct pmu *pmu = dev_get_drvdata(dev);
	struct arm_pmu *cpu_pmu = container_of(pmu, struct arm_pmu, pmu);
	struct perf_pmu_events_attr *pmu_attr;

	pmu_attr = container_of(attr, struct perf_pmu_events_attr, attr.attr);

	if (pmu_attr->id < ARMV8_PMUV3_MAX_COMMON_EVENTS &&
	    test_bit(pmu_attr->id, cpu_pmu->pmceid_bitmap))
		return attr->mode;

	if (pmu_attr->id >= ARMV8_PMUV3_EXT_COMMON_EVENT_BASE) {
		u64 id = pmu_attr->id - ARMV8_PMUV3_EXT_COMMON_EVENT_BASE;

		if (id < ARMV8_PMUV3_MAX_COMMON_EVENTS &&
		    test_bit(id, cpu_pmu->pmceid_ext_bitmap))
			return attr->mode;
	}

	return 0;
}
296 static const struct attribute_group armv8_pmuv3_events_attr_group = {
298 .attrs = armv8_pmuv3_event_attrs,
299 .is_visible = armv8pmu_event_attr_is_visible,
303 #define ATTR_CFG_FLD_event_CFG config
304 #define ATTR_CFG_FLD_event_LO 0
305 #define ATTR_CFG_FLD_event_HI 15
306 #define ATTR_CFG_FLD_long_CFG config1
307 #define ATTR_CFG_FLD_long_LO 0
308 #define ATTR_CFG_FLD_long_HI 0
309 #define ATTR_CFG_FLD_rdpmc_CFG config1
310 #define ATTR_CFG_FLD_rdpmc_LO 1
311 #define ATTR_CFG_FLD_rdpmc_HI 1
312 #define ATTR_CFG_FLD_threshold_count_CFG config1 /* PMEVTYPER.TC[0] */
313 #define ATTR_CFG_FLD_threshold_count_LO 2
314 #define ATTR_CFG_FLD_threshold_count_HI 2
315 #define ATTR_CFG_FLD_threshold_compare_CFG config1 /* PMEVTYPER.TC[2:1] */
316 #define ATTR_CFG_FLD_threshold_compare_LO 3
317 #define ATTR_CFG_FLD_threshold_compare_HI 4
318 #define ATTR_CFG_FLD_threshold_CFG config1 /* PMEVTYPER.TH */
319 #define ATTR_CFG_FLD_threshold_LO 5
320 #define ATTR_CFG_FLD_threshold_HI 16
322 GEN_PMU_FORMAT_ATTR(event);
323 GEN_PMU_FORMAT_ATTR(long);
324 GEN_PMU_FORMAT_ATTR(rdpmc);
325 GEN_PMU_FORMAT_ATTR(threshold_count);
326 GEN_PMU_FORMAT_ATTR(threshold_compare);
327 GEN_PMU_FORMAT_ATTR(threshold);
329 static int sysctl_perf_user_access __read_mostly;
331 static bool armv8pmu_event_is_64bit(struct perf_event *event)
333 return ATTR_CFG_GET_FLD(&event->attr, long);
336 static bool armv8pmu_event_want_user_access(struct perf_event *event)
338 return ATTR_CFG_GET_FLD(&event->attr, rdpmc);
341 static u8 armv8pmu_event_threshold_control(struct perf_event_attr *attr)
343 u8 th_compare = ATTR_CFG_GET_FLD(attr, threshold_compare);
344 u8 th_count = ATTR_CFG_GET_FLD(attr, threshold_count);
	/*
	 * The count bit is always the bottom bit of the full control field, and
	 * the comparison is the upper two bits, but it's not explicitly
	 * labelled in the Arm ARM. For the Perf interface we split it into two
	 * fields, so reconstruct it here.
	 */
352 return (th_compare << 1) | th_count;
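/*
 * Illustrative example for armv8pmu_event_threshold_control() above:
 * threshold_compare == 0b10 and threshold_count == 1 reconstruct to a full
 * PMEVTYPER.TC control field of (0b10 << 1) | 1 == 0b101.
 */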
355 static struct attribute *armv8_pmuv3_format_attrs[] = {
356 &format_attr_event.attr,
357 &format_attr_long.attr,
358 &format_attr_rdpmc.attr,
359 &format_attr_threshold.attr,
360 &format_attr_threshold_compare.attr,
361 &format_attr_threshold_count.attr,
365 static const struct attribute_group armv8_pmuv3_format_attr_group = {
367 .attrs = armv8_pmuv3_format_attrs,
370 static ssize_t slots_show(struct device *dev, struct device_attribute *attr,
373 struct pmu *pmu = dev_get_drvdata(dev);
374 struct arm_pmu *cpu_pmu = container_of(pmu, struct arm_pmu, pmu);
375 u32 slots = FIELD_GET(ARMV8_PMU_SLOTS, cpu_pmu->reg_pmmir);
377 return sysfs_emit(page, "0x%08x\n", slots);
380 static DEVICE_ATTR_RO(slots);
382 static ssize_t bus_slots_show(struct device *dev, struct device_attribute *attr,
385 struct pmu *pmu = dev_get_drvdata(dev);
386 struct arm_pmu *cpu_pmu = container_of(pmu, struct arm_pmu, pmu);
387 u32 bus_slots = FIELD_GET(ARMV8_PMU_BUS_SLOTS, cpu_pmu->reg_pmmir);
389 return sysfs_emit(page, "0x%08x\n", bus_slots);
392 static DEVICE_ATTR_RO(bus_slots);
394 static ssize_t bus_width_show(struct device *dev, struct device_attribute *attr,
397 struct pmu *pmu = dev_get_drvdata(dev);
398 struct arm_pmu *cpu_pmu = container_of(pmu, struct arm_pmu, pmu);
399 u32 bus_width = FIELD_GET(ARMV8_PMU_BUS_WIDTH, cpu_pmu->reg_pmmir);
	u32 val = 0;

	/* Encoded as Log2(number of bytes), plus one */
	if (bus_width > 2 && bus_width < 13)
		val = 1 << (bus_width - 1);

	return sysfs_emit(page, "0x%08x\n", val);
}
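/*
 * Worked example (illustrative): PMMIR.BUS_WIDTH == 4 decodes to
 * 1 << (4 - 1) = 8, i.e. an 8-byte bus; encodings outside 3..12 are
 * treated as unknown and reported as 0.
 */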
409 static DEVICE_ATTR_RO(bus_width);
static u32 threshold_max(struct arm_pmu *cpu_pmu)
{
	/*
	 * PMMIR.THWIDTH is readable and non-zero on aarch32, but it would be
	 * impossible to write the threshold in the upper 32 bits of PMEVTYPER.
	 */
	if (IS_ENABLED(CONFIG_ARM))
		return 0;

	/*
	 * The largest value that can be written to PMEVTYPER<n>_EL0.TH is
	 * (2 ^ PMMIR.THWIDTH) - 1.
	 */
	return (1 << FIELD_GET(ARMV8_PMU_THWIDTH, cpu_pmu->reg_pmmir)) - 1;
}
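/*
 * Worked example (illustrative): PMMIR.THWIDTH == 8 gives a maximum
 * threshold of (1 << 8) - 1 = 255. A PMU without FEAT_PMUv3_TH reports
 * THWIDTH == 0, so the maximum (and only) accepted threshold is 0.
 */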
427 static ssize_t threshold_max_show(struct device *dev,
428 struct device_attribute *attr, char *page)
430 struct pmu *pmu = dev_get_drvdata(dev);
431 struct arm_pmu *cpu_pmu = container_of(pmu, struct arm_pmu, pmu);
433 return sysfs_emit(page, "0x%08x\n", threshold_max(cpu_pmu));
436 static DEVICE_ATTR_RO(threshold_max);
438 static struct attribute *armv8_pmuv3_caps_attrs[] = {
439 &dev_attr_slots.attr,
440 &dev_attr_bus_slots.attr,
441 &dev_attr_bus_width.attr,
442 &dev_attr_threshold_max.attr,
446 static const struct attribute_group armv8_pmuv3_caps_attr_group = {
448 .attrs = armv8_pmuv3_caps_attrs,
/*
 * Perf Events' indices
 */
454 #define ARMV8_IDX_CYCLE_COUNTER 0
455 #define ARMV8_IDX_COUNTER0 1
456 #define ARMV8_IDX_CYCLE_COUNTER_USER 32
/*
 * We unconditionally enable ARMv8.5-PMU long event counter support
 * (64-bit events) where supported. Indicate if this arm_pmu has long
 * event counter support.
 *
 * On AArch32, long counters make no sense (you can't access the top
 * bits), so we only enable this on AArch64.
 */
static bool armv8pmu_has_long_event(struct arm_pmu *cpu_pmu)
{
	return (IS_ENABLED(CONFIG_ARM64) && is_pmuv3p5(cpu_pmu->pmuver));
}
471 static bool armv8pmu_event_has_user_read(struct perf_event *event)
473 return event->hw.flags & PERF_EVENT_FLAG_USER_READ_CNT;
/*
 * We must chain two programmable counters for 64-bit events,
 * except when we have allocated the 64-bit cycle counter (for CPU
 * cycles events) or when user space counter access is enabled.
 */
static bool armv8pmu_event_is_chained(struct perf_event *event)
{
	int idx = event->hw.idx;
	struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu);

	return !armv8pmu_event_has_user_read(event) &&
	       armv8pmu_event_is_64bit(event) &&
	       !armv8pmu_has_long_event(cpu_pmu) &&
	       (idx != ARMV8_IDX_CYCLE_COUNTER);
}
/*
 * ARMv8 low level PMU access
 */

/*
 * Perf Event to low level counters mapping
 */
#define	ARMV8_IDX_TO_COUNTER(x)	\
	(((x) - ARMV8_IDX_COUNTER0) & ARMV8_PMU_COUNTER_MASK)
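/*
 * Example: the first programmable event counter has perf idx
 * ARMV8_IDX_COUNTER0 (1) and maps to hardware counter 0, i.e.
 * PMEVCNTR0_EL0; the cycle counter (idx 0) never goes through this macro.
 */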
502 static u64 armv8pmu_pmcr_read(void)
507 static void armv8pmu_pmcr_write(u64 val)
509 val &= ARMV8_PMU_PMCR_MASK;
514 static int armv8pmu_has_overflowed(u32 pmovsr)
516 return pmovsr & ARMV8_PMU_OVERFLOWED_MASK;
519 static int armv8pmu_counter_has_overflowed(u32 pmnc, int idx)
521 return pmnc & BIT(ARMV8_IDX_TO_COUNTER(idx));
524 static u64 armv8pmu_read_evcntr(int idx)
526 u32 counter = ARMV8_IDX_TO_COUNTER(idx);
528 return read_pmevcntrn(counter);
531 static u64 armv8pmu_read_hw_counter(struct perf_event *event)
533 int idx = event->hw.idx;
534 u64 val = armv8pmu_read_evcntr(idx);
536 if (armv8pmu_event_is_chained(event))
537 val = (val << 32) | armv8pmu_read_evcntr(idx - 1);
/*
 * The cycle counter is always a 64-bit counter. When ARMV8_PMU_PMCR_LP
 * is set the event counters also become 64-bit counters. Unless the
 * user has requested a long counter (attr.config1) then we want to
 * interrupt upon 32-bit overflow - we achieve this by applying a bias.
 */
static bool armv8pmu_event_needs_bias(struct perf_event *event)
{
	struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu);
	struct hw_perf_event *hwc = &event->hw;
	int idx = hwc->idx;

	if (armv8pmu_event_is_64bit(event))
		return false;

	if (armv8pmu_has_long_event(cpu_pmu) ||
	    idx == ARMV8_IDX_CYCLE_COUNTER)
		return false;

	return true;
}
563 static u64 armv8pmu_bias_long_counter(struct perf_event *event, u64 value)
565 if (armv8pmu_event_needs_bias(event))
566 value |= GENMASK_ULL(63, 32);
571 static u64 armv8pmu_unbias_long_counter(struct perf_event *event, u64 value)
573 if (armv8pmu_event_needs_bias(event))
574 value &= ~GENMASK_ULL(63, 32);
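	/*
	 * Illustrative example of the bias scheme: a start value of 0xffff0000
	 * is stored as 0xffffffffffff0000, so the 64-bit counter overflows
	 * (and interrupts) after 0x10000 increments, exactly where a true
	 * 32-bit counter would have wrapped.
	 */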
579 static u64 armv8pmu_read_counter(struct perf_event *event)
581 struct hw_perf_event *hwc = &event->hw;
585 if (idx == ARMV8_IDX_CYCLE_COUNTER)
586 value = read_pmccntr();
588 value = armv8pmu_read_hw_counter(event);
590 return armv8pmu_unbias_long_counter(event, value);
593 static void armv8pmu_write_evcntr(int idx, u64 value)
595 u32 counter = ARMV8_IDX_TO_COUNTER(idx);
597 write_pmevcntrn(counter, value);
600 static void armv8pmu_write_hw_counter(struct perf_event *event,
603 int idx = event->hw.idx;
605 if (armv8pmu_event_is_chained(event)) {
606 armv8pmu_write_evcntr(idx, upper_32_bits(value));
607 armv8pmu_write_evcntr(idx - 1, lower_32_bits(value));
609 armv8pmu_write_evcntr(idx, value);
613 static void armv8pmu_write_counter(struct perf_event *event, u64 value)
615 struct hw_perf_event *hwc = &event->hw;
618 value = armv8pmu_bias_long_counter(event, value);
620 if (idx == ARMV8_IDX_CYCLE_COUNTER)
621 write_pmccntr(value);
623 armv8pmu_write_hw_counter(event, value);
626 static void armv8pmu_write_evtype(int idx, unsigned long val)
628 u32 counter = ARMV8_IDX_TO_COUNTER(idx);
629 unsigned long mask = ARMV8_PMU_EVTYPE_EVENT |
630 ARMV8_PMU_INCLUDE_EL2 |
631 ARMV8_PMU_EXCLUDE_EL0 |
632 ARMV8_PMU_EXCLUDE_EL1;
634 if (IS_ENABLED(CONFIG_ARM64))
635 mask |= ARMV8_PMU_EVTYPE_TC | ARMV8_PMU_EVTYPE_TH;
638 write_pmevtypern(counter, val);
static void armv8pmu_write_event_type(struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;
	int idx = hwc->idx;

	/*
	 * For chained events, the low counter is programmed to count
	 * the event of interest and the high counter is programmed
	 * with CHAIN event code with filters set to count at all ELs.
	 */
	if (armv8pmu_event_is_chained(event)) {
		u32 chain_evt = ARMV8_PMUV3_PERFCTR_CHAIN |
				ARMV8_PMU_INCLUDE_EL2;

		armv8pmu_write_evtype(idx - 1, hwc->config_base);
		armv8pmu_write_evtype(idx, chain_evt);
	} else {
		if (idx == ARMV8_IDX_CYCLE_COUNTER)
			write_pmccfiltr(hwc->config_base);
		else
			armv8pmu_write_evtype(idx, hwc->config_base);
	}
}
665 static u32 armv8pmu_event_cnten_mask(struct perf_event *event)
667 int counter = ARMV8_IDX_TO_COUNTER(event->hw.idx);
668 u32 mask = BIT(counter);
670 if (armv8pmu_event_is_chained(event))
671 mask |= BIT(counter - 1);
675 static void armv8pmu_enable_counter(u32 mask)
678 * Make sure event configuration register writes are visible before we
679 * enable the counter.
682 write_pmcntenset(mask);
685 static void armv8pmu_enable_event_counter(struct perf_event *event)
687 struct perf_event_attr *attr = &event->attr;
688 u32 mask = armv8pmu_event_cnten_mask(event);
690 kvm_set_pmu_events(mask, attr);
692 /* We rely on the hypervisor switch code to enable guest counters */
693 if (!kvm_pmu_counter_deferred(attr))
694 armv8pmu_enable_counter(mask);
697 static void armv8pmu_disable_counter(u32 mask)
699 write_pmcntenclr(mask);
701 * Make sure the effects of disabling the counter are visible before we
702 * start configuring the event.
707 static void armv8pmu_disable_event_counter(struct perf_event *event)
709 struct perf_event_attr *attr = &event->attr;
710 u32 mask = armv8pmu_event_cnten_mask(event);
712 kvm_clr_pmu_events(mask);
714 /* We rely on the hypervisor switch code to disable guest counters */
715 if (!kvm_pmu_counter_deferred(attr))
716 armv8pmu_disable_counter(mask);
719 static void armv8pmu_enable_intens(u32 mask)
721 write_pmintenset(mask);
724 static void armv8pmu_enable_event_irq(struct perf_event *event)
726 u32 counter = ARMV8_IDX_TO_COUNTER(event->hw.idx);
727 armv8pmu_enable_intens(BIT(counter));
730 static void armv8pmu_disable_intens(u32 mask)
732 write_pmintenclr(mask);
734 /* Clear the overflow flag in case an interrupt is pending. */
735 write_pmovsclr(mask);
739 static void armv8pmu_disable_event_irq(struct perf_event *event)
741 u32 counter = ARMV8_IDX_TO_COUNTER(event->hw.idx);
742 armv8pmu_disable_intens(BIT(counter));
745 static u32 armv8pmu_getreset_flags(void)
750 value = read_pmovsclr();
752 /* Write to clear flags */
753 value &= ARMV8_PMU_OVERFLOWED_MASK;
754 write_pmovsclr(value);
759 static void update_pmuserenr(u64 val)
761 lockdep_assert_irqs_disabled();
	/*
	 * The current PMUSERENR_EL0 value might be the value for the guest.
	 * If that's the case, have KVM keep track of the register value
	 * for the host EL0 so that KVM can restore it before returning to
	 * the host EL0. Otherwise, update the register now.
	 */
	if (kvm_set_pmuserenr(val))
		return;

	write_pmuserenr(val);
}
775 static void armv8pmu_disable_user_access(void)
static void armv8pmu_enable_user_access(struct arm_pmu *cpu_pmu)
{
	int i;
	struct pmu_hw_events *cpuc = this_cpu_ptr(cpu_pmu->hw_events);

	/* Clear any unused counters to avoid leaking their contents */
	for_each_clear_bit(i, cpuc->used_mask, cpu_pmu->num_events) {
		if (i == ARMV8_IDX_CYCLE_COUNTER)
			write_pmccntr(0);
		else
			armv8pmu_write_evcntr(i, 0);
	}

	update_pmuserenr(ARMV8_PMU_USERENR_ER | ARMV8_PMU_USERENR_CR);
}
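/*
 * ER and CR enable EL0 reads of the event counters and the cycle counter
 * respectively; the global EL0 enable (EN) and the software-increment write
 * enable (SW) are deliberately left clear, so user space gets read-only
 * access to the counters it owns.
 */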
796 static void armv8pmu_enable_event(struct perf_event *event)
	/*
	 * Enable counter and interrupt, and set the counter to count
	 * the event that we're interested in.
	 */
802 armv8pmu_disable_event_counter(event);
803 armv8pmu_write_event_type(event);
804 armv8pmu_enable_event_irq(event);
805 armv8pmu_enable_event_counter(event);
808 static void armv8pmu_disable_event(struct perf_event *event)
810 armv8pmu_disable_event_counter(event);
811 armv8pmu_disable_event_irq(event);
static void armv8pmu_start(struct arm_pmu *cpu_pmu)
{
	struct perf_event_context *ctx;
	int nr_user = 0;

	ctx = perf_cpu_task_ctx();
	if (ctx)
		nr_user = ctx->nr_user;
	if (sysctl_perf_user_access && nr_user)
		armv8pmu_enable_user_access(cpu_pmu);
	else
		armv8pmu_disable_user_access();

	/* Enable all counters */
	armv8pmu_pmcr_write(armv8pmu_pmcr_read() | ARMV8_PMU_PMCR_E);

	kvm_vcpu_pmu_resync_el0();
}
834 static void armv8pmu_stop(struct arm_pmu *cpu_pmu)
836 /* Disable all counters */
837 armv8pmu_pmcr_write(armv8pmu_pmcr_read() & ~ARMV8_PMU_PMCR_E);
static irqreturn_t armv8pmu_handle_irq(struct arm_pmu *cpu_pmu)
{
	u32 pmovsr;
	struct perf_sample_data data;
	struct pmu_hw_events *cpuc = this_cpu_ptr(cpu_pmu->hw_events);
	struct pt_regs *regs;
	int idx;

	/* Get and reset the IRQ flags */
	pmovsr = armv8pmu_getreset_flags();

	/* Did an overflow occur? */
	if (!armv8pmu_has_overflowed(pmovsr))
		return IRQ_NONE;

	/* Handle the counter(s) overflow(s) */
	regs = get_irq_regs();

	/*
	 * Stop the PMU while processing the counter overflows
	 * to prevent skews in group events.
	 */
	armv8pmu_stop(cpu_pmu);
	for (idx = 0; idx < cpu_pmu->num_events; ++idx) {
		struct perf_event *event = cpuc->events[idx];
		struct hw_perf_event *hwc;

		/* Ignore if we don't have an event. */
		if (!event)
			continue;

		/*
		 * We have a single interrupt for all counters. Check that
		 * each counter has overflowed before we process it.
		 */
		if (!armv8pmu_counter_has_overflowed(pmovsr, idx))
			continue;

		hwc = &event->hw;
		armpmu_event_update(event);
		perf_sample_data_init(&data, 0, hwc->last_period);
		if (!armpmu_event_set_period(event))
			continue;

		/*
		 * Perf event overflow will queue the processing of the event as
		 * an irq_work which will be taken care of in the handling of
		 * IPI_IRQ_WORK.
		 */
		if (perf_event_overflow(event, &data, regs))
			cpu_pmu->disable(event);
	}
	armv8pmu_start(cpu_pmu);

	return IRQ_HANDLED;
}
903 static int armv8pmu_get_single_idx(struct pmu_hw_events *cpuc,
904 struct arm_pmu *cpu_pmu)
908 for (idx = ARMV8_IDX_COUNTER0; idx < cpu_pmu->num_events; idx++) {
909 if (!test_and_set_bit(idx, cpuc->used_mask))
915 static int armv8pmu_get_chain_idx(struct pmu_hw_events *cpuc,
916 struct arm_pmu *cpu_pmu)
	/*
	 * Chaining requires two consecutive event counters, where
	 * the lower idx must be even.
	 */
924 for (idx = ARMV8_IDX_COUNTER0 + 1; idx < cpu_pmu->num_events; idx += 2) {
925 if (!test_and_set_bit(idx, cpuc->used_mask)) {
926 /* Check if the preceding even counter is available */
927 if (!test_and_set_bit(idx - 1, cpuc->used_mask))
929 /* Release the Odd counter */
930 clear_bit(idx, cpuc->used_mask);
936 static int armv8pmu_get_event_idx(struct pmu_hw_events *cpuc,
937 struct perf_event *event)
939 struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu);
940 struct hw_perf_event *hwc = &event->hw;
941 unsigned long evtype = hwc->config_base & ARMV8_PMU_EVTYPE_EVENT;
943 /* Always prefer to place a cycle counter into the cycle counter. */
944 if (evtype == ARMV8_PMUV3_PERFCTR_CPU_CYCLES) {
945 if (!test_and_set_bit(ARMV8_IDX_CYCLE_COUNTER, cpuc->used_mask))
946 return ARMV8_IDX_CYCLE_COUNTER;
		else if (armv8pmu_event_is_64bit(event) &&
			 armv8pmu_event_want_user_access(event) &&
			 !armv8pmu_has_long_event(cpu_pmu))
			return -EAGAIN;
	}

	/*
	 * Otherwise use the event counters
	 */
	if (armv8pmu_event_is_chained(event))
		return armv8pmu_get_chain_idx(cpuc, cpu_pmu);

	return armv8pmu_get_single_idx(cpuc, cpu_pmu);
}
962 static void armv8pmu_clear_event_idx(struct pmu_hw_events *cpuc,
963 struct perf_event *event)
965 int idx = event->hw.idx;
967 clear_bit(idx, cpuc->used_mask);
968 if (armv8pmu_event_is_chained(event))
969 clear_bit(idx - 1, cpuc->used_mask);
972 static int armv8pmu_user_event_idx(struct perf_event *event)
	if (!sysctl_perf_user_access || !armv8pmu_event_has_user_read(event))
		return 0;

	/*
	 * We remap the cycle counter index to 32 to
	 * match the offset applied to the rest of
	 * the counter indices.
	 */
	if (event->hw.idx == ARMV8_IDX_CYCLE_COUNTER)
		return ARMV8_IDX_CYCLE_COUNTER_USER;

	return event->hw.idx;
}
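/*
 * Sketch of the user-space side of this convention (see the arm64 counter
 * read helpers in tools/lib/perf/mmap.c): an index of 0 means the event
 * cannot be read from EL0, index 32 means "read PMCCNTR_EL0", and any other
 * index n means "read PMEVCNTR<n-1>_EL0".
 */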
/*
 * Add an event filter to a given event.
 */
991 static int armv8pmu_set_event_filter(struct hw_perf_event *event,
992 struct perf_event_attr *attr)
	unsigned long config_base = 0;
	struct perf_event *perf_event = container_of(attr, struct perf_event,
						      attr);
	struct arm_pmu *cpu_pmu = to_arm_pmu(perf_event->pmu);
	u32 th;

	if (attr->exclude_idle) {
		pr_debug("ARM performance counters do not support mode exclusion\n");
		return -EOPNOTSUPP;
	}
1006 * If we're running in hyp mode, then we *are* the hypervisor.
1007 * Therefore we ignore exclude_hv in this configuration, since
1008 * there's no hypervisor to sample anyway. This is consistent
1009 * with other architectures (x86 and Power).
1011 if (is_kernel_in_hyp_mode()) {
1012 if (!attr->exclude_kernel && !attr->exclude_host)
1013 config_base |= ARMV8_PMU_INCLUDE_EL2;
1014 if (attr->exclude_guest)
1015 config_base |= ARMV8_PMU_EXCLUDE_EL1;
		if (attr->exclude_host)
			config_base |= ARMV8_PMU_EXCLUDE_EL0;
	} else {
		if (!attr->exclude_hv && !attr->exclude_host)
			config_base |= ARMV8_PMU_INCLUDE_EL2;
	}
	/*
	 * Filter out !VHE kernels and guest kernels
	 */
1026 if (attr->exclude_kernel)
1027 config_base |= ARMV8_PMU_EXCLUDE_EL1;
1029 if (attr->exclude_user)
1030 config_base |= ARMV8_PMU_EXCLUDE_EL0;
	/*
	 * If FEAT_PMUv3_TH isn't implemented, then THWIDTH (threshold_max) will
	 * be 0 and will also trigger this check, preventing it from being used.
	 */
	th = ATTR_CFG_GET_FLD(attr, threshold);
	if (th > threshold_max(cpu_pmu)) {
		pr_debug("PMU event threshold exceeds max value\n");
		return -EINVAL;
	}
1042 if (IS_ENABLED(CONFIG_ARM64) && th) {
1043 config_base |= FIELD_PREP(ARMV8_PMU_EVTYPE_TH, th);
1044 config_base |= FIELD_PREP(ARMV8_PMU_EVTYPE_TC,
1045 armv8pmu_event_threshold_control(attr));
	/*
	 * Install the filter into config_base as this is used to
	 * construct the event type.
	 */
	event->config_base = config_base;

	return 0;
}
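/*
 * Illustrative usage (the PMU instance name is an assumption):
 *   perf stat -e armv8_pmuv3_0/mem_access,threshold=2,threshold_compare=2/ ...
 * The threshold fields are only usable when FEAT_PMUv3_TH is implemented;
 * otherwise threshold_max() is 0 and any non-zero threshold is rejected
 * with -EINVAL above.
 */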
static void armv8pmu_reset(void *info)
{
	struct arm_pmu *cpu_pmu = (struct arm_pmu *)info;
	u64 pmcr;
1062 /* The counter and interrupt enable registers are unknown at reset. */
1063 armv8pmu_disable_counter(U32_MAX);
1064 armv8pmu_disable_intens(U32_MAX);
1066 /* Clear the counters we flip at guest entry/exit */
1067 kvm_clr_pmu_events(U32_MAX);
	/*
	 * Initialize & Reset PMNC. Request overflow interrupt for
	 * 64 bit cycle counter but cheat in armv8pmu_write_counter().
	 */
	pmcr = ARMV8_PMU_PMCR_P | ARMV8_PMU_PMCR_C | ARMV8_PMU_PMCR_LC;

	/* Enable long event counter support where available */
	if (armv8pmu_has_long_event(cpu_pmu))
		pmcr |= ARMV8_PMU_PMCR_LP;

	armv8pmu_pmcr_write(pmcr);
}
1082 static int __armv8_pmuv3_map_event_id(struct arm_pmu *armpmu,
1083 struct perf_event *event)
1085 if (event->attr.type == PERF_TYPE_HARDWARE &&
1086 event->attr.config == PERF_COUNT_HW_BRANCH_INSTRUCTIONS) {
1088 if (test_bit(ARMV8_PMUV3_PERFCTR_PC_WRITE_RETIRED,
1089 armpmu->pmceid_bitmap))
1090 return ARMV8_PMUV3_PERFCTR_PC_WRITE_RETIRED;
1092 if (test_bit(ARMV8_PMUV3_PERFCTR_BR_RETIRED,
1093 armpmu->pmceid_bitmap))
1094 return ARMV8_PMUV3_PERFCTR_BR_RETIRED;
1096 return HW_OP_UNSUPPORTED;
1099 return armpmu_map_event(event, &armv8_pmuv3_perf_map,
1100 &armv8_pmuv3_perf_cache_map,
1101 ARMV8_PMU_EVTYPE_EVENT);
1104 static int __armv8_pmuv3_map_event(struct perf_event *event,
1105 const unsigned (*extra_event_map)
1106 [PERF_COUNT_HW_MAX],
1107 const unsigned (*extra_cache_map)
1108 [PERF_COUNT_HW_CACHE_MAX]
1109 [PERF_COUNT_HW_CACHE_OP_MAX]
1110 [PERF_COUNT_HW_CACHE_RESULT_MAX])
1113 struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
1115 hw_event_id = __armv8_pmuv3_map_event_id(armpmu, event);
	/*
	 * CHAIN events only work when paired with an adjacent counter, and it
	 * never makes sense for a user to open one in isolation, as they'll be
	 * rotated arbitrarily.
	 */
	if (hw_event_id == ARMV8_PMUV3_PERFCTR_CHAIN)
		return -EINVAL;

	if (armv8pmu_event_is_64bit(event))
		event->hw.flags |= ARMPMU_EVT_64BIT;

	/*
	 * User events must be allocated into a single counter, and so
	 * must not be chained.
	 *
	 * Most 64-bit events require long counter support, but 64-bit
	 * CPU_CYCLES events can be placed into the dedicated cycle
	 * counter when this is free.
	 */
	if (armv8pmu_event_want_user_access(event)) {
		if (!(event->attach_state & PERF_ATTACH_TASK))
			return -EINVAL;
		if (armv8pmu_event_is_64bit(event) &&
		    (hw_event_id != ARMV8_PMUV3_PERFCTR_CPU_CYCLES) &&
		    !armv8pmu_has_long_event(armpmu))
			return -EOPNOTSUPP;

		event->hw.flags |= PERF_EVENT_FLAG_USER_READ_CNT;
	}

	/* Only expose micro/arch events supported by this PMU */
	if ((hw_event_id > 0) && (hw_event_id < ARMV8_PMUV3_MAX_COMMON_EVENTS)
	    && test_bit(hw_event_id, armpmu->pmceid_bitmap)) {
		return hw_event_id;
	}

	return armpmu_map_event(event, extra_event_map, extra_cache_map,
				ARMV8_PMU_EVTYPE_EVENT);
}
1157 static int armv8_pmuv3_map_event(struct perf_event *event)
1159 return __armv8_pmuv3_map_event(event, NULL, NULL);
1162 static int armv8_a53_map_event(struct perf_event *event)
1164 return __armv8_pmuv3_map_event(event, NULL, &armv8_a53_perf_cache_map);
1167 static int armv8_a57_map_event(struct perf_event *event)
1169 return __armv8_pmuv3_map_event(event, NULL, &armv8_a57_perf_cache_map);
1172 static int armv8_a73_map_event(struct perf_event *event)
1174 return __armv8_pmuv3_map_event(event, NULL, &armv8_a73_perf_cache_map);
1177 static int armv8_thunder_map_event(struct perf_event *event)
1179 return __armv8_pmuv3_map_event(event, NULL,
1180 &armv8_thunder_perf_cache_map);
1183 static int armv8_vulcan_map_event(struct perf_event *event)
1185 return __armv8_pmuv3_map_event(event, NULL,
1186 &armv8_vulcan_perf_cache_map);
struct armv8pmu_probe_info {
	struct arm_pmu *pmu;
	bool present;
};
static void __armv8pmu_probe_pmu(void *info)
{
	struct armv8pmu_probe_info *probe = info;
	struct arm_pmu *cpu_pmu = probe->pmu;
	u64 pmceid_raw[2];
	u32 pmceid[2];
	int pmuver;

	pmuver = read_pmuver();
	if (!pmuv3_implemented(pmuver))
		return;
1206 cpu_pmu->pmuver = pmuver;
1207 probe->present = true;
1209 /* Read the nb of CNTx counters supported from PMNC */
1210 cpu_pmu->num_events = FIELD_GET(ARMV8_PMU_PMCR_N, armv8pmu_pmcr_read());
1212 /* Add the CPU cycles counter */
1213 cpu_pmu->num_events += 1;
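	/*
	 * Example: a core whose PMCR_EL0.N reads 6 ends up with
	 * num_events == 7, i.e. six programmable event counters plus the
	 * dedicated cycle counter.
	 */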
1215 pmceid[0] = pmceid_raw[0] = read_pmceid0();
1216 pmceid[1] = pmceid_raw[1] = read_pmceid1();
1218 bitmap_from_arr32(cpu_pmu->pmceid_bitmap,
1219 pmceid, ARMV8_PMUV3_MAX_COMMON_EVENTS);
1221 pmceid[0] = pmceid_raw[0] >> 32;
1222 pmceid[1] = pmceid_raw[1] >> 32;
1224 bitmap_from_arr32(cpu_pmu->pmceid_ext_bitmap,
1225 pmceid, ARMV8_PMUV3_MAX_COMMON_EVENTS);
	/* store PMMIR register for sysfs */
	if (is_pmuv3p4(pmuver))
		cpu_pmu->reg_pmmir = read_pmmir();
	else
		cpu_pmu->reg_pmmir = 0;
}
static int armv8pmu_probe_pmu(struct arm_pmu *cpu_pmu)
{
	struct armv8pmu_probe_info probe = {
		.pmu = cpu_pmu,
		.present = false,
	};
	int ret;

	ret = smp_call_function_any(&cpu_pmu->supported_cpus,
				    __armv8pmu_probe_pmu,
				    &probe, 1);
	if (ret)
		return ret;

	return probe.present ? 0 : -ENODEV;
}
1251 static void armv8pmu_disable_user_access_ipi(void *unused)
1253 armv8pmu_disable_user_access();
1256 static int armv8pmu_proc_user_access_handler(struct ctl_table *table, int write,
1257 void *buffer, size_t *lenp, loff_t *ppos)
1259 int ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
1260 if (ret || !write || sysctl_perf_user_access)
1263 on_each_cpu(armv8pmu_disable_user_access_ipi, NULL, 1);
static struct ctl_table armv8_pmu_sysctl_table[] = {
	{
		.procname	= "perf_user_access",
		.data		= &sysctl_perf_user_access,
		.maxlen		= sizeof(unsigned int),
		.mode		= 0644,
		.proc_handler	= armv8pmu_proc_user_access_handler,
		.extra1		= SYSCTL_ZERO,
		.extra2		= SYSCTL_ONE,
	},
};
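/*
 * Illustrative usage: "echo 1 > /proc/sys/kernel/perf_user_access" allows
 * tasks that mmap() their events to read counters directly from EL0;
 * writing 0 revokes that access on every CPU via the IPI above.
 */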
1279 static void armv8_pmu_register_sysctl_table(void)
1281 static u32 tbl_registered = 0;
1283 if (!cmpxchg_relaxed(&tbl_registered, 0, 1))
1284 register_sysctl("kernel", armv8_pmu_sysctl_table);
1287 static int armv8_pmu_init(struct arm_pmu *cpu_pmu, char *name,
1288 int (*map_event)(struct perf_event *event))
1290 int ret = armv8pmu_probe_pmu(cpu_pmu);
1294 cpu_pmu->handle_irq = armv8pmu_handle_irq;
1295 cpu_pmu->enable = armv8pmu_enable_event;
1296 cpu_pmu->disable = armv8pmu_disable_event;
1297 cpu_pmu->read_counter = armv8pmu_read_counter;
1298 cpu_pmu->write_counter = armv8pmu_write_counter;
1299 cpu_pmu->get_event_idx = armv8pmu_get_event_idx;
1300 cpu_pmu->clear_event_idx = armv8pmu_clear_event_idx;
1301 cpu_pmu->start = armv8pmu_start;
1302 cpu_pmu->stop = armv8pmu_stop;
1303 cpu_pmu->reset = armv8pmu_reset;
1304 cpu_pmu->set_event_filter = armv8pmu_set_event_filter;
1306 cpu_pmu->pmu.event_idx = armv8pmu_user_event_idx;
1308 cpu_pmu->name = name;
1309 cpu_pmu->map_event = map_event;
1310 cpu_pmu->attr_groups[ARMPMU_ATTR_GROUP_EVENTS] = &armv8_pmuv3_events_attr_group;
1311 cpu_pmu->attr_groups[ARMPMU_ATTR_GROUP_FORMATS] = &armv8_pmuv3_format_attr_group;
1312 cpu_pmu->attr_groups[ARMPMU_ATTR_GROUP_CAPS] = &armv8_pmuv3_caps_attr_group;
1313 armv8_pmu_register_sysctl_table();
1317 #define PMUV3_INIT_SIMPLE(name) \
1318 static int name##_pmu_init(struct arm_pmu *cpu_pmu) \
1320 return armv8_pmu_init(cpu_pmu, #name, armv8_pmuv3_map_event); \
1323 #define PMUV3_INIT_MAP_EVENT(name, map_event) \
1324 static int name##_pmu_init(struct arm_pmu *cpu_pmu) \
1326 return armv8_pmu_init(cpu_pmu, #name, map_event); \
1329 PMUV3_INIT_SIMPLE(armv8_pmuv3)
1331 PMUV3_INIT_SIMPLE(armv8_cortex_a34)
1332 PMUV3_INIT_SIMPLE(armv8_cortex_a55)
1333 PMUV3_INIT_SIMPLE(armv8_cortex_a65)
1334 PMUV3_INIT_SIMPLE(armv8_cortex_a75)
1335 PMUV3_INIT_SIMPLE(armv8_cortex_a76)
1336 PMUV3_INIT_SIMPLE(armv8_cortex_a77)
1337 PMUV3_INIT_SIMPLE(armv8_cortex_a78)
1338 PMUV3_INIT_SIMPLE(armv9_cortex_a510)
1339 PMUV3_INIT_SIMPLE(armv9_cortex_a520)
1340 PMUV3_INIT_SIMPLE(armv9_cortex_a710)
1341 PMUV3_INIT_SIMPLE(armv9_cortex_a715)
1342 PMUV3_INIT_SIMPLE(armv9_cortex_a720)
1343 PMUV3_INIT_SIMPLE(armv8_cortex_x1)
1344 PMUV3_INIT_SIMPLE(armv9_cortex_x2)
1345 PMUV3_INIT_SIMPLE(armv9_cortex_x3)
1346 PMUV3_INIT_SIMPLE(armv9_cortex_x4)
1347 PMUV3_INIT_SIMPLE(armv8_neoverse_e1)
1348 PMUV3_INIT_SIMPLE(armv8_neoverse_n1)
1349 PMUV3_INIT_SIMPLE(armv9_neoverse_n2)
1350 PMUV3_INIT_SIMPLE(armv8_neoverse_v1)
1352 PMUV3_INIT_SIMPLE(armv8_nvidia_carmel)
1353 PMUV3_INIT_SIMPLE(armv8_nvidia_denver)
1355 PMUV3_INIT_MAP_EVENT(armv8_cortex_a35, armv8_a53_map_event)
1356 PMUV3_INIT_MAP_EVENT(armv8_cortex_a53, armv8_a53_map_event)
1357 PMUV3_INIT_MAP_EVENT(armv8_cortex_a57, armv8_a57_map_event)
1358 PMUV3_INIT_MAP_EVENT(armv8_cortex_a72, armv8_a57_map_event)
1359 PMUV3_INIT_MAP_EVENT(armv8_cortex_a73, armv8_a73_map_event)
1360 PMUV3_INIT_MAP_EVENT(armv8_cavium_thunder, armv8_thunder_map_event)
1361 PMUV3_INIT_MAP_EVENT(armv8_brcm_vulcan, armv8_vulcan_map_event)
1363 static const struct of_device_id armv8_pmu_of_device_ids[] = {
1364 {.compatible = "arm,armv8-pmuv3", .data = armv8_pmuv3_pmu_init},
1365 {.compatible = "arm,cortex-a34-pmu", .data = armv8_cortex_a34_pmu_init},
1366 {.compatible = "arm,cortex-a35-pmu", .data = armv8_cortex_a35_pmu_init},
1367 {.compatible = "arm,cortex-a53-pmu", .data = armv8_cortex_a53_pmu_init},
1368 {.compatible = "arm,cortex-a55-pmu", .data = armv8_cortex_a55_pmu_init},
1369 {.compatible = "arm,cortex-a57-pmu", .data = armv8_cortex_a57_pmu_init},
1370 {.compatible = "arm,cortex-a65-pmu", .data = armv8_cortex_a65_pmu_init},
1371 {.compatible = "arm,cortex-a72-pmu", .data = armv8_cortex_a72_pmu_init},
1372 {.compatible = "arm,cortex-a73-pmu", .data = armv8_cortex_a73_pmu_init},
1373 {.compatible = "arm,cortex-a75-pmu", .data = armv8_cortex_a75_pmu_init},
1374 {.compatible = "arm,cortex-a76-pmu", .data = armv8_cortex_a76_pmu_init},
1375 {.compatible = "arm,cortex-a77-pmu", .data = armv8_cortex_a77_pmu_init},
1376 {.compatible = "arm,cortex-a78-pmu", .data = armv8_cortex_a78_pmu_init},
1377 {.compatible = "arm,cortex-a510-pmu", .data = armv9_cortex_a510_pmu_init},
1378 {.compatible = "arm,cortex-a520-pmu", .data = armv9_cortex_a520_pmu_init},
1379 {.compatible = "arm,cortex-a710-pmu", .data = armv9_cortex_a710_pmu_init},
1380 {.compatible = "arm,cortex-a715-pmu", .data = armv9_cortex_a715_pmu_init},
1381 {.compatible = "arm,cortex-a720-pmu", .data = armv9_cortex_a720_pmu_init},
1382 {.compatible = "arm,cortex-x1-pmu", .data = armv8_cortex_x1_pmu_init},
1383 {.compatible = "arm,cortex-x2-pmu", .data = armv9_cortex_x2_pmu_init},
1384 {.compatible = "arm,cortex-x3-pmu", .data = armv9_cortex_x3_pmu_init},
1385 {.compatible = "arm,cortex-x4-pmu", .data = armv9_cortex_x4_pmu_init},
1386 {.compatible = "arm,neoverse-e1-pmu", .data = armv8_neoverse_e1_pmu_init},
1387 {.compatible = "arm,neoverse-n1-pmu", .data = armv8_neoverse_n1_pmu_init},
1388 {.compatible = "arm,neoverse-n2-pmu", .data = armv9_neoverse_n2_pmu_init},
1389 {.compatible = "arm,neoverse-v1-pmu", .data = armv8_neoverse_v1_pmu_init},
1390 {.compatible = "cavium,thunder-pmu", .data = armv8_cavium_thunder_pmu_init},
1391 {.compatible = "brcm,vulcan-pmu", .data = armv8_brcm_vulcan_pmu_init},
1392 {.compatible = "nvidia,carmel-pmu", .data = armv8_nvidia_carmel_pmu_init},
1393 {.compatible = "nvidia,denver-pmu", .data = armv8_nvidia_denver_pmu_init},
1397 static int armv8_pmu_device_probe(struct platform_device *pdev)
1399 return arm_pmu_device_probe(pdev, armv8_pmu_of_device_ids, NULL);
static struct platform_driver armv8_pmu_driver = {
	.driver		= {
		.name	= ARMV8_PMU_PDEV_NAME,
		.of_match_table = armv8_pmu_of_device_ids,
		.suppress_bind_attrs = true,
	},
	.probe		= armv8_pmu_device_probe,
};
static int __init armv8_pmu_driver_init(void)
{
	int ret;

	if (acpi_disabled)
		ret = platform_driver_register(&armv8_pmu_driver);
	else
		ret = arm_pmu_acpi_probe(armv8_pmuv3_pmu_init);

	if (!ret)
		lockup_detector_retry_init();

	return ret;
}
1425 device_initcall(armv8_pmu_driver_init)
1427 void arch_perf_update_userpage(struct perf_event *event,
1428 struct perf_event_mmap_page *userpg, u64 now)
1430 struct clock_read_data *rd;
1434 userpg->cap_user_time = 0;
1435 userpg->cap_user_time_zero = 0;
1436 userpg->cap_user_time_short = 0;
1437 userpg->cap_user_rdpmc = armv8pmu_event_has_user_read(event);
1439 if (userpg->cap_user_rdpmc) {
		if (event->hw.flags & ARMPMU_EVT_64BIT)
			userpg->pmc_width = 64;
		else
			userpg->pmc_width = 32;
	}
1447 rd = sched_clock_read_begin(&seq);
1449 if (rd->read_sched_clock != arch_timer_read_counter)
1452 userpg->time_mult = rd->mult;
1453 userpg->time_shift = rd->shift;
1454 userpg->time_zero = rd->epoch_ns;
1455 userpg->time_cycles = rd->epoch_cyc;
1456 userpg->time_mask = rd->sched_clock_mask;
		/*
		 * Subtract the cycle base, such that software that
		 * doesn't know about cap_user_time_short still 'works'
		 * assuming no wraps.
		 */
1463 ns = mul_u64_u32_shr(rd->epoch_cyc, rd->mult, rd->shift);
1464 userpg->time_zero -= ns;
1466 } while (sched_clock_read_retry(seq));
1468 userpg->time_offset = userpg->time_zero - now;
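	/*
	 * Sketch of how user space is expected to combine these fields (the
	 * authoritative description lives next to struct perf_event_mmap_page
	 * in include/uapi/linux/perf_event.h):
	 *
	 *   cyc = <read CNTVCT_EL0>;
	 *   if (cap_user_time_short)
	 *           cyc = time_cycles + ((cyc - time_cycles) & time_mask);
	 *   ns  = time_zero + mul_u64_u32_shr(cyc, time_mult, time_shift);
	 */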
	/*
	 * time_shift is not expected to be greater than 31 due to
	 * the original published conversion algorithm shifting a
	 * 32-bit value (now specifies a 64-bit value) - refer
	 * perf_event_mmap_page documentation in perf_event.h.
	 */
1476 if (userpg->time_shift == 32) {
1477 userpg->time_shift = 31;
1478 userpg->time_mult >>= 1;
	/*
	 * Internal timekeeping for enabled/running/stopped times
	 * is always computed with the sched_clock.
	 */
1485 userpg->cap_user_time = 1;
1486 userpg->cap_user_time_zero = 1;
1487 userpg->cap_user_time_short = 1;