GNU Linux-libre 5.4.274-gnu1
arch/arm64/kernel/perf_event.c
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * ARMv8 PMUv3 Performance Events handling code.
4  *
5  * Copyright (C) 2012 ARM Limited
6  * Author: Will Deacon <will.deacon@arm.com>
7  *
8  * This code is based heavily on the ARMv7 perf event code.
9  */
10
11 #include <asm/irq_regs.h>
12 #include <asm/perf_event.h>
13 #include <asm/sysreg.h>
14 #include <asm/virt.h>
15
16 #include <linux/acpi.h>
17 #include <linux/clocksource.h>
18 #include <linux/kvm_host.h>
19 #include <linux/of.h>
20 #include <linux/perf/arm_pmu.h>
21 #include <linux/platform_device.h>
22 #include <linux/smp.h>
23
24 /* ARMv8 Cortex-A53 specific event types. */
25 #define ARMV8_A53_PERFCTR_PREF_LINEFILL                         0xC2
26
27 /* ARMv8 Cavium ThunderX specific event types. */
28 #define ARMV8_THUNDER_PERFCTR_L1D_CACHE_MISS_ST                 0xE9
29 #define ARMV8_THUNDER_PERFCTR_L1D_CACHE_PREF_ACCESS             0xEA
30 #define ARMV8_THUNDER_PERFCTR_L1D_CACHE_PREF_MISS               0xEB
31 #define ARMV8_THUNDER_PERFCTR_L1I_CACHE_PREF_ACCESS             0xEC
32 #define ARMV8_THUNDER_PERFCTR_L1I_CACHE_PREF_MISS               0xED
33
34 /*
35  * ARMv8 architecturally defined events. Not all of these may be
36  * supported on any given implementation; unsupported events will
37  * be disabled at run-time based on the PMCEID registers.
38  */
39 static const unsigned armv8_pmuv3_perf_map[PERF_COUNT_HW_MAX] = {
40         PERF_MAP_ALL_UNSUPPORTED,
41         [PERF_COUNT_HW_CPU_CYCLES]              = ARMV8_PMUV3_PERFCTR_CPU_CYCLES,
42         [PERF_COUNT_HW_INSTRUCTIONS]            = ARMV8_PMUV3_PERFCTR_INST_RETIRED,
43         [PERF_COUNT_HW_CACHE_REFERENCES]        = ARMV8_PMUV3_PERFCTR_L1D_CACHE,
44         [PERF_COUNT_HW_CACHE_MISSES]            = ARMV8_PMUV3_PERFCTR_L1D_CACHE_REFILL,
45         [PERF_COUNT_HW_BRANCH_INSTRUCTIONS]     = ARMV8_PMUV3_PERFCTR_PC_WRITE_RETIRED,
46         [PERF_COUNT_HW_BRANCH_MISSES]           = ARMV8_PMUV3_PERFCTR_BR_MIS_PRED,
47         [PERF_COUNT_HW_BUS_CYCLES]              = ARMV8_PMUV3_PERFCTR_BUS_CYCLES,
48         [PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] = ARMV8_PMUV3_PERFCTR_STALL_FRONTEND,
49         [PERF_COUNT_HW_STALLED_CYCLES_BACKEND]  = ARMV8_PMUV3_PERFCTR_STALL_BACKEND,
50 };
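/*
 * Annotation (not in the original source): PERF_MAP_ALL_UNSUPPORTED from
 * <linux/perf/arm_pmu.h> initialises every slot of this map to
 * HW_OP_UNSUPPORTED, so only the generic events listed above resolve to a
 * PMUv3 event code; armpmu_map_event() rejects the rest (for example,
 * PERF_COUNT_HW_REF_CPU_CYCLES is not mapped here).
 */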
51
52 static const unsigned armv8_pmuv3_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
53                                                 [PERF_COUNT_HW_CACHE_OP_MAX]
54                                                 [PERF_COUNT_HW_CACHE_RESULT_MAX] = {
55         PERF_CACHE_MAP_ALL_UNSUPPORTED,
56
57         [C(L1D)][C(OP_READ)][C(RESULT_ACCESS)]  = ARMV8_PMUV3_PERFCTR_L1D_CACHE,
58         [C(L1D)][C(OP_READ)][C(RESULT_MISS)]    = ARMV8_PMUV3_PERFCTR_L1D_CACHE_REFILL,
59
60         [C(L1I)][C(OP_READ)][C(RESULT_ACCESS)]  = ARMV8_PMUV3_PERFCTR_L1I_CACHE,
61         [C(L1I)][C(OP_READ)][C(RESULT_MISS)]    = ARMV8_PMUV3_PERFCTR_L1I_CACHE_REFILL,
62
63         [C(DTLB)][C(OP_READ)][C(RESULT_MISS)]   = ARMV8_PMUV3_PERFCTR_L1D_TLB_REFILL,
64         [C(DTLB)][C(OP_READ)][C(RESULT_ACCESS)] = ARMV8_PMUV3_PERFCTR_L1D_TLB,
65
66         [C(ITLB)][C(OP_READ)][C(RESULT_MISS)]   = ARMV8_PMUV3_PERFCTR_L1I_TLB_REFILL,
67         [C(ITLB)][C(OP_READ)][C(RESULT_ACCESS)] = ARMV8_PMUV3_PERFCTR_L1I_TLB,
68
69         [C(BPU)][C(OP_READ)][C(RESULT_ACCESS)]  = ARMV8_PMUV3_PERFCTR_BR_PRED,
70         [C(BPU)][C(OP_READ)][C(RESULT_MISS)]    = ARMV8_PMUV3_PERFCTR_BR_MIS_PRED,
71 };
72
73 static const unsigned armv8_a53_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
74                                               [PERF_COUNT_HW_CACHE_OP_MAX]
75                                               [PERF_COUNT_HW_CACHE_RESULT_MAX] = {
76         PERF_CACHE_MAP_ALL_UNSUPPORTED,
77
78         [C(L1D)][C(OP_PREFETCH)][C(RESULT_MISS)] = ARMV8_A53_PERFCTR_PREF_LINEFILL,
79
80         [C(NODE)][C(OP_READ)][C(RESULT_ACCESS)] = ARMV8_IMPDEF_PERFCTR_BUS_ACCESS_RD,
81         [C(NODE)][C(OP_WRITE)][C(RESULT_ACCESS)] = ARMV8_IMPDEF_PERFCTR_BUS_ACCESS_WR,
82 };
83
84 static const unsigned armv8_a57_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
85                                               [PERF_COUNT_HW_CACHE_OP_MAX]
86                                               [PERF_COUNT_HW_CACHE_RESULT_MAX] = {
87         PERF_CACHE_MAP_ALL_UNSUPPORTED,
88
89         [C(L1D)][C(OP_READ)][C(RESULT_ACCESS)]  = ARMV8_IMPDEF_PERFCTR_L1D_CACHE_RD,
90         [C(L1D)][C(OP_READ)][C(RESULT_MISS)]    = ARMV8_IMPDEF_PERFCTR_L1D_CACHE_REFILL_RD,
91         [C(L1D)][C(OP_WRITE)][C(RESULT_ACCESS)] = ARMV8_IMPDEF_PERFCTR_L1D_CACHE_WR,
92         [C(L1D)][C(OP_WRITE)][C(RESULT_MISS)]   = ARMV8_IMPDEF_PERFCTR_L1D_CACHE_REFILL_WR,
93
94         [C(DTLB)][C(OP_READ)][C(RESULT_MISS)]   = ARMV8_IMPDEF_PERFCTR_L1D_TLB_REFILL_RD,
95         [C(DTLB)][C(OP_WRITE)][C(RESULT_MISS)]  = ARMV8_IMPDEF_PERFCTR_L1D_TLB_REFILL_WR,
96
97         [C(NODE)][C(OP_READ)][C(RESULT_ACCESS)] = ARMV8_IMPDEF_PERFCTR_BUS_ACCESS_RD,
98         [C(NODE)][C(OP_WRITE)][C(RESULT_ACCESS)] = ARMV8_IMPDEF_PERFCTR_BUS_ACCESS_WR,
99 };
100
101 static const unsigned armv8_a73_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
102                                               [PERF_COUNT_HW_CACHE_OP_MAX]
103                                               [PERF_COUNT_HW_CACHE_RESULT_MAX] = {
104         PERF_CACHE_MAP_ALL_UNSUPPORTED,
105
106         [C(L1D)][C(OP_READ)][C(RESULT_ACCESS)]  = ARMV8_IMPDEF_PERFCTR_L1D_CACHE_RD,
107         [C(L1D)][C(OP_WRITE)][C(RESULT_ACCESS)] = ARMV8_IMPDEF_PERFCTR_L1D_CACHE_WR,
108 };
109
110 static const unsigned armv8_thunder_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
111                                                    [PERF_COUNT_HW_CACHE_OP_MAX]
112                                                    [PERF_COUNT_HW_CACHE_RESULT_MAX] = {
113         PERF_CACHE_MAP_ALL_UNSUPPORTED,
114
115         [C(L1D)][C(OP_READ)][C(RESULT_ACCESS)]  = ARMV8_IMPDEF_PERFCTR_L1D_CACHE_RD,
116         [C(L1D)][C(OP_READ)][C(RESULT_MISS)]    = ARMV8_IMPDEF_PERFCTR_L1D_CACHE_REFILL_RD,
117         [C(L1D)][C(OP_WRITE)][C(RESULT_ACCESS)] = ARMV8_IMPDEF_PERFCTR_L1D_CACHE_WR,
118         [C(L1D)][C(OP_WRITE)][C(RESULT_MISS)]   = ARMV8_THUNDER_PERFCTR_L1D_CACHE_MISS_ST,
119         [C(L1D)][C(OP_PREFETCH)][C(RESULT_ACCESS)] = ARMV8_THUNDER_PERFCTR_L1D_CACHE_PREF_ACCESS,
120         [C(L1D)][C(OP_PREFETCH)][C(RESULT_MISS)] = ARMV8_THUNDER_PERFCTR_L1D_CACHE_PREF_MISS,
121
122         [C(L1I)][C(OP_PREFETCH)][C(RESULT_ACCESS)] = ARMV8_THUNDER_PERFCTR_L1I_CACHE_PREF_ACCESS,
123         [C(L1I)][C(OP_PREFETCH)][C(RESULT_MISS)] = ARMV8_THUNDER_PERFCTR_L1I_CACHE_PREF_MISS,
124
125         [C(DTLB)][C(OP_READ)][C(RESULT_ACCESS)] = ARMV8_IMPDEF_PERFCTR_L1D_TLB_RD,
126         [C(DTLB)][C(OP_READ)][C(RESULT_MISS)]   = ARMV8_IMPDEF_PERFCTR_L1D_TLB_REFILL_RD,
127         [C(DTLB)][C(OP_WRITE)][C(RESULT_ACCESS)] = ARMV8_IMPDEF_PERFCTR_L1D_TLB_WR,
128         [C(DTLB)][C(OP_WRITE)][C(RESULT_MISS)]  = ARMV8_IMPDEF_PERFCTR_L1D_TLB_REFILL_WR,
129 };
130
131 static const unsigned armv8_vulcan_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
132                                               [PERF_COUNT_HW_CACHE_OP_MAX]
133                                               [PERF_COUNT_HW_CACHE_RESULT_MAX] = {
134         PERF_CACHE_MAP_ALL_UNSUPPORTED,
135
136         [C(L1D)][C(OP_READ)][C(RESULT_ACCESS)]  = ARMV8_IMPDEF_PERFCTR_L1D_CACHE_RD,
137         [C(L1D)][C(OP_READ)][C(RESULT_MISS)]    = ARMV8_IMPDEF_PERFCTR_L1D_CACHE_REFILL_RD,
138         [C(L1D)][C(OP_WRITE)][C(RESULT_ACCESS)] = ARMV8_IMPDEF_PERFCTR_L1D_CACHE_WR,
139         [C(L1D)][C(OP_WRITE)][C(RESULT_MISS)]   = ARMV8_IMPDEF_PERFCTR_L1D_CACHE_REFILL_WR,
140
141         [C(DTLB)][C(OP_READ)][C(RESULT_ACCESS)] = ARMV8_IMPDEF_PERFCTR_L1D_TLB_RD,
142         [C(DTLB)][C(OP_WRITE)][C(RESULT_ACCESS)] = ARMV8_IMPDEF_PERFCTR_L1D_TLB_WR,
143         [C(DTLB)][C(OP_READ)][C(RESULT_MISS)]   = ARMV8_IMPDEF_PERFCTR_L1D_TLB_REFILL_RD,
144         [C(DTLB)][C(OP_WRITE)][C(RESULT_MISS)]  = ARMV8_IMPDEF_PERFCTR_L1D_TLB_REFILL_WR,
145
146         [C(NODE)][C(OP_READ)][C(RESULT_ACCESS)] = ARMV8_IMPDEF_PERFCTR_BUS_ACCESS_RD,
147         [C(NODE)][C(OP_WRITE)][C(RESULT_ACCESS)] = ARMV8_IMPDEF_PERFCTR_BUS_ACCESS_WR,
148 };
149
150 static ssize_t
151 armv8pmu_events_sysfs_show(struct device *dev,
152                            struct device_attribute *attr, char *page)
153 {
154         struct perf_pmu_events_attr *pmu_attr;
155
156         pmu_attr = container_of(attr, struct perf_pmu_events_attr, attr);
157
158         return sprintf(page, "event=0x%04llx\n", pmu_attr->id);
159 }
160
161 #define ARMV8_EVENT_ATTR(name, config) \
162         PMU_EVENT_ATTR(name, armv8_event_attr_##name, \
163                        config, armv8pmu_events_sysfs_show)
164
165 ARMV8_EVENT_ATTR(sw_incr, ARMV8_PMUV3_PERFCTR_SW_INCR);
166 ARMV8_EVENT_ATTR(l1i_cache_refill, ARMV8_PMUV3_PERFCTR_L1I_CACHE_REFILL);
167 ARMV8_EVENT_ATTR(l1i_tlb_refill, ARMV8_PMUV3_PERFCTR_L1I_TLB_REFILL);
168 ARMV8_EVENT_ATTR(l1d_cache_refill, ARMV8_PMUV3_PERFCTR_L1D_CACHE_REFILL);
169 ARMV8_EVENT_ATTR(l1d_cache, ARMV8_PMUV3_PERFCTR_L1D_CACHE);
170 ARMV8_EVENT_ATTR(l1d_tlb_refill, ARMV8_PMUV3_PERFCTR_L1D_TLB_REFILL);
171 ARMV8_EVENT_ATTR(ld_retired, ARMV8_PMUV3_PERFCTR_LD_RETIRED);
172 ARMV8_EVENT_ATTR(st_retired, ARMV8_PMUV3_PERFCTR_ST_RETIRED);
173 ARMV8_EVENT_ATTR(inst_retired, ARMV8_PMUV3_PERFCTR_INST_RETIRED);
174 ARMV8_EVENT_ATTR(exc_taken, ARMV8_PMUV3_PERFCTR_EXC_TAKEN);
175 ARMV8_EVENT_ATTR(exc_return, ARMV8_PMUV3_PERFCTR_EXC_RETURN);
176 ARMV8_EVENT_ATTR(cid_write_retired, ARMV8_PMUV3_PERFCTR_CID_WRITE_RETIRED);
177 ARMV8_EVENT_ATTR(pc_write_retired, ARMV8_PMUV3_PERFCTR_PC_WRITE_RETIRED);
178 ARMV8_EVENT_ATTR(br_immed_retired, ARMV8_PMUV3_PERFCTR_BR_IMMED_RETIRED);
179 ARMV8_EVENT_ATTR(br_return_retired, ARMV8_PMUV3_PERFCTR_BR_RETURN_RETIRED);
180 ARMV8_EVENT_ATTR(unaligned_ldst_retired, ARMV8_PMUV3_PERFCTR_UNALIGNED_LDST_RETIRED);
181 ARMV8_EVENT_ATTR(br_mis_pred, ARMV8_PMUV3_PERFCTR_BR_MIS_PRED);
182 ARMV8_EVENT_ATTR(cpu_cycles, ARMV8_PMUV3_PERFCTR_CPU_CYCLES);
183 ARMV8_EVENT_ATTR(br_pred, ARMV8_PMUV3_PERFCTR_BR_PRED);
184 ARMV8_EVENT_ATTR(mem_access, ARMV8_PMUV3_PERFCTR_MEM_ACCESS);
185 ARMV8_EVENT_ATTR(l1i_cache, ARMV8_PMUV3_PERFCTR_L1I_CACHE);
186 ARMV8_EVENT_ATTR(l1d_cache_wb, ARMV8_PMUV3_PERFCTR_L1D_CACHE_WB);
187 ARMV8_EVENT_ATTR(l2d_cache, ARMV8_PMUV3_PERFCTR_L2D_CACHE);
188 ARMV8_EVENT_ATTR(l2d_cache_refill, ARMV8_PMUV3_PERFCTR_L2D_CACHE_REFILL);
189 ARMV8_EVENT_ATTR(l2d_cache_wb, ARMV8_PMUV3_PERFCTR_L2D_CACHE_WB);
190 ARMV8_EVENT_ATTR(bus_access, ARMV8_PMUV3_PERFCTR_BUS_ACCESS);
191 ARMV8_EVENT_ATTR(memory_error, ARMV8_PMUV3_PERFCTR_MEMORY_ERROR);
192 ARMV8_EVENT_ATTR(inst_spec, ARMV8_PMUV3_PERFCTR_INST_SPEC);
193 ARMV8_EVENT_ATTR(ttbr_write_retired, ARMV8_PMUV3_PERFCTR_TTBR_WRITE_RETIRED);
194 ARMV8_EVENT_ATTR(bus_cycles, ARMV8_PMUV3_PERFCTR_BUS_CYCLES);
195 /* Don't expose the chain event in /sys, since it's useless in isolation */
196 ARMV8_EVENT_ATTR(l1d_cache_allocate, ARMV8_PMUV3_PERFCTR_L1D_CACHE_ALLOCATE);
197 ARMV8_EVENT_ATTR(l2d_cache_allocate, ARMV8_PMUV3_PERFCTR_L2D_CACHE_ALLOCATE);
198 ARMV8_EVENT_ATTR(br_retired, ARMV8_PMUV3_PERFCTR_BR_RETIRED);
199 ARMV8_EVENT_ATTR(br_mis_pred_retired, ARMV8_PMUV3_PERFCTR_BR_MIS_PRED_RETIRED);
200 ARMV8_EVENT_ATTR(stall_frontend, ARMV8_PMUV3_PERFCTR_STALL_FRONTEND);
201 ARMV8_EVENT_ATTR(stall_backend, ARMV8_PMUV3_PERFCTR_STALL_BACKEND);
202 ARMV8_EVENT_ATTR(l1d_tlb, ARMV8_PMUV3_PERFCTR_L1D_TLB);
203 ARMV8_EVENT_ATTR(l1i_tlb, ARMV8_PMUV3_PERFCTR_L1I_TLB);
204 ARMV8_EVENT_ATTR(l2i_cache, ARMV8_PMUV3_PERFCTR_L2I_CACHE);
205 ARMV8_EVENT_ATTR(l2i_cache_refill, ARMV8_PMUV3_PERFCTR_L2I_CACHE_REFILL);
206 ARMV8_EVENT_ATTR(l3d_cache_allocate, ARMV8_PMUV3_PERFCTR_L3D_CACHE_ALLOCATE);
207 ARMV8_EVENT_ATTR(l3d_cache_refill, ARMV8_PMUV3_PERFCTR_L3D_CACHE_REFILL);
208 ARMV8_EVENT_ATTR(l3d_cache, ARMV8_PMUV3_PERFCTR_L3D_CACHE);
209 ARMV8_EVENT_ATTR(l3d_cache_wb, ARMV8_PMUV3_PERFCTR_L3D_CACHE_WB);
210 ARMV8_EVENT_ATTR(l2d_tlb_refill, ARMV8_PMUV3_PERFCTR_L2D_TLB_REFILL);
211 ARMV8_EVENT_ATTR(l2i_tlb_refill, ARMV8_PMUV3_PERFCTR_L2I_TLB_REFILL);
212 ARMV8_EVENT_ATTR(l2d_tlb, ARMV8_PMUV3_PERFCTR_L2D_TLB);
213 ARMV8_EVENT_ATTR(l2i_tlb, ARMV8_PMUV3_PERFCTR_L2I_TLB);
214 ARMV8_EVENT_ATTR(remote_access, ARMV8_PMUV3_PERFCTR_REMOTE_ACCESS);
215 ARMV8_EVENT_ATTR(ll_cache, ARMV8_PMUV3_PERFCTR_LL_CACHE);
216 ARMV8_EVENT_ATTR(ll_cache_miss, ARMV8_PMUV3_PERFCTR_LL_CACHE_MISS);
217 ARMV8_EVENT_ATTR(dtlb_walk, ARMV8_PMUV3_PERFCTR_DTLB_WALK);
218 ARMV8_EVENT_ATTR(itlb_walk, ARMV8_PMUV3_PERFCTR_ITLB_WALK);
219 ARMV8_EVENT_ATTR(ll_cache_rd, ARMV8_PMUV3_PERFCTR_LL_CACHE_RD);
220 ARMV8_EVENT_ATTR(ll_cache_miss_rd, ARMV8_PMUV3_PERFCTR_LL_CACHE_MISS_RD);
221 ARMV8_EVENT_ATTR(remote_access_rd, ARMV8_PMUV3_PERFCTR_REMOTE_ACCESS_RD);
222 ARMV8_EVENT_ATTR(sample_pop, ARMV8_SPE_PERFCTR_SAMPLE_POP);
223 ARMV8_EVENT_ATTR(sample_feed, ARMV8_SPE_PERFCTR_SAMPLE_FEED);
224 ARMV8_EVENT_ATTR(sample_filtrate, ARMV8_SPE_PERFCTR_SAMPLE_FILTRATE);
225 ARMV8_EVENT_ATTR(sample_collision, ARMV8_SPE_PERFCTR_SAMPLE_COLLISION);
226
227 static struct attribute *armv8_pmuv3_event_attrs[] = {
228         &armv8_event_attr_sw_incr.attr.attr,
229         &armv8_event_attr_l1i_cache_refill.attr.attr,
230         &armv8_event_attr_l1i_tlb_refill.attr.attr,
231         &armv8_event_attr_l1d_cache_refill.attr.attr,
232         &armv8_event_attr_l1d_cache.attr.attr,
233         &armv8_event_attr_l1d_tlb_refill.attr.attr,
234         &armv8_event_attr_ld_retired.attr.attr,
235         &armv8_event_attr_st_retired.attr.attr,
236         &armv8_event_attr_inst_retired.attr.attr,
237         &armv8_event_attr_exc_taken.attr.attr,
238         &armv8_event_attr_exc_return.attr.attr,
239         &armv8_event_attr_cid_write_retired.attr.attr,
240         &armv8_event_attr_pc_write_retired.attr.attr,
241         &armv8_event_attr_br_immed_retired.attr.attr,
242         &armv8_event_attr_br_return_retired.attr.attr,
243         &armv8_event_attr_unaligned_ldst_retired.attr.attr,
244         &armv8_event_attr_br_mis_pred.attr.attr,
245         &armv8_event_attr_cpu_cycles.attr.attr,
246         &armv8_event_attr_br_pred.attr.attr,
247         &armv8_event_attr_mem_access.attr.attr,
248         &armv8_event_attr_l1i_cache.attr.attr,
249         &armv8_event_attr_l1d_cache_wb.attr.attr,
250         &armv8_event_attr_l2d_cache.attr.attr,
251         &armv8_event_attr_l2d_cache_refill.attr.attr,
252         &armv8_event_attr_l2d_cache_wb.attr.attr,
253         &armv8_event_attr_bus_access.attr.attr,
254         &armv8_event_attr_memory_error.attr.attr,
255         &armv8_event_attr_inst_spec.attr.attr,
256         &armv8_event_attr_ttbr_write_retired.attr.attr,
257         &armv8_event_attr_bus_cycles.attr.attr,
258         &armv8_event_attr_l1d_cache_allocate.attr.attr,
259         &armv8_event_attr_l2d_cache_allocate.attr.attr,
260         &armv8_event_attr_br_retired.attr.attr,
261         &armv8_event_attr_br_mis_pred_retired.attr.attr,
262         &armv8_event_attr_stall_frontend.attr.attr,
263         &armv8_event_attr_stall_backend.attr.attr,
264         &armv8_event_attr_l1d_tlb.attr.attr,
265         &armv8_event_attr_l1i_tlb.attr.attr,
266         &armv8_event_attr_l2i_cache.attr.attr,
267         &armv8_event_attr_l2i_cache_refill.attr.attr,
268         &armv8_event_attr_l3d_cache_allocate.attr.attr,
269         &armv8_event_attr_l3d_cache_refill.attr.attr,
270         &armv8_event_attr_l3d_cache.attr.attr,
271         &armv8_event_attr_l3d_cache_wb.attr.attr,
272         &armv8_event_attr_l2d_tlb_refill.attr.attr,
273         &armv8_event_attr_l2i_tlb_refill.attr.attr,
274         &armv8_event_attr_l2d_tlb.attr.attr,
275         &armv8_event_attr_l2i_tlb.attr.attr,
276         &armv8_event_attr_remote_access.attr.attr,
277         &armv8_event_attr_ll_cache.attr.attr,
278         &armv8_event_attr_ll_cache_miss.attr.attr,
279         &armv8_event_attr_dtlb_walk.attr.attr,
280         &armv8_event_attr_itlb_walk.attr.attr,
281         &armv8_event_attr_ll_cache_rd.attr.attr,
282         &armv8_event_attr_ll_cache_miss_rd.attr.attr,
283         &armv8_event_attr_remote_access_rd.attr.attr,
284         &armv8_event_attr_sample_pop.attr.attr,
285         &armv8_event_attr_sample_feed.attr.attr,
286         &armv8_event_attr_sample_filtrate.attr.attr,
287         &armv8_event_attr_sample_collision.attr.attr,
288         NULL,
289 };
290
291 static umode_t
292 armv8pmu_event_attr_is_visible(struct kobject *kobj,
293                                struct attribute *attr, int unused)
294 {
295         struct device *dev = kobj_to_dev(kobj);
296         struct pmu *pmu = dev_get_drvdata(dev);
297         struct arm_pmu *cpu_pmu = container_of(pmu, struct arm_pmu, pmu);
298         struct perf_pmu_events_attr *pmu_attr;
299
300         pmu_attr = container_of(attr, struct perf_pmu_events_attr, attr.attr);
301
302         if (pmu_attr->id < ARMV8_PMUV3_MAX_COMMON_EVENTS &&
303             test_bit(pmu_attr->id, cpu_pmu->pmceid_bitmap))
304                 return attr->mode;
305
306         if (pmu_attr->id >= ARMV8_PMUV3_EXT_COMMON_EVENT_BASE) {
307                 u64 id = pmu_attr->id - ARMV8_PMUV3_EXT_COMMON_EVENT_BASE;
308
309                 if (id < ARMV8_PMUV3_MAX_COMMON_EVENTS &&
310                     test_bit(id, cpu_pmu->pmceid_ext_bitmap))
311                         return attr->mode;
312         }
313
314         return 0;
315 }
316
317 static struct attribute_group armv8_pmuv3_events_attr_group = {
318         .name = "events",
319         .attrs = armv8_pmuv3_event_attrs,
320         .is_visible = armv8pmu_event_attr_is_visible,
321 };
322
323 PMU_FORMAT_ATTR(event, "config:0-15");
324 PMU_FORMAT_ATTR(long, "config1:0");
325
326 static inline bool armv8pmu_event_is_64bit(struct perf_event *event)
327 {
328         return event->attr.config1 & 0x1;
329 }
330
331 static struct attribute *armv8_pmuv3_format_attrs[] = {
332         &format_attr_event.attr,
333         &format_attr_long.attr,
334         NULL,
335 };
336
337 static struct attribute_group armv8_pmuv3_format_attr_group = {
338         .name = "format",
339         .attrs = armv8_pmuv3_format_attrs,
340 };
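/*
 * Usage illustration (annotation, not in the original source): the
 * "events" and "format" groups appear under
 * /sys/bus/event_source/devices/<pmu>/, where <pmu> is the name chosen by
 * the *_pmu_init() functions below (e.g. "armv8_pmuv3" or
 * "armv8_cortex_a53"). A hypothetical invocation from userspace:
 *
 *   perf stat -e armv8_pmuv3/l1d_cache_refill/ -a sleep 1
 *   perf stat -e armv8_pmuv3/event=0x11,long/ -a sleep 1
 *
 * "event" fills config:0-15 with a raw event code and "long" sets
 * config1:0, which armv8pmu_event_is_64bit() above turns into a request
 * for a 64-bit (chained) counter.
 */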
341
342 /*
343  * Perf Events' indices
344  */
345 #define ARMV8_IDX_CYCLE_COUNTER 0
346 #define ARMV8_IDX_COUNTER0      1
347 #define ARMV8_IDX_COUNTER_LAST(cpu_pmu) \
348         (ARMV8_IDX_CYCLE_COUNTER + cpu_pmu->num_events - 1)
349
350 /*
351  * We must chain two programmable counters for 64-bit events,
352  * except when we have allocated the 64-bit cycle counter (for the
353  * CPU cycles event). This must be called only when the event has
354  * a counter allocated.
355  */
356 static inline bool armv8pmu_event_is_chained(struct perf_event *event)
357 {
358         int idx = event->hw.idx;
359
360         return !WARN_ON(idx < 0) &&
361                armv8pmu_event_is_64bit(event) &&
362                (idx != ARMV8_IDX_CYCLE_COUNTER);
363 }
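/*
 * Worked example (annotation): a 64-bit ("long") event on programmable
 * counter idx keeps its high 32 bits in idx (programmed with the CHAIN
 * event) and its low 32 bits in idx - 1 (programmed with the real event).
 * The cycle counter is natively 64 bits wide, so a long event on
 * ARMV8_IDX_CYCLE_COUNTER is never treated as chained.
 */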
364
365 /*
366  * ARMv8 low level PMU access
367  */
368
369 /*
370  * Perf Event to low level counters mapping
371  */
372 #define ARMV8_IDX_TO_COUNTER(x) \
373         (((x) - ARMV8_IDX_COUNTER0) & ARMV8_PMU_COUNTER_MASK)
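/*
 * Example (annotation): the cycle counter lives at software index 0 and is
 * accessed directly through PMCCNTR_EL0, so it never passes through this
 * macro; software index 1 (ARMV8_IDX_COUNTER0) maps to hardware event
 * counter 0, index 2 to counter 1, and so on, masked by
 * ARMV8_PMU_COUNTER_MASK.
 */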
374
375 static inline u32 armv8pmu_pmcr_read(void)
376 {
377         return read_sysreg(pmcr_el0);
378 }
379
380 static inline void armv8pmu_pmcr_write(u32 val)
381 {
382         val &= ARMV8_PMU_PMCR_MASK;
383         isb();
384         write_sysreg(val, pmcr_el0);
385 }
386
387 static inline int armv8pmu_has_overflowed(u32 pmovsr)
388 {
389         return pmovsr & ARMV8_PMU_OVERFLOWED_MASK;
390 }
391
392 static inline int armv8pmu_counter_valid(struct arm_pmu *cpu_pmu, int idx)
393 {
394         return idx >= ARMV8_IDX_CYCLE_COUNTER &&
395                 idx <= ARMV8_IDX_COUNTER_LAST(cpu_pmu);
396 }
397
398 static inline int armv8pmu_counter_has_overflowed(u32 pmnc, int idx)
399 {
400         return pmnc & BIT(ARMV8_IDX_TO_COUNTER(idx));
401 }
402
403 static inline void armv8pmu_select_counter(int idx)
404 {
405         u32 counter = ARMV8_IDX_TO_COUNTER(idx);
406         write_sysreg(counter, pmselr_el0);
407         isb();
408 }
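/*
 * Note (annotation): PMSELR_EL0 selects which event counter the indirect
 * PMXEVCNTR_EL0/PMXEVTYPER_EL0 accessors operate on; the isb() makes sure
 * the new selection takes effect before the counter is read or written.
 */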
409
410 static inline u32 armv8pmu_read_evcntr(int idx)
411 {
412         armv8pmu_select_counter(idx);
413         return read_sysreg(pmxevcntr_el0);
414 }
415
416 static inline u64 armv8pmu_read_hw_counter(struct perf_event *event)
417 {
418         int idx = event->hw.idx;
419         u64 val = 0;
420
421         val = armv8pmu_read_evcntr(idx);
422         if (armv8pmu_event_is_chained(event))
423                 val = (val << 32) | armv8pmu_read_evcntr(idx - 1);
424         return val;
425 }
426
427 static u64 armv8pmu_read_counter(struct perf_event *event)
428 {
429         struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu);
430         struct hw_perf_event *hwc = &event->hw;
431         int idx = hwc->idx;
432         u64 value = 0;
433
434         if (!armv8pmu_counter_valid(cpu_pmu, idx))
435                 pr_err("CPU%u reading wrong counter %d\n",
436                         smp_processor_id(), idx);
437         else if (idx == ARMV8_IDX_CYCLE_COUNTER)
438                 value = read_sysreg(pmccntr_el0);
439         else
440                 value = armv8pmu_read_hw_counter(event);
441
442         return value;
443 }
444
445 static inline void armv8pmu_write_evcntr(int idx, u32 value)
446 {
447         armv8pmu_select_counter(idx);
448         write_sysreg(value, pmxevcntr_el0);
449 }
450
451 static inline void armv8pmu_write_hw_counter(struct perf_event *event,
452                                              u64 value)
453 {
454         int idx = event->hw.idx;
455
456         if (armv8pmu_event_is_chained(event)) {
457                 armv8pmu_write_evcntr(idx, upper_32_bits(value));
458                 armv8pmu_write_evcntr(idx - 1, lower_32_bits(value));
459         } else {
460                 armv8pmu_write_evcntr(idx, value);
461         }
462 }
463
464 static void armv8pmu_write_counter(struct perf_event *event, u64 value)
465 {
466         struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu);
467         struct hw_perf_event *hwc = &event->hw;
468         int idx = hwc->idx;
469
470         if (!armv8pmu_counter_valid(cpu_pmu, idx))
471                 pr_err("CPU%u writing wrong counter %d\n",
472                         smp_processor_id(), idx);
473         else if (idx == ARMV8_IDX_CYCLE_COUNTER) {
474                 /*
475                  * The cycles counter is really a 64-bit counter.
476                  * When treating it as a 32-bit counter, we only count
477                  * the lower 32 bits, and set the upper 32 bits so that
478                  * we get an interrupt upon 32-bit overflow.
479                  */
480                 if (!armv8pmu_event_is_64bit(event))
481                         value |= 0xffffffff00000000ULL;
482                 write_sysreg(value, pmccntr_el0);
483         } else
484                 armv8pmu_write_hw_counter(event, value);
485 }
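/*
 * Worked example (annotation): with PMCR_EL0.LC set, the cycle counter
 * only signals overflow when all 64 bits wrap. Pre-loading the upper half
 * with ones (0xffffffff00000000 | lower_32_bits(value)) makes the 64-bit
 * counter wrap exactly when the low 32 bits would have, so a 32-bit
 * sampling period still produces an interrupt.
 */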
486
487 static inline void armv8pmu_write_evtype(int idx, u32 val)
488 {
489         armv8pmu_select_counter(idx);
490         val &= ARMV8_PMU_EVTYPE_MASK;
491         write_sysreg(val, pmxevtyper_el0);
492 }
493
494 static inline void armv8pmu_write_event_type(struct perf_event *event)
495 {
496         struct hw_perf_event *hwc = &event->hw;
497         int idx = hwc->idx;
498
499         /*
500          * For chained events, the low counter is programmed to count
501          * the event of interest and the high counter is programmed
502          * with CHAIN event code with filters set to count at all ELs.
503          */
504         if (armv8pmu_event_is_chained(event)) {
505                 u32 chain_evt = ARMV8_PMUV3_PERFCTR_CHAIN |
506                                 ARMV8_PMU_INCLUDE_EL2;
507
508                 armv8pmu_write_evtype(idx - 1, hwc->config_base);
509                 armv8pmu_write_evtype(idx, chain_evt);
510         } else {
511                 armv8pmu_write_evtype(idx, hwc->config_base);
512         }
513 }
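/*
 * Example (annotation, assuming a chained pair on idx == 2): hardware
 * counter 1 is programmed with the CHAIN event (0x1E) and
 * ARMV8_PMU_INCLUDE_EL2, while hardware counter 0 receives the user's
 * event code and filter bits from hwc->config_base.
 */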
514
515 static inline int armv8pmu_enable_counter(int idx)
516 {
517         u32 counter = ARMV8_IDX_TO_COUNTER(idx);
518         write_sysreg(BIT(counter), pmcntenset_el0);
519         return idx;
520 }
521
522 static inline void armv8pmu_enable_event_counter(struct perf_event *event)
523 {
524         struct perf_event_attr *attr = &event->attr;
525         int idx = event->hw.idx;
526         u32 counter_bits = BIT(ARMV8_IDX_TO_COUNTER(idx));
527
528         if (armv8pmu_event_is_chained(event))
529                 counter_bits |= BIT(ARMV8_IDX_TO_COUNTER(idx - 1));
530
531         kvm_set_pmu_events(counter_bits, attr);
532
533         /* We rely on the hypervisor switch code to enable guest counters */
534         if (!kvm_pmu_counter_deferred(attr)) {
535                 armv8pmu_enable_counter(idx);
536                 if (armv8pmu_event_is_chained(event))
537                         armv8pmu_enable_counter(idx - 1);
538         }
539 }
540
541 static inline int armv8pmu_disable_counter(int idx)
542 {
543         u32 counter = ARMV8_IDX_TO_COUNTER(idx);
544         write_sysreg(BIT(counter), pmcntenclr_el0);
545         return idx;
546 }
547
548 static inline void armv8pmu_disable_event_counter(struct perf_event *event)
549 {
550         struct hw_perf_event *hwc = &event->hw;
551         struct perf_event_attr *attr = &event->attr;
552         int idx = hwc->idx;
553         u32 counter_bits = BIT(ARMV8_IDX_TO_COUNTER(idx));
554
555         if (armv8pmu_event_is_chained(event))
556                 counter_bits |= BIT(ARMV8_IDX_TO_COUNTER(idx - 1));
557
558         kvm_clr_pmu_events(counter_bits);
559
560         /* We rely on the hypervisor switch code to disable guest counters */
561         if (!kvm_pmu_counter_deferred(attr)) {
562                 if (armv8pmu_event_is_chained(event))
563                         armv8pmu_disable_counter(idx - 1);
564                 armv8pmu_disable_counter(idx);
565         }
566 }
567
568 static inline int armv8pmu_enable_intens(int idx)
569 {
570         u32 counter = ARMV8_IDX_TO_COUNTER(idx);
571         write_sysreg(BIT(counter), pmintenset_el1);
572         return idx;
573 }
574
575 static inline int armv8pmu_enable_event_irq(struct perf_event *event)
576 {
577         return armv8pmu_enable_intens(event->hw.idx);
578 }
579
580 static inline int armv8pmu_disable_intens(int idx)
581 {
582         u32 counter = ARMV8_IDX_TO_COUNTER(idx);
583         write_sysreg(BIT(counter), pmintenclr_el1);
584         isb();
585         /* Clear the overflow flag in case an interrupt is pending. */
586         write_sysreg(BIT(counter), pmovsclr_el0);
587         isb();
588
589         return idx;
590 }
591
592 static inline int armv8pmu_disable_event_irq(struct perf_event *event)
593 {
594         return armv8pmu_disable_intens(event->hw.idx);
595 }
596
597 static inline u32 armv8pmu_getreset_flags(void)
598 {
599         u32 value;
600
601         /* Read */
602         value = read_sysreg(pmovsclr_el0);
603
604         /* Write to clear flags */
605         value &= ARMV8_PMU_OVSR_MASK;
606         write_sysreg(value, pmovsclr_el0);
607
608         return value;
609 }
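/*
 * Note (annotation): PMOVSCLR_EL0 is write-one-to-clear, so writing back
 * the masked value that was just read acknowledges exactly the overflow
 * flags the interrupt handler is about to process.
 */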
610
611 static void armv8pmu_enable_event(struct perf_event *event)
612 {
613         unsigned long flags;
614         struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu);
615         struct pmu_hw_events *events = this_cpu_ptr(cpu_pmu->hw_events);
616
617         /*
618          * Enable counter and interrupt, and set the counter to count
619          * the event that we're interested in.
620          */
621         raw_spin_lock_irqsave(&events->pmu_lock, flags);
622
623         /*
624          * Disable counter
625          */
626         armv8pmu_disable_event_counter(event);
627
628         /*
629          * Set event (if destined for PMNx counters).
630          */
631         armv8pmu_write_event_type(event);
632
633         /*
634          * Enable interrupt for this counter
635          */
636         armv8pmu_enable_event_irq(event);
637
638         /*
639          * Enable counter
640          */
641         armv8pmu_enable_event_counter(event);
642
643         raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
644 }
645
646 static void armv8pmu_disable_event(struct perf_event *event)
647 {
648         unsigned long flags;
649         struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu);
650         struct pmu_hw_events *events = this_cpu_ptr(cpu_pmu->hw_events);
651
652         /*
653          * Disable counter and interrupt
654          */
655         raw_spin_lock_irqsave(&events->pmu_lock, flags);
656
657         /*
658          * Disable counter
659          */
660         armv8pmu_disable_event_counter(event);
661
662         /*
663          * Disable interrupt for this counter
664          */
665         armv8pmu_disable_event_irq(event);
666
667         raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
668 }
669
670 static void armv8pmu_start(struct arm_pmu *cpu_pmu)
671 {
672         unsigned long flags;
673         struct pmu_hw_events *events = this_cpu_ptr(cpu_pmu->hw_events);
674
675         raw_spin_lock_irqsave(&events->pmu_lock, flags);
676         /* Enable all counters */
677         armv8pmu_pmcr_write(armv8pmu_pmcr_read() | ARMV8_PMU_PMCR_E);
678         raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
679 }
680
681 static void armv8pmu_stop(struct arm_pmu *cpu_pmu)
682 {
683         unsigned long flags;
684         struct pmu_hw_events *events = this_cpu_ptr(cpu_pmu->hw_events);
685
686         raw_spin_lock_irqsave(&events->pmu_lock, flags);
687         /* Disable all counters */
688         armv8pmu_pmcr_write(armv8pmu_pmcr_read() & ~ARMV8_PMU_PMCR_E);
689         raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
690 }
691
692 static irqreturn_t armv8pmu_handle_irq(struct arm_pmu *cpu_pmu)
693 {
694         u32 pmovsr;
695         struct perf_sample_data data;
696         struct pmu_hw_events *cpuc = this_cpu_ptr(cpu_pmu->hw_events);
697         struct pt_regs *regs;
698         int idx;
699
700         /*
701          * Get and reset the IRQ flags
702          */
703         pmovsr = armv8pmu_getreset_flags();
704
705         /*
706          * Did an overflow occur?
707          */
708         if (!armv8pmu_has_overflowed(pmovsr))
709                 return IRQ_NONE;
710
711         /*
712          * Handle the counter(s) overflow(s)
713          */
714         regs = get_irq_regs();
715
716         /*
717          * Stop the PMU while processing the counter overflows
718          * to prevent skews in group events.
719          */
720         armv8pmu_stop(cpu_pmu);
721         for (idx = 0; idx < cpu_pmu->num_events; ++idx) {
722                 struct perf_event *event = cpuc->events[idx];
723                 struct hw_perf_event *hwc;
724
725                 /* Ignore if we don't have an event. */
726                 if (!event)
727                         continue;
728
729                 /*
730                  * We have a single interrupt for all counters. Check that
731                  * each counter has overflowed before we process it.
732                  */
733                 if (!armv8pmu_counter_has_overflowed(pmovsr, idx))
734                         continue;
735
736                 hwc = &event->hw;
737                 armpmu_event_update(event);
738                 perf_sample_data_init(&data, 0, hwc->last_period);
739                 if (!armpmu_event_set_period(event))
740                         continue;
741
742                 if (perf_event_overflow(event, &data, regs))
743                         cpu_pmu->disable(event);
744         }
745         armv8pmu_start(cpu_pmu);
746
747         /*
748          * Handle the pending perf events.
749          *
750          * Note: this call *must* be run with interrupts disabled. For
751          * platforms that can have the PMU interrupts raised as an NMI, this
752          * will not work.
753          */
754         irq_work_run();
755
756         return IRQ_HANDLED;
757 }
758
759 static int armv8pmu_get_single_idx(struct pmu_hw_events *cpuc,
760                                     struct arm_pmu *cpu_pmu)
761 {
762         int idx;
763
764         for (idx = ARMV8_IDX_COUNTER0; idx < cpu_pmu->num_events; idx++) {
765                 if (!test_and_set_bit(idx, cpuc->used_mask))
766                         return idx;
767         }
768         return -EAGAIN;
769 }
770
771 static int armv8pmu_get_chain_idx(struct pmu_hw_events *cpuc,
772                                    struct arm_pmu *cpu_pmu)
773 {
774         int idx;
775
776         /*
777          * Chaining requires two consecutive event counters, where
778          * the lower idx must be even.
779          */
780         for (idx = ARMV8_IDX_COUNTER0 + 1; idx < cpu_pmu->num_events; idx += 2) {
781                 if (!test_and_set_bit(idx, cpuc->used_mask)) {
782                         /* Check if the preceding even counter is available */
783                         if (!test_and_set_bit(idx - 1, cpuc->used_mask))
784                                 return idx;
785                         /* Release the odd counter */
786                         clear_bit(idx, cpuc->used_mask);
787                 }
788         }
789         return -EAGAIN;
790 }
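/*
 * Worked example (annotation): with ARMV8_IDX_COUNTER0 == 1 the loop
 * probes software indices 2, 4, 6, ... (odd hardware counters 1, 3, 5,
 * ...) and keeps one only if the preceding index (the even hardware
 * counter) is also free, so a chained event always occupies an
 * even/odd hardware counter pair.
 */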
791
792 static int armv8pmu_get_event_idx(struct pmu_hw_events *cpuc,
793                                   struct perf_event *event)
794 {
795         struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu);
796         struct hw_perf_event *hwc = &event->hw;
797         unsigned long evtype = hwc->config_base & ARMV8_PMU_EVTYPE_EVENT;
798
799         /* Always prefer to place a cycle counter into the cycle counter. */
800         if (evtype == ARMV8_PMUV3_PERFCTR_CPU_CYCLES) {
801                 if (!test_and_set_bit(ARMV8_IDX_CYCLE_COUNTER, cpuc->used_mask))
802                         return ARMV8_IDX_CYCLE_COUNTER;
803         }
804
805         /*
806          * Otherwise use the event counters
807          */
808         if (armv8pmu_event_is_64bit(event))
809                 return armv8pmu_get_chain_idx(cpuc, cpu_pmu);
810         else
811                 return armv8pmu_get_single_idx(cpuc, cpu_pmu);
812 }
813
814 static void armv8pmu_clear_event_idx(struct pmu_hw_events *cpuc,
815                                      struct perf_event *event)
816 {
817         int idx = event->hw.idx;
818
819         clear_bit(idx, cpuc->used_mask);
820         if (armv8pmu_event_is_chained(event))
821                 clear_bit(idx - 1, cpuc->used_mask);
822 }
823
824 /*
825  * Add an event filter to a given event.
826  */
827 static int armv8pmu_set_event_filter(struct hw_perf_event *event,
828                                      struct perf_event_attr *attr)
829 {
830         unsigned long config_base = 0;
831
832         if (attr->exclude_idle)
833                 return -EPERM;
834
835         /*
836          * If we're running in hyp mode, then we *are* the hypervisor.
837          * Therefore we ignore exclude_hv in this configuration, since
838          * there's no hypervisor to sample anyway. This is consistent
839          * with other architectures (x86 and Power).
840          */
841         if (is_kernel_in_hyp_mode()) {
842                 if (!attr->exclude_kernel && !attr->exclude_host)
843                         config_base |= ARMV8_PMU_INCLUDE_EL2;
844                 if (attr->exclude_guest)
845                         config_base |= ARMV8_PMU_EXCLUDE_EL1;
846                 if (attr->exclude_host)
847                         config_base |= ARMV8_PMU_EXCLUDE_EL0;
848         } else {
849                 if (!attr->exclude_hv && !attr->exclude_host)
850                         config_base |= ARMV8_PMU_INCLUDE_EL2;
851         }
852
853         /*
854          * Filter out !VHE kernels and guest kernels
855          */
856         if (attr->exclude_kernel)
857                 config_base |= ARMV8_PMU_EXCLUDE_EL1;
858
859         if (attr->exclude_user)
860                 config_base |= ARMV8_PMU_EXCLUDE_EL0;
861
862         /*
863          * Install the filter into config_base as this is used to
864          * construct the event type.
865          */
866         event->config_base = config_base;
867
868         return 0;
869 }
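/*
 * Illustration (annotation): opening an event with attr.exclude_kernel
 * set, e.g. a hypothetical "perf stat -e cycles:u", ORs
 * ARMV8_PMU_EXCLUDE_EL1 into config_base so EL1 activity is not counted;
 * exclude_user likewise sets ARMV8_PMU_EXCLUDE_EL0.
 */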
870
871 static int armv8pmu_filter_match(struct perf_event *event)
872 {
873         unsigned long evtype = event->hw.config_base & ARMV8_PMU_EVTYPE_EVENT;
874         return evtype != ARMV8_PMUV3_PERFCTR_CHAIN;
875 }
876
877 static void armv8pmu_reset(void *info)
878 {
879         struct arm_pmu *cpu_pmu = (struct arm_pmu *)info;
880         u32 idx, nb_cnt = cpu_pmu->num_events;
881
882         /* The counter and interrupt enable registers are unknown at reset. */
883         for (idx = ARMV8_IDX_CYCLE_COUNTER; idx < nb_cnt; ++idx) {
884                 armv8pmu_disable_counter(idx);
885                 armv8pmu_disable_intens(idx);
886         }
887
888         /* Clear the counters we flip at guest entry/exit */
889         kvm_clr_pmu_events(U32_MAX);
890
891         /*
892          * Initialize & reset PMNC. Request an overflow interrupt for the
893          * 64-bit cycle counter, but cheat in armv8pmu_write_counter().
894          */
895         armv8pmu_pmcr_write(ARMV8_PMU_PMCR_P | ARMV8_PMU_PMCR_C |
896                             ARMV8_PMU_PMCR_LC);
897 }
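/*
 * Note (annotation): ARMV8_PMU_PMCR_P resets the event counters,
 * ARMV8_PMU_PMCR_C resets the cycle counter, and ARMV8_PMU_PMCR_LC selects
 * overflow on the full 64-bit cycle count, which is what the "cheat" in
 * armv8pmu_write_counter() relies on.
 */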
898
899 static int __armv8_pmuv3_map_event(struct perf_event *event,
900                                    const unsigned (*extra_event_map)
901                                                   [PERF_COUNT_HW_MAX],
902                                    const unsigned (*extra_cache_map)
903                                                   [PERF_COUNT_HW_CACHE_MAX]
904                                                   [PERF_COUNT_HW_CACHE_OP_MAX]
905                                                   [PERF_COUNT_HW_CACHE_RESULT_MAX])
906 {
907         int hw_event_id;
908         struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
909
910         hw_event_id = armpmu_map_event(event, &armv8_pmuv3_perf_map,
911                                        &armv8_pmuv3_perf_cache_map,
912                                        ARMV8_PMU_EVTYPE_EVENT);
913
914         if (armv8pmu_event_is_64bit(event))
915                 event->hw.flags |= ARMPMU_EVT_64BIT;
916
917         /* Only expose micro/arch events supported by this PMU */
918         if ((hw_event_id > 0) && (hw_event_id < ARMV8_PMUV3_MAX_COMMON_EVENTS)
919             && test_bit(hw_event_id, armpmu->pmceid_bitmap)) {
920                 return hw_event_id;
921         }
922
923         return armpmu_map_event(event, extra_event_map, extra_cache_map,
924                                 ARMV8_PMU_EVTYPE_EVENT);
925 }
926
927 static int armv8_pmuv3_map_event(struct perf_event *event)
928 {
929         return __armv8_pmuv3_map_event(event, NULL, NULL);
930 }
931
932 static int armv8_a53_map_event(struct perf_event *event)
933 {
934         return __armv8_pmuv3_map_event(event, NULL, &armv8_a53_perf_cache_map);
935 }
936
937 static int armv8_a57_map_event(struct perf_event *event)
938 {
939         return __armv8_pmuv3_map_event(event, NULL, &armv8_a57_perf_cache_map);
940 }
941
942 static int armv8_a73_map_event(struct perf_event *event)
943 {
944         return __armv8_pmuv3_map_event(event, NULL, &armv8_a73_perf_cache_map);
945 }
946
947 static int armv8_thunder_map_event(struct perf_event *event)
948 {
949         return __armv8_pmuv3_map_event(event, NULL,
950                                        &armv8_thunder_perf_cache_map);
951 }
952
953 static int armv8_vulcan_map_event(struct perf_event *event)
954 {
955         return __armv8_pmuv3_map_event(event, NULL,
956                                        &armv8_vulcan_perf_cache_map);
957 }
958
959 struct armv8pmu_probe_info {
960         struct arm_pmu *pmu;
961         bool present;
962 };
963
964 static void __armv8pmu_probe_pmu(void *info)
965 {
966         struct armv8pmu_probe_info *probe = info;
967         struct arm_pmu *cpu_pmu = probe->pmu;
968         u64 dfr0;
969         u64 pmceid_raw[2];
970         u32 pmceid[2];
971         int pmuver;
972
973         dfr0 = read_sysreg(id_aa64dfr0_el1);
974         pmuver = cpuid_feature_extract_unsigned_field(dfr0,
975                         ID_AA64DFR0_PMUVER_SHIFT);
976         if (pmuver == 0xf || pmuver == 0)
977                 return;
978
979         probe->present = true;
980
981         /* Read the number of CNTx counters supported from PMNC */
982         cpu_pmu->num_events = (armv8pmu_pmcr_read() >> ARMV8_PMU_PMCR_N_SHIFT)
983                 & ARMV8_PMU_PMCR_N_MASK;
984
985         /* Add the CPU cycles counter */
986         cpu_pmu->num_events += 1;
987
988         pmceid[0] = pmceid_raw[0] = read_sysreg(pmceid0_el0);
989         pmceid[1] = pmceid_raw[1] = read_sysreg(pmceid1_el0);
990
991         bitmap_from_arr32(cpu_pmu->pmceid_bitmap,
992                              pmceid, ARMV8_PMUV3_MAX_COMMON_EVENTS);
993
994         pmceid[0] = pmceid_raw[0] >> 32;
995         pmceid[1] = pmceid_raw[1] >> 32;
996
997         bitmap_from_arr32(cpu_pmu->pmceid_ext_bitmap,
998                              pmceid, ARMV8_PMUV3_MAX_COMMON_EVENTS);
999 }
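/*
 * Example (annotation): on a core whose PMCR_EL0.N field reads 6,
 * num_events ends up as 7 (six programmable counters plus the cycle
 * counter), and PMCEID0/1_EL0 are unpacked into the common and extended
 * common event bitmaps consumed by armv8pmu_event_attr_is_visible() and
 * __armv8_pmuv3_map_event().
 */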
1000
1001 static int armv8pmu_probe_pmu(struct arm_pmu *cpu_pmu)
1002 {
1003         struct armv8pmu_probe_info probe = {
1004                 .pmu = cpu_pmu,
1005                 .present = false,
1006         };
1007         int ret;
1008
1009         ret = smp_call_function_any(&cpu_pmu->supported_cpus,
1010                                     __armv8pmu_probe_pmu,
1011                                     &probe, 1);
1012         if (ret)
1013                 return ret;
1014
1015         return probe.present ? 0 : -ENODEV;
1016 }
1017
1018 static int armv8_pmu_init(struct arm_pmu *cpu_pmu)
1019 {
1020         int ret = armv8pmu_probe_pmu(cpu_pmu);
1021         if (ret)
1022                 return ret;
1023
1024         cpu_pmu->handle_irq             = armv8pmu_handle_irq;
1025         cpu_pmu->enable                 = armv8pmu_enable_event;
1026         cpu_pmu->disable                = armv8pmu_disable_event;
1027         cpu_pmu->read_counter           = armv8pmu_read_counter;
1028         cpu_pmu->write_counter          = armv8pmu_write_counter;
1029         cpu_pmu->get_event_idx          = armv8pmu_get_event_idx;
1030         cpu_pmu->clear_event_idx        = armv8pmu_clear_event_idx;
1031         cpu_pmu->start                  = armv8pmu_start;
1032         cpu_pmu->stop                   = armv8pmu_stop;
1033         cpu_pmu->reset                  = armv8pmu_reset;
1034         cpu_pmu->set_event_filter       = armv8pmu_set_event_filter;
1035         cpu_pmu->filter_match           = armv8pmu_filter_match;
1036
1037         return 0;
1038 }
1039
1040 static int armv8_pmuv3_init(struct arm_pmu *cpu_pmu)
1041 {
1042         int ret = armv8_pmu_init(cpu_pmu);
1043         if (ret)
1044                 return ret;
1045
1046         cpu_pmu->name                   = "armv8_pmuv3";
1047         cpu_pmu->map_event              = armv8_pmuv3_map_event;
1048         cpu_pmu->attr_groups[ARMPMU_ATTR_GROUP_EVENTS] =
1049                 &armv8_pmuv3_events_attr_group;
1050         cpu_pmu->attr_groups[ARMPMU_ATTR_GROUP_FORMATS] =
1051                 &armv8_pmuv3_format_attr_group;
1052
1053         return 0;
1054 }
1055
1056 static int armv8_a35_pmu_init(struct arm_pmu *cpu_pmu)
1057 {
1058         int ret = armv8_pmu_init(cpu_pmu);
1059         if (ret)
1060                 return ret;
1061
1062         cpu_pmu->name                   = "armv8_cortex_a35";
1063         cpu_pmu->map_event              = armv8_a53_map_event;
1064         cpu_pmu->attr_groups[ARMPMU_ATTR_GROUP_EVENTS] =
1065                 &armv8_pmuv3_events_attr_group;
1066         cpu_pmu->attr_groups[ARMPMU_ATTR_GROUP_FORMATS] =
1067                 &armv8_pmuv3_format_attr_group;
1068
1069         return 0;
1070 }
1071
1072 static int armv8_a53_pmu_init(struct arm_pmu *cpu_pmu)
1073 {
1074         int ret = armv8_pmu_init(cpu_pmu);
1075         if (ret)
1076                 return ret;
1077
1078         cpu_pmu->name                   = "armv8_cortex_a53";
1079         cpu_pmu->map_event              = armv8_a53_map_event;
1080         cpu_pmu->attr_groups[ARMPMU_ATTR_GROUP_EVENTS] =
1081                 &armv8_pmuv3_events_attr_group;
1082         cpu_pmu->attr_groups[ARMPMU_ATTR_GROUP_FORMATS] =
1083                 &armv8_pmuv3_format_attr_group;
1084
1085         return 0;
1086 }
1087
1088 static int armv8_a57_pmu_init(struct arm_pmu *cpu_pmu)
1089 {
1090         int ret = armv8_pmu_init(cpu_pmu);
1091         if (ret)
1092                 return ret;
1093
1094         cpu_pmu->name                   = "armv8_cortex_a57";
1095         cpu_pmu->map_event              = armv8_a57_map_event;
1096         cpu_pmu->attr_groups[ARMPMU_ATTR_GROUP_EVENTS] =
1097                 &armv8_pmuv3_events_attr_group;
1098         cpu_pmu->attr_groups[ARMPMU_ATTR_GROUP_FORMATS] =
1099                 &armv8_pmuv3_format_attr_group;
1100
1101         return 0;
1102 }
1103
1104 static int armv8_a72_pmu_init(struct arm_pmu *cpu_pmu)
1105 {
1106         int ret = armv8_pmu_init(cpu_pmu);
1107         if (ret)
1108                 return ret;
1109
1110         cpu_pmu->name                   = "armv8_cortex_a72";
1111         cpu_pmu->map_event              = armv8_a57_map_event;
1112         cpu_pmu->attr_groups[ARMPMU_ATTR_GROUP_EVENTS] =
1113                 &armv8_pmuv3_events_attr_group;
1114         cpu_pmu->attr_groups[ARMPMU_ATTR_GROUP_FORMATS] =
1115                 &armv8_pmuv3_format_attr_group;
1116
1117         return 0;
1118 }
1119
1120 static int armv8_a73_pmu_init(struct arm_pmu *cpu_pmu)
1121 {
1122         int ret = armv8_pmu_init(cpu_pmu);
1123         if (ret)
1124                 return ret;
1125
1126         cpu_pmu->name                   = "armv8_cortex_a73";
1127         cpu_pmu->map_event              = armv8_a73_map_event;
1128         cpu_pmu->attr_groups[ARMPMU_ATTR_GROUP_EVENTS] =
1129                 &armv8_pmuv3_events_attr_group;
1130         cpu_pmu->attr_groups[ARMPMU_ATTR_GROUP_FORMATS] =
1131                 &armv8_pmuv3_format_attr_group;
1132
1133         return 0;
1134 }
1135
1136 static int armv8_thunder_pmu_init(struct arm_pmu *cpu_pmu)
1137 {
1138         int ret = armv8_pmu_init(cpu_pmu);
1139         if (ret)
1140                 return ret;
1141
1142         cpu_pmu->name                   = "armv8_cavium_thunder";
1143         cpu_pmu->map_event              = armv8_thunder_map_event;
1144         cpu_pmu->attr_groups[ARMPMU_ATTR_GROUP_EVENTS] =
1145                 &armv8_pmuv3_events_attr_group;
1146         cpu_pmu->attr_groups[ARMPMU_ATTR_GROUP_FORMATS] =
1147                 &armv8_pmuv3_format_attr_group;
1148
1149         return 0;
1150 }
1151
1152 static int armv8_vulcan_pmu_init(struct arm_pmu *cpu_pmu)
1153 {
1154         int ret = armv8_pmu_init(cpu_pmu);
1155         if (ret)
1156                 return ret;
1157
1158         cpu_pmu->name                   = "armv8_brcm_vulcan";
1159         cpu_pmu->map_event              = armv8_vulcan_map_event;
1160         cpu_pmu->attr_groups[ARMPMU_ATTR_GROUP_EVENTS] =
1161                 &armv8_pmuv3_events_attr_group;
1162         cpu_pmu->attr_groups[ARMPMU_ATTR_GROUP_FORMATS] =
1163                 &armv8_pmuv3_format_attr_group;
1164
1165         return 0;
1166 }
1167
1168 static const struct of_device_id armv8_pmu_of_device_ids[] = {
1169         {.compatible = "arm,armv8-pmuv3",       .data = armv8_pmuv3_init},
1170         {.compatible = "arm,cortex-a35-pmu",    .data = armv8_a35_pmu_init},
1171         {.compatible = "arm,cortex-a53-pmu",    .data = armv8_a53_pmu_init},
1172         {.compatible = "arm,cortex-a57-pmu",    .data = armv8_a57_pmu_init},
1173         {.compatible = "arm,cortex-a72-pmu",    .data = armv8_a72_pmu_init},
1174         {.compatible = "arm,cortex-a73-pmu",    .data = armv8_a73_pmu_init},
1175         {.compatible = "cavium,thunder-pmu",    .data = armv8_thunder_pmu_init},
1176         {.compatible = "brcm,vulcan-pmu",       .data = armv8_vulcan_pmu_init},
1177         {},
1178 };
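/*
 * Illustration (hypothetical devicetree snippet, not from this file): a
 * PMU node binds to one of the compatibles above, for example
 *
 *   pmu {
 *           compatible = "arm,cortex-a53-pmu";
 *           interrupts = <GIC_PPI 7 IRQ_TYPE_LEVEL_HIGH>;
 *   };
 *
 * The interrupt specifier is board-specific and shown only as a sketch.
 */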
1179
1180 static int armv8_pmu_device_probe(struct platform_device *pdev)
1181 {
1182         return arm_pmu_device_probe(pdev, armv8_pmu_of_device_ids, NULL);
1183 }
1184
1185 static struct platform_driver armv8_pmu_driver = {
1186         .driver         = {
1187                 .name   = ARMV8_PMU_PDEV_NAME,
1188                 .of_match_table = armv8_pmu_of_device_ids,
1189                 .suppress_bind_attrs = true,
1190         },
1191         .probe          = armv8_pmu_device_probe,
1192 };
1193
1194 static int __init armv8_pmu_driver_init(void)
1195 {
1196         if (acpi_disabled)
1197                 return platform_driver_register(&armv8_pmu_driver);
1198         else
1199                 return arm_pmu_acpi_probe(armv8_pmuv3_init);
1200 }
1201 device_initcall(armv8_pmu_driver_init)
1202
1203 void arch_perf_update_userpage(struct perf_event *event,
1204                                struct perf_event_mmap_page *userpg, u64 now)
1205 {
1206         u32 freq;
1207         u32 shift;
1208
1209         /*
1210          * Internal timekeeping for enabled/running/stopped times
1211          * is always computed with the sched_clock.
1212          */
1213         freq = arch_timer_get_rate();
1214         userpg->cap_user_time = 1;
1215
1216         clocks_calc_mult_shift(&userpg->time_mult, &shift, freq,
1217                         NSEC_PER_SEC, 0);
1218         /*
1219          * time_shift is not expected to be greater than 31 because
1220          * the originally published conversion algorithm shifted a
1221          * 32-bit value (it now specifies a 64-bit value); refer to the
1222          * perf_event_mmap_page documentation in perf_event.h.
1223          */
1224         if (shift == 32) {
1225                 shift = 31;
1226                 userpg->time_mult >>= 1;
1227         }
1228         userpg->time_shift = (u16)shift;
1229         userpg->time_offset = -now;
1230 }
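/*
 * Worked example (annotation): userspace converts a raw counter delta to
 * nanoseconds as documented for perf_event_mmap_page, roughly
 *
 *   quot = cyc >> time_shift;
 *   rem  = cyc & (((u64)1 << time_shift) - 1);
 *   ns   = quot * time_mult + ((rem * time_mult) >> time_shift);
 *
 * clocks_calc_mult_shift() above derives mult/shift from the architected
 * timer frequency, and shift is clamped to 31 to match that published
 * algorithm.
 */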