// SPDX-License-Identifier: GPL-2.0-only
/*
 * Perf support for the Statistical Profiling Extension, introduced as
 * part of ARMv8.2.
 *
 * Copyright (C) 2016 ARM Limited
 *
 * Author: Will Deacon <will.deacon@arm.com>
 */

#define PMUNAME                                 "arm_spe"
#define DRVNAME                                 PMUNAME "_pmu"
#define pr_fmt(fmt)                             DRVNAME ": " fmt

#include <linux/bitops.h>
#include <linux/bug.h>
#include <linux/capability.h>
#include <linux/cpuhotplug.h>
#include <linux/cpumask.h>
#include <linux/device.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/of_address.h>
#include <linux/of_device.h>
#include <linux/perf_event.h>
#include <linux/perf/arm_pmu.h>
#include <linux/platform_device.h>
#include <linux/printk.h>
#include <linux/slab.h>
#include <linux/smp.h>
#include <linux/vmalloc.h>

#include <asm/barrier.h>
#include <asm/cpufeature.h>
#include <asm/mmu.h>
#include <asm/sysreg.h>

/*
 * Cache if the event is allowed to trace Context information.
 * This allows us to perform the check, i.e., perfmon_capable(),
 * in the context of the event owner, once, during the event_init().
 */
#define SPE_PMU_HW_FLAGS_CX                     BIT(0)

static void set_spe_event_has_cx(struct perf_event *event)
{
        if (IS_ENABLED(CONFIG_PID_IN_CONTEXTIDR) && perfmon_capable())
                event->hw.flags |= SPE_PMU_HW_FLAGS_CX;
}

static bool get_spe_event_has_cx(struct perf_event *event)
{
        return !!(event->hw.flags & SPE_PMU_HW_FLAGS_CX);
}

#define ARM_SPE_BUF_PAD_BYTE                    0
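/*
 * A zero byte decodes as a PAD packet in the SPE trace protocol, which
 * is why ARM_SPE_BUF_PAD_BYTE is 0: regions padded with it are skipped
 * harmlessly by trace decoders.
 */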

struct arm_spe_pmu_buf {
        int                                     nr_pages;
        bool                                    snapshot;
        void                                    *base;
};

struct arm_spe_pmu {
        struct pmu                              pmu;
        struct platform_device                  *pdev;
        cpumask_t                               supported_cpus;
        struct hlist_node                       hotplug_node;

        int                                     irq; /* PPI */

        u16                                     min_period;
        u16                                     counter_sz;

#define SPE_PMU_FEAT_FILT_EVT                   (1UL << 0)
#define SPE_PMU_FEAT_FILT_TYP                   (1UL << 1)
#define SPE_PMU_FEAT_FILT_LAT                   (1UL << 2)
#define SPE_PMU_FEAT_ARCH_INST                  (1UL << 3)
#define SPE_PMU_FEAT_LDS                        (1UL << 4)
#define SPE_PMU_FEAT_ERND                       (1UL << 5)
#define SPE_PMU_FEAT_DEV_PROBED                 (1UL << 63)
        u64                                     features;

        u16                                     max_record_sz;
        u16                                     align;
        struct perf_output_handle __percpu      *handle;
};

#define to_spe_pmu(p) (container_of(p, struct arm_spe_pmu, pmu))

/* Convert a free-running index from perf into an SPE buffer offset */
#define PERF_IDX2OFF(idx, buf)  ((idx) % ((buf)->nr_pages << PAGE_SHIFT))
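/*
 * For illustration (values assumed, not read from hardware): with
 * nr_pages = 4 and 4 KiB pages the modulus is 16 KiB, so a free-running
 * handle->head of 0x4004 maps to buffer offset 0x4.
 */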

/* Keep track of our dynamic hotplug state */
static enum cpuhp_state arm_spe_pmu_online;

enum arm_spe_pmu_buf_fault_action {
        SPE_PMU_BUF_FAULT_ACT_SPURIOUS,
        SPE_PMU_BUF_FAULT_ACT_FATAL,
        SPE_PMU_BUF_FAULT_ACT_OK,
};

/* This sysfs gunk was really good fun to write. */
enum arm_spe_pmu_capabilities {
        SPE_PMU_CAP_ARCH_INST = 0,
        SPE_PMU_CAP_ERND,
        SPE_PMU_CAP_FEAT_MAX,
        SPE_PMU_CAP_CNT_SZ = SPE_PMU_CAP_FEAT_MAX,
        SPE_PMU_CAP_MIN_IVAL,
};

static int arm_spe_pmu_feat_caps[SPE_PMU_CAP_FEAT_MAX] = {
        [SPE_PMU_CAP_ARCH_INST] = SPE_PMU_FEAT_ARCH_INST,
        [SPE_PMU_CAP_ERND]      = SPE_PMU_FEAT_ERND,
};

static u32 arm_spe_pmu_cap_get(struct arm_spe_pmu *spe_pmu, int cap)
{
        if (cap < SPE_PMU_CAP_FEAT_MAX)
                return !!(spe_pmu->features & arm_spe_pmu_feat_caps[cap]);

        switch (cap) {
        case SPE_PMU_CAP_CNT_SZ:
                return spe_pmu->counter_sz;
        case SPE_PMU_CAP_MIN_IVAL:
                return spe_pmu->min_period;
        default:
                WARN(1, "unknown cap %d\n", cap);
        }

        return 0;
}

static ssize_t arm_spe_pmu_cap_show(struct device *dev,
                                    struct device_attribute *attr,
                                    char *buf)
{
        struct arm_spe_pmu *spe_pmu = dev_get_drvdata(dev);
        struct dev_ext_attribute *ea =
                container_of(attr, struct dev_ext_attribute, attr);
        int cap = (long)ea->var;

        return snprintf(buf, PAGE_SIZE, "%u\n",
                arm_spe_pmu_cap_get(spe_pmu, cap));
}

#define SPE_EXT_ATTR_ENTRY(_name, _func, _var)                          \
        &((struct dev_ext_attribute[]) {                                \
                { __ATTR(_name, S_IRUGO, _func, NULL), (void *)_var }   \
        })[0].attr.attr

#define SPE_CAP_EXT_ATTR_ENTRY(_name, _var)                             \
        SPE_EXT_ATTR_ENTRY(_name, arm_spe_pmu_cap_show, _var)
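/*
 * SPE_EXT_ATTR_ENTRY builds an anonymous one-element array of
 * dev_ext_attribute as a compound literal: the returned pointer to the
 * embedded struct attribute can sit directly in an attribute table,
 * while the capability index rides along in ->var for
 * arm_spe_pmu_cap_show() to recover via container_of().
 */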

static struct attribute *arm_spe_pmu_cap_attr[] = {
        SPE_CAP_EXT_ATTR_ENTRY(arch_inst, SPE_PMU_CAP_ARCH_INST),
        SPE_CAP_EXT_ATTR_ENTRY(ernd, SPE_PMU_CAP_ERND),
        SPE_CAP_EXT_ATTR_ENTRY(count_size, SPE_PMU_CAP_CNT_SZ),
        SPE_CAP_EXT_ATTR_ENTRY(min_interval, SPE_PMU_CAP_MIN_IVAL),
        NULL,
};

static struct attribute_group arm_spe_pmu_cap_group = {
        .name   = "caps",
        .attrs  = arm_spe_pmu_cap_attr,
};

/* User ABI */
#define ATTR_CFG_FLD_ts_enable_CFG              config  /* PMSCR_EL1.TS */
#define ATTR_CFG_FLD_ts_enable_LO               0
#define ATTR_CFG_FLD_ts_enable_HI               0
#define ATTR_CFG_FLD_pa_enable_CFG              config  /* PMSCR_EL1.PA */
#define ATTR_CFG_FLD_pa_enable_LO               1
#define ATTR_CFG_FLD_pa_enable_HI               1
#define ATTR_CFG_FLD_pct_enable_CFG             config  /* PMSCR_EL1.PCT */
#define ATTR_CFG_FLD_pct_enable_LO              2
#define ATTR_CFG_FLD_pct_enable_HI              2
#define ATTR_CFG_FLD_jitter_CFG                 config  /* PMSIRR_EL1.RND */
#define ATTR_CFG_FLD_jitter_LO                  16
#define ATTR_CFG_FLD_jitter_HI                  16
#define ATTR_CFG_FLD_branch_filter_CFG          config  /* PMSFCR_EL1.B */
#define ATTR_CFG_FLD_branch_filter_LO           32
#define ATTR_CFG_FLD_branch_filter_HI           32
#define ATTR_CFG_FLD_load_filter_CFG            config  /* PMSFCR_EL1.LD */
#define ATTR_CFG_FLD_load_filter_LO             33
#define ATTR_CFG_FLD_load_filter_HI             33
#define ATTR_CFG_FLD_store_filter_CFG           config  /* PMSFCR_EL1.ST */
#define ATTR_CFG_FLD_store_filter_LO            34
#define ATTR_CFG_FLD_store_filter_HI            34

#define ATTR_CFG_FLD_event_filter_CFG           config1 /* PMSEVFR_EL1 */
#define ATTR_CFG_FLD_event_filter_LO            0
#define ATTR_CFG_FLD_event_filter_HI            63

#define ATTR_CFG_FLD_min_latency_CFG            config2 /* PMSLATFR_EL1.MINLAT */
#define ATTR_CFG_FLD_min_latency_LO             0
#define ATTR_CFG_FLD_min_latency_HI             11

/* Why does everything I do descend into this? */
#define __GEN_PMU_FORMAT_ATTR(cfg, lo, hi)                              \
        (lo) == (hi) ? #cfg ":" #lo "\n" : #cfg ":" #lo "-" #hi

#define _GEN_PMU_FORMAT_ATTR(cfg, lo, hi)                               \
        __GEN_PMU_FORMAT_ATTR(cfg, lo, hi)

#define GEN_PMU_FORMAT_ATTR(name)                                       \
        PMU_FORMAT_ATTR(name,                                           \
        _GEN_PMU_FORMAT_ATTR(ATTR_CFG_FLD_##name##_CFG,                 \
                             ATTR_CFG_FLD_##name##_LO,                  \
                             ATTR_CFG_FLD_##name##_HI))

#define _ATTR_CFG_GET_FLD(attr, cfg, lo, hi)                            \
        ((((attr)->cfg) >> lo) & GENMASK(hi - lo, 0))

#define ATTR_CFG_GET_FLD(attr, name)                                    \
        _ATTR_CFG_GET_FLD(attr,                                         \
                          ATTR_CFG_FLD_##name##_CFG,                    \
                          ATTR_CFG_FLD_##name##_LO,                     \
                          ATTR_CFG_FLD_##name##_HI)
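/*
 * As a worked example: GEN_PMU_FORMAT_ATTR(ts_enable) expands through
 * the _CFG/_LO/_HI macros above to a sysfs format string of "config:0",
 * while GEN_PMU_FORMAT_ATTR(event_filter) yields "config1:0-63". The
 * extra _GEN_PMU_FORMAT_ATTR level exists so that the field macros are
 * expanded before being stringified.
 */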

GEN_PMU_FORMAT_ATTR(ts_enable);
GEN_PMU_FORMAT_ATTR(pa_enable);
GEN_PMU_FORMAT_ATTR(pct_enable);
GEN_PMU_FORMAT_ATTR(jitter);
GEN_PMU_FORMAT_ATTR(branch_filter);
GEN_PMU_FORMAT_ATTR(load_filter);
GEN_PMU_FORMAT_ATTR(store_filter);
GEN_PMU_FORMAT_ATTR(event_filter);
GEN_PMU_FORMAT_ATTR(min_latency);

static struct attribute *arm_spe_pmu_formats_attr[] = {
        &format_attr_ts_enable.attr,
        &format_attr_pa_enable.attr,
        &format_attr_pct_enable.attr,
        &format_attr_jitter.attr,
        &format_attr_branch_filter.attr,
        &format_attr_load_filter.attr,
        &format_attr_store_filter.attr,
        &format_attr_event_filter.attr,
        &format_attr_min_latency.attr,
        NULL,
};

static struct attribute_group arm_spe_pmu_format_group = {
        .name   = "format",
        .attrs  = arm_spe_pmu_formats_attr,
};

static ssize_t arm_spe_pmu_get_attr_cpumask(struct device *dev,
                                            struct device_attribute *attr,
                                            char *buf)
{
        struct arm_spe_pmu *spe_pmu = dev_get_drvdata(dev);

        return cpumap_print_to_pagebuf(true, buf, &spe_pmu->supported_cpus);
}
static DEVICE_ATTR(cpumask, S_IRUGO, arm_spe_pmu_get_attr_cpumask, NULL);

static struct attribute *arm_spe_pmu_attrs[] = {
        &dev_attr_cpumask.attr,
        NULL,
};

static struct attribute_group arm_spe_pmu_group = {
        .attrs  = arm_spe_pmu_attrs,
};

static const struct attribute_group *arm_spe_pmu_attr_groups[] = {
        &arm_spe_pmu_group,
        &arm_spe_pmu_cap_group,
        &arm_spe_pmu_format_group,
        NULL,
};
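/*
 * With these groups registered, an instance named e.g. arm_spe_0 (the
 * index is assigned at probe time) surfaces under
 * /sys/bus/event_source/devices/arm_spe_0/{format,caps,cpumask}, and
 * the format fields can be set on the perf command line, for example:
 *
 *      perf record -e arm_spe_0/ts_enable=1,jitter=1/ -- <workload>
 *
 * (illustrative invocation; the exact PMU name depends on probe order)
 */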

/* Convert between user ABI and register values */
static u64 arm_spe_event_to_pmscr(struct perf_event *event)
{
        struct perf_event_attr *attr = &event->attr;
        u64 reg = 0;

        reg |= ATTR_CFG_GET_FLD(attr, ts_enable) << SYS_PMSCR_EL1_TS_SHIFT;
        reg |= ATTR_CFG_GET_FLD(attr, pa_enable) << SYS_PMSCR_EL1_PA_SHIFT;
        reg |= ATTR_CFG_GET_FLD(attr, pct_enable) << SYS_PMSCR_EL1_PCT_SHIFT;

        if (!attr->exclude_user)
                reg |= BIT(SYS_PMSCR_EL1_E0SPE_SHIFT);

        if (!attr->exclude_kernel)
                reg |= BIT(SYS_PMSCR_EL1_E1SPE_SHIFT);

        if (get_spe_event_has_cx(event))
                reg |= BIT(SYS_PMSCR_EL1_CX_SHIFT);

        return reg;
}

static void arm_spe_event_sanitise_period(struct perf_event *event)
{
        struct arm_spe_pmu *spe_pmu = to_spe_pmu(event->pmu);
        u64 period = event->hw.sample_period;
        u64 max_period = SYS_PMSIRR_EL1_INTERVAL_MASK
                         << SYS_PMSIRR_EL1_INTERVAL_SHIFT;

        if (period < spe_pmu->min_period)
                period = spe_pmu->min_period;
        else if (period > max_period)
                period = max_period;
        else
                period &= max_period;

        event->hw.sample_period = period;
}
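/*
 * Worked example, assuming the v5.10 sysreg layout where
 * PMSIRR_EL1.INTERVAL occupies bits [31:8]: a requested period of 1000
 * (0x3e8) that is above min_period is not a multiple of 256, so the
 * final "period &= max_period" clears the low 8 bits and the event runs
 * with an effective period of 768 (0x300).
 */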

static u64 arm_spe_event_to_pmsirr(struct perf_event *event)
{
        struct perf_event_attr *attr = &event->attr;
        u64 reg = 0;

        arm_spe_event_sanitise_period(event);

        reg |= ATTR_CFG_GET_FLD(attr, jitter) << SYS_PMSIRR_EL1_RND_SHIFT;
        reg |= event->hw.sample_period;

        return reg;
}

static u64 arm_spe_event_to_pmsfcr(struct perf_event *event)
{
        struct perf_event_attr *attr = &event->attr;
        u64 reg = 0;

        reg |= ATTR_CFG_GET_FLD(attr, load_filter) << SYS_PMSFCR_EL1_LD_SHIFT;
        reg |= ATTR_CFG_GET_FLD(attr, store_filter) << SYS_PMSFCR_EL1_ST_SHIFT;
        reg |= ATTR_CFG_GET_FLD(attr, branch_filter) << SYS_PMSFCR_EL1_B_SHIFT;

        if (reg)
                reg |= BIT(SYS_PMSFCR_EL1_FT_SHIFT);

        if (ATTR_CFG_GET_FLD(attr, event_filter))
                reg |= BIT(SYS_PMSFCR_EL1_FE_SHIFT);

        if (ATTR_CFG_GET_FLD(attr, min_latency))
                reg |= BIT(SYS_PMSFCR_EL1_FL_SHIFT);

        return reg;
}

static u64 arm_spe_event_to_pmsevfr(struct perf_event *event)
{
        struct perf_event_attr *attr = &event->attr;
        return ATTR_CFG_GET_FLD(attr, event_filter);
}

static u64 arm_spe_event_to_pmslatfr(struct perf_event *event)
{
        struct perf_event_attr *attr = &event->attr;
        return ATTR_CFG_GET_FLD(attr, min_latency)
               << SYS_PMSLATFR_EL1_MINLAT_SHIFT;
}

static void arm_spe_pmu_pad_buf(struct perf_output_handle *handle, int len)
{
        struct arm_spe_pmu_buf *buf = perf_get_aux(handle);
        u64 head = PERF_IDX2OFF(handle->head, buf);

        memset(buf->base + head, ARM_SPE_BUF_PAD_BYTE, len);
        if (!buf->snapshot)
                perf_aux_output_skip(handle, len);
}

static u64 arm_spe_pmu_next_snapshot_off(struct perf_output_handle *handle)
{
        struct arm_spe_pmu_buf *buf = perf_get_aux(handle);
        struct arm_spe_pmu *spe_pmu = to_spe_pmu(handle->event->pmu);
        u64 head = PERF_IDX2OFF(handle->head, buf);
        u64 limit = buf->nr_pages * PAGE_SIZE;

        /*
         * The trace format isn't parseable in reverse, so clamp
         * the limit to half of the buffer size in snapshot mode
         * so that the worst case is half a buffer of records, as
         * opposed to a single record.
         */
        if (head < limit >> 1)
                limit >>= 1;

        /*
         * If we're within max_record_sz of the limit, we must
         * pad, move the head index and recompute the limit.
         */
        if (limit - head < spe_pmu->max_record_sz) {
                arm_spe_pmu_pad_buf(handle, limit - head);
                handle->head = PERF_IDX2OFF(limit, buf);
                limit = ((buf->nr_pages * PAGE_SIZE) >> 1) + handle->head;
        }

        return limit;
}
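/*
 * Illustrative walk-through (numbers assumed, with 4 KiB pages): for a
 * 16-page/64 KiB buffer, a head of 12 KiB sits in the lower half, so
 * the limit is clamped to 32 KiB; if the head were instead at 63.5 KiB
 * with a 2 KiB max_record_sz, the tail of the buffer is padded, the
 * head wraps to 0 and the new limit becomes 32 KiB.
 */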

static u64 __arm_spe_pmu_next_off(struct perf_output_handle *handle)
{
        struct arm_spe_pmu *spe_pmu = to_spe_pmu(handle->event->pmu);
        struct arm_spe_pmu_buf *buf = perf_get_aux(handle);
        const u64 bufsize = buf->nr_pages * PAGE_SIZE;
        u64 limit = bufsize;
        u64 head, tail, wakeup;

        /*
         * The head can be misaligned for two reasons:
         *
         * 1. The hardware left PMBPTR pointing to the first byte after
         *    a record when generating a buffer management event.
         *
         * 2. We used perf_aux_output_skip to consume handle->size bytes
         *    and CIRC_SPACE was used to compute the size, which always
         *    leaves one entry free.
         *
         * Deal with this by padding to the next alignment boundary and
         * moving the head index. If we run out of buffer space, we'll
         * reduce handle->size to zero and end up reporting truncation.
         */
        head = PERF_IDX2OFF(handle->head, buf);
        if (!IS_ALIGNED(head, spe_pmu->align)) {
                unsigned long delta = roundup(head, spe_pmu->align) - head;

                delta = min(delta, handle->size);
                arm_spe_pmu_pad_buf(handle, delta);
                head = PERF_IDX2OFF(handle->head, buf);
        }

        /* If we've run out of free space, then nothing more to do */
        if (!handle->size)
                goto no_space;

        /* Compute the tail and wakeup indices now that we've aligned head */
        tail = PERF_IDX2OFF(handle->head + handle->size, buf);
        wakeup = PERF_IDX2OFF(handle->wakeup, buf);

        /*
         * Avoid clobbering unconsumed data. We know we have space, so
         * if we see head == tail we know that the buffer is empty. If
         * head > tail, then there's nothing to clobber prior to
         * wrapping.
         */
        if (head < tail)
                limit = round_down(tail, PAGE_SIZE);

        /*
         * Wakeup may be arbitrarily far into the future. If it's not in
         * the current generation, either we'll wrap before hitting it,
         * or it's in the past and has been handled already.
         *
         * If there's a wakeup before we wrap, arrange to be woken up by
         * the page boundary following it. Keep the tail boundary if
         * that's lower.
         */
        if (handle->wakeup < (handle->head + handle->size) && head <= wakeup)
                limit = min(limit, round_up(wakeup, PAGE_SIZE));

        if (limit > head)
                return limit;

        arm_spe_pmu_pad_buf(handle, handle->size);
no_space:
        perf_aux_output_flag(handle, PERF_AUX_FLAG_TRUNCATED);
        perf_aux_output_end(handle, 0);
        return 0;
}

static u64 arm_spe_pmu_next_off(struct perf_output_handle *handle)
{
        struct arm_spe_pmu_buf *buf = perf_get_aux(handle);
        struct arm_spe_pmu *spe_pmu = to_spe_pmu(handle->event->pmu);
        u64 limit = __arm_spe_pmu_next_off(handle);
        u64 head = PERF_IDX2OFF(handle->head, buf);

        /*
         * If the head has come too close to the end of the buffer,
         * then pad to the end and recompute the limit.
         */
        if (limit && (limit - head < spe_pmu->max_record_sz)) {
                arm_spe_pmu_pad_buf(handle, limit - head);
                limit = __arm_spe_pmu_next_off(handle);
        }

        return limit;
}

static void arm_spe_perf_aux_output_begin(struct perf_output_handle *handle,
                                          struct perf_event *event)
{
        u64 base, limit;
        struct arm_spe_pmu_buf *buf;

        /* Start a new aux session */
        buf = perf_aux_output_begin(handle, event);
        if (!buf) {
                event->hw.state |= PERF_HES_STOPPED;
                /*
                 * We still need to clear the limit pointer, since the
                 * profiler might only be disabled by virtue of a fault.
                 */
                limit = 0;
                goto out_write_limit;
        }

        limit = buf->snapshot ? arm_spe_pmu_next_snapshot_off(handle)
                              : arm_spe_pmu_next_off(handle);
        if (limit)
                limit |= BIT(SYS_PMBLIMITR_EL1_E_SHIFT);

        limit += (u64)buf->base;
        base = (u64)buf->base + PERF_IDX2OFF(handle->head, buf);
        write_sysreg_s(base, SYS_PMBPTR_EL1);

out_write_limit:
        write_sysreg_s(limit, SYS_PMBLIMITR_EL1);
}

static void arm_spe_perf_aux_output_end(struct perf_output_handle *handle)
{
        struct arm_spe_pmu_buf *buf = perf_get_aux(handle);
        u64 offset, size;

        offset = read_sysreg_s(SYS_PMBPTR_EL1) - (u64)buf->base;
        size = offset - PERF_IDX2OFF(handle->head, buf);

        if (buf->snapshot)
                handle->head = offset;

        perf_aux_output_end(handle, size);
}

static void arm_spe_pmu_disable_and_drain_local(void)
{
        /* Disable profiling at EL0 and EL1 */
        write_sysreg_s(0, SYS_PMSCR_EL1);
        isb();

        /* Drain any buffered data */
        psb_csync();
        dsb(nsh);

        /* Disable the profiling buffer */
        write_sysreg_s(0, SYS_PMBLIMITR_EL1);
        isb();
}

/* IRQ handling */
static enum arm_spe_pmu_buf_fault_action
arm_spe_pmu_buf_get_fault_act(struct perf_output_handle *handle)
{
        const char *err_str;
        u64 pmbsr;
        enum arm_spe_pmu_buf_fault_action ret;

        /*
         * Ensure new profiling data is visible to the CPU and any external
         * aborts have been resolved.
         */
        psb_csync();
        dsb(nsh);

        /* Ensure hardware updates to PMBPTR_EL1 are visible */
        isb();

        /* Service required? */
        pmbsr = read_sysreg_s(SYS_PMBSR_EL1);
        if (!(pmbsr & BIT(SYS_PMBSR_EL1_S_SHIFT)))
                return SPE_PMU_BUF_FAULT_ACT_SPURIOUS;

        /*
         * If we've lost data, disable profiling and also set the PARTIAL
         * flag to indicate that the last record is corrupted.
         */
        if (pmbsr & BIT(SYS_PMBSR_EL1_DL_SHIFT))
                perf_aux_output_flag(handle, PERF_AUX_FLAG_TRUNCATED |
                                             PERF_AUX_FLAG_PARTIAL);

        /* Report collisions to userspace so that it can up the period */
        if (pmbsr & BIT(SYS_PMBSR_EL1_COLL_SHIFT))
                perf_aux_output_flag(handle, PERF_AUX_FLAG_COLLISION);

        /* We only expect buffer management events */
        switch (pmbsr & (SYS_PMBSR_EL1_EC_MASK << SYS_PMBSR_EL1_EC_SHIFT)) {
        case SYS_PMBSR_EL1_EC_BUF:
                /* Handled below */
                break;
        case SYS_PMBSR_EL1_EC_FAULT_S1:
        case SYS_PMBSR_EL1_EC_FAULT_S2:
                err_str = "Unexpected buffer fault";
                goto out_err;
        default:
                err_str = "Unknown error code";
                goto out_err;
        }

        /* Buffer management event */
        switch (pmbsr &
                (SYS_PMBSR_EL1_BUF_BSC_MASK << SYS_PMBSR_EL1_BUF_BSC_SHIFT)) {
        case SYS_PMBSR_EL1_BUF_BSC_FULL:
                ret = SPE_PMU_BUF_FAULT_ACT_OK;
                goto out_stop;
        default:
                err_str = "Unknown buffer status code";
        }

out_err:
        pr_err_ratelimited("%s on CPU %d [PMBSR=0x%016llx, PMBPTR=0x%016llx, PMBLIMITR=0x%016llx]\n",
                           err_str, smp_processor_id(), pmbsr,
                           read_sysreg_s(SYS_PMBPTR_EL1),
                           read_sysreg_s(SYS_PMBLIMITR_EL1));
        ret = SPE_PMU_BUF_FAULT_ACT_FATAL;

out_stop:
        arm_spe_perf_aux_output_end(handle);
        return ret;
}

static irqreturn_t arm_spe_pmu_irq_handler(int irq, void *dev)
{
        struct perf_output_handle *handle = dev;
        struct perf_event *event = handle->event;
        enum arm_spe_pmu_buf_fault_action act;

        if (!perf_get_aux(handle))
                return IRQ_NONE;

        act = arm_spe_pmu_buf_get_fault_act(handle);
        if (act == SPE_PMU_BUF_FAULT_ACT_SPURIOUS)
                return IRQ_NONE;

        /*
         * Ensure perf callbacks have completed, which may disable the
         * profiling buffer in response to a TRUNCATION flag.
         */
        irq_work_run();

        switch (act) {
        case SPE_PMU_BUF_FAULT_ACT_FATAL:
                /*
                 * If a fatal exception occurred then leaving the profiling
                 * buffer enabled is a recipe for disaster. Since
                 * fatal faults don't always imply truncation, make sure
                 * that the profiling buffer is disabled explicitly before
                 * clearing the syndrome register.
                 */
                arm_spe_pmu_disable_and_drain_local();
                break;
        case SPE_PMU_BUF_FAULT_ACT_OK:
                /*
                 * We handled the fault (the buffer was full), so resume
                 * profiling as long as we didn't detect truncation.
                 * PMBPTR might be misaligned, but we'll burn that bridge
                 * when we get to it.
                 */
                if (!(handle->aux_flags & PERF_AUX_FLAG_TRUNCATED)) {
                        arm_spe_perf_aux_output_begin(handle, event);
                        isb();
                }
                break;
        case SPE_PMU_BUF_FAULT_ACT_SPURIOUS:
                /* We've seen you before, but GCC has the memory of a sieve. */
                break;
        }

        /* The buffer pointers are now sane, so resume profiling. */
        write_sysreg_s(0, SYS_PMBSR_EL1);
        return IRQ_HANDLED;
}

/* Perf callbacks */
static int arm_spe_pmu_event_init(struct perf_event *event)
{
        u64 reg;
        struct perf_event_attr *attr = &event->attr;
        struct arm_spe_pmu *spe_pmu = to_spe_pmu(event->pmu);

        /* This is, of course, deeply driver-specific */
        if (attr->type != event->pmu->type)
                return -ENOENT;

        if (event->cpu >= 0 &&
            !cpumask_test_cpu(event->cpu, &spe_pmu->supported_cpus))
                return -ENOENT;

        if (arm_spe_event_to_pmsevfr(event) & SYS_PMSEVFR_EL1_RES0)
                return -EOPNOTSUPP;

        if (attr->exclude_idle)
                return -EOPNOTSUPP;

        /*
         * Feedback-directed frequency throttling doesn't work when we
         * have a buffer of samples. We'd need to manually count the
         * samples in the buffer when it fills up and adjust the event
         * count to reflect that. Instead, just force the user to specify
         * a sample period.
         */
        if (attr->freq)
                return -EINVAL;

        reg = arm_spe_event_to_pmsfcr(event);
        if ((reg & BIT(SYS_PMSFCR_EL1_FE_SHIFT)) &&
            !(spe_pmu->features & SPE_PMU_FEAT_FILT_EVT))
                return -EOPNOTSUPP;

        if ((reg & BIT(SYS_PMSFCR_EL1_FT_SHIFT)) &&
            !(spe_pmu->features & SPE_PMU_FEAT_FILT_TYP))
                return -EOPNOTSUPP;

        if ((reg & BIT(SYS_PMSFCR_EL1_FL_SHIFT)) &&
            !(spe_pmu->features & SPE_PMU_FEAT_FILT_LAT))
                return -EOPNOTSUPP;

        set_spe_event_has_cx(event);
        reg = arm_spe_event_to_pmscr(event);
        if (!perfmon_capable() &&
            (reg & (BIT(SYS_PMSCR_EL1_PA_SHIFT) |
                    BIT(SYS_PMSCR_EL1_PCT_SHIFT))))
                return -EACCES;

        return 0;
}

static void arm_spe_pmu_start(struct perf_event *event, int flags)
{
        u64 reg;
        struct arm_spe_pmu *spe_pmu = to_spe_pmu(event->pmu);
        struct hw_perf_event *hwc = &event->hw;
        struct perf_output_handle *handle = this_cpu_ptr(spe_pmu->handle);

        hwc->state = 0;
        arm_spe_perf_aux_output_begin(handle, event);
        if (hwc->state)
                return;

        reg = arm_spe_event_to_pmsfcr(event);
        write_sysreg_s(reg, SYS_PMSFCR_EL1);

        reg = arm_spe_event_to_pmsevfr(event);
        write_sysreg_s(reg, SYS_PMSEVFR_EL1);

        reg = arm_spe_event_to_pmslatfr(event);
        write_sysreg_s(reg, SYS_PMSLATFR_EL1);

        if (flags & PERF_EF_RELOAD) {
                reg = arm_spe_event_to_pmsirr(event);
                write_sysreg_s(reg, SYS_PMSIRR_EL1);
                isb();
                reg = local64_read(&hwc->period_left);
                write_sysreg_s(reg, SYS_PMSICR_EL1);
        }

        reg = arm_spe_event_to_pmscr(event);
        isb();
        write_sysreg_s(reg, SYS_PMSCR_EL1);
}

static void arm_spe_pmu_stop(struct perf_event *event, int flags)
{
        struct arm_spe_pmu *spe_pmu = to_spe_pmu(event->pmu);
        struct hw_perf_event *hwc = &event->hw;
        struct perf_output_handle *handle = this_cpu_ptr(spe_pmu->handle);

        /* If we're already stopped, then nothing to do */
        if (hwc->state & PERF_HES_STOPPED)
                return;

        /* Stop all trace generation */
        arm_spe_pmu_disable_and_drain_local();

        if (flags & PERF_EF_UPDATE) {
                /*
                 * If there's a fault pending then ensure we contain it
                 * to this buffer, since we might be on the context-switch
                 * path.
                 */
                if (perf_get_aux(handle)) {
                        enum arm_spe_pmu_buf_fault_action act;

                        act = arm_spe_pmu_buf_get_fault_act(handle);
                        if (act == SPE_PMU_BUF_FAULT_ACT_SPURIOUS)
                                arm_spe_perf_aux_output_end(handle);
                        else
                                write_sysreg_s(0, SYS_PMBSR_EL1);
                }

                /*
                 * This may also contain ECOUNT, but nobody else should
                 * be looking at period_left, since we forbid frequency
                 * based sampling.
                 */
                local64_set(&hwc->period_left, read_sysreg_s(SYS_PMSICR_EL1));
                hwc->state |= PERF_HES_UPTODATE;
        }

        hwc->state |= PERF_HES_STOPPED;
}

static int arm_spe_pmu_add(struct perf_event *event, int flags)
{
        int ret = 0;
        struct arm_spe_pmu *spe_pmu = to_spe_pmu(event->pmu);
        struct hw_perf_event *hwc = &event->hw;
        int cpu = event->cpu == -1 ? smp_processor_id() : event->cpu;

        if (!cpumask_test_cpu(cpu, &spe_pmu->supported_cpus))
                return -ENOENT;

        hwc->state = PERF_HES_UPTODATE | PERF_HES_STOPPED;

        if (flags & PERF_EF_START) {
                arm_spe_pmu_start(event, PERF_EF_RELOAD);
                if (hwc->state & PERF_HES_STOPPED)
                        ret = -EINVAL;
        }

        return ret;
}

static void arm_spe_pmu_del(struct perf_event *event, int flags)
{
        arm_spe_pmu_stop(event, PERF_EF_UPDATE);
}

static void arm_spe_pmu_read(struct perf_event *event)
{
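        /* Nothing to do: samples are delivered through the AUX buffer. */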
}

static void *arm_spe_pmu_setup_aux(struct perf_event *event, void **pages,
                                   int nr_pages, bool snapshot)
{
        int i, cpu = event->cpu;
        struct page **pglist;
        struct arm_spe_pmu_buf *buf;

        /* We need at least two pages for this to work. */
        if (nr_pages < 2)
                return NULL;

        /*
         * We require an even number of pages for snapshot mode, so that
         * we can effectively treat the buffer as consisting of two equal
         * parts and give userspace a fighting chance of getting some
         * useful data out of it.
         */
        if (snapshot && (nr_pages & 1))
                return NULL;

        if (cpu == -1)
                cpu = raw_smp_processor_id();

        buf = kzalloc_node(sizeof(*buf), GFP_KERNEL, cpu_to_node(cpu));
        if (!buf)
                return NULL;

        pglist = kcalloc(nr_pages, sizeof(*pglist), GFP_KERNEL);
        if (!pglist)
                goto out_free_buf;

        for (i = 0; i < nr_pages; ++i)
                pglist[i] = virt_to_page(pages[i]);

        buf->base = vmap(pglist, nr_pages, VM_MAP, PAGE_KERNEL);
        if (!buf->base)
                goto out_free_pglist;

        buf->nr_pages   = nr_pages;
        buf->snapshot   = snapshot;

        kfree(pglist);
        return buf;

out_free_pglist:
        kfree(pglist);
out_free_buf:
        kfree(buf);
        return NULL;
}

static void arm_spe_pmu_free_aux(void *aux)
{
        struct arm_spe_pmu_buf *buf = aux;

        vunmap(buf->base);
        kfree(buf);
}

/* Initialisation and teardown functions */
static int arm_spe_pmu_perf_init(struct arm_spe_pmu *spe_pmu)
{
        static atomic_t pmu_idx = ATOMIC_INIT(-1);

        int idx;
        char *name;
        struct device *dev = &spe_pmu->pdev->dev;

        spe_pmu->pmu = (struct pmu) {
                .module = THIS_MODULE,
                .capabilities   = PERF_PMU_CAP_EXCLUSIVE | PERF_PMU_CAP_ITRACE,
                .attr_groups    = arm_spe_pmu_attr_groups,
                /*
                 * We hitch a ride on the software context here, so that
                 * we can support per-task profiling (which is not possible
                 * with the invalid context as it doesn't get sched callbacks).
                 * This requires that userspace either uses a dummy event for
                 * perf_event_open, since the aux buffer is not set up until
                 * a subsequent mmap, or creates the profiling event in a
                 * disabled state and explicitly PERF_EVENT_IOC_ENABLEs it
                 * once the buffer has been created.
                 */
                .task_ctx_nr    = perf_sw_context,
                .event_init     = arm_spe_pmu_event_init,
                .add            = arm_spe_pmu_add,
                .del            = arm_spe_pmu_del,
                .start          = arm_spe_pmu_start,
                .stop           = arm_spe_pmu_stop,
                .read           = arm_spe_pmu_read,
                .setup_aux      = arm_spe_pmu_setup_aux,
                .free_aux       = arm_spe_pmu_free_aux,
        };

        idx = atomic_inc_return(&pmu_idx);
        name = devm_kasprintf(dev, GFP_KERNEL, "%s_%d", PMUNAME, idx);
        if (!name) {
                dev_err(dev, "failed to allocate name for pmu %d\n", idx);
                return -ENOMEM;
        }

        return perf_pmu_register(&spe_pmu->pmu, name, -1);
}
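/*
 * Minimal userspace sketch of the second scheme described above (names
 * and sizes are illustrative, error handling elided): open the event
 * disabled, mmap the ring buffer and AUX area, then enable explicitly.
 *
 *      struct perf_event_attr attr = {
 *              .type          = spe_pmu_type,  // from the sysfs "type" file
 *              .size          = sizeof(attr),
 *              .disabled      = 1,
 *              .sample_period = 4096,
 *      };
 *      int fd = syscall(__NR_perf_event_open, &attr, 0, -1, -1, 0);
 *      void *rb = mmap(NULL, (1 + 8) * 4096, PROT_READ | PROT_WRITE,
 *                      MAP_SHARED, fd, 0);
 *      struct perf_event_mmap_page *pc = rb;
 *      pc->aux_offset = (1 + 8) * 4096;
 *      pc->aux_size   = 16 * 4096;
 *      void *aux = mmap(NULL, pc->aux_size, PROT_READ | PROT_WRITE,
 *                       MAP_SHARED, fd, pc->aux_offset);
 *      ioctl(fd, PERF_EVENT_IOC_ENABLE, 0);
 */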

static void arm_spe_pmu_perf_destroy(struct arm_spe_pmu *spe_pmu)
{
        perf_pmu_unregister(&spe_pmu->pmu);
}

static void __arm_spe_pmu_dev_probe(void *info)
{
        int fld;
        u64 reg;
        struct arm_spe_pmu *spe_pmu = info;
        struct device *dev = &spe_pmu->pdev->dev;

        fld = cpuid_feature_extract_unsigned_field(read_cpuid(ID_AA64DFR0_EL1),
                                                   ID_AA64DFR0_PMSVER_SHIFT);
        if (!fld) {
                dev_err(dev,
                        "unsupported ID_AA64DFR0_EL1.PMSVer [%d] on CPU %d\n",
                        fld, smp_processor_id());
                return;
        }

        /* Read PMBIDR first to determine whether or not we have access */
        reg = read_sysreg_s(SYS_PMBIDR_EL1);
        if (reg & BIT(SYS_PMBIDR_EL1_P_SHIFT)) {
                dev_err(dev,
                        "profiling buffer owned by higher exception level\n");
                return;
        }

        /* Minimum alignment. If it's out-of-range, then fail the probe */
        fld = reg >> SYS_PMBIDR_EL1_ALIGN_SHIFT & SYS_PMBIDR_EL1_ALIGN_MASK;
        spe_pmu->align = 1 << fld;
        if (spe_pmu->align > SZ_2K) {
                dev_err(dev, "unsupported PMBIDR.Align [%d] on CPU %d\n",
                        fld, smp_processor_id());
                return;
        }

        /* It's now safe to read PMSIDR and figure out what we've got */
        reg = read_sysreg_s(SYS_PMSIDR_EL1);
        if (reg & BIT(SYS_PMSIDR_EL1_FE_SHIFT))
                spe_pmu->features |= SPE_PMU_FEAT_FILT_EVT;

        if (reg & BIT(SYS_PMSIDR_EL1_FT_SHIFT))
                spe_pmu->features |= SPE_PMU_FEAT_FILT_TYP;

        if (reg & BIT(SYS_PMSIDR_EL1_FL_SHIFT))
                spe_pmu->features |= SPE_PMU_FEAT_FILT_LAT;

        if (reg & BIT(SYS_PMSIDR_EL1_ARCHINST_SHIFT))
                spe_pmu->features |= SPE_PMU_FEAT_ARCH_INST;

        if (reg & BIT(SYS_PMSIDR_EL1_LDS_SHIFT))
                spe_pmu->features |= SPE_PMU_FEAT_LDS;

        if (reg & BIT(SYS_PMSIDR_EL1_ERND_SHIFT))
                spe_pmu->features |= SPE_PMU_FEAT_ERND;

        /* This field has a spaced out encoding, so just use a look-up */
        fld = reg >> SYS_PMSIDR_EL1_INTERVAL_SHIFT & SYS_PMSIDR_EL1_INTERVAL_MASK;
        switch (fld) {
        case 0:
                spe_pmu->min_period = 256;
                break;
        case 2:
                spe_pmu->min_period = 512;
                break;
        case 3:
                spe_pmu->min_period = 768;
                break;
        case 4:
                spe_pmu->min_period = 1024;
                break;
        case 5:
                spe_pmu->min_period = 1536;
                break;
        case 6:
                spe_pmu->min_period = 2048;
                break;
        case 7:
                spe_pmu->min_period = 3072;
                break;
        default:
                dev_warn(dev, "unknown PMSIDR_EL1.Interval [%d]; assuming 8\n",
                         fld);
                fallthrough;
        case 8:
                spe_pmu->min_period = 4096;
        }

        /* Maximum record size. If it's out-of-range, then fail the probe */
        fld = reg >> SYS_PMSIDR_EL1_MAXSIZE_SHIFT & SYS_PMSIDR_EL1_MAXSIZE_MASK;
        spe_pmu->max_record_sz = 1 << fld;
        if (spe_pmu->max_record_sz > SZ_2K || spe_pmu->max_record_sz < 16) {
                dev_err(dev, "unsupported PMSIDR_EL1.MaxSize [%d] on CPU %d\n",
                        fld, smp_processor_id());
                return;
        }

        fld = reg >> SYS_PMSIDR_EL1_COUNTSIZE_SHIFT & SYS_PMSIDR_EL1_COUNTSIZE_MASK;
        switch (fld) {
        default:
                dev_warn(dev, "unknown PMSIDR_EL1.CountSize [%d]; assuming 2\n",
                         fld);
                fallthrough;
        case 2:
                spe_pmu->counter_sz = 12;
        }

        dev_info(dev,
                 "probed for CPUs %*pbl [max_record_sz %u, align %u, features 0x%llx]\n",
                 cpumask_pr_args(&spe_pmu->supported_cpus),
                 spe_pmu->max_record_sz, spe_pmu->align, spe_pmu->features);

        spe_pmu->features |= SPE_PMU_FEAT_DEV_PROBED;
        return;
}

static void __arm_spe_pmu_reset_local(void)
{
        /*
         * This is probably overkill, as we have no idea where we're
         * draining any buffered data to...
         */
        arm_spe_pmu_disable_and_drain_local();

        /* Reset the buffer base pointer */
        write_sysreg_s(0, SYS_PMBPTR_EL1);
        isb();

        /* Clear any pending management interrupts */
        write_sysreg_s(0, SYS_PMBSR_EL1);
        isb();
}

static void __arm_spe_pmu_setup_one(void *info)
{
        struct arm_spe_pmu *spe_pmu = info;

        __arm_spe_pmu_reset_local();
        enable_percpu_irq(spe_pmu->irq, IRQ_TYPE_NONE);
}

static void __arm_spe_pmu_stop_one(void *info)
{
        struct arm_spe_pmu *spe_pmu = info;

        disable_percpu_irq(spe_pmu->irq);
        __arm_spe_pmu_reset_local();
}

static int arm_spe_pmu_cpu_startup(unsigned int cpu, struct hlist_node *node)
{
        struct arm_spe_pmu *spe_pmu;

        spe_pmu = hlist_entry_safe(node, struct arm_spe_pmu, hotplug_node);
        if (!cpumask_test_cpu(cpu, &spe_pmu->supported_cpus))
                return 0;

        __arm_spe_pmu_setup_one(spe_pmu);
        return 0;
}

static int arm_spe_pmu_cpu_teardown(unsigned int cpu, struct hlist_node *node)
{
        struct arm_spe_pmu *spe_pmu;

        spe_pmu = hlist_entry_safe(node, struct arm_spe_pmu, hotplug_node);
        if (!cpumask_test_cpu(cpu, &spe_pmu->supported_cpus))
                return 0;

        __arm_spe_pmu_stop_one(spe_pmu);
        return 0;
}

static int arm_spe_pmu_dev_init(struct arm_spe_pmu *spe_pmu)
{
        int ret;
        cpumask_t *mask = &spe_pmu->supported_cpus;

        /* Make sure we probe the hardware on a relevant CPU */
        ret = smp_call_function_any(mask, __arm_spe_pmu_dev_probe, spe_pmu, 1);
        if (ret || !(spe_pmu->features & SPE_PMU_FEAT_DEV_PROBED))
                return -ENXIO;

        /* Request our PPIs (note that the IRQ is still disabled) */
        ret = request_percpu_irq(spe_pmu->irq, arm_spe_pmu_irq_handler, DRVNAME,
                                 spe_pmu->handle);
        if (ret)
                return ret;

        /*
         * Register our hotplug notifier now so we don't miss any events.
         * This will enable the IRQ for any supported CPUs that are already
         * up.
         */
        ret = cpuhp_state_add_instance(arm_spe_pmu_online,
                                       &spe_pmu->hotplug_node);
        if (ret)
                free_percpu_irq(spe_pmu->irq, spe_pmu->handle);

        return ret;
}

static void arm_spe_pmu_dev_teardown(struct arm_spe_pmu *spe_pmu)
{
        cpuhp_state_remove_instance(arm_spe_pmu_online, &spe_pmu->hotplug_node);
        free_percpu_irq(spe_pmu->irq, spe_pmu->handle);
}

/* Driver and device probing */
static int arm_spe_pmu_irq_probe(struct arm_spe_pmu *spe_pmu)
{
        struct platform_device *pdev = spe_pmu->pdev;
        int irq = platform_get_irq(pdev, 0);

        if (irq < 0)
                return -ENXIO;

        if (!irq_is_percpu(irq)) {
                dev_err(&pdev->dev, "expected PPI but got SPI (%d)\n", irq);
                return -EINVAL;
        }

        if (irq_get_percpu_devid_partition(irq, &spe_pmu->supported_cpus)) {
                dev_err(&pdev->dev, "failed to get PPI partition (%d)\n", irq);
                return -EINVAL;
        }

        spe_pmu->irq = irq;
        return 0;
}

static const struct of_device_id arm_spe_pmu_of_match[] = {
        { .compatible = "arm,statistical-profiling-extension-v1", .data = (void *)1 },
        { /* Sentinel */ },
};
MODULE_DEVICE_TABLE(of, arm_spe_pmu_of_match);

static const struct platform_device_id arm_spe_match[] = {
        { ARMV8_SPE_PDEV_NAME, 0},
        { }
};
MODULE_DEVICE_TABLE(platform, arm_spe_match);

static int arm_spe_pmu_device_probe(struct platform_device *pdev)
{
        int ret;
        struct arm_spe_pmu *spe_pmu;
        struct device *dev = &pdev->dev;

        /*
         * If kernelspace is unmapped when running at EL0, then the SPE
         * buffer will fault and prematurely terminate the AUX session.
         */
        if (arm64_kernel_unmapped_at_el0()) {
                dev_warn_once(dev, "profiling buffer inaccessible. Try passing \"kpti=off\" on the kernel command line\n");
                return -EPERM;
        }

        spe_pmu = devm_kzalloc(dev, sizeof(*spe_pmu), GFP_KERNEL);
        if (!spe_pmu) {
                dev_err(dev, "failed to allocate spe_pmu\n");
                return -ENOMEM;
        }

        spe_pmu->handle = alloc_percpu(typeof(*spe_pmu->handle));
        if (!spe_pmu->handle)
                return -ENOMEM;

        spe_pmu->pdev = pdev;
        platform_set_drvdata(pdev, spe_pmu);

        ret = arm_spe_pmu_irq_probe(spe_pmu);
        if (ret)
                goto out_free_handle;

        ret = arm_spe_pmu_dev_init(spe_pmu);
        if (ret)
                goto out_free_handle;

        ret = arm_spe_pmu_perf_init(spe_pmu);
        if (ret)
                goto out_teardown_dev;

        return 0;

out_teardown_dev:
        arm_spe_pmu_dev_teardown(spe_pmu);
out_free_handle:
        free_percpu(spe_pmu->handle);
        return ret;
}

static int arm_spe_pmu_device_remove(struct platform_device *pdev)
{
        struct arm_spe_pmu *spe_pmu = platform_get_drvdata(pdev);

        arm_spe_pmu_perf_destroy(spe_pmu);
        arm_spe_pmu_dev_teardown(spe_pmu);
        free_percpu(spe_pmu->handle);
        return 0;
}

static struct platform_driver arm_spe_pmu_driver = {
        .id_table = arm_spe_match,
        .driver = {
                .name           = DRVNAME,
                .of_match_table = of_match_ptr(arm_spe_pmu_of_match),
                .suppress_bind_attrs = true,
        },
        .probe  = arm_spe_pmu_device_probe,
        .remove = arm_spe_pmu_device_remove,
};

static int __init arm_spe_pmu_init(void)
{
        int ret;

        ret = cpuhp_setup_state_multi(CPUHP_AP_ONLINE_DYN, DRVNAME,
                                      arm_spe_pmu_cpu_startup,
                                      arm_spe_pmu_cpu_teardown);
        if (ret < 0)
                return ret;
        arm_spe_pmu_online = ret;

        ret = platform_driver_register(&arm_spe_pmu_driver);
        if (ret)
                cpuhp_remove_multi_state(arm_spe_pmu_online);

        return ret;
}

static void __exit arm_spe_pmu_exit(void)
{
        platform_driver_unregister(&arm_spe_pmu_driver);
        cpuhp_remove_multi_state(arm_spe_pmu_online);
}

module_init(arm_spe_pmu_init);
module_exit(arm_spe_pmu_exit);

MODULE_DESCRIPTION("Perf driver for the ARMv8.2 Statistical Profiling Extension");
MODULE_AUTHOR("Will Deacon <will.deacon@arm.com>");
MODULE_LICENSE("GPL v2");