GNU Linux-libre 5.10.153-gnu1
drivers/hwtracing/coresight/coresight-etm-perf.c
// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright(C) 2015 Linaro Limited. All rights reserved.
 * Author: Mathieu Poirier <mathieu.poirier@linaro.org>
 */

#include <linux/coresight.h>
#include <linux/coresight-pmu.h>
#include <linux/cpumask.h>
#include <linux/device.h>
#include <linux/list.h>
#include <linux/mm.h>
#include <linux/init.h>
#include <linux/perf_event.h>
#include <linux/percpu-defs.h>
#include <linux/slab.h>
#include <linux/stringhash.h>
#include <linux/types.h>
#include <linux/workqueue.h>

#include "coresight-etm-perf.h"
#include "coresight-priv.h"

static struct pmu etm_pmu;
static bool etm_perf_up;

static DEFINE_PER_CPU(struct perf_output_handle, ctx_handle);
static DEFINE_PER_CPU(struct coresight_device *, csdev_src);

/* ETMv3.5/PTM's ETMCR is 'config' */
PMU_FORMAT_ATTR(cycacc,         "config:" __stringify(ETM_OPT_CYCACC));
PMU_FORMAT_ATTR(contextid,      "config:" __stringify(ETM_OPT_CTXTID));
PMU_FORMAT_ATTR(timestamp,      "config:" __stringify(ETM_OPT_TS));
PMU_FORMAT_ATTR(retstack,       "config:" __stringify(ETM_OPT_RETSTK));
/* Sink ID - same for all ETMs */
PMU_FORMAT_ATTR(sinkid,         "config2:0-31");

static struct attribute *etm_config_formats_attr[] = {
        &format_attr_cycacc.attr,
        &format_attr_contextid.attr,
        &format_attr_timestamp.attr,
        &format_attr_retstack.attr,
        &format_attr_sinkid.attr,
        NULL,
};

static const struct attribute_group etm_pmu_format_group = {
        .name   = "format",
        .attrs  = etm_config_formats_attr,
};
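
/*
 * Example (illustrative only, not driver code): the attributes above
 * surface under /sys/bus/event_source/devices/cs_etm/format/, so a perf
 * invocation such as:
 *
 *      perf record -e cs_etm/cycacc,timestamp/u -- ls
 *
 * would set the ETM_OPT_CYCACC and ETM_OPT_TS bits in attr.config.  The
 * command and workload are hypothetical usage; "cs_etm" is the name this
 * PMU is registered under (CORESIGHT_ETM_PMU_NAME) in etm_perf_init().
 */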

static struct attribute *etm_config_sinks_attr[] = {
        NULL,
};

static const struct attribute_group etm_pmu_sinks_group = {
        .name   = "sinks",
        .attrs  = etm_config_sinks_attr,
};

static const struct attribute_group *etm_pmu_attr_groups[] = {
        &etm_pmu_format_group,
        &etm_pmu_sinks_group,
        NULL,
};

static inline struct list_head **
etm_event_cpu_path_ptr(struct etm_event_data *data, int cpu)
{
        return per_cpu_ptr(data->path, cpu);
}

static inline struct list_head *
etm_event_cpu_path(struct etm_event_data *data, int cpu)
{
        return *etm_event_cpu_path_ptr(data, cpu);
}

static void etm_event_read(struct perf_event *event) {}

static int etm_addr_filters_alloc(struct perf_event *event)
{
        struct etm_filters *filters;
        int node = event->cpu == -1 ? -1 : cpu_to_node(event->cpu);

        filters = kzalloc_node(sizeof(struct etm_filters), GFP_KERNEL, node);
        if (!filters)
                return -ENOMEM;

        if (event->parent)
                memcpy(filters, event->parent->hw.addr_filters,
                       sizeof(*filters));

        event->hw.addr_filters = filters;

        return 0;
}

static void etm_event_destroy(struct perf_event *event)
{
        kfree(event->hw.addr_filters);
        event->hw.addr_filters = NULL;
}

static int etm_event_init(struct perf_event *event)
{
        int ret = 0;

        if (event->attr.type != etm_pmu.type) {
                ret = -ENOENT;
                goto out;
        }

        ret = etm_addr_filters_alloc(event);
        if (ret)
                goto out;

        event->destroy = etm_event_destroy;
out:
        return ret;
}

static void free_sink_buffer(struct etm_event_data *event_data)
{
        int cpu;
        cpumask_t *mask = &event_data->mask;
        struct coresight_device *sink;

        if (!event_data->snk_config)
                return;

        if (WARN_ON(cpumask_empty(mask)))
                return;

        cpu = cpumask_first(mask);
        sink = coresight_get_sink(etm_event_cpu_path(event_data, cpu));
        sink_ops(sink)->free_buffer(event_data->snk_config);
}

static void free_event_data(struct work_struct *work)
{
        int cpu;
        cpumask_t *mask;
        struct etm_event_data *event_data;

        event_data = container_of(work, struct etm_event_data, work);
        mask = &event_data->mask;

        /* Free the sink buffers, if there are any */
        free_sink_buffer(event_data);

        for_each_cpu(cpu, mask) {
                struct list_head **ppath;

                ppath = etm_event_cpu_path_ptr(event_data, cpu);
                if (!(IS_ERR_OR_NULL(*ppath)))
                        coresight_release_path(*ppath);
                *ppath = NULL;
        }

        free_percpu(event_data->path);
        kfree(event_data);
}

static void *alloc_event_data(int cpu)
{
        cpumask_t *mask;
        struct etm_event_data *event_data;

        /* First get memory for the session's data */
        event_data = kzalloc(sizeof(struct etm_event_data), GFP_KERNEL);
        if (!event_data)
                return NULL;

        mask = &event_data->mask;
        if (cpu != -1)
                cpumask_set_cpu(cpu, mask);
        else
                cpumask_copy(mask, cpu_present_mask);

        /*
         * Each CPU has a single path between source and destination.  As
         * such, allocate an array using CPU numbers as indexes.  That way a
         * path for any CPU can easily be accessed at any given time.  We
         * proceed the same way for sessions involving a single CPU.  The
         * cost of unused memory when dealing with single CPU trace
         * scenarios is small compared to the cost of searching through an
         * optimized array.
         */
        event_data->path = alloc_percpu(struct list_head *);

        if (!event_data->path) {
                kfree(event_data);
                return NULL;
        }

        return event_data;
}

static void etm_free_aux(void *data)
{
        struct etm_event_data *event_data = data;

        schedule_work(&event_data->work);
}

static void *etm_setup_aux(struct perf_event *event, void **pages,
                           int nr_pages, bool overwrite)
{
        u32 id;
        int cpu = event->cpu;
        cpumask_t *mask;
        struct coresight_device *sink = NULL;
        struct etm_event_data *event_data = NULL;

        event_data = alloc_event_data(cpu);
        if (!event_data)
                return NULL;
        INIT_WORK(&event_data->work, free_event_data);

        /* First get the selected sink from user space. */
        if (event->attr.config2) {
                id = (u32)event->attr.config2;
                sink = coresight_get_sink_by_id(id);
        }

        mask = &event_data->mask;

        /*
         * Set up the path for each CPU in the trace session.  We try to
         * build a trace path for each CPU in the mask.  If we don't find an
         * ETM for a CPU or fail to build a path, we clear that CPU from the
         * mask and continue with the rest.  If we ever try to trace on one
         * of those CPUs, we can handle it and fail the session.
         */
        for_each_cpu(cpu, mask) {
                struct list_head *path;
                struct coresight_device *csdev;

                csdev = per_cpu(csdev_src, cpu);
                /*
                 * If there is no ETM associated with this CPU, clear it from
                 * the mask and continue with the rest.  If we ever try to
                 * trace on this CPU, we handle it accordingly.
                 */
                if (!csdev) {
                        cpumask_clear_cpu(cpu, mask);
                        continue;
                }

                /*
                 * No sink provided - look for a default sink for one of the
                 * devices.  At present we only support topologies where all
                 * CPUs use the same sink [N:1], so we only need to find one
                 * sink.  The coresight_build_path() call below will remove
                 * from the mask any CPU that cannot attach to the sink, or
                 * every CPU if no sink was found.
                 */
                if (!sink)
                        sink = coresight_find_default_sink(csdev);

                /*
                 * Building a path doesn't enable it; it simply builds a
                 * list of devices from source to sink that can be
                 * referenced later when the path is actually needed.
                 */
                path = coresight_build_path(csdev, sink);
                if (IS_ERR(path)) {
                        cpumask_clear_cpu(cpu, mask);
                        continue;
                }

                *etm_event_cpu_path_ptr(event_data, cpu) = path;
        }

        /* no sink found for any CPU - cannot trace */
        if (!sink)
                goto err;

        /* If we don't have any CPUs ready for tracing, abort */
        cpu = cpumask_first(mask);
        if (cpu >= nr_cpu_ids)
                goto err;

        if (!sink_ops(sink)->alloc_buffer || !sink_ops(sink)->free_buffer)
                goto err;

        /* Allocate the sink buffer for this session */
        event_data->snk_config =
                        sink_ops(sink)->alloc_buffer(sink, event, pages,
                                                     nr_pages, overwrite);
        if (!event_data->snk_config)
                goto err;

out:
        return event_data;

err:
        etm_free_aux(event_data);
        event_data = NULL;
        goto out;
}
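
/*
 * Illustrative note (not driver code): the sink in attr.config2 above is
 * chosen from user space.  With the perf tool this is typically written as
 * an event modifier, for example:
 *
 *      perf record -e cs_etm/@tmc_etr0/u -- ls
 *
 * where '@tmc_etr0' is a hypothetical sink name; perf translates it into
 * the 32-bit hash exposed under ../cs_etm/sinks/ (see
 * etm_perf_add_symlink_sink() below) before the syscall is made.
 */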

static void etm_event_start(struct perf_event *event, int flags)
{
        int cpu = smp_processor_id();
        struct etm_event_data *event_data;
        struct perf_output_handle *handle = this_cpu_ptr(&ctx_handle);
        struct coresight_device *sink, *csdev = per_cpu(csdev_src, cpu);
        struct list_head *path;

        if (!csdev)
                goto fail;

        /*
         * Deal with the ring buffer API and get a handle on the
         * session's information.
         */
        event_data = perf_aux_output_begin(handle, event);
        if (!event_data)
                goto fail;

        /*
         * Check if this ETM is allowed to trace, as decided in
         * etm_setup_aux().  The ETM may have been left out because its
         * sink was unreachable.  We can't do much in this case if the
         * sink was specified or hinted to the driver; for now, simply
         * don't record anything on this ETM.
         */
        if (!cpumask_test_cpu(cpu, &event_data->mask))
                goto fail_end_stop;

        path = etm_event_cpu_path(event_data, cpu);
        /* We need a sink; no need to continue without one */
        sink = coresight_get_sink(path);
        if (WARN_ON_ONCE(!sink))
                goto fail_end_stop;

        /* Nothing will happen without a path */
        if (coresight_enable_path(path, CS_MODE_PERF, handle))
                goto fail_end_stop;

        /* Tell the perf core the event is alive */
        event->hw.state = 0;

        /* Finally enable the tracer */
        if (source_ops(csdev)->enable(csdev, event, CS_MODE_PERF))
                goto fail_disable_path;

out:
        return;

fail_disable_path:
        coresight_disable_path(path);
fail_end_stop:
        perf_aux_output_flag(handle, PERF_AUX_FLAG_TRUNCATED);
        perf_aux_output_end(handle, 0);
fail:
        event->hw.state = PERF_HES_STOPPED;
        goto out;
}

static void etm_event_stop(struct perf_event *event, int mode)
{
        int cpu = smp_processor_id();
        unsigned long size;
        struct coresight_device *sink, *csdev = per_cpu(csdev_src, cpu);
        struct perf_output_handle *handle = this_cpu_ptr(&ctx_handle);
        struct etm_event_data *event_data = perf_get_aux(handle);
        struct list_head *path;

        if (event->hw.state == PERF_HES_STOPPED)
                return;

        if (!csdev)
                return;

        path = etm_event_cpu_path(event_data, cpu);
        if (!path)
                return;

        sink = coresight_get_sink(path);
        if (!sink)
                return;

        /* stop tracer */
        source_ops(csdev)->disable(csdev, event);

        /* tell the core */
        event->hw.state = PERF_HES_STOPPED;

        if (mode & PERF_EF_UPDATE) {
                if (WARN_ON_ONCE(handle->event != event))
                        return;

                /* update trace information */
                if (!sink_ops(sink)->update_buffer)
                        return;

                size = sink_ops(sink)->update_buffer(sink, handle,
                                              event_data->snk_config);
                perf_aux_output_end(handle, size);
        }

        /* Disabling the path makes its elements available to other sessions */
        coresight_disable_path(path);
}

static int etm_event_add(struct perf_event *event, int mode)
{
        int ret = 0;
        struct hw_perf_event *hwc = &event->hw;

        if (mode & PERF_EF_START) {
                etm_event_start(event, 0);
                if (hwc->state & PERF_HES_STOPPED)
                        ret = -EINVAL;
        } else {
                hwc->state = PERF_HES_STOPPED;
        }

        return ret;
}

static void etm_event_del(struct perf_event *event, int mode)
{
        etm_event_stop(event, PERF_EF_UPDATE);
}
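
/*
 * Note (illustrative): add()/del() and start()/stop() follow the perf
 * core's pmu::{add,del,start,stop} contract - the core calls add() when
 * the event is scheduled onto this CPU (with PERF_EF_START to begin
 * tracing immediately) and del() when it is scheduled out, which is why
 * del() funnels into etm_event_stop() with PERF_EF_UPDATE so the trace
 * data is harvested into the AUX buffer at that point.
 */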

static int etm_addr_filters_validate(struct list_head *filters)
{
        bool range = false, address = false;
        int index = 0;
        struct perf_addr_filter *filter;

        list_for_each_entry(filter, filters, entry) {
                /*
                 * No need to go further if there's no more
                 * room for filters.
                 */
                if (++index > ETM_ADDR_CMP_MAX)
                        return -EOPNOTSUPP;

                /* filter::size==0 means single address trigger */
                if (filter->size) {
                        /*
                         * The existing code relies on START/STOP filters
                         * being address filters.
                         */
                        if (filter->action == PERF_ADDR_FILTER_ACTION_START ||
                            filter->action == PERF_ADDR_FILTER_ACTION_STOP)
                                return -EOPNOTSUPP;

                        range = true;
                } else
                        address = true;

                /*
                 * At this time we don't allow range and start/stop filtering
                 * to coexist; they have to be mutually exclusive.
                 */
                if (range && address)
                        return -EOPNOTSUPP;
        }

        return 0;
}
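
/*
 * Example filter strings (illustrative, using the perf tool's --filter
 * syntax; the binary path is hypothetical):
 *
 *      --filter 'filter 0x1000/0x100@/bin/ls'  // range: accepted
 *      --filter 'start 0x1000@/bin/ls'         // single address: accepted
 *      --filter 'filter 0x1000/0x100@/bin/ls' \
 *      --filter 'start 0x2000@/bin/ls'         // mixed: -EOPNOTSUPP
 *
 * per the mutual-exclusion rule enforced above.
 */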

static void etm_addr_filters_sync(struct perf_event *event)
{
        struct perf_addr_filters_head *head = perf_event_addr_filters(event);
        unsigned long start, stop;
        struct perf_addr_filter_range *fr = event->addr_filter_ranges;
        struct etm_filters *filters = event->hw.addr_filters;
        struct etm_filter *etm_filter;
        struct perf_addr_filter *filter;
        int i = 0;

        list_for_each_entry(filter, &head->list, entry) {
                start = fr[i].start;
                stop = start + fr[i].size;
                etm_filter = &filters->etm_filter[i];

                switch (filter->action) {
                case PERF_ADDR_FILTER_ACTION_FILTER:
                        etm_filter->start_addr = start;
                        etm_filter->stop_addr = stop;
                        etm_filter->type = ETM_ADDR_TYPE_RANGE;
                        break;
                case PERF_ADDR_FILTER_ACTION_START:
                        etm_filter->start_addr = start;
                        etm_filter->type = ETM_ADDR_TYPE_START;
                        break;
                case PERF_ADDR_FILTER_ACTION_STOP:
                        etm_filter->stop_addr = stop;
                        etm_filter->type = ETM_ADDR_TYPE_STOP;
                        break;
                }
                i++;
        }

        filters->nr_filters = i;
}

int etm_perf_symlink(struct coresight_device *csdev, bool link)
{
        char entry[sizeof("cpu9999999")];
        int ret = 0, cpu = source_ops(csdev)->cpu_id(csdev);
        struct device *pmu_dev = etm_pmu.dev;
        struct device *cs_dev = &csdev->dev;

        sprintf(entry, "cpu%d", cpu);

        if (!etm_perf_up)
                return -EPROBE_DEFER;

        if (link) {
                ret = sysfs_create_link(&pmu_dev->kobj, &cs_dev->kobj, entry);
                if (ret)
                        return ret;
                per_cpu(csdev_src, cpu) = csdev;
        } else {
                sysfs_remove_link(&pmu_dev->kobj, entry);
                per_cpu(csdev_src, cpu) = NULL;
        }

        return 0;
}
EXPORT_SYMBOL_GPL(etm_perf_symlink);
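
/*
 * Illustrative sysfs layout (assuming the PMU is named "cs_etm"): each
 * per-CPU ETM registered through etm_perf_symlink() shows up as
 *
 *      /sys/bus/event_source/devices/cs_etm/cpu0 -> <CPU 0's ETM device>
 *      /sys/bus/event_source/devices/cs_etm/cpu1 -> <CPU 1's ETM device>
 *      ...
 *
 * which lets user space discover which tracer backs each CPU.
 */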

static ssize_t etm_perf_sink_name_show(struct device *dev,
                                       struct device_attribute *dattr,
                                       char *buf)
{
        struct dev_ext_attribute *ea;

        ea = container_of(dattr, struct dev_ext_attribute, attr);
        return scnprintf(buf, PAGE_SIZE, "0x%lx\n", (unsigned long)(ea->var));
}

int etm_perf_add_symlink_sink(struct coresight_device *csdev)
{
        int ret;
        unsigned long hash;
        const char *name;
        struct device *pmu_dev = etm_pmu.dev;
        struct device *dev = &csdev->dev;
        struct dev_ext_attribute *ea;

        if (csdev->type != CORESIGHT_DEV_TYPE_SINK &&
            csdev->type != CORESIGHT_DEV_TYPE_LINKSINK)
                return -EINVAL;

        if (csdev->ea != NULL)
                return -EINVAL;

        if (!etm_perf_up)
                return -EPROBE_DEFER;

        ea = devm_kzalloc(dev, sizeof(*ea), GFP_KERNEL);
        if (!ea)
                return -ENOMEM;

        name = dev_name(dev);
        /* See coresight_get_sink_by_id() for where this hash is used */
        hash = hashlen_hash(hashlen_string(NULL, name));

        sysfs_attr_init(&ea->attr.attr);
        ea->attr.attr.name = devm_kstrdup(dev, name, GFP_KERNEL);
        if (!ea->attr.attr.name)
                return -ENOMEM;

        ea->attr.attr.mode = 0444;
        ea->attr.show = etm_perf_sink_name_show;
        ea->var = (unsigned long *)hash;

        ret = sysfs_add_file_to_group(&pmu_dev->kobj,
                                      &ea->attr.attr, "sinks");

        if (!ret)
                csdev->ea = ea;

        return ret;
}
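
/*
 * Example (illustrative shell session, sink name hypothetical): the file
 * created above lets user space map a sink name to the 'sinkid' value:
 *
 *      $ cat /sys/bus/event_source/devices/cs_etm/sinks/tmc_etr0
 *      0x7b304e32
 *
 * The value shown here is made up; it is whatever hashlen_hash() produces
 * for the sink name, and is what the perf tool writes into attr.config2.
 */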

void etm_perf_del_symlink_sink(struct coresight_device *csdev)
{
        struct device *pmu_dev = etm_pmu.dev;
        struct dev_ext_attribute *ea = csdev->ea;

        if (csdev->type != CORESIGHT_DEV_TYPE_SINK &&
            csdev->type != CORESIGHT_DEV_TYPE_LINKSINK)
                return;

        if (!ea)
                return;

        sysfs_remove_file_from_group(&pmu_dev->kobj,
                                     &ea->attr.attr, "sinks");
        csdev->ea = NULL;
}

int __init etm_perf_init(void)
{
        int ret;

        etm_pmu.capabilities            = (PERF_PMU_CAP_EXCLUSIVE |
                                           PERF_PMU_CAP_ITRACE);

        etm_pmu.attr_groups             = etm_pmu_attr_groups;
        etm_pmu.task_ctx_nr             = perf_sw_context;
        etm_pmu.read                    = etm_event_read;
        etm_pmu.event_init              = etm_event_init;
        etm_pmu.setup_aux               = etm_setup_aux;
        etm_pmu.free_aux                = etm_free_aux;
        etm_pmu.start                   = etm_event_start;
        etm_pmu.stop                    = etm_event_stop;
        etm_pmu.add                     = etm_event_add;
        etm_pmu.del                     = etm_event_del;
        etm_pmu.addr_filters_sync       = etm_addr_filters_sync;
        etm_pmu.addr_filters_validate   = etm_addr_filters_validate;
        etm_pmu.nr_addr_filters         = ETM_ADDR_CMP_MAX;

        ret = perf_pmu_register(&etm_pmu, CORESIGHT_ETM_PMU_NAME, -1);
        if (ret == 0)
                etm_perf_up = true;

        return ret;
}
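
/*
 * Minimal user-space sketch (illustrative, assuming the PMU registered as
 * "cs_etm"): open an AUX trace session on CPU 0 with perf_event_open(2).
 * The PMU type value must be read from sysfs at run time.
 *
 *      struct perf_event_attr attr = { 0 };
 *
 *      attr.size = sizeof(attr);
 *      attr.type = <value of /sys/bus/event_source/devices/cs_etm/type>;
 *      attr.config2 = <hash read from ../cs_etm/sinks/...>; // optional sink
 *      fd = syscall(__NR_perf_event_open, &attr, -1, 0, -1, 0);
 *
 * followed by mmap()ing the AUX area; see the perf_event_open(2) man page.
 */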

void __exit etm_perf_exit(void)
{
        perf_pmu_unregister(&etm_pmu);
}