/*
 * Copyright(C) 2015 Linaro Limited. All rights reserved.
 * Author: Mathieu Poirier <mathieu.poirier@linaro.org>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program.  If not, see <http://www.gnu.org/licenses/>.
 */
#include <linux/coresight.h>
#include <linux/coresight-pmu.h>
#include <linux/cpumask.h>
#include <linux/device.h>
#include <linux/list.h>
#include <linux/init.h>
#include <linux/perf_event.h>
#include <linux/percpu-defs.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/workqueue.h>

#include "coresight-etm-perf.h"
#include "coresight-priv.h"
static struct pmu etm_pmu;
static bool etm_perf_up;
/**
 * struct etm_event_data - Coresight specifics associated to an event
 * @work:	Handle to free allocated memory outside IRQ context.
 * @mask:	Hold the CPU(s) this event was set for.
 * @snk_config:	The sink configuration.
 * @path:	An array of paths, one slot per CPU.
 */
struct etm_event_data {
        struct work_struct work;
        cpumask_t mask;
        void *snk_config;
        struct list_head * __percpu *path;
};
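/*
 * Per-CPU state: the perf AUX output handle for the session running on a
 * given CPU, and the tracer (source) feeding that CPU as registered by
 * etm_perf_symlink() below.
 */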
static DEFINE_PER_CPU(struct perf_output_handle, ctx_handle);
static DEFINE_PER_CPU(struct coresight_device *, csdev_src);
/* ETMv3.5/PTM's ETMCR is 'config' */
PMU_FORMAT_ATTR(cycacc,		"config:" __stringify(ETM_OPT_CYCACC));
PMU_FORMAT_ATTR(timestamp,	"config:" __stringify(ETM_OPT_TS));
PMU_FORMAT_ATTR(retstack,	"config:" __stringify(ETM_OPT_RETSTK));
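/*
 * These attributes appear under the PMU's sysfs "format" directory (e.g.
 * /sys/bus/event_source/devices/cs_etm/format/ on a typical system) so
 * that perf can translate option strings such as "cs_etm/timestamp/" into
 * the right 'config' bits.  The path shown is illustrative.
 */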
static struct attribute *etm_config_formats_attr[] = {
        &format_attr_cycacc.attr,
        &format_attr_timestamp.attr,
        &format_attr_retstack.attr,
        NULL,
};
static const struct attribute_group etm_pmu_format_group = {
        .name   = "format",
        .attrs  = etm_config_formats_attr,
};
static const struct attribute_group *etm_pmu_attr_groups[] = {
        &etm_pmu_format_group,
        NULL,
};
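/* Accessors for the per-CPU path slot allocated in alloc_event_data(). */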
static inline struct list_head **
etm_event_cpu_path_ptr(struct etm_event_data *data, int cpu)
{
        return per_cpu_ptr(data->path, cpu);
}

static inline struct list_head *
etm_event_cpu_path(struct etm_event_data *data, int cpu)
{
        return *etm_event_cpu_path_ptr(data, cpu);
}
static void etm_event_read(struct perf_event *event) {}
static int etm_addr_filters_alloc(struct perf_event *event)
{
        struct etm_filters *filters;
        int node = event->cpu == -1 ? -1 : cpu_to_node(event->cpu);

        filters = kzalloc_node(sizeof(struct etm_filters), GFP_KERNEL, node);
        if (!filters)
                return -ENOMEM;

        if (event->parent)
                memcpy(filters, event->parent->hw.addr_filters,
                       sizeof(*filters));

        event->hw.addr_filters = filters;

        return 0;
}
static void etm_event_destroy(struct perf_event *event)
{
        kfree(event->hw.addr_filters);
        event->hw.addr_filters = NULL;
}
static int etm_event_init(struct perf_event *event)
{
        int ret = 0;

        if (event->attr.type != etm_pmu.type) {
                ret = -ENOENT;
                goto out;
        }

        ret = etm_addr_filters_alloc(event);
        if (ret)
                goto out;

        event->destroy = etm_event_destroy;
out:
        return ret;
}
static void free_event_data(struct work_struct *work)
{
        int cpu;
        cpumask_t *mask;
        struct etm_event_data *event_data;
        struct coresight_device *sink;

        event_data = container_of(work, struct etm_event_data, work);
        mask = &event_data->mask;
        /*
         * First deal with the sink configuration.  See comment in
         * etm_setup_aux() about why we take the first available path.
         */
        if (event_data->snk_config) {
                cpu = cpumask_first(mask);
                sink = coresight_get_sink(etm_event_cpu_path(event_data, cpu));
                if (sink_ops(sink)->free_buffer)
                        sink_ops(sink)->free_buffer(event_data->snk_config);
        }
        for_each_cpu(cpu, mask) {
                struct list_head **ppath;

                ppath = etm_event_cpu_path_ptr(event_data, cpu);
                if (!(IS_ERR_OR_NULL(*ppath)))
                        coresight_release_path(*ppath);
                *ppath = NULL;
        }

        free_percpu(event_data->path);
        kfree(event_data);
}
static void *alloc_event_data(int cpu)
{
        cpumask_t *mask;
        struct etm_event_data *event_data;

        /* First get memory for the session's data */
        event_data = kzalloc(sizeof(struct etm_event_data), GFP_KERNEL);
        if (!event_data)
                return NULL;

        /* Make sure nothing disappears under us */
        get_online_cpus();

        mask = &event_data->mask;
        if (cpu != -1)
                cpumask_set_cpu(cpu, mask);
        else
                cpumask_copy(mask, cpu_online_mask);
        put_online_cpus();
        /*
         * Each CPU has a single path between source and destination.  As such
         * allocate an array using CPU numbers as indexes.  That way a path
         * for any CPU can easily be accessed at any given time.  We proceed
         * the same way for sessions involving a single CPU.  The cost of
         * unused memory when dealing with single CPU trace scenarios is small
         * compared to the cost of searching through an optimized array.
         */
        event_data->path = alloc_percpu(struct list_head *);

        if (!event_data->path) {
                kfree(event_data);
                return NULL;
        }

        return event_data;
}
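/*
 * free_aux() can be invoked from contexts where blocking isn't possible;
 * defer the actual teardown to process context via the event's work struct
 * (see free_event_data() above and the @work description in struct
 * etm_event_data).
 */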
static void etm_free_aux(void *data)
{
        struct etm_event_data *event_data = data;

        schedule_work(&event_data->work);
}
static void *etm_setup_aux(int event_cpu, void **pages,
                           int nr_pages, bool overwrite)
{
        int cpu;
        cpumask_t *mask;
        struct coresight_device *sink;
        struct etm_event_data *event_data = NULL;

        event_data = alloc_event_data(event_cpu);
        if (!event_data)
                return NULL;
        INIT_WORK(&event_data->work, free_event_data);
        /*
         * In theory nothing prevents tracers in a trace session from being
         * associated with different sinks, nor having a sink per tracer.  But
         * until we have HW with this kind of topology we need to assume tracers
         * in a trace session are using the same sink.  Therefore go through
         * the coresight bus and pick the first enabled sink.
         *
         * When operated from sysFS users are responsible for enabling the sink
         * while from perf, the perf tools will do it based on the choice made
         * on the cmd line.  As such the "enable_sink" flag in sysFS is reset.
         */
        sink = coresight_get_enabled_sink(true);
        if (!sink)
                goto err;
        mask = &event_data->mask;

        /* Setup the path for each CPU in a trace session */
        for_each_cpu(cpu, mask) {
                struct list_head *path;
                struct coresight_device *csdev;

                csdev = per_cpu(csdev_src, cpu);
                if (!csdev)
                        goto err;

                /*
                 * Building a path doesn't enable it, it simply builds a
                 * list of devices from source to sink that can be
                 * referenced later when the path is actually needed.
                 */
                path = coresight_build_path(csdev, sink);
                if (IS_ERR(path))
                        goto err;

                *etm_event_cpu_path_ptr(event_data, cpu) = path;
        }
        if (!sink_ops(sink)->alloc_buffer)
                goto err;

        cpu = cpumask_first(mask);
        /* Get the AUX specific data from the sink buffer */
        event_data->snk_config =
                        sink_ops(sink)->alloc_buffer(sink, cpu, pages,
                                                     nr_pages, overwrite);
        if (!event_data->snk_config)
                goto err;

out:
        return event_data;

err:
        etm_free_aux(event_data);
        event_data = NULL;
        goto out;
}
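/*
 * etm_event_start() runs on the CPU being traced.  Order matters: claim the
 * AUX buffer, configure the sink, enable the path, and only then turn the
 * tracer on.  Each failure label below unwinds exactly the steps completed
 * before it.
 */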
static void etm_event_start(struct perf_event *event, int flags)
{
        int cpu = smp_processor_id();
        struct etm_event_data *event_data;
        struct perf_output_handle *handle = this_cpu_ptr(&ctx_handle);
        struct coresight_device *sink, *csdev = per_cpu(csdev_src, cpu);
        struct list_head *path;

        if (!csdev)
                goto fail;
        /*
         * Deal with the ring buffer API and get a handle on the
         * session's information.
         */
        event_data = perf_aux_output_begin(handle, event);
        if (!event_data)
                goto fail;
        path = etm_event_cpu_path(event_data, cpu);
        /* We need a sink, no need to continue without one */
        sink = coresight_get_sink(path);
        if (WARN_ON_ONCE(!sink || !sink_ops(sink)->set_buffer))
                goto fail_end_stop;

        /* Configure the sink */
        if (sink_ops(sink)->set_buffer(sink, handle,
                                       event_data->snk_config))
                goto fail_end_stop;

        /* Nothing will happen without a path */
        if (coresight_enable_path(path, CS_MODE_PERF))
                goto fail_end_stop;

        /* Tell the perf core the event is alive */
        event->hw.state = 0;

        /* Finally enable the tracer */
        if (source_ops(csdev)->enable(csdev, event, CS_MODE_PERF))
                goto fail_disable_path;

out:
        return;
fail_disable_path:
        coresight_disable_path(path);
fail_end_stop:
        perf_aux_output_flag(handle, PERF_AUX_FLAG_TRUNCATED);
        perf_aux_output_end(handle, 0);
fail:
        event->hw.state = PERF_HES_STOPPED;
        goto out;
}
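/*
 * etm_event_stop() unwinds in the reverse order of etm_event_start():
 * disable the tracer, harvest the trace data into the AUX buffer, then
 * release the path for other sessions.
 */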
static void etm_event_stop(struct perf_event *event, int mode)
{
        int cpu = smp_processor_id();
        unsigned long size;
        struct coresight_device *sink, *csdev = per_cpu(csdev_src, cpu);
        struct perf_output_handle *handle = this_cpu_ptr(&ctx_handle);
        struct etm_event_data *event_data = perf_get_aux(handle);
        struct list_head *path;

        if (event->hw.state == PERF_HES_STOPPED)
                return;

        if (!csdev)
                return;
        path = etm_event_cpu_path(event_data, cpu);
        if (!path)
                return;

        sink = coresight_get_sink(path);
        if (!sink)
                return;

        /* stop tracer */
        source_ops(csdev)->disable(csdev, event);

        /* tell the core */
        event->hw.state = PERF_HES_STOPPED;
        if (mode & PERF_EF_UPDATE) {
                if (WARN_ON_ONCE(handle->event != event))
                        return;

                /* update trace information */
                if (!sink_ops(sink)->update_buffer)
                        return;

                sink_ops(sink)->update_buffer(sink, handle,
                                              event_data->snk_config);

                if (!sink_ops(sink)->reset_buffer)
                        return;

                size = sink_ops(sink)->reset_buffer(sink, handle,
                                                    event_data->snk_config);

                perf_aux_output_end(handle, size);
        }
        /* Disabling the path makes its elements available to other sessions */
        coresight_disable_path(path);
}
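/*
 * perf's add/del callbacks map straight onto start/stop.  An event is
 * always deleted with PERF_EF_UPDATE so the gathered trace data is
 * accounted for in the AUX buffer before the event goes away.
 */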
static int etm_event_add(struct perf_event *event, int mode)
{
        int ret = 0;
        struct hw_perf_event *hwc = &event->hw;

        if (mode & PERF_EF_START) {
                etm_event_start(event, 0);
                if (hwc->state & PERF_HES_STOPPED)
                        ret = -EINVAL;
        } else {
                hwc->state = PERF_HES_STOPPED;
        }

        return ret;
}
static void etm_event_del(struct perf_event *event, int mode)
{
        etm_event_stop(event, PERF_EF_UPDATE);
}
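/*
 * Address filters are handed down by the perf core, e.g. from an
 * illustrative "perf record -e cs_etm// --filter 'filter main @ /bin/ls'"
 * invocation (hypothetical command line).  Validation enforces the
 * ETM_ADDR_CMP_MAX comparator limit and keeps range filters and
 * start/stop filters mutually exclusive.
 */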
static int etm_addr_filters_validate(struct list_head *filters)
{
        bool range = false, address = false;
        int index = 0;
        struct perf_addr_filter *filter;

        list_for_each_entry(filter, filters, entry) {
                /*
                 * No need to go further if there's no more
                 * room for filters.
                 */
                if (++index > ETM_ADDR_CMP_MAX)
                        return -EOPNOTSUPP;
                /*
                 * As taken from the struct perf_addr_filter documentation:
                 *	@range:	1: range, 0: address
                 *
                 * At this time we don't allow range and start/stop filtering
                 * to cohabitate, they have to be mutually exclusive.
                 */
                if ((filter->range == 1) && address)
                        return -EOPNOTSUPP;

                if ((filter->range == 0) && range)
                        return -EOPNOTSUPP;
                /*
                 * For range filtering, the second address in the address
                 * range comparator needs to be higher than the first.
                 * Otherwise we don't get a meaningful trace.
                 */
                if (filter->range && filter->size == 0)
                        return -EINVAL;

                /*
                 * Everything checks out with this filter, record what we've
                 * received before moving on to the next one.
                 */
                if (filter->range)
                        range = true;
                else
                        address = true;
        }

        return 0;
}
static void etm_addr_filters_sync(struct perf_event *event)
{
        struct perf_addr_filters_head *head = perf_event_addr_filters(event);
        unsigned long start, stop, *offs = event->addr_filters_offs;
        struct etm_filters *filters = event->hw.addr_filters;
        struct etm_filter *etm_filter;
        struct perf_addr_filter *filter;
        int i = 0;

        list_for_each_entry(filter, &head->list, entry) {
                start = filter->offset + offs[i];
                stop = start + filter->size;
                etm_filter = &filters->etm_filter[i];

                if (filter->range == 1) {
                        etm_filter->start_addr = start;
                        etm_filter->stop_addr = stop;
                        etm_filter->type = ETM_ADDR_TYPE_RANGE;
                } else {
                        if (filter->filter == 1) {
                                etm_filter->start_addr = start;
                                etm_filter->type = ETM_ADDR_TYPE_START;
                        } else {
                                etm_filter->stop_addr = stop;
                                etm_filter->type = ETM_ADDR_TYPE_STOP;
                        }
                }
                i++;
        }

        filters->nr_filters = i;
}
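/*
 * Called by the tracer drivers when a source is (un)registered.  Creates or
 * removes the per-CPU "cpuN" symlink under the PMU's sysfs node and
 * maintains the csdev_src mapping used by the callbacks above.
 */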
int etm_perf_symlink(struct coresight_device *csdev, bool link)
{
        char entry[sizeof("cpu9999999")];
        int ret = 0, cpu = source_ops(csdev)->cpu_id(csdev);
        struct device *pmu_dev = etm_pmu.dev;
        struct device *cs_dev = &csdev->dev;

        sprintf(entry, "cpu%d", cpu);

        if (!etm_perf_up)
                return -EPROBE_DEFER;

        if (link) {
                ret = sysfs_create_link(&pmu_dev->kobj, &cs_dev->kobj, entry);
                if (ret)
                        return ret;
                per_cpu(csdev_src, cpu) = csdev;
        } else {
                sysfs_remove_link(&pmu_dev->kobj, entry);
                per_cpu(csdev_src, cpu) = NULL;
        }

        return 0;
}
static int __init etm_perf_init(void)
{
        int ret;

        etm_pmu.capabilities		= PERF_PMU_CAP_EXCLUSIVE;

        etm_pmu.attr_groups		= etm_pmu_attr_groups;
        etm_pmu.task_ctx_nr		= perf_sw_context;
        etm_pmu.read			= etm_event_read;
        etm_pmu.event_init		= etm_event_init;
        etm_pmu.setup_aux		= etm_setup_aux;
        etm_pmu.free_aux		= etm_free_aux;
        etm_pmu.start			= etm_event_start;
        etm_pmu.stop			= etm_event_stop;
        etm_pmu.add			= etm_event_add;
        etm_pmu.del			= etm_event_del;
        etm_pmu.addr_filters_sync	= etm_addr_filters_sync;
        etm_pmu.addr_filters_validate	= etm_addr_filters_validate;
        etm_pmu.nr_addr_filters	= ETM_ADDR_CMP_MAX;

        ret = perf_pmu_register(&etm_pmu, CORESIGHT_ETM_PMU_NAME, -1);
        if (ret == 0)
                etm_perf_up = true;

        return ret;
}
device_initcall(etm_perf_init);