/* hwtracing/coresight/coresight-tmc-etf.c (web-viewer page header removed) */
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Copyright(C) 2016 Linaro Limited. All rights reserved.
4  * Author: Mathieu Poirier <mathieu.poirier@linaro.org>
5  */
6
7 #include <linux/circ_buf.h>
8 #include <linux/coresight.h>
9 #include <linux/perf_event.h>
10 #include <linux/slab.h>
11 #include "coresight-priv.h"
12 #include "coresight-tmc.h"
13
14 static void tmc_etb_enable_hw(struct tmc_drvdata *drvdata)
15 {
16         CS_UNLOCK(drvdata->base);
17
18         /* Wait for TMCSReady bit to be set */
19         tmc_wait_for_tmcready(drvdata);
20
21         writel_relaxed(TMC_MODE_CIRCULAR_BUFFER, drvdata->base + TMC_MODE);
22         writel_relaxed(TMC_FFCR_EN_FMT | TMC_FFCR_EN_TI |
23                        TMC_FFCR_FON_FLIN | TMC_FFCR_FON_TRIG_EVT |
24                        TMC_FFCR_TRIGON_TRIGIN,
25                        drvdata->base + TMC_FFCR);
26
27         writel_relaxed(drvdata->trigger_cntr, drvdata->base + TMC_TRG);
28         tmc_enable_hw(drvdata);
29
30         CS_LOCK(drvdata->base);
31 }
32
33 static void tmc_etb_dump_hw(struct tmc_drvdata *drvdata)
34 {
35         char *bufp;
36         u32 read_data, lost;
37         int i;
38
39         /* Check if the buffer wrapped around. */
40         lost = readl_relaxed(drvdata->base + TMC_STS) & TMC_STS_FULL;
41         bufp = drvdata->buf;
42         drvdata->len = 0;
43         while (1) {
44                 for (i = 0; i < drvdata->memwidth; i++) {
45                         read_data = readl_relaxed(drvdata->base + TMC_RRD);
46                         if (read_data == 0xFFFFFFFF)
47                                 goto done;
48                         memcpy(bufp, &read_data, 4);
49                         bufp += 4;
50                         drvdata->len += 4;
51                 }
52         }
53 done:
54         if (lost)
55                 coresight_insert_barrier_packet(drvdata->buf);
56         return;
57 }
58
/*
 * tmc_etb_disable_hw - flush, optionally harvest, then stop the ETB/ETF.
 *
 * The internal RAM is only readable while the TMC is still on, so for
 * sysFS sessions the buffer is dumped before the hardware is disabled.
 * Callers in this file invoke this with drvdata->spinlock held.
 */
static void tmc_etb_disable_hw(struct tmc_drvdata *drvdata)
{
	CS_UNLOCK(drvdata->base);

	tmc_flush_and_stop(drvdata);
	/*
	 * When operating in sysFS mode the content of the buffer needs to be
	 * read before the TMC is disabled.
	 */
	if (drvdata->mode == CS_MODE_SYSFS)
		tmc_etb_dump_hw(drvdata);
	tmc_disable_hw(drvdata);

	CS_LOCK(drvdata->base);
}
74
75 static void tmc_etf_enable_hw(struct tmc_drvdata *drvdata)
76 {
77         CS_UNLOCK(drvdata->base);
78
79         /* Wait for TMCSReady bit to be set */
80         tmc_wait_for_tmcready(drvdata);
81
82         writel_relaxed(TMC_MODE_HARDWARE_FIFO, drvdata->base + TMC_MODE);
83         writel_relaxed(TMC_FFCR_EN_FMT | TMC_FFCR_EN_TI,
84                        drvdata->base + TMC_FFCR);
85         writel_relaxed(0x0, drvdata->base + TMC_BUFWM);
86         tmc_enable_hw(drvdata);
87
88         CS_LOCK(drvdata->base);
89 }
90
/*
 * tmc_etf_disable_hw - flush and stop the ETF when used as a link.
 *
 * No buffer harvesting is needed here: in HW FIFO mode the data has
 * already been forwarded downstream.
 */
static void tmc_etf_disable_hw(struct tmc_drvdata *drvdata)
{
	CS_UNLOCK(drvdata->base);

	tmc_flush_and_stop(drvdata);
	tmc_disable_hw(drvdata);

	CS_LOCK(drvdata->base);
}
100
101 /*
102  * Return the available trace data in the buffer from @pos, with
103  * a maximum limit of @len, updating the @bufpp on where to
104  * find it.
105  */
106 ssize_t tmc_etb_get_sysfs_trace(struct tmc_drvdata *drvdata,
107                                 loff_t pos, size_t len, char **bufpp)
108 {
109         ssize_t actual = len;
110
111         /* Adjust the len to available size @pos */
112         if (pos + actual > drvdata->len)
113                 actual = drvdata->len - pos;
114         if (actual > 0)
115                 *bufpp = drvdata->buf + pos;
116         return actual;
117 }
118
/*
 * tmc_enable_etf_sink_sysfs - enable the ETB/ETF as a sink for sysFS users.
 *
 * Allocates the trace buffer outside the spinlock on first use, then arms
 * the hardware.  Multiple sysFS writers may share an already-enabled sink.
 *
 * Returns 0 on success, -EBUSY if a reader currently owns the buffer, or
 * -ENOMEM if the buffer allocation fails.
 */
static int tmc_enable_etf_sink_sysfs(struct coresight_device *csdev)
{
	int ret = 0;
	bool used = false;
	char *buf = NULL;
	unsigned long flags;
	struct tmc_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);

	/*
	 * If we don't have a buffer release the lock and allocate memory.
	 * Otherwise keep the lock and move along.
	 */
	spin_lock_irqsave(&drvdata->spinlock, flags);
	if (!drvdata->buf) {
		spin_unlock_irqrestore(&drvdata->spinlock, flags);

		/* Allocating the memory here while outside of the spinlock */
		buf = kzalloc(drvdata->size, GFP_KERNEL);
		if (!buf)
			return -ENOMEM;

		/*
		 * Let's try again.  Another thread may have installed a
		 * buffer while the lock was dropped; the drvdata::buf check
		 * further down keeps theirs and frees ours.
		 */
		spin_lock_irqsave(&drvdata->spinlock, flags);
	}

	if (drvdata->reading) {
		ret = -EBUSY;
		goto out;
	}

	/*
	 * In sysFS mode we can have multiple writers per sink.  Since this
	 * sink is already enabled no memory is needed and the HW need not be
	 * touched.
	 */
	if (drvdata->mode == CS_MODE_SYSFS)
		goto out;

	/*
	 * If drvdata::buf isn't NULL, memory was allocated for a previous
	 * trace run but wasn't read.  If so simply zero-out the memory.
	 * Otherwise use the memory allocated above.
	 *
	 * The memory is freed when users read the buffer using the
	 * /dev/xyz.{etf|etb} interface.  See tmc_read_unprepare_etf() for
	 * details.
	 */
	if (drvdata->buf) {
		memset(drvdata->buf, 0, drvdata->size);
	} else {
		used = true;
		drvdata->buf = buf;
	}

	drvdata->mode = CS_MODE_SYSFS;
	tmc_etb_enable_hw(drvdata);
out:
	spin_unlock_irqrestore(&drvdata->spinlock, flags);

	/* Free memory outside the spinlock if need be */
	if (!used)
		kfree(buf);

	return ret;
}
184
185 static int tmc_enable_etf_sink_perf(struct coresight_device *csdev)
186 {
187         int ret = 0;
188         unsigned long flags;
189         struct tmc_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
190
191         spin_lock_irqsave(&drvdata->spinlock, flags);
192         if (drvdata->reading) {
193                 ret = -EINVAL;
194                 goto out;
195         }
196
197         /*
198          * In Perf mode there can be only one writer per sink.  There
199          * is also no need to continue if the ETB/ETR is already operated
200          * from sysFS.
201          */
202         if (drvdata->mode != CS_MODE_DISABLED) {
203                 ret = -EINVAL;
204                 goto out;
205         }
206
207         drvdata->mode = CS_MODE_PERF;
208         tmc_etb_enable_hw(drvdata);
209 out:
210         spin_unlock_irqrestore(&drvdata->spinlock, flags);
211
212         return ret;
213 }
214
215 static int tmc_enable_etf_sink(struct coresight_device *csdev, u32 mode)
216 {
217         int ret;
218         struct tmc_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
219
220         switch (mode) {
221         case CS_MODE_SYSFS:
222                 ret = tmc_enable_etf_sink_sysfs(csdev);
223                 break;
224         case CS_MODE_PERF:
225                 ret = tmc_enable_etf_sink_perf(csdev);
226                 break;
227         /* We shouldn't be here */
228         default:
229                 ret = -EINVAL;
230                 break;
231         }
232
233         if (ret)
234                 return ret;
235
236         dev_info(drvdata->dev, "TMC-ETB/ETF enabled\n");
237         return 0;
238 }
239
240 static void tmc_disable_etf_sink(struct coresight_device *csdev)
241 {
242         unsigned long flags;
243         struct tmc_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
244
245         spin_lock_irqsave(&drvdata->spinlock, flags);
246         if (drvdata->reading) {
247                 spin_unlock_irqrestore(&drvdata->spinlock, flags);
248                 return;
249         }
250
251         /* Disable the TMC only if it needs to */
252         if (drvdata->mode != CS_MODE_DISABLED) {
253                 tmc_etb_disable_hw(drvdata);
254                 drvdata->mode = CS_MODE_DISABLED;
255         }
256
257         spin_unlock_irqrestore(&drvdata->spinlock, flags);
258
259         dev_info(drvdata->dev, "TMC-ETB/ETF disabled\n");
260 }
261
262 static int tmc_enable_etf_link(struct coresight_device *csdev,
263                                int inport, int outport)
264 {
265         unsigned long flags;
266         struct tmc_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
267
268         spin_lock_irqsave(&drvdata->spinlock, flags);
269         if (drvdata->reading) {
270                 spin_unlock_irqrestore(&drvdata->spinlock, flags);
271                 return -EBUSY;
272         }
273
274         tmc_etf_enable_hw(drvdata);
275         drvdata->mode = CS_MODE_SYSFS;
276         spin_unlock_irqrestore(&drvdata->spinlock, flags);
277
278         dev_info(drvdata->dev, "TMC-ETF enabled\n");
279         return 0;
280 }
281
282 static void tmc_disable_etf_link(struct coresight_device *csdev,
283                                  int inport, int outport)
284 {
285         unsigned long flags;
286         struct tmc_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
287
288         spin_lock_irqsave(&drvdata->spinlock, flags);
289         if (drvdata->reading) {
290                 spin_unlock_irqrestore(&drvdata->spinlock, flags);
291                 return;
292         }
293
294         tmc_etf_disable_hw(drvdata);
295         drvdata->mode = CS_MODE_DISABLED;
296         spin_unlock_irqrestore(&drvdata->spinlock, flags);
297
298         dev_info(drvdata->dev, "TMC-ETF disabled\n");
299 }
300
301 static void *tmc_alloc_etf_buffer(struct coresight_device *csdev, int cpu,
302                                   void **pages, int nr_pages, bool overwrite)
303 {
304         int node;
305         struct cs_buffers *buf;
306
307         node = (cpu == -1) ? NUMA_NO_NODE : cpu_to_node(cpu);
308
309         /* Allocate memory structure for interaction with Perf */
310         buf = kzalloc_node(sizeof(struct cs_buffers), GFP_KERNEL, node);
311         if (!buf)
312                 return NULL;
313
314         buf->snapshot = overwrite;
315         buf->nr_pages = nr_pages;
316         buf->data_pages = pages;
317
318         return buf;
319 }
320
/*
 * tmc_free_etf_buffer - release the cs_buffers instance allocated by
 * tmc_alloc_etf_buffer().
 */
static void tmc_free_etf_buffer(void *config)
{
	kfree(config);
}
327
328 static int tmc_set_etf_buffer(struct coresight_device *csdev,
329                               struct perf_output_handle *handle,
330                               void *sink_config)
331 {
332         int ret = 0;
333         unsigned long head;
334         struct cs_buffers *buf = sink_config;
335
336         /* wrap head around to the amount of space we have */
337         head = handle->head & ((buf->nr_pages << PAGE_SHIFT) - 1);
338
339         /* find the page to write to */
340         buf->cur = head / PAGE_SIZE;
341
342         /* and offset within that page */
343         buf->offset = head % PAGE_SIZE;
344
345         local_set(&buf->data_size, 0);
346
347         return ret;
348 }
349
350 static unsigned long tmc_reset_etf_buffer(struct coresight_device *csdev,
351                                           struct perf_output_handle *handle,
352                                           void *sink_config)
353 {
354         long size = 0;
355         struct cs_buffers *buf = sink_config;
356
357         if (buf) {
358                 /*
359                  * In snapshot mode ->data_size holds the new address of the
360                  * ring buffer's head.  The size itself is the whole address
361                  * range since we want the latest information.
362                  */
363                 if (buf->snapshot)
364                         handle->head = local_xchg(&buf->data_size,
365                                                   buf->nr_pages << PAGE_SHIFT);
366                 /*
367                  * Tell the tracer PMU how much we got in this run and if
368                  * something went wrong along the way.  Nobody else can use
369                  * this cs_buffers instance until we are done.  As such
370                  * resetting parameters here and squaring off with the ring
371                  * buffer API in the tracer PMU is fine.
372                  */
373                 size = local_xchg(&buf->data_size, 0);
374         }
375
376         return size;
377 }
378
/*
 * tmc_update_etf_buffer - move trace data from the TMC RAM into the perf
 * ring buffer.
 *
 * Flushes and stops the TMC, works out how much data is available (the
 * whole buffer if it wrapped), advances the RAM read pointer when there
 * is more data than perf ring-buffer space, then copies word by word
 * through TMC_RRD.  When data was dropped, a barrier packet is written
 * over the first words so decoders can resynchronise, and the AUX
 * record is flagged truncated.
 */
static void tmc_update_etf_buffer(struct coresight_device *csdev,
				  struct perf_output_handle *handle,
				  void *sink_config)
{
	bool lost = false;
	int i, cur;
	const u32 *barrier;
	u32 *buf_ptr;
	u64 read_ptr, write_ptr;
	u32 status, to_read;
	unsigned long offset;
	struct cs_buffers *buf = sink_config;
	struct tmc_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);

	if (!buf)
		return;

	/* This shouldn't happen - updates only make sense under perf. */
	if (WARN_ON_ONCE(drvdata->mode != CS_MODE_PERF))
		return;

	CS_UNLOCK(drvdata->base);

	tmc_flush_and_stop(drvdata);

	read_ptr = tmc_read_rrp(drvdata);
	write_ptr = tmc_read_rwp(drvdata);

	/*
	 * Get a hold of the status register and see if a wrap around
	 * has occurred.  If so adjust things accordingly.
	 */
	status = readl_relaxed(drvdata->base + TMC_STS);
	if (status & TMC_STS_FULL) {
		lost = true;
		to_read = drvdata->size;
	} else {
		to_read = CIRC_CNT(write_ptr, read_ptr, drvdata->size);
	}

	/*
	 * The TMC RAM buffer may be bigger than the space available in the
	 * perf ring buffer (handle->size).  If so advance the RRP so that we
	 * get the latest trace data.
	 */
	if (to_read > handle->size) {
		u32 mask = 0;

		/*
		 * The value written to RRP must be byte-address aligned to
		 * the width of the trace memory databus _and_ to a frame
		 * boundary (16 byte), whichever is the biggest. For example,
		 * for 32-bit, 64-bit and 128-bit wide trace memory, the four
		 * LSBs must be 0s. For 256-bit wide trace memory, the five
		 * LSBs must be 0s.
		 */
		switch (drvdata->memwidth) {
		case TMC_MEM_INTF_WIDTH_32BITS:
		case TMC_MEM_INTF_WIDTH_64BITS:
		case TMC_MEM_INTF_WIDTH_128BITS:
			mask = GENMASK(31, 4);
			break;
		case TMC_MEM_INTF_WIDTH_256BITS:
			mask = GENMASK(31, 5);
			break;
		}

		/*
		 * Make sure the new size is aligned in accordance with the
		 * requirement explained above.
		 */
		to_read = handle->size & mask;
		/* Move the RAM read pointer up */
		read_ptr = (write_ptr + drvdata->size) - to_read;
		/* Make sure we are still within our limits */
		if (read_ptr > (drvdata->size - 1))
			read_ptr -= drvdata->size;
		/* Tell the HW */
		tmc_write_rrp(drvdata, read_ptr);
		/* Skipping old data means the trace stream is truncated. */
		lost = true;
	}

	if (lost)
		perf_aux_output_flag(handle, PERF_AUX_FLAG_TRUNCATED);

	cur = buf->cur;
	offset = buf->offset;
	barrier = barrier_pkt;

	/* for every byte to read */
	for (i = 0; i < to_read; i += 4) {
		buf_ptr = buf->data_pages[cur] + offset;
		*buf_ptr = readl_relaxed(drvdata->base + TMC_RRD);

		/*
		 * Overwrite the first words with a barrier packet when data
		 * was lost so decoders can resynchronise.
		 */
		if (lost && i < CORESIGHT_BARRIER_PKT_SIZE) {
			*buf_ptr = *barrier;
			barrier++;
		}

		offset += 4;
		if (offset >= PAGE_SIZE) {
			offset = 0;
			cur++;
			/* wrap around at the end of the buffer */
			cur &= buf->nr_pages - 1;
		}
	}

	/*
	 * In snapshot mode all we have to do is communicate to
	 * perf_aux_output_end() the address of the current head.  In full
	 * trace mode the same function expects a size to move rb->aux_head
	 * forward.
	 */
	if (buf->snapshot)
		local_set(&buf->data_size, (cur * PAGE_SIZE) + offset);
	else
		local_add(to_read, &buf->data_size);

	CS_LOCK(drvdata->base);
}
500
/* Sink operations used when the TMC terminates a trace path. */
static const struct coresight_ops_sink tmc_etf_sink_ops = {
	.enable		= tmc_enable_etf_sink,
	.disable	= tmc_disable_etf_sink,
	.alloc_buffer	= tmc_alloc_etf_buffer,
	.free_buffer	= tmc_free_etf_buffer,
	.set_buffer	= tmc_set_etf_buffer,
	.reset_buffer	= tmc_reset_etf_buffer,
	.update_buffer	= tmc_update_etf_buffer,
};

/* Link operations used when the TMC-ETF sits in the middle of a path. */
static const struct coresight_ops_link tmc_etf_link_ops = {
	.enable		= tmc_enable_etf_link,
	.disable	= tmc_disable_etf_link,
};

/* An ETB can only ever act as a sink. */
const struct coresight_ops tmc_etb_cs_ops = {
	.sink_ops	= &tmc_etf_sink_ops,
};

/* An ETF can act either as a sink or as a link. */
const struct coresight_ops tmc_etf_cs_ops = {
	.sink_ops	= &tmc_etf_sink_ops,
	.link_ops	= &tmc_etf_link_ops,
};
524
525 int tmc_read_prepare_etb(struct tmc_drvdata *drvdata)
526 {
527         enum tmc_mode mode;
528         int ret = 0;
529         unsigned long flags;
530
531         /* config types are set a boot time and never change */
532         if (WARN_ON_ONCE(drvdata->config_type != TMC_CONFIG_TYPE_ETB &&
533                          drvdata->config_type != TMC_CONFIG_TYPE_ETF))
534                 return -EINVAL;
535
536         spin_lock_irqsave(&drvdata->spinlock, flags);
537
538         if (drvdata->reading) {
539                 ret = -EBUSY;
540                 goto out;
541         }
542
543         /* There is no point in reading a TMC in HW FIFO mode */
544         mode = readl_relaxed(drvdata->base + TMC_MODE);
545         if (mode != TMC_MODE_CIRCULAR_BUFFER) {
546                 ret = -EINVAL;
547                 goto out;
548         }
549
550         /* Don't interfere if operated from Perf */
551         if (drvdata->mode == CS_MODE_PERF) {
552                 ret = -EINVAL;
553                 goto out;
554         }
555
556         /* If drvdata::buf is NULL the trace data has been read already */
557         if (drvdata->buf == NULL) {
558                 ret = -EINVAL;
559                 goto out;
560         }
561
562         /* Disable the TMC if need be */
563         if (drvdata->mode == CS_MODE_SYSFS)
564                 tmc_etb_disable_hw(drvdata);
565
566         drvdata->reading = true;
567 out:
568         spin_unlock_irqrestore(&drvdata->spinlock, flags);
569
570         return ret;
571 }
572
/*
 * tmc_read_unprepare_etb - release the trace buffer after a sysFS read.
 *
 * Counterpart to tmc_read_prepare_etb().  If a sysFS trace session is
 * still active the hardware is re-armed with a zeroed buffer; otherwise
 * the buffer is detached and freed.  Returns 0 on success or -EINVAL on
 * a configuration mismatch.
 */
int tmc_read_unprepare_etb(struct tmc_drvdata *drvdata)
{
	char *buf = NULL;
	enum tmc_mode mode;
	unsigned long flags;

	/* Config types are set at boot time and never change. */
	if (WARN_ON_ONCE(drvdata->config_type != TMC_CONFIG_TYPE_ETB &&
			 drvdata->config_type != TMC_CONFIG_TYPE_ETF))
		return -EINVAL;

	spin_lock_irqsave(&drvdata->spinlock, flags);

	/* Re-enable the TMC if need be */
	if (drvdata->mode == CS_MODE_SYSFS) {
		/* There is no point in reading a TMC in HW FIFO mode */
		mode = readl_relaxed(drvdata->base + TMC_MODE);
		if (mode != TMC_MODE_CIRCULAR_BUFFER) {
			spin_unlock_irqrestore(&drvdata->spinlock, flags);
			return -EINVAL;
		}
		/*
		 * The trace run will continue with the same allocated trace
		 * buffer. As such zero-out the buffer so that we don't end
		 * up with stale data.
		 *
		 * Since the tracer is still enabled drvdata::buf
		 * can't be NULL.
		 */
		memset(drvdata->buf, 0, drvdata->size);
		tmc_etb_enable_hw(drvdata);
	} else {
		/*
		 * The ETB/ETF is not tracing and the buffer was just read.
		 * As such prepare to free the trace buffer.
		 */
		buf = drvdata->buf;
		drvdata->buf = NULL;
	}

	drvdata->reading = false;
	spin_unlock_irqrestore(&drvdata->spinlock, flags);

	/*
	 * Free allocated memory outside of the spinlock.  There is no need
	 * to assert the validity of 'buf' since calling kfree(NULL) is safe.
	 */
	kfree(buf);

	return 0;
}