// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright(C) 2016 Linaro Limited. All rights reserved.
 * Author: Mathieu Poirier <mathieu.poirier@linaro.org>
 */
#include <linux/atomic.h>
#include <linux/coresight.h>
#include <linux/dma-mapping.h>
#include <linux/iommu.h>
#include <linux/idr.h>
#include <linux/mutex.h>
#include <linux/refcount.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/vmalloc.h>
#include "coresight-catu.h"
#include "coresight-etm-perf.h"
#include "coresight-priv.h"
#include "coresight-tmc.h"
/*
 * etr_perf_buffer - Perf buffer used for ETR
 * @drvdata		- The ETR drvdata this buffer has been allocated for.
 * @etr_buf		- Actual buffer used by the ETR
 * @pid			- The PID this etr_perf_buffer belongs to.
 * @snapshot		- Perf session mode
 * @nr_pages		- Number of pages in the ring buffer.
 * @pages		- Array of Pages in the ring buffer.
 */
struct etr_perf_buffer {
	struct tmc_drvdata	*drvdata;
	struct etr_buf		*etr_buf;
	pid_t			pid;
	bool			snapshot;
	int			nr_pages;
	void			**pages;
};
/* Convert the perf index to an offset within the ETR buffer */
#define PERF_IDX2OFF(idx, buf)	((idx) % ((buf)->nr_pages << PAGE_SHIFT))
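/*
 * Illustrative example (hypothetical values): for a 16-page AUX ring buffer
 * with 4K pages the buffer spans 64KB, so PERF_IDX2OFF(0x21000, etr_perf)
 * wraps 0x21000 modulo 0x10000 and yields an offset of 0x1000.
 */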
/* Lower limit for ETR hardware buffer */
#define TMC_ETR_PERF_MIN_BUF_SIZE	SZ_1M
/*
 * The TMC ETR SG has a page size of 4K. The SG table contains pointers
 * to 4KB buffers. However, the OS may use a PAGE_SIZE different from
 * 4K (i.e., 16KB or 64KB). This implies that a single OS page could
 * contain more than one SG buffer and more than one table.
 *
 * A table entry has the following format:
 *
 * ---Bit31------------Bit4-------Bit1-----Bit0--
 * |     Address[39:12]    | SBZ |  Entry Type   |
 * ----------------------------------------------
 *
 * Address: Bits [39:12] of a physical page address. Bits [11:0] are
 *	    always zero.
 *
 * Entry type:
 *	b00 - Reserved.
 *	b01 - Last entry in the tables, points to 4K page buffer.
 *	b10 - Normal entry, points to 4K page buffer.
 *	b11 - Link. The address points to the base of next table.
 */
#define ETR_SG_PAGE_SHIFT		12
#define ETR_SG_PAGE_SIZE		(1UL << ETR_SG_PAGE_SHIFT)
#define ETR_SG_PAGES_PER_SYSPAGE	(PAGE_SIZE / ETR_SG_PAGE_SIZE)
#define ETR_SG_PTRS_PER_PAGE		(ETR_SG_PAGE_SIZE / sizeof(sgte_t))
#define ETR_SG_PTRS_PER_SYSPAGE		(PAGE_SIZE / sizeof(sgte_t))
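/*
 * Worked example (illustrative, assuming a 4-byte sgte_t): with a 64KB
 * system PAGE_SIZE, ETR_SG_PAGES_PER_SYSPAGE = 64K / 4K = 16,
 * ETR_SG_PTRS_PER_PAGE = 4K / 4 = 1024 and ETR_SG_PTRS_PER_SYSPAGE = 16384;
 * with a native 4K PAGE_SIZE all three collapse to 1, 1024 and 1024.
 */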
#define ETR_SG_ET_MASK			0x3
#define ETR_SG_ET_LAST			0x1
#define ETR_SG_ET_NORMAL		0x2
#define ETR_SG_ET_LINK			0x3

#define ETR_SG_ADDR_SHIFT		4
#define ETR_SG_ENTRY(addr, type) \
	(sgte_t)((((addr) >> ETR_SG_PAGE_SHIFT) << ETR_SG_ADDR_SHIFT) | \
		 (type & ETR_SG_ET_MASK))

#define ETR_SG_ADDR(entry) \
	(((dma_addr_t)(entry) >> ETR_SG_ADDR_SHIFT) << ETR_SG_PAGE_SHIFT)
#define ETR_SG_ET(entry)		((entry) & ETR_SG_ET_MASK)
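/*
 * Illustrative example (hypothetical address): for a 4K-aligned buffer page
 * at physical address 0x812345000, ETR_SG_ENTRY(0x812345000, ETR_SG_ET_NORMAL)
 * yields (0x812345 << 4) | 0x2 = 0x8123452; ETR_SG_ADDR() of that entry
 * recovers 0x812345000 and ETR_SG_ET() returns ETR_SG_ET_NORMAL.
 */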
/*
 * struct etr_sg_table : ETR SG Table
 * @sg_table:	Generic SG Table holding the data/table pages.
 * @hwaddr:	hardware address used by the TMC, which is the base
 *		address of the table.
 */
struct etr_sg_table {
	struct tmc_sg_table	*sg_table;
	dma_addr_t		hwaddr;
};
/*
 * tmc_etr_sg_table_entries: Total number of table entries required to map
 * @nr_pages system pages.
 *
 * We need to map @nr_pages * ETR_SG_PAGES_PER_SYSPAGE data pages.
 * Each TMC page can map (ETR_SG_PTRS_PER_PAGE - 1) buffer pointers,
 * with the last entry pointing to another page of table entries.
 * If we spill over to a new page for mapping 1 entry, we could as
 * well replace the link entry of the previous page with the last entry.
 */
static inline unsigned long __attribute_const__
tmc_etr_sg_table_entries(int nr_pages)
{
	unsigned long nr_sgpages = nr_pages * ETR_SG_PAGES_PER_SYSPAGE;
	unsigned long nr_sglinks = nr_sgpages / (ETR_SG_PTRS_PER_PAGE - 1);

	/*
	 * If we spill over to a new page for 1 entry, we could as well
	 * make it the LAST entry in the previous page, skipping the Link
	 * entry.
	 */
	if (nr_sglinks && (nr_sgpages % (ETR_SG_PTRS_PER_PAGE - 1) < 2))
		nr_sglinks--;

	return nr_sgpages + nr_sglinks;
}
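/*
 * Worked example (illustrative): with 4K system pages
 * (ETR_SG_PAGES_PER_SYSPAGE == 1, ETR_SG_PTRS_PER_PAGE == 1024),
 * nr_pages = 2048 gives nr_sgpages = 2048 and nr_sglinks = 2048 / 1023 = 2,
 * i.e. 2050 entries in total, which later maps to three table pages.
 */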
/*
 * tmc_pages_get_offset :  Go through all the pages in the tmc_pages
 * and map the device address @addr to an offset within the virtual
 * contiguous buffer.
 */
140 tmc_pages_get_offset(struct tmc_pages *tmc_pages, dma_addr_t addr)
143 dma_addr_t page_start;
145 for (i = 0; i < tmc_pages->nr_pages; i++) {
146 page_start = tmc_pages->daddrs[i];
147 if (addr >= page_start && addr < (page_start + PAGE_SIZE))
148 return i * PAGE_SIZE + (addr - page_start);
155 * tmc_pages_free : Unmap and free the pages used by tmc_pages.
156 * If the pages were not allocated in tmc_pages_alloc(), we would
157 * simply drop the refcount.
159 static void tmc_pages_free(struct tmc_pages *tmc_pages,
160 struct device *dev, enum dma_data_direction dir)
163 struct device *real_dev = dev->parent;
165 for (i = 0; i < tmc_pages->nr_pages; i++) {
166 if (tmc_pages->daddrs && tmc_pages->daddrs[i])
167 dma_unmap_page(real_dev, tmc_pages->daddrs[i],
169 if (tmc_pages->pages && tmc_pages->pages[i])
170 __free_page(tmc_pages->pages[i]);
173 kfree(tmc_pages->pages);
174 kfree(tmc_pages->daddrs);
175 tmc_pages->pages = NULL;
176 tmc_pages->daddrs = NULL;
177 tmc_pages->nr_pages = 0;
/*
 * tmc_pages_alloc : Allocate and map pages for a given @tmc_pages.
 * If @pages is not NULL, the list of page virtual addresses is
 * used as the data pages. The pages are then dma_map'ed for @dev
 * with dma_direction @dir.
 *
 * Returns 0 upon success, else the error number.
 */
188 static int tmc_pages_alloc(struct tmc_pages *tmc_pages,
189 struct device *dev, int node,
190 enum dma_data_direction dir, void **pages)
195 struct device *real_dev = dev->parent;
197 nr_pages = tmc_pages->nr_pages;
198 tmc_pages->daddrs = kcalloc(nr_pages, sizeof(*tmc_pages->daddrs),
200 if (!tmc_pages->daddrs)
202 tmc_pages->pages = kcalloc(nr_pages, sizeof(*tmc_pages->pages),
204 if (!tmc_pages->pages) {
205 kfree(tmc_pages->daddrs);
206 tmc_pages->daddrs = NULL;
210 for (i = 0; i < nr_pages; i++) {
211 if (pages && pages[i]) {
212 page = virt_to_page(pages[i]);
213 /* Hold a refcount on the page */
216 page = alloc_pages_node(node,
217 GFP_KERNEL | __GFP_ZERO, 0);
221 paddr = dma_map_page(real_dev, page, 0, PAGE_SIZE, dir);
222 if (dma_mapping_error(real_dev, paddr))
224 tmc_pages->daddrs[i] = paddr;
225 tmc_pages->pages[i] = page;
229 tmc_pages_free(tmc_pages, dev, dir);
234 tmc_sg_get_data_page_offset(struct tmc_sg_table *sg_table, dma_addr_t addr)
236 return tmc_pages_get_offset(&sg_table->data_pages, addr);
239 static inline void tmc_free_table_pages(struct tmc_sg_table *sg_table)
241 if (sg_table->table_vaddr)
242 vunmap(sg_table->table_vaddr);
243 tmc_pages_free(&sg_table->table_pages, sg_table->dev, DMA_TO_DEVICE);
246 static void tmc_free_data_pages(struct tmc_sg_table *sg_table)
248 if (sg_table->data_vaddr)
249 vunmap(sg_table->data_vaddr);
250 tmc_pages_free(&sg_table->data_pages, sg_table->dev, DMA_FROM_DEVICE);
253 void tmc_free_sg_table(struct tmc_sg_table *sg_table)
255 tmc_free_table_pages(sg_table);
256 tmc_free_data_pages(sg_table);
258 EXPORT_SYMBOL_GPL(tmc_free_sg_table);
/*
 * Alloc pages for the table. Since this will be used by the device,
 * allocate the pages closer to the device (i.e., dev_to_node(dev)
 * rather than the CPU node).
 */
265 static int tmc_alloc_table_pages(struct tmc_sg_table *sg_table)
268 struct tmc_pages *table_pages = &sg_table->table_pages;
270 rc = tmc_pages_alloc(table_pages, sg_table->dev,
271 dev_to_node(sg_table->dev),
272 DMA_TO_DEVICE, NULL);
275 sg_table->table_vaddr = vmap(table_pages->pages,
276 table_pages->nr_pages,
279 if (!sg_table->table_vaddr)
282 sg_table->table_daddr = table_pages->daddrs[0];
286 static int tmc_alloc_data_pages(struct tmc_sg_table *sg_table, void **pages)
290 /* Allocate data pages on the node requested by the caller */
291 rc = tmc_pages_alloc(&sg_table->data_pages,
292 sg_table->dev, sg_table->node,
293 DMA_FROM_DEVICE, pages);
295 sg_table->data_vaddr = vmap(sg_table->data_pages.pages,
296 sg_table->data_pages.nr_pages,
299 if (!sg_table->data_vaddr)
/*
 * tmc_alloc_sg_table: Allocate and setup dma pages for the TMC SG table
 * and data buffers. TMC writes to the data buffers and reads from the SG
 * Table pages.
 *
 * @dev		- Coresight device to which the pages should be DMA mapped.
 * @node	- Numa node for mem allocations
 * @nr_tpages	- Number of pages for the table entries.
 * @nr_dpages	- Number of pages for the data buffer.
 * @pages	- Optional list of virtual addresses of pages.
 */
316 struct tmc_sg_table *tmc_alloc_sg_table(struct device *dev,
323 struct tmc_sg_table *sg_table;
325 sg_table = kzalloc(sizeof(*sg_table), GFP_KERNEL);
327 return ERR_PTR(-ENOMEM);
328 sg_table->data_pages.nr_pages = nr_dpages;
329 sg_table->table_pages.nr_pages = nr_tpages;
330 sg_table->node = node;
333 rc = tmc_alloc_data_pages(sg_table, pages);
335 rc = tmc_alloc_table_pages(sg_table);
337 tmc_free_sg_table(sg_table);
344 EXPORT_SYMBOL_GPL(tmc_alloc_sg_table);
/*
 * tmc_sg_table_sync_data_range: Sync the data buffer written
 * by the device from @offset up to @size bytes.
 */
350 void tmc_sg_table_sync_data_range(struct tmc_sg_table *table,
351 u64 offset, u64 size)
354 int npages = DIV_ROUND_UP(size, PAGE_SIZE);
355 struct device *real_dev = table->dev->parent;
356 struct tmc_pages *data = &table->data_pages;
358 start = offset >> PAGE_SHIFT;
359 for (i = start; i < (start + npages); i++) {
360 index = i % data->nr_pages;
361 dma_sync_single_for_cpu(real_dev, data->daddrs[index],
362 PAGE_SIZE, DMA_FROM_DEVICE);
365 EXPORT_SYMBOL_GPL(tmc_sg_table_sync_data_range);
/* tmc_sg_table_sync_table: Sync the page table */
368 void tmc_sg_table_sync_table(struct tmc_sg_table *sg_table)
371 struct device *real_dev = sg_table->dev->parent;
372 struct tmc_pages *table_pages = &sg_table->table_pages;
374 for (i = 0; i < table_pages->nr_pages; i++)
375 dma_sync_single_for_device(real_dev, table_pages->daddrs[i],
376 PAGE_SIZE, DMA_TO_DEVICE);
378 EXPORT_SYMBOL_GPL(tmc_sg_table_sync_table);
/*
 * tmc_sg_table_get_data: Get the buffer pointer for data @offset
 * in the SG buffer. The @bufpp is updated to point to the buffer.
 * Returns :
 *	the length of linear data available at @offset, or
 *	<= 0 if no data is available.
 */
388 ssize_t tmc_sg_table_get_data(struct tmc_sg_table *sg_table,
389 u64 offset, size_t len, char **bufpp)
392 int pg_idx = offset >> PAGE_SHIFT;
393 int pg_offset = offset & (PAGE_SIZE - 1);
394 struct tmc_pages *data_pages = &sg_table->data_pages;
396 size = tmc_sg_table_buf_size(sg_table);
400 /* Make sure we don't go beyond the end */
401 len = (len < (size - offset)) ? len : size - offset;
402 /* Respect the page boundaries */
403 len = (len < (PAGE_SIZE - pg_offset)) ? len : (PAGE_SIZE - pg_offset);
405 *bufpp = page_address(data_pages->pages[pg_idx]) + pg_offset;
408 EXPORT_SYMBOL_GPL(tmc_sg_table_get_data);
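/*
 * Usage sketch (illustrative only; consume() is a hypothetical callback):
 * a reader can walk the whole SG data buffer in linear chunks, e.g.:
 *
 *	char *buf;
 *	u64 off = 0;
 *	ssize_t n;
 *
 *	while ((n = tmc_sg_table_get_data(sg_table, off, PAGE_SIZE, &buf)) > 0) {
 *		consume(buf, n);
 *		off += n;
 *	}
 */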
411 /* Map a dma address to virtual address */
413 tmc_sg_daddr_to_vaddr(struct tmc_sg_table *sg_table,
414 dma_addr_t addr, bool table)
418 struct tmc_pages *tmc_pages;
421 tmc_pages = &sg_table->table_pages;
422 base = (unsigned long)sg_table->table_vaddr;
424 tmc_pages = &sg_table->data_pages;
425 base = (unsigned long)sg_table->data_vaddr;
428 offset = tmc_pages_get_offset(tmc_pages, addr);
431 return base + offset;
434 /* Dump the given sg_table */
435 static void tmc_etr_sg_table_dump(struct etr_sg_table *etr_table)
440 struct tmc_sg_table *sg_table = etr_table->sg_table;
442 ptr = (sgte_t *)tmc_sg_daddr_to_vaddr(sg_table,
443 etr_table->hwaddr, true);
445 addr = ETR_SG_ADDR(*ptr);
446 switch (ETR_SG_ET(*ptr)) {
447 case ETR_SG_ET_NORMAL:
448 dev_dbg(sg_table->dev,
449 "%05d: %p\t:[N] 0x%llx\n", i, ptr, addr);
453 dev_dbg(sg_table->dev,
454 "%05d: *** %p\t:{L} 0x%llx ***\n",
456 ptr = (sgte_t *)tmc_sg_daddr_to_vaddr(sg_table,
460 dev_dbg(sg_table->dev,
461 "%05d: ### %p\t:[L] 0x%llx ###\n",
465 dev_dbg(sg_table->dev,
466 "%05d: xxx %p\t:[INVALID] 0x%llx xxx\n",
472 dev_dbg(sg_table->dev, "******* End of Table *****\n");
475 static inline void tmc_etr_sg_table_dump(struct etr_sg_table *etr_table) {}
479 * Populate the SG Table page table entries from table/data
480 * pages allocated. Each Data page has ETR_SG_PAGES_PER_SYSPAGE SG pages.
481 * So does a Table page. So we keep track of indices of the tables
482 * in each system page and move the pointers accordingly.
484 #define INC_IDX_ROUND(idx, size) ((idx) = ((idx) + 1) % (size))
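/*
 * Note: INC_IDX_ROUND() advances an index modulo @size and evaluates to the
 * new value, so with size == 4 an index cycles 0 -> 1 -> 2 -> 3 -> 0 and the
 * expression is 0 (false) exactly when it wraps, which is what the code
 * below tests for.
 */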
485 static void tmc_etr_sg_table_populate(struct etr_sg_table *etr_table)
488 int i, type, nr_entries;
489 int tpidx = 0; /* index to the current system table_page */
490 int sgtidx = 0; /* index to the sg_table within the current syspage */
491 int sgtentry = 0; /* the entry within the sg_table */
492 int dpidx = 0; /* index to the current system data_page */
493 int spidx = 0; /* index to the SG page within the current data page */
494 sgte_t *ptr; /* pointer to the table entry to fill */
495 struct tmc_sg_table *sg_table = etr_table->sg_table;
496 dma_addr_t *table_daddrs = sg_table->table_pages.daddrs;
497 dma_addr_t *data_daddrs = sg_table->data_pages.daddrs;
499 nr_entries = tmc_etr_sg_table_entries(sg_table->data_pages.nr_pages);
501 * Use the contiguous virtual address of the table to update entries.
503 ptr = sg_table->table_vaddr;
505 * Fill all the entries, except the last entry to avoid special
506 * checks within the loop.
508 for (i = 0; i < nr_entries - 1; i++) {
509 if (sgtentry == ETR_SG_PTRS_PER_PAGE - 1) {
/*
 * Last entry in a sg_table page is a link address to
 * the next table page. If this sg_table is the last
 * one in the system page, it links to the first
 * sg_table in the next system page. Otherwise, it
 * links to the next sg_table page within the system
 * page.
 */
518 if (sgtidx == ETR_SG_PAGES_PER_SYSPAGE - 1) {
519 paddr = table_daddrs[tpidx + 1];
521 paddr = table_daddrs[tpidx] +
522 (ETR_SG_PAGE_SIZE * (sgtidx + 1));
524 type = ETR_SG_ET_LINK;
527 * Update the indices to the data_pages to point to the
528 * next sg_page in the data buffer.
530 type = ETR_SG_ET_NORMAL;
531 paddr = data_daddrs[dpidx] + spidx * ETR_SG_PAGE_SIZE;
532 if (!INC_IDX_ROUND(spidx, ETR_SG_PAGES_PER_SYSPAGE))
535 *ptr++ = ETR_SG_ENTRY(paddr, type);
/*
 * Move to the next table pointer, moving the table page index
 * (tpidx) if necessary.
 */
540 if (!INC_IDX_ROUND(sgtentry, ETR_SG_PTRS_PER_PAGE)) {
541 if (!INC_IDX_ROUND(sgtidx, ETR_SG_PAGES_PER_SYSPAGE))
546 /* Set up the last entry, which is always a data pointer */
547 paddr = data_daddrs[dpidx] + spidx * ETR_SG_PAGE_SIZE;
548 *ptr++ = ETR_SG_ENTRY(paddr, ETR_SG_ET_LAST);
/*
 * tmc_init_etr_sg_table: Allocate a TMC ETR SG table, a data buffer of @size
 * and populate the table.
 *
 * @dev		- Device pointer for the TMC
 * @node	- NUMA node where the memory should be allocated
 * @size	- Total size of the data buffer
 * @pages	- Optional list of page virtual addresses
 */
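/*
 * Sizing example (illustrative, assuming 4K system pages): a 1MB data
 * buffer needs nr_dpages = 256, tmc_etr_sg_table_entries(256) = 256
 * (no link entries needed), and nr_tpages = DIV_ROUND_UP(256, 1024) = 1,
 * i.e. a single table page describes the whole buffer.
 */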
560 static struct etr_sg_table *
561 tmc_init_etr_sg_table(struct device *dev, int node,
562 unsigned long size, void **pages)
564 int nr_entries, nr_tpages;
565 int nr_dpages = size >> PAGE_SHIFT;
566 struct tmc_sg_table *sg_table;
567 struct etr_sg_table *etr_table;
569 etr_table = kzalloc(sizeof(*etr_table), GFP_KERNEL);
571 return ERR_PTR(-ENOMEM);
572 nr_entries = tmc_etr_sg_table_entries(nr_dpages);
573 nr_tpages = DIV_ROUND_UP(nr_entries, ETR_SG_PTRS_PER_SYSPAGE);
575 sg_table = tmc_alloc_sg_table(dev, node, nr_tpages, nr_dpages, pages);
576 if (IS_ERR(sg_table)) {
578 return ERR_CAST(sg_table);
581 etr_table->sg_table = sg_table;
582 /* TMC should use table base address for DBA */
583 etr_table->hwaddr = sg_table->table_daddr;
584 tmc_etr_sg_table_populate(etr_table);
585 /* Sync the table pages for the HW */
586 tmc_sg_table_sync_table(sg_table);
587 tmc_etr_sg_table_dump(etr_table);
593 * tmc_etr_alloc_flat_buf: Allocate a contiguous DMA buffer.
595 static int tmc_etr_alloc_flat_buf(struct tmc_drvdata *drvdata,
596 struct etr_buf *etr_buf, int node,
599 struct etr_flat_buf *flat_buf;
600 struct device *real_dev = drvdata->csdev->dev.parent;
602 /* We cannot reuse existing pages for flat buf */
606 flat_buf = kzalloc(sizeof(*flat_buf), GFP_KERNEL);
610 flat_buf->vaddr = dma_alloc_noncoherent(real_dev, etr_buf->size,
612 DMA_FROM_DEVICE, GFP_KERNEL);
613 if (!flat_buf->vaddr) {
618 flat_buf->size = etr_buf->size;
619 flat_buf->dev = &drvdata->csdev->dev;
620 etr_buf->hwaddr = flat_buf->daddr;
621 etr_buf->mode = ETR_MODE_FLAT;
622 etr_buf->private = flat_buf;
626 static void tmc_etr_free_flat_buf(struct etr_buf *etr_buf)
628 struct etr_flat_buf *flat_buf = etr_buf->private;
630 if (flat_buf && flat_buf->daddr) {
631 struct device *real_dev = flat_buf->dev->parent;
633 dma_free_noncoherent(real_dev, etr_buf->size,
634 flat_buf->vaddr, flat_buf->daddr,
640 static void tmc_etr_sync_flat_buf(struct etr_buf *etr_buf, u64 rrp, u64 rwp)
642 struct etr_flat_buf *flat_buf = etr_buf->private;
643 struct device *real_dev = flat_buf->dev->parent;
646 * Adjust the buffer to point to the beginning of the trace data
647 * and update the available trace data.
649 etr_buf->offset = rrp - etr_buf->hwaddr;
651 etr_buf->len = etr_buf->size;
653 etr_buf->len = rwp - rrp;
/*
 * The driver always starts tracing at the beginning of the buffer;
 * the only reason we would get a wrap around is when the buffer
 * is full. Sync the entire buffer in one go for this case.
 */
660 if (etr_buf->offset + etr_buf->len > etr_buf->size)
661 dma_sync_single_for_cpu(real_dev, flat_buf->daddr,
662 etr_buf->size, DMA_FROM_DEVICE);
664 dma_sync_single_for_cpu(real_dev,
665 flat_buf->daddr + etr_buf->offset,
666 etr_buf->len, DMA_FROM_DEVICE);
669 static ssize_t tmc_etr_get_data_flat_buf(struct etr_buf *etr_buf,
670 u64 offset, size_t len, char **bufpp)
672 struct etr_flat_buf *flat_buf = etr_buf->private;
674 *bufpp = (char *)flat_buf->vaddr + offset;
676 * tmc_etr_buf_get_data already adjusts the length to handle
677 * buffer wrapping around.
682 static const struct etr_buf_operations etr_flat_buf_ops = {
683 .alloc = tmc_etr_alloc_flat_buf,
684 .free = tmc_etr_free_flat_buf,
685 .sync = tmc_etr_sync_flat_buf,
686 .get_data = tmc_etr_get_data_flat_buf,
/*
 * tmc_etr_alloc_sg_buf: Allocate an SG buf @etr_buf. Setup the parameters
 * in @etr_buf.
 */
693 static int tmc_etr_alloc_sg_buf(struct tmc_drvdata *drvdata,
694 struct etr_buf *etr_buf, int node,
697 struct etr_sg_table *etr_table;
698 struct device *dev = &drvdata->csdev->dev;
700 etr_table = tmc_init_etr_sg_table(dev, node,
701 etr_buf->size, pages);
702 if (IS_ERR(etr_table))
704 etr_buf->hwaddr = etr_table->hwaddr;
705 etr_buf->mode = ETR_MODE_ETR_SG;
706 etr_buf->private = etr_table;
710 static void tmc_etr_free_sg_buf(struct etr_buf *etr_buf)
712 struct etr_sg_table *etr_table = etr_buf->private;
715 tmc_free_sg_table(etr_table->sg_table);
720 static ssize_t tmc_etr_get_data_sg_buf(struct etr_buf *etr_buf, u64 offset,
721 size_t len, char **bufpp)
723 struct etr_sg_table *etr_table = etr_buf->private;
725 return tmc_sg_table_get_data(etr_table->sg_table, offset, len, bufpp);
728 static void tmc_etr_sync_sg_buf(struct etr_buf *etr_buf, u64 rrp, u64 rwp)
730 long r_offset, w_offset;
731 struct etr_sg_table *etr_table = etr_buf->private;
732 struct tmc_sg_table *table = etr_table->sg_table;
734 /* Convert hw address to offset in the buffer */
735 r_offset = tmc_sg_get_data_page_offset(table, rrp);
738 "Unable to map RRP %llx to offset\n", rrp);
743 w_offset = tmc_sg_get_data_page_offset(table, rwp);
746 "Unable to map RWP %llx to offset\n", rwp);
751 etr_buf->offset = r_offset;
753 etr_buf->len = etr_buf->size;
755 etr_buf->len = ((w_offset < r_offset) ? etr_buf->size : 0) +
757 tmc_sg_table_sync_data_range(table, r_offset, etr_buf->len);
760 static const struct etr_buf_operations etr_sg_buf_ops = {
761 .alloc = tmc_etr_alloc_sg_buf,
762 .free = tmc_etr_free_sg_buf,
763 .sync = tmc_etr_sync_sg_buf,
764 .get_data = tmc_etr_get_data_sg_buf,
/*
 * TMC ETR could be connected to a CATU device, which can provide address
 * translation service. This is represented by the Output port of the TMC
 * (ETR) connected to the input port of the CATU.
 *
 * Returns	: coresight_device ptr for the CATU device if a CATU is found.
 *		: NULL otherwise.
 */
775 struct coresight_device *
776 tmc_etr_get_catu_device(struct tmc_drvdata *drvdata)
779 struct coresight_device *tmp, *etr = drvdata->csdev;
781 if (!IS_ENABLED(CONFIG_CORESIGHT_CATU))
784 for (i = 0; i < etr->pdata->nr_outport; i++) {
785 tmp = etr->pdata->conns[i].child_dev;
786 if (tmp && coresight_is_catu_device(tmp))
792 EXPORT_SYMBOL_GPL(tmc_etr_get_catu_device);
794 static inline int tmc_etr_enable_catu(struct tmc_drvdata *drvdata,
795 struct etr_buf *etr_buf)
797 struct coresight_device *catu = tmc_etr_get_catu_device(drvdata);
799 if (catu && helper_ops(catu)->enable)
800 return helper_ops(catu)->enable(catu, etr_buf);
804 static inline void tmc_etr_disable_catu(struct tmc_drvdata *drvdata)
806 struct coresight_device *catu = tmc_etr_get_catu_device(drvdata);
808 if (catu && helper_ops(catu)->disable)
809 helper_ops(catu)->disable(catu, drvdata->etr_buf);
812 static const struct etr_buf_operations *etr_buf_ops[] = {
813 [ETR_MODE_FLAT] = &etr_flat_buf_ops,
814 [ETR_MODE_ETR_SG] = &etr_sg_buf_ops,
815 [ETR_MODE_CATU] = NULL,
818 void tmc_etr_set_catu_ops(const struct etr_buf_operations *catu)
820 etr_buf_ops[ETR_MODE_CATU] = catu;
822 EXPORT_SYMBOL_GPL(tmc_etr_set_catu_ops);
824 void tmc_etr_remove_catu_ops(void)
826 etr_buf_ops[ETR_MODE_CATU] = NULL;
828 EXPORT_SYMBOL_GPL(tmc_etr_remove_catu_ops);
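/*
 * Note: the CATU helper driver is expected to register its buffer operations
 * via tmc_etr_set_catu_ops() when it loads and remove them on unload, so
 * ETR_MODE_CATU is only usable while that driver is present.
 */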
830 static inline int tmc_etr_mode_alloc_buf(int mode,
831 struct tmc_drvdata *drvdata,
832 struct etr_buf *etr_buf, int node,
839 case ETR_MODE_ETR_SG:
841 if (etr_buf_ops[mode] && etr_buf_ops[mode]->alloc)
842 rc = etr_buf_ops[mode]->alloc(drvdata, etr_buf,
845 etr_buf->ops = etr_buf_ops[mode];
/*
 * tmc_alloc_etr_buf: Allocate a buffer to be used by the ETR.
 * @drvdata	: ETR device details.
 * @size	: size of the requested buffer.
 * @flags	: Required properties for the buffer.
 * @node	: Node for memory allocations.
 * @pages	: An optional list of pages.
 */
860 static struct etr_buf *tmc_alloc_etr_buf(struct tmc_drvdata *drvdata,
861 ssize_t size, int flags,
862 int node, void **pages)
865 bool has_etr_sg, has_iommu;
866 bool has_sg, has_catu;
867 struct etr_buf *etr_buf;
868 struct device *dev = &drvdata->csdev->dev;
870 has_etr_sg = tmc_etr_has_cap(drvdata, TMC_ETR_SG);
871 has_iommu = iommu_get_domain_for_dev(dev->parent);
872 has_catu = !!tmc_etr_get_catu_device(drvdata);
874 has_sg = has_catu || has_etr_sg;
876 etr_buf = kzalloc(sizeof(*etr_buf), GFP_KERNEL);
878 return ERR_PTR(-ENOMEM);
880 etr_buf->size = size;
883 * If we have to use an existing list of pages, we cannot reliably
884 * use a contiguous DMA memory (even if we have an IOMMU). Otherwise,
885 * we use the contiguous DMA memory if at least one of the following
886 * conditions is true:
887 * a) The ETR cannot use Scatter-Gather.
888 * b) we have a backing IOMMU
889 * c) The requested memory size is smaller (< 1M).
891 * Fallback to available mechanisms.
895 (!has_sg || has_iommu || size < SZ_1M))
896 rc = tmc_etr_mode_alloc_buf(ETR_MODE_FLAT, drvdata,
897 etr_buf, node, pages);
898 if (rc && has_etr_sg)
899 rc = tmc_etr_mode_alloc_buf(ETR_MODE_ETR_SG, drvdata,
900 etr_buf, node, pages);
902 rc = tmc_etr_mode_alloc_buf(ETR_MODE_CATU, drvdata,
903 etr_buf, node, pages);
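/*
 * Example of the resulting policy (illustrative): without an IOMMU, a 4MB
 * request on an SG-capable ETR skips the flat buffer and lands in
 * ETR_MODE_ETR_SG, while a 512KB request is tried as ETR_MODE_FLAT first;
 * ETR_MODE_CATU is only attempted once the earlier modes have failed.
 */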
909 refcount_set(&etr_buf->refcount, 1);
910 dev_dbg(dev, "allocated buffer of size %ldKB in mode %d\n",
911 (unsigned long)size >> 10, etr_buf->mode);
915 static void tmc_free_etr_buf(struct etr_buf *etr_buf)
917 WARN_ON(!etr_buf->ops || !etr_buf->ops->free);
918 etr_buf->ops->free(etr_buf);
/*
 * tmc_etr_buf_get_data: Get a pointer to the trace data at @offset,
 * with a maximum of @len bytes.
 * Returns: The size of the linear data available at @offset, with *bufpp
 * updated to point to the buffer.
 */
928 static ssize_t tmc_etr_buf_get_data(struct etr_buf *etr_buf,
929 u64 offset, size_t len, char **bufpp)
931 /* Adjust the length to limit this transaction to end of buffer */
932 len = (len < (etr_buf->size - offset)) ? len : etr_buf->size - offset;
934 return etr_buf->ops->get_data(etr_buf, (u64)offset, len, bufpp);
938 tmc_etr_buf_insert_barrier_packet(struct etr_buf *etr_buf, u64 offset)
943 len = tmc_etr_buf_get_data(etr_buf, offset,
944 CORESIGHT_BARRIER_PKT_SIZE, &bufp);
945 if (WARN_ON(len < CORESIGHT_BARRIER_PKT_SIZE))
947 coresight_insert_barrier_packet(bufp);
948 return offset + CORESIGHT_BARRIER_PKT_SIZE;
952 * tmc_sync_etr_buf: Sync the trace buffer availability with drvdata.
953 * Makes sure the trace data is synced to the memory for consumption.
954 * @etr_buf->offset will hold the offset to the beginning of the trace data
955 * within the buffer, with @etr_buf->len bytes to consume.
957 static void tmc_sync_etr_buf(struct tmc_drvdata *drvdata)
959 struct etr_buf *etr_buf = drvdata->etr_buf;
963 rrp = tmc_read_rrp(drvdata);
964 rwp = tmc_read_rwp(drvdata);
965 status = readl_relaxed(drvdata->base + TMC_STS);
/*
 * If there were memory errors in the session, truncate the
 * buffer.
 */
971 if (WARN_ON_ONCE(status & TMC_STS_MEMERR)) {
972 dev_dbg(&drvdata->csdev->dev,
973 "tmc memory error detected, truncating buffer\n");
975 etr_buf->full = false;
979 etr_buf->full = !!(status & TMC_STS_FULL);
981 WARN_ON(!etr_buf->ops || !etr_buf->ops->sync);
983 etr_buf->ops->sync(etr_buf, rrp, rwp);
986 static void __tmc_etr_enable_hw(struct tmc_drvdata *drvdata)
989 struct etr_buf *etr_buf = drvdata->etr_buf;
991 CS_UNLOCK(drvdata->base);
993 /* Wait for TMCSReady bit to be set */
994 tmc_wait_for_tmcready(drvdata);
996 writel_relaxed(etr_buf->size / 4, drvdata->base + TMC_RSZ);
997 writel_relaxed(TMC_MODE_CIRCULAR_BUFFER, drvdata->base + TMC_MODE);
999 axictl = readl_relaxed(drvdata->base + TMC_AXICTL);
1000 axictl &= ~TMC_AXICTL_CLEAR_MASK;
1001 axictl |= TMC_AXICTL_PROT_CTL_B1;
1002 axictl |= TMC_AXICTL_WR_BURST(drvdata->max_burst_size);
1003 axictl |= TMC_AXICTL_AXCACHE_OS;
1005 if (tmc_etr_has_cap(drvdata, TMC_ETR_AXI_ARCACHE)) {
1006 axictl &= ~TMC_AXICTL_ARCACHE_MASK;
1007 axictl |= TMC_AXICTL_ARCACHE_OS;
1010 if (etr_buf->mode == ETR_MODE_ETR_SG)
1011 axictl |= TMC_AXICTL_SCT_GAT_MODE;
1013 writel_relaxed(axictl, drvdata->base + TMC_AXICTL);
1014 tmc_write_dba(drvdata, etr_buf->hwaddr);
/*
 * If the TMC pointers must be programmed before the session,
 * we have to set them properly (i.e., RRP/RWP to base address and
 * STS to "not full").
 */
1020 if (tmc_etr_has_cap(drvdata, TMC_ETR_SAVE_RESTORE)) {
1021 tmc_write_rrp(drvdata, etr_buf->hwaddr);
1022 tmc_write_rwp(drvdata, etr_buf->hwaddr);
1023 sts = readl_relaxed(drvdata->base + TMC_STS) & ~TMC_STS_FULL;
1024 writel_relaxed(sts, drvdata->base + TMC_STS);
1027 writel_relaxed(TMC_FFCR_EN_FMT | TMC_FFCR_EN_TI |
1028 TMC_FFCR_FON_FLIN | TMC_FFCR_FON_TRIG_EVT |
1029 TMC_FFCR_TRIGON_TRIGIN,
1030 drvdata->base + TMC_FFCR);
1031 writel_relaxed(drvdata->trigger_cntr, drvdata->base + TMC_TRG);
1032 tmc_enable_hw(drvdata);
1034 CS_LOCK(drvdata->base);
1037 static int tmc_etr_enable_hw(struct tmc_drvdata *drvdata,
1038 struct etr_buf *etr_buf)
1042 /* Callers should provide an appropriate buffer for use */
1043 if (WARN_ON(!etr_buf))
1046 if ((etr_buf->mode == ETR_MODE_ETR_SG) &&
1047 WARN_ON(!tmc_etr_has_cap(drvdata, TMC_ETR_SG)))
1050 if (WARN_ON(drvdata->etr_buf))
/*
 * If this ETR is connected to a CATU, enable it before we turn
 * it ON.
 */
1057 rc = tmc_etr_enable_catu(drvdata, etr_buf);
1060 rc = coresight_claim_device(drvdata->csdev);
1062 drvdata->etr_buf = etr_buf;
1063 __tmc_etr_enable_hw(drvdata);
/*
 * Return the available trace data in the buffer (starts at etr_buf->offset,
 * limited by etr_buf->len) from @pos, with a maximum limit of @len,
 * also updating the @bufpp on where to find it. Since the trace data
 * can start anywhere in the buffer, depending on the RRP, we adjust the
 * @len returned to handle buffer wrapping around.
 *
 * We are protected here by drvdata->reading != 0, which ensures the
 * sysfs_buf stays alive.
 */
1079 ssize_t tmc_etr_get_sysfs_trace(struct tmc_drvdata *drvdata,
1080 loff_t pos, size_t len, char **bufpp)
1083 ssize_t actual = len;
1084 struct etr_buf *etr_buf = drvdata->sysfs_buf;
1086 if (pos + actual > etr_buf->len)
1087 actual = etr_buf->len - pos;
1091 /* Compute the offset from which we read the data */
1092 offset = etr_buf->offset + pos;
1093 if (offset >= etr_buf->size)
1094 offset -= etr_buf->size;
1095 return tmc_etr_buf_get_data(etr_buf, offset, actual, bufpp);
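/*
 * Illustrative example (hypothetical numbers): with a 1MB etr_buf whose
 * trace data starts at etr_buf->offset = 0xF0000 and spans
 * etr_buf->len = 0x40000 bytes, a read at pos = 0x20000 maps to
 * offset = 0x110000, which wraps to 0x10000 within the buffer.
 */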
1098 static struct etr_buf *
1099 tmc_etr_setup_sysfs_buf(struct tmc_drvdata *drvdata)
1101 return tmc_alloc_etr_buf(drvdata, drvdata->size,
1102 0, cpu_to_node(0), NULL);
1106 tmc_etr_free_sysfs_buf(struct etr_buf *buf)
1109 tmc_free_etr_buf(buf);
1112 static void tmc_etr_sync_sysfs_buf(struct tmc_drvdata *drvdata)
1114 struct etr_buf *etr_buf = drvdata->etr_buf;
1116 if (WARN_ON(drvdata->sysfs_buf != etr_buf)) {
1117 tmc_etr_free_sysfs_buf(drvdata->sysfs_buf);
1118 drvdata->sysfs_buf = NULL;
1120 tmc_sync_etr_buf(drvdata);
/*
 * Insert barrier packets at the beginning, if there was
 * an overflow.
 */
1126 tmc_etr_buf_insert_barrier_packet(etr_buf,
1131 static void __tmc_etr_disable_hw(struct tmc_drvdata *drvdata)
1133 CS_UNLOCK(drvdata->base);
1135 tmc_flush_and_stop(drvdata);
1137 * When operating in sysFS mode the content of the buffer needs to be
1138 * read before the TMC is disabled.
1140 if (drvdata->mode == CS_MODE_SYSFS)
1141 tmc_etr_sync_sysfs_buf(drvdata);
1143 tmc_disable_hw(drvdata);
1145 CS_LOCK(drvdata->base);
1149 void tmc_etr_disable_hw(struct tmc_drvdata *drvdata)
1151 __tmc_etr_disable_hw(drvdata);
1152 /* Disable CATU device if this ETR is connected to one */
1153 tmc_etr_disable_catu(drvdata);
1154 coresight_disclaim_device(drvdata->csdev);
1155 /* Reset the ETR buf used by hardware */
1156 drvdata->etr_buf = NULL;
1159 static int tmc_enable_etr_sink_sysfs(struct coresight_device *csdev)
1162 unsigned long flags;
1163 struct tmc_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
1164 struct etr_buf *sysfs_buf = NULL, *new_buf = NULL, *free_buf = NULL;
1167 * If we are enabling the ETR from disabled state, we need to make
1168 * sure we have a buffer with the right size. The etr_buf is not reset
1169 * immediately after we stop the tracing in SYSFS mode as we wait for
1170 * the user to collect the data. We may be able to reuse the existing
1171 * buffer, provided the size matches. Any allocation has to be done
1172 * with the lock released.
1174 spin_lock_irqsave(&drvdata->spinlock, flags);
1175 sysfs_buf = READ_ONCE(drvdata->sysfs_buf);
1176 if (!sysfs_buf || (sysfs_buf->size != drvdata->size)) {
1177 spin_unlock_irqrestore(&drvdata->spinlock, flags);
1179 /* Allocate memory with the locks released */
1180 free_buf = new_buf = tmc_etr_setup_sysfs_buf(drvdata);
1181 if (IS_ERR(new_buf))
1182 return PTR_ERR(new_buf);
1184 /* Let's try again */
1185 spin_lock_irqsave(&drvdata->spinlock, flags);
1188 if (drvdata->reading || drvdata->mode == CS_MODE_PERF) {
1194 * In sysFS mode we can have multiple writers per sink. Since this
1195 * sink is already enabled no memory is needed and the HW need not be
1196 * touched, even if the buffer size has changed.
1198 if (drvdata->mode == CS_MODE_SYSFS) {
1199 atomic_inc(csdev->refcnt);
1204 * If we don't have a buffer or it doesn't match the requested size,
1205 * use the buffer allocated above. Otherwise reuse the existing buffer.
1207 sysfs_buf = READ_ONCE(drvdata->sysfs_buf);
1208 if (!sysfs_buf || (new_buf && sysfs_buf->size != new_buf->size)) {
1209 free_buf = sysfs_buf;
1210 drvdata->sysfs_buf = new_buf;
1213 ret = tmc_etr_enable_hw(drvdata, drvdata->sysfs_buf);
1215 drvdata->mode = CS_MODE_SYSFS;
1216 atomic_inc(csdev->refcnt);
1219 spin_unlock_irqrestore(&drvdata->spinlock, flags);
1221 /* Free memory outside the spinlock if need be */
1223 tmc_etr_free_sysfs_buf(free_buf);
1226 dev_dbg(&csdev->dev, "TMC-ETR enabled\n");
1232 * alloc_etr_buf: Allocate ETR buffer for use by perf.
1233 * The size of the hardware buffer is dependent on the size configured
1234 * via sysfs and the perf ring buffer size. We prefer to allocate the
1235 * largest possible size, scaling down the size by half until it
1236 * reaches a minimum limit (1M), beyond which we give up.
1238 static struct etr_buf *
1239 alloc_etr_buf(struct tmc_drvdata *drvdata, struct perf_event *event,
1240 int nr_pages, void **pages, bool snapshot)
1243 struct etr_buf *etr_buf;
1246 node = (event->cpu == -1) ? NUMA_NO_NODE : cpu_to_node(event->cpu);
1248 * Try to match the perf ring buffer size if it is larger
1249 * than the size requested via sysfs.
1251 if ((nr_pages << PAGE_SHIFT) > drvdata->size) {
1252 etr_buf = tmc_alloc_etr_buf(drvdata, (nr_pages << PAGE_SHIFT),
1254 if (!IS_ERR(etr_buf))
1259 * Else switch to configured size for this ETR
1260 * and scale down until we hit the minimum limit.
1262 size = drvdata->size;
1264 etr_buf = tmc_alloc_etr_buf(drvdata, size, 0, node, NULL);
1265 if (!IS_ERR(etr_buf))
1268 } while (size >= TMC_ETR_PERF_MIN_BUF_SIZE);
1270 return ERR_PTR(-ENOMEM);
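/*
 * Example of the scale-down behaviour (illustrative): with drvdata->size set
 * to 16MB via sysfs and a smaller perf ring buffer, the loop above tries
 * 16MB, 8MB, 4MB, 2MB and finally 1MB, stopping at the first size that can
 * actually be allocated.
 */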
1276 static struct etr_buf *
1277 get_perf_etr_buf_cpu_wide(struct tmc_drvdata *drvdata,
1278 struct perf_event *event, int nr_pages,
1279 void **pages, bool snapshot)
1282 pid_t pid = task_pid_nr(event->owner);
1283 struct etr_buf *etr_buf;
/*
 * An etr_perf_buffer is associated with an event and holds a reference
 * to the AUX ring buffer that was created for that event. In CPU-wide
 * N:1 mode multiple events (one per CPU), each with its own AUX ring
 * buffer, share a sink. As such an etr_perf_buffer is created for each
 * event but a single etr_buf associated with the ETR is shared between
 * them. The last event in a trace session will copy the content of the
 * etr_buf to its AUX ring buffer. Ring buffers associated with other
 * events are simply not used and freed as events are destroyed. We still
 * need to allocate a ring buffer for each event since we don't know
 * which event will be last.
 */
1300 * The first thing to do here is check if an etr_buf has already been
1301 * allocated for this session. If so it is shared with this event,
1302 * otherwise it is created.
1304 mutex_lock(&drvdata->idr_mutex);
1305 etr_buf = idr_find(&drvdata->idr, pid);
1307 refcount_inc(&etr_buf->refcount);
1308 mutex_unlock(&drvdata->idr_mutex);
1312 /* If we made it here no buffer has been allocated, do so now. */
1313 mutex_unlock(&drvdata->idr_mutex);
1315 etr_buf = alloc_etr_buf(drvdata, event, nr_pages, pages, snapshot);
1316 if (IS_ERR(etr_buf))
1319 /* Now that we have a buffer, add it to the IDR. */
1320 mutex_lock(&drvdata->idr_mutex);
1321 ret = idr_alloc(&drvdata->idr, etr_buf, pid, pid + 1, GFP_KERNEL);
1322 mutex_unlock(&drvdata->idr_mutex);
1324 /* Another event with this session ID has allocated this buffer. */
1325 if (ret == -ENOSPC) {
1326 tmc_free_etr_buf(etr_buf);
1330 /* The IDR can't allocate room for a new session, abandon ship. */
1331 if (ret == -ENOMEM) {
1332 tmc_free_etr_buf(etr_buf);
1333 return ERR_PTR(ret);
1340 static struct etr_buf *
1341 get_perf_etr_buf_per_thread(struct tmc_drvdata *drvdata,
1342 struct perf_event *event, int nr_pages,
1343 void **pages, bool snapshot)
1346 * In per-thread mode the etr_buf isn't shared, so just go ahead
1347 * with memory allocation.
1349 return alloc_etr_buf(drvdata, event, nr_pages, pages, snapshot);
1352 static struct etr_buf *
1353 get_perf_etr_buf(struct tmc_drvdata *drvdata, struct perf_event *event,
1354 int nr_pages, void **pages, bool snapshot)
1356 if (event->cpu == -1)
1357 return get_perf_etr_buf_per_thread(drvdata, event, nr_pages,
1360 return get_perf_etr_buf_cpu_wide(drvdata, event, nr_pages,
1364 static struct etr_perf_buffer *
1365 tmc_etr_setup_perf_buf(struct tmc_drvdata *drvdata, struct perf_event *event,
1366 int nr_pages, void **pages, bool snapshot)
1369 struct etr_buf *etr_buf;
1370 struct etr_perf_buffer *etr_perf;
1372 node = (event->cpu == -1) ? NUMA_NO_NODE : cpu_to_node(event->cpu);
1374 etr_perf = kzalloc_node(sizeof(*etr_perf), GFP_KERNEL, node);
1376 return ERR_PTR(-ENOMEM);
1378 etr_buf = get_perf_etr_buf(drvdata, event, nr_pages, pages, snapshot);
1379 if (!IS_ERR(etr_buf))
1383 return ERR_PTR(-ENOMEM);
1387 * Keep a reference to the ETR this buffer has been allocated for
1388 * in order to have access to the IDR in tmc_free_etr_buffer().
1390 etr_perf->drvdata = drvdata;
1391 etr_perf->etr_buf = etr_buf;
1397 static void *tmc_alloc_etr_buffer(struct coresight_device *csdev,
1398 struct perf_event *event, void **pages,
1399 int nr_pages, bool snapshot)
1401 struct etr_perf_buffer *etr_perf;
1402 struct tmc_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
1404 etr_perf = tmc_etr_setup_perf_buf(drvdata, event,
1405 nr_pages, pages, snapshot);
1406 if (IS_ERR(etr_perf)) {
1407 dev_dbg(&csdev->dev, "Unable to allocate ETR buffer\n");
1411 etr_perf->pid = task_pid_nr(event->owner);
1412 etr_perf->snapshot = snapshot;
1413 etr_perf->nr_pages = nr_pages;
1414 etr_perf->pages = pages;
1419 static void tmc_free_etr_buffer(void *config)
1421 struct etr_perf_buffer *etr_perf = config;
1422 struct tmc_drvdata *drvdata = etr_perf->drvdata;
1423 struct etr_buf *buf, *etr_buf = etr_perf->etr_buf;
1426 goto free_etr_perf_buffer;
1428 mutex_lock(&drvdata->idr_mutex);
1429 /* If we are not the last one to use the buffer, don't touch it. */
1430 if (!refcount_dec_and_test(&etr_buf->refcount)) {
1431 mutex_unlock(&drvdata->idr_mutex);
1432 goto free_etr_perf_buffer;
1435 /* We are the last one, remove from the IDR and free the buffer. */
1436 buf = idr_remove(&drvdata->idr, etr_perf->pid);
1437 mutex_unlock(&drvdata->idr_mutex);
1440 * Something went very wrong if the buffer associated with this ID
1441 * is not the same in the IDR. Leak to avoid use after free.
1443 if (buf && WARN_ON(buf != etr_buf))
1444 goto free_etr_perf_buffer;
1446 tmc_free_etr_buf(etr_perf->etr_buf);
1448 free_etr_perf_buffer:
1453 * tmc_etr_sync_perf_buffer: Copy the actual trace data from the hardware
1454 * buffer to the perf ring buffer.
1456 static void tmc_etr_sync_perf_buffer(struct etr_perf_buffer *etr_perf,
1458 unsigned long src_offset,
1459 unsigned long to_copy)
1462 long pg_idx, pg_offset;
1463 char **dst_pages, *src_buf;
1464 struct etr_buf *etr_buf = etr_perf->etr_buf;
1466 head = PERF_IDX2OFF(head, etr_perf);
1467 pg_idx = head >> PAGE_SHIFT;
1468 pg_offset = head & (PAGE_SIZE - 1);
1469 dst_pages = (char **)etr_perf->pages;
1471 while (to_copy > 0) {
/*
 * In one iteration, we can copy a minimum of:
 *  1) what is available in the source buffer,
 *  2) what is available in the source buffer, before it
 *     wraps around, and
 *  3) what is available in the destination page.
 */
1480 if (src_offset >= etr_buf->size)
1481 src_offset -= etr_buf->size;
1482 bytes = tmc_etr_buf_get_data(etr_buf, src_offset, to_copy,
1484 if (WARN_ON_ONCE(bytes <= 0))
1486 bytes = min(bytes, (long)(PAGE_SIZE - pg_offset));
1488 memcpy(dst_pages[pg_idx] + pg_offset, src_buf, bytes);
1492 /* Move destination pointers */
1494 if (pg_offset == PAGE_SIZE) {
1496 if (++pg_idx == etr_perf->nr_pages)
1500 /* Move source pointers */
1501 src_offset += bytes;
1506 * tmc_update_etr_buffer : Update the perf ring buffer with the
1507 * available trace data. We use software double buffering at the moment.
1509 * TODO: Add support for reusing the perf ring buffer.
1511 static unsigned long
1512 tmc_update_etr_buffer(struct coresight_device *csdev,
1513 struct perf_output_handle *handle,
1517 unsigned long flags, offset, size = 0;
1518 struct tmc_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
1519 struct etr_perf_buffer *etr_perf = config;
1520 struct etr_buf *etr_buf = etr_perf->etr_buf;
1522 spin_lock_irqsave(&drvdata->spinlock, flags);
1524 /* Don't do anything if another tracer is using this sink */
1525 if (atomic_read(csdev->refcnt) != 1) {
1526 spin_unlock_irqrestore(&drvdata->spinlock, flags);
1530 if (WARN_ON(drvdata->perf_buf != etr_buf)) {
1532 spin_unlock_irqrestore(&drvdata->spinlock, flags);
1536 CS_UNLOCK(drvdata->base);
1538 tmc_flush_and_stop(drvdata);
1539 tmc_sync_etr_buf(drvdata);
1541 CS_LOCK(drvdata->base);
1542 spin_unlock_irqrestore(&drvdata->spinlock, flags);
1544 lost = etr_buf->full;
1545 offset = etr_buf->offset;
1546 size = etr_buf->len;
/*
 * The ETR buffer may be bigger than the space available in the
 * perf ring buffer (handle->size). If so advance the offset so that we
 * get the latest trace data. In snapshot mode none of that matters
 * since we are expected to clobber stale data in favour of the latest
 * traces.
 */
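/*
 * Illustrative example (hypothetical sizes): with a 4MB etr_buf holding 3MB
 * of trace (offset 0, len 3MB) and a 1MB perf ring buffer, the copy below is
 * limited to the last (aligned) 1MB, so offset becomes roughly
 * 0 + 3MB - 1MB = 2MB and only the newest data reaches user space.
 */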
1555 if (!etr_perf->snapshot && size > handle->size) {
1556 u32 mask = tmc_get_memwidth_mask(drvdata);
1559 * Make sure the new size is aligned in accordance with the
1560 * requirement explained in function tmc_get_memwidth_mask().
1562 size = handle->size & mask;
1563 offset = etr_buf->offset + etr_buf->len - size;
1565 if (offset >= etr_buf->size)
1566 offset -= etr_buf->size;
1570 /* Insert barrier packets at the beginning, if there was an overflow */
1572 tmc_etr_buf_insert_barrier_packet(etr_buf, offset);
1573 tmc_etr_sync_perf_buffer(etr_perf, handle->head, offset, size);
/*
 * In snapshot mode we simply increment the head by the number of bytes
 * that were written. User space will figure out how many bytes to get
 * from the AUX buffer based on the position of the head.
 */
1580 if (etr_perf->snapshot)
1581 handle->head += size;
/*
 * Ensure that the AUX trace data is visible before the aux_head
 * is updated via perf_aux_output_end(), as expected by the
 * perf ring buffer.
 */
/*
 * Don't set the TRUNCATED flag in snapshot mode because 1) the
 * captured buffer is expected to be truncated and 2) a full buffer
 * prevents the event from being re-enabled by the perf core,
 * resulting in stale data being sent to user space.
 */
1597 if (!etr_perf->snapshot && lost)
1598 perf_aux_output_flag(handle, PERF_AUX_FLAG_TRUNCATED);
1602 static int tmc_enable_etr_sink_perf(struct coresight_device *csdev, void *data)
1606 unsigned long flags;
1607 struct tmc_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
1608 struct perf_output_handle *handle = data;
1609 struct etr_perf_buffer *etr_perf = etm_perf_sink_config(handle);
1611 spin_lock_irqsave(&drvdata->spinlock, flags);
1612 /* Don't use this sink if it is already claimed by sysFS */
1613 if (drvdata->mode == CS_MODE_SYSFS) {
1618 if (WARN_ON(!etr_perf || !etr_perf->etr_buf)) {
1623 /* Get a handle on the pid of the process to monitor */
1624 pid = etr_perf->pid;
1626 /* Do not proceed if this device is associated with another session */
1627 if (drvdata->pid != -1 && drvdata->pid != pid) {
1633 * No HW configuration is needed if the sink is already in
1634 * use for this session.
1636 if (drvdata->pid == pid) {
1637 atomic_inc(csdev->refcnt);
1641 rc = tmc_etr_enable_hw(drvdata, etr_perf->etr_buf);
1643 /* Associate with monitored process. */
1645 drvdata->mode = CS_MODE_PERF;
1646 drvdata->perf_buf = etr_perf->etr_buf;
1647 atomic_inc(csdev->refcnt);
1651 spin_unlock_irqrestore(&drvdata->spinlock, flags);
1655 static int tmc_enable_etr_sink(struct coresight_device *csdev,
1656 u32 mode, void *data)
1660 return tmc_enable_etr_sink_sysfs(csdev);
1662 return tmc_enable_etr_sink_perf(csdev, data);
1665 /* We shouldn't be here */
1669 static int tmc_disable_etr_sink(struct coresight_device *csdev)
1671 unsigned long flags;
1672 struct tmc_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
1674 spin_lock_irqsave(&drvdata->spinlock, flags);
1676 if (drvdata->reading) {
1677 spin_unlock_irqrestore(&drvdata->spinlock, flags);
1681 if (atomic_dec_return(csdev->refcnt)) {
1682 spin_unlock_irqrestore(&drvdata->spinlock, flags);
1686 /* Complain if we (somehow) got out of sync */
1687 WARN_ON_ONCE(drvdata->mode == CS_MODE_DISABLED);
1688 tmc_etr_disable_hw(drvdata);
1689 /* Dissociate from monitored process. */
1691 drvdata->mode = CS_MODE_DISABLED;
1692 /* Reset perf specific data */
1693 drvdata->perf_buf = NULL;
1695 spin_unlock_irqrestore(&drvdata->spinlock, flags);
1697 dev_dbg(&csdev->dev, "TMC-ETR disabled\n");
1701 static const struct coresight_ops_sink tmc_etr_sink_ops = {
1702 .enable = tmc_enable_etr_sink,
1703 .disable = tmc_disable_etr_sink,
1704 .alloc_buffer = tmc_alloc_etr_buffer,
1705 .update_buffer = tmc_update_etr_buffer,
1706 .free_buffer = tmc_free_etr_buffer,
1709 const struct coresight_ops tmc_etr_cs_ops = {
1710 .sink_ops = &tmc_etr_sink_ops,
1713 int tmc_read_prepare_etr(struct tmc_drvdata *drvdata)
1716 unsigned long flags;
/* config types are set at boot time and never change */
1719 if (WARN_ON_ONCE(drvdata->config_type != TMC_CONFIG_TYPE_ETR))
1722 spin_lock_irqsave(&drvdata->spinlock, flags);
1723 if (drvdata->reading) {
/*
 * We can safely allow reads even if the ETR is operating in PERF mode,
 * since the sysfs session is captured in mode specific data.
 * If drvdata::sysfs_buf is NULL the trace data has been read already.
 */
1733 if (!drvdata->sysfs_buf) {
1738 /* Disable the TMC if we are trying to read from a running session. */
1739 if (drvdata->mode == CS_MODE_SYSFS)
1740 __tmc_etr_disable_hw(drvdata);
1742 drvdata->reading = true;
1744 spin_unlock_irqrestore(&drvdata->spinlock, flags);
1749 int tmc_read_unprepare_etr(struct tmc_drvdata *drvdata)
1751 unsigned long flags;
1752 struct etr_buf *sysfs_buf = NULL;
/* config types are set at boot time and never change */
1755 if (WARN_ON_ONCE(drvdata->config_type != TMC_CONFIG_TYPE_ETR))
1758 spin_lock_irqsave(&drvdata->spinlock, flags);
/* Re-enable the TMC if need be */
1761 if (drvdata->mode == CS_MODE_SYSFS) {
/*
 * The trace run will continue with the same allocated trace
 * buffer. Since the tracer is still enabled, drvdata::sysfs_buf
 * can't be NULL.
 */
1767 __tmc_etr_enable_hw(drvdata);
1770 * The ETR is not tracing and the buffer was just read.
1771 * As such prepare to free the trace buffer.
1773 sysfs_buf = drvdata->sysfs_buf;
1774 drvdata->sysfs_buf = NULL;
1777 drvdata->reading = false;
1778 spin_unlock_irqrestore(&drvdata->spinlock, flags);
/* Free allocated memory outside of the spinlock */
1782 tmc_etr_free_sysfs_buf(sysfs_buf);