// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright(C) 2016 Linaro Limited. All rights reserved.
 * Author: Mathieu Poirier <mathieu.poirier@linaro.org>
 */
#include <linux/atomic.h>
#include <linux/coresight.h>
#include <linux/dma-mapping.h>
#include <linux/iommu.h>
#include <linux/idr.h>
#include <linux/mutex.h>
#include <linux/refcount.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/vmalloc.h>
#include "coresight-catu.h"
#include "coresight-etm-perf.h"
#include "coresight-priv.h"
#include "coresight-tmc.h"
/*
 * etr_perf_buffer - Perf buffer used for ETR
 * @drvdata	- The ETR drvdata this buffer has been allocated for.
 * @etr_buf	- Actual buffer used by the ETR
 * @pid		- The PID this etr_perf_buffer belongs to.
 * @snapshot	- Perf session mode
 * @head	- handle->head at the beginning of the session.
 * @nr_pages	- Number of pages in the ring buffer.
 * @pages	- Array of Pages in the ring buffer.
 */
struct etr_perf_buffer {
	struct tmc_drvdata	*drvdata;
	struct etr_buf		*etr_buf;
	pid_t			pid;
	bool			snapshot;
	unsigned long		head;
	int			nr_pages;
	void			**pages;
};
/* Convert the perf index to an offset within the ETR buffer */
#define PERF_IDX2OFF(idx, buf)	((idx) % ((buf)->nr_pages << PAGE_SHIFT))
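/*
 * Worked example (illustrative only, assuming 4K pages): with
 * nr_pages == 4 the ring buffer spans 4 << PAGE_SHIFT == 16K, so a
 * handle->head of 0x5000 wraps to PERF_IDX2OFF(0x5000, buf) == 0x1000.
 */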
/* Lower limit for ETR hardware buffer */
#define TMC_ETR_PERF_MIN_BUF_SIZE	SZ_1M
/*
 * The TMC ETR SG has a page size of 4K. The SG table contains pointers
 * to 4KB buffers. However, the OS may use a PAGE_SIZE different from
 * 4K (i.e, 16KB or 64KB). This implies that a single OS page could
 * contain more than one SG buffer and tables.
 *
 * A table entry has the following format:
 *
 * ---Bit31------------Bit4-------Bit1-----Bit0--
 * |     Address[39:12]    | SBZ |  Entry Type  |
 * ----------------------------------------------
 *
 * Address: Bits [39:12] of a physical page address. Bits [11:0] are
 *	    always zero.
 *
 * Entry type:
 *	b00 - Reserved.
 *	b01 - Last entry in the tables, points to 4K page buffer.
 *	b10 - Normal entry, points to 4K page buffer.
 *	b11 - Link. The address points to the base of next table.
 */
typedef u32 sgte_t;

#define ETR_SG_PAGE_SHIFT		12
#define ETR_SG_PAGE_SIZE		(1UL << ETR_SG_PAGE_SHIFT)
#define ETR_SG_PAGES_PER_SYSPAGE	(PAGE_SIZE / ETR_SG_PAGE_SIZE)
#define ETR_SG_PTRS_PER_PAGE		(ETR_SG_PAGE_SIZE / sizeof(sgte_t))
#define ETR_SG_PTRS_PER_SYSPAGE		(PAGE_SIZE / sizeof(sgte_t))

#define ETR_SG_ET_MASK			0x3
#define ETR_SG_ET_LAST			0x1
#define ETR_SG_ET_NORMAL		0x2
#define ETR_SG_ET_LINK			0x3

#define ETR_SG_ADDR_SHIFT		4

#define ETR_SG_ENTRY(addr, type) \
	(sgte_t)((((addr) >> ETR_SG_PAGE_SHIFT) << ETR_SG_ADDR_SHIFT) | \
		 ((type) & ETR_SG_ET_MASK))

#define ETR_SG_ADDR(entry) \
	(((dma_addr_t)(entry) >> ETR_SG_ADDR_SHIFT) << ETR_SG_PAGE_SHIFT)
#define ETR_SG_ET(entry)		((entry) & ETR_SG_ET_MASK)
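/*
 * Worked example (illustrative only): encoding a normal entry for the
 * 4K buffer at physical address 0x80001000 gives
 *	ETR_SG_ENTRY(0x80001000, ETR_SG_ET_NORMAL)
 *		== ((0x80001000 >> 12) << 4) | 0x2 == 0x800012,
 * and decoding recovers both fields:
 *	ETR_SG_ADDR(0x800012) == 0x80001000,
 *	ETR_SG_ET(0x800012)   == ETR_SG_ET_NORMAL.
 */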
/*
 * struct etr_sg_table : ETR SG Table
 * @sg_table:	Generic SG Table holding the data/table pages.
 * @hwaddr:	hwaddress used by the TMC, which is the base
 *		address of the table.
 */
struct etr_sg_table {
	struct tmc_sg_table	*sg_table;
	dma_addr_t		hwaddr;
};
/*
 * tmc_etr_sg_table_entries: Total number of table entries required to map
 * @nr_pages system pages.
 *
 * We need to map @nr_pages * ETR_SG_PAGES_PER_SYSPAGE data pages.
 * Each TMC page can map (ETR_SG_PTRS_PER_PAGE - 1) buffer pointers,
 * with the last entry pointing to another page of table entries.
 * If we spill over to a new page for mapping 1 entry, we could as
 * well replace the link entry of the previous page with the last entry.
 */
static inline unsigned long __attribute_const__
tmc_etr_sg_table_entries(int nr_pages)
{
	unsigned long nr_sgpages = nr_pages * ETR_SG_PAGES_PER_SYSPAGE;
	unsigned long nr_sglinks = nr_sgpages / (ETR_SG_PTRS_PER_PAGE - 1);
	/*
	 * If we spill over to a new page for 1 entry, we could as well
	 * make it the LAST entry in the previous page, skipping the Link
	 * entry.
	 */
	if (nr_sglinks && (nr_sgpages % (ETR_SG_PTRS_PER_PAGE - 1) < 2))
		nr_sglinks--;
	return nr_sgpages + nr_sglinks;
}
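/*
 * Worked examples (illustrative only, assuming 4K system pages, i.e.
 * ETR_SG_PAGES_PER_SYSPAGE == 1 and ETR_SG_PTRS_PER_PAGE == 1024):
 *
 * - nr_pages == 1024: nr_sgpages == 1024, nr_sglinks == 1024 / 1023 == 1,
 *   and the remainder (1) is < 2, so the link is dropped and the LAST
 *   entry takes the spill-over slot: 1024 entries in exactly one page.
 *
 * - nr_pages == 2048: nr_sgpages == 2048, nr_sglinks == 2, remainder 2,
 *   so nothing is dropped: 2050 entries across three table pages.
 */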
/*
 * tmc_pages_get_offset:  Go through all the pages in the tmc_pages
 * and map the device address @addr to an offset within the virtual
 * contiguous buffer.
 */
static long
tmc_pages_get_offset(struct tmc_pages *tmc_pages, dma_addr_t addr)
{
	int i;
	dma_addr_t page_start;

	for (i = 0; i < tmc_pages->nr_pages; i++) {
		page_start = tmc_pages->daddrs[i];
		if (addr >= page_start && addr < (page_start + PAGE_SIZE))
			return i * PAGE_SIZE + (addr - page_start);
	}

	return -EINVAL;
}

/*
 * tmc_pages_free : Unmap and free the pages used by tmc_pages.
 * If the pages were not allocated in tmc_pages_alloc(), we would
 * simply drop the refcount.
 */
static void tmc_pages_free(struct tmc_pages *tmc_pages,
			   struct device *dev, enum dma_data_direction dir)
{
	int i;
	struct device *real_dev = dev->parent;

	for (i = 0; i < tmc_pages->nr_pages; i++) {
		if (tmc_pages->daddrs && tmc_pages->daddrs[i])
			dma_unmap_page(real_dev, tmc_pages->daddrs[i],
				       PAGE_SIZE, dir);
		if (tmc_pages->pages && tmc_pages->pages[i])
			__free_page(tmc_pages->pages[i]);
	}

	kfree(tmc_pages->pages);
	kfree(tmc_pages->daddrs);
	tmc_pages->pages = NULL;
	tmc_pages->daddrs = NULL;
	tmc_pages->nr_pages = 0;
}
/*
 * tmc_pages_alloc : Allocate and map pages for a given @tmc_pages.
 * If @pages is not NULL, the list of page virtual addresses are
 * used as the data pages. The pages are then dma_map'ed for @dev
 * with dma_direction @dir.
 *
 * Returns 0 upon success, else the error number.
 */
static int tmc_pages_alloc(struct tmc_pages *tmc_pages,
			   struct device *dev, int node,
			   enum dma_data_direction dir, void **pages)
{
	int i, nr_pages;
	dma_addr_t paddr;
	struct page *page;
	struct device *real_dev = dev->parent;

	nr_pages = tmc_pages->nr_pages;
	tmc_pages->daddrs = kcalloc(nr_pages, sizeof(*tmc_pages->daddrs),
				    GFP_KERNEL);
	if (!tmc_pages->daddrs)
		return -ENOMEM;
	tmc_pages->pages = kcalloc(nr_pages, sizeof(*tmc_pages->pages),
				   GFP_KERNEL);
	if (!tmc_pages->pages) {
		kfree(tmc_pages->daddrs);
		tmc_pages->daddrs = NULL;
		return -ENOMEM;
	}

	for (i = 0; i < nr_pages; i++) {
		if (pages && pages[i]) {
			page = virt_to_page(pages[i]);
			/* Hold a refcount on the page */
			get_page(page);
		} else {
			page = alloc_pages_node(node,
						GFP_KERNEL | __GFP_ZERO, 0);
			if (!page)
				goto err;
		}
		paddr = dma_map_page(real_dev, page, 0, PAGE_SIZE, dir);
		if (dma_mapping_error(real_dev, paddr))
			goto err;
		tmc_pages->daddrs[i] = paddr;
		tmc_pages->pages[i] = page;
	}
	return 0;
err:
	tmc_pages_free(tmc_pages, dev, dir);
	return -ENOMEM;
}
static inline long
tmc_sg_get_data_page_offset(struct tmc_sg_table *sg_table, dma_addr_t addr)
{
	return tmc_pages_get_offset(&sg_table->data_pages, addr);
}

static inline void tmc_free_table_pages(struct tmc_sg_table *sg_table)
{
	if (sg_table->table_vaddr)
		vunmap(sg_table->table_vaddr);
	tmc_pages_free(&sg_table->table_pages, sg_table->dev, DMA_TO_DEVICE);
}

static void tmc_free_data_pages(struct tmc_sg_table *sg_table)
{
	if (sg_table->data_vaddr)
		vunmap(sg_table->data_vaddr);
	tmc_pages_free(&sg_table->data_pages, sg_table->dev, DMA_FROM_DEVICE);
}

void tmc_free_sg_table(struct tmc_sg_table *sg_table)
{
	tmc_free_table_pages(sg_table);
	tmc_free_data_pages(sg_table);
}
EXPORT_SYMBOL_GPL(tmc_free_sg_table);
/*
 * Alloc pages for the table. Since this will be used by the device,
 * allocate the pages closer to the device (i.e, dev_to_node(dev)
 * rather than the CPU node).
 */
static int tmc_alloc_table_pages(struct tmc_sg_table *sg_table)
{
	int rc;
	struct tmc_pages *table_pages = &sg_table->table_pages;

	rc = tmc_pages_alloc(table_pages, sg_table->dev,
			     dev_to_node(sg_table->dev),
			     DMA_TO_DEVICE, NULL);
	if (rc)
		return rc;
	sg_table->table_vaddr = vmap(table_pages->pages,
				     table_pages->nr_pages,
				     VM_MAP,
				     PAGE_KERNEL);
	if (!sg_table->table_vaddr)
		rc = -ENOMEM;
	else
		sg_table->table_daddr = table_pages->daddrs[0];
	return rc;
}

static int tmc_alloc_data_pages(struct tmc_sg_table *sg_table, void **pages)
{
	int rc;

	/* Allocate data pages on the node requested by the caller */
	rc = tmc_pages_alloc(&sg_table->data_pages,
			     sg_table->dev, sg_table->node,
			     DMA_FROM_DEVICE, pages);
	if (!rc) {
		sg_table->data_vaddr = vmap(sg_table->data_pages.pages,
					    sg_table->data_pages.nr_pages,
					    VM_MAP,
					    PAGE_KERNEL);
		if (!sg_table->data_vaddr)
			rc = -ENOMEM;
	}
	return rc;
}
/*
 * tmc_alloc_sg_table: Allocate and setup dma pages for the TMC SG table
 * and data buffers. TMC writes to the data buffers and reads from the SG
 * Table pages.
 *
 * @dev		- Coresight device to which page should be DMA mapped.
 * @node	- Numa node for mem allocations
 * @nr_tpages	- Number of pages for the table entries.
 * @nr_dpages	- Number of pages for Data buffer.
 * @pages	- Optional list of virtual address of pages.
 */
struct tmc_sg_table *tmc_alloc_sg_table(struct device *dev,
					int node,
					int nr_tpages,
					int nr_dpages,
					void **pages)
{
	long rc;
	struct tmc_sg_table *sg_table;

	sg_table = kzalloc(sizeof(*sg_table), GFP_KERNEL);
	if (!sg_table)
		return ERR_PTR(-ENOMEM);
	sg_table->data_pages.nr_pages = nr_dpages;
	sg_table->table_pages.nr_pages = nr_tpages;
	sg_table->node = node;
	sg_table->dev = dev;

	rc = tmc_alloc_data_pages(sg_table, pages);
	if (!rc)
		rc = tmc_alloc_table_pages(sg_table);
	if (rc) {
		tmc_free_sg_table(sg_table);
		kfree(sg_table);
		return ERR_PTR(rc);
	}

	return sg_table;
}
EXPORT_SYMBOL_GPL(tmc_alloc_sg_table);
/*
 * tmc_sg_table_sync_data_range: Sync the data buffer written
 * by the device from @offset up to @size bytes.
 */
void tmc_sg_table_sync_data_range(struct tmc_sg_table *table,
				  u64 offset, u64 size)
{
	int i, index, start;
	int npages = DIV_ROUND_UP(size, PAGE_SIZE);
	struct device *real_dev = table->dev->parent;
	struct tmc_pages *data = &table->data_pages;

	start = offset >> PAGE_SHIFT;
	for (i = start; i < (start + npages); i++) {
		index = i % data->nr_pages;
		dma_sync_single_for_cpu(real_dev, data->daddrs[index],
					PAGE_SIZE, DMA_FROM_DEVICE);
	}
}
EXPORT_SYMBOL_GPL(tmc_sg_table_sync_data_range);

/* tmc_sg_table_sync_table: Sync the page table */
void tmc_sg_table_sync_table(struct tmc_sg_table *sg_table)
{
	int i;
	struct device *real_dev = sg_table->dev->parent;
	struct tmc_pages *table_pages = &sg_table->table_pages;

	for (i = 0; i < table_pages->nr_pages; i++)
		dma_sync_single_for_device(real_dev, table_pages->daddrs[i],
					   PAGE_SIZE, DMA_TO_DEVICE);
}
EXPORT_SYMBOL_GPL(tmc_sg_table_sync_table);
/*
 * tmc_sg_table_get_data: Get the buffer pointer for data @offset
 * in the SG buffer. The @bufpp is updated to point to the buffer.
 * Returns :
 *	the length of linear data available at @offset.
 *	or
 *	<= 0 if no data is available.
 */
ssize_t tmc_sg_table_get_data(struct tmc_sg_table *sg_table,
			      u64 offset, size_t len, char **bufpp)
{
	size_t size;
	int pg_idx = offset >> PAGE_SHIFT;
	int pg_offset = offset & (PAGE_SIZE - 1);
	struct tmc_pages *data_pages = &sg_table->data_pages;

	size = tmc_sg_table_buf_size(sg_table);
	if (offset >= size)
		return -EINVAL;

	/* Make sure we don't go beyond the end */
	len = (len < (size - offset)) ? len : size - offset;
	/* Respect the page boundaries */
	len = (len < (PAGE_SIZE - pg_offset)) ? len : (PAGE_SIZE - pg_offset);
	if (len > 0)
		*bufpp = page_address(data_pages->pages[pg_idx]) + pg_offset;
	return len;
}
EXPORT_SYMBOL_GPL(tmc_sg_table_get_data);
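/*
 * A sketch of the expected usage (illustrative, not part of the driver;
 * "consume" is a hypothetical callback): walk the buffer in linear,
 * page-bounded chunks until the call returns <= 0.
 *
 *	char *buf;
 *	ssize_t len;
 *	u64 off = 0;
 *
 *	while ((len = tmc_sg_table_get_data(sg_table, off, SZ_4K,
 *					    &buf)) > 0) {
 *		consume(buf, len);
 *		off += len;
 *	}
 */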
#ifdef ETR_SG_DEBUG
/* Map a dma address to virtual address */
static unsigned long
tmc_sg_daddr_to_vaddr(struct tmc_sg_table *sg_table,
		      dma_addr_t addr, bool table)
{
	long offset;
	unsigned long base;
	struct tmc_pages *tmc_pages;

	if (table) {
		tmc_pages = &sg_table->table_pages;
		base = (unsigned long)sg_table->table_vaddr;
	} else {
		tmc_pages = &sg_table->data_pages;
		base = (unsigned long)sg_table->data_vaddr;
	}

	offset = tmc_pages_get_offset(tmc_pages, addr);
	if (offset < 0)
		return 0;
	return base + offset;
}

/* Dump the given sg_table */
static void tmc_etr_sg_table_dump(struct etr_sg_table *etr_table)
{
	sgte_t *ptr;
	int i = 0;
	dma_addr_t addr;
	struct tmc_sg_table *sg_table = etr_table->sg_table;

	ptr = (sgte_t *)tmc_sg_daddr_to_vaddr(sg_table,
					      etr_table->hwaddr, true);
	while (ptr) {
		addr = ETR_SG_ADDR(*ptr);
		switch (ETR_SG_ET(*ptr)) {
		case ETR_SG_ET_NORMAL:
			dev_dbg(sg_table->dev,
				"%05d: %p\t:[N] 0x%llx\n", i, ptr, addr);
			ptr++;
			break;
		case ETR_SG_ET_LINK:
			dev_dbg(sg_table->dev,
				"%05d: *** %p\t:{L} 0x%llx ***\n",
				i, ptr, addr);
			ptr = (sgte_t *)tmc_sg_daddr_to_vaddr(sg_table,
							      addr, true);
			break;
		case ETR_SG_ET_LAST:
			dev_dbg(sg_table->dev,
				"%05d: ### %p\t:[L] 0x%llx ###\n",
				i, ptr, addr);
			return;
		default:
			dev_dbg(sg_table->dev,
				"%05d: xxx %p\t:[INVALID] 0x%llx xxx\n",
				i, ptr, addr);
			return;
		}
		i++;
	}
	dev_dbg(sg_table->dev, "******* End of Table *****\n");
}
#else
static inline void tmc_etr_sg_table_dump(struct etr_sg_table *etr_table) {}
#endif
/*
 * Populate the SG Table page table entries from table/data
 * pages allocated. Each Data page has ETR_SG_PAGES_PER_SYSPAGE SG pages.
 * So does a Table page. So we keep track of indices of the tables
 * in each system page and move the pointers accordingly.
 */
#define INC_IDX_ROUND(idx, size) ((idx) = ((idx) + 1) % (size))
static void tmc_etr_sg_table_populate(struct etr_sg_table *etr_table)
{
	dma_addr_t paddr;
	int i, type, nr_entries;
	int tpidx = 0; /* index to the current system table_page */
	int sgtidx = 0;	/* index to the sg_table within the current syspage */
	int sgtentry = 0; /* the entry within the sg_table */
	int dpidx = 0; /* index to the current system data_page */
	int spidx = 0; /* index to the SG page within the current data page */
	sgte_t *ptr; /* pointer to the table entry to fill */
	struct tmc_sg_table *sg_table = etr_table->sg_table;
	dma_addr_t *table_daddrs = sg_table->table_pages.daddrs;
	dma_addr_t *data_daddrs = sg_table->data_pages.daddrs;

	nr_entries = tmc_etr_sg_table_entries(sg_table->data_pages.nr_pages);
	/*
	 * Use the contiguous virtual address of the table to update entries.
	 */
	ptr = sg_table->table_vaddr;
	/*
	 * Fill all the entries, except the last entry to avoid special
	 * checks within the loop.
	 */
	for (i = 0; i < nr_entries - 1; i++) {
		if (sgtentry == ETR_SG_PTRS_PER_PAGE - 1) {
			/*
			 * Last entry in a sg_table page is a link address to
			 * the next table page. If this sg_table is the last
			 * one in the system page, it links to the first
			 * sg_table in the next system page. Otherwise, it
			 * links to the next sg_table page within the system
			 * page.
			 */
			if (sgtidx == ETR_SG_PAGES_PER_SYSPAGE - 1) {
				paddr = table_daddrs[tpidx + 1];
			} else {
				paddr = table_daddrs[tpidx] +
					(ETR_SG_PAGE_SIZE * (sgtidx + 1));
			}
			type = ETR_SG_ET_LINK;
		} else {
			/*
			 * Update the indices to the data_pages to point to the
			 * next sg_page in the data buffer.
			 */
			type = ETR_SG_ET_NORMAL;
			paddr = data_daddrs[dpidx] + spidx * ETR_SG_PAGE_SIZE;
			if (!INC_IDX_ROUND(spidx, ETR_SG_PAGES_PER_SYSPAGE))
				dpidx++;
		}
		*ptr++ = ETR_SG_ENTRY(paddr, type);
		/*
		 * Move to the next table pointer, moving the table page index
		 * if necessary.
		 */
		if (!INC_IDX_ROUND(sgtentry, ETR_SG_PTRS_PER_PAGE)) {
			if (!INC_IDX_ROUND(sgtidx, ETR_SG_PAGES_PER_SYSPAGE))
				tpidx++;
		}
	}

	/* Set up the last entry, which is always a data pointer */
	paddr = data_daddrs[dpidx] + spidx * ETR_SG_PAGE_SIZE;
	*ptr++ = ETR_SG_ENTRY(paddr, ETR_SG_ET_LAST);
}
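/*
 * Resulting layout (illustrative only, assuming 4K system pages, i.e.
 * ETR_SG_PAGES_PER_SYSPAGE == 1 and ETR_SG_PTRS_PER_PAGE == 1024), for
 * 2048 data pages (2050 entries, see tmc_etr_sg_table_entries()):
 *
 *	table page 0: 1023 [N]ormal entries, 1 [L]ink -> table page 1
 *	table page 1: 1023 [N]ormal entries, 1 [L]ink -> table page 2
 *	table page 2: 1 [N]ormal entry, 1 [LAST] entry
 */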
/*
 * tmc_init_etr_sg_table: Allocate a TMC ETR SG table, data buffer of @size and
 * populate the table.
 *
 * @dev		- Device pointer for the TMC
 * @node	- NUMA node where the memory should be allocated
 * @size	- Total size of the data buffer
 * @pages	- Optional list of page virtual address
 */
static struct etr_sg_table *
tmc_init_etr_sg_table(struct device *dev, int node,
		      unsigned long size, void **pages)
{
	int nr_entries, nr_tpages;
	int nr_dpages = size >> PAGE_SHIFT;
	struct tmc_sg_table *sg_table;
	struct etr_sg_table *etr_table;

	etr_table = kzalloc(sizeof(*etr_table), GFP_KERNEL);
	if (!etr_table)
		return ERR_PTR(-ENOMEM);
	nr_entries = tmc_etr_sg_table_entries(nr_dpages);
	nr_tpages = DIV_ROUND_UP(nr_entries, ETR_SG_PTRS_PER_SYSPAGE);

	sg_table = tmc_alloc_sg_table(dev, node, nr_tpages, nr_dpages, pages);
	if (IS_ERR(sg_table)) {
		kfree(etr_table);
		return ERR_CAST(sg_table);
	}

	etr_table->sg_table = sg_table;
	/* TMC should use table base address for DBA */
	etr_table->hwaddr = sg_table->table_daddr;
	tmc_etr_sg_table_populate(etr_table);
	/* Sync the table pages for the HW */
	tmc_sg_table_sync_table(sg_table);
	tmc_etr_sg_table_dump(etr_table);

	return etr_table;
}
/*
 * tmc_etr_alloc_flat_buf: Allocate a contiguous DMA buffer.
 */
static int tmc_etr_alloc_flat_buf(struct tmc_drvdata *drvdata,
				  struct etr_buf *etr_buf, int node,
				  void **pages)
{
	struct etr_flat_buf *flat_buf;
	struct device *real_dev = drvdata->csdev->dev.parent;

	/* We cannot reuse existing pages for flat buf */
	if (pages)
		return -EINVAL;

	flat_buf = kzalloc(sizeof(*flat_buf), GFP_KERNEL);
	if (!flat_buf)
		return -ENOMEM;

	flat_buf->vaddr = dma_alloc_coherent(real_dev, etr_buf->size,
					     &flat_buf->daddr, GFP_KERNEL);
	if (!flat_buf->vaddr) {
		kfree(flat_buf);
		return -ENOMEM;
	}

	flat_buf->size = etr_buf->size;
	flat_buf->dev = &drvdata->csdev->dev;
	etr_buf->hwaddr = flat_buf->daddr;
	etr_buf->mode = ETR_MODE_FLAT;
	etr_buf->private = flat_buf;
	return 0;
}

static void tmc_etr_free_flat_buf(struct etr_buf *etr_buf)
{
	struct etr_flat_buf *flat_buf = etr_buf->private;

	if (flat_buf && flat_buf->daddr) {
		struct device *real_dev = flat_buf->dev->parent;

		dma_free_coherent(real_dev, flat_buf->size,
				  flat_buf->vaddr, flat_buf->daddr);
	}
	kfree(flat_buf);
}

static void tmc_etr_sync_flat_buf(struct etr_buf *etr_buf, u64 rrp, u64 rwp)
{
	/*
	 * Adjust the buffer to point to the beginning of the trace data
	 * and update the available trace data.
	 */
	etr_buf->offset = rrp - etr_buf->hwaddr;
	if (etr_buf->full)
		etr_buf->len = etr_buf->size;
	else
		etr_buf->len = rwp - rrp;
}

static ssize_t tmc_etr_get_data_flat_buf(struct etr_buf *etr_buf,
					 u64 offset, size_t len, char **bufpp)
{
	struct etr_flat_buf *flat_buf = etr_buf->private;

	*bufpp = (char *)flat_buf->vaddr + offset;
	/*
	 * tmc_etr_buf_get_data already adjusts the length to handle
	 * buffer wrapping around.
	 */
	return len;
}

static const struct etr_buf_operations etr_flat_buf_ops = {
	.alloc = tmc_etr_alloc_flat_buf,
	.free = tmc_etr_free_flat_buf,
	.sync = tmc_etr_sync_flat_buf,
	.get_data = tmc_etr_get_data_flat_buf,
};
/*
 * tmc_etr_alloc_sg_buf: Allocate an SG buf @etr_buf. Setup the parameters
 * appropriately.
 */
static int tmc_etr_alloc_sg_buf(struct tmc_drvdata *drvdata,
				struct etr_buf *etr_buf, int node,
				void **pages)
{
	struct etr_sg_table *etr_table;
	struct device *dev = &drvdata->csdev->dev;

	etr_table = tmc_init_etr_sg_table(dev, node,
					  etr_buf->size, pages);
	if (IS_ERR(etr_table))
		return -ENOMEM;
	etr_buf->hwaddr = etr_table->hwaddr;
	etr_buf->mode = ETR_MODE_ETR_SG;
	etr_buf->private = etr_table;
	return 0;
}

static void tmc_etr_free_sg_buf(struct etr_buf *etr_buf)
{
	struct etr_sg_table *etr_table = etr_buf->private;

	if (etr_table) {
		tmc_free_sg_table(etr_table->sg_table);
		kfree(etr_table);
	}
}

static ssize_t tmc_etr_get_data_sg_buf(struct etr_buf *etr_buf, u64 offset,
				       size_t len, char **bufpp)
{
	struct etr_sg_table *etr_table = etr_buf->private;

	return tmc_sg_table_get_data(etr_table->sg_table, offset, len, bufpp);
}

static void tmc_etr_sync_sg_buf(struct etr_buf *etr_buf, u64 rrp, u64 rwp)
{
	long r_offset, w_offset;
	struct etr_sg_table *etr_table = etr_buf->private;
	struct tmc_sg_table *table = etr_table->sg_table;

	/* Convert hw address to offset in the buffer */
	r_offset = tmc_sg_get_data_page_offset(table, rrp);
	if (r_offset < 0) {
		dev_warn(table->dev,
			 "Unable to map RRP %llx to offset\n", rrp);
		etr_buf->len = 0;
		return;
	}

	w_offset = tmc_sg_get_data_page_offset(table, rwp);
	if (w_offset < 0) {
		dev_warn(table->dev,
			 "Unable to map RWP %llx to offset\n", rwp);
		etr_buf->len = 0;
		return;
	}

	etr_buf->offset = r_offset;
	if (etr_buf->full)
		etr_buf->len = etr_buf->size;
	else
		etr_buf->len = ((w_offset < r_offset) ? etr_buf->size : 0) +
				w_offset - r_offset;
	tmc_sg_table_sync_data_range(table, r_offset, etr_buf->len);
}

static const struct etr_buf_operations etr_sg_buf_ops = {
	.alloc = tmc_etr_alloc_sg_buf,
	.free = tmc_etr_free_sg_buf,
	.sync = tmc_etr_sync_sg_buf,
	.get_data = tmc_etr_get_data_sg_buf,
};
/*
 * TMC ETR could be connected to a CATU device, which can provide address
 * translation service. This is represented by the Output port of the TMC
 * (ETR) connected to the input port of the CATU.
 *
 * Returns	: coresight_device ptr for the CATU device if a CATU is found.
 *		: NULL otherwise.
 */
struct coresight_device *
tmc_etr_get_catu_device(struct tmc_drvdata *drvdata)
{
	int i;
	struct coresight_device *tmp, *etr = drvdata->csdev;

	if (!IS_ENABLED(CONFIG_CORESIGHT_CATU))
		return NULL;

	for (i = 0; i < etr->pdata->nr_outport; i++) {
		tmp = etr->pdata->conns[i].child_dev;
		if (tmp && coresight_is_catu_device(tmp))
			return tmp;
	}

	return NULL;
}
EXPORT_SYMBOL_GPL(tmc_etr_get_catu_device);

static inline int tmc_etr_enable_catu(struct tmc_drvdata *drvdata,
				      struct etr_buf *etr_buf)
{
	struct coresight_device *catu = tmc_etr_get_catu_device(drvdata);

	if (catu && helper_ops(catu)->enable)
		return helper_ops(catu)->enable(catu, etr_buf);
	return 0;
}

static inline void tmc_etr_disable_catu(struct tmc_drvdata *drvdata)
{
	struct coresight_device *catu = tmc_etr_get_catu_device(drvdata);

	if (catu && helper_ops(catu)->disable)
		helper_ops(catu)->disable(catu, drvdata->etr_buf);
}

static const struct etr_buf_operations *etr_buf_ops[] = {
	[ETR_MODE_FLAT] = &etr_flat_buf_ops,
	[ETR_MODE_ETR_SG] = &etr_sg_buf_ops,
	[ETR_MODE_CATU] = NULL,
};

void tmc_etr_set_catu_ops(const struct etr_buf_operations *catu)
{
	etr_buf_ops[ETR_MODE_CATU] = catu;
}
EXPORT_SYMBOL_GPL(tmc_etr_set_catu_ops);

void tmc_etr_remove_catu_ops(void)
{
	etr_buf_ops[ETR_MODE_CATU] = NULL;
}
EXPORT_SYMBOL_GPL(tmc_etr_remove_catu_ops);
static inline int tmc_etr_mode_alloc_buf(int mode,
					 struct tmc_drvdata *drvdata,
					 struct etr_buf *etr_buf, int node,
					 void **pages)
{
	int rc = -EINVAL;

	switch (mode) {
	case ETR_MODE_FLAT:
	case ETR_MODE_ETR_SG:
	case ETR_MODE_CATU:
		if (etr_buf_ops[mode] && etr_buf_ops[mode]->alloc)
			rc = etr_buf_ops[mode]->alloc(drvdata, etr_buf,
						      node, pages);
		if (!rc)
			etr_buf->ops = etr_buf_ops[mode];
		return rc;
	default:
		return -EINVAL;
	}
}

/*
 * tmc_alloc_etr_buf: Allocate a buffer used by ETR.
 * @drvdata	: ETR device details.
 * @size	: size of the requested buffer.
 * @flags	: Required properties for the buffer.
 * @node	: Node for memory allocations.
 * @pages	: An optional list of pages.
 */
static struct etr_buf *tmc_alloc_etr_buf(struct tmc_drvdata *drvdata,
					 ssize_t size, int flags,
					 int node, void **pages)
{
	int rc = -ENOMEM;
	bool has_etr_sg, has_iommu;
	bool has_sg, has_catu;
	struct etr_buf *etr_buf;
	struct device *dev = &drvdata->csdev->dev;

	has_etr_sg = tmc_etr_has_cap(drvdata, TMC_ETR_SG);
	has_iommu = iommu_get_domain_for_dev(dev->parent);
	has_catu = !!tmc_etr_get_catu_device(drvdata);

	has_sg = has_catu || has_etr_sg;

	etr_buf = kzalloc(sizeof(*etr_buf), GFP_KERNEL);
	if (!etr_buf)
		return ERR_PTR(-ENOMEM);

	etr_buf->size = size;

	/*
	 * If we have to use an existing list of pages, we cannot reliably
	 * use a contiguous DMA memory (even if we have an IOMMU). Otherwise,
	 * we use the contiguous DMA memory if at least one of the following
	 * conditions is true:
	 * a) The ETR cannot use Scatter-Gather.
	 * b) we have a backing IOMMU
	 * c) The requested memory size is smaller (< 1M).
	 *
	 * Fall back to available mechanisms.
	 */
	if (!pages &&
	    (!has_sg || has_iommu || size < SZ_1M))
		rc = tmc_etr_mode_alloc_buf(ETR_MODE_FLAT, drvdata,
					    etr_buf, node, pages);
	if (rc && has_etr_sg)
		rc = tmc_etr_mode_alloc_buf(ETR_MODE_ETR_SG, drvdata,
					    etr_buf, node, pages);
	if (rc && has_catu)
		rc = tmc_etr_mode_alloc_buf(ETR_MODE_CATU, drvdata,
					    etr_buf, node, pages);
	if (rc) {
		kfree(etr_buf);
		return ERR_PTR(rc);
	}

	refcount_set(&etr_buf->refcount, 1);
	dev_dbg(dev, "allocated buffer of size %ldKB in mode %d\n",
		(unsigned long)size >> 10, etr_buf->mode);
	return etr_buf;
}
static void tmc_free_etr_buf(struct etr_buf *etr_buf)
{
	WARN_ON(!etr_buf->ops || !etr_buf->ops->free);
	etr_buf->ops->free(etr_buf);
	kfree(etr_buf);
}

/*
 * tmc_etr_buf_get_data: Get the pointer to the trace data at @offset
 * with a maximum of @len bytes.
 * Returns: The size of the linear data available @pos, with *bufpp
 * updated to point to the buffer.
 */
static ssize_t tmc_etr_buf_get_data(struct etr_buf *etr_buf,
				    u64 offset, size_t len, char **bufpp)
{
	/* Adjust the length to limit this transaction to end of buffer */
	len = (len < (etr_buf->size - offset)) ? len : etr_buf->size - offset;

	return etr_buf->ops->get_data(etr_buf, (u64)offset, len, bufpp);
}

static inline s64
tmc_etr_buf_insert_barrier_packet(struct etr_buf *etr_buf, u64 offset)
{
	ssize_t len;
	char *bufp;

	len = tmc_etr_buf_get_data(etr_buf, offset,
				   CORESIGHT_BARRIER_PKT_SIZE, &bufp);
	if (WARN_ON(len < CORESIGHT_BARRIER_PKT_SIZE))
		return -EINVAL;
	coresight_insert_barrier_packet(bufp);
	return offset + CORESIGHT_BARRIER_PKT_SIZE;
}
/*
 * tmc_sync_etr_buf: Sync the trace buffer availability with drvdata.
 * Makes sure the trace data is synced to the memory for consumption.
 * @etr_buf->offset will hold the offset to the beginning of the trace data
 * within the buffer, with @etr_buf->len bytes to consume.
 */
static void tmc_sync_etr_buf(struct tmc_drvdata *drvdata)
{
	struct etr_buf *etr_buf = drvdata->etr_buf;
	u64 rrp, rwp;
	u32 status;

	rrp = tmc_read_rrp(drvdata);
	rwp = tmc_read_rwp(drvdata);
	status = readl_relaxed(drvdata->base + TMC_STS);

	/*
	 * If there were memory errors in the session, truncate the
	 * buffer.
	 */
	if (WARN_ON_ONCE(status & TMC_STS_MEMERR)) {
		dev_dbg(&drvdata->csdev->dev,
			"tmc memory error detected, truncating buffer\n");
		etr_buf->len = 0;
		etr_buf->full = false;
		return;
	}

	etr_buf->full = !!(status & TMC_STS_FULL);

	WARN_ON(!etr_buf->ops || !etr_buf->ops->sync);

	etr_buf->ops->sync(etr_buf, rrp, rwp);
}
static void __tmc_etr_enable_hw(struct tmc_drvdata *drvdata)
{
	u32 axictl, sts;
	struct etr_buf *etr_buf = drvdata->etr_buf;

	CS_UNLOCK(drvdata->base);

	/* Wait for TMCSReady bit to be set */
	tmc_wait_for_tmcready(drvdata);

	writel_relaxed(etr_buf->size / 4, drvdata->base + TMC_RSZ);
	writel_relaxed(TMC_MODE_CIRCULAR_BUFFER, drvdata->base + TMC_MODE);

	axictl = readl_relaxed(drvdata->base + TMC_AXICTL);
	axictl &= ~TMC_AXICTL_CLEAR_MASK;
	axictl |= (TMC_AXICTL_PROT_CTL_B1 | TMC_AXICTL_WR_BURST_16);
	axictl |= TMC_AXICTL_AXCACHE_OS;

	if (tmc_etr_has_cap(drvdata, TMC_ETR_AXI_ARCACHE)) {
		axictl &= ~TMC_AXICTL_ARCACHE_MASK;
		axictl |= TMC_AXICTL_ARCACHE_OS;
	}

	if (etr_buf->mode == ETR_MODE_ETR_SG)
		axictl |= TMC_AXICTL_SCT_GAT_MODE;

	writel_relaxed(axictl, drvdata->base + TMC_AXICTL);
	tmc_write_dba(drvdata, etr_buf->hwaddr);
	/*
	 * If the TMC pointers must be programmed before the session,
	 * we have to set it properly (i.e, RRP/RWP to base address and
	 * STS to "not full").
	 */
	if (tmc_etr_has_cap(drvdata, TMC_ETR_SAVE_RESTORE)) {
		tmc_write_rrp(drvdata, etr_buf->hwaddr);
		tmc_write_rwp(drvdata, etr_buf->hwaddr);
		sts = readl_relaxed(drvdata->base + TMC_STS) & ~TMC_STS_FULL;
		writel_relaxed(sts, drvdata->base + TMC_STS);
	}

	writel_relaxed(TMC_FFCR_EN_FMT | TMC_FFCR_EN_TI |
		       TMC_FFCR_FON_FLIN | TMC_FFCR_FON_TRIG_EVT |
		       TMC_FFCR_TRIGON_TRIGIN,
		       drvdata->base + TMC_FFCR);
	writel_relaxed(drvdata->trigger_cntr, drvdata->base + TMC_TRG);
	tmc_enable_hw(drvdata);

	CS_LOCK(drvdata->base);
}
static int tmc_etr_enable_hw(struct tmc_drvdata *drvdata,
			     struct etr_buf *etr_buf)
{
	int rc;

	/* Callers should provide an appropriate buffer for use */
	if (WARN_ON(!etr_buf))
		return -EINVAL;

	if ((etr_buf->mode == ETR_MODE_ETR_SG) &&
	    WARN_ON(!tmc_etr_has_cap(drvdata, TMC_ETR_SG)))
		return -EINVAL;

	if (WARN_ON(drvdata->etr_buf))
		return -EBUSY;

	/*
	 * If this ETR is connected to a CATU, enable it before we turn
	 * it on.
	 */
	rc = tmc_etr_enable_catu(drvdata, etr_buf);
	if (rc)
		return rc;
	rc = coresight_claim_device(drvdata->csdev);
	if (!rc) {
		drvdata->etr_buf = etr_buf;
		__tmc_etr_enable_hw(drvdata);
	}

	return rc;
}

/*
 * Return the available trace data in the buffer (starts at etr_buf->offset,
 * limited by etr_buf->len) from @pos, with a maximum limit of @len,
 * also updating the @bufpp on where to find it. Since the trace data
 * starts at anywhere in the buffer, depending on the RRP, we adjust the
 * @len returned to handle buffer wrapping around.
 *
 * We are protected here by drvdata->reading != 0, which ensures the
 * sysfs_buf stays alive.
 */
ssize_t tmc_etr_get_sysfs_trace(struct tmc_drvdata *drvdata,
				loff_t pos, size_t len, char **bufpp)
{
	s64 offset;
	ssize_t actual = len;
	struct etr_buf *etr_buf = drvdata->sysfs_buf;

	if (pos + actual > etr_buf->len)
		actual = etr_buf->len - pos;
	if (actual <= 0)
		return actual;

	/* Compute the offset from which we read the data */
	offset = etr_buf->offset + pos;
	if (offset >= etr_buf->size)
		offset -= etr_buf->size;
	return tmc_etr_buf_get_data(etr_buf, offset, actual, bufpp);
}
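/*
 * Wrap-around example (illustrative only): with etr_buf->size == 1M and
 * etr_buf->offset == 0xFF000, a read at pos == 0x2000 computes offset ==
 * 0x101000, which is folded back into the buffer as
 * 0x101000 - 0x100000 == 0x1000 before fetching the data.
 */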
static struct etr_buf *
tmc_etr_setup_sysfs_buf(struct tmc_drvdata *drvdata)
{
	return tmc_alloc_etr_buf(drvdata, drvdata->size,
				 0, cpu_to_node(0), NULL);
}

static void
tmc_etr_free_sysfs_buf(struct etr_buf *buf)
{
	if (buf)
		tmc_free_etr_buf(buf);
}

static void tmc_etr_sync_sysfs_buf(struct tmc_drvdata *drvdata)
{
	struct etr_buf *etr_buf = drvdata->etr_buf;

	if (WARN_ON(drvdata->sysfs_buf != etr_buf)) {
		tmc_etr_free_sysfs_buf(drvdata->sysfs_buf);
		drvdata->sysfs_buf = NULL;
	} else {
		tmc_sync_etr_buf(drvdata);
		/*
		 * Insert barrier packets at the beginning, if there was
		 * an overflow.
		 */
		if (etr_buf->full)
			tmc_etr_buf_insert_barrier_packet(etr_buf,
							  etr_buf->offset);
	}
}

static void __tmc_etr_disable_hw(struct tmc_drvdata *drvdata)
{
	CS_UNLOCK(drvdata->base);

	tmc_flush_and_stop(drvdata);
	/*
	 * When operating in sysFS mode the content of the buffer needs to be
	 * read before the TMC is disabled.
	 */
	if (drvdata->mode == CS_MODE_SYSFS)
		tmc_etr_sync_sysfs_buf(drvdata);

	tmc_disable_hw(drvdata);

	CS_LOCK(drvdata->base);
}

void tmc_etr_disable_hw(struct tmc_drvdata *drvdata)
{
	__tmc_etr_disable_hw(drvdata);
	/* Disable CATU device if this ETR is connected to one */
	tmc_etr_disable_catu(drvdata);
	coresight_disclaim_device(drvdata->csdev);
	/* Reset the ETR buf used by hardware */
	drvdata->etr_buf = NULL;
}
static int tmc_enable_etr_sink_sysfs(struct coresight_device *csdev)
{
	int ret = 0;
	unsigned long flags;
	struct tmc_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
	struct etr_buf *sysfs_buf = NULL, *new_buf = NULL, *free_buf = NULL;

	/*
	 * If we are enabling the ETR from disabled state, we need to make
	 * sure we have a buffer with the right size. The etr_buf is not reset
	 * immediately after we stop the tracing in SYSFS mode as we wait for
	 * the user to collect the data. We may be able to reuse the existing
	 * buffer, provided the size matches. Any allocation has to be done
	 * with the lock released.
	 */
	spin_lock_irqsave(&drvdata->spinlock, flags);
	sysfs_buf = READ_ONCE(drvdata->sysfs_buf);
	if (!sysfs_buf || (sysfs_buf->size != drvdata->size)) {
		spin_unlock_irqrestore(&drvdata->spinlock, flags);

		/* Allocate memory with the locks released */
		free_buf = new_buf = tmc_etr_setup_sysfs_buf(drvdata);
		if (IS_ERR(new_buf))
			return PTR_ERR(new_buf);

		/* Let's try again */
		spin_lock_irqsave(&drvdata->spinlock, flags);
	}

	if (drvdata->reading || drvdata->mode == CS_MODE_PERF) {
		ret = -EBUSY;
		goto out;
	}

	/*
	 * In sysFS mode we can have multiple writers per sink. Since this
	 * sink is already enabled no memory is needed and the HW need not be
	 * touched, even if the buffer size has changed.
	 */
	if (drvdata->mode == CS_MODE_SYSFS) {
		atomic_inc(csdev->refcnt);
		goto out;
	}

	/*
	 * If we don't have a buffer or it doesn't match the requested size,
	 * use the buffer allocated above. Otherwise reuse the existing buffer.
	 */
	sysfs_buf = READ_ONCE(drvdata->sysfs_buf);
	if (!sysfs_buf || (new_buf && sysfs_buf->size != new_buf->size)) {
		free_buf = sysfs_buf;
		drvdata->sysfs_buf = new_buf;
	}

	ret = tmc_etr_enable_hw(drvdata, drvdata->sysfs_buf);
	if (!ret) {
		drvdata->mode = CS_MODE_SYSFS;
		atomic_inc(csdev->refcnt);
	}
out:
	spin_unlock_irqrestore(&drvdata->spinlock, flags);

	/* Free memory outside the spinlock if need be */
	if (free_buf)
		tmc_etr_free_sysfs_buf(free_buf);

	if (!ret)
		dev_dbg(&csdev->dev, "TMC-ETR enabled\n");

	return ret;
}
/*
 * alloc_etr_buf: Allocate ETR buffer for use by perf.
 * The size of the hardware buffer is dependent on the size configured
 * via sysfs and the perf ring buffer size. We prefer to allocate the
 * largest possible size, scaling down the size by half until it
 * reaches a minimum limit (1M), beyond which we give up.
 */
static struct etr_buf *
alloc_etr_buf(struct tmc_drvdata *drvdata, struct perf_event *event,
	      int nr_pages, void **pages, bool snapshot)
{
	int node;
	struct etr_buf *etr_buf;
	unsigned long size;

	node = (event->cpu == -1) ? NUMA_NO_NODE : cpu_to_node(event->cpu);
	/*
	 * Try to match the perf ring buffer size if it is larger
	 * than the size requested via sysfs.
	 */
	if ((nr_pages << PAGE_SHIFT) > drvdata->size) {
		etr_buf = tmc_alloc_etr_buf(drvdata, (nr_pages << PAGE_SHIFT),
					    0, node, NULL);
		if (!IS_ERR(etr_buf))
			goto done;
	}

	/*
	 * Else switch to configured size for this ETR
	 * and scale down until we hit the minimum limit.
	 */
	size = drvdata->size;
	do {
		etr_buf = tmc_alloc_etr_buf(drvdata, size, 0, node, NULL);
		if (!IS_ERR(etr_buf))
			goto done;
		size /= 2;
	} while (size >= TMC_ETR_PERF_MIN_BUF_SIZE);

	return ERR_PTR(-ENOMEM);

done:
	return etr_buf;
}
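/*
 * Scaling example (illustrative only): with a sysfs-configured size of
 * 16M, failed allocations fall back through 16M -> 8M -> 4M -> 2M -> 1M;
 * once halving would drop below TMC_ETR_PERF_MIN_BUF_SIZE (1M), the
 * function gives up and returns -ENOMEM.
 */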
static struct etr_buf *
get_perf_etr_buf_cpu_wide(struct tmc_drvdata *drvdata,
			  struct perf_event *event, int nr_pages,
			  void **pages, bool snapshot)
{
	int ret;
	pid_t pid = task_pid_nr(event->owner);
	struct etr_buf *etr_buf;

retry:
	/*
	 * An etr_perf_buffer is associated with an event and holds a reference
	 * to the AUX ring buffer that was created for that event. In CPU-wide
	 * N:1 mode multiple events (one per CPU), each with its own AUX ring
	 * buffer, share a sink. As such an etr_perf_buffer is created for each
	 * event but a single etr_buf associated with the ETR is shared between
	 * them. The last event in a trace session will copy the content of the
	 * etr_buf to its AUX ring buffer. Ring buffers associated with other
	 * events are simply not used and freed as events are destroyed. We
	 * still need to allocate a ring buffer for each event since we don't
	 * know which event will be last.
	 */

	/*
	 * The first thing to do here is check if an etr_buf has already been
	 * allocated for this session. If so it is shared with this event,
	 * otherwise it is created.
	 */
	mutex_lock(&drvdata->idr_mutex);
	etr_buf = idr_find(&drvdata->idr, pid);
	if (etr_buf) {
		refcount_inc(&etr_buf->refcount);
		mutex_unlock(&drvdata->idr_mutex);
		return etr_buf;
	}

	/* If we made it here no buffer has been allocated, do so now. */
	mutex_unlock(&drvdata->idr_mutex);

	etr_buf = alloc_etr_buf(drvdata, event, nr_pages, pages, snapshot);
	if (IS_ERR(etr_buf))
		return etr_buf;

	/* Now that we have a buffer, add it to the IDR. */
	mutex_lock(&drvdata->idr_mutex);
	ret = idr_alloc(&drvdata->idr, etr_buf, pid, pid + 1, GFP_KERNEL);
	mutex_unlock(&drvdata->idr_mutex);

	/* Another event with this session ID has allocated this buffer. */
	if (ret == -ENOSPC) {
		tmc_free_etr_buf(etr_buf);
		goto retry;
	}

	/* The IDR can't allocate room for a new session, abandon ship. */
	if (ret == -ENOMEM) {
		tmc_free_etr_buf(etr_buf);
		return ERR_PTR(ret);
	}

	return etr_buf;
}
static struct etr_buf *
get_perf_etr_buf_per_thread(struct tmc_drvdata *drvdata,
			    struct perf_event *event, int nr_pages,
			    void **pages, bool snapshot)
{
	/*
	 * In per-thread mode the etr_buf isn't shared, so just go ahead
	 * with memory allocation.
	 */
	return alloc_etr_buf(drvdata, event, nr_pages, pages, snapshot);
}

static struct etr_buf *
get_perf_etr_buf(struct tmc_drvdata *drvdata, struct perf_event *event,
		 int nr_pages, void **pages, bool snapshot)
{
	if (event->cpu == -1)
		return get_perf_etr_buf_per_thread(drvdata, event, nr_pages,
						   pages, snapshot);

	return get_perf_etr_buf_cpu_wide(drvdata, event, nr_pages,
					 pages, snapshot);
}

static struct etr_perf_buffer *
tmc_etr_setup_perf_buf(struct tmc_drvdata *drvdata, struct perf_event *event,
		       int nr_pages, void **pages, bool snapshot)
{
	int node;
	struct etr_buf *etr_buf;
	struct etr_perf_buffer *etr_perf;

	node = (event->cpu == -1) ? NUMA_NO_NODE : cpu_to_node(event->cpu);

	etr_perf = kzalloc_node(sizeof(*etr_perf), GFP_KERNEL, node);
	if (!etr_perf)
		return ERR_PTR(-ENOMEM);

	etr_buf = get_perf_etr_buf(drvdata, event, nr_pages, pages, snapshot);
	if (!IS_ERR(etr_buf))
		goto done;

	kfree(etr_perf);
	return ERR_PTR(-ENOMEM);

done:
	/*
	 * Keep a reference to the ETR this buffer has been allocated for
	 * in order to have access to the IDR in tmc_free_etr_buffer().
	 */
	etr_perf->drvdata = drvdata;
	etr_perf->etr_buf = etr_buf;

	return etr_perf;
}
static void *tmc_alloc_etr_buffer(struct coresight_device *csdev,
				  struct perf_event *event, void **pages,
				  int nr_pages, bool snapshot)
{
	struct etr_perf_buffer *etr_perf;
	struct tmc_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);

	etr_perf = tmc_etr_setup_perf_buf(drvdata, event,
					  nr_pages, pages, snapshot);
	if (IS_ERR(etr_perf)) {
		dev_dbg(&csdev->dev, "Unable to allocate ETR buffer\n");
		return NULL;
	}

	etr_perf->pid = task_pid_nr(event->owner);
	etr_perf->snapshot = snapshot;
	etr_perf->nr_pages = nr_pages;
	etr_perf->pages = pages;

	return etr_perf;
}

static void tmc_free_etr_buffer(void *config)
{
	struct etr_perf_buffer *etr_perf = config;
	struct tmc_drvdata *drvdata = etr_perf->drvdata;
	struct etr_buf *buf, *etr_buf = etr_perf->etr_buf;

	if (!etr_buf)
		goto free_etr_perf_buffer;

	mutex_lock(&drvdata->idr_mutex);
	/* If we are not the last one to use the buffer, don't touch it. */
	if (!refcount_dec_and_test(&etr_buf->refcount)) {
		mutex_unlock(&drvdata->idr_mutex);
		goto free_etr_perf_buffer;
	}

	/* We are the last one, remove from the IDR and free the buffer. */
	buf = idr_remove(&drvdata->idr, etr_perf->pid);
	mutex_unlock(&drvdata->idr_mutex);

	/*
	 * Something went very wrong if the buffer associated with this ID
	 * is not the same in the IDR. Leak to avoid use after free.
	 */
	if (buf && WARN_ON(buf != etr_buf))
		goto free_etr_perf_buffer;

	tmc_free_etr_buf(etr_perf->etr_buf);

free_etr_perf_buffer:
	kfree(etr_perf);
}
/*
 * tmc_etr_sync_perf_buffer: Copy the actual trace data from the hardware
 * buffer to the perf ring buffer.
 */
static void tmc_etr_sync_perf_buffer(struct etr_perf_buffer *etr_perf,
				     unsigned long src_offset,
				     unsigned long to_copy)
{
	long bytes;
	long pg_idx, pg_offset;
	unsigned long head = etr_perf->head;
	char **dst_pages, *src_buf;
	struct etr_buf *etr_buf = etr_perf->etr_buf;

	pg_idx = head >> PAGE_SHIFT;
	pg_offset = head & (PAGE_SIZE - 1);
	dst_pages = (char **)etr_perf->pages;

	while (to_copy > 0) {
		/*
		 * In one iteration, we can copy the minimum of:
		 * 1) what is available in the source buffer,
		 * 2) what is available in the source buffer, before it
		 *    wraps around,
		 * 3) what is available in the destination page.
		 */
		if (src_offset >= etr_buf->size)
			src_offset -= etr_buf->size;
		bytes = tmc_etr_buf_get_data(etr_buf, src_offset, to_copy,
					     &src_buf);
		if (WARN_ON_ONCE(bytes <= 0))
			break;
		bytes = min(bytes, (long)(PAGE_SIZE - pg_offset));

		memcpy(dst_pages[pg_idx] + pg_offset, src_buf, bytes);

		to_copy -= bytes;

		/* Move destination pointers */
		pg_offset += bytes;
		if (pg_offset == PAGE_SIZE) {
			pg_offset = 0;
			if (++pg_idx == etr_perf->nr_pages)
				pg_idx = 0;
		}

		/* Move source pointers */
		src_offset += bytes;
	}
}
/*
 * tmc_update_etr_buffer : Update the perf ring buffer with the
 * available trace data. We use software double buffering at the moment.
 *
 * TODO: Add support for reusing the perf ring buffer.
 */
static unsigned long
tmc_update_etr_buffer(struct coresight_device *csdev,
		      struct perf_output_handle *handle,
		      void *config)
{
	bool lost = false;
	unsigned long flags, offset, size = 0;
	struct tmc_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
	struct etr_perf_buffer *etr_perf = config;
	struct etr_buf *etr_buf = etr_perf->etr_buf;

	spin_lock_irqsave(&drvdata->spinlock, flags);

	/* Don't do anything if another tracer is using this sink */
	if (atomic_read(csdev->refcnt) != 1) {
		spin_unlock_irqrestore(&drvdata->spinlock, flags);
		goto out;
	}

	if (WARN_ON(drvdata->perf_buf != etr_buf)) {
		lost = true;
		spin_unlock_irqrestore(&drvdata->spinlock, flags);
		goto out;
	}

	CS_UNLOCK(drvdata->base);

	tmc_flush_and_stop(drvdata);
	tmc_sync_etr_buf(drvdata);

	CS_LOCK(drvdata->base);
	spin_unlock_irqrestore(&drvdata->spinlock, flags);

	lost = etr_buf->full;
	offset = etr_buf->offset;
	size = etr_buf->len;

	/*
	 * The ETR buffer may be bigger than the space available in the
	 * perf ring buffer (handle->size). If so advance the offset so that we
	 * get the latest trace data. In snapshot mode none of that matters
	 * since we are expected to clobber stale data in favour of the latest
	 * traces.
	 */
	if (!etr_perf->snapshot && size > handle->size) {
		u32 mask = tmc_get_memwidth_mask(drvdata);

		/*
		 * Make sure the new size is aligned in accordance with the
		 * requirement explained in function tmc_get_memwidth_mask().
		 */
		size = handle->size & mask;
		offset = etr_buf->offset + etr_buf->len - size;

		if (offset >= etr_buf->size)
			offset -= etr_buf->size;
		lost = true;
	}

	/* Insert barrier packets at the beginning, if there was an overflow */
	if (lost)
		tmc_etr_buf_insert_barrier_packet(etr_buf, offset);
	tmc_etr_sync_perf_buffer(etr_perf, offset, size);

	/*
	 * In snapshot mode we simply increment the head by the number of bytes
	 * that were written. User space function cs_etm_find_snapshot() will
	 * figure out how many bytes to get from the AUX buffer based on the
	 * position of the head.
	 */
	if (etr_perf->snapshot)
		handle->head += size;
out:
	/*
	 * Don't set the TRUNCATED flag in snapshot mode because 1) the
	 * captured buffer is expected to be truncated and 2) a full buffer
	 * prevents the event from being re-enabled by the perf core,
	 * resulting in stale data being sent to user space.
	 */
	if (!etr_perf->snapshot && lost)
		perf_aux_output_flag(handle, PERF_AUX_FLAG_TRUNCATED);

	return size;
}
static int tmc_enable_etr_sink_perf(struct coresight_device *csdev, void *data)
{
	int rc = 0;
	pid_t pid;
	unsigned long flags;
	struct tmc_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
	struct perf_output_handle *handle = data;
	struct etr_perf_buffer *etr_perf = etm_perf_sink_config(handle);

	spin_lock_irqsave(&drvdata->spinlock, flags);
	/* Don't use this sink if it is already claimed by sysFS */
	if (drvdata->mode == CS_MODE_SYSFS) {
		rc = -EBUSY;
		goto unlock_out;
	}

	if (WARN_ON(!etr_perf || !etr_perf->etr_buf)) {
		rc = -EINVAL;
		goto unlock_out;
	}

	/* Get a handle on the pid of the process to monitor */
	pid = etr_perf->pid;

	/* Do not proceed if this device is associated with another session */
	if (drvdata->pid != -1 && drvdata->pid != pid) {
		rc = -EBUSY;
		goto unlock_out;
	}

	etr_perf->head = PERF_IDX2OFF(handle->head, etr_perf);

	/*
	 * No HW configuration is needed if the sink is already in
	 * use for this session.
	 */
	if (drvdata->pid == pid) {
		atomic_inc(csdev->refcnt);
		goto unlock_out;
	}

	rc = tmc_etr_enable_hw(drvdata, etr_perf->etr_buf);
	if (!rc) {
		/* Associate with monitored process. */
		drvdata->pid = pid;
		drvdata->mode = CS_MODE_PERF;
		drvdata->perf_buf = etr_perf->etr_buf;
		atomic_inc(csdev->refcnt);
	}

unlock_out:
	spin_unlock_irqrestore(&drvdata->spinlock, flags);
	return rc;
}
static int tmc_enable_etr_sink(struct coresight_device *csdev,
			       u32 mode, void *data)
{
	switch (mode) {
	case CS_MODE_SYSFS:
		return tmc_enable_etr_sink_sysfs(csdev);
	case CS_MODE_PERF:
		return tmc_enable_etr_sink_perf(csdev, data);
	}

	/* We shouldn't be here */
	return -EINVAL;
}

static int tmc_disable_etr_sink(struct coresight_device *csdev)
{
	unsigned long flags;
	struct tmc_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);

	spin_lock_irqsave(&drvdata->spinlock, flags);

	if (drvdata->reading) {
		spin_unlock_irqrestore(&drvdata->spinlock, flags);
		return -EBUSY;
	}

	if (atomic_dec_return(csdev->refcnt)) {
		spin_unlock_irqrestore(&drvdata->spinlock, flags);
		return -EBUSY;
	}

	/* Complain if we (somehow) got out of sync */
	WARN_ON_ONCE(drvdata->mode == CS_MODE_DISABLED);
	tmc_etr_disable_hw(drvdata);
	/* Dissociate from monitored process. */
	drvdata->pid = -1;
	drvdata->mode = CS_MODE_DISABLED;
	/* Reset perf specific data */
	drvdata->perf_buf = NULL;

	spin_unlock_irqrestore(&drvdata->spinlock, flags);

	dev_dbg(&csdev->dev, "TMC-ETR disabled\n");
	return 0;
}
static const struct coresight_ops_sink tmc_etr_sink_ops = {
	.enable		= tmc_enable_etr_sink,
	.disable	= tmc_disable_etr_sink,
	.alloc_buffer	= tmc_alloc_etr_buffer,
	.update_buffer	= tmc_update_etr_buffer,
	.free_buffer	= tmc_free_etr_buffer,
};

const struct coresight_ops tmc_etr_cs_ops = {
	.sink_ops	= &tmc_etr_sink_ops,
};
int tmc_read_prepare_etr(struct tmc_drvdata *drvdata)
{
	int ret = 0;
	unsigned long flags;

	/* config types are set at boot time and never change */
	if (WARN_ON_ONCE(drvdata->config_type != TMC_CONFIG_TYPE_ETR))
		return -EINVAL;

	spin_lock_irqsave(&drvdata->spinlock, flags);
	if (drvdata->reading) {
		ret = -EBUSY;
		goto out;
	}

	/*
	 * We can safely allow reads even if the ETR is operating in PERF mode,
	 * since the sysfs session is captured in mode specific data.
	 * If drvdata::sysfs_buf is NULL the trace data has been read already.
	 */
	if (!drvdata->sysfs_buf) {
		ret = -EINVAL;
		goto out;
	}

	/* Disable the TMC if we are trying to read from a running session. */
	if (drvdata->mode == CS_MODE_SYSFS)
		__tmc_etr_disable_hw(drvdata);

	drvdata->reading = true;
out:
	spin_unlock_irqrestore(&drvdata->spinlock, flags);

	return ret;
}

int tmc_read_unprepare_etr(struct tmc_drvdata *drvdata)
{
	unsigned long flags;
	struct etr_buf *sysfs_buf = NULL;

	/* config types are set at boot time and never change */
	if (WARN_ON_ONCE(drvdata->config_type != TMC_CONFIG_TYPE_ETR))
		return -EINVAL;

	spin_lock_irqsave(&drvdata->spinlock, flags);

	/* RE-enable the TMC if need be */
	if (drvdata->mode == CS_MODE_SYSFS) {
		/*
		 * The trace run will continue with the same allocated trace
		 * buffer. Since the tracer is still enabled drvdata::buf can't
		 * be NULL.
		 */
		__tmc_etr_enable_hw(drvdata);
	} else {
		/*
		 * The ETR is not tracing and the buffer was just read.
		 * As such prepare to free the trace buffer.
		 */
		sysfs_buf = drvdata->sysfs_buf;
		drvdata->sysfs_buf = NULL;
	}

	drvdata->reading = false;
	spin_unlock_irqrestore(&drvdata->spinlock, flags);

	/* Free allocated memory outside of the spinlock */
	if (sysfs_buf)
		tmc_etr_free_sysfs_buf(sysfs_buf);

	return 0;
}