Lines matching "tmc-etr" in drivers/hwtracing/coresight/coresight-tmc-etr.c
1 // SPDX-License-Identifier: GPL-2.0
9 #include <linux/dma-mapping.h>
17 #include "coresight-catu.h"
18 #include "coresight-etm-perf.h"
19 #include "coresight-priv.h"
20 #include "coresight-tmc.h"
36 * struct etr_perf_buffer - Perf buffer used for ETR
37 * @drvdata - The ETR drvdata this buffer has been allocated for.
38 * @etr_buf - Actual buffer used by the ETR
39 * @pid - The PID this etr_perf_buffer belongs to.
40 * @snapshot - Perf session mode
41 * @nr_pages - Number of pages in the ring buffer.
42 * @pages - Array of pages in the ring buffer.
53 /* Convert the perf index to an offset within the ETR buffer */
55 ((idx) % ((unsigned long)(buf)->nr_pages << PAGE_SHIFT))
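/*
 * Worked example (illustrative numbers, not from the driver): with
 * nr_pages = 8 and 4K pages the perf ring spans 8 << 12 = 0x8000 bytes,
 * so a monotonically growing perf index of 0x9070 maps to buffer offset
 * 0x9070 % 0x8000 = 0x1070.
 */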
57 /* Lower limit for ETR hardware buffer */
61 * The TMC ETR SG has a page size of 4K. The SG table contains pointers
68 * ---Bit31------------Bit4-------Bit1-----Bit0--
69 * |     Address[39:12]    | SBZ |  Entry Type   |
70 * ----------------------------------------------
76 * b00 - Reserved.
77 * b01 - Last entry in the tables, points to 4K page buffer.
78 * b10 - Normal entry, points to 4K page buffer.
79 * b11 - Link. The address points to the base of next table.
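/*
 * Illustrative sketch (hypothetical helper, not one of the driver's own
 * macros): packing a 40-bit, 4K-aligned physical address and a 2-bit
 * entry type into a single 32-bit SG entry with the layout shown above,
 * i.e. Address[39:12] in bits [31:4] and the entry type in bits [1:0].
 */
#include <stdint.h>

#define EX_SG_ET_LAST	0x1U	/* b01: last entry, points to a 4K page   */
#define EX_SG_ET_NORMAL	0x2U	/* b10: normal entry, points to a 4K page */
#define EX_SG_ET_LINK	0x3U	/* b11: link, points to the next table    */

static inline uint32_t ex_sg_entry(uint64_t paddr, uint32_t type)
{
	return (uint32_t)((paddr >> 12) << 4) | (type & 0x3U);
}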
106 * struct etr_sg_table: ETR SG Table
108 * @hwaddr: hardware address used by the TMC, which is the base
121 * Each TMC page can map (ETR_SG_PTRS_PER_PAGE - 1) buffer pointers,
130 unsigned long nr_sglinks = nr_sgpages / (ETR_SG_PTRS_PER_PAGE - 1); in tmc_etr_sg_table_entries()
136 if (nr_sglinks && (nr_sgpages % (ETR_SG_PTRS_PER_PAGE - 1) < 2)) in tmc_etr_sg_table_entries()
137 nr_sglinks--; in tmc_etr_sg_table_entries()
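/*
 * Worked example (assuming 4K system pages, i.e. 1024 four-byte pointers
 * per table page, so a full table page carries 1023 data pointers plus one
 * link): 1023 data pages need 1023 entries and no link; 1024 data pages
 * still fit in one table page because the single spill-over pointer takes
 * the slot the link would have used (the nr_sglinks-- above); 1025 data
 * pages need one link, i.e. 1026 entries spread over two table pages.
 */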
152 for (i = 0; i < tmc_pages->nr_pages; i++) { in tmc_pages_get_offset()
153 page_start = tmc_pages->daddrs[i]; in tmc_pages_get_offset()
155 return i * PAGE_SIZE + (addr - page_start); in tmc_pages_get_offset()
158 return -EINVAL; in tmc_pages_get_offset()
170 struct device *real_dev = dev->parent; in tmc_pages_free()
172 for (i = 0; i < tmc_pages->nr_pages; i++) { in tmc_pages_free()
173 if (tmc_pages->daddrs && tmc_pages->daddrs[i]) in tmc_pages_free()
174 dma_unmap_page(real_dev, tmc_pages->daddrs[i], in tmc_pages_free()
176 if (tmc_pages->pages && tmc_pages->pages[i]) in tmc_pages_free()
177 __free_page(tmc_pages->pages[i]); in tmc_pages_free()
180 kfree(tmc_pages->pages); in tmc_pages_free()
181 kfree(tmc_pages->daddrs); in tmc_pages_free()
182 tmc_pages->pages = NULL; in tmc_pages_free()
183 tmc_pages->daddrs = NULL; in tmc_pages_free()
184 tmc_pages->nr_pages = 0; in tmc_pages_free()
202 struct device *real_dev = dev->parent; in tmc_pages_alloc()
204 nr_pages = tmc_pages->nr_pages; in tmc_pages_alloc()
205 tmc_pages->daddrs = kcalloc(nr_pages, sizeof(*tmc_pages->daddrs), in tmc_pages_alloc()
207 if (!tmc_pages->daddrs) in tmc_pages_alloc()
208 return -ENOMEM; in tmc_pages_alloc()
209 tmc_pages->pages = kcalloc(nr_pages, sizeof(*tmc_pages->pages), in tmc_pages_alloc()
211 if (!tmc_pages->pages) { in tmc_pages_alloc()
212 kfree(tmc_pages->daddrs); in tmc_pages_alloc()
213 tmc_pages->daddrs = NULL; in tmc_pages_alloc()
214 return -ENOMEM; in tmc_pages_alloc()
231 tmc_pages->daddrs[i] = paddr; in tmc_pages_alloc()
232 tmc_pages->pages[i] = page; in tmc_pages_alloc()
237 return -ENOMEM; in tmc_pages_alloc()
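/*
 * Hedged summary of the per-page loop elided above (not a verbatim copy):
 * for every slot the function either reuses a caller-supplied page or
 * allocates one on the requested NUMA node, DMA-maps it with dma_map_page()
 * in the requested direction, checks the handle with dma_mapping_error(),
 * and only then records the page and its DMA address in the two assignments
 * shown above; any failure releases what was built so far and returns
 * -ENOMEM.
 */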
243 return tmc_pages_get_offset(&sg_table->data_pages, addr); in tmc_sg_get_data_page_offset()
248 if (sg_table->table_vaddr) in tmc_free_table_pages()
249 vunmap(sg_table->table_vaddr); in tmc_free_table_pages()
250 tmc_pages_free(&sg_table->table_pages, sg_table->dev, DMA_TO_DEVICE); in tmc_free_table_pages()
255 if (sg_table->data_vaddr) in tmc_free_data_pages()
256 vunmap(sg_table->data_vaddr); in tmc_free_data_pages()
257 tmc_pages_free(&sg_table->data_pages, sg_table->dev, DMA_FROM_DEVICE); in tmc_free_data_pages()
275 struct tmc_pages *table_pages = &sg_table->table_pages; in tmc_alloc_table_pages()
277 rc = tmc_pages_alloc(table_pages, sg_table->dev, in tmc_alloc_table_pages()
278 dev_to_node(sg_table->dev), in tmc_alloc_table_pages()
282 sg_table->table_vaddr = vmap(table_pages->pages, in tmc_alloc_table_pages()
283 table_pages->nr_pages, in tmc_alloc_table_pages()
286 if (!sg_table->table_vaddr) in tmc_alloc_table_pages()
287 rc = -ENOMEM; in tmc_alloc_table_pages()
289 sg_table->table_daddr = table_pages->daddrs[0]; in tmc_alloc_table_pages()
298 rc = tmc_pages_alloc(&sg_table->data_pages, in tmc_alloc_data_pages()
299 sg_table->dev, sg_table->node, in tmc_alloc_data_pages()
302 sg_table->data_vaddr = vmap(sg_table->data_pages.pages, in tmc_alloc_data_pages()
303 sg_table->data_pages.nr_pages, in tmc_alloc_data_pages()
306 if (!sg_table->data_vaddr) in tmc_alloc_data_pages()
307 rc = -ENOMEM; in tmc_alloc_data_pages()
313 * tmc_alloc_sg_table: Allocate and set up DMA pages for the TMC SG table
314 * and data buffers. TMC writes to the data buffers and reads from the SG
317 * @dev - Coresight device to which the pages should be DMA mapped.
318 * @node - NUMA node for memory allocations
319 * @nr_tpages - Number of pages for the table entries.
320 * @nr_dpages - Number of pages for the data buffer.
321 * @pages - Optional list of virtual addresses of pages.
334 return ERR_PTR(-ENOMEM); in tmc_alloc_sg_table()
335 sg_table->data_pages.nr_pages = nr_dpages; in tmc_alloc_sg_table()
336 sg_table->table_pages.nr_pages = nr_tpages; in tmc_alloc_sg_table()
337 sg_table->node = node; in tmc_alloc_sg_table()
338 sg_table->dev = dev; in tmc_alloc_sg_table()
362 struct device *real_dev = table->dev->parent; in tmc_sg_table_sync_data_range()
363 struct tmc_pages *data = &table->data_pages; in tmc_sg_table_sync_data_range()
367 index = i % data->nr_pages; in tmc_sg_table_sync_data_range()
368 dma_sync_single_for_cpu(real_dev, data->daddrs[index], in tmc_sg_table_sync_data_range()
378 struct device *real_dev = sg_table->dev->parent; in tmc_sg_table_sync_table()
379 struct tmc_pages *table_pages = &sg_table->table_pages; in tmc_sg_table_sync_table()
381 for (i = 0; i < table_pages->nr_pages; i++) in tmc_sg_table_sync_table()
382 dma_sync_single_for_device(real_dev, table_pages->daddrs[i], in tmc_sg_table_sync_table()
400 int pg_offset = offset & (PAGE_SIZE - 1); in tmc_sg_table_get_data()
401 struct tmc_pages *data_pages = &sg_table->data_pages; in tmc_sg_table_get_data()
405 return -EINVAL; in tmc_sg_table_get_data()
408 len = (len < (size - offset)) ? len : size - offset; in tmc_sg_table_get_data()
410 len = (len < (PAGE_SIZE - pg_offset)) ? len : (PAGE_SIZE - pg_offset); in tmc_sg_table_get_data()
412 *bufpp = page_address(data_pages->pages[pg_idx]) + pg_offset; in tmc_sg_table_get_data()
428 tmc_pages = &sg_table->table_pages; in tmc_sg_daddr_to_vaddr()
429 base = (unsigned long)sg_table->table_vaddr; in tmc_sg_daddr_to_vaddr()
431 tmc_pages = &sg_table->data_pages; in tmc_sg_daddr_to_vaddr()
432 base = (unsigned long)sg_table->data_vaddr; in tmc_sg_daddr_to_vaddr()
447 struct tmc_sg_table *sg_table = etr_table->sg_table; in tmc_etr_sg_table_dump()
450 etr_table->hwaddr, true); in tmc_etr_sg_table_dump()
455 dev_dbg(sg_table->dev, in tmc_etr_sg_table_dump()
460 dev_dbg(sg_table->dev, in tmc_etr_sg_table_dump()
467 dev_dbg(sg_table->dev, in tmc_etr_sg_table_dump()
472 dev_dbg(sg_table->dev, in tmc_etr_sg_table_dump()
479 dev_dbg(sg_table->dev, "******* End of Table *****\n"); in tmc_etr_sg_table_dump()
502 struct tmc_sg_table *sg_table = etr_table->sg_table; in tmc_etr_sg_table_populate()
503 dma_addr_t *table_daddrs = sg_table->table_pages.daddrs; in tmc_etr_sg_table_populate()
504 dma_addr_t *data_daddrs = sg_table->data_pages.daddrs; in tmc_etr_sg_table_populate()
506 nr_entries = tmc_etr_sg_table_entries(sg_table->data_pages.nr_pages); in tmc_etr_sg_table_populate()
510 ptr = sg_table->table_vaddr; in tmc_etr_sg_table_populate()
515 for (i = 0; i < nr_entries - 1; i++) { in tmc_etr_sg_table_populate()
516 if (sgtentry == ETR_SG_PTRS_PER_PAGE - 1) { in tmc_etr_sg_table_populate()
525 if (sgtidx == ETR_SG_PAGES_PER_SYSPAGE - 1) { in tmc_etr_sg_table_populate()
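/*
 * Standalone illustration (hypothetical userspace demo, tiny 8-entry table
 * pages for readability) of the layout produced by the loop above: every
 * table page ends in a LINK entry to the next table page, all other slots
 * are NORMAL data pointers, and the very last pointer is marked LAST
 * (taking over the link slot when only one pointer would spill over).
 */
#include <stdio.h>

#define EX_PTRS_PER_PAGE 8

int main(void)
{
	int nr_data = 18, entry = 0, slot = 0;

	while (nr_data) {
		if (slot == EX_PTRS_PER_PAGE - 1 && nr_data > 1) {
			printf("entry %2d: LINK   -> next table page\n", entry);
			slot = 0;
		} else {
			nr_data--;
			printf("entry %2d: %s -> data page\n", entry,
			       nr_data ? "NORMAL" : "LAST  ");
			slot++;
		}
		entry++;
	}
	return 0;
}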
559 * tmc_init_etr_sg_table: Allocate a TMC ETR SG table, data buffer of @size and
562 * @dev - Device pointer for the TMC
563 * @node - NUMA node where the memory should be allocated
564 * @size - Total size of the data buffer
565 * @pages - Optional list of page virtual addresses
578 return ERR_PTR(-ENOMEM); in tmc_init_etr_sg_table()
588 etr_table->sg_table = sg_table; in tmc_init_etr_sg_table()
589 /* TMC should use table base address for DBA */ in tmc_init_etr_sg_table()
590 etr_table->hwaddr = sg_table->table_daddr; in tmc_init_etr_sg_table()
607 struct device *real_dev = drvdata->csdev->dev.parent; in tmc_etr_alloc_flat_buf()
611 return -EINVAL; in tmc_etr_alloc_flat_buf()
615 return -ENOMEM; in tmc_etr_alloc_flat_buf()
617 flat_buf->vaddr = dma_alloc_noncoherent(real_dev, etr_buf->size, in tmc_etr_alloc_flat_buf()
618 &flat_buf->daddr, in tmc_etr_alloc_flat_buf()
621 if (!flat_buf->vaddr) { in tmc_etr_alloc_flat_buf()
623 return -ENOMEM; in tmc_etr_alloc_flat_buf()
626 flat_buf->size = etr_buf->size; in tmc_etr_alloc_flat_buf()
627 flat_buf->dev = &drvdata->csdev->dev; in tmc_etr_alloc_flat_buf()
628 etr_buf->hwaddr = flat_buf->daddr; in tmc_etr_alloc_flat_buf()
629 etr_buf->mode = ETR_MODE_FLAT; in tmc_etr_alloc_flat_buf()
630 etr_buf->private = flat_buf; in tmc_etr_alloc_flat_buf()
636 struct etr_flat_buf *flat_buf = etr_buf->private; in tmc_etr_free_flat_buf()
638 if (flat_buf && flat_buf->daddr) { in tmc_etr_free_flat_buf()
639 struct device *real_dev = flat_buf->dev->parent; in tmc_etr_free_flat_buf()
641 dma_free_noncoherent(real_dev, etr_buf->size, in tmc_etr_free_flat_buf()
642 flat_buf->vaddr, flat_buf->daddr, in tmc_etr_free_flat_buf()
650 struct etr_flat_buf *flat_buf = etr_buf->private; in tmc_etr_sync_flat_buf()
651 struct device *real_dev = flat_buf->dev->parent; in tmc_etr_sync_flat_buf()
657 etr_buf->offset = rrp - etr_buf->hwaddr; in tmc_etr_sync_flat_buf()
658 if (etr_buf->full) in tmc_etr_sync_flat_buf()
659 etr_buf->len = etr_buf->size; in tmc_etr_sync_flat_buf()
661 etr_buf->len = rwp - rrp; in tmc_etr_sync_flat_buf()
668 if (etr_buf->offset + etr_buf->len > etr_buf->size) in tmc_etr_sync_flat_buf()
669 dma_sync_single_for_cpu(real_dev, flat_buf->daddr, in tmc_etr_sync_flat_buf()
670 etr_buf->size, DMA_FROM_DEVICE); in tmc_etr_sync_flat_buf()
673 flat_buf->daddr + etr_buf->offset, in tmc_etr_sync_flat_buf()
674 etr_buf->len, DMA_FROM_DEVICE); in tmc_etr_sync_flat_buf()
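/*
 * Example of the wrap-around case handled above (illustrative numbers):
 * with size = 1 MiB, offset = 0xF0000 and len = 0x20000 the trace data
 * wraps past the end of the flat buffer, so the whole 1 MiB mapping is
 * made CPU-coherent instead of syncing two disjoint ranges.
 */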
680 struct etr_flat_buf *flat_buf = etr_buf->private; in tmc_etr_get_data_flat_buf()
682 *bufpp = (char *)flat_buf->vaddr + offset; in tmc_etr_get_data_flat_buf()
706 struct device *dev = &drvdata->csdev->dev; in tmc_etr_alloc_sg_buf()
709 etr_buf->size, pages); in tmc_etr_alloc_sg_buf()
711 return -ENOMEM; in tmc_etr_alloc_sg_buf()
712 etr_buf->hwaddr = etr_table->hwaddr; in tmc_etr_alloc_sg_buf()
713 etr_buf->mode = ETR_MODE_ETR_SG; in tmc_etr_alloc_sg_buf()
714 etr_buf->private = etr_table; in tmc_etr_alloc_sg_buf()
720 struct etr_sg_table *etr_table = etr_buf->private; in tmc_etr_free_sg_buf()
723 tmc_free_sg_table(etr_table->sg_table); in tmc_etr_free_sg_buf()
731 struct etr_sg_table *etr_table = etr_buf->private; in tmc_etr_get_data_sg_buf()
733 return tmc_sg_table_get_data(etr_table->sg_table, offset, len, bufpp); in tmc_etr_get_data_sg_buf()
739 struct etr_sg_table *etr_table = etr_buf->private; in tmc_etr_sync_sg_buf()
740 struct tmc_sg_table *table = etr_table->sg_table; in tmc_etr_sync_sg_buf()
745 dev_warn(table->dev, in tmc_etr_sync_sg_buf()
747 etr_buf->len = 0; in tmc_etr_sync_sg_buf()
753 dev_warn(table->dev, in tmc_etr_sync_sg_buf()
755 etr_buf->len = 0; in tmc_etr_sync_sg_buf()
759 etr_buf->offset = r_offset; in tmc_etr_sync_sg_buf()
760 if (etr_buf->full) in tmc_etr_sync_sg_buf()
761 etr_buf->len = etr_buf->size; in tmc_etr_sync_sg_buf()
763 etr_buf->len = ((w_offset < r_offset) ? etr_buf->size : 0) + in tmc_etr_sync_sg_buf()
764 w_offset - r_offset; in tmc_etr_sync_sg_buf()
765 tmc_sg_table_sync_data_range(table, r_offset, etr_buf->len); in tmc_etr_sync_sg_buf()
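/*
 * Worked example for the length computation above: with size = 4 MiB,
 * r_offset = 0x300000 and w_offset = 0x100000 the write pointer has
 * wrapped, so len = 0x400000 + 0x100000 - 0x300000 = 0x200000 (2 MiB);
 * without a wrap (w_offset > r_offset) it is simply w_offset - r_offset.
 */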
776 * The TMC ETR may be connected to a CATU device, which provides an address
777 * translation service. This is represented by the output port of the TMC
778 * (ETR) being connected to the input port of the CATU.
786 struct coresight_device *etr = drvdata->csdev; in tmc_etr_get_catu_device() local
794 return coresight_find_output_type(etr->pdata, CORESIGHT_DEV_TYPE_HELPER, in tmc_etr_get_catu_device()
822 int rc = -EINVAL; in tmc_etr_mode_alloc_buf()
828 if (etr_buf_ops[mode] && etr_buf_ops[mode]->alloc) in tmc_etr_mode_alloc_buf()
829 rc = etr_buf_ops[mode]->alloc(drvdata, etr_buf, in tmc_etr_mode_alloc_buf()
832 etr_buf->ops = etr_buf_ops[mode]; in tmc_etr_mode_alloc_buf()
835 return -EINVAL; in tmc_etr_mode_alloc_buf()
841 struct tmc_drvdata *drvdata = dev_get_drvdata(dev->parent); in get_etr_buf_hw()
843 buf_hw->has_iommu = iommu_get_domain_for_dev(dev->parent); in get_etr_buf_hw()
844 buf_hw->has_etr_sg = tmc_etr_has_cap(drvdata, TMC_ETR_SG); in get_etr_buf_hw()
845 buf_hw->has_catu = !!tmc_etr_get_catu_device(drvdata); in get_etr_buf_hw()
850 bool has_sg = buf_hw->has_catu || buf_hw->has_etr_sg; in etr_can_use_flat_mode()
852 return !has_sg || buf_hw->has_iommu || etr_buf_size < SZ_1M; in etr_can_use_flat_mode()
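/*
 * Example of the policy above: a 16 MiB buffer on an ETR that has SG (or a
 * CATU) but no IOMMU is not allocated flat, since a large contiguous DMA
 * allocation is likely to fail; the same request behind an IOMMU can stay
 * flat because the IOMMU gives the device a contiguous view of scattered
 * pages, and anything under 1 MiB is small enough to try flat anyway.
 */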
856 * tmc_alloc_etr_buf: Allocate a buffer for use by the ETR.
857 * @drvdata : ETR device details.
867 int rc = -ENOMEM; in tmc_alloc_etr_buf()
870 struct device *dev = &drvdata->csdev->dev; in tmc_alloc_etr_buf()
875 return ERR_PTR(-ENOMEM); in tmc_alloc_etr_buf()
877 etr_buf->size = size; in tmc_alloc_etr_buf()
880 if (drvdata->etr_mode != ETR_MODE_AUTO) in tmc_alloc_etr_buf()
881 rc = tmc_etr_mode_alloc_buf(drvdata->etr_mode, drvdata, in tmc_alloc_etr_buf()
889 * a) The ETR cannot use Scatter-Gather. in tmc_alloc_etr_buf()
910 refcount_set(&etr_buf->refcount, 1); in tmc_alloc_etr_buf()
912 (unsigned long)size >> 10, etr_buf->mode); in tmc_alloc_etr_buf()
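/*
 * Hedged sketch (assumed shape of the elided ETR_MODE_AUTO fallback, not a
 * verbatim copy) of how the allocation might cascade through the modes
 * using tmc_etr_mode_alloc_buf() and the buf_hw capabilities gathered by
 * get_etr_buf_hw() above:
 *
 *	if (rc && etr_can_use_flat_mode(&buf_hw, size))
 *		rc = tmc_etr_mode_alloc_buf(ETR_MODE_FLAT, drvdata,
 *					    etr_buf, node, pages);
 *	if (rc && buf_hw.has_etr_sg)
 *		rc = tmc_etr_mode_alloc_buf(ETR_MODE_ETR_SG, drvdata,
 *					    etr_buf, node, pages);
 *	if (rc && buf_hw.has_catu)
 *		rc = tmc_etr_mode_alloc_buf(ETR_MODE_CATU, drvdata,
 *					    etr_buf, node, pages);
 */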
918 WARN_ON(!etr_buf->ops || !etr_buf->ops->free); in tmc_free_etr_buf()
919 etr_buf->ops->free(etr_buf); in tmc_free_etr_buf()
933 len = (len < (etr_buf->size - offset)) ? len : etr_buf->size - offset; in tmc_etr_buf_get_data()
935 return etr_buf->ops->get_data(etr_buf, (u64)offset, len, bufpp); in tmc_etr_buf_get_data()
947 return -EINVAL; in tmc_etr_buf_insert_barrier_packet()
955 * @etr_buf->offset will hold the offset to the beginning of the trace data
956 * within the buffer, with @etr_buf->len bytes to consume.
960 struct etr_buf *etr_buf = drvdata->etr_buf; in tmc_sync_etr_buf()
966 status = readl_relaxed(drvdata->base + TMC_STS); in tmc_sync_etr_buf()
973 dev_dbg(&drvdata->csdev->dev, in tmc_sync_etr_buf()
974 "tmc memory error detected, truncating buffer\n"); in tmc_sync_etr_buf()
975 etr_buf->len = 0; in tmc_sync_etr_buf()
976 etr_buf->full = false; in tmc_sync_etr_buf()
980 etr_buf->full = !!(status & TMC_STS_FULL); in tmc_sync_etr_buf()
982 WARN_ON(!etr_buf->ops || !etr_buf->ops->sync); in tmc_sync_etr_buf()
984 etr_buf->ops->sync(etr_buf, rrp, rwp); in tmc_sync_etr_buf()
990 struct etr_buf *etr_buf = drvdata->etr_buf; in __tmc_etr_enable_hw()
993 CS_UNLOCK(drvdata->base); in __tmc_etr_enable_hw()
998 dev_err(&drvdata->csdev->dev, in __tmc_etr_enable_hw()
999 "Failed to enable : TMC not ready\n"); in __tmc_etr_enable_hw()
1000 CS_LOCK(drvdata->base); in __tmc_etr_enable_hw()
1004 writel_relaxed(etr_buf->size / 4, drvdata->base + TMC_RSZ); in __tmc_etr_enable_hw()
1005 writel_relaxed(TMC_MODE_CIRCULAR_BUFFER, drvdata->base + TMC_MODE); in __tmc_etr_enable_hw()
1007 axictl = readl_relaxed(drvdata->base + TMC_AXICTL); in __tmc_etr_enable_hw()
1010 axictl |= TMC_AXICTL_WR_BURST(drvdata->max_burst_size); in __tmc_etr_enable_hw()
1018 if (etr_buf->mode == ETR_MODE_ETR_SG) in __tmc_etr_enable_hw()
1021 writel_relaxed(axictl, drvdata->base + TMC_AXICTL); in __tmc_etr_enable_hw()
1022 tmc_write_dba(drvdata, etr_buf->hwaddr); in __tmc_etr_enable_hw()
1024 * If the TMC pointers must be programmed before the session, in __tmc_etr_enable_hw()
1029 tmc_write_rrp(drvdata, etr_buf->hwaddr); in __tmc_etr_enable_hw()
1030 tmc_write_rwp(drvdata, etr_buf->hwaddr); in __tmc_etr_enable_hw()
1031 sts = readl_relaxed(drvdata->base + TMC_STS) & ~TMC_STS_FULL; in __tmc_etr_enable_hw()
1032 writel_relaxed(sts, drvdata->base + TMC_STS); in __tmc_etr_enable_hw()
1038 drvdata->base + TMC_FFCR); in __tmc_etr_enable_hw()
1039 writel_relaxed(drvdata->trigger_cntr, drvdata->base + TMC_TRG); in __tmc_etr_enable_hw()
1042 CS_LOCK(drvdata->base); in __tmc_etr_enable_hw()
1053 return -EINVAL; in tmc_etr_enable_hw()
1055 if ((etr_buf->mode == ETR_MODE_ETR_SG) && in tmc_etr_enable_hw()
1057 return -EINVAL; in tmc_etr_enable_hw()
1059 if (WARN_ON(drvdata->etr_buf)) in tmc_etr_enable_hw()
1060 return -EBUSY; in tmc_etr_enable_hw()
1062 rc = coresight_claim_device(drvdata->csdev); in tmc_etr_enable_hw()
1064 drvdata->etr_buf = etr_buf; in tmc_etr_enable_hw()
1067 drvdata->etr_buf = NULL; in tmc_etr_enable_hw()
1068 coresight_disclaim_device(drvdata->csdev); in tmc_etr_enable_hw()
1076 * Return the available trace data in the buffer (starts at etr_buf->offset,
1077 * limited by etr_buf->len) from @pos, with a maximum limit of @len,
1082 * We are protected here by drvdata->reading != 0, which ensures the
1090 struct etr_buf *etr_buf = drvdata->sysfs_buf; in tmc_etr_get_sysfs_trace()
1092 if (pos + actual > etr_buf->len) in tmc_etr_get_sysfs_trace()
1093 actual = etr_buf->len - pos; in tmc_etr_get_sysfs_trace()
1098 offset = etr_buf->offset + pos; in tmc_etr_get_sysfs_trace()
1099 if (offset >= etr_buf->size) in tmc_etr_get_sysfs_trace()
1100 offset -= etr_buf->size; in tmc_etr_get_sysfs_trace()
1107 return tmc_alloc_etr_buf(drvdata, drvdata->size, in tmc_etr_setup_sysfs_buf()
1120 struct etr_buf *etr_buf = drvdata->etr_buf; in tmc_etr_sync_sysfs_buf()
1122 if (WARN_ON(drvdata->sysfs_buf != etr_buf)) { in tmc_etr_sync_sysfs_buf()
1123 tmc_etr_free_sysfs_buf(drvdata->sysfs_buf); in tmc_etr_sync_sysfs_buf()
1124 drvdata->sysfs_buf = NULL; in tmc_etr_sync_sysfs_buf()
1131 if (etr_buf->full) in tmc_etr_sync_sysfs_buf()
1133 etr_buf->offset); in tmc_etr_sync_sysfs_buf()
1139 CS_UNLOCK(drvdata->base); in __tmc_etr_disable_hw()
1144 * read before the TMC is disabled. in __tmc_etr_disable_hw()
1146 if (drvdata->mode == CS_MODE_SYSFS) in __tmc_etr_disable_hw()
1151 CS_LOCK(drvdata->base); in __tmc_etr_disable_hw()
1158 coresight_disclaim_device(drvdata->csdev); in tmc_etr_disable_hw()
1159 /* Reset the ETR buf used by hardware */ in tmc_etr_disable_hw()
1160 drvdata->etr_buf = NULL; in tmc_etr_disable_hw()
1167 struct tmc_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent); in tmc_etr_get_sysfs_buffer()
1171 * If we are enabling the ETR from disabled state, we need to make in tmc_etr_get_sysfs_buffer()
1178 spin_lock_irqsave(&drvdata->spinlock, flags); in tmc_etr_get_sysfs_buffer()
1179 sysfs_buf = READ_ONCE(drvdata->sysfs_buf); in tmc_etr_get_sysfs_buffer()
1180 if (!sysfs_buf || (sysfs_buf->size != drvdata->size)) { in tmc_etr_get_sysfs_buffer()
1181 spin_unlock_irqrestore(&drvdata->spinlock, flags); in tmc_etr_get_sysfs_buffer()
1189 spin_lock_irqsave(&drvdata->spinlock, flags); in tmc_etr_get_sysfs_buffer()
1192 if (drvdata->reading || drvdata->mode == CS_MODE_PERF) { in tmc_etr_get_sysfs_buffer()
1193 ret = -EBUSY; in tmc_etr_get_sysfs_buffer()
1201 sysfs_buf = READ_ONCE(drvdata->sysfs_buf); in tmc_etr_get_sysfs_buffer()
1202 if (!sysfs_buf || (new_buf && sysfs_buf->size != new_buf->size)) { in tmc_etr_get_sysfs_buffer()
1204 drvdata->sysfs_buf = new_buf; in tmc_etr_get_sysfs_buffer()
1208 spin_unlock_irqrestore(&drvdata->spinlock, flags); in tmc_etr_get_sysfs_buffer()
1213 return ret ? ERR_PTR(ret) : drvdata->sysfs_buf; in tmc_etr_get_sysfs_buffer()
1220 struct tmc_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent); in tmc_enable_etr_sink_sysfs()
1226 spin_lock_irqsave(&drvdata->spinlock, flags); in tmc_enable_etr_sink_sysfs()
1233 if (drvdata->mode == CS_MODE_SYSFS) { in tmc_enable_etr_sink_sysfs()
1234 atomic_inc(&csdev->refcnt); in tmc_enable_etr_sink_sysfs()
1240 drvdata->mode = CS_MODE_SYSFS; in tmc_enable_etr_sink_sysfs()
1241 atomic_inc(&csdev->refcnt); in tmc_enable_etr_sink_sysfs()
1245 spin_unlock_irqrestore(&drvdata->spinlock, flags); in tmc_enable_etr_sink_sysfs()
1248 dev_dbg(&csdev->dev, "TMC-ETR enabled\n"); in tmc_enable_etr_sink_sysfs()
1264 if (WARN_ON(!etr_perf || !etr_perf->etr_buf)) in tmc_etr_get_buffer()
1265 return ERR_PTR(-EINVAL); in tmc_etr_get_buffer()
1266 return etr_perf->etr_buf; in tmc_etr_get_buffer()
1268 return ERR_PTR(-EINVAL); in tmc_etr_get_buffer()
1274 * alloc_etr_buf: Allocate ETR buffer for use by perf.
1288 node = (event->cpu == -1) ? NUMA_NO_NODE : cpu_to_node(event->cpu); in alloc_etr_buf()
1293 if ((nr_pages << PAGE_SHIFT) > drvdata->size) { in alloc_etr_buf()
1301 * Else switch to the configured size for this ETR in alloc_etr_buf()
1304 size = drvdata->size; in alloc_etr_buf()
1312 return ERR_PTR(-ENOMEM); in alloc_etr_buf()
1324 pid_t pid = task_pid_nr(event->owner); in get_perf_etr_buf_cpu_wide()
1330 * to the AUX ring buffer that was created for that event. In CPU-wide in get_perf_etr_buf_cpu_wide()
1333 * event but a single etr_buf associated with the ETR is shared between in get_perf_etr_buf_cpu_wide()
1346 mutex_lock(&drvdata->idr_mutex); in get_perf_etr_buf_cpu_wide()
1347 etr_buf = idr_find(&drvdata->idr, pid); in get_perf_etr_buf_cpu_wide()
1349 refcount_inc(&etr_buf->refcount); in get_perf_etr_buf_cpu_wide()
1350 mutex_unlock(&drvdata->idr_mutex); in get_perf_etr_buf_cpu_wide()
1355 mutex_unlock(&drvdata->idr_mutex); in get_perf_etr_buf_cpu_wide()
1362 mutex_lock(&drvdata->idr_mutex); in get_perf_etr_buf_cpu_wide()
1363 ret = idr_alloc(&drvdata->idr, etr_buf, pid, pid + 1, GFP_KERNEL); in get_perf_etr_buf_cpu_wide()
1364 mutex_unlock(&drvdata->idr_mutex); in get_perf_etr_buf_cpu_wide()
1367 if (ret == -ENOSPC) { in get_perf_etr_buf_cpu_wide()
1373 if (ret == -ENOMEM) { in get_perf_etr_buf_cpu_wide()
1388 * In per-thread mode the etr_buf isn't shared, so just go ahead in get_perf_etr_buf_per_thread()
1398 if (event->cpu == -1) in get_perf_etr_buf()
1414 node = (event->cpu == -1) ? NUMA_NO_NODE : cpu_to_node(event->cpu); in tmc_etr_setup_perf_buf()
1418 return ERR_PTR(-ENOMEM); in tmc_etr_setup_perf_buf()
1425 return ERR_PTR(-ENOMEM); in tmc_etr_setup_perf_buf()
1429 * Keep a reference to the ETR this buffer has been allocated for in tmc_etr_setup_perf_buf()
1432 etr_perf->drvdata = drvdata; in tmc_etr_setup_perf_buf()
1433 etr_perf->etr_buf = etr_buf; in tmc_etr_setup_perf_buf()
1444 struct tmc_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent); in tmc_alloc_etr_buffer()
1449 dev_dbg(&csdev->dev, "Unable to allocate ETR buffer\n"); in tmc_alloc_etr_buffer()
1453 etr_perf->pid = task_pid_nr(event->owner); in tmc_alloc_etr_buffer()
1454 etr_perf->snapshot = snapshot; in tmc_alloc_etr_buffer()
1455 etr_perf->nr_pages = nr_pages; in tmc_alloc_etr_buffer()
1456 etr_perf->pages = pages; in tmc_alloc_etr_buffer()
1464 struct tmc_drvdata *drvdata = etr_perf->drvdata; in tmc_free_etr_buffer()
1465 struct etr_buf *buf, *etr_buf = etr_perf->etr_buf; in tmc_free_etr_buffer()
1470 mutex_lock(&drvdata->idr_mutex); in tmc_free_etr_buffer()
1472 if (!refcount_dec_and_test(&etr_buf->refcount)) { in tmc_free_etr_buffer()
1473 mutex_unlock(&drvdata->idr_mutex); in tmc_free_etr_buffer()
1478 buf = idr_remove(&drvdata->idr, etr_perf->pid); in tmc_free_etr_buffer()
1479 mutex_unlock(&drvdata->idr_mutex); in tmc_free_etr_buffer()
1488 tmc_free_etr_buf(etr_perf->etr_buf); in tmc_free_etr_buffer()
1506 struct etr_buf *etr_buf = etr_perf->etr_buf; in tmc_etr_sync_perf_buffer()
1510 pg_offset = head & (PAGE_SIZE - 1); in tmc_etr_sync_perf_buffer()
1511 dst_pages = (char **)etr_perf->pages; in tmc_etr_sync_perf_buffer()
1522 if (src_offset >= etr_buf->size) in tmc_etr_sync_perf_buffer()
1523 src_offset -= etr_buf->size; in tmc_etr_sync_perf_buffer()
1528 bytes = min(bytes, (long)(PAGE_SIZE - pg_offset)); in tmc_etr_sync_perf_buffer()
1532 to_copy -= bytes; in tmc_etr_sync_perf_buffer()
1538 if (++pg_idx == etr_perf->nr_pages) in tmc_etr_sync_perf_buffer()
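/*
 * Example for the copy loop above: with handle->head = 0x1A30 the first
 * chunk is capped at PAGE_SIZE - 0xA30 bytes so it ends exactly on a perf
 * page boundary; pg_idx then advances (wrapping at nr_pages) while
 * src_offset wraps at etr_buf->size, keeping both rings in step until
 * to_copy reaches zero.
 */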
1560 struct tmc_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent); in tmc_update_etr_buffer()
1562 struct etr_buf *etr_buf = etr_perf->etr_buf; in tmc_update_etr_buffer()
1564 spin_lock_irqsave(&drvdata->spinlock, flags); in tmc_update_etr_buffer()
1567 if (atomic_read(&csdev->refcnt) != 1) { in tmc_update_etr_buffer()
1568 spin_unlock_irqrestore(&drvdata->spinlock, flags); in tmc_update_etr_buffer()
1572 if (WARN_ON(drvdata->perf_buf != etr_buf)) { in tmc_update_etr_buffer()
1574 spin_unlock_irqrestore(&drvdata->spinlock, flags); in tmc_update_etr_buffer()
1578 CS_UNLOCK(drvdata->base); in tmc_update_etr_buffer()
1583 CS_LOCK(drvdata->base); in tmc_update_etr_buffer()
1584 spin_unlock_irqrestore(&drvdata->spinlock, flags); in tmc_update_etr_buffer()
1586 lost = etr_buf->full; in tmc_update_etr_buffer()
1587 offset = etr_buf->offset; in tmc_update_etr_buffer()
1588 size = etr_buf->len; in tmc_update_etr_buffer()
1591 * The ETR buffer may be bigger than the space available in the in tmc_update_etr_buffer()
1592 * perf ring buffer (handle->size). If so advance the offset so that we in tmc_update_etr_buffer()
1597 if (!etr_perf->snapshot && size > handle->size) { in tmc_update_etr_buffer()
1604 size = handle->size & mask; in tmc_update_etr_buffer()
1605 offset = etr_buf->offset + etr_buf->len - size; in tmc_update_etr_buffer()
1607 if (offset >= etr_buf->size) in tmc_update_etr_buffer()
1608 offset -= etr_buf->size; in tmc_update_etr_buffer()
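	/*
	 * Example: if the ETR captured 1 MiB (etr_buf->len) but only 64 KiB
	 * are free in the perf ring (handle->size), the offset above moves
	 * forward by 1 MiB - 64 KiB so that only the newest 64 KiB of trace
	 * (minus the alignment applied via the mask) is copied out below.
	 */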
1615 tmc_etr_sync_perf_buffer(etr_perf, handle->head, offset, size); in tmc_update_etr_buffer()
1622 if (etr_perf->snapshot) in tmc_update_etr_buffer()
1623 handle->head += size; in tmc_update_etr_buffer()
1636 * prevents the event from being re-enabled by the perf core, in tmc_update_etr_buffer()
1639 if (!etr_perf->snapshot && lost) in tmc_update_etr_buffer()
1649 struct tmc_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent); in tmc_enable_etr_sink_perf()
1653 spin_lock_irqsave(&drvdata->spinlock, flags); in tmc_enable_etr_sink_perf()
1655 if (drvdata->mode == CS_MODE_SYSFS) { in tmc_enable_etr_sink_perf()
1656 rc = -EBUSY; in tmc_enable_etr_sink_perf()
1660 if (WARN_ON(!etr_perf || !etr_perf->etr_buf)) { in tmc_enable_etr_sink_perf()
1661 rc = -EINVAL; in tmc_enable_etr_sink_perf()
1666 pid = etr_perf->pid; in tmc_enable_etr_sink_perf()
1669 if (drvdata->pid != -1 && drvdata->pid != pid) { in tmc_enable_etr_sink_perf()
1670 rc = -EBUSY; in tmc_enable_etr_sink_perf()
1678 if (drvdata->pid == pid) { in tmc_enable_etr_sink_perf()
1679 atomic_inc(&csdev->refcnt); in tmc_enable_etr_sink_perf()
1683 rc = tmc_etr_enable_hw(drvdata, etr_perf->etr_buf); in tmc_enable_etr_sink_perf()
1686 drvdata->pid = pid; in tmc_enable_etr_sink_perf()
1687 drvdata->mode = CS_MODE_PERF; in tmc_enable_etr_sink_perf()
1688 drvdata->perf_buf = etr_perf->etr_buf; in tmc_enable_etr_sink_perf()
1689 atomic_inc(&csdev->refcnt); in tmc_enable_etr_sink_perf()
1693 spin_unlock_irqrestore(&drvdata->spinlock, flags); in tmc_enable_etr_sink_perf()
1706 return -EINVAL; in tmc_enable_etr_sink()
1713 struct tmc_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent); in tmc_disable_etr_sink()
1715 spin_lock_irqsave(&drvdata->spinlock, flags); in tmc_disable_etr_sink()
1717 if (drvdata->reading) { in tmc_disable_etr_sink()
1718 spin_unlock_irqrestore(&drvdata->spinlock, flags); in tmc_disable_etr_sink()
1719 return -EBUSY; in tmc_disable_etr_sink()
1722 if (atomic_dec_return(&csdev->refcnt)) { in tmc_disable_etr_sink()
1723 spin_unlock_irqrestore(&drvdata->spinlock, flags); in tmc_disable_etr_sink()
1724 return -EBUSY; in tmc_disable_etr_sink()
1728 WARN_ON_ONCE(drvdata->mode == CS_MODE_DISABLED); in tmc_disable_etr_sink()
1731 drvdata->pid = -1; in tmc_disable_etr_sink()
1732 drvdata->mode = CS_MODE_DISABLED; in tmc_disable_etr_sink()
1734 drvdata->perf_buf = NULL; in tmc_disable_etr_sink()
1736 spin_unlock_irqrestore(&drvdata->spinlock, flags); in tmc_disable_etr_sink()
1738 dev_dbg(&csdev->dev, "TMC-ETR disabled\n"); in tmc_disable_etr_sink()
1760 if (WARN_ON_ONCE(drvdata->config_type != TMC_CONFIG_TYPE_ETR)) in tmc_read_prepare_etr()
1761 return -EINVAL; in tmc_read_prepare_etr()
1763 spin_lock_irqsave(&drvdata->spinlock, flags); in tmc_read_prepare_etr()
1764 if (drvdata->reading) { in tmc_read_prepare_etr()
1765 ret = -EBUSY; in tmc_read_prepare_etr()
1770 * We can safely allow reads even if the ETR is operating in PERF mode, in tmc_read_prepare_etr()
1774 if (!drvdata->sysfs_buf) { in tmc_read_prepare_etr()
1775 ret = -EINVAL; in tmc_read_prepare_etr()
1779 /* Disable the TMC if we are trying to read from a running session. */ in tmc_read_prepare_etr()
1780 if (drvdata->mode == CS_MODE_SYSFS) in tmc_read_prepare_etr()
1783 drvdata->reading = true; in tmc_read_prepare_etr()
1785 spin_unlock_irqrestore(&drvdata->spinlock, flags); in tmc_read_prepare_etr()
1796 if (WARN_ON_ONCE(drvdata->config_type != TMC_CONFIG_TYPE_ETR)) in tmc_read_unprepare_etr()
1797 return -EINVAL; in tmc_read_unprepare_etr()
1799 spin_lock_irqsave(&drvdata->spinlock, flags); in tmc_read_unprepare_etr()
1801 /* Re-enable the TMC if need be */ in tmc_read_unprepare_etr()
1802 if (drvdata->mode == CS_MODE_SYSFS) { in tmc_read_unprepare_etr()
1811 * The ETR is not tracing and the buffer was just read. in tmc_read_unprepare_etr()
1814 sysfs_buf = drvdata->sysfs_buf; in tmc_read_unprepare_etr()
1815 drvdata->sysfs_buf = NULL; in tmc_read_unprepare_etr()
1818 drvdata->reading = false; in tmc_read_unprepare_etr()
1819 spin_unlock_irqrestore(&drvdata->spinlock, flags); in tmc_read_unprepare_etr()
1830 [ETR_MODE_ETR_SG] = "tmc-sg",
1858 struct tmc_drvdata *drvdata = dev_get_drvdata(dev->parent); in buf_mode_preferred_show()
1860 return sysfs_emit(buf, "%s\n", buf_modes_str[drvdata->etr_mode]); in buf_mode_preferred_show()
1867 struct tmc_drvdata *drvdata = dev_get_drvdata(dev->parent); in buf_mode_preferred_store()
1872 drvdata->etr_mode = ETR_MODE_FLAT; in buf_mode_preferred_store()
1874 drvdata->etr_mode = ETR_MODE_ETR_SG; in buf_mode_preferred_store()
1876 drvdata->etr_mode = ETR_MODE_CATU; in buf_mode_preferred_store()
1878 drvdata->etr_mode = ETR_MODE_AUTO; in buf_mode_preferred_store()
1880 return -EINVAL; in buf_mode_preferred_store()
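/*
 * Usage note (string values assumed to match the buf_modes_str[] table
 * above): writing "auto", "flat", "tmc-sg" or "catu" to the
 * buf_mode_preferred sysfs attribute selects the corresponding allocation
 * mode; any other string is rejected with -EINVAL as shown above.
 */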