Lines Matching +full:coresight +full:- +full:catu
1 // SPDX-License-Identifier: GPL-2.0
8 #include <linux/coresight.h>
9 #include <linux/dma-mapping.h>
17 #include "coresight-catu.h"
18 #include "coresight-etm-perf.h"
19 #include "coresight-priv.h"
20 #include "coresight-tmc.h"
36 * etr_perf_buffer - Perf buffer used for ETR
37 * @drvdata - The ETR drvdata this buffer has been allocated for.
38 * @etr_buf - Actual buffer used by the ETR
39 * @pid - The PID of the session owner that etr_perf_buffer
40 * belongs to.
41 * @snapshot - Perf session mode
42 * @nr_pages - Number of pages in the ring buffer.
43 * @pages - Array of pages in the ring buffer.
56 ((idx) % ((unsigned long)(buf)->nr_pages << PAGE_SHIFT))
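The line above is the body of the PERF_IDX2OFF() macro (the #define itself is not part of the match): it folds the free-running perf AUX ring-buffer index back into an offset within the buffer. A minimal worked example, assuming 4 KiB pages and a two-page buffer:

	/* buf->nr_pages == 2, so the buffer spans 2 << PAGE_SHIFT = 8192 bytes */
	/* PERF_IDX2OFF(8292, buf) == 100: the index wraps modulo the buffer size */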
69 * ---Bit31------------Bit4-------Bit1-----Bit0--
70 * |     Address[39:12]    | SBZ |  Entry Type  |
71 * ----------------------------------------------
77 * b00 - Reserved.
78 * b01 - Last entry in the tables, points to 4K page buffer.
79 * b10 - Normal entry, points to 4K page buffer.
80 * b11 - Link. The address points to the base of next table.
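A sketch of how one 32-bit entry could be packed to match this layout; the names below are illustrative, not the driver's own macros (which encode the same fields):

	typedef u32 sgte_t;

	#define SG_ET_LAST	0x1	/* b01: last data pointer       */
	#define SG_ET_NORMAL	0x2	/* b10: normal data pointer     */
	#define SG_ET_LINK	0x3	/* b11: link to next table page */

	static inline sgte_t sg_entry(dma_addr_t paddr, u32 type)
	{
		/* Address[39:12] lands in Bits[31:4]; Bits[3:2] SBZ; type in Bits[1:0] */
		return (sgte_t)(((paddr >> 12) << 4) | (type & 0x3));
	}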
122 * Each TMC page can map (ETR_SG_PTRS_PER_PAGE - 1) buffer pointers,
123 * with the last entry pointing to another page of table entries.
131 unsigned long nr_sglinks = nr_sgpages / (ETR_SG_PTRS_PER_PAGE - 1); in tmc_etr_sg_table_entries()
137 if (nr_sglinks && (nr_sgpages % (ETR_SG_PTRS_PER_PAGE - 1) < 2)) in tmc_etr_sg_table_entries()
138 nr_sglinks--; in tmc_etr_sg_table_entries()
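With 4 KiB pages, ETR_SG_PTRS_PER_PAGE works out to 1024 (PAGE_SIZE divided by the 4-byte entry size), so a table page carries 1023 data pointers plus one link. A worked example under those assumptions:

	/* nr_sgpages = 2048: nr_sglinks = 2048 / 1023 = 2, remainder 2 (not < 2,
	 * so no trim); the table needs 2048 + 2 = 2050 entries.
	 * nr_sgpages = 2047: remainder is 1, so the final lone data pointer
	 * replaces the previous page's link entry, nr_sglinks drops to 1,
	 * and 2047 + 1 = 2048 entries fill exactly two table pages.
	 */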
153 for (i = 0; i < tmc_pages->nr_pages; i++) { in tmc_pages_get_offset()
154 page_start = tmc_pages->daddrs[i]; in tmc_pages_get_offset()
156 return i * PAGE_SIZE + (addr - page_start); in tmc_pages_get_offset()
159 return -EINVAL; in tmc_pages_get_offset()
171 struct device *real_dev = dev->parent; in tmc_pages_free()
173 for (i = 0; i < tmc_pages->nr_pages; i++) { in tmc_pages_free()
174 if (tmc_pages->daddrs && tmc_pages->daddrs[i]) in tmc_pages_free()
175 dma_unmap_page(real_dev, tmc_pages->daddrs[i], in tmc_pages_free()
177 if (tmc_pages->pages && tmc_pages->pages[i]) in tmc_pages_free()
178 __free_page(tmc_pages->pages[i]); in tmc_pages_free()
181 kfree(tmc_pages->pages); in tmc_pages_free()
182 kfree(tmc_pages->daddrs); in tmc_pages_free()
183 tmc_pages->pages = NULL; in tmc_pages_free()
184 tmc_pages->daddrs = NULL; in tmc_pages_free()
185 tmc_pages->nr_pages = 0; in tmc_pages_free()
203 struct device *real_dev = dev->parent; in tmc_pages_alloc()
205 nr_pages = tmc_pages->nr_pages; in tmc_pages_alloc()
206 tmc_pages->daddrs = kcalloc(nr_pages, sizeof(*tmc_pages->daddrs), in tmc_pages_alloc()
208 if (!tmc_pages->daddrs) in tmc_pages_alloc()
209 return -ENOMEM; in tmc_pages_alloc()
210 tmc_pages->pages = kcalloc(nr_pages, sizeof(*tmc_pages->pages), in tmc_pages_alloc()
212 if (!tmc_pages->pages) { in tmc_pages_alloc()
213 kfree(tmc_pages->daddrs); in tmc_pages_alloc()
214 tmc_pages->daddrs = NULL; in tmc_pages_alloc()
215 return -ENOMEM; in tmc_pages_alloc()
232 tmc_pages->daddrs[i] = paddr; in tmc_pages_alloc()
233 tmc_pages->pages[i] = page; in tmc_pages_alloc()
238 return -ENOMEM; in tmc_pages_alloc()
244 return tmc_pages_get_offset(&sg_table->data_pages, addr); in tmc_sg_get_data_page_offset()
249 if (sg_table->table_vaddr) in tmc_free_table_pages()
250 vunmap(sg_table->table_vaddr); in tmc_free_table_pages()
251 tmc_pages_free(&sg_table->table_pages, sg_table->dev, DMA_TO_DEVICE); in tmc_free_table_pages()
256 if (sg_table->data_vaddr) in tmc_free_data_pages()
257 vunmap(sg_table->data_vaddr); in tmc_free_data_pages()
258 tmc_pages_free(&sg_table->data_pages, sg_table->dev, DMA_FROM_DEVICE); in tmc_free_data_pages()
277 struct tmc_pages *table_pages = &sg_table->table_pages; in tmc_alloc_table_pages()
279 rc = tmc_pages_alloc(table_pages, sg_table->dev, in tmc_alloc_table_pages()
280 dev_to_node(sg_table->dev), in tmc_alloc_table_pages()
284 sg_table->table_vaddr = vmap(table_pages->pages, in tmc_alloc_table_pages()
285 table_pages->nr_pages, in tmc_alloc_table_pages()
288 if (!sg_table->table_vaddr) in tmc_alloc_table_pages()
289 rc = -ENOMEM; in tmc_alloc_table_pages()
291 sg_table->table_daddr = table_pages->daddrs[0]; in tmc_alloc_table_pages()
300 rc = tmc_pages_alloc(&sg_table->data_pages, in tmc_alloc_data_pages()
301 sg_table->dev, sg_table->node, in tmc_alloc_data_pages()
304 sg_table->data_vaddr = vmap(sg_table->data_pages.pages, in tmc_alloc_data_pages()
305 sg_table->data_pages.nr_pages, in tmc_alloc_data_pages()
308 if (!sg_table->data_vaddr) in tmc_alloc_data_pages()
309 rc = -ENOMEM; in tmc_alloc_data_pages()
319 * @dev - Coresight device to which the pages should be DMA mapped.
320 * @node - NUMA node for memory allocations
321 * @nr_tpages - Number of pages for the table entries.
322 * @nr_dpages - Number of pages for the data buffer.
323 * @pages - Optional list of virtual addresses of the pages.
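A hedged usage sketch of this allocator; the page counts are arbitrary example values, and passing NULL for @pages asks the helper to allocate the data pages itself:

	struct tmc_sg_table *sg_table;

	sg_table = tmc_alloc_sg_table(dev, dev_to_node(dev), 1, 16, NULL);
	if (IS_ERR(sg_table))
		return PTR_ERR(sg_table);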
336 return ERR_PTR(-ENOMEM); in tmc_alloc_sg_table()
337 sg_table->data_pages.nr_pages = nr_dpages; in tmc_alloc_sg_table()
338 sg_table->table_pages.nr_pages = nr_tpages; in tmc_alloc_sg_table()
339 sg_table->node = node; in tmc_alloc_sg_table()
340 sg_table->dev = dev; in tmc_alloc_sg_table()
363 struct device *real_dev = table->dev->parent; in tmc_sg_table_sync_data_range()
364 struct tmc_pages *data = &table->data_pages; in tmc_sg_table_sync_data_range()
368 index = i % data->nr_pages; in tmc_sg_table_sync_data_range()
369 dma_sync_single_for_cpu(real_dev, data->daddrs[index], in tmc_sg_table_sync_data_range()
379 struct device *real_dev = sg_table->dev->parent; in tmc_sg_table_sync_table()
380 struct tmc_pages *table_pages = &sg_table->table_pages; in tmc_sg_table_sync_table()
382 for (i = 0; i < table_pages->nr_pages; i++) in tmc_sg_table_sync_table()
383 dma_sync_single_for_device(real_dev, table_pages->daddrs[i], in tmc_sg_table_sync_table()
401 int pg_offset = offset & (PAGE_SIZE - 1); in tmc_sg_table_get_data()
402 struct tmc_pages *data_pages = &sg_table->data_pages; in tmc_sg_table_get_data()
406 return -EINVAL; in tmc_sg_table_get_data()
409 len = (len < (size - offset)) ? len : size - offset; in tmc_sg_table_get_data()
411 len = (len < (PAGE_SIZE - pg_offset)) ? len : (PAGE_SIZE - pg_offset); in tmc_sg_table_get_data()
413 *bufpp = page_address(data_pages->pages[pg_idx]) + pg_offset; in tmc_sg_table_get_data()
429 tmc_pages = &sg_table->table_pages; in tmc_sg_daddr_to_vaddr()
430 base = (unsigned long)sg_table->table_vaddr; in tmc_sg_daddr_to_vaddr()
432 tmc_pages = &sg_table->data_pages; in tmc_sg_daddr_to_vaddr()
433 base = (unsigned long)sg_table->data_vaddr; in tmc_sg_daddr_to_vaddr()
448 struct tmc_sg_table *sg_table = etr_table->sg_table; in tmc_etr_sg_table_dump()
451 etr_table->hwaddr, true); in tmc_etr_sg_table_dump()
456 dev_dbg(sg_table->dev, in tmc_etr_sg_table_dump()
461 dev_dbg(sg_table->dev, in tmc_etr_sg_table_dump()
468 dev_dbg(sg_table->dev, in tmc_etr_sg_table_dump()
473 dev_dbg(sg_table->dev, in tmc_etr_sg_table_dump()
480 dev_dbg(sg_table->dev, "******* End of Table *****\n"); in tmc_etr_sg_table_dump()
503 struct tmc_sg_table *sg_table = etr_table->sg_table; in tmc_etr_sg_table_populate()
504 dma_addr_t *table_daddrs = sg_table->table_pages.daddrs; in tmc_etr_sg_table_populate()
505 dma_addr_t *data_daddrs = sg_table->data_pages.daddrs; in tmc_etr_sg_table_populate()
507 nr_entries = tmc_etr_sg_table_entries(sg_table->data_pages.nr_pages); in tmc_etr_sg_table_populate()
511 ptr = sg_table->table_vaddr; in tmc_etr_sg_table_populate()
516 for (i = 0; i < nr_entries - 1; i++) { in tmc_etr_sg_table_populate()
517 if (sgtentry == ETR_SG_PTRS_PER_PAGE - 1) { in tmc_etr_sg_table_populate()
526 if (sgtidx == ETR_SG_PAGES_PER_SYSPAGE - 1) { in tmc_etr_sg_table_populate()
563 * @dev - Device pointer for the TMC
564 * @node - NUMA node where the memory should be allocated
565 * @size - Total size of the data buffer
566 * @pages - Optional list of page virtual addresses
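A minimal usage sketch, assuming a 1 MiB buffer with helper-allocated pages; tmc_etr_alloc_sg_buf() below makes essentially this call with the requested ETR buffer size:

	struct etr_sg_table *etr_table;

	etr_table = tmc_init_etr_sg_table(dev, NUMA_NO_NODE, SZ_1M, NULL);
	if (IS_ERR(etr_table))
		return PTR_ERR(etr_table);
	/* etr_table->hwaddr is then programmed as the ETR base address */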
579 return ERR_PTR(-ENOMEM); in tmc_init_etr_sg_table()
589 etr_table->sg_table = sg_table; in tmc_init_etr_sg_table()
591 etr_table->hwaddr = sg_table->table_daddr; in tmc_init_etr_sg_table()
608 struct device *real_dev = drvdata->csdev->dev.parent; in tmc_etr_alloc_flat_buf()
612 return -EINVAL; in tmc_etr_alloc_flat_buf()
616 return -ENOMEM; in tmc_etr_alloc_flat_buf()
618 flat_buf->vaddr = dma_alloc_noncoherent(real_dev, etr_buf->size, in tmc_etr_alloc_flat_buf()
619 &flat_buf->daddr, in tmc_etr_alloc_flat_buf()
622 if (!flat_buf->vaddr) { in tmc_etr_alloc_flat_buf()
624 return -ENOMEM; in tmc_etr_alloc_flat_buf()
627 flat_buf->size = etr_buf->size; in tmc_etr_alloc_flat_buf()
628 flat_buf->dev = &drvdata->csdev->dev; in tmc_etr_alloc_flat_buf()
629 etr_buf->hwaddr = flat_buf->daddr; in tmc_etr_alloc_flat_buf()
630 etr_buf->mode = ETR_MODE_FLAT; in tmc_etr_alloc_flat_buf()
631 etr_buf->private = flat_buf; in tmc_etr_alloc_flat_buf()
637 struct etr_flat_buf *flat_buf = etr_buf->private; in tmc_etr_free_flat_buf()
639 if (flat_buf && flat_buf->daddr) { in tmc_etr_free_flat_buf()
640 struct device *real_dev = flat_buf->dev->parent; in tmc_etr_free_flat_buf()
642 dma_free_noncoherent(real_dev, etr_buf->size, in tmc_etr_free_flat_buf()
643 flat_buf->vaddr, flat_buf->daddr, in tmc_etr_free_flat_buf()
651 struct etr_flat_buf *flat_buf = etr_buf->private; in tmc_etr_sync_flat_buf()
652 struct device *real_dev = flat_buf->dev->parent; in tmc_etr_sync_flat_buf()
658 etr_buf->offset = rrp - etr_buf->hwaddr; in tmc_etr_sync_flat_buf()
659 if (etr_buf->full) in tmc_etr_sync_flat_buf()
660 etr_buf->len = etr_buf->size; in tmc_etr_sync_flat_buf()
662 etr_buf->len = rwp - rrp; in tmc_etr_sync_flat_buf()
669 if (etr_buf->offset + etr_buf->len > etr_buf->size) in tmc_etr_sync_flat_buf()
670 dma_sync_single_for_cpu(real_dev, flat_buf->daddr, in tmc_etr_sync_flat_buf()
671 etr_buf->size, DMA_FROM_DEVICE); in tmc_etr_sync_flat_buf()
674 flat_buf->daddr + etr_buf->offset, in tmc_etr_sync_flat_buf()
675 etr_buf->len, DMA_FROM_DEVICE); in tmc_etr_sync_flat_buf()
681 struct etr_flat_buf *flat_buf = etr_buf->private; in tmc_etr_get_data_flat_buf()
683 *bufpp = (char *)flat_buf->vaddr + offset; in tmc_etr_get_data_flat_buf()
707 struct device *dev = &drvdata->csdev->dev; in tmc_etr_alloc_sg_buf()
710 etr_buf->size, pages); in tmc_etr_alloc_sg_buf()
712 return -ENOMEM; in tmc_etr_alloc_sg_buf()
713 etr_buf->hwaddr = etr_table->hwaddr; in tmc_etr_alloc_sg_buf()
714 etr_buf->mode = ETR_MODE_ETR_SG; in tmc_etr_alloc_sg_buf()
715 etr_buf->private = etr_table; in tmc_etr_alloc_sg_buf()
721 struct etr_sg_table *etr_table = etr_buf->private; in tmc_etr_free_sg_buf()
724 tmc_free_sg_table(etr_table->sg_table); in tmc_etr_free_sg_buf()
732 struct etr_sg_table *etr_table = etr_buf->private; in tmc_etr_get_data_sg_buf()
734 return tmc_sg_table_get_data(etr_table->sg_table, offset, len, bufpp); in tmc_etr_get_data_sg_buf()
740 struct etr_sg_table *etr_table = etr_buf->private; in tmc_etr_sync_sg_buf()
741 struct tmc_sg_table *table = etr_table->sg_table; in tmc_etr_sync_sg_buf()
746 dev_warn(table->dev, in tmc_etr_sync_sg_buf()
748 etr_buf->len = 0; in tmc_etr_sync_sg_buf()
754 dev_warn(table->dev, in tmc_etr_sync_sg_buf()
756 etr_buf->len = 0; in tmc_etr_sync_sg_buf()
760 etr_buf->offset = r_offset; in tmc_etr_sync_sg_buf()
761 if (etr_buf->full) in tmc_etr_sync_sg_buf()
762 etr_buf->len = etr_buf->size; in tmc_etr_sync_sg_buf()
764 etr_buf->len = ((w_offset < r_offset) ? etr_buf->size : 0) + in tmc_etr_sync_sg_buf()
765 w_offset - r_offset; in tmc_etr_sync_sg_buf()
766 tmc_sg_table_sync_data_range(table, r_offset, etr_buf->len); in tmc_etr_sync_sg_buf()
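The length computation above handles write-pointer wrap-around. A worked example, assuming a 32 KiB (0x8000) buffer:

	/* w_offset = 0x1000, r_offset = 0x7000:
	 * w_offset < r_offset, so len = 0x8000 + 0x1000 - 0x7000 = 0x2000,
	 * i.e. the trace runs from 0x7000 to the end and wraps to 0x1000.
	 */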
777 * TMC ETR could be connected to a CATU device, which can provide address
778 * translation service. This is represented by the Output port of the TMC
779 * (ETR) connected to the input port of the CATU.
781 * Returns : coresight_device ptr for the CATU device if a CATU is found.
787 struct coresight_device *etr = drvdata->csdev; in tmc_etr_get_catu_device()
795 return coresight_find_output_type(etr->pdata, CORESIGHT_DEV_TYPE_HELPER, in tmc_etr_get_catu_device()
806 void tmc_etr_set_catu_ops(const struct etr_buf_operations *catu) in tmc_etr_set_catu_ops() argument
808 etr_buf_ops[ETR_MODE_CATU] = catu; in tmc_etr_set_catu_ops()
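The CATU driver installs its buffer operations through this hook when it loads; a simplified sketch of that call site (from coresight-catu.c):

	static int __init catu_init(void)
	{
		int ret;

		ret = amba_driver_register(&catu_driver);
		tmc_etr_set_catu_ops(&etr_catu_buf_ops);
		return ret;
	}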
823 int rc = -EINVAL; in tmc_etr_mode_alloc_buf()
829 if (etr_buf_ops[mode] && etr_buf_ops[mode]->alloc) in tmc_etr_mode_alloc_buf()
830 rc = etr_buf_ops[mode]->alloc(drvdata, etr_buf, in tmc_etr_mode_alloc_buf()
833 etr_buf->ops = etr_buf_ops[mode]; in tmc_etr_mode_alloc_buf()
836 return -EINVAL; in tmc_etr_mode_alloc_buf()
842 struct tmc_drvdata *drvdata = dev_get_drvdata(dev->parent); in get_etr_buf_hw()
844 buf_hw->has_iommu = iommu_get_domain_for_dev(dev->parent); in get_etr_buf_hw()
845 buf_hw->has_etr_sg = tmc_etr_has_cap(drvdata, TMC_ETR_SG); in get_etr_buf_hw()
846 buf_hw->has_catu = !!tmc_etr_get_catu_device(drvdata); in get_etr_buf_hw()
851 bool has_sg = buf_hw->has_catu || buf_hw->has_etr_sg; in etr_can_use_flat_mode()
853 return !has_sg || buf_hw->has_iommu || etr_buf_size < SZ_1M; in etr_can_use_flat_mode()
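In other words, a flat (physically contiguous) allocation is attempted only when no SG capability exists, when an IOMMU makes large contiguous DMA ranges cheap, or when the request is small. For example:

	/* has_etr_sg = false, has_catu = true, has_iommu = NULL:
	 * has_sg is true and there is no IOMMU, so only requests below
	 * SZ_1M use flat mode; a 2 MiB buffer falls through to SG/CATU.
	 */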
868 int rc = -ENOMEM; in tmc_alloc_etr_buf()
871 struct device *dev = &drvdata->csdev->dev; in tmc_alloc_etr_buf()
876 return ERR_PTR(-ENOMEM); in tmc_alloc_etr_buf()
878 etr_buf->size = size; in tmc_alloc_etr_buf()
881 if (drvdata->etr_mode != ETR_MODE_AUTO) in tmc_alloc_etr_buf()
882 rc = tmc_etr_mode_alloc_buf(drvdata->etr_mode, drvdata, in tmc_alloc_etr_buf()
890 * a) The ETR cannot use Scatter-Gather. in tmc_alloc_etr_buf()
911 refcount_set(&etr_buf->refcount, 1); in tmc_alloc_etr_buf()
913 (unsigned long)size >> 10, etr_buf->mode); in tmc_alloc_etr_buf()
919 WARN_ON(!etr_buf->ops || !etr_buf->ops->free); in tmc_free_etr_buf()
920 etr_buf->ops->free(etr_buf); in tmc_free_etr_buf()
934 len = (len < (etr_buf->size - offset)) ? len : etr_buf->size - offset; in tmc_etr_buf_get_data()
936 return etr_buf->ops->get_data(etr_buf, (u64)offset, len, bufpp); in tmc_etr_buf_get_data()
948 return -EINVAL; in tmc_etr_buf_insert_barrier_packet()
956 * @etr_buf->offset will hold the offset to the beginning of the trace data
957 * within the buffer, with @etr_buf->len bytes to consume.
961 struct etr_buf *etr_buf = drvdata->etr_buf; in tmc_sync_etr_buf()
967 status = readl_relaxed(drvdata->base + TMC_STS); in tmc_sync_etr_buf()
974 dev_dbg(&drvdata->csdev->dev, in tmc_sync_etr_buf()
976 etr_buf->len = 0; in tmc_sync_etr_buf()
977 etr_buf->full = false; in tmc_sync_etr_buf()
981 etr_buf->full = !!(status & TMC_STS_FULL); in tmc_sync_etr_buf()
983 WARN_ON(!etr_buf->ops || !etr_buf->ops->sync); in tmc_sync_etr_buf()
985 etr_buf->ops->sync(etr_buf, rrp, rwp); in tmc_sync_etr_buf()
991 struct etr_buf *etr_buf = drvdata->etr_buf; in __tmc_etr_enable_hw()
994 CS_UNLOCK(drvdata->base); in __tmc_etr_enable_hw()
999 dev_err(&drvdata->csdev->dev, in __tmc_etr_enable_hw()
1001 CS_LOCK(drvdata->base); in __tmc_etr_enable_hw()
1005 writel_relaxed(etr_buf->size / 4, drvdata->base + TMC_RSZ); in __tmc_etr_enable_hw()
1006 writel_relaxed(TMC_MODE_CIRCULAR_BUFFER, drvdata->base + TMC_MODE); in __tmc_etr_enable_hw()
1008 axictl = readl_relaxed(drvdata->base + TMC_AXICTL); in __tmc_etr_enable_hw()
1011 axictl |= TMC_AXICTL_WR_BURST(drvdata->max_burst_size); in __tmc_etr_enable_hw()
1019 if (etr_buf->mode == ETR_MODE_ETR_SG) in __tmc_etr_enable_hw()
1022 writel_relaxed(axictl, drvdata->base + TMC_AXICTL); in __tmc_etr_enable_hw()
1023 tmc_write_dba(drvdata, etr_buf->hwaddr); in __tmc_etr_enable_hw()
1030 tmc_write_rrp(drvdata, etr_buf->hwaddr); in __tmc_etr_enable_hw()
1031 tmc_write_rwp(drvdata, etr_buf->hwaddr); in __tmc_etr_enable_hw()
1032 sts = readl_relaxed(drvdata->base + TMC_STS) & ~TMC_STS_FULL; in __tmc_etr_enable_hw()
1033 writel_relaxed(sts, drvdata->base + TMC_STS); in __tmc_etr_enable_hw()
1039 drvdata->base + TMC_FFCR); in __tmc_etr_enable_hw()
1040 writel_relaxed(drvdata->trigger_cntr, drvdata->base + TMC_TRG); in __tmc_etr_enable_hw()
1043 CS_LOCK(drvdata->base); in __tmc_etr_enable_hw()
1054 return -EINVAL; in tmc_etr_enable_hw()
1056 if ((etr_buf->mode == ETR_MODE_ETR_SG) && in tmc_etr_enable_hw()
1058 return -EINVAL; in tmc_etr_enable_hw()
1060 if (WARN_ON(drvdata->etr_buf)) in tmc_etr_enable_hw()
1061 return -EBUSY; in tmc_etr_enable_hw()
1063 rc = coresight_claim_device(drvdata->csdev); in tmc_etr_enable_hw()
1065 drvdata->etr_buf = etr_buf; in tmc_etr_enable_hw()
1068 drvdata->etr_buf = NULL; in tmc_etr_enable_hw()
1069 coresight_disclaim_device(drvdata->csdev); in tmc_etr_enable_hw()
1077 * Return the available trace data in the buffer (starts at etr_buf->offset,
1078 * limited by etr_buf->len) from @pos, with a maximum limit of @len,
1079 * also updating the @bufpp on where to find it.
1083 * We are protected here by drvdata->reading != 0, which ensures the
1084 * sysfs_buf stays alive.
1091 struct etr_buf *etr_buf = drvdata->sysfs_buf; in tmc_etr_get_sysfs_trace()
1093 if (pos + actual > etr_buf->len) in tmc_etr_get_sysfs_trace()
1094 actual = etr_buf->len - pos; in tmc_etr_get_sysfs_trace()
1099 offset = etr_buf->offset + pos; in tmc_etr_get_sysfs_trace()
1100 if (offset >= etr_buf->size) in tmc_etr_get_sysfs_trace()
1101 offset -= etr_buf->size; in tmc_etr_get_sysfs_trace()
1108 return tmc_alloc_etr_buf(drvdata, drvdata->size, in tmc_etr_setup_sysfs_buf()
1121 struct etr_buf *etr_buf = drvdata->etr_buf; in tmc_etr_sync_sysfs_buf()
1123 if (WARN_ON(drvdata->sysfs_buf != etr_buf)) { in tmc_etr_sync_sysfs_buf()
1124 tmc_etr_free_sysfs_buf(drvdata->sysfs_buf); in tmc_etr_sync_sysfs_buf()
1125 drvdata->sysfs_buf = NULL; in tmc_etr_sync_sysfs_buf()
1132 if (etr_buf->full) in tmc_etr_sync_sysfs_buf()
1134 etr_buf->offset); in tmc_etr_sync_sysfs_buf()
1140 CS_UNLOCK(drvdata->base); in __tmc_etr_disable_hw()
1147 if (coresight_get_mode(drvdata->csdev) == CS_MODE_SYSFS) in __tmc_etr_disable_hw()
1152 CS_LOCK(drvdata->base); in __tmc_etr_disable_hw()
1159 coresight_disclaim_device(drvdata->csdev); in tmc_etr_disable_hw()
1161 drvdata->etr_buf = NULL; in tmc_etr_disable_hw()
1168 struct tmc_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent); in tmc_etr_get_sysfs_buffer()
1179 spin_lock_irqsave(&drvdata->spinlock, flags); in tmc_etr_get_sysfs_buffer()
1180 sysfs_buf = READ_ONCE(drvdata->sysfs_buf); in tmc_etr_get_sysfs_buffer()
1181 if (!sysfs_buf || (sysfs_buf->size != drvdata->size)) { in tmc_etr_get_sysfs_buffer()
1182 spin_unlock_irqrestore(&drvdata->spinlock, flags); in tmc_etr_get_sysfs_buffer()
1190 spin_lock_irqsave(&drvdata->spinlock, flags); in tmc_etr_get_sysfs_buffer()
1193 if (drvdata->reading || coresight_get_mode(csdev) == CS_MODE_PERF) { in tmc_etr_get_sysfs_buffer()
1194 ret = -EBUSY; in tmc_etr_get_sysfs_buffer()
1202 sysfs_buf = READ_ONCE(drvdata->sysfs_buf); in tmc_etr_get_sysfs_buffer()
1203 if (!sysfs_buf || (new_buf && sysfs_buf->size != new_buf->size)) { in tmc_etr_get_sysfs_buffer()
1205 drvdata->sysfs_buf = new_buf; in tmc_etr_get_sysfs_buffer()
1209 spin_unlock_irqrestore(&drvdata->spinlock, flags); in tmc_etr_get_sysfs_buffer()
1214 return ret ? ERR_PTR(ret) : drvdata->sysfs_buf; in tmc_etr_get_sysfs_buffer()
1221 struct tmc_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent); in tmc_enable_etr_sink_sysfs()
1227 spin_lock_irqsave(&drvdata->spinlock, flags); in tmc_enable_etr_sink_sysfs()
1235 csdev->refcnt++; in tmc_enable_etr_sink_sysfs()
1242 csdev->refcnt++; in tmc_enable_etr_sink_sysfs()
1246 spin_unlock_irqrestore(&drvdata->spinlock, flags); in tmc_enable_etr_sink_sysfs()
1249 dev_dbg(&csdev->dev, "TMC-ETR enabled\n"); in tmc_enable_etr_sink_sysfs()
1265 if (WARN_ON(!etr_perf || !etr_perf->etr_buf)) in tmc_etr_get_buffer()
1266 return ERR_PTR(-EINVAL); in tmc_etr_get_buffer()
1267 return etr_perf->etr_buf; in tmc_etr_get_buffer()
1269 return ERR_PTR(-EINVAL); in tmc_etr_get_buffer()
1289 node = (event->cpu == -1) ? NUMA_NO_NODE : cpu_to_node(event->cpu); in alloc_etr_buf()
1294 if ((nr_pages << PAGE_SHIFT) > drvdata->size) { in alloc_etr_buf()
1305 size = drvdata->size; in alloc_etr_buf()
1313 return ERR_PTR(-ENOMEM); in alloc_etr_buf()
1325 pid_t pid = task_pid_nr(event->owner); in get_perf_etr_buf_cpu_wide()
1331 * to the AUX ring buffer that was created for that event. In CPU-wide in get_perf_etr_buf_cpu_wide()
1347 mutex_lock(&drvdata->idr_mutex); in get_perf_etr_buf_cpu_wide()
1348 etr_buf = idr_find(&drvdata->idr, pid); in get_perf_etr_buf_cpu_wide()
1350 refcount_inc(&etr_buf->refcount); in get_perf_etr_buf_cpu_wide()
1351 mutex_unlock(&drvdata->idr_mutex); in get_perf_etr_buf_cpu_wide()
1356 mutex_unlock(&drvdata->idr_mutex); in get_perf_etr_buf_cpu_wide()
1363 mutex_lock(&drvdata->idr_mutex); in get_perf_etr_buf_cpu_wide()
1364 ret = idr_alloc(&drvdata->idr, etr_buf, pid, pid + 1, GFP_KERNEL); in get_perf_etr_buf_cpu_wide()
1365 mutex_unlock(&drvdata->idr_mutex); in get_perf_etr_buf_cpu_wide()
1368 if (ret == -ENOSPC) { in get_perf_etr_buf_cpu_wide()
1374 if (ret == -ENOMEM) { in get_perf_etr_buf_cpu_wide()
1389 * In per-thread mode the etr_buf isn't shared, so just go ahead in get_perf_etr_buf_per_thread()
1390 * with memory allocation. in get_perf_etr_buf_per_thread()
1399 if (event->cpu == -1) in get_perf_etr_buf()
1415 node = (event->cpu == -1) ? NUMA_NO_NODE : cpu_to_node(event->cpu); in tmc_etr_setup_perf_buf()
1419 return ERR_PTR(-ENOMEM); in tmc_etr_setup_perf_buf()
1426 return ERR_PTR(-ENOMEM); in tmc_etr_setup_perf_buf()
1433 etr_perf->drvdata = drvdata; in tmc_etr_setup_perf_buf()
1434 etr_perf->etr_buf = etr_buf; in tmc_etr_setup_perf_buf()
1445 struct tmc_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent); in tmc_alloc_etr_buffer()
1450 dev_dbg(&csdev->dev, "Unable to allocate ETR buffer\n"); in tmc_alloc_etr_buffer()
1454 etr_perf->pid = task_pid_nr(event->owner); in tmc_alloc_etr_buffer()
1455 etr_perf->snapshot = snapshot; in tmc_alloc_etr_buffer()
1456 etr_perf->nr_pages = nr_pages; in tmc_alloc_etr_buffer()
1457 etr_perf->pages = pages; in tmc_alloc_etr_buffer()
1465 struct tmc_drvdata *drvdata = etr_perf->drvdata; in tmc_free_etr_buffer()
1466 struct etr_buf *buf, *etr_buf = etr_perf->etr_buf; in tmc_free_etr_buffer()
1471 mutex_lock(&drvdata->idr_mutex); in tmc_free_etr_buffer()
1473 if (!refcount_dec_and_test(&etr_buf->refcount)) { in tmc_free_etr_buffer()
1474 mutex_unlock(&drvdata->idr_mutex); in tmc_free_etr_buffer()
1479 buf = idr_remove(&drvdata->idr, etr_perf->pid); in tmc_free_etr_buffer()
1480 mutex_unlock(&drvdata->idr_mutex); in tmc_free_etr_buffer()
1489 tmc_free_etr_buf(etr_perf->etr_buf); in tmc_free_etr_buffer()
1507 struct etr_buf *etr_buf = etr_perf->etr_buf; in tmc_etr_sync_perf_buffer()
1511 pg_offset = head & (PAGE_SIZE - 1); in tmc_etr_sync_perf_buffer()
1512 dst_pages = (char **)etr_perf->pages; in tmc_etr_sync_perf_buffer()
1523 if (src_offset >= etr_buf->size) in tmc_etr_sync_perf_buffer()
1524 src_offset -= etr_buf->size; in tmc_etr_sync_perf_buffer()
1529 bytes = min(bytes, (long)(PAGE_SIZE - pg_offset)); in tmc_etr_sync_perf_buffer()
1533 to_copy -= bytes; in tmc_etr_sync_perf_buffer()
1539 if (++pg_idx == etr_perf->nr_pages) in tmc_etr_sync_perf_buffer()
1561 struct tmc_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent); in tmc_update_etr_buffer()
1563 struct etr_buf *etr_buf = etr_perf->etr_buf; in tmc_update_etr_buffer()
1565 spin_lock_irqsave(&drvdata->spinlock, flags); in tmc_update_etr_buffer()
1568 if (csdev->refcnt != 1) { in tmc_update_etr_buffer()
1569 spin_unlock_irqrestore(&drvdata->spinlock, flags); in tmc_update_etr_buffer()
1573 if (WARN_ON(drvdata->perf_buf != etr_buf)) { in tmc_update_etr_buffer()
1575 spin_unlock_irqrestore(&drvdata->spinlock, flags); in tmc_update_etr_buffer()
1579 CS_UNLOCK(drvdata->base); in tmc_update_etr_buffer()
1584 CS_LOCK(drvdata->base); in tmc_update_etr_buffer()
1585 spin_unlock_irqrestore(&drvdata->spinlock, flags); in tmc_update_etr_buffer()
1587 lost = etr_buf->full; in tmc_update_etr_buffer()
1588 offset = etr_buf->offset; in tmc_update_etr_buffer()
1589 size = etr_buf->len; in tmc_update_etr_buffer()
1593 * perf ring buffer (handle->size). If so advance the offset so that we in tmc_update_etr_buffer()
1598 if (!etr_perf->snapshot && size > handle->size) { in tmc_update_etr_buffer()
1605 size = handle->size & mask; in tmc_update_etr_buffer()
1606 offset = etr_buf->offset + etr_buf->len - size; in tmc_update_etr_buffer()
1608 if (offset >= etr_buf->size) in tmc_update_etr_buffer()
1609 offset -= etr_buf->size; in tmc_update_etr_buffer()
1616 tmc_etr_sync_perf_buffer(etr_perf, handle->head, offset, size); in tmc_update_etr_buffer()
1623 if (etr_perf->snapshot) in tmc_update_etr_buffer()
1624 handle->head += size; in tmc_update_etr_buffer()
1637 * prevents the event from being re-enabled by the perf core, in tmc_update_etr_buffer()
1638 * resulting in stale data being sent to user space. in tmc_update_etr_buffer()
1640 if (!etr_perf->snapshot && lost) in tmc_update_etr_buffer()
1650 struct tmc_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent); in tmc_enable_etr_sink_perf()
1654 spin_lock_irqsave(&drvdata->spinlock, flags); in tmc_enable_etr_sink_perf()
1657 rc = -EBUSY; in tmc_enable_etr_sink_perf()
1661 if (WARN_ON(!etr_perf || !etr_perf->etr_buf)) { in tmc_enable_etr_sink_perf()
1662 rc = -EINVAL; in tmc_enable_etr_sink_perf()
1667 pid = etr_perf->pid; in tmc_enable_etr_sink_perf()
1670 if (drvdata->pid != -1 && drvdata->pid != pid) { in tmc_enable_etr_sink_perf()
1671 rc = -EBUSY; in tmc_enable_etr_sink_perf()
1679 if (drvdata->pid == pid) { in tmc_enable_etr_sink_perf()
1680 csdev->refcnt++; in tmc_enable_etr_sink_perf()
1684 rc = tmc_etr_enable_hw(drvdata, etr_perf->etr_buf); in tmc_enable_etr_sink_perf()
1687 drvdata->pid = pid; in tmc_enable_etr_sink_perf()
1689 drvdata->perf_buf = etr_perf->etr_buf; in tmc_enable_etr_sink_perf()
1690 csdev->refcnt++; in tmc_enable_etr_sink_perf()
1694 spin_unlock_irqrestore(&drvdata->spinlock, flags); in tmc_enable_etr_sink_perf()
1707 return -EINVAL; in tmc_enable_etr_sink()
1714 struct tmc_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent); in tmc_disable_etr_sink()
1716 spin_lock_irqsave(&drvdata->spinlock, flags); in tmc_disable_etr_sink()
1718 if (drvdata->reading) { in tmc_disable_etr_sink()
1719 spin_unlock_irqrestore(&drvdata->spinlock, flags); in tmc_disable_etr_sink()
1720 return -EBUSY; in tmc_disable_etr_sink()
1723 csdev->refcnt--; in tmc_disable_etr_sink()
1724 if (csdev->refcnt) { in tmc_disable_etr_sink()
1725 spin_unlock_irqrestore(&drvdata->spinlock, flags); in tmc_disable_etr_sink()
1726 return -EBUSY; in tmc_disable_etr_sink()
1733 drvdata->pid = -1; in tmc_disable_etr_sink()
1736 drvdata->perf_buf = NULL; in tmc_disable_etr_sink()
1738 spin_unlock_irqrestore(&drvdata->spinlock, flags); in tmc_disable_etr_sink()
1740 dev_dbg(&csdev->dev, "TMC-ETR disabled\n"); in tmc_disable_etr_sink()
1762 if (WARN_ON_ONCE(drvdata->config_type != TMC_CONFIG_TYPE_ETR)) in tmc_read_prepare_etr()
1763 return -EINVAL; in tmc_read_prepare_etr()
1765 spin_lock_irqsave(&drvdata->spinlock, flags); in tmc_read_prepare_etr()
1766 if (drvdata->reading) { in tmc_read_prepare_etr()
1767 ret = -EBUSY; in tmc_read_prepare_etr()
1776 if (!drvdata->sysfs_buf) { in tmc_read_prepare_etr()
1777 ret = -EINVAL; in tmc_read_prepare_etr()
1782 if (coresight_get_mode(drvdata->csdev) == CS_MODE_SYSFS) in tmc_read_prepare_etr()
1785 drvdata->reading = true; in tmc_read_prepare_etr()
1787 spin_unlock_irqrestore(&drvdata->spinlock, flags); in tmc_read_prepare_etr()
1798 if (WARN_ON_ONCE(drvdata->config_type != TMC_CONFIG_TYPE_ETR)) in tmc_read_unprepare_etr()
1799 return -EINVAL; in tmc_read_unprepare_etr()
1801 spin_lock_irqsave(&drvdata->spinlock, flags); in tmc_read_unprepare_etr()
1803 /* Re-enable the TMC if need be */ in tmc_read_unprepare_etr()
1804 if (coresight_get_mode(drvdata->csdev) == CS_MODE_SYSFS) { in tmc_read_unprepare_etr()
1816 sysfs_buf = drvdata->sysfs_buf; in tmc_read_unprepare_etr()
1817 drvdata->sysfs_buf = NULL; in tmc_read_unprepare_etr()
1820 drvdata->reading = false; in tmc_read_unprepare_etr()
1821 spin_unlock_irqrestore(&drvdata->spinlock, flags); in tmc_read_unprepare_etr()
1832 [ETR_MODE_ETR_SG] = "tmc-sg",
1833 [ETR_MODE_CATU] = "catu",
1860 struct tmc_drvdata *drvdata = dev_get_drvdata(dev->parent); in buf_mode_preferred_show()
1862 return sysfs_emit(buf, "%s\n", buf_modes_str[drvdata->etr_mode]); in buf_mode_preferred_show()
1869 struct tmc_drvdata *drvdata = dev_get_drvdata(dev->parent); in buf_mode_preferred_store()
1874 drvdata->etr_mode = ETR_MODE_FLAT; in buf_mode_preferred_store()
1876 drvdata->etr_mode = ETR_MODE_ETR_SG; in buf_mode_preferred_store()
1878 drvdata->etr_mode = ETR_MODE_CATU; in buf_mode_preferred_store()
1880 drvdata->etr_mode = ETR_MODE_AUTO; in buf_mode_preferred_store()
1882 return -EINVAL; in buf_mode_preferred_store()
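The strings accepted here are the buf_modes_str[] entries above ("auto", "flat", "tmc-sg" and "catu"), so the preferred allocation mode can be selected from user space by writing one of them to the buf_mode_preferred attribute of the ETR device under /sys/bus/coresight/devices/ (the exact device name, e.g. tmc_etr0, varies by system).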