Lines Matching full:ring
3 * TI K3 NAVSS Ring Accelerator subsystem driver
31 * @db: Ring Doorbell Register
33 * @occ: Ring Occupancy Register
34 * @indx: Ring Current Index Register
35 * @hwocc: Ring Hardware Occupancy Register
36 * @hwindx: Ring Hardware Current Index Register
57 * struct k3_ring_fifo_regs - The Ring Accelerator Queues Registers region
59 * @head_data: Ring Head Entry Data Registers
60 * @tail_data: Ring Tail Entry Data Registers
61 * @peek_head_data: Ring Peek Head Entry Data Regs
62 * @peek_tail_data: Ring Peek Tail Entry Data Regs
114 int (*push_tail)(struct k3_ring *ring, void *elm);
115 int (*push_head)(struct k3_ring *ring, void *elm);
116 int (*pop_tail)(struct k3_ring *ring, void *elm);
117 int (*pop_head)(struct k3_ring *ring, void *elm);
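These four callbacks let each ring access mode plug in its own push/pop routine; the public k3_ringacc_ring_push()/pop() entry points only dispatch through them. A minimal sketch, assuming the exposed-ring (memory) mode wires the memory-based helpers shown further down (the exact initializer set is inferred from the mode handling in k3_ringacc_ring_cfg()):

    /* Sketch (assumed initializers): ops table for exposed-ring mode,
     * where pushes and pops operate directly on the ring memory. */
    static const struct k3_ring_ops k3_ring_mode_ring_ops = {
    	.push_tail = k3_ringacc_ring_push_mem,
    	.pop_head  = k3_ringacc_ring_pop_mem,
    };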
138 * struct k3_ring - RA Ring descriptor
140 * @rt: Ring control/status registers
141 * @fifos: Ring queues registers
142 * @proxy: Ring Proxy Datapath registers
143 * @ring_mem_dma: Ring buffer dma address
144 * @ring_mem_virt: Ring buffer virt address
145 * @ops: Ring operations
146 * @size: Ring size in elements
147 * @elm_size: Size of the ring element
148 * @mode: Ring mode
150 * @state: Ring state
151 * @ring_id: Ring Id
154 * @proxy_id: RA Ring Proxy Id (only if @K3_RINGACC_RING_USE_PROXY)
192 * @num_rings: number of rings in RA
193 * @rings_inuse: bitfield for ring usage tracking
205 * @dma_rings: indicates DMA rings (dual rings within BCDMA/PKTDMA)
240 static int k3_ringacc_ring_read_occ(struct k3_ring *ring) in k3_ringacc_ring_read_occ() argument
242 return readl(&ring->rt->occ) & K3_RINGACC_RT_OCC_MASK; in k3_ringacc_ring_read_occ()
245 static void k3_ringacc_ring_update_occ(struct k3_ring *ring) in k3_ringacc_ring_update_occ() argument
249 val = readl(&ring->rt->occ); in k3_ringacc_ring_update_occ()
251 ring->state.occ = val & K3_RINGACC_RT_OCC_MASK; in k3_ringacc_ring_update_occ()
252 ring->state.tdown_complete = !!(val & K3_DMARING_RT_OCC_TDOWN_COMPLETE); in k3_ringacc_ring_update_occ()
255 static long k3_ringacc_ring_get_fifo_pos(struct k3_ring *ring) in k3_ringacc_ring_get_fifo_pos() argument
258 (4 << ring->elm_size); in k3_ringacc_ring_get_fifo_pos()
261 static void *k3_ringacc_get_elm_addr(struct k3_ring *ring, u32 idx) in k3_ringacc_get_elm_addr() argument
263 return (ring->ring_mem_virt + idx * (4 << ring->elm_size)); in k3_ringacc_get_elm_addr()
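Here elm_size is the element-size code, not a byte count: one element occupies (4 << elm_size) bytes, so an element's address is the ring base plus index times that size. A worked sketch, assuming the K3_RINGACC_RING_ELSIZE_8 code from the public header:

    /* Illustrative only: with elm_size == K3_RINGACC_RING_ELSIZE_8 (code 1),
     * an element is 4 << 1 = 8 bytes, so element 5 sits at offset 5 * 8 = 40
     * from ring_mem_virt. */
    static void *elm_addr(void *ring_mem_virt, u32 idx, u32 elm_size)
    {
    	return ring_mem_virt + idx * (4 << elm_size);
    }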
266 static int k3_ringacc_ring_push_mem(struct k3_ring *ring, void *elem);
267 static int k3_ringacc_ring_pop_mem(struct k3_ring *ring, void *elem);
268 static int k3_dmaring_fwd_pop(struct k3_ring *ring, void *elem);
269 static int k3_dmaring_reverse_pop(struct k3_ring *ring, void *elem);
282 /* Reverse side of the DMA ring can only be popped by SW */
286 static int k3_ringacc_ring_push_io(struct k3_ring *ring, void *elem);
287 static int k3_ringacc_ring_pop_io(struct k3_ring *ring, void *elem);
288 static int k3_ringacc_ring_push_head_io(struct k3_ring *ring, void *elem);
289 static int k3_ringacc_ring_pop_tail_io(struct k3_ring *ring, void *elem);
298 static int k3_ringacc_ring_push_head_proxy(struct k3_ring *ring, void *elem);
299 static int k3_ringacc_ring_push_tail_proxy(struct k3_ring *ring, void *elem);
300 static int k3_ringacc_ring_pop_head_proxy(struct k3_ring *ring, void *elem);
301 static int k3_ringacc_ring_pop_tail_proxy(struct k3_ring *ring, void *elem);
310 static void k3_ringacc_ring_dump(struct k3_ring *ring) in k3_ringacc_ring_dump() argument
312 struct device *dev = ring->parent->dev; in k3_ringacc_ring_dump()
314 dev_dbg(dev, "dump ring: %d\n", ring->ring_id); in k3_ringacc_ring_dump()
315 dev_dbg(dev, "dump mem virt %p, dma %pad\n", ring->ring_mem_virt, in k3_ringacc_ring_dump()
316 &ring->ring_mem_dma); in k3_ringacc_ring_dump()
318 ring->elm_size, ring->size, ring->mode, ring->proxy_id); in k3_ringacc_ring_dump()
319 dev_dbg(dev, "dump flags %08X\n", ring->flags); in k3_ringacc_ring_dump()
321 dev_dbg(dev, "dump ring_rt_regs: db%08x\n", readl(&ring->rt->db)); in k3_ringacc_ring_dump()
322 dev_dbg(dev, "dump occ%08x\n", readl(&ring->rt->occ)); in k3_ringacc_ring_dump()
323 dev_dbg(dev, "dump indx%08x\n", readl(&ring->rt->indx)); in k3_ringacc_ring_dump()
324 dev_dbg(dev, "dump hwocc%08x\n", readl(&ring->rt->hwocc)); in k3_ringacc_ring_dump()
325 dev_dbg(dev, "dump hwindx%08x\n", readl(&ring->rt->hwindx)); in k3_ringacc_ring_dump()
327 if (ring->ring_mem_virt) in k3_ringacc_ring_dump()
329 16, 1, ring->ring_mem_virt, 16 * 8, false); in k3_ringacc_ring_dump()
343 /* Request any general-purpose ring */ in k3_ringacc_request_ring()
373 dev_dbg(ringacc->dev, "Giving ring#%d proxy#%d\n", id, in k3_ringacc_request_ring()
376 dev_dbg(ringacc->dev, "Giving ring#%d\n", id); in k3_ringacc_request_ring()
401 * DMA rings must be requested by ID; the completion ring is the reverse in k3_dmaring_request_dual_ring()
402 * side of the forward ring. in k3_dmaring_request_dual_ring()
423 dev_dbg(ringacc->dev, "Giving ring#%d\n", fwd_id); in k3_dmaring_request_dual_ring()
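Since a BCDMA/PKTDMA ring pair shares one ID (the completion side is the reverse half of the forward ring), clients normally obtain both sides through the exported pair-request helper rather than this path directly. A hedged usage sketch (IDs and error handling are placeholders):

    /* Sketch: request the forward/completion pair for a DMA channel ring.
     * fwd_id/compl_id come from the DMA channel resources (assumed here). */
    struct k3_ring *fwd_ring, *compl_ring;
    int ret;

    ret = k3_ringacc_request_rings_pair(ringacc, fwd_id, compl_id,
    				        &fwd_ring, &compl_ring);
    if (ret)
    	return ret;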
463 static void k3_ringacc_ring_reset_sci(struct k3_ring *ring) in k3_ringacc_ring_reset_sci() argument
466 struct k3_ringacc *ringacc = ring->parent; in k3_ringacc_ring_reset_sci()
470 ring_cfg.index = ring->ring_id; in k3_ringacc_ring_reset_sci()
472 ring_cfg.count = ring->size; in k3_ringacc_ring_reset_sci()
476 dev_err(ringacc->dev, "TISCI reset ring fail (%d) ring_idx %d\n", in k3_ringacc_ring_reset_sci()
477 ret, ring->ring_id); in k3_ringacc_ring_reset_sci()
480 void k3_ringacc_ring_reset(struct k3_ring *ring) in k3_ringacc_ring_reset() argument
482 if (!ring || !(ring->flags & K3_RING_FLAG_BUSY)) in k3_ringacc_ring_reset()
485 memset(&ring->state, 0, sizeof(ring->state)); in k3_ringacc_ring_reset()
487 k3_ringacc_ring_reset_sci(ring); in k3_ringacc_ring_reset()
491 static void k3_ringacc_ring_reconfig_qmode_sci(struct k3_ring *ring, in k3_ringacc_ring_reconfig_qmode_sci() argument
495 struct k3_ringacc *ringacc = ring->parent; in k3_ringacc_ring_reconfig_qmode_sci()
499 ring_cfg.index = ring->ring_id; in k3_ringacc_ring_reconfig_qmode_sci()
506 ret, ring->ring_id); in k3_ringacc_ring_reconfig_qmode_sci()
509 void k3_ringacc_ring_reset_dma(struct k3_ring *ring, u32 occ) in k3_ringacc_ring_reset_dma() argument
511 if (!ring || !(ring->flags & K3_RING_FLAG_BUSY)) in k3_ringacc_ring_reset_dma()
514 if (!ring->parent->dma_ring_reset_quirk) in k3_ringacc_ring_reset_dma()
518 occ = k3_ringacc_ring_read_occ(ring); in k3_ringacc_ring_reset_dma()
523 dev_dbg(ring->parent->dev, "%s %u occ: %u\n", __func__, in k3_ringacc_ring_reset_dma()
524 ring->ring_id, occ); in k3_ringacc_ring_reset_dma()
525 /* TI-SCI ring reset */ in k3_ringacc_ring_reset_dma()
526 k3_ringacc_ring_reset_sci(ring); in k3_ringacc_ring_reset_dma()
529 * Set up the ring in ring/doorbell mode (if not already in this in k3_ringacc_ring_reset_dma()
532 if (ring->mode != K3_RINGACC_RING_MODE_RING) in k3_ringacc_ring_reset_dma()
534 ring, K3_RINGACC_RING_MODE_RING); in k3_ringacc_ring_reset_dma()
536 * Ring the doorbell 2**22 - ringOcc times. in k3_ringacc_ring_reset_dma()
537 * This will wrap the internal UDMAP ring state occupancy in k3_ringacc_ring_reset_dma()
544 * Ring the doorbell with the maximum count each in k3_ringacc_ring_reset_dma()
553 writel(db_ring_cnt_cur, &ring->rt->db); in k3_ringacc_ring_reset_dma()
557 /* Restore the original ring mode (if not ring mode) */ in k3_ringacc_ring_reset_dma()
558 if (ring->mode != K3_RINGACC_RING_MODE_RING) in k3_ringacc_ring_reset_dma()
559 k3_ringacc_ring_reconfig_qmode_sci(ring, ring->mode); in k3_ringacc_ring_reset_dma()
563 /* Reset the ring */ in k3_ringacc_ring_reset_dma()
564 k3_ringacc_ring_reset(ring); in k3_ringacc_ring_reset_dma()
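The quirk thus relies on doorbell arithmetic: after the TI-SCI reset, the ring is put in ring/doorbell mode and the doorbell is rung a total of 2**22 - occ times, in chunks no larger than the hardware's per-write limit, which wraps the UDMAP's internal occupancy counter back to zero. A sketch of that loop, with the chunk-limit macro named as in the driver (its exact value is an assumption here):

    /* Sketch of the wrap-around loop: 2^22 - occupancy doorbell rings,
     * issued in chunks capped at K3_RINGACC_MAX_DB_RING_CNT (value assumed). */
    u32 db_ring_cnt = (1U << 22) - occ;

    while (db_ring_cnt) {
    	u32 cur = min(db_ring_cnt, (u32)K3_RINGACC_MAX_DB_RING_CNT);

    	writel(cur, &ring->rt->db);
    	db_ring_cnt -= cur;
    }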
568 static void k3_ringacc_ring_free_sci(struct k3_ring *ring) in k3_ringacc_ring_free_sci() argument
571 struct k3_ringacc *ringacc = ring->parent; in k3_ringacc_ring_free_sci()
575 ring_cfg.index = ring->ring_id; in k3_ringacc_ring_free_sci()
580 dev_err(ringacc->dev, "TISCI ring free fail (%d) ring_idx %d\n", in k3_ringacc_ring_free_sci()
581 ret, ring->ring_id); in k3_ringacc_ring_free_sci()
584 int k3_ringacc_ring_free(struct k3_ring *ring) in k3_ringacc_ring_free() argument
588 if (!ring) in k3_ringacc_ring_free()
591 ringacc = ring->parent; in k3_ringacc_ring_free()
594 * DMA rings: the rings share memory and configuration; only the forward ring in k3_ringacc_ring_free()
595 * is configured and the reverse ring is treated as a slave. in k3_ringacc_ring_free()
597 if (ringacc->dma_rings && (ring->flags & K3_RING_FLAG_REVERSE)) in k3_ringacc_ring_free()
600 dev_dbg(ring->parent->dev, "flags: 0x%08x\n", ring->flags); in k3_ringacc_ring_free()
602 if (!test_bit(ring->ring_id, ringacc->rings_inuse)) in k3_ringacc_ring_free()
607 if (--ring->use_count) in k3_ringacc_ring_free()
610 if (!(ring->flags & K3_RING_FLAG_BUSY)) in k3_ringacc_ring_free()
613 k3_ringacc_ring_free_sci(ring); in k3_ringacc_ring_free()
615 dma_free_coherent(ring->dma_dev, in k3_ringacc_ring_free()
616 ring->size * (4 << ring->elm_size), in k3_ringacc_ring_free()
617 ring->ring_mem_virt, ring->ring_mem_dma); in k3_ringacc_ring_free()
618 ring->flags = 0; in k3_ringacc_ring_free()
619 ring->ops = NULL; in k3_ringacc_ring_free()
620 ring->dma_dev = NULL; in k3_ringacc_ring_free()
621 ring->asel = 0; in k3_ringacc_ring_free()
623 if (ring->proxy_id != K3_RINGACC_PROXY_NOT_USED) { in k3_ringacc_ring_free()
624 clear_bit(ring->proxy_id, ringacc->proxy_inuse); in k3_ringacc_ring_free()
625 ring->proxy = NULL; in k3_ringacc_ring_free()
626 ring->proxy_id = K3_RINGACC_PROXY_NOT_USED; in k3_ringacc_ring_free()
630 clear_bit(ring->ring_id, ringacc->rings_inuse); in k3_ringacc_ring_free()
640 u32 k3_ringacc_get_ring_id(struct k3_ring *ring) in k3_ringacc_get_ring_id() argument
642 if (!ring) in k3_ringacc_get_ring_id()
645 return ring->ring_id; in k3_ringacc_get_ring_id()
649 u32 k3_ringacc_get_tisci_dev_id(struct k3_ring *ring) in k3_ringacc_get_tisci_dev_id() argument
651 if (!ring) in k3_ringacc_get_tisci_dev_id()
654 return ring->parent->tisci_dev_id; in k3_ringacc_get_tisci_dev_id()
658 int k3_ringacc_get_ring_irq_num(struct k3_ring *ring) in k3_ringacc_get_ring_irq_num() argument
662 if (!ring) in k3_ringacc_get_ring_irq_num()
665 irq_num = msi_get_virq(ring->parent->dev, ring->ring_id); in k3_ringacc_get_ring_irq_num()
672 static int k3_ringacc_ring_cfg_sci(struct k3_ring *ring) in k3_ringacc_ring_cfg_sci() argument
675 struct k3_ringacc *ringacc = ring->parent; in k3_ringacc_ring_cfg_sci()
682 ring_cfg.index = ring->ring_id; in k3_ringacc_ring_cfg_sci()
684 ring_cfg.addr_lo = lower_32_bits(ring->ring_mem_dma); in k3_ringacc_ring_cfg_sci()
685 ring_cfg.addr_hi = upper_32_bits(ring->ring_mem_dma); in k3_ringacc_ring_cfg_sci()
686 ring_cfg.count = ring->size; in k3_ringacc_ring_cfg_sci()
687 ring_cfg.mode = ring->mode; in k3_ringacc_ring_cfg_sci()
688 ring_cfg.size = ring->elm_size; in k3_ringacc_ring_cfg_sci()
689 ring_cfg.asel = ring->asel; in k3_ringacc_ring_cfg_sci()
693 dev_err(ringacc->dev, "TISCI config ring fail (%d) ring_idx %d\n", in k3_ringacc_ring_cfg_sci()
694 ret, ring->ring_id); in k3_ringacc_ring_cfg_sci()
699 static int k3_dmaring_cfg(struct k3_ring *ring, struct k3_ring_cfg *cfg) in k3_dmaring_cfg() argument
710 ringacc = ring->parent; in k3_dmaring_cfg()
713 * DMA rings: the rings share memory and configuration; only the forward ring in k3_dmaring_cfg()
714 * is configured and the reverse ring is treated as a slave. in k3_dmaring_cfg()
716 if (ringacc->dma_rings && (ring->flags & K3_RING_FLAG_REVERSE)) in k3_dmaring_cfg()
719 if (!test_bit(ring->ring_id, ringacc->rings_inuse)) in k3_dmaring_cfg()
722 ring->size = cfg->size; in k3_dmaring_cfg()
723 ring->elm_size = cfg->elm_size; in k3_dmaring_cfg()
724 ring->mode = cfg->mode; in k3_dmaring_cfg()
725 ring->asel = cfg->asel; in k3_dmaring_cfg()
726 ring->dma_dev = cfg->dma_dev; in k3_dmaring_cfg()
727 if (!ring->dma_dev) { in k3_dmaring_cfg()
728 dev_warn(ringacc->dev, "dma_dev is not provided for ring%d\n", in k3_dmaring_cfg()
729 ring->ring_id); in k3_dmaring_cfg()
730 ring->dma_dev = ringacc->dev; in k3_dmaring_cfg()
733 memset(&ring->state, 0, sizeof(ring->state)); in k3_dmaring_cfg()
735 ring->ops = &k3_dmaring_fwd_ops; in k3_dmaring_cfg()
737 ring->ring_mem_virt = dma_alloc_coherent(ring->dma_dev, in k3_dmaring_cfg()
738 ring->size * (4 << ring->elm_size), in k3_dmaring_cfg()
739 &ring->ring_mem_dma, GFP_KERNEL); in k3_dmaring_cfg()
740 if (!ring->ring_mem_virt) { in k3_dmaring_cfg()
741 dev_err(ringacc->dev, "Failed to alloc ring mem\n"); in k3_dmaring_cfg()
746 ret = k3_ringacc_ring_cfg_sci(ring); in k3_dmaring_cfg()
750 ring->flags |= K3_RING_FLAG_BUSY; in k3_dmaring_cfg()
752 k3_ringacc_ring_dump(ring); in k3_dmaring_cfg()
754 /* DMA rings: configure reverse ring */ in k3_dmaring_cfg()
755 reverse_ring = &ringacc->rings[ring->ring_id + ringacc->num_rings]; in k3_dmaring_cfg()
763 reverse_ring->ring_mem_virt = ring->ring_mem_virt; in k3_dmaring_cfg()
764 reverse_ring->ring_mem_dma = ring->ring_mem_dma; in k3_dmaring_cfg()
771 dma_free_coherent(ring->dma_dev, in k3_dmaring_cfg()
772 ring->size * (4 << ring->elm_size), in k3_dmaring_cfg()
773 ring->ring_mem_virt, in k3_dmaring_cfg()
774 ring->ring_mem_dma); in k3_dmaring_cfg()
776 ring->ops = NULL; in k3_dmaring_cfg()
777 ring->proxy = NULL; in k3_dmaring_cfg()
778 ring->dma_dev = NULL; in k3_dmaring_cfg()
779 ring->asel = 0; in k3_dmaring_cfg()
783 int k3_ringacc_ring_cfg(struct k3_ring *ring, struct k3_ring_cfg *cfg) in k3_ringacc_ring_cfg() argument
788 if (!ring || !cfg) in k3_ringacc_ring_cfg()
791 ringacc = ring->parent; in k3_ringacc_ring_cfg()
794 return k3_dmaring_cfg(ring, cfg); in k3_ringacc_ring_cfg()
799 !test_bit(ring->ring_id, ringacc->rings_inuse)) in k3_ringacc_ring_cfg()
803 ring->proxy_id == K3_RINGACC_PROXY_NOT_USED && in k3_ringacc_ring_cfg()
807 4 << ring->elm_size); in k3_ringacc_ring_cfg()
812 * In case of a shared ring, only the first user (master user) can in k3_ringacc_ring_cfg()
813 * configure the ring. The client-side sequence should be: in k3_ringacc_ring_cfg()
814 * ring = k3_ringacc_request_ring(ringacc, ring_id, 0); # master user in k3_ringacc_ring_cfg()
815 * k3_ringacc_ring_cfg(ring, cfg); # master configuration in k3_ringacc_ring_cfg()
819 if (ring->use_count != 1) in k3_ringacc_ring_cfg()
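In other words, later users of a shared ring only request it and inherit the master's configuration; the use_count check above guards reconfiguration. A hedged sketch of the client sequence described in the comment (flags, cfg contents and error handling are assumptions):

    /* Sketch of the shared-ring sequence from the comment above. */
    struct k3_ring *master, *other;

    master = k3_ringacc_request_ring(ringacc, ring_id, 0);	/* first user */
    k3_ringacc_ring_cfg(master, &cfg);				/* master configures */

    other = k3_ringacc_request_ring(ringacc, ring_id, 0);	/* any later user */
    /* further configuration is left to the master; use_count != 1 guards it */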
822 ring->size = cfg->size; in k3_ringacc_ring_cfg()
823 ring->elm_size = cfg->elm_size; in k3_ringacc_ring_cfg()
824 ring->mode = cfg->mode; in k3_ringacc_ring_cfg()
825 memset(&ring->state, 0, sizeof(ring->state)); in k3_ringacc_ring_cfg()
827 if (ring->proxy_id != K3_RINGACC_PROXY_NOT_USED) in k3_ringacc_ring_cfg()
828 ring->proxy = ringacc->proxy_target_base + in k3_ringacc_ring_cfg()
829 ring->proxy_id * K3_RINGACC_PROXY_TARGET_STEP; in k3_ringacc_ring_cfg()
831 switch (ring->mode) { in k3_ringacc_ring_cfg()
833 ring->ops = &k3_ring_mode_ring_ops; in k3_ringacc_ring_cfg()
834 ring->dma_dev = cfg->dma_dev; in k3_ringacc_ring_cfg()
835 if (!ring->dma_dev) in k3_ringacc_ring_cfg()
836 ring->dma_dev = ringacc->dev; in k3_ringacc_ring_cfg()
839 ring->dma_dev = ringacc->dev; in k3_ringacc_ring_cfg()
840 if (ring->proxy) in k3_ringacc_ring_cfg()
841 ring->ops = &k3_ring_mode_proxy_ops; in k3_ringacc_ring_cfg()
843 ring->ops = &k3_ring_mode_msg_ops; in k3_ringacc_ring_cfg()
846 ring->ops = NULL; in k3_ringacc_ring_cfg()
851 ring->ring_mem_virt = dma_alloc_coherent(ring->dma_dev, in k3_ringacc_ring_cfg()
852 ring->size * (4 << ring->elm_size), in k3_ringacc_ring_cfg()
853 &ring->ring_mem_dma, GFP_KERNEL); in k3_ringacc_ring_cfg()
854 if (!ring->ring_mem_virt) { in k3_ringacc_ring_cfg()
855 dev_err(ringacc->dev, "Failed to alloc ring mem\n"); in k3_ringacc_ring_cfg()
860 ret = k3_ringacc_ring_cfg_sci(ring); in k3_ringacc_ring_cfg()
865 ring->flags |= K3_RING_FLAG_BUSY; in k3_ringacc_ring_cfg()
866 ring->flags |= (cfg->flags & K3_RINGACC_RING_SHARED) ? in k3_ringacc_ring_cfg()
869 k3_ringacc_ring_dump(ring); in k3_ringacc_ring_cfg()
874 dma_free_coherent(ring->dma_dev, in k3_ringacc_ring_cfg()
875 ring->size * (4 << ring->elm_size), in k3_ringacc_ring_cfg()
876 ring->ring_mem_virt, in k3_ringacc_ring_cfg()
877 ring->ring_mem_dma); in k3_ringacc_ring_cfg()
879 ring->ops = NULL; in k3_ringacc_ring_cfg()
880 ring->dma_dev = NULL; in k3_ringacc_ring_cfg()
882 ring->proxy = NULL; in k3_ringacc_ring_cfg()
887 u32 k3_ringacc_ring_get_size(struct k3_ring *ring) in k3_ringacc_ring_get_size() argument
889 if (!ring || !(ring->flags & K3_RING_FLAG_BUSY)) in k3_ringacc_ring_get_size()
892 return ring->size; in k3_ringacc_ring_get_size()
896 u32 k3_ringacc_ring_get_free(struct k3_ring *ring) in k3_ringacc_ring_get_free() argument
898 if (!ring || !(ring->flags & K3_RING_FLAG_BUSY)) in k3_ringacc_ring_get_free()
901 if (!ring->state.free) in k3_ringacc_ring_get_free()
902 ring->state.free = ring->size - k3_ringacc_ring_read_occ(ring); in k3_ringacc_ring_get_free()
904 return ring->state.free; in k3_ringacc_ring_get_free()
908 u32 k3_ringacc_ring_get_occ(struct k3_ring *ring) in k3_ringacc_ring_get_occ() argument
910 if (!ring || !(ring->flags & K3_RING_FLAG_BUSY)) in k3_ringacc_ring_get_occ()
913 return k3_ringacc_ring_read_occ(ring); in k3_ringacc_ring_get_occ()
917 u32 k3_ringacc_ring_is_full(struct k3_ring *ring) in k3_ringacc_ring_is_full() argument
919 return !k3_ringacc_ring_get_free(ring); in k3_ringacc_ring_is_full()
934 static int k3_ringacc_ring_cfg_proxy(struct k3_ring *ring, in k3_ringacc_ring_cfg_proxy() argument
939 val = ring->ring_id; in k3_ringacc_ring_cfg_proxy()
941 val |= K3_RINGACC_PROXY_ELSIZE(ring->elm_size); in k3_ringacc_ring_cfg_proxy()
942 writel(val, &ring->proxy->control); in k3_ringacc_ring_cfg_proxy()
946 static int k3_ringacc_ring_access_proxy(struct k3_ring *ring, void *elem, in k3_ringacc_ring_access_proxy() argument
951 ptr = (void __iomem *)&ring->proxy->data; in k3_ringacc_ring_access_proxy()
956 k3_ringacc_ring_cfg_proxy(ring, PROXY_ACCESS_MODE_HEAD); in k3_ringacc_ring_access_proxy()
960 k3_ringacc_ring_cfg_proxy(ring, PROXY_ACCESS_MODE_TAIL); in k3_ringacc_ring_access_proxy()
966 ptr += k3_ringacc_ring_get_fifo_pos(ring); in k3_ringacc_ring_access_proxy()
971 dev_dbg(ring->parent->dev, in k3_ringacc_ring_access_proxy()
974 memcpy_fromio(elem, ptr, (4 << ring->elm_size)); in k3_ringacc_ring_access_proxy()
975 ring->state.occ--; in k3_ringacc_ring_access_proxy()
979 dev_dbg(ring->parent->dev, in k3_ringacc_ring_access_proxy()
982 memcpy_toio(ptr, elem, (4 << ring->elm_size)); in k3_ringacc_ring_access_proxy()
983 ring->state.free--; in k3_ringacc_ring_access_proxy()
989 dev_dbg(ring->parent->dev, "proxy: free%d occ%d\n", ring->state.free, in k3_ringacc_ring_access_proxy()
990 ring->state.occ); in k3_ringacc_ring_access_proxy()
994 static int k3_ringacc_ring_push_head_proxy(struct k3_ring *ring, void *elem) in k3_ringacc_ring_push_head_proxy() argument
996 return k3_ringacc_ring_access_proxy(ring, elem, in k3_ringacc_ring_push_head_proxy()
1000 static int k3_ringacc_ring_push_tail_proxy(struct k3_ring *ring, void *elem) in k3_ringacc_ring_push_tail_proxy() argument
1002 return k3_ringacc_ring_access_proxy(ring, elem, in k3_ringacc_ring_push_tail_proxy()
1006 static int k3_ringacc_ring_pop_head_proxy(struct k3_ring *ring, void *elem) in k3_ringacc_ring_pop_head_proxy() argument
1008 return k3_ringacc_ring_access_proxy(ring, elem, in k3_ringacc_ring_pop_head_proxy()
1012 static int k3_ringacc_ring_pop_tail_proxy(struct k3_ring *ring, void *elem) in k3_ringacc_ring_pop_tail_proxy() argument
1014 return k3_ringacc_ring_access_proxy(ring, elem, in k3_ringacc_ring_pop_tail_proxy()
1018 static int k3_ringacc_ring_access_io(struct k3_ring *ring, void *elem, in k3_ringacc_ring_access_io() argument
1026 ptr = (void __iomem *)&ring->fifos->head_data; in k3_ringacc_ring_access_io()
1030 ptr = (void __iomem *)&ring->fifos->tail_data; in k3_ringacc_ring_access_io()
1036 ptr += k3_ringacc_ring_get_fifo_pos(ring); in k3_ringacc_ring_access_io()
1041 dev_dbg(ring->parent->dev, in k3_ringacc_ring_access_io()
1044 memcpy_fromio(elem, ptr, (4 << ring->elm_size)); in k3_ringacc_ring_access_io()
1045 ring->state.occ--; in k3_ringacc_ring_access_io()
1049 dev_dbg(ring->parent->dev, in k3_ringacc_ring_access_io()
1052 memcpy_toio(ptr, elem, (4 << ring->elm_size)); in k3_ringacc_ring_access_io()
1053 ring->state.free--; in k3_ringacc_ring_access_io()
1059 dev_dbg(ring->parent->dev, "free%d index%d occ%d index%d\n", in k3_ringacc_ring_access_io()
1060 ring->state.free, ring->state.windex, ring->state.occ, in k3_ringacc_ring_access_io()
1061 ring->state.rindex); in k3_ringacc_ring_access_io()
1065 static int k3_ringacc_ring_push_head_io(struct k3_ring *ring, void *elem) in k3_ringacc_ring_push_head_io() argument
1067 return k3_ringacc_ring_access_io(ring, elem, in k3_ringacc_ring_push_head_io()
1071 static int k3_ringacc_ring_push_io(struct k3_ring *ring, void *elem) in k3_ringacc_ring_push_io() argument
1073 return k3_ringacc_ring_access_io(ring, elem, in k3_ringacc_ring_push_io()
1077 static int k3_ringacc_ring_pop_io(struct k3_ring *ring, void *elem) in k3_ringacc_ring_pop_io() argument
1079 return k3_ringacc_ring_access_io(ring, elem, in k3_ringacc_ring_pop_io()
1083 static int k3_ringacc_ring_pop_tail_io(struct k3_ring *ring, void *elem) in k3_ringacc_ring_pop_tail_io() argument
1085 return k3_ringacc_ring_access_io(ring, elem, in k3_ringacc_ring_pop_tail_io()
1090 * The element is 48 bits of address + ASEL bits in the ring.
1099 static int k3_dmaring_fwd_pop(struct k3_ring *ring, void *elem) in k3_dmaring_fwd_pop() argument
1105 * DMA rings: the forward ring is always tied to a DMA channel and HW does not in k3_dmaring_fwd_pop()
1110 ring->state.occ = k3_ringacc_ring_read_occ(ring); in k3_dmaring_fwd_pop()
1111 if (ring->state.windex >= ring->state.occ) in k3_dmaring_fwd_pop()
1112 elem_idx = ring->state.windex - ring->state.occ; in k3_dmaring_fwd_pop()
1114 elem_idx = ring->size - (ring->state.occ - ring->state.windex); in k3_dmaring_fwd_pop()
1116 elem_ptr = k3_ringacc_get_elm_addr(ring, elem_idx); in k3_dmaring_fwd_pop()
1117 memcpy(elem, elem_ptr, (4 << ring->elm_size)); in k3_dmaring_fwd_pop()
1120 ring->state.occ--; in k3_dmaring_fwd_pop()
1121 writel(-1, &ring->rt->db); in k3_dmaring_fwd_pop()
1123 dev_dbg(ring->parent->dev, "%s: occ%d Windex%d Rindex%d pos_ptr%px\n", in k3_dmaring_fwd_pop()
1124 __func__, ring->state.occ, ring->state.windex, elem_idx, in k3_dmaring_fwd_pop()
1129 static int k3_dmaring_reverse_pop(struct k3_ring *ring, void *elem) in k3_dmaring_reverse_pop() argument
1133 elem_ptr = k3_ringacc_get_elm_addr(ring, ring->state.rindex); in k3_dmaring_reverse_pop()
1135 if (ring->state.occ) { in k3_dmaring_reverse_pop()
1136 memcpy(elem, elem_ptr, (4 << ring->elm_size)); in k3_dmaring_reverse_pop()
1139 ring->state.rindex = (ring->state.rindex + 1) % ring->size; in k3_dmaring_reverse_pop()
1140 ring->state.occ--; in k3_dmaring_reverse_pop()
1141 writel(-1 & K3_DMARING_RT_DB_ENTRY_MASK, &ring->rt->db); in k3_dmaring_reverse_pop()
1142 } else if (ring->state.tdown_complete) { in k3_dmaring_reverse_pop()
1146 writel(K3_DMARING_RT_DB_TDOWN_ACK, &ring->rt->db); in k3_dmaring_reverse_pop()
1147 ring->state.tdown_complete = false; in k3_dmaring_reverse_pop()
1150 dev_dbg(ring->parent->dev, "%s: occ%d index%d pos_ptr%px\n", in k3_dmaring_reverse_pop()
1151 __func__, ring->state.occ, ring->state.rindex, elem_ptr); in k3_dmaring_reverse_pop()
1155 static int k3_ringacc_ring_push_mem(struct k3_ring *ring, void *elem) in k3_ringacc_ring_push_mem() argument
1159 elem_ptr = k3_ringacc_get_elm_addr(ring, ring->state.windex); in k3_ringacc_ring_push_mem()
1161 memcpy(elem_ptr, elem, (4 << ring->elm_size)); in k3_ringacc_ring_push_mem()
1162 if (ring->parent->dma_rings) { in k3_ringacc_ring_push_mem()
1165 *addr |= ((u64)ring->asel << K3_ADDRESS_ASEL_SHIFT); in k3_ringacc_ring_push_mem()
1168 ring->state.windex = (ring->state.windex + 1) % ring->size; in k3_ringacc_ring_push_mem()
1169 ring->state.free--; in k3_ringacc_ring_push_mem()
1170 writel(1, &ring->rt->db); in k3_ringacc_ring_push_mem()
1172 dev_dbg(ring->parent->dev, "ring_push_mem: free%d index%d\n", in k3_ringacc_ring_push_mem()
1173 ring->state.free, ring->state.windex); in k3_ringacc_ring_push_mem()
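When the ring fronts a BCDMA/PKTDMA channel, the element pushed here is the 48-bit descriptor address with the ring's ASEL value folded into the bits above it (see the "48 bits of address + ASEL bits" note earlier). A small worked sketch of that OR, assuming K3_ADDRESS_ASEL_SHIFT resolves to 48:

    /* Sketch: fold ASEL into the element above the 48-bit address.
     * E.g. asel == 14 turns address 0x9000_0000 into 0x000e_0000_9000_0000. */
    u64 desc = dma_addr;				/* 48-bit descriptor address */
    desc |= (u64)ring->asel << K3_ADDRESS_ASEL_SHIFT;	/* ASEL bits above it */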
1178 static int k3_ringacc_ring_pop_mem(struct k3_ring *ring, void *elem) in k3_ringacc_ring_pop_mem() argument
1182 elem_ptr = k3_ringacc_get_elm_addr(ring, ring->state.rindex); in k3_ringacc_ring_pop_mem()
1184 memcpy(elem, elem_ptr, (4 << ring->elm_size)); in k3_ringacc_ring_pop_mem()
1186 ring->state.rindex = (ring->state.rindex + 1) % ring->size; in k3_ringacc_ring_pop_mem()
1187 ring->state.occ--; in k3_ringacc_ring_pop_mem()
1188 writel(-1, &ring->rt->db); in k3_ringacc_ring_pop_mem()
1190 dev_dbg(ring->parent->dev, "ring_pop_mem: occ%d index%d pos_ptr%p\n", in k3_ringacc_ring_pop_mem()
1191 ring->state.occ, ring->state.rindex, elem_ptr); in k3_ringacc_ring_pop_mem()
1195 int k3_ringacc_ring_push(struct k3_ring *ring, void *elem) in k3_ringacc_ring_push() argument
1199 if (!ring || !(ring->flags & K3_RING_FLAG_BUSY)) in k3_ringacc_ring_push()
1202 dev_dbg(ring->parent->dev, "ring_push: free%d index%d\n", in k3_ringacc_ring_push()
1203 ring->state.free, ring->state.windex); in k3_ringacc_ring_push()
1205 if (k3_ringacc_ring_is_full(ring)) in k3_ringacc_ring_push()
1208 if (ring->ops && ring->ops->push_tail) in k3_ringacc_ring_push()
1209 ret = ring->ops->push_tail(ring, elem); in k3_ringacc_ring_push()
1215 int k3_ringacc_ring_push_head(struct k3_ring *ring, void *elem) in k3_ringacc_ring_push_head() argument
1219 if (!ring || !(ring->flags & K3_RING_FLAG_BUSY)) in k3_ringacc_ring_push_head()
1222 dev_dbg(ring->parent->dev, "ring_push_head: free%d index%d\n", in k3_ringacc_ring_push_head()
1223 ring->state.free, ring->state.windex); in k3_ringacc_ring_push_head()
1225 if (k3_ringacc_ring_is_full(ring)) in k3_ringacc_ring_push_head()
1228 if (ring->ops && ring->ops->push_head) in k3_ringacc_ring_push_head()
1229 ret = ring->ops->push_head(ring, elem); in k3_ringacc_ring_push_head()
1235 int k3_ringacc_ring_pop(struct k3_ring *ring, void *elem) in k3_ringacc_ring_pop() argument
1239 if (!ring || !(ring->flags & K3_RING_FLAG_BUSY)) in k3_ringacc_ring_pop()
1242 if (!ring->state.occ) in k3_ringacc_ring_pop()
1243 k3_ringacc_ring_update_occ(ring); in k3_ringacc_ring_pop()
1245 dev_dbg(ring->parent->dev, "ring_pop: occ%d index%d\n", ring->state.occ, in k3_ringacc_ring_pop()
1246 ring->state.rindex); in k3_ringacc_ring_pop()
1248 if (!ring->state.occ && !ring->state.tdown_complete) in k3_ringacc_ring_pop()
1251 if (ring->ops && ring->ops->pop_head) in k3_ringacc_ring_pop()
1252 ret = ring->ops->pop_head(ring, elem); in k3_ringacc_ring_pop()
1258 int k3_ringacc_ring_pop_tail(struct k3_ring *ring, void *elem) in k3_ringacc_ring_pop_tail() argument
1262 if (!ring || !(ring->flags & K3_RING_FLAG_BUSY)) in k3_ringacc_ring_pop_tail()
1265 if (!ring->state.occ) in k3_ringacc_ring_pop_tail()
1266 k3_ringacc_ring_update_occ(ring); in k3_ringacc_ring_pop_tail()
1268 dev_dbg(ring->parent->dev, "ring_pop_tail: occ%d index%d\n", in k3_ringacc_ring_pop_tail()
1269 ring->state.occ, ring->state.rindex); in k3_ringacc_ring_pop_tail()
1271 if (!ring->state.occ) in k3_ringacc_ring_pop_tail()
1274 if (ring->ops && ring->ops->pop_tail) in k3_ringacc_ring_pop_tail()
1275 ret = ring->ops->pop_tail(ring, elem); in k3_ringacc_ring_pop_tail()
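All four public entry points follow the same pattern: validate the ring and its BUSY flag, refresh the cached free/occupancy counters from the RT registers when needed, then dispatch through the per-mode ops table. A hedged caller-side sketch of the usual push/pop flow (ring handles, element size and the descriptor-address payload are assumptions):

    /* Sketch: typical client enqueue/dequeue of one element, assumed here to
     * be an 8-byte descriptor DMA address. Return codes should be checked. */
    dma_addr_t desc_dma = /* descriptor address */ 0;

    k3_ringacc_ring_push(tx_ring, &desc_dma);		/* hand descriptor to HW */

    if (k3_ringacc_ring_get_occ(rx_ring))
    	k3_ringacc_ring_pop(rx_ring, &desc_dma);	/* fetch a completed one */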
1433 dev_info(dev, "Ring Accelerator probed rings:%u, gp-rings[%u,%u] sci-dev-id:%u\n", in k3_ringacc_init()
1438 dev_info(dev, "dma-ring-reset-quirk: %s\n", in k3_ringacc_init()
1498 struct k3_ring *ring = &ringacc->rings[i]; in k3_ringacc_dmarings_init() local
1500 ring->rt = base_rt + K3_DMARING_RT_REGS_STEP * i; in k3_ringacc_dmarings_init()
1501 ring->parent = ringacc; in k3_ringacc_dmarings_init()
1502 ring->ring_id = i; in k3_ringacc_dmarings_init()
1503 ring->proxy_id = K3_RINGACC_PROXY_NOT_USED; in k3_ringacc_dmarings_init()
1505 ring = &ringacc->rings[ringacc->num_rings + i]; in k3_ringacc_dmarings_init()
1506 ring->rt = base_rt + K3_DMARING_RT_REGS_STEP * i + in k3_ringacc_dmarings_init()
1508 ring->parent = ringacc; in k3_ringacc_dmarings_init()
1509 ring->ring_id = i; in k3_ringacc_dmarings_init()
1510 ring->proxy_id = K3_RINGACC_PROXY_NOT_USED; in k3_ringacc_dmarings_init()
1511 ring->flags = K3_RING_FLAG_REVERSE; in k3_ringacc_dmarings_init()