Lines matching "rx-port-mapping" (all hits below are from drivers/net/ethernet/mscc/ocelot_fdma.c)

1 // SPDX-License-Identifier: (GPL-2.0 OR MIT)
24 regmap_write(ocelot->targets[FDMA], reg, data); in ocelot_fdma_writel()
31 regmap_read(ocelot->targets[FDMA], reg, &retval); in ocelot_fdma_readl()
43 return (dma - base) / sizeof(struct ocelot_fdma_dcb); in ocelot_fdma_dma_idx()
48 return unlikely(idx == ring_sz - 1) ? 0 : idx + 1; in ocelot_fdma_idx_next()
53 return unlikely(idx == 0) ? ring_sz - 1 : idx - 1; in ocelot_fdma_idx_prev()
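
The index helpers above convert a DCB's DMA address back into its ring slot and step indices with wraparound. A minimal userspace sketch of the same arithmetic; RING_SZ and struct dcb are stand-ins, not the driver's definitions:

/* Illustrative sketch, not driver code: ring-index arithmetic. */
#include <stdint.h>
#include <stdio.h>

#define RING_SZ 128                     /* stand-in ring size */

struct dcb { uint32_t llp, datap, datal, stat; };

static unsigned int idx_next(unsigned int idx)
{
        return idx == RING_SZ - 1 ? 0 : idx + 1;
}

static unsigned int idx_prev(unsigned int idx)
{
        return idx == 0 ? RING_SZ - 1 : idx - 1;
}

/* DMA address of a DCB -> ring index, as ocelot_fdma_dma_idx() does */
static unsigned int dma_idx(uint64_t base, uint64_t dma)
{
        return (dma - base) / sizeof(struct dcb);
}

int main(void)
{
        uint64_t base = 0x10000000ULL;

        printf("%u\n", idx_next(RING_SZ - 1));  /* 0: wraps forward */
        printf("%u\n", idx_prev(0));            /* 127: wraps backward */
        printf("%u\n", dma_idx(base, base + 3 * sizeof(struct dcb))); /* 3 */
        return 0;
}
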
58 struct ocelot_fdma_rx_ring *rx_ring = &fdma->rx_ring; in ocelot_fdma_rx_ring_free()
60 if (rx_ring->next_to_use >= rx_ring->next_to_clean) in ocelot_fdma_rx_ring_free()
61 return OCELOT_FDMA_RX_RING_SIZE - in ocelot_fdma_rx_ring_free()
62 (rx_ring->next_to_use - rx_ring->next_to_clean) - 1; in ocelot_fdma_rx_ring_free()
64 return rx_ring->next_to_clean - rx_ring->next_to_use - 1; in ocelot_fdma_rx_ring_free()
69 struct ocelot_fdma_tx_ring *tx_ring = &fdma->tx_ring; in ocelot_fdma_tx_ring_free()
71 if (tx_ring->next_to_use >= tx_ring->next_to_clean) in ocelot_fdma_tx_ring_free()
72 return OCELOT_FDMA_TX_RING_SIZE - in ocelot_fdma_tx_ring_free()
73 (tx_ring->next_to_use - tx_ring->next_to_clean) - 1; in ocelot_fdma_tx_ring_free()
75 return tx_ring->next_to_clean - tx_ring->next_to_use - 1; in ocelot_fdma_tx_ring_free()
80 struct ocelot_fdma_tx_ring *tx_ring = &fdma->tx_ring; in ocelot_fdma_tx_ring_empty()
82 return tx_ring->next_to_clean == tx_ring->next_to_use; in ocelot_fdma_tx_ring_empty()
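
Both free-space helpers above follow the classic single-producer/single-consumer ring convention: one slot is always sacrificed (the trailing "- 1") so that next_to_use == next_to_clean can unambiguously mean empty rather than full. A runnable sketch of the same formula with a stand-in ring size:

/* Illustrative sketch: the "keep one slot empty" free-space formula. */
#include <stdio.h>

#define SIZE 128        /* stand-in for OCELOT_FDMA_*_RING_SIZE */

static unsigned int ring_free(unsigned int next_to_use,
                              unsigned int next_to_clean)
{
        if (next_to_use >= next_to_clean)
                return SIZE - (next_to_use - next_to_clean) - 1;
        return next_to_clean - next_to_use - 1;
}

int main(void)
{
        printf("%u\n", ring_free(0, 0)); /* empty: 127, one slot reserved */
        printf("%u\n", ring_free(5, 3)); /* two DCBs in flight: 125 */
        printf("%u\n", ring_free(3, 5)); /* producer wrapped: 1 */
        return 0;
}
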
116 dcb->llp = 0; in ocelot_fdma_dcb_set_data()
117 dcb->datap = ALIGN_DOWN(dma_addr, 4); in ocelot_fdma_dcb_set_data()
118 dcb->datal = ALIGN_DOWN(size, 4); in ocelot_fdma_dcb_set_data()
119 dcb->stat = MSCC_FDMA_DCB_STAT_BLOCKO(offset); in ocelot_fdma_dcb_set_data()
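
ocelot_fdma_dcb_set_data() satisfies the hardware's 4-byte alignment requirement without copying data: the pointer and length are rounded down to multiples of 4, and the stripped low bits of the address travel in the status word's BLOCKO field. A standalone check of the invariant datap + offset == dma_addr (ALIGN_DOWN re-implemented here):

/* Illustrative sketch: datap + offset reconstructs the original address. */
#include <stdint.h>
#include <stdio.h>

#define ALIGN_DOWN(x, a)        ((x) & ~((uint64_t)(a) - 1))

int main(void)
{
        uint64_t dma_addr = 0x20001236ULL;      /* deliberately unaligned */
        uint64_t datap = ALIGN_DOWN(dma_addr, 4);
        unsigned int offset = dma_addr & 0x3;

        printf("datap=0x%llx offset=%u ok=%d\n",
               (unsigned long long)datap, offset,
               datap + offset == dma_addr);     /* 0x20001234 2 1 */
        return 0;
}
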
125 dma_addr_t mapping; in ocelot_fdma_rx_alloc_page() local
132 mapping = dma_map_page(ocelot->dev, page, 0, PAGE_SIZE, in ocelot_fdma_rx_alloc_page()
134 if (unlikely(dma_mapping_error(ocelot->dev, mapping))) { in ocelot_fdma_rx_alloc_page()
139 rxb->page = page; in ocelot_fdma_rx_alloc_page()
140 rxb->page_offset = 0; in ocelot_fdma_rx_alloc_page()
141 rxb->dma_addr = mapping; in ocelot_fdma_rx_alloc_page()
148 struct ocelot_fdma *fdma = ocelot->fdma; in ocelot_fdma_alloc_rx_buffs()
156 rx_ring = &fdma->rx_ring; in ocelot_fdma_alloc_rx_buffs()
157 idx = rx_ring->next_to_use; in ocelot_fdma_alloc_rx_buffs()
159 while (alloc_cnt--) { in ocelot_fdma_alloc_rx_buffs()
160 rxb = &rx_ring->bufs[idx]; in ocelot_fdma_alloc_rx_buffs()
162 if (unlikely(!rxb->page)) { in ocelot_fdma_alloc_rx_buffs()
164 dev_err_ratelimited(ocelot->dev, in ocelot_fdma_alloc_rx_buffs()
165 "Failed to allocate rx\n"); in ocelot_fdma_alloc_rx_buffs()
166 ret = -ENOMEM; in ocelot_fdma_alloc_rx_buffs()
171 dcb = &rx_ring->dcbs[idx]; in ocelot_fdma_alloc_rx_buffs()
172 dma_addr = rxb->dma_addr + rxb->page_offset; in ocelot_fdma_alloc_rx_buffs()
177 dcb->llp = ocelot_fdma_idx_dma(rx_ring->dcbs_dma, idx); in ocelot_fdma_alloc_rx_buffs()
180 rx_ring->next_to_use = idx; in ocelot_fdma_alloc_rx_buffs()
181 rx_ring->next_to_alloc = idx; in ocelot_fdma_alloc_rx_buffs()
191 dma_addr_t mapping; in ocelot_fdma_tx_dcb_set_skb() local
193 mapping = dma_map_single(ocelot->dev, skb->data, skb->len, in ocelot_fdma_tx_dcb_set_skb()
195 if (unlikely(dma_mapping_error(ocelot->dev, mapping))) in ocelot_fdma_tx_dcb_set_skb()
198 dma_unmap_addr_set(tx_buf, dma_addr, mapping); in ocelot_fdma_tx_dcb_set_skb()
200 ocelot_fdma_dcb_set_data(dcb, mapping, OCELOT_FDMA_RX_SIZE); in ocelot_fdma_tx_dcb_set_skb()
201 tx_buf->skb = skb; in ocelot_fdma_tx_dcb_set_skb()
202 dcb->stat |= MSCC_FDMA_DCB_STAT_BLOCKL(skb->len); in ocelot_fdma_tx_dcb_set_skb()
203 dcb->stat |= MSCC_FDMA_DCB_STAT_SOF | MSCC_FDMA_DCB_STAT_EOF; in ocelot_fdma_tx_dcb_set_skb()
228 idx = ocelot_fdma_idx_prev(rx_ring->next_to_use, in ocelot_fdma_rx_set_llp()
230 dcb = &rx_ring->dcbs[idx]; in ocelot_fdma_rx_set_llp()
231 dcb->llp = 0; in ocelot_fdma_rx_set_llp()
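
In the refill loop above, each DCB's llp is pointed at the DMA address of the next DCB, turning the RX ring into a linked list the FDMA hardware walks on its own; ocelot_fdma_rx_set_llp() then zeroes llp on the DCB just before next_to_use so the channel halts on a NULL link instead of running into unfilled slots. A sketch of that chaining, with stand-in sizes and addresses:

/* Illustrative sketch: DCBs linked by DMA address, NULL-terminated. */
#include <stdint.h>
#include <stdio.h>

#define RING_SZ 8

struct dcb { uint32_t llp, datap, datal, stat; };

int main(void)
{
        struct dcb dcbs[RING_SZ] = { 0 };
        uint32_t dcbs_dma = 0x10000000; /* pretend coherent DMA base */
        unsigned int next_to_use = 5;   /* first slot not yet refilled */

        /* Chain every DCB to the DMA address of the next one. */
        for (unsigned int i = 0; i < RING_SZ; i++)
                dcbs[i].llp = dcbs_dma +
                              ((i + 1) % RING_SZ) * sizeof(struct dcb);

        /* Terminate at the last filled DCB, as rx_set_llp() does. */
        dcbs[(next_to_use + RING_SZ - 1) % RING_SZ].llp = 0;

        for (unsigned int i = 0; i < RING_SZ; i++)
                printf("dcb[%u].llp = 0x%x\n", i, dcbs[i].llp);
        return 0;
}
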
236 struct ocelot_fdma *fdma = ocelot->fdma; in ocelot_fdma_rx_restart()
244 rx_ring = &fdma->rx_ring; in ocelot_fdma_rx_restart()
247 dev_err_ratelimited(ocelot->dev, in ocelot_fdma_rx_restart()
248 "Unable to stop RX channel\n"); in ocelot_fdma_rx_restart()
255 * we processed some DCBs in RX, there is free space, and we must set in ocelot_fdma_rx_restart()
259 dma_base = rx_ring->dcbs_dma; in ocelot_fdma_rx_restart()
274 struct page *page = rxb->page; in ocelot_fdma_add_rx_frag()
279 skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page, in ocelot_fdma_add_rx_frag()
280 rxb->page_offset, size, OCELOT_FDMA_RX_SIZE); in ocelot_fdma_add_rx_frag()
288 rxb->page_offset ^= OCELOT_FDMA_RX_SIZE; in ocelot_fdma_add_rx_frag()
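
page_offset ^= OCELOT_FDMA_RX_SIZE is the usual half-page recycling trick: if each RX buffer is half a page (the assumption under which the XOR makes sense), flipping the offset alternates between the two halves, so one mapped page serves two buffers before it must be freed or remapped:

/* Illustrative sketch of half-page buffer flipping. */
#include <stdio.h>

#define PAGE_SZ 4096
#define RX_SIZE (PAGE_SZ / 2)   /* stand-in for OCELOT_FDMA_RX_SIZE */

int main(void)
{
        unsigned int page_offset = 0;

        for (int i = 0; i < 4; i++) {
                printf("use bytes [%u, %u) of the page\n",
                       page_offset, page_offset + RX_SIZE);
                page_offset ^= RX_SIZE; /* 0 -> 2048 -> 0 -> 2048 */
        }
        return 0;
}
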
298 struct ocelot_fdma_rx_ring *rx_ring = &ocelot->fdma->rx_ring; in ocelot_fdma_reuse_rx_page()
301 new_rxb = &rx_ring->bufs[rx_ring->next_to_alloc]; in ocelot_fdma_reuse_rx_page()
302 rx_ring->next_to_alloc = ocelot_fdma_idx_next(rx_ring->next_to_alloc, in ocelot_fdma_reuse_rx_page()
309 dma_sync_single_range_for_device(ocelot->dev, old_rxb->dma_addr, in ocelot_fdma_reuse_rx_page()
310 old_rxb->page_offset, in ocelot_fdma_reuse_rx_page()
322 void *buff_addr = page_address(rxb->page) + in ocelot_fdma_get_skb()
323 rxb->page_offset; in ocelot_fdma_get_skb()
327 dev_err_ratelimited(ocelot->dev, in ocelot_fdma_get_skb()
334 dma_sync_single_range_for_cpu(ocelot->dev, rxb->dma_addr, in ocelot_fdma_get_skb()
335 rxb->page_offset, OCELOT_FDMA_RX_SIZE, in ocelot_fdma_get_skb()
343 dma_unmap_page(ocelot->dev, rxb->dma_addr, PAGE_SIZE, in ocelot_fdma_get_skb()
347 /* clear rx buff content */ in ocelot_fdma_get_skb()
348 rxb->page = NULL; in ocelot_fdma_get_skb()
356 void *xfh = skb->data; in ocelot_fdma_receive_skb()
363 if (unlikely(src_port >= ocelot->num_phys_ports)) in ocelot_fdma_receive_skb()
370 if (pskb_trim(skb, skb->len - ETH_FCS_LEN)) in ocelot_fdma_receive_skb()
373 skb->dev = ndev; in ocelot_fdma_receive_skb()
374 skb->protocol = eth_type_trans(skb, skb->dev); in ocelot_fdma_receive_skb()
375 skb->dev->stats.rx_bytes += skb->len; in ocelot_fdma_receive_skb()
376 skb->dev->stats.rx_packets++; in ocelot_fdma_receive_skb()
378 if (ocelot->ptp) { in ocelot_fdma_receive_skb()
391 struct ocelot_fdma *fdma = ocelot->fdma; in ocelot_fdma_rx_get()
402 rx_ring = &fdma->rx_ring; in ocelot_fdma_rx_get()
403 skb = rx_ring->skb; in ocelot_fdma_rx_get()
405 while (budget--) { in ocelot_fdma_rx_get()
406 idx = rx_ring->next_to_clean; in ocelot_fdma_rx_get()
407 dcb = &rx_ring->dcbs[idx]; in ocelot_fdma_rx_get()
408 stat = dcb->stat; in ocelot_fdma_rx_get()
420 rxb = &rx_ring->bufs[idx]; in ocelot_fdma_rx_get()
430 rx_ring->next_to_clean = idx; in ocelot_fdma_rx_get()
434 dev_err_ratelimited(ocelot->dev, in ocelot_fdma_rx_get()
453 rx_ring->skb = skb; in ocelot_fdma_rx_get()
466 int port; in ocelot_fdma_wakeup_netdev() local
468 for (port = 0; port < ocelot->num_phys_ports; port++) { in ocelot_fdma_wakeup_netdev()
469 ocelot_port = ocelot->ports[port]; in ocelot_fdma_wakeup_netdev()
473 port); in ocelot_fdma_wakeup_netdev()
474 dev = priv->dev; in ocelot_fdma_wakeup_netdev()
483 struct ocelot_fdma *fdma = ocelot->fdma; in ocelot_fdma_tx_cleanup()
495 tx_ring = &fdma->tx_ring; in ocelot_fdma_tx_cleanup()
501 ntc = tx_ring->next_to_clean; in ocelot_fdma_tx_cleanup()
502 dcb = &tx_ring->dcbs[ntc]; in ocelot_fdma_tx_cleanup()
503 if (!(dcb->stat & MSCC_FDMA_DCB_STAT_PD)) in ocelot_fdma_tx_cleanup()
506 buf = &tx_ring->bufs[ntc]; in ocelot_fdma_tx_cleanup()
507 skb = buf->skb; in ocelot_fdma_tx_cleanup()
508 dma_unmap_single(ocelot->dev, dma_unmap_addr(buf, dma_addr), in ocelot_fdma_tx_cleanup()
509 skb->len, DMA_TO_DEVICE); in ocelot_fdma_tx_cleanup()
511 dcb_llp = dcb->llp; in ocelot_fdma_tx_cleanup()
514 tx_ring->next_to_clean = ocelot_fdma_idx_next(ntc, in ocelot_fdma_tx_cleanup()
536 dev_warn(ocelot->dev, in ocelot_fdma_tx_cleanup()
542 new_null_llp_idx = ocelot_fdma_idx_prev(tx_ring->next_to_use, in ocelot_fdma_tx_cleanup()
544 dcb = &tx_ring->dcbs[new_null_llp_idx]; in ocelot_fdma_tx_cleanup()
545 dcb->llp = 0; in ocelot_fdma_tx_cleanup()
547 dma = ocelot_fdma_idx_dma(tx_ring->dcbs_dma, tx_ring->next_to_clean); in ocelot_fdma_tx_cleanup()
554 struct ocelot *ocelot = fdma->ocelot; in ocelot_fdma_napi_poll()
568 napi_complete_done(&fdma->napi, work_done); in ocelot_fdma_napi_poll()
590 napi_schedule(&ocelot->fdma->napi); in ocelot_fdma_interrupt()
596 dev_err_ratelimited(ocelot->dev, in ocelot_fdma_interrupt()
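
The poll function and the interrupt handler above split work the standard NAPI way: the IRQ only calls napi_schedule(), and the poller may call napi_complete_done() and re-enable interrupts only after consuming less than its budget. A userspace simulation of that control flow (the packet counts are made up):

/* Illustrative simulation of the NAPI budget contract. */
#include <stdbool.h>
#include <stdio.h>

#define BUDGET 64

static int pending = 70;        /* pretend frames waiting in the RX ring */

static int poll_once(int budget)
{
        int done = pending < budget ? pending : budget;

        pending -= done;
        return done;
}

int main(void)
{
        bool irq_enabled = false;       /* IRQ masked while polling */

        while (!irq_enabled) {
                int work = poll_once(BUDGET);

                if (work < BUDGET) {
                        /* napi_complete_done() analogue: stop polling,
                         * unmask the interrupt. */
                        irq_enabled = true;
                        printf("complete (work=%d), IRQ re-enabled\n", work);
                } else {
                        printf("budget used (work=%d), poll again\n", work);
                }
        }
        return 0;
}
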
610 struct ocelot_fdma_tx_ring *tx_ring = &fdma->tx_ring; in ocelot_fdma_send_skb()
616 dcb = &tx_ring->dcbs[tx_ring->next_to_use]; in ocelot_fdma_send_skb()
617 tx_buf = &tx_ring->bufs[tx_ring->next_to_use]; in ocelot_fdma_send_skb()
623 next_idx = ocelot_fdma_idx_next(tx_ring->next_to_use, in ocelot_fdma_send_skb()
629 dma = ocelot_fdma_idx_dma(tx_ring->dcbs_dma, in ocelot_fdma_send_skb()
630 tx_ring->next_to_use); in ocelot_fdma_send_skb()
634 dcb->llp = ocelot_fdma_idx_dma(tx_ring->dcbs_dma, next_idx); in ocelot_fdma_send_skb()
637 tx_ring->next_to_use = next_idx; in ocelot_fdma_send_skb()
640 static int ocelot_fdma_prepare_skb(struct ocelot *ocelot, int port, u32 rew_op, in ocelot_fdma_prepare_skb() argument
643 int needed_headroom = max_t(int, OCELOT_TAG_LEN - skb_headroom(skb), 0); in ocelot_fdma_prepare_skb()
644 int needed_tailroom = max_t(int, ETH_FCS_LEN - skb_tailroom(skb), 0); in ocelot_fdma_prepare_skb()
661 dev->name, err); in ocelot_fdma_prepare_skb()
668 ocelot_ifh_set_basic(ifh, ocelot, port, rew_op, skb); in ocelot_fdma_prepare_skb()
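
ocelot_fdma_prepare_skb() grows the skb only by what is actually missing: OCELOT_TAG_LEN of headroom for the injection frame header and ETH_FCS_LEN of tailroom for the checksum, each clamped at zero when enough room already exists. The same max-with-zero computation, standalone (the 16-byte tag length is an assumption here):

/* Illustrative sketch of the headroom/tailroom math. */
#include <stdio.h>

#define OCELOT_TAG_LEN  16      /* injection header length, assumed */
#define ETH_FCS_LEN     4

static int max_int(int a, int b) { return a > b ? a : b; }

int main(void)
{
        int headroom = 10, tailroom = 0; /* what this skb happens to have */

        int needed_headroom = max_int(OCELOT_TAG_LEN - headroom, 0);
        int needed_tailroom = max_int(ETH_FCS_LEN - tailroom, 0);

        /* The driver reallocates only when either value is non-zero. */
        printf("needed headroom=%d tailroom=%d\n",
               needed_headroom, needed_tailroom);       /* 6 and 4 */
        return 0;
}
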
673 int ocelot_fdma_inject_frame(struct ocelot *ocelot, int port, u32 rew_op, in ocelot_fdma_inject_frame() argument
676 struct ocelot_fdma *fdma = ocelot->fdma; in ocelot_fdma_inject_frame()
679 spin_lock(&fdma->tx_ring.xmit_lock); in ocelot_fdma_inject_frame()
687 if (ocelot_fdma_prepare_skb(ocelot, port, rew_op, skb, dev)) in ocelot_fdma_inject_frame()
693 spin_unlock(&fdma->tx_ring.xmit_lock); in ocelot_fdma_inject_frame()
700 struct ocelot_fdma *fdma = ocelot->fdma; in ocelot_fdma_free_rx_ring()
705 rx_ring = &fdma->rx_ring; in ocelot_fdma_free_rx_ring()
706 idx = rx_ring->next_to_clean; in ocelot_fdma_free_rx_ring()
708 /* Free the pages held in the RX ring */ in ocelot_fdma_free_rx_ring()
709 while (idx != rx_ring->next_to_use) { in ocelot_fdma_free_rx_ring()
710 rxb = &rx_ring->bufs[idx]; in ocelot_fdma_free_rx_ring()
711 dma_unmap_page(ocelot->dev, rxb->dma_addr, PAGE_SIZE, in ocelot_fdma_free_rx_ring()
713 __free_page(rxb->page); in ocelot_fdma_free_rx_ring()
717 if (fdma->rx_ring.skb) in ocelot_fdma_free_rx_ring()
718 dev_kfree_skb_any(fdma->rx_ring.skb); in ocelot_fdma_free_rx_ring()
723 struct ocelot_fdma *fdma = ocelot->fdma; in ocelot_fdma_free_tx_ring()
729 tx_ring = &fdma->tx_ring; in ocelot_fdma_free_tx_ring()
730 idx = tx_ring->next_to_clean; in ocelot_fdma_free_tx_ring()
732 while (idx != tx_ring->next_to_use) { in ocelot_fdma_free_tx_ring()
733 txb = &tx_ring->bufs[idx]; in ocelot_fdma_free_tx_ring()
734 skb = txb->skb; in ocelot_fdma_free_tx_ring()
735 dma_unmap_single(ocelot->dev, dma_unmap_addr(txb, dma_addr), in ocelot_fdma_free_tx_ring()
736 skb->len, DMA_TO_DEVICE); in ocelot_fdma_free_tx_ring()
744 struct ocelot_fdma *fdma = ocelot->fdma; in ocelot_fdma_rings_alloc()
751 fdma->dcbs_base = dmam_alloc_coherent(ocelot->dev, in ocelot_fdma_rings_alloc()
753 &fdma->dcbs_dma_base, GFP_KERNEL); in ocelot_fdma_rings_alloc()
754 if (!fdma->dcbs_base) in ocelot_fdma_rings_alloc()
755 return -ENOMEM; in ocelot_fdma_rings_alloc()
758 dcbs = fdma->dcbs_base; in ocelot_fdma_rings_alloc()
759 dcbs_dma = fdma->dcbs_dma_base; in ocelot_fdma_rings_alloc()
767 fdma->tx_ring.dcbs = dcbs; in ocelot_fdma_rings_alloc()
768 fdma->tx_ring.dcbs_dma = dcbs_dma; in ocelot_fdma_rings_alloc()
769 spin_lock_init(&fdma->tx_ring.xmit_lock); in ocelot_fdma_rings_alloc()
771 /* RX queue */ in ocelot_fdma_rings_alloc()
772 fdma->rx_ring.dcbs = dcbs + OCELOT_FDMA_TX_RING_SIZE; in ocelot_fdma_rings_alloc()
773 fdma->rx_ring.dcbs_dma = dcbs_dma + OCELOT_FDMA_TX_DCB_SIZE; in ocelot_fdma_rings_alloc()
782 * the RX chan, but this is for the first run in ocelot_fdma_rings_alloc()
784 ocelot_fdma_rx_set_llp(&fdma->rx_ring); in ocelot_fdma_rings_alloc()
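
ocelot_fdma_rings_alloc() performs a single dmam_alloc_coherent() call and carves both DCB arrays out of it: TX DCBs at the base, RX DCBs immediately after, which is why rx_ring.dcbs_dma is just dcbs_dma_base plus the TX area size. The offset arithmetic with stand-in ring sizes:

/* Illustrative sketch of the shared coherent DCB layout. */
#include <stdint.h>
#include <stdio.h>

struct dcb { uint32_t llp, datap, datal, stat; };

#define TX_RING_SZ 128                  /* stand-in size */
#define TX_DCB_SIZE (TX_RING_SZ * sizeof(struct dcb))

int main(void)
{
        uint64_t dcbs_dma_base = 0x30000000ULL; /* pretend coherent base */
        uint64_t tx_dcbs_dma = dcbs_dma_base;
        uint64_t rx_dcbs_dma = dcbs_dma_base + TX_DCB_SIZE;

        printf("tx dcbs @ 0x%llx, rx dcbs @ 0x%llx\n",
               (unsigned long long)tx_dcbs_dma,
               (unsigned long long)rx_dcbs_dma);
        return 0;
}
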
791 struct ocelot_fdma *fdma = ocelot->fdma; in ocelot_fdma_netdev_init()
793 dev->needed_headroom = OCELOT_TAG_LEN; in ocelot_fdma_netdev_init()
794 dev->needed_tailroom = ETH_FCS_LEN; in ocelot_fdma_netdev_init()
796 if (fdma->ndev) in ocelot_fdma_netdev_init()
799 fdma->ndev = dev; in ocelot_fdma_netdev_init()
800 netif_napi_add_weight(dev, &fdma->napi, ocelot_fdma_napi_poll, in ocelot_fdma_netdev_init()
806 struct ocelot_fdma *fdma = ocelot->fdma; in ocelot_fdma_netdev_deinit()
808 if (fdma->ndev == dev) { in ocelot_fdma_netdev_deinit()
809 netif_napi_del(&fdma->napi); in ocelot_fdma_netdev_deinit()
810 fdma->ndev = NULL; in ocelot_fdma_netdev_deinit()
816 struct device *dev = ocelot->dev; in ocelot_fdma_init()
824 ocelot->fdma = fdma; in ocelot_fdma_init()
825 ocelot->dev->coherent_dma_mask = DMA_BIT_MASK(32); in ocelot_fdma_init()
829 fdma->ocelot = ocelot; in ocelot_fdma_init()
830 fdma->irq = platform_get_irq_byname(pdev, "fdma"); in ocelot_fdma_init()
831 ret = devm_request_irq(dev, fdma->irq, ocelot_fdma_interrupt, 0, in ocelot_fdma_init()
845 devm_free_irq(dev, fdma->irq, fdma); in ocelot_fdma_init()
849 ocelot->fdma = NULL; in ocelot_fdma_init()
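
Setting coherent_dma_mask to DMA_BIT_MASK(32) in ocelot_fdma_init() restricts coherent allocations, and hence the DCB area, to 32-bit bus addresses, matching the 32-bit llp/datap fields in the DCBs. DMA_BIT_MASK() itself is a small bit trick; below it is reproduced as <linux/dma-mapping.h> defines it:

/* DMA_BIT_MASK() as defined in <linux/dma-mapping.h>, demonstrated. */
#include <stdio.h>

#define DMA_BIT_MASK(n) (((n) == 64) ? ~0ULL : ((1ULL << (n)) - 1))

int main(void)
{
        printf("0x%llx\n", DMA_BIT_MASK(32));   /* 0xffffffff */
        return 0;
}
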
854 struct ocelot_fdma *fdma = ocelot->fdma; in ocelot_fdma_start()
872 napi_enable(&fdma->napi); in ocelot_fdma_start()
874 ocelot_fdma_activate_chan(ocelot, ocelot->fdma->rx_ring.dcbs_dma, in ocelot_fdma_start()
880 struct ocelot_fdma *fdma = ocelot->fdma; in ocelot_fdma_deinit()
887 napi_synchronize(&fdma->napi); in ocelot_fdma_deinit()
888 napi_disable(&fdma->napi); in ocelot_fdma_deinit()