Lines matching refs: tx

277 static void tsnep_tx_ring_cleanup(struct tsnep_tx *tx) in tsnep_tx_ring_cleanup() argument
279 struct device *dmadev = tx->adapter->dmadev; in tsnep_tx_ring_cleanup()
282 memset(tx->entry, 0, sizeof(tx->entry)); in tsnep_tx_ring_cleanup()
285 if (tx->page[i]) { in tsnep_tx_ring_cleanup()
286 dma_free_coherent(dmadev, PAGE_SIZE, tx->page[i], in tsnep_tx_ring_cleanup()
287 tx->page_dma[i]); in tsnep_tx_ring_cleanup()
288 tx->page[i] = NULL; in tsnep_tx_ring_cleanup()
289 tx->page_dma[i] = 0; in tsnep_tx_ring_cleanup()
294 static int tsnep_tx_ring_create(struct tsnep_tx *tx) in tsnep_tx_ring_create() argument
296 struct device *dmadev = tx->adapter->dmadev; in tsnep_tx_ring_create()
303 tx->page[i] = in tsnep_tx_ring_create()
304 dma_alloc_coherent(dmadev, PAGE_SIZE, &tx->page_dma[i], in tsnep_tx_ring_create()
306 if (!tx->page[i]) { in tsnep_tx_ring_create()
311 entry = &tx->entry[TSNEP_RING_ENTRIES_PER_PAGE * i + j]; in tsnep_tx_ring_create()
313 (((u8 *)tx->page[i]) + TSNEP_DESC_SIZE * j); in tsnep_tx_ring_create()
316 entry->desc_dma = tx->page_dma[i] + TSNEP_DESC_SIZE * j; in tsnep_tx_ring_create()
321 entry = &tx->entry[i]; in tsnep_tx_ring_create()
322 next_entry = &tx->entry[(i + 1) & TSNEP_RING_MASK]; in tsnep_tx_ring_create()
329 tsnep_tx_ring_cleanup(tx); in tsnep_tx_ring_create()
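
For reference, a minimal standalone model of the descriptor layout built by tsnep_tx_ring_create(): descriptors are packed back to back inside DMA-coherent pages, so the CPU pointer and the DMA address of entry n are derived from the same page offset, and the last entry wraps back to entry 0 via TSNEP_RING_MASK. The constants below (page size, descriptor size, ring size) are illustrative assumptions, not taken from the driver headers.

#include <stdio.h>

#define PAGE_SIZE_MODEL   4096u  /* assumed, matches a common PAGE_SIZE        */
#define DESC_SIZE_MODEL    128u  /* assumed stand-in for TSNEP_DESC_SIZE       */
#define ENTRIES_PER_PAGE  (PAGE_SIZE_MODEL / DESC_SIZE_MODEL)
#define RING_SIZE_MODEL    256u  /* assumed stand-in for TSNEP_RING_SIZE       */
#define RING_MASK_MODEL   (RING_SIZE_MODEL - 1u)

int main(void)
{
	for (unsigned int n = 0; n < RING_SIZE_MODEL; n++) {
		unsigned int page = n / ENTRIES_PER_PAGE;                 /* tx->page[page]      */
		unsigned int off  = (n % ENTRIES_PER_PAGE) * DESC_SIZE_MODEL;
		unsigned int next = (n + 1u) & RING_MASK_MODEL;

		/* entry->desc     = tx->page[page]     + off
		 * entry->desc_dma = tx->page_dma[page] + off
		 * the following entry in the ring is "next"
		 */
		if (n < 3u || n == RING_SIZE_MODEL - 1u)
			printf("entry %3u: page %u offset %4u next %u\n",
			       n, page, off, next);
	}
	return 0;
}
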
333 static void tsnep_tx_init(struct tsnep_tx *tx) in tsnep_tx_init() argument
337 dma = tx->entry[0].desc_dma | TSNEP_RESET_OWNER_COUNTER; in tsnep_tx_init()
338 iowrite32(DMA_ADDR_LOW(dma), tx->addr + TSNEP_TX_DESC_ADDR_LOW); in tsnep_tx_init()
339 iowrite32(DMA_ADDR_HIGH(dma), tx->addr + TSNEP_TX_DESC_ADDR_HIGH); in tsnep_tx_init()
340 tx->write = 0; in tsnep_tx_init()
341 tx->read = 0; in tsnep_tx_init()
342 tx->owner_counter = 1; in tsnep_tx_init()
343 tx->increment_owner_counter = TSNEP_RING_SIZE - 1; in tsnep_tx_init()
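
The DMA_ADDR_LOW()/DMA_ADDR_HIGH() writes in tsnep_tx_init() program a 64-bit descriptor base address through two 32-bit registers, after OR-ing the TSNEP_RESET_OWNER_COUNTER flag into the address. A minimal sketch of that split, assuming the usual low word / high word convention; the flag value used here is an assumption for illustration only.

#include <stdint.h>
#include <stdio.h>

/* Assumed behaviour of the DMA_ADDR_LOW()/DMA_ADDR_HIGH() helpers. */
static inline uint32_t dma_addr_low(uint64_t dma)
{
	return (uint32_t)(dma & 0xffffffffu);
}

static inline uint32_t dma_addr_high(uint64_t dma)
{
	return (uint32_t)(dma >> 32);
}

int main(void)
{
	uint64_t desc_dma = 0x0000000123456000ull;  /* example bus address      */
	uint64_t reset_owner_counter = 0x1ull;      /* assumed flag value       */
	uint64_t dma = desc_dma | reset_owner_counter;

	printf("LOW=0x%08x HIGH=0x%08x\n",
	       (unsigned int)dma_addr_low(dma), (unsigned int)dma_addr_high(dma));
	return 0;
}
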
346 static void tsnep_tx_enable(struct tsnep_tx *tx) in tsnep_tx_enable() argument
350 nq = netdev_get_tx_queue(tx->adapter->netdev, tx->queue_index); in tsnep_tx_enable()
357 static void tsnep_tx_disable(struct tsnep_tx *tx, struct napi_struct *napi) in tsnep_tx_disable() argument
362 nq = netdev_get_tx_queue(tx->adapter->netdev, tx->queue_index); in tsnep_tx_disable()
369 readx_poll_timeout(ioread32, tx->addr + TSNEP_CONTROL, val, in tsnep_tx_disable()
374 while (READ_ONCE(tx->read) != tx->write) { in tsnep_tx_disable()
380 static void tsnep_tx_activate(struct tsnep_tx *tx, int index, int length, in tsnep_tx_activate() argument
383 struct tsnep_tx_entry *entry = &tx->entry[index]; in tsnep_tx_activate()
416 if (index == tx->increment_owner_counter) { in tsnep_tx_activate()
417 tx->owner_counter++; in tsnep_tx_activate()
418 if (tx->owner_counter == 4) in tsnep_tx_activate()
419 tx->owner_counter = 1; in tsnep_tx_activate()
420 tx->increment_owner_counter--; in tsnep_tx_activate()
421 if (tx->increment_owner_counter < 0) in tsnep_tx_activate()
422 tx->increment_owner_counter = TSNEP_RING_SIZE - 1; in tsnep_tx_activate()
425 (tx->owner_counter << TSNEP_DESC_OWNER_COUNTER_SHIFT) & in tsnep_tx_activate()
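
The counter handling in tsnep_tx_activate() reads as a small state machine: owner_counter cycles through 1..3, and the ring index at which it is bumped (increment_owner_counter) walks backwards through the ring by one slot per revolution, starting from the values set in tsnep_tx_init(). A standalone model of that rotation, with TSNEP_RING_SIZE replaced by an assumed constant:

#include <stdio.h>

#define RING_SIZE_MODEL 256   /* assumed stand-in for TSNEP_RING_SIZE */

struct owner_state {
	int owner_counter;            /* cycles 1..3                          */
	int increment_owner_counter;  /* ring index at which the counter bumps */
};

/* Mirror of the counter handling visible in tsnep_tx_activate() (model only). */
static void activate_index(struct owner_state *s, int index)
{
	if (index == s->increment_owner_counter) {
		s->owner_counter++;
		if (s->owner_counter == 4)
			s->owner_counter = 1;
		s->increment_owner_counter--;
		if (s->increment_owner_counter < 0)
			s->increment_owner_counter = RING_SIZE_MODEL - 1;
	}
}

int main(void)
{
	/* Initial values as set in tsnep_tx_init(). */
	struct owner_state s = { 1, RING_SIZE_MODEL - 1 };

	for (int n = 0; n < 4 * RING_SIZE_MODEL; n++) {
		int index = n % RING_SIZE_MODEL;

		activate_index(&s, index);
		if (index == RING_SIZE_MODEL - 1)
			printf("after pass %d: owner_counter=%d, next bump at %d\n",
			       n / RING_SIZE_MODEL, s.owner_counter,
			       s.increment_owner_counter);
	}
	return 0;
}
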
442 static int tsnep_tx_desc_available(struct tsnep_tx *tx) in tsnep_tx_desc_available() argument
444 if (tx->read <= tx->write) in tsnep_tx_desc_available()
445 return TSNEP_RING_SIZE - tx->write + tx->read - 1; in tsnep_tx_desc_available()
447 return tx->read - tx->write - 1; in tsnep_tx_desc_available()
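
tsnep_tx_desc_available() is the usual producer/consumer free-space formula with one slot deliberately kept unused, so that read == write can only mean an empty ring. A minimal self-check of that arithmetic; the ring size is an assumed stand-in for TSNEP_RING_SIZE.

#include <assert.h>
#include <stdio.h>

#define RING_SIZE_MODEL 256  /* assumed stand-in for TSNEP_RING_SIZE */

/* Same arithmetic as tsnep_tx_desc_available(), on plain ints. */
static int desc_available(int read, int write)
{
	if (read <= write)
		return RING_SIZE_MODEL - write + read - 1;
	return read - write - 1;
}

int main(void)
{
	assert(desc_available(0, 0) == RING_SIZE_MODEL - 1);   /* empty ring    */
	assert(desc_available(10, 9) == 0);                    /* full ring     */
	assert(desc_available(0, RING_SIZE_MODEL - 1) == 0);   /* full, wrapped */
	assert(desc_available(100, 50) == 49);                 /* read ahead    */
	printf("free-space formula checks passed\n");
	return 0;
}
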
467 memcpy(&entry->desc->tx, fragdata, len); in tsnep_tx_map_frag()
472 memcpy(&entry->desc->tx, fragdata + skb_frag_off(frag), in tsnep_tx_map_frag()
483 static int tsnep_tx_map(struct sk_buff *skb, struct tsnep_tx *tx, int count) in tsnep_tx_map() argument
485 struct device *dmadev = tx->adapter->dmadev; in tsnep_tx_map()
493 entry = &tx->entry[(tx->write + i) & TSNEP_RING_MASK]; in tsnep_tx_map()
505 memcpy(&entry->desc->tx, skb->data, len); in tsnep_tx_map()
521 entry->desc->tx = __cpu_to_le64(dma); in tsnep_tx_map()
530 static int tsnep_tx_unmap(struct tsnep_tx *tx, int index, int count) in tsnep_tx_unmap() argument
532 struct device *dmadev = tx->adapter->dmadev; in tsnep_tx_unmap()
538 entry = &tx->entry[(index + i) & TSNEP_RING_MASK]; in tsnep_tx_unmap()
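
tsnep_tx_map() and tsnep_tx_map_frag() fill a descriptor in one of two ways: short data is memcpy()ed directly into the descriptor (entry->desc->tx acting as the start of an inline buffer), longer data is DMA-mapped and only the little-endian bus address is stored. A hedged userspace sketch of that split; the inline threshold and the descriptor layout are assumptions for illustration only.

#include <endian.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define INLINE_MAX 64  /* assumed inline-copy threshold, not from the driver */

struct desc_model {
	uint64_t tx;          /* inline data start, or DMA address (LE)           */
	uint8_t  rest[120];   /* remainder of the (assumed) 128-byte descriptor   */
};

static void fill_desc(struct desc_model *desc, const void *buf, size_t len,
		      uint64_t mapped_dma)
{
	if (len <= INLINE_MAX)
		memcpy(&desc->tx, buf, len);      /* inline copy               */
	else
		desc->tx = htole64(mapped_dma);   /* store mapped bus address  */
}

int main(void)
{
	struct desc_model d = { 0 };
	const char small[] = "short frame head";

	fill_desc(&d, small, sizeof(small), 0);            /* inline path */
	fill_desc(&d, NULL, 1500, 0x123456000ull);         /* mapped path */
	printf("mapped descriptor word: 0x%016llx\n",
	       (unsigned long long)le64toh(d.tx));
	return 0;
}
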
560 struct tsnep_tx *tx) in tsnep_xmit_frame_ring() argument
571 if (tsnep_tx_desc_available(tx) < count) { in tsnep_xmit_frame_ring()
575 netif_stop_subqueue(tx->adapter->netdev, tx->queue_index); in tsnep_xmit_frame_ring()
580 entry = &tx->entry[tx->write]; in tsnep_xmit_frame_ring()
583 retval = tsnep_tx_map(skb, tx, count); in tsnep_xmit_frame_ring()
585 tsnep_tx_unmap(tx, tx->write, count); in tsnep_xmit_frame_ring()
589 tx->dropped++; in tsnep_xmit_frame_ring()
599 tsnep_tx_activate(tx, (tx->write + i) & TSNEP_RING_MASK, length, in tsnep_xmit_frame_ring()
601 tx->write = (tx->write + count) & TSNEP_RING_MASK; in tsnep_xmit_frame_ring()
608 iowrite32(TSNEP_CONTROL_TX_ENABLE, tx->addr + TSNEP_CONTROL); in tsnep_xmit_frame_ring()
610 if (tsnep_tx_desc_available(tx) < (MAX_SKB_FRAGS + 1)) { in tsnep_xmit_frame_ring()
612 netif_stop_subqueue(tx->adapter->netdev, tx->queue_index); in tsnep_xmit_frame_ring()
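
In tsnep_xmit_frame_ring(), descriptor i of a frame lands at (tx->write + i) & TSNEP_RING_MASK, and tx->write then advances by count modulo the ring size before the TSNEP_CONTROL_TX_ENABLE doorbell write. That index arithmetic can be modelled standalone; the ring size is an assumed constant.

#include <stdio.h>

#define RING_SIZE_MODEL 256u                 /* assumed TSNEP_RING_SIZE */
#define RING_MASK_MODEL (RING_SIZE_MODEL - 1u)

int main(void)
{
	unsigned int write = 254;  /* example write pointer near the wrap point */
	unsigned int count = 4;    /* head descriptor + 3 fragments             */

	for (unsigned int i = 0; i < count; i++)
		printf("descriptor %u activated at ring index %u\n",
		       i, (write + i) & RING_MASK_MODEL);

	write = (write + count) & RING_MASK_MODEL;
	printf("new write pointer: %u\n", write);  /* wraps from 254 to 2 */
	return 0;
}
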
618 static int tsnep_xdp_tx_map(struct xdp_frame *xdpf, struct tsnep_tx *tx, in tsnep_xdp_tx_map() argument
621 struct device *dmadev = tx->adapter->dmadev; in tsnep_xdp_tx_map()
634 entry = &tx->entry[(tx->write + i) & TSNEP_RING_MASK]; in tsnep_xdp_tx_map()
660 entry->desc->tx = __cpu_to_le64(dma); in tsnep_xdp_tx_map()
675 struct tsnep_tx *tx, u32 type) in tsnep_xdp_xmit_frame_ring() argument
689 if (tsnep_tx_desc_available(tx) < (MAX_SKB_FRAGS + 1 + count)) in tsnep_xdp_xmit_frame_ring()
692 entry = &tx->entry[tx->write]; in tsnep_xdp_xmit_frame_ring()
695 retval = tsnep_xdp_tx_map(xdpf, tx, shinfo, count, type); in tsnep_xdp_xmit_frame_ring()
697 tsnep_tx_unmap(tx, tx->write, count); in tsnep_xdp_xmit_frame_ring()
700 tx->dropped++; in tsnep_xdp_xmit_frame_ring()
707 tsnep_tx_activate(tx, (tx->write + i) & TSNEP_RING_MASK, length, in tsnep_xdp_xmit_frame_ring()
709 tx->write = (tx->write + count) & TSNEP_RING_MASK; in tsnep_xdp_xmit_frame_ring()
717 static void tsnep_xdp_xmit_flush(struct tsnep_tx *tx) in tsnep_xdp_xmit_flush() argument
719 iowrite32(TSNEP_CONTROL_TX_ENABLE, tx->addr + TSNEP_CONTROL); in tsnep_xdp_xmit_flush()
724 struct netdev_queue *tx_nq, struct tsnep_tx *tx, in tsnep_xdp_xmit_back() argument
742 xmit = tsnep_xdp_xmit_frame_ring(xdpf, tx, type); in tsnep_xdp_xmit_back()
753 static int tsnep_xdp_tx_map_zc(struct xdp_desc *xdpd, struct tsnep_tx *tx) in tsnep_xdp_tx_map_zc() argument
758 entry = &tx->entry[tx->write]; in tsnep_xdp_tx_map_zc()
761 dma = xsk_buff_raw_get_dma(tx->xsk_pool, xdpd->addr); in tsnep_xdp_tx_map_zc()
762 xsk_buff_raw_dma_sync_for_device(tx->xsk_pool, dma, xdpd->len); in tsnep_xdp_tx_map_zc()
767 entry->desc->tx = __cpu_to_le64(dma); in tsnep_xdp_tx_map_zc()
773 struct tsnep_tx *tx) in tsnep_xdp_xmit_frame_ring_zc() argument
777 length = tsnep_xdp_tx_map_zc(xdpd, tx); in tsnep_xdp_xmit_frame_ring_zc()
779 tsnep_tx_activate(tx, tx->write, length, true); in tsnep_xdp_xmit_frame_ring_zc()
780 tx->write = (tx->write + 1) & TSNEP_RING_MASK; in tsnep_xdp_xmit_frame_ring_zc()
783 static void tsnep_xdp_xmit_zc(struct tsnep_tx *tx) in tsnep_xdp_xmit_zc() argument
785 int desc_available = tsnep_tx_desc_available(tx); in tsnep_xdp_xmit_zc()
786 struct xdp_desc *descs = tx->xsk_pool->tx_descs; in tsnep_xdp_xmit_zc()
797 batch = xsk_tx_peek_release_desc_batch(tx->xsk_pool, desc_available); in tsnep_xdp_xmit_zc()
799 tsnep_xdp_xmit_frame_ring_zc(&descs[i], tx); in tsnep_xdp_xmit_zc()
807 tsnep_xdp_xmit_flush(tx); in tsnep_xdp_xmit_zc()
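
tsnep_xdp_xmit_zc() sends zero-copy frames in batches bounded by the free-descriptor count: xsk_tx_peek_release_desc_batch() hands out at most that many descriptors from the XSK pool, each one is mapped and activated, and a single doorbell write flushes the whole batch. A hedged standalone model of that batching budget; the XSK pool helper is replaced by a stub and the constants are assumed.

#include <stdio.h>

#define RING_SIZE_MODEL 256  /* assumed TSNEP_RING_SIZE */

static int desc_available(int read, int write)
{
	if (read <= write)
		return RING_SIZE_MODEL - write + read - 1;
	return read - write - 1;
}

/* Stand-in for xsk_tx_peek_release_desc_batch(): hand out at most "budget"
 * of the descriptors currently queued by user space. */
static int peek_batch_stub(int queued, int budget)
{
	return queued < budget ? queued : budget;
}

int main(void)
{
	int read = 10, write = 200;        /* example ring pointers     */
	int queued = 100;                  /* frames queued in the pool */
	int budget = desc_available(read, write);
	int batch = peek_batch_stub(queued, budget);

	for (int i = 0; i < batch; i++)
		write = (write + 1) & (RING_SIZE_MODEL - 1);  /* map + activate */

	printf("budget=%d batch=%d new write=%d (one doorbell write for all)\n",
	       budget, batch, write);
	return 0;
}
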
811 static bool tsnep_tx_poll(struct tsnep_tx *tx, int napi_budget) in tsnep_tx_poll() argument
820 nq = netdev_get_tx_queue(tx->adapter->netdev, tx->queue_index); in tsnep_tx_poll()
824 if (tx->read == tx->write) in tsnep_tx_poll()
827 entry = &tx->entry[tx->read]; in tsnep_tx_poll()
846 length = tsnep_tx_unmap(tx, tx->read, count); in tsnep_tx_poll()
878 tx->read = (tx->read + count) & TSNEP_RING_MASK; in tsnep_tx_poll()
880 tx->packets++; in tsnep_tx_poll()
881 tx->bytes += length + ETH_FCS_LEN; in tsnep_tx_poll()
886 if (tx->xsk_pool) { in tsnep_tx_poll()
888 xsk_tx_completed(tx->xsk_pool, xsk_frames); in tsnep_tx_poll()
889 if (xsk_uses_need_wakeup(tx->xsk_pool)) in tsnep_tx_poll()
890 xsk_set_tx_need_wakeup(tx->xsk_pool); in tsnep_tx_poll()
891 tsnep_xdp_xmit_zc(tx); in tsnep_tx_poll()
894 if ((tsnep_tx_desc_available(tx) >= ((MAX_SKB_FRAGS + 1) * 2)) && in tsnep_tx_poll()
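
The completion path in tsnep_tx_poll() walks read towards write, unmaps each finished frame, accounts packets and bytes (adding ETH_FCS_LEN per frame), and only wakes the subqueue once at least (MAX_SKB_FRAGS + 1) * 2 descriptors are free again, which gives the stop/start decision some hysteresis against the stop threshold used on the transmit side. A small standalone simulation of that hysteresis, with assumed constants:

#include <stdbool.h>
#include <stdio.h>

#define RING_SIZE_MODEL 256          /* assumed TSNEP_RING_SIZE */
#define MAX_FRAGS_MODEL 17           /* assumed MAX_SKB_FRAGS   */

static int desc_available(int read, int write)
{
	if (read <= write)
		return RING_SIZE_MODEL - write + read - 1;
	return read - write - 1;
}

int main(void)
{
	int read = 0, write = 0;
	bool stopped = false;

	for (int frame = 0; frame < 600; frame++) {
		int count = 1 + (frame % 3);                 /* head + frags */

		/* transmit side: stop once the next frame might not fit */
		if (!stopped && desc_available(read, write) >= count) {
			write = (write + count) & (RING_SIZE_MODEL - 1);
			if (desc_available(read, write) < MAX_FRAGS_MODEL + 1)
				stopped = true;
		}

		/* completion side: retire one descriptor every few iterations */
		if (frame % 4 == 0 && read != write) {
			read = (read + 1) & (RING_SIZE_MODEL - 1);
			if (stopped &&
			    desc_available(read, write) >= (MAX_FRAGS_MODEL + 1) * 2) {
				stopped = false;
				printf("frame %d: queue restarted, %d free\n",
				       frame, desc_available(read, write));
			}
		}
	}
	return 0;
}
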
904 static bool tsnep_tx_pending(struct tsnep_tx *tx) in tsnep_tx_pending() argument
910 nq = netdev_get_tx_queue(tx->adapter->netdev, tx->queue_index); in tsnep_tx_pending()
913 if (tx->read != tx->write) { in tsnep_tx_pending()
914 entry = &tx->entry[tx->read]; in tsnep_tx_pending()
926 static int tsnep_tx_open(struct tsnep_tx *tx) in tsnep_tx_open() argument
930 retval = tsnep_tx_ring_create(tx); in tsnep_tx_open()
934 tsnep_tx_init(tx); in tsnep_tx_open()
939 static void tsnep_tx_close(struct tsnep_tx *tx) in tsnep_tx_close() argument
941 tsnep_tx_ring_cleanup(tx); in tsnep_tx_close()
1281 struct netdev_queue *tx_nq, struct tsnep_tx *tx) in tsnep_xdp_run_prog() argument
1294 if (!tsnep_xdp_xmit_back(rx->adapter, xdp, tx_nq, tx, false)) in tsnep_xdp_run_prog()
1326 struct tsnep_tx *tx) in tsnep_xdp_run_prog_zc() argument
1344 if (!tsnep_xdp_xmit_back(rx->adapter, xdp, tx_nq, tx, true)) in tsnep_xdp_run_prog_zc()
1362 struct netdev_queue *tx_nq, struct tsnep_tx *tx) in tsnep_finalize_xdp() argument
1366 tsnep_xdp_xmit_flush(tx); in tsnep_finalize_xdp()
1436 struct tsnep_tx *tx; in tsnep_rx_poll() local
1448 tx = &rx->adapter->tx[rx->tx_queue_index]; in tsnep_rx_poll()
1509 &xdp_status, tx_nq, tx); in tsnep_rx_poll()
1525 tsnep_finalize_xdp(rx->adapter, xdp_status, tx_nq, tx); in tsnep_rx_poll()
1539 struct tsnep_tx *tx; in tsnep_rx_poll_zc() local
1551 tx = &rx->adapter->tx[rx->tx_queue_index]; in tsnep_rx_poll_zc()
1609 &xdp_status, tx_nq, tx); in tsnep_rx_poll_zc()
1634 tsnep_finalize_xdp(rx->adapter, xdp_status, tx_nq, tx); in tsnep_rx_poll_zc()
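
During an RX poll, XDP_TX frames from tsnep_xdp_run_prog()/tsnep_xdp_run_prog_zc() only accumulate a status in xdp_status; the doorbell (tsnep_xdp_xmit_flush()) is issued once per poll by tsnep_finalize_xdp(). A hedged model of that deferred doorbell; the status bit value is an assumption for illustration only.

#include <stdio.h>

#define XDP_STATUS_TX_MODEL 0x1u  /* assumed status bit for "XDP_TX happened" */

/* Model of tsnep_finalize_xdp(): flush only if the RX loop queued TX work. */
static void finalize_xdp_model(unsigned int xdp_status)
{
	if (xdp_status & XDP_STATUS_TX_MODEL)
		printf("one doorbell write (tsnep_xdp_xmit_flush) for the whole batch\n");
}

int main(void)
{
	unsigned int xdp_status = 0;

	/* pretend three received frames were sent back out via XDP_TX */
	for (int i = 0; i < 3; i++)
		xdp_status |= XDP_STATUS_TX_MODEL;

	finalize_xdp_model(xdp_status);
	return 0;
}
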
1791 if (queue->tx && tsnep_tx_pending(queue->tx)) in tsnep_pending()
1807 if (queue->tx) in tsnep_poll()
1808 complete = tsnep_tx_poll(queue->tx, budget); in tsnep_poll()
1854 if (queue->tx && queue->rx) in tsnep_request_irq()
1857 else if (queue->tx) in tsnep_request_irq()
1859 name, queue->tx->queue_index); in tsnep_request_irq()
1912 struct tsnep_tx *tx = queue->tx; in tsnep_queue_open() local
1919 if (tx) in tsnep_queue_open()
1920 rx->tx_queue_index = tx->queue_index; in tsnep_queue_open()
1972 if (queue->tx) in tsnep_queue_enable()
1973 tsnep_tx_enable(queue->tx); in tsnep_queue_enable()
1981 if (queue->tx) in tsnep_queue_disable()
1982 tsnep_tx_disable(queue->tx, &queue->napi); in tsnep_queue_disable()
2000 if (adapter->queue[i].tx) { in tsnep_netdev_open()
2001 retval = tsnep_tx_open(adapter->queue[i].tx); in tsnep_netdev_open()
2043 if (adapter->queue[i].tx) in tsnep_netdev_open()
2044 tsnep_tx_close(adapter->queue[i].tx); in tsnep_netdev_open()
2064 if (adapter->queue[i].tx) in tsnep_netdev_close()
2065 tsnep_tx_close(adapter->queue[i].tx); in tsnep_netdev_close()
2100 queue->tx->xsk_pool = pool; in tsnep_enable_xsk()
2121 queue->tx->xsk_pool = NULL; in tsnep_disable_xsk()
2143 return tsnep_xmit_frame_ring(skb, &adapter->tx[queue_mapping]); in tsnep_netdev_xmit_frame()
2181 stats->tx_packets += adapter->tx[i].packets; in tsnep_netdev_get_stats64()
2182 stats->tx_bytes += adapter->tx[i].bytes; in tsnep_netdev_get_stats64()
2183 stats->tx_dropped += adapter->tx[i].dropped; in tsnep_netdev_get_stats64()
2299 return &adapter->tx[cpu]; in tsnep_xdp_get_tx()
2308 struct tsnep_tx *tx; in tsnep_netdev_xdp_xmit() local
2315 tx = tsnep_xdp_get_tx(adapter, cpu); in tsnep_netdev_xdp_xmit()
2316 nq = netdev_get_tx_queue(adapter->netdev, tx->queue_index); in tsnep_netdev_xdp_xmit()
2321 xmit = tsnep_xdp_xmit_frame_ring(xdp[nxmit], tx, in tsnep_netdev_xdp_xmit()
2333 tsnep_xdp_xmit_flush(tx); in tsnep_netdev_xdp_xmit()
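
ndo_xdp_xmit can run on any CPU, so tsnep_netdev_xdp_xmit() picks a TX ring from the current CPU id via tsnep_xdp_get_tx(), takes that ring's netdev queue lock, maps each frame, and flushes once at the end. A hedged sketch of folding a CPU number into a valid queue index when there are fewer queues than CPUs; the reduction strategy here is an assumption, not copied from the driver.

#include <stdio.h>

#define NUM_TX_QUEUES_MODEL 4   /* assumed number of TX queues */

/* Reduce an arbitrary CPU id into a valid TX queue index (assumption). */
static unsigned int xdp_tx_queue_for_cpu(unsigned int cpu)
{
	return cpu % NUM_TX_QUEUES_MODEL;
}

int main(void)
{
	for (unsigned int cpu = 0; cpu < 8; cpu++)
		printf("cpu %u -> tx queue %u\n", cpu, xdp_tx_queue_for_cpu(cpu));
	return 0;
}
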
2489 adapter->queue[0].tx = &adapter->tx[0]; in tsnep_queue_init()
2490 adapter->queue[0].tx->adapter = adapter; in tsnep_queue_init()
2491 adapter->queue[0].tx->addr = adapter->addr + TSNEP_QUEUE(0); in tsnep_queue_init()
2492 adapter->queue[0].tx->queue_index = 0; in tsnep_queue_init()
2520 adapter->queue[i].tx = &adapter->tx[i]; in tsnep_queue_init()
2521 adapter->queue[i].tx->adapter = adapter; in tsnep_queue_init()
2522 adapter->queue[i].tx->addr = adapter->addr + TSNEP_QUEUE(i); in tsnep_queue_init()
2523 adapter->queue[i].tx->queue_index = i; in tsnep_queue_init()