Lines Matching refs:tx_ring

704 struct wx_ring *tx_ring, int napi_budget) in wx_clean_tx_irq() argument
708 unsigned int i = tx_ring->next_to_clean; in wx_clean_tx_irq()
712 if (!netif_carrier_ok(tx_ring->netdev)) in wx_clean_tx_irq()
715 tx_buffer = &tx_ring->tx_buffer_info[i]; in wx_clean_tx_irq()
716 tx_desc = WX_TX_DESC(tx_ring, i); in wx_clean_tx_irq()
717 i -= tx_ring->count; in wx_clean_tx_irq()
744 dma_unmap_single(tx_ring->dev, in wx_clean_tx_irq()
758 i -= tx_ring->count; in wx_clean_tx_irq()
759 tx_buffer = tx_ring->tx_buffer_info; in wx_clean_tx_irq()
760 tx_desc = WX_TX_DESC(tx_ring, 0); in wx_clean_tx_irq()
765 dma_unmap_page(tx_ring->dev, in wx_clean_tx_irq()
778 i -= tx_ring->count; in wx_clean_tx_irq()
779 tx_buffer = tx_ring->tx_buffer_info; in wx_clean_tx_irq()
780 tx_desc = WX_TX_DESC(tx_ring, 0); in wx_clean_tx_irq()
790 i += tx_ring->count; in wx_clean_tx_irq()
791 tx_ring->next_to_clean = i; in wx_clean_tx_irq()
792 u64_stats_update_begin(&tx_ring->syncp); in wx_clean_tx_irq()
793 tx_ring->stats.bytes += total_bytes; in wx_clean_tx_irq()
794 tx_ring->stats.packets += total_packets; in wx_clean_tx_irq()
795 u64_stats_update_end(&tx_ring->syncp); in wx_clean_tx_irq()
799 netdev_tx_completed_queue(wx_txring_txq(tx_ring), in wx_clean_tx_irq()
803 if (unlikely(total_packets && netif_carrier_ok(tx_ring->netdev) && in wx_clean_tx_irq()
804 (wx_desc_unused(tx_ring) >= TX_WAKE_THRESHOLD))) { in wx_clean_tx_irq()
810 if (__netif_subqueue_stopped(tx_ring->netdev, in wx_clean_tx_irq()
811 tx_ring->queue_index) && in wx_clean_tx_irq()
812 netif_running(tx_ring->netdev)) { in wx_clean_tx_irq()
813 netif_wake_subqueue(tx_ring->netdev, in wx_clean_tx_irq()
814 tx_ring->queue_index); in wx_clean_tx_irq()
815 ++tx_ring->tx_stats.restart_queue; in wx_clean_tx_irq()
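
Note on the cleanup loop above (wx_clean_tx_irq): the ring index is kept biased by the ring size. It subtracts tx_ring->count right after loading next_to_clean, re-biases when the loop wraps, and only adds the count back before storing next_to_clean. Below is a minimal userspace sketch of that arithmetic, assuming the wrap is taken when the biased index reaches zero; the names and types are simplified for illustration, this is not the driver code.

/*
 * Minimal model of the biased ring index used by the TX cleanup loop:
 * the index is offset by -count so the wrap test is a cheap "reached
 * zero?" check instead of "reached count?".
 */
#include <assert.h>
#include <stdio.h>

int main(void)
{
	const unsigned int count = 8;        /* descriptors in the ring  */
	unsigned int next_to_clean = 5;      /* arbitrary starting index */
	unsigned int i = next_to_clean;

	i -= count;                          /* bias: i is now "negative" */

	for (unsigned int step = 1; step <= 10; step++) {
		i++;
		if (!i)                      /* real index hit count ...   */
			i -= count;          /* ... so wrap back to slot 0 */

		/* un-biased value must match a plain modulo walk */
		assert((i + count) % count == (next_to_clean + step) % count);
	}

	i += count;                          /* restore a real 0..count-1 index */
	printf("next_to_clean advances to %u\n", i);
	return 0;
}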
875 static int wx_maybe_stop_tx(struct wx_ring *tx_ring, u16 size) in wx_maybe_stop_tx() argument
877 if (likely(wx_desc_unused(tx_ring) >= size)) in wx_maybe_stop_tx()
880 netif_stop_subqueue(tx_ring->netdev, tx_ring->queue_index); in wx_maybe_stop_tx()
888 if (likely(wx_desc_unused(tx_ring) < size)) in wx_maybe_stop_tx()
892 netif_start_subqueue(tx_ring->netdev, tx_ring->queue_index); in wx_maybe_stop_tx()
893 ++tx_ring->tx_stats.restart_queue; in wx_maybe_stop_tx()
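
wx_maybe_stop_tx() above stops the subqueue when fewer than size descriptors are free, then re-checks and restarts it (counting restart_queue) if the cleanup path freed space in the meantime; wx_clean_tx_irq() wakes the queue again once wx_desc_unused() reaches TX_WAKE_THRESHOLD. The following is a minimal sketch of the free-slot math behind both checks, assuming wx_desc_unused() follows the usual producer/consumer formula with one slot kept empty; the struct below is a simplification, not the real struct wx_ring.

/*
 * Userspace model of the free-descriptor math behind the stop/wake checks.
 * One slot is kept empty so a full ring is distinguishable from an empty one.
 */
#include <stdbool.h>
#include <stdio.h>

struct ring {
	unsigned int count;          /* total descriptors            */
	unsigned int next_to_use;    /* producer index (xmit path)   */
	unsigned int next_to_clean;  /* consumer index (IRQ cleanup) */
};

static unsigned int desc_unused(const struct ring *r)
{
	unsigned int ntc = r->next_to_clean, ntu = r->next_to_use;

	return ((ntc > ntu) ? 0 : r->count) + ntc - ntu - 1;
}

/* Stop when a frame's descriptors would not fit ... */
static bool maybe_stop(const struct ring *r, unsigned int needed)
{
	return desc_unused(r) < needed;          /* true => netif_stop_subqueue() */
}

/* ... and wake only once a comfortable margin is free again. */
static bool maybe_wake(const struct ring *r, unsigned int wake_threshold)
{
	return desc_unused(r) >= wake_threshold; /* true => netif_wake_subqueue() */
}

int main(void)
{
	struct ring r = { .count = 512, .next_to_use = 480, .next_to_clean = 490 };

	printf("unused=%u stop=%d\n", desc_unused(&r), maybe_stop(&r, 64));
	r.next_to_clean = 470;                   /* cleanup caught up */
	printf("unused=%u wake=%d\n", desc_unused(&r), maybe_wake(&r, 32));
	return 0;
}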
935 static void wx_tx_map(struct wx_ring *tx_ring, in wx_tx_map() argument
942 u16 i = tx_ring->next_to_use; in wx_tx_map()
950 tx_desc = WX_TX_DESC(tx_ring, i); in wx_tx_map()
955 dma = dma_map_single(tx_ring->dev, skb->data, size, DMA_TO_DEVICE); in wx_tx_map()
960 if (dma_mapping_error(tx_ring->dev, dma)) in wx_tx_map()
975 if (i == tx_ring->count) { in wx_tx_map()
976 tx_desc = WX_TX_DESC(tx_ring, 0); in wx_tx_map()
994 if (i == tx_ring->count) { in wx_tx_map()
995 tx_desc = WX_TX_DESC(tx_ring, 0); in wx_tx_map()
1004 dma = skb_frag_dma_map(tx_ring->dev, frag, 0, size, in wx_tx_map()
1007 tx_buffer = &tx_ring->tx_buffer_info[i]; in wx_tx_map()
1014 netdev_tx_sent_queue(wx_txring_txq(tx_ring), first->bytecount); in wx_tx_map()
1031 if (i == tx_ring->count) in wx_tx_map()
1034 tx_ring->next_to_use = i; in wx_tx_map()
1036 wx_maybe_stop_tx(tx_ring, DESC_NEEDED); in wx_tx_map()
1038 if (netif_xmit_stopped(wx_txring_txq(tx_ring)) || !netdev_xmit_more()) in wx_tx_map()
1039 writel(i, tx_ring->tail); in wx_tx_map()
1043 dev_err(tx_ring->dev, "TX DMA map failed\n"); in wx_tx_map()
1047 tx_buffer = &tx_ring->tx_buffer_info[i]; in wx_tx_map()
1049 dma_unmap_page(tx_ring->dev, in wx_tx_map()
1057 i += tx_ring->count; in wx_tx_map()
1064 tx_ring->next_to_use = i; in wx_tx_map()
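
The tail write at the end of wx_tx_map() (lines 1038-1039 above) is conditional: the doorbell is written only when the stack reports no further frames behind this one (!netdev_xmit_more()) or when the queue had to be stopped, so a burst of transmits shares one MMIO write. Below is a toy userspace model of that batching decision; the struct and the doorbell counter are illustrative stand-ins, not driver types.

/*
 * Toy model of xmit_more doorbell batching: descriptors are posted per
 * frame, but the "tail register" is only written when no more frames are
 * pending behind this one or the queue was stopped.
 */
#include <stdbool.h>
#include <stdio.h>

struct ring {
	unsigned int count;
	unsigned int next_to_use;
	unsigned int tail;           /* stands in for the MMIO tail register */
	unsigned int doorbells;      /* number of "writel(i, tail)" writes   */
};

static void post_frame(struct ring *r, unsigned int ndesc,
		       bool xmit_more, bool stopped)
{
	r->next_to_use = (r->next_to_use + ndesc) % r->count;

	/* mirrors: if (netif_xmit_stopped(...) || !netdev_xmit_more()) writel(i, tail) */
	if (stopped || !xmit_more) {
		r->tail = r->next_to_use;
		r->doorbells++;
	}
}

int main(void)
{
	struct ring r = { .count = 512 };

	post_frame(&r, 3, true,  false);   /* more frames coming: defer  */
	post_frame(&r, 2, true,  false);   /* still batching             */
	post_frame(&r, 4, false, false);   /* last of the burst: ring it */

	printf("tail=%u after %u doorbell write(s)\n", r.tail, r.doorbells);
	return 0;
}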
1067 static void wx_tx_ctxtdesc(struct wx_ring *tx_ring, u32 vlan_macip_lens, in wx_tx_ctxtdesc() argument
1071 u16 i = tx_ring->next_to_use; in wx_tx_ctxtdesc()
1073 context_desc = WX_TX_CTXTDESC(tx_ring, i); in wx_tx_ctxtdesc()
1075 tx_ring->next_to_use = (i < tx_ring->count) ? i : 0; in wx_tx_ctxtdesc()
1207 static int wx_tso(struct wx_ring *tx_ring, struct wx_tx_buffer *first, in wx_tso() argument
1211 struct net_device *netdev = tx_ring->netdev; in wx_tso()
1322 wx_tx_ctxtdesc(tx_ring, vlan_macip_lens, tunhdr_eiplen_tunlen, in wx_tso()
1328 static void wx_tx_csum(struct wx_ring *tx_ring, struct wx_tx_buffer *first, in wx_tx_csum() argument
1332 struct net_device *netdev = tx_ring->netdev; in wx_tx_csum()
1447 wx_tx_ctxtdesc(tx_ring, vlan_macip_lens, tunhdr_eiplen_tunlen, in wx_tx_csum()
1452 struct wx_ring *tx_ring) in wx_xmit_frame_ring() argument
1454 struct wx *wx = netdev_priv(tx_ring->netdev); in wx_xmit_frame_ring()
1472 if (wx_maybe_stop_tx(tx_ring, count + 3)) { in wx_xmit_frame_ring()
1473 tx_ring->tx_stats.tx_busy++; in wx_xmit_frame_ring()
1478 first = &tx_ring->tx_buffer_info[tx_ring->next_to_use]; in wx_xmit_frame_ring()
1495 tso = wx_tso(tx_ring, first, &hdr_len, ptype); in wx_xmit_frame_ring()
1499 wx_tx_csum(tx_ring, first, ptype); in wx_xmit_frame_ring()
1501 if (test_bit(WX_FLAG_FDIR_CAPABLE, wx->flags) && tx_ring->atr_sample_rate) in wx_xmit_frame_ring()
1502 wx->atr(tx_ring, first, ptype); in wx_xmit_frame_ring()
1504 wx_tx_map(tx_ring, first, hdr_len); in wx_xmit_frame_ring()
1519 struct wx_ring *tx_ring; in wx_xmit_frame() local
1534 tx_ring = wx->tx_ring[r_idx]; in wx_xmit_frame()
1536 return wx_xmit_frame_ring(skb, tx_ring); in wx_xmit_frame()
1731 wx->tx_ring[i]->reg_idx = i; in wx_cache_ring_rss()
1815 wx->tx_ring[txr_idx] = ring; in wx_alloc_q_vector()
1869 wx->tx_ring[ring->queue_index] = NULL; in wx_free_q_vector()
2307 static void wx_clean_tx_ring(struct wx_ring *tx_ring) in wx_clean_tx_ring() argument
2310 u16 i = tx_ring->next_to_clean; in wx_clean_tx_ring()
2312 tx_buffer = &tx_ring->tx_buffer_info[i]; in wx_clean_tx_ring()
2314 while (i != tx_ring->next_to_use) { in wx_clean_tx_ring()
2321 dma_unmap_single(tx_ring->dev, in wx_clean_tx_ring()
2328 tx_desc = WX_TX_DESC(tx_ring, i); in wx_clean_tx_ring()
2335 if (unlikely(i == tx_ring->count)) { in wx_clean_tx_ring()
2337 tx_buffer = tx_ring->tx_buffer_info; in wx_clean_tx_ring()
2338 tx_desc = WX_TX_DESC(tx_ring, 0); in wx_clean_tx_ring()
2343 dma_unmap_page(tx_ring->dev, in wx_clean_tx_ring()
2352 if (unlikely(i == tx_ring->count)) { in wx_clean_tx_ring()
2354 tx_buffer = tx_ring->tx_buffer_info; in wx_clean_tx_ring()
2358 netdev_tx_reset_queue(wx_txring_txq(tx_ring)); in wx_clean_tx_ring()
2361 tx_ring->next_to_use = 0; in wx_clean_tx_ring()
2362 tx_ring->next_to_clean = 0; in wx_clean_tx_ring()
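
wx_clean_tx_ring() is the teardown counterpart of the IRQ cleanup: it walks every in-flight buffer from next_to_clean up to next_to_use, unmaps the head and fragment DMA, then zeroes both indices and resets the BQL state with netdev_tx_reset_queue(). A minimal model of that bounded walk, with simplified types and a counter standing in for the unmap calls:

/*
 * Minimal model of the teardown drain: visit every slot still owned by the
 * hardware (next_to_clean .. next_to_use), then reset both indices.
 */
#include <stdio.h>

int main(void)
{
	const unsigned int count = 8;
	unsigned int next_to_clean = 6, next_to_use = 3;  /* 5 slots in flight */
	unsigned int i = next_to_clean, drained = 0;

	while (i != next_to_use) {
		drained++;                /* dma_unmap_single()/dma_unmap_page() here */
		i++;
		if (i == count)           /* same wrap test as the driver loop */
			i = 0;
	}

	next_to_use = 0;                  /* ring is empty again */
	next_to_clean = 0;
	printf("drained %u in-flight buffers\n", drained);
	return 0;
}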
2374 wx_clean_tx_ring(wx->tx_ring[i]); in wx_clean_all_tx_rings()
2384 static void wx_free_tx_resources(struct wx_ring *tx_ring) in wx_free_tx_resources() argument
2386 wx_clean_tx_ring(tx_ring); in wx_free_tx_resources()
2387 kvfree(tx_ring->tx_buffer_info); in wx_free_tx_resources()
2388 tx_ring->tx_buffer_info = NULL; in wx_free_tx_resources()
2391 if (!tx_ring->desc) in wx_free_tx_resources()
2394 dma_free_coherent(tx_ring->dev, tx_ring->size, in wx_free_tx_resources()
2395 tx_ring->desc, tx_ring->dma); in wx_free_tx_resources()
2396 tx_ring->desc = NULL; in wx_free_tx_resources()
2410 wx_free_tx_resources(wx->tx_ring[i]); in wx_free_all_tx_resources()
2541 static int wx_setup_tx_resources(struct wx_ring *tx_ring) in wx_setup_tx_resources() argument
2543 struct device *dev = tx_ring->dev; in wx_setup_tx_resources()
2548 size = sizeof(struct wx_tx_buffer) * tx_ring->count; in wx_setup_tx_resources()
2550 if (tx_ring->q_vector) in wx_setup_tx_resources()
2551 numa_node = tx_ring->q_vector->numa_node; in wx_setup_tx_resources()
2553 tx_ring->tx_buffer_info = kvmalloc_node(size, GFP_KERNEL, numa_node); in wx_setup_tx_resources()
2554 if (!tx_ring->tx_buffer_info) in wx_setup_tx_resources()
2555 tx_ring->tx_buffer_info = kvmalloc(size, GFP_KERNEL); in wx_setup_tx_resources()
2556 if (!tx_ring->tx_buffer_info) in wx_setup_tx_resources()
2560 tx_ring->size = tx_ring->count * sizeof(union wx_tx_desc); in wx_setup_tx_resources()
2561 tx_ring->size = ALIGN(tx_ring->size, 4096); in wx_setup_tx_resources()
2564 tx_ring->desc = dma_alloc_coherent(dev, tx_ring->size, in wx_setup_tx_resources()
2565 &tx_ring->dma, GFP_KERNEL); in wx_setup_tx_resources()
2566 if (!tx_ring->desc) { in wx_setup_tx_resources()
2568 tx_ring->desc = dma_alloc_coherent(dev, tx_ring->size, in wx_setup_tx_resources()
2569 &tx_ring->dma, GFP_KERNEL); in wx_setup_tx_resources()
2572 if (!tx_ring->desc) in wx_setup_tx_resources()
2575 tx_ring->next_to_use = 0; in wx_setup_tx_resources()
2576 tx_ring->next_to_clean = 0; in wx_setup_tx_resources()
2581 kvfree(tx_ring->tx_buffer_info); in wx_setup_tx_resources()
2582 tx_ring->tx_buffer_info = NULL; in wx_setup_tx_resources()
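
wx_setup_tx_resources() sizes two allocations: the tx_buffer_info array (count * sizeof(struct wx_tx_buffer), via kvmalloc_node with a plain kvmalloc fallback) and the descriptor area (count * sizeof(union wx_tx_desc), rounded up to a 4096-byte boundary before dma_alloc_coherent, with a second attempt if the first fails). The sketch below only models the rounding arithmetic; the 16-byte descriptor size is an assumed placeholder, not taken from wx_type.h.

/*
 * Model of the size/alignment arithmetic. ALIGN_UP() matches the kernel
 * ALIGN() macro's behaviour for power-of-two alignments.
 */
#include <stdio.h>

#define ALIGN_UP(x, a)  (((x) + (a) - 1) & ~((unsigned long)(a) - 1))

int main(void)
{
	unsigned long count = 1024;                 /* descriptors per ring      */
	unsigned long desc_size = 16;               /* assumed placeholder size  */
	unsigned long size = count * desc_size;     /* 16384 bytes               */

	size = ALIGN_UP(size, 4096);                /* round up for the coherent */
	                                            /* DMA allocation            */
	printf("descriptor area: %lu bytes\n", size);

	size = ALIGN_UP(1000 * desc_size, 4096);    /* non-multiple example      */
	printf("1000 descriptors round up to %lu bytes\n", size);
	return 0;
}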
2602 err = wx_setup_tx_resources(wx->tx_ring[i]); in wx_setup_all_tx_resources()
2614 wx_free_tx_resources(wx->tx_ring[i]); in wx_setup_all_tx_resources()
2679 struct wx_ring *ring = READ_ONCE(wx->tx_ring[i]); in wx_get_stats64()
2819 memcpy(&temp_ring[i], wx->tx_ring[i], in wx_set_ring()
2835 wx_free_tx_resources(wx->tx_ring[i]); in wx_set_ring()
2837 memcpy(wx->tx_ring[i], &temp_ring[i], in wx_set_ring()