Lines Matching refs:tx_ring

59 static void iavf_clean_tx_ring(struct iavf_ring *tx_ring) in iavf_clean_tx_ring() argument
65 if (!tx_ring->tx_bi) in iavf_clean_tx_ring()
69 for (i = 0; i < tx_ring->count; i++) in iavf_clean_tx_ring()
70 iavf_unmap_and_free_tx_resource(tx_ring, &tx_ring->tx_bi[i]); in iavf_clean_tx_ring()
72 bi_size = sizeof(struct iavf_tx_buffer) * tx_ring->count; in iavf_clean_tx_ring()
73 memset(tx_ring->tx_bi, 0, bi_size); in iavf_clean_tx_ring()
76 memset(tx_ring->desc, 0, tx_ring->size); in iavf_clean_tx_ring()
78 tx_ring->next_to_use = 0; in iavf_clean_tx_ring()
79 tx_ring->next_to_clean = 0; in iavf_clean_tx_ring()
81 if (!tx_ring->netdev) in iavf_clean_tx_ring()
85 netdev_tx_reset_queue(txring_txq(tx_ring)); in iavf_clean_tx_ring()
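The per-buffer unmap/free helper called in the loop above is not part of the match output. A minimal sketch of what it does in this driver family, assuming the usual iavf_tx_buffer layout (skb pointer plus dma/len tracked with dma_unmap_addr()/dma_unmap_len()); an illustration, not the verbatim iavf code:

static void example_unmap_and_free_tx_resource(struct iavf_ring *ring,
					       struct iavf_tx_buffer *tx_buffer)
{
	if (tx_buffer->skb) {
		/* head buffer: free the skb and undo the dma_map_single() */
		dev_kfree_skb_any(tx_buffer->skb);
		if (dma_unmap_len(tx_buffer, len))
			dma_unmap_single(ring->dev,
					 dma_unmap_addr(tx_buffer, dma),
					 dma_unmap_len(tx_buffer, len),
					 DMA_TO_DEVICE);
	} else if (dma_unmap_len(tx_buffer, len)) {
		/* fragment: pages are mapped with skb_frag_dma_map() */
		dma_unmap_page(ring->dev,
			       dma_unmap_addr(tx_buffer, dma),
			       dma_unmap_len(tx_buffer, len),
			       DMA_TO_DEVICE);
	}

	tx_buffer->next_to_watch = NULL;
	tx_buffer->skb = NULL;
	dma_unmap_len_set(tx_buffer, len, 0);
}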
94 void iavf_free_tx_resources(struct iavf_ring *tx_ring) in iavf_free_tx_resources() argument
96 iavf_clean_tx_ring(tx_ring); in iavf_free_tx_resources()
97 kfree(tx_ring->tx_bi); in iavf_free_tx_resources()
98 tx_ring->tx_bi = NULL; in iavf_free_tx_resources()
100 if (tx_ring->desc) { in iavf_free_tx_resources()
101 dma_free_coherent(tx_ring->dev, tx_ring->size, in iavf_free_tx_resources()
102 tx_ring->desc, tx_ring->dma); in iavf_free_tx_resources()
103 tx_ring->desc = NULL; in iavf_free_tx_resources()
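For context, iavf_free_tx_resources() is normally driven per queue from the adapter teardown path (iavf_main.c in the real driver). A hedged caller sketch, where the adapter field names are assumptions quoted from memory:

static void example_free_all_tx_resources(struct iavf_adapter *adapter)
{
	int i;

	/* nothing to do if the rings were never allocated */
	if (!adapter->tx_rings)
		return;

	for (i = 0; i < adapter->num_active_queues; i++)
		if (adapter->tx_rings[i].desc)
			iavf_free_tx_resources(&adapter->tx_rings[i]);
}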
159 struct iavf_ring *tx_ring = NULL; in iavf_detect_recover_hung() local
178 tx_ring = &vsi->back->tx_rings[i]; in iavf_detect_recover_hung()
179 if (tx_ring && tx_ring->desc) { in iavf_detect_recover_hung()
187 packets = tx_ring->stats.packets & INT_MAX; in iavf_detect_recover_hung()
188 if (tx_ring->prev_pkt_ctr == packets) { in iavf_detect_recover_hung()
189 iavf_force_wb(vsi, tx_ring->q_vector); in iavf_detect_recover_hung()
197 tx_ring->prev_pkt_ctr = in iavf_detect_recover_hung()
198 iavf_get_tx_pending(tx_ring, true) ? packets : -1; in iavf_detect_recover_hung()
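The recovery heuristic above is a two-pass comparison: remember the completed-packet counter while descriptors are outstanding, and if it has not moved by the next service-task pass, force a descriptor writeback (the real code also separates the counter read from the pending check with a read barrier). A stand-alone model of that bookkeeping, not driver code:

#include <limits.h>
#include <stdbool.h>

struct ring_model {
	unsigned long long packets;	/* completed packets, as in stats.packets */
	int prev_pkt_ctr;		/* -1 means "nothing was pending last pass" */
};

/* Returns true when the queue made no progress since the previous pass.
 * Masking with INT_MAX keeps the stored value non-negative, so it can never
 * collide with the -1 sentinel set at ring setup. */
static bool tx_hang_detected(struct ring_model *r, bool tx_pending)
{
	int packets = (int)(r->packets & INT_MAX);

	if (r->prev_pkt_ctr == packets)
		return true;		/* stalled: caller forces a writeback */

	/* arm the next pass only while descriptors are still outstanding */
	r->prev_pkt_ctr = tx_pending ? packets : -1;
	return false;
}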
214 struct iavf_ring *tx_ring, int napi_budget) in iavf_clean_tx_irq() argument
216 int i = tx_ring->next_to_clean; in iavf_clean_tx_irq()
222 tx_buf = &tx_ring->tx_bi[i]; in iavf_clean_tx_irq()
223 tx_desc = IAVF_TX_DESC(tx_ring, i); in iavf_clean_tx_irq()
224 i -= tx_ring->count; in iavf_clean_tx_irq()
236 iavf_trace(clean_tx_irq, tx_ring, tx_desc, tx_buf); in iavf_clean_tx_irq()
253 dma_unmap_single(tx_ring->dev, in iavf_clean_tx_irq()
265 tx_ring, tx_desc, tx_buf); in iavf_clean_tx_irq()
271 i -= tx_ring->count; in iavf_clean_tx_irq()
272 tx_buf = tx_ring->tx_bi; in iavf_clean_tx_irq()
273 tx_desc = IAVF_TX_DESC(tx_ring, 0); in iavf_clean_tx_irq()
278 dma_unmap_page(tx_ring->dev, in iavf_clean_tx_irq()
291 i -= tx_ring->count; in iavf_clean_tx_irq()
292 tx_buf = tx_ring->tx_bi; in iavf_clean_tx_irq()
293 tx_desc = IAVF_TX_DESC(tx_ring, 0); in iavf_clean_tx_irq()
302 i += tx_ring->count; in iavf_clean_tx_irq()
303 tx_ring->next_to_clean = i; in iavf_clean_tx_irq()
304 u64_stats_update_begin(&tx_ring->syncp); in iavf_clean_tx_irq()
305 tx_ring->stats.bytes += total_bytes; in iavf_clean_tx_irq()
306 tx_ring->stats.packets += total_packets; in iavf_clean_tx_irq()
307 u64_stats_update_end(&tx_ring->syncp); in iavf_clean_tx_irq()
308 tx_ring->q_vector->tx.total_bytes += total_bytes; in iavf_clean_tx_irq()
309 tx_ring->q_vector->tx.total_packets += total_packets; in iavf_clean_tx_irq()
311 if (tx_ring->flags & IAVF_TXR_FLAGS_WB_ON_ITR) { in iavf_clean_tx_irq()
317 unsigned int j = iavf_get_tx_pending(tx_ring, false); in iavf_clean_tx_irq()
322 (IAVF_DESC_UNUSED(tx_ring) != tx_ring->count)) in iavf_clean_tx_irq()
323 tx_ring->flags |= IAVF_TXR_FLAGS_ARM_WB; in iavf_clean_tx_irq()
327 netdev_tx_completed_queue(txring_txq(tx_ring), in iavf_clean_tx_irq()
331 if (unlikely(total_packets && netif_carrier_ok(tx_ring->netdev) && in iavf_clean_tx_irq()
332 (IAVF_DESC_UNUSED(tx_ring) >= TX_WAKE_THRESHOLD))) { in iavf_clean_tx_irq()
337 if (__netif_subqueue_stopped(tx_ring->netdev, in iavf_clean_tx_irq()
338 tx_ring->queue_index) && in iavf_clean_tx_irq()
340 netif_wake_subqueue(tx_ring->netdev, in iavf_clean_tx_irq()
341 tx_ring->queue_index); in iavf_clean_tx_irq()
342 ++tx_ring->tx_stats.restart_queue; in iavf_clean_tx_irq()
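The i -= tx_ring->count / i += tx_ring->count arithmetic above is the usual Intel-driver trick of biasing the ring index negative by the ring size: the hot completion loop then only tests !i to detect the wrap instead of comparing against count on every descriptor, and the bias is removed once before publishing next_to_clean. A stand-alone model of that walk in plain C (no driver structures):

#include <stdio.h>

#define RING_COUNT 8

static void walk_ring(const int ring[RING_COUNT], unsigned int next_to_clean,
		      unsigned int budget)
{
	int i = next_to_clean;

	i -= RING_COUNT;		/* bias: i now runs over [-RING_COUNT, 0) */

	do {
		printf("clean slot %d -> %d\n", i + RING_COUNT, ring[i + RING_COUNT]);

		i++;
		if (!i)			/* wrapped: cheaper than (i == RING_COUNT) */
			i -= RING_COUNT;
	} while (--budget);

	i += RING_COUNT;		/* drop the bias before publishing */
	printf("next_to_clean is now %d\n", i);
}

int main(void)
{
	int ring[RING_COUNT] = { 10, 11, 12, 13, 14, 15, 16, 17 };

	walk_ring(ring, 6, 4);		/* starts at slot 6 and wraps past the end */
	return 0;
}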
650 int iavf_setup_tx_descriptors(struct iavf_ring *tx_ring) in iavf_setup_tx_descriptors() argument
652 struct device *dev = tx_ring->dev; in iavf_setup_tx_descriptors()
659 WARN_ON(tx_ring->tx_bi); in iavf_setup_tx_descriptors()
660 bi_size = sizeof(struct iavf_tx_buffer) * tx_ring->count; in iavf_setup_tx_descriptors()
661 tx_ring->tx_bi = kzalloc(bi_size, GFP_KERNEL); in iavf_setup_tx_descriptors()
662 if (!tx_ring->tx_bi) in iavf_setup_tx_descriptors()
666 tx_ring->size = tx_ring->count * sizeof(struct iavf_tx_desc); in iavf_setup_tx_descriptors()
667 tx_ring->size = ALIGN(tx_ring->size, 4096); in iavf_setup_tx_descriptors()
668 tx_ring->desc = dma_alloc_coherent(dev, tx_ring->size, in iavf_setup_tx_descriptors()
669 &tx_ring->dma, GFP_KERNEL); in iavf_setup_tx_descriptors()
670 if (!tx_ring->desc) { in iavf_setup_tx_descriptors()
672 tx_ring->size); in iavf_setup_tx_descriptors()
676 tx_ring->next_to_use = 0; in iavf_setup_tx_descriptors()
677 tx_ring->next_to_clean = 0; in iavf_setup_tx_descriptors()
678 tx_ring->prev_pkt_ctr = -1; in iavf_setup_tx_descriptors()
682 kfree(tx_ring->tx_bi); in iavf_setup_tx_descriptors()
683 tx_ring->tx_bi = NULL; in iavf_setup_tx_descriptors()
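The IAVF_TX_DESC() accessor used throughout the listing simply indexes into the coherent block allocated here: the descriptors sit back to back in tx_ring->desc, so in the i40e/iavf family the accessor is a cast-and-index macro along these lines (quoted from memory, an assumption rather than the exact header text):

#define EXAMPLE_TX_DESC(R, i) \
	(&(((struct iavf_tx_desc *)((R)->desc))[i]))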
1463 struct iavf_ring *tx_ring, u32 *flags) in iavf_tx_prepare_vlan_flags() argument
1475 if (tx_ring->flags & IAVF_TXR_FLAGS_VLAN_TAG_LOC_L2TAG2) { in iavf_tx_prepare_vlan_flags()
1477 } else if (tx_ring->flags & IAVF_TXRX_FLAGS_VLAN_TAG_LOC_L2TAG1) { in iavf_tx_prepare_vlan_flags()
1480 dev_dbg(tx_ring->dev, "Unsupported Tx VLAN tag location requested\n"); in iavf_tx_prepare_vlan_flags()
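The branches above only decide where an already-requested VLAN tag is placed: L2TAG2 in a context descriptor versus L2TAG1 in the data descriptor. A hedged reconstruction of the surrounding function, with the tag pulled from the skb via the standard helpers; the IAVF_TX_FLAGS_* names follow the driver-family convention and are quoted from memory:

static void example_prepare_vlan_flags(struct sk_buff *skb,
				       struct iavf_ring *tx_ring, u32 *flags)
{
	u32 tx_flags = 0;

	/* the stack only requests HW insertion for tags it added itself */
	if (!skb_vlan_tag_present(skb))
		return;

	tx_flags |= skb_vlan_tag_get(skb) << IAVF_TX_FLAGS_VLAN_SHIFT;
	if (tx_ring->flags & IAVF_TXR_FLAGS_VLAN_TAG_LOC_L2TAG2)
		tx_flags |= IAVF_TX_FLAGS_HW_OUTER_SINGLE_VLAN;
	else if (tx_ring->flags & IAVF_TXRX_FLAGS_VLAN_TAG_LOC_L2TAG1)
		tx_flags |= IAVF_TX_FLAGS_HW_VLAN;
	else
		return;		/* unsupported tag location: leave *flags alone */

	*flags = tx_flags;
}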
1613 struct iavf_ring *tx_ring, in iavf_tx_enable_csum() argument
1775 static void iavf_create_tx_ctx(struct iavf_ring *tx_ring, in iavf_create_tx_ctx() argument
1780 int i = tx_ring->next_to_use; in iavf_create_tx_ctx()
1787 context_desc = IAVF_TX_CTXTDESC(tx_ring, i); in iavf_create_tx_ctx()
1790 tx_ring->next_to_use = (i < tx_ring->count) ? i : 0; in iavf_create_tx_ctx()
1890 int __iavf_maybe_stop_tx(struct iavf_ring *tx_ring, int size) in __iavf_maybe_stop_tx() argument
1892 netif_stop_subqueue(tx_ring->netdev, tx_ring->queue_index); in __iavf_maybe_stop_tx()
1897 if (likely(IAVF_DESC_UNUSED(tx_ring) < size)) in __iavf_maybe_stop_tx()
1901 netif_start_subqueue(tx_ring->netdev, tx_ring->queue_index); in __iavf_maybe_stop_tx()
1902 ++tx_ring->tx_stats.restart_queue; in __iavf_maybe_stop_tx()
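The stop-then-recheck shape above closes the race with the completion path: stop the subqueue first, then (after a memory barrier in the real function) re-read the free-descriptor count and restart immediately if cleanup made room in the meantime. The free count comes from the usual Intel ring accounting, which always keeps one slot empty so that next_to_use == next_to_clean unambiguously means "empty"; a sketch of that macro, quoted from memory of the i40e/iavf headers:

#define EXAMPLE_DESC_UNUSED(R)						\
	((((R)->next_to_clean > (R)->next_to_use) ? 0 : (R)->count) +	\
	 (R)->next_to_clean - (R)->next_to_use - 1)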
1916 static void iavf_tx_map(struct iavf_ring *tx_ring, struct sk_buff *skb, in iavf_tx_map() argument
1925 u16 i = tx_ring->next_to_use; in iavf_tx_map()
1936 dma = dma_map_single(tx_ring->dev, skb->data, size, DMA_TO_DEVICE); in iavf_tx_map()
1938 tx_desc = IAVF_TX_DESC(tx_ring, i); in iavf_tx_map()
1944 if (dma_mapping_error(tx_ring->dev, dma)) in iavf_tx_map()
1963 if (i == tx_ring->count) { in iavf_tx_map()
1964 tx_desc = IAVF_TX_DESC(tx_ring, 0); in iavf_tx_map()
1984 if (i == tx_ring->count) { in iavf_tx_map()
1985 tx_desc = IAVF_TX_DESC(tx_ring, 0); in iavf_tx_map()
1992 dma = skb_frag_dma_map(tx_ring->dev, frag, 0, size, in iavf_tx_map()
1995 tx_bi = &tx_ring->tx_bi[i]; in iavf_tx_map()
1998 netdev_tx_sent_queue(txring_txq(tx_ring), first->bytecount); in iavf_tx_map()
2001 if (i == tx_ring->count) in iavf_tx_map()
2004 tx_ring->next_to_use = i; in iavf_tx_map()
2006 iavf_maybe_stop_tx(tx_ring, DESC_NEEDED); in iavf_tx_map()
2027 if (netif_xmit_stopped(txring_txq(tx_ring)) || !netdev_xmit_more()) { in iavf_tx_map()
2028 writel(i, tx_ring->tail); in iavf_tx_map()
2034 dev_info(tx_ring->dev, "TX DMA map failed\n"); in iavf_tx_map()
2038 tx_bi = &tx_ring->tx_bi[i]; in iavf_tx_map()
2039 iavf_unmap_and_free_tx_resource(tx_ring, tx_bi); in iavf_tx_map()
2043 i = tx_ring->count; in iavf_tx_map()
2047 tx_ring->next_to_use = i; in iavf_tx_map()
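The end of the mapping loop above also carries an ordering contract that the match output only hints at: all descriptor writes must be globally visible before first->next_to_watch is published for the completion path, and the tail doorbell is skipped while the stack is still batching (netdev_xmit_more()) unless the queue has been stopped. A hedged fragment showing that ordering, with the barrier placement quoted from memory:

	/* 1) descriptor writes ... then 2) next_to_watch ... then 3) tail */
	wmb();					/* flush descriptor stores */

	first->next_to_watch = tx_desc;		/* completion path keys off this */

	tx_ring->next_to_use = i;
	iavf_maybe_stop_tx(tx_ring, DESC_NEEDED);

	if (netif_xmit_stopped(txring_txq(tx_ring)) || !netdev_xmit_more())
		writel(i, tx_ring->tail);	/* doorbell: HW may fetch up to i */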
2058 struct iavf_ring *tx_ring) in iavf_xmit_frame_ring() argument
2073 iavf_trace(xmit_frame_ring, skb, tx_ring); in iavf_xmit_frame_ring()
2082 tx_ring->tx_stats.tx_linearize++; in iavf_xmit_frame_ring()
2091 if (iavf_maybe_stop_tx(tx_ring, count + 4 + 1)) { in iavf_xmit_frame_ring()
2092 tx_ring->tx_stats.tx_busy++; in iavf_xmit_frame_ring()
2097 first = &tx_ring->tx_bi[tx_ring->next_to_use]; in iavf_xmit_frame_ring()
2103 iavf_tx_prepare_vlan_flags(skb, tx_ring, &tx_flags); in iavf_xmit_frame_ring()
2128 tx_ring, &cd_tunneling); in iavf_xmit_frame_ring()
2135 iavf_create_tx_ctx(tx_ring, cd_type_cmd_tso_mss, in iavf_xmit_frame_ring()
2138 iavf_tx_map(tx_ring, skb, first, tx_flags, hdr_len, in iavf_xmit_frame_ring()
2144 iavf_trace(xmit_frame_ring_drop, first->skb, tx_ring); in iavf_xmit_frame_ring()
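The budget passed to iavf_maybe_stop_tx() above (count + 4 + 1) reserves count data descriptors for the head and fragments, a 4-descriptor gap so the producer never writes into the cache line the hardware head pointer is chasing, and 1 extra slot for a possible context descriptor; this breakdown follows the driver-family comments and is a gloss, not a quote. The per-buffer contribution to count is bounded by the per-descriptor payload limit, roughly as in this hedged helper (constant name assumed):

/* How many data descriptors one contiguous buffer of "size" bytes needs,
 * given the per-descriptor payload limit of this hardware family. */
static inline unsigned int example_txd_use_count(unsigned int size)
{
	return DIV_ROUND_UP(size, IAVF_MAX_DATA_PER_TXD_ALIGNED);
}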
2160 struct iavf_ring *tx_ring = &adapter->tx_rings[skb->queue_mapping]; in iavf_xmit_frame() local
2172 return iavf_xmit_frame_ring(skb, tx_ring); in iavf_xmit_frame()
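One detail of iavf_xmit_frame() the match output omits: before handing off to iavf_xmit_frame_ring(), the entry point pads runt frames because the hardware does not pad very short frames itself. A hedged reconstruction (the IAVF_MIN_TX_LEN constant and the tail handling are quoted from memory):

netdev_tx_t example_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
{
	struct iavf_adapter *adapter = netdev_priv(netdev);
	struct iavf_ring *tx_ring = &adapter->tx_rings[skb->queue_mapping];

	/* hardware can't handle really short frames: pad to the minimum */
	if (unlikely(skb->len < IAVF_MIN_TX_LEN)) {
		if (skb_pad(skb, IAVF_MIN_TX_LEN - skb->len))
			return NETDEV_TX_OK;	/* skb_pad() freed the skb */
		skb->len = IAVF_MIN_TX_LEN;
		skb_set_tail_pointer(skb, IAVF_MIN_TX_LEN);
	}

	return iavf_xmit_frame_ring(skb, tx_ring);
}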