Lines matching refs: tx_ring

39 struct ice_tx_ring *tx_ring; in ice_prgm_fdir_fltr() local
48 tx_ring = vsi->tx_rings[0]; in ice_prgm_fdir_fltr()
49 if (!tx_ring || !tx_ring->desc) in ice_prgm_fdir_fltr()
51 dev = tx_ring->dev; in ice_prgm_fdir_fltr()
54 for (i = ICE_FDIR_CLEAN_DELAY; ICE_DESC_UNUSED(tx_ring) < 2; i--) { in ice_prgm_fdir_fltr()
67 i = tx_ring->next_to_use; in ice_prgm_fdir_fltr()
68 first = &tx_ring->tx_buf[i]; in ice_prgm_fdir_fltr()
69 f_desc = ICE_TX_FDIRDESC(tx_ring, i); in ice_prgm_fdir_fltr()
73 i = (i < tx_ring->count) ? i : 0; in ice_prgm_fdir_fltr()
74 tx_desc = ICE_TX_DESC(tx_ring, i); in ice_prgm_fdir_fltr()
75 tx_buf = &tx_ring->tx_buf[i]; in ice_prgm_fdir_fltr()
78 tx_ring->next_to_use = (i < tx_ring->count) ? i : 0; in ice_prgm_fdir_fltr()
102 writel(tx_ring->next_to_use, tx_ring->tail); in ice_prgm_fdir_fltr()
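
The ice_prgm_fdir_fltr() hits above all revolve around the same ring bookkeeping: check how many descriptors are free, advance next_to_use with wraparound, then write the new index to the tail register. Below is a minimal user-space sketch of that index arithmetic; struct ring, desc_unused() and advance() are simplified stand-ins inferred from the usage shown, not the driver's real definitions.

#include <stdio.h>

struct ring {
	unsigned int count;		/* number of descriptors */
	unsigned int next_to_use;	/* producer index */
	unsigned int next_to_clean;	/* consumer index */
};

/* Free descriptors, keeping one slot empty so a full ring is
 * distinguishable from an empty one (what the ICE_DESC_UNUSED()
 * checks above appear to compute).
 */
static unsigned int desc_unused(const struct ring *r)
{
	return (r->next_to_clean > r->next_to_use ? 0 : r->count) +
	       r->next_to_clean - r->next_to_use - 1;
}

/* Advance the producer index in the same style as
 * "tx_ring->next_to_use = (i < tx_ring->count) ? i : 0;".
 */
static void advance(struct ring *r)
{
	unsigned int i = r->next_to_use + 1;

	r->next_to_use = (i < r->count) ? i : 0;
}

int main(void)
{
	struct ring r = { .count = 8, .next_to_use = 6, .next_to_clean = 2 };

	printf("unused: %u\n", desc_unused(&r));	/* 3 */
	advance(&r);					/* 6 -> 7 */
	advance(&r);					/* 7 wraps to 0 */
	printf("next_to_use: %u\n", r.next_to_use);	/* 0 */
	return 0;
}
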
151 void ice_clean_tx_ring(struct ice_tx_ring *tx_ring) in ice_clean_tx_ring() argument
156 if (ice_ring_is_xdp(tx_ring) && tx_ring->xsk_pool) { in ice_clean_tx_ring()
157 ice_xsk_clean_xdp_ring(tx_ring); in ice_clean_tx_ring()
162 if (!tx_ring->tx_buf) in ice_clean_tx_ring()
166 for (i = 0; i < tx_ring->count; i++) in ice_clean_tx_ring()
167 ice_unmap_and_free_tx_buf(tx_ring, &tx_ring->tx_buf[i]); in ice_clean_tx_ring()
170 memset(tx_ring->tx_buf, 0, sizeof(*tx_ring->tx_buf) * tx_ring->count); in ice_clean_tx_ring()
172 size = ALIGN(tx_ring->count * sizeof(struct ice_tx_desc), in ice_clean_tx_ring()
175 memset(tx_ring->desc, 0, size); in ice_clean_tx_ring()
177 tx_ring->next_to_use = 0; in ice_clean_tx_ring()
178 tx_ring->next_to_clean = 0; in ice_clean_tx_ring()
180 if (!tx_ring->netdev) in ice_clean_tx_ring()
184 netdev_tx_reset_queue(txring_txq(tx_ring)); in ice_clean_tx_ring()
193 void ice_free_tx_ring(struct ice_tx_ring *tx_ring) in ice_free_tx_ring() argument
197 ice_clean_tx_ring(tx_ring); in ice_free_tx_ring()
198 devm_kfree(tx_ring->dev, tx_ring->tx_buf); in ice_free_tx_ring()
199 tx_ring->tx_buf = NULL; in ice_free_tx_ring()
201 if (tx_ring->desc) { in ice_free_tx_ring()
202 size = ALIGN(tx_ring->count * sizeof(struct ice_tx_desc), in ice_free_tx_ring()
204 dmam_free_coherent(tx_ring->dev, size, in ice_free_tx_ring()
205 tx_ring->desc, tx_ring->dma); in ice_free_tx_ring()
206 tx_ring->desc = NULL; in ice_free_tx_ring()
217 static bool ice_clean_tx_irq(struct ice_tx_ring *tx_ring, int napi_budget) in ice_clean_tx_irq() argument
221 struct ice_vsi *vsi = tx_ring->vsi; in ice_clean_tx_irq()
222 s16 i = tx_ring->next_to_clean; in ice_clean_tx_irq()
227 netdev_txq_bql_complete_prefetchw(txring_txq(tx_ring)); in ice_clean_tx_irq()
229 tx_buf = &tx_ring->tx_buf[i]; in ice_clean_tx_irq()
230 tx_desc = ICE_TX_DESC(tx_ring, i); in ice_clean_tx_irq()
231 i -= tx_ring->count; in ice_clean_tx_irq()
247 ice_trace(clean_tx_irq, tx_ring, tx_desc, tx_buf); in ice_clean_tx_irq()
264 dma_unmap_single(tx_ring->dev, in ice_clean_tx_irq()
275 ice_trace(clean_tx_irq_unmap, tx_ring, tx_desc, tx_buf); in ice_clean_tx_irq()
280 i -= tx_ring->count; in ice_clean_tx_irq()
281 tx_buf = tx_ring->tx_buf; in ice_clean_tx_irq()
282 tx_desc = ICE_TX_DESC(tx_ring, 0); in ice_clean_tx_irq()
287 dma_unmap_page(tx_ring->dev, in ice_clean_tx_irq()
294 ice_trace(clean_tx_irq_unmap_eop, tx_ring, tx_desc, tx_buf); in ice_clean_tx_irq()
301 i -= tx_ring->count; in ice_clean_tx_irq()
302 tx_buf = tx_ring->tx_buf; in ice_clean_tx_irq()
303 tx_desc = ICE_TX_DESC(tx_ring, 0); in ice_clean_tx_irq()
312 i += tx_ring->count; in ice_clean_tx_irq()
313 tx_ring->next_to_clean = i; in ice_clean_tx_irq()
315 ice_update_tx_ring_stats(tx_ring, total_pkts, total_bytes); in ice_clean_tx_irq()
316 netdev_tx_completed_queue(txring_txq(tx_ring), total_pkts, total_bytes); in ice_clean_tx_irq()
319 if (unlikely(total_pkts && netif_carrier_ok(tx_ring->netdev) && in ice_clean_tx_irq()
320 (ICE_DESC_UNUSED(tx_ring) >= TX_WAKE_THRESHOLD))) { in ice_clean_tx_irq()
325 if (netif_tx_queue_stopped(txring_txq(tx_ring)) && in ice_clean_tx_irq()
327 netif_tx_wake_queue(txring_txq(tx_ring)); in ice_clean_tx_irq()
328 ++tx_ring->ring_stats->tx_stats.restart_q; in ice_clean_tx_irq()
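
The ice_clean_tx_irq() lines above (and the ice_clean_ctrl_tx_irq() lines at the end of this listing) walk the ring with a biased index: next_to_clean is offset by -count on entry, so the wrap test is a cheap "did i reach zero" check, and the bias is removed again before storing the index back (lines 231, 280 and 312-313). A standalone sketch of that trick, with illustrative values only:

#include <stdio.h>

int main(void)
{
	const int count = 8;
	int ntc = 6;		/* next_to_clean at entry */
	int i = ntc - count;	/* bias by -count, as in "i -= tx_ring->count" */
	int done = 5;		/* descriptors completed in this pass */

	while (done--) {
		/* ... unmap and free the buffer at slot (i + count) ... */
		i++;
		if (!i)		/* walked past the last descriptor */
			i -= count;
	}

	ntc = i + count;	/* un-bias, as in "i += tx_ring->count" */
	printf("next_to_clean = %d\n", ntc);	/* 3 */
	return 0;
}
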
341 int ice_setup_tx_ring(struct ice_tx_ring *tx_ring) in ice_setup_tx_ring() argument
343 struct device *dev = tx_ring->dev; in ice_setup_tx_ring()
350 WARN_ON(tx_ring->tx_buf); in ice_setup_tx_ring()
351 tx_ring->tx_buf = in ice_setup_tx_ring()
352 devm_kcalloc(dev, sizeof(*tx_ring->tx_buf), tx_ring->count, in ice_setup_tx_ring()
354 if (!tx_ring->tx_buf) in ice_setup_tx_ring()
358 size = ALIGN(tx_ring->count * sizeof(struct ice_tx_desc), in ice_setup_tx_ring()
360 tx_ring->desc = dmam_alloc_coherent(dev, size, &tx_ring->dma, in ice_setup_tx_ring()
362 if (!tx_ring->desc) { in ice_setup_tx_ring()
368 tx_ring->next_to_use = 0; in ice_setup_tx_ring()
369 tx_ring->next_to_clean = 0; in ice_setup_tx_ring()
370 tx_ring->ring_stats->tx_stats.prev_pkt = -1; in ice_setup_tx_ring()
374 devm_kfree(dev, tx_ring->tx_buf); in ice_setup_tx_ring()
375 tx_ring->tx_buf = NULL; in ice_setup_tx_ring()
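
ice_setup_tx_ring() allocates two arrays per ring: the tx_buf bookkeeping array via devm_kcalloc() and the DMA descriptor area via dmam_alloc_coherent(), with the descriptor area sized as count * sizeof(struct ice_tx_desc) rounded up by ALIGN() (lines 358-360, mirrored by the memset/free sizing in ice_clean_tx_ring() and ice_free_tx_ring()). A small sketch of just that sizing arithmetic; the 16-byte descriptor size and 4096-byte (PAGE_SIZE-style) rounding are assumptions, since the listing truncates the ALIGN() arguments:

#include <stdio.h>

/* Same rounding as the kernel's ALIGN(x, a) for a power-of-two 'a'. */
#define ALIGN_UP(x, a)	(((x) + (a) - 1) & ~((unsigned long)(a) - 1))

int main(void)
{
	unsigned long desc_size = 16;	/* assumed sizeof(struct ice_tx_desc) */
	unsigned long count = 256;	/* example ring length */
	unsigned long align = 4096;	/* assumed PAGE_SIZE-style rounding */
	unsigned long size = ALIGN_UP(count * desc_size, align);

	printf("descriptor area: %lu bytes\n", size);	/* 4096 */
	return 0;
}
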
1300 struct ice_tx_ring *tx_ring; in __ice_update_sample() local
1302 ice_for_each_tx_ring(tx_ring, *rc) { in __ice_update_sample()
1305 ring_stats = tx_ring->ring_stats; in __ice_update_sample()
1475 struct ice_tx_ring *tx_ring; in ice_napi_poll() local
1484 ice_for_each_tx_ring(tx_ring, q_vector->tx) { in ice_napi_poll()
1485 struct xsk_buff_pool *xsk_pool = READ_ONCE(tx_ring->xsk_pool); in ice_napi_poll()
1489 wd = ice_xmit_zc(tx_ring, xsk_pool); in ice_napi_poll()
1490 else if (ice_ring_is_xdp(tx_ring)) in ice_napi_poll()
1493 wd = ice_clean_tx_irq(tx_ring, budget); in ice_napi_poll()
1560 static int __ice_maybe_stop_tx(struct ice_tx_ring *tx_ring, unsigned int size) in __ice_maybe_stop_tx() argument
1562 netif_tx_stop_queue(txring_txq(tx_ring)); in __ice_maybe_stop_tx()
1567 if (likely(ICE_DESC_UNUSED(tx_ring) < size)) in __ice_maybe_stop_tx()
1571 netif_tx_start_queue(txring_txq(tx_ring)); in __ice_maybe_stop_tx()
1572 ++tx_ring->ring_stats->tx_stats.restart_q; in __ice_maybe_stop_tx()
1583 static int ice_maybe_stop_tx(struct ice_tx_ring *tx_ring, unsigned int size) in ice_maybe_stop_tx() argument
1585 if (likely(ICE_DESC_UNUSED(tx_ring) >= size)) in ice_maybe_stop_tx()
1588 return __ice_maybe_stop_tx(tx_ring, size); in ice_maybe_stop_tx()
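
__ice_maybe_stop_tx() and ice_maybe_stop_tx() above implement the usual stop-then-recheck pattern: the fast path returns immediately when enough descriptors are free; otherwise the queue is stopped, a memory barrier is issued, and the free count is re-checked in case the cleanup path freed room in the meantime. A toy model of that control flow, with stand-in state (unused, stopped) instead of the real netif helpers:

#include <stdbool.h>
#include <stdio.h>

static unsigned int unused;	/* pretend free-descriptor count */
static bool stopped;		/* pretend netif queue state */

static int slow_path_stop(unsigned int size)
{
	stopped = true;			/* netif_tx_stop_queue() */
	/* smp_mb() in the driver: order the stop against the re-check */
	if (unused < size)
		return -1;		/* still no room: stay stopped (-EBUSY) */
	stopped = false;		/* a reprieve: netif_tx_start_queue() */
	return 0;
}

static int maybe_stop(unsigned int size)
{
	if (unused >= size)		/* fast path: plenty of space */
		return 0;
	return slow_path_stop(size);
}

int main(void)
{
	unused = 1;
	int ret = maybe_stop(4);
	printf("need 4: %d, stopped=%d\n", ret, stopped);	/* -1, 1 */

	unused = 16;
	stopped = false;
	ret = maybe_stop(4);
	printf("need 4: %d, stopped=%d\n", ret, stopped);	/* 0, 0 */
	return 0;
}
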
1602 ice_tx_map(struct ice_tx_ring *tx_ring, struct ice_tx_buf *first, in ice_tx_map() argument
1606 u16 i = tx_ring->next_to_use; in ice_tx_map()
1623 tx_desc = ICE_TX_DESC(tx_ring, i); in ice_tx_map()
1630 dma = dma_map_single(tx_ring->dev, skb->data, size, DMA_TO_DEVICE); in ice_tx_map()
1637 if (dma_mapping_error(tx_ring->dev, dma)) in ice_tx_map()
1659 if (i == tx_ring->count) { in ice_tx_map()
1660 tx_desc = ICE_TX_DESC(tx_ring, 0); in ice_tx_map()
1680 if (i == tx_ring->count) { in ice_tx_map()
1681 tx_desc = ICE_TX_DESC(tx_ring, 0); in ice_tx_map()
1688 dma = skb_frag_dma_map(tx_ring->dev, frag, 0, size, in ice_tx_map()
1691 tx_buf = &tx_ring->tx_buf[i]; in ice_tx_map()
1699 if (i == tx_ring->count) in ice_tx_map()
1718 tx_ring->next_to_use = i; in ice_tx_map()
1720 ice_maybe_stop_tx(tx_ring, DESC_NEEDED); in ice_tx_map()
1723 kick = __netdev_tx_sent_queue(txring_txq(tx_ring), first->bytecount, in ice_tx_map()
1727 writel(i, tx_ring->tail); in ice_tx_map()
1734 tx_buf = &tx_ring->tx_buf[i]; in ice_tx_map()
1735 ice_unmap_and_free_tx_buf(tx_ring, tx_buf); in ice_tx_map()
1739 i = tx_ring->count; in ice_tx_map()
1743 tx_ring->next_to_use = i; in ice_tx_map()
1947 ice_tx_prepare_vlan_flags(struct ice_tx_ring *tx_ring, struct ice_tx_buf *first) in ice_tx_prepare_vlan_flags() argument
1961 if (tx_ring->flags & ICE_TX_FLAGS_RING_VLAN_L2TAG2) in ice_tx_prepare_vlan_flags()
1967 ice_tx_prepare_vlan_flags_dcb(tx_ring, first); in ice_tx_prepare_vlan_flags()
2260 ice_tstamp(struct ice_tx_ring *tx_ring, struct sk_buff *skb, in ice_tstamp() argument
2274 idx = ice_ptp_request_ts(tx_ring->tx_tstamps, skb); in ice_tstamp()
2276 tx_ring->vsi->back->ptp.tx_hwtstamp_skipped++; in ice_tstamp()
2294 ice_xmit_frame_ring(struct sk_buff *skb, struct ice_tx_ring *tx_ring) in ice_xmit_frame_ring() argument
2297 struct ice_vsi *vsi = tx_ring->vsi; in ice_xmit_frame_ring()
2303 ice_trace(xmit_frame_ring, tx_ring, skb); in ice_xmit_frame_ring()
2313 tx_ring->ring_stats->tx_stats.tx_linearize++; in ice_xmit_frame_ring()
2322 if (ice_maybe_stop_tx(tx_ring, count + ICE_DESCS_PER_CACHE_LINE + in ice_xmit_frame_ring()
2324 tx_ring->ring_stats->tx_stats.tx_busy++; in ice_xmit_frame_ring()
2329 netdev_txq_bql_enqueue_prefetchw(txring_txq(tx_ring)); in ice_xmit_frame_ring()
2331 offload.tx_ring = tx_ring; in ice_xmit_frame_ring()
2334 first = &tx_ring->tx_buf[tx_ring->next_to_use]; in ice_xmit_frame_ring()
2342 ice_tx_prepare_vlan_flags(tx_ring, first); in ice_xmit_frame_ring()
2370 ice_tstamp(tx_ring, skb, first, &offload); in ice_xmit_frame_ring()
2376 u16 i = tx_ring->next_to_use; in ice_xmit_frame_ring()
2379 cdesc = ICE_TX_CTX_DESC(tx_ring, i); in ice_xmit_frame_ring()
2381 tx_ring->next_to_use = (i < tx_ring->count) ? i : 0; in ice_xmit_frame_ring()
2390 ice_tx_map(tx_ring, first, &offload); in ice_xmit_frame_ring()
2394 ice_trace(xmit_frame_ring_drop, tx_ring, skb); in ice_xmit_frame_ring()
2410 struct ice_tx_ring *tx_ring; in ice_start_xmit() local
2412 tx_ring = vsi->tx_rings[skb->queue_mapping]; in ice_start_xmit()
2420 return ice_xmit_frame_ring(skb, tx_ring); in ice_start_xmit()
2460 void ice_clean_ctrl_tx_irq(struct ice_tx_ring *tx_ring) in ice_clean_ctrl_tx_irq() argument
2462 struct ice_vsi *vsi = tx_ring->vsi; in ice_clean_ctrl_tx_irq()
2463 s16 i = tx_ring->next_to_clean; in ice_clean_ctrl_tx_irq()
2468 tx_buf = &tx_ring->tx_buf[i]; in ice_clean_ctrl_tx_irq()
2469 tx_desc = ICE_TX_DESC(tx_ring, i); in ice_clean_ctrl_tx_irq()
2470 i -= tx_ring->count; in ice_clean_ctrl_tx_irq()
2497 i -= tx_ring->count; in ice_clean_ctrl_tx_irq()
2498 tx_buf = tx_ring->tx_buf; in ice_clean_ctrl_tx_irq()
2499 tx_desc = ICE_TX_DESC(tx_ring, 0); in ice_clean_ctrl_tx_irq()
2504 dma_unmap_single(tx_ring->dev, in ice_clean_ctrl_tx_irq()
2509 devm_kfree(tx_ring->dev, tx_buf->raw_buf); in ice_clean_ctrl_tx_irq()
2524 i -= tx_ring->count; in ice_clean_ctrl_tx_irq()
2525 tx_buf = tx_ring->tx_buf; in ice_clean_ctrl_tx_irq()
2526 tx_desc = ICE_TX_DESC(tx_ring, 0); in ice_clean_ctrl_tx_irq()
2532 i += tx_ring->count; in ice_clean_ctrl_tx_irq()
2533 tx_ring->next_to_clean = i; in ice_clean_ctrl_tx_irq()