Lines Matching refs:tx_ring
46 struct enetc_bdr *tx_ring) in enetc_rx_ring_from_xdp_tx_ring() argument
48 int index = &priv->tx_ring[tx_ring->index] - priv->xdp_tx_ring; in enetc_rx_ring_from_xdp_tx_ring()
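The fragment above only makes sense together with the xdp_tx_ring assignment near the end of this listing (enetc_alloc_msix(): priv->xdp_tx_ring = &priv->tx_ring[first_xdp_tx_ring]). Because xdp_tx_ring points into the middle of the priv->tx_ring[] pointer array, subtracting the two element addresses recovers the zero-based XDP queue number, which is then reused as the RX ring index. A minimal user-space sketch of that pointer arithmetic, with the ring count and first_xdp_tx_ring value chosen purely for illustration:

#include <stdio.h>

struct bdr { int index; };

int main(void)
{
	struct bdr rings[8];
	struct bdr *tx_ring[8];		/* stands in for priv->tx_ring[] */
	struct bdr **xdp_tx_ring;	/* stands in for priv->xdp_tx_ring */
	int first_xdp_tx_ring = 6;	/* illustrative: last two TX rings do XDP */
	int i;

	for (i = 0; i < 8; i++) {
		rings[i].index = i;
		tx_ring[i] = &rings[i];
	}
	xdp_tx_ring = &tx_ring[first_xdp_tx_ring];	/* as in enetc_alloc_msix() */

	/* enetc_rx_ring_from_xdp_tx_ring() analogue: TX ring 7 pairs with RX ring 1 */
	struct bdr *t = tx_ring[7];
	int rx_index = &tx_ring[t->index] - xdp_tx_ring;

	printf("rx ring index = %d\n", rx_index);	/* prints 1 */
	return 0;
}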
70 static void enetc_unmap_tx_buff(struct enetc_bdr *tx_ring, in enetc_unmap_tx_buff() argument
78 dma_unmap_page(tx_ring->dev, tx_swbd->dma, in enetc_unmap_tx_buff()
82 dma_unmap_single(tx_ring->dev, tx_swbd->dma, in enetc_unmap_tx_buff()
87 static void enetc_free_tx_frame(struct enetc_bdr *tx_ring, in enetc_free_tx_frame() argument
94 enetc_unmap_tx_buff(tx_ring, tx_swbd); in enetc_free_tx_frame()
106 static void enetc_update_tx_ring_tail(struct enetc_bdr *tx_ring) in enetc_update_tx_ring_tail() argument
109 enetc_wr_reg_hot(tx_ring->tpir, tx_ring->next_to_use); in enetc_update_tx_ring_tail()
145 static int enetc_map_tx_buffs(struct enetc_bdr *tx_ring, struct sk_buff *skb) in enetc_map_tx_buffs() argument
148 struct enetc_ndev_priv *priv = netdev_priv(tx_ring->ndev); in enetc_map_tx_buffs()
162 i = tx_ring->next_to_use; in enetc_map_tx_buffs()
163 txbd = ENETC_TXBD(*tx_ring, i); in enetc_map_tx_buffs()
166 dma = dma_map_single(tx_ring->dev, skb->data, len, DMA_TO_DEVICE); in enetc_map_tx_buffs()
167 if (unlikely(dma_mapping_error(tx_ring->dev, dma))) in enetc_map_tx_buffs()
174 tx_swbd = &tx_ring->tx_swbd[i]; in enetc_map_tx_buffs()
200 if (tx_ring->tsd_enable) in enetc_map_tx_buffs()
221 if (unlikely(i == tx_ring->bd_count)) { in enetc_map_tx_buffs()
223 tx_swbd = tx_ring->tx_swbd; in enetc_map_tx_buffs()
224 txbd = ENETC_TXBD(*tx_ring, 0); in enetc_map_tx_buffs()
279 dma = skb_frag_dma_map(tx_ring->dev, frag, 0, len, in enetc_map_tx_buffs()
281 if (dma_mapping_error(tx_ring->dev, dma)) in enetc_map_tx_buffs()
291 if (unlikely(i == tx_ring->bd_count)) { in enetc_map_tx_buffs()
293 tx_swbd = tx_ring->tx_swbd; in enetc_map_tx_buffs()
294 txbd = ENETC_TXBD(*tx_ring, 0); in enetc_map_tx_buffs()
313 tx_ring->tx_swbd[i].is_eof = true; in enetc_map_tx_buffs()
314 tx_ring->tx_swbd[i].skb = skb; in enetc_map_tx_buffs()
316 enetc_bdr_idx_inc(tx_ring, &i); in enetc_map_tx_buffs()
317 tx_ring->next_to_use = i; in enetc_map_tx_buffs()
321 enetc_update_tx_ring_tail(tx_ring); in enetc_map_tx_buffs()
326 dev_err(tx_ring->dev, "DMA map error"); in enetc_map_tx_buffs()
329 tx_swbd = &tx_ring->tx_swbd[i]; in enetc_map_tx_buffs()
330 enetc_free_tx_frame(tx_ring, tx_swbd); in enetc_map_tx_buffs()
332 i = tx_ring->bd_count; in enetc_map_tx_buffs()
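Two index patterns recur in the enetc_map_tx_buffs() fragments above: the producer index wraps back to 0 once it reaches tx_ring->bd_count, and the dma_err path walks backwards through everything mapped so far, wrapping in the opposite direction via i = tx_ring->bd_count before the decrement. A rough user-space sketch of both patterns; the ring size, fragment count, and failure point are made up for the example:

#include <stdio.h>

#define BD_COUNT 8

/* analogue of enetc_bdr_idx_inc(): advance the ring index and wrap at bd_count */
static void ring_idx_inc(int *i)
{
	if (++(*i) == BD_COUNT)
		*i = 0;
}

int main(void)
{
	int mapped[BD_COUNT] = { 0 };
	int i = 6, count = 0, frags = 4, fail_at = 3;
	int f;

	for (f = 0; f < frags; f++) {
		if (f == fail_at)
			goto dma_err;		/* pretend dma_map_*() failed here */
		mapped[i] = 1;			/* "map" fragment f into BD i */
		count++;
		ring_idx_inc(&i);		/* wraps 7 -> 0 */
	}
	return 0;

dma_err:
	/* unwind: release the frames mapped so far, walking backwards with wrap */
	do {
		mapped[i] = 0;			/* enetc_free_tx_frame() analogue */
		if (i == 0)
			i = BD_COUNT;		/* matches "i = tx_ring->bd_count" above */
		i--;
	} while (count--);

	for (f = 0; f < BD_COUNT; f++)
		printf("bd %d mapped=%d\n", f, mapped[f]);
	return 1;
}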
339 static void enetc_map_tx_tso_hdr(struct enetc_bdr *tx_ring, struct sk_buff *skb, in enetc_map_tx_tso_hdr() argument
349 addr = tx_ring->tso_headers_dma + *i * TSO_HEADER_SIZE; in enetc_map_tx_tso_hdr()
375 enetc_bdr_idx_inc(tx_ring, i); in enetc_map_tx_tso_hdr()
376 txbd = ENETC_TXBD(*tx_ring, *i); in enetc_map_tx_tso_hdr()
377 tx_swbd = &tx_ring->tx_swbd[*i]; in enetc_map_tx_tso_hdr()
392 static int enetc_map_tx_tso_data(struct enetc_bdr *tx_ring, struct sk_buff *skb, in enetc_map_tx_tso_data() argument
403 addr = dma_map_single(tx_ring->dev, data, size, DMA_TO_DEVICE); in enetc_map_tx_tso_data()
404 if (unlikely(dma_mapping_error(tx_ring->dev, addr))) { in enetc_map_tx_tso_data()
405 netdev_err(tx_ring->ndev, "DMA map error\n"); in enetc_map_tx_tso_data()
458 static void enetc_tso_complete_csum(struct enetc_bdr *tx_ring, struct tso_t *tso, in enetc_tso_complete_csum() argument
488 static int enetc_map_tx_tso_buffs(struct enetc_bdr *tx_ring, struct sk_buff *skb) in enetc_map_tx_tso_buffs() argument
501 i = tx_ring->next_to_use; in enetc_map_tx_tso_buffs()
507 txbd = ENETC_TXBD(*tx_ring, i); in enetc_map_tx_tso_buffs()
508 tx_swbd = &tx_ring->tx_swbd[i]; in enetc_map_tx_tso_buffs()
516 hdr = tx_ring->tso_headers + i * TSO_HEADER_SIZE; in enetc_map_tx_tso_buffs()
521 enetc_map_tx_tso_hdr(tx_ring, skb, tx_swbd, txbd, &i, hdr_len, data_len); in enetc_map_tx_tso_buffs()
531 enetc_bdr_idx_inc(tx_ring, &i); in enetc_map_tx_tso_buffs()
532 txbd = ENETC_TXBD(*tx_ring, i); in enetc_map_tx_tso_buffs()
533 tx_swbd = &tx_ring->tx_swbd[i]; in enetc_map_tx_tso_buffs()
544 err = enetc_map_tx_tso_data(tx_ring, skb, tx_swbd, txbd, in enetc_map_tx_tso_buffs()
559 enetc_tso_complete_csum(tx_ring, &tso, skb, hdr, pos, csum); in enetc_map_tx_tso_buffs()
565 enetc_bdr_idx_inc(tx_ring, &i); in enetc_map_tx_tso_buffs()
568 tx_ring->next_to_use = i; in enetc_map_tx_tso_buffs()
569 enetc_update_tx_ring_tail(tx_ring); in enetc_map_tx_tso_buffs()
574 dev_err(tx_ring->dev, "DMA map error"); in enetc_map_tx_tso_buffs()
578 tx_swbd = &tx_ring->tx_swbd[i]; in enetc_map_tx_tso_buffs()
579 enetc_free_tx_frame(tx_ring, tx_swbd); in enetc_map_tx_tso_buffs()
581 i = tx_ring->bd_count; in enetc_map_tx_tso_buffs()
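The TSO fragments above (tso_headers + i * TSO_HEADER_SIZE and tso_headers_dma + *i * TSO_HEADER_SIZE) suggest that tso_headers and tso_headers_dma are the CPU and DMA views of one per-ring header buffer, carved into one TSO_HEADER_SIZE slot per descriptor index, so the header rebuilt for BD i can be handed to hardware without a separate mapping. A small sketch of that addressing, using a plain malloc'ed buffer and a fake bus address in place of the driver's coherent allocation:

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#define BD_COUNT	64
#define TSO_HEADER_SIZE	256	/* value as in the kernel's net/tso.h */

int main(void)
{
	char *tso_headers = malloc((size_t)BD_COUNT * TSO_HEADER_SIZE);
	uint64_t tso_headers_dma = 0x80000000ull;	/* fake bus address for the demo */
	int i = 17;					/* some descriptor index */

	if (!tso_headers)
		return 1;

	/* CPU pointer used to build the header, as in enetc_map_tx_tso_buffs() */
	char *hdr = tso_headers + i * TSO_HEADER_SIZE;
	/* bus address that would go into the BD, as in enetc_map_tx_tso_hdr() */
	uint64_t addr = tso_headers_dma + (uint64_t)i * TSO_HEADER_SIZE;

	printf("slot %d: cpu offset %td, dma 0x%llx\n",
	       i, hdr - tso_headers, (unsigned long long)addr);
	free(tso_headers);
	return 0;
}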
592 struct enetc_bdr *tx_ring; in enetc_start_xmit() local
604 tx_ring = priv->tx_ring[skb->queue_mapping]; in enetc_start_xmit()
607 if (enetc_bd_unused(tx_ring) < tso_count_descs(skb)) { in enetc_start_xmit()
608 netif_stop_subqueue(ndev, tx_ring->index); in enetc_start_xmit()
613 count = enetc_map_tx_tso_buffs(tx_ring, skb); in enetc_start_xmit()
621 if (enetc_bd_unused(tx_ring) < ENETC_TXBDS_NEEDED(count)) { in enetc_start_xmit()
622 netif_stop_subqueue(ndev, tx_ring->index); in enetc_start_xmit()
632 count = enetc_map_tx_buffs(tx_ring, skb); in enetc_start_xmit()
639 if (enetc_bd_unused(tx_ring) < ENETC_TXBDS_MAX_NEEDED) in enetc_start_xmit()
640 netif_stop_subqueue(ndev, tx_ring->index); in enetc_start_xmit()
724 static int enetc_bd_ready_count(struct enetc_bdr *tx_ring, int ci) in enetc_bd_ready_count() argument
726 int pi = enetc_rd_reg_hot(tx_ring->tcir) & ENETC_TBCIR_IDX_MASK; in enetc_bd_ready_count()
728 return pi >= ci ? pi - ci : tx_ring->bd_count - ci + pi; in enetc_bd_ready_count()
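enetc_bd_ready_count() is plain modular arithmetic on the ring: pi is the index the hardware has consumed up to (read from tx_ring->tcir), ci is the driver's cleanup position (next_to_clean), and the ternary handles wrap-around. A tiny standalone check of the same expression, with an arbitrary ring size:

#include <assert.h>
#include <stdio.h>

/* same expression as enetc_bd_ready_count(); bd_count is arbitrary here */
static int ready_count(int pi, int ci, int bd_count)
{
	return pi >= ci ? pi - ci : bd_count - ci + pi;
}

int main(void)
{
	assert(ready_count(5, 2, 8) == 3);	/* no wrap: BDs 2, 3, 4 completed */
	assert(ready_count(1, 6, 8) == 3);	/* wrapped: BDs 6, 7, 0 completed */
	assert(ready_count(4, 4, 8) == 0);	/* nothing to clean */
	printf("ok\n");
	return 0;
}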
775 static void enetc_recycle_xdp_tx_buff(struct enetc_bdr *tx_ring, in enetc_recycle_xdp_tx_buff() argument
778 struct enetc_ndev_priv *priv = netdev_priv(tx_ring->ndev); in enetc_recycle_xdp_tx_buff()
788 rx_ring = enetc_rx_ring_from_xdp_tx_ring(priv, tx_ring); in enetc_recycle_xdp_tx_buff()
814 static bool enetc_clean_tx_ring(struct enetc_bdr *tx_ring, int napi_budget) in enetc_clean_tx_ring() argument
817 struct net_device *ndev = tx_ring->ndev; in enetc_clean_tx_ring()
824 i = tx_ring->next_to_clean; in enetc_clean_tx_ring()
825 tx_swbd = &tx_ring->tx_swbd[i]; in enetc_clean_tx_ring()
827 bds_to_clean = enetc_bd_ready_count(tx_ring, i); in enetc_clean_tx_ring()
837 union enetc_tx_bd *txbd = ENETC_TXBD(*tx_ring, i); in enetc_clean_tx_ring()
852 enetc_recycle_xdp_tx_buff(tx_ring, tx_swbd); in enetc_clean_tx_ring()
854 enetc_unmap_tx_buff(tx_ring, tx_swbd); in enetc_clean_tx_ring()
881 if (unlikely(i == tx_ring->bd_count)) { in enetc_clean_tx_ring()
883 tx_swbd = tx_ring->tx_swbd; in enetc_clean_tx_ring()
890 enetc_wr_reg_hot(tx_ring->idr, BIT(tx_ring->index) | in enetc_clean_tx_ring()
891 BIT(16 + tx_ring->index)); in enetc_clean_tx_ring()
895 bds_to_clean = enetc_bd_ready_count(tx_ring, i); in enetc_clean_tx_ring()
898 tx_ring->next_to_clean = i; in enetc_clean_tx_ring()
899 tx_ring->stats.packets += tx_frm_cnt; in enetc_clean_tx_ring()
900 tx_ring->stats.bytes += tx_byte_cnt; in enetc_clean_tx_ring()
901 tx_ring->stats.win_drop += tx_win_drop; in enetc_clean_tx_ring()
904 __netif_subqueue_stopped(ndev, tx_ring->index) && in enetc_clean_tx_ring()
906 (enetc_bd_unused(tx_ring) >= ENETC_TXBDS_MAX_NEEDED))) { in enetc_clean_tx_ring()
907 netif_wake_subqueue(ndev, tx_ring->index); in enetc_clean_tx_ring()
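The enetc_start_xmit() and enetc_clean_tx_ring() fragments form the usual stop/wake back-pressure pair: transmit stops the subqueue when fewer than ENETC_TXBDS_MAX_NEEDED descriptors remain free, and the completion path wakes it again once cleanup has freed at least that many. A simplified single-threaded model of that hand-shake; the threshold and ring size are illustrative, and the real driver naturally splits this across the xmit and NAPI contexts:

#include <stdbool.h>
#include <stdio.h>

#define BD_COUNT		64
#define TXBDS_MAX_NEEDED	18	/* stand-in for ENETC_TXBDS_MAX_NEEDED */

static int next_to_use, next_to_clean;
static bool queue_stopped;

static int bd_unused(void)		/* free descriptors left in the ring */
{
	int used = next_to_use - next_to_clean;

	if (used < 0)
		used += BD_COUNT;
	return BD_COUNT - used - 1;
}

static bool xmit(int nr_bds)		/* enetc_start_xmit()-style producer */
{
	if (queue_stopped || bd_unused() < nr_bds)
		return false;			/* NETDEV_TX_BUSY analogue */
	next_to_use = (next_to_use + nr_bds) % BD_COUNT;
	if (bd_unused() < TXBDS_MAX_NEEDED)
		queue_stopped = true;		/* netif_stop_subqueue() */
	return true;
}

static void clean(int nr_bds)		/* enetc_clean_tx_ring()-style consumer */
{
	next_to_clean = (next_to_clean + nr_bds) % BD_COUNT;
	if (queue_stopped && bd_unused() >= TXBDS_MAX_NEEDED)
		queue_stopped = false;		/* netif_wake_subqueue() */
}

int main(void)
{
	int sent = 0;

	while (xmit(8))			/* fill until back-pressure kicks in */
		sent++;
	printf("sent %d frames, stopped=%d\n", sent, queue_stopped);
	clean(32);			/* hardware completes some descriptors */
	printf("after cleanup: stopped=%d, unused=%d\n", queue_stopped, bd_unused());
	return 0;
}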
1250 static void enetc_xdp_map_tx_buff(struct enetc_bdr *tx_ring, int i, in enetc_xdp_map_tx_buff() argument
1254 union enetc_tx_bd *txbd = ENETC_TXBD(*tx_ring, i); in enetc_xdp_map_tx_buff()
1263 memcpy(&tx_ring->tx_swbd[i], tx_swbd, sizeof(*tx_swbd)); in enetc_xdp_map_tx_buff()
1269 static bool enetc_xdp_tx(struct enetc_bdr *tx_ring, in enetc_xdp_tx() argument
1275 if (unlikely(enetc_bd_unused(tx_ring) < ENETC_TXBDS_NEEDED(num_tx_swbd))) in enetc_xdp_tx()
1283 i = tx_ring->next_to_use; in enetc_xdp_tx()
1288 enetc_xdp_map_tx_buff(tx_ring, i, xdp_tx_swbd, frm_len); in enetc_xdp_tx()
1292 union enetc_tx_bd *txbd = ENETC_TXBD(*tx_ring, i); in enetc_xdp_tx()
1297 enetc_bdr_idx_inc(tx_ring, &i); in enetc_xdp_tx()
1300 tx_ring->next_to_use = i; in enetc_xdp_tx()
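enetc_xdp_tx() is the batched variant of the same producer logic: the whole batch is refused up front if ENETC_TXBDS_NEEDED(num_tx_swbd) descriptors are not free (the caller then bumps xdp_tx_drops), each software BD is copied into the ring starting at next_to_use, and the new next_to_use is published only at the end; the tail register itself is written once per batch via enetc_update_tx_ring_tail(). A compact user-space model of that all-or-nothing commit, with invented sizes and a variable standing in for the tail register:

#include <stdbool.h>
#include <stdio.h>

#define BD_COUNT 16

struct swbd { int len; };	/* stand-in for struct enetc_tx_swbd */

static struct swbd ring_swbd[BD_COUNT];
static int next_to_use, next_to_clean;
static int tail_reg;		/* models the write in enetc_update_tx_ring_tail() */

static int bd_unused(void)
{
	int used = next_to_use - next_to_clean;

	return BD_COUNT - (used < 0 ? used + BD_COUNT : used) - 1;
}

/* enetc_xdp_tx()-style batch: all-or-nothing, producer index published at the end */
static bool xdp_tx(const struct swbd *batch, int n)
{
	int i = next_to_use, k;

	if (bd_unused() < n)
		return false;		/* whole batch dropped (xdp_tx_drops++) */
	for (k = 0; k < n; k++) {
		ring_swbd[i] = batch[k];	/* enetc_xdp_map_tx_buff() analogue */
		if (++i == BD_COUNT)
			i = 0;
	}
	next_to_use = i;
	return true;
}

int main(void)
{
	struct swbd batch[3] = { {64}, {128}, {256} };

	if (xdp_tx(batch, 3))
		tail_reg = next_to_use;	/* one tail write per batch/NAPI cycle */
	printf("next_to_use=%d tail=%d\n", next_to_use, tail_reg);
	return 0;
}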
1305 static int enetc_xdp_frame_to_xdp_tx_swbd(struct enetc_bdr *tx_ring, in enetc_xdp_frame_to_xdp_tx_swbd() argument
1318 dma = dma_map_single(tx_ring->dev, data, len, DMA_TO_DEVICE); in enetc_xdp_frame_to_xdp_tx_swbd()
1319 if (unlikely(dma_mapping_error(tx_ring->dev, dma))) { in enetc_xdp_frame_to_xdp_tx_swbd()
1320 netdev_err(tx_ring->ndev, "DMA map error\n"); in enetc_xdp_frame_to_xdp_tx_swbd()
1345 dma = dma_map_single(tx_ring->dev, data, len, DMA_TO_DEVICE); in enetc_xdp_frame_to_xdp_tx_swbd()
1346 if (unlikely(dma_mapping_error(tx_ring->dev, dma))) { in enetc_xdp_frame_to_xdp_tx_swbd()
1349 enetc_unmap_tx_buff(tx_ring, &xdp_tx_arr[n]); in enetc_xdp_frame_to_xdp_tx_swbd()
1351 netdev_err(tx_ring->ndev, "DMA map error\n"); in enetc_xdp_frame_to_xdp_tx_swbd()
1377 struct enetc_bdr *tx_ring; in enetc_xdp_xmit() local
1386 tx_ring = priv->xdp_tx_ring[smp_processor_id()]; in enetc_xdp_xmit()
1388 prefetchw(ENETC_TXBD(*tx_ring, tx_ring->next_to_use)); in enetc_xdp_xmit()
1391 xdp_tx_bd_cnt = enetc_xdp_frame_to_xdp_tx_swbd(tx_ring, in enetc_xdp_xmit()
1397 if (unlikely(!enetc_xdp_tx(tx_ring, xdp_redirect_arr, in enetc_xdp_xmit()
1400 enetc_unmap_tx_buff(tx_ring, in enetc_xdp_xmit()
1402 tx_ring->stats.xdp_tx_drops++; in enetc_xdp_xmit()
1410 enetc_update_tx_ring_tail(tx_ring); in enetc_xdp_xmit()
1412 tx_ring->stats.xdp_tx += xdp_tx_frm_cnt; in enetc_xdp_xmit()
1538 struct enetc_bdr *tx_ring; in enetc_clean_rx_ring_xdp() local
1608 tx_ring = priv->xdp_tx_ring[rx_ring->index]; in enetc_clean_rx_ring_xdp()
1611 tx_ring->stats.xdp_tx_drops++; in enetc_clean_rx_ring_xdp()
1619 if (!enetc_xdp_tx(tx_ring, xdp_tx_arr, xdp_tx_bd_cnt)) { in enetc_clean_rx_ring_xdp()
1621 tx_ring->stats.xdp_tx_drops++; in enetc_clean_rx_ring_xdp()
1623 tx_ring->stats.xdp_tx += xdp_tx_bd_cnt; in enetc_clean_rx_ring_xdp()
1668 enetc_update_tx_ring_tail(tx_ring); in enetc_clean_rx_ring_xdp()
1690 if (!enetc_clean_tx_ring(&v->tx_ring[i], budget)) in enetc_poll()
1845 struct enetc_bdr *tx_ring = priv->tx_ring[i]; in enetc_alloc_tx_resources() local
1847 err = enetc_alloc_tx_resource(&tx_res[i], tx_ring->dev, in enetc_alloc_tx_resources()
1848 tx_ring->bd_count); in enetc_alloc_tx_resources()
1947 static void enetc_assign_tx_resource(struct enetc_bdr *tx_ring, in enetc_assign_tx_resource() argument
1950 tx_ring->bd_base = res ? res->bd_base : NULL; in enetc_assign_tx_resource()
1951 tx_ring->bd_dma_base = res ? res->bd_dma_base : 0; in enetc_assign_tx_resource()
1952 tx_ring->tx_swbd = res ? res->tx_swbd : NULL; in enetc_assign_tx_resource()
1953 tx_ring->tso_headers = res ? res->tso_headers : NULL; in enetc_assign_tx_resource()
1954 tx_ring->tso_headers_dma = res ? res->tso_headers_dma : 0; in enetc_assign_tx_resource()
1974 enetc_assign_tx_resource(priv->tx_ring[i], in enetc_assign_tx_resources()
1997 static void enetc_free_tx_ring(struct enetc_bdr *tx_ring) in enetc_free_tx_ring() argument
2001 for (i = 0; i < tx_ring->bd_count; i++) { in enetc_free_tx_ring()
2002 struct enetc_tx_swbd *tx_swbd = &tx_ring->tx_swbd[i]; in enetc_free_tx_ring()
2004 enetc_free_tx_frame(tx_ring, tx_swbd); in enetc_free_tx_ring()
2033 enetc_free_tx_ring(priv->tx_ring[i]); in enetc_free_rxtx_rings()
2118 static void enetc_setup_txbdr(struct enetc_hw *hw, struct enetc_bdr *tx_ring) in enetc_setup_txbdr() argument
2120 int idx = tx_ring->index; in enetc_setup_txbdr()
2124 lower_32_bits(tx_ring->bd_dma_base)); in enetc_setup_txbdr()
2127 upper_32_bits(tx_ring->bd_dma_base)); in enetc_setup_txbdr()
2129 WARN_ON(!IS_ALIGNED(tx_ring->bd_count, 64)); /* multiple of 64 */ in enetc_setup_txbdr()
2131 ENETC_RTBLENR_LEN(tx_ring->bd_count)); in enetc_setup_txbdr()
2134 tx_ring->next_to_use = enetc_txbdr_rd(hw, idx, ENETC_TBPIR); in enetc_setup_txbdr()
2135 tx_ring->next_to_clean = enetc_txbdr_rd(hw, idx, ENETC_TBCIR); in enetc_setup_txbdr()
2140 tbmr = ENETC_TBMR_SET_PRIO(tx_ring->prio); in enetc_setup_txbdr()
2141 if (tx_ring->ndev->features & NETIF_F_HW_VLAN_CTAG_TX) in enetc_setup_txbdr()
2147 tx_ring->tpir = hw->reg + ENETC_BDR(TX, idx, ENETC_TBPIR); in enetc_setup_txbdr()
2148 tx_ring->tcir = hw->reg + ENETC_BDR(TX, idx, ENETC_TBCIR); in enetc_setup_txbdr()
2149 tx_ring->idr = hw->reg + ENETC_SITXIDR; in enetc_setup_txbdr()
2210 enetc_setup_txbdr(hw, priv->tx_ring[i]); in enetc_setup_bdrs()
2216 static void enetc_enable_txbdr(struct enetc_hw *hw, struct enetc_bdr *tx_ring) in enetc_enable_txbdr() argument
2218 int idx = tx_ring->index; in enetc_enable_txbdr()
2251 enetc_enable_txbdr(hw, priv->tx_ring[i]); in enetc_enable_tx_bdrs()
2285 enetc_disable_txbdr(hw, priv->tx_ring[i]); in enetc_disable_tx_bdrs()
2288 static void enetc_wait_txbdr(struct enetc_hw *hw, struct enetc_bdr *tx_ring) in enetc_wait_txbdr() argument
2291 int idx = tx_ring->index; in enetc_wait_txbdr()
2301 netdev_warn(tx_ring->ndev, "timeout for tx ring #%d clear\n", in enetc_wait_txbdr()
2311 enetc_wait_txbdr(hw, priv->tx_ring[i]); in enetc_wait_bdrs()
2340 int idx = v->tx_ring[j].index; in enetc_setup_irqs()
2664 priv->tx_ring[i]->prio); in enetc_debug_tx_ring_prios()
2671 struct enetc_bdr *tx_ring; in enetc_reset_tc_mqprio() local
2683 tx_ring = priv->tx_ring[i]; in enetc_reset_tc_mqprio()
2684 tx_ring->prio = 0; in enetc_reset_tc_mqprio()
2685 enetc_set_bdr_prio(hw, tx_ring->index, tx_ring->prio); in enetc_reset_tc_mqprio()
2701 struct enetc_bdr *tx_ring; in enetc_setup_tc_mqprio() local
2725 tx_ring = priv->tx_ring[q]; in enetc_setup_tc_mqprio()
2733 tx_ring->prio = tc; in enetc_setup_tc_mqprio()
2734 enetc_set_bdr_prio(hw, tx_ring->index, tx_ring->prio); in enetc_setup_tc_mqprio()
2845 packets += priv->tx_ring[i]->stats.packets; in enetc_get_stats()
2846 bytes += priv->tx_ring[i]->stats.bytes; in enetc_get_stats()
2847 tx_dropped += priv->tx_ring[i]->stats.win_drop; in enetc_get_stats()
3024 v = kzalloc(struct_size(v, tx_ring, v_tx_rings), GFP_KERNEL); in enetc_alloc_msix()
3069 bdr = &v->tx_ring[j]; in enetc_alloc_msix()
3074 priv->tx_ring[idx] = bdr; in enetc_alloc_msix()
3090 priv->xdp_tx_ring = &priv->tx_ring[first_xdp_tx_ring]; in enetc_alloc_msix()
3130 priv->tx_ring[i] = NULL; in enetc_free_msix()
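The enetc_alloc_msix() fragments tie the earlier pointer arithmetic together: each interrupt vector is allocated with struct_size() so that its tx_ring[] flexible array member holds v_tx_rings ring structs, priv->tx_ring[idx] is pointed at those embedded rings, and priv->xdp_tx_ring then simply aliases the tail of priv->tx_ring[] starting at first_xdp_tx_ring. A user-space sketch of the struct_size()-style allocation; the struct layout below is invented for the demo, not the driver's real struct enetc_int_vector:

#include <stdio.h>
#include <stdlib.h>

struct bdr { int index; };

/* toy stand-in for an interrupt vector with a flexible tx_ring[] member */
struct int_vector {
	int irq;
	struct bdr tx_ring[];
};

int main(void)
{
	int v_tx_rings = 2, j;

	/* user-space equivalent of kzalloc(struct_size(v, tx_ring, v_tx_rings), ...) */
	struct int_vector *v = calloc(1, sizeof(*v) + v_tx_rings * sizeof(v->tx_ring[0]));

	if (!v)
		return 1;
	for (j = 0; j < v_tx_rings; j++)
		v->tx_ring[j].index = j;	/* rings live inside the vector allocation */
	printf("vector holds %d rings, first index %d\n", v_tx_rings, v->tx_ring[0].index);
	free(v);
	return 0;
}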