Lines matching refs: tx_ring

22 static void i40e_fdir(struct i40e_ring *tx_ring,  in i40e_fdir()  argument
26 struct i40e_pf *pf = tx_ring->vsi->back; in i40e_fdir()
31 i = tx_ring->next_to_use; in i40e_fdir()
32 fdir_desc = I40E_TX_FDIRDESC(tx_ring, i); in i40e_fdir()
35 tx_ring->next_to_use = (i < tx_ring->count) ? i : 0; in i40e_fdir()
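The i40e_fdir() matches above show the driver's slot-claim pattern: read next_to_use, take the descriptor at that index, then advance the index and wrap back to zero once it reaches the ring size. A minimal sketch of that wrap arithmetic, using a hypothetical demo_ring stand-in rather than the real struct i40e_ring:

    /* Simplified model of the "(i < tx_ring->count) ? i : 0" wrap seen in
     * i40e_fdir(); demo_ring is a stand-in, not the driver's struct. */
    struct demo_ring {
        unsigned short next_to_use;   /* next free descriptor slot */
        unsigned short count;         /* total descriptors in the ring */
    };

    /* Claim the current slot and advance next_to_use, wrapping to 0 at the
     * end of the ring.  Returns the index of the slot just claimed. */
    static unsigned short demo_ring_claim_slot(struct demo_ring *ring)
    {
        unsigned short claimed = ring->next_to_use;
        unsigned short i = claimed + 1;

        ring->next_to_use = (i < ring->count) ? i : 0;
        return claimed;
    }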
87 struct i40e_ring *tx_ring; in i40e_program_fdir_filter() local
99 tx_ring = vsi->tx_rings[0]; in i40e_program_fdir_filter()
100 dev = tx_ring->dev; in i40e_program_fdir_filter()
103 for (i = I40E_FD_CLEAN_DELAY; I40E_DESC_UNUSED(tx_ring) < 2; i--) { in i40e_program_fdir_filter()
115 i = tx_ring->next_to_use; in i40e_program_fdir_filter()
116 first = &tx_ring->tx_bi[i]; in i40e_program_fdir_filter()
117 i40e_fdir(tx_ring, fdir_data, add); in i40e_program_fdir_filter()
120 i = tx_ring->next_to_use; in i40e_program_fdir_filter()
121 tx_desc = I40E_TX_DESC(tx_ring, i); in i40e_program_fdir_filter()
122 tx_buf = &tx_ring->tx_bi[i]; in i40e_program_fdir_filter()
124 tx_ring->next_to_use = ((i + 1) < tx_ring->count) ? i + 1 : 0; in i40e_program_fdir_filter()
149 writel(tx_ring->next_to_use, tx_ring->tail); in i40e_program_fdir_filter()
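i40e_program_fdir_filter() first polls until at least two descriptors are free, builds the filter and data descriptors, and finally bumps the hardware tail register with writel(). The free-slot count comes from the I40E_DESC_UNUSED() macro; the sketch below models that computation with a stand-in demo_ring struct. One slot is always kept empty so that next_to_use == next_to_clean can only mean "empty", never "full".

    /* Model of the I40E_DESC_UNUSED() computation; demo_ring is a stand-in. */
    struct demo_ring {
        unsigned short next_to_use;
        unsigned short next_to_clean;
        unsigned short count;
    };

    static unsigned short demo_desc_unused(const struct demo_ring *r)
    {
        /* If the clean index has not yet wrapped past the use index, add a
         * full ring's worth before subtracting; keep one slot in reserve. */
        unsigned short base = (r->next_to_clean > r->next_to_use) ? 0 : r->count;

        return base + r->next_to_clean - r->next_to_use - 1;
    }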
782 void i40e_clean_tx_ring(struct i40e_ring *tx_ring) in i40e_clean_tx_ring() argument
787 if (ring_is_xdp(tx_ring) && tx_ring->xsk_pool) { in i40e_clean_tx_ring()
788 i40e_xsk_clean_tx_ring(tx_ring); in i40e_clean_tx_ring()
791 if (!tx_ring->tx_bi) in i40e_clean_tx_ring()
795 for (i = 0; i < tx_ring->count; i++) in i40e_clean_tx_ring()
796 i40e_unmap_and_free_tx_resource(tx_ring, in i40e_clean_tx_ring()
797 &tx_ring->tx_bi[i]); in i40e_clean_tx_ring()
800 bi_size = sizeof(struct i40e_tx_buffer) * tx_ring->count; in i40e_clean_tx_ring()
801 memset(tx_ring->tx_bi, 0, bi_size); in i40e_clean_tx_ring()
804 memset(tx_ring->desc, 0, tx_ring->size); in i40e_clean_tx_ring()
806 tx_ring->next_to_use = 0; in i40e_clean_tx_ring()
807 tx_ring->next_to_clean = 0; in i40e_clean_tx_ring()
809 if (!tx_ring->netdev) in i40e_clean_tx_ring()
813 netdev_tx_reset_queue(txring_txq(tx_ring)); in i40e_clean_tx_ring()
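The i40e_clean_tx_ring() matches walk every tx_bi entry to unmap and free outstanding buffers, zero both the buffer-info array and the descriptor memory, and reset the ring indices. A simplified user-space sketch of that sequence; the demo_* types are stand-ins, and free() stands in for i40e_unmap_and_free_tx_resource():

    #include <stdlib.h>
    #include <string.h>

    struct demo_tx_buffer {
        void *data;                   /* models the attached skb/frame */
    };

    struct demo_ring {
        struct demo_tx_buffer *tx_bi; /* per-descriptor bookkeeping */
        void *desc;                   /* descriptor memory */
        size_t size;                  /* bytes of descriptor memory */
        unsigned short count;
        unsigned short next_to_use;
        unsigned short next_to_clean;
    };

    /* Release every outstanding buffer, then zero the bookkeeping so the
     * ring looks freshly initialized. */
    static void demo_clean_tx_ring(struct demo_ring *ring)
    {
        if (!ring->tx_bi)
            return;

        for (unsigned short i = 0; i < ring->count; i++) {
            free(ring->tx_bi[i].data);   /* models unmap-and-free */
            ring->tx_bi[i].data = NULL;
        }

        memset(ring->tx_bi, 0, sizeof(*ring->tx_bi) * ring->count);
        memset(ring->desc, 0, ring->size);

        ring->next_to_use = 0;
        ring->next_to_clean = 0;
    }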
822 void i40e_free_tx_resources(struct i40e_ring *tx_ring) in i40e_free_tx_resources() argument
824 i40e_clean_tx_ring(tx_ring); in i40e_free_tx_resources()
825 kfree(tx_ring->tx_bi); in i40e_free_tx_resources()
826 tx_ring->tx_bi = NULL; in i40e_free_tx_resources()
828 if (tx_ring->desc) { in i40e_free_tx_resources()
829 dma_free_coherent(tx_ring->dev, tx_ring->size, in i40e_free_tx_resources()
830 tx_ring->desc, tx_ring->dma); in i40e_free_tx_resources()
831 tx_ring->desc = NULL; in i40e_free_tx_resources()
873 struct i40e_ring *tx_ring = NULL; in i40e_detect_recover_hung() local
892 tx_ring = vsi->tx_rings[i]; in i40e_detect_recover_hung()
893 if (tx_ring && tx_ring->desc) { in i40e_detect_recover_hung()
901 packets = tx_ring->stats.packets & INT_MAX; in i40e_detect_recover_hung()
902 if (tx_ring->tx_stats.prev_pkt_ctr == packets) { in i40e_detect_recover_hung()
903 i40e_force_wb(vsi, tx_ring->q_vector); in i40e_detect_recover_hung()
911 tx_ring->tx_stats.prev_pkt_ctr = in i40e_detect_recover_hung()
912 i40e_get_tx_pending(tx_ring, true) ? packets : -1; in i40e_detect_recover_hung()
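i40e_detect_recover_hung() uses a simple heuristic: sample the ring's completed-packet counter, and if it has not moved since the previous sample while work was pending, force a descriptor write-back interrupt. A sketch of that detector, with demo_hang_state standing in for the driver's per-ring fields:

    #include <limits.h>
    #include <stdbool.h>

    struct demo_hang_state {
        unsigned long packets;   /* completed-packet counter for the ring */
        int prev_pkt_ctr;        /* last sampled value, or -1 when idle */
    };

    /* Returns true when the ring looks stuck; the caller would then force a
     * write-back interrupt (i40e_force_wb() in the driver). */
    static bool demo_detect_hang(struct demo_hang_state *st, bool tx_pending)
    {
        int packets = (int)(st->packets & INT_MAX);

        /* prev_pkt_ctr only holds a real counter value if work was pending
         * at the previous sample; a match means nothing completed since. */
        if (st->prev_pkt_ctr == packets)
            return true;

        /* Re-arm: remember the counter only while work is pending, so an
         * idle ring (-1) never trips the detector. */
        st->prev_pkt_ctr = tx_pending ? packets : -1;
        return false;
    }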
927 struct i40e_ring *tx_ring, int napi_budget, in i40e_clean_tx_irq() argument
930 int i = tx_ring->next_to_clean; in i40e_clean_tx_irq()
937 tx_buf = &tx_ring->tx_bi[i]; in i40e_clean_tx_irq()
938 tx_desc = I40E_TX_DESC(tx_ring, i); in i40e_clean_tx_irq()
939 i -= tx_ring->count; in i40e_clean_tx_irq()
941 tx_head = I40E_TX_DESC(tx_ring, i40e_get_head(tx_ring)); in i40e_clean_tx_irq()
953 i40e_trace(clean_tx_irq, tx_ring, tx_desc, tx_buf); in i40e_clean_tx_irq()
966 if (ring_is_xdp(tx_ring)) in i40e_clean_tx_irq()
972 dma_unmap_single(tx_ring->dev, in i40e_clean_tx_irq()
984 tx_ring, tx_desc, tx_buf); in i40e_clean_tx_irq()
990 i -= tx_ring->count; in i40e_clean_tx_irq()
991 tx_buf = tx_ring->tx_bi; in i40e_clean_tx_irq()
992 tx_desc = I40E_TX_DESC(tx_ring, 0); in i40e_clean_tx_irq()
997 dma_unmap_page(tx_ring->dev, in i40e_clean_tx_irq()
1010 i -= tx_ring->count; in i40e_clean_tx_irq()
1011 tx_buf = tx_ring->tx_bi; in i40e_clean_tx_irq()
1012 tx_desc = I40E_TX_DESC(tx_ring, 0); in i40e_clean_tx_irq()
1021 i += tx_ring->count; in i40e_clean_tx_irq()
1022 tx_ring->next_to_clean = i; in i40e_clean_tx_irq()
1023 i40e_update_tx_stats(tx_ring, total_packets, total_bytes); in i40e_clean_tx_irq()
1024 i40e_arm_wb(tx_ring, vsi, budget); in i40e_clean_tx_irq()
1026 if (ring_is_xdp(tx_ring)) in i40e_clean_tx_irq()
1030 netdev_tx_completed_queue(txring_txq(tx_ring), in i40e_clean_tx_irq()
1034 if (unlikely(total_packets && netif_carrier_ok(tx_ring->netdev) && in i40e_clean_tx_irq()
1035 (I40E_DESC_UNUSED(tx_ring) >= TX_WAKE_THRESHOLD))) { in i40e_clean_tx_irq()
1040 if (__netif_subqueue_stopped(tx_ring->netdev, in i40e_clean_tx_irq()
1041 tx_ring->queue_index) && in i40e_clean_tx_irq()
1043 netif_wake_subqueue(tx_ring->netdev, in i40e_clean_tx_irq()
1044 tx_ring->queue_index); in i40e_clean_tx_irq()
1045 ++tx_ring->tx_stats.restart_queue; in i40e_clean_tx_irq()
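i40e_clean_tx_irq() walks completed descriptors using a negative running index (hence the repeated "i -= tx_ring->count" wrap), unmaps the buffers, reports the totals, and finally wakes the stopped subqueue once enough descriptors are free again. The sketch below models only that final wake decision; DEMO_TX_WAKE_THRESHOLD is an assumed constant, not the driver's TX_WAKE_THRESHOLD value:

    #include <stdbool.h>

    #define DEMO_TX_WAKE_THRESHOLD 32   /* assumed value for illustration */

    struct demo_ring {
        unsigned short next_to_use;
        unsigned short next_to_clean;
        unsigned short count;
        bool stopped;                   /* models __netif_subqueue_stopped() */
    };

    static unsigned short demo_desc_unused(const struct demo_ring *r)
    {
        unsigned short base = (r->next_to_clean > r->next_to_use) ? 0 : r->count;

        return base + r->next_to_clean - r->next_to_use - 1;
    }

    /* Wake the queue only if work completed, the link is up, the queue was
     * actually stopped, and enough descriptors were reclaimed. */
    static bool demo_should_wake_queue(const struct demo_ring *r,
                                       unsigned int total_packets,
                                       bool carrier_ok)
    {
        return total_packets && carrier_ok && r->stopped &&
               demo_desc_unused(r) >= DEMO_TX_WAKE_THRESHOLD;
    }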
1415 int i40e_setup_tx_descriptors(struct i40e_ring *tx_ring) in i40e_setup_tx_descriptors() argument
1417 struct device *dev = tx_ring->dev; in i40e_setup_tx_descriptors()
1424 WARN_ON(tx_ring->tx_bi); in i40e_setup_tx_descriptors()
1425 bi_size = sizeof(struct i40e_tx_buffer) * tx_ring->count; in i40e_setup_tx_descriptors()
1426 tx_ring->tx_bi = kzalloc(bi_size, GFP_KERNEL); in i40e_setup_tx_descriptors()
1427 if (!tx_ring->tx_bi) in i40e_setup_tx_descriptors()
1430 u64_stats_init(&tx_ring->syncp); in i40e_setup_tx_descriptors()
1433 tx_ring->size = tx_ring->count * sizeof(struct i40e_tx_desc); in i40e_setup_tx_descriptors()
1437 tx_ring->size += sizeof(u32); in i40e_setup_tx_descriptors()
1438 tx_ring->size = ALIGN(tx_ring->size, 4096); in i40e_setup_tx_descriptors()
1439 tx_ring->desc = dma_alloc_coherent(dev, tx_ring->size, in i40e_setup_tx_descriptors()
1440 &tx_ring->dma, GFP_KERNEL); in i40e_setup_tx_descriptors()
1441 if (!tx_ring->desc) { in i40e_setup_tx_descriptors()
1443 tx_ring->size); in i40e_setup_tx_descriptors()
1447 tx_ring->next_to_use = 0; in i40e_setup_tx_descriptors()
1448 tx_ring->next_to_clean = 0; in i40e_setup_tx_descriptors()
1449 tx_ring->tx_stats.prev_pkt_ctr = -1; in i40e_setup_tx_descriptors()
1453 kfree(tx_ring->tx_bi); in i40e_setup_tx_descriptors()
1454 tx_ring->tx_bi = NULL; in i40e_setup_tx_descriptors()
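i40e_setup_tx_descriptors() sizes the DMA area as count descriptors plus one u32 for the head write-back word, rounded up to a 4 KiB multiple, before calling dma_alloc_coherent(). A sketch of that sizing arithmetic; the 16-byte descriptor size is an assumption standing in for sizeof(struct i40e_tx_desc):

    #include <stddef.h>
    #include <stdint.h>

    #define DEMO_TX_DESC_SIZE 16u       /* assumed descriptor size in bytes */

    static inline size_t demo_align(size_t x, size_t a)
    {
        return (x + a - 1) & ~(a - 1);  /* models the kernel's ALIGN() */
    }

    static size_t demo_tx_ring_bytes(unsigned short count)
    {
        size_t size = (size_t)count * DEMO_TX_DESC_SIZE;

        size += sizeof(uint32_t);       /* room for the head write-back word */
        return demo_align(size, 4096);  /* round up to a page multiple */
    }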
2860 static void i40e_atr(struct i40e_ring *tx_ring, struct sk_buff *skb, in i40e_atr() argument
2864 struct i40e_pf *pf = tx_ring->vsi->back; in i40e_atr()
2884 if (!tx_ring->atr_sample_rate) in i40e_atr()
2930 tx_ring->atr_count++; in i40e_atr()
2936 (tx_ring->atr_count < tx_ring->atr_sample_rate)) in i40e_atr()
2939 tx_ring->atr_count = 0; in i40e_atr()
2942 i = tx_ring->next_to_use; in i40e_atr()
2943 fdir_desc = I40E_TX_FDIRDESC(tx_ring, i); in i40e_atr()
2946 tx_ring->next_to_use = (i < tx_ring->count) ? i : 0; in i40e_atr()
2949 tx_ring->queue_index); in i40e_atr()
2956 flex_ptype |= tx_ring->vsi->id << I40E_TXD_FLTR_QW0_DEST_VSI_SHIFT; in i40e_atr()
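The i40e_atr() matches show the ATR (application targeted routing) sampling gate: most packets only increment atr_count, and a Flow Director filter descriptor is built only when the counter reaches atr_sample_rate, or when sampling is forced (in the driver this is driven by TCP flags such as FIN/RST). A sketch of that gate, with demo_atr_state as a stand-in:

    #include <stdbool.h>

    struct demo_atr_state {
        unsigned short atr_count;
        unsigned short atr_sample_rate;   /* 0 disables sampling entirely */
    };

    /* Returns true when this packet should trigger programming of an ATR
     * filter descriptor; force_sample models the TCP-flag special cases. */
    static bool demo_atr_should_sample(struct demo_atr_state *st, bool force_sample)
    {
        if (!st->atr_sample_rate)
            return false;

        st->atr_count++;

        if (!force_sample && st->atr_count < st->atr_sample_rate)
            return false;

        st->atr_count = 0;
        return true;
    }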
3004 struct i40e_ring *tx_ring, in i40e_tx_prepare_vlan_flags() argument
3011 !(tx_ring->netdev->features & NETIF_F_HW_VLAN_CTAG_TX)) { in i40e_tx_prepare_vlan_flags()
3040 if (!test_bit(I40E_FLAG_DCB_ENA, tx_ring->vsi->back->flags)) in i40e_tx_prepare_vlan_flags()
3200 static int i40e_tsyn(struct i40e_ring *tx_ring, struct sk_buff *skb, in i40e_tsyn() argument
3215 pf = i40e_netdev_to_pf(tx_ring->netdev); in i40e_tsyn()
3246 struct i40e_ring *tx_ring, in i40e_tx_enable_csum() argument
3427 static void i40e_create_tx_ctx(struct i40e_ring *tx_ring, in i40e_create_tx_ctx() argument
3432 int i = tx_ring->next_to_use; in i40e_create_tx_ctx()
3439 context_desc = I40E_TX_CTXTDESC(tx_ring, i); in i40e_create_tx_ctx()
3442 tx_ring->next_to_use = (i < tx_ring->count) ? i : 0; in i40e_create_tx_ctx()
3458 int __i40e_maybe_stop_tx(struct i40e_ring *tx_ring, int size) in __i40e_maybe_stop_tx() argument
3460 netif_stop_subqueue(tx_ring->netdev, tx_ring->queue_index); in __i40e_maybe_stop_tx()
3464 ++tx_ring->tx_stats.tx_stopped; in __i40e_maybe_stop_tx()
3467 if (likely(I40E_DESC_UNUSED(tx_ring) < size)) in __i40e_maybe_stop_tx()
3471 netif_start_subqueue(tx_ring->netdev, tx_ring->queue_index); in __i40e_maybe_stop_tx()
3472 ++tx_ring->tx_stats.restart_queue; in __i40e_maybe_stop_tx()
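__i40e_maybe_stop_tx() stops the subqueue first, then re-checks the free-descriptor count and restarts the queue if the cleanup path freed space in the meantime; that ordering, together with the memory barrier the driver places between the two steps, avoids a lost-wakeup race. A sketch of the pattern, with stand-in types and plain booleans in place of the netif_*_subqueue() calls:

    #include <stdbool.h>

    struct demo_ring {
        unsigned short next_to_use;
        unsigned short next_to_clean;
        unsigned short count;
        bool queue_stopped;
    };

    static unsigned short demo_desc_unused(const struct demo_ring *r)
    {
        unsigned short base = (r->next_to_clean > r->next_to_use) ? 0 : r->count;

        return base + r->next_to_clean - r->next_to_use - 1;
    }

    /* Returns -1 when the queue must stay stopped, 0 when it turned out to
     * be safe to keep transmitting. */
    static int demo_maybe_stop_tx(struct demo_ring *r, unsigned short size)
    {
        r->queue_stopped = true;        /* models netif_stop_subqueue() */

        /* The driver issues a memory barrier here so the stopped state is
         * visible to the cleanup path before re-reading the free count. */
        if (demo_desc_unused(r) < size)
            return -1;                  /* still full: leave it stopped */

        r->queue_stopped = false;       /* models netif_start_subqueue() */
        return 0;
    }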
3572 static inline int i40e_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb, in i40e_tx_map() argument
3581 u16 i = tx_ring->next_to_use; in i40e_tx_map()
3593 dma = dma_map_single(tx_ring->dev, skb->data, size, DMA_TO_DEVICE); in i40e_tx_map()
3595 tx_desc = I40E_TX_DESC(tx_ring, i); in i40e_tx_map()
3601 if (dma_mapping_error(tx_ring->dev, dma)) in i40e_tx_map()
3621 if (i == tx_ring->count) { in i40e_tx_map()
3622 tx_desc = I40E_TX_DESC(tx_ring, 0); in i40e_tx_map()
3643 if (i == tx_ring->count) { in i40e_tx_map()
3644 tx_desc = I40E_TX_DESC(tx_ring, 0); in i40e_tx_map()
3651 dma = skb_frag_dma_map(tx_ring->dev, frag, 0, size, in i40e_tx_map()
3654 tx_bi = &tx_ring->tx_bi[i]; in i40e_tx_map()
3657 netdev_tx_sent_queue(txring_txq(tx_ring), first->bytecount); in i40e_tx_map()
3660 if (i == tx_ring->count) in i40e_tx_map()
3663 tx_ring->next_to_use = i; in i40e_tx_map()
3665 i40e_maybe_stop_tx(tx_ring, DESC_NEEDED); in i40e_tx_map()
3673 desc_count |= ++tx_ring->packet_stride; in i40e_tx_map()
3678 tx_ring->packet_stride = 0; in i40e_tx_map()
3698 if (netif_xmit_stopped(txring_txq(tx_ring)) || !netdev_xmit_more()) { in i40e_tx_map()
3699 writel(i, tx_ring->tail); in i40e_tx_map()
3705 dev_info(tx_ring->dev, "TX DMA map failed\n"); in i40e_tx_map()
3709 tx_bi = &tx_ring->tx_bi[i]; in i40e_tx_map()
3710 i40e_unmap_and_free_tx_resource(tx_ring, tx_bi); in i40e_tx_map()
3714 i = tx_ring->count; in i40e_tx_map()
3718 tx_ring->next_to_use = i; in i40e_tx_map()
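i40e_tx_map() maps the linear head and each fragment, records the byte count for BQL, advances next_to_use, and only then decides whether to write the tail register: the doorbell is skipped while netdev_xmit_more() promises further frames, so a burst of packets shares a single MMIO write. A sketch of that decision, with a plain store modelling writel():

    #include <stdbool.h>
    #include <stdint.h>

    struct demo_ring {
        volatile uint32_t *tail;        /* stand-in for the MMIO tail register */
        unsigned short next_to_use;
    };

    /* Skipping the write lets several frames share one doorbell; the last
     * frame in a burst (xmit_more == false) flushes them all. */
    static void demo_kick_hardware(struct demo_ring *r, bool queue_stopped,
                                   bool xmit_more)
    {
        if (queue_stopped || !xmit_more)
            *r->tail = r->next_to_use;
    }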
3871 struct i40e_ring *tx_ring) in i40e_xmit_frame_ring() argument
3886 i40e_trace(xmit_frame_ring, skb, tx_ring); in i40e_xmit_frame_ring()
3895 tx_ring->tx_stats.tx_linearize++; in i40e_xmit_frame_ring()
3904 if (i40e_maybe_stop_tx(tx_ring, count + 4 + 1)) { in i40e_xmit_frame_ring()
3905 tx_ring->tx_stats.tx_busy++; in i40e_xmit_frame_ring()
3910 first = &tx_ring->tx_bi[tx_ring->next_to_use]; in i40e_xmit_frame_ring()
3916 if (i40e_tx_prepare_vlan_flags(skb, tx_ring, &tx_flags)) in i40e_xmit_frame_ring()
3928 tx_ring, &cd_tunneling); in i40e_xmit_frame_ring()
3932 tsyn = i40e_tsyn(tx_ring, skb, tx_flags, &cd_type_cmd_tso_mss); in i40e_xmit_frame_ring()
3940 i40e_create_tx_ctx(tx_ring, cd_type_cmd_tso_mss, in i40e_xmit_frame_ring()
3947 i40e_atr(tx_ring, skb, tx_flags); in i40e_xmit_frame_ring()
3949 if (i40e_tx_map(tx_ring, skb, first, tx_flags, hdr_len, in i40e_xmit_frame_ring()
3956 i40e_trace(xmit_frame_ring_drop, first->skb, tx_ring); in i40e_xmit_frame_ring()
3961 struct i40e_pf *pf = i40e_netdev_to_pf(tx_ring->netdev); in i40e_xmit_frame_ring()
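i40e_xmit_frame_ring() reserves its descriptor budget up front: enough descriptors for the linear head and every fragment, plus the fixed "+ 4 + 1" headroom seen in the match (a gap near the head write-back area plus one context descriptor slot). A sketch of that budgeting; DEMO_MAX_DATA_PER_TXD is an assumed per-descriptor data limit, not the driver's constant:

    #define DEMO_MAX_DATA_PER_TXD 16384u   /* assumed data limit per descriptor */

    /* Descriptors needed to carry a buffer of the given length. */
    static unsigned int demo_descs_for_len(unsigned int len)
    {
        return (len + DEMO_MAX_DATA_PER_TXD - 1) / DEMO_MAX_DATA_PER_TXD;
    }

    /* Total descriptors to reserve before handing the frame to the mapping
     * routine: head + fragments + the fixed headroom from the match above. */
    static unsigned int demo_tx_descriptor_budget(unsigned int head_len,
                                                  const unsigned int *frag_lens,
                                                  unsigned int nr_frags)
    {
        unsigned int count = demo_descs_for_len(head_len);

        for (unsigned int i = 0; i < nr_frags; i++)
            count += demo_descs_for_len(frag_lens[i]);

        return count + 4 + 1;
    }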
3982 struct i40e_ring *tx_ring = vsi->tx_rings[skb->queue_mapping]; in i40e_lan_xmit_frame() local
3990 return i40e_xmit_frame_ring(skb, tx_ring); in i40e_lan_xmit_frame()