/linux-6.12.1/drivers/net/ethernet/netronome/nfp/nfd3/

rings.c
     11  static void nfp_nfd3_xsk_tx_bufs_free(struct nfp_net_tx_ring *tx_ring)
     16          while (tx_ring->rd_p != tx_ring->wr_p) {
     17                  idx = D_IDX(tx_ring, tx_ring->rd_p);
     18                  txbuf = &tx_ring->txbufs[idx];
     22                  tx_ring->qcp_rd_p++;
     23                  tx_ring->rd_p++;
     25                  if (tx_ring->r_vec->xsk_pool) {
     29                          xsk_tx_completed(tx_ring->r_vec->xsk_pool, 1);
     42  nfp_nfd3_tx_ring_reset(struct nfp_net_dp *dp, struct nfp_net_tx_ring *tx_ring)
     47          while (!tx_ring->is_xdp && tx_ring->rd_p != tx_ring->wr_p) {
         [all …]
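Both loops above rely on the NFD3 ring convention: rd_p and wr_p are free-running counters that are only masked into the descriptor array via D_IDX() at the moment a slot is touched, so "empty" is simply rd_p == wr_p. A minimal userspace sketch of that drain pattern follows; struct ring and its fields are hypothetical stand-ins, not the nfp structures.

/*
 * Drain a power-of-two ring by walking the consumer index (rd_p)
 * forward until it meets the producer (wr_p).
 */
#include <stdio.h>

#define RING_CNT 8
#define D_IDX(r, p) ((p) & ((r)->cnt - 1))

struct ring {
	unsigned int cnt;                /* power of two */
	unsigned int rd_p, wr_p;         /* free-running, masked on access */
	void *bufs[RING_CNT];
};

static void ring_drain(struct ring *r)
{
	while (r->rd_p != r->wr_p) {
		unsigned int idx = D_IDX(r, r->rd_p);

		printf("freeing slot %u\n", idx);
		r->bufs[idx] = NULL;     /* stands in for unmap + free */
		r->rd_p++;
	}
}

int main(void)
{
	struct ring r = { .cnt = RING_CNT, .rd_p = 5, .wr_p = 9 };

	ring_drain(&r);                  /* frees slots 5, 6, 7, 0 */
	return 0;
}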
xsk.c
     17          struct nfp_net_tx_ring *tx_ring,  in nfp_nfd3_xsk_tx_xdp() argument
     26          if (nfp_net_tx_space(tx_ring) < 1)
     32          wr_idx = D_IDX(tx_ring, tx_ring->wr_p);
     34          txbuf = &tx_ring->txbufs[wr_idx];
     40          txd = &tx_ring->txds[wr_idx];
     50          tx_ring->wr_ptr_add++;
     51          tx_ring->wr_p++;
    120          struct nfp_net_tx_ring *tx_ring;  in nfp_nfd3_xsk_rx() local
    126          tx_ring = r_vec->xdp_ring;
    230          if (!nfp_nfd3_xsk_tx_xdp(dp, r_vec, rx_ring, tx_ring,
         [all …]
dp.c
     31  static int nfp_nfd3_tx_ring_should_wake(struct nfp_net_tx_ring *tx_ring)
     33          return !nfp_net_tx_full(tx_ring, MAX_SKB_FRAGS * 4);
     36  static int nfp_nfd3_tx_ring_should_stop(struct nfp_net_tx_ring *tx_ring)
     38          return nfp_net_tx_full(tx_ring, MAX_SKB_FRAGS + 1);
     52          struct nfp_net_tx_ring *tx_ring)  in nfp_nfd3_tx_ring_stop() argument
     58          if (unlikely(nfp_nfd3_tx_ring_should_wake(tx_ring)))
    259          struct nfp_net_tx_ring *tx_ring;  in nfp_nfd3_tx() local
    274          tx_ring = &dp->tx_rings[qidx];
    275          r_vec = tx_ring->r_vec;
    279          if (unlikely(nfp_net_tx_full(tx_ring, nr_frags + 1))) {
         [all …]
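Note the asymmetry above: the queue is stopped once fewer than MAX_SKB_FRAGS + 1 descriptors are free (one worst-case skb), but only woken again when MAX_SKB_FRAGS * 4 are free. The gap keeps the queue from oscillating between stopped and running on every completion. A self-contained model of that hysteresis, with hypothetical names and sizes:

#include <stdbool.h>
#include <stdio.h>

#define MAX_FRAGS 17
#define STOP_CNT  (MAX_FRAGS + 1)        /* worst-case descriptors per skb */
#define WAKE_CNT  (MAX_FRAGS * 4)        /* wake only with real headroom */

struct txq {
	unsigned int cnt, rd_p, wr_p;    /* free-running counters */
	bool stopped;
};

static bool txq_full(const struct txq *q, unsigned int needed)
{
	return (q->wr_p - q->rd_p) >= (q->cnt - needed);
}

static void txq_maybe_stop(struct txq *q)
{
	if (txq_full(q, STOP_CNT))
		q->stopped = true;
}

static void txq_complete(struct txq *q, unsigned int done)
{
	q->rd_p += done;
	if (q->stopped && !txq_full(q, WAKE_CNT)) {
		q->stopped = false;
		printf("queue woken, %u in flight\n", q->wr_p - q->rd_p);
	}
}

int main(void)
{
	struct txq q = { .cnt = 512, .wr_p = 500 };

	txq_maybe_stop(&q);      /* 500 used >= 512 - 18: stopped */
	txq_complete(&q, 40);    /* 460 used, still >= 512 - 68 = 444 */
	txq_complete(&q, 40);    /* 420 used < 444: woken */
	return 0;
}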
/linux-6.12.1/drivers/net/ethernet/netronome/nfp/nfdk/

rings.c
     11  nfp_nfdk_tx_ring_reset(struct nfp_net_dp *dp, struct nfp_net_tx_ring *tx_ring)
     16          while (!tx_ring->is_xdp && tx_ring->rd_p != tx_ring->wr_p) {
     23                  rd_idx = D_IDX(tx_ring, tx_ring->rd_p);
     24                  txbuf = &tx_ring->ktxbufs[rd_idx];
     28                  n_descs = D_BLOCK_CPL(tx_ring->rd_p);
     57                  tx_ring->rd_p += n_descs;
     60          memset(tx_ring->txds, 0, tx_ring->size);
     61          tx_ring->data_pending = 0;
     62          tx_ring->wr_p = 0;
     63          tx_ring->rd_p = 0;
         [all …]
dp.c
     18  static int nfp_nfdk_tx_ring_should_wake(struct nfp_net_tx_ring *tx_ring)
     20          return !nfp_net_tx_full(tx_ring, NFDK_TX_DESC_STOP_CNT * 2);
     23  static int nfp_nfdk_tx_ring_should_stop(struct nfp_net_tx_ring *tx_ring)
     25          return nfp_net_tx_full(tx_ring, NFDK_TX_DESC_STOP_CNT);
     29          struct nfp_net_tx_ring *tx_ring)  in nfp_nfdk_tx_ring_stop() argument
     35          if (unlikely(nfp_nfdk_tx_ring_should_wake(tx_ring)))
    119  nfp_nfdk_tx_maybe_close_block(struct nfp_net_tx_ring *tx_ring,
    151          if (round_down(tx_ring->wr_p, NFDK_TX_DESC_BLOCK_CNT) !=
    152              round_down(tx_ring->wr_p + n_descs, NFDK_TX_DESC_BLOCK_CNT))
    155          if ((u32)tx_ring->data_pending + skb->len > NFDK_TX_MAX_DATA_PER_BLOCK)
         [all …]
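nfp_nfdk_tx_maybe_close_block() groups descriptors into fixed-size blocks: when a frame's descriptors would straddle a block boundary, or would blow the per-block data budget, the current block is closed and the frame starts in the next one. The boundary test reduces to comparing round_down() of the write pointer before and after the frame, as in this sketch (the block size is illustrative):

#include <stdbool.h>
#include <stdio.h>

#define BLOCK_CNT 32                     /* descriptors per block, power of two */

static unsigned int round_down_pow2(unsigned int x, unsigned int align)
{
	return x & ~(align - 1);
}

static bool crosses_block(unsigned int wr_p, unsigned int n_descs)
{
	return round_down_pow2(wr_p, BLOCK_CNT) !=
	       round_down_pow2(wr_p + n_descs, BLOCK_CNT);
}

int main(void)
{
	printf("%d\n", crosses_block(30, 4));   /* 1: 30..34 straddles 32 */
	printf("%d\n", crosses_block(32, 4));   /* 0: fits in one block */
	return 0;
}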
/linux-6.12.1/drivers/net/ethernet/amazon/ena/

ena_xdp.c
      8  static int validate_xdp_req_id(struct ena_ring *tx_ring, u16 req_id)
     12          tx_info = &tx_ring->tx_buffer_info[req_id];
     16          return handle_invalid_req_id(tx_ring, req_id, tx_info, true);
     19  static int ena_xdp_tx_map_frame(struct ena_ring *tx_ring,
     24          struct ena_adapter *adapter = tx_ring->adapter;
     35          if (tx_ring->tx_mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV) {
     37                  push_len = min_t(u32, size, tx_ring->tx_max_header_size);
     48          dma = dma_map_single(tx_ring->dev,
     52          if (unlikely(dma_mapping_error(tx_ring->dev, dma)))
     68          ena_increase_stat(&tx_ring->tx_stats.dma_mapping_err, 1,
         [all …]
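ena_xdp_tx_map_frame() shows the standard map-then-check discipline: every dma_map_single() is immediately tested with dma_mapping_error(), and a failure only bumps a dedicated statistic and drops the frame instead of arming a descriptor with a bad address. A plain-C model of that control flow; map_frame() and the stats struct are stand-ins, not the ENA API:

#include <stdint.h>
#include <stdio.h>

#define MAP_ERROR UINT64_MAX             /* models dma_mapping_error() */

struct tx_stats {
	unsigned long dma_mapping_err;
};

/* Pretend mapping fails for NULL buffers. */
static uint64_t map_frame(const void *buf, size_t len)
{
	(void)len;
	return buf ? (uint64_t)(uintptr_t)buf : MAP_ERROR;
}

static int xmit_frame(struct tx_stats *stats, const void *buf, size_t len)
{
	uint64_t dma = map_frame(buf, len);

	if (dma == MAP_ERROR) {
		stats->dma_mapping_err++;    /* count it, don't crash */
		return -1;                   /* caller drops the frame */
	}
	printf("descriptor armed, dma=0x%llx\n", (unsigned long long)dma);
	return 0;
}

int main(void)
{
	struct tx_stats stats = { 0 };
	char pkt[64] = { 0 };

	xmit_frame(&stats, pkt, sizeof(pkt));
	xmit_frame(&stats, NULL, 64);        /* simulated mapping failure */
	printf("dma_mapping_err=%lu\n", stats.dma_mapping_err);
	return 0;
}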
ena_netdev.c
     53          struct ena_ring *tx_ring;  in ena_tx_timeout() local
     62          tx_ring = &adapter->tx_ring[txqueue];
     64          time_since_last_napi = jiffies_to_usecs(jiffies - tx_ring->tx_stats.last_napi_jiffies);
     65          napi_scheduled = !!(tx_ring->napi->state & NAPIF_STATE_SCHED);
    216          txr = &adapter->tx_ring[i];  in ena_init_io_rings()
    246          rxr->xdp_ring = &adapter->tx_ring[i + adapter->num_io_queues];
    259          struct ena_ring *tx_ring = &adapter->tx_ring[qid];  in ena_setup_tx_resources() local
    263          if (tx_ring->tx_buffer_info) {
    269          size = sizeof(struct ena_tx_buffer) * tx_ring->ring_size;
    272          tx_ring->tx_buffer_info = vzalloc_node(size, node);
         [all …]
/linux-6.12.1/drivers/net/ethernet/netronome/nfp/

nfp_net_dp.h
     51  static inline int nfp_net_tx_full(struct nfp_net_tx_ring *tx_ring, int dcnt)
     53          return (tx_ring->wr_p - tx_ring->rd_p) >= (tx_ring->cnt - dcnt);
     56  static inline void nfp_net_tx_xmit_more_flush(struct nfp_net_tx_ring *tx_ring)
     59          nfp_qcp_wr_ptr_add(tx_ring->qcp_q, tx_ring->wr_ptr_add);
     60          tx_ring->wr_ptr_add = 0;
     64  nfp_net_read_tx_cmpl(struct nfp_net_tx_ring *tx_ring, struct nfp_net_dp *dp)
     66          if (tx_ring->txrwb)
     67                  return *tx_ring->txrwb;
     68          return nfp_qcp_rd_ptr_read(tx_ring->qcp_q);
    100          struct nfp_net_tx_ring *tx_ring, unsigned int idx);
         [all …]
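nfp_net_tx_full() is the primitive behind the stop/wake checks in both the NFD3 and NFDK paths: wr_p - rd_p gives the number of in-flight descriptors even after the unsigned counters wrap, and the ring counts as "full" once fewer than dcnt slots remain. A standalone sketch of exactly that arithmetic, with illustrative values:

#include <assert.h>
#include <limits.h>

static int tx_full(unsigned int wr_p, unsigned int rd_p,
		   unsigned int cnt, unsigned int dcnt)
{
	/* "full" = fewer than dcnt free slots remain */
	return (wr_p - rd_p) >= (cnt - dcnt);
}

int main(void)
{
	/* 60 of 64 slots used: full for dcnt=8, not for dcnt=2 */
	assert(tx_full(60, 0, 64, 8));
	assert(!tx_full(60, 0, 64, 2));

	/* unsigned subtraction keeps working after wr_p wraps past 0 */
	assert(tx_full(3u, UINT_MAX - 56, 64, 8));   /* still 60 in flight */
	return 0;
}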
nfp_net_debugfs.c
     83          struct nfp_net_tx_ring *tx_ring;  in nfp_tx_q_show() local
     90          tx_ring = r_vec->tx_ring;
     92          tx_ring = r_vec->xdp_ring;
     93          if (!r_vec->nfp_net || !tx_ring)
     99          d_rd_p = nfp_qcp_rd_ptr_read(tx_ring->qcp_q);
    100          d_wr_p = nfp_qcp_wr_ptr_read(tx_ring->qcp_q);
    103          tx_ring->idx, tx_ring->qcidx,
    104          tx_ring == r_vec->tx_ring ? "" : "xdp",
    105          tx_ring->cnt, &tx_ring->dma, tx_ring->txds,
    106          tx_ring->rd_p, tx_ring->wr_p, d_rd_p, d_wr_p);
         [all …]
/linux-6.12.1/drivers/net/ethernet/intel/iavf/

iavf_txrx.c
     59  static void iavf_clean_tx_ring(struct iavf_ring *tx_ring)
     65          if (!tx_ring->tx_bi)
     69          for (i = 0; i < tx_ring->count; i++)
     70                  iavf_unmap_and_free_tx_resource(tx_ring, &tx_ring->tx_bi[i]);
     72          bi_size = sizeof(struct iavf_tx_buffer) * tx_ring->count;
     73          memset(tx_ring->tx_bi, 0, bi_size);
     76          memset(tx_ring->desc, 0, tx_ring->size);
     78          tx_ring->next_to_use = 0;
     79          tx_ring->next_to_clean = 0;
     81          if (!tx_ring->netdev)
         [all …]
/linux-6.12.1/drivers/net/ethernet/intel/ice/

ice_txrx.c
     39          struct ice_tx_ring *tx_ring;  in ice_prgm_fdir_fltr() local
     48          tx_ring = vsi->tx_rings[0];
     49          if (!tx_ring || !tx_ring->desc)
     51          dev = tx_ring->dev;
     54          for (i = ICE_FDIR_CLEAN_DELAY; ICE_DESC_UNUSED(tx_ring) < 2; i--) {
     67          i = tx_ring->next_to_use;
     68          first = &tx_ring->tx_buf[i];
     69          f_desc = ICE_TX_FDIRDESC(tx_ring, i);
     73          i = (i < tx_ring->count) ? i : 0;
     74          tx_desc = ICE_TX_DESC(tx_ring, i);
         [all …]
/linux-6.12.1/drivers/infiniband/hw/hfi1/

ipoib_tx.c
     49          return hfi1_ipoib_txreqs(txq->tx_ring.sent_txreqs,  in hfi1_ipoib_used()
     50                                   txq->tx_ring.complete_txreqs);
     56          if (atomic_inc_return(&txq->tx_ring.stops) == 1)  in hfi1_ipoib_stop_txq()
     63          if (atomic_dec_and_test(&txq->tx_ring.stops))  in hfi1_ipoib_wake_txq()
     70          txq->tx_ring.max_items - 1);  in hfi1_ipoib_ring_hwat()
     76          txq->tx_ring.max_items) >> 1;  in hfi1_ipoib_ring_lwat()
     81          ++txq->tx_ring.sent_txreqs;  in hfi1_ipoib_check_queue_depth()
     83          !atomic_xchg(&txq->tx_ring.ring_full, 1)) {
    108          atomic_xchg(&txq->tx_ring.ring_full, 0)) {  in hfi1_ipoib_check_queue_stopped()
    136          struct hfi1_ipoib_circ_buf *tx_ring = &txq->tx_ring;  in hfi1_ipoib_drain_tx_ring() local
         [all …]
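Two details stand out above. First, stops is a counter, not a flag: several independent reasons can pause the same queue, the first increment (0 to 1) actually stops it, and only the final decrement restarts it. Second, the high and low watermarks (hwat near max_items - 1, lwat at half) give the same stop/wake hysteresis seen in the nfp driver. A C11 model of the counted-stop gate, with hypothetical queue_stop()/queue_wake() stand-ins:

#include <stdatomic.h>
#include <stdio.h>

static atomic_int stops;      /* how many reasons the queue is paused */

static void queue_stop(void) { printf("netif queue stopped\n"); }
static void queue_wake(void) { printf("netif queue woken\n"); }

static void stop_txq(void)
{
	/* models atomic_inc_return(&stops) == 1 */
	if (atomic_fetch_add(&stops, 1) == 0)
		queue_stop();
}

static void wake_txq(void)
{
	/* models atomic_dec_and_test(&stops) */
	if (atomic_fetch_sub(&stops, 1) == 1)
		queue_wake();
}

int main(void)
{
	stop_txq();      /* ring full: actually stops the queue */
	stop_txq();      /* second stopper: queue already stopped */
	wake_txq();      /* one stopper still holds the queue */
	wake_txq();      /* last stopper gone: queue restarts */
	return 0;
}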
/linux-6.12.1/drivers/net/ethernet/intel/fm10k/

fm10k_main.c
    742  static int fm10k_tso(struct fm10k_ring *tx_ring,
    775          tx_desc = FM10K_TX_DESC(tx_ring, tx_ring->next_to_use);
    782          tx_ring->netdev->features &= ~NETIF_F_GSO_UDP_TUNNEL;
    784          netdev_err(tx_ring->netdev,
    789  static void fm10k_tx_csum(struct fm10k_ring *tx_ring,
    811          dev_warn(tx_ring->dev,
    813          tx_ring->tx_stats.csum_err++;
    854          dev_warn(tx_ring->dev,
    859          tx_ring->tx_stats.csum_err++;
    865          tx_ring->tx_stats.csum_good++;
         [all …]
/linux-6.12.1/drivers/net/ethernet/intel/i40e/

i40e_txrx_common.h
     46  static inline void i40e_update_tx_stats(struct i40e_ring *tx_ring,
     50          u64_stats_update_begin(&tx_ring->syncp);
     51          tx_ring->stats.bytes += total_bytes;
     52          tx_ring->stats.packets += total_packets;
     53          u64_stats_update_end(&tx_ring->syncp);
     54          tx_ring->q_vector->tx.total_bytes += total_bytes;
     55          tx_ring->q_vector->tx.total_packets += total_packets;
     66  static inline void i40e_arm_wb(struct i40e_ring *tx_ring,
     70          if (tx_ring->flags & I40E_TXR_FLAGS_WB_ON_ITR) {
     76          unsigned int j = i40e_get_tx_pending(tx_ring, false);
         [all …]
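The u64_stats_update_begin()/end() pair is a writer-side sequence count: on 32-bit kernels a reader that races with an update retries, so it never observes a torn 64-bit counter (on 64-bit builds the calls compile away). A simplified single-writer userspace model of the same idea, not the kernel API:

#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

struct stats {
	atomic_uint seq;            /* odd while a writer is inside */
	uint64_t bytes, packets;
};

static void stats_add(struct stats *s, uint64_t b, uint64_t p)
{
	atomic_fetch_add_explicit(&s->seq, 1, memory_order_release);
	s->bytes += b;
	s->packets += p;
	atomic_fetch_add_explicit(&s->seq, 1, memory_order_release);
}

static void stats_read(struct stats *s, uint64_t *b, uint64_t *p)
{
	unsigned int start;

	do {                        /* retry if a writer was active */
		start = atomic_load_explicit(&s->seq, memory_order_acquire);
		*b = s->bytes;
		*p = s->packets;
	} while ((start & 1) ||
		 start != atomic_load_explicit(&s->seq, memory_order_acquire));
}

int main(void)
{
	struct stats s = { 0 };
	uint64_t b, p;

	stats_add(&s, 1514, 1);
	stats_read(&s, &b, &p);
	printf("bytes=%llu packets=%llu\n",
	       (unsigned long long)b, (unsigned long long)p);
	return 0;
}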
i40e_txrx.c
     22  static void i40e_fdir(struct i40e_ring *tx_ring,
     26          struct i40e_pf *pf = tx_ring->vsi->back;
     31          i = tx_ring->next_to_use;
     32          fdir_desc = I40E_TX_FDIRDESC(tx_ring, i);
     35          tx_ring->next_to_use = (i < tx_ring->count) ? i : 0;
     87          struct i40e_ring *tx_ring;  in i40e_program_fdir_filter() local
     99          tx_ring = vsi->tx_rings[0];
    100          dev = tx_ring->dev;
    103          for (i = I40E_FD_CLEAN_DELAY; I40E_DESC_UNUSED(tx_ring) < 2; i--) {
    115          i = tx_ring->next_to_use;
         [all …]
/linux-6.12.1/drivers/net/can/spi/mcp251xfd/

mcp251xfd-tx.c
     21  mcp251xfd_tx_obj *mcp251xfd_get_tx_obj_next(struct mcp251xfd_tx_ring *tx_ring)
     25          tx_head = mcp251xfd_get_tx_head(tx_ring);
     27          return &tx_ring->obj[tx_head];
    135          struct mcp251xfd_tx_ring *tx_ring,  in mcp251xfd_tx_failure_drop() argument
    143          tx_ring->head--;
    145          tx_head = mcp251xfd_get_tx_head(tx_ring);
    159          struct mcp251xfd_tx_ring *tx_ring = priv->tx;  in mcp251xfd_tx_obj_write_sync() local
    164          mcp251xfd_tx_failure_drop(priv, tx_ring, err);
    174          struct mcp251xfd_tx_ring *tx_ring)  in mcp251xfd_tx_busy() argument
    176          if (mcp251xfd_get_tx_free(tx_ring) > 0)
         [all …]
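The CAN driver uses the same head/tail bookkeeping as the Ethernet rings: head counts submitted TX objects, tail counts completed ones, both run free, and a power-of-two obj_num lets masking pick the FIFO slot. The failure path in mcp251xfd_tx_failure_drop() simply un-claims the slot by stepping head back. A sketch modeled on (not copied from) the driver's helpers:

#include <stdio.h>

struct tx_ring {
	unsigned int obj_num;       /* power of two */
	unsigned int head, tail;    /* free-running */
};

static unsigned int get_tx_head(const struct tx_ring *r)
{
	return r->head & (r->obj_num - 1);
}

static unsigned int get_tx_free(const struct tx_ring *r)
{
	return r->obj_num - (r->head - r->tail);
}

int main(void)
{
	struct tx_ring r = { .obj_num = 8, .head = 10, .tail = 5 };

	printf("slot=%u free=%u\n", get_tx_head(&r), get_tx_free(&r));
	r.head++;                   /* claim a slot ... */
	r.head--;                   /* ... then drop it on a write error */
	printf("slot=%u free=%u\n", get_tx_head(&r), get_tx_free(&r));
	return 0;
}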
mcp251xfd-tef.c
    114          const struct mcp251xfd_tx_ring *tx_ring = priv->tx;  in mcp251xfd_get_tef_len() local
    115          const u8 shift = tx_ring->obj_num_shift_to_u8;
    129          mcp251xfd_get_tx_free(tx_ring) == 0) {
    130          *len_p = tx_ring->obj_num;
    145          BUILD_BUG_ON(sizeof(tx_ring->obj_num) != sizeof(chip_tx_tail));
    146          BUILD_BUG_ON(sizeof(tx_ring->obj_num) != sizeof(tail));
    147          BUILD_BUG_ON(sizeof(tx_ring->obj_num) != sizeof(len));
    160          const struct mcp251xfd_tx_ring *tx_ring = priv->tx;  in mcp251xfd_tef_obj_read() local
    164          (offset > tx_ring->obj_num ||
    165           len > tx_ring->obj_num ||
         [all …]
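The BUILD_BUG_ON() lines fail the build if the compared fields ever diverge in width; the modular arithmetic in mcp251xfd_get_tef_len() only works while obj_num, tail, and len share the same (u8) size. Standard C gives the same compile-time guarantee via static_assert, as in this sketch (the struct is a hypothetical model):

#include <assert.h>
#include <stdint.h>

struct tx_ring_model {
	uint8_t obj_num;            /* must match the tail/len width */
};

int main(void)
{
	struct tx_ring_model r = { .obj_num = 32 };
	uint8_t tail = 0, len = 0;

	static_assert(sizeof(r.obj_num) == sizeof(tail),
		      "tail width must match obj_num");
	static_assert(sizeof(r.obj_num) == sizeof(len),
		      "len width must match obj_num");

	(void)r; (void)tail; (void)len;
	return 0;
}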
/linux-6.12.1/drivers/net/ethernet/freescale/enetc/

enetc.c
     46          struct enetc_bdr *tx_ring)  in enetc_rx_ring_from_xdp_tx_ring() argument
     48          int index = &priv->tx_ring[tx_ring->index] - priv->xdp_tx_ring;
     70  static void enetc_unmap_tx_buff(struct enetc_bdr *tx_ring,
     78          dma_unmap_page(tx_ring->dev, tx_swbd->dma,
     82          dma_unmap_single(tx_ring->dev, tx_swbd->dma,
     87  static void enetc_free_tx_frame(struct enetc_bdr *tx_ring,
     94          enetc_unmap_tx_buff(tx_ring, tx_swbd);
    106  static void enetc_update_tx_ring_tail(struct enetc_bdr *tx_ring)
    109          enetc_wr_reg_hot(tx_ring->tpir, tx_ring->next_to_use);
    145  static int enetc_map_tx_buffs(struct enetc_bdr *tx_ring, struct sk_buff *skb)
         [all …]
/linux-6.12.1/drivers/net/ethernet/intel/igbvf/

netdev.c
    417          struct igbvf_ring *tx_ring)  in igbvf_setup_tx_resources() argument
    422          size = sizeof(struct igbvf_buffer) * tx_ring->count;
    423          tx_ring->buffer_info = vzalloc(size);
    424          if (!tx_ring->buffer_info)
    428          tx_ring->size = tx_ring->count * sizeof(union e1000_adv_tx_desc);
    429          tx_ring->size = ALIGN(tx_ring->size, 4096);
    431          tx_ring->desc = dma_alloc_coherent(&pdev->dev, tx_ring->size,
    432                                             &tx_ring->dma, GFP_KERNEL);
    433          if (!tx_ring->desc)
    436          tx_ring->adapter = adapter;
         [all …]
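igbvf_setup_tx_resources() sizes the descriptor array, rounds it up to a 4 KiB boundary, and only then asks for DMA-coherent memory. The rounding is the kernel's ALIGN() macro; since dma_alloc_coherent() has no userspace equivalent, this sketch substitutes plain aligned_alloc() to show the same sizing logic:

#include <stdio.h>
#include <stdlib.h>

#define ALIGN(x, a) (((x) + (a) - 1) & ~((size_t)(a) - 1))

struct tx_desc { unsigned char raw[16]; };   /* illustrative descriptor */

int main(void)
{
	size_t count = 256;
	size_t size = ALIGN(count * sizeof(struct tx_desc), 4096);
	struct tx_desc *descs = aligned_alloc(4096, size);

	if (!descs)
		return 1;
	printf("ring: %zu descriptors, %zu bytes\n", count, size);
	free(descs);
	return 0;
}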
/linux-6.12.1/drivers/net/ethernet/wangxun/libwx/

wx_lib.c
    704          struct wx_ring *tx_ring, int napi_budget)  in wx_clean_tx_irq() argument
    708          unsigned int i = tx_ring->next_to_clean;
    712          if (!netif_carrier_ok(tx_ring->netdev))
    715          tx_buffer = &tx_ring->tx_buffer_info[i];
    716          tx_desc = WX_TX_DESC(tx_ring, i);
    717          i -= tx_ring->count;
    744          dma_unmap_single(tx_ring->dev,
    758          i -= tx_ring->count;
    759          tx_buffer = tx_ring->tx_buffer_info;
    760          tx_desc = WX_TX_DESC(tx_ring, 0);
         [all …]
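The "i -= tx_ring->count" above is the classic Intel-driver cleaning trick (libwx inherits it): keep the loop index negative so the wrap test becomes a cheap compare against zero instead of a compare against the ring size. A standalone model, where the array contents stand in for descriptors:

#include <stdio.h>

#define RING_CNT 8

int main(void)
{
	int ring[RING_CNT] = { 0, 1, 2, 3, 4, 5, 6, 7 };
	unsigned int next_to_clean = 5;
	unsigned int budget = 6;                     /* clean six, wrapping */
	int *entry = &ring[next_to_clean];
	unsigned int i = next_to_clean - RING_CNT;   /* "negative" unsigned */

	while (budget--) {
		printf("cleaning slot %d\n", *entry);
		entry++;
		i++;
		if (!i) {                    /* wrapped past the end */
			i -= RING_CNT;
			entry = ring;
		}
	}
	next_to_clean = i + RING_CNT;        /* back to a real index: 3 */
	printf("next_to_clean=%u\n", next_to_clean);
	return 0;
}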
/linux-6.12.1/drivers/net/ethernet/qlogic/qlcnic/

qlcnic_io.c
    271          u16 vlan_id, struct qlcnic_host_tx_ring *tx_ring)  in qlcnic_82xx_change_filter() argument
    280          producer = tx_ring->producer;
    281          hwdesc = &tx_ring->desc_head[tx_ring->producer];
    297          tx_ring->producer = get_next_index(producer, tx_ring->num_desc);
    304          struct qlcnic_host_tx_ring *tx_ring)  in qlcnic_send_filter() argument
    338          vlan_id, tx_ring);
    353          qlcnic_change_filter(adapter, &src_addr, vlan_id, tx_ring);
    373          struct qlcnic_host_tx_ring *tx_ring)  in qlcnic_tx_encap_pkt() argument
    377          u32 producer = tx_ring->producer;
    407          hwdesc = &tx_ring->desc_head[producer];
         [all …]
/linux-6.12.1/drivers/net/ethernet/broadcom/

bcm4908_enet.c
     85          struct bcm4908_enet_dma_ring tx_ring;  member
    198          struct bcm4908_enet_dma_ring *tx_ring = &enet->tx_ring;  in bcm4908_enet_dma_free() local
    208          size = tx_ring->length * sizeof(struct bcm4908_enet_dma_ring_bd);
    209          if (tx_ring->cpu_addr)
    210                  dma_free_coherent(dev, size, tx_ring->cpu_addr, tx_ring->dma_addr);
    211          kfree(tx_ring->slots);
    216          struct bcm4908_enet_dma_ring *tx_ring = &enet->tx_ring;  in bcm4908_enet_dma_alloc() local
    221          tx_ring->length = ENET_TX_BDS_NUM;
    222          tx_ring->is_tx = 1;
    223          tx_ring->cfg_block = ENET_DMA_CH_TX_CFG;
         [all …]
/linux-6.12.1/drivers/net/ethernet/mscc/

ocelot_fdma.c
     69          struct ocelot_fdma_tx_ring *tx_ring = &fdma->tx_ring;  in ocelot_fdma_tx_ring_free() local
     71          if (tx_ring->next_to_use >= tx_ring->next_to_clean)
     73                  (tx_ring->next_to_use - tx_ring->next_to_clean) - 1;
     75          return tx_ring->next_to_clean - tx_ring->next_to_use - 1;
     80          struct ocelot_fdma_tx_ring *tx_ring = &fdma->tx_ring;  in ocelot_fdma_tx_ring_empty() local
     82          return tx_ring->next_to_clean == tx_ring->next_to_use;
    484          struct ocelot_fdma_tx_ring *tx_ring;  in ocelot_fdma_tx_cleanup() local
    495          tx_ring = &fdma->tx_ring;
    501          ntc = tx_ring->next_to_clean;
    502          dcb = &tx_ring->dcbs[ntc];
         [all …]
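Unlike the free-running counters above, ocelot_fdma_tx_ring_free() uses bounded indices, so one slot is sacrificed to tell "full" apart from "empty" (next_to_clean == next_to_use always means empty). A sketch of the same computation; the ring size here is illustrative:

#include <assert.h>

#define RING_SIZE 512

static unsigned int tx_ring_free(unsigned int ntu, unsigned int ntc)
{
	if (ntu >= ntc)
		return RING_SIZE - (ntu - ntc) - 1;
	return ntc - ntu - 1;
}

int main(void)
{
	assert(tx_ring_free(0, 0) == RING_SIZE - 1);   /* empty ring */
	assert(tx_ring_free(511, 0) == 0);             /* full: one slot lost */
	assert(tx_ring_free(5, 10) == 4);              /* producer wrapped */
	return 0;
}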
/linux-6.12.1/drivers/net/ethernet/intel/igc/

igc_xdp.c
     48          struct igc_ring *rx_ring, *tx_ring;  in igc_xdp_enable_pool() local
     77          tx_ring = adapter->tx_ring[queue_id];
     83          igc_disable_tx_ring(tx_ring);
     88          set_bit(IGC_RING_FLAG_AF_XDP_ZC, &tx_ring->flags);
     93          igc_enable_tx_ring(tx_ring);
    107          struct igc_ring *rx_ring, *tx_ring;  in igc_xdp_disable_pool() local
    123          tx_ring = adapter->tx_ring[queue_id];
    129          igc_disable_tx_ring(tx_ring);
    135          clear_bit(IGC_RING_FLAG_AF_XDP_ZC, &tx_ring->flags);
    140          igc_enable_tx_ring(tx_ring);
igc_dump.c
    118          struct igc_ring *tx_ring;  in igc_rings_dump() local
    138          tx_ring = adapter->tx_ring[n];
    139          buffer_info = &tx_ring->tx_buffer_info[tx_ring->next_to_clean];
    142          n, tx_ring->next_to_use, tx_ring->next_to_clean,
    167          tx_ring = adapter->tx_ring[n];
    170          tx_ring->queue_index);
    174          for (i = 0; tx_ring->desc && (i < tx_ring->count); i++) {
    178          tx_desc = IGC_TX_DESC(tx_ring, i);
    179          buffer_info = &tx_ring->tx_buffer_info[i];
    181          if (i == tx_ring->next_to_use &&
         [all …]