/linux-6.12.1/drivers/net/ethernet/netronome/nfp/ |
D | nfp_net_dp.c |
      76  nfp_net_rx_ring_init(struct nfp_net_rx_ring *rx_ring,  (in nfp_net_rx_ring_init(), argument)
      81  rx_ring->idx = idx;  (in nfp_net_rx_ring_init())
      82  rx_ring->r_vec = r_vec;  (in nfp_net_rx_ring_init())
      83  u64_stats_init(&rx_ring->r_vec->rx_sync);  (in nfp_net_rx_ring_init())
      85  rx_ring->fl_qcidx = rx_ring->idx * nn->stride_rx;  (in nfp_net_rx_ring_init())
      86  rx_ring->qcp_fl = nn->rx_bar + NFP_QCP_QUEUE_OFF(rx_ring->fl_qcidx);  (in nfp_net_rx_ring_init())
      95  void nfp_net_rx_ring_reset(struct nfp_net_rx_ring *rx_ring)  (in nfp_net_rx_ring_reset(), argument)
     102  if (rx_ring->wr_p == 0 && rx_ring->rd_p == 0)  (in nfp_net_rx_ring_reset())
     106  wr_idx = D_IDX(rx_ring, rx_ring->wr_p);  (in nfp_net_rx_ring_reset())
     107  last_idx = rx_ring->cnt - 1;  (in nfp_net_rx_ring_reset())
     [all …]
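Note: the D_IDX() macro in the nfp_net_rx_ring_reset() matches above appears to map the driver's free-running wr_p/rd_p pointers onto ring slots. A minimal userspace sketch of that pattern, assuming (as a cnt - 1 mask requires) a power-of-two ring size; the names here are illustrative, not the driver's:

    #include <stdint.h>
    #include <stdio.h>

    struct ring {
        uint32_t cnt;   /* descriptor count; must be a power of two */
        uint32_t wr_p;  /* free-running write pointer, never masked on update */
        uint32_t rd_p;  /* free-running read pointer */
    };

    /* Map a free-running pointer onto a slot index; cnt - 1 only
     * works as a mask because cnt is a power of two. */
    static uint32_t ring_idx(const struct ring *r, uint32_t p)
    {
        return p & (r->cnt - 1);
    }

    int main(void)
    {
        struct ring r = { .cnt = 8, .wr_p = 13, .rd_p = 5 };

        printf("wr_p=13 lands in slot %u\n", ring_idx(&r, r.wr_p)); /* slot 5 */
        printf("%u descriptors in flight\n", r.wr_p - r.rd_p);      /* 8 */
        return 0;
    }

Keeping the pointers free-running means occupancy is simply wr_p - rd_p, with unsigned arithmetic absorbing the wrap.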
|
D | nfp_net_xsk.c |
      17  nfp_net_xsk_rx_bufs_stash(struct nfp_net_rx_ring *rx_ring, unsigned int idx,  (in nfp_net_xsk_rx_bufs_stash(), argument)
      22  headroom = xsk_pool_get_headroom(rx_ring->r_vec->xsk_pool);  (in nfp_net_xsk_rx_bufs_stash())
      24  rx_ring->rxds[idx].fld.reserved = 0;  (in nfp_net_xsk_rx_bufs_stash())
      25  rx_ring->rxds[idx].fld.meta_len_dd = 0;  (in nfp_net_xsk_rx_bufs_stash())
      27  rx_ring->xsk_rxbufs[idx].xdp = xdp;  (in nfp_net_xsk_rx_bufs_stash())
      28  rx_ring->xsk_rxbufs[idx].dma_addr =  (in nfp_net_xsk_rx_bufs_stash())
      46  void nfp_net_xsk_rx_bufs_free(struct nfp_net_rx_ring *rx_ring)  (in nfp_net_xsk_rx_bufs_free(), argument)
      50  if (!rx_ring->cnt)  (in nfp_net_xsk_rx_bufs_free())
      53  for (i = 0; i < rx_ring->cnt - 1; i++)  (in nfp_net_xsk_rx_bufs_free())
      54  nfp_net_xsk_rx_free(&rx_ring->xsk_rxbufs[i]);  (in nfp_net_xsk_rx_bufs_free())
     [all …]
|
D | nfp_net_debugfs.c |
      15  struct nfp_net_rx_ring *rx_ring;  (in nfp_rx_q_show(), local)
      24  if (!r_vec->nfp_net || !r_vec->rx_ring)  (in nfp_rx_q_show())
      27  rx_ring = r_vec->rx_ring;  (in nfp_rx_q_show())
      31  rxd_cnt = rx_ring->cnt;  (in nfp_rx_q_show())
      33  fl_rd_p = nfp_qcp_rd_ptr_read(rx_ring->qcp_fl);  (in nfp_rx_q_show())
      34  fl_wr_p = nfp_qcp_wr_ptr_read(rx_ring->qcp_fl);  (in nfp_rx_q_show())
      37  rx_ring->idx, rx_ring->fl_qcidx,  (in nfp_rx_q_show())
      38  rx_ring->cnt, &rx_ring->dma, rx_ring->rxds,  (in nfp_rx_q_show())
      39  rx_ring->rd_p, rx_ring->wr_p, fl_rd_p, fl_wr_p);  (in nfp_rx_q_show())
      42  rxd = &rx_ring->rxds[i];  (in nfp_rx_q_show())
     [all …]
|
/linux-6.12.1/drivers/net/ethernet/intel/i40e/ |
D | i40e_xsk.c |
       9  void i40e_clear_rx_bi_zc(struct i40e_ring *rx_ring)  (in i40e_clear_rx_bi_zc(), argument)
      11  memset(rx_ring->rx_bi_zc, 0,  (in i40e_clear_rx_bi_zc())
      12  sizeof(*rx_ring->rx_bi_zc) * rx_ring->count);  (in i40e_clear_rx_bi_zc())
      15  static struct xdp_buff **i40e_rx_bi(struct i40e_ring *rx_ring, u32 idx)  (in i40e_rx_bi(), argument)
      17  return &rx_ring->rx_bi_zc[idx];  (in i40e_rx_bi())
      29  static int i40e_realloc_rx_xdp_bi(struct i40e_ring *rx_ring, bool pool_present)  (in i40e_realloc_rx_xdp_bi(), argument)
      31  size_t elem_size = pool_present ? sizeof(*rx_ring->rx_bi_zc) :  (in i40e_realloc_rx_xdp_bi())
      32  sizeof(*rx_ring->rx_bi);  (in i40e_realloc_rx_xdp_bi())
      33  void *sw_ring = kcalloc(rx_ring->count, elem_size, GFP_KERNEL);  (in i40e_realloc_rx_xdp_bi())
      39  kfree(rx_ring->rx_bi);  (in i40e_realloc_rx_xdp_bi())
     [all …]
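Note: i40e_realloc_rx_xdp_bi() evidently sizes the software ring for whichever element type the queue needs (xdp_buff pointers when an XSK pool is present, regular buffer records otherwise), and allocates the replacement before freeing the old array. A hedged userspace sketch of that swap, with invented stand-ins for the driver's types:

    #include <stdbool.h>
    #include <stdlib.h>

    struct ring {
        unsigned int count;
        void *sw_ring;   /* stands in for the rx_bi / rx_bi_zc storage */
    };

    /* Switch the per-descriptor bookkeeping between two element sizes.
     * Allocating before freeing means failure leaves the ring usable. */
    static int ring_realloc(struct ring *r, bool pool_present,
                            size_t zc_elem, size_t reg_elem)
    {
        size_t elem_size = pool_present ? zc_elem : reg_elem;
        void *sw_ring = calloc(r->count, elem_size);

        if (!sw_ring)
            return -1;

        free(r->sw_ring);
        r->sw_ring = sw_ring;
        return 0;
    }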
|
D | i40e_txrx.c |
     680  static void i40e_fd_handle_status(struct i40e_ring *rx_ring, u64 qword0_raw,  (in i40e_fd_handle_status(), argument)
     683  struct i40e_pf *pf = rx_ring->vsi->back;  (in i40e_fd_handle_status())
    1352  static struct i40e_rx_buffer *i40e_rx_bi(struct i40e_ring *rx_ring, u32 idx)  (in i40e_rx_bi(), argument)
    1354  return &rx_ring->rx_bi[idx];  (in i40e_rx_bi())
    1364  static void i40e_reuse_rx_page(struct i40e_ring *rx_ring,  (in i40e_reuse_rx_page(), argument)
    1368  u16 nta = rx_ring->next_to_alloc;  (in i40e_reuse_rx_page())
    1370  new_buff = i40e_rx_bi(rx_ring, nta);  (in i40e_reuse_rx_page())
    1374  rx_ring->next_to_alloc = (nta < rx_ring->count) ? nta : 0;  (in i40e_reuse_rx_page())
    1398  void i40e_clean_programming_status(struct i40e_ring *rx_ring, u64 qword0_raw,  (in i40e_clean_programming_status(), argument)
    1406  i40e_fd_handle_status(rx_ring, qword0_raw, qword1, id);  (in i40e_clean_programming_status())
     [all …]
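Note: the i40e_reuse_rx_page() matches show the usual recycle cursor: next_to_alloc hands out a slot, then advances with a compare-and-reset instead of a modulo, so the ring size need not be a power of two. A small sketch under those assumptions:

    #include <stdint.h>

    struct ring {
        uint16_t count;
        uint16_t next_to_alloc;
    };

    /* Return the slot to recycle a page into, then advance the cursor,
     * wrapping by comparison rather than by % or masking. */
    static uint16_t take_alloc_slot(struct ring *r)
    {
        uint16_t nta = r->next_to_alloc;
        uint16_t next = nta + 1;

        r->next_to_alloc = (next < r->count) ? next : 0;
        return nta;
    }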
|
/linux-6.12.1/drivers/net/ethernet/intel/ixgbe/ |
D | ixgbe_xsk.c |
      98  struct ixgbe_ring *rx_ring,  (in ixgbe_run_xdp_zc(), argument)
     107  xdp_prog = READ_ONCE(rx_ring->xdp_prog);  (in ixgbe_run_xdp_zc())
     111  err = xdp_do_redirect(rx_ring->netdev, xdp, xdp_prog);  (in ixgbe_run_xdp_zc())
     114  if (xsk_uses_need_wakeup(rx_ring->xsk_pool) && err == -ENOBUFS)  (in ixgbe_run_xdp_zc())
     141  bpf_warn_invalid_xdp_action(rx_ring->netdev, xdp_prog, act);  (in ixgbe_run_xdp_zc())
     146  trace_xdp_exception(rx_ring->netdev, xdp_prog, act);  (in ixgbe_run_xdp_zc())
     151  bool ixgbe_alloc_rx_buffers_zc(struct ixgbe_ring *rx_ring, u16 count)  (in ixgbe_alloc_rx_buffers_zc(), argument)
     155  u16 i = rx_ring->next_to_use;  (in ixgbe_alloc_rx_buffers_zc())
     163  rx_desc = IXGBE_RX_DESC(rx_ring, i);  (in ixgbe_alloc_rx_buffers_zc())
     164  bi = &rx_ring->rx_buffer_info[i];  (in ixgbe_alloc_rx_buffers_zc())
     [all …]
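Note: these matches outline the standard XDP verdict dispatch: run the program, then map each action onto pass, redirect, or drop, with unknown actions warned about and traced before being dropped. A condensed kernel-context sketch of that control flow, modeled on ixgbe_run_xdp_zc() but with the XDP_TX path and error handling trimmed, so treat it as a sketch rather than the driver's exact code:

    /* Returns <0 to drop, 0 to pass up the stack, >0 if consumed. */
    static int run_xdp_sketch(struct net_device *netdev,
                              struct bpf_prog *xdp_prog,
                              struct xdp_buff *xdp)
    {
        u32 act = bpf_prog_run_xdp(xdp_prog, xdp);

        switch (act) {
        case XDP_PASS:
            return 0;
        case XDP_REDIRECT:
            if (xdp_do_redirect(netdev, xdp, xdp_prog))
                return -1;      /* redirect failed; caller recycles */
            return 1;
        default:
            bpf_warn_invalid_xdp_action(netdev, xdp_prog, act);
            fallthrough;
        case XDP_ABORTED:
            trace_xdp_exception(netdev, xdp_prog, act);
            fallthrough;
        case XDP_DROP:
            return -1;
        }
    }

The fallthrough chain is the idiomatic part: an unrecognized verdict is warned about, then traced like an abort, then dropped like a drop.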
|
/linux-6.12.1/drivers/net/can/spi/mcp251xfd/ |
D | mcp251xfd-ring.c |
     201  struct mcp251xfd_rx_ring *rx_ring;  (in mcp251xfd_ring_init_rx(), local)
     208  mcp251xfd_for_each_rx_ring(priv, rx_ring, i) {  (in mcp251xfd_ring_init_rx())
     209  rx_ring->last_valid = timecounter_read(&priv->tc);  (in mcp251xfd_ring_init_rx())
     210  rx_ring->head = 0;  (in mcp251xfd_ring_init_rx())
     211  rx_ring->tail = 0;  (in mcp251xfd_ring_init_rx())
     212  rx_ring->base = *base;  (in mcp251xfd_ring_init_rx())
     213  rx_ring->nr = i;  (in mcp251xfd_ring_init_rx())
     214  rx_ring->fifo_nr = *fifo_nr;  (in mcp251xfd_ring_init_rx())
     216  *base = mcp251xfd_get_rx_obj_addr(rx_ring, rx_ring->obj_num);  (in mcp251xfd_ring_init_rx())
     220  addr = MCP251XFD_REG_FIFOCON(rx_ring->fifo_nr);  (in mcp251xfd_ring_init_rx())
     [all …]
|
/linux-6.12.1/drivers/net/ethernet/intel/ice/ |
D | ice_txrx.c |
     383  void ice_clean_rx_ring(struct ice_rx_ring *rx_ring)  (in ice_clean_rx_ring(), argument)
     385  struct xdp_buff *xdp = &rx_ring->xdp;  (in ice_clean_rx_ring())
     386  struct device *dev = rx_ring->dev;  (in ice_clean_rx_ring())
     391  if (!rx_ring->rx_buf)  (in ice_clean_rx_ring())
     394  if (rx_ring->xsk_pool) {  (in ice_clean_rx_ring())
     395  ice_xsk_clean_rx_ring(rx_ring);  (in ice_clean_rx_ring())
     405  for (i = 0; i < rx_ring->count; i++) {  (in ice_clean_rx_ring())
     406  struct ice_rx_buf *rx_buf = &rx_ring->rx_buf[i];  (in ice_clean_rx_ring())
     416  rx_ring->rx_buf_len,  (in ice_clean_rx_ring())
     420  dma_unmap_page_attrs(dev, rx_buf->dma, ice_rx_pg_size(rx_ring),  (in ice_clean_rx_ring())
     [all …]
|
D | ice_xsk.c |
      15  static struct xdp_buff **ice_xdp_buf(struct ice_rx_ring *rx_ring, u32 idx)  (in ice_xdp_buf(), argument)
      17  return &rx_ring->xdp_buf[idx];  (in ice_xdp_buf())
      86  ice_qvec_dis_irq(struct ice_vsi *vsi, struct ice_rx_ring *rx_ring,  (in ice_qvec_dis_irq(), argument)
      97  reg = rx_ring->reg_idx;  (in ice_qvec_dis_irq())
     167  struct ice_rx_ring *rx_ring;  (in ice_qp_dis(), local)
     175  rx_ring = vsi->rx_rings[q_idx];  (in ice_qp_dis())
     176  q_vector = rx_ring->q_vector;  (in ice_qp_dis())
     182  ice_qvec_dis_irq(vsi, rx_ring, q_vector);  (in ice_qp_dis())
     317  ice_realloc_rx_xdp_bufs(struct ice_rx_ring *rx_ring, bool pool_present)  (in ice_realloc_rx_xdp_bufs(), argument)
     319  size_t elem_size = pool_present ? sizeof(*rx_ring->xdp_buf) :  (in ice_realloc_rx_xdp_bufs())
     [all …]
|
D | ice_txrx_lib.h |
      18  ice_set_rx_bufs_act(struct xdp_buff *xdp, const struct ice_rx_ring *rx_ring,  (in ice_set_rx_bufs_act(), argument)
      22  u32 nr_frags = rx_ring->nr_frags + 1;  (in ice_set_rx_bufs_act())
      23  u32 idx = rx_ring->first_desc;  (in ice_set_rx_bufs_act())
      24  u32 cnt = rx_ring->count;  (in ice_set_rx_bufs_act())
      28  buf = &rx_ring->rx_buf[idx];  (in ice_set_rx_bufs_act())
      36  if (sinfo_frags < rx_ring->nr_frags && act == ICE_XDP_CONSUMED) {  (in ice_set_rx_bufs_act())
      37  u32 delta = rx_ring->nr_frags - sinfo_frags;  (in ice_set_rx_bufs_act())
      44  buf = &rx_ring->rx_buf[idx];  (in ice_set_rx_bufs_act())
      76  ice_is_non_eop(const struct ice_rx_ring *rx_ring,  (in ice_is_non_eop(), argument)
      84  rx_ring->ring_stats->rx_stats.non_eop_descs++;  (in ice_is_non_eop())
     [all …]
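Note: ice_is_non_eop() is the multi-buffer check: only the final descriptor of a packet carries the End-Of-Packet flag, and every non-final descriptor is counted in the ring stats. A userspace sketch with an illustrative bit position (not ice's actual descriptor layout):

    #include <stdbool.h>
    #include <stdint.h>

    #define DESC_EOP_BIT 0x1u   /* illustrative; not the ice bit layout */

    struct rx_stats { uint64_t non_eop_descs; };

    /* True means: the packet continues in the next descriptor, so the
     * caller keeps collecting fragments before building the skb. */
    static bool is_non_eop(uint16_t status, struct rx_stats *stats)
    {
        if (status & DESC_EOP_BIT)
            return false;

        stats->non_eop_descs++;
        return true;
    }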
|
D | ice_txrx_lib.c |
      16  void ice_release_rx_desc(struct ice_rx_ring *rx_ring, u16 val)  (in ice_release_rx_desc(), argument)
      18  u16 prev_ntu = rx_ring->next_to_use & ~0x7;  (in ice_release_rx_desc())
      20  rx_ring->next_to_use = val;  (in ice_release_rx_desc())
      23  rx_ring->next_to_alloc = val;  (in ice_release_rx_desc())
      38  writel(val, rx_ring->tail);  (in ice_release_rx_desc())
      67  ice_rx_hash_to_skb(const struct ice_rx_ring *rx_ring,  (in ice_rx_hash_to_skb(), argument)
      75  if (!libeth_rx_pt_has_hash(rx_ring->netdev, decoded))  (in ice_rx_hash_to_skb())
     162  ice_ptp_rx_hwts_to_skb(struct ice_rx_ring *rx_ring,  (in ice_ptp_rx_hwts_to_skb(), argument)
     166  u64 ts_ns = ice_ptp_get_rx_hwts(rx_desc, &rx_ring->pkt_ctx);  (in ice_ptp_rx_hwts_to_skb())
     192  ice_process_skb_fields(struct ice_rx_ring *rx_ring,  (in ice_process_skb_fields(), argument)
     [all …]
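Note: the prev_ntu = next_to_use & ~0x7 line in ice_release_rx_desc() hints that the tail register is not worth writing on every refill; rounding both the old and new positions down to a multiple of eight means the (comparatively expensive) MMIO doorbell, typically preceded by a write barrier so descriptor writes land first, only fires when an 8-descriptor boundary is crossed. The boundary arithmetic alone, as a runnable sketch:

    #include <stdint.h>
    #include <stdio.h>

    /* A tail write is only useful when the rounded-down value changes,
     * i.e. when next_to_use crosses an 8-descriptor boundary. */
    static int tail_write_needed(uint16_t prev_ntu, uint16_t new_ntu)
    {
        return (prev_ntu & ~0x7) != (new_ntu & ~0x7);
    }

    int main(void)
    {
        printf("%d\n", tail_write_needed(13, 15)); /* 0: same group of 8 */
        printf("%d\n", tail_write_needed(13, 16)); /* 1: boundary crossed */
        return 0;
    }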
|
/linux-6.12.1/drivers/net/ethernet/amazon/ena/ |
D | ena_xdp.h |
      45  int ena_xdp_register_rxq_info(struct ena_ring *rx_ring);
      46  void ena_xdp_unregister_rxq_info(struct ena_ring *rx_ring);
      82  static inline int ena_xdp_execute(struct ena_ring *rx_ring, struct xdp_buff *xdp)  (in ena_xdp_execute(), argument)
      90  xdp_prog = READ_ONCE(rx_ring->xdp_bpf_prog);  (in ena_xdp_execute())
      98  trace_xdp_exception(rx_ring->netdev, xdp_prog, verdict);  (in ena_xdp_execute())
      99  xdp_stat = &rx_ring->rx_stats.xdp_aborted;  (in ena_xdp_execute())
     105  xdp_ring = rx_ring->xdp_ring;  (in ena_xdp_execute())
     110  if (ena_xdp_xmit_frame(xdp_ring, rx_ring->adapter, xdpf,  (in ena_xdp_execute())
     115  xdp_stat = &rx_ring->rx_stats.xdp_tx;  (in ena_xdp_execute())
     119  if (likely(!xdp_do_redirect(rx_ring->netdev, xdp, xdp_prog))) {  (in ena_xdp_execute())
     [all …]
|
D | ena_netdev.c |
      95  adapter->rx_ring[i].mtu = mtu;  (in update_rx_ring_mtu())
     217  rxr = &adapter->rx_ring[i];  (in ena_init_io_rings())
     393  struct ena_ring *rx_ring = &adapter->rx_ring[qid];  (in ena_setup_rx_resources(), local)
     397  if (rx_ring->rx_buffer_info) {  (in ena_setup_rx_resources())
     406  size = sizeof(struct ena_rx_buffer) * (rx_ring->ring_size + 1);  (in ena_setup_rx_resources())
     409  rx_ring->rx_buffer_info = vzalloc_node(size, node);  (in ena_setup_rx_resources())
     410  if (!rx_ring->rx_buffer_info) {  (in ena_setup_rx_resources())
     411  rx_ring->rx_buffer_info = vzalloc(size);  (in ena_setup_rx_resources())
     412  if (!rx_ring->rx_buffer_info)  (in ena_setup_rx_resources())
     416  size = sizeof(u16) * rx_ring->ring_size;  (in ena_setup_rx_resources())
     [all …]
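Note: the vzalloc_node()/vzalloc() pair in ena_setup_rx_resources() is the common NUMA-aware allocation fallback: try the node the queue is serviced from, and settle for any node rather than failing the queue. A kernel-context sketch of that shape (struct rx_buffer_meta and the surrounding variables are stand-ins, not ena's names):

    /* Prefer the local NUMA node for per-queue bookkeeping; remote
     * memory is slower, but far better than no RX queue at all. */
    size = sizeof(struct rx_buffer_meta) * (ring_size + 1);

    ring->buffer_info = vzalloc_node(size, node);
    if (!ring->buffer_info) {
        ring->buffer_info = vzalloc(size);   /* any node will do */
        if (!ring->buffer_info)
            return -ENOMEM;
    }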
|
D | ena_xdp.c |
     135  if (!READ_ONCE(adapter->rx_ring->xdp_bpf_prog))  (in ena_xdp_xmit())
     196  int ena_xdp_register_rxq_info(struct ena_ring *rx_ring)  (in ena_xdp_register_rxq_info(), argument)
     200  rc = xdp_rxq_info_reg(&rx_ring->xdp_rxq, rx_ring->netdev, rx_ring->qid, 0);  (in ena_xdp_register_rxq_info())
     202  netif_dbg(rx_ring->adapter, ifup, rx_ring->netdev, "Registering RX info for queue %d",  (in ena_xdp_register_rxq_info())
     203  rx_ring->qid);  (in ena_xdp_register_rxq_info())
     205  netif_err(rx_ring->adapter, ifup, rx_ring->netdev,  (in ena_xdp_register_rxq_info())
     207  rx_ring->qid, rc);  (in ena_xdp_register_rxq_info())
     211  rc = xdp_rxq_info_reg_mem_model(&rx_ring->xdp_rxq, MEM_TYPE_PAGE_SHARED, NULL);  (in ena_xdp_register_rxq_info())
     214  netif_err(rx_ring->adapter, ifup, rx_ring->netdev,  (in ena_xdp_register_rxq_info())
     216  rx_ring->qid, rc);  (in ena_xdp_register_rxq_info())
     [all …]
|
/linux-6.12.1/drivers/net/ethernet/intel/iavf/ |
D | iavf_txrx.c |
     691  static void iavf_clean_rx_ring(struct iavf_ring *rx_ring)  (in iavf_clean_rx_ring(), argument)
     694  if (!rx_ring->rx_fqes)  (in iavf_clean_rx_ring())
     697  if (rx_ring->skb) {  (in iavf_clean_rx_ring())
     698  dev_kfree_skb(rx_ring->skb);  (in iavf_clean_rx_ring())
     699  rx_ring->skb = NULL;  (in iavf_clean_rx_ring())
     703  for (u32 i = rx_ring->next_to_clean; i != rx_ring->next_to_use; ) {  (in iavf_clean_rx_ring())
     704  const struct libeth_fqe *rx_fqes = &rx_ring->rx_fqes[i];  (in iavf_clean_rx_ring())
     706  page_pool_put_full_page(rx_ring->pp, rx_fqes->page, false);  (in iavf_clean_rx_ring())
     708  if (unlikely(++i == rx_ring->count))  (in iavf_clean_rx_ring())
     712  rx_ring->next_to_clean = 0;  (in iavf_clean_rx_ring())
     [all …]
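Note: iavf_clean_rx_ring() walks only the window of live buffers, from next_to_clean up to (but excluding) next_to_use, with an explicit wrap, then rewinds both cursors. The traversal as a self-contained sketch:

    #include <stdint.h>

    struct ring {
        uint32_t count;
        uint32_t next_to_clean;
        uint32_t next_to_use;
    };

    /* Slots outside [next_to_clean, next_to_use) hold nothing the
     * driver still owns, so only that window is drained. */
    static void ring_drain(struct ring *r, void (*put)(uint32_t idx))
    {
        uint32_t i = r->next_to_clean;

        while (i != r->next_to_use) {
            put(i);
            if (++i == r->count)
                i = 0;
        }
        r->next_to_clean = 0;
        r->next_to_use = 0;
    }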
|
/linux-6.12.1/drivers/net/ethernet/freescale/enetc/ |
D | enetc.c |
      50  return priv->rx_ring[index];  (in enetc_rx_ring_from_xdp_tx_ring())
     718  v->rx_ring.stats.packets,  (in enetc_rx_net_dim())
     719  v->rx_ring.stats.bytes,  (in enetc_rx_net_dim())
     736  static void enetc_reuse_page(struct enetc_bdr *rx_ring,  (in enetc_reuse_page(), argument)
     741  new = &rx_ring->rx_swbd[rx_ring->next_to_alloc];  (in enetc_reuse_page())
     744  enetc_bdr_idx_inc(rx_ring, &rx_ring->next_to_alloc);  (in enetc_reuse_page())
     786  struct enetc_bdr *rx_ring;  (in enetc_recycle_xdp_tx_buff(), local)
     788  rx_ring = enetc_rx_ring_from_xdp_tx_ring(priv, tx_ring);  (in enetc_recycle_xdp_tx_buff())
     790  if (likely(enetc_swbd_unused(rx_ring))) {  (in enetc_recycle_xdp_tx_buff())
     791  enetc_reuse_page(rx_ring, &rx_swbd);  (in enetc_recycle_xdp_tx_buff())
     [all …]
|
/linux-6.12.1/drivers/net/ethernet/broadcom/ |
D | bcm4908_enet.c |
      86  struct bcm4908_enet_dma_ring rx_ring;  (member)
     199  struct bcm4908_enet_dma_ring *rx_ring = &enet->rx_ring;  (in bcm4908_enet_dma_free(), local)
     203  size = rx_ring->length * sizeof(struct bcm4908_enet_dma_ring_bd);  (in bcm4908_enet_dma_free())
     204  if (rx_ring->cpu_addr)  (in bcm4908_enet_dma_free())
     205  dma_free_coherent(dev, size, rx_ring->cpu_addr, rx_ring->dma_addr);  (in bcm4908_enet_dma_free())
     206  kfree(rx_ring->slots);  (in bcm4908_enet_dma_free())
     217  struct bcm4908_enet_dma_ring *rx_ring = &enet->rx_ring;  (in bcm4908_enet_dma_alloc(), local)
     231  rx_ring->length = ENET_RX_BDS_NUM;  (in bcm4908_enet_dma_alloc())
     232  rx_ring->is_tx = 0;  (in bcm4908_enet_dma_alloc())
     233  rx_ring->cfg_block = ENET_DMA_CH_RX_CFG;  (in bcm4908_enet_dma_alloc())
     [all …]
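Note: bcm4908_enet keeps each descriptor ring in a single coherent allocation: dma_alloc_coherent() hands back both the CPU view and the bus address the NIC is programmed with, and teardown mirrors setup. A kernel-context sketch (struct ring_bd and the ring fields are invented for illustration):

    /* Setup: one coherent block holds the whole buffer-descriptor ring. */
    size = ring->length * sizeof(struct ring_bd);
    ring->cpu_addr = dma_alloc_coherent(dev, size, &ring->dma_addr,
                                        GFP_KERNEL);
    if (!ring->cpu_addr)
        return -ENOMEM;

    /* Teardown: guard on cpu_addr so a failed, partial setup is safe
     * to unwind with the same function. */
    if (ring->cpu_addr)
        dma_free_coherent(dev, size, ring->cpu_addr, ring->dma_addr);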
|
/linux-6.12.1/drivers/net/ethernet/mscc/ |
D | ocelot_fdma.c |
      58  struct ocelot_fdma_rx_ring *rx_ring = &fdma->rx_ring;  (in ocelot_fdma_rx_ring_free(), local)
      60  if (rx_ring->next_to_use >= rx_ring->next_to_clean)  (in ocelot_fdma_rx_ring_free())
      62  (rx_ring->next_to_use - rx_ring->next_to_clean) - 1;  (in ocelot_fdma_rx_ring_free())
      64  return rx_ring->next_to_clean - rx_ring->next_to_use - 1;  (in ocelot_fdma_rx_ring_free())
     149  struct ocelot_fdma_rx_ring *rx_ring;  (in ocelot_fdma_alloc_rx_buffs(), local)
     156  rx_ring = &fdma->rx_ring;  (in ocelot_fdma_alloc_rx_buffs())
     157  idx = rx_ring->next_to_use;  (in ocelot_fdma_alloc_rx_buffs())
     160  rxb = &rx_ring->bufs[idx];  (in ocelot_fdma_alloc_rx_buffs())
     171  dcb = &rx_ring->dcbs[idx];  (in ocelot_fdma_alloc_rx_buffs())
     177  dcb->llp = ocelot_fdma_idx_dma(rx_ring->dcbs_dma, idx);  (in ocelot_fdma_alloc_rx_buffs())
     [all …]
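Note: ocelot_fdma_rx_ring_free() computes free slots for a producer/consumer ring that keeps one slot permanently empty so that "full" and "empty" are distinguishable; the two branches handle whether the producer has wrapped past the consumer. The formula with a couple of worked values:

    #include <stdint.h>
    #include <stdio.h>

    /* One slot always stays empty, hence the trailing -1. */
    static uint32_t ring_free(uint32_t size, uint32_t ntu, uint32_t ntc)
    {
        if (ntu >= ntc)
            return size - (ntu - ntc) - 1;
        return ntc - ntu - 1;
    }

    int main(void)
    {
        printf("%u\n", ring_free(128, 10, 10)); /* idle ring: 127 free */
        printf("%u\n", ring_free(128, 120, 5)); /* 115 in flight: 12 free */
        return 0;
    }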
|
/linux-6.12.1/drivers/net/ethernet/intel/igc/ |
D | igc_xdp.c |
      48  struct igc_ring *rx_ring, *tx_ring;  (in igc_xdp_enable_pool(), local)
      76  rx_ring = adapter->rx_ring[queue_id];  (in igc_xdp_enable_pool())
      79  napi = &rx_ring->q_vector->napi;  (in igc_xdp_enable_pool())
      82  igc_disable_rx_ring(rx_ring);  (in igc_xdp_enable_pool())
      87  set_bit(IGC_RING_FLAG_AF_XDP_ZC, &rx_ring->flags);  (in igc_xdp_enable_pool())
      92  igc_enable_rx_ring(rx_ring);  (in igc_xdp_enable_pool())
     107  struct igc_ring *rx_ring, *tx_ring;  (in igc_xdp_disable_pool(), local)
     122  rx_ring = adapter->rx_ring[queue_id];  (in igc_xdp_disable_pool())
     125  napi = &rx_ring->q_vector->napi;  (in igc_xdp_disable_pool())
     128  igc_disable_rx_ring(rx_ring);  (in igc_xdp_disable_pool())
     [all …]
|
/linux-6.12.1/drivers/net/ethernet/aquantia/atlantic/ |
D | aq_ring.c |
      52  static int aq_alloc_rxpages(struct aq_rxpage *rxpage, struct aq_ring_s *rx_ring)  (in aq_alloc_rxpages(), argument)
      54  struct device *dev = aq_nic_get_dev(rx_ring->aq_nic);  (in aq_alloc_rxpages())
      55  unsigned int order = rx_ring->page_order;  (in aq_alloc_rxpages())
      73  rxpage->pg_off = rx_ring->page_offset;  (in aq_alloc_rxpages())
     407  struct aq_ring_s *rx_ring,  (in aq_xdp_run_prog(), argument)
     417  u64_stats_update_begin(&rx_ring->stats.rx.syncp);  (in aq_xdp_run_prog())
     418  ++rx_ring->stats.rx.packets;  (in aq_xdp_run_prog())
     419  rx_ring->stats.rx.bytes += xdp_get_buff_len(xdp);  (in aq_xdp_run_prog())
     420  u64_stats_update_end(&rx_ring->stats.rx.syncp);  (in aq_xdp_run_prog())
     422  prog = READ_ONCE(rx_ring->xdp_prog);  (in aq_xdp_run_prog())
     [all …]
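Note: the u64_stats_update_begin()/u64_stats_update_end() pair around the packet counters is the usual seqcount-style protection for 64-bit statistics: on 64-bit kernels it compiles down to nothing, while 32-bit readers retry until they catch a consistent pair. A kernel-context sketch of both halves (struct and function names are illustrative):

    struct rx_sw_stats {
        u64 packets;
        u64 bytes;
        struct u64_stats_sync syncp;   /* u64_stats_init() at ring setup */
    };

    static void rx_stats_add(struct rx_sw_stats *s, u64 len)
    {
        u64_stats_update_begin(&s->syncp);
        s->packets++;
        s->bytes += len;
        u64_stats_update_end(&s->syncp);
    }

    static void rx_stats_snapshot(struct rx_sw_stats *s, u64 *pkts, u64 *bytes)
    {
        unsigned int start;

        do {
            start = u64_stats_fetch_begin(&s->syncp);
            *pkts = s->packets;
            *bytes = s->bytes;
        } while (u64_stats_fetch_retry(&s->syncp, start));
    }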
|
/linux-6.12.1/drivers/net/ethernet/intel/fm10k/ |
D | fm10k_main.c |
      74  static bool fm10k_alloc_mapped_page(struct fm10k_ring *rx_ring,  (in fm10k_alloc_mapped_page(), argument)
      87  rx_ring->rx_stats.alloc_failed++;  (in fm10k_alloc_mapped_page())
      92  dma = dma_map_page(rx_ring->dev, page, 0, PAGE_SIZE, DMA_FROM_DEVICE);  (in fm10k_alloc_mapped_page())
      97  if (dma_mapping_error(rx_ring->dev, dma)) {  (in fm10k_alloc_mapped_page())
     100  rx_ring->rx_stats.alloc_failed++;  (in fm10k_alloc_mapped_page())
     116  void fm10k_alloc_rx_buffers(struct fm10k_ring *rx_ring, u16 cleaned_count)  (in fm10k_alloc_rx_buffers(), argument)
     120  u16 i = rx_ring->next_to_use;  (in fm10k_alloc_rx_buffers())
     126  rx_desc = FM10K_RX_DESC(rx_ring, i);  (in fm10k_alloc_rx_buffers())
     127  bi = &rx_ring->rx_buffer[i];  (in fm10k_alloc_rx_buffers())
     128  i -= rx_ring->count;  (in fm10k_alloc_rx_buffers())
     [all …]
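Note: the odd-looking i -= rx_ring->count in fm10k_alloc_rx_buffers() is a loop bias: the u16 index runs in "negative" wrapped space, so the wrap test in the hot refill loop becomes a compare against zero instead of a compare against count. A runnable sketch of just that trick:

    #include <stdint.h>
    #include <stdio.h>

    /* Fill 'budget' slots starting at next_to_use on a ring of 'count'
     * entries, using the fm10k-style negative-index bias. */
    static void fill_ring(uint16_t next_to_use, uint16_t count, uint16_t budget)
    {
        uint16_t i = next_to_use;

        i -= count;                   /* bias into "negative" u16 space */

        while (budget--) {
            uint16_t slot = (uint16_t)(i + count);   /* undo the bias */

            printf("filling slot %u\n", slot);
            i++;
            if (!i)                   /* zero: end of the ring reached */
                i -= count;           /* re-bias for the next lap */
        }
    }

    int main(void)
    {
        fill_ring(2, 4, 6);   /* slots 2, 3, 0, 1, 2, 3 */
        return 0;
    }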
|
/linux-6.12.1/drivers/net/ethernet/wangxun/libwx/ |
D | wx_lib.c |
     164  static void wx_dma_sync_frag(struct wx_ring *rx_ring,  (in wx_dma_sync_frag(), argument)
     170  dma_sync_single_range_for_cpu(rx_ring->dev,  (in wx_dma_sync_frag())
     178  page_pool_put_full_page(rx_ring->page_pool, rx_buffer->page, false);  (in wx_dma_sync_frag())
     181  static struct wx_rx_buffer *wx_get_rx_buffer(struct wx_ring *rx_ring,  (in wx_get_rx_buffer(), argument)
     189  rx_buffer = &rx_ring->rx_buffer_info[rx_ring->next_to_clean];  (in wx_get_rx_buffer())
     210  wx_dma_sync_frag(rx_ring, rx_buffer);  (in wx_get_rx_buffer())
     214  dma_sync_single_range_for_cpu(rx_ring->dev,  (in wx_get_rx_buffer())
     223  static void wx_put_rx_buffer(struct wx_ring *rx_ring,  (in wx_put_rx_buffer(), argument)
     237  static struct sk_buff *wx_build_skb(struct wx_ring *rx_ring,  (in wx_build_skb(), argument)
     257  skb = napi_alloc_skb(&rx_ring->q_vector->napi, WX_RXBUFFER_256);  (in wx_build_skb())
     [all …]
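Note: wx_lib.c leans on dma_sync_single_range_for_cpu() to hand ownership of just the written bytes back to the CPU before touching them, rather than syncing the whole mapped page. The call shape, as a kernel-context fragment (the field names follow the matches above, but treat the exact buffer layout as an assumption):

    /* Sync only the region the NIC actually wrote, at its offset
     * within the mapped page; cheaper than syncing the full buffer. */
    dma_sync_single_range_for_cpu(rx_ring->dev,
                                  rx_buffer->dma,          /* mapping handle */
                                  rx_buffer->page_offset,  /* start of data */
                                  size,                    /* bytes received */
                                  DMA_FROM_DEVICE);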
|
/linux-6.12.1/drivers/net/ethernet/intel/igbvf/ |
D | netdev.c |
     100  napi_gro_receive(&adapter->rx_ring->napi, skb);  (in igbvf_receive_skb())
     133  static void igbvf_alloc_rx_buffers(struct igbvf_ring *rx_ring,  (in igbvf_alloc_rx_buffers(), argument)
     136  struct igbvf_adapter *adapter = rx_ring->adapter;  (in igbvf_alloc_rx_buffers())
     145  i = rx_ring->next_to_use;  (in igbvf_alloc_rx_buffers())
     146  buffer_info = &rx_ring->buffer_info[i];  (in igbvf_alloc_rx_buffers())
     154  rx_desc = IGBVF_RX_DESC_ADV(*rx_ring, i);  (in igbvf_alloc_rx_buffers())
     212  if (i == rx_ring->count)  (in igbvf_alloc_rx_buffers())
     214  buffer_info = &rx_ring->buffer_info[i];  (in igbvf_alloc_rx_buffers())
     218  if (rx_ring->next_to_use != i) {  (in igbvf_alloc_rx_buffers())
     219  rx_ring->next_to_use = i;  (in igbvf_alloc_rx_buffers())
     [all …]
|
/linux-6.12.1/drivers/net/ethernet/intel/ixgbevf/ |
D | ixgbevf_main.c |
     111  static void ixgbevf_reuse_rx_page(struct ixgbevf_ring *rx_ring,
     505  static void ixgbevf_process_skb_fields(struct ixgbevf_ring *rx_ring,  (in ixgbevf_process_skb_fields(), argument)
     509  ixgbevf_rx_hash(rx_ring, rx_desc, skb);  (in ixgbevf_process_skb_fields())
     510  ixgbevf_rx_checksum(rx_ring, rx_desc, skb);  (in ixgbevf_process_skb_fields())
     514  unsigned long *active_vlans = netdev_priv(rx_ring->netdev);  (in ixgbevf_process_skb_fields())
     521  ixgbevf_ipsec_rx(rx_ring, rx_desc, skb);  (in ixgbevf_process_skb_fields())
     523  skb->protocol = eth_type_trans(skb, rx_ring->netdev);  (in ixgbevf_process_skb_fields())
     527  struct ixgbevf_rx_buffer *ixgbevf_get_rx_buffer(struct ixgbevf_ring *rx_ring,  (in ixgbevf_get_rx_buffer(), argument)
     532  rx_buffer = &rx_ring->rx_buffer_info[rx_ring->next_to_clean];  (in ixgbevf_get_rx_buffer())
     536  dma_sync_single_range_for_cpu(rx_ring->dev,  (in ixgbevf_get_rx_buffer())
     [all …]
|
/linux-6.12.1/drivers/net/ethernet/netronome/nfp/nfd3/ |
D | dp.c |
     588  struct nfp_net_rx_ring *rx_ring,  (in nfp_nfd3_rx_give_one(), argument)
     593  wr_idx = D_IDX(rx_ring, rx_ring->wr_p);  (in nfp_nfd3_rx_give_one())
     598  rx_ring->rxbufs[wr_idx].frag = frag;  (in nfp_nfd3_rx_give_one())
     599  rx_ring->rxbufs[wr_idx].dma_addr = dma_addr;  (in nfp_nfd3_rx_give_one())
     602  rx_ring->rxds[wr_idx].fld.reserved = 0;  (in nfp_nfd3_rx_give_one())
     603  rx_ring->rxds[wr_idx].fld.meta_len_dd = 0;  (in nfp_nfd3_rx_give_one())
     608  nfp_desc_set_dma_addr_48b(&rx_ring->rxds[wr_idx].fld,  (in nfp_nfd3_rx_give_one())
     611  rx_ring->wr_p++;  (in nfp_nfd3_rx_give_one())
     612  if (!(rx_ring->wr_p % NFP_NET_FL_BATCH)) {  (in nfp_nfd3_rx_give_one())
     617  nfp_qcp_wr_ptr_add(rx_ring->qcp_fl, NFP_NET_FL_BATCH);  (in nfp_nfd3_rx_give_one())
     [all …]
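Note: nfp_nfd3_rx_give_one() posts buffers one at a time but only rings the freelist doorbell every NFP_NET_FL_BATCH entries, amortizing the MMIO write across the batch. A minimal sketch of the batching, with an assumed batch size of 8 standing in for NFP_NET_FL_BATCH:

    #include <stdint.h>

    #define FL_BATCH 8   /* assumed stand-in for NFP_NET_FL_BATCH */

    struct ring { uint32_t wr_p; };

    /* Each call posts one buffer; only every FL_BATCH-th call pays for
     * the doorbell, which covers all postings since the previous one. */
    static void give_one(struct ring *r, void (*doorbell_add)(uint32_t n))
    {
        r->wr_p++;
        if (!(r->wr_p % FL_BATCH))
            doorbell_add(FL_BATCH);
    }

Because wr_p is free-running, the modulo test fires exactly once per FL_BATCH postings regardless of where the ring has wrapped.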
|