Searched refs:xsk_pool (Results 1 – 25 of 44) sorted by relevance

/linux-6.12.1/drivers/net/ethernet/intel/ice/
ice_xsk.c 355 if (!rx_ring->xsk_pool) in ice_realloc_zc_buf()
475 struct xsk_buff_pool *xsk_pool, u16 count) in __ice_alloc_rx_bufs_zc() argument
487 nb_buffs_extra = ice_fill_rx_descs(xsk_pool, xdp, rx_desc, in __ice_alloc_rx_bufs_zc()
500 nb_buffs = ice_fill_rx_descs(xsk_pool, xdp, rx_desc, count); in __ice_alloc_rx_bufs_zc()
525 struct xsk_buff_pool *xsk_pool, u16 count) in ice_alloc_rx_bufs_zc() argument
534 if (!__ice_alloc_rx_bufs_zc(rx_ring, xsk_pool, rx_thresh)) in ice_alloc_rx_bufs_zc()
536 return __ice_alloc_rx_bufs_zc(rx_ring, xsk_pool, leftover); in ice_alloc_rx_bufs_zc()
608 struct xsk_buff_pool *xsk_pool) in ice_clean_xdp_irq_zc() argument
659 xsk_tx_completed(xsk_pool, xsk_frames); in ice_clean_xdp_irq_zc()
679 struct xsk_buff_pool *xsk_pool) in ice_xmit_xdp_tx_zc() argument
[all …]
ice_xsk.h 24 struct xsk_buff_pool *xsk_pool,
28 struct xsk_buff_pool *xsk_pool, u16 count);
32 bool ice_xmit_zc(struct ice_tx_ring *xdp_ring, struct xsk_buff_pool *xsk_pool);
36 struct xsk_buff_pool __always_unused *xsk_pool) in ice_xmit_zc() argument
51 struct xsk_buff_pool __always_unused *xsk_pool, in ice_clean_rx_irq_zc() argument
59 struct xsk_buff_pool __always_unused *xsk_pool, in ice_alloc_rx_bufs_zc() argument
ice_base.c 510 xsk_pool_fill_cb(ring->xsk_pool, &desc); in ice_xsk_pool_fill_cb()
557 if (ring->xsk_pool) { in ice_vsi_cfg_rxq()
561 xsk_pool_get_rx_frame_size(ring->xsk_pool); in ice_vsi_cfg_rxq()
573 xsk_pool_set_rxq_info(ring->xsk_pool, &ring->xdp_rxq); in ice_vsi_cfg_rxq()
606 if (ring->xsk_pool) { in ice_vsi_cfg_rxq()
609 if (!xsk_buff_can_alloc(ring->xsk_pool, num_bufs)) { in ice_vsi_cfg_rxq()
617 ok = ice_alloc_rx_bufs_zc(ring, ring->xsk_pool, num_bufs); in ice_vsi_cfg_rxq()
ice_txrx.c 156 if (ice_ring_is_xdp(tx_ring) && tx_ring->xsk_pool) { in ice_clean_tx_ring()
394 if (rx_ring->xsk_pool) { in ice_clean_rx_ring()
429 if (rx_ring->xsk_pool) in ice_clean_rx_ring()
460 if (rx_ring->xsk_pool) { in ice_free_rx_ring()
1485 struct xsk_buff_pool *xsk_pool = READ_ONCE(tx_ring->xsk_pool); in ice_napi_poll() local
1488 if (xsk_pool) in ice_napi_poll()
1489 wd = ice_xmit_zc(tx_ring, xsk_pool); in ice_napi_poll()
1515 struct xsk_buff_pool *xsk_pool = READ_ONCE(rx_ring->xsk_pool); in ice_napi_poll() local
1522 cleaned = rx_ring->xsk_pool ? in ice_napi_poll()
1523 ice_clean_rx_irq_zc(rx_ring, xsk_pool, budget_per_ring) : in ice_napi_poll()
ice_txrx.h 360 struct xsk_buff_pool *xsk_pool; member
386 struct xsk_buff_pool *xsk_pool; member
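
The ice_napi_poll() hits above (lines 1485 and 1515) show the pattern most drivers in this listing share: the ring caches an xsk_buff_pool pointer that the control path swaps when an AF_XDP socket binds or unbinds, so the NAPI hot path snapshots it once with READ_ONCE() and branches between the zero-copy and copy paths. A minimal sketch of that pattern; the my_* ring and handler names are hypothetical, not the ice driver's:

#include <linux/compiler.h>
#include <net/xdp_sock_drv.h>

struct my_rx_ring {
	struct xsk_buff_pool *xsk_pool;	/* swapped at runtime by the control path */
};

static int my_clean_rx_zc(struct my_rx_ring *ring,
			  struct xsk_buff_pool *xsk_pool, int budget)
{
	return 0;	/* zero-copy RX cleaning would live here */
}

static int my_clean_rx(struct my_rx_ring *ring, int budget)
{
	return 0;	/* regular page-based RX cleaning would live here */
}

static int my_napi_rx(struct my_rx_ring *ring, int budget)
{
	/* One snapshot per poll: the pointer may change concurrently. */
	struct xsk_buff_pool *xsk_pool = READ_ONCE(ring->xsk_pool);

	return xsk_pool ? my_clean_rx_zc(ring, xsk_pool, budget) :
			  my_clean_rx(ring, budget);
}
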
/linux-6.12.1/drivers/net/ethernet/intel/i40e/
i40e_xsk.c 210 if (xsk_uses_need_wakeup(rx_ring->xsk_pool) && err == -ENOBUFS) in i40e_run_xdp_zc()
252 nb_buffs = xsk_buff_alloc_batch(rx_ring->xsk_pool, xdp, nb_buffs); in i40e_alloc_rx_buffers_zc()
520 if (xsk_uses_need_wakeup(rx_ring->xsk_pool)) { in i40e_clean_rx_irq_zc()
522 xsk_set_rx_need_wakeup(rx_ring->xsk_pool); in i40e_clean_rx_irq_zc()
524 xsk_clear_rx_need_wakeup(rx_ring->xsk_pool); in i40e_clean_rx_irq_zc()
538 dma = xsk_buff_raw_get_dma(xdp_ring->xsk_pool, desc->addr); in i40e_xmit_pkt()
539 xsk_buff_raw_dma_sync_for_device(xdp_ring->xsk_pool, dma, desc->len); in i40e_xmit_pkt()
559 dma = xsk_buff_raw_get_dma(xdp_ring->xsk_pool, desc[i].addr); in i40e_xmit_pkt_batch()
560 xsk_buff_raw_dma_sync_for_device(xdp_ring->xsk_pool, dma, desc[i].len); in i40e_xmit_pkt_batch()
603 struct xdp_desc *descs = xdp_ring->xsk_pool->tx_descs; in i40e_xmit_zc()
[all …]
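
The i40e_clean_rx_irq_zc() hits (lines 520–524) show the RX side of the need_wakeup handshake: if buffer allocation failed this round, the driver sets the flag so user space knows it must refill the fill queue and kick the kernel; otherwise it clears it. A sketch of that end-of-poll step, where alloc_failed is a hypothetical flag:

#include <net/xdp_sock_drv.h>

/* End-of-poll RX bookkeeping; alloc_failed is a hypothetical flag meaning
 * the fill queue could not supply buffers during this NAPI round. */
static void my_rx_poll_done(struct xsk_buff_pool *pool, bool alloc_failed)
{
	if (xsk_uses_need_wakeup(pool)) {
		if (alloc_failed)
			xsk_set_rx_need_wakeup(pool);	/* user space: refill, then poll() */
		else
			xsk_clear_rx_need_wakeup(pool);
	}
}
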
/linux-6.12.1/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/
rx.c 30 if (unlikely(!xsk_buff_can_alloc(rq->xsk_pool, rq->mpwqe.pages_per_wqe))) in mlx5e_xsk_alloc_rx_mpwqe()
35 batch = xsk_buff_alloc_batch(rq->xsk_pool, xsk_buffs, in mlx5e_xsk_alloc_rx_mpwqe()
45 xsk_buffs[batch] = xsk_buff_alloc(rq->xsk_pool); in mlx5e_xsk_alloc_rx_mpwqe()
102 rq->xsk_pool->chunk_size); in mlx5e_xsk_alloc_rx_mpwqe()
103 __be32 frame_size = cpu_to_be32(rq->xsk_pool->chunk_size); in mlx5e_xsk_alloc_rx_mpwqe()
173 alloc = xsk_buff_alloc_batch(rq->xsk_pool, buffs + ix, wqe_bulk); in mlx5e_xsk_alloc_rx_wqes_batched()
175 alloc = xsk_buff_alloc_batch(rq->xsk_pool, buffs + ix, contig); in mlx5e_xsk_alloc_rx_wqes_batched()
177 alloc += xsk_buff_alloc_batch(rq->xsk_pool, buffs, wqe_bulk - contig); in mlx5e_xsk_alloc_rx_wqes_batched()
213 *frag->xskp = xsk_buff_alloc(rq->xsk_pool); in mlx5e_xsk_alloc_rx_wqes()
tx.c 58 if (xp_tx_metadata_enabled(sq->xsk_pool)) in mlx5e_xsk_tx_post_err()
66 struct xsk_buff_pool *pool = sq->xsk_pool; in mlx5e_xsk_tx()
114 if (xp_tx_metadata_enabled(sq->xsk_pool)) { in mlx5e_xsk_tx()
setup.c 82 rq->xsk_pool = pool; in mlx5e_init_xsk_rq()
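
The mlx5e_xsk_alloc_rx_mpwqe() hits in rx.c above (lines 30–45) first gate on xsk_buff_can_alloc(), grab a batch with xsk_buff_alloc_batch(), then top up any shortfall one buffer at a time with xsk_buff_alloc(). A condensed sketch of that allocate-batch-then-fallback idiom, under a hypothetical my_fill_rx() helper:

#include <net/xdp_sock_drv.h>

/* Fill up to 'want' RX slots from the pool; returns how many were filled. */
static u32 my_fill_rx(struct xsk_buff_pool *pool, struct xdp_buff **bufs,
		      u32 want)
{
	u32 got;

	if (unlikely(!xsk_buff_can_alloc(pool, want)))
		return 0;	/* defer: not enough fill-queue entries yet */

	/* Fast path: one batched grab from the fill queue. */
	got = xsk_buff_alloc_batch(pool, bufs, want);

	/* Slow path: top up the remainder buffer by buffer. */
	while (got < want) {
		bufs[got] = xsk_buff_alloc(pool);
		if (!bufs[got])
			break;
		got++;
	}
	return got;
}
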
/linux-6.12.1/drivers/net/ethernet/netronome/nfp/nfd3/
xsk.c 21 struct xsk_buff_pool *pool = r_vec->xsk_pool; in nfp_nfd3_xsk_tx_xdp()
324 xsk_tx_completed(r_vec->xsk_pool, done_pkts - reused); in nfp_nfd3_xsk_complete()
337 struct xsk_buff_pool *xsk_pool; in nfp_nfd3_xsk_tx() local
342 xsk_pool = r_vec->xsk_pool; in nfp_nfd3_xsk_tx()
346 if (!xsk_tx_peek_desc(xsk_pool, &desc[i])) in nfp_nfd3_xsk_tx()
356 xsk_buff_raw_dma_sync_for_device(xsk_pool, desc[i].addr, in nfp_nfd3_xsk_tx()
368 xsk_buff_raw_get_dma(xsk_pool, desc[i].addr)); in nfp_nfd3_xsk_tx()
381 xsk_tx_release(xsk_pool); in nfp_nfd3_xsk_tx()
rings.c 25 if (tx_ring->r_vec->xsk_pool) { in nfp_nfd3_xsk_tx_bufs_free()
29 xsk_tx_completed(tx_ring->r_vec->xsk_pool, 1); in nfp_nfd3_xsk_tx_bufs_free()
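
nfp_nfd3_xsk_tx() (lines 337–381) is the classic descriptor-at-a-time zero-copy TX loop: peek descriptors off the socket's TX ring, resolve each umem address to a DMA address, sync it for the device, and finish with a single xsk_tx_release() to publish the consumed entries. A sketch, with the device-specific descriptor write left as a hypothetical stub:

#include <net/xdp_sock_drv.h>

static void my_hw_tx(dma_addr_t dma, u32 len)
{
	/* hypothetical: write one hardware TX descriptor */
}

static u32 my_xsk_tx(struct xsk_buff_pool *pool, u32 budget)
{
	struct xdp_desc desc;
	u32 sent = 0;

	while (sent < budget && xsk_tx_peek_desc(pool, &desc)) {
		dma_addr_t dma = xsk_buff_raw_get_dma(pool, desc.addr);

		/* The CPU filled this umem frame; hand it to the device. */
		xsk_buff_raw_dma_sync_for_device(pool, dma, desc.len);
		my_hw_tx(dma, desc.len);
		sent++;
	}
	if (sent)
		xsk_tx_release(pool);	/* make the peeked entries visible */
	return sent;
}
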
/linux-6.12.1/drivers/net/ethernet/mellanox/mlx5/core/
en_txrx.c 90 bool need_wakeup = xsk_uses_need_wakeup(xskrq->xsk_pool); in mlx5e_napi_xsk_post()
98 xsk_set_tx_need_wakeup(xsksq->xsk_pool); in mlx5e_napi_xsk_post()
102 xsk_clear_tx_need_wakeup(xsksq->xsk_pool); in mlx5e_napi_xsk_post()
108 xsk_set_rx_need_wakeup(xskrq->xsk_pool); in mlx5e_napi_xsk_post()
117 xsk_set_rx_need_wakeup(xskrq->xsk_pool); in mlx5e_napi_xsk_post()
119 xsk_clear_rx_need_wakeup(xskrq->xsk_pool); in mlx5e_napi_xsk_post()
en_main.c 565 u32 xsk_chunk_size = rq->xsk_pool ? rq->xsk_pool->chunk_size : 0; in mlx5e_create_rq_umr_mkey()
610 WARN_ON(rq->xsk_pool); in mlx5e_init_frags_partition()
673 if (rq->xsk_pool) in mlx5e_init_wqe_alloc_info()
691 if (rq->xsk_pool) in mlx5e_init_wqe_alloc_info()
922 xsk_pool_set_rxq_info(rq->xsk_pool, &rq->xdp_rxq); in mlx5e_alloc_rq()
1449 struct xsk_buff_pool *xsk_pool, in mlx5e_alloc_xdpsq() argument
1465 sq->xsk_pool = xsk_pool; in mlx5e_alloc_xdpsq()
1467 sq->stats = sq->xsk_pool ? in mlx5e_alloc_xdpsq()
2015 struct mlx5e_sq_param *param, struct xsk_buff_pool *xsk_pool, in mlx5e_open_xdpsq() argument
2021 err = mlx5e_alloc_xdpsq(c, params, xsk_pool, param, sq, is_redirect); in mlx5e_open_xdpsq()
[all …]
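
mlx5e_napi_xsk_post() in en_txrx.c above (lines 90–119) covers the TX half of the need_wakeup handshake: after posting, the driver either arms xsk_set_tx_need_wakeup() so user space will call sendmsg() to kick the queue, or clears it while NAPI keeps polling. A sketch, where busy is a hypothetical "ring still has work queued" flag:

#include <net/xdp_sock_drv.h>

static void my_tx_poll_done(struct xsk_buff_pool *pool, bool busy)
{
	if (!xsk_uses_need_wakeup(pool))
		return;

	if (busy)
		xsk_clear_tx_need_wakeup(pool);	/* NAPI will reschedule itself */
	else
		xsk_set_tx_need_wakeup(pool);	/* user space must kick the queue */
}
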
/linux-6.12.1/drivers/net/ethernet/freescale/dpaa2/
dpaa2-xsk.c 49 ch->xsk_pool->umem->headroom); in dpaa2_xsk_run_xdp()
183 if (!ch->xsk_pool) in dpaa2_xsk_disable_pool()
200 ch->xsk_pool = NULL; in dpaa2_xsk_disable_pool()
265 ch->xsk_pool = pool; in dpaa2_xsk_enable_pool()
354 addr = xsk_buff_raw_get_dma(ch->xsk_pool, xdp_desc->addr); in dpaa2_xsk_tx_build_fd()
355 xsk_buff_raw_dma_sync_for_device(ch->xsk_pool, addr, xdp_desc->len); in dpaa2_xsk_tx_build_fd()
393 struct xdp_desc *xdp_descs = ch->xsk_pool->tx_descs; in dpaa2_xsk_tx()
410 batch = xsk_tx_peek_release_desc_batch(ch->xsk_pool, budget); in dpaa2_xsk_tx()
451 xsk_tx_release(ch->xsk_pool); in dpaa2_xsk_tx()
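
dpaa2_xsk_tx() (lines 393–451) uses the batched TX variant instead: xsk_tx_peek_release_desc_batch() fills the pool's preallocated tx_descs array and releases the consumed entries in one call, so the driver only walks the array. A sketch of that shape, with the per-descriptor hardware work elided:

#include <net/xdp_sock_drv.h>

static u32 my_xsk_tx_batch(struct xsk_buff_pool *pool, u32 budget)
{
	/* tx_descs is allocated by the AF_XDP core for pools with a TX ring. */
	struct xdp_desc *descs = pool->tx_descs;
	u32 i, batch;

	batch = xsk_tx_peek_release_desc_batch(pool, budget);
	for (i = 0; i < batch; i++) {
		dma_addr_t dma = xsk_buff_raw_get_dma(pool, descs[i].addr);

		xsk_buff_raw_dma_sync_for_device(pool, dma, descs[i].len);
		/* ...build and ring one hardware descriptor per entry... */
	}
	return batch;
}
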
/linux-6.12.1/drivers/net/ethernet/intel/ixgbe/
ixgbe_xsk.c 114 if (xsk_uses_need_wakeup(rx_ring->xsk_pool) && err == -ENOBUFS) in ixgbe_run_xdp_zc()
168 bi->xdp = xsk_buff_alloc(rx_ring->xsk_pool); in ixgbe_alloc_rx_buffers_zc()
364 if (xsk_uses_need_wakeup(rx_ring->xsk_pool)) { in ixgbe_clean_rx_irq_zc()
366 xsk_set_rx_need_wakeup(rx_ring->xsk_pool); in ixgbe_clean_rx_irq_zc()
368 xsk_clear_rx_need_wakeup(rx_ring->xsk_pool); in ixgbe_clean_rx_irq_zc()
393 struct xsk_buff_pool *pool = xdp_ring->xsk_pool; in ixgbe_xmit_zc()
461 struct xsk_buff_pool *pool = tx_ring->xsk_pool; in ixgbe_clean_xdp_tx_irq()
528 if (!ring->xsk_pool) in ixgbe_xsk_wakeup()
543 struct xsk_buff_pool *pool = tx_ring->xsk_pool; in ixgbe_xsk_clean_tx_ring()
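
ixgbe_xsk_wakeup() (line 528) rejects wakeups for queues with no pool bound. An ndo_xsk_wakeup implementation typically validates the queue id and the pool, then schedules NAPI. A sketch of those guard checks; the my_ring layout and my_get_ring() lookup are hypothetical:

#include <linux/netdevice.h>
#include <net/xdp_sock_drv.h>

struct my_ring {
	struct xsk_buff_pool *xsk_pool;
	struct napi_struct *napi;
};

static struct my_ring *my_get_ring(struct net_device *dev, u32 qid); /* hypothetical lookup */

static int my_xsk_wakeup(struct net_device *dev, u32 qid, u32 flags)
{
	struct my_ring *ring;

	if (qid >= dev->real_num_tx_queues)
		return -EINVAL;

	ring = my_get_ring(dev, qid);
	if (!ring->xsk_pool)
		return -ENXIO;		/* no AF_XDP socket bound to this queue */

	if (!napi_if_scheduled_mark_missed(ring->napi))
		napi_schedule(ring->napi);
	return 0;
}
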
/linux-6.12.1/drivers/net/ethernet/engleder/
tsnep_main.c 761 dma = xsk_buff_raw_get_dma(tx->xsk_pool, xdpd->addr); in tsnep_xdp_tx_map_zc()
762 xsk_buff_raw_dma_sync_for_device(tx->xsk_pool, dma, xdpd->len); in tsnep_xdp_tx_map_zc()
786 struct xdp_desc *descs = tx->xsk_pool->tx_descs; in tsnep_xdp_xmit_zc()
797 batch = xsk_tx_peek_release_desc_batch(tx->xsk_pool, desc_available); in tsnep_xdp_xmit_zc()
886 if (tx->xsk_pool) { in tsnep_tx_poll()
888 xsk_tx_completed(tx->xsk_pool, xsk_frames); in tsnep_tx_poll()
889 if (xsk_uses_need_wakeup(tx->xsk_pool)) in tsnep_tx_poll()
890 xsk_set_tx_need_wakeup(tx->xsk_pool); in tsnep_tx_poll()
952 if (!rx->xsk_pool && entry->page) in tsnep_rx_ring_cleanup()
955 if (rx->xsk_pool && entry->xdp) in tsnep_rx_ring_cleanup()
[all …]
tsnep.h 93 struct xsk_buff_pool *xsk_pool; member
131 struct xsk_buff_pool *xsk_pool; member
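
tsnep_tx_poll() (lines 886–890) shows the completion side: after reaping hardware completions, the driver credits the socket with xsk_tx_completed() so user space can reuse those umem frames, then re-arms the TX wakeup flag. A sketch:

#include <net/xdp_sock_drv.h>

/* Called after reaping 'frames' completed zero-copy TX descriptors. */
static void my_tx_complete(struct xsk_buff_pool *pool, u32 frames)
{
	if (frames)
		xsk_tx_completed(pool, frames);	/* frames return to user space */
	if (xsk_uses_need_wakeup(pool))
		xsk_set_tx_need_wakeup(pool);
}
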
/linux-6.12.1/drivers/net/ethernet/netronome/nfp/
nfp_net_xsk.c 22 headroom = xsk_pool_get_headroom(rx_ring->r_vec->xsk_pool); in nfp_net_xsk_rx_bufs_stash()
60 struct xsk_buff_pool *pool = r_vec->xsk_pool; in nfp_net_xsk_rx_ring_fill_freelist()
nfp_net_debugfs.c 46 if (!r_vec->xsk_pool) { in nfp_rx_q_show()
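
nfp_net_xsk_rx_bufs_stash() (line 22) queries the pool's configured headroom so RX descriptors point past the space user space reserved in each umem frame. A sketch of deriving RX geometry from the pool, via a hypothetical helper:

#include <net/xdp_sock_drv.h>

/* Hypothetical helper: where the device may start writing a frame,
 * and how much of it one RX descriptor may fill. */
static void my_rx_geometry(struct xsk_buff_pool *pool,
			   u32 *headroom, u32 *frame_size)
{
	*headroom = xsk_pool_get_headroom(pool);
	*frame_size = xsk_pool_get_rx_frame_size(pool);
}
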
/linux-6.12.1/drivers/net/ethernet/google/gve/
gve_tx.c 188 if (xsk_complete > 0 && tx->xsk_pool) in gve_clean_xdp_done()
189 xsk_tx_completed(tx->xsk_pool, xsk_complete); in gve_clean_xdp_done()
959 if (!xsk_tx_peek_desc(tx->xsk_pool, &desc)) { in gve_xsk_tx()
964 data = xsk_buff_raw_get_data(tx->xsk_pool, desc.addr); in gve_xsk_tx()
972 xsk_tx_release(tx->xsk_pool); in gve_xsk_tx()
992 if (tx->xsk_pool) { in gve_xdp_poll()
999 if (xsk_uses_need_wakeup(tx->xsk_pool)) in gve_xdp_poll()
1000 xsk_set_tx_need_wakeup(tx->xsk_pool); in gve_xdp_poll()
gve_main.c 1212 rx->xsk_pool = xsk_get_pool_from_qid(dev, i); in gve_reg_xdp_info()
1213 if (rx->xsk_pool) { in gve_reg_xdp_info()
1222 xsk_pool_set_rxq_info(rx->xsk_pool, in gve_reg_xdp_info()
1229 priv->tx[tx_qid].xsk_pool = xsk_get_pool_from_qid(dev, i); in gve_reg_xdp_info()
1255 if (rx->xsk_pool) { in gve_unreg_xdp_info()
1257 rx->xsk_pool = NULL; in gve_unreg_xdp_info()
1263 priv->tx[tx_qid].xsk_pool = NULL; in gve_unreg_xdp_info()
1642 rx->xsk_pool = pool; in gve_xsk_pool_enable()
1645 priv->tx[tx_qid].xsk_pool = pool; in gve_xsk_pool_enable()
1678 priv->rx[qid].xsk_pool = NULL; in gve_xsk_pool_disable()
[all …]
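
gve_reg_xdp_info() (lines 1212–1229) looks the pool up by queue id when (re)registering XDP state rather than relying only on the cached pointer: xsk_get_pool_from_qid() returns the pool bound to that queue, or NULL. A sketch of that step, with hypothetical my_rx ring fields and an xdp_rxq assumed already registered:

#include <linux/netdevice.h>
#include <net/xdp.h>
#include <net/xdp_sock_drv.h>

struct my_rx {
	struct xsk_buff_pool *xsk_pool;
	struct xdp_rxq_info xdp_rxq;	/* assumed already registered */
};

static void my_reg_xsk(struct net_device *dev, struct my_rx *rx, u16 qid)
{
	/* NULL when no AF_XDP socket is bound to this queue id. */
	rx->xsk_pool = xsk_get_pool_from_qid(dev, qid);
	if (rx->xsk_pool)
		xsk_pool_set_rxq_info(rx->xsk_pool, &rx->xdp_rxq);
}
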
/linux-6.12.1/drivers/net/ethernet/stmicro/stmmac/
stmmac_main.c 239 if (rx_q->xsk_pool) { in stmmac_disable_all_queues()
1648 buf->xdp = xsk_buff_alloc(rx_q->xsk_pool); in stmmac_alloc_rx_buffers_zc()
1693 rx_q->xsk_pool = stmmac_get_xsk_pool(priv, queue); in __init_dma_rx_desc_rings()
1695 if (rx_q->xsk_pool) { in __init_dma_rx_desc_rings()
1702 xsk_pool_set_rxq_info(rx_q->xsk_pool, &rx_q->xdp_rxq); in __init_dma_rx_desc_rings()
1712 if (rx_q->xsk_pool) { in __init_dma_rx_desc_rings()
1763 if (rx_q->xsk_pool) in init_dma_rx_desc_rings()
1769 rx_q->xsk_pool = NULL; in init_dma_rx_desc_rings()
1809 tx_q->xsk_pool = stmmac_get_xsk_pool(priv, queue); in __init_dma_tx_desc_rings()
1896 if (tx_q->xsk_pool && tx_q->xsk_frames_done) { in dma_free_tx_skbufs()
[all …]
stmmac.h 76 struct xsk_buff_pool *xsk_pool; member
120 struct xsk_buff_pool *xsk_pool; member
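
__init_dma_rx_desc_rings() (lines 1693–1702), like the ice_vsi_cfg_rxq() and igc_configure_rx_ring() hits above, also sizes the hardware RX buffer from the pool: with a pool bound, the DMA buffer length must match what one umem frame can hold, as reported by xsk_pool_get_rx_frame_size(). A sketch of picking the buffer size at ring-configuration time; the default constant is hypothetical:

#include <net/xdp_sock_drv.h>

#define MY_DEFAULT_RX_BUF_LEN	2048	/* hypothetical page-based buffer size */

static u32 my_rx_buf_len(struct xsk_buff_pool *pool)
{
	/* Zero-copy: hardware may only write what fits in one umem
	 * chunk after headroom and alignment are accounted for. */
	if (pool)
		return xsk_pool_get_rx_frame_size(pool);
	return MY_DEFAULT_RX_BUF_LEN;
}
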
/linux-6.12.1/drivers/net/ethernet/intel/igc/
igc_ptp.c 760 struct xsk_buff_pool *xsk_pool; in igc_ptp_tx_reg_to_stamp() local
762 xsk_pool = adapter->tx_ring[tstamp->xsk_queue_index]->xsk_pool; in igc_ptp_tx_reg_to_stamp()
763 if (xsk_pool && xp_tx_metadata_enabled(xsk_pool)) { in igc_ptp_tx_reg_to_stamp()
igc_main.c 251 if (tx_ring->xsk_pool && xsk_frames) in igc_clean_tx_ring()
252 xsk_tx_completed(tx_ring->xsk_pool, xsk_frames); in igc_clean_tx_ring()
464 if (ring->xsk_pool) in igc_clean_rx_ring()
636 ring->xsk_pool = igc_get_xsk_pool(adapter, ring); in igc_configure_rx_ring()
637 if (ring->xsk_pool) { in igc_configure_rx_ring()
641 xsk_pool_set_rxq_info(ring->xsk_pool, &ring->xdp_rxq); in igc_configure_rx_ring()
670 if (ring->xsk_pool) in igc_configure_rx_ring()
671 buf_size = xsk_pool_get_rx_frame_size(ring->xsk_pool); in igc_configure_rx_ring()
736 ring->xsk_pool = igc_get_xsk_pool(adapter, ring); in igc_configure_tx_ring()
2312 bi->xdp = xsk_buff_alloc(ring->xsk_pool); in igc_alloc_rx_buffers_zc()
[all …]
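
igc_ptp_tx_reg_to_stamp() (lines 760–763) consults xp_tx_metadata_enabled() before reporting a hardware TX timestamp through AF_XDP TX metadata: completions are only written back into the umem when the socket opted in at bind time. A sketch of that gate; the completion hook is hypothetical, standing in for the driver's metadata write-back:

#include <net/xdp_sock_drv.h>

/* Hypothetical hook that writes the timestamp into the frame's
 * TX metadata completion area. */
static void my_tx_metadata_complete(struct xsk_buff_pool *pool, u64 hw_ts);

static void my_report_tx_ts(struct xsk_buff_pool *pool, u64 hw_ts)
{
	if (!pool || !xp_tx_metadata_enabled(pool))
		return;		/* socket did not opt into TX metadata */

	my_tx_metadata_complete(pool, hw_ts);
}
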
