Searched refs:mpwqe (Results 1 – 13 of 13) sorted by relevance
/linux-6.12.1/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/
rx.c
     30  if (unlikely(!xsk_buff_can_alloc(rq->xsk_pool, rq->mpwqe.pages_per_wqe)))  in mlx5e_xsk_alloc_rx_mpwqe()
     36  rq->mpwqe.pages_per_wqe);  in mlx5e_xsk_alloc_rx_mpwqe()
     44  for (; batch < rq->mpwqe.pages_per_wqe; batch++) {  in mlx5e_xsk_alloc_rx_mpwqe()
     50  pi = mlx5e_icosq_get_next_pi(icosq, rq->mpwqe.umr_wqebbs);  in mlx5e_xsk_alloc_rx_mpwqe()
     52  memcpy(umr_wqe, &rq->mpwqe.umr_wqe, sizeof(struct mlx5e_umr_wqe));  in mlx5e_xsk_alloc_rx_mpwqe()
     54  if (likely(rq->mpwqe.umr_mode == MLX5E_MPWRQ_UMR_MODE_ALIGNED)) {  in mlx5e_xsk_alloc_rx_mpwqe()
     64  } else if (unlikely(rq->mpwqe.umr_mode == MLX5E_MPWRQ_UMR_MODE_UNALIGNED)) {  in mlx5e_xsk_alloc_rx_mpwqe()
     75  } else if (likely(rq->mpwqe.umr_mode == MLX5E_MPWRQ_UMR_MODE_TRIPLE)) {  in mlx5e_xsk_alloc_rx_mpwqe()
     76  u32 mapping_size = 1 << (rq->mpwqe.page_shift - 2);  in mlx5e_xsk_alloc_rx_mpwqe()
    101  __be32 pad_size = cpu_to_be32((1 << rq->mpwqe.page_shift) -  in mlx5e_xsk_alloc_rx_mpwqe()
  [all …]
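The rx.c hits trace a batch-first allocation scheme: mlx5e_xsk_alloc_rx_mpwqe() first checks that the XSK pool can supply pages_per_wqe buffers at once, takes as many as it can in a single batch, then tops up the shortfall one buffer at a time (the for-loop at line 44) before building the UMR WQE for one of three UMR modes. Below is a minimal userspace C sketch of that batch-then-top-up pattern; alloc_batch() and alloc_one() are hypothetical stand-ins for the XSK pool allocators, not driver API.

    #include <stddef.h>
    #include <stdlib.h>

    #define PAGES_PER_WQE 8   /* illustrative; the driver derives this per device */

    /* Hypothetical batch allocator: fills bufs[0..max) but may stop short,
     * returning how many buffers it actually delivered. */
    static size_t alloc_batch(void **bufs, size_t max)
    {
            size_t i;

            for (i = 0; i < max; i++) {
                    bufs[i] = malloc(64);
                    if (!bufs[i])
                            return i;  /* partial batch */
            }
            return max;
    }

    /* Hypothetical single-buffer fallback. */
    static void *alloc_one(void)
    {
            return malloc(64);
    }

    /* Batch first, then top up one by one; unwind everything on failure. */
    static int alloc_pages_for_wqe(void **bufs)
    {
            size_t batch = alloc_batch(bufs, PAGES_PER_WQE);

            for (; batch < PAGES_PER_WQE; batch++) {
                    bufs[batch] = alloc_one();
                    if (!bufs[batch])
                            goto err_unwind;
            }
            return 0;

    err_unwind:
            while (batch--)
                    free(bufs[batch]);
            return -1;
    }

    int main(void)
    {
            void *bufs[PAGES_PER_WQE];
            int err = alloc_pages_for_wqe(bufs);
            size_t i;

            if (!err)
                    for (i = 0; i < PAGES_PER_WQE; i++)
                            free(bufs[i]);
            return err ? 1 : 0;
    }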
tx.c
    108  if (sq->mpwqe.wqe)  in mlx5e_xsk_tx()
    130  if (sq->mpwqe.wqe)  in mlx5e_xsk_tx()
/linux-6.12.1/drivers/net/ethernet/mellanox/mlx5/core/
en_rx.c
    566  if (bitmap_full(wi->skip_release_bitmap, rq->mpwqe.pages_per_wqe))  in mlx5e_free_rx_mpwqe()
    569  no_xdp_xmit = bitmap_empty(wi->skip_release_bitmap, rq->mpwqe.pages_per_wqe);  in mlx5e_free_rx_mpwqe()
    578  for (i = 0; i < rq->mpwqe.pages_per_wqe; i++)  in mlx5e_free_rx_mpwqe()
    582  for (i = 0; i < rq->mpwqe.pages_per_wqe; i++) {  in mlx5e_free_rx_mpwqe()
    595  struct mlx5_wq_ll *wq = &rq->mpwqe.wq;  in mlx5e_post_rx_mpwqe()
    650  struct mlx5e_shampo_hd *shampo = rq->mpwqe.shampo;  in mlx5e_build_shampo_hd_umr()
    729  struct mlx5e_shampo_hd *shampo = rq->mpwqe.shampo;  in mlx5e_alloc_rx_hd_mpwqe()
    760  index = (index + len) & (rq->mpwqe.shampo->hd_per_wq - 1);  in mlx5e_alloc_rx_hd_mpwqe()
    785  pi = mlx5e_icosq_get_next_pi(sq, rq->mpwqe.umr_wqebbs);  in mlx5e_alloc_rx_mpwqe()
    787  memcpy(umr_wqe, &rq->mpwqe.umr_wqe, sizeof(struct mlx5e_umr_wqe));  in mlx5e_alloc_rx_mpwqe()
  [all …]
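The mlx5e_free_rx_mpwqe() hits show per-WQE page bookkeeping with a bitmap: each of the pages_per_wqe pages has a bit in skip_release_bitmap, and the free path releases only pages whose bit is clear (a set bit meaning, for instance, that an in-flight XDP transmit still owns the page). A small self-contained C sketch of that conditional-release idea follows; the struct layout and the unsigned-long bitmap are illustrative, not the driver's real types.

    #include <stdlib.h>

    #define PAGES_PER_WQE 8   /* illustrative; must fit in an unsigned long */

    struct wqe_info {
            unsigned long skip_release_bitmap;   /* bit i set => leave page i alone */
            void *pages[PAGES_PER_WQE];
    };

    /* True when every one of the low n bits is set. */
    static int bitmap_full_n(unsigned long map, unsigned int n)
    {
            unsigned long mask = (1UL << n) - 1;

            return (map & mask) == mask;
    }

    /* Release only the pages nothing else has claimed, mirroring the
     * skip_release_bitmap checks in mlx5e_free_rx_mpwqe(). */
    static void free_rx_wqe(struct wqe_info *wi)
    {
            unsigned int i;

            if (bitmap_full_n(wi->skip_release_bitmap, PAGES_PER_WQE))
                    return;            /* all pages claimed elsewhere */

            for (i = 0; i < PAGES_PER_WQE; i++) {
                    if (wi->skip_release_bitmap & (1UL << i))
                            continue;  /* claimed; skip the release */
                    free(wi->pages[i]);
                    wi->pages[i] = NULL;
            }
    }

    int main(void)
    {
            struct wqe_info wi = { 0 };
            unsigned int i;

            for (i = 0; i < PAGES_PER_WQE; i++)
                    wi.pages[i] = malloc(64);
            wi.skip_release_bitmap = 1UL << 3;   /* pretend page 3 went to XDP_TX */
            free_rx_wqe(&wi);
            free(wi.pages[3]);                   /* its new owner releases it later */
            return 0;
    }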
en_main.c
    319  ds_cnt = DIV_ROUND_UP(mlx5e_mpwrq_umr_wqe_sz(rq->mdev, rq->mpwqe.page_shift,  in mlx5e_build_umr_wqe()
    320  rq->mpwqe.umr_mode),  in mlx5e_build_umr_wqe()
    325  cseg->umr_mkey = rq->mpwqe.umr_mkey_be;  in mlx5e_build_umr_wqe()
    328  octowords = mlx5e_mpwrq_umr_octowords(rq->mpwqe.pages_per_wqe, rq->mpwqe.umr_mode);  in mlx5e_build_umr_wqe()
    335  rq->mpwqe.shampo = kvzalloc_node(sizeof(*rq->mpwqe.shampo),  in mlx5e_rq_shampo_hd_alloc()
    337  if (!rq->mpwqe.shampo)  in mlx5e_rq_shampo_hd_alloc()
    344  kvfree(rq->mpwqe.shampo);  in mlx5e_rq_shampo_hd_free()
    349  struct mlx5e_shampo_hd *shampo = rq->mpwqe.shampo;  in mlx5e_rq_shampo_hd_info_alloc()
    374  kvfree(rq->mpwqe.shampo->bitmap);  in mlx5e_rq_shampo_hd_info_free()
    375  kvfree(rq->mpwqe.shampo->info);  in mlx5e_rq_shampo_hd_info_free()
  [all …]
en_tx.c
    517  struct mlx5e_tx_mpwqe *session = &sq->mpwqe;  in mlx5e_tx_mpwqe_same_eseg()
    526  struct mlx5e_tx_mpwqe *session = &sq->mpwqe;  in mlx5e_tx_mpwqe_session_start()
    549  return sq->mpwqe.wqe;  in mlx5e_tx_mpwqe_session_is_active()
    554  struct mlx5e_tx_mpwqe *session = &sq->mpwqe;  in mlx5e_tx_mpwqe_add_dseg()
    572  struct mlx5e_tx_mpwqe *session = &sq->mpwqe;  in mlx5e_tx_mpwqe_session_complete()
    629  if (unlikely(mlx5e_tx_mpwqe_is_full(&sq->mpwqe, sq->max_sq_mpw_wqebbs))) {  in mlx5e_sq_xmit_mpwqe()
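Read together, these helpers outline the MPWQE send-side session lifecycle: a session counts as active while sq->mpwqe.wqe is non-NULL (line 549), packets are appended as descriptor segments, and the session is flushed once mlx5e_tx_mpwqe_is_full() trips. The xdp.c and xdp.h hits further down follow the same open/append/flush shape. A self-contained C sketch of the pattern, with illustrative types and a fixed cap in place of the driver's WQEBB accounting:

    #include <stdbool.h>
    #include <stddef.h>
    #include <stdio.h>

    #define MAX_DSEGS 4                    /* illustrative cap, not the WQEBB math */

    struct dseg { const void *data; size_t len; };

    struct mpwqe_session {
            struct dseg *wqe;              /* non-NULL while a session is open */
            struct dseg dsegs[MAX_DSEGS];
            size_t ds_count;
    };

    static bool session_is_active(const struct mpwqe_session *s)
    {
            return s->wqe != NULL;         /* same test the driver's hit at 549 uses */
    }

    static void session_start(struct mpwqe_session *s)
    {
            s->wqe = s->dsegs;             /* mark the session open */
            s->ds_count = 0;
    }

    static bool session_is_full(const struct mpwqe_session *s)
    {
            return s->ds_count == MAX_DSEGS;
    }

    static void session_complete(struct mpwqe_session *s)
    {
            printf("flush WQE carrying %zu packets\n", s->ds_count);
            s->wqe = NULL;                 /* mark the session closed */
    }

    /* Queue one packet: open a session lazily, append, flush when full. */
    static void xmit(struct mpwqe_session *s, const void *data, size_t len)
    {
            if (!session_is_active(s))
                    session_start(s);

            s->dsegs[s->ds_count++] = (struct dseg){ data, len };

            if (session_is_full(s))
                    session_complete(s);
    }

    int main(void)
    {
            struct mpwqe_session s = { 0 };
            char pkt[64] = { 0 };
            int i;

            for (i = 0; i < 10; i++)
                    xmit(&s, pkt, sizeof(pkt));
            if (session_is_active(&s))
                    session_complete(&s);  /* flush the partial tail session */
            return 0;
    }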
en.h
    423  struct mlx5e_tx_mpwqe mpwqe;  (member)
    489  struct mlx5e_tx_mpwqe mpwqe;  (member)
    681  } mpwqe;  (member)
en_stats.c
    285  s->tx_xdp_mpwqe += xdpsq_red_stats->mpwqe;  in mlx5e_stats_grp_sw_update_stats_xdp_red()
    297  s->rx_xdp_tx_mpwqe += xdpsq_stats->mpwqe;  in mlx5e_stats_grp_sw_update_stats_xdpsq()
    309  s->tx_xsk_mpwqe += xsksq_stats->mpwqe;  in mlx5e_stats_grp_sw_update_stats_xsksq()
   2153  { MLX5E_DECLARE_RQ_XDPSQ_STAT(struct mlx5e_xdpsq_stats, mpwqe) },
   2163  { MLX5E_DECLARE_XDPSQ_STAT(struct mlx5e_xdpsq_stats, mpwqe) },
   2194  { MLX5E_DECLARE_XSKSQ_STAT(struct mlx5e_xdpsq_stats, mpwqe) },
en_stats.h
    451  u64 mpwqe;  (member)
/linux-6.12.1/drivers/net/ethernet/mellanox/mlx5/core/en/
txrx.h
    221  return be16_to_cpu(cqe->shampo.header_entry_index) & (rq->mpwqe.shampo->hd_per_wq - 1);  in mlx5e_shampo_get_cqe_header_index()
    369  mlx5_wq_ll_reset(&rq->mpwqe.wq);  in mlx5e_rqwq_reset()
    370  rq->mpwqe.actual_wq_head = 0;  in mlx5e_rqwq_reset()
    396  return mlx5_wq_ll_get_size(&rq->mpwqe.wq);  in mlx5e_rqwq_get_size()
    406  return rq->mpwqe.wq.cur_sz;  in mlx5e_rqwq_get_cur_sz()
    416  return mlx5_wq_ll_get_head(&rq->mpwqe.wq);  in mlx5e_rqwq_get_head()
    426  return mlx5_wq_ll_get_counter(&rq->mpwqe.wq);  in mlx5e_rqwq_get_wqe_counter()
    561  size_t isz = struct_size(rq->mpwqe.info, alloc_units.frag_pages, rq->mpwqe.pages_per_wqe);  in mlx5e_get_mpw_info()
    563  return (struct mlx5e_mpw_info *)((char *)rq->mpwqe.info + array_size(i, isz));  in mlx5e_get_mpw_info()
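Two idioms are worth calling out here. Line 221 wraps an index with & (hd_per_wq - 1), which is only valid because hd_per_wq is a power of two (the same trick appears at en_rx.c line 760 above). Lines 561–563 index an array whose element type ends in a flexible array member, so the element stride must be computed at runtime via struct_size() and the pointer arithmetic done by hand. A userspace C sketch of that second idiom, with an illustrative struct standing in for struct mlx5e_mpw_info:

    #include <stdio.h>
    #include <stdlib.h>

    /* Illustrative stand-in: a fixed header plus a flexible array whose
     * length (pages_per_wqe) is only known at runtime. */
    struct mpw_info {
            unsigned long skip_release_bitmap;
            int frag_pages[];              /* flexible array member */
    };

    /* Per-element stride; the userspace analogue of struct_size(). */
    static size_t mpw_info_size(size_t pages_per_wqe)
    {
            return sizeof(struct mpw_info) + pages_per_wqe * sizeof(int);
    }

    /* Index element i of a packed array of variable-sized elements,
     * mirroring the (char *)base + array_size(i, isz) arithmetic above. */
    static struct mpw_info *get_mpw_info(void *base, size_t i, size_t pages_per_wqe)
    {
            return (struct mpw_info *)((char *)base + i * mpw_info_size(pages_per_wqe));
    }

    int main(void)
    {
            size_t pages_per_wqe = 8, n = 4, i;
            void *base = calloc(n, mpw_info_size(pages_per_wqe));

            if (!base)
                    return 1;
            for (i = 0; i < n; i++)
                    get_mpw_info(base, i, pages_per_wqe)->frag_pages[0] = (int)i;
            printf("%d\n", get_mpw_info(base, 3, pages_per_wqe)->frag_pages[0]);
            free(base);
            return 0;
    }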
xdp.c
    380  struct mlx5e_tx_mpwqe *session = &sq->mpwqe;  in mlx5e_xdp_mpwqe_session_start()
    397  stats->mpwqe++;  in mlx5e_xdp_mpwqe_session_start()
    403  struct mlx5e_tx_mpwqe *session = &sq->mpwqe;  in mlx5e_xdp_mpwqe_complete()
    430  if (unlikely(!sq->mpwqe.wqe)) {  in mlx5e_xmit_xdp_frame_check_mpwqe()
    453  struct mlx5e_tx_mpwqe *session = &sq->mpwqe;  in mlx5e_xmit_xdp_frame_mpwqe()
    467  if (unlikely(sq->mpwqe.wqe))  in mlx5e_xmit_xdp_frame_mpwqe()
    944  if (sq->mpwqe.wqe)  in mlx5e_xdp_xmit()
    957  if (xdpsq->mpwqe.wqe)  in mlx5e_xdp_rx_poll_complete()
xdp.h
    204  struct mlx5e_tx_mpwqe *session = &sq->mpwqe;  in mlx5e_xdp_mpwqe_add_dseg()
params.c
    259  bool mpwqe)  in mlx5e_rx_get_linear_stride_sz()  (argument)
    268  return mpwqe ? 1 << mlx5e_mpwrq_page_shift(mdev, xsk) : PAGE_SIZE;  in mlx5e_rx_get_linear_stride_sz()
    270  no_head_tail_room = params->xdp_prog && mpwqe && !mlx5e_rx_is_linear_skb(mdev, params, xsk);  in mlx5e_rx_get_linear_stride_sz()
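At line 268 the linear stride works out to a full page either way: in the MPWQE case it is 1 << mlx5e_mpwrq_page_shift(mdev, xsk), e.g. 1 << 12 = 4096 bytes for the common 4 KiB page shift, while the non-MPWQE branch falls back to PAGE_SIZE directly.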
/linux-6.12.1/Documentation/networking/device_drivers/ethernet/mellanox/mlx5/
counters.rst
    361  - The number of send blocks processed from Multi-Packet WQEs (mpwqe).
    365  - The number of send packets processed from Multi-Packet WQEs (mpwqe).
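These two descriptions pair with the u64 mpwqe member in en_stats.h and the accumulation in en_stats.c above; the MLX5E_DECLARE_*_STAT entries suggest the counters surface per queue in the driver's ethtool statistics (names along the lines of rx_xdp_tx_mpwqe and tx_xsk_mpwqe), which is how they are typically sampled from userspace.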