/linux-6.12.1/drivers/net/ethernet/intel/idpf/

idpf_ethtool.c
    24   cmd->data = vport->num_rxq;   in idpf_get_rxnfc()
    189  u16 num_txq, num_rxq;   in idpf_get_channels()  (local)
    195  num_rxq = vport_config->user_config.num_req_rx_qs;   in idpf_get_channels()
    197  combined = min(num_txq, num_rxq);   in idpf_get_channels()
    209  ch->rx_count = num_rxq - combined;   in idpf_get_channels()
    228  u16 num_txq, num_rxq;   in idpf_set_channels()  (local)
    245  num_rxq = vport_config->user_config.num_req_rx_qs;   in idpf_set_channels()
    268  if (num_req_tx_q == num_txq && num_req_rx_q == num_rxq)   in idpf_set_channels()
    278  vport_config->user_config.num_req_rx_qs = num_rxq;   in idpf_set_channels()
    787  u16 num_rxq;   in idpf_collect_queue_stats()  (local)
    [all …]

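The idpf_get_channels() hits at lines 197 and 209 sketch how the driver maps its queue counts onto the ethtool channel report: min(num_txq, num_rxq) is reported as combined channels and the remainder as dedicated ones. The standalone C below reproduces that arithmetic under that assumption; report_channels() and struct chan_report are invented names, not idpf identifiers.

/*
 * Minimal sketch (not idpf source) of the ethtool channel-count math implied
 * by the idpf_get_channels() hits above.
 */
#include <stdio.h>

struct chan_report {
        unsigned int combined;
        unsigned int rx_only;
        unsigned int tx_only;
};

static struct chan_report report_channels(unsigned int num_txq,
                                          unsigned int num_rxq)
{
        unsigned int combined = num_txq < num_rxq ? num_txq : num_rxq;

        return (struct chan_report){
                .combined = combined,           /* Tx/Rx pairs */
                .rx_only = num_rxq - combined,  /* as at line 209 above */
                .tx_only = num_txq - combined,
        };
}

int main(void)
{
        struct chan_report r = report_channels(8, 12);

        printf("combined=%u rx_only=%u tx_only=%u\n",
               r.combined, r.rx_only, r.tx_only);
        return 0;
}
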
idpf_txrx.c
    528  u16 num_rxq;   in idpf_rx_desc_rel_all()  (local)
    538  for (j = 0; j < rx_qgrp->singleq.num_rxq; j++)   in idpf_rx_desc_rel_all()
    544  num_rxq = rx_qgrp->splitq.num_rxq_sets;   in idpf_rx_desc_rel_all()
    545  for (j = 0; j < num_rxq; j++)   in idpf_rx_desc_rel_all()
    820  int num_rxq = rx_qgrp->singleq.num_rxq;   in idpf_rx_bufs_init_all()  (local)
    822  for (j = 0; j < num_rxq; j++) {   in idpf_rx_bufs_init_all()
    925  u16 num_rxq;   in idpf_rx_desc_alloc_all()  (local)
    930  num_rxq = rx_qgrp->splitq.num_rxq_sets;   in idpf_rx_desc_alloc_all()
    932  num_rxq = rx_qgrp->singleq.num_rxq;   in idpf_rx_desc_alloc_all()
    934  for (j = 0; j < num_rxq; j++) {   in idpf_rx_desc_alloc_all()
    [all …]

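The idpf_rx_desc_rel_all() and idpf_rx_desc_alloc_all() hits (lines 930/932 in particular) repeat one pattern: the per-group loop bound is splitq.num_rxq_sets in split-queue mode and singleq.num_rxq otherwise. The sketch below shows only that selection; struct rxq_group and walk_group_rxqs() are stand-ins invented for the example, not idpf types.

/*
 * Illustrative stand-in (not idpf code) for the loop-bound selection seen in
 * idpf_rx_desc_rel_all()/idpf_rx_desc_alloc_all().
 */
#include <stdbool.h>
#include <stdio.h>

struct rxq_group {
        bool splitq_model;              /* split-queue vs. single-queue vport */
        unsigned int splitq_num_rxq_sets;
        unsigned int singleq_num_rxq;
};

static void walk_group_rxqs(const struct rxq_group *grp)
{
        unsigned int num_rxq = grp->splitq_model ? grp->splitq_num_rxq_sets
                                                 : grp->singleq_num_rxq;

        for (unsigned int j = 0; j < num_rxq; j++)
                printf("queue %u\n", j);        /* per-queue alloc/release here */
}

int main(void)
{
        struct rxq_group grp = {
                .splitq_model = true,
                .splitq_num_rxq_sets = 4,
        };

        walk_group_rxqs(&grp);
        return 0;
}
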
idpf_virtchnl.c
    1112  u16 num_rxq = rx_qgrp->singleq.num_rxq;   in __idpf_queue_reg_init()  (local)
    1114  for (j = 0; j < num_rxq && k < num_regs; j++, k++) {   in __idpf_queue_reg_init()
    1212  if (num_regs < vport->num_rxq) {   in idpf_queue_reg_init()
    1219  if (num_regs < vport->num_rxq) {   in idpf_queue_reg_init()
    1563  totqs = vport->num_rxq + vport->num_bufq;   in idpf_send_config_rx_queues_msg()
    1571  u16 num_rxq;   in idpf_send_config_rx_queues_msg()  (local)
    1598  num_rxq = rx_qgrp->splitq.num_rxq_sets;   in idpf_send_config_rx_queues_msg()
    1600  num_rxq = rx_qgrp->singleq.num_rxq;   in idpf_send_config_rx_queues_msg()
    1602  for (j = 0; j < num_rxq; j++, k++) {   in idpf_send_config_rx_queues_msg()
    1711  u32 num_msgs, num_chunks, num_txq, num_rxq, num_q;   in idpf_send_ena_dis_queues_msg()  (local)
    [all …]

idpf_singleq_txrx.c
    1075  u16 num_rxq = q_vec->num_rxq;   in idpf_rx_singleq_clean_all()  (local)
    1082  budget_per_q = num_rxq ? max(budget / num_rxq, 1) : 0;   in idpf_rx_singleq_clean_all()
    1083  for (i = 0; i < num_rxq; i++) {   in idpf_rx_singleq_clean_all()

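Line 1082 above shows how idpf_rx_singleq_clean_all() splits a NAPI poll budget across the vector's Rx queues, with a floor of one so every queue is serviced even when the budget is smaller than the queue count. A self-contained restatement of that expression, with an invented helper name:

/*
 * Standalone restatement of:
 *     budget_per_q = num_rxq ? max(budget / num_rxq, 1) : 0;
 */
#include <stdio.h>

static unsigned int budget_per_queue(unsigned int budget, unsigned int num_rxq)
{
        unsigned int per_q;

        if (!num_rxq)
                return 0;               /* no Rx queues on this vector */

        per_q = budget / num_rxq;
        return per_q ? per_q : 1;       /* floor of 1 keeps every queue serviced */
}

int main(void)
{
        printf("%u\n", budget_per_queue(64, 3));        /* 21 */
        printf("%u\n", budget_per_queue(2, 5));         /* 1 */
        return 0;
}
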
idpf_txrx.h
    406  u16 num_rxq;   (member)
    891  u16 num_rxq;   (member)

idpf_lib.c
    1266  err = netif_set_real_num_rx_queues(vport->netdev, vport->num_rxq);   in idpf_set_real_num_queues()
    1312  for (j = 0; j < grp->singleq.num_rxq; j++) {   in idpf_rx_init_buf_tail()
    1902  new_vport->num_rxq,   in idpf_initiate_soft_reset()
    1928  vport->num_rxq, vport->num_bufq);   in idpf_initiate_soft_reset()

idpf.h
    306  u16 num_rxq;   (member)

/linux-6.12.1/drivers/net/ethernet/intel/ice/

ice_lib.c
    194   vsi->num_rxq = vsi->req_rxq;   in ice_vsi_set_num_qs()
    584   vsi->num_rxq = ch->num_rxq;   in ice_vsi_alloc_def()
    897   vsi->rss_size = min_t(u16, vsi->num_rxq, max_rss_size);   in ice_vsi_set_rss_params()
    1070  vsi->num_rxq = rx_count;   in ice_vsi_setup_q_map()
    1072  if (vsi->type == ICE_VSI_VF && vsi->num_txq != vsi->num_rxq) {   in ice_vsi_setup_q_map()
    1077  vsi->num_txq = vsi->num_rxq;   in ice_vsi_setup_q_map()
    1087  ctxt->info.q_mapping[1] = cpu_to_le16(vsi->num_rxq);   in ice_vsi_setup_q_map()
    1181  qcount = min_t(int, vsi->num_rxq, pf->num_lan_msix);   in ice_chnl_vsi_setup_q_map()
    1494  vsi->rss_size = min_t(u16, vsi->rss_size, vsi->num_rxq);   in ice_vsi_cfg_rss_lut_key()
    1503  vsi->orig_rss_size <= vsi->num_rxq) {   in ice_vsi_cfg_rss_lut_key()
    [all …]

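Lines 897 and 1494 show num_rxq acting as an upper bound on the RSS spread: the RSS size is clamped both by a capability maximum and by the number of Rx queues the VSI actually uses. A standalone sketch of that clamping, with hypothetical names:

/*
 * Illustration only: RSS size can never exceed the VSI's used Rx queues.
 */
#include <stdio.h>

static unsigned int clamp_rss_size(unsigned int requested,
                                   unsigned int num_rxq,
                                   unsigned int hw_max_rss)
{
        unsigned int rss = requested;

        if (rss > hw_max_rss)
                rss = hw_max_rss;       /* capability limit */
        if (rss > num_rxq)
                rss = num_rxq;          /* cannot spread over unused queues */
        return rss;
}

int main(void)
{
        printf("%u\n", clamp_rss_size(128, 16, 64));    /* -> 16 */
        return 0;
}
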
ice_sriov.c
    432  u16 num_msix_per_vf, num_txq, num_rxq, avail_qs;   in ice_set_per_vf_res()  (local)
    469  num_rxq = min_t(u16, num_msix_per_vf - ICE_NONQ_VECS_VF,   in ice_set_per_vf_res()
    473  num_rxq = 0;   in ice_set_per_vf_res()
    474  else if (num_rxq > avail_qs)   in ice_set_per_vf_res()
    475  num_rxq = rounddown_pow_of_two(avail_qs);   in ice_set_per_vf_res()
    477  if (num_txq < ICE_MIN_QS_PER_VF || num_rxq < ICE_MIN_QS_PER_VF) {   in ice_set_per_vf_res()
    491  pf->vfs.num_qps_per = min_t(int, num_txq, num_rxq);   in ice_set_per_vf_res()

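The ice_set_per_vf_res() hits outline how Rx queues are budgeted per VF: start from the VF's MSI-X count minus its non-queue vectors, and if the PF has fewer queues left than that, round down to a power of two; a result below ICE_MIN_QS_PER_VF is rejected. The sketch below mirrors that flow with stand-in parameters (the per-VF queue cap is assumed, and rounddown_pow_of_two() is reimplemented for the example):

/*
 * Stand-in for the per-VF Rx queue sizing in ice_set_per_vf_res(); constants
 * and helper names are not the driver's.
 */
#include <stdio.h>

static unsigned int rounddown_pow_of_two(unsigned int n)
{
        unsigned int p = 1;

        while (p * 2 <= n)
                p *= 2;
        return n ? p : 0;
}

static unsigned int vf_num_rxq(unsigned int msix_per_vf, unsigned int nonq_vecs,
                               unsigned int max_qs_per_vf, unsigned int avail_qs)
{
        unsigned int num_rxq = msix_per_vf - nonq_vecs;

        if (num_rxq > max_qs_per_vf)
                num_rxq = max_qs_per_vf;        /* the min_t() at line 469 */
        if (!avail_qs)
                num_rxq = 0;
        else if (num_rxq > avail_qs)
                num_rxq = rounddown_pow_of_two(avail_qs);
        return num_rxq;
}

int main(void)
{
        /* 17 vectors per VF, 1 non-queue vector, 16-queue cap, 10 queues left */
        printf("%u\n", vf_num_rxq(17, 1, 16, 10));      /* -> 8 */
        return 0;
}
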
ice.h
    169  for ((i) = 0; (i) < (vsi)->num_rxq; (i)++)
    220  u16 num_rxq;   (member)
    404  u16 num_rxq; /* Used Rx queues */   (member)

ice_base.c
    635  if (q_idx >= vsi->num_rxq)   in ice_vsi_cfg_single_rxq()
    821  rx_rings_rem = vsi->num_rxq;   in ice_vsi_map_rings_to_vectors()
    851  q_base = vsi->num_rxq - rx_rings_rem;   in ice_vsi_map_rings_to_vectors()

ice_xsk.c
    171   if (q_idx >= vsi->num_rxq || q_idx >= vsi->num_txq)   in ice_qp_dis()
    378   if (qid >= vsi->num_rxq || qid >= vsi->num_txq) {   in ice_xsk_pool_setup()
    1130  if (queue_id >= vsi->num_txq || queue_id >= vsi->num_rxq)   in ice_xsk_wakeup()

ice_ethtool_fdir.c
    1470  if (!vsi->num_rxq || q_index >= vsi->num_rxq)   in ice_update_per_q_fltr()
    1812  (*ring >= (ch->base_q + ch->num_rxq)))   in ice_update_ring_dest_vsi()
    1859  if (ring >= vsi->num_rxq)   in ice_set_fdir_input_set()

ice_ethtool.c
    3346  rx_rings = kcalloc(vsi->num_rxq, sizeof(*rx_rings), GFP_KERNEL);   in ice_set_ringparam()
    3825  (u16)pf->hw.func_caps.common_cap.num_rxq);   in ice_get_max_rxq()
    3869  ch->rx_count = vsi->num_rxq - ch->combined_count;   in ice_get_channels()
    4122  if (q_num < vsi->num_rxq && q_num < vsi->num_txq) {   in ice_get_q_coalesce()
    4129  } else if (q_num < vsi->num_rxq) {   in ice_get_q_coalesce()
    4290  if (q_num < vsi->num_rxq && q_num < vsi->num_txq) {   in ice_set_q_coalesce()
    4300  } else if (q_num < vsi->num_rxq) {   in ice_set_q_coalesce()
    4374  if (v_idx >= vsi->num_rxq && v_idx >= vsi->num_txq)   in __ice_set_coalesce()

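The ice_get_q_coalesce()/ice_set_q_coalesce() hits (4122, 4129, 4290, 4300) repeat the same index test: a queue number may name both an Rx and a Tx ring, only one of them, or nothing. A standalone restatement of that dispatch, assuming the branch not shown above checks num_txq:

/*
 * Illustration of the q_num classification; not the ethtool callback itself.
 */
#include <stdio.h>

enum q_kind { Q_BOTH, Q_RX_ONLY, Q_TX_ONLY, Q_INVALID };

static enum q_kind classify_queue(unsigned int q_num,
                                  unsigned int num_rxq, unsigned int num_txq)
{
        if (q_num < num_rxq && q_num < num_txq)
                return Q_BOTH;          /* coalesce applies to both rings */
        if (q_num < num_rxq)
                return Q_RX_ONLY;
        if (q_num < num_txq)
                return Q_TX_ONLY;
        return Q_INVALID;               /* out of range for this VSI */
}

int main(void)
{
        printf("%d\n", classify_queue(5, 8, 4));        /* 1 == Q_RX_ONLY */
        return 0;
}
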
ice_main.c
    4043  pf->max_pf_rxqs = func_caps->common_cap.num_rxq;   in ice_set_pf_caps()
    7107  if (!vsi->num_txq || !vsi->num_rxq)   in ice_get_stats64()
    7326  if (!vsi->num_rxq) {   in ice_vsi_setup_rx_rings()
    7445  err = netif_set_real_num_rx_queues(vsi->netdev, vsi->num_rxq);   in ice_vsi_open()
    8443  if (vsi->num_rxq <   in ice_validate_mqprio_qopt()
    8615  if (ch->num_txq || ch->num_rxq)   in ice_chnl_cfg_res()
    8669  vsi->next_base_q = vsi->next_base_q + ch->num_rxq;   in ice_setup_hw_channel()
    8671  ch->num_rxq);   in ice_setup_hw_channel()
    8746  if (!ch->num_txq || !ch->num_rxq) {   in ice_create_q_channel()
    8747  dev_err(dev, "Invalid num_queues requested: %d\n", ch->num_rxq);   in ice_create_q_channel()
    [all …]

ice_type.h
    272  u16 num_rxq; /* Number/Total Rx queues */   (member)

ice_tc_lib.c
    910   return queue < vsi->num_rxq ? vsi->rx_rings[queue] : NULL;   in ice_locate_rx_ring_using_queue()
    1852  if (queue >= vsi->num_rxq) {   in ice_tc_forward_to_queue()

ice_common.c
    2134  caps->num_rxq = number;   in ice_parse_common_caps()
    2137  caps->num_rxq);   in ice_parse_common_caps()
    2939  func_caps->common_cap.num_rxq = 1;   in ice_set_safe_mode_caps()
    2969  dev_caps->common_cap.num_rxq = num_funcs;   in ice_set_safe_mode_caps()

/linux-6.12.1/drivers/net/ethernet/atheros/alx/

alx.h
    122  int num_rxq;   (member)

main.c
    831   int err, num_vec, num_txq, num_rxq;   in alx_enable_msix()  (local)
    834   num_rxq = 1;   in alx_enable_msix()
    835   num_vec = max_t(int, num_txq, num_rxq) + 1;   in alx_enable_msix()
    847   alx->num_rxq = num_rxq;   in alx_enable_msix()
    911   alx->num_rxq = 1;   in alx_init_intr()
    1236  netif_set_real_num_rx_queues(alx->dev, alx->num_rxq);   in __alx_open()

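Line 835 shows alx sizing its MSI-X request as one vector per Tx/Rx queue pair (with num_rxq fixed at 1) plus one extra vector, presumably for non-queue (misc/link) interrupts. A minimal restatement of that count, with an invented helper name:

/*
 * Standalone restatement of: num_vec = max_t(int, num_txq, num_rxq) + 1;
 * The "+ 1" is assumed here to cover the non-queue (misc) interrupt.
 */
#include <stdio.h>

static int alx_msix_vectors(int num_txq, int num_rxq)
{
        int per_queue = num_txq > num_rxq ? num_txq : num_rxq;

        return per_queue + 1;
}

int main(void)
{
        printf("%d\n", alx_msix_vectors(4, 1));         /* -> 5 */
        return 0;
}
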
/linux-6.12.1/drivers/net/ethernet/stmicro/stmmac/

dwmac5.h
    112  u32 num_txq, u32 num_rxq,

dwmac5.c
    577  u32 num_txq, u32 num_rxq,   in dwmac5_fpe_configure()  (argument)
    586  value |= (num_rxq - 1) << GMAC_RXQCTRL_FPRQ_SHIFT;   in dwmac5_fpe_configure()

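Line 586 writes num_rxq - 1, i.e. the index of the highest Rx queue, into the FPRQ field (frame-preemption residue queue) of the Rx queue control register; the XGMAC variant below does the same with XGMAC_RQ_SHIFT. The sketch keeps only the mask/shift mechanics; the shift and field width used here are invented, not the dwmac5 definitions.

/*
 * Illustration of encoding (num_rxq - 1) into a register field; FPRQ_SHIFT
 * and FPRQ_MASK below are stand-ins, not GMAC_RXQCTRL_* values.
 */
#include <inttypes.h>
#include <stdio.h>

#define FPRQ_SHIFT      21U
#define FPRQ_MASK       (0x7U << FPRQ_SHIFT)

static uint32_t set_fpe_residue_queue(uint32_t regval, uint32_t num_rxq)
{
        regval &= ~FPRQ_MASK;                           /* clear old field */
        regval |= ((num_rxq - 1) << FPRQ_SHIFT) & FPRQ_MASK;
        return regval;
}

int main(void)
{
        /* four Rx queues -> residue queue index 3 */
        printf("0x%08" PRIx32 "\n", set_fpe_residue_queue(0, 4));
        return 0;
}
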
dwxgmac2_core.c
    583   struct stmmac_rss *cfg, u32 num_rxq)   in dwxgmac2_rss_configure()  (argument)
    609   for (i = 0; i < num_rxq; i++)   in dwxgmac2_rss_configure()
    1509  u32 num_txq, u32 num_rxq,   in dwxgmac3_fpe_configure()  (argument)
    1525  value |= (num_rxq - 1) << XGMAC_RQ_SHIFT;   in dwxgmac3_fpe_configure()

hwif.h
    395  struct stmmac_rss *cfg, u32 num_rxq);
    424  u32 num_txq, u32 num_rxq,