
Searched refs:rxqs (Results 1 – 25 of 34) sorted by relevance


/linux-6.12.1/drivers/net/ethernet/fungible/funeth/
funeth_main.c
182 struct funeth_rxq **rxqs = rtnl_dereference(fp->rxqs); in fun_config_rss() local
214 *indir_tab++ = cpu_to_be32(rxqs[*qtable++]->hw_cqid); in fun_config_rss()
378 static void free_rxqs(struct funeth_rxq **rxqs, unsigned int nqs, in free_rxqs() argument
383 for (i = start; i < nqs && rxqs[i]; i++) in free_rxqs()
384 rxqs[i] = funeth_rxq_free(rxqs[i], state); in free_rxqs()
387 static int alloc_rxqs(struct net_device *dev, struct funeth_rxq **rxqs, in alloc_rxqs() argument
398 state, &rxqs[i]); in alloc_rxqs()
400 free_rxqs(rxqs, nqs, start, FUN_QSTATE_DESTROYED); in alloc_rxqs()
445 struct funeth_rxq **rxqs = qset->rxqs; in fun_free_rings() local
450 if (!rxqs) { in fun_free_rings()
[all …]
funeth.h
66 struct funeth_rxq **rxqs; member
87 struct funeth_rxq * __rcu *rxqs; member
funeth_ethtool.c
486 struct funeth_rxq **rxqs; in fun_set_coalesce() local
510 rxqs = rtnl_dereference(fp->rxqs); in fun_set_coalesce()
511 if (!rxqs) in fun_set_coalesce()
515 WRITE_ONCE(rxqs[i]->irq_db_val, db_val); in fun_set_coalesce()
754 struct funeth_rxq **rxqs; in fun_get_ethtool_stats() local
761 rxqs = rtnl_dereference(fp->rxqs); in fun_get_ethtool_stats()
762 if (!rxqs) in fun_get_ethtool_stats()
816 FUN_QSTAT_READ(rxqs[i], start, rxs); in fun_get_ethtool_stats()
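The funeth hits above show a common pattern: the live RX queue array sits behind an __rcu pointer in the private struct, writers publish it when the rings come up, and control-path readers fetch it with rtnl_dereference() under RTNL and bail out if the device is down. A minimal sketch of that pattern follows, using hypothetical demo_* names rather than the real driver structures.

#include <linux/netdevice.h>
#include <linux/rtnetlink.h>

struct demo_rxq {
	u32 irq_db_val;				/* per-queue interrupt moderation value */
};

struct demo_priv {
	struct demo_rxq * __rcu *rxqs;		/* NULL while the rings are down */
	unsigned int n_rxqs;
};

/* Runs under RTNL, e.g. from an ethtool set_coalesce handler. */
static int demo_set_coalesce(struct net_device *dev, u32 db_val)
{
	struct demo_priv *priv = netdev_priv(dev);
	struct demo_rxq **rxqs = rtnl_dereference(priv->rxqs);
	unsigned int i;

	if (!rxqs)				/* device is down, nothing to program */
		return 0;

	for (i = 0; i < priv->n_rxqs; i++)
		WRITE_ONCE(rxqs[i]->irq_db_val, db_val);
	return 0;
}
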
/linux-6.12.1/drivers/net/ethernet/huawei/hinic/
hinic_main.c
117 gather_rx_stats(nic_rx_stats, &nic_dev->rxqs[i]); in gather_nic_stats()
230 if (nic_dev->rxqs) in create_rxqs()
233 nic_dev->rxqs = devm_kcalloc(&netdev->dev, num_rxqs, in create_rxqs()
234 sizeof(*nic_dev->rxqs), GFP_KERNEL); in create_rxqs()
235 if (!nic_dev->rxqs) in create_rxqs()
243 err = hinic_init_rxq(&nic_dev->rxqs[i], rq, netdev); in create_rxqs()
261 hinic_clean_rxq(&nic_dev->rxqs[i]); in create_rxqs()
264 hinic_rq_debug_rem(nic_dev->rxqs[j].rq); in create_rxqs()
265 hinic_clean_rxq(&nic_dev->rxqs[j]); in create_rxqs()
270 devm_kfree(&netdev->dev, nic_dev->rxqs); in create_rxqs()
[all …]
hinic_dev.h
98 struct hinic_rxq *rxqs; member
hinic_debugfs.c
240 rq = dev->rxqs[rq_id].rq; in hinic_rq_debug_add()
hinic_ethtool.c
700 msix_idx = set_rx_coal ? nic_dev->rxqs[q_id].rq->msix_entry : in set_queue_coalesce()
1389 if (!nic_dev->rxqs) in get_drv_queue_stats()
1392 hinic_rxq_get_stats(&nic_dev->rxqs[qid], &rxq_stats); in get_drv_queue_stats()
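The hinic hits show rxqs as a flat, device-managed array of queue structs: create_rxqs() allocates it with devm_kcalloc(), initialises each entry, and unwinds the entries set up so far if one fails. A rough sketch of that shape, with made-up demo_* names and trivial init/clean helpers standing in for the real hinic_init_rxq()/hinic_clean_rxq():

#include <linux/device.h>
#include <linux/netdevice.h>
#include <linux/slab.h>

struct demo_rxq { int id; };

struct demo_nic {
	struct net_device *netdev;
	struct demo_rxq *rxqs;		/* num_rxqs entries, devres-managed */
	unsigned int num_rxqs;
};

static int demo_init_rxq(struct demo_rxq *rxq, int id)
{
	rxq->id = id;			/* stand-in for real ring setup */
	return 0;
}

static void demo_clean_rxq(struct demo_rxq *rxq)
{
	rxq->id = -1;			/* stand-in for real ring teardown */
}

static int demo_create_rxqs(struct demo_nic *nic)
{
	unsigned int i, j;
	int err;

	nic->rxqs = devm_kcalloc(&nic->netdev->dev, nic->num_rxqs,
				 sizeof(*nic->rxqs), GFP_KERNEL);
	if (!nic->rxqs)
		return -ENOMEM;

	for (i = 0; i < nic->num_rxqs; i++) {
		err = demo_init_rxq(&nic->rxqs[i], i);
		if (err)
			goto err_unwind;
	}
	return 0;

err_unwind:
	for (j = 0; j < i; j++)		/* undo the queues initialised so far */
		demo_clean_rxq(&nic->rxqs[j]);
	devm_kfree(&nic->netdev->dev, nic->rxqs);
	nic->rxqs = NULL;
	return err;
}
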
/linux-6.12.1/net/
devres.c
22 unsigned int txqs, unsigned int rxqs) in devm_alloc_etherdev_mqs() argument
30 dr->ndev = alloc_etherdev_mqs(sizeof_priv, txqs, rxqs); in devm_alloc_etherdev_mqs()
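net/devres.c provides the device-managed wrapper: devm_alloc_etherdev_mqs() allocates the net_device with separate TX/RX queue counts and ties its lifetime to the backing struct device. A typical probe-path use, with a made-up private struct and illustrative queue counts:

#include <linux/etherdevice.h>
#include <linux/platform_device.h>

struct demo_priv { void *hw; };

static int demo_probe(struct platform_device *pdev)
{
	struct net_device *netdev;

	/* 8 TX and 8 RX queues; freed automatically on driver unbind */
	netdev = devm_alloc_etherdev_mqs(&pdev->dev, sizeof(struct demo_priv),
					 8, 8);
	if (!netdev)
		return -ENOMEM;

	SET_NETDEV_DEV(netdev, &pdev->dev);
	/* ... set netdev_ops and features, then register_netdev(netdev) ... */
	return 0;
}
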
/linux-6.12.1/drivers/net/ethernet/microsoft/mana/
mana_bpf.c
145 return rtnl_dereference(apc->rxqs[0]->bpf_prog); in mana_chn_xdp_get()
164 rcu_assign_pointer(apc->rxqs[i]->bpf_prog, prog); in mana_chn_setxdp()
mana_en.c
450 rx_stats = &apc->rxqs[q]->stats; in mana_get_stats64()
724 kfree(apc->rxqs); in mana_cleanup_port_context()
725 apc->rxqs = NULL; in mana_cleanup_port_context()
737 apc->rxqs = kcalloc(apc->num_queues, sizeof(struct mana_rxq *), in mana_init_port_context()
740 return !apc->rxqs ? -ENOMEM : 0; in mana_init_port_context()
1322 rxq = apc->rxqs[rxq_idx]; in mana_fence_rqs()
2328 apc->rxqs[i] = rxq; in mana_add_rx_queues()
2331 apc->default_rxobj = apc->rxqs[0]->rxobj; in mana_add_rx_queues()
2343 rxq = apc->rxqs[rxq_idx]; in mana_destroy_vport()
2348 apc->rxqs[rxq_idx] = NULL; in mana_destroy_vport()
[all …]
mana_ethtool.c
177 rx_stats = &apc->rxqs[q]->stats; in mana_get_ethtool_stats()
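The mana hits combine two patterns: apc->rxqs is a kcalloc'd array of per-queue pointers sized at port init, and each queue carries an __rcu bpf_prog pointer that XDP attach publishes with rcu_assign_pointer() and the control path reads with rtnl_dereference(). A condensed sketch with hypothetical demo_* names:

#include <linux/bpf.h>
#include <linux/rtnetlink.h>
#include <linux/slab.h>

struct demo_rxq {
	struct bpf_prog __rcu *bpf_prog;
};

struct demo_port {
	struct demo_rxq **rxqs;			/* num_queues pointers */
	unsigned int num_queues;
};

static int demo_init_port_context(struct demo_port *apc)
{
	apc->rxqs = kcalloc(apc->num_queues, sizeof(struct demo_rxq *),
			    GFP_KERNEL);
	return apc->rxqs ? 0 : -ENOMEM;
}

/* Runs under RTNL once the queues in apc->rxqs[] are populated. */
static void demo_set_xdp(struct demo_port *apc, struct bpf_prog *prog)
{
	unsigned int i;

	for (i = 0; i < apc->num_queues; i++)
		rcu_assign_pointer(apc->rxqs[i]->bpf_prog, prog);
}

static struct bpf_prog *demo_get_xdp(struct demo_port *apc)
{
	return rtnl_dereference(apc->rxqs[0]->bpf_prog);
}
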
/linux-6.12.1/drivers/net/ethernet/netronome/nfp/
nfp_net_repr.h
102 nfp_repr_alloc_mqs(struct nfp_app *app, unsigned int txqs, unsigned int rxqs);
nfp_net_repr.c
424 nfp_repr_alloc_mqs(struct nfp_app *app, unsigned int txqs, unsigned int rxqs) in nfp_repr_alloc_mqs() argument
429 netdev = alloc_etherdev_mqs(sizeof(*repr), txqs, rxqs); in nfp_repr_alloc_mqs()
/linux-6.12.1/include/linux/
etherdevice.h
57 unsigned int rxqs);
63 unsigned int rxqs);
/linux-6.12.1/net/ethernet/
eth.c
380 unsigned int rxqs) in alloc_etherdev_mqs() argument
383 ether_setup, txqs, rxqs); in alloc_etherdev_mqs()
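eth.c holds the non-managed allocator itself: alloc_etherdev_mqs() sets up an Ethernet net_device with independent TX and RX queue counts (alloc_etherdev() is the 1/1 case). A small usage sketch; the queue handling and the trim via netif_set_real_num_rx_queues() are illustrative only:

#include <linux/etherdevice.h>

struct demo_priv { int dummy; };

static struct net_device *demo_alloc(unsigned int txqs, unsigned int rxqs)
{
	struct net_device *netdev;
	int err;

	netdev = alloc_etherdev_mqs(sizeof(struct demo_priv), txqs, rxqs);
	if (!netdev)
		return NULL;

	/* rxqs is the maximum; the active count may be set lower later */
	err = netif_set_real_num_rx_queues(netdev, rxqs);
	if (err) {
		free_netdev(netdev);
		return NULL;
	}
	return netdev;
}
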
/linux-6.12.1/drivers/net/can/dev/
dev.c
250 unsigned int txqs, unsigned int rxqs) in alloc_candev_mqs() argument
277 txqs, rxqs); in alloc_candev_mqs()
/linux-6.12.1/include/linux/can/
dev.h
174 unsigned int txqs, unsigned int rxqs);
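For CAN devices the equivalent is alloc_candev_mqs(), which adds the echo-skb depth on top of the TX/RX queue counts; alloc_candev() is the single-queue shorthand. A minimal sketch with made-up sizes and a private struct whose first member is struct can_priv, as the CAN core requires:

#include <linux/can/dev.h>

struct demo_can_priv {
	struct can_priv can;	/* must stay the first member */
	void *regs;
};

static struct net_device *demo_can_alloc(void)
{
	/* 4 echo skbs, 2 TX queues, 2 RX queues */
	return alloc_candev_mqs(sizeof(struct demo_can_priv), 4, 2, 2);
}
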
/linux-6.12.1/drivers/net/ethernet/marvell/
mvneta.c
506 struct mvneta_rx_queue *rxqs; member
1269 struct mvneta_rx_queue *rxq = &pp->rxqs[queue]; in mvneta_port_up()
3315 &pp->rxqs[rx_queue]); in mvneta_poll()
3318 &pp->rxqs[rx_queue]); in mvneta_poll()
3656 mvneta_rxq_deinit(pp, &pp->rxqs[queue]); in mvneta_cleanup_rxqs()
3666 int err = mvneta_rxq_init(pp, &pp->rxqs[queue]); in mvneta_setup_rxqs()
4684 struct mvneta_rx_queue *rxq = &pp->rxqs[queue]; in mvneta_ethtool_set_coalesce()
4709 c->rx_coalesce_usecs = pp->rxqs[0].time_coal; in mvneta_ethtool_get_coalesce()
4710 c->rx_max_coalesced_frames = pp->rxqs[0].pkts_coal; in mvneta_ethtool_get_coalesce()
4923 if (pp->rxqs[i].page_pool) in mvneta_ethtool_pp_stats()
[all …]
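In mvneta, pp->rxqs is a plain array of RX queue structs: ethtool set_coalesce writes the per-queue time/packet thresholds into every entry and get_coalesce reports queue 0. A sketch of that arrangement with hypothetical demo_* types; the per-queue register programming the real driver performs is omitted:

#include <linux/ethtool.h>
#include <linux/netdevice.h>

struct demo_rx_queue {
	u32 time_coal;	/* usecs before raising the RX interrupt */
	u32 pkts_coal;	/* packets before raising the RX interrupt */
};

struct demo_port {
	struct demo_rx_queue *rxqs;	/* rxq_number entries */
	unsigned int rxq_number;
};

static int demo_set_coalesce(struct demo_port *pp, struct ethtool_coalesce *c)
{
	unsigned int queue;

	for (queue = 0; queue < pp->rxq_number; queue++) {
		struct demo_rx_queue *rxq = &pp->rxqs[queue];

		rxq->time_coal = c->rx_coalesce_usecs;
		rxq->pkts_coal = c->rx_max_coalesced_frames;
	}
	return 0;
}

static int demo_get_coalesce(struct demo_port *pp, struct ethtool_coalesce *c)
{
	c->rx_coalesce_usecs = pp->rxqs[0].time_coal;
	c->rx_max_coalesced_frames = pp->rxqs[0].pkts_coal;
	return 0;
}
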
/linux-6.12.1/drivers/net/ethernet/intel/idpf/
idpf_txrx.c
539 idpf_rx_desc_rel(rx_qgrp->singleq.rxqs[j], dev, in idpf_rx_desc_rel_all()
825 q = rx_qgrp->singleq.rxqs[j]; in idpf_rx_bufs_init_all()
940 q = rx_qgrp->singleq.rxqs[j]; in idpf_rx_desc_alloc_all()
1063 kfree(rx_qgrp->singleq.rxqs[j]); in idpf_rxq_group_rel()
1064 rx_qgrp->singleq.rxqs[j] = NULL; in idpf_rxq_group_rel()
1463 rx_qgrp->singleq.rxqs[j] = in idpf_rxq_group_alloc()
1464 kzalloc(sizeof(*rx_qgrp->singleq.rxqs[j]), in idpf_rxq_group_alloc()
1466 if (!rx_qgrp->singleq.rxqs[j]) { in idpf_rxq_group_alloc()
1535 q = rx_qgrp->singleq.rxqs[j]; in idpf_rxq_group_alloc()
4079 q = rx_qgrp->singleq.rxqs[j]; in idpf_vport_intr_map_vector_to_qs()
idpf_ethtool.c
803 rxq = rxq_grp->singleq.rxqs[j]; in idpf_collect_queue_stats()
937 rxq = rxq_grp->singleq.rxqs[j]; in idpf_get_ethtool_stats()
966 return vport->rxq_grps->singleq.rxqs[q_num]->q_vector; in idpf_find_rxq_vec()
idpf_virtchnl.c
1117 q = rx_qgrp->singleq.rxqs[j]; in __idpf_queue_reg_init()
1607 rxq = rx_qgrp->singleq.rxqs[j]; in idpf_send_config_rx_queues_msg()
1768 cpu_to_le32(rx_qgrp->singleq.rxqs[j]->q_id); in idpf_send_ena_dis_queues_msg()
1910 rxq = rx_qgrp->singleq.rxqs[j]; in idpf_send_map_unmap_queue_vector_msg()
3312 q = rx_qgrp->singleq.rxqs[j]; in __idpf_vport_queue_ids_init()
idpf_txrx.h
892 struct idpf_rx_queue *rxqs[IDPF_LARGE_MAX_Q]; member
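The idpf hits show the single-queue layout: each RX queue group owns a fixed-size array of queue pointers, allocated entry by entry with kzalloc() and cleared again on release. A reduced sketch with made-up names and a made-up array bound in place of IDPF_LARGE_MAX_Q:

#include <linux/slab.h>

#define DEMO_MAX_Q	16

struct demo_rx_queue { unsigned int idx; };

struct demo_rxq_group {
	struct {
		struct demo_rx_queue *rxqs[DEMO_MAX_Q];
		unsigned int num_rxq;
	} singleq;
};

static void demo_rxq_group_rel(struct demo_rxq_group *grp)
{
	unsigned int j;

	for (j = 0; j < grp->singleq.num_rxq; j++) {
		kfree(grp->singleq.rxqs[j]);
		grp->singleq.rxqs[j] = NULL;
	}
	grp->singleq.num_rxq = 0;
}

static int demo_rxq_group_alloc(struct demo_rxq_group *grp,
				unsigned int num_rxq)
{
	unsigned int j;

	if (num_rxq > DEMO_MAX_Q)
		return -EINVAL;

	for (j = 0; j < num_rxq; j++) {
		grp->singleq.rxqs[j] = kzalloc(sizeof(*grp->singleq.rxqs[j]),
					       GFP_KERNEL);
		if (!grp->singleq.rxqs[j])
			goto err_unwind;
	}
	grp->singleq.num_rxq = num_rxq;
	return 0;

err_unwind:
	while (j--) {			/* free what was allocated so far */
		kfree(grp->singleq.rxqs[j]);
		grp->singleq.rxqs[j] = NULL;
	}
	return -ENOMEM;
}
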
/linux-6.12.1/drivers/infiniband/hw/hfi1/
ipoib_main.c
245 params->rxqs = dd->num_netdev_contexts; in hfi1_ipoib_rn_get_params()
/linux-6.12.1/drivers/net/ethernet/marvell/mvpp2/
mvpp2_main.c
727 prxq = port->rxqs[lrxq]->id; in mvpp2_rxq_long_pool_set()
748 prxq = port->rxqs[lrxq]->id; in mvpp2_rxq_short_pool_set()
2318 queue = port->rxqs[lrxq]->id; in mvpp2_defaults_set()
2336 queue = port->rxqs[lrxq]->id; in mvpp2_ingress_enable()
2349 queue = port->rxqs[lrxq]->id; in mvpp2_ingress_disable()
2859 return port->rxqs[queue]; in mvpp2_get_rx_queue()
3315 mvpp2_rxq_deinit(port, port->rxqs[queue]); in mvpp2_cleanup_rxqs()
3327 err = mvpp2_rxq_init(port, port->rxqs[queue]); in mvpp2_setup_rxqs()
5431 struct mvpp2_rx_queue *rxq = port->rxqs[queue]; in mvpp2_ethtool_set_coalesce()
5465 c->rx_coalesce_usecs = port->rxqs[0]->time_coal; in mvpp2_ethtool_get_coalesce()
[all …]
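mvpp2 keeps rxqs as an array of pointers, each queue recording its physical (hardware) id, and walks the logical queues to program per-queue settings such as buffer pools and coalescing. A bare sketch of that indirection with invented demo_* names and a stubbed register write:

#include <linux/types.h>

struct demo_rx_queue {
	int id;			/* physical/hardware RX queue id */
	u32 time_coal;
};

struct demo_port {
	struct demo_rx_queue **rxqs;	/* nrxqs pointers */
	unsigned int nrxqs;
};

/* Hypothetical register write; a real driver would touch MMIO here. */
static void demo_write_pool_reg(int prxq, int pool)
{
}

static struct demo_rx_queue *demo_get_rx_queue(struct demo_port *port,
					       unsigned int queue)
{
	return port->rxqs[queue];
}

static void demo_rxq_pool_set(struct demo_port *port, int pool)
{
	unsigned int lrxq;

	for (lrxq = 0; lrxq < port->nrxqs; lrxq++) {
		int prxq = port->rxqs[lrxq]->id;	/* logical -> physical */

		demo_write_pool_reg(prxq, pool);
	}
}
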
/linux-6.12.1/include/net/mana/
mana.h
439 struct mana_rxq **rxqs; member
