Searched refs:qcfg (Results 1 – 7 of 7) sorted by relevance
 925  cfg->qcfg = &priv->tx_cfg;                                  in gve_tx_get_curr_alloc_cfg()
1278  cfg->qcfg = &priv->rx_cfg;                                  in gve_rx_get_curr_alloc_cfg()
1358  priv->tx_cfg = *tx_alloc_cfg->qcfg;                         in gve_queues_start()
1359  priv->rx_cfg = *rx_alloc_cfg->qcfg;                         in gve_queues_start()
1369  gve_rx_start_rings(priv, rx_alloc_cfg->qcfg->num_queues);   in gve_queues_start()
1845  tx_alloc_cfg.qcfg = &new_tx_config;                         in gve_adjust_queues()
1847  rx_alloc_cfg.qcfg = &new_rx_config;                         in gve_adjust_queues()
 648  struct gve_queue_config *qcfg;                              member
 662  struct gve_queue_config *qcfg;                              member
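
The hits above appear to come from the Google gve Ethernet driver (likely gve_main.c and gve.h; the file headers were lost here, so those paths are my inference from the function names). Together they show the pattern: each ring-allocation cfg struct carries a qcfg pointer to a struct gve_queue_config; the get_curr_alloc_cfg helpers point it at the live config inside the priv struct, gve_adjust_queues() repoints it at a caller-built config, and gve_queues_start() commits the staged config back by value. A minimal compilable sketch of that flow, using simplified stand-in types rather than the real gve definitions:

#include <stdio.h>

struct gve_queue_config {
	unsigned short max_queues;
	unsigned short num_queues;
};

struct gve_tx_alloc_rings_cfg {
	struct gve_queue_config *qcfg;	/* staged TX queue config */
};

struct gve_priv {
	struct gve_queue_config tx_cfg;	/* live config owned by the device */
};

/* snapshot step: cfg->qcfg aliases the live config */
static void gve_tx_get_curr_alloc_cfg(struct gve_priv *priv,
				      struct gve_tx_alloc_rings_cfg *cfg)
{
	cfg->qcfg = &priv->tx_cfg;
}

/* commit step: copy the staged config back into priv by value */
static void gve_queues_start(struct gve_priv *priv,
			     struct gve_tx_alloc_rings_cfg *cfg)
{
	priv->tx_cfg = *cfg->qcfg;
}

int main(void)
{
	struct gve_priv priv = { .tx_cfg = { .max_queues = 16, .num_queues = 4 } };
	struct gve_queue_config new_tx_config = { .max_queues = 16, .num_queues = 8 };
	struct gve_tx_alloc_rings_cfg cfg;

	gve_tx_get_curr_alloc_cfg(&priv, &cfg);	/* cfg.qcfg -> live config */
	cfg.qcfg = &new_tx_config;		/* as in gve_adjust_queues() */
	gve_queues_start(&priv, &cfg);		/* priv.tx_cfg = *cfg.qcfg */
	printf("tx queues now %hu\n", priv.tx_cfg.num_queues);
	return 0;
}
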
 468  rx = kvcalloc(cfg->qcfg->max_queues, sizeof(struct gve_rx_ring),   in gve_rx_alloc_rings_dqo()
 473  for (i = 0; i < cfg->qcfg->num_queues; i++) {                      in gve_rx_alloc_rings_dqo()
 502  for (i = 0; i < cfg->qcfg->num_queues; i++)                        in gve_rx_free_rings_dqo()
 388  rx = kvcalloc(cfg->qcfg->max_queues, sizeof(struct gve_rx_ring),   in gve_rx_alloc_rings_gqi()
 393  for (i = 0; i < cfg->qcfg->num_queues; i++) {                      in gve_rx_alloc_rings_gqi()
 422  for (i = 0; i < cfg->qcfg->num_queues; i++)                        in gve_rx_free_rings_gqi()
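
The DQO and GQI RX paths (likely gve_rx_dqo.c and gve_rx.c) follow the same sizing convention: the ring array is kvcalloc'd for qcfg->max_queues, while the alloc and free loops only walk qcfg->num_queues, so the queue count can grow later without reallocating the array. A userspace sketch of that convention, with calloc() standing in for the kernel's kvcalloc() and alloc_rx_rings() as a hypothetical helper name:

#include <stdlib.h>

struct gve_rx_ring {
	int initialized;	/* real ring state elided */
};

struct gve_queue_config {
	unsigned short max_queues;
	unsigned short num_queues;
};

static struct gve_rx_ring *alloc_rx_rings(const struct gve_queue_config *qcfg)
{
	/* size for max_queues so a later queue-count increase needs no realloc */
	struct gve_rx_ring *rx = calloc(qcfg->max_queues, sizeof(*rx));

	if (!rx)
		return NULL;
	/* ...but only set up the rings currently in use */
	for (unsigned int i = 0; i < qcfg->num_queues; i++)
		rx[i].initialized = 1;
	return rx;
}
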
 337  if (cfg->start_idx + cfg->num_rings > cfg->qcfg->max_queues) {     in gve_tx_alloc_rings_gqi()
 344  tx = kvcalloc(cfg->qcfg->max_queues, sizeof(struct gve_tx_ring),   in gve_tx_alloc_rings_gqi()
 385  if (cfg->start_idx + cfg->num_rings > cfg->qcfg->max_queues) {     in gve_tx_alloc_rings_dqo()
 392  tx = kvcalloc(cfg->qcfg->max_queues, sizeof(struct gve_tx_ring),   in gve_tx_alloc_rings_dqo()
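
Both TX alloc paths guard the same invariant before filling the array: a partial (re)allocation covering rings [start_idx, start_idx + num_rings) must stay within the max_queues-sized array. A short sketch of that guard; gve_tx_range_ok() is a hypothetical helper name, and the -EINVAL return is my assumption about the reject path, not quoted from the driver:

#include <errno.h>

/* Bounds guard as at lines 337/385 above: the requested ring range
 * must fit inside the array sized by qcfg->max_queues. */
static int gve_tx_range_ok(unsigned int start_idx, unsigned int num_rings,
			   unsigned int max_queues)
{
	if (start_idx + num_rings > max_queues)
		return -EINVAL;	/* assumed error code for the reject path */
	return 0;
}
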
1264  #define BNA_GET_RXQS(qcfg) (((qcfg)->rxp_type == BNA_RXP_SINGLE) ? \   argument
1265          (qcfg)->num_paths : ((qcfg)->num_paths * 2))
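
This last hit is unrelated to gve: it is the Brocade bna driver (likely one of its headers), where qcfg is merely the macro's argument name. The macro computes the total RXQ count for an RX config: a BNA_RXP_SINGLE path carries one RXQ, while the split path types carry two per path. A standalone restatement; the enum values and struct layout below are simplified stand-ins, not the bna driver's real definitions:

/* Stand-in definitions for illustration only. */
enum bna_rxp_type {
	BNA_RXP_SINGLE,	/* one RXQ per path */
	BNA_RXP_SLR,	/* small/large split: two RXQs per path */
	BNA_RXP_HDS,	/* header/data split: two RXQs per path */
};

struct bna_rx_config {
	enum bna_rxp_type rxp_type;
	int num_paths;
};

#define BNA_GET_RXQS(qcfg) (((qcfg)->rxp_type == BNA_RXP_SINGLE) ? \
	(qcfg)->num_paths : ((qcfg)->num_paths * 2))

/* e.g. { BNA_RXP_HDS, 4 } yields 8 RXQs; { BNA_RXP_SINGLE, 4 } yields 4 */
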