
1 // SPDX-License-Identifier: (GPL-2.0 OR MIT)
4 * Copyright (C) 2015-2024 Google LLC
33 #define GVE_VERSION_PREFIX "GVE-"
48 driver_info = dma_alloc_coherent(&priv->pdev->dev, in gve_verify_driver_compatibility()
52 return -ENOMEM; in gve_verify_driver_compatibility()
66 strscpy(driver_info->os_version_str1, utsname()->release, in gve_verify_driver_compatibility()
67 sizeof(driver_info->os_version_str1)); in gve_verify_driver_compatibility()
68 strscpy(driver_info->os_version_str2, utsname()->version, in gve_verify_driver_compatibility()
69 sizeof(driver_info->os_version_str2)); in gve_verify_driver_compatibility()
76 if (err == -EOPNOTSUPP) in gve_verify_driver_compatibility()
79 dma_free_coherent(&priv->pdev->dev, in gve_verify_driver_compatibility()
116 if (priv->rx) { in gve_get_stats()
117 for (ring = 0; ring < priv->rx_cfg.num_queues; ring++) { in gve_get_stats()
120 u64_stats_fetch_begin(&priv->rx[ring].statss); in gve_get_stats()
121 packets = priv->rx[ring].rpackets; in gve_get_stats()
122 bytes = priv->rx[ring].rbytes; in gve_get_stats()
123 } while (u64_stats_fetch_retry(&priv->rx[ring].statss, in gve_get_stats()
125 s->rx_packets += packets; in gve_get_stats()
126 s->rx_bytes += bytes; in gve_get_stats()
129 if (priv->tx) { in gve_get_stats()
133 u64_stats_fetch_begin(&priv->tx[ring].statss); in gve_get_stats()
134 packets = priv->tx[ring].pkt_done; in gve_get_stats()
135 bytes = priv->tx[ring].bytes_done; in gve_get_stats()
136 } while (u64_stats_fetch_retry(&priv->tx[ring].statss, in gve_get_stats()
138 s->tx_packets += packets; in gve_get_stats()
139 s->tx_bytes += bytes; in gve_get_stats()
146 struct gve_flow_rules_cache *flow_rules_cache = &priv->flow_rules_cache; in gve_alloc_flow_rule_caches()
149 if (!priv->max_flow_rules) in gve_alloc_flow_rule_caches()
152 flow_rules_cache->rules_cache = in gve_alloc_flow_rule_caches()
153 kvcalloc(GVE_FLOW_RULES_CACHE_SIZE, sizeof(*flow_rules_cache->rules_cache), in gve_alloc_flow_rule_caches()
155 if (!flow_rules_cache->rules_cache) { in gve_alloc_flow_rule_caches()
156 dev_err(&priv->pdev->dev, "Cannot alloc flow rules cache\n"); in gve_alloc_flow_rule_caches()
157 return -ENOMEM; in gve_alloc_flow_rule_caches()
160 flow_rules_cache->rule_ids_cache = in gve_alloc_flow_rule_caches()
161 kvcalloc(GVE_FLOW_RULE_IDS_CACHE_SIZE, sizeof(*flow_rules_cache->rule_ids_cache), in gve_alloc_flow_rule_caches()
163 if (!flow_rules_cache->rule_ids_cache) { in gve_alloc_flow_rule_caches()
164 dev_err(&priv->pdev->dev, "Cannot alloc flow rule ids cache\n"); in gve_alloc_flow_rule_caches()
165 err = -ENOMEM; in gve_alloc_flow_rule_caches()
172 kvfree(flow_rules_cache->rules_cache); in gve_alloc_flow_rule_caches()
173 flow_rules_cache->rules_cache = NULL; in gve_alloc_flow_rule_caches()
179 struct gve_flow_rules_cache *flow_rules_cache = &priv->flow_rules_cache; in gve_free_flow_rule_caches()
181 kvfree(flow_rules_cache->rule_ids_cache); in gve_free_flow_rule_caches()
182 flow_rules_cache->rule_ids_cache = NULL; in gve_free_flow_rule_caches()
183 kvfree(flow_rules_cache->rules_cache); in gve_free_flow_rule_caches()
184 flow_rules_cache->rules_cache = NULL; in gve_free_flow_rule_caches()
189 priv->counter_array = in gve_alloc_counter_array()
190 dma_alloc_coherent(&priv->pdev->dev, in gve_alloc_counter_array()
191 priv->num_event_counters * in gve_alloc_counter_array()
192 sizeof(*priv->counter_array), in gve_alloc_counter_array()
193 &priv->counter_array_bus, GFP_KERNEL); in gve_alloc_counter_array()
194 if (!priv->counter_array) in gve_alloc_counter_array()
195 return -ENOMEM; in gve_alloc_counter_array()
202 if (!priv->counter_array) in gve_free_counter_array()
205 dma_free_coherent(&priv->pdev->dev, in gve_free_counter_array()
206 priv->num_event_counters * in gve_free_counter_array()
207 sizeof(*priv->counter_array), in gve_free_counter_array()
208 priv->counter_array, priv->counter_array_bus); in gve_free_counter_array()
209 priv->counter_array = NULL; in gve_free_counter_array()
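gve_alloc_counter_array()/gve_free_counter_array() above pair one dma_alloc_coherent() with a dma_free_coherent() of the same size and keep the bus address alongside the CPU pointer. A minimal sketch of that pairing; the element type and names are illustrative.

#include <linux/dma-mapping.h>

static __be32 *my_alloc_counters(struct device *dev, int num, dma_addr_t *bus)
{
	/* Coherent memory: device and CPU see updates without explicit syncs. */
	return dma_alloc_coherent(dev, num * sizeof(__be32), bus, GFP_KERNEL);
}

static void my_free_counters(struct device *dev, int num, __be32 *arr,
			     dma_addr_t bus)
{
	if (!arr)
		return;
	dma_free_coherent(dev, num * sizeof(__be32), arr, bus);
}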
228 queue_work(priv->gve_wq, &priv->stats_report_task); in gve_stats_report_schedule()
236 mod_timer(&priv->stats_report_timer, in gve_stats_report_timer()
238 msecs_to_jiffies(priv->stats_report_timer_period))); in gve_stats_report_timer()
249 priv->rx_cfg.num_queues; in gve_alloc_stats_report()
250 priv->stats_report_len = struct_size(priv->stats_report, stats, in gve_alloc_stats_report()
252 priv->stats_report = in gve_alloc_stats_report()
253 dma_alloc_coherent(&priv->pdev->dev, priv->stats_report_len, in gve_alloc_stats_report()
254 &priv->stats_report_bus, GFP_KERNEL); in gve_alloc_stats_report()
255 if (!priv->stats_report) in gve_alloc_stats_report()
256 return -ENOMEM; in gve_alloc_stats_report()
257 /* Set up timer for the report-stats task */ in gve_alloc_stats_report()
258 timer_setup(&priv->stats_report_timer, gve_stats_report_timer, 0); in gve_alloc_stats_report()
259 priv->stats_report_timer_period = GVE_STATS_REPORT_TIMER_PERIOD; in gve_alloc_stats_report()
265 if (!priv->stats_report) in gve_free_stats_report()
268 del_timer_sync(&priv->stats_report_timer); in gve_free_stats_report()
269 dma_free_coherent(&priv->pdev->dev, priv->stats_report_len, in gve_free_stats_report()
270 priv->stats_report, priv->stats_report_bus); in gve_free_stats_report()
271 priv->stats_report = NULL; in gve_free_stats_report()
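The stats-report plumbing above combines timer_setup(), a callback that re-arms itself via mod_timer()/round_jiffies(), and del_timer_sync() before the backing memory is freed. A minimal sketch with a hypothetical priv structure and an arbitrary period:

#include <linux/timer.h>
#include <linux/jiffies.h>

struct my_priv {
	struct timer_list stats_report_timer;
	unsigned long stats_report_timer_period;	/* in msecs */
};

static void my_stats_report_timer(struct timer_list *t)
{
	struct my_priv *priv = from_timer(priv, t, stats_report_timer);

	/* ...queue the report work here..., then re-arm the timer: */
	mod_timer(&priv->stats_report_timer,
		  round_jiffies(jiffies +
			msecs_to_jiffies(priv->stats_report_timer_period)));
}

static void my_stats_report_init(struct my_priv *priv)
{
	timer_setup(&priv->stats_report_timer, my_stats_report_timer, 0);
	priv->stats_report_timer_period = 20000;	/* example value only */
}

static void my_stats_report_fini(struct my_priv *priv)
{
	del_timer_sync(&priv->stats_report_timer);	/* waits out a running callback */
}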
278 queue_work(priv->gve_wq, &priv->service_task); in gve_mgmnt_intr()
285 struct gve_priv *priv = block->priv; in gve_intr()
288 napi_schedule_irqoff(&block->napi); in gve_intr()
297 napi_schedule_irqoff(&block->napi); in gve_intr_dqo()
322 priv = block->priv; in gve_napi_poll()
324 if (block->tx) { in gve_napi_poll()
325 if (block->tx->q_num < priv->tx_cfg.num_queues) in gve_napi_poll()
334 if (block->rx) { in gve_napi_poll()
342 /* Complete processing - don't unmask irq if busy polling is enabled */ in gve_napi_poll()
352 if (block->tx) in gve_napi_poll()
353 reschedule |= gve_tx_clean_pending(priv, block->tx); in gve_napi_poll()
354 if (block->rx) in gve_napi_poll()
355 reschedule |= gve_rx_work_pending(block->rx); in gve_napi_poll()
367 struct gve_priv *priv = block->priv; in gve_napi_poll_dqo()
371 if (block->tx) in gve_napi_poll_dqo()
377 if (block->rx) { in gve_napi_poll_dqo()
386 if (likely(gve_is_napi_on_home_cpu(priv, block->irq))) in gve_napi_poll_dqo()
395 work_done--; in gve_napi_poll_dqo()
402 * PCI MSI-X PBA feature. in gve_napi_poll_dqo()
416 int num_vecs_requested = priv->num_ntfy_blks + 1; in gve_alloc_notify_blocks()
422 priv->msix_vectors = kvcalloc(num_vecs_requested, in gve_alloc_notify_blocks()
423 sizeof(*priv->msix_vectors), GFP_KERNEL); in gve_alloc_notify_blocks()
424 if (!priv->msix_vectors) in gve_alloc_notify_blocks()
425 return -ENOMEM; in gve_alloc_notify_blocks()
427 priv->msix_vectors[i].entry = i; in gve_alloc_notify_blocks()
428 vecs_enabled = pci_enable_msix_range(priv->pdev, priv->msix_vectors, in gve_alloc_notify_blocks()
431 dev_err(&priv->pdev->dev, "Could not enable min msix %d/%d\n", in gve_alloc_notify_blocks()
437 int new_num_ntfy_blks = (vecs_enabled - 1) & ~0x1; in gve_alloc_notify_blocks()
441 priv->num_ntfy_blks = new_num_ntfy_blks; in gve_alloc_notify_blocks()
442 priv->mgmt_msix_idx = priv->num_ntfy_blks; in gve_alloc_notify_blocks()
443 priv->tx_cfg.max_queues = min_t(int, priv->tx_cfg.max_queues, in gve_alloc_notify_blocks()
445 priv->rx_cfg.max_queues = min_t(int, priv->rx_cfg.max_queues, in gve_alloc_notify_blocks()
447 dev_err(&priv->pdev->dev, in gve_alloc_notify_blocks()
448 …d not enable desired msix, only enabled %d, adjusting tx max queues to %d, and rx max queues to %d… in gve_alloc_notify_blocks()
449 vecs_enabled, priv->tx_cfg.max_queues, in gve_alloc_notify_blocks()
450 priv->rx_cfg.max_queues); in gve_alloc_notify_blocks()
451 if (priv->tx_cfg.num_queues > priv->tx_cfg.max_queues) in gve_alloc_notify_blocks()
452 priv->tx_cfg.num_queues = priv->tx_cfg.max_queues; in gve_alloc_notify_blocks()
453 if (priv->rx_cfg.num_queues > priv->rx_cfg.max_queues) in gve_alloc_notify_blocks()
454 priv->rx_cfg.num_queues = priv->rx_cfg.max_queues; in gve_alloc_notify_blocks()
456 /* Half the notification blocks go to TX and half to RX */ in gve_alloc_notify_blocks()
457 active_cpus = min_t(int, priv->num_ntfy_blks / 2, num_online_cpus()); in gve_alloc_notify_blocks()
459 /* Setup Management Vector - the last vector */ in gve_alloc_notify_blocks()
460 snprintf(priv->mgmt_msix_name, sizeof(priv->mgmt_msix_name), "gve-mgmnt@pci:%s", in gve_alloc_notify_blocks()
461 pci_name(priv->pdev)); in gve_alloc_notify_blocks()
462 err = request_irq(priv->msix_vectors[priv->mgmt_msix_idx].vector, in gve_alloc_notify_blocks()
463 gve_mgmnt_intr, 0, priv->mgmt_msix_name, priv); in gve_alloc_notify_blocks()
465 dev_err(&priv->pdev->dev, "Did not receive management vector.\n"); in gve_alloc_notify_blocks()
468 priv->irq_db_indices = in gve_alloc_notify_blocks()
469 dma_alloc_coherent(&priv->pdev->dev, in gve_alloc_notify_blocks()
470 priv->num_ntfy_blks * in gve_alloc_notify_blocks()
471 sizeof(*priv->irq_db_indices), in gve_alloc_notify_blocks()
472 &priv->irq_db_indices_bus, GFP_KERNEL); in gve_alloc_notify_blocks()
473 if (!priv->irq_db_indices) { in gve_alloc_notify_blocks()
474 err = -ENOMEM; in gve_alloc_notify_blocks()
478 priv->ntfy_blocks = kvzalloc(priv->num_ntfy_blks * in gve_alloc_notify_blocks()
479 sizeof(*priv->ntfy_blocks), GFP_KERNEL); in gve_alloc_notify_blocks()
480 if (!priv->ntfy_blocks) { in gve_alloc_notify_blocks()
481 err = -ENOMEM; in gve_alloc_notify_blocks()
485 /* Setup the other blocks - the first n-1 vectors */ in gve_alloc_notify_blocks()
486 for (i = 0; i < priv->num_ntfy_blks; i++) { in gve_alloc_notify_blocks()
487 struct gve_notify_block *block = &priv->ntfy_blocks[i]; in gve_alloc_notify_blocks()
490 snprintf(block->name, sizeof(block->name), "gve-ntfy-blk%d@pci:%s", in gve_alloc_notify_blocks()
491 i, pci_name(priv->pdev)); in gve_alloc_notify_blocks()
492 block->priv = priv; in gve_alloc_notify_blocks()
493 err = request_irq(priv->msix_vectors[msix_idx].vector, in gve_alloc_notify_blocks()
495 0, block->name, block); in gve_alloc_notify_blocks()
497 dev_err(&priv->pdev->dev, in gve_alloc_notify_blocks()
501 block->irq = priv->msix_vectors[msix_idx].vector; in gve_alloc_notify_blocks()
502 irq_set_affinity_hint(priv->msix_vectors[msix_idx].vector, in gve_alloc_notify_blocks()
504 block->irq_db_index = &priv->irq_db_indices[i].index; in gve_alloc_notify_blocks()
509 struct gve_notify_block *block = &priv->ntfy_blocks[j]; in gve_alloc_notify_blocks()
512 irq_set_affinity_hint(priv->msix_vectors[msix_idx].vector, in gve_alloc_notify_blocks()
514 free_irq(priv->msix_vectors[msix_idx].vector, block); in gve_alloc_notify_blocks()
515 block->irq = 0; in gve_alloc_notify_blocks()
517 kvfree(priv->ntfy_blocks); in gve_alloc_notify_blocks()
518 priv->ntfy_blocks = NULL; in gve_alloc_notify_blocks()
520 dma_free_coherent(&priv->pdev->dev, priv->num_ntfy_blks * in gve_alloc_notify_blocks()
521 sizeof(*priv->irq_db_indices), in gve_alloc_notify_blocks()
522 priv->irq_db_indices, priv->irq_db_indices_bus); in gve_alloc_notify_blocks()
523 priv->irq_db_indices = NULL; in gve_alloc_notify_blocks()
525 free_irq(priv->msix_vectors[priv->mgmt_msix_idx].vector, priv); in gve_alloc_notify_blocks()
527 pci_disable_msix(priv->pdev); in gve_alloc_notify_blocks()
529 kvfree(priv->msix_vectors); in gve_alloc_notify_blocks()
530 priv->msix_vectors = NULL; in gve_alloc_notify_blocks()
538 if (!priv->msix_vectors) in gve_free_notify_blocks()
542 for (i = 0; i < priv->num_ntfy_blks; i++) { in gve_free_notify_blocks()
543 struct gve_notify_block *block = &priv->ntfy_blocks[i]; in gve_free_notify_blocks()
546 irq_set_affinity_hint(priv->msix_vectors[msix_idx].vector, in gve_free_notify_blocks()
548 free_irq(priv->msix_vectors[msix_idx].vector, block); in gve_free_notify_blocks()
549 block->irq = 0; in gve_free_notify_blocks()
551 free_irq(priv->msix_vectors[priv->mgmt_msix_idx].vector, priv); in gve_free_notify_blocks()
552 kvfree(priv->ntfy_blocks); in gve_free_notify_blocks()
553 priv->ntfy_blocks = NULL; in gve_free_notify_blocks()
554 dma_free_coherent(&priv->pdev->dev, priv->num_ntfy_blks * in gve_free_notify_blocks()
555 sizeof(*priv->irq_db_indices), in gve_free_notify_blocks()
556 priv->irq_db_indices, priv->irq_db_indices_bus); in gve_free_notify_blocks()
557 priv->irq_db_indices = NULL; in gve_free_notify_blocks()
558 pci_disable_msix(priv->pdev); in gve_free_notify_blocks()
559 kvfree(priv->msix_vectors); in gve_free_notify_blocks()
560 priv->msix_vectors = NULL; in gve_free_notify_blocks()
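gve_alloc_notify_blocks() follows the usual MSI-X bring-up shape: fill the msix_entry indices, pci_enable_msix_range(), request_irq() per vector, set affinity hints, and unwind everything in reverse order on failure. A simplified sketch; unlike the driver, it uses one shared cookie and a fixed IRQ name for every vector.

#include <linux/pci.h>
#include <linux/interrupt.h>

static int my_enable_msix(struct pci_dev *pdev, struct msix_entry *entries,
			  int nvecs, irq_handler_t handler, void *cookie)
{
	int i, got, err;

	for (i = 0; i < nvecs; i++)
		entries[i].entry = i;

	/* Ask for exactly nvecs; a driver may accept fewer and shrink its queues. */
	got = pci_enable_msix_range(pdev, entries, nvecs, nvecs);
	if (got < 0)
		return got;

	for (i = 0; i < got; i++) {
		err = request_irq(entries[i].vector, handler, 0, "my-drv", cookie);
		if (err)
			goto unwind;
	}
	return 0;

unwind:
	while (--i >= 0)
		free_irq(entries[i].vector, cookie);
	pci_disable_msix(pdev);
	return err;
}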
580 priv->counter_array_bus, in gve_setup_device_resources()
581 priv->num_event_counters, in gve_setup_device_resources()
582 priv->irq_db_indices_bus, in gve_setup_device_resources()
583 priv->num_ntfy_blks); in gve_setup_device_resources()
585 dev_err(&priv->pdev->dev, in gve_setup_device_resources()
587 err = -ENXIO; in gve_setup_device_resources()
592 priv->ptype_lut_dqo = kvzalloc(sizeof(*priv->ptype_lut_dqo), in gve_setup_device_resources()
594 if (!priv->ptype_lut_dqo) { in gve_setup_device_resources()
595 err = -ENOMEM; in gve_setup_device_resources()
598 err = gve_adminq_get_ptype_map_dqo(priv, priv->ptype_lut_dqo); in gve_setup_device_resources()
600 dev_err(&priv->pdev->dev, in gve_setup_device_resources()
606 err = gve_adminq_report_stats(priv, priv->stats_report_len, in gve_setup_device_resources()
607 priv->stats_report_bus, in gve_setup_device_resources()
610 dev_err(&priv->pdev->dev, in gve_setup_device_resources()
616 kvfree(priv->ptype_lut_dqo); in gve_setup_device_resources()
617 priv->ptype_lut_dqo = NULL; in gve_setup_device_resources()
640 dev_err(&priv->pdev->dev, in gve_teardown_device_resources()
647 dev_err(&priv->pdev->dev, in gve_teardown_device_resources()
653 dev_err(&priv->pdev->dev, in gve_teardown_device_resources()
660 kvfree(priv->ptype_lut_dqo); in gve_teardown_device_resources()
661 priv->ptype_lut_dqo = NULL; in gve_teardown_device_resources()
678 err = gve_adminq_unregister_page_list(priv, qpl->id); in gve_unregister_qpl()
680 netif_err(priv, drv, priv->dev, in gve_unregister_qpl()
682 qpl->id); in gve_unregister_qpl()
686 priv->num_registered_pages -= qpl->num_entries; in gve_unregister_qpl()
699 pages = qpl->num_entries; in gve_register_qpl()
701 if (pages + priv->num_registered_pages > priv->max_registered_pages) { in gve_register_qpl()
702 netif_err(priv, drv, priv->dev, in gve_register_qpl()
704 pages + priv->num_registered_pages, in gve_register_qpl()
705 priv->max_registered_pages); in gve_register_qpl()
706 return -EINVAL; in gve_register_qpl()
711 netif_err(priv, drv, priv->dev, in gve_register_qpl()
713 qpl->id); in gve_register_qpl()
717 priv->num_registered_pages += pages; in gve_register_qpl()
723 struct gve_tx_ring *tx = &priv->tx[idx]; in gve_tx_get_qpl()
726 return tx->tx_fifo.qpl; in gve_tx_get_qpl()
728 return tx->dqo.qpl; in gve_tx_get_qpl()
733 struct gve_rx_ring *rx = &priv->rx[idx]; in gve_rx_get_qpl() local
736 return rx->data.qpl; in gve_rx_get_qpl()
738 return rx->dqo.qpl; in gve_rx_get_qpl()
750 /* This failure will trigger a reset - no need to clean up */ in gve_register_xdp_qpls()
763 num_tx_qpls = gve_num_tx_qpls(&priv->tx_cfg, gve_num_xdp_qpls(priv), in gve_register_qpls()
765 num_rx_qpls = gve_num_rx_qpls(&priv->rx_cfg, gve_is_qpl(priv)); in gve_register_qpls()
791 /* This failure will trigger a reset - no need to clean */ in gve_unregister_xdp_qpls()
804 num_tx_qpls = gve_num_tx_qpls(&priv->tx_cfg, gve_num_xdp_qpls(priv), in gve_unregister_qpls()
806 num_rx_qpls = gve_num_rx_qpls(&priv->rx_cfg, gve_is_qpl(priv)); in gve_unregister_qpls()
810 /* This failure will trigger a reset - no need to clean */ in gve_unregister_qpls()
817 /* This failure will trigger a reset - no need to clean */ in gve_unregister_qpls()
830 priv->num_xdp_queues); in gve_create_xdp_rings()
832 netif_err(priv, drv, priv->dev, "failed to create %d XDP tx queues\n", in gve_create_xdp_rings()
833 priv->num_xdp_queues); in gve_create_xdp_rings()
834 /* This failure will trigger a reset - no need to clean in gve_create_xdp_rings()
839 netif_dbg(priv, drv, priv->dev, "created %d XDP tx queues\n", in gve_create_xdp_rings()
840 priv->num_xdp_queues); in gve_create_xdp_rings()
853 netif_err(priv, drv, priv->dev, "failed to create %d tx queues\n", in gve_create_rings()
855 /* This failure will trigger a reset - no need to clean in gve_create_rings()
860 netif_dbg(priv, drv, priv->dev, "created %d tx queues\n", in gve_create_rings()
863 err = gve_adminq_create_rx_queues(priv, priv->rx_cfg.num_queues); in gve_create_rings()
865 netif_err(priv, drv, priv->dev, "failed to create %d rx queues\n", in gve_create_rings()
866 priv->rx_cfg.num_queues); in gve_create_rings()
867 /* This failure will trigger a reset - no need to clean in gve_create_rings()
872 netif_dbg(priv, drv, priv->dev, "created %d rx queues\n", in gve_create_rings()
873 priv->rx_cfg.num_queues); in gve_create_rings()
876 /* Rx data ring has been prefilled with packet buffers at queue in gve_create_rings()
882 for (i = 0; i < priv->rx_cfg.num_queues; i++) in gve_create_rings()
883 gve_rx_write_doorbell(priv, &priv->rx[i]); in gve_create_rings()
885 for (i = 0; i < priv->rx_cfg.num_queues; i++) { in gve_create_rings()
887 gve_rx_post_buffers_dqo(&priv->rx[i]); in gve_create_rings()
900 for (i = start_id; i < start_id + priv->num_xdp_queues; i++) { in init_xdp_sync_stats()
903 u64_stats_init(&priv->tx[i].statss); in init_xdp_sync_stats()
904 priv->tx[i].ntfy_id = ntfy_idx; in init_xdp_sync_stats()
912 for (i = 0; i < priv->tx_cfg.num_queues; i++) in gve_init_sync_stats()
913 u64_stats_init(&priv->tx[i].statss); in gve_init_sync_stats()
915 /* Init stats for XDP TX queues */ in gve_init_sync_stats()
918 for (i = 0; i < priv->rx_cfg.num_queues; i++) in gve_init_sync_stats()
919 u64_stats_init(&priv->rx[i].statss); in gve_init_sync_stats()
925 cfg->qcfg = &priv->tx_cfg; in gve_tx_get_curr_alloc_cfg()
926 cfg->raw_addressing = !gve_is_qpl(priv); in gve_tx_get_curr_alloc_cfg()
927 cfg->ring_size = priv->tx_desc_cnt; in gve_tx_get_curr_alloc_cfg()
928 cfg->start_idx = 0; in gve_tx_get_curr_alloc_cfg()
929 cfg->num_rings = gve_num_tx_queues(priv); in gve_tx_get_curr_alloc_cfg()
930 cfg->tx = priv->tx; in gve_tx_get_curr_alloc_cfg()
937 if (!priv->tx) in gve_tx_stop_rings()
966 if (!priv->num_xdp_queues) in gve_alloc_xdp_rings()
971 cfg.num_rings = priv->num_xdp_queues; in gve_alloc_xdp_rings()
1021 priv->num_xdp_queues); in gve_destroy_xdp_rings()
1023 netif_err(priv, drv, priv->dev, in gve_destroy_xdp_rings()
1024 "failed to destroy XDP queues\n"); in gve_destroy_xdp_rings()
1025 /* This failure will trigger a reset - no need to clean up */ in gve_destroy_xdp_rings()
1028 netif_dbg(priv, drv, priv->dev, "destroyed XDP queues\n"); in gve_destroy_xdp_rings()
1040 netif_err(priv, drv, priv->dev, in gve_destroy_rings()
1041 "failed to destroy tx queues\n"); in gve_destroy_rings()
1042 /* This failure will trigger a reset - no need to clean up */ in gve_destroy_rings()
1045 netif_dbg(priv, drv, priv->dev, "destroyed tx queues\n"); in gve_destroy_rings()
1046 err = gve_adminq_destroy_rx_queues(priv, priv->rx_cfg.num_queues); in gve_destroy_rings()
1048 netif_err(priv, drv, priv->dev, in gve_destroy_rings()
1049 "failed to destroy rx queues\n"); in gve_destroy_rings()
1050 /* This failure will trigger a reset - no need to clean up */ in gve_destroy_rings()
1053 netif_dbg(priv, drv, priv->dev, "destroyed rx queues\n"); in gve_destroy_rings()
1063 cfg.num_rings = priv->num_xdp_queues; in gve_free_xdp_rings()
1065 if (priv->tx) { in gve_free_xdp_rings()
1090 priv->page_alloc_fail++; in gve_alloc_page()
1091 return -ENOMEM; in gve_alloc_page()
1095 priv->dma_mapping_error++; in gve_alloc_page()
1097 return -ENOMEM; in gve_alloc_page()
1113 qpl->id = id; in gve_alloc_queue_page_list()
1114 qpl->num_entries = 0; in gve_alloc_queue_page_list()
1115 qpl->pages = kvcalloc(pages, sizeof(*qpl->pages), GFP_KERNEL); in gve_alloc_queue_page_list()
1116 if (!qpl->pages) in gve_alloc_queue_page_list()
1119 qpl->page_buses = kvcalloc(pages, sizeof(*qpl->page_buses), GFP_KERNEL); in gve_alloc_queue_page_list()
1120 if (!qpl->page_buses) in gve_alloc_queue_page_list()
1124 err = gve_alloc_page(priv, &priv->pdev->dev, &qpl->pages[i], in gve_alloc_queue_page_list()
1125 &qpl->page_buses[i], in gve_alloc_queue_page_list()
1129 qpl->num_entries++; in gve_alloc_queue_page_list()
1156 if (!qpl->pages) in gve_free_queue_page_list()
1158 if (!qpl->page_buses) in gve_free_queue_page_list()
1161 for (i = 0; i < qpl->num_entries; i++) in gve_free_queue_page_list()
1162 gve_free_page(&priv->pdev->dev, qpl->pages[i], in gve_free_queue_page_list()
1163 qpl->page_buses[i], gve_qpl_dma_dir(priv, id)); in gve_free_queue_page_list()
1165 kvfree(qpl->page_buses); in gve_free_queue_page_list()
1166 qpl->page_buses = NULL; in gve_free_queue_page_list()
1168 kvfree(qpl->pages); in gve_free_queue_page_list()
1169 qpl->pages = NULL; in gve_free_queue_page_list()
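gve_alloc_queue_page_list() above allocates and DMA-maps one page at a time and stops at the first failure, and gve_free_queue_page_list() walks the same arrays in reverse. A minimal sketch of that map/unwind loop; names are illustrative.

#include <linux/dma-mapping.h>
#include <linux/gfp.h>
#include <linux/mm.h>

static int my_map_page_list(struct device *dev, struct page **pages,
			    dma_addr_t *buses, int npages,
			    enum dma_data_direction dir)
{
	int i;

	for (i = 0; i < npages; i++) {
		pages[i] = alloc_page(GFP_KERNEL);
		if (!pages[i])
			goto unwind;
		buses[i] = dma_map_page(dev, pages[i], 0, PAGE_SIZE, dir);
		if (dma_mapping_error(dev, buses[i])) {
			put_page(pages[i]);
			goto unwind;
		}
	}
	return 0;

unwind:
	while (--i >= 0) {
		dma_unmap_page(dev, buses[i], PAGE_SIZE, dir);
		put_page(pages[i]);
	}
	return -ENOMEM;
}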
1181 queue_work(priv->gve_wq, &priv->service_task); in gve_schedule_reset()
1192 struct gve_rx_ring *rx; in gve_reg_xdp_info() local
1197 if (!priv->num_xdp_queues) in gve_reg_xdp_info()
1200 for (i = 0; i < priv->rx_cfg.num_queues; i++) { in gve_reg_xdp_info()
1201 rx = &priv->rx[i]; in gve_reg_xdp_info()
1202 napi = &priv->ntfy_blocks[rx->ntfy_id].napi; in gve_reg_xdp_info()
1204 err = xdp_rxq_info_reg(&rx->xdp_rxq, dev, i, in gve_reg_xdp_info()
1205 napi->napi_id); in gve_reg_xdp_info()
1208 err = xdp_rxq_info_reg_mem_model(&rx->xdp_rxq, in gve_reg_xdp_info()
1212 rx->xsk_pool = xsk_get_pool_from_qid(dev, i); in gve_reg_xdp_info()
1213 if (rx->xsk_pool) { in gve_reg_xdp_info()
1214 err = xdp_rxq_info_reg(&rx->xsk_rxq, dev, i, in gve_reg_xdp_info()
1215 napi->napi_id); in gve_reg_xdp_info()
1218 err = xdp_rxq_info_reg_mem_model(&rx->xsk_rxq, in gve_reg_xdp_info()
1222 xsk_pool_set_rxq_info(rx->xsk_pool, in gve_reg_xdp_info()
1223 &rx->xsk_rxq); in gve_reg_xdp_info()
1227 for (i = 0; i < priv->num_xdp_queues; i++) { in gve_reg_xdp_info()
1229 priv->tx[tx_qid].xsk_pool = xsk_get_pool_from_qid(dev, i); in gve_reg_xdp_info()
1234 for (j = i; j >= 0; j--) { in gve_reg_xdp_info()
1235 rx = &priv->rx[j]; in gve_reg_xdp_info()
1236 if (xdp_rxq_info_is_reg(&rx->xdp_rxq)) in gve_reg_xdp_info()
1237 xdp_rxq_info_unreg(&rx->xdp_rxq); in gve_reg_xdp_info()
1238 if (xdp_rxq_info_is_reg(&rx->xsk_rxq)) in gve_reg_xdp_info()
1239 xdp_rxq_info_unreg(&rx->xsk_rxq); in gve_reg_xdp_info()
1248 if (!priv->num_xdp_queues) in gve_unreg_xdp_info()
1251 for (i = 0; i < priv->rx_cfg.num_queues; i++) { in gve_unreg_xdp_info()
1252 struct gve_rx_ring *rx = &priv->rx[i]; in gve_unreg_xdp_info() local
1254 xdp_rxq_info_unreg(&rx->xdp_rxq); in gve_unreg_xdp_info()
1255 if (rx->xsk_pool) { in gve_unreg_xdp_info()
1256 xdp_rxq_info_unreg(&rx->xsk_rxq); in gve_unreg_xdp_info()
1257 rx->xsk_pool = NULL; in gve_unreg_xdp_info()
1261 for (i = 0; i < priv->num_xdp_queues; i++) { in gve_unreg_xdp_info()
1263 priv->tx[tx_qid].xsk_pool = NULL; in gve_unreg_xdp_info()
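gve_reg_xdp_info()/gve_unreg_xdp_info() register one xdp_rxq_info per RX queue, attach a memory model, and tear both down symmetrically (including on the error path). A minimal sketch of a single register/unregister pair, assuming page-allocator-backed RX buffers:

#include <net/xdp.h>

static int my_reg_xdp_rxq(struct xdp_rxq_info *rxq, struct net_device *dev,
			  u32 qid, unsigned int napi_id)
{
	int err;

	err = xdp_rxq_info_reg(rxq, dev, qid, napi_id);
	if (err)
		return err;

	err = xdp_rxq_info_reg_mem_model(rxq, MEM_TYPE_PAGE_SHARED, NULL);
	if (err)
		xdp_rxq_info_unreg(rxq);
	return err;
}

static void my_unreg_xdp_rxq(struct xdp_rxq_info *rxq)
{
	if (xdp_rxq_info_is_reg(rxq))
		xdp_rxq_info_unreg(rxq);
}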
1271 for (i = 0; i < priv->rx_cfg.num_queues; i++) in gve_drain_page_cache()
1272 page_frag_cache_drain(&priv->rx[i].page_cache); in gve_drain_page_cache()
1278 cfg->qcfg = &priv->rx_cfg; in gve_rx_get_curr_alloc_cfg()
1279 cfg->qcfg_tx = &priv->tx_cfg; in gve_rx_get_curr_alloc_cfg()
1280 cfg->raw_addressing = !gve_is_qpl(priv); in gve_rx_get_curr_alloc_cfg()
1281 cfg->enable_header_split = priv->header_split_enabled; in gve_rx_get_curr_alloc_cfg()
1282 cfg->ring_size = priv->rx_desc_cnt; in gve_rx_get_curr_alloc_cfg()
1283 cfg->packet_buffer_size = gve_is_gqi(priv) ? in gve_rx_get_curr_alloc_cfg()
1285 priv->data_buffer_size_dqo; in gve_rx_get_curr_alloc_cfg()
1286 cfg->rx = priv->rx; in gve_rx_get_curr_alloc_cfg()
1325 if (!priv->rx) in gve_rx_stop_rings()
1339 priv->tx = NULL; in gve_queues_mem_remove()
1340 priv->rx = NULL; in gve_queues_mem_remove()
1343 /* The passed-in queue memory is stored into priv and the queues are made live.
1344 * No memory is allocated. Passed-in memory is freed on errors.
1350 struct net_device *dev = priv->dev; in gve_queues_start()
1354 priv->tx = tx_alloc_cfg->tx; in gve_queues_start()
1355 priv->rx = rx_alloc_cfg->rx; in gve_queues_start()
1358 priv->tx_cfg = *tx_alloc_cfg->qcfg; in gve_queues_start()
1359 priv->rx_cfg = *rx_alloc_cfg->qcfg; in gve_queues_start()
1360 priv->tx_desc_cnt = tx_alloc_cfg->ring_size; in gve_queues_start()
1361 priv->rx_desc_cnt = rx_alloc_cfg->ring_size; in gve_queues_start()
1363 if (priv->xdp_prog) in gve_queues_start()
1364 priv->num_xdp_queues = priv->rx_cfg.num_queues; in gve_queues_start()
1366 priv->num_xdp_queues = 0; in gve_queues_start()
1368 gve_tx_start_rings(priv, 0, tx_alloc_cfg->num_rings); in gve_queues_start()
1369 gve_rx_start_rings(priv, rx_alloc_cfg->qcfg->num_queues); in gve_queues_start()
1372 err = netif_set_real_num_tx_queues(dev, priv->tx_cfg.num_queues); in gve_queues_start()
1375 err = netif_set_real_num_rx_queues(dev, priv->rx_cfg.num_queues); in gve_queues_start()
1387 priv->header_split_enabled = rx_alloc_cfg->enable_header_split; in gve_queues_start()
1388 priv->data_buffer_size_dqo = rx_alloc_cfg->packet_buffer_size; in gve_queues_start()
1397 mod_timer(&priv->stats_report_timer, in gve_queues_start()
1399 msecs_to_jiffies(priv->stats_report_timer_period))); in gve_queues_start()
1402 queue_work(priv->gve_wq, &priv->service_task); in gve_queues_start()
1403 priv->interface_up_cnt++; in gve_queues_start()
1416 gve_rx_stop_rings(priv, priv->rx_cfg.num_queues); in gve_queues_start()
1448 netif_carrier_off(priv->dev); in gve_queues_stop()
1460 del_timer_sync(&priv->stats_report_timer); in gve_queues_stop()
1465 gve_rx_stop_rings(priv, priv->rx_cfg.num_queues); in gve_queues_stop()
1467 priv->interface_down_cnt++; in gve_queues_stop()
1509 priv->num_xdp_queues = 0; in gve_remove_xdp_queues()
1517 priv->num_xdp_queues = priv->rx_cfg.num_queues; in gve_add_xdp_queues()
1523 err = gve_reg_xdp_info(priv, priv->dev); in gve_add_xdp_queues()
1540 priv->num_xdp_queues = 0; in gve_add_xdp_queues()
1549 if (link_status == netif_carrier_ok(priv->dev)) in gve_handle_link_status()
1553 netdev_info(priv->dev, "Device link is up.\n"); in gve_handle_link_status()
1554 netif_carrier_on(priv->dev); in gve_handle_link_status()
1556 netdev_info(priv->dev, "Device link is down.\n"); in gve_handle_link_status()
1557 netif_carrier_off(priv->dev); in gve_handle_link_status()
1568 old_prog = READ_ONCE(priv->xdp_prog); in gve_set_xdp()
1569 if (!netif_running(priv->dev)) { in gve_set_xdp()
1570 WRITE_ONCE(priv->xdp_prog, prog); in gve_set_xdp()
1578 // Allocate XDP TX queues if an XDP program is in gve_set_xdp()
1584 // Remove XDP TX queues if an XDP program is in gve_set_xdp()
1590 WRITE_ONCE(priv->xdp_prog, prog); in gve_set_xdp()
1596 status = ioread32be(&priv->reg_bar0->device_status); in gve_set_xdp()
1607 struct gve_rx_ring *rx; in gve_xsk_pool_enable() local
1611 if (qid >= priv->rx_cfg.num_queues) { in gve_xsk_pool_enable()
1612 dev_err(&priv->pdev->dev, "xsk pool invalid qid %d", qid); in gve_xsk_pool_enable()
1613 return -EINVAL; in gve_xsk_pool_enable()
1616 priv->dev->max_mtu + sizeof(struct ethhdr)) { in gve_xsk_pool_enable()
1617 dev_err(&priv->pdev->dev, "xsk pool frame_len too small"); in gve_xsk_pool_enable()
1618 return -EINVAL; in gve_xsk_pool_enable()
1621 err = xsk_pool_dma_map(pool, &priv->pdev->dev, in gve_xsk_pool_enable()
1627 if (!priv->xdp_prog) in gve_xsk_pool_enable()
1630 rx = &priv->rx[qid]; in gve_xsk_pool_enable()
1631 napi = &priv->ntfy_blocks[rx->ntfy_id].napi; in gve_xsk_pool_enable()
1632 err = xdp_rxq_info_reg(&rx->xsk_rxq, dev, qid, napi->napi_id); in gve_xsk_pool_enable()
1636 err = xdp_rxq_info_reg_mem_model(&rx->xsk_rxq, in gve_xsk_pool_enable()
1641 xsk_pool_set_rxq_info(pool, &rx->xsk_rxq); in gve_xsk_pool_enable()
1642 rx->xsk_pool = pool; in gve_xsk_pool_enable()
1645 priv->tx[tx_qid].xsk_pool = pool; in gve_xsk_pool_enable()
1649 if (xdp_rxq_info_is_reg(&rx->xsk_rxq)) in gve_xsk_pool_enable()
1650 xdp_rxq_info_unreg(&rx->xsk_rxq); in gve_xsk_pool_enable()
1668 return -EINVAL; in gve_xsk_pool_disable()
1669 if (qid >= priv->rx_cfg.num_queues) in gve_xsk_pool_disable()
1670 return -EINVAL; in gve_xsk_pool_disable()
1673 if (!priv->xdp_prog) in gve_xsk_pool_disable()
1678 priv->rx[qid].xsk_pool = NULL; in gve_xsk_pool_disable()
1679 xdp_rxq_info_unreg(&priv->rx[qid].xsk_rxq); in gve_xsk_pool_disable()
1680 priv->tx[tx_qid].xsk_pool = NULL; in gve_xsk_pool_disable()
1684 napi_rx = &priv->ntfy_blocks[priv->rx[qid].ntfy_id].napi; in gve_xsk_pool_disable()
1685 napi_disable(napi_rx); /* make sure current rx poll is done */ in gve_xsk_pool_disable()
1687 napi_tx = &priv->ntfy_blocks[priv->tx[tx_qid].ntfy_id].napi; in gve_xsk_pool_disable()
1690 priv->rx[qid].xsk_pool = NULL; in gve_xsk_pool_disable()
1691 xdp_rxq_info_unreg(&priv->rx[qid].xsk_rxq); in gve_xsk_pool_disable()
1692 priv->tx[tx_qid].xsk_pool = NULL; in gve_xsk_pool_disable()
1696 if (gve_rx_work_pending(&priv->rx[qid])) in gve_xsk_pool_disable()
1700 if (gve_tx_clean_pending(priv, &priv->tx[tx_qid])) in gve_xsk_pool_disable()
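gve_xsk_pool_enable() DMA-maps the XSK buffer pool, registers a dedicated xsk_rxq with the MEM_TYPE_XSK_BUFF_POOL model, and hands that rxq info back to the pool; the disable path unwinds it after quiescing NAPI. A sketch of the enable sequence, passing no special DMA attributes (the attributes the driver actually uses are not visible in these fragments):

#include <net/xdp.h>
#include <net/xdp_sock_drv.h>

static int my_attach_xsk_pool(struct device *dma_dev, struct xsk_buff_pool *pool,
			      struct xdp_rxq_info *xsk_rxq, struct net_device *dev,
			      u16 qid, unsigned int napi_id)
{
	int err;

	err = xsk_pool_dma_map(pool, dma_dev, 0);
	if (err)
		return err;

	err = xdp_rxq_info_reg(xsk_rxq, dev, qid, napi_id);
	if (err)
		goto unmap;

	err = xdp_rxq_info_reg_mem_model(xsk_rxq, MEM_TYPE_XSK_BUFF_POOL, NULL);
	if (err)
		goto unreg;

	xsk_pool_set_rxq_info(pool, xsk_rxq);
	return 0;

unreg:
	xdp_rxq_info_unreg(xsk_rxq);
unmap:
	xsk_pool_dma_unmap(pool, 0);
	return err;
}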
1714 if (queue_id >= priv->rx_cfg.num_queues || !priv->xdp_prog) in gve_xsk_wakeup()
1715 return -EINVAL; in gve_xsk_wakeup()
1718 struct gve_tx_ring *tx = &priv->tx[tx_queue_id]; in gve_xsk_wakeup()
1720 &priv->ntfy_blocks[tx->ntfy_id].napi; in gve_xsk_wakeup()
1729 tx->xdp_xsk_wakeup++; in gve_xsk_wakeup()
1739 if (dev->features & NETIF_F_LRO) { in verify_xdp_configuration()
1741 return -EOPNOTSUPP; in verify_xdp_configuration()
1744 if (priv->queue_format != GVE_GQI_QPL_FORMAT) { in verify_xdp_configuration()
1746 priv->queue_format); in verify_xdp_configuration()
1747 return -EOPNOTSUPP; in verify_xdp_configuration()
1750 if (dev->mtu > GVE_DEFAULT_RX_BUFFER_SIZE - sizeof(struct ethhdr) - GVE_RX_PAD) { in verify_xdp_configuration()
1752 dev->mtu); in verify_xdp_configuration()
1753 return -EOPNOTSUPP; in verify_xdp_configuration()
1756 if (priv->rx_cfg.num_queues != priv->tx_cfg.num_queues || in verify_xdp_configuration()
1757 (2 * priv->tx_cfg.num_queues > priv->tx_cfg.max_queues)) { in verify_xdp_configuration()
1758 …RX queues %d should be equal to the number of configured TX queues %d and the number of configured… in verify_xdp_configuration()
1759 priv->rx_cfg.num_queues, in verify_xdp_configuration()
1760 priv->tx_cfg.num_queues, in verify_xdp_configuration()
1761 priv->tx_cfg.max_queues); in verify_xdp_configuration()
1762 return -EINVAL; in verify_xdp_configuration()
1775 switch (xdp->command) { in gve_xdp()
1777 return gve_set_xdp(priv, xdp->prog, xdp->extack); in gve_xdp()
1779 if (xdp->xsk.pool) in gve_xdp()
1780 return gve_xsk_pool_enable(dev, xdp->xsk.pool, xdp->xsk.queue_id); in gve_xdp()
1782 return gve_xsk_pool_disable(dev, xdp->xsk.queue_id); in gve_xdp()
1784 return -EINVAL; in gve_xdp()
1790 if (!priv->max_flow_rules) in gve_flow_rules_reset()
1805 netif_err(priv, drv, priv->dev, in gve_adjust_config()
1806 "Adjust config failed to alloc new queues"); in gve_adjust_config()
1811 err = gve_close(priv->dev); in gve_adjust_config()
1813 netif_err(priv, drv, priv->dev, in gve_adjust_config()
1814 "Adjust config failed to close old queues"); in gve_adjust_config()
1822 netif_err(priv, drv, priv->dev, in gve_adjust_config()
1823 "Adjust config failed to start new queues, !!! DISABLING ALL QUEUES !!!\n"); in gve_adjust_config()
1844 /* Relay the new config from ethtool */ in gve_adjust_queues()
1850 if (netif_running(priv->dev)) { in gve_adjust_queues()
1854 /* Set the config for the next up. */ in gve_adjust_queues()
1855 priv->tx_cfg = new_tx_config; in gve_adjust_queues()
1856 priv->rx_cfg = new_rx_config; in gve_adjust_queues()
1865 if (netif_carrier_ok(priv->dev)) in gve_turndown()
1866 netif_carrier_off(priv->dev); in gve_turndown()
1874 struct gve_notify_block *block = &priv->ntfy_blocks[ntfy_idx]; in gve_turndown()
1878 napi_disable(&block->napi); in gve_turndown()
1880 for (idx = 0; idx < priv->rx_cfg.num_queues; idx++) { in gve_turndown()
1882 struct gve_notify_block *block = &priv->ntfy_blocks[ntfy_idx]; in gve_turndown()
1886 napi_disable(&block->napi); in gve_turndown()
1889 /* Stop tx queues */ in gve_turndown()
1890 netif_tx_disable(priv->dev); in gve_turndown()
1900 /* Start the tx queues */ in gve_turnup()
1901 netif_tx_start_all_queues(priv->dev); in gve_turnup()
1903 /* Enable napi and unmask interrupts for all queues */ in gve_turnup()
1906 struct gve_notify_block *block = &priv->ntfy_blocks[ntfy_idx]; in gve_turnup()
1911 napi_enable(&block->napi); in gve_turnup()
1916 priv->tx_coalesce_usecs); in gve_turnup()
1920 * handled by the one-off napi schedule below. Whereas any in gve_turnup()
1924 napi_schedule(&block->napi); in gve_turnup()
1926 for (idx = 0; idx < priv->rx_cfg.num_queues; idx++) { in gve_turnup()
1928 struct gve_notify_block *block = &priv->ntfy_blocks[ntfy_idx]; in gve_turnup()
1933 napi_enable(&block->napi); in gve_turnup()
1938 priv->rx_coalesce_usecs); in gve_turnup()
1942 * handled by the one-off napi schedule below. Whereas any in gve_turnup()
1946 napi_schedule(&block->napi); in gve_turnup()
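gve_turndown()/gve_turnup() quiesce and resume the datapath in the order the fragments show: carrier off and napi_disable() before netif_tx_disable() on the way down; netif_tx_start_all_queues() first, then napi_enable() plus a one-off napi_schedule() on the way back up so work that arrived while a queue was down gets polled. A stripped-down sketch (the driver's interrupt-doorbell and coalescing writes are omitted, and link state is reported separately):

#include <linux/netdevice.h>

static void my_turndown(struct net_device *dev, struct napi_struct *napis, int n)
{
	int i;

	if (netif_carrier_ok(dev))
		netif_carrier_off(dev);
	for (i = 0; i < n; i++)
		napi_disable(&napis[i]);	/* waits for in-flight polls */
	netif_tx_disable(dev);
}

static void my_turnup(struct net_device *dev, struct napi_struct *napis, int n)
{
	int i;

	netif_tx_start_all_queues(dev);
	for (i = 0; i < n; i++) {
		napi_enable(&napis[i]);
		/* Catch anything that arrived while interrupts were masked. */
		napi_schedule(&napis[i]);
	}
}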
1957 status = ioread32be(&priv->reg_bar0->device_status); in gve_turnup_and_check_status()
1972 if (txqueue > priv->tx_cfg.num_queues) in gve_tx_timeout()
1976 if (ntfy_idx >= priv->num_ntfy_blks) in gve_tx_timeout()
1979 block = &priv->ntfy_blocks[ntfy_idx]; in gve_tx_timeout()
1980 tx = block->tx; in gve_tx_timeout()
1983 if (tx->last_kick_msec + MIN_TX_TIMEOUT_GAP > current_time) in gve_tx_timeout()
1990 if (last_nic_done - tx->done) { in gve_tx_timeout()
1993 napi_schedule(&block->napi); in gve_tx_timeout()
1994 tx->last_kick_msec = current_time; in gve_tx_timeout()
2003 tx->queue_timeout++; in gve_tx_timeout()
2004 priv->tx_timeo_cnt++; in gve_tx_timeout()
2009 if (enable_hsplit && priv->max_rx_buffer_size >= GVE_MAX_RX_BUFFER_SIZE) in gve_get_pkt_buf_size()
2015 /* header-split is not supported on non-DQO_RDA yet even if device advertises it */
2018 return priv->header_buf_size && priv->queue_format == GVE_DQO_RDA_FORMAT; in gve_header_split_supported()
2032 dev_err(&priv->pdev->dev, "Header-split not supported\n"); in gve_set_hsplit_config()
2033 return -EOPNOTSUPP; in gve_set_hsplit_config()
2041 if (enable_hdr_split == priv->header_split_enabled) in gve_set_hsplit_config()
2049 if (netif_running(priv->dev)) in gve_set_hsplit_config()
2057 const netdev_features_t orig_features = netdev->features; in gve_set_features()
2065 if ((netdev->features & NETIF_F_LRO) != (features & NETIF_F_LRO)) { in gve_set_features()
2066 netdev->features ^= NETIF_F_LRO; in gve_set_features()
2073 if ((netdev->features & NETIF_F_NTUPLE) && !(features & NETIF_F_NTUPLE)) { in gve_set_features()
2082 netdev->features = orig_features; in gve_set_features()
2102 dev_info(&priv->pdev->dev, "Device requested reset.\n"); in gve_handle_status()
2106 priv->stats_report_trigger_cnt++; in gve_handle_status()
2129 struct stats *stats = priv->stats_report->stats; in gve_handle_report_stats()
2137 be64_add_cpu(&priv->stats_report->written_count, 1); in gve_handle_report_stats()
2139 if (priv->tx) { in gve_handle_report_stats()
2146 last_completion = priv->tx[idx].done; in gve_handle_report_stats()
2147 tx_frames = priv->tx[idx].req; in gve_handle_report_stats()
2151 start = u64_stats_fetch_begin(&priv->tx[idx].statss); in gve_handle_report_stats()
2152 tx_bytes = priv->tx[idx].bytes_done; in gve_handle_report_stats()
2153 } while (u64_stats_fetch_retry(&priv->tx[idx].statss, start)); in gve_handle_report_stats()
2156 .value = cpu_to_be64(priv->tx[idx].wake_queue), in gve_handle_report_stats()
2161 .value = cpu_to_be64(priv->tx[idx].stop_queue), in gve_handle_report_stats()
2181 .value = cpu_to_be64(priv->tx[idx].queue_timeout), in gve_handle_report_stats()
2186 /* rx stats */ in gve_handle_report_stats()
2187 if (priv->rx) { in gve_handle_report_stats()
2188 for (idx = 0; idx < priv->rx_cfg.num_queues; idx++) { in gve_handle_report_stats()
2191 .value = cpu_to_be64(priv->rx[idx].desc.seqno), in gve_handle_report_stats()
2196 .value = cpu_to_be64(priv->rx[0].fill_cnt), in gve_handle_report_stats()
2208 u32 status = ioread32be(&priv->reg_bar0->device_status); in gve_service_task()
2218 if (priv->queue_format == GVE_GQI_QPL_FORMAT) { in gve_set_netdev_xdp_features()
2219 priv->dev->xdp_features = NETDEV_XDP_ACT_BASIC; in gve_set_netdev_xdp_features()
2220 priv->dev->xdp_features |= NETDEV_XDP_ACT_REDIRECT; in gve_set_netdev_xdp_features()
2221 priv->dev->xdp_features |= NETDEV_XDP_ACT_NDO_XMIT; in gve_set_netdev_xdp_features()
2222 priv->dev->xdp_features |= NETDEV_XDP_ACT_XSK_ZEROCOPY; in gve_set_netdev_xdp_features()
2224 priv->dev->xdp_features = 0; in gve_set_netdev_xdp_features()
2234 err = gve_adminq_alloc(&priv->pdev->dev, priv); in gve_init_priv()
2236 dev_err(&priv->pdev->dev, in gve_init_priv()
2243 dev_err(&priv->pdev->dev, in gve_init_priv()
2248 priv->num_registered_pages = 0; in gve_init_priv()
2253 priv->queue_format = GVE_QUEUE_FORMAT_UNSPECIFIED; in gve_init_priv()
2257 dev_err(&priv->pdev->dev, in gve_init_priv()
2261 priv->dev->mtu = priv->dev->max_mtu; in gve_init_priv()
2262 num_ntfy = pci_msix_vec_count(priv->pdev); in gve_init_priv()
2264 dev_err(&priv->pdev->dev, in gve_init_priv()
2265 "could not count MSI-x vectors: err=%d\n", num_ntfy); in gve_init_priv()
2269 dev_err(&priv->pdev->dev, "gve needs at least %d MSI-x vectors, but only has %d\n", in gve_init_priv()
2271 err = -EINVAL; in gve_init_priv()
2277 netif_set_tso_max_size(priv->dev, GVE_DQO_TX_MAX); in gve_init_priv()
2279 priv->rx_copybreak = GVE_DEFAULT_RX_COPYBREAK; in gve_init_priv()
2280 /* gvnic has one Notification Block per MSI-x vector, except for the in gve_init_priv()
2283 priv->num_ntfy_blks = (num_ntfy - 1) & ~0x1; in gve_init_priv()
2284 priv->mgmt_msix_idx = priv->num_ntfy_blks; in gve_init_priv()
2286 priv->tx_cfg.max_queues = in gve_init_priv()
2287 min_t(int, priv->tx_cfg.max_queues, priv->num_ntfy_blks / 2); in gve_init_priv()
2288 priv->rx_cfg.max_queues = in gve_init_priv()
2289 min_t(int, priv->rx_cfg.max_queues, priv->num_ntfy_blks / 2); in gve_init_priv()
2291 priv->tx_cfg.num_queues = priv->tx_cfg.max_queues; in gve_init_priv()
2292 priv->rx_cfg.num_queues = priv->rx_cfg.max_queues; in gve_init_priv()
2293 if (priv->default_num_queues > 0) { in gve_init_priv()
2294 priv->tx_cfg.num_queues = min_t(int, priv->default_num_queues, in gve_init_priv()
2295 priv->tx_cfg.num_queues); in gve_init_priv()
2296 priv->rx_cfg.num_queues = min_t(int, priv->default_num_queues, in gve_init_priv()
2297 priv->rx_cfg.num_queues); in gve_init_priv()
2300 dev_info(&priv->pdev->dev, "TX queues %d, RX queues %d\n", in gve_init_priv()
2301 priv->tx_cfg.num_queues, priv->rx_cfg.num_queues); in gve_init_priv()
2302 dev_info(&priv->pdev->dev, "Max TX queues %d, Max RX queues %d\n", in gve_init_priv()
2303 priv->tx_cfg.max_queues, priv->rx_cfg.max_queues); in gve_init_priv()
2306 priv->tx_coalesce_usecs = GVE_TX_IRQ_RATELIMIT_US_DQO; in gve_init_priv()
2307 priv->rx_coalesce_usecs = GVE_RX_IRQ_RATELIMIT_US_DQO; in gve_init_priv()
2316 gve_adminq_free(&priv->pdev->dev, priv); in gve_init_priv()
2323 gve_adminq_free(&priv->pdev->dev, priv); in gve_teardown_priv_resources()
2337 gve_close(priv->dev); in gve_reset_and_teardown()
2349 err = gve_open(priv->dev); in gve_reset_recovery()
2355 dev_err(&priv->pdev->dev, "Reset failed! !!! DISABLING ALL QUEUES !!!\n"); in gve_reset_recovery()
2362 bool was_up = netif_running(priv->dev); in gve_reset()
2365 dev_info(&priv->pdev->dev, "Performing reset\n"); in gve_reset()
2377 err = gve_close(priv->dev); in gve_reset()
2389 priv->reset_cnt++; in gve_reset()
2390 priv->interface_up_cnt = 0; in gve_reset()
2391 priv->interface_down_cnt = 0; in gve_reset()
2392 priv->stats_report_trigger_cnt = 0; in gve_reset()
2419 if (!priv->rx) in gve_rx_queue_stop()
2420 return -EAGAIN; in gve_rx_queue_stop()
2422 /* Destroying queue 0 while other queues exist is not supported in DQO */ in gve_rx_queue_stop()
2424 return -ERANGE; in gve_rx_queue_stop()
2426 /* Single-queue destruction requires quiescence on all queues */ in gve_rx_queue_stop()
2429 /* This failure will trigger a reset - no need to clean up */ in gve_rx_queue_stop()
2435 /* This failure will trigger a reset - no need to clean up */ in gve_rx_queue_stop()
2443 /* Turn the unstopped queues back up */ in gve_rx_queue_stop()
2447 *gve_per_q_mem = priv->rx[idx]; in gve_rx_queue_stop()
2448 memset(&priv->rx[idx], 0, sizeof(priv->rx[idx])); in gve_rx_queue_stop()
2475 if (!priv->rx) in gve_rx_queue_mem_alloc()
2476 return -EAGAIN; in gve_rx_queue_mem_alloc()
2495 if (!priv->rx) in gve_rx_queue_start()
2496 return -EAGAIN; in gve_rx_queue_start()
2499 priv->rx[idx] = *gve_per_q_mem; in gve_rx_queue_start()
2501 /* Single-queue creation requires quiescence on all queues */ in gve_rx_queue_start()
2507 /* This failure will trigger a reset - no need to clean up */ in gve_rx_queue_start()
2513 /* This failure will trigger a reset - no need to clean up */ in gve_rx_queue_start()
2519 gve_rx_write_doorbell(priv, &priv->rx[idx]); in gve_rx_queue_start()
2521 gve_rx_post_buffers_dqo(&priv->rx[idx]); in gve_rx_queue_start()
2523 /* Turn the unstopped queues back up */ in gve_rx_queue_start()
2535 memset(&priv->rx[idx], 0, sizeof(priv->rx[idx])); in gve_rx_queue_start()
2566 err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)); in gve_probe()
2568 dev_err(&pdev->dev, "Failed to set dma mask: err=%d\n", err); in gve_probe()
2574 dev_err(&pdev->dev, "Failed to map pci bar!\n"); in gve_probe()
2575 err = -ENOMEM; in gve_probe()
2581 dev_err(&pdev->dev, "Failed to map doorbell bar!\n"); in gve_probe()
2582 err = -ENOMEM; in gve_probe()
2586 gve_write_version(&reg_bar->driver_version); in gve_probe()
2587 /* Get max queues to alloc etherdev */ in gve_probe()
2588 max_tx_queues = ioread32be(&reg_bar->max_tx_queues); in gve_probe()
2589 max_rx_queues = ioread32be(&reg_bar->max_rx_queues); in gve_probe()
2593 dev_err(&pdev->dev, "could not allocate netdev\n"); in gve_probe()
2594 err = -ENOMEM; in gve_probe()
2597 SET_NETDEV_DEV(dev, &pdev->dev); in gve_probe()
2599 dev->ethtool_ops = &gve_ethtool_ops; in gve_probe()
2600 dev->netdev_ops = &gve_netdev_ops; in gve_probe()
2601 dev->queue_mgmt_ops = &gve_queue_mgmt_ops; in gve_probe()
2608 dev->hw_features = NETIF_F_HIGHDMA; in gve_probe()
2609 dev->hw_features |= NETIF_F_SG; in gve_probe()
2610 dev->hw_features |= NETIF_F_HW_CSUM; in gve_probe()
2611 dev->hw_features |= NETIF_F_TSO; in gve_probe()
2612 dev->hw_features |= NETIF_F_TSO6; in gve_probe()
2613 dev->hw_features |= NETIF_F_TSO_ECN; in gve_probe()
2614 dev->hw_features |= NETIF_F_RXCSUM; in gve_probe()
2615 dev->hw_features |= NETIF_F_RXHASH; in gve_probe()
2616 dev->features = dev->hw_features; in gve_probe()
2617 dev->watchdog_timeo = 5 * HZ; in gve_probe()
2618 dev->min_mtu = ETH_MIN_MTU; in gve_probe()
2622 priv->dev = dev; in gve_probe()
2623 priv->pdev = pdev; in gve_probe()
2624 priv->msg_enable = DEFAULT_MSG_LEVEL; in gve_probe()
2625 priv->reg_bar0 = reg_bar; in gve_probe()
2626 priv->db_bar2 = db_bar; in gve_probe()
2627 priv->service_task_flags = 0x0; in gve_probe()
2628 priv->state_flags = 0x0; in gve_probe()
2629 priv->ethtool_flags = 0x0; in gve_probe()
2630 priv->data_buffer_size_dqo = GVE_DEFAULT_RX_BUFFER_SIZE; in gve_probe()
2631 priv->max_rx_buffer_size = GVE_DEFAULT_RX_BUFFER_SIZE; in gve_probe()
2634 priv->gve_wq = alloc_ordered_workqueue("gve", 0); in gve_probe()
2635 if (!priv->gve_wq) { in gve_probe()
2636 dev_err(&pdev->dev, "Could not allocate workqueue"); in gve_probe()
2637 err = -ENOMEM; in gve_probe()
2640 INIT_WORK(&priv->service_task, gve_service_task); in gve_probe()
2641 INIT_WORK(&priv->stats_report_task, gve_stats_report_task); in gve_probe()
2642 priv->tx_cfg.max_queues = max_tx_queues; in gve_probe()
2643 priv->rx_cfg.max_queues = max_rx_queues; in gve_probe()
2653 dev_info(&pdev->dev, "GVE version %s\n", gve_version_str); in gve_probe()
2654 dev_info(&pdev->dev, "GVE queue format %d\n", (int)priv->queue_format); in gve_probe()
2656 queue_work(priv->gve_wq, &priv->service_task); in gve_probe()
2663 destroy_workqueue(priv->gve_wq); in gve_probe()
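The gve_probe() fragments above size the etherdev from the device's advertised max queue counts and declare the offload feature set before registration. A minimal sketch of just that allocation step; the feature list and sizes are illustrative.

#include <linux/etherdevice.h>
#include <linux/pci.h>

static struct net_device *my_alloc_netdev(struct pci_dev *pdev, size_t priv_size,
					  unsigned int max_tx, unsigned int max_rx)
{
	struct net_device *dev;

	dev = alloc_etherdev_mqs(priv_size, max_tx, max_rx);
	if (!dev)
		return NULL;

	SET_NETDEV_DEV(dev, &pdev->dev);
	dev->hw_features = NETIF_F_HIGHDMA | NETIF_F_SG | NETIF_F_HW_CSUM |
			   NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_RXCSUM |
			   NETIF_F_RXHASH;
	dev->features = dev->hw_features;
	dev->watchdog_timeo = 5 * HZ;
	dev->min_mtu = ETH_MIN_MTU;
	return dev;		/* register_netdev() follows after device setup */
}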
2686 __be32 __iomem *db_bar = priv->db_bar2; in gve_remove()
2687 void __iomem *reg_bar = priv->reg_bar0; in gve_remove()
2691 destroy_workqueue(priv->gve_wq); in gve_remove()
2703 bool was_up = netif_running(priv->dev); in gve_shutdown()
2706 if (was_up && gve_close(priv->dev)) { in gve_shutdown()
2721 bool was_up = netif_running(priv->dev); in gve_suspend()
2723 priv->suspend_cnt++; in gve_suspend()
2725 if (was_up && gve_close(priv->dev)) { in gve_suspend()
2732 priv->up_before_suspend = was_up; in gve_suspend()
2743 priv->resume_cnt++; in gve_resume()
2745 err = gve_reset_recovery(priv, priv->up_before_suspend); in gve_resume()