Lines matching refs: priv

42 static int gve_verify_driver_compatibility(struct gve_priv *priv)  in gve_verify_driver_compatibility()  argument
48 driver_info = dma_alloc_coherent(&priv->pdev->dev, in gve_verify_driver_compatibility()
71 err = gve_adminq_verify_driver_compatibility(priv, in gve_verify_driver_compatibility()
79 dma_free_coherent(&priv->pdev->dev, in gve_verify_driver_compatibility()
89 struct gve_priv *priv = netdev_priv(dev); in gve_features_check() local
91 if (!gve_is_gqi(priv)) in gve_features_check()
99 struct gve_priv *priv = netdev_priv(dev); in gve_start_xmit() local
101 if (gve_is_gqi(priv)) in gve_start_xmit()
109 struct gve_priv *priv = netdev_priv(dev); in gve_get_stats() local
115 num_tx_queues = gve_num_tx_queues(priv); in gve_get_stats()
116 if (priv->rx) { in gve_get_stats()
117 for (ring = 0; ring < priv->rx_cfg.num_queues; ring++) { in gve_get_stats()
120 u64_stats_fetch_begin(&priv->rx[ring].statss); in gve_get_stats()
121 packets = priv->rx[ring].rpackets; in gve_get_stats()
122 bytes = priv->rx[ring].rbytes; in gve_get_stats()
123 } while (u64_stats_fetch_retry(&priv->rx[ring].statss, in gve_get_stats()
129 if (priv->tx) { in gve_get_stats()
133 u64_stats_fetch_begin(&priv->tx[ring].statss); in gve_get_stats()
134 packets = priv->tx[ring].pkt_done; in gve_get_stats()
135 bytes = priv->tx[ring].bytes_done; in gve_get_stats()
136 } while (u64_stats_fetch_retry(&priv->tx[ring].statss, in gve_get_stats()
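
The gve_get_stats() matches above use the kernel's u64_stats seqcount pattern so 64-bit per-ring counters can be read consistently even on 32-bit machines. A minimal sketch of that reader loop, using a hypothetical ring structure rather than the driver's own types:

    #include <linux/u64_stats_sync.h>

    /* Hypothetical per-ring counters protected by a u64_stats_sync. */
    struct demo_ring {
            struct u64_stats_sync statss;
            u64 packets;
            u64 bytes;
    };

    static void demo_read_ring_stats(struct demo_ring *ring, u64 *packets, u64 *bytes)
    {
            unsigned int start;

            do {
                    start = u64_stats_fetch_begin(&ring->statss);
                    *packets = ring->packets;
                    *bytes = ring->bytes;
            } while (u64_stats_fetch_retry(&ring->statss, start));
    }

On the writer side, the datapath brackets its counter updates with u64_stats_update_begin()/u64_stats_update_end() on the same statss field.
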
144 static int gve_alloc_flow_rule_caches(struct gve_priv *priv) in gve_alloc_flow_rule_caches() argument
146 struct gve_flow_rules_cache *flow_rules_cache = &priv->flow_rules_cache; in gve_alloc_flow_rule_caches()
149 if (!priv->max_flow_rules) in gve_alloc_flow_rule_caches()
156 dev_err(&priv->pdev->dev, "Cannot alloc flow rules cache\n"); in gve_alloc_flow_rule_caches()
164 dev_err(&priv->pdev->dev, "Cannot alloc flow rule ids cache\n"); in gve_alloc_flow_rule_caches()
177 static void gve_free_flow_rule_caches(struct gve_priv *priv) in gve_free_flow_rule_caches() argument
179 struct gve_flow_rules_cache *flow_rules_cache = &priv->flow_rules_cache; in gve_free_flow_rule_caches()
187 static int gve_alloc_counter_array(struct gve_priv *priv) in gve_alloc_counter_array() argument
189 priv->counter_array = in gve_alloc_counter_array()
190 dma_alloc_coherent(&priv->pdev->dev, in gve_alloc_counter_array()
191 priv->num_event_counters * in gve_alloc_counter_array()
192 sizeof(*priv->counter_array), in gve_alloc_counter_array()
193 &priv->counter_array_bus, GFP_KERNEL); in gve_alloc_counter_array()
194 if (!priv->counter_array) in gve_alloc_counter_array()
200 static void gve_free_counter_array(struct gve_priv *priv) in gve_free_counter_array() argument
202 if (!priv->counter_array) in gve_free_counter_array()
205 dma_free_coherent(&priv->pdev->dev, in gve_free_counter_array()
206 priv->num_event_counters * in gve_free_counter_array()
207 sizeof(*priv->counter_array), in gve_free_counter_array()
208 priv->counter_array, priv->counter_array_bus); in gve_free_counter_array()
209 priv->counter_array = NULL; in gve_free_counter_array()
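
gve_alloc_counter_array() and gve_free_counter_array() above are a textbook dma_alloc_coherent()/dma_free_coherent() pair: the same size, CPU pointer and bus address handed out by the allocation are passed back on free, and the pointer is cleared afterwards. A minimal sketch with hypothetical names and a hypothetical __be32 element type:

    #include <linux/types.h>
    #include <linux/dma-mapping.h>

    /* Hypothetical: allocate a device-visible array of num event counters. */
    static __be32 *demo_alloc_counters(struct device *dev, int num, dma_addr_t *bus)
    {
            return dma_alloc_coherent(dev, num * sizeof(__be32), bus, GFP_KERNEL);
    }

    static void demo_free_counters(struct device *dev, int num, __be32 *counters,
                                   dma_addr_t bus)
    {
            if (!counters)
                    return;
            dma_free_coherent(dev, num * sizeof(__be32), counters, bus);
    }
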
215 struct gve_priv *priv = container_of(work, struct gve_priv, in gve_stats_report_task() local
217 if (gve_get_do_report_stats(priv)) { in gve_stats_report_task()
218 gve_handle_report_stats(priv); in gve_stats_report_task()
219 gve_clear_do_report_stats(priv); in gve_stats_report_task()
223 static void gve_stats_report_schedule(struct gve_priv *priv) in gve_stats_report_schedule() argument
225 if (!gve_get_probe_in_progress(priv) && in gve_stats_report_schedule()
226 !gve_get_reset_in_progress(priv)) { in gve_stats_report_schedule()
227 gve_set_do_report_stats(priv); in gve_stats_report_schedule()
228 queue_work(priv->gve_wq, &priv->stats_report_task); in gve_stats_report_schedule()
234 struct gve_priv *priv = from_timer(priv, t, stats_report_timer); in gve_stats_report_timer() local
236 mod_timer(&priv->stats_report_timer, in gve_stats_report_timer()
238 msecs_to_jiffies(priv->stats_report_timer_period))); in gve_stats_report_timer()
239 gve_stats_report_schedule(priv); in gve_stats_report_timer()
242 static int gve_alloc_stats_report(struct gve_priv *priv) in gve_alloc_stats_report() argument
247 gve_num_tx_queues(priv); in gve_alloc_stats_report()
249 priv->rx_cfg.num_queues; in gve_alloc_stats_report()
250 priv->stats_report_len = struct_size(priv->stats_report, stats, in gve_alloc_stats_report()
252 priv->stats_report = in gve_alloc_stats_report()
253 dma_alloc_coherent(&priv->pdev->dev, priv->stats_report_len, in gve_alloc_stats_report()
254 &priv->stats_report_bus, GFP_KERNEL); in gve_alloc_stats_report()
255 if (!priv->stats_report) in gve_alloc_stats_report()
258 timer_setup(&priv->stats_report_timer, gve_stats_report_timer, 0); in gve_alloc_stats_report()
259 priv->stats_report_timer_period = GVE_STATS_REPORT_TIMER_PERIOD; in gve_alloc_stats_report()
263 static void gve_free_stats_report(struct gve_priv *priv) in gve_free_stats_report() argument
265 if (!priv->stats_report) in gve_free_stats_report()
268 del_timer_sync(&priv->stats_report_timer); in gve_free_stats_report()
269 dma_free_coherent(&priv->pdev->dev, priv->stats_report_len, in gve_free_stats_report()
270 priv->stats_report, priv->stats_report_bus); in gve_free_stats_report()
271 priv->stats_report = NULL; in gve_free_stats_report()
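
The stats-report matches combine a coherent buffer sized with struct_size() and a periodic timer: timer_setup() at allocation time, mod_timer() with round_jiffies() to re-arm, and del_timer_sync() before the buffer is freed. A minimal sketch of the timer half, with hypothetical names and period:

    #include <linux/timer.h>
    #include <linux/jiffies.h>

    struct demo_timer_priv {
            struct timer_list stats_report_timer;
            unsigned long stats_report_timer_period;        /* msecs, hypothetical */
    };

    static void demo_stats_report_timer(struct timer_list *t)
    {
            struct demo_timer_priv *priv = from_timer(priv, t, stats_report_timer);

            /* Re-arm, then schedule whatever work the timer drives. */
            mod_timer(&priv->stats_report_timer,
                      round_jiffies(jiffies +
                                    msecs_to_jiffies(priv->stats_report_timer_period)));
    }

    static void demo_stats_timer_init(struct demo_timer_priv *priv)
    {
            timer_setup(&priv->stats_report_timer, demo_stats_report_timer, 0);
            priv->stats_report_timer_period = 20000;        /* hypothetical value */
    }

    static void demo_stats_timer_stop(struct demo_timer_priv *priv)
    {
            del_timer_sync(&priv->stats_report_timer);
    }
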
276 struct gve_priv *priv = arg; in gve_mgmnt_intr() local
278 queue_work(priv->gve_wq, &priv->service_task); in gve_mgmnt_intr()
285 struct gve_priv *priv = block->priv; in gve_intr() local
287 iowrite32be(GVE_IRQ_MASK, gve_irq_doorbell(priv, block)); in gve_intr()
301 static int gve_is_napi_on_home_cpu(struct gve_priv *priv, u32 irq) in gve_is_napi_on_home_cpu() argument
318 struct gve_priv *priv; in gve_napi_poll() local
322 priv = block->priv; in gve_napi_poll()
325 if (block->tx->q_num < priv->tx_cfg.num_queues) in gve_napi_poll()
344 irq_doorbell = gve_irq_doorbell(priv, block); in gve_napi_poll()
353 reschedule |= gve_tx_clean_pending(priv, block->tx); in gve_napi_poll()
367 struct gve_priv *priv = block->priv; in gve_napi_poll_dqo() local
386 if (likely(gve_is_napi_on_home_cpu(priv, block->irq))) in gve_napi_poll_dqo()
407 gve_write_irq_doorbell_dqo(priv, block, in gve_napi_poll_dqo()
414 static int gve_alloc_notify_blocks(struct gve_priv *priv) in gve_alloc_notify_blocks() argument
416 int num_vecs_requested = priv->num_ntfy_blks + 1; in gve_alloc_notify_blocks()
422 priv->msix_vectors = kvcalloc(num_vecs_requested, in gve_alloc_notify_blocks()
423 sizeof(*priv->msix_vectors), GFP_KERNEL); in gve_alloc_notify_blocks()
424 if (!priv->msix_vectors) in gve_alloc_notify_blocks()
427 priv->msix_vectors[i].entry = i; in gve_alloc_notify_blocks()
428 vecs_enabled = pci_enable_msix_range(priv->pdev, priv->msix_vectors, in gve_alloc_notify_blocks()
431 dev_err(&priv->pdev->dev, "Could not enable min msix %d/%d\n", in gve_alloc_notify_blocks()
441 priv->num_ntfy_blks = new_num_ntfy_blks; in gve_alloc_notify_blocks()
442 priv->mgmt_msix_idx = priv->num_ntfy_blks; in gve_alloc_notify_blocks()
443 priv->tx_cfg.max_queues = min_t(int, priv->tx_cfg.max_queues, in gve_alloc_notify_blocks()
445 priv->rx_cfg.max_queues = min_t(int, priv->rx_cfg.max_queues, in gve_alloc_notify_blocks()
447 dev_err(&priv->pdev->dev, in gve_alloc_notify_blocks()
449 vecs_enabled, priv->tx_cfg.max_queues, in gve_alloc_notify_blocks()
450 priv->rx_cfg.max_queues); in gve_alloc_notify_blocks()
451 if (priv->tx_cfg.num_queues > priv->tx_cfg.max_queues) in gve_alloc_notify_blocks()
452 priv->tx_cfg.num_queues = priv->tx_cfg.max_queues; in gve_alloc_notify_blocks()
453 if (priv->rx_cfg.num_queues > priv->rx_cfg.max_queues) in gve_alloc_notify_blocks()
454 priv->rx_cfg.num_queues = priv->rx_cfg.max_queues; in gve_alloc_notify_blocks()
457 active_cpus = min_t(int, priv->num_ntfy_blks / 2, num_online_cpus()); in gve_alloc_notify_blocks()
460 snprintf(priv->mgmt_msix_name, sizeof(priv->mgmt_msix_name), "gve-mgmnt@pci:%s", in gve_alloc_notify_blocks()
461 pci_name(priv->pdev)); in gve_alloc_notify_blocks()
462 err = request_irq(priv->msix_vectors[priv->mgmt_msix_idx].vector, in gve_alloc_notify_blocks()
463 gve_mgmnt_intr, 0, priv->mgmt_msix_name, priv); in gve_alloc_notify_blocks()
465 dev_err(&priv->pdev->dev, "Did not receive management vector.\n"); in gve_alloc_notify_blocks()
468 priv->irq_db_indices = in gve_alloc_notify_blocks()
469 dma_alloc_coherent(&priv->pdev->dev, in gve_alloc_notify_blocks()
470 priv->num_ntfy_blks * in gve_alloc_notify_blocks()
471 sizeof(*priv->irq_db_indices), in gve_alloc_notify_blocks()
472 &priv->irq_db_indices_bus, GFP_KERNEL); in gve_alloc_notify_blocks()
473 if (!priv->irq_db_indices) { in gve_alloc_notify_blocks()
478 priv->ntfy_blocks = kvzalloc(priv->num_ntfy_blks * in gve_alloc_notify_blocks()
479 sizeof(*priv->ntfy_blocks), GFP_KERNEL); in gve_alloc_notify_blocks()
480 if (!priv->ntfy_blocks) { in gve_alloc_notify_blocks()
486 for (i = 0; i < priv->num_ntfy_blks; i++) { in gve_alloc_notify_blocks()
487 struct gve_notify_block *block = &priv->ntfy_blocks[i]; in gve_alloc_notify_blocks()
491 i, pci_name(priv->pdev)); in gve_alloc_notify_blocks()
492 block->priv = priv; in gve_alloc_notify_blocks()
493 err = request_irq(priv->msix_vectors[msix_idx].vector, in gve_alloc_notify_blocks()
494 gve_is_gqi(priv) ? gve_intr : gve_intr_dqo, in gve_alloc_notify_blocks()
497 dev_err(&priv->pdev->dev, in gve_alloc_notify_blocks()
501 block->irq = priv->msix_vectors[msix_idx].vector; in gve_alloc_notify_blocks()
502 irq_set_affinity_hint(priv->msix_vectors[msix_idx].vector, in gve_alloc_notify_blocks()
504 block->irq_db_index = &priv->irq_db_indices[i].index; in gve_alloc_notify_blocks()
509 struct gve_notify_block *block = &priv->ntfy_blocks[j]; in gve_alloc_notify_blocks()
512 irq_set_affinity_hint(priv->msix_vectors[msix_idx].vector, in gve_alloc_notify_blocks()
514 free_irq(priv->msix_vectors[msix_idx].vector, block); in gve_alloc_notify_blocks()
517 kvfree(priv->ntfy_blocks); in gve_alloc_notify_blocks()
518 priv->ntfy_blocks = NULL; in gve_alloc_notify_blocks()
520 dma_free_coherent(&priv->pdev->dev, priv->num_ntfy_blks * in gve_alloc_notify_blocks()
521 sizeof(*priv->irq_db_indices), in gve_alloc_notify_blocks()
522 priv->irq_db_indices, priv->irq_db_indices_bus); in gve_alloc_notify_blocks()
523 priv->irq_db_indices = NULL; in gve_alloc_notify_blocks()
525 free_irq(priv->msix_vectors[priv->mgmt_msix_idx].vector, priv); in gve_alloc_notify_blocks()
527 pci_disable_msix(priv->pdev); in gve_alloc_notify_blocks()
529 kvfree(priv->msix_vectors); in gve_alloc_notify_blocks()
530 priv->msix_vectors = NULL; in gve_alloc_notify_blocks()
534 static void gve_free_notify_blocks(struct gve_priv *priv) in gve_free_notify_blocks() argument
538 if (!priv->msix_vectors) in gve_free_notify_blocks()
542 for (i = 0; i < priv->num_ntfy_blks; i++) { in gve_free_notify_blocks()
543 struct gve_notify_block *block = &priv->ntfy_blocks[i]; in gve_free_notify_blocks()
546 irq_set_affinity_hint(priv->msix_vectors[msix_idx].vector, in gve_free_notify_blocks()
548 free_irq(priv->msix_vectors[msix_idx].vector, block); in gve_free_notify_blocks()
551 free_irq(priv->msix_vectors[priv->mgmt_msix_idx].vector, priv); in gve_free_notify_blocks()
552 kvfree(priv->ntfy_blocks); in gve_free_notify_blocks()
553 priv->ntfy_blocks = NULL; in gve_free_notify_blocks()
554 dma_free_coherent(&priv->pdev->dev, priv->num_ntfy_blks * in gve_free_notify_blocks()
555 sizeof(*priv->irq_db_indices), in gve_free_notify_blocks()
556 priv->irq_db_indices, priv->irq_db_indices_bus); in gve_free_notify_blocks()
557 priv->irq_db_indices = NULL; in gve_free_notify_blocks()
558 pci_disable_msix(priv->pdev); in gve_free_notify_blocks()
559 kvfree(priv->msix_vectors); in gve_free_notify_blocks()
560 priv->msix_vectors = NULL; in gve_free_notify_blocks()
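
gve_alloc_notify_blocks() and gve_free_notify_blocks() above follow the usual MSI-X life cycle: build an msix_entry table, pci_enable_msix_range(), request_irq() per vector, and on teardown free each IRQ before pci_disable_msix() and releasing the table. A minimal sketch with hypothetical names; plain kcalloc() stands in for the driver's kvcalloc(), and the driver's affinity hints and per-block doorbell state are omitted:

    #include <linux/errno.h>
    #include <linux/pci.h>
    #include <linux/interrupt.h>
    #include <linux/slab.h>

    static irqreturn_t demo_intr(int irq, void *arg)
    {
            return IRQ_HANDLED;
    }

    /* Hypothetical: enable nvecs MSI-X vectors and request an IRQ for each. */
    static int demo_setup_msix(struct pci_dev *pdev, struct msix_entry **out, int nvecs)
    {
            struct msix_entry *vecs;
            int enabled, i, err;

            vecs = kcalloc(nvecs, sizeof(*vecs), GFP_KERNEL);
            if (!vecs)
                    return -ENOMEM;
            for (i = 0; i < nvecs; i++)
                    vecs[i].entry = i;

            enabled = pci_enable_msix_range(pdev, vecs, nvecs, nvecs);
            if (enabled < 0) {
                    err = enabled;
                    goto free_table;
            }

            for (i = 0; i < nvecs; i++) {
                    err = request_irq(vecs[i].vector, demo_intr, 0, "demo-msix", vecs);
                    if (err)
                            goto free_irqs;
            }
            *out = vecs;
            return 0;

    free_irqs:
            while (i--)
                    free_irq(vecs[i].vector, vecs);
            pci_disable_msix(pdev);
    free_table:
            kfree(vecs);
            return err;
    }

    static void demo_teardown_msix(struct pci_dev *pdev, struct msix_entry *vecs, int nvecs)
    {
            int i;

            for (i = 0; i < nvecs; i++)
                    free_irq(vecs[i].vector, vecs);
            pci_disable_msix(pdev);
            kfree(vecs);
    }
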
563 static int gve_setup_device_resources(struct gve_priv *priv) in gve_setup_device_resources() argument
567 err = gve_alloc_flow_rule_caches(priv); in gve_setup_device_resources()
570 err = gve_alloc_counter_array(priv); in gve_setup_device_resources()
573 err = gve_alloc_notify_blocks(priv); in gve_setup_device_resources()
576 err = gve_alloc_stats_report(priv); in gve_setup_device_resources()
579 err = gve_adminq_configure_device_resources(priv, in gve_setup_device_resources()
580 priv->counter_array_bus, in gve_setup_device_resources()
581 priv->num_event_counters, in gve_setup_device_resources()
582 priv->irq_db_indices_bus, in gve_setup_device_resources()
583 priv->num_ntfy_blks); in gve_setup_device_resources()
585 dev_err(&priv->pdev->dev, in gve_setup_device_resources()
591 if (!gve_is_gqi(priv)) { in gve_setup_device_resources()
592 priv->ptype_lut_dqo = kvzalloc(sizeof(*priv->ptype_lut_dqo), in gve_setup_device_resources()
594 if (!priv->ptype_lut_dqo) { in gve_setup_device_resources()
598 err = gve_adminq_get_ptype_map_dqo(priv, priv->ptype_lut_dqo); in gve_setup_device_resources()
600 dev_err(&priv->pdev->dev, in gve_setup_device_resources()
606 err = gve_adminq_report_stats(priv, priv->stats_report_len, in gve_setup_device_resources()
607 priv->stats_report_bus, in gve_setup_device_resources()
610 dev_err(&priv->pdev->dev, in gve_setup_device_resources()
612 gve_set_device_resources_ok(priv); in gve_setup_device_resources()
616 kvfree(priv->ptype_lut_dqo); in gve_setup_device_resources()
617 priv->ptype_lut_dqo = NULL; in gve_setup_device_resources()
619 gve_free_stats_report(priv); in gve_setup_device_resources()
621 gve_free_notify_blocks(priv); in gve_setup_device_resources()
623 gve_free_counter_array(priv); in gve_setup_device_resources()
625 gve_free_flow_rule_caches(priv); in gve_setup_device_resources()
630 static void gve_trigger_reset(struct gve_priv *priv);
632 static void gve_teardown_device_resources(struct gve_priv *priv) in gve_teardown_device_resources() argument
637 if (gve_get_device_resources_ok(priv)) { in gve_teardown_device_resources()
638 err = gve_flow_rules_reset(priv); in gve_teardown_device_resources()
640 dev_err(&priv->pdev->dev, in gve_teardown_device_resources()
642 gve_trigger_reset(priv); in gve_teardown_device_resources()
645 err = gve_adminq_report_stats(priv, 0, 0x0, GVE_STATS_REPORT_TIMER_PERIOD); in gve_teardown_device_resources()
647 dev_err(&priv->pdev->dev, in gve_teardown_device_resources()
649 gve_trigger_reset(priv); in gve_teardown_device_resources()
651 err = gve_adminq_deconfigure_device_resources(priv); in gve_teardown_device_resources()
653 dev_err(&priv->pdev->dev, in gve_teardown_device_resources()
656 gve_trigger_reset(priv); in gve_teardown_device_resources()
660 kvfree(priv->ptype_lut_dqo); in gve_teardown_device_resources()
661 priv->ptype_lut_dqo = NULL; in gve_teardown_device_resources()
663 gve_free_flow_rule_caches(priv); in gve_teardown_device_resources()
664 gve_free_counter_array(priv); in gve_teardown_device_resources()
665 gve_free_notify_blocks(priv); in gve_teardown_device_resources()
666 gve_free_stats_report(priv); in gve_teardown_device_resources()
667 gve_clear_device_resources_ok(priv); in gve_teardown_device_resources()
670 static int gve_unregister_qpl(struct gve_priv *priv, in gve_unregister_qpl() argument
678 err = gve_adminq_unregister_page_list(priv, qpl->id); in gve_unregister_qpl()
680 netif_err(priv, drv, priv->dev, in gve_unregister_qpl()
686 priv->num_registered_pages -= qpl->num_entries; in gve_unregister_qpl()
690 static int gve_register_qpl(struct gve_priv *priv, in gve_register_qpl() argument
701 if (pages + priv->num_registered_pages > priv->max_registered_pages) { in gve_register_qpl()
702 netif_err(priv, drv, priv->dev, in gve_register_qpl()
704 pages + priv->num_registered_pages, in gve_register_qpl()
705 priv->max_registered_pages); in gve_register_qpl()
709 err = gve_adminq_register_page_list(priv, qpl); in gve_register_qpl()
711 netif_err(priv, drv, priv->dev, in gve_register_qpl()
717 priv->num_registered_pages += pages; in gve_register_qpl()
721 static struct gve_queue_page_list *gve_tx_get_qpl(struct gve_priv *priv, int idx) in gve_tx_get_qpl() argument
723 struct gve_tx_ring *tx = &priv->tx[idx]; in gve_tx_get_qpl()
725 if (gve_is_gqi(priv)) in gve_tx_get_qpl()
731 static struct gve_queue_page_list *gve_rx_get_qpl(struct gve_priv *priv, int idx) in gve_rx_get_qpl() argument
733 struct gve_rx_ring *rx = &priv->rx[idx]; in gve_rx_get_qpl()
735 if (gve_is_gqi(priv)) in gve_rx_get_qpl()
741 static int gve_register_xdp_qpls(struct gve_priv *priv) in gve_register_xdp_qpls() argument
747 start_id = gve_xdp_tx_start_queue_id(priv); in gve_register_xdp_qpls()
748 for (i = start_id; i < start_id + gve_num_xdp_qpls(priv); i++) { in gve_register_xdp_qpls()
749 err = gve_register_qpl(priv, gve_tx_get_qpl(priv, i)); in gve_register_xdp_qpls()
757 static int gve_register_qpls(struct gve_priv *priv) in gve_register_qpls() argument
763 num_tx_qpls = gve_num_tx_qpls(&priv->tx_cfg, gve_num_xdp_qpls(priv), in gve_register_qpls()
764 gve_is_qpl(priv)); in gve_register_qpls()
765 num_rx_qpls = gve_num_rx_qpls(&priv->rx_cfg, gve_is_qpl(priv)); in gve_register_qpls()
768 err = gve_register_qpl(priv, gve_tx_get_qpl(priv, i)); in gve_register_qpls()
774 err = gve_register_qpl(priv, gve_rx_get_qpl(priv, i)); in gve_register_qpls()
782 static int gve_unregister_xdp_qpls(struct gve_priv *priv) in gve_unregister_xdp_qpls() argument
788 start_id = gve_xdp_tx_start_queue_id(priv); in gve_unregister_xdp_qpls()
789 for (i = start_id; i < start_id + gve_num_xdp_qpls(priv); i++) { in gve_unregister_xdp_qpls()
790 err = gve_unregister_qpl(priv, gve_tx_get_qpl(priv, i)); in gve_unregister_xdp_qpls()
798 static int gve_unregister_qpls(struct gve_priv *priv) in gve_unregister_qpls() argument
804 num_tx_qpls = gve_num_tx_qpls(&priv->tx_cfg, gve_num_xdp_qpls(priv), in gve_unregister_qpls()
805 gve_is_qpl(priv)); in gve_unregister_qpls()
806 num_rx_qpls = gve_num_rx_qpls(&priv->rx_cfg, gve_is_qpl(priv)); in gve_unregister_qpls()
809 err = gve_unregister_qpl(priv, gve_tx_get_qpl(priv, i)); in gve_unregister_qpls()
816 err = gve_unregister_qpl(priv, gve_rx_get_qpl(priv, i)); in gve_unregister_qpls()
824 static int gve_create_xdp_rings(struct gve_priv *priv) in gve_create_xdp_rings() argument
828 err = gve_adminq_create_tx_queues(priv, in gve_create_xdp_rings()
829 gve_xdp_tx_start_queue_id(priv), in gve_create_xdp_rings()
830 priv->num_xdp_queues); in gve_create_xdp_rings()
832 netif_err(priv, drv, priv->dev, "failed to create %d XDP tx queues\n", in gve_create_xdp_rings()
833 priv->num_xdp_queues); in gve_create_xdp_rings()
839 netif_dbg(priv, drv, priv->dev, "created %d XDP tx queues\n", in gve_create_xdp_rings()
840 priv->num_xdp_queues); in gve_create_xdp_rings()
845 static int gve_create_rings(struct gve_priv *priv) in gve_create_rings() argument
847 int num_tx_queues = gve_num_tx_queues(priv); in gve_create_rings()
851 err = gve_adminq_create_tx_queues(priv, 0, num_tx_queues); in gve_create_rings()
853 netif_err(priv, drv, priv->dev, "failed to create %d tx queues\n", in gve_create_rings()
860 netif_dbg(priv, drv, priv->dev, "created %d tx queues\n", in gve_create_rings()
863 err = gve_adminq_create_rx_queues(priv, priv->rx_cfg.num_queues); in gve_create_rings()
865 netif_err(priv, drv, priv->dev, "failed to create %d rx queues\n", in gve_create_rings()
866 priv->rx_cfg.num_queues); in gve_create_rings()
872 netif_dbg(priv, drv, priv->dev, "created %d rx queues\n", in gve_create_rings()
873 priv->rx_cfg.num_queues); in gve_create_rings()
875 if (gve_is_gqi(priv)) { in gve_create_rings()
882 for (i = 0; i < priv->rx_cfg.num_queues; i++) in gve_create_rings()
883 gve_rx_write_doorbell(priv, &priv->rx[i]); in gve_create_rings()
885 for (i = 0; i < priv->rx_cfg.num_queues; i++) { in gve_create_rings()
887 gve_rx_post_buffers_dqo(&priv->rx[i]); in gve_create_rings()
894 static void init_xdp_sync_stats(struct gve_priv *priv) in init_xdp_sync_stats() argument
896 int start_id = gve_xdp_tx_start_queue_id(priv); in init_xdp_sync_stats()
900 for (i = start_id; i < start_id + priv->num_xdp_queues; i++) { in init_xdp_sync_stats()
901 int ntfy_idx = gve_tx_idx_to_ntfy(priv, i); in init_xdp_sync_stats()
903 u64_stats_init(&priv->tx[i].statss); in init_xdp_sync_stats()
904 priv->tx[i].ntfy_id = ntfy_idx; in init_xdp_sync_stats()
908 static void gve_init_sync_stats(struct gve_priv *priv) in gve_init_sync_stats() argument
912 for (i = 0; i < priv->tx_cfg.num_queues; i++) in gve_init_sync_stats()
913 u64_stats_init(&priv->tx[i].statss); in gve_init_sync_stats()
916 init_xdp_sync_stats(priv); in gve_init_sync_stats()
918 for (i = 0; i < priv->rx_cfg.num_queues; i++) in gve_init_sync_stats()
919 u64_stats_init(&priv->rx[i].statss); in gve_init_sync_stats()
922 static void gve_tx_get_curr_alloc_cfg(struct gve_priv *priv, in gve_tx_get_curr_alloc_cfg() argument
925 cfg->qcfg = &priv->tx_cfg; in gve_tx_get_curr_alloc_cfg()
926 cfg->raw_addressing = !gve_is_qpl(priv); in gve_tx_get_curr_alloc_cfg()
927 cfg->ring_size = priv->tx_desc_cnt; in gve_tx_get_curr_alloc_cfg()
929 cfg->num_rings = gve_num_tx_queues(priv); in gve_tx_get_curr_alloc_cfg()
930 cfg->tx = priv->tx; in gve_tx_get_curr_alloc_cfg()
933 static void gve_tx_stop_rings(struct gve_priv *priv, int start_id, int num_rings) in gve_tx_stop_rings() argument
937 if (!priv->tx) in gve_tx_stop_rings()
941 if (gve_is_gqi(priv)) in gve_tx_stop_rings()
942 gve_tx_stop_ring_gqi(priv, i); in gve_tx_stop_rings()
944 gve_tx_stop_ring_dqo(priv, i); in gve_tx_stop_rings()
948 static void gve_tx_start_rings(struct gve_priv *priv, int start_id, in gve_tx_start_rings() argument
954 if (gve_is_gqi(priv)) in gve_tx_start_rings()
955 gve_tx_start_ring_gqi(priv, i); in gve_tx_start_rings()
957 gve_tx_start_ring_dqo(priv, i); in gve_tx_start_rings()
961 static int gve_alloc_xdp_rings(struct gve_priv *priv) in gve_alloc_xdp_rings() argument
966 if (!priv->num_xdp_queues) in gve_alloc_xdp_rings()
969 gve_tx_get_curr_alloc_cfg(priv, &cfg); in gve_alloc_xdp_rings()
970 cfg.start_idx = gve_xdp_tx_start_queue_id(priv); in gve_alloc_xdp_rings()
971 cfg.num_rings = priv->num_xdp_queues; in gve_alloc_xdp_rings()
973 err = gve_tx_alloc_rings_gqi(priv, &cfg); in gve_alloc_xdp_rings()
977 gve_tx_start_rings(priv, cfg.start_idx, cfg.num_rings); in gve_alloc_xdp_rings()
978 init_xdp_sync_stats(priv); in gve_alloc_xdp_rings()
983 static int gve_queues_mem_alloc(struct gve_priv *priv, in gve_queues_mem_alloc() argument
989 if (gve_is_gqi(priv)) in gve_queues_mem_alloc()
990 err = gve_tx_alloc_rings_gqi(priv, tx_alloc_cfg); in gve_queues_mem_alloc()
992 err = gve_tx_alloc_rings_dqo(priv, tx_alloc_cfg); in gve_queues_mem_alloc()
996 if (gve_is_gqi(priv)) in gve_queues_mem_alloc()
997 err = gve_rx_alloc_rings_gqi(priv, rx_alloc_cfg); in gve_queues_mem_alloc()
999 err = gve_rx_alloc_rings_dqo(priv, rx_alloc_cfg); in gve_queues_mem_alloc()
1006 if (gve_is_gqi(priv)) in gve_queues_mem_alloc()
1007 gve_tx_free_rings_gqi(priv, tx_alloc_cfg); in gve_queues_mem_alloc()
1009 gve_tx_free_rings_dqo(priv, tx_alloc_cfg); in gve_queues_mem_alloc()
1013 static int gve_destroy_xdp_rings(struct gve_priv *priv) in gve_destroy_xdp_rings() argument
1018 start_id = gve_xdp_tx_start_queue_id(priv); in gve_destroy_xdp_rings()
1019 err = gve_adminq_destroy_tx_queues(priv, in gve_destroy_xdp_rings()
1021 priv->num_xdp_queues); in gve_destroy_xdp_rings()
1023 netif_err(priv, drv, priv->dev, in gve_destroy_xdp_rings()
1028 netif_dbg(priv, drv, priv->dev, "destroyed XDP queues\n"); in gve_destroy_xdp_rings()
1033 static int gve_destroy_rings(struct gve_priv *priv) in gve_destroy_rings() argument
1035 int num_tx_queues = gve_num_tx_queues(priv); in gve_destroy_rings()
1038 err = gve_adminq_destroy_tx_queues(priv, 0, num_tx_queues); in gve_destroy_rings()
1040 netif_err(priv, drv, priv->dev, in gve_destroy_rings()
1045 netif_dbg(priv, drv, priv->dev, "destroyed tx queues\n"); in gve_destroy_rings()
1046 err = gve_adminq_destroy_rx_queues(priv, priv->rx_cfg.num_queues); in gve_destroy_rings()
1048 netif_err(priv, drv, priv->dev, in gve_destroy_rings()
1053 netif_dbg(priv, drv, priv->dev, "destroyed rx queues\n"); in gve_destroy_rings()
1057 static void gve_free_xdp_rings(struct gve_priv *priv) in gve_free_xdp_rings() argument
1061 gve_tx_get_curr_alloc_cfg(priv, &cfg); in gve_free_xdp_rings()
1062 cfg.start_idx = gve_xdp_tx_start_queue_id(priv); in gve_free_xdp_rings()
1063 cfg.num_rings = priv->num_xdp_queues; in gve_free_xdp_rings()
1065 if (priv->tx) { in gve_free_xdp_rings()
1066 gve_tx_stop_rings(priv, cfg.start_idx, cfg.num_rings); in gve_free_xdp_rings()
1067 gve_tx_free_rings_gqi(priv, &cfg); in gve_free_xdp_rings()
1071 static void gve_queues_mem_free(struct gve_priv *priv, in gve_queues_mem_free() argument
1075 if (gve_is_gqi(priv)) { in gve_queues_mem_free()
1076 gve_tx_free_rings_gqi(priv, tx_cfg); in gve_queues_mem_free()
1077 gve_rx_free_rings_gqi(priv, rx_cfg); in gve_queues_mem_free()
1079 gve_tx_free_rings_dqo(priv, tx_cfg); in gve_queues_mem_free()
1080 gve_rx_free_rings_dqo(priv, rx_cfg); in gve_queues_mem_free()
1084 int gve_alloc_page(struct gve_priv *priv, struct device *dev, in gve_alloc_page() argument
1090 priv->page_alloc_fail++; in gve_alloc_page()
1095 priv->dma_mapping_error++; in gve_alloc_page()
1102 struct gve_queue_page_list *gve_alloc_queue_page_list(struct gve_priv *priv, in gve_alloc_queue_page_list() argument
1124 err = gve_alloc_page(priv, &priv->pdev->dev, &qpl->pages[i], in gve_alloc_queue_page_list()
1126 gve_qpl_dma_dir(priv, id), GFP_KERNEL); in gve_alloc_queue_page_list()
1135 gve_free_queue_page_list(priv, qpl, id); in gve_alloc_queue_page_list()
1148 void gve_free_queue_page_list(struct gve_priv *priv, in gve_free_queue_page_list() argument
1162 gve_free_page(&priv->pdev->dev, qpl->pages[i], in gve_free_queue_page_list()
1163 qpl->page_buses[i], gve_qpl_dma_dir(priv, id)); in gve_free_queue_page_list()
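
gve_alloc_page() and the queue-page-list helpers above pair page allocation with streaming DMA mapping, and count mapping failures separately from allocation failures. A minimal sketch of that per-page pattern, with hypothetical names:

    #include <linux/errno.h>
    #include <linux/gfp.h>
    #include <linux/mm.h>
    #include <linux/dma-mapping.h>

    /* Hypothetical: allocate one page and DMA-map it for the device. */
    static int demo_alloc_dma_page(struct device *dev, struct page **page,
                                   dma_addr_t *dma, enum dma_data_direction dir)
    {
            *page = alloc_page(GFP_KERNEL);
            if (!*page)
                    return -ENOMEM;

            *dma = dma_map_page(dev, *page, 0, PAGE_SIZE, dir);
            if (dma_mapping_error(dev, *dma)) {
                    __free_page(*page);
                    *page = NULL;
                    return -ENOMEM;
            }
            return 0;
    }

    static void demo_free_dma_page(struct device *dev, struct page *page,
                                   dma_addr_t dma, enum dma_data_direction dir)
    {
            dma_unmap_page(dev, dma, PAGE_SIZE, dir);
            __free_page(page);
    }
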
1178 void gve_schedule_reset(struct gve_priv *priv) in gve_schedule_reset() argument
1180 gve_set_do_reset(priv); in gve_schedule_reset()
1181 queue_work(priv->gve_wq, &priv->service_task); in gve_schedule_reset()
1184 static void gve_reset_and_teardown(struct gve_priv *priv, bool was_up);
1185 static int gve_reset_recovery(struct gve_priv *priv, bool was_up);
1186 static void gve_turndown(struct gve_priv *priv);
1187 static void gve_turnup(struct gve_priv *priv);
1189 static int gve_reg_xdp_info(struct gve_priv *priv, struct net_device *dev) in gve_reg_xdp_info() argument
1197 if (!priv->num_xdp_queues) in gve_reg_xdp_info()
1200 for (i = 0; i < priv->rx_cfg.num_queues; i++) { in gve_reg_xdp_info()
1201 rx = &priv->rx[i]; in gve_reg_xdp_info()
1202 napi = &priv->ntfy_blocks[rx->ntfy_id].napi; in gve_reg_xdp_info()
1227 for (i = 0; i < priv->num_xdp_queues; i++) { in gve_reg_xdp_info()
1228 tx_qid = gve_xdp_tx_queue_id(priv, i); in gve_reg_xdp_info()
1229 priv->tx[tx_qid].xsk_pool = xsk_get_pool_from_qid(dev, i); in gve_reg_xdp_info()
1235 rx = &priv->rx[j]; in gve_reg_xdp_info()
1244 static void gve_unreg_xdp_info(struct gve_priv *priv) in gve_unreg_xdp_info() argument
1248 if (!priv->num_xdp_queues) in gve_unreg_xdp_info()
1251 for (i = 0; i < priv->rx_cfg.num_queues; i++) { in gve_unreg_xdp_info()
1252 struct gve_rx_ring *rx = &priv->rx[i]; in gve_unreg_xdp_info()
1261 for (i = 0; i < priv->num_xdp_queues; i++) { in gve_unreg_xdp_info()
1262 tx_qid = gve_xdp_tx_queue_id(priv, i); in gve_unreg_xdp_info()
1263 priv->tx[tx_qid].xsk_pool = NULL; in gve_unreg_xdp_info()
1267 static void gve_drain_page_cache(struct gve_priv *priv) in gve_drain_page_cache() argument
1271 for (i = 0; i < priv->rx_cfg.num_queues; i++) in gve_drain_page_cache()
1272 page_frag_cache_drain(&priv->rx[i].page_cache); in gve_drain_page_cache()
1275 static void gve_rx_get_curr_alloc_cfg(struct gve_priv *priv, in gve_rx_get_curr_alloc_cfg() argument
1278 cfg->qcfg = &priv->rx_cfg; in gve_rx_get_curr_alloc_cfg()
1279 cfg->qcfg_tx = &priv->tx_cfg; in gve_rx_get_curr_alloc_cfg()
1280 cfg->raw_addressing = !gve_is_qpl(priv); in gve_rx_get_curr_alloc_cfg()
1281 cfg->enable_header_split = priv->header_split_enabled; in gve_rx_get_curr_alloc_cfg()
1282 cfg->ring_size = priv->rx_desc_cnt; in gve_rx_get_curr_alloc_cfg()
1283 cfg->packet_buffer_size = gve_is_gqi(priv) ? in gve_rx_get_curr_alloc_cfg()
1285 priv->data_buffer_size_dqo; in gve_rx_get_curr_alloc_cfg()
1286 cfg->rx = priv->rx; in gve_rx_get_curr_alloc_cfg()
1289 void gve_get_curr_alloc_cfgs(struct gve_priv *priv, in gve_get_curr_alloc_cfgs() argument
1293 gve_tx_get_curr_alloc_cfg(priv, tx_alloc_cfg); in gve_get_curr_alloc_cfgs()
1294 gve_rx_get_curr_alloc_cfg(priv, rx_alloc_cfg); in gve_get_curr_alloc_cfgs()
1297 static void gve_rx_start_ring(struct gve_priv *priv, int i) in gve_rx_start_ring() argument
1299 if (gve_is_gqi(priv)) in gve_rx_start_ring()
1300 gve_rx_start_ring_gqi(priv, i); in gve_rx_start_ring()
1302 gve_rx_start_ring_dqo(priv, i); in gve_rx_start_ring()
1305 static void gve_rx_start_rings(struct gve_priv *priv, int num_rings) in gve_rx_start_rings() argument
1310 gve_rx_start_ring(priv, i); in gve_rx_start_rings()
1313 static void gve_rx_stop_ring(struct gve_priv *priv, int i) in gve_rx_stop_ring() argument
1315 if (gve_is_gqi(priv)) in gve_rx_stop_ring()
1316 gve_rx_stop_ring_gqi(priv, i); in gve_rx_stop_ring()
1318 gve_rx_stop_ring_dqo(priv, i); in gve_rx_stop_ring()
1321 static void gve_rx_stop_rings(struct gve_priv *priv, int num_rings) in gve_rx_stop_rings() argument
1325 if (!priv->rx) in gve_rx_stop_rings()
1329 gve_rx_stop_ring(priv, i); in gve_rx_stop_rings()
1332 static void gve_queues_mem_remove(struct gve_priv *priv) in gve_queues_mem_remove() argument
1337 gve_get_curr_alloc_cfgs(priv, &tx_alloc_cfg, &rx_alloc_cfg); in gve_queues_mem_remove()
1338 gve_queues_mem_free(priv, &tx_alloc_cfg, &rx_alloc_cfg); in gve_queues_mem_remove()
1339 priv->tx = NULL; in gve_queues_mem_remove()
1340 priv->rx = NULL; in gve_queues_mem_remove()
1346 static int gve_queues_start(struct gve_priv *priv, in gve_queues_start() argument
1350 struct net_device *dev = priv->dev; in gve_queues_start()
1354 priv->tx = tx_alloc_cfg->tx; in gve_queues_start()
1355 priv->rx = rx_alloc_cfg->rx; in gve_queues_start()
1358 priv->tx_cfg = *tx_alloc_cfg->qcfg; in gve_queues_start()
1359 priv->rx_cfg = *rx_alloc_cfg->qcfg; in gve_queues_start()
1360 priv->tx_desc_cnt = tx_alloc_cfg->ring_size; in gve_queues_start()
1361 priv->rx_desc_cnt = rx_alloc_cfg->ring_size; in gve_queues_start()
1363 if (priv->xdp_prog) in gve_queues_start()
1364 priv->num_xdp_queues = priv->rx_cfg.num_queues; in gve_queues_start()
1366 priv->num_xdp_queues = 0; in gve_queues_start()
1368 gve_tx_start_rings(priv, 0, tx_alloc_cfg->num_rings); in gve_queues_start()
1369 gve_rx_start_rings(priv, rx_alloc_cfg->qcfg->num_queues); in gve_queues_start()
1370 gve_init_sync_stats(priv); in gve_queues_start()
1372 err = netif_set_real_num_tx_queues(dev, priv->tx_cfg.num_queues); in gve_queues_start()
1375 err = netif_set_real_num_rx_queues(dev, priv->rx_cfg.num_queues); in gve_queues_start()
1379 err = gve_reg_xdp_info(priv, dev); in gve_queues_start()
1383 err = gve_register_qpls(priv); in gve_queues_start()
1387 priv->header_split_enabled = rx_alloc_cfg->enable_header_split; in gve_queues_start()
1388 priv->data_buffer_size_dqo = rx_alloc_cfg->packet_buffer_size; in gve_queues_start()
1390 err = gve_create_rings(priv); in gve_queues_start()
1394 gve_set_device_rings_ok(priv); in gve_queues_start()
1396 if (gve_get_report_stats(priv)) in gve_queues_start()
1397 mod_timer(&priv->stats_report_timer, in gve_queues_start()
1399 msecs_to_jiffies(priv->stats_report_timer_period))); in gve_queues_start()
1401 gve_turnup(priv); in gve_queues_start()
1402 queue_work(priv->gve_wq, &priv->service_task); in gve_queues_start()
1403 priv->interface_up_cnt++; in gve_queues_start()
1407 if (gve_get_reset_in_progress(priv)) in gve_queues_start()
1409 gve_reset_and_teardown(priv, true); in gve_queues_start()
1411 gve_reset_recovery(priv, false); in gve_queues_start()
1415 gve_tx_stop_rings(priv, 0, gve_num_tx_queues(priv)); in gve_queues_start()
1416 gve_rx_stop_rings(priv, priv->rx_cfg.num_queues); in gve_queues_start()
1417 gve_queues_mem_remove(priv); in gve_queues_start()
1425 struct gve_priv *priv = netdev_priv(dev); in gve_open() local
1428 gve_get_curr_alloc_cfgs(priv, &tx_alloc_cfg, &rx_alloc_cfg); in gve_open()
1430 err = gve_queues_mem_alloc(priv, &tx_alloc_cfg, &rx_alloc_cfg); in gve_open()
1437 err = gve_queues_start(priv, &tx_alloc_cfg, &rx_alloc_cfg); in gve_open()
1444 static int gve_queues_stop(struct gve_priv *priv) in gve_queues_stop() argument
1448 netif_carrier_off(priv->dev); in gve_queues_stop()
1449 if (gve_get_device_rings_ok(priv)) { in gve_queues_stop()
1450 gve_turndown(priv); in gve_queues_stop()
1451 gve_drain_page_cache(priv); in gve_queues_stop()
1452 err = gve_destroy_rings(priv); in gve_queues_stop()
1455 err = gve_unregister_qpls(priv); in gve_queues_stop()
1458 gve_clear_device_rings_ok(priv); in gve_queues_stop()
1460 del_timer_sync(&priv->stats_report_timer); in gve_queues_stop()
1462 gve_unreg_xdp_info(priv); in gve_queues_stop()
1464 gve_tx_stop_rings(priv, 0, gve_num_tx_queues(priv)); in gve_queues_stop()
1465 gve_rx_stop_rings(priv, priv->rx_cfg.num_queues); in gve_queues_stop()
1467 priv->interface_down_cnt++; in gve_queues_stop()
1474 if (gve_get_reset_in_progress(priv)) in gve_queues_stop()
1477 gve_reset_and_teardown(priv, true); in gve_queues_stop()
1478 return gve_reset_recovery(priv, false); in gve_queues_stop()
1483 struct gve_priv *priv = netdev_priv(dev); in gve_close() local
1486 err = gve_queues_stop(priv); in gve_close()
1490 gve_queues_mem_remove(priv); in gve_close()
1494 static int gve_remove_xdp_queues(struct gve_priv *priv) in gve_remove_xdp_queues() argument
1498 err = gve_destroy_xdp_rings(priv); in gve_remove_xdp_queues()
1502 err = gve_unregister_xdp_qpls(priv); in gve_remove_xdp_queues()
1506 gve_unreg_xdp_info(priv); in gve_remove_xdp_queues()
1507 gve_free_xdp_rings(priv); in gve_remove_xdp_queues()
1509 priv->num_xdp_queues = 0; in gve_remove_xdp_queues()
1513 static int gve_add_xdp_queues(struct gve_priv *priv) in gve_add_xdp_queues() argument
1517 priv->num_xdp_queues = priv->rx_cfg.num_queues; in gve_add_xdp_queues()
1519 err = gve_alloc_xdp_rings(priv); in gve_add_xdp_queues()
1523 err = gve_reg_xdp_info(priv, priv->dev); in gve_add_xdp_queues()
1527 err = gve_register_xdp_qpls(priv); in gve_add_xdp_queues()
1531 err = gve_create_xdp_rings(priv); in gve_add_xdp_queues()
1538 gve_free_xdp_rings(priv); in gve_add_xdp_queues()
1540 priv->num_xdp_queues = 0; in gve_add_xdp_queues()
1544 static void gve_handle_link_status(struct gve_priv *priv, bool link_status) in gve_handle_link_status() argument
1546 if (!gve_get_napi_enabled(priv)) in gve_handle_link_status()
1549 if (link_status == netif_carrier_ok(priv->dev)) in gve_handle_link_status()
1553 netdev_info(priv->dev, "Device link is up.\n"); in gve_handle_link_status()
1554 netif_carrier_on(priv->dev); in gve_handle_link_status()
1556 netdev_info(priv->dev, "Device link is down.\n"); in gve_handle_link_status()
1557 netif_carrier_off(priv->dev); in gve_handle_link_status()
1561 static int gve_set_xdp(struct gve_priv *priv, struct bpf_prog *prog, in gve_set_xdp() argument
1568 old_prog = READ_ONCE(priv->xdp_prog); in gve_set_xdp()
1569 if (!netif_running(priv->dev)) { in gve_set_xdp()
1570 WRITE_ONCE(priv->xdp_prog, prog); in gve_set_xdp()
1576 gve_turndown(priv); in gve_set_xdp()
1580 err = gve_add_xdp_queues(priv); in gve_set_xdp()
1586 err = gve_remove_xdp_queues(priv); in gve_set_xdp()
1590 WRITE_ONCE(priv->xdp_prog, prog); in gve_set_xdp()
1595 gve_turnup(priv); in gve_set_xdp()
1596 status = ioread32be(&priv->reg_bar0->device_status); in gve_set_xdp()
1597 gve_handle_link_status(priv, GVE_DEVICE_STATUS_LINK_STATUS_MASK & status); in gve_set_xdp()
1605 struct gve_priv *priv = netdev_priv(dev); in gve_xsk_pool_enable() local
1611 if (qid >= priv->rx_cfg.num_queues) { in gve_xsk_pool_enable()
1612 dev_err(&priv->pdev->dev, "xsk pool invalid qid %d", qid); in gve_xsk_pool_enable()
1616 priv->dev->max_mtu + sizeof(struct ethhdr)) { in gve_xsk_pool_enable()
1617 dev_err(&priv->pdev->dev, "xsk pool frame_len too small"); in gve_xsk_pool_enable()
1621 err = xsk_pool_dma_map(pool, &priv->pdev->dev, in gve_xsk_pool_enable()
1627 if (!priv->xdp_prog) in gve_xsk_pool_enable()
1630 rx = &priv->rx[qid]; in gve_xsk_pool_enable()
1631 napi = &priv->ntfy_blocks[rx->ntfy_id].napi; in gve_xsk_pool_enable()
1644 tx_qid = gve_xdp_tx_queue_id(priv, qid); in gve_xsk_pool_enable()
1645 priv->tx[tx_qid].xsk_pool = pool; in gve_xsk_pool_enable()
1660 struct gve_priv *priv = netdev_priv(dev); in gve_xsk_pool_disable() local
1669 if (qid >= priv->rx_cfg.num_queues) in gve_xsk_pool_disable()
1673 if (!priv->xdp_prog) in gve_xsk_pool_disable()
1676 tx_qid = gve_xdp_tx_queue_id(priv, qid); in gve_xsk_pool_disable()
1678 priv->rx[qid].xsk_pool = NULL; in gve_xsk_pool_disable()
1679 xdp_rxq_info_unreg(&priv->rx[qid].xsk_rxq); in gve_xsk_pool_disable()
1680 priv->tx[tx_qid].xsk_pool = NULL; in gve_xsk_pool_disable()
1684 napi_rx = &priv->ntfy_blocks[priv->rx[qid].ntfy_id].napi; in gve_xsk_pool_disable()
1687 napi_tx = &priv->ntfy_blocks[priv->tx[tx_qid].ntfy_id].napi; in gve_xsk_pool_disable()
1690 priv->rx[qid].xsk_pool = NULL; in gve_xsk_pool_disable()
1691 xdp_rxq_info_unreg(&priv->rx[qid].xsk_rxq); in gve_xsk_pool_disable()
1692 priv->tx[tx_qid].xsk_pool = NULL; in gve_xsk_pool_disable()
1696 if (gve_rx_work_pending(&priv->rx[qid])) in gve_xsk_pool_disable()
1700 if (gve_tx_clean_pending(priv, &priv->tx[tx_qid])) in gve_xsk_pool_disable()
1711 struct gve_priv *priv = netdev_priv(dev); in gve_xsk_wakeup() local
1712 int tx_queue_id = gve_xdp_tx_queue_id(priv, queue_id); in gve_xsk_wakeup()
1714 if (queue_id >= priv->rx_cfg.num_queues || !priv->xdp_prog) in gve_xsk_wakeup()
1718 struct gve_tx_ring *tx = &priv->tx[tx_queue_id]; in gve_xsk_wakeup()
1720 &priv->ntfy_blocks[tx->ntfy_id].napi; in gve_xsk_wakeup()
1737 struct gve_priv *priv = netdev_priv(dev); in verify_xdp_configuration() local
1744 if (priv->queue_format != GVE_GQI_QPL_FORMAT) { in verify_xdp_configuration()
1746 priv->queue_format); in verify_xdp_configuration()
1756 if (priv->rx_cfg.num_queues != priv->tx_cfg.num_queues || in verify_xdp_configuration()
1757 (2 * priv->tx_cfg.num_queues > priv->tx_cfg.max_queues)) { in verify_xdp_configuration()
1759 priv->rx_cfg.num_queues, in verify_xdp_configuration()
1760 priv->tx_cfg.num_queues, in verify_xdp_configuration()
1761 priv->tx_cfg.max_queues); in verify_xdp_configuration()
1769 struct gve_priv *priv = netdev_priv(dev); in gve_xdp() local
1777 return gve_set_xdp(priv, xdp->prog, xdp->extack); in gve_xdp()
1788 int gve_flow_rules_reset(struct gve_priv *priv) in gve_flow_rules_reset() argument
1790 if (!priv->max_flow_rules) in gve_flow_rules_reset()
1793 return gve_adminq_reset_flow_rules(priv); in gve_flow_rules_reset()
1796 int gve_adjust_config(struct gve_priv *priv, in gve_adjust_config() argument
1803 err = gve_queues_mem_alloc(priv, tx_alloc_cfg, rx_alloc_cfg); in gve_adjust_config()
1805 netif_err(priv, drv, priv->dev, in gve_adjust_config()
1811 err = gve_close(priv->dev); in gve_adjust_config()
1813 netif_err(priv, drv, priv->dev, in gve_adjust_config()
1815 gve_queues_mem_free(priv, tx_alloc_cfg, rx_alloc_cfg); in gve_adjust_config()
1820 err = gve_queues_start(priv, tx_alloc_cfg, rx_alloc_cfg); in gve_adjust_config()
1822 netif_err(priv, drv, priv->dev, in gve_adjust_config()
1827 gve_turndown(priv); in gve_adjust_config()
1834 int gve_adjust_queues(struct gve_priv *priv, in gve_adjust_queues() argument
1842 gve_get_curr_alloc_cfgs(priv, &tx_alloc_cfg, &rx_alloc_cfg); in gve_adjust_queues()
1850 if (netif_running(priv->dev)) { in gve_adjust_queues()
1851 err = gve_adjust_config(priv, &tx_alloc_cfg, &rx_alloc_cfg); in gve_adjust_queues()
1855 priv->tx_cfg = new_tx_config; in gve_adjust_queues()
1856 priv->rx_cfg = new_rx_config; in gve_adjust_queues()
1861 static void gve_turndown(struct gve_priv *priv) in gve_turndown() argument
1865 if (netif_carrier_ok(priv->dev)) in gve_turndown()
1866 netif_carrier_off(priv->dev); in gve_turndown()
1868 if (!gve_get_napi_enabled(priv)) in gve_turndown()
1872 for (idx = 0; idx < gve_num_tx_queues(priv); idx++) { in gve_turndown()
1873 int ntfy_idx = gve_tx_idx_to_ntfy(priv, idx); in gve_turndown()
1874 struct gve_notify_block *block = &priv->ntfy_blocks[ntfy_idx]; in gve_turndown()
1876 if (!gve_tx_was_added_to_block(priv, idx)) in gve_turndown()
1880 for (idx = 0; idx < priv->rx_cfg.num_queues; idx++) { in gve_turndown()
1881 int ntfy_idx = gve_rx_idx_to_ntfy(priv, idx); in gve_turndown()
1882 struct gve_notify_block *block = &priv->ntfy_blocks[ntfy_idx]; in gve_turndown()
1884 if (!gve_rx_was_added_to_block(priv, idx)) in gve_turndown()
1890 netif_tx_disable(priv->dev); in gve_turndown()
1892 gve_clear_napi_enabled(priv); in gve_turndown()
1893 gve_clear_report_stats(priv); in gve_turndown()
1896 static void gve_turnup(struct gve_priv *priv) in gve_turnup() argument
1901 netif_tx_start_all_queues(priv->dev); in gve_turnup()
1904 for (idx = 0; idx < gve_num_tx_queues(priv); idx++) { in gve_turnup()
1905 int ntfy_idx = gve_tx_idx_to_ntfy(priv, idx); in gve_turnup()
1906 struct gve_notify_block *block = &priv->ntfy_blocks[ntfy_idx]; in gve_turnup()
1908 if (!gve_tx_was_added_to_block(priv, idx)) in gve_turnup()
1912 if (gve_is_gqi(priv)) { in gve_turnup()
1913 iowrite32be(0, gve_irq_doorbell(priv, block)); in gve_turnup()
1915 gve_set_itr_coalesce_usecs_dqo(priv, block, in gve_turnup()
1916 priv->tx_coalesce_usecs); in gve_turnup()
1926 for (idx = 0; idx < priv->rx_cfg.num_queues; idx++) { in gve_turnup()
1927 int ntfy_idx = gve_rx_idx_to_ntfy(priv, idx); in gve_turnup()
1928 struct gve_notify_block *block = &priv->ntfy_blocks[ntfy_idx]; in gve_turnup()
1930 if (!gve_rx_was_added_to_block(priv, idx)) in gve_turnup()
1934 if (gve_is_gqi(priv)) { in gve_turnup()
1935 iowrite32be(0, gve_irq_doorbell(priv, block)); in gve_turnup()
1937 gve_set_itr_coalesce_usecs_dqo(priv, block, in gve_turnup()
1938 priv->rx_coalesce_usecs); in gve_turnup()
1949 gve_set_napi_enabled(priv); in gve_turnup()
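
In the GQI matches above, masking and re-enabling a queue interrupt is a single big-endian doorbell write (GVE_IRQ_MASK in gve_intr() and gve_tx_timeout(), 0 in gve_turnup()); DQO instead reprograms the ITR via gve_set_itr_coalesce_usecs_dqo(). A minimal sketch of the GQI-style write, with a hypothetical mask value and doorbell pointer:

    #include <linux/io.h>

    #define DEMO_IRQ_MASK   (1U << 0)       /* hypothetical mask bit */

    static void demo_irq_mask(__be32 __iomem *doorbell)
    {
            iowrite32be(DEMO_IRQ_MASK, doorbell);
    }

    static void demo_irq_unmask(__be32 __iomem *doorbell)
    {
            iowrite32be(0, doorbell);
    }
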
1952 static void gve_turnup_and_check_status(struct gve_priv *priv) in gve_turnup_and_check_status() argument
1956 gve_turnup(priv); in gve_turnup_and_check_status()
1957 status = ioread32be(&priv->reg_bar0->device_status); in gve_turnup_and_check_status()
1958 gve_handle_link_status(priv, GVE_DEVICE_STATUS_LINK_STATUS_MASK & status); in gve_turnup_and_check_status()
1965 struct gve_priv *priv; in gve_tx_timeout() local
1971 priv = netdev_priv(dev); in gve_tx_timeout()
1972 if (txqueue > priv->tx_cfg.num_queues) in gve_tx_timeout()
1975 ntfy_idx = gve_tx_idx_to_ntfy(priv, txqueue); in gve_tx_timeout()
1976 if (ntfy_idx >= priv->num_ntfy_blks) in gve_tx_timeout()
1979 block = &priv->ntfy_blocks[ntfy_idx]; in gve_tx_timeout()
1989 last_nic_done = gve_tx_load_event_counter(priv, tx); in gve_tx_timeout()
1992 iowrite32be(GVE_IRQ_MASK, gve_irq_doorbell(priv, block)); in gve_tx_timeout()
1999 gve_schedule_reset(priv); in gve_tx_timeout()
2004 priv->tx_timeo_cnt++; in gve_tx_timeout()
2007 u16 gve_get_pkt_buf_size(const struct gve_priv *priv, bool enable_hsplit) in gve_get_pkt_buf_size() argument
2009 if (enable_hsplit && priv->max_rx_buffer_size >= GVE_MAX_RX_BUFFER_SIZE) in gve_get_pkt_buf_size()
2016 bool gve_header_split_supported(const struct gve_priv *priv) in gve_header_split_supported() argument
2018 return priv->header_buf_size && priv->queue_format == GVE_DQO_RDA_FORMAT; in gve_header_split_supported()
2021 int gve_set_hsplit_config(struct gve_priv *priv, u8 tcp_data_split) in gve_set_hsplit_config() argument
2031 if (!gve_header_split_supported(priv)) { in gve_set_hsplit_config()
2032 dev_err(&priv->pdev->dev, "Header-split not supported\n"); in gve_set_hsplit_config()
2041 if (enable_hdr_split == priv->header_split_enabled) in gve_set_hsplit_config()
2044 gve_get_curr_alloc_cfgs(priv, &tx_alloc_cfg, &rx_alloc_cfg); in gve_set_hsplit_config()
2047 rx_alloc_cfg.packet_buffer_size = gve_get_pkt_buf_size(priv, enable_hdr_split); in gve_set_hsplit_config()
2049 if (netif_running(priv->dev)) in gve_set_hsplit_config()
2050 err = gve_adjust_config(priv, &tx_alloc_cfg, &rx_alloc_cfg); in gve_set_hsplit_config()
2060 struct gve_priv *priv = netdev_priv(netdev); in gve_set_features() local
2063 gve_get_curr_alloc_cfgs(priv, &tx_alloc_cfg, &rx_alloc_cfg); in gve_set_features()
2068 err = gve_adjust_config(priv, &tx_alloc_cfg, &rx_alloc_cfg); in gve_set_features()
2074 err = gve_flow_rules_reset(priv); in gve_set_features()
2099 static void gve_handle_status(struct gve_priv *priv, u32 status) in gve_handle_status() argument
2102 dev_info(&priv->pdev->dev, "Device requested reset.\n"); in gve_handle_status()
2103 gve_set_do_reset(priv); in gve_handle_status()
2106 priv->stats_report_trigger_cnt++; in gve_handle_status()
2107 gve_set_do_report_stats(priv); in gve_handle_status()
2111 static void gve_handle_reset(struct gve_priv *priv) in gve_handle_reset() argument
2117 if (gve_get_probe_in_progress(priv)) in gve_handle_reset()
2120 if (gve_get_do_reset(priv)) { in gve_handle_reset()
2122 gve_reset(priv, false); in gve_handle_reset()
2127 void gve_handle_report_stats(struct gve_priv *priv) in gve_handle_report_stats() argument
2129 struct stats *stats = priv->stats_report->stats; in gve_handle_report_stats()
2134 if (!gve_get_report_stats(priv)) in gve_handle_report_stats()
2137 be64_add_cpu(&priv->stats_report->written_count, 1); in gve_handle_report_stats()
2139 if (priv->tx) { in gve_handle_report_stats()
2140 for (idx = 0; idx < gve_num_tx_queues(priv); idx++) { in gve_handle_report_stats()
2145 if (gve_is_gqi(priv)) { in gve_handle_report_stats()
2146 last_completion = priv->tx[idx].done; in gve_handle_report_stats()
2147 tx_frames = priv->tx[idx].req; in gve_handle_report_stats()
2151 start = u64_stats_fetch_begin(&priv->tx[idx].statss); in gve_handle_report_stats()
2152 tx_bytes = priv->tx[idx].bytes_done; in gve_handle_report_stats()
2153 } while (u64_stats_fetch_retry(&priv->tx[idx].statss, start)); in gve_handle_report_stats()
2156 .value = cpu_to_be64(priv->tx[idx].wake_queue), in gve_handle_report_stats()
2161 .value = cpu_to_be64(priv->tx[idx].stop_queue), in gve_handle_report_stats()
2181 .value = cpu_to_be64(priv->tx[idx].queue_timeout), in gve_handle_report_stats()
2187 if (priv->rx) { in gve_handle_report_stats()
2188 for (idx = 0; idx < priv->rx_cfg.num_queues; idx++) { in gve_handle_report_stats()
2191 .value = cpu_to_be64(priv->rx[idx].desc.seqno), in gve_handle_report_stats()
2196 .value = cpu_to_be64(priv->rx[0].fill_cnt), in gve_handle_report_stats()
2206 struct gve_priv *priv = container_of(work, struct gve_priv, in gve_service_task() local
2208 u32 status = ioread32be(&priv->reg_bar0->device_status); in gve_service_task()
2210 gve_handle_status(priv, status); in gve_service_task()
2212 gve_handle_reset(priv); in gve_service_task()
2213 gve_handle_link_status(priv, GVE_DEVICE_STATUS_LINK_STATUS_MASK & status); in gve_service_task()
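
The service-task matches show the standard ordered-workqueue pattern: interrupt and status paths only set flags and queue_work() the task, and the work function recovers its owning structure with container_of(). A minimal sketch with hypothetical names:

    #include <linux/errno.h>
    #include <linux/kernel.h>
    #include <linux/workqueue.h>

    struct demo_task_priv {
            struct workqueue_struct *wq;
            struct work_struct service_task;
    };

    static void demo_service_task(struct work_struct *work)
    {
            struct demo_task_priv *priv = container_of(work, struct demo_task_priv,
                                                       service_task);

            /* Read device status, handle resets and link changes for priv here. */
            (void)priv;
    }

    static int demo_service_task_init(struct demo_task_priv *priv)
    {
            priv->wq = alloc_ordered_workqueue("demo", 0);
            if (!priv->wq)
                    return -ENOMEM;
            INIT_WORK(&priv->service_task, demo_service_task);
            return 0;
    }

    static void demo_service_task_kick(struct demo_task_priv *priv)
    {
            queue_work(priv->wq, &priv->service_task);
    }
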
2216 static void gve_set_netdev_xdp_features(struct gve_priv *priv) in gve_set_netdev_xdp_features() argument
2218 if (priv->queue_format == GVE_GQI_QPL_FORMAT) { in gve_set_netdev_xdp_features()
2219 priv->dev->xdp_features = NETDEV_XDP_ACT_BASIC; in gve_set_netdev_xdp_features()
2220 priv->dev->xdp_features |= NETDEV_XDP_ACT_REDIRECT; in gve_set_netdev_xdp_features()
2221 priv->dev->xdp_features |= NETDEV_XDP_ACT_NDO_XMIT; in gve_set_netdev_xdp_features()
2222 priv->dev->xdp_features |= NETDEV_XDP_ACT_XSK_ZEROCOPY; in gve_set_netdev_xdp_features()
2224 priv->dev->xdp_features = 0; in gve_set_netdev_xdp_features()
2228 static int gve_init_priv(struct gve_priv *priv, bool skip_describe_device) in gve_init_priv() argument
2234 err = gve_adminq_alloc(&priv->pdev->dev, priv); in gve_init_priv()
2236 dev_err(&priv->pdev->dev, in gve_init_priv()
2241 err = gve_verify_driver_compatibility(priv); in gve_init_priv()
2243 dev_err(&priv->pdev->dev, in gve_init_priv()
2248 priv->num_registered_pages = 0; in gve_init_priv()
2253 priv->queue_format = GVE_QUEUE_FORMAT_UNSPECIFIED; in gve_init_priv()
2255 err = gve_adminq_describe_device(priv); in gve_init_priv()
2257 dev_err(&priv->pdev->dev, in gve_init_priv()
2261 priv->dev->mtu = priv->dev->max_mtu; in gve_init_priv()
2262 num_ntfy = pci_msix_vec_count(priv->pdev); in gve_init_priv()
2264 dev_err(&priv->pdev->dev, in gve_init_priv()
2269 dev_err(&priv->pdev->dev, "gve needs at least %d MSI-x vectors, but only has %d\n", in gve_init_priv()
2276 if (!gve_is_gqi(priv)) in gve_init_priv()
2277 netif_set_tso_max_size(priv->dev, GVE_DQO_TX_MAX); in gve_init_priv()
2279 priv->rx_copybreak = GVE_DEFAULT_RX_COPYBREAK; in gve_init_priv()
2283 priv->num_ntfy_blks = (num_ntfy - 1) & ~0x1; in gve_init_priv()
2284 priv->mgmt_msix_idx = priv->num_ntfy_blks; in gve_init_priv()
2286 priv->tx_cfg.max_queues = in gve_init_priv()
2287 min_t(int, priv->tx_cfg.max_queues, priv->num_ntfy_blks / 2); in gve_init_priv()
2288 priv->rx_cfg.max_queues = in gve_init_priv()
2289 min_t(int, priv->rx_cfg.max_queues, priv->num_ntfy_blks / 2); in gve_init_priv()
2291 priv->tx_cfg.num_queues = priv->tx_cfg.max_queues; in gve_init_priv()
2292 priv->rx_cfg.num_queues = priv->rx_cfg.max_queues; in gve_init_priv()
2293 if (priv->default_num_queues > 0) { in gve_init_priv()
2294 priv->tx_cfg.num_queues = min_t(int, priv->default_num_queues, in gve_init_priv()
2295 priv->tx_cfg.num_queues); in gve_init_priv()
2296 priv->rx_cfg.num_queues = min_t(int, priv->default_num_queues, in gve_init_priv()
2297 priv->rx_cfg.num_queues); in gve_init_priv()
2300 dev_info(&priv->pdev->dev, "TX queues %d, RX queues %d\n", in gve_init_priv()
2301 priv->tx_cfg.num_queues, priv->rx_cfg.num_queues); in gve_init_priv()
2302 dev_info(&priv->pdev->dev, "Max TX queues %d, Max RX queues %d\n", in gve_init_priv()
2303 priv->tx_cfg.max_queues, priv->rx_cfg.max_queues); in gve_init_priv()
2305 if (!gve_is_gqi(priv)) { in gve_init_priv()
2306 priv->tx_coalesce_usecs = GVE_TX_IRQ_RATELIMIT_US_DQO; in gve_init_priv()
2307 priv->rx_coalesce_usecs = GVE_RX_IRQ_RATELIMIT_US_DQO; in gve_init_priv()
2311 gve_set_netdev_xdp_features(priv); in gve_init_priv()
2312 err = gve_setup_device_resources(priv); in gve_init_priv()
2316 gve_adminq_free(&priv->pdev->dev, priv); in gve_init_priv()
2320 static void gve_teardown_priv_resources(struct gve_priv *priv) in gve_teardown_priv_resources() argument
2322 gve_teardown_device_resources(priv); in gve_teardown_priv_resources()
2323 gve_adminq_free(&priv->pdev->dev, priv); in gve_teardown_priv_resources()
2326 static void gve_trigger_reset(struct gve_priv *priv) in gve_trigger_reset() argument
2329 gve_adminq_release(priv); in gve_trigger_reset()
2332 static void gve_reset_and_teardown(struct gve_priv *priv, bool was_up) in gve_reset_and_teardown() argument
2334 gve_trigger_reset(priv); in gve_reset_and_teardown()
2337 gve_close(priv->dev); in gve_reset_and_teardown()
2338 gve_teardown_priv_resources(priv); in gve_reset_and_teardown()
2341 static int gve_reset_recovery(struct gve_priv *priv, bool was_up) in gve_reset_recovery() argument
2345 err = gve_init_priv(priv, true); in gve_reset_recovery()
2349 err = gve_open(priv->dev); in gve_reset_recovery()
2355 dev_err(&priv->pdev->dev, "Reset failed! !!! DISABLING ALL QUEUES !!!\n"); in gve_reset_recovery()
2356 gve_turndown(priv); in gve_reset_recovery()
2360 int gve_reset(struct gve_priv *priv, bool attempt_teardown) in gve_reset() argument
2362 bool was_up = netif_running(priv->dev); in gve_reset()
2365 dev_info(&priv->pdev->dev, "Performing reset\n"); in gve_reset()
2366 gve_clear_do_reset(priv); in gve_reset()
2367 gve_set_reset_in_progress(priv); in gve_reset()
2372 gve_turndown(priv); in gve_reset()
2373 gve_reset_and_teardown(priv, was_up); in gve_reset()
2377 err = gve_close(priv->dev); in gve_reset()
2380 gve_reset_and_teardown(priv, was_up); in gve_reset()
2383 gve_teardown_priv_resources(priv); in gve_reset()
2387 err = gve_reset_recovery(priv, was_up); in gve_reset()
2388 gve_clear_reset_in_progress(priv); in gve_reset()
2389 priv->reset_cnt++; in gve_reset()
2390 priv->interface_up_cnt = 0; in gve_reset()
2391 priv->interface_down_cnt = 0; in gve_reset()
2392 priv->stats_report_trigger_cnt = 0; in gve_reset()
2415 struct gve_priv *priv = netdev_priv(dev); in gve_rx_queue_stop() local
2419 if (!priv->rx) in gve_rx_queue_stop()
2423 if (!gve_is_gqi(priv) && idx == 0) in gve_rx_queue_stop()
2427 gve_turndown(priv); in gve_rx_queue_stop()
2430 err = gve_adminq_destroy_single_rx_queue(priv, idx); in gve_rx_queue_stop()
2434 if (gve_is_qpl(priv)) { in gve_rx_queue_stop()
2436 err = gve_unregister_qpl(priv, gve_rx_get_qpl(priv, idx)); in gve_rx_queue_stop()
2441 gve_rx_stop_ring(priv, idx); in gve_rx_queue_stop()
2444 gve_turnup_and_check_status(priv); in gve_rx_queue_stop()
2447 *gve_per_q_mem = priv->rx[idx]; in gve_rx_queue_stop()
2448 memset(&priv->rx[idx], 0, sizeof(priv->rx[idx])); in gve_rx_queue_stop()
2454 struct gve_priv *priv = netdev_priv(dev); in gve_rx_queue_mem_free() local
2459 gve_rx_get_curr_alloc_cfg(priv, &cfg); in gve_rx_queue_mem_free()
2461 if (gve_is_gqi(priv)) in gve_rx_queue_mem_free()
2462 gve_rx_free_ring_gqi(priv, gve_per_q_mem, &cfg); in gve_rx_queue_mem_free()
2464 gve_rx_free_ring_dqo(priv, gve_per_q_mem, &cfg); in gve_rx_queue_mem_free()
2470 struct gve_priv *priv = netdev_priv(dev); in gve_rx_queue_mem_alloc() local
2475 if (!priv->rx) in gve_rx_queue_mem_alloc()
2479 gve_rx_get_curr_alloc_cfg(priv, &cfg); in gve_rx_queue_mem_alloc()
2481 if (gve_is_gqi(priv)) in gve_rx_queue_mem_alloc()
2482 err = gve_rx_alloc_ring_gqi(priv, &cfg, gve_per_q_mem, idx); in gve_rx_queue_mem_alloc()
2484 err = gve_rx_alloc_ring_dqo(priv, &cfg, gve_per_q_mem, idx); in gve_rx_queue_mem_alloc()
2491 struct gve_priv *priv = netdev_priv(dev); in gve_rx_queue_start() local
2495 if (!priv->rx) in gve_rx_queue_start()
2499 priv->rx[idx] = *gve_per_q_mem; in gve_rx_queue_start()
2502 gve_turndown(priv); in gve_rx_queue_start()
2504 gve_rx_start_ring(priv, idx); in gve_rx_queue_start()
2506 if (gve_is_qpl(priv)) { in gve_rx_queue_start()
2508 err = gve_register_qpl(priv, gve_rx_get_qpl(priv, idx)); in gve_rx_queue_start()
2514 err = gve_adminq_create_single_rx_queue(priv, idx); in gve_rx_queue_start()
2518 if (gve_is_gqi(priv)) in gve_rx_queue_start()
2519 gve_rx_write_doorbell(priv, &priv->rx[idx]); in gve_rx_queue_start()
2521 gve_rx_post_buffers_dqo(&priv->rx[idx]); in gve_rx_queue_start()
2524 gve_turnup_and_check_status(priv); in gve_rx_queue_start()
2528 gve_rx_stop_ring(priv, idx); in gve_rx_queue_start()
2535 memset(&priv->rx[idx], 0, sizeof(priv->rx[idx])); in gve_rx_queue_start()
2553 struct gve_priv *priv; in gve_probe() local
2591 dev = alloc_etherdev_mqs(sizeof(*priv), max_tx_queues, max_rx_queues); in gve_probe()
2621 priv = netdev_priv(dev); in gve_probe()
2622 priv->dev = dev; in gve_probe()
2623 priv->pdev = pdev; in gve_probe()
2624 priv->msg_enable = DEFAULT_MSG_LEVEL; in gve_probe()
2625 priv->reg_bar0 = reg_bar; in gve_probe()
2626 priv->db_bar2 = db_bar; in gve_probe()
2627 priv->service_task_flags = 0x0; in gve_probe()
2628 priv->state_flags = 0x0; in gve_probe()
2629 priv->ethtool_flags = 0x0; in gve_probe()
2630 priv->data_buffer_size_dqo = GVE_DEFAULT_RX_BUFFER_SIZE; in gve_probe()
2631 priv->max_rx_buffer_size = GVE_DEFAULT_RX_BUFFER_SIZE; in gve_probe()
2633 gve_set_probe_in_progress(priv); in gve_probe()
2634 priv->gve_wq = alloc_ordered_workqueue("gve", 0); in gve_probe()
2635 if (!priv->gve_wq) { in gve_probe()
2640 INIT_WORK(&priv->service_task, gve_service_task); in gve_probe()
2641 INIT_WORK(&priv->stats_report_task, gve_stats_report_task); in gve_probe()
2642 priv->tx_cfg.max_queues = max_tx_queues; in gve_probe()
2643 priv->rx_cfg.max_queues = max_rx_queues; in gve_probe()
2645 err = gve_init_priv(priv, false); in gve_probe()
2654 dev_info(&pdev->dev, "GVE queue format %d\n", (int)priv->queue_format); in gve_probe()
2655 gve_clear_probe_in_progress(priv); in gve_probe()
2656 queue_work(priv->gve_wq, &priv->service_task); in gve_probe()
2660 gve_teardown_priv_resources(priv); in gve_probe()
2663 destroy_workqueue(priv->gve_wq); in gve_probe()
2685 struct gve_priv *priv = netdev_priv(netdev); in gve_remove() local
2686 __be32 __iomem *db_bar = priv->db_bar2; in gve_remove()
2687 void __iomem *reg_bar = priv->reg_bar0; in gve_remove()
2690 gve_teardown_priv_resources(priv); in gve_remove()
2691 destroy_workqueue(priv->gve_wq); in gve_remove()
2702 struct gve_priv *priv = netdev_priv(netdev); in gve_shutdown() local
2703 bool was_up = netif_running(priv->dev); in gve_shutdown()
2706 if (was_up && gve_close(priv->dev)) { in gve_shutdown()
2708 gve_reset_and_teardown(priv, was_up); in gve_shutdown()
2711 gve_teardown_priv_resources(priv); in gve_shutdown()
2720 struct gve_priv *priv = netdev_priv(netdev); in gve_suspend() local
2721 bool was_up = netif_running(priv->dev); in gve_suspend()
2723 priv->suspend_cnt++; in gve_suspend()
2725 if (was_up && gve_close(priv->dev)) { in gve_suspend()
2727 gve_reset_and_teardown(priv, was_up); in gve_suspend()
2730 gve_teardown_priv_resources(priv); in gve_suspend()
2732 priv->up_before_suspend = was_up; in gve_suspend()
2740 struct gve_priv *priv = netdev_priv(netdev); in gve_resume() local
2743 priv->resume_cnt++; in gve_resume()
2745 err = gve_reset_recovery(priv, priv->up_before_suspend); in gve_resume()