Lines matching refs: rdev (Broadcom bnxt_re RoCE driver)

82 static void bnxt_re_dev_stop(struct bnxt_re_dev *rdev);
86 static void bnxt_re_dev_uninit(struct bnxt_re_dev *rdev, u8 op_type);
87 static int bnxt_re_hwrm_qcaps(struct bnxt_re_dev *rdev);
89 static int bnxt_re_hwrm_qcfg(struct bnxt_re_dev *rdev, u32 *db_len,
91 static void bnxt_re_setup_cc(struct bnxt_re_dev *rdev, bool enable);
92 static void bnxt_re_set_db_offset(struct bnxt_re_dev *rdev) in bnxt_re_set_db_offset() argument
102 res = &rdev->qplib_res; in bnxt_re_set_db_offset()
103 en_dev = rdev->en_dev; in bnxt_re_set_db_offset()
104 cctx = rdev->chip_ctx; in bnxt_re_set_db_offset()
107 rc = bnxt_re_hwrm_qcfg(rdev, &l2db_len, &offset); in bnxt_re_set_db_offset()
109 dev_info(rdev_to_dev(rdev), in bnxt_re_set_db_offset()
129 dev_info(rdev_to_dev(rdev), "Low latency framework is enabled\n"); in bnxt_re_set_db_offset()
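The doorbell-offset code above queries firmware (bnxt_re_hwrm_qcfg, line 107) for the L2 doorbell region and only moves the RoCE register offset when a valid length comes back; otherwise it stays on the default page. A minimal userspace model of that decision — the default-page fallback and the parameter names here are assumptions inferred from the calls and messages above, not the driver's exact logic:

    #include <stdio.h>
    #include <stdbool.h>

    #define DB_PAGE_SIZE 4096u   /* assumed default doorbell stride */

    /* Model: pick the kernel DPI offset from the firmware-reported
     * L2 doorbell length; fall back to one page when qcfg fails or
     * the L2 driver reports nothing. */
    static unsigned int pick_db_offset(bool qcfg_ok, unsigned int l2db_len,
                                       unsigned int fw_offset)
    {
        if (!qcfg_ok || !l2db_len)
            return DB_PAGE_SIZE;  /* no valid qcfg data: default page */
        return fw_offset;         /* firmware-provided start of RoCE DBs */
    }

    int main(void)
    {
        printf("qcfg failed -> offset %u\n", pick_db_offset(false, 0, 0));
        printf("qcfg ok     -> offset %u\n", pick_db_offset(true, 8192, 8192));
        return 0;
    }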
133 static void bnxt_re_set_drv_mode(struct bnxt_re_dev *rdev) in bnxt_re_set_drv_mode() argument
137 cctx = rdev->chip_ctx; in bnxt_re_set_drv_mode()
138 cctx->modes.wqe_mode = bnxt_qplib_is_chip_gen_p7(rdev->chip_ctx) ? in bnxt_re_set_drv_mode()
140 if (bnxt_re_hwrm_qcaps(rdev)) in bnxt_re_set_drv_mode()
141 dev_err(rdev_to_dev(rdev), in bnxt_re_set_drv_mode()
143 if (bnxt_qplib_is_chip_gen_p7(rdev->chip_ctx)) { in bnxt_re_set_drv_mode()
149 static void bnxt_re_destroy_chip_ctx(struct bnxt_re_dev *rdev) in bnxt_re_destroy_chip_ctx() argument
153 if (!rdev->chip_ctx) in bnxt_re_destroy_chip_ctx()
155 chip_ctx = rdev->chip_ctx; in bnxt_re_destroy_chip_ctx()
156 rdev->chip_ctx = NULL; in bnxt_re_destroy_chip_ctx()
157 rdev->rcfw.res = NULL; in bnxt_re_destroy_chip_ctx()
158 rdev->qplib_res.cctx = NULL; in bnxt_re_destroy_chip_ctx()
159 rdev->qplib_res.pdev = NULL; in bnxt_re_destroy_chip_ctx()
160 rdev->qplib_res.netdev = NULL; in bnxt_re_destroy_chip_ctx()
164 static int bnxt_re_setup_chip_ctx(struct bnxt_re_dev *rdev) in bnxt_re_setup_chip_ctx() argument
170 en_dev = rdev->en_dev; in bnxt_re_setup_chip_ctx()
172 rdev->qplib_res.pdev = en_dev->pdev; in bnxt_re_setup_chip_ctx()
179 rdev->chip_ctx = chip_ctx; in bnxt_re_setup_chip_ctx()
182 rdev->qplib_res.cctx = rdev->chip_ctx; in bnxt_re_setup_chip_ctx()
183 rdev->rcfw.res = &rdev->qplib_res; in bnxt_re_setup_chip_ctx()
184 rdev->qplib_res.dattr = &rdev->dev_attr; in bnxt_re_setup_chip_ctx()
185 rdev->qplib_res.is_vf = BNXT_EN_VF(en_dev); in bnxt_re_setup_chip_ctx()
187 bnxt_re_set_drv_mode(rdev); in bnxt_re_setup_chip_ctx()
189 bnxt_re_set_db_offset(rdev); in bnxt_re_setup_chip_ctx()
190 rc = bnxt_qplib_map_db_bar(&rdev->qplib_res); in bnxt_re_setup_chip_ctx()
192 kfree(rdev->chip_ctx); in bnxt_re_setup_chip_ctx()
193 rdev->chip_ctx = NULL; in bnxt_re_setup_chip_ctx()
198 ibdev_info(&rdev->ibdev, in bnxt_re_setup_chip_ctx()
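bnxt_re_setup_chip_ctx() follows the usual allocate-wire-map order, and on a BAR-mapping failure (lines 190-193 above) it frees the context and clears the pointer so later teardown stays safe. A compilable sketch of that unwind; every name below is a stand-in, and map_db_bar() is forced to fail to exercise the error path:

    #include <stdio.h>
    #include <stdlib.h>

    struct chip_ctx { int chip_num; };
    struct dev { struct chip_ctx *chip_ctx; };

    /* Stand-in for bnxt_qplib_map_db_bar(); fails on purpose. */
    static int map_db_bar(void) { return -1; }

    static int setup_chip_ctx(struct dev *d)
    {
        d->chip_ctx = calloc(1, sizeof(*d->chip_ctx));
        if (!d->chip_ctx)
            return -1;
        if (map_db_bar()) {          /* mirrors lines 190-193 above */
            free(d->chip_ctx);
            d->chip_ctx = NULL;      /* never leave a dangling pointer */
            return -1;
        }
        return 0;
    }

    int main(void)
    {
        struct dev d = { 0 };
        printf("setup rc=%d, ctx=%p\n", setup_chip_ctx(&d), (void *)d.chip_ctx);
        return 0;
    }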
205 static void bnxt_re_get_sriov_func_type(struct bnxt_re_dev *rdev) in bnxt_re_get_sriov_func_type() argument
207 if (BNXT_EN_VF(rdev->en_dev)) in bnxt_re_get_sriov_func_type()
208 rdev->is_virtfn = 1; in bnxt_re_get_sriov_func_type()
216 static void bnxt_re_limit_pf_res(struct bnxt_re_dev *rdev) in bnxt_re_limit_pf_res() argument
222 attr = &rdev->dev_attr; in bnxt_re_limit_pf_res()
223 ctx = &rdev->qplib_ctx; in bnxt_re_limit_pf_res()
233 if (!bnxt_qplib_is_chip_gen_p5_p7(rdev->chip_ctx)) in bnxt_re_limit_pf_res()
235 rdev->qplib_ctx.tqm_ctx.qcount[i] = in bnxt_re_limit_pf_res()
236 rdev->dev_attr.tqm_alloc_reqs[i]; in bnxt_re_limit_pf_res()
275 static void bnxt_re_set_resource_limits(struct bnxt_re_dev *rdev) in bnxt_re_set_resource_limits() argument
279 memset(&rdev->qplib_ctx.vf_res, 0, sizeof(struct bnxt_qplib_vf_res)); in bnxt_re_set_resource_limits()
280 bnxt_re_limit_pf_res(rdev); in bnxt_re_set_resource_limits()
282 num_vfs = bnxt_qplib_is_chip_gen_p5_p7(rdev->chip_ctx) ? in bnxt_re_set_resource_limits()
283 BNXT_RE_GEN_P5_MAX_VF : rdev->num_vfs; in bnxt_re_set_resource_limits()
285 bnxt_re_limit_vf_res(&rdev->qplib_ctx, num_vfs); in bnxt_re_set_resource_limits()
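bnxt_re_set_resource_limits() first pins down the PF's share (bnxt_re_limit_pf_res) and then splits what remains across the VF count, capped at BNXT_RE_GEN_P5_MAX_VF on P5+ chips. A toy model of the per-VF division; the reservation constant and function name are illustrative assumptions, not the driver's values:

    #include <stdio.h>

    /* Hypothetical PF reservation; the real driver has its own constants. */
    #define RESVD_QP_FOR_PF 64u

    static unsigned int qps_per_vf(unsigned int total_qp, unsigned int num_vfs)
    {
        if (!num_vfs || total_qp <= RESVD_QP_FOR_PF)
            return 0;
        return (total_qp - RESVD_QP_FOR_PF) / num_vfs;  /* equal split of the rest */
    }

    int main(void)
    {
        printf("%u QPs per VF\n", qps_per_vf(4096, 64));
        return 0;
    }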
288 static void bnxt_re_vf_res_config(struct bnxt_re_dev *rdev) in bnxt_re_vf_res_config() argument
290 rdev->num_vfs = pci_sriov_get_totalvfs(rdev->en_dev->pdev); in bnxt_re_vf_res_config()
291 if (!bnxt_qplib_is_chip_gen_p5_p7(rdev->chip_ctx)) { in bnxt_re_vf_res_config()
292 bnxt_re_set_resource_limits(rdev); in bnxt_re_vf_res_config()
293 bnxt_qplib_set_func_resources(&rdev->qplib_res, &rdev->rcfw, in bnxt_re_vf_res_config()
294 &rdev->qplib_ctx); in bnxt_re_vf_res_config()
301 struct bnxt_re_dev *rdev; in bnxt_re_shutdown() local
303 rdev = en_info->rdev; in bnxt_re_shutdown()
304 ib_unregister_device(&rdev->ibdev); in bnxt_re_shutdown()
305 bnxt_re_dev_uninit(rdev, BNXT_RE_COMPLETE_REMOVE); in bnxt_re_shutdown()
312 struct bnxt_re_dev *rdev; in bnxt_re_stop_irq() local
316 rdev = en_info->rdev; in bnxt_re_stop_irq()
317 rcfw = &rdev->rcfw; in bnxt_re_stop_irq()
319 for (indx = BNXT_RE_NQ_IDX; indx < rdev->num_msix; indx++) { in bnxt_re_stop_irq()
320 nq = &rdev->nq[indx - 1]; in bnxt_re_stop_irq()
332 struct bnxt_re_dev *rdev; in bnxt_re_start_irq() local
336 rdev = en_info->rdev; in bnxt_re_start_irq()
337 msix_ent = rdev->en_dev->msix_entries; in bnxt_re_start_irq()
338 rcfw = &rdev->rcfw; in bnxt_re_start_irq()
345 ibdev_err(&rdev->ibdev, "Failed to re-start IRQs\n"); in bnxt_re_start_irq()
352 for (indx = 0; indx < rdev->num_msix; indx++) in bnxt_re_start_irq()
353 rdev->en_dev->msix_entries[indx].vector = ent[indx].vector; in bnxt_re_start_irq()
358 ibdev_warn(&rdev->ibdev, "Failed to reinit CREQ\n"); in bnxt_re_start_irq()
361 for (indx = BNXT_RE_NQ_IDX ; indx < rdev->num_msix; indx++) { in bnxt_re_start_irq()
362 nq = &rdev->nq[indx - 1]; in bnxt_re_start_irq()
366 ibdev_warn(&rdev->ibdev, "Failed to reinit NQ index %d\n", in bnxt_re_start_irq()
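The stop/start IRQ pair above quiesces every consumer of the old vectors (CREQ plus the NQs), adopts the vectors the L2 driver hands back (the ent[] copy on line 353), then re-arms the consumers. A runnable model of that rebind sequence, with printouts standing in for the disable/reinit calls:

    #include <stdio.h>

    #define NVEC 4

    struct msix { int vector; };

    static void stop_irq(struct msix *tbl, int n)
    {
        for (int i = 0; i < n; i++)
            printf("disable consumer on vector %d\n", tbl[i].vector);
    }

    static void start_irq(struct msix *tbl, const struct msix *ent, int n)
    {
        for (int i = 0; i < n; i++)
            tbl[i].vector = ent[i].vector;   /* adopt re-allocated vectors */
        for (int i = 0; i < n; i++)
            printf("re-arm consumer on vector %d\n", tbl[i].vector);
    }

    int main(void)
    {
        struct msix tbl[NVEC] = { {32}, {33}, {34}, {35} };
        struct msix ent[NVEC] = { {40}, {41}, {42}, {43} };
        stop_irq(tbl, NVEC);
        start_irq(tbl, ent, NVEC);
        return 0;
    }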
380 static int bnxt_re_register_netdev(struct bnxt_re_dev *rdev) in bnxt_re_register_netdev() argument
384 en_dev = rdev->en_dev; in bnxt_re_register_netdev()
385 return bnxt_register_dev(en_dev, &bnxt_re_ulp_ops, rdev->adev); in bnxt_re_register_netdev()
407 static int bnxt_re_hwrm_qcfg(struct bnxt_re_dev *rdev, u32 *db_len, in bnxt_re_hwrm_qcfg() argument
410 struct bnxt_en_dev *en_dev = rdev->en_dev; in bnxt_re_hwrm_qcfg()
429 int bnxt_re_hwrm_qcaps(struct bnxt_re_dev *rdev) in bnxt_re_hwrm_qcaps() argument
431 struct bnxt_en_dev *en_dev = rdev->en_dev; in bnxt_re_hwrm_qcaps()
439 cctx = rdev->chip_ctx; in bnxt_re_hwrm_qcaps()
456 static int bnxt_re_hwrm_dbr_pacing_qcfg(struct bnxt_re_dev *rdev) in bnxt_re_hwrm_dbr_pacing_qcfg() argument
458 struct bnxt_qplib_db_pacing_data *pacing_data = rdev->qplib_res.pacing_data; in bnxt_re_hwrm_dbr_pacing_qcfg()
461 struct bnxt_en_dev *en_dev = rdev->en_dev; in bnxt_re_hwrm_dbr_pacing_qcfg()
466 cctx = rdev->chip_ctx; in bnxt_re_hwrm_dbr_pacing_qcfg()
491 static void bnxt_re_set_default_pacing_data(struct bnxt_re_dev *rdev) in bnxt_re_set_default_pacing_data() argument
493 struct bnxt_qplib_db_pacing_data *pacing_data = rdev->qplib_res.pacing_data; in bnxt_re_set_default_pacing_data()
495 pacing_data->do_pacing = rdev->pacing.dbr_def_do_pacing; in bnxt_re_set_default_pacing_data()
496 pacing_data->pacing_th = rdev->pacing.pacing_algo_th; in bnxt_re_set_default_pacing_data()
501 static u32 __get_fifo_occupancy(struct bnxt_re_dev *rdev) in __get_fifo_occupancy() argument
503 struct bnxt_qplib_db_pacing_data *pacing_data = rdev->qplib_res.pacing_data; in __get_fifo_occupancy()
506 read_val = readl(rdev->en_dev->bar0 + rdev->pacing.dbr_db_fifo_reg_off); in __get_fifo_occupancy()
513 static bool is_dbr_fifo_full(struct bnxt_re_dev *rdev) in is_dbr_fifo_full() argument
517 fifo_occup = __get_fifo_occupancy(rdev); in is_dbr_fifo_full()
518 max_occup = BNXT_RE_MAX_FIFO_DEPTH(rdev->chip_ctx) - 1; in is_dbr_fifo_full()
525 static void __wait_for_fifo_occupancy_below_th(struct bnxt_re_dev *rdev) in __wait_for_fifo_occupancy_below_th() argument
527 struct bnxt_qplib_db_pacing_data *pacing_data = rdev->qplib_res.pacing_data; in __wait_for_fifo_occupancy_below_th()
535 fifo_occup = __get_fifo_occupancy(rdev); in __wait_for_fifo_occupancy_below_th()
543 dev_info_once(rdev_to_dev(rdev), in __wait_for_fifo_occupancy_below_th()
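__get_fifo_occupancy() derives occupancy from the single register read on line 506: the register reports free room in a masked field, so occupancy is the FIFO depth minus that room. A userspace model of the arithmetic; the depth, mask, and shift values below are made up for illustration:

    #include <stdio.h>
    #include <stdint.h>

    #define FIFO_MAX_DEPTH   0x2c00u      /* illustrative depth */
    #define FIFO_ROOM_MASK   0x3fff0000u  /* illustrative field placement */
    #define FIFO_ROOM_SHIFT  16u

    /* occupancy = depth - free_room, with free_room extracted from
     * the doorbell FIFO status register. */
    static uint32_t fifo_occupancy(uint32_t read_val)
    {
        uint32_t room = (read_val & FIFO_ROOM_MASK) >> FIFO_ROOM_SHIFT;
        return FIFO_MAX_DEPTH - room;
    }

    int main(void)
    {
        uint32_t reg = 0x2b00u << FIFO_ROOM_SHIFT;  /* mostly empty FIFO */
        printf("occupancy = %u\n", fifo_occupancy(reg));
        return 0;
    }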
555 struct bnxt_re_dev *rdev = container_of(work, struct bnxt_re_dev, in bnxt_re_db_fifo_check() local
560 if (!mutex_trylock(&rdev->pacing.dbq_lock)) in bnxt_re_db_fifo_check()
562 pacing_data = rdev->qplib_res.pacing_data; in bnxt_re_db_fifo_check()
563 pacing_save = rdev->pacing.do_pacing_save; in bnxt_re_db_fifo_check()
564 __wait_for_fifo_occupancy_below_th(rdev); in bnxt_re_db_fifo_check()
565 cancel_delayed_work_sync(&rdev->dbq_pacing_work); in bnxt_re_db_fifo_check()
566 if (pacing_save > rdev->pacing.dbr_def_do_pacing) { in bnxt_re_db_fifo_check()
578 pacing_data->pacing_th = rdev->pacing.pacing_algo_th * 4; in bnxt_re_db_fifo_check()
585 rdev->pacing.do_pacing_save = pacing_data->do_pacing; in bnxt_re_db_fifo_check()
588 schedule_delayed_work(&rdev->dbq_pacing_work, in bnxt_re_db_fifo_check()
589 msecs_to_jiffies(rdev->pacing.dbq_pacing_time)); in bnxt_re_db_fifo_check()
590 rdev->stats.pacing.alerts++; in bnxt_re_db_fifo_check()
591 mutex_unlock(&rdev->pacing.dbq_lock); in bnxt_re_db_fifo_check()
596 struct bnxt_re_dev *rdev = container_of(work, struct bnxt_re_dev, in bnxt_re_pacing_timer_exp() local
601 if (!mutex_trylock(&rdev->pacing.dbq_lock)) in bnxt_re_pacing_timer_exp()
604 pacing_data = rdev->qplib_res.pacing_data; in bnxt_re_pacing_timer_exp()
605 fifo_occup = __get_fifo_occupancy(rdev); in bnxt_re_pacing_timer_exp()
615 pacing_data->do_pacing = max_t(u32, rdev->pacing.dbr_def_do_pacing, pacing_data->do_pacing); in bnxt_re_pacing_timer_exp()
616 if (pacing_data->do_pacing <= rdev->pacing.dbr_def_do_pacing) { in bnxt_re_pacing_timer_exp()
617 bnxt_re_set_default_pacing_data(rdev); in bnxt_re_pacing_timer_exp()
618 rdev->stats.pacing.complete++; in bnxt_re_pacing_timer_exp()
623 schedule_delayed_work(&rdev->dbq_pacing_work, in bnxt_re_pacing_timer_exp()
624 msecs_to_jiffies(rdev->pacing.dbq_pacing_time)); in bnxt_re_pacing_timer_exp()
625 rdev->stats.pacing.resched++; in bnxt_re_pacing_timer_exp()
627 rdev->pacing.do_pacing_save = pacing_data->do_pacing; in bnxt_re_pacing_timer_exp()
628 mutex_unlock(&rdev->pacing.dbq_lock); in bnxt_re_pacing_timer_exp()
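bnxt_re_pacing_timer_exp() steps do_pacing back toward the default once the FIFO drains, and the max_t() on line 615 clamps it so it never drops below the no-congestion value before bnxt_re_set_default_pacing_data() completes the cycle. A runnable model of that relaxation; the 1/8-per-tick decay rate and the starting value are assumptions for illustration:

    #include <stdio.h>

    #define DEF_DO_PACING 0x10u   /* illustrative no-congestion value */

    int main(void)
    {
        unsigned int do_pacing = 0x1000;  /* elevated after an alert */
        int ticks = 0;

        while (do_pacing > DEF_DO_PACING) {
            do_pacing -= do_pacing >> 3;      /* assumed decay step */
            if (do_pacing < DEF_DO_PACING)
                do_pacing = DEF_DO_PACING;    /* the max_t() clamp */
            ticks++;
        }
        printf("back to default after %d timer expiries\n", ticks);
        return 0;
    }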
631 void bnxt_re_pacing_alert(struct bnxt_re_dev *rdev) in bnxt_re_pacing_alert() argument
635 if (!rdev->pacing.dbr_pacing) in bnxt_re_pacing_alert()
637 mutex_lock(&rdev->pacing.dbq_lock); in bnxt_re_pacing_alert()
638 pacing_data = rdev->qplib_res.pacing_data; in bnxt_re_pacing_alert()
646 cancel_work_sync(&rdev->dbq_fifo_check_work); in bnxt_re_pacing_alert()
647 schedule_work(&rdev->dbq_fifo_check_work); in bnxt_re_pacing_alert()
648 mutex_unlock(&rdev->pacing.dbq_lock); in bnxt_re_pacing_alert()
651 static int bnxt_re_initialize_dbr_pacing(struct bnxt_re_dev *rdev) in bnxt_re_initialize_dbr_pacing() argument
654 rdev->pacing.dbr_page = (void *)__get_free_page(GFP_KERNEL); in bnxt_re_initialize_dbr_pacing()
655 if (!rdev->pacing.dbr_page) in bnxt_re_initialize_dbr_pacing()
658 memset((u8 *)rdev->pacing.dbr_page, 0, PAGE_SIZE); in bnxt_re_initialize_dbr_pacing()
659 rdev->qplib_res.pacing_data = (struct bnxt_qplib_db_pacing_data *)rdev->pacing.dbr_page; in bnxt_re_initialize_dbr_pacing()
661 if (bnxt_re_hwrm_dbr_pacing_qcfg(rdev)) { in bnxt_re_initialize_dbr_pacing()
662 free_page((u64)rdev->pacing.dbr_page); in bnxt_re_initialize_dbr_pacing()
663 rdev->pacing.dbr_page = NULL; in bnxt_re_initialize_dbr_pacing()
668 writel(rdev->chip_ctx->dbr_stat_db_fifo & BNXT_GRC_BASE_MASK, in bnxt_re_initialize_dbr_pacing()
669 rdev->en_dev->bar0 + BNXT_GRCPF_REG_WINDOW_BASE_OUT + 4); in bnxt_re_initialize_dbr_pacing()
670 rdev->pacing.dbr_db_fifo_reg_off = in bnxt_re_initialize_dbr_pacing()
671 (rdev->chip_ctx->dbr_stat_db_fifo & BNXT_GRC_OFFSET_MASK) + in bnxt_re_initialize_dbr_pacing()
673 rdev->pacing.dbr_bar_addr = in bnxt_re_initialize_dbr_pacing()
674 pci_resource_start(rdev->qplib_res.pdev, 0) + rdev->pacing.dbr_db_fifo_reg_off; in bnxt_re_initialize_dbr_pacing()
676 if (is_dbr_fifo_full(rdev)) { in bnxt_re_initialize_dbr_pacing()
677 free_page((u64)rdev->pacing.dbr_page); in bnxt_re_initialize_dbr_pacing()
678 rdev->pacing.dbr_page = NULL; in bnxt_re_initialize_dbr_pacing()
682 rdev->pacing.pacing_algo_th = BNXT_RE_PACING_ALGO_THRESHOLD; in bnxt_re_initialize_dbr_pacing()
683 rdev->pacing.dbq_pacing_time = BNXT_RE_DBR_PACING_TIME; in bnxt_re_initialize_dbr_pacing()
684 rdev->pacing.dbr_def_do_pacing = BNXT_RE_DBR_DO_PACING_NO_CONGESTION; in bnxt_re_initialize_dbr_pacing()
685 rdev->pacing.do_pacing_save = rdev->pacing.dbr_def_do_pacing; in bnxt_re_initialize_dbr_pacing()
686 rdev->qplib_res.pacing_data->grc_reg_offset = rdev->pacing.dbr_db_fifo_reg_off; in bnxt_re_initialize_dbr_pacing()
687 bnxt_re_set_default_pacing_data(rdev); in bnxt_re_initialize_dbr_pacing()
689 INIT_WORK(&rdev->dbq_fifo_check_work, bnxt_re_db_fifo_check); in bnxt_re_initialize_dbr_pacing()
690 INIT_DELAYED_WORK(&rdev->dbq_pacing_work, bnxt_re_pacing_timer_exp); in bnxt_re_initialize_dbr_pacing()
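The pacing init above programs a GRC window: the upper bits of the FIFO register address select the window base (the writel to BNXT_GRCPF_REG_WINDOW_BASE_OUT + 4 on lines 668-669) and the low bits become the offset used for later reads through BAR0. A sketch of that address split; the mask and base values here are assumptions, not the driver's constants:

    #include <stdio.h>
    #include <stdint.h>

    #define GRC_BASE_MASK    0xfffff000u  /* assumed: window-aligned bits */
    #define GRC_OFFSET_MASK  0x00000ffcu  /* assumed: dword offset bits */
    #define WINDOW_REG_BASE  0x2000u      /* assumed start of the window */

    int main(void)
    {
        uint32_t fifo_addr = 0x0012e34cu;          /* example GRC address */
        uint32_t win_base  = fifo_addr & GRC_BASE_MASK;
        uint32_t reg_off   = (fifo_addr & GRC_OFFSET_MASK) + WINDOW_REG_BASE;

        printf("program window base 0x%x, read at BAR0 + 0x%x\n",
               win_base, reg_off);
        return 0;
    }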
694 static void bnxt_re_deinitialize_dbr_pacing(struct bnxt_re_dev *rdev) in bnxt_re_deinitialize_dbr_pacing() argument
696 cancel_work_sync(&rdev->dbq_fifo_check_work); in bnxt_re_deinitialize_dbr_pacing()
697 cancel_delayed_work_sync(&rdev->dbq_pacing_work); in bnxt_re_deinitialize_dbr_pacing()
698 if (rdev->pacing.dbr_page) in bnxt_re_deinitialize_dbr_pacing()
699 free_page((u64)rdev->pacing.dbr_page); in bnxt_re_deinitialize_dbr_pacing()
701 rdev->pacing.dbr_page = NULL; in bnxt_re_deinitialize_dbr_pacing()
702 rdev->pacing.dbr_pacing = false; in bnxt_re_deinitialize_dbr_pacing()
705 static int bnxt_re_net_ring_free(struct bnxt_re_dev *rdev, in bnxt_re_net_ring_free() argument
714 if (!rdev) in bnxt_re_net_ring_free()
717 en_dev = rdev->en_dev; in bnxt_re_net_ring_free()
722 if (test_bit(BNXT_RE_FLAG_ERR_DEVICE_DETACHED, &rdev->flags)) in bnxt_re_net_ring_free()
732 ibdev_err(&rdev->ibdev, "Failed to free HW ring:%d :%#x", in bnxt_re_net_ring_free()
737 static int bnxt_re_net_ring_alloc(struct bnxt_re_dev *rdev, in bnxt_re_net_ring_alloc() argument
741 struct bnxt_en_dev *en_dev = rdev->en_dev; in bnxt_re_net_ring_alloc()
773 static int bnxt_re_net_stats_ctx_free(struct bnxt_re_dev *rdev, in bnxt_re_net_stats_ctx_free() argument
776 struct bnxt_en_dev *en_dev = rdev->en_dev; in bnxt_re_net_stats_ctx_free()
785 if (test_bit(BNXT_RE_FLAG_ERR_DEVICE_DETACHED, &rdev->flags)) in bnxt_re_net_stats_ctx_free()
794 ibdev_err(&rdev->ibdev, "Failed to free HW stats context %#x", in bnxt_re_net_stats_ctx_free()
800 static int bnxt_re_net_stats_ctx_alloc(struct bnxt_re_dev *rdev, in bnxt_re_net_stats_ctx_alloc() argument
804 struct bnxt_qplib_chip_ctx *chip_ctx = rdev->chip_ctx; in bnxt_re_net_stats_ctx_alloc()
807 struct bnxt_en_dev *en_dev = rdev->en_dev; in bnxt_re_net_stats_ctx_alloc()
849 struct bnxt_re_dev *rdev = in hw_rev_show() local
852 return sysfs_emit(buf, "0x%x\n", rdev->en_dev->pdev->vendor); in hw_rev_show()
859 struct bnxt_re_dev *rdev = in hca_type_show() local
862 return sysfs_emit(buf, "%s\n", rdev->ibdev.node_desc); in hca_type_show()
933 static int bnxt_re_register_ib(struct bnxt_re_dev *rdev) in bnxt_re_register_ib() argument
935 struct ib_device *ibdev = &rdev->ibdev; in bnxt_re_register_ib()
944 addrconf_addr_eui48((u8 *)&ibdev->node_guid, rdev->netdev->dev_addr); in bnxt_re_register_ib()
946 ibdev->num_comp_vectors = rdev->num_msix - 1; in bnxt_re_register_ib()
947 ibdev->dev.parent = &rdev->en_dev->pdev->dev; in bnxt_re_register_ib()
954 ret = ib_device_set_netdev(&rdev->ibdev, rdev->netdev, 1); in bnxt_re_register_ib()
958 dma_set_max_seg_size(&rdev->en_dev->pdev->dev, UINT_MAX); in bnxt_re_register_ib()
960 return ib_register_device(ibdev, "bnxt_re%d", &rdev->en_dev->pdev->dev); in bnxt_re_register_ib()
966 struct bnxt_re_dev *rdev; in bnxt_re_dev_add() local
969 rdev = ib_alloc_device(bnxt_re_dev, ibdev); in bnxt_re_dev_add()
970 if (!rdev) { in bnxt_re_dev_add()
976 rdev->nb.notifier_call = NULL; in bnxt_re_dev_add()
977 rdev->netdev = en_dev->net; in bnxt_re_dev_add()
978 rdev->en_dev = en_dev; in bnxt_re_dev_add()
979 rdev->adev = adev; in bnxt_re_dev_add()
980 rdev->id = rdev->en_dev->pdev->devfn; in bnxt_re_dev_add()
981 INIT_LIST_HEAD(&rdev->qp_list); in bnxt_re_dev_add()
982 mutex_init(&rdev->qp_lock); in bnxt_re_dev_add()
983 mutex_init(&rdev->pacing.dbq_lock); in bnxt_re_dev_add()
984 atomic_set(&rdev->stats.res.qp_count, 0); in bnxt_re_dev_add()
985 atomic_set(&rdev->stats.res.cq_count, 0); in bnxt_re_dev_add()
986 atomic_set(&rdev->stats.res.srq_count, 0); in bnxt_re_dev_add()
987 atomic_set(&rdev->stats.res.mr_count, 0); in bnxt_re_dev_add()
988 atomic_set(&rdev->stats.res.mw_count, 0); in bnxt_re_dev_add()
989 atomic_set(&rdev->stats.res.ah_count, 0); in bnxt_re_dev_add()
990 atomic_set(&rdev->stats.res.pd_count, 0); in bnxt_re_dev_add()
991 rdev->cosq[0] = 0xFFFF; in bnxt_re_dev_add()
992 rdev->cosq[1] = 0xFFFF; in bnxt_re_dev_add()
994 return rdev; in bnxt_re_dev_add()
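bnxt_re_dev_add() relies on ib_alloc_device() allocating the containing bnxt_re_dev in one shot with the struct ib_device embedded; callbacks such as hw_rev_show() (line 849) later recover the container from the member pointer. A userspace analogue of that container_of() round trip, with stand-in struct names:

    #include <stdio.h>
    #include <stdlib.h>
    #include <stddef.h>

    #define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

    struct ib_device { char name[16]; };
    struct re_dev    { int id; struct ib_device ibdev; };  /* ibdev embedded */

    int main(void)
    {
        struct re_dev *rdev = calloc(1, sizeof(*rdev));
        if (!rdev)
            return 1;
        rdev->id = 7;

        struct ib_device *ibdev = &rdev->ibdev;  /* what the core sees */
        struct re_dev *back = container_of(ibdev, struct re_dev, ibdev);

        printf("recovered id = %d\n", back->id);
        free(rdev);
        return 0;
    }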
1048 event.device = &qp->rdev->ibdev; in bnxt_re_handle_qp_async_event()
1137 ibdev_dbg(&qp->rdev->ibdev, in bnxt_re_handle_qp_async_event()
1183 ibevent.device = &cq->rdev->ibdev; in bnxt_re_handle_cq_async_error()
1185 ibdev_dbg(&cq->rdev->ibdev, in bnxt_re_handle_cq_async_error()
1251 ib_event.device = &srq->rdev->ibdev; in bnxt_re_srqn_handler()
1275 static void bnxt_re_cleanup_res(struct bnxt_re_dev *rdev) in bnxt_re_cleanup_res() argument
1279 for (i = 1; i < rdev->num_msix; i++) in bnxt_re_cleanup_res()
1280 bnxt_qplib_disable_nq(&rdev->nq[i - 1]); in bnxt_re_cleanup_res()
1282 if (rdev->qplib_res.rcfw) in bnxt_re_cleanup_res()
1283 bnxt_qplib_cleanup_res(&rdev->qplib_res); in bnxt_re_cleanup_res()
1286 static int bnxt_re_init_res(struct bnxt_re_dev *rdev) in bnxt_re_init_res() argument
1292 bnxt_qplib_init_res(&rdev->qplib_res); in bnxt_re_init_res()
1294 for (i = 1; i < rdev->num_msix ; i++) { in bnxt_re_init_res()
1295 db_offt = rdev->en_dev->msix_entries[i].db_offset; in bnxt_re_init_res()
1296 rc = bnxt_qplib_enable_nq(rdev->en_dev->pdev, &rdev->nq[i - 1], in bnxt_re_init_res()
1297 i - 1, rdev->en_dev->msix_entries[i].vector, in bnxt_re_init_res()
1301 ibdev_err(&rdev->ibdev, in bnxt_re_init_res()
1310 bnxt_qplib_disable_nq(&rdev->nq[i]); in bnxt_re_init_res()
1314 static void bnxt_re_free_nq_res(struct bnxt_re_dev *rdev) in bnxt_re_free_nq_res() argument
1319 for (i = 0; i < rdev->num_msix - 1; i++) { in bnxt_re_free_nq_res()
1320 type = bnxt_qplib_get_ring_type(rdev->chip_ctx); in bnxt_re_free_nq_res()
1321 bnxt_re_net_ring_free(rdev, rdev->nq[i].ring_id, type); in bnxt_re_free_nq_res()
1322 bnxt_qplib_free_nq(&rdev->nq[i]); in bnxt_re_free_nq_res()
1323 rdev->nq[i].res = NULL; in bnxt_re_free_nq_res()
1327 static void bnxt_re_free_res(struct bnxt_re_dev *rdev) in bnxt_re_free_res() argument
1329 bnxt_re_free_nq_res(rdev); in bnxt_re_free_res()
1331 if (rdev->qplib_res.dpi_tbl.max) { in bnxt_re_free_res()
1332 bnxt_qplib_dealloc_dpi(&rdev->qplib_res, in bnxt_re_free_res()
1333 &rdev->dpi_privileged); in bnxt_re_free_res()
1335 if (rdev->qplib_res.rcfw) { in bnxt_re_free_res()
1336 bnxt_qplib_free_res(&rdev->qplib_res); in bnxt_re_free_res()
1337 rdev->qplib_res.rcfw = NULL; in bnxt_re_free_res()
1341 static int bnxt_re_alloc_res(struct bnxt_re_dev *rdev) in bnxt_re_alloc_res() argument
1349 rdev->qplib_res.rcfw = &rdev->rcfw; in bnxt_re_alloc_res()
1350 rc = bnxt_qplib_get_dev_attr(&rdev->rcfw, &rdev->dev_attr); in bnxt_re_alloc_res()
1354 rc = bnxt_qplib_alloc_res(&rdev->qplib_res, rdev->en_dev->pdev, in bnxt_re_alloc_res()
1355 rdev->netdev, &rdev->dev_attr); in bnxt_re_alloc_res()
1359 rc = bnxt_qplib_alloc_dpi(&rdev->qplib_res, in bnxt_re_alloc_res()
1360 &rdev->dpi_privileged, in bnxt_re_alloc_res()
1361 rdev, BNXT_QPLIB_DPI_TYPE_KERNEL); in bnxt_re_alloc_res()
1365 for (i = 0; i < rdev->num_msix - 1; i++) { in bnxt_re_alloc_res()
1368 nq = &rdev->nq[i]; in bnxt_re_alloc_res()
1370 rc = bnxt_qplib_alloc_nq(&rdev->qplib_res, &rdev->nq[i]); in bnxt_re_alloc_res()
1372 ibdev_err(&rdev->ibdev, "Alloc Failed NQ%d rc:%#x", in bnxt_re_alloc_res()
1376 type = bnxt_qplib_get_ring_type(rdev->chip_ctx); in bnxt_re_alloc_res()
1378 rattr.pages = nq->hwq.pbl[rdev->nq[i].hwq.level].pg_count; in bnxt_re_alloc_res()
1382 rattr.lrid = rdev->en_dev->msix_entries[i + 1].ring_idx; in bnxt_re_alloc_res()
1383 rc = bnxt_re_net_ring_alloc(rdev, &rattr, &nq->ring_id); in bnxt_re_alloc_res()
1385 ibdev_err(&rdev->ibdev, in bnxt_re_alloc_res()
1388 bnxt_qplib_free_nq(&rdev->nq[i]); in bnxt_re_alloc_res()
1396 type = bnxt_qplib_get_ring_type(rdev->chip_ctx); in bnxt_re_alloc_res()
1397 bnxt_re_net_ring_free(rdev, rdev->nq[i].ring_id, type); in bnxt_re_alloc_res()
1398 bnxt_qplib_free_nq(&rdev->nq[i]); in bnxt_re_alloc_res()
1400 bnxt_qplib_dealloc_dpi(&rdev->qplib_res, in bnxt_re_alloc_res()
1401 &rdev->dpi_privileged); in bnxt_re_alloc_res()
1403 bnxt_qplib_free_res(&rdev->qplib_res); in bnxt_re_alloc_res()
1406 rdev->qplib_res.rcfw = NULL; in bnxt_re_alloc_res()
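bnxt_re_alloc_res() uses the kernel's goto-unwind idiom: each allocation failure jumps to a label that releases everything acquired so far, in reverse order (NQ rings already created, then the privileged DPI, then the resource table). A compilable miniature of the same ladder, with stub allocators and the last one failing on purpose:

    #include <stdio.h>

    static int alloc_a(void) { return 0; }
    static int alloc_b(void) { return 0; }
    static int alloc_c(void) { return -1; }  /* force the unwind */

    static int alloc_res(void)
    {
        int rc;

        rc = alloc_a();
        if (rc)
            goto fail;
        rc = alloc_b();
        if (rc)
            goto free_a;
        rc = alloc_c();
        if (rc)
            goto free_b;
        return 0;

    free_b:
        printf("free b\n");   /* reverse order of acquisition */
    free_a:
        printf("free a\n");
    fail:
        return rc;
    }

    int main(void) { return alloc_res() ? 1 : 0; }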
1429 static bool bnxt_re_is_qp1_or_shadow_qp(struct bnxt_re_dev *rdev, in bnxt_re_is_qp1_or_shadow_qp() argument
1433 (qp == rdev->gsi_ctx.gsi_sqp); in bnxt_re_is_qp1_or_shadow_qp()
1436 static void bnxt_re_dev_stop(struct bnxt_re_dev *rdev) in bnxt_re_dev_stop() argument
1443 mutex_lock(&rdev->qp_lock); in bnxt_re_dev_stop()
1444 list_for_each_entry(qp, &rdev->qp_list, list) { in bnxt_re_dev_stop()
1446 if (!bnxt_re_is_qp1_or_shadow_qp(rdev, qp)) { in bnxt_re_dev_stop()
1451 bnxt_re_dispatch_event(&rdev->ibdev, &qp->ib_qp, in bnxt_re_dev_stop()
1458 mutex_unlock(&rdev->qp_lock); in bnxt_re_dev_stop()
1461 static int bnxt_re_update_gid(struct bnxt_re_dev *rdev) in bnxt_re_update_gid() argument
1463 struct bnxt_qplib_sgid_tbl *sgid_tbl = &rdev->qplib_res.sgid_tbl; in bnxt_re_update_gid()
1468 if (!ib_device_try_get(&rdev->ibdev)) in bnxt_re_update_gid()
1486 rdev->qplib_res.netdev->dev_addr); in bnxt_re_update_gid()
1489 ib_device_put(&rdev->ibdev); in bnxt_re_update_gid()
1493 static u32 bnxt_re_get_priority_mask(struct bnxt_re_dev *rdev) in bnxt_re_get_priority_mask() argument
1499 netdev = rdev->netdev; in bnxt_re_get_priority_mask()
1514 static int bnxt_re_setup_qos(struct bnxt_re_dev *rdev) in bnxt_re_setup_qos() argument
1519 prio_map = bnxt_re_get_priority_mask(rdev); in bnxt_re_setup_qos()
1521 if (prio_map == rdev->cur_prio_map) in bnxt_re_setup_qos()
1523 rdev->cur_prio_map = prio_map; in bnxt_re_setup_qos()
1527 if ((prio_map == 0 && rdev->qplib_res.prio) || in bnxt_re_setup_qos()
1528 (prio_map != 0 && !rdev->qplib_res.prio)) { in bnxt_re_setup_qos()
1529 rdev->qplib_res.prio = prio_map; in bnxt_re_setup_qos()
1530 bnxt_re_update_gid(rdev); in bnxt_re_setup_qos()
1536 static void bnxt_re_query_hwrm_intf_version(struct bnxt_re_dev *rdev) in bnxt_re_query_hwrm_intf_version() argument
1538 struct bnxt_en_dev *en_dev = rdev->en_dev; in bnxt_re_query_hwrm_intf_version()
1553 ibdev_err(&rdev->ibdev, "Failed to query HW version, rc = 0x%x", in bnxt_re_query_hwrm_intf_version()
1558 cctx = rdev->chip_ctx; in bnxt_re_query_hwrm_intf_version()
1571 static int bnxt_re_ib_init(struct bnxt_re_dev *rdev) in bnxt_re_ib_init() argument
1577 rc = bnxt_re_register_ib(rdev); in bnxt_re_ib_init()
1582 dev_info(rdev_to_dev(rdev), "Device registered with IB successfully"); in bnxt_re_ib_init()
1583 set_bit(BNXT_RE_FLAG_ISSUE_ROCE_STATS, &rdev->flags); in bnxt_re_ib_init()
1585 event = netif_running(rdev->netdev) && netif_carrier_ok(rdev->netdev) ? in bnxt_re_ib_init()
1588 bnxt_re_dispatch_event(&rdev->ibdev, NULL, 1, event); in bnxt_re_ib_init()
1593 static void bnxt_re_dev_uninit(struct bnxt_re_dev *rdev, u8 op_type) in bnxt_re_dev_uninit() argument
1598 if (test_and_clear_bit(BNXT_RE_FLAG_QOS_WORK_REG, &rdev->flags)) in bnxt_re_dev_uninit()
1599 cancel_delayed_work_sync(&rdev->worker); in bnxt_re_dev_uninit()
1602 &rdev->flags)) in bnxt_re_dev_uninit()
1603 bnxt_re_cleanup_res(rdev); in bnxt_re_dev_uninit()
1604 if (test_and_clear_bit(BNXT_RE_FLAG_RESOURCES_ALLOCATED, &rdev->flags)) in bnxt_re_dev_uninit()
1605 bnxt_re_free_res(rdev); in bnxt_re_dev_uninit()
1607 if (test_and_clear_bit(BNXT_RE_FLAG_RCFW_CHANNEL_EN, &rdev->flags)) { in bnxt_re_dev_uninit()
1608 rc = bnxt_qplib_deinit_rcfw(&rdev->rcfw); in bnxt_re_dev_uninit()
1610 ibdev_warn(&rdev->ibdev, in bnxt_re_dev_uninit()
1612 bnxt_re_net_stats_ctx_free(rdev, rdev->qplib_ctx.stats.fw_id); in bnxt_re_dev_uninit()
1613 bnxt_qplib_free_ctx(&rdev->qplib_res, &rdev->qplib_ctx); in bnxt_re_dev_uninit()
1614 bnxt_qplib_disable_rcfw_channel(&rdev->rcfw); in bnxt_re_dev_uninit()
1615 type = bnxt_qplib_get_ring_type(rdev->chip_ctx); in bnxt_re_dev_uninit()
1616 bnxt_re_net_ring_free(rdev, rdev->rcfw.creq.ring_id, type); in bnxt_re_dev_uninit()
1617 bnxt_qplib_free_rcfw_channel(&rdev->rcfw); in bnxt_re_dev_uninit()
1620 rdev->num_msix = 0; in bnxt_re_dev_uninit()
1622 if (rdev->pacing.dbr_pacing) in bnxt_re_dev_uninit()
1623 bnxt_re_deinitialize_dbr_pacing(rdev); in bnxt_re_dev_uninit()
1625 bnxt_re_destroy_chip_ctx(rdev); in bnxt_re_dev_uninit()
1627 if (test_and_clear_bit(BNXT_RE_FLAG_NETDEV_REGISTERED, &rdev->flags)) in bnxt_re_dev_uninit()
1628 bnxt_unregister_dev(rdev->en_dev); in bnxt_re_dev_uninit()
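bnxt_re_dev_uninit() gates every teardown step on test_and_clear_bit(), which makes it idempotent: it is safe to reach from both the error ladder in bnxt_re_dev_init() and the normal remove path. A userspace model of that guard, with a plain non-atomic flags word standing in for the kernel's atomic bitops:

    #include <stdio.h>

    /* Non-atomic stand-in for the kernel's test_and_clear_bit(). */
    static int test_and_clear_bit(int nr, unsigned long *flags)
    {
        unsigned long mask = 1UL << nr;
        int was_set = !!(*flags & mask);
        *flags &= ~mask;
        return was_set;
    }

    enum { FLAG_NETDEV_REGISTERED, FLAG_RCFW_CHANNEL_EN };

    static void dev_uninit(unsigned long *flags)
    {
        if (test_and_clear_bit(FLAG_RCFW_CHANNEL_EN, flags))
            printf("tear down RCFW channel\n");
        if (test_and_clear_bit(FLAG_NETDEV_REGISTERED, flags))
            printf("unregister from L2 driver\n");
    }

    int main(void)
    {
        unsigned long flags = (1UL << FLAG_NETDEV_REGISTERED) |
                              (1UL << FLAG_RCFW_CHANNEL_EN);
        dev_uninit(&flags);
        dev_uninit(&flags);   /* second call is a harmless no-op */
        return 0;
    }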
1635 struct bnxt_re_dev *rdev = container_of(work, struct bnxt_re_dev, in bnxt_re_worker() local
1638 bnxt_re_setup_qos(rdev); in bnxt_re_worker()
1639 schedule_delayed_work(&rdev->worker, msecs_to_jiffies(30000)); in bnxt_re_worker()
1642 static int bnxt_re_dev_init(struct bnxt_re_dev *rdev, u8 op_type) in bnxt_re_dev_init() argument
1653 rc = bnxt_re_register_netdev(rdev); in bnxt_re_dev_init()
1655 ibdev_err(&rdev->ibdev, in bnxt_re_dev_init()
1660 set_bit(BNXT_RE_FLAG_NETDEV_REGISTERED, &rdev->flags); in bnxt_re_dev_init()
1662 rc = bnxt_re_setup_chip_ctx(rdev); in bnxt_re_dev_init()
1664 bnxt_unregister_dev(rdev->en_dev); in bnxt_re_dev_init()
1665 clear_bit(BNXT_RE_FLAG_NETDEV_REGISTERED, &rdev->flags); in bnxt_re_dev_init()
1666 ibdev_err(&rdev->ibdev, "Failed to get chip context\n"); in bnxt_re_dev_init()
1671 bnxt_re_get_sriov_func_type(rdev); in bnxt_re_dev_init()
1673 if (!rdev->en_dev->ulp_tbl->msix_requested) { in bnxt_re_dev_init()
1674 ibdev_err(&rdev->ibdev, in bnxt_re_dev_init()
1679 ibdev_dbg(&rdev->ibdev, "Got %d MSI-X vectors\n", in bnxt_re_dev_init()
1680 rdev->en_dev->ulp_tbl->msix_requested); in bnxt_re_dev_init()
1681 rdev->num_msix = rdev->en_dev->ulp_tbl->msix_requested; in bnxt_re_dev_init()
1683 bnxt_re_query_hwrm_intf_version(rdev); in bnxt_re_dev_init()
1688 rc = bnxt_qplib_alloc_rcfw_channel(&rdev->qplib_res, &rdev->rcfw, in bnxt_re_dev_init()
1689 &rdev->qplib_ctx, in bnxt_re_dev_init()
1692 ibdev_err(&rdev->ibdev, in bnxt_re_dev_init()
1697 type = bnxt_qplib_get_ring_type(rdev->chip_ctx); in bnxt_re_dev_init()
1698 creq = &rdev->rcfw.creq; in bnxt_re_dev_init()
1704 rattr.lrid = rdev->en_dev->msix_entries[BNXT_RE_AEQ_IDX].ring_idx; in bnxt_re_dev_init()
1705 rc = bnxt_re_net_ring_alloc(rdev, &rattr, &creq->ring_id); in bnxt_re_dev_init()
1707 ibdev_err(&rdev->ibdev, "Failed to allocate CREQ: %#x\n", rc); in bnxt_re_dev_init()
1710 db_offt = rdev->en_dev->msix_entries[BNXT_RE_AEQ_IDX].db_offset; in bnxt_re_dev_init()
1711 vid = rdev->en_dev->msix_entries[BNXT_RE_AEQ_IDX].vector; in bnxt_re_dev_init()
1712 rc = bnxt_qplib_enable_rcfw_channel(&rdev->rcfw, in bnxt_re_dev_init()
1716 ibdev_err(&rdev->ibdev, "Failed to enable RCFW channel: %#x\n", in bnxt_re_dev_init()
1721 if (bnxt_qplib_dbr_pacing_en(rdev->chip_ctx)) { in bnxt_re_dev_init()
1722 rc = bnxt_re_initialize_dbr_pacing(rdev); in bnxt_re_dev_init()
1724 rdev->pacing.dbr_pacing = true; in bnxt_re_dev_init()
1726 ibdev_err(&rdev->ibdev, in bnxt_re_dev_init()
1728 rdev->pacing.dbr_pacing = false; in bnxt_re_dev_init()
1731 rc = bnxt_qplib_get_dev_attr(&rdev->rcfw, &rdev->dev_attr); in bnxt_re_dev_init()
1735 bnxt_re_set_resource_limits(rdev); in bnxt_re_dev_init()
1737 rc = bnxt_qplib_alloc_ctx(&rdev->qplib_res, &rdev->qplib_ctx, 0, in bnxt_re_dev_init()
1738 bnxt_qplib_is_chip_gen_p5_p7(rdev->chip_ctx)); in bnxt_re_dev_init()
1740 ibdev_err(&rdev->ibdev, in bnxt_re_dev_init()
1744 rc = bnxt_re_net_stats_ctx_alloc(rdev, in bnxt_re_dev_init()
1745 rdev->qplib_ctx.stats.dma_map, in bnxt_re_dev_init()
1746 &rdev->qplib_ctx.stats.fw_id); in bnxt_re_dev_init()
1748 ibdev_err(&rdev->ibdev, in bnxt_re_dev_init()
1753 rc = bnxt_qplib_init_rcfw(&rdev->rcfw, &rdev->qplib_ctx, in bnxt_re_dev_init()
1754 rdev->is_virtfn); in bnxt_re_dev_init()
1756 ibdev_err(&rdev->ibdev, in bnxt_re_dev_init()
1760 set_bit(BNXT_RE_FLAG_RCFW_CHANNEL_EN, &rdev->flags); in bnxt_re_dev_init()
1763 rc = bnxt_re_alloc_res(rdev); in bnxt_re_dev_init()
1765 ibdev_err(&rdev->ibdev, in bnxt_re_dev_init()
1769 set_bit(BNXT_RE_FLAG_RESOURCES_ALLOCATED, &rdev->flags); in bnxt_re_dev_init()
1770 rc = bnxt_re_init_res(rdev); in bnxt_re_dev_init()
1772 ibdev_err(&rdev->ibdev, in bnxt_re_dev_init()
1777 set_bit(BNXT_RE_FLAG_RESOURCES_INITIALIZED, &rdev->flags); in bnxt_re_dev_init()
1779 if (!rdev->is_virtfn) { in bnxt_re_dev_init()
1780 rc = bnxt_re_setup_qos(rdev); in bnxt_re_dev_init()
1782 ibdev_info(&rdev->ibdev, in bnxt_re_dev_init()
1785 INIT_DELAYED_WORK(&rdev->worker, bnxt_re_worker); in bnxt_re_dev_init()
1786 set_bit(BNXT_RE_FLAG_QOS_WORK_REG, &rdev->flags); in bnxt_re_dev_init()
1787 schedule_delayed_work(&rdev->worker, msecs_to_jiffies(30000)); in bnxt_re_dev_init()
1792 bnxt_re_vf_res_config(rdev); in bnxt_re_dev_init()
1794 hash_init(rdev->cq_hash); in bnxt_re_dev_init()
1795 if (rdev->chip_ctx->modes.toggle_bits & BNXT_QPLIB_SRQ_TOGGLE_BIT) in bnxt_re_dev_init()
1796 hash_init(rdev->srq_hash); in bnxt_re_dev_init()
1800 bnxt_re_net_stats_ctx_free(rdev, rdev->qplib_ctx.stats.fw_id); in bnxt_re_dev_init()
1802 bnxt_qplib_free_ctx(&rdev->qplib_res, &rdev->qplib_ctx); in bnxt_re_dev_init()
1804 bnxt_qplib_disable_rcfw_channel(&rdev->rcfw); in bnxt_re_dev_init()
1806 type = bnxt_qplib_get_ring_type(rdev->chip_ctx); in bnxt_re_dev_init()
1807 bnxt_re_net_ring_free(rdev, rdev->rcfw.creq.ring_id, type); in bnxt_re_dev_init()
1809 bnxt_qplib_free_rcfw_channel(&rdev->rcfw); in bnxt_re_dev_init()
1811 bnxt_re_dev_uninit(rdev, BNXT_RE_COMPLETE_REMOVE); in bnxt_re_dev_init()
1816 static void bnxt_re_update_en_info_rdev(struct bnxt_re_dev *rdev, in bnxt_re_update_en_info_rdev() argument
1826 en_info->rdev = rdev; in bnxt_re_update_en_info_rdev()
1836 struct bnxt_re_dev *rdev; in bnxt_re_add_device() local
1843 rdev = bnxt_re_dev_add(adev, en_dev); in bnxt_re_add_device()
1844 if (!rdev || !rdev_to_dev(rdev)) { in bnxt_re_add_device()
1849 bnxt_re_update_en_info_rdev(rdev, en_info, adev); in bnxt_re_add_device()
1851 rc = bnxt_re_dev_init(rdev, op_type); in bnxt_re_add_device()
1855 rc = bnxt_re_ib_init(rdev); in bnxt_re_add_device()
1862 rdev->nb.notifier_call = bnxt_re_netdev_event; in bnxt_re_add_device()
1863 rc = register_netdevice_notifier(&rdev->nb); in bnxt_re_add_device()
1865 rdev->nb.notifier_call = NULL; in bnxt_re_add_device()
1870 bnxt_re_setup_cc(rdev, true); in bnxt_re_add_device()
1875 ib_unregister_device(&rdev->ibdev); in bnxt_re_add_device()
1878 bnxt_re_dev_uninit(rdev, BNXT_RE_COMPLETE_REMOVE); in bnxt_re_add_device()
1880 ib_dealloc_device(&rdev->ibdev); in bnxt_re_add_device()
1885 static void bnxt_re_setup_cc(struct bnxt_re_dev *rdev, bool enable) in bnxt_re_setup_cc() argument
1890 if (rdev->is_virtfn) in bnxt_re_setup_cc()
1894 if (!bnxt_qplib_is_chip_gen_p5_p7(rdev->chip_ctx)) in bnxt_re_setup_cc()
1906 if (bnxt_qplib_modify_cc(&rdev->qplib_res, &cc_param)) in bnxt_re_setup_cc()
1907 ibdev_err(&rdev->ibdev, "Failed to setup CC enable = %d\n", enable); in bnxt_re_setup_cc()
1928 struct bnxt_re_dev *rdev; in bnxt_re_netdev_event() local
1937 rdev = bnxt_re_from_netdev(real_dev); in bnxt_re_netdev_event()
1938 if (!rdev) in bnxt_re_netdev_event()
1946 bnxt_re_dispatch_event(&rdev->ibdev, NULL, 1, in bnxt_re_netdev_event()
1954 ib_device_put(&rdev->ibdev); in bnxt_re_netdev_event()
1961 static void bnxt_re_remove_device(struct bnxt_re_dev *rdev, u8 op_type, in bnxt_re_remove_device() argument
1964 if (rdev->nb.notifier_call) { in bnxt_re_remove_device()
1965 unregister_netdevice_notifier(&rdev->nb); in bnxt_re_remove_device()
1966 rdev->nb.notifier_call = NULL; in bnxt_re_remove_device()
1973 bnxt_re_setup_cc(rdev, false); in bnxt_re_remove_device()
1974 ib_unregister_device(&rdev->ibdev); in bnxt_re_remove_device()
1975 bnxt_re_dev_uninit(rdev, op_type); in bnxt_re_remove_device()
1976 ib_dealloc_device(&rdev->ibdev); in bnxt_re_remove_device()
1982 struct bnxt_re_dev *rdev; in bnxt_re_remove() local
1985 rdev = en_info->rdev; in bnxt_re_remove()
1987 if (rdev) in bnxt_re_remove()
1988 bnxt_re_remove_device(rdev, BNXT_RE_COMPLETE_REMOVE, adev); in bnxt_re_remove()
2031 struct bnxt_re_dev *rdev; in bnxt_re_suspend() local
2033 rdev = en_info->rdev; in bnxt_re_suspend()
2043 ibdev_info(&rdev->ibdev, "Handle device suspend call"); in bnxt_re_suspend()
2049 if (test_bit(BNXT_STATE_FW_FATAL_COND, &rdev->en_dev->en_state)) in bnxt_re_suspend()
2050 set_bit(ERR_DEVICE_DETACHED, &rdev->rcfw.cmdq.flags); in bnxt_re_suspend()
2052 bnxt_re_dev_stop(rdev); in bnxt_re_suspend()
2057 set_bit(BNXT_RE_FLAG_ERR_DEVICE_DETACHED, &rdev->flags); in bnxt_re_suspend()
2058 set_bit(ERR_DEVICE_DETACHED, &rdev->rcfw.cmdq.flags); in bnxt_re_suspend()
2059 wake_up_all(&rdev->rcfw.cmdq.waitq); in bnxt_re_suspend()
2061 if (rdev->pacing.dbr_pacing) in bnxt_re_suspend()
2062 bnxt_re_set_pacing_dev_state(rdev); in bnxt_re_suspend()
2064 ibdev_info(&rdev->ibdev, "%s: L2 driver notified to stop en_state 0x%lx", in bnxt_re_suspend()
2066 bnxt_re_remove_device(rdev, BNXT_RE_PRE_RECOVERY_REMOVE, adev); in bnxt_re_suspend()
2075 struct bnxt_re_dev *rdev; in bnxt_re_resume() local
2086 rdev = en_info->rdev; in bnxt_re_resume()
2087 ibdev_info(&rdev->ibdev, "Device resume completed"); in bnxt_re_resume()