Lines matching refs: hdev

49 static int hclge_set_mac_mtu(struct hclge_dev *hdev, int new_mps);
50 static int hclge_init_vlan_config(struct hclge_dev *hdev);
51 static void hclge_sync_vlan_filter(struct hclge_dev *hdev);
54 static void hclge_rfs_filter_expire(struct hclge_dev *hdev);
55 static int hclge_clear_arfs_rules(struct hclge_dev *hdev);
58 static int hclge_set_default_loopback(struct hclge_dev *hdev);
60 static void hclge_sync_mac_table(struct hclge_dev *hdev);
61 static void hclge_restore_hw_table(struct hclge_dev *hdev);
62 static void hclge_sync_promisc_mode(struct hclge_dev *hdev);
63 static void hclge_sync_fd_table(struct hclge_dev *hdev);
64 static void hclge_update_fec_stats(struct hclge_dev *hdev);
65 static int hclge_mac_link_status_wait(struct hclge_dev *hdev, int link_ret,
67 static int hclge_update_port_info(struct hclge_dev *hdev);
439 static int hclge_mac_update_stats_defective(struct hclge_dev *hdev) in hclge_mac_update_stats_defective() argument
443 u64 *data = (u64 *)(&hdev->mac_stats); in hclge_mac_update_stats_defective()
451 ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_MAC_CMD_NUM); in hclge_mac_update_stats_defective()
453 dev_err(&hdev->pdev->dev, in hclge_mac_update_stats_defective()
475 static int hclge_mac_update_stats_complete(struct hclge_dev *hdev) in hclge_mac_update_stats_complete() argument
479 u32 reg_num = hdev->ae_dev->dev_specs.mac_stats_num; in hclge_mac_update_stats_complete()
480 u64 *data = (u64 *)(&hdev->mac_stats); in hclge_mac_update_stats_complete()
499 ret = hclge_cmd_send(&hdev->hw, desc, desc_num); in hclge_mac_update_stats_complete()
505 data_size = min_t(u32, sizeof(hdev->mac_stats) / sizeof(u64), reg_num); in hclge_mac_update_stats_complete()
522 static int hclge_mac_query_reg_num(struct hclge_dev *hdev, u32 *reg_num) in hclge_mac_query_reg_num() argument
532 if (hdev->ae_dev->dev_version == HNAE3_DEVICE_VERSION_V2) { in hclge_mac_query_reg_num()
538 ret = hclge_cmd_send(&hdev->hw, &desc, 1); in hclge_mac_query_reg_num()
540 dev_err(&hdev->pdev->dev, in hclge_mac_query_reg_num()
548 dev_err(&hdev->pdev->dev, in hclge_mac_query_reg_num()
556 int hclge_mac_update_stats(struct hclge_dev *hdev) in hclge_mac_update_stats() argument
559 if (hdev->ae_dev->dev_specs.mac_stats_num) in hclge_mac_update_stats()
560 return hclge_mac_update_stats_complete(hdev); in hclge_mac_update_stats()
562 return hclge_mac_update_stats_defective(hdev); in hclge_mac_update_stats()
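
Note: the matches at 556-562 show a capability dispatch: when the firmware reports a MAC statistics register count, the full ("complete") read path is used; otherwise the driver falls back to the legacy fixed-size ("defective") path. A minimal C sketch of that selection, with the structs reduced to hypothetical stubs (the real hclge_dev reaches the count via hdev->ae_dev->dev_specs):

    /* Illustrative stubs only -- not the driver's real layouts. */
    struct dev_specs_stub { unsigned int mac_stats_num; };
    struct hclge_dev_stub { struct dev_specs_stub dev_specs; };

    static int update_stats_complete(struct hclge_dev_stub *hdev);
    static int update_stats_defective(struct hclge_dev_stub *hdev);

    static int mac_update_stats(struct hclge_dev_stub *hdev)
    {
            /* A nonzero register count means the firmware supports the
             * full stats query; zero selects the legacy fixed set. */
            if (hdev->dev_specs.mac_stats_num)
                    return update_stats_complete(hdev);

            return update_stats_defective(hdev);
    }
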
565 static int hclge_comm_get_count(struct hclge_dev *hdev, in hclge_comm_get_count() argument
573 if (strs[i].stats_num <= hdev->ae_dev->dev_specs.mac_stats_num) in hclge_comm_get_count()
579 static u64 *hclge_comm_get_stats(struct hclge_dev *hdev, in hclge_comm_get_stats() argument
587 if (strs[i].stats_num > hdev->ae_dev->dev_specs.mac_stats_num) in hclge_comm_get_stats()
590 *buf = HCLGE_STATS_READ(&hdev->mac_stats, strs[i].offset); in hclge_comm_get_stats()
597 static u8 *hclge_comm_get_strings(struct hclge_dev *hdev, u32 stringset, in hclge_comm_get_strings() argument
608 if (strs[i].stats_num > hdev->ae_dev->dev_specs.mac_stats_num) in hclge_comm_get_strings()
618 static void hclge_update_stats_for_all(struct hclge_dev *hdev) in hclge_update_stats_for_all() argument
623 handle = &hdev->vport[0].nic; in hclge_update_stats_for_all()
625 status = hclge_comm_tqps_update_stats(handle, &hdev->hw.hw); in hclge_update_stats_for_all()
627 dev_err(&hdev->pdev->dev, in hclge_update_stats_for_all()
633 hclge_update_fec_stats(hdev); in hclge_update_stats_for_all()
635 status = hclge_mac_update_stats(hdev); in hclge_update_stats_for_all()
637 dev_err(&hdev->pdev->dev, in hclge_update_stats_for_all()
644 struct hclge_dev *hdev = vport->back; in hclge_update_stats() local
647 if (test_and_set_bit(HCLGE_STATE_STATISTICS_UPDATING, &hdev->state)) in hclge_update_stats()
650 status = hclge_mac_update_stats(hdev); in hclge_update_stats()
652 dev_err(&hdev->pdev->dev, in hclge_update_stats()
656 status = hclge_comm_tqps_update_stats(handle, &hdev->hw.hw); in hclge_update_stats()
658 dev_err(&hdev->pdev->dev, in hclge_update_stats()
662 clear_bit(HCLGE_STATE_STATISTICS_UPDATING, &hdev->state); in hclge_update_stats()
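
Note: 647-662 wrap the refresh in a test_and_set_bit()/clear_bit() pair so concurrent callers cannot run the firmware queries twice. The pattern, condensed from the matches above:

    /* Sketch of the guard at 647/662; a racing caller that finds the
     * bit already set skips the refresh entirely. */
    if (test_and_set_bit(HCLGE_STATE_STATISTICS_UPDATING, &hdev->state))
            return;

    /* ... send stats commands, copy counters into hdev ... */

    clear_bit(HCLGE_STATE_STATISTICS_UPDATING, &hdev->state);
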
674 struct hclge_dev *hdev = vport->back; in hclge_get_sset_count() local
685 if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2 || in hclge_get_sset_count()
686 hdev->hw.mac.speed == HCLGE_MAC_SPEED_10M || in hclge_get_sset_count()
687 hdev->hw.mac.speed == HCLGE_MAC_SPEED_100M || in hclge_get_sset_count()
688 hdev->hw.mac.speed == HCLGE_MAC_SPEED_1G) { in hclge_get_sset_count()
693 if (hdev->ae_dev->dev_specs.hilink_version != in hclge_get_sset_count()
704 if ((hdev->hw.mac.phydev && hdev->hw.mac.phydev->drv && in hclge_get_sset_count()
705 hdev->hw.mac.phydev->drv->set_loopback) || in hclge_get_sset_count()
706 hnae3_dev_phy_imp_supported(hdev)) { in hclge_get_sset_count()
711 count = hclge_comm_get_count(hdev, g_mac_stats_string, in hclge_get_sset_count()
723 struct hclge_dev *hdev = vport->back; in hclge_get_strings() local
729 p = hclge_comm_get_strings(hdev, stringset, g_mac_stats_string, in hclge_get_strings()
765 struct hclge_dev *hdev = vport->back; in hclge_get_stats() local
768 p = hclge_comm_get_stats(hdev, g_mac_stats_string, in hclge_get_stats()
777 struct hclge_dev *hdev = vport->back; in hclge_get_mac_stat() local
781 mac_stats->tx_pause_cnt = hdev->mac_stats.mac_tx_mac_pause_num; in hclge_get_mac_stat()
782 mac_stats->rx_pause_cnt = hdev->mac_stats.mac_rx_mac_pause_num; in hclge_get_mac_stat()
785 static int hclge_parse_func_status(struct hclge_dev *hdev, in hclge_parse_func_status() argument
795 hdev->flag |= HCLGE_FLAG_MAIN; in hclge_parse_func_status()
797 hdev->flag &= ~HCLGE_FLAG_MAIN; in hclge_parse_func_status()
799 hdev->hw.mac.mac_id = status->mac_id & HCLGE_MAC_ID_MASK; in hclge_parse_func_status()
803 static int hclge_query_function_status(struct hclge_dev *hdev) in hclge_query_function_status() argument
816 ret = hclge_cmd_send(&hdev->hw, &desc, 1); in hclge_query_function_status()
818 dev_err(&hdev->pdev->dev, in hclge_query_function_status()
829 return hclge_parse_func_status(hdev, req); in hclge_query_function_status()
832 static int hclge_query_pf_resource(struct hclge_dev *hdev) in hclge_query_pf_resource() argument
839 ret = hclge_cmd_send(&hdev->hw, &desc, 1); in hclge_query_pf_resource()
841 dev_err(&hdev->pdev->dev, in hclge_query_pf_resource()
847 hdev->num_tqps = le16_to_cpu(req->tqp_num) + in hclge_query_pf_resource()
849 hdev->pkt_buf_size = le16_to_cpu(req->buf_size) << HCLGE_BUF_UNIT_S; in hclge_query_pf_resource()
852 hdev->tx_buf_size = in hclge_query_pf_resource()
855 hdev->tx_buf_size = HCLGE_DEFAULT_TX_BUF; in hclge_query_pf_resource()
857 hdev->tx_buf_size = roundup(hdev->tx_buf_size, HCLGE_BUF_SIZE_UNIT); in hclge_query_pf_resource()
860 hdev->dv_buf_size = in hclge_query_pf_resource()
863 hdev->dv_buf_size = HCLGE_DEFAULT_DV; in hclge_query_pf_resource()
865 hdev->dv_buf_size = roundup(hdev->dv_buf_size, HCLGE_BUF_SIZE_UNIT); in hclge_query_pf_resource()
867 hdev->num_nic_msi = le16_to_cpu(req->msixcap_localid_number_nic); in hclge_query_pf_resource()
868 if (hdev->num_nic_msi < HNAE3_MIN_VECTOR_NUM) { in hclge_query_pf_resource()
869 dev_err(&hdev->pdev->dev, in hclge_query_pf_resource()
871 hdev->num_nic_msi); in hclge_query_pf_resource()
875 if (hnae3_dev_roce_supported(hdev)) { in hclge_query_pf_resource()
876 hdev->num_roce_msi = in hclge_query_pf_resource()
882 hdev->num_msi = hdev->num_nic_msi + hdev->num_roce_msi; in hclge_query_pf_resource()
884 hdev->num_msi = hdev->num_nic_msi; in hclge_query_pf_resource()
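
Note: 867-884 establish the MSI budget: the NIC vector count from firmware is validated against HNAE3_MIN_VECTOR_NUM, and RoCE vectors are added only on RoCE-capable devices. Condensed:

    /* NIC vectors are mandatory; RoCE vectors are conditional. */
    if (hdev->num_nic_msi < HNAE3_MIN_VECTOR_NUM)
            return -EINVAL;        /* too few vectors to operate */

    if (hnae3_dev_roce_supported(hdev))
            hdev->num_msi = hdev->num_nic_msi + hdev->num_roce_msi;
    else
            hdev->num_msi = hdev->num_nic_msi;
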
956 struct hclge_dev *hdev = vport->back; in hclge_check_port_speed() local
957 u32 speed_ability = hdev->hw.mac.speed_ability; in hclge_check_port_speed()
1128 static void hclge_parse_fiber_link_mode(struct hclge_dev *hdev, in hclge_parse_fiber_link_mode() argument
1131 struct hclge_mac *mac = &hdev->hw.mac; in hclge_parse_fiber_link_mode()
1140 if (hnae3_dev_fec_supported(hdev)) in hclge_parse_fiber_link_mode()
1143 if (hnae3_dev_pause_supported(hdev)) in hclge_parse_fiber_link_mode()
1150 static void hclge_parse_backplane_link_mode(struct hclge_dev *hdev, in hclge_parse_backplane_link_mode() argument
1153 struct hclge_mac *mac = &hdev->hw.mac; in hclge_parse_backplane_link_mode()
1156 if (hnae3_dev_fec_supported(hdev)) in hclge_parse_backplane_link_mode()
1159 if (hnae3_dev_pause_supported(hdev)) in hclge_parse_backplane_link_mode()
1166 static void hclge_parse_copper_link_mode(struct hclge_dev *hdev, in hclge_parse_copper_link_mode() argument
1169 unsigned long *supported = hdev->hw.mac.supported; in hclge_parse_copper_link_mode()
1191 if (hnae3_dev_pause_supported(hdev)) { in hclge_parse_copper_link_mode()
1200 static void hclge_parse_link_mode(struct hclge_dev *hdev, u16 speed_ability) in hclge_parse_link_mode() argument
1202 u8 media_type = hdev->hw.mac.media_type; in hclge_parse_link_mode()
1205 hclge_parse_fiber_link_mode(hdev, speed_ability); in hclge_parse_link_mode()
1207 hclge_parse_copper_link_mode(hdev, speed_ability); in hclge_parse_link_mode()
1209 hclge_parse_backplane_link_mode(hdev, speed_ability); in hclge_parse_link_mode()
1339 static int hclge_get_cfg(struct hclge_dev *hdev, struct hclge_cfg *hcfg) in hclge_get_cfg() argument
1360 ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_PF_CFG_DESC_NUM); in hclge_get_cfg()
1362 dev_err(&hdev->pdev->dev, "get config failed %d.\n", ret); in hclge_get_cfg()
1371 static void hclge_set_default_dev_specs(struct hclge_dev *hdev) in hclge_set_default_dev_specs() argument
1375 struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev); in hclge_set_default_dev_specs()
1388 static void hclge_parse_dev_specs(struct hclge_dev *hdev, in hclge_parse_dev_specs() argument
1391 struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev); in hclge_parse_dev_specs()
1413 static void hclge_check_dev_specs(struct hclge_dev *hdev) in hclge_check_dev_specs() argument
1415 struct hnae3_dev_specs *dev_specs = &hdev->ae_dev->dev_specs; in hclge_check_dev_specs()
1435 static int hclge_query_mac_stats_num(struct hclge_dev *hdev) in hclge_query_mac_stats_num() argument
1440 ret = hclge_mac_query_reg_num(hdev, &reg_num); in hclge_query_mac_stats_num()
1444 hdev->ae_dev->dev_specs.mac_stats_num = reg_num; in hclge_query_mac_stats_num()
1448 static int hclge_query_dev_specs(struct hclge_dev *hdev) in hclge_query_dev_specs() argument
1454 ret = hclge_query_mac_stats_num(hdev); in hclge_query_dev_specs()
1461 if (hdev->ae_dev->dev_version < HNAE3_DEVICE_VERSION_V3) { in hclge_query_dev_specs()
1462 hclge_set_default_dev_specs(hdev); in hclge_query_dev_specs()
1473 ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_QUERY_DEV_SPECS_BD_NUM); in hclge_query_dev_specs()
1477 hclge_parse_dev_specs(hdev, desc); in hclge_query_dev_specs()
1478 hclge_check_dev_specs(hdev); in hclge_query_dev_specs()
1483 static int hclge_get_cap(struct hclge_dev *hdev) in hclge_get_cap() argument
1487 ret = hclge_query_function_status(hdev); in hclge_get_cap()
1489 dev_err(&hdev->pdev->dev, in hclge_get_cap()
1495 return hclge_query_pf_resource(hdev); in hclge_get_cap()
1498 static void hclge_init_kdump_kernel_config(struct hclge_dev *hdev) in hclge_init_kdump_kernel_config() argument
1506 dev_info(&hdev->pdev->dev, in hclge_init_kdump_kernel_config()
1510 hdev->num_tqps = hdev->num_req_vfs + 1; in hclge_init_kdump_kernel_config()
1511 hdev->num_tx_desc = HCLGE_MIN_TX_DESC; in hclge_init_kdump_kernel_config()
1512 hdev->num_rx_desc = HCLGE_MIN_RX_DESC; in hclge_init_kdump_kernel_config()
1515 static void hclge_init_tc_config(struct hclge_dev *hdev) in hclge_init_tc_config() argument
1519 if (hdev->tc_max > HNAE3_MAX_TC || in hclge_init_tc_config()
1520 hdev->tc_max < 1) { in hclge_init_tc_config()
1521 dev_warn(&hdev->pdev->dev, "TC num = %u.\n", in hclge_init_tc_config()
1522 hdev->tc_max); in hclge_init_tc_config()
1523 hdev->tc_max = 1; in hclge_init_tc_config()
1527 if (!hnae3_dev_dcb_supported(hdev)) { in hclge_init_tc_config()
1528 hdev->tc_max = 1; in hclge_init_tc_config()
1529 hdev->pfc_max = 0; in hclge_init_tc_config()
1531 hdev->pfc_max = hdev->tc_max; in hclge_init_tc_config()
1534 hdev->tm_info.num_tc = 1; in hclge_init_tc_config()
1537 for (i = 0; i < hdev->tm_info.num_tc; i++) in hclge_init_tc_config()
1538 hnae3_set_bit(hdev->hw_tc_map, i, 1); in hclge_init_tc_config()
1540 hdev->tx_sch_mode = HCLGE_FLAG_TC_BASE_SCH_MODE; in hclge_init_tc_config()
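
Note: 1519-1540 apply the TC sanity rules: an out-of-range tc_max collapses to 1, devices without DCB get a single TC with PFC disabled, and otherwise PFC may cover every TC. The ordering matters (range clamp first, DCB check second):

    if (hdev->tc_max > HNAE3_MAX_TC || hdev->tc_max < 1)
            hdev->tc_max = 1;      /* bogus firmware value, fall back */

    if (!hnae3_dev_dcb_supported(hdev)) {
            hdev->tc_max = 1;
            hdev->pfc_max = 0;     /* PFC requires DCB */
    } else {
            hdev->pfc_max = hdev->tc_max;
    }
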
1543 static int hclge_configure(struct hclge_dev *hdev) in hclge_configure() argument
1545 struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev); in hclge_configure()
1549 ret = hclge_get_cfg(hdev, &cfg); in hclge_configure()
1553 hdev->base_tqp_pid = 0; in hclge_configure()
1554 hdev->vf_rss_size_max = cfg.vf_rss_size_max; in hclge_configure()
1555 hdev->pf_rss_size_max = cfg.pf_rss_size_max; in hclge_configure()
1556 hdev->rx_buf_len = cfg.rx_buf_len; in hclge_configure()
1557 ether_addr_copy(hdev->hw.mac.mac_addr, cfg.mac_addr); in hclge_configure()
1558 hdev->hw.mac.media_type = cfg.media_type; in hclge_configure()
1559 hdev->hw.mac.phy_addr = cfg.phy_addr; in hclge_configure()
1560 hdev->num_tx_desc = cfg.tqp_desc_num; in hclge_configure()
1561 hdev->num_rx_desc = cfg.tqp_desc_num; in hclge_configure()
1562 hdev->tm_info.num_pg = 1; in hclge_configure()
1563 hdev->tc_max = cfg.tc_num; in hclge_configure()
1564 hdev->tm_info.hw_pfc_map = 0; in hclge_configure()
1566 hdev->wanted_umv_size = cfg.umv_space; in hclge_configure()
1568 hdev->wanted_umv_size = hdev->ae_dev->dev_specs.umv_size; in hclge_configure()
1569 hdev->tx_spare_buf_size = cfg.tx_spare_buf_size; in hclge_configure()
1570 hdev->gro_en = true; in hclge_configure()
1574 if (hnae3_ae_dev_fd_supported(hdev->ae_dev)) { in hclge_configure()
1575 hdev->fd_en = true; in hclge_configure()
1576 hdev->fd_active_type = HCLGE_FD_RULE_NONE; in hclge_configure()
1579 ret = hclge_parse_speed(cfg.default_speed, &hdev->hw.mac.speed); in hclge_configure()
1581 dev_err(&hdev->pdev->dev, "failed to parse speed %u, ret = %d\n", in hclge_configure()
1585 hdev->hw.mac.req_speed = hdev->hw.mac.speed; in hclge_configure()
1586 hdev->hw.mac.req_autoneg = AUTONEG_ENABLE; in hclge_configure()
1587 hdev->hw.mac.req_duplex = DUPLEX_FULL; in hclge_configure()
1589 hclge_parse_link_mode(hdev, cfg.speed_ability); in hclge_configure()
1591 hdev->hw.mac.max_speed = hclge_get_max_speed(cfg.speed_ability); in hclge_configure()
1593 hclge_init_tc_config(hdev); in hclge_configure()
1594 hclge_init_kdump_kernel_config(hdev); in hclge_configure()
1599 static int hclge_config_tso(struct hclge_dev *hdev, u16 tso_mss_min, in hclge_config_tso() argument
1611 return hclge_cmd_send(&hdev->hw, &desc, 1); in hclge_config_tso()
1614 static int hclge_config_gro(struct hclge_dev *hdev) in hclge_config_gro() argument
1620 if (!hnae3_ae_dev_gro_supported(hdev->ae_dev)) in hclge_config_gro()
1626 req->gro_en = hdev->gro_en ? 1 : 0; in hclge_config_gro()
1628 ret = hclge_cmd_send(&hdev->hw, &desc, 1); in hclge_config_gro()
1630 dev_err(&hdev->pdev->dev, in hclge_config_gro()
1636 static int hclge_alloc_tqps(struct hclge_dev *hdev) in hclge_alloc_tqps() argument
1638 struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev); in hclge_alloc_tqps()
1642 hdev->htqp = devm_kcalloc(&hdev->pdev->dev, hdev->num_tqps, in hclge_alloc_tqps()
1644 if (!hdev->htqp) in hclge_alloc_tqps()
1647 tqp = hdev->htqp; in hclge_alloc_tqps()
1649 for (i = 0; i < hdev->num_tqps; i++) { in hclge_alloc_tqps()
1650 tqp->dev = &hdev->pdev->dev; in hclge_alloc_tqps()
1654 tqp->q.buf_size = hdev->rx_buf_len; in hclge_alloc_tqps()
1655 tqp->q.tx_desc_num = hdev->num_tx_desc; in hclge_alloc_tqps()
1656 tqp->q.rx_desc_num = hdev->num_rx_desc; in hclge_alloc_tqps()
1662 tqp->q.io_base = hdev->hw.hw.io_base + in hclge_alloc_tqps()
1666 tqp->q.io_base = hdev->hw.hw.io_base + in hclge_alloc_tqps()
1677 tqp->q.mem_base = hdev->hw.hw.mem_base + in hclge_alloc_tqps()
1678 HCLGE_TQP_MEM_OFFSET(hdev, i); in hclge_alloc_tqps()
1686 static int hclge_map_tqps_to_func(struct hclge_dev *hdev, u16 func_id, in hclge_map_tqps_to_func() argument
1703 ret = hclge_cmd_send(&hdev->hw, &desc, 1); in hclge_map_tqps_to_func()
1705 dev_err(&hdev->pdev->dev, "TQP map failed %d.\n", ret); in hclge_map_tqps_to_func()
1713 struct hclge_dev *hdev = vport->back; in hclge_assign_tqp() local
1716 for (i = 0, alloced = 0; i < hdev->num_tqps && in hclge_assign_tqp()
1718 if (!hdev->htqp[i].alloced) { in hclge_assign_tqp()
1719 hdev->htqp[i].q.handle = &vport->nic; in hclge_assign_tqp()
1720 hdev->htqp[i].q.tqp_index = alloced; in hclge_assign_tqp()
1721 hdev->htqp[i].q.tx_desc_num = kinfo->num_tx_desc; in hclge_assign_tqp()
1722 hdev->htqp[i].q.rx_desc_num = kinfo->num_rx_desc; in hclge_assign_tqp()
1723 kinfo->tqp[alloced] = &hdev->htqp[i].q; in hclge_assign_tqp()
1724 hdev->htqp[i].alloced = true; in hclge_assign_tqp()
1729 kinfo->rss_size = min_t(u16, hdev->pf_rss_size_max, in hclge_assign_tqp()
1730 vport->alloc_tqps / hdev->tm_info.num_tc); in hclge_assign_tqp()
1734 (hdev->num_nic_msi - 1) / hdev->tm_info.num_tc); in hclge_assign_tqp()
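
Note: 1729-1734 bound the vport's RSS size three ways: by the PF's RSS maximum, by an even split of the vport's queues across TCs, and by the NIC vectors left after the misc vector, so each queue can keep a dedicated interrupt by default. As matched above:

    kinfo->rss_size = min_t(u16, hdev->pf_rss_size_max,
                            vport->alloc_tqps / hdev->tm_info.num_tc);
    /* keep a 1:1 irq-to-queue mapping at default */
    kinfo->rss_size = min_t(u16, kinfo->rss_size,
                            (hdev->num_nic_msi - 1) / hdev->tm_info.num_tc);
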
1745 struct hclge_dev *hdev = vport->back; in hclge_knic_setup() local
1751 kinfo->rx_buf_len = hdev->rx_buf_len; in hclge_knic_setup()
1752 kinfo->tx_spare_buf_size = hdev->tx_spare_buf_size; in hclge_knic_setup()
1754 kinfo->tqp = devm_kcalloc(&hdev->pdev->dev, num_tqps, in hclge_knic_setup()
1761 dev_err(&hdev->pdev->dev, "fail to assign TQPs %d.\n", ret); in hclge_knic_setup()
1766 static int hclge_map_tqp_to_vport(struct hclge_dev *hdev, in hclge_map_tqp_to_vport() argument
1781 ret = hclge_map_tqps_to_func(hdev, vport->vport_id, q->index, in hclge_map_tqp_to_vport()
1790 static int hclge_map_tqp(struct hclge_dev *hdev) in hclge_map_tqp() argument
1792 struct hclge_vport *vport = hdev->vport; in hclge_map_tqp()
1795 num_vport = hdev->num_req_vfs + 1; in hclge_map_tqp()
1799 ret = hclge_map_tqp_to_vport(hdev, vport); in hclge_map_tqp()
1812 struct hclge_dev *hdev = vport->back; in hclge_vport_setup() local
1815 nic->pdev = hdev->pdev; in hclge_vport_setup()
1817 bitmap_copy(nic->numa_node_mask.bits, hdev->numa_node_mask.bits, in hclge_vport_setup()
1819 nic->kinfo.io_base = hdev->hw.hw.io_base; in hclge_vport_setup()
1822 hdev->num_tx_desc, hdev->num_rx_desc); in hclge_vport_setup()
1824 dev_err(&hdev->pdev->dev, "knic setup failed %d\n", ret); in hclge_vport_setup()
1829 static int hclge_alloc_vport(struct hclge_dev *hdev) in hclge_alloc_vport() argument
1831 struct pci_dev *pdev = hdev->pdev; in hclge_alloc_vport()
1839 num_vport = hdev->num_req_vfs + 1; in hclge_alloc_vport()
1841 if (hdev->num_tqps < num_vport) { in hclge_alloc_vport()
1842 dev_err(&hdev->pdev->dev, "tqps(%u) is less than vports(%d)", in hclge_alloc_vport()
1843 hdev->num_tqps, num_vport); in hclge_alloc_vport()
1848 tqp_per_vport = hdev->num_tqps / num_vport; in hclge_alloc_vport()
1849 tqp_main_vport = tqp_per_vport + hdev->num_tqps % num_vport; in hclge_alloc_vport()
1856 hdev->vport = vport; in hclge_alloc_vport()
1857 hdev->num_alloc_vport = num_vport; in hclge_alloc_vport()
1860 hdev->num_alloc_vfs = hdev->num_req_vfs; in hclge_alloc_vport()
1863 vport->back = hdev; in hclge_alloc_vport()
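
Note: 1848-1849 split the queue pairs evenly and fold the remainder into the main (PF) vport. Worked example: 16 TQPs across 3 vports gives tqp_per_vport = 5 and tqp_main_vport = 6, so the two VF vports get 5 each and the PF vport 6.

    tqp_per_vport  = hdev->num_tqps / num_vport;               /* floor */
    tqp_main_vport = tqp_per_vport + hdev->num_tqps % num_vport;
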
1893 static int hclge_cmd_alloc_tx_buff(struct hclge_dev *hdev, in hclge_cmd_alloc_tx_buff() argument
1915 ret = hclge_cmd_send(&hdev->hw, &desc, 1); in hclge_cmd_alloc_tx_buff()
1917 dev_err(&hdev->pdev->dev, "tx buffer alloc cmd failed %d.\n", in hclge_cmd_alloc_tx_buff()
1923 static int hclge_tx_buffer_alloc(struct hclge_dev *hdev, in hclge_tx_buffer_alloc() argument
1926 int ret = hclge_cmd_alloc_tx_buff(hdev, buf_alloc); in hclge_tx_buffer_alloc()
1929 dev_err(&hdev->pdev->dev, "tx buffer alloc failed %d\n", ret); in hclge_tx_buffer_alloc()
1934 static u32 hclge_get_tc_num(struct hclge_dev *hdev) in hclge_get_tc_num() argument
1940 if (hdev->hw_tc_map & BIT(i)) in hclge_get_tc_num()
1946 static int hclge_get_pfc_priv_num(struct hclge_dev *hdev, in hclge_get_pfc_priv_num() argument
1955 if ((hdev->tm_info.hw_pfc_map & BIT(i)) && in hclge_get_pfc_priv_num()
1964 static int hclge_get_no_pfc_priv_num(struct hclge_dev *hdev, in hclge_get_no_pfc_priv_num() argument
1973 if (hdev->hw_tc_map & BIT(i) && in hclge_get_no_pfc_priv_num()
1974 !(hdev->tm_info.hw_pfc_map & BIT(i)) && in hclge_get_no_pfc_priv_num()
2006 static bool hclge_is_rx_buf_ok(struct hclge_dev *hdev, in hclge_is_rx_buf_ok() argument
2011 u32 tc_num = hclge_get_tc_num(hdev); in hclge_is_rx_buf_ok()
2016 aligned_mps = roundup(hdev->mps, HCLGE_BUF_SIZE_UNIT); in hclge_is_rx_buf_ok()
2018 if (hnae3_dev_dcb_supported(hdev)) in hclge_is_rx_buf_ok()
2020 hdev->dv_buf_size; in hclge_is_rx_buf_ok()
2023 + hdev->dv_buf_size; in hclge_is_rx_buf_ok()
2035 if (hnae3_dev_dcb_supported(hdev)) { in hclge_is_rx_buf_ok()
2036 buf_alloc->s_buf.self.high = shared_buf - hdev->dv_buf_size; in hclge_is_rx_buf_ok()
2046 if (hnae3_dev_dcb_supported(hdev)) { in hclge_is_rx_buf_ok()
2047 hi_thrd = shared_buf - hdev->dv_buf_size; in hclge_is_rx_buf_ok()
2072 static int hclge_tx_buffer_calc(struct hclge_dev *hdev, in hclge_tx_buffer_calc() argument
2077 total_size = hdev->pkt_buf_size; in hclge_tx_buffer_calc()
2083 if (hdev->hw_tc_map & BIT(i)) { in hclge_tx_buffer_calc()
2084 if (total_size < hdev->tx_buf_size) in hclge_tx_buffer_calc()
2087 priv->tx_buf_size = hdev->tx_buf_size; in hclge_tx_buffer_calc()
2098 static bool hclge_rx_buf_calc_all(struct hclge_dev *hdev, bool max, in hclge_rx_buf_calc_all() argument
2101 u32 rx_all = hdev->pkt_buf_size - hclge_get_tx_buff_alloced(buf_alloc); in hclge_rx_buf_calc_all()
2102 u32 aligned_mps = round_up(hdev->mps, HCLGE_BUF_SIZE_UNIT); in hclge_rx_buf_calc_all()
2113 if (!(hdev->hw_tc_map & BIT(i))) in hclge_rx_buf_calc_all()
2118 if (hdev->tm_info.hw_pfc_map & BIT(i)) { in hclge_rx_buf_calc_all()
2128 priv->buf_size = priv->wl.high + hdev->dv_buf_size; in hclge_rx_buf_calc_all()
2131 return hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all); in hclge_rx_buf_calc_all()
2134 static bool hclge_drop_nopfc_buf_till_fit(struct hclge_dev *hdev, in hclge_drop_nopfc_buf_till_fit() argument
2137 u32 rx_all = hdev->pkt_buf_size - hclge_get_tx_buff_alloced(buf_alloc); in hclge_drop_nopfc_buf_till_fit()
2138 int no_pfc_priv_num = hclge_get_no_pfc_priv_num(hdev, buf_alloc); in hclge_drop_nopfc_buf_till_fit()
2146 if (hdev->hw_tc_map & mask && in hclge_drop_nopfc_buf_till_fit()
2147 !(hdev->tm_info.hw_pfc_map & mask)) { in hclge_drop_nopfc_buf_till_fit()
2156 if (hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all) || in hclge_drop_nopfc_buf_till_fit()
2161 return hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all); in hclge_drop_nopfc_buf_till_fit()
2164 static bool hclge_drop_pfc_buf_till_fit(struct hclge_dev *hdev, in hclge_drop_pfc_buf_till_fit() argument
2167 u32 rx_all = hdev->pkt_buf_size - hclge_get_tx_buff_alloced(buf_alloc); in hclge_drop_pfc_buf_till_fit()
2168 int pfc_priv_num = hclge_get_pfc_priv_num(hdev, buf_alloc); in hclge_drop_pfc_buf_till_fit()
2176 if (hdev->hw_tc_map & mask && in hclge_drop_pfc_buf_till_fit()
2177 hdev->tm_info.hw_pfc_map & mask) { in hclge_drop_pfc_buf_till_fit()
2186 if (hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all) || in hclge_drop_pfc_buf_till_fit()
2191 return hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all); in hclge_drop_pfc_buf_till_fit()
2194 static int hclge_only_alloc_priv_buff(struct hclge_dev *hdev, in hclge_only_alloc_priv_buff() argument
2201 u32 rx_priv = hdev->pkt_buf_size - hclge_get_tx_buff_alloced(buf_alloc); in hclge_only_alloc_priv_buff()
2202 u32 tc_num = hclge_get_tc_num(hdev); in hclge_only_alloc_priv_buff()
2203 u32 half_mps = hdev->mps >> 1; in hclge_only_alloc_priv_buff()
2213 min_rx_priv = hdev->dv_buf_size + COMPENSATE_BUFFER + in hclge_only_alloc_priv_buff()
2228 if (!(hdev->hw_tc_map & BIT(i))) in hclge_only_alloc_priv_buff()
2233 priv->wl.high = rx_priv - hdev->dv_buf_size; in hclge_only_alloc_priv_buff()
2247 static int hclge_rx_buffer_calc(struct hclge_dev *hdev, in hclge_rx_buffer_calc() argument
2251 if (!hnae3_dev_dcb_supported(hdev)) { in hclge_rx_buffer_calc()
2252 u32 rx_all = hdev->pkt_buf_size; in hclge_rx_buffer_calc()
2255 if (!hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all)) in hclge_rx_buffer_calc()
2261 if (hclge_only_alloc_priv_buff(hdev, buf_alloc)) in hclge_rx_buffer_calc()
2264 if (hclge_rx_buf_calc_all(hdev, true, buf_alloc)) in hclge_rx_buffer_calc()
2268 if (hclge_rx_buf_calc_all(hdev, false, buf_alloc)) in hclge_rx_buffer_calc()
2271 if (hclge_drop_nopfc_buf_till_fit(hdev, buf_alloc)) in hclge_rx_buffer_calc()
2274 if (hclge_drop_pfc_buf_till_fit(hdev, buf_alloc)) in hclge_rx_buffer_calc()
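
Note: 2251-2274 try RX buffer layouts from most to least generous and stop at the first that fits; each helper returns true once hclge_is_rx_buf_ok() accepts the result. A condensed sketch of the ladder (the trailing error code is my assumption; the listing does not show the failure return):

    if (hclge_only_alloc_priv_buff(hdev, buf_alloc))
            return 0;
    if (hclge_rx_buf_calc_all(hdev, true, buf_alloc))    /* max waterlines */
            return 0;
    if (hclge_rx_buf_calc_all(hdev, false, buf_alloc))   /* min waterlines */
            return 0;
    if (hclge_drop_nopfc_buf_till_fit(hdev, buf_alloc))
            return 0;
    if (hclge_drop_pfc_buf_till_fit(hdev, buf_alloc))
            return 0;
    return -ENOMEM;                                      /* assumed */
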
2280 static int hclge_rx_priv_buf_alloc(struct hclge_dev *hdev, in hclge_rx_priv_buf_alloc() argument
2305 ret = hclge_cmd_send(&hdev->hw, &desc, 1); in hclge_rx_priv_buf_alloc()
2307 dev_err(&hdev->pdev->dev, in hclge_rx_priv_buf_alloc()
2313 static int hclge_rx_priv_wl_config(struct hclge_dev *hdev, in hclge_rx_priv_wl_config() argument
2349 ret = hclge_cmd_send(&hdev->hw, desc, 2); in hclge_rx_priv_wl_config()
2351 dev_err(&hdev->pdev->dev, in hclge_rx_priv_wl_config()
2357 static int hclge_common_thrd_config(struct hclge_dev *hdev, in hclge_common_thrd_config() argument
2393 ret = hclge_cmd_send(&hdev->hw, desc, 2); in hclge_common_thrd_config()
2395 dev_err(&hdev->pdev->dev, in hclge_common_thrd_config()
2400 static int hclge_common_wl_config(struct hclge_dev *hdev, in hclge_common_wl_config() argument
2417 ret = hclge_cmd_send(&hdev->hw, &desc, 1); in hclge_common_wl_config()
2419 dev_err(&hdev->pdev->dev, in hclge_common_wl_config()
2425 int hclge_buffer_alloc(struct hclge_dev *hdev) in hclge_buffer_alloc() argument
2434 ret = hclge_tx_buffer_calc(hdev, pkt_buf); in hclge_buffer_alloc()
2436 dev_err(&hdev->pdev->dev, in hclge_buffer_alloc()
2441 ret = hclge_tx_buffer_alloc(hdev, pkt_buf); in hclge_buffer_alloc()
2443 dev_err(&hdev->pdev->dev, in hclge_buffer_alloc()
2448 ret = hclge_rx_buffer_calc(hdev, pkt_buf); in hclge_buffer_alloc()
2450 dev_err(&hdev->pdev->dev, in hclge_buffer_alloc()
2456 ret = hclge_rx_priv_buf_alloc(hdev, pkt_buf); in hclge_buffer_alloc()
2458 dev_err(&hdev->pdev->dev, "could not alloc rx priv buffer %d\n", in hclge_buffer_alloc()
2463 if (hnae3_dev_dcb_supported(hdev)) { in hclge_buffer_alloc()
2464 ret = hclge_rx_priv_wl_config(hdev, pkt_buf); in hclge_buffer_alloc()
2466 dev_err(&hdev->pdev->dev, in hclge_buffer_alloc()
2472 ret = hclge_common_thrd_config(hdev, pkt_buf); in hclge_buffer_alloc()
2474 dev_err(&hdev->pdev->dev, in hclge_buffer_alloc()
2481 ret = hclge_common_wl_config(hdev, pkt_buf); in hclge_buffer_alloc()
2483 dev_err(&hdev->pdev->dev, in hclge_buffer_alloc()
2495 struct hclge_dev *hdev = vport->back; in hclge_init_roce_base_info() local
2499 if (hdev->num_msi < hdev->num_nic_msi + hdev->num_roce_msi) in hclge_init_roce_base_info()
2502 roce->rinfo.base_vector = hdev->num_nic_msi; in hclge_init_roce_base_info()
2505 roce->rinfo.roce_io_base = hdev->hw.hw.io_base; in hclge_init_roce_base_info()
2506 roce->rinfo.roce_mem_base = hdev->hw.hw.mem_base; in hclge_init_roce_base_info()
2516 static int hclge_init_msi(struct hclge_dev *hdev) in hclge_init_msi() argument
2518 struct pci_dev *pdev = hdev->pdev; in hclge_init_msi()
2523 hdev->num_msi, in hclge_init_msi()
2531 if (vectors < hdev->num_msi) in hclge_init_msi()
2532 dev_warn(&hdev->pdev->dev, in hclge_init_msi()
2534 hdev->num_msi, vectors); in hclge_init_msi()
2536 hdev->num_msi = vectors; in hclge_init_msi()
2537 hdev->num_msi_left = vectors; in hclge_init_msi()
2539 hdev->vector_status = devm_kcalloc(&pdev->dev, hdev->num_msi, in hclge_init_msi()
2541 if (!hdev->vector_status) { in hclge_init_msi()
2546 for (i = 0; i < hdev->num_msi; i++) in hclge_init_msi()
2547 hdev->vector_status[i] = HCLGE_INVALID_VPORT; in hclge_init_msi()
2549 hdev->vector_irq = devm_kcalloc(&pdev->dev, hdev->num_msi, in hclge_init_msi()
2551 if (!hdev->vector_irq) { in hclge_init_msi()
2593 static int hclge_cfg_mac_speed_dup_hw(struct hclge_dev *hdev, int speed, in hclge_cfg_mac_speed_dup_hw() argument
2610 dev_err(&hdev->pdev->dev, "invalid speed (%d)\n", speed); in hclge_cfg_mac_speed_dup_hw()
2620 ret = hclge_cmd_send(&hdev->hw, &desc, 1); in hclge_cfg_mac_speed_dup_hw()
2622 dev_err(&hdev->pdev->dev, in hclge_cfg_mac_speed_dup_hw()
2630 int hclge_cfg_mac_speed_dup(struct hclge_dev *hdev, int speed, u8 duplex, u8 lane_num) in hclge_cfg_mac_speed_dup() argument
2632 struct hclge_mac *mac = &hdev->hw.mac; in hclge_cfg_mac_speed_dup()
2640 ret = hclge_cfg_mac_speed_dup_hw(hdev, speed, duplex, lane_num); in hclge_cfg_mac_speed_dup()
2644 hdev->hw.mac.speed = speed; in hclge_cfg_mac_speed_dup()
2645 hdev->hw.mac.duplex = duplex; in hclge_cfg_mac_speed_dup()
2647 hdev->hw.mac.lane_num = lane_num; in hclge_cfg_mac_speed_dup()
2656 struct hclge_dev *hdev = vport->back; in hclge_cfg_mac_speed_dup_h() local
2659 ret = hclge_cfg_mac_speed_dup(hdev, speed, duplex, lane_num); in hclge_cfg_mac_speed_dup_h()
2664 hdev->hw.mac.req_speed = speed; in hclge_cfg_mac_speed_dup_h()
2665 hdev->hw.mac.req_duplex = duplex; in hclge_cfg_mac_speed_dup_h()
2670 static int hclge_set_autoneg_en(struct hclge_dev *hdev, bool enable) in hclge_set_autoneg_en() argument
2684 ret = hclge_cmd_send(&hdev->hw, &desc, 1); in hclge_set_autoneg_en()
2686 dev_err(&hdev->pdev->dev, "auto neg set cmd failed %d.\n", in hclge_set_autoneg_en()
2695 struct hclge_dev *hdev = vport->back; in hclge_set_autoneg() local
2697 if (!hdev->hw.mac.support_autoneg) { in hclge_set_autoneg()
2699 dev_err(&hdev->pdev->dev, in hclge_set_autoneg()
2707 return hclge_set_autoneg_en(hdev, enable); in hclge_set_autoneg()
2713 struct hclge_dev *hdev = vport->back; in hclge_get_autoneg() local
2714 struct phy_device *phydev = hdev->hw.mac.phydev; in hclge_get_autoneg()
2719 return hdev->hw.mac.autoneg; in hclge_get_autoneg()
2725 struct hclge_dev *hdev = vport->back; in hclge_restart_autoneg() local
2728 dev_dbg(&hdev->pdev->dev, "restart autoneg\n"); in hclge_restart_autoneg()
2730 ret = hclge_notify_client(hdev, HNAE3_DOWN_CLIENT); in hclge_restart_autoneg()
2733 return hclge_notify_client(hdev, HNAE3_UP_CLIENT); in hclge_restart_autoneg()
2739 struct hclge_dev *hdev = vport->back; in hclge_halt_autoneg() local
2741 if (hdev->hw.mac.support_autoneg && hdev->hw.mac.autoneg) in hclge_halt_autoneg()
2742 return hclge_set_autoneg_en(hdev, !halt); in hclge_halt_autoneg()
2747 static void hclge_parse_fec_stats_lanes(struct hclge_dev *hdev, in hclge_parse_fec_stats_lanes() argument
2764 hdev->fec_stats.per_lanes[i] += in hclge_parse_fec_stats_lanes()
2770 static void hclge_parse_fec_stats(struct hclge_dev *hdev, in hclge_parse_fec_stats() argument
2777 hdev->fec_stats.base_r_lane_num = req->base_r_lane_num; in hclge_parse_fec_stats()
2778 hdev->fec_stats.rs_corr_blocks += in hclge_parse_fec_stats()
2780 hdev->fec_stats.rs_uncorr_blocks += in hclge_parse_fec_stats()
2782 hdev->fec_stats.rs_error_blocks += in hclge_parse_fec_stats()
2784 hdev->fec_stats.base_r_corr_blocks += in hclge_parse_fec_stats()
2786 hdev->fec_stats.base_r_uncorr_blocks += in hclge_parse_fec_stats()
2789 hclge_parse_fec_stats_lanes(hdev, &desc[1], desc_len - 1); in hclge_parse_fec_stats()
2792 static int hclge_update_fec_stats_hw(struct hclge_dev *hdev) in hclge_update_fec_stats_hw() argument
2805 ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_FEC_STATS_CMD_NUM); in hclge_update_fec_stats_hw()
2809 hclge_parse_fec_stats(hdev, desc, HCLGE_FEC_STATS_CMD_NUM); in hclge_update_fec_stats_hw()
2814 static void hclge_update_fec_stats(struct hclge_dev *hdev) in hclge_update_fec_stats() argument
2816 struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev); in hclge_update_fec_stats()
2820 test_and_set_bit(HCLGE_STATE_FEC_STATS_UPDATING, &hdev->state)) in hclge_update_fec_stats()
2823 ret = hclge_update_fec_stats_hw(hdev); in hclge_update_fec_stats()
2825 dev_err(&hdev->pdev->dev, in hclge_update_fec_stats()
2828 clear_bit(HCLGE_STATE_FEC_STATS_UPDATING, &hdev->state); in hclge_update_fec_stats()
2831 static void hclge_get_fec_stats_total(struct hclge_dev *hdev, in hclge_get_fec_stats_total() argument
2834 fec_stats->corrected_blocks.total = hdev->fec_stats.rs_corr_blocks; in hclge_get_fec_stats_total()
2836 hdev->fec_stats.rs_uncorr_blocks; in hclge_get_fec_stats_total()
2839 static void hclge_get_fec_stats_lanes(struct hclge_dev *hdev, in hclge_get_fec_stats_lanes() argument
2844 if (hdev->fec_stats.base_r_lane_num == 0 || in hclge_get_fec_stats_lanes()
2845 hdev->fec_stats.base_r_lane_num > HCLGE_FEC_STATS_MAX_LANES) { in hclge_get_fec_stats_lanes()
2846 dev_err(&hdev->pdev->dev, in hclge_get_fec_stats_lanes()
2848 hdev->fec_stats.base_r_lane_num); in hclge_get_fec_stats_lanes()
2852 for (i = 0; i < hdev->fec_stats.base_r_lane_num; i++) { in hclge_get_fec_stats_lanes()
2854 hdev->fec_stats.base_r_corr_per_lanes[i]; in hclge_get_fec_stats_lanes()
2856 hdev->fec_stats.base_r_uncorr_per_lanes[i]; in hclge_get_fec_stats_lanes()
2860 static void hclge_comm_get_fec_stats(struct hclge_dev *hdev, in hclge_comm_get_fec_stats() argument
2863 u32 fec_mode = hdev->hw.mac.fec_mode; in hclge_comm_get_fec_stats()
2868 hclge_get_fec_stats_total(hdev, fec_stats); in hclge_comm_get_fec_stats()
2871 hclge_get_fec_stats_lanes(hdev, fec_stats); in hclge_comm_get_fec_stats()
2874 dev_err(&hdev->pdev->dev, in hclge_comm_get_fec_stats()
2885 struct hclge_dev *hdev = vport->back; in hclge_get_fec_stats() local
2886 u32 fec_mode = hdev->hw.mac.fec_mode; in hclge_get_fec_stats()
2893 hclge_update_fec_stats(hdev); in hclge_get_fec_stats()
2895 hclge_comm_get_fec_stats(hdev, fec_stats); in hclge_get_fec_stats()
2898 static int hclge_set_fec_hw(struct hclge_dev *hdev, u32 fec_mode) in hclge_set_fec_hw() argument
2919 ret = hclge_cmd_send(&hdev->hw, &desc, 1); in hclge_set_fec_hw()
2921 dev_err(&hdev->pdev->dev, "set fec mode failed %d.\n", ret); in hclge_set_fec_hw()
2929 struct hclge_dev *hdev = vport->back; in hclge_set_fec() local
2930 struct hclge_mac *mac = &hdev->hw.mac; in hclge_set_fec()
2934 dev_err(&hdev->pdev->dev, "unsupported fec mode\n"); in hclge_set_fec()
2938 ret = hclge_set_fec_hw(hdev, fec_mode); in hclge_set_fec()
2950 struct hclge_dev *hdev = vport->back; in hclge_get_fec() local
2951 struct hclge_mac *mac = &hdev->hw.mac; in hclge_get_fec()
2959 static int hclge_mac_init(struct hclge_dev *hdev) in hclge_mac_init() argument
2961 struct hclge_mac *mac = &hdev->hw.mac; in hclge_mac_init()
2964 hdev->support_sfp_query = true; in hclge_mac_init()
2966 if (!test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state)) in hclge_mac_init()
2967 hdev->hw.mac.duplex = HCLGE_MAC_FULL; in hclge_mac_init()
2969 if (hdev->hw.mac.support_autoneg) { in hclge_mac_init()
2970 ret = hclge_set_autoneg_en(hdev, hdev->hw.mac.autoneg); in hclge_mac_init()
2975 if (!hdev->hw.mac.autoneg) { in hclge_mac_init()
2976 ret = hclge_cfg_mac_speed_dup_hw(hdev, hdev->hw.mac.req_speed, in hclge_mac_init()
2977 hdev->hw.mac.req_duplex, in hclge_mac_init()
2978 hdev->hw.mac.lane_num); in hclge_mac_init()
2986 ret = hclge_set_fec_hw(hdev, mac->user_fec_mode); in hclge_mac_init()
2991 ret = hclge_set_mac_mtu(hdev, hdev->mps); in hclge_mac_init()
2993 dev_err(&hdev->pdev->dev, "set mtu failed ret=%d\n", ret); in hclge_mac_init()
2997 ret = hclge_set_default_loopback(hdev); in hclge_mac_init()
3001 ret = hclge_buffer_alloc(hdev); in hclge_mac_init()
3003 dev_err(&hdev->pdev->dev, in hclge_mac_init()
3009 static void hclge_mbx_task_schedule(struct hclge_dev *hdev) in hclge_mbx_task_schedule() argument
3011 if (!test_bit(HCLGE_STATE_REMOVING, &hdev->state) && in hclge_mbx_task_schedule()
3012 !test_and_set_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state)) { in hclge_mbx_task_schedule()
3013 hdev->last_mbx_scheduled = jiffies; in hclge_mbx_task_schedule()
3014 mod_delayed_work(hclge_wq, &hdev->service_task, 0); in hclge_mbx_task_schedule()
3018 static void hclge_reset_task_schedule(struct hclge_dev *hdev) in hclge_reset_task_schedule() argument
3020 if (!test_bit(HCLGE_STATE_REMOVING, &hdev->state) && in hclge_reset_task_schedule()
3021 test_bit(HCLGE_STATE_SERVICE_INITED, &hdev->state) && in hclge_reset_task_schedule()
3022 !test_and_set_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state)) { in hclge_reset_task_schedule()
3023 hdev->last_rst_scheduled = jiffies; in hclge_reset_task_schedule()
3024 mod_delayed_work(hclge_wq, &hdev->service_task, 0); in hclge_reset_task_schedule()
3028 static void hclge_errhand_task_schedule(struct hclge_dev *hdev) in hclge_errhand_task_schedule() argument
3030 if (!test_bit(HCLGE_STATE_REMOVING, &hdev->state) && in hclge_errhand_task_schedule()
3031 !test_and_set_bit(HCLGE_STATE_ERR_SERVICE_SCHED, &hdev->state)) in hclge_errhand_task_schedule()
3032 mod_delayed_work(hclge_wq, &hdev->service_task, 0); in hclge_errhand_task_schedule()
3035 void hclge_task_schedule(struct hclge_dev *hdev, unsigned long delay_time) in hclge_task_schedule() argument
3037 if (!test_bit(HCLGE_STATE_REMOVING, &hdev->state) && in hclge_task_schedule()
3038 !test_bit(HCLGE_STATE_RST_FAIL, &hdev->state)) in hclge_task_schedule()
3039 mod_delayed_work(hclge_wq, &hdev->service_task, delay_time); in hclge_task_schedule()
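
Note: the three schedule helpers at 3011-3032 share one idempotent pattern: refuse while the device is being removed, set a per-kind SERVICE_SCHED state bit (skipping if it was already set), and pull the shared service task forward with a zero delay. Using the mailbox variant as the template:

    if (!test_bit(HCLGE_STATE_REMOVING, &hdev->state) &&
        !test_and_set_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state)) {
            hdev->last_mbx_scheduled = jiffies;   /* for stall detection */
            mod_delayed_work(hclge_wq, &hdev->service_task, 0);
    }
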
3042 static int hclge_get_mac_link_status(struct hclge_dev *hdev, int *link_status) in hclge_get_mac_link_status() argument
3049 ret = hclge_cmd_send(&hdev->hw, &desc, 1); in hclge_get_mac_link_status()
3051 dev_err(&hdev->pdev->dev, "get link status cmd failed %d\n", in hclge_get_mac_link_status()
3063 static int hclge_get_mac_phy_link(struct hclge_dev *hdev, int *link_status) in hclge_get_mac_phy_link() argument
3065 struct phy_device *phydev = hdev->hw.mac.phydev; in hclge_get_mac_phy_link()
3069 if (test_bit(HCLGE_STATE_DOWN, &hdev->state)) in hclge_get_mac_phy_link()
3075 return hclge_get_mac_link_status(hdev, link_status); in hclge_get_mac_phy_link()
3078 static void hclge_push_link_status(struct hclge_dev *hdev) in hclge_push_link_status() argument
3084 for (i = 0; i < pci_num_vf(hdev->pdev); i++) { in hclge_push_link_status()
3085 vport = &hdev->vport[i + HCLGE_VF_VPORT_START_NUM]; in hclge_push_link_status()
3093 dev_err(&hdev->pdev->dev, in hclge_push_link_status()
3100 static void hclge_update_link_status(struct hclge_dev *hdev) in hclge_update_link_status() argument
3102 struct hnae3_handle *handle = &hdev->vport[0].nic; in hclge_update_link_status()
3103 struct hnae3_client *client = hdev->nic_client; in hclge_update_link_status()
3110 if (test_and_set_bit(HCLGE_STATE_LINK_UPDATING, &hdev->state)) in hclge_update_link_status()
3113 ret = hclge_get_mac_phy_link(hdev, &state); in hclge_update_link_status()
3115 clear_bit(HCLGE_STATE_LINK_UPDATING, &hdev->state); in hclge_update_link_status()
3119 if (state != hdev->hw.mac.link) { in hclge_update_link_status()
3120 hdev->hw.mac.link = state; in hclge_update_link_status()
3122 hclge_update_port_info(hdev); in hclge_update_link_status()
3125 hclge_config_mac_tnl_int(hdev, state); in hclge_update_link_status()
3127 if (test_bit(HCLGE_STATE_ROCE_REGISTERED, &hdev->state)) { in hclge_update_link_status()
3128 struct hnae3_handle *rhandle = &hdev->vport[0].roce; in hclge_update_link_status()
3129 struct hnae3_client *rclient = hdev->roce_client; in hclge_update_link_status()
3136 hclge_push_link_status(hdev); in hclge_update_link_status()
3139 clear_bit(HCLGE_STATE_LINK_UPDATING, &hdev->state); in hclge_update_link_status()
3184 static void hclge_update_pause_advertising(struct hclge_dev *hdev) in hclge_update_pause_advertising() argument
3186 struct hclge_mac *mac = &hdev->hw.mac; in hclge_update_pause_advertising()
3189 switch (hdev->fc_mode_last_time) { in hclge_update_pause_advertising()
3211 static void hclge_update_advertising(struct hclge_dev *hdev) in hclge_update_advertising() argument
3213 struct hclge_mac *mac = &hdev->hw.mac; in hclge_update_advertising()
3218 hclge_update_pause_advertising(hdev); in hclge_update_advertising()
3221 static void hclge_update_port_capability(struct hclge_dev *hdev, in hclge_update_port_capability() argument
3224 if (hnae3_dev_fec_supported(hdev)) in hclge_update_port_capability()
3242 hclge_update_advertising(hdev); in hclge_update_port_capability()
3246 static int hclge_get_sfp_speed(struct hclge_dev *hdev, u32 *speed) in hclge_get_sfp_speed() argument
3254 ret = hclge_cmd_send(&hdev->hw, &desc, 1); in hclge_get_sfp_speed()
3256 dev_warn(&hdev->pdev->dev, in hclge_get_sfp_speed()
3260 dev_err(&hdev->pdev->dev, "get sfp speed failed %d\n", ret); in hclge_get_sfp_speed()
3269 static int hclge_get_sfp_info(struct hclge_dev *hdev, struct hclge_mac *mac) in hclge_get_sfp_info() argument
3280 ret = hclge_cmd_send(&hdev->hw, &desc, 1); in hclge_get_sfp_info()
3282 dev_warn(&hdev->pdev->dev, in hclge_get_sfp_info()
3286 dev_err(&hdev->pdev->dev, "get sfp info failed %d\n", ret); in hclge_get_sfp_info()
3327 struct hclge_dev *hdev = vport->back; in hclge_get_phy_link_ksettings() local
3336 ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_PHY_LINK_SETTING_BD_NUM); in hclge_get_phy_link_ksettings()
3338 dev_err(&hdev->pdev->dev, in hclge_get_phy_link_ksettings()
3377 struct hclge_dev *hdev = vport->back; in hclge_set_phy_link_ksettings() local
3405 ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_PHY_LINK_SETTING_BD_NUM); in hclge_set_phy_link_ksettings()
3407 dev_err(&hdev->pdev->dev, in hclge_set_phy_link_ksettings()
3412 hdev->hw.mac.req_autoneg = cmd->base.autoneg; in hclge_set_phy_link_ksettings()
3413 hdev->hw.mac.req_speed = cmd->base.speed; in hclge_set_phy_link_ksettings()
3414 hdev->hw.mac.req_duplex = cmd->base.duplex; in hclge_set_phy_link_ksettings()
3415 linkmode_copy(hdev->hw.mac.advertising, cmd->link_modes.advertising); in hclge_set_phy_link_ksettings()
3420 static int hclge_update_tp_port_info(struct hclge_dev *hdev) in hclge_update_tp_port_info() argument
3425 if (!hnae3_dev_phy_imp_supported(hdev)) in hclge_update_tp_port_info()
3428 ret = hclge_get_phy_link_ksettings(&hdev->vport->nic, &cmd); in hclge_update_tp_port_info()
3432 hdev->hw.mac.autoneg = cmd.base.autoneg; in hclge_update_tp_port_info()
3433 hdev->hw.mac.speed = cmd.base.speed; in hclge_update_tp_port_info()
3434 hdev->hw.mac.duplex = cmd.base.duplex; in hclge_update_tp_port_info()
3435 linkmode_copy(hdev->hw.mac.advertising, cmd.link_modes.advertising); in hclge_update_tp_port_info()
3440 static int hclge_tp_port_init(struct hclge_dev *hdev) in hclge_tp_port_init() argument
3444 if (!hnae3_dev_phy_imp_supported(hdev)) in hclge_tp_port_init()
3447 cmd.base.autoneg = hdev->hw.mac.req_autoneg; in hclge_tp_port_init()
3448 cmd.base.speed = hdev->hw.mac.req_speed; in hclge_tp_port_init()
3449 cmd.base.duplex = hdev->hw.mac.req_duplex; in hclge_tp_port_init()
3450 linkmode_copy(cmd.link_modes.advertising, hdev->hw.mac.advertising); in hclge_tp_port_init()
3452 return hclge_set_phy_link_ksettings(&hdev->vport->nic, &cmd); in hclge_tp_port_init()
3455 static int hclge_update_port_info(struct hclge_dev *hdev) in hclge_update_port_info() argument
3457 struct hclge_mac *mac = &hdev->hw.mac; in hclge_update_port_info()
3463 return hclge_update_tp_port_info(hdev); in hclge_update_port_info()
3466 if (!hdev->support_sfp_query) in hclge_update_port_info()
3469 if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2) { in hclge_update_port_info()
3471 ret = hclge_get_sfp_info(hdev, mac); in hclge_update_port_info()
3474 ret = hclge_get_sfp_speed(hdev, &speed); in hclge_update_port_info()
3478 hdev->support_sfp_query = false; in hclge_update_port_info()
3484 if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2) { in hclge_update_port_info()
3486 hclge_update_port_capability(hdev, mac); in hclge_update_port_info()
3488 (void)hclge_tm_port_shaper_cfg(hdev); in hclge_update_port_info()
3491 return hclge_cfg_mac_speed_dup(hdev, mac->speed, in hclge_update_port_info()
3498 return hclge_cfg_mac_speed_dup(hdev, speed, HCLGE_MAC_FULL, 0); in hclge_update_port_info()
3505 struct hclge_dev *hdev = vport->back; in hclge_get_status() local
3507 hclge_update_link_status(hdev); in hclge_get_status()
3509 return hdev->hw.mac.link; in hclge_get_status()
3512 struct hclge_vport *hclge_get_vf_vport(struct hclge_dev *hdev, int vf) in hclge_get_vf_vport() argument
3514 if (!pci_num_vf(hdev->pdev)) { in hclge_get_vf_vport()
3515 dev_err(&hdev->pdev->dev, in hclge_get_vf_vport()
3520 if (vf < 0 || vf >= pci_num_vf(hdev->pdev)) { in hclge_get_vf_vport()
3521 dev_err(&hdev->pdev->dev, in hclge_get_vf_vport()
3523 vf, pci_num_vf(hdev->pdev)); in hclge_get_vf_vport()
3529 return &hdev->vport[vf]; in hclge_get_vf_vport()
3536 struct hclge_dev *hdev = vport->back; in hclge_get_vf_config() local
3538 vport = hclge_get_vf_vport(hdev, vf); in hclge_get_vf_config()
3560 struct hclge_dev *hdev = vport->back; in hclge_set_vf_link_state() local
3564 vport = hclge_get_vf_vport(hdev, vf); in hclge_set_vf_link_state()
3580 dev_err(&hdev->pdev->dev, in hclge_set_vf_link_state()
3587 static u32 hclge_check_event_cause(struct hclge_dev *hdev, u32 *clearval) in hclge_check_event_cause() argument
3592 cmdq_src_reg = hclge_read_dev(&hdev->hw, HCLGE_VECTOR0_CMDQ_SRC_REG); in hclge_check_event_cause()
3593 msix_src_reg = hclge_read_dev(&hdev->hw, HCLGE_MISC_VECTOR_INT_STS); in hclge_check_event_cause()
3594 hw_err_src_reg = hclge_read_dev(&hdev->hw, in hclge_check_event_cause()
3606 dev_info(&hdev->pdev->dev, "IMP reset interrupt\n"); in hclge_check_event_cause()
3607 set_bit(HNAE3_IMP_RESET, &hdev->reset_pending); in hclge_check_event_cause()
3608 set_bit(HCLGE_COMM_STATE_CMD_DISABLE, &hdev->hw.hw.comm_state); in hclge_check_event_cause()
3610 hdev->rst_stats.imp_rst_cnt++; in hclge_check_event_cause()
3615 dev_info(&hdev->pdev->dev, "global reset interrupt\n"); in hclge_check_event_cause()
3616 set_bit(HCLGE_COMM_STATE_CMD_DISABLE, &hdev->hw.hw.comm_state); in hclge_check_event_cause()
3617 set_bit(HNAE3_GLOBAL_RESET, &hdev->reset_pending); in hclge_check_event_cause()
3619 hdev->rst_stats.global_rst_cnt++; in hclge_check_event_cause()
3642 dev_info(&hdev->pdev->dev, in hclge_check_event_cause()
3649 static void hclge_clear_event_cause(struct hclge_dev *hdev, u32 event_type, in hclge_clear_event_cause() argument
3660 hclge_write_dev(&hdev->hw, HCLGE_MISC_RESET_STS_REG, regclr); in hclge_clear_event_cause()
3663 hclge_write_dev(&hdev->hw, HCLGE_VECTOR0_CMDQ_SRC_REG, regclr); in hclge_clear_event_cause()
3670 static void hclge_clear_all_event_cause(struct hclge_dev *hdev) in hclge_clear_all_event_cause() argument
3672 hclge_clear_event_cause(hdev, HCLGE_VECTOR0_EVENT_RST, in hclge_clear_all_event_cause()
3676 hclge_clear_event_cause(hdev, HCLGE_VECTOR0_EVENT_MBX, 0); in hclge_clear_all_event_cause()
3686 struct hclge_dev *hdev = data; in hclge_misc_irq_handle() local
3691 hclge_enable_vector(&hdev->misc_vector, false); in hclge_misc_irq_handle()
3692 event_cause = hclge_check_event_cause(hdev, &clearval); in hclge_misc_irq_handle()
3697 hclge_errhand_task_schedule(hdev); in hclge_misc_irq_handle()
3700 hclge_reset_task_schedule(hdev); in hclge_misc_irq_handle()
3703 spin_lock_irqsave(&hdev->ptp->lock, flags); in hclge_misc_irq_handle()
3704 hclge_ptp_clean_tx_hwts(hdev); in hclge_misc_irq_handle()
3705 spin_unlock_irqrestore(&hdev->ptp->lock, flags); in hclge_misc_irq_handle()
3717 hclge_mbx_task_schedule(hdev); in hclge_misc_irq_handle()
3720 dev_warn(&hdev->pdev->dev, in hclge_misc_irq_handle()
3725 hclge_clear_event_cause(hdev, event_cause, clearval); in hclge_misc_irq_handle()
3731 hclge_enable_vector(&hdev->misc_vector, true); in hclge_misc_irq_handle()
3736 static void hclge_free_vector(struct hclge_dev *hdev, int vector_id) in hclge_free_vector() argument
3738 if (hdev->vector_status[vector_id] == HCLGE_INVALID_VPORT) { in hclge_free_vector()
3739 dev_warn(&hdev->pdev->dev, in hclge_free_vector()
3744 hdev->vector_status[vector_id] = HCLGE_INVALID_VPORT; in hclge_free_vector()
3745 hdev->num_msi_left += 1; in hclge_free_vector()
3746 hdev->num_msi_used -= 1; in hclge_free_vector()
3749 static void hclge_get_misc_vector(struct hclge_dev *hdev) in hclge_get_misc_vector() argument
3751 struct hclge_misc_vector *vector = &hdev->misc_vector; in hclge_get_misc_vector()
3753 vector->vector_irq = pci_irq_vector(hdev->pdev, 0); in hclge_get_misc_vector()
3755 vector->addr = hdev->hw.hw.io_base + HCLGE_MISC_VECTOR_REG_BASE; in hclge_get_misc_vector()
3756 hdev->vector_status[0] = 0; in hclge_get_misc_vector()
3758 hdev->num_msi_left -= 1; in hclge_get_misc_vector()
3759 hdev->num_msi_used += 1; in hclge_get_misc_vector()
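
Note: 3738-3746 and 3751-3759 keep vector accounting in lock-step: a slot in vector_status records the owning vport (HCLGE_INVALID_VPORT when free) while num_msi_left and num_msi_used move in opposite directions. Paired, the two halves look like:

    /* claim (misc vector, slot 0, owned by vport 0) */
    hdev->vector_status[0] = 0;
    hdev->num_msi_left -= 1;
    hdev->num_msi_used += 1;

    /* release */
    hdev->vector_status[vector_id] = HCLGE_INVALID_VPORT;
    hdev->num_msi_left += 1;
    hdev->num_msi_used -= 1;
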
3762 static int hclge_misc_irq_init(struct hclge_dev *hdev) in hclge_misc_irq_init() argument
3766 hclge_get_misc_vector(hdev); in hclge_misc_irq_init()
3769 snprintf(hdev->misc_vector.name, HNAE3_INT_NAME_LEN, "%s-misc-%s", in hclge_misc_irq_init()
3770 HCLGE_NAME, pci_name(hdev->pdev)); in hclge_misc_irq_init()
3771 ret = request_irq(hdev->misc_vector.vector_irq, hclge_misc_irq_handle, in hclge_misc_irq_init()
3772 0, hdev->misc_vector.name, hdev); in hclge_misc_irq_init()
3774 hclge_free_vector(hdev, 0); in hclge_misc_irq_init()
3775 dev_err(&hdev->pdev->dev, "request misc irq(%d) fail\n", in hclge_misc_irq_init()
3776 hdev->misc_vector.vector_irq); in hclge_misc_irq_init()
3782 static void hclge_misc_irq_uninit(struct hclge_dev *hdev) in hclge_misc_irq_uninit() argument
3784 free_irq(hdev->misc_vector.vector_irq, hdev); in hclge_misc_irq_uninit()
3785 hclge_free_vector(hdev, 0); in hclge_misc_irq_uninit()
3788 int hclge_notify_client(struct hclge_dev *hdev, in hclge_notify_client() argument
3791 struct hnae3_handle *handle = &hdev->vport[0].nic; in hclge_notify_client()
3792 struct hnae3_client *client = hdev->nic_client; in hclge_notify_client()
3795 if (!test_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state) || !client) in hclge_notify_client()
3803 dev_err(&hdev->pdev->dev, "notify nic client failed %d(%d)\n", in hclge_notify_client()
3809 static int hclge_notify_roce_client(struct hclge_dev *hdev, in hclge_notify_roce_client() argument
3812 struct hnae3_handle *handle = &hdev->vport[0].roce; in hclge_notify_roce_client()
3813 struct hnae3_client *client = hdev->roce_client; in hclge_notify_roce_client()
3816 if (!test_bit(HCLGE_STATE_ROCE_REGISTERED, &hdev->state) || !client) in hclge_notify_roce_client()
3824 dev_err(&hdev->pdev->dev, "notify roce client failed %d(%d)", in hclge_notify_roce_client()
3830 static int hclge_reset_wait(struct hclge_dev *hdev) in hclge_reset_wait() argument
3838 switch (hdev->reset_type) { in hclge_reset_wait()
3852 dev_err(&hdev->pdev->dev, in hclge_reset_wait()
3854 hdev->reset_type); in hclge_reset_wait()
3858 val = hclge_read_dev(&hdev->hw, reg); in hclge_reset_wait()
3861 val = hclge_read_dev(&hdev->hw, reg); in hclge_reset_wait()
3866 dev_warn(&hdev->pdev->dev, in hclge_reset_wait()
3867 "Wait for reset timeout: %d\n", hdev->reset_type); in hclge_reset_wait()
3874 static int hclge_set_vf_rst(struct hclge_dev *hdev, int func_id, bool reset) in hclge_set_vf_rst() argument
3886 return hclge_cmd_send(&hdev->hw, &desc, 1); in hclge_set_vf_rst()
3889 static int hclge_set_all_vf_rst(struct hclge_dev *hdev, bool reset) in hclge_set_all_vf_rst() argument
3893 for (i = HCLGE_VF_VPORT_START_NUM; i < hdev->num_alloc_vport; i++) { in hclge_set_all_vf_rst()
3894 struct hclge_vport *vport = &hdev->vport[i]; in hclge_set_all_vf_rst()
3898 ret = hclge_set_vf_rst(hdev, vport->vport_id, reset); in hclge_set_all_vf_rst()
3900 dev_err(&hdev->pdev->dev, in hclge_set_all_vf_rst()
3912 hdev->reset_type == HNAE3_FUNC_RESET) { in hclge_set_all_vf_rst()
3924 dev_warn(&hdev->pdev->dev, in hclge_set_all_vf_rst()
3933 static void hclge_mailbox_service_task(struct hclge_dev *hdev) in hclge_mailbox_service_task() argument
3935 if (!test_and_clear_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state) || in hclge_mailbox_service_task()
3936 test_bit(HCLGE_COMM_STATE_CMD_DISABLE, &hdev->hw.hw.comm_state) || in hclge_mailbox_service_task()
3937 test_and_set_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state)) in hclge_mailbox_service_task()
3940 if (time_is_before_jiffies(hdev->last_mbx_scheduled + in hclge_mailbox_service_task()
3942 dev_warn(&hdev->pdev->dev, in hclge_mailbox_service_task()
3944 jiffies_to_msecs(jiffies - hdev->last_mbx_scheduled), in hclge_mailbox_service_task()
3947 hclge_mbx_handler(hdev); in hclge_mailbox_service_task()
3949 clear_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state); in hclge_mailbox_service_task()
3952 static void hclge_func_reset_sync_vf(struct hclge_dev *hdev) in hclge_func_reset_sync_vf() argument
3964 hclge_mailbox_service_task(hdev); in hclge_func_reset_sync_vf()
3966 ret = hclge_cmd_send(&hdev->hw, &desc, 1); in hclge_func_reset_sync_vf()
3974 dev_warn(&hdev->pdev->dev, "sync with VF fail %d!\n", in hclge_func_reset_sync_vf()
3984 dev_warn(&hdev->pdev->dev, "sync with VF timeout!\n"); in hclge_func_reset_sync_vf()
3987 void hclge_report_hw_error(struct hclge_dev *hdev, in hclge_report_hw_error() argument
3990 struct hnae3_client *client = hdev->nic_client; in hclge_report_hw_error()
3993 !test_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state)) in hclge_report_hw_error()
3996 client->ops->process_hw_error(&hdev->vport[0].nic, type); in hclge_report_hw_error()
3999 static void hclge_handle_imp_error(struct hclge_dev *hdev) in hclge_handle_imp_error() argument
4003 reg_val = hclge_read_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG); in hclge_handle_imp_error()
4005 hclge_report_hw_error(hdev, HNAE3_IMP_RD_POISON_ERROR); in hclge_handle_imp_error()
4007 hclge_write_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG, reg_val); in hclge_handle_imp_error()
4011 hclge_report_hw_error(hdev, HNAE3_CMDQ_ECC_ERROR); in hclge_handle_imp_error()
4013 hclge_write_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG, reg_val); in hclge_handle_imp_error()
4017 int hclge_func_reset_cmd(struct hclge_dev *hdev, int func_id) in hclge_func_reset_cmd() argument
4027 ret = hclge_cmd_send(&hdev->hw, &desc, 1); in hclge_func_reset_cmd()
4029 dev_err(&hdev->pdev->dev, in hclge_func_reset_cmd()
4035 static void hclge_do_reset(struct hclge_dev *hdev) in hclge_do_reset() argument
4037 struct hnae3_handle *handle = &hdev->vport[0].nic; in hclge_do_reset()
4038 struct pci_dev *pdev = hdev->pdev; in hclge_do_reset()
4044 hclge_read_dev(&hdev->hw, HCLGE_FUN_RST_ING), in hclge_do_reset()
4045 hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG)); in hclge_do_reset()
4049 switch (hdev->reset_type) { in hclge_do_reset()
4052 val = hclge_read_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG); in hclge_do_reset()
4054 hclge_write_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG, val); in hclge_do_reset()
4058 val = hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG); in hclge_do_reset()
4060 hclge_write_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG, val); in hclge_do_reset()
4065 set_bit(HNAE3_FUNC_RESET, &hdev->reset_pending); in hclge_do_reset()
4066 hclge_reset_task_schedule(hdev); in hclge_do_reset()
4070 "unsupported reset type: %d\n", hdev->reset_type); in hclge_do_reset()
4079 struct hclge_dev *hdev = ae_dev->priv; in hclge_get_reset_level() local
4099 if (hdev->reset_type != HNAE3_NONE_RESET && in hclge_get_reset_level()
4100 rst_level < hdev->reset_type) in hclge_get_reset_level()
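
Note: 4099-4102 make reset levels escalate-only: while a reset is in progress, a request for a lower-priority level is promoted to the level already being handled, never downgraded. The promotion itself is not in the matches, so the assignment below is reconstructed:

    if (hdev->reset_type != HNAE3_NONE_RESET &&
        rst_level < hdev->reset_type)
            rst_level = hdev->reset_type;
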
4106 static void hclge_clear_reset_cause(struct hclge_dev *hdev) in hclge_clear_reset_cause() argument
4110 switch (hdev->reset_type) { in hclge_clear_reset_cause()
4127 if (hdev->ae_dev->dev_version < HNAE3_DEVICE_VERSION_V2) in hclge_clear_reset_cause()
4128 hclge_write_dev(&hdev->hw, HCLGE_MISC_RESET_STS_REG, in hclge_clear_reset_cause()
4131 hclge_enable_vector(&hdev->misc_vector, true); in hclge_clear_reset_cause()
4134 static void hclge_reset_handshake(struct hclge_dev *hdev, bool enable) in hclge_reset_handshake() argument
4138 reg_val = hclge_read_dev(&hdev->hw, HCLGE_COMM_NIC_CSQ_DEPTH_REG); in hclge_reset_handshake()
4144 hclge_write_dev(&hdev->hw, HCLGE_COMM_NIC_CSQ_DEPTH_REG, reg_val); in hclge_reset_handshake()
4147 static int hclge_func_reset_notify_vf(struct hclge_dev *hdev) in hclge_func_reset_notify_vf() argument
4151 ret = hclge_set_all_vf_rst(hdev, true); in hclge_func_reset_notify_vf()
4155 hclge_func_reset_sync_vf(hdev); in hclge_func_reset_notify_vf()
4160 static int hclge_reset_prepare_wait(struct hclge_dev *hdev) in hclge_reset_prepare_wait() argument
4165 switch (hdev->reset_type) { in hclge_reset_prepare_wait()
4167 ret = hclge_func_reset_notify_vf(hdev); in hclge_reset_prepare_wait()
4171 ret = hclge_func_reset_cmd(hdev, 0); in hclge_reset_prepare_wait()
4173 dev_err(&hdev->pdev->dev, in hclge_reset_prepare_wait()
4183 set_bit(HCLGE_COMM_STATE_CMD_DISABLE, &hdev->hw.hw.comm_state); in hclge_reset_prepare_wait()
4184 hdev->rst_stats.pf_rst_cnt++; in hclge_reset_prepare_wait()
4187 ret = hclge_func_reset_notify_vf(hdev); in hclge_reset_prepare_wait()
4192 hclge_handle_imp_error(hdev); in hclge_reset_prepare_wait()
4193 reg_val = hclge_read_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG); in hclge_reset_prepare_wait()
4194 hclge_write_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG, in hclge_reset_prepare_wait()
4203 hclge_reset_handshake(hdev, true); in hclge_reset_prepare_wait()
4204 dev_info(&hdev->pdev->dev, "prepare wait ok\n"); in hclge_reset_prepare_wait()
4209 static void hclge_show_rst_info(struct hclge_dev *hdev) in hclge_show_rst_info() argument
4217 hclge_dbg_dump_rst_info(hdev, buf, HCLGE_DBG_RESET_INFO_LEN); in hclge_show_rst_info()
4219 dev_info(&hdev->pdev->dev, "dump reset info:\n%s", buf); in hclge_show_rst_info()
4224 static bool hclge_reset_err_handle(struct hclge_dev *hdev) in hclge_reset_err_handle() argument
4228 if (hdev->reset_pending) { in hclge_reset_err_handle()
4229 dev_info(&hdev->pdev->dev, "Reset pending %lu\n", in hclge_reset_err_handle()
4230 hdev->reset_pending); in hclge_reset_err_handle()
4232 } else if (hclge_read_dev(&hdev->hw, HCLGE_MISC_VECTOR_INT_STS) & in hclge_reset_err_handle()
4234 dev_info(&hdev->pdev->dev, in hclge_reset_err_handle()
4236 hclge_clear_reset_cause(hdev); in hclge_reset_err_handle()
4238 } else if (hdev->rst_stats.reset_fail_cnt < MAX_RESET_FAIL_CNT) { in hclge_reset_err_handle()
4239 hdev->rst_stats.reset_fail_cnt++; in hclge_reset_err_handle()
4240 set_bit(hdev->reset_type, &hdev->reset_pending); in hclge_reset_err_handle()
4241 dev_info(&hdev->pdev->dev, in hclge_reset_err_handle()
4243 hdev->rst_stats.reset_fail_cnt); in hclge_reset_err_handle()
4247 hclge_clear_reset_cause(hdev); in hclge_reset_err_handle()
4250 hclge_reset_handshake(hdev, true); in hclge_reset_err_handle()
4252 dev_err(&hdev->pdev->dev, "Reset fail!\n"); in hclge_reset_err_handle()
4254 hclge_show_rst_info(hdev); in hclge_reset_err_handle()
4256 set_bit(HCLGE_STATE_RST_FAIL, &hdev->state); in hclge_reset_err_handle()
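
hclge_reset_err_handle() walks a decision ladder: retry if a reset is still pending, retry if the misc interrupt status reports a fresh hardware reset, retry with an incremented failure count while under MAX_RESET_FAIL_CNT, and otherwise give up and mark the RST_FAIL state. A compact model of that ladder, with the struct and the signalling flag invented for illustration:

#include <stdbool.h>
#include <stdio.h>

#define MAX_RESET_FAIL_CNT 5  /* the real limit is a driver macro */

struct rst_state {
    unsigned long reset_pending;      /* bitmap of pending levels        */
    unsigned int  fail_cnt;
    bool          hw_reset_signalled; /* new reset source in IRQ status  */
};

/* Returns true if the reset task should be rescheduled. */
static bool reset_err_handle(struct rst_state *st, int reset_type)
{
    if (st->reset_pending)
        return true;                  /* retry: something already queued */

    if (st->hw_reset_signalled)
        return true;                  /* retry: hardware raised a reset  */

    if (st->fail_cnt < MAX_RESET_FAIL_CNT) {
        st->fail_cnt++;
        st->reset_pending |= 1ul << reset_type;
        return true;                  /* retry the same level            */
    }

    fprintf(stderr, "Reset fail!\n");
    return false;                     /* give up, mark RST_FAIL          */
}

int main(void)
{
    struct rst_state st = { .fail_cnt = MAX_RESET_FAIL_CNT };

    printf("reschedule: %d\n", reset_err_handle(&st, 1));
    return 0;
}
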
4261 static void hclge_update_reset_level(struct hclge_dev *hdev) in hclge_update_reset_level() argument
4263 struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev); in hclge_update_reset_level()
4270 hclge_get_reset_level(ae_dev, &hdev->reset_request); in hclge_update_reset_level()
4277 &hdev->default_reset_request); in hclge_update_reset_level()
4279 set_bit(reset_level, &hdev->reset_request); in hclge_update_reset_level()
4282 static int hclge_set_rst_done(struct hclge_dev *hdev) in hclge_set_rst_done() argument
4292 ret = hclge_cmd_send(&hdev->hw, &desc, 1); in hclge_set_rst_done()
4298 dev_warn(&hdev->pdev->dev, in hclge_set_rst_done()
4303 dev_err(&hdev->pdev->dev, "assert PF reset done fail %d!\n", in hclge_set_rst_done()
4310 static int hclge_reset_prepare_up(struct hclge_dev *hdev) in hclge_reset_prepare_up() argument
4314 switch (hdev->reset_type) { in hclge_reset_prepare_up()
4317 ret = hclge_set_all_vf_rst(hdev, false); in hclge_reset_prepare_up()
4321 ret = hclge_set_rst_done(hdev); in hclge_reset_prepare_up()
4328 hclge_reset_handshake(hdev, false); in hclge_reset_prepare_up()
4333 static int hclge_reset_stack(struct hclge_dev *hdev) in hclge_reset_stack() argument
4337 ret = hclge_notify_client(hdev, HNAE3_UNINIT_CLIENT); in hclge_reset_stack()
4341 ret = hclge_reset_ae_dev(hdev->ae_dev); in hclge_reset_stack()
4345 return hclge_notify_client(hdev, HNAE3_INIT_CLIENT); in hclge_reset_stack()
4348 static int hclge_reset_prepare(struct hclge_dev *hdev) in hclge_reset_prepare() argument
4352 hdev->rst_stats.reset_cnt++; in hclge_reset_prepare()
4354 ret = hclge_notify_roce_client(hdev, HNAE3_DOWN_CLIENT); in hclge_reset_prepare()
4359 ret = hclge_notify_client(hdev, HNAE3_DOWN_CLIENT); in hclge_reset_prepare()
4364 return hclge_reset_prepare_wait(hdev); in hclge_reset_prepare()
4367 static int hclge_reset_rebuild(struct hclge_dev *hdev) in hclge_reset_rebuild() argument
4371 hdev->rst_stats.hw_reset_done_cnt++; in hclge_reset_rebuild()
4373 ret = hclge_notify_roce_client(hdev, HNAE3_UNINIT_CLIENT); in hclge_reset_rebuild()
4378 ret = hclge_reset_stack(hdev); in hclge_reset_rebuild()
4383 hclge_clear_reset_cause(hdev); in hclge_reset_rebuild()
4385 ret = hclge_notify_roce_client(hdev, HNAE3_INIT_CLIENT); in hclge_reset_rebuild()
4390 hdev->rst_stats.reset_fail_cnt < HCLGE_RESET_MAX_FAIL_CNT - 1) in hclge_reset_rebuild()
4393 ret = hclge_reset_prepare_up(hdev); in hclge_reset_rebuild()
4398 ret = hclge_notify_client(hdev, HNAE3_UP_CLIENT); in hclge_reset_rebuild()
4403 ret = hclge_notify_roce_client(hdev, HNAE3_UP_CLIENT); in hclge_reset_rebuild()
4407 hdev->last_reset_time = jiffies; in hclge_reset_rebuild()
4408 hdev->rst_stats.reset_fail_cnt = 0; in hclge_reset_rebuild()
4409 hdev->rst_stats.reset_done_cnt++; in hclge_reset_rebuild()
4410 clear_bit(HCLGE_STATE_RST_FAIL, &hdev->state); in hclge_reset_rebuild()
4412 hclge_update_reset_level(hdev); in hclge_reset_rebuild()
4417 static void hclge_reset(struct hclge_dev *hdev) in hclge_reset() argument
4419 if (hclge_reset_prepare(hdev)) in hclge_reset()
4422 if (hclge_reset_wait(hdev)) in hclge_reset()
4425 if (hclge_reset_rebuild(hdev)) in hclge_reset()
4431 if (hclge_reset_err_handle(hdev)) in hclge_reset()
4432 hclge_reset_task_schedule(hdev); in hclge_reset()
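
hclge_reset() itself is a three-phase pipeline, prepare, wait for hardware, then rebuild, falling through to the error handler (and a possible reschedule) if any phase fails. A sketch of that control flow with hypothetical phase stubs:

#include <stdbool.h>
#include <stdio.h>

/* Hypothetical phase stubs mirroring the three calls in the listing. */
static int  prepare(void)    { puts("prepare"); return 0; }
static int  wait_hw(void)    { puts("wait");    return 0; }
static int  rebuild(void)    { puts("rebuild"); return 0; }
static bool err_handle(void) { return false; }  /* see the ladder sketch above */
static void reschedule(void) { puts("reschedule reset task"); }

static void reset(void)
{
    if (prepare())
        goto err;
    if (wait_hw())
        goto err;
    if (rebuild())
        goto err;
    return;
err:
    if (err_handle())
        reschedule();
}

int main(void) { reset(); return 0; }
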
4438 struct hclge_dev *hdev = ae_dev->priv; in hclge_reset_event() local
4455 if (time_before(jiffies, (hdev->last_reset_time + in hclge_reset_event()
4457 mod_timer(&hdev->reset_timer, jiffies + HCLGE_RESET_INTERVAL); in hclge_reset_event()
4461 if (hdev->default_reset_request) { in hclge_reset_event()
4462 hdev->reset_level = in hclge_reset_event()
4464 &hdev->default_reset_request); in hclge_reset_event()
4465 } else if (time_after(jiffies, (hdev->last_reset_time + 4 * 5 * HZ))) { in hclge_reset_event()
4466 hdev->reset_level = HNAE3_FUNC_RESET; in hclge_reset_event()
4469 dev_info(&hdev->pdev->dev, "received reset event, reset type is %d\n", in hclge_reset_event()
4470 hdev->reset_level); in hclge_reset_event()
4473 set_bit(hdev->reset_level, &hdev->reset_request); in hclge_reset_event()
4474 hclge_reset_task_schedule(hdev); in hclge_reset_event()
4476 if (hdev->reset_level < HNAE3_GLOBAL_RESET) in hclge_reset_event()
4477 hdev->reset_level++; in hclge_reset_event()
4483 struct hclge_dev *hdev = ae_dev->priv; in hclge_set_def_reset_request() local
4485 set_bit(rst_type, &hdev->default_reset_request); in hclge_set_def_reset_request()
4490 struct hclge_dev *hdev = from_timer(hdev, t, reset_timer); in hclge_reset_timer() local
4495 if (!hdev->default_reset_request) in hclge_reset_timer()
4498 dev_info(&hdev->pdev->dev, in hclge_reset_timer()
4500 hclge_reset_event(hdev->pdev, NULL); in hclge_reset_timer()
4503 static void hclge_reset_subtask(struct hclge_dev *hdev) in hclge_reset_subtask() argument
4505 struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev); in hclge_reset_subtask()
4516 hdev->last_reset_time = jiffies; in hclge_reset_subtask()
4517 hdev->reset_type = hclge_get_reset_level(ae_dev, &hdev->reset_pending); in hclge_reset_subtask()
4518 if (hdev->reset_type != HNAE3_NONE_RESET) in hclge_reset_subtask()
4519 hclge_reset(hdev); in hclge_reset_subtask()
4522 hdev->reset_type = hclge_get_reset_level(ae_dev, &hdev->reset_request); in hclge_reset_subtask()
4523 if (hdev->reset_type != HNAE3_NONE_RESET) in hclge_reset_subtask()
4524 hclge_do_reset(hdev); in hclge_reset_subtask()
4526 hdev->reset_type = HNAE3_NONE_RESET; in hclge_reset_subtask()
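
hclge_reset_subtask() services resets in two passes: it first resolves the highest level already pending (something hardware has started) and performs it, and only then looks at newly requested levels and triggers them. A simplified model of that two-pass priority, with an assumed level ordering:

#include <stdio.h>

enum level { NONE = 0, FUNC, GLOBAL, IMP };

/* Highest pending level wins; the ordering here is illustrative. */
static enum level get_level(unsigned long *bitmap)
{
    if (*bitmap & (1ul << IMP))    return IMP;
    if (*bitmap & (1ul << GLOBAL)) return GLOBAL;
    if (*bitmap & (1ul << FUNC))   return FUNC;
    return NONE;
}

static void reset_subtask(unsigned long *pending, unsigned long *request)
{
    /* 1. finish what hardware already started */
    if (get_level(pending) != NONE)
        puts("hclge_reset: service pending reset");

    /* 2. only then trigger newly requested resets */
    if (get_level(request) != NONE)
        puts("hclge_do_reset: trigger requested reset");
}

int main(void)
{
    unsigned long pending = 1ul << FUNC, request = 0;

    reset_subtask(&pending, &request);
    return 0;
}

Handling pending before requested keeps the driver from kicking off a new reset while an earlier one is still mid-flight.
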
4529 static void hclge_handle_err_reset_request(struct hclge_dev *hdev) in hclge_handle_err_reset_request() argument
4531 struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev); in hclge_handle_err_reset_request()
4540 if (hdev->default_reset_request && ae_dev->ops->reset_event) in hclge_handle_err_reset_request()
4541 ae_dev->ops->reset_event(hdev->pdev, NULL); in hclge_handle_err_reset_request()
4544 hclge_enable_vector(&hdev->misc_vector, true); in hclge_handle_err_reset_request()
4547 static void hclge_handle_err_recovery(struct hclge_dev *hdev) in hclge_handle_err_recovery() argument
4549 struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev); in hclge_handle_err_recovery()
4553 if (hclge_find_error_source(hdev)) { in hclge_handle_err_recovery()
4555 hclge_handle_mac_tnl(hdev); in hclge_handle_err_recovery()
4556 hclge_handle_vf_queue_err_ras(hdev); in hclge_handle_err_recovery()
4559 hclge_handle_err_reset_request(hdev); in hclge_handle_err_recovery()
4562 static void hclge_misc_err_recovery(struct hclge_dev *hdev) in hclge_misc_err_recovery() argument
4564 struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev); in hclge_misc_err_recovery()
4565 struct device *dev = &hdev->pdev->dev; in hclge_misc_err_recovery()
4568 msix_sts_reg = hclge_read_dev(&hdev->hw, HCLGE_MISC_VECTOR_INT_STS); in hclge_misc_err_recovery()
4571 (hdev, &hdev->default_reset_request)) in hclge_misc_err_recovery()
4578 hclge_handle_err_reset_request(hdev); in hclge_misc_err_recovery()
4581 static void hclge_errhand_service_task(struct hclge_dev *hdev) in hclge_errhand_service_task() argument
4583 if (!test_and_clear_bit(HCLGE_STATE_ERR_SERVICE_SCHED, &hdev->state)) in hclge_errhand_service_task()
4586 if (hnae3_dev_ras_imp_supported(hdev)) in hclge_errhand_service_task()
4587 hclge_handle_err_recovery(hdev); in hclge_errhand_service_task()
4589 hclge_misc_err_recovery(hdev); in hclge_errhand_service_task()
4592 static void hclge_reset_service_task(struct hclge_dev *hdev) in hclge_reset_service_task() argument
4594 if (!test_and_clear_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state)) in hclge_reset_service_task()
4597 if (time_is_before_jiffies(hdev->last_rst_scheduled + in hclge_reset_service_task()
4599 dev_warn(&hdev->pdev->dev, in hclge_reset_service_task()
4601 jiffies_to_msecs(jiffies - hdev->last_rst_scheduled), in hclge_reset_service_task()
4604 down(&hdev->reset_sem); in hclge_reset_service_task()
4605 set_bit(HCLGE_STATE_RST_HANDLING, &hdev->state); in hclge_reset_service_task()
4607 hclge_reset_subtask(hdev); in hclge_reset_service_task()
4609 clear_bit(HCLGE_STATE_RST_HANDLING, &hdev->state); in hclge_reset_service_task()
4610 up(&hdev->reset_sem); in hclge_reset_service_task()
4613 static void hclge_update_vport_alive(struct hclge_dev *hdev) in hclge_update_vport_alive() argument
4621 for (i = 1; i < hdev->num_alloc_vport; i++) { in hclge_update_vport_alive()
4622 struct hclge_vport *vport = &hdev->vport[i]; in hclge_update_vport_alive()
4630 dev_warn(&hdev->pdev->dev, in hclge_update_vport_alive()
4637 static void hclge_periodic_service_task(struct hclge_dev *hdev) in hclge_periodic_service_task() argument
4641 if (test_bit(HCLGE_STATE_RST_FAIL, &hdev->state)) in hclge_periodic_service_task()
4647 hclge_update_link_status(hdev); in hclge_periodic_service_task()
4648 hclge_sync_mac_table(hdev); in hclge_periodic_service_task()
4649 hclge_sync_promisc_mode(hdev); in hclge_periodic_service_task()
4650 hclge_sync_fd_table(hdev); in hclge_periodic_service_task()
4652 if (time_is_after_jiffies(hdev->last_serv_processed + HZ)) { in hclge_periodic_service_task()
4653 delta = jiffies - hdev->last_serv_processed; in hclge_periodic_service_task()
4661 hdev->serv_processed_cnt++; in hclge_periodic_service_task()
4662 hclge_update_vport_alive(hdev); in hclge_periodic_service_task()
4664 if (test_bit(HCLGE_STATE_DOWN, &hdev->state)) { in hclge_periodic_service_task()
4665 hdev->last_serv_processed = jiffies; in hclge_periodic_service_task()
4669 if (!(hdev->serv_processed_cnt % HCLGE_STATS_TIMER_INTERVAL)) in hclge_periodic_service_task()
4670 hclge_update_stats_for_all(hdev); in hclge_periodic_service_task()
4672 hclge_update_port_info(hdev); in hclge_periodic_service_task()
4673 hclge_sync_vlan_filter(hdev); in hclge_periodic_service_task()
4675 if (!(hdev->serv_processed_cnt % HCLGE_ARFS_EXPIRE_INTERVAL)) in hclge_periodic_service_task()
4676 hclge_rfs_filter_expire(hdev); in hclge_periodic_service_task()
4678 hdev->last_serv_processed = jiffies; in hclge_periodic_service_task()
4681 hclge_task_schedule(hdev, delta); in hclge_periodic_service_task()
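
hclge_periodic_service_task() rate-limits its work with simple round counters: stats are refreshed only every HCLGE_STATS_TIMER_INTERVAL invocations and aRFS expiry runs on its own interval, while a DOWN device short-circuits before the heavier work. A sketch of that modulo cadence, with made-up interval values:

#include <stdbool.h>
#include <stdio.h>

#define STATS_INTERVAL 300  /* illustrative round counts;      */
#define ARFS_INTERVAL  5    /* the real values are macros      */

static void periodic_service(unsigned int *round, bool down)
{
    (*round)++;

    if (down)
        return;  /* the real task records the time and returns early */

    if (!(*round % STATS_INTERVAL))
        puts("update stats");

    if (!(*round % ARFS_INTERVAL))
        puts("expire aRFS filters");
}

int main(void)
{
    unsigned int round = 0;

    for (int i = 0; i < 10; i++)
        periodic_service(&round, false);
    return 0;
}
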
4684 static void hclge_ptp_service_task(struct hclge_dev *hdev) in hclge_ptp_service_task() argument
4688 if (!test_bit(HCLGE_STATE_PTP_EN, &hdev->state) || in hclge_ptp_service_task()
4689 !test_bit(HCLGE_STATE_PTP_TX_HANDLING, &hdev->state) || in hclge_ptp_service_task()
4690 !time_is_before_jiffies(hdev->ptp->tx_start + HZ)) in hclge_ptp_service_task()
4694 spin_lock_irqsave(&hdev->ptp->lock, flags); in hclge_ptp_service_task()
4699 if (test_bit(HCLGE_STATE_PTP_TX_HANDLING, &hdev->state)) in hclge_ptp_service_task()
4700 hclge_ptp_clean_tx_hwts(hdev); in hclge_ptp_service_task()
4702 spin_unlock_irqrestore(&hdev->ptp->lock, flags); in hclge_ptp_service_task()
4707 struct hclge_dev *hdev = in hclge_service_task() local
4710 hclge_errhand_service_task(hdev); in hclge_service_task()
4711 hclge_reset_service_task(hdev); in hclge_service_task()
4712 hclge_ptp_service_task(hdev); in hclge_service_task()
4713 hclge_mailbox_service_task(hdev); in hclge_service_task()
4714 hclge_periodic_service_task(hdev); in hclge_service_task()
4720 hclge_errhand_service_task(hdev); in hclge_service_task()
4721 hclge_reset_service_task(hdev); in hclge_service_task()
4722 hclge_mailbox_service_task(hdev); in hclge_service_task()
4736 static void hclge_get_vector_info(struct hclge_dev *hdev, u16 idx, in hclge_get_vector_info() argument
4741 vector_info->vector = pci_irq_vector(hdev->pdev, idx); in hclge_get_vector_info()
4745 vector_info->io_addr = hdev->hw.hw.io_base + in hclge_get_vector_info()
4749 vector_info->io_addr = hdev->hw.hw.io_base + in hclge_get_vector_info()
4756 hdev->vector_status[idx] = hdev->vport[0].vport_id; in hclge_get_vector_info()
4757 hdev->vector_irq[idx] = vector_info->vector; in hclge_get_vector_info()
4765 struct hclge_dev *hdev = vport->back; in hclge_get_vector() local
4770 vector_num = min_t(u16, hdev->num_nic_msi - 1, vector_num); in hclge_get_vector()
4771 vector_num = min(hdev->num_msi_left, vector_num); in hclge_get_vector()
4774 while (++i < hdev->num_nic_msi) { in hclge_get_vector()
4775 if (hdev->vector_status[i] == HCLGE_INVALID_VPORT) { in hclge_get_vector()
4776 hclge_get_vector_info(hdev, i, vector); in hclge_get_vector()
4784 hdev->num_msi_left -= alloc; in hclge_get_vector()
4785 hdev->num_msi_used += alloc; in hclge_get_vector()
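
hclge_get_vector() clamps the request against what is left, then scans the vector status table for free slots (HCLGE_INVALID_VPORT) and claims one per requested vector; the loop appears to start at index 1, presumably because slot 0 is the misc vector. A stand-alone sketch of that scan, with the reservation of slot 0 treated as an assumption:

#include <stdio.h>

#define INVALID_VPORT (-1)

static int get_vectors(int *status, int n_slots, int want, int vport_id)
{
    int alloc = 0;

    /* slot 0 assumed reserved for the misc vector */
    for (int i = 1; i < n_slots && alloc < want; i++) {
        if (status[i] == INVALID_VPORT) {
            status[i] = vport_id;   /* claim the slot */
            alloc++;
        }
    }
    return alloc;  /* caller adjusts num_msi_left / num_msi_used */
}

int main(void)
{
    int status[8];

    for (int i = 0; i < 8; i++)
        status[i] = INVALID_VPORT;
    printf("allocated %d vectors\n", get_vectors(status, 8, 3, 0));
    return 0;
}
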
4790 static int hclge_get_vector_index(struct hclge_dev *hdev, int vector) in hclge_get_vector_index() argument
4794 for (i = 0; i < hdev->num_msi; i++) in hclge_get_vector_index()
4795 if (vector == hdev->vector_irq[i]) in hclge_get_vector_index()
4804 struct hclge_dev *hdev = vport->back; in hclge_put_vector() local
4807 vector_id = hclge_get_vector_index(hdev, vector); in hclge_put_vector()
4809 dev_err(&hdev->pdev->dev, in hclge_put_vector()
4814 hclge_free_vector(hdev, vector_id); in hclge_put_vector()
4839 struct hclge_dev *hdev = vport->back; in hclge_set_rss() local
4840 struct hclge_comm_rss_cfg *rss_cfg = &hdev->rss_cfg; in hclge_set_rss()
4843 ret = hclge_comm_set_rss_hash_key(rss_cfg, &hdev->hw.hw, key, hfunc); in hclge_set_rss()
4845 dev_err(&hdev->pdev->dev, "invalid hfunc type %u\n", hfunc); in hclge_set_rss()
4854 return hclge_comm_set_rss_indir_table(ae_dev, &hdev->hw.hw, in hclge_set_rss()
4862 struct hclge_dev *hdev = vport->back; in hclge_set_rss_tuple() local
4865 ret = hclge_comm_set_rss_tuple(hdev->ae_dev, &hdev->hw.hw, in hclge_set_rss_tuple()
4866 &hdev->rss_cfg, nfc); in hclge_set_rss_tuple()
4868 dev_err(&hdev->pdev->dev, in hclge_set_rss_tuple()
4898 struct hclge_dev *hdev = vport->back; in hclge_get_tc_size() local
4900 return hdev->pf_rss_size_max; in hclge_get_tc_size()
4903 static int hclge_init_rss_tc_mode(struct hclge_dev *hdev) in hclge_init_rss_tc_mode() argument
4905 struct hnae3_ae_dev *ae_dev = hdev->ae_dev; in hclge_init_rss_tc_mode()
4906 struct hclge_vport *vport = hdev->vport; in hclge_init_rss_tc_mode()
4920 if (!(hdev->hw_tc_map & BIT(i))) in hclge_init_rss_tc_mode()
4929 dev_err(&hdev->pdev->dev, in hclge_init_rss_tc_mode()
4943 return hclge_comm_set_rss_tc_mode(&hdev->hw.hw, tc_offset, tc_valid, in hclge_init_rss_tc_mode()
4947 int hclge_rss_init_hw(struct hclge_dev *hdev) in hclge_rss_init_hw() argument
4949 u16 *rss_indir = hdev->rss_cfg.rss_indirection_tbl; in hclge_rss_init_hw()
4950 u8 *key = hdev->rss_cfg.rss_hash_key; in hclge_rss_init_hw()
4951 u8 hfunc = hdev->rss_cfg.rss_algo; in hclge_rss_init_hw()
4954 ret = hclge_comm_set_rss_indir_table(hdev->ae_dev, &hdev->hw.hw, in hclge_rss_init_hw()
4959 ret = hclge_comm_set_rss_algo_key(&hdev->hw.hw, hfunc, key); in hclge_rss_init_hw()
4963 ret = hclge_comm_set_rss_input_tuple(&hdev->hw.hw, &hdev->rss_cfg); in hclge_rss_init_hw()
4967 return hclge_init_rss_tc_mode(hdev); in hclge_rss_init_hw()
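
hclge_rss_init_hw() programs RSS strictly in order: indirection table, then hash algorithm and key, then the input-tuple selection, and finally the per-TC size/offset mode, aborting on the first failure. A minimal sketch of that ordered pipeline with stub stand-ins for the four hclge_comm_* calls:

/* Illustrative stand-ins for the four hardware-programming calls. */
static int set_indir_table(void) { return 0; }
static int set_algo_key(void)    { return 0; }
static int set_input_tuple(void) { return 0; }
static int set_tc_mode(void)     { return 0; }

static int rss_init_hw(void)
{
    int ret;

    ret = set_indir_table();   /* queue-spreading table first     */
    if (ret)
        return ret;
    ret = set_algo_key();      /* then hash function and key      */
    if (ret)
        return ret;
    ret = set_input_tuple();   /* then which header fields hash   */
    if (ret)
        return ret;
    return set_tc_mode();      /* finally size/offset per TC      */
}

int main(void) { return rss_init_hw(); }
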
4974 struct hclge_dev *hdev = vport->back; in hclge_bind_ring_with_vector() local
5011 status = hclge_cmd_send(&hdev->hw, &desc, 1); in hclge_bind_ring_with_vector()
5013 dev_err(&hdev->pdev->dev, in hclge_bind_ring_with_vector()
5037 status = hclge_cmd_send(&hdev->hw, &desc, 1); in hclge_bind_ring_with_vector()
5039 dev_err(&hdev->pdev->dev, in hclge_bind_ring_with_vector()
5052 struct hclge_dev *hdev = vport->back; in hclge_map_ring_to_vector() local
5055 vector_id = hclge_get_vector_index(hdev, vector); in hclge_map_ring_to_vector()
5057 dev_err(&hdev->pdev->dev, in hclge_map_ring_to_vector()
5069 struct hclge_dev *hdev = vport->back; in hclge_unmap_ring_frm_vector() local
5072 if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state)) in hclge_unmap_ring_frm_vector()
5075 vector_id = hclge_get_vector_index(hdev, vector); in hclge_unmap_ring_frm_vector()
5091 static int hclge_cmd_set_promisc_mode(struct hclge_dev *hdev, u8 vf_id, in hclge_cmd_set_promisc_mode() argument
5094 struct hclge_vport *vport = &hdev->vport[vf_id]; in hclge_cmd_set_promisc_mode()
5127 ret = hclge_cmd_send(&hdev->hw, &desc, 1); in hclge_cmd_set_promisc_mode()
5129 dev_err(&hdev->pdev->dev, in hclge_cmd_set_promisc_mode()
5147 struct hclge_dev *hdev = vport->back; in hclge_set_promisc_mode() local
5154 if (hdev->ae_dev->dev_version < HNAE3_DEVICE_VERSION_V2) in hclge_set_promisc_mode()
5168 static void hclge_sync_fd_state(struct hclge_dev *hdev) in hclge_sync_fd_state() argument
5170 if (hlist_empty(&hdev->fd_rule_list)) in hclge_sync_fd_state()
5171 hdev->fd_active_type = HCLGE_FD_RULE_NONE; in hclge_sync_fd_state()
5174 static void hclge_fd_inc_rule_cnt(struct hclge_dev *hdev, u16 location) in hclge_fd_inc_rule_cnt() argument
5176 if (!test_bit(location, hdev->fd_bmap)) { in hclge_fd_inc_rule_cnt()
5177 set_bit(location, hdev->fd_bmap); in hclge_fd_inc_rule_cnt()
5178 hdev->hclge_fd_rule_num++; in hclge_fd_inc_rule_cnt()
5182 static void hclge_fd_dec_rule_cnt(struct hclge_dev *hdev, u16 location) in hclge_fd_dec_rule_cnt() argument
5184 if (test_bit(location, hdev->fd_bmap)) { in hclge_fd_dec_rule_cnt()
5185 clear_bit(location, hdev->fd_bmap); in hclge_fd_dec_rule_cnt()
5186 hdev->hclge_fd_rule_num--; in hclge_fd_dec_rule_cnt()
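
The fd inc/dec rule-count helpers make the counter idempotent by guarding it with a location bitmap: adding an already-set location or deleting a clear one changes nothing, so hclge_fd_rule_num cannot drift. A self-contained model, using a single-word bitmap for brevity:

#include <stdio.h>

struct fd_table {
    unsigned long bmap;   /* one bit per rule location */
    unsigned int  num;
};

static void fd_inc(struct fd_table *t, unsigned int loc)
{
    if (!(t->bmap & (1ul << loc))) {   /* only count the first add   */
        t->bmap |= 1ul << loc;
        t->num++;
    }
}

static void fd_dec(struct fd_table *t, unsigned int loc)
{
    if (t->bmap & (1ul << loc)) {      /* only count a real delete   */
        t->bmap &= ~(1ul << loc);
        t->num--;
    }
}

int main(void)
{
    struct fd_table t = { 0 };

    fd_inc(&t, 3);
    fd_inc(&t, 3);                     /* no-op: already set         */
    printf("rules: %u\n", t.num);      /* prints 1                   */
    return 0;
}
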
5190 static void hclge_fd_free_node(struct hclge_dev *hdev, in hclge_fd_free_node() argument
5195 hclge_sync_fd_state(hdev); in hclge_fd_free_node()
5198 static void hclge_update_fd_rule_node(struct hclge_dev *hdev, in hclge_update_fd_rule_node() argument
5221 hclge_fd_dec_rule_cnt(hdev, old_rule->location); in hclge_update_fd_rule_node()
5222 hclge_fd_free_node(hdev, old_rule); in hclge_update_fd_rule_node()
5237 hclge_fd_dec_rule_cnt(hdev, old_rule->location); in hclge_update_fd_rule_node()
5238 hclge_fd_free_node(hdev, old_rule); in hclge_update_fd_rule_node()
5280 static int hclge_fd_set_user_def_cmd(struct hclge_dev *hdev, in hclge_fd_set_user_def_cmd() argument
5309 ret = hclge_cmd_send(&hdev->hw, &desc, 1); in hclge_fd_set_user_def_cmd()
5311 dev_err(&hdev->pdev->dev, in hclge_fd_set_user_def_cmd()
5316 static void hclge_sync_fd_user_def_cfg(struct hclge_dev *hdev, bool locked) in hclge_sync_fd_user_def_cfg() argument
5320 if (!test_and_clear_bit(HCLGE_STATE_FD_USER_DEF_CHANGED, &hdev->state)) in hclge_sync_fd_user_def_cfg()
5324 spin_lock_bh(&hdev->fd_rule_lock); in hclge_sync_fd_user_def_cfg()
5326 ret = hclge_fd_set_user_def_cmd(hdev, hdev->fd_cfg.user_def_cfg); in hclge_sync_fd_user_def_cfg()
5328 set_bit(HCLGE_STATE_FD_USER_DEF_CHANGED, &hdev->state); in hclge_sync_fd_user_def_cfg()
5331 spin_unlock_bh(&hdev->fd_rule_lock); in hclge_sync_fd_user_def_cfg()
5334 static int hclge_fd_check_user_def_refcnt(struct hclge_dev *hdev, in hclge_fd_check_user_def_refcnt() argument
5337 struct hlist_head *hlist = &hdev->fd_rule_list; in hclge_fd_check_user_def_refcnt()
5347 cfg = &hdev->fd_cfg.user_def_cfg[rule->ep.user_def.layer - 1]; in hclge_fd_check_user_def_refcnt()
5364 dev_err(&hdev->pdev->dev, in hclge_fd_check_user_def_refcnt()
5370 static void hclge_fd_inc_user_def_refcnt(struct hclge_dev *hdev, in hclge_fd_inc_user_def_refcnt() argument
5379 cfg = &hdev->fd_cfg.user_def_cfg[rule->ep.user_def.layer - 1]; in hclge_fd_inc_user_def_refcnt()
5382 set_bit(HCLGE_STATE_FD_USER_DEF_CHANGED, &hdev->state); in hclge_fd_inc_user_def_refcnt()
5387 static void hclge_fd_dec_user_def_refcnt(struct hclge_dev *hdev, in hclge_fd_dec_user_def_refcnt() argument
5396 cfg = &hdev->fd_cfg.user_def_cfg[rule->ep.user_def.layer - 1]; in hclge_fd_dec_user_def_refcnt()
5403 set_bit(HCLGE_STATE_FD_USER_DEF_CHANGED, &hdev->state); in hclge_fd_dec_user_def_refcnt()
5407 static void hclge_update_fd_list(struct hclge_dev *hdev, in hclge_update_fd_list() argument
5411 struct hlist_head *hlist = &hdev->fd_rule_list; in hclge_update_fd_list()
5416 hclge_fd_dec_user_def_refcnt(hdev, fd_rule); in hclge_update_fd_list()
5418 hclge_fd_inc_user_def_refcnt(hdev, new_rule); in hclge_update_fd_list()
5419 hclge_sync_fd_user_def_cfg(hdev, true); in hclge_update_fd_list()
5421 hclge_update_fd_rule_node(hdev, fd_rule, new_rule, state); in hclge_update_fd_list()
5429 dev_warn(&hdev->pdev->dev, in hclge_update_fd_list()
5435 hclge_fd_inc_user_def_refcnt(hdev, new_rule); in hclge_update_fd_list()
5436 hclge_sync_fd_user_def_cfg(hdev, true); in hclge_update_fd_list()
5439 hclge_fd_inc_rule_cnt(hdev, new_rule->location); in hclge_update_fd_list()
5442 set_bit(HCLGE_STATE_FD_TBL_CHANGED, &hdev->state); in hclge_update_fd_list()
5443 hclge_task_schedule(hdev, 0); in hclge_update_fd_list()
5447 static int hclge_get_fd_mode(struct hclge_dev *hdev, u8 *fd_mode) in hclge_get_fd_mode() argument
5457 ret = hclge_cmd_send(&hdev->hw, &desc, 1); in hclge_get_fd_mode()
5459 dev_err(&hdev->pdev->dev, "get fd mode fail, ret=%d\n", ret); in hclge_get_fd_mode()
5468 static int hclge_get_fd_allocation(struct hclge_dev *hdev, in hclge_get_fd_allocation() argument
5482 ret = hclge_cmd_send(&hdev->hw, &desc, 1); in hclge_get_fd_allocation()
5484 dev_err(&hdev->pdev->dev, "query fd allocation fail, ret=%d\n", in hclge_get_fd_allocation()
5497 static int hclge_set_fd_key_config(struct hclge_dev *hdev, in hclge_set_fd_key_config() argument
5508 stage = &hdev->fd_cfg.key_cfg[stage_num]; in hclge_set_fd_key_config()
5518 ret = hclge_cmd_send(&hdev->hw, &desc, 1); in hclge_set_fd_key_config()
5520 dev_err(&hdev->pdev->dev, "set fd key fail, ret=%d\n", ret); in hclge_set_fd_key_config()
5525 static void hclge_fd_disable_user_def(struct hclge_dev *hdev) in hclge_fd_disable_user_def() argument
5527 struct hclge_fd_user_def_cfg *cfg = hdev->fd_cfg.user_def_cfg; in hclge_fd_disable_user_def()
5529 spin_lock_bh(&hdev->fd_rule_lock); in hclge_fd_disable_user_def()
5530 memset(cfg, 0, sizeof(hdev->fd_cfg.user_def_cfg)); in hclge_fd_disable_user_def()
5531 spin_unlock_bh(&hdev->fd_rule_lock); in hclge_fd_disable_user_def()
5533 hclge_fd_set_user_def_cmd(hdev, cfg); in hclge_fd_disable_user_def()
5536 static int hclge_init_fd_config(struct hclge_dev *hdev) in hclge_init_fd_config() argument
5542 if (!hnae3_ae_dev_fd_supported(hdev->ae_dev)) in hclge_init_fd_config()
5545 ret = hclge_get_fd_mode(hdev, &hdev->fd_cfg.fd_mode); in hclge_init_fd_config()
5549 switch (hdev->fd_cfg.fd_mode) { in hclge_init_fd_config()
5551 hdev->fd_cfg.max_key_length = MAX_KEY_LENGTH; in hclge_init_fd_config()
5554 hdev->fd_cfg.max_key_length = MAX_KEY_LENGTH / 2; in hclge_init_fd_config()
5557 dev_err(&hdev->pdev->dev, in hclge_init_fd_config()
5559 hdev->fd_cfg.fd_mode); in hclge_init_fd_config()
5563 key_cfg = &hdev->fd_cfg.key_cfg[HCLGE_FD_STAGE_1]; in hclge_init_fd_config()
5576 if (hdev->fd_cfg.fd_mode == HCLGE_FD_MODE_DEPTH_2K_WIDTH_400B_STAGE_1) { in hclge_init_fd_config()
5579 if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V3) in hclge_init_fd_config()
5588 ret = hclge_get_fd_allocation(hdev, in hclge_init_fd_config()
5589 &hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1], in hclge_init_fd_config()
5590 &hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_2], in hclge_init_fd_config()
5591 &hdev->fd_cfg.cnt_num[HCLGE_FD_STAGE_1], in hclge_init_fd_config()
5592 &hdev->fd_cfg.cnt_num[HCLGE_FD_STAGE_2]); in hclge_init_fd_config()
5596 return hclge_set_fd_key_config(hdev, HCLGE_FD_STAGE_1); in hclge_init_fd_config()
5599 static int hclge_fd_tcam_config(struct hclge_dev *hdev, u8 stage, bool sel_x, in hclge_fd_tcam_config() argument
5632 ret = hclge_cmd_send(&hdev->hw, desc, 3); in hclge_fd_tcam_config()
5634 dev_err(&hdev->pdev->dev, in hclge_fd_tcam_config()
5641 static int hclge_fd_ad_config(struct hclge_dev *hdev, u8 stage, int loc, in hclge_fd_ad_config() argument
5644 struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev); in hclge_fd_ad_config()
5680 ret = hclge_cmd_send(&hdev->hw, &desc, 1); in hclge_fd_ad_config()
5682 dev_err(&hdev->pdev->dev, "fd ad config fail, ret=%d\n", ret); in hclge_fd_ad_config()
5810 static int hclge_config_key(struct hclge_dev *hdev, u8 stage, in hclge_config_key() argument
5813 struct hclge_fd_key_cfg *key_cfg = &hdev->fd_cfg.key_cfg[stage]; in hclge_config_key()
5841 meta_data_region = hdev->fd_cfg.max_key_length / 8 - in hclge_config_key()
5849 ret = hclge_fd_tcam_config(hdev, stage, false, rule->location, key_y, in hclge_config_key()
5852 dev_err(&hdev->pdev->dev, in hclge_config_key()
5858 ret = hclge_fd_tcam_config(hdev, stage, true, rule->location, key_x, in hclge_config_key()
5861 dev_err(&hdev->pdev->dev, in hclge_config_key()
5867 static int hclge_config_action(struct hclge_dev *hdev, u8 stage, in hclge_config_action() argument
5870 struct hclge_vport *vport = hdev->vport; in hclge_config_action()
5890 if (hdev->fd_cfg.cnt_num[HCLGE_FD_STAGE_1]) { in hclge_config_action()
5893 hdev->fd_cfg.cnt_num[HCLGE_FD_STAGE_1]; in hclge_config_action()
5905 return hclge_fd_ad_config(hdev, stage, ad_data.ad_id, &ad_data); in hclge_config_action()
6040 static int hclge_fd_check_ext_tuple(struct hclge_dev *hdev, in hclge_fd_check_ext_tuple() argument
6046 dev_err(&hdev->pdev->dev, "vlan-etype is not supported!\n"); in hclge_fd_check_ext_tuple()
6055 dev_err(&hdev->pdev->dev, in hclge_fd_check_ext_tuple()
6065 if (hdev->fd_cfg.fd_mode != in hclge_fd_check_ext_tuple()
6067 dev_err(&hdev->pdev->dev, in hclge_fd_check_ext_tuple()
6113 static int hclge_fd_parse_user_def_field(struct hclge_dev *hdev, in hclge_fd_parse_user_def_field() argument
6118 u32 tuple_active = hdev->fd_cfg.key_cfg[HCLGE_FD_STAGE_1].tuple_active; in hclge_fd_parse_user_def_field()
6138 dev_err(&hdev->pdev->dev, "user-def bytes are not supported\n"); in hclge_fd_parse_user_def_field()
6143 dev_err(&hdev->pdev->dev, in hclge_fd_parse_user_def_field()
6150 dev_err(&hdev->pdev->dev, "user-def offset can't be masked\n"); in hclge_fd_parse_user_def_field()
6156 dev_err(&hdev->pdev->dev, in hclge_fd_parse_user_def_field()
6169 static int hclge_fd_check_spec(struct hclge_dev *hdev, in hclge_fd_check_spec() argument
6177 if (fs->location >= hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1]) { in hclge_fd_check_spec()
6178 dev_err(&hdev->pdev->dev, in hclge_fd_check_spec()
6181 hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1] - 1); in hclge_fd_check_spec()
6185 ret = hclge_fd_parse_user_def_field(hdev, fs, unused_tuple, info); in hclge_fd_check_spec()
6212 if (hdev->fd_cfg.fd_mode != in hclge_fd_check_spec()
6214 dev_err(&hdev->pdev->dev, in hclge_fd_check_spec()
6223 dev_err(&hdev->pdev->dev, in hclge_fd_check_spec()
6230 dev_err(&hdev->pdev->dev, in hclge_fd_check_spec()
6236 return hclge_fd_check_ext_tuple(hdev, fs, unused_tuple); in hclge_fd_check_spec()
6431 static int hclge_fd_config_rule(struct hclge_dev *hdev, in hclge_fd_config_rule() argument
6436 ret = hclge_config_action(hdev, HCLGE_FD_STAGE_1, rule); in hclge_fd_config_rule()
6440 return hclge_config_key(hdev, HCLGE_FD_STAGE_1, rule); in hclge_fd_config_rule()
6443 static int hclge_add_fd_entry_common(struct hclge_dev *hdev, in hclge_add_fd_entry_common() argument
6448 spin_lock_bh(&hdev->fd_rule_lock); in hclge_add_fd_entry_common()
6450 if (hdev->fd_active_type != rule->rule_type && in hclge_add_fd_entry_common()
6451 (hdev->fd_active_type == HCLGE_FD_TC_FLOWER_ACTIVE || in hclge_add_fd_entry_common()
6452 hdev->fd_active_type == HCLGE_FD_EP_ACTIVE)) { in hclge_add_fd_entry_common()
6453 dev_err(&hdev->pdev->dev, in hclge_add_fd_entry_common()
6455 rule->rule_type, hdev->fd_active_type); in hclge_add_fd_entry_common()
6456 spin_unlock_bh(&hdev->fd_rule_lock); in hclge_add_fd_entry_common()
6460 ret = hclge_fd_check_user_def_refcnt(hdev, rule); in hclge_add_fd_entry_common()
6464 ret = hclge_clear_arfs_rules(hdev); in hclge_add_fd_entry_common()
6468 ret = hclge_fd_config_rule(hdev, rule); in hclge_add_fd_entry_common()
6473 hdev->fd_active_type = rule->rule_type; in hclge_add_fd_entry_common()
6474 hclge_update_fd_list(hdev, rule->state, rule->location, rule); in hclge_add_fd_entry_common()
6477 spin_unlock_bh(&hdev->fd_rule_lock); in hclge_add_fd_entry_common()
6484 struct hclge_dev *hdev = vport->back; in hclge_is_cls_flower_active() local
6486 return hdev->fd_active_type == HCLGE_FD_TC_FLOWER_ACTIVE; in hclge_is_cls_flower_active()
6489 static int hclge_fd_parse_ring_cookie(struct hclge_dev *hdev, u64 ring_cookie, in hclge_fd_parse_ring_cookie() argument
6492 struct hclge_vport *vport = hdev->vport; in hclge_fd_parse_ring_cookie()
6504 if (vf > hdev->num_req_vfs) { in hclge_fd_parse_ring_cookie()
6505 dev_err(&hdev->pdev->dev, in hclge_fd_parse_ring_cookie()
6507 vf - 1U, hdev->num_req_vfs); in hclge_fd_parse_ring_cookie()
6511 *vport_id = vf ? hdev->vport[vf].vport_id : vport->vport_id; in hclge_fd_parse_ring_cookie()
6512 tqps = hdev->vport[vf].nic.kinfo.num_tqps; in hclge_fd_parse_ring_cookie()
6515 dev_err(&hdev->pdev->dev, in hclge_fd_parse_ring_cookie()
6532 struct hclge_dev *hdev = vport->back; in hclge_add_fd_entry() local
6541 if (!hnae3_ae_dev_fd_supported(hdev->ae_dev)) { in hclge_add_fd_entry()
6542 dev_err(&hdev->pdev->dev, in hclge_add_fd_entry()
6547 if (!hdev->fd_en) { in hclge_add_fd_entry()
6548 dev_err(&hdev->pdev->dev, in hclge_add_fd_entry()
6555 ret = hclge_fd_check_spec(hdev, fs, &unused, &info); in hclge_add_fd_entry()
6559 ret = hclge_fd_parse_ring_cookie(hdev, fs->ring_cookie, &dst_vport_id, in hclge_add_fd_entry()
6582 ret = hclge_add_fd_entry_common(hdev, rule); in hclge_add_fd_entry()
6593 struct hclge_dev *hdev = vport->back; in hclge_del_fd_entry() local
6597 if (!hnae3_ae_dev_fd_supported(hdev->ae_dev)) in hclge_del_fd_entry()
6602 if (fs->location >= hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1]) in hclge_del_fd_entry()
6605 spin_lock_bh(&hdev->fd_rule_lock); in hclge_del_fd_entry()
6606 if (hdev->fd_active_type == HCLGE_FD_TC_FLOWER_ACTIVE || in hclge_del_fd_entry()
6607 !test_bit(fs->location, hdev->fd_bmap)) { in hclge_del_fd_entry()
6608 dev_err(&hdev->pdev->dev, in hclge_del_fd_entry()
6610 spin_unlock_bh(&hdev->fd_rule_lock); in hclge_del_fd_entry()
6614 ret = hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true, fs->location, in hclge_del_fd_entry()
6619 hclge_update_fd_list(hdev, HCLGE_FD_DELETED, fs->location, NULL); in hclge_del_fd_entry()
6622 spin_unlock_bh(&hdev->fd_rule_lock); in hclge_del_fd_entry()
6626 static void hclge_clear_fd_rules_in_list(struct hclge_dev *hdev, in hclge_clear_fd_rules_in_list() argument
6633 spin_lock_bh(&hdev->fd_rule_lock); in hclge_clear_fd_rules_in_list()
6635 for_each_set_bit(location, hdev->fd_bmap, in hclge_clear_fd_rules_in_list()
6636 hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1]) in hclge_clear_fd_rules_in_list()
6637 hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true, location, in hclge_clear_fd_rules_in_list()
6641 hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, in hclge_clear_fd_rules_in_list()
6646 hdev->fd_active_type = HCLGE_FD_RULE_NONE; in hclge_clear_fd_rules_in_list()
6647 hdev->hclge_fd_rule_num = 0; in hclge_clear_fd_rules_in_list()
6648 bitmap_zero(hdev->fd_bmap, in hclge_clear_fd_rules_in_list()
6649 hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1]); in hclge_clear_fd_rules_in_list()
6652 spin_unlock_bh(&hdev->fd_rule_lock); in hclge_clear_fd_rules_in_list()
6655 static void hclge_del_all_fd_entries(struct hclge_dev *hdev) in hclge_del_all_fd_entries() argument
6657 if (!hnae3_ae_dev_fd_supported(hdev->ae_dev)) in hclge_del_all_fd_entries()
6660 hclge_clear_fd_rules_in_list(hdev, true); in hclge_del_all_fd_entries()
6661 hclge_fd_disable_user_def(hdev); in hclge_del_all_fd_entries()
6667 struct hclge_dev *hdev = vport->back; in hclge_restore_fd_entries() local
6675 if (!hnae3_ae_dev_fd_supported(hdev->ae_dev)) in hclge_restore_fd_entries()
6679 if (!hdev->fd_en) in hclge_restore_fd_entries()
6682 spin_lock_bh(&hdev->fd_rule_lock); in hclge_restore_fd_entries()
6683 hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, rule_node) { in hclge_restore_fd_entries()
6687 spin_unlock_bh(&hdev->fd_rule_lock); in hclge_restore_fd_entries()
6688 set_bit(HCLGE_STATE_FD_TBL_CHANGED, &hdev->state); in hclge_restore_fd_entries()
6697 struct hclge_dev *hdev = vport->back; in hclge_get_fd_rule_cnt() local
6699 if (!hnae3_ae_dev_fd_supported(hdev->ae_dev) || hclge_is_cls_flower_active(handle)) in hclge_get_fd_rule_cnt()
6702 cmd->rule_cnt = hdev->hclge_fd_rule_num; in hclge_get_fd_rule_cnt()
6703 cmd->data = hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1]; in hclge_get_fd_rule_cnt()
6876 static struct hclge_fd_rule *hclge_get_fd_rule(struct hclge_dev *hdev, in hclge_get_fd_rule() argument
6882 hlist_for_each_entry_safe(rule, node2, &hdev->fd_rule_list, rule_node) { in hclge_get_fd_rule()
6912 struct hclge_dev *hdev = vport->back; in hclge_get_fd_rule_info() local
6915 if (!hnae3_ae_dev_fd_supported(hdev->ae_dev)) in hclge_get_fd_rule_info()
6920 spin_lock_bh(&hdev->fd_rule_lock); in hclge_get_fd_rule_info()
6922 rule = hclge_get_fd_rule(hdev, fs->location); in hclge_get_fd_rule_info()
6924 spin_unlock_bh(&hdev->fd_rule_lock); in hclge_get_fd_rule_info()
6964 spin_unlock_bh(&hdev->fd_rule_lock); in hclge_get_fd_rule_info()
6973 struct hclge_dev *hdev = vport->back; in hclge_get_all_rules() local
6978 if (!hnae3_ae_dev_fd_supported(hdev->ae_dev)) in hclge_get_all_rules()
6981 cmd->data = hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1]; in hclge_get_all_rules()
6983 spin_lock_bh(&hdev->fd_rule_lock); in hclge_get_all_rules()
6985 &hdev->fd_rule_list, rule_node) { in hclge_get_all_rules()
6987 spin_unlock_bh(&hdev->fd_rule_lock); in hclge_get_all_rules()
6998 spin_unlock_bh(&hdev->fd_rule_lock); in hclge_get_all_rules()
7030 hclge_fd_search_flow_keys(struct hclge_dev *hdev, in hclge_fd_search_flow_keys() argument
7036 hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, rule_node) { in hclge_fd_search_flow_keys()
7074 struct hclge_dev *hdev = vport->back; in hclge_add_fd_entry_by_arfs() local
7078 if (!hnae3_ae_dev_fd_supported(hdev->ae_dev)) in hclge_add_fd_entry_by_arfs()
7084 spin_lock_bh(&hdev->fd_rule_lock); in hclge_add_fd_entry_by_arfs()
7085 if (hdev->fd_active_type != HCLGE_FD_ARFS_ACTIVE && in hclge_add_fd_entry_by_arfs()
7086 hdev->fd_active_type != HCLGE_FD_RULE_NONE) { in hclge_add_fd_entry_by_arfs()
7087 spin_unlock_bh(&hdev->fd_rule_lock); in hclge_add_fd_entry_by_arfs()
7098 rule = hclge_fd_search_flow_keys(hdev, &new_tuples); in hclge_add_fd_entry_by_arfs()
7100 bit_id = find_first_zero_bit(hdev->fd_bmap, MAX_FD_FILTER_NUM); in hclge_add_fd_entry_by_arfs()
7101 if (bit_id >= hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1]) { in hclge_add_fd_entry_by_arfs()
7102 spin_unlock_bh(&hdev->fd_rule_lock); in hclge_add_fd_entry_by_arfs()
7108 spin_unlock_bh(&hdev->fd_rule_lock); in hclge_add_fd_entry_by_arfs()
7116 hclge_update_fd_list(hdev, rule->state, rule->location, rule); in hclge_add_fd_entry_by_arfs()
7117 hdev->fd_active_type = HCLGE_FD_ARFS_ACTIVE; in hclge_add_fd_entry_by_arfs()
7121 set_bit(HCLGE_STATE_FD_TBL_CHANGED, &hdev->state); in hclge_add_fd_entry_by_arfs()
7122 hclge_task_schedule(hdev, 0); in hclge_add_fd_entry_by_arfs()
7124 spin_unlock_bh(&hdev->fd_rule_lock); in hclge_add_fd_entry_by_arfs()
7128 static void hclge_rfs_filter_expire(struct hclge_dev *hdev) in hclge_rfs_filter_expire() argument
7131 struct hnae3_handle *handle = &hdev->vport[0].nic; in hclge_rfs_filter_expire()
7135 spin_lock_bh(&hdev->fd_rule_lock); in hclge_rfs_filter_expire()
7136 if (hdev->fd_active_type != HCLGE_FD_ARFS_ACTIVE) { in hclge_rfs_filter_expire()
7137 spin_unlock_bh(&hdev->fd_rule_lock); in hclge_rfs_filter_expire()
7140 hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, rule_node) { in hclge_rfs_filter_expire()
7146 set_bit(HCLGE_STATE_FD_TBL_CHANGED, &hdev->state); in hclge_rfs_filter_expire()
7149 spin_unlock_bh(&hdev->fd_rule_lock); in hclge_rfs_filter_expire()
7154 static int hclge_clear_arfs_rules(struct hclge_dev *hdev) in hclge_clear_arfs_rules() argument
7161 if (hdev->fd_active_type != HCLGE_FD_ARFS_ACTIVE) in hclge_clear_arfs_rules()
7164 hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, rule_node) { in hclge_clear_arfs_rules()
7168 ret = hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true, in hclge_clear_arfs_rules()
7174 hclge_fd_dec_rule_cnt(hdev, rule->location); in hclge_clear_arfs_rules()
7182 hclge_sync_fd_state(hdev); in hclge_clear_arfs_rules()
7310 static int hclge_parse_cls_flower(struct hclge_dev *hdev, in hclge_parse_cls_flower() argument
7327 dev_err(&hdev->pdev->dev, "unsupported key set: %#llx\n", in hclge_parse_cls_flower()
7345 static int hclge_check_cls_flower(struct hclge_dev *hdev, in hclge_check_cls_flower() argument
7350 if (tc < 0 || tc > hdev->tc_max) { in hclge_check_cls_flower()
7351 dev_err(&hdev->pdev->dev, "invalid traffic class\n"); in hclge_check_cls_flower()
7356 prio > hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1]) { in hclge_check_cls_flower()
7357 dev_err(&hdev->pdev->dev, in hclge_check_cls_flower()
7359 prio, hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1]); in hclge_check_cls_flower()
7363 if (test_bit(prio - 1, hdev->fd_bmap)) { in hclge_check_cls_flower()
7364 dev_err(&hdev->pdev->dev, "prio %u is already used\n", prio); in hclge_check_cls_flower()
7375 struct hclge_dev *hdev = vport->back; in hclge_add_cls_flower() local
7379 if (!hnae3_ae_dev_fd_supported(hdev->ae_dev)) { in hclge_add_cls_flower()
7380 dev_err(&hdev->pdev->dev, in hclge_add_cls_flower()
7385 ret = hclge_check_cls_flower(hdev, cls_flower, tc); in hclge_add_cls_flower()
7387 dev_err(&hdev->pdev->dev, in hclge_add_cls_flower()
7396 ret = hclge_parse_cls_flower(hdev, cls_flower, rule); in hclge_add_cls_flower()
7409 ret = hclge_add_fd_entry_common(hdev, rule); in hclge_add_cls_flower()
7416 static struct hclge_fd_rule *hclge_find_cls_flower(struct hclge_dev *hdev, in hclge_find_cls_flower() argument
7422 hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, rule_node) { in hclge_find_cls_flower()
7434 struct hclge_dev *hdev = vport->back; in hclge_del_cls_flower() local
7438 if (!hnae3_ae_dev_fd_supported(hdev->ae_dev)) in hclge_del_cls_flower()
7441 spin_lock_bh(&hdev->fd_rule_lock); in hclge_del_cls_flower()
7443 rule = hclge_find_cls_flower(hdev, cls_flower->cookie); in hclge_del_cls_flower()
7445 spin_unlock_bh(&hdev->fd_rule_lock); in hclge_del_cls_flower()
7449 ret = hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true, rule->location, in hclge_del_cls_flower()
7456 hclge_update_fd_list(hdev, HCLGE_FD_TO_DEL, rule->location, NULL); in hclge_del_cls_flower()
7457 set_bit(HCLGE_STATE_FD_TBL_CHANGED, &hdev->state); in hclge_del_cls_flower()
7458 spin_unlock_bh(&hdev->fd_rule_lock); in hclge_del_cls_flower()
7462 hclge_update_fd_list(hdev, HCLGE_FD_DELETED, rule->location, NULL); in hclge_del_cls_flower()
7463 spin_unlock_bh(&hdev->fd_rule_lock); in hclge_del_cls_flower()
7468 static void hclge_sync_fd_list(struct hclge_dev *hdev, struct hlist_head *hlist) in hclge_sync_fd_list() argument
7474 if (!test_and_clear_bit(HCLGE_STATE_FD_TBL_CHANGED, &hdev->state)) in hclge_sync_fd_list()
7477 spin_lock_bh(&hdev->fd_rule_lock); in hclge_sync_fd_list()
7482 ret = hclge_fd_config_rule(hdev, rule); in hclge_sync_fd_list()
7488 ret = hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true, in hclge_sync_fd_list()
7492 hclge_fd_dec_rule_cnt(hdev, rule->location); in hclge_sync_fd_list()
7493 hclge_fd_free_node(hdev, rule); in hclge_sync_fd_list()
7502 set_bit(HCLGE_STATE_FD_TBL_CHANGED, &hdev->state); in hclge_sync_fd_list()
7504 spin_unlock_bh(&hdev->fd_rule_lock); in hclge_sync_fd_list()
7507 static void hclge_sync_fd_table(struct hclge_dev *hdev) in hclge_sync_fd_table() argument
7509 if (!hnae3_ae_dev_fd_supported(hdev->ae_dev)) in hclge_sync_fd_table()
7512 if (test_and_clear_bit(HCLGE_STATE_FD_CLEAR_ALL, &hdev->state)) { in hclge_sync_fd_table()
7513 bool clear_list = hdev->fd_active_type == HCLGE_FD_ARFS_ACTIVE; in hclge_sync_fd_table()
7515 hclge_clear_fd_rules_in_list(hdev, clear_list); in hclge_sync_fd_table()
7518 hclge_sync_fd_user_def_cfg(hdev, false); in hclge_sync_fd_table()
7520 hclge_sync_fd_list(hdev, &hdev->fd_rule_list); in hclge_sync_fd_table()
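
hclge_sync_fd_list() defers hardware programming to the service task: each rule carries a state, TO_ADD entries are written to the TCAM and become ACTIVE, TO_DEL entries are cleared and freed, and any failure re-arms the table-changed flag for a later retry. A hedged sketch over a hand-rolled singly linked list (the driver uses an hlist and TCAM commands):

#include <stdio.h>

enum fd_state { FD_TO_ADD, FD_TO_DEL, FD_ACTIVE };

struct fd_rule {
    enum fd_state state;
    int location;
    struct fd_rule *next;
};

/* Hypothetical hardware ops; the driver issues TCAM config commands. */
static int hw_add(int loc) { printf("add %d\n", loc); return 0; }
static int hw_del(int loc) { printf("del %d\n", loc); return 0; }

/* Returns nonzero if the table must be re-synced later. */
static int sync_fd_list(struct fd_rule *head)
{
    for (struct fd_rule *r = head; r; r = r->next) {
        int ret = 0;

        if (r->state == FD_TO_ADD) {
            ret = hw_add(r->location);
            if (!ret)
                r->state = FD_ACTIVE;
        } else if (r->state == FD_TO_DEL) {
            ret = hw_del(r->location);
            /* the real code also unlinks and frees the node */
        }
        if (ret)
            return ret;  /* set the CHANGED bit again and retry */
    }
    return 0;
}

int main(void)
{
    struct fd_rule b = { FD_TO_DEL, 7, NULL };
    struct fd_rule a = { FD_TO_ADD, 3, &b };

    return sync_fd_list(&a);
}
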
7526 struct hclge_dev *hdev = vport->back; in hclge_get_hw_reset_stat() local
7528 return hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG) || in hclge_get_hw_reset_stat()
7529 hclge_read_dev(&hdev->hw, HCLGE_FUN_RST_ING); in hclge_get_hw_reset_stat()
7535 struct hclge_dev *hdev = vport->back; in hclge_get_cmdq_stat() local
7537 return test_bit(HCLGE_COMM_STATE_CMD_DISABLE, &hdev->hw.hw.comm_state); in hclge_get_cmdq_stat()
7543 struct hclge_dev *hdev = vport->back; in hclge_ae_dev_resetting() local
7545 return test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state); in hclge_ae_dev_resetting()
7551 struct hclge_dev *hdev = vport->back; in hclge_ae_dev_reset_cnt() local
7553 return hdev->rst_stats.hw_reset_done_cnt; in hclge_ae_dev_reset_cnt()
7559 struct hclge_dev *hdev = vport->back; in hclge_enable_fd() local
7561 hdev->fd_en = enable; in hclge_enable_fd()
7564 set_bit(HCLGE_STATE_FD_CLEAR_ALL, &hdev->state); in hclge_enable_fd()
7568 hclge_task_schedule(hdev, 0); in hclge_enable_fd()
7571 static void hclge_cfg_mac_mode(struct hclge_dev *hdev, bool enable) in hclge_cfg_mac_mode() argument
7598 ret = hclge_cmd_send(&hdev->hw, &desc, 1); in hclge_cfg_mac_mode()
7600 dev_err(&hdev->pdev->dev, in hclge_cfg_mac_mode()
7606 hclge_mac_link_status_wait(hdev, HCLGE_LINK_STATUS_DOWN, in hclge_cfg_mac_mode()
7610 static int hclge_config_switch_param(struct hclge_dev *hdev, int vfid, in hclge_config_switch_param() argument
7627 ret = hclge_cmd_send(&hdev->hw, &desc, 1); in hclge_config_switch_param()
7629 dev_err(&hdev->pdev->dev, in hclge_config_switch_param()
7639 ret = hclge_cmd_send(&hdev->hw, &desc, 1); in hclge_config_switch_param()
7641 dev_err(&hdev->pdev->dev, in hclge_config_switch_param()
7646 static void hclge_phy_link_status_wait(struct hclge_dev *hdev, in hclge_phy_link_status_wait() argument
7651 struct phy_device *phydev = hdev->hw.mac.phydev; in hclge_phy_link_status_wait()
7658 dev_err(&hdev->pdev->dev, in hclge_phy_link_status_wait()
7670 static int hclge_mac_link_status_wait(struct hclge_dev *hdev, int link_ret, in hclge_mac_link_status_wait() argument
7678 ret = hclge_get_mac_link_status(hdev, &link_status); in hclge_mac_link_status_wait()
7689 static int hclge_mac_phy_link_status_wait(struct hclge_dev *hdev, bool en, in hclge_mac_phy_link_status_wait() argument
7699 hclge_phy_link_status_wait(hdev, link_ret); in hclge_mac_phy_link_status_wait()
7701 return hclge_mac_link_status_wait(hdev, link_ret, in hclge_mac_phy_link_status_wait()
7705 static int hclge_set_app_loopback(struct hclge_dev *hdev, bool en) in hclge_set_app_loopback() argument
7715 ret = hclge_cmd_send(&hdev->hw, &desc, 1); in hclge_set_app_loopback()
7717 dev_err(&hdev->pdev->dev, in hclge_set_app_loopback()
7732 ret = hclge_cmd_send(&hdev->hw, &desc, 1); in hclge_set_app_loopback()
7734 dev_err(&hdev->pdev->dev, in hclge_set_app_loopback()
7739 static int hclge_cfg_common_loopback_cmd_send(struct hclge_dev *hdev, bool en, in hclge_cfg_common_loopback_cmd_send() argument
7761 dev_err(&hdev->pdev->dev, in hclge_cfg_common_loopback_cmd_send()
7770 ret = hclge_cmd_send(&hdev->hw, &desc, 1); in hclge_cfg_common_loopback_cmd_send()
7772 dev_err(&hdev->pdev->dev, in hclge_cfg_common_loopback_cmd_send()
7779 static int hclge_cfg_common_loopback_wait(struct hclge_dev *hdev) in hclge_cfg_common_loopback_wait() argument
7795 ret = hclge_cmd_send(&hdev->hw, &desc, 1); in hclge_cfg_common_loopback_wait()
7797 dev_err(&hdev->pdev->dev, in hclge_cfg_common_loopback_wait()
7806 dev_err(&hdev->pdev->dev, "wait loopback timeout\n"); in hclge_cfg_common_loopback_wait()
7809 dev_err(&hdev->pdev->dev, "failed to do loopback test\n"); in hclge_cfg_common_loopback_wait()
7816 static int hclge_cfg_common_loopback(struct hclge_dev *hdev, bool en, in hclge_cfg_common_loopback() argument
7821 ret = hclge_cfg_common_loopback_cmd_send(hdev, en, loop_mode); in hclge_cfg_common_loopback()
7825 return hclge_cfg_common_loopback_wait(hdev); in hclge_cfg_common_loopback()
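
hclge_cfg_common_loopback() splits the work into a config command followed by a bounded completion poll, failing with a timeout if firmware never reports done and with an I/O error if it reports failure. A stand-alone model of that command-then-poll shape; the round count and the simulated completion are invented:

#include <stdbool.h>
#include <stdio.h>

#define LOOPBACK_WAIT_ROUNDS 10  /* illustrative; the driver sleeps between reads */

static int poll_count;           /* simulated firmware completion state */

static bool fw_done(bool *success)
{
    *success = true;
    return ++poll_count >= 3;    /* "completes" on the third poll */
}

static int cfg_common_loopback(void)
{
    bool ok = false;

    /* step 1: the real driver sends the loopback config command here */

    /* step 2: poll for completion with an upper bound */
    for (int i = 0; i < LOOPBACK_WAIT_ROUNDS; i++) {
        if (fw_done(&ok))
            return ok ? 0 : -1;  /* -EIO in the driver   */
    }
    fprintf(stderr, "wait loopback timeout\n");
    return -1;                   /* -EBUSY in the driver */
}

int main(void) { return cfg_common_loopback(); }
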
7828 static int hclge_set_common_loopback(struct hclge_dev *hdev, bool en, in hclge_set_common_loopback() argument
7833 ret = hclge_cfg_common_loopback(hdev, en, loop_mode); in hclge_set_common_loopback()
7837 hclge_cfg_mac_mode(hdev, en); in hclge_set_common_loopback()
7839 ret = hclge_mac_phy_link_status_wait(hdev, en, false); in hclge_set_common_loopback()
7841 dev_err(&hdev->pdev->dev, in hclge_set_common_loopback()
7847 static int hclge_enable_phy_loopback(struct hclge_dev *hdev, in hclge_enable_phy_loopback() argument
7865 static int hclge_disable_phy_loopback(struct hclge_dev *hdev, in hclge_disable_phy_loopback() argument
7877 static int hclge_set_phy_loopback(struct hclge_dev *hdev, bool en) in hclge_set_phy_loopback() argument
7879 struct phy_device *phydev = hdev->hw.mac.phydev; in hclge_set_phy_loopback()
7883 if (hnae3_dev_phy_imp_supported(hdev)) in hclge_set_phy_loopback()
7884 return hclge_set_common_loopback(hdev, en, in hclge_set_phy_loopback()
7890 ret = hclge_enable_phy_loopback(hdev, phydev); in hclge_set_phy_loopback()
7892 ret = hclge_disable_phy_loopback(hdev, phydev); in hclge_set_phy_loopback()
7894 dev_err(&hdev->pdev->dev, in hclge_set_phy_loopback()
7899 hclge_cfg_mac_mode(hdev, en); in hclge_set_phy_loopback()
7901 ret = hclge_mac_phy_link_status_wait(hdev, en, true); in hclge_set_phy_loopback()
7903 dev_err(&hdev->pdev->dev, in hclge_set_phy_loopback()
7909 static int hclge_tqp_enable_cmd_send(struct hclge_dev *hdev, u16 tqp_id, in hclge_tqp_enable_cmd_send() argument
7922 return hclge_cmd_send(&hdev->hw, &desc, 1); in hclge_tqp_enable_cmd_send()
7928 struct hclge_dev *hdev = vport->back; in hclge_tqp_enable() local
7933 ret = hclge_tqp_enable_cmd_send(hdev, i, 0, enable); in hclge_tqp_enable()
7944 struct hclge_dev *hdev = vport->back; in hclge_set_loopback() local
7952 if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2) { in hclge_set_loopback()
7955 ret = hclge_config_switch_param(hdev, PF_VPORT_ID, switch_param, in hclge_set_loopback()
7963 ret = hclge_set_app_loopback(hdev, en); in hclge_set_loopback()
7967 ret = hclge_set_common_loopback(hdev, en, loop_mode); in hclge_set_loopback()
7970 ret = hclge_set_phy_loopback(hdev, en); in hclge_set_loopback()
7976 dev_err(&hdev->pdev->dev, in hclge_set_loopback()
7986 dev_err(&hdev->pdev->dev, "failed to %s tqp in loopback, ret = %d\n", in hclge_set_loopback()
7992 static int hclge_set_default_loopback(struct hclge_dev *hdev) in hclge_set_default_loopback() argument
7996 ret = hclge_set_app_loopback(hdev, false); in hclge_set_default_loopback()
8000 ret = hclge_cfg_common_loopback(hdev, false, HNAE3_LOOP_SERIAL_SERDES); in hclge_set_default_loopback()
8004 return hclge_cfg_common_loopback(hdev, false, in hclge_set_default_loopback()
8008 static void hclge_flush_link_update(struct hclge_dev *hdev) in hclge_flush_link_update() argument
8012 unsigned long last = hdev->serv_processed_cnt; in hclge_flush_link_update()
8015 while (test_bit(HCLGE_STATE_LINK_UPDATING, &hdev->state) && in hclge_flush_link_update()
8017 last == hdev->serv_processed_cnt) in hclge_flush_link_update()
8024 struct hclge_dev *hdev = vport->back; in hclge_set_timer_task() local
8027 hclge_task_schedule(hdev, 0); in hclge_set_timer_task()
8030 set_bit(HCLGE_STATE_DOWN, &hdev->state); in hclge_set_timer_task()
8033 hclge_flush_link_update(hdev); in hclge_set_timer_task()
8040 struct hclge_dev *hdev = vport->back; in hclge_ae_start() local
8043 hclge_cfg_mac_mode(hdev, true); in hclge_ae_start()
8044 clear_bit(HCLGE_STATE_DOWN, &hdev->state); in hclge_ae_start()
8045 hdev->hw.mac.link = 0; in hclge_ae_start()
8050 hclge_mac_start_phy(hdev); in hclge_ae_start()
8058 struct hclge_dev *hdev = vport->back; in hclge_ae_stop() local
8060 set_bit(HCLGE_STATE_DOWN, &hdev->state); in hclge_ae_stop()
8061 spin_lock_bh(&hdev->fd_rule_lock); in hclge_ae_stop()
8062 hclge_clear_arfs_rules(hdev); in hclge_ae_stop()
8063 spin_unlock_bh(&hdev->fd_rule_lock); in hclge_ae_stop()
8068 if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state)) { in hclge_ae_stop()
8069 hclge_pfc_pause_en_cfg(hdev, HCLGE_PFC_TX_RX_DISABLE, in hclge_ae_stop()
8071 if (hdev->reset_type != HNAE3_FUNC_RESET && in hclge_ae_stop()
8072 hdev->reset_type != HNAE3_FLR_RESET) { in hclge_ae_stop()
8073 hclge_mac_stop_phy(hdev); in hclge_ae_stop()
8074 hclge_update_link_status(hdev); in hclge_ae_stop()
8081 hclge_config_mac_tnl_int(hdev, false); in hclge_ae_stop()
8084 hclge_cfg_mac_mode(hdev, false); in hclge_ae_stop()
8086 hclge_mac_stop_phy(hdev); in hclge_ae_stop()
8090 hclge_update_link_status(hdev); in hclge_ae_stop()
8095 struct hclge_dev *hdev = vport->back; in hclge_vport_start() local
8103 if (test_bit(vport->vport_id, hdev->vport_config_block)) { in hclge_vport_start()
8108 hclge_restore_hw_table(hdev); in hclge_vport_start()
8112 clear_bit(vport->vport_id, hdev->vport_config_block); in hclge_vport_start()
8142 struct hclge_dev *hdev = vport->back; in hclge_get_mac_vlan_cmd_status() local
8145 dev_err(&hdev->pdev->dev, in hclge_get_mac_vlan_cmd_status()
8158 dev_err(&hdev->pdev->dev, in hclge_get_mac_vlan_cmd_status()
8166 dev_dbg(&hdev->pdev->dev, in hclge_get_mac_vlan_cmd_status()
8171 dev_err(&hdev->pdev->dev, in hclge_get_mac_vlan_cmd_status()
8179 dev_dbg(&hdev->pdev->dev, in hclge_get_mac_vlan_cmd_status()
8184 dev_err(&hdev->pdev->dev, in hclge_get_mac_vlan_cmd_status()
8190 dev_err(&hdev->pdev->dev, in hclge_get_mac_vlan_cmd_status()
8260 struct hclge_dev *hdev = vport->back; in hclge_remove_mac_vlan_tbl() local
8270 ret = hclge_cmd_send(&hdev->hw, &desc, 1); in hclge_remove_mac_vlan_tbl()
8272 dev_err(&hdev->pdev->dev, in hclge_remove_mac_vlan_tbl()
8289 struct hclge_dev *hdev = vport->back; in hclge_lookup_mac_vlan_tbl() local
8307 ret = hclge_cmd_send(&hdev->hw, desc, 3); in hclge_lookup_mac_vlan_tbl()
8312 ret = hclge_cmd_send(&hdev->hw, desc, 1); in hclge_lookup_mac_vlan_tbl()
8315 dev_err(&hdev->pdev->dev, in hclge_lookup_mac_vlan_tbl()
8331 struct hclge_dev *hdev = vport->back; in hclge_add_mac_vlan_tbl() local
8345 ret = hclge_cmd_send(&hdev->hw, &desc, 1); in hclge_add_mac_vlan_tbl()
8361 ret = hclge_cmd_send(&hdev->hw, mc_desc, 3); in hclge_add_mac_vlan_tbl()
8371 dev_err(&hdev->pdev->dev, in hclge_add_mac_vlan_tbl()
8380 static int hclge_set_umv_space(struct hclge_dev *hdev, u16 space_size, in hclge_set_umv_space() argument
8392 ret = hclge_cmd_send(&hdev->hw, &desc, 1); in hclge_set_umv_space()
8394 dev_err(&hdev->pdev->dev, "failed to set umv space, ret = %d\n", in hclge_set_umv_space()
8404 static int hclge_init_umv_space(struct hclge_dev *hdev) in hclge_init_umv_space() argument
8409 ret = hclge_set_umv_space(hdev, hdev->wanted_umv_size, &allocated_size); in hclge_init_umv_space()
8413 if (allocated_size < hdev->wanted_umv_size) in hclge_init_umv_space()
8414 dev_warn(&hdev->pdev->dev, in hclge_init_umv_space()
8416 hdev->wanted_umv_size, allocated_size); in hclge_init_umv_space()
8418 hdev->max_umv_size = allocated_size; in hclge_init_umv_space()
8419 hdev->priv_umv_size = hdev->max_umv_size / (hdev->num_alloc_vport + 1); in hclge_init_umv_space()
8420 hdev->share_umv_size = hdev->priv_umv_size + in hclge_init_umv_space()
8421 hdev->max_umv_size % (hdev->num_alloc_vport + 1); in hclge_init_umv_space()
8423 if (hdev->ae_dev->dev_specs.mc_mac_size) in hclge_init_umv_space()
8424 set_bit(HNAE3_DEV_SUPPORT_MC_MAC_MNG_B, hdev->ae_dev->caps); in hclge_init_umv_space()
8429 static void hclge_reset_umv_space(struct hclge_dev *hdev) in hclge_reset_umv_space() argument
8434 for (i = 0; i < hdev->num_alloc_vport; i++) { in hclge_reset_umv_space()
8435 vport = &hdev->vport[i]; in hclge_reset_umv_space()
8439 mutex_lock(&hdev->vport_lock); in hclge_reset_umv_space()
8440 hdev->share_umv_size = hdev->priv_umv_size + in hclge_reset_umv_space()
8441 hdev->max_umv_size % (hdev->num_alloc_vport + 1); in hclge_reset_umv_space()
8442 mutex_unlock(&hdev->vport_lock); in hclge_reset_umv_space()
8444 hdev->used_mc_mac_num = 0; in hclge_reset_umv_space()
8449 struct hclge_dev *hdev = vport->back; in hclge_is_umv_space_full() local
8453 mutex_lock(&hdev->vport_lock); in hclge_is_umv_space_full()
8455 is_full = (vport->used_umv_num >= hdev->priv_umv_size && in hclge_is_umv_space_full()
8456 hdev->share_umv_size == 0); in hclge_is_umv_space_full()
8459 mutex_unlock(&hdev->vport_lock); in hclge_is_umv_space_full()
8466 struct hclge_dev *hdev = vport->back; in hclge_update_umv_space() local
8469 if (vport->used_umv_num > hdev->priv_umv_size) in hclge_update_umv_space()
8470 hdev->share_umv_size++; in hclge_update_umv_space()
8475 if (vport->used_umv_num >= hdev->priv_umv_size && in hclge_update_umv_space()
8476 hdev->share_umv_size > 0) in hclge_update_umv_space()
8477 hdev->share_umv_size--; in hclge_update_umv_space()
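
hclge_update_umv_space() accounts unicast MAC entries against a per-vport private quota first and a shared pool after that: an add beyond priv_umv_size borrows from share_umv_size, and the matching free returns the borrowed entry. A compact model of that two-tier accounting:

#include <stdbool.h>
#include <stdio.h>

struct umv {
    unsigned int priv_size;    /* per-vport quota         */
    unsigned int share_size;   /* global leftover pool    */
    unsigned int used;         /* this vport's used count */
};

static bool umv_full(const struct umv *u)
{
    return u->used >= u->priv_size && u->share_size == 0;
}

static void umv_update(struct umv *u, bool is_free)
{
    if (is_free) {
        if (u->used > u->priv_size)
            u->share_size++;   /* entry was borrowed from the pool */
        if (u->used)
            u->used--;
    } else {
        if (u->used >= u->priv_size && u->share_size > 0)
            u->share_size--;   /* borrow from the pool */
        u->used++;
    }
}

int main(void)
{
    struct umv u = { .priv_size = 2, .share_size = 1 };

    umv_update(&u, false);
    umv_update(&u, false);
    umv_update(&u, false);             /* third add borrows */
    printf("full: %d\n", umv_full(&u));
    return 0;
}

The shared pool lets any vport use the remainder left after dividing the UMV space evenly, instead of stranding those entries.
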
8529 struct hclge_dev *hdev = vport->back; in hclge_update_mac_list() local
8554 dev_err(&hdev->pdev->dev, in hclge_update_mac_list()
8590 struct hclge_dev *hdev = vport->back; in hclge_add_uc_addr_common() local
8601 dev_err(&hdev->pdev->dev, in hclge_add_uc_addr_common()
8624 mutex_lock(&hdev->vport_lock); in hclge_add_uc_addr_common()
8629 mutex_unlock(&hdev->vport_lock); in hclge_add_uc_addr_common()
8632 mutex_unlock(&hdev->vport_lock); in hclge_add_uc_addr_common()
8635 dev_err(&hdev->pdev->dev, "UC MAC table full(%u)\n", in hclge_add_uc_addr_common()
8636 hdev->priv_umv_size); in hclge_add_uc_addr_common()
8661 struct hclge_dev *hdev = vport->back; in hclge_rm_uc_addr_common() local
8670 dev_dbg(&hdev->pdev->dev, "Remove mac err! invalid mac:%s.\n", in hclge_rm_uc_addr_common()
8680 mutex_lock(&hdev->vport_lock); in hclge_rm_uc_addr_common()
8682 mutex_unlock(&hdev->vport_lock); in hclge_rm_uc_addr_common()
8702 struct hclge_dev *hdev = vport->back; in hclge_add_mc_addr_common() local
8711 dev_err(&hdev->pdev->dev, in hclge_add_mc_addr_common()
8720 if (hnae3_ae_dev_mc_mac_mng_supported(hdev->ae_dev) && in hclge_add_mc_addr_common()
8721 hdev->used_mc_mac_num >= in hclge_add_mc_addr_common()
8722 hdev->ae_dev->dev_specs.mc_mac_size) in hclge_add_mc_addr_common()
8739 hdev->used_mc_mac_num++; in hclge_add_mc_addr_common()
8747 dev_err(&hdev->pdev->dev, "mc mac vlan table is full\n"); in hclge_add_mc_addr_common()
8766 struct hclge_dev *hdev = vport->back; in hclge_rm_mc_addr_common() local
8774 dev_dbg(&hdev->pdev->dev, in hclge_rm_mc_addr_common()
8793 hdev->used_mc_mac_num--; in hclge_rm_mc_addr_common()
9002 struct hclge_dev *hdev = vport->back; in hclge_need_sync_mac_table() local
9004 if (test_bit(vport->vport_id, hdev->vport_config_block)) in hclge_need_sync_mac_table()
9013 static void hclge_sync_mac_table(struct hclge_dev *hdev) in hclge_sync_mac_table() argument
9017 for (i = 0; i < hdev->num_alloc_vport; i++) { in hclge_sync_mac_table()
9018 struct hclge_vport *vport = &hdev->vport[i]; in hclge_sync_mac_table()
9083 struct hclge_dev *hdev = vport->back; in hclge_rm_vport_all_mac_table() local
9097 set_bit(vport->vport_id, hdev->vport_config_block); in hclge_rm_vport_all_mac_table()
9119 struct hclge_dev *hdev = vport->back; in hclge_uninit_vport_mac_list() local
9147 dev_warn(&hdev->pdev->dev, in hclge_uninit_vport_mac_list()
9158 static void hclge_uninit_mac_table(struct hclge_dev *hdev) in hclge_uninit_mac_table() argument
9163 for (i = 0; i < hdev->num_alloc_vport; i++) { in hclge_uninit_mac_table()
9164 vport = &hdev->vport[i]; in hclge_uninit_mac_table()
9170 static int hclge_get_mac_ethertype_cmd_status(struct hclge_dev *hdev, in hclge_get_mac_ethertype_cmd_status() argument
9181 dev_err(&hdev->pdev->dev, in hclge_get_mac_ethertype_cmd_status()
9193 dev_err(&hdev->pdev->dev, in hclge_get_mac_ethertype_cmd_status()
9198 dev_err(&hdev->pdev->dev, in hclge_get_mac_ethertype_cmd_status()
9203 dev_err(&hdev->pdev->dev, in hclge_get_mac_ethertype_cmd_status()
9217 struct hclge_dev *hdev = vport->back; in hclge_set_vf_mac() local
9219 vport = hclge_get_vf_vport(hdev, vf); in hclge_set_vf_mac()
9225 dev_info(&hdev->pdev->dev, in hclge_set_vf_mac()
9238 dev_info(&hdev->pdev->dev, in hclge_set_vf_mac()
9245 dev_info(&hdev->pdev->dev, in hclge_set_vf_mac()
9251 static int hclge_add_mgr_tbl(struct hclge_dev *hdev, in hclge_add_mgr_tbl() argument
9262 ret = hclge_cmd_send(&hdev->hw, &desc, 1); in hclge_add_mgr_tbl()
9264 dev_err(&hdev->pdev->dev, in hclge_add_mgr_tbl()
9273 return hclge_get_mac_ethertype_cmd_status(hdev, retval, resp_code); in hclge_add_mgr_tbl()
9276 static int init_mgr_tbl(struct hclge_dev *hdev) in init_mgr_tbl() argument
9282 ret = hclge_add_mgr_tbl(hdev, &hclge_mgr_table[i]); in init_mgr_tbl()
9284 dev_err(&hdev->pdev->dev, in init_mgr_tbl()
9297 struct hclge_dev *hdev = vport->back; in hclge_get_mac_addr() local
9299 ether_addr_copy(p, hdev->hw.mac.mac_addr); in hclge_get_mac_addr()
9352 struct hclge_dev *hdev = vport->back; in hclge_set_mac_addr() local
9361 dev_err(&hdev->pdev->dev, in hclge_set_mac_addr()
9367 ret = hclge_pause_addr_cfg(hdev, new_addr); in hclge_set_mac_addr()
9369 dev_err(&hdev->pdev->dev, in hclge_set_mac_addr()
9376 old_addr = hdev->hw.mac.mac_addr; in hclge_set_mac_addr()
9382 dev_err(&hdev->pdev->dev, in hclge_set_mac_addr()
9388 hclge_pause_addr_cfg(hdev, old_addr); in hclge_set_mac_addr()
9395 ether_addr_copy(hdev->hw.mac.mac_addr, new_addr); in hclge_set_mac_addr()
9398 hclge_task_schedule(hdev, 0); in hclge_set_mac_addr()
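hclge_set_mac_addr() shows a rollback pattern: the pause (flow-control) address is reprogrammed for the new MAC first, and if the subsequent MAC table update fails, the old pause address is restored before returning. A sketch of the same shape, with the two hardware operations as hypothetical stand-ins:

#include <string.h>

int pause_addr_cfg(const unsigned char *mac);                      /* stand-in */
int mac_table_replace(const unsigned char *old_mac,
                      const unsigned char *new_mac);               /* stand-in */

static int set_mac_addr(unsigned char hw_mac[6], const unsigned char *new_addr)
{
        unsigned char old_addr[6];
        int ret;

        memcpy(old_addr, hw_mac, 6);

        ret = pause_addr_cfg(new_addr);
        if (ret)
                return ret;

        ret = mac_table_replace(old_addr, new_addr);
        if (ret) {
                pause_addr_cfg(old_addr);       /* best-effort rollback */
                return ret;
        }

        memcpy(hw_mac, new_addr, 6);            /* commit only on success */
        return 0;
}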
9403 static int hclge_mii_ioctl(struct hclge_dev *hdev, struct ifreq *ifr, int cmd) in hclge_mii_ioctl() argument
9407 if (!hnae3_dev_phy_imp_supported(hdev)) in hclge_mii_ioctl()
9412 data->phy_id = hdev->hw.mac.phy_addr; in hclge_mii_ioctl()
9416 data->val_out = hclge_read_phy_reg(hdev, data->reg_num); in hclge_mii_ioctl()
9420 return hclge_write_phy_reg(hdev, data->reg_num, data->val_in); in hclge_mii_ioctl()
9430 struct hclge_dev *hdev = vport->back; in hclge_do_ioctl() local
9434 return hclge_ptp_get_cfg(hdev, ifr); in hclge_do_ioctl()
9436 return hclge_ptp_set_cfg(hdev, ifr); in hclge_do_ioctl()
9438 if (!hdev->hw.mac.phydev) in hclge_do_ioctl()
9439 return hclge_mii_ioctl(hdev, ifr, cmd); in hclge_do_ioctl()
9442 return phy_mii_ioctl(hdev->hw.mac.phydev, ifr, cmd); in hclge_do_ioctl()
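hclge_do_ioctl() routes PTP requests to the PTP helpers and, when no phydev is attached but the firmware (IMP) drives the PHY, services the classic MII ioctls itself via hclge_read_phy_reg()/hclge_write_phy_reg(). A rough userspace model of that MII dispatch (the register accessors are hypothetical stand-ins; the SIOC* values are the standard Linux ones):

#include <errno.h>

#define SIOCGMIIPHY 0x8947
#define SIOCGMIIREG 0x8948
#define SIOCSMIIREG 0x8949

unsigned short read_phy_reg(unsigned int reg);                     /* stand-in */
int write_phy_reg(unsigned int reg, unsigned short val);           /* stand-in */

static int mii_ioctl(unsigned int phy_addr, int cmd, unsigned int reg,
                     unsigned short *val)
{
        switch (cmd) {
        case SIOCGMIIPHY:
                *val = (unsigned short)phy_addr;
                return 0;
        case SIOCGMIIREG:
                *val = read_phy_reg(reg);
                return 0;
        case SIOCSMIIREG:
                return write_phy_reg(reg, *val);
        default:
                return -EOPNOTSUPP;
        }
}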
9445 static int hclge_set_port_vlan_filter_bypass(struct hclge_dev *hdev, u8 vf_id, in hclge_set_port_vlan_filter_bypass() argument
9458 ret = hclge_cmd_send(&hdev->hw, &desc, 1); in hclge_set_port_vlan_filter_bypass()
9460 dev_err(&hdev->pdev->dev, in hclge_set_port_vlan_filter_bypass()
9467 static int hclge_set_vlan_filter_ctrl(struct hclge_dev *hdev, u8 vlan_type, in hclge_set_vlan_filter_ctrl() argument
9480 ret = hclge_cmd_send(&hdev->hw, &desc, 1); in hclge_set_vlan_filter_ctrl()
9482 dev_err(&hdev->pdev->dev, "failed to get vport%u vlan filter config, ret = %d.\n", in hclge_set_vlan_filter_ctrl()
9492 ret = hclge_cmd_send(&hdev->hw, &desc, 1); in hclge_set_vlan_filter_ctrl()
9494 dev_err(&hdev->pdev->dev, "failed to set vport%u vlan filter, ret = %d.\n", in hclge_set_vlan_filter_ctrl()
9502 struct hclge_dev *hdev = vport->back; in hclge_set_vport_vlan_filter() local
9503 struct hnae3_ae_dev *ae_dev = hdev->ae_dev; in hclge_set_vport_vlan_filter()
9506 if (hdev->ae_dev->dev_version < HNAE3_DEVICE_VERSION_V2) in hclge_set_vport_vlan_filter()
9507 return hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF, in hclge_set_vport_vlan_filter()
9511 ret = hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF, in hclge_set_vport_vlan_filter()
9518 ret = hclge_set_port_vlan_filter_bypass(hdev, vport->vport_id, in hclge_set_vport_vlan_filter()
9524 ret = hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_PORT, in hclge_set_vport_vlan_filter()
9536 struct hclge_dev *hdev = vport->back; in hclge_need_enable_vport_vlan_filter() local
9553 if (!test_bit(HNAE3_DEV_SUPPORT_VLAN_FLTR_MDF_B, hdev->ae_dev->caps)) in hclge_need_enable_vport_vlan_filter()
9565 struct hclge_dev *hdev = vport->back; in hclge_enable_vport_vlan_filter() local
9569 mutex_lock(&hdev->vport_lock); in hclge_enable_vport_vlan_filter()
9575 mutex_unlock(&hdev->vport_lock); in hclge_enable_vport_vlan_filter()
9581 mutex_unlock(&hdev->vport_lock); in hclge_enable_vport_vlan_filter()
9587 mutex_unlock(&hdev->vport_lock); in hclge_enable_vport_vlan_filter()
9599 static int hclge_set_vf_vlan_filter_cmd(struct hclge_dev *hdev, u16 vfid, in hclge_set_vf_vlan_filter_cmd() argument
9630 ret = hclge_cmd_send(&hdev->hw, desc, 2); in hclge_set_vf_vlan_filter_cmd()
9632 dev_err(&hdev->pdev->dev, in hclge_set_vf_vlan_filter_cmd()
9641 static int hclge_check_vf_vlan_cmd_status(struct hclge_dev *hdev, u16 vfid, in hclge_check_vf_vlan_cmd_status() argument
9654 set_bit(vfid, hdev->vf_vlan_full); in hclge_check_vf_vlan_cmd_status()
9655 dev_warn(&hdev->pdev->dev, in hclge_check_vf_vlan_cmd_status()
9660 dev_err(&hdev->pdev->dev, in hclge_check_vf_vlan_cmd_status()
9676 dev_err(&hdev->pdev->dev, in hclge_check_vf_vlan_cmd_status()
9684 static int hclge_set_vf_vlan_common(struct hclge_dev *hdev, u16 vfid, in hclge_set_vf_vlan_common() argument
9687 struct hclge_vport *vport = &hdev->vport[vfid]; in hclge_set_vf_vlan_common()
9696 if (test_bit(vfid, hdev->vf_vlan_full) && !is_kill) { in hclge_set_vf_vlan_common()
9698 dev_err(&hdev->pdev->dev, in hclge_set_vf_vlan_common()
9705 ret = hclge_set_vf_vlan_filter_cmd(hdev, vfid, is_kill, vlan, desc); in hclge_set_vf_vlan_common()
9709 return hclge_check_vf_vlan_cmd_status(hdev, vfid, is_kill, desc); in hclge_set_vf_vlan_common()
9712 static int hclge_set_port_vlan_filter(struct hclge_dev *hdev, __be16 proto, in hclge_set_port_vlan_filter() argument
9734 ret = hclge_cmd_send(&hdev->hw, &desc, 1); in hclge_set_port_vlan_filter()
9736 dev_err(&hdev->pdev->dev, in hclge_set_port_vlan_filter()
9741 static bool hclge_need_update_port_vlan(struct hclge_dev *hdev, u16 vport_id, in hclge_need_update_port_vlan() argument
9746 test_bit(vport_id, hdev->vlan_table[vlan_id])) in hclge_need_update_port_vlan()
9749 if (!is_kill && test_and_set_bit(vport_id, hdev->vlan_table[vlan_id])) { in hclge_need_update_port_vlan()
9750 dev_warn(&hdev->pdev->dev, in hclge_need_update_port_vlan()
9757 !test_and_clear_bit(vport_id, hdev->vlan_table[vlan_id])) { in hclge_need_update_port_vlan()
9758 dev_warn(&hdev->pdev->dev, in hclge_need_update_port_vlan()
9767 static int hclge_set_vlan_filter_hw(struct hclge_dev *hdev, __be16 proto, in hclge_set_vlan_filter_hw() argument
9780 ret = hclge_set_vf_vlan_common(hdev, vport_id, is_kill, vlan_id); in hclge_set_vlan_filter_hw()
9782 dev_err(&hdev->pdev->dev, in hclge_set_vlan_filter_hw()
9788 if (!hclge_need_update_port_vlan(hdev, vport_id, vlan_id, is_kill)) in hclge_set_vlan_filter_hw()
9791 for_each_set_bit(vport_idx, hdev->vlan_table[vlan_id], HCLGE_VPORT_NUM) in hclge_set_vlan_filter_hw()
9795 ret = hclge_set_port_vlan_filter(hdev, proto, vlan_id, in hclge_set_vlan_filter_hw()
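The hclge_need_update_port_vlan()/hclge_set_vlan_filter_hw() pair keeps one bitmap of vports per VLAN ID: the per-function (VF) filter is always written, but the shared port-level filter is touched only for the first vport that adds a VLAN or the last one that drops it. A simplified model of that bookkeeping (toy bitmap helpers; the table sizes are illustrative, not the driver's):

#include <stdbool.h>

#define VPORT_NUM 256
#define WORD_BITS (8 * sizeof(unsigned long))

static unsigned long vlan_table[4096][VPORT_NUM / WORD_BITS];

static bool test_and_set(unsigned long *map, unsigned int nr)
{
        unsigned long mask = 1UL << (nr % WORD_BITS);
        bool old = map[nr / WORD_BITS] & mask;

        map[nr / WORD_BITS] |= mask;
        return old;
}

static bool test_and_clear(unsigned long *map, unsigned int nr)
{
        unsigned long mask = 1UL << (nr % WORD_BITS);
        bool old = map[nr / WORD_BITS] & mask;

        map[nr / WORD_BITS] &= ~mask;
        return old;
}

static unsigned int map_weight(const unsigned long *map)
{
        unsigned int i, w = 0;

        for (i = 0; i < VPORT_NUM / WORD_BITS; i++)
                w += (unsigned int)__builtin_popcountl(map[i]);
        return w;
}

/* True when the shared port-level filter must be (re)programmed. */
static bool need_update_port_vlan(unsigned int vport_id, unsigned short vlan_id,
                                  bool is_kill)
{
        unsigned long *map = vlan_table[vlan_id];

        if (is_kill)    /* last remover clears the port filter */
                return test_and_clear(map, vport_id) && map_weight(map) == 0;
        if (test_and_set(map, vport_id))
                return false;           /* duplicate add, nothing to do */
        return map_weight(map) == 1;    /* first user programs the port filter */
}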
9805 struct hclge_dev *hdev = vport->back; in hclge_set_vlan_tx_offload_cfg() local
9837 status = hclge_cmd_send(&hdev->hw, &desc, 1); in hclge_set_vlan_tx_offload_cfg()
9839 dev_err(&hdev->pdev->dev, in hclge_set_vlan_tx_offload_cfg()
9850 struct hclge_dev *hdev = vport->back; in hclge_set_vlan_rx_offload_cfg() local
9877 status = hclge_cmd_send(&hdev->hw, &desc, 1); in hclge_set_vlan_rx_offload_cfg()
9879 dev_err(&hdev->pdev->dev, in hclge_set_vlan_rx_offload_cfg()
9941 static int hclge_set_vlan_protocol_type(struct hclge_dev *hdev) in hclge_set_vlan_protocol_type() argument
9951 cpu_to_le16(hdev->vlan_type_cfg.rx_ot_fst_vlan_type); in hclge_set_vlan_protocol_type()
9953 cpu_to_le16(hdev->vlan_type_cfg.rx_ot_sec_vlan_type); in hclge_set_vlan_protocol_type()
9955 cpu_to_le16(hdev->vlan_type_cfg.rx_in_fst_vlan_type); in hclge_set_vlan_protocol_type()
9957 cpu_to_le16(hdev->vlan_type_cfg.rx_in_sec_vlan_type); in hclge_set_vlan_protocol_type()
9959 status = hclge_cmd_send(&hdev->hw, &desc, 1); in hclge_set_vlan_protocol_type()
9961 dev_err(&hdev->pdev->dev, in hclge_set_vlan_protocol_type()
9970 tx_req->ot_vlan_type = cpu_to_le16(hdev->vlan_type_cfg.tx_ot_vlan_type); in hclge_set_vlan_protocol_type()
9971 tx_req->in_vlan_type = cpu_to_le16(hdev->vlan_type_cfg.tx_in_vlan_type); in hclge_set_vlan_protocol_type()
9973 status = hclge_cmd_send(&hdev->hw, &desc, 1); in hclge_set_vlan_protocol_type()
9975 dev_err(&hdev->pdev->dev, in hclge_set_vlan_protocol_type()
9982 static int hclge_init_vlan_filter(struct hclge_dev *hdev) in hclge_init_vlan_filter() argument
9989 if (hdev->ae_dev->dev_version < HNAE3_DEVICE_VERSION_V2) in hclge_init_vlan_filter()
9990 return hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF, in hclge_init_vlan_filter()
9995 for (i = 0; i < hdev->num_alloc_vport; i++) { in hclge_init_vlan_filter()
9996 vport = &hdev->vport[i]; in hclge_init_vlan_filter()
9997 ret = hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF, in hclge_init_vlan_filter()
10005 if (test_bit(HNAE3_DEV_SUPPORT_VLAN_FLTR_MDF_B, hdev->ae_dev->caps) && in hclge_init_vlan_filter()
10006 !test_bit(HNAE3_DEV_SUPPORT_PORT_VLAN_BYPASS_B, hdev->ae_dev->caps)) in hclge_init_vlan_filter()
10009 return hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_PORT, in hclge_init_vlan_filter()
10013 static int hclge_init_vlan_type(struct hclge_dev *hdev) in hclge_init_vlan_type() argument
10015 hdev->vlan_type_cfg.rx_in_fst_vlan_type = ETH_P_8021Q; in hclge_init_vlan_type()
10016 hdev->vlan_type_cfg.rx_in_sec_vlan_type = ETH_P_8021Q; in hclge_init_vlan_type()
10017 hdev->vlan_type_cfg.rx_ot_fst_vlan_type = ETH_P_8021Q; in hclge_init_vlan_type()
10018 hdev->vlan_type_cfg.rx_ot_sec_vlan_type = ETH_P_8021Q; in hclge_init_vlan_type()
10019 hdev->vlan_type_cfg.tx_ot_vlan_type = ETH_P_8021Q; in hclge_init_vlan_type()
10020 hdev->vlan_type_cfg.tx_in_vlan_type = ETH_P_8021Q; in hclge_init_vlan_type()
10022 return hclge_set_vlan_protocol_type(hdev); in hclge_init_vlan_type()
10025 static int hclge_init_vport_vlan_offload(struct hclge_dev *hdev) in hclge_init_vport_vlan_offload() argument
10032 for (i = 0; i < hdev->num_alloc_vport; i++) { in hclge_init_vport_vlan_offload()
10033 vport = &hdev->vport[i]; in hclge_init_vport_vlan_offload()
10045 static int hclge_init_vlan_config(struct hclge_dev *hdev) in hclge_init_vlan_config() argument
10047 struct hnae3_handle *handle = &hdev->vport[0].nic; in hclge_init_vlan_config()
10050 ret = hclge_init_vlan_filter(hdev); in hclge_init_vlan_config()
10054 ret = hclge_init_vlan_type(hdev); in hclge_init_vlan_config()
10058 ret = hclge_init_vport_vlan_offload(hdev); in hclge_init_vlan_config()
10069 struct hclge_dev *hdev = vport->back; in hclge_add_vport_vlan_table() local
10071 mutex_lock(&hdev->vport_lock); in hclge_add_vport_vlan_table()
10075 mutex_unlock(&hdev->vport_lock); in hclge_add_vport_vlan_table()
10082 mutex_unlock(&hdev->vport_lock); in hclge_add_vport_vlan_table()
10090 mutex_unlock(&hdev->vport_lock); in hclge_add_vport_vlan_table()
10096 struct hclge_dev *hdev = vport->back; in hclge_add_vport_all_vlan_table() local
10099 mutex_lock(&hdev->vport_lock); in hclge_add_vport_all_vlan_table()
10103 ret = hclge_set_vlan_filter_hw(hdev, htons(ETH_P_8021Q), in hclge_add_vport_all_vlan_table()
10107 dev_err(&hdev->pdev->dev, in hclge_add_vport_all_vlan_table()
10111 mutex_unlock(&hdev->vport_lock); in hclge_add_vport_all_vlan_table()
10118 mutex_unlock(&hdev->vport_lock); in hclge_add_vport_all_vlan_table()
10127 struct hclge_dev *hdev = vport->back; in hclge_rm_vport_vlan_table() local
10132 hclge_set_vlan_filter_hw(hdev, in hclge_rm_vport_vlan_table()
10148 struct hclge_dev *hdev = vport->back; in hclge_rm_vport_all_vlan_table() local
10150 mutex_lock(&hdev->vport_lock); in hclge_rm_vport_all_vlan_table()
10154 hclge_set_vlan_filter_hw(hdev, in hclge_rm_vport_all_vlan_table()
10166 clear_bit(vport->vport_id, hdev->vf_vlan_full); in hclge_rm_vport_all_vlan_table()
10167 mutex_unlock(&hdev->vport_lock); in hclge_rm_vport_all_vlan_table()
10170 void hclge_uninit_vport_vlan_table(struct hclge_dev *hdev) in hclge_uninit_vport_vlan_table() argument
10176 mutex_lock(&hdev->vport_lock); in hclge_uninit_vport_vlan_table()
10178 for (i = 0; i < hdev->num_alloc_vport; i++) { in hclge_uninit_vport_vlan_table()
10179 vport = &hdev->vport[i]; in hclge_uninit_vport_vlan_table()
10186 mutex_unlock(&hdev->vport_lock); in hclge_uninit_vport_vlan_table()
10189 void hclge_restore_vport_port_base_vlan_config(struct hclge_dev *hdev) in hclge_restore_vport_port_base_vlan_config() argument
10200 for (vf_id = 0; vf_id < hdev->num_alloc_vfs; vf_id++) { in hclge_restore_vport_port_base_vlan_config()
10201 vport = &hdev->vport[vf_id + HCLGE_VF_VPORT_START_NUM]; in hclge_restore_vport_port_base_vlan_config()
10211 clear_bit(vport->vport_id, hdev->vlan_table[vlan_id]); in hclge_restore_vport_port_base_vlan_config()
10212 ret = hclge_set_vlan_filter_hw(hdev, htons(vlan_proto), in hclge_restore_vport_port_base_vlan_config()
10223 struct hclge_dev *hdev = vport->back; in hclge_restore_vport_vlan_table() local
10226 mutex_lock(&hdev->vport_lock); in hclge_restore_vport_vlan_table()
10230 ret = hclge_set_vlan_filter_hw(hdev, htons(ETH_P_8021Q), in hclge_restore_vport_vlan_table()
10239 mutex_unlock(&hdev->vport_lock); in hclge_restore_vport_vlan_table()
10273 static void hclge_restore_hw_table(struct hclge_dev *hdev) in hclge_restore_hw_table() argument
10275 struct hclge_vport *vport = &hdev->vport[0]; in hclge_restore_hw_table()
10279 hclge_restore_vport_port_base_vlan_config(hdev); in hclge_restore_hw_table()
10281 set_bit(HCLGE_STATE_FD_USER_DEF_CHANGED, &hdev->state); in hclge_restore_hw_table()
10309 struct hclge_dev *hdev = vport->back; in hclge_set_vport_vlan_fltr_change() local
10311 if (test_bit(HNAE3_DEV_SUPPORT_VLAN_FLTR_MDF_B, hdev->ae_dev->caps)) in hclge_set_vport_vlan_fltr_change()
10320 struct hclge_dev *hdev = vport->back; in hclge_update_vlan_filter_entries() local
10326 ret = hclge_set_vf_vlan_common(hdev, vport->vport_id, true, 0); in hclge_update_vlan_filter_entries()
10329 return hclge_set_vlan_filter_hw(hdev, in hclge_update_vlan_filter_entries()
10339 ret = hclge_set_vf_vlan_common(hdev, vport->vport_id, false, 0); in hclge_update_vlan_filter_entries()
10343 ret = hclge_set_vlan_filter_hw(hdev, htons(old_info->vlan_proto), in hclge_update_vlan_filter_entries()
10368 struct hclge_dev *hdev = vport->back; in hclge_modify_port_base_vlan_tag() local
10372 ret = hclge_set_vlan_filter_hw(hdev, htons(new_info->vlan_proto), in hclge_modify_port_base_vlan_tag()
10381 ret = hclge_set_vf_vlan_common(hdev, vport->vport_id, in hclge_modify_port_base_vlan_tag()
10384 ret = hclge_set_vlan_filter_hw(hdev, htons(ETH_P_8021Q), in hclge_modify_port_base_vlan_tag()
10388 dev_err(&hdev->pdev->dev, in hclge_modify_port_base_vlan_tag()
10462 struct hclge_dev *hdev = vport->back; in hclge_set_vf_vlan_filter() local
10467 if (hdev->ae_dev->dev_version < HNAE3_DEVICE_VERSION_V2) in hclge_set_vf_vlan_filter()
10470 vport = hclge_get_vf_vport(hdev, vfid); in hclge_set_vf_vlan_filter()
10492 dev_err(&hdev->pdev->dev, in hclge_set_vf_vlan_filter()
10506 (void)hclge_push_vf_port_base_vlan_info(&hdev->vport[0], in hclge_set_vf_vlan_filter()
10517 static void hclge_clear_vf_vlan(struct hclge_dev *hdev) in hclge_clear_vf_vlan() argument
10525 for (vf = HCLGE_VF_VPORT_START_NUM; vf < hdev->num_alloc_vport; vf++) { in hclge_clear_vf_vlan()
10526 vport = &hdev->vport[vf]; in hclge_clear_vf_vlan()
10529 ret = hclge_set_vlan_filter_hw(hdev, htons(ETH_P_8021Q), in hclge_clear_vf_vlan()
10533 dev_err(&hdev->pdev->dev, in hclge_clear_vf_vlan()
10543 struct hclge_dev *hdev = vport->back; in hclge_set_vlan_filter() local
10551 mutex_lock(&hdev->vport_lock); in hclge_set_vlan_filter()
10552 if ((test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) || in hclge_set_vlan_filter()
10553 test_bit(HCLGE_STATE_RST_FAIL, &hdev->state)) && is_kill) { in hclge_set_vlan_filter()
10555 mutex_unlock(&hdev->vport_lock); in hclge_set_vlan_filter()
10560 mutex_unlock(&hdev->vport_lock); in hclge_set_vlan_filter()
10569 ret = hclge_set_vlan_filter_hw(hdev, proto, vport->vport_id, in hclge_set_vlan_filter()
10579 mutex_lock(&hdev->vport_lock); in hclge_set_vlan_filter()
10581 mutex_unlock(&hdev->vport_lock); in hclge_set_vlan_filter()
10588 mutex_lock(&hdev->vport_lock); in hclge_set_vlan_filter()
10590 mutex_unlock(&hdev->vport_lock); in hclge_set_vlan_filter()
10598 static void hclge_sync_vlan_fltr_state(struct hclge_dev *hdev) in hclge_sync_vlan_fltr_state() argument
10604 for (i = 0; i < hdev->num_alloc_vport; i++) { in hclge_sync_vlan_fltr_state()
10605 vport = &hdev->vport[i]; in hclge_sync_vlan_fltr_state()
10613 dev_err(&hdev->pdev->dev, in hclge_sync_vlan_fltr_state()
10623 static void hclge_sync_vlan_filter(struct hclge_dev *hdev) in hclge_sync_vlan_filter() argument
10630 mutex_lock(&hdev->vport_lock); in hclge_sync_vlan_filter()
10632 for (i = 0; i < hdev->num_alloc_vport; i++) { in hclge_sync_vlan_filter()
10633 struct hclge_vport *vport = &hdev->vport[i]; in hclge_sync_vlan_filter()
10638 ret = hclge_set_vlan_filter_hw(hdev, htons(ETH_P_8021Q), in hclge_sync_vlan_filter()
10642 mutex_unlock(&hdev->vport_lock); in hclge_sync_vlan_filter()
10652 mutex_unlock(&hdev->vport_lock); in hclge_sync_vlan_filter()
10660 mutex_unlock(&hdev->vport_lock); in hclge_sync_vlan_filter()
10662 hclge_sync_vlan_fltr_state(hdev); in hclge_sync_vlan_filter()
10665 static int hclge_set_mac_mtu(struct hclge_dev *hdev, int new_mps) in hclge_set_mac_mtu() argument
10676 return hclge_cmd_send(&hdev->hw, &desc, 1); in hclge_set_mac_mtu()
10688 struct hclge_dev *hdev = vport->back; in hclge_set_vport_mtu() local
10694 max_frm_size > hdev->ae_dev->dev_specs.max_frm_size) in hclge_set_vport_mtu()
10698 mutex_lock(&hdev->vport_lock); in hclge_set_vport_mtu()
10700 if (vport->vport_id && max_frm_size > hdev->mps) { in hclge_set_vport_mtu()
10701 mutex_unlock(&hdev->vport_lock); in hclge_set_vport_mtu()
10705 mutex_unlock(&hdev->vport_lock); in hclge_set_vport_mtu()
10710 for (i = 1; i < hdev->num_alloc_vport; i++) in hclge_set_vport_mtu()
10711 if (max_frm_size < hdev->vport[i].mps) { in hclge_set_vport_mtu()
10712 dev_err(&hdev->pdev->dev, in hclge_set_vport_mtu()
10714 i, hdev->vport[i].mps); in hclge_set_vport_mtu()
10715 mutex_unlock(&hdev->vport_lock); in hclge_set_vport_mtu()
10719 hclge_notify_client(hdev, HNAE3_DOWN_CLIENT); in hclge_set_vport_mtu()
10721 ret = hclge_set_mac_mtu(hdev, max_frm_size); in hclge_set_vport_mtu()
10723 dev_err(&hdev->pdev->dev, in hclge_set_vport_mtu()
10728 hdev->mps = max_frm_size; in hclge_set_vport_mtu()
10731 ret = hclge_buffer_alloc(hdev); in hclge_set_vport_mtu()
10733 dev_err(&hdev->pdev->dev, in hclge_set_vport_mtu()
10737 hclge_notify_client(hdev, HNAE3_UP_CLIENT); in hclge_set_vport_mtu()
10738 mutex_unlock(&hdev->vport_lock); in hclge_set_vport_mtu()
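hclge_set_vport_mtu() validates the new frame size before touching hardware: a VF may not exceed the PF's current mps, and the PF may not shrink below any VF's mps; only then does it stop the client, program the MAC MTU, reallocate buffers, and restart. The validation step in isolation, under the same simplifying assumption that vport 0 is the PF:

#include <errno.h>

struct mtu_vport {
        unsigned int mps;       /* current max packet size of this vport */
};

static int check_new_mps(const struct mtu_vport *vports, int num_vport,
                         int vport_id, unsigned int new_mps)
{
        int i;

        if (vport_id != 0)      /* VF: must stay within the PF frame size */
                return new_mps > vports[0].mps ? -EINVAL : 0;

        for (i = 1; i < num_vport; i++)         /* PF: must cover every VF */
                if (new_mps < vports[i].mps)
                        return -EINVAL;
        return 0;
}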
10742 static int hclge_reset_tqp_cmd_send(struct hclge_dev *hdev, u16 queue_id, in hclge_reset_tqp_cmd_send() argument
10756 ret = hclge_cmd_send(&hdev->hw, &desc, 1); in hclge_reset_tqp_cmd_send()
10758 dev_err(&hdev->pdev->dev, in hclge_reset_tqp_cmd_send()
10766 static int hclge_get_reset_status(struct hclge_dev *hdev, u16 queue_id, in hclge_get_reset_status() argument
10778 ret = hclge_cmd_send(&hdev->hw, &desc, 1); in hclge_get_reset_status()
10780 dev_err(&hdev->pdev->dev, in hclge_get_reset_status()
10804 struct hclge_dev *hdev = vport->back; in hclge_reset_tqp_cmd() local
10813 ret = hclge_reset_tqp_cmd_send(hdev, queue_gid, true); in hclge_reset_tqp_cmd()
10815 dev_err(&hdev->pdev->dev, in hclge_reset_tqp_cmd()
10822 ret = hclge_get_reset_status(hdev, queue_gid, in hclge_reset_tqp_cmd()
10835 dev_err(&hdev->pdev->dev, in hclge_reset_tqp_cmd()
10840 ret = hclge_reset_tqp_cmd_send(hdev, queue_gid, false); in hclge_reset_tqp_cmd()
10842 dev_err(&hdev->pdev->dev, in hclge_reset_tqp_cmd()
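hclge_reset_tqp_cmd() follows an assert/poll/deassert sequence: send the reset command with enable set, poll the reset status a bounded number of times, then send the command again with enable cleared. The same control flow as a sketch (the command helpers are hypothetical stand-ins; the retry count and delay are illustrative):

#include <errno.h>
#include <stdbool.h>
#include <unistd.h>

#define RESET_TRY_TIMES 200

int tqp_reset_cmd(unsigned short queue, bool enable);              /* stand-in */
int tqp_reset_status(unsigned short queue, bool *done);            /* stand-in */

static int reset_tqp(unsigned short queue)
{
        bool done = false;
        int tries = 0, ret;

        ret = tqp_reset_cmd(queue, true);       /* assert reset */
        if (ret)
                return ret;

        while (tries++ < RESET_TRY_TIMES) {
                ret = tqp_reset_status(queue, &done);
                if (ret)
                        return ret;
                if (done)
                        break;
                usleep(1000);
        }
        if (!done)
                return -ETIME;

        return tqp_reset_cmd(queue, false);     /* deassert reset */
}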
10858 struct hclge_dev *hdev = vport->back; in hclge_reset_rcb() local
10873 ret = hclge_cmd_send(&hdev->hw, &desc, 1); in hclge_reset_rcb()
10875 dev_err(&hdev->pdev->dev, in hclge_reset_rcb()
10885 dev_err(&hdev->pdev->dev, "failed to reset rcb, ret = %u\n", in hclge_reset_rcb()
10899 struct hclge_dev *hdev = vport->back; in hclge_reset_tqp() local
10906 dev_err(&hdev->pdev->dev, in hclge_reset_tqp()
10918 struct hclge_dev *hdev = vport->back; in hclge_get_fw_version() local
10920 return hdev->fw_version; in hclge_get_fw_version()
10923 int hclge_query_scc_version(struct hclge_dev *hdev, u32 *scc_version) in hclge_query_scc_version() argument
10932 ret = hclge_cmd_send(&hdev->hw, &desc, 1); in hclge_query_scc_version()
10941 static void hclge_set_flowctrl_adv(struct hclge_dev *hdev, u32 rx_en, u32 tx_en) in hclge_set_flowctrl_adv() argument
10943 struct phy_device *phydev = hdev->hw.mac.phydev; in hclge_set_flowctrl_adv()
10951 static int hclge_cfg_pauseparam(struct hclge_dev *hdev, u32 rx_en, u32 tx_en) in hclge_cfg_pauseparam() argument
10955 if (hdev->tm_info.fc_mode == HCLGE_FC_PFC) in hclge_cfg_pauseparam()
10958 ret = hclge_mac_pause_en_cfg(hdev, tx_en, rx_en); in hclge_cfg_pauseparam()
10960 dev_err(&hdev->pdev->dev, in hclge_cfg_pauseparam()
10966 int hclge_cfg_flowctrl(struct hclge_dev *hdev) in hclge_cfg_flowctrl() argument
10968 struct phy_device *phydev = hdev->hw.mac.phydev; in hclge_cfg_flowctrl()
10978 return hclge_mac_pause_setup_hw(hdev); in hclge_cfg_flowctrl()
10998 return hclge_cfg_pauseparam(hdev, rx_pause, tx_pause); in hclge_cfg_flowctrl()
11005 struct hclge_dev *hdev = vport->back; in hclge_get_pauseparam() local
11006 u8 media_type = hdev->hw.mac.media_type; in hclge_get_pauseparam()
11011 if (hdev->tm_info.fc_mode == HCLGE_FC_PFC) { in hclge_get_pauseparam()
11017 if (hdev->tm_info.fc_mode == HCLGE_FC_RX_PAUSE) { in hclge_get_pauseparam()
11020 } else if (hdev->tm_info.fc_mode == HCLGE_FC_TX_PAUSE) { in hclge_get_pauseparam()
11023 } else if (hdev->tm_info.fc_mode == HCLGE_FC_FULL) { in hclge_get_pauseparam()
11032 static void hclge_record_user_pauseparam(struct hclge_dev *hdev, in hclge_record_user_pauseparam() argument
11036 hdev->fc_mode_last_time = HCLGE_FC_FULL; in hclge_record_user_pauseparam()
11038 hdev->fc_mode_last_time = HCLGE_FC_RX_PAUSE; in hclge_record_user_pauseparam()
11040 hdev->fc_mode_last_time = HCLGE_FC_TX_PAUSE; in hclge_record_user_pauseparam()
11042 hdev->fc_mode_last_time = HCLGE_FC_NONE; in hclge_record_user_pauseparam()
11044 hdev->tm_info.fc_mode = hdev->fc_mode_last_time; in hclge_record_user_pauseparam()
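hclge_record_user_pauseparam() collapses the ethtool rx_en/tx_en pair into one flow-control mode, remembered in fc_mode_last_time and copied into tm_info.fc_mode. The mapping itself, extracted from the lines above:

enum fc_mode { FC_NONE, FC_RX_PAUSE, FC_TX_PAUSE, FC_FULL, FC_PFC };

static enum fc_mode pauseparam_to_fc_mode(unsigned int rx_en, unsigned int tx_en)
{
        if (rx_en && tx_en)
                return FC_FULL;
        if (rx_en)
                return FC_RX_PAUSE;
        if (tx_en)
                return FC_TX_PAUSE;
        return FC_NONE;
}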
11051 struct hclge_dev *hdev = vport->back; in hclge_set_pauseparam() local
11052 struct phy_device *phydev = hdev->hw.mac.phydev; in hclge_set_pauseparam()
11055 if (phydev || hnae3_dev_phy_imp_supported(hdev)) { in hclge_set_pauseparam()
11058 dev_info(&hdev->pdev->dev, in hclge_set_pauseparam()
11064 if (hdev->tm_info.fc_mode == HCLGE_FC_PFC) { in hclge_set_pauseparam()
11065 dev_info(&hdev->pdev->dev, in hclge_set_pauseparam()
11070 hclge_set_flowctrl_adv(hdev, rx_en, tx_en); in hclge_set_pauseparam()
11072 hclge_record_user_pauseparam(hdev, rx_en, tx_en); in hclge_set_pauseparam()
11074 if (!auto_neg || hnae3_dev_phy_imp_supported(hdev)) in hclge_set_pauseparam()
11075 return hclge_cfg_pauseparam(hdev, rx_en, tx_en); in hclge_set_pauseparam()
11087 struct hclge_dev *hdev = vport->back; in hclge_get_ksettings_an_result() local
11090 *speed = hdev->hw.mac.speed; in hclge_get_ksettings_an_result()
11092 *duplex = hdev->hw.mac.duplex; in hclge_get_ksettings_an_result()
11094 *auto_neg = hdev->hw.mac.autoneg; in hclge_get_ksettings_an_result()
11096 *lane_num = hdev->hw.mac.lane_num; in hclge_get_ksettings_an_result()
11103 struct hclge_dev *hdev = vport->back; in hclge_get_media_type() local
11109 hclge_update_port_info(hdev); in hclge_get_media_type()
11112 *media_type = hdev->hw.mac.media_type; in hclge_get_media_type()
11115 *module_type = hdev->hw.mac.module_type; in hclge_get_media_type()
11122 struct hclge_dev *hdev = vport->back; in hclge_get_mdix_mode() local
11123 struct phy_device *phydev = hdev->hw.mac.phydev; in hclge_get_mdix_mode()
11168 static void hclge_info_show(struct hclge_dev *hdev) in hclge_info_show() argument
11170 struct hnae3_handle *handle = &hdev->vport->nic; in hclge_info_show()
11171 struct device *dev = &hdev->pdev->dev; in hclge_info_show()
11175 dev_info(dev, "Task queue pairs numbers: %u\n", hdev->num_tqps); in hclge_info_show()
11176 dev_info(dev, "Desc num per TX queue: %u\n", hdev->num_tx_desc); in hclge_info_show()
11177 dev_info(dev, "Desc num per RX queue: %u\n", hdev->num_rx_desc); in hclge_info_show()
11178 dev_info(dev, "Numbers of vports: %u\n", hdev->num_alloc_vport); in hclge_info_show()
11179 dev_info(dev, "Numbers of VF for this PF: %u\n", hdev->num_req_vfs); in hclge_info_show()
11180 dev_info(dev, "HW tc map: 0x%x\n", hdev->hw_tc_map); in hclge_info_show()
11181 dev_info(dev, "Total buffer size for TX/RX: %u\n", hdev->pkt_buf_size); in hclge_info_show()
11182 dev_info(dev, "TX buffer size for each TC: %u\n", hdev->tx_buf_size); in hclge_info_show()
11183 dev_info(dev, "DV buffer size for each TC: %u\n", hdev->dv_buf_size); in hclge_info_show()
11185 hdev->flag & HCLGE_FLAG_MAIN ? "main" : "not main"); in hclge_info_show()
11191 hdev->tx_spare_buf_size); in hclge_info_show()
11200 struct hclge_dev *hdev = ae_dev->priv; in hclge_init_nic_client_instance() local
11201 int rst_cnt = hdev->rst_stats.reset_cnt; in hclge_init_nic_client_instance()
11208 set_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state); in hclge_init_nic_client_instance()
11209 if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) || in hclge_init_nic_client_instance()
11210 rst_cnt != hdev->rst_stats.reset_cnt) { in hclge_init_nic_client_instance()
11216 ret = hclge_config_nic_hw_error(hdev, true); in hclge_init_nic_client_instance()
11225 if (netif_msg_drv(&hdev->vport->nic)) in hclge_init_nic_client_instance()
11226 hclge_info_show(hdev); in hclge_init_nic_client_instance()
11231 clear_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state); in hclge_init_nic_client_instance()
11232 while (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state)) in hclge_init_nic_client_instance()
11243 struct hclge_dev *hdev = ae_dev->priv; in hclge_init_roce_client_instance() local
11248 if (!hnae3_dev_roce_supported(hdev) || !hdev->roce_client || in hclge_init_roce_client_instance()
11249 !hdev->nic_client) in hclge_init_roce_client_instance()
11252 client = hdev->roce_client; in hclge_init_roce_client_instance()
11257 rst_cnt = hdev->rst_stats.reset_cnt; in hclge_init_roce_client_instance()
11262 set_bit(HCLGE_STATE_ROCE_REGISTERED, &hdev->state); in hclge_init_roce_client_instance()
11263 if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) || in hclge_init_roce_client_instance()
11264 rst_cnt != hdev->rst_stats.reset_cnt) { in hclge_init_roce_client_instance()
11270 ret = hclge_config_rocee_ras_interrupt(hdev, true); in hclge_init_roce_client_instance()
11282 clear_bit(HCLGE_STATE_ROCE_REGISTERED, &hdev->state); in hclge_init_roce_client_instance()
11283 while (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state)) in hclge_init_roce_client_instance()
11286 hdev->roce_client->ops->uninit_instance(&vport->roce, 0); in hclge_init_roce_client_instance()
11294 struct hclge_dev *hdev = ae_dev->priv; in hclge_init_client_instance() local
11295 struct hclge_vport *vport = &hdev->vport[0]; in hclge_init_client_instance()
11300 hdev->nic_client = client; in hclge_init_client_instance()
11312 if (hnae3_dev_roce_supported(hdev)) { in hclge_init_client_instance()
11313 hdev->roce_client = client; in hclge_init_client_instance()
11329 hdev->nic_client = NULL; in hclge_init_client_instance()
11333 hdev->roce_client = NULL; in hclge_init_client_instance()
11338 static bool hclge_uninit_need_wait(struct hclge_dev *hdev) in hclge_uninit_need_wait() argument
11340 return test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) || in hclge_uninit_need_wait()
11341 test_bit(HCLGE_STATE_LINK_UPDATING, &hdev->state); in hclge_uninit_need_wait()
11347 struct hclge_dev *hdev = ae_dev->priv; in hclge_uninit_client_instance() local
11348 struct hclge_vport *vport = &hdev->vport[0]; in hclge_uninit_client_instance()
11350 if (hdev->roce_client) { in hclge_uninit_client_instance()
11351 clear_bit(HCLGE_STATE_ROCE_REGISTERED, &hdev->state); in hclge_uninit_client_instance()
11352 while (hclge_uninit_need_wait(hdev)) in hclge_uninit_client_instance()
11355 hdev->roce_client->ops->uninit_instance(&vport->roce, 0); in hclge_uninit_client_instance()
11356 hdev->roce_client = NULL; in hclge_uninit_client_instance()
11361 if (hdev->nic_client && client->ops->uninit_instance) { in hclge_uninit_client_instance()
11362 clear_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state); in hclge_uninit_client_instance()
11363 while (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state)) in hclge_uninit_client_instance()
11367 hdev->nic_client = NULL; in hclge_uninit_client_instance()
11372 static int hclge_dev_mem_map(struct hclge_dev *hdev) in hclge_dev_mem_map() argument
11374 struct pci_dev *pdev = hdev->pdev; in hclge_dev_mem_map()
11375 struct hclge_hw *hw = &hdev->hw; in hclge_dev_mem_map()
11393 static int hclge_pci_init(struct hclge_dev *hdev) in hclge_pci_init() argument
11395 struct pci_dev *pdev = hdev->pdev; in hclge_pci_init()
11423 hw = &hdev->hw; in hclge_pci_init()
11431 ret = hclge_dev_mem_map(hdev); in hclge_pci_init()
11435 hdev->num_req_vfs = pci_sriov_get_totalvfs(pdev); in hclge_pci_init()
11440 pcim_iounmap(pdev, hdev->hw.hw.io_base); in hclge_pci_init()
11449 static void hclge_pci_uninit(struct hclge_dev *hdev) in hclge_pci_uninit() argument
11451 struct pci_dev *pdev = hdev->pdev; in hclge_pci_uninit()
11453 if (hdev->hw.hw.mem_base) in hclge_pci_uninit()
11454 devm_iounmap(&pdev->dev, hdev->hw.hw.mem_base); in hclge_pci_uninit()
11456 pcim_iounmap(pdev, hdev->hw.hw.io_base); in hclge_pci_uninit()
11462 static void hclge_state_init(struct hclge_dev *hdev) in hclge_state_init() argument
11464 set_bit(HCLGE_STATE_SERVICE_INITED, &hdev->state); in hclge_state_init()
11465 set_bit(HCLGE_STATE_DOWN, &hdev->state); in hclge_state_init()
11466 clear_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state); in hclge_state_init()
11467 clear_bit(HCLGE_STATE_RST_HANDLING, &hdev->state); in hclge_state_init()
11468 clear_bit(HCLGE_STATE_RST_FAIL, &hdev->state); in hclge_state_init()
11469 clear_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state); in hclge_state_init()
11470 clear_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state); in hclge_state_init()
11473 static void hclge_state_uninit(struct hclge_dev *hdev) in hclge_state_uninit() argument
11475 set_bit(HCLGE_STATE_DOWN, &hdev->state); in hclge_state_uninit()
11476 set_bit(HCLGE_STATE_REMOVING, &hdev->state); in hclge_state_uninit()
11478 if (hdev->reset_timer.function) in hclge_state_uninit()
11479 del_timer_sync(&hdev->reset_timer); in hclge_state_uninit()
11480 if (hdev->service_task.work.func) in hclge_state_uninit()
11481 cancel_delayed_work_sync(&hdev->service_task); in hclge_state_uninit()
11490 struct hclge_dev *hdev = ae_dev->priv; in hclge_reset_prepare_general() local
11495 down(&hdev->reset_sem); in hclge_reset_prepare_general()
11496 set_bit(HCLGE_STATE_RST_HANDLING, &hdev->state); in hclge_reset_prepare_general()
11497 hdev->reset_type = rst_type; in hclge_reset_prepare_general()
11498 ret = hclge_reset_prepare(hdev); in hclge_reset_prepare_general()
11499 if (!ret && !hdev->reset_pending) in hclge_reset_prepare_general()
11502 dev_err(&hdev->pdev->dev, in hclge_reset_prepare_general()
11504 ret, hdev->reset_pending, retry_cnt); in hclge_reset_prepare_general()
11505 clear_bit(HCLGE_STATE_RST_HANDLING, &hdev->state); in hclge_reset_prepare_general()
11506 up(&hdev->reset_sem); in hclge_reset_prepare_general()
11511 hclge_enable_vector(&hdev->misc_vector, false); in hclge_reset_prepare_general()
11512 set_bit(HCLGE_COMM_STATE_CMD_DISABLE, &hdev->hw.hw.comm_state); in hclge_reset_prepare_general()
11514 if (hdev->reset_type == HNAE3_FLR_RESET) in hclge_reset_prepare_general()
11515 hdev->rst_stats.flr_rst_cnt++; in hclge_reset_prepare_general()
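hclge_reset_prepare_general() wraps the prepare step in a bounded retry loop: each attempt takes reset_sem, marks RST_HANDLING, and runs hclge_reset_prepare(); on failure or a still-pending reset it logs, drops the state, and tries again up to the retry limit. Condensed to its loop structure (the helpers are hypothetical stand-ins; locking and state bits elided):

#include <errno.h>

#define RESET_RETRY_CNT 5

int do_reset_prepare(void);                                        /* stand-in */
int reset_still_pending(void);                                     /* stand-in */

static int reset_prepare_general(void)
{
        int retry = 0, ret = 0;

        do {
                ret = do_reset_prepare();
                if (!ret && !reset_still_pending())
                        return 0;       /* prepared, nothing left pending */
        } while (++retry < RESET_RETRY_CNT);

        return ret ? ret : -EBUSY;
}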
11520 struct hclge_dev *hdev = ae_dev->priv; in hclge_reset_done() local
11523 hclge_enable_vector(&hdev->misc_vector, true); in hclge_reset_done()
11525 ret = hclge_reset_rebuild(hdev); in hclge_reset_done()
11527 dev_err(&hdev->pdev->dev, "fail to rebuild, ret=%d\n", ret); in hclge_reset_done()
11529 hdev->reset_type = HNAE3_NONE_RESET; in hclge_reset_done()
11530 if (test_and_clear_bit(HCLGE_STATE_RST_HANDLING, &hdev->state)) in hclge_reset_done()
11531 up(&hdev->reset_sem); in hclge_reset_done()
11534 static void hclge_clear_resetting_state(struct hclge_dev *hdev) in hclge_clear_resetting_state() argument
11538 for (i = 0; i < hdev->num_alloc_vport; i++) { in hclge_clear_resetting_state()
11539 struct hclge_vport *vport = &hdev->vport[i]; in hclge_clear_resetting_state()
11543 ret = hclge_set_vf_rst(hdev, vport->vport_id, false); in hclge_clear_resetting_state()
11545 dev_warn(&hdev->pdev->dev, in hclge_clear_resetting_state()
11551 static int hclge_clear_hw_resource(struct hclge_dev *hdev) in hclge_clear_hw_resource() argument
11558 ret = hclge_cmd_send(&hdev->hw, &desc, 1); in hclge_clear_hw_resource()
11566 dev_err(&hdev->pdev->dev, in hclge_clear_hw_resource()
11573 static void hclge_init_rxd_adv_layout(struct hclge_dev *hdev) in hclge_init_rxd_adv_layout() argument
11575 if (hnae3_ae_dev_rxd_adv_layout_supported(hdev->ae_dev)) in hclge_init_rxd_adv_layout()
11576 hclge_write_dev(&hdev->hw, HCLGE_RXD_ADV_LAYOUT_EN_REG, 1); in hclge_init_rxd_adv_layout()
11579 static void hclge_uninit_rxd_adv_layout(struct hclge_dev *hdev) in hclge_uninit_rxd_adv_layout() argument
11581 if (hnae3_ae_dev_rxd_adv_layout_supported(hdev->ae_dev)) in hclge_uninit_rxd_adv_layout()
11582 hclge_write_dev(&hdev->hw, HCLGE_RXD_ADV_LAYOUT_EN_REG, 0); in hclge_uninit_rxd_adv_layout()
11592 static int hclge_get_wol_supported_mode(struct hclge_dev *hdev, in hclge_get_wol_supported_mode() argument
11603 ret = hclge_cmd_send(&hdev->hw, &desc, 1); in hclge_get_wol_supported_mode()
11605 dev_err(&hdev->pdev->dev, in hclge_get_wol_supported_mode()
11615 static int hclge_set_wol_cfg(struct hclge_dev *hdev, in hclge_set_wol_cfg() argument
11628 ret = hclge_cmd_send(&hdev->hw, &desc, 1); in hclge_set_wol_cfg()
11630 dev_err(&hdev->pdev->dev, in hclge_set_wol_cfg()
11636 static int hclge_update_wol(struct hclge_dev *hdev) in hclge_update_wol() argument
11638 struct hclge_wol_info *wol_info = &hdev->hw.mac.wol; in hclge_update_wol()
11640 if (!hnae3_ae_dev_wol_supported(hdev->ae_dev)) in hclge_update_wol()
11643 return hclge_set_wol_cfg(hdev, wol_info); in hclge_update_wol()
11646 static int hclge_init_wol(struct hclge_dev *hdev) in hclge_init_wol() argument
11648 struct hclge_wol_info *wol_info = &hdev->hw.mac.wol; in hclge_init_wol()
11651 if (!hnae3_ae_dev_wol_supported(hdev->ae_dev)) in hclge_init_wol()
11655 ret = hclge_get_wol_supported_mode(hdev, in hclge_init_wol()
11662 return hclge_update_wol(hdev); in hclge_init_wol()
11706 struct hclge_dev *hdev; in hclge_init_ae_dev() local
11709 hdev = devm_kzalloc(&pdev->dev, sizeof(*hdev), GFP_KERNEL); in hclge_init_ae_dev()
11710 if (!hdev) in hclge_init_ae_dev()
11713 hdev->pdev = pdev; in hclge_init_ae_dev()
11714 hdev->ae_dev = ae_dev; in hclge_init_ae_dev()
11715 hdev->reset_type = HNAE3_NONE_RESET; in hclge_init_ae_dev()
11716 hdev->reset_level = HNAE3_FUNC_RESET; in hclge_init_ae_dev()
11717 ae_dev->priv = hdev; in hclge_init_ae_dev()
11720 hdev->mps = ETH_FRAME_LEN + ETH_FCS_LEN + 2 * VLAN_HLEN; in hclge_init_ae_dev()
11722 mutex_init(&hdev->vport_lock); in hclge_init_ae_dev()
11723 spin_lock_init(&hdev->fd_rule_lock); in hclge_init_ae_dev()
11724 sema_init(&hdev->reset_sem, 1); in hclge_init_ae_dev()
11726 ret = hclge_pci_init(hdev); in hclge_init_ae_dev()
11731 ret = hclge_comm_cmd_queue_init(hdev->pdev, &hdev->hw.hw); in hclge_init_ae_dev()
11736 hclge_comm_cmd_init_ops(&hdev->hw.hw, &hclge_cmq_ops); in hclge_init_ae_dev()
11737 ret = hclge_comm_cmd_init(hdev->ae_dev, &hdev->hw.hw, &hdev->fw_version, in hclge_init_ae_dev()
11738 true, hdev->reset_pending); in hclge_init_ae_dev()
11742 ret = hclge_clear_hw_resource(hdev); in hclge_init_ae_dev()
11746 ret = hclge_get_cap(hdev); in hclge_init_ae_dev()
11750 ret = hclge_query_dev_specs(hdev); in hclge_init_ae_dev()
11757 ret = hclge_configure(hdev); in hclge_init_ae_dev()
11763 ret = hclge_init_msi(hdev); in hclge_init_ae_dev()
11769 ret = hclge_misc_irq_init(hdev); in hclge_init_ae_dev()
11773 ret = hclge_alloc_tqps(hdev); in hclge_init_ae_dev()
11779 ret = hclge_alloc_vport(hdev); in hclge_init_ae_dev()
11783 ret = hclge_map_tqp(hdev); in hclge_init_ae_dev()
11787 if (hdev->hw.mac.media_type == HNAE3_MEDIA_TYPE_COPPER) { in hclge_init_ae_dev()
11789 if (hnae3_dev_phy_imp_supported(hdev)) in hclge_init_ae_dev()
11790 ret = hclge_update_tp_port_info(hdev); in hclge_init_ae_dev()
11792 ret = hclge_mac_mdio_config(hdev); in hclge_init_ae_dev()
11798 ret = hclge_init_umv_space(hdev); in hclge_init_ae_dev()
11802 ret = hclge_mac_init(hdev); in hclge_init_ae_dev()
11808 ret = hclge_config_tso(hdev, HCLGE_TSO_MSS_MIN, HCLGE_TSO_MSS_MAX); in hclge_init_ae_dev()
11814 ret = hclge_config_gro(hdev); in hclge_init_ae_dev()
11818 ret = hclge_init_vlan_config(hdev); in hclge_init_ae_dev()
11824 ret = hclge_tm_schd_init(hdev); in hclge_init_ae_dev()
11830 ret = hclge_comm_rss_init_cfg(&hdev->vport->nic, hdev->ae_dev, in hclge_init_ae_dev()
11831 &hdev->rss_cfg); in hclge_init_ae_dev()
11837 ret = hclge_rss_init_hw(hdev); in hclge_init_ae_dev()
11843 ret = init_mgr_tbl(hdev); in hclge_init_ae_dev()
11849 ret = hclge_init_fd_config(hdev); in hclge_init_ae_dev()
11856 ret = hclge_ptp_init(hdev); in hclge_init_ae_dev()
11860 ret = hclge_update_port_info(hdev); in hclge_init_ae_dev()
11864 INIT_KFIFO(hdev->mac_tnl_log); in hclge_init_ae_dev()
11866 hclge_dcb_ops_set(hdev); in hclge_init_ae_dev()
11868 timer_setup(&hdev->reset_timer, hclge_reset_timer, 0); in hclge_init_ae_dev()
11869 INIT_DELAYED_WORK(&hdev->service_task, hclge_service_task); in hclge_init_ae_dev()
11871 hclge_clear_all_event_cause(hdev); in hclge_init_ae_dev()
11872 hclge_clear_resetting_state(hdev); in hclge_init_ae_dev()
11875 if (hnae3_dev_ras_imp_supported(hdev)) in hclge_init_ae_dev()
11876 hclge_handle_occurred_error(hdev); in hclge_init_ae_dev()
11889 mod_timer(&hdev->reset_timer, jiffies + HCLGE_RESET_INTERVAL); in hclge_init_ae_dev()
11892 hclge_init_rxd_adv_layout(hdev); in hclge_init_ae_dev()
11895 hclge_enable_vector(&hdev->misc_vector, true); in hclge_init_ae_dev()
11897 ret = hclge_init_wol(hdev); in hclge_init_ae_dev()
11902 ret = hclge_devlink_init(hdev); in hclge_init_ae_dev()
11906 hclge_state_init(hdev); in hclge_init_ae_dev()
11907 hdev->last_reset_time = jiffies; in hclge_init_ae_dev()
11909 dev_info(&hdev->pdev->dev, "%s driver initialization finished.\n", in hclge_init_ae_dev()
11912 hclge_task_schedule(hdev, round_jiffies_relative(HZ)); in hclge_init_ae_dev()
11916 hclge_ptp_uninit(hdev); in hclge_init_ae_dev()
11918 if (hdev->hw.mac.phydev) in hclge_init_ae_dev()
11919 mdiobus_unregister(hdev->hw.mac.mdio_bus); in hclge_init_ae_dev()
11921 hclge_misc_irq_uninit(hdev); in hclge_init_ae_dev()
11925 hclge_comm_cmd_uninit(hdev->ae_dev, &hdev->hw.hw); in hclge_init_ae_dev()
11927 pcim_iounmap(pdev, hdev->hw.hw.io_base); in hclge_init_ae_dev()
11931 mutex_destroy(&hdev->vport_lock); in hclge_init_ae_dev()
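The hclge_init_ae_dev() references above trace a long probe sequence whose error labels unwind the completed stages in reverse order (PTP uninit, mdiobus unregister, misc IRQ uninit, command-queue uninit, PCI unmap, mutex destroy). The classic goto-unwind shape, reduced to three hypothetical stages:

int stage_a_init(void);         /* e.g. pci init       (stand-in) */
void stage_a_exit(void);
int stage_b_init(void);         /* e.g. cmd queue init (stand-in) */
void stage_b_exit(void);
int stage_c_init(void);         /* e.g. misc irq init  (stand-in) */

static int init_ae_dev(void)
{
        int ret;

        ret = stage_a_init();
        if (ret)
                return ret;
        ret = stage_b_init();
        if (ret)
                goto err_a;
        ret = stage_c_init();
        if (ret)
                goto err_b;
        return 0;

err_b:
        stage_b_exit();
err_a:
        stage_a_exit();
        return ret;
}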
11935 static void hclge_stats_clear(struct hclge_dev *hdev) in hclge_stats_clear() argument
11937 memset(&hdev->mac_stats, 0, sizeof(hdev->mac_stats)); in hclge_stats_clear()
11938 memset(&hdev->fec_stats, 0, sizeof(hdev->fec_stats)); in hclge_stats_clear()
11941 static int hclge_set_mac_spoofchk(struct hclge_dev *hdev, int vf, bool enable) in hclge_set_mac_spoofchk() argument
11943 return hclge_config_switch_param(hdev, vf, enable, in hclge_set_mac_spoofchk()
11947 static int hclge_set_vlan_spoofchk(struct hclge_dev *hdev, int vf, bool enable) in hclge_set_vlan_spoofchk() argument
11949 return hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF, in hclge_set_vlan_spoofchk()
11954 static int hclge_set_vf_spoofchk_hw(struct hclge_dev *hdev, int vf, bool enable) in hclge_set_vf_spoofchk_hw() argument
11958 ret = hclge_set_mac_spoofchk(hdev, vf, enable); in hclge_set_vf_spoofchk_hw()
11960 dev_err(&hdev->pdev->dev, in hclge_set_vf_spoofchk_hw()
11966 ret = hclge_set_vlan_spoofchk(hdev, vf, enable); in hclge_set_vf_spoofchk_hw()
11968 dev_err(&hdev->pdev->dev, in hclge_set_vf_spoofchk_hw()
11979 struct hclge_dev *hdev = vport->back; in hclge_set_vf_spoofchk() local
11983 if (hdev->ae_dev->dev_version < HNAE3_DEVICE_VERSION_V2) in hclge_set_vf_spoofchk()
11986 vport = hclge_get_vf_vport(hdev, vf); in hclge_set_vf_spoofchk()
11993 if (enable && test_bit(vport->vport_id, hdev->vf_vlan_full)) in hclge_set_vf_spoofchk()
11994 dev_warn(&hdev->pdev->dev, in hclge_set_vf_spoofchk()
11998 dev_warn(&hdev->pdev->dev, in hclge_set_vf_spoofchk()
12002 ret = hclge_set_vf_spoofchk_hw(hdev, vport->vport_id, enable); in hclge_set_vf_spoofchk()
12010 static int hclge_reset_vport_spoofchk(struct hclge_dev *hdev) in hclge_reset_vport_spoofchk() argument
12012 struct hclge_vport *vport = hdev->vport; in hclge_reset_vport_spoofchk()
12016 if (hdev->ae_dev->dev_version < HNAE3_DEVICE_VERSION_V2) in hclge_reset_vport_spoofchk()
12020 for (i = 0; i < hdev->num_alloc_vport; i++) { in hclge_reset_vport_spoofchk()
12021 ret = hclge_set_vf_spoofchk_hw(hdev, vport->vport_id, in hclge_reset_vport_spoofchk()
12035 struct hclge_dev *hdev = vport->back; in hclge_set_vf_trust() local
12038 vport = hclge_get_vf_vport(hdev, vf); in hclge_set_vf_trust()
12047 hclge_task_schedule(hdev, 0); in hclge_set_vf_trust()
12052 static void hclge_reset_vf_rate(struct hclge_dev *hdev) in hclge_reset_vf_rate() argument
12058 for (vf = HCLGE_VF_VPORT_START_NUM; vf < hdev->num_alloc_vport; vf++) { in hclge_reset_vf_rate()
12059 struct hclge_vport *vport = &hdev->vport[vf]; in hclge_reset_vf_rate()
12064 dev_err(&hdev->pdev->dev, in hclge_reset_vf_rate()
12070 static int hclge_vf_rate_param_check(struct hclge_dev *hdev, in hclge_vf_rate_param_check() argument
12074 max_tx_rate < 0 || max_tx_rate > hdev->hw.mac.max_speed) { in hclge_vf_rate_param_check()
12075 dev_err(&hdev->pdev->dev, in hclge_vf_rate_param_check()
12077 min_tx_rate, max_tx_rate, hdev->hw.mac.max_speed); in hclge_vf_rate_param_check()
12088 struct hclge_dev *hdev = vport->back; in hclge_set_vf_rate() local
12091 ret = hclge_vf_rate_param_check(hdev, min_tx_rate, max_tx_rate); in hclge_set_vf_rate()
12095 vport = hclge_get_vf_vport(hdev, vf); in hclge_set_vf_rate()
12111 static int hclge_resume_vf_rate(struct hclge_dev *hdev) in hclge_resume_vf_rate() argument
12113 struct hnae3_handle *handle = &hdev->vport->nic; in hclge_resume_vf_rate()
12119 for (vf = 0; vf < pci_num_vf(hdev->pdev); vf++) { in hclge_resume_vf_rate()
12120 vport = hclge_get_vf_vport(hdev, vf); in hclge_resume_vf_rate()
12133 dev_err(&hdev->pdev->dev, in hclge_resume_vf_rate()
12143 static void hclge_reset_vport_state(struct hclge_dev *hdev) in hclge_reset_vport_state() argument
12145 struct hclge_vport *vport = hdev->vport; in hclge_reset_vport_state()
12148 for (i = 0; i < hdev->num_alloc_vport; i++) { in hclge_reset_vport_state()
12156 struct hclge_dev *hdev = ae_dev->priv; in hclge_reset_ae_dev() local
12160 set_bit(HCLGE_STATE_DOWN, &hdev->state); in hclge_reset_ae_dev()
12162 hclge_stats_clear(hdev); in hclge_reset_ae_dev()
12166 if (hdev->reset_type == HNAE3_IMP_RESET || in hclge_reset_ae_dev()
12167 hdev->reset_type == HNAE3_GLOBAL_RESET) { in hclge_reset_ae_dev()
12168 memset(hdev->vlan_table, 0, sizeof(hdev->vlan_table)); in hclge_reset_ae_dev()
12169 memset(hdev->vf_vlan_full, 0, sizeof(hdev->vf_vlan_full)); in hclge_reset_ae_dev()
12170 bitmap_set(hdev->vport_config_block, 0, hdev->num_alloc_vport); in hclge_reset_ae_dev()
12171 hclge_reset_umv_space(hdev); in hclge_reset_ae_dev()
12174 ret = hclge_comm_cmd_init(hdev->ae_dev, &hdev->hw.hw, &hdev->fw_version, in hclge_reset_ae_dev()
12175 true, hdev->reset_pending); in hclge_reset_ae_dev()
12181 ret = hclge_map_tqp(hdev); in hclge_reset_ae_dev()
12187 ret = hclge_mac_init(hdev); in hclge_reset_ae_dev()
12193 ret = hclge_tp_port_init(hdev); in hclge_reset_ae_dev()
12200 ret = hclge_config_tso(hdev, HCLGE_TSO_MSS_MIN, HCLGE_TSO_MSS_MAX); in hclge_reset_ae_dev()
12206 ret = hclge_config_gro(hdev); in hclge_reset_ae_dev()
12210 ret = hclge_init_vlan_config(hdev); in hclge_reset_ae_dev()
12216 hclge_reset_tc_config(hdev); in hclge_reset_ae_dev()
12218 ret = hclge_tm_init_hw(hdev, true); in hclge_reset_ae_dev()
12224 ret = hclge_rss_init_hw(hdev); in hclge_reset_ae_dev()
12230 ret = init_mgr_tbl(hdev); in hclge_reset_ae_dev()
12237 ret = hclge_init_fd_config(hdev); in hclge_reset_ae_dev()
12243 ret = hclge_ptp_init(hdev); in hclge_reset_ae_dev()
12248 if (hnae3_dev_ras_imp_supported(hdev)) in hclge_reset_ae_dev()
12249 hclge_handle_occurred_error(hdev); in hclge_reset_ae_dev()
12256 ret = hclge_config_nic_hw_error(hdev, true); in hclge_reset_ae_dev()
12264 if (hdev->roce_client) { in hclge_reset_ae_dev()
12265 ret = hclge_config_rocee_ras_interrupt(hdev, true); in hclge_reset_ae_dev()
12274 hclge_reset_vport_state(hdev); in hclge_reset_ae_dev()
12275 ret = hclge_reset_vport_spoofchk(hdev); in hclge_reset_ae_dev()
12279 ret = hclge_resume_vf_rate(hdev); in hclge_reset_ae_dev()
12283 hclge_init_rxd_adv_layout(hdev); in hclge_reset_ae_dev()
12285 ret = hclge_update_wol(hdev); in hclge_reset_ae_dev()
12298 struct hclge_dev *hdev = ae_dev->priv; in hclge_uninit_ae_dev() local
12299 struct hclge_mac *mac = &hdev->hw.mac; in hclge_uninit_ae_dev()
12301 hclge_reset_vf_rate(hdev); in hclge_uninit_ae_dev()
12302 hclge_clear_vf_vlan(hdev); in hclge_uninit_ae_dev()
12303 hclge_state_uninit(hdev); in hclge_uninit_ae_dev()
12304 hclge_ptp_uninit(hdev); in hclge_uninit_ae_dev()
12305 hclge_uninit_rxd_adv_layout(hdev); in hclge_uninit_ae_dev()
12306 hclge_uninit_mac_table(hdev); in hclge_uninit_ae_dev()
12307 hclge_del_all_fd_entries(hdev); in hclge_uninit_ae_dev()
12313 hclge_enable_vector(&hdev->misc_vector, false); in hclge_uninit_ae_dev()
12314 synchronize_irq(hdev->misc_vector.vector_irq); in hclge_uninit_ae_dev()
12317 hclge_config_mac_tnl_int(hdev, false); in hclge_uninit_ae_dev()
12318 hclge_config_nic_hw_error(hdev, false); in hclge_uninit_ae_dev()
12319 hclge_config_rocee_ras_interrupt(hdev, false); in hclge_uninit_ae_dev()
12321 hclge_comm_cmd_uninit(hdev->ae_dev, &hdev->hw.hw); in hclge_uninit_ae_dev()
12322 hclge_misc_irq_uninit(hdev); in hclge_uninit_ae_dev()
12323 hclge_devlink_uninit(hdev); in hclge_uninit_ae_dev()
12324 hclge_pci_uninit(hdev); in hclge_uninit_ae_dev()
12325 hclge_uninit_vport_vlan_table(hdev); in hclge_uninit_ae_dev()
12326 mutex_destroy(&hdev->vport_lock); in hclge_uninit_ae_dev()
12333 struct hclge_dev *hdev = vport->back; in hclge_get_max_channels() local
12335 return min_t(u32, hdev->pf_rss_size_max, vport->alloc_tqps); in hclge_get_max_channels()
12351 struct hclge_dev *hdev = vport->back; in hclge_get_tqps_and_rss_info() local
12354 *max_rss_size = hdev->pf_rss_size_max; in hclge_get_tqps_and_rss_info()
12361 struct hclge_dev *hdev = vport->back; in hclge_set_rss_tc_mode_cfg() local
12373 if (!(hdev->hw_tc_map & BIT(i))) in hclge_set_rss_tc_mode_cfg()
12381 return hclge_comm_set_rss_tc_mode(&hdev->hw.hw, tc_offset, tc_valid, in hclge_set_rss_tc_mode_cfg()
12391 struct hclge_dev *hdev = vport->back; in hclge_set_channels() local
12400 ret = hclge_tm_vport_map_update(hdev); in hclge_set_channels()
12402 dev_err(&hdev->pdev->dev, "tm vport map fail, ret =%d\n", ret); in hclge_set_channels()
12425 dev_err(&hdev->pdev->dev, "set rss indir table fail, ret=%d\n", in hclge_set_channels()
12432 dev_info(&hdev->pdev->dev, in hclge_set_channels()
12440 static int hclge_set_led_status(struct hclge_dev *hdev, u8 locate_led_status) in hclge_set_led_status() argument
12452 ret = hclge_cmd_send(&hdev->hw, &desc, 1); in hclge_set_led_status()
12454 dev_err(&hdev->pdev->dev, in hclge_set_led_status()
12470 struct hclge_dev *hdev = vport->back; in hclge_set_led_id() local
12474 return hclge_set_led_status(hdev, HCLGE_LED_ON); in hclge_set_led_id()
12476 return hclge_set_led_status(hdev, HCLGE_LED_OFF); in hclge_set_led_id()
12488 struct hclge_dev *hdev = vport->back; in hclge_get_link_mode() local
12492 supported[idx] = hdev->hw.mac.supported[idx]; in hclge_get_link_mode()
12493 advertising[idx] = hdev->hw.mac.advertising[idx]; in hclge_get_link_mode()
12500 struct hclge_dev *hdev = vport->back; in hclge_gro_en() local
12501 bool gro_en_old = hdev->gro_en; in hclge_gro_en()
12504 hdev->gro_en = enable; in hclge_gro_en()
12505 ret = hclge_config_gro(hdev); in hclge_gro_en()
12507 hdev->gro_en = gro_en_old; in hclge_gro_en()
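hclge_gro_en() updates the cached gro_en flag first, pushes it to hardware with hclge_config_gro(), and restores the previous value if the command fails, so the software state never drifts from what the device accepted. The same save/flip/restore pattern in miniature (the config helper is a hypothetical stand-in):

int config_gro(int enable);                                        /* stand-in */

static int gro_en(int *gro_state, int enable)
{
        int old = *gro_state;
        int ret;

        *gro_state = enable;
        ret = config_gro(enable);
        if (ret)
                *gro_state = old;       /* roll back the cached flag */
        return ret;
}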
12515 struct hclge_dev *hdev = vport->back; in hclge_sync_vport_promisc_mode() local
12554 ret = hclge_cmd_set_promisc_mode(hdev, vport->vport_id, uc_en, in hclge_sync_vport_promisc_mode()
12565 static void hclge_sync_promisc_mode(struct hclge_dev *hdev) in hclge_sync_promisc_mode() argument
12571 for (i = 0; i < hdev->num_alloc_vport; i++) { in hclge_sync_promisc_mode()
12572 vport = &hdev->vport[i]; in hclge_sync_promisc_mode()
12580 static bool hclge_module_existed(struct hclge_dev *hdev) in hclge_module_existed() argument
12587 ret = hclge_cmd_send(&hdev->hw, &desc, 1); in hclge_module_existed()
12589 dev_err(&hdev->pdev->dev, in hclge_module_existed()
12602 static u16 hclge_get_sfp_eeprom_info(struct hclge_dev *hdev, u32 offset, in hclge_get_sfp_eeprom_info() argument
12628 ret = hclge_cmd_send(&hdev->hw, desc, i); in hclge_get_sfp_eeprom_info()
12630 dev_err(&hdev->pdev->dev, in hclge_get_sfp_eeprom_info()
12657 struct hclge_dev *hdev = vport->back; in hclge_get_module_eeprom() local
12661 if (hdev->hw.mac.media_type != HNAE3_MEDIA_TYPE_FIBER) in hclge_get_module_eeprom()
12664 if (!hclge_module_existed(hdev)) in hclge_get_module_eeprom()
12668 data_len = hclge_get_sfp_eeprom_info(hdev, in hclge_get_module_eeprom()
12685 struct hclge_dev *hdev = vport->back; in hclge_get_link_diagnosis_info() local
12689 if (hdev->ae_dev->dev_version <= HNAE3_DEVICE_VERSION_V2) in hclge_get_link_diagnosis_info()
12693 ret = hclge_cmd_send(&hdev->hw, &desc, 1); in hclge_get_link_diagnosis_info()
12695 dev_err(&hdev->pdev->dev, in hclge_get_link_diagnosis_info()
12709 struct hclge_dev *hdev = vport->back; in hclge_clear_vport_vf_info() local
12721 dev_err(&hdev->pdev->dev, in hclge_clear_vport_vf_info()
12732 dev_err(&hdev->pdev->dev, in hclge_clear_vport_vf_info()
12736 ret = hclge_set_vf_spoofchk_hw(hdev, vport->vport_id, false); in hclge_clear_vport_vf_info()
12738 dev_err(&hdev->pdev->dev, in hclge_clear_vport_vf_info()
12747 struct hclge_dev *hdev = ae_dev->priv; in hclge_clean_vport_config() local
12752 vport = &hdev->vport[i + HCLGE_VF_VPORT_START_NUM]; in hclge_clean_vport_config()