Lines matching refs: vsi — i40e driver (all hits appear to be from i40e_main.c); each entry is <source line> <code> in <function>()

34 static void i40e_vsi_reinit_locked(struct i40e_vsi *vsi);
36 static int i40e_add_vsi(struct i40e_vsi *vsi);
37 static int i40e_add_veb(struct i40e_veb *veb, struct i40e_vsi *vsi);
238 if (pile == pf->qp_pile && pf->vsi[id]->type == I40E_VSI_FDIR) { in i40e_get_lump()
313 struct i40e_vsi *vsi; in i40e_find_vsi_from_id() local
316 i40e_pf_for_each_vsi(pf, i, vsi) in i40e_find_vsi_from_id()
317 if (vsi->id == id) in i40e_find_vsi_from_id()
318 return vsi; in i40e_find_vsi_from_id()
349 struct i40e_vsi *vsi = np->vsi; in i40e_tx_timeout() local
350 struct i40e_pf *pf = vsi->back; in i40e_tx_timeout()
358 for (i = 0; i < vsi->num_queue_pairs; i++) { in i40e_tx_timeout()
359 if (vsi->tx_rings[i] && vsi->tx_rings[i]->desc) { in i40e_tx_timeout()
361 vsi->tx_rings[i]->queue_index) { in i40e_tx_timeout()
362 tx_ring = vsi->tx_rings[i]; in i40e_tx_timeout()
384 tx_ring->vsi->base_vector - 1)); in i40e_tx_timeout()
389 vsi->seid, txqueue, tx_ring->next_to_clean, in i40e_tx_timeout()
411 set_bit(__I40E_VSI_DOWN_REQUESTED, vsi->state); in i40e_tx_timeout()
426 struct rtnl_link_stats64 *i40e_get_vsi_stats_struct(struct i40e_vsi *vsi) in i40e_get_vsi_stats_struct() argument
428 return &vsi->net_stats; in i40e_get_vsi_stats_struct()
464 struct i40e_vsi *vsi = np->vsi; in i40e_get_netdev_stats_struct() local
465 struct rtnl_link_stats64 *vsi_stats = i40e_get_vsi_stats_struct(vsi); in i40e_get_netdev_stats_struct()
469 if (test_bit(__I40E_VSI_DOWN, vsi->state)) in i40e_get_netdev_stats_struct()
472 if (!vsi->tx_rings) in i40e_get_netdev_stats_struct()
476 for (i = 0; i < vsi->num_queue_pairs; i++) { in i40e_get_netdev_stats_struct()
480 ring = READ_ONCE(vsi->tx_rings[i]); in i40e_get_netdev_stats_struct()
485 if (i40e_enabled_xdp_vsi(vsi)) { in i40e_get_netdev_stats_struct()
486 ring = READ_ONCE(vsi->xdp_rings[i]); in i40e_get_netdev_stats_struct()
492 ring = READ_ONCE(vsi->rx_rings[i]); in i40e_get_netdev_stats_struct()
522 void i40e_vsi_reset_stats(struct i40e_vsi *vsi) in i40e_vsi_reset_stats() argument
527 if (!vsi) in i40e_vsi_reset_stats()
530 ns = i40e_get_vsi_stats_struct(vsi); in i40e_vsi_reset_stats()
532 memset(&vsi->net_stats_offsets, 0, sizeof(vsi->net_stats_offsets)); in i40e_vsi_reset_stats()
533 memset(&vsi->eth_stats, 0, sizeof(vsi->eth_stats)); in i40e_vsi_reset_stats()
534 memset(&vsi->eth_stats_offsets, 0, sizeof(vsi->eth_stats_offsets)); in i40e_vsi_reset_stats()
535 if (vsi->rx_rings && vsi->rx_rings[0]) { in i40e_vsi_reset_stats()
536 for (i = 0; i < vsi->num_queue_pairs; i++) { in i40e_vsi_reset_stats()
537 memset(&vsi->rx_rings[i]->stats, 0, in i40e_vsi_reset_stats()
538 sizeof(vsi->rx_rings[i]->stats)); in i40e_vsi_reset_stats()
539 memset(&vsi->rx_rings[i]->rx_stats, 0, in i40e_vsi_reset_stats()
540 sizeof(vsi->rx_rings[i]->rx_stats)); in i40e_vsi_reset_stats()
541 memset(&vsi->tx_rings[i]->stats, 0, in i40e_vsi_reset_stats()
542 sizeof(vsi->tx_rings[i]->stats)); in i40e_vsi_reset_stats()
543 memset(&vsi->tx_rings[i]->tx_stats, 0, in i40e_vsi_reset_stats()
544 sizeof(vsi->tx_rings[i]->tx_stats)); in i40e_vsi_reset_stats()
547 vsi->stat_offsets_loaded = false; in i40e_vsi_reset_stats()
578 static u32 i40e_compute_pci_to_hw_id(struct i40e_vsi *vsi, struct i40e_hw *hw) in i40e_compute_pci_to_hw_id() argument
582 if (vsi->type == I40E_VSI_SRIOV) in i40e_compute_pci_to_hw_id()
583 return (hw->port * BIT(7)) / pf_count + vsi->vf_id; in i40e_compute_pci_to_hw_id()
695 i40e_stats_update_rx_discards(struct i40e_vsi *vsi, struct i40e_hw *hw, in i40e_stats_update_rx_discards() argument
703 I40E_GL_RXERR1H(i40e_compute_pci_to_hw_id(vsi, hw)), in i40e_stats_update_rx_discards()
704 I40E_GL_RXERR1L(i40e_compute_pci_to_hw_id(vsi, hw)), in i40e_stats_update_rx_discards()
713 void i40e_update_eth_stats(struct i40e_vsi *vsi) in i40e_update_eth_stats() argument
715 int stat_idx = le16_to_cpu(vsi->info.stat_counter_idx); in i40e_update_eth_stats()
716 struct i40e_pf *pf = vsi->back; in i40e_update_eth_stats()
721 es = &vsi->eth_stats; in i40e_update_eth_stats()
722 oes = &vsi->eth_stats_offsets; in i40e_update_eth_stats()
726 vsi->stat_offsets_loaded, in i40e_update_eth_stats()
729 vsi->stat_offsets_loaded, in i40e_update_eth_stats()
734 vsi->stat_offsets_loaded, in i40e_update_eth_stats()
738 vsi->stat_offsets_loaded, in i40e_update_eth_stats()
742 vsi->stat_offsets_loaded, in i40e_update_eth_stats()
746 vsi->stat_offsets_loaded, in i40e_update_eth_stats()
751 vsi->stat_offsets_loaded, in i40e_update_eth_stats()
755 vsi->stat_offsets_loaded, in i40e_update_eth_stats()
759 vsi->stat_offsets_loaded, in i40e_update_eth_stats()
763 vsi->stat_offsets_loaded, in i40e_update_eth_stats()
766 i40e_stats_update_rx_discards(vsi, hw, stat_idx, in i40e_update_eth_stats()
767 vsi->stat_offsets_loaded, oes, es); in i40e_update_eth_stats()
769 vsi->stat_offsets_loaded = true; in i40e_update_eth_stats()
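
The i40e_update_eth_stats() hits above all funnel through the same offset-based update: the hardware counters are free-running, so the driver records a baseline on the first read (stat_offsets_loaded == false, set true at line 769) and reports deltas afterwards, handling 48-bit rollover. A minimal userspace sketch of that pattern with hypothetical names (the driver's own helper is i40e_stat_update48()):

    #include <stdbool.h>
    #include <stdint.h>

    /* Sketch only: baseline-then-delta update for a free-running
     * 48-bit hardware counter, as relied on by the stats code above. */
    static void stat_update48(uint64_t new_data, bool offset_loaded,
                              uint64_t *offset, uint64_t *stat)
    {
        if (!offset_loaded)
            *offset = new_data;              /* first read: baseline only */
        if (new_data >= *offset)
            *stat = new_data - *offset;      /* common case */
        else                                 /* counter wrapped past 2^48 */
            *stat = (new_data + (1ULL << 48)) - *offset;
        *stat &= 0xFFFFFFFFFFFFULL;          /* keep the low 48 bits */
    }
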
861 static void i40e_update_vsi_stats(struct i40e_vsi *vsi) in i40e_update_vsi_stats() argument
864 struct i40e_pf *pf = vsi->back; in i40e_update_vsi_stats()
880 if (test_bit(__I40E_VSI_DOWN, vsi->state) || in i40e_update_vsi_stats()
884 ns = i40e_get_vsi_stats_struct(vsi); in i40e_update_vsi_stats()
885 ons = &vsi->net_stats_offsets; in i40e_update_vsi_stats()
886 es = &vsi->eth_stats; in i40e_update_vsi_stats()
887 oes = &vsi->eth_stats_offsets; in i40e_update_vsi_stats()
903 for (q = 0; q < vsi->num_queue_pairs; q++) { in i40e_update_vsi_stats()
905 p = READ_ONCE(vsi->tx_rings[q]); in i40e_update_vsi_stats()
923 p = READ_ONCE(vsi->rx_rings[q]); in i40e_update_vsi_stats()
941 if (i40e_enabled_xdp_vsi(vsi)) { in i40e_update_vsi_stats()
943 p = READ_ONCE(vsi->xdp_rings[q]); in i40e_update_vsi_stats()
961 vsi->tx_restart = tx_restart; in i40e_update_vsi_stats()
962 vsi->tx_busy = tx_busy; in i40e_update_vsi_stats()
963 vsi->tx_linearize = tx_linearize; in i40e_update_vsi_stats()
964 vsi->tx_force_wb = tx_force_wb; in i40e_update_vsi_stats()
965 vsi->tx_stopped = tx_stopped; in i40e_update_vsi_stats()
966 vsi->rx_page_failed = rx_page; in i40e_update_vsi_stats()
967 vsi->rx_buf_failed = rx_buf; in i40e_update_vsi_stats()
968 vsi->rx_page_reuse = rx_reuse; in i40e_update_vsi_stats()
969 vsi->rx_page_alloc = rx_alloc; in i40e_update_vsi_stats()
970 vsi->rx_page_waive = rx_waive; in i40e_update_vsi_stats()
971 vsi->rx_page_busy = rx_busy; in i40e_update_vsi_stats()
979 i40e_update_eth_stats(vsi); in i40e_update_vsi_stats()
992 if (vsi->type == I40E_VSI_MAIN) { in i40e_update_vsi_stats()
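
i40e_update_vsi_stats() snapshots each ring pointer with READ_ONCE (lines 905, 923, 943) and, not visible in this vsi-only grep, copies each ring's packet/byte counters inside a u64_stats sequence-retry loop. A userspace stand-in for that reader, using C11 atomics instead of the kernel's u64_stats API (names and layout are illustrative):

    #include <stdatomic.h>
    #include <stdint.h>

    /* Seqcount-style reader sketch: the writer makes seq odd while it
     * updates packets/bytes, then even again; the reader retries until
     * an even, unchanged sequence brackets its copy. */
    struct ring_stats {
        atomic_uint seq;
        uint64_t packets;
        uint64_t bytes;
    };

    static void ring_stats_read(struct ring_stats *rs,
                                uint64_t *packets, uint64_t *bytes)
    {
        unsigned int start;

        do {
            do {    /* wait for a quiescent (even) sequence */
                start = atomic_load_explicit(&rs->seq,
                                             memory_order_acquire);
            } while (start & 1);

            *packets = rs->packets;
            *bytes = rs->bytes;
        } while (atomic_load_explicit(&rs->seq,
                                      memory_order_acquire) != start);
    }
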
1233 void i40e_update_stats(struct i40e_vsi *vsi) in i40e_update_stats() argument
1235 struct i40e_pf *pf = vsi->back; in i40e_update_stats()
1237 if (vsi->type == I40E_VSI_MAIN) in i40e_update_stats()
1240 i40e_update_vsi_stats(vsi); in i40e_update_stats()
1249 int i40e_count_filters(struct i40e_vsi *vsi) in i40e_count_filters() argument
1256 hash_for_each_safe(vsi->mac_filter_hash, bkt, h, f, hlist) { in i40e_count_filters()
1274 static struct i40e_mac_filter *i40e_find_filter(struct i40e_vsi *vsi, in i40e_find_filter() argument
1280 if (!vsi || !macaddr) in i40e_find_filter()
1284 hash_for_each_possible(vsi->mac_filter_hash, f, hlist, key) { in i40e_find_filter()
1300 struct i40e_mac_filter *i40e_find_mac(struct i40e_vsi *vsi, const u8 *macaddr) in i40e_find_mac() argument
1305 if (!vsi || !macaddr) in i40e_find_mac()
1309 hash_for_each_possible(vsi->mac_filter_hash, f, hlist, key) { in i40e_find_mac()
1322 bool i40e_is_vsi_in_vlan(struct i40e_vsi *vsi) in i40e_is_vsi_in_vlan() argument
1325 if (vsi->info.pvid) in i40e_is_vsi_in_vlan()
1348 return vsi->has_vlan_filter; in i40e_is_vsi_in_vlan()
1380 static int i40e_correct_mac_vlan_filters(struct i40e_vsi *vsi, in i40e_correct_mac_vlan_filters() argument
1385 s16 pvid = le16_to_cpu(vsi->info.pvid); in i40e_correct_mac_vlan_filters()
1416 hash_for_each_safe(vsi->mac_filter_hash, bkt, h, f, hlist) { in i40e_correct_mac_vlan_filters()
1434 add_head = i40e_add_filter(vsi, f->macaddr, new_vlan); in i40e_correct_mac_vlan_filters()
1458 vsi->has_vlan_filter = !!vlan_filters; in i40e_correct_mac_vlan_filters()
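
The correction pass above enforces one invariant: with a PVID set, every filter lives on the PVID; otherwise "match any VLAN" entries must be demoted to VLAN 0 while real VLAN filters exist, and promoted back when the last one disappears (hence has_vlan_filter at line 1458). A toy of that per-filter decision, modeling I40E_VLAN_ANY as -1 as the driver does; a sketch, not driver code:

    #include <stdint.h>

    #define VLAN_ANY (-1)   /* stands in for I40E_VLAN_ANY */

    /* Toy of the new-VLAN choice made per filter in
     * i40e_correct_mac_vlan_filters(). */
    static int16_t correct_filter_vlan(int16_t vlan, int16_t pvid,
                                       int vlan_filters)
    {
        if (pvid)
            return pvid;                 /* port VLAN wins outright */
        if (vlan_filters && vlan == VLAN_ANY)
            return 0;                    /* ANY -> untagged-only */
        if (!vlan_filters && vlan == 0)
            return VLAN_ANY;             /* untagged-only -> ANY */
        return vlan;                     /* explicit VLANs are kept */
    }
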
1477 static s16 i40e_get_vf_new_vlan(struct i40e_vsi *vsi, in i40e_get_vf_new_vlan() argument
1483 s16 pvid = le16_to_cpu(vsi->info.pvid); in i40e_get_vf_new_vlan()
1484 struct i40e_pf *pf = vsi->back; in i40e_get_vf_new_vlan()
1527 static int i40e_correct_vf_mac_vlan_filters(struct i40e_vsi *vsi, in i40e_correct_vf_mac_vlan_filters() argument
1539 new_mac->f->vlan = i40e_get_vf_new_vlan(vsi, new_mac, NULL, in i40e_correct_vf_mac_vlan_filters()
1543 hash_for_each_safe(vsi->mac_filter_hash, bkt, h, f, hlist) { in i40e_correct_vf_mac_vlan_filters()
1544 new_vlan = i40e_get_vf_new_vlan(vsi, NULL, f, vlan_filters, in i40e_correct_vf_mac_vlan_filters()
1547 add_head = i40e_add_filter(vsi, f->macaddr, new_vlan); in i40e_correct_vf_mac_vlan_filters()
1569 vsi->has_vlan_filter = !!vlan_filters; in i40e_correct_vf_mac_vlan_filters()
1581 static void i40e_rm_default_mac_filter(struct i40e_vsi *vsi, u8 *macaddr) in i40e_rm_default_mac_filter() argument
1584 struct i40e_pf *pf = vsi->back; in i40e_rm_default_mac_filter()
1587 if (vsi->type != I40E_VSI_MAIN) in i40e_rm_default_mac_filter()
1595 i40e_aq_remove_macvlan(&pf->hw, vsi->seid, &element, 1, NULL); in i40e_rm_default_mac_filter()
1603 i40e_aq_remove_macvlan(&pf->hw, vsi->seid, &element, 1, NULL); in i40e_rm_default_mac_filter()
1617 struct i40e_mac_filter *i40e_add_filter(struct i40e_vsi *vsi, in i40e_add_filter() argument
1623 if (!vsi || !macaddr) in i40e_add_filter()
1626 f = i40e_find_filter(vsi, macaddr, vlan); in i40e_add_filter()
1636 vsi->has_vlan_filter = true; in i40e_add_filter()
1644 hash_add(vsi->mac_filter_hash, &f->hlist, key); in i40e_add_filter()
1646 vsi->flags |= I40E_VSI_FLAG_FILTER_CHANGED; in i40e_add_filter()
1647 set_bit(__I40E_MACVLAN_SYNC_PENDING, vsi->back->state); in i40e_add_filter()
1679 void __i40e_del_filter(struct i40e_vsi *vsi, struct i40e_mac_filter *f) in __i40e_del_filter() argument
1696 vsi->flags |= I40E_VSI_FLAG_FILTER_CHANGED; in __i40e_del_filter()
1697 set_bit(__I40E_MACVLAN_SYNC_PENDING, vsi->back->state); in __i40e_del_filter()
1712 void i40e_del_filter(struct i40e_vsi *vsi, const u8 *macaddr, s16 vlan) in i40e_del_filter() argument
1716 if (!vsi || !macaddr) in i40e_del_filter()
1719 f = i40e_find_filter(vsi, macaddr, vlan); in i40e_del_filter()
1720 __i40e_del_filter(vsi, f); in i40e_del_filter()
1735 struct i40e_mac_filter *i40e_add_mac_filter(struct i40e_vsi *vsi, in i40e_add_mac_filter() argument
1742 lockdep_assert_held(&vsi->mac_filter_hash_lock); in i40e_add_mac_filter()
1743 if (vsi->info.pvid) in i40e_add_mac_filter()
1744 return i40e_add_filter(vsi, macaddr, in i40e_add_mac_filter()
1745 le16_to_cpu(vsi->info.pvid)); in i40e_add_mac_filter()
1747 if (!i40e_is_vsi_in_vlan(vsi)) in i40e_add_mac_filter()
1748 return i40e_add_filter(vsi, macaddr, I40E_VLAN_ANY); in i40e_add_mac_filter()
1750 hash_for_each_safe(vsi->mac_filter_hash, bkt, h, f, hlist) { in i40e_add_mac_filter()
1753 add = i40e_add_filter(vsi, macaddr, f->vlan); in i40e_add_mac_filter()
1771 int i40e_del_mac_filter(struct i40e_vsi *vsi, const u8 *macaddr) in i40e_del_mac_filter() argument
1778 lockdep_assert_held(&vsi->mac_filter_hash_lock); in i40e_del_mac_filter()
1779 hash_for_each_safe(vsi->mac_filter_hash, bkt, h, f, hlist) { in i40e_del_mac_filter()
1781 __i40e_del_filter(vsi, f); in i40e_del_mac_filter()
1802 struct i40e_vsi *vsi = np->vsi; in i40e_set_mac() local
1803 struct i40e_pf *pf = vsi->back; in i40e_set_mac()
1826 spin_lock_bh(&vsi->mac_filter_hash_lock); in i40e_set_mac()
1827 i40e_del_mac_filter(vsi, netdev->dev_addr); in i40e_set_mac()
1829 i40e_add_mac_filter(vsi, netdev->dev_addr); in i40e_set_mac()
1830 spin_unlock_bh(&vsi->mac_filter_hash_lock); in i40e_set_mac()
1832 if (vsi->type == I40E_VSI_MAIN) { in i40e_set_mac()
1857 static int i40e_config_rss_aq(struct i40e_vsi *vsi, const u8 *seed, in i40e_config_rss_aq() argument
1860 struct i40e_pf *pf = vsi->back; in i40e_config_rss_aq()
1867 ret = i40e_aq_set_rss_key(hw, vsi->id, seed_dw); in i40e_config_rss_aq()
1877 bool pf_lut = vsi->type == I40E_VSI_MAIN; in i40e_config_rss_aq()
1879 ret = i40e_aq_set_rss_lut(hw, vsi->id, pf_lut, lut, lut_size); in i40e_config_rss_aq()
1895 static int i40e_vsi_config_rss(struct i40e_vsi *vsi) in i40e_vsi_config_rss() argument
1897 struct i40e_pf *pf = vsi->back; in i40e_vsi_config_rss()
1904 if (!vsi->rss_size) in i40e_vsi_config_rss()
1905 vsi->rss_size = min_t(int, pf->alloc_rss_size, in i40e_vsi_config_rss()
1906 vsi->num_queue_pairs); in i40e_vsi_config_rss()
1907 if (!vsi->rss_size) in i40e_vsi_config_rss()
1909 lut = kzalloc(vsi->rss_table_size, GFP_KERNEL); in i40e_vsi_config_rss()
1916 if (vsi->rss_lut_user) in i40e_vsi_config_rss()
1917 memcpy(lut, vsi->rss_lut_user, vsi->rss_table_size); in i40e_vsi_config_rss()
1919 i40e_fill_rss_lut(pf, lut, vsi->rss_table_size, vsi->rss_size); in i40e_vsi_config_rss()
1920 if (vsi->rss_hkey_user) in i40e_vsi_config_rss()
1921 memcpy(seed, vsi->rss_hkey_user, I40E_HKEY_ARRAY_SIZE); in i40e_vsi_config_rss()
1924 ret = i40e_config_rss_aq(vsi, seed, lut, vsi->rss_table_size); in i40e_vsi_config_rss()
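
When no user LUT is stored, i40e_vsi_config_rss() falls back to i40e_fill_rss_lut() (line 1919), which spreads lookup-table slots round-robin over the active queues. A self-contained sketch of that fill; function and variable names here are stand-ins:

    #include <stdint.h>
    #include <stdio.h>

    /* Round-robin LUT fill, mirroring the (lut, rss_table_size,
     * rss_size) call above; sketch, not driver code. */
    static void fill_rss_lut(uint8_t *lut, uint16_t lut_size,
                             uint16_t rss_size)
    {
        for (uint16_t i = 0; i < lut_size; i++)
            lut[i] = i % rss_size;   /* slot i -> queue (i mod nqueues) */
    }

    int main(void)
    {
        uint8_t lut[16];

        fill_rss_lut(lut, 16, 4);            /* 4 active RSS queues */
        for (int i = 0; i < 16; i++)
            printf("%u ", lut[i]);           /* 0 1 2 3 0 1 2 3 ... */
        printf("\n");
        return 0;
    }
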
1937 static int i40e_vsi_setup_queue_map_mqprio(struct i40e_vsi *vsi, in i40e_vsi_setup_queue_map_mqprio() argument
1945 if (vsi->type != I40E_VSI_MAIN) in i40e_vsi_setup_queue_map_mqprio()
1949 vsi->tc_config.numtc = vsi->mqprio_qopt.qopt.num_tc; in i40e_vsi_setup_queue_map_mqprio()
1950 vsi->tc_config.enabled_tc = enabled_tc ? enabled_tc : 1; in i40e_vsi_setup_queue_map_mqprio()
1951 num_qps = vsi->mqprio_qopt.qopt.count[0]; in i40e_vsi_setup_queue_map_mqprio()
1961 max_qcount = vsi->mqprio_qopt.qopt.count[0]; in i40e_vsi_setup_queue_map_mqprio()
1964 if (vsi->tc_config.enabled_tc & BIT(i)) { in i40e_vsi_setup_queue_map_mqprio()
1965 offset = vsi->mqprio_qopt.qopt.offset[i]; in i40e_vsi_setup_queue_map_mqprio()
1966 qcount = vsi->mqprio_qopt.qopt.count[i]; in i40e_vsi_setup_queue_map_mqprio()
1969 vsi->tc_config.tc_info[i].qoffset = offset; in i40e_vsi_setup_queue_map_mqprio()
1970 vsi->tc_config.tc_info[i].qcount = qcount; in i40e_vsi_setup_queue_map_mqprio()
1971 vsi->tc_config.tc_info[i].netdev_tc = netdev_tc++; in i40e_vsi_setup_queue_map_mqprio()
1977 vsi->tc_config.tc_info[i].qoffset = 0; in i40e_vsi_setup_queue_map_mqprio()
1978 vsi->tc_config.tc_info[i].qcount = 1; in i40e_vsi_setup_queue_map_mqprio()
1979 vsi->tc_config.tc_info[i].netdev_tc = 0; in i40e_vsi_setup_queue_map_mqprio()
1984 vsi->num_queue_pairs = offset + qcount; in i40e_vsi_setup_queue_map_mqprio()
1989 ctxt->info.queue_mapping[0] = cpu_to_le16(vsi->base_queue); in i40e_vsi_setup_queue_map_mqprio()
1993 vsi->rss_size = max_qcount; in i40e_vsi_setup_queue_map_mqprio()
1994 ret = i40e_vsi_config_rss(vsi); in i40e_vsi_setup_queue_map_mqprio()
1996 dev_info(&vsi->back->pdev->dev, in i40e_vsi_setup_queue_map_mqprio()
2001 vsi->reconfig_rss = true; in i40e_vsi_setup_queue_map_mqprio()
2002 dev_dbg(&vsi->back->pdev->dev, in i40e_vsi_setup_queue_map_mqprio()
2008 override_q = vsi->mqprio_qopt.qopt.count[0]; in i40e_vsi_setup_queue_map_mqprio()
2009 if (override_q && override_q < vsi->num_queue_pairs) { in i40e_vsi_setup_queue_map_mqprio()
2010 vsi->cnt_q_avail = vsi->num_queue_pairs - override_q; in i40e_vsi_setup_queue_map_mqprio()
2011 vsi->next_base_queue = override_q; in i40e_vsi_setup_queue_map_mqprio()
2025 static void i40e_vsi_setup_queue_map(struct i40e_vsi *vsi, in i40e_vsi_setup_queue_map() argument
2030 struct i40e_pf *pf = vsi->back; in i40e_vsi_setup_queue_map()
2045 if (vsi->type == I40E_VSI_MAIN) { in i40e_vsi_setup_queue_map()
2055 if (vsi->req_queue_pairs > 0) in i40e_vsi_setup_queue_map()
2056 vsi->num_queue_pairs = vsi->req_queue_pairs; in i40e_vsi_setup_queue_map()
2058 vsi->num_queue_pairs = pf->num_lan_msix; in i40e_vsi_setup_queue_map()
2060 vsi->num_queue_pairs = 1; in i40e_vsi_setup_queue_map()
2064 if (vsi->type == I40E_VSI_MAIN || in i40e_vsi_setup_queue_map()
2065 (vsi->type == I40E_VSI_SRIOV && vsi->num_queue_pairs != 0)) in i40e_vsi_setup_queue_map()
2066 num_tc_qps = vsi->num_queue_pairs; in i40e_vsi_setup_queue_map()
2068 num_tc_qps = vsi->alloc_queue_pairs; in i40e_vsi_setup_queue_map()
2070 if (enabled_tc && test_bit(I40E_FLAG_DCB_ENA, vsi->back->flags)) { in i40e_vsi_setup_queue_map()
2085 vsi->tc_config.numtc = numtc; in i40e_vsi_setup_queue_map()
2086 vsi->tc_config.enabled_tc = enabled_tc ? enabled_tc : 1; in i40e_vsi_setup_queue_map()
2095 if (vsi->tc_config.enabled_tc & BIT(i)) { in i40e_vsi_setup_queue_map()
2099 switch (vsi->type) { in i40e_vsi_setup_queue_map()
2105 vsi->tc_config.enabled_tc != 1) { in i40e_vsi_setup_queue_map()
2119 vsi->tc_config.tc_info[i].qoffset = offset; in i40e_vsi_setup_queue_map()
2120 vsi->tc_config.tc_info[i].qcount = qcount; in i40e_vsi_setup_queue_map()
2130 vsi->tc_config.tc_info[i].netdev_tc = netdev_tc++; in i40e_vsi_setup_queue_map()
2141 vsi->tc_config.tc_info[i].qoffset = 0; in i40e_vsi_setup_queue_map()
2142 vsi->tc_config.tc_info[i].qcount = 1; in i40e_vsi_setup_queue_map()
2143 vsi->tc_config.tc_info[i].netdev_tc = 0; in i40e_vsi_setup_queue_map()
2150 if ((vsi->type == I40E_VSI_MAIN && numtc != 1) || in i40e_vsi_setup_queue_map()
2151 (vsi->type == I40E_VSI_SRIOV && vsi->num_queue_pairs == 0) || in i40e_vsi_setup_queue_map()
2152 (vsi->type != I40E_VSI_MAIN && vsi->type != I40E_VSI_SRIOV)) in i40e_vsi_setup_queue_map()
2153 vsi->num_queue_pairs = offset; in i40e_vsi_setup_queue_map()
2161 if (vsi->type == I40E_VSI_SRIOV) { in i40e_vsi_setup_queue_map()
2164 for (i = 0; i < vsi->num_queue_pairs; i++) in i40e_vsi_setup_queue_map()
2166 cpu_to_le16(vsi->base_queue + i); in i40e_vsi_setup_queue_map()
2170 ctxt->info.queue_mapping[0] = cpu_to_le16(vsi->base_queue); in i40e_vsi_setup_queue_map()
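
Both queue-map functions above maintain the same bookkeeping in vsi->tc_config.tc_info[]: enabled TCs get a queue offset and count, disabled TCs collapse onto queue 0 with a single queue so lookups stay valid, and num_queue_pairs ends up covering the last enabled TC. A simplified, compilable model of that loop (field names are stand-ins):

    #include <stdint.h>

    #define MAX_TC 8

    struct tc_info { uint16_t qoffset, qcount; };

    /* Sketch of the per-TC offset/count assignment seen at lines
     * 1964-1984 and 2095-2153 above. */
    static uint16_t build_tc_map(struct tc_info tc[MAX_TC],
                                 uint8_t enabled_tc,
                                 const uint16_t offset[MAX_TC],
                                 const uint16_t count[MAX_TC])
    {
        uint16_t num_qps = 0;

        for (int i = 0; i < MAX_TC; i++) {
            if (enabled_tc & (1u << i)) {
                tc[i].qoffset = offset[i];
                tc[i].qcount = count[i];
                if ((uint16_t)(offset[i] + count[i]) > num_qps)
                    num_qps = offset[i] + count[i];
            } else {
                /* disabled TC: collapse onto queue 0, one queue */
                tc[i].qoffset = 0;
                tc[i].qcount = 1;
            }
        }
        return num_qps;   /* plays the role of vsi->num_queue_pairs */
    }
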
2186 struct i40e_vsi *vsi = np->vsi; in i40e_addr_sync() local
2188 if (i40e_add_mac_filter(vsi, addr)) in i40e_addr_sync()
2205 struct i40e_vsi *vsi = np->vsi; in i40e_addr_unsync() local
2215 i40e_del_mac_filter(vsi, addr); in i40e_addr_unsync()
2227 struct i40e_vsi *vsi = np->vsi; in i40e_set_rx_mode() local
2229 spin_lock_bh(&vsi->mac_filter_hash_lock); in i40e_set_rx_mode()
2234 spin_unlock_bh(&vsi->mac_filter_hash_lock); in i40e_set_rx_mode()
2237 if (vsi->current_netdev_flags != vsi->netdev->flags) { in i40e_set_rx_mode()
2238 vsi->flags |= I40E_VSI_FLAG_FILTER_CHANGED; in i40e_set_rx_mode()
2239 set_bit(__I40E_MACVLAN_SYNC_PENDING, vsi->back->state); in i40e_set_rx_mode()
2251 static void i40e_undo_del_filter_entries(struct i40e_vsi *vsi, in i40e_undo_del_filter_entries() argument
2262 hash_add(vsi->mac_filter_hash, &f->hlist, key); in i40e_undo_del_filter_entries()
2274 static void i40e_undo_add_filter_entries(struct i40e_vsi *vsi, in i40e_undo_add_filter_entries() argument
2283 netdev_hw_addr_refcnt(new->f, vsi->netdev, -1); in i40e_undo_add_filter_entries()
2361 void i40e_aqc_del_filters(struct i40e_vsi *vsi, const char *vsi_name, in i40e_aqc_del_filters() argument
2365 struct i40e_hw *hw = &vsi->back->hw; in i40e_aqc_del_filters()
2369 aq_ret = i40e_aq_remove_macvlan_v2(hw, vsi->seid, list, num_del, NULL, in i40e_aqc_del_filters()
2375 dev_info(&vsi->back->pdev->dev, in i40e_aqc_del_filters()
2395 void i40e_aqc_add_filters(struct i40e_vsi *vsi, const char *vsi_name, in i40e_aqc_add_filters() argument
2400 struct i40e_hw *hw = &vsi->back->hw; in i40e_aqc_add_filters()
2404 i40e_aq_add_macvlan_v2(hw, vsi->seid, list, num_add, NULL, &aq_status); in i40e_aqc_add_filters()
2408 if (vsi->type == I40E_VSI_MAIN) { in i40e_aqc_add_filters()
2409 set_bit(__I40E_VSI_OVERFLOW_PROMISC, vsi->state); in i40e_aqc_add_filters()
2410 dev_warn(&vsi->back->pdev->dev, in i40e_aqc_add_filters()
2413 } else if (vsi->type == I40E_VSI_SRIOV || in i40e_aqc_add_filters()
2414 vsi->type == I40E_VSI_VMDQ1 || in i40e_aqc_add_filters()
2415 vsi->type == I40E_VSI_VMDQ2) { in i40e_aqc_add_filters()
2416 dev_warn(&vsi->back->pdev->dev, in i40e_aqc_add_filters()
2421 dev_warn(&vsi->back->pdev->dev, in i40e_aqc_add_filters()
2424 vsi->type); in i40e_aqc_add_filters()
2442 i40e_aqc_broadcast_filter(struct i40e_vsi *vsi, const char *vsi_name, in i40e_aqc_broadcast_filter() argument
2447 struct i40e_hw *hw = &vsi->back->hw; in i40e_aqc_broadcast_filter()
2452 vsi->seid, in i40e_aqc_broadcast_filter()
2457 vsi->seid, in i40e_aqc_broadcast_filter()
2464 set_bit(__I40E_VSI_OVERFLOW_PROMISC, vsi->state); in i40e_aqc_broadcast_filter()
2465 dev_warn(&vsi->back->pdev->dev, in i40e_aqc_broadcast_filter()
2485 struct i40e_vsi *vsi = i40e_pf_get_main_vsi(pf); in i40e_set_promiscuous() local
2489 if (vsi->type == I40E_VSI_MAIN && in i40e_set_promiscuous()
2499 vsi->seid, in i40e_set_promiscuous()
2503 vsi->seid, in i40e_set_promiscuous()
2514 vsi->seid, in i40e_set_promiscuous()
2525 vsi->seid, in i40e_set_promiscuous()
2549 int i40e_sync_vsi_filters(struct i40e_vsi *vsi) in i40e_sync_vsi_filters() argument
2554 struct i40e_hw *hw = &vsi->back->hw; in i40e_sync_vsi_filters()
2575 while (test_and_set_bit(__I40E_VSI_SYNCING_FILTERS, vsi->state)) in i40e_sync_vsi_filters()
2577 pf = vsi->back; in i40e_sync_vsi_filters()
2579 old_overflow = test_bit(__I40E_VSI_OVERFLOW_PROMISC, vsi->state); in i40e_sync_vsi_filters()
2581 if (vsi->netdev) { in i40e_sync_vsi_filters()
2582 changed_flags = vsi->current_netdev_flags ^ vsi->netdev->flags; in i40e_sync_vsi_filters()
2583 vsi->current_netdev_flags = vsi->netdev->flags; in i40e_sync_vsi_filters()
2589 if (vsi->type == I40E_VSI_SRIOV) in i40e_sync_vsi_filters()
2590 snprintf(vsi_name, sizeof(vsi_name) - 1, "VF %d", vsi->vf_id); in i40e_sync_vsi_filters()
2591 else if (vsi->type != I40E_VSI_MAIN) in i40e_sync_vsi_filters()
2592 snprintf(vsi_name, sizeof(vsi_name) - 1, "vsi %d", vsi->seid); in i40e_sync_vsi_filters()
2594 if (vsi->flags & I40E_VSI_FLAG_FILTER_CHANGED) { in i40e_sync_vsi_filters()
2595 vsi->flags &= ~I40E_VSI_FLAG_FILTER_CHANGED; in i40e_sync_vsi_filters()
2597 spin_lock_bh(&vsi->mac_filter_hash_lock); in i40e_sync_vsi_filters()
2599 hash_for_each_safe(vsi->mac_filter_hash, bkt, h, f, hlist) { in i40e_sync_vsi_filters()
2631 if (vsi->type != I40E_VSI_SRIOV) in i40e_sync_vsi_filters()
2633 (vsi, &tmp_add_list, &tmp_del_list, in i40e_sync_vsi_filters()
2637 (vsi, &tmp_add_list, &tmp_del_list, in i40e_sync_vsi_filters()
2638 vlan_filters, pf->vf[vsi->vf_id].trusted); in i40e_sync_vsi_filters()
2641 netdev_hw_addr_refcnt(new->f, vsi->netdev, 1); in i40e_sync_vsi_filters()
2646 spin_unlock_bh(&vsi->mac_filter_hash_lock); in i40e_sync_vsi_filters()
2666 i40e_aqc_broadcast_filter(vsi, vsi_name, f); in i40e_sync_vsi_filters()
2689 i40e_aqc_del_filters(vsi, vsi_name, del_list, in i40e_sync_vsi_filters()
2702 i40e_aqc_del_filters(vsi, vsi_name, del_list, in i40e_sync_vsi_filters()
2726 if (i40e_aqc_broadcast_filter(vsi, vsi_name, in i40e_sync_vsi_filters()
2756 i40e_aqc_add_filters(vsi, vsi_name, add_list, in i40e_sync_vsi_filters()
2763 i40e_aqc_add_filters(vsi, vsi_name, add_list, add_head, in i40e_sync_vsi_filters()
2769 spin_lock_bh(&vsi->mac_filter_hash_lock); in i40e_sync_vsi_filters()
2776 netdev_hw_addr_refcnt(new->f, vsi->netdev, -1); in i40e_sync_vsi_filters()
2779 spin_unlock_bh(&vsi->mac_filter_hash_lock); in i40e_sync_vsi_filters()
2785 spin_lock_bh(&vsi->mac_filter_hash_lock); in i40e_sync_vsi_filters()
2786 vsi->active_filters = 0; in i40e_sync_vsi_filters()
2787 hash_for_each(vsi->mac_filter_hash, bkt, f, hlist) { in i40e_sync_vsi_filters()
2789 vsi->active_filters++; in i40e_sync_vsi_filters()
2793 spin_unlock_bh(&vsi->mac_filter_hash_lock); in i40e_sync_vsi_filters()
2800 vsi->active_filters < vsi->promisc_threshold) { in i40e_sync_vsi_filters()
2804 clear_bit(__I40E_VSI_OVERFLOW_PROMISC, vsi->state); in i40e_sync_vsi_filters()
2805 vsi->promisc_threshold = 0; in i40e_sync_vsi_filters()
2809 if (vsi->type == I40E_VSI_SRIOV && pf->vf && in i40e_sync_vsi_filters()
2810 !pf->vf[vsi->vf_id].trusted) { in i40e_sync_vsi_filters()
2811 clear_bit(__I40E_VSI_OVERFLOW_PROMISC, vsi->state); in i40e_sync_vsi_filters()
2815 new_overflow = test_bit(__I40E_VSI_OVERFLOW_PROMISC, vsi->state); in i40e_sync_vsi_filters()
2821 vsi->promisc_threshold = (vsi->active_filters * 3) / 4; in i40e_sync_vsi_filters()
2827 cur_multipromisc = !!(vsi->current_netdev_flags & IFF_ALLMULTI); in i40e_sync_vsi_filters()
2828 aq_ret = i40e_aq_set_vsi_multicast_promiscuous(&vsi->back->hw, in i40e_sync_vsi_filters()
2829 vsi->seid, in i40e_sync_vsi_filters()
2849 cur_promisc = (!!(vsi->current_netdev_flags & IFF_PROMISC) || in i40e_sync_vsi_filters()
2866 vsi->flags |= I40E_VSI_FLAG_FILTER_CHANGED; in i40e_sync_vsi_filters()
2868 clear_bit(__I40E_VSI_SYNCING_FILTERS, vsi->state); in i40e_sync_vsi_filters()
2873 spin_lock_bh(&vsi->mac_filter_hash_lock); in i40e_sync_vsi_filters()
2875 i40e_undo_del_filter_entries(vsi, &tmp_del_list); in i40e_sync_vsi_filters()
2876 i40e_undo_add_filter_entries(vsi, &tmp_add_list); in i40e_sync_vsi_filters()
2877 spin_unlock_bh(&vsi->mac_filter_hash_lock); in i40e_sync_vsi_filters()
2879 vsi->flags |= I40E_VSI_FLAG_FILTER_CHANGED; in i40e_sync_vsi_filters()
2880 clear_bit(__I40E_VSI_SYNCING_FILTERS, vsi->state); in i40e_sync_vsi_filters()
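
The threshold lines above (2798-2805 and 2821) give overflow-promiscuous mode hysteresis: entering it records 3/4 of the filter count at the time of overflow as the exit level, and the VSI only leaves promiscuous mode once active filters drop below that. A toy model with hypothetical names:

    #include <stdbool.h>

    struct vsi_model {
        bool overflow_promisc;
        unsigned int active_filters;
        unsigned int promisc_threshold;
    };

    /* Sketch of the enter/exit decisions in i40e_sync_vsi_filters(). */
    static void update_overflow_promisc(struct vsi_model *v,
                                        bool add_failed)
    {
        if (add_failed && !v->overflow_promisc) {
            v->overflow_promisc = true;
            /* exit point is 3/4 of the level that overflowed */
            v->promisc_threshold = (v->active_filters * 3) / 4;
        } else if (v->overflow_promisc &&
                   v->active_filters < v->promisc_threshold) {
            v->overflow_promisc = false;   /* room again: leave promisc */
            v->promisc_threshold = 0;
        }
    }
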
2890 struct i40e_vsi *vsi; in i40e_sync_filters_subtask() local
2902 i40e_pf_for_each_vsi(pf, v, vsi) { in i40e_sync_filters_subtask()
2903 if ((vsi->flags & I40E_VSI_FLAG_FILTER_CHANGED) && in i40e_sync_filters_subtask()
2904 !test_bit(__I40E_VSI_RELEASING, vsi->state)) { in i40e_sync_filters_subtask()
2905 int ret = i40e_sync_vsi_filters(vsi); in i40e_sync_filters_subtask()
2922 static u16 i40e_calculate_vsi_rx_buf_len(struct i40e_vsi *vsi) in i40e_calculate_vsi_rx_buf_len() argument
2924 if (!vsi->netdev || test_bit(I40E_FLAG_LEGACY_RX_ENA, vsi->back->flags)) in i40e_calculate_vsi_rx_buf_len()
2935 static int i40e_max_vsi_frame_size(struct i40e_vsi *vsi, in i40e_max_vsi_frame_size() argument
2938 u16 rx_buf_len = i40e_calculate_vsi_rx_buf_len(vsi); in i40e_max_vsi_frame_size()
2959 struct i40e_vsi *vsi = np->vsi; in i40e_change_mtu() local
2960 struct i40e_pf *pf = vsi->back; in i40e_change_mtu()
2963 frame_size = i40e_max_vsi_frame_size(vsi, vsi->xdp_prog); in i40e_change_mtu()
2974 i40e_vsi_reinit_locked(vsi); in i40e_change_mtu()
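
i40e_change_mtu() validates the requested MTU against i40e_max_vsi_frame_size() (line 2963), which caps the frame at the Rx buffer size times the hardware's buffer-chaining depth — the same product programmed into rxmax at line 3664 — and drops the chain length to 1 when an XDP program without multi-buffer support is attached. A sketch under assumed constants (the chaining limit and ceiling below are illustrative, not quoted from the driver):

    #include <stdbool.h>
    #include <stdint.h>

    #define MAX_CHAINED_RX_BUFFERS 5      /* assumed chaining depth */
    #define MAX_RXBUFFER           9728   /* assumed frame ceiling */

    /* Sketch of the sizing rule behind i40e_max_vsi_frame_size(). */
    static uint16_t max_vsi_frame_size(uint16_t rx_buf_len, bool has_xdp,
                                       bool xdp_multi_buf)
    {
        uint16_t chain_len = (has_xdp && !xdp_multi_buf)
                             ? 1 : MAX_CHAINED_RX_BUFFERS;
        uint32_t max = (uint32_t)rx_buf_len * chain_len;

        return max < MAX_RXBUFFER ? (uint16_t)max : MAX_RXBUFFER;
    }
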
2989 struct i40e_pf *pf = np->vsi->back; in i40e_ioctl()
3005 void i40e_vlan_stripping_enable(struct i40e_vsi *vsi) in i40e_vlan_stripping_enable() argument
3011 if (vsi->info.pvid) in i40e_vlan_stripping_enable()
3014 if ((vsi->info.valid_sections & in i40e_vlan_stripping_enable()
3016 ((vsi->info.port_vlan_flags & I40E_AQ_VSI_PVLAN_MODE_MASK) == 0)) in i40e_vlan_stripping_enable()
3019 vsi->info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID); in i40e_vlan_stripping_enable()
3020 vsi->info.port_vlan_flags = I40E_AQ_VSI_PVLAN_MODE_ALL | in i40e_vlan_stripping_enable()
3023 ctxt.seid = vsi->seid; in i40e_vlan_stripping_enable()
3024 ctxt.info = vsi->info; in i40e_vlan_stripping_enable()
3025 ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL); in i40e_vlan_stripping_enable()
3027 dev_info(&vsi->back->pdev->dev, in i40e_vlan_stripping_enable()
3030 i40e_aq_str(&vsi->back->hw, in i40e_vlan_stripping_enable()
3031 vsi->back->hw.aq.asq_last_status)); in i40e_vlan_stripping_enable()
3039 void i40e_vlan_stripping_disable(struct i40e_vsi *vsi) in i40e_vlan_stripping_disable() argument
3045 if (vsi->info.pvid) in i40e_vlan_stripping_disable()
3048 if ((vsi->info.valid_sections & in i40e_vlan_stripping_disable()
3050 ((vsi->info.port_vlan_flags & I40E_AQ_VSI_PVLAN_EMOD_MASK) == in i40e_vlan_stripping_disable()
3054 vsi->info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID); in i40e_vlan_stripping_disable()
3055 vsi->info.port_vlan_flags = I40E_AQ_VSI_PVLAN_MODE_ALL | in i40e_vlan_stripping_disable()
3058 ctxt.seid = vsi->seid; in i40e_vlan_stripping_disable()
3059 ctxt.info = vsi->info; in i40e_vlan_stripping_disable()
3060 ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL); in i40e_vlan_stripping_disable()
3062 dev_info(&vsi->back->pdev->dev, in i40e_vlan_stripping_disable()
3065 i40e_aq_str(&vsi->back->hw, in i40e_vlan_stripping_disable()
3066 vsi->back->hw.aq.asq_last_status)); in i40e_vlan_stripping_disable()
3083 int i40e_add_vlan_all_mac(struct i40e_vsi *vsi, s16 vid) in i40e_add_vlan_all_mac() argument
3089 hash_for_each_safe(vsi->mac_filter_hash, bkt, h, f, hlist) { in i40e_add_vlan_all_mac()
3105 add_f = i40e_add_filter(vsi, f->macaddr, vid); in i40e_add_vlan_all_mac()
3107 dev_info(&vsi->back->pdev->dev, in i40e_add_vlan_all_mac()
3122 int i40e_vsi_add_vlan(struct i40e_vsi *vsi, u16 vid) in i40e_vsi_add_vlan() argument
3126 if (vsi->info.pvid) in i40e_vsi_add_vlan()
3141 spin_lock_bh(&vsi->mac_filter_hash_lock); in i40e_vsi_add_vlan()
3142 err = i40e_add_vlan_all_mac(vsi, vid); in i40e_vsi_add_vlan()
3143 spin_unlock_bh(&vsi->mac_filter_hash_lock); in i40e_vsi_add_vlan()
3150 i40e_service_event_schedule(vsi->back); in i40e_vsi_add_vlan()
3167 void i40e_rm_vlan_all_mac(struct i40e_vsi *vsi, s16 vid) in i40e_rm_vlan_all_mac() argument
3173 hash_for_each_safe(vsi->mac_filter_hash, bkt, h, f, hlist) { in i40e_rm_vlan_all_mac()
3175 __i40e_del_filter(vsi, f); in i40e_rm_vlan_all_mac()
3184 void i40e_vsi_kill_vlan(struct i40e_vsi *vsi, u16 vid) in i40e_vsi_kill_vlan() argument
3186 if (!vid || vsi->info.pvid) in i40e_vsi_kill_vlan()
3189 spin_lock_bh(&vsi->mac_filter_hash_lock); in i40e_vsi_kill_vlan()
3190 i40e_rm_vlan_all_mac(vsi, vid); in i40e_vsi_kill_vlan()
3191 spin_unlock_bh(&vsi->mac_filter_hash_lock); in i40e_vsi_kill_vlan()
3196 i40e_service_event_schedule(vsi->back); in i40e_vsi_kill_vlan()
3211 struct i40e_vsi *vsi = np->vsi; in i40e_vlan_rx_add_vid() local
3217 ret = i40e_vsi_add_vlan(vsi, vid); in i40e_vlan_rx_add_vid()
3219 set_bit(vid, vsi->active_vlans); in i40e_vlan_rx_add_vid()
3234 struct i40e_vsi *vsi = np->vsi; in i40e_vlan_rx_add_vid_up() local
3238 set_bit(vid, vsi->active_vlans); in i40e_vlan_rx_add_vid_up()
3253 struct i40e_vsi *vsi = np->vsi; in i40e_vlan_rx_kill_vid() local
3259 i40e_vsi_kill_vlan(vsi, vid); in i40e_vlan_rx_kill_vid()
3261 clear_bit(vid, vsi->active_vlans); in i40e_vlan_rx_kill_vid()
3270 static void i40e_restore_vlan(struct i40e_vsi *vsi) in i40e_restore_vlan() argument
3274 if (!vsi->netdev) in i40e_restore_vlan()
3277 if (vsi->netdev->features & NETIF_F_HW_VLAN_CTAG_RX) in i40e_restore_vlan()
3278 i40e_vlan_stripping_enable(vsi); in i40e_restore_vlan()
3280 i40e_vlan_stripping_disable(vsi); in i40e_restore_vlan()
3282 for_each_set_bit(vid, vsi->active_vlans, VLAN_N_VID) in i40e_restore_vlan()
3283 i40e_vlan_rx_add_vid_up(vsi->netdev, htons(ETH_P_8021Q), in i40e_restore_vlan()
3292 int i40e_vsi_add_pvid(struct i40e_vsi *vsi, u16 vid) in i40e_vsi_add_pvid() argument
3297 vsi->info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID); in i40e_vsi_add_pvid()
3298 vsi->info.pvid = cpu_to_le16(vid); in i40e_vsi_add_pvid()
3299 vsi->info.port_vlan_flags = I40E_AQ_VSI_PVLAN_MODE_TAGGED | in i40e_vsi_add_pvid()
3303 ctxt.seid = vsi->seid; in i40e_vsi_add_pvid()
3304 ctxt.info = vsi->info; in i40e_vsi_add_pvid()
3305 ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL); in i40e_vsi_add_pvid()
3307 dev_info(&vsi->back->pdev->dev, in i40e_vsi_add_pvid()
3310 i40e_aq_str(&vsi->back->hw, in i40e_vsi_add_pvid()
3311 vsi->back->hw.aq.asq_last_status)); in i40e_vsi_add_pvid()
3324 void i40e_vsi_remove_pvid(struct i40e_vsi *vsi) in i40e_vsi_remove_pvid() argument
3326 vsi->info.pvid = 0; in i40e_vsi_remove_pvid()
3328 i40e_vlan_stripping_disable(vsi); in i40e_vsi_remove_pvid()
3341 static int i40e_vsi_setup_tx_resources(struct i40e_vsi *vsi) in i40e_vsi_setup_tx_resources() argument
3345 for (i = 0; i < vsi->num_queue_pairs && !err; i++) in i40e_vsi_setup_tx_resources()
3346 err = i40e_setup_tx_descriptors(vsi->tx_rings[i]); in i40e_vsi_setup_tx_resources()
3348 if (!i40e_enabled_xdp_vsi(vsi)) in i40e_vsi_setup_tx_resources()
3351 for (i = 0; i < vsi->num_queue_pairs && !err; i++) in i40e_vsi_setup_tx_resources()
3352 err = i40e_setup_tx_descriptors(vsi->xdp_rings[i]); in i40e_vsi_setup_tx_resources()
3363 static void i40e_vsi_free_tx_resources(struct i40e_vsi *vsi) in i40e_vsi_free_tx_resources() argument
3367 if (vsi->tx_rings) { in i40e_vsi_free_tx_resources()
3368 for (i = 0; i < vsi->num_queue_pairs; i++) in i40e_vsi_free_tx_resources()
3369 if (vsi->tx_rings[i] && vsi->tx_rings[i]->desc) in i40e_vsi_free_tx_resources()
3370 i40e_free_tx_resources(vsi->tx_rings[i]); in i40e_vsi_free_tx_resources()
3373 if (vsi->xdp_rings) { in i40e_vsi_free_tx_resources()
3374 for (i = 0; i < vsi->num_queue_pairs; i++) in i40e_vsi_free_tx_resources()
3375 if (vsi->xdp_rings[i] && vsi->xdp_rings[i]->desc) in i40e_vsi_free_tx_resources()
3376 i40e_free_tx_resources(vsi->xdp_rings[i]); in i40e_vsi_free_tx_resources()
3390 static int i40e_vsi_setup_rx_resources(struct i40e_vsi *vsi) in i40e_vsi_setup_rx_resources() argument
3394 for (i = 0; i < vsi->num_queue_pairs && !err; i++) in i40e_vsi_setup_rx_resources()
3395 err = i40e_setup_rx_descriptors(vsi->rx_rings[i]); in i40e_vsi_setup_rx_resources()
3405 static void i40e_vsi_free_rx_resources(struct i40e_vsi *vsi) in i40e_vsi_free_rx_resources() argument
3409 if (!vsi->rx_rings) in i40e_vsi_free_rx_resources()
3412 for (i = 0; i < vsi->num_queue_pairs; i++) in i40e_vsi_free_rx_resources()
3413 if (vsi->rx_rings[i] && vsi->rx_rings[i]->desc) in i40e_vsi_free_rx_resources()
3414 i40e_free_rx_resources(vsi->rx_rings[i]); in i40e_vsi_free_rx_resources()
3448 bool xdp_on = i40e_enabled_xdp_vsi(ring->vsi); in i40e_xsk_pool()
3452 qid -= ring->vsi->alloc_queue_pairs; in i40e_xsk_pool()
3454 if (!xdp_on || !test_bit(qid, ring->vsi->af_xdp_zc_qps)) in i40e_xsk_pool()
3457 return xsk_get_pool_from_qid(ring->vsi->netdev, qid); in i40e_xsk_pool()
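
i40e_xsk_pool() shows how XDP TX rings are addressed: they are indexed after the regular rings, so the queue index is folded back into the 0..alloc_queue_pairs-1 range (line 3452) before testing the AF_XDP zero-copy bitmap. A compilable toy of that lookup; the 64-bit-word bitmap layout is illustrative:

    #include <stdbool.h>
    #include <stdint.h>

    /* Toy of the qid normalization and zero-copy check above. */
    static bool queue_has_zc_pool(uint16_t queue_index, bool is_xdp_ring,
                                  uint16_t alloc_queue_pairs, bool xdp_on,
                                  const uint64_t *af_xdp_zc_qps)
    {
        uint16_t qid = queue_index;

        if (is_xdp_ring)
            qid -= alloc_queue_pairs;     /* back to the pair's index */

        return xdp_on &&
               ((af_xdp_zc_qps[qid / 64] >> (qid % 64)) & 1);
    }
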
3468 struct i40e_vsi *vsi = ring->vsi; in i40e_configure_tx_ring() local
3469 u16 pf_q = vsi->base_queue + ring->queue_index; in i40e_configure_tx_ring()
3470 struct i40e_hw *hw = &vsi->back->hw; in i40e_configure_tx_ring()
3479 if (test_bit(I40E_FLAG_FD_ATR_ENA, vsi->back->flags)) { in i40e_configure_tx_ring()
3495 if (test_bit(I40E_FLAG_FD_SB_ENA, vsi->back->flags) || in i40e_configure_tx_ring()
3496 test_bit(I40E_FLAG_FD_ATR_ENA, vsi->back->flags)) in i40e_configure_tx_ring()
3498 if (test_bit(I40E_FLAG_PTP_ENA, vsi->back->flags)) in i40e_configure_tx_ring()
3501 if (vsi->type != I40E_VSI_FDIR) in i40e_configure_tx_ring()
3522 tx_ctx.rdylist = le16_to_cpu(vsi->info.qs_handle[ring->dcb_tc]); in i40e_configure_tx_ring()
3529 dev_info(&vsi->back->pdev->dev, in i40e_configure_tx_ring()
3538 dev_info(&vsi->back->pdev->dev, in i40e_configure_tx_ring()
3554 if (vsi->type == I40E_VSI_VMDQ2) { in i40e_configure_tx_ring()
3557 vsi->id); in i40e_configure_tx_ring()
3592 struct i40e_vsi *vsi = ring->vsi; in i40e_configure_rx_ring() local
3593 u32 chain_len = vsi->back->hw.func_caps.rx_buf_chain_len; in i40e_configure_rx_ring()
3594 u16 pf_q = vsi->base_queue + ring->queue_index; in i40e_configure_rx_ring()
3595 struct i40e_hw *hw = &vsi->back->hw; in i40e_configure_rx_ring()
3605 ring->rx_buf_len = vsi->rx_buf_len; in i40e_configure_rx_ring()
3608 if (ring->vsi->type != I40E_VSI_MAIN) in i40e_configure_rx_ring()
3635 dev_info(&vsi->back->pdev->dev, in i40e_configure_rx_ring()
3664 rx_ctx.rxmax = min_t(u16, vsi->max_frame, chain_len * ring->rx_buf_len); in i40e_configure_rx_ring()
3679 dev_info(&vsi->back->pdev->dev, in i40e_configure_rx_ring()
3688 dev_info(&vsi->back->pdev->dev, in i40e_configure_rx_ring()
3695 if (!vsi->netdev || test_bit(I40E_FLAG_LEGACY_RX_ENA, vsi->back->flags)) { in i40e_configure_rx_ring()
3697 dev_info(&vsi->back->pdev->dev, in i40e_configure_rx_ring()
3722 dev_info(&vsi->back->pdev->dev, in i40e_configure_rx_ring()
3737 static int i40e_vsi_configure_tx(struct i40e_vsi *vsi) in i40e_vsi_configure_tx() argument
3742 for (i = 0; (i < vsi->num_queue_pairs) && !err; i++) in i40e_vsi_configure_tx()
3743 err = i40e_configure_tx_ring(vsi->tx_rings[i]); in i40e_vsi_configure_tx()
3745 if (err || !i40e_enabled_xdp_vsi(vsi)) in i40e_vsi_configure_tx()
3748 for (i = 0; (i < vsi->num_queue_pairs) && !err; i++) in i40e_vsi_configure_tx()
3749 err = i40e_configure_tx_ring(vsi->xdp_rings[i]); in i40e_vsi_configure_tx()
3760 static int i40e_vsi_configure_rx(struct i40e_vsi *vsi) in i40e_vsi_configure_rx() argument
3765 vsi->max_frame = i40e_max_vsi_frame_size(vsi, vsi->xdp_prog); in i40e_vsi_configure_rx()
3766 vsi->rx_buf_len = i40e_calculate_vsi_rx_buf_len(vsi); in i40e_vsi_configure_rx()
3769 if (vsi->netdev && !I40E_2K_TOO_SMALL_WITH_PADDING && in i40e_vsi_configure_rx()
3770 vsi->netdev->mtu <= ETH_DATA_LEN) { in i40e_vsi_configure_rx()
3771 vsi->rx_buf_len = I40E_RXBUFFER_1536 - NET_IP_ALIGN; in i40e_vsi_configure_rx()
3772 vsi->max_frame = vsi->rx_buf_len; in i40e_vsi_configure_rx()
3777 for (i = 0; i < vsi->num_queue_pairs && !err; i++) in i40e_vsi_configure_rx()
3778 err = i40e_configure_rx_ring(vsi->rx_rings[i]); in i40e_vsi_configure_rx()
3787 static void i40e_vsi_config_dcb_rings(struct i40e_vsi *vsi) in i40e_vsi_config_dcb_rings() argument
3793 if (!test_bit(I40E_FLAG_DCB_ENA, vsi->back->flags)) { in i40e_vsi_config_dcb_rings()
3795 for (i = 0; i < vsi->num_queue_pairs; i++) { in i40e_vsi_config_dcb_rings()
3796 rx_ring = vsi->rx_rings[i]; in i40e_vsi_config_dcb_rings()
3797 tx_ring = vsi->tx_rings[i]; in i40e_vsi_config_dcb_rings()
3805 if (!(vsi->tc_config.enabled_tc & BIT_ULL(n))) in i40e_vsi_config_dcb_rings()
3808 qoffset = vsi->tc_config.tc_info[n].qoffset; in i40e_vsi_config_dcb_rings()
3809 qcount = vsi->tc_config.tc_info[n].qcount; in i40e_vsi_config_dcb_rings()
3811 rx_ring = vsi->rx_rings[i]; in i40e_vsi_config_dcb_rings()
3812 tx_ring = vsi->tx_rings[i]; in i40e_vsi_config_dcb_rings()
3823 static void i40e_set_vsi_rx_mode(struct i40e_vsi *vsi) in i40e_set_vsi_rx_mode() argument
3825 if (vsi->netdev) in i40e_set_vsi_rx_mode()
3826 i40e_set_rx_mode(vsi->netdev); in i40e_set_vsi_rx_mode()
3854 static void i40e_fdir_filter_restore(struct i40e_vsi *vsi) in i40e_fdir_filter_restore() argument
3857 struct i40e_pf *pf = vsi->back; in i40e_fdir_filter_restore()
3868 i40e_add_del_fdir(vsi, filter, true); in i40e_fdir_filter_restore()
3876 static int i40e_vsi_configure(struct i40e_vsi *vsi) in i40e_vsi_configure() argument
3880 i40e_set_vsi_rx_mode(vsi); in i40e_vsi_configure()
3881 i40e_restore_vlan(vsi); in i40e_vsi_configure()
3882 i40e_vsi_config_dcb_rings(vsi); in i40e_vsi_configure()
3883 err = i40e_vsi_configure_tx(vsi); in i40e_vsi_configure()
3885 err = i40e_vsi_configure_rx(vsi); in i40e_vsi_configure()
3894 static void i40e_vsi_configure_msix(struct i40e_vsi *vsi) in i40e_vsi_configure_msix() argument
3896 bool has_xdp = i40e_enabled_xdp_vsi(vsi); in i40e_vsi_configure_msix()
3897 struct i40e_pf *pf = vsi->back; in i40e_vsi_configure_msix()
3907 qp = vsi->base_queue; in i40e_vsi_configure_msix()
3908 vector = vsi->base_vector; in i40e_vsi_configure_msix()
3909 for (i = 0; i < vsi->num_q_vectors; i++, vector++) { in i40e_vsi_configure_msix()
3910 struct i40e_q_vector *q_vector = vsi->q_vectors[i]; in i40e_vsi_configure_msix()
3914 ITR_TO_REG(vsi->rx_rings[i]->itr_setting); in i40e_vsi_configure_msix()
3921 ITR_TO_REG(vsi->tx_rings[i]->itr_setting); in i40e_vsi_configure_msix()
3933 i40e_intrl_usec_to_reg(vsi->int_rate_limit)); in i40e_vsi_configure_msix()
3938 u32 nextqp = has_xdp ? qp + vsi->alloc_queue_pairs : qp; in i40e_vsi_configure_msix()
4024 static void i40e_configure_msi_and_legacy(struct i40e_vsi *vsi) in i40e_configure_msi_and_legacy() argument
4026 u32 nextqp = i40e_enabled_xdp_vsi(vsi) ? vsi->alloc_queue_pairs : 0; in i40e_configure_msi_and_legacy()
4027 struct i40e_q_vector *q_vector = vsi->q_vectors[0]; in i40e_configure_msi_and_legacy()
4028 struct i40e_pf *pf = vsi->back; in i40e_configure_msi_and_legacy()
4033 q_vector->rx.target_itr = ITR_TO_REG(vsi->rx_rings[0]->itr_setting); in i40e_configure_msi_and_legacy()
4037 q_vector->tx.target_itr = ITR_TO_REG(vsi->tx_rings[0]->itr_setting); in i40e_configure_msi_and_legacy()
4051 if (i40e_enabled_xdp_vsi(vsi)) { in i40e_configure_msi_and_legacy()
4144 static int i40e_vsi_request_irq_msix(struct i40e_vsi *vsi, char *basename) in i40e_vsi_request_irq_msix() argument
4146 int q_vectors = vsi->num_q_vectors; in i40e_vsi_request_irq_msix()
4147 struct i40e_pf *pf = vsi->back; in i40e_vsi_request_irq_msix()
4148 int base = vsi->base_vector; in i40e_vsi_request_irq_msix()
4156 struct i40e_q_vector *q_vector = vsi->q_vectors[vector]; in i40e_vsi_request_irq_msix()
4175 vsi->irq_handler, in i40e_vsi_request_irq_msix()
4200 vsi->irqs_ready = true; in i40e_vsi_request_irq_msix()
4209 free_irq(irq_num, &vsi->q_vectors[vector]); in i40e_vsi_request_irq_msix()
4218 static void i40e_vsi_disable_irq(struct i40e_vsi *vsi) in i40e_vsi_disable_irq() argument
4220 struct i40e_pf *pf = vsi->back; in i40e_vsi_disable_irq()
4222 int base = vsi->base_vector; in i40e_vsi_disable_irq()
4226 for (i = 0; i < vsi->num_queue_pairs; i++) { in i40e_vsi_disable_irq()
4229 val = rd32(hw, I40E_QINT_TQCTL(vsi->tx_rings[i]->reg_idx)); in i40e_vsi_disable_irq()
4231 wr32(hw, I40E_QINT_TQCTL(vsi->tx_rings[i]->reg_idx), val); in i40e_vsi_disable_irq()
4233 val = rd32(hw, I40E_QINT_RQCTL(vsi->rx_rings[i]->reg_idx)); in i40e_vsi_disable_irq()
4235 wr32(hw, I40E_QINT_RQCTL(vsi->rx_rings[i]->reg_idx), val); in i40e_vsi_disable_irq()
4237 if (!i40e_enabled_xdp_vsi(vsi)) in i40e_vsi_disable_irq()
4239 wr32(hw, I40E_QINT_TQCTL(vsi->xdp_rings[i]->reg_idx), 0); in i40e_vsi_disable_irq()
4244 for (i = vsi->base_vector; in i40e_vsi_disable_irq()
4245 i < (vsi->num_q_vectors + vsi->base_vector); i++) in i40e_vsi_disable_irq()
4249 for (i = 0; i < vsi->num_q_vectors; i++) in i40e_vsi_disable_irq()
4264 static int i40e_vsi_enable_irq(struct i40e_vsi *vsi) in i40e_vsi_enable_irq() argument
4266 struct i40e_pf *pf = vsi->back; in i40e_vsi_enable_irq()
4270 for (i = 0; i < vsi->num_q_vectors; i++) in i40e_vsi_enable_irq()
4271 i40e_irq_dynamic_enable(vsi, i); in i40e_vsi_enable_irq()
4334 struct i40e_vsi *vsi = i40e_pf_get_main_vsi(pf); in i40e_intr() local
4335 struct i40e_q_vector *q_vector = vsi->q_vectors[0]; in i40e_intr()
4447 struct i40e_vsi *vsi = tx_ring->vsi; in i40e_clean_fdir_tx_irq() local
4517 if (test_bit(I40E_FLAG_MSIX_ENA, vsi->back->flags)) in i40e_clean_fdir_tx_irq()
4518 i40e_irq_dynamic_enable(vsi, tx_ring->q_vector->v_idx); in i40e_clean_fdir_tx_irq()
4531 struct i40e_vsi *vsi; in i40e_fdir_clean_ring() local
4536 vsi = q_vector->tx.ring->vsi; in i40e_fdir_clean_ring()
4537 i40e_clean_fdir_tx_irq(q_vector->tx.ring, vsi->work_limit); in i40e_fdir_clean_ring()
4548 static void i40e_map_vector_to_qp(struct i40e_vsi *vsi, int v_idx, int qp_idx) in i40e_map_vector_to_qp() argument
4550 struct i40e_q_vector *q_vector = vsi->q_vectors[v_idx]; in i40e_map_vector_to_qp()
4551 struct i40e_ring *tx_ring = vsi->tx_rings[qp_idx]; in i40e_map_vector_to_qp()
4552 struct i40e_ring *rx_ring = vsi->rx_rings[qp_idx]; in i40e_map_vector_to_qp()
4560 if (i40e_enabled_xdp_vsi(vsi)) { in i40e_map_vector_to_qp()
4561 struct i40e_ring *xdp_ring = vsi->xdp_rings[qp_idx]; in i40e_map_vector_to_qp()
4584 static void i40e_vsi_map_rings_to_vectors(struct i40e_vsi *vsi) in i40e_vsi_map_rings_to_vectors() argument
4586 int qp_remaining = vsi->num_queue_pairs; in i40e_vsi_map_rings_to_vectors()
4587 int q_vectors = vsi->num_q_vectors; in i40e_vsi_map_rings_to_vectors()
4600 struct i40e_q_vector *q_vector = vsi->q_vectors[v_start]; in i40e_vsi_map_rings_to_vectors()
4605 q_vector->reg_idx = q_vector->v_idx + vsi->base_vector - 1; in i40e_vsi_map_rings_to_vectors()
4613 i40e_map_vector_to_qp(vsi, v_start, qp_idx); in i40e_vsi_map_rings_to_vectors()
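
i40e_vsi_map_rings_to_vectors() spreads num_queue_pairs over num_q_vectors; the arithmetic itself is hidden by this vsi-only grep, but the usual kernel idiom here is a ceiling division over the remaining vectors so earlier vectors absorb any remainder. A sketch of that distribution (the rounding is inferred, not shown in these hits):

    /* Distribute queue pairs over vectors, DIV_ROUND_UP style;
     * pairs_per_vector must hold num_q_vectors entries. */
    static void map_rings_to_vectors(int num_queue_pairs,
                                     int num_q_vectors,
                                     int *pairs_per_vector)
    {
        int qp_remaining = num_queue_pairs;

        for (int v = 0; v < num_q_vectors; v++) {
            int n = (qp_remaining + (num_q_vectors - v) - 1) /
                    (num_q_vectors - v);      /* ceiling division */

            pairs_per_vector[v] = n;
            qp_remaining -= n;
        }
    }
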
4625 static int i40e_vsi_request_irq(struct i40e_vsi *vsi, char *basename) in i40e_vsi_request_irq() argument
4627 struct i40e_pf *pf = vsi->back; in i40e_vsi_request_irq()
4631 err = i40e_vsi_request_irq_msix(vsi, basename); in i40e_vsi_request_irq()
4656 struct i40e_vsi *vsi = np->vsi; in i40e_netpoll() local
4657 struct i40e_pf *pf = vsi->back; in i40e_netpoll()
4661 if (test_bit(__I40E_VSI_DOWN, vsi->state)) in i40e_netpoll()
4665 for (i = 0; i < vsi->num_q_vectors; i++) in i40e_netpoll()
4666 i40e_msix_clean_rings(0, vsi->q_vectors[i]); in i40e_netpoll()
4779 static int i40e_vsi_enable_tx(struct i40e_vsi *vsi) in i40e_vsi_enable_tx() argument
4781 struct i40e_pf *pf = vsi->back; in i40e_vsi_enable_tx()
4784 pf_q = vsi->base_queue; in i40e_vsi_enable_tx()
4785 for (i = 0; i < vsi->num_queue_pairs; i++, pf_q++) { in i40e_vsi_enable_tx()
4786 ret = i40e_control_wait_tx_q(vsi->seid, pf, in i40e_vsi_enable_tx()
4792 if (!i40e_enabled_xdp_vsi(vsi)) in i40e_vsi_enable_tx()
4795 ret = i40e_control_wait_tx_q(vsi->seid, pf, in i40e_vsi_enable_tx()
4796 pf_q + vsi->alloc_queue_pairs, in i40e_vsi_enable_tx()
4898 static int i40e_vsi_enable_rx(struct i40e_vsi *vsi) in i40e_vsi_enable_rx() argument
4900 struct i40e_pf *pf = vsi->back; in i40e_vsi_enable_rx()
4903 pf_q = vsi->base_queue; in i40e_vsi_enable_rx()
4904 for (i = 0; i < vsi->num_queue_pairs; i++, pf_q++) { in i40e_vsi_enable_rx()
4909 vsi->seid, pf_q); in i40e_vsi_enable_rx()
4921 int i40e_vsi_start_rings(struct i40e_vsi *vsi) in i40e_vsi_start_rings() argument
4926 ret = i40e_vsi_enable_rx(vsi); in i40e_vsi_start_rings()
4929 ret = i40e_vsi_enable_tx(vsi); in i40e_vsi_start_rings()
4940 void i40e_vsi_stop_rings(struct i40e_vsi *vsi) in i40e_vsi_stop_rings() argument
4942 struct i40e_pf *pf = vsi->back; in i40e_vsi_stop_rings()
4946 if (test_bit(__I40E_PORT_SUSPENDED, vsi->back->state)) in i40e_vsi_stop_rings()
4947 return i40e_vsi_stop_rings_no_wait(vsi); in i40e_vsi_stop_rings()
4949 tx_q_end = vsi->base_queue + in i40e_vsi_stop_rings()
4950 vsi->alloc_queue_pairs * (i40e_enabled_xdp_vsi(vsi) ? 2 : 1); in i40e_vsi_stop_rings()
4951 for (pf_q = vsi->base_queue; pf_q < tx_q_end; pf_q++) in i40e_vsi_stop_rings()
4954 rx_q_end = vsi->base_queue + vsi->num_queue_pairs; in i40e_vsi_stop_rings()
4955 for (pf_q = vsi->base_queue; pf_q < rx_q_end; pf_q++) in i40e_vsi_stop_rings()
4959 for (pf_q = vsi->base_queue; pf_q < tx_q_end; pf_q++) in i40e_vsi_stop_rings()
4962 i40e_vsi_wait_queues_disabled(vsi); in i40e_vsi_stop_rings()
4976 void i40e_vsi_stop_rings_no_wait(struct i40e_vsi *vsi) in i40e_vsi_stop_rings_no_wait() argument
4978 struct i40e_pf *pf = vsi->back; in i40e_vsi_stop_rings_no_wait()
4981 pf_q = vsi->base_queue; in i40e_vsi_stop_rings_no_wait()
4982 for (i = 0; i < vsi->num_queue_pairs; i++, pf_q++) { in i40e_vsi_stop_rings_no_wait()
4992 static void i40e_vsi_free_irq(struct i40e_vsi *vsi) in i40e_vsi_free_irq() argument
4994 struct i40e_pf *pf = vsi->back; in i40e_vsi_free_irq()
4996 int base = vsi->base_vector; in i40e_vsi_free_irq()
5001 if (!vsi->q_vectors) in i40e_vsi_free_irq()
5004 if (!vsi->irqs_ready) in i40e_vsi_free_irq()
5007 vsi->irqs_ready = false; in i40e_vsi_free_irq()
5008 for (i = 0; i < vsi->num_q_vectors; i++) { in i40e_vsi_free_irq()
5016 if (!vsi->q_vectors[i] || in i40e_vsi_free_irq()
5017 !vsi->q_vectors[i]->num_ringpairs) in i40e_vsi_free_irq()
5024 free_irq(irq_num, vsi->q_vectors[i]); in i40e_vsi_free_irq()
5115 static void i40e_free_q_vector(struct i40e_vsi *vsi, int v_idx) in i40e_free_q_vector() argument
5117 struct i40e_q_vector *q_vector = vsi->q_vectors[v_idx]; in i40e_free_q_vector()
5131 if (vsi->netdev) in i40e_free_q_vector()
5134 vsi->q_vectors[v_idx] = NULL; in i40e_free_q_vector()
5146 static void i40e_vsi_free_q_vectors(struct i40e_vsi *vsi) in i40e_vsi_free_q_vectors() argument
5150 for (v_idx = 0; v_idx < vsi->num_q_vectors; v_idx++) in i40e_vsi_free_q_vectors()
5151 i40e_free_q_vector(vsi, v_idx); in i40e_vsi_free_q_vectors()
5183 struct i40e_vsi *vsi; in i40e_clear_interrupt_scheme() local
5194 i40e_pf_for_each_vsi(pf, i, vsi) in i40e_clear_interrupt_scheme()
5195 i40e_vsi_free_q_vectors(vsi); in i40e_clear_interrupt_scheme()
5204 static void i40e_napi_enable_all(struct i40e_vsi *vsi) in i40e_napi_enable_all() argument
5208 if (!vsi->netdev) in i40e_napi_enable_all()
5211 for (q_idx = 0; q_idx < vsi->num_q_vectors; q_idx++) { in i40e_napi_enable_all()
5212 struct i40e_q_vector *q_vector = vsi->q_vectors[q_idx]; in i40e_napi_enable_all()
5223 static void i40e_napi_disable_all(struct i40e_vsi *vsi) in i40e_napi_disable_all() argument
5227 if (!vsi->netdev) in i40e_napi_disable_all()
5230 for (q_idx = 0; q_idx < vsi->num_q_vectors; q_idx++) { in i40e_napi_disable_all()
5231 struct i40e_q_vector *q_vector = vsi->q_vectors[q_idx]; in i40e_napi_disable_all()
5242 static void i40e_vsi_close(struct i40e_vsi *vsi) in i40e_vsi_close() argument
5244 struct i40e_pf *pf = vsi->back; in i40e_vsi_close()
5245 if (!test_and_set_bit(__I40E_VSI_DOWN, vsi->state)) in i40e_vsi_close()
5246 i40e_down(vsi); in i40e_vsi_close()
5247 i40e_vsi_free_irq(vsi); in i40e_vsi_close()
5248 i40e_vsi_free_tx_resources(vsi); in i40e_vsi_close()
5249 i40e_vsi_free_rx_resources(vsi); in i40e_vsi_close()
5250 vsi->current_netdev_flags = 0; in i40e_vsi_close()
5260 static void i40e_quiesce_vsi(struct i40e_vsi *vsi) in i40e_quiesce_vsi() argument
5262 if (test_bit(__I40E_VSI_DOWN, vsi->state)) in i40e_quiesce_vsi()
5265 set_bit(__I40E_VSI_NEEDS_RESTART, vsi->state); in i40e_quiesce_vsi()
5266 if (vsi->netdev && netif_running(vsi->netdev)) in i40e_quiesce_vsi()
5267 vsi->netdev->netdev_ops->ndo_stop(vsi->netdev); in i40e_quiesce_vsi()
5269 i40e_vsi_close(vsi); in i40e_quiesce_vsi()
5276 static void i40e_unquiesce_vsi(struct i40e_vsi *vsi) in i40e_unquiesce_vsi() argument
5278 if (!test_and_clear_bit(__I40E_VSI_NEEDS_RESTART, vsi->state)) in i40e_unquiesce_vsi()
5281 if (vsi->netdev && netif_running(vsi->netdev)) in i40e_unquiesce_vsi()
5282 vsi->netdev->netdev_ops->ndo_open(vsi->netdev); in i40e_unquiesce_vsi()
5284 i40e_vsi_open(vsi); /* this clears the DOWN bit */ in i40e_unquiesce_vsi()
5293 struct i40e_vsi *vsi; in i40e_pf_quiesce_all_vsi() local
5296 i40e_pf_for_each_vsi(pf, v, vsi) in i40e_pf_quiesce_all_vsi()
5297 i40e_quiesce_vsi(vsi); in i40e_pf_quiesce_all_vsi()
5306 struct i40e_vsi *vsi; in i40e_pf_unquiesce_all_vsi() local
5309 i40e_pf_for_each_vsi(pf, v, vsi) in i40e_pf_unquiesce_all_vsi()
5310 i40e_unquiesce_vsi(vsi); in i40e_pf_unquiesce_all_vsi()
5319 int i40e_vsi_wait_queues_disabled(struct i40e_vsi *vsi) in i40e_vsi_wait_queues_disabled() argument
5321 struct i40e_pf *pf = vsi->back; in i40e_vsi_wait_queues_disabled()
5324 pf_q = vsi->base_queue; in i40e_vsi_wait_queues_disabled()
5325 for (i = 0; i < vsi->num_queue_pairs; i++, pf_q++) { in i40e_vsi_wait_queues_disabled()
5331 vsi->seid, pf_q); in i40e_vsi_wait_queues_disabled()
5335 if (!i40e_enabled_xdp_vsi(vsi)) in i40e_vsi_wait_queues_disabled()
5339 ret = i40e_pf_txq_wait(pf, pf_q + vsi->alloc_queue_pairs, in i40e_vsi_wait_queues_disabled()
5344 vsi->seid, pf_q); in i40e_vsi_wait_queues_disabled()
5353 vsi->seid, pf_q); in i40e_vsi_wait_queues_disabled()
5371 struct i40e_vsi *vsi; in i40e_pf_wait_queues_disabled() local
5374 i40e_pf_for_each_vsi(pf, v, vsi) { in i40e_pf_wait_queues_disabled()
5375 ret = i40e_vsi_wait_queues_disabled(vsi); in i40e_pf_wait_queues_disabled()
5484 struct i40e_vsi *vsi = i40e_pf_get_main_vsi(pf); in i40e_mqprio_get_enabled_tc() local
5485 u8 num_tc = vsi->mqprio_qopt.qopt.num_tc; in i40e_mqprio_get_enabled_tc()
5505 struct i40e_vsi *vsi = i40e_pf_get_main_vsi(pf); in i40e_pf_get_num_tc() local
5507 return vsi->mqprio_qopt.qopt.num_tc; in i40e_pf_get_num_tc()
5565 static int i40e_vsi_get_bw_info(struct i40e_vsi *vsi) in i40e_vsi_get_bw_info() argument
5569 struct i40e_pf *pf = vsi->back; in i40e_vsi_get_bw_info()
5576 ret = i40e_aq_query_vsi_bw_config(hw, vsi->seid, &bw_config, NULL); in i40e_vsi_get_bw_info()
5586 ret = i40e_aq_query_vsi_ets_sla_config(hw, vsi->seid, &bw_ets_config, in i40e_vsi_get_bw_info()
5604 vsi->bw_limit = le16_to_cpu(bw_config.port_bw_limit); in i40e_vsi_get_bw_info()
5605 vsi->bw_max_quanta = bw_config.max_bw; in i40e_vsi_get_bw_info()
5609 vsi->bw_ets_share_credits[i] = bw_ets_config.share_credits[i]; in i40e_vsi_get_bw_info()
5610 vsi->bw_ets_limit_credits[i] = in i40e_vsi_get_bw_info()
5613 vsi->bw_ets_max_quanta[i] = (u8)((tc_bw_max >> (i*4)) & 0x7); in i40e_vsi_get_bw_info()
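
Line 5613 unpacks a packed register image: each traffic class owns a 4-bit field in tc_bw_max, of which the low 3 bits carry the max-quanta value. The same extraction spelled out as a helper:

    #include <stdint.h>

    /* Per-TC extraction matching (tc_bw_max >> (i * 4)) & 0x7 above:
     * 4 bits per traffic class, low 3 bits used. */
    static uint8_t tc_max_quanta(uint64_t tc_bw_max, unsigned int tc)
    {
        return (uint8_t)((tc_bw_max >> (tc * 4)) & 0x7);
    }
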
5627 static int i40e_vsi_configure_bw_alloc(struct i40e_vsi *vsi, u8 enabled_tc, in i40e_vsi_configure_bw_alloc() argument
5631 struct i40e_pf *pf = vsi->back; in i40e_vsi_configure_bw_alloc()
5638 if (!vsi->mqprio_qopt.qopt.hw && !test_bit(I40E_FLAG_DCB_ENA, pf->flags)) { in i40e_vsi_configure_bw_alloc()
5639 ret = i40e_set_bw_limit(vsi, vsi->seid, 0); in i40e_vsi_configure_bw_alloc()
5643 vsi->seid); in i40e_vsi_configure_bw_alloc()
5651 ret = i40e_aq_config_vsi_tc_bw(&pf->hw, vsi->seid, &bw_data, NULL); in i40e_vsi_configure_bw_alloc()
5660 vsi->info.qs_handle[i] = bw_data.qs_handles[i]; in i40e_vsi_configure_bw_alloc()
5671 static void i40e_vsi_config_netdev_tc(struct i40e_vsi *vsi, u8 enabled_tc) in i40e_vsi_config_netdev_tc() argument
5673 struct net_device *netdev = vsi->netdev; in i40e_vsi_config_netdev_tc()
5674 struct i40e_pf *pf = vsi->back; in i40e_vsi_config_netdev_tc()
5689 if (netdev_set_num_tc(netdev, vsi->tc_config.numtc)) in i40e_vsi_config_netdev_tc()
5701 if (vsi->tc_config.enabled_tc & BIT(i)) in i40e_vsi_config_netdev_tc()
5703 vsi->tc_config.tc_info[i].netdev_tc, in i40e_vsi_config_netdev_tc()
5704 vsi->tc_config.tc_info[i].qcount, in i40e_vsi_config_netdev_tc()
5705 vsi->tc_config.tc_info[i].qoffset); in i40e_vsi_config_netdev_tc()
5716 netdev_tc = vsi->tc_config.tc_info[ets_tc].netdev_tc; in i40e_vsi_config_netdev_tc()
5726 static void i40e_vsi_update_queue_map(struct i40e_vsi *vsi, in i40e_vsi_update_queue_map() argument
5733 vsi->info.mapping_flags = ctxt->info.mapping_flags; in i40e_vsi_update_queue_map()
5734 memcpy(&vsi->info.queue_mapping, in i40e_vsi_update_queue_map()
5735 &ctxt->info.queue_mapping, sizeof(vsi->info.queue_mapping)); in i40e_vsi_update_queue_map()
5736 memcpy(&vsi->info.tc_mapping, ctxt->info.tc_mapping, in i40e_vsi_update_queue_map()
5737 sizeof(vsi->info.tc_mapping)); in i40e_vsi_update_queue_map()
5745 int i40e_update_adq_vsi_queues(struct i40e_vsi *vsi, int vsi_offset) in i40e_update_adq_vsi_queues() argument
5752 if (!vsi) in i40e_update_adq_vsi_queues()
5754 pf = vsi->back; in i40e_update_adq_vsi_queues()
5757 ctxt.seid = vsi->seid; in i40e_update_adq_vsi_queues()
5759 ctxt.vf_num = vsi->vf_id + hw->func_caps.vf_base_id + vsi_offset; in i40e_update_adq_vsi_queues()
5760 ctxt.uplink_seid = vsi->uplink_seid; in i40e_update_adq_vsi_queues()
5763 ctxt.info = vsi->info; in i40e_update_adq_vsi_queues()
5765 i40e_vsi_setup_queue_map(vsi, &ctxt, vsi->tc_config.enabled_tc, in i40e_update_adq_vsi_queues()
5767 if (vsi->reconfig_rss) { in i40e_update_adq_vsi_queues()
5768 vsi->rss_size = min_t(int, pf->alloc_rss_size, in i40e_update_adq_vsi_queues()
5769 vsi->num_queue_pairs); in i40e_update_adq_vsi_queues()
5770 ret = i40e_vsi_config_rss(vsi); in i40e_update_adq_vsi_queues()
5775 vsi->reconfig_rss = false; in i40e_update_adq_vsi_queues()
5786 i40e_vsi_update_queue_map(vsi, &ctxt); in i40e_update_adq_vsi_queues()
5787 vsi->info.valid_sections = 0; in i40e_update_adq_vsi_queues()
5805 static int i40e_vsi_config_tc(struct i40e_vsi *vsi, u8 enabled_tc) in i40e_vsi_config_tc() argument
5808 struct i40e_pf *pf = vsi->back; in i40e_vsi_config_tc()
5815 if (vsi->tc_config.enabled_tc == enabled_tc && in i40e_vsi_config_tc()
5816 vsi->mqprio_qopt.mode != TC_MQPRIO_MODE_CHANNEL) in i40e_vsi_config_tc()
5825 ret = i40e_vsi_configure_bw_alloc(vsi, enabled_tc, bw_share); in i40e_vsi_config_tc()
5831 enabled_tc, vsi->seid); in i40e_vsi_config_tc()
5832 ret = i40e_aq_query_vsi_bw_config(hw, vsi->seid, in i40e_vsi_config_tc()
5854 ret = i40e_vsi_configure_bw_alloc(vsi, enabled_tc, bw_share); in i40e_vsi_config_tc()
5858 enabled_tc, vsi->seid); in i40e_vsi_config_tc()
5864 ctxt.seid = vsi->seid; in i40e_vsi_config_tc()
5865 ctxt.pf_num = vsi->back->hw.pf_id; in i40e_vsi_config_tc()
5867 ctxt.uplink_seid = vsi->uplink_seid; in i40e_vsi_config_tc()
5868 ctxt.info = vsi->info; in i40e_vsi_config_tc()
5870 ret = i40e_vsi_setup_queue_map_mqprio(vsi, &ctxt, enabled_tc); in i40e_vsi_config_tc()
5874 i40e_vsi_setup_queue_map(vsi, &ctxt, enabled_tc, false); in i40e_vsi_config_tc()
5880 if (!vsi->mqprio_qopt.qopt.hw && vsi->reconfig_rss) { in i40e_vsi_config_tc()
5881 vsi->rss_size = min_t(int, vsi->back->alloc_rss_size, in i40e_vsi_config_tc()
5882 vsi->num_queue_pairs); in i40e_vsi_config_tc()
5883 ret = i40e_vsi_config_rss(vsi); in i40e_vsi_config_tc()
5885 dev_info(&vsi->back->pdev->dev, in i40e_vsi_config_tc()
5889 vsi->reconfig_rss = false; in i40e_vsi_config_tc()
5891 if (test_bit(I40E_FLAG_IWARP_ENA, vsi->back->flags)) { in i40e_vsi_config_tc()
5909 i40e_vsi_update_queue_map(vsi, &ctxt); in i40e_vsi_config_tc()
5910 vsi->info.valid_sections = 0; in i40e_vsi_config_tc()
5913 ret = i40e_vsi_get_bw_info(vsi); in i40e_vsi_config_tc()
5923 i40e_vsi_config_netdev_tc(vsi, enabled_tc); in i40e_vsi_config_tc()
5940 static int i40e_vsi_reconfig_tc(struct i40e_vsi *vsi) in i40e_vsi_reconfig_tc() argument
5944 enabled_tc = vsi->tc_config.enabled_tc; in i40e_vsi_reconfig_tc()
5945 vsi->tc_config.enabled_tc = 0; in i40e_vsi_reconfig_tc()
5947 return i40e_vsi_config_tc(vsi, enabled_tc); in i40e_vsi_reconfig_tc()
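Note: the reconfig helper works by invalidating the cached TC bitmap before reapplying it, so that i40e_vsi_config_tc() cannot take its "nothing changed" early return (the check at 5815 above). A minimal sketch of the pattern, with hypothetical struct and helper names:

    /* Force a full reconfigure: clear the cached bitmap first so the
     * config routine's new-equals-cached short-circuit cannot fire.
     */
    static int reconfig_tc(struct tc_cfg *cfg)
    {
            u8 enabled = cfg->enabled_tc;

            cfg->enabled_tc = 0;            /* defeat the no-op check */
            return config_tc(cfg, enabled); /* reapply the same bitmap */
    }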
5955 static int i40e_get_link_speed(struct i40e_vsi *vsi) in i40e_get_link_speed() argument
5957 struct i40e_pf *pf = vsi->back; in i40e_get_link_speed()
5982 static u64 i40e_bw_bytes_to_mbits(struct i40e_vsi *vsi, u64 max_tx_rate) in i40e_bw_bytes_to_mbits() argument
5985 dev_warn(&vsi->back->pdev->dev, in i40e_bw_bytes_to_mbits()
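Note: mqprio hands the driver per-TC rates in bytes per second, while the firmware scheduler takes megabits, so the conversion above is a straight division (the dev_warn at 5985 covers rates below the scheduler's minimum granularity). A standalone sketch, where the 125000 divisor is the only assumed constant:

    #include <stdint.h>

    /* Convert a byte/s rate (as the stack provides it) to whole
     * Mbit/s for the HW scheduler: 1 Mbit/s == 1000000/8 == 125000 bytes/s.
     */
    static uint64_t bw_bytes_to_mbits(uint64_t bytes_per_sec)
    {
            return bytes_per_sec / 125000;
    }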
6003 int i40e_set_bw_limit(struct i40e_vsi *vsi, u16 seid, u64 max_tx_rate) in i40e_set_bw_limit() argument
6005 struct i40e_pf *pf = vsi->back; in i40e_set_bw_limit()
6010 speed = i40e_get_link_speed(vsi); in i40e_set_bw_limit()
6042 static void i40e_remove_queue_channels(struct i40e_vsi *vsi) in i40e_remove_queue_channels() argument
6047 struct i40e_pf *pf = vsi->back; in i40e_remove_queue_channels()
6054 vsi->current_rss_size = 0; in i40e_remove_queue_channels()
6057 if (list_empty(&vsi->ch_list)) in i40e_remove_queue_channels()
6060 list_for_each_entry_safe(ch, ch_tmp, &vsi->ch_list, list) { in i40e_remove_queue_channels()
6075 tx_ring = vsi->tx_rings[pf_q]; in i40e_remove_queue_channels()
6078 rx_ring = vsi->rx_rings[pf_q]; in i40e_remove_queue_channels()
6083 ret = i40e_set_bw_limit(vsi, ch->seid, 0); in i40e_remove_queue_channels()
6085 dev_info(&vsi->back->pdev->dev, in i40e_remove_queue_channels()
6097 ret = i40e_add_del_cloud_filter_big_buf(vsi, in i40e_remove_queue_channels()
6101 ret = i40e_add_del_cloud_filter(vsi, cfilter, in i40e_remove_queue_channels()
6113 ret = i40e_aq_delete_element(&vsi->back->hw, ch->seid, in i40e_remove_queue_channels()
6116 dev_err(&vsi->back->pdev->dev, in i40e_remove_queue_channels()
6121 INIT_LIST_HEAD(&vsi->ch_list); in i40e_remove_queue_channels()
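Note: channel teardown walks the list with the _safe iterator so each node can be unlinked and freed mid-walk, then reinitializes the list head for reuse. A condensed sketch with a hypothetical channel type; the real loop additionally resets the rings, zeroes the BW limit, drops cloud filters, and deletes the switch element through the AQ:

    struct channel {
            struct list_head list;
            u16 seid;
    };

    static void remove_all_channels(struct list_head *head)
    {
            struct channel *ch, *tmp;

            list_for_each_entry_safe(ch, tmp, head, list) {
                    /* per-channel teardown would happen here */
                    list_del(&ch->list);
                    kfree(ch);
            }
            INIT_LIST_HEAD(head);   /* leave the anchor clean for reuse */
    }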
6131 static int i40e_get_max_queues_for_channel(struct i40e_vsi *vsi) in i40e_get_max_queues_for_channel() argument
6136 list_for_each_entry_safe(ch, ch_tmp, &vsi->ch_list, list) { in i40e_get_max_queues_for_channel()
6158 struct i40e_vsi *vsi, bool *reconfig_rss) in i40e_validate_num_queues() argument
6166 if (vsi->current_rss_size) { in i40e_validate_num_queues()
6167 if (num_queues > vsi->current_rss_size) { in i40e_validate_num_queues()
6170 num_queues, vsi->current_rss_size); in i40e_validate_num_queues()
6172 } else if ((num_queues < vsi->current_rss_size) && in i40e_validate_num_queues()
6176 num_queues, vsi->current_rss_size); in i40e_validate_num_queues()
6187 max_ch_queues = i40e_get_max_queues_for_channel(vsi); in i40e_validate_num_queues()
6207 static int i40e_vsi_reconfig_rss(struct i40e_vsi *vsi, u16 rss_size) in i40e_vsi_reconfig_rss() argument
6209 struct i40e_pf *pf = vsi->back; in i40e_vsi_reconfig_rss()
6216 if (!vsi->rss_size) in i40e_vsi_reconfig_rss()
6219 if (rss_size > vsi->rss_size) in i40e_vsi_reconfig_rss()
6222 local_rss_size = min_t(int, vsi->rss_size, rss_size); in i40e_vsi_reconfig_rss()
6223 lut = kzalloc(vsi->rss_table_size, GFP_KERNEL); in i40e_vsi_reconfig_rss()
6228 i40e_fill_rss_lut(pf, lut, vsi->rss_table_size, local_rss_size); in i40e_vsi_reconfig_rss()
6233 if (vsi->rss_hkey_user) in i40e_vsi_reconfig_rss()
6234 memcpy(seed, vsi->rss_hkey_user, I40E_HKEY_ARRAY_SIZE); in i40e_vsi_reconfig_rss()
6238 ret = i40e_config_rss(vsi, seed, lut, vsi->rss_table_size); in i40e_vsi_reconfig_rss()
6250 if (!vsi->orig_rss_size) in i40e_vsi_reconfig_rss()
6251 vsi->orig_rss_size = vsi->rss_size; in i40e_vsi_reconfig_rss()
6252 vsi->current_rss_size = local_rss_size; in i40e_vsi_reconfig_rss()
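Note: shrinking RSS for a channel means rebuilding the lookup table over fewer queues; the fill helper is understood to spread the table slots round-robin over the active queues, as in this sketch:

    /* Round-robin LUT fill, as i40e_fill_rss_lut() is understood to
     * do: slot i steers traffic to queue (i % rss_size).
     */
    static void fill_rss_lut(u8 *lut, u16 table_size, u16 rss_size)
    {
            u16 i;

            for (i = 0; i < table_size; i++)
                    lut[i] = i % rss_size;
    }

The surrounding code keeps the user-supplied hash key when one exists, generates a random seed otherwise, and records orig_rss_size once so the original width can be restored later.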
6368 static int i40e_channel_config_bw(struct i40e_vsi *vsi, struct i40e_channel *ch, in i40e_channel_config_bw() argument
6380 ret = i40e_aq_config_vsi_tc_bw(&vsi->back->hw, ch->seid, in i40e_channel_config_bw()
6383 dev_info(&vsi->back->pdev->dev, in i40e_channel_config_bw()
6385 vsi->back->hw.aq.asq_last_status, ch->seid); in i40e_channel_config_bw()
6405 struct i40e_vsi *vsi, in i40e_channel_config_tx_ring() argument
6419 ret = i40e_channel_config_bw(vsi, ch, bw_share); in i40e_channel_config_tx_ring()
6421 dev_info(&vsi->back->pdev->dev, in i40e_channel_config_tx_ring()
6436 tx_ring = vsi->tx_rings[pf_q]; in i40e_channel_config_tx_ring()
6440 rx_ring = vsi->rx_rings[pf_q]; in i40e_channel_config_tx_ring()
6459 struct i40e_vsi *vsi, in i40e_setup_hw_channel() argument
6466 ch->base_queue = vsi->next_base_queue; in i40e_setup_hw_channel()
6482 ret = i40e_channel_config_tx_ring(pf, vsi, ch); in i40e_setup_hw_channel()
6491 vsi->next_base_queue = vsi->next_base_queue + ch->num_queue_pairs; in i40e_setup_hw_channel()
6496 vsi->next_base_queue); in i40e_setup_hw_channel()
6509 static bool i40e_setup_channel(struct i40e_pf *pf, struct i40e_vsi *vsi, in i40e_setup_channel() argument
6517 if (vsi->type == I40E_VSI_MAIN) { in i40e_setup_channel()
6521 vsi->type); in i40e_setup_channel()
6530 ret = i40e_setup_hw_channel(pf, vsi, ch, seid, vsi_type); in i40e_setup_channel()
6546 static int i40e_validate_and_set_switch_mode(struct i40e_vsi *vsi) in i40e_validate_and_set_switch_mode() argument
6549 struct i40e_pf *pf = vsi->back; in i40e_validate_and_set_switch_mode()
6604 int i40e_create_queue_channel(struct i40e_vsi *vsi, in i40e_create_queue_channel() argument
6607 struct i40e_pf *pf = vsi->back; in i40e_create_queue_channel()
6621 err = i40e_validate_num_queues(pf, ch->num_queue_pairs, vsi, in i40e_create_queue_channel()
6636 if (vsi->type == I40E_VSI_MAIN) { in i40e_create_queue_channel()
6650 if (!vsi->cnt_q_avail || vsi->cnt_q_avail < ch->num_queue_pairs) { in i40e_create_queue_channel()
6653 vsi->cnt_q_avail, ch->num_queue_pairs); in i40e_create_queue_channel()
6658 if (reconfig_rss && (vsi->type == I40E_VSI_MAIN)) { in i40e_create_queue_channel()
6659 err = i40e_vsi_reconfig_rss(vsi, ch->num_queue_pairs); in i40e_create_queue_channel()
6668 if (!i40e_setup_channel(pf, vsi, ch)) { in i40e_create_queue_channel()
6681 if (i40e_set_bw_limit(vsi, ch->seid, ch->max_tx_rate)) in i40e_create_queue_channel()
6693 ch->parent_vsi = vsi; in i40e_create_queue_channel()
6696 vsi->cnt_q_avail -= ch->num_queue_pairs; in i40e_create_queue_channel()
6707 static int i40e_configure_queue_channels(struct i40e_vsi *vsi) in i40e_configure_queue_channels() argument
6714 vsi->tc_seid_map[0] = vsi->seid; in i40e_configure_queue_channels()
6716 if (vsi->tc_config.enabled_tc & BIT(i)) { in i40e_configure_queue_channels()
6725 vsi->tc_config.tc_info[i].qcount; in i40e_configure_queue_channels()
6727 vsi->tc_config.tc_info[i].qoffset; in i40e_configure_queue_channels()
6732 max_rate = vsi->mqprio_qopt.max_rate[i]; in i40e_configure_queue_channels()
6736 list_add_tail(&ch->list, &vsi->ch_list); in i40e_configure_queue_channels()
6738 ret = i40e_create_queue_channel(vsi, ch); in i40e_configure_queue_channels()
6740 dev_err(&vsi->back->pdev->dev, in i40e_configure_queue_channels()
6745 vsi->tc_seid_map[i] = ch->seid; in i40e_configure_queue_channels()
6750 i40e_do_reset(vsi->back, I40E_PF_RESET_FLAG, true); in i40e_configure_queue_channels()
6754 i40e_remove_queue_channels(vsi); in i40e_configure_queue_channels()
6819 struct i40e_vsi *vsi; in i40e_dcb_reconfigure() local
6841 i40e_pf_for_each_vsi(pf, v, vsi) { in i40e_dcb_reconfigure()
6845 if (vsi->type == I40E_VSI_MAIN) in i40e_dcb_reconfigure()
6850 ret = i40e_vsi_config_tc(vsi, tc_map); in i40e_dcb_reconfigure()
6854 vsi->seid); in i40e_dcb_reconfigure()
6858 i40e_vsi_map_rings_to_vectors(vsi); in i40e_dcb_reconfigure()
6859 if (vsi->netdev) in i40e_dcb_reconfigure()
6860 i40e_dcbnl_set_all(vsi); in i40e_dcb_reconfigure()
7276 static void i40e_print_link_message_eee(struct i40e_vsi *vsi, in i40e_print_link_message_eee() argument
7282 if (vsi->netdev->ethtool_ops->get_eee) in i40e_print_link_message_eee()
7283 vsi->netdev->ethtool_ops->get_eee(vsi->netdev, &kedata); in i40e_print_link_message_eee()
7286 netdev_info(vsi->netdev, in i40e_print_link_message_eee()
7291 netdev_info(vsi->netdev, in i40e_print_link_message_eee()
7301 void i40e_print_link_message(struct i40e_vsi *vsi, bool isup) in i40e_print_link_message() argument
7304 struct i40e_pf *pf = vsi->back; in i40e_print_link_message()
7316 if ((vsi->current_isup == isup) && (vsi->current_speed == new_speed)) in i40e_print_link_message()
7318 vsi->current_isup = isup; in i40e_print_link_message()
7319 vsi->current_speed = new_speed; in i40e_print_link_message()
7321 netdev_info(vsi->netdev, "NIC Link is Down\n"); in i40e_print_link_message()
7331 netdev_warn(vsi->netdev, in i40e_print_link_message()
7396 if (vsi->back->hw.phy.link_info.req_fec_info & in i40e_print_link_message()
7398 if (vsi->back->hw.phy.link_info.req_fec_info & in i40e_print_link_message()
7404 netdev_info(vsi->netdev, in i40e_print_link_message()
7423 netdev_info(vsi->netdev, in i40e_print_link_message()
7427 i40e_print_link_message_eee(vsi, speed, fc); in i40e_print_link_message()
7436 static int i40e_up_complete(struct i40e_vsi *vsi) in i40e_up_complete() argument
7438 struct i40e_pf *pf = vsi->back; in i40e_up_complete()
7442 i40e_vsi_configure_msix(vsi); in i40e_up_complete()
7444 i40e_configure_msi_and_legacy(vsi); in i40e_up_complete()
7447 err = i40e_vsi_start_rings(vsi); in i40e_up_complete()
7451 clear_bit(__I40E_VSI_DOWN, vsi->state); in i40e_up_complete()
7452 i40e_napi_enable_all(vsi); in i40e_up_complete()
7453 i40e_vsi_enable_irq(vsi); in i40e_up_complete()
7456 (vsi->netdev)) { in i40e_up_complete()
7457 i40e_print_link_message(vsi, true); in i40e_up_complete()
7458 netif_tx_start_all_queues(vsi->netdev); in i40e_up_complete()
7459 netif_carrier_on(vsi->netdev); in i40e_up_complete()
7463 if (vsi->type == I40E_VSI_FDIR) { in i40e_up_complete()
7467 i40e_fdir_filter_restore(vsi); in i40e_up_complete()
7486 static void i40e_vsi_reinit_locked(struct i40e_vsi *vsi) in i40e_vsi_reinit_locked() argument
7488 struct i40e_pf *pf = vsi->back; in i40e_vsi_reinit_locked()
7492 i40e_down(vsi); in i40e_vsi_reinit_locked()
7494 i40e_up(vsi); in i40e_vsi_reinit_locked()
7606 int i40e_up(struct i40e_vsi *vsi) in i40e_up() argument
7610 if (vsi->type == I40E_VSI_MAIN && in i40e_up()
7611 (test_bit(I40E_FLAG_LINK_DOWN_ON_CLOSE_ENA, vsi->back->flags) || in i40e_up()
7612 test_bit(I40E_FLAG_TOTAL_PORT_SHUTDOWN_ENA, vsi->back->flags))) in i40e_up()
7613 i40e_force_link_state(vsi->back, true); in i40e_up()
7615 err = i40e_vsi_configure(vsi); in i40e_up()
7617 err = i40e_up_complete(vsi); in i40e_up()
7626 void i40e_down(struct i40e_vsi *vsi) in i40e_down() argument
7633 if (vsi->netdev) { in i40e_down()
7634 netif_carrier_off(vsi->netdev); in i40e_down()
7635 netif_tx_disable(vsi->netdev); in i40e_down()
7637 i40e_vsi_disable_irq(vsi); in i40e_down()
7638 i40e_vsi_stop_rings(vsi); in i40e_down()
7639 if (vsi->type == I40E_VSI_MAIN && in i40e_down()
7640 (test_bit(I40E_FLAG_LINK_DOWN_ON_CLOSE_ENA, vsi->back->flags) || in i40e_down()
7641 test_bit(I40E_FLAG_TOTAL_PORT_SHUTDOWN_ENA, vsi->back->flags))) in i40e_down()
7642 i40e_force_link_state(vsi->back, false); in i40e_down()
7643 i40e_napi_disable_all(vsi); in i40e_down()
7645 for (i = 0; i < vsi->num_queue_pairs; i++) { in i40e_down()
7646 i40e_clean_tx_ring(vsi->tx_rings[i]); in i40e_down()
7647 if (i40e_enabled_xdp_vsi(vsi)) { in i40e_down()
7652 i40e_clean_tx_ring(vsi->xdp_rings[i]); in i40e_down()
7654 i40e_clean_rx_ring(vsi->rx_rings[i]); in i40e_down()
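Note: i40e_down() is the mirror image of i40e_up_complete(): the stack is silenced first, then interrupts, then the HW rings, then NAPI, and only once nothing can produce completions are the Tx/XDP/Rx buffers reclaimed. A sketch of the ordering with stand-in types and helpers:

    static void vsi_down(struct vsi *v)
    {
            int i;

            netif_carrier_off(v->netdev);   /* stack sees link loss   */
            netif_tx_disable(v->netdev);    /* no new transmits       */
            vsi_disable_irq(v);             /* no more completions    */
            vsi_stop_rings(v);              /* HW queues quiesced     */
            napi_disable_all(v);            /* pollers parked         */
            for (i = 0; i < v->num_queue_pairs; i++) {
                    clean_tx_ring(v->tx_rings[i]);  /* now safe to free */
                    clean_rx_ring(v->rx_rings[i]);
            }
    }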
7664 static int i40e_validate_mqprio_qopt(struct i40e_vsi *vsi, in i40e_validate_mqprio_qopt() argument
7679 dev_err(&vsi->back->pdev->dev, in i40e_validate_mqprio_qopt()
7693 if (vsi->num_queue_pairs < in i40e_validate_mqprio_qopt()
7695 dev_err(&vsi->back->pdev->dev, in i40e_validate_mqprio_qopt()
7699 if (sum_max_rate > i40e_get_link_speed(vsi)) { in i40e_validate_mqprio_qopt()
7700 dev_err(&vsi->back->pdev->dev, in i40e_validate_mqprio_qopt()
7711 static void i40e_vsi_set_default_tc_config(struct i40e_vsi *vsi) in i40e_vsi_set_default_tc_config() argument
7717 vsi->tc_config.numtc = 1; in i40e_vsi_set_default_tc_config()
7718 vsi->tc_config.enabled_tc = 1; in i40e_vsi_set_default_tc_config()
7719 qcount = min_t(int, vsi->alloc_queue_pairs, in i40e_vsi_set_default_tc_config()
7720 i40e_pf_get_max_q_per_tc(vsi->back)); in i40e_vsi_set_default_tc_config()
7725 vsi->tc_config.tc_info[i].qoffset = 0; in i40e_vsi_set_default_tc_config()
7727 vsi->tc_config.tc_info[i].qcount = qcount; in i40e_vsi_set_default_tc_config()
7729 vsi->tc_config.tc_info[i].qcount = 1; in i40e_vsi_set_default_tc_config()
7730 vsi->tc_config.tc_info[i].netdev_tc = 0; in i40e_vsi_set_default_tc_config()
7794 static void i40e_reset_ch_rings(struct i40e_vsi *vsi, struct i40e_channel *ch) in i40e_reset_ch_rings() argument
7802 tx_ring = vsi->tx_rings[pf_q]; in i40e_reset_ch_rings()
7804 rx_ring = vsi->rx_rings[pf_q]; in i40e_reset_ch_rings()
7817 static void i40e_free_macvlan_channels(struct i40e_vsi *vsi) in i40e_free_macvlan_channels() argument
7822 if (list_empty(&vsi->macvlan_list)) in i40e_free_macvlan_channels()
7825 list_for_each_entry_safe(ch, ch_tmp, &vsi->macvlan_list, list) { in i40e_free_macvlan_channels()
7829 i40e_reset_ch_rings(vsi, ch); in i40e_free_macvlan_channels()
7830 clear_bit(ch->fwd->bit_no, vsi->fwd_bitmask); in i40e_free_macvlan_channels()
7831 netdev_unbind_sb_channel(vsi->netdev, ch->fwd->netdev); in i40e_free_macvlan_channels()
7845 ret = i40e_aq_delete_element(&vsi->back->hw, ch->seid, in i40e_free_macvlan_channels()
7848 dev_err(&vsi->back->pdev->dev, in i40e_free_macvlan_channels()
7853 vsi->macvlan_cnt = 0; in i40e_free_macvlan_channels()
7862 static int i40e_fwd_ring_up(struct i40e_vsi *vsi, struct net_device *vdev, in i40e_fwd_ring_up() argument
7867 struct i40e_pf *pf = vsi->back; in i40e_fwd_ring_up()
7871 list_for_each_entry_safe(iter, ch_tmp, &vsi->macvlan_list, list) { in i40e_fwd_ring_up()
7876 netdev_bind_sb_channel_queue(vsi->netdev, vdev, in i40e_fwd_ring_up()
7887 tx_ring = vsi->tx_rings[pf_q]; in i40e_fwd_ring_up()
7891 rx_ring = vsi->rx_rings[pf_q]; in i40e_fwd_ring_up()
7917 rx_ring = vsi->rx_rings[pf_q]; in i40e_fwd_ring_up()
7937 static int i40e_setup_macvlans(struct i40e_vsi *vsi, u16 macvlan_cnt, u16 qcnt, in i40e_setup_macvlans() argument
7940 struct i40e_pf *pf = vsi->back; in i40e_setup_macvlans()
7948 if (vsi->type != I40E_VSI_MAIN || !macvlan_cnt) in i40e_setup_macvlans()
7951 num_qps = vsi->num_queue_pairs - (macvlan_cnt * qcnt); in i40e_setup_macvlans()
7963 ctxt.seid = vsi->seid; in i40e_setup_macvlans()
7964 ctxt.pf_num = vsi->back->hw.pf_id; in i40e_setup_macvlans()
7966 ctxt.uplink_seid = vsi->uplink_seid; in i40e_setup_macvlans()
7967 ctxt.info = vsi->info; in i40e_setup_macvlans()
7970 ctxt.info.queue_mapping[0] = cpu_to_le16(vsi->base_queue); in i40e_setup_macvlans()
7974 vsi->rss_size = max_t(u16, num_qps, qcnt); in i40e_setup_macvlans()
7975 ret = i40e_vsi_config_rss(vsi); in i40e_setup_macvlans()
7979 vsi->rss_size); in i40e_setup_macvlans()
7982 vsi->reconfig_rss = true; in i40e_setup_macvlans()
7983 dev_dbg(&vsi->back->pdev->dev, in i40e_setup_macvlans()
7984 "Reconfigured RSS with num_queues (%u)\n", vsi->rss_size); in i40e_setup_macvlans()
7985 vsi->next_base_queue = num_qps; in i40e_setup_macvlans()
7986 vsi->cnt_q_avail = vsi->num_queue_pairs - num_qps; in i40e_setup_macvlans()
8000 i40e_vsi_update_queue_map(vsi, &ctxt); in i40e_setup_macvlans()
8001 vsi->info.valid_sections = 0; in i40e_setup_macvlans()
8004 INIT_LIST_HEAD(&vsi->macvlan_list); in i40e_setup_macvlans()
8013 if (!i40e_setup_channel(pf, vsi, ch)) { in i40e_setup_macvlans()
8018 ch->parent_vsi = vsi; in i40e_setup_macvlans()
8019 vsi->cnt_q_avail -= ch->num_queue_pairs; in i40e_setup_macvlans()
8020 vsi->macvlan_cnt++; in i40e_setup_macvlans()
8021 list_add_tail(&ch->list, &vsi->macvlan_list); in i40e_setup_macvlans()
8028 i40e_free_macvlan_channels(vsi); in i40e_setup_macvlans()
8042 struct i40e_vsi *vsi = np->vsi; in i40e_fwd_add() local
8043 struct i40e_pf *pf = vsi->back; in i40e_fwd_add()
8066 if (!vsi->macvlan_cnt) { in i40e_fwd_add()
8068 set_bit(0, vsi->fwd_bitmask); in i40e_fwd_add()
8101 i40e_quiesce_vsi(vsi); in i40e_fwd_add()
8104 ret = i40e_setup_macvlans(vsi, macvlan_cnt, q_per_macvlan, in i40e_fwd_add()
8110 i40e_unquiesce_vsi(vsi); in i40e_fwd_add()
8112 avail_macvlan = find_first_zero_bit(vsi->fwd_bitmask, in i40e_fwd_add()
8113 vsi->macvlan_cnt); in i40e_fwd_add()
8122 set_bit(avail_macvlan, vsi->fwd_bitmask); in i40e_fwd_add()
8131 ret = i40e_fwd_ring_up(vsi, vdev, fwd); in i40e_fwd_add()
8148 static void i40e_del_all_macvlans(struct i40e_vsi *vsi) in i40e_del_all_macvlans() argument
8151 struct i40e_pf *pf = vsi->back; in i40e_del_all_macvlans()
8155 if (list_empty(&vsi->macvlan_list)) in i40e_del_all_macvlans()
8158 list_for_each_entry_safe(ch, ch_tmp, &vsi->macvlan_list, list) { in i40e_del_all_macvlans()
8165 i40e_reset_ch_rings(vsi, ch); in i40e_del_all_macvlans()
8166 clear_bit(ch->fwd->bit_no, vsi->fwd_bitmask); in i40e_del_all_macvlans()
8167 netdev_unbind_sb_channel(vsi->netdev, in i40e_del_all_macvlans()
8187 struct i40e_vsi *vsi = np->vsi; in i40e_fwd_del() local
8188 struct i40e_pf *pf = vsi->back; in i40e_fwd_del()
8193 list_for_each_entry_safe(ch, ch_tmp, &vsi->macvlan_list, list) { in i40e_fwd_del()
8202 i40e_reset_ch_rings(vsi, ch); in i40e_fwd_del()
8203 clear_bit(ch->fwd->bit_no, vsi->fwd_bitmask); in i40e_fwd_del()
8228 struct i40e_vsi *vsi = np->vsi; in i40e_setup_tc() local
8229 struct i40e_pf *pf = vsi->back; in i40e_setup_tc()
8237 old_queue_pairs = vsi->num_queue_pairs; in i40e_setup_tc()
8243 memcpy(&vsi->mqprio_qopt, mqprio_qopt, sizeof(*mqprio_qopt)); in i40e_setup_tc()
8279 ret = i40e_validate_mqprio_qopt(vsi, mqprio_qopt); in i40e_setup_tc()
8282 memcpy(&vsi->mqprio_qopt, mqprio_qopt, in i40e_setup_tc()
8297 if (enabled_tc == vsi->tc_config.enabled_tc && in i40e_setup_tc()
8302 i40e_quiesce_vsi(vsi); in i40e_setup_tc()
8305 i40e_remove_queue_channels(vsi); in i40e_setup_tc()
8308 ret = i40e_vsi_config_tc(vsi, enabled_tc); in i40e_setup_tc()
8311 vsi->seid); in i40e_setup_tc()
8315 (!is_power_of_2(vsi->tc_config.tc_info[0].qcount))) { in i40e_setup_tc()
8318 vsi->tc_config.tc_info[0].qcount); in i40e_setup_tc()
8324 dev_info(&vsi->back->pdev->dev, in i40e_setup_tc()
8326 vsi->seid, vsi->tc_config.tc_info[0].qcount); in i40e_setup_tc()
8329 if (vsi->mqprio_qopt.max_rate[0]) { in i40e_setup_tc()
8330 u64 max_tx_rate = i40e_bw_bytes_to_mbits(vsi, in i40e_setup_tc()
8331 vsi->mqprio_qopt.max_rate[0]); in i40e_setup_tc()
8333 ret = i40e_set_bw_limit(vsi, vsi->seid, max_tx_rate); in i40e_setup_tc()
8338 dev_dbg(&vsi->back->pdev->dev, in i40e_setup_tc()
8342 vsi->seid); in i40e_setup_tc()
8348 ret = i40e_configure_queue_channels(vsi); in i40e_setup_tc()
8350 vsi->num_queue_pairs = old_queue_pairs; in i40e_setup_tc()
8361 i40e_vsi_set_default_tc_config(vsi); in i40e_setup_tc()
8366 i40e_unquiesce_vsi(vsi); in i40e_setup_tc()
8422 int i40e_add_del_cloud_filter(struct i40e_vsi *vsi, in i40e_add_del_cloud_filter() argument
8426 struct i40e_pf *pf = vsi->back; in i40e_add_del_cloud_filter()
8491 int i40e_add_del_cloud_filter_big_buf(struct i40e_vsi *vsi, in i40e_add_del_cloud_filter_big_buf() argument
8496 struct i40e_pf *pf = vsi->back; in i40e_add_del_cloud_filter_big_buf()
8565 ret = i40e_validate_and_set_switch_mode(vsi); in i40e_add_del_cloud_filter_big_buf()
8599 static int i40e_parse_cls_flower(struct i40e_vsi *vsi, in i40e_parse_cls_flower() argument
8606 struct i40e_pf *pf = vsi->back; in i40e_parse_cls_flower()
8810 static int i40e_handle_tclass(struct i40e_vsi *vsi, u32 tc, in i40e_handle_tclass() argument
8817 filter->seid = vsi->seid; in i40e_handle_tclass()
8819 } else if (vsi->tc_config.enabled_tc & BIT(tc)) { in i40e_handle_tclass()
8821 dev_err(&vsi->back->pdev->dev, in i40e_handle_tclass()
8825 if (list_empty(&vsi->ch_list)) in i40e_handle_tclass()
8827 list_for_each_entry_safe(ch, ch_tmp, &vsi->ch_list, in i40e_handle_tclass()
8829 if (ch->seid == vsi->tc_seid_map[tc]) in i40e_handle_tclass()
8834 dev_err(&vsi->back->pdev->dev, "TC is not enabled\n"); in i40e_handle_tclass()
8844 static int i40e_configure_clsflower(struct i40e_vsi *vsi, in i40e_configure_clsflower() argument
8847 int tc = tc_classid_to_hwtc(vsi->netdev, cls_flower->classid); in i40e_configure_clsflower()
8849 struct i40e_pf *pf = vsi->back; in i40e_configure_clsflower()
8853 dev_err(&vsi->back->pdev->dev, "Invalid traffic class\n"); in i40e_configure_clsflower()
8868 dev_err(&vsi->back->pdev->dev, in i40e_configure_clsflower()
8873 if (test_bit(I40E_FLAG_FD_SB_ENA, vsi->back->flags)) { in i40e_configure_clsflower()
8874 dev_err(&vsi->back->pdev->dev, in i40e_configure_clsflower()
8876 clear_bit(I40E_FLAG_FD_SB_ENA, vsi->back->flags); in i40e_configure_clsflower()
8877 clear_bit(I40E_FLAG_FD_SB_TO_CLOUD_FILTER, vsi->back->flags); in i40e_configure_clsflower()
8886 err = i40e_parse_cls_flower(vsi, cls_flower, filter); in i40e_configure_clsflower()
8890 err = i40e_handle_tclass(vsi, tc, filter); in i40e_configure_clsflower()
8896 err = i40e_add_del_cloud_filter_big_buf(vsi, filter, true); in i40e_configure_clsflower()
8898 err = i40e_add_del_cloud_filter(vsi, filter, true); in i40e_configure_clsflower()
8925 static struct i40e_cloud_filter *i40e_find_cloud_filter(struct i40e_vsi *vsi, in i40e_find_cloud_filter() argument
8932 &vsi->back->cloud_filter_list, cloud_node) in i40e_find_cloud_filter()
8944 static int i40e_delete_clsflower(struct i40e_vsi *vsi, in i40e_delete_clsflower() argument
8948 struct i40e_pf *pf = vsi->back; in i40e_delete_clsflower()
8951 filter = i40e_find_cloud_filter(vsi, &cls_flower->cookie); in i40e_delete_clsflower()
8959 err = i40e_add_del_cloud_filter_big_buf(vsi, filter, false); in i40e_delete_clsflower()
8961 err = i40e_add_del_cloud_filter(vsi, filter, false); in i40e_delete_clsflower()
8990 struct i40e_vsi *vsi = np->vsi; in i40e_setup_tc_cls_flower() local
8994 return i40e_configure_clsflower(vsi, cls_flower); in i40e_setup_tc_cls_flower()
8996 return i40e_delete_clsflower(vsi, cls_flower); in i40e_setup_tc_cls_flower()
9009 if (!tc_cls_can_offload_and_chain0(np->vsi->netdev, type_data)) in i40e_setup_tc_block_cb()
9056 struct i40e_vsi *vsi = np->vsi; in i40e_open() local
9057 struct i40e_pf *pf = vsi->back; in i40e_open()
9070 err = i40e_vsi_open(vsi); in i40e_open()
9094 static int i40e_netif_set_realnum_tx_rx_queues(struct i40e_vsi *vsi) in i40e_netif_set_realnum_tx_rx_queues() argument
9098 ret = netif_set_real_num_rx_queues(vsi->netdev, in i40e_netif_set_realnum_tx_rx_queues()
9099 vsi->num_queue_pairs); in i40e_netif_set_realnum_tx_rx_queues()
9103 return netif_set_real_num_tx_queues(vsi->netdev, in i40e_netif_set_realnum_tx_rx_queues()
9104 vsi->num_queue_pairs); in i40e_netif_set_realnum_tx_rx_queues()
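Note: both real-queue-count setters can fail, so Rx is set first and its error propagated before Tx is touched, with the Tx result becoming the return value. A sketch against the real net core API:

    static int set_realnum_queues(struct net_device *dev,
                                  unsigned int pairs)
    {
            int ret;

            ret = netif_set_real_num_rx_queues(dev, pairs);
            if (ret)
                    return ret;
            return netif_set_real_num_tx_queues(dev, pairs);
    }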
9117 int i40e_vsi_open(struct i40e_vsi *vsi) in i40e_vsi_open() argument
9119 struct i40e_pf *pf = vsi->back; in i40e_vsi_open()
9124 err = i40e_vsi_setup_tx_resources(vsi); in i40e_vsi_open()
9127 err = i40e_vsi_setup_rx_resources(vsi); in i40e_vsi_open()
9131 err = i40e_vsi_configure(vsi); in i40e_vsi_open()
9135 if (vsi->netdev) { in i40e_vsi_open()
9137 dev_driver_string(&pf->pdev->dev), vsi->netdev->name); in i40e_vsi_open()
9138 err = i40e_vsi_request_irq(vsi, int_name); in i40e_vsi_open()
9143 err = i40e_netif_set_realnum_tx_rx_queues(vsi); in i40e_vsi_open()
9147 } else if (vsi->type == I40E_VSI_FDIR) { in i40e_vsi_open()
9151 err = i40e_vsi_request_irq(vsi, int_name); in i40e_vsi_open()
9160 err = i40e_up_complete(vsi); in i40e_vsi_open()
9167 i40e_down(vsi); in i40e_vsi_open()
9169 i40e_vsi_free_irq(vsi); in i40e_vsi_open()
9171 i40e_vsi_free_rx_resources(vsi); in i40e_vsi_open()
9173 i40e_vsi_free_tx_resources(vsi); in i40e_vsi_open()
9174 if (vsi->type == I40E_VSI_MAIN) in i40e_vsi_open()
9299 struct i40e_vsi *vsi = np->vsi; in i40e_close() local
9301 i40e_vsi_close(vsi); in i40e_close()
9319 struct i40e_vsi *vsi; in i40e_do_reset() local
9380 i40e_pf_for_each_vsi(pf, i, vsi) { in i40e_do_reset()
9382 vsi->state)) in i40e_do_reset()
9383 i40e_vsi_reinit_locked(vsi); in i40e_do_reset()
9389 i40e_pf_for_each_vsi(pf, i, vsi) { in i40e_do_reset()
9391 vsi->state)) { in i40e_do_reset()
9392 set_bit(__I40E_VSI_DOWN, vsi->state); in i40e_do_reset()
9393 i40e_down(vsi); in i40e_do_reset()
9906 static void i40e_vsi_link_event(struct i40e_vsi *vsi, bool link_up) in i40e_vsi_link_event() argument
9908 if (!vsi || test_bit(__I40E_VSI_DOWN, vsi->state)) in i40e_vsi_link_event()
9911 switch (vsi->type) { in i40e_vsi_link_event()
9913 if (!vsi->netdev || !vsi->netdev_registered) in i40e_vsi_link_event()
9917 netif_carrier_on(vsi->netdev); in i40e_vsi_link_event()
9918 netif_tx_wake_all_queues(vsi->netdev); in i40e_vsi_link_event()
9920 netif_carrier_off(vsi->netdev); in i40e_vsi_link_event()
9921 netif_tx_stop_all_queues(vsi->netdev); in i40e_vsi_link_event()
9943 struct i40e_vsi *vsi; in i40e_veb_link_event() local
9952 i40e_pf_for_each_vsi(pf, i, vsi) in i40e_veb_link_event()
9953 if (vsi->uplink_seid == veb->seid) in i40e_veb_link_event()
9954 i40e_vsi_link_event(vsi, link_up); in i40e_veb_link_event()
9963 struct i40e_vsi *vsi = i40e_pf_get_main_vsi(pf); in i40e_link_event() local
9995 (test_bit(__I40E_VSI_DOWN, vsi->state) || in i40e_link_event()
9996 new_link == netif_carrier_ok(vsi->netdev))) in i40e_link_event()
9999 i40e_print_link_message(vsi, new_link); in i40e_link_event()
10007 i40e_vsi_link_event(vsi, new_link); in i40e_link_event()
10047 struct i40e_vsi *vsi; in i40e_watchdog_subtask() local
10069 i40e_pf_for_each_vsi(pf, i, vsi) in i40e_watchdog_subtask()
10070 if (vsi->netdev) in i40e_watchdog_subtask()
10071 i40e_update_stats(vsi); in i40e_watchdog_subtask()
10335 struct i40e_vsi *vsi = i40e_pf_get_main_vsi(pf); in i40e_enable_pf_switch_lb() local
10354 ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL); in i40e_enable_pf_switch_lb()
10371 struct i40e_vsi *vsi = i40e_pf_get_main_vsi(pf); in i40e_disable_pf_switch_lb() local
10390 ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL); in i40e_disable_pf_switch_lb()
10432 struct i40e_vsi *vsi; in i40e_reconstitute_veb() local
10482 i40e_pf_for_each_vsi(pf, v, vsi) { in i40e_reconstitute_veb()
10483 if (vsi == ctl_vsi) in i40e_reconstitute_veb()
10486 if (vsi->veb_idx == veb->idx) { in i40e_reconstitute_veb()
10487 vsi->uplink_seid = veb->seid; in i40e_reconstitute_veb()
10488 ret = i40e_add_vsi(vsi); in i40e_reconstitute_veb()
10495 i40e_vsi_reset_stats(vsi); in i40e_reconstitute_veb()
10582 static int i40e_vsi_clear(struct i40e_vsi *vsi);
10590 struct i40e_vsi *main_vsi, *vsi; in i40e_fdir_sb_setup() local
10611 vsi = i40e_find_vsi_by_type(pf, I40E_VSI_FDIR); in i40e_fdir_sb_setup()
10614 if (!vsi) { in i40e_fdir_sb_setup()
10616 vsi = i40e_vsi_setup(pf, I40E_VSI_FDIR, main_vsi->seid, 0); in i40e_fdir_sb_setup()
10617 if (!vsi) { in i40e_fdir_sb_setup()
10625 i40e_vsi_setup_irqhandler(vsi, i40e_fdir_clean_ring); in i40e_fdir_sb_setup()
10634 struct i40e_vsi *vsi; in i40e_fdir_teardown() local
10637 vsi = i40e_find_vsi_by_type(pf, I40E_VSI_FDIR); in i40e_fdir_teardown()
10638 if (vsi) in i40e_fdir_teardown()
10639 i40e_vsi_release(vsi); in i40e_fdir_teardown()
10650 static int i40e_rebuild_cloud_filters(struct i40e_vsi *vsi, u16 seid) in i40e_rebuild_cloud_filters() argument
10653 struct i40e_pf *pf = vsi->back; in i40e_rebuild_cloud_filters()
10664 ret = i40e_add_del_cloud_filter_big_buf(vsi, cfilter, in i40e_rebuild_cloud_filters()
10667 ret = i40e_add_del_cloud_filter(vsi, cfilter, true); in i40e_rebuild_cloud_filters()
10687 static int i40e_rebuild_channels(struct i40e_vsi *vsi) in i40e_rebuild_channels() argument
10692 if (list_empty(&vsi->ch_list)) in i40e_rebuild_channels()
10695 list_for_each_entry_safe(ch, ch_tmp, &vsi->ch_list, list) { in i40e_rebuild_channels()
10699 ret = i40e_add_channel(vsi->back, vsi->uplink_seid, ch); in i40e_rebuild_channels()
10701 dev_info(&vsi->back->pdev->dev, in i40e_rebuild_channels()
10703 vsi->uplink_seid); in i40e_rebuild_channels()
10707 ret = i40e_channel_config_tx_ring(vsi->back, vsi, ch); in i40e_rebuild_channels()
10709 dev_info(&vsi->back->pdev->dev, in i40e_rebuild_channels()
10715 vsi->next_base_queue = vsi->next_base_queue + in i40e_rebuild_channels()
10720 if (i40e_set_bw_limit(vsi, ch->seid, in i40e_rebuild_channels()
10725 dev_dbg(&vsi->back->pdev->dev, in i40e_rebuild_channels()
10731 ret = i40e_rebuild_cloud_filters(vsi, ch->seid); in i40e_rebuild_channels()
10733 dev_dbg(&vsi->back->pdev->dev, in i40e_rebuild_channels()
10746 static void i40e_clean_xps_state(struct i40e_vsi *vsi) in i40e_clean_xps_state() argument
10750 if (vsi->tx_rings) in i40e_clean_xps_state()
10751 for (i = 0; i < vsi->num_queue_pairs; i++) in i40e_clean_xps_state()
10752 if (vsi->tx_rings[i]) in i40e_clean_xps_state()
10754 vsi->tx_rings[i]->state); in i40e_clean_xps_state()
10766 struct i40e_vsi *vsi; in i40e_prep_for_reset() local
10781 i40e_pf_for_each_vsi(pf, v, vsi) { in i40e_prep_for_reset()
10782 i40e_clean_xps_state(vsi); in i40e_prep_for_reset()
10783 vsi->seid = 0; in i40e_prep_for_reset()
10895 struct i40e_vsi *vsi = i40e_pf_get_main_vsi(pf); in i40e_rebuild() local
10904 i40e_set_ethtool_ops(vsi->netdev); in i40e_rebuild()
11037 if (vsi->uplink_seid != pf->mac_seid) { in i40e_rebuild()
11056 vsi->uplink_seid = pf->mac_seid; in i40e_rebuild()
11066 if (vsi->uplink_seid == pf->mac_seid) { in i40e_rebuild()
11069 ret = i40e_add_vsi(vsi); in i40e_rebuild()
11077 if (vsi->mqprio_qopt.max_rate[0]) { in i40e_rebuild()
11078 u64 max_tx_rate = i40e_bw_bytes_to_mbits(vsi, in i40e_rebuild()
11079 vsi->mqprio_qopt.max_rate[0]); in i40e_rebuild()
11082 ret = i40e_set_bw_limit(vsi, vsi->seid, max_tx_rate); in i40e_rebuild()
11088 dev_dbg(&vsi->back->pdev->dev, in i40e_rebuild()
11092 vsi->seid); in i40e_rebuild()
11095 ret = i40e_rebuild_cloud_filters(vsi, vsi->seid); in i40e_rebuild()
11102 ret = i40e_rebuild_channels(vsi); in i40e_rebuild()
11385 static int i40e_set_num_rings_in_vsi(struct i40e_vsi *vsi) in i40e_set_num_rings_in_vsi() argument
11387 struct i40e_pf *pf = vsi->back; in i40e_set_num_rings_in_vsi()
11389 switch (vsi->type) { in i40e_set_num_rings_in_vsi()
11391 vsi->alloc_queue_pairs = pf->num_lan_qps; in i40e_set_num_rings_in_vsi()
11392 if (!vsi->num_tx_desc) in i40e_set_num_rings_in_vsi()
11393 vsi->num_tx_desc = ALIGN(I40E_DEFAULT_NUM_DESCRIPTORS, in i40e_set_num_rings_in_vsi()
11395 if (!vsi->num_rx_desc) in i40e_set_num_rings_in_vsi()
11396 vsi->num_rx_desc = ALIGN(I40E_DEFAULT_NUM_DESCRIPTORS, in i40e_set_num_rings_in_vsi()
11399 vsi->num_q_vectors = pf->num_lan_msix; in i40e_set_num_rings_in_vsi()
11401 vsi->num_q_vectors = 1; in i40e_set_num_rings_in_vsi()
11406 vsi->alloc_queue_pairs = 1; in i40e_set_num_rings_in_vsi()
11407 vsi->num_tx_desc = ALIGN(I40E_FDIR_RING_COUNT, in i40e_set_num_rings_in_vsi()
11409 vsi->num_rx_desc = ALIGN(I40E_FDIR_RING_COUNT, in i40e_set_num_rings_in_vsi()
11411 vsi->num_q_vectors = pf->num_fdsb_msix; in i40e_set_num_rings_in_vsi()
11415 vsi->alloc_queue_pairs = pf->num_vmdq_qps; in i40e_set_num_rings_in_vsi()
11416 if (!vsi->num_tx_desc) in i40e_set_num_rings_in_vsi()
11417 vsi->num_tx_desc = ALIGN(I40E_DEFAULT_NUM_DESCRIPTORS, in i40e_set_num_rings_in_vsi()
11419 if (!vsi->num_rx_desc) in i40e_set_num_rings_in_vsi()
11420 vsi->num_rx_desc = ALIGN(I40E_DEFAULT_NUM_DESCRIPTORS, in i40e_set_num_rings_in_vsi()
11422 vsi->num_q_vectors = pf->num_vmdq_msix; in i40e_set_num_rings_in_vsi()
11426 vsi->alloc_queue_pairs = pf->num_vf_qps; in i40e_set_num_rings_in_vsi()
11427 if (!vsi->num_tx_desc) in i40e_set_num_rings_in_vsi()
11428 vsi->num_tx_desc = ALIGN(I40E_DEFAULT_NUM_DESCRIPTORS, in i40e_set_num_rings_in_vsi()
11430 if (!vsi->num_rx_desc) in i40e_set_num_rings_in_vsi()
11431 vsi->num_rx_desc = ALIGN(I40E_DEFAULT_NUM_DESCRIPTORS, in i40e_set_num_rings_in_vsi()
11441 vsi->num_tx_desc = I40E_MIN_NUM_DESCRIPTORS; in i40e_set_num_rings_in_vsi()
11442 vsi->num_rx_desc = I40E_MIN_NUM_DESCRIPTORS; in i40e_set_num_rings_in_vsi()
11456 static int i40e_vsi_alloc_arrays(struct i40e_vsi *vsi, bool alloc_qvectors) in i40e_vsi_alloc_arrays() argument
11463 size = sizeof(struct i40e_ring *) * vsi->alloc_queue_pairs * in i40e_vsi_alloc_arrays()
11464 (i40e_enabled_xdp_vsi(vsi) ? 3 : 2); in i40e_vsi_alloc_arrays()
11465 vsi->tx_rings = kzalloc(size, GFP_KERNEL); in i40e_vsi_alloc_arrays()
11466 if (!vsi->tx_rings) in i40e_vsi_alloc_arrays()
11468 next_rings = vsi->tx_rings + vsi->alloc_queue_pairs; in i40e_vsi_alloc_arrays()
11469 if (i40e_enabled_xdp_vsi(vsi)) { in i40e_vsi_alloc_arrays()
11470 vsi->xdp_rings = next_rings; in i40e_vsi_alloc_arrays()
11471 next_rings += vsi->alloc_queue_pairs; in i40e_vsi_alloc_arrays()
11473 vsi->rx_rings = next_rings; in i40e_vsi_alloc_arrays()
11477 size = sizeof(struct i40e_q_vector *) * vsi->num_q_vectors; in i40e_vsi_alloc_arrays()
11478 vsi->q_vectors = kzalloc(size, GFP_KERNEL); in i40e_vsi_alloc_arrays()
11479 if (!vsi->q_vectors) { in i40e_vsi_alloc_arrays()
11487 kfree(vsi->tx_rings); in i40e_vsi_alloc_arrays()
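Note: all ring-pointer arrays live in one allocation: the size is pointer-size x queue pairs x (3 with XDP, else 2), tx_rings owns the block, and xdp_rings/rx_rings are offsets into it, which is why the error path frees only tx_rings. A sketch with hypothetical vsi/ring types:

    static int alloc_ring_arrays(struct vsi *v, bool xdp)
    {
            int per_kind = v->alloc_queue_pairs;
            size_t sz = sizeof(struct ring *) * per_kind * (xdp ? 3 : 2);
            struct ring **next;

            v->tx_rings = kzalloc(sz, GFP_KERNEL);
            if (!v->tx_rings)
                    return -ENOMEM;
            next = v->tx_rings + per_kind;
            if (xdp) {
                    v->xdp_rings = next;
                    next += per_kind;
            }
            v->rx_rings = next;
            return 0;       /* kfree(v->tx_rings) releases all three */
    }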
11502 struct i40e_vsi *vsi; in i40e_vsi_mem_alloc() local
11516 while (i < pf->num_alloc_vsi && pf->vsi[i]) in i40e_vsi_mem_alloc()
11520 while (i < pf->next_vsi && pf->vsi[i]) in i40e_vsi_mem_alloc()
11524 if (i < pf->num_alloc_vsi && !pf->vsi[i]) { in i40e_vsi_mem_alloc()
11532 vsi = kzalloc(sizeof(*vsi), GFP_KERNEL); in i40e_vsi_mem_alloc()
11533 if (!vsi) { in i40e_vsi_mem_alloc()
11537 vsi->type = type; in i40e_vsi_mem_alloc()
11538 vsi->back = pf; in i40e_vsi_mem_alloc()
11539 set_bit(__I40E_VSI_DOWN, vsi->state); in i40e_vsi_mem_alloc()
11540 vsi->flags = 0; in i40e_vsi_mem_alloc()
11541 vsi->idx = vsi_idx; in i40e_vsi_mem_alloc()
11542 vsi->int_rate_limit = 0; in i40e_vsi_mem_alloc()
11543 vsi->rss_table_size = (vsi->type == I40E_VSI_MAIN) ? in i40e_vsi_mem_alloc()
11545 vsi->netdev_registered = false; in i40e_vsi_mem_alloc()
11546 vsi->work_limit = I40E_DEFAULT_IRQ_WORK; in i40e_vsi_mem_alloc()
11547 hash_init(vsi->mac_filter_hash); in i40e_vsi_mem_alloc()
11548 vsi->irqs_ready = false; in i40e_vsi_mem_alloc()
11551 vsi->af_xdp_zc_qps = bitmap_zalloc(pf->num_lan_qps, GFP_KERNEL); in i40e_vsi_mem_alloc()
11552 if (!vsi->af_xdp_zc_qps) in i40e_vsi_mem_alloc()
11556 ret = i40e_set_num_rings_in_vsi(vsi); in i40e_vsi_mem_alloc()
11560 ret = i40e_vsi_alloc_arrays(vsi, true); in i40e_vsi_mem_alloc()
11565 i40e_vsi_setup_irqhandler(vsi, i40e_msix_clean_rings); in i40e_vsi_mem_alloc()
11568 spin_lock_init(&vsi->mac_filter_hash_lock); in i40e_vsi_mem_alloc()
11569 pf->vsi[vsi_idx] = vsi; in i40e_vsi_mem_alloc()
11574 bitmap_free(vsi->af_xdp_zc_qps); in i40e_vsi_mem_alloc()
11576 kfree(vsi); in i40e_vsi_mem_alloc()
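Note: slot allocation in pf->vsi[] scans forward from the next_vsi hint and wraps around once, so freed low-numbered slots are eventually reused without a full scan on every call. A sketch of just the search, locking and cleanup elided:

    static int find_free_slot(void **tbl, int n, int hint)
    {
            int i = hint;

            while (i < n && tbl[i])
                    i++;
            if (i == n) {                   /* wrap, rescan the front */
                    i = 0;
                    while (i < hint && tbl[i])
                            i++;
            }
            return (i < n && !tbl[i]) ? i : -ENODEV;
    }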
11590 static void i40e_vsi_free_arrays(struct i40e_vsi *vsi, bool free_qvectors) in i40e_vsi_free_arrays() argument
11594 kfree(vsi->q_vectors); in i40e_vsi_free_arrays()
11595 vsi->q_vectors = NULL; in i40e_vsi_free_arrays()
11597 kfree(vsi->tx_rings); in i40e_vsi_free_arrays()
11598 vsi->tx_rings = NULL; in i40e_vsi_free_arrays()
11599 vsi->rx_rings = NULL; in i40e_vsi_free_arrays()
11600 vsi->xdp_rings = NULL; in i40e_vsi_free_arrays()
11608 static void i40e_clear_rss_config_user(struct i40e_vsi *vsi) in i40e_clear_rss_config_user() argument
11610 if (!vsi) in i40e_clear_rss_config_user()
11613 kfree(vsi->rss_hkey_user); in i40e_clear_rss_config_user()
11614 vsi->rss_hkey_user = NULL; in i40e_clear_rss_config_user()
11616 kfree(vsi->rss_lut_user); in i40e_clear_rss_config_user()
11617 vsi->rss_lut_user = NULL; in i40e_clear_rss_config_user()
11624 static int i40e_vsi_clear(struct i40e_vsi *vsi) in i40e_vsi_clear() argument
11628 if (!vsi) in i40e_vsi_clear()
11631 if (!vsi->back) in i40e_vsi_clear()
11633 pf = vsi->back; in i40e_vsi_clear()
11636 if (!pf->vsi[vsi->idx]) { in i40e_vsi_clear()
11638 vsi->idx, vsi->idx, vsi->type); in i40e_vsi_clear()
11642 if (pf->vsi[vsi->idx] != vsi) { in i40e_vsi_clear()
11645 pf->vsi[vsi->idx]->idx, in i40e_vsi_clear()
11646 pf->vsi[vsi->idx]->type, in i40e_vsi_clear()
11647 vsi->idx, vsi->type); in i40e_vsi_clear()
11652 i40e_put_lump(pf->qp_pile, vsi->base_queue, vsi->idx); in i40e_vsi_clear()
11653 i40e_put_lump(pf->irq_pile, vsi->base_vector, vsi->idx); in i40e_vsi_clear()
11655 bitmap_free(vsi->af_xdp_zc_qps); in i40e_vsi_clear()
11656 i40e_vsi_free_arrays(vsi, true); in i40e_vsi_clear()
11657 i40e_clear_rss_config_user(vsi); in i40e_vsi_clear()
11659 pf->vsi[vsi->idx] = NULL; in i40e_vsi_clear()
11660 if (vsi->idx < pf->next_vsi) in i40e_vsi_clear()
11661 pf->next_vsi = vsi->idx; in i40e_vsi_clear()
11666 kfree(vsi); in i40e_vsi_clear()
11675 static void i40e_vsi_clear_rings(struct i40e_vsi *vsi) in i40e_vsi_clear_rings() argument
11679 if (vsi->tx_rings && vsi->tx_rings[0]) { in i40e_vsi_clear_rings()
11680 for (i = 0; i < vsi->alloc_queue_pairs; i++) { in i40e_vsi_clear_rings()
11681 kfree_rcu(vsi->tx_rings[i], rcu); in i40e_vsi_clear_rings()
11682 WRITE_ONCE(vsi->tx_rings[i], NULL); in i40e_vsi_clear_rings()
11683 WRITE_ONCE(vsi->rx_rings[i], NULL); in i40e_vsi_clear_rings()
11684 if (vsi->xdp_rings) in i40e_vsi_clear_rings()
11685 WRITE_ONCE(vsi->xdp_rings[i], NULL); in i40e_vsi_clear_rings()
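Note: ring teardown has to coexist with lockless readers that load these slots with READ_ONCE(), so the pointers are cleared with WRITE_ONCE() and the Tx ring's memory is freed only after an RCU grace period. The core pattern, assuming a ring type that embeds the rcu_head:

    struct ring {
            struct rcu_head rcu;    /* required by kfree_rcu() */
            /* ... */
    };

    static void retire_ring(struct ring **slot)
    {
            struct ring *r = *slot;

            if (!r)
                    return;
            kfree_rcu(r, rcu);        /* free after the grace period  */
            WRITE_ONCE(*slot, NULL);  /* readers see NULL, never junk */
    }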
11694 static int i40e_alloc_rings(struct i40e_vsi *vsi) in i40e_alloc_rings() argument
11696 int i, qpv = i40e_enabled_xdp_vsi(vsi) ? 3 : 2; in i40e_alloc_rings()
11697 struct i40e_pf *pf = vsi->back; in i40e_alloc_rings()
11701 for (i = 0; i < vsi->alloc_queue_pairs; i++) { in i40e_alloc_rings()
11708 ring->reg_idx = vsi->base_queue + i; in i40e_alloc_rings()
11710 ring->vsi = vsi; in i40e_alloc_rings()
11711 ring->netdev = vsi->netdev; in i40e_alloc_rings()
11713 ring->count = vsi->num_tx_desc; in i40e_alloc_rings()
11716 if (test_bit(I40E_HW_CAP_WB_ON_ITR, vsi->back->hw.caps)) in i40e_alloc_rings()
11719 WRITE_ONCE(vsi->tx_rings[i], ring++); in i40e_alloc_rings()
11721 if (!i40e_enabled_xdp_vsi(vsi)) in i40e_alloc_rings()
11724 ring->queue_index = vsi->alloc_queue_pairs + i; in i40e_alloc_rings()
11725 ring->reg_idx = vsi->base_queue + ring->queue_index; in i40e_alloc_rings()
11727 ring->vsi = vsi; in i40e_alloc_rings()
11730 ring->count = vsi->num_tx_desc; in i40e_alloc_rings()
11733 if (test_bit(I40E_HW_CAP_WB_ON_ITR, vsi->back->hw.caps)) in i40e_alloc_rings()
11737 WRITE_ONCE(vsi->xdp_rings[i], ring++); in i40e_alloc_rings()
11741 ring->reg_idx = vsi->base_queue + i; in i40e_alloc_rings()
11743 ring->vsi = vsi; in i40e_alloc_rings()
11744 ring->netdev = vsi->netdev; in i40e_alloc_rings()
11746 ring->count = vsi->num_rx_desc; in i40e_alloc_rings()
11750 WRITE_ONCE(vsi->rx_rings[i], ring); in i40e_alloc_rings()
11756 i40e_vsi_clear_rings(vsi); in i40e_alloc_rings()
12015 static int i40e_vsi_alloc_q_vector(struct i40e_vsi *vsi, int v_idx) in i40e_vsi_alloc_q_vector() argument
12024 q_vector->vsi = vsi; in i40e_vsi_alloc_q_vector()
12028 if (vsi->netdev) in i40e_vsi_alloc_q_vector()
12029 netif_napi_add(vsi->netdev, &q_vector->napi, i40e_napi_poll); in i40e_vsi_alloc_q_vector()
12032 vsi->q_vectors[v_idx] = q_vector; in i40e_vsi_alloc_q_vector()
12044 static int i40e_vsi_alloc_q_vectors(struct i40e_vsi *vsi) in i40e_vsi_alloc_q_vectors() argument
12046 struct i40e_pf *pf = vsi->back; in i40e_vsi_alloc_q_vectors()
12051 num_q_vectors = vsi->num_q_vectors; in i40e_vsi_alloc_q_vectors()
12052 else if (vsi->type == I40E_VSI_MAIN) in i40e_vsi_alloc_q_vectors()
12058 err = i40e_vsi_alloc_q_vector(vsi, v_idx); in i40e_vsi_alloc_q_vectors()
12067 i40e_free_q_vector(vsi, v_idx); in i40e_vsi_alloc_q_vectors()
12140 struct i40e_vsi *vsi; in i40e_restore_interrupt_scheme() local
12157 i40e_pf_for_each_vsi(pf, i, vsi) { in i40e_restore_interrupt_scheme()
12158 err = i40e_vsi_alloc_q_vectors(vsi); in i40e_restore_interrupt_scheme()
12162 i40e_vsi_map_rings_to_vectors(vsi); in i40e_restore_interrupt_scheme()
12176 if (pf->vsi[i]) in i40e_restore_interrupt_scheme()
12177 i40e_vsi_free_q_vectors(pf->vsi[i]); in i40e_restore_interrupt_scheme()
12273 static int i40e_get_rss_aq(struct i40e_vsi *vsi, const u8 *seed, in i40e_get_rss_aq() argument
12276 struct i40e_pf *pf = vsi->back; in i40e_get_rss_aq()
12281 ret = i40e_aq_get_rss_key(hw, vsi->id, in i40e_get_rss_aq()
12294 bool pf_lut = vsi->type == I40E_VSI_MAIN; in i40e_get_rss_aq()
12296 ret = i40e_aq_get_rss_lut(hw, vsi->id, pf_lut, lut, lut_size); in i40e_get_rss_aq()
12319 static int i40e_config_rss_reg(struct i40e_vsi *vsi, const u8 *seed, in i40e_config_rss_reg() argument
12322 struct i40e_pf *pf = vsi->back; in i40e_config_rss_reg()
12324 u16 vf_id = vsi->vf_id; in i40e_config_rss_reg()
12331 if (vsi->type == I40E_VSI_MAIN) { in i40e_config_rss_reg()
12334 } else if (vsi->type == I40E_VSI_SRIOV) { in i40e_config_rss_reg()
12345 if (vsi->type == I40E_VSI_MAIN) { in i40e_config_rss_reg()
12350 } else if (vsi->type == I40E_VSI_SRIOV) { in i40e_config_rss_reg()
12373 static int i40e_get_rss_reg(struct i40e_vsi *vsi, u8 *seed, in i40e_get_rss_reg() argument
12376 struct i40e_pf *pf = vsi->back; in i40e_get_rss_reg()
12407 int i40e_config_rss(struct i40e_vsi *vsi, u8 *seed, u8 *lut, u16 lut_size) in i40e_config_rss() argument
12409 struct i40e_pf *pf = vsi->back; in i40e_config_rss()
12412 return i40e_config_rss_aq(vsi, seed, lut, lut_size); in i40e_config_rss()
12414 return i40e_config_rss_reg(vsi, seed, lut, lut_size); in i40e_config_rss()
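Note: i40e_config_rss() (and i40e_get_rss() below) is a thin dispatcher: when the firmware advertises RSS configuration over the AdminQ the AQ variant is used, otherwise the key and LUT are programmed through registers directly. A sketch, where the capability flag name is a stand-in:

    static int config_rss(struct pf *pf, u8 *seed, u8 *lut, u16 lut_size)
    {
            if (pf->rss_aq_capable)
                    return config_rss_aq(pf, seed, lut, lut_size);
            return config_rss_reg(pf, seed, lut, lut_size);
    }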
12426 int i40e_get_rss(struct i40e_vsi *vsi, u8 *seed, u8 *lut, u16 lut_size) in i40e_get_rss() argument
12428 struct i40e_pf *pf = vsi->back; in i40e_get_rss()
12431 return i40e_get_rss_aq(vsi, seed, lut, lut_size); in i40e_get_rss()
12433 return i40e_get_rss_reg(vsi, seed, lut, lut_size); in i40e_get_rss()
12458 struct i40e_vsi *vsi = i40e_pf_get_main_vsi(pf); in i40e_pf_config_rss() local
12482 if (!vsi->rss_size) { in i40e_pf_config_rss()
12489 qcount = vsi->num_queue_pairs / in i40e_pf_config_rss()
12490 (vsi->tc_config.numtc ? vsi->tc_config.numtc : 1); in i40e_pf_config_rss()
12491 vsi->rss_size = min_t(int, pf->alloc_rss_size, qcount); in i40e_pf_config_rss()
12493 if (!vsi->rss_size) in i40e_pf_config_rss()
12496 lut = kzalloc(vsi->rss_table_size, GFP_KERNEL); in i40e_pf_config_rss()
12501 if (vsi->rss_lut_user) in i40e_pf_config_rss()
12502 memcpy(lut, vsi->rss_lut_user, vsi->rss_table_size); in i40e_pf_config_rss()
12504 i40e_fill_rss_lut(pf, lut, vsi->rss_table_size, vsi->rss_size); in i40e_pf_config_rss()
12509 if (vsi->rss_hkey_user) in i40e_pf_config_rss()
12510 memcpy(seed, vsi->rss_hkey_user, I40E_HKEY_ARRAY_SIZE); in i40e_pf_config_rss()
12513 ret = i40e_config_rss(vsi, seed, lut, vsi->rss_table_size); in i40e_pf_config_rss()
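Note: the PF-wide RSS width is the per-TC queue count clamped by what was allocated, with numtc == 0 treated as one TC to keep the division safe, roughly:

    static u16 pf_rss_size(u16 num_queue_pairs, u8 numtc, u16 alloc_rss)
    {
            u16 qcount = num_queue_pairs / (numtc ? numtc : 1);

            return min_t(u16, alloc_rss, qcount);
    }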
12530 struct i40e_vsi *vsi = i40e_pf_get_main_vsi(pf); in i40e_reconfig_rss_queues() local
12539 if (queue_count != vsi->num_queue_pairs) { in i40e_reconfig_rss_queues()
12542 vsi->req_queue_pairs = queue_count; in i40e_reconfig_rss_queues()
12554 if (queue_count < vsi->rss_size) { in i40e_reconfig_rss_queues()
12555 i40e_clear_rss_config_user(vsi); in i40e_reconfig_rss_queues()
12561 qcount = vsi->num_queue_pairs / vsi->tc_config.numtc; in i40e_reconfig_rss_queues()
12562 vsi->rss_size = min_t(int, pf->alloc_rss_size, qcount); in i40e_reconfig_rss_queues()
12567 vsi->req_queue_pairs, pf->rss_size_max); in i40e_reconfig_rss_queues()
12943 static void i40e_clear_rss_lut(struct i40e_vsi *vsi) in i40e_clear_rss_lut() argument
12945 struct i40e_pf *pf = vsi->back; in i40e_clear_rss_lut()
12947 u16 vf_id = vsi->vf_id; in i40e_clear_rss_lut()
12950 if (vsi->type == I40E_VSI_MAIN) { in i40e_clear_rss_lut()
12953 } else if (vsi->type == I40E_VSI_SRIOV) { in i40e_clear_rss_lut()
12966 static int i40e_set_loopback(struct i40e_vsi *vsi, bool ena) in i40e_set_loopback() argument
12968 bool if_running = netif_running(vsi->netdev) && in i40e_set_loopback()
12969 !test_and_set_bit(__I40E_VSI_DOWN, vsi->state); in i40e_set_loopback()
12973 i40e_down(vsi); in i40e_set_loopback()
12975 ret = i40e_aq_set_mac_loopback(&vsi->back->hw, ena, NULL); in i40e_set_loopback()
12977 netdev_err(vsi->netdev, "Failed to toggle loopback state\n"); in i40e_set_loopback()
12979 i40e_up(vsi); in i40e_set_loopback()
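Note: the loopback toggle only bounces the interface when it was actually running, and test_and_set_bit() both tests and claims the DOWN bit in one atomic step so a concurrent down cannot race the same transition. A sketch with stand-in helpers:

    static int set_loopback(struct vsi *v, bool ena)
    {
            bool if_running = netif_running(v->netdev) &&
                              !test_and_set_bit(VSI_DOWN, v->state);
            int ret;

            if (if_running)
                    vsi_down(v);
            ret = hw_set_mac_loopback(v, ena);  /* AQ command in i40e */
            if (if_running)
                    vsi_up(v);                  /* clears the DOWN bit */
            return ret;
    }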
12994 struct i40e_vsi *vsi = np->vsi; in i40e_set_features() local
12995 struct i40e_pf *pf = vsi->back; in i40e_set_features()
13002 i40e_clear_rss_lut(vsi); in i40e_set_features()
13005 i40e_vlan_stripping_enable(vsi); in i40e_set_features()
13007 i40e_vlan_stripping_disable(vsi); in i40e_set_features()
13016 if (!(features & NETIF_F_HW_L2FW_DOFFLOAD) && vsi->macvlan_cnt) in i40e_set_features()
13017 i40e_del_all_macvlans(vsi); in i40e_set_features()
13025 return i40e_set_loopback(vsi, !!(features & NETIF_F_LOOPBACK)); in i40e_set_features()
13035 struct i40e_hw *hw = &np->vsi->back->hw; in i40e_udp_tunnel_set_port()
13060 struct i40e_hw *hw = &np->vsi->back->hw; in i40e_udp_tunnel_unset_port()
13078 struct i40e_pf *pf = np->vsi->back; in i40e_get_phys_port_id()
13107 struct i40e_pf *pf = np->vsi->back; in i40e_ndo_fdb_add()
13162 struct i40e_vsi *vsi = np->vsi; in i40e_ndo_bridge_setlink() local
13163 struct i40e_pf *pf = vsi->back; in i40e_ndo_bridge_setlink()
13169 if (vsi->type != I40E_VSI_MAIN) in i40e_ndo_bridge_setlink()
13173 veb = i40e_pf_get_veb_by_seid(pf, vsi->uplink_seid); in i40e_ndo_bridge_setlink()
13188 veb = i40e_veb_setup(pf, vsi->uplink_seid, vsi->seid, in i40e_ndo_bridge_setlink()
13189 vsi->tc_config.enabled_tc); in i40e_ndo_bridge_setlink()
13232 struct i40e_vsi *vsi = np->vsi; in i40e_ndo_bridge_getlink() local
13233 struct i40e_pf *pf = vsi->back; in i40e_ndo_bridge_getlink()
13237 if (vsi->type != I40E_VSI_MAIN) in i40e_ndo_bridge_getlink()
13241 veb = i40e_pf_get_veb_by_seid(pf, vsi->uplink_seid); in i40e_ndo_bridge_getlink()
13313 static int i40e_xdp_setup(struct i40e_vsi *vsi, struct bpf_prog *prog, in i40e_xdp_setup() argument
13316 int frame_size = i40e_max_vsi_frame_size(vsi, prog); in i40e_xdp_setup()
13317 struct i40e_pf *pf = vsi->back; in i40e_xdp_setup()
13327 if (vsi->netdev->mtu > frame_size - I40E_PACKET_HDR_PAD) { in i40e_xdp_setup()
13333 need_reset = (i40e_enabled_xdp_vsi(vsi) != !!prog); in i40e_xdp_setup()
13337 old_prog = xchg(&vsi->xdp_prog, prog); in i40e_xdp_setup()
13341 xdp_features_clear_redirect_target(vsi->netdev); in i40e_xdp_setup()
13348 if (!i40e_enabled_xdp_vsi(vsi) && prog) { in i40e_xdp_setup()
13349 if (i40e_realloc_rx_bi_zc(vsi, true)) in i40e_xdp_setup()
13351 } else if (i40e_enabled_xdp_vsi(vsi) && !prog) { in i40e_xdp_setup()
13352 if (i40e_realloc_rx_bi_zc(vsi, false)) in i40e_xdp_setup()
13356 for (i = 0; i < vsi->num_queue_pairs; i++) in i40e_xdp_setup()
13357 WRITE_ONCE(vsi->rx_rings[i]->xdp_prog, vsi->xdp_prog); in i40e_xdp_setup()
13366 for (i = 0; i < vsi->num_queue_pairs; i++) in i40e_xdp_setup()
13367 if (vsi->xdp_rings[i]->xsk_pool) in i40e_xdp_setup()
13368 (void)i40e_xsk_wakeup(vsi->netdev, i, in i40e_xdp_setup()
13370 xdp_features_set_redirect_target(vsi->netdev, true); in i40e_xdp_setup()
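Note: attaching a program swaps the pointer atomically with xchg(), then republishes it to every Rx ring with WRITE_ONCE() so a hot path reading with READ_ONCE() never observes a torn update; a full queue reconfiguration is needed only when XDP flips between on and off, not when one program replaces another. A condensed sketch, where the flat ring_progs array is illustrative:

    static void swap_xdp_prog(struct bpf_prog **slot,
                              struct bpf_prog *prog,
                              struct bpf_prog **ring_progs, int nrings)
    {
            struct bpf_prog *old = xchg(slot, prog);
            int i;

            for (i = 0; i < nrings; i++)
                    WRITE_ONCE(ring_progs[i], prog);
            if (old)
                    bpf_prog_put(old);  /* drop the replaced reference */
    }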
13382 static int i40e_enter_busy_conf(struct i40e_vsi *vsi) in i40e_enter_busy_conf() argument
13384 struct i40e_pf *pf = vsi->back; in i40e_enter_busy_conf()
13401 static void i40e_exit_busy_conf(struct i40e_vsi *vsi) in i40e_exit_busy_conf() argument
13403 struct i40e_pf *pf = vsi->back; in i40e_exit_busy_conf()
13413 static void i40e_queue_pair_reset_stats(struct i40e_vsi *vsi, int queue_pair) in i40e_queue_pair_reset_stats() argument
13415 memset(&vsi->rx_rings[queue_pair]->rx_stats, 0, in i40e_queue_pair_reset_stats()
13416 sizeof(vsi->rx_rings[queue_pair]->rx_stats)); in i40e_queue_pair_reset_stats()
13417 memset(&vsi->tx_rings[queue_pair]->stats, 0, in i40e_queue_pair_reset_stats()
13418 sizeof(vsi->tx_rings[queue_pair]->stats)); in i40e_queue_pair_reset_stats()
13419 if (i40e_enabled_xdp_vsi(vsi)) { in i40e_queue_pair_reset_stats()
13420 memset(&vsi->xdp_rings[queue_pair]->stats, 0, in i40e_queue_pair_reset_stats()
13421 sizeof(vsi->xdp_rings[queue_pair]->stats)); in i40e_queue_pair_reset_stats()
13430 static void i40e_queue_pair_clean_rings(struct i40e_vsi *vsi, int queue_pair) in i40e_queue_pair_clean_rings() argument
13432 i40e_clean_tx_ring(vsi->tx_rings[queue_pair]); in i40e_queue_pair_clean_rings()
13433 if (i40e_enabled_xdp_vsi(vsi)) { in i40e_queue_pair_clean_rings()
13438 i40e_clean_tx_ring(vsi->xdp_rings[queue_pair]); in i40e_queue_pair_clean_rings()
13440 i40e_clean_rx_ring(vsi->rx_rings[queue_pair]); in i40e_queue_pair_clean_rings()
13449 static void i40e_queue_pair_toggle_napi(struct i40e_vsi *vsi, int queue_pair, in i40e_queue_pair_toggle_napi() argument
13452 struct i40e_ring *rxr = vsi->rx_rings[queue_pair]; in i40e_queue_pair_toggle_napi()
13455 if (!vsi->netdev) in i40e_queue_pair_toggle_napi()
13475 static int i40e_queue_pair_toggle_rings(struct i40e_vsi *vsi, int queue_pair, in i40e_queue_pair_toggle_rings() argument
13478 struct i40e_pf *pf = vsi->back; in i40e_queue_pair_toggle_rings()
13481 pf_q = vsi->base_queue + queue_pair; in i40e_queue_pair_toggle_rings()
13482 ret = i40e_control_wait_tx_q(vsi->seid, pf, pf_q, in i40e_queue_pair_toggle_rings()
13487 vsi->seid, pf_q, (enable ? "en" : "dis")); in i40e_queue_pair_toggle_rings()
13496 vsi->seid, pf_q, (enable ? "en" : "dis")); in i40e_queue_pair_toggle_rings()
13506 if (!i40e_enabled_xdp_vsi(vsi)) in i40e_queue_pair_toggle_rings()
13509 ret = i40e_control_wait_tx_q(vsi->seid, pf, in i40e_queue_pair_toggle_rings()
13510 pf_q + vsi->alloc_queue_pairs, in i40e_queue_pair_toggle_rings()
13515 vsi->seid, pf_q, (enable ? "en" : "dis")); in i40e_queue_pair_toggle_rings()
13526 static void i40e_queue_pair_enable_irq(struct i40e_vsi *vsi, int queue_pair) in i40e_queue_pair_enable_irq() argument
13528 struct i40e_ring *rxr = vsi->rx_rings[queue_pair]; in i40e_queue_pair_enable_irq()
13529 struct i40e_pf *pf = vsi->back; in i40e_queue_pair_enable_irq()
13534 i40e_irq_dynamic_enable(vsi, rxr->q_vector->v_idx); in i40e_queue_pair_enable_irq()
13546 static void i40e_queue_pair_disable_irq(struct i40e_vsi *vsi, int queue_pair) in i40e_queue_pair_disable_irq() argument
13548 struct i40e_ring *rxr = vsi->rx_rings[queue_pair]; in i40e_queue_pair_disable_irq()
13549 struct i40e_pf *pf = vsi->back; in i40e_queue_pair_disable_irq()
13559 u32 intpf = vsi->base_vector + rxr->q_vector->v_idx; in i40e_queue_pair_disable_irq()
13580 int i40e_queue_pair_disable(struct i40e_vsi *vsi, int queue_pair) in i40e_queue_pair_disable() argument
13584 err = i40e_enter_busy_conf(vsi); in i40e_queue_pair_disable()
13588 i40e_queue_pair_disable_irq(vsi, queue_pair); in i40e_queue_pair_disable()
13589 i40e_queue_pair_toggle_napi(vsi, queue_pair, false /* off */); in i40e_queue_pair_disable()
13590 err = i40e_queue_pair_toggle_rings(vsi, queue_pair, false /* off */); in i40e_queue_pair_disable()
13591 i40e_clean_rx_ring(vsi->rx_rings[queue_pair]); in i40e_queue_pair_disable()
13592 i40e_queue_pair_clean_rings(vsi, queue_pair); in i40e_queue_pair_disable()
13593 i40e_queue_pair_reset_stats(vsi, queue_pair); in i40e_queue_pair_disable()
13605 int i40e_queue_pair_enable(struct i40e_vsi *vsi, int queue_pair) in i40e_queue_pair_enable() argument
13609 err = i40e_configure_tx_ring(vsi->tx_rings[queue_pair]); in i40e_queue_pair_enable()
13613 if (i40e_enabled_xdp_vsi(vsi)) { in i40e_queue_pair_enable()
13614 err = i40e_configure_tx_ring(vsi->xdp_rings[queue_pair]); in i40e_queue_pair_enable()
13619 err = i40e_configure_rx_ring(vsi->rx_rings[queue_pair]); in i40e_queue_pair_enable()
13623 err = i40e_queue_pair_toggle_rings(vsi, queue_pair, true /* on */); in i40e_queue_pair_enable()
13624 i40e_queue_pair_toggle_napi(vsi, queue_pair, true /* on */); in i40e_queue_pair_enable()
13625 i40e_queue_pair_enable_irq(vsi, queue_pair); in i40e_queue_pair_enable()
13627 i40e_exit_busy_conf(vsi); in i40e_queue_pair_enable()
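Note: the per-queue-pair paths used by AF_XDP follow the same discipline as full up/down, just scoped to one pair: disable masks the IRQ first, parks NAPI, stops the rings, then reclaims buffers; enable reconfigures the rings first and unmasks the IRQ last. A sketch with stand-in helpers (the busy-conf serialization around it is elided):

    static int queue_pair_disable(struct vsi *v, int qp)
    {
            int err;

            qp_disable_irq(v, qp);
            qp_toggle_napi(v, qp, false);
            err = qp_toggle_rings(v, qp, false);
            qp_clean_rings(v, qp);
            qp_reset_stats(v, qp);
            return err;
    }

    static int queue_pair_enable(struct vsi *v, int qp)
    {
            int err;

            err = qp_configure_rings(v, qp);
            if (err)
                    return err;
            err = qp_toggle_rings(v, qp, true);
            qp_toggle_napi(v, qp, true);
            qp_enable_irq(v, qp);
            return err;
    }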
13641 struct i40e_vsi *vsi = np->vsi; in i40e_xdp() local
13643 if (vsi->type != I40E_VSI_MAIN) in i40e_xdp()
13648 return i40e_xdp_setup(vsi, xdp->prog, xdp->extack); in i40e_xdp()
13650 return i40e_xsk_pool_setup(vsi, xdp->xsk.pool, in i40e_xdp()
13702 static int i40e_config_netdev(struct i40e_vsi *vsi) in i40e_config_netdev() argument
13704 struct i40e_pf *pf = vsi->back; in i40e_config_netdev()
13715 netdev = alloc_etherdev_mq(etherdev_size, vsi->alloc_queue_pairs); in i40e_config_netdev()
13719 vsi->netdev = netdev; in i40e_config_netdev()
13721 np->vsi = vsi; in i40e_config_netdev()
13789 if (vsi->type == I40E_VSI_MAIN) { in i40e_config_netdev()
13802 i40e_rm_default_mac_filter(vsi, mac_addr); in i40e_config_netdev()
13803 spin_lock_bh(&vsi->mac_filter_hash_lock); in i40e_config_netdev()
13804 i40e_add_mac_filter(vsi, mac_addr); in i40e_config_netdev()
13805 spin_unlock_bh(&vsi->mac_filter_hash_lock); in i40e_config_netdev()
13824 spin_lock_bh(&vsi->mac_filter_hash_lock); in i40e_config_netdev()
13825 i40e_add_mac_filter(vsi, mac_addr); in i40e_config_netdev()
13826 spin_unlock_bh(&vsi->mac_filter_hash_lock); in i40e_config_netdev()
13843 spin_lock_bh(&vsi->mac_filter_hash_lock); in i40e_config_netdev()
13844 i40e_add_mac_filter(vsi, broadcast); in i40e_config_netdev()
13845 spin_unlock_bh(&vsi->mac_filter_hash_lock); in i40e_config_netdev()
13856 i40e_vsi_config_netdev_tc(vsi, vsi->tc_config.enabled_tc); in i40e_config_netdev()
13875 static void i40e_vsi_delete(struct i40e_vsi *vsi) in i40e_vsi_delete() argument
13878 if (vsi == vsi->back->vsi[vsi->back->lan_vsi]) in i40e_vsi_delete()
13881 i40e_aq_delete_element(&vsi->back->hw, vsi->seid, NULL); in i40e_vsi_delete()
13890 int i40e_is_vsi_uplink_mode_veb(struct i40e_vsi *vsi) in i40e_is_vsi_uplink_mode_veb() argument
13893 struct i40e_pf *pf = vsi->back; in i40e_is_vsi_uplink_mode_veb()
13896 if (vsi->veb_idx >= I40E_MAX_VEB) in i40e_is_vsi_uplink_mode_veb()
13899 veb = pf->veb[vsi->veb_idx]; in i40e_is_vsi_uplink_mode_veb()
13925 static int i40e_add_vsi(struct i40e_vsi *vsi) in i40e_add_vsi() argument
13928 struct i40e_pf *pf = vsi->back; in i40e_add_vsi()
13939 switch (vsi->type) { in i40e_add_vsi()
13959 vsi->info = ctxt.info; in i40e_add_vsi()
13960 vsi->info.valid_sections = 0; in i40e_add_vsi()
13962 vsi->seid = ctxt.seid; in i40e_add_vsi()
13963 vsi->id = ctxt.vsi_number; in i40e_add_vsi()
13999 i40e_vsi_setup_queue_map(vsi, &ctxt, enabled_tc, false); in i40e_add_vsi()
14011 i40e_vsi_update_queue_map(vsi, &ctxt); in i40e_add_vsi()
14012 vsi->info.valid_sections = 0; in i40e_add_vsi()
14020 ret = i40e_vsi_config_tc(vsi, enabled_tc); in i40e_add_vsi()
14038 ctxt.uplink_seid = vsi->uplink_seid; in i40e_add_vsi()
14042 (i40e_is_vsi_uplink_mode_veb(vsi))) { in i40e_add_vsi()
14048 i40e_vsi_setup_queue_map(vsi, &ctxt, enabled_tc, true); in i40e_add_vsi()
14054 ctxt.uplink_seid = vsi->uplink_seid; in i40e_add_vsi()
14061 if (i40e_is_vsi_uplink_mode_veb(vsi)) { in i40e_add_vsi()
14069 i40e_vsi_setup_queue_map(vsi, &ctxt, enabled_tc, true); in i40e_add_vsi()
14074 ctxt.vf_num = vsi->vf_id + hw->func_caps.vf_base_id; in i40e_add_vsi()
14075 ctxt.uplink_seid = vsi->uplink_seid; in i40e_add_vsi()
14082 if (i40e_is_vsi_uplink_mode_veb(vsi)) { in i40e_add_vsi()
14089 if (test_bit(I40E_FLAG_IWARP_ENA, vsi->back->flags)) { in i40e_add_vsi()
14099 if (pf->vf[vsi->vf_id].spoofchk) { in i40e_add_vsi()
14107 i40e_vsi_setup_queue_map(vsi, &ctxt, enabled_tc, true); in i40e_add_vsi()
14118 if (vsi->type != I40E_VSI_MAIN) { in i40e_add_vsi()
14121 dev_info(&vsi->back->pdev->dev, in i40e_add_vsi()
14129 vsi->info = ctxt.info; in i40e_add_vsi()
14130 vsi->info.valid_sections = 0; in i40e_add_vsi()
14131 vsi->seid = ctxt.seid; in i40e_add_vsi()
14132 vsi->id = ctxt.vsi_number; in i40e_add_vsi()
14135 spin_lock_bh(&vsi->mac_filter_hash_lock); in i40e_add_vsi()
14136 vsi->active_filters = 0; in i40e_add_vsi()
14138 hash_for_each_safe(vsi->mac_filter_hash, bkt, h, f, hlist) { in i40e_add_vsi()
14142 spin_unlock_bh(&vsi->mac_filter_hash_lock); in i40e_add_vsi()
14143 clear_bit(__I40E_VSI_OVERFLOW_PROMISC, vsi->state); in i40e_add_vsi()
14146 vsi->flags |= I40E_VSI_FLAG_FILTER_CHANGED; in i40e_add_vsi()
14151 ret = i40e_vsi_get_bw_info(vsi); in i40e_add_vsi()
14171 int i40e_vsi_release(struct i40e_vsi *vsi) in i40e_vsi_release() argument
14180 pf = vsi->back; in i40e_vsi_release()
14183 if (vsi->flags & I40E_VSI_FLAG_VEB_OWNER) { in i40e_vsi_release()
14185 vsi->seid, vsi->uplink_seid); in i40e_vsi_release()
14188 if (vsi->type == I40E_VSI_MAIN && !test_bit(__I40E_DOWN, pf->state)) { in i40e_vsi_release()
14192 set_bit(__I40E_VSI_RELEASING, vsi->state); in i40e_vsi_release()
14193 uplink_seid = vsi->uplink_seid; in i40e_vsi_release()
14195 if (vsi->type != I40E_VSI_SRIOV) { in i40e_vsi_release()
14196 if (vsi->netdev_registered) { in i40e_vsi_release()
14197 vsi->netdev_registered = false; in i40e_vsi_release()
14198 if (vsi->netdev) { in i40e_vsi_release()
14200 unregister_netdev(vsi->netdev); in i40e_vsi_release()
14203 i40e_vsi_close(vsi); in i40e_vsi_release()
14205 i40e_vsi_disable_irq(vsi); in i40e_vsi_release()
14208 if (vsi->type == I40E_VSI_MAIN) in i40e_vsi_release()
14211 spin_lock_bh(&vsi->mac_filter_hash_lock); in i40e_vsi_release()
14214 if (vsi->netdev) { in i40e_vsi_release()
14215 __dev_uc_unsync(vsi->netdev, NULL); in i40e_vsi_release()
14216 __dev_mc_unsync(vsi->netdev, NULL); in i40e_vsi_release()
14220 hash_for_each_safe(vsi->mac_filter_hash, bkt, h, f, hlist) in i40e_vsi_release()
14221 __i40e_del_filter(vsi, f); in i40e_vsi_release()
14223 spin_unlock_bh(&vsi->mac_filter_hash_lock); in i40e_vsi_release()
14225 i40e_sync_vsi_filters(vsi); in i40e_vsi_release()
14227 i40e_vsi_delete(vsi); in i40e_vsi_release()
14228 i40e_vsi_free_q_vectors(vsi); in i40e_vsi_release()
14229 if (vsi->netdev) { in i40e_vsi_release()
14230 free_netdev(vsi->netdev); in i40e_vsi_release()
14231 vsi->netdev = NULL; in i40e_vsi_release()
14233 i40e_vsi_clear_rings(vsi); in i40e_vsi_release()
14234 i40e_vsi_clear(vsi); in i40e_vsi_release()
14249 i40e_pf_for_each_vsi(pf, i, vsi) in i40e_vsi_release()
14250 if (vsi->uplink_seid == uplink_seid && in i40e_vsi_release()
14251 (vsi->flags & I40E_VSI_FLAG_VEB_OWNER) == 0) in i40e_vsi_release()
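The filter removal at 14220-14221 deletes entries while iterating, which is only safe because hash_for_each_safe() caches the next node before the loop body runs. The same idea in a self-contained userspace loop over a singly linked list:

#include <stdlib.h>

struct filter { struct filter *next; };

static void del_all_filters(struct filter **head)
{
	struct filter *f = *head, *next;

	while (f) {
		next = f->next;		/* cache the successor first... */
		free(f);		/* ...so freeing f is safe */
		f = next;
	}
	*head = NULL;
}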
14274 static int i40e_vsi_setup_vectors(struct i40e_vsi *vsi) in i40e_vsi_setup_vectors() argument
14277 struct i40e_pf *pf = vsi->back; in i40e_vsi_setup_vectors()
14279 if (vsi->q_vectors[0]) { in i40e_vsi_setup_vectors()
14281 vsi->seid); in i40e_vsi_setup_vectors()
14285 if (vsi->base_vector) { in i40e_vsi_setup_vectors()
14287 vsi->seid, vsi->base_vector); in i40e_vsi_setup_vectors()
14291 ret = i40e_vsi_alloc_q_vectors(vsi); in i40e_vsi_setup_vectors()
14295 vsi->num_q_vectors, vsi->seid, ret); in i40e_vsi_setup_vectors()
14296 vsi->num_q_vectors = 0; in i40e_vsi_setup_vectors()
14305 if (vsi->num_q_vectors) in i40e_vsi_setup_vectors()
14306 vsi->base_vector = i40e_get_lump(pf, pf->irq_pile, in i40e_vsi_setup_vectors()
14307 vsi->num_q_vectors, vsi->idx); in i40e_vsi_setup_vectors()
14308 if (vsi->base_vector < 0) { in i40e_vsi_setup_vectors()
14311 vsi->num_q_vectors, vsi->seid, vsi->base_vector); in i40e_vsi_setup_vectors()
14312 i40e_vsi_free_q_vectors(vsi); in i40e_vsi_setup_vectors()
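i40e_vsi_setup_vectors() refuses to run twice (the q_vectors[0] and base_vector checks), then pairs the q-vector allocation with a contiguous reservation from pf->irq_pile, freeing the q-vectors again if the reservation fails. A compilable sketch of that guard-and-unwind shape; struct pile, alloc_q_vectors(), free_q_vectors(), and get_lump() are hypothetical stubs, not the driver's helpers:

#include <errno.h>

struct pile;				/* opaque resource tracker */
struct vsi { void *q_vectors[4]; int base_vector, num_q_vectors, idx; };

/* stub stand-ins for the driver's helpers */
static int  alloc_q_vectors(struct vsi *v) { (void)v; return 0; }
static void free_q_vectors(struct vsi *v)  { (void)v; }
static int  get_lump(struct pile *p, int need, int id)
{ (void)p; (void)need; (void)id; return 0; }

static int setup_vectors(struct vsi *vsi, struct pile *irq_pile)
{
	int base;

	if (vsi->q_vectors[0] || vsi->base_vector)
		return -EEXIST;			/* refuse to configure twice */

	if (alloc_q_vectors(vsi))
		return -ENOMEM;

	base = get_lump(irq_pile, vsi->num_q_vectors, vsi->idx);
	if (base < 0) {
		free_q_vectors(vsi);		/* unwind the allocation */
		return -ENOENT;
	}
	vsi->base_vector = base;
	return 0;
}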
14330 static struct i40e_vsi *i40e_vsi_reinit_setup(struct i40e_vsi *vsi) in i40e_vsi_reinit_setup() argument
14337 if (!vsi) in i40e_vsi_reinit_setup()
14340 pf = vsi->back; in i40e_vsi_reinit_setup()
14342 i40e_put_lump(pf->qp_pile, vsi->base_queue, vsi->idx); in i40e_vsi_reinit_setup()
14343 i40e_vsi_clear_rings(vsi); in i40e_vsi_reinit_setup()
14345 i40e_vsi_free_arrays(vsi, false); in i40e_vsi_reinit_setup()
14346 i40e_set_num_rings_in_vsi(vsi); in i40e_vsi_reinit_setup()
14347 ret = i40e_vsi_alloc_arrays(vsi, false); in i40e_vsi_reinit_setup()
14351 alloc_queue_pairs = vsi->alloc_queue_pairs * in i40e_vsi_reinit_setup()
14352 (i40e_enabled_xdp_vsi(vsi) ? 2 : 1); in i40e_vsi_reinit_setup()
14354 ret = i40e_get_lump(pf, pf->qp_pile, alloc_queue_pairs, vsi->idx); in i40e_vsi_reinit_setup()
14358 alloc_queue_pairs, vsi->seid, ret); in i40e_vsi_reinit_setup()
14361 vsi->base_queue = ret; in i40e_vsi_reinit_setup()
14370 if (vsi->type == I40E_VSI_MAIN) in i40e_vsi_reinit_setup()
14371 i40e_rm_default_mac_filter(vsi, pf->hw.mac.perm_addr); in i40e_vsi_reinit_setup()
14374 ret = i40e_alloc_rings(vsi); in i40e_vsi_reinit_setup()
14379 i40e_vsi_map_rings_to_vectors(vsi); in i40e_vsi_reinit_setup()
14380 return vsi; in i40e_vsi_reinit_setup()
14383 i40e_vsi_free_q_vectors(vsi); in i40e_vsi_reinit_setup()
14384 if (vsi->netdev_registered) { in i40e_vsi_reinit_setup()
14385 vsi->netdev_registered = false; in i40e_vsi_reinit_setup()
14386 unregister_netdev(vsi->netdev); in i40e_vsi_reinit_setup()
14387 free_netdev(vsi->netdev); in i40e_vsi_reinit_setup()
14388 vsi->netdev = NULL; in i40e_vsi_reinit_setup()
14390 if (vsi->type == I40E_VSI_MAIN) in i40e_vsi_reinit_setup()
14392 i40e_aq_delete_element(&pf->hw, vsi->seid, NULL); in i40e_vsi_reinit_setup()
14394 i40e_vsi_clear(vsi); in i40e_vsi_reinit_setup()
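The reservation size at 14351-14352 doubles when XDP is enabled because each queue pair then carries an extra XDP TX ring. The arithmetic, runnable:

#include <stdbool.h>
#include <stdio.h>

static int queue_pairs_needed(int alloc_queue_pairs, bool xdp_enabled)
{
	return alloc_queue_pairs * (xdp_enabled ? 2 : 1);
}

int main(void)
{
	printf("%d\n", queue_pairs_needed(8, false));	/* 8  */
	printf("%d\n", queue_pairs_needed(8, true));	/* 16 */
	return 0;
}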
14414 struct i40e_vsi *vsi = NULL; in i40e_vsi_setup() local
14435 vsi = i40e_pf_get_vsi_by_seid(pf, uplink_seid); in i40e_vsi_setup()
14436 if (!vsi) { in i40e_vsi_setup()
14442 if (vsi->uplink_seid == pf->mac_seid) in i40e_vsi_setup()
14443 veb = i40e_veb_setup(pf, pf->mac_seid, vsi->seid, in i40e_vsi_setup()
14444 vsi->tc_config.enabled_tc); in i40e_vsi_setup()
14445 else if ((vsi->flags & I40E_VSI_FLAG_VEB_OWNER) == 0) in i40e_vsi_setup()
14446 veb = i40e_veb_setup(pf, vsi->uplink_seid, vsi->seid, in i40e_vsi_setup()
14447 vsi->tc_config.enabled_tc); in i40e_vsi_setup()
14449 if (vsi->type != I40E_VSI_MAIN) { in i40e_vsi_setup()
14450 dev_info(&vsi->back->pdev->dev, in i40e_vsi_setup()
14464 veb = i40e_pf_get_veb_by_seid(pf, vsi->uplink_seid); in i40e_vsi_setup()
14470 vsi->flags |= I40E_VSI_FLAG_VEB_OWNER; in i40e_vsi_setup()
14478 vsi = pf->vsi[v_idx]; in i40e_vsi_setup()
14479 if (!vsi) in i40e_vsi_setup()
14481 vsi->type = type; in i40e_vsi_setup()
14482 vsi->veb_idx = (veb ? veb->idx : I40E_NO_VEB); in i40e_vsi_setup()
14487 vsi->vf_id = param1; in i40e_vsi_setup()
14489 alloc_queue_pairs = vsi->alloc_queue_pairs * in i40e_vsi_setup()
14490 (i40e_enabled_xdp_vsi(vsi) ? 2 : 1); in i40e_vsi_setup()
14492 ret = i40e_get_lump(pf, pf->qp_pile, alloc_queue_pairs, vsi->idx); in i40e_vsi_setup()
14496 alloc_queue_pairs, vsi->seid, ret); in i40e_vsi_setup()
14499 vsi->base_queue = ret; in i40e_vsi_setup()
14502 vsi->uplink_seid = uplink_seid; in i40e_vsi_setup()
14503 ret = i40e_add_vsi(vsi); in i40e_vsi_setup()
14507 switch (vsi->type) { in i40e_vsi_setup()
14511 ret = i40e_config_netdev(vsi); in i40e_vsi_setup()
14514 ret = i40e_netif_set_realnum_tx_rx_queues(vsi); in i40e_vsi_setup()
14517 if (vsi->type == I40E_VSI_MAIN) { in i40e_vsi_setup()
14521 SET_NETDEV_DEVLINK_PORT(vsi->netdev, &pf->devlink_port); in i40e_vsi_setup()
14523 ret = register_netdev(vsi->netdev); in i40e_vsi_setup()
14526 vsi->netdev_registered = true; in i40e_vsi_setup()
14527 netif_carrier_off(vsi->netdev); in i40e_vsi_setup()
14530 i40e_dcbnl_setup(vsi); in i40e_vsi_setup()
14535 ret = i40e_vsi_setup_vectors(vsi); in i40e_vsi_setup()
14539 ret = i40e_alloc_rings(vsi); in i40e_vsi_setup()
14544 i40e_vsi_map_rings_to_vectors(vsi); in i40e_vsi_setup()
14546 i40e_vsi_reset_stats(vsi); in i40e_vsi_setup()
14554 vsi->type == I40E_VSI_VMDQ2) { in i40e_vsi_setup()
14555 ret = i40e_vsi_config_rss(vsi); in i40e_vsi_setup()
14559 return vsi; in i40e_vsi_setup()
14562 i40e_vsi_clear_rings(vsi); in i40e_vsi_setup()
14564 i40e_vsi_free_q_vectors(vsi); in i40e_vsi_setup()
14566 if (vsi->netdev_registered) { in i40e_vsi_setup()
14567 vsi->netdev_registered = false; in i40e_vsi_setup()
14568 unregister_netdev(vsi->netdev); in i40e_vsi_setup()
14569 free_netdev(vsi->netdev); in i40e_vsi_setup()
14570 vsi->netdev = NULL; in i40e_vsi_setup()
14573 if (vsi->type == I40E_VSI_MAIN) in i40e_vsi_setup()
14576 i40e_aq_delete_element(&pf->hw, vsi->seid, NULL); in i40e_vsi_setup()
14578 i40e_vsi_clear(vsi); in i40e_vsi_setup()
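The tail of i40e_vsi_setup() (14562-14578) is the classic goto-unwind ladder: each failure releases exactly what was acquired before the failing step, in reverse order, ending with the switch-element delete and the VSI clear. A generic, compilable skeleton of the idiom, with stub steps standing in for the driver's calls:

/* stub steps standing in for the driver's add/alloc calls */
static int  step_add_vsi(void) { return 0; }
static int  step_vectors(void) { return 0; }
static int  step_rings(void)   { return 0; }
static void undo_vectors(void) { }
static void undo_add_vsi(void) { }

static int setup(void)
{
	int err;

	err = step_add_vsi();
	if (err)
		goto out;
	err = step_vectors();
	if (err)
		goto err_add;
	err = step_rings();
	if (err)
		goto err_vectors;
	return 0;

err_vectors:
	undo_vectors();		/* mirrors i40e_vsi_free_q_vectors() */
err_add:
	undo_add_vsi();		/* mirrors the element delete + clear */
out:
	return err;
}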
14694 struct i40e_vsi *vsi; in i40e_switch_branch_release() local
14708 i40e_pf_for_each_vsi(pf, i, vsi) in i40e_switch_branch_release()
14709 if (vsi->uplink_seid == branch_seid && in i40e_switch_branch_release()
14710 (vsi->flags & I40E_VSI_FLAG_VEB_OWNER) == 0) in i40e_switch_branch_release()
14711 i40e_vsi_release(vsi); in i40e_switch_branch_release()
14749 struct i40e_vsi *vsi, *vsi_it; in i40e_veb_release() local
14759 vsi = vsi_it; in i40e_veb_release()
14775 vsi->flags &= ~I40E_VSI_FLAG_VEB_OWNER; in i40e_veb_release()
14776 vsi->uplink_seid = veb->uplink_seid; in i40e_veb_release()
14777 vsi->veb_idx = I40E_NO_VEB; in i40e_veb_release()
14789 static int i40e_add_veb(struct i40e_veb *veb, struct i40e_vsi *vsi) in i40e_add_veb() argument
14795 ret = i40e_aq_add_veb(&pf->hw, veb->uplink_seid, vsi ? vsi->seid : 0, in i40e_add_veb()
14796 veb->enabled_tc, vsi ? false : true, in i40e_add_veb()
14828 if (vsi) { in i40e_add_veb()
14829 vsi->uplink_seid = veb->seid; in i40e_add_veb()
14830 vsi->veb_idx = veb->idx; in i40e_add_veb()
14831 vsi->flags |= I40E_VSI_FLAG_VEB_OWNER; in i40e_add_veb()
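On a successful i40e_aq_add_veb(), the VSI is re-parented under the new VEB (14828-14831); i40e_veb_release() performs the exact inverse (14775-14777), hopping the VSI's uplink over the released VEB. Both directions as a small model; the types and flag value are stand-ins:

#define NO_VEB (-1)			/* stands in for I40E_NO_VEB */
#define FLAG_VEB_OWNER 0x1u		/* stands in for I40E_VSI_FLAG_VEB_OWNER */

struct veb { int seid, idx, uplink_seid; };
struct vsi { int uplink_seid, veb_idx; unsigned int flags; };

static void attach_to_veb(struct vsi *vsi, const struct veb *veb)
{
	vsi->uplink_seid = veb->seid;
	vsi->veb_idx = veb->idx;
	vsi->flags |= FLAG_VEB_OWNER;
}

static void detach_from_veb(struct vsi *vsi, const struct veb *veb)
{
	vsi->flags &= ~FLAG_VEB_OWNER;
	vsi->uplink_seid = veb->uplink_seid;	/* hop over the released VEB */
	vsi->veb_idx = NO_VEB;
}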
14855 struct i40e_vsi *vsi = NULL; in i40e_veb_setup() local
14871 vsi = i40e_pf_get_vsi_by_seid(pf, vsi_seid); in i40e_veb_setup()
14872 if (!vsi) { in i40e_veb_setup()
14888 ret = i40e_add_veb(veb, vsi); in i40e_veb_setup()
14892 if (vsi && vsi->idx == pf->lan_vsi) in i40e_veb_setup()
15504 struct i40e_vsi *vsi; in i40e_init_recovery_mode() local
15533 pf->vsi = kcalloc(pf->num_alloc_vsi, sizeof(struct i40e_vsi *), in i40e_init_recovery_mode()
15535 if (!pf->vsi) { in i40e_init_recovery_mode()
15549 vsi = pf->vsi[v_idx]; in i40e_init_recovery_mode()
15550 if (!vsi) { in i40e_init_recovery_mode()
15554 vsi->alloc_queue_pairs = 1; in i40e_init_recovery_mode()
15555 err = i40e_config_netdev(vsi); in i40e_init_recovery_mode()
15558 err = register_netdev(vsi->netdev); in i40e_init_recovery_mode()
15561 vsi->netdev_registered = true; in i40e_init_recovery_mode()
15622 struct i40e_vsi *vsi; in i40e_probe() local
15958 pf->vsi = kcalloc(pf->num_alloc_vsi, sizeof(struct i40e_vsi *), in i40e_probe()
15960 if (!pf->vsi) { in i40e_probe()
15980 vsi = i40e_pf_get_main_vsi(pf); in i40e_probe()
15981 INIT_LIST_HEAD(&vsi->ch_list); in i40e_probe()
15984 vsi = i40e_find_vsi_by_type(pf, I40E_VSI_FDIR); in i40e_probe()
15985 if (vsi) in i40e_probe()
15986 i40e_vsi_open(vsi); in i40e_probe()
16196 kfree(pf->vsi); in i40e_probe()
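pf->vsi is sized once at probe (15958), and likewise in recovery mode (15533), as a zeroed array of pointers, so every slot starts out meaning "no VSI here". The userspace analogue is calloc(), sketched below with struct vsi left opaque:

#include <stdlib.h>

struct vsi;				/* opaque here, as in the table */

static struct vsi **alloc_vsi_table(size_t num_alloc_vsi)
{
	/* calloc() zeroes the slots, like kcalloc() does for pf->vsi */
	return calloc(num_alloc_vsi, sizeof(struct vsi *));
}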
16232 struct i40e_vsi *vsi; in i40e_remove() local
16269 struct i40e_vsi *vsi = pf->vsi[0]; in i40e_remove() local
16275 unregister_netdev(vsi->netdev); in i40e_remove()
16276 free_netdev(vsi->netdev); in i40e_remove()
16299 i40e_pf_for_each_vsi(pf, i, vsi) { in i40e_remove()
16300 i40e_vsi_close(vsi); in i40e_remove()
16301 i40e_vsi_release(vsi); in i40e_remove()
16302 pf->vsi[i] = NULL; in i40e_remove()
16340 i40e_pf_for_each_vsi(pf, i, vsi) { in i40e_remove()
16342 i40e_vsi_clear_rings(vsi); in i40e_remove()
16344 i40e_vsi_clear(vsi); in i40e_remove()
16345 pf->vsi[i] = NULL; in i40e_remove()
16355 kfree(pf->vsi); in i40e_remove()
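i40e_remove() makes two passes over the table: the first closes and releases live VSIs and NULLs their slots (16299-16302), the second clears leftover ring bookkeeping (16340-16345), and kfree(pf->vsi) goes last. A self-contained sketch of that slot-nulling pattern; vsi_close() and vsi_release() are stubs, not the driver's functions:

#include <stdlib.h>

struct vsi;

/* stubs, not the driver's i40e_vsi_close()/i40e_vsi_release() */
static void vsi_close(struct vsi *v)   { (void)v; }
static void vsi_release(struct vsi *v) { (void)v; }

static void remove_all(struct vsi **table, size_t n)
{
	size_t i;

	for (i = 0; i < n; i++) {
		if (!table[i])
			continue;
		vsi_close(table[i]);
		vsi_release(table[i]);
		table[i] = NULL;	/* slot handled; later passes skip it */
	}
	free(table);			/* the pointer table itself goes last */
}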