Lines matching refs:vsi. Each entry gives the source line number, the matching line, the enclosing function ("in ...()"), and the symbol's role on that line (argument/local).
47 static int ice_vsi_ctrl_all_rx_rings(struct ice_vsi *vsi, bool ena) in ice_vsi_ctrl_all_rx_rings() argument
52 ice_for_each_rxq(vsi, i) in ice_vsi_ctrl_all_rx_rings()
53 ice_vsi_ctrl_one_rx_ring(vsi, ena, i, false); in ice_vsi_ctrl_all_rx_rings()
55 ice_flush(&vsi->back->hw); in ice_vsi_ctrl_all_rx_rings()
57 ice_for_each_rxq(vsi, i) { in ice_vsi_ctrl_all_rx_rings()
58 ret = ice_vsi_wait_one_rx_ring(vsi, ena, i); in ice_vsi_ctrl_all_rx_rings()
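The function above programs every Rx ring's enable bit, flushes once, and only then polls each ring, so the per-ring settle times overlap instead of adding up. A minimal userspace C sketch of that two-phase shape; ring_set_enable() and ring_wait_enabled() are hypothetical stand-ins for the driver's register write and poll, not ice API:

	#include <stdbool.h>

	#define NUM_RINGS 4

	/* Hypothetical stand-ins for the MMIO write and the polling wait. */
	static void ring_set_enable(int idx, bool ena) { (void)idx; (void)ena; }
	static int  ring_wait_enabled(int idx, bool ena) { (void)idx; (void)ena; return 0; }

	static int ctrl_all_rx_rings(bool ena)
	{
		int i, ret = 0;

		/* phase 1: kick every ring; one flush covers the batch */
		for (i = 0; i < NUM_RINGS; i++)
			ring_set_enable(i, ena);

		/* phase 2: wait for each ring to reach the requested state */
		for (i = 0; i < NUM_RINGS; i++) {
			ret = ring_wait_enabled(i, ena);
			if (ret)
				break;
		}
		return ret;
	}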
73 static int ice_vsi_alloc_arrays(struct ice_vsi *vsi) in ice_vsi_alloc_arrays() argument
75 struct ice_pf *pf = vsi->back; in ice_vsi_alloc_arrays()
79 if (vsi->type == ICE_VSI_CHNL) in ice_vsi_alloc_arrays()
83 vsi->tx_rings = devm_kcalloc(dev, vsi->alloc_txq, in ice_vsi_alloc_arrays()
84 sizeof(*vsi->tx_rings), GFP_KERNEL); in ice_vsi_alloc_arrays()
85 if (!vsi->tx_rings) in ice_vsi_alloc_arrays()
88 vsi->rx_rings = devm_kcalloc(dev, vsi->alloc_rxq, in ice_vsi_alloc_arrays()
89 sizeof(*vsi->rx_rings), GFP_KERNEL); in ice_vsi_alloc_arrays()
90 if (!vsi->rx_rings) in ice_vsi_alloc_arrays()
99 vsi->txq_map = devm_kcalloc(dev, (vsi->alloc_txq + num_possible_cpus()), in ice_vsi_alloc_arrays()
100 sizeof(*vsi->txq_map), GFP_KERNEL); in ice_vsi_alloc_arrays()
102 if (!vsi->txq_map) in ice_vsi_alloc_arrays()
105 vsi->rxq_map = devm_kcalloc(dev, vsi->alloc_rxq, in ice_vsi_alloc_arrays()
106 sizeof(*vsi->rxq_map), GFP_KERNEL); in ice_vsi_alloc_arrays()
107 if (!vsi->rxq_map) in ice_vsi_alloc_arrays()
111 if (vsi->type == ICE_VSI_LB) in ice_vsi_alloc_arrays()
115 vsi->q_vectors = devm_kcalloc(dev, vsi->num_q_vectors, in ice_vsi_alloc_arrays()
116 sizeof(*vsi->q_vectors), GFP_KERNEL); in ice_vsi_alloc_arrays()
117 if (!vsi->q_vectors) in ice_vsi_alloc_arrays()
123 devm_kfree(dev, vsi->rxq_map); in ice_vsi_alloc_arrays()
125 devm_kfree(dev, vsi->txq_map); in ice_vsi_alloc_arrays()
127 devm_kfree(dev, vsi->rx_rings); in ice_vsi_alloc_arrays()
129 devm_kfree(dev, vsi->tx_rings); in ice_vsi_alloc_arrays()
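ice_vsi_alloc_arrays() is the classic goto-unwind allocation chain: each failure label frees exactly what the earlier steps allocated, in reverse order, and devm_kcalloc() is the device-managed kcalloc(). A self-contained userspace sketch of the same shape using plain calloc()/free(); the struct and field names are illustrative:

	#include <stdlib.h>

	struct vsi_arrays {
		void **tx_rings, **rx_rings;
		unsigned short *txq_map, *rxq_map;
	};

	/* Each failure path unwinds only what was already allocated. */
	static int vsi_alloc_arrays(struct vsi_arrays *a, int ntx, int nrx)
	{
		a->tx_rings = calloc(ntx, sizeof(*a->tx_rings));
		if (!a->tx_rings)
			return -1;
		a->rx_rings = calloc(nrx, sizeof(*a->rx_rings));
		if (!a->rx_rings)
			goto err_rx_rings;
		a->txq_map = calloc(ntx, sizeof(*a->txq_map));
		if (!a->txq_map)
			goto err_txq_map;
		a->rxq_map = calloc(nrx, sizeof(*a->rxq_map));
		if (!a->rxq_map)
			goto err_rxq_map;
		return 0;

	err_rxq_map:
		free(a->txq_map);
	err_txq_map:
		free(a->rx_rings);
	err_rx_rings:
		free(a->tx_rings);
		return -1;
	}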
137 static void ice_vsi_set_num_desc(struct ice_vsi *vsi) in ice_vsi_set_num_desc() argument
139 switch (vsi->type) { in ice_vsi_set_num_desc()
148 if (!vsi->num_rx_desc) in ice_vsi_set_num_desc()
149 vsi->num_rx_desc = ICE_DFLT_NUM_RX_DESC; in ice_vsi_set_num_desc()
150 if (!vsi->num_tx_desc) in ice_vsi_set_num_desc()
151 vsi->num_tx_desc = ICE_DFLT_NUM_TX_DESC; in ice_vsi_set_num_desc()
154 dev_dbg(ice_pf_to_dev(vsi->back), "Not setting number of Tx/Rx descriptors for VSI type %d\n", in ice_vsi_set_num_desc()
155 vsi->type); in ice_vsi_set_num_desc()
166 static void ice_vsi_set_num_qs(struct ice_vsi *vsi) in ice_vsi_set_num_qs() argument
168 enum ice_vsi_type vsi_type = vsi->type; in ice_vsi_set_num_qs()
169 struct ice_pf *pf = vsi->back; in ice_vsi_set_num_qs()
170 struct ice_vf *vf = vsi->vf; in ice_vsi_set_num_qs()
177 if (vsi->req_txq) { in ice_vsi_set_num_qs()
178 vsi->alloc_txq = vsi->req_txq; in ice_vsi_set_num_qs()
179 vsi->num_txq = vsi->req_txq; in ice_vsi_set_num_qs()
181 vsi->alloc_txq = min3(pf->num_lan_msix, in ice_vsi_set_num_qs()
186 pf->num_lan_tx = vsi->alloc_txq; in ice_vsi_set_num_qs()
190 vsi->alloc_rxq = 1; in ice_vsi_set_num_qs()
192 if (vsi->req_rxq) { in ice_vsi_set_num_qs()
193 vsi->alloc_rxq = vsi->req_rxq; in ice_vsi_set_num_qs()
194 vsi->num_rxq = vsi->req_rxq; in ice_vsi_set_num_qs()
196 vsi->alloc_rxq = min3(pf->num_lan_msix, in ice_vsi_set_num_qs()
202 pf->num_lan_rx = vsi->alloc_rxq; in ice_vsi_set_num_qs()
204 vsi->num_q_vectors = min_t(int, pf->num_lan_msix, in ice_vsi_set_num_qs()
205 max_t(int, vsi->alloc_rxq, in ice_vsi_set_num_qs()
206 vsi->alloc_txq)); in ice_vsi_set_num_qs()
209 vsi->alloc_txq = 1; in ice_vsi_set_num_qs()
210 vsi->alloc_rxq = 1; in ice_vsi_set_num_qs()
211 vsi->num_q_vectors = 1; in ice_vsi_set_num_qs()
212 vsi->irq_dyn_alloc = true; in ice_vsi_set_num_qs()
217 vsi->alloc_txq = vf->num_vf_qs; in ice_vsi_set_num_qs()
218 vsi->alloc_rxq = vf->num_vf_qs; in ice_vsi_set_num_qs()
224 vsi->num_q_vectors = vf->num_msix - ICE_NONQ_VECS_VF; in ice_vsi_set_num_qs()
227 vsi->alloc_txq = 1; in ice_vsi_set_num_qs()
228 vsi->alloc_rxq = 1; in ice_vsi_set_num_qs()
229 vsi->num_q_vectors = 1; in ice_vsi_set_num_qs()
232 vsi->alloc_txq = 0; in ice_vsi_set_num_qs()
233 vsi->alloc_rxq = 0; in ice_vsi_set_num_qs()
236 vsi->alloc_txq = 1; in ice_vsi_set_num_qs()
237 vsi->alloc_rxq = 1; in ice_vsi_set_num_qs()
244 ice_vsi_set_num_desc(vsi); in ice_vsi_set_num_qs()
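For the PF case the listing sizes the queue counts with min3(): never more queues than LAN MSI-X vectors, online CPUs, or what the hardware function exposes, and the q_vector count is then min(msix, max(rxq, txq)). A sketch of that clamping; the kernel's real min3() in minmax.h adds type checking that this plain macro omits:

	/* smallest of three values; simplified vs include/linux/minmax.h */
	#define min2(a, b)	((a) < (b) ? (a) : (b))
	#define min3(a, b, c)	min2(min2((a), (b)), (c))

	/* Illustrative inputs: vectors granted, CPUs online, HW queue cap. */
	static unsigned int pick_alloc_txq(unsigned int num_lan_msix,
					   unsigned int online_cpus,
					   unsigned int hw_max_txq)
	{
		return min3(num_lan_msix, online_cpus, hw_max_txq);
	}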
280 static void ice_vsi_delete_from_hw(struct ice_vsi *vsi) in ice_vsi_delete_from_hw() argument
282 struct ice_pf *pf = vsi->back; in ice_vsi_delete_from_hw()
286 ice_fltr_remove_all(vsi); in ice_vsi_delete_from_hw()
291 if (vsi->type == ICE_VSI_VF) in ice_vsi_delete_from_hw()
292 ctxt->vf_num = vsi->vf->vf_id; in ice_vsi_delete_from_hw()
293 ctxt->vsi_num = vsi->vsi_num; in ice_vsi_delete_from_hw()
295 memcpy(&ctxt->info, &vsi->info, sizeof(ctxt->info)); in ice_vsi_delete_from_hw()
297 status = ice_free_vsi(&pf->hw, vsi->idx, ctxt, false, NULL); in ice_vsi_delete_from_hw()
300 vsi->vsi_num, status); in ice_vsi_delete_from_hw()
309 static void ice_vsi_free_arrays(struct ice_vsi *vsi) in ice_vsi_free_arrays() argument
311 struct ice_pf *pf = vsi->back; in ice_vsi_free_arrays()
317 devm_kfree(dev, vsi->q_vectors); in ice_vsi_free_arrays()
318 vsi->q_vectors = NULL; in ice_vsi_free_arrays()
319 devm_kfree(dev, vsi->tx_rings); in ice_vsi_free_arrays()
320 vsi->tx_rings = NULL; in ice_vsi_free_arrays()
321 devm_kfree(dev, vsi->rx_rings); in ice_vsi_free_arrays()
322 vsi->rx_rings = NULL; in ice_vsi_free_arrays()
323 devm_kfree(dev, vsi->txq_map); in ice_vsi_free_arrays()
324 vsi->txq_map = NULL; in ice_vsi_free_arrays()
325 devm_kfree(dev, vsi->rxq_map); in ice_vsi_free_arrays()
326 vsi->rxq_map = NULL; in ice_vsi_free_arrays()
333 static void ice_vsi_free_stats(struct ice_vsi *vsi) in ice_vsi_free_stats() argument
336 struct ice_pf *pf = vsi->back; in ice_vsi_free_stats()
339 if (vsi->type == ICE_VSI_CHNL) in ice_vsi_free_stats()
344 vsi_stat = pf->vsi_stats[vsi->idx]; in ice_vsi_free_stats()
348 ice_for_each_alloc_txq(vsi, i) { in ice_vsi_free_stats()
355 ice_for_each_alloc_rxq(vsi, i) { in ice_vsi_free_stats()
365 pf->vsi_stats[vsi->idx] = NULL; in ice_vsi_free_stats()
372 static int ice_vsi_alloc_ring_stats(struct ice_vsi *vsi) in ice_vsi_alloc_ring_stats() argument
377 struct ice_pf *pf = vsi->back; in ice_vsi_alloc_ring_stats()
380 vsi_stats = pf->vsi_stats[vsi->idx]; in ice_vsi_alloc_ring_stats()
385 ice_for_each_alloc_txq(vsi, i) { in ice_vsi_alloc_ring_stats()
389 ring = vsi->tx_rings[i]; in ice_vsi_alloc_ring_stats()
404 ice_for_each_alloc_rxq(vsi, i) { in ice_vsi_alloc_ring_stats()
408 ring = vsi->rx_rings[i]; in ice_vsi_alloc_ring_stats()
425 ice_vsi_free_stats(vsi); in ice_vsi_alloc_ring_stats()
436 void ice_vsi_free(struct ice_vsi *vsi) in ice_vsi_free() argument
441 if (!vsi || !vsi->back) in ice_vsi_free()
444 pf = vsi->back; in ice_vsi_free()
447 if (!pf->vsi[vsi->idx] || pf->vsi[vsi->idx] != vsi) { in ice_vsi_free()
448 dev_dbg(dev, "vsi does not exist at pf->vsi[%d]\n", vsi->idx); in ice_vsi_free()
455 pf->vsi[vsi->idx] = NULL; in ice_vsi_free()
456 pf->next_vsi = vsi->idx; in ice_vsi_free()
458 ice_vsi_free_stats(vsi); in ice_vsi_free()
459 ice_vsi_free_arrays(vsi); in ice_vsi_free()
460 mutex_destroy(&vsi->xdp_state_lock); in ice_vsi_free()
462 devm_kfree(dev, vsi); in ice_vsi_free()
465 void ice_vsi_delete(struct ice_vsi *vsi) in ice_vsi_delete() argument
467 ice_vsi_delete_from_hw(vsi); in ice_vsi_delete()
468 ice_vsi_free(vsi); in ice_vsi_delete()
513 static int ice_vsi_alloc_stat_arrays(struct ice_vsi *vsi) in ice_vsi_alloc_stat_arrays() argument
516 struct ice_pf *pf = vsi->back; in ice_vsi_alloc_stat_arrays()
518 if (vsi->type == ICE_VSI_CHNL) in ice_vsi_alloc_stat_arrays()
523 if (pf->vsi_stats[vsi->idx]) in ice_vsi_alloc_stat_arrays()
532 kcalloc(vsi->alloc_txq, sizeof(*vsi_stat->tx_ring_stats), in ice_vsi_alloc_stat_arrays()
538 kcalloc(vsi->alloc_rxq, sizeof(*vsi_stat->rx_ring_stats), in ice_vsi_alloc_stat_arrays()
543 pf->vsi_stats[vsi->idx] = vsi_stat; in ice_vsi_alloc_stat_arrays()
552 pf->vsi_stats[vsi->idx] = NULL; in ice_vsi_alloc_stat_arrays()
562 ice_vsi_alloc_def(struct ice_vsi *vsi, struct ice_channel *ch) in ice_vsi_alloc_def() argument
564 if (vsi->type != ICE_VSI_CHNL) { in ice_vsi_alloc_def()
565 ice_vsi_set_num_qs(vsi); in ice_vsi_alloc_def()
566 if (ice_vsi_alloc_arrays(vsi)) in ice_vsi_alloc_def()
570 switch (vsi->type) { in ice_vsi_alloc_def()
574 vsi->irq_handler = ice_msix_clean_rings; in ice_vsi_alloc_def()
578 vsi->irq_handler = ice_msix_clean_ctrl_vsi; in ice_vsi_alloc_def()
584 vsi->num_rxq = ch->num_rxq; in ice_vsi_alloc_def()
585 vsi->num_txq = ch->num_txq; in ice_vsi_alloc_def()
586 vsi->next_base_q = ch->base_q; in ice_vsi_alloc_def()
592 ice_vsi_free_arrays(vsi); in ice_vsi_alloc_def()
612 struct ice_vsi *vsi = NULL; in ice_vsi_alloc() local
626 vsi = devm_kzalloc(dev, sizeof(*vsi), GFP_KERNEL); in ice_vsi_alloc()
627 if (!vsi) in ice_vsi_alloc()
630 vsi->back = pf; in ice_vsi_alloc()
631 set_bit(ICE_VSI_DOWN, vsi->state); in ice_vsi_alloc()
634 vsi->idx = pf->next_vsi; in ice_vsi_alloc()
635 pf->vsi[pf->next_vsi] = vsi; in ice_vsi_alloc()
638 pf->next_vsi = ice_get_free_slot(pf->vsi, pf->num_alloc_vsi, in ice_vsi_alloc()
641 mutex_init(&vsi->xdp_state_lock); in ice_vsi_alloc()
645 return vsi; in ice_vsi_alloc()
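ice_vsi_alloc() takes the cached pf->next_vsi slot and immediately recomputes the hint with ice_get_free_slot(). A hedged analogue of such a free-slot scan (the real helper may prefer checking curr+1 first; this version simply scans forward and wraps):

	/* Find the next unused (NULL) slot after `curr`; returns `size`
	 * when the table is full, so the caller can treat that as "none". */
	static int get_free_slot(void **array, int size, int curr)
	{
		int i;

		for (i = curr + 1; i < size; i++)
			if (!array[i])
				return i;
		for (i = 0; i < curr; i++)
			if (!array[i])
				return i;
		return size;
	}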
656 static int ice_alloc_fd_res(struct ice_vsi *vsi) in ice_alloc_fd_res() argument
658 struct ice_pf *pf = vsi->back; in ice_alloc_fd_res()
668 if (!(vsi->type == ICE_VSI_PF || vsi->type == ICE_VSI_VF || in ice_alloc_fd_res()
669 vsi->type == ICE_VSI_CHNL)) in ice_alloc_fd_res()
690 if (vsi->type == ICE_VSI_PF) { in ice_alloc_fd_res()
691 vsi->num_gfltr = g_val; in ice_alloc_fd_res()
699 vsi->num_gfltr = ICE_PF_VSI_GFLTR; in ice_alloc_fd_res()
703 vsi->num_bfltr = b_val; in ice_alloc_fd_res()
704 } else if (vsi->type == ICE_VSI_VF) { in ice_alloc_fd_res()
705 vsi->num_gfltr = 0; in ice_alloc_fd_res()
708 vsi->num_bfltr = b_val; in ice_alloc_fd_res()
731 vsi->num_gfltr = g_val / numtc; in ice_alloc_fd_res()
734 vsi->num_bfltr = b_val; in ice_alloc_fd_res()
746 static int ice_vsi_get_qs(struct ice_vsi *vsi) in ice_vsi_get_qs() argument
748 struct ice_pf *pf = vsi->back; in ice_vsi_get_qs()
753 .q_count = vsi->alloc_txq, in ice_vsi_get_qs()
755 .vsi_map = vsi->txq_map, in ice_vsi_get_qs()
763 .q_count = vsi->alloc_rxq, in ice_vsi_get_qs()
765 .vsi_map = vsi->rxq_map, in ice_vsi_get_qs()
771 if (vsi->type == ICE_VSI_CHNL) in ice_vsi_get_qs()
777 vsi->tx_mapping_mode = tx_qs_cfg.mapping_mode; in ice_vsi_get_qs()
782 vsi->rx_mapping_mode = rx_qs_cfg.mapping_mode; in ice_vsi_get_qs()
791 static void ice_vsi_put_qs(struct ice_vsi *vsi) in ice_vsi_put_qs() argument
793 struct ice_pf *pf = vsi->back; in ice_vsi_put_qs()
798 ice_for_each_alloc_txq(vsi, i) { in ice_vsi_put_qs()
799 clear_bit(vsi->txq_map[i], pf->avail_txqs); in ice_vsi_put_qs()
800 vsi->txq_map[i] = ICE_INVAL_Q_INDEX; in ice_vsi_put_qs()
803 ice_for_each_alloc_rxq(vsi, i) { in ice_vsi_put_qs()
804 clear_bit(vsi->rxq_map[i], pf->avail_rxqs); in ice_vsi_put_qs()
805 vsi->rxq_map[i] = ICE_INVAL_Q_INDEX; in ice_vsi_put_qs()
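ice_vsi_put_qs() returns queue indices to the PF-wide avail_txqs/avail_rxqs bitmaps with clear_bit() and poisons the map entries with ICE_INVAL_Q_INDEX. A single-word userspace sketch of that bitmap allocator; the driver uses proper bitmap_* helpers over larger maps, this is only the idea:

	#define MAX_QS 64

	static unsigned long long used_qs;	/* bit i set => queue i taken */

	/* allocate: first zero bit, the get side of the bitmaps */
	static int q_get(void)
	{
		int i;

		for (i = 0; i < MAX_QS; i++)
			if (!(used_qs & (1ULL << i))) {
				used_qs |= 1ULL << i;
				return i;
			}
		return -1;			/* none free */
	}

	/* release: the clear_bit() analogue from ice_vsi_put_qs() */
	static void q_put(int i)
	{
		used_qs &= ~(1ULL << i);
	}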
840 static void ice_vsi_clean_rss_flow_fld(struct ice_vsi *vsi) in ice_vsi_clean_rss_flow_fld() argument
842 struct ice_pf *pf = vsi->back; in ice_vsi_clean_rss_flow_fld()
848 status = ice_rem_vsi_rss_cfg(&pf->hw, vsi->idx); in ice_vsi_clean_rss_flow_fld()
851 vsi->vsi_num, status); in ice_vsi_clean_rss_flow_fld()
858 static void ice_rss_clean(struct ice_vsi *vsi) in ice_rss_clean() argument
860 struct ice_pf *pf = vsi->back; in ice_rss_clean()
865 devm_kfree(dev, vsi->rss_hkey_user); in ice_rss_clean()
866 devm_kfree(dev, vsi->rss_lut_user); in ice_rss_clean()
868 ice_vsi_clean_rss_flow_fld(vsi); in ice_rss_clean()
871 ice_rem_vsi_rss_list(&pf->hw, vsi->idx); in ice_rss_clean()
878 static void ice_vsi_set_rss_params(struct ice_vsi *vsi) in ice_vsi_set_rss_params() argument
881 struct ice_pf *pf = vsi->back; in ice_vsi_set_rss_params()
885 vsi->rss_size = 1; in ice_vsi_set_rss_params()
891 switch (vsi->type) { in ice_vsi_set_rss_params()
895 vsi->rss_table_size = (u16)cap->rss_table_size; in ice_vsi_set_rss_params()
896 if (vsi->type == ICE_VSI_CHNL) in ice_vsi_set_rss_params()
897 vsi->rss_size = min_t(u16, vsi->num_rxq, max_rss_size); in ice_vsi_set_rss_params()
899 vsi->rss_size = min_t(u16, num_online_cpus(), in ice_vsi_set_rss_params()
901 vsi->rss_lut_type = ICE_LUT_PF; in ice_vsi_set_rss_params()
904 vsi->rss_table_size = ICE_LUT_VSI_SIZE; in ice_vsi_set_rss_params()
905 vsi->rss_size = min_t(u16, num_online_cpus(), max_rss_size); in ice_vsi_set_rss_params()
906 vsi->rss_lut_type = ICE_LUT_VSI; in ice_vsi_set_rss_params()
912 vsi->rss_table_size = ICE_LUT_VSI_SIZE; in ice_vsi_set_rss_params()
913 vsi->rss_size = ICE_MAX_RSS_QS_PER_VF; in ice_vsi_set_rss_params()
914 vsi->rss_lut_type = ICE_LUT_VSI; in ice_vsi_set_rss_params()
920 ice_vsi_type_str(vsi->type)); in ice_vsi_set_rss_params()
986 static int ice_vsi_setup_q_map(struct ice_vsi *vsi, struct ice_vsi_ctx *ctxt) in ice_vsi_setup_q_map() argument
990 u16 qcount_tx = vsi->alloc_txq; in ice_vsi_setup_q_map()
991 u16 qcount_rx = vsi->alloc_rxq; in ice_vsi_setup_q_map()
995 if (!vsi->tc_cfg.numtc) { in ice_vsi_setup_q_map()
997 vsi->tc_cfg.numtc = 1; in ice_vsi_setup_q_map()
998 vsi->tc_cfg.ena_tc = 1; in ice_vsi_setup_q_map()
1001 num_rxq_per_tc = min_t(u16, qcount_rx / vsi->tc_cfg.numtc, ICE_MAX_RXQS_PER_TC); in ice_vsi_setup_q_map()
1004 num_txq_per_tc = qcount_tx / vsi->tc_cfg.numtc; in ice_vsi_setup_q_map()
1023 if (!(vsi->tc_cfg.ena_tc & BIT(i))) { in ice_vsi_setup_q_map()
1025 vsi->tc_cfg.tc_info[i].qoffset = 0; in ice_vsi_setup_q_map()
1026 vsi->tc_cfg.tc_info[i].qcount_rx = 1; in ice_vsi_setup_q_map()
1027 vsi->tc_cfg.tc_info[i].qcount_tx = 1; in ice_vsi_setup_q_map()
1028 vsi->tc_cfg.tc_info[i].netdev_tc = 0; in ice_vsi_setup_q_map()
1034 vsi->tc_cfg.tc_info[i].qoffset = offset; in ice_vsi_setup_q_map()
1035 vsi->tc_cfg.tc_info[i].qcount_rx = num_rxq_per_tc; in ice_vsi_setup_q_map()
1036 vsi->tc_cfg.tc_info[i].qcount_tx = num_txq_per_tc; in ice_vsi_setup_q_map()
1037 vsi->tc_cfg.tc_info[i].netdev_tc = netdev_tc++; in ice_vsi_setup_q_map()
1057 if (rx_count > vsi->alloc_rxq) { in ice_vsi_setup_q_map()
1058 …dev_err(ice_pf_to_dev(vsi->back), "Trying to use more Rx queues (%u), than were allocated (%u)!\n", in ice_vsi_setup_q_map()
1059 rx_count, vsi->alloc_rxq); in ice_vsi_setup_q_map()
1063 if (tx_count > vsi->alloc_txq) { in ice_vsi_setup_q_map()
1064 …dev_err(ice_pf_to_dev(vsi->back), "Trying to use more Tx queues (%u), than were allocated (%u)!\n", in ice_vsi_setup_q_map()
1065 tx_count, vsi->alloc_txq); in ice_vsi_setup_q_map()
1069 vsi->num_txq = tx_count; in ice_vsi_setup_q_map()
1070 vsi->num_rxq = rx_count; in ice_vsi_setup_q_map()
1072 if (vsi->type == ICE_VSI_VF && vsi->num_txq != vsi->num_rxq) { in ice_vsi_setup_q_map()
1073 …dev_dbg(ice_pf_to_dev(vsi->back), "VF VSI should have same number of Tx and Rx queues. Hence makin… in ice_vsi_setup_q_map()
1077 vsi->num_txq = vsi->num_rxq; in ice_vsi_setup_q_map()
1086 ctxt->info.q_mapping[0] = cpu_to_le16(vsi->rxq_map[0]); in ice_vsi_setup_q_map()
1087 ctxt->info.q_mapping[1] = cpu_to_le16(vsi->num_rxq); in ice_vsi_setup_q_map()
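ice_vsi_setup_q_map() carves the flat queue range into per-TC slices: disabled TCs get a dummy 1-queue mapping at offset 0, enabled TCs get a slice at a running offset, and the totals are checked against alloc_txq/alloc_rxq afterwards. A compact sketch of the offset/qcount bookkeeping with illustrative types (the driver tracks Rx and Tx counts separately):

	#define MAX_TC 8

	struct tc_qinfo { int qoffset, qcount; };

	/* Returns the total queue count used, which the caller must
	 * compare against the allocated count (the rx_count/tx_count
	 * checks in the listing above). */
	static int setup_q_map(unsigned char ena_tc, int qcount_per_tc,
			       struct tc_qinfo info[MAX_TC])
	{
		int i, offset = 0;

		for (i = 0; i < MAX_TC; i++) {
			if (!(ena_tc & (1 << i))) {
				info[i].qoffset = 0;
				info[i].qcount = 1;
				continue;
			}
			info[i].qoffset = offset;
			info[i].qcount = qcount_per_tc;
			offset += qcount_per_tc;
		}
		return offset;
	}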
1097 static void ice_set_fd_vsi_ctx(struct ice_vsi_ctx *ctxt, struct ice_vsi *vsi) in ice_set_fd_vsi_ctx() argument
1102 if (vsi->type != ICE_VSI_PF && vsi->type != ICE_VSI_CTRL && in ice_set_fd_vsi_ctx()
1103 vsi->type != ICE_VSI_VF && vsi->type != ICE_VSI_CHNL) in ice_set_fd_vsi_ctx()
1118 cpu_to_le16(vsi->num_gfltr); in ice_set_fd_vsi_ctx()
1121 cpu_to_le16(vsi->num_bfltr); in ice_set_fd_vsi_ctx()
1139 static void ice_set_rss_vsi_ctx(struct ice_vsi_ctx *ctxt, struct ice_vsi *vsi) in ice_set_rss_vsi_ctx() argument
1145 pf = vsi->back; in ice_set_rss_vsi_ctx()
1148 switch (vsi->type) { in ice_set_rss_vsi_ctx()
1161 ice_vsi_type_str(vsi->type)); in ice_set_rss_vsi_ctx()
1166 vsi->rss_hfunc = hash_type; in ice_set_rss_vsi_ctx()
1174 ice_chnl_vsi_setup_q_map(struct ice_vsi *vsi, struct ice_vsi_ctx *ctxt) in ice_chnl_vsi_setup_q_map() argument
1176 struct ice_pf *pf = vsi->back; in ice_chnl_vsi_setup_q_map()
1181 qcount = min_t(int, vsi->num_rxq, pf->num_lan_msix); in ice_chnl_vsi_setup_q_map()
1189 ctxt->info.q_mapping[0] = cpu_to_le16(vsi->next_base_q); in ice_chnl_vsi_setup_q_map()
1199 static bool ice_vsi_is_vlan_pruning_ena(struct ice_vsi *vsi) in ice_vsi_is_vlan_pruning_ena() argument
1201 return vsi->info.sw_flags2 & ICE_AQ_VSI_SW_FLAG_RX_VLAN_PRUNE_ENA; in ice_vsi_is_vlan_pruning_ena()
1215 static int ice_vsi_init(struct ice_vsi *vsi, u32 vsi_flags) in ice_vsi_init() argument
1217 struct ice_pf *pf = vsi->back; in ice_vsi_init()
1228 switch (vsi->type) { in ice_vsi_init()
1241 ctxt->vf_num = vsi->vf->vf_id + hw->func_caps.vf_base_id; in ice_vsi_init()
1251 if (vsi->type == ICE_VSI_CHNL) { in ice_vsi_init()
1265 ice_set_fd_vsi_ctx(ctxt, vsi); in ice_vsi_init()
1267 if (vsi->vsw->bridge_mode == BRIDGE_MODE_VEB) in ice_vsi_init()
1272 vsi->type != ICE_VSI_CTRL) { in ice_vsi_init()
1273 ice_set_rss_vsi_ctx(ctxt, vsi); in ice_vsi_init()
1282 ctxt->info.sw_id = vsi->port_info->sw_id; in ice_vsi_init()
1283 if (vsi->type == ICE_VSI_CHNL) { in ice_vsi_init()
1284 ice_chnl_vsi_setup_q_map(vsi, ctxt); in ice_vsi_init()
1286 ret = ice_vsi_setup_q_map(vsi, ctxt); in ice_vsi_init()
1300 if (vsi->type == ICE_VSI_PF) { in ice_vsi_init()
1307 ret = ice_add_vsi(hw, vsi->idx, ctxt, NULL); in ice_vsi_init()
1314 ret = ice_update_vsi(hw, vsi->idx, ctxt, NULL); in ice_vsi_init()
1323 vsi->info = ctxt->info; in ice_vsi_init()
1326 vsi->vsi_num = ctxt->vsi_num; in ice_vsi_init()
1337 static void ice_vsi_clear_rings(struct ice_vsi *vsi) in ice_vsi_clear_rings() argument
1342 if (vsi->q_vectors) { in ice_vsi_clear_rings()
1343 ice_for_each_q_vector(vsi, i) { in ice_vsi_clear_rings()
1344 struct ice_q_vector *q_vector = vsi->q_vectors[i]; in ice_vsi_clear_rings()
1353 if (vsi->tx_rings) { in ice_vsi_clear_rings()
1354 ice_for_each_alloc_txq(vsi, i) { in ice_vsi_clear_rings()
1355 if (vsi->tx_rings[i]) { in ice_vsi_clear_rings()
1356 kfree_rcu(vsi->tx_rings[i], rcu); in ice_vsi_clear_rings()
1357 WRITE_ONCE(vsi->tx_rings[i], NULL); in ice_vsi_clear_rings()
1361 if (vsi->rx_rings) { in ice_vsi_clear_rings()
1362 ice_for_each_alloc_rxq(vsi, i) { in ice_vsi_clear_rings()
1363 if (vsi->rx_rings[i]) { in ice_vsi_clear_rings()
1364 kfree_rcu(vsi->rx_rings[i], rcu); in ice_vsi_clear_rings()
1365 WRITE_ONCE(vsi->rx_rings[i], NULL); in ice_vsi_clear_rings()
1375 static int ice_vsi_alloc_rings(struct ice_vsi *vsi) in ice_vsi_alloc_rings() argument
1377 bool dvm_ena = ice_is_dvm_ena(&vsi->back->hw); in ice_vsi_alloc_rings()
1378 struct ice_pf *pf = vsi->back; in ice_vsi_alloc_rings()
1384 ice_for_each_alloc_txq(vsi, i) { in ice_vsi_alloc_rings()
1394 ring->reg_idx = vsi->txq_map[i]; in ice_vsi_alloc_rings()
1395 ring->vsi = vsi; in ice_vsi_alloc_rings()
1398 ring->count = vsi->num_tx_desc; in ice_vsi_alloc_rings()
1404 WRITE_ONCE(vsi->tx_rings[i], ring); in ice_vsi_alloc_rings()
1408 ice_for_each_alloc_rxq(vsi, i) { in ice_vsi_alloc_rings()
1417 ring->reg_idx = vsi->rxq_map[i]; in ice_vsi_alloc_rings()
1418 ring->vsi = vsi; in ice_vsi_alloc_rings()
1419 ring->netdev = vsi->netdev; in ice_vsi_alloc_rings()
1421 ring->count = vsi->num_rx_desc; in ice_vsi_alloc_rings()
1423 WRITE_ONCE(vsi->rx_rings[i], ring); in ice_vsi_alloc_rings()
1429 ice_vsi_clear_rings(vsi); in ice_vsi_alloc_rings()
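Rings are published into the arrays with WRITE_ONCE() and retired with kfree_rcu(), so lock-free readers either see NULL or a fully initialized ring. A C11-atomics analogue of just the publish/clear ordering; note that the plain free() below does not provide the RCU grace period that kfree_rcu() adds in the driver:

	#include <stdatomic.h>
	#include <stdlib.h>

	struct ring { int count; };

	static _Atomic(struct ring *) slot;

	/* Publish: fully initialize, then release-store the pointer
	 * (the WRITE_ONCE() analogue). */
	static int publish_ring(int count)
	{
		struct ring *r = calloc(1, sizeof(*r));

		if (!r)
			return -1;
		r->count = count;
		atomic_store_explicit(&slot, r, memory_order_release);
		return 0;
	}

	/* Retire: clear the pointer first so new readers see NULL; the
	 * driver then defers the free past a grace period, which this
	 * immediate free() deliberately does NOT model. */
	static void retire_ring(void)
	{
		struct ring *r = atomic_exchange_explicit(&slot, NULL,
							  memory_order_acq_rel);
		free(r);
	}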
1442 void ice_vsi_manage_rss_lut(struct ice_vsi *vsi, bool ena) in ice_vsi_manage_rss_lut() argument
1446 lut = kzalloc(vsi->rss_table_size, GFP_KERNEL); in ice_vsi_manage_rss_lut()
1451 if (vsi->rss_lut_user) in ice_vsi_manage_rss_lut()
1452 memcpy(lut, vsi->rss_lut_user, vsi->rss_table_size); in ice_vsi_manage_rss_lut()
1454 ice_fill_rss_lut(lut, vsi->rss_table_size, in ice_vsi_manage_rss_lut()
1455 vsi->rss_size); in ice_vsi_manage_rss_lut()
1458 ice_set_rss_lut(vsi, lut, vsi->rss_table_size); in ice_vsi_manage_rss_lut()
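When no user-supplied LUT exists, the table is filled by ice_fill_rss_lut(), which spreads queue indices round-robin across the table. A sketch matching that helper's behavior as I understand it (entry i points at queue i modulo rss_size):

	static void fill_rss_lut(unsigned char *lut, unsigned int lut_size,
				 unsigned int rss_size)
	{
		unsigned int i;

		/* round-robin: LUT entry i -> queue (i % rss_size) */
		for (i = 0; i < lut_size; i++)
			lut[i] = i % rss_size;
	}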
1467 void ice_vsi_cfg_crc_strip(struct ice_vsi *vsi, bool disable) in ice_vsi_cfg_crc_strip() argument
1471 ice_for_each_rxq(vsi, i) in ice_vsi_cfg_crc_strip()
1473 vsi->rx_rings[i]->flags |= ICE_RX_FLAGS_CRC_STRIP_DIS; in ice_vsi_cfg_crc_strip()
1475 vsi->rx_rings[i]->flags &= ~ICE_RX_FLAGS_CRC_STRIP_DIS; in ice_vsi_cfg_crc_strip()
1482 int ice_vsi_cfg_rss_lut_key(struct ice_vsi *vsi) in ice_vsi_cfg_rss_lut_key() argument
1484 struct ice_pf *pf = vsi->back; in ice_vsi_cfg_rss_lut_key()
1490 if (vsi->type == ICE_VSI_PF && vsi->ch_rss_size && in ice_vsi_cfg_rss_lut_key()
1492 vsi->rss_size = min_t(u16, vsi->rss_size, vsi->ch_rss_size); in ice_vsi_cfg_rss_lut_key()
1494 vsi->rss_size = min_t(u16, vsi->rss_size, vsi->num_rxq); in ice_vsi_cfg_rss_lut_key()
1502 if (vsi->orig_rss_size && vsi->rss_size < vsi->orig_rss_size && in ice_vsi_cfg_rss_lut_key()
1503 vsi->orig_rss_size <= vsi->num_rxq) { in ice_vsi_cfg_rss_lut_key()
1504 vsi->rss_size = vsi->orig_rss_size; in ice_vsi_cfg_rss_lut_key()
1506 vsi->orig_rss_size = 0; in ice_vsi_cfg_rss_lut_key()
1510 lut = kzalloc(vsi->rss_table_size, GFP_KERNEL); in ice_vsi_cfg_rss_lut_key()
1514 if (vsi->rss_lut_user) in ice_vsi_cfg_rss_lut_key()
1515 memcpy(lut, vsi->rss_lut_user, vsi->rss_table_size); in ice_vsi_cfg_rss_lut_key()
1517 ice_fill_rss_lut(lut, vsi->rss_table_size, vsi->rss_size); in ice_vsi_cfg_rss_lut_key()
1519 err = ice_set_rss_lut(vsi, lut, vsi->rss_table_size); in ice_vsi_cfg_rss_lut_key()
1531 if (vsi->rss_hkey_user) in ice_vsi_cfg_rss_lut_key()
1532 memcpy(key, vsi->rss_hkey_user, ICE_GET_SET_RSS_KEY_EXTEND_KEY_SIZE); in ice_vsi_cfg_rss_lut_key()
1536 err = ice_set_rss_key(vsi, key); in ice_vsi_cfg_rss_lut_key()
1554 static void ice_vsi_set_vf_rss_flow_fld(struct ice_vsi *vsi) in ice_vsi_set_vf_rss_flow_fld() argument
1556 struct ice_pf *pf = vsi->back; in ice_vsi_set_vf_rss_flow_fld()
1563 vsi->vsi_num); in ice_vsi_set_vf_rss_flow_fld()
1567 status = ice_add_avf_rss_cfg(&pf->hw, vsi, ICE_DEFAULT_RSS_HENA); in ice_vsi_set_vf_rss_flow_fld()
1570 vsi->vsi_num, status); in ice_vsi_set_vf_rss_flow_fld()
1653 static void ice_vsi_set_rss_flow_fld(struct ice_vsi *vsi) in ice_vsi_set_rss_flow_fld() argument
1655 u16 vsi_num = vsi->vsi_num; in ice_vsi_set_rss_flow_fld()
1656 struct ice_pf *pf = vsi->back; in ice_vsi_set_rss_flow_fld()
1671 status = ice_add_rss_cfg(hw, vsi, cfg); in ice_vsi_set_rss_flow_fld()
1707 void ice_update_eth_stats(struct ice_vsi *vsi) in ice_update_eth_stats() argument
1710 struct ice_hw *hw = &vsi->back->hw; in ice_update_eth_stats()
1711 struct ice_pf *pf = vsi->back; in ice_update_eth_stats()
1712 u16 vsi_num = vsi->vsi_num; /* HW absolute index of a VSI */ in ice_update_eth_stats()
1714 prev_es = &vsi->eth_stats_prev; in ice_update_eth_stats()
1715 cur_es = &vsi->eth_stats; in ice_update_eth_stats()
1718 vsi->stat_offsets_loaded = false; in ice_update_eth_stats()
1720 ice_stat_update40(hw, GLV_GORCL(vsi_num), vsi->stat_offsets_loaded, in ice_update_eth_stats()
1723 ice_stat_update40(hw, GLV_UPRCL(vsi_num), vsi->stat_offsets_loaded, in ice_update_eth_stats()
1726 ice_stat_update40(hw, GLV_MPRCL(vsi_num), vsi->stat_offsets_loaded, in ice_update_eth_stats()
1729 ice_stat_update40(hw, GLV_BPRCL(vsi_num), vsi->stat_offsets_loaded, in ice_update_eth_stats()
1732 ice_stat_update32(hw, GLV_RDPC(vsi_num), vsi->stat_offsets_loaded, in ice_update_eth_stats()
1735 ice_stat_update40(hw, GLV_GOTCL(vsi_num), vsi->stat_offsets_loaded, in ice_update_eth_stats()
1738 ice_stat_update40(hw, GLV_UPTCL(vsi_num), vsi->stat_offsets_loaded, in ice_update_eth_stats()
1741 ice_stat_update40(hw, GLV_MPTCL(vsi_num), vsi->stat_offsets_loaded, in ice_update_eth_stats()
1744 ice_stat_update40(hw, GLV_BPTCL(vsi_num), vsi->stat_offsets_loaded, in ice_update_eth_stats()
1747 ice_stat_update32(hw, GLV_TEPC(vsi_num), vsi->stat_offsets_loaded, in ice_update_eth_stats()
1750 vsi->stat_offsets_loaded = true; in ice_update_eth_stats()
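ice_update_eth_stats() keeps prev/cur pairs because the hardware exposes free-running 40-bit counters: the first read after a reset only latches an offset (stat_offsets_loaded), and later reads accumulate the delta while compensating for 40-bit wraparound. A hedged sketch in the spirit of ice_stat_update40():

	#include <stdint.h>
	#include <stdbool.h>

	#define CNT_MASK ((1ULL << 40) - 1)	/* 40-bit HW counter */

	static void stat_update40(uint64_t hw_val, bool offsets_loaded,
				  uint64_t *prev, uint64_t *cur)
	{
		uint64_t new_data = hw_val & CNT_MASK;

		/* first read after reset: latch the offset only */
		if (!offsets_loaded)
			*prev = new_data;
		/* accumulate the delta, compensating for wraparound */
		if (new_data >= *prev)
			*cur += new_data - *prev;
		else
			*cur += (new_data + CNT_MASK + 1) - *prev;
		*prev = new_data;
	}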
1806 struct ice_hw *hw = &q_vector->vsi->back->hw; in ice_write_intrl()
1839 struct ice_hw *hw = &q_vector->vsi->back->hw; in __ice_write_itr()
1893 void ice_vsi_cfg_msix(struct ice_vsi *vsi) in ice_vsi_cfg_msix() argument
1895 struct ice_pf *pf = vsi->back; in ice_vsi_cfg_msix()
1900 ice_for_each_q_vector(vsi, i) { in ice_vsi_cfg_msix()
1901 struct ice_q_vector *q_vector = vsi->q_vectors[i]; in ice_vsi_cfg_msix()
1918 ice_cfg_txq_interrupt(vsi, txq, reg_idx, in ice_vsi_cfg_msix()
1924 ice_cfg_rxq_interrupt(vsi, rxq, reg_idx, in ice_vsi_cfg_msix()
1937 int ice_vsi_start_all_rx_rings(struct ice_vsi *vsi) in ice_vsi_start_all_rx_rings() argument
1939 return ice_vsi_ctrl_all_rx_rings(vsi, true); in ice_vsi_start_all_rx_rings()
1948 int ice_vsi_stop_all_rx_rings(struct ice_vsi *vsi) in ice_vsi_stop_all_rx_rings() argument
1950 return ice_vsi_ctrl_all_rx_rings(vsi, false); in ice_vsi_stop_all_rx_rings()
1962 ice_vsi_stop_tx_rings(struct ice_vsi *vsi, enum ice_disq_rst_src rst_src, in ice_vsi_stop_tx_rings() argument
1967 if (vsi->num_txq > ICE_LAN_TXQ_MAX_QDIS) in ice_vsi_stop_tx_rings()
1977 ice_fill_txq_meta(vsi, rings[q_idx], &txq_meta); in ice_vsi_stop_tx_rings()
1978 status = ice_vsi_stop_tx_ring(vsi, rst_src, rel_vmvf_num, in ice_vsi_stop_tx_rings()
1995 ice_vsi_stop_lan_tx_rings(struct ice_vsi *vsi, enum ice_disq_rst_src rst_src, in ice_vsi_stop_lan_tx_rings() argument
1998 return ice_vsi_stop_tx_rings(vsi, rst_src, rel_vmvf_num, vsi->tx_rings, vsi->num_txq); in ice_vsi_stop_lan_tx_rings()
2005 int ice_vsi_stop_xdp_tx_rings(struct ice_vsi *vsi) in ice_vsi_stop_xdp_tx_rings() argument
2007 return ice_vsi_stop_tx_rings(vsi, ICE_NO_RESET, 0, vsi->xdp_rings, vsi->num_xdp_txq); in ice_vsi_stop_xdp_tx_rings()
2016 bool ice_vsi_is_rx_queue_active(struct ice_vsi *vsi) in ice_vsi_is_rx_queue_active() argument
2018 struct ice_pf *pf = vsi->back; in ice_vsi_is_rx_queue_active()
2022 ice_for_each_rxq(vsi, i) { in ice_vsi_is_rx_queue_active()
2026 pf_q = vsi->rxq_map[i]; in ice_vsi_is_rx_queue_active()
2035 static void ice_vsi_set_tc_cfg(struct ice_vsi *vsi) in ice_vsi_set_tc_cfg() argument
2037 if (!test_bit(ICE_FLAG_DCB_ENA, vsi->back->flags)) { in ice_vsi_set_tc_cfg()
2038 vsi->tc_cfg.ena_tc = ICE_DFLT_TRAFFIC_CLASS; in ice_vsi_set_tc_cfg()
2039 vsi->tc_cfg.numtc = 1; in ice_vsi_set_tc_cfg()
2044 ice_vsi_set_dcb_tc_cfg(vsi); in ice_vsi_set_tc_cfg()
2053 void ice_cfg_sw_lldp(struct ice_vsi *vsi, bool tx, bool create) in ice_cfg_sw_lldp() argument
2057 struct ice_pf *pf = vsi->back; in ice_cfg_sw_lldp()
2065 status = eth_fltr(vsi, ETH_P_LLDP, ICE_FLTR_TX, in ice_cfg_sw_lldp()
2069 status = ice_lldp_fltr_add_remove(&pf->hw, vsi->vsi_num, in ice_cfg_sw_lldp()
2072 status = eth_fltr(vsi, ETH_P_LLDP, ICE_FLTR_RX, in ice_cfg_sw_lldp()
2080 vsi->vsi_num, status); in ice_cfg_sw_lldp()
2090 static void ice_set_agg_vsi(struct ice_vsi *vsi) in ice_set_agg_vsi() argument
2092 struct device *dev = ice_pf_to_dev(vsi->back); in ice_set_agg_vsi()
2098 struct ice_pf *pf = vsi->back; in ice_set_agg_vsi()
2111 switch (vsi->type) { in ice_set_agg_vsi()
2135 ice_vsi_type_str(vsi->type)); in ice_set_agg_vsi()
2173 (u8)vsi->tc_cfg.ena_tc); in ice_set_agg_vsi()
2185 status = ice_move_vsi_to_agg(port_info, agg_id, vsi->idx, in ice_set_agg_vsi()
2186 (u8)vsi->tc_cfg.ena_tc); in ice_set_agg_vsi()
2189 vsi->idx, agg_id); in ice_set_agg_vsi()
2199 vsi->agg_node = agg_node; in ice_set_agg_vsi()
2201 vsi->idx, vsi->tc_cfg.ena_tc, vsi->agg_node->agg_id, in ice_set_agg_vsi()
2202 vsi->agg_node->num_vsis); in ice_set_agg_vsi()
2205 static int ice_vsi_cfg_tc_lan(struct ice_pf *pf, struct ice_vsi *vsi) in ice_vsi_cfg_tc_lan() argument
2213 if (!(vsi->tc_cfg.ena_tc & BIT(i))) in ice_vsi_cfg_tc_lan()
2216 if (vsi->type == ICE_VSI_CHNL) { in ice_vsi_cfg_tc_lan()
2217 if (!vsi->alloc_txq && vsi->num_txq) in ice_vsi_cfg_tc_lan()
2218 max_txqs[i] = vsi->num_txq; in ice_vsi_cfg_tc_lan()
2222 max_txqs[i] = vsi->alloc_txq; in ice_vsi_cfg_tc_lan()
2225 if (vsi->type == ICE_VSI_PF) in ice_vsi_cfg_tc_lan()
2226 max_txqs[i] += vsi->num_xdp_txq; in ice_vsi_cfg_tc_lan()
2229 dev_dbg(dev, "vsi->tc_cfg.ena_tc = %d\n", vsi->tc_cfg.ena_tc); in ice_vsi_cfg_tc_lan()
2230 ret = ice_cfg_vsi_lan(vsi->port_info, vsi->idx, vsi->tc_cfg.ena_tc, in ice_vsi_cfg_tc_lan()
2234 vsi->vsi_num, ret); in ice_vsi_cfg_tc_lan()
2245 static int ice_vsi_cfg_def(struct ice_vsi *vsi) in ice_vsi_cfg_def() argument
2247 struct device *dev = ice_pf_to_dev(vsi->back); in ice_vsi_cfg_def()
2248 struct ice_pf *pf = vsi->back; in ice_vsi_cfg_def()
2251 vsi->vsw = pf->first_sw; in ice_vsi_cfg_def()
2253 ret = ice_vsi_alloc_def(vsi, vsi->ch); in ice_vsi_cfg_def()
2258 ret = ice_vsi_alloc_stat_arrays(vsi); in ice_vsi_cfg_def()
2262 ice_alloc_fd_res(vsi); in ice_vsi_cfg_def()
2264 ret = ice_vsi_get_qs(vsi); in ice_vsi_cfg_def()
2267 vsi->idx); in ice_vsi_cfg_def()
2272 ice_vsi_set_rss_params(vsi); in ice_vsi_cfg_def()
2275 ice_vsi_set_tc_cfg(vsi); in ice_vsi_cfg_def()
2278 ret = ice_vsi_init(vsi, vsi->flags); in ice_vsi_cfg_def()
2282 ice_vsi_init_vlan_ops(vsi); in ice_vsi_cfg_def()
2284 switch (vsi->type) { in ice_vsi_cfg_def()
2288 ret = ice_vsi_alloc_q_vectors(vsi); in ice_vsi_cfg_def()
2292 ret = ice_vsi_alloc_rings(vsi); in ice_vsi_cfg_def()
2296 ret = ice_vsi_alloc_ring_stats(vsi); in ice_vsi_cfg_def()
2300 if (ice_is_xdp_ena_vsi(vsi)) { in ice_vsi_cfg_def()
2301 ret = ice_vsi_determine_xdp_res(vsi); in ice_vsi_cfg_def()
2304 ret = ice_prepare_xdp_rings(vsi, vsi->xdp_prog, in ice_vsi_cfg_def()
2310 ice_vsi_map_rings_to_vectors(vsi); in ice_vsi_cfg_def()
2312 vsi->stat_offsets_loaded = false; in ice_vsi_cfg_def()
2315 if (vsi->type != ICE_VSI_CTRL) in ice_vsi_cfg_def()
2321 ice_vsi_cfg_rss_lut_key(vsi); in ice_vsi_cfg_def()
2322 ice_vsi_set_rss_flow_fld(vsi); in ice_vsi_cfg_def()
2324 ice_init_arfs(vsi); in ice_vsi_cfg_def()
2328 ice_vsi_cfg_rss_lut_key(vsi); in ice_vsi_cfg_def()
2329 ice_vsi_set_rss_flow_fld(vsi); in ice_vsi_cfg_def()
2338 ret = ice_vsi_alloc_q_vectors(vsi); in ice_vsi_cfg_def()
2342 ret = ice_vsi_alloc_rings(vsi); in ice_vsi_cfg_def()
2346 ret = ice_vsi_alloc_ring_stats(vsi); in ice_vsi_cfg_def()
2350 vsi->stat_offsets_loaded = false; in ice_vsi_cfg_def()
2357 ice_vsi_cfg_rss_lut_key(vsi); in ice_vsi_cfg_def()
2358 ice_vsi_set_vf_rss_flow_fld(vsi); in ice_vsi_cfg_def()
2362 ret = ice_vsi_alloc_rings(vsi); in ice_vsi_cfg_def()
2366 ret = ice_vsi_alloc_ring_stats(vsi); in ice_vsi_cfg_def()
2382 ice_vsi_free_q_vectors(vsi); in ice_vsi_cfg_def()
2384 ice_vsi_delete_from_hw(vsi); in ice_vsi_cfg_def()
2386 ice_vsi_put_qs(vsi); in ice_vsi_cfg_def()
2388 ice_vsi_free_stats(vsi); in ice_vsi_cfg_def()
2390 ice_vsi_free_arrays(vsi); in ice_vsi_cfg_def()
2398 int ice_vsi_cfg(struct ice_vsi *vsi) in ice_vsi_cfg() argument
2400 struct ice_pf *pf = vsi->back; in ice_vsi_cfg()
2403 if (WARN_ON(vsi->type == ICE_VSI_VF && !vsi->vf)) in ice_vsi_cfg()
2406 ret = ice_vsi_cfg_def(vsi); in ice_vsi_cfg()
2410 ret = ice_vsi_cfg_tc_lan(vsi->back, vsi); in ice_vsi_cfg()
2412 ice_vsi_decfg(vsi); in ice_vsi_cfg()
2414 if (vsi->type == ICE_VSI_CTRL) { in ice_vsi_cfg()
2415 if (vsi->vf) { in ice_vsi_cfg()
2416 WARN_ON(vsi->vf->ctrl_vsi_idx != ICE_NO_VSI); in ice_vsi_cfg()
2417 vsi->vf->ctrl_vsi_idx = vsi->idx; in ice_vsi_cfg()
2420 pf->ctrl_vsi_idx = vsi->idx; in ice_vsi_cfg()
2431 void ice_vsi_decfg(struct ice_vsi *vsi) in ice_vsi_decfg() argument
2433 struct ice_pf *pf = vsi->back; in ice_vsi_decfg()
2436 ice_rm_vsi_lan_cfg(vsi->port_info, vsi->idx); in ice_vsi_decfg()
2437 err = ice_rm_vsi_rdma_cfg(vsi->port_info, vsi->idx); in ice_vsi_decfg()
2440 vsi->vsi_num, err); in ice_vsi_decfg()
2442 if (vsi->xdp_rings) in ice_vsi_decfg()
2446 ice_destroy_xdp_rings(vsi, ICE_XDP_CFG_PART); in ice_vsi_decfg()
2448 ice_vsi_clear_rings(vsi); in ice_vsi_decfg()
2449 ice_vsi_free_q_vectors(vsi); in ice_vsi_decfg()
2450 ice_vsi_put_qs(vsi); in ice_vsi_decfg()
2451 ice_vsi_free_arrays(vsi); in ice_vsi_decfg()
2459 if (vsi->type == ICE_VSI_VF && in ice_vsi_decfg()
2460 vsi->agg_node && vsi->agg_node->valid) in ice_vsi_decfg()
2461 vsi->agg_node->num_vsis--; in ice_vsi_decfg()
2478 struct ice_vsi *vsi; in ice_vsi_setup() local
2488 vsi = ice_vsi_alloc(pf); in ice_vsi_setup()
2489 if (!vsi) { in ice_vsi_setup()
2494 vsi->params = *params; in ice_vsi_setup()
2495 ret = ice_vsi_cfg(vsi); in ice_vsi_setup()
2508 if (!ice_is_safe_mode(pf) && vsi->type == ICE_VSI_PF) { in ice_vsi_setup()
2509 ice_fltr_add_eth(vsi, ETH_P_PAUSE, ICE_FLTR_TX, in ice_vsi_setup()
2511 ice_cfg_sw_lldp(vsi, true, true); in ice_vsi_setup()
2514 if (!vsi->agg_node) in ice_vsi_setup()
2515 ice_set_agg_vsi(vsi); in ice_vsi_setup()
2517 return vsi; in ice_vsi_setup()
2520 ice_vsi_free(vsi); in ice_vsi_setup()
2529 static void ice_vsi_release_msix(struct ice_vsi *vsi) in ice_vsi_release_msix() argument
2531 struct ice_pf *pf = vsi->back; in ice_vsi_release_msix()
2537 ice_for_each_q_vector(vsi, i) { in ice_vsi_release_msix()
2538 struct ice_q_vector *q_vector = vsi->q_vectors[i]; in ice_vsi_release_msix()
2543 wr32(hw, QINT_TQCTL(vsi->txq_map[txq]), 0); in ice_vsi_release_msix()
2544 if (vsi->xdp_rings) { in ice_vsi_release_msix()
2545 u32 xdp_txq = txq + vsi->num_xdp_txq; in ice_vsi_release_msix()
2547 wr32(hw, QINT_TQCTL(vsi->txq_map[xdp_txq]), 0); in ice_vsi_release_msix()
2554 wr32(hw, QINT_RQCTL(vsi->rxq_map[rxq]), 0); in ice_vsi_release_msix()
2566 void ice_vsi_free_irq(struct ice_vsi *vsi) in ice_vsi_free_irq() argument
2568 struct ice_pf *pf = vsi->back; in ice_vsi_free_irq()
2571 if (!vsi->q_vectors || !vsi->irqs_ready) in ice_vsi_free_irq()
2574 ice_vsi_release_msix(vsi); in ice_vsi_free_irq()
2575 if (vsi->type == ICE_VSI_VF) in ice_vsi_free_irq()
2578 vsi->irqs_ready = false; in ice_vsi_free_irq()
2579 ice_free_cpu_rx_rmap(vsi); in ice_vsi_free_irq()
2581 ice_for_each_q_vector(vsi, i) { in ice_vsi_free_irq()
2584 irq_num = vsi->q_vectors[i]->irq.virq; in ice_vsi_free_irq()
2587 if (!vsi->q_vectors[i] || in ice_vsi_free_irq()
2588 !(vsi->q_vectors[i]->num_ring_tx || in ice_vsi_free_irq()
2589 vsi->q_vectors[i]->num_ring_rx)) in ice_vsi_free_irq()
2599 devm_free_irq(ice_pf_to_dev(pf), irq_num, vsi->q_vectors[i]); in ice_vsi_free_irq()
2607 void ice_vsi_free_tx_rings(struct ice_vsi *vsi) in ice_vsi_free_tx_rings() argument
2611 if (!vsi->tx_rings) in ice_vsi_free_tx_rings()
2614 ice_for_each_txq(vsi, i) in ice_vsi_free_tx_rings()
2615 if (vsi->tx_rings[i] && vsi->tx_rings[i]->desc) in ice_vsi_free_tx_rings()
2616 ice_free_tx_ring(vsi->tx_rings[i]); in ice_vsi_free_tx_rings()
2623 void ice_vsi_free_rx_rings(struct ice_vsi *vsi) in ice_vsi_free_rx_rings() argument
2627 if (!vsi->rx_rings) in ice_vsi_free_rx_rings()
2630 ice_for_each_rxq(vsi, i) in ice_vsi_free_rx_rings()
2631 if (vsi->rx_rings[i] && vsi->rx_rings[i]->desc) in ice_vsi_free_rx_rings()
2632 ice_free_rx_ring(vsi->rx_rings[i]); in ice_vsi_free_rx_rings()
2639 void ice_vsi_close(struct ice_vsi *vsi) in ice_vsi_close() argument
2641 if (!test_and_set_bit(ICE_VSI_DOWN, vsi->state)) in ice_vsi_close()
2642 ice_down(vsi); in ice_vsi_close()
2644 ice_vsi_clear_napi_queues(vsi); in ice_vsi_close()
2645 ice_vsi_free_irq(vsi); in ice_vsi_close()
2646 ice_vsi_free_tx_rings(vsi); in ice_vsi_close()
2647 ice_vsi_free_rx_rings(vsi); in ice_vsi_close()
2655 int ice_ena_vsi(struct ice_vsi *vsi, bool locked) in ice_ena_vsi() argument
2659 if (!test_bit(ICE_VSI_NEEDS_RESTART, vsi->state)) in ice_ena_vsi()
2662 clear_bit(ICE_VSI_NEEDS_RESTART, vsi->state); in ice_ena_vsi()
2664 if (vsi->netdev && (vsi->type == ICE_VSI_PF || in ice_ena_vsi()
2665 vsi->type == ICE_VSI_SF)) { in ice_ena_vsi()
2666 if (netif_running(vsi->netdev)) { in ice_ena_vsi()
2670 err = ice_open_internal(vsi->netdev); in ice_ena_vsi()
2675 } else if (vsi->type == ICE_VSI_CTRL) { in ice_ena_vsi()
2676 err = ice_vsi_open_ctrl(vsi); in ice_ena_vsi()
2687 void ice_dis_vsi(struct ice_vsi *vsi, bool locked) in ice_dis_vsi() argument
2689 bool already_down = test_bit(ICE_VSI_DOWN, vsi->state); in ice_dis_vsi()
2691 set_bit(ICE_VSI_NEEDS_RESTART, vsi->state); in ice_dis_vsi()
2693 if (vsi->netdev && (vsi->type == ICE_VSI_PF || in ice_dis_vsi()
2694 vsi->type == ICE_VSI_SF)) { in ice_dis_vsi()
2695 if (netif_running(vsi->netdev)) { in ice_dis_vsi()
2698 already_down = test_bit(ICE_VSI_DOWN, vsi->state); in ice_dis_vsi()
2700 ice_vsi_close(vsi); in ice_dis_vsi()
2705 ice_vsi_close(vsi); in ice_dis_vsi()
2707 } else if (vsi->type == ICE_VSI_CTRL && !already_down) { in ice_dis_vsi()
2708 ice_vsi_close(vsi); in ice_dis_vsi()
2719 void ice_vsi_set_napi_queues(struct ice_vsi *vsi) in ice_vsi_set_napi_queues() argument
2721 struct net_device *netdev = vsi->netdev; in ice_vsi_set_napi_queues()
2727 ice_for_each_rxq(vsi, q_idx) in ice_vsi_set_napi_queues()
2729 &vsi->rx_rings[q_idx]->q_vector->napi); in ice_vsi_set_napi_queues()
2731 ice_for_each_txq(vsi, q_idx) in ice_vsi_set_napi_queues()
2733 &vsi->tx_rings[q_idx]->q_vector->napi); in ice_vsi_set_napi_queues()
2735 ice_for_each_q_vector(vsi, v_idx) { in ice_vsi_set_napi_queues()
2736 struct ice_q_vector *q_vector = vsi->q_vectors[v_idx]; in ice_vsi_set_napi_queues()
2749 void ice_vsi_clear_napi_queues(struct ice_vsi *vsi) in ice_vsi_clear_napi_queues() argument
2751 struct net_device *netdev = vsi->netdev; in ice_vsi_clear_napi_queues()
2757 ice_for_each_txq(vsi, q_idx) in ice_vsi_clear_napi_queues()
2760 ice_for_each_rxq(vsi, q_idx) in ice_vsi_clear_napi_queues()
2772 void ice_napi_add(struct ice_vsi *vsi) in ice_napi_add() argument
2776 if (!vsi->netdev) in ice_napi_add()
2779 ice_for_each_q_vector(vsi, v_idx) in ice_napi_add()
2780 netif_napi_add(vsi->netdev, &vsi->q_vectors[v_idx]->napi, in ice_napi_add()
2790 int ice_vsi_release(struct ice_vsi *vsi) in ice_vsi_release() argument
2794 if (!vsi->back) in ice_vsi_release()
2796 pf = vsi->back; in ice_vsi_release()
2799 ice_rss_clean(vsi); in ice_vsi_release()
2801 ice_vsi_close(vsi); in ice_vsi_release()
2806 if (!ice_is_safe_mode(pf) && vsi->type == ICE_VSI_PF && in ice_vsi_release()
2808 ice_cfg_sw_lldp(vsi, false, false); in ice_vsi_release()
2810 ice_vsi_decfg(vsi); in ice_vsi_release()
2817 ice_vsi_delete(vsi); in ice_vsi_release()
2830 ice_vsi_rebuild_get_coalesce(struct ice_vsi *vsi, in ice_vsi_rebuild_get_coalesce() argument
2835 ice_for_each_q_vector(vsi, i) { in ice_vsi_rebuild_get_coalesce()
2836 struct ice_q_vector *q_vector = vsi->q_vectors[i]; in ice_vsi_rebuild_get_coalesce()
2842 if (i < vsi->num_txq) in ice_vsi_rebuild_get_coalesce()
2844 if (i < vsi->num_rxq) in ice_vsi_rebuild_get_coalesce()
2848 return vsi->num_q_vectors; in ice_vsi_rebuild_get_coalesce()
2862 ice_vsi_rebuild_set_coalesce(struct ice_vsi *vsi, in ice_vsi_rebuild_set_coalesce() argument
2868 if ((size && !coalesce) || !vsi) in ice_vsi_rebuild_set_coalesce()
2877 for (i = 0; i < size && i < vsi->num_q_vectors; i++) { in ice_vsi_rebuild_set_coalesce()
2893 if (i < vsi->alloc_rxq && coalesce[i].rx_valid) { in ice_vsi_rebuild_set_coalesce()
2894 rc = &vsi->q_vectors[i]->rx; in ice_vsi_rebuild_set_coalesce()
2897 } else if (i < vsi->alloc_rxq) { in ice_vsi_rebuild_set_coalesce()
2898 rc = &vsi->q_vectors[i]->rx; in ice_vsi_rebuild_set_coalesce()
2903 if (i < vsi->alloc_txq && coalesce[i].tx_valid) { in ice_vsi_rebuild_set_coalesce()
2904 rc = &vsi->q_vectors[i]->tx; in ice_vsi_rebuild_set_coalesce()
2907 } else if (i < vsi->alloc_txq) { in ice_vsi_rebuild_set_coalesce()
2908 rc = &vsi->q_vectors[i]->tx; in ice_vsi_rebuild_set_coalesce()
2913 vsi->q_vectors[i]->intrl = coalesce[i].intrl; in ice_vsi_rebuild_set_coalesce()
2914 ice_set_q_vector_intrl(vsi->q_vectors[i]); in ice_vsi_rebuild_set_coalesce()
2920 for (; i < vsi->num_q_vectors; i++) { in ice_vsi_rebuild_set_coalesce()
2922 rc = &vsi->q_vectors[i]->tx; in ice_vsi_rebuild_set_coalesce()
2927 rc = &vsi->q_vectors[i]->rx; in ice_vsi_rebuild_set_coalesce()
2931 vsi->q_vectors[i]->intrl = coalesce[0].intrl; in ice_vsi_rebuild_set_coalesce()
2932 ice_set_q_vector_intrl(vsi->q_vectors[i]); in ice_vsi_rebuild_set_coalesce()
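The get/set coalesce pair preserves per-vector interrupt moderation across a rebuild: vectors that survive get their own saved ITR/INTRL values back, and vectors the rebuild added inherit entry 0 (the trailing loop above). A small sketch of that restore policy; the struct is illustrative and, like the driver, it assumes at least one saved entry:

	#include <stddef.h>

	struct coal_saved { unsigned short itr_tx, itr_rx, intrl; };

	static void restore_coalesce(const struct coal_saved *saved,
				     size_t saved_n,
				     struct coal_saved *vec, size_t vec_n)
	{
		size_t i;

		/* surviving vectors keep their own settings */
		for (i = 0; i < saved_n && i < vec_n; i++)
			vec[i] = saved[i];
		/* newly added vectors inherit vector 0's settings */
		for (; i < vec_n; i++)
			vec[i] = saved[0];
	}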
2941 ice_vsi_realloc_stat_arrays(struct ice_vsi *vsi) in ice_vsi_realloc_stat_arrays() argument
2943 u16 req_txq = vsi->req_txq ? vsi->req_txq : vsi->alloc_txq; in ice_vsi_realloc_stat_arrays()
2944 u16 req_rxq = vsi->req_rxq ? vsi->req_rxq : vsi->alloc_rxq; in ice_vsi_realloc_stat_arrays()
2948 struct ice_pf *pf = vsi->back; in ice_vsi_realloc_stat_arrays()
2949 u16 prev_txq = vsi->alloc_txq; in ice_vsi_realloc_stat_arrays()
2950 u16 prev_rxq = vsi->alloc_rxq; in ice_vsi_realloc_stat_arrays()
2953 vsi_stat = pf->vsi_stats[vsi->idx]; in ice_vsi_realloc_stat_arrays()
3006 int ice_vsi_rebuild(struct ice_vsi *vsi, u32 vsi_flags) in ice_vsi_rebuild() argument
3013 if (!vsi) in ice_vsi_rebuild()
3016 vsi->flags = vsi_flags; in ice_vsi_rebuild()
3017 pf = vsi->back; in ice_vsi_rebuild()
3018 if (WARN_ON(vsi->type == ICE_VSI_VF && !vsi->vf)) in ice_vsi_rebuild()
3021 mutex_lock(&vsi->xdp_state_lock); in ice_vsi_rebuild()
3023 ret = ice_vsi_realloc_stat_arrays(vsi); in ice_vsi_rebuild()
3027 ice_vsi_decfg(vsi); in ice_vsi_rebuild()
3028 ret = ice_vsi_cfg_def(vsi); in ice_vsi_rebuild()
3032 coalesce = kcalloc(vsi->num_q_vectors, in ice_vsi_rebuild()
3039 prev_num_q_vectors = ice_vsi_rebuild_get_coalesce(vsi, coalesce); in ice_vsi_rebuild()
3041 ret = ice_vsi_cfg_tc_lan(pf, vsi); in ice_vsi_rebuild()
3052 ice_vsi_rebuild_set_coalesce(vsi, coalesce, prev_num_q_vectors); in ice_vsi_rebuild()
3053 clear_bit(ICE_VSI_REBUILD_PENDING, vsi->state); in ice_vsi_rebuild()
3059 ice_vsi_decfg(vsi); in ice_vsi_rebuild()
3061 mutex_unlock(&vsi->xdp_state_lock); in ice_vsi_rebuild()
3110 static void ice_vsi_update_q_map(struct ice_vsi *vsi, struct ice_vsi_ctx *ctx) in ice_vsi_update_q_map() argument
3112 vsi->info.mapping_flags = ctx->info.mapping_flags; in ice_vsi_update_q_map()
3113 memcpy(&vsi->info.q_mapping, &ctx->info.q_mapping, in ice_vsi_update_q_map()
3114 sizeof(vsi->info.q_mapping)); in ice_vsi_update_q_map()
3115 memcpy(&vsi->info.tc_mapping, ctx->info.tc_mapping, in ice_vsi_update_q_map()
3116 sizeof(vsi->info.tc_mapping)); in ice_vsi_update_q_map()
3124 void ice_vsi_cfg_netdev_tc(struct ice_vsi *vsi, u8 ena_tc) in ice_vsi_cfg_netdev_tc() argument
3126 struct net_device *netdev = vsi->netdev; in ice_vsi_cfg_netdev_tc()
3127 struct ice_pf *pf = vsi->back; in ice_vsi_cfg_netdev_tc()
3128 int numtc = vsi->tc_cfg.numtc; in ice_vsi_cfg_netdev_tc()
3137 if (vsi->type == ICE_VSI_CHNL) in ice_vsi_cfg_netdev_tc()
3145 if (vsi->type == ICE_VSI_PF && ice_is_adq_active(pf)) in ice_vsi_cfg_netdev_tc()
3146 numtc = vsi->all_numtc; in ice_vsi_cfg_netdev_tc()
3154 if (vsi->tc_cfg.ena_tc & BIT(i)) in ice_vsi_cfg_netdev_tc()
3156 vsi->tc_cfg.tc_info[i].netdev_tc, in ice_vsi_cfg_netdev_tc()
3157 vsi->tc_cfg.tc_info[i].qcount_tx, in ice_vsi_cfg_netdev_tc()
3158 vsi->tc_cfg.tc_info[i].qoffset); in ice_vsi_cfg_netdev_tc()
3161 if (!(vsi->all_enatc & BIT(i))) in ice_vsi_cfg_netdev_tc()
3163 if (!vsi->mqprio_qopt.qopt.count[i]) in ice_vsi_cfg_netdev_tc()
3166 vsi->mqprio_qopt.qopt.count[i], in ice_vsi_cfg_netdev_tc()
3167 vsi->mqprio_qopt.qopt.offset[i]); in ice_vsi_cfg_netdev_tc()
3177 netdev_tc = vsi->tc_cfg.tc_info[ets_tc].netdev_tc; in ice_vsi_cfg_netdev_tc()
3191 ice_vsi_setup_q_map_mqprio(struct ice_vsi *vsi, struct ice_vsi_ctx *ctxt, in ice_vsi_setup_q_map_mqprio() argument
3195 u16 tc0_offset = vsi->mqprio_qopt.qopt.offset[0]; in ice_vsi_setup_q_map_mqprio()
3196 int tc0_qcount = vsi->mqprio_qopt.qopt.count[0]; in ice_vsi_setup_q_map_mqprio()
3201 vsi->tc_cfg.ena_tc = ena_tc ? ena_tc : 1; in ice_vsi_setup_q_map_mqprio()
3208 if (!(vsi->tc_cfg.ena_tc & BIT(i))) { in ice_vsi_setup_q_map_mqprio()
3210 vsi->tc_cfg.tc_info[i].qoffset = 0; in ice_vsi_setup_q_map_mqprio()
3211 vsi->tc_cfg.tc_info[i].qcount_rx = 1; in ice_vsi_setup_q_map_mqprio()
3212 vsi->tc_cfg.tc_info[i].qcount_tx = 1; in ice_vsi_setup_q_map_mqprio()
3213 vsi->tc_cfg.tc_info[i].netdev_tc = 0; in ice_vsi_setup_q_map_mqprio()
3218 offset = vsi->mqprio_qopt.qopt.offset[i]; in ice_vsi_setup_q_map_mqprio()
3219 qcount_rx = vsi->mqprio_qopt.qopt.count[i]; in ice_vsi_setup_q_map_mqprio()
3220 qcount_tx = vsi->mqprio_qopt.qopt.count[i]; in ice_vsi_setup_q_map_mqprio()
3221 vsi->tc_cfg.tc_info[i].qoffset = offset; in ice_vsi_setup_q_map_mqprio()
3222 vsi->tc_cfg.tc_info[i].qcount_rx = qcount_rx; in ice_vsi_setup_q_map_mqprio()
3223 vsi->tc_cfg.tc_info[i].qcount_tx = qcount_tx; in ice_vsi_setup_q_map_mqprio()
3224 vsi->tc_cfg.tc_info[i].netdev_tc = netdev_tc++; in ice_vsi_setup_q_map_mqprio()
3227 if (vsi->all_numtc && vsi->all_numtc != vsi->tc_cfg.numtc) { in ice_vsi_setup_q_map_mqprio()
3229 if (!(vsi->all_enatc & BIT(i))) in ice_vsi_setup_q_map_mqprio()
3231 offset = vsi->mqprio_qopt.qopt.offset[i]; in ice_vsi_setup_q_map_mqprio()
3232 qcount_rx = vsi->mqprio_qopt.qopt.count[i]; in ice_vsi_setup_q_map_mqprio()
3233 qcount_tx = vsi->mqprio_qopt.qopt.count[i]; in ice_vsi_setup_q_map_mqprio()
3238 if (new_txq > vsi->alloc_txq) { in ice_vsi_setup_q_map_mqprio()
3239 …dev_err(ice_pf_to_dev(vsi->back), "Trying to use more Tx queues (%u), than were allocated (%u)!\n", in ice_vsi_setup_q_map_mqprio()
3240 new_txq, vsi->alloc_txq); in ice_vsi_setup_q_map_mqprio()
3245 if (new_rxq > vsi->alloc_rxq) { in ice_vsi_setup_q_map_mqprio()
3246 …dev_err(ice_pf_to_dev(vsi->back), "Trying to use more Rx queues (%u), than were allocated (%u)!\n", in ice_vsi_setup_q_map_mqprio()
3247 new_rxq, vsi->alloc_rxq); in ice_vsi_setup_q_map_mqprio()
3252 vsi->num_txq = new_txq; in ice_vsi_setup_q_map_mqprio()
3253 vsi->num_rxq = new_rxq; in ice_vsi_setup_q_map_mqprio()
3257 ctxt->info.q_mapping[0] = cpu_to_le16(vsi->rxq_map[0]); in ice_vsi_setup_q_map_mqprio()
3263 if (tc0_qcount && tc0_qcount < vsi->num_rxq) { in ice_vsi_setup_q_map_mqprio()
3264 vsi->cnt_q_avail = vsi->num_rxq - tc0_qcount; in ice_vsi_setup_q_map_mqprio()
3265 vsi->next_base_q = tc0_qcount; in ice_vsi_setup_q_map_mqprio()
3267 dev_dbg(ice_pf_to_dev(vsi->back), "vsi->num_txq = %d\n", vsi->num_txq); in ice_vsi_setup_q_map_mqprio()
3268 dev_dbg(ice_pf_to_dev(vsi->back), "vsi->num_rxq = %d\n", vsi->num_rxq); in ice_vsi_setup_q_map_mqprio()
3269 dev_dbg(ice_pf_to_dev(vsi->back), "all_numtc %u, all_enatc: 0x%04x, tc_cfg.numtc %u\n", in ice_vsi_setup_q_map_mqprio()
3270 vsi->all_numtc, vsi->all_enatc, vsi->tc_cfg.numtc); in ice_vsi_setup_q_map_mqprio()
3282 int ice_vsi_cfg_tc(struct ice_vsi *vsi, u8 ena_tc) in ice_vsi_cfg_tc() argument
3285 struct ice_pf *pf = vsi->back; in ice_vsi_cfg_tc()
3293 if (vsi->tc_cfg.ena_tc == ena_tc && in ice_vsi_cfg_tc()
3294 vsi->mqprio_qopt.mode != TC_MQPRIO_MODE_CHANNEL) in ice_vsi_cfg_tc()
3302 max_txqs[i] = vsi->alloc_txq; in ice_vsi_cfg_tc()
3306 if (vsi->type == ICE_VSI_CHNL && in ice_vsi_cfg_tc()
3308 max_txqs[i] = vsi->num_txq; in ice_vsi_cfg_tc()
3311 memcpy(&old_tc_cfg, &vsi->tc_cfg, sizeof(old_tc_cfg)); in ice_vsi_cfg_tc()
3312 vsi->tc_cfg.ena_tc = ena_tc; in ice_vsi_cfg_tc()
3313 vsi->tc_cfg.numtc = num_tc; in ice_vsi_cfg_tc()
3320 ctx->info = vsi->info; in ice_vsi_cfg_tc()
3322 if (vsi->type == ICE_VSI_PF && in ice_vsi_cfg_tc()
3324 ret = ice_vsi_setup_q_map_mqprio(vsi, ctx, ena_tc); in ice_vsi_cfg_tc()
3326 ret = ice_vsi_setup_q_map(vsi, ctx); in ice_vsi_cfg_tc()
3329 memcpy(&vsi->tc_cfg, &old_tc_cfg, sizeof(vsi->tc_cfg)); in ice_vsi_cfg_tc()
3335 ret = ice_update_vsi(&pf->hw, vsi->idx, ctx, NULL); in ice_vsi_cfg_tc()
3341 if (vsi->type == ICE_VSI_PF && in ice_vsi_cfg_tc()
3343 ret = ice_cfg_vsi_lan(vsi->port_info, vsi->idx, 1, max_txqs); in ice_vsi_cfg_tc()
3345 ret = ice_cfg_vsi_lan(vsi->port_info, vsi->idx, in ice_vsi_cfg_tc()
3346 vsi->tc_cfg.ena_tc, max_txqs); in ice_vsi_cfg_tc()
3350 vsi->vsi_num, ret); in ice_vsi_cfg_tc()
3353 ice_vsi_update_q_map(vsi, ctx); in ice_vsi_cfg_tc()
3354 vsi->info.valid_sections = 0; in ice_vsi_cfg_tc()
3356 ice_vsi_cfg_netdev_tc(vsi, ena_tc); in ice_vsi_cfg_tc()
3423 bool ice_is_vsi_dflt_vsi(struct ice_vsi *vsi) in ice_is_vsi_dflt_vsi() argument
3425 return ice_check_if_dflt_vsi(vsi->port_info, vsi->idx, NULL); in ice_is_vsi_dflt_vsi()
3438 int ice_set_dflt_vsi(struct ice_vsi *vsi) in ice_set_dflt_vsi() argument
3443 if (!vsi) in ice_set_dflt_vsi()
3446 dev = ice_pf_to_dev(vsi->back); in ice_set_dflt_vsi()
3448 if (ice_lag_is_switchdev_running(vsi->back)) { in ice_set_dflt_vsi()
3450 vsi->vsi_num); in ice_set_dflt_vsi()
3455 if (ice_is_vsi_dflt_vsi(vsi)) { in ice_set_dflt_vsi()
3457 vsi->vsi_num); in ice_set_dflt_vsi()
3461 status = ice_cfg_dflt_vsi(vsi->port_info, vsi->idx, true, ICE_FLTR_RX); in ice_set_dflt_vsi()
3464 vsi->vsi_num, status); in ice_set_dflt_vsi()
3479 int ice_clear_dflt_vsi(struct ice_vsi *vsi) in ice_clear_dflt_vsi() argument
3484 if (!vsi) in ice_clear_dflt_vsi()
3487 dev = ice_pf_to_dev(vsi->back); in ice_clear_dflt_vsi()
3490 if (!ice_is_dflt_vsi_in_use(vsi->port_info)) in ice_clear_dflt_vsi()
3493 status = ice_cfg_dflt_vsi(vsi->port_info, vsi->idx, false, in ice_clear_dflt_vsi()
3497 vsi->vsi_num, status); in ice_clear_dflt_vsi()
3510 int ice_get_link_speed_mbps(struct ice_vsi *vsi) in ice_get_link_speed_mbps() argument
3514 link_speed = vsi->port_info->phy.link_info.link_speed; in ice_get_link_speed_mbps()
3525 int ice_get_link_speed_kbps(struct ice_vsi *vsi) in ice_get_link_speed_kbps() argument
3529 speed_mbps = ice_get_link_speed_mbps(vsi); in ice_get_link_speed_kbps()
3543 int ice_set_min_bw_limit(struct ice_vsi *vsi, u64 min_tx_rate) in ice_set_min_bw_limit() argument
3545 struct ice_pf *pf = vsi->back; in ice_set_min_bw_limit()
3551 if (!vsi->port_info) { in ice_set_min_bw_limit()
3553 vsi->idx, vsi->type); in ice_set_min_bw_limit()
3557 speed = ice_get_link_speed_kbps(vsi); in ice_set_min_bw_limit()
3560 min_tx_rate, ice_vsi_type_str(vsi->type), vsi->idx, in ice_set_min_bw_limit()
3567 status = ice_cfg_vsi_bw_lmt_per_tc(vsi->port_info, vsi->idx, 0, in ice_set_min_bw_limit()
3571 min_tx_rate, ice_vsi_type_str(vsi->type), in ice_set_min_bw_limit()
3572 vsi->idx); in ice_set_min_bw_limit()
3577 min_tx_rate, ice_vsi_type_str(vsi->type)); in ice_set_min_bw_limit()
3579 status = ice_cfg_vsi_bw_dflt_lmt_per_tc(vsi->port_info, in ice_set_min_bw_limit()
3580 vsi->idx, 0, in ice_set_min_bw_limit()
3584 ice_vsi_type_str(vsi->type), vsi->idx); in ice_set_min_bw_limit()
3589 ice_vsi_type_str(vsi->type), vsi->idx); in ice_set_min_bw_limit()
3604 int ice_set_max_bw_limit(struct ice_vsi *vsi, u64 max_tx_rate) in ice_set_max_bw_limit() argument
3606 struct ice_pf *pf = vsi->back; in ice_set_max_bw_limit()
3612 if (!vsi->port_info) { in ice_set_max_bw_limit()
3614 vsi->idx, vsi->type); in ice_set_max_bw_limit()
3618 speed = ice_get_link_speed_kbps(vsi); in ice_set_max_bw_limit()
3621 max_tx_rate, ice_vsi_type_str(vsi->type), vsi->idx, in ice_set_max_bw_limit()
3628 status = ice_cfg_vsi_bw_lmt_per_tc(vsi->port_info, vsi->idx, 0, in ice_set_max_bw_limit()
3632 max_tx_rate, ice_vsi_type_str(vsi->type), in ice_set_max_bw_limit()
3633 vsi->idx); in ice_set_max_bw_limit()
3638 max_tx_rate, ice_vsi_type_str(vsi->type), vsi->idx); in ice_set_max_bw_limit()
3640 status = ice_cfg_vsi_bw_dflt_lmt_per_tc(vsi->port_info, in ice_set_max_bw_limit()
3641 vsi->idx, 0, in ice_set_max_bw_limit()
3645 ice_vsi_type_str(vsi->type), vsi->idx); in ice_set_max_bw_limit()
3650 ice_vsi_type_str(vsi->type), vsi->idx); in ice_set_max_bw_limit()
3661 int ice_set_link(struct ice_vsi *vsi, bool ena) in ice_set_link() argument
3663 struct device *dev = ice_pf_to_dev(vsi->back); in ice_set_link()
3664 struct ice_port_info *pi = vsi->port_info; in ice_set_link()
3668 if (vsi->type != ICE_VSI_PF) in ice_set_link()
3710 int ice_vsi_add_vlan_zero(struct ice_vsi *vsi) in ice_vsi_add_vlan_zero() argument
3712 struct ice_vsi_vlan_ops *vlan_ops = ice_get_compat_vsi_vlan_ops(vsi); in ice_vsi_add_vlan_zero()
3717 err = vlan_ops->add_vlan(vsi, &vlan); in ice_vsi_add_vlan_zero()
3722 if (!ice_is_dvm_ena(&vsi->back->hw)) in ice_vsi_add_vlan_zero()
3726 err = vlan_ops->add_vlan(vsi, &vlan); in ice_vsi_add_vlan_zero()
3740 int ice_vsi_del_vlan_zero(struct ice_vsi *vsi) in ice_vsi_del_vlan_zero() argument
3742 struct ice_vsi_vlan_ops *vlan_ops = ice_get_compat_vsi_vlan_ops(vsi); in ice_vsi_del_vlan_zero()
3747 err = vlan_ops->del_vlan(vsi, &vlan); in ice_vsi_del_vlan_zero()
3752 if (!ice_is_dvm_ena(&vsi->back->hw)) in ice_vsi_del_vlan_zero()
3756 err = vlan_ops->del_vlan(vsi, &vlan); in ice_vsi_del_vlan_zero()
3763 return ice_clear_vsi_promisc(&vsi->back->hw, vsi->idx, in ice_vsi_del_vlan_zero()
3774 static u16 ice_vsi_num_zero_vlans(struct ice_vsi *vsi) in ice_vsi_num_zero_vlans() argument
3779 if (vsi->type == ICE_VSI_VF) { in ice_vsi_num_zero_vlans()
3780 if (WARN_ON(!vsi->vf)) in ice_vsi_num_zero_vlans()
3783 if (ice_vf_is_port_vlan_ena(vsi->vf)) in ice_vsi_num_zero_vlans()
3787 if (ice_is_dvm_ena(&vsi->back->hw)) in ice_vsi_num_zero_vlans()
3797 bool ice_vsi_has_non_zero_vlans(struct ice_vsi *vsi) in ice_vsi_has_non_zero_vlans() argument
3799 return (vsi->num_vlan > ice_vsi_num_zero_vlans(vsi)); in ice_vsi_has_non_zero_vlans()
3806 u16 ice_vsi_num_non_zero_vlans(struct ice_vsi *vsi) in ice_vsi_num_non_zero_vlans() argument
3808 return (vsi->num_vlan - ice_vsi_num_zero_vlans(vsi)); in ice_vsi_num_non_zero_vlans()
3891 ice_vsi_update_security(struct ice_vsi *vsi, void (*fill)(struct ice_vsi_ctx *)) in ice_vsi_update_security() argument
3895 ctx.info = vsi->info; in ice_vsi_update_security()
3899 if (ice_update_vsi(&vsi->back->hw, vsi->idx, &ctx, NULL)) in ice_vsi_update_security()
3902 vsi->info = ctx.info; in ice_vsi_update_security()
3952 ice_vsi_update_local_lb(struct ice_vsi *vsi, bool set) in ice_vsi_update_local_lb() argument
3955 .info = vsi->info, in ice_vsi_update_local_lb()
3964 if (ice_update_vsi(&vsi->back->hw, vsi->idx, &ctx, NULL)) in ice_vsi_update_local_lb()
3967 vsi->info = ctx.info; in ice_vsi_update_local_lb()