Lines Matching +full:tx +full:- +full:sched +full:- +full:wfq

1 // SPDX-License-Identifier: GPL-2.0
8 * ice_sched_add_root_node - Insert the Tx scheduler root node in SW DB
23 return -EINVAL; in ice_sched_add_root_node()
25 hw = pi->hw; in ice_sched_add_root_node()
29 return -ENOMEM; in ice_sched_add_root_node()
31 root->children = devm_kcalloc(ice_hw_to_dev(hw), hw->max_children[0], in ice_sched_add_root_node()
32 sizeof(*root->children), GFP_KERNEL); in ice_sched_add_root_node()
33 if (!root->children) { in ice_sched_add_root_node()
35 return -ENOMEM; in ice_sched_add_root_node()
38 memcpy(&root->info, info, sizeof(*info)); in ice_sched_add_root_node()
39 pi->root = root; in ice_sched_add_root_node()
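
Condensed, the matched lines above allocate the root, size its children array from hw->max_children[0], copy the firmware element info, and publish the result as pi->root. A minimal user-space sketch of that shape (struct and function names are stand-ins, not the kernel's; error codes reduced to -1):

    #include <stdlib.h>
    #include <string.h>

    struct elem_info { unsigned int node_teid; };   /* stub for ice_aqc_txsched_elem_data */

    struct sched_node {
        struct sched_node **children;
        struct elem_info info;
    };

    struct port { struct sched_node *root; unsigned int max_children0; };

    /* Allocate the root, size its children array from the layer-0 fan-out,
     * copy the element info, publish as pi->root.
     */
    static int add_root_node(struct port *pi, const struct elem_info *info)
    {
        struct sched_node *root = calloc(1, sizeof(*root));

        if (!root)
            return -1;
        root->children = calloc(pi->max_children0, sizeof(*root->children));
        if (!root->children) {
            free(root);
            return -1;
        }
        memcpy(&root->info, info, sizeof(*info));
        pi->root = root;
        return 0;
    }
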
44 * ice_sched_find_node_by_teid - Find the Tx scheduler node in SW DB
45 * @start_node: pointer to the starting ice_sched_node struct in a sub-tree
52 * This function needs to be called when holding the port_info->sched_lock
64 if (!start_node->num_children || in ice_sched_find_node_by_teid()
65 start_node->tx_sched_layer >= ICE_AQC_TOPO_MAX_LEVEL_NUM || in ice_sched_find_node_by_teid()
66 start_node->info.data.elem_type == ICE_AQC_ELEM_TYPE_LEAF) in ice_sched_find_node_by_teid()
70 for (i = 0; i < start_node->num_children; i++) in ice_sched_find_node_by_teid()
71 if (ICE_TXSCHED_GET_NODE_TEID(start_node->children[i]) == teid) in ice_sched_find_node_by_teid()
72 return start_node->children[i]; in ice_sched_find_node_by_teid()
74 /* Search within each child's sub-tree */ in ice_sched_find_node_by_teid()
75 for (i = 0; i < start_node->num_children; i++) { in ice_sched_find_node_by_teid()
78 tmp = ice_sched_find_node_by_teid(start_node->children[i], in ice_sched_find_node_by_teid()
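
The matched lines outline a depth-first search: the start node and its direct children are checked first, then the search recurses into each child's sub-tree. A self-contained rendering of that traversal, assuming a plain teid field where the kernel goes through ICE_TXSCHED_GET_NODE_TEID():

    #include <stddef.h>

    struct node {
        unsigned int teid;              /* what ICE_TXSCHED_GET_NODE_TEID() reads */
        unsigned char num_children;
        struct node **children;
    };

    /* Depth-first search mirroring ice_sched_find_node_by_teid(). */
    static struct node *find_node_by_teid(struct node *start, unsigned int teid)
    {
        unsigned char i;

        if (!start)
            return NULL;
        if (start->teid == teid)
            return start;
        /* direct children first ... */
        for (i = 0; i < start->num_children; i++)
            if (start->children[i]->teid == teid)
                return start->children[i];
        /* ... then each child's sub-tree */
        for (i = 0; i < start->num_children; i++) {
            struct node *tmp = find_node_by_teid(start->children[i], teid);

            if (tmp)
                return tmp;
        }
        return NULL;
    }
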
88 * ice_aqc_send_sched_elem_cmd - send scheduling elements cmd
110 cmd->num_elem_req = cpu_to_le16(elems_req); in ice_aqc_send_sched_elem_cmd()
114 *elems_resp = le16_to_cpu(cmd->num_elem_resp); in ice_aqc_send_sched_elem_cmd()
120 * ice_aq_query_sched_elems - query scheduler elements
141 * ice_sched_add_node - Insert the Tx scheduler node in SW DB
161 return -EINVAL; in ice_sched_add_node()
163 hw = pi->hw; in ice_sched_add_node()
166 parent = ice_sched_find_node_by_teid(pi->root, in ice_sched_add_node()
167 le32_to_cpu(info->parent_teid)); in ice_sched_add_node()
170 le32_to_cpu(info->parent_teid)); in ice_sched_add_node()
171 return -EINVAL; in ice_sched_add_node()
177 status = ice_sched_query_elem(hw, le32_to_cpu(info->node_teid), &elem); in ice_sched_add_node()
186 return -ENOMEM; in ice_sched_add_node()
187 if (hw->max_children[layer]) { in ice_sched_add_node()
188 node->children = devm_kcalloc(ice_hw_to_dev(hw), in ice_sched_add_node()
189 hw->max_children[layer], in ice_sched_add_node()
190 sizeof(*node->children), GFP_KERNEL); in ice_sched_add_node()
191 if (!node->children) { in ice_sched_add_node()
193 return -ENOMEM; in ice_sched_add_node()
197 node->in_use = true; in ice_sched_add_node()
198 node->parent = parent; in ice_sched_add_node()
199 node->tx_sched_layer = layer; in ice_sched_add_node()
200 parent->children[parent->num_children++] = node; in ice_sched_add_node()
201 node->info = elem; in ice_sched_add_node()
206 * ice_aq_delete_sched_elems - delete scheduler elements
227 * ice_sched_remove_elems - remove nodes from HW
243 buf->hdr.parent_teid = parent->info.node_teid; in ice_sched_remove_elems()
244 buf->hdr.num_elems = cpu_to_le16(1); in ice_sched_remove_elems()
245 buf->teid[0] = cpu_to_le32(node_teid); in ice_sched_remove_elems()
251 hw->adminq.sq_last_status); in ice_sched_remove_elems()
257 * ice_sched_get_first_node - get the first node of the given layer
268 return pi->sib_head[parent->tc_num][layer]; in ice_sched_get_first_node()
272 * ice_sched_get_tc_node - get pointer to TC node
282 if (!pi || !pi->root) in ice_sched_get_tc_node()
284 for (i = 0; i < pi->root->num_children; i++) in ice_sched_get_tc_node()
285 if (pi->root->children[i]->tc_num == tc) in ice_sched_get_tc_node()
286 return pi->root->children[i]; in ice_sched_get_tc_node()
291 * ice_free_sched_node - Free a Tx scheduler node from SW DB
297 * This function needs to be called with the port_info->sched_lock held
302 struct ice_hw *hw = pi->hw; in ice_free_sched_node()
309 while (node->num_children) in ice_free_sched_node()
310 ice_free_sched_node(pi, node->children[0]); in ice_free_sched_node()
313 if (node->tx_sched_layer >= hw->sw_entry_point_layer && in ice_free_sched_node()
314 node->info.data.elem_type != ICE_AQC_ELEM_TYPE_TC && in ice_free_sched_node()
315 node->info.data.elem_type != ICE_AQC_ELEM_TYPE_ROOT_PORT && in ice_free_sched_node()
316 node->info.data.elem_type != ICE_AQC_ELEM_TYPE_LEAF) { in ice_free_sched_node()
317 u32 teid = le32_to_cpu(node->info.node_teid); in ice_free_sched_node()
319 ice_sched_remove_elems(hw, node->parent, teid); in ice_free_sched_node()
321 parent = node->parent; in ice_free_sched_node()
327 for (i = 0; i < parent->num_children; i++) in ice_free_sched_node()
328 if (parent->children[i] == node) { in ice_free_sched_node()
329 for (j = i + 1; j < parent->num_children; j++) in ice_free_sched_node()
330 parent->children[j - 1] = in ice_free_sched_node()
331 parent->children[j]; in ice_free_sched_node()
332 parent->num_children--; in ice_free_sched_node()
336 p = ice_sched_get_first_node(pi, node, node->tx_sched_layer); in ice_free_sched_node()
338 if (p->sibling == node) { in ice_free_sched_node()
339 p->sibling = node->sibling; in ice_free_sched_node()
342 p = p->sibling; in ice_free_sched_node()
346 if (pi->sib_head[node->tc_num][node->tx_sched_layer] == node) in ice_free_sched_node()
347 pi->sib_head[node->tc_num][node->tx_sched_layer] = in ice_free_sched_node()
348 node->sibling; in ice_free_sched_node()
351 devm_kfree(ice_hw_to_dev(hw), node->children); in ice_free_sched_node()
352 kfree(node->name); in ice_free_sched_node()
353 xa_erase(&pi->sched_node_ids, node->id); in ice_free_sched_node()
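
Two bookkeeping steps recur in the lines above: compacting the parent's children array over the freed node, and unlinking the node from the per-layer sibling list whose head sits in pi->sib_head. Both in isolation (field names follow the fragments; everything else is a stand-in):

    struct snode {
        struct snode *parent, *sibling;
        struct snode **children;        /* in parent: dense array of live children */
        unsigned char num_children;
    };

    /* Compact parent->children over the removed node, as in ice_free_sched_node(). */
    static void unlink_from_parent(struct snode *node)
    {
        struct snode *p = node->parent;
        unsigned char i, j;

        for (i = 0; i < p->num_children; i++)
            if (p->children[i] == node) {
                for (j = i + 1; j < p->num_children; j++)
                    p->children[j - 1] = p->children[j];
                p->num_children--;
                break;
            }
    }

    /* Unlink from the singly linked sibling chain; *head is the layer's
     * first node (pi->sib_head[tc][layer] in the kernel).
     */
    static void unlink_sibling(struct snode **head, struct snode *node)
    {
        struct snode *p = *head;

        if (*head == node) {
            *head = node->sibling;
            return;
        }
        while (p) {
            if (p->sibling == node) {
                p->sibling = node->sibling;
                break;
            }
            p = p->sibling;
        }
    }
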
358 * ice_aq_get_dflt_topo - gets default scheduler topology
379 cmd->port_num = lport; in ice_aq_get_dflt_topo()
382 *num_branches = cmd->num_branches; in ice_aq_get_dflt_topo()
388 * ice_aq_add_sched_elems - adds scheduling elements
409 * ice_aq_cfg_sched_elems - configures scheduler elements
430 * ice_aq_move_sched_elems - move scheduler element (just 1 group)
447 * ice_aq_suspend_sched_elems - suspend scheduler elements
467 * ice_aq_resume_sched_elems - resume scheduler elements
487 * ice_aq_query_sched_res - query scheduler resource
507 * ice_sched_suspend_resume_elems - suspend or resume HW nodes
526 return -ENOMEM; in ice_sched_suspend_resume_elems()
547 * ice_alloc_lan_q_ctx - allocate LAN queue contexts for the given VSI and TC
562 return -EINVAL; in ice_alloc_lan_q_ctx()
564 if (!vsi_ctx->lan_q_ctx[tc]) { in ice_alloc_lan_q_ctx()
568 return -ENOMEM; in ice_alloc_lan_q_ctx()
575 vsi_ctx->lan_q_ctx[tc] = q_ctx; in ice_alloc_lan_q_ctx()
576 vsi_ctx->num_lan_q_entries[tc] = new_numqs; in ice_alloc_lan_q_ctx()
580 if (new_numqs > vsi_ctx->num_lan_q_entries[tc]) { in ice_alloc_lan_q_ctx()
581 u16 prev_num = vsi_ctx->num_lan_q_entries[tc]; in ice_alloc_lan_q_ctx()
586 return -ENOMEM; in ice_alloc_lan_q_ctx()
588 memcpy(q_ctx, vsi_ctx->lan_q_ctx[tc], in ice_alloc_lan_q_ctx()
590 devm_kfree(ice_hw_to_dev(hw), vsi_ctx->lan_q_ctx[tc]); in ice_alloc_lan_q_ctx()
597 vsi_ctx->lan_q_ctx[tc] = q_ctx; in ice_alloc_lan_q_ctx()
598 vsi_ctx->num_lan_q_entries[tc] = new_numqs; in ice_alloc_lan_q_ctx()
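
When new_numqs exceeds the current allocation, the function grows the per-TC queue-context array with an allocate-copy-free-swap sequence. The same pattern in plain C (struct ice_q_ctx reduced to a stub, devm allocations replaced by calloc/free):

    #include <stdlib.h>
    #include <string.h>

    struct q_ctx { unsigned int q_teid; };   /* stub for struct ice_q_ctx */

    /* Grow *arr from *num to new_num entries, preserving existing contexts. */
    static int grow_q_ctx(struct q_ctx **arr, unsigned short *num, unsigned short new_num)
    {
        struct q_ctx *q_ctx;

        if (new_num <= *num)
            return 0;                    /* nothing to do; never shrinks */
        q_ctx = calloc(new_num, sizeof(*q_ctx));
        if (!q_ctx)
            return -1;                   /* kernel: -ENOMEM */
        memcpy(q_ctx, *arr, *num * sizeof(*q_ctx));
        free(*arr);
        *arr = q_ctx;
        *num = new_num;
        return 0;
    }
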
604 * ice_alloc_rdma_q_ctx - allocate RDMA queue contexts for the given VSI and TC
618 return -EINVAL; in ice_alloc_rdma_q_ctx()
620 if (!vsi_ctx->rdma_q_ctx[tc]) { in ice_alloc_rdma_q_ctx()
621 vsi_ctx->rdma_q_ctx[tc] = devm_kcalloc(ice_hw_to_dev(hw), in ice_alloc_rdma_q_ctx()
625 if (!vsi_ctx->rdma_q_ctx[tc]) in ice_alloc_rdma_q_ctx()
626 return -ENOMEM; in ice_alloc_rdma_q_ctx()
627 vsi_ctx->num_rdma_q_entries[tc] = new_numqs; in ice_alloc_rdma_q_ctx()
631 if (new_numqs > vsi_ctx->num_rdma_q_entries[tc]) { in ice_alloc_rdma_q_ctx()
632 u16 prev_num = vsi_ctx->num_rdma_q_entries[tc]; in ice_alloc_rdma_q_ctx()
637 return -ENOMEM; in ice_alloc_rdma_q_ctx()
638 memcpy(q_ctx, vsi_ctx->rdma_q_ctx[tc], in ice_alloc_rdma_q_ctx()
640 devm_kfree(ice_hw_to_dev(hw), vsi_ctx->rdma_q_ctx[tc]); in ice_alloc_rdma_q_ctx()
641 vsi_ctx->rdma_q_ctx[tc] = q_ctx; in ice_alloc_rdma_q_ctx()
642 vsi_ctx->num_rdma_q_entries[tc] = new_numqs; in ice_alloc_rdma_q_ctx()
648 * ice_aq_rl_profile - performs a rate limiting task
672 cmd->num_profiles = cpu_to_le16(num_profiles); in ice_aq_rl_profile()
675 *num_processed = le16_to_cpu(cmd->num_processed); in ice_aq_rl_profile()
680 * ice_aq_add_rl_profile - adds rate limiting profile(s)
700 * ice_aq_remove_rl_profile - removes RL profile(s)
721 * ice_sched_del_rl_profile - remove RL profile
738 if (rl_info->prof_id_ref != 0) in ice_sched_del_rl_profile()
739 return -EBUSY; in ice_sched_del_rl_profile()
742 buf = &rl_info->profile; in ice_sched_del_rl_profile()
746 return -EIO; in ice_sched_del_rl_profile()
749 list_del(&rl_info->list_entry); in ice_sched_del_rl_profile()
755 * ice_sched_clear_rl_prof - clears RL prof entries
764 for (ln = 0; ln < pi->hw->num_tx_sched_layers; ln++) { in ice_sched_clear_rl_prof()
769 &pi->rl_prof_list[ln], list_entry) { in ice_sched_clear_rl_prof()
770 struct ice_hw *hw = pi->hw; in ice_sched_clear_rl_prof()
773 rl_prof_elem->prof_id_ref = 0; in ice_sched_clear_rl_prof()
778 list_del(&rl_prof_elem->list_entry); in ice_sched_clear_rl_prof()
786 * ice_sched_clear_agg - clears the aggregator related information
797 list_for_each_entry_safe(agg_info, atmp, &hw->agg_list, list_entry) { in ice_sched_clear_agg()
802 &agg_info->agg_vsi_list, list_entry) { in ice_sched_clear_agg()
803 list_del(&agg_vsi_info->list_entry); in ice_sched_clear_agg()
806 list_del(&agg_info->list_entry); in ice_sched_clear_agg()
812 * ice_sched_clear_tx_topo - clears the scheduler tree nodes
823 if (pi->root) { in ice_sched_clear_tx_topo()
824 ice_free_sched_node(pi, pi->root); in ice_sched_clear_tx_topo()
825 pi->root = NULL; in ice_sched_clear_tx_topo()
830 * ice_sched_clear_port - clear the scheduler elements from SW DB for a port
837 if (!pi || pi->port_state != ICE_SCHED_PORT_STATE_READY) in ice_sched_clear_port()
840 pi->port_state = ICE_SCHED_PORT_STATE_INIT; in ice_sched_clear_port()
841 mutex_lock(&pi->sched_lock); in ice_sched_clear_port()
843 mutex_unlock(&pi->sched_lock); in ice_sched_clear_port()
844 mutex_destroy(&pi->sched_lock); in ice_sched_clear_port()
848 * ice_sched_cleanup_all - cleanup scheduler elements from SW DB for all ports
858 devm_kfree(ice_hw_to_dev(hw), hw->layer_info); in ice_sched_cleanup_all()
859 hw->layer_info = NULL; in ice_sched_cleanup_all()
861 ice_sched_clear_port(hw->port_info); in ice_sched_cleanup_all()
863 hw->num_tx_sched_layers = 0; in ice_sched_cleanup_all()
864 hw->num_tx_sched_phys_layers = 0; in ice_sched_cleanup_all()
865 hw->flattened_layers = 0; in ice_sched_cleanup_all()
866 hw->max_cgds = 0; in ice_sched_cleanup_all()
870 * ice_sched_add_elems - add nodes to HW and SW DB
891 struct ice_hw *hw = pi->hw; in ice_sched_add_elems()
899 return -ENOMEM; in ice_sched_add_elems()
901 buf->hdr.parent_teid = parent->info.node_teid; in ice_sched_add_elems()
902 buf->hdr.num_elems = cpu_to_le16(num_nodes); in ice_sched_add_elems()
904 buf->generic[i].parent_teid = parent->info.node_teid; in ice_sched_add_elems()
905 buf->generic[i].data.elem_type = ICE_AQC_ELEM_TYPE_SE_GENERIC; in ice_sched_add_elems()
906 buf->generic[i].data.valid_sections = in ice_sched_add_elems()
909 buf->generic[i].data.generic = 0; in ice_sched_add_elems()
910 buf->generic[i].data.cir_bw.bw_profile_idx = in ice_sched_add_elems()
912 buf->generic[i].data.cir_bw.bw_alloc = in ice_sched_add_elems()
914 buf->generic[i].data.eir_bw.bw_profile_idx = in ice_sched_add_elems()
916 buf->generic[i].data.eir_bw.bw_alloc = in ice_sched_add_elems()
924 hw->adminq.sq_last_status); in ice_sched_add_elems()
926 return -EIO; in ice_sched_add_elems()
933 status = ice_sched_add_node(pi, layer, &buf->generic[i], prealloc_nodes[i]); in ice_sched_add_elems()
935 status = ice_sched_add_node(pi, layer, &buf->generic[i], NULL); in ice_sched_add_elems()
943 teid = le32_to_cpu(buf->generic[i].node_teid); in ice_sched_add_elems()
950 new_node->sibling = NULL; in ice_sched_add_elems()
951 new_node->tc_num = tc_node->tc_num; in ice_sched_add_elems()
952 new_node->tx_weight = ICE_SCHED_DFLT_BW_WT; in ice_sched_add_elems()
953 new_node->tx_share = ICE_SCHED_DFLT_BW; in ice_sched_add_elems()
954 new_node->tx_max = ICE_SCHED_DFLT_BW; in ice_sched_add_elems()
955 new_node->name = kzalloc(SCHED_NODE_NAME_MAX_LEN, GFP_KERNEL); in ice_sched_add_elems()
956 if (!new_node->name) in ice_sched_add_elems()
957 return -ENOMEM; in ice_sched_add_elems()
959 status = xa_alloc(&pi->sched_node_ids, &new_node->id, NULL, XA_LIMIT(0, UINT_MAX), in ice_sched_add_elems()
962 ice_debug(hw, ICE_DBG_SCHED, "xa_alloc failed for sched node status =%d\n", in ice_sched_add_elems()
967 snprintf(new_node->name, SCHED_NODE_NAME_MAX_LEN, "node_%u", new_node->id); in ice_sched_add_elems()
973 while (prev->sibling) in ice_sched_add_elems()
974 prev = prev->sibling; in ice_sched_add_elems()
975 prev->sibling = new_node; in ice_sched_add_elems()
979 if (!pi->sib_head[tc_node->tc_num][layer]) in ice_sched_add_elems()
980 pi->sib_head[tc_node->tc_num][layer] = new_node; in ice_sched_add_elems()
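
Each new node is appended at the tail of its layer's sibling chain, and becomes pi->sib_head[tc][layer] if the chain was empty. Reduced to the list manipulation alone:

    #include <stddef.h>

    struct lnode { struct lnode *sibling; };

    /* Append new_node at the tail of the layer's sibling chain, as in
     * ice_sched_add_elems(); *head is pi->sib_head[tc][layer].
     */
    static void sibling_append(struct lnode **head, struct lnode *new_node)
    {
        struct lnode *prev = *head;

        new_node->sibling = NULL;
        if (!prev) {
            *head = new_node;           /* first node on this layer for the TC */
            return;
        }
        while (prev->sibling)
            prev = prev->sibling;
        prev->sibling = new_node;
    }
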
991 * ice_sched_add_nodes_to_hw_layer - Add nodes to HW layer
1016 if (!parent || layer < pi->hw->sw_entry_point_layer) in ice_sched_add_nodes_to_hw_layer()
1017 return -EINVAL; in ice_sched_add_nodes_to_hw_layer()
1020 max_child_nodes = pi->hw->max_children[parent->tx_sched_layer]; in ice_sched_add_nodes_to_hw_layer()
1023 if ((parent->num_children + num_nodes) > max_child_nodes) { in ice_sched_add_nodes_to_hw_layer()
1026 return -EIO; in ice_sched_add_nodes_to_hw_layer()
1027 return -ENOSPC; in ice_sched_add_nodes_to_hw_layer()
1035 * ice_sched_add_nodes_to_layer - Add nodes to a given layer
1070 ice_debug(pi->hw, ICE_DBG_SCHED, "added extra nodes %d %d\n", num_nodes, in ice_sched_add_nodes_to_layer()
1072 status = -EIO; in ice_sched_add_nodes_to_layer()
1079 if (status && status != -ENOSPC) in ice_sched_add_nodes_to_layer()
1082 max_child_nodes = pi->hw->max_children[parent->tx_sched_layer]; in ice_sched_add_nodes_to_layer()
1084 if (parent->num_children < max_child_nodes) { in ice_sched_add_nodes_to_layer()
1085 new_num_nodes = max_child_nodes - parent->num_children; in ice_sched_add_nodes_to_layer()
1088 parent = parent->sibling; in ice_sched_add_nodes_to_layer()
1097 new_num_nodes = num_nodes - *num_nodes_added; in ice_sched_add_nodes_to_layer()
1104 * ice_sched_get_qgrp_layer - get the current queue group layer number
1111 /* It's always total layers - 1; the array is 0-relative, so subtract 2 */ in ice_sched_get_qgrp_layer()
1112 return hw->num_tx_sched_layers - ICE_QGRP_LAYER_OFFSET; in ice_sched_get_qgrp_layer()
1116 * ice_sched_get_vsi_layer - get the current VSI layer number
1129 if (hw->num_tx_sched_layers == ICE_SCHED_9_LAYERS) in ice_sched_get_vsi_layer()
1130 return hw->num_tx_sched_layers - ICE_VSI_LAYER_OFFSET; in ice_sched_get_vsi_layer()
1131 else if (hw->num_tx_sched_layers == ICE_SCHED_5_LAYERS) in ice_sched_get_vsi_layer()
1133 return hw->num_tx_sched_layers - ICE_QGRP_LAYER_OFFSET; in ice_sched_get_vsi_layer()
1134 return hw->sw_entry_point_layer; in ice_sched_get_vsi_layer()
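
The VSI layer falls out of simple arithmetic on the total layer count, with a fallback to the SW entry point for flattened topologies. A sketch with the offset values the fragments imply (ICE_VSI_LAYER_OFFSET = 4 and ICE_QGRP_LAYER_OFFSET = 2 are assumptions here, not read from the listing):

    enum { SCHED_9_LAYERS = 9, SCHED_5_LAYERS = 5 };
    enum { VSI_LAYER_OFFSET = 4, QGRP_LAYER_OFFSET = 2 };   /* assumed values */

    /* Layer selection per ice_sched_get_vsi_layer(); indices are 0-relative. */
    static unsigned char get_vsi_layer(unsigned char num_layers, unsigned char sw_entry)
    {
        if (num_layers == SCHED_9_LAYERS)
            return num_layers - VSI_LAYER_OFFSET;    /* 9 - 4 = layer index 5 */
        if (num_layers == SCHED_5_LAYERS)
            return num_layers - QGRP_LAYER_OFFSET;   /* 5 - 2 = layer index 3 */
        return sw_entry;                             /* flattened topologies */
    }
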
1138 * ice_sched_get_agg_layer - get the current aggregator layer number
1150 if (hw->num_tx_sched_layers == ICE_SCHED_9_LAYERS) in ice_sched_get_agg_layer()
1151 return hw->num_tx_sched_layers - ICE_AGG_LAYER_OFFSET; in ice_sched_get_agg_layer()
1153 return hw->sw_entry_point_layer; in ice_sched_get_agg_layer()
1157 * ice_rm_dflt_leaf_node - remove the default leaf node in the tree
1167 node = pi->root; in ice_rm_dflt_leaf_node()
1169 if (!node->num_children) in ice_rm_dflt_leaf_node()
1171 node = node->children[0]; in ice_rm_dflt_leaf_node()
1173 if (node && node->info.data.elem_type == ICE_AQC_ELEM_TYPE_LEAF) { in ice_rm_dflt_leaf_node()
1174 u32 teid = le32_to_cpu(node->info.node_teid); in ice_rm_dflt_leaf_node()
1178 status = ice_sched_remove_elems(pi->hw, node->parent, teid); in ice_rm_dflt_leaf_node()
1185 * ice_sched_rm_dflt_nodes - free the default nodes in the tree
1198 node = pi->root; in ice_sched_rm_dflt_nodes()
1200 if (node->tx_sched_layer >= pi->hw->sw_entry_point_layer && in ice_sched_rm_dflt_nodes()
1201 node->info.data.elem_type != ICE_AQC_ELEM_TYPE_TC && in ice_sched_rm_dflt_nodes()
1202 node->info.data.elem_type != ICE_AQC_ELEM_TYPE_ROOT_PORT) { in ice_sched_rm_dflt_nodes()
1207 if (!node->num_children) in ice_sched_rm_dflt_nodes()
1209 node = node->children[0]; in ice_sched_rm_dflt_nodes()
1214 * ice_sched_init_port - Initialize scheduler by querying information from FW
1217 * This function is the initial call to find the total number of Tx scheduler
1231 return -EINVAL; in ice_sched_init_port()
1232 hw = pi->hw; in ice_sched_init_port()
1237 return -ENOMEM; in ice_sched_init_port()
1240 status = ice_aq_get_dflt_topo(hw, pi->lport, buf, ICE_AQ_MAX_BUF_LEN, in ice_sched_init_port()
1245 /* num_branches should be between 1 and 8 */ in ice_sched_init_port()
1249 status = -EINVAL; in ice_sched_init_port()
1256 /* num_elems should always be between 1 and 9 */ in ice_sched_init_port()
1260 status = -EINVAL; in ice_sched_init_port()
1267 if (num_elems > 2 && buf[0].generic[num_elems - 1].data.elem_type == in ice_sched_init_port()
1269 pi->last_node_teid = in ice_sched_init_port()
1270 le32_to_cpu(buf[0].generic[num_elems - 2].node_teid); in ice_sched_init_port()
1272 pi->last_node_teid = in ice_sched_init_port()
1273 le32_to_cpu(buf[0].generic[num_elems - 1].node_teid); in ice_sched_init_port()
1275 /* Insert the Tx Sched root node */ in ice_sched_init_port()
1289 hw->sw_entry_point_layer = j; in ice_sched_init_port()
1298 if (pi->root) in ice_sched_init_port()
1302 pi->port_state = ICE_SCHED_PORT_STATE_READY; in ice_sched_init_port()
1303 mutex_init(&pi->sched_lock); in ice_sched_init_port()
1305 INIT_LIST_HEAD(&pi->rl_prof_list[i]); in ice_sched_init_port()
1308 if (status && pi->root) { in ice_sched_init_port()
1309 ice_free_sched_node(pi, pi->root); in ice_sched_init_port()
1310 pi->root = NULL; in ice_sched_init_port()
1318 * ice_sched_query_res_alloc - query the FW for num of logical sched layers
1330 if (hw->layer_info) in ice_sched_query_res_alloc()
1335 return -ENOMEM; in ice_sched_query_res_alloc()
1341 hw->num_tx_sched_layers = le16_to_cpu(buf->sched_props.logical_levels); in ice_sched_query_res_alloc()
1342 hw->num_tx_sched_phys_layers = in ice_sched_query_res_alloc()
1343 le16_to_cpu(buf->sched_props.phys_levels); in ice_sched_query_res_alloc()
1344 hw->flattened_layers = buf->sched_props.flattening_bitmap; in ice_sched_query_res_alloc()
1345 hw->max_cgds = buf->sched_props.max_pf_cgds; in ice_sched_query_res_alloc()
1354 for (i = 0; i < hw->num_tx_sched_layers - 1; i++) { in ice_sched_query_res_alloc()
1355 max_sibl = buf->layer_props[i + 1].max_sibl_grp_sz; in ice_sched_query_res_alloc()
1356 hw->max_children[i] = le16_to_cpu(max_sibl); in ice_sched_query_res_alloc()
1359 hw->layer_info = devm_kmemdup(ice_hw_to_dev(hw), buf->layer_props, in ice_sched_query_res_alloc()
1360 (hw->num_tx_sched_layers * in ice_sched_query_res_alloc()
1361 sizeof(*hw->layer_info)), in ice_sched_query_res_alloc()
1363 if (!hw->layer_info) { in ice_sched_query_res_alloc()
1364 status = -ENOMEM; in ice_sched_query_res_alloc()
1374 * ice_sched_get_psm_clk_freq - determine the PSM clock frequency
1393 hw->psm_clk_freq = ICE_PSM_CLK_367MHZ_IN_HZ; in ice_sched_get_psm_clk_freq()
1396 hw->psm_clk_freq = ICE_PSM_CLK_416MHZ_IN_HZ; in ice_sched_get_psm_clk_freq()
1399 hw->psm_clk_freq = ICE_PSM_CLK_446MHZ_IN_HZ; in ice_sched_get_psm_clk_freq()
1402 hw->psm_clk_freq = ICE_PSM_CLK_390MHZ_IN_HZ; in ice_sched_get_psm_clk_freq()
1408 hw->psm_clk_freq = ICE_PSM_CLK_446MHZ_IN_HZ; in ice_sched_get_psm_clk_freq()
1413 * ice_sched_find_node_in_subtree - Find node in part of base node subtree
1427 for (i = 0; i < base->num_children; i++) { in ice_sched_find_node_in_subtree()
1428 struct ice_sched_node *child = base->children[i]; in ice_sched_find_node_in_subtree()
1433 if (child->tx_sched_layer > node->tx_sched_layer) in ice_sched_find_node_in_subtree()
1446 * ice_sched_get_free_qgrp - Scan all queue group siblings and find a free node
1466 min_children = qgrp_node->num_children; in ice_sched_get_free_qgrp()
1477 if (ice_sched_find_node_in_subtree(pi->hw, vsi_node, qgrp_node)) in ice_sched_get_free_qgrp()
1478 if (qgrp_node->num_children < min_children && in ice_sched_get_free_qgrp()
1479 qgrp_node->owner == owner) { in ice_sched_get_free_qgrp()
1482 min_children = min_qgrp->num_children; in ice_sched_get_free_qgrp()
1487 qgrp_node = qgrp_node->sibling; in ice_sched_get_free_qgrp()
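
The scan keeps the queue group with the fewest children among siblings that have the right owner and sit inside the VSI sub-tree. The selection loop on its own (in_subtree() stands in for ice_sched_find_node_in_subtree()):

    #include <stddef.h>

    struct qgrp { struct qgrp *sibling; unsigned char num_children; int owner; };

    /* Pick the least-loaded eligible queue group, as in ice_sched_get_free_qgrp(). */
    static struct qgrp *get_free_qgrp(struct qgrp *first, int owner,
                                      int (*in_subtree)(struct qgrp *))
    {
        struct qgrp *min_qgrp = NULL;
        unsigned char min_children = 255;
        struct qgrp *q = first;

        while (q) {
            if (in_subtree(q) && q->owner == owner &&
                q->num_children < min_children) {
                min_qgrp = q;
                min_children = q->num_children;
            }
            q = q->sibling;
        }
        return min_qgrp;
    }
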
1493 * ice_sched_get_free_qparent - Get a free LAN or RDMA queue group node
1510 qgrp_layer = ice_sched_get_qgrp_layer(pi->hw); in ice_sched_get_free_qparent()
1511 vsi_layer = ice_sched_get_vsi_layer(pi->hw); in ice_sched_get_free_qparent()
1512 max_children = pi->hw->max_children[qgrp_layer]; in ice_sched_get_free_qparent()
1514 vsi_ctx = ice_get_vsi_ctx(pi->hw, vsi_handle); in ice_sched_get_free_qparent()
1517 vsi_node = vsi_ctx->sched.vsi_node[tc]; in ice_sched_get_free_qparent()
1528 /* get the first queue group node from VSI sub-tree */ in ice_sched_get_free_qparent()
1532 if (ice_sched_find_node_in_subtree(pi->hw, vsi_node, qgrp_node)) in ice_sched_get_free_qparent()
1533 if (qgrp_node->num_children < max_children && in ice_sched_get_free_qparent()
1534 qgrp_node->owner == owner) in ice_sched_get_free_qparent()
1536 qgrp_node = qgrp_node->sibling; in ice_sched_get_free_qparent()
1544 * ice_sched_get_vsi_node - Get a VSI node based on VSI ID
1559 vsi_layer = ice_sched_get_vsi_layer(pi->hw); in ice_sched_get_vsi_node()
1564 if (node->vsi_handle == vsi_handle) in ice_sched_get_vsi_node()
1566 node = node->sibling; in ice_sched_get_vsi_node()
1573 * ice_sched_get_agg_node - Get an aggregator node based on aggregator ID
1586 struct ice_hw *hw = pi->hw; in ice_sched_get_agg_node()
1596 if (node->agg_id == agg_id) in ice_sched_get_agg_node()
1598 node = node->sibling; in ice_sched_get_agg_node()
1605 * ice_sched_calc_vsi_child_nodes - calculate number of VSI child nodes
1623 for (i = qgl; i > vsil; i--) { in ice_sched_calc_vsi_child_nodes()
1625 num = DIV_ROUND_UP(num, hw->max_children[i]); in ice_sched_calc_vsi_child_nodes()
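
Working upward from the queue-group layer to just above the VSI layer, each step divides the running node count by that layer's fan-out, rounded up. For example, 130 queues with a fan-out of 8 need DIV_ROUND_UP(130, 8) = 17 queue-group nodes, then DIV_ROUND_UP(17, 8) = 3 nodes on the layer above (numbers illustrative). The loop, extracted:

    #define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

    /* Per-layer node requirement, bottom-up, as in ice_sched_calc_vsi_child_nodes();
     * the kernel records the counts in its num_nodes array the same way.
     */
    static void calc_child_nodes(unsigned short num, const unsigned char *max_children,
                                 int qgl, int vsil, unsigned short *num_nodes)
    {
        int i;

        for (i = qgl; i > vsil; i--) {
            num = DIV_ROUND_UP(num, max_children[i]);
            num_nodes[i] = num;
        }
    }
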
1633 * ice_sched_add_vsi_child_nodes - add VSI child nodes to tree
1649 struct ice_hw *hw = pi->hw; in ice_sched_add_vsi_child_nodes()
1661 return -EIO; in ice_sched_add_vsi_child_nodes()
1668 return -EIO; in ice_sched_add_vsi_child_nodes()
1678 node->owner = owner; in ice_sched_add_vsi_child_nodes()
1679 node = node->sibling; in ice_sched_add_vsi_child_nodes()
1682 parent = parent->children[0]; in ice_sched_add_vsi_child_nodes()
1690 * ice_sched_calc_vsi_support_nodes - calculate number of VSI support nodes
1696 * VSI into Tx tree including the VSI, parent and intermediate nodes in below
1707 vsil = ice_sched_get_vsi_layer(pi->hw); in ice_sched_calc_vsi_support_nodes()
1708 for (i = vsil; i >= pi->hw->sw_entry_point_layer; i--) in ice_sched_calc_vsi_support_nodes()
1712 if (!tc_node->num_children || i == vsil) { in ice_sched_calc_vsi_support_nodes()
1721 if (node->num_children < pi->hw->max_children[i]) in ice_sched_calc_vsi_support_nodes()
1723 node = node->sibling; in ice_sched_calc_vsi_support_nodes()
1738 * ice_sched_add_vsi_support_nodes - add VSI supported nodes into Tx tree
1744 * This function adds the VSI supported nodes into Tx tree including the
1757 return -EINVAL; in ice_sched_add_vsi_support_nodes()
1759 vsil = ice_sched_get_vsi_layer(pi->hw); in ice_sched_add_vsi_support_nodes()
1760 for (i = pi->hw->sw_entry_point_layer; i <= vsil; i++) { in ice_sched_add_vsi_support_nodes()
1768 return -EIO; in ice_sched_add_vsi_support_nodes()
1777 parent = parent->children[0]; in ice_sched_add_vsi_support_nodes()
1780 return -EIO; in ice_sched_add_vsi_support_nodes()
1783 parent->vsi_handle = vsi_handle; in ice_sched_add_vsi_support_nodes()
1790 * ice_sched_add_vsi_to_topo - add a new VSI into tree
1805 return -EINVAL; in ice_sched_add_vsi_to_topo()
1816 * ice_sched_update_vsi_child_nodes - update VSI child nodes
1833 struct ice_hw *hw = pi->hw; in ice_sched_update_vsi_child_nodes()
1839 return -EIO; in ice_sched_update_vsi_child_nodes()
1843 return -EIO; in ice_sched_update_vsi_child_nodes()
1847 return -EINVAL; in ice_sched_update_vsi_child_nodes()
1850 prev_numqs = vsi_ctx->sched.max_lanq[tc]; in ice_sched_update_vsi_child_nodes()
1852 prev_numqs = vsi_ctx->sched.max_rdmaq[tc]; in ice_sched_update_vsi_child_nodes()
1880 vsi_ctx->sched.max_lanq[tc] = new_numqs; in ice_sched_update_vsi_child_nodes()
1882 vsi_ctx->sched.max_rdmaq[tc] = new_numqs; in ice_sched_update_vsi_child_nodes()
1888 * ice_sched_cfg_vsi - configure the new/existing VSI
1906 struct ice_hw *hw = pi->hw; in ice_sched_cfg_vsi()
1909 ice_debug(pi->hw, ICE_DBG_SCHED, "add/config VSI %d\n", vsi_handle); in ice_sched_cfg_vsi()
1912 return -EINVAL; in ice_sched_cfg_vsi()
1915 return -EINVAL; in ice_sched_cfg_vsi()
1920 if (vsi_node && vsi_node->in_use) { in ice_sched_cfg_vsi()
1921 u32 teid = le32_to_cpu(vsi_node->info.node_teid); in ice_sched_cfg_vsi()
1926 vsi_node->in_use = false; in ice_sched_cfg_vsi()
1939 return -EIO; in ice_sched_cfg_vsi()
1941 vsi_ctx->sched.vsi_node[tc] = vsi_node; in ice_sched_cfg_vsi()
1942 vsi_node->in_use = true; in ice_sched_cfg_vsi()
1947 vsi_ctx->sched.max_lanq[tc] = 0; in ice_sched_cfg_vsi()
1948 vsi_ctx->sched.max_rdmaq[tc] = 0; in ice_sched_cfg_vsi()
1958 if (!vsi_node->in_use) { in ice_sched_cfg_vsi()
1959 u32 teid = le32_to_cpu(vsi_node->info.node_teid); in ice_sched_cfg_vsi()
1963 vsi_node->in_use = true; in ice_sched_cfg_vsi()
1970 * ice_sched_rm_agg_vsi_info - remove aggregator related VSI info entry
1982 list_for_each_entry_safe(agg_info, atmp, &pi->hw->agg_list, in ice_sched_rm_agg_vsi_info()
1988 &agg_info->agg_vsi_list, list_entry) in ice_sched_rm_agg_vsi_info()
1989 if (agg_vsi_info->vsi_handle == vsi_handle) { in ice_sched_rm_agg_vsi_info()
1990 list_del(&agg_vsi_info->list_entry); in ice_sched_rm_agg_vsi_info()
1991 devm_kfree(ice_hw_to_dev(pi->hw), in ice_sched_rm_agg_vsi_info()
1999 * ice_sched_is_leaf_node_present - check for a leaf node in the sub-tree
2000 * @node: pointer to the sub-tree node
2002 * This function checks for a leaf node presence in a given sub-tree node.
2008 for (i = 0; i < node->num_children; i++) in ice_sched_is_leaf_node_present()
2009 if (ice_sched_is_leaf_node_present(node->children[i])) in ice_sched_is_leaf_node_present()
2012 return (node->info.data.elem_type == ICE_AQC_ELEM_TYPE_LEAF); in ice_sched_is_leaf_node_present()
2016 * ice_sched_rm_vsi_cfg - remove the VSI and its children nodes
2028 int status = -EINVAL; in ice_sched_rm_vsi_cfg()
2031 ice_debug(pi->hw, ICE_DBG_SCHED, "removing VSI %d\n", vsi_handle); in ice_sched_rm_vsi_cfg()
2032 if (!ice_is_vsi_valid(pi->hw, vsi_handle)) in ice_sched_rm_vsi_cfg()
2034 mutex_lock(&pi->sched_lock); in ice_sched_rm_vsi_cfg()
2035 vsi_ctx = ice_get_vsi_ctx(pi->hw, vsi_handle); in ice_sched_rm_vsi_cfg()
2052 ice_debug(pi->hw, ICE_DBG_SCHED, "VSI has leaf nodes in TC %d\n", i); in ice_sched_rm_vsi_cfg()
2053 status = -EBUSY; in ice_sched_rm_vsi_cfg()
2056 while (j < vsi_node->num_children) { in ice_sched_rm_vsi_cfg()
2057 if (vsi_node->children[j]->owner == owner) { in ice_sched_rm_vsi_cfg()
2058 ice_free_sched_node(pi, vsi_node->children[j]); in ice_sched_rm_vsi_cfg()
2069 if (!vsi_node->num_children) { in ice_sched_rm_vsi_cfg()
2071 vsi_ctx->sched.vsi_node[i] = NULL; in ice_sched_rm_vsi_cfg()
2077 vsi_ctx->sched.max_lanq[i] = 0; in ice_sched_rm_vsi_cfg()
2079 vsi_ctx->sched.max_rdmaq[i] = 0; in ice_sched_rm_vsi_cfg()
2084 mutex_unlock(&pi->sched_lock); in ice_sched_rm_vsi_cfg()
2089 * ice_rm_vsi_lan_cfg - remove VSI and its LAN children nodes
2102 * ice_rm_vsi_rdma_cfg - remove VSI and its RDMA children nodes
2115 * ice_get_agg_info - get the aggregator ID
2127 list_for_each_entry(agg_info, &hw->agg_list, list_entry) in ice_get_agg_info()
2128 if (agg_info->agg_id == agg_id) in ice_get_agg_info()
2135 * ice_sched_get_free_vsi_parent - Find a free parent node in aggregator subtree
2147 u8 l = node->tx_sched_layer; in ice_sched_get_free_vsi_parent()
2153 if (l == vsil - 1) in ice_sched_get_free_vsi_parent()
2154 return (node->num_children < hw->max_children[l]) ? node : NULL; in ice_sched_get_free_vsi_parent()
2159 if (node->num_children < hw->max_children[l]) in ice_sched_get_free_vsi_parent()
2165 for (i = 0; i < node->num_children; i++) { in ice_sched_get_free_vsi_parent()
2168 parent = ice_sched_get_free_vsi_parent(hw, node->children[i], in ice_sched_get_free_vsi_parent()
2178 * ice_sched_update_parent - update the new parent in SW DB
2192 old_parent = node->parent; in ice_sched_update_parent()
2195 for (i = 0; i < old_parent->num_children; i++) in ice_sched_update_parent()
2196 if (old_parent->children[i] == node) { in ice_sched_update_parent()
2197 for (j = i + 1; j < old_parent->num_children; j++) in ice_sched_update_parent()
2198 old_parent->children[j - 1] = in ice_sched_update_parent()
2199 old_parent->children[j]; in ice_sched_update_parent()
2200 old_parent->num_children--; in ice_sched_update_parent()
2205 new_parent->children[new_parent->num_children++] = node; in ice_sched_update_parent()
2206 node->parent = new_parent; in ice_sched_update_parent()
2207 node->info.parent_teid = new_parent->info.node_teid; in ice_sched_update_parent()
2211 * ice_sched_move_nodes - move child nodes to a given parent
2230 hw = pi->hw; in ice_sched_move_nodes()
2233 return -EINVAL; in ice_sched_move_nodes()
2236 if (parent->num_children + num_items > in ice_sched_move_nodes()
2237 hw->max_children[parent->tx_sched_layer]) in ice_sched_move_nodes()
2238 return -ENOSPC; in ice_sched_move_nodes()
2241 node = ice_sched_find_node_by_teid(pi->root, list[i]); in ice_sched_move_nodes()
2243 status = -EINVAL; in ice_sched_move_nodes()
2247 buf->hdr.src_parent_teid = node->info.parent_teid; in ice_sched_move_nodes()
2248 buf->hdr.dest_parent_teid = parent->info.node_teid; in ice_sched_move_nodes()
2249 buf->teid[0] = node->info.node_teid; in ice_sched_move_nodes()
2250 buf->hdr.num_elems = cpu_to_le16(1); in ice_sched_move_nodes()
2253 status = -EIO; in ice_sched_move_nodes()
2265 * ice_sched_move_vsi_to_agg - move VSI to aggregator node
2287 return -EIO; in ice_sched_move_vsi_to_agg()
2291 return -ENOENT; in ice_sched_move_vsi_to_agg()
2295 return -ENOENT; in ice_sched_move_vsi_to_agg()
2298 if (ice_sched_find_node_in_subtree(pi->hw, agg_node, vsi_node)) in ice_sched_move_vsi_to_agg()
2301 aggl = ice_sched_get_agg_layer(pi->hw); in ice_sched_move_vsi_to_agg()
2302 vsil = ice_sched_get_vsi_layer(pi->hw); in ice_sched_move_vsi_to_agg()
2309 for (i = 0; i < agg_node->num_children; i++) { in ice_sched_move_vsi_to_agg()
2310 parent = ice_sched_get_free_vsi_parent(pi->hw, in ice_sched_move_vsi_to_agg()
2311 agg_node->children[i], in ice_sched_move_vsi_to_agg()
2325 return -EIO; in ice_sched_move_vsi_to_agg()
2334 parent = parent->children[0]; in ice_sched_move_vsi_to_agg()
2337 return -EIO; in ice_sched_move_vsi_to_agg()
2341 vsi_teid = le32_to_cpu(vsi_node->info.node_teid); in ice_sched_move_vsi_to_agg()
2346 * ice_move_all_vsi_to_dflt_agg - move all VSI(s) to default aggregator
2365 list_for_each_entry_safe(agg_vsi_info, tmp, &agg_info->agg_vsi_list, in ice_move_all_vsi_to_dflt_agg()
2367 u16 vsi_handle = agg_vsi_info->vsi_handle; in ice_move_all_vsi_to_dflt_agg()
2370 if (!ice_is_tc_ena(agg_vsi_info->tc_bitmap[0], tc)) in ice_move_all_vsi_to_dflt_agg()
2378 clear_bit(tc, agg_vsi_info->tc_bitmap); in ice_move_all_vsi_to_dflt_agg()
2379 if (rm_vsi_info && !agg_vsi_info->tc_bitmap[0]) { in ice_move_all_vsi_to_dflt_agg()
2380 list_del(&agg_vsi_info->list_entry); in ice_move_all_vsi_to_dflt_agg()
2381 devm_kfree(ice_hw_to_dev(pi->hw), agg_vsi_info); in ice_move_all_vsi_to_dflt_agg()
2389 * ice_sched_is_agg_inuse - check whether the aggregator is in use or not
2400 vsil = ice_sched_get_vsi_layer(pi->hw); in ice_sched_is_agg_inuse()
2401 if (node->tx_sched_layer < vsil - 1) { in ice_sched_is_agg_inuse()
2402 for (i = 0; i < node->num_children; i++) in ice_sched_is_agg_inuse()
2403 if (ice_sched_is_agg_inuse(pi, node->children[i])) in ice_sched_is_agg_inuse()
2407 return node->num_children ? true : false; in ice_sched_is_agg_inuse()
2412 * ice_sched_rm_agg_cfg - remove the aggregator node
2424 struct ice_hw *hw = pi->hw; in ice_sched_rm_agg_cfg()
2428 return -EIO; in ice_sched_rm_agg_cfg()
2432 return -ENOENT; in ice_sched_rm_agg_cfg()
2436 return -EBUSY; in ice_sched_rm_agg_cfg()
2441 while (agg_node->tx_sched_layer > hw->sw_entry_point_layer) { in ice_sched_rm_agg_cfg()
2442 struct ice_sched_node *parent = agg_node->parent; in ice_sched_rm_agg_cfg()
2445 return -EIO; in ice_sched_rm_agg_cfg()
2447 if (parent->num_children > 1) in ice_sched_rm_agg_cfg()
2458 * ice_rm_agg_cfg_tc - remove aggregator configuration for TC
2474 /* If nothing to remove - return success */ in ice_rm_agg_cfg_tc()
2475 if (!ice_is_tc_ena(agg_info->tc_bitmap[0], tc)) in ice_rm_agg_cfg_tc()
2483 status = ice_sched_rm_agg_cfg(pi, agg_info->agg_id, tc); in ice_rm_agg_cfg_tc()
2487 clear_bit(tc, agg_info->tc_bitmap); in ice_rm_agg_cfg_tc()
2493 * ice_save_agg_tc_bitmap - save aggregator TC bitmap
2507 agg_info = ice_get_agg_info(pi->hw, agg_id); in ice_save_agg_tc_bitmap()
2509 return -EINVAL; in ice_save_agg_tc_bitmap()
2510 bitmap_copy(agg_info->replay_tc_bitmap, tc_bitmap, in ice_save_agg_tc_bitmap()
2516 * ice_sched_add_agg_cfg - create an aggregator node
2529 struct ice_hw *hw = pi->hw; in ice_sched_add_agg_cfg()
2537 return -EIO; in ice_sched_add_agg_cfg()
2553 for (i = hw->sw_entry_point_layer; i < aggl; i++) { in ice_sched_add_agg_cfg()
2558 if (parent->num_children < hw->max_children[i]) in ice_sched_add_agg_cfg()
2560 parent = parent->sibling; in ice_sched_add_agg_cfg()
2570 for (i = hw->sw_entry_point_layer; i <= aggl; i++) { in ice_sched_add_agg_cfg()
2572 return -EIO; in ice_sched_add_agg_cfg()
2579 return -EIO; in ice_sched_add_agg_cfg()
2589 parent->agg_id = agg_id; in ice_sched_add_agg_cfg()
2591 parent = parent->children[0]; in ice_sched_add_agg_cfg()
2599 * ice_sched_cfg_agg - configure aggregator node
2619 struct ice_hw *hw = pi->hw; in ice_sched_cfg_agg()
2629 return -ENOMEM; in ice_sched_cfg_agg()
2631 agg_info->agg_id = agg_id; in ice_sched_cfg_agg()
2632 agg_info->agg_type = agg_type; in ice_sched_cfg_agg()
2633 agg_info->tc_bitmap[0] = 0; in ice_sched_cfg_agg()
2636 INIT_LIST_HEAD(&agg_info->agg_vsi_list); in ice_sched_cfg_agg()
2639 list_add(&agg_info->list_entry, &hw->agg_list); in ice_sched_cfg_agg()
2652 if (ice_is_tc_ena(agg_info->tc_bitmap[0], tc)) in ice_sched_cfg_agg()
2661 set_bit(tc, agg_info->tc_bitmap); in ice_sched_cfg_agg()
2668 * ice_cfg_agg - config aggregator node
2683 mutex_lock(&pi->sched_lock); in ice_cfg_agg()
2687 mutex_unlock(&pi->sched_lock); in ice_cfg_agg()
2692 * ice_get_agg_vsi_info - get the aggregator ID
2704 list_for_each_entry(agg_vsi_info, &agg_info->agg_vsi_list, list_entry) in ice_get_agg_vsi_info()
2705 if (agg_vsi_info->vsi_handle == vsi_handle) in ice_get_agg_vsi_info()
2712 * ice_get_vsi_agg_info - get the aggregator info of VSI
2725 list_for_each_entry(agg_info, &hw->agg_list, list_entry) { in ice_get_vsi_agg_info()
2736 * ice_save_agg_vsi_tc_bitmap - save aggregator VSI TC bitmap
2752 agg_info = ice_get_agg_info(pi->hw, agg_id); in ice_save_agg_vsi_tc_bitmap()
2754 return -EINVAL; in ice_save_agg_vsi_tc_bitmap()
2758 return -EINVAL; in ice_save_agg_vsi_tc_bitmap()
2759 bitmap_copy(agg_vsi_info->replay_tc_bitmap, tc_bitmap, in ice_save_agg_vsi_tc_bitmap()
2765 * ice_sched_assoc_vsi_to_agg - associate/move VSI to new/default aggregator
2781 struct ice_hw *hw = pi->hw; in ice_sched_assoc_vsi_to_agg()
2785 if (!ice_is_vsi_valid(pi->hw, vsi_handle)) in ice_sched_assoc_vsi_to_agg()
2786 return -EINVAL; in ice_sched_assoc_vsi_to_agg()
2789 return -EINVAL; in ice_sched_assoc_vsi_to_agg()
2798 &old_agg_info->agg_vsi_list, in ice_sched_assoc_vsi_to_agg()
2800 if (iter->vsi_handle == vsi_handle) { in ice_sched_assoc_vsi_to_agg()
2813 return -EINVAL; in ice_sched_assoc_vsi_to_agg()
2816 agg_vsi_info->vsi_handle = vsi_handle; in ice_sched_assoc_vsi_to_agg()
2817 list_add(&agg_vsi_info->list_entry, &agg_info->agg_vsi_list); in ice_sched_assoc_vsi_to_agg()
2829 set_bit(tc, agg_vsi_info->tc_bitmap); in ice_sched_assoc_vsi_to_agg()
2831 clear_bit(tc, old_agg_vsi_info->tc_bitmap); in ice_sched_assoc_vsi_to_agg()
2833 if (old_agg_vsi_info && !old_agg_vsi_info->tc_bitmap[0]) { in ice_sched_assoc_vsi_to_agg()
2834 list_del(&old_agg_vsi_info->list_entry); in ice_sched_assoc_vsi_to_agg()
2835 devm_kfree(ice_hw_to_dev(pi->hw), old_agg_vsi_info); in ice_sched_assoc_vsi_to_agg()
2841 * ice_sched_rm_unused_rl_prof - remove unused RL profile
2851 for (ln = 0; ln < pi->hw->num_tx_sched_layers; ln++) { in ice_sched_rm_unused_rl_prof()
2856 &pi->rl_prof_list[ln], list_entry) { in ice_sched_rm_unused_rl_prof()
2857 if (!ice_sched_del_rl_profile(pi->hw, rl_prof_elem)) in ice_sched_rm_unused_rl_prof()
2858 ice_debug(pi->hw, ICE_DBG_SCHED, "Removed rl profile\n"); in ice_sched_rm_unused_rl_prof()
2864 * ice_sched_update_elem - update element
2870 * parameters of the node from the argument info data buffer (info->data buf) and
2871 * returns success, or an error if configuring the sched element fails. The caller
2896 ice_debug(hw, ICE_DBG_SCHED, "Config sched elem error\n"); in ice_sched_update_elem()
2897 return -EIO; in ice_sched_update_elem()
2903 node->info.data = info->data; in ice_sched_update_elem()
2908 * ice_sched_cfg_node_bw_alloc - configure node BW weight/alloc params
2910 * @node: sched node to configure
2923 buf = node->info; in ice_sched_cfg_node_bw_alloc()
2926 data->valid_sections |= ICE_AQC_ELEM_VALID_CIR; in ice_sched_cfg_node_bw_alloc()
2927 data->cir_bw.bw_alloc = cpu_to_le16(bw_alloc); in ice_sched_cfg_node_bw_alloc()
2929 data->valid_sections |= ICE_AQC_ELEM_VALID_EIR; in ice_sched_cfg_node_bw_alloc()
2930 data->eir_bw.bw_alloc = cpu_to_le16(bw_alloc); in ice_sched_cfg_node_bw_alloc()
2932 return -EINVAL; in ice_sched_cfg_node_bw_alloc()
2940 * ice_move_vsi_to_agg - moves VSI to new or default aggregator
2955 mutex_lock(&pi->sched_lock); in ice_move_vsi_to_agg()
2961 mutex_unlock(&pi->sched_lock); in ice_move_vsi_to_agg()
2966 * ice_set_clear_cir_bw - set or clear CIR BW
2968 * @bw: bandwidth in Kbps - kilobits per sec
2975 clear_bit(ICE_BW_TYPE_CIR, bw_t_info->bw_t_bitmap); in ice_set_clear_cir_bw()
2976 bw_t_info->cir_bw.bw = 0; in ice_set_clear_cir_bw()
2979 set_bit(ICE_BW_TYPE_CIR, bw_t_info->bw_t_bitmap); in ice_set_clear_cir_bw()
2980 bw_t_info->cir_bw.bw = bw; in ice_set_clear_cir_bw()
2985 * ice_set_clear_eir_bw - set or clear EIR BW
2987 * @bw: bandwidth in Kbps - kilobits per sec
2994 clear_bit(ICE_BW_TYPE_EIR, bw_t_info->bw_t_bitmap); in ice_set_clear_eir_bw()
2995 bw_t_info->eir_bw.bw = 0; in ice_set_clear_eir_bw()
3001 clear_bit(ICE_BW_TYPE_SHARED, bw_t_info->bw_t_bitmap); in ice_set_clear_eir_bw()
3002 bw_t_info->shared_bw = 0; in ice_set_clear_eir_bw()
3004 set_bit(ICE_BW_TYPE_EIR, bw_t_info->bw_t_bitmap); in ice_set_clear_eir_bw()
3005 bw_t_info->eir_bw.bw = bw; in ice_set_clear_eir_bw()
3010 * ice_set_clear_shared_bw - set or clear shared BW
3012 * @bw: bandwidth in Kbps - kilobits per sec
3019 clear_bit(ICE_BW_TYPE_SHARED, bw_t_info->bw_t_bitmap); in ice_set_clear_shared_bw()
3020 bw_t_info->shared_bw = 0; in ice_set_clear_shared_bw()
3026 clear_bit(ICE_BW_TYPE_EIR, bw_t_info->bw_t_bitmap); in ice_set_clear_shared_bw()
3027 bw_t_info->eir_bw.bw = 0; in ice_set_clear_shared_bw()
3029 set_bit(ICE_BW_TYPE_SHARED, bw_t_info->bw_t_bitmap); in ice_set_clear_shared_bw()
3030 bw_t_info->shared_bw = bw; in ice_set_clear_shared_bw()
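
In the saved-BW state, EIR and shared (SRL) bandwidth displace each other: programming one clears the other's bit and value, and the default-BW sentinel clears the type outright. Schematically (the 0xFFFFFFFF sentinel is an assumption standing in for ICE_SCHED_DFLT_BW, and plain bools stand in for the bw_t_bitmap bits):

    #include <stdbool.h>

    #define SCHED_DFLT_BW 0xFFFFFFFFu   /* assumed value of ICE_SCHED_DFLT_BW */

    enum bw_type { BW_CIR, BW_EIR, BW_SHARED, BW_CNT };

    struct bw_t_info {
        bool bit[BW_CNT];               /* stands in for bw_t_bitmap */
        unsigned int eir_bw, shared_bw;
    };

    /* Mirrors ice_set_clear_eir_bw(): EIR and shared/SRL displace each other. */
    static void set_clear_eir_bw(struct bw_t_info *t, unsigned int bw)
    {
        if (bw == SCHED_DFLT_BW) {      /* default => clear the saved limit */
            t->bit[BW_EIR] = false;
            t->eir_bw = 0;
            return;
        }
        t->bit[BW_SHARED] = false;      /* EIR config removes saved SRL */
        t->shared_bw = 0;
        t->bit[BW_EIR] = true;
        t->eir_bw = bw;
    }
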
3035 * ice_sched_save_vsi_bw - save VSI node's BW information
3040 * @bw: bandwidth in Kbps - kilobits per sec
3050 if (!ice_is_vsi_valid(pi->hw, vsi_handle)) in ice_sched_save_vsi_bw()
3051 return -EINVAL; in ice_sched_save_vsi_bw()
3052 vsi_ctx = ice_get_vsi_ctx(pi->hw, vsi_handle); in ice_sched_save_vsi_bw()
3054 return -EINVAL; in ice_sched_save_vsi_bw()
3057 ice_set_clear_cir_bw(&vsi_ctx->sched.bw_t_info[tc], bw); in ice_sched_save_vsi_bw()
3060 ice_set_clear_eir_bw(&vsi_ctx->sched.bw_t_info[tc], bw); in ice_sched_save_vsi_bw()
3063 ice_set_clear_shared_bw(&vsi_ctx->sched.bw_t_info[tc], bw); in ice_sched_save_vsi_bw()
3066 return -EINVAL; in ice_sched_save_vsi_bw()
3072 * ice_sched_calc_wakeup - calculate RL profile wakeup parameter
3086 wakeup_int = div64_long(hw->psm_clk_freq, bytes_per_sec); in ice_sched_calc_wakeup()
3095 hw->psm_clk_freq, bytes_per_sec); in ice_sched_calc_wakeup()
3098 wakeup_f = wakeup_a - wakeup_b; in ice_sched_calc_wakeup()
3114 * ice_sched_bw_to_rl_profile - convert BW to profile parameters
3126 int status = -EINVAL; in ice_sched_bw_to_rl_profile()
3143 ts_rate = div64_long((s64)hw->psm_clk_freq, in ice_sched_bw_to_rl_profile()
3168 profile->rl_multiply = cpu_to_le16(mv); in ice_sched_bw_to_rl_profile()
3169 profile->wake_up_calc = cpu_to_le16(wm); in ice_sched_bw_to_rl_profile()
3170 profile->rl_encode = cpu_to_le16(encode); in ice_sched_bw_to_rl_profile()
3173 status = -ENOENT; in ice_sched_bw_to_rl_profile()
3180 * ice_sched_add_rl_profile - add RL profile
3182 * @rl_type: type of rate limit BW - min, max, or shared
3183 * @bw: bandwidth in Kbps - kilobits per sec
3203 if (!pi || layer_num >= pi->hw->num_tx_sched_layers) in ice_sched_add_rl_profile()
3219 hw = pi->hw; in ice_sched_add_rl_profile()
3220 list_for_each_entry(rl_prof_elem, &pi->rl_prof_list[layer_num], in ice_sched_add_rl_profile()
3222 if ((rl_prof_elem->profile.flags & ICE_AQC_RL_PROFILE_TYPE_M) == in ice_sched_add_rl_profile()
3223 profile_type && rl_prof_elem->bw == bw) in ice_sched_add_rl_profile()
3234 status = ice_sched_bw_to_rl_profile(hw, bw, &rl_prof_elem->profile); in ice_sched_add_rl_profile()
3238 rl_prof_elem->bw = bw; in ice_sched_add_rl_profile()
3240 rl_prof_elem->profile.level = layer_num + 1; in ice_sched_add_rl_profile()
3241 rl_prof_elem->profile.flags = profile_type; in ice_sched_add_rl_profile()
3242 rl_prof_elem->profile.max_burst_size = cpu_to_le16(hw->max_burst_size); in ice_sched_add_rl_profile()
3245 buf = &rl_prof_elem->profile; in ice_sched_add_rl_profile()
3251 /* Good entry - add it to the list */ in ice_sched_add_rl_profile()
3252 rl_prof_elem->prof_id_ref = 0; in ice_sched_add_rl_profile()
3253 list_add(&rl_prof_elem->list_entry, &pi->rl_prof_list[layer_num]); in ice_sched_add_rl_profile()
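
ice_sched_add_rl_profile() is a lookup-or-create: scan the layer's list for an entry with matching profile type and bandwidth, and only on a miss build a new profile, program it through the AQ, and add it to the list. Sketched with a plain singly linked list (program_hw() stands in for ice_aq_add_rl_profile()):

    #include <stddef.h>
    #include <stdlib.h>

    struct rl_prof {
        struct rl_prof *next;           /* kernel uses a list_head list_entry */
        unsigned int type, bw, ref;
    };

    /* Find or create the (type, bw) profile on one layer's list. */
    static struct rl_prof *add_rl_profile(struct rl_prof **head, unsigned int type,
                                          unsigned int bw,
                                          int (*program_hw)(struct rl_prof *))
    {
        struct rl_prof *p;

        for (p = *head; p; p = p->next)
            if (p->type == type && p->bw == bw)
                return p;               /* share with earlier users */
        p = calloc(1, sizeof(*p));
        if (!p)
            return NULL;
        p->type = type;
        p->bw = bw;
        if (program_hw(p)) {            /* AQ add failed */
            free(p);
            return NULL;
        }
        p->next = *head;                /* good entry - add it to the list */
        *head = p;
        return p;
    }
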
3262 * ice_sched_cfg_node_bw_lmt - configure node sched params
3264 * @node: sched node to configure
3277 buf = node->info; in ice_sched_cfg_node_bw_lmt()
3281 data->valid_sections |= ICE_AQC_ELEM_VALID_CIR; in ice_sched_cfg_node_bw_lmt()
3282 data->cir_bw.bw_profile_idx = cpu_to_le16(rl_prof_id); in ice_sched_cfg_node_bw_lmt()
3288 if (data->valid_sections & ICE_AQC_ELEM_VALID_SHARED) in ice_sched_cfg_node_bw_lmt()
3289 return -EIO; in ice_sched_cfg_node_bw_lmt()
3290 data->valid_sections |= ICE_AQC_ELEM_VALID_EIR; in ice_sched_cfg_node_bw_lmt()
3291 data->eir_bw.bw_profile_idx = cpu_to_le16(rl_prof_id); in ice_sched_cfg_node_bw_lmt()
3297 data->valid_sections &= ~ICE_AQC_ELEM_VALID_SHARED; in ice_sched_cfg_node_bw_lmt()
3298 data->srl_id = 0; /* clear SRL field */ in ice_sched_cfg_node_bw_lmt()
3301 data->valid_sections |= ICE_AQC_ELEM_VALID_EIR; in ice_sched_cfg_node_bw_lmt()
3302 data->eir_bw.bw_profile_idx = in ice_sched_cfg_node_bw_lmt()
3309 if ((data->valid_sections & ICE_AQC_ELEM_VALID_EIR) && in ice_sched_cfg_node_bw_lmt()
3310 (le16_to_cpu(data->eir_bw.bw_profile_idx) != in ice_sched_cfg_node_bw_lmt()
3312 return -EIO; in ice_sched_cfg_node_bw_lmt()
3314 data->valid_sections &= ~ICE_AQC_ELEM_VALID_EIR; in ice_sched_cfg_node_bw_lmt()
3316 data->valid_sections |= ICE_AQC_ELEM_VALID_SHARED; in ice_sched_cfg_node_bw_lmt()
3317 data->srl_id = cpu_to_le16(rl_prof_id); in ice_sched_cfg_node_bw_lmt()
3321 return -EINVAL; in ice_sched_cfg_node_bw_lmt()
3329 * ice_sched_get_node_rl_prof_id - get node's rate limit profile ID
3330 * @node: sched node
3343 data = &node->info.data; in ice_sched_get_node_rl_prof_id()
3346 if (data->valid_sections & ICE_AQC_ELEM_VALID_CIR) in ice_sched_get_node_rl_prof_id()
3347 rl_prof_id = le16_to_cpu(data->cir_bw.bw_profile_idx); in ice_sched_get_node_rl_prof_id()
3350 if (data->valid_sections & ICE_AQC_ELEM_VALID_EIR) in ice_sched_get_node_rl_prof_id()
3351 rl_prof_id = le16_to_cpu(data->eir_bw.bw_profile_idx); in ice_sched_get_node_rl_prof_id()
3354 if (data->valid_sections & ICE_AQC_ELEM_VALID_SHARED) in ice_sched_get_node_rl_prof_id()
3355 rl_prof_id = le16_to_cpu(data->srl_id); in ice_sched_get_node_rl_prof_id()
3365 * ice_sched_get_rl_prof_layer - selects rate limit profile creation layer
3367 * @rl_type: type of rate limit BW - min, max, or shared
3376 struct ice_hw *hw = pi->hw; in ice_sched_get_rl_prof_layer()
3378 if (layer_index >= hw->num_tx_sched_layers) in ice_sched_get_rl_prof_layer()
3382 if (hw->layer_info[layer_index].max_cir_rl_profiles) in ice_sched_get_rl_prof_layer()
3386 if (hw->layer_info[layer_index].max_eir_rl_profiles) in ice_sched_get_rl_prof_layer()
3393 if (hw->layer_info[layer_index].max_srl_profiles) in ice_sched_get_rl_prof_layer()
3395 else if (layer_index < hw->num_tx_sched_layers - 1 && in ice_sched_get_rl_prof_layer()
3396 hw->layer_info[layer_index + 1].max_srl_profiles) in ice_sched_get_rl_prof_layer()
3399 hw->layer_info[layer_index - 1].max_srl_profiles) in ice_sched_get_rl_prof_layer()
3400 return layer_index - 1; in ice_sched_get_rl_prof_layer()
3409 * ice_sched_get_srl_node - get shared rate limit node
3419 if (srl_layer > node->tx_sched_layer) in ice_sched_get_srl_node()
3420 return node->children[0]; in ice_sched_get_srl_node()
3421 else if (srl_layer < node->tx_sched_layer) in ice_sched_get_srl_node()
3425 return node->parent; in ice_sched_get_srl_node()
3431 * ice_sched_rm_rl_profile - remove RL profile ID
3448 if (layer_num >= pi->hw->num_tx_sched_layers) in ice_sched_rm_rl_profile()
3449 return -EINVAL; in ice_sched_rm_rl_profile()
3451 list_for_each_entry(rl_prof_elem, &pi->rl_prof_list[layer_num], in ice_sched_rm_rl_profile()
3453 if ((rl_prof_elem->profile.flags & ICE_AQC_RL_PROFILE_TYPE_M) == in ice_sched_rm_rl_profile()
3455 le16_to_cpu(rl_prof_elem->profile.profile_id) == in ice_sched_rm_rl_profile()
3457 if (rl_prof_elem->prof_id_ref) in ice_sched_rm_rl_profile()
3458 rl_prof_elem->prof_id_ref--; in ice_sched_rm_rl_profile()
3461 status = ice_sched_del_rl_profile(pi->hw, rl_prof_elem); in ice_sched_rm_rl_profile()
3462 if (status && status != -EBUSY) in ice_sched_rm_rl_profile()
3463 ice_debug(pi->hw, ICE_DBG_SCHED, "Remove rl profile failed\n"); in ice_sched_rm_rl_profile()
3466 if (status == -EBUSY) in ice_sched_rm_rl_profile()
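
Removal is the reference-counted inverse: drop the node's reference, then attempt the delete; -EBUSY from ice_sched_del_rl_profile() just means other nodes still hold references and is treated as success. In outline (del_profile() is a stand-in):

    #include <errno.h>

    struct rl_prof_ref { unsigned int prof_id_ref; };

    /* Drop one reference; del_profile() stands in for ice_sched_del_rl_profile(),
     * which fails with -EBUSY while prof_id_ref is nonzero.
     */
    static int rm_rl_profile(struct rl_prof_ref *p,
                             int (*del_profile)(struct rl_prof_ref *))
    {
        int status;

        if (p->prof_id_ref)
            p->prof_id_ref--;
        status = del_profile(p);        /* removes from HW and the list */
        if (status == -EBUSY)
            status = 0;                 /* still in use elsewhere - not an error */
        return status;
    }
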
3472 * ice_sched_set_node_bw_dflt - set node's bandwidth limit to default
3493 hw = pi->hw; in ice_sched_set_node_bw_dflt()
3509 return -EINVAL; in ice_sched_set_node_bw_dflt()
3527 * ice_sched_set_eir_srl_excl - set EIR/SRL exclusiveness
3560 node->info.data.valid_sections & ICE_AQC_ELEM_VALID_SHARED) { in ice_sched_set_eir_srl_excl()
3572 * ice_sched_set_node_bw - set node's bandwidth
3576 * @bw: bandwidth in Kbps - kilobits per sec
3588 struct ice_hw *hw = pi->hw; in ice_sched_set_node_bw()
3590 int status = -EINVAL; in ice_sched_set_node_bw()
3596 rl_prof_id = le16_to_cpu(rl_prof_info->profile.profile_id); in ice_sched_set_node_bw()
3607 rl_prof_info->prof_id_ref++; in ice_sched_set_node_bw()
3615 rl_prof_info->profile.flags & in ice_sched_set_node_bw()
3620 * ice_sched_set_node_priority - set node's priority
3623 * @priority: number 0-7 representing priority among siblings
3634 buf = node->info; in ice_sched_set_node_priority()
3637 data->valid_sections |= ICE_AQC_ELEM_VALID_GENERIC; in ice_sched_set_node_priority()
3638 data->generic |= FIELD_PREP(ICE_AQC_ELEM_GENERIC_PRIO_M, priority); in ice_sched_set_node_priority()
3640 return ice_sched_update_elem(pi->hw, node, &buf); in ice_sched_set_node_priority()
3644 * ice_sched_set_node_weight - set node's weight
3647 * @weight: number 1-200 representing weight for WFQ
3649 * This function sets the weight of the node for the WFQ algorithm.
3657 buf = node->info; in ice_sched_set_node_weight()
3660 data->valid_sections = ICE_AQC_ELEM_VALID_CIR | ICE_AQC_ELEM_VALID_EIR | in ice_sched_set_node_weight()
3662 data->cir_bw.bw_alloc = cpu_to_le16(weight); in ice_sched_set_node_weight()
3663 data->eir_bw.bw_alloc = cpu_to_le16(weight); in ice_sched_set_node_weight()
3665 data->generic |= FIELD_PREP(ICE_AQC_ELEM_GENERIC_SP_M, 0x0); in ice_sched_set_node_weight()
3667 return ice_sched_update_elem(pi->hw, node, &buf); in ice_sched_set_node_weight()
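
For WFQ, the weight is not a dedicated field: the same 1-200 value is written into both the CIR and EIR bw_alloc words, those sections are marked valid, and the strict-priority bits of the generic field are set to zero so arbitration happens by weight. A reduced sketch of the element update (struct layout and mask values are simplified stand-ins for ice_aqc_txsched_elem and its ICE_AQC_ELEM_* masks):

    #include <stdint.h>

    #define ELEM_VALID_CIR     0x1      /* stand-ins for ICE_AQC_ELEM_VALID_* */
    #define ELEM_VALID_EIR     0x2
    #define ELEM_VALID_GENERIC 0x4
    #define ELEM_GENERIC_SP    0x40     /* stand-in for ICE_AQC_ELEM_GENERIC_SP_M */

    struct txsched_elem {
        uint8_t  valid_sections;
        uint8_t  generic;
        uint16_t cir_bw_alloc, eir_bw_alloc;
    };

    /* WFQ weight lands in both bw_alloc words; the strict-priority field is
     * written as 0 (kernel: FIELD_PREP(ICE_AQC_ELEM_GENERIC_SP_M, 0x0)).
     */
    static void set_node_weight(struct txsched_elem *data, uint16_t weight)
    {
        data->valid_sections = ELEM_VALID_CIR | ELEM_VALID_EIR | ELEM_VALID_GENERIC;
        data->cir_bw_alloc = weight;    /* kernel: cpu_to_le16(weight) */
        data->eir_bw_alloc = weight;
        data->generic &= ~ELEM_GENERIC_SP;
    }
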
3671 * ice_sched_set_node_bw_lmt - set node's BW limit
3675 * @bw: bandwidth in Kbps - kilobits per sec
3691 return -EINVAL; in ice_sched_set_node_bw_lmt()
3692 hw = pi->hw; in ice_sched_set_node_bw_lmt()
3696 node->tx_sched_layer); in ice_sched_set_node_bw_lmt()
3697 if (layer_num >= hw->num_tx_sched_layers) in ice_sched_set_node_bw_lmt()
3698 return -EINVAL; in ice_sched_set_node_bw_lmt()
3704 return -EIO; in ice_sched_set_node_bw_lmt()
3720 * ice_sched_set_node_bw_dflt_lmt - set node's BW limit to default
3739 * ice_sched_validate_srl_node - Check node for SRL applicability
3740 * @node: sched node to configure
3755 if (sel_layer == node->tx_sched_layer || in ice_sched_validate_srl_node()
3756 ((sel_layer == node->tx_sched_layer + 1) && in ice_sched_validate_srl_node()
3757 node->num_children == 1) || in ice_sched_validate_srl_node()
3758 ((sel_layer == node->tx_sched_layer - 1) && in ice_sched_validate_srl_node()
3759 (node->parent && node->parent->num_children == 1))) in ice_sched_validate_srl_node()
3762 return -EIO; in ice_sched_validate_srl_node()
3766 * ice_sched_save_q_bw - save queue node's BW information
3769 * @bw: bandwidth in Kbps - kilobits per sec
3778 ice_set_clear_cir_bw(&q_ctx->bw_t_info, bw); in ice_sched_save_q_bw()
3781 ice_set_clear_eir_bw(&q_ctx->bw_t_info, bw); in ice_sched_save_q_bw()
3784 ice_set_clear_shared_bw(&q_ctx->bw_t_info, bw); in ice_sched_save_q_bw()
3787 return -EINVAL; in ice_sched_save_q_bw()
3793 * ice_sched_set_q_bw_lmt - sets queue BW limit
3809 int status = -EINVAL; in ice_sched_set_q_bw_lmt()
3811 if (!ice_is_vsi_valid(pi->hw, vsi_handle)) in ice_sched_set_q_bw_lmt()
3812 return -EINVAL; in ice_sched_set_q_bw_lmt()
3813 mutex_lock(&pi->sched_lock); in ice_sched_set_q_bw_lmt()
3814 q_ctx = ice_get_lan_q_ctx(pi->hw, vsi_handle, tc, q_handle); in ice_sched_set_q_bw_lmt()
3817 node = ice_sched_find_node_by_teid(pi->root, q_ctx->q_teid); in ice_sched_set_q_bw_lmt()
3819 ice_debug(pi->hw, ICE_DBG_SCHED, "Wrong q_teid\n"); in ice_sched_set_q_bw_lmt()
3824 if (node->info.data.elem_type != ICE_AQC_ELEM_TYPE_LEAF) in ice_sched_set_q_bw_lmt()
3832 node->tx_sched_layer); in ice_sched_set_q_bw_lmt()
3833 if (sel_layer >= pi->hw->num_tx_sched_layers) { in ice_sched_set_q_bw_lmt()
3834 status = -EINVAL; in ice_sched_set_q_bw_lmt()
3851 mutex_unlock(&pi->sched_lock); in ice_sched_set_q_bw_lmt()
3856 * ice_cfg_q_bw_lmt - configure queue BW limit
3875 * ice_cfg_q_bw_dflt_lmt - configure queue BW default limit
3893 * ice_sched_get_node_by_id_type - get node from ID type
3914 if (!ice_is_vsi_valid(pi->hw, vsi_handle)) in ice_sched_get_node_by_id_type()
3917 vsi_ctx = ice_get_vsi_ctx(pi->hw, vsi_handle); in ice_sched_get_node_by_id_type()
3920 node = vsi_ctx->sched.vsi_node[tc]; in ice_sched_get_node_by_id_type()
3941 * ice_sched_set_node_bw_lmt_per_tc - set node BW limit per TC
3958 int status = -EINVAL; in ice_sched_set_node_bw_lmt_per_tc()
3966 mutex_lock(&pi->sched_lock); in ice_sched_set_node_bw_lmt_per_tc()
3969 ice_debug(pi->hw, ICE_DBG_SCHED, "Wrong id, agg type, or tc\n"); in ice_sched_set_node_bw_lmt_per_tc()
3978 mutex_unlock(&pi->sched_lock); in ice_sched_set_node_bw_lmt_per_tc()
3983 * ice_cfg_vsi_bw_lmt_per_tc - configure VSI BW limit per TC
4003 mutex_lock(&pi->sched_lock); in ice_cfg_vsi_bw_lmt_per_tc()
4005 mutex_unlock(&pi->sched_lock); in ice_cfg_vsi_bw_lmt_per_tc()
4011 * ice_cfg_vsi_bw_dflt_lmt_per_tc - configure default VSI BW limit per TC
4031 mutex_lock(&pi->sched_lock); in ice_cfg_vsi_bw_dflt_lmt_per_tc()
4034 mutex_unlock(&pi->sched_lock); in ice_cfg_vsi_bw_dflt_lmt_per_tc()
4040 * ice_cfg_rl_burst_size - Set burst size value
4054 return -EINVAL; in ice_cfg_rl_burst_size()
4076 hw->max_burst_size = burst_size_to_prog; in ice_cfg_rl_burst_size()
4081 * ice_sched_replay_node_prio - re-configure node priority
4083 * @node: sched node to configure
4097 buf = node->info; in ice_sched_replay_node_prio()
4099 data->valid_sections |= ICE_AQC_ELEM_VALID_GENERIC; in ice_sched_replay_node_prio()
4100 data->generic = priority; in ice_sched_replay_node_prio()
4108 * ice_sched_replay_node_bw - replay node(s) BW
4110 * @node: sched node to configure
4120 struct ice_port_info *pi = hw->port_info; in ice_sched_replay_node_bw()
4121 int status = -EINVAL; in ice_sched_replay_node_bw()
4126 if (bitmap_empty(bw_t_info->bw_t_bitmap, ICE_BW_TYPE_CNT)) in ice_sched_replay_node_bw()
4128 if (test_bit(ICE_BW_TYPE_PRIO, bw_t_info->bw_t_bitmap)) { in ice_sched_replay_node_bw()
4130 bw_t_info->generic); in ice_sched_replay_node_bw()
4134 if (test_bit(ICE_BW_TYPE_CIR, bw_t_info->bw_t_bitmap)) { in ice_sched_replay_node_bw()
4136 bw_t_info->cir_bw.bw); in ice_sched_replay_node_bw()
4140 if (test_bit(ICE_BW_TYPE_CIR_WT, bw_t_info->bw_t_bitmap)) { in ice_sched_replay_node_bw()
4141 bw_alloc = bw_t_info->cir_bw.bw_alloc; in ice_sched_replay_node_bw()
4147 if (test_bit(ICE_BW_TYPE_EIR, bw_t_info->bw_t_bitmap)) { in ice_sched_replay_node_bw()
4149 bw_t_info->eir_bw.bw); in ice_sched_replay_node_bw()
4153 if (test_bit(ICE_BW_TYPE_EIR_WT, bw_t_info->bw_t_bitmap)) { in ice_sched_replay_node_bw()
4154 bw_alloc = bw_t_info->eir_bw.bw_alloc; in ice_sched_replay_node_bw()
4160 if (test_bit(ICE_BW_TYPE_SHARED, bw_t_info->bw_t_bitmap)) in ice_sched_replay_node_bw()
4162 bw_t_info->shared_bw); in ice_sched_replay_node_bw()
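
Replay walks the saved bw_t_bitmap and re-applies only the recorded types, in order: priority, CIR and its weight, EIR and its weight, then shared BW, stopping at the first failure. The dispatch skeleton (apply() stands in for the various ice_sched_cfg_node_* calls):

    #include <stdbool.h>

    enum { T_PRIO, T_CIR, T_CIR_WT, T_EIR, T_EIR_WT, T_SHARED, T_CNT };

    struct saved_bw {
        bool bit[T_CNT];                /* stands in for bw_t_bitmap */
        unsigned int prio, cir, cir_wt, eir, eir_wt, shared;
    };

    /* Re-apply each saved setting in order, stopping on the first failure,
     * as ice_sched_replay_node_bw() does with its test_bit() chain.
     */
    static int replay_node_bw(const struct saved_bw *s,
                              int (*apply)(int type, unsigned int val))
    {
        int status = 0;

        if (s->bit[T_PRIO])
            status = apply(T_PRIO, s->prio);
        if (!status && s->bit[T_CIR])
            status = apply(T_CIR, s->cir);
        if (!status && s->bit[T_CIR_WT])
            status = apply(T_CIR_WT, s->cir_wt);
        if (!status && s->bit[T_EIR])
            status = apply(T_EIR, s->eir);
        if (!status && s->bit[T_EIR_WT])
            status = apply(T_EIR_WT, s->eir_wt);
        if (!status && s->bit[T_SHARED])
            status = apply(T_SHARED, s->shared);
        return status;
    }
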
4167 * ice_sched_get_ena_tc_bitmap - get enabled TC bitmap
4191 * ice_sched_replay_agg - recreate aggregator node(s)
4200 struct ice_port_info *pi = hw->port_info; in ice_sched_replay_agg()
4203 mutex_lock(&pi->sched_lock); in ice_sched_replay_agg()
4204 list_for_each_entry(agg_info, &hw->agg_list, list_entry) in ice_sched_replay_agg()
4205 /* replay aggregator (re-create aggregator node) */ in ice_sched_replay_agg()
4206 if (!bitmap_equal(agg_info->tc_bitmap, agg_info->replay_tc_bitmap, in ice_sched_replay_agg()
4213 agg_info->replay_tc_bitmap, in ice_sched_replay_agg()
4215 status = ice_sched_cfg_agg(hw->port_info, in ice_sched_replay_agg()
4216 agg_info->agg_id, in ice_sched_replay_agg()
4222 agg_info->agg_id); in ice_sched_replay_agg()
4227 mutex_unlock(&pi->sched_lock); in ice_sched_replay_agg()
4231 * ice_sched_replay_agg_vsi_preinit - Agg/VSI replay pre-initialization
4239 struct ice_port_info *pi = hw->port_info; in ice_sched_replay_agg_vsi_preinit()
4242 mutex_lock(&pi->sched_lock); in ice_sched_replay_agg_vsi_preinit()
4243 list_for_each_entry(agg_info, &hw->agg_list, list_entry) { in ice_sched_replay_agg_vsi_preinit()
4246 agg_info->tc_bitmap[0] = 0; in ice_sched_replay_agg_vsi_preinit()
4247 list_for_each_entry(agg_vsi_info, &agg_info->agg_vsi_list, in ice_sched_replay_agg_vsi_preinit()
4249 agg_vsi_info->tc_bitmap[0] = 0; in ice_sched_replay_agg_vsi_preinit()
4251 mutex_unlock(&pi->sched_lock); in ice_sched_replay_agg_vsi_preinit()
4255 * ice_sched_replay_vsi_agg - replay aggregator & VSI to aggregator node(s)
4267 struct ice_port_info *pi = hw->port_info; in ice_sched_replay_vsi_agg()
4273 return -EINVAL; in ice_sched_replay_vsi_agg()
4276 return 0; /* Not present in list - default Agg case */ in ice_sched_replay_vsi_agg()
4279 return 0; /* Not present in list - default Agg case */ in ice_sched_replay_vsi_agg()
4280 ice_sched_get_ena_tc_bitmap(pi, agg_info->replay_tc_bitmap, in ice_sched_replay_vsi_agg()
4283 status = ice_sched_cfg_agg(hw->port_info, agg_info->agg_id, in ice_sched_replay_vsi_agg()
4289 ice_sched_get_ena_tc_bitmap(pi, agg_vsi_info->replay_tc_bitmap, in ice_sched_replay_vsi_agg()
4292 return ice_sched_assoc_vsi_to_agg(pi, agg_info->agg_id, vsi_handle, in ice_sched_replay_vsi_agg()
4297 * ice_replay_vsi_agg - replay VSI to aggregator node
4306 struct ice_port_info *pi = hw->port_info; in ice_replay_vsi_agg()
4309 mutex_lock(&pi->sched_lock); in ice_replay_vsi_agg()
4311 mutex_unlock(&pi->sched_lock); in ice_replay_vsi_agg()
4316 * ice_sched_replay_q_bw - replay queue type node BW
4328 q_node = ice_sched_find_node_by_teid(pi->root, q_ctx->q_teid); in ice_sched_replay_q_bw()
4330 return -EINVAL; in ice_sched_replay_q_bw()
4331 return ice_sched_replay_node_bw(pi->hw, q_node, &q_ctx->bw_t_info); in ice_sched_replay_q_bw()