Lines matching "tcam-based" in drivers/net/ethernet/intel/ice/ice_ddp.c
1 // SPDX-License-Identifier: GPL-2.0
10 * boost tcam entries. The metadata label names that match the following in ice_verify_pkg()
27 * ice_verify_pkg - verify package
42 if (pkg->pkg_format_ver.major != ICE_PKG_FMT_VER_MAJ || in ice_verify_pkg()
43 pkg->pkg_format_ver.minor != ICE_PKG_FMT_VER_MNR || in ice_verify_pkg()
44 pkg->pkg_format_ver.update != ICE_PKG_FMT_VER_UPD || in ice_verify_pkg()
45 pkg->pkg_format_ver.draft != ICE_PKG_FMT_VER_DFT) in ice_verify_pkg()
49 seg_count = le32_to_cpu(pkg->seg_count); in ice_verify_pkg()
59 u32 off = le32_to_cpu(pkg->seg_offset[i]); in ice_verify_pkg()
69 if (len < off + le32_to_cpu(seg->seg_size)) in ice_verify_pkg()
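The matches above outline ice_verify_pkg(): an exact comparison of the four package format-version fields, then a per-segment bounds walk checking that each seg_offset[i] and the segment's own seg_size stay inside the len bytes supplied. A minimal user-space sketch of that bounds walk, with the struct layouts simplified and byte-swapping (le32_to_cpu()) omitted; only the field names come from the matches:

    #include <stdint.h>
    #include <stddef.h>

    /* Simplified, hypothetical model of the package and segment headers. */
    struct pkg_hdr {
        uint32_t seg_count;
        uint32_t seg_offset[];      /* one offset per segment */
    };

    struct seg_hdr {
        uint32_t seg_type;
        uint32_t seg_size;
    };

    /* Return 0 only when every segment lies fully inside the len-byte image. */
    static int verify_segments(const struct pkg_hdr *pkg, size_t len)
    {
        for (uint32_t i = 0; i < pkg->seg_count; i++) {
            uint32_t off = pkg->seg_offset[i];
            const struct seg_hdr *seg;

            if ((size_t)off + sizeof(*seg) > len)   /* header must fit */
                return -1;
            seg = (const struct seg_hdr *)((const uint8_t *)pkg + off);
            if (len < (size_t)off + seg->seg_size)  /* whole segment must fit */
                return -1;
        }
        return 0;
    }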
77 * ice_free_seg - free package segment pointer
85 if (hw->pkg_copy) { in ice_free_seg()
86 devm_kfree(ice_hw_to_dev(hw), hw->pkg_copy); in ice_free_seg()
87 hw->pkg_copy = NULL; in ice_free_seg()
88 hw->pkg_size = 0; in ice_free_seg()
90 hw->seg = NULL; in ice_free_seg()
94 * ice_chk_pkg_version - check package version for compatibility with driver
104 if (pkg_ver->major > ICE_PKG_SUPP_VER_MAJ || in ice_chk_pkg_version()
105 (pkg_ver->major == ICE_PKG_SUPP_VER_MAJ && in ice_chk_pkg_version()
106 pkg_ver->minor > ICE_PKG_SUPP_VER_MNR)) in ice_chk_pkg_version()
108 else if (pkg_ver->major < ICE_PKG_SUPP_VER_MAJ || in ice_chk_pkg_version()
109 (pkg_ver->major == ICE_PKG_SUPP_VER_MAJ && in ice_chk_pkg_version()
110 pkg_ver->minor < ICE_PKG_SUPP_VER_MNR)) in ice_chk_pkg_version()
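Taken together, the two conditions carve the version space into three bands: newer than the supported major.minor, older than it, and the exact match in between, which is the only compatible case. A hedged sketch of that policy (the enum names here are placeholders, not the driver's ice_ddp_state values):

    enum pkg_compat { PKG_COMPATIBLE, PKG_TOO_NEW, PKG_TOO_OLD };

    static enum pkg_compat chk_version(unsigned int maj, unsigned int min,
                                       unsigned int supp_maj, unsigned int supp_min)
    {
        if (maj > supp_maj || (maj == supp_maj && min > supp_min))
            return PKG_TOO_NEW;     /* driver too old for this package */
        if (maj < supp_maj || (maj == supp_maj && min < supp_min))
            return PKG_TOO_OLD;     /* package too old for this driver */
        return PKG_COMPATIBLE;      /* exact supported major.minor */
    }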
128 hdr = (const struct ice_buf_hdr *)buf->buf; in ice_pkg_val_buf()
130 section_count = le16_to_cpu(hdr->section_count); in ice_pkg_val_buf()
134 data_end = le16_to_cpu(hdr->data_end); in ice_pkg_val_buf()
150 (ice_seg->device_table + le32_to_cpu(ice_seg->device_table_count)); in ice_find_buf_table()
152 return (__force struct ice_buf_table *)(nvms->vers + in ice_find_buf_table()
153 le32_to_cpu(nvms->table_count)); in ice_find_buf_table()
162 * call is made with the ice_seg parameter non-NULL; on subsequent calls,
172 state->buf_table = ice_find_buf_table(ice_seg); in ice_pkg_enum_buf()
173 if (!state->buf_table) in ice_pkg_enum_buf()
176 state->buf_idx = 0; in ice_pkg_enum_buf()
177 return ice_pkg_val_buf(state->buf_table->buf_array); in ice_pkg_enum_buf()
180 if (++state->buf_idx < le32_to_cpu(state->buf_table->buf_count)) in ice_pkg_enum_buf()
181 return ice_pkg_val_buf(state->buf_table->buf_array + in ice_pkg_enum_buf()
182 state->buf_idx); in ice_pkg_enum_buf()
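The comment above this group spells out the protocol shared by all three enum helpers in this file: the first call passes ice_seg non-NULL to prime the iterator, and every later call passes NULL so the state struct (buf_table, buf_idx) carries the walk forward. A caller-side sketch of that convention for the buffer iterator, assuming the driver's internal types:

    struct ice_pkg_enum state;
    struct ice_buf_hdr *buf;

    memset(&state, 0, sizeof(state));
    buf = ice_pkg_enum_buf(ice_seg, &state);    /* prime from the segment */
    while (buf) {
        /* ... consume one package buffer ... */
        buf = ice_pkg_enum_buf(NULL, &state);   /* advance buf_idx */
    }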
198 if (!ice_seg && !state->buf) in ice_pkg_advance_sect()
201 if (!ice_seg && state->buf) in ice_pkg_advance_sect()
202 if (++state->sect_idx < le16_to_cpu(state->buf->section_count)) in ice_pkg_advance_sect()
205 state->buf = ice_pkg_enum_buf(ice_seg, state); in ice_pkg_advance_sect()
206 if (!state->buf) in ice_pkg_advance_sect()
210 state->sect_idx = 0; in ice_pkg_advance_sect()
221 * ice segment. The first call is made with the ice_seg parameter non-NULL;
232 state->type = sect_type; in ice_pkg_enum_section()
238 while (state->buf->section_entry[state->sect_idx].type != in ice_pkg_enum_section()
239 cpu_to_le32(state->type)) in ice_pkg_enum_section()
244 offset = le16_to_cpu(state->buf->section_entry[state->sect_idx].offset); in ice_pkg_enum_section()
248 size = le16_to_cpu(state->buf->section_entry[state->sect_idx].size); in ice_pkg_enum_section()
256 state->sect_type = in ice_pkg_enum_section()
257 le32_to_cpu(state->buf->section_entry[state->sect_idx].type); in ice_pkg_enum_section()
260 state->sect = in ice_pkg_enum_section()
261 ((u8 *)state->buf) + in ice_pkg_enum_section()
262 le16_to_cpu(state->buf->section_entry[state->sect_idx].offset); in ice_pkg_enum_section()
264 return state->sect; in ice_pkg_enum_section()
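Callers of the section iterator follow the do/while shape used later in this file (see ice_get_prof_index_max() and ice_init_prof_result_bm()): NULL out ice_seg after the first pass so the walk continues across buffers until no section of the requested type remains. A sketch, using ICE_SID_FLD_VEC_SW as an example section type from this driver:

    struct ice_pkg_enum state;
    struct ice_seg *seg = hw->seg;
    void *sect;

    memset(&state, 0, sizeof(state));
    do {
        sect = ice_pkg_enum_section(seg, &state, ICE_SID_FLD_VEC_SW);
        seg = NULL;             /* later calls continue the same walk */
        if (sect) {
            /* ... handle one matching section ... */
        }
    } while (sect);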
276 * the ice segment. The first call is made with the ice_seg parameter non-NULL;
307 state->entry_idx = 0; in ice_pkg_enum_entry()
308 state->handler = handler; in ice_pkg_enum_entry()
310 state->entry_idx++; in ice_pkg_enum_entry()
313 if (!state->handler) in ice_pkg_enum_entry()
317 entry = state->handler(state->sect_type, state->sect, state->entry_idx, in ice_pkg_enum_entry()
324 state->entry_idx = 0; in ice_pkg_enum_entry()
325 entry = state->handler(state->sect_type, state->sect, in ice_pkg_enum_entry()
326 state->entry_idx, offset); in ice_pkg_enum_entry()
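The entry iterator layers a per-section handler on top of the section walk; ice_find_boost_entry() and ice_fill_hw_ptype() below are its in-file users. A caller-side sketch mirroring them, with the boost TCAM handler and section ID that appear in the matches:

    struct ice_pkg_enum state;
    struct ice_boost_tcam_entry *tcam;
    struct ice_seg *seg = hw->seg;

    memset(&state, 0, sizeof(state));
    do {
        tcam = ice_pkg_enum_entry(seg, &state, ICE_SID_RXPARSER_BOOST_TCAM,
                                  NULL, ice_boost_tcam_handler);
        seg = NULL;
        if (tcam) {
            /* ... inspect one boost TCAM entry, e.g. tcam->addr ... */
        }
    } while (tcam);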
350 if (index >= le16_to_cpu(fv_section->count)) in ice_sw_fv_handler()
358 *offset = le16_to_cpu(fv_section->base_offset) + index; in ice_sw_fv_handler()
359 return fv_section->fv + index; in ice_sw_fv_handler()
363 * ice_get_prof_index_max - get the max used profile index
381 if (!hw->seg) in ice_get_prof_index_max()
382 return -EINVAL; in ice_get_prof_index_max()
384 ice_seg = hw->seg; in ice_get_prof_index_max()
396 for (j = 0; j < hw->blk[ICE_BLK_SW].es.fvw; j++) in ice_get_prof_index_max()
397 if (fv->ew[j].prot_id != ICE_PROT_INVALID || in ice_get_prof_index_max()
398 fv->ew[j].off != ICE_FV_OFFSET_INVAL) in ice_get_prof_index_max()
407 hw->switch_info->max_used_prof_index = max_prof_index; in ice_get_prof_index_max()
413 * ice_get_ddp_pkg_state - get DDP pkg state after download
420 if (hw->pkg_ver.major == hw->active_pkg_ver.major && in ice_get_ddp_pkg_state()
421 hw->pkg_ver.minor == hw->active_pkg_ver.minor && in ice_get_ddp_pkg_state()
422 hw->pkg_ver.update == hw->active_pkg_ver.update && in ice_get_ddp_pkg_state()
423 hw->pkg_ver.draft == hw->active_pkg_ver.draft && in ice_get_ddp_pkg_state()
424 !memcmp(hw->pkg_name, hw->active_pkg_name, sizeof(hw->pkg_name))) { in ice_get_ddp_pkg_state()
429 } else if (hw->active_pkg_ver.major != ICE_PKG_SUPP_VER_MAJ || in ice_get_ddp_pkg_state()
430 hw->active_pkg_ver.minor != ICE_PKG_SUPP_VER_MNR) { in ice_get_ddp_pkg_state()
432 } else if (hw->active_pkg_ver.major == ICE_PKG_SUPP_VER_MAJ && in ice_get_ddp_pkg_state()
433 hw->active_pkg_ver.minor == ICE_PKG_SUPP_VER_MNR) { in ice_get_ddp_pkg_state()
441 * ice_init_pkg_regs - initialize additional package registers
450 /* setup Switch block input mask, which is 48-bits in two parts */ in ice_init_pkg_regs()
459 * @index: index of the Marker PType TCAM entry to be returned
460 * @offset: pointer to receive absolute offset, always 0 for ptype TCAM sections
463 * Handles enumeration of individual Marker PType TCAM entries.
480 if (index >= le16_to_cpu(marker_ptype->count)) in ice_marker_ptype_tcam_handler()
483 return marker_ptype->tcam + index; in ice_marker_ptype_tcam_handler()
494 if (hw->dvm_upd.count < ICE_DVM_MAX_ENTRIES) { in ice_add_dvm_hint()
495 hw->dvm_upd.tbl[hw->dvm_upd.count].boost_addr = val; in ice_add_dvm_hint()
496 hw->dvm_upd.tbl[hw->dvm_upd.count].enable = enable; in ice_add_dvm_hint()
497 hw->dvm_upd.count++; in ice_add_dvm_hint()
509 if (hw->tnl.count < ICE_TUNNEL_MAX_ENTRIES) { in ice_add_tunnel_hint()
520 * character ('0' - '7') will be located where our in ice_add_tunnel_hint()
523 if ((label_name[len] - '0') == hw->pf_id) { in ice_add_tunnel_hint()
524 hw->tnl.tbl[hw->tnl.count].type = tnls[i].type; in ice_add_tunnel_hint()
525 hw->tnl.tbl[hw->tnl.count].valid = false; in ice_add_tunnel_hint()
526 hw->tnl.tbl[hw->tnl.count].boost_addr = val; in ice_add_tunnel_hint()
527 hw->tnl.tbl[hw->tnl.count].port = 0; in ice_add_tunnel_hint()
528 hw->tnl.count++; in ice_add_tunnel_hint()
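The test at line 523 is the per-PF filter: tunnel labels end in a single digit, and only the label whose trailing character equals this function's PF ID is recorded in hw->tnl. A standalone sketch of that suffix check; the label name is illustrative, and len is assumed to already point just past the matched prefix:

    #include <stdbool.h>
    #include <stddef.h>

    /* Given a label such as "TNL_VXLAN_PF0" (name illustrative) and len set
     * to the length of the matched prefix, the PF digit sits at name[len].
     */
    static bool label_matches_pf(const char *name, size_t len,
                                 unsigned int pf_id)
    {
        char c = name[len];

        return c >= '0' && c <= '7' && (unsigned int)(c - '0') == pf_id;
    }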
560 if (index >= le16_to_cpu(labels->count)) in ice_label_enum_handler()
563 return labels->label + index; in ice_label_enum_handler()
592 *value = le16_to_cpu(label->value); in ice_enum_labels()
593 return label->name; in ice_enum_labels()
600 * @index: index of the boost TCAM entry to be returned
601 * @offset: pointer to receive absolute offset, always 0 for boost TCAM sections
604 * Handles enumeration of individual boost TCAM entries.
624 if (index >= le16_to_cpu(boost->count)) in ice_boost_tcam_handler()
627 return boost->tcam + index; in ice_boost_tcam_handler()
632 * @ice_seg: pointer to the ice segment (non-NULL)
633 * @addr: Boost TCAM address of entry to search for
636 * Finds a particular Boost TCAM entry and returns a pointer to that entry
643 struct ice_boost_tcam_entry *tcam; in ice_find_boost_entry() local
649 return -EINVAL; in ice_find_boost_entry()
652 tcam = ice_pkg_enum_entry(ice_seg, &state, in ice_find_boost_entry()
655 if (tcam && le16_to_cpu(tcam->addr) == addr) { in ice_find_boost_entry()
656 *entry = tcam; in ice_find_boost_entry()
661 } while (tcam); in ice_find_boost_entry()
664 return -EIO; in ice_find_boost_entry()
668 * ice_is_init_pkg_successful - check if DDP init was successful
700 buf->data_end = in ice_pkg_buf_alloc()
732 * ice_get_sw_prof_type - determine switch profile type
751 for (i = 0; i < hw->blk[ICE_BLK_SW].es.fvw; i++) { in ice_get_sw_prof_type()
753 if (fv->ew[i].prot_id == (u8)ICE_PROT_UDP_OF && in ice_get_sw_prof_type()
754 fv->ew[i].off == ICE_VNI_OFFSET) in ice_get_sw_prof_type()
758 if (fv->ew[i].prot_id == (u8)ICE_PROT_GRE_OF) in ice_get_sw_prof_type()
766 * ice_get_sw_fv_bitmap - Get switch field vector bitmap based on profile type
785 ice_seg = hw->seg; in ice_get_sw_fv_bitmap()
830 if (!lkups->n_val_words || !hw->seg) in ice_get_sw_fv_list()
831 return -EINVAL; in ice_get_sw_fv_list()
833 ice_seg = hw->seg; in ice_get_sw_fv_list()
849 for (i = 0; i < lkups->n_val_words; i++) { in ice_get_sw_fv_list()
852 for (j = 0; j < hw->blk[ICE_BLK_SW].es.fvw; j++) in ice_get_sw_fv_list()
853 if (fv->ew[j].prot_id == in ice_get_sw_fv_list()
854 lkups->fv_words[i].prot_id && in ice_get_sw_fv_list()
855 fv->ew[j].off == lkups->fv_words[i].off) in ice_get_sw_fv_list()
857 if (j >= hw->blk[ICE_BLK_SW].es.fvw) in ice_get_sw_fv_list()
859 if (i + 1 == lkups->n_val_words) { in ice_get_sw_fv_list()
864 fvl->fv_ptr = fv; in ice_get_sw_fv_list()
865 fvl->profile_id = offset; in ice_get_sw_fv_list()
866 list_add(&fvl->list_entry, fv_list); in ice_get_sw_fv_list()
874 return -EIO; in ice_get_sw_fv_list()
881 list_del(&fvl->list_entry); in ice_get_sw_fv_list()
885 return -ENOMEM; in ice_get_sw_fv_list()
889 * ice_init_prof_result_bm - Initialize the profile result index bitmap
900 if (!hw->seg) in ice_init_prof_result_bm()
903 ice_seg = hw->seg; in ice_init_prof_result_bm()
914 bitmap_zero(hw->switch_info->prof_res_bm[off], in ice_init_prof_result_bm()
922 if (fv->ew[i].prot_id == ICE_PROT_INVALID && in ice_init_prof_result_bm()
923 fv->ew[i].off == ICE_FV_OFFSET_INVAL) in ice_init_prof_result_bm()
924 set_bit(i, hw->switch_info->prof_res_bm[off]); in ice_init_prof_result_bm()
960 return -EINVAL; in ice_pkg_buf_reserve_section()
962 buf = (struct ice_buf_hdr *)&bld->buf; in ice_pkg_buf_reserve_section()
965 section_count = le16_to_cpu(buf->section_count); in ice_pkg_buf_reserve_section()
967 return -EIO; in ice_pkg_buf_reserve_section()
969 if (bld->reserved_section_table_entries + count > ICE_MAX_S_COUNT) in ice_pkg_buf_reserve_section()
970 return -EIO; in ice_pkg_buf_reserve_section()
971 bld->reserved_section_table_entries += count; in ice_pkg_buf_reserve_section()
973 data_end = le16_to_cpu(buf->data_end) + in ice_pkg_buf_reserve_section()
975 buf->data_end = cpu_to_le16(data_end); in ice_pkg_buf_reserve_section()
1001 buf = (struct ice_buf_hdr *)&bld->buf; in ice_pkg_buf_alloc_section()
1004 data_end = le16_to_cpu(buf->data_end); in ice_pkg_buf_alloc_section()
1013 sect_count = le16_to_cpu(buf->section_count); in ice_pkg_buf_alloc_section()
1014 if (sect_count < bld->reserved_section_table_entries) { in ice_pkg_buf_alloc_section()
1017 buf->section_entry[sect_count].offset = cpu_to_le16(data_end); in ice_pkg_buf_alloc_section()
1018 buf->section_entry[sect_count].size = cpu_to_le16(size); in ice_pkg_buf_alloc_section()
1019 buf->section_entry[sect_count].type = cpu_to_le32(type); in ice_pkg_buf_alloc_section()
1022 buf->data_end = cpu_to_le16(data_end); in ice_pkg_buf_alloc_section()
1024 buf->section_count = cpu_to_le16(sect_count + 1); in ice_pkg_buf_alloc_section()
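These two functions are the build-side pair: reserve grows only the section table (bumping data_end past the new table entries), and alloc_section then claims the next size bytes at data_end and fills in the next table slot. A hedged sketch of the usual call sequence, assuming the helpers are visible to the caller and using a boost TCAM section as the example type:

    static int example_build(struct ice_hw *hw, u16 size)
    {
        struct ice_buf_build *bld;
        void *sect;
        int err;

        bld = ice_pkg_buf_alloc(hw);                    /* fresh build buffer */
        if (!bld)
            return -ENOMEM;

        err = ice_pkg_buf_reserve_section(bld, 1);      /* one table slot */
        if (err)
            goto out;

        sect = ice_pkg_buf_alloc_section(bld, ICE_SID_RXPARSER_BOOST_TCAM,
                                         size);
        if (!sect) {
            err = -ENOSPC;
            goto out;
        }
        /* ... fill the section, then hand ice_pkg_buf(bld) to the
         * download/update path ...
         */
    out:
        ice_pkg_buf_free(hw, bld);
        return err;
    }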
1075 * least one active section - otherwise, the buffer is not legal and should
1086 buf = (struct ice_buf_hdr *)&bld->buf; in ice_pkg_buf_get_active_sections()
1087 return le16_to_cpu(buf->section_count); in ice_pkg_buf_get_active_sections()
1101 return &bld->buf; in ice_pkg_buf()
1129 * 0 - Means the caller has acquired the global config lock
1131 * -EALREADY - Indicates another driver has already written the
1146 else if (status == -EALREADY) in ice_acquire_global_cfg_lock()
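The two return codes called out above shape the whole download path: 0 means this PF owns the download, while -EALREADY means another PF already wrote the package and the caller should report it as already loaded rather than fail (the matches in ice_dwnld_cfg_bufs() below show the same check). A caller-side sketch:

    status = ice_acquire_global_cfg_lock(hw, ICE_RES_WRITE);
    if (status == -EALREADY)        /* another PF already wrote the package */
        return ICE_DDP_PKG_ALREADY_LOADED;
    if (status)
        return ice_map_aq_err_to_ddp_state(hw->adminq.sq_last_status);

    /* ... download the buffers, then ... */
    ice_release_global_cfg_lock(hw);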
1196 cmd->flags |= ICE_AQC_DOWNLOAD_PKG_LAST_BUF; in ice_aq_download_pkg()
1199 if (status == -EIO) { in ice_aq_download_pkg()
1205 *error_offset = le32_to_cpu(resp->error_offset); in ice_aq_download_pkg()
1207 *error_info = le32_to_cpu(resp->error_info); in ice_aq_download_pkg()
1221 if (idx < le32_to_cpu(pkg_hdr->seg_count)) in ice_get_pkg_seg_by_idx()
1224 le32_to_cpu(pkg_hdr->seg_offset[idx])); in ice_get_pkg_seg_by_idx()
1230 * ice_is_signing_seg_at_idx - determine if segment is a signing segment
1242 return le32_to_cpu(seg->seg_type) == SEGMENT_TYPE_SIGNING; in ice_is_signing_seg_at_idx()
1265 if (seg && le32_to_cpu(seg->seg_id) == seg_id && in ice_is_signing_seg_type_at_idx()
1266 le32_to_cpu(seg->sign_type) == sign_type) in ice_is_signing_seg_type_at_idx()
1273 * ice_is_buffer_metadata - determine if package buffer is a metadata buffer
1278 if (le32_to_cpu(buf->section_entry[0].type) & ICE_METADATA_BUF) in ice_is_buffer_metadata()
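A buffer whose first section entry carries the ICE_METADATA_BUF type bit holds metadata for the driver itself and is never sent to firmware; the download loops at lines 1337 and 1579 apply the same test to skip such buffers. In-loop form, roughly:

    for (i = start; i < start + count; i++) {
        struct ice_buf_hdr *bh = (struct ice_buf_hdr *)(bufs + i);

        /* metadata buffers are consumed by the driver, not downloaded */
        if (le32_to_cpu(bh->section_entry[0].type) & ICE_METADATA_BUF)
            continue;

        /* ... queue this buffer to ice_aq_download_pkg() ... */
    }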
1337 if (le32_to_cpu(bh->section_entry[0].type) & ICE_METADATA_BUF) in ice_dwnld_cfg_bufs_no_lock()
1354 if (hw->adminq.sq_last_status != ICE_AQ_RC_ENOSEC && in ice_dwnld_cfg_bufs_no_lock()
1355 hw->adminq.sq_last_status != ICE_AQ_RC_EBADSIG) in ice_dwnld_cfg_bufs_no_lock()
1375 err = hw->adminq.sq_last_status; in ice_dwnld_cfg_bufs_no_lock()
1388 * ice_download_pkg_sig_seg - download a signature segment
1395 return ice_dwnld_cfg_bufs_no_lock(hw, seg->buf_tbl.buf_array, 0, in ice_download_pkg_sig_seg()
1396 le32_to_cpu(seg->buf_tbl.buf_count), in ice_download_pkg_sig_seg()
1401 * ice_download_pkg_config_seg - download a config segment
1423 buf_count = le32_to_cpu(bufs->buf_count); in ice_download_pkg_config_seg()
1428 return ice_dwnld_cfg_bufs_no_lock(hw, bufs->buf_array, start, count, in ice_download_pkg_config_seg()
1433 * ice_dwnld_sign_and_cfg_segs - download a signing segment and config segment
1456 count = le32_to_cpu(seg->signed_buf_count); in ice_dwnld_sign_and_cfg_segs()
1461 conf_idx = le32_to_cpu(seg->signed_seg_idx); in ice_dwnld_sign_and_cfg_segs()
1462 start = le32_to_cpu(seg->signed_buf_start); in ice_dwnld_sign_and_cfg_segs()
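Stitched together, the matches give the signed-download order: push the signature segment's buffers first, then use the signing segment's three bookkeeping fields to locate the slice of the referenced config segment that the signature actually covers. A hedged outline, error paths trimmed:

    status = ice_download_pkg_sig_seg(hw, seg);     /* signature buffers first */
    if (status)
        goto exit;

    count    = le32_to_cpu(seg->signed_buf_count);  /* buffers covered */
    conf_idx = le32_to_cpu(seg->signed_seg_idx);    /* which config segment */
    start    = le32_to_cpu(seg->signed_buf_start);  /* first covered buffer */

    status = ice_download_pkg_config_seg(hw, pkg_hdr, conf_idx, start, count);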
1472 * ice_match_signing_seg - determine if a matching signing segment exists
1482 for (i = 0; i < le32_to_cpu(pkg_hdr->seg_count); i++) { in ice_match_signing_seg()
1492 * ice_post_dwnld_pkg_actions - perform post-download package actions
1520 enum ice_aq_err aq_err = hw->adminq.sq_last_status; in ice_download_pkg_with_sig_seg()
1525 ice_debug(hw, ICE_DBG_INIT, "Segment ID %d\n", hw->pkg_seg_id); in ice_download_pkg_with_sig_seg()
1526 ice_debug(hw, ICE_DBG_INIT, "Signature type %d\n", hw->pkg_sign_type); in ice_download_pkg_with_sig_seg()
1530 if (status == -EALREADY) in ice_download_pkg_with_sig_seg()
1537 for (i = 0; i < le32_to_cpu(pkg_hdr->seg_count); i++) { in ice_download_pkg_with_sig_seg()
1538 if (!ice_is_signing_seg_type_at_idx(pkg_hdr, i, hw->pkg_seg_id, in ice_download_pkg_with_sig_seg()
1539 hw->pkg_sign_type)) in ice_download_pkg_with_sig_seg()
1579 if (le32_to_cpu(bh->section_entry[0].type) & ICE_METADATA_BUF) in ice_dwnld_cfg_bufs()
1584 if (status == -EALREADY) in ice_dwnld_cfg_bufs()
1586 return ice_map_aq_err_to_ddp_state(hw->adminq.sq_last_status); in ice_dwnld_cfg_bufs()
1611 ice_seg->hdr.seg_format_ver.major, in ice_download_pkg_without_sig_seg()
1612 ice_seg->hdr.seg_format_ver.minor, in ice_download_pkg_without_sig_seg()
1613 ice_seg->hdr.seg_format_ver.update, in ice_download_pkg_without_sig_seg()
1614 ice_seg->hdr.seg_format_ver.draft); in ice_download_pkg_without_sig_seg()
1617 le32_to_cpu(ice_seg->hdr.seg_type), in ice_download_pkg_without_sig_seg()
1618 le32_to_cpu(ice_seg->hdr.seg_size), ice_seg->hdr.seg_id); in ice_download_pkg_without_sig_seg()
1623 le32_to_cpu(ice_buf_tbl->buf_count)); in ice_download_pkg_without_sig_seg()
1625 return ice_dwnld_cfg_bufs(hw, ice_buf_tbl->buf_array, in ice_download_pkg_without_sig_seg()
1626 le32_to_cpu(ice_buf_tbl->buf_count)); in ice_download_pkg_without_sig_seg()
1643 if (hw->pkg_has_signing_seg) in ice_download_pkg()
1703 cmd->flags |= ICE_AQC_DOWNLOAD_PKG_LAST_BUF; in ice_aq_update_pkg()
1706 if (status == -EIO) { in ice_aq_update_pkg()
1712 *error_offset = le32_to_cpu(resp->error_offset); in ice_aq_update_pkg()
1714 *error_info = le32_to_cpu(resp->error_info); in ice_aq_update_pkg()
1756 status = ice_aq_update_pkg(hw, bh, le16_to_cpu(bh->data_end), in ice_update_pkg_no_lock()
1810 pkg_hdr->pkg_format_ver.major, pkg_hdr->pkg_format_ver.minor, in ice_find_seg_in_pkg()
1811 pkg_hdr->pkg_format_ver.update, in ice_find_seg_in_pkg()
1812 pkg_hdr->pkg_format_ver.draft); in ice_find_seg_in_pkg()
1815 for (i = 0; i < le32_to_cpu(pkg_hdr->seg_count); i++) { in ice_find_seg_in_pkg()
1818 seg = (void *)pkg_hdr + le32_to_cpu(pkg_hdr->seg_offset[i]); in ice_find_seg_in_pkg()
1820 if (le32_to_cpu(seg->seg_type) == seg_type) in ice_find_seg_in_pkg()
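The lookup itself is plain pointer arithmetic over the header's offset table, returning the first segment whose seg_type matches. Typical call, mirroring how ice_init_pkg_info() and ice_chk_pkg_compat() below use it:

    struct ice_seg *seg;

    seg = (struct ice_seg *)ice_find_seg_in_pkg(hw, hw->pkg_seg_id, pkg_hdr);
    if (!seg) {
        /* the package carries no ICE segment for this device family */
    }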
1828 * ice_has_signing_seg - determine if package has a signing segment
1843 * ice_get_pkg_segment_id - get correct package segment id, based on device
1865 * ice_get_pkg_sign_type - get package segment sign type, based on device
1889 * ice_get_signing_req - get correct package requirements, based on device
1894 hw->pkg_seg_id = ice_get_pkg_segment_id(hw->mac_type); in ice_get_signing_req()
1895 hw->pkg_sign_type = ice_get_pkg_sign_type(hw->mac_type); in ice_get_signing_req()
1913 hw->pkg_has_signing_seg = ice_has_signing_seg(hw, pkg_hdr); in ice_init_pkg_info()
1917 hw->pkg_seg_id); in ice_init_pkg_info()
1920 ice_find_seg_in_pkg(hw, hw->pkg_seg_id, pkg_hdr); in ice_init_pkg_info()
1936 hw->pkg_ver = meta->ver; in ice_init_pkg_info()
1937 memcpy(hw->pkg_name, meta->name, sizeof(meta->name)); in ice_init_pkg_info()
1940 meta->ver.major, meta->ver.minor, meta->ver.update, in ice_init_pkg_info()
1941 meta->ver.draft, meta->name); in ice_init_pkg_info()
1943 hw->ice_seg_fmt_ver = seg_hdr->seg_format_ver; in ice_init_pkg_info()
1944 memcpy(hw->ice_seg_id, seg_hdr->seg_id, sizeof(hw->ice_seg_id)); in ice_init_pkg_info()
1947 seg_hdr->seg_format_ver.major, in ice_init_pkg_info()
1948 seg_hdr->seg_format_ver.minor, in ice_init_pkg_info()
1949 seg_hdr->seg_format_ver.update, in ice_init_pkg_info()
1950 seg_hdr->seg_format_ver.draft, seg_hdr->seg_id); in ice_init_pkg_info()
1976 for (i = 0; i < le32_to_cpu(pkg_info->count); i++) { in ice_get_pkg_info()
1981 if (pkg_info->pkg_info[i].is_active) { in ice_get_pkg_info()
1983 hw->active_pkg_ver = pkg_info->pkg_info[i].ver; in ice_get_pkg_info()
1984 hw->active_track_id = in ice_get_pkg_info()
1985 le32_to_cpu(pkg_info->pkg_info[i].track_id); in ice_get_pkg_info()
1986 memcpy(hw->active_pkg_name, pkg_info->pkg_info[i].name, in ice_get_pkg_info()
1987 sizeof(pkg_info->pkg_info[i].name)); in ice_get_pkg_info()
1988 hw->active_pkg_in_nvm = pkg_info->pkg_info[i].is_in_nvm; in ice_get_pkg_info()
1990 if (pkg_info->pkg_info[i].is_active_at_boot) in ice_get_pkg_info()
1992 if (pkg_info->pkg_info[i].is_modified) in ice_get_pkg_info()
1994 if (pkg_info->pkg_info[i].is_in_nvm) in ice_get_pkg_info()
1998 pkg_info->pkg_info[i].ver.major, in ice_get_pkg_info()
1999 pkg_info->pkg_info[i].ver.minor, in ice_get_pkg_info()
2000 pkg_info->pkg_info[i].ver.update, in ice_get_pkg_info()
2001 pkg_info->pkg_info[i].ver.draft, in ice_get_pkg_info()
2002 pkg_info->pkg_info[i].name, flags); in ice_get_pkg_info()
2027 state = ice_chk_pkg_version(&hw->pkg_ver); in ice_chk_pkg_compat()
2034 *seg = (struct ice_seg *)ice_find_seg_in_pkg(hw, hw->pkg_seg_id, in ice_chk_pkg_compat()
2045 for (i = 0; i < le32_to_cpu(pkg->count); i++) { in ice_chk_pkg_compat()
2047 if (!pkg->pkg_info[i].is_in_nvm) in ice_chk_pkg_compat()
2049 if ((*seg)->hdr.seg_format_ver.major != in ice_chk_pkg_compat()
2050 pkg->pkg_info[i].ver.major || in ice_chk_pkg_compat()
2051 (*seg)->hdr.seg_format_ver.minor > in ice_chk_pkg_compat()
2052 pkg->pkg_info[i].ver.minor) { in ice_chk_pkg_compat()
2067 * @ice_seg: pointer to the package segment to scan (non-NULL) in ice_init_pkg_hints()
2081 memset(&hw->tnl, 0, sizeof(hw->tnl)); in ice_init_pkg_hints()
2106 /* Cache the appropriate boost TCAM entry pointers for tunnels */ in ice_init_pkg_hints()
2107 for (i = 0; i < hw->tnl.count; i++) { in ice_init_pkg_hints()
2108 ice_find_boost_entry(ice_seg, hw->tnl.tbl[i].boost_addr, in ice_init_pkg_hints()
2109 &hw->tnl.tbl[i].boost_entry); in ice_init_pkg_hints()
2110 if (hw->tnl.tbl[i].boost_entry) { in ice_init_pkg_hints()
2111 hw->tnl.tbl[i].valid = true; in ice_init_pkg_hints()
2112 if (hw->tnl.tbl[i].type < __TNL_TYPE_CNT) in ice_init_pkg_hints()
2113 hw->tnl.valid_count[hw->tnl.tbl[i].type]++; in ice_init_pkg_hints()
2117 /* Cache the appropriate boost TCAM entry pointers for DVM and SVM */ in ice_init_pkg_hints()
2118 for (i = 0; i < hw->dvm_upd.count; i++) in ice_init_pkg_hints()
2119 ice_find_boost_entry(ice_seg, hw->dvm_upd.tbl[i].boost_addr, in ice_init_pkg_hints()
2120 &hw->dvm_upd.tbl[i].boost_entry); in ice_init_pkg_hints()
2124 * ice_fill_hw_ptype - fill the enabled PTYPE bit information
2129 struct ice_marker_ptype_tcam_entry *tcam; in ice_fill_hw_ptype() local
2130 struct ice_seg *seg = hw->seg; in ice_fill_hw_ptype()
2133 bitmap_zero(hw->hw_ptype, ICE_FLOW_PTYPE_MAX); in ice_fill_hw_ptype()
2140 tcam = ice_pkg_enum_entry(seg, &state, in ice_fill_hw_ptype()
2143 if (tcam && in ice_fill_hw_ptype()
2144 le16_to_cpu(tcam->addr) < ICE_MARKER_PTYPE_TCAM_ADDR_MAX && in ice_fill_hw_ptype()
2145 le16_to_cpu(tcam->ptype) < ICE_FLOW_PTYPE_MAX) in ice_fill_hw_ptype()
2146 set_bit(le16_to_cpu(tcam->ptype), hw->hw_ptype); in ice_fill_hw_ptype()
2149 } while (tcam); in ice_fill_hw_ptype()
2153 * ice_init_pkg - initialize/download package
2201 if (hw->pkg_has_signing_seg && in ice_init_pkg()
2202 !ice_match_signing_seg(pkg, hw->pkg_seg_id, hw->pkg_sign_type)) in ice_init_pkg()
2217 "package previously loaded - no work.\n"); in ice_init_pkg()
2231 hw->seg = seg; in ice_init_pkg()
2248 * ice_copy_and_init_pkg - initialize/download a copy of the package
2287 hw->pkg_copy = buf_copy; in ice_copy_and_init_pkg()
2288 hw->pkg_size = len; in ice_copy_and_init_pkg()
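The copy exists because the image handed to the driver is const and is released once probe finishes, while hw->seg keeps pointing into the package for the life of the device; the duplicate is stashed in hw->pkg_copy/hw->pkg_size and ultimately freed by ice_free_seg() above. A sketch of the expected caller, modeled on the driver's firmware-load path (the DDP file name is the well-known default; the surrounding details are assumptions):

    const struct firmware *firmware = NULL;
    enum ice_ddp_state state;

    if (request_firmware(&firmware, "intel/ice/ddp/ice.pkg", dev))
        return;     /* no package found; driver falls back to safe mode */

    /* copy so the DDP data outlives release_firmware() below */
    state = ice_copy_and_init_pkg(hw, firmware->data, firmware->size);
    release_firmware(firmware);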
2295 * ice_get_set_tx_topo - get or set Tx topology
2301 * @set: 0-get, 1-set topology
2318 cmd->set_flags = ICE_AQC_TX_TOPO_FLAGS_ISSUED; in ice_get_set_tx_topo()
2321 cmd->set_flags |= ICE_AQC_TX_TOPO_FLAGS_SRC_RAM | in ice_get_set_tx_topo()
2328 cmd->get_flags = ICE_AQC_TX_TOPO_GET_RAM; in ice_get_set_tx_topo()
2345 * ice_cfg_tx_topo - Initialize new Tx topology if available
2368 return -EINVAL; in ice_cfg_tx_topo()
2371 if (!hw->func_caps.common_cap.tx_sched_topo_comp_mode_en) { in ice_cfg_tx_topo()
2373 return -EOPNOTSUPP; in ice_cfg_tx_topo()
2378 return -ENOMEM; in ice_cfg_tx_topo()
2391 hw->num_tx_sched_layers == ICE_SCHED_9_LAYERS) { in ice_cfg_tx_topo()
2393 return -EEXIST; in ice_cfg_tx_topo()
2398 hw->num_tx_sched_layers == ICE_SCHED_5_LAYERS) { in ice_cfg_tx_topo()
2400 return -EEXIST; in ice_cfg_tx_topo()
2408 return -EEXIST; in ice_cfg_tx_topo()
2413 hw->num_tx_sched_layers == ICE_SCHED_5_LAYERS) { in ice_cfg_tx_topo()
2423 return -EIO; in ice_cfg_tx_topo()
2431 return -EIO; in ice_cfg_tx_topo()
2434 if (le32_to_cpu(seg->buf_table.buf_count) < ICE_MIN_S_COUNT) { in ice_cfg_tx_topo()
2436 seg->buf_table.buf_count); in ice_cfg_tx_topo()
2437 return -EIO; in ice_cfg_tx_topo()
2440 section = ice_pkg_val_buf(seg->buf_table.buf_array); in ice_cfg_tx_topo()
2441 if (!section || le32_to_cpu(section->section_entry[0].type) != in ice_cfg_tx_topo()
2444 return -EIO; in ice_cfg_tx_topo()
2447 size = le16_to_cpu(section->section_entry[0].size); in ice_cfg_tx_topo()
2448 offset = le16_to_cpu(section->section_entry[0].offset); in ice_cfg_tx_topo()
2451 return -EIO; in ice_cfg_tx_topo()
2457 return -EIO; in ice_cfg_tx_topo()
2479 /* Reset is in progress, re-init the HW again */ in ice_cfg_tx_topo()