Lines Matching +full:queue +full:pkt +full:rx

1 // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
3 * Copyright (C) 2012-2014, 2018-2024 Intel Corporation
4 * Copyright (C) 2013-2015 Intel Mobile Communications GmbH
5 * Copyright (C) 2016-2017 Intel Deutschland GmbH
12 #include "fw/notif-wait.h"
13 #include "iwl-trans.h"
14 #include "iwl-op-mode.h"
16 #include "iwl-debug.h"
17 #include "iwl-drv.h"
18 #include "iwl-modparams.h"
20 #include "iwl-phy-db.h"
21 #include "iwl-nvm-utils.h"
22 #include "iwl-csr.h"
23 #include "iwl-io.h"
24 #include "iwl-prph.h"
28 #include "time-event.h"
29 #include "fw-api.h"
32 #include "time-sync.h"
48 "power management scheme: 1-active, 2-balanced, 3-low power, default: 2");
92 IWL_DEBUG_INFO(mvm, "Radio type=0x%x-0x%x-0x%x\n", radio_cfg_type, in iwl_mvm_nic_config()
95 if (mvm->trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210) in iwl_mvm_nic_config()
99 reg_val = CSR_HW_REV_STEP_DASH(mvm->trans->hw_rev); in iwl_mvm_nic_config()
110 * TODO: Bits 7-8 of CSR in 8000 HW family and higher set the ADC in iwl_mvm_nic_config()
111 * sampling, and shouldn't be set to any non-zero value. in iwl_mvm_nic_config()
117 if (mvm->trans->trans_cfg->device_family < IWL_DEVICE_FAMILY_8000) in iwl_mvm_nic_config()
120 if (iwl_fw_dbg_is_d3_debug_enabled(&mvm->fwrt)) in iwl_mvm_nic_config()
123 iwl_trans_set_bits_mask(mvm->trans, CSR_HW_IF_CONFIG_REG, in iwl_mvm_nic_config()
138 if (!mvm->trans->cfg->apmg_not_supported) in iwl_mvm_nic_config()
139 iwl_set_bits_mask_prph(mvm->trans, APMG_PS_CTRL_REG, in iwl_mvm_nic_config()
147 struct iwl_rx_packet *pkt = rxb_addr(rxb); in iwl_mvm_rx_esr_mode_notif() local
148 struct iwl_mvm_esr_mode_notif *notif = (void *)pkt->data; in iwl_mvm_rx_esr_mode_notif()
152 if (IS_ERR_OR_NULL(vif) || iwl_mvm_vif_from_mac80211(vif)->esr_active) in iwl_mvm_rx_esr_mode_notif()
155 if (le32_to_cpu(notif->action) == ESR_RECOMMEND_ENTER) in iwl_mvm_rx_esr_mode_notif()
165 struct iwl_rx_packet *pkt = rxb_addr(rxb); in iwl_mvm_rx_esr_trans_fail_notif() local
166 struct iwl_esr_trans_fail_notif *notif = (void *)pkt->data; in iwl_mvm_rx_esr_trans_fail_notif()
168 u8 fw_link_id = le32_to_cpu(notif->link_id); in iwl_mvm_rx_esr_trans_fail_notif()
175 le32_to_cpu(notif->activation) ? "enter" : "exit", in iwl_mvm_rx_esr_trans_fail_notif()
176 le32_to_cpu(notif->link_id), in iwl_mvm_rx_esr_trans_fail_notif()
177 le32_to_cpu(notif->err_code)); in iwl_mvm_rx_esr_trans_fail_notif()
180 if (!le32_to_cpu(notif->activation)) { in iwl_mvm_rx_esr_trans_fail_notif()
187 "FW reported failure to activate EMLSR on a non-existing link: %d\n", in iwl_mvm_rx_esr_trans_fail_notif()
196 bss_conf->link_id); in iwl_mvm_rx_esr_trans_fail_notif()
202 struct iwl_rx_packet *pkt = rxb_addr(rxb); in iwl_mvm_rx_monitor_notif() local
203 struct iwl_datapath_monitor_notif *notif = (void *)pkt->data; in iwl_mvm_rx_monitor_notif()
208 if (notif->type != cpu_to_le32(IWL_DP_MON_NOTIF_TYPE_EXT_CCA)) in iwl_mvm_rx_monitor_notif()
211 vif = iwl_mvm_get_vif_by_macid(mvm, notif->mac_id); in iwl_mvm_rx_monitor_notif()
212 if (!vif || vif->type != NL80211_IFTYPE_STATION) in iwl_mvm_rx_monitor_notif()
215 if (!vif->bss_conf.chanreq.oper.chan || in iwl_mvm_rx_monitor_notif()
216 vif->bss_conf.chanreq.oper.chan->band != NL80211_BAND_2GHZ || in iwl_mvm_rx_monitor_notif()
217 vif->bss_conf.chanreq.oper.width < NL80211_CHAN_WIDTH_40) in iwl_mvm_rx_monitor_notif()
220 if (!vif->cfg.assoc) in iwl_mvm_rx_monitor_notif()
224 if (mvm->cca_40mhz_workaround) in iwl_mvm_rx_monitor_notif()
228 * We'll decrement this on disconnect - so set to 2 since we'll in iwl_mvm_rx_monitor_notif()
231 mvm->cca_40mhz_workaround = 2; in iwl_mvm_rx_monitor_notif()
235 * easiest choice - otherwise we'd have to do some major changes in iwl_mvm_rx_monitor_notif()
240 sband = mvm->hw->wiphy->bands[NL80211_BAND_2GHZ]; in iwl_mvm_rx_monitor_notif()
242 WARN_ON(!sband->ht_cap.ht_supported); in iwl_mvm_rx_monitor_notif()
243 WARN_ON(!(sband->ht_cap.cap & IEEE80211_HT_CAP_SUP_WIDTH_20_40)); in iwl_mvm_rx_monitor_notif()
244 sband->ht_cap.cap &= ~IEEE80211_HT_CAP_SUP_WIDTH_20_40; in iwl_mvm_rx_monitor_notif()
252 WARN_ON(!he->has_he); in iwl_mvm_rx_monitor_notif()
253 WARN_ON(!(he->he_cap_elem.phy_cap_info[0] & in iwl_mvm_rx_monitor_notif()
255 he->he_cap_elem.phy_cap_info[0] &= in iwl_mvm_rx_monitor_notif()
266 struct iwl_mvm *mvm = mvmvif->mvm; in iwl_mvm_update_link_smps()
272 if (mvm->fw_static_smps_request && in iwl_mvm_update_link_smps()
273 link_conf->chanreq.oper.width == NL80211_CHAN_WIDTH_160 && in iwl_mvm_update_link_smps()
274 link_conf->he_support) in iwl_mvm_update_link_smps()
278 link_conf->link_id); in iwl_mvm_update_link_smps()
298 struct iwl_rx_packet *pkt = rxb_addr(rxb); in iwl_mvm_rx_thermal_dual_chain_req() local
299 struct iwl_thermal_dual_chain_request *req = (void *)pkt->data; in iwl_mvm_rx_thermal_dual_chain_req()
304 req->event)) in iwl_mvm_rx_thermal_dual_chain_req()
311 mvm->fw_static_smps_request = in iwl_mvm_rx_thermal_dual_chain_req()
312 req->event == cpu_to_le32(THERMAL_DUAL_CHAIN_REQ_DISABLE); in iwl_mvm_rx_thermal_dual_chain_req()
313 ieee80211_iterate_interfaces(mvm->hw, in iwl_mvm_rx_thermal_dual_chain_req()
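
The thermal handler above caches the firmware's preference in mvm->fw_static_smps_request and then walks all interfaces so each link re-evaluates SMPS (see iwl_mvm_update_link_smps() earlier). A minimal sketch of the mac80211 iteration pattern it relies on; the callback name and body are illustrative, while ieee80211_iterate_interfaces() and IEEE80211_IFACE_ITER_NORMAL are the real mac80211 symbols:

static void sketch_dual_chain_iter(void *data, u8 *mac,
				   struct ieee80211_vif *vif)
{
	struct iwl_mvm *mvm = data;

	/* per-vif work, e.g. re-evaluating SMPS against
	 * mvm->fw_static_smps_request */
}

/* call site, shaped like the one above */
ieee80211_iterate_interfaces(mvm->hw, IEEE80211_IFACE_ITER_NORMAL,
			     sketch_dual_chain_iter, mvm);
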
319 * enum iwl_rx_handler_context: context for Rx handler
320 * @RX_HANDLER_SYNC : this means that it will be called in the Rx path
321 * which can't acquire mvm->mutex.
322 * @RX_HANDLER_ASYNC_LOCKED : If the handler needs to hold mvm->mutex
324 * it will be called from a worker with mvm->mutex held.
326 * mutex itself, it will be called from a worker without mvm->mutex held.
328 * and mvm->mutex. Will be handled with the wiphy_work queue infra
329 * instead of regular work queue.
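
These contexts drive how iwl_mvm_rx_common() (shown further down) dispatches notifications from its handler table. A hedged sketch of what one table entry carries, with field names taken from the rx_h-> accesses later in this file; the real table is built with wrapper macros, and the particular command/handler binding below is only an example:

/* one entry of the handler table; illustrative, not the driver's macro */
static const struct {
	u16 cmd_id;				/* WIDE_ID(group, opcode) */
	u16 min_size;				/* payload length floor */
	enum iwl_rx_handler_context context;	/* one of the values above */
	void (*fn)(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb);
} example_rx_handler = {
	.cmd_id = WIDE_ID(DATA_PATH_GROUP, MONITOR_NOTIF),
	.min_size = sizeof(struct iwl_datapath_monitor_notif),
	.context = RX_HANDLER_ASYNC_LOCKED,
	.fn = iwl_mvm_rx_monitor_notif,
};
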
803 const struct iwl_pwr_tx_backoff *backoff = mvm->cfg->pwr_tx_backoffs; in iwl_mvm_min_backoff()
809 iwl_bios_get_pwr_limit(&mvm->fwrt, &dflt_pwr_limit); in iwl_mvm_min_backoff()
811 while (backoff->pwr) { in iwl_mvm_min_backoff()
812 if (dflt_pwr_limit >= backoff->pwr) in iwl_mvm_min_backoff()
813 return backoff->backoff; in iwl_mvm_min_backoff()
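
iwl_mvm_min_backoff() above is a first-match walk over a table terminated by a zero pwr sentinel. The same walk as a standalone userspace program, with a made-up table (the driver's real struct iwl_pwr_tx_backoff lives in the config headers):

#include <stdio.h>

struct pwr_tx_backoff {
	unsigned int pwr;	/* power limit threshold */
	unsigned int backoff;	/* TX backoff to apply */
};

static unsigned int min_backoff(const struct pwr_tx_backoff *backoff,
				unsigned int dflt_pwr_limit)
{
	while (backoff->pwr) {
		if (dflt_pwr_limit >= backoff->pwr)
			return backoff->backoff;
		backoff++;
	}
	return 0;
}

int main(void)
{
	/* hypothetical table, thresholds in descending order */
	static const struct pwr_tx_backoff table[] = {
		{ .pwr = 1600, .backoff = 0 },
		{ .pwr = 1300, .backoff = 1 },
		{ .pwr = 900,  .backoff = 2 },
		{} /* sentinel */
	};

	printf("backoff=%u\n", min_backoff(table, 1400)); /* prints 1 */
	return 0;
}
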
831 rcu_dereference_protected(mvm->csa_tx_blocked_vif, in iwl_mvm_tx_unblock_dwork()
832 lockdep_is_held(&mvm->mutex)); in iwl_mvm_tx_unblock_dwork()
839 RCU_INIT_POINTER(mvm->csa_tx_blocked_vif, NULL); in iwl_mvm_tx_unblock_dwork()
846 mutex_lock(&mvm->mutex); in iwl_mvm_fwrt_dump_start()
853 mutex_unlock(&mvm->mutex); in iwl_mvm_fwrt_dump_end()
878 struct iwl_trans *trans = mvm->trans; in iwl_mvm_start_get_nvm()
881 if (trans->csme_own) { in iwl_mvm_start_get_nvm()
882 if (WARN(!mvm->mei_registered, in iwl_mvm_start_get_nvm()
886 mvm->mei_nvm_data = iwl_mei_get_nvm(); in iwl_mvm_start_get_nvm()
887 if (mvm->mei_nvm_data) { in iwl_mvm_start_get_nvm()
889 * mvm->mei_nvm_data is set and because of that, in iwl_mvm_start_get_nvm()
893 mvm->nvm_data = in iwl_mvm_start_get_nvm()
894 iwl_parse_mei_nvm_data(trans, trans->cfg, in iwl_mvm_start_get_nvm()
895 mvm->mei_nvm_data, in iwl_mvm_start_get_nvm()
896 mvm->fw, in iwl_mvm_start_get_nvm()
897 mvm->set_tx_ant, in iwl_mvm_start_get_nvm()
898 mvm->set_rx_ant); in iwl_mvm_start_get_nvm()
908 wiphy_lock(mvm->hw->wiphy); in iwl_mvm_start_get_nvm()
909 mutex_lock(&mvm->mutex); in iwl_mvm_start_get_nvm()
911 ret = iwl_trans_start_hw(mvm->trans); in iwl_mvm_start_get_nvm()
913 mutex_unlock(&mvm->mutex); in iwl_mvm_start_get_nvm()
914 wiphy_unlock(mvm->hw->wiphy); in iwl_mvm_start_get_nvm()
920 if (ret && ret != -ERFKILL) in iwl_mvm_start_get_nvm()
921 iwl_fw_dbg_error_collect(&mvm->fwrt, FW_DBG_TRIGGER_DRIVER); in iwl_mvm_start_get_nvm()
923 mvm->hw->wiphy->regulatory_flags |= REGULATORY_WIPHY_SELF_MANAGED; in iwl_mvm_start_get_nvm()
929 mutex_unlock(&mvm->mutex); in iwl_mvm_start_get_nvm()
930 wiphy_unlock(mvm->hw->wiphy); in iwl_mvm_start_get_nvm()
937 mvm->fw_product_reset = false; in iwl_mvm_start_get_nvm()
947 iwl_mvm_toggle_tx_ant(mvm, &mvm->mgmt_last_antenna_idx); in iwl_mvm_start_post_nvm()
953 mvm->hw_registered = true; in iwl_mvm_start_post_nvm()
957 wiphy_rfkill_set_hw_state_reason(mvm->hw->wiphy, in iwl_mvm_start_post_nvm()
958 mvm->mei_rfkill_blocked, in iwl_mvm_start_post_nvm()
982 switch (key->cipher) { in iwl_mvm_frob_txf_key_iter()
984 keydata = key->key; in iwl_mvm_frob_txf_key_iter()
985 keylen = key->keylen; in iwl_mvm_frob_txf_key_iter()
996 memset(txf->buf, 0xBB, txf->buflen); in iwl_mvm_frob_txf_key_iter()
1004 for (i = 0; i < txf->buflen; i++) { in iwl_mvm_frob_txf_key_iter()
1005 if (txf->buf[i] != keydata[match]) { in iwl_mvm_frob_txf_key_iter()
1011 memset(txf->buf + i - keylen, 0xAA, keylen); in iwl_mvm_frob_txf_key_iter()
1018 for (i = 0; match && i < keylen - match; i++) { in iwl_mvm_frob_txf_key_iter()
1019 if (txf->buf[i] != keydata[match]) in iwl_mvm_frob_txf_key_iter()
1023 memset(txf->buf, 0xAA, i + 1); in iwl_mvm_frob_txf_key_iter()
1024 memset(txf->buf + txf->buflen - matchend, 0xAA, in iwl_mvm_frob_txf_key_iter()
1044 ieee80211_iter_keys_rcu(mvm->hw, NULL, iwl_mvm_frob_txf_key_iter, &txf); in iwl_mvm_frob_txf()
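
The key-frobbing iterator above slides a match index through the TX FIFO snapshot and blanks any run of bytes equal to the key material, then makes a second pass because the FIFO can wrap: a match may start in the buffer's tail and finish at its head. A self-contained sketch of that two-pass scrub; the indexing is my own correct-by-construction variant, not a byte-for-byte copy of the driver:

#include <stdio.h>
#include <string.h>

static void scrub_key(unsigned char *buf, size_t buflen,
		      const unsigned char *key, size_t keylen)
{
	size_t i, match = 0, matchend;

	/* linear pass: a full match ends at i, so it began at i + 1 - keylen */
	for (i = 0; i < buflen; i++) {
		if (buf[i] != key[match]) {
			match = 0;
			continue;
		}
		if (++match == keylen) {
			memset(buf + i + 1 - keylen, 0xAA, keylen);
			match = 0;
		}
	}

	/* wrap-around pass: the first 'match' key bytes ended the linear
	 * pass at the buffer's tail; the remaining keylen - match bytes
	 * may continue at its head */
	matchend = match;
	for (i = 0; match && i < keylen - matchend; i++) {
		if (buf[i] != key[match])
			break;
		if (++match == keylen) {
			memset(buf, 0xAA, i + 1);
			memset(buf + buflen - matchend, 0xAA, matchend);
			break;
		}
	}
}

int main(void)
{
	unsigned char key[] = { 0x01, 0x02, 0x03, 0x04 };
	/* key bytes 0x01 0x02 at the tail wrap to 0x03 0x04 at the head */
	unsigned char fifo[] = { 0x03, 0x04, 0x55, 0x66, 0x01, 0x02 };
	size_t i;

	scrub_key(fifo, sizeof(fifo), key, sizeof(key));
	for (i = 0; i < sizeof(fifo); i++)
		printf("%02x ", fifo[i]);	/* aa aa 55 66 aa aa */
	printf("\n");
	return 0;
}
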
1058 if (hdr->group_id != LONG_GROUP) in iwl_mvm_frob_hcmd()
1061 switch (hdr->cmd) { in iwl_mvm_frob_hcmd()
1089 memset((u8 *)hcmd + frob_start, 0xAA, frob_end - frob_start); in iwl_mvm_frob_hcmd()
1098 switch (mvm->fwrt.cur_fw_img) { in iwl_mvm_frob_mem()
1105 excl = mvm->fw->dump_excl; in iwl_mvm_frob_mem()
1108 excl = mvm->fw->dump_excl_wowlan; in iwl_mvm_frob_mem()
1112 BUILD_BUG_ON(sizeof(mvm->fw->dump_excl) != in iwl_mvm_frob_mem()
1113 sizeof(mvm->fw->dump_excl_wowlan)); in iwl_mvm_frob_mem()
1115 for (i = 0; i < ARRAY_SIZE(mvm->fw->dump_excl); i++) { in iwl_mvm_frob_mem()
1133 memset((u8 *)mem + start - mem_addr, 0xAA, end - start); in iwl_mvm_frob_mem()
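
The memset above only makes sense once the exclusion window has been clamped to the dumped region, i.e. start/end are the intersection of the exclusion range and [mem_addr, mem_addr + buflen). A small standalone rendering of that clamp with hypothetical addresses:

#include <stdio.h>
#include <string.h>

#define min(a, b) ((a) < (b) ? (a) : (b))
#define max(a, b) ((a) > (b) ? (a) : (b))

static void frob_range(void *mem, unsigned int mem_addr, size_t buflen,
		       unsigned int excl_addr, unsigned int excl_size)
{
	unsigned int start = max(excl_addr, mem_addr);
	unsigned int end = min(excl_addr + excl_size,
			       mem_addr + (unsigned int)buflen);

	if (start < end)
		memset((unsigned char *)mem + start - mem_addr, 0xAA,
		       end - start);
}

int main(void)
{
	unsigned char dump[8] = { 1, 2, 3, 4, 5, 6, 7, 8 };
	size_t i;

	/* exclusion 0x1004..0x100a overlaps dump of 0x1000..0x1008 */
	frob_range(dump, 0x1000, sizeof(dump), 0x1004, 6);
	for (i = 0; i < sizeof(dump); i++)
		printf("%02x ", dump[i]);	/* 01 02 03 04 aa aa aa aa */
	printf("\n");
	return 0;
}
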
1152 prev_conn_info = rcu_dereference_protected(mvm->csme_conn_info, true); in iwl_mvm_me_conn_status()
1158 curr_conn_info->conn_info = *conn_info; in iwl_mvm_me_conn_status()
1160 rcu_assign_pointer(mvm->csme_conn_info, curr_conn_info); in iwl_mvm_me_conn_status()
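
The connection-info update above follows the classic RCU publish sequence: build a fresh object off to the side, swap the shared pointer with rcu_assign_pointer(), and let readers under rcu_read_lock() keep using the old copy until a grace period passes. A generic sketch of the whole sequence (the alloc/free halves are elided from the excerpt above; every name below is a placeholder):

struct conn_state {
	struct rcu_head rcu_head;
	int value;
};

/* writer side; 'lock' serializes updaters */
static void publish_conn_state(struct conn_state __rcu **shared,
			       spinlock_t *lock, int value)
{
	struct conn_state *old, *new;

	new = kzalloc(sizeof(*new), GFP_KERNEL);
	if (!new)
		return;
	new->value = value;

	spin_lock(lock);
	old = rcu_dereference_protected(*shared, lockdep_is_held(lock));
	rcu_assign_pointer(*shared, new);
	spin_unlock(lock);

	if (old)
		kfree_rcu(old, rcu_head);	/* freed after a grace period */
}
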
1174 mvm->mei_rfkill_blocked = blocked; in iwl_mvm_mei_rfkill()
1175 if (!mvm->hw_registered) in iwl_mvm_mei_rfkill()
1178 wiphy_rfkill_set_hw_state_reason(mvm->hw->wiphy, in iwl_mvm_mei_rfkill()
1179 mvm->mei_rfkill_blocked, in iwl_mvm_mei_rfkill()
1187 if (!mvm->hw_registered || !mvm->csme_vif) in iwl_mvm_mei_roaming_forbidden()
1190 iwl_mvm_send_roaming_forbidden_event(mvm, mvm->csme_vif, forbidden); in iwl_mvm_mei_roaming_forbidden()
1213 iwl_fw_flush_dumps(&mvm->fwrt); in iwl_mvm_sap_connected_wk()
1215 iwl_fw_runtime_free(&mvm->fwrt); in iwl_mvm_sap_connected_wk()
1216 iwl_phy_db_free(mvm->phy_db); in iwl_mvm_sap_connected_wk()
1217 kfree(mvm->scan_cmd); in iwl_mvm_sap_connected_wk()
1218 iwl_trans_op_mode_leave(mvm->trans); in iwl_mvm_sap_connected_wk()
1219 kfree(mvm->nvm_data); in iwl_mvm_sap_connected_wk()
1220 kfree(mvm->mei_nvm_data); in iwl_mvm_sap_connected_wk()
1222 ieee80211_free_hw(mvm->hw); in iwl_mvm_sap_connected_wk()
1229 if (!mvm->hw_registered) in iwl_mvm_mei_sap_connected()
1230 schedule_work(&mvm->sap_connected_wk); in iwl_mvm_mei_sap_connected()
1238 cfg80211_shutdown_all_interfaces(mvm->hw->wiphy); in iwl_mvm_mei_nic_stolen()
1255 if (ieee80211_vif_is_mld(vif) && mvmvif->authorized) in iwl_mvm_find_link_selection_vif()
1256 iwl_mvm_select_links(mvmvif->mvm, vif); in iwl_mvm_find_link_selection_vif()
1265 mutex_lock(&mvm->mutex); in iwl_mvm_trig_link_selection()
1266 ieee80211_iterate_active_interfaces(mvm->hw, in iwl_mvm_trig_link_selection()
1270 mutex_unlock(&mvm->mutex); in iwl_mvm_trig_link_selection()
1291 * index all over the driver - check that its value corresponds to the in iwl_op_mode_mvm_start()
1294 BUILD_BUG_ON(ARRAY_SIZE(mvm->fw_id_to_mac_id) != in iwl_op_mode_mvm_start()
1307 if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_BZ) in iwl_op_mode_mvm_start()
1312 hw->max_rx_aggregation_subframes = max_agg; in iwl_op_mode_mvm_start()
1314 if (cfg->max_tx_agg_size) in iwl_op_mode_mvm_start()
1315 hw->max_tx_aggregation_subframes = cfg->max_tx_agg_size; in iwl_op_mode_mvm_start()
1317 hw->max_tx_aggregation_subframes = max_agg; in iwl_op_mode_mvm_start()
1319 op_mode = hw->priv; in iwl_op_mode_mvm_start()
1322 mvm->dev = trans->dev; in iwl_op_mode_mvm_start()
1323 mvm->trans = trans; in iwl_op_mode_mvm_start()
1324 mvm->cfg = cfg; in iwl_op_mode_mvm_start()
1325 mvm->fw = fw; in iwl_op_mode_mvm_start()
1326 mvm->hw = hw; in iwl_op_mode_mvm_start()
1328 iwl_fw_runtime_init(&mvm->fwrt, trans, fw, &iwl_mvm_fwrt_ops, mvm, in iwl_op_mode_mvm_start()
1332 iwl_uefi_get_sgom_table(trans, &mvm->fwrt); in iwl_op_mode_mvm_start()
1335 mvm->init_status = 0; in iwl_op_mode_mvm_start()
1338 op_mode->ops = &iwl_mvm_ops_mq; in iwl_op_mode_mvm_start()
1339 trans->rx_mpdu_cmd_hdr_size = in iwl_op_mode_mvm_start()
1340 (trans->trans_cfg->device_family >= in iwl_op_mode_mvm_start()
1345 op_mode->ops = &iwl_mvm_ops; in iwl_op_mode_mvm_start()
1346 trans->rx_mpdu_cmd_hdr_size = in iwl_op_mode_mvm_start()
1349 if (WARN_ON(trans->num_rx_queues > 1)) in iwl_op_mode_mvm_start()
1353 mvm->fw_restart = iwlwifi_mod_params.fw_restart ? -1 : 0; in iwl_op_mode_mvm_start()
1354 mvm->bios_enable_puncturing = iwl_uefi_get_puncturing(&mvm->fwrt); in iwl_op_mode_mvm_start()
1358 * If we have the new TX/queue allocation API initialize them in iwl_op_mode_mvm_start()
1361 * time (e.g. P2P Device is optional), and if a dynamic queue in iwl_op_mode_mvm_start()
1366 mvm->aux_queue = IWL_MVM_INVALID_QUEUE; in iwl_op_mode_mvm_start()
1367 mvm->snif_queue = IWL_MVM_INVALID_QUEUE; in iwl_op_mode_mvm_start()
1368 mvm->probe_queue = IWL_MVM_INVALID_QUEUE; in iwl_op_mode_mvm_start()
1369 mvm->p2p_dev_queue = IWL_MVM_INVALID_QUEUE; in iwl_op_mode_mvm_start()
1371 mvm->aux_queue = IWL_MVM_DQA_AUX_QUEUE; in iwl_op_mode_mvm_start()
1372 mvm->snif_queue = IWL_MVM_DQA_INJECT_MONITOR_QUEUE; in iwl_op_mode_mvm_start()
1373 mvm->probe_queue = IWL_MVM_DQA_AP_PROBE_RESP_QUEUE; in iwl_op_mode_mvm_start()
1374 mvm->p2p_dev_queue = IWL_MVM_DQA_P2P_DEVICE_QUEUE; in iwl_op_mode_mvm_start()
1377 mvm->sf_state = SF_UNINIT; in iwl_op_mode_mvm_start()
1379 iwl_fw_set_current_image(&mvm->fwrt, IWL_UCODE_REGULAR); in iwl_op_mode_mvm_start()
1381 iwl_fw_set_current_image(&mvm->fwrt, IWL_UCODE_INIT); in iwl_op_mode_mvm_start()
1382 mvm->drop_bcn_ap_mode = true; in iwl_op_mode_mvm_start()
1384 mutex_init(&mvm->mutex); in iwl_op_mode_mvm_start()
1385 spin_lock_init(&mvm->async_handlers_lock); in iwl_op_mode_mvm_start()
1386 INIT_LIST_HEAD(&mvm->time_event_list); in iwl_op_mode_mvm_start()
1387 INIT_LIST_HEAD(&mvm->aux_roc_te_list); in iwl_op_mode_mvm_start()
1388 INIT_LIST_HEAD(&mvm->async_handlers_list); in iwl_op_mode_mvm_start()
1389 spin_lock_init(&mvm->time_event_lock); in iwl_op_mode_mvm_start()
1390 INIT_LIST_HEAD(&mvm->ftm_initiator.loc_list); in iwl_op_mode_mvm_start()
1391 INIT_LIST_HEAD(&mvm->ftm_initiator.pasn_list); in iwl_op_mode_mvm_start()
1392 INIT_LIST_HEAD(&mvm->resp_pasn_list); in iwl_op_mode_mvm_start()
1394 INIT_WORK(&mvm->async_handlers_wk, iwl_mvm_async_handlers_wk); in iwl_op_mode_mvm_start()
1395 INIT_WORK(&mvm->roc_done_wk, iwl_mvm_roc_done_wk); in iwl_op_mode_mvm_start()
1396 INIT_WORK(&mvm->sap_connected_wk, iwl_mvm_sap_connected_wk); in iwl_op_mode_mvm_start()
1397 INIT_DELAYED_WORK(&mvm->tdls_cs.dwork, iwl_mvm_tdls_ch_switch_work); in iwl_op_mode_mvm_start()
1398 INIT_DELAYED_WORK(&mvm->scan_timeout_dwork, iwl_mvm_scan_timeout_wk); in iwl_op_mode_mvm_start()
1399 INIT_WORK(&mvm->add_stream_wk, iwl_mvm_add_new_dqa_stream_wk); in iwl_op_mode_mvm_start()
1400 INIT_LIST_HEAD(&mvm->add_stream_txqs); in iwl_op_mode_mvm_start()
1401 spin_lock_init(&mvm->add_stream_lock); in iwl_op_mode_mvm_start()
1403 wiphy_work_init(&mvm->async_handlers_wiphy_wk, in iwl_op_mode_mvm_start()
1406 wiphy_work_init(&mvm->trig_link_selection_wk, in iwl_op_mode_mvm_start()
1409 init_waitqueue_head(&mvm->rx_sync_waitq); in iwl_op_mode_mvm_start()
1411 mvm->queue_sync_state = 0; in iwl_op_mode_mvm_start()
1413 SET_IEEE80211_DEV(mvm->hw, mvm->trans->dev); in iwl_op_mode_mvm_start()
1415 spin_lock_init(&mvm->tcm.lock); in iwl_op_mode_mvm_start()
1416 INIT_DELAYED_WORK(&mvm->tcm.work, iwl_mvm_tcm_work); in iwl_op_mode_mvm_start()
1417 mvm->tcm.ts = jiffies; in iwl_op_mode_mvm_start()
1418 mvm->tcm.ll_ts = jiffies; in iwl_op_mode_mvm_start()
1419 mvm->tcm.uapsd_nonagg_ts = jiffies; in iwl_op_mode_mvm_start()
1421 INIT_DELAYED_WORK(&mvm->cs_tx_unblock_dwork, iwl_mvm_tx_unblock_dwork); in iwl_op_mode_mvm_start()
1423 mvm->cmd_ver.range_resp = in iwl_op_mode_mvm_start()
1424 iwl_fw_lookup_notif_ver(mvm->fw, LOCATION_GROUP, in iwl_op_mode_mvm_start()
1427 if (WARN_ON_ONCE(mvm->cmd_ver.range_resp > 9)) in iwl_op_mode_mvm_start()
1440 trans->wide_cmd_header = true; in iwl_op_mode_mvm_start()
1442 mvm->trans->trans_cfg->device_family < IWL_DEVICE_FAMILY_AX210; in iwl_op_mode_mvm_start()
1454 snprintf(mvm->hw->wiphy->fw_version, in iwl_op_mode_mvm_start()
1455 sizeof(mvm->hw->wiphy->fw_version), in iwl_op_mode_mvm_start()
1456 "%.31s", fw->fw_version); in iwl_op_mode_mvm_start()
1458 trans_cfg.fw_reset_handshake = fw_has_capa(&mvm->fw->ucode_capa, in iwl_op_mode_mvm_start()
1462 iwl_fw_lookup_cmd_ver(mvm->fw, in iwl_op_mode_mvm_start()
1466 mvm->sta_remove_requires_queue_remove = in iwl_op_mode_mvm_start()
1469 mvm->mld_api_is_used = iwl_mvm_has_mld_api(mvm->fw); in iwl_op_mode_mvm_start()
1472 iwl_trans_configure(mvm->trans, &trans_cfg); in iwl_op_mode_mvm_start()
1474 trans->rx_mpdu_cmd = REPLY_RX_MPDU_CMD; in iwl_op_mode_mvm_start()
1475 trans->dbg.dest_tlv = mvm->fw->dbg.dest_tlv; in iwl_op_mode_mvm_start()
1476 trans->dbg.n_dest_reg = mvm->fw->dbg.n_dest_reg; in iwl_op_mode_mvm_start()
1477 memcpy(trans->dbg.conf_tlv, mvm->fw->dbg.conf_tlv, in iwl_op_mode_mvm_start()
1478 sizeof(trans->dbg.conf_tlv)); in iwl_op_mode_mvm_start()
1479 trans->dbg.trigger_tlv = mvm->fw->dbg.trigger_tlv; in iwl_op_mode_mvm_start()
1481 trans->iml = mvm->fw->iml; in iwl_op_mode_mvm_start()
1482 trans->iml_len = mvm->fw->iml_len; in iwl_op_mode_mvm_start()
1485 iwl_notification_wait_init(&mvm->notif_wait); in iwl_op_mode_mvm_start()
1488 mvm->phy_db = iwl_phy_db_init(trans); in iwl_op_mode_mvm_start()
1489 if (!mvm->phy_db) { in iwl_op_mode_mvm_start()
1495 mvm->nvm_file_name = iwlwifi_mod_params.nvm_file; in iwl_op_mode_mvm_start()
1497 IWL_DEBUG_EEPROM(mvm->trans->dev, in iwl_op_mode_mvm_start()
1502 mvm->scan_cmd = kmalloc(scan_size, GFP_KERNEL); in iwl_op_mode_mvm_start()
1503 if (!mvm->scan_cmd) in iwl_op_mode_mvm_start()
1505 mvm->scan_cmd_size = scan_size; in iwl_op_mode_mvm_start()
1508 mvm->aux_sta.sta_id = IWL_MVM_INVALID_STA; in iwl_op_mode_mvm_start()
1509 mvm->snif_sta.sta_id = IWL_MVM_INVALID_STA; in iwl_op_mode_mvm_start()
1512 mvm->last_ebs_successful = true; in iwl_op_mode_mvm_start()
1518 memset(&mvm->rx_stats_v3, 0, in iwl_op_mode_mvm_start()
1521 memset(&mvm->rx_stats, 0, sizeof(struct mvm_statistics_rx)); in iwl_op_mode_mvm_start()
1525 iwl_mvm_init_time_sync(&mvm->time_sync); in iwl_op_mode_mvm_start()
1527 mvm->debugfs_dir = dbgfs_dir; in iwl_op_mode_mvm_start()
1529 mvm->mei_registered = !iwl_mei_register(mvm, &mei_ops); in iwl_op_mode_mvm_start()
1531 iwl_mvm_mei_scan_filter_init(&mvm->mei_scan_filter); in iwl_op_mode_mvm_start()
1539 if (trans->csme_own && mvm->mei_registered) in iwl_op_mode_mvm_start()
1553 if (mvm->mei_registered) { in iwl_op_mode_mvm_start()
1558 iwl_fw_flush_dumps(&mvm->fwrt); in iwl_op_mode_mvm_start()
1559 iwl_fw_runtime_free(&mvm->fwrt); in iwl_op_mode_mvm_start()
1561 iwl_phy_db_free(mvm->phy_db); in iwl_op_mode_mvm_start()
1562 kfree(mvm->scan_cmd); in iwl_op_mode_mvm_start()
1565 ieee80211_free_hw(mvm->hw); in iwl_op_mode_mvm_start()
1571 lockdep_assert_held(&mvm->mutex); in iwl_mvm_stop_device()
1573 iwl_fw_cancel_timestamp(&mvm->fwrt); in iwl_mvm_stop_device()
1575 clear_bit(IWL_MVM_STATUS_FIRMWARE_RUNNING, &mvm->status); in iwl_mvm_stop_device()
1579 iwl_fw_dbg_stop_sync(&mvm->fwrt); in iwl_mvm_stop_device()
1580 iwl_trans_stop_device(mvm->trans); in iwl_mvm_stop_device()
1581 iwl_free_fw_paging(&mvm->fwrt); in iwl_mvm_stop_device()
1582 iwl_fw_dump_conf_clear(&mvm->fwrt); in iwl_mvm_stop_device()
1591 if (mvm->mei_registered) { in iwl_op_mode_mvm_stop()
1602 cancel_work_sync(&mvm->sap_connected_wk); in iwl_op_mode_mvm_stop()
1616 if (mvm->hw_registered) in iwl_op_mode_mvm_stop()
1617 ieee80211_unregister_hw(mvm->hw); in iwl_op_mode_mvm_stop()
1619 kfree(mvm->scan_cmd); in iwl_op_mode_mvm_stop()
1620 kfree(mvm->mcast_filter_cmd); in iwl_op_mode_mvm_stop()
1621 mvm->mcast_filter_cmd = NULL; in iwl_op_mode_mvm_stop()
1623 kfree(mvm->error_recovery_buf); in iwl_op_mode_mvm_stop()
1624 mvm->error_recovery_buf = NULL; in iwl_op_mode_mvm_stop()
1628 iwl_trans_op_mode_leave(mvm->trans); in iwl_op_mode_mvm_stop()
1630 iwl_phy_db_free(mvm->phy_db); in iwl_op_mode_mvm_stop()
1631 mvm->phy_db = NULL; in iwl_op_mode_mvm_stop()
1633 kfree(mvm->nvm_data); in iwl_op_mode_mvm_stop()
1634 kfree(mvm->mei_nvm_data); in iwl_op_mode_mvm_stop()
1635 kfree(rcu_access_pointer(mvm->csme_conn_info)); in iwl_op_mode_mvm_stop()
1636 kfree(mvm->temp_nvm_data); in iwl_op_mode_mvm_stop()
1638 kfree(mvm->nvm_sections[i].data); in iwl_op_mode_mvm_stop()
1639 kfree(mvm->acs_survey); in iwl_op_mode_mvm_stop()
1641 cancel_delayed_work_sync(&mvm->tcm.work); in iwl_op_mode_mvm_stop()
1643 iwl_fw_runtime_free(&mvm->fwrt); in iwl_op_mode_mvm_stop()
1644 mutex_destroy(&mvm->mutex); in iwl_op_mode_mvm_stop()
1646 if (mvm->mei_registered) in iwl_op_mode_mvm_stop()
1649 ieee80211_free_hw(mvm->hw); in iwl_op_mode_mvm_stop()
1663 spin_lock_bh(&mvm->async_handlers_lock); in iwl_mvm_async_handlers_purge()
1664 list_for_each_entry_safe(entry, tmp, &mvm->async_handlers_list, list) { in iwl_mvm_async_handlers_purge()
1665 iwl_free_rxb(&entry->rxb); in iwl_mvm_async_handlers_purge()
1666 list_del(&entry->list); in iwl_mvm_async_handlers_purge()
1669 spin_unlock_bh(&mvm->async_handlers_lock); in iwl_mvm_async_handlers_purge()
1673 * This function receives a bitmap of rx async handler contexts
1683 * Sync with Rx path with a lock. Remove all the entries of the in iwl_mvm_async_handlers_by_context()
1687 spin_lock_bh(&mvm->async_handlers_lock); in iwl_mvm_async_handlers_by_context()
1688 list_for_each_entry_safe(entry, tmp, &mvm->async_handlers_list, list) { in iwl_mvm_async_handlers_by_context()
1689 if (!(BIT(entry->context) & contexts)) in iwl_mvm_async_handlers_by_context()
1691 list_del(&entry->list); in iwl_mvm_async_handlers_by_context()
1692 list_add_tail(&entry->list, &local_list); in iwl_mvm_async_handlers_by_context()
1694 spin_unlock_bh(&mvm->async_handlers_lock); in iwl_mvm_async_handlers_by_context()
1697 if (entry->context != RX_HANDLER_ASYNC_UNLOCKED) in iwl_mvm_async_handlers_by_context()
1698 mutex_lock(&mvm->mutex); in iwl_mvm_async_handlers_by_context()
1699 entry->fn(mvm, &entry->rxb); in iwl_mvm_async_handlers_by_context()
1700 iwl_free_rxb(&entry->rxb); in iwl_mvm_async_handlers_by_context()
1701 list_del(&entry->list); in iwl_mvm_async_handlers_by_context()
1702 if (entry->context != RX_HANDLER_ASYNC_UNLOCKED) in iwl_mvm_async_handlers_by_context()
1703 mutex_unlock(&mvm->mutex); in iwl_mvm_async_handlers_by_context()
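
iwl_mvm_async_handlers_by_context() above detaches matching entries onto a local list under the spinlock, then runs them with the spinlock dropped, taking mvm->mutex per entry only for contexts that need it. The same detach-then-run shape as a standalone userspace program (pthread mutexes stand in for the kernel locks; the context-bitmap filtering is omitted for brevity):

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct entry {
	struct entry *next;
	int unlocked;		/* like RX_HANDLER_ASYNC_UNLOCKED */
	void (*fn)(void);
};

static pthread_mutex_t list_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_mutex_t big_mutex = PTHREAD_MUTEX_INITIALIZER;
static struct entry *pending;

static void say_hi(void) { printf("handler ran\n"); }

static void drain_handlers(void)
{
	struct entry *local = NULL, **tail = &local, *e;

	/* detach everything under the list lock, preserving order */
	pthread_mutex_lock(&list_lock);
	while ((e = pending)) {
		pending = e->next;
		e->next = NULL;
		*tail = e;
		tail = &e->next;
	}
	pthread_mutex_unlock(&list_lock);

	/* run with the list lock dropped, big lock taken per entry */
	while ((e = local)) {
		local = e->next;
		if (!e->unlocked)
			pthread_mutex_lock(&big_mutex);
		e->fn();
		if (!e->unlocked)
			pthread_mutex_unlock(&big_mutex);
		free(e);
	}
}

int main(void)
{
	struct entry *e = calloc(1, sizeof(*e));

	if (!e)
		return 1;
	e->fn = say_hi;
	pthread_mutex_lock(&list_lock);
	e->next = pending;
	pending = e;
	pthread_mutex_unlock(&list_lock);

	drain_handlers();
	return 0;
}
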
1729 struct iwl_rx_packet *pkt) in iwl_mvm_rx_check_trigger() argument
1735 trig = iwl_fw_dbg_trigger_on(&mvm->fwrt, NULL, in iwl_mvm_rx_check_trigger()
1740 cmds_trig = (void *)trig->data; in iwl_mvm_rx_check_trigger()
1742 for (i = 0; i < ARRAY_SIZE(cmds_trig->cmds); i++) { in iwl_mvm_rx_check_trigger()
1744 if (!cmds_trig->cmds[i].cmd_id) in iwl_mvm_rx_check_trigger()
1747 if (cmds_trig->cmds[i].cmd_id != pkt->hdr.cmd || in iwl_mvm_rx_check_trigger()
1748 cmds_trig->cmds[i].group_id != pkt->hdr.group_id) in iwl_mvm_rx_check_trigger()
1751 iwl_fw_dbg_collect_trig(&mvm->fwrt, trig, in iwl_mvm_rx_check_trigger()
1753 pkt->hdr.group_id, pkt->hdr.cmd); in iwl_mvm_rx_check_trigger()
1760 struct iwl_rx_packet *pkt) in iwl_mvm_rx_common() argument
1762 unsigned int pkt_len = iwl_rx_packet_payload_len(pkt); in iwl_mvm_rx_common()
1764 union iwl_dbg_tlv_tp_data tp_data = { .fw_pkt = pkt }; in iwl_mvm_rx_common()
1766 iwl_dbg_tlv_time_point(&mvm->fwrt, in iwl_mvm_rx_common()
1768 iwl_mvm_rx_check_trigger(mvm, pkt); in iwl_mvm_rx_common()
1771 * Do the notification wait before RX handlers so in iwl_mvm_rx_common()
1772 * even if the RX handler consumes the RXB we have in iwl_mvm_rx_common()
1775 iwl_notification_wait_notify(&mvm->notif_wait, pkt); in iwl_mvm_rx_common()
1781 if (rx_h->cmd_id != WIDE_ID(pkt->hdr.group_id, pkt->hdr.cmd)) in iwl_mvm_rx_common()
1784 if (IWL_FW_CHECK(mvm, pkt_len < rx_h->min_size, in iwl_mvm_rx_common()
1786 rx_h->cmd_id, pkt_len, rx_h->min_size)) in iwl_mvm_rx_common()
1789 if (rx_h->context == RX_HANDLER_SYNC) { in iwl_mvm_rx_common()
1790 rx_h->fn(mvm, rxb); in iwl_mvm_rx_common()
1799 entry->rxb._page = rxb_steal_page(rxb); in iwl_mvm_rx_common()
1800 entry->rxb._offset = rxb->_offset; in iwl_mvm_rx_common()
1801 entry->rxb._rx_page_order = rxb->_rx_page_order; in iwl_mvm_rx_common()
1802 entry->fn = rx_h->fn; in iwl_mvm_rx_common()
1803 entry->context = rx_h->context; in iwl_mvm_rx_common()
1804 spin_lock(&mvm->async_handlers_lock); in iwl_mvm_rx_common()
1805 list_add_tail(&entry->list, &mvm->async_handlers_list); in iwl_mvm_rx_common()
1806 spin_unlock(&mvm->async_handlers_lock); in iwl_mvm_rx_common()
1807 if (rx_h->context == RX_HANDLER_ASYNC_LOCKED_WIPHY) in iwl_mvm_rx_common()
1808 wiphy_work_queue(mvm->hw->wiphy, in iwl_mvm_rx_common()
1809 &mvm->async_handlers_wiphy_wk); in iwl_mvm_rx_common()
1811 schedule_work(&mvm->async_handlers_wk); in iwl_mvm_rx_common()
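
iwl_mvm_rx_common() calls iwl_notification_wait_notify() before walking the handler table precisely so that synchronous waiters see the packet even if a handler steals the RXB. A hedged sketch of the waiter side of the fw/notif-wait.h API as I read it (SOME_NOTIF and the trigger step are placeholders; double-check the signatures against the header):

static bool sketch_wait_fn(struct iwl_notif_wait_data *notif_wait,
			   struct iwl_rx_packet *pkt, void *data)
{
	/* inspect pkt->data; return true to wake the waiter */
	return true;
}

	static const u16 cmds[] = { WIDE_ID(LEGACY_GROUP, SOME_NOTIF) };
	struct iwl_notification_wait wait;
	int ret;

	iwl_init_notification_wait(&mvm->notif_wait, &wait,
				   cmds, ARRAY_SIZE(cmds),
				   sketch_wait_fn, NULL);
	/* ...send the command that provokes SOME_NOTIF... */
	ret = iwl_wait_notification(&mvm->notif_wait, &wait, HZ);
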
1820 struct iwl_rx_packet *pkt = rxb_addr(rxb); in iwl_mvm_rx() local
1822 u16 cmd = WIDE_ID(pkt->hdr.group_id, pkt->hdr.cmd); in iwl_mvm_rx()
1829 iwl_mvm_rx_common(mvm, rxb, pkt); in iwl_mvm_rx()
1836 struct iwl_rx_packet *pkt = rxb_addr(rxb); in iwl_mvm_rx_mq() local
1838 u16 cmd = WIDE_ID(pkt->hdr.group_id, pkt->hdr.cmd); in iwl_mvm_rx_mq()
1852 iwl_mvm_rx_common(mvm, rxb, pkt); in iwl_mvm_rx_mq()
1855 static int iwl_mvm_is_static_queue(struct iwl_mvm *mvm, int queue) in iwl_mvm_is_static_queue() argument
1857 return queue == mvm->aux_queue || queue == mvm->probe_queue || in iwl_mvm_is_static_queue()
1858 queue == mvm->p2p_dev_queue || queue == mvm->snif_queue; in iwl_mvm_is_static_queue()
1874 mvm->tvqm_info[hw_queue].sta_id : in iwl_mvm_queue_state_change()
1875 mvm->queue_info[hw_queue].ra_sta_id; in iwl_mvm_queue_state_change()
1877 if (WARN_ON_ONCE(sta_id >= mvm->fw->ucode_capa.num_stations)) in iwl_mvm_queue_state_change()
1882 sta = rcu_dereference(mvm->fw_id_to_mac_id[sta_id]); in iwl_mvm_queue_state_change()
1889 ieee80211_stop_queues(mvm->hw); in iwl_mvm_queue_state_change()
1890 else if (mvmsta->sta_state != IEEE80211_STA_NOTEXIST) in iwl_mvm_queue_state_change()
1891 ieee80211_wake_queues(mvm->hw); in iwl_mvm_queue_state_change()
1897 int tid = mvm->tvqm_info[hw_queue].txq_tid; in iwl_mvm_queue_state_change()
1901 tid_bitmap = mvm->queue_info[hw_queue].tid_bitmap; in iwl_mvm_queue_state_change()
1910 txq = sta->txq[tid]; in iwl_mvm_queue_state_change()
1913 clear_bit(IWL_MVM_TXQ_STATE_STOP_FULL, &mvmtxq->state); in iwl_mvm_queue_state_change()
1915 set_bit(IWL_MVM_TXQ_STATE_STOP_FULL, &mvmtxq->state); in iwl_mvm_queue_state_change()
1917 if (start && mvmsta->sta_state != IEEE80211_STA_NOTEXIST) { in iwl_mvm_queue_state_change()
1919 iwl_mvm_mac_itxq_xmit(mvm->hw, txq); in iwl_mvm_queue_state_change()
1940 wiphy_rfkill_set_hw_state(mvm->hw->wiphy, in iwl_mvm_set_rfkill_state()
1947 set_bit(IWL_MVM_STATUS_HW_CTKILL, &mvm->status); in iwl_mvm_set_hw_ctkill_state()
1949 clear_bit(IWL_MVM_STATUS_HW_CTKILL, &mvm->status); in iwl_mvm_set_hw_ctkill_state()
1956 return rcu_dereference_protected(mvm->csme_conn_info, in iwl_mvm_get_csme_conn_info()
1957 lockdep_is_held(&mvm->mutex)); in iwl_mvm_get_csme_conn_info()
1963 bool rfkill_safe_init_done = READ_ONCE(mvm->rfkill_safe_init_done); in iwl_mvm_set_hw_rfkill_state()
1967 set_bit(IWL_MVM_STATUS_HW_RFKILL, &mvm->status); in iwl_mvm_set_hw_rfkill_state()
1969 clear_bit(IWL_MVM_STATUS_HW_RFKILL, &mvm->status); in iwl_mvm_set_hw_rfkill_state()
1975 iwl_abort_notification_waits(&mvm->notif_wait); in iwl_mvm_set_hw_rfkill_state()
1997 iwl_trans_free_tx_cmd(mvm->trans, info->driver_data[1]); in iwl_mvm_free_skb()
1998 ieee80211_free_txskb(mvm->hw, skb); in iwl_mvm_free_skb()
2011 if (device_reprobe(reprobe->dev)) in iwl_mvm_reprobe_wk()
2012 dev_err(reprobe->dev, "reprobe failed!\n"); in iwl_mvm_reprobe_wk()
2013 put_device(reprobe->dev); in iwl_mvm_reprobe_wk()
2020 iwl_abort_notification_waits(&mvm->notif_wait); in iwl_mvm_nic_restart()
2021 iwl_dbg_tlv_del_timers(mvm->trans); in iwl_mvm_nic_restart()
2041 if (!mvm->fw_restart && fw_error) { in iwl_mvm_nic_restart()
2042 iwl_fw_error_collect(&mvm->fwrt, false); in iwl_mvm_nic_restart()
2044 &mvm->status)) { in iwl_mvm_nic_restart()
2046 } else if (test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)) { in iwl_mvm_nic_restart()
2050 "Firmware error during reconfiguration - reprobe!\n"); in iwl_mvm_nic_restart()
2058 IWL_ERR(mvm, "Module is being unloaded - abort\n"); in iwl_mvm_nic_restart()
2067 reprobe->dev = get_device(mvm->trans->dev); in iwl_mvm_nic_restart()
2068 INIT_WORK(&reprobe->work, iwl_mvm_reprobe_wk); in iwl_mvm_nic_restart()
2069 schedule_work(&reprobe->work); in iwl_mvm_nic_restart()
2071 &mvm->status)) { in iwl_mvm_nic_restart()
2073 } else if (mvm->fwrt.cur_fw_img == IWL_UCODE_REGULAR && in iwl_mvm_nic_restart()
2074 mvm->hw_registered && in iwl_mvm_nic_restart()
2075 !test_bit(STATUS_TRANS_DEAD, &mvm->trans->status)) { in iwl_mvm_nic_restart()
2080 set_bit(IWL_MVM_STATUS_HW_RESTART_REQUESTED, &mvm->status); in iwl_mvm_nic_restart()
2082 if (mvm->fw->ucode_capa.error_log_size) { in iwl_mvm_nic_restart()
2083 u32 src_size = mvm->fw->ucode_capa.error_log_size; in iwl_mvm_nic_restart()
2084 u32 src_addr = mvm->fw->ucode_capa.error_log_addr; in iwl_mvm_nic_restart()
2088 mvm->error_recovery_buf = recover_buf; in iwl_mvm_nic_restart()
2089 iwl_trans_read_mem_bytes(mvm->trans, in iwl_mvm_nic_restart()
2096 iwl_fw_error_collect(&mvm->fwrt, false); in iwl_mvm_nic_restart()
2098 if (fw_error && mvm->fw_restart > 0) { in iwl_mvm_nic_restart()
2099 mvm->fw_restart--; in iwl_mvm_nic_restart()
2100 ieee80211_restart_hw(mvm->hw); in iwl_mvm_nic_restart()
2101 } else if (mvm->fwrt.trans->dbg.restart_required) { in iwl_mvm_nic_restart()
2103 mvm->fwrt.trans->dbg.restart_required = false; in iwl_mvm_nic_restart()
2104 ieee80211_restart_hw(mvm->hw); in iwl_mvm_nic_restart()
2105 } else if (mvm->trans->trans_cfg->device_family <= IWL_DEVICE_FAMILY_8000) { in iwl_mvm_nic_restart()
2106 ieee80211_restart_hw(mvm->hw); in iwl_mvm_nic_restart()
2115 if (!test_bit(STATUS_TRANS_DEAD, &mvm->trans->status) && in iwl_mvm_nic_error()
2117 &mvm->status)) in iwl_mvm_nic_error()
2121 iwl_fw_error_collect(&mvm->fwrt, true); in iwl_mvm_nic_error()
2135 if (!test_bit(IWL_MVM_STATUS_FIRMWARE_RUNNING, &mvm->status)) in iwl_mvm_nic_error()
2155 iwl_dbg_tlv_time_point(&mvm->fwrt, tp_id, tp_data); in iwl_op_mode_mvm_time_point()
2163 mutex_lock(&mvm->mutex); in iwl_op_mode_mvm_device_powered_off()
2164 clear_bit(IWL_MVM_STATUS_IN_D3, &mvm->status); in iwl_op_mode_mvm_device_powered_off()
2165 mvm->trans->system_pm_mode = IWL_PLAT_PM_MODE_DISABLED; in iwl_op_mode_mvm_device_powered_off()
2167 mvm->fast_resume = false; in iwl_op_mode_mvm_device_powered_off()
2168 mutex_unlock(&mvm->mutex); in iwl_op_mode_mvm_device_powered_off()
2192 .rx = iwl_mvm_rx,
2198 unsigned int queue) in iwl_mvm_rx_mq_rss() argument
2201 struct iwl_rx_packet *pkt = rxb_addr(rxb); in iwl_mvm_rx_mq_rss() local
2202 u16 cmd = WIDE_ID(pkt->hdr.group_id, pkt->hdr.cmd); in iwl_mvm_rx_mq_rss()
2204 if (unlikely(queue >= mvm->trans->num_rx_queues)) in iwl_mvm_rx_mq_rss()
2208 iwl_mvm_rx_frame_release(mvm, napi, rxb, queue); in iwl_mvm_rx_mq_rss()
2211 iwl_mvm_rx_queue_notif(mvm, napi, rxb, queue); in iwl_mvm_rx_mq_rss()
2213 iwl_mvm_rx_mpdu_mq(mvm, napi, rxb, queue); in iwl_mvm_rx_mq_rss()
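
Both RX entry points key their dispatch on cmd = WIDE_ID(group, opcode). From my reading of iwlwifi, WIDE_ID() packs the group into the high byte and the opcode into the low byte; a trivial standalone check (the ids below are arbitrary):

#include <stdio.h>

#define WIDE_ID(grp, opcode) (((grp) << 8) | (opcode))

int main(void)
{
	unsigned short cmd = WIDE_ID(0x05, 0xF4);	/* arbitrary ids */

	printf("group=0x%02x opcode=0x%02x\n", cmd >> 8, cmd & 0xff);
	return 0;
}
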
2218 .rx = iwl_mvm_rx_mq,