1 /* 2 * Copyright (c) 2013-2021 The Linux Foundation. All rights reserved. 3 * Copyright (c) 2021-2024 Qualcomm Innovation Center, Inc. All rights reserved. 4 * 5 * Permission to use, copy, modify, and/or distribute this software for 6 * any purpose with or without fee is hereby granted, provided that the 7 * above copyright notice and this permission notice appear in all 8 * copies. 9 * 10 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL 11 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED 12 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE 13 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL 14 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR 15 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER 16 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR 17 * PERFORMANCE OF THIS SOFTWARE. 18 */ 19 20 /** 21 * DOC: wma_mgmt.c 22 * 23 * This file contains STA/SAP and protocol related functions. 
 */

/* Header files */

#include "wma.h"
#include "wma_api.h"
#include "cds_api.h"
#include "wmi_unified_api.h"
#include "wlan_qct_sys.h"
#include "wni_api.h"
#include "ani_global.h"
#include "wmi_unified.h"
#include "wni_cfg.h"

#include "qdf_nbuf.h"
#include "qdf_types.h"
#include "qdf_mem.h"

#include "wma_types.h"
#include "lim_api.h"
#include "lim_session_utils.h"

#include "cds_utils.h"
#include "wlan_dlm_api.h"
#if defined(CONNECTIVITY_PKTLOG) || !defined(REMOVE_PKT_LOG)
#include "pktlog_ac.h"
#else
#include "pktlog_ac_fmt.h"
#endif /* REMOVE_PKT_LOG */

#include "dbglog_host.h"
#include "csr_api.h"
#include "ol_fw.h"
#include "wma_internal.h"
#include "wlan_policy_mgr_api.h"
#include "cdp_txrx_flow_ctrl_legacy.h"
#include <cdp_txrx_peer_ops.h>
#include <cdp_txrx_pmf.h>
#include <cdp_txrx_cfg.h>
#include <cdp_txrx_cmn.h>
#include <cdp_txrx_misc.h>
#include "wlan_mgmt_txrx_tgt_api.h"
#include "wlan_objmgr_psoc_obj.h"
#include "wlan_objmgr_pdev_obj.h"
#include "wlan_objmgr_vdev_obj.h"
#include "wlan_lmac_if_api.h"
#include <cdp_txrx_handle.h>
#include "wma_he.h"
#include "wma_eht.h"
#include <qdf_crypto.h>
#include "wma_twt.h"
#include "wlan_p2p_cfg_api.h"
#include "cfg_ucfg_api.h"
#include "cfg_mlme_sta.h"
#include "wlan_mlme_api.h"
#include "wmi_unified_bcn_api.h"
#include <wlan_crypto_global_api.h>
#include <wlan_mlme_main.h>
#include <../../core/src/vdev_mgr_ops.h>
#include "wlan_pkt_capture_ucfg_api.h"

#if defined(CONNECTIVITY_PKTLOG) || !defined(REMOVE_PKT_LOG)
#include <wlan_logging_sock_svc.h>
#endif
#include "wlan_cm_roam_api.h"
#include "wlan_cm_api.h"
#include "wlan_mlo_link_force.h"
#include <target_if_spatial_reuse.h>
#include "wlan_nan_api_i.h"

/* Max debug string size for WMM in bytes */
#define 
WMA_WMM_DEBUG_STRING_SIZE 512

/**
 * wma_send_bcn_buf_ll() - prepare and send beacon buffer to fw for LL
 * @wma: wma handle
 * @vdev_id: vdev id
 * @param_buf: SWBA parameters
 *
 * Patches the host-owned beacon template (TIM bitmap, DTIM count,
 * sequence number, P2P NoA IE), DMA-maps it and hands it to firmware
 * via WMI. Runs under bcn->lock while the template is being modified.
 *
 * Return: none
 */
#ifdef WLAN_WMI_BCN
static void wma_send_bcn_buf_ll(tp_wma_handle wma,
				uint8_t vdev_id,
				WMI_HOST_SWBA_EVENTID_param_tlvs *param_buf)
{
	struct ieee80211_frame *wh;
	struct beacon_info *bcn;
	wmi_tim_info *tim_info = param_buf->tim_info;
	uint8_t *bcn_payload;
	QDF_STATUS ret;
	struct beacon_tim_ie *tim_ie;
	wmi_p2p_noa_info *p2p_noa_info = param_buf->p2p_noa_info;
	struct p2p_sub_element_noa noa_ie;
	struct wmi_bcn_send_from_host params;
	uint8_t i;

	bcn = wma->interfaces[vdev_id].beacon;
	if (!bcn || !bcn->buf) {
		wma_err("Invalid beacon buffer");
		return;
	}

	/* tim_info/p2p_noa_info are optional TLVs; bail out if absent */
	if (!param_buf->tim_info || !param_buf->p2p_noa_info) {
		wma_err("Invalid tim info or p2p noa info");
		return;
	}

	/* bound check firmware-provided descriptor count before copying */
	if (WMI_UNIFIED_NOA_ATTR_NUM_DESC_GET(p2p_noa_info) >
	    WMI_P2P_MAX_NOA_DESCRIPTORS) {
		wma_err("Too many descriptors %d",
			WMI_UNIFIED_NOA_ATTR_NUM_DESC_GET(p2p_noa_info));
		return;
	}

	qdf_spin_lock_bh(&bcn->lock);

	bcn_payload = qdf_nbuf_data(bcn->buf);

	tim_ie = (struct beacon_tim_ie *)(&bcn_payload[bcn->tim_ie_offset]);

	if (tim_info->tim_changed) {
		if (tim_info->tim_num_ps_pending)
			qdf_mem_copy(&tim_ie->tim_bitmap, tim_info->tim_bitmap,
				     WMA_TIM_SUPPORTED_PVB_LENGTH);
		else
			qdf_mem_zero(&tim_ie->tim_bitmap,
				     WMA_TIM_SUPPORTED_PVB_LENGTH);
		/*
		 * Currently we support fixed number of
		 * peers as limited by HAL_NUM_STA.
		 * tim offset is always 0
		 */
		tim_ie->tim_bitctl = 0;
	}

	/* Update DTIM Count: counts down, wrapping to dtim_period - 1 */
	if (tim_ie->dtim_count == 0)
		tim_ie->dtim_count = tim_ie->dtim_period - 1;
	else
		tim_ie->dtim_count--;

	/*
	 * DTIM count needs to be backedup so that
	 * when umac updates the beacon template
	 * current dtim count can be updated properly
	 */
	bcn->dtim_count = tim_ie->dtim_count;

	/* update state for buffered multicast frames on DTIM */
	if (tim_info->tim_mcast && (tim_ie->dtim_count == 0 ||
				    tim_ie->dtim_period == 1))
		tim_ie->tim_bitctl |= 1;
	else
		tim_ie->tim_bitctl &= ~1;

	/* To avoid sw generated frame sequence the same as H/W generated frame,
	 * the value lower than min_sw_seq is reserved for HW generated frame
	 */
	if ((bcn->seq_no & IEEE80211_SEQ_MASK) < MIN_SW_SEQ)
		bcn->seq_no = MIN_SW_SEQ;

	wh = (struct ieee80211_frame *)bcn_payload;
	*(uint16_t *)&wh->i_seq[0] = htole16(bcn->seq_no
					     << IEEE80211_SEQ_SEQ_SHIFT);
	bcn->seq_no++;

	if (WMI_UNIFIED_NOA_ATTR_IS_MODIFIED(p2p_noa_info)) {
		qdf_mem_zero(&noa_ie, sizeof(noa_ie));

		noa_ie.index =
			(uint8_t)WMI_UNIFIED_NOA_ATTR_INDEX_GET(p2p_noa_info);
		noa_ie.oppPS =
			(uint8_t)WMI_UNIFIED_NOA_ATTR_OPP_PS_GET(p2p_noa_info);
		noa_ie.ctwindow =
			(uint8_t)WMI_UNIFIED_NOA_ATTR_CTWIN_GET(p2p_noa_info);
		noa_ie.num_descriptors = (uint8_t)
			WMI_UNIFIED_NOA_ATTR_NUM_DESC_GET(p2p_noa_info);
		wma_debug("index %u, oppPs %u, ctwindow %u, num_descriptors = %u",
			  noa_ie.index,
			  noa_ie.oppPS, noa_ie.ctwindow, noa_ie.num_descriptors);
		for (i = 0; i < noa_ie.num_descriptors; i++) {
			noa_ie.noa_descriptors[i].type_count =
				(uint8_t)p2p_noa_info->noa_descriptors[i].
				type_count;
			noa_ie.noa_descriptors[i].duration =
				p2p_noa_info->noa_descriptors[i].duration;
			noa_ie.noa_descriptors[i].interval =
				p2p_noa_info->noa_descriptors[i].interval;
			noa_ie.noa_descriptors[i].start_time =
				p2p_noa_info->noa_descriptors[i].start_time;
			wma_debug("NoA descriptor[%d] type_count %u, duration %u, interval %u, start_time = %u",
				  i,
				  noa_ie.noa_descriptors[i].type_count,
				  noa_ie.noa_descriptors[i].duration,
				  noa_ie.noa_descriptors[i].interval,
				  noa_ie.noa_descriptors[i].start_time);
		}
		wma_update_noa(bcn, &noa_ie);

		/* Send a msg to LIM to update the NoA IE in probe response
		 * frames transmitted by the host
		 */
		wma_update_probe_resp_noa(wma, &noa_ie);
	}

	/* re-map every SWBA: the payload above was modified by the CPU */
	if (bcn->dma_mapped) {
		qdf_nbuf_unmap_single(wma->qdf_dev, bcn->buf, QDF_DMA_TO_DEVICE);
		bcn->dma_mapped = 0;
	}
	ret = qdf_nbuf_map_single(wma->qdf_dev, bcn->buf, QDF_DMA_TO_DEVICE);
	if (ret != QDF_STATUS_SUCCESS) {
		wma_err("failed map beacon buf to DMA region");
		qdf_spin_unlock_bh(&bcn->lock);
		return;
	}

	bcn->dma_mapped = 1;
	params.vdev_id = vdev_id;
	params.data_len = bcn->len;
	params.frame_ctrl = *((A_UINT16 *)wh->i_fc);
	params.frag_ptr = qdf_nbuf_get_frag_paddr(bcn->buf, 0);
	params.dtim_flag = 0;
	/* notify Firmware of DTM and mcast/bcast traffic */
	if (tim_ie->dtim_count == 0) {
		params.dtim_flag |= WMI_BCN_SEND_DTIM_ZERO;
		/* deliver mcast/bcast traffic in next DTIM beacon */
		if (tim_ie->tim_bitctl & 0x01)
			params.dtim_flag |= WMI_BCN_SEND_DTIM_BITCTL_SET;
	}

	wmi_unified_bcn_buf_ll_cmd(wma->wmi_handle,
				   &params);

	qdf_spin_unlock_bh(&bcn->lock);
}
#else
static inline void
wma_send_bcn_buf_ll(tp_wma_handle wma,
		    uint8_t vdev_id,
		    WMI_HOST_SWBA_EVENTID_param_tlvs *param_buf)
{
}
#endif
/**
 * wma_beacon_swba_handler() - swba event handler
 * @handle: wma handle
 * @event:
event data
 * @len: data length
 *
 * SWBA event is alert event to Host requesting host to Queue a beacon
 * for transmission use only in host beacon mode
 *
 * Return: 0 for success or error code
 */
#ifdef WLAN_WMI_BCN
int wma_beacon_swba_handler(void *handle, uint8_t *event, uint32_t len)
{
	tp_wma_handle wma = (tp_wma_handle) handle;
	WMI_HOST_SWBA_EVENTID_param_tlvs *param_buf;
	wmi_host_swba_event_fixed_param *swba_event;
	uint32_t vdev_map;
	uint8_t vdev_id = 0;
	void *soc = cds_get_context(QDF_MODULE_ID_SOC);

	param_buf = (WMI_HOST_SWBA_EVENTID_param_tlvs *) event;
	if (!param_buf) {
		wma_err("Invalid swba event buffer");
		return -EINVAL;
	}
	swba_event = param_buf->fixed_param;
	vdev_map = swba_event->vdev_map;

	wma_debug("vdev_map = %d", vdev_map);
	/*
	 * vdev_map is a bitmap of vdevs needing a beacon; only the first
	 * set bit is serviced (loop breaks after the first hit).
	 */
	for (; vdev_map && vdev_id < wma->max_bssid;
	     vdev_id++, vdev_map >>= 1) {
		if (!(vdev_map & 0x1))
			continue;
		if (!cdp_cfg_is_high_latency(soc,
		    (struct cdp_cfg *)cds_get_context(QDF_MODULE_ID_CFG)))
			wma_send_bcn_buf_ll(wma, vdev_id, param_buf);
		break;
	}
	return 0;
}
#else
static inline int
wma_beacon_swba_handler(void *handle, uint8_t *event, uint32_t len)
{
	return 0;
}
#endif

#ifdef FEATURE_WLAN_DIAG_SUPPORT
/* Report a STA kickout to the host diag/logging infrastructure */
void wma_sta_kickout_event(uint32_t kickout_reason, uint8_t vdev_id,
			   uint8_t *macaddr)
{
	WLAN_HOST_DIAG_EVENT_DEF(sta_kickout, struct host_event_wlan_kickout);
	qdf_mem_zero(&sta_kickout, sizeof(sta_kickout));
	sta_kickout.reasoncode = kickout_reason;
	sta_kickout.vdev_id = vdev_id;
	if (macaddr)
		qdf_mem_copy(sta_kickout.peer_mac, macaddr,
			     QDF_MAC_ADDR_SIZE);
	WLAN_HOST_DIAG_EVENT_REPORT(&sta_kickout, EVENT_WLAN_STA_KICKOUT);
}
#endif

/*
 * wma_peer_sta_kickout_event_handler() - handle WMI peer STA kickout event.
 * Resolves the peer's vdev, classifies the firmware kickout reason, and
 * either treats it as a beacon miss (STA vs. its own AP, reason
 * UNSPECIFIED) or posts SIR_LIM_DELETE_STA_CONTEXT_IND to LIM.
 * Returns 0 on success, negative errno otherwise.
 */
int wma_peer_sta_kickout_event_handler(void *handle, uint8_t *event,
				       uint32_t len)
{
	tp_wma_handle wma = (tp_wma_handle) handle;
	WMI_PEER_STA_KICKOUT_EVENTID_param_tlvs *param_buf = NULL;
	wmi_peer_sta_kickout_event_fixed_param *kickout_event = NULL;
	uint8_t vdev_id, macaddr[QDF_MAC_ADDR_SIZE];
	tpDeleteStaContext del_sta_ctx;
	uint8_t *addr, *bssid;
	struct wlan_objmgr_vdev *vdev;
	void *soc = cds_get_context(QDF_MODULE_ID_SOC);

	param_buf = (WMI_PEER_STA_KICKOUT_EVENTID_param_tlvs *) event;
	kickout_event = param_buf->fixed_param;
	WMI_MAC_ADDR_TO_CHAR_ARRAY(&kickout_event->peer_macaddr, macaddr);
	if (cdp_peer_get_vdevid(soc, macaddr, &vdev_id) !=
	    QDF_STATUS_SUCCESS) {
		wma_err("Not able to find BSSID for peer ["QDF_MAC_ADDR_FMT"]",
			QDF_MAC_ADDR_REF(macaddr));
		return -EINVAL;
	}

	if (!wma_is_vdev_valid(vdev_id))
		return -EINVAL;

	vdev = wma->interfaces[vdev_id].vdev;
	if (!vdev) {
		wma_err("Not able to find vdev for VDEV_%d", vdev_id);
		return -EINVAL;
	}
	addr = wlan_vdev_mlme_get_macaddr(vdev);

	wma_nofl_info("STA kickout for "QDF_MAC_ADDR_FMT", on mac "QDF_MAC_ADDR_FMT", vdev %d, reason:%d",
		      QDF_MAC_ADDR_REF(macaddr), QDF_MAC_ADDR_REF(addr),
		      vdev_id, kickout_event->reason);

	/* roaming will tear down / rebuild the connection anyway */
	if (wma_is_roam_in_progress(vdev_id)) {
		wma_err("vdev_id %d: Ignore STA kick out since roaming is in progress",
			vdev_id);
		return -EINVAL;
	}
	bssid = wma_get_vdev_bssid(vdev);
	if (!bssid) {
		wma_err("Failed to get bssid for vdev_%d", vdev_id);
		return -ENOMEM;
	}

	switch (kickout_event->reason) {
	case WMI_PEER_STA_KICKOUT_REASON_IBSS_DISCONNECT:
		goto exit_handler;
#ifdef FEATURE_WLAN_TDLS
	case WMI_PEER_STA_KICKOUT_REASON_TDLS_DISCONNECT:
		del_sta_ctx = (tpDeleteStaContext)
			qdf_mem_malloc(sizeof(tDeleteStaContext));
		if (!del_sta_ctx) {
			wma_err("mem alloc failed for struct del_sta_context for TDLS peer: "QDF_MAC_ADDR_FMT,
				QDF_MAC_ADDR_REF(macaddr));
			return -ENOMEM;
		}

		del_sta_ctx->is_tdls = true;
		del_sta_ctx->vdev_id = vdev_id;
		qdf_mem_copy(del_sta_ctx->addr2, macaddr, QDF_MAC_ADDR_SIZE);
		qdf_mem_copy(del_sta_ctx->bssId, bssid,
			     QDF_MAC_ADDR_SIZE);
		del_sta_ctx->reasonCode = HAL_DEL_STA_REASON_CODE_KEEP_ALIVE;
		/* ownership of del_sta_ctx transfers to the message target */
		wma_send_msg(wma, SIR_LIM_DELETE_STA_CONTEXT_IND,
			     (void *)del_sta_ctx, 0);
		goto exit_handler;
#endif /* FEATURE_WLAN_TDLS */

	case WMI_PEER_STA_KICKOUT_REASON_UNSPECIFIED:
		/*
		 * Default legacy value used by original firmware implementation
		 */
		if (wma->interfaces[vdev_id].type == WMI_VDEV_TYPE_STA &&
		    (wma->interfaces[vdev_id].sub_type == 0 ||
		     wma->interfaces[vdev_id].sub_type ==
		     WMI_UNIFIED_VDEV_SUBTYPE_P2P_CLIENT) &&
		    !qdf_mem_cmp(bssid,
				 macaddr, QDF_MAC_ADDR_SIZE)) {
			wma_sta_kickout_event(
			HOST_STA_KICKOUT_REASON_UNSPECIFIED, vdev_id, macaddr);
			/*
			 * KICKOUT event is for current station-AP connection.
			 * Treat it like final beacon miss. Station may not have
			 * missed beacons but not able to transmit frames to AP
			 * for a long time. Must disconnect to get out of
			 * this sticky situation.
			 * In future implementation, roaming module will also
			 * handle this event and perform a scan.
			 */
			wma_warn("WMI_PEER_STA_KICKOUT_REASON_UNSPECIFIED event for STA");
			wma_beacon_miss_handler(wma, vdev_id,
						kickout_event->rssi);
			goto exit_handler;
		}
		break;

	case WMI_PEER_STA_KICKOUT_REASON_XRETRY:
	case WMI_PEER_STA_KICKOUT_REASON_INACTIVITY:
		/*
		 * Handle SA query kickout is same as inactivity kickout.
		 * This could be for STA or SAP role
		 */
	case WMI_PEER_STA_KICKOUT_REASON_SA_QUERY_TIMEOUT:
	default:
		break;
	}

	/*
	 * default action is to send delete station context indication to LIM
	 */
	del_sta_ctx =
		(tDeleteStaContext *) qdf_mem_malloc(sizeof(tDeleteStaContext));
	if (!del_sta_ctx) {
		wma_err("QDF MEM Alloc Failed for struct del_sta_context");
		return -ENOMEM;
	}

	del_sta_ctx->is_tdls = false;
	del_sta_ctx->vdev_id = vdev_id;
	qdf_mem_copy(del_sta_ctx->addr2, macaddr, QDF_MAC_ADDR_SIZE);
	qdf_mem_copy(del_sta_ctx->bssId, addr, QDF_MAC_ADDR_SIZE);
	if (kickout_event->reason ==
	    WMI_PEER_STA_KICKOUT_REASON_SA_QUERY_TIMEOUT)
		del_sta_ctx->reasonCode =
			HAL_DEL_STA_REASON_CODE_SA_QUERY_TIMEOUT;
	else if (kickout_event->reason == WMI_PEER_STA_KICKOUT_REASON_XRETRY)
		del_sta_ctx->reasonCode = HAL_DEL_STA_REASON_CODE_XRETRY;
	else
		del_sta_ctx->reasonCode = HAL_DEL_STA_REASON_CODE_KEEP_ALIVE;

	/* fw reports absolute dBm only with db2dbm support, else add NF */
	if (wmi_service_enabled(wma->wmi_handle,
				wmi_service_hw_db2dbm_support))
		del_sta_ctx->rssi = kickout_event->rssi;
	else
		del_sta_ctx->rssi = kickout_event->rssi +
				    WMA_TGT_NOISE_FLOOR_DBM;
	wma_sta_kickout_event(del_sta_ctx->reasonCode, vdev_id, macaddr);
	wma_send_msg(wma, SIR_LIM_DELETE_STA_CONTEXT_IND, (void *)del_sta_ctx,
		     0);
	wma_lost_link_info_handler(wma, vdev_id, del_sta_ctx->rssi);

exit_handler:
	return 0;
}

/*
 * wma_unified_bcntx_status_event_handler() - handle beacon-tx status event
 * from firmware; forwards a WMA_DFS_BEACON_TX_SUCCESS_IND to the target
 * module for AP-mode vdevs. Returns 0 on success, negative errno otherwise.
 */
int wma_unified_bcntx_status_event_handler(void *handle,
					   uint8_t *cmd_param_info,
					   uint32_t len)
{
	tp_wma_handle wma = (tp_wma_handle) handle;
	WMI_OFFLOAD_BCN_TX_STATUS_EVENTID_param_tlvs *param_buf;
	wmi_offload_bcn_tx_status_event_fixed_param *resp_event;
	tSirFirstBeaconTxCompleteInd *beacon_tx_complete_ind;

	param_buf =
		(WMI_OFFLOAD_BCN_TX_STATUS_EVENTID_param_tlvs *) cmd_param_info;
	if (!param_buf) {
		wma_err("Invalid bcn tx response event buffer");
		return
	-EINVAL;
	}

	resp_event = param_buf->fixed_param;

	if (resp_event->vdev_id >= wma->max_bssid) {
		wma_err("received invalid vdev_id %d", resp_event->vdev_id);
		return -EINVAL;
	}

	/* Check for valid handle to ensure session is not
	 * deleted in any race
	 */
	if (!wma->interfaces[resp_event->vdev_id].vdev) {
		wma_err("vdev is NULL for vdev_%d", resp_event->vdev_id);
		return -EINVAL;
	}

	/* Beacon Tx Indication supports only AP mode. Ignore in other modes */
	if (wma_is_vdev_in_ap_mode(wma, resp_event->vdev_id) == false) {
		wma_debug("Beacon Tx Indication does not support type %d and sub_type %d",
			  wma->interfaces[resp_event->vdev_id].type,
			  wma->interfaces[resp_event->vdev_id].sub_type);
		return 0;
	}

	beacon_tx_complete_ind = (tSirFirstBeaconTxCompleteInd *)
		qdf_mem_malloc(sizeof(tSirFirstBeaconTxCompleteInd));
	if (!beacon_tx_complete_ind) {
		wma_err("Failed to alloc beacon_tx_complete_ind");
		return -ENOMEM;
	}

	beacon_tx_complete_ind->messageType = WMA_DFS_BEACON_TX_SUCCESS_IND;
	beacon_tx_complete_ind->length = sizeof(tSirFirstBeaconTxCompleteInd);
	beacon_tx_complete_ind->bss_idx = resp_event->vdev_id;

	/* ownership of the indication transfers to the message target */
	wma_send_msg(wma, WMA_DFS_BEACON_TX_SUCCESS_IND,
		     (void *)beacon_tx_complete_ind, 0);
	return 0;
}

/**
 * wma_get_go_probe_timeout() - get P2P GO probe timeout
 * @mac: UMAC handler
 * @max_inactive_time: return max inactive time
 * @max_unresponsive_time: return max unresponsive time
 *
 * Reads GO link-monitor and keepalive periods from P2P cfg, falling back
 * to WMA defaults when a read fails; unresponsive time is the sum of both.
 *
 * Return: none
 */
#ifdef CONVERGED_P2P_ENABLE
static inline void
wma_get_go_probe_timeout(struct mac_context *mac,
			 uint32_t *max_inactive_time,
			 uint32_t *max_unresponsive_time)
{
	uint32_t keep_alive;
	QDF_STATUS status;

	status = cfg_p2p_get_go_link_monitor_period(mac->psoc,
						    max_inactive_time);
	if (QDF_IS_STATUS_ERROR(status)) {
		wma_err("Failed to go monitor period");
		*max_inactive_time = WMA_LINK_MONITOR_DEFAULT_TIME_SECS;
	}
	status = cfg_p2p_get_go_keepalive_period(mac->psoc,
						 &keep_alive);
	if (QDF_IS_STATUS_ERROR(status)) {
		wma_err("Failed to read go keep alive");
		keep_alive = WMA_KEEP_ALIVE_DEFAULT_TIME_SECS;
	}

	*max_unresponsive_time = *max_inactive_time + keep_alive;
}
#else
static inline void
wma_get_go_probe_timeout(struct mac_context *mac,
			 uint32_t *max_inactive_time,
			 uint32_t *max_unresponsive_time)
{
	/* NOTE(review): outputs are left untouched when P2P is compiled out;
	 * callers reach here only for the GO sub type — verify against caller
	 */
}
#endif

/**
 * wma_get_link_probe_timeout() - get link timeout based on sub type
 * @mac: UMAC handler
 * @sub_type: vdev syb type
 * @max_inactive_time: return max inactive time
 * @max_unresponsive_time: return max unresponsive time
 *
 * P2P GO uses the P2P cfg values; all other AP sub types use the MLME
 * AP link-monitor / keepalive timeouts.
 *
 * Return: none
 */
static inline void
wma_get_link_probe_timeout(struct mac_context *mac,
			   uint32_t sub_type,
			   uint32_t *max_inactive_time,
			   uint32_t *max_unresponsive_time)
{
	if (sub_type == WMI_UNIFIED_VDEV_SUBTYPE_P2P_GO) {
		wma_get_go_probe_timeout(mac, max_inactive_time,
					 max_unresponsive_time);
	} else {
		*max_inactive_time =
			mac->mlme_cfg->timeouts.ap_link_monitor_timeout;
		*max_unresponsive_time = *max_inactive_time +
			mac->mlme_cfg->timeouts.ap_keep_alive_timeout;
	}
}

/**
 * wma_verify_rate_code() - verify if rate code is valid.
605 * @rate_code: rate code 606 * @band: band information 607 * 608 * Return: verify result 609 */ 610 static bool wma_verify_rate_code(u_int32_t rate_code, enum cds_band_type band) 611 { 612 uint8_t preamble, nss, rate; 613 bool valid = true; 614 615 preamble = (rate_code & 0xc0) >> 6; 616 nss = (rate_code & 0x30) >> 4; 617 rate = rate_code & 0xf; 618 619 switch (preamble) { 620 case WMI_RATE_PREAMBLE_CCK: 621 if (nss != 0 || rate > 3 || band == CDS_BAND_5GHZ) 622 valid = false; 623 break; 624 case WMI_RATE_PREAMBLE_OFDM: 625 if (nss != 0 || rate > 7) 626 valid = false; 627 break; 628 case WMI_RATE_PREAMBLE_HT: 629 if (nss != 0 || rate > 7) 630 valid = false; 631 break; 632 case WMI_RATE_PREAMBLE_VHT: 633 if (nss != 0 || rate > 9) 634 valid = false; 635 break; 636 default: 637 break; 638 } 639 return valid; 640 } 641 642 #define TX_MGMT_RATE_2G_ENABLE_OFFSET 30 643 #define TX_MGMT_RATE_5G_ENABLE_OFFSET 31 644 #define TX_MGMT_RATE_2G_OFFSET 0 645 #define TX_MGMT_RATE_5G_OFFSET 12 646 647 #define MAX_VDEV_MGMT_RATE_PARAMS 2 648 /* params being sent: 649 * wmi_vdev_param_mgmt_tx_rate 650 * wmi_vdev_param_per_band_mgmt_tx_rate 651 */ 652 653 /** 654 * wma_set_mgmt_rate() - set vdev mgmt rate. 
655 * @wma: wma handle 656 * @vdev_id: vdev id 657 * 658 * Return: None 659 */ 660 void wma_set_vdev_mgmt_rate(tp_wma_handle wma, uint8_t vdev_id) 661 { 662 uint32_t cfg_val; 663 uint32_t per_band_mgmt_tx_rate = 0; 664 enum cds_band_type band = 0; 665 struct mac_context *mac = cds_get_context(QDF_MODULE_ID_PE); 666 struct dev_set_param setparam[MAX_VDEV_MGMT_RATE_PARAMS] = {}; 667 uint8_t index = 0; 668 QDF_STATUS status = QDF_STATUS_E_FAILURE; 669 670 if (!mac) { 671 wma_err("Failed to get mac"); 672 return; 673 } 674 675 cfg_val = mac->mlme_cfg->sap_cfg.rate_tx_mgmt; 676 band = CDS_BAND_ALL; 677 if ((cfg_val == MLME_CFG_TX_MGMT_RATE_DEF) || 678 !wma_verify_rate_code(cfg_val, band)) { 679 wma_nofl_debug("default WNI_CFG_RATE_FOR_TX_MGMT, ignore"); 680 } else { 681 status = mlme_check_index_setparam(setparam, 682 wmi_vdev_param_mgmt_tx_rate, 683 cfg_val, index++, 684 MAX_VDEV_MGMT_RATE_PARAMS); 685 if (QDF_IS_STATUS_ERROR(status)) { 686 wma_err("failed at wmi_vdev_param_mgmt_tx_rate"); 687 goto error; 688 } 689 } 690 691 cfg_val = mac->mlme_cfg->sap_cfg.rate_tx_mgmt_2g; 692 band = CDS_BAND_2GHZ; 693 if ((cfg_val == MLME_CFG_TX_MGMT_2G_RATE_DEF) || 694 !wma_verify_rate_code(cfg_val, band)) { 695 wma_nofl_debug("use default 2G MGMT rate."); 696 per_band_mgmt_tx_rate &= 697 ~(1 << TX_MGMT_RATE_2G_ENABLE_OFFSET); 698 } else { 699 per_band_mgmt_tx_rate |= 700 (1 << TX_MGMT_RATE_2G_ENABLE_OFFSET); 701 per_band_mgmt_tx_rate |= 702 ((cfg_val & 0x7FF) << TX_MGMT_RATE_2G_OFFSET); 703 } 704 705 cfg_val = mac->mlme_cfg->sap_cfg.rate_tx_mgmt; 706 band = CDS_BAND_5GHZ; 707 if ((cfg_val == MLME_CFG_TX_MGMT_5G_RATE_DEF) || 708 !wma_verify_rate_code(cfg_val, band)) { 709 wma_nofl_debug("use default 5G MGMT rate."); 710 per_band_mgmt_tx_rate &= 711 ~(1 << TX_MGMT_RATE_5G_ENABLE_OFFSET); 712 } else { 713 per_band_mgmt_tx_rate |= 714 (1 << TX_MGMT_RATE_5G_ENABLE_OFFSET); 715 per_band_mgmt_tx_rate |= 716 ((cfg_val & 0x7FF) << TX_MGMT_RATE_5G_OFFSET); 717 } 718 719 status = 
mlme_check_index_setparam(setparam, 720 wmi_vdev_param_per_band_mgmt_tx_rate, 721 per_band_mgmt_tx_rate, index++, 722 MAX_VDEV_MGMT_RATE_PARAMS); 723 if (QDF_IS_STATUS_ERROR(status)) { 724 wma_err("failed at wmi_vdev_param_per_band_mgmt_tx_rate"); 725 goto error; 726 } 727 728 status = wma_send_multi_pdev_vdev_set_params(MLME_VDEV_SETPARAM, 729 vdev_id, setparam, index); 730 if (QDF_IS_STATUS_ERROR(status)) 731 wma_debug("failed to send MGMT_TX_RATE vdev set params stat:%d", 732 status); 733 error: 734 return; 735 } 736 737 #define MAX_VDEV_SAP_KEEPALIVE_PARAMS 3 738 /* params being sent: 739 * wmi_vdev_param_ap_keepalive_min_idle_inactive_time_secs 740 * wmi_vdev_param_ap_keepalive_max_idle_inactive_time_secs 741 * wmi_vdev_param_ap_keepalive_max_unresponsive_time_secs 742 */ 743 744 /** 745 * wma_set_sap_keepalive() - set SAP keep alive parameters to fw 746 * @wma: wma handle 747 * @vdev_id: vdev id 748 * 749 * Return: none 750 */ 751 void wma_set_sap_keepalive(tp_wma_handle wma, uint8_t vdev_id) 752 { 753 uint32_t min_inactive_time, max_inactive_time, max_unresponsive_time; 754 struct mac_context *mac = cds_get_context(QDF_MODULE_ID_PE); 755 QDF_STATUS status; 756 struct dev_set_param setparam[MAX_VDEV_SAP_KEEPALIVE_PARAMS] = {}; 757 uint8_t index = 0; 758 759 if (!mac) { 760 wma_err("Failed to get mac"); 761 return; 762 } 763 764 wma_get_link_probe_timeout(mac, wma->interfaces[vdev_id].sub_type, 765 &max_inactive_time, &max_unresponsive_time); 766 767 min_inactive_time = max_inactive_time / 2; 768 status = mlme_check_index_setparam( 769 setparam, 770 wmi_vdev_param_ap_keepalive_min_idle_inactive_time_secs, 771 min_inactive_time, index++, 772 MAX_VDEV_SAP_KEEPALIVE_PARAMS); 773 if (QDF_IS_STATUS_ERROR(status)) { 774 wma_err("failed to set wmi_vdev_param_ap_keepalive_min_idle_inactive_time_secs"); 775 goto error; 776 } 777 status = mlme_check_index_setparam( 778 setparam, 779 wmi_vdev_param_ap_keepalive_max_idle_inactive_time_secs, 780 max_inactive_time, index++, 
			MAX_VDEV_SAP_KEEPALIVE_PARAMS);
	if (QDF_IS_STATUS_ERROR(status)) {
		wma_err("failed to set wmi_vdev_param_ap_keepalive_max_idle_inactive_time_secs");
		goto error;
	}
	status = mlme_check_index_setparam(
			setparam,
			wmi_vdev_param_ap_keepalive_max_unresponsive_time_secs,
			max_unresponsive_time, index++,
			MAX_VDEV_SAP_KEEPALIVE_PARAMS);
	if (QDF_IS_STATUS_ERROR(status)) {
		wma_err("failed to set wmi_vdev_param_ap_keepalive_max_unresponsive_time_secs");
		goto error;
	}

	status = wma_send_multi_pdev_vdev_set_params(MLME_VDEV_SETPARAM,
						     vdev_id, setparam, index);
	if (QDF_IS_STATUS_ERROR(status))
		wma_err("Failed to Set AP MIN/MAX IDLE INACTIVE TIME, MAX UNRESPONSIVE TIME:%d", status);
	else
		wma_debug("vdev_id:%d min_inactive_time: %u max_inactive_time: %u max_unresponsive_time: %u",
			  vdev_id, min_inactive_time, max_inactive_time,
			  max_unresponsive_time);
error:
	return;
}

/**
 * wma_set_sta_sa_query_param() - set sta sa query parameters
 * @wma: wma handle
 * @vdev_id: vdev id
 *
 * This function sets sta query related parameters in fw.
 *
 * Return: none
 */
void wma_set_sta_sa_query_param(tp_wma_handle wma,
				uint8_t vdev_id)
{
	struct mac_context *mac = cds_get_context(QDF_MODULE_ID_PE);
	uint8_t max_retries;
	uint16_t retry_interval;

	if (!mac) {
		wma_err("mac context is NULL");
		return;
	}

	/* PMF SA query retry count / interval come from MLME generic cfg */
	max_retries = mac->mlme_cfg->gen.pmf_sa_query_max_retries;
	retry_interval = mac->mlme_cfg->gen.pmf_sa_query_retry_interval;

	wmi_unified_set_sta_sa_query_param_cmd(wma->wmi_handle,
					       vdev_id,
					       max_retries,
					       retry_interval);
}

/**
 * wma_set_sta_keep_alive() - set sta keep alive parameters
 * @wma: wma handle
 * @vdev_id: vdev id
 * @method: method for keep alive
 * @timeperiod: time period
 * @hostv4addr: host ipv4 address
 * @destv4addr: dst ipv4 address
 * @destmac: destination mac
 *
 * This function sets keep alive related parameters in fw.
 *
 * Return: none
 */
void wma_set_sta_keep_alive(tp_wma_handle wma, uint8_t vdev_id,
			    uint32_t method, uint32_t timeperiod,
			    uint8_t *hostv4addr, uint8_t *destv4addr,
			    uint8_t *destmac)
{
	struct sta_keep_alive_params params = { 0 };
	struct wma_txrx_node *intr;

	if (wma_validate_handle(wma))
		return;

	intr = &wma->interfaces[vdev_id];
	if (timeperiod > cfg_max(CFG_INFRA_STA_KEEP_ALIVE_PERIOD)) {
		wmi_err("Invalid period %d Max limit %d", timeperiod,
			cfg_max(CFG_INFRA_STA_KEEP_ALIVE_PERIOD));
		return;
	}

	params.vdev_id = vdev_id;
	params.method = method;
	params.timeperiod = timeperiod;
	if (intr) {
		/*
		 * BSS max idle period (if the AP advertised one) caps the
		 * keepalive period; NULL-data keepalive is then upgraded to
		 * a mgmt-frame keepalive.
		 */
		if (intr->bss_max_idle_period) {
			if (intr->bss_max_idle_period < timeperiod)
				params.timeperiod = intr->bss_max_idle_period;

			if (method == WMI_KEEP_ALIVE_NULL_PKT)
				params.method = WMI_KEEP_ALIVE_MGMT_FRAME;
		}

		wlan_mlme_set_keepalive_period(intr->vdev, params.timeperiod);
	}

	if (hostv4addr)
		qdf_mem_copy(params.hostv4addr, hostv4addr,
			     QDF_IPV4_ADDR_SIZE);
	if (destv4addr)
		qdf_mem_copy(params.destv4addr, destv4addr, QDF_IPV4_ADDR_SIZE);
	if (destmac)
		qdf_mem_copy(params.destmac, destmac, QDF_MAC_ADDR_SIZE);

	wmi_unified_set_sta_keep_alive_cmd(wma->wmi_handle, &params);
}

/*
 * 802.11n D2.0 defined values for "Minimum MPDU Start Spacing":
 *   0 for no restriction
 *   1 for 1/4 us - Our lower layer calculations limit our precision to 1 msec
 *   2 for 1/2 us - Our lower layer calculations limit our precision to 1 msec
 *   3 for 1 us
 *   4 for 2 us
 *   5 for 4 us
 *   6 for 8 us
 *   7 for 16 us
 */
static const uint8_t wma_mpdu_spacing[] = { 0, 1, 1, 1, 2, 4, 8, 16 };

/**
 * wma_parse_mpdudensity() - give mpdu spacing from mpdu density
 * @mpdudensity: mpdu density
 *
 * Return: mpdu spacing or 0 for error
 */
static inline uint8_t wma_parse_mpdudensity(uint8_t mpdudensity)
{
	if (mpdudensity < sizeof(wma_mpdu_spacing))
		return wma_mpdu_spacing[mpdudensity];
	else
		return 0;
}

#define CFG_CTRL_MASK 0xFF00
#define CFG_DATA_MASK 0x00FF

/**
 * wma_mask_tx_ht_rate() - mask tx ht rate based on config
 * @wma: wma handle
 * @mcs_set: mcs set buffer
 *
 * Return: None
 */
static void wma_mask_tx_ht_rate(tp_wma_handle wma, uint8_t *mcs_set)
{
	uint32_t i, j;
	uint16_t mcs_limit;
	uint8_t *rate_pos = mcs_set;
	struct mac_context *mac = wma->mac_context;

	/*
	 * Get MCS limit from ini configure, and map it to rate parameters
	 * This will limit HT rate upper bound. CFG_CTRL_MASK is used to
	 * check whether ini config is enabled and CFG_DATA_MASK to get the
	 * MCS value.
	 */
	mcs_limit = mac->mlme_cfg->rates.max_htmcs_txdata;

	if (mcs_limit & CFG_CTRL_MASK) {
		wma_debug("set mcs_limit %x", mcs_limit);

		mcs_limit &= CFG_DATA_MASK;
		/*
		 * Walk the MCS bitmap: whole bytes below the limit are set
		 * to 0xff, the byte containing the limit is filled bit by
		 * bit, and all remaining bytes are cleared.
		 */
		for (i = 0, j = 0; i < MAX_SUPPORTED_RATES;) {
			if (j < mcs_limit / 8) {
				rate_pos[j] = 0xff;
				j++;
				i += 8;
			} else if (j < mcs_limit / 8 + 1) {
				if (i <= mcs_limit)
					rate_pos[i / 8] |= 1 << (i % 8);
				else
					rate_pos[i / 8] &= ~(1 << (i % 8));
				i++;

				if (i >= (j + 1) * 8)
					j++;
			} else {
				rate_pos[j++] = 0;
				i += 8;
			}
		}
	}
}

#if SUPPORT_11AX
/**
 * wma_fw_to_host_phymode_11ax() - convert fw to host phymode for 11ax phymodes
 * @phymode: phymode to convert
 *
 * Return: one of the 11ax values defined in enum wlan_phymode;
 *         or WLAN_PHYMODE_AUTO if the input is not an 11ax phymode
 */
static enum wlan_phymode
wma_fw_to_host_phymode_11ax(WMI_HOST_WLAN_PHY_MODE phymode)
{
	switch (phymode) {
	default:
		return WLAN_PHYMODE_AUTO;
	case WMI_HOST_MODE_11AX_HE20:
		return WLAN_PHYMODE_11AXA_HE20;
	case WMI_HOST_MODE_11AX_HE40:
		return WLAN_PHYMODE_11AXA_HE40;
	case WMI_HOST_MODE_11AX_HE80:
		return WLAN_PHYMODE_11AXA_HE80;
	case WMI_HOST_MODE_11AX_HE80_80:
		return WLAN_PHYMODE_11AXA_HE80_80;
	case WMI_HOST_MODE_11AX_HE160:
		return WLAN_PHYMODE_11AXA_HE160;
	case WMI_HOST_MODE_11AX_HE20_2G:
		return WLAN_PHYMODE_11AXG_HE20;
	case WMI_HOST_MODE_11AX_HE40_2G:
		return WLAN_PHYMODE_11AXG_HE40;
	case WMI_HOST_MODE_11AX_HE80_2G:
		return WLAN_PHYMODE_11AXG_HE80;
	}
	/* not reached: every switch path returns */
	return WLAN_PHYMODE_AUTO;
}
#else
static enum wlan_phymode
wma_fw_to_host_phymode_11ax(WMI_HOST_WLAN_PHY_MODE phymode)
{
	return WLAN_PHYMODE_AUTO;
}
#endif

#ifdef WLAN_FEATURE_11BE
/**
 * wma_fw_to_host_phymode_11be() - convert fw to host phymode for 11be phymodes
 * @phymode: phymode to convert
 *
 * Return: one of the 11be values defined in enum wlan_phymode;
 *         or WLAN_PHYMODE_AUTO if the input is not an 11be phymode
 */
static enum wlan_phymode
wma_fw_to_host_phymode_11be(WMI_HOST_WLAN_PHY_MODE phymode)
{
	switch (phymode) {
	default:
		return WLAN_PHYMODE_AUTO;
	case WMI_HOST_MODE_11BE_EHT20:
		return WLAN_PHYMODE_11BEA_EHT20;
	case WMI_HOST_MODE_11BE_EHT40:
		return WLAN_PHYMODE_11BEA_EHT40;
	case WMI_HOST_MODE_11BE_EHT80:
		return WLAN_PHYMODE_11BEA_EHT80;
	case WMI_HOST_MODE_11BE_EHT160:
		return WLAN_PHYMODE_11BEA_EHT160;
	case WMI_HOST_MODE_11BE_EHT320:
		return WLAN_PHYMODE_11BEA_EHT320;
	case WMI_HOST_MODE_11BE_EHT20_2G:
		return WLAN_PHYMODE_11BEG_EHT20;
	case WMI_HOST_MODE_11BE_EHT40_2G:
		return WLAN_PHYMODE_11BEG_EHT40;
	}
	/* not reached: every switch path returns */
	return WLAN_PHYMODE_AUTO;
}

/* True when @phymode is an EHT (11be) phymode */
static inline bool wma_is_phymode_eht(enum wlan_phymode phymode)
{
	return IS_WLAN_PHYMODE_EHT(phymode);
}
#else
static enum wlan_phymode
wma_fw_to_host_phymode_11be(WMI_HOST_WLAN_PHY_MODE phymode)
{
	return WLAN_PHYMODE_AUTO;
}

static inline bool wma_is_phymode_eht(enum wlan_phymode phymode)
{
	return false;
}
#endif

#ifdef CONFIG_160MHZ_SUPPORT
/**
 * wma_fw_to_host_phymode_160() - convert fw to host phymode for 160 mhz
 * phymodes
 * @phymode: phymode to convert
 *
 * Return: one of the 160 mhz values defined in enum wlan_phymode;
 *         or WLAN_PHYMODE_AUTO if the input is not a 160 mhz phymode
 */
static enum wlan_phymode
wma_fw_to_host_phymode_160(WMI_HOST_WLAN_PHY_MODE phymode)
{
	switch (phymode) {
	default:
		return WLAN_PHYMODE_AUTO;
	case WMI_HOST_MODE_11AC_VHT80_80:
		return WLAN_PHYMODE_11AC_VHT80_80;
	case WMI_HOST_MODE_11AC_VHT160:
		return WLAN_PHYMODE_11AC_VHT160;
	}
}
#else
static enum wlan_phymode
wma_fw_to_host_phymode_160(WMI_HOST_WLAN_PHY_MODE phymode)
{
	return WLAN_PHYMODE_AUTO;
}
#endif

/*
 * wma_fw_to_host_phymode() - map a WMI phymode to the host enum.
 * Legacy/HT/VHT(<=80 MHz) modes are handled inline; anything else is
 * tried against the 160 MHz, 11ax and 11be helpers in turn, falling
 * back to WLAN_PHYMODE_AUTO when no helper recognizes it.
 */
enum wlan_phymode wma_fw_to_host_phymode(WMI_HOST_WLAN_PHY_MODE phymode)
{
	enum wlan_phymode host_phymode;

	switch (phymode) {
	default:
		host_phymode = wma_fw_to_host_phymode_160(phymode);
		if (host_phymode != WLAN_PHYMODE_AUTO)
			return host_phymode;
		host_phymode = wma_fw_to_host_phymode_11ax(phymode);
		if (host_phymode != WLAN_PHYMODE_AUTO)
			return host_phymode;
		return wma_fw_to_host_phymode_11be(phymode);
	case WMI_HOST_MODE_11A:
		return WLAN_PHYMODE_11A;
	case WMI_HOST_MODE_11G:
		return WLAN_PHYMODE_11G;
	case WMI_HOST_MODE_11B:
		return WLAN_PHYMODE_11B;
	case WMI_HOST_MODE_11GONLY:
		return WLAN_PHYMODE_11G_ONLY;
	case WMI_HOST_MODE_11NA_HT20:
		return WLAN_PHYMODE_11NA_HT20;
	case WMI_HOST_MODE_11NG_HT20:
		return WLAN_PHYMODE_11NG_HT20;
	case WMI_HOST_MODE_11NA_HT40:
		return WLAN_PHYMODE_11NA_HT40;
	case WMI_HOST_MODE_11NG_HT40:
		return WLAN_PHYMODE_11NG_HT40;
	case WMI_HOST_MODE_11AC_VHT20:
		return WLAN_PHYMODE_11AC_VHT20;
	case WMI_HOST_MODE_11AC_VHT40:
		return WLAN_PHYMODE_11AC_VHT40;
	case WMI_HOST_MODE_11AC_VHT80:
		return WLAN_PHYMODE_11AC_VHT80;
	case WMI_HOST_MODE_11AC_VHT20_2G:
		return WLAN_PHYMODE_11AC_VHT20_2G;
	case WMI_HOST_MODE_11AC_VHT40_2G:
		return WLAN_PHYMODE_11AC_VHT40_2G;
	case WMI_HOST_MODE_11AC_VHT80_2G:
		return WLAN_PHYMODE_11AC_VHT80_2G;
	}
}

#ifdef WLAN_FEATURE_11BE
/* Copy the EHT puncture bitmap from the desired channel into peer assoc */
static void wma_populate_peer_puncture(struct peer_assoc_params *peer,
				       struct wlan_channel *des_chan)
{
	peer->puncture_bitmap = des_chan->puncture_bitmap;
	wma_debug("Peer EHT puncture bitmap %d", peer->puncture_bitmap);
}

static void wma_populate_peer_mlo_cap(struct peer_assoc_params *peer,
				      tpAddStaParams params)
{
	struct
peer_assoc_ml_partner_links *ml_links; 1148 struct peer_assoc_mlo_params *mlo_params; 1149 struct peer_ml_info *ml_info; 1150 uint8_t i; 1151 1152 ml_info = ¶ms->ml_info; 1153 mlo_params = &peer->mlo_params; 1154 ml_links = &peer->ml_links; 1155 1156 /* Assoc link info */ 1157 mlo_params->vdev_id = ml_info->vdev_id; 1158 mlo_params->ieee_link_id = ml_info->link_id; 1159 qdf_mem_copy(&mlo_params->chan, &ml_info->channel_info, 1160 sizeof(struct wlan_channel)); 1161 qdf_mem_copy(&mlo_params->bssid, &ml_info->link_addr, 1162 QDF_MAC_ADDR_SIZE); 1163 qdf_mem_copy(&mlo_params->mac_addr, &ml_info->self_mac_addr, 1164 QDF_MAC_ADDR_SIZE); 1165 1166 mlo_params->rec_max_simultaneous_links = 1167 ml_info->rec_max_simultaneous_links; 1168 1169 /* Fill partner link info */ 1170 ml_links->num_links = ml_info->num_links; 1171 for (i = 0; i < ml_links->num_links; i++) { 1172 ml_links->partner_info[i].vdev_id = 1173 ml_info->partner_info[i].vdev_id; 1174 ml_links->partner_info[i].link_id = 1175 ml_info->partner_info[i].link_id; 1176 qdf_mem_copy(&ml_links->partner_info[i].chan, 1177 &ml_info->partner_info[i].channel_info, 1178 sizeof(struct wlan_channel)); 1179 qdf_mem_copy(&ml_links->partner_info[i].bssid, 1180 &ml_info->partner_info[i].link_addr, 1181 QDF_MAC_ADDR_SIZE); 1182 qdf_mem_copy(&ml_links->partner_info[i].mac_addr, 1183 &ml_info->partner_info[i].self_mac_addr, 1184 QDF_MAC_ADDR_SIZE); 1185 } 1186 } 1187 #else 1188 static void wma_populate_peer_puncture(struct peer_assoc_params *peer, 1189 struct wlan_channel *des_chan) 1190 { 1191 } 1192 1193 static void wma_populate_peer_mlo_cap(struct peer_assoc_params *peer, 1194 tpAddStaParams params) 1195 { 1196 } 1197 #endif 1198 1199 void wma_objmgr_set_peer_mlme_nss(tp_wma_handle wma, uint8_t *mac_addr, 1200 uint8_t nss) 1201 { 1202 uint8_t pdev_id; 1203 struct wlan_objmgr_peer *peer; 1204 struct peer_mlme_priv_obj *peer_priv; 1205 struct wlan_objmgr_psoc *psoc = wma->psoc; 1206 1207 pdev_id = 
wlan_objmgr_pdev_get_pdev_id(wma->pdev); 1208 peer = wlan_objmgr_get_peer(psoc, pdev_id, mac_addr, 1209 WLAN_LEGACY_WMA_ID); 1210 if (!peer) 1211 return; 1212 1213 peer_priv = wlan_objmgr_peer_get_comp_private_obj(peer, 1214 WLAN_UMAC_COMP_MLME); 1215 if (!peer_priv) { 1216 wlan_objmgr_peer_release_ref(peer, WLAN_LEGACY_WMA_ID); 1217 return; 1218 } 1219 1220 peer_priv->nss = nss; 1221 wlan_objmgr_peer_release_ref(peer, WLAN_LEGACY_WMA_ID); 1222 } 1223 1224 uint8_t wma_objmgr_get_peer_mlme_nss(tp_wma_handle wma, uint8_t *mac_addr) 1225 { 1226 uint8_t pdev_id; 1227 struct wlan_objmgr_peer *peer; 1228 struct peer_mlme_priv_obj *peer_priv; 1229 struct wlan_objmgr_psoc *psoc = wma->psoc; 1230 uint8_t nss; 1231 1232 pdev_id = wlan_objmgr_pdev_get_pdev_id(wma->pdev); 1233 peer = wlan_objmgr_get_peer(psoc, pdev_id, mac_addr, 1234 WLAN_LEGACY_WMA_ID); 1235 if (!peer) 1236 return 0; 1237 1238 peer_priv = wlan_objmgr_peer_get_comp_private_obj(peer, 1239 WLAN_UMAC_COMP_MLME); 1240 if (!peer_priv) { 1241 wlan_objmgr_peer_release_ref(peer, WLAN_LEGACY_WMA_ID); 1242 return 0; 1243 } 1244 1245 nss = peer_priv->nss; 1246 wlan_objmgr_peer_release_ref(peer, WLAN_LEGACY_WMA_ID); 1247 return nss; 1248 } 1249 1250 void wma_objmgr_set_peer_mlme_phymode(tp_wma_handle wma, uint8_t *mac_addr, 1251 enum wlan_phymode phymode) 1252 { 1253 uint8_t pdev_id; 1254 struct wlan_objmgr_peer *peer; 1255 struct wlan_objmgr_psoc *psoc = wma->psoc; 1256 1257 pdev_id = wlan_objmgr_pdev_get_pdev_id(wma->pdev); 1258 peer = wlan_objmgr_get_peer(psoc, pdev_id, mac_addr, 1259 WLAN_LEGACY_WMA_ID); 1260 if (!peer) 1261 return; 1262 1263 wlan_peer_obj_lock(peer); 1264 wlan_peer_set_phymode(peer, phymode); 1265 wlan_peer_obj_unlock(peer); 1266 wlan_objmgr_peer_release_ref(peer, WLAN_LEGACY_WMA_ID); 1267 } 1268 1269 /** 1270 * wma_objmgr_set_peer_mlme_type() - set peer type to peer object 1271 * @wma: wma handle 1272 * @mac_addr: mac addr of peer 1273 * @peer_type: peer type value to set 1274 * 1275 * Return: None 
 */
static void wma_objmgr_set_peer_mlme_type(tp_wma_handle wma,
					  uint8_t *mac_addr,
					  enum wlan_peer_type peer_type)
{
	uint8_t pdev_id;
	struct wlan_objmgr_peer *peer;
	struct wlan_objmgr_psoc *psoc = wma->psoc;

	pdev_id = wlan_objmgr_pdev_get_pdev_id(wma->pdev);
	peer = wlan_objmgr_get_peer(psoc, pdev_id, mac_addr,
				    WLAN_LEGACY_WMA_ID);
	if (!peer)
		return;

	/* Peer type is protected by the objmgr peer lock */
	wlan_peer_obj_lock(peer);
	wlan_peer_set_peer_type(peer, peer_type);
	wlan_peer_obj_unlock(peer);
	wlan_objmgr_peer_release_ref(peer, WLAN_LEGACY_WMA_ID);
}

#ifdef WLAN_FEATURE_11BE_MLO

/* Valid encoded EMLSR transition timeout range is (MIN, MAX) exclusive */
#define MIN_TIMEOUT_VAL 0
#define MAX_TIMEOUT_VAL 11

/* Encoded value n maps to 2^(n + TIMEOUT_TO_US) microseconds */
#define TIMEOUT_TO_US 6

/*
 * wma_convert_trans_timeout_us() - API to convert
 * emlsr transition timeout to microseconds. Refer Table 9-401h
 * of IEEE802.11be specification
 * @timeout: EMLSR transition timeout (encoded value from the EML
 * capabilities field)
 *
 * Values outside the valid (0, 11) range map to 0 us.
 *
 * Return: Timeout value in microseconds
 */
static inline uint32_t
wma_convert_trans_timeout_us(uint16_t timeout)
{
	uint32_t us = 0;

	if (timeout > MIN_TIMEOUT_VAL && timeout < MAX_TIMEOUT_VAL) {
		/* timeout = 1 is for 128us, i.e. 2^(1 + 6) */
		us = (1 << (timeout + TIMEOUT_TO_US));
	}

	return us;
}

/**
 * wma_set_mlo_capability() - set MLO caps to the peer assoc request
 * @wma: wma handle
 * @vdev: vdev object
 * @params: Add sta params
 * @req: peer assoc request parameters
 *
 * A non-zero peer MLD address is treated as "peer is MLO capable";
 * otherwise MLO and eMLSR are explicitly disabled in @req.
 *
 * Return: None
 */
static void wma_set_mlo_capability(tp_wma_handle wma,
				   struct wlan_objmgr_vdev *vdev,
				   tpAddStaParams params,
				   struct peer_assoc_params *req)
{
	uint8_t pdev_id;
	struct wlan_objmgr_peer *peer;
	struct wlan_objmgr_psoc *psoc = wma->psoc;
	uint16_t link_id_bitmap;

	pdev_id = wlan_objmgr_pdev_get_pdev_id(wma->pdev);
	peer = wlan_objmgr_get_peer(psoc, pdev_id, req->peer_mac,
				    WLAN_LEGACY_WMA_ID);

	if (!peer) {
		wma_err("peer not valid");
		return;
	}

	if (!qdf_is_macaddr_zero((struct qdf_mac_addr *)peer->mldaddr)) {
		req->mlo_params.mlo_enabled = true;
		req->mlo_params.mlo_assoc_link =
			wlan_peer_mlme_is_assoc_peer(peer);
		WLAN_ADDR_COPY(req->mlo_params.mld_mac, peer->mldaddr);
		/*
		 * Force this link inactive when policy mgr requires the
		 * link disabled or an eMLSR STA concurrency exists.
		 */
		if (policy_mgr_ml_link_vdev_need_to_be_disabled(psoc, vdev,
								true) ||
		    policy_mgr_is_emlsr_sta_concurrency_present(psoc)) {
			req->mlo_params.mlo_force_link_inactive = 1;
			link_id_bitmap = 1 << params->link_id;
			ml_nlink_set_curr_force_inactive_state(
				psoc, vdev, link_id_bitmap, LINK_ADD);
			ml_nlink_init_concurrency_link_request(psoc, vdev);
		}
		wma_debug("assoc_link %d" QDF_MAC_ADDR_FMT ", force inactive %d link id %d",
			  req->mlo_params.mlo_assoc_link,
			  QDF_MAC_ADDR_REF(peer->mldaddr),
			  req->mlo_params.mlo_force_link_inactive,
			  params->link_id);

		req->mlo_params.emlsr_support = params->emlsr_support;
		req->mlo_params.ieee_link_id = params->link_id;
		if (req->mlo_params.emlsr_support) {
			req->mlo_params.trans_timeout_us =
			wma_convert_trans_timeout_us(params->emlsr_trans_timeout);
		}
		req->mlo_params.msd_cap_support = params->msd_caps_present;
		req->mlo_params.medium_sync_duration =
			params->msd_caps.med_sync_duration;
		req->mlo_params.medium_sync_ofdm_ed_thresh =
			params->msd_caps.med_sync_ofdm_ed_thresh;
		req->mlo_params.medium_sync_max_txop_num =
			params->msd_caps.med_sync_max_txop_num;
		req->mlo_params.link_switch_in_progress =
			wlan_vdev_mlme_is_mlo_link_switch_in_progress(vdev);
		/*
		 * Set max simultaneous links = 1 for MLSR, 2 for MLMR. The +1
		 * is added as per the agreement with FW for backward
		 * compatibility purposes. Our internal structures still
		 * conform to the values as per spec i.e. 0 = MLSR, 1 = MLMR.
		 */
		req->mlo_params.max_num_simultaneous_links =
			wlan_mlme_get_sta_mlo_simultaneous_links(psoc) + 1;
	} else {
		wma_debug("Peer MLO context is NULL");
		req->mlo_params.mlo_enabled = false;
		req->mlo_params.emlsr_support = false;
	}
	wlan_objmgr_peer_release_ref(peer, WLAN_LEGACY_WMA_ID);
}

/* Mark @req as the assoc vdev when @vdev is an MLO vdev but not a link vdev */
static void wma_set_mlo_assoc_vdev(struct wlan_objmgr_vdev *vdev,
				   struct peer_assoc_params *req)
{
	if (wlan_vdev_mlme_is_mlo_vdev(vdev) &&
	    !wlan_vdev_mlme_is_mlo_link_vdev(vdev))
		req->is_assoc_vdev = true;
}
#else
static inline void wma_set_mlo_capability(tp_wma_handle wma,
					  struct wlan_objmgr_vdev *vdev,
					  tpAddStaParams params,
					  struct peer_assoc_params *req)
{
}

static inline void wma_set_mlo_assoc_vdev(struct wlan_objmgr_vdev *vdev,
					  struct peer_assoc_params *req)
{
}
#endif

/**
 * wma_send_peer_assoc() - send peer assoc command to fw
 * @wma: wma handle
 * @nw_type: nw type
 * @params: add sta params
 *
 * This function send peer assoc command to firmware with
 * different parameters.
1433 * 1434 * Return: QDF_STATUS 1435 */ 1436 QDF_STATUS wma_send_peer_assoc(tp_wma_handle wma, 1437 tSirNwType nw_type, 1438 tpAddStaParams params) 1439 { 1440 struct peer_assoc_params *cmd; 1441 int32_t ret, max_rates, i; 1442 uint8_t *rate_pos; 1443 wmi_rate_set peer_legacy_rates, peer_ht_rates; 1444 uint32_t num_peer_11b_rates = 0; 1445 uint32_t num_peer_11a_rates = 0; 1446 enum wlan_phymode phymode, vdev_phymode; 1447 uint32_t peer_nss = 1; 1448 struct wma_txrx_node *intr = NULL; 1449 bool is_he; 1450 bool is_eht; 1451 QDF_STATUS status; 1452 struct mac_context *mac = wma->mac_context; 1453 struct wlan_channel *des_chan; 1454 int32_t keymgmt, uccipher, authmode; 1455 1456 cmd = qdf_mem_malloc(sizeof(struct peer_assoc_params)); 1457 if (!cmd) { 1458 wma_err("Failed to allocate peer_assoc_params param"); 1459 return QDF_STATUS_E_NOMEM; 1460 } 1461 1462 intr = &wma->interfaces[params->smesessionId]; 1463 1464 wma_mask_tx_ht_rate(wma, params->supportedRates.supportedMCSSet); 1465 1466 qdf_mem_zero(&peer_legacy_rates, sizeof(wmi_rate_set)); 1467 qdf_mem_zero(&peer_ht_rates, sizeof(wmi_rate_set)); 1468 qdf_mem_zero(cmd, sizeof(struct peer_assoc_params)); 1469 1470 is_he = wma_is_peer_he_capable(params); 1471 is_eht = wma_is_peer_eht_capable(params); 1472 if ((params->ch_width > CH_WIDTH_40MHZ) && 1473 ((nw_type == eSIR_11G_NW_TYPE) || 1474 (nw_type == eSIR_11B_NW_TYPE))) { 1475 wma_err("ch_width %d sent in 11G, configure to 40MHz", 1476 params->ch_width); 1477 params->ch_width = CH_WIDTH_40MHZ; 1478 } 1479 phymode = wma_peer_phymode(nw_type, params->staType, 1480 params->htCapable, params->ch_width, 1481 params->vhtCapable, is_he, is_eht); 1482 1483 des_chan = wlan_vdev_mlme_get_des_chan(intr->vdev); 1484 vdev_phymode = des_chan->ch_phymode; 1485 if ((intr->type == WMI_VDEV_TYPE_AP) && (phymode > vdev_phymode)) { 1486 wma_nofl_debug("Peer phymode %d is not allowed. 
Set it equal to sap/go phymode %d", 1487 phymode, vdev_phymode); 1488 phymode = vdev_phymode; 1489 } 1490 1491 if (!mac->mlme_cfg->rates.disable_abg_rate_txdata && 1492 !WLAN_REG_IS_6GHZ_CHAN_FREQ(des_chan->ch_freq)) { 1493 /* Legacy Rateset */ 1494 rate_pos = (uint8_t *) peer_legacy_rates.rates; 1495 for (i = 0; i < SIR_NUM_11B_RATES; i++) { 1496 if (!params->supportedRates.llbRates[i]) 1497 continue; 1498 rate_pos[peer_legacy_rates.num_rates++] = 1499 params->supportedRates.llbRates[i]; 1500 num_peer_11b_rates++; 1501 } 1502 for (i = 0; i < SIR_NUM_11A_RATES; i++) { 1503 if (!params->supportedRates.llaRates[i]) 1504 continue; 1505 rate_pos[peer_legacy_rates.num_rates++] = 1506 params->supportedRates.llaRates[i]; 1507 num_peer_11a_rates++; 1508 } 1509 } 1510 1511 if ((phymode == WLAN_PHYMODE_11A && num_peer_11a_rates == 0) || 1512 (phymode == WLAN_PHYMODE_11B && num_peer_11b_rates == 0)) { 1513 wma_warn("Invalid phy rates. phymode 0x%x, 11b_rates %d, 11a_rates %d", 1514 phymode, num_peer_11b_rates, 1515 num_peer_11a_rates); 1516 qdf_mem_free(cmd); 1517 return QDF_STATUS_E_INVAL; 1518 } 1519 1520 /* HT Rateset */ 1521 max_rates = sizeof(peer_ht_rates.rates) / 1522 sizeof(peer_ht_rates.rates[0]); 1523 rate_pos = (uint8_t *) peer_ht_rates.rates; 1524 for (i = 0; i < MAX_SUPPORTED_RATES; i++) { 1525 if (params->supportedRates.supportedMCSSet[i / 8] & 1526 (1 << (i % 8))) { 1527 rate_pos[peer_ht_rates.num_rates++] = i; 1528 if (i >= 8) { 1529 /* MCS8 or higher rate is present, must be 2x2 */ 1530 peer_nss = 2; 1531 } 1532 } 1533 if (peer_ht_rates.num_rates == max_rates) 1534 break; 1535 } 1536 1537 if (params->htCapable && !peer_ht_rates.num_rates) { 1538 uint8_t temp_ni_rates[8] = { 0x0, 0x1, 0x2, 0x3, 1539 0x4, 0x5, 0x6, 0x7}; 1540 /* 1541 * Workaround for EV 116382: The peer is marked HT but with 1542 * supported rx mcs set is set to 0. 11n spec mandates MCS0-7 1543 * for a HT STA. So forcing the supported rx mcs rate to 1544 * MCS 0-7. 
This workaround will be removed once we get 1545 * clarification from WFA regarding this STA behavior. 1546 */ 1547 1548 /* TODO: Do we really need this? */ 1549 wma_warn("Peer is marked as HT capable but supported mcs rate is 0"); 1550 peer_ht_rates.num_rates = sizeof(temp_ni_rates); 1551 qdf_mem_copy((uint8_t *) peer_ht_rates.rates, temp_ni_rates, 1552 peer_ht_rates.num_rates); 1553 } 1554 1555 /* in ap mode and for tdls peer, use mac address of the peer in 1556 * the other end as the new peer address; in sta mode, use bss id to 1557 * be the new peer address 1558 */ 1559 if ((wma_is_vdev_in_ap_mode(wma, params->smesessionId)) 1560 #ifdef FEATURE_WLAN_TDLS 1561 || (STA_ENTRY_TDLS_PEER == params->staType) 1562 #endif /* FEATURE_WLAN_TDLS */ 1563 ) { 1564 qdf_mem_copy(cmd->peer_mac, params->staMac, 1565 sizeof(cmd->peer_mac)); 1566 } else { 1567 qdf_mem_copy(cmd->peer_mac, params->bssId, 1568 sizeof(cmd->peer_mac)); 1569 } 1570 wma_objmgr_set_peer_mlme_phymode(wma, cmd->peer_mac, phymode); 1571 1572 cmd->vdev_id = params->smesessionId; 1573 cmd->peer_new_assoc = 1; 1574 cmd->peer_associd = params->assocId; 1575 1576 cmd->is_wme_set = 1; 1577 1578 if (params->wmmEnabled) 1579 cmd->qos_flag = 1; 1580 1581 if (params->uAPSD) { 1582 cmd->apsd_flag = 1; 1583 wma_nofl_debug("Set WMI_PEER_APSD: uapsd Mask %d", 1584 params->uAPSD); 1585 } 1586 1587 if (params->htCapable) { 1588 cmd->ht_flag = 1; 1589 cmd->qos_flag = 1; 1590 cmd->peer_rate_caps |= WMI_RC_HT_FLAG; 1591 } 1592 1593 if (params->vhtCapable) { 1594 cmd->ht_flag = 1; 1595 cmd->qos_flag = 1; 1596 cmd->vht_flag = 1; 1597 cmd->peer_rate_caps |= WMI_RC_HT_FLAG; 1598 } 1599 1600 if (params->ch_width) { 1601 cmd->peer_rate_caps |= WMI_RC_CW40_FLAG; 1602 if (params->fShortGI40Mhz) 1603 cmd->peer_rate_caps |= WMI_RC_SGI_FLAG; 1604 } else if (params->fShortGI20Mhz) { 1605 cmd->peer_rate_caps |= WMI_RC_SGI_FLAG; 1606 } 1607 1608 switch (params->ch_width) { 1609 case CH_WIDTH_320MHZ: 1610 
wma_set_peer_assoc_params_bw_320(cmd, params->ch_width); 1611 fallthrough; 1612 case CH_WIDTH_80P80MHZ: 1613 case CH_WIDTH_160MHZ: 1614 cmd->bw_160 = 1; 1615 fallthrough; 1616 case CH_WIDTH_80MHZ: 1617 cmd->bw_80 = 1; 1618 fallthrough; 1619 case CH_WIDTH_40MHZ: 1620 cmd->bw_40 = 1; 1621 fallthrough; 1622 default: 1623 break; 1624 } 1625 1626 cmd->peer_vht_caps = params->vht_caps; 1627 if (params->p2pCapableSta) { 1628 cmd->p2p_capable_sta = 1; 1629 wma_objmgr_set_peer_mlme_type(wma, params->staMac, 1630 WLAN_PEER_P2P_CLI); 1631 } 1632 1633 if (params->rmfEnabled) 1634 cmd->is_pmf_enabled = 1; 1635 1636 if (params->stbc_capable) 1637 cmd->stbc_flag = 1; 1638 1639 if (params->htLdpcCapable || params->vhtLdpcCapable) 1640 cmd->ldpc_flag = 1; 1641 1642 switch (params->mimoPS) { 1643 case eSIR_HT_MIMO_PS_STATIC: 1644 cmd->static_mimops_flag = 1; 1645 break; 1646 case eSIR_HT_MIMO_PS_DYNAMIC: 1647 cmd->dynamic_mimops_flag = 1; 1648 break; 1649 case eSIR_HT_MIMO_PS_NO_LIMIT: 1650 cmd->spatial_mux_flag = 1; 1651 break; 1652 default: 1653 break; 1654 } 1655 1656 wma_set_twt_peer_caps(params, cmd); 1657 #ifdef FEATURE_WLAN_TDLS 1658 if (STA_ENTRY_TDLS_PEER == params->staType) 1659 cmd->auth_flag = 1; 1660 #endif /* FEATURE_WLAN_TDLS */ 1661 1662 if (params->wpa_rsn 1663 #ifdef FEATURE_WLAN_WAPI 1664 || params->encryptType == eSIR_ED_WPI 1665 #endif /* FEATURE_WLAN_WAPI */ 1666 ) { 1667 if (!params->no_ptk_4_way) { 1668 cmd->need_ptk_4_way = 1; 1669 wlan_acquire_peer_key_wakelock(wma->pdev, 1670 cmd->peer_mac); 1671 } 1672 } 1673 1674 if (params->wpa_rsn >> 1) 1675 cmd->need_gtk_2_way = 1; 1676 1677 #ifdef FEATURE_WLAN_WAPI 1678 if (params->encryptType == eSIR_ED_WPI) { 1679 ret = wma_vdev_set_param(wma->wmi_handle, params->smesessionId, 1680 wmi_vdev_param_drop_unencry, false); 1681 if (ret) { 1682 wma_err("Set wmi_vdev_param_drop_unencry Param status:%d", 1683 ret); 1684 qdf_mem_free(cmd); 1685 return ret; 1686 } 1687 } 1688 #endif /* FEATURE_WLAN_WAPI */ 1689 1690 
cmd->peer_caps = params->capab_info; 1691 cmd->peer_listen_intval = params->listenInterval; 1692 cmd->peer_ht_caps = params->ht_caps; 1693 cmd->peer_max_mpdu = (1 << (IEEE80211_HTCAP_MAXRXAMPDU_FACTOR + 1694 params->maxAmpduSize)) - 1; 1695 cmd->peer_mpdu_density = wma_parse_mpdudensity(params->maxAmpduDensity); 1696 1697 if (params->supportedRates.supportedMCSSet[1] && 1698 params->supportedRates.supportedMCSSet[2]) 1699 cmd->peer_rate_caps |= WMI_RC_TS_FLAG; 1700 else if (params->supportedRates.supportedMCSSet[1]) 1701 cmd->peer_rate_caps |= WMI_RC_DS_FLAG; 1702 1703 /* Update peer legacy rate information */ 1704 cmd->peer_legacy_rates.num_rates = peer_legacy_rates.num_rates; 1705 qdf_mem_copy(cmd->peer_legacy_rates.rates, peer_legacy_rates.rates, 1706 peer_legacy_rates.num_rates); 1707 1708 /* Update peer HT rate information */ 1709 cmd->peer_ht_rates.num_rates = peer_ht_rates.num_rates; 1710 qdf_mem_copy(cmd->peer_ht_rates.rates, peer_ht_rates.rates, 1711 peer_ht_rates.num_rates); 1712 1713 /* VHT Rates */ 1714 1715 cmd->peer_nss = peer_nss; 1716 /* 1717 * Because of DBS a vdev may come up in any of the two MACs with 1718 * different capabilities. STBC capab should be fetched for given 1719 * hard_mode->MAC_id combo. It is planned that firmware should provide 1720 * these dev capabilities. But for now number of tx streams can be used 1721 * to identify if Tx STBC needs to be disabled. 
1722 */ 1723 if (intr->tx_streams < 2) { 1724 cmd->peer_vht_caps &= ~(1 << SIR_MAC_VHT_CAP_TXSTBC); 1725 wma_nofl_debug("Num tx_streams: %d, Disabled txSTBC", 1726 intr->tx_streams); 1727 } 1728 1729 cmd->vht_capable = params->vhtCapable; 1730 if (params->vhtCapable) { 1731 #define VHT2x2MCSMASK 0xc 1732 cmd->rx_max_rate = params->supportedRates.vhtRxHighestDataRate; 1733 cmd->rx_mcs_set = params->supportedRates.vhtRxMCSMap; 1734 cmd->tx_max_rate = params->supportedRates.vhtTxHighestDataRate; 1735 cmd->tx_mcs_set = params->supportedRates.vhtTxMCSMap; 1736 /* 1737 * tx_mcs_set is intersection of self tx NSS and peer rx mcs map 1738 */ 1739 if (params->vhtSupportedRxNss) 1740 cmd->peer_nss = params->vhtSupportedRxNss; 1741 else 1742 cmd->peer_nss = ((cmd->tx_mcs_set & VHT2x2MCSMASK) 1743 == VHT2x2MCSMASK) ? 1 : 2; 1744 1745 if (params->vht_mcs_10_11_supp) { 1746 WMI_SET_BITS(cmd->tx_mcs_set, 16, cmd->peer_nss, 1747 ((1 << cmd->peer_nss) - 1)); 1748 WMI_VHT_MCS_NOTIFY_EXT_SS_SET(cmd->tx_mcs_set, 1); 1749 } 1750 if (params->vht_extended_nss_bw_cap && 1751 (params->vht_160mhz_nss || params->vht_80p80mhz_nss)) { 1752 /* 1753 * bit[2:0] : Represents value of Rx NSS for 160 MHz 1754 * bit[5:3] : Represents value of Rx NSS for 80_80 MHz 1755 * Extended NSS support 1756 * bit[30:6]: Reserved 1757 * bit[31] : MSB(0/1): 1 in case of valid data 1758 */ 1759 cmd->peer_bw_rxnss_override |= (1 << 31); 1760 if (params->vht_160mhz_nss) 1761 cmd->peer_bw_rxnss_override |= 1762 (params->vht_160mhz_nss - 1); 1763 if (params->vht_80p80mhz_nss) 1764 cmd->peer_bw_rxnss_override |= 1765 ((params->vht_80p80mhz_nss - 1) << 3); 1766 wma_debug("peer_bw_rxnss_override %0X", 1767 cmd->peer_bw_rxnss_override); 1768 } 1769 } 1770 1771 wma_set_mlo_capability(wma, intr->vdev, params, cmd); 1772 1773 wma_set_mlo_assoc_vdev(intr->vdev, cmd); 1774 1775 wma_debug("rx_max_rate %d, rx_mcs %x, tx_max_rate %d, tx_mcs: %x num rates %d need 4 way %d", 1776 cmd->rx_max_rate, cmd->rx_mcs_set, cmd->tx_max_rate, 
1777 cmd->tx_mcs_set, peer_ht_rates.num_rates, 1778 cmd->need_ptk_4_way); 1779 1780 /* 1781 * Limit nss to max number of rf chain supported by target 1782 * Otherwise Fw will crash 1783 */ 1784 if (cmd->peer_nss > WMA_MAX_NSS) { 1785 wma_err("peer Nss %d is more than supported", cmd->peer_nss); 1786 cmd->peer_nss = WMA_MAX_NSS; 1787 } 1788 1789 wma_populate_peer_he_cap(cmd, params); 1790 wma_populate_peer_eht_cap(cmd, params); 1791 wma_populate_peer_puncture(cmd, des_chan); 1792 wma_populate_peer_mlo_cap(cmd, params); 1793 if (!wma_is_vdev_in_ap_mode(wma, params->smesessionId)) 1794 intr->nss = cmd->peer_nss; 1795 wma_objmgr_set_peer_mlme_nss(wma, cmd->peer_mac, cmd->peer_nss); 1796 1797 /* Till conversion is not done in WMI we need to fill fw phy mode */ 1798 cmd->peer_phymode = wmi_host_to_fw_phymode(phymode); 1799 1800 keymgmt = wlan_crypto_get_param(intr->vdev, WLAN_CRYPTO_PARAM_KEY_MGMT); 1801 authmode = wlan_crypto_get_param(intr->vdev, 1802 WLAN_CRYPTO_PARAM_AUTH_MODE); 1803 uccipher = wlan_crypto_get_param(intr->vdev, 1804 WLAN_CRYPTO_PARAM_UCAST_CIPHER); 1805 1806 cmd->akm = cm_crypto_authmode_to_wmi_authmode(authmode, 1807 keymgmt, 1808 uccipher); 1809 1810 status = wmi_unified_peer_assoc_send(wma->wmi_handle, 1811 cmd); 1812 if (QDF_IS_STATUS_ERROR(status)) 1813 wma_alert("Failed to send peer assoc command status = %d", 1814 status); 1815 qdf_mem_free(cmd); 1816 1817 return status; 1818 } 1819 1820 /** 1821 * wmi_unified_vdev_set_gtx_cfg_send() - set GTX params 1822 * @wmi_handle: wmi handle 1823 * @if_id: vdev id 1824 * @gtx_info: GTX config params 1825 * 1826 * This function set GTX related params in firmware. 
 *
 * Return: QDF_STATUS of the WMI send (QDF_STATUS_SUCCESS or error code)
 */
QDF_STATUS wmi_unified_vdev_set_gtx_cfg_send(wmi_unified_t wmi_handle,
					     uint32_t if_id,
					     gtx_config_t *gtx_info)
{
	struct wmi_gtx_config params;

	/* Translate legacy gtx_config_t fields into the unified WMI layout */
	params.gtx_rt_mask[0] = gtx_info->gtxRTMask[0];
	params.gtx_rt_mask[1] = gtx_info->gtxRTMask[1];
	params.gtx_usrcfg = gtx_info->gtxUsrcfg;
	params.gtx_threshold = gtx_info->gtxPERThreshold;
	params.gtx_margin = gtx_info->gtxPERMargin;
	params.gtx_tpcstep = gtx_info->gtxTPCstep;
	params.gtx_tpcmin = gtx_info->gtxTPCMin;
	params.gtx_bwmask = gtx_info->gtxBWMask;

	return wmi_unified_vdev_set_gtx_cfg_cmd(wmi_handle,
						if_id, &params);

}

/**
 * wma_update_protection_mode() - update protection mode
 * @wma: wma handle
 * @vdev_id: vdev id
 * @llbcoexist: protection mode info (non-zero enables CTS-only protection)
 *
 * This function set protection mode(RTS/CTS) to fw for passed vdev id.
 *
 * Return: none
 */
void wma_update_protection_mode(tp_wma_handle wma, uint8_t vdev_id,
				uint8_t llbcoexist)
{
	QDF_STATUS ret;
	enum ieee80211_protmode prot_mode;

	prot_mode = llbcoexist ? IEEE80211_PROT_CTSONLY : IEEE80211_PROT_NONE;

	ret = wma_vdev_set_param(wma->wmi_handle, vdev_id,
				 wmi_vdev_param_protection_mode,
				 prot_mode);

	if (QDF_IS_STATUS_ERROR(ret))
		wma_err("Failed to send wmi protection mode cmd");
	else
		wma_nofl_debug("Updated protection mode %d to target",
			       prot_mode);
}

void
wma_update_beacon_interval(tp_wma_handle wma, uint8_t vdev_id,
			   uint16_t beaconInterval)
{
	QDF_STATUS ret;

	ret = wma_vdev_set_param(wma->wmi_handle, vdev_id,
				 wmi_vdev_param_beacon_interval,
				 beaconInterval);

	if (QDF_IS_STATUS_ERROR(ret))
		wma_err("Failed to update beacon interval");
	else
		wma_info("Updated beacon interval %d for vdev %d",
			 beaconInterval, vdev_id);
}

#ifdef WLAN_FEATURE_11AX_BSS_COLOR
/**
 * wma_update_bss_color() - update beacon bss color in fw
 * @wma: wma handle
 * @vdev_id: vdev id
 * @bcn_params: beacon parameters; only the bss_color and
 * bss_color_disabled fields are used.
 *
 * Return: none
 */
static void
wma_update_bss_color(tp_wma_handle wma, uint8_t vdev_id,
		     tUpdateBeaconParams *bcn_params)
{
	QDF_STATUS ret;
	uint32_t dword_he_ops = 0;

	/* Pack the color and disable flag into the HE ops dword for FW */
	WMI_HEOPS_COLOR_SET(dword_he_ops, bcn_params->bss_color);
	WMI_HEOPS_BSSCOLORDISABLE_SET(dword_he_ops,
				      bcn_params->bss_color_disabled);
	wma_nofl_debug("vdev: %d, update bss color, HE_OPS: 0x%x",
		       vdev_id, dword_he_ops);
	ret = wma_vdev_set_param(wma->wmi_handle, vdev_id,
				 wmi_vdev_param_he_bss_color, dword_he_ops);
	if (QDF_IS_STATUS_ERROR(ret))
		wma_err("Failed to update HE operations");
}
#else
static void wma_update_bss_color(tp_wma_handle wma, uint8_t vdev_id,
				 tUpdateBeaconParams *bcn_params)
{
}
#endif

/**
 * wma_process_update_beacon_params() - update beacon parameters to target
 * @wma: wma handle
 * @bcn_params: beacon parameters
 *
 * Dispatches each change flagged in paramChangeBitmap (beacon interval,
 * 11b coexist protection, BSS color) to its dedicated updater.
 *
 * Return: none
 */
void
wma_process_update_beacon_params(tp_wma_handle wma,
				 tUpdateBeaconParams *bcn_params)
{
	if (!bcn_params) {
		wma_err("bcn_params NULL");
		return;
	}

	if (bcn_params->vdev_id >= wma->max_bssid) {
		wma_err("Invalid vdev id %d", bcn_params->vdev_id);
		return;
	}

	if (bcn_params->paramChangeBitmap & PARAM_BCN_INTERVAL_CHANGED) {
		wma_update_beacon_interval(wma, bcn_params->vdev_id,
					   bcn_params->beaconInterval);
	}

	if (bcn_params->paramChangeBitmap & PARAM_llBCOEXIST_CHANGED)
		wma_update_protection_mode(wma, bcn_params->vdev_id,
					   bcn_params->llbCoexist);

	if (bcn_params->paramChangeBitmap & PARAM_BSS_COLOR_CHANGED)
		wma_update_bss_color(wma, bcn_params->vdev_id,
				     bcn_params);
}

void wma_update_rts_params(tp_wma_handle wma, uint32_t value)
{
	uint8_t vdev_id;
	QDF_STATUS ret;
	struct wlan_objmgr_vdev *vdev;

	/* Apply the RTS threshold to every active vdev */
	for (vdev_id = 0; vdev_id < wma->max_bssid; vdev_id++) {
		vdev = wma->interfaces[vdev_id].vdev;
		if (!vdev)
			continue;
		ret = wma_vdev_set_param(wma->wmi_handle,
					 vdev_id,
					 wmi_vdev_param_rts_threshold,
					 value);
		if (QDF_IS_STATUS_ERROR(ret))
			wma_err("Update cfg param fail for vdevId %d", vdev_id);
	}
}

void wma_update_frag_params(tp_wma_handle wma, uint32_t value)
{
	uint8_t vdev_id;
	QDF_STATUS ret;
	struct wlan_objmgr_vdev *vdev;

	/* Apply the fragmentation threshold to every active vdev */
	for (vdev_id = 0; vdev_id < wma->max_bssid; vdev_id++) {
		vdev = wma->interfaces[vdev_id].vdev;
		if (!vdev)
			continue;
		ret = wma_vdev_set_param(wma->wmi_handle, vdev_id,
					 wmi_vdev_param_fragmentation_threshold,
					 value);
		if (QDF_IS_STATUS_ERROR(ret))
			wma_err("Update cfg params failed for vdevId %d",
				vdev_id);
	}
}

/**
 * wma_process_update_edca_param_req() - update EDCA params
 * @handle: wma handle
 * @edca_params: edca parameters
 *
 * This function updates EDCA parameters to the target
 *
 * Return: QDF Status
 */
QDF_STATUS wma_process_update_edca_param_req(WMA_HANDLE handle,
					     tEdcaParams *edca_params)
{
	tp_wma_handle wma_handle = (tp_wma_handle) handle;
	struct wmi_host_wme_vparams wmm_param[QCA_WLAN_AC_ALL];
	tSirMacEdcaParamRecord *edca_record;
	int ac;
	struct ol_tx_wmm_param_t ol_tx_wmm_param;
	uint8_t vdev_id;
	QDF_STATUS status;
	void *soc = cds_get_context(QDF_MODULE_ID_SOC);
	uint8_t *debug_str;
	uint32_t len = 0;

	vdev_id = edca_params->vdev_id;
	if (!wma_is_vdev_valid(vdev_id)) {
		wma_err("vdev id:%d is not active ", vdev_id);
		goto fail;
	}

	debug_str = qdf_mem_malloc(WMA_WMM_DEBUG_STRING_SIZE);
	if (!debug_str)
		goto fail;

	/* Convert the per-AC EDCA records and mirror them for the TX path */
	for (ac = 0; ac < QCA_WLAN_AC_ALL; ac++) {
		switch (ac) {
		case QCA_WLAN_AC_BE:
			edca_record = &edca_params->acbe;
			break;
		case QCA_WLAN_AC_BK:
			edca_record = &edca_params->acbk;
			break;
		case QCA_WLAN_AC_VI:
			edca_record = &edca_params->acvi;
			break;
		case QCA_WLAN_AC_VO:
			edca_record = &edca_params->acvo;
			break;
		default:
			qdf_mem_free(debug_str);
			goto fail;
		}

		wma_update_edca_params_for_ac(edca_record, &wmm_param[ac], ac,
					      edca_params->mu_edca_params,
					      debug_str,
					      WMA_WMM_DEBUG_STRING_SIZE, &len);

		ol_tx_wmm_param.ac[ac].aifs = wmm_param[ac].aifs;
		ol_tx_wmm_param.ac[ac].cwmin = wmm_param[ac].cwmin;
		ol_tx_wmm_param.ac[ac].cwmax = wmm_param[ac].cwmax;
	}

	wma_nofl_debug("WMM params: %s", debug_str);
	qdf_mem_free(debug_str);

	status = wmi_unified_process_update_edca_param(wma_handle->wmi_handle,
						       vdev_id,
						       edca_params->mu_edca_params,
						       wmm_param);
	/* NOMEM is returned as-is (no extra log); FAILURE goes to fail */
	if (status == QDF_STATUS_E_NOMEM)
		return status;
	else if (status == QDF_STATUS_E_FAILURE)
		goto fail;

	cdp_set_wmm_param(soc, WMI_PDEV_ID_SOC, ol_tx_wmm_param);

	return QDF_STATUS_SUCCESS;

fail:
	wma_err("Failed to set WMM Parameters");
	return QDF_STATUS_E_FAILURE;
}

/**
 * wmi_unified_probe_rsp_tmpl_send() - send probe response template to fw
 * @wma: wma handle
 * @vdev_id: vdev id
 * @probe_rsp_info: probe response info
 *
 * Return: 0 for success or error code
 */
static int wmi_unified_probe_rsp_tmpl_send(tp_wma_handle wma,
					   uint8_t vdev_id,
					   tpSendProbeRespParams probe_rsp_info)
{
	uint64_t adjusted_tsf_le;
	struct ieee80211_frame *wh;
	struct wmi_probe_resp_params params;

	/*
	 * Make the TSF offset negative so probe response in the same
	 * staggered batch have the same TSF.
	 */
	adjusted_tsf_le = cpu_to_le64(0ULL -
				      wma->interfaces[vdev_id].tsfadjust);
	/* Update the timstamp in the probe response buffer with adjusted TSF */
	wh = (struct ieee80211_frame *)probe_rsp_info->probeRespTemplate;
	A_MEMCPY(&wh[1], &adjusted_tsf_le, sizeof(adjusted_tsf_le));

	params.prb_rsp_template_len = probe_rsp_info->probeRespTemplateLen;
	params.prb_rsp_template_frm = probe_rsp_info->probeRespTemplate;

	return wmi_unified_probe_rsp_tmpl_send_cmd(wma->wmi_handle, vdev_id,
						   &params);
}

#ifdef WLAN_FEATURE_11BE_MLO
/**
 * wma_upt_mlo_partner_info() - update mlo info in beacon template
 * @params: beacon template params
 * @bcn_param: beacon params
 * @bytes_to_strip: bytes to strip
 *
 * CSA/eCSA switch-count offsets are adjusted down by @bytes_to_strip,
 * but only when the original offset lies beyond the stripped region.
 *
 * Return: Void
 */
static void wma_upt_mlo_partner_info(struct beacon_tmpl_params *params,
				     const tpSendbeaconParams bcn_param,
				     uint8_t bytes_to_strip)
{
	struct ml_bcn_partner_info *bcn_info;
	struct ml_bcn_partner_info *info;
	int link;

	params->mlo_partner.num_links = bcn_param->mlo_partner.num_links;
	for (link = 0; link < params->mlo_partner.num_links; link++) {
		bcn_info = &bcn_param->mlo_partner.partner_info[link];
		info = &params->mlo_partner.partner_info[link];
		info->vdev_id = bcn_info->vdev_id;
		info->beacon_interval = bcn_info->beacon_interval;
		if (bcn_info->csa_switch_count_offset &&
		    bcn_info->csa_switch_count_offset > bytes_to_strip)
			info->csa_switch_count_offset =
				bcn_info->csa_switch_count_offset -
				bytes_to_strip;
		if (bcn_info->ext_csa_switch_count_offset &&
		    bcn_info->ext_csa_switch_count_offset > bytes_to_strip)
			info->ext_csa_switch_count_offset =
				bcn_info->ext_csa_switch_count_offset -
				bytes_to_strip;
	}
}
#else
static void wma_upt_mlo_partner_info(struct beacon_tmpl_params *params,
				     const tpSendbeaconParams bcn_param,
				     uint8_t
bytes_to_strip) 2158 { 2159 } 2160 #endif 2161 2162 /** 2163 * wma_unified_bcn_tmpl_send() - send beacon template to fw 2164 * @wma:wma handle 2165 * @vdev_id: vdev id 2166 * @bcn_info: beacon info 2167 * @bytes_to_strip: bytes to strip 2168 * 2169 * Return: QDF_STATUS_SUCCESS for success or error code 2170 */ 2171 static QDF_STATUS wma_unified_bcn_tmpl_send(tp_wma_handle wma, 2172 uint8_t vdev_id, 2173 const tpSendbeaconParams bcn_info, 2174 uint8_t bytes_to_strip) 2175 { 2176 struct beacon_tmpl_params params = {0}; 2177 uint32_t tmpl_len, tmpl_len_aligned; 2178 uint8_t *frm; 2179 QDF_STATUS ret; 2180 uint8_t *p2p_ie; 2181 uint16_t p2p_ie_len = 0; 2182 uint64_t adjusted_tsf_le; 2183 struct ieee80211_frame *wh; 2184 2185 if (!wma_is_vdev_valid(vdev_id)) { 2186 wma_err("vdev id:%d is not active ", vdev_id); 2187 return QDF_STATUS_E_INVAL; 2188 } 2189 2190 wma_nofl_debug("vdev %d: bcn update reason %d", vdev_id, 2191 bcn_info->reason); 2192 2193 if (bcn_info->p2pIeOffset) { 2194 p2p_ie = bcn_info->beacon + bcn_info->p2pIeOffset; 2195 p2p_ie_len = (uint16_t) p2p_ie[1] + 2; 2196 } 2197 2198 /* 2199 * XXX: The first byte of beacon buffer contains beacon length 2200 * only when UMAC in sending the beacon template. In othercases 2201 * (ex: from tbtt update) beacon length is read from beacon 2202 * information. 2203 */ 2204 if (bytes_to_strip) 2205 tmpl_len = *(uint32_t *) &bcn_info->beacon[0]; 2206 else 2207 tmpl_len = bcn_info->beaconLength; 2208 2209 if (tmpl_len > WMI_BEACON_TX_BUFFER_SIZE) { 2210 wma_err("tmpl_len: %d > %d. 
Invalid tmpl len", tmpl_len, 2211 WMI_BEACON_TX_BUFFER_SIZE); 2212 return -EINVAL; 2213 } 2214 2215 if (p2p_ie_len) { 2216 if (tmpl_len <= p2p_ie_len) { 2217 wma_err("tmpl_len %d <= p2p_ie_len %d, Invalid", 2218 tmpl_len, p2p_ie_len); 2219 return -EINVAL; 2220 } 2221 tmpl_len -= (uint32_t) p2p_ie_len; 2222 } 2223 2224 frm = bcn_info->beacon + bytes_to_strip; 2225 tmpl_len_aligned = roundup(tmpl_len, sizeof(A_UINT32)); 2226 /* 2227 * Make the TSF offset negative so beacons in the same 2228 * staggered batch have the same TSF. 2229 */ 2230 adjusted_tsf_le = cpu_to_le64(0ULL - 2231 wma->interfaces[vdev_id].tsfadjust); 2232 /* Update the timstamp in the beacon buffer with adjusted TSF */ 2233 wh = (struct ieee80211_frame *)frm; 2234 A_MEMCPY(&wh[1], &adjusted_tsf_le, sizeof(adjusted_tsf_le)); 2235 2236 2237 2238 params.vdev_id = vdev_id; 2239 params.tim_ie_offset = bcn_info->timIeOffset - bytes_to_strip; 2240 params.tmpl_len = tmpl_len; 2241 params.frm = frm; 2242 params.tmpl_len_aligned = tmpl_len_aligned; 2243 params.enable_bigtk = 2244 mlme_get_bigtk_support(wma->interfaces[vdev_id].vdev); 2245 if (bcn_info->csa_count_offset && 2246 (bcn_info->csa_count_offset > bytes_to_strip)) 2247 params.csa_switch_count_offset = 2248 bcn_info->csa_count_offset - bytes_to_strip; 2249 if (bcn_info->ecsa_count_offset && 2250 (bcn_info->ecsa_count_offset > bytes_to_strip)) 2251 params.ext_csa_switch_count_offset = 2252 bcn_info->ecsa_count_offset - bytes_to_strip; 2253 2254 wma_upt_mlo_partner_info(¶ms, bcn_info, bytes_to_strip); 2255 2256 ret = wmi_unified_beacon_tmpl_send_cmd(wma->wmi_handle, 2257 ¶ms); 2258 if (QDF_IS_STATUS_ERROR(ret)) 2259 wma_err("Failed to send bcn tmpl: %d", ret); 2260 2261 return ret; 2262 } 2263 2264 /** 2265 * wma_store_bcn_tmpl() - store beacon template 2266 * @wma: wma handle 2267 * @vdev_id: vdev id 2268 * @bcn_info: beacon params 2269 * 2270 * This function stores beacon template locally. 
 * This will send to target on the reception of
 * SWBA event.
 *
 * Return: QDF status
 */
static QDF_STATUS wma_store_bcn_tmpl(tp_wma_handle wma, uint8_t vdev_id,
				     tpSendbeaconParams bcn_info)
{
	struct beacon_info *bcn;
	uint32_t len;
	uint8_t *bcn_payload;
	struct beacon_tim_ie *tim_ie;

	bcn = wma->interfaces[vdev_id].beacon;
	if (!bcn || !bcn->buf) {
		wma_err("Memory is not allocated to hold bcn template");
		return QDF_STATUS_E_INVAL;
	}

	/* First 4 bytes of the UMAC-supplied buffer carry the beacon
	 * length; validate it before copying into the fixed-size nbuf.
	 */
	len = *(uint32_t *) &bcn_info->beacon[0];
	if (len > SIR_MAX_BEACON_SIZE - sizeof(uint32_t)) {
		wma_err("Received beacon len %u exceeding max limit %lu",
			len, (unsigned long)(
			SIR_MAX_BEACON_SIZE - sizeof(uint32_t)));
		return QDF_STATUS_E_INVAL;
	}
	/* bcn->lock serializes against the SWBA/TBTT path that reads
	 * this cached template.
	 */
	qdf_spin_lock_bh(&bcn->lock);

	/*
	 * Copy received beacon template content in local buffer.
	 * this will be send to target on the reception of SWBA
	 * event from target.
	 */
	qdf_nbuf_trim_tail(bcn->buf, qdf_nbuf_len(bcn->buf));
	memcpy(qdf_nbuf_data(bcn->buf),
	       bcn_info->beacon + 4 /* Exclude beacon length field */,
	       len);
	/* All cached offsets are rebased by the 4-byte length prefix;
	 * offsets <= 3 (i.e. inside the prefix / unset) are kept as-is.
	 */
	if (bcn_info->timIeOffset > 3)
		bcn->tim_ie_offset = bcn_info->timIeOffset - 4;
	else
		bcn->tim_ie_offset = bcn_info->timIeOffset;

	if (bcn_info->p2pIeOffset > 3)
		bcn->p2p_ie_offset = bcn_info->p2pIeOffset - 4;
	else
		bcn->p2p_ie_offset = bcn_info->p2pIeOffset;

	if (bcn_info->csa_count_offset > 3)
		bcn->csa_count_offset = bcn_info->csa_count_offset - 4;
	else
		bcn->csa_count_offset = bcn_info->csa_count_offset;

	if (bcn_info->ecsa_count_offset > 3)
		bcn->ecsa_count_offset = bcn_info->ecsa_count_offset - 4;
	else
		bcn->ecsa_count_offset = bcn_info->ecsa_count_offset;

	bcn_payload = qdf_nbuf_data(bcn->buf);
	if (bcn->tim_ie_offset) {
		tim_ie = (struct beacon_tim_ie *)
			(&bcn_payload[bcn->tim_ie_offset]);
		/*
		 * Initial Value of bcn->dtim_count will be 0.
		 * But if the beacon gets updated then current dtim
		 * count will be restored
		 */
		tim_ie->dtim_count = bcn->dtim_count;
		tim_ie->tim_bitctl = 0;
	}

	qdf_nbuf_put_tail(bcn->buf, len);
	bcn->len = len;

	qdf_spin_unlock_bh(&bcn->lock);

	return QDF_STATUS_SUCCESS;
}

int wma_tbttoffset_update_event_handler(void *handle, uint8_t *event,
					uint32_t len)
{
	tp_wma_handle wma = (tp_wma_handle) handle;
	WMI_TBTTOFFSET_UPDATE_EVENTID_param_tlvs *param_buf;
	wmi_tbtt_offset_event_fixed_param *tbtt_offset_event;
	struct wma_txrx_node *intf;
	struct beacon_info *bcn;
	tSendbeaconParams bcn_info;
	uint32_t *adjusted_tsf = NULL;
	uint32_t if_id = 0, vdev_map;

	if (wma_validate_handle(wma))
		return -EINVAL;

	param_buf = (WMI_TBTTOFFSET_UPDATE_EVENTID_param_tlvs *) event;
	if (!param_buf) {
		wma_err("Invalid tbtt update event buffer");
		return -EINVAL;
	}

	tbtt_offset_event = param_buf->fixed_param;
	intf = wma->interfaces;
	vdev_map = tbtt_offset_event->vdev_map;
	adjusted_tsf = param_buf->tbttoffset_list;
	if (!adjusted_tsf) {
		wma_err("Invalid adjusted_tsf");
		return -EINVAL;
	}

	/* vdev_map is a bitmap: bit i set => vdev i has a new TSF offset */
	for (; (if_id < wma->max_bssid && vdev_map); vdev_map >>= 1, if_id++) {
		if (!intf[if_id].vdev)
			continue;

		if (!(vdev_map & 0x1))
			continue;

		bcn = intf[if_id].beacon;
		if (!bcn) {
			wma_err("Invalid beacon");
			return -EINVAL;
		}
		if (!bcn->buf) {
			wma_err("Invalid beacon buffer");
			return -EINVAL;
		}
		/* Save the adjusted TSF */
		intf[if_id].tsfadjust = adjusted_tsf[if_id];

		/* Snapshot the cached template under the lock, then send
		 * outside it (send path may sleep/log heavily).
		 */
		qdf_spin_lock_bh(&bcn->lock);
		qdf_mem_zero(&bcn_info, sizeof(bcn_info));
		qdf_mem_copy(bcn_info.beacon,
			     qdf_nbuf_data(bcn->buf), bcn->len);
		bcn_info.p2pIeOffset = bcn->p2p_ie_offset;
		bcn_info.beaconLength = bcn->len;
		bcn_info.timIeOffset = bcn->tim_ie_offset;
		bcn_info.csa_count_offset = bcn->csa_count_offset;
		bcn_info.ecsa_count_offset = bcn->ecsa_count_offset;
		qdf_spin_unlock_bh(&bcn->lock);

		/* NOTE(review): rate-limited *error* level is used for what
		 * looks like an informational message — confirm intent.
		 */
		wma_err_rl("Update beacon template for vdev %d due to TBTT offset update",
			   if_id);
		/* Update beacon template in firmware */
		wma_unified_bcn_tmpl_send(wma, if_id, &bcn_info, 0);
	}
	return 0;
}

/**
 * wma_p2p_go_set_beacon_ie() - set beacon IE for p2p go
 * @wma_handle: wma handle
 * @vdev_id: vdev id
 * @p2pIe: p2p IE
 *
 * Return: 0 for success or error code
 */
static int wma_p2p_go_set_beacon_ie(t_wma_handle *wma_handle,
				    A_UINT32 vdev_id, uint8_t *p2pIe)
{
	/* NOTE(review): this int-returning function returns QDF_STATUS
	 * values while its caller tests "< 0" — verify the failure value
	 * is actually negative on this platform.
	 */
	if (wma_validate_handle(wma_handle))
		return QDF_STATUS_E_FAILURE;

	return wmi_unified_p2p_go_set_beacon_ie_cmd(wma_handle->wmi_handle,
						    vdev_id, p2pIe);
}

/**
 * wma_send_probe_rsp_tmpl() - send probe resp template
 * @wma: wma handle
 * @probe_rsp_info: probe response info
 *
 * This function sends probe response template to fw which
 * firmware will use in case of probe response offload.
2442 * 2443 * Return: none 2444 */ 2445 void wma_send_probe_rsp_tmpl(tp_wma_handle wma, 2446 tpSendProbeRespParams probe_rsp_info) 2447 { 2448 uint8_t vdev_id; 2449 struct sAniProbeRspStruct *probe_rsp; 2450 2451 if (!probe_rsp_info) { 2452 wma_err("probe_rsp_info is NULL"); 2453 return; 2454 } 2455 2456 probe_rsp = (struct sAniProbeRspStruct *) 2457 (probe_rsp_info->probeRespTemplate); 2458 if (!probe_rsp) { 2459 wma_err("probe_rsp is NULL"); 2460 return; 2461 } 2462 2463 if (wma_find_vdev_id_by_addr(wma, probe_rsp->macHdr.sa, &vdev_id)) { 2464 wma_err("failed to get vdev id"); 2465 return; 2466 } 2467 2468 if (wmi_service_enabled(wma->wmi_handle, 2469 wmi_service_beacon_offload)) { 2470 if (wmi_unified_probe_rsp_tmpl_send(wma, vdev_id, 2471 probe_rsp_info) < 0) { 2472 wma_err("wmi_unified_probe_rsp_tmpl_send Failed"); 2473 return; 2474 } 2475 } 2476 } 2477 2478 QDF_STATUS wma_set_ap_vdev_up(tp_wma_handle wma, uint8_t vdev_id) 2479 { 2480 QDF_STATUS status = QDF_STATUS_SUCCESS; 2481 struct vdev_mlme_obj *mlme_obj; 2482 struct wlan_objmgr_vdev *vdev; 2483 struct wma_txrx_node *iface; 2484 2485 iface = &wma->interfaces[vdev_id]; 2486 vdev = iface->vdev; 2487 mlme_obj = wlan_vdev_mlme_get_cmpt_obj(vdev); 2488 if (!mlme_obj) { 2489 wma_err("failed to get mlme_obj"); 2490 return QDF_STATUS_E_INVAL; 2491 } 2492 mlme_obj->proto.sta.assoc_id = 0; 2493 2494 status = vdev_mgr_up_send(mlme_obj); 2495 if (QDF_IS_STATUS_ERROR(status)) { 2496 wma_err("failed to send vdev up"); 2497 return status; 2498 } 2499 wma_set_sap_keepalive(wma, vdev_id); 2500 wma_set_vdev_mgmt_rate(wma, vdev_id); 2501 wma_vdev_set_he_bss_params(wma, vdev_id, &mlme_obj->proto.he_ops_info); 2502 mlme_sr_update(vdev, true); 2503 2504 return status; 2505 } 2506 2507 /** 2508 * wma_send_beacon() - send beacon template 2509 * @wma: wma handle 2510 * @bcn_info: beacon info 2511 * 2512 * This function store beacon template locally and 2513 * update keep alive parameters 2514 * 2515 * Return: none 2516 */ 2517 
void wma_send_beacon(tp_wma_handle wma, tpSendbeaconParams bcn_info) 2518 { 2519 uint8_t vdev_id; 2520 QDF_STATUS status; 2521 uint8_t *p2p_ie; 2522 struct sAniBeaconStruct *beacon; 2523 2524 beacon = (struct sAniBeaconStruct *) (bcn_info->beacon); 2525 if (wma_find_vdev_id_by_addr(wma, beacon->macHdr.sa, &vdev_id)) { 2526 wma_err("failed to get vdev id"); 2527 status = QDF_STATUS_E_INVAL; 2528 goto send_rsp; 2529 } 2530 2531 if (wmi_service_enabled(wma->wmi_handle, 2532 wmi_service_beacon_offload)) { 2533 status = wma_unified_bcn_tmpl_send(wma, vdev_id, bcn_info, 4); 2534 if (QDF_IS_STATUS_ERROR(status)) { 2535 wma_err("wmi_unified_bcn_tmpl_send Failed"); 2536 goto send_rsp; 2537 } 2538 2539 if (bcn_info->p2pIeOffset) { 2540 p2p_ie = bcn_info->beacon + bcn_info->p2pIeOffset; 2541 wma_debug("p2pIe is present - vdev_id %hu, p2p_ie = %pK, p2p ie len = %hu", 2542 vdev_id, p2p_ie, p2p_ie[1]); 2543 if (wma_p2p_go_set_beacon_ie(wma, vdev_id, 2544 p2p_ie) < 0) { 2545 wma_err("wmi_unified_bcn_tmpl_send Failed"); 2546 status = QDF_STATUS_E_INVAL; 2547 goto send_rsp; 2548 } 2549 } 2550 } 2551 status = wma_store_bcn_tmpl(wma, vdev_id, bcn_info); 2552 if (status != QDF_STATUS_SUCCESS) { 2553 wma_err("wma_store_bcn_tmpl Failed"); 2554 goto send_rsp; 2555 } 2556 2557 send_rsp: 2558 bcn_info->status = status; 2559 wma_send_msg(wma, WMA_SEND_BCN_RSP, (void *)bcn_info, 0); 2560 } 2561 2562 /** 2563 * wma_set_keepalive_req() - send keep alive request to fw 2564 * @wma: wma handle 2565 * @keepalive: keep alive parameters 2566 * 2567 * Return: none 2568 */ 2569 void wma_set_keepalive_req(tp_wma_handle wma, 2570 struct keep_alive_req *keepalive) 2571 { 2572 wma_nofl_debug("KEEPALIVE:PacketType:%d", keepalive->packetType); 2573 wma_set_sta_keep_alive(wma, keepalive->sessionId, 2574 keepalive->packetType, 2575 keepalive->timePeriod, 2576 keepalive->hostIpv4Addr, 2577 keepalive->destIpv4Addr, 2578 keepalive->dest_macaddr.bytes); 2579 2580 qdf_mem_free(keepalive); 2581 } 2582 2583 /** 2584 
 * wma_beacon_miss_handler() - beacon miss event handler
 * @wma: wma handle
 * @vdev_id: vdev id
 * @rssi: rssi value
 *
 * This function send beacon miss indication to upper layers.
 *
 * Return: none
 */
void wma_beacon_miss_handler(tp_wma_handle wma, uint32_t vdev_id, int32_t rssi)
{
	struct missed_beacon_ind *beacon_miss_ind;
	struct mac_context *mac = cds_get_context(QDF_MODULE_ID_PE);

	beacon_miss_ind = qdf_mem_malloc(sizeof(*beacon_miss_ind));
	if (!beacon_miss_ind)
		return;

	/* Pause netif queues while the link is presumed lost */
	if (mac && mac->sme.tx_queue_cb)
		mac->sme.tx_queue_cb(mac->hdd_handle, vdev_id,
				     WLAN_STOP_ALL_NETIF_QUEUE,
				     WLAN_CONTROL_PATH);
	beacon_miss_ind->messageType = WMA_MISSED_BEACON_IND;
	beacon_miss_ind->length = sizeof(*beacon_miss_ind);
	beacon_miss_ind->bss_idx = vdev_id;
	beacon_miss_ind->rssi = rssi;

	/* Ownership of beacon_miss_ind passes to the message queue */
	wma_send_msg(wma, WMA_MISSED_BEACON_IND, beacon_miss_ind, 0);
	/* Without hw db2dbm support fw reports RSSI relative to the noise
	 * floor; convert to dBm before reporting lost-link info.
	 */
	if (!wmi_service_enabled(wma->wmi_handle,
				 wmi_service_hw_db2dbm_support))
		rssi += WMA_TGT_NOISE_FLOOR_DBM;
	wma_lost_link_info_handler(wma, vdev_id, rssi);
}

void wlan_cm_send_beacon_miss(uint8_t vdev_id, int32_t rssi)
{
	tp_wma_handle wma;

	wma = cds_get_context(QDF_MODULE_ID_WMA);
	if (!wma)
		return;

	wma_beacon_miss_handler(wma, vdev_id, rssi);
}

/**
 * wma_get_status_str() - get string of tx status from firmware
 * @status: tx status
 *
 * Return: converted string of tx status
 */
static const char *wma_get_status_str(uint32_t status)
{
	/* CASE_RETURN_STRING expands to "case X: return \"X\";" — the
	 * default label first is deliberate and legal.
	 */
	switch (status) {
	default:
		return "unknown";
	CASE_RETURN_STRING(WMI_MGMT_TX_COMP_TYPE_COMPLETE_OK);
	CASE_RETURN_STRING(WMI_MGMT_TX_COMP_TYPE_DISCARD);
	CASE_RETURN_STRING(WMI_MGMT_TX_COMP_TYPE_INSPECT);
	CASE_RETURN_STRING(WMI_MGMT_TX_COMP_TYPE_COMPLETE_NO_ACK);
	CASE_RETURN_STRING(WMI_MGMT_TX_COMP_TYPE_MAX);
	}
}
2648 #ifdef CONFIG_HL_SUPPORT 2649 static inline void wma_mgmt_unmap_buf(tp_wma_handle wma_handle, qdf_nbuf_t buf) 2650 { 2651 } 2652 #else 2653 static inline void wma_mgmt_unmap_buf(tp_wma_handle wma_handle, qdf_nbuf_t buf) 2654 { 2655 qdf_nbuf_unmap_single(wma_handle->qdf_dev, buf, QDF_DMA_TO_DEVICE); 2656 } 2657 #endif 2658 2659 #if defined(CONNECTIVITY_PKTLOG) || !defined(REMOVE_PKT_LOG) 2660 /** 2661 * wma_mgmt_qdf_status_map() - map MGMT Tx completion status with 2662 * packet dump Tx status 2663 * @status: MGMT Tx completion status 2664 * 2665 * Return: packet dump tx_status enum 2666 */ 2667 static inline enum qdf_dp_tx_rx_status 2668 wma_mgmt_qdf_status_map(WMI_MGMT_TX_COMP_STATUS_TYPE status) 2669 { 2670 enum qdf_dp_tx_rx_status pktdump_status; 2671 2672 switch (status) { 2673 case WMI_MGMT_TX_COMP_TYPE_COMPLETE_OK: 2674 pktdump_status = QDF_TX_RX_STATUS_OK; 2675 break; 2676 case WMI_MGMT_TX_COMP_TYPE_DISCARD: 2677 pktdump_status = QDF_TX_RX_STATUS_DROP; 2678 break; 2679 case WMI_MGMT_TX_COMP_TYPE_COMPLETE_NO_ACK: 2680 pktdump_status = QDF_TX_RX_STATUS_NO_ACK; 2681 break; 2682 default: 2683 pktdump_status = QDF_TX_RX_STATUS_DROP; 2684 break; 2685 } 2686 return pktdump_status; 2687 } 2688 2689 /** 2690 * wma_mgmt_pktdump_tx_handler() - calls tx cb if CONNECTIVITY_PKTLOG 2691 * feature is enabled 2692 * @wma_handle: wma handle 2693 * @buf: nbuf 2694 * @vdev_id : vdev id 2695 * @status : status 2696 * 2697 * Return: none 2698 */ 2699 static inline void wma_mgmt_pktdump_tx_handler(tp_wma_handle wma_handle, 2700 qdf_nbuf_t buf, uint8_t vdev_id, 2701 uint32_t status) 2702 { 2703 ol_txrx_pktdump_cb packetdump_cb; 2704 void *soc = cds_get_context(QDF_MODULE_ID_SOC); 2705 enum qdf_dp_tx_rx_status pktdump_status; 2706 2707 packetdump_cb = wma_handle->wma_mgmt_tx_packetdump_cb; 2708 pktdump_status = wma_mgmt_qdf_status_map(status); 2709 if (packetdump_cb) 2710 packetdump_cb(soc, WMI_PDEV_ID_SOC, vdev_id, 2711 buf, pktdump_status, QDF_TX_MGMT_PKT); 2712 } 2713 2714 
/** 2715 * wma_mgmt_pktdump_rx_handler() - calls rx cb if CONNECTIVITY_PKTLOG 2716 * feature is enabled 2717 * @mgmt_rx_params: mgmt rx params 2718 * @rx_pkt: cds packet 2719 * @wma_handle: wma handle 2720 * mgt_type: management type 2721 * mgt_subtype: management subtype 2722 * 2723 * Return: none 2724 */ 2725 static inline void wma_mgmt_pktdump_rx_handler( 2726 struct mgmt_rx_event_params *mgmt_rx_params, 2727 cds_pkt_t *rx_pkt, tp_wma_handle wma_handle, 2728 uint8_t mgt_type, uint8_t mgt_subtype) 2729 { 2730 ol_txrx_pktdump_cb packetdump_cb; 2731 void *soc = cds_get_context(QDF_MODULE_ID_SOC); 2732 2733 packetdump_cb = wma_handle->wma_mgmt_rx_packetdump_cb; 2734 if ((mgt_type == IEEE80211_FC0_TYPE_MGT && 2735 mgt_subtype != MGMT_SUBTYPE_BEACON) && 2736 packetdump_cb) 2737 packetdump_cb(soc, mgmt_rx_params->pdev_id, 2738 rx_pkt->pkt_meta.session_id, rx_pkt->pkt_buf, 2739 QDF_TX_RX_STATUS_OK, QDF_RX_MGMT_PKT); 2740 } 2741 2742 #else 2743 static inline void wma_mgmt_pktdump_tx_handler(tp_wma_handle wma_handle, 2744 qdf_nbuf_t buf, uint8_t vdev_id, 2745 uint32_t status) 2746 { 2747 } 2748 2749 static inline void wma_mgmt_pktdump_rx_handler( 2750 struct mgmt_rx_event_params *mgmt_rx_params, 2751 cds_pkt_t *rx_pkt, tp_wma_handle wma_handle, 2752 uint8_t mgt_type, uint8_t mgt_subtype) 2753 { 2754 } 2755 #endif 2756 2757 /** 2758 * wma_process_mgmt_tx_completion() - process mgmt completion 2759 * @wma_handle: wma handle 2760 * @desc_id: descriptor id 2761 * @status: status 2762 * 2763 * Return: 0 for success or error code 2764 */ 2765 static int wma_process_mgmt_tx_completion(tp_wma_handle wma_handle, 2766 uint32_t desc_id, uint32_t status) 2767 { 2768 struct wlan_objmgr_pdev *pdev; 2769 qdf_nbuf_t buf = NULL; 2770 QDF_STATUS ret; 2771 uint8_t vdev_id = 0; 2772 struct wmi_mgmt_params mgmt_params = {}; 2773 2774 if (wma_validate_handle(wma_handle)) 2775 return -EINVAL; 2776 2777 wma_debug("status: %s wmi_desc_id: %d", 2778 wma_get_status_str(status), desc_id); 2779 2780 
pdev = wma_handle->pdev; 2781 if (!pdev) { 2782 wma_err("psoc ptr is NULL"); 2783 return -EINVAL; 2784 } 2785 2786 buf = mgmt_txrx_get_nbuf(pdev, desc_id); 2787 2788 2789 if (buf) 2790 wma_mgmt_unmap_buf(wma_handle, buf); 2791 2792 vdev_id = mgmt_txrx_get_vdev_id(pdev, desc_id); 2793 mgmt_params.vdev_id = vdev_id; 2794 2795 wma_mgmt_pktdump_tx_handler(wma_handle, buf, vdev_id, status); 2796 ret = mgmt_txrx_tx_completion_handler(pdev, desc_id, status, 2797 &mgmt_params); 2798 2799 if (ret != QDF_STATUS_SUCCESS) { 2800 wma_err("Failed to process mgmt tx completion"); 2801 return -EINVAL; 2802 } 2803 2804 return 0; 2805 } 2806 2807 /** 2808 * wma_extract_mgmt_offload_event_params() - Extract mgmt event params 2809 * @params: Management offload event params 2810 * @hdr: Management header to extract 2811 * 2812 * Return: None 2813 */ 2814 static void wma_extract_mgmt_offload_event_params( 2815 struct mgmt_offload_event_params *params, 2816 wmi_mgmt_hdr *hdr) 2817 { 2818 params->tsf_l32 = hdr->tsf_l32; 2819 params->chan_freq = hdr->chan_freq; 2820 params->rate_kbps = hdr->rate_kbps; 2821 params->rssi = hdr->rssi; 2822 params->buf_len = hdr->buf_len; 2823 params->tx_status = hdr->tx_status; 2824 params->tx_retry_cnt = hdr->tx_retry_cnt; 2825 } 2826 2827 /** 2828 * wma_mgmt_tx_completion_handler() - wma mgmt Tx completion event handler 2829 * @handle: wma handle 2830 * @cmpl_event_params: completion event handler data 2831 * @len: length of @cmpl_event_params 2832 * 2833 * Return: 0 on success; error number otherwise 2834 */ 2835 2836 int wma_mgmt_tx_completion_handler(void *handle, uint8_t *cmpl_event_params, 2837 uint32_t len) 2838 { 2839 tp_wma_handle wma_handle = (tp_wma_handle)handle; 2840 WMI_MGMT_TX_COMPLETION_EVENTID_param_tlvs *param_buf; 2841 wmi_mgmt_tx_compl_event_fixed_param *cmpl_params; 2842 2843 param_buf = (WMI_MGMT_TX_COMPLETION_EVENTID_param_tlvs *) 2844 cmpl_event_params; 2845 if (!param_buf || !wma_handle) { 2846 wma_err("Invalid mgmt Tx completion 
event"); 2847 return -EINVAL; 2848 } 2849 cmpl_params = param_buf->fixed_param; 2850 2851 if ((ucfg_pkt_capture_get_pktcap_mode(wma_handle->psoc) & 2852 PKT_CAPTURE_MODE_MGMT_ONLY) && param_buf->mgmt_hdr) { 2853 struct mgmt_offload_event_params params = {0}; 2854 2855 wma_extract_mgmt_offload_event_params( 2856 ¶ms, 2857 (wmi_mgmt_hdr *)param_buf->mgmt_hdr); 2858 ucfg_pkt_capture_mgmt_tx_completion(wma_handle->pdev, 2859 cmpl_params->desc_id, 2860 cmpl_params->status, 2861 ¶ms); 2862 } 2863 2864 wma_process_mgmt_tx_completion(wma_handle, cmpl_params->desc_id, 2865 cmpl_params->status); 2866 2867 return 0; 2868 } 2869 2870 /** 2871 * wma_mgmt_tx_bundle_completion_handler() - mgmt bundle comp handler 2872 * @handle: wma handle 2873 * @buf: buffer 2874 * @len: length 2875 * 2876 * Return: 0 for success or error code 2877 */ 2878 int wma_mgmt_tx_bundle_completion_handler(void *handle, uint8_t *buf, 2879 uint32_t len) 2880 { 2881 tp_wma_handle wma_handle = (tp_wma_handle)handle; 2882 WMI_MGMT_TX_BUNDLE_COMPLETION_EVENTID_param_tlvs *param_buf; 2883 wmi_mgmt_tx_compl_bundle_event_fixed_param *cmpl_params; 2884 uint32_t num_reports; 2885 uint32_t *desc_ids; 2886 uint32_t *status; 2887 uint32_t i, buf_len; 2888 bool excess_data = false; 2889 2890 param_buf = (WMI_MGMT_TX_BUNDLE_COMPLETION_EVENTID_param_tlvs *)buf; 2891 if (!param_buf || !wma_handle) { 2892 wma_err("Invalid mgmt Tx completion event"); 2893 return -EINVAL; 2894 } 2895 cmpl_params = param_buf->fixed_param; 2896 num_reports = cmpl_params->num_reports; 2897 desc_ids = (uint32_t *)(param_buf->desc_ids); 2898 status = (uint32_t *)(param_buf->status); 2899 2900 /* buf contains num_reports * sizeof(uint32) len of desc_ids and 2901 * num_reports * sizeof(uint32) status, 2902 * so (2 x (num_reports * sizeof(uint32)) should not exceed MAX 2903 */ 2904 if (cmpl_params->num_reports > (WMI_SVC_MSG_MAX_SIZE / 2905 (2 * sizeof(uint32_t)))) 2906 excess_data = true; 2907 else 2908 buf_len = cmpl_params->num_reports * (2 * 
sizeof(uint32_t)); 2909 2910 if (excess_data || (sizeof(*cmpl_params) > (WMI_SVC_MSG_MAX_SIZE - 2911 buf_len))) { 2912 wma_err("excess wmi buffer: num_reports %d", 2913 cmpl_params->num_reports); 2914 return -EINVAL; 2915 } 2916 2917 if ((cmpl_params->num_reports > param_buf->num_desc_ids) || 2918 (cmpl_params->num_reports > param_buf->num_status)) { 2919 wma_err("Invalid num_reports %d, num_desc_ids %d, num_status %d", 2920 cmpl_params->num_reports, param_buf->num_desc_ids, 2921 param_buf->num_status); 2922 return -EINVAL; 2923 } 2924 2925 for (i = 0; i < num_reports; i++) { 2926 if ((ucfg_pkt_capture_get_pktcap_mode(wma_handle->psoc) & 2927 PKT_CAPTURE_MODE_MGMT_ONLY) && param_buf->mgmt_hdr) { 2928 struct mgmt_offload_event_params params = {0}; 2929 2930 wma_extract_mgmt_offload_event_params( 2931 ¶ms, 2932 &((wmi_mgmt_hdr *)param_buf->mgmt_hdr)[i]); 2933 ucfg_pkt_capture_mgmt_tx_completion( 2934 wma_handle->pdev, desc_ids[i], 2935 status[i], ¶ms); 2936 } 2937 2938 wma_process_mgmt_tx_completion(wma_handle, 2939 desc_ids[i], status[i]); 2940 } 2941 return 0; 2942 } 2943 2944 /** 2945 * wma_process_update_opmode() - process update VHT opmode cmd from UMAC 2946 * @wma_handle: wma handle 2947 * @update_vht_opmode: vht opmode 2948 * 2949 * Return: none 2950 */ 2951 void wma_process_update_opmode(tp_wma_handle wma_handle, 2952 tUpdateVHTOpMode *update_vht_opmode) 2953 { 2954 wmi_host_channel_width ch_width; 2955 uint8_t pdev_id; 2956 struct wlan_objmgr_peer *peer; 2957 struct wlan_objmgr_psoc *psoc = wma_handle->psoc; 2958 enum wlan_phymode peer_phymode; 2959 uint32_t fw_phymode; 2960 enum wlan_peer_type peer_type; 2961 2962 pdev_id = wlan_objmgr_pdev_get_pdev_id(wma_handle->pdev); 2963 peer = wlan_objmgr_get_peer(psoc, pdev_id, 2964 update_vht_opmode->peer_mac, 2965 WLAN_LEGACY_WMA_ID); 2966 if (!peer) { 2967 wma_err("peer object invalid"); 2968 return; 2969 } 2970 2971 peer_type = wlan_peer_get_peer_type(peer); 2972 if (peer_type == WLAN_PEER_SELF) { 2973 
wma_err("self peer wrongly used"); 2974 wlan_objmgr_peer_release_ref(peer, WLAN_LEGACY_WMA_ID); 2975 return; 2976 } 2977 2978 wlan_peer_obj_lock(peer); 2979 peer_phymode = wlan_peer_get_phymode(peer); 2980 wlan_peer_obj_unlock(peer); 2981 wlan_objmgr_peer_release_ref(peer, WLAN_LEGACY_WMA_ID); 2982 2983 fw_phymode = wmi_host_to_fw_phymode(peer_phymode); 2984 2985 ch_width = wmi_get_ch_width_from_phy_mode(wma_handle->wmi_handle, 2986 fw_phymode); 2987 wma_debug("ch_width: %d, fw phymode: %d peer_phymode: %d, op_mode: %d", 2988 ch_width, fw_phymode, peer_phymode, 2989 update_vht_opmode->opMode); 2990 2991 if (ch_width < update_vht_opmode->opMode) { 2992 wma_err("Invalid peer bw update %d, self bw %d", 2993 update_vht_opmode->opMode, ch_width); 2994 return; 2995 } 2996 2997 wma_set_peer_param(wma_handle, update_vht_opmode->peer_mac, 2998 WMI_HOST_PEER_CHWIDTH, update_vht_opmode->opMode, 2999 update_vht_opmode->smesessionId); 3000 3001 wma_set_peer_param(wma_handle, update_vht_opmode->peer_mac, 3002 WMI_HOST_PEER_PHYMODE, 3003 fw_phymode, update_vht_opmode->smesessionId); 3004 } 3005 3006 /** 3007 * wma_process_update_rx_nss() - process update RX NSS cmd from UMAC 3008 * @wma_handle: wma handle 3009 * @update_rx_nss: rx nss value 3010 * 3011 * Return: none 3012 */ 3013 void wma_process_update_rx_nss(tp_wma_handle wma_handle, 3014 tUpdateRxNss *update_rx_nss) 3015 { 3016 struct target_psoc_info *tgt_hdl; 3017 struct wma_txrx_node *intr = 3018 &wma_handle->interfaces[update_rx_nss->smesessionId]; 3019 int rx_nss = update_rx_nss->rxNss; 3020 int num_rf_chains; 3021 3022 tgt_hdl = wlan_psoc_get_tgt_if_handle(wma_handle->psoc); 3023 if (!tgt_hdl) { 3024 wma_err("target psoc info is NULL"); 3025 return; 3026 } 3027 3028 num_rf_chains = target_if_get_num_rf_chains(tgt_hdl); 3029 if (rx_nss > num_rf_chains || rx_nss > WMA_MAX_NSS) 3030 rx_nss = QDF_MIN(num_rf_chains, WMA_MAX_NSS); 3031 3032 intr->nss = (uint8_t)rx_nss; 3033 update_rx_nss->rxNss = (uint32_t)rx_nss; 3034 3035 
wma_debug("Rx Nss = %d", update_rx_nss->rxNss); 3036 3037 wma_set_peer_param(wma_handle, update_rx_nss->peer_mac, 3038 WMI_HOST_PEER_NSS, update_rx_nss->rxNss, 3039 update_rx_nss->smesessionId); 3040 } 3041 3042 /** 3043 * wma_process_update_membership() - process update group membership cmd 3044 * @wma_handle: wma handle 3045 * @membership: group membership info 3046 * 3047 * Return: none 3048 */ 3049 void wma_process_update_membership(tp_wma_handle wma_handle, 3050 tUpdateMembership *membership) 3051 { 3052 wma_debug("membership = %x ", membership->membership); 3053 3054 wma_set_peer_param(wma_handle, membership->peer_mac, 3055 WMI_HOST_PEER_MEMBERSHIP, membership->membership, 3056 membership->smesessionId); 3057 } 3058 3059 /** 3060 * wma_process_update_userpos() - process update user pos cmd from UMAC 3061 * @wma_handle: wma handle 3062 * @userpos: user pos value 3063 * 3064 * Return: none 3065 */ 3066 void wma_process_update_userpos(tp_wma_handle wma_handle, 3067 tUpdateUserPos *userpos) 3068 { 3069 wma_debug("userPos = %x ", userpos->userPos); 3070 3071 wma_set_peer_param(wma_handle, userpos->peer_mac, 3072 WMI_HOST_PEER_USERPOS, userpos->userPos, 3073 userpos->smesessionId); 3074 3075 /* Now that membership/userpos is updated in fw, 3076 * enable GID PPS. 
3077 */ 3078 wma_set_ppsconfig(userpos->smesessionId, WMA_VHT_PPS_GID_MATCH, 1); 3079 3080 } 3081 3082 QDF_STATUS wma_set_cts2self_for_p2p_go(void *wma_handle, 3083 uint32_t cts2self_for_p2p_go) 3084 { 3085 int32_t ret; 3086 tp_wma_handle wma = (tp_wma_handle)wma_handle; 3087 struct pdev_params pdevparam = {}; 3088 3089 pdevparam.param_id = wmi_pdev_param_cts2self_for_p2p_go_config; 3090 pdevparam.param_value = cts2self_for_p2p_go; 3091 3092 ret = wmi_unified_pdev_param_send(wma->wmi_handle, 3093 &pdevparam, 3094 WMA_WILDCARD_PDEV_ID); 3095 if (ret) { 3096 wma_err("Fail to Set CTS2SELF for p2p GO %d", 3097 cts2self_for_p2p_go); 3098 return QDF_STATUS_E_FAILURE; 3099 } 3100 3101 wma_nofl_debug("Successfully Set CTS2SELF for p2p GO %d", 3102 cts2self_for_p2p_go); 3103 3104 return QDF_STATUS_SUCCESS; 3105 } 3106 3107 3108 /** 3109 * wma_set_htconfig() - set ht config parameters to target 3110 * @vdev_id: vdev id 3111 * @ht_capab: ht capability 3112 * @value: value of ht param 3113 * 3114 * Return: QDF status 3115 */ 3116 QDF_STATUS wma_set_htconfig(uint8_t vdev_id, uint16_t ht_capab, int value) 3117 { 3118 tp_wma_handle wma = cds_get_context(QDF_MODULE_ID_WMA); 3119 QDF_STATUS ret = QDF_STATUS_E_FAILURE; 3120 3121 if (!wma) 3122 return QDF_STATUS_E_INVAL; 3123 3124 switch (ht_capab) { 3125 case WNI_CFG_HT_CAP_INFO_ADVANCE_CODING: 3126 ret = wma_vdev_set_param(wma->wmi_handle, vdev_id, 3127 wmi_vdev_param_ldpc, 3128 value); 3129 break; 3130 case WNI_CFG_HT_CAP_INFO_TX_STBC: 3131 ret = wma_vdev_set_param(wma->wmi_handle, vdev_id, 3132 wmi_vdev_param_tx_stbc, 3133 value); 3134 break; 3135 case WNI_CFG_HT_CAP_INFO_RX_STBC: 3136 ret = wma_vdev_set_param(wma->wmi_handle, vdev_id, 3137 wmi_vdev_param_rx_stbc, 3138 value); 3139 break; 3140 case WNI_CFG_HT_CAP_INFO_SHORT_GI_20MHZ: 3141 case WNI_CFG_HT_CAP_INFO_SHORT_GI_40MHZ: 3142 wma_err("ht_capab = %d, value = %d", ht_capab, 3143 value); 3144 ret = wma_vdev_set_param(wma->wmi_handle, vdev_id, 3145 wmi_vdev_param_sgi, value); 
3146 if (ret == QDF_STATUS_SUCCESS) 3147 wma->interfaces[vdev_id].config.shortgi = value; 3148 break; 3149 default: 3150 wma_err("INVALID HT CONFIG"); 3151 } 3152 3153 return ret; 3154 } 3155 3156 /** 3157 * wma_extract_ccmp_pn() - extract 6 byte PN from the CCMP header 3158 * @ccmp_ptr: CCMP header 3159 * 3160 * Return: PN extracted from header. 3161 */ 3162 static uint64_t wma_extract_ccmp_pn(uint8_t *ccmp_ptr) 3163 { 3164 uint8_t rsvd, key, pn[6]; 3165 uint64_t new_pn; 3166 3167 /* 3168 * +-----+-----+------+----------+-----+-----+-----+-----+ 3169 * | PN0 | PN1 | rsvd | rsvd/key | PN2 | PN3 | PN4 | PN5 | 3170 * +-----+-----+------+----------+-----+-----+-----+-----+ 3171 * CCMP Header Format 3172 */ 3173 3174 /* Extract individual bytes */ 3175 pn[0] = (uint8_t) *ccmp_ptr; 3176 pn[1] = (uint8_t) *(ccmp_ptr + 1); 3177 rsvd = (uint8_t) *(ccmp_ptr + 2); 3178 key = (uint8_t) *(ccmp_ptr + 3); 3179 pn[2] = (uint8_t) *(ccmp_ptr + 4); 3180 pn[3] = (uint8_t) *(ccmp_ptr + 5); 3181 pn[4] = (uint8_t) *(ccmp_ptr + 6); 3182 pn[5] = (uint8_t) *(ccmp_ptr + 7); 3183 3184 /* Form 6 byte PN with 6 individual bytes of PN */ 3185 new_pn = ((uint64_t) pn[5] << 40) | 3186 ((uint64_t) pn[4] << 32) | 3187 ((uint64_t) pn[3] << 24) | 3188 ((uint64_t) pn[2] << 16) | 3189 ((uint64_t) pn[1] << 8) | ((uint64_t) pn[0] << 0); 3190 3191 return new_pn; 3192 } 3193 3194 /** 3195 * wma_is_ccmp_pn_replay_attack() - detect replay attacking using PN in CCMP 3196 * @wma: wma context 3197 * @wh: 802.11 frame header 3198 * @ccmp_ptr: CCMP frame header 3199 * 3200 * Return: true/false 3201 */ 3202 static bool 3203 wma_is_ccmp_pn_replay_attack(tp_wma_handle wma, struct ieee80211_frame *wh, 3204 uint8_t *ccmp_ptr) 3205 { 3206 uint64_t new_pn; 3207 bool ret = false; 3208 struct peer_mlme_priv_obj *peer_priv; 3209 struct wlan_objmgr_peer *peer; 3210 3211 new_pn = wma_extract_ccmp_pn(ccmp_ptr); 3212 3213 peer = wlan_objmgr_get_peer_by_mac(wma->psoc, wh->i_addr2, 3214 WLAN_LEGACY_WMA_ID); 3215 if (!peer) 3216 
/**
 * wma_is_ccmp_pn_replay_attack() - detect replay attacking using PN in CCMP
 * @wma: wma context
 * @wh: 802.11 frame header
 * @ccmp_ptr: CCMP frame header
 *
 * Compares the 48-bit PN of this frame against the last PN stored in the
 * peer's MLME private object. The PN must strictly increase; if it does
 * not, the frame is flagged as a replay and the peer's replay counter is
 * incremented.
 *
 * Return: true/false
 */
static bool
wma_is_ccmp_pn_replay_attack(tp_wma_handle wma, struct ieee80211_frame *wh,
			     uint8_t *ccmp_ptr)
{
	uint64_t new_pn;
	bool ret = false;
	struct peer_mlme_priv_obj *peer_priv;
	struct wlan_objmgr_peer *peer;

	new_pn = wma_extract_ccmp_pn(ccmp_ptr);

	/* The transmitter address (addr2) identifies the peer whose PN
	 * history we track.
	 */
	peer = wlan_objmgr_get_peer_by_mac(wma->psoc, wh->i_addr2,
					   WLAN_LEGACY_WMA_ID);
	if (!peer)
		return ret;

	peer_priv = wlan_objmgr_peer_get_comp_private_obj(peer,
							  WLAN_UMAC_COMP_MLME);
	if (!peer_priv) {
		wlan_objmgr_peer_release_ref(peer, WLAN_LEGACY_WMA_ID);
		return ret;
	}

	if (peer_priv->last_pn_valid) {
		if (new_pn > peer_priv->last_pn) {
			/* PN advanced: record it as the new high-water mark */
			peer_priv->last_pn = new_pn;
		} else {
			wma_err_rl("PN Replay attack detected");
			/* per 11W amendment, keeping track of replay attacks */
			peer_priv->rmf_pn_replays += 1;
			ret = true;
		}
	} else {
		/* First protected frame seen from this peer: seed the PN */
		peer_priv->last_pn_valid = 1;
		peer_priv->last_pn = new_pn;
	}

	wlan_objmgr_peer_release_ref(peer, WLAN_LEGACY_WMA_ID);

	return ret;
}

/**
 * wma_process_bip() - process mmie in rmf frame
 * @wma_handle: wma handle
 * @iface: txrx node
 * @wh: 80211 frame
 * @wbuf: Buffer
 *
 * Selects the configured broadcast/multicast management cipher (CMAC or
 * GMAC), validates the trailing MMIE when the firmware does not offload
 * that check, and strips the MMIE from the tail of @wbuf on success.
 *
 * Return: 0 for success or error code
 */
static
int wma_process_bip(tp_wma_handle wma_handle, struct wma_txrx_node *iface,
		    struct ieee80211_frame *wh, qdf_nbuf_t wbuf)
{
	uint16_t mmie_size;
	uint8_t *efrm;
	int32_t mgmtcipherset;
	enum wlan_crypto_cipher_type key_cipher;

	/* efrm points one byte past the end of the frame data */
	efrm = qdf_nbuf_data(wbuf) + qdf_nbuf_len(wbuf);

	mgmtcipherset = wlan_crypto_get_param(iface->vdev,
					      WLAN_CRYPTO_PARAM_MGMT_CIPHER);
	if (mgmtcipherset <= 0) {
		wma_err("Invalid key cipher %d", mgmtcipherset);
		return -EINVAL;
	}

	/* The cipher set is a bitmap; pick the first supported BIP variant.
	 * The MMIE size differs between CMAC and the GMAC variants.
	 */
	if (mgmtcipherset & (1 << WLAN_CRYPTO_CIPHER_AES_CMAC)) {
		key_cipher = WLAN_CRYPTO_CIPHER_AES_CMAC;
		mmie_size = cds_get_mmie_size();
	} else if (mgmtcipherset & (1 << WLAN_CRYPTO_CIPHER_AES_GMAC)) {
		key_cipher = WLAN_CRYPTO_CIPHER_AES_GMAC;
		mmie_size = cds_get_gmac_mmie_size();
	} else if (mgmtcipherset & (1 << WLAN_CRYPTO_CIPHER_AES_GMAC_256)) {
		key_cipher = WLAN_CRYPTO_CIPHER_AES_GMAC_256;
		mmie_size = cds_get_gmac_mmie_size();
	} else {
		wma_err("Invalid key cipher %d", mgmtcipherset);
		return -EINVAL;
	}

	/* Check if frame is invalid length: it must at least hold the
	 * 802.11 header plus the MMIE trailer.
	 */
	if (efrm - (uint8_t *)wh < sizeof(*wh) + mmie_size) {
		wma_err("Invalid frame length");
		return -EINVAL;
	}

	switch (key_cipher) {
	case WLAN_CRYPTO_CIPHER_AES_CMAC:
		/* Only validate in host when FW PMF offload is absent */
		if (!wmi_service_enabled(wma_handle->wmi_handle,
					 wmi_service_sta_pmf_offload)) {
			if (!wlan_crypto_is_mmie_valid(iface->vdev,
						       (uint8_t *)wh, efrm)) {
				wma_debug("BC/MC MIC error or MMIE not present, dropping the frame");
				return -EINVAL;
			}
		}
		break;
	case WLAN_CRYPTO_CIPHER_AES_GMAC:
	case WLAN_CRYPTO_CIPHER_AES_GMAC_256:
		/* Only validate in host when FW GMAC offload is absent */
		if (!wmi_service_enabled(wma_handle->wmi_handle,
					 wmi_service_gmac_offload_support)) {
			if (!wlan_crypto_is_mmie_valid(iface->vdev,
						       (uint8_t *)wh, efrm)) {
				wma_debug("BC/MC GMAC MIC error or MMIE not present, dropping the frame");
				return -EINVAL;
			}
		}
		break;
	default:
		wma_err("Invalid key_type %d", key_cipher);
		return -EINVAL;
	}

	/* MMIE verified (or offloaded): remove it from the frame tail */
	qdf_nbuf_trim_tail(wbuf, mmie_size);

	return 0;
}
/**
 * wma_process_rmf_frame() - process rmf frame
 * @wma_handle: wma handle
 * @iface: txrx node
 * @wh: 80211 frame
 * @rx_pkt: rx packet
 * @wbuf: Buffer
 *
 * For encrypted unicast robust management frames: checks the CCMP PN for
 * replay, strips the CCMP header and MIC trailer in place, and refreshes
 * the packet metadata to match the shortened buffer. For unencrypted
 * frames: broadcast/multicast frames go through BIP (MMIE) validation,
 * while unprotected unicast frames are only tagged with an error feedback
 * code for upper layers.
 *
 * On any error path this function returns the packet via
 * cds_pkt_return_packet(), so the caller must not free it again.
 *
 * Return: 0 for success or error code
 */
static
int wma_process_rmf_frame(tp_wma_handle wma_handle,
			  struct wma_txrx_node *iface,
			  struct ieee80211_frame *wh,
			  cds_pkt_t *rx_pkt,
			  qdf_nbuf_t wbuf)
{
	uint8_t *orig_hdr;
	uint8_t *ccmp;
	uint8_t mic_len, hdr_len, pdev_id;
	QDF_STATUS status;

	if ((wh)->i_fc[1] & IEEE80211_FC1_WEP) {
		/* Robust BC/MC management frames must use BIP, never
		 * pairwise encryption - drop encrypted group frames.
		 */
		if (QDF_IS_ADDR_BROADCAST(wh->i_addr1) ||
		    IEEE80211_IS_MULTICAST(wh->i_addr1)) {
			wma_err("Encrypted BC/MC frame dropping the frame");
			cds_pkt_return_packet(rx_pkt);
			return -EINVAL;
		}

		/* NAN/NDI vdevs use fixed CCMP header/MIC sizes; for other
		 * vdev types the per-peer cipher determines the lengths.
		 */
		if (iface->type == WMI_VDEV_TYPE_NDI ||
		    iface->type == WMI_VDEV_TYPE_NAN) {
			hdr_len = IEEE80211_CCMP_HEADERLEN;
			mic_len = IEEE80211_CCMP_MICLEN;
		} else {
			pdev_id =
				wlan_objmgr_pdev_get_pdev_id(wma_handle->pdev);
			status = mlme_get_peer_mic_len(wma_handle->psoc,
						       pdev_id, wh->i_addr2,
						       &mic_len, &hdr_len);
			if (QDF_IS_STATUS_ERROR(status)) {
				wma_err("Failed to get mic hdr and length");
				cds_pkt_return_packet(rx_pkt);
				return -EINVAL;
			}
		}

		/* Frame must hold 802.11 header + crypto header + MIC */
		if (qdf_nbuf_len(wbuf) < (sizeof(*wh) + hdr_len + mic_len)) {
			wma_err("Buffer length less than expected %d",
				(int)qdf_nbuf_len(wbuf));
			cds_pkt_return_packet(rx_pkt);
			return -EINVAL;
		}

		orig_hdr = (uint8_t *) qdf_nbuf_data(wbuf);
		/* Pointer to head of CCMP header */
		ccmp = orig_hdr + sizeof(*wh);
		if (wma_is_ccmp_pn_replay_attack(wma_handle, wh, ccmp)) {
			wma_err_rl("Dropping the frame");
			cds_pkt_return_packet(rx_pkt);
			return -EINVAL;
		}

		/* Strip privacy headers (and trailer)
		 * for a received frame: slide the 802.11 header forward
		 * over the CCMP header, then drop the now-dead bytes from
		 * the front and the MIC from the tail.
		 */
		qdf_mem_move(orig_hdr +
			hdr_len, wh,
			sizeof(*wh));
		qdf_nbuf_pull_head(wbuf,
			hdr_len);
		qdf_nbuf_trim_tail(wbuf, mic_len);
		/*
		 * CCMP header has been pulled off
		 * reinitialize the start pointer of mac header
		 * to avoid accessing incorrect address
		 */
		wh = (struct ieee80211_frame *) qdf_nbuf_data(wbuf);
		rx_pkt->pkt_meta.mpdu_hdr_ptr =
			qdf_nbuf_data(wbuf);
		rx_pkt->pkt_meta.mpdu_len = qdf_nbuf_len(wbuf);
		rx_pkt->pkt_buf = wbuf;
		if (rx_pkt->pkt_meta.mpdu_len >=
			rx_pkt->pkt_meta.mpdu_hdr_len) {
			rx_pkt->pkt_meta.mpdu_data_len =
				rx_pkt->pkt_meta.mpdu_len -
				rx_pkt->pkt_meta.mpdu_hdr_len;
		} else {
			wma_err("mpdu len %d less than hdr %d, dropping frame",
				rx_pkt->pkt_meta.mpdu_len,
				rx_pkt->pkt_meta.mpdu_hdr_len);
			cds_pkt_return_packet(rx_pkt);
			return -EINVAL;
		}

		if (rx_pkt->pkt_meta.mpdu_data_len > MAX_MGMT_MPDU_LEN) {
			wma_err("Data Len %d greater than max, dropping frame",
				rx_pkt->pkt_meta.mpdu_data_len);
			cds_pkt_return_packet(rx_pkt);
			return -EINVAL;
		}
		rx_pkt->pkt_meta.mpdu_data_ptr =
		rx_pkt->pkt_meta.mpdu_hdr_ptr +
		rx_pkt->pkt_meta.mpdu_hdr_len;
		wma_debug("BSSID: "QDF_MAC_ADDR_FMT" tsf_delta: %u",
			  QDF_MAC_ADDR_REF(wh->i_addr3),
			  rx_pkt->pkt_meta.tsf_delta);
	} else {
		/* Unencrypted frame: group-addressed ones must carry a
		 * valid MMIE (BIP); unprotected unicast is flagged only.
		 */
		if (QDF_IS_ADDR_BROADCAST(wh->i_addr1) ||
		    IEEE80211_IS_MULTICAST(wh->i_addr1)) {
			if (0 != wma_process_bip(wma_handle, iface, wh, wbuf)) {
				cds_pkt_return_packet(rx_pkt);
				return -EINVAL;
			}
		} else {
			wma_err_rl("Rx unprotected unicast mgmt frame");
			rx_pkt->pkt_meta.dpuFeedback =
				DPU_FEEDBACK_UNPROTECTED_ERROR;
		}
	}
	return 0;
}
3452 */ 3453 static bool 3454 wma_get_peer_pmf_status(tp_wma_handle wma, uint8_t *peer_mac) 3455 { 3456 struct wlan_objmgr_peer *peer; 3457 bool is_pmf_enabled; 3458 3459 if (!peer_mac) { 3460 wma_err("peer_mac is NULL"); 3461 return false; 3462 } 3463 3464 peer = wlan_objmgr_get_peer(wma->psoc, 3465 wlan_objmgr_pdev_get_pdev_id(wma->pdev), 3466 peer_mac, WLAN_LEGACY_WMA_ID); 3467 if (!peer) { 3468 wma_debug("Peer of peer_mac " QDF_MAC_ADDR_FMT " not found", 3469 QDF_MAC_ADDR_REF(peer_mac)); 3470 return false; 3471 } 3472 is_pmf_enabled = mlme_get_peer_pmf_status(peer); 3473 wlan_objmgr_peer_release_ref(peer, WLAN_LEGACY_WMA_ID); 3474 wma_nofl_debug("get is_pmf_enabled %d for "QDF_MAC_ADDR_FMT, 3475 is_pmf_enabled, QDF_MAC_ADDR_REF(peer_mac)); 3476 3477 return is_pmf_enabled; 3478 } 3479 3480 /** 3481 * wma_check_and_process_rmf_frame() - Process the frame if it is of rmf type 3482 * @wma_handle: wma handle 3483 * @vdev_id: vdev id 3484 * @wh: double pointer to 802.11 frame header which will be updated if the 3485 * frame is of rmf type. 
3486 * @rx_pkt: rx packet 3487 * @buf: Buffer 3488 * 3489 * Process the frame as rmf frame only if both DUT and peer are of PMF capable 3490 * 3491 * Return: 0 for success or error code 3492 */ 3493 static int 3494 wma_check_and_process_rmf_frame(tp_wma_handle wma_handle, 3495 uint8_t vdev_id, 3496 struct ieee80211_frame **wh, 3497 cds_pkt_t *rx_pkt, 3498 qdf_nbuf_t buf) 3499 { 3500 int status; 3501 struct wma_txrx_node *iface; 3502 struct ieee80211_frame *hdr = *wh; 3503 3504 iface = &(wma_handle->interfaces[vdev_id]); 3505 if ((iface->type != WMI_VDEV_TYPE_NDI && 3506 iface->type != WMI_VDEV_TYPE_NAN) && !iface->rmfEnabled) 3507 return 0; 3508 3509 if (qdf_is_macaddr_group((struct qdf_mac_addr *)(hdr->i_addr1)) || 3510 qdf_is_macaddr_broadcast((struct qdf_mac_addr *)(hdr->i_addr1)) || 3511 wma_get_peer_pmf_status(wma_handle, hdr->i_addr2) || 3512 ((iface->type == WMI_VDEV_TYPE_NDI || 3513 iface->type == WMI_VDEV_TYPE_NAN) && 3514 (hdr->i_fc[1] & IEEE80211_FC1_WEP))) { 3515 status = wma_process_rmf_frame(wma_handle, iface, hdr, 3516 rx_pkt, buf); 3517 if (status) 3518 return status; 3519 /* 3520 * CCMP header might have been pulled off reinitialize the 3521 * start pointer of mac header 3522 */ 3523 *wh = (struct ieee80211_frame *)qdf_nbuf_data(buf); 3524 } 3525 3526 return 0; 3527 } 3528 3529 /** 3530 * wma_is_pkt_drop_candidate() - check if the mgmt frame should be dropped 3531 * @wma_handle: wma handle 3532 * @peer_addr: peer MAC address 3533 * @bssid: BSSID Address 3534 * @subtype: Management frame subtype 3535 * 3536 * This function is used to decide if a particular management frame should be 3537 * dropped to prevent DOS attack. Timestamp is used to decide the DOS attack. 
3538 * 3539 * Return: true if the packet should be dropped and false otherwise 3540 */ 3541 static bool wma_is_pkt_drop_candidate(tp_wma_handle wma_handle, 3542 uint8_t *peer_addr, uint8_t *bssid, 3543 uint8_t subtype) 3544 { 3545 bool should_drop = false; 3546 uint8_t nan_addr[] = {0x50, 0x6F, 0x9A, 0x01, 0x00, 0x00}; 3547 3548 /* Drop the beacons from NAN device */ 3549 if ((subtype == MGMT_SUBTYPE_BEACON) && 3550 (!qdf_mem_cmp(nan_addr, bssid, NAN_CLUSTER_ID_BYTES))) { 3551 should_drop = true; 3552 goto end; 3553 } 3554 end: 3555 return should_drop; 3556 } 3557 3558 #define RATE_LIMIT 16 3559 3560 int wma_form_rx_packet(qdf_nbuf_t buf, 3561 struct mgmt_rx_event_params *mgmt_rx_params, 3562 cds_pkt_t *rx_pkt) 3563 { 3564 uint8_t vdev_id = WMA_INVALID_VDEV_ID; 3565 struct ieee80211_frame *wh; 3566 uint8_t mgt_type, mgt_subtype; 3567 int status; 3568 tp_wma_handle wma_handle = (tp_wma_handle) 3569 cds_get_context(QDF_MODULE_ID_WMA); 3570 static uint8_t limit_prints_invalid_len = RATE_LIMIT - 1; 3571 static uint8_t limit_prints_load_unload = RATE_LIMIT - 1; 3572 static uint8_t limit_prints_recovery = RATE_LIMIT - 1; 3573 3574 if (!wma_handle) { 3575 qdf_nbuf_free(buf); 3576 qdf_mem_free(rx_pkt); 3577 return -EINVAL; 3578 } 3579 3580 if (!mgmt_rx_params) { 3581 limit_prints_invalid_len++; 3582 if (limit_prints_invalid_len == RATE_LIMIT) { 3583 wma_debug("mgmt rx params is NULL"); 3584 limit_prints_invalid_len = 0; 3585 } 3586 qdf_nbuf_free(buf); 3587 qdf_mem_free(rx_pkt); 3588 return -EINVAL; 3589 } 3590 3591 if (cds_is_load_or_unload_in_progress()) { 3592 limit_prints_load_unload++; 3593 if (limit_prints_load_unload == RATE_LIMIT) { 3594 wma_debug("Load/Unload in progress"); 3595 limit_prints_load_unload = 0; 3596 } 3597 qdf_nbuf_free(buf); 3598 qdf_mem_free(rx_pkt); 3599 return -EINVAL; 3600 } 3601 3602 if (cds_is_driver_recovering()) { 3603 limit_prints_recovery++; 3604 if (limit_prints_recovery == RATE_LIMIT) { 3605 wma_debug("Recovery in progress"); 3606 
int wma_form_rx_packet(qdf_nbuf_t buf,
		       struct mgmt_rx_event_params *mgmt_rx_params,
		       cds_pkt_t *rx_pkt)
{
	uint8_t vdev_id = WMA_INVALID_VDEV_ID;
	struct ieee80211_frame *wh;
	uint8_t mgt_type, mgt_subtype;
	int status;
	tp_wma_handle wma_handle = (tp_wma_handle)
		cds_get_context(QDF_MODULE_ID_WMA);
	/* Rate-limit counters: each message is emitted once every
	 * RATE_LIMIT occurrences; seeded so the first hit logs.
	 */
	static uint8_t limit_prints_invalid_len = RATE_LIMIT - 1;
	static uint8_t limit_prints_load_unload = RATE_LIMIT - 1;
	static uint8_t limit_prints_recovery = RATE_LIMIT - 1;

	/* On every early-error path both the nbuf and rx_pkt wrapper are
	 * freed here; the caller must not touch them after failure.
	 */
	if (!wma_handle) {
		qdf_nbuf_free(buf);
		qdf_mem_free(rx_pkt);
		return -EINVAL;
	}

	if (!mgmt_rx_params) {
		limit_prints_invalid_len++;
		if (limit_prints_invalid_len == RATE_LIMIT) {
			wma_debug("mgmt rx params is NULL");
			limit_prints_invalid_len = 0;
		}
		qdf_nbuf_free(buf);
		qdf_mem_free(rx_pkt);
		return -EINVAL;
	}

	if (cds_is_load_or_unload_in_progress()) {
		limit_prints_load_unload++;
		if (limit_prints_load_unload == RATE_LIMIT) {
			wma_debug("Load/Unload in progress");
			limit_prints_load_unload = 0;
		}
		qdf_nbuf_free(buf);
		qdf_mem_free(rx_pkt);
		return -EINVAL;
	}

	if (cds_is_driver_recovering()) {
		limit_prints_recovery++;
		if (limit_prints_recovery == RATE_LIMIT) {
			wma_debug("Recovery in progress");
			limit_prints_recovery = 0;
		}
		qdf_nbuf_free(buf);
		qdf_mem_free(rx_pkt);
		return -EINVAL;
	}

	if (cds_is_driver_in_bad_state()) {
		limit_prints_recovery++;
		if (limit_prints_recovery == RATE_LIMIT) {
			wma_debug("Driver in bad state");
			limit_prints_recovery = 0;
		}
		qdf_nbuf_free(buf);
		qdf_mem_free(rx_pkt);
		return -EINVAL;
	}

	/*
	 * Fill in meta information needed by pe/lim
	 * TODO: Try to maintain rx metainfo as part of skb->data.
	 */
	rx_pkt->pkt_meta.frequency = mgmt_rx_params->chan_freq;
	rx_pkt->pkt_meta.scan_src = mgmt_rx_params->flags;

	/*
	 * Get the rssi value from the current snr value
	 * using standard noise floor of -96.
	 */
	rx_pkt->pkt_meta.rssi = mgmt_rx_params->snr +
				WMA_NOISE_FLOOR_DBM_DEFAULT;
	rx_pkt->pkt_meta.snr = mgmt_rx_params->snr;

	/* If absolute rssi is available from firmware, use it */
	if (mgmt_rx_params->rssi != 0)
		rx_pkt->pkt_meta.rssi_raw = mgmt_rx_params->rssi;
	else
		rx_pkt->pkt_meta.rssi_raw = rx_pkt->pkt_meta.rssi;


	/*
	 * FIXME: Assigning the local timestamp as hw timestamp is not
	 * available. Need to see if pe/lim really uses this data.
	 */
	rx_pkt->pkt_meta.timestamp = (uint32_t) jiffies;
	rx_pkt->pkt_meta.mpdu_hdr_len = sizeof(struct ieee80211_frame);
	rx_pkt->pkt_meta.mpdu_len = mgmt_rx_params->buf_len;

	/*
	 * The buf_len should be at least 802.11 header len
	 */
	if (mgmt_rx_params->buf_len < rx_pkt->pkt_meta.mpdu_hdr_len) {
		wma_err("MPDU Len %d lesser than header len %d",
			mgmt_rx_params->buf_len,
			rx_pkt->pkt_meta.mpdu_hdr_len);
		qdf_nbuf_free(buf);
		qdf_mem_free(rx_pkt);
		return -EINVAL;
	}

	rx_pkt->pkt_meta.mpdu_data_len = mgmt_rx_params->buf_len -
					 rx_pkt->pkt_meta.mpdu_hdr_len;

	rx_pkt->pkt_meta.roamCandidateInd = 0;

	wh = (struct ieee80211_frame *)qdf_nbuf_data(buf);

	/*
	 * If the mpdu_data_len is greater than Max (2k), drop the frame
	 */
	if (rx_pkt->pkt_meta.mpdu_data_len > MAX_MGMT_MPDU_LEN) {
		wma_err("Data Len %d greater than max, dropping frame from "QDF_MAC_ADDR_FMT,
			rx_pkt->pkt_meta.mpdu_data_len,
			QDF_MAC_ADDR_REF(wh->i_addr3));
		qdf_nbuf_free(buf);
		qdf_mem_free(rx_pkt);
		return -EINVAL;
	}

	rx_pkt->pkt_meta.mpdu_hdr_ptr = qdf_nbuf_data(buf);
	rx_pkt->pkt_meta.mpdu_data_ptr = rx_pkt->pkt_meta.mpdu_hdr_ptr +
					 rx_pkt->pkt_meta.mpdu_hdr_len;
	rx_pkt->pkt_meta.tsf_delta = mgmt_rx_params->tsf_delta;
	rx_pkt->pkt_buf = buf;
	rx_pkt->pkt_meta.pkt_qdf_buf = buf;

	/* If it is a beacon/probe response, save it for future use */
	mgt_type = (wh)->i_fc[0] & IEEE80211_FC0_TYPE_MASK;
	mgt_subtype = (wh)->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK;

	/* Robust mgmt frames (disassoc/deauth/action) get RMF processing
	 * when a matching vdev is found by BSSID, by own address, or (for
	 * action frames) by NAN cluster lookup. After RMF processing `wh`
	 * may point at the shifted header inside `buf`.
	 */
	if (mgt_type == IEEE80211_FC0_TYPE_MGT &&
	    (mgt_subtype == MGMT_SUBTYPE_DISASSOC ||
	     mgt_subtype == MGMT_SUBTYPE_DEAUTH ||
	     mgt_subtype == MGMT_SUBTYPE_ACTION)) {
		if (wma_find_vdev_id_by_bssid(wma_handle, wh->i_addr3,
					      &vdev_id) == QDF_STATUS_SUCCESS) {
			status = wma_check_and_process_rmf_frame(wma_handle,
								 vdev_id,
								 &wh,
								 rx_pkt,
								 buf);
			if (status)
				return status;
		} else if (wma_find_vdev_id_by_addr(wma_handle, wh->i_addr1,
					&vdev_id) == QDF_STATUS_SUCCESS) {
			status = wma_check_and_process_rmf_frame(wma_handle,
								 vdev_id,
								 &wh,
								 rx_pkt,
								 buf);
			if (status)
				return status;
		} else if (mgt_subtype == MGMT_SUBTYPE_ACTION) {
			/* NAN Action frame */
			vdev_id = wlan_nan_get_vdev_id_from_bssid(
							wma_handle->pdev,
							wh->i_addr3,
							WLAN_ACTION_OUI_ID);

			if (vdev_id != WMA_INVALID_VDEV_ID) {
				status = wma_check_and_process_rmf_frame(
								wma_handle,
								vdev_id, &wh,
								rx_pkt, buf);
				if (status)
					return status;
			}
		}
	}

	rx_pkt->pkt_meta.session_id =
		(vdev_id == WMA_INVALID_VDEV_ID ? 0 : vdev_id);

	/* Beacons/probe responses must at least reach the IE section */
	if (mgt_type == IEEE80211_FC0_TYPE_MGT &&
	    (mgt_subtype == MGMT_SUBTYPE_BEACON ||
	     mgt_subtype == MGMT_SUBTYPE_PROBE_RESP)) {
		if (mgmt_rx_params->buf_len <=
			(sizeof(struct ieee80211_frame) +
			offsetof(struct wlan_bcn_frame, ie))) {
			wma_debug("Dropping frame from "QDF_MAC_ADDR_FMT,
				  QDF_MAC_ADDR_REF(wh->i_addr3));
			cds_pkt_return_packet(rx_pkt);
			return -EINVAL;
		}
	}

	if (wma_is_pkt_drop_candidate(wma_handle, wh->i_addr2, wh->i_addr3,
				      mgt_subtype)) {
		cds_pkt_return_packet(rx_pkt);
		return -EINVAL;
	}
	wma_mgmt_pktdump_rx_handler(mgmt_rx_params, rx_pkt,
				    wma_handle, mgt_type, mgt_subtype);

	return 0;
}
#ifdef BIG_ENDIAN_HOST
static void wma_mem_endianness_based_copy(
			uint8_t *dst, uint8_t *src, uint32_t size)
{
	/*
	 * For big endian host, copy engine byte_swap is enabled
	 * But the rx mgmt frame buffer content is in network byte order
	 * Need to byte swap the mgmt frame buffer content - so when
	 * copy engine does byte_swap - host gets buffer content in the
	 * correct byte order.
	 */

	uint32_t i;
	uint32_t *destp, *srcp;

	destp = (uint32_t *) dst;
	srcp = (uint32_t *) src;
	/* NOTE(review): size is rounded up to a 4-byte multiple, so up to
	 * 3 bytes past `size` are read from src and written to dst - the
	 * caller is expected to provide padded buffers; verify at call
	 * sites (wma_mgmt_rx_process allocates with RESERVE_BYTES).
	 */
	for (i = 0; i < (roundup(size, sizeof(uint32_t)) / 4); i++) {
		*destp = cpu_to_le32(*srcp);
		destp++;
		srcp++;
	}
}
#else
/* Little endian host: a plain byte copy preserves the frame content */
static void wma_mem_endianness_based_copy(
			uint8_t *dst, uint8_t *src, uint32_t size)
{
	qdf_mem_copy(dst, src, size);
}
#endif
static int wma_mgmt_rx_process(void *handle, uint8_t *data,
				  uint32_t data_len)
{
	tp_wma_handle wma_handle = (tp_wma_handle) handle;
	struct mgmt_rx_event_params *mgmt_rx_params;
	struct wlan_objmgr_psoc *psoc;
	uint8_t *bufp;
	qdf_nbuf_t wbuf;
	QDF_STATUS status;

	if (wma_validate_handle(wma_handle))
		return -EINVAL;

	mgmt_rx_params = qdf_mem_malloc(sizeof(*mgmt_rx_params));
	if (!mgmt_rx_params) {
		return -ENOMEM;
	}

	/* Parse the WMI TLV event into mgmt_rx_params; bufp is set to the
	 * frame payload inside the event buffer.
	 */
	if (wmi_extract_mgmt_rx_params(wma_handle->wmi_handle,
			data, mgmt_rx_params, &bufp) != QDF_STATUS_SUCCESS) {
		wma_err_rl("Extraction of mgmt rx params failed");
		qdf_mem_free(mgmt_rx_params);
		return -EINVAL;
	}

	/* Reject lengths that would read past the event buffer */
	if (mgmt_rx_params->buf_len > data_len ||
	    !mgmt_rx_params->buf_len ||
	    !bufp) {
		wma_err_rl("Invalid data_len %u, buf_len %u bufp %pK",
			   data_len, mgmt_rx_params->buf_len, bufp);
		qdf_mem_free(mgmt_rx_params);
		return -EINVAL;
	}

	if (!mgmt_rx_params->chan_freq) {
		/*
		 * It indicates that FW is legacy and is operating on
		 * channel numbers and it also indicates that BAND_6G support
		 * is not there as BAND_6G works only on frequencies and channel
		 * numbers can be treated as unique.
		 */
		mgmt_rx_params->chan_freq = wlan_reg_legacy_chan_to_freq(
					    wma_handle->pdev,
					    mgmt_rx_params->channel);
	}

	mgmt_rx_params->pdev_id = 0;
	mgmt_rx_params->rx_params = NULL;

	/*
	 * Allocate the memory for this rx packet, add extra 100 bytes for:-
	 *
	 * 1. Filling the missing RSN capabilities by some APs, which fill the
	 *    RSN IE length as extra 2 bytes but dont fill the IE data with
	 *    capabilities, resulting in failure in unpack core due to length
	 *    mismatch. Check sir_validate_and_rectify_ies for more info.
	 *
	 * 2. In the API wma_process_rmf_frame(), the driver trims the CCMP
	 *    header by overwriting the IEEE header to memory occupied by CCMP
	 *    header, but an overflow is possible if the memory allocated to
	 *    frame is less than the sizeof(struct ieee80211_frame) +CCMP
	 *    HEADER len, so allocating 100 bytes would solve this issue too.
	 *
	 * 3. CCMP header is pointing to orig_hdr +
	 *    sizeof(struct ieee80211_frame) which could also result in OOB
	 *    access, if the data len is less than
	 *    sizeof(struct ieee80211_frame), allocating extra bytes would
	 *    result in solving this issue too.
	 */
	wbuf = qdf_nbuf_alloc(NULL, roundup(mgmt_rx_params->buf_len +
								RESERVE_BYTES,
								4), 0, 4, false);
	if (!wbuf) {
		qdf_mem_free(mgmt_rx_params);
		return -ENOMEM;
	}

	qdf_nbuf_put_tail(wbuf, mgmt_rx_params->buf_len);
	qdf_nbuf_set_protocol(wbuf, ETH_P_CONTROL);

	/* Zero the padding between buf_len and the rounded-up allocation */
	qdf_mem_zero(((uint8_t *)qdf_nbuf_data(wbuf) + mgmt_rx_params->buf_len),
		     (roundup(mgmt_rx_params->buf_len + RESERVE_BYTES, 4) -
		     mgmt_rx_params->buf_len));

	wma_mem_endianness_based_copy(qdf_nbuf_data(wbuf),
			bufp, mgmt_rx_params->buf_len);

	psoc = (struct wlan_objmgr_psoc *)
				wma_handle->psoc;
	if (!psoc) {
		wma_err("psoc ctx is NULL");
		qdf_nbuf_free(wbuf);
		qdf_mem_free(mgmt_rx_params);
		return -EINVAL;
	}

	/* Hand the frame to the mgmt txrx component.
	 * NOTE(review): wbuf is not freed on the failure path below -
	 * presumably mgmt_txrx_rx_handler takes ownership of the nbuf in
	 * all cases; confirm against its contract.
	 */
	status = mgmt_txrx_rx_handler(psoc, wbuf, mgmt_rx_params);
	if (status != QDF_STATUS_SUCCESS) {
		qdf_mem_free(mgmt_rx_params);
		return -EINVAL;
	}

	qdf_mem_free(mgmt_rx_params);
	return 0;
}
3925 * 3926 * Return: QDF status 3927 */ 3928 QDF_STATUS wma_de_register_mgmt_frm_client(void) 3929 { 3930 tp_wma_handle wma_handle = (tp_wma_handle) 3931 cds_get_context(QDF_MODULE_ID_WMA); 3932 3933 if (!wma_handle) 3934 return QDF_STATUS_E_NULL_VALUE; 3935 3936 #ifdef QCA_WIFI_FTM 3937 if (cds_get_conparam() == QDF_GLOBAL_FTM_MODE) 3938 return QDF_STATUS_SUCCESS; 3939 #endif 3940 3941 if (wmi_unified_unregister_event_handler(wma_handle->wmi_handle, 3942 wmi_mgmt_rx_event_id) != 0) { 3943 wma_err("Failed to Unregister rx mgmt handler with wmi"); 3944 return QDF_STATUS_E_FAILURE; 3945 } 3946 return QDF_STATUS_SUCCESS; 3947 } 3948 3949 #ifdef WLAN_FEATURE_ROAM_OFFLOAD 3950 /** 3951 * wma_register_roaming_callbacks() - Register roaming callbacks 3952 * @csr_roam_auth_event_handle_cb: CSR callback routine pointer 3953 * @pe_roam_synch_cb: PE roam synch callback routine pointer 3954 * 3955 * Register the SME and PE callback routines with WMA for 3956 * handling roaming 3957 * 3958 * Return: Success or Failure Status 3959 */ 3960 QDF_STATUS wma_register_roaming_callbacks( 3961 QDF_STATUS (*csr_roam_auth_event_handle_cb)(struct mac_context *mac, 3962 uint8_t vdev_id, 3963 struct qdf_mac_addr bssid, 3964 uint32_t akm), 3965 pe_roam_synch_fn_t pe_roam_synch_cb, 3966 QDF_STATUS (*pe_disconnect_cb) (struct mac_context *mac, 3967 uint8_t vdev_id, 3968 uint8_t *deauth_disassoc_frame, 3969 uint16_t deauth_disassoc_frame_len, 3970 uint16_t reason_code), 3971 set_ies_fn_t pe_roam_set_ie_cb) 3972 { 3973 3974 tp_wma_handle wma = cds_get_context(QDF_MODULE_ID_WMA); 3975 3976 if (!wma) 3977 return QDF_STATUS_E_FAILURE; 3978 3979 wma->csr_roam_auth_event_handle_cb = csr_roam_auth_event_handle_cb; 3980 wma->pe_roam_synch_cb = pe_roam_synch_cb; 3981 wma->pe_disconnect_cb = pe_disconnect_cb; 3982 wma->pe_roam_set_ie_cb = pe_roam_set_ie_cb; 3983 wma_debug("Registered roam synch callbacks with WMA successfully"); 3984 3985 return QDF_STATUS_SUCCESS; 3986 } 3987 #endif 3988 3989 /** 3990 * 
wma_register_mgmt_frm_client() - register management frame callback 3991 * 3992 * This function registers event handler for WMI_MGMT_RX_EVENTID. 3993 * 3994 * Return: QDF status 3995 */ 3996 QDF_STATUS wma_register_mgmt_frm_client(void) 3997 { 3998 tp_wma_handle wma_handle = (tp_wma_handle) 3999 cds_get_context(QDF_MODULE_ID_WMA); 4000 4001 if (!wma_handle) 4002 return QDF_STATUS_E_NULL_VALUE; 4003 4004 if (wmi_unified_register_event_handler(wma_handle->wmi_handle, 4005 wmi_mgmt_rx_event_id, 4006 wma_mgmt_rx_process, 4007 WMA_RX_WORK_CTX) != 0) { 4008 wma_err("Failed to register rx mgmt handler with wmi"); 4009 return QDF_STATUS_E_FAILURE; 4010 } 4011 4012 return QDF_STATUS_SUCCESS; 4013 } 4014 4015 /** 4016 * wma_register_packetdump_callback() - stores tx and rx mgmt packet dump 4017 * callback handler 4018 * @tx_cb: tx mgmt packetdump cb 4019 * @rx_cb: rx mgmt packetdump cb 4020 * 4021 * This function is used to store tx and rx mgmt. packet dump callback 4022 * 4023 * Return: None 4024 * 4025 */ 4026 void wma_register_packetdump_callback( 4027 ol_txrx_pktdump_cb tx_cb, 4028 ol_txrx_pktdump_cb rx_cb) 4029 { 4030 tp_wma_handle wma_handle = cds_get_context(QDF_MODULE_ID_WMA); 4031 4032 if (!wma_handle) 4033 return; 4034 4035 wma_handle->wma_mgmt_tx_packetdump_cb = tx_cb; 4036 wma_handle->wma_mgmt_rx_packetdump_cb = rx_cb; 4037 } 4038 4039 /** 4040 * wma_deregister_packetdump_callback() - removes tx and rx mgmt packet dump 4041 * callback handler 4042 * 4043 * This function is used to remove tx and rx mgmt. 
packet dump callback 4044 * 4045 * Return: None 4046 * 4047 */ 4048 void wma_deregister_packetdump_callback(void) 4049 { 4050 tp_wma_handle wma_handle = cds_get_context(QDF_MODULE_ID_WMA); 4051 4052 if (!wma_handle) 4053 return; 4054 4055 wma_handle->wma_mgmt_tx_packetdump_cb = NULL; 4056 wma_handle->wma_mgmt_rx_packetdump_cb = NULL; 4057 } 4058 4059 QDF_STATUS wma_mgmt_unified_cmd_send(struct wlan_objmgr_vdev *vdev, 4060 qdf_nbuf_t buf, uint32_t desc_id, 4061 void *mgmt_tx_params) 4062 { 4063 tp_wma_handle wma_handle; 4064 int ret; 4065 QDF_STATUS status = QDF_STATUS_E_INVAL; 4066 struct wmi_mgmt_params *mgmt_params = 4067 (struct wmi_mgmt_params *)mgmt_tx_params; 4068 void *soc = cds_get_context(QDF_MODULE_ID_SOC); 4069 4070 if (!mgmt_params) { 4071 wma_err("mgmt_params ptr passed is NULL"); 4072 return QDF_STATUS_E_INVAL; 4073 } 4074 mgmt_params->desc_id = desc_id; 4075 4076 if (!vdev) { 4077 wma_err("vdev ptr passed is NULL"); 4078 return QDF_STATUS_E_INVAL; 4079 } 4080 4081 wma_handle = cds_get_context(QDF_MODULE_ID_WMA); 4082 if (!wma_handle) 4083 return QDF_STATUS_E_INVAL; 4084 4085 if (wmi_service_enabled(wma_handle->wmi_handle, 4086 wmi_service_mgmt_tx_wmi)) { 4087 status = wmi_mgmt_unified_cmd_send(wma_handle->wmi_handle, 4088 mgmt_params); 4089 } else { 4090 QDF_NBUF_CB_MGMT_TXRX_DESC_ID(buf) 4091 = mgmt_params->desc_id; 4092 4093 ret = cdp_mgmt_send_ext(soc, mgmt_params->vdev_id, buf, 4094 mgmt_params->tx_type, 4095 mgmt_params->use_6mbps, 4096 mgmt_params->chanfreq); 4097 status = qdf_status_from_os_return(ret); 4098 } 4099 4100 if (status != QDF_STATUS_SUCCESS) { 4101 wma_err("mgmt tx failed"); 4102 return status; 4103 } 4104 4105 return QDF_STATUS_SUCCESS; 4106 } 4107 4108 #ifndef CONFIG_HL_SUPPORT 4109 void wma_mgmt_nbuf_unmap_cb(struct wlan_objmgr_pdev *pdev, 4110 qdf_nbuf_t buf) 4111 { 4112 struct wlan_objmgr_psoc *psoc; 4113 qdf_device_t dev; 4114 4115 if (!buf) 4116 return; 4117 4118 psoc = wlan_pdev_get_psoc(pdev); 4119 if (!psoc) { 4120 
wma_err("Psoc handle NULL"); 4121 return; 4122 } 4123 4124 dev = wlan_psoc_get_qdf_dev(psoc); 4125 qdf_nbuf_unmap_single(dev, buf, QDF_DMA_TO_DEVICE); 4126 } 4127 4128 QDF_STATUS wma_mgmt_frame_fill_peer_cb(struct wlan_objmgr_peer *peer, 4129 qdf_nbuf_t buf) 4130 { 4131 struct wlan_objmgr_psoc *psoc; 4132 struct wlan_objmgr_pdev *pdev; 4133 4134 psoc = wlan_peer_get_psoc(peer); 4135 if (!psoc) { 4136 wma_err("Psoc handle NULL"); 4137 return QDF_STATUS_E_INVAL; 4138 } 4139 4140 pdev = wlan_objmgr_get_pdev_by_id((struct wlan_objmgr_psoc *)psoc, 4141 wlan_peer_get_pdev_id(peer), 4142 WLAN_LEGACY_WMA_ID); 4143 if (!pdev) { 4144 wma_err("Pdev handle NULL"); 4145 return QDF_STATUS_E_INVAL; 4146 } 4147 wma_mgmt_nbuf_unmap_cb(pdev, buf); 4148 wlan_objmgr_pdev_release_ref(pdev, WLAN_LEGACY_WMA_ID); 4149 4150 return QDF_STATUS_SUCCESS; 4151 } 4152 4153 QDF_STATUS 4154 wma_update_edca_pifs_param(WMA_HANDLE handle, 4155 struct edca_pifs_vparam *edca_pifs_param) 4156 { 4157 tp_wma_handle wma_handle = (tp_wma_handle) handle; 4158 QDF_STATUS status; 4159 4160 status = wmi_unified_update_edca_pifs_param(wma_handle->wmi_handle, 4161 edca_pifs_param); 4162 4163 if (QDF_IS_STATUS_ERROR(status)) 4164 wma_err("Failed to set EDCA/PIFS Parameters"); 4165 4166 return status; 4167 } 4168 #endif 4169 4170 QDF_STATUS 4171 wma_update_bss_peer_phy_mode(struct wlan_channel *des_chan, 4172 struct wlan_objmgr_vdev *vdev) 4173 { 4174 struct wlan_objmgr_peer *bss_peer; 4175 enum wlan_phymode old_peer_phymode, new_phymode; 4176 tSirNwType nw_type; 4177 struct vdev_mlme_obj *mlme_obj; 4178 4179 bss_peer = wlan_objmgr_vdev_try_get_bsspeer(vdev, WLAN_LEGACY_WMA_ID); 4180 if (!bss_peer) { 4181 wma_err("not able to find bss peer for vdev %d", 4182 wlan_vdev_get_id(vdev)); 4183 return QDF_STATUS_E_INVAL; 4184 } 4185 4186 old_peer_phymode = wlan_peer_get_phymode(bss_peer); 4187 4188 if (WLAN_REG_IS_24GHZ_CH_FREQ(des_chan->ch_freq)) { 4189 if (des_chan->ch_phymode == WLAN_PHYMODE_11B || 4190 
old_peer_phymode == WLAN_PHYMODE_11B) 4191 nw_type = eSIR_11B_NW_TYPE; 4192 else 4193 nw_type = eSIR_11G_NW_TYPE; 4194 } else { 4195 nw_type = eSIR_11A_NW_TYPE; 4196 } 4197 4198 new_phymode = wma_peer_phymode(nw_type, STA_ENTRY_PEER, 4199 IS_WLAN_PHYMODE_HT(old_peer_phymode), 4200 des_chan->ch_width, 4201 IS_WLAN_PHYMODE_VHT(old_peer_phymode), 4202 IS_WLAN_PHYMODE_HE(old_peer_phymode), 4203 wma_is_phymode_eht(old_peer_phymode)); 4204 4205 if (new_phymode == old_peer_phymode) { 4206 wma_debug("Ignore update, old %d and new %d phymode are same, vdev_id : %d", 4207 old_peer_phymode, new_phymode, 4208 wlan_vdev_get_id(vdev)); 4209 wlan_objmgr_peer_release_ref(bss_peer, WLAN_LEGACY_WMA_ID); 4210 return QDF_STATUS_SUCCESS; 4211 } 4212 4213 mlme_obj = wlan_vdev_mlme_get_cmpt_obj(vdev); 4214 if (!mlme_obj) { 4215 wma_err("not able to get mlme_obj"); 4216 wlan_objmgr_peer_release_ref(bss_peer, WLAN_LEGACY_WMA_ID); 4217 return QDF_STATUS_E_INVAL; 4218 } 4219 4220 wlan_peer_obj_lock(bss_peer); 4221 wlan_peer_set_phymode(bss_peer, new_phymode); 4222 wlan_peer_obj_unlock(bss_peer); 4223 4224 wlan_objmgr_peer_release_ref(bss_peer, WLAN_LEGACY_WMA_ID); 4225 4226 mlme_obj->mgmt.generic.phy_mode = wmi_host_to_fw_phymode(new_phymode); 4227 des_chan->ch_phymode = new_phymode; 4228 4229 return QDF_STATUS_SUCCESS; 4230 } 4231 4232 QDF_STATUS 4233 cm_send_ies_for_roam_invoke(struct wlan_objmgr_vdev *vdev, uint16_t dot11_mode) 4234 { 4235 tp_wma_handle wma = cds_get_context(QDF_MODULE_ID_WMA); 4236 enum QDF_OPMODE op_mode; 4237 QDF_STATUS status; 4238 uint8_t vdev_id; 4239 4240 if (!wma) 4241 return QDF_STATUS_E_FAILURE; 4242 4243 vdev_id = wlan_vdev_get_id(vdev); 4244 op_mode = wlan_vdev_mlme_get_opmode(vdev); 4245 4246 status = wma->pe_roam_set_ie_cb(wma->mac_context, vdev_id, dot11_mode, 4247 op_mode); 4248 return status; 4249 } 4250