1 /* 2 * Copyright (c) 2013-2016 The Linux Foundation. All rights reserved. 3 * 4 * Previously licensed under the ISC license by Qualcomm Atheros, Inc. 5 * 6 * 7 * Permission to use, copy, modify, and/or distribute this software for 8 * any purpose with or without fee is hereby granted, provided that the 9 * above copyright notice and this permission notice appear in all 10 * copies. 11 * 12 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL 13 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED 14 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE 15 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL 16 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR 17 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER 18 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR 19 * PERFORMANCE OF THIS SOFTWARE. 20 */ 21 22 /* 23 * This file was originally distributed by Qualcomm Atheros, Inc. 24 * under proprietary terms before Copyright ownership was assigned 25 * to the Linux Foundation. 26 */ 27 28 /** 29 * DOC: wma_mgmt.c 30 * 31 * This file contains STA/SAP/IBSS and protocol related functions. 
32 */ 33 34 /* Header files */ 35 36 #include "wma.h" 37 #include "wma_api.h" 38 #include "cds_api.h" 39 #include "wmi_unified_api.h" 40 #include "wlan_qct_sys.h" 41 #include "wni_api.h" 42 #include "ani_global.h" 43 #include "wmi_unified.h" 44 #include "wni_cfg.h" 45 #include "cfg_api.h" 46 47 #include "qdf_nbuf.h" 48 #include "qdf_types.h" 49 #include "qdf_mem.h" 50 51 #include "wma_types.h" 52 #include "lim_api.h" 53 #include "lim_session_utils.h" 54 55 #include "cds_utils.h" 56 57 #if !defined(REMOVE_PKT_LOG) 58 #include "pktlog_ac.h" 59 #endif /* REMOVE_PKT_LOG */ 60 61 #include "dbglog_host.h" 62 #include "csr_api.h" 63 #include "ol_fw.h" 64 #include "dfs.h" 65 #include "wma_internal.h" 66 #include "cds_concurrency.h" 67 #include "cdp_txrx_flow_ctrl_legacy.h" 68 #include <cdp_txrx_peer_ops.h> 69 #include <cdp_txrx_pmf.h> 70 #include <cdp_txrx_cfg.h> 71 #include <cdp_txrx_cmn.h> 72 #include <cdp_txrx_misc.h> 73 #include <cdp_txrx_misc.h> 74 75 /** 76 * wma_send_bcn_buf_ll() - prepare and send beacon buffer to fw for LL 77 * @wma: wma handle 78 * @pdev: txrx pdev 79 * @vdev_id: vdev id 80 * @param_buf: SWBA parameters 81 * 82 * Return: none 83 */ 84 static void wma_send_bcn_buf_ll(tp_wma_handle wma, 85 void *pdev, 86 uint8_t vdev_id, 87 WMI_HOST_SWBA_EVENTID_param_tlvs *param_buf) 88 { 89 struct ieee80211_frame *wh; 90 struct beacon_info *bcn; 91 wmi_tim_info *tim_info = param_buf->tim_info; 92 uint8_t *bcn_payload; 93 QDF_STATUS ret; 94 struct beacon_tim_ie *tim_ie; 95 wmi_p2p_noa_info *p2p_noa_info = param_buf->p2p_noa_info; 96 struct p2p_sub_element_noa noa_ie; 97 struct wmi_bcn_send_from_host params; 98 uint8_t i; 99 100 bcn = wma->interfaces[vdev_id].beacon; 101 if (!bcn->buf) { 102 WMA_LOGE("%s: Invalid beacon buffer", __func__); 103 return; 104 } 105 106 qdf_spin_lock_bh(&bcn->lock); 107 108 bcn_payload = qdf_nbuf_data(bcn->buf); 109 110 tim_ie = (struct beacon_tim_ie *)(&bcn_payload[bcn->tim_ie_offset]); 111 112 if (tim_info->tim_changed) { 113 if 
(tim_info->tim_num_ps_pending) 114 qdf_mem_copy(&tim_ie->tim_bitmap, tim_info->tim_bitmap, 115 WMA_TIM_SUPPORTED_PVB_LENGTH); 116 else 117 qdf_mem_zero(&tim_ie->tim_bitmap, 118 WMA_TIM_SUPPORTED_PVB_LENGTH); 119 /* 120 * Currently we support fixed number of 121 * peers as limited by HAL_NUM_STA. 122 * tim offset is always 0 123 */ 124 tim_ie->tim_bitctl = 0; 125 } 126 127 /* Update DTIM Count */ 128 if (tim_ie->dtim_count == 0) 129 tim_ie->dtim_count = tim_ie->dtim_period - 1; 130 else 131 tim_ie->dtim_count--; 132 133 /* 134 * DTIM count needs to be backedup so that 135 * when umac updates the beacon template 136 * current dtim count can be updated properly 137 */ 138 bcn->dtim_count = tim_ie->dtim_count; 139 140 /* update state for buffered multicast frames on DTIM */ 141 if (tim_info->tim_mcast && (tim_ie->dtim_count == 0 || 142 tim_ie->dtim_period == 1)) 143 tim_ie->tim_bitctl |= 1; 144 else 145 tim_ie->tim_bitctl &= ~1; 146 147 /* To avoid sw generated frame sequence the same as H/W generated frame, 148 * the value lower than min_sw_seq is reserved for HW generated frame */ 149 if ((bcn->seq_no & IEEE80211_SEQ_MASK) < MIN_SW_SEQ) 150 bcn->seq_no = MIN_SW_SEQ; 151 152 wh = (struct ieee80211_frame *)bcn_payload; 153 *(uint16_t *) &wh->i_seq[0] = htole16(bcn->seq_no 154 << IEEE80211_SEQ_SEQ_SHIFT); 155 bcn->seq_no++; 156 157 if (WMI_UNIFIED_NOA_ATTR_IS_MODIFIED(p2p_noa_info)) { 158 qdf_mem_zero(&noa_ie, sizeof(noa_ie)); 159 160 noa_ie.index = 161 (uint8_t) WMI_UNIFIED_NOA_ATTR_INDEX_GET(p2p_noa_info); 162 noa_ie.oppPS = 163 (uint8_t) WMI_UNIFIED_NOA_ATTR_OPP_PS_GET(p2p_noa_info); 164 noa_ie.ctwindow = 165 (uint8_t) WMI_UNIFIED_NOA_ATTR_CTWIN_GET(p2p_noa_info); 166 noa_ie.num_descriptors = 167 (uint8_t) WMI_UNIFIED_NOA_ATTR_NUM_DESC_GET(p2p_noa_info); 168 WMA_LOGI("%s: index %u, oppPs %u, ctwindow %u, " 169 "num_descriptors = %u", __func__, noa_ie.index, 170 noa_ie.oppPS, noa_ie.ctwindow, noa_ie.num_descriptors); 171 for (i = 0; i < noa_ie.num_descriptors; i++) { 
172 noa_ie.noa_descriptors[i].type_count = 173 (uint8_t) p2p_noa_info->noa_descriptors[i]. 174 type_count; 175 noa_ie.noa_descriptors[i].duration = 176 p2p_noa_info->noa_descriptors[i].duration; 177 noa_ie.noa_descriptors[i].interval = 178 p2p_noa_info->noa_descriptors[i].interval; 179 noa_ie.noa_descriptors[i].start_time = 180 p2p_noa_info->noa_descriptors[i].start_time; 181 WMA_LOGI("%s: NoA descriptor[%d] type_count %u, " 182 "duration %u, interval %u, start_time = %u", 183 __func__, i, 184 noa_ie.noa_descriptors[i].type_count, 185 noa_ie.noa_descriptors[i].duration, 186 noa_ie.noa_descriptors[i].interval, 187 noa_ie.noa_descriptors[i].start_time); 188 } 189 wma_update_noa(bcn, &noa_ie); 190 191 /* Send a msg to LIM to update the NoA IE in probe response 192 * frames transmitted by the host */ 193 wma_update_probe_resp_noa(wma, &noa_ie); 194 } 195 196 if (bcn->dma_mapped) { 197 qdf_nbuf_unmap_single(wma->qdf_dev, bcn->buf, QDF_DMA_TO_DEVICE); 198 bcn->dma_mapped = 0; 199 } 200 ret = qdf_nbuf_map_single(wma->qdf_dev, bcn->buf, QDF_DMA_TO_DEVICE); 201 if (ret != QDF_STATUS_SUCCESS) { 202 WMA_LOGE("%s: failed map beacon buf to DMA region", __func__); 203 qdf_spin_unlock_bh(&bcn->lock); 204 return; 205 } 206 207 bcn->dma_mapped = 1; 208 params.vdev_id = vdev_id; 209 params.data_len = bcn->len; 210 params.frame_ctrl = *((A_UINT16 *) wh->i_fc); 211 params.frag_ptr = qdf_nbuf_get_frag_paddr(bcn->buf, 0); 212 params.dtim_flag = 0; 213 /* notify Firmware of DTM and mcast/bcast traffic */ 214 if (tim_ie->dtim_count == 0) { 215 params.dtim_flag |= WMI_BCN_SEND_DTIM_ZERO; 216 /* deliver mcast/bcast traffic in next DTIM beacon */ 217 if (tim_ie->tim_bitctl & 0x01) 218 params.dtim_flag |= WMI_BCN_SEND_DTIM_BITCTL_SET; 219 } 220 221 wmi_unified_bcn_buf_ll_cmd(wma->wmi_handle, 222 ¶ms); 223 224 qdf_spin_unlock_bh(&bcn->lock); 225 } 226 227 /** 228 * wma_beacon_swba_handler() - swba event handler 229 * @handle: wma handle 230 * @event: event data 231 * @len: data length 232 * 
233 * SWBA event is alert event to Host requesting host to Queue a beacon 234 * for transmission use only in host beacon mode 235 * 236 * Return: 0 for success or error code 237 */ 238 int wma_beacon_swba_handler(void *handle, uint8_t *event, uint32_t len) 239 { 240 tp_wma_handle wma = (tp_wma_handle) handle; 241 WMI_HOST_SWBA_EVENTID_param_tlvs *param_buf; 242 wmi_host_swba_event_fixed_param *swba_event; 243 uint32_t vdev_map; 244 void *pdev; 245 uint8_t vdev_id = 0; 246 void *soc = cds_get_context(QDF_MODULE_ID_SOC); 247 248 param_buf = (WMI_HOST_SWBA_EVENTID_param_tlvs *) event; 249 if (!param_buf) { 250 WMA_LOGE("Invalid swba event buffer"); 251 return -EINVAL; 252 } 253 swba_event = param_buf->fixed_param; 254 vdev_map = swba_event->vdev_map; 255 256 pdev = cds_get_context(QDF_MODULE_ID_TXRX); 257 if (!pdev) { 258 WMA_LOGE("%s: pdev is NULL", __func__); 259 return -EINVAL; 260 } 261 262 for (; vdev_map; vdev_id++, vdev_map >>= 1) { 263 if (!(vdev_map & 0x1)) 264 continue; 265 if (!cdp_cfg_is_high_latency(soc, 266 cds_get_context(QDF_MODULE_ID_CFG))) 267 wma_send_bcn_buf_ll(wma, pdev, vdev_id, param_buf); 268 break; 269 } 270 return 0; 271 } 272 273 /** 274 * wma_peer_sta_kickout_event_handler() - kickout event handler 275 * @handle: wma handle 276 * @event: event data 277 * @len: data length 278 * 279 * Kickout event is received from firmware on observing beacon miss 280 * It handles kickout event for different modes and indicate to 281 * upper layers. 
282 * 283 * Return: 0 for success or error code 284 */ 285 int wma_peer_sta_kickout_event_handler(void *handle, u8 *event, u32 len) 286 { 287 tp_wma_handle wma = (tp_wma_handle) handle; 288 WMI_PEER_STA_KICKOUT_EVENTID_param_tlvs *param_buf = NULL; 289 wmi_peer_sta_kickout_event_fixed_param *kickout_event = NULL; 290 uint8_t vdev_id, peer_id, macaddr[IEEE80211_ADDR_LEN]; 291 void *peer; 292 void *pdev; 293 tpDeleteStaContext del_sta_ctx; 294 tpSirIbssPeerInactivityInd p_inactivity; 295 void *soc = cds_get_context(QDF_MODULE_ID_SOC); 296 297 WMA_LOGD("%s: Enter", __func__); 298 param_buf = (WMI_PEER_STA_KICKOUT_EVENTID_param_tlvs *) event; 299 kickout_event = param_buf->fixed_param; 300 pdev = cds_get_context(QDF_MODULE_ID_TXRX); 301 if (!pdev) { 302 WMA_LOGE("%s: pdev is NULL", __func__); 303 return -EINVAL; 304 } 305 WMI_MAC_ADDR_TO_CHAR_ARRAY(&kickout_event->peer_macaddr, macaddr); 306 peer = cdp_peer_find_by_addr(soc, pdev, macaddr, &peer_id); 307 if (!peer) { 308 WMA_LOGE("PEER [%pM] not found", macaddr); 309 return -EINVAL; 310 } 311 312 if (cdp_peer_get_vdevid(soc, peer, &vdev_id) != QDF_STATUS_SUCCESS) { 313 WMA_LOGE("Not able to find BSSID for peer [%pM]", macaddr); 314 return -EINVAL; 315 } 316 317 WMA_LOGA("%s: PEER:[%pM], ADDR:[%pN], INTERFACE:%d, peer_id:%d, reason:%d", 318 __func__, macaddr, wma->interfaces[vdev_id].addr, vdev_id, 319 peer_id, kickout_event->reason); 320 if (wma->interfaces[vdev_id].roaming_in_progress) { 321 WMA_LOGE("Ignore STA kick out since roaming is in progress"); 322 return -EINVAL; 323 } 324 325 switch (kickout_event->reason) { 326 case WMI_PEER_STA_KICKOUT_REASON_IBSS_DISCONNECT: 327 p_inactivity = (tpSirIbssPeerInactivityInd) 328 qdf_mem_malloc(sizeof(tSirIbssPeerInactivityInd)); 329 if (!p_inactivity) { 330 WMA_LOGE("QDF MEM Alloc Failed for tSirIbssPeerInactivity"); 331 return -ENOMEM; 332 } 333 334 p_inactivity->staIdx = peer_id; 335 qdf_mem_copy(p_inactivity->peer_addr.bytes, macaddr, 336 IEEE80211_ADDR_LEN); 337 
wma_send_msg(wma, WMA_IBSS_PEER_INACTIVITY_IND, 338 (void *)p_inactivity, 0); 339 goto exit_handler; 340 break; 341 342 #ifdef FEATURE_WLAN_TDLS 343 case WMI_PEER_STA_KICKOUT_REASON_TDLS_DISCONNECT: 344 del_sta_ctx = (tpDeleteStaContext) 345 qdf_mem_malloc(sizeof(tDeleteStaContext)); 346 if (!del_sta_ctx) { 347 WMA_LOGE("%s: mem alloc failed for tDeleteStaContext for TDLS peer: %pM", 348 __func__, macaddr); 349 return -ENOMEM; 350 } 351 352 del_sta_ctx->is_tdls = true; 353 del_sta_ctx->vdev_id = vdev_id; 354 del_sta_ctx->staId = peer_id; 355 qdf_mem_copy(del_sta_ctx->addr2, macaddr, IEEE80211_ADDR_LEN); 356 qdf_mem_copy(del_sta_ctx->bssId, wma->interfaces[vdev_id].bssid, 357 IEEE80211_ADDR_LEN); 358 del_sta_ctx->reasonCode = HAL_DEL_STA_REASON_CODE_KEEP_ALIVE; 359 wma_send_msg(wma, SIR_LIM_DELETE_STA_CONTEXT_IND, 360 (void *)del_sta_ctx, 0); 361 goto exit_handler; 362 break; 363 #endif /* FEATURE_WLAN_TDLS */ 364 365 case WMI_PEER_STA_KICKOUT_REASON_XRETRY: 366 if (wma->interfaces[vdev_id].type == WMI_VDEV_TYPE_STA && 367 (wma->interfaces[vdev_id].sub_type == 0 || 368 wma->interfaces[vdev_id].sub_type == 369 WMI_UNIFIED_VDEV_SUBTYPE_P2P_CLIENT) && 370 !qdf_mem_cmp(wma->interfaces[vdev_id].bssid, 371 macaddr, IEEE80211_ADDR_LEN)) { 372 /* 373 * KICKOUT event is for current station-AP connection. 374 * Treat it like final beacon miss. Station may not have 375 * missed beacons but not able to transmit frames to AP 376 * for a long time. Must disconnect to get out of 377 * this sticky situation. 378 * In future implementation, roaming module will also 379 * handle this event and perform a scan. 380 */ 381 WMA_LOGW("%s: WMI_PEER_STA_KICKOUT_REASON_XRETRY event for STA", 382 __func__); 383 wma_beacon_miss_handler(wma, vdev_id); 384 goto exit_handler; 385 } 386 break; 387 388 case WMI_PEER_STA_KICKOUT_REASON_UNSPECIFIED: 389 /* 390 * Default legacy value used by original firmware implementation. 
391 */ 392 if (wma->interfaces[vdev_id].type == WMI_VDEV_TYPE_STA && 393 (wma->interfaces[vdev_id].sub_type == 0 || 394 wma->interfaces[vdev_id].sub_type == 395 WMI_UNIFIED_VDEV_SUBTYPE_P2P_CLIENT) && 396 !qdf_mem_cmp(wma->interfaces[vdev_id].bssid, 397 macaddr, IEEE80211_ADDR_LEN)) { 398 /* 399 * KICKOUT event is for current station-AP connection. 400 * Treat it like final beacon miss. Station may not have 401 * missed beacons but not able to transmit frames to AP 402 * for a long time. Must disconnect to get out of 403 * this sticky situation. 404 * In future implementation, roaming module will also 405 * handle this event and perform a scan. 406 */ 407 WMA_LOGW("%s: WMI_PEER_STA_KICKOUT_REASON_UNSPECIFIED event for STA", 408 __func__); 409 wma_beacon_miss_handler(wma, vdev_id); 410 goto exit_handler; 411 } 412 break; 413 414 case WMI_PEER_STA_KICKOUT_REASON_INACTIVITY: 415 /* 416 * Handle SA query kickout is same as inactivity kickout. 417 * This could be for STA or SAP role 418 */ 419 case WMI_PEER_STA_KICKOUT_REASON_SA_QUERY_TIMEOUT: 420 default: 421 break; 422 } 423 424 /* 425 * default action is to send delete station context indication to LIM 426 */ 427 del_sta_ctx = 428 (tpDeleteStaContext) qdf_mem_malloc(sizeof(tDeleteStaContext)); 429 if (!del_sta_ctx) { 430 WMA_LOGE("QDF MEM Alloc Failed for tDeleteStaContext"); 431 return -ENOMEM; 432 } 433 434 del_sta_ctx->is_tdls = false; 435 del_sta_ctx->vdev_id = vdev_id; 436 del_sta_ctx->staId = peer_id; 437 qdf_mem_copy(del_sta_ctx->addr2, macaddr, IEEE80211_ADDR_LEN); 438 qdf_mem_copy(del_sta_ctx->bssId, wma->interfaces[vdev_id].addr, 439 IEEE80211_ADDR_LEN); 440 del_sta_ctx->reasonCode = HAL_DEL_STA_REASON_CODE_KEEP_ALIVE; 441 del_sta_ctx->rssi = kickout_event->rssi + WMA_TGT_NOISE_FLOOR_DBM; 442 wma_send_msg(wma, SIR_LIM_DELETE_STA_CONTEXT_IND, (void *)del_sta_ctx, 443 0); 444 445 exit_handler: 446 WMA_LOGD("%s: Exit", __func__); 447 return 0; 448 } 449 450 /** 451 * wma_unified_bcntx_status_event_handler() - 
 * beacon tx status event handler
 * @handle: wma handle
 * @cmd_param_info: event data
 * @len: data length
 *
 * WMI Handler for WMI_OFFLOAD_BCN_TX_STATUS_EVENTID event from firmware.
 * This event is generated by FW when the beacon transmission is offloaded
 * and the host performs beacon template modification using WMI_BCN_TMPL_CMDID
 * The FW generates this event when the first successful beacon transmission
 * after template update
 *
 * Return: 0 for success or error code
 */
int wma_unified_bcntx_status_event_handler(void *handle,
					   uint8_t *cmd_param_info,
					   uint32_t len)
{
	tp_wma_handle wma = (tp_wma_handle) handle;
	WMI_OFFLOAD_BCN_TX_STATUS_EVENTID_param_tlvs *param_buf;
	wmi_offload_bcn_tx_status_event_fixed_param *resp_event;
	tSirFirstBeaconTxCompleteInd *beacon_tx_complete_ind;

	param_buf =
		(WMI_OFFLOAD_BCN_TX_STATUS_EVENTID_param_tlvs *) cmd_param_info;
	if (!param_buf) {
		WMA_LOGE("Invalid bcn tx response event buffer");
		return -EINVAL;
	}

	/* NOTE(review): fixed_param is not NULL-checked here, unlike
	 * sibling handlers — confirm the TLV parser guarantees it.
	 */
	resp_event = param_buf->fixed_param;

	WMA_LOGD("%s", __func__);

	/* Check for valid handle to ensure session is not
	 * deleted in any race
	 */
	if (!wma->interfaces[resp_event->vdev_id].handle) {
		WMA_LOGE("%s: The session does not exist", __func__);
		return -EINVAL;
	}

	/* Beacon Tx Indication supports only AP mode. Ignore in other modes */
	if (wma_is_vdev_in_ap_mode(wma, resp_event->vdev_id) == false) {
		WMA_LOGI("%s: Beacon Tx Indication does not support type %d and sub_type %d",
			 __func__, wma->interfaces[resp_event->vdev_id].type,
			 wma->interfaces[resp_event->vdev_id].sub_type);
		return 0;
	}

	beacon_tx_complete_ind = (tSirFirstBeaconTxCompleteInd *)
		qdf_mem_malloc(sizeof(tSirFirstBeaconTxCompleteInd));
	if (!beacon_tx_complete_ind) {
		WMA_LOGE("%s: Failed to alloc beacon_tx_complete_ind",
			 __func__);
		return -ENOMEM;
	}

	/* Ownership of beacon_tx_complete_ind transfers to the receiver
	 * of WMA_DFS_BEACON_TX_SUCCESS_IND.
	 */
	beacon_tx_complete_ind->messageType = WMA_DFS_BEACON_TX_SUCCESS_IND;
	beacon_tx_complete_ind->length = sizeof(tSirFirstBeaconTxCompleteInd);
	beacon_tx_complete_ind->bssIdx = resp_event->vdev_id;

	wma_send_msg(wma, WMA_DFS_BEACON_TX_SUCCESS_IND,
		     (void *)beacon_tx_complete_ind, 0);
	return 0;
}

/**
 * wma_get_link_probe_timeout() - get link timeout based on sub type
 * @mac: UMAC handler
 * @sub_type: vdev syb type
 * @max_inactive_time: return max inactive time
 * @max_unresponsive_time: return max unresponsive time
 *
 * Reads the link-monitor and keep-alive CFG items appropriate for the
 * vdev sub type (GO vs SAP), falling back to compile-time defaults if
 * the CFG read fails. max_unresponsive_time is derived as
 * max_inactive_time + keep_alive.
 *
 * Return: none
 */
static inline void wma_get_link_probe_timeout(struct sAniSirGlobal *mac,
					      uint32_t sub_type,
					      uint32_t *max_inactive_time,
					      uint32_t *max_unresponsive_time)
{
	uint32_t keep_alive;
	uint16_t lm_id, ka_id;

	switch (sub_type) {
	case WMI_UNIFIED_VDEV_SUBTYPE_P2P_GO:
		lm_id = WNI_CFG_GO_LINK_MONITOR_TIMEOUT;
		ka_id = WNI_CFG_GO_KEEP_ALIVE_TIMEOUT;
		break;
	default:
		/* For softAp the subtype value will be zero */
		lm_id = WNI_CFG_AP_LINK_MONITOR_TIMEOUT;
		ka_id = WNI_CFG_AP_KEEP_ALIVE_TIMEOUT;
	}

	if (wlan_cfg_get_int(mac, lm_id, max_inactive_time) != eSIR_SUCCESS) {
		WMA_LOGE("Failed to read link monitor for subtype %d",
			 sub_type);
		*max_inactive_time = WMA_LINK_MONITOR_DEFAULT_TIME_SECS;
	}

	if (wlan_cfg_get_int(mac, ka_id,
			     &keep_alive) != eSIR_SUCCESS) {
		WMA_LOGE("Failed to read keep alive for subtype %d", sub_type);
		keep_alive = WMA_KEEP_ALIVE_DEFAULT_TIME_SECS;
	}
	*max_unresponsive_time = *max_inactive_time + keep_alive;
}

/**
 * wma_set_sap_keepalive() - set SAP keep alive parameters to fw
 * @wma: wma handle
 * @vdev_id: vdev id
 *
 * Derives min/max idle-inactive and max unresponsive times from CFG
 * (min is half of max) and pushes each as a WMI vdev param. Failures
 * are logged but not propagated.
 *
 * Return: none
 */
void wma_set_sap_keepalive(tp_wma_handle wma, uint8_t vdev_id)
{
	uint32_t min_inactive_time, max_inactive_time, max_unresponsive_time;
	struct sAniSirGlobal *mac = cds_get_context(QDF_MODULE_ID_PE);
	QDF_STATUS status;

	if (NULL == mac) {
		WMA_LOGE("%s: Failed to get mac", __func__);
		return;
	}

	wma_get_link_probe_timeout(mac, wma->interfaces[vdev_id].sub_type,
				   &max_inactive_time, &max_unresponsive_time);

	min_inactive_time = max_inactive_time / 2;

	status = wma_vdev_set_param(wma->wmi_handle,
			vdev_id,
			WMI_VDEV_PARAM_AP_KEEPALIVE_MIN_IDLE_INACTIVE_TIME_SECS,
			min_inactive_time);
	if (QDF_IS_STATUS_ERROR(status))
		WMA_LOGE("Failed to Set AP MIN IDLE INACTIVE TIME");

	status = wma_vdev_set_param(wma->wmi_handle,
			vdev_id,
			WMI_VDEV_PARAM_AP_KEEPALIVE_MAX_IDLE_INACTIVE_TIME_SECS,
			max_inactive_time);
	if (QDF_IS_STATUS_ERROR(status))
		WMA_LOGE("Failed to Set AP MAX IDLE INACTIVE TIME");

	status = wma_vdev_set_param(wma->wmi_handle,
			vdev_id,
			WMI_VDEV_PARAM_AP_KEEPALIVE_MAX_UNRESPONSIVE_TIME_SECS,
			max_unresponsive_time);
	if (QDF_IS_STATUS_ERROR(status))
		WMA_LOGE("Failed to Set MAX UNRESPONSIVE TIME");

	WMA_LOGD("%s:vdev_id:%d min_inactive_time: %u max_inactive_time: %u"
		 " max_unresponsive_time: %u", __func__, vdev_id,
		 min_inactive_time, max_inactive_time, max_unresponsive_time);
}

/**
 * wma_set_sta_sa_query_param() - set sta sa query parameters
 * @wma: wma handle
 * @vdev_id: vdev id
 *
 * This function sets sta query related
parameters in fw. 613 * 614 * Return: none 615 */ 616 617 void wma_set_sta_sa_query_param(tp_wma_handle wma, 618 uint8_t vdev_id) 619 { 620 struct sAniSirGlobal *mac = cds_get_context(QDF_MODULE_ID_PE); 621 uint32_t max_retries, retry_interval; 622 623 WMA_LOGD(FL("Enter:")); 624 625 if (wlan_cfg_get_int 626 (mac, WNI_CFG_PMF_SA_QUERY_MAX_RETRIES, 627 &max_retries) != eSIR_SUCCESS) { 628 max_retries = DEFAULT_STA_SA_QUERY_MAX_RETRIES_COUNT; 629 WMA_LOGE(FL("Failed to get value for WNI_CFG_PMF_SA_QUERY_MAX_RETRIES")); 630 } 631 if (wlan_cfg_get_int 632 (mac, WNI_CFG_PMF_SA_QUERY_RETRY_INTERVAL, 633 &retry_interval) != eSIR_SUCCESS) { 634 retry_interval = DEFAULT_STA_SA_QUERY_RETRY_INTERVAL; 635 WMA_LOGE(FL("Failed to get value for WNI_CFG_PMF_SA_QUERY_RETRY_INTERVAL")); 636 } 637 638 wmi_unified_set_sta_sa_query_param_cmd(wma->wmi_handle, 639 vdev_id, 640 max_retries, 641 retry_interval); 642 643 WMA_LOGD(FL("Exit :")); 644 return; 645 } 646 647 /** 648 * wma_set_sta_keep_alive() - set sta keep alive parameters 649 * @wma: wma handle 650 * @vdev_id: vdev id 651 * @method: method for keep alive 652 * @timeperiod: time period 653 * @hostv4addr: host ipv4 address 654 * @destv4addr: dst ipv4 address 655 * @destmac: destination mac 656 * 657 * This function sets keep alive related parameters in fw. 
658 * 659 * Return: none 660 */ 661 void wma_set_sta_keep_alive(tp_wma_handle wma, uint8_t vdev_id, 662 uint32_t method, uint32_t timeperiod, 663 uint8_t *hostv4addr, uint8_t *destv4addr, 664 uint8_t *destmac) 665 { 666 struct sta_params params; 667 668 WMA_LOGD("%s: Enter", __func__); 669 670 if (!wma) { 671 WMA_LOGE("%s: wma handle is NULL", __func__); 672 return; 673 } 674 675 if (timeperiod > WNI_CFG_INFRA_STA_KEEP_ALIVE_PERIOD_STAMAX) { 676 WMI_LOGE("Invalid period %d Max limit %d", timeperiod, 677 WNI_CFG_INFRA_STA_KEEP_ALIVE_PERIOD_STAMAX); 678 return; 679 } 680 681 params.vdev_id = vdev_id; 682 params.method = method; 683 params.timeperiod = timeperiod; 684 params.hostv4addr = hostv4addr; 685 params.destv4addr = destv4addr; 686 params.destmac = destmac; 687 688 wmi_unified_set_sta_keep_alive_cmd(wma->wmi_handle, 689 ¶ms); 690 691 WMA_LOGD("%s: Exit", __func__); 692 return; 693 } 694 695 /** 696 * wma_vdev_install_key_complete_event_handler() - install key complete handler 697 * @handle: wma handle 698 * @event: event data 699 * @len: data length 700 * 701 * This event is sent by fw once WPA/WPA2 keys are installed in fw. 702 * 703 * Return: 0 for success or error code 704 */ 705 int wma_vdev_install_key_complete_event_handler(void *handle, 706 uint8_t *event, 707 uint32_t len) 708 { 709 WMI_VDEV_INSTALL_KEY_COMPLETE_EVENTID_param_tlvs *param_buf = NULL; 710 wmi_vdev_install_key_complete_event_fixed_param *key_fp = NULL; 711 712 if (!event) { 713 WMA_LOGE("%s: event param null", __func__); 714 return -EINVAL; 715 } 716 717 param_buf = (WMI_VDEV_INSTALL_KEY_COMPLETE_EVENTID_param_tlvs *) event; 718 if (!param_buf) { 719 WMA_LOGE("%s: received null buf from target", __func__); 720 return -EINVAL; 721 } 722 723 key_fp = param_buf->fixed_param; 724 if (!key_fp) { 725 WMA_LOGE("%s: received null event data from target", __func__); 726 return -EINVAL; 727 } 728 /* 729 * Do nothing for now. 
Completion of set key is already indicated to lim 730 */ 731 WMA_LOGI("%s: WMI_VDEV_INSTALL_KEY_COMPLETE_EVENTID", __func__); 732 return 0; 733 } 734 /* 735 * 802.11n D2.0 defined values for "Minimum MPDU Start Spacing": 736 * 0 for no restriction 737 * 1 for 1/4 us - Our lower layer calculations limit our precision to 1 msec 738 * 2 for 1/2 us - Our lower layer calculations limit our precision to 1 msec 739 * 3 for 1 us 740 * 4 for 2 us 741 * 5 for 4 us 742 * 6 for 8 us 743 * 7 for 16 us 744 */ 745 static const uint8_t wma_mpdu_spacing[] = { 0, 1, 1, 1, 2, 4, 8, 16 }; 746 747 /** 748 * wma_parse_mpdudensity() - give mpdu spacing from mpdu density 749 * @mpdudensity: mpdu density 750 * 751 * Return: mpdu spacing or 0 for error 752 */ 753 static inline uint8_t wma_parse_mpdudensity(uint8_t mpdudensity) 754 { 755 if (mpdudensity < sizeof(wma_mpdu_spacing)) 756 return wma_mpdu_spacing[mpdudensity]; 757 else 758 return 0; 759 } 760 761 #if defined(CONFIG_HL_SUPPORT) && defined(FEATURE_WLAN_TDLS) 762 763 /** 764 * wma_unified_peer_state_update() - update peer state 765 * @pdev: pdev handle 766 * @sta_mac: pointer to sta mac addr 767 * @bss_addr: bss address 768 * @sta_type: sta entry type 769 * 770 * 771 * Return: None 772 */ 773 static void 774 wma_unified_peer_state_update( 775 void *pdev, 776 uint8_t *sta_mac, 777 uint8_t *bss_addr, 778 uint8_t sta_type) 779 { 780 void *soc = cds_get_context(QDF_MODULE_ID_SOC); 781 782 if (STA_ENTRY_TDLS_PEER == sta_type) 783 cdp_peer_state_update(soc, pdev, sta_mac, 784 OL_TXRX_PEER_STATE_AUTH); 785 else 786 cdp_peer_state_update(soc, pdev, bss_addr, 787 OL_TXRX_PEER_STATE_AUTH); 788 } 789 #else 790 791 static inline void 792 wma_unified_peer_state_update( 793 void *pdev, 794 uint8_t *sta_mac, 795 uint8_t *bss_addr, 796 uint8_t sta_type) 797 { 798 void *soc = cds_get_context(QDF_MODULE_ID_SOC); 799 800 cdp_peer_state_update(soc, pdev, bss_addr, OL_TXRX_PEER_STATE_AUTH); 801 } 802 #endif 803 804 /** 805 * 
 * wma_send_peer_assoc() - send peer assoc command to fw
 * @wma: wma handle
 * @nw_type: nw type
 * @params: add sta params
 *
 * This function send peer assoc command to firmware with
 * different parameters.
 *
 * Return: QDF_STATUS
 */
QDF_STATUS wma_send_peer_assoc(tp_wma_handle wma,
			       tSirNwType nw_type,
			       tpAddStaParams params)
{
	void *pdev;
	struct peer_assoc_params *cmd;
	int32_t ret, max_rates, i;
	uint8_t rx_stbc, tx_stbc;
	uint8_t *rate_pos;
	wmi_rate_set peer_legacy_rates, peer_ht_rates;
	uint32_t num_peer_11b_rates = 0;
	uint32_t num_peer_11a_rates = 0;
	uint32_t phymode;
	uint32_t peer_nss = 1;
	struct wma_txrx_node *intr = NULL;
	QDF_STATUS status;

	/* cmd is heap-allocated because peer_assoc_params is large;
	 * freed on every exit path below.
	 */
	cmd = qdf_mem_malloc(sizeof(struct peer_assoc_params));
	if (!cmd) {
		WMA_LOGE("Failed to allocate peer_assoc_params param");
		return QDF_STATUS_E_NOMEM;
	}

	intr = &wma->interfaces[params->smesessionId];

	pdev = cds_get_context(QDF_MODULE_ID_TXRX);

	if (NULL == pdev) {
		WMA_LOGE("%s: Failed to get pdev", __func__);
		qdf_mem_free(cmd);
		return QDF_STATUS_E_INVAL;
	}

	qdf_mem_zero(&peer_legacy_rates, sizeof(wmi_rate_set));
	qdf_mem_zero(&peer_ht_rates, sizeof(wmi_rate_set));
	qdf_mem_zero(cmd, sizeof(struct peer_assoc_params));

	phymode = wma_peer_phymode(nw_type, params->staType,
				   params->htCapable,
				   params->ch_width,
				   params->vhtCapable);

	/* Legacy Rateset: 11b rates first, then 11a/g rates */
	rate_pos = (uint8_t *) peer_legacy_rates.rates;
	for (i = 0; i < SIR_NUM_11B_RATES; i++) {
		if (!params->supportedRates.llbRates[i])
			continue;
		rate_pos[peer_legacy_rates.num_rates++] =
			params->supportedRates.llbRates[i];
		num_peer_11b_rates++;
	}
	for (i = 0; i < SIR_NUM_11A_RATES; i++) {
		if (!params->supportedRates.llaRates[i])
			continue;
		rate_pos[peer_legacy_rates.num_rates++] =
			params->supportedRates.llaRates[i];
		num_peer_11a_rates++;
	}

	/* A pure 11a (or 11b) phymode with no matching rates is invalid */
	if ((phymode == MODE_11A && num_peer_11a_rates == 0) ||
	    (phymode == MODE_11B && num_peer_11b_rates == 0)) {
		WMA_LOGW("%s: Invalid phy rates. phymode 0x%x, 11b_rates %d, 11a_rates %d",
			 __func__, phymode, num_peer_11b_rates,
			 num_peer_11a_rates);
		qdf_mem_free(cmd);
		return QDF_STATUS_E_INVAL;
	}

	/* HT Rateset: one entry per MCS bit set in the supported MCS map */
	max_rates = sizeof(peer_ht_rates.rates) /
		    sizeof(peer_ht_rates.rates[0]);
	rate_pos = (uint8_t *) peer_ht_rates.rates;
	for (i = 0; i < MAX_SUPPORTED_RATES; i++) {
		if (params->supportedRates.supportedMCSSet[i / 8] &
		    (1 << (i % 8))) {
			rate_pos[peer_ht_rates.num_rates++] = i;
			if (i >= 8) {
				/* MCS8 or higher rate is present, must be 2x2 */
				peer_nss = 2;
			}
		}
		if (peer_ht_rates.num_rates == max_rates)
			break;
	}

	if (params->htCapable && !peer_ht_rates.num_rates) {
		uint8_t temp_ni_rates[8] = { 0x0, 0x1, 0x2, 0x3,
					     0x4, 0x5, 0x6, 0x7};
		/*
		 * Workaround for EV 116382: The peer is marked HT but with
		 * supported rx mcs set is set to 0. 11n spec mandates MCS0-7
		 * for a HT STA. So forcing the supported rx mcs rate to
		 * MCS 0-7. This workaround will be removed once we get
		 * clarification from WFA regarding this STA behavior.
		 */

		/* TODO: Do we really need this? */
		WMA_LOGW("Peer is marked as HT capable but supported mcs rate is 0");
		peer_ht_rates.num_rates = sizeof(temp_ni_rates);
		qdf_mem_copy((uint8_t *) peer_ht_rates.rates, temp_ni_rates,
			     peer_ht_rates.num_rates);
	}

	/* in ap/ibss mode and for tdls peer, use mac address of the peer in
	 * the other end as the new peer address; in sta mode, use bss id to
	 * be the new peer address
	 */
	if ((wma_is_vdev_in_ap_mode(wma, params->smesessionId))
	    || (wma_is_vdev_in_ibss_mode(wma, params->smesessionId))
#ifdef FEATURE_WLAN_TDLS
	    || (STA_ENTRY_TDLS_PEER == params->staType)
#endif /* FEATURE_WLAN_TDLS */
	    )
		WMI_CHAR_ARRAY_TO_MAC_ADDR(params->staMac, &cmd->peer_macaddr);
	else
		WMI_CHAR_ARRAY_TO_MAC_ADDR(params->bssId, &cmd->peer_macaddr);
	cmd->vdev_id = params->smesessionId;
	cmd->peer_new_assoc = 1;
	cmd->peer_associd = params->assocId;

	/*
	 * The target only needs a subset of the flags maintained in the host.
	 * Just populate those flags and send it down
	 */
	cmd->peer_flags = 0;

	if (params->wmmEnabled)
		cmd->peer_flags |= WMI_PEER_QOS;

	if (params->uAPSD) {
		cmd->peer_flags |= WMI_PEER_APSD;
		WMA_LOGD("Set WMI_PEER_APSD: uapsd Mask %d", params->uAPSD);
	}

	if (params->htCapable) {
		cmd->peer_flags |= (WMI_PEER_HT | WMI_PEER_QOS);
		cmd->peer_rate_caps |= WMI_RC_HT_FLAG;

		if (params->ch_width) {
			cmd->peer_flags |= WMI_PEER_40MHZ;
			cmd->peer_rate_caps |= WMI_RC_CW40_FLAG;
			if (params->fShortGI40Mhz)
				cmd->peer_rate_caps |= WMI_RC_SGI_FLAG;
		} else if (params->fShortGI20Mhz) {
			cmd->peer_rate_caps |= WMI_RC_SGI_FLAG;
		}
	}

	if (params->vhtCapable) {
		cmd->peer_flags |= (WMI_PEER_HT | WMI_PEER_VHT | WMI_PEER_QOS);
		cmd->peer_rate_caps |= WMI_RC_HT_FLAG;
	}

	/* 80+80 MHz is reported to the target with the 160 MHz flag */
	if (params->ch_width == CH_WIDTH_80MHZ)
		cmd->peer_flags |= WMI_PEER_80MHZ;
	else if (params->ch_width == CH_WIDTH_160MHZ)
		cmd->peer_flags |= WMI_PEER_160MHZ;
	else if (params->ch_width == CH_WIDTH_80P80MHZ)
		cmd->peer_flags |= WMI_PEER_160MHZ;

	cmd->peer_vht_caps = params->vht_caps;
	if (params->p2pCapableSta)
		cmd->peer_flags |= WMI_PEER_IS_P2P_CAPABLE;

	if (params->rmfEnabled)
		cmd->peer_flags |= WMI_PEER_PMF;

	rx_stbc = (params->ht_caps & IEEE80211_HTCAP_C_RXSTBC) >>
		  IEEE80211_HTCAP_C_RXSTBC_S;
	if (rx_stbc) {
		cmd->peer_flags |= WMI_PEER_STBC;
		cmd->peer_rate_caps |= (rx_stbc << WMI_RC_RX_STBC_FLAG_S);
	}

	tx_stbc = (params->ht_caps & IEEE80211_HTCAP_C_TXSTBC) >>
		  IEEE80211_HTCAP_C_TXSTBC_S;
	if (tx_stbc) {
		cmd->peer_flags |= WMI_PEER_STBC;
		cmd->peer_rate_caps |= (tx_stbc << WMI_RC_TX_STBC_FLAG_S);
	}

	if (params->htLdpcCapable || params->vhtLdpcCapable)
		cmd->peer_flags |= WMI_PEER_LDPC;

	switch (params->mimoPS) {
	case eSIR_HT_MIMO_PS_STATIC:
		cmd->peer_flags |= WMI_PEER_STATIC_MIMOPS;
		break;
	case eSIR_HT_MIMO_PS_DYNAMIC:
		cmd->peer_flags |= WMI_PEER_DYN_MIMOPS;
		break;
	case eSIR_HT_MIMO_PS_NO_LIMIT:
		cmd->peer_flags |= WMI_PEER_SPATIAL_MUX;
		break;
	default:
		break;
	}

#ifdef FEATURE_WLAN_TDLS
	if (STA_ENTRY_TDLS_PEER == params->staType)
		cmd->peer_flags |= WMI_PEER_AUTH;
#endif /* FEATURE_WLAN_TDLS */

	if (params->wpa_rsn
#ifdef FEATURE_WLAN_WAPI
	    || params->encryptType == eSIR_ED_WPI
#endif /* FEATURE_WLAN_WAPI */
	    )
		cmd->peer_flags |= WMI_PEER_NEED_PTK_4_WAY;
	/* bit 1 of wpa_rsn set means a group key handshake is required */
	if (params->wpa_rsn >> 1)
		cmd->peer_flags |= WMI_PEER_NEED_GTK_2_WAY;

	wma_unified_peer_state_update(pdev, params->staMac,
				      params->bssId, params->staType);

#ifdef FEATURE_WLAN_WAPI
	if (params->encryptType == eSIR_ED_WPI) {
		ret = wma_vdev_set_param(wma->wmi_handle,
					 params->smesessionId,
					 WMI_VDEV_PARAM_DROP_UNENCRY,
					 false);
		if (ret) {
			WMA_LOGE
				("Set WMI_VDEV_PARAM_DROP_UNENCRY Param status:%d\n",
				ret);
			qdf_mem_free(cmd);
			return ret;
		}
	}
#endif /* FEATURE_WLAN_WAPI */

	cmd->peer_caps = params->capab_info;
	cmd->peer_listen_intval = params->listenInterval;
	cmd->peer_ht_caps = params->ht_caps;
	cmd->peer_max_mpdu = (1 << (IEEE80211_HTCAP_MAXRXAMPDU_FACTOR +
				    params->maxAmpduSize)) - 1;
	cmd->peer_mpdu_density = wma_parse_mpdudensity(params->maxAmpduDensity);

	/* MCS bytes 1/2 set imply 2- and 3-stream support respectively */
	if (params->supportedRates.supportedMCSSet[1] &&
	    params->supportedRates.supportedMCSSet[2])
		cmd->peer_rate_caps |= WMI_RC_TS_FLAG;
	else if (params->supportedRates.supportedMCSSet[1])
		cmd->peer_rate_caps |= WMI_RC_DS_FLAG;

	/* Update peer legacy rate information */
	cmd->peer_legacy_rates.num_rates = peer_legacy_rates.num_rates;
	qdf_mem_copy(cmd->peer_legacy_rates.rates, peer_legacy_rates.rates,
		     peer_legacy_rates.num_rates);

	/* Update peer HT rate information */
	cmd->peer_ht_rates.num_rates = peer_ht_rates.num_rates;
	qdf_mem_copy(cmd->peer_ht_rates.rates, peer_ht_rates.rates,
		     peer_ht_rates.num_rates);

	/* VHT Rates */

	cmd->peer_nss = peer_nss;
	/*
	 * Because of DBS a vdev may come up in any of the two MACs with
	 * different capabilities. STBC capab should be fetched for given
	 * hard_mode->MAC_id combo. It is planned that firmware should provide
	 * these dev capabilities. But for now number of tx streams can be used
	 * to identify if Tx STBC needs to be disabled.
	 */
	if (intr->tx_streams < 2) {
		cmd->peer_vht_caps &= ~(1 << SIR_MAC_VHT_CAP_TXSTBC);
		WMA_LOGD("Num tx_streams: %d, Disabled txSTBC",
			 intr->tx_streams);
	}
	WMA_LOGD("peer_nss %d peer_ht_rates.num_rates %d ", cmd->peer_nss,
		 peer_ht_rates.num_rates);

	cmd->vht_capable = params->vhtCapable;
	if (params->vhtCapable) {
#define VHT2x2MCSMASK 0xc
		cmd->rx_max_rate = params->supportedRates.vhtRxHighestDataRate;
		cmd->rx_mcs_set = params->supportedRates.vhtRxMCSMap;
		cmd->tx_max_rate = params->supportedRates.vhtTxHighestDataRate;
		cmd->tx_mcs_set = params->supportedRates.vhtTxMCSMap;

		if (params->vhtSupportedRxNss) {
			cmd->peer_nss = params->vhtSupportedRxNss;
		} else {
			/* NSS==2 unavailable in the MCS map means 1x1 */
			cmd->peer_nss = ((cmd->rx_mcs_set & VHT2x2MCSMASK)
					 == VHT2x2MCSMASK) ? 1 : 2;
		}
	}

	/*
	 * Limit nss to max number of rf chain supported by target
	 * Otherwise Fw will crash
	 */
	wma_update_txrx_chainmask(wma->num_rf_chains, &cmd->peer_nss);

	intr->nss = cmd->peer_nss;
	cmd->peer_phymode = phymode;
	WMA_LOGD("%s: vdev_id %d associd %d peer_flags %x rate_caps %x "
		 "peer_caps %x listen_intval %d ht_caps %x max_mpdu %d "
		 "nss %d phymode %d peer_mpdu_density %d encr_type %d "
		 "cmd->peer_vht_caps %x", __func__,
		 cmd->vdev_id, cmd->peer_associd, cmd->peer_flags,
		 cmd->peer_rate_caps, cmd->peer_caps,
		 cmd->peer_listen_intval, cmd->peer_ht_caps,
		 cmd->peer_max_mpdu, cmd->peer_nss, cmd->peer_phymode,
		 cmd->peer_mpdu_density, params->encryptType,
		 cmd->peer_vht_caps);

	status = wmi_unified_peer_assoc_send(wma->wmi_handle,
					     cmd);
	if (QDF_IS_STATUS_ERROR(status))
		WMA_LOGP(FL("Failed to send peer assoc command status = %d"),
			 status);
	qdf_mem_free(cmd);

	return status;
}

/**
 * wmi_unified_vdev_set_gtx_cfg_send() - set GTX params
 * @wmi_handle: wmi handle
 * @if_id:
vdev id 1135 * @gtx_info: GTX config params 1136 * 1137 * This function set GTX related params in firmware. 1138 * 1139 * Return: 0 for success or error code 1140 */ 1141 QDF_STATUS wmi_unified_vdev_set_gtx_cfg_send(wmi_unified_t wmi_handle, 1142 uint32_t if_id, 1143 gtx_config_t *gtx_info) 1144 { 1145 struct wmi_gtx_config params; 1146 1147 params.gtx_rt_mask[0] = gtx_info->gtxRTMask[0]; 1148 params.gtx_rt_mask[1] = gtx_info->gtxRTMask[1]; 1149 params.gtx_usrcfg = gtx_info->gtxUsrcfg; 1150 params.gtx_threshold = gtx_info->gtxPERThreshold; 1151 params.gtx_margin = gtx_info->gtxPERMargin; 1152 params.gtx_tpcstep = gtx_info->gtxTPCstep; 1153 params.gtx_tpcmin = gtx_info->gtxTPCMin; 1154 params.gtx_bwmask = gtx_info->gtxBWMask; 1155 1156 return wmi_unified_vdev_set_gtx_cfg_cmd(wmi_handle, 1157 if_id, ¶ms); 1158 1159 } 1160 1161 /** 1162 * wma_update_protection_mode() - update protection mode 1163 * @wma: wma handle 1164 * @vdev_id: vdev id 1165 * @llbcoexist: protection mode info 1166 * 1167 * This function set protection mode(RTS/CTS) to fw for passed vdev id. 1168 * 1169 * Return: none 1170 */ 1171 void wma_update_protection_mode(tp_wma_handle wma, uint8_t vdev_id, 1172 uint8_t llbcoexist) 1173 { 1174 QDF_STATUS ret; 1175 enum ieee80211_protmode prot_mode; 1176 1177 prot_mode = llbcoexist ? 
IEEE80211_PROT_CTSONLY : IEEE80211_PROT_NONE; 1178 1179 ret = wma_vdev_set_param(wma->wmi_handle, vdev_id, 1180 WMI_VDEV_PARAM_PROTECTION_MODE, 1181 prot_mode); 1182 1183 if (QDF_IS_STATUS_ERROR(ret)) 1184 WMA_LOGE("Failed to send wmi protection mode cmd"); 1185 else 1186 WMA_LOGD("Updated protection mode %d to target", prot_mode); 1187 } 1188 1189 /** 1190 * wma_update_beacon_interval() - update beacon interval in fw 1191 * @wma: wma handle 1192 * @vdev_id: vdev id 1193 * @beaconInterval: becon interval 1194 * 1195 * Return: none 1196 */ 1197 static void 1198 wma_update_beacon_interval(tp_wma_handle wma, uint8_t vdev_id, 1199 uint16_t beaconInterval) 1200 { 1201 QDF_STATUS ret; 1202 1203 ret = wma_vdev_set_param(wma->wmi_handle, vdev_id, 1204 WMI_VDEV_PARAM_BEACON_INTERVAL, 1205 beaconInterval); 1206 1207 if (QDF_IS_STATUS_ERROR(ret)) 1208 WMA_LOGE("Failed to update beacon interval"); 1209 else 1210 WMA_LOGI("Updated beacon interval %d for vdev %d", 1211 beaconInterval, vdev_id); 1212 } 1213 1214 /** 1215 * wma_process_update_beacon_params() - update beacon parameters to target 1216 * @wma: wma handle 1217 * @bcn_params: beacon parameters 1218 * 1219 * Return: none 1220 */ 1221 void 1222 wma_process_update_beacon_params(tp_wma_handle wma, 1223 tUpdateBeaconParams *bcn_params) 1224 { 1225 if (!bcn_params) { 1226 WMA_LOGE("bcn_params NULL"); 1227 return; 1228 } 1229 1230 if (bcn_params->smeSessionId >= wma->max_bssid) { 1231 WMA_LOGE("Invalid vdev id %d", bcn_params->smeSessionId); 1232 return; 1233 } 1234 1235 if (bcn_params->paramChangeBitmap & PARAM_BCN_INTERVAL_CHANGED) { 1236 wma_update_beacon_interval(wma, bcn_params->smeSessionId, 1237 bcn_params->beaconInterval); 1238 } 1239 1240 if (bcn_params->paramChangeBitmap & PARAM_llBCOEXIST_CHANGED) 1241 wma_update_protection_mode(wma, bcn_params->smeSessionId, 1242 bcn_params->llbCoexist); 1243 } 1244 1245 /** 1246 * wma_update_cfg_params() - update cfg parameters to target 1247 * @wma: wma handle 1248 * @cfgParam: 
cfg parameter 1249 * 1250 * Return: none 1251 */ 1252 void wma_update_cfg_params(tp_wma_handle wma, tSirMsgQ *cfgParam) 1253 { 1254 uint8_t vdev_id; 1255 uint32_t param_id; 1256 uint32_t cfg_val; 1257 QDF_STATUS ret; 1258 /* get mac to acess CFG data base */ 1259 struct sAniSirGlobal *pmac; 1260 1261 switch (cfgParam->bodyval) { 1262 case WNI_CFG_RTS_THRESHOLD: 1263 param_id = WMI_VDEV_PARAM_RTS_THRESHOLD; 1264 break; 1265 case WNI_CFG_FRAGMENTATION_THRESHOLD: 1266 param_id = WMI_VDEV_PARAM_FRAGMENTATION_THRESHOLD; 1267 break; 1268 default: 1269 WMA_LOGD("Unhandled cfg parameter %d", cfgParam->bodyval); 1270 return; 1271 } 1272 1273 pmac = cds_get_context(QDF_MODULE_ID_PE); 1274 1275 if (NULL == pmac) { 1276 WMA_LOGE("%s: Failed to get pmac", __func__); 1277 return; 1278 } 1279 1280 if (wlan_cfg_get_int(pmac, (uint16_t) cfgParam->bodyval, 1281 &cfg_val) != eSIR_SUCCESS) { 1282 WMA_LOGE("Failed to get value for CFG PARAMS %d. returning without updating", 1283 cfgParam->bodyval); 1284 return; 1285 } 1286 1287 for (vdev_id = 0; vdev_id < wma->max_bssid; vdev_id++) { 1288 if (wma->interfaces[vdev_id].handle != 0) { 1289 ret = wma_vdev_set_param(wma->wmi_handle, 1290 vdev_id, param_id, 1291 cfg_val); 1292 if (QDF_IS_STATUS_ERROR(ret)) 1293 WMA_LOGE("Update cfg params failed for vdevId %d", 1294 vdev_id); 1295 } 1296 } 1297 } 1298 1299 /** 1300 * wma_read_cfg_wepkey() - fill key_info for WEP key 1301 * @wma_handle: wma handle 1302 * @key_info: key_info ptr 1303 * @def_key_idx: default key index 1304 * @num_keys: number of keys 1305 * 1306 * This function reads WEP keys from cfg and fills 1307 * up key_info. 
1308 * 1309 * Return: none 1310 */ 1311 static void wma_read_cfg_wepkey(tp_wma_handle wma_handle, 1312 tSirKeys *key_info, uint32_t *def_key_idx, 1313 uint8_t *num_keys) 1314 { 1315 tSirRetStatus status; 1316 uint32_t val = SIR_MAC_KEY_LENGTH; 1317 uint8_t i, j; 1318 1319 WMA_LOGD("Reading WEP keys from cfg"); 1320 /* NOTE:def_key_idx is initialized to 0 by the caller */ 1321 status = wlan_cfg_get_int(wma_handle->mac_context, 1322 WNI_CFG_WEP_DEFAULT_KEYID, def_key_idx); 1323 if (status != eSIR_SUCCESS) 1324 WMA_LOGE("Unable to read default id, defaulting to 0"); 1325 1326 for (i = 0, j = 0; i < SIR_MAC_MAX_NUM_OF_DEFAULT_KEYS; i++) { 1327 status = wlan_cfg_get_str(wma_handle->mac_context, 1328 (uint16_t) WNI_CFG_WEP_DEFAULT_KEY_1 + 1329 i, key_info[j].key, &val); 1330 if (status != eSIR_SUCCESS) { 1331 WMA_LOGE("WEP key is not configured at :%d", i); 1332 } else { 1333 key_info[j].keyId = i; 1334 key_info[j].keyLength = (uint16_t) val; 1335 j++; 1336 } 1337 } 1338 *num_keys = j; 1339 } 1340 1341 /** 1342 * wma_setup_install_key_cmd() - set key parameters 1343 * @wma_handle: wma handle 1344 * @key_params: key parameters 1345 * @mode: op mode 1346 * 1347 * This function fills structure from information 1348 * passed in key_params. 
1349 * 1350 * Return: QDF_STATUS_SUCCESS - success 1351 QDF_STATUS_E_FAILURE - failure 1352 QDF_STATUS_E_NOMEM - invalid request 1353 */ 1354 static QDF_STATUS wma_setup_install_key_cmd(tp_wma_handle wma_handle, 1355 struct wma_set_key_params 1356 *key_params, uint8_t mode) 1357 { 1358 struct set_key_params params; 1359 QDF_STATUS status = QDF_STATUS_SUCCESS; 1360 #ifdef WLAN_FEATURE_11W 1361 struct wma_txrx_node *iface = NULL; 1362 #endif /* WLAN_FEATURE_11W */ 1363 if ((key_params->key_type == eSIR_ED_NONE && 1364 key_params->key_len) || (key_params->key_type != eSIR_ED_NONE && 1365 !key_params->key_len)) { 1366 WMA_LOGE("%s:Invalid set key request", __func__); 1367 return QDF_STATUS_E_NOMEM; 1368 } 1369 1370 params.vdev_id = key_params->vdev_id; 1371 params.key_idx = key_params->key_idx; 1372 qdf_mem_copy(params.peer_mac, key_params->peer_mac, IEEE80211_ADDR_LEN); 1373 1374 #ifdef FEATURE_WLAN_WAPI 1375 qdf_mem_set(params.tx_iv, 16, 0); 1376 qdf_mem_set(params.rx_iv, 16, 0); 1377 #endif 1378 params.key_txmic_len = 0; 1379 params.key_rxmic_len = 0; 1380 1381 params.key_flags = 0; 1382 if (key_params->unicast) 1383 params.key_flags |= PAIRWISE_USAGE; 1384 else 1385 params.key_flags |= GROUP_USAGE; 1386 1387 switch (key_params->key_type) { 1388 case eSIR_ED_NONE: 1389 params.key_cipher = WMI_CIPHER_NONE; 1390 break; 1391 case eSIR_ED_WEP40: 1392 case eSIR_ED_WEP104: 1393 params.key_cipher = WMI_CIPHER_WEP; 1394 if (key_params->unicast && 1395 params.key_idx == key_params->def_key_idx) { 1396 WMA_LOGD("STA Mode: cmd->key_flags |= TX_USAGE"); 1397 params.key_flags |= TX_USAGE; 1398 } else if ((mode == wlan_op_mode_ap) && 1399 (params.key_idx == key_params->def_key_idx)) { 1400 WMA_LOGD("AP Mode: cmd->key_flags |= TX_USAGE"); 1401 params.key_flags |= TX_USAGE; 1402 } 1403 break; 1404 case eSIR_ED_TKIP: 1405 params.key_txmic_len = WMA_TXMIC_LEN; 1406 params.key_rxmic_len = WMA_RXMIC_LEN; 1407 params.key_cipher = WMI_CIPHER_TKIP; 1408 break; 1409 #ifdef 
FEATURE_WLAN_WAPI 1410 #define WPI_IV_LEN 16 1411 case eSIR_ED_WPI: 1412 { 1413 /*initialize receive and transmit IV with default values */ 1414 /* **Note: tx_iv must be sent in reverse** */ 1415 unsigned char tx_iv[16] = { 0x36, 0x5c, 0x36, 0x5c, 0x36, 0x5c, 1416 0x36, 0x5c, 0x36, 0x5c, 0x36, 0x5c, 1417 0x36, 0x5c, 0x36, 0x5c}; 1418 unsigned char rx_iv[16] = { 0x5c, 0x36, 0x5c, 0x36, 0x5c, 0x36, 1419 0x5c, 0x36, 0x5c, 0x36, 0x5c, 0x36, 1420 0x5c, 0x36, 0x5c, 0x37}; 1421 if (mode == wlan_op_mode_ap) { 1422 /* Authenticator initializes the value of PN as 1423 * 0x5C365C365C365C365C365C365C365C36 for MCastkey Update 1424 */ 1425 if (key_params->unicast) 1426 tx_iv[0] = 0x37; 1427 1428 rx_iv[WPI_IV_LEN - 1] = 0x36; 1429 } else { 1430 if (!key_params->unicast) 1431 rx_iv[WPI_IV_LEN - 1] = 0x36; 1432 } 1433 1434 params.key_txmic_len = WMA_TXMIC_LEN; 1435 params.key_rxmic_len = WMA_RXMIC_LEN; 1436 1437 qdf_mem_copy(¶ms.rx_iv, &rx_iv, 1438 WPI_IV_LEN); 1439 qdf_mem_copy(¶ms.tx_iv, &tx_iv, 1440 WPI_IV_LEN); 1441 params.key_cipher = WMI_CIPHER_WAPI; 1442 break; 1443 } 1444 #endif /* FEATURE_WLAN_WAPI */ 1445 case eSIR_ED_CCMP: 1446 params.key_cipher = WMI_CIPHER_AES_CCM; 1447 break; 1448 #ifdef WLAN_FEATURE_11W 1449 case eSIR_ED_AES_128_CMAC: 1450 params.key_cipher = WMI_CIPHER_AES_CMAC; 1451 break; 1452 #endif /* WLAN_FEATURE_11W */ 1453 default: 1454 /* TODO: MFP ? */ 1455 WMA_LOGE("%s:Invalid encryption type:%d", __func__, 1456 key_params->key_type); 1457 return QDF_STATUS_E_NOMEM; 1458 } 1459 1460 #ifdef BIG_ENDIAN_HOST 1461 { 1462 /* for big endian host, copy engine byte_swap is enabled 1463 * But the key data content is in network byte order 1464 * Need to byte swap the key data content - so when copy engine 1465 * does byte_swap - target gets key_data content in the correct 1466 * order. 
1467 */ 1468 int8_t i; 1469 uint32_t *destp, *srcp; 1470 1471 destp = (uint32_t *) params.key_data; 1472 srcp = (uint32_t *) key_params->key_data; 1473 for (i = 0; 1474 i < roundup(key_params->key_len, sizeof(uint32_t)) / 4; 1475 i++) { 1476 *destp = le32_to_cpu(*srcp); 1477 destp++; 1478 srcp++; 1479 } 1480 } 1481 #else 1482 qdf_mem_copy((void *)params.key_data, 1483 (const void *)key_params->key_data, key_params->key_len); 1484 #endif /* BIG_ENDIAN_HOST */ 1485 params.key_len = key_params->key_len; 1486 1487 #ifdef WLAN_FEATURE_11W 1488 if (key_params->key_type == eSIR_ED_AES_128_CMAC) { 1489 iface = &wma_handle->interfaces[key_params->vdev_id]; 1490 if (iface) { 1491 iface->key.key_length = key_params->key_len; 1492 qdf_mem_copy(iface->key.key, 1493 (const void *)key_params->key_data, 1494 iface->key.key_length); 1495 if ((params.key_idx == WMA_IGTK_KEY_INDEX_4) || 1496 (params.key_idx == WMA_IGTK_KEY_INDEX_5)) 1497 qdf_mem_zero(iface->key.key_id[params.key_idx - 1498 WMA_IGTK_KEY_INDEX_4].ipn, 1499 CMAC_IPN_LEN); 1500 } 1501 } 1502 #endif /* WLAN_FEATURE_11W */ 1503 1504 WMA_LOGD("Key setup : vdev_id %d key_idx %d key_type %d key_len %d" 1505 " unicast %d peer_mac %pM def_key_idx %d", key_params->vdev_id, 1506 key_params->key_idx, key_params->key_type, key_params->key_len, 1507 key_params->unicast, key_params->peer_mac, 1508 key_params->def_key_idx); 1509 1510 status = wmi_unified_setup_install_key_cmd(wma_handle->wmi_handle, 1511 ¶ms); 1512 1513 return status; 1514 } 1515 1516 /** 1517 * wma_set_bsskey() - set encryption key to fw. 
1518 * @wma_handle: wma handle 1519 * @key_info: key info 1520 * 1521 * Return: none 1522 */ 1523 void wma_set_bsskey(tp_wma_handle wma_handle, tpSetBssKeyParams key_info) 1524 { 1525 struct wma_set_key_params key_params; 1526 QDF_STATUS status = QDF_STATUS_SUCCESS; 1527 uint32_t i; 1528 uint32_t def_key_idx = 0; 1529 uint32_t wlan_opmode; 1530 void *txrx_vdev; 1531 uint8_t *mac_addr; 1532 void *soc = cds_get_context(QDF_MODULE_ID_SOC); 1533 1534 WMA_LOGD("BSS key setup"); 1535 txrx_vdev = wma_find_vdev_by_id(wma_handle, key_info->smesessionId); 1536 if (!txrx_vdev) { 1537 WMA_LOGE("%s:Invalid vdev handle", __func__); 1538 key_info->status = QDF_STATUS_E_FAILURE; 1539 goto out; 1540 } 1541 wlan_opmode = cdp_get_opmode(soc, txrx_vdev); 1542 1543 /* 1544 * For IBSS, WMI expects the BSS key to be set per peer key 1545 * So cache the BSS key in the wma_handle and re-use it when the 1546 * STA key is been setup for a peer 1547 */ 1548 if (wlan_op_mode_ibss == wlan_opmode) { 1549 key_info->status = QDF_STATUS_SUCCESS; 1550 if (wma_handle->ibss_started > 0) 1551 goto out; 1552 WMA_LOGD("Caching IBSS Key"); 1553 qdf_mem_copy(&wma_handle->ibsskey_info, key_info, 1554 sizeof(tSetBssKeyParams)); 1555 } 1556 1557 qdf_mem_set(&key_params, sizeof(key_params), 0); 1558 key_params.vdev_id = key_info->smesessionId; 1559 key_params.key_type = key_info->encType; 1560 key_params.singl_tid_rc = key_info->singleTidRc; 1561 key_params.unicast = false; 1562 if (wlan_opmode == wlan_op_mode_sta) { 1563 qdf_mem_copy(key_params.peer_mac, 1564 wma_handle->interfaces[key_info->smesessionId].bssid, 1565 IEEE80211_ADDR_LEN); 1566 } else { 1567 mac_addr = cdp_get_vdev_mac_addr(soc, txrx_vdev); 1568 if (mac_addr == NULL) { 1569 WMA_LOGE("%s: mac_addr is NULL for vdev with id %d", 1570 __func__, key_info->smesessionId); 1571 goto out; 1572 } 1573 /* vdev mac address will be passed for all other modes */ 1574 qdf_mem_copy(key_params.peer_mac, mac_addr, 1575 IEEE80211_ADDR_LEN); 1576 WMA_LOGA("BSS Key 
setup with vdev_mac %pM\n", 1577 mac_addr); 1578 } 1579 1580 if (key_info->numKeys == 0 && 1581 (key_info->encType == eSIR_ED_WEP40 || 1582 key_info->encType == eSIR_ED_WEP104)) { 1583 wma_read_cfg_wepkey(wma_handle, key_info->key, 1584 &def_key_idx, &key_info->numKeys); 1585 } 1586 1587 for (i = 0; i < key_info->numKeys; i++) { 1588 if (key_params.key_type != eSIR_ED_NONE && 1589 !key_info->key[i].keyLength) 1590 continue; 1591 if (key_info->encType == eSIR_ED_WPI) { 1592 key_params.key_idx = key_info->key[i].keyId; 1593 key_params.def_key_idx = key_info->key[i].keyId; 1594 } else 1595 key_params.key_idx = key_info->key[i].keyId; 1596 1597 key_params.key_len = key_info->key[i].keyLength; 1598 if (key_info->encType == eSIR_ED_TKIP) { 1599 qdf_mem_copy(key_params.key_data, 1600 key_info->key[i].key, 16); 1601 qdf_mem_copy(&key_params.key_data[16], 1602 &key_info->key[i].key[24], 8); 1603 qdf_mem_copy(&key_params.key_data[24], 1604 &key_info->key[i].key[16], 8); 1605 } else 1606 qdf_mem_copy((void *)key_params.key_data, 1607 (const void *)key_info->key[i].key, 1608 key_info->key[i].keyLength); 1609 1610 WMA_LOGD("%s: bss key[%d] length %d", __func__, i, 1611 key_info->key[i].keyLength); 1612 1613 status = wma_setup_install_key_cmd(wma_handle, &key_params, 1614 wlan_opmode); 1615 if (status == QDF_STATUS_E_NOMEM) { 1616 WMA_LOGE("%s:Failed to setup install key buf", 1617 __func__); 1618 key_info->status = QDF_STATUS_E_NOMEM; 1619 goto out; 1620 } else if (status == QDF_STATUS_E_FAILURE) { 1621 WMA_LOGE("%s:Failed to send install key command", 1622 __func__); 1623 key_info->status = QDF_STATUS_E_FAILURE; 1624 goto out; 1625 } 1626 } 1627 1628 wma_handle->ibss_started++; 1629 /* TODO: Should we wait till we get HTT_T2H_MSG_TYPE_SEC_IND? 
*/ 1630 key_info->status = QDF_STATUS_SUCCESS; 1631 1632 out: 1633 wma_send_msg(wma_handle, WMA_SET_BSSKEY_RSP, (void *)key_info, 0); 1634 } 1635 1636 #ifdef QCA_IBSS_SUPPORT 1637 /** 1638 * wma_calc_ibss_heart_beat_timer() - calculate IBSS heart beat timer 1639 * @peer_num: number of peers 1640 * 1641 * Return: heart beat timer value 1642 */ 1643 static uint16_t wma_calc_ibss_heart_beat_timer(int16_t peer_num) 1644 { 1645 /* heart beat timer value look-up table */ 1646 /* entry index : (the number of currently connected peers) - 1 1647 entry value : the heart time threshold value in seconds for 1648 detecting ibss peer departure */ 1649 static const uint16_t heart_beat_timer[MAX_PEERS] = { 1650 4, 4, 4, 4, 4, 4, 4, 4, 1651 8, 8, 8, 8, 8, 8, 8, 8, 1652 12, 12, 12, 12, 12, 12, 12, 12, 1653 16, 16, 16, 16, 16, 16, 16, 16 1654 }; 1655 1656 if (peer_num < 1 || peer_num > MAX_PEERS) 1657 return 0; 1658 1659 return heart_beat_timer[peer_num - 1]; 1660 1661 } 1662 1663 /** 1664 * wma_adjust_ibss_heart_beat_timer() - set ibss heart beat timer in fw. 
1665 * @wma: wma handle 1666 * @vdev_id: vdev id 1667 * @peer_num_delta: peer number delta value 1668 * 1669 * Return: none 1670 */ 1671 void wma_adjust_ibss_heart_beat_timer(tp_wma_handle wma, 1672 uint8_t vdev_id, 1673 int8_t peer_num_delta) 1674 { 1675 void *vdev; 1676 int16_t new_peer_num; 1677 uint16_t new_timer_value_sec; 1678 uint32_t new_timer_value_ms; 1679 QDF_STATUS status; 1680 void *soc = cds_get_context(QDF_MODULE_ID_SOC); 1681 1682 if (peer_num_delta != 1 && peer_num_delta != -1) { 1683 WMA_LOGE("Invalid peer_num_delta value %d", peer_num_delta); 1684 return; 1685 } 1686 1687 vdev = wma_find_vdev_by_id(wma, vdev_id); 1688 if (!vdev) { 1689 WMA_LOGE("vdev not found : vdev_id %d", vdev_id); 1690 return; 1691 } 1692 1693 /* adjust peer numbers */ 1694 new_peer_num = cdp_peer_update_ibss_add_peer_num_of_vdev(soc, vdev, 1695 peer_num_delta); 1696 if (OL_TXRX_INVALID_NUM_PEERS == new_peer_num) { 1697 WMA_LOGE("new peer num %d out of valid boundary", new_peer_num); 1698 return; 1699 } 1700 1701 /* reset timer value if all peers departed */ 1702 if (new_peer_num == 0) { 1703 cdp_set_ibss_vdev_heart_beat_timer(soc, vdev, 0); 1704 return; 1705 } 1706 1707 /* calculate new timer value */ 1708 new_timer_value_sec = wma_calc_ibss_heart_beat_timer(new_peer_num); 1709 if (new_timer_value_sec == 0) { 1710 WMA_LOGE("timer value %d is invalid for peer number %d", 1711 new_timer_value_sec, new_peer_num); 1712 return; 1713 } 1714 if (new_timer_value_sec == 1715 cdp_set_ibss_vdev_heart_beat_timer(soc, vdev, new_timer_value_sec)) { 1716 WMA_LOGD("timer value %d stays same, no need to notify target", 1717 new_timer_value_sec); 1718 return; 1719 } 1720 1721 new_timer_value_ms = ((uint32_t) new_timer_value_sec) * 1000; 1722 1723 status = wma_vdev_set_param(wma->wmi_handle, vdev_id, 1724 WMI_VDEV_PARAM_IBSS_MAX_BCN_LOST_MS, 1725 new_timer_value_ms); 1726 if (QDF_IS_STATUS_ERROR(status)) { 1727 WMA_LOGE("Failed to set IBSS link monitoring timer value"); 1728 return; 1729 } 
1730 1731 WMA_LOGD("Set IBSS link monitor timer: peer_num = %d timer_value = %d", 1732 new_peer_num, new_timer_value_ms); 1733 } 1734 1735 #endif /* QCA_IBSS_SUPPORT */ 1736 /** 1737 * wma_set_ibsskey_helper() - cached IBSS key in wma handle 1738 * @wma_handle: wma handle 1739 * @key_info: set bss key info 1740 * @peerMacAddr: peer mac address 1741 * 1742 * Return: none 1743 */ 1744 static void wma_set_ibsskey_helper(tp_wma_handle wma_handle, 1745 tpSetBssKeyParams key_info, 1746 struct qdf_mac_addr peer_macaddr) 1747 { 1748 struct wma_set_key_params key_params; 1749 QDF_STATUS status = QDF_STATUS_SUCCESS; 1750 uint32_t i; 1751 uint32_t def_key_idx = 0; 1752 void *txrx_vdev; 1753 int opmode; 1754 void *soc = cds_get_context(QDF_MODULE_ID_SOC); 1755 1756 WMA_LOGD("BSS key setup for peer"); 1757 txrx_vdev = wma_find_vdev_by_id(wma_handle, key_info->smesessionId); 1758 if (!txrx_vdev) { 1759 WMA_LOGE("%s:Invalid vdev handle", __func__); 1760 key_info->status = QDF_STATUS_E_FAILURE; 1761 return; 1762 } 1763 1764 qdf_mem_set(&key_params, sizeof(key_params), 0); 1765 opmode = cdp_get_opmode(soc, txrx_vdev); 1766 qdf_mem_set(&key_params, sizeof(key_params), 0); 1767 key_params.vdev_id = key_info->smesessionId; 1768 key_params.key_type = key_info->encType; 1769 key_params.singl_tid_rc = key_info->singleTidRc; 1770 key_params.unicast = false; 1771 ASSERT(wlan_op_mode_ibss == opmode); 1772 1773 qdf_mem_copy(key_params.peer_mac, peer_macaddr.bytes, 1774 IEEE80211_ADDR_LEN); 1775 1776 if (key_info->numKeys == 0 && 1777 (key_info->encType == eSIR_ED_WEP40 || 1778 key_info->encType == eSIR_ED_WEP104)) { 1779 wma_read_cfg_wepkey(wma_handle, key_info->key, 1780 &def_key_idx, &key_info->numKeys); 1781 } else if ((key_info->encType == eSIR_ED_WEP40) || 1782 (key_info->encType == eSIR_ED_WEP104)) { 1783 struct wma_txrx_node *intf = 1784 &wma_handle->interfaces[key_info->smesessionId]; 1785 key_params.def_key_idx = intf->wep_default_key_idx; 1786 } 1787 1788 for (i = 0; i < 
key_info->numKeys; i++) { 1789 if (key_params.key_type != eSIR_ED_NONE && 1790 !key_info->key[i].keyLength) 1791 continue; 1792 key_params.key_idx = key_info->key[i].keyId; 1793 key_params.key_len = key_info->key[i].keyLength; 1794 if (key_info->encType == eSIR_ED_TKIP) { 1795 qdf_mem_copy(key_params.key_data, 1796 key_info->key[i].key, 16); 1797 qdf_mem_copy(&key_params.key_data[16], 1798 &key_info->key[i].key[24], 8); 1799 qdf_mem_copy(&key_params.key_data[24], 1800 &key_info->key[i].key[16], 8); 1801 } else 1802 qdf_mem_copy((void *)key_params.key_data, 1803 (const void *)key_info->key[i].key, 1804 key_info->key[i].keyLength); 1805 1806 WMA_LOGD("%s: peer bcast key[%d] length %d", __func__, i, 1807 key_info->key[i].keyLength); 1808 1809 status = wma_setup_install_key_cmd(wma_handle, &key_params, 1810 opmode); 1811 if (status == QDF_STATUS_E_NOMEM) { 1812 WMA_LOGE("%s:Failed to setup install key buf", 1813 __func__); 1814 return; 1815 } else if (status == QDF_STATUS_E_FAILURE) { 1816 WMA_LOGE("%s:Failed to send install key command", 1817 __func__); 1818 } 1819 } 1820 } 1821 1822 /** 1823 * wma_set_stakey() - set encryption key 1824 * @wma_handle: wma handle 1825 * @key_info: station key info 1826 * 1827 * This function sets encryption key for WEP/WPA/WPA2 1828 * encryption mode in firmware and send response to upper layer. 
1829 * 1830 * Return: none 1831 */ 1832 void wma_set_stakey(tp_wma_handle wma_handle, tpSetStaKeyParams key_info) 1833 { 1834 int32_t i; 1835 QDF_STATUS status = QDF_STATUS_SUCCESS; 1836 void *txrx_pdev; 1837 void *txrx_vdev; 1838 void *peer; 1839 uint8_t num_keys = 0, peer_id; 1840 struct wma_set_key_params key_params; 1841 uint32_t def_key_idx = 0; 1842 int opmode; 1843 void *soc = cds_get_context(QDF_MODULE_ID_SOC); 1844 1845 WMA_LOGD("STA key setup"); 1846 1847 /* Get the txRx Pdev handle */ 1848 txrx_pdev = cds_get_context(QDF_MODULE_ID_TXRX); 1849 if (!txrx_pdev) { 1850 WMA_LOGE("%s:Invalid txrx pdev handle", __func__); 1851 key_info->status = QDF_STATUS_E_FAILURE; 1852 goto out; 1853 } 1854 1855 peer = cdp_peer_find_by_addr(soc, txrx_pdev, 1856 key_info->peer_macaddr.bytes, 1857 &peer_id); 1858 if (!peer) { 1859 WMA_LOGE("%s:Invalid peer for key setting", __func__); 1860 key_info->status = QDF_STATUS_E_FAILURE; 1861 goto out; 1862 } 1863 1864 txrx_vdev = wma_find_vdev_by_id(wma_handle, key_info->smesessionId); 1865 if (!txrx_vdev) { 1866 WMA_LOGE("%s:TxRx Vdev Handle is NULL", __func__); 1867 key_info->status = QDF_STATUS_E_FAILURE; 1868 goto out; 1869 } 1870 opmode = cdp_get_opmode(soc, txrx_vdev); 1871 1872 if (key_info->defWEPIdx == WMA_INVALID_KEY_IDX && 1873 (key_info->encType == eSIR_ED_WEP40 || 1874 key_info->encType == eSIR_ED_WEP104) && 1875 opmode != wlan_op_mode_ap) { 1876 wma_read_cfg_wepkey(wma_handle, key_info->key, 1877 &def_key_idx, &num_keys); 1878 key_info->defWEPIdx = def_key_idx; 1879 } else { 1880 num_keys = SIR_MAC_MAX_NUM_OF_DEFAULT_KEYS; 1881 if (key_info->encType != eSIR_ED_NONE) { 1882 for (i = 0; i < num_keys; i++) { 1883 if (key_info->key[i].keyDirection == 1884 eSIR_TX_DEFAULT) { 1885 key_info->defWEPIdx = i; 1886 break; 1887 } 1888 } 1889 } 1890 } 1891 qdf_mem_set(&key_params, sizeof(key_params), 0); 1892 key_params.vdev_id = key_info->smesessionId; 1893 key_params.key_type = key_info->encType; 1894 key_params.singl_tid_rc = 
key_info->singleTidRc; 1895 key_params.unicast = true; 1896 key_params.def_key_idx = key_info->defWEPIdx; 1897 qdf_mem_copy((void *)key_params.peer_mac, 1898 (const void *)key_info->peer_macaddr.bytes, 1899 IEEE80211_ADDR_LEN); 1900 for (i = 0; i < num_keys; i++) { 1901 if (key_params.key_type != eSIR_ED_NONE && 1902 !key_info->key[i].keyLength) 1903 continue; 1904 if (key_info->encType == eSIR_ED_TKIP) { 1905 qdf_mem_copy(key_params.key_data, 1906 key_info->key[i].key, 16); 1907 qdf_mem_copy(&key_params.key_data[16], 1908 &key_info->key[i].key[24], 8); 1909 qdf_mem_copy(&key_params.key_data[24], 1910 &key_info->key[i].key[16], 8); 1911 } else 1912 qdf_mem_copy(key_params.key_data, key_info->key[i].key, 1913 key_info->key[i].keyLength); 1914 if (key_info->encType == eSIR_ED_WPI) { 1915 key_params.key_idx = key_info->key[i].keyId; 1916 key_params.def_key_idx = key_info->key[i].keyId; 1917 } else 1918 key_params.key_idx = i; 1919 1920 key_params.key_len = key_info->key[i].keyLength; 1921 status = wma_setup_install_key_cmd(wma_handle, &key_params, 1922 opmode); 1923 if (status == QDF_STATUS_E_NOMEM) { 1924 WMA_LOGE("%s:Failed to setup install key buf", 1925 __func__); 1926 key_info->status = QDF_STATUS_E_NOMEM; 1927 goto out; 1928 } 1929 1930 WMA_LOGD("%s: peer unicast key[%d] %d ", __func__, i, 1931 key_info->key[i].keyLength); 1932 1933 if (status == QDF_STATUS_E_FAILURE) { 1934 WMA_LOGE("%s:Failed to send install key command", 1935 __func__); 1936 key_info->status = QDF_STATUS_E_FAILURE; 1937 goto out; 1938 } 1939 } 1940 1941 /* In IBSS mode, set the BSS KEY for this peer 1942 * BSS key is supposed to be cache into wma_handle 1943 */ 1944 if (wlan_op_mode_ibss == opmode) { 1945 wma_set_ibsskey_helper(wma_handle, &wma_handle->ibsskey_info, 1946 key_info->peer_macaddr); 1947 } 1948 1949 /* TODO: Should we wait till we get HTT_T2H_MSG_TYPE_SEC_IND? 
*/ 1950 key_info->status = QDF_STATUS_SUCCESS; 1951 out: 1952 if (key_info->sendRsp) 1953 wma_send_msg(wma_handle, WMA_SET_STAKEY_RSP, (void *)key_info, 1954 0); 1955 } 1956 1957 /** 1958 * wma_process_update_edca_param_req() - update EDCA params 1959 * @handle: wma handle 1960 * @edca_params: edca parameters 1961 * 1962 * This function updates EDCA parameters to the target 1963 * 1964 * Return: QDF Status 1965 */ 1966 QDF_STATUS wma_process_update_edca_param_req(WMA_HANDLE handle, 1967 tEdcaParams *edca_params) 1968 { 1969 tp_wma_handle wma_handle = (tp_wma_handle) handle; 1970 wmi_wmm_vparams wmm_param[WME_NUM_AC]; 1971 tSirMacEdcaParamRecord *edca_record; 1972 int ac; 1973 void *pdev; 1974 struct ol_tx_wmm_param_t ol_tx_wmm_param; 1975 uint8_t vdev_id; 1976 QDF_STATUS status; 1977 void *soc = cds_get_context(QDF_MODULE_ID_SOC); 1978 vdev_id = edca_params->bssIdx; 1979 1980 for (ac = 0; ac < WME_NUM_AC; ac++) { 1981 switch (ac) { 1982 case WME_AC_BE: 1983 edca_record = &edca_params->acbe; 1984 break; 1985 case WME_AC_BK: 1986 edca_record = &edca_params->acbk; 1987 break; 1988 case WME_AC_VI: 1989 edca_record = &edca_params->acvi; 1990 break; 1991 case WME_AC_VO: 1992 edca_record = &edca_params->acvo; 1993 break; 1994 default: 1995 goto fail; 1996 } 1997 1998 wma_update_edca_params_for_ac(edca_record, &wmm_param[ac], ac); 1999 2000 ol_tx_wmm_param.ac[ac].aifs = wmm_param[ac].aifs; 2001 ol_tx_wmm_param.ac[ac].cwmin = wmm_param[ac].cwmin; 2002 ol_tx_wmm_param.ac[ac].cwmax = wmm_param[ac].cwmax; 2003 } 2004 2005 status = wmi_unified_process_update_edca_param(wma_handle->wmi_handle, 2006 vdev_id, wmm_param); 2007 if (status == QDF_STATUS_E_NOMEM) 2008 return status; 2009 else if (status == QDF_STATUS_E_FAILURE) 2010 goto fail; 2011 2012 pdev = cds_get_context(QDF_MODULE_ID_TXRX); 2013 if (pdev) 2014 cdp_set_wmm_param(soc, pdev, ol_tx_wmm_param); 2015 else 2016 QDF_ASSERT(0); 2017 2018 return QDF_STATUS_SUCCESS; 2019 2020 fail: 2021 WMA_LOGE("%s: Failed to set WMM 
Paremeters", __func__);
	return QDF_STATUS_E_FAILURE;
}

/**
 * wmi_unified_probe_rsp_tmpl_send() - send probe response template to fw
 * @wma: wma handle
 * @vdev_id: vdev id
 * @probe_rsp_info: probe response info
 *
 * Patches the template's timestamp field with the vdev's adjusted TSF
 * and forwards the template to firmware for probe response offload.
 *
 * Return: 0 for success or error code
 */
static int wmi_unified_probe_rsp_tmpl_send(tp_wma_handle wma,
					   uint8_t vdev_id,
					   tpSendProbeRespParams probe_rsp_info)
{
	uint8_t *frm;
	uint64_t adjusted_tsf_le;
	struct ieee80211_frame *wh;
	struct wmi_probe_resp_params params;

	WMA_LOGD(FL("Send probe response template for vdev %d"), vdev_id);

	frm = probe_rsp_info->pProbeRespTemplate;

	/*
	 * Make the TSF offset negative so probe response in the same
	 * staggered batch have the same TSF.
	 */
	adjusted_tsf_le = cpu_to_le64(0ULL -
				      wma->interfaces[vdev_id].tsfadjust);
	/* Update the timestamp in the probe response buffer with adjusted TSF.
	 * The timestamp field immediately follows the 802.11 header (&wh[1]).
	 */
	wh = (struct ieee80211_frame *)frm;
	A_MEMCPY(&wh[1], &adjusted_tsf_le, sizeof(adjusted_tsf_le));

	params.pProbeRespTemplate = probe_rsp_info->pProbeRespTemplate;
	params.probeRespTemplateLen = probe_rsp_info->probeRespTemplateLen;
	qdf_mem_copy(params.bssId, probe_rsp_info->bssId,
		     IEEE80211_ADDR_LEN);
	qdf_mem_copy(params.ucProxyProbeReqValidIEBmap,
		     probe_rsp_info->ucProxyProbeReqValidIEBmap,
		     8 * sizeof(uint32_t));

	return wmi_unified_probe_rsp_tmpl_send_cmd(wma->wmi_handle, vdev_id,
						   &params, frm);
}

/**
 * wma_unified_bcn_tmpl_send() - send beacon template to fw
 * @wma: wma handle
 * @vdev_id: vdev id
 * @bcn_info: beacon info
 * @bytes_to_strip: bytes to strip from the front of @bcn_info->beacon
 *
 * Strips the optional length prefix and P2P IE from the beacon buffer,
 * patches the timestamp with the vdev's adjusted TSF and sends the
 * resulting template to firmware.
 *
 * Return: QDF_STATUS_SUCCESS for success or error code
 */
static QDF_STATUS wma_unified_bcn_tmpl_send(tp_wma_handle wma,
					    uint8_t vdev_id,
					    const tpSendbeaconParams bcn_info,
					    uint8_t bytes_to_strip)
{
	struct beacon_params params = {0};
	uint32_t tmpl_len, tmpl_len_aligned;
	uint8_t *frm;
	QDF_STATUS ret;
	uint8_t *p2p_ie;
	uint16_t p2p_ie_len = 0;
	uint64_t adjusted_tsf_le;
	struct ieee80211_frame *wh;

	WMA_LOGD("Send beacon template for vdev %d", vdev_id);

	if (bcn_info->p2pIeOffset) {
		p2p_ie = bcn_info->beacon + bcn_info->p2pIeOffset;
		/* IE length byte + 2 byte IE header */
		p2p_ie_len = (uint16_t) p2p_ie[1] + 2;
	}

	/*
	 * XXX: The first byte of beacon buffer contains beacon length
	 * only when UMAC in sending the beacon template. In othercases
	 * (ex: from tbtt update) beacon length is read from beacon
	 * information.
	 */
	if (bytes_to_strip)
		tmpl_len = *(uint32_t *) &bcn_info->beacon[0];
	else
		tmpl_len = bcn_info->beaconLength;
	if (p2p_ie_len) {
		/* P2P IE is sent to fw separately, exclude it here */
		tmpl_len -= (uint32_t) p2p_ie_len;
	}

	frm = bcn_info->beacon + bytes_to_strip;
	tmpl_len_aligned = roundup(tmpl_len, sizeof(A_UINT32));
	/*
	 * Make the TSF offset negative so beacons in the same
	 * staggered batch have the same TSF.
	 */
	adjusted_tsf_le = cpu_to_le64(0ULL -
				      wma->interfaces[vdev_id].tsfadjust);
	/* Update the timestamp in the beacon buffer with adjusted TSF */
	wh = (struct ieee80211_frame *)frm;
	A_MEMCPY(&wh[1], &adjusted_tsf_le, sizeof(adjusted_tsf_le));

	params.vdev_id = vdev_id;
	params.tim_ie_offset = bcn_info->timIeOffset - bytes_to_strip;
	params.tmpl_len = tmpl_len;
	params.frm = frm;
	params.tmpl_len_aligned = tmpl_len_aligned;

	ret = wmi_unified_beacon_send_cmd(wma->wmi_handle,
					  &params);
	if (QDF_IS_STATUS_ERROR(ret)) {
		WMA_LOGE("%s: Failed to send bcn tmpl: %d", __func__, ret);
	}

	return ret;
}

/**
 * wma_store_bcn_tmpl() - store beacon template
 * @wma: wma handle
 * @vdev_id: vdev id
 * @bcn_info: beacon params
 *
 * This function stores beacon template locally.
 * This will send to target on the reception of
 * SWBA event.
 *
 * Return: QDF status
 */
static QDF_STATUS wma_store_bcn_tmpl(tp_wma_handle wma, uint8_t vdev_id,
				     tpSendbeaconParams bcn_info)
{
	struct beacon_info *bcn;
	uint32_t len;
	uint8_t *bcn_payload;
	struct beacon_tim_ie *tim_ie;

	bcn = wma->interfaces[vdev_id].beacon;
	if (!bcn || !bcn->buf) {
		WMA_LOGE("%s: Memory is not allocated to hold bcn template",
			 __func__);
		return QDF_STATUS_E_INVAL;
	}

	/* First 4 bytes of the UMAC-supplied buffer hold the beacon length */
	len = *(u32 *) &bcn_info->beacon[0];
	if (len > WMA_BCN_BUF_MAX_SIZE) {
		WMA_LOGE("%s: Received beacon len %d exceeding max limit %d",
			 __func__, len, WMA_BCN_BUF_MAX_SIZE);
		return QDF_STATUS_E_INVAL;
	}
	WMA_LOGD("%s: Storing received beacon template buf to local buffer",
		 __func__);
	/* bcn->buf is also read from the SWBA path; serialize access */
	qdf_spin_lock_bh(&bcn->lock);

	/*
	 * Copy received beacon template content in local buffer.
	 * this will be send to target on the reception of SWBA
	 * event from target.
	 */
	qdf_nbuf_trim_tail(bcn->buf, qdf_nbuf_len(bcn->buf));
	memcpy(qdf_nbuf_data(bcn->buf),
	       bcn_info->beacon + 4 /* Exclude beacon length field */,
	       len);
	/* Offsets were computed against the buffer including the 4-byte
	 * length prefix; rebase them to the stripped payload.
	 */
	if (bcn_info->timIeOffset > 3) {
		bcn->tim_ie_offset = bcn_info->timIeOffset - 4;
	} else {
		bcn->tim_ie_offset = bcn_info->timIeOffset;
	}

	if (bcn_info->p2pIeOffset > 3) {
		bcn->p2p_ie_offset = bcn_info->p2pIeOffset - 4;
	} else {
		bcn->p2p_ie_offset = bcn_info->p2pIeOffset;
	}
	bcn_payload = qdf_nbuf_data(bcn->buf);
	/* NOTE(review): tim_ie_offset is not range-checked against len
	 * before being used to index bcn_payload — confirm the caller
	 * guarantees a sane offset.
	 */
	if (bcn->tim_ie_offset) {
		tim_ie =
			(struct beacon_tim_ie *)(&bcn_payload[bcn->tim_ie_offset]);
		/*
		 * Intial Value of bcn->dtim_count will be 0.
		 * But if the beacon gets updated then current dtim
		 * count will be restored
		 */
		tim_ie->dtim_count = bcn->dtim_count;
		tim_ie->tim_bitctl = 0;
	}

	qdf_nbuf_put_tail(bcn->buf, len);
	bcn->len = len;

	qdf_spin_unlock_bh(&bcn->lock);

	return QDF_STATUS_SUCCESS;
}

/**
 * wma_tbttoffset_update_event_handler() - tbtt offset update handler
 * @handle: wma handle
 * @event: event buffer
 * @len: data length
 *
 * For every vdev flagged in the event's vdev_map, saves the new TSF
 * adjustment and re-sends the locally stored beacon template to
 * firmware with the updated timestamp.
 *
 * Return: 0 for success or error code
 */
int wma_tbttoffset_update_event_handler(void *handle, uint8_t *event,
					uint32_t len)
{
	tp_wma_handle wma = (tp_wma_handle) handle;
	WMI_TBTTOFFSET_UPDATE_EVENTID_param_tlvs *param_buf;
	wmi_tbtt_offset_event_fixed_param *tbtt_offset_event;
	struct wma_txrx_node *intf;
	struct beacon_info *bcn;
	tSendbeaconParams bcn_info;
	uint32_t *adjusted_tsf = NULL;
	uint32_t if_id = 0, vdev_map;

	if (!wma) {
		WMA_LOGE("Invalid wma handle");
		return -EINVAL;
	}

	param_buf = (WMI_TBTTOFFSET_UPDATE_EVENTID_param_tlvs *) event;
	if (!param_buf) {
		WMA_LOGE("Invalid tbtt update event buffer");
		return -EINVAL;
	}

	tbtt_offset_event = param_buf->fixed_param;
	intf = wma->interfaces;
	vdev_map = tbtt_offset_event->vdev_map;
	adjusted_tsf = param_buf->tbttoffset_list;
	if (!adjusted_tsf) {
		WMA_LOGE("%s: Invalid adjusted_tsf", __func__);
		return -EINVAL;
	}

	/* Walk the firmware-provided vdev bitmap, one bit per vdev id.
	 * NOTE(review): neither if_id nor adjusted_tsf[if_id] is bounds
	 * checked against the tbttoffset_list TLV length or the maximum
	 * vdev count — confirm the firmware contract before trusting it.
	 */
	for (; (vdev_map); vdev_map >>= 1, if_id++) {
		if (!(vdev_map & 0x1) || (!(intf[if_id].handle)))
			continue;

		bcn = intf[if_id].beacon;
		if (!bcn) {
			WMA_LOGE("%s: Invalid beacon", __func__);
			return -EINVAL;
		}
		if (!bcn->buf) {
			WMA_LOGE("%s: Invalid beacon buffer", __func__);
			return -EINVAL;
		}
		/* Save the adjusted TSF */
		intf[if_id].tsfadjust = adjusted_tsf[if_id];

		qdf_spin_lock_bh(&bcn->lock);
		qdf_mem_zero(&bcn_info, sizeof(bcn_info));
		bcn_info.beacon = qdf_nbuf_data(bcn->buf);
		bcn_info.p2pIeOffset = bcn->p2p_ie_offset;
		bcn_info.beaconLength = bcn->len;
		bcn_info.timIeOffset = bcn->tim_ie_offset;
		qdf_spin_unlock_bh(&bcn->lock);

		/* Update beacon template in firmware */
		wma_unified_bcn_tmpl_send(wma, if_id, &bcn_info, 0);
	}
	return 0;
}

/**
 * wma_p2p_go_set_beacon_ie() - set beacon IE for p2p go
 * @wma_handle: wma handle
 * @vdev_id: vdev id
 * @p2pIe: p2p IE
 *
 * NOTE(review): this int-returning function returns
 * QDF_STATUS_E_FAILURE (a positive value) on a NULL handle while its
 * caller tests for failure with "< 0" — verify failures propagate as
 * intended.
 *
 * Return: 0 for success or error code
 */
static int wma_p2p_go_set_beacon_ie(t_wma_handle *wma_handle,
				    A_UINT32 vdev_id, uint8_t *p2pIe)
{
	if (!wma_handle) {
		WMA_LOGE("%s: wma handle is NULL", __func__);
		return QDF_STATUS_E_FAILURE;
	}

	return wmi_unified_p2p_go_set_beacon_ie_cmd(wma_handle->wmi_handle,
						    vdev_id, p2pIe);
}

/**
 * wma_send_probe_rsp_tmpl() - send probe resp template
 * @wma: wma handle
 * @probe_rsp_info: probe response info
 *
 * This function sends probe response template to fw which
 * firmware will use in case of probe response offload.
2316 * 2317 * Return: none 2318 */ 2319 void wma_send_probe_rsp_tmpl(tp_wma_handle wma, 2320 tpSendProbeRespParams probe_rsp_info) 2321 { 2322 void *vdev; 2323 uint8_t vdev_id; 2324 tpAniProbeRspStruct probe_rsp; 2325 2326 if (!probe_rsp_info) { 2327 WMA_LOGE(FL("probe_rsp_info is NULL")); 2328 return; 2329 } 2330 2331 probe_rsp = (tpAniProbeRspStruct) (probe_rsp_info->pProbeRespTemplate); 2332 if (!probe_rsp) { 2333 WMA_LOGE(FL("probe_rsp is NULL")); 2334 return; 2335 } 2336 2337 vdev = wma_find_vdev_by_addr(wma, probe_rsp->macHdr.sa, &vdev_id); 2338 if (!vdev) { 2339 WMA_LOGE(FL("failed to get vdev handle")); 2340 return; 2341 } 2342 2343 if (WMI_SERVICE_IS_ENABLED(wma->wmi_service_bitmap, 2344 WMI_SERVICE_BEACON_OFFLOAD)) { 2345 WMA_LOGI("Beacon Offload Enabled Sending Unified command"); 2346 if (wmi_unified_probe_rsp_tmpl_send(wma, vdev_id, 2347 probe_rsp_info) < 0) { 2348 WMA_LOGE(FL("wmi_unified_probe_rsp_tmpl_send Failed ")); 2349 return; 2350 } 2351 } 2352 } 2353 2354 /** 2355 * wma_send_beacon() - send beacon template 2356 * @wma: wma handle 2357 * @bcn_info: beacon info 2358 * 2359 * This funciton store beacon template locally and 2360 * update keep alive parameters 2361 * 2362 * Return: none 2363 */ 2364 void wma_send_beacon(tp_wma_handle wma, tpSendbeaconParams bcn_info) 2365 { 2366 void *vdev; 2367 uint8_t vdev_id; 2368 QDF_STATUS status; 2369 uint8_t *p2p_ie; 2370 tpAniBeaconStruct beacon; 2371 struct vdev_up_params param = {0}; 2372 2373 beacon = (tpAniBeaconStruct) (bcn_info->beacon); 2374 vdev = wma_find_vdev_by_addr(wma, beacon->macHdr.sa, &vdev_id); 2375 if (!vdev) { 2376 WMA_LOGE("%s : failed to get vdev handle", __func__); 2377 return; 2378 } 2379 2380 if (WMI_SERVICE_IS_ENABLED(wma->wmi_service_bitmap, 2381 WMI_SERVICE_BEACON_OFFLOAD)) { 2382 WMA_LOGI("Beacon Offload Enabled Sending Unified command"); 2383 status = wma_unified_bcn_tmpl_send(wma, vdev_id, bcn_info, 4); 2384 if (QDF_IS_STATUS_ERROR(status)) { 2385 WMA_LOGE("%s : 
wmi_unified_bcn_tmpl_send Failed ", 2386 __func__); 2387 return; 2388 } 2389 2390 if (bcn_info->p2pIeOffset) { 2391 p2p_ie = bcn_info->beacon + bcn_info->p2pIeOffset; 2392 WMA_LOGI 2393 (" %s: p2pIe is present - vdev_id %hu, p2p_ie = %p, p2p ie len = %hu", 2394 __func__, vdev_id, p2p_ie, p2p_ie[1]); 2395 if (wma_p2p_go_set_beacon_ie(wma, vdev_id, p2p_ie) < 0) { 2396 WMA_LOGE 2397 ("%s : wmi_unified_bcn_tmpl_send Failed ", 2398 __func__); 2399 return; 2400 } 2401 } 2402 } 2403 status = wma_store_bcn_tmpl(wma, vdev_id, bcn_info); 2404 if (status != QDF_STATUS_SUCCESS) { 2405 WMA_LOGE("%s : wma_store_bcn_tmpl Failed", __func__); 2406 return; 2407 } 2408 if (!((qdf_atomic_read( 2409 &wma->interfaces[vdev_id].vdev_restart_params. 2410 hidden_ssid_restart_in_progress)) || 2411 (wma->interfaces[vdev_id].is_channel_switch))) { 2412 if (!wma->interfaces[vdev_id].vdev_up) { 2413 param.vdev_id = vdev_id; 2414 param.assoc_id = 0; 2415 status = wmi_unified_vdev_up_send(wma->wmi_handle, 2416 bcn_info->bssId, 2417 ¶m); 2418 if (QDF_IS_STATUS_ERROR(status)) { 2419 WMA_LOGE(FL("failed to send vdev up")); 2420 cds_set_do_hw_mode_change_flag(false); 2421 return; 2422 } 2423 wma->interfaces[vdev_id].vdev_up = true; 2424 wma_set_sap_keepalive(wma, vdev_id); 2425 2426 } 2427 } 2428 } 2429 2430 /** 2431 * wma_set_keepalive_req() - send keep alive request to fw 2432 * @wma: wma handle 2433 * @keepalive: keep alive parameters 2434 * 2435 * Return: none 2436 */ 2437 void wma_set_keepalive_req(tp_wma_handle wma, 2438 tSirKeepAliveReq *keepalive) 2439 { 2440 WMA_LOGD("KEEPALIVE:PacketType:%d", keepalive->packetType); 2441 wma_set_sta_keep_alive(wma, keepalive->sessionId, 2442 keepalive->packetType, 2443 keepalive->timePeriod, 2444 keepalive->hostIpv4Addr, 2445 keepalive->destIpv4Addr, 2446 keepalive->dest_macaddr.bytes); 2447 2448 qdf_mem_free(keepalive); 2449 } 2450 2451 /** 2452 * wma_beacon_miss_handler() - beacon miss event handler 2453 * @wma: wma handle 2454 * @vdev_id: vdev id 2455 * 
 * This function send beacon miss indication to upper layers.
 *
 * Return: none
 */
void wma_beacon_miss_handler(tp_wma_handle wma, uint32_t vdev_id)
{
	tSirSmeMissedBeaconInd *beacon_miss_ind;

	beacon_miss_ind = (tSirSmeMissedBeaconInd *) qdf_mem_malloc
				  (sizeof(tSirSmeMissedBeaconInd));

	if (NULL == beacon_miss_ind) {
		WMA_LOGE("%s: Memory allocation failure", __func__);
		return;
	}
	beacon_miss_ind->messageType = WMA_MISSED_BEACON_IND;
	beacon_miss_ind->length = sizeof(tSirSmeMissedBeaconInd);
	beacon_miss_ind->bssIdx = vdev_id;

	/* Ownership of beacon_miss_ind transfers to the message recipient */
	wma_send_msg(wma, WMA_MISSED_BEACON_IND, (void *)beacon_miss_ind, 0);
}

/**
 * wma_process_mgmt_tx_completion() - process mgmt completion
 * @wma_handle: wma handle
 * @desc_id: descriptor id
 * @status: status
 *
 * Looks up the tx descriptor for @desc_id, unmaps its network buffer,
 * invokes the registered packet-dump/completion/OTA callbacks and
 * returns the descriptor to the pool.
 *
 * Return: 0 for success or error code
 */
static int wma_process_mgmt_tx_completion(tp_wma_handle wma_handle,
					  uint32_t desc_id, uint32_t status)
{
	struct wmi_desc_t *wmi_desc;

	void *pdev = cds_get_context(QDF_MODULE_ID_TXRX);

	if (pdev == NULL) {
		WMA_LOGE("%s: NULL pdev pointer", __func__);
		return -EINVAL;
	}

	WMA_LOGI("%s: status:%d wmi_desc_id:%d", __func__, status, desc_id);

	wmi_desc = (struct wmi_desc_t *)
			(&wma_handle->wmi_desc_pool.array[desc_id]);

	/* NOTE(review): wmi_desc is the address of an array element and
	 * can never be NULL, so this check is dead code; meanwhile
	 * desc_id (firmware-supplied) is not validated against the pool
	 * size — confirm the firmware contract bounds desc_id.
	 */
	if (!wmi_desc) {
		WMA_LOGE("%s: Invalid wmi desc", __func__);
		return -EINVAL;
	}

	if (wmi_desc->nbuf)
		qdf_nbuf_unmap_single(wma_handle->qdf_dev, wmi_desc->nbuf,
				      QDF_DMA_TO_DEVICE);

	if (wma_handle->wma_mgmt_tx_packetdump_cb)
		wma_handle->wma_mgmt_tx_packetdump_cb(wmi_desc->nbuf,
			QDF_STATUS_SUCCESS, wmi_desc->vdev_id, TX_MGMT_PKT);

	if (wmi_desc->tx_cmpl_cb)
		wmi_desc->tx_cmpl_cb(wma_handle->mac_context,
				     wmi_desc->nbuf, 1);

	if (wmi_desc->ota_post_proc_cb)
		wmi_desc->ota_post_proc_cb((tpAniSirGlobal)
					   wma_handle->mac_context,
					   status);

	wmi_desc_put(wma_handle, wmi_desc);
	return 0;
}

/**
 * wma_mgmt_tx_completion_handler() - wma mgmt Tx completion event handler
 * @handle: wma handle
 * @cmpl_event_params: completion event handler data
 * @len: length of @cmpl_event_params
 *
 * Return: 0 on success; error number otherwise
 */

int wma_mgmt_tx_completion_handler(void *handle, uint8_t *cmpl_event_params,
				   uint32_t len)
{
	tp_wma_handle wma_handle = (tp_wma_handle)handle;
	WMI_MGMT_TX_COMPLETION_EVENTID_param_tlvs *param_buf;
	wmi_mgmt_tx_compl_event_fixed_param *cmpl_params;

	param_buf = (WMI_MGMT_TX_COMPLETION_EVENTID_param_tlvs *)
		cmpl_event_params;
	if (!param_buf || !wma_handle) {
		WMA_LOGE("%s: Invalid mgmt Tx completion event", __func__);
		return -EINVAL;
	}
	cmpl_params = param_buf->fixed_param;

	wma_process_mgmt_tx_completion(wma_handle,
				       cmpl_params->desc_id, cmpl_params->status);

	return 0;
}

/**
 * wma_mgmt_tx_bundle_completion_handler() - mgmt bundle comp handler
 * @handle: wma handle
 * @buf: buffer
 * @len: length
 *
 * Processes a bundled tx-completion event: one (desc_id, status) pair
 * per report.
 *
 * Return: 0 for success or error code
 */
int wma_mgmt_tx_bundle_completion_handler(void *handle, uint8_t *buf,
					  uint32_t len)
{
	tp_wma_handle wma_handle = (tp_wma_handle)handle;
	WMI_MGMT_TX_BUNDLE_COMPLETION_EVENTID_param_tlvs *param_buf;
	wmi_mgmt_tx_compl_bundle_event_fixed_param *cmpl_params;
	uint32_t num_reports;
	uint32_t *desc_ids;
	uint32_t *status;
	int i;

	param_buf = (WMI_MGMT_TX_BUNDLE_COMPLETION_EVENTID_param_tlvs *)buf;
	if (!param_buf || !wma_handle) {
		WMA_LOGE("%s: Invalid mgmt Tx completion event", __func__);
		return -EINVAL;
	}
	cmpl_params = param_buf->fixed_param;
	num_reports =
		cmpl_params->num_reports;
	desc_ids = (uint32_t *)(param_buf->desc_ids);
	status = (uint32_t *)(param_buf->status);

	/* NOTE(review): num_reports is firmware-supplied and not
	 * validated against the desc_ids/status TLV array lengths —
	 * confirm before indexing.
	 */
	for (i = 0; i < num_reports; i++)
		wma_process_mgmt_tx_completion(wma_handle,
					       desc_ids[i], status[i]);
	return 0;
}

/**
 * wma_process_update_opmode() - process update VHT opmode cmd from UMAC
 * @wma_handle: wma handle
 * @update_vht_opmode: vht opmode
 *
 * Return: none
 */
void wma_process_update_opmode(tp_wma_handle wma_handle,
			       tUpdateVHTOpMode *update_vht_opmode)
{
	WMA_LOGD("%s: opMode = %d", __func__, update_vht_opmode->opMode);

	/* The VHT operating mode maps to the peer channel-width param */
	wma_set_peer_param(wma_handle, update_vht_opmode->peer_mac,
			   WMI_PEER_CHWIDTH, update_vht_opmode->opMode,
			   update_vht_opmode->smesessionId);
}

/**
 * wma_process_update_rx_nss() - process update RX NSS cmd from UMAC
 * @wma_handle: wma handle
 * @update_rx_nss: rx nss value
 *
 * Clamps the requested NSS to the device chainmask, caches it on the
 * interface and pushes it to firmware as a peer param.
 *
 * Return: none
 */
void wma_process_update_rx_nss(tp_wma_handle wma_handle,
			       tUpdateRxNss *update_rx_nss)
{
	struct wma_txrx_node *intr =
		&wma_handle->interfaces[update_rx_nss->smesessionId];
	int rx_nss = update_rx_nss->rxNss;

	/* may reduce rx_nss to what the chain configuration supports */
	wma_update_txrx_chainmask(wma_handle->num_rf_chains, &rx_nss);

	intr->nss = (uint8_t)rx_nss;
	update_rx_nss->rxNss = (uint32_t)rx_nss;

	WMA_LOGD("%s: Rx Nss = %d", __func__, update_rx_nss->rxNss);

	wma_set_peer_param(wma_handle, update_rx_nss->peer_mac,
			   WMI_PEER_NSS, update_rx_nss->rxNss,
			   update_rx_nss->smesessionId);
}

/**
 * wma_process_update_membership() - process update group membership cmd
 * @wma_handle: wma handle
 * @membership: group membership info
 *
 * Return: none
 */
void wma_process_update_membership(tp_wma_handle wma_handle,
				  tUpdateMembership *membership)
{
	WMA_LOGD("%s: membership = %x ", __func__, membership->membership);

	wma_set_peer_param(wma_handle, membership->peer_mac,
			   WMI_PEER_MEMBERSHIP, membership->membership,
			   membership->smesessionId);
}

/**
 * wma_process_update_userpos() - process update user pos cmd from UMAC
 * @wma_handle: wma handle
 * @userpos: user pos value
 *
 * Return: none
 */
void wma_process_update_userpos(tp_wma_handle wma_handle,
				tUpdateUserPos *userpos)
{
	WMA_LOGD("%s: userPos = %x ", __func__, userpos->userPos);

	wma_set_peer_param(wma_handle, userpos->peer_mac,
			   WMI_PEER_USERPOS, userpos->userPos,
			   userpos->smesessionId);

	/* Now that membership/userpos is updated in fw,
	 * enable GID PPS.
	 */
	wma_set_ppsconfig(userpos->smesessionId, WMA_VHT_PPS_GID_MATCH, 1);

}

/* Set the CTS2SELF-for-P2P-GO pdev parameter in firmware.
 * NOTE(review): pdevparam is only partially initialized; confirm
 * wmi_unified_pdev_param_send() reads no other fields.
 */
QDF_STATUS wma_set_cts2self_for_p2p_go(void *wma_handle,
				       uint32_t cts2self_for_p2p_go)
{
	int32_t ret;
	tp_wma_handle wma = (tp_wma_handle)wma_handle;
	struct pdev_params pdevparam;

	pdevparam.param_id = WMI_PDEV_PARAM_CTS2SELF_FOR_P2P_GO_CONFIG;
	pdevparam.param_value = cts2self_for_p2p_go;

	ret = wmi_unified_pdev_param_send(wma->wmi_handle,
					  &pdevparam,
					  WMA_WILDCARD_PDEV_ID);
	if (ret) {
		WMA_LOGE("Fail to Set CTS2SELF for p2p GO %d",
			 cts2self_for_p2p_go);
		return QDF_STATUS_E_FAILURE;
	}

	WMA_LOGD("Successfully Set CTS2SELF for p2p GO %d",
		 cts2self_for_p2p_go);

	return QDF_STATUS_SUCCESS;
}


/**
 * wma_set_htconfig() - set ht config parameters to target
 * @vdev_id: vdev id
 * @ht_capab: ht capablity
 * @value: value of ht param
 *
 * Return: QDF status
 */
QDF_STATUS wma_set_htconfig(uint8_t vdev_id, uint16_t ht_capab, int value)
{
	tp_wma_handle wma = cds_get_context(QDF_MODULE_ID_WMA);
	QDF_STATUS ret = QDF_STATUS_E_FAILURE;

	if (NULL == wma) {
		WMA_LOGE("%s: Failed to get wma", __func__);
		return QDF_STATUS_E_INVAL;
	}

	/* Map the CFG HT capability to the corresponding WMI vdev param */
	switch (ht_capab) {
	case WNI_CFG_HT_CAP_INFO_ADVANCE_CODING:
		ret = wma_vdev_set_param(wma->wmi_handle, vdev_id,
					 WMI_VDEV_PARAM_LDPC,
					 value);
		break;
	case WNI_CFG_HT_CAP_INFO_TX_STBC:
		ret = wma_vdev_set_param(wma->wmi_handle, vdev_id,
					 WMI_VDEV_PARAM_TX_STBC,
					 value);
		break;
	case WNI_CFG_HT_CAP_INFO_RX_STBC:
		ret = wma_vdev_set_param(wma->wmi_handle, vdev_id,
					 WMI_VDEV_PARAM_RX_STBC,
					 value);
		break;
	case WNI_CFG_HT_CAP_INFO_SHORT_GI_20MHZ:
	case WNI_CFG_HT_CAP_INFO_SHORT_GI_40MHZ:
		WMA_LOGE("%s: ht_capab = %d, value = %d", __func__, ht_capab,
			 value);
		ret = wma_vdev_set_param(wma->wmi_handle, vdev_id,
					 WMI_VDEV_PARAM_SGI, value);
		/* Cache SGI only once firmware has accepted it */
		if (ret == QDF_STATUS_SUCCESS)
			wma->interfaces[vdev_id].config.shortgi = value;
		break;
	default:
		WMA_LOGE("%s:INVALID HT CONFIG", __func__);
	}

	return ret;
}

/**
 * wma_hidden_ssid_vdev_restart() - vdev restart for hidden ssid
 * @wma_handle: wma handle
 * @pReq: hidden ssid vdev restart request
 *
 * Validates that the request targets an AP vdev, records the new
 * hidden-SSID setting, pauses the tx queues and issues a vdev stop;
 * the stop response path performs the restart and vdev up.
 *
 * NOTE(review): pReq->sessionId indexes the interfaces array before
 * any range check against the maximum vdev count — confirm callers
 * guarantee a valid session id.
 *
 * Return: none
 */
void wma_hidden_ssid_vdev_restart(tp_wma_handle wma_handle,
				  tHalHiddenSsidVdevRestart *pReq)
{
	struct wma_txrx_node *intr = wma_handle->interfaces;
	struct wma_target_req *msg;
	void *soc = cds_get_context(QDF_MODULE_ID_SOC);

	if ((pReq->sessionId !=
	     intr[pReq->sessionId].vdev_restart_params.vdev_id)
	    || !((intr[pReq->sessionId].type == WMI_VDEV_TYPE_AP)
		 && (intr[pReq->sessionId].sub_type == 0))) {
		WMA_LOGE("%s : invalid session id", __func__);
		return;
	}

	intr[pReq->sessionId].vdev_restart_params.ssidHidden = pReq->ssidHidden;
	/* Flag consulted by wma_send_beacon() to defer vdev up */
	qdf_atomic_set(&intr[pReq->sessionId].vdev_restart_params.
		       hidden_ssid_restart_in_progress, 1);

	msg = wma_fill_vdev_req(wma_handle, pReq->sessionId,
				WMA_HIDDEN_SSID_VDEV_RESTART,
				WMA_TARGET_REQ_TYPE_VDEV_STOP, pReq,
				WMA_VDEV_STOP_REQUEST_TIMEOUT);
	if (!msg) {
		WMA_LOGE("%s: Failed to fill vdev restart request for vdev_id %d",
			 __func__, pReq->sessionId);
		return;
	}

	/* vdev stop -> vdev restart -> vdev up */
	WMA_LOGD("%s, vdev_id: %d, pausing tx_ll_queue for VDEV_STOP",
		 __func__, pReq->sessionId);
	cdp_fc_vdev_pause(soc, wma_handle->interfaces[pReq->sessionId].handle,
			  OL_TXQ_PAUSE_REASON_VDEV_STOP);
	wma_handle->interfaces[pReq->sessionId].pause_bitmap |=
		(1 << PAUSE_TYPE_HOST);
	if (wmi_unified_vdev_stop_send(wma_handle->wmi_handle, pReq->sessionId)) {
		WMA_LOGE("%s: %d Failed to send vdev stop", __func__, __LINE__);
		/* Roll back the in-progress flag and pending request */
		qdf_atomic_set(&intr[pReq->sessionId].vdev_restart_params.
			       hidden_ssid_restart_in_progress, 0);
		wma_remove_vdev_req(wma_handle, pReq->sessionId,
				    WMA_TARGET_REQ_TYPE_VDEV_STOP);
		return;
	}
}


#ifdef WLAN_FEATURE_11W

/**
 * wma_extract_ccmp_pn() - extract 6 byte PN from the CCMP header
 * @ccmp_ptr: CCMP header
 *
 * Return: PN extracted from header.
 */
static uint64_t wma_extract_ccmp_pn(uint8_t *ccmp_ptr)
{
	uint8_t rsvd, key, pn[6];
	uint64_t new_pn;

	/*
	 * +-----+-----+------+----------+-----+-----+-----+-----+
	 * | PN0 | PN1 | rsvd | rsvd/key | PN2 | PN3 | PN4 | PN5 |
	 * +-----+-----+------+----------+-----+-----+-----+-----+
	 *                CCMP Header Format
	 */

	/* Extract individual bytes (rsvd/key read for completeness only) */
	pn[0] = (uint8_t) *ccmp_ptr;
	pn[1] = (uint8_t) *(ccmp_ptr + 1);
	rsvd = (uint8_t) *(ccmp_ptr + 2);
	key = (uint8_t) *(ccmp_ptr + 3);
	pn[2] = (uint8_t) *(ccmp_ptr + 4);
	pn[3] = (uint8_t) *(ccmp_ptr + 5);
	pn[4] = (uint8_t) *(ccmp_ptr + 6);
	pn[5] = (uint8_t) *(ccmp_ptr + 7);

	/* Form 6 byte PN with 6 individual bytes of PN */
	new_pn = ((uint64_t) pn[5] << 40) |
		 ((uint64_t) pn[4] << 32) |
		 ((uint64_t) pn[3] << 24) |
		 ((uint64_t) pn[2] << 16) |
		 ((uint64_t) pn[1] << 8) | ((uint64_t) pn[0] << 0);

	WMA_LOGE("PN of received packet is %llu", new_pn);
	return new_pn;
}

/**
 * wma_is_ccmp_pn_replay_attack() - detect replay attacking using PN in CCMP
 * @cds_ctx: cds context
 * @wh: 802.11 frame header
 * @ccmp_ptr: CCMP frame header
 *
 * Compares the received frame's CCMP PN against the last PN recorded
 * for the peer; a non-increasing PN is treated as a replay. Lookup
 * failures are treated conservatively as attacks (frame dropped).
 *
 * Return: true/false
 */
static bool
wma_is_ccmp_pn_replay_attack(void *cds_ctx, struct ieee80211_frame *wh,
			     uint8_t *ccmp_ptr)
{
	void *pdev;
	void *vdev;
	void *peer;
	uint8_t vdev_id, peer_id;
	uint8_t *last_pn_valid;
	uint64_t *last_pn, new_pn;
	uint32_t *rmf_pn_replays;
	void *soc = cds_get_context(QDF_MODULE_ID_SOC);

	pdev = cds_get_context(QDF_MODULE_ID_TXRX);
	if (!pdev) {
		WMA_LOGE("%s: Failed to find pdev", __func__);
		return true;
	}

	vdev = wma_find_vdev_by_bssid(cds_ctx, wh->i_addr3, &vdev_id);
	if (!vdev) {
		WMA_LOGE("%s: Failed to find vdev", __func__);
		return true;
	}

	/* Retrieve the peer based on vdev and addr */
	peer = cdp_peer_find_by_addr_and_vdev(soc, pdev, vdev, wh->i_addr2,
					      &peer_id);

	if (NULL == peer) {
		WMA_LOGE("%s: Failed to find peer, Not able to validate PN",
			 __func__);
		return true;
	}

	new_pn = wma_extract_ccmp_pn(ccmp_ptr);
	cdp_get_pn_info(soc, peer, &last_pn_valid, &last_pn, &rmf_pn_replays);

	if (*last_pn_valid) {
		if (new_pn > *last_pn) {
			*last_pn = new_pn;
			WMA_LOGE("%s: PN validation successful", __func__);
		} else {
			WMA_LOGE("%s: PN Replay attack detected", __func__);
			/* per 11W amendment, keeping track of replay attacks */
			*rmf_pn_replays += 1;
			return true;
		}
	} else {
		/* First protected frame from this peer: seed the PN state */
		*last_pn_valid = 1;
		*last_pn = new_pn;
	}

	return false;
}

/**
 * wma_process_bip() - process mmie in rmf frame
 * @wma_handle: wma handle
 * @iface: txrx node
 * @wh: 80211 frame
 * @wbuf: Buffer
 *
 * Validates (or, with PMF offload, merely strips) the MMIE carried at
 * the tail of a broadcast/multicast robust management frame.
 *
 * Return: 0 for success or error code
 */

static
int wma_process_bip(tp_wma_handle wma_handle,
		    struct wma_txrx_node *iface,
		    struct ieee80211_frame *wh,
		    qdf_nbuf_t wbuf
		    )
{
	uint16_t key_id;
	uint8_t *efrm;

	/* efrm points one past the end of the frame; the MMIE sits at
	 * the tail and byte 2 of it carries the key id.
	 */
	efrm = qdf_nbuf_data(wbuf) + qdf_nbuf_len(wbuf);
	key_id = (uint16_t)*(efrm - cds_get_mmie_size() + 2);

	if (!((key_id == WMA_IGTK_KEY_INDEX_4)
	      || (key_id == WMA_IGTK_KEY_INDEX_5))) {
		WMA_LOGE(FL("Invalid KeyID(%d) dropping the frame"), key_id);
		return -EINVAL;
	}
	if (WMI_SERVICE_IS_ENABLED(wma_handle->wmi_service_bitmap,
				   WMI_SERVICE_STA_PMF_OFFLOAD)) {
		/*
		 * if 11w offload is enabled then mmie validation is performed
		 * in firmware, host just need to trim the mmie.
2945 */ 2946 qdf_nbuf_trim_tail(wbuf, cds_get_mmie_size()); 2947 } else { 2948 if (cds_is_mmie_valid(iface->key.key, 2949 iface->key.key_id[key_id - WMA_IGTK_KEY_INDEX_4].ipn, 2950 (uint8_t *) wh, efrm)) { 2951 WMA_LOGE(FL("Protected BC/MC frame MMIE validation successful")); 2952 /* Remove MMIE */ 2953 qdf_nbuf_trim_tail(wbuf, cds_get_mmie_size()); 2954 } else { 2955 WMA_LOGE(FL("BC/MC MIC error or MMIE not present, dropping the frame")); 2956 return -EINVAL; 2957 } 2958 } 2959 return 0; 2960 } 2961 2962 /** 2963 * wma_process_rmf_frame() - process rmf frame 2964 * @wma_handle: wma handle 2965 * @iface: txrx node 2966 * @wh: 80211 frame 2967 * @rx_pkt: rx packet 2968 * @wbuf: Buffer 2969 * 2970 * Return: 0 for success or error code 2971 */ 2972 static 2973 int wma_process_rmf_frame(tp_wma_handle wma_handle, 2974 struct wma_txrx_node *iface, 2975 struct ieee80211_frame *wh, 2976 cds_pkt_t *rx_pkt, 2977 qdf_nbuf_t wbuf) 2978 { 2979 uint8_t *orig_hdr; 2980 uint8_t *ccmp; 2981 2982 if ((wh)->i_fc[1] & IEEE80211_FC1_WEP) { 2983 if (IEEE80211_IS_BROADCAST(wh->i_addr1) || 2984 IEEE80211_IS_MULTICAST(wh->i_addr1)) { 2985 WMA_LOGE("Encrypted BC/MC frame dropping the frame"); 2986 cds_pkt_return_packet(rx_pkt); 2987 return -EINVAL; 2988 } 2989 2990 orig_hdr = (uint8_t *) qdf_nbuf_data(wbuf); 2991 /* Pointer to head of CCMP header */ 2992 ccmp = orig_hdr + sizeof(*wh); 2993 if (wma_is_ccmp_pn_replay_attack( 2994 wma_handle, wh, ccmp)) { 2995 WMA_LOGE("Dropping the frame"); 2996 cds_pkt_return_packet(rx_pkt); 2997 return -EINVAL; 2998 } 2999 3000 /* Strip privacy headers (and trailer) 3001 * for a received frame 3002 */ 3003 qdf_mem_move(orig_hdr + 3004 IEEE80211_CCMP_HEADERLEN, wh, 3005 sizeof(*wh)); 3006 qdf_nbuf_pull_head(wbuf, 3007 IEEE80211_CCMP_HEADERLEN); 3008 qdf_nbuf_trim_tail(wbuf, IEEE80211_CCMP_MICLEN); 3009 /* 3010 * CCMP header has been pulled off 3011 * reinitialize the start pointer of mac header 3012 * to avoid accessing incorrect address 3013 */ 3014 wh = 
(struct ieee80211_frame *) qdf_nbuf_data(wbuf); 3015 rx_pkt->pkt_meta.mpdu_hdr_ptr = 3016 qdf_nbuf_data(wbuf); 3017 rx_pkt->pkt_meta.mpdu_len = qdf_nbuf_len(wbuf); 3018 rx_pkt->pkt_meta.mpdu_data_len = 3019 rx_pkt->pkt_meta.mpdu_len - 3020 rx_pkt->pkt_meta.mpdu_hdr_len; 3021 rx_pkt->pkt_meta.mpdu_data_ptr = 3022 rx_pkt->pkt_meta.mpdu_hdr_ptr + 3023 rx_pkt->pkt_meta.mpdu_hdr_len; 3024 rx_pkt->pkt_meta.tsf_delta = rx_pkt->pkt_meta.tsf_delta; 3025 rx_pkt->pkt_buf = wbuf; 3026 WMA_LOGD(FL("BSSID: "MAC_ADDRESS_STR" tsf_delta: %u"), 3027 MAC_ADDR_ARRAY(wh->i_addr3), rx_pkt->pkt_meta.tsf_delta); 3028 } else { 3029 if (IEEE80211_IS_BROADCAST(wh->i_addr1) || 3030 IEEE80211_IS_MULTICAST(wh->i_addr1)) { 3031 if (0 != wma_process_bip(wma_handle, iface, wh, wbuf)) { 3032 cds_pkt_return_packet(rx_pkt); 3033 return -EINVAL; 3034 } 3035 } else { 3036 WMA_LOGE("Rx unprotected unicast mgmt frame"); 3037 rx_pkt->pkt_meta.dpuFeedback = 3038 DPU_FEEDBACK_UNPROTECTED_ERROR; 3039 } 3040 } 3041 return 0; 3042 } 3043 #endif 3044 3045 /** 3046 * wma_is_pkt_drop_candidate() - check if the mgmt frame should be droppped 3047 * @wma_handle: wma handle 3048 * @peer_addr: peer MAC address 3049 * @subtype: Management frame subtype 3050 * 3051 * This function is used to decide if a particular management frame should be 3052 * dropped to prevent DOS attack. Timestamp is used to decide the DOS attack. 3053 * 3054 * Return: true if the packet should be dropped and false oterwise 3055 */ 3056 static bool wma_is_pkt_drop_candidate(tp_wma_handle wma_handle, 3057 uint8_t *peer_addr, uint8_t subtype) 3058 { 3059 void *peer; 3060 void *pdev_ctx; 3061 uint8_t peer_id; 3062 bool should_drop = false; 3063 qdf_time_t *ptr; 3064 void *soc = cds_get_context(QDF_MODULE_ID_SOC); 3065 3066 /* 3067 * Currently this function handles only Disassoc, 3068 * Deauth and Assoc req frames. Return false for 3069 * all other frames. 
3070 */ 3071 if (subtype != IEEE80211_FC0_SUBTYPE_DISASSOC && 3072 subtype != IEEE80211_FC0_SUBTYPE_DEAUTH && 3073 subtype != IEEE80211_FC0_SUBTYPE_ASSOC_REQ) { 3074 should_drop = false; 3075 goto end; 3076 } 3077 3078 pdev_ctx = cds_get_context(QDF_MODULE_ID_TXRX); 3079 if (!pdev_ctx) { 3080 WMA_LOGE(FL("Failed to get the context")); 3081 should_drop = true; 3082 goto end; 3083 } 3084 3085 peer = cdp_peer_find_by_addr(soc, pdev_ctx, peer_addr, &peer_id); 3086 if (!peer) { 3087 if (SIR_MAC_MGMT_ASSOC_REQ != subtype) { 3088 WMA_LOGI( 3089 FL("Received mgmt frame: %0x from unknow peer: %pM"), 3090 subtype, peer_addr); 3091 should_drop = true; 3092 } 3093 goto end; 3094 } 3095 3096 switch (subtype) { 3097 case SIR_MAC_MGMT_ASSOC_REQ: 3098 ptr = cdp_peer_last_assoc_received(soc, peer); 3099 if (ptr == NULL) { 3100 WMA_LOGE(FL("cdp_peer_last_assoc_received Failed")); 3101 should_drop = true; 3102 goto end; 3103 } else { 3104 if ((qdf_get_system_timestamp() - 3105 *cdp_peer_last_assoc_received(soc, peer)) < 3106 WMA_MGMT_FRAME_DETECT_DOS_TIMER) { 3107 WMA_LOGI(FL("Dropping Assoc Req received")); 3108 should_drop = true; 3109 } 3110 } 3111 *cdp_peer_last_assoc_received(soc, peer) = 3112 qdf_get_system_timestamp(); 3113 break; 3114 case SIR_MAC_MGMT_DISASSOC: 3115 if (*cdp_peer_last_disassoc_received(soc, peer)) { 3116 if ((qdf_get_system_timestamp() - 3117 *cdp_peer_last_disassoc_received(soc, peer)) < 3118 WMA_MGMT_FRAME_DETECT_DOS_TIMER) { 3119 WMA_LOGI(FL("Dropping DisAssoc received")); 3120 should_drop = true; 3121 } 3122 } 3123 *cdp_peer_last_disassoc_received(soc, peer) = 3124 qdf_get_system_timestamp(); 3125 break; 3126 case SIR_MAC_MGMT_DEAUTH: 3127 if (*cdp_peer_last_deauth_received(soc, peer)) { 3128 if ((qdf_get_system_timestamp() - 3129 *cdp_peer_last_deauth_received(soc, peer)) < 3130 WMA_MGMT_FRAME_DETECT_DOS_TIMER) { 3131 WMA_LOGI(FL("Dropping Deauth received")); 3132 should_drop = true; 3133 } 3134 } 3135 *cdp_peer_last_deauth_received(soc, peer) = 3136 
qdf_get_system_timestamp(); 3137 break; 3138 default: 3139 break; 3140 } 3141 3142 end: 3143 return should_drop; 3144 } 3145 3146 /** 3147 * wma_mgmt_rx_process() - process management rx frame. 3148 * @handle: wma handle 3149 * @data: rx data 3150 * @data_len: data length 3151 * 3152 * Return: 0 for success or error code 3153 */ 3154 static int wma_mgmt_rx_process(void *handle, uint8_t *data, 3155 uint32_t data_len) 3156 { 3157 tp_wma_handle wma_handle = (tp_wma_handle) handle; 3158 WMI_MGMT_RX_EVENTID_param_tlvs *param_tlvs = NULL; 3159 wmi_mgmt_rx_hdr *hdr = NULL; 3160 struct wma_txrx_node *iface = NULL; 3161 uint8_t vdev_id = WMA_INVALID_VDEV_ID; 3162 cds_pkt_t *rx_pkt; 3163 qdf_nbuf_t wbuf; 3164 struct ieee80211_frame *wh; 3165 uint8_t mgt_type, mgt_subtype; 3166 int status; 3167 3168 if (!wma_handle) { 3169 WMA_LOGE("%s: Failed to get WMA context", __func__); 3170 return -EINVAL; 3171 } 3172 3173 param_tlvs = (WMI_MGMT_RX_EVENTID_param_tlvs *) data; 3174 if (!param_tlvs) { 3175 WMA_LOGE("Get NULL point message from FW"); 3176 return -EINVAL; 3177 } 3178 3179 hdr = param_tlvs->hdr; 3180 if (!hdr) { 3181 WMA_LOGE("Rx event is NULL"); 3182 return -EINVAL; 3183 } 3184 3185 if (hdr->buf_len < sizeof(struct ieee80211_frame)) { 3186 WMA_LOGE("Invalid rx mgmt packet"); 3187 return -EINVAL; 3188 } 3189 3190 rx_pkt = qdf_mem_malloc(sizeof(*rx_pkt)); 3191 if (!rx_pkt) { 3192 WMA_LOGE("Failed to allocate rx packet"); 3193 return -ENOMEM; 3194 } 3195 3196 if (cds_is_load_or_unload_in_progress()) { 3197 WMA_LOGW(FL("Load/Unload in progress")); 3198 return -EINVAL; 3199 } 3200 3201 if (cds_is_driver_recovering()) { 3202 WMA_LOGW(FL("Recovery in progress")); 3203 return -EINVAL; 3204 } 3205 3206 qdf_mem_zero(rx_pkt, sizeof(*rx_pkt)); 3207 3208 /* 3209 * Fill in meta information needed by pe/lim 3210 * TODO: Try to maintain rx metainfo as part of skb->data. 
3211 */ 3212 rx_pkt->pkt_meta.channel = hdr->channel; 3213 rx_pkt->pkt_meta.scan_src = hdr->flags; 3214 3215 /* 3216 * Get the rssi value from the current snr value 3217 * using standard noise floor of -96. 3218 */ 3219 rx_pkt->pkt_meta.rssi = hdr->snr + WMA_NOISE_FLOOR_DBM_DEFAULT; 3220 rx_pkt->pkt_meta.snr = hdr->snr; 3221 3222 /* If absolute rssi is available from firmware, use it */ 3223 if (hdr->rssi != 0) 3224 rx_pkt->pkt_meta.rssi_raw = hdr->rssi; 3225 else 3226 rx_pkt->pkt_meta.rssi_raw = rx_pkt->pkt_meta.rssi; 3227 3228 3229 /* 3230 * FIXME: Assigning the local timestamp as hw timestamp is not 3231 * available. Need to see if pe/lim really uses this data. 3232 */ 3233 rx_pkt->pkt_meta.timestamp = (uint32_t) jiffies; 3234 rx_pkt->pkt_meta.mpdu_hdr_len = sizeof(struct ieee80211_frame); 3235 rx_pkt->pkt_meta.mpdu_len = hdr->buf_len; 3236 rx_pkt->pkt_meta.mpdu_data_len = hdr->buf_len - 3237 rx_pkt->pkt_meta.mpdu_hdr_len; 3238 3239 rx_pkt->pkt_meta.roamCandidateInd = 0; 3240 3241 /* Why not just use rx_event->hdr.buf_len? 
*/ 3242 wbuf = qdf_nbuf_alloc(NULL, roundup(hdr->buf_len, 4), 0, 4, false); 3243 if (!wbuf) { 3244 WMA_LOGE("%s: Failed to allocate wbuf for mgmt rx len(%u)", 3245 __func__, hdr->buf_len); 3246 qdf_mem_free(rx_pkt); 3247 return -ENOMEM; 3248 } 3249 3250 qdf_nbuf_put_tail(wbuf, hdr->buf_len); 3251 qdf_nbuf_set_protocol(wbuf, ETH_P_CONTROL); 3252 wh = (struct ieee80211_frame *)qdf_nbuf_data(wbuf); 3253 3254 rx_pkt->pkt_meta.mpdu_hdr_ptr = qdf_nbuf_data(wbuf); 3255 rx_pkt->pkt_meta.mpdu_data_ptr = rx_pkt->pkt_meta.mpdu_hdr_ptr + 3256 rx_pkt->pkt_meta.mpdu_hdr_len; 3257 rx_pkt->pkt_meta.tsf_delta = hdr->tsf_delta; 3258 rx_pkt->pkt_buf = wbuf; 3259 3260 #ifdef BIG_ENDIAN_HOST 3261 { 3262 /* 3263 * for big endian host, copy engine byte_swap is enabled 3264 * But the rx mgmt frame buffer content is in network byte order 3265 * Need to byte swap the mgmt frame buffer content - so when 3266 * copy engine does byte_swap - host gets buffer content in the 3267 * correct byte order. 3268 */ 3269 int i; 3270 uint32_t *destp, *srcp; 3271 destp = (uint32_t *) wh; 3272 srcp = (uint32_t *) param_tlvs->bufp; 3273 for (i = 0; 3274 i < (roundup(hdr->buf_len, sizeof(uint32_t)) / 4); i++) { 3275 *destp = cpu_to_le32(*srcp); 3276 destp++; 3277 srcp++; 3278 } 3279 } 3280 #else 3281 qdf_mem_copy(wh, param_tlvs->bufp, hdr->buf_len); 3282 #endif 3283 3284 WMA_LOGD( 3285 FL("BSSID: "MAC_ADDRESS_STR" snr = %d, rssi = %d, rssi_raw = %d tsf_delta: %u"), 3286 MAC_ADDR_ARRAY(wh->i_addr3), 3287 hdr->snr, rx_pkt->pkt_meta.rssi, 3288 rx_pkt->pkt_meta.rssi_raw, 3289 hdr->tsf_delta); 3290 if (!wma_handle->mgmt_rx) { 3291 WMA_LOGE("Not registered for Mgmt rx, dropping the frame"); 3292 cds_pkt_return_packet(rx_pkt); 3293 return -EINVAL; 3294 } 3295 3296 /* If it is a beacon/probe response, save it for future use */ 3297 mgt_type = (wh)->i_fc[0] & IEEE80211_FC0_TYPE_MASK; 3298 mgt_subtype = (wh)->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK; 3299 3300 #ifdef WLAN_FEATURE_11W 3301 if (mgt_type == 
IEEE80211_FC0_TYPE_MGT && 3302 (mgt_subtype == IEEE80211_FC0_SUBTYPE_DISASSOC || 3303 mgt_subtype == IEEE80211_FC0_SUBTYPE_DEAUTH || 3304 mgt_subtype == IEEE80211_FC0_SUBTYPE_ACTION)) { 3305 if (wma_find_vdev_by_bssid( 3306 wma_handle, wh->i_addr3, &vdev_id)) { 3307 iface = &(wma_handle->interfaces[vdev_id]); 3308 if (iface->rmfEnabled) { 3309 status = wma_process_rmf_frame(wma_handle, 3310 iface, wh, rx_pkt, wbuf); 3311 /* 3312 * CCMP header might have been pulled off 3313 * reinitialize the start pointer of mac header 3314 */ 3315 wh = (struct ieee80211_frame *) 3316 qdf_nbuf_data(wbuf); 3317 if (status != 0) 3318 return status; 3319 } 3320 } 3321 } 3322 #endif /* WLAN_FEATURE_11W */ 3323 rx_pkt->pkt_meta.sessionId = 3324 (vdev_id == WMA_INVALID_VDEV_ID ? 0 : vdev_id); 3325 3326 if (wma_is_pkt_drop_candidate(wma_handle, wh->i_addr2, mgt_subtype)) { 3327 cds_pkt_return_packet(rx_pkt); 3328 return -EINVAL; 3329 } 3330 3331 if ((mgt_type == IEEE80211_FC0_TYPE_MGT && 3332 mgt_subtype != IEEE80211_FC0_SUBTYPE_BEACON) && 3333 wma_handle->wma_mgmt_rx_packetdump_cb) 3334 wma_handle->wma_mgmt_rx_packetdump_cb(rx_pkt->pkt_buf, 3335 QDF_STATUS_SUCCESS, rx_pkt->pkt_meta.sessionId, 3336 RX_MGMT_PKT); 3337 3338 wma_handle->mgmt_rx(wma_handle, rx_pkt); 3339 return 0; 3340 } 3341 3342 /** 3343 * wma_de_register_mgmt_frm_client() - deregister management frame 3344 * @cds_ctx: cds context 3345 * 3346 * Return: QDF status 3347 */ 3348 QDF_STATUS wma_de_register_mgmt_frm_client(void *cds_ctx) 3349 { 3350 tp_wma_handle wma_handle; 3351 3352 #ifdef QCA_WIFI_FTM 3353 if (cds_get_conparam() == QDF_GLOBAL_FTM_MODE) 3354 return QDF_STATUS_SUCCESS; 3355 #endif 3356 3357 wma_handle = cds_get_context(QDF_MODULE_ID_WMA); 3358 if (!wma_handle) { 3359 WMA_LOGE("%s: Failed to get WMA context", __func__); 3360 return QDF_STATUS_E_FAILURE; 3361 } 3362 3363 if (wmi_unified_unregister_event_handler(wma_handle->wmi_handle, 3364 WMI_MGMT_RX_EVENTID) != 0) { 3365 WMA_LOGE("Failed to Unregister rx mgmt 
handler with wmi"); 3366 return QDF_STATUS_E_FAILURE; 3367 } 3368 wma_handle->mgmt_rx = NULL; 3369 return QDF_STATUS_SUCCESS; 3370 } 3371 3372 #ifdef WLAN_FEATURE_ROAM_OFFLOAD 3373 /** 3374 * wma_register_roaming_callbacks() - Register roaming callbacks 3375 * @cds_ctx: CDS Context 3376 * @csr_roam_synch_cb: CSR roam synch callback routine pointer 3377 * @pe_roam_synch_cb: PE roam synch callback routine pointer 3378 * 3379 * Register the SME and PE callback routines with WMA for 3380 * handling roaming 3381 * 3382 * Return: Success or Failure Status 3383 */ 3384 QDF_STATUS wma_register_roaming_callbacks(void *cds_ctx, 3385 void (*csr_roam_synch_cb)(tpAniSirGlobal mac, 3386 roam_offload_synch_ind *roam_synch_data, 3387 tpSirBssDescription bss_desc_ptr, 3388 enum sir_roam_op_code reason), 3389 QDF_STATUS (*pe_roam_synch_cb)(tpAniSirGlobal mac, 3390 roam_offload_synch_ind *roam_synch_data, 3391 tpSirBssDescription bss_desc_ptr)) 3392 { 3393 3394 tp_wma_handle wma = cds_get_context(QDF_MODULE_ID_WMA); 3395 3396 if (!wma) { 3397 WMA_LOGE("%s: Failed to get WMA context", __func__); 3398 return QDF_STATUS_E_FAILURE; 3399 } 3400 wma->csr_roam_synch_cb = csr_roam_synch_cb; 3401 wma->pe_roam_synch_cb = pe_roam_synch_cb; 3402 WMA_LOGD("Registered roam synch callbacks with WMA successfully"); 3403 return QDF_STATUS_SUCCESS; 3404 } 3405 #endif 3406 3407 /** 3408 * wma_register_mgmt_frm_client() - register management frame callback 3409 * @cds_ctx: cds context 3410 * @mgmt_frm_rx: management frame 3411 * 3412 * Return: QDF status 3413 */ 3414 QDF_STATUS wma_register_mgmt_frm_client( 3415 void *cds_ctx, wma_mgmt_frame_rx_callback mgmt_frm_rx) 3416 { 3417 tp_wma_handle wma_handle = cds_get_context(QDF_MODULE_ID_WMA); 3418 3419 if (!wma_handle) { 3420 WMA_LOGE("%s: Failed to get WMA context", __func__); 3421 return QDF_STATUS_E_FAILURE; 3422 } 3423 3424 if (wmi_unified_register_event_handler(wma_handle->wmi_handle, 3425 WMI_MGMT_RX_EVENTID, 3426 wma_mgmt_rx_process, 3427 
WMA_RX_WORK_CTX) != 0) { 3428 WMA_LOGE("Failed to register rx mgmt handler with wmi"); 3429 return QDF_STATUS_E_FAILURE; 3430 } 3431 wma_handle->mgmt_rx = mgmt_frm_rx; 3432 3433 return QDF_STATUS_SUCCESS; 3434 } 3435 3436 /** 3437 * wma_register_packetdump_callback() - stores tx and rx mgmt packet dump 3438 * callback handler 3439 * @wma_mgmt_tx_packetdump_cb: tx mgmt packetdump cb 3440 * @wma_mgmt_rx_packetdump_cb: rx mgmt packetdump cb 3441 * 3442 * This function is used to store tx and rx mgmt. packet dump callback 3443 * 3444 * Return: None 3445 * 3446 */ 3447 void wma_register_packetdump_callback( 3448 tp_wma_packetdump_cb wma_mgmt_tx_packetdump_cb, 3449 tp_wma_packetdump_cb wma_mgmt_rx_packetdump_cb) 3450 { 3451 tp_wma_handle wma_handle = cds_get_context(QDF_MODULE_ID_WMA); 3452 3453 if (!wma_handle) { 3454 WMA_LOGE("wma handle is NULL"); 3455 return; 3456 } 3457 3458 wma_handle->wma_mgmt_tx_packetdump_cb = wma_mgmt_tx_packetdump_cb; 3459 wma_handle->wma_mgmt_rx_packetdump_cb = wma_mgmt_rx_packetdump_cb; 3460 } 3461 3462 /** 3463 * wma_deregister_packetdump_callback() - removes tx and rx mgmt packet dump 3464 * callback handler 3465 * 3466 * This function is used to remove tx and rx mgmt. packet dump callback 3467 * 3468 * Return: None 3469 * 3470 */ 3471 void wma_deregister_packetdump_callback(void) 3472 { 3473 tp_wma_handle wma_handle = cds_get_context(QDF_MODULE_ID_WMA); 3474 3475 if (!wma_handle) { 3476 WMA_LOGE("wma handle is NULL"); 3477 return; 3478 } 3479 3480 wma_handle->wma_mgmt_tx_packetdump_cb = NULL; 3481 wma_handle->wma_mgmt_rx_packetdump_cb = NULL; 3482 } 3483