1 /* 2 * Copyright (c) 2013-2016 The Linux Foundation. All rights reserved. 3 * 4 * Previously licensed under the ISC license by Qualcomm Atheros, Inc. 5 * 6 * 7 * Permission to use, copy, modify, and/or distribute this software for 8 * any purpose with or without fee is hereby granted, provided that the 9 * above copyright notice and this permission notice appear in all 10 * copies. 11 * 12 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL 13 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED 14 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE 15 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL 16 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR 17 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER 18 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR 19 * PERFORMANCE OF THIS SOFTWARE. 20 */ 21 22 /* 23 * This file was originally distributed by Qualcomm Atheros, Inc. 24 * under proprietary terms before Copyright ownership was assigned 25 * to the Linux Foundation. 26 */ 27 28 /** 29 * DOC: wma_features.c 30 * This file contains different features related functions like WoW, 31 * Offloads, TDLS etc. 
32 */ 33 34 /* Header files */ 35 36 #include "cds_ieee80211_common.h" /* ieee80211_frame */ 37 #include "wma.h" 38 #include "wma_api.h" 39 #include "cds_api.h" 40 #include "wmi_unified_api.h" 41 #include "wlan_qct_sys.h" 42 #include "wni_api.h" 43 #include "ani_global.h" 44 #include "wmi_unified.h" 45 #include "wni_cfg.h" 46 #include "cfg_api.h" 47 #include "ol_txrx_ctrl_api.h" 48 #include <cdp_txrx_tx_delay.h> 49 #include <cdp_txrx_peer_ops.h> 50 51 #include "qdf_nbuf.h" 52 #include "qdf_types.h" 53 #include "qdf_mem.h" 54 #include "ol_txrx_peer_find.h" 55 56 #include "wma_types.h" 57 #include "lim_api.h" 58 #include "lim_session_utils.h" 59 60 #include "cds_utils.h" 61 62 #if !defined(REMOVE_PKT_LOG) 63 #include "pktlog_ac.h" 64 #endif /* REMOVE_PKT_LOG */ 65 66 #include "dbglog_host.h" 67 #include "csr_api.h" 68 #include "ol_fw.h" 69 70 #include "dfs.h" 71 #include "radar_filters.h" 72 #include "wma_internal.h" 73 #include "ol_txrx.h" 74 #include "wma_nan_datapath.h" 75 76 #ifndef ARRAY_LENGTH 77 #define ARRAY_LENGTH(a) (sizeof(a) / sizeof((a)[0])) 78 #endif 79 80 #define WMA_WOW_STA_WAKE_UP_EVENTS ((1 << WOW_CSA_IE_EVENT) |\ 81 (1 << WOW_CLIENT_KICKOUT_EVENT) |\ 82 (1 << WOW_PATTERN_MATCH_EVENT) |\ 83 (1 << WOW_MAGIC_PKT_RECVD_EVENT) |\ 84 (1 << WOW_DEAUTH_RECVD_EVENT) |\ 85 (1 << WOW_DISASSOC_RECVD_EVENT) |\ 86 (1 << WOW_BMISS_EVENT) |\ 87 (1 << WOW_GTK_ERR_EVENT) |\ 88 (1 << WOW_BETTER_AP_EVENT) |\ 89 (1 << WOW_HTT_EVENT) |\ 90 (1 << WOW_RA_MATCH_EVENT) |\ 91 (1 << WOW_NLO_DETECTED_EVENT) |\ 92 (1 << WOW_EXTSCAN_EVENT)) |\ 93 (1 << WOW_OEM_RESPONSE_EVENT)|\ 94 (1 << WOW_TDLS_CONN_TRACKER_EVENT)\ 95 96 #define WMA_WOW_SAP_WAKE_UP_EVENTS ((1 << WOW_PROBE_REQ_WPS_IE_EVENT) |\ 97 (1 << WOW_PATTERN_MATCH_EVENT) |\ 98 (1 << WOW_AUTH_REQ_EVENT) |\ 99 (1 << WOW_ASSOC_REQ_EVENT) |\ 100 (1 << WOW_DEAUTH_RECVD_EVENT) |\ 101 (1 << WOW_DISASSOC_RECVD_EVENT) |\ 102 (1 << WOW_HTT_EVENT))\ 103 104 /** 105 * WMA_SET_VDEV_IE_SOURCE_HOST - Flag to identify the source of VDEV 
SET IE 106 * command. The value is 0x0 for the VDEV SET IE WMI commands from mobile 107 * MCL platform. 108 */ 109 #define WMA_SET_VDEV_IE_SOURCE_HOST 0x0 110 111 static const uint8_t arp_ptrn[] = {0x08, 0x06}; 112 static const uint8_t arp_mask[] = {0xff, 0xff}; 113 static const uint8_t ns_ptrn[] = {0x86, 0xDD}; 114 static const uint8_t discvr_ptrn[] = {0xe0, 0x00, 0x00, 0xf8}; 115 static const uint8_t discvr_mask[] = {0xf0, 0x00, 0x00, 0xf8}; 116 117 #ifdef FEATURE_WLAN_AUTO_SHUTDOWN 118 /** 119 * wma_post_auto_shutdown_msg() - to post auto shutdown event to sme 120 * 121 * Return: 0 for success or error code 122 */ 123 static int wma_post_auto_shutdown_msg(void) 124 { 125 tSirAutoShutdownEvtParams *auto_sh_evt; 126 QDF_STATUS qdf_status; 127 cds_msg_t sme_msg = { 0 }; 128 129 auto_sh_evt = (tSirAutoShutdownEvtParams *) 130 qdf_mem_malloc(sizeof(tSirAutoShutdownEvtParams)); 131 if (!auto_sh_evt) { 132 WMA_LOGE(FL("No Mem")); 133 return -ENOMEM; 134 } 135 136 auto_sh_evt->shutdown_reason = 137 WMI_HOST_AUTO_SHUTDOWN_REASON_TIMER_EXPIRY; 138 sme_msg.type = eWNI_SME_AUTO_SHUTDOWN_IND; 139 sme_msg.bodyptr = auto_sh_evt; 140 sme_msg.bodyval = 0; 141 142 qdf_status = cds_mq_post_message(QDF_MODULE_ID_SME, &sme_msg); 143 if (!QDF_IS_STATUS_SUCCESS(qdf_status)) { 144 WMA_LOGE("Fail to post eWNI_SME_AUTO_SHUTDOWN_IND msg to SME"); 145 qdf_mem_free(auto_sh_evt); 146 return -EINVAL; 147 } 148 149 return 0; 150 } 151 #endif 152 /** 153 * wma_send_snr_request() - send request to fw to get RSSI stats 154 * @wma_handle: wma handle 155 * @pGetRssiReq: get RSSI request 156 * 157 * Return: QDF status 158 */ 159 QDF_STATUS wma_send_snr_request(tp_wma_handle wma_handle, 160 void *pGetRssiReq) 161 { 162 tAniGetRssiReq *pRssiBkUp = NULL; 163 164 /* command is in progess */ 165 if (NULL != wma_handle->pGetRssiReq) 166 return QDF_STATUS_SUCCESS; 167 168 /* create a copy of csrRssiCallback to send rssi value 169 * after wmi event 170 */ 171 if (pGetRssiReq) { 172 pRssiBkUp = 
qdf_mem_malloc(sizeof(tAniGetRssiReq)); 173 if (!pRssiBkUp) { 174 WMA_LOGE("Failed to allocate memory for tAniGetRssiReq"); 175 wma_handle->pGetRssiReq = NULL; 176 return QDF_STATUS_E_NOMEM; 177 } 178 pRssiBkUp->sessionId = 179 ((tAniGetRssiReq *) pGetRssiReq)->sessionId; 180 pRssiBkUp->rssiCallback = 181 ((tAniGetRssiReq *) pGetRssiReq)->rssiCallback; 182 pRssiBkUp->pDevContext = 183 ((tAniGetRssiReq *) pGetRssiReq)->pDevContext; 184 wma_handle->pGetRssiReq = (void *)pRssiBkUp; 185 } 186 187 if (wmi_unified_snr_request_cmd(wma_handle->wmi_handle)) { 188 WMA_LOGE("Failed to send host stats request to fw"); 189 qdf_mem_free(pRssiBkUp); 190 wma_handle->pGetRssiReq = NULL; 191 return QDF_STATUS_E_FAILURE; 192 } 193 return QDF_STATUS_SUCCESS; 194 } 195 196 /** 197 * wma_get_snr() - get RSSI from fw 198 * @psnr_req: request params 199 * 200 * Return: QDF status 201 */ 202 QDF_STATUS wma_get_snr(tAniGetSnrReq *psnr_req) 203 { 204 tAniGetSnrReq *psnr_req_bkp; 205 tp_wma_handle wma_handle = NULL; 206 struct wma_txrx_node *intr; 207 208 wma_handle = cds_get_context(QDF_MODULE_ID_WMA); 209 210 if (NULL == wma_handle) { 211 WMA_LOGE("%s : Failed to get wma_handle", __func__); 212 return QDF_STATUS_E_FAULT; 213 } 214 215 intr = &wma_handle->interfaces[psnr_req->sessionId]; 216 /* command is in progess */ 217 if (NULL != intr->psnr_req) { 218 WMA_LOGE("%s : previous snr request is pending", __func__); 219 return QDF_STATUS_SUCCESS; 220 } 221 222 psnr_req_bkp = qdf_mem_malloc(sizeof(tAniGetSnrReq)); 223 if (!psnr_req_bkp) { 224 WMA_LOGE("Failed to allocate memory for tAniGetSnrReq"); 225 return QDF_STATUS_E_NOMEM; 226 } 227 228 qdf_mem_set(psnr_req_bkp, sizeof(tAniGetSnrReq), 0); 229 psnr_req_bkp->staId = psnr_req->staId; 230 psnr_req_bkp->pDevContext = psnr_req->pDevContext; 231 psnr_req_bkp->snrCallback = psnr_req->snrCallback; 232 intr->psnr_req = (void *)psnr_req_bkp; 233 234 if (wmi_unified_snr_cmd(wma_handle->wmi_handle, 235 psnr_req->sessionId)) { 236 WMA_LOGE("Failed to 
send host stats request to fw"); 237 qdf_mem_free(psnr_req_bkp); 238 intr->psnr_req = NULL; 239 return QDF_STATUS_E_FAILURE; 240 } 241 242 return QDF_STATUS_SUCCESS; 243 } 244 245 /** 246 * wma_process_link_status_req() - process link status request from UMAC 247 * @wma: wma handle 248 * @pGetLinkStatus: get link params 249 * 250 * Return: none 251 */ 252 void wma_process_link_status_req(tp_wma_handle wma, 253 tAniGetLinkStatus *pGetLinkStatus) 254 { 255 struct link_status_params cmd = {0}; 256 struct wma_txrx_node *iface = 257 &wma->interfaces[pGetLinkStatus->sessionId]; 258 259 if (iface->plink_status_req) { 260 WMA_LOGE("%s:previous link status request is pending,deleting the new request", 261 __func__); 262 qdf_mem_free(pGetLinkStatus); 263 return; 264 } 265 266 iface->plink_status_req = pGetLinkStatus; 267 cmd.session_id = pGetLinkStatus->sessionId; 268 if (wmi_unified_link_status_req_cmd(wma->wmi_handle, &cmd)) { 269 WMA_LOGE("Failed to send WMI link status request to fw"); 270 iface->plink_status_req = NULL; 271 goto end; 272 } 273 274 return; 275 276 end: 277 wma_post_link_status(pGetLinkStatus, LINK_STATUS_LEGACY); 278 } 279 280 #ifdef WLAN_FEATURE_TSF 281 /** 282 * wma_vdev_tsf_handler() - handle tsf event indicated by FW 283 * @handle: wma context 284 * @data: event buffer 285 * @data len: length of event buffer 286 * 287 * Return: 0 on success 288 */ 289 int wma_vdev_tsf_handler(void *handle, uint8_t *data, uint32_t data_len) 290 { 291 cds_msg_t tsf_msg = {0}; 292 WMI_VDEV_TSF_REPORT_EVENTID_param_tlvs *param_buf; 293 wmi_vdev_tsf_report_event_fixed_param *tsf_event; 294 struct stsf *ptsf; 295 296 if (data == NULL) { 297 WMA_LOGE("%s: invalid pointer", __func__); 298 return -EINVAL; 299 } 300 ptsf = qdf_mem_malloc(sizeof(*ptsf)); 301 if (NULL == ptsf) { 302 WMA_LOGE("%s: failed to allocate tsf data structure", __func__); 303 return -ENOMEM; 304 } 305 306 param_buf = (WMI_VDEV_TSF_REPORT_EVENTID_param_tlvs *)data; 307 tsf_event = param_buf->fixed_param; 
308 309 ptsf->vdev_id = tsf_event->vdev_id; 310 ptsf->tsf_low = tsf_event->tsf_low; 311 ptsf->tsf_high = tsf_event->tsf_high; 312 ptsf->soc_timer_low = tsf_event->qtimer_low; 313 ptsf->soc_timer_high = tsf_event->qtimer_high; 314 315 WMA_LOGD("%s: receive WMI_VDEV_TSF_REPORT_EVENTID ", __func__); 316 WMA_LOGD("%s: vdev_id = %u,tsf_low =%u, tsf_high = %u", __func__, 317 ptsf->vdev_id, ptsf->tsf_low, ptsf->tsf_high); 318 319 tsf_msg.type = eWNI_SME_TSF_EVENT; 320 tsf_msg.bodyptr = ptsf; 321 tsf_msg.bodyval = 0; 322 323 if (QDF_STATUS_SUCCESS != 324 cds_mq_post_message(CDS_MQ_ID_SME, &tsf_msg)) { 325 326 WMA_LOGP("%s: Failed to post eWNI_SME_TSF_EVENT", __func__); 327 qdf_mem_free(ptsf); 328 return -EINVAL; 329 } 330 return 0; 331 } 332 333 #ifdef QCA_WIFI_3_0 334 #define TSF_FW_ACTION_CMD TSF_TSTAMP_QTIMER_CAPTURE_REQ 335 #else 336 #define TSF_FW_ACTION_CMD TSF_TSTAMP_CAPTURE_REQ 337 #endif 338 /** 339 * wma_capture_tsf() - send wmi to fw to capture tsf 340 * @wma_handle: wma handler 341 * @vdev_id: vdev id 342 * 343 * Return: wmi send state 344 */ 345 QDF_STATUS wma_capture_tsf(tp_wma_handle wma_handle, uint32_t vdev_id) 346 { 347 QDF_STATUS status = QDF_STATUS_SUCCESS; 348 wmi_buf_t buf; 349 wmi_vdev_tsf_tstamp_action_cmd_fixed_param *cmd; 350 int ret; 351 int len = sizeof(*cmd); 352 353 buf = wmi_buf_alloc(wma_handle->wmi_handle, len); 354 if (!buf) { 355 WMA_LOGP("%s: failed to allocate memory for cap tsf cmd", 356 __func__); 357 return QDF_STATUS_E_NOMEM; 358 } 359 360 cmd = (wmi_vdev_tsf_tstamp_action_cmd_fixed_param *) wmi_buf_data(buf); 361 cmd->vdev_id = vdev_id; 362 cmd->tsf_action = TSF_FW_ACTION_CMD; 363 WMA_LOGD("%s :vdev_id %u, tsf_cmd: %d", __func__, cmd->vdev_id, 364 cmd->tsf_action); 365 366 WMITLV_SET_HDR(&cmd->tlv_header, 367 WMITLV_TAG_STRUC_wmi_vdev_tsf_tstamp_action_cmd_fixed_param, 368 WMITLV_GET_STRUCT_TLVLEN( 369 wmi_vdev_tsf_tstamp_action_cmd_fixed_param)); 370 371 ret = wmi_unified_cmd_send(wma_handle->wmi_handle, buf, len, 372 
WMI_VDEV_TSF_TSTAMP_ACTION_CMDID); 373 if (ret != EOK) { 374 WMA_LOGE("wmi_unified_cmd_send returned Error %d", status); 375 status = QDF_STATUS_E_FAILURE; 376 goto error; 377 } 378 379 return QDF_STATUS_SUCCESS; 380 381 error: 382 if (buf) 383 wmi_buf_free(buf); 384 return status; 385 } 386 387 /** 388 * wma_reset_tsf_gpio() - send wmi to fw to reset GPIO 389 * @wma_handle: wma handler 390 * @vdev_id: vdev id 391 * 392 * Return: wmi send state 393 */ 394 QDF_STATUS wma_reset_tsf_gpio(tp_wma_handle wma_handle, uint32_t vdev_id) 395 { 396 QDF_STATUS status = QDF_STATUS_SUCCESS; 397 wmi_buf_t buf; 398 wmi_vdev_tsf_tstamp_action_cmd_fixed_param *cmd; 399 int ret; 400 int len = sizeof(*cmd); 401 uint8_t *buf_ptr; 402 403 buf = wmi_buf_alloc(wma_handle->wmi_handle, len); 404 if (!buf) { 405 WMA_LOGP("%s: failed to allocate memory for reset tsf gpio", 406 __func__); 407 return QDF_STATUS_E_NOMEM; 408 } 409 410 buf_ptr = (uint8_t *) wmi_buf_data(buf); 411 cmd = (wmi_vdev_tsf_tstamp_action_cmd_fixed_param *) buf_ptr; 412 cmd->vdev_id = vdev_id; 413 cmd->tsf_action = TSF_TSTAMP_CAPTURE_RESET; 414 415 WMA_LOGD("%s :vdev_id %u, TSF_TSTAMP_CAPTURE_RESET", __func__, 416 cmd->vdev_id); 417 418 WMITLV_SET_HDR(&cmd->tlv_header, 419 WMITLV_TAG_STRUC_wmi_vdev_tsf_tstamp_action_cmd_fixed_param, 420 WMITLV_GET_STRUCT_TLVLEN( 421 wmi_vdev_tsf_tstamp_action_cmd_fixed_param)); 422 423 ret = wmi_unified_cmd_send(wma_handle->wmi_handle, buf, len, 424 WMI_VDEV_TSF_TSTAMP_ACTION_CMDID); 425 426 if (ret != EOK) { 427 WMA_LOGE("wmi_unified_cmd_send returned Error %d", status); 428 status = QDF_STATUS_E_FAILURE; 429 goto error; 430 } 431 return QDF_STATUS_SUCCESS; 432 433 error: 434 if (buf) 435 wmi_buf_free(buf); 436 return status; 437 } 438 439 /** 440 * wma_set_tsf_gpio_pin() - send wmi cmd to configure gpio pin 441 * @handle: wma handler 442 * @pin: GPIO pin id 443 * 444 * Return: QDF_STATUS 445 */ 446 QDF_STATUS wma_set_tsf_gpio_pin(WMA_HANDLE handle, uint32_t pin) 447 { 448 tp_wma_handle 
wma = (tp_wma_handle)handle; 449 struct pdev_params pdev_param = {0}; 450 int32_t ret; 451 452 if (!wma || !wma->wmi_handle) { 453 WMA_LOGE("%s: WMA is closed, can not set gpio", __func__); 454 return QDF_STATUS_E_INVAL; 455 } 456 457 WMA_LOGD("%s: set tsf gpio pin: %d", __func__, pin); 458 459 pdev_param.param_id = WMI_PDEV_PARAM_WNTS_CONFIG; 460 pdev_param.param_value = pin; 461 ret = wmi_unified_pdev_param_send(wma->wmi_handle, 462 &pdev_param, 463 WMA_WILDCARD_PDEV_ID); 464 if (ret) { 465 WMA_LOGE("%s: Failed to set tsf gpio pin (%d)", __func__, ret); 466 return QDF_STATUS_E_FAILURE; 467 } 468 return QDF_STATUS_SUCCESS; 469 } 470 #endif 471 472 /** 473 * wma_set_wisa_params(): Set WISA features related params in FW 474 * @wma_handle: WMA handle 475 * @wisa: Pointer to WISA param struct 476 * 477 * Return: CDF status 478 */ 479 QDF_STATUS wma_set_wisa_params(tp_wma_handle wma_handle, 480 struct sir_wisa_params *wisa) 481 { 482 QDF_STATUS status = QDF_STATUS_SUCCESS; 483 wmi_buf_t buf; 484 wmi_vdev_wisa_cmd_fixed_param *cmd; 485 int ret, len = sizeof(*cmd); 486 487 buf = wmi_buf_alloc(wma_handle->wmi_handle, len); 488 if (!buf) { 489 WMA_LOGP("%s: failed to allocate memory for WISA params", 490 __func__); 491 return QDF_STATUS_E_NOMEM; 492 } 493 494 cmd = (wmi_vdev_wisa_cmd_fixed_param *) wmi_buf_data(buf); 495 cmd->wisa_mode = wisa->mode; 496 cmd->vdev_id = wisa->vdev_id; 497 498 WMITLV_SET_HDR(&cmd->tlv_header, 499 WMITLV_TAG_STRUC_wmi_vdev_wisa_cmd_fixed_param, 500 WMITLV_GET_STRUCT_TLVLEN( 501 wmi_vdev_wisa_cmd_fixed_param)); 502 503 ret = wmi_unified_cmd_send(wma_handle->wmi_handle, buf, len, 504 WMI_VDEV_WISA_CMDID); 505 if (ret != EOK) { 506 WMA_LOGE("wmi_unified_cmd_send returned Error %d", status); 507 status = QDF_STATUS_E_FAILURE; 508 goto error; 509 } 510 return QDF_STATUS_SUCCESS; 511 512 error: 513 wmi_buf_free(buf); 514 return status; 515 } 516 517 #ifdef FEATURE_WLAN_LPHB 518 /** 519 * wma_lphb_conf_hbenable() - enable command of LPHB 
configuration requests 520 * @wma_handle: WMA handle 521 * @lphb_conf_req: configuration info 522 * @by_user: whether this call is from user or cached resent 523 * 524 * Return: QDF status 525 */ 526 QDF_STATUS wma_lphb_conf_hbenable(tp_wma_handle wma_handle, 527 tSirLPHBReq *lphb_conf_req, bool by_user) 528 { 529 QDF_STATUS qdf_status = QDF_STATUS_SUCCESS; 530 int status = 0; 531 tSirLPHBEnableStruct *ts_lphb_enable; 532 wmi_hb_set_enable_cmd_fixed_param hb_enable_fp; 533 int i; 534 535 if (lphb_conf_req == NULL) { 536 WMA_LOGE("%s : LPHB configuration is NULL", __func__); 537 return QDF_STATUS_E_FAILURE; 538 } 539 540 ts_lphb_enable = &(lphb_conf_req->params.lphbEnableReq); 541 WMA_LOGI("%s: WMA --> WMI_HB_SET_ENABLE enable=%d, item=%d, session=%d", 542 __func__, 543 ts_lphb_enable->enable, 544 ts_lphb_enable->item, ts_lphb_enable->session); 545 546 if ((ts_lphb_enable->item != 1) && (ts_lphb_enable->item != 2)) { 547 WMA_LOGE("%s : LPHB configuration wrong item %d", 548 __func__, ts_lphb_enable->item); 549 return QDF_STATUS_E_FAILURE; 550 } 551 552 553 /* fill in values */ 554 hb_enable_fp.vdev_id = ts_lphb_enable->session; 555 hb_enable_fp.enable = ts_lphb_enable->enable; 556 hb_enable_fp.item = ts_lphb_enable->item; 557 hb_enable_fp.session = ts_lphb_enable->session; 558 559 status = wmi_unified_lphb_config_hbenable_cmd(wma_handle->wmi_handle, 560 &hb_enable_fp); 561 if (status != EOK) { 562 qdf_status = QDF_STATUS_E_FAILURE; 563 goto error; 564 } 565 566 if (by_user) { 567 /* target already configured, now cache command status */ 568 if (ts_lphb_enable->enable) { 569 i = ts_lphb_enable->item - 1; 570 wma_handle->wow.lphb_cache[i].cmd 571 = LPHB_SET_EN_PARAMS_INDID; 572 wma_handle->wow.lphb_cache[i].params.lphbEnableReq. 573 enable = ts_lphb_enable->enable; 574 wma_handle->wow.lphb_cache[i].params.lphbEnableReq. 575 item = ts_lphb_enable->item; 576 wma_handle->wow.lphb_cache[i].params.lphbEnableReq. 
577 session = ts_lphb_enable->session; 578 579 WMA_LOGI("%s: cached LPHB status in WMA context for item %d", 580 __func__, i); 581 } else { 582 qdf_mem_zero((void *)&wma_handle->wow.lphb_cache, 583 sizeof(wma_handle->wow.lphb_cache)); 584 WMA_LOGI("%s: cleared all cached LPHB status in WMA context", 585 __func__); 586 } 587 } 588 589 return QDF_STATUS_SUCCESS; 590 error: 591 return qdf_status; 592 } 593 594 /** 595 * wma_lphb_conf_tcp_params() - set tcp params of LPHB configuration requests 596 * @wma_handle: wma handle 597 * @lphb_conf_req: lphb config request 598 * 599 * Return: QDF status 600 */ 601 QDF_STATUS wma_lphb_conf_tcp_params(tp_wma_handle wma_handle, 602 tSirLPHBReq *lphb_conf_req) 603 { 604 QDF_STATUS qdf_status = QDF_STATUS_SUCCESS; 605 int status = 0; 606 tSirLPHBTcpParamStruct *ts_lphb_tcp_param; 607 wmi_hb_set_tcp_params_cmd_fixed_param hb_tcp_params_fp = {0}; 608 609 610 if (lphb_conf_req == NULL) { 611 WMA_LOGE("%s : LPHB configuration is NULL", __func__); 612 return QDF_STATUS_E_FAILURE; 613 } 614 615 ts_lphb_tcp_param = &(lphb_conf_req->params.lphbTcpParamReq); 616 WMA_LOGI("%s: WMA --> WMI_HB_SET_TCP_PARAMS srv_ip=%08x, " 617 "dev_ip=%08x, src_port=%d, dst_port=%d, timeout=%d, " 618 "session=%d, gateway_mac="MAC_ADDRESS_STR", timePeriodSec=%d, " 619 "tcpSn=%d", __func__, ts_lphb_tcp_param->srv_ip, 620 ts_lphb_tcp_param->dev_ip, ts_lphb_tcp_param->src_port, 621 ts_lphb_tcp_param->dst_port, ts_lphb_tcp_param->timeout, 622 ts_lphb_tcp_param->session, 623 MAC_ADDR_ARRAY(ts_lphb_tcp_param->gateway_mac.bytes), 624 ts_lphb_tcp_param->timePeriodSec, ts_lphb_tcp_param->tcpSn); 625 626 /* fill in values */ 627 hb_tcp_params_fp.vdev_id = ts_lphb_tcp_param->session; 628 hb_tcp_params_fp.srv_ip = ts_lphb_tcp_param->srv_ip; 629 hb_tcp_params_fp.dev_ip = ts_lphb_tcp_param->dev_ip; 630 hb_tcp_params_fp.seq = ts_lphb_tcp_param->tcpSn; 631 hb_tcp_params_fp.src_port = ts_lphb_tcp_param->src_port; 632 hb_tcp_params_fp.dst_port = ts_lphb_tcp_param->dst_port; 633 
hb_tcp_params_fp.interval = ts_lphb_tcp_param->timePeriodSec; 634 hb_tcp_params_fp.timeout = ts_lphb_tcp_param->timeout; 635 hb_tcp_params_fp.session = ts_lphb_tcp_param->session; 636 WMI_CHAR_ARRAY_TO_MAC_ADDR(ts_lphb_tcp_param->gateway_mac.bytes, 637 &hb_tcp_params_fp.gateway_mac); 638 639 status = wmi_unified_lphb_config_tcp_params_cmd(wma_handle->wmi_handle, 640 &hb_tcp_params_fp); 641 if (status != EOK) { 642 qdf_status = QDF_STATUS_E_FAILURE; 643 goto error; 644 } 645 646 return QDF_STATUS_SUCCESS; 647 error: 648 return qdf_status; 649 } 650 651 /** 652 * wma_lphb_conf_tcp_pkt_filter() - configure tcp packet filter command of LPHB 653 * @wma_handle: wma handle 654 * @lphb_conf_req: lphb config request 655 * 656 * Return: QDF status 657 */ 658 QDF_STATUS wma_lphb_conf_tcp_pkt_filter(tp_wma_handle wma_handle, 659 tSirLPHBReq *lphb_conf_req) 660 { 661 QDF_STATUS qdf_status = QDF_STATUS_SUCCESS; 662 int status = 0; 663 tSirLPHBTcpFilterStruct *ts_lphb_tcp_filter; 664 wmi_hb_set_tcp_pkt_filter_cmd_fixed_param hb_tcp_filter_fp = {0}; 665 666 if (lphb_conf_req == NULL) { 667 WMA_LOGE("%s : LPHB configuration is NULL", __func__); 668 return QDF_STATUS_E_FAILURE; 669 } 670 671 ts_lphb_tcp_filter = &(lphb_conf_req->params.lphbTcpFilterReq); 672 WMA_LOGI("%s: WMA --> WMI_HB_SET_TCP_PKT_FILTER length=%d, offset=%d, session=%d, " 673 "filter=%2x:%2x:%2x:%2x:%2x:%2x ...", __func__, 674 ts_lphb_tcp_filter->length, ts_lphb_tcp_filter->offset, 675 ts_lphb_tcp_filter->session, ts_lphb_tcp_filter->filter[0], 676 ts_lphb_tcp_filter->filter[1], ts_lphb_tcp_filter->filter[2], 677 ts_lphb_tcp_filter->filter[3], ts_lphb_tcp_filter->filter[4], 678 ts_lphb_tcp_filter->filter[5]); 679 680 /* fill in values */ 681 hb_tcp_filter_fp.vdev_id = ts_lphb_tcp_filter->session; 682 hb_tcp_filter_fp.length = ts_lphb_tcp_filter->length; 683 hb_tcp_filter_fp.offset = ts_lphb_tcp_filter->offset; 684 hb_tcp_filter_fp.session = ts_lphb_tcp_filter->session; 685 memcpy((void *)&hb_tcp_filter_fp.filter, 
686 (void *)&ts_lphb_tcp_filter->filter, 687 WMI_WLAN_HB_MAX_FILTER_SIZE); 688 689 status = wmi_unified_lphb_config_tcp_pkt_filter_cmd(wma_handle->wmi_handle, 690 &hb_tcp_filter_fp); 691 if (status != EOK) { 692 qdf_status = QDF_STATUS_E_FAILURE; 693 goto error; 694 } 695 696 return QDF_STATUS_SUCCESS; 697 error: 698 return qdf_status; 699 } 700 701 /** 702 * wma_lphb_conf_udp_params() - configure udp param command of LPHB 703 * @wma_handle: wma handle 704 * @lphb_conf_req: lphb config request 705 * 706 * Return: QDF status 707 */ 708 QDF_STATUS wma_lphb_conf_udp_params(tp_wma_handle wma_handle, 709 tSirLPHBReq *lphb_conf_req) 710 { 711 QDF_STATUS qdf_status = QDF_STATUS_SUCCESS; 712 int status = 0; 713 tSirLPHBUdpParamStruct *ts_lphb_udp_param; 714 wmi_hb_set_udp_params_cmd_fixed_param hb_udp_params_fp = {0}; 715 716 717 if (lphb_conf_req == NULL) { 718 WMA_LOGE("%s : LPHB configuration is NULL", __func__); 719 return QDF_STATUS_E_FAILURE; 720 } 721 722 ts_lphb_udp_param = &(lphb_conf_req->params.lphbUdpParamReq); 723 WMA_LOGI("%s: WMA --> WMI_HB_SET_UDP_PARAMS srv_ip=%d, dev_ip=%d, src_port=%d, " 724 "dst_port=%d, interval=%d, timeout=%d, session=%d, " 725 "gateway_mac="MAC_ADDRESS_STR, __func__, 726 ts_lphb_udp_param->srv_ip, ts_lphb_udp_param->dev_ip, 727 ts_lphb_udp_param->src_port, ts_lphb_udp_param->dst_port, 728 ts_lphb_udp_param->interval, ts_lphb_udp_param->timeout, 729 ts_lphb_udp_param->session, 730 MAC_ADDR_ARRAY(ts_lphb_udp_param->gateway_mac.bytes)); 731 732 733 /* fill in values */ 734 hb_udp_params_fp.vdev_id = ts_lphb_udp_param->session; 735 hb_udp_params_fp.srv_ip = ts_lphb_udp_param->srv_ip; 736 hb_udp_params_fp.dev_ip = ts_lphb_udp_param->dev_ip; 737 hb_udp_params_fp.src_port = ts_lphb_udp_param->src_port; 738 hb_udp_params_fp.dst_port = ts_lphb_udp_param->dst_port; 739 hb_udp_params_fp.interval = ts_lphb_udp_param->interval; 740 hb_udp_params_fp.timeout = ts_lphb_udp_param->timeout; 741 hb_udp_params_fp.session = ts_lphb_udp_param->session; 
742 WMI_CHAR_ARRAY_TO_MAC_ADDR(ts_lphb_udp_param->gateway_mac.bytes, 743 &hb_udp_params_fp.gateway_mac); 744 745 status = wmi_unified_lphb_config_udp_params_cmd(wma_handle->wmi_handle, 746 &hb_udp_params_fp); 747 if (status != EOK) { 748 qdf_status = QDF_STATUS_E_FAILURE; 749 goto error; 750 } 751 752 return QDF_STATUS_SUCCESS; 753 error: 754 return qdf_status; 755 } 756 757 /** 758 * wma_lphb_conf_udp_pkt_filter() - configure udp pkt filter command of LPHB 759 * @wma_handle: wma handle 760 * @lphb_conf_req: lphb config request 761 * 762 * Return: QDF status 763 */ 764 QDF_STATUS wma_lphb_conf_udp_pkt_filter(tp_wma_handle wma_handle, 765 tSirLPHBReq *lphb_conf_req) 766 { 767 QDF_STATUS qdf_status = QDF_STATUS_SUCCESS; 768 int status = 0; 769 tSirLPHBUdpFilterStruct *ts_lphb_udp_filter; 770 wmi_hb_set_udp_pkt_filter_cmd_fixed_param hb_udp_filter_fp = {0}; 771 772 if (lphb_conf_req == NULL) { 773 WMA_LOGE("%s : LPHB configuration is NULL", __func__); 774 return QDF_STATUS_E_FAILURE; 775 } 776 777 ts_lphb_udp_filter = &(lphb_conf_req->params.lphbUdpFilterReq); 778 WMA_LOGI("%s: WMA --> WMI_HB_SET_UDP_PKT_FILTER length=%d, offset=%d, session=%d, " 779 "filter=%2x:%2x:%2x:%2x:%2x:%2x ...", __func__, 780 ts_lphb_udp_filter->length, ts_lphb_udp_filter->offset, 781 ts_lphb_udp_filter->session, ts_lphb_udp_filter->filter[0], 782 ts_lphb_udp_filter->filter[1], ts_lphb_udp_filter->filter[2], 783 ts_lphb_udp_filter->filter[3], ts_lphb_udp_filter->filter[4], 784 ts_lphb_udp_filter->filter[5]); 785 786 787 /* fill in values */ 788 hb_udp_filter_fp.vdev_id = ts_lphb_udp_filter->session; 789 hb_udp_filter_fp.length = ts_lphb_udp_filter->length; 790 hb_udp_filter_fp.offset = ts_lphb_udp_filter->offset; 791 hb_udp_filter_fp.session = ts_lphb_udp_filter->session; 792 memcpy((void *)&hb_udp_filter_fp.filter, 793 (void *)&ts_lphb_udp_filter->filter, 794 WMI_WLAN_HB_MAX_FILTER_SIZE); 795 796 status = wmi_unified_lphb_config_udp_pkt_filter_cmd(wma_handle->wmi_handle, 797 
&hb_udp_filter_fp); 798 if (status != EOK) { 799 qdf_status = QDF_STATUS_E_FAILURE; 800 goto error; 801 } 802 803 return QDF_STATUS_SUCCESS; 804 error: 805 return qdf_status; 806 } 807 808 /** 809 * wma_process_lphb_conf_req() - handle LPHB configuration requests 810 * @wma_handle: wma handle 811 * @lphb_conf_req: lphb config request 812 * 813 * Return: QDF status 814 */ 815 QDF_STATUS wma_process_lphb_conf_req(tp_wma_handle wma_handle, 816 tSirLPHBReq *lphb_conf_req) 817 { 818 QDF_STATUS qdf_status = QDF_STATUS_SUCCESS; 819 820 if (lphb_conf_req == NULL) { 821 WMA_LOGE("%s : LPHB configuration is NULL", __func__); 822 return QDF_STATUS_E_FAILURE; 823 } 824 825 WMA_LOGI("%s : LPHB configuration cmd id is %d", __func__, 826 lphb_conf_req->cmd); 827 switch (lphb_conf_req->cmd) { 828 case LPHB_SET_EN_PARAMS_INDID: 829 qdf_status = wma_lphb_conf_hbenable(wma_handle, 830 lphb_conf_req, true); 831 break; 832 833 case LPHB_SET_TCP_PARAMS_INDID: 834 qdf_status = wma_lphb_conf_tcp_params(wma_handle, 835 lphb_conf_req); 836 break; 837 838 case LPHB_SET_TCP_PKT_FILTER_INDID: 839 qdf_status = wma_lphb_conf_tcp_pkt_filter(wma_handle, 840 lphb_conf_req); 841 break; 842 843 case LPHB_SET_UDP_PARAMS_INDID: 844 qdf_status = wma_lphb_conf_udp_params(wma_handle, 845 lphb_conf_req); 846 break; 847 848 case LPHB_SET_UDP_PKT_FILTER_INDID: 849 qdf_status = wma_lphb_conf_udp_pkt_filter(wma_handle, 850 lphb_conf_req); 851 break; 852 853 case LPHB_SET_NETWORK_INFO_INDID: 854 default: 855 break; 856 } 857 858 qdf_mem_free(lphb_conf_req); 859 return qdf_status; 860 } 861 #endif /* FEATURE_WLAN_LPHB */ 862 863 /** 864 * wma_process_dhcp_ind() - process dhcp indication from SME 865 * @wma_handle: wma handle 866 * @ta_dhcp_ind: DHCP indication 867 * 868 * Return: QDF Status 869 */ 870 QDF_STATUS wma_process_dhcp_ind(tp_wma_handle wma_handle, 871 tAniDHCPInd *ta_dhcp_ind) 872 { 873 uint8_t vdev_id; 874 int status = 0; 875 wmi_peer_set_param_cmd_fixed_param peer_set_param_fp = {0}; 876 877 if 
(!ta_dhcp_ind) { 878 WMA_LOGE("%s : DHCP indication is NULL", __func__); 879 return QDF_STATUS_E_FAILURE; 880 } 881 882 if (!wma_find_vdev_by_addr(wma_handle, 883 ta_dhcp_ind->adapterMacAddr.bytes, 884 &vdev_id)) { 885 WMA_LOGE("%s: Failed to find vdev id for DHCP indication", 886 __func__); 887 return QDF_STATUS_E_FAILURE; 888 } 889 890 WMA_LOGI("%s: WMA --> WMI_PEER_SET_PARAM triggered by DHCP, " 891 "msgType=%s," 892 "device_mode=%d, macAddr=" MAC_ADDRESS_STR, 893 __func__, 894 ta_dhcp_ind->msgType == WMA_DHCP_START_IND ? 895 "WMA_DHCP_START_IND" : "WMA_DHCP_STOP_IND", 896 ta_dhcp_ind->device_mode, 897 MAC_ADDR_ARRAY(ta_dhcp_ind->peerMacAddr.bytes)); 898 899 /* fill in values */ 900 peer_set_param_fp.vdev_id = vdev_id; 901 peer_set_param_fp.param_id = WMI_PEER_CRIT_PROTO_HINT_ENABLED; 902 if (WMA_DHCP_START_IND == ta_dhcp_ind->msgType) 903 peer_set_param_fp.param_value = 1; 904 else 905 peer_set_param_fp.param_value = 0; 906 WMI_CHAR_ARRAY_TO_MAC_ADDR(ta_dhcp_ind->peerMacAddr.bytes, 907 &peer_set_param_fp.peer_macaddr); 908 909 status = wmi_unified_process_dhcp_ind(wma_handle->wmi_handle, 910 &peer_set_param_fp); 911 if (status != EOK) { 912 return QDF_STATUS_E_FAILURE; 913 } 914 915 return QDF_STATUS_SUCCESS; 916 } 917 918 /** 919 * wma_chan_phy__mode() - get WLAN_PHY_MODE for channel 920 * @chan: channel number 921 * @chan_width: maximum channel width possible 922 * @dot11_mode: maximum phy_mode possible 923 * 924 * Return: return WLAN_PHY_MODE 925 */ 926 WLAN_PHY_MODE wma_chan_phy_mode(u8 chan, enum phy_ch_width chan_width, 927 u8 dot11_mode) 928 { 929 WLAN_PHY_MODE phymode = MODE_UNKNOWN; 930 uint16_t bw_val = cds_bw_value(chan_width); 931 932 if (CDS_IS_CHANNEL_24GHZ(chan)) { 933 if (((CH_WIDTH_5MHZ == chan_width) || 934 (CH_WIDTH_10MHZ == chan_width)) && 935 ((WNI_CFG_DOT11_MODE_11B == dot11_mode) || 936 (WNI_CFG_DOT11_MODE_11G == dot11_mode) || 937 (WNI_CFG_DOT11_MODE_11N == dot11_mode) || 938 (WNI_CFG_DOT11_MODE_ALL == dot11_mode) || 939 
(WNI_CFG_DOT11_MODE_11AC == dot11_mode))) 940 phymode = MODE_11G; 941 else { 942 switch (dot11_mode) { 943 case WNI_CFG_DOT11_MODE_11B: 944 if ((20 == bw_val) || 945 (40 == bw_val)) 946 phymode = MODE_11B; 947 break; 948 case WNI_CFG_DOT11_MODE_11G: 949 if ((20 == bw_val) || 950 (40 == bw_val)) 951 phymode = MODE_11G; 952 break; 953 case WNI_CFG_DOT11_MODE_11G_ONLY: 954 if ((20 == bw_val) || 955 (40 == bw_val)) 956 phymode = MODE_11GONLY; 957 break; 958 case WNI_CFG_DOT11_MODE_11N: 959 case WNI_CFG_DOT11_MODE_11N_ONLY: 960 if (20 == bw_val) 961 phymode = MODE_11NG_HT20; 962 else if (40 == bw_val) 963 phymode = MODE_11NG_HT40; 964 break; 965 case WNI_CFG_DOT11_MODE_ALL: 966 case WNI_CFG_DOT11_MODE_11AC: 967 case WNI_CFG_DOT11_MODE_11AC_ONLY: 968 if (20 == bw_val) 969 phymode = MODE_11AC_VHT20_2G; 970 else if (40 == bw_val) 971 phymode = MODE_11AC_VHT40_2G; 972 break; 973 default: 974 break; 975 } 976 } 977 } else if (CDS_IS_CHANNEL_DSRC(chan)) 978 phymode = MODE_11A; 979 else { 980 if (((CH_WIDTH_5MHZ == chan_width) || 981 (CH_WIDTH_10MHZ == chan_width)) && 982 ((WNI_CFG_DOT11_MODE_11A == dot11_mode) || 983 (WNI_CFG_DOT11_MODE_11N == dot11_mode) || 984 (WNI_CFG_DOT11_MODE_ALL == dot11_mode) || 985 (WNI_CFG_DOT11_MODE_11AC == dot11_mode))) 986 phymode = MODE_11A; 987 else { 988 switch (dot11_mode) { 989 case WNI_CFG_DOT11_MODE_11A: 990 if (0 < bw_val) 991 phymode = MODE_11A; 992 break; 993 case WNI_CFG_DOT11_MODE_11N: 994 case WNI_CFG_DOT11_MODE_11N_ONLY: 995 if (20 == bw_val) 996 phymode = MODE_11NA_HT20; 997 else if (40 <= bw_val) 998 phymode = MODE_11NA_HT40; 999 break; 1000 case WNI_CFG_DOT11_MODE_ALL: 1001 case WNI_CFG_DOT11_MODE_11AC: 1002 case WNI_CFG_DOT11_MODE_11AC_ONLY: 1003 if (20 == bw_val) 1004 phymode = MODE_11AC_VHT20; 1005 else if (40 == bw_val) 1006 phymode = MODE_11AC_VHT40; 1007 else if (80 == bw_val) 1008 phymode = MODE_11AC_VHT80; 1009 else if (CH_WIDTH_160MHZ == chan_width) 1010 phymode = MODE_11AC_VHT160; 1011 else if (CH_WIDTH_80P80MHZ == 
chan_width) 1012 phymode = MODE_11AC_VHT80_80; 1013 break; 1014 default: 1015 break; 1016 } 1017 } 1018 } 1019 1020 WMA_LOGD("%s: phymode %d channel %d ch_width %d" 1021 "dot11_mode %d", __func__, phymode, chan, 1022 chan_width, dot11_mode); 1023 1024 QDF_ASSERT(MODE_UNKNOWN != phymode); 1025 return phymode; 1026 } 1027 1028 /** 1029 * wma_get_link_speed() -send command to get linkspeed 1030 * @handle: wma handle 1031 * @pLinkSpeed: link speed info 1032 * 1033 * Return: QDF status 1034 */ 1035 QDF_STATUS wma_get_link_speed(WMA_HANDLE handle, tSirLinkSpeedInfo *pLinkSpeed) 1036 { 1037 tp_wma_handle wma_handle = (tp_wma_handle) handle; 1038 wmi_mac_addr peer_macaddr; 1039 1040 if (!wma_handle || !wma_handle->wmi_handle) { 1041 WMA_LOGE("%s: WMA is closed, can not issue get link speed cmd", 1042 __func__); 1043 return QDF_STATUS_E_INVAL; 1044 } 1045 if (!WMI_SERVICE_IS_ENABLED(wma_handle->wmi_service_bitmap, 1046 WMI_SERVICE_ESTIMATE_LINKSPEED)) { 1047 WMA_LOGE("%s: Linkspeed feature bit not enabled" 1048 " Sending value 0 as link speed.", __func__); 1049 wma_send_link_speed(0); 1050 return QDF_STATUS_E_FAILURE; 1051 } 1052 1053 /* Copy the peer macaddress to the wma buffer */ 1054 WMI_CHAR_ARRAY_TO_MAC_ADDR(pLinkSpeed->peer_macaddr.bytes, 1055 &peer_macaddr); 1056 1057 WMA_LOGD("%s: pLinkSpeed->peerMacAddr: %pM, " 1058 "peer_macaddr.mac_addr31to0: 0x%x, peer_macaddr.mac_addr47to32: 0x%x", 1059 __func__, pLinkSpeed->peer_macaddr.bytes, 1060 peer_macaddr.mac_addr31to0, 1061 peer_macaddr.mac_addr47to32); 1062 1063 if (wmi_unified_get_link_speed_cmd(wma_handle->wmi_handle, 1064 peer_macaddr)) { 1065 return QDF_STATUS_E_FAILURE; 1066 } 1067 1068 return QDF_STATUS_SUCCESS; 1069 } 1070 1071 /** 1072 * wma_add_beacon_filter() - Issue WMI command to set beacon filter 1073 * @wma: wma handler 1074 * @filter_params: beacon_filter_param to set 1075 * 1076 * Return: Return QDF_STATUS 1077 */ 1078 QDF_STATUS wma_add_beacon_filter(WMA_HANDLE handle, 1079 struct beacon_filter_param 
*filter_params) 1080 { 1081 int i; 1082 wmi_buf_t wmi_buf; 1083 u_int8_t *buf; 1084 A_UINT32 *ie_map; 1085 int ret; 1086 tp_wma_handle wma = (tp_wma_handle) handle; 1087 wmi_add_bcn_filter_cmd_fixed_param *cmd; 1088 int len = sizeof(wmi_add_bcn_filter_cmd_fixed_param); 1089 1090 len += WMI_TLV_HDR_SIZE; 1091 len += BCN_FLT_MAX_ELEMS_IE_LIST*sizeof(A_UINT32); 1092 1093 if (!wma || !wma->wmi_handle) { 1094 WMA_LOGE("%s: WMA is closed, can not issue set beacon filter", 1095 __func__); 1096 return QDF_STATUS_E_INVAL; 1097 } 1098 1099 wmi_buf = wmi_buf_alloc(wma->wmi_handle, len); 1100 if (!wmi_buf) { 1101 WMA_LOGE("%s: wmi_buf_alloc failed", __func__); 1102 return QDF_STATUS_E_NOMEM; 1103 } 1104 1105 buf = (u_int8_t *) wmi_buf_data(wmi_buf); 1106 1107 cmd = (wmi_add_bcn_filter_cmd_fixed_param *)wmi_buf_data(wmi_buf); 1108 cmd->vdev_id = filter_params->vdev_id; 1109 1110 WMITLV_SET_HDR(&cmd->tlv_header, 1111 WMITLV_TAG_STRUC_wmi_add_bcn_filter_cmd_fixed_param, 1112 WMITLV_GET_STRUCT_TLVLEN( 1113 wmi_add_bcn_filter_cmd_fixed_param)); 1114 1115 buf += sizeof(wmi_add_bcn_filter_cmd_fixed_param); 1116 1117 WMITLV_SET_HDR(buf, WMITLV_TAG_ARRAY_UINT32, 1118 (BCN_FLT_MAX_ELEMS_IE_LIST * sizeof(u_int32_t))); 1119 1120 ie_map = (A_UINT32 *)(buf + WMI_TLV_HDR_SIZE); 1121 for (i = 0; i < BCN_FLT_MAX_ELEMS_IE_LIST; i++) { 1122 ie_map[i] = filter_params->ie_map[i]; 1123 WMA_LOGD("beacon filter ie map = %u", ie_map[i]); 1124 } 1125 1126 ret = wmi_unified_cmd_send(wma->wmi_handle, wmi_buf, len, 1127 WMI_ADD_BCN_FILTER_CMDID); 1128 if (ret) { 1129 WMA_LOGE("Failed to send wmi add beacon filter = %d", 1130 ret); 1131 wmi_buf_free(wmi_buf); 1132 return QDF_STATUS_E_FAILURE; 1133 } 1134 1135 return QDF_STATUS_SUCCESS; 1136 } 1137 1138 /** 1139 * wma_remove_beacon_filter() - Issue WMI command to remove beacon filter 1140 * @wma: wma handler 1141 * @filter_params: beacon_filter_params 1142 * 1143 * Return: Return QDF_STATUS 1144 */ 1145 QDF_STATUS wma_remove_beacon_filter(WMA_HANDLE handle, 
1146 struct beacon_filter_param *filter_params) 1147 { 1148 wmi_buf_t buf; 1149 tp_wma_handle wma = (tp_wma_handle) handle; 1150 wmi_rmv_bcn_filter_cmd_fixed_param *cmd; 1151 int len = sizeof(wmi_rmv_bcn_filter_cmd_fixed_param); 1152 int ret; 1153 1154 if (!wma || !wma->wmi_handle) { 1155 WMA_LOGE("%s: WMA is closed, cannot issue remove beacon filter", 1156 __func__); 1157 return QDF_STATUS_E_INVAL; 1158 } 1159 1160 buf = wmi_buf_alloc(wma->wmi_handle, len); 1161 if (!buf) { 1162 WMA_LOGE("%s: wmi_buf_alloc failed", __func__); 1163 return QDF_STATUS_E_NOMEM; 1164 } 1165 cmd = (wmi_rmv_bcn_filter_cmd_fixed_param *)wmi_buf_data(buf); 1166 cmd->vdev_id = filter_params->vdev_id; 1167 1168 WMITLV_SET_HDR(&cmd->tlv_header, 1169 WMITLV_TAG_STRUC_wmi_rmv_bcn_filter_cmd_fixed_param, 1170 WMITLV_GET_STRUCT_TLVLEN( 1171 wmi_rmv_bcn_filter_cmd_fixed_param)); 1172 1173 ret = wmi_unified_cmd_send(wma->wmi_handle, buf, len, 1174 WMI_RMV_BCN_FILTER_CMDID); 1175 if (ret) { 1176 WMA_LOGE("Failed to send wmi remove beacon filter = %d", 1177 ret); 1178 wmi_buf_free(buf); 1179 return QDF_STATUS_E_FAILURE; 1180 } 1181 1182 return QDF_STATUS_SUCCESS; 1183 } 1184 1185 /** 1186 * wma_send_adapt_dwelltime_params() - send adaptive dwelltime configuration 1187 * params to firmware 1188 * @wma_handle: wma handler 1189 * @dwelltime_params: pointer to dwelltime_params 1190 * 1191 * Return: QDF_STATUS_SUCCESS on success and QDF failure reason code for failure 1192 */ 1193 QDF_STATUS wma_send_adapt_dwelltime_params(WMA_HANDLE handle, 1194 struct adaptive_dwelltime_params *dwelltime_params) 1195 { 1196 tp_wma_handle wma_handle = (tp_wma_handle) handle; 1197 struct wmi_adaptive_dwelltime_params wmi_param = {0}; 1198 int32_t err; 1199 1200 wmi_param.is_enabled = dwelltime_params->is_enabled; 1201 wmi_param.dwelltime_mode = dwelltime_params->dwelltime_mode; 1202 wmi_param.lpf_weight = dwelltime_params->lpf_weight; 1203 wmi_param.passive_mon_intval = dwelltime_params->passive_mon_intval; 1204 
wmi_param.wifi_act_threshold = dwelltime_params->wifi_act_threshold; 1205 err = wmi_unified_send_adapt_dwelltime_params_cmd(wma_handle-> 1206 wmi_handle, &wmi_param); 1207 if (err) 1208 return QDF_STATUS_E_FAILURE; 1209 1210 return QDF_STATUS_SUCCESS; 1211 } 1212 1213 #ifdef FEATURE_GREEN_AP 1214 1215 /** 1216 * wma_egap_info_status_event() - egap info status event 1217 * @handle: pointer to wma handler 1218 * @event: pointer to event 1219 * @len: len of the event 1220 * 1221 * Return: 0 for success, otherwise appropriate error code 1222 */ 1223 static int wma_egap_info_status_event(void *handle, u_int8_t *event, 1224 uint32_t len) 1225 { 1226 WMI_TX_PAUSE_EVENTID_param_tlvs *param_buf; 1227 wmi_ap_ps_egap_info_event_fixed_param *egap_info_event; 1228 wmi_ap_ps_egap_info_chainmask_list *chainmask_event; 1229 u_int8_t *buf_ptr; 1230 1231 param_buf = (WMI_TX_PAUSE_EVENTID_param_tlvs *)event; 1232 if (!param_buf) { 1233 WMA_LOGE("Invalid EGAP Info status event buffer"); 1234 return -EINVAL; 1235 } 1236 1237 egap_info_event = (wmi_ap_ps_egap_info_event_fixed_param *) 1238 param_buf->fixed_param; 1239 buf_ptr = (uint8_t *)egap_info_event; 1240 buf_ptr += sizeof(wmi_ap_ps_egap_info_event_fixed_param); 1241 chainmask_event = (wmi_ap_ps_egap_info_chainmask_list *)buf_ptr; 1242 1243 WMA_LOGI("mac_id: %d, status: %d, tx_mask: %x, rx_mask: %d", 1244 chainmask_event->mac_id, 1245 egap_info_event->status, 1246 chainmask_event->tx_chainmask, 1247 chainmask_event->rx_chainmask); 1248 return 0; 1249 } 1250 1251 /** 1252 * wma_send_egap_conf_params() - send wmi cmd of egap configuration params 1253 * @wma_handle: wma handler 1254 * @egap_params: pointer to egap_params 1255 * 1256 * Return: 0 for success, otherwise appropriate error code 1257 */ 1258 QDF_STATUS wma_send_egap_conf_params(WMA_HANDLE handle, 1259 struct egap_conf_params *egap_params) 1260 { 1261 tp_wma_handle wma_handle = (tp_wma_handle) handle; 1262 wmi_ap_ps_egap_param_cmd_fixed_param cmd = {0}; 1263 int32_t err; 
1264 1265 cmd.enable = egap_params->enable; 1266 cmd.inactivity_time = egap_params->inactivity_time; 1267 cmd.wait_time = egap_params->wait_time; 1268 cmd.flags = egap_params->flags; 1269 err = wmi_unified_egap_conf_params_cmd(wma_handle->wmi_handle, &cmd); 1270 if (err) { 1271 return QDF_STATUS_E_FAILURE; 1272 } 1273 1274 return QDF_STATUS_SUCCESS; 1275 } 1276 1277 /** 1278 * wma_setup_egap_support() - setup the EGAP support flag 1279 * @tgt_cfg: pointer to hdd target configuration 1280 * @egap_support: EGAP support flag 1281 * 1282 * Return: None 1283 */ 1284 void wma_setup_egap_support(struct wma_tgt_cfg *tgt_cfg, WMA_HANDLE handle) 1285 { 1286 tp_wma_handle wma_handle = (tp_wma_handle) handle; 1287 1288 if (tgt_cfg && wma_handle) 1289 tgt_cfg->egap_support = wma_handle->egap_support; 1290 } 1291 1292 /** 1293 * wma_register_egap_event_handle() - register the EGAP event handle 1294 * @wma_handle: wma handler 1295 * 1296 * Return: None 1297 */ 1298 void wma_register_egap_event_handle(WMA_HANDLE handle) 1299 { 1300 tp_wma_handle wma_handle = (tp_wma_handle) handle; 1301 QDF_STATUS status; 1302 1303 if (WMI_SERVICE_IS_ENABLED(wma_handle->wmi_service_bitmap, 1304 WMI_SERVICE_EGAP)) { 1305 status = wmi_unified_register_event_handler( 1306 wma_handle->wmi_handle, 1307 WMI_AP_PS_EGAP_INFO_EVENTID, 1308 wma_egap_info_status_event, 1309 WMA_RX_SERIALIZER_CTX); 1310 if (QDF_IS_STATUS_ERROR(status)) { 1311 WMA_LOGE("Failed to register Enhance Green AP event"); 1312 wma_handle->egap_support = false; 1313 } else { 1314 WMA_LOGI("Set the Enhance Green AP event handler"); 1315 wma_handle->egap_support = true; 1316 } 1317 } else 1318 wma_handle->egap_support = false; 1319 } 1320 #endif /* FEATURE_GREEN_AP */ 1321 1322 /** 1323 * wma_unified_fw_profiling_cmd() - send FW profiling cmd to WLAN FW 1324 * @wma: wma handle 1325 * @cmd: Profiling command index 1326 * @value1: parameter1 value 1327 * @value2: parameter2 value 1328 * 1329 * Return: 0 for success else error code 1330 */ 
1331 QDF_STATUS wma_unified_fw_profiling_cmd(wmi_unified_t wmi_handle, 1332 uint32_t cmd, uint32_t value1, uint32_t value2) 1333 { 1334 int ret; 1335 1336 ret = wmi_unified_fw_profiling_data_cmd(wmi_handle, cmd, 1337 value1, value2); 1338 if (ret) { 1339 WMA_LOGE("enable cmd Failed for id %d value %d", 1340 value1, value2); 1341 return ret; 1342 } 1343 1344 return QDF_STATUS_SUCCESS; 1345 } 1346 1347 #ifdef FEATURE_WLAN_LPHB 1348 /** 1349 * wma_lphb_handler() - send LPHB indication to SME 1350 * @wma: wma handle 1351 * @event: event handler 1352 * 1353 * Return: 0 for success or error code 1354 */ 1355 static int wma_lphb_handler(tp_wma_handle wma, uint8_t *event) 1356 { 1357 wmi_hb_ind_event_fixed_param *hb_fp; 1358 tSirLPHBInd *slphb_indication; 1359 QDF_STATUS qdf_status; 1360 cds_msg_t sme_msg = { 0 }; 1361 1362 hb_fp = (wmi_hb_ind_event_fixed_param *) event; 1363 if (!hb_fp) { 1364 WMA_LOGE("Invalid wmi_hb_ind_event_fixed_param buffer"); 1365 return -EINVAL; 1366 } 1367 1368 WMA_LOGD("lphb indication received with vdev_id=%d, session=%d, reason=%d", 1369 hb_fp->vdev_id, hb_fp->session, hb_fp->reason); 1370 1371 slphb_indication = (tSirLPHBInd *) qdf_mem_malloc(sizeof(tSirLPHBInd)); 1372 1373 if (!slphb_indication) { 1374 WMA_LOGE("Invalid LPHB indication buffer"); 1375 return -ENOMEM; 1376 } 1377 1378 slphb_indication->sessionIdx = hb_fp->session; 1379 slphb_indication->protocolType = hb_fp->reason; 1380 slphb_indication->eventReason = hb_fp->reason; 1381 1382 sme_msg.type = eWNI_SME_LPHB_IND; 1383 sme_msg.bodyptr = slphb_indication; 1384 sme_msg.bodyval = 0; 1385 1386 qdf_status = cds_mq_post_message(QDF_MODULE_ID_SME, &sme_msg); 1387 if (!QDF_IS_STATUS_SUCCESS(qdf_status)) { 1388 WMA_LOGE("Fail to post eWNI_SME_LPHB_IND msg to SME"); 1389 qdf_mem_free(slphb_indication); 1390 return -EINVAL; 1391 } 1392 1393 return 0; 1394 } 1395 #endif /* FEATURE_WLAN_LPHB */ 1396 1397 #ifdef FEATURE_WLAN_RA_FILTERING 1398 /** 1399 * wma_wow_sta_ra_filter() - set RA filter 
pattern in fw 1400 * @wma: wma handle 1401 * @vdev_id: vdev id 1402 * 1403 * Return: QDF status 1404 */ 1405 static QDF_STATUS wma_wow_sta_ra_filter(tp_wma_handle wma, uint8_t vdev_id) 1406 { 1407 1408 struct wma_txrx_node *iface; 1409 int ret; 1410 uint8_t default_pattern; 1411 1412 iface = &wma->interfaces[vdev_id]; 1413 1414 default_pattern = iface->num_wow_default_patterns++; 1415 1416 WMA_LOGD("%s: send RA rate limit [%d] to fw vdev = %d", __func__, 1417 wma->RArateLimitInterval, vdev_id); 1418 1419 ret = wmi_unified_wow_sta_ra_filter_cmd(wma->wmi_handle, vdev_id, 1420 default_pattern, wma->RArateLimitInterval); 1421 if (ret) { 1422 WMA_LOGE("%s: Failed to send RA rate limit to fw", __func__); 1423 iface->num_wow_default_patterns--; 1424 return QDF_STATUS_E_FAILURE; 1425 } 1426 1427 return QDF_STATUS_SUCCESS; 1428 1429 } 1430 #endif /* FEATURE_WLAN_RA_FILTERING */ 1431 1432 /** 1433 * wmi_unified_nat_keepalive_enable() - enable NAT keepalive filter 1434 * @wma: wma handle 1435 * @vdev_id: vdev id 1436 * 1437 * Return: 0 for success or error code 1438 */ 1439 int wmi_unified_nat_keepalive_enable(tp_wma_handle wma, uint8_t vdev_id) 1440 { 1441 1442 if (wmi_unified_nat_keepalive_en_cmd(wma->wmi_handle, vdev_id)) 1443 return QDF_STATUS_E_FAILURE; 1444 1445 return QDF_STATUS_SUCCESS; 1446 } 1447 1448 /** 1449 * wma_unified_csa_offload_enable() - sen CSA offload enable command 1450 * @wma: wma handle 1451 * @vdev_id: vdev id 1452 * 1453 * Return: 0 for success or error code 1454 */ 1455 int wma_unified_csa_offload_enable(tp_wma_handle wma, uint8_t vdev_id) 1456 { 1457 if (wmi_unified_csa_offload_enable(wma->wmi_handle, 1458 vdev_id)) { 1459 WMA_LOGP("%s: Failed to send CSA offload enable command", 1460 __func__); 1461 return -EIO; 1462 } 1463 1464 return 0; 1465 } 1466 1467 #ifdef WLAN_FEATURE_NAN 1468 /** 1469 * wma_nan_rsp_event_handler() - Function is used to handle nan response 1470 * @handle: wma handle 1471 * @event_buf: event buffer 1472 * @len: length of 
buffer 1473 * 1474 * Return: 0 for success or error code 1475 */ 1476 int wma_nan_rsp_event_handler(void *handle, uint8_t *event_buf, 1477 uint32_t len) 1478 { 1479 WMI_NAN_EVENTID_param_tlvs *param_buf; 1480 tSirNanEvent *nan_rsp_event; 1481 wmi_nan_event_hdr *nan_rsp_event_hdr; 1482 QDF_STATUS status; 1483 cds_msg_t cds_msg; 1484 uint8_t *buf_ptr; 1485 uint32_t alloc_len; 1486 1487 /* 1488 * This is how received event_buf looks like 1489 * 1490 * <-------------------- event_buf -----------------------------------> 1491 * 1492 * <--wmi_nan_event_hdr--><---WMI_TLV_HDR_SIZE---><----- data --------> 1493 * 1494 * +-----------+---------+-----------------------+--------------------+ 1495 * | tlv_header| data_len| WMITLV_TAG_ARRAY_BYTE | nan_rsp_event_data | 1496 * +-----------+---------+-----------------------+--------------------+ 1497 */ 1498 1499 WMA_LOGD("%s: Posting NaN response event to SME", __func__); 1500 param_buf = (WMI_NAN_EVENTID_param_tlvs *) event_buf; 1501 if (!param_buf) { 1502 WMA_LOGE("%s: Invalid nan response event buf", __func__); 1503 return -EINVAL; 1504 } 1505 nan_rsp_event_hdr = param_buf->fixed_param; 1506 buf_ptr = (uint8_t *) nan_rsp_event_hdr; 1507 alloc_len = sizeof(tSirNanEvent); 1508 alloc_len += nan_rsp_event_hdr->data_len; 1509 nan_rsp_event = (tSirNanEvent *) qdf_mem_malloc(alloc_len); 1510 if (NULL == nan_rsp_event) { 1511 WMA_LOGE("%s: Memory allocation failure", __func__); 1512 return -ENOMEM; 1513 } 1514 1515 nan_rsp_event->event_data_len = nan_rsp_event_hdr->data_len; 1516 qdf_mem_copy(nan_rsp_event->event_data, buf_ptr + 1517 sizeof(wmi_nan_event_hdr) + WMI_TLV_HDR_SIZE, 1518 nan_rsp_event->event_data_len); 1519 cds_msg.type = eWNI_SME_NAN_EVENT; 1520 cds_msg.bodyptr = (void *)nan_rsp_event; 1521 cds_msg.bodyval = 0; 1522 1523 status = cds_mq_post_message(CDS_MQ_ID_SME, &cds_msg); 1524 if (status != QDF_STATUS_SUCCESS) { 1525 WMA_LOGE("%s: Failed to post NaN response event to SME", 1526 __func__); 1527 
qdf_mem_free(nan_rsp_event); 1528 return -EFAULT; 1529 } 1530 WMA_LOGD("%s: NaN response event Posted to SME", __func__); 1531 return 0; 1532 } 1533 #else 1534 int wma_nan_rsp_event_handler(void *handle, uint8_t *event_buf, 1535 uint32_t len) 1536 { 1537 return 0; 1538 } 1539 #endif /* WLAN_FEATURE_NAN */ 1540 1541 /** 1542 * wma_csa_offload_handler() - CSA event handler 1543 * @handle: wma handle 1544 * @event: event buffer 1545 * @len: buffer length 1546 * 1547 * This event is sent by firmware when it receives CSA IE. 1548 * 1549 * Return: 0 for success or error code 1550 */ 1551 int wma_csa_offload_handler(void *handle, uint8_t *event, uint32_t len) 1552 { 1553 tp_wma_handle wma = (tp_wma_handle) handle; 1554 WMI_CSA_HANDLING_EVENTID_param_tlvs *param_buf; 1555 wmi_csa_event_fixed_param *csa_event; 1556 uint8_t bssid[IEEE80211_ADDR_LEN]; 1557 uint8_t vdev_id = 0; 1558 uint8_t cur_chan = 0; 1559 struct ieee80211_channelswitch_ie *csa_ie; 1560 struct csa_offload_params *csa_offload_event; 1561 struct ieee80211_extendedchannelswitch_ie *xcsa_ie; 1562 struct ieee80211_ie_wide_bw_switch *wb_ie; 1563 struct wma_txrx_node *intr = wma->interfaces; 1564 1565 param_buf = (WMI_CSA_HANDLING_EVENTID_param_tlvs *) event; 1566 1567 WMA_LOGD("%s: Enter", __func__); 1568 if (!param_buf) { 1569 WMA_LOGE("Invalid csa event buffer"); 1570 return -EINVAL; 1571 } 1572 csa_event = param_buf->fixed_param; 1573 WMI_MAC_ADDR_TO_CHAR_ARRAY(&csa_event->i_addr2, &bssid[0]); 1574 1575 if (wma_find_vdev_by_bssid(wma, bssid, &vdev_id) == NULL) { 1576 WMA_LOGE("Invalid bssid received %s:%d", __func__, __LINE__); 1577 return -EINVAL; 1578 } 1579 1580 csa_offload_event = qdf_mem_malloc(sizeof(*csa_offload_event)); 1581 if (!csa_offload_event) { 1582 WMA_LOGE("QDF MEM Alloc Failed for csa_offload_event"); 1583 return -EINVAL; 1584 } 1585 1586 qdf_mem_zero(csa_offload_event, sizeof(*csa_offload_event)); 1587 qdf_mem_copy(csa_offload_event->bssId, &bssid, IEEE80211_ADDR_LEN); 1588 1589 if 
(csa_event->ies_present_flag & WMI_CSA_IE_PRESENT) { 1590 csa_ie = (struct ieee80211_channelswitch_ie *) 1591 (&csa_event->csa_ie[0]); 1592 csa_offload_event->channel = csa_ie->newchannel; 1593 csa_offload_event->switch_mode = csa_ie->switchmode; 1594 } else if (csa_event->ies_present_flag & WMI_XCSA_IE_PRESENT) { 1595 xcsa_ie = (struct ieee80211_extendedchannelswitch_ie *) 1596 (&csa_event->xcsa_ie[0]); 1597 csa_offload_event->channel = xcsa_ie->newchannel; 1598 csa_offload_event->switch_mode = xcsa_ie->switchmode; 1599 csa_offload_event->new_op_class = xcsa_ie->newClass; 1600 } else { 1601 WMA_LOGE("CSA Event error: No CSA IE present"); 1602 qdf_mem_free(csa_offload_event); 1603 return -EINVAL; 1604 } 1605 1606 if (csa_event->ies_present_flag & WMI_WBW_IE_PRESENT) { 1607 wb_ie = (struct ieee80211_ie_wide_bw_switch *) 1608 (&csa_event->wb_ie[0]); 1609 csa_offload_event->new_ch_width = wb_ie->new_ch_width; 1610 csa_offload_event->new_ch_freq_seg1 = wb_ie->new_ch_freq_seg1; 1611 csa_offload_event->new_ch_freq_seg2 = wb_ie->new_ch_freq_seg2; 1612 } 1613 1614 csa_offload_event->ies_present_flag = csa_event->ies_present_flag; 1615 1616 WMA_LOGD("CSA: New Channel = %d BSSID:%pM", 1617 csa_offload_event->channel, csa_offload_event->bssId); 1618 1619 cur_chan = cds_freq_to_chan(intr[vdev_id].mhz); 1620 /* 1621 * basic sanity check: requested channel should not be 0 1622 * and equal to home channel 1623 */ 1624 if ((0 == csa_offload_event->channel) || 1625 (cur_chan == csa_offload_event->channel)) { 1626 WMA_LOGE("CSA Event with channel %d. 
Ignore !!", 1627 csa_offload_event->channel); 1628 qdf_mem_free(csa_offload_event); 1629 return -EINVAL; 1630 } 1631 wma->interfaces[vdev_id].is_channel_switch = true; 1632 wma_send_msg(wma, WMA_CSA_OFFLOAD_EVENT, (void *)csa_offload_event, 0); 1633 return 0; 1634 } 1635 1636 #ifdef FEATURE_OEM_DATA_SUPPORT 1637 /** 1638 * wma_oem_data_response_handler() - OEM data response event handler 1639 * @handle: wma handle 1640 * @datap: data ptr 1641 * @len: data length 1642 * 1643 * Return: 0 for success or error code 1644 */ 1645 int wma_oem_data_response_handler(void *handle, 1646 uint8_t *datap, uint32_t len) 1647 { 1648 WMI_OEM_RESPONSE_EVENTID_param_tlvs *param_buf; 1649 uint8_t *data; 1650 uint32_t datalen; 1651 struct oem_data_rsp *oem_rsp; 1652 tpAniSirGlobal pmac = cds_get_context(QDF_MODULE_ID_PE); 1653 1654 if (!pmac) { 1655 WMA_LOGE(FL("Invalid pmac")); 1656 return -EINVAL; 1657 } 1658 1659 if (!pmac->sme.oem_data_rsp_callback) { 1660 WMA_LOGE(FL("Callback not registered")); 1661 return -EINVAL; 1662 } 1663 1664 param_buf = (WMI_OEM_RESPONSE_EVENTID_param_tlvs *) datap; 1665 if (!param_buf) { 1666 WMA_LOGE(FL("Received NULL buf ptr from FW")); 1667 return -ENOMEM; 1668 } 1669 1670 data = param_buf->data; 1671 datalen = param_buf->num_data; 1672 1673 if (!data) { 1674 WMA_LOGE(FL("Received NULL data from FW")); 1675 return -EINVAL; 1676 } 1677 1678 if (datalen > OEM_DATA_RSP_SIZE) { 1679 WMA_LOGE(FL("Received data len %d exceeds max value %d"), 1680 datalen, OEM_DATA_RSP_SIZE); 1681 return -EINVAL; 1682 } 1683 1684 oem_rsp = qdf_mem_malloc(sizeof(*oem_rsp)); 1685 if (!oem_rsp) { 1686 WMA_LOGE(FL("Failed to alloc oem_data_rsp")); 1687 return -ENOMEM; 1688 } 1689 oem_rsp->rsp_len = datalen; 1690 if (oem_rsp->rsp_len) { 1691 oem_rsp->data = qdf_mem_malloc(oem_rsp->rsp_len); 1692 if (!oem_rsp->data) { 1693 WMA_LOGE(FL("malloc failed for data")); 1694 qdf_mem_free(oem_rsp); 1695 return -ENOMEM; 1696 } 1697 } else { 1698 WMA_LOGE(FL("Invalid rsp length: %d"), 1699 
oem_rsp->rsp_len); 1700 qdf_mem_free(oem_rsp); 1701 return -EINVAL; 1702 } 1703 1704 qdf_mem_copy(oem_rsp->data, data, datalen); 1705 1706 WMA_LOGI(FL("Sending OEM_DATA_RSP(len: %d) to upper layer"), datalen); 1707 1708 pmac->sme.oem_data_rsp_callback(oem_rsp); 1709 1710 if (oem_rsp->data) 1711 qdf_mem_free(oem_rsp->data); 1712 qdf_mem_free(oem_rsp); 1713 1714 return 0; 1715 } 1716 1717 /** 1718 * wma_start_oem_data_req() - start OEM data request to target 1719 * @wma_handle: wma handle 1720 * @oem_data_req: start request params 1721 * 1722 * Return: QDF_STATUS 1723 */ 1724 QDF_STATUS wma_start_oem_data_req(tp_wma_handle wma_handle, 1725 struct oem_data_req *oem_data_req) 1726 { 1727 int ret = 0; 1728 1729 WMA_LOGD(FL("Send OEM Data Request to target")); 1730 1731 if (!oem_data_req || !oem_data_req->data) { 1732 WMA_LOGE(FL("oem_data_req is null")); 1733 return QDF_STATUS_E_INVAL; 1734 } 1735 1736 if (!wma_handle || !wma_handle->wmi_handle) { 1737 WMA_LOGE(FL("WMA - closed, can not send Oem data request cmd")); 1738 qdf_mem_free(oem_data_req->data); 1739 return QDF_STATUS_E_INVAL; 1740 } 1741 1742 ret = wmi_unified_start_oem_data_cmd(wma_handle->wmi_handle, 1743 oem_data_req->data_len, 1744 oem_data_req->data); 1745 1746 if (!QDF_IS_STATUS_SUCCESS(ret)) 1747 WMA_LOGE(FL("wmi cmd send failed")); 1748 1749 return ret; 1750 } 1751 #endif /* FEATURE_OEM_DATA_SUPPORT */ 1752 1753 1754 /** 1755 * wma_unified_dfs_radar_rx_event_handler() - dfs radar rx event handler 1756 * @handle: wma handle 1757 * @data: data buffer 1758 * @datalen: data length 1759 * 1760 * WMI handler for WMI_DFS_RADAR_EVENTID 1761 * This handler is registered for handling 1762 * filtered DFS Phyerror. This handler is 1763 * will be invoked only when DFS Phyerr 1764 * filtering offload is enabled. 
 *
 * Return: 1 for Success and 0 for error
 */
static int wma_unified_dfs_radar_rx_event_handler(void *handle,
						  uint8_t *data,
						  uint32_t datalen)
{
	tp_wma_handle wma = (tp_wma_handle) handle;
	struct ieee80211com *ic;
	struct ath_dfs *dfs;
	struct dfs_event *event;
	struct dfs_ieee80211_channel *chan;
	int empty;
	int do_check_chirp = 0;
	int is_hw_chirp = 0;
	int is_sw_chirp = 0;
	int is_pri = 0;
	bool is_ch_dfs = false;

	WMI_DFS_RADAR_EVENTID_param_tlvs *param_tlvs;
	wmi_dfs_radar_event_fixed_param *radar_event;

	ic = wma->dfs_ic;
	if (NULL == ic) {
		WMA_LOGE("%s: dfs_ic is NULL ", __func__);
		return 0;
	}

	dfs = (struct ath_dfs *) ic->ic_dfs;
	param_tlvs = (WMI_DFS_RADAR_EVENTID_param_tlvs *) data;

	if (NULL == dfs) {
		WMA_LOGE("%s: dfs is NULL ", __func__);
		return 0;
	}
	/*
	 * This parameter holds the number
	 * of phyerror interrupts to the host
	 * after the phyerrors have passed through
	 * false detect filters in the firmware.
	 */
	dfs->dfs_phyerr_count++;

	if (!param_tlvs) {
		WMA_LOGE("%s: Received NULL data from FW", __func__);
		return 0;
	}

	radar_event = param_tlvs->fixed_param;

	/* current channel is read and validated under chan_lock */
	qdf_spin_lock_bh(&ic->chan_lock);
	chan = ic->ic_curchan;
	if (ic->disable_phy_err_processing) {
		WMA_LOGD("%s: radar indication done,drop phyerror event",
			 __func__);
		qdf_spin_unlock_bh(&ic->chan_lock);
		return 0;
	}

	/*
	 * Decide whether the current channel is DFS. For VHT80+80 either
	 * 80 MHz segment being DFS counts; VHT160 is treated as DFS
	 * unconditionally here.
	 */
	if (IEEE80211_IS_CHAN_11AC_VHT160(chan)) {
		is_ch_dfs = true;
	} else if (IEEE80211_IS_CHAN_11AC_VHT80P80(chan)) {
		if (cds_get_channel_state(chan->ic_ieee) == CHANNEL_STATE_DFS ||
		    cds_get_channel_state(chan->ic_ieee_ext -
					  WMA_80MHZ_START_CENTER_CH_DIFF) ==
							CHANNEL_STATE_DFS)
			is_ch_dfs = true;
	} else {
		if (cds_get_channel_state(chan->ic_ieee) == CHANNEL_STATE_DFS)
			is_ch_dfs = true;
	}
	if (!is_ch_dfs) {
		WMA_LOGE
			("%s: Invalid DFS Phyerror event. Channel=%d is Non-DFS",
			__func__, chan->ic_ieee);
		qdf_spin_unlock_bh(&ic->chan_lock);
		return 0;
	}

	qdf_spin_unlock_bh(&ic->chan_lock);
	dfs->ath_dfs_stats.total_phy_errors++;

	/*
	 * For BB-TLV chips the chirp flag comes straight from the event;
	 * also track the min/max pulse center frequency seen so far.
	 */
	if (dfs->dfs_caps.ath_chip_is_bb_tlv) {
		do_check_chirp = 1;
		is_pri = 1;
		is_hw_chirp = radar_event->pulse_is_chirp;

		if ((uint32_t) dfs->dfs_phyerr_freq_min >
		    radar_event->pulse_center_freq) {
			dfs->dfs_phyerr_freq_min =
				(int) radar_event->pulse_center_freq;
		}

		if (dfs->dfs_phyerr_freq_max <
		    (int) radar_event->pulse_center_freq) {
			dfs->dfs_phyerr_freq_max =
				(int) radar_event->pulse_center_freq;
		}
	}

	/*
	 * Now, add the parsed, checked and filtered
	 * radar phyerror event radar pulse event list.
	 * This event will then be processed by
	 * dfs_radar_processevent() to see if the pattern
	 * of pulses in radar pulse list match any radar
	 * signature in the current regulatory domain.
	 */

	ATH_DFSEVENTQ_LOCK(dfs);
	empty = STAILQ_EMPTY(&(dfs->dfs_eventq));
	ATH_DFSEVENTQ_UNLOCK(dfs);
	/* no free event descriptors available: silently drop the pulse */
	if (empty) {
		return 0;
	}
	/*
	 * Add the event to the list, if there's space.
	 * A free descriptor is popped from dfs_eventq, filled in from the
	 * radar event, and pushed onto dfs_radarq below.
	 */
	ATH_DFSEVENTQ_LOCK(dfs);
	event = STAILQ_FIRST(&(dfs->dfs_eventq));
	if (event == NULL) {
		ATH_DFSEVENTQ_UNLOCK(dfs);
		WMA_LOGE("%s: No more space left for queuing DFS Phyerror events",
			__func__);
		return 0;
	}
	STAILQ_REMOVE_HEAD(&(dfs->dfs_eventq), re_list);
	ATH_DFSEVENTQ_UNLOCK(dfs);
	dfs->dfs_phyerr_queued_count++;
	dfs->dfs_phyerr_w53_counter++;
	event->re_dur = (uint8_t) radar_event->pulse_duration;
	event->re_rssi = radar_event->rssi;
	event->re_ts = radar_event->pulse_detect_ts & DFS_TSMASK;
	/* 64-bit detection timestamp reassembled from two 32-bit halves */
	event->re_full_ts = (((uint64_t) radar_event->upload_fullts_high) << 32)
			    | radar_event->upload_fullts_low;

	/*
	 * Index of peak magnitude
	 */
	event->sidx = radar_event->peak_sidx;
	event->re_flags = 0;

	/*
	 * Handle chirp flags.
	 */
	if (do_check_chirp) {
		event->re_flags |= DFS_EVENT_CHECKCHIRP;
		if (is_hw_chirp) {
			event->re_flags |= DFS_EVENT_HW_CHIRP;
		}
		if (is_sw_chirp) {
			event->re_flags |= DFS_EVENT_SW_CHIRP;
		}
	}
	/*
	 * Correctly set which channel is being reported on
	 */
	if (is_pri) {
		event->re_chanindex = (uint8_t) dfs->dfs_curchan_radindex;
	} else {
		if (dfs->dfs_extchan_radindex == -1) {
			WMA_LOGI("%s phyerr on ext channel", __func__);
		}
		event->re_chanindex = (uint8_t) dfs->dfs_extchan_radindex;
		WMA_LOGI("%s:New extension channel event is added to queue",
			 __func__);
	}

	ATH_DFSQ_LOCK(dfs);

	STAILQ_INSERT_TAIL(&(dfs->dfs_radarq), event, re_list);

	empty = STAILQ_EMPTY(&dfs->dfs_radarq);

	ATH_DFSQ_UNLOCK(dfs);

	/* kick the DFS pattern-matching task if it is not already pending */
	if (!empty && !dfs->ath_radar_tasksched) {
		dfs->ath_radar_tasksched = 1;
		OS_SET_TIMER(&dfs->ath_dfs_task_timer, 0);
	}

	return 1;

}

/**
 * wma_unified_phyerr_rx_event_handler() - phyerr event handler
 * @handle: wma handle
 * @data: data buffer
 * @datalen: buffer length
 *
 * WMI Handler for WMI_PHYERR_EVENTID event from firmware.
 * This handler is currently handling only DFS phy errors.
 * This handler will be invoked only when the DFS phyerror
 * filtering offload is disabled.
 *
 * Return: 1:Success, 0:Failure
 */
static int wma_unified_phyerr_rx_event_handler(void *handle,
					       uint8_t *data, uint32_t datalen)
{
	tp_wma_handle wma = (tp_wma_handle) handle;
	WMI_PHYERR_EVENTID_param_tlvs *param_tlvs;
	wmi_comb_phyerr_rx_hdr *pe_hdr;
	uint8_t *bufp;
	wmi_single_phyerr_rx_event *ev;
	struct ieee80211com *ic = wma->dfs_ic;
	qdf_size_t n;
	A_UINT64 tsf64 = 0;
	int phy_err_code = 0;
	A_UINT32 phy_err_mask = 0;
	int error = 0;
	tpAniSirGlobal mac_ctx =
		(tpAniSirGlobal) cds_get_context(QDF_MODULE_ID_PE);
	bool enable_log = false;
	int max_dfs_buf_length = 0;

	if (NULL == mac_ctx) {
		WMA_LOGE("%s: mac_ctx is NULL", __func__);
		return 0;
	}
	enable_log = mac_ctx->sap.enable_dfs_phy_error_logs;

	param_tlvs = (WMI_PHYERR_EVENTID_param_tlvs *) data;

	if (!param_tlvs) {
		WMA_LOGE("%s: Received NULL data from FW", __func__);
		return 0;
	}

	pe_hdr = param_tlvs->hdr;
	if (pe_hdr == NULL) {
		WMA_LOGE("%s: Received Data PE Header is NULL", __func__);
		return 0;
	}

	/* Ensure it's at least the size of the header */
	if (datalen < sizeof(*pe_hdr)) {
		WMA_LOGE("%s: Expected minimum size %zu, received %d",
			 __func__, sizeof(*pe_hdr), datalen);
		return 0;
	}
	/*
	 * The max buffer length is larger for DFS-3 than DFS-2.
	 * So, accordingly use the correct max buffer size.
	 */
	if (wma->hw_bd_id != WMI_HWBD_QCA6174)
		max_dfs_buf_length = DFS3_MAX_BUF_LENGTH;
	else
		max_dfs_buf_length = DFS_MAX_BUF_LENGTH;

	if (pe_hdr->buf_len > max_dfs_buf_length) {
		WMA_LOGE("%s: Received Invalid Phyerror event buffer length = %d"
			 "Maximum allowed buf length = %d", __func__,
			 pe_hdr->buf_len, max_dfs_buf_length);

		return 0;
	}

	/*
	 * Reconstruct the 64 bit event TSF. This isn't from the MAC, it's
	 * at the time the event was sent to us, the TSF value will be
	 * in the future.
	 */
	tsf64 = pe_hdr->tsf_l32;
	tsf64 |= (((uint64_t) pe_hdr->tsf_u32) << 32);

	/*
	 * Check the HW board ID to figure out
	 * if DFS-3 is supported. In DFS-3
	 * phyerror mask indicates the type of
	 * phyerror, whereas in DFS-2 phyerrorcode
	 * indicates the type of phyerror. If the
	 * board is NOT WMI_HWBD_QCA6174, for now
	 * assume that it supports DFS-3.
	 */
	if (wma->hw_bd_id != WMI_HWBD_QCA6174) {
		phy_err_mask = pe_hdr->rsPhyErrMask0;
		WMA_LOGD("%s: DFS-3 phyerror mask = 0x%x",
			 __func__, phy_err_mask);
	}

	/*
	 * Loop over the bufp, extracting out phyerrors
	 * wmi_unified_comb_phyerr_rx_event.bufp is a char pointer,
	 * which isn't correct here - what we have received here
	 * is an array of TLV-style PHY errors.
	 */
	n = 0;                  /* Start just after the header */
	bufp = param_tlvs->bufp;
	while (n < pe_hdr->buf_len) {
		/* ensure there's at least space for the header */
		if ((pe_hdr->buf_len - n) < sizeof(ev->hdr)) {
			WMA_LOGE("%s: Not enough space.(datalen=%d, n=%zu, hdr=%zu bytes",
				 __func__, pe_hdr->buf_len, n, sizeof(ev->hdr));
			error = 1;
			break;
		}
		/*
		 * Obtain a pointer to the beginning of the current event.
		 * data[0] is the beginning of the WMI payload.
		 */
		ev = (wmi_single_phyerr_rx_event *) &bufp[n];

		/*
		 * Sanity check the buffer length of the event against
		 * what we currently have.
		 * Since buf_len is 32 bits, we check if it overflows
		 * a large 32 bit value. It's not 0x7fffffff because
		 * we increase n by (buf_len + sizeof(hdr)), which would
		 * in itself cause n to overflow.
		 * If "int" is 64 bits then this becomes a moot point.
		 */
		if (ev->hdr.buf_len > 0x7f000000) {
			WMA_LOGE("%s:buf_len is garbage (0x%x)", __func__,
				 ev->hdr.buf_len);
			error = 1;
			break;
		}
		if (n + ev->hdr.buf_len > pe_hdr->buf_len) {
			WMA_LOGE("%s: buf_len exceeds available space n=%zu,"
				 "buf_len=%d, datalen=%d",
				 __func__, n, ev->hdr.buf_len, pe_hdr->buf_len);
			error = 1;
			break;
		}
		/*
		 * If the board id is WMI_HWBD_QCA6174
		 * then it supports only DFS-2. So, fetch
		 * phyerror code in order to know the type
		 * of phyerror.
		 */
		if (wma->hw_bd_id == WMI_HWBD_QCA6174) {
			phy_err_code = WMI_UNIFIED_PHYERRCODE_GET(&ev->hdr);
			WMA_LOGD("%s: DFS-2 phyerror code = 0x%x",
				 __func__, phy_err_code);
		}

		/*
		 * phy_err_code is set for DFS-2 and phy_err_mask
		 * is set for DFS-3. Checking both to support
		 * compatability for older platforms.
		 * If the phyerror or phyerrmask category matches,
		 * pass radar events to the dfs pattern matching code.
		 * Don't pass radar events with no buffer payload.
		 */
		if (((phy_err_mask & WMI_PHY_ERROR_MASK0_RADAR) ||
		     (phy_err_mask & WMI_PHY_ERROR_MASK0_FALSE_RADAR_EXT)) ||
		    (phy_err_code == WMA_DFS2_PHYERROR_CODE ||
		     phy_err_code == WMA_DFS2_FALSE_RADAR_EXT)) {
			if (ev->hdr.buf_len > 0) {
				/* Calling in to the DFS module to process the phyerr */
				dfs_process_phyerr(ic, &ev->bufp[0],
						   ev->hdr.buf_len,
						   WMI_UNIFIED_RSSI_COMB_GET
							   (&ev->hdr) & 0xff,
						   /* Extension RSSI */
						   WMI_UNIFIED_RSSI_COMB_GET
							   (&ev->hdr) & 0xff,
						   ev->hdr.tsf_timestamp,
						   tsf64, enable_log);
			}
		}

		/*
		 * Advance the buffer pointer to the next PHY error.
		 * buflen is the length of this payload, so we need to
		 * advance past the current header _AND_ the payload.
		 */
		n += sizeof(*ev) + ev->hdr.buf_len;

	} /*end while() */
	if (error)
		return 0;
	else
		return 1;
}

/**
 * wma_register_dfs_event_handler() - register dfs event handler
 * @wma_handle: wma handle
 *
 * Register appropriate dfs phyerror event handler
 * based on phyerror filtering offload is enabled
 * or disabled.
 *
 * Return: none
 */
void wma_register_dfs_event_handler(tp_wma_handle wma_handle)
{
	if (NULL == wma_handle) {
		WMA_LOGE("%s:wma_handle is NULL", __func__);
		return;
	}

	if (false == wma_handle->dfs_phyerr_filter_offload) {
		/*
		 * Register the wma_unified_phyerr_rx_event_handler
		 * for filtering offload disabled case to handle
		 * the DFS phyerrors.
		 */
		WMA_LOGD("%s:Phyerror Filtering offload is Disabled in ini",
			 __func__);
		wmi_unified_register_event_handler(wma_handle->wmi_handle,
				WMI_PHYERR_EVENTID,
				wma_unified_phyerr_rx_event_handler,
				WMA_RX_WORK_CTX);
		WMA_LOGD("%s: WMI_PHYERR_EVENTID event handler registered",
			 __func__);
	} else {
		WMA_LOGD("%s:Phyerror Filtering offload is Enabled in ini",
			 __func__);
		wmi_unified_register_event_handler(wma_handle->wmi_handle,
				WMI_DFS_RADAR_EVENTID,
				wma_unified_dfs_radar_rx_event_handler,
				WMA_RX_WORK_CTX);
		WMA_LOGD("%s:WMI_DFS_RADAR_EVENTID event handler registered",
			 __func__);
	}

	return;
}


/**
 * wma_unified_dfs_phyerr_filter_offload_enable() - enable dfs phyerr filter
 * @wma_handle: wma handle
 *
 * Send WMI_DFS_PHYERR_FILTER_ENA_CMDID or
 * WMI_DFS_PHYERR_FILTER_DIS_CMDID command
 * to firmware based on phyerr filtering
 * offload status.
2197 * 2198 * Return: 1 success, 0 failure 2199 */ 2200 int 2201 wma_unified_dfs_phyerr_filter_offload_enable(tp_wma_handle wma_handle) 2202 { 2203 int ret; 2204 2205 if (NULL == wma_handle) { 2206 WMA_LOGE("%s:wma_handle is NULL", __func__); 2207 return 0; 2208 } 2209 2210 ret = wmi_unified_dfs_phyerr_filter_offload_en_cmd(wma_handle->wmi_handle, 2211 wma_handle->dfs_phyerr_filter_offload); 2212 if (ret) 2213 return QDF_STATUS_E_FAILURE; 2214 2215 2216 return QDF_STATUS_SUCCESS; 2217 } 2218 2219 #if !defined(REMOVE_PKT_LOG) 2220 /** 2221 * wma_pktlog_wmi_send_cmd() - send pktlog enable/disable command to target 2222 * @handle: wma handle 2223 * @params: pktlog params 2224 * 2225 * Return: QDF status 2226 */ 2227 QDF_STATUS wma_pktlog_wmi_send_cmd(WMA_HANDLE handle, 2228 struct ath_pktlog_wmi_params *params) 2229 { 2230 tp_wma_handle wma_handle = (tp_wma_handle) handle; 2231 int ret; 2232 2233 ret = wmi_unified_pktlog_wmi_send_cmd(wma_handle->wmi_handle, 2234 params->pktlog_event, 2235 params->cmd_id, params->user_triggered); 2236 if (ret) 2237 return QDF_STATUS_E_FAILURE; 2238 2239 return QDF_STATUS_SUCCESS; 2240 } 2241 #endif /* REMOVE_PKT_LOG */ 2242 2243 static void wma_send_status_to_suspend_ind(tp_wma_handle wma, bool suspended) 2244 { 2245 tSirReadyToSuspendInd *ready_to_suspend; 2246 QDF_STATUS status; 2247 cds_msg_t cds_msg; 2248 uint8_t len; 2249 2250 WMA_LOGD("Posting ready to suspend indication to umac"); 2251 2252 len = sizeof(tSirReadyToSuspendInd); 2253 ready_to_suspend = (tSirReadyToSuspendInd *) qdf_mem_malloc(len); 2254 2255 if (NULL == ready_to_suspend) { 2256 WMA_LOGE("%s: Memory allocation failure", __func__); 2257 return; 2258 } 2259 2260 ready_to_suspend->mesgType = eWNI_SME_READY_TO_SUSPEND_IND; 2261 ready_to_suspend->mesgLen = len; 2262 ready_to_suspend->suspended = suspended; 2263 2264 cds_msg.type = eWNI_SME_READY_TO_SUSPEND_IND; 2265 cds_msg.bodyptr = (void *)ready_to_suspend; 2266 cds_msg.bodyval = 0; 2267 2268 status = 
cds_mq_post_message(CDS_MQ_ID_SME, &cds_msg); 2269 if (status != QDF_STATUS_SUCCESS) { 2270 WMA_LOGE("Failed to post ready to suspend"); 2271 qdf_mem_free(ready_to_suspend); 2272 } 2273 } 2274 2275 /** 2276 * wma_wow_wake_reason_str() - Converts wow wakeup reason code to text format 2277 * @wake_reason - WOW wake reason 2278 * 2279 * Return: reason code in string format 2280 */ 2281 static const u8 *wma_wow_wake_reason_str(A_INT32 wake_reason) 2282 { 2283 switch (wake_reason) { 2284 case WOW_REASON_UNSPECIFIED: 2285 return "UNSPECIFIED"; 2286 case WOW_REASON_NLOD: 2287 return "NLOD"; 2288 case WOW_REASON_AP_ASSOC_LOST: 2289 return "AP_ASSOC_LOST"; 2290 case WOW_REASON_LOW_RSSI: 2291 return "LOW_RSSI"; 2292 case WOW_REASON_DEAUTH_RECVD: 2293 return "DEAUTH_RECVD"; 2294 case WOW_REASON_DISASSOC_RECVD: 2295 return "DISASSOC_RECVD"; 2296 case WOW_REASON_GTK_HS_ERR: 2297 return "GTK_HS_ERR"; 2298 case WOW_REASON_EAP_REQ: 2299 return "EAP_REQ"; 2300 case WOW_REASON_FOURWAY_HS_RECV: 2301 return "FOURWAY_HS_RECV"; 2302 case WOW_REASON_TIMER_INTR_RECV: 2303 return "TIMER_INTR_RECV"; 2304 case WOW_REASON_PATTERN_MATCH_FOUND: 2305 return "PATTERN_MATCH_FOUND"; 2306 case WOW_REASON_RECV_MAGIC_PATTERN: 2307 return "RECV_MAGIC_PATTERN"; 2308 case WOW_REASON_P2P_DISC: 2309 return "P2P_DISC"; 2310 #ifdef FEATURE_WLAN_LPHB 2311 case WOW_REASON_WLAN_HB: 2312 return "WLAN_HB"; 2313 #endif /* FEATURE_WLAN_LPHB */ 2314 2315 case WOW_REASON_CSA_EVENT: 2316 return "CSA_EVENT"; 2317 case WOW_REASON_PROBE_REQ_WPS_IE_RECV: 2318 return "PROBE_REQ_RECV"; 2319 case WOW_REASON_AUTH_REQ_RECV: 2320 return "AUTH_REQ_RECV"; 2321 case WOW_REASON_ASSOC_REQ_RECV: 2322 return "ASSOC_REQ_RECV"; 2323 case WOW_REASON_HTT_EVENT: 2324 return "WOW_REASON_HTT_EVENT"; 2325 #ifdef FEATURE_WLAN_RA_FILTERING 2326 case WOW_REASON_RA_MATCH: 2327 return "WOW_REASON_RA_MATCH"; 2328 #endif /* FEATURE_WLAN_RA_FILTERING */ 2329 case WOW_REASON_BEACON_RECV: 2330 return "WOW_REASON_IBSS_BEACON_RECV"; 2331 #ifdef 
FEATURE_WLAN_AUTO_SHUTDOWN 2332 case WOW_REASON_HOST_AUTO_SHUTDOWN: 2333 return "WOW_REASON_HOST_AUTO_SHUTDOWN"; 2334 #endif /* FEATURE_WLAN_AUTO_SHUTDOWN */ 2335 #ifdef WLAN_FEATURE_ROAM_OFFLOAD 2336 case WOW_REASON_ROAM_HO: 2337 return "WOW_REASON_ROAM_HO"; 2338 #endif /* WLAN_FEATURE_ROAM_OFFLOAD */ 2339 #ifdef FEATURE_WLAN_EXTSCAN 2340 case WOW_REASON_EXTSCAN: 2341 return "WOW_REASON_EXTSCAN"; 2342 #endif 2343 case WOW_REASON_RSSI_BREACH_EVENT: 2344 return "WOW_REASON_RSSI_BREACH_EVENT"; 2345 case WOW_REASON_NLO_SCAN_COMPLETE: 2346 return "WOW_REASON_NLO_SCAN_COMPLETE"; 2347 case WOW_REASON_NAN_EVENT: 2348 return "WOW_REASON_NAN_EVENT"; 2349 case WOW_REASON_OEM_RESPONSE_EVENT: 2350 return "WOW_OEM_RESPONSE_EVENT"; 2351 case WOW_REASON_ASSOC_RES_RECV: 2352 return "ASSOC_RES_RECV"; 2353 case WOW_REASON_REASSOC_REQ_RECV: 2354 return "REASSOC_REQ_RECV"; 2355 case WOW_REASON_REASSOC_RES_RECV: 2356 return "REASSOC_RES_RECV"; 2357 case WOW_REASON_ACTION_FRAME_RECV: 2358 return "ACTION_FRAME_RECV"; 2359 case WOW_REASON_BPF_ALLOW: 2360 return "WOW_REASON_BPF_ALLOW"; 2361 } 2362 return "unknown"; 2363 } 2364 2365 /** 2366 * wma_wow_wake_up_stats_display() - display wow wake up stats 2367 * @wma: Pointer to wma handle 2368 * 2369 * Return: none 2370 */ 2371 static void wma_wow_wake_up_stats_display(tp_wma_handle wma) 2372 { 2373 WMA_LOGA("uc %d bc %d v4_mc %d v6_mc %d ra %d ns %d na %d pno_match %d pno_complete %d gscan %d low_rssi %d rssi_breach %d icmp %d icmpv6 %d oem %d", 2374 wma->wow_ucast_wake_up_count, 2375 wma->wow_bcast_wake_up_count, 2376 wma->wow_ipv4_mcast_wake_up_count, 2377 wma->wow_ipv6_mcast_wake_up_count, 2378 wma->wow_ipv6_mcast_ra_stats, 2379 wma->wow_ipv6_mcast_ns_stats, 2380 wma->wow_ipv6_mcast_na_stats, 2381 wma->wow_pno_match_wake_up_count, 2382 wma->wow_pno_complete_wake_up_count, 2383 wma->wow_gscan_wake_up_count, 2384 wma->wow_low_rssi_wake_up_count, 2385 wma->wow_rssi_breach_wake_up_count, 2386 wma->wow_icmpv4_count, 2387 wma->wow_icmpv6_count, 
2388 wma->wow_oem_response_wake_up_count); 2389 2390 return; 2391 } 2392 2393 /** 2394 * wma_wow_ipv6_mcast_stats() - ipv6 mcast wake up stats 2395 * @wma: Pointer to wma handle 2396 * @data: Pointer to pattern match data 2397 * 2398 * Return: none 2399 */ 2400 static void wma_wow_ipv6_mcast_stats(tp_wma_handle wma, uint8_t *data) 2401 { 2402 static const uint8_t ipv6_ether_type[] = {0x86, 0xDD}; 2403 2404 if (!memcmp(ipv6_ether_type, (data + WMA_ETHER_TYPE_OFFSET), 2405 sizeof(ipv6_ether_type))) { 2406 if (WMA_ICMP_V6_HEADER_TYPE == 2407 *(data + WMA_ICMP_V6_HEADER_OFFSET)) { 2408 wma->wow_icmpv6_count++; 2409 if (WMA_ICMP_V6_RA_TYPE == 2410 *(data + WMA_ICMP_V6_TYPE_OFFSET)) 2411 wma->wow_ipv6_mcast_ra_stats++; 2412 else if (WMA_ICMP_V6_NS_TYPE == 2413 *(data + WMA_ICMP_V6_TYPE_OFFSET)) 2414 wma->wow_ipv6_mcast_ns_stats++; 2415 else if (WMA_ICMP_V6_NA_TYPE == 2416 *(data + WMA_ICMP_V6_TYPE_OFFSET)) 2417 wma->wow_ipv6_mcast_na_stats++; 2418 else 2419 WMA_LOGA("ICMP V6 type : 0x%x", 2420 *(data + WMA_ICMP_V6_TYPE_OFFSET)); 2421 } else { 2422 WMA_LOGA("ICMP_V6 header 0x%x", 2423 *(data + WMA_ICMP_V6_HEADER_OFFSET)); 2424 } 2425 } else { 2426 WMA_LOGA("Ethertype x%x:0x%x", 2427 *(data + WMA_ETHER_TYPE_OFFSET), 2428 *(data + WMA_ETHER_TYPE_OFFSET + 1)); 2429 } 2430 2431 return; 2432 } 2433 2434 /** 2435 * wma_wow_wake_up_stats() - maintain wow pattern match wake up stats 2436 * @wma: Pointer to wma handle 2437 * @data: Pointer to pattern match data 2438 * @len: Pattern match data length 2439 * @event: Wake up event 2440 * 2441 * Return: none 2442 */ 2443 static void wma_wow_wake_up_stats(tp_wma_handle wma, uint8_t *data, 2444 int32_t len, WOW_WAKE_REASON_TYPE event) 2445 { 2446 switch (event) { 2447 2448 case WOW_REASON_BPF_ALLOW: 2449 case WOW_REASON_PATTERN_MATCH_FOUND: 2450 if (WMA_BCAST_MAC_ADDR == *data) { 2451 wma->wow_bcast_wake_up_count++; 2452 if (len >= WMA_IPV4_PROTO_GET_MIN_LEN && 2453 qdf_nbuf_data_is_icmp_pkt(data)) 2454 wma->wow_icmpv4_count++; 2455 
else if ((len > WMA_ICMP_V6_TYPE_OFFSET) && 2456 qdf_nbuf_data_is_icmpv6_pkt(data)) 2457 wma->wow_icmpv6_count++; 2458 } else if (WMA_MCAST_IPV4_MAC_ADDR == *data) { 2459 wma->wow_ipv4_mcast_wake_up_count++; 2460 if (len >= WMA_IPV4_PROTO_GET_MIN_LEN && 2461 WMA_ICMP_PROTOCOL == *(data + WMA_IPV4_PROTOCOL)) 2462 wma->wow_icmpv4_count++; 2463 } else if (WMA_MCAST_IPV6_MAC_ADDR == *data) { 2464 wma->wow_ipv6_mcast_wake_up_count++; 2465 if (len > WMA_ICMP_V6_TYPE_OFFSET) 2466 wma_wow_ipv6_mcast_stats(wma, data); 2467 else 2468 WMA_LOGA("ICMP_V6 data len %d", len); 2469 } else { 2470 wma->wow_ucast_wake_up_count++; 2471 if (qdf_nbuf_data_is_ipv4_mcast_pkt(data)) 2472 wma->wow_ipv4_mcast_wake_up_count++; 2473 else if (qdf_nbuf_data_is_ipv6_mcast_pkt(data)) 2474 wma->wow_ipv6_mcast_wake_up_count++; 2475 2476 if (len >= WMA_IPV4_PROTO_GET_MIN_LEN && 2477 qdf_nbuf_data_is_icmp_pkt(data)) 2478 wma->wow_icmpv4_count++; 2479 else if (len > WMA_ICMP_V6_TYPE_OFFSET && 2480 qdf_nbuf_data_is_icmpv6_pkt(data)) 2481 wma->wow_icmpv6_count++; 2482 } 2483 break; 2484 2485 case WOW_REASON_RA_MATCH: 2486 wma->wow_icmpv6_count++; 2487 wma->wow_ipv6_mcast_ra_stats++; 2488 wma->wow_ipv6_mcast_wake_up_count++; 2489 break; 2490 2491 case WOW_REASON_NLOD: 2492 wma->wow_pno_match_wake_up_count++; 2493 break; 2494 2495 case WOW_REASON_NLO_SCAN_COMPLETE: 2496 wma->wow_pno_complete_wake_up_count++; 2497 break; 2498 2499 case WOW_REASON_LOW_RSSI: 2500 wma->wow_low_rssi_wake_up_count++; 2501 break; 2502 2503 case WOW_REASON_EXTSCAN: 2504 wma->wow_gscan_wake_up_count++; 2505 break; 2506 2507 case WOW_REASON_RSSI_BREACH_EVENT: 2508 wma->wow_rssi_breach_wake_up_count++; 2509 break; 2510 case WOW_REASON_OEM_RESPONSE_EVENT: 2511 wma->wow_oem_response_wake_up_count++; 2512 break; 2513 2514 default: 2515 WMA_LOGE("Unknown wake up reason"); 2516 break; 2517 } 2518 2519 wma_wow_wake_up_stats_display(wma); 2520 return; 2521 } 2522 2523 #ifdef FEATURE_WLAN_EXTSCAN 2524 /** 2525 * 
wma_extscan_get_eventid_from_tlvtag() - map tlv tag to corresponding event id 2526 * @tag: WMI TLV tag 2527 * 2528 * Return: 2529 * 0 if TLV tag is invalid 2530 * else return corresponding WMI event id 2531 */ 2532 static int wma_extscan_get_eventid_from_tlvtag(uint32_t tag) 2533 { 2534 uint32_t event_id; 2535 2536 switch (tag) { 2537 case WMITLV_TAG_STRUC_wmi_extscan_start_stop_event_fixed_param: 2538 event_id = WMI_EXTSCAN_START_STOP_EVENTID; 2539 break; 2540 2541 case WMITLV_TAG_STRUC_wmi_extscan_operation_event_fixed_param: 2542 event_id = WMI_EXTSCAN_OPERATION_EVENTID; 2543 break; 2544 2545 case WMITLV_TAG_STRUC_wmi_extscan_table_usage_event_fixed_param: 2546 event_id = WMI_EXTSCAN_TABLE_USAGE_EVENTID; 2547 break; 2548 2549 case WMITLV_TAG_STRUC_wmi_extscan_cached_results_event_fixed_param: 2550 event_id = WMI_EXTSCAN_CACHED_RESULTS_EVENTID; 2551 break; 2552 2553 case WMITLV_TAG_STRUC_wmi_extscan_wlan_change_results_event_fixed_param: 2554 event_id = WMI_EXTSCAN_WLAN_CHANGE_RESULTS_EVENTID; 2555 break; 2556 2557 case WMITLV_TAG_STRUC_wmi_extscan_hotlist_match_event_fixed_param: 2558 event_id = WMI_EXTSCAN_HOTLIST_MATCH_EVENTID; 2559 break; 2560 2561 case WMITLV_TAG_STRUC_wmi_extscan_capabilities_event_fixed_param: 2562 event_id = WMI_EXTSCAN_CAPABILITIES_EVENTID; 2563 break; 2564 2565 case WMITLV_TAG_STRUC_wmi_extscan_hotlist_ssid_match_event_fixed_param: 2566 event_id = WMI_EXTSCAN_HOTLIST_SSID_MATCH_EVENTID; 2567 break; 2568 2569 default: 2570 event_id = 0; 2571 WMA_LOGE("%s: Unknown tag: %d", __func__, tag); 2572 break; 2573 } 2574 2575 WMA_LOGI("%s: For tag %d WMI event 0x%x", __func__, tag, event_id); 2576 return event_id; 2577 } 2578 #else 2579 static int wma_extscan_get_eventid_from_tlvtag(uint32_t tag) 2580 { 2581 return 0; 2582 } 2583 #endif 2584 2585 /** 2586 * wow_get_wmi_eventid() - map reason or tlv tag to corresponding event id 2587 * @tag: WMI TLV tag 2588 * @reason: WOW reason 2589 * 2590 * WOW reason type is primarily used to find the ID. 
If there could be 2591 * multiple events that can be sent as a WOW event with same reason 2592 * then tlv tag is used to identify the corresponding event. 2593 * 2594 * Return: 2595 * 0 if TLV tag/reason is invalid 2596 * else return corresponding WMI event id 2597 */ 2598 static int wow_get_wmi_eventid(int32_t reason, uint32_t tag) 2599 { 2600 uint32_t event_id; 2601 2602 switch (reason) { 2603 case WOW_REASON_NLO_SCAN_COMPLETE: 2604 event_id = WMI_NLO_SCAN_COMPLETE_EVENTID; 2605 break; 2606 case WOW_REASON_CSA_EVENT: 2607 event_id = WMI_CSA_HANDLING_EVENTID; 2608 break; 2609 case WOW_REASON_LOW_RSSI: 2610 event_id = WMI_ROAM_EVENTID; 2611 break; 2612 case WOW_REASON_CLIENT_KICKOUT_EVENT: 2613 event_id = WMI_PEER_STA_KICKOUT_EVENTID; 2614 break; 2615 case WOW_REASON_EXTSCAN: 2616 event_id = wma_extscan_get_eventid_from_tlvtag(tag); 2617 break; 2618 case WOW_REASON_RSSI_BREACH_EVENT: 2619 event_id = WMI_RSSI_BREACH_EVENTID; 2620 break; 2621 case WOW_REASON_NAN_EVENT: 2622 event_id = WMI_NAN_EVENTID; 2623 break; 2624 case WOW_REASON_NAN_DATA: 2625 event_id = wma_ndp_get_eventid_from_tlvtag(tag); 2626 break; 2627 case WOW_REASON_TDLS_CONN_TRACKER_EVENT: 2628 event_id = WOW_TDLS_CONN_TRACKER_EVENT; 2629 break; 2630 default: 2631 WMA_LOGD(FL("Unexpected WOW reason : %s(%d)"), 2632 wma_wow_wake_reason_str(reason), reason); 2633 event_id = 0; 2634 break; 2635 } 2636 2637 return event_id; 2638 } 2639 2640 /** 2641 * tlv_check_required() - tells whether to check the wow packet buffer 2642 * for proper TLV structure. 2643 * @reason: WOW reason 2644 * 2645 * In most cases, wow wake up event carries the actual event buffer in 2646 * wow_packet_buffer with some exceptions. This function is used to 2647 * determine when to check for the TLVs in wow_packet_buffer. 2648 * 2649 * Return: true if check is required and false otherwise. 
2650 */ 2651 static bool tlv_check_required(int32_t reason) 2652 { 2653 switch (reason) { 2654 case WOW_REASON_NLO_SCAN_COMPLETE: 2655 case WOW_REASON_CSA_EVENT: 2656 case WOW_REASON_LOW_RSSI: 2657 case WOW_REASON_CLIENT_KICKOUT_EVENT: 2658 case WOW_REASON_EXTSCAN: 2659 case WOW_REASON_RSSI_BREACH_EVENT: 2660 case WOW_REASON_NAN_EVENT: 2661 case WOW_REASON_NAN_DATA: 2662 return true; 2663 default: 2664 return false; 2665 } 2666 } 2667 2668 /** 2669 * wma_pkt_proto_subtype_to_string() - to convert proto subtype 2670 * of data packet to string. 2671 * @proto_subtype: proto subtype for data packet 2672 * 2673 * This function returns the string for the proto subtype of 2674 * data packet. 2675 * 2676 * Return: string for proto subtype for data packet 2677 */ 2678 const char * 2679 wma_pkt_proto_subtype_to_string(enum qdf_proto_subtype proto_subtype) 2680 { 2681 switch (proto_subtype) { 2682 case QDF_PROTO_EAPOL_M1: 2683 return "EAPOL M1"; 2684 case QDF_PROTO_EAPOL_M2: 2685 return "EAPOL M2"; 2686 case QDF_PROTO_EAPOL_M3: 2687 return "EAPOL M3"; 2688 case QDF_PROTO_EAPOL_M4: 2689 return "EAPOL M4"; 2690 case QDF_PROTO_DHCP_DISCOVER: 2691 return "DHCP DISCOVER"; 2692 case QDF_PROTO_DHCP_REQUEST: 2693 return "DHCP REQUEST"; 2694 case QDF_PROTO_DHCP_OFFER: 2695 return "DHCP OFFER"; 2696 case QDF_PROTO_DHCP_ACK: 2697 return "DHCP ACK"; 2698 case QDF_PROTO_DHCP_NACK: 2699 return "DHCP NACK"; 2700 case QDF_PROTO_DHCP_RELEASE: 2701 return "DHCP RELEASE"; 2702 case QDF_PROTO_DHCP_INFORM: 2703 return "DHCP INFORM"; 2704 case QDF_PROTO_DHCP_DECLINE: 2705 return "DHCP DECLINE"; 2706 case QDF_PROTO_ARP_REQ: 2707 return "ARP REQUEST"; 2708 case QDF_PROTO_ARP_RES: 2709 return "ARP RESPONSE"; 2710 case QDF_PROTO_ICMP_REQ: 2711 return "ICMP REQUEST"; 2712 case QDF_PROTO_ICMP_RES: 2713 return "ICMP RESPONSE"; 2714 case QDF_PROTO_ICMPV6_REQ: 2715 return "ICMPV6 REQUEST"; 2716 case QDF_PROTO_ICMPV6_RES: 2717 return "ICMPV6 RESPONSE"; 2718 case QDF_PROTO_IPV4_UDP: 2719 return "IPV4 UDP 
Packet"; 2720 case QDF_PROTO_IPV4_TCP: 2721 return "IPV4 TCP Packet"; 2722 case QDF_PROTO_IPV6_UDP: 2723 return "IPV6 UDP Packet"; 2724 case QDF_PROTO_IPV6_TCP: 2725 return "IPV6 TCP Packet"; 2726 default: 2727 return "Invalid Packet"; 2728 } 2729 } 2730 2731 /** 2732 * wma_wow_get_pkt_proto_subtype() - get the proto subtype 2733 * of the packet. 2734 * @data: Pointer to data buffer 2735 * @len: length of the data buffer 2736 * 2737 * This function gives the proto subtype of the packet. 2738 * 2739 * Return: proto subtype of the packet. 2740 */ 2741 static enum qdf_proto_subtype 2742 wma_wow_get_pkt_proto_subtype(uint8_t *data, 2743 uint32_t len) 2744 { 2745 uint16_t ether_type = (uint16_t)(*(uint16_t *)(data + 2746 QDF_NBUF_TRAC_ETH_TYPE_OFFSET)); 2747 2748 WMA_LOGD("Ether Type: 0x%04x", 2749 ani_cpu_to_be16(ether_type)); 2750 2751 if (QDF_NBUF_TRAC_EAPOL_ETH_TYPE == 2752 ani_cpu_to_be16(ether_type)) { 2753 if (len >= WMA_EAPOL_SUBTYPE_GET_MIN_LEN) 2754 return qdf_nbuf_data_get_eapol_subtype(data); 2755 WMA_LOGD("EAPOL Packet"); 2756 return QDF_PROTO_INVALID; 2757 } else if (QDF_NBUF_TRAC_ARP_ETH_TYPE == 2758 ani_cpu_to_be16(ether_type)) { 2759 if (len >= WMA_ARP_SUBTYPE_GET_MIN_LEN) 2760 return qdf_nbuf_data_get_arp_subtype(data); 2761 WMA_LOGD("ARP Packet"); 2762 return QDF_PROTO_INVALID; 2763 } else if (QDF_NBUF_TRAC_IPV4_ETH_TYPE == 2764 ani_cpu_to_be16(ether_type)) { 2765 if (len >= WMA_IPV4_PROTO_GET_MIN_LEN) { 2766 uint8_t proto_type; 2767 2768 proto_type = qdf_nbuf_data_get_ipv4_proto(data); 2769 WMA_LOGD("IPV4_proto_type: %u", proto_type); 2770 if (proto_type == QDF_NBUF_TRAC_ICMP_TYPE) { 2771 if (len >= WMA_ICMP_SUBTYPE_GET_MIN_LEN) 2772 return qdf_nbuf_data_get_icmp_subtype( 2773 data); 2774 WMA_LOGD("ICMP Packet"); 2775 return QDF_PROTO_INVALID; 2776 } else if (proto_type == QDF_NBUF_TRAC_UDP_TYPE) { 2777 if (len >= WMA_IS_DHCP_GET_MIN_LEN) { 2778 if (qdf_nbuf_data_is_ipv4_dhcp_pkt(data)) { 2779 if (len >= 2780 WMA_DHCP_SUBTYPE_GET_MIN_LEN) 2781 return 
qdf_nbuf_data_get_dhcp_subtype(data); 2782 WMA_LOGD("DHCP Packet"); 2783 return QDF_PROTO_INVALID; 2784 } 2785 } 2786 return QDF_PROTO_IPV4_UDP; 2787 } else if (proto_type == QDF_NBUF_TRAC_TCP_TYPE) { 2788 return QDF_PROTO_IPV4_TCP; 2789 } 2790 } 2791 WMA_LOGD("IPV4 Packet"); 2792 return QDF_PROTO_INVALID; 2793 } else if (QDF_NBUF_TRAC_IPV6_ETH_TYPE == 2794 ani_cpu_to_be16(ether_type)) { 2795 if (len >= WMA_IPV6_PROTO_GET_MIN_LEN) { 2796 uint8_t proto_type; 2797 2798 proto_type = qdf_nbuf_data_get_ipv6_proto(data); 2799 WMA_LOGD("IPV6_proto_type: %u", proto_type); 2800 if (proto_type == QDF_NBUF_TRAC_ICMPV6_TYPE) { 2801 if (len >= WMA_ICMPV6_SUBTYPE_GET_MIN_LEN) 2802 return qdf_nbuf_data_get_icmpv6_subtype( 2803 data); 2804 WMA_LOGD("ICMPV6 Packet"); 2805 return QDF_PROTO_INVALID; 2806 } else if (proto_type == QDF_NBUF_TRAC_UDP_TYPE) { 2807 return QDF_PROTO_IPV6_UDP; 2808 } else if (proto_type == QDF_NBUF_TRAC_TCP_TYPE) { 2809 return QDF_PROTO_IPV6_TCP; 2810 } 2811 } 2812 WMA_LOGD("IPV6 Packet"); 2813 return QDF_PROTO_INVALID; 2814 } 2815 2816 return QDF_PROTO_INVALID; 2817 } 2818 2819 /** 2820 * wma_wow_parse_data_pkt_buffer() - API to parse data buffer for data 2821 * packet that resulted in WOW wakeup. 2822 * @data: Pointer to data buffer 2823 * @buf_len: data buffer length 2824 * 2825 * This function parses the data buffer received (first few bytes of 2826 * skb->data) to get informaton like src mac addr, dst mac addr, packet 2827 * len, seq_num, etc. 
 *
 * Return: void
 */
static void wma_wow_parse_data_pkt_buffer(uint8_t *data,
					  uint32_t buf_len)
{
	enum qdf_proto_subtype proto_subtype;
	uint16_t pkt_len, key_len, seq_num;
	uint16_t src_port, dst_port;
	uint32_t transaction_id, tcp_seq_num;

	WMA_LOGD("wow_buf_pkt_len: %u", buf_len);
	/* Need at least the 14-byte Ethernet header to print MACs */
	if (buf_len >= QDF_NBUF_TRAC_IPV4_OFFSET)
		WMA_LOGD("Src_mac: " MAC_ADDRESS_STR " Dst_mac: " MAC_ADDRESS_STR,
			 MAC_ADDR_ARRAY(data + QDF_NBUF_SRC_MAC_OFFSET),
			 MAC_ADDR_ARRAY(data + QDF_NBUF_DEST_MAC_OFFSET));
	else
		/* Too short to classify: jumps into the switch default */
		goto end;

	proto_subtype = wma_wow_get_pkt_proto_subtype(data, buf_len);
	switch (proto_subtype) {
	case QDF_PROTO_EAPOL_M1:
	case QDF_PROTO_EAPOL_M2:
	case QDF_PROTO_EAPOL_M3:
	case QDF_PROTO_EAPOL_M4:
		WMA_LOGD("WOW Wakeup: %s rcvd",
			 wma_pkt_proto_subtype_to_string(proto_subtype));
		if (buf_len >= WMA_EAPOL_INFO_GET_MIN_LEN) {
			/* EAPOL length/key-length fields are big-endian */
			pkt_len = (uint16_t)(*(uint16_t *)(data +
					EAPOL_PKT_LEN_OFFSET));
			key_len = (uint16_t)(*(uint16_t *)(data +
					EAPOL_KEY_LEN_OFFSET));
			WMA_LOGD("Pkt_len: %u, Key_len: %u",
				 ani_cpu_to_be16(pkt_len),
				 ani_cpu_to_be16(key_len));
		}
		break;

	case QDF_PROTO_DHCP_DISCOVER:
	case QDF_PROTO_DHCP_REQUEST:
	case QDF_PROTO_DHCP_OFFER:
	case QDF_PROTO_DHCP_ACK:
	case QDF_PROTO_DHCP_NACK:
	case QDF_PROTO_DHCP_RELEASE:
	case QDF_PROTO_DHCP_INFORM:
	case QDF_PROTO_DHCP_DECLINE:
		WMA_LOGD("WOW Wakeup: %s rcvd",
			 wma_pkt_proto_subtype_to_string(proto_subtype));
		if (buf_len >= WMA_DHCP_INFO_GET_MIN_LEN) {
			pkt_len = (uint16_t)(*(uint16_t *)(data +
					DHCP_PKT_LEN_OFFSET));
			transaction_id = (uint32_t)(*(uint32_t *)(data +
					DHCP_TRANSACTION_ID_OFFSET));
			/*
			 * NOTE(review): transaction_id (DHCP xid) is 32 bits
			 * but is byte-swapped with ani_cpu_to_be16 — the
			 * logged value is likely wrong; confirm and use a
			 * 32-bit swap. Log-only, so behavior kept as-is.
			 */
			WMA_LOGD("Pkt_len: %u, Transaction_id: %u",
				 ani_cpu_to_be16(pkt_len),
				 ani_cpu_to_be16(transaction_id));
		}
		break;

	case QDF_PROTO_ARP_REQ:
	case QDF_PROTO_ARP_RES:
		/* ARP carries no extra fields worth dumping here */
		WMA_LOGD("WOW Wakeup: %s rcvd",
			 wma_pkt_proto_subtype_to_string(proto_subtype));
		break;

	case QDF_PROTO_ICMP_REQ:
	case QDF_PROTO_ICMP_RES:
		WMA_LOGD("WOW Wakeup: %s rcvd",
			 wma_pkt_proto_subtype_to_string(proto_subtype));
		if (buf_len >= WMA_IPV4_PKT_INFO_GET_MIN_LEN) {
			pkt_len = (uint16_t)(*(uint16_t *)(data +
					IPV4_PKT_LEN_OFFSET));
			seq_num = (uint16_t)(*(uint16_t *)(data +
					ICMP_SEQ_NUM_OFFSET));
			WMA_LOGD("Pkt_len: %u, Seq_num: %u",
				 ani_cpu_to_be16(pkt_len),
				 ani_cpu_to_be16(seq_num));
		}
		break;

	case QDF_PROTO_ICMPV6_REQ:
	case QDF_PROTO_ICMPV6_RES:
		WMA_LOGD("WOW Wakeup: %s rcvd",
			 wma_pkt_proto_subtype_to_string(proto_subtype));
		if (buf_len >= WMA_IPV6_PKT_INFO_GET_MIN_LEN) {
			pkt_len = (uint16_t)(*(uint16_t *)(data +
					IPV6_PKT_LEN_OFFSET));
			seq_num = (uint16_t)(*(uint16_t *)(data +
					ICMPV6_SEQ_NUM_OFFSET));
			WMA_LOGD("Pkt_len: %u, Seq_num: %u",
				 ani_cpu_to_be16(pkt_len),
				 ani_cpu_to_be16(seq_num));
		}
		break;

	case QDF_PROTO_IPV4_UDP:
	case QDF_PROTO_IPV4_TCP:
		WMA_LOGD("WOW Wakeup: %s rcvd",
			 wma_pkt_proto_subtype_to_string(proto_subtype));
		if (buf_len >= WMA_IPV4_PKT_INFO_GET_MIN_LEN) {
			pkt_len = (uint16_t)(*(uint16_t *)(data +
					IPV4_PKT_LEN_OFFSET));
			src_port = (uint16_t)(*(uint16_t *)(data +
					IPV4_SRC_PORT_OFFSET));
			dst_port = (uint16_t)(*(uint16_t *)(data +
					IPV4_DST_PORT_OFFSET));
			WMA_LOGD("Pkt_len: %u",
				 ani_cpu_to_be16(pkt_len));
			WMA_LOGD("src_port: %u, dst_port: %u",
				 ani_cpu_to_be16(src_port),
				 ani_cpu_to_be16(dst_port));
			if (proto_subtype == QDF_PROTO_IPV4_TCP) {
				tcp_seq_num = (uint32_t)(*(uint32_t *)(data +
						IPV4_TCP_SEQ_NUM_OFFSET));
				/*
				 * NOTE(review): 32-bit TCP sequence number
				 * swapped with ani_cpu_to_be16 — logged value
				 * likely truncated/wrong; confirm and use a
				 * 32-bit swap.
				 */
				WMA_LOGD("TCP_seq_num: %u",
					 ani_cpu_to_be16(tcp_seq_num));
			}
		}
		break;

	case QDF_PROTO_IPV6_UDP:
	case QDF_PROTO_IPV6_TCP:
		WMA_LOGD("WOW Wakeup: %s rcvd",
			 wma_pkt_proto_subtype_to_string(proto_subtype));
		if (buf_len >= WMA_IPV6_PKT_INFO_GET_MIN_LEN) {
			pkt_len = (uint16_t)(*(uint16_t *)(data +
					IPV6_PKT_LEN_OFFSET));
			src_port = (uint16_t)(*(uint16_t *)(data +
					IPV6_SRC_PORT_OFFSET));
			dst_port = (uint16_t)(*(uint16_t *)(data +
					IPV6_DST_PORT_OFFSET));
			WMA_LOGD("Pkt_len: %u",
				 ani_cpu_to_be16(pkt_len));
			WMA_LOGD("src_port: %u, dst_port: %u",
				 ani_cpu_to_be16(src_port),
				 ani_cpu_to_be16(dst_port));
			if (proto_subtype == QDF_PROTO_IPV6_TCP) {
				tcp_seq_num = (uint32_t)(*(uint32_t *)(data +
						IPV6_TCP_SEQ_NUM_OFFSET));
				/* NOTE(review): same 16-bit swap issue as
				 * the IPv4 TCP case above.
				 */
				WMA_LOGD("TCP_seq_num: %u",
					 ani_cpu_to_be16(tcp_seq_num));
			}
		}
		break;

	default:
end:
		WMA_LOGD("wow_buf_pkt_len: %u", buf_len);
		break;
	}
}

/**
 * wma_wow_dump_mgmt_buffer() - API to parse data buffer for mgmt.
 * packet that resulted in WOW wakeup.
 * @wow_packet_buffer: Pointer to data buffer
 * @buf_len: length of data buffer
 *
 * This function parses the data buffer received (802.11 header)
 * to get information like src mac addr, dst mac addr, seq_num,
 * frag_num, etc.
2989 * 2990 * Return: void 2991 */ 2992 static void wma_wow_dump_mgmt_buffer(uint8_t *wow_packet_buffer, 2993 uint32_t buf_len) 2994 { 2995 struct ieee80211_frame_addr4 *wh; 2996 2997 WMA_LOGD("wow_buf_pkt_len: %u", buf_len); 2998 wh = (struct ieee80211_frame_addr4 *) 2999 (wow_packet_buffer + 4); 3000 if (buf_len >= sizeof(struct ieee80211_frame)) { 3001 uint8_t to_from_ds, frag_num; 3002 uint32_t seq_num; 3003 3004 WMA_LOGE("RA: " MAC_ADDRESS_STR " TA: " MAC_ADDRESS_STR, 3005 MAC_ADDR_ARRAY(wh->i_addr1), 3006 MAC_ADDR_ARRAY(wh->i_addr2)); 3007 3008 WMA_LOGE("TO_DS: %u, FROM_DS: %u", 3009 wh->i_fc[1] & IEEE80211_FC1_DIR_TODS, 3010 wh->i_fc[1] & IEEE80211_FC1_DIR_FROMDS); 3011 3012 to_from_ds = wh->i_fc[1] & IEEE80211_FC1_DIR_DSTODS; 3013 3014 switch (to_from_ds) { 3015 case IEEE80211_NO_DS: 3016 WMA_LOGE("BSSID: " MAC_ADDRESS_STR, 3017 MAC_ADDR_ARRAY(wh->i_addr3)); 3018 break; 3019 case IEEE80211_TO_DS: 3020 WMA_LOGE("DA: " MAC_ADDRESS_STR, 3021 MAC_ADDR_ARRAY(wh->i_addr3)); 3022 break; 3023 case IEEE80211_FROM_DS: 3024 WMA_LOGE("SA: " MAC_ADDRESS_STR, 3025 MAC_ADDR_ARRAY(wh->i_addr3)); 3026 break; 3027 case IEEE80211_DS_TO_DS: 3028 if (buf_len >= sizeof(struct ieee80211_frame_addr4)) 3029 WMA_LOGE("DA: " MAC_ADDRESS_STR " SA: " 3030 MAC_ADDRESS_STR, 3031 MAC_ADDR_ARRAY(wh->i_addr3), 3032 MAC_ADDR_ARRAY(wh->i_addr4)); 3033 break; 3034 } 3035 3036 seq_num = (((*(uint16_t *)wh->i_seq) & 3037 IEEE80211_SEQ_SEQ_MASK) >> 3038 IEEE80211_SEQ_SEQ_SHIFT); 3039 frag_num = (((*(uint16_t *)wh->i_seq) & 3040 IEEE80211_SEQ_FRAG_MASK) >> 3041 IEEE80211_SEQ_FRAG_SHIFT); 3042 3043 WMA_LOGE("SEQ_NUM: %u, FRAG_NUM: %u", 3044 seq_num, frag_num); 3045 } else { 3046 WMA_LOGE("Insufficient buffer length for mgmt. packet"); 3047 } 3048 } 3049 3050 /** 3051 * wma_wow_get_wakelock_duration() - return the wakelock duration 3052 * for some mgmt packets received. 
3053 * @wake_reason: wow wakeup reason 3054 * 3055 * This function returns the wakelock duration for some mgmt packets 3056 * received while in wow suspend. 3057 * 3058 * Return: wakelock duration 3059 */ 3060 static uint32_t wma_wow_get_wakelock_duration(int wake_reason) 3061 { 3062 uint32_t wake_lock_duration = 0; 3063 3064 switch (wake_reason) { 3065 case WOW_REASON_AUTH_REQ_RECV: 3066 wake_lock_duration = WMA_AUTH_REQ_RECV_WAKE_LOCK_TIMEOUT; 3067 break; 3068 case WOW_REASON_ASSOC_REQ_RECV: 3069 wake_lock_duration = WMA_ASSOC_REQ_RECV_WAKE_LOCK_DURATION; 3070 break; 3071 case WOW_REASON_DEAUTH_RECVD: 3072 wake_lock_duration = WMA_DEAUTH_RECV_WAKE_LOCK_DURATION; 3073 break; 3074 case WOW_REASON_DISASSOC_RECVD: 3075 wake_lock_duration = WMA_DISASSOC_RECV_WAKE_LOCK_DURATION; 3076 break; 3077 default: 3078 break; 3079 } 3080 3081 return wake_lock_duration; 3082 } 3083 3084 /** 3085 * wma_wow_wakeup_host_event() - wakeup host event handler 3086 * @handle: wma handle 3087 * @event: event data 3088 * @len: buffer length 3089 * 3090 * Handler to catch wow wakeup host event. This event will have 3091 * reason why the firmware has woken the host. 
 *
 * Return: 0 for success or error
 */
int wma_wow_wakeup_host_event(void *handle, uint8_t *event,
			      uint32_t len)
{
	tp_wma_handle wma = (tp_wma_handle) handle;
	WMI_WOW_WAKEUP_HOST_EVENTID_param_tlvs *param_buf;
	WOW_EVENT_INFO_fixed_param *wake_info;
#ifdef FEATURE_WLAN_SCAN_PNO
	struct wma_txrx_node *node;
#endif /* FEATURE_WLAN_SCAN_PNO */
	uint32_t wake_lock_duration = 0;
	void *wmi_cmd_struct_ptr = NULL;
	uint32_t tlv_hdr, tag, wow_buf_pkt_len = 0, event_id = 0;
	int tlv_ok_status;

	param_buf = (WMI_WOW_WAKEUP_HOST_EVENTID_param_tlvs *) event;
	if (!param_buf) {
		WMA_LOGE("Invalid wow wakeup host event buf");
		return -EINVAL;
	}

	wake_info = param_buf->fixed_param;

	/*
	 * Log the wake reason unless this is an "unspecified" wake that
	 * arrived while runtime-PM resume is in progress (those are noise).
	 */
	if ((wake_info->wake_reason != WOW_REASON_UNSPECIFIED) ||
	    (wake_info->wake_reason == WOW_REASON_UNSPECIFIED &&
	     !wmi_get_runtime_pm_inprogress(wma->wmi_handle))) {
		WMA_LOGA("WOW wakeup host event received (reason: %s(%d)) for vdev %d",
			 wma_wow_wake_reason_str(wake_info->wake_reason),
			 wake_info->wake_reason, wake_info->vdev_id);
		qdf_wow_wakeup_host_event(wake_info->wake_reason);
	}

	/* Unblock anyone waiting for the resume handshake */
	qdf_event_set(&wma->wma_resume_event);

	if (param_buf->wow_packet_buffer &&
	    tlv_check_required(wake_info->wake_reason)) {
		/*
		 * In case of wow_packet_buffer, first 4 bytes is the length.
		 * Following the length is the actual buffer.
		 *
		 * NOTE(review): wow_buf_pkt_len comes from the firmware
		 * buffer and is not validated against the event length
		 * 'len' before being passed to the TLV parser — confirm
		 * whether upstream hardening is needed here.
		 */
		wow_buf_pkt_len = *(uint32_t *)param_buf->wow_packet_buffer;
		tlv_hdr = WMITLV_GET_HDR(
				(uint8_t *)param_buf->wow_packet_buffer + 4);

		tag = WMITLV_GET_TLVTAG(tlv_hdr);
		event_id = wow_get_wmi_eventid(wake_info->wake_reason, tag);
		if (!event_id) {
			WMA_LOGE(FL("Unable to find matching ID"));
			return -EINVAL;
		}

		/* Parse/pad the embedded event; allocates wmi_cmd_struct_ptr
		 * which is freed at the bottom of this function.
		 */
		tlv_ok_status = wmitlv_check_and_pad_event_tlvs(
				handle, param_buf->wow_packet_buffer + 4,
				wow_buf_pkt_len, event_id,
				&wmi_cmd_struct_ptr);

		if (tlv_ok_status != 0) {
			WMA_LOGE(FL("Invalid TLVs, Length:%d event_id:%d status: %d"),
				 wow_buf_pkt_len, event_id, tlv_ok_status);
			return -EINVAL;
		}
	}

	/* Dispatch per wake reason; many cases forward the embedded event
	 * to the same handler that would run in the non-WoW path.
	 */
	switch (wake_info->wake_reason) {
	case WOW_REASON_AUTH_REQ_RECV:
	case WOW_REASON_ASSOC_REQ_RECV:
	case WOW_REASON_DEAUTH_RECVD:
	case WOW_REASON_DISASSOC_RECVD:
	case WOW_REASON_ASSOC_RES_RECV:
	case WOW_REASON_REASSOC_REQ_RECV:
	case WOW_REASON_REASSOC_RES_RECV:
	case WOW_REASON_BEACON_RECV:
	case WOW_REASON_ACTION_FRAME_RECV:
		wake_lock_duration =
			wma_wow_get_wakelock_duration(wake_info->wake_reason);
		if (param_buf->wow_packet_buffer) {
			/* First 4-bytes of wow_packet_buffer is the length */
			qdf_mem_copy((uint8_t *) &wow_buf_pkt_len,
				     param_buf->wow_packet_buffer, 4);
			if (wow_buf_pkt_len)
				wma_wow_dump_mgmt_buffer(
					param_buf->wow_packet_buffer,
					wow_buf_pkt_len);
			else
				WMA_LOGE("wow packet buffer is empty");
		} else {
			WMA_LOGE("No wow packet buffer present");
		}
		break;

	case WOW_REASON_AP_ASSOC_LOST:
		wake_lock_duration = WMA_BMISS_EVENT_WAKE_LOCK_DURATION;
		WMA_LOGA("Beacon miss indication on vdev %x",
			 wake_info->vdev_id);
		wma_beacon_miss_handler(wma, wake_info->vdev_id);
		break;
#ifdef FEATURE_WLAN_AUTO_SHUTDOWN
	case WOW_REASON_HOST_AUTO_SHUTDOWN:
		wake_lock_duration = WMA_AUTO_SHUTDOWN_WAKE_LOCK_DURATION;
		WMA_LOGA("Received WOW Auto Shutdown trigger in suspend");
		if (wma_post_auto_shutdown_msg())
			return -EINVAL;
		break;
#endif /* FEATURE_WLAN_AUTO_SHUTDOWN */
#ifdef FEATURE_WLAN_SCAN_PNO
	case WOW_REASON_NLOD:
		wma_wow_wake_up_stats(wma, NULL, 0, WOW_REASON_NLOD);
		node = &wma->interfaces[wake_info->vdev_id];
		/* NOTE(review): '&array[i]' is never NULL, so this check is
		 * always true; vdev_id itself is not range-checked here.
		 */
		if (node) {
			WMA_LOGD("NLO match happened");
			node->nlo_match_evt_received = true;
			cds_host_diag_log_work(&wma->pno_wake_lock,
					WMA_PNO_MATCH_WAKE_LOCK_TIMEOUT,
					WIFI_POWER_EVENT_WAKELOCK_PNO);
			qdf_wake_lock_timeout_acquire(&wma->pno_wake_lock,
					WMA_PNO_MATCH_WAKE_LOCK_TIMEOUT);
		}
		break;

	case WOW_REASON_NLO_SCAN_COMPLETE:
		WMA_LOGD("Host woken up due to pno scan complete reason");
		if (param_buf->wow_packet_buffer)
			wma_nlo_scan_cmp_evt_handler(handle,
					wmi_cmd_struct_ptr, wow_buf_pkt_len);
		else
			WMA_LOGD("No wow_packet_buffer present");
		break;
#endif /* FEATURE_WLAN_SCAN_PNO */

	case WOW_REASON_CSA_EVENT:
		WMA_LOGD("Host woken up because of CSA IE");
		wma_csa_offload_handler(handle, wmi_cmd_struct_ptr,
					wow_buf_pkt_len);
		break;

#ifdef FEATURE_WLAN_LPHB
	case WOW_REASON_WLAN_HB:
		wma_lphb_handler(wma, (uint8_t *) param_buf->hb_indevt);
		break;
#endif /* FEATURE_WLAN_LPHB */

	case WOW_REASON_HTT_EVENT:
		break;

	case WOW_REASON_BPF_ALLOW:
	case WOW_REASON_PATTERN_MATCH_FOUND:
#ifdef FEATURE_WLAN_RA_FILTERING
	case WOW_REASON_RA_MATCH:
#endif /* FEATURE_WLAN_RA_FILTERING */
	case WOW_REASON_RECV_MAGIC_PATTERN:
		wma_wow_wake_up_stats_display(wma);
		WMA_LOGD("Wake up for Rx packet, dump starting from ethernet hdr");
		if (param_buf->wow_packet_buffer) {
			/* First 4-bytes of wow_packet_buffer is the length */
			qdf_mem_copy((uint8_t *) &wow_buf_pkt_len,
				     param_buf->wow_packet_buffer, 4);
			if (wow_buf_pkt_len) {
				uint8_t *data;

				wma_wow_wake_up_stats(wma,
					param_buf->wow_packet_buffer + 4,
					wow_buf_pkt_len,
					wake_info->wake_reason);
				qdf_trace_hex_dump(QDF_MODULE_ID_WMA,
					QDF_TRACE_LEVEL_DEBUG,
					param_buf->wow_packet_buffer + 4,
					wow_buf_pkt_len);

				data = (uint8_t *)
					(param_buf->wow_packet_buffer + 4);
				wma_wow_parse_data_pkt_buffer(data,
					wow_buf_pkt_len);
			} else {
				WMA_LOGE("wow packet buffer is empty");
			}
		} else {
			WMA_LOGE("No wow packet buffer present");
		}
		break;

	case WOW_REASON_LOW_RSSI:
		/* WOW_REASON_LOW_RSSI is used for all roaming events.
		 * WMI_ROAM_REASON_BETTER_AP, WMI_ROAM_REASON_BMISS,
		 * WMI_ROAM_REASON_SUITABLE_AP will be handled by
		 * wma_roam_event_callback().
		 */
		wma_wow_wake_up_stats(wma, NULL, 0, WOW_REASON_LOW_RSSI);
		WMA_LOGD("Host woken up because of roam event");
		if (param_buf->wow_packet_buffer) {
			/* Roam event is embedded in wow_packet_buffer */
			WMA_LOGD("wow_packet_buffer dump");
			qdf_trace_hex_dump(QDF_MODULE_ID_WMA,
					   QDF_TRACE_LEVEL_DEBUG,
					   param_buf->wow_packet_buffer,
					   wow_buf_pkt_len);
			wma_roam_event_callback(handle, wmi_cmd_struct_ptr,
						wow_buf_pkt_len);
		} else {
			/*
			 * No wow_packet_buffer means a better AP beacon
			 * will follow in a later event.
			 */
			WMA_LOGD("Host woken up because of better AP beacon");
		}
		break;
	case WOW_REASON_CLIENT_KICKOUT_EVENT:
		WMA_LOGD("Host woken up because of sta_kickout event");
		if (param_buf->wow_packet_buffer) {
			WMA_LOGD("wow_packet_buffer dump");
			qdf_trace_hex_dump(QDF_MODULE_ID_WMA,
				QDF_TRACE_LEVEL_DEBUG,
				param_buf->wow_packet_buffer, wow_buf_pkt_len);
			wma_peer_sta_kickout_event_handler(handle,
					wmi_cmd_struct_ptr, wow_buf_pkt_len);
		} else {
			WMA_LOGD("No wow_packet_buffer present");
		}
		break;
#ifdef FEATURE_WLAN_EXTSCAN
	case WOW_REASON_EXTSCAN:
		WMA_LOGD("Host woken up because of extscan reason");
		wma_wow_wake_up_stats(wma, NULL, 0, WOW_REASON_EXTSCAN);
		if (param_buf->wow_packet_buffer)
			wma_extscan_wow_event_callback(handle,
					wmi_cmd_struct_ptr, wow_buf_pkt_len);
		else
			WMA_LOGE("wow_packet_buffer is empty");
		break;
#endif
	case WOW_REASON_RSSI_BREACH_EVENT:
		wma_wow_wake_up_stats(wma, NULL, 0,
				      WOW_REASON_RSSI_BREACH_EVENT);
		WMA_LOGD("Host woken up because of rssi breach reason");
		/* rssi breach event is embedded in wow_packet_buffer */
		if (param_buf->wow_packet_buffer)
			wma_rssi_breached_event_handler(handle,
					wmi_cmd_struct_ptr, wow_buf_pkt_len);
		else
			WMA_LOGD("No wow_packet_buffer present");
		break;
	case WOW_REASON_NAN_EVENT:
		WMA_LOGA("Host woken up due to NAN event reason");
		wma_nan_rsp_event_handler(handle,
				wmi_cmd_struct_ptr, wow_buf_pkt_len);
		break;
	case WOW_REASON_NAN_DATA:
		WMA_LOGD(FL("Host woken up for NAN data path event from FW"));
		if (param_buf->wow_packet_buffer) {
			wma_ndp_wow_event_callback(handle, wmi_cmd_struct_ptr,
						   wow_buf_pkt_len, event_id);
		} else {
			WMA_LOGE(FL("wow_packet_buffer is empty"));
		}
		break;
	case WOW_REASON_OEM_RESPONSE_EVENT:
		wma_wow_wake_up_stats(wma, NULL, 0,
				      WOW_REASON_OEM_RESPONSE_EVENT);
		/*
		 * Actual OEM Response event will follow after this
		 * WOW Wakeup event
		 */
		WMA_LOGD(FL("Host woken up for OEM Response event"));
		break;
#ifdef FEATURE_WLAN_TDLS
	case WOW_REASON_TDLS_CONN_TRACKER_EVENT:
		WMA_LOGD("Host woken up because of TDLS event");
		if (param_buf->wow_packet_buffer)
			wma_tdls_event_handler(handle,
					wmi_cmd_struct_ptr, wow_buf_pkt_len);
		else
			WMA_LOGD("No wow_packet_buffer present");
		break;
#endif
	default:
		break;
	}

	/* Hold a timed wake lock so the host does not re-suspend before
	 * the wake reason has been fully serviced.
	 */
	if (wake_lock_duration) {
		cds_host_diag_log_work(&wma->wow_wake_lock,
				       wake_lock_duration,
				       WIFI_POWER_EVENT_WAKELOCK_WOW);
		qdf_wake_lock_timeout_acquire(&wma->wow_wake_lock,
					      wake_lock_duration);
		WMA_LOGA("Holding %d msec wake_lock", wake_lock_duration);
	}

	/* Release the TLV scratch buffer allocated by the parser above */
	if (wmi_cmd_struct_ptr)
		wmitlv_free_allocated_event_tlvs(event_id, &wmi_cmd_struct_ptr);

	return 0;
}

/**
 * wma_pdev_resume_event_handler() - PDEV resume event handler
 * @handle: wma handle
 * @event: event data
 * @len: buffer length
 *
 * Signals wma_resume_event so waiters know the target has resumed.
 *
 * Return: 0 for success or error
 */
int wma_pdev_resume_event_handler(void *handle, uint8_t *event, uint32_t len)
{
	tp_wma_handle wma = (tp_wma_handle) handle;

	WMA_LOGA("Received PDEV resume event");

	qdf_event_set(&wma->wma_resume_event);

	return 0;
}
/**
 * wma_set_wow_bus_suspend() - set suspend flag
 * @wma: wma handle
 * @val: value
 *
 * Atomically records whether the bus is WoW-suspended.
 *
 * Return: none
 */
static inline void wma_set_wow_bus_suspend(tp_wma_handle wma, int val)
{

	qdf_atomic_set(&wma->is_wow_bus_suspended, val);
}



/**
 * wma_add_wow_wakeup_event() - Configures wow wakeup events.
 * @wma: wma handle
 * @vdev_id: vdev id
 * @bitmap: Event bitmap
 * @enable: enable/disable
 *
 * Return: QDF status
 */
QDF_STATUS wma_add_wow_wakeup_event(tp_wma_handle wma,
				    uint32_t vdev_id,
				    uint32_t bitmap,
				    bool enable)
{
	int ret;

	ret = wmi_unified_add_wow_wakeup_event_cmd(wma->wmi_handle, vdev_id,
						   bitmap, enable);
	if (ret) {
		WMA_LOGE("Failed to config wow wakeup event");
		return QDF_STATUS_E_FAILURE;
	}

	return QDF_STATUS_SUCCESS;
}

/**
 * wma_send_wow_patterns_to_fw() - Sends WOW patterns to FW.
 * @wma: wma handle
 * @vdev_id: vdev id
 * @ptrn_id: pattern id
 * @ptrn: pattern
 * @ptrn_len: pattern length
 * @ptrn_offset: pattern offset
 * @mask: mask
 * @mask_len: mask length
 * @user: true for user configured pattern and false for default pattern
 *
 * Pattern-count bookkeeping: callers of default patterns pre-increment
 * num_wow_default_patterns before calling, so a send failure decrements
 * it back; user patterns are only counted after a successful send.
 *
 * Return: QDF status
 */
static QDF_STATUS wma_send_wow_patterns_to_fw(tp_wma_handle wma,
			uint8_t vdev_id, uint8_t ptrn_id,
			const uint8_t *ptrn, uint8_t ptrn_len,
			uint8_t ptrn_offset, const uint8_t *mask,
			uint8_t mask_len, bool user)
{
	struct wma_txrx_node *iface;
	int ret;

	iface = &wma->interfaces[vdev_id];
	ret = wmi_unified_wow_patterns_to_fw_cmd(wma->wmi_handle,
				 vdev_id, ptrn_id, ptrn,
				 ptrn_len, ptrn_offset, mask,
				 mask_len, user, 0);
	if (ret) {
		/* undo the caller's optimistic pre-increment */
		if (!user)
			iface->num_wow_default_patterns--;
		return QDF_STATUS_E_FAILURE;
	}

	if (user)
		iface->num_wow_user_patterns++;

	return QDF_STATUS_SUCCESS;
}

/**
 * wma_wow_ap() - set WOW patterns in ap mode
 * @wma: wma handle
 * @vdev_id: vdev id
 *
 * Configures default WOW pattern for the given vdev_id which is in AP mode.
 *
 * Return: QDF status
 */
static QDF_STATUS wma_wow_ap(tp_wma_handle wma, uint8_t vdev_id)
{
	QDF_STATUS ret;
	uint8_t arp_offset = 20;
	uint8_t mac_mask[IEEE80211_ADDR_LEN];
	struct wma_txrx_node *iface = &wma->interfaces[vdev_id];

	/*
	 * Setup unicast pkt pattern
	 * WoW pattern id should be unique for each vdev
	 * WoW pattern id can be same on 2 different VDEVs
	 */
	qdf_mem_set(&mac_mask, IEEE80211_ADDR_LEN, 0xFF);
	ret = wma_send_wow_patterns_to_fw(wma, vdev_id,
				iface->num_wow_default_patterns++,
				wma->interfaces[vdev_id].addr,
				IEEE80211_ADDR_LEN, 0, mac_mask,
				IEEE80211_ADDR_LEN, false);
	if (ret != QDF_STATUS_SUCCESS) {
		WMA_LOGE("Failed to add WOW unicast pattern ret %d", ret);
		return ret;
	}

	/*
	 * Setup all ARP pkt pattern. This is dummy pattern hence the length
	 * is zero. Pattern ID should be unique per vdev.
	 */
	ret = wma_send_wow_patterns_to_fw(wma, vdev_id,
				iface->num_wow_default_patterns++,
				arp_ptrn, 0, arp_offset, arp_mask, 0, false);
	if (ret != QDF_STATUS_SUCCESS) {
		WMA_LOGE("Failed to add WOW ARP pattern ret %d", ret);
		return ret;
	}

	return ret;
}

/**
 * wma_configure_wow_ssdp() - API to configure WoW SSDP
 * @wma: WMA Handle
 * @vdev_id: Vdev Id
 *
 * API to configure SSDP pattern as WoW pattern
 *
 * Return: Success/Failure
 */
static QDF_STATUS wma_configure_wow_ssdp(tp_wma_handle wma, uint8_t vdev_id)
{
	QDF_STATUS status = QDF_STATUS_SUCCESS;
	uint8_t discvr_offset = 30;
	struct wma_txrx_node *iface = &wma->interfaces[vdev_id];

	/*
	 * WoW pattern ID should be unique for each vdev
	 * Different WoW patterns can use same pattern ID
	 *
	 * NOTE(review): sizeof(discvr_ptrn) is passed as the mask length —
	 * this assumes discvr_mask is at least as long as discvr_ptrn;
	 * confirm against the pattern table definitions.
	 */
	status = wma_send_wow_patterns_to_fw(wma, vdev_id,
			iface->num_wow_default_patterns++,
			discvr_ptrn, sizeof(discvr_ptrn), discvr_offset,
			discvr_mask, sizeof(discvr_ptrn), false);

	if (status != QDF_STATUS_SUCCESS)
		WMA_LOGE("Failed to add WOW mDNS/SSDP/LLMNR pattern");

	return status;
}

/**
 * wma_configure_mc_ssdp() - API to configure SSDP address as MC list
 * @wma: WMA Handle
 * @vdev_id: Vdev Id
 *
 * SSDP address 239.255.255.250 is converted to Multicast Mac address
 * and configure it to FW. Firmware will apply this pattern on the incoming
 * packets to filter them out during chatter/wow mode.
 *
 * Return: Success/Failure
 */
static QDF_STATUS wma_configure_mc_ssdp(tp_wma_handle wma, uint8_t vdev_id)
{
	WMI_SET_MCASTBCAST_FILTER_CMD_fixed_param *cmd;
	wmi_buf_t buf;
	const tSirMacAddr ssdp_addr = {0x01, 0x00, 0x5e, 0x7f, 0xff, 0xfa};
	int ret;
	/* used only to size the TLV header below */
	WMI_SET_MCASTBCAST_FILTER_CMD_fixed_param fixed_param;
	uint32_t tag =
		WMITLV_TAG_STRUC_WMI_SET_MCASTBCAST_FILTER_CMD_fixed_param;

	buf = wmi_buf_alloc(wma->wmi_handle, sizeof(*cmd));
	if (!buf) {
		WMA_LOGE("%s No Memory for MC address", __func__);
		return QDF_STATUS_E_NOMEM;
	}

	cmd = (WMI_SET_MCASTBCAST_FILTER_CMD_fixed_param *) wmi_buf_data(buf);

	WMITLV_SET_HDR(&cmd->tlv_header, tag,
		       WMITLV_GET_STRUCT_TLVLEN(fixed_param));

	cmd->action = WMI_MCAST_FILTER_SET;
	cmd->vdev_id = vdev_id;
	WMI_CHAR_ARRAY_TO_MAC_ADDR(ssdp_addr, &cmd->mcastbdcastaddr);
	/* on success the WMI layer owns and frees 'buf' */
	ret = wmi_unified_cmd_send(wma->wmi_handle, buf, sizeof(*cmd),
				   WMI_SET_MCASTBCAST_FILTER_CMDID);
	if (ret != QDF_STATUS_SUCCESS) {
		WMA_LOGE("%s Failed to configure FW with SSDP MC address",
			 __func__);
		wmi_buf_free(buf);
		return QDF_STATUS_E_FAILURE;
	}

	return QDF_STATUS_SUCCESS;
}

/**
 * wma_configure_ssdp() - API to Configure SSDP pattern to FW
 * @wma: WMA Handle
 * @vdev_id: VDEV ID
 *
 * Setup multicast pattern for mDNS 224.0.0.251, SSDP
239.255.255.250 and LLMNR 3615 * 224.0.0.252 3616 * 3617 * Return: Success/Failure. 3618 */ 3619 static QDF_STATUS wma_configure_ssdp(tp_wma_handle wma, uint8_t vdev_id) 3620 { 3621 if (!wma->ssdp) { 3622 WMA_LOGD("mDNS, SSDP, LLMNR patterns are disabled from ini"); 3623 return QDF_STATUS_SUCCESS; 3624 } 3625 3626 WMA_LOGD("%s, enable_mc_list:%d", __func__, wma->enable_mc_list); 3627 3628 if (wma->enable_mc_list) 3629 return wma_configure_mc_ssdp(wma, vdev_id); 3630 3631 return wma_configure_wow_ssdp(wma, vdev_id); 3632 } 3633 3634 /** 3635 * wma_wow_sta() - set WOW patterns in sta mode 3636 * @wma: wma handle 3637 * @vdev_id: vdev id 3638 * 3639 * Configures default WOW pattern for the given vdev_id which is in sta mode. 3640 * 3641 * Return: QDF status 3642 */ 3643 static QDF_STATUS wma_wow_sta(tp_wma_handle wma, uint8_t vdev_id) 3644 { 3645 uint8_t arp_offset = 12; 3646 uint8_t mac_mask[IEEE80211_ADDR_LEN]; 3647 QDF_STATUS ret = QDF_STATUS_SUCCESS; 3648 struct wma_txrx_node *iface = &wma->interfaces[vdev_id]; 3649 3650 qdf_mem_set(&mac_mask, IEEE80211_ADDR_LEN, 0xFF); 3651 /* 3652 * Set up unicast wow pattern 3653 * WoW pattern ID should be unique for each vdev 3654 * Different WoW patterns can use same pattern ID 3655 */ 3656 ret = wma_send_wow_patterns_to_fw(wma, vdev_id, 3657 iface->num_wow_default_patterns++, 3658 wma->interfaces[vdev_id].addr, 3659 IEEE80211_ADDR_LEN, 0, mac_mask, 3660 IEEE80211_ADDR_LEN, false); 3661 if (ret != QDF_STATUS_SUCCESS) { 3662 WMA_LOGE("Failed to add WOW unicast pattern ret %d", ret); 3663 return ret; 3664 } 3665 3666 ret = wma_configure_ssdp(wma, vdev_id); 3667 if (ret != QDF_STATUS_SUCCESS) 3668 WMA_LOGE("Failed to configure SSDP patterns to FW"); 3669 3670 /* when arp offload or ns offloaded is disabled 3671 * from ini file, configure broad cast arp pattern 3672 * to fw, so that host can wake up 3673 */ 3674 if (!(wma->ol_ini_info & 0x1)) { 3675 /* Setup all ARP pkt pattern */ 3676 WMA_LOGI("ARP offload is disabled in INI 
enable WoW for ARP"); 3677 ret = wma_send_wow_patterns_to_fw(wma, vdev_id, 3678 iface->num_wow_default_patterns++, 3679 arp_ptrn, sizeof(arp_ptrn), arp_offset, 3680 arp_mask, sizeof(arp_mask), false); 3681 if (ret != QDF_STATUS_SUCCESS) { 3682 WMA_LOGE("Failed to add WOW ARP pattern"); 3683 return ret; 3684 } 3685 } 3686 3687 /* for NS or NDP offload packets */ 3688 if (!(wma->ol_ini_info & 0x2)) { 3689 /* Setup all NS pkt pattern */ 3690 WMA_LOGI("NS offload is disabled in INI enable WoW for NS"); 3691 ret = wma_send_wow_patterns_to_fw(wma, vdev_id, 3692 iface->num_wow_default_patterns++, 3693 ns_ptrn, sizeof(arp_ptrn), arp_offset, 3694 arp_mask, sizeof(arp_mask), false); 3695 if (ret != QDF_STATUS_SUCCESS) { 3696 WMA_LOGE("Failed to add WOW NS pattern"); 3697 return ret; 3698 } 3699 } 3700 3701 return ret; 3702 } 3703 3704 /** 3705 * wma_register_wow_default_patterns() - register default wow patterns with fw 3706 * @handle: Pointer to wma handle 3707 * @vdev_id: vdev id 3708 * 3709 * WoW default wake up pattern rule is: 3710 * - For STA & P2P CLI mode register for same STA specific wow patterns 3711 * - For SAP/P2P GO & IBSS mode register for same SAP specific wow patterns 3712 * 3713 * Return: none 3714 */ 3715 void wma_register_wow_default_patterns(WMA_HANDLE handle, uint8_t vdev_id) 3716 { 3717 tp_wma_handle wma = handle; 3718 struct wma_txrx_node *iface; 3719 3720 if (vdev_id > wma->max_bssid) { 3721 WMA_LOGE("Invalid vdev id %d", vdev_id); 3722 return; 3723 } 3724 iface = &wma->interfaces[vdev_id]; 3725 3726 if (iface->ptrn_match_enable) { 3727 if (wma_is_vdev_in_beaconning_mode(wma, vdev_id)) { 3728 /* Configure SAP/GO/IBSS mode default wow patterns */ 3729 WMA_LOGI("Config SAP specific default wow patterns vdev_id %d", 3730 vdev_id); 3731 wma_wow_ap(wma, vdev_id); 3732 } else { 3733 /* Configure STA/P2P CLI mode default wow patterns */ 3734 WMA_LOGI("Config STA specific default wow patterns vdev_id %d", 3735 vdev_id); 3736 wma_wow_sta(wma, vdev_id); 3737 
if (wma->IsRArateLimitEnabled) { 3738 WMA_LOGI("Config STA RA limit wow patterns vdev_id %d", 3739 vdev_id); 3740 wma_wow_sta_ra_filter(wma, vdev_id); 3741 } 3742 } 3743 } 3744 3745 return; 3746 } 3747 3748 /** 3749 * wma_register_wow_wakeup_events() - register vdev specific wake events with fw 3750 * @handle: Pointer to wma handle 3751 * @vdev_id: vdev Id 3752 * @vdev_type: vdev type 3753 * @vdev_subtype: vdev sub type 3754 * 3755 * WoW wake up event rule is following: 3756 * 1) STA mode and P2P CLI mode wake up events are same 3757 * 2) SAP mode and P2P GO mode wake up events are same 3758 * 3) IBSS mode wake events are same as STA mode plus WOW_BEACON_EVENT 3759 * 3760 * Return: none 3761 */ 3762 void wma_register_wow_wakeup_events(WMA_HANDLE handle, 3763 uint8_t vdev_id, 3764 uint8_t vdev_type, 3765 uint8_t vdev_subtype) 3766 { 3767 tp_wma_handle wma = handle; 3768 uint32_t event_bitmap; 3769 3770 WMA_LOGI("vdev_type %d vdev_subtype %d vdev_id %d", vdev_type, 3771 vdev_subtype, vdev_id); 3772 3773 if ((WMI_VDEV_TYPE_STA == vdev_type) || 3774 ((WMI_VDEV_TYPE_AP == vdev_type) && 3775 (WMI_UNIFIED_VDEV_SUBTYPE_P2P_DEVICE == vdev_subtype))) { 3776 /* Configure STA/P2P CLI mode specific default wake up events */ 3777 event_bitmap = WMA_WOW_STA_WAKE_UP_EVENTS; 3778 WMA_LOGI("STA specific default wake up event 0x%x vdev id %d", 3779 event_bitmap, vdev_id); 3780 } else if (WMI_VDEV_TYPE_IBSS == vdev_type) { 3781 /* Configure IBSS mode specific default wake up events */ 3782 event_bitmap = (WMA_WOW_STA_WAKE_UP_EVENTS | 3783 (1 << WOW_BEACON_EVENT)); 3784 WMA_LOGI("IBSS specific default wake up event 0x%x vdev id %d", 3785 event_bitmap, vdev_id); 3786 } else if (WMI_VDEV_TYPE_AP == vdev_type) { 3787 /* Configure SAP/GO mode specific default wake up events */ 3788 event_bitmap = WMA_WOW_SAP_WAKE_UP_EVENTS; 3789 WMA_LOGI("SAP specific default wake up event 0x%x vdev id %d", 3790 event_bitmap, vdev_id); 3791 } else if (WMI_VDEV_TYPE_NDI == vdev_type) { 3792 /* 3793 * 
Configure NAN data path specific default wake up events. 3794 * Following routine sends the command to firmware. 3795 */ 3796 wma_ndp_add_wow_wakeup_event(wma, vdev_id); 3797 return; 3798 } else { 3799 WMA_LOGE("unknown type %d subtype %d", vdev_type, vdev_subtype); 3800 return; 3801 } 3802 3803 wma_add_wow_wakeup_event(wma, vdev_id, event_bitmap, true); 3804 3805 return; 3806 } 3807 3808 /** 3809 * wma_enable_disable_wakeup_event() - Configures wow wakeup events 3810 * @wma: wma handle 3811 * @vdev_id: vdev id 3812 * @bitmap: Event bitmap 3813 * @enable: enable/disable 3814 * 3815 * Return: none 3816 */ 3817 void wma_enable_disable_wakeup_event(WMA_HANDLE handle, 3818 uint32_t vdev_id, 3819 uint32_t bitmap, 3820 bool enable) 3821 { 3822 tp_wma_handle wma = handle; 3823 3824 WMA_LOGI("vdev_id %d wake up event 0x%x enable %d", 3825 vdev_id, bitmap, enable); 3826 wma_add_wow_wakeup_event(wma, vdev_id, bitmap, enable); 3827 } 3828 3829 /** 3830 * wma_enable_wow_in_fw() - wnable wow in fw 3831 * @wma: wma handle 3832 * 3833 * Return: QDF status 3834 */ 3835 QDF_STATUS wma_enable_wow_in_fw(WMA_HANDLE handle) 3836 { 3837 tp_wma_handle wma = handle; 3838 int ret; 3839 struct hif_opaque_softc *scn; 3840 int host_credits; 3841 int wmi_pending_cmds; 3842 struct wow_cmd_params param = {0}; 3843 3844 #ifdef CONFIG_CNSS 3845 tpAniSirGlobal pMac = cds_get_context(QDF_MODULE_ID_PE); 3846 3847 if (NULL == pMac) { 3848 WMA_LOGE("%s: Unable to get PE context", __func__); 3849 return QDF_STATUS_E_FAILURE; 3850 } 3851 #endif /* CONFIG_CNSS */ 3852 3853 qdf_event_reset(&wma->target_suspend); 3854 wma->wow_nack = false; 3855 3856 host_credits = wmi_get_host_credits(wma->wmi_handle); 3857 wmi_pending_cmds = wmi_get_pending_cmds(wma->wmi_handle); 3858 3859 WMA_LOGD("Credits:%d; Pending_Cmds: %d", 3860 host_credits, wmi_pending_cmds); 3861 3862 if (host_credits < WMI_WOW_REQUIRED_CREDITS) { 3863 WMA_LOGE("%s: Host Doesn't have enough credits to Post WMI_WOW_ENABLE_CMDID! 
" 3864 "Credits:%d, pending_cmds:%d\n", __func__, host_credits, 3865 wmi_pending_cmds); 3866 #ifndef QCA_WIFI_3_0_EMU 3867 goto error; 3868 #endif 3869 } 3870 3871 param.enable = true; 3872 param.can_suspend_link = htc_can_suspend_link(wma->htc_handle); 3873 ret = wmi_unified_wow_enable_send(wma->wmi_handle, ¶m, 3874 WMA_WILDCARD_PDEV_ID); 3875 if (ret) { 3876 WMA_LOGE("Failed to enable wow in fw"); 3877 goto error; 3878 } 3879 3880 wmi_set_target_suspend(wma->wmi_handle, true); 3881 3882 if (qdf_wait_single_event(&wma->target_suspend, 3883 WMA_TGT_SUSPEND_COMPLETE_TIMEOUT) 3884 != QDF_STATUS_SUCCESS) { 3885 WMA_LOGE("Failed to receive WoW Enable Ack from FW"); 3886 WMA_LOGE("Credits:%d; Pending_Cmds: %d", 3887 wmi_get_host_credits(wma->wmi_handle), 3888 wmi_get_pending_cmds(wma->wmi_handle)); 3889 wmi_set_target_suspend(wma->wmi_handle, false); 3890 if (!cds_is_driver_recovering()) { 3891 #ifdef CONFIG_CNSS 3892 if (pMac->sme.enableSelfRecovery) { 3893 cds_trigger_recovery(); 3894 } else { 3895 QDF_BUG(0); 3896 } 3897 #else 3898 QDF_BUG(0); 3899 #endif /* CONFIG_CNSS */ 3900 } else { 3901 WMA_LOGE("%s: LOGP is in progress, ignore!", __func__); 3902 } 3903 3904 return QDF_STATUS_E_FAILURE; 3905 } 3906 3907 if (wma->wow_nack) { 3908 WMA_LOGE("FW not ready to WOW"); 3909 wmi_set_target_suspend(wma->wmi_handle, false); 3910 return QDF_STATUS_E_AGAIN; 3911 } 3912 3913 host_credits = wmi_get_host_credits(wma->wmi_handle); 3914 wmi_pending_cmds = wmi_get_pending_cmds(wma->wmi_handle); 3915 3916 if (host_credits < WMI_WOW_REQUIRED_CREDITS) { 3917 WMA_LOGE("%s: No Credits after HTC ACK:%d, pending_cmds:%d, " 3918 "cannot resume back", __func__, host_credits, 3919 wmi_pending_cmds); 3920 htc_dump_counter_info(wma->htc_handle); 3921 if (!cds_is_driver_recovering()) 3922 QDF_BUG(0); 3923 else 3924 WMA_LOGE("%s: SSR in progress, ignore no credit issue", 3925 __func__); 3926 } 3927 3928 WMA_LOGD("WOW enabled successfully in fw: credits:%d" 3929 "pending_cmds: %d", host_credits, 
wmi_pending_cmds); 3930 3931 scn = cds_get_context(QDF_MODULE_ID_HIF); 3932 3933 if (scn == NULL) { 3934 WMA_LOGE("%s: Failed to get HIF context", __func__); 3935 wmi_set_target_suspend(wma->wmi_handle, false); 3936 QDF_ASSERT(0); 3937 return QDF_STATUS_E_FAULT; 3938 } 3939 3940 wma->wow.wow_enable_cmd_sent = true; 3941 3942 return QDF_STATUS_SUCCESS; 3943 3944 error: 3945 return QDF_STATUS_E_FAILURE; 3946 } 3947 3948 /** 3949 * wma_resume_req() - clear configured wow patterns in fw 3950 * @wma: wma handle 3951 * @type: type of suspend 3952 * 3953 * Return: QDF status 3954 */ 3955 QDF_STATUS wma_resume_req(tp_wma_handle wma, enum qdf_suspend_type type) 3956 { 3957 if (type == QDF_SYSTEM_SUSPEND) { 3958 wma->no_of_resume_ind++; 3959 3960 if (wma->no_of_resume_ind < wma_get_vdev_count(wma)) 3961 return QDF_STATUS_SUCCESS; 3962 3963 wma->no_of_resume_ind = 0; 3964 } 3965 3966 /* Reset the DTIM Parameters */ 3967 wma_set_resume_dtim(wma); 3968 /* need to reset if hif_pci_suspend_fails */ 3969 wma_set_wow_bus_suspend(wma, 0); 3970 /* unpause the vdev if left paused and hif_pci_suspend fails */ 3971 wma_unpause_vdev(wma); 3972 3973 wmi_set_runtime_pm_inprogress(wma->wmi_handle, false); 3974 3975 if (type == QDF_RUNTIME_SUSPEND) 3976 qdf_runtime_pm_allow_suspend(wma->wma_runtime_resume_lock); 3977 3978 return QDF_STATUS_SUCCESS; 3979 } 3980 3981 /** 3982 * wma_wow_delete_pattern() - delete wow pattern in target 3983 * @wma: wma handle 3984 * @ptrn_id: pattern id 3985 * @vdev_id: vdev id 3986 * @user: true for user pattern and false for default pattern 3987 * 3988 * Return: QDF status 3989 */ 3990 static QDF_STATUS wma_wow_delete_pattern(tp_wma_handle wma, uint8_t ptrn_id, 3991 uint8_t vdev_id, bool user) 3992 { 3993 3994 struct wma_txrx_node *iface; 3995 int ret; 3996 3997 iface = &wma->interfaces[vdev_id]; 3998 ret = wmi_unified_wow_delete_pattern_cmd(wma->wmi_handle, ptrn_id, 3999 vdev_id); 4000 if (ret) { 4001 return QDF_STATUS_E_FAILURE; 4002 } 4003 4004 if (user) 
4005 iface->num_wow_user_patterns--; 4006 4007 return QDF_STATUS_SUCCESS; 4008 } 4009 4010 /** 4011 * wma_wow_add_pattern() - add wow pattern in target 4012 * @wma: wma handle 4013 * @ptrn: wow pattern 4014 * 4015 * This function does following: 4016 * 1) Delete all default patterns of the vdev 4017 * 2) Add received wow patterns for given vdev in target. 4018 * 4019 * Target is responsible for caching wow patterns accross multiple 4020 * suspend/resumes until the pattern is deleted by user 4021 * 4022 * Return: QDF status 4023 */ 4024 QDF_STATUS wma_wow_add_pattern(tp_wma_handle wma, struct wow_add_pattern *ptrn) 4025 { 4026 uint8_t id; 4027 uint8_t bit_to_check, pos; 4028 struct wma_txrx_node *iface; 4029 QDF_STATUS ret = QDF_STATUS_SUCCESS; 4030 uint8_t new_mask[SIR_WOWL_BCAST_PATTERN_MAX_SIZE]; 4031 4032 if (ptrn->session_id > wma->max_bssid) { 4033 WMA_LOGE("Invalid vdev id (%d)", ptrn->session_id); 4034 return QDF_STATUS_E_INVAL; 4035 } 4036 4037 iface = &wma->interfaces[ptrn->session_id]; 4038 4039 /* clear all default patterns cofigured by wma */ 4040 for (id = 0; id < iface->num_wow_default_patterns; id++) 4041 wma_wow_delete_pattern(wma, id, ptrn->session_id, false); 4042 4043 iface->num_wow_default_patterns = 0; 4044 4045 WMA_LOGI("Add user passed wow pattern id %d vdev id %d", 4046 ptrn->pattern_id, ptrn->session_id); 4047 /* 4048 * Convert received pattern mask value from bit representation 4049 * to byte representation. 
4050 * 4051 * For example, received value from umac, 4052 * 4053 * Mask value : A1 (equivalent binary is "1010 0001") 4054 * Pattern value : 12:00:13:00:00:00:00:44 4055 * 4056 * The value which goes to FW after the conversion from this 4057 * function (1 in mask value will become FF and 0 will 4058 * become 00), 4059 * 4060 * Mask value : FF:00:FF:00:0:00:00:FF 4061 * Pattern value : 12:00:13:00:00:00:00:44 4062 */ 4063 qdf_mem_zero(new_mask, sizeof(new_mask)); 4064 for (pos = 0; pos < ptrn->pattern_size; pos++) { 4065 bit_to_check = (WMA_NUM_BITS_IN_BYTE - 1) - 4066 (pos % WMA_NUM_BITS_IN_BYTE); 4067 bit_to_check = 0x1 << bit_to_check; 4068 if (ptrn->pattern_mask[pos / WMA_NUM_BITS_IN_BYTE] & 4069 bit_to_check) 4070 new_mask[pos] = WMA_WOW_PTRN_MASK_VALID; 4071 } 4072 4073 ret = wma_send_wow_patterns_to_fw(wma, ptrn->session_id, 4074 ptrn->pattern_id, 4075 ptrn->pattern, ptrn->pattern_size, 4076 ptrn->pattern_byte_offset, new_mask, 4077 ptrn->pattern_size, true); 4078 if (ret != QDF_STATUS_SUCCESS) 4079 WMA_LOGE("Failed to add wow pattern %d", ptrn->pattern_id); 4080 4081 return ret; 4082 } 4083 4084 /** 4085 * wma_wow_delete_user_pattern() - delete user configured wow pattern in target 4086 * @wma: wma handle 4087 * @ptrn: wow pattern 4088 * 4089 * This function does following: 4090 * 1) Deletes a particular user configured wow pattern in target 4091 * 2) After deleting all user wow patterns add default wow patterns 4092 * specific to that vdev. 4093 * 4094 * Return: QDF status 4095 */ 4096 QDF_STATUS wma_wow_delete_user_pattern(tp_wma_handle wma, 4097 struct wow_delete_pattern *pattern) 4098 { 4099 struct wma_txrx_node *iface; 4100 4101 if (pattern->session_id > wma->max_bssid) { 4102 WMA_LOGE("Invalid vdev id %d", pattern->session_id); 4103 return QDF_STATUS_E_INVAL; 4104 } 4105 4106 iface = &wma->interfaces[pattern->session_id]; 4107 if (iface->num_wow_user_patterns <= 0) { 4108 WMA_LOGE("No valid user pattern. 
Num user pattern %u vdev %d", 4109 iface->num_wow_user_patterns, pattern->session_id); 4110 return QDF_STATUS_E_INVAL; 4111 } 4112 4113 WMA_LOGI("Delete user passed wow pattern id %d total user pattern %d", 4114 pattern->pattern_id, iface->num_wow_user_patterns); 4115 4116 wma_wow_delete_pattern(wma, pattern->pattern_id, 4117 pattern->session_id, true); 4118 4119 /* configure default patterns once all user patterns are deleted */ 4120 if (!iface->num_wow_user_patterns) 4121 wma_register_wow_default_patterns(wma, pattern->session_id); 4122 4123 return QDF_STATUS_SUCCESS; 4124 } 4125 4126 /** 4127 * wma_wow_enter() - store enable/disable status for pattern 4128 * @wma: wma handle 4129 * @info: wow parameters 4130 * 4131 * Records pattern enable/disable status locally. This choice will 4132 * take effect when the driver enter into suspend state. 4133 * 4134 * Return: QDF status 4135 */ 4136 QDF_STATUS wma_wow_enter(tp_wma_handle wma, tpSirHalWowlEnterParams info) 4137 { 4138 struct wma_txrx_node *iface; 4139 4140 WMA_LOGD("wow enable req received for vdev id: %d", info->sessionId); 4141 4142 if (info->sessionId > wma->max_bssid) { 4143 WMA_LOGE("Invalid vdev id (%d)", info->sessionId); 4144 qdf_mem_free(info); 4145 return QDF_STATUS_E_INVAL; 4146 } 4147 4148 iface = &wma->interfaces[info->sessionId]; 4149 iface->ptrn_match_enable = info->ucPatternFilteringEnable ? 4150 true : false; 4151 wma->wow.magic_ptrn_enable = info->ucMagicPktEnable ? true : false; 4152 wma->wow.deauth_enable = info->ucWowDeauthRcv ? true : false; 4153 wma->wow.disassoc_enable = info->ucWowDeauthRcv ? true : false; 4154 wma->wow.bmiss_enable = info->ucWowMaxMissedBeacons ? 
true : false; 4155 4156 qdf_mem_free(info); 4157 4158 return QDF_STATUS_SUCCESS; 4159 } 4160 4161 /** 4162 * wma_wow_exit() - clear all wma states 4163 * @wma: wma handle 4164 * @info: wow params 4165 * 4166 * Return: QDF status 4167 */ 4168 QDF_STATUS wma_wow_exit(tp_wma_handle wma, tpSirHalWowlExitParams info) 4169 { 4170 struct wma_txrx_node *iface; 4171 4172 WMA_LOGD("wow disable req received for vdev id: %d", info->sessionId); 4173 4174 if (info->sessionId > wma->max_bssid) { 4175 WMA_LOGE("Invalid vdev id (%d)", info->sessionId); 4176 qdf_mem_free(info); 4177 return QDF_STATUS_E_INVAL; 4178 } 4179 4180 iface = &wma->interfaces[info->sessionId]; 4181 iface->ptrn_match_enable = false; 4182 wma->wow.magic_ptrn_enable = false; 4183 qdf_mem_free(info); 4184 4185 return QDF_STATUS_SUCCESS; 4186 } 4187 4188 /** 4189 * wma_calculate_and_update_conn_state(): calculate each interfaces conn state 4190 * @wma: validated wma handle 4191 * 4192 * Identifies any vdev that is up and not in ap mode as connected. 4193 * stores this in the interfaces conn_state varible. 4194 */ 4195 void wma_calculate_and_update_conn_state(tp_wma_handle wma) 4196 { 4197 int i; 4198 for (i = 0; i < wma->max_bssid; i++) { 4199 wma->interfaces[i].conn_state = 4200 !!(wma->interfaces[i].vdev_up && 4201 !wma_is_vdev_in_ap_mode(wma, i)); 4202 } 4203 } 4204 4205 /** 4206 * wma_update_conn_state(): synchronize wma & hdd 4207 * @wma: wma handle 4208 * @conn_state: boolean array to populate 4209 * @len: validation parameter 4210 * 4211 * populate interfaces conn_state with true if the interface 4212 * is a connected client and wow will configure a pattern. 
4213 */ 4214 void wma_update_conn_state(tp_wma_handle wma, uint32_t conn_mask) 4215 { 4216 int i; 4217 for (i = 0; i < wma->max_bssid; i++) { 4218 if (conn_mask & (1 << i)) 4219 wma->interfaces[i].conn_state = true; 4220 else 4221 wma->interfaces[i].conn_state = false; 4222 } 4223 4224 if (wma->wow.magic_ptrn_enable) 4225 return; 4226 4227 for (i = 0; i < wma->max_bssid; i++) { 4228 if (!wma->interfaces[i].ptrn_match_enable) 4229 wma->interfaces[i].conn_state = false; 4230 } 4231 } 4232 4233 /** 4234 * wma_is_beaconning_vdev_up(): check if a beaconning vdev is up 4235 * @wma: wma handle 4236 * 4237 * Return TRUE if beaconning vdev is up 4238 */ 4239 static inline 4240 bool wma_is_beaconning_vdev_up(tp_wma_handle wma) 4241 { 4242 int i; 4243 for (i = 0; i < wma->max_bssid; i++) { 4244 if (wma_is_vdev_in_beaconning_mode(wma, i) 4245 && wma->interfaces[i].vdev_up) 4246 return true; 4247 } 4248 return false; 4249 } 4250 4251 /** 4252 * wma_support_wow_for_beaconing: wow query for beaconning 4253 * @wma: wma handle 4254 * 4255 * Need to configure wow to enable beaconning offload when 4256 * a beaconing vdev is up and beaonning offload is configured. 
4257 * 4258 * Return: true if we need to enable wow for beaconning offload 4259 */ 4260 static inline 4261 bool wma_support_wow_for_beaconing(tp_wma_handle wma) 4262 { 4263 if (WMI_SERVICE_IS_ENABLED(wma->wmi_service_bitmap, 4264 WMI_SERVICE_BEACON_OFFLOAD)) { 4265 if (wma_is_beaconning_vdev_up(wma)) 4266 return true; 4267 } 4268 return false; 4269 } 4270 4271 #ifdef FEATURE_WLAN_SCAN_PNO 4272 /** 4273 * wma_is_pnoscan_in_progress(): check if a pnoscan is in progress 4274 * @wma: wma handle 4275 * @vdev_id: vdev_id 4276 * 4277 * Return: TRUE/FALSE 4278 */ 4279 static inline 4280 bool wma_is_pnoscan_in_progress(tp_wma_handle wma, int vdev_id) 4281 { 4282 return wma->interfaces[vdev_id].pno_in_progress; 4283 } 4284 4285 /** 4286 * wma_is_pnoscan_match_found(): check if a scan match was found 4287 * @wma: wma handle 4288 * @vdev_id: vdev_id 4289 * 4290 * Return: TRUE/FALSE 4291 */ 4292 static inline 4293 bool wma_is_pnoscan_match_found(tp_wma_handle wma, int vdev_id) 4294 { 4295 return wma->interfaces[vdev_id].nlo_match_evt_received; 4296 } 4297 #else 4298 /** 4299 * wma_is_pnoscan_in_progress(): dummy 4300 * 4301 * Return: False since no pnoscan cannot be in progress 4302 * when feature flag is not defined. 4303 */ 4304 bool wma_is_pnoscan_in_progress(tp_wma_handle wma, int vdev_id) 4305 { 4306 return FALSE; 4307 } 4308 4309 /** 4310 * wma_is_pnoscan_match_found(): dummy 4311 * @wma: wma handle 4312 * @vdev_id: vdev_id 4313 * 4314 * Return: False since no pnoscan cannot occur 4315 * when feature flag is not defined. 
4316 */ 4317 static inline 4318 bool wma_is_pnoscan_match_found(tp_wma_handle wma, int vdev_id) 4319 { 4320 return FALSE; 4321 } 4322 #endif 4323 4324 #ifdef FEATURE_WLAN_EXTSCAN 4325 static inline 4326 /** 4327 * wma_is_extscan_in_progress(): check if an extscan is in progress 4328 * @wma: wma handle 4329 * @vdev_id: vdev_id 4330 * 4331 * Return: TRUE/FALSvE 4332 */ 4333 bool wma_is_extscan_in_progress(tp_wma_handle wma, int vdev_id) 4334 { 4335 return wma->interfaces[vdev_id].extscan_in_progress; 4336 } 4337 #else 4338 /** 4339 * wma_is_extscan_in_progress(): dummy 4340 * 4341 * Return: False since no extscan can be in progress 4342 * when feature flag is not defined. 4343 */ 4344 bool wma_is_extscan_in_progress(tp_wma_handle wma, int vdev_id) 4345 { 4346 return false; 4347 } 4348 #endif 4349 4350 /** 4351 * wma_is_p2plo_in_progress(): check if P2P listen offload is in progress 4352 * @wma: wma handle 4353 * @vdev_id: vdev_id 4354 * 4355 * This function is to check if p2p listen offload is in progress, 4356 * true: p2p listen offload in progress 4357 * false: otherwise 4358 * 4359 * Return: TRUE/FALSE 4360 */ 4361 static inline 4362 bool wma_is_p2plo_in_progress(tp_wma_handle wma, int vdev_id) 4363 { 4364 return wma->interfaces[vdev_id].p2p_lo_in_progress; 4365 } 4366 4367 #ifdef WLAN_FEATURE_LPSS 4368 /** 4369 * wma_is_lpass_enabled() - check if lpass is enabled 4370 * @handle: Pointer to wma handle 4371 * 4372 * WoW is needed if LPASS or NaN feature is enabled in INI because 4373 * target can't wake up itself if its put in PDEV suspend when LPASS 4374 * or NaN features are supported 4375 * 4376 * Return: true if lpass is enabled else false 4377 */ 4378 bool static wma_is_lpass_enabled(tp_wma_handle wma) 4379 { 4380 if (wma->is_lpass_enabled) 4381 return true; 4382 else 4383 return false; 4384 } 4385 #else 4386 bool static wma_is_lpass_enabled(tp_wma_handle wma) 4387 { 4388 return false; 4389 } 4390 #endif 4391 4392 #ifdef WLAN_FEATURE_NAN 4393 /** 4394 * 
wma_is_nan_enabled() - check if NaN is enabled 4395 * @handle: Pointer to wma handle 4396 * 4397 * WoW is needed if LPASS or NaN feature is enabled in INI because 4398 * target can't wake up itself if its put in PDEV suspend when LPASS 4399 * or NaN features are supported 4400 * 4401 * Return: true if NaN is enabled else false 4402 */ 4403 bool static wma_is_nan_enabled(tp_wma_handle wma) 4404 { 4405 if (wma->is_nan_enabled) 4406 return true; 4407 else 4408 return false; 4409 } 4410 #else 4411 bool static wma_is_nan_enabled(tp_wma_handle wma) 4412 { 4413 return false; 4414 } 4415 #endif 4416 4417 /** 4418 * wma_is_wow_applicable(): should enable wow 4419 * @wma: wma handle 4420 * 4421 * Enable WOW if any one of the condition meets, 4422 * 1) Is any one of vdev in beaconning mode (in AP mode) ? 4423 * 2) Is any one of vdev in connected state (in STA mode) ? 4424 * 3) Is PNO in progress in any one of vdev ? 4425 * 4) Is Extscan in progress in any one of vdev ? 4426 * 5) Is P2P listen offload in any one of vdev? 4427 * 6) Is any vdev in NAN data mode? BSS is already started at the 4428 * the time of device creation. It is ready to accept data 4429 * requests. 4430 * 7) If LPASS feature is enabled 4431 * 8) If NaN feature is enabled 4432 * If none of above conditions is true then return false 4433 * 4434 * Return: true if wma needs to configure wow false otherwise. 
 */
bool wma_is_wow_applicable(tp_wma_handle wma)
{
	int vdev_id;

	/* Beaconing vdevs (with beacon offload support) always need WoW. */
	if (wma_support_wow_for_beaconing(wma)) {
		WMA_LOGD("vdev is in beaconning mode, enabling wow");
		return true;
	}

	/*
	 * NOTE(review): the LPASS/NaN checks do not depend on vdev_id, so
	 * evaluating them inside the per-vdev loop is redundant (though
	 * harmless) — they could be hoisted above the loop.
	 */
	for (vdev_id = 0; vdev_id < wma->max_bssid; vdev_id++) {
		if (wma->interfaces[vdev_id].conn_state) {
			WMA_LOGD("STA is connected, enabling wow");
			return true;
		} else if (wma_is_pnoscan_in_progress(wma, vdev_id)) {
			WMA_LOGD("PNO is in progress, enabling wow");
			return true;
		} else if (wma_is_extscan_in_progress(wma, vdev_id)) {
			WMA_LOGD("EXT is in progress, enabling wow");
			return true;
		} else if (wma_is_p2plo_in_progress(wma, vdev_id)) {
			WMA_LOGD("P2P LO is in progress, enabling wow");
			return true;
		} else if (wma_is_lpass_enabled(wma)) {
			WMA_LOGD("LPASS is enabled, enabling WoW");
			return true;
		} else if (wma_is_nan_enabled(wma)) {
			WMA_LOGD("NAN is enabled, enabling WoW");
			return true;
		} else if (WMA_IS_VDEV_IN_NDI_MODE(wma->interfaces, vdev_id)) {
			WMA_LOGD("vdev %d is in NAN data mode, enabling wow",
				 vdev_id);
			return true;
		}
	}

	WMA_LOGD("All vdev are in disconnected state and pno/extscan is not in progress, skipping wow");
	return false;
}

/**
 * wma_configure_dynamic_wake_events() - configure dynamic wake events
 * @wma: wma handle
 *
 * Some wake events need to be enabled dynamically. Control those here.
4479 * 4480 * Return: none 4481 */ 4482 void wma_configure_dynamic_wake_events(tp_wma_handle wma) 4483 { 4484 int vdev_id; 4485 int enable_mask; 4486 int disable_mask; 4487 4488 for (vdev_id = 0; vdev_id < wma->max_bssid; vdev_id++) { 4489 enable_mask = 0; 4490 disable_mask = 0; 4491 4492 if (wma_is_pnoscan_in_progress(wma, vdev_id)) { 4493 if (wma_is_pnoscan_match_found(wma, vdev_id)) 4494 enable_mask |= 4495 (1 << WOW_NLO_SCAN_COMPLETE_EVENT); 4496 else 4497 disable_mask |= 4498 (1 << WOW_NLO_SCAN_COMPLETE_EVENT); 4499 } 4500 4501 if (enable_mask != 0) 4502 wma_enable_disable_wakeup_event(wma, vdev_id, 4503 enable_mask, true); 4504 if (disable_mask != 0) 4505 wma_enable_disable_wakeup_event(wma, vdev_id, 4506 disable_mask, false); 4507 } 4508 } 4509 4510 #ifdef FEATURE_WLAN_LPHB 4511 /** 4512 * wma_apply_lphb(): apply cached LPHB settings 4513 * @wma: wma handle 4514 * 4515 * LPHB cache, if any item was enabled, should be 4516 * applied. 4517 */ 4518 static inline 4519 void wma_apply_lphb(tp_wma_handle wma) 4520 { 4521 int i; 4522 WMA_LOGD("%s: checking LPHB cache", __func__); 4523 for (i = 0; i < 2; i++) { 4524 if (wma->wow.lphb_cache[i].params.lphbEnableReq.enable) { 4525 WMA_LOGD("%s: LPHB cache for item %d is marked as enable", 4526 __func__, i + 1); 4527 wma_lphb_conf_hbenable(wma, &(wma->wow.lphb_cache[i]), 4528 false); 4529 } 4530 } 4531 } 4532 #else 4533 void wma_apply_lphb(tp_wma_handle wma) {} 4534 #endif /* FEATURE_WLAN_LPHB */ 4535 4536 static void wma_notify_suspend_req_procesed(tp_wma_handle wma, 4537 enum qdf_suspend_type type) 4538 { 4539 if (type == QDF_SYSTEM_SUSPEND) 4540 wma_send_status_to_suspend_ind(wma, true); 4541 else if (type == QDF_RUNTIME_SUSPEND) 4542 qdf_event_set(&wma->runtime_suspend); 4543 } 4544 4545 /** 4546 * wma_suspend_req() - Handles suspend indication request received from umac. 
 * @wma: wma handle
 * @type: type of suspend
 *
 * The type controls how we notify the indicator that the indication has
 * been processed
 *
 * Return: QDF status
 */
QDF_STATUS wma_suspend_req(tp_wma_handle wma, enum qdf_suspend_type type)
{
	if (type == QDF_RUNTIME_SUSPEND)
		wmi_set_runtime_pm_inprogress(wma->wmi_handle, true);

	/* Arm WoW only when something (AP, STA, PNO, extscan, ...) needs it. */
	if (wma_is_wow_applicable(wma)) {
		WMA_LOGI("WOW Suspend");
		wma_apply_lphb(wma);

		wma_configure_dynamic_wake_events(wma);

		wma->wow.wow_enable = true;
		wma->wow.wow_enable_cmd_sent = false;
	}

	/* Set the Suspend DTIM Parameters */
	wma_set_suspend_dtim(wma);

	wma_notify_suspend_req_procesed(wma, type);

	/* to handle race between hif_pci_suspend and
	 * unpause/pause tx handler
	 */
	wma_set_wow_bus_suspend(wma, 1);

	return QDF_STATUS_SUCCESS;
}

/**
 * wma_send_host_wakeup_ind_to_fw() - send wakeup ind to fw
 * @wma: wma handle
 *
 * Sends host wakeup indication to FW. On receiving this indication,
 * FW will come out of WOW.
 *
 * Return: QDF status
 */
static QDF_STATUS wma_send_host_wakeup_ind_to_fw(tp_wma_handle wma)
{
	QDF_STATUS qdf_status = QDF_STATUS_SUCCESS;
	int ret;
#ifdef CONFIG_CNSS
	/* PE context is needed only to read the self-recovery INI flag. */
	tpAniSirGlobal pMac = cds_get_context(QDF_MODULE_ID_PE);
	if (NULL == pMac) {
		WMA_LOGE("%s: Unable to get PE context", __func__);
		return QDF_STATUS_E_FAILURE;
	}
#endif /* CONFIG_CNSS */

	qdf_event_reset(&wma->wma_resume_event);

	ret = wmi_unified_host_wakeup_ind_to_fw_cmd(wma->wmi_handle);
	if (ret) {
		return QDF_STATUS_E_FAILURE;
	}

	WMA_LOGD("Host wakeup indication sent to fw");

	/* Block until the firmware signals resume (or we time out). */
	qdf_status = qdf_wait_single_event(&(wma->wma_resume_event),
					   WMA_RESUME_TIMEOUT);
	if (QDF_STATUS_SUCCESS != qdf_status) {
		WMA_LOGP("%s: Timeout waiting for resume event from FW",
			 __func__);
		WMA_LOGP("%s: Pending commands %d credits %d", __func__,
			 wmi_get_pending_cmds(wma->wmi_handle),
			 wmi_get_host_credits(wma->wmi_handle));
		if (!cds_is_driver_recovering()) {
#ifdef CONFIG_CNSS
			/* Trigger self-recovery if configured; else assert. */
			if (pMac->sme.enableSelfRecovery) {
				cds_trigger_recovery();
			} else {
				QDF_BUG(0);
			}
#else
			QDF_BUG(0);
#endif /* CONFIG_CNSS */
		} else {
			WMA_LOGE("%s: SSR in progress, ignore resume timeout",
				 __func__);
		}
	} else {
		WMA_LOGD("Host wakeup received");
	}

	/* Target is out of suspend only if the resume event arrived. */
	if (QDF_STATUS_SUCCESS == qdf_status)
		wmi_set_target_suspend(wma->wmi_handle, false);

	return qdf_status;
}

/**
 * wma_disable_wow_in_fw() - Disable wow in PCIe resume context.
 * @handle: wma handle
 *
 * Return: 0 for success or error code
 */
QDF_STATUS wma_disable_wow_in_fw(WMA_HANDLE handle)
{
	tp_wma_handle wma = handle;
	QDF_STATUS ret;

	ret = wma_send_host_wakeup_ind_to_fw(wma);

	if (ret != QDF_STATUS_SUCCESS)
		return ret;

	wma->wow.wow_enable = false;
	wma->wow.wow_enable_cmd_sent = false;

	/* To allow the tx pause/unpause events */
	wma_set_wow_bus_suspend(wma, 0);
	/* Unpause the vdev as we are resuming */
	wma_unpause_vdev(wma);

	return ret;
}


/**
 * wma_is_wow_mode_selected() - check if wow needs to be enabled in fw
 * @handle: Pointer to wma handle
 *
 * Returns the cached wow_enable flag that wma_suspend_req() set while
 * processing the suspend indication.
 *
 * Return: true if wow mode is needed else false
 */
bool wma_is_wow_mode_selected(WMA_HANDLE handle)
{
	tp_wma_handle wma = (tp_wma_handle) handle;
	WMA_LOGD("WoW enable %d", wma->wow.wow_enable);
	return wma->wow.wow_enable;
}

/**
 * wma_del_ts_req() - send DELTS request to fw
 * @wma: wma handle
 * @msg: delts params; always freed here before returning
 *
 * Return: none
 */
void wma_del_ts_req(tp_wma_handle wma, tDelTsParams *msg)
{
	if (wmi_unified_del_ts_cmd(wma->wmi_handle,
				   msg->sessionId,
				   TID_TO_WME_AC(msg->userPrio))) {
		WMA_LOGP("%s: Failed to send vdev DELTS command", __func__);
	}

#ifdef WLAN_FEATURE_ROAM_OFFLOAD
	if (msg->setRICparams == true)
		wma_set_ric_req(wma, msg, false);
#endif /* WLAN_FEATURE_ROAM_OFFLOAD */

	qdf_mem_free(msg);
}

/**
 * wma_aggr_qos_req() - send aggr qos request to fw
 * @wma: handle to wma
 * @pAggrQosRspMsg: combined struct for all ADD_TS requests.
 *
 * A function to handle WMA_AGGR_QOS_REQ. This will send out
 * ADD_TS requests to firmware in loop for all the ACs with
 * active flow.
 *
 * Return: none
 */
void wma_aggr_qos_req(tp_wma_handle wma,
		      tAggrAddTsParams *pAggrQosRspMsg)
{
	wmi_unified_aggr_qos_cmd(wma->wmi_handle,
				 (struct aggr_add_ts_param *)pAggrQosRspMsg);
	/* send response to upper layers from here only. */
	wma_send_msg(wma, WMA_AGGR_QOS_RSP, pAggrQosRspMsg, 0);
}

#ifdef FEATURE_WLAN_ESE
/**
 * wma_set_tsm_interval() - Set TSM interval
 * @req: pointer to ADDTS request
 *
 * Return: QDF_STATUS_E_FAILURE or QDF_STATUS_SUCCESS
 */
static QDF_STATUS wma_set_tsm_interval(tAddTsParams *req)
{
	/*
	 * msmt_interval is in unit called TU (1 TU = 1024 us)
	 * max value of msmt_interval cannot make resulting
	 * interval_milliseconds overflow 32 bit
	 *
	 */
	uint32_t interval_milliseconds;
	ol_txrx_pdev_handle pdev = cds_get_context(QDF_MODULE_ID_TXRX);
	if (NULL == pdev) {
		WMA_LOGE("%s: Failed to get pdev", __func__);
		return QDF_STATUS_E_FAILURE;
	}

	/* TU -> ms: 1 TU = 1024 us, then us -> ms. */
	interval_milliseconds = (req->tsm_interval * 1024) / 1000;

	ol_tx_set_compute_interval(pdev, interval_milliseconds);
	return QDF_STATUS_SUCCESS;
}
#else
static inline QDF_STATUS wma_set_tsm_interval(tAddTsParams *req)
{
	return QDF_STATUS_SUCCESS;
}
#endif /* FEATURE_WLAN_ESE */

/**
 * wma_add_ts_req() - send ADDTS request to fw
 * @wma: wma handle
 * @msg: ADDTS params; status is updated in place and the message is
 *       posted back to upper layers as WMA_ADD_TS_RSP
 *
 * Return: none
 */
void wma_add_ts_req(tp_wma_handle wma, tAddTsParams *msg)
{
	struct add_ts_param cmd = {0};
	msg->status = QDF_STATUS_SUCCESS;

	if (wma_set_tsm_interval(msg) == QDF_STATUS_SUCCESS) {

		cmd.sme_session_id = msg->sme_session_id;
		cmd.tspec.tsinfo.traffic.userPrio =
			TID_TO_WME_AC(msg->tspec.tsinfo.traffic.userPrio);
		cmd.tspec.mediumTime = msg->tspec.mediumTime;
		if (wmi_unified_add_ts_cmd(wma->wmi_handle, &cmd))
			msg->status = QDF_STATUS_E_FAILURE;

#ifdef WLAN_FEATURE_ROAM_OFFLOAD
		if (msg->setRICparams == true)
			wma_set_ric_req(wma, msg, true);
#endif /* WLAN_FEATURE_ROAM_OFFLOAD */

	}
	wma_send_msg(wma, WMA_ADD_TS_RSP, msg, 0);
}

/**
 * wma_enable_disable_packet_filter() - enable/disable packet filter in target
 * @wma: Pointer to wma handle
 * @vdev_id: vdev id
 * @enable: Flag to enable/disable packet filter
 *
 * Return: 0 for success or error code
 */
static int wma_enable_disable_packet_filter(tp_wma_handle wma,
					uint8_t vdev_id, bool enable)
{
	int ret;

	ret = wmi_unified_enable_disable_packet_filter_cmd(wma->wmi_handle,
							   vdev_id, enable);
	if (ret)
		WMA_LOGE("Failed to send packet filter wmi cmd to fw");

	return ret;
}

/**
 * wma_config_packet_filter() - configure packet filter in target
 * @wma: Pointer to wma handle
 * @vdev_id: vdev id
 * @rcv_filter_param: Packet filter parameters
 * @filter_id: Filter id
 * @enable: Flag to add/delete packet filter configuration
 *
 * Return: 0 for success or error code
 */
static int wma_config_packet_filter(tp_wma_handle wma,
		uint8_t vdev_id, tSirRcvPktFilterCfgType *rcv_filter_param,
		uint8_t filter_id, bool enable)
{
	int err;

	/* send the command along with data */
	err = wmi_unified_config_packet_filter_cmd(wma->wmi_handle,
			vdev_id, (struct rcv_pkt_filter_config *)rcv_filter_param,
			filter_id, enable);
	if (err) {
		WMA_LOGE("Failed to send pkt_filter cmd");
		return -EIO;
	}

	/* Enable packet filter */
	if (enable)
		wma_enable_disable_packet_filter(wma, vdev_id, true);

	return 0;
}

/**
 * wma_process_receive_filter_set_filter_req() - enable packet filter
 * @wma: wma handle
 * @rcv_filter_param: filter params
 *
 * Return: 0 for
success or error code
 */
int wma_process_receive_filter_set_filter_req(tp_wma_handle wma,
				tSirRcvPktFilterCfgType *rcv_filter_param)
{
	int ret = 0;
	uint8_t vdev_id;

	/* Get the vdev id */
	if (!wma_find_vdev_by_bssid(wma,
			rcv_filter_param->bssid.bytes, &vdev_id)) {
		WMA_LOGE("vdev handle is invalid for %pM",
			 rcv_filter_param->bssid.bytes);
		return -EINVAL;
	}

	ret = wma_config_packet_filter(wma, vdev_id, rcv_filter_param,
				       rcv_filter_param->filterId, true);

	return ret;
}

/**
 * wma_process_receive_filter_clear_filter_req() - disable packet filter
 * @wma: wma handle
 * @rcv_clear_param: filter params
 *
 * Return: 0 for success or error code
 */
int wma_process_receive_filter_clear_filter_req(tp_wma_handle wma,
				tSirRcvFltPktClearParam *rcv_clear_param)
{
	int ret = 0;
	uint8_t vdev_id;

	/* Get the vdev id */
	if (!wma_find_vdev_by_bssid(wma,
			rcv_clear_param->bssid.bytes, &vdev_id)) {
		WMA_LOGE("vdev handle is invalid for %pM",
			 rcv_clear_param->bssid.bytes);
		return -EINVAL;
	}

	/* NULL config: only the filter id is needed to delete a filter. */
	ret = wma_config_packet_filter(wma, vdev_id, NULL,
				       rcv_clear_param->filterId, false);

	return ret;
}

#ifdef FEATURE_WLAN_ESE

#define TSM_DELAY_HISTROGRAM_BINS 4
/**
 * wma_process_tsm_stats_req() - process tsm stats request
 * @wma_handler: handle to wma
 * @pTsmStatsMsg: TSM stats struct that needs to be populated and
 *	   passed in message.
 *
 * A parallel function to WMA_ProcessTsmStatsReq for pronto. This
 * function fetches stats from data path APIs and post
 * WMA_TSM_STATS_RSP msg back to LIM.
 *
 * Return: QDF status
 */
QDF_STATUS wma_process_tsm_stats_req(tp_wma_handle wma_handler,
				     void *pTsmStatsMsg)
{
	uint8_t counter;
	uint32_t queue_delay_microsec = 0;
	uint32_t tx_delay_microsec = 0;
	uint16_t packet_count = 0;
	uint16_t packet_loss_count = 0;
	tpAniTrafStrmMetrics pTsmMetric = NULL;
	tpAniGetTsmStatsReq pStats = (tpAniGetTsmStatsReq) pTsmStatsMsg;
	tpAniGetTsmStatsRsp pTsmRspParams = NULL;
	int tid = pStats->tid;
	/*
	 * The number of histrogram bin report by data path api are different
	 * than required by TSM, hence different (6) size array used
	 */
	uint16_t bin_values[QCA_TX_DELAY_HIST_REPORT_BINS] = { 0, };

	ol_txrx_pdev_handle pdev = cds_get_context(QDF_MODULE_ID_TXRX);

	if (NULL == pdev) {
		WMA_LOGE("%s: Failed to get pdev", __func__);
		qdf_mem_free(pTsmStatsMsg);
		return QDF_STATUS_E_INVAL;
	}

	/* get required values from data path APIs */
	ol_tx_delay(pdev, &queue_delay_microsec, &tx_delay_microsec, tid);
	ol_tx_delay_hist(pdev, bin_values, tid);
	ol_tx_packet_count(pdev, &packet_count, &packet_loss_count, tid);

	pTsmRspParams = qdf_mem_malloc(sizeof(*pTsmRspParams));
	if (NULL == pTsmRspParams) {
		QDF_TRACE(QDF_MODULE_ID_WMA, QDF_TRACE_LEVEL_ERROR,
			  "%s: QDF MEM Alloc Failure", __func__);
		QDF_ASSERT(0);
		qdf_mem_free(pTsmStatsMsg);
		return QDF_STATUS_E_NOMEM;
	}
	pTsmRspParams->staId = pStats->staId;
	pTsmRspParams->rc = eSIR_FAILURE;
	/* response keeps a pointer to the request; receiver owns both */
	pTsmRspParams->tsmStatsReq = pStats;
	pTsmMetric = &pTsmRspParams->tsmMetrics;
	/* populate pTsmMetric */
	pTsmMetric->UplinkPktQueueDly = queue_delay_microsec;
	/* store only required number of bin values */
	for (counter = 0; counter < TSM_DELAY_HISTROGRAM_BINS; counter++) {
		pTsmMetric->UplinkPktQueueDlyHist[counter] =
			bin_values[counter];
	}
	pTsmMetric->UplinkPktTxDly = tx_delay_microsec;
	pTsmMetric->UplinkPktLoss = packet_loss_count;
	pTsmMetric->UplinkPktCount = packet_count;

	/*
	 * No need to populate roaming delay and roaming count as they are
	 * being populated just before sending IAPP frame out
	 */
	/* post this message to LIM/PE */
	wma_send_msg(wma_handler, WMA_TSM_STATS_RSP, (void *)pTsmRspParams, 0);
	return QDF_STATUS_SUCCESS;
}

#endif /* FEATURE_WLAN_ESE */

/**
 * wma_add_clear_mcbc_filter() - set mcast filter command to fw
 * @wma_handle: wma handle
 * @vdev_id: vdev id
 * @multicast_addr: mcast address
 * @clearList: clear list flag
 *
 * Return: 0 for success or error code
 */
static QDF_STATUS wma_add_clear_mcbc_filter(tp_wma_handle wma_handle,
					uint8_t vdev_id,
					struct qdf_mac_addr multicast_addr,
					bool clearList)
{
	return wmi_unified_add_clear_mcbc_filter_cmd(wma_handle->wmi_handle,
				vdev_id, multicast_addr, clearList);
}

/**
 * wma_config_enhance_multicast_offload() - config enhance multicast offload
 * @wma_handle: wma handle
 * @vdev_id: vdev id
 * @action: enable or disable enhance multicast offload
 *
 * Return: none
 */
static void wma_config_enhance_multicast_offload(tp_wma_handle wma_handle,
						 uint8_t vdev_id,
						 uint8_t action)
{
	int status;
	wmi_buf_t buf;
	wmi_config_enhanced_mcast_filter_cmd_fixed_param *cmd;

	buf = wmi_buf_alloc(wma_handle->wmi_handle, sizeof(*cmd));
	if (!buf) {
		WMA_LOGE("Failed to allocate buffer to send set key cmd");
		return;
	}

	cmd = (wmi_config_enhanced_mcast_filter_cmd_fixed_param *)
						wmi_buf_data(buf);

	WMITLV_SET_HDR(&cmd->tlv_header,
		WMITLV_TAG_STRUC_wmi_config_enhanced_mcast_filter_fixed_param,
		WMITLV_GET_STRUCT_TLVLEN(wmi_config_enhanced_mcast_filter_cmd_fixed_param));

	cmd->vdev_id = vdev_id;
cmd->enable = ((0 == action) ? ENHANCED_MCAST_FILTER_DISABLED : 5031 ENHANCED_MCAST_FILTER_ENABLED); 5032 5033 WMA_LOGD("%s: config enhance multicast offload action %d for vdev %d", 5034 __func__, action, vdev_id); 5035 5036 status = wmi_unified_cmd_send(wma_handle->wmi_handle, buf, 5037 sizeof(*cmd), WMI_CONFIG_ENHANCED_MCAST_FILTER_CMDID); 5038 if (status) { 5039 qdf_nbuf_free(buf); 5040 WMA_LOGE("%s:Failed to send WMI_CONFIG_ENHANCED_MCAST_FILTER_CMDID", 5041 __func__); 5042 } 5043 5044 return; 5045 } 5046 5047 /** 5048 * wma_process_mcbc_set_filter_req() - process mcbc set filter request 5049 * @wma_handle: wma handle 5050 * @mcbc_param: mcbc params 5051 * 5052 * Return: QDF status 5053 */ 5054 QDF_STATUS wma_process_mcbc_set_filter_req(tp_wma_handle wma_handle, 5055 tSirRcvFltMcAddrList *mcbc_param) 5056 { 5057 uint8_t vdev_id = 0; 5058 int i; 5059 5060 if (mcbc_param->ulMulticastAddrCnt <= 0) { 5061 WMA_LOGW("Number of multicast addresses is 0"); 5062 return QDF_STATUS_E_FAILURE; 5063 } 5064 5065 if (!wma_find_vdev_by_addr(wma_handle, 5066 mcbc_param->self_macaddr.bytes, &vdev_id)) { 5067 WMA_LOGE("%s: Failed to find vdev id for %pM", __func__, 5068 mcbc_param->bssid.bytes); 5069 return QDF_STATUS_E_FAILURE; 5070 } 5071 5072 /* 5073 * Configure enhance multicast offload feature for filtering out 5074 * multicast IP data packets transmitted using unicast MAC address 5075 */ 5076 if (WMI_SERVICE_IS_ENABLED(wma_handle->wmi_service_bitmap, 5077 WMI_SERVICE_ENHANCED_MCAST_FILTER)) { 5078 WMA_LOGD("%s: FW supports enhance multicast offload", __func__); 5079 wma_config_enhance_multicast_offload(wma_handle, vdev_id, 5080 mcbc_param->action); 5081 } else { 5082 WMA_LOGD("%s: FW does not support enhance multicast offload", 5083 __func__); 5084 } 5085 5086 /* set mcbc_param->action to clear MCList and reset 5087 * to configure the MCList in FW 5088 */ 5089 5090 for (i = 0; i < mcbc_param->ulMulticastAddrCnt; i++) { 5091 wma_add_clear_mcbc_filter(wma_handle, vdev_id, 
					  mcbc_param->multicastAddr[i],
					  (mcbc_param->action == 0));
	}
	return QDF_STATUS_SUCCESS;
}

#ifdef WLAN_FEATURE_GTK_OFFLOAD
#define GTK_OFFLOAD_ENABLE 0
#define GTK_OFFLOAD_DISABLE 1

/**
 * wma_gtk_offload_status_event() - GTK offload status event handler
 * @handle: wma handle
 * @event: event buffer
 * @len: buffer length
 *
 * Return: 0 for success or error code
 */
int wma_gtk_offload_status_event(void *handle, uint8_t *event,
				 uint32_t len)
{
	tp_wma_handle wma = (tp_wma_handle) handle;
	WMI_GTK_OFFLOAD_STATUS_EVENT_fixed_param *status;
	WMI_GTK_OFFLOAD_STATUS_EVENTID_param_tlvs *param_buf;
	tpSirGtkOffloadGetInfoRspParams resp;
	cds_msg_t cds_msg;
	uint8_t *bssid;

	WMA_LOGD("%s Enter", __func__);

	param_buf = (WMI_GTK_OFFLOAD_STATUS_EVENTID_param_tlvs *) event;
	if (!param_buf) {
		WMA_LOGE("param_buf is NULL");
		return -EINVAL;
	}

	status = (WMI_GTK_OFFLOAD_STATUS_EVENT_fixed_param *) param_buf->fixed_param;

	/* validate the advertised length before using the fixed params */
	if (len < sizeof(WMI_GTK_OFFLOAD_STATUS_EVENT_fixed_param)) {
		WMA_LOGE("Invalid length for GTK status");
		return -EINVAL;
	}
	bssid = wma_find_bssid_by_vdev_id(wma, status->vdev_id);
	if (!bssid) {
		WMA_LOGE("invalid bssid for vdev id %d", status->vdev_id);
		return -ENOENT;
	}

	resp = qdf_mem_malloc(sizeof(*resp));
	if (!resp) {
		WMA_LOGE("%s: Failed to alloc response", __func__);
		return -ENOMEM;
	}
	qdf_mem_zero(resp, sizeof(*resp));
	resp->mesgType = eWNI_PMC_GTK_OFFLOAD_GETINFO_RSP;
	resp->mesgLen = sizeof(*resp);
	resp->ulStatus = QDF_STATUS_SUCCESS;
	resp->ulTotalRekeyCount = status->refresh_cnt;
	/* TODO: Is the total rekey count and GTK rekey count same?
	 */
	resp->ulGTKRekeyCount = status->refresh_cnt;

	qdf_mem_copy(&resp->ullKeyReplayCounter, &status->replay_counter,
		     GTK_REPLAY_COUNTER_BYTES);

	qdf_mem_copy(resp->bssid.bytes, bssid, IEEE80211_ADDR_LEN);

#ifdef IGTK_OFFLOAD
	/* TODO: Is the refresh count same for GTK and IGTK? */
	resp->ulIGTKRekeyCount = status->refresh_cnt;
#endif /* IGTK_OFFLOAD */

	cds_msg.type = eWNI_PMC_GTK_OFFLOAD_GETINFO_RSP;
	cds_msg.bodyptr = (void *)resp;
	cds_msg.bodyval = 0;

	/* resp ownership transfers to SME on successful post */
	if (cds_mq_post_message(CDS_MQ_ID_SME, (cds_msg_t *) &cds_msg)
	    != QDF_STATUS_SUCCESS) {
		WMA_LOGE("Failed to post GTK response to SME");
		qdf_mem_free(resp);
		return -EINVAL;
	}

	WMA_LOGD("GTK: got target status with replay counter "
		 "%02x%02x%02x%02x%02x%02x%02x%02x. vdev %d "
		 "Refresh GTK %d times exchanges since last set operation",
		 status->replay_counter[0],
		 status->replay_counter[1],
		 status->replay_counter[2],
		 status->replay_counter[3],
		 status->replay_counter[4],
		 status->replay_counter[5],
		 status->replay_counter[6],
		 status->replay_counter[7],
		 status->vdev_id, status->refresh_cnt);

	WMA_LOGD("%s Exit", __func__);

	return 0;
}

/**
 * wma_send_gtk_offload_req() - send GTK offload command to fw
 * @wma: wma handle
 * @vdev_id: vdev id
 * @params: GTK offload parameters
 *
 * Return: QDF status
 */
static QDF_STATUS wma_send_gtk_offload_req(tp_wma_handle wma, uint8_t vdev_id,
					   tpSirGtkOffloadParams params)
{
	struct gtk_offload_params offload_params = {0};
	QDF_STATUS status = QDF_STATUS_SUCCESS;
	bool enable_offload;
	uint32_t gtk_offload_opcode;

	WMA_LOGD("%s Enter", __func__);

	/* Request target to enable GTK offload */
	if (params->ulFlags == GTK_OFFLOAD_ENABLE) {
		gtk_offload_opcode = GTK_OFFLOAD_ENABLE_OPCODE;
		wma->wow.gtk_err_enable[vdev_id] = true;

		/* Copy the keys and replay counter */
		qdf_mem_copy(offload_params.aKCK, params->aKCK,
			     GTK_OFFLOAD_KCK_BYTES);
		qdf_mem_copy(offload_params.aKEK, params->aKEK,
			     GTK_OFFLOAD_KEK_BYTES);
		qdf_mem_copy(&offload_params.ullKeyReplayCounter,
			     &params->ullKeyReplayCounter,
			     GTK_REPLAY_COUNTER_BYTES);
	} else {
		wma->wow.gtk_err_enable[vdev_id] = false;
		gtk_offload_opcode = GTK_OFFLOAD_DISABLE_OPCODE;
	}

	enable_offload = params->ulFlags;

	/* send the wmi command */
	status = wmi_unified_send_gtk_offload_cmd(wma->wmi_handle,
					vdev_id, &offload_params,
					enable_offload,
					gtk_offload_opcode);
	if (QDF_IS_STATUS_ERROR(status))
		goto out;

	WMA_LOGD("VDEVID: %d, GTK_FLAGS: x%x", vdev_id, gtk_offload_opcode);
out:
	WMA_LOGD("%s Exit", __func__);
	return status;
}

/**
 * wma_process_gtk_offload_req() - process GTK offload req from umac
 * @wma: wma handle
 * @params: GTK offload params; freed here on every path
 *
 * Return: QDF status
 */
QDF_STATUS wma_process_gtk_offload_req(tp_wma_handle wma,
				       tpSirGtkOffloadParams params)
{
	uint8_t vdev_id;
	QDF_STATUS status = QDF_STATUS_SUCCESS;

	WMA_LOGD("%s Enter", __func__);

	/* Get the vdev id */
	if (!wma_find_vdev_by_bssid(wma, params->bssid.bytes, &vdev_id)) {
		WMA_LOGE("vdev handle is invalid for %pM", params->bssid.bytes);
		status = QDF_STATUS_E_INVAL;
		goto out;
	}

	/* Validate vdev id */
	if (vdev_id >= WMA_MAX_SUPPORTED_BSS) {
		WMA_LOGE("invalid vdev_id %d for %pM", vdev_id,
			 params->bssid.bytes);
		status = QDF_STATUS_E_INVAL;
		goto out;
	}

	/*
	 * If offload is already armed for this vdev, disable it first so
	 * the firmware picks up the updated key material below.
	 */
	if ((params->ulFlags == GTK_OFFLOAD_ENABLE) &&
	    (wma->wow.gtk_err_enable[vdev_id] == true)) {
		WMA_LOGE("%s GTK Offload already enabled. Disable it first "
			 "vdev_id %d", __func__, vdev_id);
		params->ulFlags = GTK_OFFLOAD_DISABLE;
		status = wma_send_gtk_offload_req(wma, vdev_id, params);
		if (status != QDF_STATUS_SUCCESS) {
			WMA_LOGE("%s Failed to disable GTK Offload", __func__);
			goto out;
		}
		WMA_LOGD("%s Enable GTK Offload again with updated inputs",
			 __func__);
		params->ulFlags = GTK_OFFLOAD_ENABLE;
	}
	status = wma_send_gtk_offload_req(wma, vdev_id, params);
out:
	qdf_mem_free(params);
	WMA_LOGD("%s Exit", __func__);
	return status;
}

/**
 * wma_process_gtk_offload_getinfo_req() - send GTK offload cmd to fw
 * @wma: wma handle
 * @params: GTK offload params; freed here on every path
 *
 * Return: QDF status
 */
QDF_STATUS wma_process_gtk_offload_getinfo_req(tp_wma_handle wma,
				tpSirGtkOffloadGetInfoRspParams params)
{
	uint8_t vdev_id;
	uint64_t offload_req_opcode;
	QDF_STATUS status = QDF_STATUS_SUCCESS;

	WMA_LOGD("%s Enter", __func__);

	/* Get the vdev id */
	if (!wma_find_vdev_by_bssid(wma, params->bssid.bytes, &vdev_id)) {
		WMA_LOGE("vdev handle is invalid for %pM", params->bssid.bytes);
		status = QDF_STATUS_E_INVAL;
		goto out;
	}

	/* Request for GTK offload status */
	offload_req_opcode = GTK_OFFLOAD_REQUEST_STATUS_OPCODE;

	/* send the wmi command */
	status = wmi_unified_process_gtk_offload_getinfo_cmd(wma->wmi_handle,
				vdev_id, offload_req_opcode);

out:
	qdf_mem_free(params);
	WMA_LOGD("%s Exit", __func__);
	return status;
}
#endif /* WLAN_FEATURE_GTK_OFFLOAD */

/**
 * wma_enable_arp_ns_offload() - enable ARP NS offload
 * @wma: wma handle
 * @offload_req: offload request; consumed (freed) by this function
 * @config_arp: true when @offload_req carries ARP config, false for NS
 *
 * To configure ARP NS off load data to firmware
 * when target goes to wow mode.
 *
 * Return: QDF Status
 */
QDF_STATUS wma_enable_arp_ns_offload(tp_wma_handle wma,
				     tpSirHostOffloadReq
				     offload_req, bool config_arp)
{
	int32_t res;
	uint8_t vdev_id;
	tpSirHostOffloadReq ns_offload_req;
	tpSirHostOffloadReq arp_offload_req;

	/* Get the vdev id; offload_req is caller-allocated and freed here
	 * on every path, success or failure.
	 */
	if (!wma_find_vdev_by_bssid(wma, offload_req->bssid.bytes,
				    &vdev_id)) {
		WMA_LOGE("vdev handle is invalid for %pM",
			 offload_req->bssid.bytes);
		qdf_mem_free(offload_req);
		return QDF_STATUS_E_INVAL;
	}

	if (!wma->interfaces[vdev_id].vdev_up) {
		WMA_LOGE("vdev %d is not up skipping arp/ns offload", vdev_id);
		qdf_mem_free(offload_req);
		return QDF_STATUS_E_FAILURE;
	}

	/*
	 * config_arp is true means arp request comes from upper layer
	 * Hence ns request need to used from wma cached request.
	 * (Firmware takes both ARP and NS config in a single command, so
	 * the half not supplied by the caller comes from the cached copy.)
	 */
	if (config_arp) {
		arp_offload_req = offload_req;
		ns_offload_req = &wma->interfaces[vdev_id].ns_offload_req;
		WMA_LOGD(" %s: ARP Offload vdev_id: %d enable: %d",
			 __func__, vdev_id,
			 offload_req->enableOrDisable);
	} else {
		ns_offload_req = offload_req;
		arp_offload_req = &wma->interfaces[vdev_id].arp_offload_req;
		WMA_LOGD(" %s: NS Offload vdev_id: %d enable: %d ns_count: %u",
			 __func__, vdev_id,
			 offload_req->enableOrDisable,
			 offload_req->num_ns_offload_count);
	}

	res = wmi_unified_enable_arp_ns_offload_cmd(wma->wmi_handle,
			(struct host_offload_req_param *)arp_offload_req,
			(struct host_offload_req_param *)ns_offload_req,
			config_arp,
			vdev_id);
	if (res) {
		WMA_LOGE("Failed to enable ARP NDP/NSffload");
		qdf_mem_free(offload_req);
		return QDF_STATUS_E_FAILURE;
	}

	/* Command accepted: refresh the cached copy for the side we sent */
	if (config_arp) {
		qdf_mem_copy(&wma->interfaces[vdev_id].arp_offload_req,
			     offload_req,
			     sizeof(wma->interfaces[vdev_id].arp_offload_req));
	} else {
		qdf_mem_copy(&wma->interfaces[vdev_id].ns_offload_req,
			     offload_req,
			     sizeof(wma->interfaces[vdev_id].ns_offload_req));
	}

	qdf_mem_free(offload_req);
	return QDF_STATUS_SUCCESS;
}


/**
 * wma_process_cesium_enable_ind() - enables cesium functionality in target
 * @wma: wma handle
 *
 * Return: QDF status
 */
QDF_STATUS wma_process_cesium_enable_ind(tp_wma_handle wma)
{
	QDF_STATUS ret;
	int32_t vdev_id;

	/* Cesium/RMC applies only to the IBSS vdev */
	vdev_id = wma_find_vdev_by_type(wma, WMI_VDEV_TYPE_IBSS);
	if (vdev_id < 0) {
		WMA_LOGE("%s: IBSS vdev does not exist could not enable cesium",
			 __func__);
		return QDF_STATUS_E_FAILURE;
	}

	/* Send enable cesium command to target */
	WMA_LOGE("Enable cesium in target for vdevId %d ", vdev_id);
	ret = wma_vdev_set_param(wma->wmi_handle, vdev_id,
				 WMI_VDEV_PARAM_ENABLE_RMC, 1);
	if (ret) {
		WMA_LOGE("Enable cesium failed for vdevId %d", vdev_id);
		return QDF_STATUS_E_FAILURE;
	}
	return QDF_STATUS_SUCCESS;
}

/**
 * wma_process_get_peer_info_req() - sends get peer info cmd to target
 * @wma: wma handle
 * @preq: get peer info request
 *
 * staIdx of 0xFF requests info for all peers (broadcast MAC); otherwise
 * the peer is looked up by its local id. The response arrives via a
 * separate WMI event.
 *
 * Return: QDF status
 */
QDF_STATUS wma_process_get_peer_info_req
	(tp_wma_handle wma, tSirIbssGetPeerInfoReqParams *pReq)
{
	int32_t ret;
	uint8_t *p;
	uint16_t len;
	wmi_buf_t buf;
	int32_t vdev_id;
	ol_txrx_pdev_handle pdev;
	struct ol_txrx_peer_t *peer;
	uint8_t peer_mac[IEEE80211_ADDR_LEN];
	wmi_peer_info_req_cmd_fixed_param *p_get_peer_info_cmd;
	uint8_t bcast_mac[IEEE80211_ADDR_LEN] = { 0xff, 0xff, 0xff,
						  0xff, 0xff, 0xff };

	vdev_id = wma_find_vdev_by_type(wma, WMI_VDEV_TYPE_IBSS);
	if (vdev_id < 0) {
		WMA_LOGE("%s: IBSS vdev does not exist could not get peer info",
			 __func__);
		return QDF_STATUS_E_FAILURE;
	}

	pdev = cds_get_context(QDF_MODULE_ID_TXRX);
	if (NULL == pdev) {
		WMA_LOGE("%s: Failed to get pdev context", __func__);
		return QDF_STATUS_E_FAILURE;
	}

	if (0xFF == pReq->staIdx) {
		/*get info for all peers */
		qdf_mem_copy(peer_mac, bcast_mac, IEEE80211_ADDR_LEN);
	} else {
		/*get info for a single peer */
		peer = ol_txrx_peer_find_by_local_id(pdev, pReq->staIdx);
		if (!peer) {
			WMA_LOGE("%s: Failed to get peer handle using peer id %d",
				 __func__, pReq->staIdx);
			return QDF_STATUS_E_FAILURE;
		}
		WMA_LOGE("%s: staIdx %d peer mac: 0x%2x:0x%2x:0x%2x:0x%2x:0x%2x:0x%2x",
			 __func__, pReq->staIdx, peer->mac_addr.raw[0],
			 peer->mac_addr.raw[1], peer->mac_addr.raw[2],
			 peer->mac_addr.raw[3], peer->mac_addr.raw[4],
			 peer->mac_addr.raw[5]);
		qdf_mem_copy(peer_mac, peer->mac_addr.raw, IEEE80211_ADDR_LEN);
	}

	len = sizeof(wmi_peer_info_req_cmd_fixed_param);
	buf = wmi_buf_alloc(wma->wmi_handle, len);
	if (!buf) {
		WMA_LOGE("%s %d: No WMI resource!", __func__, __LINE__);
		return QDF_STATUS_E_FAILURE;
	}

	p = (uint8_t *) wmi_buf_data(buf);
	qdf_mem_zero(p, len);
	p_get_peer_info_cmd = (wmi_peer_info_req_cmd_fixed_param *) p;

	WMITLV_SET_HDR(&p_get_peer_info_cmd->tlv_header,
		       WMITLV_TAG_STRUC_wmi_peer_info_req_cmd_fixed_param,
		       WMITLV_GET_STRUCT_TLVLEN
			       (wmi_peer_info_req_cmd_fixed_param));

	p_get_peer_info_cmd->vdev_id = vdev_id;
	WMI_CHAR_ARRAY_TO_MAC_ADDR(peer_mac,
				   &p_get_peer_info_cmd->peer_mac_address);

	/* NOTE(review): send failure is logged but not propagated; the
	 * function returns SUCCESS unconditionally below. Looks intentional
	 * (best-effort request) — confirm before changing.
	 */
	ret = wmi_unified_cmd_send(wma->wmi_handle, buf, len,
				   WMI_PEER_INFO_REQ_CMDID);

	WMA_LOGE("IBSS get peer info cmd sent len: %d, vdev %d"
		 " command id: %d, status: %d", len,
		 p_get_peer_info_cmd->vdev_id, WMI_PEER_INFO_REQ_CMDID, ret);

	return QDF_STATUS_SUCCESS;
}

/**
 * wma_process_tx_fail_monitor_ind() - sends tx fail monitor cmd to target
* @wma: wma handle 5527 * @pReq: tx fail monitor command params 5528 * 5529 * Return: QDF status 5530 */ 5531 QDF_STATUS wma_process_tx_fail_monitor_ind 5532 (tp_wma_handle wma, tAniTXFailMonitorInd *pReq) 5533 { 5534 QDF_STATUS ret; 5535 int32_t vdev_id; 5536 5537 vdev_id = wma_find_vdev_by_type(wma, WMI_VDEV_TYPE_IBSS); 5538 if (vdev_id < 0) { 5539 WMA_LOGE("%s: IBSS vdev does not exist could not send fast tx fail" 5540 " monitor indication message to target", __func__); 5541 return QDF_STATUS_E_FAILURE; 5542 } 5543 5544 /* Send enable cesium command to target */ 5545 WMA_LOGE("send fast tx fail monitor ind cmd target for vdevId %d val %d", 5546 vdev_id, pReq->tx_fail_count); 5547 5548 if (0 == pReq->tx_fail_count) { 5549 wma->hddTxFailCb = NULL; 5550 } else { 5551 wma->hddTxFailCb = pReq->txFailIndCallback; 5552 } 5553 ret = wma_vdev_set_param(wma->wmi_handle, vdev_id, 5554 WMI_VDEV_PARAM_SET_IBSS_TX_FAIL_CNT_THR, 5555 pReq->tx_fail_count); 5556 if (ret) { 5557 WMA_LOGE("tx fail monitor failed for vdevId %d", vdev_id); 5558 return QDF_STATUS_E_FAILURE; 5559 } 5560 5561 return QDF_STATUS_SUCCESS; 5562 } 5563 5564 /** 5565 * wma_process_rmc_enable_ind() - enables RMC functionality in target 5566 * @wma: wma handle 5567 * 5568 * Return: QDF status 5569 */ 5570 QDF_STATUS wma_process_rmc_enable_ind(tp_wma_handle wma) 5571 { 5572 int ret; 5573 uint8_t *p; 5574 uint16_t len; 5575 wmi_buf_t buf; 5576 int32_t vdev_id; 5577 wmi_rmc_set_mode_cmd_fixed_param *p_rmc_enable_cmd; 5578 5579 vdev_id = wma_find_vdev_by_type(wma, WMI_VDEV_TYPE_IBSS); 5580 if (vdev_id < 0) { 5581 WMA_LOGE("%s: IBSS vdev does not exist could not enable RMC", 5582 __func__); 5583 return QDF_STATUS_E_FAILURE; 5584 } 5585 5586 len = sizeof(wmi_rmc_set_mode_cmd_fixed_param); 5587 buf = wmi_buf_alloc(wma->wmi_handle, len); 5588 if (!buf) { 5589 WMA_LOGE("%s %d: No WMI resource!", __func__, __LINE__); 5590 return QDF_STATUS_E_FAILURE; 5591 } 5592 5593 p = (uint8_t *) wmi_buf_data(buf); 5594 
qdf_mem_zero(p, len); 5595 p_rmc_enable_cmd = (wmi_rmc_set_mode_cmd_fixed_param *) p; 5596 5597 WMITLV_SET_HDR(&p_rmc_enable_cmd->tlv_header, 5598 WMITLV_TAG_STRUC_wmi_rmc_set_mode_cmd_fixed_param, 5599 WMITLV_GET_STRUCT_TLVLEN 5600 (wmi_rmc_set_mode_cmd_fixed_param)); 5601 5602 p_rmc_enable_cmd->vdev_id = vdev_id; 5603 p_rmc_enable_cmd->enable_rmc = WMI_RMC_MODE_ENABLED; 5604 5605 ret = wmi_unified_cmd_send(wma->wmi_handle, buf, len, 5606 WMI_RMC_SET_MODE_CMDID); 5607 5608 WMA_LOGE("Enable RMC cmd sent len: %d, vdev %d" " command id: %d," 5609 " status: %d", len, p_rmc_enable_cmd->vdev_id, 5610 WMI_RMC_SET_MODE_CMDID, ret); 5611 5612 return QDF_STATUS_SUCCESS; 5613 } 5614 5615 /** 5616 * wma_process_rmc_disable_ind() - disables rmc functionality in target 5617 * @wma: wma handle 5618 * 5619 * Return: QDF status 5620 */ 5621 QDF_STATUS wma_process_rmc_disable_ind(tp_wma_handle wma) 5622 { 5623 int ret; 5624 uint8_t *p; 5625 uint16_t len; 5626 wmi_buf_t buf; 5627 int32_t vdev_id; 5628 wmi_rmc_set_mode_cmd_fixed_param *p_rmc_disable_cmd; 5629 5630 vdev_id = wma_find_vdev_by_type(wma, WMI_VDEV_TYPE_IBSS); 5631 if (vdev_id < 0) { 5632 WMA_LOGE("%s: IBSS vdev does not exist could not disable RMC", 5633 __func__); 5634 return QDF_STATUS_E_FAILURE; 5635 } 5636 5637 len = sizeof(wmi_rmc_set_mode_cmd_fixed_param); 5638 buf = wmi_buf_alloc(wma->wmi_handle, len); 5639 if (!buf) { 5640 WMA_LOGE("%s %d: No WMI resource!", __func__, __LINE__); 5641 return QDF_STATUS_E_FAILURE; 5642 } 5643 5644 p = (uint8_t *) wmi_buf_data(buf); 5645 qdf_mem_zero(p, len); 5646 p_rmc_disable_cmd = (wmi_rmc_set_mode_cmd_fixed_param *) p; 5647 5648 WMITLV_SET_HDR(&p_rmc_disable_cmd->tlv_header, 5649 WMITLV_TAG_STRUC_wmi_rmc_set_mode_cmd_fixed_param, 5650 WMITLV_GET_STRUCT_TLVLEN 5651 (wmi_rmc_set_mode_cmd_fixed_param)); 5652 5653 p_rmc_disable_cmd->vdev_id = vdev_id; 5654 p_rmc_disable_cmd->enable_rmc = WMI_RMC_MODE_DISABLED; 5655 5656 ret = wmi_unified_cmd_send(wma->wmi_handle, buf, len, 5657 
WMI_RMC_SET_MODE_CMDID); 5658 5659 WMA_LOGE("Disable RMC cmd sent len: %d, vdev %d" " command id: %d," 5660 " status: %d", len, p_rmc_disable_cmd->vdev_id, 5661 WMI_RMC_SET_MODE_CMDID, ret); 5662 5663 return QDF_STATUS_SUCCESS; 5664 } 5665 5666 /** 5667 * wma_process_rmc_action_period_ind() - sends RMC action period to target 5668 * @wma: wma handle 5669 * 5670 * Return: QDF status 5671 */ 5672 QDF_STATUS wma_process_rmc_action_period_ind(tp_wma_handle wma) 5673 { 5674 int ret; 5675 uint8_t *p; 5676 uint16_t len; 5677 uint32_t val; 5678 wmi_buf_t buf; 5679 int32_t vdev_id; 5680 wmi_rmc_set_action_period_cmd_fixed_param *p_rmc_cmd; 5681 struct sAniSirGlobal *mac = cds_get_context(QDF_MODULE_ID_PE); 5682 5683 if (NULL == mac) { 5684 WMA_LOGE("%s: MAC mac does not exist", __func__); 5685 return QDF_STATUS_E_FAILURE; 5686 } 5687 5688 vdev_id = wma_find_vdev_by_type(wma, WMI_VDEV_TYPE_IBSS); 5689 if (vdev_id < 0) { 5690 WMA_LOGE("%s: IBSS vdev does not exist could not send" 5691 " RMC action period to target", __func__); 5692 return QDF_STATUS_E_FAILURE; 5693 } 5694 5695 len = sizeof(wmi_rmc_set_action_period_cmd_fixed_param); 5696 buf = wmi_buf_alloc(wma->wmi_handle, len); 5697 if (!buf) { 5698 WMA_LOGE("%s %d: No WMI resource!", __func__, __LINE__); 5699 return QDF_STATUS_E_FAILURE; 5700 } 5701 5702 p = (uint8_t *) wmi_buf_data(buf); 5703 qdf_mem_zero(p, len); 5704 p_rmc_cmd = (wmi_rmc_set_action_period_cmd_fixed_param *) p; 5705 5706 WMITLV_SET_HDR(&p_rmc_cmd->tlv_header, 5707 WMITLV_TAG_STRUC_wmi_rmc_set_action_period_cmd_fixed_param, 5708 WMITLV_GET_STRUCT_TLVLEN 5709 (wmi_rmc_set_action_period_cmd_fixed_param)); 5710 5711 if (wlan_cfg_get_int(mac, WNI_CFG_RMC_ACTION_PERIOD_FREQUENCY, &val) 5712 != eSIR_SUCCESS) { 5713 WMA_LOGE("Failed to get value for RMC action period using default"); 5714 val = WNI_CFG_RMC_ACTION_PERIOD_FREQUENCY_STADEF; 5715 } 5716 5717 p_rmc_cmd->vdev_id = vdev_id; 5718 p_rmc_cmd->periodicity_msec = val; 5719 5720 ret = 
wmi_unified_cmd_send(wma->wmi_handle, buf, len, 5721 WMI_RMC_SET_ACTION_PERIOD_CMDID); 5722 5723 WMA_LOGE("RMC action period %d cmd sent len: %d, vdev %d" 5724 " command id: %d, status: %d", p_rmc_cmd->periodicity_msec, 5725 len, p_rmc_cmd->vdev_id, WMI_RMC_SET_ACTION_PERIOD_CMDID, ret); 5726 5727 return QDF_STATUS_SUCCESS; 5728 } 5729 5730 /** 5731 * wma_process_add_periodic_tx_ptrn_ind - add periodic tx ptrn 5732 * @handle: wma handle 5733 * @pAddPeriodicTxPtrnParams: tx ptrn params 5734 * 5735 * Retrun: QDF status 5736 */ 5737 QDF_STATUS wma_process_add_periodic_tx_ptrn_ind(WMA_HANDLE handle, 5738 tSirAddPeriodicTxPtrn * 5739 pAddPeriodicTxPtrnParams) 5740 { 5741 tp_wma_handle wma_handle = (tp_wma_handle) handle; 5742 struct periodic_tx_pattern *params_ptr; 5743 uint8_t vdev_id; 5744 QDF_STATUS status; 5745 5746 if (!wma_handle || !wma_handle->wmi_handle) { 5747 WMA_LOGE("%s: WMA is closed, can not issue fw add pattern cmd", 5748 __func__); 5749 return QDF_STATUS_E_INVAL; 5750 } 5751 5752 params_ptr = qdf_mem_malloc(sizeof(*params_ptr)); 5753 5754 if (!params_ptr) { 5755 WMA_LOGE( 5756 "%s: unable to allocate memory for periodic_tx_pattern", 5757 __func__); 5758 return QDF_STATUS_E_NOMEM; 5759 } 5760 5761 if (!wma_find_vdev_by_addr(wma_handle, 5762 pAddPeriodicTxPtrnParams->mac_address.bytes, 5763 &vdev_id)) { 5764 WMA_LOGE("%s: Failed to find vdev id for %pM", __func__, 5765 pAddPeriodicTxPtrnParams->mac_address.bytes); 5766 return QDF_STATUS_E_INVAL; 5767 } 5768 5769 params_ptr->ucPtrnId = pAddPeriodicTxPtrnParams->ucPtrnId; 5770 params_ptr->ucPtrnSize = pAddPeriodicTxPtrnParams->ucPtrnSize; 5771 params_ptr->usPtrnIntervalMs = 5772 pAddPeriodicTxPtrnParams->usPtrnIntervalMs; 5773 qdf_mem_copy(¶ms_ptr->mac_address, 5774 &pAddPeriodicTxPtrnParams->mac_address, 5775 sizeof(struct qdf_mac_addr)); 5776 qdf_mem_copy(params_ptr->ucPattern, 5777 pAddPeriodicTxPtrnParams->ucPattern, 5778 params_ptr->ucPtrnSize); 5779 5780 status = 
wmi_unified_process_add_periodic_tx_ptrn_cmd( 5781 wma_handle->wmi_handle, params_ptr, vdev_id); 5782 5783 qdf_mem_free(params_ptr); 5784 return status; 5785 } 5786 5787 /** 5788 * wma_process_del_periodic_tx_ptrn_ind - del periodic tx ptrn 5789 * @handle: wma handle 5790 * @pDelPeriodicTxPtrnParams: tx ptrn params 5791 * 5792 * Retrun: QDF status 5793 */ 5794 QDF_STATUS wma_process_del_periodic_tx_ptrn_ind(WMA_HANDLE handle, 5795 tSirDelPeriodicTxPtrn * 5796 pDelPeriodicTxPtrnParams) 5797 { 5798 tp_wma_handle wma_handle = (tp_wma_handle) handle; 5799 uint8_t vdev_id; 5800 5801 if (!wma_handle || !wma_handle->wmi_handle) { 5802 WMA_LOGE("%s: WMA is closed, can not issue Del Pattern cmd", 5803 __func__); 5804 return QDF_STATUS_E_INVAL; 5805 } 5806 5807 if (!wma_find_vdev_by_addr(wma_handle, 5808 pDelPeriodicTxPtrnParams->mac_address.bytes, 5809 &vdev_id)) { 5810 WMA_LOGE("%s: Failed to find vdev id for %pM", __func__, 5811 pDelPeriodicTxPtrnParams->mac_address.bytes); 5812 return QDF_STATUS_E_INVAL; 5813 } 5814 5815 return wmi_unified_process_del_periodic_tx_ptrn_cmd( 5816 wma_handle->wmi_handle, vdev_id, 5817 pDelPeriodicTxPtrnParams->ucPtrnId); 5818 } 5819 5820 #ifdef WLAN_FEATURE_STATS_EXT 5821 /** 5822 * wma_stats_ext_req() - request ext stats from fw 5823 * @wma_ptr: wma handle 5824 * @preq: stats ext params 5825 * 5826 * Return: QDF status 5827 */ 5828 QDF_STATUS wma_stats_ext_req(void *wma_ptr, tpStatsExtRequest preq) 5829 { 5830 tp_wma_handle wma = (tp_wma_handle) wma_ptr; 5831 struct stats_ext_params *params; 5832 size_t params_len; 5833 QDF_STATUS status; 5834 5835 if (!wma) { 5836 WMA_LOGE("%s: wma handle is NULL", __func__); 5837 return QDF_STATUS_E_FAILURE; 5838 } 5839 5840 params_len = sizeof(*params) + preq->request_data_len; 5841 params = qdf_mem_malloc(params_len); 5842 5843 if (params == NULL) { 5844 WMA_LOGE(FL("memory allocation failed")); 5845 return QDF_STATUS_E_NOMEM; 5846 } 5847 5848 params->vdev_id = preq->vdev_id; 5849 
params->request_data_len = preq->request_data_len; 5850 if (preq->request_data_len > 0) 5851 qdf_mem_copy(params->request_data, preq->request_data, 5852 params->request_data_len); 5853 5854 status = wmi_unified_stats_ext_req_cmd(wma->wmi_handle, params); 5855 qdf_mem_free(params); 5856 5857 return status; 5858 } 5859 5860 #endif /* WLAN_FEATURE_STATS_EXT */ 5861 5862 #ifdef WLAN_FEATURE_EXTWOW_SUPPORT 5863 /** 5864 * wma_send_status_of_ext_wow() - send ext wow status to SME 5865 * @wma: wma handle 5866 * @status: status 5867 * 5868 * Return: none 5869 */ 5870 static void wma_send_status_of_ext_wow(tp_wma_handle wma, bool status) 5871 { 5872 tSirReadyToExtWoWInd *ready_to_extwow; 5873 QDF_STATUS vstatus; 5874 cds_msg_t cds_msg; 5875 uint8_t len; 5876 5877 WMA_LOGD("Posting ready to suspend indication to umac"); 5878 5879 len = sizeof(tSirReadyToExtWoWInd); 5880 ready_to_extwow = (tSirReadyToExtWoWInd *) qdf_mem_malloc(len); 5881 5882 if (NULL == ready_to_extwow) { 5883 WMA_LOGE("%s: Memory allocation failure", __func__); 5884 return; 5885 } 5886 5887 ready_to_extwow->mesgType = eWNI_SME_READY_TO_EXTWOW_IND; 5888 ready_to_extwow->mesgLen = len; 5889 ready_to_extwow->status = status; 5890 5891 cds_msg.type = eWNI_SME_READY_TO_EXTWOW_IND; 5892 cds_msg.bodyptr = (void *)ready_to_extwow; 5893 cds_msg.bodyval = 0; 5894 5895 vstatus = cds_mq_post_message(CDS_MQ_ID_SME, &cds_msg); 5896 if (vstatus != QDF_STATUS_SUCCESS) { 5897 WMA_LOGE("Failed to post ready to suspend"); 5898 qdf_mem_free(ready_to_extwow); 5899 } 5900 } 5901 5902 /** 5903 * wma_enable_ext_wow() - enable ext wow in fw 5904 * @wma: wma handle 5905 * @params: ext wow params 5906 * 5907 * Return:0 for success or error code 5908 */ 5909 QDF_STATUS wma_enable_ext_wow(tp_wma_handle wma, tpSirExtWoWParams params) 5910 { 5911 struct ext_wow_params wow_params = {0}; 5912 QDF_STATUS status; 5913 5914 if (!wma) { 5915 WMA_LOGE("%s: wma handle is NULL", __func__); 5916 return QDF_STATUS_E_FAILURE; 5917 } 5918 5919 
wow_params.vdev_id = params->vdev_id; 5920 wow_params.type = (enum wmi_ext_wow_type) params->type; 5921 wow_params.wakeup_pin_num = params->wakeup_pin_num; 5922 5923 status = wmi_unified_enable_ext_wow_cmd(wma->wmi_handle, 5924 &wow_params); 5925 if (QDF_IS_STATUS_ERROR(status)) 5926 return status; 5927 5928 wma_send_status_of_ext_wow(wma, true); 5929 return status; 5930 5931 } 5932 5933 /** 5934 * wma_set_app_type1_params_in_fw() - set app type1 params in fw 5935 * @wma: wma handle 5936 * @appType1Params: app type1 params 5937 * 5938 * Return: QDF status 5939 */ 5940 int wma_set_app_type1_params_in_fw(tp_wma_handle wma, 5941 tpSirAppType1Params appType1Params) 5942 { 5943 int ret; 5944 5945 ret = wmi_unified_app_type1_params_in_fw_cmd(wma->wmi_handle, 5946 (struct app_type1_params *)appType1Params); 5947 if (ret) { 5948 WMA_LOGE("%s: Failed to set APP TYPE1 PARAMS", __func__); 5949 return QDF_STATUS_E_FAILURE; 5950 } 5951 5952 return QDF_STATUS_SUCCESS; 5953 } 5954 5955 /** 5956 * wma_set_app_type2_params_in_fw() - set app type2 params in fw 5957 * @wma: wma handle 5958 * @appType2Params: app type2 params 5959 * 5960 * Return: QDF status 5961 */ 5962 QDF_STATUS wma_set_app_type2_params_in_fw(tp_wma_handle wma, 5963 tpSirAppType2Params appType2Params) 5964 { 5965 struct app_type2_params params = {0}; 5966 5967 if (!wma) { 5968 WMA_LOGE("%s: wma handle is NULL", __func__); 5969 return QDF_STATUS_E_FAILURE; 5970 } 5971 5972 params.vdev_id = appType2Params->vdev_id; 5973 params.rc4_key_len = appType2Params->rc4_key_len; 5974 qdf_mem_copy(params.rc4_key, appType2Params->rc4_key, 16); 5975 params.ip_id = appType2Params->ip_id; 5976 params.ip_device_ip = appType2Params->ip_device_ip; 5977 params.ip_server_ip = appType2Params->ip_server_ip; 5978 params.tcp_src_port = appType2Params->tcp_src_port; 5979 params.tcp_dst_port = appType2Params->tcp_dst_port; 5980 params.tcp_seq = appType2Params->tcp_seq; 5981 params.tcp_ack_seq = appType2Params->tcp_ack_seq; 5982 
params.keepalive_init = appType2Params->keepalive_init; 5983 params.keepalive_min = appType2Params->keepalive_min; 5984 params.keepalive_max = appType2Params->keepalive_max; 5985 params.keepalive_inc = appType2Params->keepalive_inc; 5986 params.tcp_tx_timeout_val = appType2Params->tcp_tx_timeout_val; 5987 params.tcp_rx_timeout_val = appType2Params->tcp_rx_timeout_val; 5988 qdf_mem_copy(¶ms.gateway_mac, &appType2Params->gateway_mac, 5989 sizeof(struct qdf_mac_addr)); 5990 5991 return wmi_unified_set_app_type2_params_in_fw_cmd(wma->wmi_handle, 5992 ¶ms); 5993 5994 } 5995 #endif /* WLAN_FEATURE_EXTWOW_SUPPORT */ 5996 5997 #ifdef FEATURE_WLAN_AUTO_SHUTDOWN 5998 /** 5999 * wma_auto_shutdown_event_handler() - process auto shutdown timer trigger 6000 * @handle: wma handle 6001 * @event: event buffer 6002 * @len: buffer length 6003 * 6004 * Return: 0 for success or error code 6005 */ 6006 int wma_auto_shutdown_event_handler(void *handle, uint8_t *event, 6007 uint32_t len) 6008 { 6009 wmi_host_auto_shutdown_event_fixed_param *wmi_auto_sh_evt; 6010 WMI_HOST_AUTO_SHUTDOWN_EVENTID_param_tlvs *param_buf = 6011 (WMI_HOST_AUTO_SHUTDOWN_EVENTID_param_tlvs *) 6012 event; 6013 6014 if (!param_buf || !param_buf->fixed_param) { 6015 WMA_LOGE("%s:%d: Invalid Auto shutdown timer evt", __func__, 6016 __LINE__); 6017 return -EINVAL; 6018 } 6019 6020 wmi_auto_sh_evt = param_buf->fixed_param; 6021 6022 if (wmi_auto_sh_evt->shutdown_reason 6023 != WMI_HOST_AUTO_SHUTDOWN_REASON_TIMER_EXPIRY) { 6024 WMA_LOGE("%s:%d: Invalid Auto shutdown timer evt", __func__, 6025 __LINE__); 6026 return -EINVAL; 6027 } 6028 6029 WMA_LOGD("%s:%d: Auto Shutdown Evt: %d", __func__, __LINE__, 6030 wmi_auto_sh_evt->shutdown_reason); 6031 return wma_post_auto_shutdown_msg(); 6032 } 6033 6034 /** 6035 * wma_set_auto_shutdown_timer_req() - sets auto shutdown timer in firmware 6036 * @wma: wma handle 6037 * @auto_sh_cmd: auto shutdown timer value 6038 * 6039 * Return: QDF status 6040 */ 6041 QDF_STATUS 
wma_set_auto_shutdown_timer_req(tp_wma_handle wma_handle, 6042 tSirAutoShutdownCmdParams * 6043 auto_sh_cmd) 6044 { 6045 if (auto_sh_cmd == NULL) { 6046 WMA_LOGE("%s : Invalid Autoshutdown cfg cmd", __func__); 6047 return QDF_STATUS_E_FAILURE; 6048 } 6049 6050 return wmi_unified_set_auto_shutdown_timer_cmd(wma_handle->wmi_handle, 6051 auto_sh_cmd->timer_val); 6052 } 6053 #endif /* FEATURE_WLAN_AUTO_SHUTDOWN */ 6054 6055 #ifdef WLAN_FEATURE_NAN 6056 /** 6057 * wma_nan_req() - to send nan request to target 6058 * @wma: wma_handle 6059 * @nan_req: request data which will be non-null 6060 * 6061 * Return: QDF status 6062 */ 6063 QDF_STATUS wma_nan_req(void *wma_ptr, tpNanRequest nan_req) 6064 { 6065 tp_wma_handle wma_handle = (tp_wma_handle) wma_ptr; 6066 struct nan_req_params *params; 6067 size_t params_len; 6068 QDF_STATUS status; 6069 6070 if (!wma_handle) { 6071 WMA_LOGE("%s: wma handle is NULL", __func__); 6072 return QDF_STATUS_E_FAILURE; 6073 } 6074 6075 params_len = sizeof(*params) + nan_req->request_data_len; 6076 params = qdf_mem_malloc(params_len); 6077 6078 if (params == NULL) { 6079 WMA_LOGE(FL("memory allocation failed")); 6080 return QDF_STATUS_E_NOMEM; 6081 } 6082 6083 params->request_data_len = nan_req->request_data_len; 6084 if (params->request_data_len > 0) 6085 qdf_mem_copy(params->request_data, nan_req->request_data, 6086 params->request_data_len); 6087 6088 status = wmi_unified_nan_req_cmd(wma_handle->wmi_handle, params); 6089 qdf_mem_free(params); 6090 6091 return status; 6092 } 6093 #endif /* WLAN_FEATURE_NAN */ 6094 6095 #ifdef DHCP_SERVER_OFFLOAD 6096 /** 6097 * wma_process_dhcpserver_offload() - enable DHCP server offload 6098 * @wma_handle: wma handle 6099 * @pDhcpSrvOffloadInfo: DHCP server offload info 6100 * 6101 * Return: 0 for success or error code 6102 */ 6103 QDF_STATUS wma_process_dhcpserver_offload(tp_wma_handle wma_handle, 6104 tSirDhcpSrvOffloadInfo * 6105 pDhcpSrvOffloadInfo) 6106 { 6107 struct dhcp_offload_info_params params = 
{0}; 6108 QDF_STATUS status; 6109 6110 if (!wma_handle) { 6111 WMA_LOGE("%s: wma handle is NULL", __func__); 6112 return -EIO; 6113 } 6114 6115 params.vdev_id = pDhcpSrvOffloadInfo->vdev_id; 6116 params.dhcpSrvOffloadEnabled = 6117 pDhcpSrvOffloadInfo->dhcpSrvOffloadEnabled; 6118 params.dhcpClientNum = pDhcpSrvOffloadInfo->dhcpClientNum; 6119 params.dhcpSrvIP = pDhcpSrvOffloadInfo->; 6120 6121 status = wmi_unified_process_dhcpserver_offload_cmd( 6122 wma_handle->wmi_handle, ¶ms); 6123 if (QDF_IS_STATUS_ERROR(status)) 6124 return status; 6125 6126 WMA_LOGD("Set dhcp server offload to vdevId %d", 6127 pDhcpSrvOffloadInfo->vdev_id); 6128 return status; 6129 } 6130 #endif /* DHCP_SERVER_OFFLOAD */ 6131 6132 #ifdef WLAN_FEATURE_GPIO_LED_FLASHING 6133 /** 6134 * wma_set_led_flashing() - set led flashing in fw 6135 * @wma_handle: wma handle 6136 * @flashing: flashing request 6137 * 6138 * Return: QDF status 6139 */ 6140 QDF_STATUS wma_set_led_flashing(tp_wma_handle wma_handle, 6141 tSirLedFlashingReq *flashing) 6142 { 6143 struct flashing_req_params cmd = {0}; 6144 6145 if (!wma_handle || !wma_handle->wmi_handle) { 6146 WMA_LOGE(FL("WMA is closed, can not issue cmd")); 6147 return QDF_STATUS_E_INVAL; 6148 } 6149 if (!flashing) { 6150 WMA_LOGE(FL("invalid parameter: flashing")); 6151 return QDF_STATUS_E_INVAL; 6152 } 6153 cmd.req_id = flashing->reqId; 6154 cmd.pattern_id = flashing->pattern_id; 6155 cmd.led_x0 = flashing->led_x0; 6156 cmd.led_x1 = flashing->led_x1; 6157 status = wmi_unified_set_led_flashing_cmd(wma_handle->wmi_handle, 6158 &cmd); 6159 if (status != EOK) { 6160 return QDF_STATUS_E_FAILURE; 6161 } 6162 return QDF_STATUS_SUCCESS; 6163 } 6164 #endif /* WLAN_FEATURE_GPIO_LED_FLASHING */ 6165 6166 #ifdef FEATURE_WLAN_CH_AVOID 6167 /** 6168 * wma_channel_avoid_evt_handler() - process channel to avoid event from FW. 
 * @handle: wma handle
 * @event: event buffer
 * @len: buffer length
 *
 * Return: 0 for success or error code
 */
int wma_channel_avoid_evt_handler(void *handle, uint8_t *event,
				  uint32_t len)
{
	wmi_avoid_freq_ranges_event_fixed_param *afr_fixed_param;
	wmi_avoid_freq_range_desc *afr_desc;
	uint32_t num_freq_ranges, freq_range_idx;
	tSirChAvoidIndType *sca_indication;
	QDF_STATUS qdf_status;
	cds_msg_t sme_msg = { 0 };
	WMI_WLAN_FREQ_AVOID_EVENTID_param_tlvs *param_buf =
		(WMI_WLAN_FREQ_AVOID_EVENTID_param_tlvs *) event;

	if (!param_buf) {
		WMA_LOGE("Invalid channel avoid event buffer");
		return -EINVAL;
	}

	afr_fixed_param = param_buf->fixed_param;
	if (!afr_fixed_param) {
		WMA_LOGE("Invalid channel avoid event fixed param buffer");
		return -EINVAL;
	}

	/* Clamp the firmware-reported count to the host-side array bound */
	num_freq_ranges =
		(afr_fixed_param->num_freq_ranges >
		 SIR_CH_AVOID_MAX_RANGE) ? SIR_CH_AVOID_MAX_RANGE :
		afr_fixed_param->num_freq_ranges;

	WMA_LOGD("Channel avoid event received with %d ranges",
		 num_freq_ranges);
	for (freq_range_idx = 0; freq_range_idx < num_freq_ranges;
	     freq_range_idx++) {
		/* Manual stride over the TLV array of range descriptors */
		afr_desc = (wmi_avoid_freq_range_desc *)
			   ((void *)param_buf->avd_freq_range +
			    freq_range_idx * sizeof(wmi_avoid_freq_range_desc));

		WMA_LOGD("range %d: tlv id = %u, start freq = %u, end freq = %u",
			 freq_range_idx, afr_desc->tlv_header,
			 afr_desc->start_freq, afr_desc->end_freq);
	}

	sca_indication = (tSirChAvoidIndType *)
			 qdf_mem_malloc(sizeof(tSirChAvoidIndType));
	if (!sca_indication) {
		WMA_LOGE("Invalid channel avoid indication buffer");
		return -EINVAL;
	}

	/* Repackage the ranges into the SME indication structure */
	sca_indication->avoid_range_count = num_freq_ranges;
	for (freq_range_idx = 0; freq_range_idx < num_freq_ranges;
	     freq_range_idx++) {
		afr_desc = (wmi_avoid_freq_range_desc *)
			   ((void *)param_buf->avd_freq_range +
			    freq_range_idx * sizeof(wmi_avoid_freq_range_desc));
		sca_indication->avoid_freq_range[freq_range_idx].start_freq =
			afr_desc->start_freq;
		sca_indication->avoid_freq_range[freq_range_idx].end_freq =
			afr_desc->end_freq;
	}

	sme_msg.type = eWNI_SME_CH_AVOID_IND;
	sme_msg.bodyptr = sca_indication;
	sme_msg.bodyval = 0;

	/* On success the SME queue owns sca_indication; free on failure */
	qdf_status = cds_mq_post_message(QDF_MODULE_ID_SME, &sme_msg);
	if (!QDF_IS_STATUS_SUCCESS(qdf_status)) {
		WMA_LOGE("Fail to post eWNI_SME_CH_AVOID_IND msg to SME");
		qdf_mem_free(sca_indication);
		return -EINVAL;
	}

	return 0;
}

/**
 * wma_process_ch_avoid_update_req() - handles channel avoid update request
 * @wma_handle: wma handle
 * @ch_avoid_update_req: channel avoid update params
 *
 * Return: QDF status
 */
QDF_STATUS wma_process_ch_avoid_update_req(tp_wma_handle wma_handle,
					   tSirChAvoidUpdateReq *
					   ch_avoid_update_req)
{
	QDF_STATUS status;
	if (!wma_handle) {
		WMA_LOGE("%s: wma handle is NULL", __func__);
		return QDF_STATUS_E_FAILURE;
	}
	if (ch_avoid_update_req == NULL) {
		WMA_LOGE("%s : ch_avoid_update_req is NULL", __func__);
		return QDF_STATUS_E_FAILURE;
	}

	WMA_LOGI("%s: WMA --> WMI_CHAN_AVOID_UPDATE", __func__);

	status = wmi_unified_process_ch_avoid_update_cmd(
			wma_handle->wmi_handle);
	if (QDF_IS_STATUS_ERROR(status))
		return status;

	WMA_LOGI("%s: WMA --> WMI_CHAN_AVOID_UPDATE sent through WMI",
		 __func__);
	return status;
}
#endif /* FEATURE_WLAN_CH_AVOID */

/**
 * wma_set_reg_domain() - set reg domain
 * @clientCtxt: client context
 * @regId: reg id
 *
 * Return: QDF status
 */
QDF_STATUS wma_set_reg_domain(void *clientCtxt, v_REGDOMAIN_t regId)
{
	/* Thin wrapper: translate the CDS result into a QDF status */
	if (QDF_STATUS_SUCCESS !=
	    cds_set_reg_domain(clientCtxt, regId))
		return QDF_STATUS_E_INVAL;

	return QDF_STATUS_SUCCESS;
}

/**
 * wma_send_regdomain_info_to_fw() - send regdomain info to fw
 * @reg_dmn: reg domain
 * @regdmn2G: 2G reg domain
 * @regdmn5G: 5G reg domain
 * @ctl2G: 2G test limit
 * @ctl5G: 5G test limit
 *
 * Return: none
 */
void wma_send_regdomain_info_to_fw(uint32_t reg_dmn, uint16_t regdmn2G,
				   uint16_t regdmn5G, int8_t ctl2G,
				   int8_t ctl5G)
{
	tp_wma_handle wma = cds_get_context(QDF_MODULE_ID_WMA);
	int32_t cck_mask_val = 0;
	struct pdev_params pdev_param = {0};
	QDF_STATUS ret = QDF_STATUS_SUCCESS;
	QDF_STATUS status = QDF_STATUS_SUCCESS;

	if (NULL == wma) {
		WMA_LOGE("%s: wma context is NULL", __func__);
		return;
	}

	status = wmi_unified_send_regdomain_info_to_fw_cmd(wma->wmi_handle,
			reg_dmn, regdmn2G, regdmn5G, ctl2G, ctl5G);
	if (status == QDF_STATUS_E_NOMEM)
		return;

	/* Japan (JP14) and Korea require the CCK tx chain mask when the
	 * host has enabled tx_chain_mask_cck.
	 */
	if ((((reg_dmn & ~COUNTRY_ERD_FLAG) == CTRY_JAPAN14) ||
	     ((reg_dmn & ~COUNTRY_ERD_FLAG) == CTRY_KOREA_ROC)) &&
	    (true == wma->tx_chain_mask_cck))
		cck_mask_val = 1;

	/* Upper 16 bits carry the self-generated frame tx power */
	cck_mask_val |= (wma->self_gen_frm_pwr << 16);
	pdev_param.param_id = WMI_PDEV_PARAM_TX_CHAIN_MASK_CCK;
	pdev_param.param_value = cck_mask_val;
	ret = wmi_unified_pdev_param_send(wma->wmi_handle,
					  &pdev_param,
					  WMA_WILDCARD_PDEV_ID);

	if (QDF_IS_STATUS_ERROR(ret))
		WMA_LOGE("failed to set PDEV tx_chain_mask_cck %d",
			 ret);

	return;
}

/**
 * wma_post_runtime_resume_msg() - post the resume request
 * @handle: validated wma handle
 *
 * request the MC thread unpaus the vdev and set resume dtim
 *
 * Return: qdf status of the mq post
 */
static QDF_STATUS wma_post_runtime_resume_msg(WMA_HANDLE handle)
{
	cds_msg_t resume_msg;
	QDF_STATUS status;
	tp_wma_handle wma = (tp_wma_handle) handle;

	/* Hold off runtime suspend until the resume work completes;
	 * released again if the post fails.
	 */
	qdf_runtime_pm_prevent_suspend(wma->wma_runtime_resume_lock);

	resume_msg.bodyptr = NULL;
	resume_msg.type = WMA_RUNTIME_PM_RESUME_IND;

	status = cds_mq_post_message(QDF_MODULE_ID_WMA, &resume_msg);

	if (!QDF_IS_STATUS_SUCCESS(status)) {
		WMA_LOGE("Failed to post Runtime PM Resume IND to VOS");
		qdf_runtime_pm_allow_suspend(wma->wma_runtime_resume_lock);
	}

	return status;
}

/**
 * wma_post_runtime_suspend_msg() - post the suspend request
 * @handle: validated wma handle
 *
 * Requests for offloads to be configured for runtime suspend
 * on the MC thread
 *
 * Return QDF_STATUS_E_AGAIN in case of timeout or QDF_STATUS_SUCCESS
 */
static QDF_STATUS wma_post_runtime_suspend_msg(WMA_HANDLE handle)
{
	cds_msg_t cds_msg;
	QDF_STATUS qdf_status;
	tp_wma_handle wma = (tp_wma_handle) handle;

	qdf_event_reset(&wma->runtime_suspend);

	cds_msg.bodyptr = NULL;
	cds_msg.type = WMA_RUNTIME_PM_SUSPEND_IND;
	qdf_status = cds_mq_post_message(QDF_MODULE_ID_WMA, &cds_msg);

	if (qdf_status != QDF_STATUS_SUCCESS)
		goto failure;

	/* Wait for the MC thread to signal that suspend prep is done */
	if (qdf_wait_single_event(&wma->runtime_suspend,
				  WMA_TGT_SUSPEND_COMPLETE_TIMEOUT) !=
	    QDF_STATUS_SUCCESS) {
		WMA_LOGE("Failed to get runtime suspend event");
		goto msg_timed_out;
	}

	return QDF_STATUS_SUCCESS;

msg_timed_out:
	/* Undo any partial suspend prep before reporting failure */
	wma_post_runtime_resume_msg(wma);
failure:
	return QDF_STATUS_E_AGAIN;
}

/**
 * __wma_bus_suspend(): handles bus suspend for wma
 * @type: is this suspend part of runtime suspend or system suspend?
 *
 * Bails if a scan is in progress.
 * Calls the appropriate handlers based on configuration and event.
 *
 * Return: 0 for success or error code
 */
static int __wma_bus_suspend(enum qdf_suspend_type type)
{
	WMA_HANDLE handle = cds_get_context(QDF_MODULE_ID_WMA);
	if (NULL == handle) {
		WMA_LOGE("%s: wma context is NULL", __func__);
		return -EFAULT;
	}

	if (wma_check_scan_in_progress(handle)) {
		WMA_LOGE("%s: Scan in progress. Aborting suspend", __func__);
		return -EBUSY;
	}

	/* runtime suspend additionally needs offloads configured via MC
	 * thread before the bus-level suspend proceeds */
	if (type == QDF_RUNTIME_SUSPEND) {
		QDF_STATUS status = wma_post_runtime_suspend_msg(handle);
		if (status)
			return qdf_status_to_os_return(status);
	}

	if (type == QDF_SYSTEM_SUSPEND)
		WMA_LOGI("%s: wow mode selected %d", __func__,
			 wma_is_wow_mode_selected(handle));

	if (wma_is_wow_mode_selected(handle)) {
		QDF_STATUS status = wma_enable_wow_in_fw(handle);
		return qdf_status_to_os_return(status);
	}

	/* non-WoW path: plain target (pdev) suspend, interrupts enabled */
	return wma_suspend_target(handle, 0);
}

/**
 * wma_runtime_suspend() - handles runtime suspend request from hdd
 *
 * Calls the appropriate handler based on configuration and event.
 * Last busy marking should prevent race conditions between processing
 * of asynchronous fw events and the running of runtime suspend.
 * (eg. last busy marking should guarantee that any auth requests have
 * been processed)
 * Events coming from the host are not protected, but aren't expected
 * to be an issue.
 *
 * Return: 0 for success or error code
 */
int wma_runtime_suspend(void)
{
	return __wma_bus_suspend(QDF_RUNTIME_SUSPEND);
}

/**
 * wma_bus_suspend() - handles bus suspend request from hdd
 *
 * Calls the appropriate handler based on configuration and event
 *
 * Return: 0 for success or error code
 */
int wma_bus_suspend(void)
{

	return __wma_bus_suspend(QDF_SYSTEM_SUSPEND);
}

/**
 * __wma_bus_resume() - bus resume for wma
 * @handle: valid wma handle
 *
 * Does the part of the bus resume common to runtime and system resume:
 * clears the initial-wake-up flag, then either resumes the target
 * (non-WoW) or disables WoW in firmware.
 *
 * Return: os error code.
 */
int __wma_bus_resume(WMA_HANDLE handle)
{
	bool wow_mode = wma_is_wow_mode_selected(handle);
	tp_wma_handle wma = handle;
	QDF_STATUS status;

	WMA_LOGI("%s: wow mode %d", __func__, wow_mode);

	wma->wow_initial_wake_up = false;

	if (!wow_mode)
		return qdf_status_to_os_return(wma_resume_target(handle));

	status = wma_disable_wow_in_fw(handle);
	return qdf_status_to_os_return(status);
}

/**
 * wma_runtime_resume() - do the runtime resume operation for wma
 *
 * Resumes the bus, then posts the runtime-PM resume indication to the
 * MC thread.
 *
 * Return: os error code.
 */
int wma_runtime_resume(void)
{
	int ret;
	QDF_STATUS status;
	WMA_HANDLE handle = cds_get_context(QDF_MODULE_ID_WMA);
	if (NULL == handle) {
		WMA_LOGE("%s: wma context is NULL", __func__);
		return -EFAULT;
	}

	ret = __wma_bus_resume(handle);
	if (ret)
		return ret;

	status = wma_post_runtime_resume_msg(handle);
	return qdf_status_to_os_return(status);
}

/**
 * wma_bus_resume() - handles bus resume request from hdd
 *
 * Calls the appropriate handler based on configuration
 *
 * Return: 0 for success or error code
 */
int wma_bus_resume(void)
{
	WMA_HANDLE handle = cds_get_context(QDF_MODULE_ID_WMA);
	if (NULL == handle) {
		WMA_LOGE("%s: wma context is NULL", __func__);
		return -EFAULT;
	}

	return __wma_bus_resume(handle);
}

/**
 * wma_suspend_target_timeout() - Handles the target suspend timeout
 * @is_self_recovery_enabled: Is self recovery enabled or not
 *
 * Triggers recovery (or asserts) on target-suspend timeout unless a
 * recovery/unload is already in flight. Two variants: the Adrastea
 * build only checks for in-progress recovery; the Rome build also
 * checks load/unload.
 *
 * Return: NONE
 */
#ifdef QCA_WIFI_3_0_ADRASTEA
static inline void wma_suspend_target_timeout(bool is_self_recovery_enabled)
{
	if (cds_is_driver_recovering()) {
		WMA_LOGE("%s: recovery is in progress, ignore!", __func__);
	} else {
		if (is_self_recovery_enabled) {
			cds_trigger_recovery();
		} else {
			QDF_BUG(0);
		}
	}
}
#else /* ROME chipset */
static inline void wma_suspend_target_timeout(bool is_self_recovery_enabled)
{
	if (cds_is_load_or_unload_in_progress() || cds_is_driver_recovering()) {
		WMA_LOGE("%s: Unloading/Loading/recovery is in progress, Ignore!",
			 __func__);
	} else {
		if (is_self_recovery_enabled) {
			cds_trigger_recovery();
		} else {
			QDF_BUG(0);
		}
	}
}
#endif

/**
 * wma_suspend_target() - suspend target
 * @handle: wma handle
 *
@disable_target_intr: disable target interrupt
 *
 * Sends the pdev suspend command and waits (up to
 * WMA_TGT_SUSPEND_COMPLETE_TIMEOUT) for firmware's ack event.
 *
 * Return: QDF_STATUS_SUCCESS for success or error code
 */
QDF_STATUS wma_suspend_target(WMA_HANDLE handle, int disable_target_intr)
{
	tp_wma_handle wma_handle = (tp_wma_handle) handle;
	QDF_STATUS status;
	struct suspend_params param = {0};

	tpAniSirGlobal pmac = cds_get_context(QDF_MODULE_ID_PE);

	if (!wma_handle || !wma_handle->wmi_handle) {
		WMA_LOGE("WMA is closed. can not issue suspend cmd");
		return QDF_STATUS_E_INVAL;
	}

	if (NULL == pmac) {
		WMA_LOGE("%s: Unable to get PE context", __func__);
		return QDF_STATUS_E_INVAL;
	}

	qdf_event_reset(&wma_handle->target_suspend);
	param.disable_target_intr = disable_target_intr;
	status = wmi_unified_suspend_send(wma_handle->wmi_handle,
					  &param,
					  WMA_WILDCARD_PDEV_ID);
	if (QDF_IS_STATUS_ERROR(status))
		return status;

	/* mark suspended optimistically; rolled back below on timeout */
	wmi_set_target_suspend(wma_handle->wmi_handle, true);

	if (qdf_wait_single_event(&wma_handle->target_suspend,
				  WMA_TGT_SUSPEND_COMPLETE_TIMEOUT)
	    != QDF_STATUS_SUCCESS) {
		WMA_LOGE("Failed to get ACK from firmware for pdev suspend");
		wmi_set_target_suspend(wma_handle->wmi_handle, false);
		wma_suspend_target_timeout(pmac->sme.enableSelfRecovery);
		return QDF_STATUS_E_FAULT;
	}

	return QDF_STATUS_SUCCESS;
}

/**
 * wma_target_suspend_acknowledge() - update target suspend status
 * @context: HTC_INIT_INFO->context
 * @wow_nack: true when wow is rejected
 *
 * Signals the target_suspend event; on a WoW nack (outside runtime
 * PM) it also takes a timed wake lock so the host stays awake.
 *
 * Return: none
 */
void wma_target_suspend_acknowledge(void *context, bool wow_nack)
{
	tp_wma_handle wma = cds_get_context(QDF_MODULE_ID_WMA);

	if (NULL == wma) {
		WMA_LOGE("%s: wma is NULL", __func__);
		return;
	}

	wma->wow_nack = wow_nack;
	qdf_event_set(&wma->target_suspend);
	if (wow_nack && !wmi_get_runtime_pm_inprogress(wma->wmi_handle)) {
		cds_host_diag_log_work(&wma->wow_wake_lock,
				       WMA_WAKE_LOCK_TIMEOUT,
				       WIFI_POWER_EVENT_WAKELOCK_WOW);
		qdf_wake_lock_timeout_acquire(&wma->wow_wake_lock,
					      WMA_WAKE_LOCK_TIMEOUT);
	}
}

/**
 * wma_handle_initial_wake_up() - handle initial wake up
 *
 * Latches the fact that the target woke the host; cleared by
 * wma_clear_target_wake_up()/__wma_bus_resume().
 *
 * Return: none
 */
void wma_handle_initial_wake_up(void)
{
	tp_wma_handle wma = cds_get_context(QDF_MODULE_ID_WMA);

	if (NULL == wma) {
		WMA_LOGE("%s: wma is NULL", __func__);
		return;
	}

	wma->wow_initial_wake_up = true;
}

/**
 * wma_is_target_wake_up_received() - check for initial wake up
 *
 * Check if target initial wake up is received and fail PM suspend
 * gracefully.
 *
 * Return: -EAGAIN if initial wake up is received else 0
 */
int wma_is_target_wake_up_received(void)
{
	tp_wma_handle wma = cds_get_context(QDF_MODULE_ID_WMA);

	if (NULL == wma) {
		WMA_LOGE("%s: wma is NULL", __func__);
		return -EAGAIN;
	}

	if (wma->wow_initial_wake_up) {
		WMA_LOGE("Target initial wake up received try again");
		return -EAGAIN;
	}

	return 0;
}

/**
 * wma_clear_target_wake_up() - clear initial wake up
 *
 * Clear target initial wake up reason
 *
 * Return: 0 for success and negative error code for failure
 */
int wma_clear_target_wake_up(void)
{
	tp_wma_handle wma = cds_get_context(QDF_MODULE_ID_WMA);

	if (NULL == wma) {
		WMA_LOGE("%s: wma is NULL", __func__);
		return -EFAULT;
	}

	wma->wow_initial_wake_up = false;

	return 0;
}

/**
 * wma_resume_target() - resume target
 * @handle: wma handle
 *
 * Sends WMI_PDEV_RESUME_CMDID and waits for the host wakeup event;
 * on timeout, triggers self recovery (CNSS builds) or asserts.
 *
 * Return: QDF_STATUS_SUCCESS for success or error code
 */
QDF_STATUS wma_resume_target(WMA_HANDLE handle)
{
	tp_wma_handle wma = (tp_wma_handle) handle;
	QDF_STATUS qdf_status = QDF_STATUS_SUCCESS;
6734 #ifdef CONFIG_CNSS 6735 tpAniSirGlobal pMac = cds_get_context(QDF_MODULE_ID_PE); 6736 if (NULL == pMac) { 6737 WMA_LOGE("%s: Unable to get PE context", __func__); 6738 return QDF_STATUS_E_INVAL; 6739 } 6740 #endif /* CONFIG_CNSS */ 6741 6742 qdf_event_reset(&wma->wma_resume_event); 6743 qdf_status = wmi_unified_resume_send(wma->wmi_handle, 6744 WMA_WILDCARD_PDEV_ID); 6745 if (QDF_IS_STATUS_ERROR(qdf_status)) 6746 WMA_LOGE("Failed to send WMI_PDEV_RESUME_CMDID command"); 6747 6748 qdf_status = qdf_wait_single_event(&(wma->wma_resume_event), 6749 WMA_RESUME_TIMEOUT); 6750 if (QDF_STATUS_SUCCESS != qdf_status) { 6751 WMA_LOGP("%s: Timeout waiting for resume event from FW", 6752 __func__); 6753 WMA_LOGP("%s: Pending commands %d credits %d", __func__, 6754 wmi_get_pending_cmds(wma->wmi_handle), 6755 wmi_get_host_credits(wma->wmi_handle)); 6756 if (!cds_is_driver_recovering()) { 6757 #ifdef CONFIG_CNSS 6758 if (pMac->sme.enableSelfRecovery) { 6759 cds_trigger_recovery(); 6760 } else { 6761 QDF_BUG(0); 6762 } 6763 #else 6764 QDF_BUG(0); 6765 #endif /* CONFIG_CNSS */ 6766 } else { 6767 WMA_LOGE("%s: SSR in progress, ignore resume timeout", 6768 __func__); 6769 } 6770 } else { 6771 WMA_LOGD("Host wakeup received"); 6772 } 6773 6774 if (QDF_STATUS_SUCCESS == qdf_status) 6775 wmi_set_target_suspend(wma->wmi_handle, false); 6776 6777 return qdf_status; 6778 } 6779 6780 #ifdef FEATURE_WLAN_TDLS 6781 /** 6782 * wma_tdls_event_handler() - handle TDLS event 6783 * @handle: wma handle 6784 * @event: event buffer 6785 * @len: buffer length 6786 * 6787 * Return: 0 for success or error code 6788 */ 6789 int wma_tdls_event_handler(void *handle, uint8_t *event, uint32_t len) 6790 { 6791 tp_wma_handle wma = (tp_wma_handle) handle; 6792 WMI_TDLS_PEER_EVENTID_param_tlvs *param_buf = NULL; 6793 wmi_tdls_peer_event_fixed_param *peer_event = NULL; 6794 tSirTdlsEventnotify *tdls_event; 6795 6796 if (!event) { 6797 WMA_LOGE("%s: event param null", __func__); 6798 return -EINVAL; 6799 } 
6800 6801 param_buf = (WMI_TDLS_PEER_EVENTID_param_tlvs *) event; 6802 if (!param_buf) { 6803 WMA_LOGE("%s: received null buf from target", __func__); 6804 return -EINVAL; 6805 } 6806 6807 peer_event = param_buf->fixed_param; 6808 if (!peer_event) { 6809 WMA_LOGE("%s: received null event data from target", __func__); 6810 return -EINVAL; 6811 } 6812 6813 tdls_event = (tSirTdlsEventnotify *) 6814 qdf_mem_malloc(sizeof(*tdls_event)); 6815 if (!tdls_event) { 6816 WMA_LOGE("%s: failed to allocate memory for tdls_event", 6817 __func__); 6818 return -ENOMEM; 6819 } 6820 6821 tdls_event->sessionId = peer_event->vdev_id; 6822 WMI_MAC_ADDR_TO_CHAR_ARRAY(&peer_event->peer_macaddr, 6823 tdls_event->peermac.bytes); 6824 6825 switch (peer_event->peer_status) { 6826 case WMI_TDLS_SHOULD_DISCOVER: 6827 tdls_event->messageType = WMA_TDLS_SHOULD_DISCOVER_CMD; 6828 break; 6829 case WMI_TDLS_SHOULD_TEARDOWN: 6830 tdls_event->messageType = WMA_TDLS_SHOULD_TEARDOWN_CMD; 6831 break; 6832 case WMI_TDLS_PEER_DISCONNECTED: 6833 tdls_event->messageType = WMA_TDLS_PEER_DISCONNECTED_CMD; 6834 break; 6835 case WMI_TDLS_CONNECTION_TRACKER_NOTIFICATION: 6836 tdls_event->messageType = 6837 WMA_TDLS_CONNECTION_TRACKER_NOTIFICATION_CMD; 6838 break; 6839 default: 6840 WMA_LOGE("%s: Discarding unknown tdls event(%d) from target", 6841 __func__, peer_event->peer_status); 6842 return -EINVAL; 6843 } 6844 6845 switch (peer_event->peer_reason) { 6846 case WMI_TDLS_TEARDOWN_REASON_TX: 6847 tdls_event->peer_reason = eWNI_TDLS_TEARDOWN_REASON_TX; 6848 break; 6849 case WMI_TDLS_TEARDOWN_REASON_RSSI: 6850 tdls_event->peer_reason = eWNI_TDLS_TEARDOWN_REASON_RSSI; 6851 break; 6852 case WMI_TDLS_TEARDOWN_REASON_SCAN: 6853 tdls_event->peer_reason = eWNI_TDLS_TEARDOWN_REASON_SCAN; 6854 break; 6855 case WMI_TDLS_DISCONNECTED_REASON_PEER_DELETE: 6856 tdls_event->peer_reason = 6857 eWNI_TDLS_DISCONNECTED_REASON_PEER_DELETE; 6858 break; 6859 case WMI_TDLS_TEARDOWN_REASON_PTR_TIMEOUT: 6860 tdls_event->peer_reason = 
eWNI_TDLS_TEARDOWN_REASON_PTR_TIMEOUT; 6861 break; 6862 case WMI_TDLS_TEARDOWN_REASON_BAD_PTR: 6863 tdls_event->peer_reason = eWNI_TDLS_TEARDOWN_REASON_BAD_PTR; 6864 break; 6865 case WMI_TDLS_TEARDOWN_REASON_NO_RESPONSE: 6866 tdls_event->peer_reason = eWNI_TDLS_TEARDOWN_REASON_NO_RESPONSE; 6867 break; 6868 case WMI_TDLS_ENTER_BUF_STA: 6869 tdls_event->peer_reason = eWNI_TDLS_PEER_ENTER_BUF_STA; 6870 break; 6871 case WMI_TDLS_EXIT_BUF_STA: 6872 tdls_event->peer_reason = eWNI_TDLS_PEER_EXIT_BUF_STA; 6873 break; 6874 case WMI_TDLS_ENTER_BT_BUSY_MODE: 6875 tdls_event->peer_reason = eWNI_TDLS_ENTER_BT_BUSY_MODE; 6876 break; 6877 case WMI_TDLS_EXIT_BT_BUSY_MODE: 6878 tdls_event->peer_reason = eWNI_TDLS_EXIT_BT_BUSY_MODE; 6879 break; 6880 default: 6881 WMA_LOGE("%s: unknown reason(%d) in tdls event(%d) from target", 6882 __func__, peer_event->peer_reason, 6883 peer_event->peer_status); 6884 return -EINVAL; 6885 } 6886 6887 WMA_LOGD("%s: sending msg to umac, messageType: 0x%x, " 6888 "for peer: %pM, reason: %d, smesessionId: %d", 6889 __func__, tdls_event->messageType, tdls_event->peermac.bytes, 6890 tdls_event->peer_reason, tdls_event->sessionId); 6891 6892 wma_send_msg(wma, tdls_event->messageType, (void *)tdls_event, 0); 6893 return 0; 6894 } 6895 6896 /** 6897 * wma_set_tdls_offchan_mode() - set tdls off channel mode 6898 * @handle: wma handle 6899 * @chan_switch_params: Pointer to tdls channel switch parameter structure 6900 * 6901 * This function sets tdls off channel mode 6902 * 6903 * Return: 0 on success; Negative errno otherwise 6904 */ 6905 QDF_STATUS wma_set_tdls_offchan_mode(WMA_HANDLE handle, 6906 tdls_chan_switch_params *chan_switch_params) 6907 { 6908 tp_wma_handle wma_handle = (tp_wma_handle) handle; 6909 struct tdls_channel_switch_params params = {0}; 6910 QDF_STATUS ret = QDF_STATUS_SUCCESS; 6911 6912 if (!wma_handle || !wma_handle->wmi_handle) { 6913 WMA_LOGE(FL( 6914 "WMA is closed, can not issue tdls off channel cmd" 6915 )); 6916 ret = -EINVAL; 6917 
goto end; 6918 } 6919 6920 params.vdev_id = chan_switch_params->vdev_id; 6921 params.tdls_off_ch_bw_offset = 6922 chan_switch_params->tdls_off_ch_bw_offset; 6923 params.tdls_off_ch = chan_switch_params->tdls_off_ch; 6924 params.tdls_sw_mode = chan_switch_params->tdls_sw_mode; 6925 params.oper_class = chan_switch_params->oper_class; 6926 params.is_responder = chan_switch_params->is_responder; 6927 qdf_mem_copy(params.peer_mac_addr, chan_switch_params->peer_mac_addr, 6928 IEEE80211_ADDR_LEN); 6929 6930 ret = wmi_unified_set_tdls_offchan_mode_cmd(wma_handle->wmi_handle, 6931 ¶ms); 6932 6933 end: 6934 if (chan_switch_params) 6935 qdf_mem_free(chan_switch_params); 6936 return ret; 6937 } 6938 6939 /** 6940 * wma_update_fw_tdls_state() - send enable/disable tdls for a vdev 6941 * @wma: wma handle 6942 * @pwmaTdlsparams: TDLS params 6943 * 6944 * Return: 0 for sucess or error code 6945 */ 6946 QDF_STATUS wma_update_fw_tdls_state(WMA_HANDLE handle, void *pwmaTdlsparams) 6947 { 6948 tp_wma_handle wma_handle = (tp_wma_handle) handle; 6949 t_wma_tdls_mode tdls_mode; 6950 t_wma_tdls_params *wma_tdls = (t_wma_tdls_params *) pwmaTdlsparams; 6951 struct wmi_tdls_params params = {0}; 6952 QDF_STATUS ret = QDF_STATUS_SUCCESS; 6953 uint8_t tdls_state; 6954 6955 if (!wma_handle || !wma_handle->wmi_handle) { 6956 WMA_LOGE("%s: WMA is closed, can not issue fw tdls state cmd", 6957 __func__); 6958 ret = -EINVAL; 6959 goto end_fw_tdls_state; 6960 } 6961 6962 params.tdls_state = wma_tdls->tdls_state; 6963 tdls_mode = wma_tdls->tdls_state; 6964 6965 if (WMA_TDLS_SUPPORT_EXPLICIT_TRIGGER_ONLY == tdls_mode) { 6966 tdls_state = WMI_TDLS_ENABLE_PASSIVE; 6967 } else if (WMA_TDLS_SUPPORT_ENABLED == tdls_mode) { 6968 tdls_state = WMI_TDLS_ENABLE_CONNECTION_TRACKER_IN_HOST; 6969 } else if (WMA_TDLS_SUPPORT_ACTIVE_EXTERNAL_CONTROL == tdls_mode) { 6970 tdls_state = WMI_TDLS_ENABLE_CONNECTION_TRACKER_IN_HOST; 6971 } else { 6972 tdls_state = WMI_TDLS_DISABLE; 6973 } 6974 6975 params.vdev_id = 
wma_tdls->vdev_id; 6976 params.notification_interval_ms = wma_tdls->notification_interval_ms; 6977 params.tx_discovery_threshold = wma_tdls->tx_discovery_threshold; 6978 params.tx_teardown_threshold = wma_tdls->tx_teardown_threshold; 6979 params.rssi_teardown_threshold = wma_tdls->rssi_teardown_threshold; 6980 params.rssi_delta = wma_tdls->rssi_delta; 6981 params.tdls_options = wma_tdls->tdls_options; 6982 params.peer_traffic_ind_window = wma_tdls->peer_traffic_ind_window; 6983 params.peer_traffic_response_timeout = 6984 wma_tdls->peer_traffic_response_timeout; 6985 params.puapsd_mask = wma_tdls->puapsd_mask; 6986 params.puapsd_inactivity_time = wma_tdls->puapsd_inactivity_time; 6987 params.puapsd_rx_frame_threshold = 6988 wma_tdls->puapsd_rx_frame_threshold; 6989 params.teardown_notification_ms = 6990 wma_tdls->teardown_notification_ms; 6991 params.tdls_peer_kickout_threshold = 6992 wma_tdls->tdls_peer_kickout_threshold; 6993 6994 ret = wmi_unified_update_fw_tdls_state_cmd(wma_handle->wmi_handle, 6995 ¶ms, tdls_state); 6996 if (QDF_IS_STATUS_ERROR(ret)) 6997 goto end_fw_tdls_state; 6998 6999 WMA_LOGD("%s: vdev_id %d", __func__, wma_tdls->vdev_id); 7000 7001 end_fw_tdls_state: 7002 if (pwmaTdlsparams) 7003 qdf_mem_free(pwmaTdlsparams); 7004 return ret; 7005 } 7006 7007 /** 7008 * wma_update_tdls_peer_state() - update TDLS peer state 7009 * @handle: wma handle 7010 * @peerStateParams: TDLS peer state params 7011 * 7012 * Return: 0 for success or error code 7013 */ 7014 int wma_update_tdls_peer_state(WMA_HANDLE handle, 7015 tTdlsPeerStateParams *peerStateParams) 7016 { 7017 tp_wma_handle wma_handle = (tp_wma_handle) handle; 7018 uint32_t i; 7019 ol_txrx_pdev_handle pdev; 7020 uint8_t peer_id; 7021 ol_txrx_peer_handle peer; 7022 uint8_t *peer_mac_addr; 7023 int ret = 0; 7024 uint32_t *ch_mhz = NULL; 7025 bool restore_last_peer = false; 7026 7027 if (!wma_handle || !wma_handle->wmi_handle) { 7028 WMA_LOGE("%s: WMA is closed, can not issue cmd", __func__); 7029 ret = 
-EINVAL; 7030 goto end_tdls_peer_state; 7031 } 7032 7033 /* peer capability info is valid only when peer state is connected */ 7034 if (WMA_TDLS_PEER_STATE_CONNECTED != peerStateParams->peerState) { 7035 qdf_mem_zero(&peerStateParams->peerCap, 7036 sizeof(tTdlsPeerCapParams)); 7037 } 7038 7039 if (peerStateParams->peerCap.peerChanLen) { 7040 ch_mhz = qdf_mem_malloc(sizeof(uint32_t) * 7041 peerStateParams->peerCap.peerChanLen); 7042 if (ch_mhz == NULL) { 7043 WMA_LOGE("%s: memory allocation failed", __func__); 7044 ret = -ENOMEM; 7045 goto end_tdls_peer_state; 7046 } 7047 } 7048 7049 for (i = 0; i < peerStateParams->peerCap.peerChanLen; ++i) { 7050 ch_mhz[i] = 7051 cds_chan_to_freq(peerStateParams->peerCap.peerChan[i]. 7052 chanId); 7053 } 7054 7055 if (wmi_unified_update_tdls_peer_state_cmd(wma_handle->wmi_handle, 7056 (struct tdls_peer_state_params *)peerStateParams, 7057 ch_mhz)) { 7058 WMA_LOGE("%s: failed to send tdls peer update state command", 7059 __func__); 7060 ret = -EIO; 7061 goto end_tdls_peer_state; 7062 } 7063 7064 /* in case of teardown, remove peer from fw */ 7065 if (WMA_TDLS_PEER_STATE_TEARDOWN == peerStateParams->peerState) { 7066 pdev = cds_get_context(QDF_MODULE_ID_TXRX); 7067 if (!pdev) { 7068 WMA_LOGE("%s: Failed to find pdev", __func__); 7069 ret = -EIO; 7070 goto end_tdls_peer_state; 7071 } 7072 7073 peer = ol_txrx_find_peer_by_addr(pdev, 7074 peerStateParams->peerMacAddr, 7075 &peer_id); 7076 if (!peer) { 7077 WMA_LOGE("%s: Failed to get peer handle using peer mac %pM", 7078 __func__, peerStateParams->peerMacAddr); 7079 ret = -EIO; 7080 goto end_tdls_peer_state; 7081 } 7082 peer_mac_addr = ol_txrx_peer_get_peer_mac_addr(peer); 7083 restore_last_peer = is_vdev_restore_last_peer(peer); 7084 7085 WMA_LOGD("%s: calling wma_remove_peer for peer " MAC_ADDRESS_STR 7086 " vdevId: %d", __func__, 7087 MAC_ADDR_ARRAY(peer_mac_addr), 7088 peerStateParams->vdevId); 7089 wma_remove_peer(wma_handle, peer_mac_addr, 7090 peerStateParams->vdevId, peer, 
false); 7091 ol_txrx_update_last_real_peer(pdev, peer, &peer_id, 7092 restore_last_peer); 7093 } 7094 7095 end_tdls_peer_state: 7096 if (ch_mhz) 7097 qdf_mem_free(ch_mhz); 7098 if (peerStateParams) 7099 qdf_mem_free(peerStateParams); 7100 return ret; 7101 } 7102 #endif /* FEATURE_WLAN_TDLS */ 7103 7104 7105 /** 7106 * wma_dfs_attach() - Attach DFS methods to the umac state. 7107 * @dfs_ic: ieee80211com ptr 7108 * 7109 * Return: Return ieee80211com ptr with updated info 7110 */ 7111 struct ieee80211com *wma_dfs_attach(struct ieee80211com *dfs_ic) 7112 { 7113 /*Allocate memory for dfs_ic before passing it up to dfs_attach() */ 7114 dfs_ic = (struct ieee80211com *) 7115 os_malloc(NULL, sizeof(struct ieee80211com), GFP_ATOMIC); 7116 if (dfs_ic == NULL) { 7117 WMA_LOGE("%s:Allocation of dfs_ic failed %zu", 7118 __func__, sizeof(struct ieee80211com)); 7119 return NULL; 7120 } 7121 OS_MEMZERO(dfs_ic, sizeof(struct ieee80211com)); 7122 /* DFS pattern matching hooks */ 7123 dfs_ic->ic_dfs_attach = ol_if_dfs_attach; 7124 dfs_ic->ic_dfs_disable = ol_if_dfs_disable; 7125 dfs_ic->ic_find_channel = ieee80211_find_channel; 7126 dfs_ic->ic_dfs_enable = ol_if_dfs_enable; 7127 dfs_ic->ic_ieee2mhz = ieee80211_ieee2mhz; 7128 7129 /* Hardware facing hooks */ 7130 dfs_ic->ic_get_ext_busy = ol_if_dfs_get_ext_busy; 7131 dfs_ic->ic_get_mib_cycle_counts_pct = 7132 ol_if_dfs_get_mib_cycle_counts_pct; 7133 dfs_ic->ic_get_TSF64 = ol_if_get_tsf64; 7134 7135 /* NOL related hooks */ 7136 dfs_ic->ic_dfs_usenol = ol_if_dfs_usenol; 7137 /* 7138 * Hooks from wma/dfs/ back 7139 * into the PE/SME 7140 * and shared DFS code 7141 */ 7142 dfs_ic->ic_dfs_notify_radar = ieee80211_mark_dfs; 7143 qdf_spinlock_create(&dfs_ic->chan_lock); 7144 /* Initializes DFS Data Structures and queues */ 7145 dfs_attach(dfs_ic); 7146 7147 return dfs_ic; 7148 } 7149 7150 /** 7151 * wma_dfs_detach() - Detach DFS methods 7152 * @dfs_ic: ieee80211com ptr 7153 * 7154 * Return: none 7155 */ 7156 void wma_dfs_detach(struct 
ieee80211com *dfs_ic)
{
	dfs_detach(dfs_ic);

	qdf_spinlock_destroy(&dfs_ic->chan_lock);
	if (NULL != dfs_ic->ic_curchan) {
		OS_FREE(dfs_ic->ic_curchan);
		dfs_ic->ic_curchan = NULL;
	}

	OS_FREE(dfs_ic);
}

/**
 * wma_dfs_configure() - configure dfs
 * @ic: ieee80211com ptr
 *
 * Configures Radar Filters during
 * vdev start/channel change/regulatory domain
 * change. This configuration enables programming of
 * the DFS pattern matching module.
 *
 * Return: none
 */
void wma_dfs_configure(struct ieee80211com *ic)
{
	struct ath_dfs_radar_tab_info rinfo;
	int dfsdomain;
	int radar_enabled_status = 0;
	if (ic == NULL) {
		WMA_LOGE("%s: DFS ic is Invalid", __func__);
		return;
	}

	dfsdomain = ic->current_dfs_regdomain;

	/* Fetch current radar patterns from the lmac */
	OS_MEMZERO(&rinfo, sizeof(rinfo));

	/*
	 * Look up the current DFS
	 * regulatory domain and decide
	 * which radar pulses to use.
	 */
	switch (dfsdomain) {
	case DFS_FCC_REGION:
		WMA_LOGI("%s: DFS-FCC domain", __func__);
		rinfo.dfsdomain = DFS_FCC_REGION;
		rinfo.dfs_radars = dfs_fcc_radars;
		rinfo.numradars = QDF_ARRAY_SIZE(dfs_fcc_radars);
		rinfo.b5pulses = dfs_fcc_bin5pulses;
		rinfo.numb5radars = QDF_ARRAY_SIZE(dfs_fcc_bin5pulses);
		break;
	case DFS_ETSI_REGION:
		WMA_LOGI("%s: DFS-ETSI domain", __func__);
		rinfo.dfsdomain = DFS_ETSI_REGION;
		rinfo.dfs_radars = dfs_etsi_radars;
		rinfo.numradars = QDF_ARRAY_SIZE(dfs_etsi_radars);
		rinfo.b5pulses = NULL;
		rinfo.numb5radars = 0;
		break;
	case DFS_MKK_REGION:
		WMA_LOGI("%s: DFS-MKK domain", __func__);
		rinfo.dfsdomain = DFS_MKK_REGION;
		rinfo.dfs_radars = dfs_mkk4_radars;
		rinfo.numradars = QDF_ARRAY_SIZE(dfs_mkk4_radars);
		rinfo.b5pulses = dfs_jpn_bin5pulses;
		rinfo.numb5radars = QDF_ARRAY_SIZE(dfs_jpn_bin5pulses);
		break;
	case DFS_CN_REGION:
		WMA_LOGI("%s: DFS-CN domain", __func__);
		rinfo.dfsdomain = DFS_CN_REGION;
		rinfo.dfs_radars = dfs_china_radars;
		rinfo.numradars = QDF_ARRAY_SIZE(dfs_china_radars);
		rinfo.b5pulses = NULL;
		rinfo.numb5radars = 0;
		break;
	case DFS_KR_REGION:
		WMA_LOGI("%s: DFS-KR domain", __func__);
		rinfo.dfsdomain = DFS_KR_REGION;
		rinfo.dfs_radars = dfs_korea_radars;
		rinfo.numradars = QDF_ARRAY_SIZE(dfs_korea_radars);
		rinfo.b5pulses = NULL;
		rinfo.numb5radars = 0;
		break;
	default:
		WMA_LOGI("%s: DFS-UNINT domain", __func__);
		rinfo.dfsdomain = DFS_UNINIT_REGION;
		rinfo.dfs_radars = NULL;
		rinfo.numradars = 0;
		rinfo.b5pulses = NULL;
		rinfo.numb5radars = 0;
		break;
	}

	rinfo.dfs_pri_multiplier = ic->dfs_pri_multiplier;

	/*
	 * Set the regulatory domain,
	 * radar pulse table and enable
	 * radar events if required.
	 * dfs_radar_enable() returns
	 * 0 on success and non-zero
	 * failure.
	 */
	radar_enabled_status = dfs_radar_enable(ic, &rinfo);
	if (radar_enabled_status != DFS_STATUS_SUCCESS) {
		WMA_LOGE("%s[%d]: DFS- Radar Detection Enabling Failed",
			 __func__, __LINE__);
	}
}

/**
 * wma_dfs_configure_channel() - configure DFS channel
 * @dfs_ic: ieee80211com ptr
 * @band_center_freq1: center frequency 1
 * @band_center_freq2: center frequency 2
 *      (valid only for 11ac vht 80plus80 mode)
 * @req: vdev start request
 *
 * Set the Channel parameters in to DFS module
 * Also, configure the DFS radar filters for
 * matching the DFS phyerrors.
 *
 * Return: dfs_ieee80211_channel / NULL for error
 */
struct dfs_ieee80211_channel *wma_dfs_configure_channel(
						struct ieee80211com *dfs_ic,
						uint32_t band_center_freq1,
						uint32_t band_center_freq2,
						struct wma_vdev_start_req
						*req)
{
	uint8_t ext_channel;

	if (dfs_ic == NULL) {
		WMA_LOGE("%s: DFS ic is Invalid", __func__);
		return NULL;
	}

	/* ic_curchan is allocated once and reused on later calls */
	if (!dfs_ic->ic_curchan) {
		dfs_ic->ic_curchan = (struct dfs_ieee80211_channel *)os_malloc(
					NULL,
					sizeof(struct dfs_ieee80211_channel),
					GFP_ATOMIC);
		if (dfs_ic->ic_curchan == NULL) {
			WMA_LOGE(
			    "%s: allocation of dfs_ic->ic_curchan failed %zu",
			    __func__, sizeof(struct dfs_ieee80211_channel));
			return NULL;
		}
	}

	OS_MEMZERO(dfs_ic->ic_curchan, sizeof(struct dfs_ieee80211_channel));

	dfs_ic->ic_curchan->ic_ieee = req->chan;
	dfs_ic->ic_curchan->ic_freq = cds_chan_to_freq(req->chan);
	dfs_ic->ic_curchan->ic_vhtop_ch_freq_seg1 = band_center_freq1;
	dfs_ic->ic_curchan->ic_vhtop_ch_freq_seg2 = band_center_freq2;
	dfs_ic->ic_curchan->ic_pri_freq_center_freq_mhz_separation =
		dfs_ic->ic_curchan->ic_freq -
		dfs_ic->ic_curchan->ic_vhtop_ch_freq_seg1;
	if ((dfs_ic->ic_curchan->ic_ieee >= WMA_11A_CHANNEL_BEGIN) &&
	    (dfs_ic->ic_curchan->ic_ieee <= WMA_11A_CHANNEL_END)) {
		dfs_ic->ic_curchan->ic_flags |= IEEE80211_CHAN_5GHZ;
	}

	/* translate the requested width into the corresponding
	 * HT/VHT channel flags */
	switch (req->chan_width) {
	case CH_WIDTH_20MHZ:
		dfs_ic->ic_curchan->ic_flags |=
			(req->vht_capable ? IEEE80211_CHAN_VHT20 :
			 IEEE80211_CHAN_HT20);
		break;
	case CH_WIDTH_40MHZ:
		if (req->chan < req->ch_center_freq_seg0)
			dfs_ic->ic_curchan->ic_flags |=
				(req->vht_capable ?
				 IEEE80211_CHAN_VHT40PLUS :
				 IEEE80211_CHAN_HT40PLUS);
		else
			dfs_ic->ic_curchan->ic_flags |=
				(req->vht_capable ?
				 IEEE80211_CHAN_VHT40MINUS :
				 IEEE80211_CHAN_HT40MINUS);
		break;
	case CH_WIDTH_80MHZ:
		dfs_ic->ic_curchan->ic_flags |= IEEE80211_CHAN_VHT80;
		break;
	case CH_WIDTH_80P80MHZ:
		ext_channel = cds_freq_to_chan(band_center_freq2);
		dfs_ic->ic_curchan->ic_flags |=
			IEEE80211_CHAN_VHT80P80;
		dfs_ic->ic_curchan->ic_freq_ext =
			band_center_freq2;
		dfs_ic->ic_curchan->ic_ieee_ext = ext_channel;

		/* verify both the 80MHz are DFS bands or not */
		if ((CHANNEL_STATE_DFS ==
		     cds_get_5g_bonded_channel_state(req->chan ,
			     CH_WIDTH_80MHZ)) &&
		    (CHANNEL_STATE_DFS == cds_get_5g_bonded_channel_state(
			     ext_channel - WMA_80MHZ_START_CENTER_CH_DIFF,
			     CH_WIDTH_80MHZ)))
			dfs_ic->ic_curchan->ic_80p80_both_dfs = true;
		break;
	case CH_WIDTH_160MHZ:
		dfs_ic->ic_curchan->ic_flags |=
			IEEE80211_CHAN_VHT160;
		break;
	default:
		WMA_LOGE(
		    "%s: Recieved a wrong channel width %d",
		    __func__, req->chan_width);
		break;
	}

	dfs_ic->ic_curchan->ic_flagext |= IEEE80211_CHAN_DFS;

	if (req->oper_mode == BSS_OPERATIONAL_MODE_AP) {
		dfs_ic->ic_opmode = IEEE80211_M_HOSTAP;
		dfs_ic->vdev_id = req->vdev_id;
	}

	dfs_ic->dfs_pri_multiplier = req->dfs_pri_multiplier;

	/*
	 * Configuring the DFS with current channel and the radar filters
	 */
	wma_dfs_configure(dfs_ic);
	WMA_LOGI("%s: DFS- CHANNEL CONFIGURED", __func__);
	return dfs_ic->ic_curchan;
}


/**
 * wma_set_dfs_region() - set DFS region
 * @wma: wma handle
 * @dfs_region: the region reported by the regulatory module
 *
 * Configure the DFS region for DFS radar filter initialization;
 * unknown/uninitialized regions fall back to FCC.
 *
 * Return: none
 */
void wma_set_dfs_region(tp_wma_handle wma, enum dfs_region dfs_region)
{
	if (dfs_region >= DFS_UNDEF_REGION ||
	    dfs_region == DFS_UNINIT_REGION)

		/* assign DFS_FCC_REGION as default region*/
		wma->dfs_ic->current_dfs_regdomain = DFS_FCC_REGION;
	else
		wma->dfs_ic->current_dfs_regdomain = dfs_region;

	WMA_LOGI("%s: DFS Region Domain: %d", __func__,
		 wma->dfs_ic->current_dfs_regdomain);
}

/**
 * wma_get_channels() - prepare dfs radar channel list
 * @ichan: channel on which radar was found
 * @chan_list: return channel list (the 20MHz sub-channels affected)
 *
 * Return: return number of channels
 */
int wma_get_channels(struct dfs_ieee80211_channel *ichan,
		     struct wma_dfs_radar_channel_list *chan_list)
{
	uint8_t center_chan = cds_freq_to_chan(ichan->ic_vhtop_ch_freq_seg1);
	int count = 0;
	int start_channel = 0;
	int loop;

	chan_list->nchannels = 0;

	if (IEEE80211_IS_CHAN_11AC_VHT160(ichan)) {

		/*
		 * as per the latest draft for BSS bandwidth 160MHz,
		 * channel frequency segment 2 represents the center
		 * channel frequency.
		 */
		if (ichan->ic_vhtop_ch_freq_seg2)
			center_chan =
				cds_freq_to_chan(ichan->ic_vhtop_ch_freq_seg2);
		/*
		 * In 160MHz channel width, need to
		 * check if each of the 8 20MHz channel
		 * is DFS before adding to the NOL list.
		 * As it is possible that part of the
		 * 160MHz can be Non-DFS channels.
		 */
		start_channel = center_chan - WMA_160MHZ_START_CENTER_CH_DIFF;
		for (loop = 0; loop < WMA_DFS_MAX_20M_SUB_CH; loop++) {
			if (cds_get_channel_state(start_channel +
				    (loop * WMA_NEXT_20MHZ_START_CH_DIFF)) ==
			    CHANNEL_STATE_DFS) {
				chan_list->channels[count] = start_channel +
					(loop * WMA_NEXT_20MHZ_START_CH_DIFF);
				count++;
			}
		}
		chan_list->nchannels = count;
	} else if (IEEE80211_IS_CHAN_11AC_VHT80P80(ichan)) {
		chan_list->nchannels = 4;
		/*
		 * If SAP is operating in 80p80 mode, either
		 * one of the two 80 segments or both the 80
		 * segments can be DFS channels, so need to
		 * identify on which 80 segment radar has
		 * been detected and only add those channels
		 * to the NOL list. center frequency should be
		 * based on the segment id passed as part of
		 * channel information in radar indication.
		 */
		if (ichan->ic_radar_found_segid == DFS_80P80_SEG1)
			center_chan =
				cds_freq_to_chan(ichan->ic_vhtop_ch_freq_seg2);
		chan_list->channels[0] = center_chan - 6;
		chan_list->channels[1] = center_chan - 2;
		chan_list->channels[2] = center_chan + 2;
		chan_list->channels[3] = center_chan + 6;
	} else if (IEEE80211_IS_CHAN_11AC_VHT80(ichan)) {
		chan_list->nchannels = 4;
		chan_list->channels[0] = center_chan - 6;
		chan_list->channels[1] = center_chan - 2;
		chan_list->channels[2] = center_chan + 2;
		chan_list->channels[3] = center_chan + 6;
	} else if (IEEE80211_IS_CHAN_11N_HT40(ichan) ||
		   IEEE80211_IS_CHAN_11AC_VHT40(ichan)) {
		chan_list->nchannels = 2;
		chan_list->channels[0] = center_chan - 2;
		chan_list->channels[1] = center_chan + 2;
	} else {
		chan_list->nchannels = 1;
		chan_list->channels[0] = center_chan;
	}

	return chan_list->nchannels;
}


/**
 * wma_dfs_indicate_radar() - Indicate Radar to SAP/HDD
 * @ic: ieee80211com ptr
 * @ichan: ieee 80211 channel
channel 7501 * 7502 * Return: 0 for success or error code 7503 */ 7504 int wma_dfs_indicate_radar(struct ieee80211com *ic, 7505 struct dfs_ieee80211_channel *ichan) 7506 { 7507 tp_wma_handle wma; 7508 void *hdd_ctx; 7509 struct wma_dfs_radar_indication *radar_event; 7510 struct wma_dfs_radar_ind wma_radar_event; 7511 tpAniSirGlobal pmac = NULL; 7512 bool indication_status; 7513 7514 wma = cds_get_context(QDF_MODULE_ID_WMA); 7515 if (wma == NULL) { 7516 WMA_LOGE("%s: DFS- Invalid wma", __func__); 7517 return -ENOENT; 7518 } 7519 7520 hdd_ctx = cds_get_context(QDF_MODULE_ID_HDD); 7521 pmac = (tpAniSirGlobal) 7522 cds_get_context(QDF_MODULE_ID_PE); 7523 7524 if (!pmac) { 7525 WMA_LOGE("%s: Invalid MAC handle", __func__); 7526 return -ENOENT; 7527 } 7528 7529 if (wma->dfs_ic != ic) { 7530 WMA_LOGE("%s:DFS- Invalid WMA handle", __func__); 7531 return -ENOENT; 7532 } 7533 7534 /* 7535 * Do not post multiple Radar events on the same channel. 7536 * But, when DFS test mode is enabled, allow multiple dfs 7537 * radar events to be posted on the same channel. 
7538 */ 7539 qdf_spin_lock_bh(&ic->chan_lock); 7540 if (!pmac->sap.SapDfsInfo.disable_dfs_ch_switch) 7541 wma->dfs_ic->disable_phy_err_processing = true; 7542 7543 if ((ichan->ic_ieee != (wma->dfs_ic->last_radar_found_chan)) || 7544 (pmac->sap.SapDfsInfo.disable_dfs_ch_switch == true)) { 7545 radar_event = (struct wma_dfs_radar_indication *) 7546 qdf_mem_malloc(sizeof(struct wma_dfs_radar_indication)); 7547 if (radar_event == NULL) { 7548 WMA_LOGE(FL("Failed to allocate memory for radar_event")); 7549 return -ENOMEM; 7550 } 7551 wma->dfs_ic->last_radar_found_chan = ichan->ic_ieee; 7552 /* Indicate the radar event to HDD to stop the netif Tx queues */ 7553 wma_radar_event.chan_freq = ichan->ic_freq; 7554 wma_radar_event.dfs_radar_status = WMA_DFS_RADAR_FOUND; 7555 indication_status = 7556 wma->dfs_radar_indication_cb(hdd_ctx, &wma_radar_event); 7557 if (indication_status == false) { 7558 WMA_LOGE("%s:Application triggered channel switch in progress!.. drop radar event indiaction to SAP", 7559 __func__); 7560 qdf_mem_free(radar_event); 7561 qdf_spin_unlock_bh(&ic->chan_lock); 7562 return 0; 7563 } 7564 7565 WMA_LOGE("%s:DFS- RADAR INDICATED TO HDD", __func__); 7566 7567 wma_radar_event.ieee_chan_number = ichan->ic_ieee; 7568 /* 7569 * Indicate to the radar event to SAP to 7570 * select a new channel and set CSA IE 7571 */ 7572 radar_event->vdev_id = ic->vdev_id; 7573 wma_get_channels(ichan, &radar_event->chan_list); 7574 radar_event->dfs_radar_status = WMA_DFS_RADAR_FOUND; 7575 radar_event->use_nol = ic->ic_dfs_usenol(ic); 7576 wma_send_msg(wma, WMA_DFS_RADAR_IND, (void *)radar_event, 0); 7577 WMA_LOGE("%s:DFS- WMA_DFS_RADAR_IND Message Posted", __func__); 7578 } 7579 qdf_spin_unlock_bh(&ic->chan_lock); 7580 7581 return 0; 7582 } 7583 7584 #ifdef WLAN_FEATURE_MEMDUMP 7585 /* 7586 * wma_process_fw_mem_dump_req() - Function to request fw memory dump from 7587 * firmware 7588 * @wma: Pointer to WMA handle 7589 * @mem_dump_req: Pointer for mem_dump_req 7590 * 7591 * This 
function sends memory dump request to firmware
 *
 * Return: QDF_STATUS_SUCCESS for success otherwise failure
 *
 */
QDF_STATUS wma_process_fw_mem_dump_req(tp_wma_handle wma,
				       struct fw_dump_req *mem_dump_req)
{
	int ret;

	if (!mem_dump_req || !wma) {
		WMA_LOGE(FL("input pointer is NULL"));
		return QDF_STATUS_E_FAILURE;
	}

	/* forward the request as-is to the WMI layer */
	ret = wmi_unified_process_fw_mem_dump_cmd(wma->wmi_handle,
			(struct fw_dump_req_param *) mem_dump_req);
	if (ret)
		return QDF_STATUS_E_FAILURE;

	return QDF_STATUS_SUCCESS;
}

/**
 * wma_fw_mem_dump_rsp() - send fw mem dump response to SME
 *
 * @req_id - request id.
 * @status - copy status from the firmware.
 *
 * This function is called by the memory dump response handler to
 * indicate SME that firmware dump copy is complete
 *
 * Return: QDF_STATUS
 */
static QDF_STATUS wma_fw_mem_dump_rsp(uint32_t req_id, uint32_t status)
{
	struct fw_dump_rsp *dump_rsp;
	cds_msg_t sme_msg = {0};
	QDF_STATUS qdf_status = QDF_STATUS_SUCCESS;

	dump_rsp = qdf_mem_malloc(sizeof(*dump_rsp));

	if (!dump_rsp) {
		WMA_LOGE(FL("Memory allocation failed."));
		qdf_status = QDF_STATUS_E_NOMEM;
		return qdf_status;
	}

	WMA_LOGI(FL("FW memory dump copy complete status: %d for request: %d"),
		 status, req_id);

	dump_rsp->request_id = req_id;
	dump_rsp->dump_complete = status;

	sme_msg.type = eWNI_SME_FW_DUMP_IND;
	sme_msg.bodyptr = dump_rsp;
	sme_msg.bodyval = 0;

	/* on successful post, SME owns and frees dump_rsp */
	qdf_status = cds_mq_post_message(QDF_MODULE_ID_SME, &sme_msg);
	if (!QDF_IS_STATUS_SUCCESS(qdf_status)) {
		WMA_LOGE(FL("Fail to post fw mem dump ind msg"));
		qdf_mem_free(dump_rsp);
	}

	return qdf_status;
}

/**
 * wma_fw_mem_dump_event_handler() - handles fw memory dump event
 *
 * @handle: pointer to wma handle.
 * @cmd_param_info: pointer to TLV info received in the event.
 * @len: length of data in @cmd_param_info
 *
 * This function is a handler for firmware memory dump event.
 *
 * Return: integer (0 for success and error code otherwise)
 */
int wma_fw_mem_dump_event_handler(void *handle, u_int8_t *cmd_param_info,
				  u_int32_t len)
{
	WMI_UPDATE_FW_MEM_DUMP_EVENTID_param_tlvs *param_buf;
	wmi_update_fw_mem_dump_fixed_param *event;
	QDF_STATUS status;

	param_buf =
		(WMI_UPDATE_FW_MEM_DUMP_EVENTID_param_tlvs *) cmd_param_info;
	if (!param_buf) {
		WMA_LOGA("%s: Invalid stats event", __func__);
		return -EINVAL;
	}

	/* NOTE(review): fixed_param is not NULL-checked before use —
	 * presumably guaranteed by the TLV parser; verify.
	 */
	event = param_buf->fixed_param;

	status = wma_fw_mem_dump_rsp(event->request_id,
				     event->fw_mem_dump_complete);
	if (QDF_STATUS_SUCCESS != status) {
		WMA_LOGE("Error posting FW MEM DUMP RSP.");
		return -EINVAL;
	}

	WMA_LOGI("FW MEM DUMP RSP posted successfully");
	return 0;
}
#endif /* WLAN_FEATURE_MEMDUMP */

/*
 * wma_process_set_ie_info() - Function to send IE info to firmware
 * @wma: Pointer to WMA handle
 * @ie_info: Pointer for ie data
 *
 * This function sends IE information to firmware
 *
 * Return: QDF_STATUS_SUCCESS for success otherwise failure
 *
 */
QDF_STATUS wma_process_set_ie_info(tp_wma_handle wma,
				   struct vdev_ie_info *ie_info)
{
	struct vdev_ie_info_param cmd = {0};
	int ret;

	if (!ie_info || !wma) {
		WMA_LOGE(FL("input pointer is NULL"));
		return QDF_STATUS_E_FAILURE;
	}

	/* Validate the input */
	if (ie_info->length <= 0) {
		WMA_LOGE(FL("Invalid IE length"));
		return QDF_STATUS_E_INVAL;
	}

	cmd.vdev_id = ie_info->vdev_id;
	cmd.ie_id = ie_info->ie_id;
	cmd.length = ie_info->length;
	cmd.band = ie_info->band;
	cmd.data = ie_info->data;
	cmd.ie_source =
WMA_SET_VDEV_IE_SOURCE_HOST;

	WMA_LOGD(FL("ie_id: %d, band: %d, len: %d"),
		 ie_info->ie_id, ie_info->band, ie_info->length);

	QDF_TRACE_HEX_DUMP(QDF_MODULE_ID_WMA, QDF_TRACE_LEVEL_DEBUG,
			   ie_info->data, ie_info->length);

	ret = wmi_unified_process_set_ie_info_cmd(wma->wmi_handle,
						  &cmd);
	return ret;
}

/**
 * wma_get_bpf_caps_event_handler() - Event handler for get bpf capability
 * @handle: WMA global handle
 * @cmd_param_info: command event data
 * @len: Length of @cmd_param_info
 *
 * Extracts the firmware's BPF capabilities from the event and delivers
 * them to HDD via the registered SME callback.
 *
 * Return: 0 on Success or Errno on failure
 */
int wma_get_bpf_caps_event_handler(void *handle,
				   u_int8_t *cmd_param_info,
				   u_int32_t len)
{
	WMI_BPF_CAPABILIY_INFO_EVENTID_param_tlvs *param_buf;
	wmi_bpf_capability_info_evt_fixed_param *event;
	struct sir_bpf_get_offload *bpf_get_offload;
	tpAniSirGlobal pmac = (tpAniSirGlobal)cds_get_context(
				QDF_MODULE_ID_PE);

	if (!pmac) {
		WMA_LOGE("%s: Invalid pmac", __func__);
		return -EINVAL;
	}
	if (!pmac->sme.pbpf_get_offload_cb) {
		WMA_LOGE("%s: Callback not registered", __func__);
		return -EINVAL;
	}

	/* NOTE(review): param_buf/fixed_param are not NULL-checked before
	 * dereference — presumably guaranteed by the WMI event framework;
	 * verify against other handlers in this file.
	 */
	param_buf = (WMI_BPF_CAPABILIY_INFO_EVENTID_param_tlvs *)cmd_param_info;
	event = param_buf->fixed_param;
	bpf_get_offload = qdf_mem_malloc(sizeof(*bpf_get_offload));

	if (!bpf_get_offload) {
		WMA_LOGP("%s: Memory allocation failed.", __func__);
		return -ENOMEM;
	}

	bpf_get_offload->bpf_version = event->bpf_version;
	bpf_get_offload->max_bpf_filters = event->max_bpf_filters;
	bpf_get_offload->max_bytes_for_bpf_inst =
		event->max_bytes_for_bpf_inst;
	WMA_LOGD("%s: BPF capabilities version: %d max bpf filter size: %d",
		 __func__, bpf_get_offload->bpf_version,
		 bpf_get_offload->max_bytes_for_bpf_inst);

	WMA_LOGD("%s: sending bpf capabilities event to hdd", __func__);
	pmac->sme.pbpf_get_offload_cb(pmac->hHdd, bpf_get_offload);
	qdf_mem_free(bpf_get_offload);
	return 0;
}

/**
 * wma_get_bpf_capabilities - Send get bpf capability to firmware
 * @wma: wma handle
 *
 * Return: QDF_STATUS enumeration.
 */
QDF_STATUS wma_get_bpf_capabilities(tp_wma_handle wma)
{
	QDF_STATUS status = QDF_STATUS_SUCCESS;
	wmi_bpf_get_capability_cmd_fixed_param *cmd;
	wmi_buf_t wmi_buf;
	uint32_t len;
	u_int8_t *buf_ptr;

	if (!wma || !wma->wmi_handle) {
		WMA_LOGE(FL("WMA is closed, can not issue get BPF capab"));
		return QDF_STATUS_E_INVAL;
	}

	/* the query is meaningful only if firmware advertises BPF offload */
	if (!WMI_SERVICE_IS_ENABLED(wma->wmi_service_bitmap,
				    WMI_SERVICE_BPF_OFFLOAD)) {
		WMA_LOGE(FL("BPF cababilities feature bit not enabled"));
		return QDF_STATUS_E_FAILURE;
	}

	len = sizeof(*cmd);
	wmi_buf = wmi_buf_alloc(wma->wmi_handle, len);
	if (!wmi_buf) {
		WMA_LOGE("%s: wmi_buf_alloc failed", __func__);
		return QDF_STATUS_E_NOMEM;
	}

	buf_ptr = (u_int8_t *) wmi_buf_data(wmi_buf);
	cmd = (wmi_bpf_get_capability_cmd_fixed_param *) buf_ptr;
	WMITLV_SET_HDR(&cmd->tlv_header,
		WMITLV_TAG_STRUC_wmi_bpf_get_capability_cmd_fixed_param,
		WMITLV_GET_STRUCT_TLVLEN(
			wmi_bpf_get_capability_cmd_fixed_param));

	if (wmi_unified_cmd_send(wma->wmi_handle, wmi_buf, len,
				 WMI_BPF_GET_CAPABILITY_CMDID)) {
		WMA_LOGE(FL("Failed to send BPF capability command"));
		wmi_buf_free(wmi_buf);
		return QDF_STATUS_E_FAILURE;
	}
	return status;
}

/**
 * wma_set_bpf_instructions - Set bpf instructions to firmware
 * @wma: wma handle
 * @bpf_set_offload: Bpf offload information to set to firmware
 *
 * Return: QDF_STATUS enumeration
 */
QDF_STATUS wma_set_bpf_instructions(tp_wma_handle wma,
				    struct sir_bpf_set_offload *bpf_set_offload)
{
	wmi_bpf_set_vdev_instructions_cmd_fixed_param *cmd;
	wmi_buf_t wmi_buf;
	uint32_t len = 0, len_aligned = 0;
	u_int8_t *buf_ptr;

	if (!wma || !wma->wmi_handle) {
		WMA_LOGE("%s: WMA is closed, can not issue set BPF capability",
			 __func__);
		return QDF_STATUS_E_INVAL;
	}

	if (!WMI_SERVICE_IS_ENABLED(wma->wmi_service_bitmap,
				    WMI_SERVICE_BPF_OFFLOAD)) {
		WMA_LOGE(FL("BPF offload feature Disabled"));
		return QDF_STATUS_E_NOSUPPORT;
	}

	/* program bytes travel in a byte-array TLV, padded to 32 bits */
	if (bpf_set_offload->total_length) {
		len_aligned = roundup(bpf_set_offload->current_length,
				      sizeof(A_UINT32));
		len = len_aligned + WMI_TLV_HDR_SIZE;
	}

	len += sizeof(*cmd);
	wmi_buf = wmi_buf_alloc(wma->wmi_handle, len);
	if (!wmi_buf) {
		WMA_LOGE("%s: wmi_buf_alloc failed", __func__);
		return QDF_STATUS_E_NOMEM;
	}

	buf_ptr = (u_int8_t *) wmi_buf_data(wmi_buf);
	cmd = (wmi_bpf_set_vdev_instructions_cmd_fixed_param *) buf_ptr;

	WMITLV_SET_HDR(&cmd->tlv_header,
		WMITLV_TAG_STRUC_wmi_bpf_set_vdev_instructions_cmd_fixed_param,
		WMITLV_GET_STRUCT_TLVLEN(
			wmi_bpf_set_vdev_instructions_cmd_fixed_param));
	cmd->vdev_id = bpf_set_offload->session_id;
	cmd->filter_id = bpf_set_offload->filter_id;
	cmd->total_length = bpf_set_offload->total_length;
	cmd->current_offset = bpf_set_offload->current_offset;
	cmd->current_length = bpf_set_offload->current_length;

	/* total_length == 0 means "clear filter": no program TLV is sent */
	if (bpf_set_offload->total_length) {
		buf_ptr +=
			sizeof(wmi_bpf_set_vdev_instructions_cmd_fixed_param);
		WMITLV_SET_HDR(buf_ptr, WMITLV_TAG_ARRAY_BYTE, len_aligned);
		buf_ptr += WMI_TLV_HDR_SIZE;
		qdf_mem_copy(buf_ptr, bpf_set_offload->program,
			     bpf_set_offload->current_length);
	}

	if (wmi_unified_cmd_send(wma->wmi_handle, wmi_buf, len,
				 WMI_BPF_SET_VDEV_INSTRUCTIONS_CMDID)) {
		WMA_LOGE(FL("Failed to send config bpf instructions command"));
		wmi_buf_free(wmi_buf);
		return QDF_STATUS_E_FAILURE;
	}
	return QDF_STATUS_SUCCESS;
}

/**
 * wma_set_tx_rx_aggregation_size() - sets tx rx aggregation sizes
 * @tx_rx_aggregation_size: aggregation size parameters
 *
 * This function sets tx rx aggregation sizes
 *
 * Return: QDF_STATUS_SUCCESS on success, error number otherwise
 */
QDF_STATUS wma_set_tx_rx_aggregation_size(
	struct sir_set_tx_rx_aggregation_size *tx_rx_aggregation_size)
{
	tp_wma_handle wma_handle;
	wmi_vdev_set_custom_aggr_size_cmd_fixed_param *cmd;
	int32_t len;
	wmi_buf_t buf;
	u_int8_t *buf_ptr;
	int ret;

	wma_handle = cds_get_context(QDF_MODULE_ID_WMA);

	if (!tx_rx_aggregation_size) {
		WMA_LOGE("%s: invalid pointer", __func__);
		return QDF_STATUS_E_INVAL;
	}

	if (!wma_handle) {
		/* NOTE(review): "invald" typo is in the runtime log string;
		 * left untouched here.
		 */
		WMA_LOGE("%s: WMA context is invald!", __func__);
		return QDF_STATUS_E_INVAL;
	}

	len = sizeof(*cmd);
	buf = wmi_buf_alloc(wma_handle->wmi_handle, len);

	if (!buf) {
		WMA_LOGE("%s: Failed allocate wmi buffer", __func__);
		return QDF_STATUS_E_NOMEM;
	}

	buf_ptr = (u_int8_t *) wmi_buf_data(buf);
	cmd = (wmi_vdev_set_custom_aggr_size_cmd_fixed_param *) buf_ptr;

	WMITLV_SET_HDR(&cmd->tlv_header,
		WMITLV_TAG_STRUC_wmi_vdev_set_custom_aggr_size_cmd_fixed_param,
		WMITLV_GET_STRUCT_TLVLEN(
			wmi_vdev_set_custom_aggr_size_cmd_fixed_param));

	cmd->vdev_id = tx_rx_aggregation_size->vdev_id;
	cmd->tx_aggr_size = tx_rx_aggregation_size->tx_aggregation_size;
	cmd->rx_aggr_size = tx_rx_aggregation_size->rx_aggregation_size;

	WMA_LOGI("tx aggr: %d rx aggr: %d vdev: %d",
		 cmd->tx_aggr_size, cmd->rx_aggr_size, cmd->vdev_id);

	ret = wmi_unified_cmd_send(wma_handle->wmi_handle, buf, len,
				   WMI_VDEV_SET_CUSTOM_AGGR_SIZE_CMDID);
	if (ret) {
		WMA_LOGE("%s: Failed to send aggregation
size command", 7968 __func__); 7969 wmi_buf_free(buf); 7970 return QDF_STATUS_E_FAILURE; 7971 } 7972 7973 return QDF_STATUS_SUCCESS; 7974 } 7975 7976 /** 7977 * wma_p2p_lo_start() - P2P listen offload start 7978 * @params: p2p listen offload parameters 7979 * 7980 * This function sends WMI command to start P2P listen offload. 7981 * 7982 * Return: QDF_STATUS enumeration 7983 */ 7984 QDF_STATUS wma_p2p_lo_start(struct sir_p2p_lo_start *params) 7985 { 7986 wmi_buf_t buf; 7987 wmi_p2p_lo_start_cmd_fixed_param *cmd; 7988 int32_t len = sizeof(*cmd); 7989 tp_wma_handle wma = cds_get_context(QDF_MODULE_ID_WMA); 7990 uint8_t *buf_ptr; 7991 int ret; 7992 int device_types_len_aligned, probe_resp_len_aligned; 7993 7994 if (NULL == wma) { 7995 WMA_LOGE("%s: wma context is NULL", __func__); 7996 return QDF_STATUS_E_INVAL; 7997 } 7998 7999 device_types_len_aligned = qdf_roundup(params->dev_types_len, 8000 sizeof(A_UINT32)); 8001 probe_resp_len_aligned = qdf_roundup(params->probe_resp_len, 8002 sizeof(A_UINT32)); 8003 8004 len += 2 * WMI_TLV_HDR_SIZE + device_types_len_aligned + 8005 probe_resp_len_aligned; 8006 8007 buf = wmi_buf_alloc(wma->wmi_handle, len); 8008 if (!buf) { 8009 WMA_LOGP("%s: failed to allocate memory for p2p lo start", 8010 __func__); 8011 return QDF_STATUS_E_NOMEM; 8012 } 8013 8014 cmd = (wmi_p2p_lo_start_cmd_fixed_param *)wmi_buf_data(buf); 8015 buf_ptr = (uint8_t *) wmi_buf_data(buf); 8016 8017 WMITLV_SET_HDR(&cmd->tlv_header, 8018 WMITLV_TAG_STRUC_wmi_p2p_lo_start_cmd_fixed_param, 8019 WMITLV_GET_STRUCT_TLVLEN( 8020 wmi_p2p_lo_start_cmd_fixed_param)); 8021 8022 cmd->vdev_id = params->vdev_id; 8023 cmd->ctl_flags = params->ctl_flags; 8024 cmd->channel = params->freq; 8025 cmd->period = params->period; 8026 cmd->interval = params->interval; 8027 cmd->count = params->count; 8028 cmd->device_types_len = params->dev_types_len; 8029 cmd->prob_resp_len = params->probe_resp_len; 8030 8031 buf_ptr += sizeof(wmi_p2p_lo_start_cmd_fixed_param); 8032 
WMITLV_SET_HDR(buf_ptr, WMITLV_TAG_ARRAY_BYTE, 8033 device_types_len_aligned); 8034 buf_ptr += WMI_TLV_HDR_SIZE; 8035 qdf_mem_copy(buf_ptr, params->device_types, params->dev_types_len); 8036 8037 buf_ptr += device_types_len_aligned; 8038 WMITLV_SET_HDR(buf_ptr, WMITLV_TAG_ARRAY_BYTE, probe_resp_len_aligned); 8039 buf_ptr += WMI_TLV_HDR_SIZE; 8040 qdf_mem_copy(buf_ptr, params->probe_resp_tmplt, params->probe_resp_len); 8041 8042 WMA_LOGI("%s: Sending WMI_P2P_LO_START command, channel=%d, period=%d, interval=%d, count=%d", 8043 __func__, cmd->channel, cmd->period, 8044 cmd->interval, cmd->count); 8045 8046 ret = wmi_unified_cmd_send(wma->wmi_handle, 8047 buf, len, 8048 WMI_P2P_LISTEN_OFFLOAD_START_CMDID); 8049 if (ret) { 8050 WMA_LOGE("Failed to send p2p lo start: %d", ret); 8051 wmi_buf_free(buf); 8052 } 8053 8054 WMA_LOGI("%s: Successfully sent WMI_P2P_LO_START", __func__); 8055 wma->interfaces[params->vdev_id].p2p_lo_in_progress = true; 8056 8057 return ret; 8058 } 8059 8060 /** 8061 * wma_p2p_lo_stop() - P2P listen offload stop 8062 * @vdev_id: vdev identifier 8063 * 8064 * This function sends WMI command to stop P2P listen offload. 
8065 * 8066 * Return: QDF_STATUS enumeration 8067 */ 8068 QDF_STATUS wma_p2p_lo_stop(u_int32_t vdev_id) 8069 { 8070 wmi_buf_t buf; 8071 wmi_p2p_lo_stop_cmd_fixed_param *cmd; 8072 int32_t len; 8073 tp_wma_handle wma = cds_get_context(QDF_MODULE_ID_WMA); 8074 int ret; 8075 8076 if (NULL == wma) { 8077 WMA_LOGE("%s: wma context is NULL", __func__); 8078 return QDF_STATUS_E_INVAL; 8079 } 8080 8081 len = sizeof(*cmd); 8082 buf = wmi_buf_alloc(wma->wmi_handle, len); 8083 if (!buf) { 8084 WMA_LOGP("%s: failed to allocate memory for p2p lo stop", 8085 __func__); 8086 return QDF_STATUS_E_NOMEM; 8087 } 8088 cmd = (wmi_p2p_lo_stop_cmd_fixed_param *)wmi_buf_data(buf); 8089 8090 WMITLV_SET_HDR(&cmd->tlv_header, 8091 WMITLV_TAG_STRUC_wmi_p2p_lo_stop_cmd_fixed_param, 8092 WMITLV_GET_STRUCT_TLVLEN( 8093 wmi_p2p_lo_stop_cmd_fixed_param)); 8094 8095 cmd->vdev_id = vdev_id; 8096 8097 WMA_LOGI("%s: Sending WMI_P2P_LO_STOP command", __func__); 8098 8099 ret = wmi_unified_cmd_send(wma->wmi_handle, 8100 buf, len, 8101 WMI_P2P_LISTEN_OFFLOAD_STOP_CMDID); 8102 if (ret) { 8103 WMA_LOGE("Failed to send p2p lo stop: %d", ret); 8104 wmi_buf_free(buf); 8105 } 8106 8107 WMA_LOGI("%s: Successfully sent WMI_P2P_LO_STOP", __func__); 8108 wma->interfaces[vdev_id].p2p_lo_in_progress = false; 8109 8110 return ret; 8111 } 8112 8113 /** 8114 * wma_p2p_lo_event_handler() - p2p lo event 8115 * @handle: the WMA handle 8116 * @event_buf: buffer with the event parameters 8117 * @len: length of the buffer 8118 * 8119 * This function receives P2P listen offload stop event from FW and 8120 * pass the event information to upper layer. 
8121 * 8122 * Return: 0 on success 8123 */ 8124 int wma_p2p_lo_event_handler(void *handle, uint8_t *event_buf, 8125 uint32_t len) 8126 { 8127 tp_wma_handle wma = cds_get_context(QDF_MODULE_ID_WMA); 8128 struct sir_p2p_lo_event *event; 8129 WMI_P2P_LISTEN_OFFLOAD_STOPPED_EVENTID_param_tlvs *param_tlvs; 8130 wmi_p2p_lo_stopped_event_fixed_param *fix_param; 8131 tpAniSirGlobal p_mac = cds_get_context(QDF_MODULE_ID_PE); 8132 8133 if (!wma) { 8134 WMA_LOGE("%s: Invalid WMA Context", __func__); 8135 return -EINVAL; 8136 } 8137 8138 if (!p_mac) { 8139 WMA_LOGE("%s: Invalid p_mac", __func__); 8140 return -EINVAL; 8141 } 8142 8143 if (!p_mac->sme.p2p_lo_event_callback) { 8144 WMA_LOGE("%s: Callback not registered", __func__); 8145 return -EINVAL; 8146 } 8147 8148 param_tlvs = (WMI_P2P_LISTEN_OFFLOAD_STOPPED_EVENTID_param_tlvs *) 8149 event_buf; 8150 fix_param = param_tlvs->fixed_param; 8151 event = qdf_mem_malloc(sizeof(*event)); 8152 if (event == NULL) { 8153 WMA_LOGE("Event allocation failed"); 8154 return -ENOMEM; 8155 } 8156 event->vdev_id = fix_param->vdev_id; 8157 event->reason_code = fix_param->reason; 8158 8159 p_mac->sme.p2p_lo_event_callback(p_mac->hHdd, event); 8160 8161 wma->interfaces[event->vdev_id].p2p_lo_in_progress = false; 8162 8163 return 0; 8164 } 8165 8166 /** 8167 * wma_get_wakelock_stats() - Collects wake lock stats 8168 * @wake_lock_stats: wakelock structure to be filled 8169 * 8170 * This function collects wake lock stats 8171 * 8172 * Return: VOS_STATUS_SUCCESS on success, error number otherwise 8173 */ 8174 QDF_STATUS wma_get_wakelock_stats(struct sir_wake_lock_stats *wake_lock_stats) 8175 { 8176 tp_wma_handle wma_handle; 8177 8178 wma_handle = cds_get_context(QDF_MODULE_ID_WMA); 8179 8180 if (!wake_lock_stats) { 8181 WMA_LOGE("%s: invalid pointer", __func__); 8182 return QDF_STATUS_E_INVAL; 8183 } 8184 8185 if (!wma_handle) { 8186 WMA_LOGE("%s: WMA context is invalid!", __func__); 8187 return QDF_STATUS_E_INVAL; 8188 } 8189 8190 
	wake_lock_stats->wow_ucast_wake_up_count =
		wma_handle->wow_ucast_wake_up_count;
	wake_lock_stats->wow_bcast_wake_up_count =
		wma_handle->wow_bcast_wake_up_count;
	wake_lock_stats->wow_ipv4_mcast_wake_up_count =
		wma_handle->wow_ipv4_mcast_wake_up_count;
	wake_lock_stats->wow_ipv6_mcast_wake_up_count =
		wma_handle->wow_ipv6_mcast_wake_up_count;
	wake_lock_stats->wow_ipv6_mcast_ra_stats =
		wma_handle->wow_ipv6_mcast_ra_stats;
	wake_lock_stats->wow_ipv6_mcast_ns_stats =
		wma_handle->wow_ipv6_mcast_ns_stats;
	wake_lock_stats->wow_ipv6_mcast_na_stats =
		wma_handle->wow_ipv6_mcast_na_stats;
	wake_lock_stats->wow_icmpv4_count = wma_handle->wow_icmpv4_count;
	wake_lock_stats->wow_icmpv6_count =
		wma_handle->wow_icmpv6_count;

	return QDF_STATUS_SUCCESS;
}

/**
 * wma_process_fw_test_cmd() - send unit test command to fw.
 * @handle: wma handle
 * @wma_fwtest: fw test command
 *
 * This function send fw test command to fw.
 *
 * Return: none
 */
void wma_process_fw_test_cmd(WMA_HANDLE handle,
			     struct set_fwtest_params *wma_fwtest)
{
	tp_wma_handle wma_handle = (tp_wma_handle) handle;

	if (!wma_handle || !wma_handle->wmi_handle) {
		WMA_LOGE("%s: WMA is closed, can not issue fw test cmd",
			 __func__);
		return;
	}

	/* failure is logged only; caller has no status to act on */
	if (wmi_unified_fw_test_cmd(wma_handle->wmi_handle,
				    (struct set_fwtest_params *)wma_fwtest)) {
		WMA_LOGE("%s: Failed to issue fw test cmd",
			 __func__);
		return;
	}
}

/**
 * wma_enable_disable_caevent_ind() - Issue WMI command to enable or
 * disable ca event indication
 * @wma: wma handler
 * @val: boolean value true or false
 *
 * Return: QDF_STATUS
 */
QDF_STATUS wma_enable_disable_caevent_ind(tp_wma_handle wma, uint8_t val)
{
	WMI_CHAN_AVOID_RPT_ALLOW_CMD_fixed_param *cmd;
	wmi_buf_t wmi_buf;
	uint8_t *buf_ptr;
	uint32_t len;

	if (!wma || !wma->wmi_handle) {
		WMA_LOGE(FL("WMA is closed, can not issue set/clear CA"));
		return QDF_STATUS_E_INVAL;
	}

	len = sizeof(*cmd);
	wmi_buf = wmi_buf_alloc(wma->wmi_handle, len);
	if (!wmi_buf) {
		WMA_LOGE(FL("wmi_buf_alloc failed"));
		return QDF_STATUS_E_NOMEM;
	}
	buf_ptr = (uint8_t *) wmi_buf_data(wmi_buf);
	cmd = (WMI_CHAN_AVOID_RPT_ALLOW_CMD_fixed_param *) buf_ptr;
	WMITLV_SET_HDR(&cmd->tlv_header,
		WMITLV_TAG_STRUC_WMI_CHAN_AVOID_RPT_ALLOW_CMD_fixed_param,
		WMITLV_GET_STRUCT_TLVLEN(
			WMI_CHAN_AVOID_RPT_ALLOW_CMD_fixed_param));
	cmd->rpt_allow = val;
	if (wmi_unified_cmd_send(wma->wmi_handle, wmi_buf, len,
				 WMI_CHAN_AVOID_RPT_ALLOW_CMDID)) {
		WMA_LOGE(FL("Failed to send enable/disable CA event command"));
		wmi_buf_free(wmi_buf);
		return QDF_STATUS_E_FAILURE;
	}

	return QDF_STATUS_SUCCESS;
}

/**
 * wma_encrypt_decrypt_msg() -
 *
@encrypt_decrypt_params: encryption/decryption params
 * @data_len: data length
 * @encrypt_decrypt_cb: encrypt/decrypt callback
 *
 * This function sends WMI command to check encryption/decryption engine.
 *
 * Return: QDF_STATUS enumeration
 */
QDF_STATUS wma_encrypt_decrypt_msg(WMA_HANDLE handle,
		struct encrypt_decrypt_req_params *encrypt_decrypt_params)
{
	int ret;
	tp_wma_handle wma = (tp_wma_handle) handle;

	if (!wma || !wma->wmi_handle) {
		WMA_LOGE("%s: WMA is closed, can not issue encrypt/decrypt msg",
			 __func__);
		return QDF_STATUS_E_INVAL;
	}

	if (encrypt_decrypt_params == NULL) {
		WMA_LOGE("%s: encrypt/decrypt ptr NULL",
			 __func__);
		return QDF_STATUS_E_INVAL;
	}

	/* NOTE(review): the wmi layer's int return is passed back as
	 * QDF_STATUS — presumably the values are compatible; verify.
	 */
	ret = wmi_unified_encrypt_decrypt_send_cmd(wma->wmi_handle,
						   encrypt_decrypt_params);

	return ret;
}

/**
 * wma_encrypt_decrypt_msg_handler() - handle encrypt/decrypt data
 * indicated by FW
 * @handle: wma context
 * @data: event buffer
 * @data_len: length of event buffer
 *
 * Unpacks the firmware response and forwards it to SME via the
 * registered encrypt_decrypt_cb callback.
 *
 * Return: 0 on success
 */
int wma_encrypt_decrypt_msg_handler(void *handle, uint8_t *data,
				    uint32_t data_len)
{
	WMI_VDEV_ENCRYPT_DECRYPT_DATA_RESP_EVENTID_param_tlvs *param_buf;
	wmi_vdev_encrypt_decrypt_data_resp_event_fixed_param *data_event;
	struct sir_encrypt_decrypt_rsp_params encrypt_decrypt_rsp_params;
	tp_wma_handle wma = handle;
	u_int8_t *buf_ptr;
	tpAniSirGlobal pmac;

	if (data == NULL) {
		WMA_LOGE("%s: invalid pointer", __func__);
		return -EINVAL;
	}

	if (wma == NULL) {
		WMA_LOGE("%s: wma context is NULL", __func__);
		return -EINVAL;
	}

	WMA_LOGE("%s: received WMI_VDEV_ENCRYPT_DECRYPT_DATA_RESP_EVENTID ",
		 __func__);

	pmac = (tpAniSirGlobal)cds_get_context(QDF_MODULE_ID_PE);

	if (!pmac) {
		WMA_LOGE("%s: Invalid pmac",
			 __func__);
		return -EINVAL;
	}
	if (!pmac->sme.encrypt_decrypt_cb) {
		WMA_LOGE("%s: Callback not registered", __func__);
		return -EINVAL;
	}

	param_buf =
		(WMI_VDEV_ENCRYPT_DECRYPT_DATA_RESP_EVENTID_param_tlvs *)data;
	if (!param_buf) {
		WMA_LOGE("%s: Invalid response data buf", __func__);
		return -EINVAL;
	}

	data_event = param_buf->fixed_param;

	encrypt_decrypt_rsp_params.vdev_id = data_event->vdev_id;
	encrypt_decrypt_rsp_params.status = data_event->status;
	encrypt_decrypt_rsp_params.data_length = data_event->data_length;

	if (encrypt_decrypt_rsp_params.data_length) {
		/* payload bytes follow the fixed param and its TLV header */
		buf_ptr =
			(uint8_t *)data_event +
			sizeof(
			wmi_vdev_encrypt_decrypt_data_resp_event_fixed_param) +
			WMI_TLV_HDR_SIZE;

		encrypt_decrypt_rsp_params.data = buf_ptr;
	}

	pmac->sme.encrypt_decrypt_cb(pmac->hHdd, &encrypt_decrypt_rsp_params);

	return 0;
}