1 /* 2 * Copyright (c) 2013-2018 The Linux Foundation. All rights reserved. 3 * 4 * Permission to use, copy, modify, and/or distribute this software for 5 * any purpose with or without fee is hereby granted, provided that the 6 * above copyright notice and this permission notice appear in all 7 * copies. 8 * 9 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL 10 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED 11 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE 12 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL 13 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR 14 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER 15 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR 16 * PERFORMANCE OF THIS SOFTWARE. 17 */ 18 19 /** 20 * DOC: wma_utis.c 21 * This file contains utilities and stats related functions. 22 */ 23 24 /* Header files */ 25 26 #include "wma.h" 27 #include "wma_api.h" 28 #include "cds_api.h" 29 #include "wmi_unified_api.h" 30 #include "wlan_qct_sys.h" 31 #include "wni_api.h" 32 #include "ani_global.h" 33 #include "wmi_unified.h" 34 #include "wni_cfg.h" 35 #include "cfg_api.h" 36 37 #include "qdf_nbuf.h" 38 #include "qdf_types.h" 39 #include "qdf_mem.h" 40 41 #include "wma_types.h" 42 #include "lim_api.h" 43 #include "lim_session_utils.h" 44 45 #include "cds_utils.h" 46 47 #if !defined(REMOVE_PKT_LOG) 48 #include "pktlog_ac.h" 49 #endif /* REMOVE_PKT_LOG */ 50 51 #include "dbglog_host.h" 52 #include "csr_api.h" 53 #include "ol_fw.h" 54 55 #include "wma_internal.h" 56 #include "wlan_policy_mgr_api.h" 57 #include "wmi_unified_param.h" 58 #include "linux/ieee80211.h" 59 #include <cdp_txrx_handle.h> 60 #include "cds_reg_service.h" 61 #include "target_if.h" 62 63 /* MCS Based rate table */ 64 /* HT MCS parameters with Nss = 1 */ 65 static struct index_data_rate_type mcs_nss1[] = { 66 /* MCS L20 S20 L40 S40 */ 67 {0, {65, 72}, {135, 150 } }, 
68 {1, {130, 144}, {270, 300 } }, 69 {2, {195, 217}, {405, 450 } }, 70 {3, {260, 289}, {540, 600 } }, 71 {4, {390, 433}, {815, 900 } }, 72 {5, {520, 578}, {1080, 1200} }, 73 {6, {585, 650}, {1215, 1350} }, 74 {7, {650, 722}, {1350, 1500} } 75 }; 76 77 /* HT MCS parameters with Nss = 2 */ 78 static struct index_data_rate_type mcs_nss2[] = { 79 /* MCS L20 S20 L40 S40 */ 80 {0, {130, 144}, {270, 300 } }, 81 {1, {260, 289}, {540, 600 } }, 82 {2, {390, 433}, {810, 900 } }, 83 {3, {520, 578}, {1080, 1200} }, 84 {4, {780, 867}, {1620, 1800} }, 85 {5, {1040, 1156}, {2160, 2400} }, 86 {6, {1170, 1300}, {2430, 2700} }, 87 {7, {1300, 1440}, {2700, 3000} } 88 }; 89 90 /* MCS Based VHT rate table */ 91 /* MCS parameters with Nss = 1*/ 92 static struct index_vht_data_rate_type vht_mcs_nss1[] = { 93 /* MCS L20 S20 L40 S40 L80 S80 */ 94 {0, {65, 72 }, {135, 150}, {293, 325} }, 95 {1, {130, 144}, {270, 300}, {585, 650} }, 96 {2, {195, 217}, {405, 450}, {878, 975} }, 97 {3, {260, 289}, {540, 600}, {1170, 1300} }, 98 {4, {390, 433}, {810, 900}, {1755, 1950} }, 99 {5, {520, 578}, {1080, 1200}, {2340, 2600} }, 100 {6, {585, 650}, {1215, 1350}, {2633, 2925} }, 101 {7, {650, 722}, {1350, 1500}, {2925, 3250} }, 102 {8, {780, 867}, {1620, 1800}, {3510, 3900} }, 103 {9, {865, 960}, {1800, 2000}, {3900, 4333} } 104 }; 105 106 /*MCS parameters with Nss = 2*/ 107 static struct index_vht_data_rate_type vht_mcs_nss2[] = { 108 /* MCS L20 S20 L40 S40 L80 S80 */ 109 {0, {130, 144}, {270, 300}, { 585, 650} }, 110 {1, {260, 289}, {540, 600}, {1170, 1300} }, 111 {2, {390, 433}, {810, 900}, {1755, 1950} }, 112 {3, {520, 578}, {1080, 1200}, {2340, 2600} }, 113 {4, {780, 867}, {1620, 1800}, {3510, 3900} }, 114 {5, {1040, 1156}, {2160, 2400}, {4680, 5200} }, 115 {6, {1170, 1300}, {2430, 2700}, {5265, 5850} }, 116 {7, {1300, 1444}, {2700, 3000}, {5850, 6500} }, 117 {8, {1560, 1733}, {3240, 3600}, {7020, 7800} }, 118 {9, {1730, 1920}, {3600, 4000}, {7800, 8667} } 119 }; 120 121 #ifdef BIG_ENDIAN_HOST 122 
123 /* ############# function definitions ############ */ 124 125 /** 126 * wma_swap_bytes() - swap bytes 127 * @pv: buffer 128 * @n: swap bytes 129 * 130 * Return: none 131 */ 132 void wma_swap_bytes(void *pv, uint32_t n) 133 { 134 int32_t no_words; 135 int32_t i; 136 uint32_t *word_ptr; 137 138 no_words = n / sizeof(uint32_t); 139 word_ptr = (uint32_t *) pv; 140 for (i = 0; i < no_words; i++) 141 *(word_ptr + i) = __cpu_to_le32(*(word_ptr + i)); 142 } 143 144 #define SWAPME(x, len) wma_swap_bytes(&x, len) 145 #endif /* BIG_ENDIAN_HOST */ 146 147 /** 148 * wma_mcs_rate_match() - find the match mcs rate 149 * @match_rate: the rate to look up 150 * @is_sgi: return if the SGI rate is found 151 * @nss: the nss in use 152 * @nss1_rate: the nss1 rate 153 * @nss1_srate: the nss1 SGI rate 154 * @nss2_rate: the nss2 rate 155 * @nss2_srate: the nss2 SGI rate 156 * 157 * This is a helper function to find the match of the tx_rate 158 * in terms of the nss1/nss2 rate with non-SGI/SGI. 159 * 160 * Return: the found rate or 0 otherwise 161 */ 162 static inline uint16_t wma_mcs_rate_match(uint16_t match_rate, bool *is_sgi, 163 uint8_t nss, uint16_t nss1_rate, 164 uint16_t nss1_srate, 165 uint16_t nss2_rate, 166 uint16_t nss2_srate) 167 { 168 WMA_LOGD("%s match_rate: %d, %d %d %d %d", 169 __func__, match_rate, nss1_rate, nss1_srate, nss2_rate, 170 nss2_srate); 171 172 if (match_rate == nss1_rate) { 173 return nss1_rate; 174 } else if (match_rate == nss1_srate) { 175 *is_sgi = true; 176 return nss1_srate; 177 } else if (nss == 2 && match_rate == nss2_rate) 178 return nss2_rate; 179 else if (nss == 2 && match_rate == nss2_srate) { 180 *is_sgi = true; 181 return nss2_srate; 182 } else 183 return 0; 184 } 185 186 uint8_t wma_get_mcs_idx(uint16_t maxRate, uint8_t rate_flags, 187 uint8_t nss, uint8_t *mcsRateFlag) 188 { 189 uint8_t index = 0; 190 uint16_t match_rate = 0; 191 bool is_sgi = false; 192 193 WMA_LOGD("%s rate:%d rate_flgs: 0x%x, nss: %d", 194 __func__, maxRate, rate_flags, 
nss); 195 196 *mcsRateFlag = rate_flags; 197 *mcsRateFlag &= ~TX_RATE_SGI; 198 for (index = 0; index < MAX_VHT_MCS_IDX; index++) { 199 if (rate_flags & TX_RATE_VHT80) { 200 /* check for vht80 nss1/2 rate set */ 201 match_rate = wma_mcs_rate_match(maxRate, &is_sgi, nss, 202 vht_mcs_nss1[index].ht80_rate[0], 203 vht_mcs_nss1[index].ht80_rate[1], 204 vht_mcs_nss2[index].ht80_rate[0], 205 vht_mcs_nss2[index].ht80_rate[1]); 206 if (match_rate) 207 goto rate_found; 208 } 209 if ((rate_flags & TX_RATE_VHT40) | 210 (rate_flags & TX_RATE_VHT80)) { 211 /* check for vht40 nss1/2 rate set */ 212 match_rate = wma_mcs_rate_match(maxRate, &is_sgi, nss, 213 vht_mcs_nss1[index].ht40_rate[0], 214 vht_mcs_nss1[index].ht40_rate[1], 215 vht_mcs_nss2[index].ht40_rate[0], 216 vht_mcs_nss2[index].ht40_rate[1]); 217 if (match_rate) { 218 *mcsRateFlag &= ~TX_RATE_VHT80; 219 goto rate_found; 220 } 221 } 222 if ((rate_flags & TX_RATE_VHT20) | 223 (rate_flags & TX_RATE_VHT40) | 224 (rate_flags & TX_RATE_VHT80)) { 225 /* check for vht20 nss1/2 rate set */ 226 match_rate = wma_mcs_rate_match(maxRate, &is_sgi, nss, 227 vht_mcs_nss1[index].ht20_rate[0], 228 vht_mcs_nss1[index].ht20_rate[1], 229 vht_mcs_nss2[index].ht20_rate[0], 230 vht_mcs_nss2[index].ht20_rate[1]); 231 if (match_rate) { 232 *mcsRateFlag &= ~(TX_RATE_VHT80 | 233 TX_RATE_VHT40); 234 goto rate_found; 235 } 236 } 237 } 238 for (index = 0; index < MAX_HT_MCS_IDX; index++) { 239 if (rate_flags & TX_RATE_HT40) { 240 /* check for ht40 nss1/2 rate set */ 241 match_rate = wma_mcs_rate_match(maxRate, &is_sgi, nss, 242 mcs_nss1[index].ht40_rate[0], 243 mcs_nss1[index].ht40_rate[1], 244 mcs_nss2[index].ht40_rate[0], 245 mcs_nss2[index].ht40_rate[1]); 246 if (match_rate) { 247 *mcsRateFlag = TX_RATE_HT40; 248 if (nss == 2) 249 index += MAX_HT_MCS_IDX; 250 goto rate_found; 251 } 252 } 253 if ((rate_flags & TX_RATE_HT20) || 254 (rate_flags & TX_RATE_HT40)) { 255 /* check for ht20 nss1/2 rate set */ 256 match_rate = wma_mcs_rate_match(maxRate, 
&is_sgi, nss, 257 mcs_nss1[index].ht20_rate[0], 258 mcs_nss1[index].ht20_rate[1], 259 mcs_nss2[index].ht20_rate[0], 260 mcs_nss2[index].ht20_rate[1]); 261 if (match_rate) { 262 *mcsRateFlag = TX_RATE_HT20; 263 if (nss == 2) 264 index += MAX_HT_MCS_IDX; 265 goto rate_found; 266 } 267 } 268 } 269 270 rate_found: 271 /* set SGI flag only if this is SGI rate */ 272 if (match_rate && is_sgi == true) 273 *mcsRateFlag |= TX_RATE_SGI; 274 275 WMA_LOGD("%s - match_rate: %d index: %d rate_flag: 0x%x is_sgi: %d", 276 __func__, match_rate, index, *mcsRateFlag, is_sgi); 277 278 return match_rate ? index : INVALID_MCS_IDX; 279 } 280 281 #ifndef QCA_SUPPORT_CP_STATS 282 /** 283 * wma_peek_vdev_req() - peek what request message is queued for response. 284 * the function does not delete the node after found 285 * @wma: WMA handle 286 * @vdev_id: vdev ID 287 * @type: request message type 288 * 289 * Return: the request message found 290 */ 291 static struct wma_target_req *wma_peek_vdev_req(tp_wma_handle wma, 292 uint8_t vdev_id, uint8_t type) 293 { 294 struct wma_target_req *req_msg = NULL; 295 bool found = false; 296 qdf_list_node_t *node1 = NULL, *node2 = NULL; 297 298 qdf_spin_lock_bh(&wma->vdev_respq_lock); 299 if (QDF_STATUS_SUCCESS != qdf_list_peek_front(&wma->vdev_resp_queue, 300 &node2)) { 301 qdf_spin_unlock_bh(&wma->vdev_respq_lock); 302 return NULL; 303 } 304 305 do { 306 node1 = node2; 307 req_msg = qdf_container_of(node1, struct wma_target_req, node); 308 if (req_msg->vdev_id != vdev_id) 309 continue; 310 if (req_msg->type != type) 311 continue; 312 313 found = true; 314 break; 315 } while (QDF_STATUS_SUCCESS == qdf_list_peek_next(&wma->vdev_resp_queue, 316 node1, &node2)); 317 qdf_spin_unlock_bh(&wma->vdev_respq_lock); 318 if (!found) { 319 WMA_LOGE(FL("target request not found for vdev_id %d type %d"), 320 vdev_id, type); 321 return NULL; 322 } 323 WMA_LOGD(FL("target request found for vdev id: %d type %d msg %d"), 324 vdev_id, type, req_msg->msg_type); 325 return 
req_msg; 326 } 327 #endif /* QCA_SUPPORT_CP_STATS */ 328 329 void wma_lost_link_info_handler(tp_wma_handle wma, uint32_t vdev_id, 330 int32_t rssi) 331 { 332 struct sir_lost_link_info *lost_link_info; 333 QDF_STATUS qdf_status; 334 struct scheduler_msg sme_msg = {0}; 335 336 /* report lost link information only for STA mode */ 337 if (wma_is_vdev_up(vdev_id) && 338 (WMI_VDEV_TYPE_STA == wma->interfaces[vdev_id].type) && 339 (0 == wma->interfaces[vdev_id].sub_type)) { 340 lost_link_info = qdf_mem_malloc(sizeof(*lost_link_info)); 341 if (NULL == lost_link_info) { 342 WMA_LOGE("%s: failed to allocate memory", __func__); 343 return; 344 } 345 lost_link_info->vdev_id = vdev_id; 346 lost_link_info->rssi = rssi; 347 sme_msg.type = eWNI_SME_LOST_LINK_INFO_IND; 348 sme_msg.bodyptr = lost_link_info; 349 sme_msg.bodyval = 0; 350 WMA_LOGD("%s: post msg to SME, bss_idx %d, rssi %d", __func__, 351 lost_link_info->vdev_id, lost_link_info->rssi); 352 353 qdf_status = scheduler_post_msg(QDF_MODULE_ID_SME, &sme_msg); 354 if (!QDF_IS_STATUS_SUCCESS(qdf_status)) { 355 WMA_LOGE("%s: fail to post msg to SME", __func__); 356 qdf_mem_free(lost_link_info); 357 } 358 } 359 } 360 361 /** 362 * host_map_smps_mode() - map fw smps mode to enum eSmpsModeValue 363 * @fw_smps_mode: fw smps mode 364 * 365 * Return: return enum eSmpsModeValue 366 */ 367 enum eSmpsModeValue host_map_smps_mode(A_UINT32 fw_smps_mode) 368 { 369 enum eSmpsModeValue smps_mode = SMPS_MODE_DISABLED; 370 371 switch (fw_smps_mode) { 372 case WMI_SMPS_FORCED_MODE_STATIC: 373 smps_mode = STATIC_SMPS_MODE; 374 break; 375 case WMI_SMPS_FORCED_MODE_DYNAMIC: 376 smps_mode = DYNAMIC_SMPS_MODE; 377 break; 378 default: 379 smps_mode = SMPS_MODE_DISABLED; 380 } 381 382 return smps_mode; 383 } 384 385 /** 386 * wma_smps_mode_to_force_mode_param() - Map smps mode to force 387 * mode commmand param 388 * @smps_mode: SMPS mode according to the protocol 389 * 390 * Return: int > 0 for success else failure 391 */ 392 int 
wma_smps_mode_to_force_mode_param(uint8_t smps_mode) 393 { 394 int param = -EINVAL; 395 396 switch (smps_mode) { 397 case STATIC_SMPS_MODE: 398 param = WMI_SMPS_FORCED_MODE_STATIC; 399 break; 400 case DYNAMIC_SMPS_MODE: 401 param = WMI_SMPS_FORCED_MODE_DYNAMIC; 402 break; 403 case SMPS_MODE_DISABLED: 404 param = WMI_SMPS_FORCED_MODE_DISABLED; 405 break; 406 default: 407 WMA_LOGE(FL("smps mode cannot be mapped :%d "), 408 smps_mode); 409 } 410 return param; 411 } 412 413 #ifdef WLAN_FEATURE_STATS_EXT 414 /** 415 * wma_stats_ext_event_handler() - extended stats event handler 416 * @handle: wma handle 417 * @event_buf: event buffer received from fw 418 * @len: length of data 419 * 420 * Return: 0 for success or error code 421 */ 422 int wma_stats_ext_event_handler(void *handle, uint8_t *event_buf, 423 uint32_t len) 424 { 425 WMI_STATS_EXT_EVENTID_param_tlvs *param_buf; 426 tSirStatsExtEvent *stats_ext_event; 427 wmi_stats_ext_event_fixed_param *stats_ext_info; 428 QDF_STATUS status; 429 struct scheduler_msg cds_msg = {0}; 430 uint8_t *buf_ptr; 431 uint32_t alloc_len; 432 433 WMA_LOGD("%s: Posting stats ext event to SME", __func__); 434 435 param_buf = (WMI_STATS_EXT_EVENTID_param_tlvs *) event_buf; 436 if (!param_buf) { 437 WMA_LOGE("%s: Invalid stats ext event buf", __func__); 438 return -EINVAL; 439 } 440 441 stats_ext_info = param_buf->fixed_param; 442 buf_ptr = (uint8_t *) stats_ext_info; 443 444 alloc_len = sizeof(tSirStatsExtEvent); 445 alloc_len += stats_ext_info->data_len; 446 447 if (stats_ext_info->data_len > (WMI_SVC_MSG_MAX_SIZE - 448 WMI_TLV_HDR_SIZE - sizeof(*stats_ext_info)) || 449 stats_ext_info->data_len > param_buf->num_data) { 450 WMA_LOGE("Excess data_len:%d, num_data:%d", 451 stats_ext_info->data_len, param_buf->num_data); 452 return -EINVAL; 453 } 454 stats_ext_event = (tSirStatsExtEvent *) qdf_mem_malloc(alloc_len); 455 if (NULL == stats_ext_event) { 456 WMA_LOGE("%s: Memory allocation failure", __func__); 457 return -ENOMEM; 458 } 459 460 
buf_ptr += sizeof(wmi_stats_ext_event_fixed_param) + WMI_TLV_HDR_SIZE; 461 462 stats_ext_event->vdev_id = stats_ext_info->vdev_id; 463 stats_ext_event->event_data_len = stats_ext_info->data_len; 464 qdf_mem_copy(stats_ext_event->event_data, 465 buf_ptr, stats_ext_event->event_data_len); 466 467 cds_msg.type = eWNI_SME_STATS_EXT_EVENT; 468 cds_msg.bodyptr = (void *)stats_ext_event; 469 cds_msg.bodyval = 0; 470 471 status = scheduler_post_msg(QDF_MODULE_ID_SME, &cds_msg); 472 if (status != QDF_STATUS_SUCCESS) { 473 WMA_LOGE("%s: Failed to post stats ext event to SME", __func__); 474 qdf_mem_free(stats_ext_event); 475 return -EFAULT; 476 } 477 478 WMA_LOGD("%s: stats ext event Posted to SME", __func__); 479 return 0; 480 } 481 #endif /* WLAN_FEATURE_STATS_EXT */ 482 483 484 /** 485 * wma_profile_data_report_event_handler() - fw profiling handler 486 * @handle: wma handle 487 * @event_buf: event buffer received from fw 488 * @len: length of data 489 * 490 * Return: 0 for success or error code 491 */ 492 int wma_profile_data_report_event_handler(void *handle, uint8_t *event_buf, 493 uint32_t len) 494 { 495 WMI_WLAN_PROFILE_DATA_EVENTID_param_tlvs *param_buf; 496 wmi_wlan_profile_ctx_t *profile_ctx; 497 wmi_wlan_profile_t *profile_data; 498 uint32_t i = 0; 499 uint32_t entries; 500 uint8_t *buf_ptr; 501 char temp_str[150]; 502 503 param_buf = (WMI_WLAN_PROFILE_DATA_EVENTID_param_tlvs *) event_buf; 504 if (!param_buf) { 505 WMA_LOGE("%s: Invalid profile data event buf", __func__); 506 return -EINVAL; 507 } 508 profile_ctx = param_buf->profile_ctx; 509 buf_ptr = (uint8_t *)profile_ctx; 510 buf_ptr = buf_ptr + sizeof(wmi_wlan_profile_ctx_t) + WMI_TLV_HDR_SIZE; 511 profile_data = (wmi_wlan_profile_t *) buf_ptr; 512 entries = profile_ctx->bin_count; 513 514 if (entries > param_buf->num_profile_data) { 515 WMA_LOGE("FW bin count %d more than data %d in TLV hdr", 516 entries, 517 param_buf->num_profile_data); 518 return -EINVAL; 519 } 520 521 QDF_TRACE(QDF_MODULE_ID_WMA, 
QDF_TRACE_LEVEL_ERROR, 522 "Profile data stats\n"); 523 QDF_TRACE(QDF_MODULE_ID_WMA, QDF_TRACE_LEVEL_ERROR, 524 "TOT: %d\n" 525 "tx_msdu_cnt: %d\n" 526 "tx_mpdu_cnt: %d\n" 527 "tx_ppdu_cnt: %d\n" 528 "rx_msdu_cnt: %d\n" 529 "rx_mpdu_cnt: %d\n" 530 "bin_count: %d\n", 531 profile_ctx->tot, 532 profile_ctx->tx_msdu_cnt, 533 profile_ctx->tx_mpdu_cnt, 534 profile_ctx->tx_ppdu_cnt, 535 profile_ctx->rx_msdu_cnt, 536 profile_ctx->rx_mpdu_cnt, 537 profile_ctx->bin_count); 538 539 QDF_TRACE(QDF_MODULE_ID_WMA, QDF_TRACE_LEVEL_ERROR, 540 "Profile ID: Count: TOT: Min: Max: hist_intvl: hist[0]: hist[1]:hist[2]"); 541 542 for (i = 0; i < entries; i++) { 543 if (i == WMI_WLAN_PROFILE_MAX_BIN_CNT) 544 break; 545 snprintf(temp_str, sizeof(temp_str), 546 " %d : %d : %d : %d : %d : %d : %d : %d : %d", 547 profile_data[i].id, 548 profile_data[i].cnt, 549 profile_data[i].tot, 550 profile_data[i].min, 551 profile_data[i].max, 552 profile_data[i].hist_intvl, 553 profile_data[i].hist[0], 554 profile_data[i].hist[1], 555 profile_data[i].hist[2]); 556 QDF_TRACE(QDF_MODULE_ID_WMA, QDF_TRACE_LEVEL_ERROR, 557 "%s", temp_str); 558 } 559 560 return 0; 561 } 562 563 #ifdef WLAN_FEATURE_LINK_LAYER_STATS 564 565 #define WMA_FILL_TX_STATS(eve, msg) do {\ 566 (msg)->msdus = (eve)->tx_msdu_cnt;\ 567 (msg)->mpdus = (eve)->tx_mpdu_cnt;\ 568 (msg)->ppdus = (eve)->tx_ppdu_cnt;\ 569 (msg)->bytes = (eve)->tx_bytes;\ 570 (msg)->drops = (eve)->tx_msdu_drop_cnt;\ 571 (msg)->drop_bytes = (eve)->tx_drop_bytes;\ 572 (msg)->retries = (eve)->tx_mpdu_retry_cnt;\ 573 (msg)->failed = (eve)->tx_mpdu_fail_cnt;\ 574 } while (0) 575 576 #define WMA_FILL_RX_STATS(eve, msg) do {\ 577 (msg)->mpdus = (eve)->mac_rx_mpdu_cnt;\ 578 (msg)->bytes = (eve)->mac_rx_bytes;\ 579 (msg)->ppdus = (eve)->phy_rx_ppdu_cnt;\ 580 (msg)->ppdu_bytes = (eve)->phy_rx_bytes;\ 581 (msg)->mpdu_retry = (eve)->rx_mpdu_retry_cnt;\ 582 (msg)->mpdu_dup = (eve)->rx_mpdu_dup_cnt;\ 583 (msg)->mpdu_discard = (eve)->rx_mpdu_discard_cnt;\ 584 } while (0) 585 586 
/** 587 * wma_get_ll_stats_ext_buf() - alloc result buffer for MAC counters 588 * @len: buffer length output 589 * @peer_num: peer number 590 * @fixed_param: fixed parameters in WMI event 591 * 592 * Structure of the stats message 593 * LL_EXT_STATS 594 * | 595 * |--Channel stats[1~n] 596 * |--Peer[1~n] 597 * | 598 * +---Signal 599 * +---TX 600 * | +---BE 601 * | +---BK 602 * | +---VI 603 * | +---VO 604 * | 605 * +---RX 606 * +---BE 607 * +---BK 608 * +---VI 609 * +---VO 610 * For each Access Category, the arregation and mcs 611 * stats are as this: 612 * TX 613 * +-BE/BK/VI/VO 614 * +----tx_mpdu_aggr_array 615 * +----tx_succ_mcs_array 616 * +----tx_fail_mcs_array 617 * +----tx_delay_array 618 * RX 619 * +-BE/BK/VI/VO 620 * +----rx_mpdu_aggr_array 621 * +----rx_mcs_array 622 * 623 * return: Address for result buffer. 624 */ 625 static tSirLLStatsResults *wma_get_ll_stats_ext_buf(uint32_t *len, 626 uint32_t peer_num, 627 wmi_report_stats_event_fixed_param *fixed_param) 628 { 629 tSirLLStatsResults *buf; 630 uint32_t buf_len; 631 uint32_t total_array_len, total_peer_len; 632 bool excess_data = false; 633 634 if (!len || !fixed_param) { 635 WMA_LOGE(FL("Invalid input parameters.")); 636 return NULL; 637 } 638 639 /* 640 * Result buffer has a structure like this: 641 * --------------------------------- 642 * | trigger_cond_i | 643 * +-------------------------------+ 644 * | cca_chgd_bitmap | 645 * +-------------------------------+ 646 * | sig_chgd_bitmap | 647 * +-------------------------------+ 648 * | tx_chgd_bitmap | 649 * +-------------------------------+ 650 * | rx_chgd_bitmap | 651 * +-------------------------------+ 652 * | peer_num | 653 * +-------------------------------+ 654 * | channel_num | 655 * +-------------------------------+ 656 * | tx_mpdu_aggr_array_len | 657 * +-------------------------------+ 658 * | tx_succ_mcs_array_len | 659 * +-------------------------------+ 660 * | tx_fail_mcs_array_len | 661 * +-------------------------------+ 662 * | 
tx_delay_array_len | 663 * +-------------------------------+ 664 * | rx_mpdu_aggr_array_len | 665 * +-------------------------------+ 666 * | rx_mcs_array_len | 667 * +-------------------------------+ 668 * | pointer to CCA stats | 669 * +-------------------------------+ 670 * | CCA stats | 671 * +-------------------------------+ 672 * | peer_stats |----+ 673 * +-------------------------------+ | 674 * | TX aggr/mcs parameters array | | 675 * | Length of this buffer is | | 676 * | not fixed. |<-+ | 677 * +-------------------------------+ | | 678 * | per peer tx stats |--+ | 679 * | BE | <--+ 680 * | BK | | 681 * | VI | | 682 * | VO | | 683 * +-------------------------------+ | 684 * | TX aggr/mcs parameters array | | 685 * | Length of this buffer is | | 686 * | not fixed. |<-+ | 687 * +-------------------------------+ | | 688 * | peer peer rx stats |--+ | 689 * | BE | <--+ 690 * | BK | 691 * | VI | 692 * | VO | 693 * --------------------------------- 694 */ 695 696 buf_len = sizeof(tSirLLStatsResults) + 697 sizeof(struct sir_wifi_ll_ext_stats); 698 do { 699 if (fixed_param->num_chan_cca_stats > (WMI_SVC_MSG_MAX_SIZE / 700 sizeof(struct sir_wifi_chan_cca_stats))) { 701 excess_data = true; 702 break; 703 } 704 buf_len += (fixed_param->num_chan_cca_stats * 705 sizeof(struct sir_wifi_chan_cca_stats)); 706 if (fixed_param->tx_mpdu_aggr_array_len > 707 WMI_SVC_MSG_MAX_SIZE) { 708 excess_data = true; 709 break; 710 } else { 711 total_array_len = fixed_param->tx_mpdu_aggr_array_len; 712 } 713 if (fixed_param->tx_succ_mcs_array_len > 714 (WMI_SVC_MSG_MAX_SIZE - total_array_len)) { 715 excess_data = true; 716 break; 717 } else { 718 total_array_len += fixed_param->tx_succ_mcs_array_len; 719 } 720 if (fixed_param->tx_fail_mcs_array_len > 721 (WMI_SVC_MSG_MAX_SIZE - total_array_len)) { 722 excess_data = true; 723 break; 724 } else { 725 total_array_len += fixed_param->tx_fail_mcs_array_len; 726 } 727 if (fixed_param->tx_ppdu_delay_array_len > 728 (WMI_SVC_MSG_MAX_SIZE - 
total_array_len)) { 729 excess_data = true; 730 break; 731 } else { 732 total_array_len += fixed_param->tx_ppdu_delay_array_len; 733 } 734 if (fixed_param->rx_mpdu_aggr_array_len > 735 (WMI_SVC_MSG_MAX_SIZE - total_array_len)) { 736 excess_data = true; 737 break; 738 } else { 739 total_array_len += fixed_param->rx_mpdu_aggr_array_len; 740 } 741 if (fixed_param->rx_mcs_array_len > 742 (WMI_SVC_MSG_MAX_SIZE - total_array_len)) { 743 excess_data = true; 744 break; 745 } else { 746 total_array_len += fixed_param->rx_mcs_array_len; 747 } 748 749 if (total_array_len > (WMI_SVC_MSG_MAX_SIZE / 750 (sizeof(uint32_t) * WLAN_MAX_AC))) { 751 excess_data = true; 752 break; 753 } else { 754 total_peer_len = (sizeof(uint32_t) * WLAN_MAX_AC * 755 total_array_len) + 756 (WLAN_MAX_AC * 757 (sizeof(struct sir_wifi_tx) + 758 sizeof(struct sir_wifi_rx))); 759 } 760 if (total_peer_len > WMI_SVC_MSG_MAX_SIZE) { 761 excess_data = true; 762 break; 763 } 764 if (peer_num > WMI_SVC_MSG_MAX_SIZE / (total_peer_len + 765 sizeof(struct sir_wifi_ll_ext_peer_stats))) { 766 excess_data = true; 767 break; 768 } else { 769 buf_len += peer_num * 770 (sizeof(struct sir_wifi_ll_ext_peer_stats) + 771 total_peer_len); 772 } 773 } while (0); 774 775 if (excess_data || (buf_len > WMI_SVC_MSG_MAX_SIZE)) { 776 WMA_LOGE("%s: excess wmi buffer: peer %d cca %d tx_mpdu %d tx_succ%d tx_fail %d tx_ppdu %d rx_mpdu %d rx_mcs %d", 777 __func__, peer_num, fixed_param->num_chan_cca_stats, 778 fixed_param->tx_mpdu_aggr_array_len, 779 fixed_param->tx_succ_mcs_array_len, 780 fixed_param->tx_fail_mcs_array_len, 781 fixed_param->tx_ppdu_delay_array_len, 782 fixed_param->rx_mpdu_aggr_array_len, 783 fixed_param->rx_mcs_array_len); 784 return NULL; 785 } 786 787 buf = (tSirLLStatsResults *)qdf_mem_malloc(buf_len); 788 if (buf == NULL) { 789 WMA_LOGE("%s: Cannot allocate link layer stats.", __func__); 790 buf_len = 0; 791 return NULL; 792 } 793 794 *len = buf_len; 795 return buf; 796 } 797 798 /** 799 * wma_fill_tx_stats() - Fix 
TX stats into result buffer 800 * @ll_stats: LL stats buffer 801 * @fix_param: parameters with fixed length in WMI event 802 * @param_buf: parameters without fixed length in WMI event 803 * @buf: buffer for TLV parameters 804 * 805 * Return: None 806 */ 807 static void wma_fill_tx_stats(struct sir_wifi_ll_ext_stats *ll_stats, 808 wmi_report_stats_event_fixed_param *fix_param, 809 WMI_REPORT_STATS_EVENTID_param_tlvs *param_buf, 810 uint8_t **buf, uint32_t *buf_length) 811 { 812 uint8_t *result; 813 uint32_t i, j, k; 814 wmi_peer_ac_tx_stats *wmi_peer_tx; 815 wmi_tx_stats *wmi_tx; 816 struct sir_wifi_tx *tx_stats; 817 struct sir_wifi_ll_ext_peer_stats *peer_stats; 818 uint32_t *tx_mpdu_aggr, *tx_succ_mcs, *tx_fail_mcs, *tx_delay; 819 uint32_t len, dst_len, tx_mpdu_aggr_array_len, tx_succ_mcs_array_len, 820 tx_fail_mcs_array_len, tx_delay_array_len; 821 822 result = *buf; 823 dst_len = *buf_length; 824 tx_mpdu_aggr_array_len = fix_param->tx_mpdu_aggr_array_len; 825 ll_stats->tx_mpdu_aggr_array_len = tx_mpdu_aggr_array_len; 826 tx_succ_mcs_array_len = fix_param->tx_succ_mcs_array_len; 827 ll_stats->tx_succ_mcs_array_len = tx_succ_mcs_array_len; 828 tx_fail_mcs_array_len = fix_param->tx_fail_mcs_array_len; 829 ll_stats->tx_fail_mcs_array_len = tx_fail_mcs_array_len; 830 tx_delay_array_len = fix_param->tx_ppdu_delay_array_len; 831 ll_stats->tx_delay_array_len = tx_delay_array_len; 832 wmi_peer_tx = param_buf->peer_ac_tx_stats; 833 wmi_tx = param_buf->tx_stats; 834 835 len = fix_param->num_peer_ac_tx_stats * 836 WLAN_MAX_AC * tx_mpdu_aggr_array_len * sizeof(uint32_t); 837 if (len <= dst_len) { 838 tx_mpdu_aggr = (uint32_t *)result; 839 qdf_mem_copy(tx_mpdu_aggr, param_buf->tx_mpdu_aggr, len); 840 result += len; 841 dst_len -= len; 842 } else { 843 WMA_LOGE(FL("TX_MPDU_AGGR buffer length is wrong.")); 844 tx_mpdu_aggr = NULL; 845 } 846 847 len = fix_param->num_peer_ac_tx_stats * WLAN_MAX_AC * 848 tx_succ_mcs_array_len * sizeof(uint32_t); 849 if (len <= dst_len) { 850 
tx_succ_mcs = (uint32_t *)result; 851 qdf_mem_copy(tx_succ_mcs, param_buf->tx_succ_mcs, len); 852 result += len; 853 dst_len -= len; 854 } else { 855 WMA_LOGE(FL("TX_SUCC_MCS buffer length is wrong.")); 856 tx_succ_mcs = NULL; 857 } 858 859 len = fix_param->num_peer_ac_tx_stats * WLAN_MAX_AC * 860 tx_fail_mcs_array_len * sizeof(uint32_t); 861 if (len <= dst_len) { 862 tx_fail_mcs = (uint32_t *)result; 863 qdf_mem_copy(tx_fail_mcs, param_buf->tx_fail_mcs, len); 864 result += len; 865 dst_len -= len; 866 } else { 867 WMA_LOGE(FL("TX_FAIL_MCS buffer length is wrong.")); 868 tx_fail_mcs = NULL; 869 } 870 871 len = fix_param->num_peer_ac_tx_stats * 872 WLAN_MAX_AC * tx_delay_array_len * sizeof(uint32_t); 873 if (len <= dst_len) { 874 tx_delay = (uint32_t *)result; 875 qdf_mem_copy(tx_delay, param_buf->tx_ppdu_delay, len); 876 result += len; 877 dst_len -= len; 878 } else { 879 WMA_LOGE(FL("TX_DELAY buffer length is wrong.")); 880 tx_delay = NULL; 881 } 882 883 /* per peer tx stats */ 884 peer_stats = ll_stats->peer_stats; 885 886 for (i = 0; i < fix_param->num_peer_ac_tx_stats; i++) { 887 uint32_t peer_id = wmi_peer_tx[i].peer_id; 888 struct sir_wifi_tx *ac; 889 wmi_tx_stats *wmi_tx_stats; 890 891 for (j = 0; j < ll_stats->peer_num; j++) { 892 peer_stats += j; 893 if (peer_stats->peer_id == WIFI_INVALID_PEER_ID || 894 peer_stats->peer_id == peer_id) 895 break; 896 } 897 898 if (j < ll_stats->peer_num) { 899 peer_stats->peer_id = wmi_peer_tx[i].peer_id; 900 peer_stats->vdev_id = wmi_peer_tx[i].vdev_id; 901 tx_stats = (struct sir_wifi_tx *)result; 902 for (k = 0; k < WLAN_MAX_AC; k++) { 903 wmi_tx_stats = &wmi_tx[i * WLAN_MAX_AC + k]; 904 ac = &tx_stats[k]; 905 WMA_FILL_TX_STATS(wmi_tx_stats, ac); 906 ac->mpdu_aggr_size = tx_mpdu_aggr; 907 ac->aggr_len = tx_mpdu_aggr_array_len * 908 sizeof(uint32_t); 909 ac->success_mcs_len = tx_succ_mcs_array_len * 910 sizeof(uint32_t); 911 ac->success_mcs = tx_succ_mcs; 912 ac->fail_mcs = tx_fail_mcs; 913 ac->fail_mcs_len = 
tx_fail_mcs_array_len * 914 sizeof(uint32_t); 915 ac->delay = tx_delay; 916 ac->delay_len = tx_delay_array_len * 917 sizeof(uint32_t); 918 peer_stats->ac_stats[k].tx_stats = ac; 919 peer_stats->ac_stats[k].type = k; 920 tx_mpdu_aggr += tx_mpdu_aggr_array_len; 921 tx_succ_mcs += tx_succ_mcs_array_len; 922 tx_fail_mcs += tx_fail_mcs_array_len; 923 tx_delay += tx_delay_array_len; 924 } 925 result += WLAN_MAX_AC * sizeof(struct sir_wifi_tx); 926 } else { 927 /* 928 * Buffer for Peer TX counter overflow. 929 * There is peer ID mismatch between TX, RX, 930 * signal counters. 931 */ 932 WMA_LOGE(FL("One peer TX info is dropped.")); 933 934 tx_mpdu_aggr += tx_mpdu_aggr_array_len * WLAN_MAX_AC; 935 tx_succ_mcs += tx_succ_mcs_array_len * WLAN_MAX_AC; 936 tx_fail_mcs += tx_fail_mcs_array_len * WLAN_MAX_AC; 937 tx_delay += tx_delay_array_len * WLAN_MAX_AC; 938 } 939 } 940 *buf = result; 941 *buf_length = dst_len; 942 } 943 944 /** 945 * wma_fill_rx_stats() - Fix RX stats into result buffer 946 * @ll_stats: LL stats buffer 947 * @fix_param: parameters with fixed length in WMI event 948 * @param_buf: parameters without fixed length in WMI event 949 * @buf: buffer for TLV parameters 950 * 951 * Return: None 952 */ 953 static void wma_fill_rx_stats(struct sir_wifi_ll_ext_stats *ll_stats, 954 wmi_report_stats_event_fixed_param *fix_param, 955 WMI_REPORT_STATS_EVENTID_param_tlvs *param_buf, 956 uint8_t **buf, uint32_t *buf_length) 957 { 958 uint8_t *result; 959 uint32_t i, j, k; 960 uint32_t *rx_mpdu_aggr, *rx_mcs; 961 wmi_rx_stats *wmi_rx; 962 wmi_peer_ac_rx_stats *wmi_peer_rx; 963 struct sir_wifi_rx *rx_stats; 964 struct sir_wifi_ll_ext_peer_stats *peer_stats; 965 uint32_t len, dst_len, rx_mpdu_aggr_array_len, rx_mcs_array_len; 966 967 rx_mpdu_aggr_array_len = fix_param->rx_mpdu_aggr_array_len; 968 ll_stats->rx_mpdu_aggr_array_len = rx_mpdu_aggr_array_len; 969 rx_mcs_array_len = fix_param->rx_mcs_array_len; 970 ll_stats->rx_mcs_array_len = rx_mcs_array_len; 971 wmi_peer_rx = 
param_buf->peer_ac_rx_stats; 972 wmi_rx = param_buf->rx_stats; 973 974 result = *buf; 975 dst_len = *buf_length; 976 len = sizeof(uint32_t) * (fix_param->num_peer_ac_rx_stats * 977 WLAN_MAX_AC * rx_mpdu_aggr_array_len); 978 if (len <= dst_len) { 979 rx_mpdu_aggr = (uint32_t *)result; 980 qdf_mem_copy(rx_mpdu_aggr, param_buf->rx_mpdu_aggr, len); 981 result += len; 982 dst_len -= len; 983 } else { 984 WMA_LOGE(FL("RX_MPDU_AGGR array length is wrong.")); 985 rx_mpdu_aggr = NULL; 986 } 987 988 len = sizeof(uint32_t) * (fix_param->num_peer_ac_rx_stats * 989 WLAN_MAX_AC * rx_mcs_array_len); 990 if (len <= dst_len) { 991 rx_mcs = (uint32_t *)result; 992 qdf_mem_copy(rx_mcs, param_buf->rx_mcs, len); 993 result += len; 994 dst_len -= len; 995 } else { 996 WMA_LOGE(FL("RX_MCS array length is wrong.")); 997 rx_mcs = NULL; 998 } 999 1000 /* per peer rx stats */ 1001 peer_stats = ll_stats->peer_stats; 1002 for (i = 0; i < fix_param->num_peer_ac_rx_stats; i++) { 1003 uint32_t peer_id = wmi_peer_rx[i].peer_id; 1004 struct sir_wifi_rx *ac; 1005 wmi_rx_stats *wmi_rx_stats; 1006 1007 for (j = 0; j < ll_stats->peer_num; j++) { 1008 peer_stats += j; 1009 if ((peer_stats->peer_id == WIFI_INVALID_PEER_ID) || 1010 (peer_stats->peer_id == peer_id)) 1011 break; 1012 } 1013 1014 if (j < ll_stats->peer_num) { 1015 peer_stats->peer_id = wmi_peer_rx[i].peer_id; 1016 peer_stats->vdev_id = wmi_peer_rx[i].vdev_id; 1017 peer_stats->sta_ps_inds = wmi_peer_rx[i].sta_ps_inds; 1018 peer_stats->sta_ps_durs = wmi_peer_rx[i].sta_ps_durs; 1019 peer_stats->rx_probe_reqs = 1020 wmi_peer_rx[i].rx_probe_reqs; 1021 peer_stats->rx_oth_mgmts = wmi_peer_rx[i].rx_oth_mgmts; 1022 rx_stats = (struct sir_wifi_rx *)result; 1023 1024 for (k = 0; k < WLAN_MAX_AC; k++) { 1025 wmi_rx_stats = &wmi_rx[i * WLAN_MAX_AC + k]; 1026 ac = &rx_stats[k]; 1027 WMA_FILL_RX_STATS(wmi_rx_stats, ac); 1028 ac->mpdu_aggr = rx_mpdu_aggr; 1029 ac->aggr_len = rx_mpdu_aggr_array_len * 1030 sizeof(uint32_t); 1031 ac->mcs = rx_mcs; 1032 
ac->mcs_len = rx_mcs_array_len * 1033 sizeof(uint32_t); 1034 peer_stats->ac_stats[k].rx_stats = ac; 1035 peer_stats->ac_stats[k].type = k; 1036 rx_mpdu_aggr += rx_mpdu_aggr_array_len; 1037 rx_mcs += rx_mcs_array_len; 1038 } 1039 result += WLAN_MAX_AC * sizeof(struct sir_wifi_rx); 1040 } else { 1041 /* 1042 * Buffer for Peer RX counter overflow. 1043 * There is peer ID mismatch between TX, RX, 1044 * signal counters. 1045 */ 1046 WMA_LOGE(FL("One peer RX info is dropped.")); 1047 rx_mpdu_aggr += rx_mpdu_aggr_array_len * WLAN_MAX_AC; 1048 rx_mcs += rx_mcs_array_len * WLAN_MAX_AC; 1049 } 1050 } 1051 *buf = result; 1052 *buf_length = dst_len; 1053 } 1054 1055 /** 1056 * wma_ll_stats_evt_handler() - handler for MAC layer counters. 1057 * @handle - wma handle 1058 * @event - FW event 1059 * @len - length of FW event 1060 * 1061 * return: 0 success. 1062 */ 1063 static int wma_ll_stats_evt_handler(void *handle, u_int8_t *event, 1064 u_int32_t len) 1065 { 1066 WMI_REPORT_STATS_EVENTID_param_tlvs *param_buf; 1067 wmi_report_stats_event_fixed_param *fixed_param; 1068 tSirLLStatsResults *link_stats_results; 1069 wmi_chan_cca_stats *wmi_cca_stats; 1070 wmi_peer_signal_stats *wmi_peer_signal; 1071 wmi_peer_ac_rx_stats *wmi_peer_rx; 1072 struct sir_wifi_ll_ext_stats *ll_stats; 1073 struct sir_wifi_ll_ext_peer_stats *peer_stats; 1074 struct sir_wifi_chan_cca_stats *cca_stats; 1075 struct sir_wifi_peer_signal_stats *peer_signal; 1076 uint8_t *result; 1077 uint32_t i, peer_num, result_size, dst_len; 1078 tpAniSirGlobal mac; 1079 struct scheduler_msg sme_msg = { 0 }; 1080 QDF_STATUS qdf_status; 1081 1082 mac = (tpAniSirGlobal)cds_get_context(QDF_MODULE_ID_PE); 1083 if (!mac) { 1084 WMA_LOGD("%s: NULL mac ptr. 
Exiting", __func__); 1085 return -EINVAL; 1086 } 1087 1088 if (!mac->sme.link_layer_stats_ext_cb) { 1089 WMA_LOGD("%s: HDD callback is null", __func__); 1090 return -EINVAL; 1091 } 1092 1093 WMA_LOGD("%s: Posting MAC counters event to HDD", __func__); 1094 1095 param_buf = (WMI_REPORT_STATS_EVENTID_param_tlvs *)event; 1096 fixed_param = param_buf->fixed_param; 1097 wmi_cca_stats = param_buf->chan_cca_stats; 1098 wmi_peer_signal = param_buf->peer_signal_stats; 1099 wmi_peer_rx = param_buf->peer_ac_rx_stats; 1100 if (fixed_param->num_peer_signal_stats > 1101 param_buf->num_peer_signal_stats || 1102 fixed_param->num_peer_ac_tx_stats > 1103 param_buf->num_peer_ac_tx_stats || 1104 fixed_param->num_peer_ac_rx_stats > 1105 param_buf->num_peer_ac_rx_stats) { 1106 WMA_LOGE("%s: excess num_peer_signal_stats:%d, num_peer_ac_tx_stats:%d, num_peer_ac_rx_stats:%d", 1107 __func__, fixed_param->num_peer_signal_stats, 1108 fixed_param->num_peer_ac_tx_stats, 1109 fixed_param->num_peer_ac_rx_stats); 1110 return -EINVAL; 1111 } 1112 1113 /* Get the MAX of three peer numbers */ 1114 peer_num = fixed_param->num_peer_signal_stats > 1115 fixed_param->num_peer_ac_tx_stats ? 1116 fixed_param->num_peer_signal_stats : 1117 fixed_param->num_peer_ac_tx_stats; 1118 peer_num = peer_num > fixed_param->num_peer_ac_rx_stats ? 
1119 peer_num : fixed_param->num_peer_ac_rx_stats; 1120 1121 if (peer_num == 0) 1122 return -EINVAL; 1123 1124 link_stats_results = wma_get_ll_stats_ext_buf(&result_size, 1125 peer_num, 1126 fixed_param); 1127 if (!link_stats_results) { 1128 WMA_LOGE("%s: Fail to allocate stats buffer", __func__); 1129 return -EINVAL; 1130 } 1131 link_stats_results->paramId = WMI_LL_STATS_EXT_MAC_COUNTER; 1132 link_stats_results->num_peers = peer_num; 1133 link_stats_results->peer_event_number = 1; 1134 link_stats_results->moreResultToFollow = 0; 1135 1136 ll_stats = (struct sir_wifi_ll_ext_stats *)link_stats_results->results; 1137 ll_stats->trigger_cond_id = fixed_param->trigger_cond_id; 1138 ll_stats->cca_chgd_bitmap = fixed_param->cca_chgd_bitmap; 1139 ll_stats->sig_chgd_bitmap = fixed_param->sig_chgd_bitmap; 1140 ll_stats->tx_chgd_bitmap = fixed_param->tx_chgd_bitmap; 1141 ll_stats->rx_chgd_bitmap = fixed_param->rx_chgd_bitmap; 1142 ll_stats->channel_num = fixed_param->num_chan_cca_stats; 1143 ll_stats->peer_num = peer_num; 1144 1145 result = (uint8_t *)ll_stats->stats; 1146 peer_stats = (struct sir_wifi_ll_ext_peer_stats *)result; 1147 ll_stats->peer_stats = peer_stats; 1148 1149 for (i = 0; i < peer_num; i++) { 1150 peer_stats[i].peer_id = WIFI_INVALID_PEER_ID; 1151 peer_stats[i].vdev_id = WIFI_INVALID_VDEV_ID; 1152 } 1153 1154 /* Per peer signal */ 1155 result_size -= sizeof(struct sir_wifi_ll_ext_stats); 1156 dst_len = sizeof(struct sir_wifi_peer_signal_stats); 1157 for (i = 0; i < fixed_param->num_peer_signal_stats; i++) { 1158 peer_stats[i].peer_id = wmi_peer_signal->peer_id; 1159 peer_stats[i].vdev_id = wmi_peer_signal->vdev_id; 1160 peer_signal = &peer_stats[i].peer_signal_stats; 1161 1162 WMA_LOGD("%d antennas for peer %d", 1163 wmi_peer_signal->num_chains_valid, 1164 wmi_peer_signal->peer_id); 1165 if (dst_len <= result_size) { 1166 peer_signal->vdev_id = wmi_peer_signal->vdev_id; 1167 peer_signal->peer_id = wmi_peer_signal->peer_id; 1168 peer_signal->num_chain = 1169 
wmi_peer_signal->num_chains_valid; 1170 qdf_mem_copy(peer_signal->per_ant_snr, 1171 wmi_peer_signal->per_chain_snr, 1172 sizeof(peer_signal->per_ant_snr)); 1173 qdf_mem_copy(peer_signal->nf, 1174 wmi_peer_signal->per_chain_nf, 1175 sizeof(peer_signal->nf)); 1176 qdf_mem_copy(peer_signal->per_ant_rx_mpdus, 1177 wmi_peer_signal->per_antenna_rx_mpdus, 1178 sizeof(peer_signal->per_ant_rx_mpdus)); 1179 qdf_mem_copy(peer_signal->per_ant_tx_mpdus, 1180 wmi_peer_signal->per_antenna_tx_mpdus, 1181 sizeof(peer_signal->per_ant_tx_mpdus)); 1182 result_size -= dst_len; 1183 } else { 1184 WMA_LOGE(FL("Invalid length of PEER signal.")); 1185 } 1186 wmi_peer_signal++; 1187 } 1188 1189 result += peer_num * sizeof(struct sir_wifi_ll_ext_peer_stats); 1190 cca_stats = (struct sir_wifi_chan_cca_stats *)result; 1191 ll_stats->cca = cca_stats; 1192 dst_len = sizeof(struct sir_wifi_chan_cca_stats); 1193 for (i = 0; i < ll_stats->channel_num; i++) { 1194 if (dst_len <= result_size) { 1195 qdf_mem_copy(&cca_stats[i], &wmi_cca_stats->vdev_id, 1196 dst_len); 1197 result_size -= dst_len; 1198 } else { 1199 WMA_LOGE(FL("Invalid length of CCA.")); 1200 } 1201 } 1202 1203 result += i * sizeof(struct sir_wifi_chan_cca_stats); 1204 wma_fill_tx_stats(ll_stats, fixed_param, param_buf, 1205 &result, &result_size); 1206 wma_fill_rx_stats(ll_stats, fixed_param, param_buf, 1207 &result, &result_size); 1208 sme_msg.type = eWMI_SME_LL_STATS_IND; 1209 sme_msg.bodyptr = (void *)link_stats_results; 1210 sme_msg.bodyval = 0; 1211 qdf_status = scheduler_post_msg(QDF_MODULE_ID_SME, &sme_msg); 1212 if (!QDF_IS_STATUS_SUCCESS(qdf_status)) { 1213 WMA_LOGP(FL("Failed to post peer stat change msg!")); 1214 qdf_mem_free(link_stats_results); 1215 return -EINVAL; 1216 } 1217 1218 return 0; 1219 } 1220 1221 /** 1222 * wma_unified_link_peer_stats_event_handler() - peer stats event handler 1223 * @handle: wma handle 1224 * @cmd_param_info: data received with event from fw 1225 * @len: length of data 1226 * 1227 * Return: 0 
for success or error code 1228 */ 1229 static int wma_unified_link_peer_stats_event_handler(void *handle, 1230 uint8_t *cmd_param_info, 1231 uint32_t len) 1232 { 1233 WMI_PEER_LINK_STATS_EVENTID_param_tlvs *param_tlvs; 1234 wmi_peer_stats_event_fixed_param *fixed_param; 1235 wmi_peer_link_stats *peer_stats, *temp_peer_stats; 1236 wmi_rate_stats *rate_stats; 1237 tSirLLStatsResults *link_stats_results; 1238 uint8_t *results, *t_peer_stats, *t_rate_stats; 1239 uint32_t count, rate_cnt; 1240 uint32_t total_num_rates = 0; 1241 uint32_t next_res_offset, next_peer_offset, next_rate_offset; 1242 size_t peer_info_size, peer_stats_size, rate_stats_size; 1243 size_t link_stats_results_size; 1244 bool excess_data = false; 1245 uint32_t buf_len; 1246 1247 tpAniSirGlobal pMac = cds_get_context(QDF_MODULE_ID_PE); 1248 1249 if (!pMac) { 1250 WMA_LOGD("%s: NULL pMac ptr. Exiting", __func__); 1251 return -EINVAL; 1252 } 1253 1254 if (!pMac->sme.pLinkLayerStatsIndCallback) { 1255 WMA_LOGD("%s: HDD callback is null", __func__); 1256 return -EINVAL; 1257 } 1258 1259 param_tlvs = (WMI_PEER_LINK_STATS_EVENTID_param_tlvs *) cmd_param_info; 1260 if (!param_tlvs) { 1261 WMA_LOGA("%s: Invalid stats event", __func__); 1262 return -EINVAL; 1263 } 1264 /* 1265 * cmd_param_info contains 1266 * wmi_peer_stats_event_fixed_param fixed_param; 1267 * num_peers * size of(struct wmi_peer_link_stats) 1268 * total_num_rates * size of(struct wmi_rate_stats) 1269 * total_num_rates is the sum of the rates of all the peers. 
1270 */ 1271 fixed_param = param_tlvs->fixed_param; 1272 peer_stats = param_tlvs->peer_stats; 1273 rate_stats = param_tlvs->peer_rate_stats; 1274 1275 if (!fixed_param || !peer_stats || 1276 (peer_stats->num_rates && !rate_stats)) { 1277 WMA_LOGA("%s: Invalid param_tlvs for Peer Stats", __func__); 1278 return -EINVAL; 1279 } 1280 1281 do { 1282 if (fixed_param->num_peers > 1283 WMI_SVC_MSG_MAX_SIZE/sizeof(wmi_peer_link_stats) || 1284 fixed_param->num_peers > param_tlvs->num_peer_stats) { 1285 excess_data = true; 1286 break; 1287 } else { 1288 buf_len = fixed_param->num_peers * 1289 sizeof(wmi_peer_link_stats); 1290 } 1291 temp_peer_stats = (wmi_peer_link_stats *) peer_stats; 1292 for (count = 0; count < fixed_param->num_peers; count++) { 1293 if (temp_peer_stats->num_rates > 1294 WMI_SVC_MSG_MAX_SIZE / sizeof(wmi_rate_stats)) { 1295 excess_data = true; 1296 break; 1297 } else { 1298 total_num_rates += temp_peer_stats->num_rates; 1299 if (total_num_rates > 1300 WMI_SVC_MSG_MAX_SIZE / 1301 sizeof(wmi_rate_stats) || total_num_rates > 1302 param_tlvs->num_peer_rate_stats) { 1303 excess_data = true; 1304 break; 1305 } 1306 buf_len += temp_peer_stats->num_rates * 1307 sizeof(wmi_rate_stats); 1308 } 1309 temp_peer_stats++; 1310 } 1311 } while (0); 1312 1313 if (excess_data || 1314 (sizeof(*fixed_param) > WMI_SVC_MSG_MAX_SIZE - buf_len)) { 1315 WMA_LOGE("excess wmi buffer: rates:%d, peers:%d", 1316 peer_stats->num_rates, fixed_param->num_peers); 1317 return -EINVAL; 1318 } 1319 1320 peer_stats_size = sizeof(tSirWifiPeerStat); 1321 peer_info_size = sizeof(tSirWifiPeerInfo); 1322 rate_stats_size = sizeof(tSirWifiRateStat); 1323 link_stats_results_size = 1324 sizeof(*link_stats_results) + peer_stats_size + 1325 (fixed_param->num_peers * peer_info_size) + 1326 (total_num_rates * rate_stats_size); 1327 1328 link_stats_results = qdf_mem_malloc(link_stats_results_size); 1329 if (NULL == link_stats_results) { 1330 WMA_LOGD("%s: could not allocate mem for stats results-len %zu", 
1331 __func__, link_stats_results_size); 1332 return -ENOMEM; 1333 } 1334 1335 qdf_mem_zero(link_stats_results, link_stats_results_size); 1336 1337 link_stats_results->paramId = WMI_LINK_STATS_ALL_PEER; 1338 link_stats_results->rspId = fixed_param->request_id; 1339 link_stats_results->ifaceId = 0; 1340 link_stats_results->num_peers = fixed_param->num_peers; 1341 link_stats_results->peer_event_number = fixed_param->peer_event_number; 1342 link_stats_results->moreResultToFollow = fixed_param->more_data; 1343 1344 qdf_mem_copy(link_stats_results->results, 1345 &fixed_param->num_peers, peer_stats_size); 1346 1347 results = (uint8_t *) link_stats_results->results; 1348 t_peer_stats = (uint8_t *) peer_stats; 1349 t_rate_stats = (uint8_t *) rate_stats; 1350 next_res_offset = peer_stats_size; 1351 next_peer_offset = WMI_TLV_HDR_SIZE; 1352 next_rate_offset = WMI_TLV_HDR_SIZE; 1353 for (rate_cnt = 0; rate_cnt < fixed_param->num_peers; rate_cnt++) { 1354 qdf_mem_copy(results + next_res_offset, 1355 t_peer_stats + next_peer_offset, peer_info_size); 1356 next_res_offset += peer_info_size; 1357 1358 /* Copy rate stats associated with this peer */ 1359 for (count = 0; count < peer_stats->num_rates; count++) { 1360 rate_stats++; 1361 1362 qdf_mem_copy(results + next_res_offset, 1363 t_rate_stats + next_rate_offset, 1364 rate_stats_size); 1365 next_res_offset += rate_stats_size; 1366 next_rate_offset += sizeof(*rate_stats); 1367 } 1368 next_peer_offset += sizeof(*peer_stats); 1369 peer_stats++; 1370 } 1371 1372 /* call hdd callback with Link Layer Statistics 1373 * vdev_id/ifacId in link_stats_results will be 1374 * used to retrieve the correct HDD context 1375 */ 1376 pMac->sme.pLinkLayerStatsIndCallback(pMac->hHdd, 1377 WMA_LINK_LAYER_STATS_RESULTS_RSP, 1378 link_stats_results); 1379 qdf_mem_free(link_stats_results); 1380 1381 return 0; 1382 } 1383 1384 /** 1385 * wma_unified_radio_tx_mem_free() - Free radio tx power stats memory 1386 * @handle: WMI handle 1387 * 1388 * Return: 0 
on success, error number otherwise. 1389 */ 1390 int wma_unified_radio_tx_mem_free(void *handle) 1391 { 1392 tp_wma_handle wma_handle = (tp_wma_handle) handle; 1393 tSirWifiRadioStat *rs_results; 1394 uint32_t i = 0; 1395 1396 if (!wma_handle->link_stats_results) 1397 return 0; 1398 1399 rs_results = (tSirWifiRadioStat *) 1400 &wma_handle->link_stats_results->results[0]; 1401 for (i = 0; i < wma_handle->link_stats_results->num_radio; i++) { 1402 rs_results += i; 1403 if (rs_results->tx_time_per_power_level) { 1404 qdf_mem_free(rs_results->tx_time_per_power_level); 1405 rs_results->tx_time_per_power_level = NULL; 1406 } 1407 1408 if (rs_results->channels) { 1409 qdf_mem_free(rs_results->channels); 1410 rs_results->channels = NULL; 1411 } 1412 } 1413 1414 qdf_mem_free(wma_handle->link_stats_results); 1415 wma_handle->link_stats_results = NULL; 1416 1417 return 0; 1418 } 1419 1420 /** 1421 * wma_unified_radio_tx_power_level_stats_event_handler() - tx power level stats 1422 * @handle: WMI handle 1423 * @cmd_param_info: command param info 1424 * @len: Length of @cmd_param_info 1425 * 1426 * This is the WMI event handler function to receive radio stats tx 1427 * power level stats. 1428 * 1429 * Return: 0 on success, error number otherwise. 1430 */ 1431 static int wma_unified_radio_tx_power_level_stats_event_handler(void *handle, 1432 u_int8_t *cmd_param_info, u_int32_t len) 1433 { 1434 tp_wma_handle wma_handle = (tp_wma_handle) handle; 1435 WMI_RADIO_TX_POWER_LEVEL_STATS_EVENTID_param_tlvs *param_tlvs; 1436 wmi_tx_power_level_stats_evt_fixed_param *fixed_param; 1437 uint8_t *tx_power_level_values; 1438 tSirLLStatsResults *link_stats_results; 1439 tSirWifiRadioStat *rs_results; 1440 uint32_t max_total_num_tx_power_levels = MAX_TPC_LEVELS * NUM_OF_BANDS * 1441 MAX_SPATIAL_STREAM_ANY_V3; 1442 1443 tpAniSirGlobal mac = cds_get_context(QDF_MODULE_ID_PE); 1444 1445 if (!mac) { 1446 WMA_LOGD("%s: NULL pMac ptr. 
Exiting", __func__); 1447 return -EINVAL; 1448 } 1449 1450 if (!mac->sme.pLinkLayerStatsIndCallback) { 1451 WMA_LOGD("%s: HDD callback is null", __func__); 1452 return -EINVAL; 1453 } 1454 1455 param_tlvs = (WMI_RADIO_TX_POWER_LEVEL_STATS_EVENTID_param_tlvs *) 1456 cmd_param_info; 1457 if (!param_tlvs) { 1458 WMA_LOGA("%s: Invalid tx power level stats event", __func__); 1459 return -EINVAL; 1460 } 1461 1462 fixed_param = param_tlvs->fixed_param; 1463 if (!fixed_param) { 1464 WMA_LOGA("%s:Invalid param_tlvs for Radio tx_power level Stats", 1465 __func__); 1466 return -EINVAL; 1467 } 1468 1469 link_stats_results = wma_handle->link_stats_results; 1470 if (!link_stats_results) { 1471 WMA_LOGA("%s: link_stats_results is NULL", __func__); 1472 return -EINVAL; 1473 } 1474 1475 WMA_LOGD("%s: tot_num_tx_pwr_lvls: %u num_tx_pwr_lvls: %u pwr_lvl_offset: %u radio_id: %u", 1476 __func__, fixed_param->total_num_tx_power_levels, 1477 fixed_param->num_tx_power_levels, 1478 fixed_param->power_level_offset, 1479 fixed_param->radio_id); 1480 1481 if (fixed_param->num_tx_power_levels > ((WMI_SVC_MSG_MAX_SIZE - 1482 sizeof(*fixed_param)) / sizeof(uint32_t)) || 1483 fixed_param->num_tx_power_levels > 1484 param_tlvs->num_tx_time_per_power_level) { 1485 WMA_LOGE("%s: excess tx_power buffers:%d, num_tx_time_per_power_level:%d", 1486 __func__, fixed_param->num_tx_power_levels, 1487 param_tlvs->num_tx_time_per_power_level); 1488 return -EINVAL; 1489 } 1490 1491 if (fixed_param->radio_id >= link_stats_results->num_radio) { 1492 WMA_LOGE("%s: Invalid radio_id %d num_radio %d", 1493 __func__, fixed_param->radio_id, 1494 link_stats_results->num_radio); 1495 return -EINVAL; 1496 } 1497 1498 if (fixed_param->total_num_tx_power_levels > 1499 max_total_num_tx_power_levels) { 1500 WMA_LOGD("Invalid total_num_tx_power_levels %d", 1501 fixed_param->total_num_tx_power_levels); 1502 return -EINVAL; 1503 } 1504 1505 rs_results = (tSirWifiRadioStat *) &link_stats_results->results[0] + 1506 
fixed_param->radio_id; 1507 tx_power_level_values = (uint8_t *) param_tlvs->tx_time_per_power_level; 1508 1509 rs_results->total_num_tx_power_levels = 1510 fixed_param->total_num_tx_power_levels; 1511 if (!rs_results->total_num_tx_power_levels) { 1512 link_stats_results->nr_received++; 1513 goto post_stats; 1514 } 1515 1516 if ((fixed_param->power_level_offset > 1517 rs_results->total_num_tx_power_levels) || 1518 (fixed_param->num_tx_power_levels > 1519 rs_results->total_num_tx_power_levels - 1520 fixed_param->power_level_offset)) { 1521 WMA_LOGE("%s: Invalid offset %d total_num %d num %d", 1522 __func__, fixed_param->power_level_offset, 1523 rs_results->total_num_tx_power_levels, 1524 fixed_param->num_tx_power_levels); 1525 return -EINVAL; 1526 } 1527 1528 if (!rs_results->tx_time_per_power_level) { 1529 rs_results->tx_time_per_power_level = qdf_mem_malloc( 1530 sizeof(uint32_t) * 1531 rs_results->total_num_tx_power_levels); 1532 if (!rs_results->tx_time_per_power_level) { 1533 WMA_LOGA("%s: Mem alloc fail for tx power level stats", 1534 __func__); 1535 /* In error case, atleast send the radio stats without 1536 * tx_power_level stats */ 1537 rs_results->total_num_tx_power_levels = 0; 1538 link_stats_results->nr_received++; 1539 goto post_stats; 1540 } 1541 } 1542 qdf_mem_copy(&rs_results->tx_time_per_power_level[ 1543 fixed_param->power_level_offset], 1544 tx_power_level_values, 1545 sizeof(uint32_t) * fixed_param->num_tx_power_levels); 1546 if (rs_results->total_num_tx_power_levels == 1547 (fixed_param->num_tx_power_levels + 1548 fixed_param->power_level_offset)) { 1549 link_stats_results->moreResultToFollow = 0; 1550 link_stats_results->nr_received++; 1551 } 1552 1553 WMA_LOGD("%s: moreResultToFollow: %u nr: %u nr_received: %u", 1554 __func__, link_stats_results->moreResultToFollow, 1555 link_stats_results->num_radio, 1556 link_stats_results->nr_received); 1557 1558 /* If still data to receive, return from here */ 1559 if 
(link_stats_results->moreResultToFollow) 1560 return 0; 1561 1562 post_stats: 1563 if (link_stats_results->num_radio != link_stats_results->nr_received) { 1564 /* Not received all radio stats yet, don't post yet */ 1565 return 0; 1566 } 1567 1568 /* call hdd callback with Link Layer Statistics 1569 * vdev_id/ifacId in link_stats_results will be 1570 * used to retrieve the correct HDD context 1571 */ 1572 mac->sme.pLinkLayerStatsIndCallback(mac->hHdd, 1573 WMA_LINK_LAYER_STATS_RESULTS_RSP, 1574 link_stats_results); 1575 wma_unified_radio_tx_mem_free(handle); 1576 1577 return 0; 1578 } 1579 1580 /** 1581 * wma_unified_link_radio_stats_event_handler() - radio link stats event handler 1582 * @handle: wma handle 1583 * @cmd_param_info: data received with event from fw 1584 * @len: length of data 1585 * 1586 * Return: 0 for success or error code 1587 */ 1588 static int wma_unified_link_radio_stats_event_handler(void *handle, 1589 uint8_t *cmd_param_info, 1590 uint32_t len) 1591 { 1592 tp_wma_handle wma_handle = (tp_wma_handle) handle; 1593 WMI_RADIO_LINK_STATS_EVENTID_param_tlvs *param_tlvs; 1594 wmi_radio_link_stats_event_fixed_param *fixed_param; 1595 wmi_radio_link_stats *radio_stats; 1596 wmi_channel_stats *channel_stats; 1597 tSirLLStatsResults *link_stats_results; 1598 uint8_t *results, *t_radio_stats, *t_channel_stats; 1599 uint32_t next_chan_offset, count; 1600 size_t radio_stats_size, chan_stats_size; 1601 size_t link_stats_results_size; 1602 tSirWifiRadioStat *rs_results; 1603 tSirWifiChannelStats *chn_results; 1604 1605 tpAniSirGlobal pMac = cds_get_context(QDF_MODULE_ID_PE); 1606 1607 if (!pMac) { 1608 WMA_LOGD("%s: NULL pMac ptr. 
Exiting", __func__); 1609 return -EINVAL; 1610 } 1611 1612 if (!pMac->sme.pLinkLayerStatsIndCallback) { 1613 WMA_LOGD("%s: HDD callback is null", __func__); 1614 return -EINVAL; 1615 } 1616 1617 param_tlvs = (WMI_RADIO_LINK_STATS_EVENTID_param_tlvs *) cmd_param_info; 1618 if (!param_tlvs) { 1619 WMA_LOGA("%s: Invalid stats event", __func__); 1620 return -EINVAL; 1621 } 1622 1623 /* 1624 * cmd_param_info contains 1625 * wmi_radio_link_stats_event_fixed_param fixed_param; 1626 * size of(struct wmi_radio_link_stats); 1627 * num_channels * size of(struct wmi_channel_stats) 1628 */ 1629 fixed_param = param_tlvs->fixed_param; 1630 radio_stats = param_tlvs->radio_stats; 1631 channel_stats = param_tlvs->channel_stats; 1632 1633 if (!fixed_param || !radio_stats || 1634 (radio_stats->num_channels && !channel_stats)) { 1635 WMA_LOGA("%s: Invalid param_tlvs for Radio Stats", __func__); 1636 return -EINVAL; 1637 } 1638 if (radio_stats->num_channels > 1639 (NUM_24GHZ_CHANNELS + NUM_5GHZ_CHANNELS) || 1640 radio_stats->num_channels > param_tlvs->num_channel_stats) { 1641 WMA_LOGE("%s: Too many channels %d", 1642 __func__, radio_stats->num_channels); 1643 return -EINVAL; 1644 } 1645 1646 radio_stats_size = sizeof(tSirWifiRadioStat); 1647 chan_stats_size = sizeof(tSirWifiChannelStats); 1648 if (fixed_param->num_radio > 1649 (UINT_MAX - sizeof(*link_stats_results))/radio_stats_size) { 1650 WMA_LOGE("excess num_radio %d is leading to int overflow", 1651 fixed_param->num_radio); 1652 return -EINVAL; 1653 } 1654 link_stats_results_size = sizeof(*link_stats_results) + 1655 fixed_param->num_radio * radio_stats_size; 1656 1657 if (radio_stats->radio_id >= fixed_param->num_radio) { 1658 WMA_LOGE("%s, invalid radio id:%d, num radio:%d", 1659 __func__, radio_stats->radio_id, 1660 fixed_param->num_radio); 1661 return -EINVAL; 1662 } 1663 1664 if (!wma_handle->link_stats_results) { 1665 wma_handle->link_stats_results = qdf_mem_malloc( 1666 link_stats_results_size); 1667 if (NULL == 
wma_handle->link_stats_results) { 1668 WMA_LOGD("%s: could not allocate mem for stats results-len %zu", 1669 __func__, link_stats_results_size); 1670 return -ENOMEM; 1671 } 1672 } 1673 link_stats_results = wma_handle->link_stats_results; 1674 1675 WMA_LOGD("Radio stats Fixed Param:"); 1676 WMA_LOGD("req_id: %u num_radio: %u more_radio_events: %u", 1677 fixed_param->request_id, fixed_param->num_radio, 1678 fixed_param->more_radio_events); 1679 1680 WMA_LOGD("Radio Info: radio_id: %u on_time: %u tx_time: %u rx_time: %u on_time_scan: %u", 1681 radio_stats->radio_id, radio_stats->on_time, 1682 radio_stats->tx_time, radio_stats->rx_time, 1683 radio_stats->on_time_scan); 1684 WMA_LOGD("on_time_nbd: %u on_time_gscan: %u on_time_roam_scan: %u", 1685 radio_stats->on_time_nbd, 1686 radio_stats->on_time_gscan, radio_stats->on_time_roam_scan); 1687 WMA_LOGD("on_time_pno_scan: %u on_time_hs20: %u num_channels: %u", 1688 radio_stats->on_time_pno_scan, radio_stats->on_time_hs20, 1689 radio_stats->num_channels); 1690 WMA_LOGD("on_time_host_scan: %u, on_time_lpi_scan: %u", 1691 radio_stats->on_time_host_scan, radio_stats->on_time_lpi_scan); 1692 1693 link_stats_results->paramId = WMI_LINK_STATS_RADIO; 1694 link_stats_results->rspId = fixed_param->request_id; 1695 link_stats_results->ifaceId = 0; 1696 link_stats_results->num_radio = fixed_param->num_radio; 1697 link_stats_results->peer_event_number = 0; 1698 1699 /* 1700 * Backward compatibility: 1701 * There are firmware(s) which will send Radio stats only with 1702 * more_radio_events set to 0 and firmware which sends Radio stats 1703 * followed by tx_power level stats with more_radio_events set to 1. 1704 * if more_radio_events is set to 1, buffer the radio stats and 1705 * wait for tx_power_level stats. 
1706 */ 1707 link_stats_results->moreResultToFollow = fixed_param->more_radio_events; 1708 1709 results = (uint8_t *) link_stats_results->results; 1710 t_radio_stats = (uint8_t *) radio_stats; 1711 t_channel_stats = (uint8_t *) channel_stats; 1712 1713 rs_results = (tSirWifiRadioStat *) &results[0] + radio_stats->radio_id; 1714 rs_results->radio = radio_stats->radio_id; 1715 rs_results->onTime = radio_stats->on_time; 1716 rs_results->txTime = radio_stats->tx_time; 1717 rs_results->rxTime = radio_stats->rx_time; 1718 rs_results->onTimeScan = radio_stats->on_time_scan; 1719 rs_results->onTimeNbd = radio_stats->on_time_nbd; 1720 rs_results->onTimeGscan = radio_stats->on_time_gscan; 1721 rs_results->onTimeRoamScan = radio_stats->on_time_roam_scan; 1722 rs_results->onTimePnoScan = radio_stats->on_time_pno_scan; 1723 rs_results->onTimeHs20 = radio_stats->on_time_hs20; 1724 rs_results->total_num_tx_power_levels = 0; 1725 if (rs_results->tx_time_per_power_level) { 1726 qdf_mem_free(rs_results->tx_time_per_power_level); 1727 rs_results->tx_time_per_power_level = NULL; 1728 } 1729 if (rs_results->channels) { 1730 qdf_mem_free(rs_results->channels); 1731 rs_results->channels = NULL; 1732 } 1733 rs_results->numChannels = radio_stats->num_channels; 1734 rs_results->on_time_host_scan = radio_stats->on_time_host_scan; 1735 rs_results->on_time_lpi_scan = radio_stats->on_time_lpi_scan; 1736 if (rs_results->numChannels) { 1737 rs_results->channels = (tSirWifiChannelStats *) qdf_mem_malloc( 1738 radio_stats->num_channels * 1739 chan_stats_size); 1740 if (rs_results->channels == NULL) { 1741 WMA_LOGD("%s: could not allocate mem for channel stats (size=%zu)", 1742 __func__, radio_stats->num_channels * chan_stats_size); 1743 wma_unified_radio_tx_mem_free(handle); 1744 return -ENOMEM; 1745 } 1746 1747 chn_results = (tSirWifiChannelStats *) &rs_results->channels[0]; 1748 next_chan_offset = WMI_TLV_HDR_SIZE; 1749 WMA_LOGD("Channel Stats Info"); 1750 for (count = 0; count < 
radio_stats->num_channels; count++) { 1751 WMA_LOGD("channel_width %u center_freq %u center_freq0 %u", 1752 channel_stats->channel_width, 1753 channel_stats->center_freq, 1754 channel_stats->center_freq0); 1755 WMA_LOGD("center_freq1 %u radio_awake_time %u cca_busy_time %u", 1756 channel_stats->center_freq1, 1757 channel_stats->radio_awake_time, 1758 channel_stats->cca_busy_time); 1759 channel_stats++; 1760 1761 qdf_mem_copy(chn_results, 1762 t_channel_stats + next_chan_offset, 1763 chan_stats_size); 1764 chn_results++; 1765 next_chan_offset += sizeof(*channel_stats); 1766 } 1767 } 1768 1769 if (link_stats_results->moreResultToFollow) { 1770 /* More results coming, don't post yet */ 1771 return 0; 1772 } 1773 link_stats_results->nr_received++; 1774 1775 if (link_stats_results->num_radio != link_stats_results->nr_received) { 1776 /* Not received all radio stats yet, don't post yet */ 1777 return 0; 1778 } 1779 1780 pMac->sme.pLinkLayerStatsIndCallback(pMac->hHdd, 1781 WMA_LINK_LAYER_STATS_RESULTS_RSP, 1782 link_stats_results); 1783 wma_unified_radio_tx_mem_free(handle); 1784 1785 return 0; 1786 } 1787 1788 #ifdef WLAN_PEER_PS_NOTIFICATION 1789 /** 1790 * wma_peer_ps_evt_handler() - handler for PEER power state change. 1791 * @handle: wma handle 1792 * @event: FW event 1793 * @len: length of FW event 1794 * 1795 * Once peer STA power state changes, an event will be indicated by 1796 * FW. This function send a link layer state change msg to HDD. HDD 1797 * link layer callback will converts the event to NL msg. 1798 * 1799 * Return: 0 Success. Others fail. 
1800 */ 1801 static int wma_peer_ps_evt_handler(void *handle, u_int8_t *event, 1802 u_int32_t len) 1803 { 1804 WMI_PEER_STA_PS_STATECHG_EVENTID_param_tlvs *param_buf; 1805 wmi_peer_sta_ps_statechange_event_fixed_param *fixed_param; 1806 tSirWifiPeerStat *peer_stat; 1807 tSirWifiPeerInfo *peer_info; 1808 tSirLLStatsResults *link_stats_results; 1809 tSirMacAddr mac_address; 1810 uint32_t result_len; 1811 cds_msg_t sme_msg = { 0 }; 1812 QDF_STATUS qdf_status = QDF_STATUS_SUCCESS; 1813 1814 tpAniSirGlobal mac = cds_get_context(QDF_MODULE_ID_PE); 1815 1816 if (!mac) { 1817 WMA_LOGD("%s: NULL mac ptr. Exiting", __func__); 1818 return -EINVAL; 1819 } 1820 1821 if (!mac->sme.link_layer_stats_ext_cb) { 1822 WMA_LOGD("%s: HDD callback is null", __func__); 1823 return -EINVAL; 1824 } 1825 1826 WMA_LOGD("%s: Posting Peer Stats PS event to HDD", __func__); 1827 1828 param_buf = (WMI_PEER_STA_PS_STATECHG_EVENTID_param_tlvs *)event; 1829 fixed_param = param_buf->fixed_param; 1830 1831 result_len = sizeof(tSirLLStatsResults) + 1832 sizeof(tSirWifiPeerStat) + 1833 sizeof(tSirWifiPeerInfo); 1834 link_stats_results = qdf_mem_malloc(result_len); 1835 if (link_stats_results == NULL) { 1836 WMA_LOGE("%s: Cannot allocate link layer stats.", __func__); 1837 return -EINVAL; 1838 } 1839 1840 WMI_MAC_ADDR_TO_CHAR_ARRAY(&fixed_param->peer_macaddr, &mac_address[0]); 1841 WMA_LOGD("Peer power state change event from FW"); 1842 WMA_LOGD("Fixed Param:"); 1843 WMA_LOGD("MAC address: %2x:%2x:%2x:%2x:%2x:%2x, Power state: %d", 1844 mac_address[0], mac_address[1], mac_address[2], 1845 mac_address[3], mac_address[4], mac_address[5], 1846 fixed_param->peer_ps_state); 1847 1848 link_stats_results->paramId = WMI_LL_STATS_EXT_PS_CHG; 1849 link_stats_results->num_peers = 1; 1850 link_stats_results->peer_event_number = 1; 1851 link_stats_results->moreResultToFollow = 0; 1852 1853 peer_stat = (tSirWifiPeerStat *)link_stats_results->results; 1854 peer_stat->numPeers = 1; 1855 peer_info = (tSirWifiPeerInfo 
*)peer_stat->peerInfo; 1856 qdf_mem_copy(&peer_info->peerMacAddress, 1857 &mac_address, 1858 sizeof(tSirMacAddr)); 1859 peer_info->power_saving = fixed_param->peer_ps_state; 1860 1861 sme_msg.type = eWMI_SME_LL_STATS_IND; 1862 sme_msg.bodyptr = link_stats_results; 1863 sme_msg.bodyval = 0; 1864 1865 qdf_status = scheduler_post_msg(QDF_MODULE_ID_SME, &sme_msg); 1866 if (!QDF_IS_STATUS_SUCCESS(qdf_status)) { 1867 WMA_LOGE("%s: Fail to post ps change ind msg", __func__); 1868 qdf_mem_free(link_stats_results); 1869 } 1870 1871 return 0; 1872 } 1873 #else 1874 /** 1875 * wma_peer_ps_evt_handler() - handler for PEER power state change. 1876 * @handle: wma handle 1877 * @event: FW event 1878 * @len: length of FW event 1879 * 1880 * Once peer STA power state changes, an event will be indicated by 1881 * FW. This function send a link layer state change msg to HDD. HDD 1882 * link layer callback will converts the event to NL msg. 1883 * 1884 * Return: 0 Success. Others fail. 1885 */ 1886 static inline int wma_peer_ps_evt_handler(void *handle, u_int8_t *event, 1887 u_int32_t len) 1888 { 1889 return 0; 1890 } 1891 #endif 1892 1893 /** 1894 * wma_register_ll_stats_event_handler() - register link layer stats related 1895 * event handler 1896 * @wma_handle: wma handle 1897 * 1898 * Return: none 1899 */ 1900 void wma_register_ll_stats_event_handler(tp_wma_handle wma_handle) 1901 { 1902 if (NULL == wma_handle) { 1903 WMA_LOGE("%s: wma_handle is NULL", __func__); 1904 return; 1905 } 1906 1907 wmi_unified_register_event_handler(wma_handle->wmi_handle, 1908 wmi_iface_link_stats_event_id, 1909 wma_unified_link_iface_stats_event_handler, 1910 WMA_RX_SERIALIZER_CTX); 1911 wmi_unified_register_event_handler(wma_handle->wmi_handle, 1912 wmi_peer_link_stats_event_id, 1913 wma_unified_link_peer_stats_event_handler, 1914 WMA_RX_SERIALIZER_CTX); 1915 wmi_unified_register_event_handler(wma_handle->wmi_handle, 1916 wmi_radio_link_stats_link, 1917 wma_unified_link_radio_stats_event_handler, 1918 
WMA_RX_SERIALIZER_CTX); 1919 wmi_unified_register_event_handler(wma_handle->wmi_handle, 1920 wmi_radio_tx_power_level_stats_event_id, 1921 wma_unified_radio_tx_power_level_stats_event_handler, 1922 WMA_RX_SERIALIZER_CTX); 1923 wmi_unified_register_event_handler(wma_handle->wmi_handle, 1924 wmi_peer_sta_ps_statechg_event_id, 1925 wma_peer_ps_evt_handler, 1926 WMA_RX_SERIALIZER_CTX); 1927 wmi_unified_register_event_handler(wma_handle->wmi_handle, 1928 wmi_report_stats_event_id, 1929 wma_ll_stats_evt_handler, 1930 WMA_RX_SERIALIZER_CTX); 1931 1932 } 1933 1934 1935 /** 1936 * wma_process_ll_stats_clear_req() - clear link layer stats 1937 * @wma: wma handle 1938 * @clearReq: ll stats clear request command params 1939 * 1940 * Return: QDF_STATUS_SUCCESS for success or error code 1941 */ 1942 QDF_STATUS wma_process_ll_stats_clear_req(tp_wma_handle wma, 1943 const tpSirLLStatsClearReq clearReq) 1944 { 1945 struct ll_stats_clear_params cmd = {0}; 1946 int ret; 1947 1948 if (!clearReq || !wma) { 1949 WMA_LOGE("%s: input pointer is NULL", __func__); 1950 return QDF_STATUS_E_FAILURE; 1951 } 1952 1953 if (!wma->interfaces[clearReq->staId].handle) { 1954 WMA_LOGE("%s: vdev_id %d handle is NULL", 1955 __func__, clearReq->staId); 1956 return QDF_STATUS_E_FAILURE; 1957 } 1958 1959 cmd.stop_req = clearReq->stopReq; 1960 cmd.sta_id = clearReq->staId; 1961 cmd.stats_clear_mask = clearReq->statsClearReqMask; 1962 1963 ret = wmi_unified_process_ll_stats_clear_cmd(wma->wmi_handle, &cmd, 1964 wma->interfaces[clearReq->staId].addr); 1965 if (ret) { 1966 WMA_LOGE("%s: Failed to send clear link stats req", __func__); 1967 return QDF_STATUS_E_FAILURE; 1968 } 1969 1970 return QDF_STATUS_SUCCESS; 1971 } 1972 1973 /** 1974 * wma_process_ll_stats_set_req() - link layer stats set request 1975 * @wma: wma handle 1976 * @setReq: ll stats set request command params 1977 * 1978 * Return: QDF_STATUS_SUCCESS for success or error code 1979 */ 1980 QDF_STATUS wma_process_ll_stats_set_req(tp_wma_handle 
wma, 1981 const tpSirLLStatsSetReq setReq) 1982 { 1983 struct ll_stats_set_params cmd = {0}; 1984 int ret; 1985 1986 if (!setReq || !wma) { 1987 WMA_LOGE("%s: input pointer is NULL", __func__); 1988 return QDF_STATUS_E_FAILURE; 1989 } 1990 1991 cmd.mpdu_size_threshold = setReq->mpduSizeThreshold; 1992 cmd.aggressive_statistics_gathering = 1993 setReq->aggressiveStatisticsGathering; 1994 1995 ret = wmi_unified_process_ll_stats_set_cmd(wma->wmi_handle, 1996 &cmd); 1997 if (ret) { 1998 WMA_LOGE("%s: Failed to send set link stats request", __func__); 1999 return QDF_STATUS_E_FAILURE; 2000 } 2001 2002 return QDF_STATUS_SUCCESS; 2003 } 2004 2005 /** 2006 * wma_process_ll_stats_get_req() - link layer stats get request 2007 * @wma:wma handle 2008 * @getReq:ll stats get request command params 2009 * 2010 * Return: QDF_STATUS_SUCCESS for success or error code 2011 */ 2012 QDF_STATUS wma_process_ll_stats_get_req(tp_wma_handle wma, 2013 const tpSirLLStatsGetReq getReq) 2014 { 2015 struct ll_stats_get_params cmd = {0}; 2016 int ret; 2017 2018 if (!getReq || !wma) { 2019 WMA_LOGE("%s: input pointer is NULL", __func__); 2020 return QDF_STATUS_E_FAILURE; 2021 } 2022 2023 if (!wma->interfaces[getReq->staId].vdev_active) { 2024 WMA_LOGE("%s: vdev not created yet", __func__); 2025 return QDF_STATUS_E_FAILURE; 2026 } 2027 2028 cmd.req_id = getReq->reqId; 2029 cmd.param_id_mask = getReq->paramIdMask; 2030 cmd.sta_id = getReq->staId; 2031 2032 ret = wmi_unified_process_ll_stats_get_cmd(wma->wmi_handle, &cmd, 2033 wma->interfaces[getReq->staId].addr); 2034 if (ret) { 2035 WMA_LOGE("%s: Failed to send get link stats request", __func__); 2036 return QDF_STATUS_E_FAILURE; 2037 } 2038 2039 return QDF_STATUS_SUCCESS; 2040 } 2041 2042 /** 2043 * wma_unified_link_iface_stats_event_handler() - link iface stats event handler 2044 * @wma:wma handle 2045 * @cmd_param_info: data from event 2046 * @len: length 2047 * 2048 * Return: 0 for success or error code 2049 */ 2050 int 
wma_unified_link_iface_stats_event_handler(void *handle, 2051 uint8_t *cmd_param_info, 2052 uint32_t len) 2053 { 2054 WMI_IFACE_LINK_STATS_EVENTID_param_tlvs *param_tlvs; 2055 wmi_iface_link_stats_event_fixed_param *fixed_param; 2056 wmi_iface_link_stats *link_stats; 2057 wmi_wmm_ac_stats *ac_stats; 2058 wmi_iface_offload_stats *offload_stats; 2059 tSirLLStatsResults *link_stats_results; 2060 uint8_t *results, *t_link_stats, *t_ac_stats, *t_offload_stats; 2061 uint32_t next_res_offset, next_ac_offset, next_offload_offset, count; 2062 uint32_t roaming_offset, size; 2063 size_t link_stats_size, ac_stats_size, iface_info_size; 2064 size_t link_stats_results_size, offload_stats_size; 2065 size_t total_ac_size, total_offload_size; 2066 2067 tpAniSirGlobal pMac = cds_get_context(QDF_MODULE_ID_PE); 2068 2069 if (!pMac) { 2070 WMA_LOGD("%s: NULL pMac ptr. Exiting", __func__); 2071 return -EINVAL; 2072 } 2073 2074 if (!pMac->sme.pLinkLayerStatsIndCallback) { 2075 WMA_LOGD("%s: HDD callback is null", __func__); 2076 return -EINVAL; 2077 } 2078 2079 param_tlvs = (WMI_IFACE_LINK_STATS_EVENTID_param_tlvs *) cmd_param_info; 2080 if (!param_tlvs) { 2081 WMA_LOGA("%s: Invalid stats event", __func__); 2082 return -EINVAL; 2083 } 2084 2085 /* 2086 * cmd_param_info contains 2087 * wmi_iface_link_stats_event_fixed_param fixed_param; 2088 * wmi_iface_link_stats iface_link_stats; 2089 * iface_link_stats->num_ac * size of(struct wmi_wmm_ac_stats) 2090 * fixed_param->num_offload_stats * size of(wmi_iface_offload_stats); 2091 */ 2092 fixed_param = param_tlvs->fixed_param; 2093 link_stats = param_tlvs->iface_link_stats; 2094 ac_stats = param_tlvs->ac; 2095 offload_stats = param_tlvs->iface_offload_stats; 2096 2097 if (!fixed_param || !link_stats || (link_stats->num_ac && !ac_stats) || 2098 (fixed_param->num_offload_stats && !offload_stats)) { 2099 WMA_LOGA("%s: Invalid param_tlvs for Iface Stats", __func__); 2100 return -EINVAL; 2101 } 2102 if (link_stats->num_ac > WIFI_AC_MAX || 
link_stats->num_ac > 2103 param_tlvs->num_ac) { 2104 WMA_LOGE("%s: Excess data received from firmware num_ac %d, param_tlvs->num_ac %d", 2105 __func__, link_stats->num_ac, param_tlvs->num_ac); 2106 return -EINVAL; 2107 } 2108 if (fixed_param->num_offload_stats > WMI_OFFLOAD_STATS_TYPE_MAX || 2109 fixed_param->num_offload_stats > 2110 param_tlvs->num_iface_offload_stats) { 2111 WMA_LOGE("%s: Excess num offload stats recvd from fw: %d, um_iface_offload_stats: %d", 2112 __func__, fixed_param->num_offload_stats, 2113 param_tlvs->num_iface_offload_stats); 2114 return -EINVAL; 2115 } 2116 2117 link_stats_size = sizeof(tSirWifiIfaceStat); 2118 iface_info_size = sizeof(tSirWifiInterfaceInfo); 2119 2120 ac_stats_size = sizeof(tSirWifiWmmAcStat); 2121 offload_stats_size = sizeof(struct wifi_iface_offload_stat); 2122 2123 total_ac_size = ac_stats_size * WIFI_AC_MAX; 2124 total_offload_size = offload_stats_size * WMI_OFFLOAD_STATS_TYPE_MAX + 2125 member_size(tSirWifiIfaceStat, num_offload_stats); 2126 2127 link_stats_results_size = sizeof(*link_stats_results) + link_stats_size; 2128 2129 link_stats_results = qdf_mem_malloc(link_stats_results_size); 2130 if (!link_stats_results) { 2131 WMA_LOGD("%s: could not allocate mem for stats results-len %zu", 2132 __func__, link_stats_results_size); 2133 return -ENOMEM; 2134 } 2135 2136 qdf_mem_zero(link_stats_results, link_stats_results_size); 2137 2138 link_stats_results->paramId = WMI_LINK_STATS_IFACE; 2139 link_stats_results->rspId = fixed_param->request_id; 2140 link_stats_results->ifaceId = fixed_param->vdev_id; 2141 link_stats_results->num_peers = link_stats->num_peers; 2142 link_stats_results->peer_event_number = 0; 2143 link_stats_results->moreResultToFollow = 0; 2144 2145 /* results is copied to tSirWifiIfaceStat in upper layer 2146 * tSirWifiIfaceStat 2147 * - tSirWifiInterfaceInfo (all fields except roaming is 2148 * filled by host in the upper layer) 2149 * - various members of tSirWifiIfaceStat (from wmi_iface_link_stats) 
2150 * - ACs information (from wmi_wmm_ac_stats) 2151 * - num_offload_stats (from fixed param) 2152 * - offload stats (from wmi_iface_offload_stats) 2153 */ 2154 2155 results = (uint8_t *) link_stats_results->results; 2156 t_link_stats = (uint8_t *) link_stats; 2157 t_ac_stats = (uint8_t *) ac_stats; 2158 t_offload_stats = (uint8_t *) offload_stats; 2159 2160 /* Copy roaming state */ 2161 roaming_offset = offsetof(tSirWifiInterfaceInfo, roaming); 2162 size = member_size(tSirWifiInterfaceInfo, roaming); 2163 2164 qdf_mem_copy(results + roaming_offset, &link_stats->roam_state, size); 2165 2166 next_res_offset = iface_info_size; 2167 qdf_mem_copy(results + next_res_offset, 2168 t_link_stats + WMI_TLV_HDR_SIZE, 2169 link_stats_size - iface_info_size - 2170 total_ac_size - total_offload_size); 2171 2172 next_res_offset = link_stats_size - total_ac_size - total_offload_size; 2173 next_ac_offset = WMI_TLV_HDR_SIZE; 2174 2175 for (count = 0; count < link_stats->num_ac; count++) { 2176 ac_stats++; 2177 2178 qdf_mem_copy(results + next_res_offset, 2179 t_ac_stats + next_ac_offset, ac_stats_size); 2180 next_res_offset += ac_stats_size; 2181 next_ac_offset += sizeof(*ac_stats); 2182 } 2183 2184 next_res_offset = link_stats_size - total_offload_size; 2185 /* copy num_offload_stats into result */ 2186 size = member_size(tSirWifiIfaceStat, num_offload_stats); 2187 qdf_mem_copy(results + next_res_offset, &fixed_param->num_offload_stats, 2188 size); 2189 2190 next_res_offset += size; 2191 next_offload_offset = WMI_TLV_HDR_SIZE; 2192 2193 for (count = 0; count < fixed_param->num_offload_stats; count++) { 2194 qdf_mem_copy(results + next_res_offset, 2195 t_offload_stats + next_offload_offset, 2196 offload_stats_size); 2197 next_res_offset += offload_stats_size; 2198 next_offload_offset += sizeof(*offload_stats); 2199 } 2200 2201 /* call hdd callback with Link Layer Statistics 2202 * vdev_id/ifacId in link_stats_results will be 2203 * used to retrieve the correct HDD context 2204 */ 
2205 pMac->sme.pLinkLayerStatsIndCallback(pMac->hHdd, 2206 WMA_LINK_LAYER_STATS_RESULTS_RSP, 2207 link_stats_results); 2208 qdf_mem_free(link_stats_results); 2209 2210 return 0; 2211 } 2212 2213 /** 2214 * wma_config_stats_ext_threshold - set threthold for MAC counters 2215 * @wma: wma handler 2216 * @threshold: threhold for MAC counters 2217 * 2218 * For each MAC layer counter, FW holds two copies. One is the current value. 2219 * The other is the last report. Once a current counter's increment is larger 2220 * than the threshold, FW will indicate that counter to host even if the 2221 * monitoring timer does not expire. 2222 * 2223 * Return: None 2224 */ 2225 void wma_config_stats_ext_threshold(tp_wma_handle wma, 2226 struct sir_ll_ext_stats_threshold *thresh) 2227 { 2228 uint32_t len, tag, hdr_len; 2229 uint8_t *buf_ptr; 2230 wmi_buf_t buf; 2231 wmi_pdev_set_stats_threshold_cmd_fixed_param *cmd; 2232 wmi_chan_cca_stats_thresh *cca; 2233 wmi_peer_signal_stats_thresh *signal; 2234 wmi_tx_stats_thresh *tx; 2235 wmi_rx_stats_thresh *rx; 2236 2237 if (!thresh) { 2238 WMA_LOGE(FL("Invalid threshold input.")); 2239 return; 2240 } 2241 2242 len = sizeof(wmi_pdev_set_stats_threshold_cmd_fixed_param) + 2243 sizeof(wmi_chan_cca_stats_thresh) + 2244 sizeof(wmi_peer_signal_stats_thresh) + 2245 sizeof(wmi_tx_stats_thresh) + 2246 sizeof(wmi_rx_stats_thresh) + 2247 5 * WMI_TLV_HDR_SIZE; 2248 buf = wmi_buf_alloc(wma->wmi_handle, len); 2249 if (!buf) { 2250 WMA_LOGP("%s: wmi_buf_alloc failed", __func__); 2251 return; 2252 } 2253 2254 buf_ptr = (u_int8_t *)wmi_buf_data(buf); 2255 tag = WMITLV_TAG_STRUC_wmi_pdev_set_stats_threshold_cmd_fixed_param; 2256 hdr_len = WMITLV_GET_STRUCT_TLVLEN( 2257 wmi_pdev_set_stats_threshold_cmd_fixed_param); 2258 WMA_LOGD(FL("Setting fixed parameters. 
tag=%d, len=%d"), tag, hdr_len); 2259 cmd = (wmi_pdev_set_stats_threshold_cmd_fixed_param *)buf_ptr; 2260 WMITLV_SET_HDR(&cmd->tlv_header, tag, hdr_len); 2261 cmd->enable_thresh = thresh->enable; 2262 cmd->use_thresh_bitmap = thresh->enable_bitmap; 2263 cmd->gbl_thresh = thresh->global_threshold; 2264 cmd->cca_thresh_enable_bitmap = thresh->cca_bitmap; 2265 cmd->signal_thresh_enable_bitmap = thresh->signal_bitmap; 2266 cmd->tx_thresh_enable_bitmap = thresh->tx_bitmap; 2267 cmd->rx_thresh_enable_bitmap = thresh->rx_bitmap; 2268 len = sizeof(wmi_pdev_set_stats_threshold_cmd_fixed_param); 2269 2270 tag = WMITLV_TAG_STRUC_wmi_chan_cca_stats_thresh, 2271 hdr_len = WMITLV_GET_STRUCT_TLVLEN(wmi_chan_cca_stats_thresh); 2272 cca = (wmi_chan_cca_stats_thresh *)(buf_ptr + len); 2273 WMITLV_SET_HDR(&cca->tlv_header, tag, hdr_len); 2274 WMA_LOGD(FL("Setting cca parameters. tag=%d, len=%d"), tag, hdr_len); 2275 cca->idle_time = thresh->cca.idle_time; 2276 cca->tx_time = thresh->cca.tx_time; 2277 cca->rx_in_bss_time = thresh->cca.rx_in_bss_time; 2278 cca->rx_out_bss_time = thresh->cca.rx_out_bss_time; 2279 cca->rx_busy_time = thresh->cca.rx_busy_time; 2280 cca->rx_in_bad_cond_time = thresh->cca.rx_in_bad_cond_time; 2281 cca->tx_in_bad_cond_time = thresh->cca.tx_in_bad_cond_time; 2282 cca->wlan_not_avail_time = thresh->cca.wlan_not_avail_time; 2283 WMA_LOGD(FL("idle time=%d, tx_time=%d, in_bss=%d, out_bss=%d"), 2284 cca->idle_time, cca->tx_time, 2285 cca->rx_in_bss_time, cca->rx_out_bss_time); 2286 WMA_LOGD(FL("rx_busy=%d, rx_bad=%d, tx_bad=%d, not_avail=%d"), 2287 cca->rx_busy_time, cca->rx_in_bad_cond_time, 2288 cca->tx_in_bad_cond_time, cca->wlan_not_avail_time); 2289 len += sizeof(wmi_chan_cca_stats_thresh); 2290 2291 signal = (wmi_peer_signal_stats_thresh *)(buf_ptr + len); 2292 tag = WMITLV_TAG_STRUC_wmi_peer_signal_stats_thresh; 2293 hdr_len = WMITLV_GET_STRUCT_TLVLEN(wmi_peer_signal_stats_thresh); 2294 WMA_LOGD(FL("Setting signal parameters. 
tag=%d, len=%d"), tag, hdr_len); 2295 WMITLV_SET_HDR(&signal->tlv_header, tag, hdr_len); 2296 signal->per_chain_snr = thresh->signal.snr; 2297 signal->per_chain_nf = thresh->signal.nf; 2298 WMA_LOGD(FL("snr=%d, nf=%d"), signal->per_chain_snr, 2299 signal->per_chain_nf); 2300 len += sizeof(wmi_peer_signal_stats_thresh); 2301 2302 tx = (wmi_tx_stats_thresh *)(buf_ptr + len); 2303 tag = WMITLV_TAG_STRUC_wmi_tx_stats_thresh; 2304 hdr_len = WMITLV_GET_STRUCT_TLVLEN(wmi_tx_stats_thresh); 2305 WMA_LOGD(FL("Setting TX parameters. tag=%d, len=%d"), tag, len); 2306 WMITLV_SET_HDR(&tx->tlv_header, tag, hdr_len); 2307 tx->tx_msdu_cnt = thresh->tx.msdu; 2308 tx->tx_mpdu_cnt = thresh->tx.mpdu; 2309 tx->tx_ppdu_cnt = thresh->tx.ppdu; 2310 tx->tx_bytes = thresh->tx.bytes; 2311 tx->tx_msdu_drop_cnt = thresh->tx.msdu_drop; 2312 tx->tx_drop_bytes = thresh->tx.byte_drop; 2313 tx->tx_mpdu_retry_cnt = thresh->tx.mpdu_retry; 2314 tx->tx_mpdu_fail_cnt = thresh->tx.mpdu_fail; 2315 tx->tx_ppdu_fail_cnt = thresh->tx.ppdu_fail; 2316 tx->tx_mpdu_aggr = thresh->tx.aggregation; 2317 tx->tx_succ_mcs = thresh->tx.succ_mcs; 2318 tx->tx_fail_mcs = thresh->tx.fail_mcs; 2319 tx->tx_ppdu_delay = thresh->tx.delay; 2320 WMA_LOGD(FL("msdu=%d, mpdu=%d, ppdu=%d, bytes=%d, msdu_drop=%d"), 2321 tx->tx_msdu_cnt, tx->tx_mpdu_cnt, tx->tx_ppdu_cnt, 2322 tx->tx_bytes, tx->tx_msdu_drop_cnt); 2323 WMA_LOGD(FL("byte_drop=%d, mpdu_retry=%d, mpdu_fail=%d, ppdu_fail=%d"), 2324 tx->tx_drop_bytes, tx->tx_mpdu_retry_cnt, 2325 tx->tx_mpdu_fail_cnt, tx->tx_ppdu_fail_cnt); 2326 WMA_LOGD(FL("aggr=%d, succ_mcs=%d, fail_mcs=%d, delay=%d"), 2327 tx->tx_mpdu_aggr, tx->tx_succ_mcs, tx->tx_fail_mcs, 2328 tx->tx_ppdu_delay); 2329 len += sizeof(wmi_tx_stats_thresh); 2330 2331 rx = (wmi_rx_stats_thresh *)(buf_ptr + len); 2332 tag = WMITLV_TAG_STRUC_wmi_rx_stats_thresh, 2333 hdr_len = WMITLV_GET_STRUCT_TLVLEN(wmi_rx_stats_thresh); 2334 WMITLV_SET_HDR(&rx->tlv_header, tag, hdr_len); 2335 WMA_LOGD(FL("Setting RX parameters. 
tag=%d, len=%d"), tag, hdr_len); 2336 rx->mac_rx_mpdu_cnt = thresh->rx.mpdu; 2337 rx->mac_rx_bytes = thresh->rx.bytes; 2338 rx->phy_rx_ppdu_cnt = thresh->rx.ppdu; 2339 rx->phy_rx_bytes = thresh->rx.ppdu_bytes; 2340 rx->rx_disorder_cnt = thresh->rx.disorder; 2341 rx->rx_mpdu_retry_cnt = thresh->rx.mpdu_retry; 2342 rx->rx_mpdu_dup_cnt = thresh->rx.mpdu_dup; 2343 rx->rx_mpdu_discard_cnt = thresh->rx.mpdu_discard; 2344 rx->rx_mpdu_aggr = thresh->rx.aggregation; 2345 rx->rx_mcs = thresh->rx.mcs; 2346 rx->sta_ps_inds = thresh->rx.ps_inds; 2347 rx->sta_ps_durs = thresh->rx.ps_durs; 2348 rx->rx_probe_reqs = thresh->rx.probe_reqs; 2349 rx->rx_oth_mgmts = thresh->rx.other_mgmt; 2350 WMA_LOGD(FL("rx_mpdu=%d, rx_bytes=%d, rx_ppdu=%d, rx_pbytes=%d"), 2351 rx->mac_rx_mpdu_cnt, rx->mac_rx_bytes, 2352 rx->phy_rx_ppdu_cnt, rx->phy_rx_bytes); 2353 WMA_LOGD(FL("disorder=%d, rx_dup=%d, rx_aggr=%d, rx_mcs=%d"), 2354 rx->rx_disorder_cnt, rx->rx_mpdu_dup_cnt, 2355 rx->rx_mpdu_aggr, rx->rx_mcs); 2356 WMA_LOGD(FL("rx_ind=%d, rx_dur=%d, rx_probe=%d, rx_mgmt=%d"), 2357 rx->sta_ps_inds, rx->sta_ps_durs, 2358 rx->rx_probe_reqs, rx->rx_oth_mgmts); 2359 len += sizeof(wmi_rx_stats_thresh); 2360 2361 WMA_LOGA("WMA --> WMI_PDEV_SET_STATS_THRESHOLD_CMDID(0x%x), length=%d", 2362 WMI_PDEV_SET_STATS_THRESHOLD_CMDID, len); 2363 if (EOK != wmi_unified_cmd_send(wma->wmi_handle, 2364 buf, len, 2365 WMI_PDEV_SET_STATS_THRESHOLD_CMDID)) { 2366 WMA_LOGE("Failed to send WMI_PDEV_SET_STATS_THRESHOLD_CMDID"); 2367 wmi_buf_free(buf); 2368 } 2369 } 2370 2371 #endif /* WLAN_FEATURE_LINK_LAYER_STATS */ 2372 2373 #ifndef QCA_SUPPORT_CP_STATS 2374 /** 2375 * wma_update_pdev_stats() - update pdev stats 2376 * @wma: wma handle 2377 * @pdev_stats: pdev stats 2378 * 2379 * Return: none 2380 */ 2381 static void wma_update_pdev_stats(tp_wma_handle wma, 2382 wmi_pdev_stats *pdev_stats) 2383 { 2384 tAniGetPEStatsRsp *stats_rsp_params; 2385 uint32_t temp_mask; 2386 uint8_t *stats_buf; 2387 tCsrGlobalClassAStatsInfo 
*classa_stats = NULL; 2388 struct wma_txrx_node *node; 2389 uint8_t i; 2390 2391 for (i = 0; i < wma->max_bssid; i++) { 2392 node = &wma->interfaces[i]; 2393 stats_rsp_params = node->stats_rsp; 2394 if (stats_rsp_params) { 2395 node->fw_stats_set |= FW_PDEV_STATS_SET; 2396 WMA_LOGD("<---FW PDEV STATS received for vdevId:%d", i); 2397 stats_buf = (uint8_t *) (stats_rsp_params + 1); 2398 temp_mask = stats_rsp_params->statsMask; 2399 if (temp_mask & (1 << eCsrSummaryStats)) 2400 stats_buf += sizeof(tCsrSummaryStatsInfo); 2401 2402 if (temp_mask & (1 << eCsrGlobalClassAStats)) { 2403 classa_stats = 2404 (tCsrGlobalClassAStatsInfo *) stats_buf; 2405 classa_stats->max_pwr = pdev_stats->chan_tx_pwr; 2406 } 2407 } 2408 } 2409 } 2410 2411 /** 2412 * wma_vdev_stats_lost_link_helper() - helper function to extract 2413 * lost link information from vdev statistics event while deleting BSS. 2414 * @wma: WMA handle 2415 * @vdev_stats: statistics information from firmware 2416 * 2417 * This is for informing HDD to collect lost link information while 2418 * disconnection. Following conditions to check 2419 * 1. vdev is up 2420 * 2. bssid is zero. When handling DELETE_BSS request message, it sets bssid to 2421 * zero, hence add the check here to indicate the event comes during deleting 2422 * BSS 2423 * 3. DELETE_BSS is the request message queued. 
 * Put this condition check on the
 * last one as it consumes more resource searching entries in the list
 *
 * Return: none
 */
static void wma_vdev_stats_lost_link_helper(tp_wma_handle wma,
					    wmi_vdev_stats *vdev_stats)
{
	struct wma_txrx_node *node;
	int32_t rssi;
	struct wma_target_req *req_msg;
	static const uint8_t zero_mac[QDF_MAC_ADDR_SIZE] = {0};
	int32_t bcn_snr, dat_snr;

	if (vdev_stats->vdev_id >= wma->max_bssid) {
		WMA_LOGE("%s: Invalid vdev_id %hu",
			 __func__, vdev_stats->vdev_id);
		return;
	}

	node = &wma->interfaces[vdev_stats->vdev_id];
	/* an all-zero bssid means DELETE_BSS handling already cleared it */
	if (wma_is_vdev_up(vdev_stats->vdev_id) &&
	    !qdf_mem_cmp(node->bssid, zero_mac, QDF_MAC_ADDR_SIZE)) {
		req_msg = wma_peek_vdev_req(wma, vdev_stats->vdev_id,
					    WMA_TARGET_REQ_TYPE_VDEV_STOP);
		if ((NULL == req_msg) ||
		    (WMA_DELETE_BSS_REQ != req_msg->msg_type)) {
			WMA_LOGD(FL("cannot find DELETE_BSS request message"));
			return;
		}
		bcn_snr = vdev_stats->vdev_snr.bcn_snr;
		dat_snr = vdev_stats->vdev_snr.dat_snr;
		WMA_LOGD(FL("get vdev id %d, beancon snr %d, data snr %d"),
			 vdev_stats->vdev_id, bcn_snr, dat_snr);

		/* prefer beacon SNR; fall back to data SNR */
		if (WMA_TGT_IS_VALID_SNR(bcn_snr))
			rssi = bcn_snr;
		else if (WMA_TGT_IS_VALID_SNR(dat_snr))
			rssi = dat_snr;
		else
			rssi = WMA_TGT_INVALID_SNR;

		/* Get the absolute rssi value from the current rssi value */
		rssi = rssi + WMA_TGT_NOISE_FLOOR_DBM;
		wma_lost_link_info_handler(wma, vdev_stats->vdev_id, rssi);
	}
}

/**
 * wma_update_vdev_stats() - update vdev stats
 * @wma: wma handle
 * @vdev_stats: vdev stats
 *
 * Copies firmware vdev statistics into any pending PE stats response,
 * services a pending RSSI request and a pending SNR request for this
 * vdev, then feeds the event to the lost-link helper.
 *
 * Return: none
 */
static void wma_update_vdev_stats(tp_wma_handle wma,
				  wmi_vdev_stats *vdev_stats)
{
	tAniGetPEStatsRsp *stats_rsp_params;
	tCsrSummaryStatsInfo *summary_stats = NULL;
	uint8_t *stats_buf;
	struct wma_txrx_node *node;
	uint8_t i;
	int32_t rssi = 0;
	QDF_STATUS qdf_status;
	tAniGetRssiReq *pGetRssiReq = (tAniGetRssiReq *) wma->pGetRssiReq;
	struct scheduler_msg sme_msg = { 0 };
	int32_t bcn_snr, dat_snr;

	if (vdev_stats->vdev_id >= wma->max_bssid) {
		WMA_LOGE("%s: Invalid vdev_id %hu",
			 __func__, vdev_stats->vdev_id);
		return;
	}

	bcn_snr = vdev_stats->vdev_snr.bcn_snr;
	dat_snr = vdev_stats->vdev_snr.dat_snr;
	WMA_LOGD("vdev id %d beancon snr %d data snr %d",
		 vdev_stats->vdev_id, bcn_snr, dat_snr);

	node = &wma->interfaces[vdev_stats->vdev_id];
	stats_rsp_params = node->stats_rsp;
	if (stats_rsp_params) {
		/* stats sections are packed after the rsp header */
		stats_buf = (uint8_t *) (stats_rsp_params + 1);
		node->fw_stats_set |= FW_VDEV_STATS_SET;
		WMA_LOGD("<---FW VDEV STATS received for vdevId:%d",
			 vdev_stats->vdev_id);
		if (stats_rsp_params->statsMask & (1 << eCsrSummaryStats)) {
			summary_stats = (tCsrSummaryStatsInfo *) stats_buf;
			/* 4 entries: presumably one per WMM AC — TODO confirm
			 * against the tx_frm_cnt[] array definition
			 */
			for (i = 0; i < 4; i++) {
				summary_stats->tx_frm_cnt[i] =
					vdev_stats->tx_frm_cnt[i];
				summary_stats->fail_cnt[i] =
					vdev_stats->fail_cnt[i];
				summary_stats->multiple_retry_cnt[i] =
					vdev_stats->multiple_retry_cnt[i];
			}

			summary_stats->rx_frm_cnt = vdev_stats->rx_frm_cnt;
			summary_stats->rx_error_cnt = vdev_stats->rx_err_cnt;
			summary_stats->rx_discard_cnt =
				vdev_stats->rx_discard_cnt;
			summary_stats->ack_fail_cnt = vdev_stats->ack_fail_cnt;
			summary_stats->rts_succ_cnt = vdev_stats->rts_succ_cnt;
			summary_stats->rts_fail_cnt = vdev_stats->rts_fail_cnt;
			/* Update SNR and RSSI in SummaryStats */
			if (WMA_TGT_IS_VALID_SNR(bcn_snr)) {
				summary_stats->snr = bcn_snr;
				summary_stats->rssi =
					bcn_snr + WMA_TGT_NOISE_FLOOR_DBM;
			} else if (WMA_TGT_IS_VALID_SNR(dat_snr)) {
				summary_stats->snr = dat_snr;
				summary_stats->rssi =
					dat_snr + WMA_TGT_NOISE_FLOOR_DBM;
			} else {
				summary_stats->snr = WMA_TGT_INVALID_SNR;
				summary_stats->rssi = 0;
			}
		}
	}

	if (pGetRssiReq && pGetRssiReq->sessionId == vdev_stats->vdev_id) {
		if (WMA_TGT_IS_VALID_SNR(bcn_snr)) {
			rssi = bcn_snr;
			rssi = rssi + WMA_TGT_NOISE_FLOOR_DBM;
		} else if (WMA_TGT_IS_VALID_SNR(dat_snr)) {
			rssi = dat_snr;
			rssi = rssi + WMA_TGT_NOISE_FLOOR_DBM;
		} else {
			/*
			 * Firmware sends invalid snr till it sees
			 * Beacon/Data after connection since after
			 * vdev up fw resets the snr to invalid.
			 * In this duration Host will return the last know
			 * rssi during connection.
			 */
			WMA_LOGE("Invalid SNR from firmware");
		}

		WMA_LOGD("Average Rssi = %d, vdev id= %d", rssi,
			 pGetRssiReq->sessionId);

		/* update the average rssi value to UMAC layer */
		if (NULL != pGetRssiReq->rssiCallback) {
			((tCsrRssiCallback) (pGetRssiReq->rssiCallback))(rssi,
					pGetRssiReq->staId,
					pGetRssiReq->pDevContext);
		}

		/* request is one-shot: free it and clear the handle */
		qdf_mem_free(pGetRssiReq);
		wma->pGetRssiReq = NULL;
	}

	if (node->psnr_req) {
		tAniGetSnrReq *p_snr_req = node->psnr_req;

		if (WMA_TGT_IS_VALID_SNR(bcn_snr))
			p_snr_req->snr = bcn_snr;
		else if (WMA_TGT_IS_VALID_SNR(dat_snr))
			p_snr_req->snr = dat_snr;
		else
			p_snr_req->snr = WMA_TGT_INVALID_SNR;

		/* ownership of p_snr_req passes to SME on success */
		sme_msg.type = eWNI_SME_SNR_IND;
		sme_msg.bodyptr = p_snr_req;
		sme_msg.bodyval = 0;

		qdf_status = scheduler_post_msg(QDF_MODULE_ID_SME, &sme_msg);
		if (!QDF_IS_STATUS_SUCCESS(qdf_status)) {
			WMA_LOGE("%s: Fail to post snr ind msg", __func__);
			qdf_mem_free(p_snr_req);
		}

		node->psnr_req = NULL;
	}
	wma_vdev_stats_lost_link_helper(wma, vdev_stats);
}

/**
 * wma_post_stats() - update stats to PE
 * @wma: wma handle
 * @node: txrx node
 *
 * Sends the accumulated stats response to UMAC and resets the
 * per-vdev bookkeeping; ownership of node->stats_rsp is handed off.
 *
 * Return: none
 */
static void wma_post_stats(tp_wma_handle wma, struct wma_txrx_node *node)
{
	/* send response to UMAC */
	wma_send_msg(wma, WMA_GET_STATISTICS_RSP, node->stats_rsp, 0);
	node->stats_rsp = NULL;
	node->fw_stats_set = 0;
}

/**
 * wma_update_peer_stats() - update peer stats
 * @wma: wma handle
 * @peer_stats: peer stats
 *
 * Fills the Class A section of a pending stats response with the
 * peer's tx rate, MCS index, NSS and tx power.
 *
 * Return: none
 */
static void wma_update_peer_stats(tp_wma_handle wma,
				  wmi_peer_stats *peer_stats)
{
	tAniGetPEStatsRsp *stats_rsp_params;
	tCsrGlobalClassAStatsInfo *classa_stats = NULL;
	struct wma_txrx_node *node;
	uint8_t *stats_buf, vdev_id, macaddr[IEEE80211_ADDR_LEN], mcsRateFlags;
	uint32_t temp_mask;

	WMI_MAC_ADDR_TO_CHAR_ARRAY(&peer_stats->peer_macaddr, &macaddr[0]);
	if (!wma_find_vdev_by_bssid(wma, macaddr, &vdev_id))
		return;

	node = &wma->interfaces[vdev_id];
	stats_rsp_params = (tAniGetPEStatsRsp *) node->stats_rsp;
	if (stats_rsp_params) {
		node->fw_stats_set |= FW_PEER_STATS_SET;
		WMA_LOGD("<-- FW PEER STATS received for vdevId:%d", vdev_id);
		stats_buf = (uint8_t *) (stats_rsp_params + 1);
		temp_mask = stats_rsp_params->statsMask;
		if (temp_mask & (1 << eCsrSummaryStats))
			stats_buf += sizeof(tCsrSummaryStatsInfo);

		if (temp_mask & (1 << eCsrGlobalClassAStats)) {
			classa_stats = (tCsrGlobalClassAStatsInfo *) stats_buf;
			WMA_LOGD("peer tx rate:%d", peer_stats->peer_tx_rate);
			/* The linkspeed returned by fw is in kbps so convert
			 * it in to units of 500kbps which is expected by UMAC
			 */
			if (peer_stats->peer_tx_rate) {
				classa_stats->tx_rate =
					peer_stats->peer_tx_rate / 500;
			}

			classa_stats->tx_rate_flags = node->rate_flags;
			if (!(node->rate_flags & TX_RATE_LEGACY)) {
				/* non-legacy: derive MCS index from the rate
				 * in units of 100 kbps
				 */
				classa_stats->mcs_index =
					wma_get_mcs_idx(
						(peer_stats->peer_tx_rate /
						 100), node->rate_flags,
						node->nss, &mcsRateFlags);
				classa_stats->nss = node->nss;
				classa_stats->mcs_rate_flags = mcsRateFlags;
			}
			/* FW returns tx power in intervals of 0.5 dBm
			 * Convert it back to intervals of 1 dBm
			 */
			classa_stats->max_pwr =
				roundup(classa_stats->max_pwr, 2) >> 1;
		}
	}
}
#endif /* QCA_SUPPORT_CP_STATS */

/**
 * wma_post_link_status() - post link status to SME
 * @pGetLinkStatus: SME Link status
 * @link_status: Link status
 *
 * Return: none
 */
void wma_post_link_status(tAniGetLinkStatus *pGetLinkStatus,
			  uint8_t link_status)
{
	QDF_STATUS qdf_status = QDF_STATUS_SUCCESS;
	struct scheduler_msg sme_msg = { 0 };

	pGetLinkStatus->linkStatus = link_status;
	/* ownership of pGetLinkStatus passes to SME on success */
	sme_msg.type = eWNI_SME_LINK_STATUS_IND;
	sme_msg.bodyptr = pGetLinkStatus;
	sme_msg.bodyval = 0;

	qdf_status = scheduler_post_msg(QDF_MODULE_ID_SME, &sme_msg);
	if (!QDF_IS_STATUS_SUCCESS(qdf_status)) {
		WMA_LOGE("%s: Fail to post link status ind msg", __func__);
		qdf_mem_free(pGetLinkStatus);
	}
}

#ifndef QCA_SUPPORT_CP_STATS
/**
 * wma_update_per_chain_rssi_stats() - to store per chain rssi stats
 * @wma: wma handle
 * @rssi_stats: rssi stats
 * @rssi_per_chain_stats: buffer where rssi stats to be stored
 *
 * This function stores per chain rssi stats received from fw for all vdevs for
 * which the stats were requested into a csr stats structure.
 *
 * Return: void
 */
static void wma_update_per_chain_rssi_stats(tp_wma_handle wma,
		wmi_rssi_stats *rssi_stats,
		struct csr_per_chain_rssi_stats_info *rssi_per_chain_stats)
{
	int i;
	int32_t bcn_snr, dat_snr;

	for (i = 0; i < NUM_CHAINS_MAX; i++) {
		bcn_snr = rssi_stats->rssi_avg_beacon[i];
		dat_snr = rssi_stats->rssi_avg_data[i];
		WMA_LOGD("chain %d beacon snr %d data snr %d",
			 i, bcn_snr, dat_snr);
		/* prefer beacon SNR; fall back to data SNR */
		if (WMA_TGT_IS_VALID_SNR(bcn_snr))
			rssi_per_chain_stats->rssi[i] = bcn_snr;
		else if (WMA_TGT_IS_VALID_SNR(dat_snr))
			rssi_per_chain_stats->rssi[i] = dat_snr;
		else
			/*
			 * Firmware sends invalid snr till it sees
			 * Beacon/Data after connection since after
			 * vdev up fw resets the snr to invalid.
			 * In this duration Host will return an invalid rssi
			 * value.
			 */
			rssi_per_chain_stats->rssi[i] = WMA_TGT_INVALID_SNR;

		/*
		 * Get the absolute rssi value from the current rssi value the
		 * sinr value is hardcoded into 0 in the qcacld-new/CORE stack
		 */
		rssi_per_chain_stats->rssi[i] += WMA_TGT_NOISE_FLOOR_DBM;
		WMI_MAC_ADDR_TO_CHAR_ARRAY(&(rssi_stats->peer_macaddr),
					   rssi_per_chain_stats->peer_mac_addr);
	}
}

/**
 * wma_update_rssi_stats() - to update rssi stats for all vdevs
 * for which the stats were requested.
 * @wma: wma handle
 * @rssi_stats: rssi stats
 *
 * This function updates the rssi stats for all vdevs for which
 * the stats were requested.
 *
 * Return: void
 */
static void wma_update_rssi_stats(tp_wma_handle wma,
				  wmi_rssi_stats *rssi_stats)
{
	tAniGetPEStatsRsp *stats_rsp_params;
	struct csr_per_chain_rssi_stats_info *rssi_per_chain_stats = NULL;
	struct wma_txrx_node *node;
	uint8_t *stats_buf;
	uint32_t temp_mask;
	uint8_t vdev_id;

	if (rssi_stats->vdev_id >= wma->max_bssid) {
		WMA_LOGE("%s: Invalid vdev_id %hu",
			 __func__, rssi_stats->vdev_id);
		return;
	}

	vdev_id = rssi_stats->vdev_id;
	node = &wma->interfaces[vdev_id];
	stats_rsp_params = (tAniGetPEStatsRsp *) node->stats_rsp;
	if (stats_rsp_params) {
		node->fw_stats_set |= FW_RSSI_PER_CHAIN_STATS_SET;
		WMA_LOGD("<-- FW RSSI PER CHAIN STATS received for vdevId:%d",
			 vdev_id);
		/* stats sections are packed after the rsp header; skip the
		 * sections that precede the per-chain rssi section
		 */
		stats_buf = (uint8_t *) (stats_rsp_params + 1);
		temp_mask = stats_rsp_params->statsMask;

		if (temp_mask & (1 << eCsrSummaryStats))
			stats_buf += sizeof(tCsrSummaryStatsInfo);
		if (temp_mask & (1 << eCsrGlobalClassAStats))
			stats_buf += sizeof(tCsrGlobalClassAStatsInfo);
		if (temp_mask & (1 << eCsrGlobalClassDStats))
			stats_buf += sizeof(tCsrGlobalClassDStatsInfo);

		if (temp_mask & (1 << csr_per_chain_rssi_stats)) {
			rssi_per_chain_stats =
			     (struct csr_per_chain_rssi_stats_info *)stats_buf;
			wma_update_per_chain_rssi_stats(wma, rssi_stats,
							rssi_per_chain_stats);
		}
	}
}
#endif /* QCA_SUPPORT_CP_STATS */

/**
 * wma_link_status_event_handler() - link status event handler
 * @handle: wma handle
 * @cmd_param_info: data from event
 * @len: length
 *
 * Return: 0 for success or error code
 */
int wma_link_status_event_handler(void *handle, uint8_t *cmd_param_info,
				  uint32_t len)
{
	tp_wma_handle wma = (tp_wma_handle) handle;
	WMI_UPDATE_VDEV_RATE_STATS_EVENTID_param_tlvs *param_buf;
	wmi_vdev_rate_stats_event_fixed_param *event;
	wmi_vdev_rate_ht_info *ht_info;
	struct wma_txrx_node *intr = wma->interfaces;
	uint8_t link_status = LINK_STATUS_LEGACY;
	uint32_t i;

	param_buf =
	      (WMI_UPDATE_VDEV_RATE_STATS_EVENTID_param_tlvs *) cmd_param_info;
	if (!param_buf) {
		WMA_LOGA("%s: Invalid stats event", __func__);
		return -EINVAL;
	}

	event = (wmi_vdev_rate_stats_event_fixed_param *)
					param_buf->fixed_param;
	ht_info = (wmi_vdev_rate_ht_info *) param_buf->ht_info;

	WMA_LOGD("num_vdev_stats: %d", event->num_vdev_stats);

	/* clamp the fw-provided count to both the message-size limit and the
	 * number of ht_info TLVs actually present in the buffer
	 */
	if (event->num_vdev_stats > ((WMI_SVC_MSG_MAX_SIZE -
	    sizeof(*event)) / sizeof(*ht_info)) ||
	    event->num_vdev_stats > param_buf->num_ht_info) {
		WMA_LOGE("%s: excess vdev_stats buffers:%d, num_ht_info:%d",
			 __func__, event->num_vdev_stats,
			 param_buf->num_ht_info);
		return -EINVAL;
	}
	for (i = 0; (i < event->num_vdev_stats) && ht_info; i++) {
		WMA_LOGD("%s vdevId:%d tx_nss:%d rx_nss:%d tx_preamble:%d rx_preamble:%d",
			 __func__, ht_info->vdevid, ht_info->tx_nss,
			 ht_info->rx_nss, ht_info->tx_preamble,
			 ht_info->rx_preamble);
		/* only report for vdevs that have a pending status request */
		if (ht_info->vdevid < wma->max_bssid
		    && intr[ht_info->vdevid].plink_status_req) {
			if (ht_info->tx_nss || ht_info->rx_nss)
				link_status = LINK_STATUS_MIMO;

			if ((ht_info->tx_preamble == LINK_RATE_VHT) ||
			    (ht_info->rx_preamble == LINK_RATE_VHT))
				link_status |= LINK_STATUS_VHT;

			if (intr[ht_info->vdevid].nss == 2)
				link_status |= LINK_SUPPORT_MIMO;

			if (intr[ht_info->vdevid].rate_flags &
			    (TX_RATE_VHT20 | TX_RATE_VHT40 |
			     TX_RATE_VHT80))
				link_status |= LINK_SUPPORT_VHT;

			wma_post_link_status(
				intr[ht_info->vdevid].plink_status_req,
				link_status);
			intr[ht_info->vdevid].plink_status_req = NULL;
			/* reset for the next vdev in this event */
			link_status = LINK_STATUS_LEGACY;
		}

		ht_info++;
	}

	return 0;
}

int
wma_rso_cmd_status_event_handler(wmi_roam_event_fixed_param *wmi_event)
{
	struct rso_cmd_status *rso_status;
	struct scheduler_msg sme_msg = {0};
	QDF_STATUS qdf_status;

	rso_status = qdf_mem_malloc(sizeof(*rso_status));
	if (!rso_status) {
		WMA_LOGE("%s: malloc fails for rso cmd status", __func__);
		return -ENOMEM;
	}

	rso_status->vdev_id = wmi_event->vdev_id;
	/* NOTE(review): status is only assigned for the two notif values
	 * below; for any other notif it keeps whatever qdf_mem_malloc
	 * returned - confirm callers never hit a third value here.
	 */
	if (WMI_ROAM_NOTIF_SCAN_MODE_SUCCESS == wmi_event->notif)
		rso_status->status = true;
	else if (WMI_ROAM_NOTIF_SCAN_MODE_FAIL == wmi_event->notif)
		rso_status->status = false;
	/* ownership of rso_status passes to SME on successful post */
	sme_msg.type = eWNI_SME_RSO_CMD_STATUS_IND;
	sme_msg.bodyptr = rso_status;
	sme_msg.bodyval = 0;
	WMA_LOGI("%s: Post RSO cmd status to SME", __func__);

	qdf_status = scheduler_post_msg(QDF_MODULE_ID_SME, &sme_msg);
	if (!QDF_IS_STATUS_SUCCESS(qdf_status)) {
		WMA_LOGE("%s: fail to post RSO cmd status to SME", __func__);
		qdf_mem_free(rso_status);
	}
	return 0;
}

#ifndef QCA_SUPPORT_CP_STATS
/**
 * wma_handle_sta_peer_info() - handle peer information in
 * peer stats
 * @num_peer_stats: peer number
 * @peer_stats: peer stats received from firmware
 * @peer_macaddr: the specified mac address
 * @sapaddr: sap mac address
 *
 * This function will send eWNI_SME_GET_PEER_INFO_IND
 * to sme with stations' information
 *
 */
static void wma_handle_sta_peer_info(uint32_t num_peer_stats,
				     wmi_peer_stats *peer_stats,
				     struct qdf_mac_addr peer_macaddr,
				     uint8_t *sapaddr)
{
	QDF_STATUS qdf_status;
	wmi_mac_addr temp_addr;
	struct sir_peer_info_resp *peer_info;
	struct scheduler_msg sme_msg = {0};
	uint32_t i;
	uint32_t j = 0;

	if (!qdf_is_macaddr_broadcast(&peer_macaddr)) {
		WMI_CHAR_ARRAY_TO_MAC_ADDR(peer_macaddr.bytes, &temp_addr);
		for (i = 0; i < num_peer_stats; i++) {
			if ((((temp_addr.mac_addr47to32) &
			0x0000ffff) ==
			((peer_stats->peer_macaddr.mac_addr47to32) &
			0x0000ffff))
			&& (temp_addr.mac_addr31to0 ==
			peer_stats->peer_macaddr.mac_addr31to0)) {

				break;
			}
			peer_stats = peer_stats + 1;
		}
		/* Response carries at most one entry for a unicast query */
		peer_info = qdf_mem_malloc(sizeof(*peer_info) +
				sizeof(peer_info->info[0]));
		if (NULL == peer_info) {
			WMA_LOGE("%s: Memory allocation failed.", __func__);
			return;
		}
		if (i < num_peer_stats) {
			peer_info->count = 1;
			WMI_MAC_ADDR_TO_CHAR_ARRAY(&(peer_stats->peer_macaddr),
				peer_info->info[0].peer_macaddr.bytes);
			peer_info->info[0].rssi = peer_stats->peer_rssi;
			peer_info->info[0].tx_rate = peer_stats->peer_tx_rate;
			peer_info->info[0].rx_rate = peer_stats->peer_rx_rate;
			WMA_LOGD("%s peer %pM rssi %d tx_rate %d rx_rate %d",
				 __func__,
				 peer_info->info[0].peer_macaddr.bytes,
				 peer_stats->peer_rssi,
				 peer_stats->peer_tx_rate,
				 peer_stats->peer_rx_rate);
		} else {
			WMA_LOGE("%s: no match mac address", __func__);
			peer_info->count = 0;
		}
	} else {
		/* Broadcast query: report every peer except the SAP itself */
		peer_info = qdf_mem_malloc(sizeof(*peer_info) +
				num_peer_stats * sizeof(peer_info->info[0]));
		if (NULL == peer_info) {
			WMA_LOGE("%s: Memory allocation failed.", __func__);
			return;
		}
		peer_info->count = num_peer_stats;

		for (i = 0; i < num_peer_stats; i++) {
			WMI_MAC_ADDR_TO_CHAR_ARRAY(&(peer_stats->peer_macaddr),
				peer_info->info[j].peer_macaddr.bytes);
			peer_info->info[j].rssi = peer_stats->peer_rssi;
			peer_info->info[j].tx_rate = peer_stats->peer_tx_rate;
			peer_info->info[j].rx_rate = peer_stats->peer_rx_rate;
			WMA_LOGD("%s peer %pM rssi %d tx_rate %d rx_rate %d",
				 __func__,
				 peer_info->info[j].peer_macaddr.bytes,
				 peer_stats->peer_rssi,
				 peer_stats->peer_tx_rate,
				 peer_stats->peer_rx_rate);
			if (!qdf_mem_cmp(peer_info->info[j].peer_macaddr.bytes,
					sapaddr, QDF_MAC_ADDR_SIZE)) {
				/* Skip the SAP's own entry; slot j is reused */
				peer_info->count = peer_info->count - 1;
			} else {
				j++;
			}
			peer_stats = peer_stats + 1;
		}
		WMA_LOGD("WDA send peer num %d", peer_info->count);
	}

	sme_msg.type = eWNI_SME_GET_PEER_INFO_IND;
	sme_msg.bodyptr = peer_info;
	sme_msg.bodyval = 0;

	qdf_status = scheduler_post_msg(QDF_MODULE_ID_SME, &sme_msg);
	if (!QDF_IS_STATUS_SUCCESS(qdf_status)) {
		/* SME never saw the message, so it cannot free the buffer */
		WMA_LOGE("%s: Fail to post get rssi msg", __func__);
		qdf_mem_free(peer_info);
	}

	return;
}

/**
 * wma_stats_event_handler() - stats event handler
 * @handle: wma handle
 * @cmd_param_info: data from event
 * @len: length
 *
 * Return: 0 for success or error code
 */
int wma_stats_event_handler(void *handle, uint8_t *cmd_param_info,
			    uint32_t len)
{
	tp_wma_handle wma = (tp_wma_handle) handle;
	WMI_UPDATE_STATS_EVENTID_param_tlvs *param_buf;
	wmi_stats_event_fixed_param *event;
	wmi_pdev_stats *pdev_stats;
	wmi_vdev_stats *vdev_stats;
	wmi_peer_stats *peer_stats;
	wmi_rssi_stats *rssi_stats;
	wmi_per_chain_rssi_stats *rssi_event;
	struct wma_txrx_node *node;
	uint8_t *temp;
	uint32_t i;
	uint32_t buf_len = 0;
	bool excess_data = false;
	wmi_congestion_stats *congestion_stats;
	tpAniSirGlobal mac;

	param_buf = (WMI_UPDATE_STATS_EVENTID_param_tlvs *) cmd_param_info;
	if (!param_buf) {
		WMA_LOGA("%s: Invalid stats event", __func__);
		return -EINVAL;
	}
	event = param_buf->fixed_param;
	temp = (uint8_t *) param_buf->data;
	/* Sanity-check the advertised counts against the TLV data length */
	if ((event->num_pdev_stats + event->num_vdev_stats +
	     event->num_peer_stats) > param_buf->num_data) {
		WMA_LOGE("%s: Invalid num_pdev_stats:%d or num_vdev_stats:%d or num_peer_stats:%d",
			__func__, event->num_pdev_stats, event->num_vdev_stats,
			event->num_peer_stats);
		return -EINVAL;
	}

	/* Per-field bound checks against the maximum WMI message size */
	do {
		if (event->num_pdev_stats > ((WMI_SVC_MSG_MAX_SIZE -
		    sizeof(*event)) / sizeof(*pdev_stats))) {
			excess_data = true;
			break;
		} else {
			buf_len += event->num_pdev_stats * sizeof(*pdev_stats);
		}

		if (event->num_vdev_stats > ((WMI_SVC_MSG_MAX_SIZE -
		    sizeof(*event)) / sizeof(*vdev_stats))) {
			excess_data = true;
			break;
		} else {
			buf_len += event->num_vdev_stats * sizeof(*vdev_stats);
		}

		if (event->num_peer_stats > ((WMI_SVC_MSG_MAX_SIZE -
		    sizeof(*event)) / sizeof(*peer_stats))) {
			excess_data = true;
			break;
		} else {
			buf_len += event->num_peer_stats * sizeof(*peer_stats);
		}

		rssi_event =
			(wmi_per_chain_rssi_stats *) param_buf->chain_stats;
		if (rssi_event) {
			if (rssi_event->num_per_chain_rssi_stats >
			    ((WMI_SVC_MSG_MAX_SIZE - sizeof(*event)) /
			    sizeof(*rssi_event))) {
				excess_data = true;
				break;
			} else {
				buf_len += sizeof(*rssi_event) *
					rssi_event->num_per_chain_rssi_stats;
			}
		}
	} while (0);

	if (excess_data ||
	    (buf_len > WMI_SVC_MSG_MAX_SIZE - sizeof(*event))) {
		WMA_LOGE("excess wmi buffer: stats pdev %d vdev %d peer %d",
			 event->num_pdev_stats, event->num_vdev_stats,
			 event->num_peer_stats);
		QDF_ASSERT(0);
		return -EINVAL;
	}

	/* temp walks the packed pdev/vdev/peer stats arrays in order */
	if (event->num_pdev_stats > 0) {
		for (i = 0; i < event->num_pdev_stats; i++) {
			pdev_stats = (wmi_pdev_stats *) temp;
			wma_update_pdev_stats(wma, pdev_stats);
			temp += sizeof(wmi_pdev_stats);
		}
	}

	if (event->num_vdev_stats > 0) {
		for (i = 0; i < event->num_vdev_stats; i++) {
			vdev_stats = (wmi_vdev_stats *) temp;
			wma_update_vdev_stats(wma, vdev_stats);
			temp += sizeof(wmi_vdev_stats);
		}
	}

	if (event->num_peer_stats > 0) {
		if (wma->get_sta_peer_info == true) {
			/* Forward the whole peer array to the SAP path */
			wma_handle_sta_peer_info(event->num_peer_stats,
				(wmi_peer_stats *)temp,
				wma->peer_macaddr,
				wma->myaddr);
		} else {
			for (i = 0; i < event->num_peer_stats; i++) {
				peer_stats = (wmi_peer_stats *) temp;
				wma_update_peer_stats(wma, peer_stats);
				temp += sizeof(wmi_peer_stats);
			}
		}
	}

	rssi_event = (wmi_per_chain_rssi_stats *) param_buf->chain_stats;
	if (rssi_event) {
		if (rssi_event->num_per_chain_rssi_stats >
		    param_buf->num_rssi_stats) {
			WMA_LOGE("%s: Invalid num_per_chain_rssi_stats:%d",
				__func__, rssi_event->num_per_chain_rssi_stats);
			return -EINVAL;
		}
		/* Validate TLV tag and length before trusting the payload */
		if (((rssi_event->tlv_header & 0xFFFF0000) >> 16 ==
			WMITLV_TAG_STRUC_wmi_per_chain_rssi_stats) &&
			((rssi_event->tlv_header & 0x0000FFFF) ==
			WMITLV_GET_STRUCT_TLVLEN(wmi_per_chain_rssi_stats))) {
			if (rssi_event->num_per_chain_rssi_stats > 0) {
				temp = (uint8_t *) rssi_event;
				temp += sizeof(*rssi_event);

				/* skip past struct array tlv header */
				temp += WMI_TLV_HDR_SIZE;

				for (i = 0;
				     i < rssi_event->num_per_chain_rssi_stats;
				     i++) {
					rssi_stats = (wmi_rssi_stats *)temp;
					wma_update_rssi_stats(wma, rssi_stats);
					temp += sizeof(wmi_rssi_stats);
				}
			}
		}
	}

	congestion_stats = (wmi_congestion_stats *) param_buf->congestion_stats;
	if (congestion_stats) {
		/* Validate TLV tag and length before invoking the callback */
		if (((congestion_stats->tlv_header & 0xFFFF0000) >> 16 ==
			WMITLV_TAG_STRUC_wmi_congestion_stats) &&
			((congestion_stats->tlv_header & 0x0000FFFF) ==
			WMITLV_GET_STRUCT_TLVLEN(wmi_congestion_stats))) {
			mac = cds_get_context(QDF_MODULE_ID_PE);
			if (!mac) {
				WMA_LOGE("%s: Invalid mac", __func__);
				return -EINVAL;
			}
			if (!mac->sme.congestion_cb) {
				WMA_LOGE("%s: Callback not registered",
					 __func__);
				return -EINVAL;
			}
			WMA_LOGI("%s: congestion %d", __func__,
				 congestion_stats->congestion);
			mac->sme.congestion_cb(mac->hHdd,
				congestion_stats->congestion,
				congestion_stats->vdev_id);
		}
	}

	/* Post any interface whose firmware stats are now complete to UMAC */
	for (i = 0; i < wma->max_bssid; i++) {
		node = &wma->interfaces[i];
		if (node->fw_stats_set & FW_PEER_STATS_SET)
			wma_post_stats(wma, node);
	}

	return 0;
}
#endif /* QCA_SUPPORT_CP_STATS */

/**
 * wma_fill_peer_info() - fill SIR peer info from WMI peer info struct
 * @wma: wma interface
 * @stats_info: WMI peer info pointer
 * @peer_info: SIR peer info pointer
 *
 * This function will fill SIR peer info from WMI peer info struct
 *
 * Return: None
 */
static void wma_fill_peer_info(tp_wma_handle wma,
			       wmi_peer_stats_info *stats_info,
			       struct sir_peer_info_ext *peer_info)
{
	peer_info->tx_packets = stats_info->tx_packets.low_32;
	/* Reassemble the 64-bit byte counters from the WMI high/low halves */
	peer_info->tx_bytes = stats_info->tx_bytes.high_32;
	peer_info->tx_bytes <<= 32;
	peer_info->tx_bytes += stats_info->tx_bytes.low_32;
	peer_info->rx_packets = stats_info->rx_packets.low_32;
	peer_info->rx_bytes = stats_info->rx_bytes.high_32;
	peer_info->rx_bytes <<= 32;
	peer_info->rx_bytes += stats_info->rx_bytes.low_32;
	peer_info->tx_retries = stats_info->tx_retries;
	peer_info->tx_failed = stats_info->tx_failed;
	peer_info->rssi = stats_info->peer_rssi;
	peer_info->tx_rate = stats_info->last_tx_bitrate_kbps;
	peer_info->tx_rate_code = stats_info->last_tx_rate_code;
	peer_info->rx_rate = stats_info->last_rx_bitrate_kbps;
	peer_info->rx_rate_code = stats_info->last_rx_rate_code;
}

/**
 * wma_peer_info_ext_rsp() - process peer ext info ext
 * @wma: wma interface
 * @buf: copy of the WMI event (fixed param followed by peer array)
 *
 * This function will send eWNI_SME_GET_PEER_INFO_EXT_IND to SME
 *
 * Return: QDF_STATUS_SUCCESS on success, error code otherwise
 */
static QDF_STATUS wma_peer_info_ext_rsp(tp_wma_handle wma, u_int8_t *buf)
{
	wmi_peer_stats_info_event_fixed_param *event;
	wmi_peer_stats_info *stats_info;
	struct sir_peer_info_ext_resp *resp;
	struct sir_peer_info_ext *peer_info;
	struct scheduler_msg sme_msg = {0};
	int i, j = 0;
	QDF_STATUS qdf_status;

	/* buf holds the fixed param immediately followed by the peer array */
	event = (wmi_peer_stats_info_event_fixed_param *)buf;
	stats_info = (wmi_peer_stats_info *)(buf +
			sizeof(wmi_peer_stats_info_event_fixed_param));

	if (wma->get_one_peer_info) {
		/* Single-peer query: return at most one matching entry */
		resp = qdf_mem_malloc(sizeof(struct sir_peer_info_ext_resp) +
				sizeof(resp->info[0]));
		if (!resp) {
			WMA_LOGE(FL("resp allocation failed."));
			return QDF_STATUS_E_NOMEM;
		}
		resp->count = 0;
		peer_info = &resp->info[0];
		for (i = 0; i < event->num_peers; i++) {
			WMI_MAC_ADDR_TO_CHAR_ARRAY(&stats_info->peer_macaddr,
					peer_info->peer_macaddr.bytes);

			if (!qdf_mem_cmp(peer_info->peer_macaddr.bytes,
					wma->peer_macaddr.bytes,
					QDF_MAC_ADDR_SIZE)) {
				wma_fill_peer_info(wma, stats_info, peer_info);
				resp->count++;
				break;
			}

			stats_info = stats_info + 1;
		}
	} else {
		/* All-peer query: copy every entry except our own address */
		resp = qdf_mem_malloc(sizeof(struct sir_peer_info_ext_resp) +
				event->num_peers * sizeof(resp->info[0]));
		if (!resp) {
			WMA_LOGE(FL("resp allocation failed."));
			return QDF_STATUS_E_NOMEM;
		}
		resp->count = event->num_peers;
		for (i = 0; i < event->num_peers; i++) {
			peer_info = &resp->info[j];
			WMI_MAC_ADDR_TO_CHAR_ARRAY(&stats_info->peer_macaddr,
					peer_info->peer_macaddr.bytes);

			if (!qdf_mem_cmp(peer_info->peer_macaddr.bytes,
					wma->myaddr, QDF_MAC_ADDR_SIZE)) {
				/* Skip self; slot j is reused for next peer */
				resp->count = resp->count - 1;
			} else {
				wma_fill_peer_info(wma, stats_info, peer_info);
				j++;
			}
			stats_info = stats_info + 1;
		}
	}

	sme_msg.type = eWNI_SME_GET_PEER_INFO_EXT_IND;
	sme_msg.bodyptr = resp;
	sme_msg.bodyval = 0;

	qdf_status = scheduler_post_msg(QDF_MODULE_ID_SME, &sme_msg);
	if (!QDF_IS_STATUS_SUCCESS(qdf_status)) {
		/* SME never saw the message, so it cannot free the buffer */
		WMA_LOGE("%s: Fail to post get peer info msg", __func__);
		qdf_mem_free(resp);
	}

	return
qdf_status; 3313 } 3314 3315 /** 3316 * dump_peer_stats_info() - dump wmi peer info struct 3317 * @event: wmi peer info fixed param pointer 3318 * @peer_stats: wmi peer stats info pointer 3319 * 3320 * This function will dump wmi peer info struct 3321 * 3322 * Return: None 3323 */ 3324 static void dump_peer_stats_info(wmi_peer_stats_info_event_fixed_param *event, 3325 wmi_peer_stats_info *peer_stats) 3326 { 3327 int i; 3328 wmi_peer_stats_info *stats = peer_stats; 3329 u_int8_t mac[6]; 3330 3331 WMA_LOGI("%s vdev_id %d, num_peers %d more_data %d", 3332 __func__, event->vdev_id, 3333 event->num_peers, event->more_data); 3334 3335 for (i = 0; i < event->num_peers; i++) { 3336 WMI_MAC_ADDR_TO_CHAR_ARRAY(&stats->peer_macaddr, mac); 3337 WMA_LOGI("%s mac %pM", __func__, mac); 3338 WMA_LOGI("%s tx_bytes %d %d tx_packets %d %d", 3339 __func__, 3340 stats->tx_bytes.low_32, 3341 stats->tx_bytes.high_32, 3342 stats->tx_packets.low_32, 3343 stats->tx_packets.high_32); 3344 WMA_LOGI("%s rx_bytes %d %d rx_packets %d %d", 3345 __func__, 3346 stats->rx_bytes.low_32, 3347 stats->rx_bytes.high_32, 3348 stats->rx_packets.low_32, 3349 stats->rx_packets.high_32); 3350 WMA_LOGI("%s tx_retries %d tx_failed %d", 3351 __func__, stats->tx_retries, stats->tx_failed); 3352 WMA_LOGI("%s tx_rate_code %x rx_rate_code %x", 3353 __func__, 3354 stats->last_tx_rate_code, 3355 stats->last_rx_rate_code); 3356 WMA_LOGI("%s tx_rate %x rx_rate %x", 3357 __func__, 3358 stats->last_tx_bitrate_kbps, 3359 stats->last_rx_bitrate_kbps); 3360 WMA_LOGI("%s peer_rssi %d", __func__, stats->peer_rssi); 3361 stats++; 3362 } 3363 } 3364 3365 int wma_peer_info_event_handler(void *handle, u_int8_t *cmd_param_info, 3366 u_int32_t len) 3367 { 3368 tp_wma_handle wma = (tp_wma_handle) handle; 3369 WMI_PEER_STATS_INFO_EVENTID_param_tlvs *param_buf; 3370 wmi_peer_stats_info_event_fixed_param *event; 3371 u_int32_t buf_size; 3372 u_int8_t *buf; 3373 3374 param_buf = 3375 (WMI_PEER_STATS_INFO_EVENTID_param_tlvs 
		 *)cmd_param_info;
	if (!param_buf) {
		WMA_LOGA("%s: Invalid stats event", __func__);
		return -EINVAL;
	}

	WMA_LOGI("%s Recv WMI_PEER_STATS_INFO_EVENTID", __func__);
	event = param_buf->fixed_param;
	/*
	 * Bound num_peers by both the maximum WMI message size and the
	 * actual number of TLV entries present in this event.
	 */
	if (event->num_peers >
	    ((WMI_SVC_MSG_MAX_SIZE -
	      sizeof(wmi_peer_stats_info_event_fixed_param))/
	      sizeof(wmi_peer_stats_info)) || event->num_peers >
	      param_buf->num_peer_stats_info) {
		WMA_LOGE("Excess num of peers from fw: %d, num_peer_stats_info:%d",
			event->num_peers, param_buf->num_peer_stats_info);
		return -EINVAL;
	}
	buf_size = sizeof(wmi_peer_stats_info_event_fixed_param) +
		sizeof(wmi_peer_stats_info) * event->num_peers;
	buf = qdf_mem_malloc(buf_size);
	if (!buf) {
		WMA_LOGE("%s: Failed alloc memory for buf", __func__);
		return -ENOMEM;
	}

	/* Flatten fixed param + peer array so the rsp path can walk them */
	qdf_mem_copy(buf, param_buf->fixed_param,
			sizeof(wmi_peer_stats_info_event_fixed_param));
	qdf_mem_copy((buf + sizeof(wmi_peer_stats_info_event_fixed_param)),
			param_buf->peer_stats_info,
			sizeof(wmi_peer_stats_info) * event->num_peers);
	WMA_LOGI("%s dump peer stats info", __func__);
	dump_peer_stats_info(event, param_buf->peer_stats_info);

	wma_peer_info_ext_rsp(wma, buf);
	qdf_mem_free(buf);

	return 0;
}

/**
 * wma_send_link_speed() - send link speed to SME
 * @link_speed: link speed
 *
 * Return: QDF_STATUS_SUCCESS for success or error code
 */
QDF_STATUS wma_send_link_speed(uint32_t link_speed)
{
	QDF_STATUS qdf_status = QDF_STATUS_SUCCESS;
	tpAniSirGlobal mac_ctx;
	tSirLinkSpeedInfo *ls_ind;

	mac_ctx = cds_get_context(QDF_MODULE_ID_PE);
	if (!mac_ctx) {
		WMA_LOGD("%s: NULL pMac ptr. Exiting", __func__);
		return QDF_STATUS_E_INVAL;
	}

	ls_ind = (tSirLinkSpeedInfo *)qdf_mem_malloc(sizeof(tSirLinkSpeedInfo));
	if (!ls_ind) {
		WMA_LOGE("%s: Memory allocation failed.", __func__);
		qdf_status = QDF_STATUS_E_NOMEM;
	} else {
		/* Indication is synchronous; buffer is freed after callback */
		ls_ind->estLinkSpeed = link_speed;
		if (mac_ctx->sme.pLinkSpeedIndCb)
			mac_ctx->sme.pLinkSpeedIndCb(ls_ind,
					mac_ctx->sme.pLinkSpeedCbContext);
		else
			WMA_LOGD("%s: pLinkSpeedIndCb is null", __func__);
		qdf_mem_free(ls_ind);

	}
	return qdf_status;
}

/**
 * wma_link_speed_event_handler() - link speed event handler
 * @handle: wma handle
 * @cmd_param_info: event data
 * @len: length
 *
 * Return: 0 for success or error code
 */
int wma_link_speed_event_handler(void *handle, uint8_t *cmd_param_info,
				 uint32_t len)
{
	WMI_PEER_ESTIMATED_LINKSPEED_EVENTID_param_tlvs *param_buf;
	wmi_peer_estimated_linkspeed_event_fixed_param *event;
	QDF_STATUS qdf_status;

	param_buf = (WMI_PEER_ESTIMATED_LINKSPEED_EVENTID_param_tlvs *)
		cmd_param_info;
	if (!param_buf) {
		WMA_LOGE("%s: Invalid linkspeed event", __func__);
		return -EINVAL;
	}
	event = param_buf->fixed_param;
	qdf_status = wma_send_link_speed(event->est_linkspeed_kbps);
	if (!QDF_IS_STATUS_SUCCESS(qdf_status))
		return -EINVAL;
	return 0;
}

/**
 * wma_wni_cfg_dnld() - cfg download request
 * @wma_handle: wma handle
 *
 * Return: QDF_STATUS_SUCCESS for success or error code
 */
QDF_STATUS wma_wni_cfg_dnld(tp_wma_handle wma_handle)
{
	QDF_STATUS qdf_status = QDF_STATUS_SUCCESS;
	void *mac = cds_get_context(QDF_MODULE_ID_PE);

	WMA_LOGD("%s: Enter", __func__);

	if (NULL == mac) {
		WMA_LOGE("%s: Invalid context", __func__);
		QDF_ASSERT(0);
		return QDF_STATUS_E_FAILURE;
	}

	process_cfg_download_req(mac);
3498 WMA_LOGD("%s: Exit", __func__); 3499 return qdf_status; 3500 } 3501 3502 #define BIG_ENDIAN_MAX_DEBUG_BUF 500 3503 /** 3504 * wma_unified_debug_print_event_handler() - debug print event handler 3505 * @handle: wma handle 3506 * @datap: data pointer 3507 * @len: length 3508 * 3509 * Return: 0 for success or error code 3510 */ 3511 int wma_unified_debug_print_event_handler(void *handle, uint8_t *datap, 3512 uint32_t len) 3513 { 3514 WMI_DEBUG_PRINT_EVENTID_param_tlvs *param_buf; 3515 uint8_t *data; 3516 uint32_t datalen; 3517 3518 param_buf = (WMI_DEBUG_PRINT_EVENTID_param_tlvs *) datap; 3519 if (!param_buf || !param_buf->data) { 3520 WMA_LOGE("Get NULL point message from FW"); 3521 return -ENOMEM; 3522 } 3523 data = param_buf->data; 3524 datalen = param_buf->num_data; 3525 if (datalen > WMI_SVC_MSG_MAX_SIZE) { 3526 WMA_LOGE("Received data len %d exceeds max value %d", 3527 datalen, WMI_SVC_MSG_MAX_SIZE); 3528 return QDF_STATUS_E_FAILURE; 3529 } 3530 data[datalen - 1] = '\0'; 3531 3532 #ifdef BIG_ENDIAN_HOST 3533 { 3534 if (datalen >= BIG_ENDIAN_MAX_DEBUG_BUF) { 3535 WMA_LOGE("%s Invalid data len %d, limiting to max", 3536 __func__, datalen); 3537 datalen = BIG_ENDIAN_MAX_DEBUG_BUF - 1; 3538 } 3539 char dbgbuf[BIG_ENDIAN_MAX_DEBUG_BUF] = { 0 }; 3540 3541 memcpy(dbgbuf, data, datalen); 3542 SWAPME(dbgbuf, datalen); 3543 WMA_LOGD("FIRMWARE:%s", dbgbuf); 3544 return 0; 3545 } 3546 #else 3547 WMA_LOGD("FIRMWARE:%s", data); 3548 return 0; 3549 #endif /* BIG_ENDIAN_HOST */ 3550 } 3551 3552 /** 3553 * wma_is_sap_active() - check sap is active or not 3554 * @handle: wma handle 3555 * 3556 * Return: true/false 3557 */ 3558 bool wma_is_sap_active(tp_wma_handle wma_handle) 3559 { 3560 int i; 3561 3562 for (i = 0; i < wma_handle->max_bssid; i++) { 3563 if (!wma_is_vdev_up(i)) 3564 continue; 3565 if (wma_handle->interfaces[i].type == WMI_VDEV_TYPE_AP && 3566 wma_handle->interfaces[i].sub_type == 0) 3567 return true; 3568 } 3569 return false; 3570 } 3571 3572 /** 3573 * 
 * wma_is_p2p_go_active() - check p2p go is active or not
 * @wma_handle: wma handle
 *
 * Return: true/false
 */
bool wma_is_p2p_go_active(tp_wma_handle wma_handle)
{
	int i;

	/* P2P GO: AP-type vdev with the P2P_GO sub_type that is up */
	for (i = 0; i < wma_handle->max_bssid; i++) {
		if (!wma_is_vdev_up(i))
			continue;
		if (wma_handle->interfaces[i].type == WMI_VDEV_TYPE_AP &&
		    wma_handle->interfaces[i].sub_type ==
		    WMI_UNIFIED_VDEV_SUBTYPE_P2P_GO)
			return true;
	}
	return false;
}

/**
 * wma_is_p2p_cli_active() - check p2p cli is active or not
 * @wma_handle: wma handle
 *
 * Return: true/false
 */
bool wma_is_p2p_cli_active(tp_wma_handle wma_handle)
{
	int i;

	/* P2P client: STA-type vdev with the P2P_CLIENT sub_type that is up */
	for (i = 0; i < wma_handle->max_bssid; i++) {
		if (!wma_is_vdev_up(i))
			continue;
		if (wma_handle->interfaces[i].type == WMI_VDEV_TYPE_STA &&
		    wma_handle->interfaces[i].sub_type ==
		    WMI_UNIFIED_VDEV_SUBTYPE_P2P_CLIENT)
			return true;
	}
	return false;
}

/**
 * wma_is_sta_active() - check sta is active or not
 * @wma_handle: wma handle
 *
 * Return: true/false
 */
bool wma_is_sta_active(tp_wma_handle wma_handle)
{
	int i;

	/* Infrastructure STA (sub_type 0) or IBSS vdevs both count */
	for (i = 0; i < wma_handle->max_bssid; i++) {
		if (!wma_is_vdev_up(i))
			continue;
		if (wma_handle->interfaces[i].type == WMI_VDEV_TYPE_STA &&
		    wma_handle->interfaces[i].sub_type == 0)
			return true;
		if (wma_handle->interfaces[i].type == WMI_VDEV_TYPE_IBSS)
			return true;
	}
	return false;
}

/**
 * wma_peer_phymode() - get phymode
 * @nw_type: nw type
 * @sta_type: sta type
 * @is_ht: is ht supported
 * @ch_width: supported channel width
 * @is_vht: is vht supported
 * @is_he: is HE supported
 *
 * Return: WLAN_PHY_MODE
 */
WLAN_PHY_MODE wma_peer_phymode(tSirNwType nw_type, uint8_t sta_type,
			       uint8_t is_ht, uint8_t ch_width,
			       uint8_t is_vht, bool
			       is_he)
{
	WLAN_PHY_MODE phymode = MODE_UNKNOWN;

	switch (nw_type) {
	case eSIR_11B_NW_TYPE:
#ifdef FEATURE_WLAN_TDLS
		/* TDLS peers may negotiate HT/VHT even on a 11B network */
		if (STA_ENTRY_TDLS_PEER == sta_type) {
			if (is_vht) {
				if (CH_WIDTH_80MHZ == ch_width)
					phymode = MODE_11AC_VHT80;
				else
					phymode = (CH_WIDTH_40MHZ == ch_width) ?
						  MODE_11AC_VHT40 :
						  MODE_11AC_VHT20;
			} else if (is_ht) {
				phymode = (CH_WIDTH_40MHZ == ch_width) ?
					  MODE_11NG_HT40 : MODE_11NG_HT20;
			} else
				phymode = MODE_11B;
		} else
#endif /* FEATURE_WLAN_TDLS */
		{
			phymode = MODE_11B;
			if (is_ht || is_vht || is_he)
				WMA_LOGE("HT/VHT is enabled with 11B NW type");
		}
		break;
	case eSIR_11G_NW_TYPE:
		if (!(is_ht || is_vht || is_he)) {
			phymode = MODE_11G;
			break;
		}
		/* 2.4 GHz is capped at 40 MHz regardless of requested width */
		if (CH_WIDTH_40MHZ < ch_width)
			WMA_LOGE("80/160 MHz BW sent in 11G, configured 40MHz");
		if (ch_width)
			phymode = (is_he) ? MODE_11AX_HE40_2G : (is_vht) ?
				MODE_11AC_VHT40_2G : MODE_11NG_HT40;
		else
			phymode = (is_he) ? MODE_11AX_HE20_2G : (is_vht) ?
				MODE_11AC_VHT20_2G : MODE_11NG_HT20;
		break;
	case eSIR_11A_NW_TYPE:
		if (!(is_ht || is_vht || is_he)) {
			phymode = MODE_11A;
			break;
		}
		/* Highest supported amendment wins: HE > VHT > HT */
		if (is_he) {
			if (ch_width == CH_WIDTH_160MHZ)
				phymode = MODE_11AX_HE160;
			else if (ch_width == CH_WIDTH_80P80MHZ)
				phymode = MODE_11AX_HE80_80;
			else if (ch_width == CH_WIDTH_80MHZ)
				phymode = MODE_11AX_HE80;
			else
				phymode = (ch_width) ?
					  MODE_11AX_HE40 : MODE_11AX_HE20;
		} else if (is_vht) {
			if (ch_width == CH_WIDTH_160MHZ)
				phymode = MODE_11AC_VHT160;
			else if (ch_width == CH_WIDTH_80P80MHZ)
				phymode = MODE_11AC_VHT80_80;
			else if (ch_width == CH_WIDTH_80MHZ)
				phymode = MODE_11AC_VHT80;
			else
				phymode = (ch_width) ?
					  MODE_11AC_VHT40 : MODE_11AC_VHT20;
		} else
			phymode = (ch_width) ?
				  MODE_11NA_HT40 : MODE_11NA_HT20;
		break;
	default:
		WMA_LOGE("%s: Invalid nw type %d", __func__, nw_type);
		break;
	}
	WMA_LOGD(FL("nw_type %d is_ht %d ch_width %d is_vht %d is_he %d phymode %d"),
		 nw_type, is_ht, ch_width, is_vht, is_he, phymode);

	return phymode;
}

/**
 * wma_txrx_fw_stats_reset() - reset txrx fw statistics
 * @wma_handle: wma handle
 * @vdev_id: vdev id
 * @value: bitmask of stats types to reset
 *
 * Return: 0 for success or return error
 */
int32_t wma_txrx_fw_stats_reset(tp_wma_handle wma_handle,
				uint8_t vdev_id, uint32_t value)
{
	struct ol_txrx_stats_req req;
	struct cdp_vdev *vdev;
	void *soc = cds_get_context(QDF_MODULE_ID_SOC);

	if (!soc) {
		WMA_LOGE("%s:SOC context is NULL", __func__);
		return -EINVAL;
	}

	vdev = wma_find_vdev_by_id(wma_handle, vdev_id);
	if (!vdev) {
		WMA_LOGE("%s:Invalid vdev handle", __func__);
		return -EINVAL;
	}
	qdf_mem_zero(&req, sizeof(req));
	req.stats_type_reset_mask = value;
	cdp_fw_stats_get(soc, vdev, &req, false, false);

	return 0;
}

/* HELIUMPLUS targets index the V2 rate-info stats types */
#ifdef HELIUMPLUS
#define SET_UPLOAD_MASK(_mask, _rate_info) \
	((_mask) = 1 << (_rate_info ## _V2))
#else /* !HELIUMPLUS */
#define SET_UPLOAD_MASK(_mask, _rate_info) \
	((_mask) = 1 << (_rate_info))
#endif

#ifdef HELIUMPLUS
/* Reject out-of-range or unsupported HTT debug stats ids (1-based) */
static bool wma_is_valid_fw_stats_cmd(uint32_t value)
{
	if (value > (HTT_DBG_NUM_STATS + 1) ||
	    value == (HTT_DBG_STATS_RX_RATE_INFO + 1) ||
	    value == (HTT_DBG_STATS_TX_RATE_INFO + 1) ||
	    value == (HTT_DBG_STATS_TXBF_MUSU_NDPA_PKT + 1)) {
		WMA_LOGE("%s: Not supported", __func__);
		return false;
	}
	return true;
}
#else
/* Reject out-of-range or unsupported HTT debug stats ids (1-based) */
static bool wma_is_valid_fw_stats_cmd(uint32_t value)
{
	if (value > (HTT_DBG_NUM_STATS + 1) ||
	    value == (HTT_DBG_STATS_RX_RATE_INFO_V2 + 1) ||
	    value ==
	    (HTT_DBG_STATS_TX_RATE_INFO_V2 + 1) ||
	    value == (HTT_DBG_STATS_TXBF_MUSU_NDPA_PKT + 1)) {
		WMA_LOGE("%s: Not supported", __func__);
		return false;
	}
	return true;
}
#endif

/**
 * wma_set_txrx_fw_stats_level() - set txrx fw stats level
 * @wma_handle: wma handle
 * @vdev_id: vdev id
 * @value: 1-based HTT debug stats id to upload
 *
 * Return: 0 for success or return error
 */
int32_t wma_set_txrx_fw_stats_level(tp_wma_handle wma_handle,
				    uint8_t vdev_id, uint32_t value)
{
	struct ol_txrx_stats_req req;
	struct cdp_vdev *vdev;
	uint32_t l_up_mask;
	void *soc = cds_get_context(QDF_MODULE_ID_SOC);

	if (!soc) {
		WMA_LOGE("%s:SOC context is NULL", __func__);
		return -EINVAL;
	}

	vdev = wma_find_vdev_by_id(wma_handle, vdev_id);
	if (!vdev) {
		WMA_LOGE("%s:Invalid vdev handle", __func__);
		return -EINVAL;
	}

	if (wma_is_valid_fw_stats_cmd(value) == false)
		return -EINVAL;

	qdf_mem_zero(&req, sizeof(req));
	req.print.verbose = 1;

	/* TODO: Need to check how to avoid mem leak*/
	/* value is 1-based; convert to a 0-based upload bitmask */
	l_up_mask = 1 << (value - 1);
	req.stats_type_upload_mask = l_up_mask;

	cdp_fw_stats_get(soc, vdev, &req, false, true);

	return 0;
}

#ifndef QCA_SUPPORT_CP_STATS
/**
 * wma_get_stats_rsp_buf() - fill get stats response buffer
 * @get_stats_param: get stats parameters
 *
 * Return: stats response buffer, or NULL on allocation failure
 */
static tAniGetPEStatsRsp *wma_get_stats_rsp_buf
	(tAniGetPEStatsReq *get_stats_param)
{
	tAniGetPEStatsRsp *stats_rsp_params;
	uint32_t len, temp_mask;

	/* Size the response to fit only the stats groups requested */
	len = sizeof(tAniGetPEStatsRsp);
	temp_mask = get_stats_param->statsMask;

	if (temp_mask & (1 << eCsrSummaryStats))
		len += sizeof(tCsrSummaryStatsInfo);

	if (temp_mask & (1 << eCsrGlobalClassAStats))
		len += sizeof(tCsrGlobalClassAStatsInfo);

	if (temp_mask & (1 <<
	    eCsrGlobalClassDStats))
		len += sizeof(tCsrGlobalClassDStatsInfo);

	if (temp_mask & (1 << csr_per_chain_rssi_stats))
		len += sizeof(struct csr_per_chain_rssi_stats_info);

	stats_rsp_params = qdf_mem_malloc(len);
	if (!stats_rsp_params) {
		WMA_LOGE("memory allocation failed for tAniGetPEStatsRsp");
		QDF_ASSERT(0);
		return NULL;
	}

	stats_rsp_params->staId = get_stats_param->staId;
	stats_rsp_params->statsMask = get_stats_param->statsMask;
	stats_rsp_params->msgType = WMA_GET_STATISTICS_RSP;
	stats_rsp_params->msgLen = len - sizeof(tAniGetPEStatsRsp);
	stats_rsp_params->rc = QDF_STATUS_SUCCESS;
	return stats_rsp_params;
}

/**
 * wma_get_stats_req() - get stats request
 * @handle: wma handle
 * @get_stats_param: stats params (always freed before returning)
 *
 * Return: none
 */
void wma_get_stats_req(WMA_HANDLE handle,
		       tAniGetPEStatsReq *get_stats_param)
{
	tp_wma_handle wma_handle = (tp_wma_handle) handle;
	struct wma_txrx_node *node;
	struct stats_request_params cmd = {0};
	tAniGetPEStatsRsp *pGetPEStatsRspParams;


	WMA_LOGD("%s: Enter", __func__);
	node = &wma_handle->interfaces[get_stats_param->sessionId];
	if (node->stats_rsp) {
		pGetPEStatsRspParams = node->stats_rsp;
		if (pGetPEStatsRspParams->staId == get_stats_param->staId &&
		    pGetPEStatsRspParams->statsMask ==
			get_stats_param->statsMask) {
			/*
			 * Identical request already in flight: answer the
			 * caller with an immediate failure response rather
			 * than re-issuing the firmware command.
			 */
			WMA_LOGD("Stats for staId %d with stats mask %d is pending.. ignore new request",
				 get_stats_param->staId,
				 get_stats_param->statsMask);
			pGetPEStatsRspParams =
				wma_get_stats_rsp_buf(get_stats_param);
			if (!pGetPEStatsRspParams) {
				WMA_LOGE("failed to allocate memory for stats response");
				goto end;
			}
			goto req_pending;
		} else {
			/* Different request supersedes the stale pending one */
			qdf_mem_free(node->stats_rsp);
			node->stats_rsp = NULL;
			node->fw_stats_set = 0;
		}
	}

	pGetPEStatsRspParams = wma_get_stats_rsp_buf(get_stats_param);
	if (!pGetPEStatsRspParams)
		goto end;

	node->fw_stats_set = 0;
	if (node->stats_rsp) {
		WMA_LOGD(FL("stats_rsp is not null, prev_value: %pK"),
			 node->stats_rsp);
		qdf_mem_free(node->stats_rsp);
		node->stats_rsp = NULL;
	}
	node->stats_rsp = pGetPEStatsRspParams;
	wma_handle->get_sta_peer_info = false;
	WMA_LOGD("stats_rsp allocated: %pK, sta_id: %d, mask: %d, vdev_id: %d",
		 node->stats_rsp, node->stats_rsp->staId,
		 node->stats_rsp->statsMask, get_stats_param->sessionId);

	cmd.vdev_id = get_stats_param->sessionId;
	cmd.stats_id = get_stats_param->statsMask;
	if (wmi_unified_stats_request_send(wma_handle->wmi_handle,
					   node->bssid,
					   &cmd)) {
		WMA_LOGE("%s: Failed to send WMI_REQUEST_STATS_CMDID",
			 __func__);
		goto failed;
	}

	goto end;
failed:
	/* Response buffer ownership moves to UMAC via wma_send_msg below */
	node->stats_rsp = NULL;
req_pending:
	pGetPEStatsRspParams->rc = QDF_STATUS_E_FAILURE;
	/* send response to UMAC */
	wma_send_msg(wma_handle, WMA_GET_STATISTICS_RSP, pGetPEStatsRspParams,
		     0);
end:
	qdf_mem_free(get_stats_param);
	WMA_LOGD("%s: Exit", __func__);
}
#endif /* QCA_SUPPORT_CP_STATS */

/**
 * wma_get_cca_stats() - send request to fw to get CCA
 * @wma_handle: wma handle
 * @vdev_id: vdev id
 *
 * Return: QDF status
 */
QDF_STATUS wma_get_cca_stats(tp_wma_handle wma_handle,
			     uint8_t vdev_id)
{
	if
	   (wmi_unified_congestion_request_cmd(wma_handle->wmi_handle,
					       vdev_id)) {
		WMA_LOGE("Failed to congestion request to fw");
		return QDF_STATUS_E_FAILURE;
	}
	return QDF_STATUS_SUCCESS;
}

/**
 * wma_get_beacon_buffer_by_vdev_id() - get the beacon buffer from vdev ID
 * @vdev_id: vdev id
 * @buffer_size: out: size of the returned buffer (may be NULL)
 *
 * Return: caller-owned copy of the beacon frame, or NULL on error
 */
void *wma_get_beacon_buffer_by_vdev_id(uint8_t vdev_id, uint32_t *buffer_size)
{
	tp_wma_handle wma;
	struct beacon_info *beacon;
	uint8_t *buf;
	uint32_t buf_size;

	wma = cds_get_context(QDF_MODULE_ID_WMA);
	if (!wma) {
		WMA_LOGE("%s: Invalid WMA handle", __func__);
		return NULL;
	}

	if (vdev_id >= wma->max_bssid) {
		WMA_LOGE("%s: Invalid vdev_id %u", __func__, vdev_id);
		return NULL;
	}

	/* Only AP-mode vdevs maintain a beacon template */
	if (!wma_is_vdev_in_ap_mode(wma, vdev_id)) {
		WMA_LOGE("%s: vdevid %d is not in AP mode", __func__, vdev_id);
		return NULL;
	}

	beacon = wma->interfaces[vdev_id].beacon;

	if (!beacon) {
		WMA_LOGE("%s: beacon invalid", __func__);
		return NULL;
	}

	/* Hold the beacon lock across length read and copy so the
	 * snapshot is consistent even if the template is being updated.
	 */
	qdf_spin_lock_bh(&beacon->lock);

	buf_size = qdf_nbuf_len(beacon->buf);
	buf = qdf_mem_malloc(buf_size);

	if (!buf) {
		qdf_spin_unlock_bh(&beacon->lock);
		WMA_LOGE("%s: alloc failed for beacon buf", __func__);
		return NULL;
	}

	qdf_mem_copy(buf, qdf_nbuf_data(beacon->buf), buf_size);

	qdf_spin_unlock_bh(&beacon->lock);

	if (buffer_size)
		*buffer_size = buf_size;

	return buf;
}

/**
 * wma_get_vdev_address_by_vdev_id() - lookup MAC address from vdev ID
 * @vdev_id: vdev id
 *
 * Return: mac address, or NULL on invalid context/vdev_id
 */
uint8_t *wma_get_vdev_address_by_vdev_id(uint8_t vdev_id)
{
	tp_wma_handle wma;

	wma = cds_get_context(QDF_MODULE_ID_WMA);
	if (!wma) {
		WMA_LOGE("%s: Invalid WMA handle",
__func__); 4050 return NULL; 4051 } 4052 4053 if (vdev_id >= wma->max_bssid) { 4054 WMA_LOGE("%s: Invalid vdev_id %u", __func__, vdev_id); 4055 return NULL; 4056 } 4057 4058 return wma->interfaces[vdev_id].addr; 4059 } 4060 4061 QDF_STATUS wma_get_connection_info(uint8_t vdev_id, 4062 struct policy_mgr_vdev_entry_info *conn_table_entry) 4063 { 4064 struct wma_txrx_node *wma_conn_table_entry; 4065 4066 wma_conn_table_entry = wma_get_interface_by_vdev_id(vdev_id); 4067 if (NULL == wma_conn_table_entry) { 4068 WMA_LOGE("%s: can't find vdev_id %d in WMA table", __func__, vdev_id); 4069 return QDF_STATUS_E_FAILURE; 4070 } 4071 conn_table_entry->chan_width = wma_conn_table_entry->chan_width; 4072 conn_table_entry->mac_id = wma_conn_table_entry->mac_id; 4073 conn_table_entry->mhz = wma_conn_table_entry->mhz; 4074 conn_table_entry->sub_type = wma_conn_table_entry->sub_type; 4075 conn_table_entry->type = wma_conn_table_entry->type; 4076 4077 return QDF_STATUS_SUCCESS; 4078 } 4079 4080 /** 4081 * wma_get_interface_by_vdev_id() - lookup interface entry using vdev ID 4082 * @vdev_id: vdev id 4083 * 4084 * Return: entry from vdev table 4085 */ 4086 struct wma_txrx_node *wma_get_interface_by_vdev_id(uint8_t vdev_id) 4087 { 4088 tp_wma_handle wma; 4089 4090 wma = cds_get_context(QDF_MODULE_ID_WMA); 4091 if (!wma) { 4092 WMA_LOGE("%s: Invalid WMA handle", __func__); 4093 return NULL; 4094 } 4095 4096 if (vdev_id >= wma->max_bssid) { 4097 WMA_LOGE("%s: Invalid vdev_id %u", __func__, vdev_id); 4098 return NULL; 4099 } 4100 4101 return &wma->interfaces[vdev_id]; 4102 } 4103 4104 QDF_STATUS wma_get_wcnss_software_version(uint8_t *version, 4105 uint32_t version_buffer_size) 4106 { 4107 tp_wma_handle wma_handle = cds_get_context(QDF_MODULE_ID_WMA); 4108 struct target_psoc_info *tgt_hdl; 4109 4110 if (NULL == wma_handle) { 4111 WMA_LOGE("%s: Failed to get wma", __func__); 4112 return QDF_STATUS_E_FAULT; 4113 } 4114 4115 tgt_hdl = wlan_psoc_get_tgt_if_handle(wma_handle->psoc); 4116 if 
(!tgt_hdl) { 4117 WMA_LOGE("%s: Failed to get wma", __func__); 4118 return QDF_STATUS_E_FAULT; 4119 } 4120 4121 snprintf(version, version_buffer_size, "%x", 4122 target_if_get_fw_version(tgt_hdl)); 4123 return QDF_STATUS_SUCCESS; 4124 } 4125 4126 /** 4127 * wma_get_mac_id_of_vdev() - Get MAC id corresponding to a vdev 4128 * @vdev_id: VDEV whose MAC ID is required 4129 * 4130 * Get MAC id corresponding to a vdev id from the WMA structure 4131 * 4132 * Return: Negative value on failure and MAC id on success 4133 */ 4134 int8_t wma_get_mac_id_of_vdev(uint32_t vdev_id) 4135 { 4136 tp_wma_handle wma; 4137 4138 wma = cds_get_context(QDF_MODULE_ID_WMA); 4139 if (!wma) { 4140 WMA_LOGE("%s: Invalid WMA handle", __func__); 4141 return -EINVAL; 4142 } 4143 4144 if (wma->interfaces) 4145 return wma->interfaces[vdev_id].mac_id; 4146 4147 return -EINVAL; 4148 } 4149 4150 /** 4151 * wma_update_intf_hw_mode_params() - Update WMA params 4152 * @vdev_id: VDEV id whose params needs to be updated 4153 * @mac_id: MAC id to be updated 4154 * @cfgd_hw_mode_index: HW mode index from which Tx and Rx SS will be updated 4155 * 4156 * Updates the MAC id, tx spatial stream, rx spatial stream in WMA 4157 * 4158 * Return: None 4159 */ 4160 void wma_update_intf_hw_mode_params(uint32_t vdev_id, uint32_t mac_id, 4161 uint32_t cfgd_hw_mode_index) 4162 { 4163 tp_wma_handle wma; 4164 uint32_t param; 4165 4166 wma = cds_get_context(QDF_MODULE_ID_WMA); 4167 if (!wma) { 4168 WMA_LOGE("%s: Invalid WMA handle", __func__); 4169 return; 4170 } 4171 4172 if (!wma->interfaces) { 4173 WMA_LOGE("%s: Interface is NULL", __func__); 4174 return; 4175 } 4176 4177 if (cfgd_hw_mode_index > wma->num_dbs_hw_modes) { 4178 WMA_LOGE("%s: Invalid index", __func__); 4179 return; 4180 } 4181 4182 param = wma->hw_mode.hw_mode_list[cfgd_hw_mode_index]; 4183 wma->interfaces[vdev_id].mac_id = mac_id; 4184 if (mac_id == 0) { 4185 wma->interfaces[vdev_id].tx_streams = 4186 WMA_HW_MODE_MAC0_TX_STREAMS_GET(param); 4187 
wma->interfaces[vdev_id].rx_streams = 4188 WMA_HW_MODE_MAC0_RX_STREAMS_GET(param); 4189 } else { 4190 wma->interfaces[vdev_id].tx_streams = 4191 WMA_HW_MODE_MAC1_TX_STREAMS_GET(param); 4192 wma->interfaces[vdev_id].rx_streams = 4193 WMA_HW_MODE_MAC1_RX_STREAMS_GET(param); 4194 } 4195 } 4196 4197 /** 4198 * wma_get_vht_ch_width - return vht channel width 4199 * 4200 * Return: return vht channel width 4201 */ 4202 uint32_t wma_get_vht_ch_width(void) 4203 { 4204 uint32_t fw_ch_wd = WNI_CFG_VHT_CHANNEL_WIDTH_80MHZ; 4205 tp_wma_handle wm_hdl = cds_get_context(QDF_MODULE_ID_WMA); 4206 struct target_psoc_info *tgt_hdl; 4207 int vht_cap_info; 4208 4209 if (NULL == wm_hdl) 4210 return fw_ch_wd; 4211 4212 tgt_hdl = wlan_psoc_get_tgt_if_handle(wm_hdl->psoc); 4213 if (!tgt_hdl) 4214 return fw_ch_wd; 4215 4216 vht_cap_info = target_if_get_vht_cap_info(tgt_hdl); 4217 if (vht_cap_info & WMI_VHT_CAP_CH_WIDTH_80P80_160MHZ) 4218 fw_ch_wd = WNI_CFG_VHT_CHANNEL_WIDTH_80_PLUS_80MHZ; 4219 else if (vht_cap_info & WMI_VHT_CAP_CH_WIDTH_160MHZ) 4220 fw_ch_wd = WNI_CFG_VHT_CHANNEL_WIDTH_160MHZ; 4221 4222 return fw_ch_wd; 4223 } 4224 4225 /** 4226 * wma_get_num_of_setbits_from_bitmask() - to get num of setbits from bitmask 4227 * @mask: given bitmask 4228 * 4229 * This helper function should return number of setbits from bitmask 4230 * 4231 * Return: number of setbits from bitmask 4232 */ 4233 uint32_t wma_get_num_of_setbits_from_bitmask(uint32_t mask) 4234 { 4235 uint32_t num_of_setbits = 0; 4236 4237 while (mask) { 4238 mask &= (mask - 1); 4239 num_of_setbits++; 4240 } 4241 return num_of_setbits; 4242 } 4243 4244 /** 4245 * wma_is_csa_offload_enabled - checks fw CSA offload capability 4246 * 4247 * Return: true or false 4248 */ 4249 4250 bool wma_is_csa_offload_enabled(void) 4251 { 4252 tp_wma_handle wma = cds_get_context(QDF_MODULE_ID_WMA); 4253 4254 if (!wma) 4255 return false; 4256 4257 return wmi_service_enabled(wma->wmi_handle, 4258 wmi_service_csa_offload); 4259 } 4260 #ifdef 
FEATURE_FW_LOG_PARSING 4261 /** 4262 * wma_config_debug_module_cmd - set debug log config 4263 * @wmi_handle: wmi layer handle 4264 * @param: debug log parameter 4265 * @val: debug log value 4266 * @module_id_bitmap: debug module id bitmap 4267 * @bitmap_len: debug module bitmap length 4268 * 4269 * Return: QDF_STATUS_SUCCESS for success or error code 4270 */ 4271 QDF_STATUS 4272 wma_config_debug_module_cmd(wmi_unified_t wmi_handle, A_UINT32 param, 4273 A_UINT32 val, A_UINT32 *module_id_bitmap, 4274 A_UINT32 bitmap_len) 4275 { 4276 struct dbglog_params dbg_param; 4277 4278 dbg_param.param = param; 4279 dbg_param.val = val; 4280 dbg_param.module_id_bitmap = module_id_bitmap; 4281 dbg_param.bitmap_len = bitmap_len; 4282 4283 return wmi_unified_dbglog_cmd_send(wmi_handle, &dbg_param); 4284 } 4285 #endif 4286 4287 /** 4288 * wma_is_p2p_lo_capable() - if driver is capable of p2p listen offload 4289 * 4290 * This function checks if driver is capable of p2p listen offload 4291 * true: capable of p2p offload 4292 * false: not capable 4293 * 4294 * Return: true - capable, false - not capable 4295 */ 4296 bool wma_is_p2p_lo_capable(void) 4297 { 4298 return wma_is_service_enabled(wmi_service_p2p_listen_offload_support); 4299 } 4300 4301 bool wma_capability_enhanced_mcast_filter(void) 4302 { 4303 return wma_is_service_enabled(wmi_service_enhanced_mcast_filter); 4304 } 4305 4306 4307 bool wma_is_vdev_up(uint8_t vdev_id) 4308 { 4309 struct wlan_objmgr_vdev *vdev; 4310 tp_wma_handle wma = (tp_wma_handle)cds_get_context(QDF_MODULE_ID_WMA); 4311 enum wlan_vdev_state state = WLAN_VDEV_S_INIT; 4312 4313 if (!wma) { 4314 WMA_LOGE("%s: WMA context is invald!", __func__); 4315 return false; 4316 } 4317 4318 vdev = wlan_objmgr_get_vdev_by_id_from_psoc(wma->psoc, vdev_id, 4319 WLAN_LEGACY_WMA_ID); 4320 if (vdev) { 4321 wlan_vdev_obj_lock(vdev); 4322 state = wlan_vdev_mlme_get_state(vdev); 4323 wlan_vdev_obj_unlock(vdev); 4324 wlan_objmgr_vdev_release_ref(vdev, WLAN_LEGACY_WMA_ID); 4325 } 
4326 return (state == WLAN_VDEV_S_RUN) ? true : false; 4327 } 4328 4329 void wma_acquire_wakelock(qdf_wake_lock_t *wl, uint32_t msec) 4330 { 4331 t_wma_handle *wma = cds_get_context(QDF_MODULE_ID_WMA); 4332 4333 cds_host_diag_log_work(wl, msec, WIFI_POWER_EVENT_WAKELOCK_WMI_CMD_RSP); 4334 qdf_wake_lock_timeout_acquire(wl, msec); 4335 qdf_runtime_pm_prevent_suspend(&wma->wmi_cmd_rsp_runtime_lock); 4336 } 4337 4338 void wma_release_wakelock(qdf_wake_lock_t *wl) 4339 { 4340 t_wma_handle *wma = cds_get_context(QDF_MODULE_ID_WMA); 4341 4342 qdf_wake_lock_release(wl, WIFI_POWER_EVENT_WAKELOCK_WMI_CMD_RSP); 4343 qdf_runtime_pm_allow_suspend(&wma->wmi_cmd_rsp_runtime_lock); 4344 } 4345 4346 QDF_STATUS 4347 wma_send_vdev_start_to_fw(t_wma_handle *wma, struct vdev_start_params *params) 4348 { 4349 QDF_STATUS status; 4350 struct wma_txrx_node *vdev = &wma->interfaces[params->vdev_id]; 4351 4352 wma_acquire_wakelock(&vdev->vdev_start_wakelock, 4353 WMA_VDEV_START_REQUEST_TIMEOUT); 4354 status = wmi_unified_vdev_start_send(wma->wmi_handle, params); 4355 if (QDF_IS_STATUS_ERROR(status)) 4356 wma_release_wakelock(&vdev->vdev_start_wakelock); 4357 4358 return status; 4359 } 4360 4361 QDF_STATUS wma_send_vdev_stop_to_fw(t_wma_handle *wma, uint8_t vdev_id) 4362 { 4363 QDF_STATUS status; 4364 struct wma_txrx_node *vdev = &wma->interfaces[vdev_id]; 4365 4366 wma_acquire_wakelock(&vdev->vdev_stop_wakelock, 4367 WMA_VDEV_STOP_REQUEST_TIMEOUT); 4368 status = wmi_unified_vdev_stop_send(wma->wmi_handle, vdev_id); 4369 if (QDF_IS_STATUS_ERROR(status)) 4370 wma_release_wakelock(&vdev->vdev_stop_wakelock); 4371 4372 return status; 4373 } 4374 4375 QDF_STATUS wma_get_rcpi_req(WMA_HANDLE handle, 4376 struct sme_rcpi_req *rcpi_request) 4377 { 4378 tp_wma_handle wma_handle = (tp_wma_handle) handle; 4379 struct rcpi_req cmd = {0}; 4380 struct wma_txrx_node *iface; 4381 struct sme_rcpi_req *node_rcpi_req; 4382 4383 WMA_LOGD("%s: Enter", __func__); 4384 iface = 
&wma_handle->interfaces[rcpi_request->session_id]; 4385 /* command is in progress */ 4386 if (iface->rcpi_req != NULL) { 4387 WMA_LOGE("%s : previous rcpi request is pending", __func__); 4388 return QDF_STATUS_SUCCESS; 4389 } 4390 4391 node_rcpi_req = qdf_mem_malloc(sizeof(*node_rcpi_req)); 4392 if (!node_rcpi_req) { 4393 WMA_LOGE("Failed to allocate memory for rcpi_request"); 4394 return QDF_STATUS_E_NOMEM; 4395 } 4396 4397 *node_rcpi_req = *rcpi_request; 4398 iface->rcpi_req = node_rcpi_req; 4399 4400 cmd.vdev_id = rcpi_request->session_id; 4401 qdf_mem_copy(cmd.mac_addr, &rcpi_request->mac_addr, QDF_MAC_ADDR_SIZE); 4402 cmd.measurement_type = rcpi_request->measurement_type; 4403 4404 if (wmi_unified_send_request_get_rcpi_cmd(wma_handle->wmi_handle, 4405 &cmd)) { 4406 WMA_LOGE("%s: Failed to send WMI_REQUEST_RCPI_CMDID", 4407 __func__); 4408 iface->rcpi_req = NULL; 4409 qdf_mem_free(node_rcpi_req); 4410 return QDF_STATUS_E_FAILURE; 4411 } 4412 4413 WMA_LOGD("%s: Exit", __func__); 4414 4415 return QDF_STATUS_SUCCESS; 4416 } 4417 4418 int wma_rcpi_event_handler(void *handle, uint8_t *cmd_param_info, 4419 uint32_t len) 4420 { 4421 struct rcpi_res res = {0}; 4422 struct sme_rcpi_req *rcpi_req; 4423 struct qdf_mac_addr qdf_mac; 4424 struct wma_txrx_node *iface; 4425 QDF_STATUS status = QDF_STATUS_SUCCESS; 4426 tp_wma_handle wma_handle = (tp_wma_handle)handle; 4427 4428 status = wmi_extract_rcpi_response_event(wma_handle->wmi_handle, 4429 cmd_param_info, &res); 4430 if (status == QDF_STATUS_E_INVAL) 4431 return -EINVAL; 4432 4433 iface = &wma_handle->interfaces[res.vdev_id]; 4434 if (!iface->rcpi_req) { 4435 WMI_LOGE("rcpi_req buffer not available"); 4436 return 0; 4437 } 4438 4439 rcpi_req = iface->rcpi_req; 4440 if (!rcpi_req->rcpi_callback) { 4441 iface->rcpi_req = NULL; 4442 qdf_mem_free(rcpi_req); 4443 return 0; 4444 } 4445 4446 if ((res.measurement_type == RCPI_MEASUREMENT_TYPE_INVALID) || 4447 (res.vdev_id != rcpi_req->session_id) || 4448 (res.measurement_type 
!= rcpi_req->measurement_type) || 4449 (qdf_mem_cmp(res.mac_addr, &rcpi_req->mac_addr, 4450 QDF_MAC_ADDR_SIZE))) { 4451 WMI_LOGE("invalid rcpi_response"); 4452 iface->rcpi_req = NULL; 4453 qdf_mem_free(rcpi_req); 4454 return 0; 4455 } 4456 4457 qdf_mem_copy(&qdf_mac, res.mac_addr, QDF_MAC_ADDR_SIZE); 4458 (rcpi_req->rcpi_callback)(rcpi_req->rcpi_context, qdf_mac, 4459 res.rcpi_value, status); 4460 iface->rcpi_req = NULL; 4461 qdf_mem_free(rcpi_req); 4462 4463 return 0; 4464 } 4465 4466 bool wma_is_service_enabled(uint32_t service_type) 4467 { 4468 tp_wma_handle wma; 4469 4470 wma = cds_get_context(QDF_MODULE_ID_WMA); 4471 if (!wma) { 4472 WMA_LOGE("%s: Invalid WMA handle", __func__); 4473 return false; 4474 } 4475 4476 if (service_type >= WMI_MAX_SERVICE) { 4477 WMA_LOGE("%s: Invalid service type %d", __func__, service_type); 4478 return false; 4479 } 4480 4481 return wmi_service_enabled(wma->wmi_handle, service_type); 4482 } 4483 4484 QDF_STATUS wma_send_vdev_up_to_fw(t_wma_handle *wma, 4485 struct vdev_up_params *params, 4486 uint8_t bssid[IEEE80211_ADDR_LEN]) 4487 { 4488 QDF_STATUS status; 4489 struct wma_txrx_node *vdev = &wma->interfaces[params->vdev_id]; 4490 4491 if (wma_is_vdev_up(params->vdev_id)) { 4492 WMA_LOGD("vdev %d is already up for bssid %pM. 
Do not send", 4493 params->vdev_id, bssid); 4494 return QDF_STATUS_SUCCESS; 4495 } 4496 status = wmi_unified_vdev_up_send(wma->wmi_handle, bssid, params); 4497 wma_release_wakelock(&vdev->vdev_start_wakelock); 4498 4499 return status; 4500 } 4501 4502 QDF_STATUS wma_send_vdev_down_to_fw(t_wma_handle *wma, uint8_t vdev_id) 4503 { 4504 QDF_STATUS status; 4505 struct wma_txrx_node *vdev = &wma->interfaces[vdev_id]; 4506 4507 wma->interfaces[vdev_id].roaming_in_progress = false; 4508 status = wmi_unified_vdev_down_send(wma->wmi_handle, vdev_id); 4509 wma_release_wakelock(&vdev->vdev_start_wakelock); 4510 4511 return status; 4512 } 4513 4514 tSirWifiPeerType wmi_to_sir_peer_type(enum wmi_peer_type type) 4515 { 4516 switch (type) { 4517 case WMI_PEER_TYPE_DEFAULT: 4518 return WIFI_PEER_STA; 4519 case WMI_PEER_TYPE_BSS: 4520 return WIFI_PEER_AP; 4521 case WMI_PEER_TYPE_TDLS: 4522 return WIFI_PEER_TDLS; 4523 case WMI_PEER_TYPE_NAN_DATA: 4524 return WIFI_PEER_NAN; 4525 default: 4526 WMA_LOGE("Cannot map wmi_peer_type %d to HAL peer type", type); 4527 return WIFI_PEER_INVALID; 4528 } 4529 } 4530 4531 /** 4532 * wma_set_vc_mode_config() - set voltage corner mode config to FW. 4533 * @wma_handle: pointer to wma handle. 4534 * @vc_bitmap: value needs to set to firmware. 4535 * 4536 * At the time of driver startup, set operating voltage corner mode 4537 * for differenet phymode and bw configurations. 4538 * 4539 * Return: QDF_STATUS. 
4540 */ 4541 QDF_STATUS wma_set_vc_mode_config(void *wma_handle, 4542 uint32_t vc_bitmap) 4543 { 4544 int32_t ret; 4545 tp_wma_handle wma = (tp_wma_handle)wma_handle; 4546 struct pdev_params pdevparam; 4547 4548 pdevparam.param_id = WMI_PDEV_UPDATE_WDCVS_ALGO; 4549 pdevparam.param_value = vc_bitmap; 4550 4551 ret = wmi_unified_pdev_param_send(wma->wmi_handle, 4552 &pdevparam, 4553 WMA_WILDCARD_PDEV_ID); 4554 if (ret) { 4555 WMA_LOGE("Fail to Set Voltage Corner config (0x%x)", 4556 vc_bitmap); 4557 return QDF_STATUS_E_FAILURE; 4558 } 4559 4560 WMA_LOGD("Successfully Set Voltage Corner config (0x%x)", 4561 vc_bitmap); 4562 4563 return QDF_STATUS_SUCCESS; 4564 } 4565 4566 int wma_chip_power_save_failure_detected_handler(void *handle, 4567 uint8_t *cmd_param_info, 4568 uint32_t len) 4569 { 4570 tp_wma_handle wma = (tp_wma_handle)handle; 4571 WMI_PDEV_CHIP_POWER_SAVE_FAILURE_DETECTED_EVENTID_param_tlvs *param_buf; 4572 wmi_chip_power_save_failure_detected_fixed_param *event; 4573 struct chip_pwr_save_fail_detected_params pwr_save_fail_params; 4574 tpAniSirGlobal mac = (tpAniSirGlobal)cds_get_context( 4575 QDF_MODULE_ID_PE); 4576 if (wma == NULL) { 4577 WMA_LOGE("%s: wma_handle is NULL", __func__); 4578 return -EINVAL; 4579 } 4580 if (!mac) { 4581 WMA_LOGE("%s: Invalid mac context", __func__); 4582 return -EINVAL; 4583 } 4584 if (!mac->sme.chip_power_save_fail_cb) { 4585 WMA_LOGE("%s: Callback not registered", __func__); 4586 return -EINVAL; 4587 } 4588 4589 param_buf = 4590 (WMI_PDEV_CHIP_POWER_SAVE_FAILURE_DETECTED_EVENTID_param_tlvs *) 4591 cmd_param_info; 4592 if (!param_buf) { 4593 WMA_LOGE("%s: Invalid pwr_save_fail_params breached event", 4594 __func__); 4595 return -EINVAL; 4596 } 4597 event = param_buf->fixed_param; 4598 pwr_save_fail_params.failure_reason_code = 4599 event->power_save_failure_reason_code; 4600 pwr_save_fail_params.wake_lock_bitmap[0] = 4601 event->protocol_wake_lock_bitmap[0]; 4602 pwr_save_fail_params.wake_lock_bitmap[1] = 4603 
event->protocol_wake_lock_bitmap[1]; 4604 pwr_save_fail_params.wake_lock_bitmap[2] = 4605 event->protocol_wake_lock_bitmap[2]; 4606 pwr_save_fail_params.wake_lock_bitmap[3] = 4607 event->protocol_wake_lock_bitmap[3]; 4608 mac->sme.chip_power_save_fail_cb(mac->hHdd, 4609 &pwr_save_fail_params); 4610 4611 WMA_LOGD("%s: Invoke HDD pwr_save_fail callback", __func__); 4612 return 0; 4613 } 4614