/*
 * Copyright (c) 2021 The Linux Foundation. All rights reserved.
 * Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */

#include "hal_hw_headers.h"
#include "dp_types.h"
#include "dp_rx.h"
#include "dp_peer.h"
#include "hal_rx.h"
#include "hal_api.h"
#include "qdf_trace.h"
#include "qdf_nbuf.h"
#include "hal_api_mon.h"
#include "dp_internal.h"
#include "qdf_mem.h"   /* qdf_mem_malloc,free */
#include "dp_htt.h"
#include "dp_mon.h"
#include "dp_rx_mon.h"

#include "htt.h"
#ifdef FEATURE_PERPKT_INFO
#include "dp_ratetable.h"
#endif

#ifndef IEEE80211_FCO_SUBTYPE_ACTION_NO_ACK
#define IEEE80211_FCO_SUBTYPE_ACTION_NO_ACK 0xe0
#endif

#if defined(WLAN_CFR_ENABLE) && defined(WLAN_ENH_CFR_ENABLE)
void
dp_rx_mon_handle_cfr_mu_info(struct dp_pdev *pdev,
			     struct hal_rx_ppdu_info *ppdu_info,
			     struct cdp_rx_indication_ppdu *cdp_rx_ppdu)
{
	struct dp_peer *peer;
	struct dp_soc *soc = pdev->soc;
	struct mon_rx_user_status *rx_user_status;
	struct cdp_rx_stats_ppdu_user *rx_stats_peruser;
	uint32_t num_users;
	int user_id;
	uint16_t sw_peer_id;

	num_users = ppdu_info->com_info.num_users;
	for (user_id = 0; user_id < num_users; user_id++) {
		if (user_id >= OFDMA_NUM_USERS)
			return;

		rx_user_status = &ppdu_info->rx_user_status[user_id];
		rx_stats_peruser = &cdp_rx_ppdu->user[user_id];
		sw_peer_id = rx_user_status->sw_peer_id;
		peer = dp_peer_get_ref_by_id(soc, sw_peer_id,
					     DP_MOD_ID_RX_PPDU_STATS);
		if (!peer) {
			rx_stats_peruser->peer_id = HTT_INVALID_PEER;
			continue;
		}

		qdf_mem_copy(rx_stats_peruser->mac_addr,
			     peer->mac_addr.raw, QDF_MAC_ADDR_SIZE);
		dp_peer_unref_delete(peer, DP_MOD_ID_RX_PPDU_STATS);
	}
}

void
dp_rx_mon_populate_cfr_ppdu_info(struct dp_pdev *pdev,
				 struct hal_rx_ppdu_info *ppdu_info,
				 struct cdp_rx_indication_ppdu *cdp_rx_ppdu)
{
	struct dp_peer *peer;
	struct dp_soc *soc = pdev->soc;
	int chain;
	uint16_t sw_peer_id;
	struct mon_rx_user_status *rx_user_status;
	uint32_t num_users = ppdu_info->com_info.num_users;

	cdp_rx_ppdu->ppdu_id = ppdu_info->com_info.ppdu_id;
	cdp_rx_ppdu->timestamp = ppdu_info->rx_status.tsft;
	cdp_rx_ppdu->u.ppdu_type = ppdu_info->rx_status.reception_type;

	for (chain = 0; chain < MAX_CHAIN; chain++)
		cdp_rx_ppdu->per_chain_rssi[chain] =
			ppdu_info->rx_status.rssi[chain];

	cdp_rx_ppdu->u.ltf_size = ppdu_info->rx_status.ltf_size;
	cdp_rx_ppdu->beamformed = ppdu_info->rx_status.beamformed;
	cdp_rx_ppdu->u.ldpc = ppdu_info->rx_status.ldpc;

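	/*
	 * An SGI value of VHT_SGI_NYSM on an 11AC PPDU is the short-GI
	 * NSYM disambiguation encoding; report it as a plain 0.4 us
	 * short GI.
	 */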
	if ((ppdu_info->rx_status.sgi == VHT_SGI_NYSM) &&
	    (ppdu_info->rx_status.preamble_type == HAL_RX_PKT_TYPE_11AC))
		cdp_rx_ppdu->u.gi = CDP_SGI_0_4_US;
	else
		cdp_rx_ppdu->u.gi = ppdu_info->rx_status.sgi;

	if (ppdu_info->rx_status.preamble_type == HAL_RX_PKT_TYPE_11AC) {
		cdp_rx_ppdu->u.stbc = ppdu_info->rx_status.is_stbc;
	} else if (ppdu_info->rx_status.preamble_type ==
		   HAL_RX_PKT_TYPE_11AX) {
		cdp_rx_ppdu->u.stbc = (ppdu_info->rx_status.he_data3 >>
				       QDF_MON_STATUS_STBC_SHIFT) & 0x1;
		cdp_rx_ppdu->u.dcm = (ppdu_info->rx_status.he_data3 >>
				      QDF_MON_STATUS_DCM_SHIFT) & 0x1;
	}

	qdf_assert_always(num_users <= CDP_MU_MAX_USERS);
	dp_rx_mon_handle_cfr_mu_info(pdev, ppdu_info, cdp_rx_ppdu);
	rx_user_status = &ppdu_info->rx_user_status[num_users - 1];
	sw_peer_id = rx_user_status->sw_peer_id;
	cdp_rx_ppdu->num_users = num_users;
	peer = dp_peer_get_ref_by_id(soc, sw_peer_id, DP_MOD_ID_RX_PPDU_STATS);
	if (!peer) {
		cdp_rx_ppdu->peer_id = HTT_INVALID_PEER;
		return;
	}

	cdp_rx_ppdu->peer_id = peer->peer_id;
	cdp_rx_ppdu->vdev_id = peer->vdev->vdev_id;

	dp_peer_unref_delete(peer, DP_MOD_ID_RX_PPDU_STATS);
}

bool
dp_cfr_rcc_mode_status(struct dp_pdev *pdev)
{
	return pdev->cfr_rcc_mode;
}

void
dp_rx_mon_populate_cfr_info(struct dp_pdev *pdev,
			    struct hal_rx_ppdu_info *ppdu_info,
			    struct cdp_rx_indication_ppdu *cdp_rx_ppdu)
{
	struct cdp_rx_ppdu_cfr_info *cfr_info;

	if (!qdf_unlikely(dp_cfr_rcc_mode_status(pdev)))
		return;

	cfr_info = &cdp_rx_ppdu->cfr_info;

	cfr_info->bb_captured_channel
		= ppdu_info->cfr_info.bb_captured_channel;
	cfr_info->bb_captured_timeout
		= ppdu_info->cfr_info.bb_captured_timeout;
	cfr_info->bb_captured_reason
		= ppdu_info->cfr_info.bb_captured_reason;
	cfr_info->rx_location_info_valid
		= ppdu_info->cfr_info.rx_location_info_valid;
	cfr_info->chan_capture_status
		= ppdu_info->cfr_info.chan_capture_status;
	cfr_info->rtt_che_buffer_pointer_high8
		= ppdu_info->cfr_info.rtt_che_buffer_pointer_high8;
	cfr_info->rtt_che_buffer_pointer_low32
		= ppdu_info->cfr_info.rtt_che_buffer_pointer_low32;
	cfr_info->rtt_cfo_measurement
		= (int16_t)ppdu_info->cfr_info.rtt_cfo_measurement;
	cfr_info->agc_gain_info0
		= ppdu_info->cfr_info.agc_gain_info0;
	cfr_info->agc_gain_info1
		= ppdu_info->cfr_info.agc_gain_info1;
	cfr_info->agc_gain_info2
		= ppdu_info->cfr_info.agc_gain_info2;
	cfr_info->agc_gain_info3
		= ppdu_info->cfr_info.agc_gain_info3;
	cfr_info->rx_start_ts
		= ppdu_info->cfr_info.rx_start_ts;
	cfr_info->mcs_rate
		= ppdu_info->cfr_info.mcs_rate;
	cfr_info->gi_type
		= ppdu_info->cfr_info.gi_type;
}

void
dp_update_cfr_dbg_stats(struct dp_pdev *pdev,
			struct hal_rx_ppdu_info *ppdu_info)
{
	struct hal_rx_ppdu_cfr_info *cfr = &ppdu_info->cfr_info;

	DP_STATS_INC(pdev,
		     rcc.chan_capture_status[cfr->chan_capture_status], 1);
	if (cfr->rx_location_info_valid) {
		DP_STATS_INC(pdev, rcc.rx_loc_info_valid_cnt, 1);
		if (cfr->bb_captured_channel) {
			DP_STATS_INC(pdev, rcc.bb_captured_channel_cnt, 1);
			DP_STATS_INC(pdev,
				     rcc.reason_cnt[cfr->bb_captured_reason],
				     1);
		} else if (cfr->bb_captured_timeout) {
			DP_STATS_INC(pdev, rcc.bb_captured_timeout_cnt, 1);
			DP_STATS_INC(pdev,
				     rcc.reason_cnt[cfr->bb_captured_reason],
				     1);
		}
	}
}

void
dp_rx_handle_cfr(struct dp_soc *soc, struct dp_pdev *pdev,
		 struct hal_rx_ppdu_info *ppdu_info)
{
	qdf_nbuf_t ppdu_nbuf;
	struct cdp_rx_indication_ppdu *cdp_rx_ppdu;

	dp_update_cfr_dbg_stats(pdev, ppdu_info);
	if (!ppdu_info->cfr_info.bb_captured_channel)
		return;

	ppdu_nbuf = qdf_nbuf_alloc(soc->osdev,
				   sizeof(struct cdp_rx_indication_ppdu),
				   0,
				   0,
				   FALSE);
	if (ppdu_nbuf) {
		cdp_rx_ppdu = (struct cdp_rx_indication_ppdu *)ppdu_nbuf->data;

		dp_rx_mon_populate_cfr_info(pdev, ppdu_info, cdp_rx_ppdu);
		dp_rx_mon_populate_cfr_ppdu_info(pdev, ppdu_info, cdp_rx_ppdu);
		qdf_nbuf_put_tail(ppdu_nbuf,
				  sizeof(struct cdp_rx_indication_ppdu));
		dp_wdi_event_handler(WDI_EVENT_RX_PPDU_DESC, soc,
				     ppdu_nbuf, HTT_INVALID_PEER,
				     WDI_NO_VAL, pdev->pdev_id);
	}
}

void
dp_rx_populate_cfr_non_assoc_sta(struct dp_pdev *pdev,
				 struct hal_rx_ppdu_info *ppdu_info,
				 struct cdp_rx_indication_ppdu *cdp_rx_ppdu)
{
	if (!dp_cfr_rcc_mode_status(pdev))
		return;

	if (ppdu_info->cfr_info.bb_captured_channel)
		dp_rx_mon_populate_cfr_ppdu_info(pdev, ppdu_info, cdp_rx_ppdu);
}

/**
 * dp_bb_captured_chan_status() - Get the bb_captured_channel status
 * @pdev: pdev ctx
 * @ppdu_info: structure for rx ppdu ring
 *
 * Return: Success/ Failure
 */
static inline QDF_STATUS
dp_bb_captured_chan_status(struct dp_pdev *pdev,
			   struct hal_rx_ppdu_info *ppdu_info)
{
	QDF_STATUS status = QDF_STATUS_E_FAILURE;
	struct hal_rx_ppdu_cfr_info *cfr = &ppdu_info->cfr_info;

	if (dp_cfr_rcc_mode_status(pdev)) {
		if (cfr->bb_captured_channel)
			status = QDF_STATUS_SUCCESS;
	}

	return status;
}
#else
static inline QDF_STATUS
dp_bb_captured_chan_status(struct dp_pdev *pdev,
			   struct hal_rx_ppdu_info *ppdu_info)
{
	return QDF_STATUS_E_NOSUPPORT;
}
#endif /* WLAN_CFR_ENABLE */

#ifdef QCA_ENHANCED_STATS_SUPPORT
#ifdef QCA_RSSI_DB2DBM
/**
 * dp_rx_mon_rf_index_conv() - this function will convert BB index to RF
 * index in the rssi_chain[chain][bw] array
 *
 * @chain: BB chain index
 * @mon_pdev: pdev structure
 *
 * Return: return RF chain index
 *
 * Computation:
 * 3 Bytes of xbar_config are used for RF to BB mapping
 * Samples of xbar_config,
 *
 * If xbar_config is 0x688FAC(hex):
 *     RF chains 0-3 are connected to BB chains 4-7
 *     RF chains 4-7 are connected to BB chains 0-3
 *     here,
 *     bits 0 to 2 = 4, maps BB chain 4 for RF chain 0
 *     bits 3 to 5 = 5, maps BB chain 5 for RF chain 1
 *     bits 6 to 8 = 6, maps BB chain 6 for RF chain 2
 *     bits 9 to 11 = 7, maps BB chain 7 for RF chain 3
 *     bits 12 to 14 = 0, maps BB chain 0 for RF chain 4
 *     bits 15 to 17 = 1, maps BB chain 1 for RF chain 5
 *     bits 18 to 20 = 2, maps BB chain 2 for RF chain 6
 *     bits 21 to 23 = 3, maps BB chain 3 for RF chain 7
 */
static uint8_t dp_rx_mon_rf_index_conv(uint8_t chain,
				       struct dp_mon_pdev *mon_pdev)
{
	uint32_t xbar_config = mon_pdev->rssi_offsets.xbar_config;

	if (mon_pdev->rssi_dbm_conv_support && xbar_config)
		return ((xbar_config >> (3 * chain)) & 0x07);
	return chain;
}
#else
static uint8_t dp_rx_mon_rf_index_conv(uint8_t chain,
				       struct dp_mon_pdev *mon_pdev)
{
	return chain;
}
#endif

void
dp_rx_populate_rx_rssi_chain(struct hal_rx_ppdu_info *ppdu_info,
			     struct cdp_rx_indication_ppdu *cdp_rx_ppdu,
			     struct dp_pdev *pdev)
{
	uint8_t chain, bw;
	uint8_t rssi;
	uint8_t chain_rf;
	struct dp_mon_pdev *mon_pdev = pdev->monitor_pdev;

	for (chain = 0; chain < SS_COUNT; chain++) {
		for (bw = 0; bw < MAX_BW; bw++) {
			chain_rf = dp_rx_mon_rf_index_conv(chain, mon_pdev);
			rssi = ppdu_info->rx_status.rssi_chain[chain_rf][bw];
			if (rssi != DP_RSSI_INVAL)
				cdp_rx_ppdu->rssi_chain[chain_rf][bw] = rssi;
			else
				cdp_rx_ppdu->rssi_chain[chain_rf][bw] = 0;
		}
	}
}

void
dp_rx_populate_su_evm_details(struct hal_rx_ppdu_info *ppdu_info,
			      struct cdp_rx_indication_ppdu *cdp_rx_ppdu)
{
	uint16_t pilot_evm;
	uint16_t nss_count;
	uint16_t pilot_count;

	nss_count = ppdu_info->evm_info.nss_count;
	pilot_count = ppdu_info->evm_info.pilot_count;

	if ((nss_count * pilot_count) > DP_RX_MAX_SU_EVM_COUNT) {
		qdf_debug("pilot evm count is more than expected");
		return;
	}
	cdp_rx_ppdu->evm_info.pilot_count = pilot_count;
	cdp_rx_ppdu->evm_info.nss_count = nss_count;

	/* Populate evm for pilot_evm = nss_count*pilot_count */
	for (pilot_evm = 0; pilot_evm < nss_count * pilot_count; pilot_evm++) {
		cdp_rx_ppdu->evm_info.pilot_evm[pilot_evm] =
			ppdu_info->evm_info.pilot_evm[pilot_evm];
	}
}

/**
 * dp_rx_inc_rusize_cnt() - increment pdev stats based on RU size
 * @pdev: pdev ctx
 * @rx_user_status: mon rx user status
 *
 * Return: bool
 */
static inline bool
dp_rx_inc_rusize_cnt(struct dp_pdev *pdev,
		     struct mon_rx_user_status *rx_user_status)
{
	uint32_t ru_size;
	bool is_data;

	ru_size = rx_user_status->ofdma_ru_size;

	if (dp_is_subtype_data(rx_user_status->frame_control)) {
		DP_STATS_INC(pdev,
			     ul_ofdma.data_rx_ru_size[ru_size], 1);
		is_data = true;
	} else {
		DP_STATS_INC(pdev,
			     ul_ofdma.nondata_rx_ru_size[ru_size], 1);
		is_data = false;
	}

	return is_data;
}

/**
 * dp_rx_populate_cdp_indication_ppdu_user() - Populate per user cdp indication
 * @pdev: pdev ctx
 * @ppdu_info: ppdu info structure from ppdu ring
 * @cdp_rx_ppdu: Rx PPDU indication structure
 *
 * Return: none
 */
static void
dp_rx_populate_cdp_indication_ppdu_user(struct dp_pdev *pdev,
					struct hal_rx_ppdu_info *ppdu_info,
					struct cdp_rx_indication_ppdu
					*cdp_rx_ppdu)
{
	struct dp_peer *peer;
	struct dp_soc *soc = pdev->soc;
	int i;
	struct mon_rx_user_status *rx_user_status;
	struct mon_rx_user_info *rx_user_info;
	struct cdp_rx_stats_ppdu_user *rx_stats_peruser;
	int ru_size;
	bool is_data = false;
	uint32_t num_users;
	struct dp_mon_ops *mon_ops;
	uint16_t sw_peer_id;

	num_users = ppdu_info->com_info.num_users;
	for (i = 0; i < num_users; i++) {
		if (i >= OFDMA_NUM_USERS)
			return;

		rx_user_status = &ppdu_info->rx_user_status[i];
		rx_user_info = &ppdu_info->rx_user_info[i];
		rx_stats_peruser = &cdp_rx_ppdu->user[i];

		sw_peer_id = rx_user_status->sw_peer_id;
		peer = dp_peer_get_ref_by_id(soc, sw_peer_id,
					     DP_MOD_ID_RX_PPDU_STATS);
		if (qdf_unlikely(!peer)) {
			rx_stats_peruser->peer_id = HTT_INVALID_PEER;
			continue;
		}
		rx_stats_peruser->is_bss_peer = peer->bss_peer;

		rx_stats_peruser->first_data_seq_ctrl =
			rx_user_status->first_data_seq_ctrl;

		rx_stats_peruser->frame_control_info_valid =
			rx_user_status->frame_control_info_valid;
		rx_stats_peruser->frame_control =
			rx_user_status->frame_control;

		rx_stats_peruser->qos_control_info_valid =
			rx_user_info->qos_control_info_valid;
		rx_stats_peruser->qos_control =
			rx_user_info->qos_control;
		rx_stats_peruser->tcp_msdu_count =
			rx_user_status->tcp_msdu_count;
		rx_stats_peruser->udp_msdu_count =
			rx_user_status->udp_msdu_count;
		rx_stats_peruser->other_msdu_count =
			rx_user_status->other_msdu_count;

		rx_stats_peruser->num_msdu =
			rx_stats_peruser->tcp_msdu_count +
			rx_stats_peruser->udp_msdu_count +
			rx_stats_peruser->other_msdu_count;

		rx_stats_peruser->preamble_type =
			cdp_rx_ppdu->u.preamble;
		rx_stats_peruser->mpdu_cnt_fcs_ok =
			rx_user_status->mpdu_cnt_fcs_ok;
		rx_stats_peruser->mpdu_cnt_fcs_err =
			rx_user_status->mpdu_cnt_fcs_err;
		qdf_mem_copy(&rx_stats_peruser->mpdu_fcs_ok_bitmap,
			     &rx_user_status->mpdu_fcs_ok_bitmap,
			     HAL_RX_NUM_WORDS_PER_PPDU_BITMAP *
			     sizeof(rx_user_status->mpdu_fcs_ok_bitmap[0]));
		rx_stats_peruser->mpdu_ok_byte_count =
			rx_user_status->mpdu_ok_byte_count;
		rx_stats_peruser->mpdu_err_byte_count =
			rx_user_status->mpdu_err_byte_count;

		cdp_rx_ppdu->num_mpdu += rx_user_status->mpdu_cnt_fcs_ok;
		cdp_rx_ppdu->num_msdu += rx_stats_peruser->num_msdu;
		rx_stats_peruser->retries =
			CDP_FC_IS_RETRY_SET(rx_stats_peruser->frame_control) ?
			rx_stats_peruser->mpdu_cnt_fcs_ok : 0;
		cdp_rx_ppdu->retries += rx_stats_peruser->retries;

		if (rx_stats_peruser->mpdu_cnt_fcs_ok > 1)
			rx_stats_peruser->is_ampdu = 1;
		else
			rx_stats_peruser->is_ampdu = 0;

		rx_stats_peruser->tid = ppdu_info->rx_status.tid;

		qdf_mem_copy(rx_stats_peruser->mac_addr,
			     peer->mac_addr.raw, QDF_MAC_ADDR_SIZE);
		rx_stats_peruser->peer_id = peer->peer_id;
		cdp_rx_ppdu->vdev_id = peer->vdev->vdev_id;
		rx_stats_peruser->vdev_id = peer->vdev->vdev_id;
		rx_stats_peruser->mu_ul_info_valid = 0;

		mon_ops = dp_mon_ops_get(soc);
		if (mon_ops && mon_ops->mon_rx_populate_ppdu_usr_info)
			mon_ops->mon_rx_populate_ppdu_usr_info(rx_user_status,
							       rx_stats_peruser);

		dp_peer_unref_delete(peer, DP_MOD_ID_RX_PPDU_STATS);
		if (cdp_rx_ppdu->u.ppdu_type == HAL_RX_TYPE_MU_OFDMA ||
		    cdp_rx_ppdu->u.ppdu_type == HAL_RX_TYPE_MU_MIMO) {
			if (rx_user_status->mu_ul_info_valid) {
				rx_stats_peruser->nss = rx_user_status->nss;
				cdp_rx_ppdu->usr_nss_sum += rx_stats_peruser->nss;
				rx_stats_peruser->mcs = rx_user_status->mcs;
				rx_stats_peruser->mu_ul_info_valid =
					rx_user_status->mu_ul_info_valid;
				rx_stats_peruser->ofdma_ru_start_index =
					rx_user_status->ofdma_ru_start_index;
				rx_stats_peruser->ofdma_ru_width =
					rx_user_status->ofdma_ru_width;
				cdp_rx_ppdu->usr_ru_tones_sum +=
					rx_stats_peruser->ofdma_ru_width;
				rx_stats_peruser->user_index = i;
				ru_size = rx_user_status->ofdma_ru_size;
				/*
				 * max RU size will be equal to
				 * HTT_UL_OFDMA_V0_RU_SIZE_RU_996x2
				 */
				if (qdf_unlikely(ru_size >= OFDMA_NUM_RU_SIZE)) {
					dp_err("invalid ru_size %d", ru_size);
					return;
				}
				is_data = dp_rx_inc_rusize_cnt(pdev,
							       rx_user_status);
			}
			if (is_data) {
				/* counter to get number of MU OFDMA */
				pdev->stats.ul_ofdma.data_rx_ppdu++;
				pdev->stats.ul_ofdma.data_users[num_users]++;
			}
		}
	}
}

/**
 * dp_rx_populate_cdp_indication_ppdu() - Populate cdp rx indication structure
 * @pdev: pdev ctx
 * @ppdu_info: ppdu info structure from ppdu ring
 * @cdp_rx_ppdu: Rx PPDU indication structure
 *
 * Return: none
 */
static void
dp_rx_populate_cdp_indication_ppdu(struct dp_pdev *pdev,
				   struct hal_rx_ppdu_info *ppdu_info,
				   struct cdp_rx_indication_ppdu *cdp_rx_ppdu)
{
	struct dp_peer *peer;
	struct dp_soc *soc = pdev->soc;
	uint32_t i;
	struct dp_mon_ops *mon_ops;
	uint16_t sw_peer_id;
	struct mon_rx_user_status *rx_user_status;
	uint32_t num_users = ppdu_info->com_info.num_users;

	cdp_rx_ppdu->first_data_seq_ctrl =
		ppdu_info->rx_status.first_data_seq_ctrl;
	cdp_rx_ppdu->frame_ctrl =
		ppdu_info->rx_status.frame_control;
	cdp_rx_ppdu->tcp_msdu_count = ppdu_info->rx_status.tcp_msdu_count;
	cdp_rx_ppdu->udp_msdu_count = ppdu_info->rx_status.udp_msdu_count;
	cdp_rx_ppdu->other_msdu_count = ppdu_info->rx_status.other_msdu_count;
	/* num mpdu is consolidated and added together in num user loop */
	cdp_rx_ppdu->num_mpdu = ppdu_info->com_info.mpdu_cnt_fcs_ok;
	/* num msdu is consolidated and added together in num user loop */
	cdp_rx_ppdu->num_msdu = (cdp_rx_ppdu->tcp_msdu_count +
				 cdp_rx_ppdu->udp_msdu_count +
				 cdp_rx_ppdu->other_msdu_count);

	cdp_rx_ppdu->retries = CDP_FC_IS_RETRY_SET(cdp_rx_ppdu->frame_ctrl) ?
		ppdu_info->com_info.mpdu_cnt_fcs_ok : 0;

	if (ppdu_info->com_info.mpdu_cnt_fcs_ok > 1)
		cdp_rx_ppdu->is_ampdu = 1;
	else
		cdp_rx_ppdu->is_ampdu = 0;
	cdp_rx_ppdu->tid = ppdu_info->rx_status.tid;

	qdf_assert_always(num_users <= CDP_MU_MAX_USERS);
	rx_user_status = &ppdu_info->rx_user_status[num_users - 1];
	sw_peer_id = rx_user_status->sw_peer_id;
	peer = dp_peer_get_ref_by_id(soc, sw_peer_id,
				     DP_MOD_ID_RX_PPDU_STATS);
	if (qdf_unlikely(!peer)) {
		cdp_rx_ppdu->peer_id = HTT_INVALID_PEER;
		cdp_rx_ppdu->num_users = 0;
		goto end;
	}

	qdf_mem_copy(cdp_rx_ppdu->mac_addr,
		     peer->mac_addr.raw, QDF_MAC_ADDR_SIZE);
	cdp_rx_ppdu->peer_id = peer->peer_id;
	cdp_rx_ppdu->vdev_id = peer->vdev->vdev_id;

	cdp_rx_ppdu->ppdu_id = ppdu_info->com_info.ppdu_id;
	cdp_rx_ppdu->length = ppdu_info->rx_status.ppdu_len;
	cdp_rx_ppdu->duration = ppdu_info->rx_status.duration;
	cdp_rx_ppdu->u.nss = ppdu_info->rx_status.nss;
	cdp_rx_ppdu->u.mcs = ppdu_info->rx_status.mcs;
	if ((ppdu_info->rx_status.sgi == VHT_SGI_NYSM) &&
	    (ppdu_info->rx_status.preamble_type == HAL_RX_PKT_TYPE_11AC))
		cdp_rx_ppdu->u.gi = CDP_SGI_0_4_US;
	else
		cdp_rx_ppdu->u.gi = ppdu_info->rx_status.sgi;
	cdp_rx_ppdu->u.ldpc = ppdu_info->rx_status.ldpc;
	cdp_rx_ppdu->u.ppdu_type = ppdu_info->rx_status.reception_type;
	cdp_rx_ppdu->u.ltf_size = (ppdu_info->rx_status.he_data5 >>
				   QDF_MON_STATUS_HE_LTF_SIZE_SHIFT) & 0x3;
	cdp_rx_ppdu->rssi = ppdu_info->rx_status.rssi_comb;
	cdp_rx_ppdu->timestamp = ppdu_info->rx_status.tsft;
	cdp_rx_ppdu->channel = ppdu_info->rx_status.chan_num;
	cdp_rx_ppdu->beamformed = ppdu_info->rx_status.beamformed;
	cdp_rx_ppdu->num_bytes = ppdu_info->rx_status.ppdu_len;
	cdp_rx_ppdu->lsig_a = ppdu_info->rx_status.rate;
	cdp_rx_ppdu->u.ltf_size = ppdu_info->rx_status.ltf_size;

	if (ppdu_info->rx_status.preamble_type == HAL_RX_PKT_TYPE_11AC) {
		cdp_rx_ppdu->u.stbc = ppdu_info->rx_status.is_stbc;
	} else if (ppdu_info->rx_status.preamble_type ==
		   HAL_RX_PKT_TYPE_11AX) {
		cdp_rx_ppdu->u.stbc = (ppdu_info->rx_status.he_data3 >>
				       QDF_MON_STATUS_STBC_SHIFT) & 0x1;
		cdp_rx_ppdu->u.dcm = (ppdu_info->rx_status.he_data3 >>
				      QDF_MON_STATUS_DCM_SHIFT) & 0x1;
	}
	dp_rx_populate_rx_rssi_chain(ppdu_info, cdp_rx_ppdu, pdev);
	dp_rx_populate_su_evm_details(ppdu_info, cdp_rx_ppdu);
	cdp_rx_ppdu->rx_antenna = ppdu_info->rx_status.rx_antenna;

	mon_ops = dp_mon_ops_get(pdev->soc);
	if (mon_ops && mon_ops->mon_rx_populate_ppdu_info)
		mon_ops->mon_rx_populate_ppdu_info(ppdu_info,
						   cdp_rx_ppdu);

	cdp_rx_ppdu->nf = ppdu_info->rx_status.chan_noise_floor;
	for (i = 0; i < MAX_CHAIN; i++)
		cdp_rx_ppdu->per_chain_rssi[i] = ppdu_info->rx_status.rssi[i];

	cdp_rx_ppdu->is_mcast_bcast = ppdu_info->nac_info.mcast_bcast;

	cdp_rx_ppdu->num_users = ppdu_info->com_info.num_users;

	dp_rx_populate_cdp_indication_ppdu_user(pdev, ppdu_info, cdp_rx_ppdu);

	dp_peer_unref_delete(peer, DP_MOD_ID_RX_PPDU_STATS);

	return;
end:
	dp_rx_populate_cfr_non_assoc_sta(pdev, ppdu_info, cdp_rx_ppdu);
}

/**
 * dp_rx_rate_stats_update() - Update per-peer rate statistics
 * @peer: Datapath peer handle
 * @ppdu: PPDU Descriptor
 * @user: user index
 *
 * Return: None
 */
static inline void dp_rx_rate_stats_update(struct dp_peer *peer,
					   struct cdp_rx_indication_ppdu *ppdu,
					   uint32_t user)
{
	uint32_t ratekbps = 0;
	uint32_t ppdu_rx_rate = 0;
	uint32_t nss = 0;
	uint8_t mcs = 0;
	uint32_t rix;
	uint16_t ratecode = 0;
	struct cdp_rx_stats_ppdu_user *ppdu_user = NULL;
	struct dp_mon_peer *mon_peer = NULL;

	if (!peer || !ppdu)
		return;

	mon_peer = peer->monitor_peer;
	ppdu_user = &ppdu->user[user];

	if (!mon_peer)
		return;

	if (ppdu->u.ppdu_type != HAL_RX_TYPE_SU) {
		if (ppdu_user->nss == 0)
			nss = 0;
		else
			nss = ppdu_user->nss - 1;
		mcs = ppdu_user->mcs;

		mon_peer->stats.rx.nss_info = ppdu_user->nss;
		mon_peer->stats.rx.mcs_info = ppdu_user->mcs;
	} else {
		if (ppdu->u.nss == 0)
			nss = 0;
		else
			nss = ppdu->u.nss - 1;
		mcs = ppdu->u.mcs;

		mon_peer->stats.rx.nss_info = ppdu->u.nss;
		mon_peer->stats.rx.mcs_info = ppdu->u.mcs;
	}

	ratekbps = dp_getrateindex(ppdu->u.gi,
				   mcs,
				   nss,
				   ppdu->u.preamble,
				   ppdu->u.bw,
				   ppdu->punc_bw,
				   &rix,
				   &ratecode);

	if (!ratekbps) {
		ppdu->rix = 0;
		ppdu_user->rix = 0;
		ppdu->rx_ratekbps = 0;
		ppdu->rx_ratecode = 0;
		ppdu_user->rx_ratekbps = 0;
		return;
	}

	mon_peer->stats.rx.bw_info = ppdu->u.bw;
	mon_peer->stats.rx.gi_info = ppdu->u.gi;
	mon_peer->stats.rx.preamble_info = ppdu->u.preamble;

	ppdu->rix = rix;
	ppdu_user->rix = rix;
	DP_STATS_UPD(mon_peer, rx.last_rx_rate, ratekbps);
	mon_peer->stats.rx.avg_rx_rate =
		dp_ath_rate_lpf(mon_peer->stats.rx.avg_rx_rate, ratekbps);
	ppdu_rx_rate = dp_ath_rate_out(mon_peer->stats.rx.avg_rx_rate);
	DP_STATS_UPD(mon_peer, rx.rnd_avg_rx_rate, ppdu_rx_rate);
	ppdu->rx_ratekbps = ratekbps;
	ppdu->rx_ratecode = ratecode;
	ppdu_user->rx_ratekbps = ratekbps;

	if (peer->vdev)
		peer->vdev->stats.rx.last_rx_rate = ratekbps;
}

#ifdef WLAN_CONFIG_TELEMETRY_AGENT
static void
dp_ppdu_desc_user_rx_time_update(struct dp_pdev *pdev,
				 struct dp_peer *peer,
				 struct cdp_rx_indication_ppdu *ppdu_desc,
				 struct cdp_rx_stats_ppdu_user *user)
{
	uint32_t nss_ru_width_sum = 0;
	struct dp_mon_peer *mon_peer = NULL;
	uint8_t ac = 0;

	if (!pdev || !ppdu_desc || !user || !peer)
		return;

	nss_ru_width_sum = ppdu_desc->usr_nss_sum * ppdu_desc->usr_ru_tones_sum;
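	/* Avoid a divide-by-zero below when per-user NSS/RU-tones totals are absent */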
	if (!nss_ru_width_sum)
		nss_ru_width_sum = 1;

	if (ppdu_desc->u.ppdu_type == HAL_RX_TYPE_MU_OFDMA ||
	    ppdu_desc->u.ppdu_type == HAL_RX_TYPE_MU_MIMO) {
		user->rx_time_us = (ppdu_desc->duration *
				    user->nss * user->ofdma_ru_width) /
				    nss_ru_width_sum;
	} else {
		user->rx_time_us = ppdu_desc->duration;
	}

	mon_peer = peer->monitor_peer;
	if (qdf_unlikely(!mon_peer))
		return;

	ac = TID_TO_WME_AC(user->tid);
	DP_STATS_INC(mon_peer, airtime_stats.rx_airtime_consumption[ac].consumption,
		     user->rx_time_us);
}

/**
 * dp_rx_mon_update_user_deter_stats() - Update per-peer deterministic stats
 * @pdev: Datapath pdev handle
 * @peer: Datapath peer handle
 * @ppdu: PPDU Descriptor
 * @user: Per user RX stats
 *
 * Return: None
 */
static inline
void dp_rx_mon_update_user_deter_stats(struct dp_pdev *pdev,
				       struct dp_peer *peer,
				       struct cdp_rx_indication_ppdu *ppdu,
				       struct cdp_rx_stats_ppdu_user *user)
{
	struct dp_mon_peer *mon_peer;
	uint8_t tid;

	if (!pdev || !ppdu || !user || !peer)
		return;

	if (!dp_is_subtype_data(ppdu->frame_ctrl))
		return;

	if (ppdu->u.ppdu_type != HAL_RX_TYPE_SU)
		return;

	mon_peer = peer->monitor_peer;
	if (!mon_peer)
		return;

	tid = user->tid;
	if (tid >= CDP_DATA_TID_MAX)
		return;

	DP_STATS_INC(mon_peer,
		     deter_stats.deter[tid].rx_det.mode_cnt,
		     1);
	DP_STATS_UPD(mon_peer,
		     deter_stats.deter[tid].rx_det.avg_rate,
		     mon_peer->stats.rx.avg_rx_rate);
}

/**
 * dp_rx_mon_update_pdev_deter_stats() - Update pdev deterministic stats
 * @pdev: Datapath pdev handle
 * @ppdu: PPDU Descriptor
 *
 * Return: None
 */
static inline
void dp_rx_mon_update_pdev_deter_stats(struct dp_pdev *pdev,
				       struct cdp_rx_indication_ppdu *ppdu)
{
	if (!dp_is_subtype_data(ppdu->frame_ctrl))
		return;

	DP_STATS_INC(pdev,
		     deter_stats.rx_su_cnt,
		     1);
}
#else
static inline void
dp_ppdu_desc_user_rx_time_update(struct dp_pdev *pdev,
				 struct dp_peer *peer,
				 struct cdp_rx_indication_ppdu *ppdu_desc,
				 struct cdp_rx_stats_ppdu_user *user)
{ }

static inline
void dp_rx_mon_update_user_deter_stats(struct dp_pdev *pdev,
				       struct dp_peer *peer,
				       struct cdp_rx_indication_ppdu *ppdu,
				       struct cdp_rx_stats_ppdu_user *user)
{ }

static inline
void dp_rx_mon_update_pdev_deter_stats(struct dp_pdev *pdev,
				       struct cdp_rx_indication_ppdu *ppdu)
{ }
#endif

static void dp_rx_stats_update(struct dp_pdev *pdev,
			       struct cdp_rx_indication_ppdu *ppdu)
{
	struct dp_soc *soc = NULL;
	uint8_t mcs, preamble, ac = 0, nss, ppdu_type, res_mcs = 0;
	uint32_t num_msdu;
	struct dp_peer *peer;
	struct dp_mon_peer *mon_peer;
	struct cdp_rx_stats_ppdu_user *ppdu_user;
	uint32_t i;
	enum cdp_mu_packet_type mu_pkt_type;
	struct dp_mon_ops *mon_ops;
	struct dp_mon_pdev *mon_pdev = NULL;
	uint64_t byte_count;
	bool is_preamble_valid = true;

	if (qdf_likely(pdev))
		soc = pdev->soc;
	else
		return;

	if (qdf_likely(!soc) || soc->process_rx_status)
		return;

	mon_pdev = pdev->monitor_pdev;

	preamble = ppdu->u.preamble;
	ppdu_type = ppdu->u.ppdu_type;

	for (i = 0; i < ppdu->num_users && i < CDP_MU_MAX_USERS; i++) {
		peer = NULL;
		ppdu_user = &ppdu->user[i];
		peer = dp_peer_get_ref_by_id(soc, ppdu_user->peer_id,
					     DP_MOD_ID_RX_PPDU_STATS);

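		/*
		 * If the peer lookup fails, fall back to the pdev-level
		 * invalid_mon_peer so the PPDU is still accounted for.
		 */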
		if (qdf_unlikely(!peer))
			mon_peer = mon_pdev->invalid_mon_peer;
		else
			mon_peer = peer->monitor_peer;

		if (qdf_unlikely(!mon_peer)) {
			if (peer)
				dp_peer_unref_delete(peer,
						     DP_MOD_ID_RX_PPDU_STATS);

			continue;
		}

		if ((preamble == DOT11_A) || (preamble == DOT11_B))
			ppdu->u.nss = 1;

		if (ppdu_type == HAL_RX_TYPE_SU) {
			mcs = ppdu->u.mcs;
			nss = ppdu->u.nss;
		} else {
			mcs = ppdu_user->mcs;
			nss = ppdu_user->nss;
		}

		num_msdu = ppdu_user->num_msdu;
		byte_count = ppdu_user->mpdu_ok_byte_count +
			ppdu_user->mpdu_err_byte_count;

		DP_STATS_UPD(mon_peer, rx.snr, ppdu->rssi);

		if (qdf_unlikely(mon_peer->stats.rx.avg_snr == CDP_INVALID_SNR))
			mon_peer->stats.rx.avg_snr =
				CDP_SNR_IN(mon_peer->stats.rx.snr);
		else
			CDP_SNR_UPDATE_AVG(mon_peer->stats.rx.avg_snr,
					   mon_peer->stats.rx.snr);

		if (ppdu_type == HAL_RX_TYPE_SU) {
			if (nss) {
				DP_STATS_INC(mon_peer, rx.nss[nss - 1], num_msdu);
				DP_STATS_INC(mon_peer, rx.ppdu_nss[nss - 1], 1);
			}

			DP_STATS_INC(mon_peer, rx.mpdu_cnt_fcs_ok,
				     ppdu_user->mpdu_cnt_fcs_ok);
			DP_STATS_INC(mon_peer, rx.mpdu_cnt_fcs_err,
				     ppdu_user->mpdu_cnt_fcs_err);
		}

		if (ppdu_type >= HAL_RX_TYPE_MU_MIMO &&
		    ppdu_type <= HAL_RX_TYPE_MU_OFDMA) {
			if (ppdu_type == HAL_RX_TYPE_MU_MIMO)
				mu_pkt_type = TXRX_TYPE_MU_MIMO;
			else
				mu_pkt_type = TXRX_TYPE_MU_OFDMA;

			if (qdf_likely(nss)) {
				DP_STATS_INC(mon_peer, rx.nss[nss - 1], num_msdu);
				DP_STATS_INC(mon_peer,
					     rx.rx_mu[mu_pkt_type].ppdu_nss[nss - 1],
					     1);
			}

			DP_STATS_INC(mon_peer,
				     rx.rx_mu[mu_pkt_type].mpdu_cnt_fcs_ok,
				     ppdu_user->mpdu_cnt_fcs_ok);
			DP_STATS_INC(mon_peer,
				     rx.rx_mu[mu_pkt_type].mpdu_cnt_fcs_err,
				     ppdu_user->mpdu_cnt_fcs_err);
		}

		DP_STATS_INC(mon_peer, rx.sgi_count[ppdu->u.gi], num_msdu);
		DP_STATS_INC(mon_peer, rx.bw[ppdu->u.bw], num_msdu);
		DP_STATS_INC(mon_peer, rx.reception_type[ppdu->u.ppdu_type],
			     num_msdu);
		DP_STATS_INC(mon_peer, rx.ppdu_cnt[ppdu->u.ppdu_type], 1);
		DP_STATS_INCC(mon_peer, rx.ampdu_cnt, num_msdu,
			      ppdu_user->is_ampdu);
		DP_STATS_INCC(mon_peer, rx.non_ampdu_cnt, num_msdu,
			      !(ppdu_user->is_ampdu));
		DP_STATS_UPD(mon_peer, rx.rx_rate, mcs);

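		/*
		 * Clamp the MCS index to the per-preamble maximum; anything
		 * out of range is counted in the last MCS bucket.
		 */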
		switch (preamble) {
		case DOT11_A:
			res_mcs = (mcs < MAX_MCS_11A) ? mcs : (MAX_MCS - 1);
			break;
		case DOT11_B:
			res_mcs = (mcs < MAX_MCS_11B) ? mcs : (MAX_MCS - 1);
			break;
		case DOT11_N:
			res_mcs = (mcs < MAX_MCS_11N) ? mcs : (MAX_MCS - 1);
			break;
		case DOT11_AC:
			res_mcs = (mcs < MAX_MCS_11AC) ? mcs : (MAX_MCS - 1);
			break;
		case DOT11_AX:
			res_mcs = (mcs < MAX_MCS_11AX) ?
				mcs : (MAX_MCS - 1);
			break;
		default:
			is_preamble_valid = false;
		}

		DP_STATS_INCC(mon_peer,
			      rx.pkt_type[preamble].mcs_count[res_mcs], num_msdu,
			      is_preamble_valid);

		if (preamble == DOT11_AX) {
			DP_STATS_INCC(mon_peer,
				      rx.su_ax_ppdu_cnt.mcs_count[res_mcs], 1,
				      (ppdu_type == HAL_RX_TYPE_SU));
			DP_STATS_INCC(mon_peer,
				      rx.rx_mu[TXRX_TYPE_MU_OFDMA].ppdu.mcs_count[res_mcs],
				      1, (ppdu_type == HAL_RX_TYPE_MU_OFDMA));
			DP_STATS_INCC(mon_peer,
				      rx.rx_mu[TXRX_TYPE_MU_MIMO].ppdu.mcs_count[res_mcs],
				      1, (ppdu_type == HAL_RX_TYPE_MU_MIMO));
		}

		/*
		 * If invalid TID, it could be a non-qos frame, hence do not
		 * update any AC counters
		 */
		ac = TID_TO_WME_AC(ppdu_user->tid);

		if (qdf_likely(ppdu->tid != HAL_TID_INVALID)) {
			DP_STATS_INC(mon_peer, rx.wme_ac_type[ac], num_msdu);
			DP_STATS_INC(mon_peer, rx.wme_ac_type_bytes[ac],
				     byte_count);
		}

		DP_STATS_INC(mon_peer, rx.rx_ppdus, 1);
		DP_STATS_INC(mon_peer, rx.rx_mpdus,
			     (ppdu_user->mpdu_cnt_fcs_ok + ppdu_user->mpdu_cnt_fcs_err));

		mon_ops = dp_mon_ops_get(soc);
		if (qdf_likely(mon_ops && mon_ops->mon_rx_stats_update))
			mon_ops->mon_rx_stats_update(mon_peer, ppdu, ppdu_user);

		if (qdf_unlikely(!peer))
			continue;

		dp_peer_stats_notify(pdev, peer);
		DP_STATS_UPD(mon_peer, rx.last_snr, ppdu->rssi);

		dp_peer_qos_stats_notify(pdev, ppdu_user);

		if (dp_is_subtype_data(ppdu->frame_ctrl))
			dp_rx_rate_stats_update(peer, ppdu, i);

		dp_send_stats_event(pdev, peer, ppdu_user->peer_id);

		dp_ppdu_desc_user_rx_time_update(pdev, peer, ppdu, ppdu_user);

		if (wlan_cfg_get_sawf_stats_config(pdev->soc->wlan_cfg_ctx))
			dp_rx_mon_update_user_deter_stats(pdev, peer,
							  ppdu, ppdu_user);

		dp_peer_unref_delete(peer, DP_MOD_ID_RX_PPDU_STATS);
	}
}

void
dp_rx_handle_ppdu_stats(struct dp_soc *soc, struct dp_pdev *pdev,
			struct hal_rx_ppdu_info *ppdu_info)
{
	qdf_nbuf_t ppdu_nbuf;
	struct cdp_rx_indication_ppdu *cdp_rx_ppdu;
	struct dp_mon_pdev *mon_pdev = pdev->monitor_pdev;
	uint64_t size = 0;
	uint8_t num_users = 0;

	/*
	 * Do not allocate if fcs error,
	 * ast idx invalid / fctl invalid
	 *
	 * In CFR RCC mode - PPDU status TLVs of error pkts are also needed
	 */
	if (qdf_unlikely(ppdu_info->com_info.mpdu_cnt_fcs_ok == 0))
		return;

	if (qdf_unlikely(mon_pdev->neighbour_peers_added)) {
		if (ppdu_info->nac_info.fc_valid &&
		    ppdu_info->nac_info.to_ds_flag &&
		    ppdu_info->nac_info.mac_addr2_valid) {
			struct dp_neighbour_peer *peer = NULL;
			uint8_t rssi = ppdu_info->rx_status.rssi_comb;

			qdf_spin_lock_bh(&mon_pdev->neighbour_peer_mutex);
			if (mon_pdev->neighbour_peers_added) {
				TAILQ_FOREACH(peer, &mon_pdev->neighbour_peers_list,
					      neighbour_peer_list_elem) {
					if (!qdf_mem_cmp(&peer->neighbour_peers_macaddr,
							 &ppdu_info->nac_info.mac_addr2,
							 QDF_MAC_ADDR_SIZE)) {
						peer->rssi = rssi;
						break;
					}
				}
			}
			qdf_spin_unlock_bh(&mon_pdev->neighbour_peer_mutex);
		} else {
			dp_info("Neighbour peers RSSI update failed! fc_valid = %d, to_ds_flag = %d and mac_addr2_valid = %d",
				ppdu_info->nac_info.fc_valid,
				ppdu_info->nac_info.to_ds_flag,
				ppdu_info->nac_info.mac_addr2_valid);
		}
	}

	/* need not generate wdi event when mcopy, cfr rcc mode and
	 * enhanced stats are not enabled
	 */
	if (qdf_unlikely(!mon_pdev->mcopy_mode &&
			 !mon_pdev->enhanced_stats_en &&
			 !dp_cfr_rcc_mode_status(pdev)))
		return;

	if (qdf_unlikely(dp_cfr_rcc_mode_status(pdev)))
		dp_update_cfr_dbg_stats(pdev, ppdu_info);

	if (qdf_unlikely(!ppdu_info->rx_status.frame_control_info_valid ||
			 ppdu_info->rx_status.ast_index == HAL_AST_IDX_INVALID)) {
		if (!(mon_pdev->mcopy_mode ||
		      (dp_bb_captured_chan_status(pdev, ppdu_info) ==
		       QDF_STATUS_SUCCESS)))
			return;
	}
	num_users = ppdu_info->com_info.num_users;
	qdf_assert_always(num_users <= CDP_MU_MAX_USERS);
	size = sizeof(struct cdp_rx_indication_ppdu) +
		num_users * sizeof(struct cdp_rx_stats_ppdu_user);
	ppdu_nbuf = qdf_nbuf_alloc(soc->osdev,
				   size,
				   0, 0, FALSE);
	if (qdf_likely(ppdu_nbuf)) {
		cdp_rx_ppdu = (struct cdp_rx_indication_ppdu *)qdf_nbuf_data(ppdu_nbuf);

		qdf_mem_zero(cdp_rx_ppdu, size);
		dp_rx_mon_populate_cfr_info(pdev, ppdu_info, cdp_rx_ppdu);
		dp_rx_populate_cdp_indication_ppdu(pdev,
						   ppdu_info, cdp_rx_ppdu);
		if (!qdf_unlikely(qdf_nbuf_put_tail(ppdu_nbuf,
				  sizeof(struct cdp_rx_indication_ppdu))))
			return;

		if (wlan_cfg_get_sawf_stats_config(pdev->soc->wlan_cfg_ctx)) {
			if (cdp_rx_ppdu->u.ppdu_type == HAL_RX_TYPE_SU)
				dp_rx_mon_update_pdev_deter_stats(pdev,
								  cdp_rx_ppdu);
		}

		dp_rx_stats_update(pdev, cdp_rx_ppdu);

		if (qdf_unlikely(cdp_rx_ppdu->peer_id != HTT_INVALID_PEER)) {
			dp_wdi_event_handler(WDI_EVENT_RX_PPDU_DESC,
					     soc, ppdu_nbuf,
					     cdp_rx_ppdu->peer_id,
					     WDI_NO_VAL, pdev->pdev_id);
		} else if (qdf_unlikely(mon_pdev->mcopy_mode || dp_cfr_rcc_mode_status(pdev))) {
			dp_wdi_event_handler(WDI_EVENT_RX_PPDU_DESC, soc,
					     ppdu_nbuf, HTT_INVALID_PEER,
					     WDI_NO_VAL, pdev->pdev_id);
		} else {
			qdf_nbuf_free(ppdu_nbuf);
		}
	}
}
#endif /* QCA_ENHANCED_STATS_SUPPORT */

#ifdef QCA_UNDECODED_METADATA_SUPPORT
#define RX_PHYERR_MASK_GET64(_val1, _val2) (((uint64_t)(_val2) << 32) | (_val1))
/**
 * dp_rx_populate_cdp_indication_ppdu_undecoded_metadata() - Populate cdp
 * rx indication structure
 * @pdev: pdev ctx
 * @ppdu_info: ppdu info structure from ppdu ring
 * @cdp_rx_ppdu: Rx PPDU indication structure
 *
 * Return: none
 */
static void
dp_rx_populate_cdp_indication_ppdu_undecoded_metadata(struct dp_pdev *pdev,
		struct hal_rx_ppdu_info *ppdu_info,
		struct cdp_rx_indication_ppdu *cdp_rx_ppdu)
{
	uint32_t chain;

	cdp_rx_ppdu->phyrx_abort = ppdu_info->rx_status.phyrx_abort;
	cdp_rx_ppdu->phyrx_abort_reason =
		ppdu_info->rx_status.phyrx_abort_reason;

	cdp_rx_ppdu->first_data_seq_ctrl =
		ppdu_info->rx_status.first_data_seq_ctrl;
	cdp_rx_ppdu->frame_ctrl =
		ppdu_info->rx_status.frame_control;
	cdp_rx_ppdu->tcp_msdu_count = ppdu_info->rx_status.tcp_msdu_count;
	cdp_rx_ppdu->udp_msdu_count = ppdu_info->rx_status.udp_msdu_count;
	cdp_rx_ppdu->other_msdu_count = ppdu_info->rx_status.other_msdu_count;
	cdp_rx_ppdu->u.preamble = ppdu_info->rx_status.preamble_type;
	cdp_rx_ppdu->num_mpdu = ppdu_info->com_info.mpdu_cnt_fcs_ok;
	cdp_rx_ppdu->num_msdu = (cdp_rx_ppdu->tcp_msdu_count +
				 cdp_rx_ppdu->udp_msdu_count +
				 cdp_rx_ppdu->other_msdu_count);

	cdp_rx_ppdu->retries = CDP_FC_IS_RETRY_SET(cdp_rx_ppdu->frame_ctrl) ?
		ppdu_info->com_info.mpdu_cnt_fcs_ok : 0;

	if (ppdu_info->com_info.mpdu_cnt_fcs_ok > 1)
		cdp_rx_ppdu->is_ampdu = 1;
	else
		cdp_rx_ppdu->is_ampdu = 0;
	cdp_rx_ppdu->tid = ppdu_info->rx_status.tid;

	cdp_rx_ppdu->ppdu_id = ppdu_info->com_info.ppdu_id;
	cdp_rx_ppdu->length = ppdu_info->rx_status.ppdu_len;
	cdp_rx_ppdu->duration = ppdu_info->rx_status.duration;
	cdp_rx_ppdu->u.bw = ppdu_info->rx_status.bw;
	cdp_rx_ppdu->u.nss = ppdu_info->rx_status.nss;
	cdp_rx_ppdu->u.mcs = ppdu_info->rx_status.mcs;
	if (ppdu_info->rx_status.sgi == VHT_SGI_NYSM &&
	    ppdu_info->rx_status.preamble_type == HAL_RX_PKT_TYPE_11AC)
		cdp_rx_ppdu->u.gi = CDP_SGI_0_4_US;
	else
		cdp_rx_ppdu->u.gi = ppdu_info->rx_status.sgi;

	cdp_rx_ppdu->u.ldpc = ppdu_info->rx_status.ldpc;
	cdp_rx_ppdu->u.ppdu_type = ppdu_info->rx_status.reception_type;
	cdp_rx_ppdu->u.ltf_size = (ppdu_info->rx_status.he_data5 >>
				   QDF_MON_STATUS_HE_LTF_SIZE_SHIFT) & 0x3;

	cdp_rx_ppdu->rssi = ppdu_info->rx_status.rssi_comb;
	cdp_rx_ppdu->timestamp = ppdu_info->rx_status.tsft;
	cdp_rx_ppdu->channel = ppdu_info->rx_status.chan_num;
	cdp_rx_ppdu->beamformed = ppdu_info->rx_status.beamformed;
	cdp_rx_ppdu->num_bytes = ppdu_info->rx_status.ppdu_len;
	cdp_rx_ppdu->lsig_a = ppdu_info->rx_status.rate;
	cdp_rx_ppdu->u.ltf_size = ppdu_info->rx_status.ltf_size;

	if (ppdu_info->rx_status.preamble_type == HAL_RX_PKT_TYPE_11AC) {
		cdp_rx_ppdu->u.stbc = ppdu_info->rx_status.is_stbc;
		cdp_rx_ppdu->vht_no_txop_ps =
			ppdu_info->rx_status.vht_no_txop_ps;
		cdp_rx_ppdu->vht_crc = ppdu_info->rx_status.vht_crc;
		cdp_rx_ppdu->group_id = ppdu_info->rx_status.vht_flag_values5;
	} else if (ppdu_info->rx_status.preamble_type ==
		   HAL_RX_PKT_TYPE_11AX) {
		cdp_rx_ppdu->u.stbc = (ppdu_info->rx_status.he_data3 >>
				       QDF_MON_STATUS_STBC_SHIFT) & 0x1;
		cdp_rx_ppdu->u.dcm = (ppdu_info->rx_status.he_data3 >>
				      QDF_MON_STATUS_DCM_SHIFT) & 0x1;
	} else {
		cdp_rx_ppdu->u.stbc = ppdu_info->rx_status.ht_stbc;
		cdp_rx_ppdu->ht_length = ppdu_info->rx_status.ht_length;
		cdp_rx_ppdu->ht_smoothing = ppdu_info->rx_status.smoothing;
		cdp_rx_ppdu->ht_not_sounding =
			ppdu_info->rx_status.not_sounding;
		cdp_rx_ppdu->ht_aggregation = ppdu_info->rx_status.aggregation;
		cdp_rx_ppdu->ht_stbc = ppdu_info->rx_status.ht_stbc;
		cdp_rx_ppdu->ht_crc = ppdu_info->rx_status.ht_crc;
	}

	cdp_rx_ppdu->l_sig_length = ppdu_info->rx_status.l_sig_length;
	cdp_rx_ppdu->l_sig_a_parity = ppdu_info->rx_status.l_sig_a_parity;
	cdp_rx_ppdu->l_sig_a_pkt_type = ppdu_info->rx_status.l_sig_a_pkt_type;

	if (ppdu_info->rx_status.preamble_type == HAL_RX_PKT_TYPE_11AX) {
		cdp_rx_ppdu->he_crc = ppdu_info->rx_status.he_crc;
		cdp_rx_ppdu->bss_color_id =
			ppdu_info->rx_status.he_data3 & 0x3F;
		cdp_rx_ppdu->beam_change = (ppdu_info->rx_status.he_data3 >>
				QDF_MON_STATUS_BEAM_CHANGE_SHIFT) & 0x1;
		cdp_rx_ppdu->dl_ul_flag = (ppdu_info->rx_status.he_data3 >>
				QDF_MON_STATUS_DL_UL_SHIFT) & 0x1;
		cdp_rx_ppdu->ldpc_extra_sym = (ppdu_info->rx_status.he_data3 >>
				QDF_MON_STATUS_LDPC_EXTRA_SYMBOL_SHIFT) & 0x1;
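		/* Remaining HE fields are extracted from the he_data4/5/6 and
		 * he_flags1/2 words of the monitor status.
		 */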
		cdp_rx_ppdu->special_reuse =
			ppdu_info->rx_status.he_data4 & 0xF;
		cdp_rx_ppdu->ltf_sym = (ppdu_info->rx_status.he_data5 >>
				QDF_MON_STATUS_HE_LTF_SYM_SHIFT) & 0x7;
		cdp_rx_ppdu->txbf = (ppdu_info->rx_status.he_data5 >>
				QDF_MON_STATUS_TXBF_SHIFT) & 0x1;
		cdp_rx_ppdu->pe_disambiguity = (ppdu_info->rx_status.he_data5 >>
				QDF_MON_STATUS_PE_DISAMBIGUITY_SHIFT) & 0x1;
		cdp_rx_ppdu->pre_fec_pad = (ppdu_info->rx_status.he_data5 >>
				QDF_MON_STATUS_PRE_FEC_PAD_SHIFT) & 0x3;
		cdp_rx_ppdu->dopplar = (ppdu_info->rx_status.he_data6 >>
				QDF_MON_STATUS_DOPPLER_SHIFT) & 0x1;
		cdp_rx_ppdu->txop_duration = (ppdu_info->rx_status.he_data6 >>
				QDF_MON_STATUS_TXOP_SHIFT) & 0x7F;
		cdp_rx_ppdu->sig_b_mcs = ppdu_info->rx_status.he_flags1 & 0x7;
		cdp_rx_ppdu->sig_b_dcm = (ppdu_info->rx_status.he_flags1 >>
				QDF_MON_STATUS_DCM_FLAG_1_SHIFT) & 0x1;
		cdp_rx_ppdu->sig_b_sym = (ppdu_info->rx_status.he_flags2 >>
				QDF_MON_STATUS_NUM_SIG_B_SYMBOLS_SHIFT) & 0xF;
		cdp_rx_ppdu->sig_b_comp = (ppdu_info->rx_status.he_flags2 >>
				QDF_MON_STATUS_SIG_B_COMPRESSION_FLAG_2_SHIFT) & 0x1;
	}
	dp_rx_populate_rx_rssi_chain(ppdu_info, cdp_rx_ppdu, pdev);
	dp_rx_populate_su_evm_details(ppdu_info, cdp_rx_ppdu);
	cdp_rx_ppdu->rx_antenna = ppdu_info->rx_status.rx_antenna;

	cdp_rx_ppdu->nf = ppdu_info->rx_status.chan_noise_floor;
	for (chain = 0; chain < MAX_CHAIN; chain++)
		cdp_rx_ppdu->per_chain_rssi[chain] =
			ppdu_info->rx_status.rssi[chain];

	cdp_rx_ppdu->is_mcast_bcast = ppdu_info->nac_info.mcast_bcast;

	cdp_rx_ppdu->num_users = ppdu_info->com_info.num_users;

	dp_rx_populate_cdp_indication_ppdu_user(pdev, ppdu_info, cdp_rx_ppdu);
}

/**
 * dp_rx_is_valid_undecoded_frame() - Check whether the received undecoded
 * frame is valid or not against the configured error mask
 * @err_mask: configured err mask
 * @err_code: Received error reason code for phy abort
 *
 * Return: true / false
 */
static inline bool
dp_rx_is_valid_undecoded_frame(uint64_t err_mask, uint8_t err_code)
{
	if (err_code < CDP_PHYRX_ERR_MAX &&
	    (err_mask & (1ULL << err_code)))
		return true;

	return false;
}

void
dp_rx_handle_ppdu_undecoded_metadata(struct dp_soc *soc, struct dp_pdev *pdev,
				     struct hal_rx_ppdu_info *ppdu_info)
{
	qdf_nbuf_t ppdu_nbuf;
	struct cdp_rx_indication_ppdu *cdp_rx_ppdu;
	uint8_t abort_reason = 0;
	struct dp_mon_pdev *mon_pdev = pdev->monitor_pdev;
	uint64_t mask64;

	/* Return if RX_ABORT not set */
	if (ppdu_info->rx_status.phyrx_abort == 0)
		return;

	mask64 = RX_PHYERR_MASK_GET64(mon_pdev->phyrx_error_mask,
				      mon_pdev->phyrx_error_mask_cont);
	abort_reason = ppdu_info->rx_status.phyrx_abort_reason;

	if (!dp_rx_is_valid_undecoded_frame(mask64, abort_reason))
		return;

	ppdu_nbuf = qdf_nbuf_alloc(soc->osdev,
				   sizeof(struct cdp_rx_indication_ppdu),
				   0, 0, FALSE);
	if (ppdu_nbuf) {
		cdp_rx_ppdu = ((struct cdp_rx_indication_ppdu *)
			       qdf_nbuf_data(ppdu_nbuf));

		qdf_mem_zero(cdp_rx_ppdu,
			     sizeof(struct cdp_rx_indication_ppdu));
		dp_rx_populate_cdp_indication_ppdu_undecoded_metadata(pdev,
				ppdu_info, cdp_rx_ppdu);

		if (!qdf_nbuf_put_tail(ppdu_nbuf,
				       sizeof(struct cdp_rx_indication_ppdu))) {
			return;
		}

		mon_pdev->rx_mon_stats.rx_undecoded_count++;
		mon_pdev->rx_mon_stats.rx_undecoded_error[abort_reason] += 1;

		dp_wdi_event_handler(WDI_EVENT_RX_PPDU_DESC_UNDECODED_METADATA,
				     soc, ppdu_nbuf, HTT_INVALID_PEER,
				     WDI_NO_VAL, pdev->pdev_id);
	}
}
#endif /* QCA_UNDECODED_METADATA_SUPPORT */

#ifdef QCA_MCOPY_SUPPORT
QDF_STATUS
dp_rx_handle_mcopy_mode(struct dp_soc *soc, struct dp_pdev *pdev,
			struct hal_rx_ppdu_info *ppdu_info, qdf_nbuf_t nbuf,
			uint8_t fcs_ok_mpdu_cnt, bool deliver_frame)
{
	uint16_t size = 0;
	struct ieee80211_frame *wh;
	uint32_t *nbuf_data;
	struct dp_mon_pdev *mon_pdev = pdev->monitor_pdev;

	if (!ppdu_info->ppdu_msdu_info[fcs_ok_mpdu_cnt].first_msdu_payload)
		return QDF_STATUS_SUCCESS;

	/* For M_COPY mode only one msdu per ppdu is sent to upper layer*/
	if (mon_pdev->mcopy_mode == M_COPY) {
		if (mon_pdev->m_copy_id.rx_ppdu_id == ppdu_info->com_info.ppdu_id)
			return QDF_STATUS_SUCCESS;
	}

	wh = (struct ieee80211_frame *)(ppdu_info->ppdu_msdu_info[fcs_ok_mpdu_cnt].first_msdu_payload + 4);

	size = (ppdu_info->ppdu_msdu_info[fcs_ok_mpdu_cnt].first_msdu_payload -
		qdf_nbuf_data(nbuf));

	if (qdf_nbuf_pull_head(nbuf, size) == NULL)
		return QDF_STATUS_SUCCESS;

	if (((wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK) ==
	     IEEE80211_FC0_TYPE_MGT) ||
	    ((wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK) ==
	     IEEE80211_FC0_TYPE_CTL)) {
		return QDF_STATUS_SUCCESS;
	}

	nbuf_data = (uint32_t *)qdf_nbuf_data(nbuf);
	*nbuf_data = mon_pdev->ppdu_info.com_info.ppdu_id;
	/* only retain RX MSDU payload in the skb */
	qdf_nbuf_trim_tail(nbuf, qdf_nbuf_len(nbuf) - ppdu_info->ppdu_msdu_info[fcs_ok_mpdu_cnt].payload_len);
	if (deliver_frame) {
		mon_pdev->m_copy_id.rx_ppdu_id = ppdu_info->com_info.ppdu_id;
		dp_wdi_event_handler(WDI_EVENT_RX_DATA, soc,
				     nbuf, HTT_INVALID_PEER,
				     WDI_NO_VAL, pdev->pdev_id);
	}
	return QDF_STATUS_E_ALREADY;
}

void
dp_rx_mcopy_handle_last_mpdu(struct dp_soc *soc, struct dp_pdev *pdev,
			     struct hal_rx_ppdu_info *ppdu_info,
			     qdf_nbuf_t status_nbuf)
{
	QDF_STATUS mcopy_status;
	qdf_nbuf_t nbuf_clone = NULL;
	struct dp_mon_pdev *mon_pdev = pdev->monitor_pdev;

	/* If the MPDU end tlv and RX header are received in different buffers,
	 * process the RX header based on fcs status.
	 */
	if (mon_pdev->mcopy_status_nbuf) {
		/* For M_COPY mode only one msdu per ppdu is sent to upper layer*/
		if (mon_pdev->mcopy_mode == M_COPY) {
			if (mon_pdev->m_copy_id.rx_ppdu_id ==
			    ppdu_info->com_info.ppdu_id)
				goto end1;
		}

		if (ppdu_info->is_fcs_passed) {
			nbuf_clone = qdf_nbuf_clone(mon_pdev->mcopy_status_nbuf);
			if (!nbuf_clone) {
				QDF_TRACE(QDF_MODULE_ID_TXRX,
					  QDF_TRACE_LEVEL_ERROR,
					  "Failed to clone nbuf");
				goto end1;
			}

			mon_pdev->m_copy_id.rx_ppdu_id = ppdu_info->com_info.ppdu_id;
			dp_wdi_event_handler(WDI_EVENT_RX_DATA, soc,
					     nbuf_clone,
					     HTT_INVALID_PEER,
					     WDI_NO_VAL, pdev->pdev_id);
			ppdu_info->is_fcs_passed = false;
		}
end1:
		qdf_nbuf_free(mon_pdev->mcopy_status_nbuf);
		mon_pdev->mcopy_status_nbuf = NULL;
	}

	/* If the MPDU end tlv and RX header are received in different buffers,
	 * preserve the RX header as the fcs status will be received in MPDU
	 * end tlv in next buffer. So, cache the buffer to be processed in the
	 * next iteration.
	 */
	if ((ppdu_info->fcs_ok_cnt + ppdu_info->fcs_err_cnt) !=
	    ppdu_info->com_info.mpdu_cnt) {
		mon_pdev->mcopy_status_nbuf = qdf_nbuf_clone(status_nbuf);
		if (mon_pdev->mcopy_status_nbuf) {
			mcopy_status = dp_rx_handle_mcopy_mode(
					soc, pdev,
					ppdu_info,
					mon_pdev->mcopy_status_nbuf,
					ppdu_info->fcs_ok_cnt,
					false);
			if (mcopy_status == QDF_STATUS_SUCCESS) {
				qdf_nbuf_free(mon_pdev->mcopy_status_nbuf);
				mon_pdev->mcopy_status_nbuf = NULL;
			}
		}
	}
}

void
dp_rx_mcopy_process_ppdu_info(struct dp_pdev *pdev,
			      struct hal_rx_ppdu_info *ppdu_info,
			      uint32_t tlv_status)
{
	struct dp_mon_pdev *mon_pdev = pdev->monitor_pdev;

	if (qdf_unlikely(!mon_pdev->mcopy_mode))
		return;

	/* The fcs status is received in MPDU end tlv. If the RX header
	 * and its MPDU end tlv are received in different status buffer then
	 * to process that header ppdu_info->is_fcs_passed is used.
	 * If end tlv is received in next status buffer then com_info.mpdu_cnt
	 * will be 0 at the time of receiving MPDU end tlv and we update the
	 * is_fcs_passed flag based on ppdu_info->fcs_err.
	 */
	if (tlv_status != HAL_TLV_STATUS_MPDU_END)
		return;

	if (!ppdu_info->fcs_err) {
		if (ppdu_info->fcs_ok_cnt >
		    HAL_RX_MAX_MPDU_H_PER_STATUS_BUFFER) {
			dp_err("No. of MPDUs(%d) per status buff exceeded",
			       ppdu_info->fcs_ok_cnt);
			return;
		}
		if (ppdu_info->com_info.mpdu_cnt)
			ppdu_info->fcs_ok_cnt++;
		else
			ppdu_info->is_fcs_passed = true;
	} else {
		if (ppdu_info->com_info.mpdu_cnt)
			ppdu_info->fcs_err_cnt++;
		else
			ppdu_info->is_fcs_passed = false;
	}
}

void
dp_rx_process_mcopy_mode(struct dp_soc *soc, struct dp_pdev *pdev,
			 struct hal_rx_ppdu_info *ppdu_info,
			 uint32_t tlv_status,
			 qdf_nbuf_t status_nbuf)
{
	QDF_STATUS mcopy_status;
	qdf_nbuf_t nbuf_clone = NULL;
	uint8_t fcs_ok_mpdu_cnt = 0;
	struct dp_mon_pdev *mon_pdev = pdev->monitor_pdev;

	dp_rx_mcopy_handle_last_mpdu(soc, pdev, ppdu_info, status_nbuf);

	if (qdf_unlikely(!ppdu_info->com_info.mpdu_cnt))
		goto end;

	if (qdf_unlikely(!ppdu_info->fcs_ok_cnt))
		goto end;

	/* For M_COPY mode only one msdu per ppdu is sent to upper layer*/
	if (mon_pdev->mcopy_mode == M_COPY)
		ppdu_info->fcs_ok_cnt = 1;

	while (fcs_ok_mpdu_cnt < ppdu_info->fcs_ok_cnt) {
		nbuf_clone = qdf_nbuf_clone(status_nbuf);
		if (!nbuf_clone) {
			QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
				  "Failed to clone nbuf");
			goto end;
		}

		mcopy_status = dp_rx_handle_mcopy_mode(soc, pdev,
						       ppdu_info,
						       nbuf_clone,
						       fcs_ok_mpdu_cnt,
						       true);

		if (mcopy_status == QDF_STATUS_SUCCESS)
			qdf_nbuf_free(nbuf_clone);

		fcs_ok_mpdu_cnt++;
	}
end:
	qdf_nbuf_free(status_nbuf);
	ppdu_info->fcs_ok_cnt = 0;
	ppdu_info->fcs_err_cnt = 0;
	ppdu_info->com_info.mpdu_cnt = 0;
	qdf_mem_zero(&ppdu_info->ppdu_msdu_info,
		     HAL_RX_MAX_MPDU_H_PER_STATUS_BUFFER
		     * sizeof(struct hal_rx_msdu_payload_info));
}
#endif /* QCA_MCOPY_SUPPORT */

int
dp_rx_handle_smart_mesh_mode(struct dp_soc *soc, struct dp_pdev *pdev,
			     struct hal_rx_ppdu_info *ppdu_info,
			     qdf_nbuf_t nbuf)
{
	uint8_t size = 0;
	struct dp_mon_vdev *mon_vdev;
	struct dp_mon_pdev *mon_pdev = pdev->monitor_pdev;

	if (!mon_pdev->mvdev) {
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
			  "[%s]:[%d] Monitor vdev is NULL !!",
			  __func__, __LINE__);
		return 1;
	}

	mon_vdev = mon_pdev->mvdev->monitor_vdev;

	if (!ppdu_info->msdu_info.first_msdu_payload) {
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
			  "[%s]:[%d] First msdu payload not present",
			  __func__, __LINE__);
		return 1;
	}

	/* Adding 4 bytes to get to start of 802.11 frame after phy_ppdu_id */
	size = (ppdu_info->msdu_info.first_msdu_payload -
		qdf_nbuf_data(nbuf)) + 4;
	ppdu_info->msdu_info.first_msdu_payload = NULL;

	if (!qdf_nbuf_pull_head(nbuf, size)) {
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
			  "[%s]:[%d] No header present",
			  __func__, __LINE__);
		return 1;
	}

	/* Only retain RX MSDU payload in the skb */
	qdf_nbuf_trim_tail(nbuf, qdf_nbuf_len(nbuf) -
			   ppdu_info->msdu_info.payload_len);
	if (!qdf_nbuf_update_radiotap(&mon_pdev->ppdu_info.rx_status, nbuf,
				      qdf_nbuf_headroom(nbuf))) {
		DP_STATS_INC(pdev, dropped.mon_radiotap_update_err, 1);
		return 1;
	}

	mon_vdev->osif_rx_mon(mon_pdev->mvdev->osif_vdev,
			      nbuf, NULL);
	mon_pdev->ppdu_info.rx_status.monitor_direct_used = 0;
	return 0;
}

#ifdef WLAN_FEATURE_LOCAL_PKT_CAPTURE
int dp_rx_handle_local_pkt_capture(struct dp_pdev *pdev,
				   struct hal_rx_ppdu_info *ppdu_info,
				   qdf_nbuf_t nbuf)
{
	uint8_t size;
	struct dp_mon_vdev *mon_vdev;
	struct dp_mon_pdev *mon_pdev = pdev->monitor_pdev;

	if (!mon_pdev->mvdev) {
		dp_info_rl("Monitor vdev is NULL !!");
		return 1;
	}

	mon_vdev = mon_pdev->mvdev->monitor_vdev;

	if (!ppdu_info->msdu_info.first_msdu_payload) {
		dp_info_rl("First msdu payload not present");
		return 1;
	}

	/* Skip phy_ppdu_id_size bytes to get to the start of the 802.11 frame */
	size = (ppdu_info->msdu_info.first_msdu_payload -
		qdf_nbuf_data(nbuf)) + mon_pdev->phy_ppdu_id_size;
	ppdu_info->msdu_info.first_msdu_payload = NULL;

	if (!qdf_nbuf_pull_head(nbuf, size)) {
		dp_info_rl("No header present");
		return 1;
	}

	/* Only retain RX MSDU payload in the skb */
	qdf_nbuf_trim_tail(nbuf, qdf_nbuf_len(nbuf) -
			   ppdu_info->msdu_info.payload_len +
			   mon_pdev->phy_ppdu_id_size);
	if (!qdf_nbuf_update_radiotap(&mon_pdev->ppdu_info.rx_status, nbuf,
				      qdf_nbuf_headroom(nbuf))) {
		DP_STATS_INC(pdev, dropped.mon_radiotap_update_err, 1);
		return 1;
	}

	if (mon_vdev && mon_vdev->osif_rx_mon)
		mon_vdev->osif_rx_mon(mon_pdev->mvdev->osif_vdev, nbuf, NULL);

	return 0;
}
#endif

qdf_nbuf_t
dp_rx_nbuf_prepare(struct dp_soc *soc, struct dp_pdev *pdev)
{
	uint8_t *buf;
	int32_t nbuf_retry_count;
	QDF_STATUS ret;
	qdf_nbuf_t nbuf = NULL;

	for (nbuf_retry_count = 0; nbuf_retry_count <
	     QDF_NBUF_ALLOC_MAP_RETRY_THRESHOLD;
	     nbuf_retry_count++) {
		/* Allocate a new skb using alloc_skb */
		nbuf = qdf_nbuf_alloc_no_recycler(RX_MON_STATUS_BUF_SIZE,
						  RX_MON_STATUS_BUF_RESERVATION,
						  RX_DATA_BUFFER_ALIGNMENT);

		if (!nbuf) {
			DP_STATS_INC(pdev, replenish.nbuf_alloc_fail, 1);
			continue;
		}

		buf = qdf_nbuf_data(nbuf);

		memset(buf, 0, RX_MON_STATUS_BUF_SIZE);

		ret = qdf_nbuf_map_nbytes_single(soc->osdev, nbuf,
						 QDF_DMA_FROM_DEVICE,
						 RX_MON_STATUS_BUF_SIZE);

		/* nbuf map failed */
		if (qdf_unlikely(QDF_IS_STATUS_ERROR(ret))) {
			qdf_nbuf_free(nbuf);
			DP_STATS_INC(pdev, replenish.map_err, 1);
			continue;
		}
		/* qdf_nbuf alloc and map succeeded */
		break;
	}

	/* qdf_nbuf still alloc or map failed */
	if (qdf_unlikely(nbuf_retry_count >=
			 QDF_NBUF_ALLOC_MAP_RETRY_THRESHOLD))
		return NULL;

	return nbuf;
}

#ifndef DISABLE_MON_CONFIG
uint32_t
dp_mon_process(struct dp_soc *soc, struct dp_intr *int_ctx,
	       uint32_t mac_id, uint32_t quota)
{
	struct dp_mon_soc *mon_soc = soc->monitor_soc;

	if (mon_soc && mon_soc->mon_rx_process)
		return mon_soc->mon_rx_process(soc, int_ctx,
					       mac_id, quota);
	return 0;
}
#else
uint32_t
dp_mon_process(struct dp_soc *soc, struct dp_intr *int_ctx,
	       uint32_t mac_id, uint32_t quota)
{
	return 0;
}
#endif

/**
 * dp_send_mgmt_packet_to_stack(): send indication to upper layers
 *
 * @soc: soc handle
 * @nbuf: Mgmt packet
 * @pdev: pdev handle
 *
 * Return: QDF_STATUS_SUCCESS on success
 *         QDF_STATUS_E_INVAL on error
 */
#ifdef QCA_MCOPY_SUPPORT
QDF_STATUS
dp_send_mgmt_packet_to_stack(struct dp_soc *soc,
			     qdf_nbuf_t nbuf,
			     struct dp_pdev *pdev)
{
	uint32_t *nbuf_data;
	struct ieee80211_frame *wh;
	qdf_frag_t addr;
	struct dp_mon_pdev *mon_pdev = pdev->monitor_pdev;

	if (!nbuf)
		return QDF_STATUS_E_INVAL;

	/* Get the address pointing to the 802.11 header */
	addr = dp_rx_mon_get_nbuf_80211_hdr(nbuf);
	if (qdf_unlikely(!addr)) {
		qdf_nbuf_free(nbuf);
		return QDF_STATUS_E_INVAL;
	}

	/* Drop the frame if it is neither a management nor a control frame */
	wh = (struct ieee80211_frame *)addr;
	if (((wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK) !=
	     IEEE80211_FC0_TYPE_MGT) &&
	    ((wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK) !=
	     IEEE80211_FC0_TYPE_CTL)) {
		qdf_nbuf_free(nbuf);
		return QDF_STATUS_E_INVAL;
	}
	nbuf_data = (uint32_t *)qdf_nbuf_push_head(nbuf, 4);
	if (!nbuf_data) {
		QDF_TRACE(QDF_MODULE_ID_DP,
			  QDF_TRACE_LEVEL_ERROR,
			  FL("No headroom"));
		qdf_nbuf_free(nbuf);
		return QDF_STATUS_E_INVAL;
	}
	*nbuf_data = mon_pdev->ppdu_info.com_info.ppdu_id;

	dp_wdi_event_handler(WDI_EVENT_RX_MGMT_CTRL, soc, nbuf,
			     HTT_INVALID_PEER,
			     WDI_NO_VAL, pdev->pdev_id);
	return QDF_STATUS_SUCCESS;
}
#else
QDF_STATUS
dp_send_mgmt_packet_to_stack(struct dp_soc *soc,
			     qdf_nbuf_t nbuf,
			     struct dp_pdev *pdev)
{
	return QDF_STATUS_SUCCESS;
}
#endif /* QCA_MCOPY_SUPPORT */

QDF_STATUS dp_rx_mon_process_dest_pktlog(struct dp_soc *soc,
					 uint32_t mac_id,
					 qdf_nbuf_t mpdu)
{
	uint32_t event, msdu_timestamp = 0;
	struct dp_pdev *pdev = dp_get_pdev_for_lmac_id(soc, mac_id);
	void *data;
	struct ieee80211_frame *wh;
	uint8_t type, subtype;
	struct dp_mon_pdev *mon_pdev;

	if (!pdev)
		return QDF_STATUS_E_INVAL;

	mon_pdev = pdev->monitor_pdev;

	if (mon_pdev->rx_pktlog_cbf) {
		if (qdf_nbuf_get_nr_frags(mpdu))
			data = qdf_nbuf_get_frag_addr(mpdu, 0);
		else
			data = qdf_nbuf_data(mpdu);

		/* CBF logging required, doesn't matter if it is a full mode
		 * or lite mode.
1843 * Need to look for mpdu with: 1844 * TYPE = ACTION, SUBTYPE = NO ACK in the header 1845 */ 1846 event = WDI_EVENT_RX_CBF; 1847 1848 wh = (struct ieee80211_frame *)data; 1849 type = (wh)->i_fc[0] & IEEE80211_FC0_TYPE_MASK; 1850 subtype = (wh)->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK; 1851 if (type == IEEE80211_FC0_TYPE_MGT && 1852 subtype == IEEE80211_FCO_SUBTYPE_ACTION_NO_ACK) { 1853 msdu_timestamp = mon_pdev->ppdu_info.rx_status.tsft; 1854 dp_rx_populate_cbf_hdr(soc, 1855 mac_id, event, 1856 mpdu, 1857 msdu_timestamp); 1858 } 1859 } 1860 return QDF_STATUS_SUCCESS; 1861 } 1862 1863 QDF_STATUS dp_rx_mon_deliver_non_std(struct dp_soc *soc, 1864 uint32_t mac_id) 1865 { 1866 struct dp_pdev *pdev = dp_get_pdev_for_lmac_id(soc, mac_id); 1867 ol_txrx_rx_mon_fp osif_rx_mon; 1868 qdf_nbuf_t dummy_msdu; 1869 struct dp_mon_pdev *mon_pdev; 1870 struct dp_mon_vdev *mon_vdev; 1871 1872 /* Sanity checking */ 1873 if (!pdev || !pdev->monitor_pdev) 1874 goto mon_deliver_non_std_fail; 1875 1876 mon_pdev = pdev->monitor_pdev; 1877 1878 if (!mon_pdev->mvdev || !mon_pdev->mvdev || 1879 !mon_pdev->mvdev->monitor_vdev || 1880 !mon_pdev->mvdev->monitor_vdev->osif_rx_mon) 1881 goto mon_deliver_non_std_fail; 1882 1883 mon_vdev = mon_pdev->mvdev->monitor_vdev; 1884 /* Generate a dummy skb_buff */ 1885 osif_rx_mon = mon_vdev->osif_rx_mon; 1886 dummy_msdu = qdf_nbuf_alloc(soc->osdev, MAX_MONITOR_HEADER, 1887 MAX_MONITOR_HEADER, 4, FALSE); 1888 if (!dummy_msdu) 1889 goto allocate_dummy_msdu_fail; 1890 1891 qdf_nbuf_set_pktlen(dummy_msdu, 0); 1892 qdf_nbuf_set_next(dummy_msdu, NULL); 1893 1894 mon_pdev->ppdu_info.rx_status.ppdu_id = 1895 mon_pdev->ppdu_info.com_info.ppdu_id; 1896 1897 /* Apply the radio header to this dummy skb */ 1898 if (!qdf_nbuf_update_radiotap(&mon_pdev->ppdu_info.rx_status, dummy_msdu, 1899 qdf_nbuf_headroom(dummy_msdu))) { 1900 DP_STATS_INC(pdev, dropped.mon_radiotap_update_err, 1); 1901 qdf_nbuf_free(dummy_msdu); 1902 goto mon_deliver_non_std_fail; 1903 } 1904 1905 /* deliver to the user layer application */ 1906 osif_rx_mon(mon_pdev->mvdev->osif_vdev, 1907 dummy_msdu, NULL); 1908 1909 /* Clear rx_status*/ 1910 qdf_mem_zero(&mon_pdev->ppdu_info.rx_status, 1911 sizeof(mon_pdev->ppdu_info.rx_status)); 1912 mon_pdev->mon_ppdu_status = DP_PPDU_STATUS_START; 1913 1914 return QDF_STATUS_SUCCESS; 1915 1916 allocate_dummy_msdu_fail: 1917 dp_rx_mon_dest_debug("%pK: mon_skb=%pK ", 1918 soc, dummy_msdu); 1919 1920 mon_deliver_non_std_fail: 1921 return QDF_STATUS_E_INVAL; 1922 } 1923 1924 /** 1925 * dp_rx_process_peer_based_pktlog() - Process Rx pktlog if peer based 1926 * filtering enabled 1927 * @soc: core txrx main context 1928 * @ppdu_info: Structure for rx ppdu info 1929 * @status_nbuf: Qdf nbuf abstraction for linux skb 1930 * @pdev_id: mac_id/pdev_id correspondinggly for MCL and WIN 1931 * 1932 * Return: none 1933 */ 1934 void 1935 dp_rx_process_peer_based_pktlog(struct dp_soc *soc, 1936 struct hal_rx_ppdu_info *ppdu_info, 1937 qdf_nbuf_t status_nbuf, uint32_t pdev_id) 1938 { 1939 struct dp_peer *peer; 1940 struct mon_rx_user_status *rx_user_status; 1941 uint32_t num_users = ppdu_info->com_info.num_users; 1942 uint16_t sw_peer_id; 1943 1944 /* Sanity check for num_users */ 1945 if (!num_users) 1946 return; 1947 1948 qdf_assert_always(num_users <= CDP_MU_MAX_USERS); 1949 rx_user_status = &ppdu_info->rx_user_status[num_users - 1]; 1950 1951 sw_peer_id = rx_user_status->sw_peer_id; 1952 1953 peer = dp_peer_get_ref_by_id(soc, sw_peer_id, 1954 DP_MOD_ID_RX_PPDU_STATS); 1955 1956 if (!peer) 1957 return; 
	if ((peer->peer_id != HTT_INVALID_PEER) && (peer->monitor_peer) &&
	    (peer->monitor_peer->peer_based_pktlog_filter)) {
		dp_wdi_event_handler(WDI_EVENT_RX_DESC, soc,
				     status_nbuf,
				     peer->peer_id,
				     WDI_NO_VAL, pdev_id);
	}
	dp_peer_unref_delete(peer,
			     DP_MOD_ID_RX_PPDU_STATS);
}

uint32_t
dp_mon_rx_add_tlv(uint8_t id, uint16_t len, void *value, qdf_nbuf_t mpdu_nbuf)
{
	uint8_t *dest = NULL;
	uint32_t num_bytes_pushed = 0;

	/* Add tlv id field */
	dest = qdf_nbuf_push_head(mpdu_nbuf, sizeof(uint8_t));
	if (qdf_likely(dest)) {
		*((uint8_t *)dest) = id;
		num_bytes_pushed += sizeof(uint8_t);
	}

	/* Add tlv len field */
	dest = qdf_nbuf_push_head(mpdu_nbuf, sizeof(uint16_t));
	if (qdf_likely(dest)) {
		*((uint16_t *)dest) = len;
		num_bytes_pushed += sizeof(uint16_t);
	}

	/* Add tlv value field */
	dest = qdf_nbuf_push_head(mpdu_nbuf, len);
	if (qdf_likely(dest)) {
		qdf_mem_copy(dest, value, len);
		num_bytes_pushed += len;
	}

	return num_bytes_pushed;
}

void
dp_mon_rx_stats_update_rssi_dbm_params(struct dp_mon_pdev *mon_pdev,
				       struct hal_rx_ppdu_info *ppdu_info)
{
	ppdu_info->rx_status.rssi_offset = mon_pdev->rssi_offsets.rssi_offset;
	ppdu_info->rx_status.rssi_dbm_conv_support =
		mon_pdev->rssi_dbm_conv_support;
	ppdu_info->rx_status.chan_noise_floor =
		mon_pdev->rssi_offsets.rssi_offset;
}

#ifdef WLAN_SUPPORT_CTRL_FRAME_STATS
void dp_rx_mon_update_user_ctrl_frame_stats(struct dp_pdev *pdev,
					    struct hal_rx_ppdu_info *ppdu_info)
{
	struct dp_peer *peer;
	struct dp_mon_peer *mon_peer;
	struct dp_soc *soc = pdev->soc;
	uint16_t fc, sw_peer_id;
	uint8_t i;

	if (qdf_unlikely(!ppdu_info))
		return;

	fc = ppdu_info->nac_info.frame_control;
	if (qdf_likely((qdf_cpu_to_le16(fc) & QDF_IEEE80211_FC0_TYPE_MASK) !=
	    QDF_IEEE80211_FC0_TYPE_CTL))
		return;

	for (i = 0; i < ppdu_info->com_info.num_users; i++) {
		sw_peer_id = ppdu_info->rx_user_status[i].sw_peer_id;
		peer = dp_peer_get_ref_by_id(soc, sw_peer_id,
					     DP_MOD_ID_RX_PPDU_STATS);
		if (qdf_unlikely(!peer))
			continue;
		mon_peer = peer->monitor_peer;
		if (qdf_unlikely(!mon_peer)) {
			dp_peer_unref_delete(peer, DP_MOD_ID_RX_PPDU_STATS);
			continue;
		}
		DP_STATS_INCC(mon_peer, rx.ndpa_cnt, 1,
			      ppdu_info->ctrl_frm_info[i].ndpa);
		DP_STATS_INCC(mon_peer, rx.bar_cnt, 1,
			      ppdu_info->ctrl_frm_info[i].bar);

		dp_peer_unref_delete(peer, DP_MOD_ID_RX_PPDU_STATS);
	}
}
#endif /* WLAN_SUPPORT_CTRL_FRAME_STATS */
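
#if 0
/*
 * Illustrative sketch only (kept out of the build): one way a caller might
 * prepend a TLV to an MPDU nbuf with dp_mon_rx_add_tlv(). The TLV id 0x21
 * and the payload below are hypothetical values, not identifiers defined by
 * this driver. Each qdf_nbuf_push_head() in dp_mon_rx_add_tlv() prepends to
 * the buffer head, so the value ends up first, followed by the length and
 * then the id.
 */
static void dp_mon_rx_add_tlv_example(qdf_nbuf_t mpdu_nbuf)
{
	uint8_t example_id = 0x21;		/* hypothetical TLV id */
	uint32_t example_value = 0x12345678;	/* hypothetical TLV payload */
	uint32_t pushed;

	/* Needs sizeof(example_value) + 3 bytes of headroom in mpdu_nbuf */
	pushed = dp_mon_rx_add_tlv(example_id, sizeof(example_value),
				   &example_value, mpdu_nbuf);
	if (pushed != sizeof(example_value) + sizeof(uint16_t) + sizeof(uint8_t))
		dp_info_rl("TLV only partially added: %u bytes", pushed);
}
#endif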