/*
 * Copyright (c) 2021 The Linux Foundation. All rights reserved.
 * Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */

#include "hal_hw_headers.h"
#include "dp_types.h"
#include "dp_rx.h"
#include "dp_peer.h"
#include "hal_rx.h"
#include "hal_api.h"
#include "qdf_trace.h"
#include "qdf_nbuf.h"
#include "hal_api_mon.h"
#include "dp_internal.h"
#include "qdf_mem.h"   /* qdf_mem_malloc,free */
#include "dp_htt.h"
#include "dp_mon.h"
#include "dp_rx_mon.h"

#include "htt.h"
#ifdef FEATURE_PERPKT_INFO
#include "dp_ratetable.h"
#endif

#ifndef IEEE80211_FCO_SUBTYPE_ACTION_NO_ACK
#define IEEE80211_FCO_SUBTYPE_ACTION_NO_ACK 0xe0
#endif

#if defined(WLAN_CFR_ENABLE) && defined(WLAN_ENH_CFR_ENABLE)
void
dp_rx_mon_handle_cfr_mu_info(struct dp_pdev *pdev,
			     struct hal_rx_ppdu_info *ppdu_info,
			     struct cdp_rx_indication_ppdu *cdp_rx_ppdu)
{
	struct dp_peer *peer;
	struct dp_soc *soc = pdev->soc;
	struct mon_rx_user_status *rx_user_status;
	struct cdp_rx_stats_ppdu_user *rx_stats_peruser;
	uint32_t num_users;
	int user_id;
	uint16_t sw_peer_id;

	num_users = ppdu_info->com_info.num_users;
	for (user_id = 0; user_id < num_users; user_id++) {
		if (user_id > OFDMA_NUM_USERS) {
			return;
		}

		rx_user_status = &ppdu_info->rx_user_status[user_id];
		rx_stats_peruser = &cdp_rx_ppdu->user[user_id];
		sw_peer_id = rx_user_status->sw_peer_id;
		peer = dp_peer_get_ref_by_id(soc, sw_peer_id,
					     DP_MOD_ID_RX_PPDU_STATS);
		if (!peer) {
			rx_stats_peruser->peer_id = HTT_INVALID_PEER;
			continue;
		}

		qdf_mem_copy(rx_stats_peruser->mac_addr,
			     peer->mac_addr.raw, QDF_MAC_ADDR_SIZE);
		dp_peer_unref_delete(peer, DP_MOD_ID_RX_PPDU_STATS);
	}
}

void
dp_rx_mon_populate_cfr_ppdu_info(struct dp_pdev *pdev,
				 struct hal_rx_ppdu_info *ppdu_info,
				 struct cdp_rx_indication_ppdu *cdp_rx_ppdu)
{
	struct dp_peer *peer;
	struct dp_soc *soc = pdev->soc;
	int chain;
	uint16_t sw_peer_id;
	struct mon_rx_user_status *rx_user_status;
	uint32_t num_users = ppdu_info->com_info.num_users;

	cdp_rx_ppdu->ppdu_id = ppdu_info->com_info.ppdu_id;
	cdp_rx_ppdu->timestamp = ppdu_info->rx_status.tsft;
	cdp_rx_ppdu->u.ppdu_type = ppdu_info->rx_status.reception_type;

	for (chain = 0; chain < MAX_CHAIN; chain++)
		cdp_rx_ppdu->per_chain_rssi[chain] =
			ppdu_info->rx_status.rssi[chain];

	cdp_rx_ppdu->u.ltf_size = ppdu_info->rx_status.ltf_size;
	cdp_rx_ppdu->beamformed = ppdu_info->rx_status.beamformed;
	cdp_rx_ppdu->u.ldpc = ppdu_info->rx_status.ldpc;

	if ((ppdu_info->rx_status.sgi == VHT_SGI_NYSM) &&
	    (ppdu_info->rx_status.preamble_type == HAL_RX_PKT_TYPE_11AC))
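		/* NYSM short GI reported on an 11ac PPDU is normalized to the
		 * common CDP 0.4 us short-GI value below.
		 */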
cdp_rx_ppdu->u.gi = CDP_SGI_0_4_US; 107 else 108 cdp_rx_ppdu->u.gi = ppdu_info->rx_status.sgi; 109 110 if (ppdu_info->rx_status.preamble_type == HAL_RX_PKT_TYPE_11AC) { 111 cdp_rx_ppdu->u.stbc = ppdu_info->rx_status.is_stbc; 112 } else if (ppdu_info->rx_status.preamble_type == 113 HAL_RX_PKT_TYPE_11AX) { 114 cdp_rx_ppdu->u.stbc = (ppdu_info->rx_status.he_data3 >> 115 QDF_MON_STATUS_STBC_SHIFT) & 0x1; 116 cdp_rx_ppdu->u.dcm = (ppdu_info->rx_status.he_data3 >> 117 QDF_MON_STATUS_DCM_SHIFT) & 0x1; 118 } 119 120 qdf_assert_always(num_users <= CDP_MU_MAX_USERS); 121 dp_rx_mon_handle_cfr_mu_info(pdev, ppdu_info, cdp_rx_ppdu); 122 rx_user_status = &ppdu_info->rx_user_status[num_users - 1]; 123 sw_peer_id = rx_user_status->sw_peer_id; 124 peer = dp_peer_get_ref_by_id(soc, sw_peer_id, DP_MOD_ID_RX_PPDU_STATS); 125 if (!peer) { 126 cdp_rx_ppdu->peer_id = HTT_INVALID_PEER; 127 cdp_rx_ppdu->num_users = 0; 128 return; 129 } 130 131 cdp_rx_ppdu->peer_id = peer->peer_id; 132 cdp_rx_ppdu->vdev_id = peer->vdev->vdev_id; 133 cdp_rx_ppdu->num_users = num_users; 134 } 135 136 bool 137 dp_cfr_rcc_mode_status(struct dp_pdev *pdev) 138 { 139 return pdev->cfr_rcc_mode; 140 } 141 142 void 143 dp_rx_mon_populate_cfr_info(struct dp_pdev *pdev, 144 struct hal_rx_ppdu_info *ppdu_info, 145 struct cdp_rx_indication_ppdu *cdp_rx_ppdu) 146 { 147 struct cdp_rx_ppdu_cfr_info *cfr_info; 148 149 if (!qdf_unlikely(dp_cfr_rcc_mode_status(pdev))) 150 return; 151 152 cfr_info = &cdp_rx_ppdu->cfr_info; 153 154 cfr_info->bb_captured_channel 155 = ppdu_info->cfr_info.bb_captured_channel; 156 cfr_info->bb_captured_timeout 157 = ppdu_info->cfr_info.bb_captured_timeout; 158 cfr_info->bb_captured_reason 159 = ppdu_info->cfr_info.bb_captured_reason; 160 cfr_info->rx_location_info_valid 161 = ppdu_info->cfr_info.rx_location_info_valid; 162 cfr_info->chan_capture_status 163 = ppdu_info->cfr_info.chan_capture_status; 164 cfr_info->rtt_che_buffer_pointer_high8 165 = ppdu_info->cfr_info.rtt_che_buffer_pointer_high8; 166 cfr_info->rtt_che_buffer_pointer_low32 167 = ppdu_info->cfr_info.rtt_che_buffer_pointer_low32; 168 cfr_info->rtt_cfo_measurement 169 = (int16_t)ppdu_info->cfr_info.rtt_cfo_measurement; 170 cfr_info->agc_gain_info0 171 = ppdu_info->cfr_info.agc_gain_info0; 172 cfr_info->agc_gain_info1 173 = ppdu_info->cfr_info.agc_gain_info1; 174 cfr_info->agc_gain_info2 175 = ppdu_info->cfr_info.agc_gain_info2; 176 cfr_info->agc_gain_info3 177 = ppdu_info->cfr_info.agc_gain_info3; 178 cfr_info->rx_start_ts 179 = ppdu_info->cfr_info.rx_start_ts; 180 cfr_info->mcs_rate 181 = ppdu_info->cfr_info.mcs_rate; 182 cfr_info->gi_type 183 = ppdu_info->cfr_info.gi_type; 184 } 185 186 void 187 dp_update_cfr_dbg_stats(struct dp_pdev *pdev, 188 struct hal_rx_ppdu_info *ppdu_info) 189 { 190 struct hal_rx_ppdu_cfr_info *cfr = &ppdu_info->cfr_info; 191 192 DP_STATS_INC(pdev, 193 rcc.chan_capture_status[cfr->chan_capture_status], 1); 194 if (cfr->rx_location_info_valid) { 195 DP_STATS_INC(pdev, rcc.rx_loc_info_valid_cnt, 1); 196 if (cfr->bb_captured_channel) { 197 DP_STATS_INC(pdev, rcc.bb_captured_channel_cnt, 1); 198 DP_STATS_INC(pdev, 199 rcc.reason_cnt[cfr->bb_captured_reason], 200 1); 201 } else if (cfr->bb_captured_timeout) { 202 DP_STATS_INC(pdev, rcc.bb_captured_timeout_cnt, 1); 203 DP_STATS_INC(pdev, 204 rcc.reason_cnt[cfr->bb_captured_reason], 205 1); 206 } 207 } 208 } 209 210 void 211 dp_rx_handle_cfr(struct dp_soc *soc, struct dp_pdev *pdev, 212 struct hal_rx_ppdu_info *ppdu_info) 213 { 214 qdf_nbuf_t ppdu_nbuf; 215 struct cdp_rx_indication_ppdu 
*cdp_rx_ppdu; 216 217 dp_update_cfr_dbg_stats(pdev, ppdu_info); 218 if (!ppdu_info->cfr_info.bb_captured_channel) 219 return; 220 221 ppdu_nbuf = qdf_nbuf_alloc(soc->osdev, 222 sizeof(struct cdp_rx_indication_ppdu), 223 0, 224 0, 225 FALSE); 226 if (ppdu_nbuf) { 227 cdp_rx_ppdu = (struct cdp_rx_indication_ppdu *)ppdu_nbuf->data; 228 229 dp_rx_mon_populate_cfr_info(pdev, ppdu_info, cdp_rx_ppdu); 230 dp_rx_mon_populate_cfr_ppdu_info(pdev, ppdu_info, cdp_rx_ppdu); 231 qdf_nbuf_put_tail(ppdu_nbuf, 232 sizeof(struct cdp_rx_indication_ppdu)); 233 dp_wdi_event_handler(WDI_EVENT_RX_PPDU_DESC, soc, 234 ppdu_nbuf, HTT_INVALID_PEER, 235 WDI_NO_VAL, pdev->pdev_id); 236 } 237 } 238 239 void 240 dp_rx_populate_cfr_non_assoc_sta(struct dp_pdev *pdev, 241 struct hal_rx_ppdu_info *ppdu_info, 242 struct cdp_rx_indication_ppdu *cdp_rx_ppdu) 243 { 244 if (!dp_cfr_rcc_mode_status(pdev)) 245 return; 246 247 if (ppdu_info->cfr_info.bb_captured_channel) 248 dp_rx_mon_populate_cfr_ppdu_info(pdev, ppdu_info, cdp_rx_ppdu); 249 } 250 251 /** 252 * dp_bb_captured_chan_status() - Get the bb_captured_channel status 253 * @pdev: pdev ctx 254 * @ppdu_info: structure for rx ppdu ring 255 * 256 * Return: Success/ Failure 257 */ 258 static inline QDF_STATUS 259 dp_bb_captured_chan_status(struct dp_pdev *pdev, 260 struct hal_rx_ppdu_info *ppdu_info) 261 { 262 QDF_STATUS status = QDF_STATUS_E_FAILURE; 263 struct hal_rx_ppdu_cfr_info *cfr = &ppdu_info->cfr_info; 264 265 if (dp_cfr_rcc_mode_status(pdev)) { 266 if (cfr->bb_captured_channel) 267 status = QDF_STATUS_SUCCESS; 268 } 269 270 return status; 271 } 272 #else 273 static inline QDF_STATUS 274 dp_bb_captured_chan_status(struct dp_pdev *pdev, 275 struct hal_rx_ppdu_info *ppdu_info) 276 { 277 return QDF_STATUS_E_NOSUPPORT; 278 } 279 #endif /* WLAN_CFR_ENABLE */ 280 281 #ifdef QCA_ENHANCED_STATS_SUPPORT 282 #ifdef QCA_RSSI_DB2DBM 283 /** 284 * dp_rx_mon_rf_index_conv() - this function will convert BB index to RF 285 * index in the rssi_chain[chain][bw] array 286 * 287 * @chain: BB chain index 288 * @mon_pdev: pdev structure 289 * 290 * Return: return RF chain index 291 * 292 * Computation: 293 * 3 Bytes of xbar_config are used for RF to BB mapping 294 * Samples of xbar_config, 295 * 296 * If xbar_config is 0x688FAC(hex): 297 * RF chains 0-3 are connected to BB chains 4-7 298 * RF chains 4-7 are connected to BB chains 0-3 299 * here, 300 * bits 0 to 2 = 4, maps BB chain 4 for RF chain 0 301 * bits 3 to 5 = 5, maps BB chain 5 for RF chain 1 302 * bits 6 to 8 = 6, maps BB chain 6 for RF chain 2 303 * bits 9 to 11 = 7, maps BB chain 7 for RF chain 3 304 * bits 12 to 14 = 0, maps BB chain 0 for RF chain 4 305 * bits 15 to 17 = 1, maps BB chain 1 for RF chain 5 306 * bits 18 to 20 = 2, maps BB chain 2 for RF chain 6 307 * bits 21 to 23 = 3, maps BB chain 3 for RF chain 7 308 */ 309 static uint8_t dp_rx_mon_rf_index_conv(uint8_t chain, 310 struct dp_mon_pdev *mon_pdev) 311 { 312 uint32_t xbar_config = mon_pdev->rssi_offsets.xbar_config; 313 314 if (mon_pdev->rssi_dbm_conv_support && xbar_config) 315 return ((xbar_config >> (3 * chain)) & 0x07); 316 return chain; 317 } 318 #else 319 static uint8_t dp_rx_mon_rf_index_conv(uint8_t chain, 320 struct dp_mon_pdev *mon_pdev) 321 { 322 return chain; 323 } 324 #endif 325 void 326 dp_rx_populate_rx_rssi_chain(struct hal_rx_ppdu_info *ppdu_info, 327 struct cdp_rx_indication_ppdu *cdp_rx_ppdu, 328 struct dp_pdev *pdev) 329 { 330 uint8_t chain, bw; 331 uint8_t rssi; 332 uint8_t chain_rf; 333 struct dp_mon_pdev *mon_pdev = pdev->monitor_pdev; 334 
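	/*
	 * Each loop index is mapped through xbar_config (3 bits per chain)
	 * by dp_rx_mon_rf_index_conv(). For example, with the sample value
	 * xbar_config = 0x688FAC documented above, index 2 maps to
	 * ((0x688FAC >> 6) & 0x7) = chain 6.
	 */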
335 for (chain = 0; chain < SS_COUNT; chain++) { 336 for (bw = 0; bw < MAX_BW; bw++) { 337 chain_rf = dp_rx_mon_rf_index_conv(chain, mon_pdev); 338 rssi = ppdu_info->rx_status.rssi_chain[chain_rf][bw]; 339 if (rssi != DP_RSSI_INVAL) 340 cdp_rx_ppdu->rssi_chain[chain_rf][bw] = rssi; 341 else 342 cdp_rx_ppdu->rssi_chain[chain_rf][bw] = 0; 343 } 344 } 345 } 346 347 void 348 dp_rx_populate_su_evm_details(struct hal_rx_ppdu_info *ppdu_info, 349 struct cdp_rx_indication_ppdu *cdp_rx_ppdu) 350 { 351 uint8_t pilot_evm; 352 uint8_t nss_count; 353 uint8_t pilot_count; 354 355 nss_count = ppdu_info->evm_info.nss_count; 356 pilot_count = ppdu_info->evm_info.pilot_count; 357 358 if ((nss_count * pilot_count) > DP_RX_MAX_SU_EVM_COUNT) { 359 qdf_err("pilot evm count is more than expected"); 360 return; 361 } 362 cdp_rx_ppdu->evm_info.pilot_count = pilot_count; 363 cdp_rx_ppdu->evm_info.nss_count = nss_count; 364 365 /* Populate evm for pilot_evm = nss_count*pilot_count */ 366 for (pilot_evm = 0; pilot_evm < nss_count * pilot_count; pilot_evm++) { 367 cdp_rx_ppdu->evm_info.pilot_evm[pilot_evm] = 368 ppdu_info->evm_info.pilot_evm[pilot_evm]; 369 } 370 } 371 372 /** 373 * dp_rx_inc_rusize_cnt() - increment pdev stats based on RU size 374 * @pdev: pdev ctx 375 * @rx_user_status: mon rx user status 376 * 377 * Return: bool 378 */ 379 static inline bool 380 dp_rx_inc_rusize_cnt(struct dp_pdev *pdev, 381 struct mon_rx_user_status *rx_user_status) 382 { 383 uint32_t ru_size; 384 bool is_data; 385 386 ru_size = rx_user_status->ofdma_ru_size; 387 388 if (dp_is_subtype_data(rx_user_status->frame_control)) { 389 DP_STATS_INC(pdev, 390 ul_ofdma.data_rx_ru_size[ru_size], 1); 391 is_data = true; 392 } else { 393 DP_STATS_INC(pdev, 394 ul_ofdma.nondata_rx_ru_size[ru_size], 1); 395 is_data = false; 396 } 397 398 return is_data; 399 } 400 401 /** 402 * dp_rx_populate_cdp_indication_ppdu_user() - Populate per user cdp indication 403 * @pdev: pdev ctx 404 * @ppdu_info: ppdu info structure from ppdu ring 405 * @cdp_rx_ppdu: Rx PPDU indication structure 406 * 407 * Return: none 408 */ 409 static void 410 dp_rx_populate_cdp_indication_ppdu_user(struct dp_pdev *pdev, 411 struct hal_rx_ppdu_info *ppdu_info, 412 struct cdp_rx_indication_ppdu 413 *cdp_rx_ppdu) 414 { 415 struct dp_peer *peer; 416 struct dp_soc *soc = pdev->soc; 417 int i; 418 struct mon_rx_user_status *rx_user_status; 419 struct mon_rx_user_info *rx_user_info; 420 struct cdp_rx_stats_ppdu_user *rx_stats_peruser; 421 int ru_size; 422 bool is_data = false; 423 uint32_t num_users; 424 struct dp_mon_ops *mon_ops; 425 uint16_t sw_peer_id; 426 427 num_users = ppdu_info->com_info.num_users; 428 for (i = 0; i < num_users; i++) { 429 if (i > OFDMA_NUM_USERS) 430 return; 431 432 rx_user_status = &ppdu_info->rx_user_status[i]; 433 rx_user_info = &ppdu_info->rx_user_info[i]; 434 rx_stats_peruser = &cdp_rx_ppdu->user[i]; 435 436 sw_peer_id = rx_user_status->sw_peer_id; 437 peer = dp_peer_get_ref_by_id(soc, sw_peer_id, 438 DP_MOD_ID_RX_PPDU_STATS); 439 if (qdf_unlikely(!peer)) { 440 rx_stats_peruser->peer_id = HTT_INVALID_PEER; 441 continue; 442 } 443 rx_stats_peruser->is_bss_peer = peer->bss_peer; 444 445 rx_stats_peruser->first_data_seq_ctrl = 446 rx_user_status->first_data_seq_ctrl; 447 448 rx_stats_peruser->frame_control_info_valid = 449 rx_user_status->frame_control_info_valid; 450 rx_stats_peruser->frame_control = 451 rx_user_status->frame_control; 452 453 rx_stats_peruser->qos_control_info_valid = 454 rx_user_info->qos_control_info_valid; 455 rx_stats_peruser->qos_control 
= 456 rx_user_info->qos_control; 457 rx_stats_peruser->tcp_msdu_count = 458 rx_user_status->tcp_msdu_count; 459 rx_stats_peruser->udp_msdu_count = 460 rx_user_status->udp_msdu_count; 461 rx_stats_peruser->other_msdu_count = 462 rx_user_status->other_msdu_count; 463 464 rx_stats_peruser->num_msdu = 465 rx_stats_peruser->tcp_msdu_count + 466 rx_stats_peruser->udp_msdu_count + 467 rx_stats_peruser->other_msdu_count; 468 469 rx_stats_peruser->preamble_type = 470 cdp_rx_ppdu->u.preamble; 471 rx_stats_peruser->mpdu_cnt_fcs_ok = 472 rx_user_status->mpdu_cnt_fcs_ok; 473 rx_stats_peruser->mpdu_cnt_fcs_err = 474 rx_user_status->mpdu_cnt_fcs_err; 475 qdf_mem_copy(&rx_stats_peruser->mpdu_fcs_ok_bitmap, 476 &rx_user_status->mpdu_fcs_ok_bitmap, 477 HAL_RX_NUM_WORDS_PER_PPDU_BITMAP * 478 sizeof(rx_user_status->mpdu_fcs_ok_bitmap[0])); 479 rx_stats_peruser->mpdu_ok_byte_count = 480 rx_user_status->mpdu_ok_byte_count; 481 rx_stats_peruser->mpdu_err_byte_count = 482 rx_user_status->mpdu_err_byte_count; 483 484 cdp_rx_ppdu->num_mpdu += rx_user_status->mpdu_cnt_fcs_ok; 485 cdp_rx_ppdu->num_msdu += rx_stats_peruser->num_msdu; 486 rx_stats_peruser->retries = 487 CDP_FC_IS_RETRY_SET(rx_stats_peruser->frame_control) ? 488 rx_stats_peruser->mpdu_cnt_fcs_ok : 0; 489 cdp_rx_ppdu->retries += rx_stats_peruser->retries; 490 491 if (rx_stats_peruser->mpdu_cnt_fcs_ok > 1) 492 rx_stats_peruser->is_ampdu = 1; 493 else 494 rx_stats_peruser->is_ampdu = 0; 495 496 rx_stats_peruser->tid = ppdu_info->rx_status.tid; 497 498 qdf_mem_copy(rx_stats_peruser->mac_addr, 499 peer->mac_addr.raw, QDF_MAC_ADDR_SIZE); 500 rx_stats_peruser->peer_id = peer->peer_id; 501 cdp_rx_ppdu->vdev_id = peer->vdev->vdev_id; 502 rx_stats_peruser->vdev_id = peer->vdev->vdev_id; 503 rx_stats_peruser->mu_ul_info_valid = 0; 504 505 mon_ops = dp_mon_ops_get(soc); 506 if (mon_ops && mon_ops->mon_rx_populate_ppdu_usr_info) 507 mon_ops->mon_rx_populate_ppdu_usr_info(rx_user_status, 508 rx_stats_peruser); 509 510 dp_peer_unref_delete(peer, DP_MOD_ID_RX_PPDU_STATS); 511 if (cdp_rx_ppdu->u.ppdu_type == HAL_RX_TYPE_MU_OFDMA || 512 cdp_rx_ppdu->u.ppdu_type == HAL_RX_TYPE_MU_MIMO) { 513 if (rx_user_status->mu_ul_info_valid) { 514 rx_stats_peruser->nss = rx_user_status->nss; 515 cdp_rx_ppdu->usr_nss_sum += rx_stats_peruser->nss; 516 rx_stats_peruser->mcs = rx_user_status->mcs; 517 rx_stats_peruser->mu_ul_info_valid = 518 rx_user_status->mu_ul_info_valid; 519 rx_stats_peruser->ofdma_ru_start_index = 520 rx_user_status->ofdma_ru_start_index; 521 rx_stats_peruser->ofdma_ru_width = 522 rx_user_status->ofdma_ru_width; 523 cdp_rx_ppdu->usr_ru_tones_sum += 524 rx_stats_peruser->ofdma_ru_width; 525 rx_stats_peruser->user_index = i; 526 ru_size = rx_user_status->ofdma_ru_size; 527 /* 528 * max RU size will be equal to 529 * HTT_UL_OFDMA_V0_RU_SIZE_RU_996x2 530 */ 531 if (qdf_unlikely(ru_size >= OFDMA_NUM_RU_SIZE)) { 532 dp_err("invalid ru_size %d\n", 533 ru_size); 534 return; 535 } 536 is_data = dp_rx_inc_rusize_cnt(pdev, 537 rx_user_status); 538 } 539 if (is_data) { 540 /* counter to get number of MU OFDMA */ 541 pdev->stats.ul_ofdma.data_rx_ppdu++; 542 pdev->stats.ul_ofdma.data_users[num_users]++; 543 } 544 } 545 } 546 } 547 548 /** 549 * dp_rx_populate_cdp_indication_ppdu() - Populate cdp rx indication structure 550 * @pdev: pdev ctx 551 * @ppdu_info: ppdu info structure from ppdu ring 552 * @cdp_rx_ppdu: Rx PPDU indication structure 553 * 554 * Return: none 555 */ 556 static void 557 dp_rx_populate_cdp_indication_ppdu(struct dp_pdev *pdev, 558 struct hal_rx_ppdu_info 
*ppdu_info, 559 struct cdp_rx_indication_ppdu *cdp_rx_ppdu) 560 { 561 struct dp_peer *peer; 562 struct dp_soc *soc = pdev->soc; 563 uint32_t i; 564 struct dp_mon_ops *mon_ops; 565 uint16_t sw_peer_id; 566 struct mon_rx_user_status *rx_user_status; 567 uint32_t num_users = ppdu_info->com_info.num_users; 568 569 cdp_rx_ppdu->first_data_seq_ctrl = 570 ppdu_info->rx_status.first_data_seq_ctrl; 571 cdp_rx_ppdu->frame_ctrl = 572 ppdu_info->rx_status.frame_control; 573 cdp_rx_ppdu->tcp_msdu_count = ppdu_info->rx_status.tcp_msdu_count; 574 cdp_rx_ppdu->udp_msdu_count = ppdu_info->rx_status.udp_msdu_count; 575 cdp_rx_ppdu->other_msdu_count = ppdu_info->rx_status.other_msdu_count; 576 /* num mpdu is consolidated and added together in num user loop */ 577 cdp_rx_ppdu->num_mpdu = ppdu_info->com_info.mpdu_cnt_fcs_ok; 578 /* num msdu is consolidated and added together in num user loop */ 579 cdp_rx_ppdu->num_msdu = (cdp_rx_ppdu->tcp_msdu_count + 580 cdp_rx_ppdu->udp_msdu_count + 581 cdp_rx_ppdu->other_msdu_count); 582 583 cdp_rx_ppdu->retries = CDP_FC_IS_RETRY_SET(cdp_rx_ppdu->frame_ctrl) ? 584 ppdu_info->com_info.mpdu_cnt_fcs_ok : 0; 585 586 if (ppdu_info->com_info.mpdu_cnt_fcs_ok > 1) 587 cdp_rx_ppdu->is_ampdu = 1; 588 else 589 cdp_rx_ppdu->is_ampdu = 0; 590 cdp_rx_ppdu->tid = ppdu_info->rx_status.tid; 591 592 qdf_assert_always(num_users <= CDP_MU_MAX_USERS); 593 rx_user_status = &ppdu_info->rx_user_status[num_users - 1]; 594 sw_peer_id = rx_user_status->sw_peer_id; 595 peer = dp_peer_get_ref_by_id(soc, sw_peer_id, 596 DP_MOD_ID_RX_PPDU_STATS); 597 if (qdf_unlikely(!peer)) { 598 cdp_rx_ppdu->peer_id = HTT_INVALID_PEER; 599 cdp_rx_ppdu->num_users = 0; 600 goto end; 601 } 602 603 qdf_mem_copy(cdp_rx_ppdu->mac_addr, 604 peer->mac_addr.raw, QDF_MAC_ADDR_SIZE); 605 cdp_rx_ppdu->peer_id = peer->peer_id; 606 cdp_rx_ppdu->vdev_id = peer->vdev->vdev_id; 607 608 cdp_rx_ppdu->ppdu_id = ppdu_info->com_info.ppdu_id; 609 cdp_rx_ppdu->length = ppdu_info->rx_status.ppdu_len; 610 cdp_rx_ppdu->duration = ppdu_info->rx_status.duration; 611 cdp_rx_ppdu->u.nss = ppdu_info->rx_status.nss; 612 cdp_rx_ppdu->u.mcs = ppdu_info->rx_status.mcs; 613 if ((ppdu_info->rx_status.sgi == VHT_SGI_NYSM) && 614 (ppdu_info->rx_status.preamble_type == HAL_RX_PKT_TYPE_11AC)) 615 cdp_rx_ppdu->u.gi = CDP_SGI_0_4_US; 616 else 617 cdp_rx_ppdu->u.gi = ppdu_info->rx_status.sgi; 618 cdp_rx_ppdu->u.ldpc = ppdu_info->rx_status.ldpc; 619 cdp_rx_ppdu->u.ppdu_type = ppdu_info->rx_status.reception_type; 620 cdp_rx_ppdu->u.ltf_size = (ppdu_info->rx_status.he_data5 >> 621 QDF_MON_STATUS_HE_LTF_SIZE_SHIFT) & 0x3; 622 cdp_rx_ppdu->rssi = ppdu_info->rx_status.rssi_comb; 623 cdp_rx_ppdu->timestamp = ppdu_info->rx_status.tsft; 624 cdp_rx_ppdu->channel = ppdu_info->rx_status.chan_num; 625 cdp_rx_ppdu->beamformed = ppdu_info->rx_status.beamformed; 626 cdp_rx_ppdu->num_bytes = ppdu_info->rx_status.ppdu_len; 627 cdp_rx_ppdu->lsig_a = ppdu_info->rx_status.rate; 628 cdp_rx_ppdu->u.ltf_size = ppdu_info->rx_status.ltf_size; 629 630 if (ppdu_info->rx_status.preamble_type == HAL_RX_PKT_TYPE_11AC) { 631 cdp_rx_ppdu->u.stbc = ppdu_info->rx_status.is_stbc; 632 } else if (ppdu_info->rx_status.preamble_type == 633 HAL_RX_PKT_TYPE_11AX) { 634 cdp_rx_ppdu->u.stbc = (ppdu_info->rx_status.he_data3 >> 635 QDF_MON_STATUS_STBC_SHIFT) & 0x1; 636 cdp_rx_ppdu->u.dcm = (ppdu_info->rx_status.he_data3 >> 637 QDF_MON_STATUS_DCM_SHIFT) & 0x1; 638 } 639 dp_rx_populate_rx_rssi_chain(ppdu_info, cdp_rx_ppdu, pdev); 640 dp_rx_populate_su_evm_details(ppdu_info, cdp_rx_ppdu); 641 
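	/* Antenna info, target-specific fields (via monitor ops), noise floor
	 * and per-chain RSSI are filled in below before the per-user details.
	 */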
cdp_rx_ppdu->rx_antenna = ppdu_info->rx_status.rx_antenna; 642 643 mon_ops = dp_mon_ops_get(pdev->soc); 644 if (mon_ops && mon_ops->mon_rx_populate_ppdu_info) 645 mon_ops->mon_rx_populate_ppdu_info(ppdu_info, 646 cdp_rx_ppdu); 647 648 cdp_rx_ppdu->nf = ppdu_info->rx_status.chan_noise_floor; 649 for (i = 0; i < MAX_CHAIN; i++) 650 cdp_rx_ppdu->per_chain_rssi[i] = ppdu_info->rx_status.rssi[i]; 651 652 cdp_rx_ppdu->is_mcast_bcast = ppdu_info->nac_info.mcast_bcast; 653 654 cdp_rx_ppdu->num_users = ppdu_info->com_info.num_users; 655 656 dp_rx_populate_cdp_indication_ppdu_user(pdev, ppdu_info, cdp_rx_ppdu); 657 658 dp_peer_unref_delete(peer, DP_MOD_ID_RX_PPDU_STATS); 659 660 return; 661 end: 662 dp_rx_populate_cfr_non_assoc_sta(pdev, ppdu_info, cdp_rx_ppdu); 663 } 664 665 /** 666 * dp_rx_rate_stats_update() - Update per-peer rate statistics 667 * @peer: Datapath peer handle 668 * @ppdu: PPDU Descriptor 669 * @user: user index 670 * 671 * Return: None 672 */ 673 static inline void dp_rx_rate_stats_update(struct dp_peer *peer, 674 struct cdp_rx_indication_ppdu *ppdu, 675 uint32_t user) 676 { 677 uint32_t ratekbps = 0; 678 uint32_t ppdu_rx_rate = 0; 679 uint32_t nss = 0; 680 uint8_t mcs = 0; 681 uint32_t rix; 682 uint16_t ratecode = 0; 683 struct cdp_rx_stats_ppdu_user *ppdu_user = NULL; 684 struct dp_mon_peer *mon_peer = NULL; 685 686 if (!peer || !ppdu) 687 return; 688 689 mon_peer = peer->monitor_peer; 690 ppdu_user = &ppdu->user[user]; 691 692 if (!mon_peer) 693 return; 694 695 if (ppdu->u.ppdu_type != HAL_RX_TYPE_SU) { 696 if (ppdu_user->nss == 0) 697 nss = 0; 698 else 699 nss = ppdu_user->nss - 1; 700 mcs = ppdu_user->mcs; 701 702 mon_peer->stats.rx.nss_info = ppdu_user->nss; 703 mon_peer->stats.rx.mcs_info = ppdu_user->mcs; 704 } else { 705 if (ppdu->u.nss == 0) 706 nss = 0; 707 else 708 nss = ppdu->u.nss - 1; 709 mcs = ppdu->u.mcs; 710 711 mon_peer->stats.rx.nss_info = ppdu->u.nss; 712 mon_peer->stats.rx.mcs_info = ppdu->u.mcs; 713 } 714 715 ratekbps = dp_getrateindex(ppdu->u.gi, 716 mcs, 717 nss, 718 ppdu->u.preamble, 719 ppdu->u.bw, 720 ppdu->punc_bw, 721 &rix, 722 &ratecode); 723 724 if (!ratekbps) { 725 ppdu->rix = 0; 726 ppdu_user->rix = 0; 727 ppdu->rx_ratekbps = 0; 728 ppdu->rx_ratecode = 0; 729 ppdu_user->rx_ratekbps = 0; 730 return; 731 } 732 733 mon_peer->stats.rx.bw_info = ppdu->u.bw; 734 mon_peer->stats.rx.gi_info = ppdu->u.gi; 735 mon_peer->stats.rx.preamble_info = ppdu->u.preamble; 736 737 ppdu->rix = rix; 738 ppdu_user->rix = rix; 739 DP_STATS_UPD(mon_peer, rx.last_rx_rate, ratekbps); 740 mon_peer->stats.rx.avg_rx_rate = 741 dp_ath_rate_lpf(mon_peer->stats.rx.avg_rx_rate, ratekbps); 742 ppdu_rx_rate = dp_ath_rate_out(mon_peer->stats.rx.avg_rx_rate); 743 DP_STATS_UPD(mon_peer, rx.rnd_avg_rx_rate, ppdu_rx_rate); 744 ppdu->rx_ratekbps = ratekbps; 745 ppdu->rx_ratecode = ratecode; 746 ppdu_user->rx_ratekbps = ratekbps; 747 748 if (peer->vdev) 749 peer->vdev->stats.rx.last_rx_rate = ratekbps; 750 } 751 752 #ifdef WLAN_FEATURE_11BE 753 static inline uint8_t dp_get_bw_offset_frm_bw(struct dp_soc *soc, 754 enum CMN_BW_TYPES bw) 755 { 756 uint8_t pkt_bw_offset; 757 758 switch (bw) { 759 case CMN_BW_20MHZ: 760 pkt_bw_offset = PKT_BW_GAIN_20MHZ; 761 break; 762 case CMN_BW_40MHZ: 763 pkt_bw_offset = PKT_BW_GAIN_40MHZ; 764 break; 765 case CMN_BW_80MHZ: 766 pkt_bw_offset = PKT_BW_GAIN_80MHZ; 767 break; 768 case CMN_BW_160MHZ: 769 pkt_bw_offset = PKT_BW_GAIN_160MHZ; 770 break; 771 case CMN_BW_320MHZ: 772 pkt_bw_offset = PKT_BW_GAIN_320MHZ; 773 break; 774 default: 775 pkt_bw_offset = 0; 776 
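		/* Unknown bandwidth: no SNR gain offset is applied */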
dp_rx_mon_status_debug("%pK: Invalid BW index = %d", 777 soc, bw); 778 } 779 780 return pkt_bw_offset; 781 } 782 #else 783 static inline uint8_t dp_get_bw_offset_frm_bw(struct dp_soc *soc, 784 enum CMN_BW_TYPES bw) 785 { 786 uint8_t pkt_bw_offset; 787 788 switch (bw) { 789 case CMN_BW_20MHZ: 790 pkt_bw_offset = PKT_BW_GAIN_20MHZ; 791 break; 792 case CMN_BW_40MHZ: 793 pkt_bw_offset = PKT_BW_GAIN_40MHZ; 794 break; 795 case CMN_BW_80MHZ: 796 pkt_bw_offset = PKT_BW_GAIN_80MHZ; 797 break; 798 case CMN_BW_160MHZ: 799 pkt_bw_offset = PKT_BW_GAIN_160MHZ; 800 break; 801 default: 802 pkt_bw_offset = 0; 803 dp_rx_mon_status_debug("%pK: Invalid BW index = %d", 804 soc, bw); 805 } 806 807 return pkt_bw_offset; 808 } 809 #endif 810 811 #ifdef WLAN_TELEMETRY_STATS_SUPPORT 812 static void 813 dp_ppdu_desc_user_rx_time_update(struct dp_pdev *pdev, 814 struct dp_peer *peer, 815 struct cdp_rx_indication_ppdu *ppdu_desc, 816 struct cdp_rx_stats_ppdu_user *user) 817 { 818 uint32_t nss_ru_width_sum = 0; 819 struct dp_mon_peer *mon_peer = NULL; 820 uint8_t ac = 0; 821 822 if (!pdev || !ppdu_desc || !user || !peer) 823 return; 824 825 nss_ru_width_sum = ppdu_desc->usr_nss_sum * ppdu_desc->usr_ru_tones_sum; 826 if (!nss_ru_width_sum) 827 nss_ru_width_sum = 1; 828 829 if (ppdu_desc->u.ppdu_type == HAL_RX_TYPE_MU_OFDMA || 830 ppdu_desc->u.ppdu_type == HAL_RX_TYPE_MU_MIMO) { 831 user->rx_time_us = (ppdu_desc->duration * 832 user->nss * user->ofdma_ru_width) / 833 nss_ru_width_sum; 834 } else { 835 user->rx_time_us = ppdu_desc->duration; 836 } 837 838 mon_peer = peer->monitor_peer; 839 if (qdf_unlikely(!mon_peer)) 840 return; 841 842 ac = TID_TO_WME_AC(user->tid); 843 DP_STATS_INC(mon_peer, airtime_stats.rx_airtime_consumption[ac].consumption, 844 user->rx_time_us); 845 } 846 847 /** 848 * dp_rx_mon_update_user_deter_stats() - Update per-peer deterministic stats 849 * @pdev: Datapath pdev handle 850 * @peer: Datapath peer handle 851 * @ppdu: PPDU Descriptor 852 * @user: Per user RX stats 853 * 854 * Return: None 855 */ 856 static inline 857 void dp_rx_mon_update_user_deter_stats(struct dp_pdev *pdev, 858 struct dp_peer *peer, 859 struct cdp_rx_indication_ppdu *ppdu, 860 struct cdp_rx_stats_ppdu_user *user) 861 { 862 struct dp_mon_peer *mon_peer; 863 uint8_t tid; 864 865 if (!pdev || !ppdu || !user || !peer) 866 return; 867 868 if (!dp_is_subtype_data(ppdu->frame_ctrl)) 869 return; 870 871 if (ppdu->u.ppdu_type != HAL_RX_TYPE_SU) 872 return; 873 874 mon_peer = peer->monitor_peer; 875 if (!mon_peer) 876 return; 877 878 tid = user->tid; 879 if (tid >= CDP_DATA_TID_MAX) 880 return; 881 882 DP_STATS_INC(mon_peer, 883 deter_stats[tid].rx_det.mode_cnt, 884 1); 885 DP_STATS_UPD(mon_peer, 886 deter_stats[tid].rx_det.avg_rate, 887 mon_peer->stats.rx.avg_rx_rate); 888 } 889 #else 890 static inline void 891 dp_ppdu_desc_user_rx_time_update(struct dp_pdev *pdev, 892 struct dp_peer *peer, 893 struct cdp_rx_indication_ppdu *ppdu_desc, 894 struct cdp_rx_stats_ppdu_user *user) 895 { } 896 897 static inline 898 void dp_rx_mon_update_user_deter_stats(struct dp_pdev *pdev, 899 struct dp_peer *peer, 900 struct cdp_rx_indication_ppdu *ppdu, 901 struct cdp_rx_stats_ppdu_user *user) 902 { } 903 #endif 904 905 static void dp_rx_stats_update(struct dp_pdev *pdev, 906 struct cdp_rx_indication_ppdu *ppdu) 907 { 908 struct dp_soc *soc = NULL; 909 uint8_t mcs, preamble, ac = 0, nss, ppdu_type; 910 uint32_t num_msdu; 911 uint8_t pkt_bw_offset; 912 struct dp_peer *peer; 913 struct dp_mon_peer *mon_peer; 914 struct cdp_rx_stats_ppdu_user *ppdu_user; 
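	/* In the loop below, stats for users whose peer lookup fails are
	 * accumulated on the pdev's invalid_mon_peer.
	 */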
915 uint32_t i; 916 enum cdp_mu_packet_type mu_pkt_type; 917 struct dp_mon_ops *mon_ops; 918 struct dp_mon_pdev *mon_pdev = NULL; 919 uint64_t byte_count; 920 921 if (qdf_likely(pdev)) 922 soc = pdev->soc; 923 else 924 return; 925 926 if (qdf_likely(!soc) || soc->process_rx_status) 927 return; 928 929 mon_pdev = pdev->monitor_pdev; 930 931 preamble = ppdu->u.preamble; 932 ppdu_type = ppdu->u.ppdu_type; 933 934 for (i = 0; i < ppdu->num_users && i < CDP_MU_MAX_USERS; i++) { 935 peer = NULL; 936 ppdu_user = &ppdu->user[i]; 937 peer = dp_peer_get_ref_by_id(soc, ppdu_user->peer_id, 938 DP_MOD_ID_RX_PPDU_STATS); 939 940 if (qdf_unlikely(!peer)) 941 mon_peer = mon_pdev->invalid_mon_peer; 942 else 943 mon_peer = peer->monitor_peer; 944 945 if (qdf_unlikely(!mon_peer)) { 946 if (peer) 947 dp_peer_unref_delete(peer, 948 DP_MOD_ID_RX_PPDU_STATS); 949 950 continue; 951 } 952 953 if ((preamble == DOT11_A) || (preamble == DOT11_B)) 954 ppdu->u.nss = 1; 955 956 if (ppdu_type == HAL_RX_TYPE_SU) { 957 mcs = ppdu->u.mcs; 958 nss = ppdu->u.nss; 959 } else { 960 mcs = ppdu_user->mcs; 961 nss = ppdu_user->nss; 962 } 963 964 num_msdu = ppdu_user->num_msdu; 965 byte_count = ppdu_user->mpdu_ok_byte_count + 966 ppdu_user->mpdu_err_byte_count; 967 968 pkt_bw_offset = dp_get_bw_offset_frm_bw(soc, ppdu->u.bw); 969 DP_STATS_UPD(mon_peer, rx.snr, (ppdu->rssi + pkt_bw_offset)); 970 971 if (qdf_unlikely(mon_peer->stats.rx.avg_snr == CDP_INVALID_SNR)) 972 mon_peer->stats.rx.avg_snr = 973 CDP_SNR_IN(mon_peer->stats.rx.snr); 974 else 975 CDP_SNR_UPDATE_AVG(mon_peer->stats.rx.avg_snr, 976 mon_peer->stats.rx.snr); 977 978 if (ppdu_type == HAL_RX_TYPE_SU) { 979 if (nss) { 980 DP_STATS_INC(mon_peer, rx.nss[nss - 1], num_msdu); 981 DP_STATS_INC(mon_peer, rx.ppdu_nss[nss - 1], 1); 982 } 983 984 DP_STATS_INC(mon_peer, rx.mpdu_cnt_fcs_ok, 985 ppdu_user->mpdu_cnt_fcs_ok); 986 DP_STATS_INC(mon_peer, rx.mpdu_cnt_fcs_err, 987 ppdu_user->mpdu_cnt_fcs_err); 988 } 989 990 if (ppdu_type >= HAL_RX_TYPE_MU_MIMO && 991 ppdu_type <= HAL_RX_TYPE_MU_OFDMA) { 992 if (ppdu_type == HAL_RX_TYPE_MU_MIMO) 993 mu_pkt_type = TXRX_TYPE_MU_MIMO; 994 else 995 mu_pkt_type = TXRX_TYPE_MU_OFDMA; 996 997 if (qdf_likely(nss)) { 998 DP_STATS_INC(mon_peer, rx.nss[nss - 1], num_msdu); 999 DP_STATS_INC(mon_peer, 1000 rx.rx_mu[mu_pkt_type].ppdu_nss[nss - 1], 1001 1); 1002 } 1003 1004 DP_STATS_INC(mon_peer, 1005 rx.rx_mu[mu_pkt_type].mpdu_cnt_fcs_ok, 1006 ppdu_user->mpdu_cnt_fcs_ok); 1007 DP_STATS_INC(mon_peer, 1008 rx.rx_mu[mu_pkt_type].mpdu_cnt_fcs_err, 1009 ppdu_user->mpdu_cnt_fcs_err); 1010 } 1011 1012 DP_STATS_INC(mon_peer, rx.sgi_count[ppdu->u.gi], num_msdu); 1013 DP_STATS_INC(mon_peer, rx.bw[ppdu->u.bw], num_msdu); 1014 DP_STATS_INC(mon_peer, rx.reception_type[ppdu->u.ppdu_type], 1015 num_msdu); 1016 DP_STATS_INC(mon_peer, rx.ppdu_cnt[ppdu->u.ppdu_type], 1); 1017 DP_STATS_INCC(mon_peer, rx.ampdu_cnt, num_msdu, 1018 ppdu_user->is_ampdu); 1019 DP_STATS_INCC(mon_peer, rx.non_ampdu_cnt, num_msdu, 1020 !(ppdu_user->is_ampdu)); 1021 DP_STATS_UPD(mon_peer, rx.rx_rate, mcs); 1022 DP_STATS_INCC(mon_peer, 1023 rx.pkt_type[preamble].mcs_count[MAX_MCS - 1], num_msdu, 1024 ((mcs >= MAX_MCS_11A) && (preamble == DOT11_A))); 1025 DP_STATS_INCC(mon_peer, 1026 rx.pkt_type[preamble].mcs_count[mcs], num_msdu, 1027 ((mcs < MAX_MCS_11A) && (preamble == DOT11_A))); 1028 DP_STATS_INCC(mon_peer, 1029 rx.pkt_type[preamble].mcs_count[MAX_MCS - 1], num_msdu, 1030 ((mcs >= MAX_MCS_11B) && (preamble == DOT11_B))); 1031 DP_STATS_INCC(mon_peer, 1032 rx.pkt_type[preamble].mcs_count[mcs], 
num_msdu, 1033 ((mcs < MAX_MCS_11B) && (preamble == DOT11_B))); 1034 DP_STATS_INCC(mon_peer, 1035 rx.pkt_type[preamble].mcs_count[MAX_MCS - 1], num_msdu, 1036 ((mcs >= MAX_MCS_11A) && (preamble == DOT11_N))); 1037 DP_STATS_INCC(mon_peer, 1038 rx.pkt_type[preamble].mcs_count[mcs], num_msdu, 1039 ((mcs < MAX_MCS_11A) && (preamble == DOT11_N))); 1040 DP_STATS_INCC(mon_peer, 1041 rx.pkt_type[preamble].mcs_count[MAX_MCS - 1], num_msdu, 1042 ((mcs >= MAX_MCS_11AC) && (preamble == DOT11_AC))); 1043 DP_STATS_INCC(mon_peer, 1044 rx.pkt_type[preamble].mcs_count[mcs], num_msdu, 1045 ((mcs < MAX_MCS_11AC) && (preamble == DOT11_AC))); 1046 DP_STATS_INCC(mon_peer, 1047 rx.pkt_type[preamble].mcs_count[MAX_MCS - 1], num_msdu, 1048 ((mcs >= (MAX_MCS_11AX)) && (preamble == DOT11_AX))); 1049 DP_STATS_INCC(mon_peer, 1050 rx.pkt_type[preamble].mcs_count[mcs], num_msdu, 1051 ((mcs < (MAX_MCS_11AX)) && (preamble == DOT11_AX))); 1052 DP_STATS_INCC(mon_peer, 1053 rx.su_ax_ppdu_cnt.mcs_count[MAX_MCS - 1], 1, 1054 ((mcs >= (MAX_MCS_11AX)) && (preamble == DOT11_AX) && 1055 (ppdu_type == HAL_RX_TYPE_SU))); 1056 DP_STATS_INCC(mon_peer, 1057 rx.su_ax_ppdu_cnt.mcs_count[mcs], 1, 1058 ((mcs < (MAX_MCS_11AX)) && (preamble == DOT11_AX) && 1059 (ppdu_type == HAL_RX_TYPE_SU))); 1060 DP_STATS_INCC(mon_peer, 1061 rx.rx_mu[TXRX_TYPE_MU_OFDMA].ppdu.mcs_count[MAX_MCS - 1], 1062 1, ((mcs >= (MAX_MCS_11AX)) && 1063 (preamble == DOT11_AX) && 1064 (ppdu_type == HAL_RX_TYPE_MU_OFDMA))); 1065 DP_STATS_INCC(mon_peer, 1066 rx.rx_mu[TXRX_TYPE_MU_OFDMA].ppdu.mcs_count[mcs], 1067 1, ((mcs < (MAX_MCS_11AX)) && 1068 (preamble == DOT11_AX) && 1069 (ppdu_type == HAL_RX_TYPE_MU_OFDMA))); 1070 DP_STATS_INCC(mon_peer, 1071 rx.rx_mu[TXRX_TYPE_MU_MIMO].ppdu.mcs_count[MAX_MCS - 1], 1072 1, ((mcs >= (MAX_MCS_11AX)) && 1073 (preamble == DOT11_AX) && 1074 (ppdu_type == HAL_RX_TYPE_MU_MIMO))); 1075 DP_STATS_INCC(mon_peer, 1076 rx.rx_mu[TXRX_TYPE_MU_MIMO].ppdu.mcs_count[mcs], 1077 1, ((mcs < (MAX_MCS_11AX)) && 1078 (preamble == DOT11_AX) && 1079 (ppdu_type == HAL_RX_TYPE_MU_MIMO))); 1080 1081 /* 1082 * If invalid TID, it could be a non-qos frame, hence do not 1083 * update any AC counters 1084 */ 1085 ac = TID_TO_WME_AC(ppdu_user->tid); 1086 1087 if (qdf_likely(ppdu->tid != HAL_TID_INVALID)) { 1088 DP_STATS_INC(mon_peer, rx.wme_ac_type[ac], num_msdu); 1089 DP_STATS_INC(mon_peer, rx.wme_ac_type_bytes[ac], 1090 byte_count); 1091 } 1092 1093 DP_STATS_INC(mon_peer, rx.rx_ppdus, 1); 1094 DP_STATS_INC(mon_peer, rx.rx_mpdus, 1095 (ppdu_user->mpdu_cnt_fcs_ok + ppdu_user->mpdu_cnt_fcs_err)); 1096 1097 mon_ops = dp_mon_ops_get(soc); 1098 if (qdf_likely(mon_ops && mon_ops->mon_rx_stats_update)) 1099 mon_ops->mon_rx_stats_update(mon_peer, ppdu, ppdu_user); 1100 1101 if (qdf_unlikely(!peer)) 1102 continue; 1103 1104 dp_peer_stats_notify(pdev, peer); 1105 DP_STATS_UPD(mon_peer, rx.last_snr, 1106 (ppdu->rssi + pkt_bw_offset)); 1107 1108 dp_peer_qos_stats_notify(pdev, ppdu_user); 1109 1110 if (dp_is_subtype_data(ppdu->frame_ctrl)) 1111 dp_rx_rate_stats_update(peer, ppdu, i); 1112 1113 dp_send_stats_event(pdev, peer, ppdu_user->peer_id); 1114 1115 dp_ppdu_desc_user_rx_time_update(pdev, peer, ppdu, ppdu_user); 1116 dp_rx_mon_update_user_deter_stats(pdev, peer, ppdu, ppdu_user); 1117 dp_peer_unref_delete(peer, DP_MOD_ID_RX_PPDU_STATS); 1118 } 1119 } 1120 1121 void 1122 dp_rx_handle_ppdu_stats(struct dp_soc *soc, struct dp_pdev *pdev, 1123 struct hal_rx_ppdu_info *ppdu_info) 1124 { 1125 qdf_nbuf_t ppdu_nbuf; 1126 struct cdp_rx_indication_ppdu *cdp_rx_ppdu; 1127 struct 
dp_mon_pdev *mon_pdev = pdev->monitor_pdev; 1128 uint64_t size = 0; 1129 uint8_t num_users = 0; 1130 1131 /* 1132 * Do not allocate if fcs error, 1133 * ast idx invalid / fctl invalid 1134 * 1135 * In CFR RCC mode - PPDU status TLVs of error pkts are also needed 1136 */ 1137 if (qdf_unlikely(ppdu_info->com_info.mpdu_cnt_fcs_ok == 0)) 1138 return; 1139 1140 if (qdf_unlikely(mon_pdev->neighbour_peers_added)) { 1141 if (ppdu_info->nac_info.fc_valid && 1142 ppdu_info->nac_info.to_ds_flag && 1143 ppdu_info->nac_info.mac_addr2_valid) { 1144 struct dp_neighbour_peer *peer = NULL; 1145 uint8_t rssi = ppdu_info->rx_status.rssi_comb; 1146 1147 qdf_spin_lock_bh(&mon_pdev->neighbour_peer_mutex); 1148 if (mon_pdev->neighbour_peers_added) { 1149 TAILQ_FOREACH(peer, &mon_pdev->neighbour_peers_list, 1150 neighbour_peer_list_elem) { 1151 if (!qdf_mem_cmp(&peer->neighbour_peers_macaddr, 1152 &ppdu_info->nac_info.mac_addr2, 1153 QDF_MAC_ADDR_SIZE)) { 1154 peer->rssi = rssi; 1155 break; 1156 } 1157 } 1158 } 1159 qdf_spin_unlock_bh(&mon_pdev->neighbour_peer_mutex); 1160 } else { 1161 dp_info("Neighbour peers RSSI update failed! fc_valid = %d, to_ds_flag = %d and mac_addr2_valid = %d", 1162 ppdu_info->nac_info.fc_valid, 1163 ppdu_info->nac_info.to_ds_flag, 1164 ppdu_info->nac_info.mac_addr2_valid); 1165 } 1166 } 1167 1168 /* need not generate wdi event when mcopy, cfr rcc mode and 1169 * enhanced stats are not enabled 1170 */ 1171 if (qdf_unlikely(!mon_pdev->mcopy_mode && 1172 !mon_pdev->enhanced_stats_en && 1173 !dp_cfr_rcc_mode_status(pdev))) 1174 return; 1175 1176 if (qdf_unlikely(dp_cfr_rcc_mode_status(pdev))) 1177 dp_update_cfr_dbg_stats(pdev, ppdu_info); 1178 1179 if (qdf_unlikely(!ppdu_info->rx_status.frame_control_info_valid || 1180 ppdu_info->rx_status.ast_index == HAL_AST_IDX_INVALID)) { 1181 if (!(mon_pdev->mcopy_mode || 1182 (dp_bb_captured_chan_status(pdev, ppdu_info) == 1183 QDF_STATUS_SUCCESS))) 1184 return; 1185 } 1186 num_users = ppdu_info->com_info.num_users; 1187 qdf_assert_always(num_users <= CDP_MU_MAX_USERS); 1188 size = sizeof(struct cdp_rx_indication_ppdu) + 1189 num_users * sizeof(struct cdp_rx_stats_ppdu_user); 1190 ppdu_nbuf = qdf_nbuf_alloc(soc->osdev, 1191 size, 1192 0, 0, FALSE); 1193 if (qdf_likely(ppdu_nbuf)) { 1194 cdp_rx_ppdu = (struct cdp_rx_indication_ppdu *)qdf_nbuf_data(ppdu_nbuf); 1195 1196 qdf_mem_zero(cdp_rx_ppdu, size); 1197 dp_rx_mon_populate_cfr_info(pdev, ppdu_info, cdp_rx_ppdu); 1198 dp_rx_populate_cdp_indication_ppdu(pdev, 1199 ppdu_info, cdp_rx_ppdu); 1200 if (!qdf_unlikely(qdf_nbuf_put_tail(ppdu_nbuf, 1201 sizeof(struct cdp_rx_indication_ppdu)))) 1202 return; 1203 1204 dp_rx_stats_update(pdev, cdp_rx_ppdu); 1205 1206 if (qdf_unlikely(cdp_rx_ppdu->peer_id != HTT_INVALID_PEER)) { 1207 dp_wdi_event_handler(WDI_EVENT_RX_PPDU_DESC, 1208 soc, ppdu_nbuf, 1209 cdp_rx_ppdu->peer_id, 1210 WDI_NO_VAL, pdev->pdev_id); 1211 } else if (qdf_unlikely(mon_pdev->mcopy_mode || dp_cfr_rcc_mode_status(pdev))) { 1212 dp_wdi_event_handler(WDI_EVENT_RX_PPDU_DESC, soc, 1213 ppdu_nbuf, HTT_INVALID_PEER, 1214 WDI_NO_VAL, pdev->pdev_id); 1215 } else { 1216 qdf_nbuf_free(ppdu_nbuf); 1217 } 1218 } 1219 } 1220 #endif/* QCA_ENHANCED_STATS_SUPPORT */ 1221 1222 #ifdef QCA_UNDECODED_METADATA_SUPPORT 1223 #define RX_PHYERR_MASK_GET64(_val1, _val2) (((uint64_t)(_val2) << 32) | (_val1)) 1224 /** 1225 * dp_rx_populate_cdp_indication_ppdu_undecoded_metadata() - Populate cdp 1226 * rx indication structure 1227 * @pdev: pdev ctx 1228 * @ppdu_info: ppdu info structure from ppdu ring 1229 * @cdp_rx_ppdu: 
Rx PPDU indication structure 1230 * 1231 * Return: none 1232 */ 1233 static void 1234 dp_rx_populate_cdp_indication_ppdu_undecoded_metadata(struct dp_pdev *pdev, 1235 struct hal_rx_ppdu_info *ppdu_info, 1236 struct cdp_rx_indication_ppdu *cdp_rx_ppdu) 1237 { 1238 uint32_t chain; 1239 1240 cdp_rx_ppdu->phyrx_abort = ppdu_info->rx_status.phyrx_abort; 1241 cdp_rx_ppdu->phyrx_abort_reason = 1242 ppdu_info->rx_status.phyrx_abort_reason; 1243 1244 cdp_rx_ppdu->first_data_seq_ctrl = 1245 ppdu_info->rx_status.first_data_seq_ctrl; 1246 cdp_rx_ppdu->frame_ctrl = 1247 ppdu_info->rx_status.frame_control; 1248 cdp_rx_ppdu->tcp_msdu_count = ppdu_info->rx_status.tcp_msdu_count; 1249 cdp_rx_ppdu->udp_msdu_count = ppdu_info->rx_status.udp_msdu_count; 1250 cdp_rx_ppdu->other_msdu_count = ppdu_info->rx_status.other_msdu_count; 1251 cdp_rx_ppdu->u.preamble = ppdu_info->rx_status.preamble_type; 1252 cdp_rx_ppdu->num_mpdu = ppdu_info->com_info.mpdu_cnt_fcs_ok; 1253 cdp_rx_ppdu->num_msdu = (cdp_rx_ppdu->tcp_msdu_count + 1254 cdp_rx_ppdu->udp_msdu_count + 1255 cdp_rx_ppdu->other_msdu_count); 1256 1257 cdp_rx_ppdu->retries = CDP_FC_IS_RETRY_SET(cdp_rx_ppdu->frame_ctrl) ? 1258 ppdu_info->com_info.mpdu_cnt_fcs_ok : 0; 1259 1260 if (ppdu_info->com_info.mpdu_cnt_fcs_ok > 1) 1261 cdp_rx_ppdu->is_ampdu = 1; 1262 else 1263 cdp_rx_ppdu->is_ampdu = 0; 1264 cdp_rx_ppdu->tid = ppdu_info->rx_status.tid; 1265 1266 cdp_rx_ppdu->ppdu_id = ppdu_info->com_info.ppdu_id; 1267 cdp_rx_ppdu->length = ppdu_info->rx_status.ppdu_len; 1268 cdp_rx_ppdu->duration = ppdu_info->rx_status.duration; 1269 cdp_rx_ppdu->u.bw = ppdu_info->rx_status.bw; 1270 cdp_rx_ppdu->u.nss = ppdu_info->rx_status.nss; 1271 cdp_rx_ppdu->u.mcs = ppdu_info->rx_status.mcs; 1272 if (ppdu_info->rx_status.sgi == VHT_SGI_NYSM && 1273 ppdu_info->rx_status.preamble_type == HAL_RX_PKT_TYPE_11AC) 1274 cdp_rx_ppdu->u.gi = CDP_SGI_0_4_US; 1275 else 1276 cdp_rx_ppdu->u.gi = ppdu_info->rx_status.sgi; 1277 1278 cdp_rx_ppdu->u.ldpc = ppdu_info->rx_status.ldpc; 1279 cdp_rx_ppdu->u.ppdu_type = ppdu_info->rx_status.reception_type; 1280 cdp_rx_ppdu->u.ltf_size = (ppdu_info->rx_status.he_data5 >> 1281 QDF_MON_STATUS_HE_LTF_SIZE_SHIFT) & 0x3; 1282 1283 cdp_rx_ppdu->rssi = ppdu_info->rx_status.rssi_comb; 1284 cdp_rx_ppdu->timestamp = ppdu_info->rx_status.tsft; 1285 cdp_rx_ppdu->channel = ppdu_info->rx_status.chan_num; 1286 cdp_rx_ppdu->beamformed = ppdu_info->rx_status.beamformed; 1287 cdp_rx_ppdu->num_bytes = ppdu_info->rx_status.ppdu_len; 1288 cdp_rx_ppdu->lsig_a = ppdu_info->rx_status.rate; 1289 cdp_rx_ppdu->u.ltf_size = ppdu_info->rx_status.ltf_size; 1290 1291 if (ppdu_info->rx_status.preamble_type == HAL_RX_PKT_TYPE_11AC) { 1292 cdp_rx_ppdu->u.stbc = ppdu_info->rx_status.is_stbc; 1293 cdp_rx_ppdu->vht_no_txop_ps = 1294 ppdu_info->rx_status.vht_no_txop_ps; 1295 cdp_rx_ppdu->vht_crc = ppdu_info->rx_status.vht_crc; 1296 cdp_rx_ppdu->group_id = ppdu_info->rx_status.vht_flag_values5; 1297 } else if (ppdu_info->rx_status.preamble_type == 1298 HAL_RX_PKT_TYPE_11AX) { 1299 cdp_rx_ppdu->u.stbc = (ppdu_info->rx_status.he_data3 >> 1300 QDF_MON_STATUS_STBC_SHIFT) & 0x1; 1301 cdp_rx_ppdu->u.dcm = (ppdu_info->rx_status.he_data3 >> 1302 QDF_MON_STATUS_DCM_SHIFT) & 0x1; 1303 } else { 1304 cdp_rx_ppdu->u.stbc = ppdu_info->rx_status.ht_stbc; 1305 cdp_rx_ppdu->ht_length = ppdu_info->rx_status.ht_length; 1306 cdp_rx_ppdu->ht_smoothing = ppdu_info->rx_status.smoothing; 1307 cdp_rx_ppdu->ht_not_sounding = 1308 ppdu_info->rx_status.not_sounding; 1309 cdp_rx_ppdu->ht_aggregation = 
ppdu_info->rx_status.aggregation; 1310 cdp_rx_ppdu->ht_stbc = ppdu_info->rx_status.ht_stbc; 1311 cdp_rx_ppdu->ht_crc = ppdu_info->rx_status.ht_crc; 1312 } 1313 1314 cdp_rx_ppdu->l_sig_length = ppdu_info->rx_status.l_sig_length; 1315 cdp_rx_ppdu->l_sig_a_parity = ppdu_info->rx_status.l_sig_a_parity; 1316 cdp_rx_ppdu->l_sig_a_pkt_type = ppdu_info->rx_status.l_sig_a_pkt_type; 1317 1318 if (ppdu_info->rx_status.preamble_type == HAL_RX_PKT_TYPE_11AX) { 1319 cdp_rx_ppdu->he_crc = ppdu_info->rx_status.he_crc; 1320 cdp_rx_ppdu->bss_color_id = 1321 ppdu_info->rx_status.he_data3 & 0x3F; 1322 cdp_rx_ppdu->beam_change = (ppdu_info->rx_status.he_data3 >> 1323 QDF_MON_STATUS_BEAM_CHANGE_SHIFT) & 0x1; 1324 cdp_rx_ppdu->dl_ul_flag = (ppdu_info->rx_status.he_data3 >> 1325 QDF_MON_STATUS_DL_UL_SHIFT) & 0x1; 1326 cdp_rx_ppdu->ldpc_extra_sym = (ppdu_info->rx_status.he_data3 >> 1327 QDF_MON_STATUS_LDPC_EXTRA_SYMBOL_SHIFT) & 0x1; 1328 cdp_rx_ppdu->special_reuse = 1329 ppdu_info->rx_status.he_data4 & 0xF; 1330 cdp_rx_ppdu->ltf_sym = (ppdu_info->rx_status.he_data5 >> 1331 QDF_MON_STATUS_HE_LTF_SYM_SHIFT) & 0x7; 1332 cdp_rx_ppdu->txbf = (ppdu_info->rx_status.he_data5 >> 1333 QDF_MON_STATUS_TXBF_SHIFT) & 0x1; 1334 cdp_rx_ppdu->pe_disambiguity = (ppdu_info->rx_status.he_data5 >> 1335 QDF_MON_STATUS_PE_DISAMBIGUITY_SHIFT) & 0x1; 1336 cdp_rx_ppdu->pre_fec_pad = (ppdu_info->rx_status.he_data5 >> 1337 QDF_MON_STATUS_PRE_FEC_PAD_SHIFT) & 0x3; 1338 cdp_rx_ppdu->dopplar = (ppdu_info->rx_status.he_data6 >> 1339 QDF_MON_STATUS_DOPPLER_SHIFT) & 0x1; 1340 cdp_rx_ppdu->txop_duration = (ppdu_info->rx_status.he_data6 >> 1341 QDF_MON_STATUS_TXOP_SHIFT) & 0x7F; 1342 cdp_rx_ppdu->sig_b_mcs = ppdu_info->rx_status.he_flags1 & 0x7; 1343 cdp_rx_ppdu->sig_b_dcm = (ppdu_info->rx_status.he_flags1 >> 1344 QDF_MON_STATUS_DCM_FLAG_1_SHIFT) & 0x1; 1345 cdp_rx_ppdu->sig_b_sym = (ppdu_info->rx_status.he_flags2 >> 1346 QDF_MON_STATUS_NUM_SIG_B_SYMBOLS_SHIFT) & 0xF; 1347 cdp_rx_ppdu->sig_b_comp = (ppdu_info->rx_status.he_flags2 >> 1348 QDF_MON_STATUS_SIG_B_COMPRESSION_FLAG_2_SHIFT) & 0x1; 1349 } 1350 dp_rx_populate_rx_rssi_chain(ppdu_info, cdp_rx_ppdu, pdev); 1351 dp_rx_populate_su_evm_details(ppdu_info, cdp_rx_ppdu); 1352 cdp_rx_ppdu->rx_antenna = ppdu_info->rx_status.rx_antenna; 1353 1354 cdp_rx_ppdu->nf = ppdu_info->rx_status.chan_noise_floor; 1355 for (chain = 0; chain < MAX_CHAIN; chain++) 1356 cdp_rx_ppdu->per_chain_rssi[chain] = 1357 ppdu_info->rx_status.rssi[chain]; 1358 1359 cdp_rx_ppdu->is_mcast_bcast = ppdu_info->nac_info.mcast_bcast; 1360 1361 cdp_rx_ppdu->num_users = ppdu_info->com_info.num_users; 1362 1363 dp_rx_populate_cdp_indication_ppdu_user(pdev, ppdu_info, cdp_rx_ppdu); 1364 } 1365 1366 /** 1367 * dp_rx_is_valid_undecoded_frame() - Check unencoded frame received valid 1368 * or not against configured error mask 1369 * @err_mask: configured err mask 1370 * @err_code: Received error reason code for phy abort 1371 * 1372 * Return: true / false 1373 */ 1374 static inline bool 1375 dp_rx_is_valid_undecoded_frame(uint64_t err_mask, uint8_t err_code) 1376 { 1377 if (err_code < CDP_PHYRX_ERR_MAX && 1378 (err_mask & (1L << err_code))) 1379 return true; 1380 1381 return false; 1382 } 1383 1384 void 1385 dp_rx_handle_ppdu_undecoded_metadata(struct dp_soc *soc, struct dp_pdev *pdev, 1386 struct hal_rx_ppdu_info *ppdu_info) 1387 { 1388 qdf_nbuf_t ppdu_nbuf; 1389 struct cdp_rx_indication_ppdu *cdp_rx_ppdu; 1390 uint8_t abort_reason = 0; 1391 struct dp_mon_pdev *mon_pdev = pdev->monitor_pdev; 1392 uint64_t mask64; 1393 1394 /* Return if 
RX_ABORT not set */ 1395 if (ppdu_info->rx_status.phyrx_abort == 0) 1396 return; 1397 1398 mask64 = RX_PHYERR_MASK_GET64(mon_pdev->phyrx_error_mask, 1399 mon_pdev->phyrx_error_mask_cont); 1400 abort_reason = ppdu_info->rx_status.phyrx_abort_reason; 1401 1402 if (!dp_rx_is_valid_undecoded_frame(mask64, abort_reason)) 1403 return; 1404 1405 ppdu_nbuf = qdf_nbuf_alloc(soc->osdev, 1406 sizeof(struct cdp_rx_indication_ppdu), 1407 0, 0, FALSE); 1408 if (ppdu_nbuf) { 1409 cdp_rx_ppdu = ((struct cdp_rx_indication_ppdu *) 1410 qdf_nbuf_data(ppdu_nbuf)); 1411 1412 qdf_mem_zero(cdp_rx_ppdu, 1413 sizeof(struct cdp_rx_indication_ppdu)); 1414 dp_rx_populate_cdp_indication_ppdu_undecoded_metadata(pdev, 1415 ppdu_info, cdp_rx_ppdu); 1416 1417 if (!qdf_nbuf_put_tail(ppdu_nbuf, 1418 sizeof(struct cdp_rx_indication_ppdu))) { 1419 return; 1420 } 1421 1422 mon_pdev->rx_mon_stats.rx_undecoded_count++; 1423 mon_pdev->rx_mon_stats.rx_undecoded_error[abort_reason] += 1; 1424 1425 dp_wdi_event_handler(WDI_EVENT_RX_PPDU_DESC_UNDECODED_METADATA, 1426 soc, ppdu_nbuf, HTT_INVALID_PEER, 1427 WDI_NO_VAL, pdev->pdev_id); 1428 } 1429 } 1430 #endif/* QCA_UNDECODED_METADATA_SUPPORT */ 1431 1432 #ifdef QCA_MCOPY_SUPPORT 1433 QDF_STATUS 1434 dp_rx_handle_mcopy_mode(struct dp_soc *soc, struct dp_pdev *pdev, 1435 struct hal_rx_ppdu_info *ppdu_info, qdf_nbuf_t nbuf, 1436 uint8_t fcs_ok_mpdu_cnt, bool deliver_frame) 1437 { 1438 uint16_t size = 0; 1439 struct ieee80211_frame *wh; 1440 uint32_t *nbuf_data; 1441 struct dp_mon_pdev *mon_pdev = pdev->monitor_pdev; 1442 1443 if (!ppdu_info->ppdu_msdu_info[fcs_ok_mpdu_cnt].first_msdu_payload) 1444 return QDF_STATUS_SUCCESS; 1445 1446 /* For M_COPY mode only one msdu per ppdu is sent to upper layer*/ 1447 if (mon_pdev->mcopy_mode == M_COPY) { 1448 if (mon_pdev->m_copy_id.rx_ppdu_id == ppdu_info->com_info.ppdu_id) 1449 return QDF_STATUS_SUCCESS; 1450 } 1451 1452 wh = (struct ieee80211_frame *)(ppdu_info->ppdu_msdu_info[fcs_ok_mpdu_cnt].first_msdu_payload + 4); 1453 1454 size = (ppdu_info->ppdu_msdu_info[fcs_ok_mpdu_cnt].first_msdu_payload - 1455 qdf_nbuf_data(nbuf)); 1456 1457 if (qdf_nbuf_pull_head(nbuf, size) == NULL) 1458 return QDF_STATUS_SUCCESS; 1459 1460 if (((wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK) == 1461 IEEE80211_FC0_TYPE_MGT) || 1462 ((wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK) == 1463 IEEE80211_FC0_TYPE_CTL)) { 1464 return QDF_STATUS_SUCCESS; 1465 } 1466 1467 nbuf_data = (uint32_t *)qdf_nbuf_data(nbuf); 1468 *nbuf_data = mon_pdev->ppdu_info.com_info.ppdu_id; 1469 /* only retain RX MSDU payload in the skb */ 1470 qdf_nbuf_trim_tail(nbuf, qdf_nbuf_len(nbuf) - ppdu_info->ppdu_msdu_info[fcs_ok_mpdu_cnt].payload_len); 1471 if (deliver_frame) { 1472 mon_pdev->m_copy_id.rx_ppdu_id = ppdu_info->com_info.ppdu_id; 1473 dp_wdi_event_handler(WDI_EVENT_RX_DATA, soc, 1474 nbuf, HTT_INVALID_PEER, 1475 WDI_NO_VAL, pdev->pdev_id); 1476 } 1477 return QDF_STATUS_E_ALREADY; 1478 } 1479 1480 void 1481 dp_rx_mcopy_handle_last_mpdu(struct dp_soc *soc, struct dp_pdev *pdev, 1482 struct hal_rx_ppdu_info *ppdu_info, 1483 qdf_nbuf_t status_nbuf) 1484 { 1485 QDF_STATUS mcopy_status; 1486 qdf_nbuf_t nbuf_clone = NULL; 1487 struct dp_mon_pdev *mon_pdev = pdev->monitor_pdev; 1488 1489 /* If the MPDU end tlv and RX header are received in different buffers, 1490 * process the RX header based on fcs status. 
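	 * The header cached from the previous iteration, if any, is delivered
	 * here when its FCS passed, and the cached buffer is then freed.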
	 */
	if (mon_pdev->mcopy_status_nbuf) {
		/* For M_COPY mode only one msdu per ppdu is sent to upper layer*/
		if (mon_pdev->mcopy_mode == M_COPY) {
			if (mon_pdev->m_copy_id.rx_ppdu_id ==
			    ppdu_info->com_info.ppdu_id)
				goto end1;
		}

		if (ppdu_info->is_fcs_passed) {
			nbuf_clone = qdf_nbuf_clone(mon_pdev->mcopy_status_nbuf);
			if (!nbuf_clone) {
				QDF_TRACE(QDF_MODULE_ID_TXRX,
					  QDF_TRACE_LEVEL_ERROR,
					  "Failed to clone nbuf");
				goto end1;
			}

			mon_pdev->m_copy_id.rx_ppdu_id = ppdu_info->com_info.ppdu_id;
			dp_wdi_event_handler(WDI_EVENT_RX_DATA, soc,
					     nbuf_clone,
					     HTT_INVALID_PEER,
					     WDI_NO_VAL, pdev->pdev_id);
			ppdu_info->is_fcs_passed = false;
		}
end1:
		qdf_nbuf_free(mon_pdev->mcopy_status_nbuf);
		mon_pdev->mcopy_status_nbuf = NULL;
	}

	/* If the MPDU end tlv and RX header are received in different buffers,
	 * preserve the RX header as the fcs status will be received in the
	 * MPDU end tlv in the next buffer. So, cache the buffer to be
	 * processed in the next iteration.
	 */
	if ((ppdu_info->fcs_ok_cnt + ppdu_info->fcs_err_cnt) !=
	    ppdu_info->com_info.mpdu_cnt) {
		mon_pdev->mcopy_status_nbuf = qdf_nbuf_clone(status_nbuf);
		if (mon_pdev->mcopy_status_nbuf) {
			mcopy_status = dp_rx_handle_mcopy_mode(
					soc, pdev,
					ppdu_info,
					mon_pdev->mcopy_status_nbuf,
					ppdu_info->fcs_ok_cnt,
					false);
			if (mcopy_status == QDF_STATUS_SUCCESS) {
				qdf_nbuf_free(mon_pdev->mcopy_status_nbuf);
				mon_pdev->mcopy_status_nbuf = NULL;
			}
		}
	}
}

void
dp_rx_mcopy_process_ppdu_info(struct dp_pdev *pdev,
			      struct hal_rx_ppdu_info *ppdu_info,
			      uint32_t tlv_status)
{
	struct dp_mon_pdev *mon_pdev = pdev->monitor_pdev;

	if (qdf_unlikely(!mon_pdev->mcopy_mode))
		return;

	/* The fcs status is received in the MPDU end tlv. If the RX header
	 * and its MPDU end tlv are received in different status buffers,
	 * ppdu_info->is_fcs_passed is used to process that header.
	 * If the end tlv is received in the next status buffer,
	 * com_info.mpdu_cnt will be 0 at the time the MPDU end tlv is
	 * received, and the is_fcs_passed flag is updated based on
	 * ppdu_info->fcs_err.
	 */
	if (tlv_status != HAL_TLV_STATUS_MPDU_END)
		return;

	if (!ppdu_info->fcs_err) {
		if (ppdu_info->fcs_ok_cnt >
		    HAL_RX_MAX_MPDU_H_PER_STATUS_BUFFER) {
			dp_err("No. 
of MPDUs(%d) per status buff exceeded", 1568 ppdu_info->fcs_ok_cnt); 1569 return; 1570 } 1571 if (ppdu_info->com_info.mpdu_cnt) 1572 ppdu_info->fcs_ok_cnt++; 1573 else 1574 ppdu_info->is_fcs_passed = true; 1575 } else { 1576 if (ppdu_info->com_info.mpdu_cnt) 1577 ppdu_info->fcs_err_cnt++; 1578 else 1579 ppdu_info->is_fcs_passed = false; 1580 } 1581 } 1582 1583 void 1584 dp_rx_process_mcopy_mode(struct dp_soc *soc, struct dp_pdev *pdev, 1585 struct hal_rx_ppdu_info *ppdu_info, 1586 uint32_t tlv_status, 1587 qdf_nbuf_t status_nbuf) 1588 { 1589 QDF_STATUS mcopy_status; 1590 qdf_nbuf_t nbuf_clone = NULL; 1591 uint8_t fcs_ok_mpdu_cnt = 0; 1592 struct dp_mon_pdev *mon_pdev = pdev->monitor_pdev; 1593 1594 dp_rx_mcopy_handle_last_mpdu(soc, pdev, ppdu_info, status_nbuf); 1595 1596 if (qdf_unlikely(!ppdu_info->com_info.mpdu_cnt)) 1597 goto end; 1598 1599 if (qdf_unlikely(!ppdu_info->fcs_ok_cnt)) 1600 goto end; 1601 1602 /* For M_COPY mode only one msdu per ppdu is sent to upper layer*/ 1603 if (mon_pdev->mcopy_mode == M_COPY) 1604 ppdu_info->fcs_ok_cnt = 1; 1605 1606 while (fcs_ok_mpdu_cnt < ppdu_info->fcs_ok_cnt) { 1607 nbuf_clone = qdf_nbuf_clone(status_nbuf); 1608 if (!nbuf_clone) { 1609 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR, 1610 "Failed to clone nbuf"); 1611 goto end; 1612 } 1613 1614 mcopy_status = dp_rx_handle_mcopy_mode(soc, pdev, 1615 ppdu_info, 1616 nbuf_clone, 1617 fcs_ok_mpdu_cnt, 1618 true); 1619 1620 if (mcopy_status == QDF_STATUS_SUCCESS) 1621 qdf_nbuf_free(nbuf_clone); 1622 1623 fcs_ok_mpdu_cnt++; 1624 } 1625 end: 1626 qdf_nbuf_free(status_nbuf); 1627 ppdu_info->fcs_ok_cnt = 0; 1628 ppdu_info->fcs_err_cnt = 0; 1629 ppdu_info->com_info.mpdu_cnt = 0; 1630 qdf_mem_zero(&ppdu_info->ppdu_msdu_info, 1631 HAL_RX_MAX_MPDU_H_PER_STATUS_BUFFER 1632 * sizeof(struct hal_rx_msdu_payload_info)); 1633 } 1634 #endif /* QCA_MCOPY_SUPPORT */ 1635 1636 int 1637 dp_rx_handle_smart_mesh_mode(struct dp_soc *soc, struct dp_pdev *pdev, 1638 struct hal_rx_ppdu_info *ppdu_info, 1639 qdf_nbuf_t nbuf) 1640 { 1641 uint8_t size = 0; 1642 struct dp_mon_vdev *mon_vdev; 1643 struct dp_mon_pdev *mon_pdev = pdev->monitor_pdev; 1644 1645 if (!mon_pdev->mvdev) { 1646 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR, 1647 "[%s]:[%d] Monitor vdev is NULL !!", 1648 __func__, __LINE__); 1649 return 1; 1650 } 1651 1652 mon_vdev = mon_pdev->mvdev->monitor_vdev; 1653 1654 if (!ppdu_info->msdu_info.first_msdu_payload) { 1655 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR, 1656 "[%s]:[%d] First msdu payload not present", 1657 __func__, __LINE__); 1658 return 1; 1659 } 1660 1661 /* Adding 4 bytes to get to start of 802.11 frame after phy_ppdu_id */ 1662 size = (ppdu_info->msdu_info.first_msdu_payload - 1663 qdf_nbuf_data(nbuf)) + 4; 1664 ppdu_info->msdu_info.first_msdu_payload = NULL; 1665 1666 if (!qdf_nbuf_pull_head(nbuf, size)) { 1667 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR, 1668 "[%s]:[%d] No header present", 1669 __func__, __LINE__); 1670 return 1; 1671 } 1672 1673 /* Only retain RX MSDU payload in the skb */ 1674 qdf_nbuf_trim_tail(nbuf, qdf_nbuf_len(nbuf) - 1675 ppdu_info->msdu_info.payload_len); 1676 if (!qdf_nbuf_update_radiotap(&mon_pdev->ppdu_info.rx_status, nbuf, 1677 qdf_nbuf_headroom(nbuf))) { 1678 DP_STATS_INC(pdev, dropped.mon_radiotap_update_err, 1); 1679 return 1; 1680 } 1681 1682 mon_vdev->osif_rx_mon(mon_pdev->mvdev->osif_vdev, 1683 nbuf, NULL); 1684 mon_pdev->ppdu_info.rx_status.monitor_direct_used = 0; 1685 return 0; 1686 } 1687 1688 qdf_nbuf_t 1689 dp_rx_nbuf_prepare(struct 
dp_soc *soc, struct dp_pdev *pdev)
{
	uint8_t *buf;
	int32_t nbuf_retry_count;
	QDF_STATUS ret;
	qdf_nbuf_t nbuf = NULL;

	for (nbuf_retry_count = 0; nbuf_retry_count <
	     QDF_NBUF_ALLOC_MAP_RETRY_THRESHOLD;
	     nbuf_retry_count++) {
		/* Allocate a new skb using alloc_skb */
		nbuf = qdf_nbuf_alloc_no_recycler(RX_MON_STATUS_BUF_SIZE,
						  RX_MON_STATUS_BUF_RESERVATION,
						  RX_DATA_BUFFER_ALIGNMENT);

		if (!nbuf) {
			DP_STATS_INC(pdev, replenish.nbuf_alloc_fail, 1);
			continue;
		}

		buf = qdf_nbuf_data(nbuf);

		memset(buf, 0, RX_MON_STATUS_BUF_SIZE);

		ret = qdf_nbuf_map_nbytes_single(soc->osdev, nbuf,
						 QDF_DMA_FROM_DEVICE,
						 RX_MON_STATUS_BUF_SIZE);

		/* nbuf map failed */
		if (qdf_unlikely(QDF_IS_STATUS_ERROR(ret))) {
			qdf_nbuf_free(nbuf);
			DP_STATS_INC(pdev, replenish.map_err, 1);
			continue;
		}
		/* qdf_nbuf alloc and map succeeded */
		break;
	}

	/* qdf_nbuf alloc or map still failed */
	if (qdf_unlikely(nbuf_retry_count >=
			 QDF_NBUF_ALLOC_MAP_RETRY_THRESHOLD))
		return NULL;

	return nbuf;
}

#ifndef DISABLE_MON_CONFIG
uint32_t
dp_mon_process(struct dp_soc *soc, struct dp_intr *int_ctx,
	       uint32_t mac_id, uint32_t quota)
{
	struct dp_mon_soc *mon_soc = soc->monitor_soc;

	if (mon_soc && mon_soc->mon_rx_process)
		return mon_soc->mon_rx_process(soc, int_ctx,
					       mac_id, quota);
	return 0;
}
#else
uint32_t
dp_mon_process(struct dp_soc *soc, struct dp_intr *int_ctx,
	       uint32_t mac_id, uint32_t quota)
{
	return 0;
}
#endif

/**
 * dp_send_mgmt_packet_to_stack() - send an indication to the upper layers
 *
 * @soc: soc handle
 * @nbuf: Mgmt packet
 * @pdev: pdev handle
 *
 * Return: QDF_STATUS_SUCCESS on success
 *	   QDF_STATUS_E_INVAL on error
 */
#ifdef QCA_MCOPY_SUPPORT
static inline QDF_STATUS
dp_send_mgmt_packet_to_stack(struct dp_soc *soc,
			     qdf_nbuf_t nbuf,
			     struct dp_pdev *pdev)
{
	uint32_t *nbuf_data;
	struct ieee80211_frame *wh;
	qdf_frag_t addr;
	struct dp_mon_pdev *mon_pdev = pdev->monitor_pdev;

	if (!nbuf)
		return QDF_STATUS_E_INVAL;

	/* Get addr pointing to the 802.11 header */
	addr = dp_rx_mon_get_nbuf_80211_hdr(nbuf);
	if (qdf_unlikely(!addr)) {
		qdf_nbuf_free(nbuf);
		return QDF_STATUS_E_INVAL;
	}

	/* Deliver only management or control frames; drop everything else */
	wh = (struct ieee80211_frame *)addr;
	if (((wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK) !=
	     IEEE80211_FC0_TYPE_MGT) &&
	    ((wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK) !=
	     IEEE80211_FC0_TYPE_CTL)) {
		qdf_nbuf_free(nbuf);
		return QDF_STATUS_E_INVAL;
	}
	nbuf_data = (uint32_t *)qdf_nbuf_push_head(nbuf, 4);
	if (!nbuf_data) {
		QDF_TRACE(QDF_MODULE_ID_DP,
			  QDF_TRACE_LEVEL_ERROR,
			  FL("No headroom"));
		qdf_nbuf_free(nbuf);
		return QDF_STATUS_E_INVAL;
	}
	*nbuf_data = mon_pdev->ppdu_info.com_info.ppdu_id;

	dp_wdi_event_handler(WDI_EVENT_RX_MGMT_CTRL, soc, nbuf,
			     HTT_INVALID_PEER,
			     WDI_NO_VAL, pdev->pdev_id);
	return QDF_STATUS_SUCCESS;
}
#else
static inline QDF_STATUS
dp_send_mgmt_packet_to_stack(struct dp_soc *soc,
			     qdf_nbuf_t nbuf,
			     struct dp_pdev *pdev)
{
	return QDF_STATUS_SUCCESS;
}
#endif /* QCA_MCOPY_SUPPORT */

QDF_STATUS
QDF_STATUS
dp_rx_mon_process_dest_pktlog(struct dp_soc *soc,
			      uint32_t mac_id,
			      qdf_nbuf_t mpdu)
{
	uint32_t event, msdu_timestamp = 0;
	struct dp_pdev *pdev = dp_get_pdev_for_lmac_id(soc, mac_id);
	void *data;
	struct ieee80211_frame *wh;
	uint8_t type, subtype;
	struct dp_mon_pdev *mon_pdev;

	if (!pdev)
		return QDF_STATUS_E_INVAL;

	mon_pdev = pdev->monitor_pdev;

	if (mon_pdev->rx_pktlog_cbf) {
		if (qdf_nbuf_get_nr_frags(mpdu))
			data = qdf_nbuf_get_frag_addr(mpdu, 0);
		else
			data = qdf_nbuf_data(mpdu);

		/* CBF logging required, doesn't matter if it is a full mode
		 * or lite mode.
		 * Need to look for mpdu with:
		 * TYPE = ACTION, SUBTYPE = NO ACK in the header
		 */
		event = WDI_EVENT_RX_CBF;

		wh = (struct ieee80211_frame *)data;
		type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK;
		subtype = wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK;
		if (type == IEEE80211_FC0_TYPE_MGT &&
		    subtype == IEEE80211_FCO_SUBTYPE_ACTION_NO_ACK) {
			msdu_timestamp = mon_pdev->ppdu_info.rx_status.tsft;
			dp_rx_populate_cbf_hdr(soc,
					       mac_id, event,
					       mpdu,
					       msdu_timestamp);
		}
	}
	return QDF_STATUS_SUCCESS;
}
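/*
 * Illustrative sketch (not part of the driver): the CBF match above keys off
 * the first frame-control octet.  For an Action No Ack management frame,
 * fc[0] is 0xe0 (protocol version 0, type 00b, subtype 1110b), so:
 *
 *	type    = 0xe0 & IEEE80211_FC0_TYPE_MASK;     -> IEEE80211_FC0_TYPE_MGT
 *	subtype = 0xe0 & IEEE80211_FC0_SUBTYPE_MASK;  -> 0xe0 (Action No Ack)
 *
 * Any other management or control subtype fails one of the two comparisons
 * and no CBF header is populated for that mpdu.
 */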
QDF_STATUS dp_rx_mon_deliver(struct dp_soc *soc, uint32_t mac_id,
			     qdf_nbuf_t head_msdu, qdf_nbuf_t tail_msdu)
{
	struct dp_pdev *pdev = dp_get_pdev_for_lmac_id(soc, mac_id);
	struct cdp_mon_status *rs;
	qdf_nbuf_t mon_skb, skb_next;
	qdf_nbuf_t mon_mpdu = NULL;
	struct dp_mon_vdev *mon_vdev;
	struct dp_mon_pdev *mon_pdev;

	if (!pdev)
		goto mon_deliver_fail;

	mon_pdev = pdev->monitor_pdev;
	rs = &mon_pdev->rx_mon_recv_status;

	if (!mon_pdev->mvdev && !mon_pdev->mcopy_mode &&
	    !mon_pdev->rx_pktlog_cbf)
		goto mon_deliver_fail;

	/* restitch mon MPDU for delivery via monitor interface */
	mon_mpdu = dp_rx_mon_restitch_mpdu(soc, mac_id, head_msdu,
					   tail_msdu, rs);

	/* If MPDU restitch fails, free buffers */
	if (!mon_mpdu) {
		dp_info("MPDU restitch failed, free buffers");
		goto mon_deliver_fail;
	}

	dp_rx_mon_process_dest_pktlog(soc, mac_id, mon_mpdu);

	/* monitor vap cannot be present when mcopy is enabled
	 * hence same skb can be consumed
	 */
	if (mon_pdev->mcopy_mode)
		return dp_send_mgmt_packet_to_stack(soc, mon_mpdu, pdev);

	if (mon_mpdu && mon_pdev->mvdev &&
	    mon_pdev->mvdev->osif_vdev &&
	    mon_pdev->mvdev->monitor_vdev &&
	    mon_pdev->mvdev->monitor_vdev->osif_rx_mon) {
		mon_vdev = mon_pdev->mvdev->monitor_vdev;

		mon_pdev->ppdu_info.rx_status.ppdu_id =
			mon_pdev->ppdu_info.com_info.ppdu_id;
		mon_pdev->ppdu_info.rx_status.device_id = soc->device_id;
		mon_pdev->ppdu_info.rx_status.chan_noise_floor =
			pdev->chan_noise_floor;
		dp_handle_tx_capture(soc, pdev, mon_mpdu);

		if (!qdf_nbuf_update_radiotap(&mon_pdev->ppdu_info.rx_status,
					      mon_mpdu,
					      qdf_nbuf_headroom(mon_mpdu))) {
			DP_STATS_INC(pdev,
				     dropped.mon_radiotap_update_err, 1);
			goto mon_deliver_fail;
		}

		dp_rx_mon_update_pf_tag_to_buf_headroom(soc, mon_mpdu);
		mon_vdev->osif_rx_mon(mon_pdev->mvdev->osif_vdev,
				      mon_mpdu,
				      &mon_pdev->ppdu_info.rx_status);
	} else {
		dp_rx_mon_dest_debug("%pK: mon_mpdu=%pK monitor_vdev %pK osif_vdev %pK",
				     soc, mon_mpdu, mon_pdev->mvdev,
				     (mon_pdev->mvdev ?
				      mon_pdev->mvdev->osif_vdev : NULL));
		goto mon_deliver_fail;
	}

	return QDF_STATUS_SUCCESS;

mon_deliver_fail:
	mon_skb = head_msdu;
	while (mon_skb) {
		skb_next = qdf_nbuf_next(mon_skb);

		dp_rx_mon_dest_debug("%pK: [%s][%d] mon_skb=%pK len %u",
				     soc, __func__, __LINE__, mon_skb,
				     mon_skb->len);

		qdf_nbuf_free(mon_skb);
		mon_skb = skb_next;
	}
	return QDF_STATUS_E_INVAL;
}

QDF_STATUS dp_rx_mon_deliver_non_std(struct dp_soc *soc,
				     uint32_t mac_id)
{
	struct dp_pdev *pdev = dp_get_pdev_for_lmac_id(soc, mac_id);
	ol_txrx_rx_mon_fp osif_rx_mon;
	qdf_nbuf_t dummy_msdu;
	struct dp_mon_pdev *mon_pdev;
	struct dp_mon_vdev *mon_vdev;

	/* Sanity checking */
	if (!pdev || !pdev->monitor_pdev)
		goto mon_deliver_non_std_fail;

	mon_pdev = pdev->monitor_pdev;

	if (!mon_pdev->mvdev ||
	    !mon_pdev->mvdev->monitor_vdev ||
	    !mon_pdev->mvdev->monitor_vdev->osif_rx_mon)
		goto mon_deliver_non_std_fail;

	mon_vdev = mon_pdev->mvdev->monitor_vdev;
	/* Generate a dummy skb_buff */
	osif_rx_mon = mon_vdev->osif_rx_mon;
	dummy_msdu = qdf_nbuf_alloc(soc->osdev, MAX_MONITOR_HEADER,
				    MAX_MONITOR_HEADER, 4, FALSE);
	if (!dummy_msdu)
		goto allocate_dummy_msdu_fail;

	qdf_nbuf_set_pktlen(dummy_msdu, 0);
	qdf_nbuf_set_next(dummy_msdu, NULL);

	mon_pdev->ppdu_info.rx_status.ppdu_id =
		mon_pdev->ppdu_info.com_info.ppdu_id;

	/* Apply the radio header to this dummy skb */
	if (!qdf_nbuf_update_radiotap(&mon_pdev->ppdu_info.rx_status,
				      dummy_msdu,
				      qdf_nbuf_headroom(dummy_msdu))) {
		DP_STATS_INC(pdev, dropped.mon_radiotap_update_err, 1);
		qdf_nbuf_free(dummy_msdu);
		goto mon_deliver_non_std_fail;
	}

	/* deliver to the user layer application */
	osif_rx_mon(mon_pdev->mvdev->osif_vdev,
		    dummy_msdu, NULL);

	/* Clear rx_status */
	qdf_mem_zero(&mon_pdev->ppdu_info.rx_status,
		     sizeof(mon_pdev->ppdu_info.rx_status));
	mon_pdev->mon_ppdu_status = DP_PPDU_STATUS_START;

	return QDF_STATUS_SUCCESS;

allocate_dummy_msdu_fail:
	dp_rx_mon_dest_debug("%pK: mon_skb=%pK ",
			     soc, dummy_msdu);

mon_deliver_non_std_fail:
	return QDF_STATUS_E_INVAL;
}
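/*
 * Illustrative sketch (not part of the driver): what the monitor interface
 * is expected to receive from the non-std delivery above.  This assumes
 * qdf_nbuf_update_radiotap() prepends the radiotap header built from
 * ppdu_info.rx_status; the buffer name is a placeholder.
 *
 *	qdf_nbuf_set_pktlen(dummy_msdu, 0);          // no frame payload
 *	qdf_nbuf_update_radiotap(&rx_status, dummy_msdu,
 *				 qdf_nbuf_headroom(dummy_msdu));
 *	// dummy_msdu now carries only the radiotap header, i.e. a
 *	// "PPDU status only" indication; rx_status is then cleared so the
 *	// next PPDU starts from DP_PPDU_STATUS_START.
 */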
/**
 * dp_rx_process_peer_based_pktlog() - Process Rx pktlog if peer based
 *                                     filtering is enabled
 * @soc: core txrx main context
 * @ppdu_info: Structure for rx ppdu info
 * @status_nbuf: Qdf nbuf abstraction for linux skb
 * @pdev_id: mac_id/pdev_id correspondingly for MCL and WIN
 *
 * Return: none
 */
void
dp_rx_process_peer_based_pktlog(struct dp_soc *soc,
				struct hal_rx_ppdu_info *ppdu_info,
				qdf_nbuf_t status_nbuf, uint32_t pdev_id)
{
	struct dp_peer *peer;
	struct mon_rx_user_status *rx_user_status;
	uint32_t num_users = ppdu_info->com_info.num_users;
	uint16_t sw_peer_id;

	/* Sanity check for num_users */
	if (!num_users)
		return;

	qdf_assert_always(num_users <= CDP_MU_MAX_USERS);
	rx_user_status = &ppdu_info->rx_user_status[num_users - 1];

	sw_peer_id = rx_user_status->sw_peer_id;

	peer = dp_peer_get_ref_by_id(soc, sw_peer_id,
				     DP_MOD_ID_RX_PPDU_STATS);
	if (!peer)
		return;

	if ((peer->peer_id != HTT_INVALID_PEER) && (peer->monitor_peer) &&
	    (peer->monitor_peer->peer_based_pktlog_filter)) {
		dp_wdi_event_handler(WDI_EVENT_RX_DESC, soc,
				     status_nbuf,
				     peer->peer_id,
				     WDI_NO_VAL, pdev_id);
	}
	dp_peer_unref_delete(peer, DP_MOD_ID_RX_PPDU_STATS);
}

uint32_t
dp_mon_rx_add_tlv(uint8_t id, uint16_t len, void *value, qdf_nbuf_t mpdu_nbuf)
{
	uint8_t *dest = NULL;
	uint32_t num_bytes_pushed = 0;

	/* Add tlv id field */
	dest = qdf_nbuf_push_head(mpdu_nbuf, sizeof(uint8_t));
	if (qdf_likely(dest)) {
		*((uint8_t *)dest) = id;
		num_bytes_pushed += sizeof(uint8_t);
	}

	/* Add tlv len field */
	dest = qdf_nbuf_push_head(mpdu_nbuf, sizeof(uint16_t));
	if (qdf_likely(dest)) {
		*((uint16_t *)dest) = len;
		num_bytes_pushed += sizeof(uint16_t);
	}

	/* Add tlv value field */
	dest = qdf_nbuf_push_head(mpdu_nbuf, len);
	if (qdf_likely(dest)) {
		qdf_mem_copy(dest, value, len);
		num_bytes_pushed += len;
	}

	return num_bytes_pushed;
}

void
dp_mon_rx_stats_update_rssi_dbm_params(struct dp_mon_pdev *mon_pdev,
				       struct hal_rx_ppdu_info *ppdu_info)
{
	ppdu_info->rx_status.rssi_offset = mon_pdev->rssi_offsets.rssi_offset;
	ppdu_info->rx_status.rssi_dbm_conv_support =
				mon_pdev->rssi_dbm_conv_support;
	ppdu_info->rx_status.chan_noise_floor =
				mon_pdev->rssi_offsets.rssi_offset;
}

#ifdef WLAN_SUPPORT_CTRL_FRAME_STATS
void dp_rx_mon_update_user_ctrl_frame_stats(struct dp_pdev *pdev,
					    struct hal_rx_ppdu_info *ppdu_info)
{
	struct dp_peer *peer;
	struct dp_mon_peer *mon_peer;
	struct dp_soc *soc = pdev->soc;
	uint16_t fc, sw_peer_id;
	uint8_t i;

	if (qdf_unlikely(!ppdu_info))
		return;

	fc = ppdu_info->nac_info.frame_control;
	if (qdf_likely((qdf_cpu_to_le16(fc) & QDF_IEEE80211_FC0_TYPE_MASK) !=
	    QDF_IEEE80211_FC0_TYPE_CTL))
		return;

	for (i = 0; i < ppdu_info->com_info.num_users; i++) {
		sw_peer_id = ppdu_info->rx_user_status[i].sw_peer_id;
		peer = dp_peer_get_ref_by_id(soc, sw_peer_id,
					     DP_MOD_ID_RX_PPDU_STATS);
		if (qdf_unlikely(!peer))
			continue;
		mon_peer = peer->monitor_peer;
		if (qdf_unlikely(!mon_peer)) {
			dp_peer_unref_delete(peer, DP_MOD_ID_RX_PPDU_STATS);
			continue;
		}
		DP_STATS_INCC(mon_peer, rx.ndpa_cnt, 1,
			      ppdu_info->ctrl_frm_info[i].ndpa);
		DP_STATS_INCC(mon_peer, rx.bar_cnt, 1,
			      ppdu_info->ctrl_frm_info[i].bar);

		dp_peer_unref_delete(peer, DP_MOD_ID_RX_PPDU_STATS);
	}
}
#endif /* WLAN_SUPPORT_CTRL_FRAME_STATS */
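/*
 * Illustrative sketch (not part of the driver): dp_mon_rx_add_tlv() above
 * prepends id, then len, then value with qdf_nbuf_push_head(), so the value
 * ends up closest to the buffer head.  For a hypothetical call
 *
 *	uint32_t ppdu_id = 0x1234;
 *	dp_mon_rx_add_tlv(0x21, sizeof(ppdu_id), &ppdu_id, mpdu_nbuf);
 *
 * the function returns 7 (1 + 2 + 4 bytes pushed) and, reading forward from
 * qdf_nbuf_data(mpdu_nbuf), the layout is:
 *
 *	[value: 4 bytes][len: 2 bytes][id: 1 byte][previous nbuf contents]
 *
 * The tlv id 0x21 is a made-up example value, not a real HTT/TLV id.
 */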