/*
 * Copyright (c) 2021 The Linux Foundation. All rights reserved.
 * Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */

#include "hal_hw_headers.h"
#include "dp_types.h"
#include "dp_rx.h"
#include "dp_peer.h"
#include "hal_rx.h"
#include "hal_api.h"
#include "qdf_trace.h"
#include "qdf_nbuf.h"
#include "hal_api_mon.h"
#include "dp_internal.h"
#include "qdf_mem.h"   /* qdf_mem_malloc, qdf_mem_free */
#include "dp_htt.h"
#include "dp_mon.h"
#include "dp_rx_mon.h"

#include "htt.h"
#ifdef FEATURE_PERPKT_INFO
#include "dp_ratetable.h"
#endif

#ifndef IEEE80211_FCO_SUBTYPE_ACTION_NO_ACK
#define IEEE80211_FCO_SUBTYPE_ACTION_NO_ACK 0xe0
#endif

#if defined(WLAN_CFR_ENABLE) && defined(WLAN_ENH_CFR_ENABLE)
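
/**
 * dp_rx_mon_handle_cfr_mu_info() - Populate peer MAC address for each user
 * in the MU PPDU
 * @pdev: pdev ctx
 * @ppdu_info: ppdu info structure from ppdu ring
 * @cdp_rx_ppdu: Rx PPDU indication structure
 *
 * For every user in the PPDU, the peer is looked up from its sw_peer_id and
 * its MAC address is copied into the per-user stats of @cdp_rx_ppdu; users
 * without a valid peer are tagged with HTT_INVALID_PEER.
 *
 * Return: none
 */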
void
dp_rx_mon_handle_cfr_mu_info(struct dp_pdev *pdev,
			     struct hal_rx_ppdu_info *ppdu_info,
			     struct cdp_rx_indication_ppdu *cdp_rx_ppdu)
{
	struct dp_peer *peer;
	struct dp_soc *soc = pdev->soc;
	struct mon_rx_user_status *rx_user_status;
	struct cdp_rx_stats_ppdu_user *rx_stats_peruser;
	uint32_t num_users;
	int user_id;
	uint16_t sw_peer_id;

	num_users = ppdu_info->com_info.num_users;
	for (user_id = 0; user_id < num_users; user_id++) {
		if (user_id > OFDMA_NUM_USERS)
			return;

		rx_user_status = &ppdu_info->rx_user_status[user_id];
		rx_stats_peruser = &cdp_rx_ppdu->user[user_id];
		sw_peer_id = rx_user_status->sw_peer_id;
		peer = dp_peer_get_ref_by_id(soc, sw_peer_id,
					     DP_MOD_ID_RX_PPDU_STATS);
		if (!peer) {
			rx_stats_peruser->peer_id = HTT_INVALID_PEER;
			continue;
		}

		qdf_mem_copy(rx_stats_peruser->mac_addr,
			     peer->mac_addr.raw, QDF_MAC_ADDR_SIZE);
		dp_peer_unref_delete(peer, DP_MOD_ID_RX_PPDU_STATS);
	}
}
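
/**
 * dp_rx_mon_populate_cfr_ppdu_info() - Populate cdp rx ppdu info from hal
 * ppdu info for the CFR path
 * @pdev: pdev ctx
 * @ppdu_info: ppdu info structure from ppdu ring
 * @cdp_rx_ppdu: Rx PPDU indication structure
 *
 * Return: none
 */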
void
dp_rx_mon_populate_cfr_ppdu_info(struct dp_pdev *pdev,
				 struct hal_rx_ppdu_info *ppdu_info,
				 struct cdp_rx_indication_ppdu *cdp_rx_ppdu)
{
	struct dp_peer *peer;
	struct dp_soc *soc = pdev->soc;
	int chain;
	uint16_t sw_peer_id;
	struct mon_rx_user_status *rx_user_status;
	uint32_t num_users = ppdu_info->com_info.num_users;

	cdp_rx_ppdu->ppdu_id = ppdu_info->com_info.ppdu_id;
	cdp_rx_ppdu->timestamp = ppdu_info->rx_status.tsft;
	cdp_rx_ppdu->u.ppdu_type = ppdu_info->rx_status.reception_type;

	for (chain = 0; chain < MAX_CHAIN; chain++)
		cdp_rx_ppdu->per_chain_rssi[chain] =
			ppdu_info->rx_status.rssi[chain];

	cdp_rx_ppdu->u.ltf_size = ppdu_info->rx_status.ltf_size;
	cdp_rx_ppdu->beamformed = ppdu_info->rx_status.beamformed;
	cdp_rx_ppdu->u.ldpc = ppdu_info->rx_status.ldpc;

	if ((ppdu_info->rx_status.sgi == VHT_SGI_NYSM) &&
	    (ppdu_info->rx_status.preamble_type == HAL_RX_PKT_TYPE_11AC))
		cdp_rx_ppdu->u.gi = CDP_SGI_0_4_US;
	else
		cdp_rx_ppdu->u.gi = ppdu_info->rx_status.sgi;

	if (ppdu_info->rx_status.preamble_type == HAL_RX_PKT_TYPE_11AC) {
		cdp_rx_ppdu->u.stbc = ppdu_info->rx_status.is_stbc;
	} else if (ppdu_info->rx_status.preamble_type ==
		   HAL_RX_PKT_TYPE_11AX) {
		cdp_rx_ppdu->u.stbc = (ppdu_info->rx_status.he_data3 >>
				       QDF_MON_STATUS_STBC_SHIFT) & 0x1;
		cdp_rx_ppdu->u.dcm = (ppdu_info->rx_status.he_data3 >>
				      QDF_MON_STATUS_DCM_SHIFT) & 0x1;
	}

	qdf_assert_always(num_users <= CDP_MU_MAX_USERS);
	dp_rx_mon_handle_cfr_mu_info(pdev, ppdu_info, cdp_rx_ppdu);
	rx_user_status = &ppdu_info->rx_user_status[num_users - 1];
	sw_peer_id = rx_user_status->sw_peer_id;
	peer = dp_peer_get_ref_by_id(soc, sw_peer_id, DP_MOD_ID_RX_PPDU_STATS);
	if (!peer) {
		cdp_rx_ppdu->peer_id = HTT_INVALID_PEER;
		cdp_rx_ppdu->num_users = 0;
		return;
	}

	cdp_rx_ppdu->peer_id = peer->peer_id;
	cdp_rx_ppdu->vdev_id = peer->vdev->vdev_id;
	cdp_rx_ppdu->num_users = num_users;
	/* release the peer reference taken above */
	dp_peer_unref_delete(peer, DP_MOD_ID_RX_PPDU_STATS);
}

bool
dp_cfr_rcc_mode_status(struct dp_pdev *pdev)
{
	return pdev->cfr_rcc_mode;
}

void
dp_rx_mon_populate_cfr_info(struct dp_pdev *pdev,
			    struct hal_rx_ppdu_info *ppdu_info,
			    struct cdp_rx_indication_ppdu *cdp_rx_ppdu)
{
	struct cdp_rx_ppdu_cfr_info *cfr_info;

	if (!dp_cfr_rcc_mode_status(pdev))
		return;

	cfr_info = &cdp_rx_ppdu->cfr_info;

	cfr_info->bb_captured_channel
		= ppdu_info->cfr_info.bb_captured_channel;
	cfr_info->bb_captured_timeout
		= ppdu_info->cfr_info.bb_captured_timeout;
	cfr_info->bb_captured_reason
		= ppdu_info->cfr_info.bb_captured_reason;
	cfr_info->rx_location_info_valid
		= ppdu_info->cfr_info.rx_location_info_valid;
	cfr_info->chan_capture_status
		= ppdu_info->cfr_info.chan_capture_status;
	cfr_info->rtt_che_buffer_pointer_high8
		= ppdu_info->cfr_info.rtt_che_buffer_pointer_high8;
	cfr_info->rtt_che_buffer_pointer_low32
		= ppdu_info->cfr_info.rtt_che_buffer_pointer_low32;
	cfr_info->rtt_cfo_measurement
		= (int16_t)ppdu_info->cfr_info.rtt_cfo_measurement;
	cfr_info->agc_gain_info0
		= ppdu_info->cfr_info.agc_gain_info0;
	cfr_info->agc_gain_info1
		= ppdu_info->cfr_info.agc_gain_info1;
	cfr_info->agc_gain_info2
		= ppdu_info->cfr_info.agc_gain_info2;
	cfr_info->agc_gain_info3
		= ppdu_info->cfr_info.agc_gain_info3;
	cfr_info->rx_start_ts
		= ppdu_info->cfr_info.rx_start_ts;
	cfr_info->mcs_rate
		= ppdu_info->cfr_info.mcs_rate;
	cfr_info->gi_type
		= ppdu_info->cfr_info.gi_type;
}

void
dp_update_cfr_dbg_stats(struct dp_pdev *pdev,
			struct hal_rx_ppdu_info *ppdu_info)
{
	struct hal_rx_ppdu_cfr_info *cfr = &ppdu_info->cfr_info;

	DP_STATS_INC(pdev,
		     rcc.chan_capture_status[cfr->chan_capture_status], 1);
	if (cfr->rx_location_info_valid) {
		DP_STATS_INC(pdev, rcc.rx_loc_info_valid_cnt, 1);
		if (cfr->bb_captured_channel) {
			DP_STATS_INC(pdev, rcc.bb_captured_channel_cnt, 1);
			DP_STATS_INC(pdev,
				     rcc.reason_cnt[cfr->bb_captured_reason],
				     1);
		} else if (cfr->bb_captured_timeout) {
			DP_STATS_INC(pdev, rcc.bb_captured_timeout_cnt, 1);
			DP_STATS_INC(pdev,
				     rcc.reason_cnt[cfr->bb_captured_reason],
				     1);
		}
	}
}
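
/**
 * dp_rx_handle_cfr() - Update CFR debug stats and, when the baseband has
 * captured the channel, send the PPDU descriptor up via the WDI event handler
 * @soc: core txrx main context
 * @pdev: pdev ctx
 * @ppdu_info: ppdu info structure from ppdu ring
 *
 * Return: none
 */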
void
dp_rx_handle_cfr(struct dp_soc *soc, struct dp_pdev *pdev,
		 struct hal_rx_ppdu_info *ppdu_info)
{
	qdf_nbuf_t ppdu_nbuf;
	struct cdp_rx_indication_ppdu *cdp_rx_ppdu;

	dp_update_cfr_dbg_stats(pdev, ppdu_info);
	if (!ppdu_info->cfr_info.bb_captured_channel)
		return;

	ppdu_nbuf = qdf_nbuf_alloc(soc->osdev,
				   sizeof(struct cdp_rx_indication_ppdu),
				   0,
				   0,
				   FALSE);
	if (ppdu_nbuf) {
		cdp_rx_ppdu = (struct cdp_rx_indication_ppdu *)ppdu_nbuf->data;

		dp_rx_mon_populate_cfr_info(pdev, ppdu_info, cdp_rx_ppdu);
		dp_rx_mon_populate_cfr_ppdu_info(pdev, ppdu_info, cdp_rx_ppdu);
		qdf_nbuf_put_tail(ppdu_nbuf,
				  sizeof(struct cdp_rx_indication_ppdu));
		dp_wdi_event_handler(WDI_EVENT_RX_PPDU_DESC, soc,
				     ppdu_nbuf, HTT_INVALID_PEER,
				     WDI_NO_VAL, pdev->pdev_id);
	}
}

void
dp_rx_populate_cfr_non_assoc_sta(struct dp_pdev *pdev,
				 struct hal_rx_ppdu_info *ppdu_info,
				 struct cdp_rx_indication_ppdu *cdp_rx_ppdu)
{
	if (!dp_cfr_rcc_mode_status(pdev))
		return;

	if (ppdu_info->cfr_info.bb_captured_channel)
		dp_rx_mon_populate_cfr_ppdu_info(pdev, ppdu_info, cdp_rx_ppdu);
}

/**
 * dp_bb_captured_chan_status() - Get the bb_captured_channel status
 * @pdev: pdev ctx
 * @ppdu_info: structure for rx ppdu ring
 *
 * Return: Success/Failure
 */
static inline QDF_STATUS
dp_bb_captured_chan_status(struct dp_pdev *pdev,
			   struct hal_rx_ppdu_info *ppdu_info)
{
	QDF_STATUS status = QDF_STATUS_E_FAILURE;
	struct hal_rx_ppdu_cfr_info *cfr = &ppdu_info->cfr_info;

	if (dp_cfr_rcc_mode_status(pdev)) {
		if (cfr->bb_captured_channel)
			status = QDF_STATUS_SUCCESS;
	}

	return status;
}
#else
static inline QDF_STATUS
dp_bb_captured_chan_status(struct dp_pdev *pdev,
			   struct hal_rx_ppdu_info *ppdu_info)
{
	return QDF_STATUS_E_NOSUPPORT;
}
#endif /* WLAN_CFR_ENABLE && WLAN_ENH_CFR_ENABLE */

#ifdef QCA_ENHANCED_STATS_SUPPORT
#ifdef QCA_RSSI_DB2DBM
/**
 * dp_rx_mon_rf_index_conv() - this function will convert BB index to RF
 * index in the rssi_chain[chain][bw] array
 *
 * @chain: BB chain index
 * @ppdu_info: ppdu info structure from ppdu ring
 * @pdev: pdev structure
 *
 * Return: RF chain index
 *
 * Computation:
 * 3 Bytes of xbar_config are used for RF to BB mapping
 * Samples of xbar_config,
 *
 * If xbar_config is 0x688FAC(hex):
 *     RF chains 0-3 are connected to BB chains 4-7
 *     RF chains 4-7 are connected to BB chains 0-3
 * here,
 *     bits 0 to 2 = 4, maps BB chain 4 for RF chain 0
 *     bits 3 to 5 = 5, maps BB chain 5 for RF chain 1
 *     bits 6 to 8 = 6, maps BB chain 6 for RF chain 2
 *     bits 9 to 11 = 7, maps BB chain 7 for RF chain 3
 *     bits 12 to 14 = 0, maps BB chain 0 for RF chain 4
 *     bits 15 to 17 = 1, maps BB chain 1 for RF chain 5
 *     bits 18 to 20 = 2, maps BB chain 2 for RF chain 6
 *     bits 21 to 23 = 3, maps BB chain 3 for RF chain 7
 */
static uint8_t dp_rx_mon_rf_index_conv(uint8_t chain,
				       struct hal_rx_ppdu_info *ppdu_info,
				       struct dp_pdev *pdev)
{
	uint32_t xbar_config = ppdu_info->rx_status.xbar_config;

	if (pdev->soc->features.rssi_dbm_conv_support && xbar_config)
		return ((xbar_config >> (3 * chain)) & 0x07);
	return chain;
}
#else
static uint8_t dp_rx_mon_rf_index_conv(uint8_t chain,
				       struct hal_rx_ppdu_info *ppdu_info,
				       struct dp_pdev *pdev)
{
	return chain;
}
#endif
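
/**
 * dp_rx_populate_rx_rssi_chain() - Populate per-chain, per-bw rssi from hal
 * ppdu info into the cdp rx indication
 * @ppdu_info: ppdu info structure from ppdu ring
 * @cdp_rx_ppdu: Rx PPDU indication structure
 * @pdev: pdev structure
 *
 * Return: none
 */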
void
dp_rx_populate_rx_rssi_chain(struct hal_rx_ppdu_info *ppdu_info,
			     struct cdp_rx_indication_ppdu *cdp_rx_ppdu,
			     struct dp_pdev *pdev)
{
	uint8_t chain, bw;
	uint8_t rssi;
	uint8_t rf_chain;

	for (chain = 0; chain < SS_COUNT; chain++) {
		/*
		 * Store the converted index in a separate variable so the
		 * loop counter is not clobbered by the index conversion.
		 */
		rf_chain = dp_rx_mon_rf_index_conv(chain, ppdu_info, pdev);
		for (bw = 0; bw < MAX_BW; bw++) {
			rssi = ppdu_info->rx_status.rssi_chain[rf_chain][bw];
			if (rssi != DP_RSSI_INVAL)
				cdp_rx_ppdu->rssi_chain[rf_chain][bw] = rssi;
			else
				cdp_rx_ppdu->rssi_chain[rf_chain][bw] = 0;
		}
	}
}
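
/**
 * dp_rx_populate_su_evm_details() - Populate su evm info
 * @ppdu_info: ppdu info structure from ppdu ring
 * @cdp_rx_ppdu: Rx PPDU indication structure
 *
 * Return: none
 */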
void
dp_rx_populate_su_evm_details(struct hal_rx_ppdu_info *ppdu_info,
			      struct cdp_rx_indication_ppdu *cdp_rx_ppdu)
{
	uint8_t pilot_evm;
	uint8_t nss_count;
	uint8_t pilot_count;

	nss_count = ppdu_info->evm_info.nss_count;
	pilot_count = ppdu_info->evm_info.pilot_count;

	if ((nss_count * pilot_count) > DP_RX_MAX_SU_EVM_COUNT) {
		qdf_err("pilot evm count is more than expected");
		return;
	}
	cdp_rx_ppdu->evm_info.pilot_count = pilot_count;
	cdp_rx_ppdu->evm_info.nss_count = nss_count;

	/* Populate evm for pilot_evm = nss_count * pilot_count */
	for (pilot_evm = 0; pilot_evm < nss_count * pilot_count; pilot_evm++) {
		cdp_rx_ppdu->evm_info.pilot_evm[pilot_evm] =
			ppdu_info->evm_info.pilot_evm[pilot_evm];
	}
}

/**
 * dp_rx_inc_rusize_cnt() - increment pdev stats based on RU size
 * @pdev: pdev ctx
 * @rx_user_status: mon rx user status
 *
 * Return: true if the frame is a data frame, false otherwise
 */
static inline bool
dp_rx_inc_rusize_cnt(struct dp_pdev *pdev,
		     struct mon_rx_user_status *rx_user_status)
{
	uint32_t ru_size;
	bool is_data;

	ru_size = rx_user_status->ofdma_ru_size;

	if (dp_is_subtype_data(rx_user_status->frame_control)) {
		DP_STATS_INC(pdev,
			     ul_ofdma.data_rx_ru_size[ru_size], 1);
		is_data = true;
	} else {
		DP_STATS_INC(pdev,
			     ul_ofdma.nondata_rx_ru_size[ru_size], 1);
		is_data = false;
	}

	return is_data;
}

/**
 * dp_rx_populate_cdp_indication_ppdu_user() - Populate per user cdp indication
 * @pdev: pdev ctx
 * @ppdu_info: ppdu info structure from ppdu ring
 * @cdp_rx_ppdu: Rx PPDU indication structure
 *
 * Return: none
 */
static void
dp_rx_populate_cdp_indication_ppdu_user(struct dp_pdev *pdev,
					struct hal_rx_ppdu_info *ppdu_info,
					struct cdp_rx_indication_ppdu
					*cdp_rx_ppdu)
{
	struct dp_peer *peer;
	struct dp_soc *soc = pdev->soc;
	int i;
	struct mon_rx_user_status *rx_user_status;
	struct mon_rx_user_info *rx_user_info;
	struct cdp_rx_stats_ppdu_user *rx_stats_peruser;
	int ru_size;
	bool is_data = false;
	uint32_t num_users;
	struct dp_mon_ops *mon_ops;
	uint16_t sw_peer_id;

	num_users = ppdu_info->com_info.num_users;
	for (i = 0; i < num_users; i++) {
		if (i > OFDMA_NUM_USERS)
			return;

		rx_user_status = &ppdu_info->rx_user_status[i];
		rx_user_info = &ppdu_info->rx_user_info[i];
		rx_stats_peruser = &cdp_rx_ppdu->user[i];

		sw_peer_id = rx_user_status->sw_peer_id;
		peer = dp_peer_get_ref_by_id(soc, sw_peer_id,
					     DP_MOD_ID_RX_PPDU_STATS);
		if (!peer) {
			rx_stats_peruser->peer_id = HTT_INVALID_PEER;
			continue;
		}
		rx_stats_peruser->is_bss_peer = peer->bss_peer;

		rx_stats_peruser->first_data_seq_ctrl =
			rx_user_status->first_data_seq_ctrl;

		rx_stats_peruser->frame_control_info_valid =
			rx_user_status->frame_control_info_valid;
		rx_stats_peruser->frame_control =
			rx_user_status->frame_control;

		rx_stats_peruser->qos_control_info_valid =
			rx_user_info->qos_control_info_valid;
		rx_stats_peruser->qos_control =
			rx_user_info->qos_control;
		rx_stats_peruser->tcp_msdu_count =
			rx_user_status->tcp_msdu_count;
		rx_stats_peruser->udp_msdu_count =
			rx_user_status->udp_msdu_count;
		rx_stats_peruser->other_msdu_count =
			rx_user_status->other_msdu_count;

		rx_stats_peruser->num_msdu =
			rx_stats_peruser->tcp_msdu_count +
			rx_stats_peruser->udp_msdu_count +
			rx_stats_peruser->other_msdu_count;

		rx_stats_peruser->preamble_type =
			cdp_rx_ppdu->u.preamble;
		rx_stats_peruser->mpdu_cnt_fcs_ok =
			rx_user_status->mpdu_cnt_fcs_ok;
		rx_stats_peruser->mpdu_cnt_fcs_err =
			rx_user_status->mpdu_cnt_fcs_err;
		qdf_mem_copy(&rx_stats_peruser->mpdu_fcs_ok_bitmap,
			     &rx_user_status->mpdu_fcs_ok_bitmap,
			     HAL_RX_NUM_WORDS_PER_PPDU_BITMAP *
			     sizeof(rx_user_status->mpdu_fcs_ok_bitmap[0]));
		rx_stats_peruser->mpdu_ok_byte_count =
			rx_user_status->mpdu_ok_byte_count;
		rx_stats_peruser->mpdu_err_byte_count =
			rx_user_status->mpdu_err_byte_count;

		cdp_rx_ppdu->num_mpdu += rx_user_status->mpdu_cnt_fcs_ok;
		cdp_rx_ppdu->num_msdu += rx_stats_peruser->num_msdu;
		rx_stats_peruser->retries =
			CDP_FC_IS_RETRY_SET(rx_stats_peruser->frame_control) ?
			rx_stats_peruser->mpdu_cnt_fcs_ok : 0;
		cdp_rx_ppdu->retries += rx_stats_peruser->retries;

		if (rx_stats_peruser->mpdu_cnt_fcs_ok > 1)
			rx_stats_peruser->is_ampdu = 1;
		else
			rx_stats_peruser->is_ampdu = 0;

		rx_stats_peruser->tid = ppdu_info->rx_status.tid;

		qdf_mem_copy(rx_stats_peruser->mac_addr,
			     peer->mac_addr.raw, QDF_MAC_ADDR_SIZE);
		rx_stats_peruser->peer_id = peer->peer_id;
		cdp_rx_ppdu->vdev_id = peer->vdev->vdev_id;
		rx_stats_peruser->vdev_id = peer->vdev->vdev_id;
		rx_stats_peruser->mu_ul_info_valid = 0;

		mon_ops = dp_mon_ops_get(soc);
		if (mon_ops && mon_ops->mon_rx_populate_ppdu_usr_info)
			mon_ops->mon_rx_populate_ppdu_usr_info(rx_user_status,
							       rx_stats_peruser);

		dp_peer_unref_delete(peer, DP_MOD_ID_RX_PPDU_STATS);
		if (cdp_rx_ppdu->u.ppdu_type == HAL_RX_TYPE_MU_OFDMA ||
		    cdp_rx_ppdu->u.ppdu_type == HAL_RX_TYPE_MU_MIMO) {
			if (rx_user_status->mu_ul_info_valid) {
				rx_stats_peruser->nss = rx_user_status->nss;
				cdp_rx_ppdu->usr_nss_sum +=
					rx_stats_peruser->nss;
				rx_stats_peruser->mcs = rx_user_status->mcs;
				rx_stats_peruser->mu_ul_info_valid =
					rx_user_status->mu_ul_info_valid;
				rx_stats_peruser->ofdma_ru_start_index =
					rx_user_status->ofdma_ru_start_index;
				rx_stats_peruser->ofdma_ru_width =
					rx_user_status->ofdma_ru_width;
				cdp_rx_ppdu->usr_ru_tones_sum +=
					rx_stats_peruser->ofdma_ru_width;
				rx_stats_peruser->user_index = i;
				ru_size = rx_user_status->ofdma_ru_size;
				/*
				 * max RU size will be equal to
				 * HTT_UL_OFDMA_V0_RU_SIZE_RU_996x2
				 */
				if (ru_size >= OFDMA_NUM_RU_SIZE) {
					dp_err("invalid ru_size %d", ru_size);
					return;
				}
				is_data = dp_rx_inc_rusize_cnt(pdev,
							       rx_user_status);
			}
			if (is_data) {
				/* counter to get number of MU OFDMA */
				pdev->stats.ul_ofdma.data_rx_ppdu++;
				pdev->stats.ul_ofdma.data_users[num_users]++;
			}
		}
	}
}

/**
 * dp_rx_populate_cdp_indication_ppdu() - Populate cdp rx indication structure
 * @pdev: pdev ctx
 * @ppdu_info: ppdu info structure from ppdu ring
 * @cdp_rx_ppdu: Rx PPDU indication structure
 *
 * Return: none
 */
static void
dp_rx_populate_cdp_indication_ppdu(struct dp_pdev *pdev,
				   struct hal_rx_ppdu_info *ppdu_info,
				   struct cdp_rx_indication_ppdu
				   *cdp_rx_ppdu)
{
	struct dp_peer *peer;
	struct dp_soc *soc = pdev->soc;
	uint32_t i;
	struct dp_mon_ops *mon_ops;
	uint16_t sw_peer_id;
	struct mon_rx_user_status *rx_user_status;
	uint32_t num_users = ppdu_info->com_info.num_users;

	cdp_rx_ppdu->first_data_seq_ctrl =
		ppdu_info->rx_status.first_data_seq_ctrl;
	cdp_rx_ppdu->frame_ctrl =
		ppdu_info->rx_status.frame_control;
	cdp_rx_ppdu->tcp_msdu_count = ppdu_info->rx_status.tcp_msdu_count;
	cdp_rx_ppdu->udp_msdu_count = ppdu_info->rx_status.udp_msdu_count;
	cdp_rx_ppdu->other_msdu_count = ppdu_info->rx_status.other_msdu_count;
	/* num mpdu is consolidated and added together in num user loop */
	cdp_rx_ppdu->num_mpdu = ppdu_info->com_info.mpdu_cnt_fcs_ok;
	/* num msdu is consolidated and added together in num user loop */
	cdp_rx_ppdu->num_msdu = (cdp_rx_ppdu->tcp_msdu_count +
				 cdp_rx_ppdu->udp_msdu_count +
				 cdp_rx_ppdu->other_msdu_count);

	cdp_rx_ppdu->retries = CDP_FC_IS_RETRY_SET(cdp_rx_ppdu->frame_ctrl) ?
		ppdu_info->com_info.mpdu_cnt_fcs_ok : 0;

	if (ppdu_info->com_info.mpdu_cnt_fcs_ok > 1)
		cdp_rx_ppdu->is_ampdu = 1;
	else
		cdp_rx_ppdu->is_ampdu = 0;
	cdp_rx_ppdu->tid = ppdu_info->rx_status.tid;

	qdf_assert_always(num_users <= CDP_MU_MAX_USERS);
	rx_user_status = &ppdu_info->rx_user_status[num_users - 1];
	sw_peer_id = rx_user_status->sw_peer_id;
	peer = dp_peer_get_ref_by_id(soc, sw_peer_id,
				     DP_MOD_ID_RX_PPDU_STATS);
	if (!peer) {
		cdp_rx_ppdu->peer_id = HTT_INVALID_PEER;
		cdp_rx_ppdu->num_users = 0;
		goto end;
	}

	qdf_mem_copy(cdp_rx_ppdu->mac_addr,
		     peer->mac_addr.raw, QDF_MAC_ADDR_SIZE);
	cdp_rx_ppdu->peer_id = peer->peer_id;
	cdp_rx_ppdu->vdev_id = peer->vdev->vdev_id;

	cdp_rx_ppdu->ppdu_id = ppdu_info->com_info.ppdu_id;
	cdp_rx_ppdu->length = ppdu_info->rx_status.ppdu_len;
	cdp_rx_ppdu->duration = ppdu_info->rx_status.duration;
	cdp_rx_ppdu->u.nss = ppdu_info->rx_status.nss;
	cdp_rx_ppdu->u.mcs = ppdu_info->rx_status.mcs;
	if ((ppdu_info->rx_status.sgi == VHT_SGI_NYSM) &&
	    (ppdu_info->rx_status.preamble_type == HAL_RX_PKT_TYPE_11AC))
		cdp_rx_ppdu->u.gi = CDP_SGI_0_4_US;
	else
		cdp_rx_ppdu->u.gi = ppdu_info->rx_status.sgi;
	cdp_rx_ppdu->u.ldpc = ppdu_info->rx_status.ldpc;
	cdp_rx_ppdu->u.ppdu_type = ppdu_info->rx_status.reception_type;
	cdp_rx_ppdu->u.ltf_size = (ppdu_info->rx_status.he_data5 >>
				   QDF_MON_STATUS_HE_LTF_SIZE_SHIFT) & 0x3;
	cdp_rx_ppdu->rssi = ppdu_info->rx_status.rssi_comb;
	cdp_rx_ppdu->timestamp = ppdu_info->rx_status.tsft;
	cdp_rx_ppdu->channel = ppdu_info->rx_status.chan_num;
	cdp_rx_ppdu->beamformed = ppdu_info->rx_status.beamformed;
	cdp_rx_ppdu->num_bytes = ppdu_info->rx_status.ppdu_len;
	cdp_rx_ppdu->lsig_a = ppdu_info->rx_status.rate;
	cdp_rx_ppdu->u.ltf_size = ppdu_info->rx_status.ltf_size;

	if (ppdu_info->rx_status.preamble_type == HAL_RX_PKT_TYPE_11AC) {
		cdp_rx_ppdu->u.stbc = ppdu_info->rx_status.is_stbc;
	} else if (ppdu_info->rx_status.preamble_type ==
		   HAL_RX_PKT_TYPE_11AX) {
		cdp_rx_ppdu->u.stbc = (ppdu_info->rx_status.he_data3 >>
				       QDF_MON_STATUS_STBC_SHIFT) & 0x1;
		cdp_rx_ppdu->u.dcm = (ppdu_info->rx_status.he_data3 >>
				      QDF_MON_STATUS_DCM_SHIFT) & 0x1;
	}
	dp_rx_populate_rx_rssi_chain(ppdu_info, cdp_rx_ppdu, pdev);
	dp_rx_populate_su_evm_details(ppdu_info, cdp_rx_ppdu);
	cdp_rx_ppdu->rx_antenna = ppdu_info->rx_status.rx_antenna;

	mon_ops = dp_mon_ops_get(pdev->soc);
	if (mon_ops && mon_ops->mon_rx_populate_ppdu_info)
		mon_ops->mon_rx_populate_ppdu_info(ppdu_info,
						   cdp_rx_ppdu);

	cdp_rx_ppdu->nf = ppdu_info->rx_status.chan_noise_floor;
	for (i = 0; i < MAX_CHAIN; i++)
		cdp_rx_ppdu->per_chain_rssi[i] = ppdu_info->rx_status.rssi[i];

	cdp_rx_ppdu->is_mcast_bcast = ppdu_info->nac_info.mcast_bcast;

	cdp_rx_ppdu->num_users = ppdu_info->com_info.num_users;

	cdp_rx_ppdu->num_mpdu = 0;
	cdp_rx_ppdu->num_msdu = 0;
	cdp_rx_ppdu->retries = 0;

	dp_rx_populate_cdp_indication_ppdu_user(pdev, ppdu_info, cdp_rx_ppdu);

	dp_peer_unref_delete(peer, DP_MOD_ID_RX_PPDU_STATS);

	return;
end:
	dp_rx_populate_cfr_non_assoc_sta(pdev, ppdu_info, cdp_rx_ppdu);
}

/**
 * dp_rx_rate_stats_update() - Update per-peer rate statistics
 * @peer: Datapath peer handle
 * @ppdu: PPDU Descriptor
 * @user: user index into the PPDU descriptor
 *
 * Return: None
 */
static inline void dp_rx_rate_stats_update(struct dp_peer *peer,
					   struct cdp_rx_indication_ppdu *ppdu,
					   uint32_t user)
{
	uint32_t ratekbps = 0;
	uint32_t ppdu_rx_rate = 0;
	uint32_t nss = 0;
	uint8_t mcs = 0;
	uint32_t rix;
	uint16_t ratecode = 0;
	struct cdp_rx_stats_ppdu_user *ppdu_user = NULL;
	struct dp_mon_peer *mon_peer = NULL;

	if (!peer || !ppdu)
		return;

	mon_peer = peer->monitor_peer;
	ppdu_user = &ppdu->user[user];

	if (!mon_peer)
		return;

	if (ppdu->u.ppdu_type != HAL_RX_TYPE_SU) {
		if (ppdu_user->nss == 0)
			nss = 0;
		else
			nss = ppdu_user->nss - 1;
		mcs = ppdu_user->mcs;

		mon_peer->stats.rx.nss_info = ppdu_user->nss;
		mon_peer->stats.rx.mcs_info = ppdu_user->mcs;
	} else {
		if (ppdu->u.nss == 0)
			nss = 0;
		else
			nss = ppdu->u.nss - 1;
		mcs = ppdu->u.mcs;

		mon_peer->stats.rx.nss_info = ppdu->u.nss;
		mon_peer->stats.rx.mcs_info = ppdu->u.mcs;
	}

	ratekbps = dp_getrateindex(ppdu->u.gi,
				   mcs,
				   nss,
				   ppdu->u.preamble,
				   ppdu->u.bw,
				   ppdu->punc_bw,
				   &rix,
				   &ratecode);

	if (!ratekbps) {
		ppdu->rix = 0;
		ppdu_user->rix = 0;
		ppdu->rx_ratekbps = 0;
		ppdu->rx_ratecode = 0;
		ppdu_user->rx_ratekbps = 0;
		return;
	}

	mon_peer->stats.rx.bw_info = ppdu->u.bw;
	mon_peer->stats.rx.gi_info = ppdu->u.gi;
	mon_peer->stats.rx.preamble_info = ppdu->u.preamble;

	ppdu->rix = rix;
	ppdu_user->rix = rix;
	DP_STATS_UPD(mon_peer, rx.last_rx_rate, ratekbps);
	mon_peer->stats.rx.avg_rx_rate =
		dp_ath_rate_lpf(mon_peer->stats.rx.avg_rx_rate, ratekbps);
	ppdu_rx_rate = dp_ath_rate_out(mon_peer->stats.rx.avg_rx_rate);
	DP_STATS_UPD(mon_peer, rx.rnd_avg_rx_rate, ppdu_rx_rate);
	ppdu->rx_ratekbps = ratekbps;
	ppdu->rx_ratecode = ratecode;
	ppdu_user->rx_ratekbps = ratekbps;

	if (peer->vdev)
		peer->vdev->stats.rx.last_rx_rate = ratekbps;
}

#ifdef WLAN_FEATURE_11BE
static inline uint8_t dp_get_bw_offset_frm_bw(struct dp_soc *soc,
					      enum CMN_BW_TYPES bw)
{
	uint8_t pkt_bw_offset;

	switch (bw) {
	case CMN_BW_20MHZ:
		pkt_bw_offset = PKT_BW_GAIN_20MHZ;
		break;
	case CMN_BW_40MHZ:
		pkt_bw_offset = PKT_BW_GAIN_40MHZ;
		break;
	case CMN_BW_80MHZ:
		pkt_bw_offset = PKT_BW_GAIN_80MHZ;
		break;
	case CMN_BW_160MHZ:
		pkt_bw_offset = PKT_BW_GAIN_160MHZ;
		break;
	case CMN_BW_320MHZ:
		pkt_bw_offset = PKT_BW_GAIN_320MHZ;
		break;
	default:
		pkt_bw_offset = 0;
		dp_rx_mon_status_debug("%pK: Invalid BW index = %d",
				       soc, bw);
	}

	return pkt_bw_offset;
}
#else
static inline uint8_t dp_get_bw_offset_frm_bw(struct dp_soc *soc,
					      enum CMN_BW_TYPES bw)
{
	uint8_t pkt_bw_offset;

	switch (bw) {
	case CMN_BW_20MHZ:
		pkt_bw_offset = PKT_BW_GAIN_20MHZ;
		break;
	case CMN_BW_40MHZ:
		pkt_bw_offset = PKT_BW_GAIN_40MHZ;
		break;
	case CMN_BW_80MHZ:
		pkt_bw_offset = PKT_BW_GAIN_80MHZ;
		break;
	case CMN_BW_160MHZ:
		pkt_bw_offset = PKT_BW_GAIN_160MHZ;
		break;
	default:
		pkt_bw_offset = 0;
		dp_rx_mon_status_debug("%pK: Invalid BW index = %d",
				       soc, bw);
	}

	return pkt_bw_offset;
}
#endif

#ifdef WLAN_TELEMETRY_STATS_SUPPORT
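/**
 * dp_ppdu_desc_user_rx_time_update() - Update per-user rx time and the peer
 * airtime consumption stats
 * @pdev: pdev ctx
 * @peer: Datapath peer handle
 * @ppdu_desc: Rx PPDU indication structure
 * @user: per-user stats of the PPDU
 *
 * For MU PPDUs the PPDU duration is apportioned to the user in the ratio of
 * its nss * ru_width product to the sum over all users; for SU receptions
 * the whole PPDU duration is charged to the user.
 *
 * Return: none
 */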
static void
dp_ppdu_desc_user_rx_time_update(struct dp_pdev *pdev,
				 struct dp_peer *peer,
				 struct cdp_rx_indication_ppdu *ppdu_desc,
				 struct cdp_rx_stats_ppdu_user *user)
{
	uint32_t nss_ru_width_sum = 0;
	struct dp_mon_peer *mon_peer = NULL;

	if (!pdev || !ppdu_desc || !user || !peer)
		return;

	nss_ru_width_sum = ppdu_desc->usr_nss_sum *
			   ppdu_desc->usr_ru_tones_sum;
	if (!nss_ru_width_sum)
		nss_ru_width_sum = 1;

	if (ppdu_desc->u.ppdu_type == HAL_RX_TYPE_MU_OFDMA ||
	    ppdu_desc->u.ppdu_type == HAL_RX_TYPE_MU_MIMO) {
		user->rx_time_us = (ppdu_desc->duration *
				    user->nss * user->ofdma_ru_width) /
				    nss_ru_width_sum;
	} else {
		user->rx_time_us = ppdu_desc->duration;
	}

	mon_peer = peer->monitor_peer;
	if (qdf_unlikely(!mon_peer))
		return;

	DP_STATS_INC(mon_peer, airtime_consumption.consumption,
		     user->rx_time_us);
}
#else
static inline void
dp_ppdu_desc_user_rx_time_update(struct dp_pdev *pdev,
				 struct dp_peer *peer,
				 struct cdp_rx_indication_ppdu *ppdu_desc,
				 struct cdp_rx_stats_ppdu_user *user)
{
}
#endif
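
/**
 * dp_rx_stats_update() - Update per-peer statistics from the PPDU descriptor
 * @pdev: pdev ctx
 * @ppdu: Rx PPDU indication structure
 *
 * Return: none
 */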
static void dp_rx_stats_update(struct dp_pdev *pdev,
			       struct cdp_rx_indication_ppdu *ppdu)
{
	struct dp_soc *soc = NULL;
	uint8_t mcs, preamble, ac = 0, nss, ppdu_type;
	uint32_t num_msdu;
	uint8_t pkt_bw_offset;
	struct dp_peer *peer;
	struct dp_mon_peer *mon_peer;
	struct cdp_rx_stats_ppdu_user *ppdu_user;
	uint32_t i;
	enum cdp_mu_packet_type mu_pkt_type;
	struct dp_mon_ops *mon_ops;
	struct dp_mon_pdev *mon_pdev = NULL;

	if (pdev)
		soc = pdev->soc;
	else
		return;

	if (!soc || soc->process_rx_status)
		return;

	mon_pdev = pdev->monitor_pdev;

	preamble = ppdu->u.preamble;
	ppdu_type = ppdu->u.ppdu_type;

	for (i = 0; i < ppdu->num_users && i < CDP_MU_MAX_USERS; i++) {
		peer = NULL;
		ppdu_user = &ppdu->user[i];
		peer = dp_peer_get_ref_by_id(soc, ppdu_user->peer_id,
					     DP_MOD_ID_RX_PPDU_STATS);

		if (!peer)
			mon_peer = mon_pdev->invalid_mon_peer;
		else
			mon_peer = peer->monitor_peer;

		if (!mon_peer) {
			if (peer)
				dp_peer_unref_delete(peer,
						     DP_MOD_ID_RX_PPDU_STATS);
			continue;
		}

		if ((preamble == DOT11_A) || (preamble == DOT11_B))
			ppdu->u.nss = 1;

		if (ppdu_type == HAL_RX_TYPE_SU) {
			mcs = ppdu->u.mcs;
			nss = ppdu->u.nss;
		} else {
			mcs = ppdu_user->mcs;
			nss = ppdu_user->nss;
		}

		num_msdu = ppdu_user->num_msdu;

		pkt_bw_offset = dp_get_bw_offset_frm_bw(soc, ppdu->u.bw);
		DP_STATS_UPD(mon_peer, rx.snr, (ppdu->rssi + pkt_bw_offset));

		if (mon_peer->stats.rx.avg_snr == CDP_INVALID_SNR)
			mon_peer->stats.rx.avg_snr =
				CDP_SNR_IN(mon_peer->stats.rx.snr);
		else
			CDP_SNR_UPDATE_AVG(mon_peer->stats.rx.avg_snr,
					   mon_peer->stats.rx.snr);

		if (ppdu_type == HAL_RX_TYPE_SU) {
			if (nss) {
				DP_STATS_INC(mon_peer, rx.nss[nss - 1],
					     num_msdu);
				DP_STATS_INC(mon_peer, rx.ppdu_nss[nss - 1],
					     1);
			}

			DP_STATS_INC(mon_peer, rx.mpdu_cnt_fcs_ok,
				     ppdu_user->mpdu_cnt_fcs_ok);
			DP_STATS_INC(mon_peer, rx.mpdu_cnt_fcs_err,
				     ppdu_user->mpdu_cnt_fcs_err);
		}

		if (ppdu_type >= HAL_RX_TYPE_MU_MIMO &&
		    ppdu_type <= HAL_RX_TYPE_MU_OFDMA) {
			if (ppdu_type == HAL_RX_TYPE_MU_MIMO)
				mu_pkt_type = TXRX_TYPE_MU_MIMO;
			else
				mu_pkt_type = TXRX_TYPE_MU_OFDMA;

			if (nss) {
				DP_STATS_INC(mon_peer, rx.nss[nss - 1],
					     num_msdu);
				DP_STATS_INC(mon_peer,
					rx.rx_mu[mu_pkt_type].ppdu_nss[nss - 1],
					1);
			}

			DP_STATS_INC(mon_peer,
				     rx.rx_mu[mu_pkt_type].mpdu_cnt_fcs_ok,
				     ppdu_user->mpdu_cnt_fcs_ok);
			DP_STATS_INC(mon_peer,
				     rx.rx_mu[mu_pkt_type].mpdu_cnt_fcs_err,
				     ppdu_user->mpdu_cnt_fcs_err);
		}

		DP_STATS_INC(mon_peer, rx.sgi_count[ppdu->u.gi], num_msdu);
		DP_STATS_INC(mon_peer, rx.bw[ppdu->u.bw], num_msdu);
		DP_STATS_INC(mon_peer, rx.reception_type[ppdu->u.ppdu_type],
			     num_msdu);
		DP_STATS_INC(mon_peer, rx.ppdu_cnt[ppdu->u.ppdu_type], 1);
		DP_STATS_INCC(mon_peer, rx.ampdu_cnt, num_msdu,
			      ppdu_user->is_ampdu);
		DP_STATS_INCC(mon_peer, rx.non_ampdu_cnt, num_msdu,
			      !(ppdu_user->is_ampdu));
		DP_STATS_UPD(mon_peer, rx.rx_rate, mcs);
		DP_STATS_INCC(mon_peer,
			rx.pkt_type[preamble].mcs_count[MAX_MCS - 1], num_msdu,
			((mcs >= MAX_MCS_11A) && (preamble == DOT11_A)));
		DP_STATS_INCC(mon_peer,
			rx.pkt_type[preamble].mcs_count[mcs], num_msdu,
			((mcs < MAX_MCS_11A) && (preamble == DOT11_A)));
		DP_STATS_INCC(mon_peer,
			rx.pkt_type[preamble].mcs_count[MAX_MCS - 1], num_msdu,
			((mcs >= MAX_MCS_11B) && (preamble == DOT11_B)));
		DP_STATS_INCC(mon_peer,
			rx.pkt_type[preamble].mcs_count[mcs], num_msdu,
			((mcs < MAX_MCS_11B) && (preamble == DOT11_B)));
		DP_STATS_INCC(mon_peer,
			rx.pkt_type[preamble].mcs_count[MAX_MCS - 1], num_msdu,
			((mcs >= MAX_MCS_11A) && (preamble == DOT11_N)));
		DP_STATS_INCC(mon_peer,
			rx.pkt_type[preamble].mcs_count[mcs], num_msdu,
			((mcs < MAX_MCS_11A) && (preamble == DOT11_N)));
		DP_STATS_INCC(mon_peer,
			rx.pkt_type[preamble].mcs_count[MAX_MCS - 1], num_msdu,
			((mcs >= MAX_MCS_11AC) && (preamble == DOT11_AC)));
		DP_STATS_INCC(mon_peer,
			rx.pkt_type[preamble].mcs_count[mcs], num_msdu,
			((mcs < MAX_MCS_11AC) && (preamble == DOT11_AC)));
		DP_STATS_INCC(mon_peer,
			rx.pkt_type[preamble].mcs_count[MAX_MCS - 1], num_msdu,
			((mcs >= (MAX_MCS_11AX)) && (preamble == DOT11_AX)));
		DP_STATS_INCC(mon_peer,
			rx.pkt_type[preamble].mcs_count[mcs], num_msdu,
			((mcs < (MAX_MCS_11AX)) && (preamble == DOT11_AX)));
		DP_STATS_INCC(mon_peer,
			rx.su_ax_ppdu_cnt.mcs_count[MAX_MCS - 1], 1,
			((mcs >= (MAX_MCS_11AX)) && (preamble == DOT11_AX) &&
			(ppdu_type == HAL_RX_TYPE_SU)));
		DP_STATS_INCC(mon_peer,
			rx.su_ax_ppdu_cnt.mcs_count[mcs], 1,
			((mcs < (MAX_MCS_11AX)) && (preamble == DOT11_AX) &&
			(ppdu_type == HAL_RX_TYPE_SU)));
		DP_STATS_INCC(mon_peer,
			rx.rx_mu[TXRX_TYPE_MU_OFDMA].ppdu.mcs_count[MAX_MCS - 1],
			1, ((mcs >= (MAX_MCS_11AX)) &&
			(preamble == DOT11_AX) &&
			(ppdu_type == HAL_RX_TYPE_MU_OFDMA)));
		DP_STATS_INCC(mon_peer,
			rx.rx_mu[TXRX_TYPE_MU_OFDMA].ppdu.mcs_count[mcs],
			1, ((mcs < (MAX_MCS_11AX)) &&
			(preamble == DOT11_AX) &&
			(ppdu_type == HAL_RX_TYPE_MU_OFDMA)));
		DP_STATS_INCC(mon_peer,
			rx.rx_mu[TXRX_TYPE_MU_MIMO].ppdu.mcs_count[MAX_MCS - 1],
			1, ((mcs >= (MAX_MCS_11AX)) &&
			(preamble == DOT11_AX) &&
			(ppdu_type == HAL_RX_TYPE_MU_MIMO)));
		DP_STATS_INCC(mon_peer,
			rx.rx_mu[TXRX_TYPE_MU_MIMO].ppdu.mcs_count[mcs],
			1, ((mcs < (MAX_MCS_11AX)) &&
			(preamble == DOT11_AX) &&
			(ppdu_type == HAL_RX_TYPE_MU_MIMO)));

		/*
		 * If invalid TID, it could be a non-qos frame, hence do not
		 * update any AC counters
		 */
		ac = TID_TO_WME_AC(ppdu_user->tid);

		if (ppdu->tid != HAL_TID_INVALID)
			DP_STATS_INC(mon_peer, rx.wme_ac_type[ac], num_msdu);

		DP_STATS_INC(mon_peer, rx.rx_ppdus, 1);
		DP_STATS_INC(mon_peer, rx.rx_mpdus,
			     (ppdu_user->mpdu_cnt_fcs_ok +
			      ppdu_user->mpdu_cnt_fcs_err));

		mon_ops = dp_mon_ops_get(soc);
		if (mon_ops && mon_ops->mon_rx_stats_update)
			mon_ops->mon_rx_stats_update(mon_peer, ppdu,
						     ppdu_user);

		if (!peer)
			continue;

		dp_peer_stats_notify(pdev, peer);
		DP_STATS_UPD(mon_peer, rx.last_snr,
			     (ppdu->rssi + pkt_bw_offset));

		dp_peer_qos_stats_notify(pdev, ppdu_user);

		if (dp_is_subtype_data(ppdu->frame_ctrl))
			dp_rx_rate_stats_update(peer, ppdu, i);

		dp_send_stats_event(pdev, peer, ppdu_user->peer_id);

		dp_ppdu_desc_user_rx_time_update(pdev, peer, ppdu, ppdu_user);
		dp_peer_unref_delete(peer, DP_MOD_ID_RX_PPDU_STATS);
	}
}
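
/**
 * dp_rx_handle_ppdu_stats() - Build the Rx PPDU indication from the parsed
 * ppdu info, update stats and deliver it through the WDI event handler
 * @soc: core txrx main context
 * @pdev: pdev ctx
 * @ppdu_info: ppdu info structure from ppdu ring
 *
 * Return: none
 */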
void
dp_rx_handle_ppdu_stats(struct dp_soc *soc, struct dp_pdev *pdev,
			struct hal_rx_ppdu_info *ppdu_info)
{
	qdf_nbuf_t ppdu_nbuf;
	struct cdp_rx_indication_ppdu *cdp_rx_ppdu;
	struct dp_mon_pdev *mon_pdev = pdev->monitor_pdev;

	/*
	 * Do not allocate if fcs error,
	 * ast idx invalid / fctl invalid
	 *
	 * In CFR RCC mode - PPDU status TLVs of error pkts are also needed
	 */
	if (ppdu_info->com_info.mpdu_cnt_fcs_ok == 0)
		return;

	if (ppdu_info->nac_info.fc_valid &&
	    ppdu_info->nac_info.to_ds_flag &&
	    ppdu_info->nac_info.mac_addr2_valid) {
		struct dp_neighbour_peer *peer = NULL;
		uint8_t rssi = ppdu_info->rx_status.rssi_comb;

		qdf_spin_lock_bh(&mon_pdev->neighbour_peer_mutex);
		if (mon_pdev->neighbour_peers_added) {
			TAILQ_FOREACH(peer, &mon_pdev->neighbour_peers_list,
				      neighbour_peer_list_elem) {
				if (!qdf_mem_cmp(&peer->neighbour_peers_macaddr,
						 &ppdu_info->nac_info.mac_addr2,
						 QDF_MAC_ADDR_SIZE)) {
					peer->rssi = rssi;
					break;
				}
			}
		}
		qdf_spin_unlock_bh(&mon_pdev->neighbour_peer_mutex);
	} else {
		dp_info("Neighbour peers RSSI update failed! fc_valid = %d, to_ds_flag = %d and mac_addr2_valid = %d",
			ppdu_info->nac_info.fc_valid,
			ppdu_info->nac_info.to_ds_flag,
			ppdu_info->nac_info.mac_addr2_valid);
	}

	/* need not generate wdi event when mcopy, cfr rcc mode and
	 * enhanced stats are not enabled
	 */
	if (!mon_pdev->mcopy_mode && !mon_pdev->enhanced_stats_en &&
	    !dp_cfr_rcc_mode_status(pdev))
		return;

	if (dp_cfr_rcc_mode_status(pdev))
		dp_update_cfr_dbg_stats(pdev, ppdu_info);

	if (!ppdu_info->rx_status.frame_control_info_valid ||
	    (ppdu_info->rx_status.ast_index == HAL_AST_IDX_INVALID)) {
		if (!(mon_pdev->mcopy_mode ||
		      (dp_bb_captured_chan_status(pdev, ppdu_info) ==
		       QDF_STATUS_SUCCESS)))
			return;
	}

	ppdu_nbuf = qdf_nbuf_alloc(soc->osdev,
				   sizeof(struct cdp_rx_indication_ppdu),
				   0, 0, FALSE);
	if (ppdu_nbuf) {
		cdp_rx_ppdu = (struct cdp_rx_indication_ppdu *)
				qdf_nbuf_data(ppdu_nbuf);

		qdf_mem_zero(cdp_rx_ppdu,
			     sizeof(struct cdp_rx_indication_ppdu));
		dp_rx_mon_populate_cfr_info(pdev, ppdu_info, cdp_rx_ppdu);
		dp_rx_populate_cdp_indication_ppdu(pdev,
						   ppdu_info, cdp_rx_ppdu);
		if (!qdf_nbuf_put_tail(ppdu_nbuf,
				       sizeof(struct cdp_rx_indication_ppdu))) {
			/* put_tail failed; free the nbuf instead of leaking */
			qdf_nbuf_free(ppdu_nbuf);
			return;
		}

		dp_rx_stats_update(pdev, cdp_rx_ppdu);

		if (cdp_rx_ppdu->peer_id != HTT_INVALID_PEER) {
			dp_wdi_event_handler(WDI_EVENT_RX_PPDU_DESC,
					     soc, ppdu_nbuf,
					     cdp_rx_ppdu->peer_id,
					     WDI_NO_VAL, pdev->pdev_id);
		} else if (mon_pdev->mcopy_mode ||
			   dp_cfr_rcc_mode_status(pdev)) {
			dp_wdi_event_handler(WDI_EVENT_RX_PPDU_DESC, soc,
					     ppdu_nbuf, HTT_INVALID_PEER,
					     WDI_NO_VAL, pdev->pdev_id);
		} else {
			qdf_nbuf_free(ppdu_nbuf);
		}
	}
}
#endif /* QCA_ENHANCED_STATS_SUPPORT */

#ifdef QCA_UNDECODED_METADATA_SUPPORT
#define RX_PHYERR_MASK_GET64(_val1, _val2) (((uint64_t)(_val2) << 32) | (_val1))
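
/*
 * RX_PHYERR_MASK_GET64() combines the two 32-bit PHY error mask registers
 * into one 64-bit mask: _val1 supplies bits 0-31 (error codes 0-31) and
 * _val2 supplies bits 32-63 (error codes 32 and above). For example,
 * RX_PHYERR_MASK_GET64(0x5, 0x1) yields 0x100000005, i.e. error codes
 * 0, 2 and 32 enabled.
 */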

/**
 * dp_rx_populate_cdp_indication_ppdu_undecoded_metadata() - Populate cdp
 * rx indication structure
 * @pdev: pdev ctx
 * @ppdu_info: ppdu info structure from ppdu ring
 * @cdp_rx_ppdu: Rx PPDU indication structure
 *
 * Return: none
 */
static void
dp_rx_populate_cdp_indication_ppdu_undecoded_metadata(struct dp_pdev *pdev,
				struct hal_rx_ppdu_info *ppdu_info,
				struct cdp_rx_indication_ppdu *cdp_rx_ppdu)
{
	uint32_t chain;

	cdp_rx_ppdu->phyrx_abort = ppdu_info->rx_status.phyrx_abort;
	cdp_rx_ppdu->phyrx_abort_reason =
		ppdu_info->rx_status.phyrx_abort_reason;

	cdp_rx_ppdu->first_data_seq_ctrl =
		ppdu_info->rx_status.first_data_seq_ctrl;
	cdp_rx_ppdu->frame_ctrl =
		ppdu_info->rx_status.frame_control;
	cdp_rx_ppdu->tcp_msdu_count = ppdu_info->rx_status.tcp_msdu_count;
	cdp_rx_ppdu->udp_msdu_count = ppdu_info->rx_status.udp_msdu_count;
	cdp_rx_ppdu->other_msdu_count = ppdu_info->rx_status.other_msdu_count;
	cdp_rx_ppdu->u.preamble = ppdu_info->rx_status.preamble_type;
	cdp_rx_ppdu->num_mpdu = ppdu_info->com_info.mpdu_cnt_fcs_ok;
	cdp_rx_ppdu->num_msdu = (cdp_rx_ppdu->tcp_msdu_count +
				 cdp_rx_ppdu->udp_msdu_count +
				 cdp_rx_ppdu->other_msdu_count);

	cdp_rx_ppdu->retries = CDP_FC_IS_RETRY_SET(cdp_rx_ppdu->frame_ctrl) ?
		ppdu_info->com_info.mpdu_cnt_fcs_ok : 0;

	if (ppdu_info->com_info.mpdu_cnt_fcs_ok > 1)
		cdp_rx_ppdu->is_ampdu = 1;
	else
		cdp_rx_ppdu->is_ampdu = 0;
	cdp_rx_ppdu->tid = ppdu_info->rx_status.tid;

	cdp_rx_ppdu->ppdu_id = ppdu_info->com_info.ppdu_id;
	cdp_rx_ppdu->length = ppdu_info->rx_status.ppdu_len;
	cdp_rx_ppdu->duration = ppdu_info->rx_status.duration;
	cdp_rx_ppdu->u.bw = ppdu_info->rx_status.bw;
	cdp_rx_ppdu->u.nss = ppdu_info->rx_status.nss;
	cdp_rx_ppdu->u.mcs = ppdu_info->rx_status.mcs;
	if (ppdu_info->rx_status.sgi == VHT_SGI_NYSM &&
	    ppdu_info->rx_status.preamble_type == HAL_RX_PKT_TYPE_11AC)
		cdp_rx_ppdu->u.gi = CDP_SGI_0_4_US;
	else
		cdp_rx_ppdu->u.gi = ppdu_info->rx_status.sgi;

	cdp_rx_ppdu->u.ldpc = ppdu_info->rx_status.ldpc;
	cdp_rx_ppdu->u.ppdu_type = ppdu_info->rx_status.reception_type;
	cdp_rx_ppdu->u.ltf_size = (ppdu_info->rx_status.he_data5 >>
				   QDF_MON_STATUS_HE_LTF_SIZE_SHIFT) & 0x3;

	cdp_rx_ppdu->rssi = ppdu_info->rx_status.rssi_comb;
	cdp_rx_ppdu->timestamp = ppdu_info->rx_status.tsft;
	cdp_rx_ppdu->channel = ppdu_info->rx_status.chan_num;
	cdp_rx_ppdu->beamformed = ppdu_info->rx_status.beamformed;
	cdp_rx_ppdu->num_bytes = ppdu_info->rx_status.ppdu_len;
	cdp_rx_ppdu->lsig_a = ppdu_info->rx_status.rate;
	cdp_rx_ppdu->u.ltf_size = ppdu_info->rx_status.ltf_size;

	if (ppdu_info->rx_status.preamble_type == HAL_RX_PKT_TYPE_11AC) {
		cdp_rx_ppdu->u.stbc = ppdu_info->rx_status.is_stbc;
		cdp_rx_ppdu->vht_no_txop_ps =
			ppdu_info->rx_status.vht_no_txop_ps;
		cdp_rx_ppdu->vht_crc = ppdu_info->rx_status.vht_crc;
		cdp_rx_ppdu->group_id = ppdu_info->rx_status.vht_flag_values5;
	} else if (ppdu_info->rx_status.preamble_type ==
		   HAL_RX_PKT_TYPE_11AX) {
		cdp_rx_ppdu->u.stbc = (ppdu_info->rx_status.he_data3 >>
				       QDF_MON_STATUS_STBC_SHIFT) & 0x1;
		cdp_rx_ppdu->u.dcm = (ppdu_info->rx_status.he_data3 >>
				      QDF_MON_STATUS_DCM_SHIFT) & 0x1;
	} else {
		cdp_rx_ppdu->u.stbc = ppdu_info->rx_status.ht_stbc;
		cdp_rx_ppdu->ht_length = ppdu_info->rx_status.ht_length;
		cdp_rx_ppdu->ht_smoothing = ppdu_info->rx_status.smoothing;
		cdp_rx_ppdu->ht_not_sounding =
			ppdu_info->rx_status.not_sounding;
		cdp_rx_ppdu->ht_aggregation = ppdu_info->rx_status.aggregation;
		cdp_rx_ppdu->ht_stbc = ppdu_info->rx_status.ht_stbc;
		cdp_rx_ppdu->ht_crc = ppdu_info->rx_status.ht_crc;
	}

	cdp_rx_ppdu->l_sig_length = ppdu_info->rx_status.l_sig_length;
	cdp_rx_ppdu->l_sig_a_parity = ppdu_info->rx_status.l_sig_a_parity;
	cdp_rx_ppdu->l_sig_a_pkt_type = ppdu_info->rx_status.l_sig_a_pkt_type;

	if (ppdu_info->rx_status.preamble_type == HAL_RX_PKT_TYPE_11AX) {
		cdp_rx_ppdu->he_crc = ppdu_info->rx_status.he_crc;
		cdp_rx_ppdu->bss_color_id =
			ppdu_info->rx_status.he_data3 & 0x3F;
		cdp_rx_ppdu->beam_change = (ppdu_info->rx_status.he_data3 >>
				QDF_MON_STATUS_BEAM_CHANGE_SHIFT) & 0x1;
		cdp_rx_ppdu->dl_ul_flag = (ppdu_info->rx_status.he_data3 >>
				QDF_MON_STATUS_DL_UL_SHIFT) & 0x1;
		cdp_rx_ppdu->ldpc_extra_sym = (ppdu_info->rx_status.he_data3 >>
				QDF_MON_STATUS_LDPC_EXTRA_SYMBOL_SHIFT) & 0x1;
		cdp_rx_ppdu->special_reuse =
			ppdu_info->rx_status.he_data4 & 0xF;
		cdp_rx_ppdu->ltf_sym = (ppdu_info->rx_status.he_data5 >>
				QDF_MON_STATUS_HE_LTF_SYM_SHIFT) & 0x7;
		cdp_rx_ppdu->txbf = (ppdu_info->rx_status.he_data5 >>
				QDF_MON_STATUS_TXBF_SHIFT) & 0x1;
		cdp_rx_ppdu->pe_disambiguity = (ppdu_info->rx_status.he_data5 >>
				QDF_MON_STATUS_PE_DISAMBIGUITY_SHIFT) & 0x1;
		cdp_rx_ppdu->pre_fec_pad = (ppdu_info->rx_status.he_data5 >>
				QDF_MON_STATUS_PRE_FEC_PAD_SHIFT) & 0x3;
		cdp_rx_ppdu->dopplar = (ppdu_info->rx_status.he_data6 >>
				QDF_MON_STATUS_DOPPLER_SHIFT) & 0x1;
		cdp_rx_ppdu->txop_duration = (ppdu_info->rx_status.he_data6 >>
				QDF_MON_STATUS_TXOP_SHIFT) & 0x7F;
		cdp_rx_ppdu->sig_b_mcs = ppdu_info->rx_status.he_flags1 & 0x7;
		cdp_rx_ppdu->sig_b_dcm = (ppdu_info->rx_status.he_flags1 >>
				QDF_MON_STATUS_DCM_FLAG_1_SHIFT) & 0x1;
		cdp_rx_ppdu->sig_b_sym = (ppdu_info->rx_status.he_flags2 >>
				QDF_MON_STATUS_NUM_SIG_B_SYMBOLS_SHIFT) & 0xF;
		cdp_rx_ppdu->sig_b_comp = (ppdu_info->rx_status.he_flags2 >>
				QDF_MON_STATUS_SIG_B_COMPRESSION_FLAG_2_SHIFT)
				& 0x1;
	}
	dp_rx_populate_rx_rssi_chain(ppdu_info, cdp_rx_ppdu, pdev);
	dp_rx_populate_su_evm_details(ppdu_info, cdp_rx_ppdu);
	cdp_rx_ppdu->rx_antenna = ppdu_info->rx_status.rx_antenna;

	cdp_rx_ppdu->nf = ppdu_info->rx_status.chan_noise_floor;
	for (chain = 0; chain < MAX_CHAIN; chain++)
		cdp_rx_ppdu->per_chain_rssi[chain] =
			ppdu_info->rx_status.rssi[chain];

	cdp_rx_ppdu->is_mcast_bcast = ppdu_info->nac_info.mcast_bcast;

	cdp_rx_ppdu->num_users = ppdu_info->com_info.num_users;

	dp_rx_populate_cdp_indication_ppdu_user(pdev, ppdu_info, cdp_rx_ppdu);
}

/**
 * dp_rx_is_valid_undecoded_frame() - Check whether the received undecoded
 * frame is valid against the configured error mask
 * @err_mask: configured err mask
 * @err_code: Received error reason code for phy abort
 *
 * Return: true / false
 */
static inline bool
dp_rx_is_valid_undecoded_frame(uint64_t err_mask, uint8_t err_code)
{
	if (err_code < CDP_PHYRX_ERR_MAX &&
	    (err_mask & (1ULL << err_code)))
		return true;

	return false;
}
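
/**
 * dp_rx_handle_ppdu_undecoded_metadata() - Deliver the PPDU descriptor of an
 * undecoded (phy-aborted) frame when its abort reason is enabled in the
 * configured PHY error mask
 * @soc: core txrx main context
 * @pdev: pdev ctx
 * @ppdu_info: ppdu info structure from ppdu ring
 *
 * Return: none
 */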
void
dp_rx_handle_ppdu_undecoded_metadata(struct dp_soc *soc, struct dp_pdev *pdev,
				     struct hal_rx_ppdu_info *ppdu_info)
{
	qdf_nbuf_t ppdu_nbuf;
	struct cdp_rx_indication_ppdu *cdp_rx_ppdu;
	uint8_t abort_reason = 0;
	struct dp_mon_pdev *mon_pdev = pdev->monitor_pdev;
	uint64_t mask64;

	/* Return if RX_ABORT not set */
	if (ppdu_info->rx_status.phyrx_abort == 0)
		return;

	mask64 = RX_PHYERR_MASK_GET64(mon_pdev->phyrx_error_mask,
				      mon_pdev->phyrx_error_mask_cont);
	abort_reason = ppdu_info->rx_status.phyrx_abort_reason;

	if (!dp_rx_is_valid_undecoded_frame(mask64, abort_reason))
		return;

	ppdu_nbuf = qdf_nbuf_alloc(soc->osdev,
				   sizeof(struct cdp_rx_indication_ppdu),
				   0, 0, FALSE);
	if (ppdu_nbuf) {
		cdp_rx_ppdu = ((struct cdp_rx_indication_ppdu *)
				qdf_nbuf_data(ppdu_nbuf));

		qdf_mem_zero(cdp_rx_ppdu,
			     sizeof(struct cdp_rx_indication_ppdu));
		dp_rx_populate_cdp_indication_ppdu_undecoded_metadata(pdev,
				ppdu_info, cdp_rx_ppdu);

		if (!qdf_nbuf_put_tail(ppdu_nbuf,
				       sizeof(struct cdp_rx_indication_ppdu))) {
			/* put_tail failed; free the nbuf instead of leaking */
			qdf_nbuf_free(ppdu_nbuf);
			return;
		}

		mon_pdev->rx_mon_stats.rx_undecoded_count++;
		mon_pdev->rx_mon_stats.rx_undecoded_error[abort_reason] += 1;

		dp_wdi_event_handler(WDI_EVENT_RX_PPDU_DESC_UNDECODED_METADATA,
				     soc, ppdu_nbuf, HTT_INVALID_PEER,
				     WDI_NO_VAL, pdev->pdev_id);
	}
}
#endif /* QCA_UNDECODED_METADATA_SUPPORT */

#ifdef QCA_MCOPY_SUPPORT
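/**
 * dp_rx_handle_mcopy_mode() - Strip the status headroom from an MSDU and
 * deliver it through the WDI event handler in M_COPY mode
 * @soc: core txrx main context
 * @pdev: pdev ctx
 * @ppdu_info: ppdu info structure from ppdu ring
 * @nbuf: nbuf carrying the MSDU payload
 * @fcs_ok_mpdu_cnt: index of the fcs-ok MPDU whose payload is delivered
 * @deliver_frame: whether to deliver the frame now or only prepare it
 *
 * Return: QDF_STATUS_SUCCESS if @nbuf was not consumed and the caller should
 *	   free it, QDF_STATUS_E_ALREADY once the MSDU payload has been
 *	   prepared (and delivered when @deliver_frame is true)
 */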
QDF_STATUS
dp_rx_handle_mcopy_mode(struct dp_soc *soc, struct dp_pdev *pdev,
			struct hal_rx_ppdu_info *ppdu_info, qdf_nbuf_t nbuf,
			uint8_t fcs_ok_mpdu_cnt, bool deliver_frame)
{
	uint16_t size = 0;
	struct ieee80211_frame *wh;
	uint32_t *nbuf_data;
	struct dp_mon_pdev *mon_pdev = pdev->monitor_pdev;

	if (!ppdu_info->ppdu_msdu_info[fcs_ok_mpdu_cnt].first_msdu_payload)
		return QDF_STATUS_SUCCESS;

	/* For M_COPY mode only one msdu per ppdu is sent to upper layer */
	if (mon_pdev->mcopy_mode == M_COPY) {
		if (mon_pdev->m_copy_id.rx_ppdu_id ==
		    ppdu_info->com_info.ppdu_id)
			return QDF_STATUS_SUCCESS;
	}

	wh = (struct ieee80211_frame *)
		(ppdu_info->ppdu_msdu_info[fcs_ok_mpdu_cnt].first_msdu_payload
		 + 4);

	size = (ppdu_info->ppdu_msdu_info[fcs_ok_mpdu_cnt].first_msdu_payload -
		qdf_nbuf_data(nbuf));

	if (!qdf_nbuf_pull_head(nbuf, size))
		return QDF_STATUS_SUCCESS;

	if (((wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK) ==
	     IEEE80211_FC0_TYPE_MGT) ||
	    ((wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK) ==
	     IEEE80211_FC0_TYPE_CTL)) {
		return QDF_STATUS_SUCCESS;
	}

	nbuf_data = (uint32_t *)qdf_nbuf_data(nbuf);
	*nbuf_data = mon_pdev->ppdu_info.com_info.ppdu_id;
	/* only retain RX MSDU payload in the skb */
	qdf_nbuf_trim_tail(nbuf, qdf_nbuf_len(nbuf) -
		ppdu_info->ppdu_msdu_info[fcs_ok_mpdu_cnt].payload_len);
	if (deliver_frame) {
		mon_pdev->m_copy_id.rx_ppdu_id = ppdu_info->com_info.ppdu_id;
		dp_wdi_event_handler(WDI_EVENT_RX_DATA, soc,
				     nbuf, HTT_INVALID_PEER,
				     WDI_NO_VAL, pdev->pdev_id);
	}
	return QDF_STATUS_E_ALREADY;
}

void
dp_rx_mcopy_handle_last_mpdu(struct dp_soc *soc, struct dp_pdev *pdev,
			     struct hal_rx_ppdu_info *ppdu_info,
			     qdf_nbuf_t status_nbuf)
{
	QDF_STATUS mcopy_status;
	qdf_nbuf_t nbuf_clone = NULL;
	struct dp_mon_pdev *mon_pdev = pdev->monitor_pdev;

	/* If the MPDU end tlv and RX header are received in different buffers,
	 * process the RX header based on fcs status.
	 */
	if (mon_pdev->mcopy_status_nbuf) {
		/* For M_COPY mode only one msdu per ppdu is sent to upper
		 * layer
		 */
		if (mon_pdev->mcopy_mode == M_COPY) {
			if (mon_pdev->m_copy_id.rx_ppdu_id ==
			    ppdu_info->com_info.ppdu_id)
				goto end1;
		}

		if (ppdu_info->is_fcs_passed) {
			nbuf_clone =
				qdf_nbuf_clone(mon_pdev->mcopy_status_nbuf);
			if (!nbuf_clone) {
				QDF_TRACE(QDF_MODULE_ID_TXRX,
					  QDF_TRACE_LEVEL_ERROR,
					  "Failed to clone nbuf");
				goto end1;
			}

			mon_pdev->m_copy_id.rx_ppdu_id =
				ppdu_info->com_info.ppdu_id;
			dp_wdi_event_handler(WDI_EVENT_RX_DATA, soc,
					     nbuf_clone,
					     HTT_INVALID_PEER,
					     WDI_NO_VAL, pdev->pdev_id);
			ppdu_info->is_fcs_passed = false;
		}
end1:
		qdf_nbuf_free(mon_pdev->mcopy_status_nbuf);
		mon_pdev->mcopy_status_nbuf = NULL;
	}

	/* If the MPDU end tlv and RX header are received in different buffers,
	 * preserve the RX header as the fcs status will be received in MPDU
	 * end tlv in the next buffer. So, cache the buffer to be processed in
	 * the next iteration.
	 */
	if ((ppdu_info->fcs_ok_cnt + ppdu_info->fcs_err_cnt) !=
	    ppdu_info->com_info.mpdu_cnt) {
		mon_pdev->mcopy_status_nbuf = qdf_nbuf_clone(status_nbuf);
		if (mon_pdev->mcopy_status_nbuf) {
			mcopy_status = dp_rx_handle_mcopy_mode(
					soc, pdev,
					ppdu_info,
					mon_pdev->mcopy_status_nbuf,
					ppdu_info->fcs_ok_cnt,
					false);
			if (mcopy_status == QDF_STATUS_SUCCESS) {
				qdf_nbuf_free(mon_pdev->mcopy_status_nbuf);
				mon_pdev->mcopy_status_nbuf = NULL;
			}
		}
	}
}
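
/**
 * dp_rx_mcopy_process_ppdu_info() - Update fcs ok/err MPDU accounting from
 * the MPDU end TLV status
 * @pdev: pdev ctx
 * @ppdu_info: ppdu info structure from ppdu ring
 * @tlv_status: parsed TLV status
 *
 * Return: none
 */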
void
dp_rx_mcopy_process_ppdu_info(struct dp_pdev *pdev,
			      struct hal_rx_ppdu_info *ppdu_info,
			      uint32_t tlv_status)
{
	struct dp_mon_pdev *mon_pdev = pdev->monitor_pdev;

	if (!mon_pdev->mcopy_mode)
		return;

	/* The fcs status is received in MPDU end tlv. If the RX header
	 * and its MPDU end tlv are received in different status buffer then
	 * to process that header ppdu_info->is_fcs_passed is used.
	 * If end tlv is received in next status buffer then com_info.mpdu_cnt
	 * will be 0 at the time of receiving MPDU end tlv and we update the
	 * is_fcs_passed flag based on ppdu_info->fcs_err.
	 */
	if (tlv_status != HAL_TLV_STATUS_MPDU_END)
		return;

	if (!ppdu_info->fcs_err) {
		if (ppdu_info->fcs_ok_cnt >
		    HAL_RX_MAX_MPDU_H_PER_STATUS_BUFFER) {
			dp_err("No. of MPDUs(%d) per status buff exceeded",
			       ppdu_info->fcs_ok_cnt);
			return;
		}
		if (ppdu_info->com_info.mpdu_cnt)
			ppdu_info->fcs_ok_cnt++;
		else
			ppdu_info->is_fcs_passed = true;
	} else {
		if (ppdu_info->com_info.mpdu_cnt)
			ppdu_info->fcs_err_cnt++;
		else
			ppdu_info->is_fcs_passed = false;
	}
}

void
dp_rx_process_mcopy_mode(struct dp_soc *soc, struct dp_pdev *pdev,
			 struct hal_rx_ppdu_info *ppdu_info,
			 uint32_t tlv_status,
			 qdf_nbuf_t status_nbuf)
{
	QDF_STATUS mcopy_status;
	qdf_nbuf_t nbuf_clone = NULL;
	uint8_t fcs_ok_mpdu_cnt = 0;
	struct dp_mon_pdev *mon_pdev = pdev->monitor_pdev;

	dp_rx_mcopy_handle_last_mpdu(soc, pdev, ppdu_info, status_nbuf);

	if (qdf_unlikely(!ppdu_info->com_info.mpdu_cnt))
		goto end;

	if (qdf_unlikely(!ppdu_info->fcs_ok_cnt))
		goto end;

	/* For M_COPY mode only one msdu per ppdu is sent to upper layer */
	if (mon_pdev->mcopy_mode == M_COPY)
		ppdu_info->fcs_ok_cnt = 1;

	while (fcs_ok_mpdu_cnt < ppdu_info->fcs_ok_cnt) {
		nbuf_clone = qdf_nbuf_clone(status_nbuf);
		if (!nbuf_clone) {
			QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
				  "Failed to clone nbuf");
			goto end;
		}

		mcopy_status = dp_rx_handle_mcopy_mode(soc, pdev,
						       ppdu_info,
						       nbuf_clone,
						       fcs_ok_mpdu_cnt,
						       true);

		if (mcopy_status == QDF_STATUS_SUCCESS)
			qdf_nbuf_free(nbuf_clone);

		fcs_ok_mpdu_cnt++;
	}
end:
	qdf_nbuf_free(status_nbuf);
	ppdu_info->fcs_ok_cnt = 0;
	ppdu_info->fcs_err_cnt = 0;
	ppdu_info->com_info.mpdu_cnt = 0;
	qdf_mem_zero(&ppdu_info->ppdu_msdu_info,
		     HAL_RX_MAX_MPDU_H_PER_STATUS_BUFFER *
		     sizeof(struct hal_rx_msdu_payload_info));
}
#endif /* QCA_MCOPY_SUPPORT */
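
/**
 * dp_rx_handle_smart_mesh_mode() - Deliver the NAC RX MSDU payload to the
 * monitor interface with a radiotap header
 * @soc: core txrx main context
 * @pdev: pdev ctx
 * @ppdu_info: ppdu info structure from ppdu ring
 * @nbuf: status nbuf carrying the MSDU payload
 *
 * Return: 0 on delivery, 1 on failure
 */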
int
dp_rx_handle_smart_mesh_mode(struct dp_soc *soc, struct dp_pdev *pdev,
			     struct hal_rx_ppdu_info *ppdu_info,
			     qdf_nbuf_t nbuf)
{
	uint8_t size = 0;
	struct dp_mon_vdev *mon_vdev;
	struct dp_mon_pdev *mon_pdev = pdev->monitor_pdev;

	if (!mon_pdev->mvdev) {
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
			  "[%s]:[%d] Monitor vdev is NULL !!",
			  __func__, __LINE__);
		return 1;
	}

	mon_vdev = mon_pdev->mvdev->monitor_vdev;

	if (!ppdu_info->msdu_info.first_msdu_payload) {
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
			  "[%s]:[%d] First msdu payload not present",
			  __func__, __LINE__);
		return 1;
	}

	/* Adding 4 bytes to get to start of 802.11 frame after phy_ppdu_id */
	size = (ppdu_info->msdu_info.first_msdu_payload -
		qdf_nbuf_data(nbuf)) + 4;
	ppdu_info->msdu_info.first_msdu_payload = NULL;

	if (!qdf_nbuf_pull_head(nbuf, size)) {
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
			  "[%s]:[%d] No header present",
			  __func__, __LINE__);
		return 1;
	}

	/* Only retain RX MSDU payload in the skb */
	qdf_nbuf_trim_tail(nbuf, qdf_nbuf_len(nbuf) -
			   ppdu_info->msdu_info.payload_len);
	if (!qdf_nbuf_update_radiotap(&mon_pdev->ppdu_info.rx_status, nbuf,
				      qdf_nbuf_headroom(nbuf))) {
		DP_STATS_INC(pdev, dropped.mon_radiotap_update_err, 1);
		return 1;
	}

	mon_vdev->osif_rx_mon(mon_pdev->mvdev->osif_vdev,
			      nbuf, NULL);
	mon_pdev->ppdu_info.rx_status.monitor_direct_used = 0;
	return 0;
}
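
/**
 * dp_rx_nbuf_prepare() - Allocate and DMA-map a zeroed monitor status buffer,
 * retrying up to QDF_NBUF_ALLOC_MAP_RETRY_THRESHOLD times
 * @soc: core txrx main context
 * @pdev: pdev ctx
 *
 * Return: nbuf on success, NULL if allocation or mapping keeps failing
 */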
qdf_nbuf_t
dp_rx_nbuf_prepare(struct dp_soc *soc, struct dp_pdev *pdev)
{
	uint8_t *buf;
	int32_t nbuf_retry_count;
	QDF_STATUS ret;
	qdf_nbuf_t nbuf = NULL;

	for (nbuf_retry_count = 0; nbuf_retry_count <
	     QDF_NBUF_ALLOC_MAP_RETRY_THRESHOLD;
	     nbuf_retry_count++) {
		/* Allocate a new skb using alloc_skb */
		nbuf = qdf_nbuf_alloc_no_recycler(RX_MON_STATUS_BUF_SIZE,
						  RX_MON_STATUS_BUF_RESERVATION,
						  RX_DATA_BUFFER_ALIGNMENT);

		if (!nbuf) {
			DP_STATS_INC(pdev, replenish.nbuf_alloc_fail, 1);
			continue;
		}

		buf = qdf_nbuf_data(nbuf);

		memset(buf, 0, RX_MON_STATUS_BUF_SIZE);

		ret = qdf_nbuf_map_nbytes_single(soc->osdev, nbuf,
						 QDF_DMA_FROM_DEVICE,
						 RX_MON_STATUS_BUF_SIZE);

		/* nbuf map failed */
		if (qdf_unlikely(QDF_IS_STATUS_ERROR(ret))) {
			qdf_nbuf_free(nbuf);
			DP_STATS_INC(pdev, replenish.map_err, 1);
			continue;
		}
		/* qdf_nbuf alloc and map succeeded */
		break;
	}

	/* nbuf allocation or map failed even after all retries */
	if (qdf_unlikely(nbuf_retry_count >=
			 QDF_NBUF_ALLOC_MAP_RETRY_THRESHOLD))
		return NULL;

	return nbuf;
}

#ifndef DISABLE_MON_CONFIG
uint32_t
dp_mon_process(struct dp_soc *soc, struct dp_intr *int_ctx,
	       uint32_t mac_id, uint32_t quota)
{
	struct dp_mon_soc *mon_soc = soc->monitor_soc;

	if (mon_soc && mon_soc->mon_rx_process)
		return mon_soc->mon_rx_process(soc, int_ctx,
					       mac_id, quota);
	return 0;
}
#else
uint32_t
dp_mon_process(struct dp_soc *soc, struct dp_intr *int_ctx,
	       uint32_t mac_id, uint32_t quota)
{
	return 0;
}
#endif

/**
 * dp_send_mgmt_packet_to_stack() - send indication to upper layers
 * @soc: soc handle
 * @nbuf: Mgmt packet
 * @pdev: pdev handle
 *
 * Return: QDF_STATUS_SUCCESS on success
 *	   QDF_STATUS_E_INVAL on error
 */
#ifdef QCA_MCOPY_SUPPORT
static inline QDF_STATUS
dp_send_mgmt_packet_to_stack(struct dp_soc *soc,
			     qdf_nbuf_t nbuf,
			     struct dp_pdev *pdev)
{
	uint32_t *nbuf_data;
	struct ieee80211_frame *wh;
	qdf_frag_t addr;
	struct dp_mon_pdev *mon_pdev = pdev->monitor_pdev;

	if (!nbuf)
		return QDF_STATUS_E_INVAL;

	/* Get addr pointing to the 802.11 header */
	addr = dp_rx_mon_get_nbuf_80211_hdr(nbuf);
	if (qdf_unlikely(!addr)) {
		qdf_nbuf_free(nbuf);
		return QDF_STATUS_E_INVAL;
	}

	/* Drop the frame if it is neither a mgmt nor a ctrl packet */
	wh = (struct ieee80211_frame *)addr;
	if (((wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK) !=
	     IEEE80211_FC0_TYPE_MGT) &&
	    ((wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK) !=
	     IEEE80211_FC0_TYPE_CTL)) {
		qdf_nbuf_free(nbuf);
		return QDF_STATUS_E_INVAL;
	}
	nbuf_data = (uint32_t *)qdf_nbuf_push_head(nbuf, 4);
	if (!nbuf_data) {
		QDF_TRACE(QDF_MODULE_ID_DP,
			  QDF_TRACE_LEVEL_ERROR,
			  FL("No headroom"));
		qdf_nbuf_free(nbuf);
		return QDF_STATUS_E_INVAL;
	}
	*nbuf_data = mon_pdev->ppdu_info.com_info.ppdu_id;

	dp_wdi_event_handler(WDI_EVENT_RX_MGMT_CTRL, soc, nbuf,
			     HTT_INVALID_PEER,
			     WDI_NO_VAL, pdev->pdev_id);
	return QDF_STATUS_SUCCESS;
}
#else
static inline QDF_STATUS
dp_send_mgmt_packet_to_stack(struct dp_soc *soc,
			     qdf_nbuf_t nbuf,
			     struct dp_pdev *pdev)
{
	return QDF_STATUS_SUCCESS;
}
#endif /* QCA_MCOPY_SUPPORT */
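
/**
 * dp_rx_mon_process_dest_pktlog() - Send CBF frames (mgmt frames of
 * action-no-ack subtype) to the pktlog handler when CBF logging is enabled
 * @soc: core txrx main context
 * @mac_id: mac id for the lmac ring
 * @mpdu: restitched MPDU
 *
 * Return: QDF_STATUS
 */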
QDF_STATUS dp_rx_mon_process_dest_pktlog(struct dp_soc *soc,
					 uint32_t mac_id,
					 qdf_nbuf_t mpdu)
{
	uint32_t event, msdu_timestamp = 0;
	struct dp_pdev *pdev = dp_get_pdev_for_lmac_id(soc, mac_id);
	void *data;
	struct ieee80211_frame *wh;
	uint8_t type, subtype;
	struct dp_mon_pdev *mon_pdev;

	if (!pdev)
		return QDF_STATUS_E_INVAL;

	mon_pdev = pdev->monitor_pdev;

	if (mon_pdev->rx_pktlog_cbf) {
		if (qdf_nbuf_get_nr_frags(mpdu))
			data = qdf_nbuf_get_frag_addr(mpdu, 0);
		else
			data = qdf_nbuf_data(mpdu);

		/* CBF logging is required in both full mode and lite mode.
		 * Need to look for an mpdu with:
		 * TYPE = MGMT, SUBTYPE = ACTION NO ACK in the header
		 */
		event = WDI_EVENT_RX_CBF;

		wh = (struct ieee80211_frame *)data;
		type = (wh)->i_fc[0] & IEEE80211_FC0_TYPE_MASK;
		subtype = (wh)->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK;
		if (type == IEEE80211_FC0_TYPE_MGT &&
		    subtype == IEEE80211_FCO_SUBTYPE_ACTION_NO_ACK) {
			msdu_timestamp = mon_pdev->ppdu_info.rx_status.tsft;
			dp_rx_populate_cbf_hdr(soc,
					       mac_id, event,
					       mpdu,
					       msdu_timestamp);
		}
	}
	return QDF_STATUS_SUCCESS;
}
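/*
 * Worked example of the frame-control match above: an Action No Ack frame
 * carries i_fc[0] = 0xe0, so with the usual 802.11 masks
 * (IEEE80211_FC0_TYPE_MASK = 0x0c, IEEE80211_FC0_SUBTYPE_MASK = 0xf0):
 *
 *	type    = 0xe0 & 0x0c = 0x00 -> IEEE80211_FC0_TYPE_MGT
 *	subtype = 0xe0 & 0xf0 = 0xe0 -> IEEE80211_FCO_SUBTYPE_ACTION_NO_ACK
 *
 * which is exactly the MGMT/ACTION NO ACK combination the CBF logging
 * path is looking for.
 */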
QDF_STATUS dp_rx_mon_deliver(struct dp_soc *soc, uint32_t mac_id,
			     qdf_nbuf_t head_msdu, qdf_nbuf_t tail_msdu)
{
	struct dp_pdev *pdev = dp_get_pdev_for_lmac_id(soc, mac_id);
	struct cdp_mon_status *rs;
	qdf_nbuf_t mon_skb, skb_next;
	qdf_nbuf_t mon_mpdu = NULL;
	struct dp_mon_vdev *mon_vdev;
	struct dp_mon_pdev *mon_pdev;

	if (!pdev)
		goto mon_deliver_fail;

	mon_pdev = pdev->monitor_pdev;
	rs = &mon_pdev->rx_mon_recv_status;

	if (!mon_pdev->mvdev && !mon_pdev->mcopy_mode &&
	    !mon_pdev->rx_pktlog_cbf)
		goto mon_deliver_fail;

	/* restitch mon MPDU for delivery via monitor interface */
	mon_mpdu = dp_rx_mon_restitch_mpdu(soc, mac_id, head_msdu,
					   tail_msdu, rs);

	/* If MPDU restitch fails, free the buffers */
	if (!mon_mpdu) {
		dp_info("MPDU restitch failed, free buffers");
		goto mon_deliver_fail;
	}

	dp_rx_mon_process_dest_pktlog(soc, mac_id, mon_mpdu);

	/* monitor vap cannot be present when mcopy is enabled,
	 * hence the same skb can be consumed
	 */
	if (mon_pdev->mcopy_mode)
		return dp_send_mgmt_packet_to_stack(soc, mon_mpdu, pdev);

	if (mon_mpdu && mon_pdev->mvdev &&
	    mon_pdev->mvdev->osif_vdev &&
	    mon_pdev->mvdev->monitor_vdev &&
	    mon_pdev->mvdev->monitor_vdev->osif_rx_mon) {
		mon_vdev = mon_pdev->mvdev->monitor_vdev;

		mon_pdev->ppdu_info.rx_status.ppdu_id =
			mon_pdev->ppdu_info.com_info.ppdu_id;
		mon_pdev->ppdu_info.rx_status.device_id = soc->device_id;
		mon_pdev->ppdu_info.rx_status.chan_noise_floor =
			pdev->chan_noise_floor;
		dp_handle_tx_capture(soc, pdev, mon_mpdu);

		if (!qdf_nbuf_update_radiotap(&mon_pdev->ppdu_info.rx_status,
					      mon_mpdu,
					      qdf_nbuf_headroom(mon_mpdu))) {
			DP_STATS_INC(pdev, dropped.mon_radiotap_update_err, 1);
			goto mon_deliver_fail;
		}

		dp_rx_mon_update_pf_tag_to_buf_headroom(soc, mon_mpdu);
		mon_vdev->osif_rx_mon(mon_pdev->mvdev->osif_vdev,
				      mon_mpdu,
				      &mon_pdev->ppdu_info.rx_status);
	} else {
		dp_rx_mon_dest_debug("%pK: mon_mpdu=%pK monitor_vdev %pK osif_vdev %pK",
				     soc, mon_mpdu, mon_pdev->mvdev,
				     (mon_pdev->mvdev ?
				      mon_pdev->mvdev->osif_vdev : NULL));
		goto mon_deliver_fail;
	}

	return QDF_STATUS_SUCCESS;

mon_deliver_fail:
	mon_skb = head_msdu;
	while (mon_skb) {
		skb_next = qdf_nbuf_next(mon_skb);

		dp_rx_mon_dest_debug("%pK: [%s][%d] mon_skb=%pK len %u",
				     soc, __func__, __LINE__,
				     mon_skb, mon_skb->len);

		qdf_nbuf_free(mon_skb);
		mon_skb = skb_next;
	}
	return QDF_STATUS_E_INVAL;
}

QDF_STATUS dp_rx_mon_deliver_non_std(struct dp_soc *soc,
				     uint32_t mac_id)
{
	struct dp_pdev *pdev = dp_get_pdev_for_lmac_id(soc, mac_id);
	ol_txrx_rx_mon_fp osif_rx_mon;
	qdf_nbuf_t dummy_msdu;
	struct dp_mon_pdev *mon_pdev;
	struct dp_mon_vdev *mon_vdev;

	/* Sanity checking */
	if (!pdev || !pdev->monitor_pdev)
		goto mon_deliver_non_std_fail;

	mon_pdev = pdev->monitor_pdev;

	if (!mon_pdev->mvdev ||
	    !mon_pdev->mvdev->monitor_vdev ||
	    !mon_pdev->mvdev->monitor_vdev->osif_rx_mon)
		goto mon_deliver_non_std_fail;

	mon_vdev = mon_pdev->mvdev->monitor_vdev;
	/* Generate a dummy skb_buff */
	osif_rx_mon = mon_vdev->osif_rx_mon;
	dummy_msdu = qdf_nbuf_alloc(soc->osdev, MAX_MONITOR_HEADER,
				    MAX_MONITOR_HEADER, 4, FALSE);
	if (!dummy_msdu)
		goto allocate_dummy_msdu_fail;

	qdf_nbuf_set_pktlen(dummy_msdu, 0);
	qdf_nbuf_set_next(dummy_msdu, NULL);

	mon_pdev->ppdu_info.rx_status.ppdu_id =
		mon_pdev->ppdu_info.com_info.ppdu_id;

	/* Apply the radio header to this dummy skb */
	if (!qdf_nbuf_update_radiotap(&mon_pdev->ppdu_info.rx_status,
				      dummy_msdu,
				      qdf_nbuf_headroom(dummy_msdu))) {
		DP_STATS_INC(pdev, dropped.mon_radiotap_update_err, 1);
		qdf_nbuf_free(dummy_msdu);
		goto mon_deliver_non_std_fail;
	}

	/* deliver to the user layer application */
	osif_rx_mon(mon_pdev->mvdev->osif_vdev,
		    dummy_msdu, NULL);

	/* Clear rx_status */
	qdf_mem_zero(&mon_pdev->ppdu_info.rx_status,
		     sizeof(mon_pdev->ppdu_info.rx_status));
	mon_pdev->mon_ppdu_status = DP_PPDU_STATUS_START;

	return QDF_STATUS_SUCCESS;

allocate_dummy_msdu_fail:
	dp_rx_mon_dest_debug("%pK: mon_skb=%pK ",
			     soc, dummy_msdu);

mon_deliver_non_std_fail:
	return QDF_STATUS_E_INVAL;
}
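/*
 * Receiver-side sketch (hypothetical, for illustration only): because
 * dp_rx_mon_deliver_non_std() sets the packet length to zero before
 * qdf_nbuf_update_radiotap(), the monitor interface listener receives a
 * frame consisting solely of the radiotap header, i.e. PPDU status with
 * no 802.11 payload behind it. The struct below is the standard Linux
 * radiotap header, assumed available to the consumer.
 */
#if 0	/* example only, never compiled */
static void dp_example_non_std_consumer(qdf_nbuf_t msdu)
{
	struct ieee80211_radiotap_header *rth =
		(struct ieee80211_radiotap_header *)qdf_nbuf_data(msdu);

	/* The whole buffer is the radiotap header: rth->it_len spans it
	 * and no 802.11 frame follows.
	 */
}
#endif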
/**
 * dp_rx_process_peer_based_pktlog() - Process Rx pktlog if peer based
 *                                     filtering is enabled
 * @soc: core txrx main context
 * @ppdu_info: Structure for rx ppdu info
 * @status_nbuf: Qdf nbuf abstraction for Linux skb
 * @pdev_id: mac_id/pdev_id correspondingly for MCL and WIN
 *
 * Return: none
 */
void
dp_rx_process_peer_based_pktlog(struct dp_soc *soc,
				struct hal_rx_ppdu_info *ppdu_info,
				qdf_nbuf_t status_nbuf, uint32_t pdev_id)
{
	struct dp_peer *peer;
	struct mon_rx_user_status *rx_user_status;
	uint32_t num_users = ppdu_info->com_info.num_users;
	uint16_t sw_peer_id;

	/* Sanity check for num_users */
	if (!num_users)
		return;

	qdf_assert_always(num_users <= CDP_MU_MAX_USERS);
	rx_user_status = &ppdu_info->rx_user_status[num_users - 1];

	sw_peer_id = rx_user_status->sw_peer_id;

	peer = dp_peer_get_ref_by_id(soc, sw_peer_id,
				     DP_MOD_ID_RX_PPDU_STATS);

	if (!peer)
		return;

	if ((peer->peer_id != HTT_INVALID_PEER) && (peer->monitor_peer) &&
	    (peer->monitor_peer->peer_based_pktlog_filter)) {
		dp_wdi_event_handler(WDI_EVENT_RX_DESC, soc,
				     status_nbuf,
				     peer->peer_id,
				     WDI_NO_VAL, pdev_id);
	}
	dp_peer_unref_delete(peer,
			     DP_MOD_ID_RX_PPDU_STATS);
}

uint32_t
dp_mon_rx_add_tlv(uint8_t id, uint16_t len, void *value, qdf_nbuf_t mpdu_nbuf)
{
	uint8_t *dest = NULL;
	uint32_t num_bytes_pushed = 0;

	/* Add tlv id field */
	dest = qdf_nbuf_push_head(mpdu_nbuf, sizeof(uint8_t));
	if (qdf_likely(dest)) {
		*((uint8_t *)dest) = id;
		num_bytes_pushed += sizeof(uint8_t);
	}

	/* Add tlv len field */
	dest = qdf_nbuf_push_head(mpdu_nbuf, sizeof(uint16_t));
	if (qdf_likely(dest)) {
		*((uint16_t *)dest) = len;
		num_bytes_pushed += sizeof(uint16_t);
	}

	/* Add tlv value field */
	dest = qdf_nbuf_push_head(mpdu_nbuf, len);
	if (qdf_likely(dest)) {
		qdf_mem_copy(dest, value, len);
		num_bytes_pushed += len;
	}

	return num_bytes_pushed;
}
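/*
 * Layout note with a worked example (values are hypothetical): each
 * qdf_nbuf_push_head() in dp_mon_rx_add_tlv() prepends to the buffer,
 * so the three fields land in reverse push order. After a successful
 * call the head of mpdu_nbuf reads:
 *
 *	| value (len bytes) | len (2 bytes) | id (1 byte) | original data |
 *
 * and the return value is len + 3. For instance, pushing a 2-byte value
 * returns 5 and consumes 5 bytes of headroom.
 */
#if 0	/* example only, never compiled */
static void dp_example_add_tlv(qdf_nbuf_t mpdu_nbuf)
{
	uint16_t phy_ppdu_id = 0x1234;	/* hypothetical id/value pair */
	uint32_t pushed;

	pushed = dp_mon_rx_add_tlv(0x21, sizeof(phy_ppdu_id),
				   &phy_ppdu_id, mpdu_nbuf);
	/* pushed == 5 when the nbuf had at least 5 bytes of headroom */
}
#endif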