/*
 * Copyright (c) 2017-2021, The Linux Foundation. All rights reserved.
 * Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */
#include "hal_hw_headers.h"
#include "dp_types.h"
#include "dp_rx.h"
#include "dp_peer.h"
#include "hal_rx.h"
#include "hal_api.h"
#include "qdf_trace.h"
#include "qdf_nbuf.h"
#include "hal_api_mon.h"
#include "dp_internal.h"
#include "qdf_mem.h"   /* qdf_mem_malloc, qdf_mem_free */
#include "dp_htt.h"
#include "dp_mon.h"
#include "dp_rx_mon.h"
#include "htt.h"
#include <dp_mon_1.0.h>
#include <dp_rx_mon_1.0.h>

#ifdef FEATURE_PERPKT_INFO
#include "dp_ratetable.h"
#endif

static inline
QDF_STATUS dp_rx_mon_status_buffers_replenish(struct dp_soc *dp_soc,
					      uint32_t mac_id,
					      struct dp_srng *dp_rxdma_srng,
					      struct rx_desc_pool *rx_desc_pool,
					      uint32_t num_req_buffers,
					      union dp_rx_desc_list_elem_t **desc_list,
					      union dp_rx_desc_list_elem_t **tail,
					      uint8_t owner);

/**
 * dp_rx_mon_handle_status_buf_done() - Handle status buf DMA not done
 * @pdev: DP pdev handle
 * @mon_status_srng: Monitor status SRNG
 *
 * As per the MAC team's suggestion, if the HP + 2 entry's DMA done bit is
 * set, skip the HP + 1 entry and start processing in the next interrupt.
 * If the HP + 2 entry's DMA done bit is not set, poll on the HP + 1 entry
 * until its DMA done TLV is set.
 *
 * Return: enum dp_mon_reap_status
 */
enum dp_mon_reap_status
dp_rx_mon_handle_status_buf_done(struct dp_pdev *pdev,
				 void *mon_status_srng)
{
	struct dp_soc *soc = pdev->soc;
	hal_soc_handle_t hal_soc;
	void *ring_entry;
	struct hal_buf_info hbi;
	qdf_nbuf_t status_nbuf;
	struct dp_rx_desc *rx_desc;
	void *rx_tlv;
	QDF_STATUS buf_status;
	struct dp_mon_pdev *mon_pdev = pdev->monitor_pdev;

	hal_soc = soc->hal_soc;

	ring_entry = hal_srng_src_peek_n_get_next_next(hal_soc,
						       mon_status_srng);
	if (!ring_entry) {
		dp_rx_mon_status_debug("%pK: Monitor status ring entry is NULL for SRNG: %pK",
				       soc, mon_status_srng);
		return DP_MON_STATUS_NO_DMA;
	}

	hal_rx_buf_cookie_rbm_get(soc->hal_soc, (uint32_t *)ring_entry,
				  &hbi);
	rx_desc = dp_rx_cookie_2_va_mon_status(soc, hbi.sw_cookie);

	qdf_assert_always(rx_desc);

	status_nbuf = rx_desc->nbuf;

	qdf_nbuf_sync_for_cpu(soc->osdev, status_nbuf,
			      QDF_DMA_FROM_DEVICE);

	rx_tlv = qdf_nbuf_data(status_nbuf);
	buf_status = hal_get_rx_status_done(rx_tlv);

	/* If the status buffer DMA is not done:
	 * 1. As per the MAC team's suggestion, if the HP + 2 entry's DMA
	 *    done bit is set, replenish the HP + 1 entry and start
	 *    processing in the next interrupt.
	 * 2. If the HP + 2 entry's DMA done bit is not set,
	 *    hold on to the mon destination ring.
	 */
	if (buf_status != QDF_STATUS_SUCCESS) {
		dp_err_rl("Monitor status ring: DMA is not done "
			  "for nbuf: %pK", status_nbuf);
		mon_pdev->rx_mon_stats.tlv_tag_status_err++;
		return DP_MON_STATUS_REPLENISH;
	}

	mon_pdev->rx_mon_stats.status_buf_done_war++;

	return DP_MON_STATUS_REPLENISH;
}
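
/*
 * Illustrative ring view for the WAR above (a sketch, not authoritative):
 *
 *   HP     -> entry currently being reaped by the caller
 *   HP + 1 -> entry whose DMA-done TLV has not been observed yet
 *   HP + 2 -> entry peeked via hal_srng_src_peek_n_get_next_next()
 *
 * If no HP + 2 entry is available, DP_MON_STATUS_NO_DMA is returned and the
 * caller polls the same entry on its next run. Otherwise the function
 * returns DP_MON_STATUS_REPLENISH, counting status_buf_done_war when the
 * HP + 2 buffer's DMA-done bit is set and tlv_tag_status_err when it is not.
 */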

#ifdef WLAN_RX_PKT_CAPTURE_ENH
#include "dp_rx_mon_feature.h"
#else
static QDF_STATUS
dp_rx_handle_enh_capture(struct dp_soc *soc, struct dp_pdev *pdev,
			 struct hal_rx_ppdu_info *ppdu_info)
{
	return QDF_STATUS_SUCCESS;
}

static void
dp_rx_mon_enh_capture_process(struct dp_pdev *pdev, uint32_t tlv_status,
			      qdf_nbuf_t status_nbuf,
			      struct hal_rx_ppdu_info *ppdu_info,
			      bool *nbuf_used)
{
}
#endif

#ifdef WLAN_TX_PKT_CAPTURE_ENH
#include "dp_rx_mon_feature.h"
#else
static QDF_STATUS
dp_send_ack_frame_to_stack(struct dp_soc *soc,
			   struct dp_pdev *pdev,
			   struct hal_rx_ppdu_info *ppdu_info)
{
	return QDF_STATUS_SUCCESS;
}
#endif

#if defined(HTT_UL_OFDMA_USER_INFO_V0_W0_VALID_M)
static inline void
dp_rx_ul_ofdma_ru_size_to_width(
	uint32_t ru_size,
	uint32_t *ru_width)
{
	uint32_t width;

	width = 0;
	switch (ru_size) {
	case HTT_UL_OFDMA_V0_RU_SIZE_RU_26:
		width = 1;
		break;
	case HTT_UL_OFDMA_V0_RU_SIZE_RU_52:
		width = 2;
		break;
	case HTT_UL_OFDMA_V0_RU_SIZE_RU_106:
		width = 4;
		break;
	case HTT_UL_OFDMA_V0_RU_SIZE_RU_242:
		width = 9;
		break;
	case HTT_UL_OFDMA_V0_RU_SIZE_RU_484:
		width = 18;
		break;
	case HTT_UL_OFDMA_V0_RU_SIZE_RU_996:
		width = 37;
		break;
	case HTT_UL_OFDMA_V0_RU_SIZE_RU_996x2:
		width = 74;
		break;
	default:
		dp_rx_mon_status_err("RU size to width convert err");
		break;
	}
	*ru_width = width;
}

static inline void
dp_rx_mon_handle_mu_ul_info(struct hal_rx_ppdu_info *ppdu_info)
{
	struct mon_rx_user_status *mon_rx_user_status;
	uint32_t num_users;
	uint32_t i;
	uint32_t mu_ul_user_v0_word0;
	uint32_t mu_ul_user_v0_word1;
	uint32_t ru_width;
	uint32_t ru_size;

	if (!(ppdu_info->rx_status.reception_type == HAL_RX_TYPE_MU_OFDMA ||
	      ppdu_info->rx_status.reception_type == HAL_RX_TYPE_MU_MIMO))
		return;

	num_users = ppdu_info->com_info.num_users;
	if (num_users > HAL_MAX_UL_MU_USERS)
		num_users = HAL_MAX_UL_MU_USERS;
	for (i = 0; i < num_users; i++) {
		mon_rx_user_status = &ppdu_info->rx_user_status[i];
		mu_ul_user_v0_word0 =
			mon_rx_user_status->mu_ul_user_v0_word0;
		mu_ul_user_v0_word1 =
			mon_rx_user_status->mu_ul_user_v0_word1;

		if (HTT_UL_OFDMA_USER_INFO_V0_W0_VALID_GET(
			mu_ul_user_v0_word0) &&
		    !HTT_UL_OFDMA_USER_INFO_V0_W0_VER_GET(
			mu_ul_user_v0_word0)) {
			mon_rx_user_status->mcs =
				HTT_UL_OFDMA_USER_INFO_V0_W1_MCS_GET(
					mu_ul_user_v0_word1);
			mon_rx_user_status->nss =
				HTT_UL_OFDMA_USER_INFO_V0_W1_NSS_GET(
					mu_ul_user_v0_word1) + 1;

			mon_rx_user_status->mu_ul_info_valid = 1;
			mon_rx_user_status->ofdma_ru_start_index =
				HTT_UL_OFDMA_USER_INFO_V0_W1_RU_START_GET(
					mu_ul_user_v0_word1);

			ru_size =
				HTT_UL_OFDMA_USER_INFO_V0_W1_RU_SIZE_GET(
					mu_ul_user_v0_word1);
			dp_rx_ul_ofdma_ru_size_to_width(ru_size, &ru_width);
			mon_rx_user_status->ofdma_ru_width = ru_width;
			mon_rx_user_status->ofdma_ru_size = ru_size;
		}
	}
}
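
/*
 * Worked example (illustrative values only): for a user whose
 * mu_ul_user_v0_word0 has the VALID bit set and a version field of 0, a
 * mu_ul_user_v0_word1 carrying MCS = 7, an NSS field of 1 and an RU size of
 * HTT_UL_OFDMA_V0_RU_SIZE_RU_242 would be recorded above as mcs = 7,
 * nss = 2 (the field is zero-based) and ofdma_ru_width = 9. The width
 * appears to be expressed as the number of 26-tone RU positions the RU
 * spans (e.g. an RU-996 spans 37 such positions in an 80 MHz PPDU).
 */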
#else
static inline void
dp_rx_mon_handle_mu_ul_info(struct hal_rx_ppdu_info *ppdu_info)
{
}
#endif

#ifdef QCA_UNDECODED_METADATA_SUPPORT
static inline bool
dp_rx_mon_check_phyrx_abort(struct dp_pdev *pdev,
			    struct hal_rx_ppdu_info *ppdu_info)
{
	return (pdev->monitor_pdev->undecoded_metadata_capture &&
		ppdu_info->rx_status.phyrx_abort);
}

static inline void
dp_rx_mon_handle_ppdu_undecoded_metadata(struct dp_soc *soc,
					 struct dp_pdev *pdev,
					 struct hal_rx_ppdu_info *ppdu_info)
{
	if (pdev->monitor_pdev->undecoded_metadata_capture)
		dp_rx_handle_ppdu_undecoded_metadata(soc, pdev, ppdu_info);

	pdev->monitor_pdev->mon_ppdu_status = DP_PPDU_STATUS_START;
}
#else
static inline bool
dp_rx_mon_check_phyrx_abort(struct dp_pdev *pdev,
			    struct hal_rx_ppdu_info *ppdu_info)
{
	return false;
}

static inline void
dp_rx_mon_handle_ppdu_undecoded_metadata(struct dp_soc *soc,
					 struct dp_pdev *pdev,
					 struct hal_rx_ppdu_info *ppdu_info)
{
}
#endif

#ifdef QCA_SUPPORT_SCAN_SPCL_VAP_STATS
/**
 * dp_rx_mon_update_scan_spcl_vap_stats() - Update special vap stats
 * @pdev: dp pdev context
 * @ppdu_info: ppdu info structure from ppdu ring
 *
 * Return: none
 */
static inline void
dp_rx_mon_update_scan_spcl_vap_stats(struct dp_pdev *pdev,
				     struct hal_rx_ppdu_info *ppdu_info)
{
	struct mon_rx_user_status *rx_user_status = NULL;
	struct dp_mon_pdev *mon_pdev = NULL;
	struct dp_mon_vdev *mon_vdev = NULL;
	uint32_t num_users = 0;
	uint32_t user = 0;

	mon_pdev = pdev->monitor_pdev;
	if (!mon_pdev || !mon_pdev->mvdev)
		return;

	mon_vdev = mon_pdev->mvdev->monitor_vdev;
	if (!mon_vdev || !mon_vdev->scan_spcl_vap_stats)
		return;

	num_users = ppdu_info->com_info.num_users;
	for (user = 0; user < num_users; user++) {
		rx_user_status = &ppdu_info->rx_user_status[user];
		mon_vdev->scan_spcl_vap_stats->rx_ok_pkts +=
			rx_user_status->mpdu_cnt_fcs_ok;
		mon_vdev->scan_spcl_vap_stats->rx_ok_bytes +=
			rx_user_status->mpdu_ok_byte_count;
		mon_vdev->scan_spcl_vap_stats->rx_err_pkts +=
			rx_user_status->mpdu_cnt_fcs_err;
		mon_vdev->scan_spcl_vap_stats->rx_err_bytes +=
			rx_user_status->mpdu_err_byte_count;
	}
	mon_vdev->scan_spcl_vap_stats->rx_mgmt_pkts +=
		ppdu_info->frm_type_info.rx_mgmt_cnt;
	mon_vdev->scan_spcl_vap_stats->rx_ctrl_pkts +=
		ppdu_info->frm_type_info.rx_ctrl_cnt;
	mon_vdev->scan_spcl_vap_stats->rx_data_pkts +=
		ppdu_info->frm_type_info.rx_data_cnt;
}
#else
static inline void
dp_rx_mon_update_scan_spcl_vap_stats(struct dp_pdev *pdev,
				     struct hal_rx_ppdu_info *ppdu_info)
{
}
#endif

#ifdef WLAN_FEATURE_DP_MON_STATUS_RING_HISTORY
/**
 * dp_rx_mon_status_ring_record_entry() - Record one entry of a particular
 *					  event type into the monitor status
 *					  buffer tracking history.
 * @soc: DP soc handle
 * @event: event type
 * @ring_desc: Monitor status ring descriptor
 * @rx_desc: RX descriptor
 * @nbuf: status buffer
 *
 * Return: None
 */
static void
dp_rx_mon_status_ring_record_entry(struct dp_soc *soc,
				   enum dp_mon_status_process_event event,
				   hal_ring_desc_t ring_desc,
				   struct dp_rx_desc *rx_desc,
				   qdf_nbuf_t nbuf)
{
	struct dp_mon_stat_info_record *record;
	struct hal_buf_info hbi;
	uint32_t idx;

	if (qdf_unlikely(!soc->mon_status_ring_history))
		return;

	idx = dp_history_get_next_index(&soc->mon_status_ring_history->index,
					DP_MON_STATUS_HIST_MAX);

	/* No NULL check needed for record since it's an array */
	record = &soc->mon_status_ring_history->entry[idx];

	record->timestamp = qdf_get_log_timestamp();
	if (event == DP_MON_STATUS_BUF_REAP) {
		hal_rx_buffer_addr_info_get_paddr(ring_desc, &hbi);

		/* buffer_addr_info is the first element of ring_desc */
		hal_rx_buf_cookie_rbm_get(soc->hal_soc, (uint32_t *)ring_desc,
					  &hbi);

		record->hbi.paddr = hbi.paddr;
		record->hbi.sw_cookie = hbi.sw_cookie;
		record->hbi.rbm = hbi.rbm;
		record->rx_desc = rx_desc;
		if (rx_desc) {
			record->nbuf = rx_desc->nbuf;
			record->rx_desc_nbuf_data =
				qdf_nbuf_data(rx_desc->nbuf);
		} else {
			record->nbuf = NULL;
			record->rx_desc_nbuf_data = NULL;
		}
	}

	if (event == DP_MON_STATUS_BUF_ENQUEUE) {
		record->nbuf = nbuf;
		record->rx_desc_nbuf_data = qdf_nbuf_data(nbuf);
	}

	if (event == DP_MON_STATUS_BUF_DEQUEUE) {
		record->nbuf = nbuf;
		if (nbuf)
			record->rx_desc_nbuf_data = qdf_nbuf_data(nbuf);
		else
			record->rx_desc_nbuf_data = NULL;
	}
}
#else
static void
dp_rx_mon_status_ring_record_entry(struct dp_soc *soc,
				   enum dp_mon_status_process_event event,
				   hal_ring_desc_t ring_desc,
				   struct dp_rx_desc *rx_desc,
				   qdf_nbuf_t nbuf)
{
}
#endif

/**
 * dp_rx_mon_status_process_tlv() - Process status TLVs in status buffers
 *	on the Rx status queue posted by status SRNG processing.
 * @soc: core txrx main context
 * @int_ctx: interrupt context
 * @mac_id: mac_id which is one of 3 mac_ids
 * @quota: amount of work which can be done
 *
 * Return: none
 */
static inline void
dp_rx_mon_status_process_tlv(struct dp_soc *soc, struct dp_intr *int_ctx,
			     uint32_t mac_id, uint32_t quota)
{
	struct dp_pdev *pdev = dp_get_pdev_for_lmac_id(soc, mac_id);
	struct hal_rx_ppdu_info *ppdu_info;
	qdf_nbuf_t status_nbuf;
	uint8_t *rx_tlv;
	uint8_t *rx_tlv_start;
	uint32_t tlv_status = HAL_TLV_STATUS_BUF_DONE;
	QDF_STATUS enh_log_status = QDF_STATUS_SUCCESS;
	struct cdp_pdev_mon_stats *rx_mon_stats;
	int smart_mesh_status;
	enum WDI_EVENT pktlog_mode = WDI_NO_VAL;
	bool nbuf_used;
	uint32_t rx_enh_capture_mode;
	struct dp_mon_soc *mon_soc = soc->monitor_soc;
	struct dp_mon_pdev *mon_pdev;

	if (!pdev) {
		dp_rx_mon_status_debug("%pK: pdev is null for mac_id = %d",
				       soc, mac_id);
		return;
	}

	mon_pdev = pdev->monitor_pdev;
	ppdu_info = &mon_pdev->ppdu_info;
	rx_mon_stats = &mon_pdev->rx_mon_stats;

	if (mon_pdev->mon_ppdu_status != DP_PPDU_STATUS_START)
		return;

	rx_enh_capture_mode = mon_pdev->rx_enh_capture_mode;

	while (!qdf_nbuf_is_queue_empty(&mon_pdev->rx_status_q)) {

		status_nbuf = qdf_nbuf_queue_remove(&mon_pdev->rx_status_q);
		dp_rx_mon_status_ring_record_entry(soc,
						   DP_MON_STATUS_BUF_DEQUEUE,
						   NULL, NULL, status_nbuf);

		if (!status_nbuf)
			return;

		rx_tlv = qdf_nbuf_data(status_nbuf);
		rx_tlv_start = rx_tlv;
		nbuf_used = false;

		if ((mon_pdev->mvdev) || (mon_pdev->enhanced_stats_en) ||
		    (mon_pdev->mcopy_mode) || (dp_cfr_rcc_mode_status(pdev)) ||
		    (mon_pdev->undecoded_metadata_capture) ||
		    (rx_enh_capture_mode != CDP_RX_ENH_CAPTURE_DISABLED)) {
			do {
				tlv_status = hal_rx_status_get_tlv_info(rx_tlv,
						ppdu_info, pdev->soc->hal_soc,
						status_nbuf);

				dp_rx_mon_update_dbg_ppdu_stats(ppdu_info,
								rx_mon_stats);

				dp_rx_mon_enh_capture_process(pdev, tlv_status,
					status_nbuf, ppdu_info,
					&nbuf_used);

				dp_rx_mcopy_process_ppdu_info(pdev,
							      ppdu_info,
							      tlv_status);

				rx_tlv = hal_rx_status_get_next_tlv(rx_tlv,
						mon_pdev->is_tlv_hdr_64_bit);

				if ((rx_tlv - rx_tlv_start) >=
					RX_MON_STATUS_BUF_SIZE)
					break;

			} while ((tlv_status == HAL_TLV_STATUS_PPDU_NOT_DONE) ||
				 (tlv_status == HAL_TLV_STATUS_HEADER) ||
				 (tlv_status == HAL_TLV_STATUS_MPDU_END) ||
				 (tlv_status == HAL_TLV_STATUS_MPDU_START) ||
				 (tlv_status == HAL_TLV_STATUS_MSDU_END));
		}
		if (mon_pdev->dp_peer_based_pktlog) {
			dp_rx_process_peer_based_pktlog(soc, ppdu_info,
							status_nbuf,
							pdev->pdev_id);
		} else {
			if (mon_pdev->rx_pktlog_mode == DP_RX_PKTLOG_FULL)
				pktlog_mode = WDI_EVENT_RX_DESC;
			else if (mon_pdev->rx_pktlog_mode == DP_RX_PKTLOG_LITE)
				pktlog_mode = WDI_EVENT_LITE_RX;

			if (pktlog_mode != WDI_NO_VAL)
				dp_wdi_event_handler(pktlog_mode, soc,
						     status_nbuf,
						     HTT_INVALID_PEER,
						     WDI_NO_VAL, pdev->pdev_id);
		}

		/* smart monitor vap and m_copy cannot co-exist */
		if (ppdu_info->rx_status.monitor_direct_used &&
		    mon_pdev->neighbour_peers_added &&
		    mon_pdev->mvdev) {
			smart_mesh_status = dp_rx_handle_smart_mesh_mode(soc,
						pdev, ppdu_info, status_nbuf);
			if (smart_mesh_status)
				qdf_nbuf_free(status_nbuf);
		} else if (qdf_unlikely(mon_pdev->mcopy_mode)) {
			dp_rx_process_mcopy_mode(soc, pdev,
						 ppdu_info, tlv_status,
						 status_nbuf);
		} else if (rx_enh_capture_mode != CDP_RX_ENH_CAPTURE_DISABLED) {
			if (!nbuf_used)
				qdf_nbuf_free(status_nbuf);

			if (tlv_status == HAL_TLV_STATUS_PPDU_DONE)
				enh_log_status =
					dp_rx_handle_enh_capture(soc,
								 pdev,
								 ppdu_info);
		} else {
			qdf_nbuf_free(status_nbuf);
		}
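
		/*
		 * At this point every branch above has either handed the
		 * status nbuf to its consumer or freed it: the smart mesh
		 * and m_copy handlers are assumed to take ownership when
		 * they do not free it here, the enhanced capture path frees
		 * it unless nbuf_used is set, and the default branch frees
		 * it unconditionally. status_nbuf must not be touched below.
		 */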

		if (tlv_status == HAL_TLV_STATUS_PPDU_NON_STD_DONE) {
			dp_rx_mon_deliver_non_std(soc, mac_id);
		} else if ((tlv_status == HAL_TLV_STATUS_PPDU_DONE) &&
			   (!dp_rx_mon_check_phyrx_abort(pdev, ppdu_info))) {
			rx_mon_stats->status_ppdu_done++;
			dp_rx_mon_handle_mu_ul_info(ppdu_info);

			if (mon_pdev->tx_capture_enabled
			    != CDP_TX_ENH_CAPTURE_DISABLED)
				dp_send_ack_frame_to_stack(soc, pdev,
							   ppdu_info);

			if (mon_pdev->enhanced_stats_en ||
			    mon_pdev->mcopy_mode ||
			    mon_pdev->neighbour_peers_added)
				dp_rx_handle_ppdu_stats(soc, pdev, ppdu_info);
			else if (dp_cfr_rcc_mode_status(pdev))
				dp_rx_handle_cfr(soc, pdev, ppdu_info);

			mon_pdev->mon_ppdu_status = DP_PPDU_STATUS_DONE;

			/* Collect spcl vap stats if configured */
			if (mon_pdev->scan_spcl_vap_configured)
				dp_rx_mon_update_scan_spcl_vap_stats(pdev,
								     ppdu_info);

			/*
			 * If chan_num was not fetched correctly from the
			 * PPDU RX TLV, fall back to the value saved in the
			 * pdev.
			 */
			if (qdf_unlikely(mon_pdev->ppdu_info.rx_status.chan_num == 0))
				mon_pdev->ppdu_info.rx_status.chan_num =
					mon_pdev->mon_chan_num;
			/*
			 * If chan_freq was not fetched correctly from the
			 * PPDU RX TLV, fall back to the value saved in the
			 * pdev.
			 */
			if (qdf_unlikely(mon_pdev->ppdu_info.rx_status.chan_freq == 0)) {
				mon_pdev->ppdu_info.rx_status.chan_freq =
					mon_pdev->mon_chan_freq;
			}

			if (!mon_soc->full_mon_mode)
				dp_rx_mon_dest_process(soc, int_ctx, mac_id,
						       quota);

			mon_pdev->mon_ppdu_status = DP_PPDU_STATUS_START;
		} else {
			dp_rx_mon_handle_ppdu_undecoded_metadata(soc, pdev,
								 ppdu_info);
		}
	}
	return;
}

/**
 * dp_rx_mon_status_srng_process() - Process the monitor status ring;
 *	post filled status ring buffers to the Rx status queue for later
 *	processing when a status ring buffer is filled with status TLVs,
 *	and allocate a new buffer to the status ring for each buffer posted.
 * @soc: core txrx main context
 * @int_ctx: interrupt context
 * @mac_id: mac_id which is one of 3 mac_ids
 * @quota: No. of ring entries that can be serviced in one shot.
 *
 * Return: uint32_t: No. of ring entries that were processed.
 */
static inline uint32_t
dp_rx_mon_status_srng_process(struct dp_soc *soc, struct dp_intr *int_ctx,
			      uint32_t mac_id, uint32_t quota)
{
	struct dp_pdev *pdev = dp_get_pdev_for_lmac_id(soc, mac_id);
	hal_soc_handle_t hal_soc;
	void *mon_status_srng;
	void *rxdma_mon_status_ring_entry;
	QDF_STATUS status;
	enum dp_mon_reap_status reap_status;
	uint32_t work_done = 0;
	struct dp_mon_pdev *mon_pdev;

	if (!pdev) {
		dp_rx_mon_status_debug("%pK: pdev is null for mac_id = %d",
				       soc, mac_id);
		return work_done;
	}

	mon_pdev = pdev->monitor_pdev;

	mon_status_srng = soc->rxdma_mon_status_ring[mac_id].hal_srng;

	qdf_assert(mon_status_srng);
	if (!mon_status_srng || !hal_srng_initialized(mon_status_srng)) {

		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
			  "%s %d : HAL Monitor Status Ring Init Failed -- %pK",
			  __func__, __LINE__, mon_status_srng);
		return work_done;
	}

	hal_soc = soc->hal_soc;

	qdf_assert(hal_soc);

	if (qdf_unlikely(dp_srng_access_start(int_ctx, soc, mon_status_srng)))
		goto done;

	/* mon_status_ring_desc => WBM_BUFFER_RING STRUCT =>
	 * BUFFER_ADDR_INFO STRUCT
	 */
	while (qdf_likely((rxdma_mon_status_ring_entry =
		hal_srng_src_peek_n_get_next(hal_soc, mon_status_srng))
			&& quota--)) {
		struct hal_buf_info hbi;
		qdf_nbuf_t status_nbuf;
		struct dp_rx_desc *rx_desc;
		uint8_t *status_buf;
		qdf_dma_addr_t paddr;
		uint64_t buf_addr;
		struct rx_desc_pool *rx_desc_pool;

		rx_desc_pool = &soc->rx_desc_status[mac_id];
		buf_addr =
			(HAL_RX_BUFFER_ADDR_31_0_GET(
				rxdma_mon_status_ring_entry) |
			((uint64_t)(HAL_RX_BUFFER_ADDR_39_32_GET(
				rxdma_mon_status_ring_entry)) << 32));

		if (qdf_likely(buf_addr)) {

			hal_rx_buf_cookie_rbm_get(soc->hal_soc,
					(uint32_t *)rxdma_mon_status_ring_entry,
					&hbi);
			rx_desc = dp_rx_cookie_2_va_mon_status(soc,
							       hbi.sw_cookie);
			dp_rx_mon_status_ring_record_entry(soc,
						DP_MON_STATUS_BUF_REAP,
						rxdma_mon_status_ring_entry,
						rx_desc, NULL);

			qdf_assert_always(rx_desc);

			if (qdf_unlikely(!dp_rx_desc_paddr_sanity_check(rx_desc,
								buf_addr))) {
				DP_STATS_INC(soc, rx.err.nbuf_sanity_fail, 1);
				hal_srng_src_get_next(hal_soc,
						      mon_status_srng);
				continue;
			}

			status_nbuf = rx_desc->nbuf;

			qdf_nbuf_sync_for_cpu(soc->osdev, status_nbuf,
					      QDF_DMA_FROM_DEVICE);

			status_buf = qdf_nbuf_data(status_nbuf);

			status = hal_get_rx_status_done(status_buf);

			if (status != QDF_STATUS_SUCCESS) {
				uint32_t hp, tp;

				hal_get_sw_hptp(hal_soc, mon_status_srng,
						&tp, &hp);
				dp_info_rl("tlv tag status error hp:%u, tp:%u",
					   hp, tp);

				/* The RxDMA status done bit might not be set
				 * even though the TP has been moved by HW.
				 */

				/* If the done status is missing:
				 * 1. As per the MAC team's suggestion, when
				 *    the HP + 1 entry is peeked and its DMA
				 *    is not done, and the HP + 2 entry's DMA
				 *    done bit is set, skip the HP + 1 entry
				 *    and start processing in the next
				 *    interrupt.
				 * 2. If the HP + 2 entry's DMA done bit is
				 *    not set, poll on the HP + 1 entry's DMA
				 *    done bit and check the status of the
				 *    same buffer the next time
				 *    dp_rx_mon_status_srng_process() runs.
				 */
				reap_status =
					dp_rx_mon_handle_status_buf_done(pdev,
							mon_status_srng);
				if (reap_status == DP_MON_STATUS_NO_DMA)
					continue;
				else if (reap_status == DP_MON_STATUS_REPLENISH) {
					if (!rx_desc->unmapped) {
						qdf_nbuf_unmap_nbytes_single(
							soc->osdev, status_nbuf,
							QDF_DMA_FROM_DEVICE,
							rx_desc_pool->buf_size);
						rx_desc->unmapped = 1;
					}
					qdf_nbuf_free(status_nbuf);
					goto buf_replenish;
				}
			}
			qdf_nbuf_set_pktlen(status_nbuf,
					    RX_MON_STATUS_BUF_SIZE);

			if (!rx_desc->unmapped) {
				qdf_nbuf_unmap_nbytes_single(soc->osdev,
						status_nbuf,
						QDF_DMA_FROM_DEVICE,
						rx_desc_pool->buf_size);
				rx_desc->unmapped = 1;
			}

			/* Put the status_nbuf to queue */
			qdf_nbuf_queue_add(&mon_pdev->rx_status_q,
					   status_nbuf);
			dp_rx_mon_status_ring_record_entry(soc,
						DP_MON_STATUS_BUF_ENQUEUE,
						rxdma_mon_status_ring_entry,
						rx_desc, status_nbuf);

		} else {
			union dp_rx_desc_list_elem_t *desc_list = NULL;
			union dp_rx_desc_list_elem_t *tail = NULL;
			uint32_t num_alloc_desc;

			num_alloc_desc = dp_rx_get_free_desc_list(soc, mac_id,
								  rx_desc_pool,
								  1,
								  &desc_list,
								  &tail);
			/*
			 * No free descriptors available
			 */
			if (qdf_unlikely(num_alloc_desc == 0)) {
				work_done++;
				break;
			}

			rx_desc = &desc_list->rx_desc;
		}

buf_replenish:
		status_nbuf = dp_rx_nbuf_prepare(soc, pdev);

		/*
		 * If the qdf_nbuf allocation or map failed:
		 * free the dp rx desc back to the free list,
		 * fill in a NULL DMA address at the current HP entry,
		 * keep the HP in mon_status_ring unchanged,
		 * and wait for the next dp_rx_mon_status_srng_process()
		 * run to fill in a buffer at the current HP.
		 */
		if (qdf_unlikely(!status_nbuf)) {
			union dp_rx_desc_list_elem_t *desc_list = NULL;
			union dp_rx_desc_list_elem_t *tail = NULL;
			struct rx_desc_pool *rx_desc_pool;

			rx_desc_pool = &soc->rx_desc_status[mac_id];

			dp_info_rl("fail to allocate or map qdf_nbuf");
			dp_rx_add_to_free_desc_list(&desc_list,
						    &tail, rx_desc);
			dp_rx_add_desc_list_to_free_list(soc, &desc_list,
							 &tail, mac_id,
							 rx_desc_pool);

			hal_rxdma_buff_addr_info_set(
				hal_soc, rxdma_mon_status_ring_entry,
				0, 0,
				HAL_RX_BUF_RBM_SW3_BM(soc->wbm_sw0_bm_id));
			work_done++;
			break;
		}

		paddr = qdf_nbuf_get_frag_paddr(status_nbuf, 0);

		rx_desc->nbuf = status_nbuf;
		rx_desc->in_use = 1;
		rx_desc->unmapped = 0;

		hal_rxdma_buff_addr_info_set(hal_soc,
					     rxdma_mon_status_ring_entry,
					     paddr, rx_desc->cookie,
					     HAL_RX_BUF_RBM_SW3_BM(soc->wbm_sw0_bm_id));

		hal_srng_src_get_next(hal_soc, mon_status_srng);
		work_done++;
	}
done:

	dp_srng_access_end(int_ctx, soc, mon_status_srng);

	return work_done;
}

uint32_t
dp_rx_mon_status_process(struct dp_soc *soc, struct dp_intr *int_ctx,
			 uint32_t mac_id, uint32_t quota)
{
	uint32_t work_done;

	work_done = dp_rx_mon_status_srng_process(soc, int_ctx, mac_id, quota);
	quota -= work_done;
	dp_rx_mon_status_process_tlv(soc, int_ctx, mac_id, quota);

	return work_done;
}

QDF_STATUS
dp_rx_pdev_mon_status_buffers_alloc(struct dp_pdev *pdev, uint32_t mac_id)
{
	uint8_t pdev_id = pdev->pdev_id;
	struct dp_soc *soc = pdev->soc;
	struct dp_srng *mon_status_ring;
	uint32_t num_entries;
	struct rx_desc_pool *rx_desc_pool;
	struct wlan_cfg_dp_soc_ctxt *soc_cfg_ctx;
	union dp_rx_desc_list_elem_t *desc_list = NULL;
	union dp_rx_desc_list_elem_t *tail = NULL;

	soc_cfg_ctx = soc->wlan_cfg_ctx;
	mon_status_ring = &soc->rxdma_mon_status_ring[mac_id];

	num_entries = mon_status_ring->num_entries;

	rx_desc_pool = &soc->rx_desc_status[mac_id];

	dp_debug("Mon RX Desc Pool[%d] entries=%u",
		 pdev_id, num_entries);

	return dp_rx_mon_status_buffers_replenish(soc, mac_id,
						  mon_status_ring,
						  rx_desc_pool, num_entries,
						  &desc_list, &tail,
						  HAL_RX_BUF_RBM_SW3_BM(soc->wbm_sw0_bm_id));
}

QDF_STATUS
dp_rx_pdev_mon_status_desc_pool_alloc(struct dp_pdev *pdev, uint32_t mac_id)
{
	uint8_t pdev_id = pdev->pdev_id;
	struct dp_soc *soc = pdev->soc;
	struct dp_srng *mon_status_ring;
	uint32_t num_entries;
	struct rx_desc_pool *rx_desc_pool;
	struct wlan_cfg_dp_soc_ctxt *soc_cfg_ctx;

	soc_cfg_ctx = soc->wlan_cfg_ctx;
	mon_status_ring = &soc->rxdma_mon_status_ring[mac_id];

	num_entries = mon_status_ring->num_entries;

	rx_desc_pool = &soc->rx_desc_status[mac_id];

	dp_debug("Mon RX Desc Pool[%d] entries=%u", pdev_id, num_entries);

	rx_desc_pool->desc_type = DP_RX_DESC_STATUS_TYPE;
	return dp_rx_desc_pool_alloc(soc, num_entries + 1, rx_desc_pool);
}

void
dp_rx_pdev_mon_status_desc_pool_init(struct dp_pdev *pdev, uint32_t mac_id)
{
	uint32_t i;
	uint8_t pdev_id = pdev->pdev_id;
	struct dp_soc *soc = pdev->soc;
	struct dp_srng *mon_status_ring;
	uint32_t num_entries;
	struct rx_desc_pool *rx_desc_pool;
	struct wlan_cfg_dp_soc_ctxt *soc_cfg_ctx;
	struct dp_mon_pdev *mon_pdev = pdev->monitor_pdev;

	soc_cfg_ctx = soc->wlan_cfg_ctx;
	mon_status_ring = &soc->rxdma_mon_status_ring[mac_id];

	num_entries = mon_status_ring->num_entries;

	rx_desc_pool = &soc->rx_desc_status[mac_id];

	dp_debug("Mon RX Desc status Pool[%d] init entries=%u",
		 pdev_id, num_entries);

	rx_desc_pool->owner = HAL_RX_BUF_RBM_SW3_BM(soc->wbm_sw0_bm_id);
	rx_desc_pool->buf_size = RX_MON_STATUS_BUF_SIZE;
	rx_desc_pool->buf_alignment = RX_DATA_BUFFER_ALIGNMENT;
	/* Disable frag processing flag */
	dp_rx_enable_mon_dest_frag(rx_desc_pool, false);

	dp_rx_desc_pool_init(soc, mac_id, num_entries + 1, rx_desc_pool);

	qdf_nbuf_queue_init(&mon_pdev->rx_status_q);

	mon_pdev->mon_ppdu_status = DP_PPDU_STATUS_START;

	qdf_mem_zero(&mon_pdev->ppdu_info, sizeof(mon_pdev->ppdu_info));

	/*
	 * Set last_ppdu_id to HAL_INVALID_PPDU_ID in order to avoid a
	 * ppdu_id match with a '0' ppdu_id from the monitor status ring.
	 */
	mon_pdev->ppdu_info.com_info.last_ppdu_id = HAL_INVALID_PPDU_ID;

	qdf_mem_zero(&mon_pdev->rx_mon_stats,
		     sizeof(mon_pdev->rx_mon_stats));

	dp_rx_mon_init_dbg_ppdu_stats(&mon_pdev->ppdu_info,
				      &mon_pdev->rx_mon_stats);

	for (i = 0; i < MAX_MU_USERS; i++) {
		qdf_nbuf_queue_init(&mon_pdev->mpdu_q[i]);
		mon_pdev->is_mpdu_hdr[i] = true;
	}

	qdf_mem_zero(mon_pdev->msdu_list,
		     sizeof(mon_pdev->msdu_list[MAX_MU_USERS]));

	mon_pdev->rx_enh_capture_mode = CDP_RX_ENH_CAPTURE_DISABLED;
}

void
dp_rx_pdev_mon_status_desc_pool_deinit(struct dp_pdev *pdev, uint32_t mac_id) {
	uint8_t pdev_id = pdev->pdev_id;
	struct dp_soc *soc = pdev->soc;
	struct rx_desc_pool *rx_desc_pool;

	rx_desc_pool = &soc->rx_desc_status[mac_id];

	dp_debug("Mon RX Desc status Pool[%d] deinit", pdev_id);

	dp_rx_desc_pool_deinit(soc, rx_desc_pool, mac_id);
}

void
dp_rx_pdev_mon_status_desc_pool_free(struct dp_pdev *pdev, uint32_t mac_id) {
	uint8_t pdev_id = pdev->pdev_id;
	struct dp_soc *soc = pdev->soc;
	struct rx_desc_pool *rx_desc_pool;

	rx_desc_pool = &soc->rx_desc_status[mac_id];

	dp_debug("Mon RX Status Desc Pool Free pdev[%d]", pdev_id);

	dp_rx_desc_pool_free(soc, rx_desc_pool);
}

void
dp_rx_pdev_mon_status_buffers_free(struct dp_pdev *pdev, uint32_t mac_id)
{
	uint8_t pdev_id = pdev->pdev_id;
	struct dp_soc *soc = pdev->soc;
	struct rx_desc_pool *rx_desc_pool;

	rx_desc_pool = &soc->rx_desc_status[mac_id];

	dp_debug("Mon RX Status Desc Pool Free pdev[%d]", pdev_id);

	dp_rx_desc_nbuf_free(soc, rx_desc_pool);
}

/**
 * dp_rx_mon_status_buffers_replenish() - replenish the monitor status ring
 *	with rx nbufs; called during dp rx monitor status ring
 *	initialization.
 * @dp_soc: core txrx main context
 * @mac_id: mac_id which is one of 3 mac_ids
 * @dp_rxdma_srng: dp monitor status circular ring
 * @rx_desc_pool: Pointer to Rx descriptor pool
 * @num_req_buffers: number of buffers to be replenished
 * @desc_list: list of descs if called from dp rx monitor status
 *	       processing, or NULL during dp rx initialization or an
 *	       out-of-buffer interrupt
 * @tail: tail of the descs list
 * @owner: who owns the nbuf (host, NSS etc...)
 *
 * Return: return success or failure
 */
static inline
QDF_STATUS dp_rx_mon_status_buffers_replenish(struct dp_soc *dp_soc,
					      uint32_t mac_id,
					      struct dp_srng *dp_rxdma_srng,
					      struct rx_desc_pool *rx_desc_pool,
					      uint32_t num_req_buffers,
					      union dp_rx_desc_list_elem_t **desc_list,
					      union dp_rx_desc_list_elem_t **tail,
					      uint8_t owner)
{
	uint32_t num_alloc_desc;
	uint16_t num_desc_to_free = 0;
	uint32_t num_entries_avail;
	uint32_t count = 0;
	int sync_hw_ptr = 1;
	qdf_dma_addr_t paddr;
	qdf_nbuf_t rx_netbuf;
	void *rxdma_ring_entry;
	union dp_rx_desc_list_elem_t *next;
	void *rxdma_srng;
	struct dp_pdev *dp_pdev = dp_get_pdev_for_lmac_id(dp_soc, mac_id);

	if (!dp_pdev) {
		dp_rx_mon_status_debug("%pK: pdev is null for mac_id = %d",
				       dp_soc, mac_id);
		return QDF_STATUS_E_FAILURE;
	}

	rxdma_srng = dp_rxdma_srng->hal_srng;

	qdf_assert(rxdma_srng);

	dp_rx_mon_status_debug("%pK: requested %d buffers for replenish",
			       dp_soc, num_req_buffers);

	/*
	 * If desc_list is NULL, allocate the descs from the freelist.
	 */
	if (!(*desc_list)) {

		num_alloc_desc = dp_rx_get_free_desc_list(dp_soc, mac_id,
							  rx_desc_pool,
							  num_req_buffers,
							  desc_list,
							  tail);

		if (!num_alloc_desc) {
			dp_rx_mon_status_err("%pK: no free rx_descs in freelist",
					     dp_soc);
			return QDF_STATUS_E_NOMEM;
		}

		dp_rx_mon_status_debug("%pK: %d rx desc allocated", dp_soc,
				       num_alloc_desc);

		num_req_buffers = num_alloc_desc;
	}

	hal_srng_access_start(dp_soc->hal_soc, rxdma_srng);
	num_entries_avail = hal_srng_src_num_avail(dp_soc->hal_soc,
						   rxdma_srng, sync_hw_ptr);

	dp_rx_mon_status_debug("%pK: no. of available entries in rxdma ring: %d",
			       dp_soc, num_entries_avail);

	if (num_entries_avail < num_req_buffers) {
		num_desc_to_free = num_req_buffers - num_entries_avail;
		num_req_buffers = num_entries_avail;
	}

	while (count <= num_req_buffers) {
		rx_netbuf = dp_rx_nbuf_prepare(dp_soc, dp_pdev);

		/*
		 * If the qdf_nbuf allocation or map failed, keep the HP in
		 * mon_status_ring unchanged and wait for
		 * dp_rx_mon_status_srng_process() to fill in a buffer at
		 * the current HP.
		 */
		if (qdf_unlikely(!rx_netbuf)) {
			dp_rx_mon_status_err("%pK: qdf_nbuf allocate or map fail, count %d",
					     dp_soc, count);
			break;
		}

		paddr = qdf_nbuf_get_frag_paddr(rx_netbuf, 0);

		next = (*desc_list)->next;
		rxdma_ring_entry = hal_srng_src_get_cur_hp_n_move_next(
						dp_soc->hal_soc,
						rxdma_srng);

		if (qdf_unlikely(!rxdma_ring_entry)) {
			dp_rx_mon_status_err("%pK: rxdma_ring_entry is NULL, count - %d",
					     dp_soc, count);
			qdf_nbuf_unmap_nbytes_single(dp_soc->osdev, rx_netbuf,
						     QDF_DMA_FROM_DEVICE,
						     rx_desc_pool->buf_size);
			qdf_nbuf_free(rx_netbuf);
			break;
		}

		(*desc_list)->rx_desc.nbuf = rx_netbuf;
		(*desc_list)->rx_desc.in_use = 1;
		(*desc_list)->rx_desc.unmapped = 0;
		count++;

		hal_rxdma_buff_addr_info_set(dp_soc->hal_soc,
					     rxdma_ring_entry, paddr,
					     (*desc_list)->rx_desc.cookie,
					     owner);

		dp_rx_mon_status_debug("%pK: rx_desc=%pK, cookie=%d, nbuf=%pK, paddr=%pK",
				       dp_soc, &(*desc_list)->rx_desc,
				       (*desc_list)->rx_desc.cookie,
				       rx_netbuf, (void *)paddr);

		*desc_list = next;
	}

	hal_srng_access_end(dp_soc->hal_soc, rxdma_srng);

	dp_rx_mon_status_debug("%pK: successfully replenished %d buffers",
			       dp_soc, num_req_buffers);

	dp_rx_mon_status_debug("%pK: %d rx desc added back to free list",
			       dp_soc, num_desc_to_free);

	/*
	 * Add any remaining free descs back to the free list.
	 */
	if (*desc_list) {
		dp_rx_add_desc_list_to_free_list(dp_soc, desc_list, tail,
						 mac_id, rx_desc_pool);
	}

	return QDF_STATUS_SUCCESS;
}

#if !defined(DISABLE_MON_CONFIG) && defined(MON_ENABLE_DROP_FOR_MAC)
/**
 * dp_mon_status_srng_drop_for_mac() - Drop the mon status ring packets for
 *				       a given mac
 * @pdev: DP pdev
 * @mac_id: mac id
 * @quota: maximum number of ring entries that can be processed
 *
 * Return: Number of ring entries reaped
 */
static uint32_t
dp_mon_status_srng_drop_for_mac(struct dp_pdev *pdev, uint32_t mac_id,
				uint32_t quota)
{
	struct dp_soc *soc = pdev->soc;
	void *mon_status_srng;
	hal_soc_handle_t hal_soc;
	void *ring_desc;
	uint32_t reap_cnt = 0;

	if (qdf_unlikely(!soc || !soc->hal_soc))
		return reap_cnt;

	mon_status_srng = soc->rxdma_mon_status_ring[mac_id].hal_srng;

	if (qdf_unlikely(!mon_status_srng ||
			 !hal_srng_initialized(mon_status_srng)))
		return reap_cnt;

	hal_soc = soc->hal_soc;

	if (qdf_unlikely(hal_srng_access_start(hal_soc, mon_status_srng)))
		return reap_cnt;

	while ((ring_desc =
		hal_srng_src_peek_n_get_next(hal_soc, mon_status_srng)) &&
		reap_cnt < MON_DROP_REAP_LIMIT && quota--) {
		uint64_t buf_addr;
		struct hal_buf_info hbi;
		struct dp_rx_desc *rx_desc;
		qdf_nbuf_t status_nbuf;
		uint8_t *status_buf;
		enum dp_mon_reap_status reap_status;
		qdf_dma_addr_t iova;
		struct rx_desc_pool *rx_desc_pool;

		rx_desc_pool = &soc->rx_desc_status[mac_id];

		buf_addr = (HAL_RX_BUFFER_ADDR_31_0_GET(ring_desc) |
		    ((uint64_t)(HAL_RX_BUFFER_ADDR_39_32_GET(ring_desc)) << 32));

		if (qdf_likely(buf_addr)) {
			hal_rx_buf_cookie_rbm_get(soc->hal_soc,
						  (uint32_t *)ring_desc,
						  &hbi);
			rx_desc = dp_rx_cookie_2_va_mon_status(soc,
							       hbi.sw_cookie);

			qdf_assert_always(rx_desc);

			status_nbuf = rx_desc->nbuf;

			qdf_nbuf_sync_for_cpu(soc->osdev, status_nbuf,
					      QDF_DMA_FROM_DEVICE);

			status_buf = qdf_nbuf_data(status_nbuf);

			if (hal_get_rx_status_done(status_buf) !=
					QDF_STATUS_SUCCESS) {
				/* If the done status is missing:
				 * 1. As per the MAC team's suggestion, when
				 *    the HP + 1 entry is peeked and its DMA
				 *    is not done, and the HP + 2 entry's DMA
				 *    done bit is set, skip the HP + 1 entry
				 *    and start processing in the next
				 *    interrupt.
				 * 2. If the HP + 2 entry's DMA done bit is
				 *    not set, poll on the HP + 1 entry's DMA
				 *    done bit and check the status of the
				 *    same buffer the next time this routine
				 *    runs.
				 */
				reap_status =
					dp_rx_mon_handle_status_buf_done(pdev,
							mon_status_srng);
				if (reap_status == DP_MON_STATUS_NO_DMA)
					break;
			}
			qdf_nbuf_unmap_nbytes_single(soc->osdev, status_nbuf,
						     QDF_DMA_FROM_DEVICE,
						     rx_desc_pool->buf_size);
			qdf_nbuf_free(status_nbuf);
		} else {
			union dp_rx_desc_list_elem_t *rx_desc_elem;

			qdf_spin_lock_bh(&rx_desc_pool->lock);

			if (!rx_desc_pool->freelist) {
				qdf_spin_unlock_bh(&rx_desc_pool->lock);
				break;
			}
			rx_desc_elem = rx_desc_pool->freelist;
			rx_desc_pool->freelist = rx_desc_pool->freelist->next;
			qdf_spin_unlock_bh(&rx_desc_pool->lock);

			rx_desc = &rx_desc_elem->rx_desc;
		}

		status_nbuf = dp_rx_nbuf_prepare(soc, pdev);

		if (qdf_unlikely(!status_nbuf)) {
			union dp_rx_desc_list_elem_t *desc_list = NULL;
			union dp_rx_desc_list_elem_t *tail = NULL;

			dp_info_rl("fail to allocate or map nbuf");
			dp_rx_add_to_free_desc_list(&desc_list, &tail,
						    rx_desc);
			dp_rx_add_desc_list_to_free_list(soc,
							 &desc_list,
							 &tail, mac_id,
							 rx_desc_pool);

			hal_rxdma_buff_addr_info_set(hal_soc, ring_desc, 0, 0,
				HAL_RX_BUF_RBM_SW3_BM(soc->wbm_sw0_bm_id));
			break;
		}

		iova = qdf_nbuf_get_frag_paddr(status_nbuf, 0);

		rx_desc->nbuf = status_nbuf;
		rx_desc->in_use = 1;

		hal_rxdma_buff_addr_info_set(hal_soc, ring_desc, iova,
					     rx_desc->cookie,
					     HAL_RX_BUF_RBM_SW3_BM(soc->wbm_sw0_bm_id));

		reap_cnt++;
		hal_srng_src_get_next(hal_soc, mon_status_srng);
	}

	hal_srng_access_end(hal_soc, mon_status_srng);

	return reap_cnt;
}

uint32_t dp_mon_drop_packets_for_mac(struct dp_pdev *pdev, uint32_t mac_id,
				     uint32_t quota)
{
	uint32_t work_done;

	work_done = dp_mon_status_srng_drop_for_mac(pdev, mac_id, quota);
	if (!dp_is_rxdma_dst_ring_common(pdev))
		dp_mon_dest_srng_drop_for_mac(pdev, mac_id);

	return work_done;
}
#else
uint32_t dp_mon_drop_packets_for_mac(struct dp_pdev *pdev, uint32_t mac_id,
				     uint32_t quota)
{
	return 0;
}
#endif