/*
 * Copyright (c) 2017-2021, The Linux Foundation. All rights reserved.
 * Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */
#include "hal_hw_headers.h"
#include "dp_types.h"
#include "dp_rx.h"
#include "dp_peer.h"
#include "hal_rx.h"
#include "hal_api.h"
#include "qdf_trace.h"
#include "qdf_nbuf.h"
#include "hal_api_mon.h"
#include "dp_internal.h"
#include "qdf_mem.h"   /* qdf_mem_malloc, qdf_mem_free */
#include "dp_htt.h"
#include "dp_mon.h"
#include "dp_rx_mon.h"
#include "htt.h"
#include <dp_mon_1.0.h>
#include <dp_rx_mon_1.0.h>

#ifdef FEATURE_PERPKT_INFO
#include "dp_ratetable.h"
#endif

static inline
QDF_STATUS dp_rx_mon_status_buffers_replenish(struct dp_soc *dp_soc,
					      uint32_t mac_id,
					      struct dp_srng *dp_rxdma_srng,
					      struct rx_desc_pool *rx_desc_pool,
					      uint32_t num_req_buffers,
					      union dp_rx_desc_list_elem_t **desc_list,
					      union dp_rx_desc_list_elem_t **tail,
					      uint8_t owner);

/**
 * dp_rx_mon_handle_status_buf_done() - Handle a status buffer whose DMA
 *					is not yet done
 * @pdev: DP pdev handle
 * @mon_status_srng: Monitor status SRNG
 *
 * As per the MAC team's suggestion, if the HP + 2 entry's DMA done bit is
 * set, skip the HP + 1 entry and start processing it in the next interrupt.
 * If the HP + 2 entry's DMA done bit is not set, poll on the HP + 1 entry
 * until its DMA done TLV is set.
 *
 * Return: enum dp_mon_reap_status
 */
enum dp_mon_reap_status
dp_rx_mon_handle_status_buf_done(struct dp_pdev *pdev,
				 void *mon_status_srng)
{
	struct dp_soc *soc = pdev->soc;
	hal_soc_handle_t hal_soc;
	void *ring_entry;
	struct hal_buf_info hbi;
	qdf_nbuf_t status_nbuf;
	struct dp_rx_desc *rx_desc;
	void *rx_tlv;
	QDF_STATUS buf_status;
	struct dp_mon_pdev *mon_pdev = pdev->monitor_pdev;

	hal_soc = soc->hal_soc;

	ring_entry = hal_srng_src_peek_n_get_next_next(hal_soc,
						       mon_status_srng);
	if (!ring_entry) {
		dp_rx_mon_status_debug("%pK: Monitor status ring entry is NULL for SRNG: %pK",
				       soc, mon_status_srng);
		return DP_MON_STATUS_NO_DMA;
	}

	hal_rx_buf_cookie_rbm_get(soc->hal_soc, (uint32_t *)ring_entry,
				  &hbi);
	rx_desc = dp_rx_cookie_2_va_mon_status(soc, hbi.sw_cookie);

	qdf_assert_always(rx_desc);

	status_nbuf = rx_desc->nbuf;

	qdf_nbuf_sync_for_cpu(soc->osdev, status_nbuf,
			      QDF_DMA_FROM_DEVICE);

	rx_tlv = qdf_nbuf_data(status_nbuf);
	buf_status = hal_get_rx_status_done(rx_tlv);

	/* If the status buffer's DMA is not done:
	 * 1. As per the MAC team's suggestion, if the HP + 2 entry's DMA
	 *    done bit is set, replenish the HP + 1 entry and start
	 *    processing in the next interrupt.
	 * 2. If the HP + 2 entry's DMA done bit is not set, hold on to
	 *    the mon destination ring.
	 */
	if (buf_status != QDF_STATUS_SUCCESS) {
		dp_err_rl("Monitor status ring: DMA is not done "
			  "for nbuf: %pK", status_nbuf);
		mon_pdev->rx_mon_stats.tlv_tag_status_err++;
		return DP_MON_STATUS_REPLENISH;
	}

	mon_pdev->rx_mon_stats.status_buf_done_war++;

	return DP_MON_STATUS_REPLENISH;
}
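
/*
 * Reap-status semantics, as consumed by the callers below:
 * DP_MON_STATUS_NO_DMA means neither HP + 1 nor HP + 2 has its DMA done
 * bit set, so the caller leaves the ring pointer untouched and re-checks
 * the same entry on a later pass; DP_MON_STATUS_REPLENISH means the stuck
 * entry should be skipped, i.e. its buffer is unmapped, freed and replaced
 * with a fresh one before moving on.
 */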

#ifdef WLAN_RX_PKT_CAPTURE_ENH
#include "dp_rx_mon_feature.h"
#else
static QDF_STATUS
dp_rx_handle_enh_capture(struct dp_soc *soc, struct dp_pdev *pdev,
			 struct hal_rx_ppdu_info *ppdu_info)
{
	return QDF_STATUS_SUCCESS;
}

static void
dp_rx_mon_enh_capture_process(struct dp_pdev *pdev, uint32_t tlv_status,
			      qdf_nbuf_t status_nbuf,
			      struct hal_rx_ppdu_info *ppdu_info,
			      bool *nbuf_used)
{
}
#endif

#ifdef WLAN_TX_PKT_CAPTURE_ENH
#include "dp_rx_mon_feature.h"
#else
static QDF_STATUS
dp_send_ack_frame_to_stack(struct dp_soc *soc,
			   struct dp_pdev *pdev,
			   struct hal_rx_ppdu_info *ppdu_info)
{
	return QDF_STATUS_SUCCESS;
}
#endif

#if defined(HTT_UL_OFDMA_USER_INFO_V0_W0_VALID_M)
static inline void
dp_rx_ul_ofdma_ru_size_to_width(
	uint32_t ru_size,
	uint32_t *ru_width)
{
	uint32_t width;

	width = 0;
	switch (ru_size) {
	case HTT_UL_OFDMA_V0_RU_SIZE_RU_26:
		width = 1;
		break;
	case HTT_UL_OFDMA_V0_RU_SIZE_RU_52:
		width = 2;
		break;
	case HTT_UL_OFDMA_V0_RU_SIZE_RU_106:
		width = 4;
		break;
	case HTT_UL_OFDMA_V0_RU_SIZE_RU_242:
		width = 9;
		break;
	case HTT_UL_OFDMA_V0_RU_SIZE_RU_484:
		width = 18;
		break;
	case HTT_UL_OFDMA_V0_RU_SIZE_RU_996:
		width = 37;
		break;
	case HTT_UL_OFDMA_V0_RU_SIZE_RU_996x2:
		width = 74;
		break;
	default:
		dp_rx_mon_status_err("RU size to width convert err");
		break;
	}
	*ru_width = width;
}

static inline void
dp_rx_mon_handle_mu_ul_info(struct hal_rx_ppdu_info *ppdu_info)
{
	struct mon_rx_user_status *mon_rx_user_status;
	uint32_t num_users;
	uint32_t i;
	uint32_t mu_ul_user_v0_word0;
	uint32_t mu_ul_user_v0_word1;
	uint32_t ru_width;
	uint32_t ru_size;

	if (!(ppdu_info->rx_status.reception_type == HAL_RX_TYPE_MU_OFDMA ||
	      ppdu_info->rx_status.reception_type == HAL_RX_TYPE_MU_MIMO))
		return;

	num_users = ppdu_info->com_info.num_users;
	if (num_users > HAL_MAX_UL_MU_USERS)
		num_users = HAL_MAX_UL_MU_USERS;
	for (i = 0; i < num_users; i++) {
		mon_rx_user_status = &ppdu_info->rx_user_status[i];
		mu_ul_user_v0_word0 =
			mon_rx_user_status->mu_ul_user_v0_word0;
		mu_ul_user_v0_word1 =
			mon_rx_user_status->mu_ul_user_v0_word1;

		if (HTT_UL_OFDMA_USER_INFO_V0_W0_VALID_GET(
					mu_ul_user_v0_word0) &&
		    !HTT_UL_OFDMA_USER_INFO_V0_W0_VER_GET(
					mu_ul_user_v0_word0)) {
			mon_rx_user_status->mcs =
				HTT_UL_OFDMA_USER_INFO_V0_W1_MCS_GET(
					mu_ul_user_v0_word1);
			mon_rx_user_status->nss =
				HTT_UL_OFDMA_USER_INFO_V0_W1_NSS_GET(
					mu_ul_user_v0_word1) + 1;

			mon_rx_user_status->mu_ul_info_valid = 1;
			mon_rx_user_status->ofdma_ru_start_index =
				HTT_UL_OFDMA_USER_INFO_V0_W1_RU_START_GET(
					mu_ul_user_v0_word1);

			ru_size =
				HTT_UL_OFDMA_USER_INFO_V0_W1_RU_SIZE_GET(
					mu_ul_user_v0_word1);
			dp_rx_ul_ofdma_ru_size_to_width(ru_size, &ru_width);
			mon_rx_user_status->ofdma_ru_width = ru_width;
			mon_rx_user_status->ofdma_ru_size = ru_size;
		}
	}
}
#else
static inline void
dp_rx_mon_handle_mu_ul_info(struct hal_rx_ppdu_info *ppdu_info)
{
}
#endif
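
/*
 * Note on the RU width values above: they appear to be the number of
 * 26-tone RU slots an allocation spans in the 11ax RU grid, e.g. a
 * 242-tone RU covers the 9 26-tone slots of a 20 MHz segment and a
 * 996-tone RU covers all 37 slots of 80 MHz. This interpretation is
 * inferred from the mapping table itself, not from the HTT definitions.
 */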

#ifdef QCA_UNDECODED_METADATA_SUPPORT
static inline bool
dp_rx_mon_check_phyrx_abort(struct dp_pdev *pdev,
			    struct hal_rx_ppdu_info *ppdu_info)
{
	return (pdev->monitor_pdev->undecoded_metadata_capture &&
		ppdu_info->rx_status.phyrx_abort);
}

static inline void
dp_rx_mon_handle_ppdu_undecoded_metadata(struct dp_soc *soc,
					 struct dp_pdev *pdev,
					 struct hal_rx_ppdu_info *ppdu_info)
{
	if (pdev->monitor_pdev->undecoded_metadata_capture)
		dp_rx_handle_ppdu_undecoded_metadata(soc, pdev, ppdu_info);

	pdev->monitor_pdev->mon_ppdu_status = DP_PPDU_STATUS_START;
}
#else
static inline bool
dp_rx_mon_check_phyrx_abort(struct dp_pdev *pdev,
			    struct hal_rx_ppdu_info *ppdu_info)
{
	return false;
}

static inline void
dp_rx_mon_handle_ppdu_undecoded_metadata(struct dp_soc *soc,
					 struct dp_pdev *pdev,
					 struct hal_rx_ppdu_info *ppdu_info)
{
}
#endif

#ifdef QCA_SUPPORT_SCAN_SPCL_VAP_STATS
/**
 * dp_rx_mon_update_scan_spcl_vap_stats() - Update special vap stats
 * @pdev: dp pdev context
 * @ppdu_info: ppdu info structure from ppdu ring
 *
 * Return: none
 */
static inline void
dp_rx_mon_update_scan_spcl_vap_stats(struct dp_pdev *pdev,
				     struct hal_rx_ppdu_info *ppdu_info)
{
	struct mon_rx_user_status *rx_user_status = NULL;
	struct dp_mon_pdev *mon_pdev = NULL;
	struct dp_mon_vdev *mon_vdev = NULL;
	uint32_t num_users = 0;
	uint32_t user = 0;

	mon_pdev = pdev->monitor_pdev;
	if (!mon_pdev || !mon_pdev->mvdev)
		return;

	mon_vdev = mon_pdev->mvdev->monitor_vdev;
	if (!mon_vdev || !mon_vdev->scan_spcl_vap_stats)
		return;

	num_users = ppdu_info->com_info.num_users;
	for (user = 0; user < num_users; user++) {
		rx_user_status = &ppdu_info->rx_user_status[user];
		mon_vdev->scan_spcl_vap_stats->rx_ok_pkts +=
			rx_user_status->mpdu_cnt_fcs_ok;
		mon_vdev->scan_spcl_vap_stats->rx_ok_bytes +=
			rx_user_status->mpdu_ok_byte_count;
		mon_vdev->scan_spcl_vap_stats->rx_err_pkts +=
			rx_user_status->mpdu_cnt_fcs_err;
		mon_vdev->scan_spcl_vap_stats->rx_err_bytes +=
			rx_user_status->mpdu_err_byte_count;
	}
	mon_vdev->scan_spcl_vap_stats->rx_mgmt_pkts +=
		ppdu_info->frm_type_info.rx_mgmt_cnt;
	mon_vdev->scan_spcl_vap_stats->rx_ctrl_pkts +=
		ppdu_info->frm_type_info.rx_ctrl_cnt;
	mon_vdev->scan_spcl_vap_stats->rx_data_pkts +=
		ppdu_info->frm_type_info.rx_data_cnt;
}
#else
static inline void
dp_rx_mon_update_scan_spcl_vap_stats(struct dp_pdev *pdev,
				     struct hal_rx_ppdu_info *ppdu_info)
{
}
#endif

#ifdef WLAN_FEATURE_DP_MON_STATUS_RING_HISTORY
/**
 * dp_rx_mon_status_ring_record_entry() - Record one entry of a particular
 *					  event type into the monitor status
 *					  buffer tracking history.
 * @soc: DP soc handle
 * @event: event type
 * @ring_desc: Monitor status ring descriptor
 * @rx_desc: RX descriptor
 * @nbuf: status buffer
 *
 * Return: None
 */
static void
dp_rx_mon_status_ring_record_entry(struct dp_soc *soc,
				   enum dp_mon_status_process_event event,
				   hal_ring_desc_t ring_desc,
				   struct dp_rx_desc *rx_desc,
				   qdf_nbuf_t nbuf)
{
	struct dp_mon_stat_info_record *record;
	struct hal_buf_info hbi;
	uint32_t idx;

	if (qdf_unlikely(!soc->mon_status_ring_history))
		return;

	idx = dp_history_get_next_index(&soc->mon_status_ring_history->index,
					DP_MON_STATUS_HIST_MAX);

	/* No NULL check needed for record since it's an array */
	record = &soc->mon_status_ring_history->entry[idx];

	record->timestamp = qdf_get_log_timestamp();
	if (event == DP_MON_STATUS_BUF_REAP) {
		hal_rx_buffer_addr_info_get_paddr(ring_desc, &hbi);

		/* buffer_addr_info is the first element of ring_desc */
		hal_rx_buf_cookie_rbm_get(soc->hal_soc, (uint32_t *)ring_desc,
					  &hbi);

		record->hbi.paddr = hbi.paddr;
		record->hbi.sw_cookie = hbi.sw_cookie;
		record->hbi.rbm = hbi.rbm;
		record->rx_desc = rx_desc;
		if (rx_desc) {
			record->nbuf = rx_desc->nbuf;
			record->rx_desc_nbuf_data = qdf_nbuf_data(rx_desc->nbuf);
		} else {
			record->nbuf = NULL;
			record->rx_desc_nbuf_data = NULL;
		}
	}

	if (event == DP_MON_STATUS_BUF_ENQUEUE) {
		record->nbuf = nbuf;
		record->rx_desc_nbuf_data = qdf_nbuf_data(nbuf);
	}

	if (event == DP_MON_STATUS_BUF_DEQUEUE) {
		record->nbuf = nbuf;
		if (nbuf)
			record->rx_desc_nbuf_data = qdf_nbuf_data(nbuf);
		else
			record->rx_desc_nbuf_data = NULL;
	}
}
#else
static void
dp_rx_mon_status_ring_record_entry(struct dp_soc *soc,
				   enum dp_mon_status_process_event event,
				   hal_ring_desc_t ring_desc,
				   struct dp_rx_desc *rx_desc,
				   qdf_nbuf_t nbuf)
{
}
#endif
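
/*
 * A minimal sketch of the circular-log pattern used above, assuming
 * dp_history_get_next_index() atomically advances the shared index and
 * wraps it at the history depth, e.g. with a power-of-two depth:
 *
 *	idx = qdf_atomic_inc_return(index) & (DP_MON_STATUS_HIST_MAX - 1);
 *
 * so concurrent recorders always claim distinct slots and only the newest
 * DP_MON_STATUS_HIST_MAX events are retained.
 */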

/**
 * dp_rx_mon_status_process_tlv() - Process status TLVs in status buffers
 *	on the Rx status queue posted by status SRNG processing.
 * @soc: core txrx main context
 * @int_ctx: interrupt context
 * @mac_id: mac_id which is one of 3 mac_ids
 * @quota: amount of work which can be done
 *
 * Return: none
 */
static inline void
dp_rx_mon_status_process_tlv(struct dp_soc *soc, struct dp_intr *int_ctx,
			     uint32_t mac_id, uint32_t quota)
{
	struct dp_pdev *pdev = dp_get_pdev_for_lmac_id(soc, mac_id);
	struct hal_rx_ppdu_info *ppdu_info;
	qdf_nbuf_t status_nbuf;
	uint8_t *rx_tlv;
	uint8_t *rx_tlv_start;
	uint32_t tlv_status = HAL_TLV_STATUS_BUF_DONE;
	QDF_STATUS enh_log_status = QDF_STATUS_SUCCESS;
	struct cdp_pdev_mon_stats *rx_mon_stats;
	int smart_mesh_status;
	enum WDI_EVENT pktlog_mode = WDI_NO_VAL;
	bool nbuf_used;
	uint32_t rx_enh_capture_mode;
	struct dp_mon_soc *mon_soc = soc->monitor_soc;
	struct dp_mon_pdev *mon_pdev;

	if (qdf_unlikely(!pdev)) {
		dp_rx_mon_status_debug("%pK: pdev is null for mac_id = %d", soc,
				       mac_id);
		return;
	}

	mon_pdev = pdev->monitor_pdev;
	ppdu_info = &mon_pdev->ppdu_info;
	rx_mon_stats = &mon_pdev->rx_mon_stats;

	if (qdf_unlikely(mon_pdev->mon_ppdu_status != DP_PPDU_STATUS_START))
		return;

	rx_enh_capture_mode = mon_pdev->rx_enh_capture_mode;

	while (!qdf_nbuf_is_queue_empty(&mon_pdev->rx_status_q)) {

		status_nbuf = qdf_nbuf_queue_remove(&mon_pdev->rx_status_q);
		dp_rx_mon_status_ring_record_entry(soc,
						   DP_MON_STATUS_BUF_DEQUEUE,
						   NULL, NULL, status_nbuf);

		if (qdf_unlikely(!status_nbuf))
			return;

		rx_tlv = qdf_nbuf_data(status_nbuf);
		rx_tlv_start = rx_tlv;
		nbuf_used = false;

		if ((mon_pdev->mvdev) || (mon_pdev->enhanced_stats_en) ||
		    (mon_pdev->mcopy_mode) || (dp_cfr_rcc_mode_status(pdev)) ||
		    (mon_pdev->undecoded_metadata_capture) ||
		    (rx_enh_capture_mode != CDP_RX_ENH_CAPTURE_DISABLED)) {
			do {
				tlv_status = hal_rx_status_get_tlv_info(rx_tlv,
						ppdu_info, pdev->soc->hal_soc,
						status_nbuf);

				dp_rx_mon_update_dbg_ppdu_stats(ppdu_info,
								rx_mon_stats);

				dp_rx_mon_enh_capture_process(pdev, tlv_status,
							      status_nbuf,
							      ppdu_info,
							      &nbuf_used);

				dp_rx_mcopy_process_ppdu_info(pdev,
							      ppdu_info,
							      tlv_status);

				rx_tlv = hal_rx_status_get_next_tlv(rx_tlv,
						mon_pdev->is_tlv_hdr_64_bit);

				/* Stop if the TLV walk runs past the end of
				 * the status buffer.
				 */
				if (qdf_unlikely((rx_tlv - rx_tlv_start) >=
						 RX_MON_STATUS_BUF_SIZE))
					break;

			} while ((tlv_status == HAL_TLV_STATUS_PPDU_NOT_DONE) ||
				 (tlv_status == HAL_TLV_STATUS_HEADER) ||
				 (tlv_status == HAL_TLV_STATUS_MPDU_END) ||
				 (tlv_status == HAL_TLV_STATUS_MPDU_START) ||
				 (tlv_status == HAL_TLV_STATUS_MSDU_END));
		}
		if (qdf_unlikely(mon_pdev->dp_peer_based_pktlog)) {
			dp_rx_process_peer_based_pktlog(soc, ppdu_info,
							status_nbuf,
							pdev->pdev_id);
		} else {
			if (qdf_unlikely(mon_pdev->rx_pktlog_mode == DP_RX_PKTLOG_FULL))
				pktlog_mode = WDI_EVENT_RX_DESC;
			else if (qdf_unlikely(mon_pdev->rx_pktlog_mode == DP_RX_PKTLOG_LITE))
				pktlog_mode = WDI_EVENT_LITE_RX;

			if (qdf_unlikely(pktlog_mode != WDI_NO_VAL))
				dp_wdi_event_handler(pktlog_mode, soc,
						     status_nbuf,
						     HTT_INVALID_PEER,
						     WDI_NO_VAL, pdev->pdev_id);
		}

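		/* Ownership note: exactly one of the branches below is
		 * expected to consume status_nbuf, either by freeing it
		 * directly or by handing it off to the respective handler
		 * (smart mesh / m_copy / enhanced capture).
		 */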
		/* smart monitor vap and m_copy cannot co-exist */
		if (qdf_unlikely(ppdu_info->rx_status.monitor_direct_used &&
				 mon_pdev->neighbour_peers_added &&
				 mon_pdev->mvdev)) {
			smart_mesh_status = dp_rx_handle_smart_mesh_mode(soc,
						pdev, ppdu_info, status_nbuf);
			if (smart_mesh_status)
				qdf_nbuf_free(status_nbuf);
		} else if (qdf_unlikely(mon_pdev->mcopy_mode)) {
			dp_rx_process_mcopy_mode(soc, pdev,
						 ppdu_info, tlv_status,
						 status_nbuf);
		} else if (qdf_unlikely(rx_enh_capture_mode != CDP_RX_ENH_CAPTURE_DISABLED)) {
			if (!nbuf_used)
				qdf_nbuf_free(status_nbuf);

			if (tlv_status == HAL_TLV_STATUS_PPDU_DONE)
				enh_log_status =
					dp_rx_handle_enh_capture(soc,
								 pdev,
								 ppdu_info);
		} else {
			qdf_nbuf_free(status_nbuf);
		}

		if (qdf_unlikely(tlv_status == HAL_TLV_STATUS_PPDU_NON_STD_DONE)) {
			dp_rx_mon_deliver_non_std(soc, mac_id);
		} else if ((qdf_likely(tlv_status == HAL_TLV_STATUS_PPDU_DONE)) &&
			   (qdf_likely(!dp_rx_mon_check_phyrx_abort(pdev, ppdu_info)))) {
			rx_mon_stats->status_ppdu_done++;
			dp_rx_mon_handle_mu_ul_info(ppdu_info);

			if (qdf_unlikely(mon_pdev->tx_capture_enabled
					 != CDP_TX_ENH_CAPTURE_DISABLED))
				dp_send_ack_frame_to_stack(soc, pdev,
							   ppdu_info);

			if (qdf_likely(mon_pdev->enhanced_stats_en ||
				       mon_pdev->mcopy_mode ||
				       mon_pdev->neighbour_peers_added))
				dp_rx_handle_ppdu_stats(soc, pdev, ppdu_info);
			else if (dp_cfr_rcc_mode_status(pdev))
				dp_rx_handle_cfr(soc, pdev, ppdu_info);

			mon_pdev->mon_ppdu_status = DP_PPDU_STATUS_DONE;

			/* Collect spcl vap stats if configured */
			if (qdf_unlikely(mon_pdev->scan_spcl_vap_configured))
				dp_rx_mon_update_scan_spcl_vap_stats(pdev,
								     ppdu_info);

			/*
			 * If chan_num was not fetched correctly from the
			 * ppdu RX TLV, get it from the value saved in pdev.
			 */
			if (qdf_unlikely(mon_pdev->ppdu_info.rx_status.chan_num == 0))
				mon_pdev->ppdu_info.rx_status.chan_num =
					mon_pdev->mon_chan_num;
			/*
			 * If chan_freq was not fetched correctly from the
			 * ppdu RX TLV, get it from the value saved in pdev.
			 */
			if (qdf_unlikely(mon_pdev->ppdu_info.rx_status.chan_freq == 0)) {
				mon_pdev->ppdu_info.rx_status.chan_freq =
					mon_pdev->mon_chan_freq;
			}

			if (!mon_soc->full_mon_mode)
				dp_rx_mon_dest_process(soc, int_ctx, mac_id,
						       quota);

			mon_pdev->mon_ppdu_status = DP_PPDU_STATUS_START;
		} else {
			dp_rx_mon_handle_ppdu_undecoded_metadata(soc, pdev,
								 ppdu_info);
		}
	}
	return;
}
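
/*
 * PPDU state machine note: dp_rx_mon_status_process_tlv() only runs while
 * mon_ppdu_status is DP_PPDU_STATUS_START. On a complete PPDU it moves to
 * DP_PPDU_STATUS_DONE, reaps the monitor destination ring (unless full
 * monitor mode owns that path), and then re-arms to DP_PPDU_STATUS_START
 * for the next PPDU.
 */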

/**
 * dp_rx_mon_status_srng_process() - Process the monitor status ring:
 *	post filled status buffers to the Rx status queue for later
 *	processing once the status TLVs are complete, and allocate a new
 *	buffer to the status ring for each filled buffer that is posted.
 * @soc: core txrx main context
 * @int_ctx: interrupt context
 * @mac_id: mac_id which is one of 3 mac_ids
 * @quota: No. of ring entries that can be serviced in one shot.
 *
 * Return: uint32_t: No. of ring entries processed.
 */
static inline uint32_t
dp_rx_mon_status_srng_process(struct dp_soc *soc, struct dp_intr *int_ctx,
			      uint32_t mac_id, uint32_t quota)
{
	struct dp_pdev *pdev = dp_get_pdev_for_lmac_id(soc, mac_id);
	hal_soc_handle_t hal_soc;
	void *mon_status_srng;
	void *rxdma_mon_status_ring_entry;
	QDF_STATUS status;
	enum dp_mon_reap_status reap_status;
	uint32_t work_done = 0;
	struct dp_mon_pdev *mon_pdev;

	if (qdf_unlikely(!pdev)) {
		dp_rx_mon_status_debug("%pK: pdev is null for mac_id = %d",
				       soc, mac_id);
		return work_done;
	}

	mon_pdev = pdev->monitor_pdev;

	mon_status_srng = soc->rxdma_mon_status_ring[mac_id].hal_srng;

	qdf_assert(mon_status_srng);
	if (qdf_unlikely(!mon_status_srng ||
			 !hal_srng_initialized(mon_status_srng))) {

		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
			  "%s %d : HAL Monitor Status Ring Init Failed -- %pK",
			  __func__, __LINE__, mon_status_srng);
		return work_done;
	}

	hal_soc = soc->hal_soc;

	qdf_assert(hal_soc);

	if (qdf_unlikely(dp_srng_access_start(int_ctx, soc, mon_status_srng)))
		goto done;

	/* mon_status_ring_desc => WBM_BUFFER_RING STRUCT =>
	 * BUFFER_ADDR_INFO STRUCT
	 */
	while (qdf_likely((rxdma_mon_status_ring_entry =
		hal_srng_src_peek_n_get_next(hal_soc, mon_status_srng))
			&& quota--)) {
		struct hal_buf_info hbi;
		qdf_nbuf_t status_nbuf;
		struct dp_rx_desc *rx_desc;
		uint8_t *status_buf;
		qdf_dma_addr_t paddr;
		uint64_t buf_addr;
		struct rx_desc_pool *rx_desc_pool;

		rx_desc_pool = &soc->rx_desc_status[mac_id];
		buf_addr =
			(HAL_RX_BUFFER_ADDR_31_0_GET(
				rxdma_mon_status_ring_entry) |
			((uint64_t)(HAL_RX_BUFFER_ADDR_39_32_GET(
				rxdma_mon_status_ring_entry)) << 32));

		if (qdf_likely(buf_addr)) {

			hal_rx_buf_cookie_rbm_get(soc->hal_soc,
					(uint32_t *)rxdma_mon_status_ring_entry,
					&hbi);
			rx_desc = dp_rx_cookie_2_va_mon_status(soc,
							       hbi.sw_cookie);
			dp_rx_mon_status_ring_record_entry(soc,
						DP_MON_STATUS_BUF_REAP,
						rxdma_mon_status_ring_entry,
						rx_desc, NULL);

			qdf_assert_always(rx_desc);

			if (qdf_unlikely(!dp_rx_desc_paddr_sanity_check(rx_desc,
								buf_addr))) {
				DP_STATS_INC(soc, rx.err.nbuf_sanity_fail, 1);
				hal_srng_src_get_next(hal_soc, mon_status_srng);
				continue;
			}

			status_nbuf = rx_desc->nbuf;

			qdf_nbuf_sync_for_cpu(soc->osdev, status_nbuf,
					      QDF_DMA_FROM_DEVICE);

			status_buf = qdf_nbuf_data(status_nbuf);

			status = hal_get_rx_status_done(status_buf);

			if (qdf_unlikely(status != QDF_STATUS_SUCCESS)) {
				uint32_t hp, tp;

				hal_get_sw_hptp(hal_soc, mon_status_srng,
						&tp, &hp);
				dp_info_rl("tlv tag status error hp:%u, tp:%u",
					   hp, tp);

				/* The RxDMA status done bit might not be set
				 * even though tp was moved by HW.
				 */

				/* If the done status is missing:
				 * 1. As per the MAC team's suggestion, when
				 *    the HP + 1 entry is peeked and its DMA
				 *    is not done but the HP + 2 entry's DMA
				 *    done is set, skip the HP + 1 entry and
				 *    start processing in the next interrupt.
				 * 2. If the HP + 2 entry's DMA done is not
				 *    set, poll on the HP + 1 entry's DMA done
				 *    bit, i.e. check the same buffer again on
				 *    the next dp_rx_mon_status_srng_process()
				 *    pass.
				 */
				reap_status = dp_rx_mon_handle_status_buf_done(pdev,
							mon_status_srng);
				if (qdf_unlikely(reap_status == DP_MON_STATUS_NO_DMA))
					continue;
				else if (qdf_unlikely(reap_status == DP_MON_STATUS_REPLENISH)) {
					if (!rx_desc->unmapped) {
						qdf_nbuf_unmap_nbytes_single(
							soc->osdev, status_nbuf,
							QDF_DMA_FROM_DEVICE,
							rx_desc_pool->buf_size);
						rx_desc->unmapped = 1;
					}
					qdf_nbuf_free(status_nbuf);
					goto buf_replenish;
				}
			}
			qdf_nbuf_set_pktlen(status_nbuf,
					    RX_MON_STATUS_BUF_SIZE);

			if (qdf_likely(!rx_desc->unmapped)) {
				qdf_nbuf_unmap_nbytes_single(soc->osdev,
						status_nbuf,
						QDF_DMA_FROM_DEVICE,
						rx_desc_pool->buf_size);
				rx_desc->unmapped = 1;
			}

			/* Put the status_nbuf to queue */
			qdf_nbuf_queue_add(&mon_pdev->rx_status_q, status_nbuf);
			dp_rx_mon_status_ring_record_entry(soc,
						DP_MON_STATUS_BUF_ENQUEUE,
						rxdma_mon_status_ring_entry,
						rx_desc, status_nbuf);

		} else {
			union dp_rx_desc_list_elem_t *desc_list = NULL;
			union dp_rx_desc_list_elem_t *tail = NULL;
			uint32_t num_alloc_desc;

			num_alloc_desc = dp_rx_get_free_desc_list(soc, mac_id,
								  rx_desc_pool,
								  1,
								  &desc_list,
								  &tail);
			/*
			 * No free descriptors available
			 */
			if (qdf_unlikely(num_alloc_desc == 0)) {
				work_done++;
				break;
			}

			rx_desc = &desc_list->rx_desc;
		}

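		/* Note: a zero buffer address above means this entry was
		 * previously left empty (see the allocation-failure path
		 * below, which writes a NULL address into the ring), so a
		 * fresh descriptor is taken from the free list instead of
		 * reaping one; either way the entry is refilled here.
		 */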
buf_replenish:
		status_nbuf = dp_rx_nbuf_prepare(soc, pdev);

		/*
		 * If the qdf_nbuf allocation or map failed:
		 * free the dp rx desc back to the free list,
		 * fill in a NULL dma address at the current HP entry,
		 * keep HP in mon_status_ring unchanged,
		 * and let the next dp_rx_mon_status_srng_process()
		 * pass fill in a buffer at the current HP.
		 */
		if (qdf_unlikely(!status_nbuf)) {
			union dp_rx_desc_list_elem_t *desc_list = NULL;
			union dp_rx_desc_list_elem_t *tail = NULL;
			struct rx_desc_pool *rx_desc_pool;

			rx_desc_pool = &soc->rx_desc_status[mac_id];

			dp_info_rl("fail to allocate or map qdf_nbuf");
			dp_rx_add_to_free_desc_list(&desc_list,
						    &tail, rx_desc);
			dp_rx_add_desc_list_to_free_list(soc, &desc_list,
							 &tail, mac_id,
							 rx_desc_pool);

			hal_rxdma_buff_addr_info_set(
				hal_soc, rxdma_mon_status_ring_entry,
				0, 0,
				HAL_RX_BUF_RBM_SW3_BM(soc->wbm_sw0_bm_id));
			work_done++;
			break;
		}

		paddr = qdf_nbuf_get_frag_paddr(status_nbuf, 0);

		rx_desc->nbuf = status_nbuf;
		rx_desc->in_use = 1;
		rx_desc->unmapped = 0;

		hal_rxdma_buff_addr_info_set(hal_soc,
					     rxdma_mon_status_ring_entry,
					     paddr, rx_desc->cookie,
					     HAL_RX_BUF_RBM_SW3_BM(soc->wbm_sw0_bm_id));

		hal_srng_src_get_next(hal_soc, mon_status_srng);
		work_done++;
	}
done:

	dp_srng_access_end(int_ctx, soc, mon_status_srng);

	return work_done;
}

uint32_t
dp_rx_mon_status_process(struct dp_soc *soc, struct dp_intr *int_ctx,
			 uint32_t mac_id, uint32_t quota)
{
	uint32_t work_done;

	work_done = dp_rx_mon_status_srng_process(soc, int_ctx, mac_id, quota);
	quota -= work_done;
	dp_rx_mon_status_process_tlv(soc, int_ctx, mac_id, quota);

	return work_done;
}
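
/*
 * Usage sketch (hypothetical caller): the status ring is typically
 * serviced from the monitor interrupt path with a work budget, e.g.
 *
 *	work = dp_rx_mon_status_process(soc, int_ctx, mac_id, budget);
 *	budget -= work;
 *
 * The SRNG reap consumes part of the quota first; whatever remains is
 * handed to dp_rx_mon_status_process_tlv() for TLV parsing.
 */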

QDF_STATUS
dp_rx_pdev_mon_status_buffers_alloc(struct dp_pdev *pdev, uint32_t mac_id)
{
	uint8_t pdev_id = pdev->pdev_id;
	struct dp_soc *soc = pdev->soc;
	struct dp_srng *mon_status_ring;
	uint32_t num_entries;
	struct rx_desc_pool *rx_desc_pool;
	struct wlan_cfg_dp_soc_ctxt *soc_cfg_ctx;
	union dp_rx_desc_list_elem_t *desc_list = NULL;
	union dp_rx_desc_list_elem_t *tail = NULL;

	soc_cfg_ctx = soc->wlan_cfg_ctx;
	mon_status_ring = &soc->rxdma_mon_status_ring[mac_id];

	num_entries = mon_status_ring->num_entries;

	rx_desc_pool = &soc->rx_desc_status[mac_id];

	dp_debug("Mon RX Desc Pool[%d] entries=%u",
		 pdev_id, num_entries);

	return dp_rx_mon_status_buffers_replenish(soc, mac_id, mon_status_ring,
						  rx_desc_pool, num_entries,
						  &desc_list, &tail,
						  HAL_RX_BUF_RBM_SW3_BM(soc->wbm_sw0_bm_id));
}

QDF_STATUS
dp_rx_pdev_mon_status_desc_pool_alloc(struct dp_pdev *pdev, uint32_t mac_id)
{
	uint8_t pdev_id = pdev->pdev_id;
	struct dp_soc *soc = pdev->soc;
	struct dp_srng *mon_status_ring;
	uint32_t num_entries;
	struct rx_desc_pool *rx_desc_pool;
	struct wlan_cfg_dp_soc_ctxt *soc_cfg_ctx;

	soc_cfg_ctx = soc->wlan_cfg_ctx;
	mon_status_ring = &soc->rxdma_mon_status_ring[mac_id];

	num_entries = mon_status_ring->num_entries;

	rx_desc_pool = &soc->rx_desc_status[mac_id];

	dp_debug("Mon RX Desc Pool[%d] entries=%u", pdev_id, num_entries);

	rx_desc_pool->desc_type = DP_RX_DESC_STATUS_TYPE;
	return dp_rx_desc_pool_alloc(soc, num_entries + 1, rx_desc_pool);
}

void
dp_rx_pdev_mon_status_desc_pool_init(struct dp_pdev *pdev, uint32_t mac_id)
{
	uint32_t i;
	uint8_t pdev_id = pdev->pdev_id;
	struct dp_soc *soc = pdev->soc;
	struct dp_srng *mon_status_ring;
	uint32_t num_entries;
	struct rx_desc_pool *rx_desc_pool;
	struct wlan_cfg_dp_soc_ctxt *soc_cfg_ctx;
	struct dp_mon_pdev *mon_pdev = pdev->monitor_pdev;

	soc_cfg_ctx = soc->wlan_cfg_ctx;
	mon_status_ring = &soc->rxdma_mon_status_ring[mac_id];

	num_entries = mon_status_ring->num_entries;

	rx_desc_pool = &soc->rx_desc_status[mac_id];

	dp_debug("Mon RX Desc status Pool[%d] init entries=%u",
		 pdev_id, num_entries);

	rx_desc_pool->owner = HAL_RX_BUF_RBM_SW3_BM(soc->wbm_sw0_bm_id);
	rx_desc_pool->buf_size = RX_MON_STATUS_BUF_SIZE;
	rx_desc_pool->buf_alignment = RX_DATA_BUFFER_ALIGNMENT;
	/* Disable frag processing flag */
	dp_rx_enable_mon_dest_frag(rx_desc_pool, false);

	dp_rx_desc_pool_init(soc, mac_id, num_entries + 1, rx_desc_pool);

	qdf_nbuf_queue_init(&mon_pdev->rx_status_q);

	mon_pdev->mon_ppdu_status = DP_PPDU_STATUS_START;

	qdf_mem_zero(&mon_pdev->ppdu_info, sizeof(mon_pdev->ppdu_info));

	/*
	 * Set last_ppdu_id to HAL_INVALID_PPDU_ID so that it cannot match
	 * a '0' ppdu_id from the monitor status ring.
	 */
	mon_pdev->ppdu_info.com_info.last_ppdu_id = HAL_INVALID_PPDU_ID;

	qdf_mem_zero(&mon_pdev->rx_mon_stats, sizeof(mon_pdev->rx_mon_stats));

	dp_rx_mon_init_dbg_ppdu_stats(&mon_pdev->ppdu_info,
				      &mon_pdev->rx_mon_stats);

	for (i = 0; i < MAX_MU_USERS; i++) {
		qdf_nbuf_queue_init(&mon_pdev->mpdu_q[i]);
		mon_pdev->is_mpdu_hdr[i] = true;
	}

	/* Zero the whole msdu_list array, not just one element */
	qdf_mem_zero(mon_pdev->msdu_list, sizeof(mon_pdev->msdu_list));

	mon_pdev->rx_enh_capture_mode = CDP_RX_ENH_CAPTURE_DISABLED;
}
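
/*
 * Sizing note: the status descriptor pool is created with num_entries + 1
 * descriptors, which appears to pair with the `count <= num_req_buffers`
 * loop in dp_rx_mon_status_buffers_replenish() below, i.e. up to one more
 * buffer than requested, so the ring can be kept completely full.
 */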

void
dp_rx_pdev_mon_status_desc_pool_deinit(struct dp_pdev *pdev, uint32_t mac_id)
{
	uint8_t pdev_id = pdev->pdev_id;
	struct dp_soc *soc = pdev->soc;
	struct rx_desc_pool *rx_desc_pool;

	rx_desc_pool = &soc->rx_desc_status[mac_id];

	dp_debug("Mon RX Desc status Pool[%d] deinit", pdev_id);

	dp_rx_desc_pool_deinit(soc, rx_desc_pool, mac_id);
}

void
dp_rx_pdev_mon_status_desc_pool_free(struct dp_pdev *pdev, uint32_t mac_id)
{
	uint8_t pdev_id = pdev->pdev_id;
	struct dp_soc *soc = pdev->soc;
	struct rx_desc_pool *rx_desc_pool;

	rx_desc_pool = &soc->rx_desc_status[mac_id];

	dp_debug("Mon RX Status Desc Pool Free pdev[%d]", pdev_id);

	dp_rx_desc_pool_free(soc, rx_desc_pool);
}

void
dp_rx_pdev_mon_status_buffers_free(struct dp_pdev *pdev, uint32_t mac_id)
{
	uint8_t pdev_id = pdev->pdev_id;
	struct dp_soc *soc = pdev->soc;
	struct rx_desc_pool *rx_desc_pool;

	rx_desc_pool = &soc->rx_desc_status[mac_id];

	dp_debug("Mon RX Status Buffers Free pdev[%d]", pdev_id);

	dp_rx_desc_nbuf_free(soc, rx_desc_pool);
}

/**
 * dp_rx_mon_status_buffers_replenish() - replenish the monitor status ring
 *	with rx nbufs; called during dp rx monitor status ring
 *	initialization and during status processing
 * @dp_soc: core txrx main context
 * @mac_id: mac_id which is one of 3 mac_ids
 * @dp_rxdma_srng: dp monitor status circular ring
 * @rx_desc_pool: Pointer to Rx descriptor pool
 * @num_req_buffers: number of buffers to be replenished
 * @desc_list: list of descs if called from dp rx monitor status
 *	       process, or NULL during dp rx initialization or
 *	       out-of-buffer interrupt
 * @tail: tail of descs list
 * @owner: who owns the nbuf (host, NSS etc...)
 *
 * Return: return success or failure
 */
static inline
QDF_STATUS dp_rx_mon_status_buffers_replenish(struct dp_soc *dp_soc,
					      uint32_t mac_id,
					      struct dp_srng *dp_rxdma_srng,
					      struct rx_desc_pool *rx_desc_pool,
					      uint32_t num_req_buffers,
					      union dp_rx_desc_list_elem_t **desc_list,
					      union dp_rx_desc_list_elem_t **tail,
					      uint8_t owner)
{
	uint32_t num_alloc_desc;
	uint16_t num_desc_to_free = 0;
	uint32_t num_entries_avail;
	uint32_t count = 0;
	int sync_hw_ptr = 1;
	qdf_dma_addr_t paddr;
	qdf_nbuf_t rx_netbuf;
	void *rxdma_ring_entry;
	union dp_rx_desc_list_elem_t *next;
	void *rxdma_srng;
	struct dp_pdev *dp_pdev = dp_get_pdev_for_lmac_id(dp_soc, mac_id);

	if (!dp_pdev) {
		dp_rx_mon_status_debug("%pK: pdev is null for mac_id = %d",
				       dp_soc, mac_id);
		return QDF_STATUS_E_FAILURE;
	}

	rxdma_srng = dp_rxdma_srng->hal_srng;

	qdf_assert(rxdma_srng);

	dp_rx_mon_status_debug("%pK: requested %d buffers for replenish",
			       dp_soc, num_req_buffers);

	/*
	 * If desc_list is NULL, allocate the descs from the freelist
	 */
	if (!(*desc_list)) {

		num_alloc_desc = dp_rx_get_free_desc_list(dp_soc, mac_id,
							  rx_desc_pool,
							  num_req_buffers,
							  desc_list,
							  tail);

		if (!num_alloc_desc) {
			dp_rx_mon_status_err("%pK: no free rx_descs in freelist",
					     dp_soc);
			return QDF_STATUS_E_NOMEM;
		}

		dp_rx_mon_status_debug("%pK: %d rx desc allocated", dp_soc,
				       num_alloc_desc);

		num_req_buffers = num_alloc_desc;
	}

	hal_srng_access_start(dp_soc->hal_soc, rxdma_srng);
	num_entries_avail = hal_srng_src_num_avail(dp_soc->hal_soc,
						   rxdma_srng, sync_hw_ptr);

	dp_rx_mon_status_debug("%pK: no of available entries in rxdma ring: %d",
			       dp_soc, num_entries_avail);

	if (num_entries_avail < num_req_buffers) {
		num_desc_to_free = num_req_buffers - num_entries_avail;
		num_req_buffers = num_entries_avail;
	}

	while (count <= num_req_buffers) {
		rx_netbuf = dp_rx_nbuf_prepare(dp_soc, dp_pdev);

		/*
		 * If the qdf_nbuf allocation or map failed,
		 * keep HP in mon_status_ring unchanged and
		 * let the next dp_rx_mon_status_srng_process()
		 * pass fill in a buffer at the current HP.
		 */
		if (qdf_unlikely(!rx_netbuf)) {
			dp_rx_mon_status_err("%pK: qdf_nbuf allocate or map fail, count %d",
					     dp_soc, count);
			break;
		}

		paddr = qdf_nbuf_get_frag_paddr(rx_netbuf, 0);

		next = (*desc_list)->next;
		rxdma_ring_entry = hal_srng_src_get_cur_hp_n_move_next(
							dp_soc->hal_soc,
							rxdma_srng);

		if (qdf_unlikely(!rxdma_ring_entry)) {
			dp_rx_mon_status_err("%pK: rxdma_ring_entry is NULL, count - %d",
					     dp_soc, count);
			qdf_nbuf_unmap_nbytes_single(dp_soc->osdev, rx_netbuf,
						     QDF_DMA_FROM_DEVICE,
						     rx_desc_pool->buf_size);
			qdf_nbuf_free(rx_netbuf);
			break;
		}

		(*desc_list)->rx_desc.nbuf = rx_netbuf;
		(*desc_list)->rx_desc.in_use = 1;
		(*desc_list)->rx_desc.unmapped = 0;
		count++;

		hal_rxdma_buff_addr_info_set(dp_soc->hal_soc,
					     rxdma_ring_entry, paddr,
					     (*desc_list)->rx_desc.cookie,
					     owner);

		dp_rx_mon_status_debug("%pK: rx_desc=%pK, cookie=%d, nbuf=%pK, paddr=%pK",
				       dp_soc, &(*desc_list)->rx_desc,
				       (*desc_list)->rx_desc.cookie, rx_netbuf,
				       (void *)paddr);

		*desc_list = next;
	}

	hal_srng_access_end(dp_soc->hal_soc, rxdma_srng);

	dp_rx_mon_status_debug("%pK: successfully replenished %d buffers",
			       dp_soc, num_req_buffers);

	dp_rx_mon_status_debug("%pK: %d rx desc added back to free list",
			       dp_soc, num_desc_to_free);

	/*
	 * Add any remaining descriptors back to the free list
	 */
	if (*desc_list) {
		dp_rx_add_desc_list_to_free_list(dp_soc, desc_list, tail,
						 mac_id, rx_desc_pool);
	}

	return QDF_STATUS_SUCCESS;
}
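
/*
 * The section below (compiled only with MON_ENABLE_DROP_FOR_MAC) reaps and
 * drops monitor status ring entries for a mac without handing the buffers
 * to the TLV processing path, presumably so the ring does not back up when
 * monitor processing for that mac is not consuming them; each reaped entry
 * is still refilled with a fresh buffer.
 */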

#if !defined(DISABLE_MON_CONFIG) && defined(MON_ENABLE_DROP_FOR_MAC)
/**
 * dp_mon_status_srng_drop_for_mac() - Drop the mon status ring packets for
 *				       a given mac
 * @pdev: DP pdev
 * @mac_id: mac id
 * @quota: maximum number of ring entries that can be processed
 *
 * Return: Number of ring entries reaped
 */
static uint32_t
dp_mon_status_srng_drop_for_mac(struct dp_pdev *pdev, uint32_t mac_id,
				uint32_t quota)
{
	struct dp_soc *soc = pdev->soc;
	void *mon_status_srng;
	hal_soc_handle_t hal_soc;
	void *ring_desc;
	uint32_t reap_cnt = 0;

	if (qdf_unlikely(!soc || !soc->hal_soc))
		return reap_cnt;

	mon_status_srng = soc->rxdma_mon_status_ring[mac_id].hal_srng;

	if (qdf_unlikely(!mon_status_srng ||
			 !hal_srng_initialized(mon_status_srng)))
		return reap_cnt;

	hal_soc = soc->hal_soc;

	if (qdf_unlikely(hal_srng_access_start(hal_soc, mon_status_srng)))
		return reap_cnt;

	while ((ring_desc =
		hal_srng_src_peek_n_get_next(hal_soc, mon_status_srng)) &&
		reap_cnt < MON_DROP_REAP_LIMIT && quota--) {
		uint64_t buf_addr;
		struct hal_buf_info hbi;
		struct dp_rx_desc *rx_desc;
		qdf_nbuf_t status_nbuf;
		uint8_t *status_buf;
		enum dp_mon_reap_status reap_status;
		qdf_dma_addr_t iova;
		struct rx_desc_pool *rx_desc_pool;

		rx_desc_pool = &soc->rx_desc_status[mac_id];

		buf_addr = (HAL_RX_BUFFER_ADDR_31_0_GET(ring_desc) |
			((uint64_t)(HAL_RX_BUFFER_ADDR_39_32_GET(ring_desc)) << 32));

		if (qdf_likely(buf_addr)) {
			hal_rx_buf_cookie_rbm_get(soc->hal_soc,
						  (uint32_t *)ring_desc,
						  &hbi);
			rx_desc = dp_rx_cookie_2_va_mon_status(soc,
							       hbi.sw_cookie);

			qdf_assert_always(rx_desc);

			status_nbuf = rx_desc->nbuf;

			qdf_nbuf_sync_for_cpu(soc->osdev, status_nbuf,
					      QDF_DMA_FROM_DEVICE);

			status_buf = qdf_nbuf_data(status_nbuf);

			if (hal_get_rx_status_done(status_buf) !=
			    QDF_STATUS_SUCCESS) {
				/* If the done status is missing:
				 * 1. As per the MAC team's suggestion, when
				 *    the HP + 1 entry is peeked and its DMA
				 *    is not done but the HP + 2 entry's DMA
				 *    done is set, skip the HP + 1 entry and
				 *    start processing in the next interrupt.
				 * 2. If the HP + 2 entry's DMA done is not
				 *    set, poll on the HP + 1 entry's DMA done
				 *    bit, i.e. check the same buffer again on
				 *    the next pass.
				 */
				reap_status =
					dp_rx_mon_handle_status_buf_done(pdev,
							mon_status_srng);
				if (reap_status == DP_MON_STATUS_NO_DMA)
					break;
			}
			qdf_nbuf_unmap_nbytes_single(soc->osdev, status_nbuf,
						     QDF_DMA_FROM_DEVICE,
						     rx_desc_pool->buf_size);
			qdf_nbuf_free(status_nbuf);
		} else {
			union dp_rx_desc_list_elem_t *rx_desc_elem;

			qdf_spin_lock_bh(&rx_desc_pool->lock);

			if (!rx_desc_pool->freelist) {
				qdf_spin_unlock_bh(&rx_desc_pool->lock);
				break;
			}
			rx_desc_elem = rx_desc_pool->freelist;
			rx_desc_pool->freelist = rx_desc_pool->freelist->next;
			qdf_spin_unlock_bh(&rx_desc_pool->lock);

			rx_desc = &rx_desc_elem->rx_desc;
		}

		status_nbuf = dp_rx_nbuf_prepare(soc, pdev);

		if (qdf_unlikely(!status_nbuf)) {
			union dp_rx_desc_list_elem_t *desc_list = NULL;
			union dp_rx_desc_list_elem_t *tail = NULL;

			dp_info_rl("fail to allocate or map nbuf");
			dp_rx_add_to_free_desc_list(&desc_list, &tail,
						    rx_desc);
			dp_rx_add_desc_list_to_free_list(soc,
							 &desc_list,
							 &tail, mac_id,
							 rx_desc_pool);

			hal_rxdma_buff_addr_info_set(hal_soc, ring_desc, 0, 0,
					HAL_RX_BUF_RBM_SW3_BM(soc->wbm_sw0_bm_id));
			break;
		}

		iova = qdf_nbuf_get_frag_paddr(status_nbuf, 0);

		rx_desc->nbuf = status_nbuf;
		rx_desc->in_use = 1;

		hal_rxdma_buff_addr_info_set(hal_soc, ring_desc, iova,
					     rx_desc->cookie,
					     HAL_RX_BUF_RBM_SW3_BM(soc->wbm_sw0_bm_id));

		reap_cnt++;
		hal_srng_src_get_next(hal_soc, mon_status_srng);
	}

	hal_srng_access_end(hal_soc, mon_status_srng);

	return reap_cnt;
}

uint32_t dp_mon_drop_packets_for_mac(struct dp_pdev *pdev, uint32_t mac_id,
				     uint32_t quota)
{
	uint32_t work_done;

	work_done = dp_mon_status_srng_drop_for_mac(pdev, mac_id, quota);
	if (!dp_is_rxdma_dst_ring_common(pdev))
		dp_mon_dest_srng_drop_for_mac(pdev, mac_id);

	return work_done;
}
#else
uint32_t dp_mon_drop_packets_for_mac(struct dp_pdev *pdev, uint32_t mac_id,
				     uint32_t quota)
{
	return 0;
}
#endif