/*
 * Copyright (c) 2017-2021, The Linux Foundation. All rights reserved.
 * Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */
#include "hal_hw_headers.h"
#include "dp_types.h"
#include "dp_rx.h"
#include "dp_peer.h"
#include "hal_rx.h"
#include "hal_api.h"
#include "qdf_trace.h"
#include "qdf_nbuf.h"
#include "hal_api_mon.h"
#include "dp_internal.h"
#include "qdf_mem.h"   /* qdf_mem_malloc, free */
#include "dp_htt.h"
#include "dp_mon.h"
#include "dp_rx_mon.h"
#include "htt.h"
#include <dp_mon_1.0.h>
#include <dp_rx_mon_1.0.h>

#ifdef FEATURE_PERPKT_INFO
#include "dp_ratetable.h"
#endif

static inline
QDF_STATUS dp_rx_mon_status_buffers_replenish(struct dp_soc *dp_soc,
					      uint32_t mac_id,
					      struct dp_srng *dp_rxdma_srng,
					      struct rx_desc_pool *rx_desc_pool,
					      uint32_t num_req_buffers,
					      union dp_rx_desc_list_elem_t **desc_list,
					      union dp_rx_desc_list_elem_t **tail,
					      uint8_t owner);

/**
 * dp_rx_mon_handle_status_buf_done() - Handle status buf DMA not done
 *
 * @pdev: DP pdev handle
 * @mon_status_srng: Monitor status SRNG
 *
 * As per the MAC team's suggestion, if the HP + 2 entry's DMA done bit is
 * set, skip the HP + 1 entry and start processing in the next interrupt.
 * If the HP + 2 entry's DMA done bit is not set, poll on the HP + 1 entry
 * until its DMA done TLV is set.
 *
 * Return: enum dp_mon_reap_status
 */
enum dp_mon_reap_status
dp_rx_mon_handle_status_buf_done(struct dp_pdev *pdev,
				 void *mon_status_srng)
{
	struct dp_soc *soc = pdev->soc;
	hal_soc_handle_t hal_soc;
	void *ring_entry;
	struct hal_buf_info hbi;
	qdf_nbuf_t status_nbuf;
	struct dp_rx_desc *rx_desc;
	void *rx_tlv;
	QDF_STATUS buf_status;
	struct dp_mon_pdev *mon_pdev = pdev->monitor_pdev;

	hal_soc = soc->hal_soc;

	ring_entry = hal_srng_src_peek_n_get_next_next(hal_soc,
						       mon_status_srng);
	if (!ring_entry) {
		dp_rx_mon_status_debug("%pK: Monitor status ring entry is NULL for SRNG: %pK",
				       soc, mon_status_srng);
		return DP_MON_STATUS_NO_DMA;
	}

	hal_rx_buf_cookie_rbm_get(soc->hal_soc, (uint32_t *)ring_entry,
				  &hbi);
	rx_desc = dp_rx_cookie_2_va_mon_status(soc, hbi.sw_cookie);

	qdf_assert_always(rx_desc);

	status_nbuf = rx_desc->nbuf;

	qdf_nbuf_sync_for_cpu(soc->osdev, status_nbuf,
			      QDF_DMA_FROM_DEVICE);

	rx_tlv = qdf_nbuf_data(status_nbuf);
	buf_status = hal_get_rx_status_done(rx_tlv);

	/* If the status buffer DMA is not done:
	 * 1. As per the MAC team's suggestion, if the HP + 2 entry's DMA done
	 *    bit is set, replenish the HP + 1 entry and start processing in
	 *    the next interrupt.
	 * 2. If the HP + 2 entry's DMA done bit is not set, hold on to the
	 *    mon destination ring.
	 */
	if (buf_status != QDF_STATUS_SUCCESS) {
		dp_err_rl("Monitor status ring: DMA is not done "
			  "for nbuf: %pK", status_nbuf);
		mon_pdev->rx_mon_stats.tlv_tag_status_err++;
		return DP_MON_STATUS_REPLENISH;
	}

	mon_pdev->rx_mon_stats.status_buf_done_war++;

	return DP_MON_STATUS_REPLENISH;
}
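/*
 * Note: this WAR only peeks the HP + 2 entry; it never advances the ring
 * pointer itself. DP_MON_STATUS_NO_DMA tells the caller to retry the same
 * HP + 1 entry on a later pass, while DP_MON_STATUS_REPLENISH tells the
 * caller to free the stuck status buffer and refill that ring entry (see
 * the reap_status handling in dp_rx_mon_status_srng_process() below).
 */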
#ifdef WLAN_RX_PKT_CAPTURE_ENH
#include "dp_rx_mon_feature.h"
#else
static QDF_STATUS
dp_rx_handle_enh_capture(struct dp_soc *soc, struct dp_pdev *pdev,
			 struct hal_rx_ppdu_info *ppdu_info)
{
	return QDF_STATUS_SUCCESS;
}

static void
dp_rx_mon_enh_capture_process(struct dp_pdev *pdev, uint32_t tlv_status,
			      qdf_nbuf_t status_nbuf,
			      struct hal_rx_ppdu_info *ppdu_info,
			      bool *nbuf_used)
{
}
#endif

#ifdef WLAN_TX_PKT_CAPTURE_ENH
#include "dp_rx_mon_feature.h"
#else
static QDF_STATUS
dp_send_ack_frame_to_stack(struct dp_soc *soc,
			   struct dp_pdev *pdev,
			   struct hal_rx_ppdu_info *ppdu_info)
{
	return QDF_STATUS_SUCCESS;
}
#endif

#if defined(HTT_UL_OFDMA_USER_INFO_V0_W0_VALID_M)
static inline void
dp_rx_ul_ofdma_ru_size_to_width(
	uint32_t ru_size,
	uint32_t *ru_width)
{
	uint32_t width;

	width = 0;
	switch (ru_size) {
	case HTT_UL_OFDMA_V0_RU_SIZE_RU_26:
		width = 1;
		break;
	case HTT_UL_OFDMA_V0_RU_SIZE_RU_52:
		width = 2;
		break;
	case HTT_UL_OFDMA_V0_RU_SIZE_RU_106:
		width = 4;
		break;
	case HTT_UL_OFDMA_V0_RU_SIZE_RU_242:
		width = 9;
		break;
	case HTT_UL_OFDMA_V0_RU_SIZE_RU_484:
		width = 18;
		break;
	case HTT_UL_OFDMA_V0_RU_SIZE_RU_996:
		width = 37;
		break;
	case HTT_UL_OFDMA_V0_RU_SIZE_RU_996x2:
		width = 74;
		break;
	default:
		dp_rx_mon_status_err("RU size to width convert err");
		break;
	}
	*ru_width = width;
}
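/*
 * The widths above appear to be expressed in 26-tone RU units (RU26 = 1,
 * RU52 = 2, ..., RU996x2 = 74), which is the unit ofdma_ru_width is
 * reported in. Note also that the HTT UL OFDMA user-info NSS field is
 * zero-based, hence the "+ 1" when it is copied into mon_rx_user_status
 * below.
 */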
static inline void
dp_rx_mon_handle_mu_ul_info(struct hal_rx_ppdu_info *ppdu_info)
{
	struct mon_rx_user_status *mon_rx_user_status;
	uint32_t num_users;
	uint32_t i;
	uint32_t mu_ul_user_v0_word0;
	uint32_t mu_ul_user_v0_word1;
	uint32_t ru_width;
	uint32_t ru_size;

	if (!(ppdu_info->rx_status.reception_type == HAL_RX_TYPE_MU_OFDMA ||
	      ppdu_info->rx_status.reception_type == HAL_RX_TYPE_MU_MIMO))
		return;

	num_users = ppdu_info->com_info.num_users;
	if (num_users > HAL_MAX_UL_MU_USERS)
		num_users = HAL_MAX_UL_MU_USERS;
	for (i = 0; i < num_users; i++) {
		mon_rx_user_status = &ppdu_info->rx_user_status[i];
		mu_ul_user_v0_word0 =
			mon_rx_user_status->mu_ul_user_v0_word0;
		mu_ul_user_v0_word1 =
			mon_rx_user_status->mu_ul_user_v0_word1;

		if (HTT_UL_OFDMA_USER_INFO_V0_W0_VALID_GET(
					mu_ul_user_v0_word0) &&
		    !HTT_UL_OFDMA_USER_INFO_V0_W0_VER_GET(
					mu_ul_user_v0_word0)) {
			mon_rx_user_status->mcs =
				HTT_UL_OFDMA_USER_INFO_V0_W1_MCS_GET(
					mu_ul_user_v0_word1);
			mon_rx_user_status->nss =
				HTT_UL_OFDMA_USER_INFO_V0_W1_NSS_GET(
					mu_ul_user_v0_word1) + 1;

			mon_rx_user_status->mu_ul_info_valid = 1;
			mon_rx_user_status->ofdma_ru_start_index =
				HTT_UL_OFDMA_USER_INFO_V0_W1_RU_START_GET(
					mu_ul_user_v0_word1);

			ru_size =
				HTT_UL_OFDMA_USER_INFO_V0_W1_RU_SIZE_GET(
					mu_ul_user_v0_word1);
			dp_rx_ul_ofdma_ru_size_to_width(ru_size, &ru_width);
			mon_rx_user_status->ofdma_ru_width = ru_width;
			mon_rx_user_status->ofdma_ru_size = ru_size;
		}
	}
}
#else
static inline void
dp_rx_mon_handle_mu_ul_info(struct hal_rx_ppdu_info *ppdu_info)
{
}
#endif

#ifdef QCA_UNDECODED_METADATA_SUPPORT
static inline bool
dp_rx_mon_check_phyrx_abort(struct dp_pdev *pdev,
			    struct hal_rx_ppdu_info *ppdu_info)
{
	return (pdev->monitor_pdev->undecoded_metadata_capture &&
		ppdu_info->rx_status.phyrx_abort);
}

static inline void
dp_rx_mon_handle_ppdu_undecoded_metadata(struct dp_soc *soc,
					 struct dp_pdev *pdev,
					 struct hal_rx_ppdu_info *ppdu_info)
{
	if (pdev->monitor_pdev->undecoded_metadata_capture)
		dp_rx_handle_ppdu_undecoded_metadata(soc, pdev, ppdu_info);

	pdev->monitor_pdev->mon_ppdu_status = DP_PPDU_STATUS_START;
}
#else
static inline bool
dp_rx_mon_check_phyrx_abort(struct dp_pdev *pdev,
			    struct hal_rx_ppdu_info *ppdu_info)
{
	return false;
}

static inline void
dp_rx_mon_handle_ppdu_undecoded_metadata(struct dp_soc *soc,
					 struct dp_pdev *pdev,
					 struct hal_rx_ppdu_info *ppdu_info)
{
}
#endif

#ifdef QCA_SUPPORT_SCAN_SPCL_VAP_STATS
/**
 * dp_rx_mon_update_scan_spcl_vap_stats() - Update special vap stats
 * @pdev: dp pdev context
 * @ppdu_info: ppdu info structure from ppdu ring
 *
 * Return: none
 */
static inline void
dp_rx_mon_update_scan_spcl_vap_stats(struct dp_pdev *pdev,
				     struct hal_rx_ppdu_info *ppdu_info)
{
	struct mon_rx_user_status *rx_user_status = NULL;
	struct dp_mon_pdev *mon_pdev = NULL;
	struct dp_mon_vdev *mon_vdev = NULL;
	uint32_t num_users = 0;
	uint32_t user = 0;

	mon_pdev = pdev->monitor_pdev;
	if (!mon_pdev || !mon_pdev->mvdev)
		return;

	mon_vdev = mon_pdev->mvdev->monitor_vdev;
	if (!mon_vdev || !mon_vdev->scan_spcl_vap_stats)
		return;

	num_users = ppdu_info->com_info.num_users;
	for (user = 0; user < num_users; user++) {
		rx_user_status = &ppdu_info->rx_user_status[user];
		mon_vdev->scan_spcl_vap_stats->rx_ok_pkts +=
			rx_user_status->mpdu_cnt_fcs_ok;
		mon_vdev->scan_spcl_vap_stats->rx_ok_bytes +=
			rx_user_status->mpdu_ok_byte_count;
		mon_vdev->scan_spcl_vap_stats->rx_err_pkts +=
			rx_user_status->mpdu_cnt_fcs_err;
		mon_vdev->scan_spcl_vap_stats->rx_err_bytes +=
			rx_user_status->mpdu_err_byte_count;
	}
	mon_vdev->scan_spcl_vap_stats->rx_mgmt_pkts +=
		ppdu_info->frm_type_info.rx_mgmt_cnt;
	mon_vdev->scan_spcl_vap_stats->rx_ctrl_pkts +=
		ppdu_info->frm_type_info.rx_ctrl_cnt;
	mon_vdev->scan_spcl_vap_stats->rx_data_pkts +=
		ppdu_info->frm_type_info.rx_data_cnt;
}
#else
static inline void
dp_rx_mon_update_scan_spcl_vap_stats(struct dp_pdev *pdev,
				     struct hal_rx_ppdu_info *ppdu_info)
{
}
#endif
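/*
 * dp_rx_mon_update_scan_spcl_vap_stats() is invoked from
 * dp_rx_mon_status_process_tlv() below, and only when
 * mon_pdev->scan_spcl_vap_configured is set for the pdev.
 */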
#ifdef WLAN_FEATURE_DP_MON_STATUS_RING_HISTORY
/**
 * dp_rx_mon_status_ring_record_entry() - Record one entry of a particular
 *					  event type into the monitor status
 *					  buffer tracking history.
 * @soc: DP soc handle
 * @event: event type
 * @ring_desc: Monitor status ring descriptor
 * @rx_desc: RX descriptor
 * @nbuf: status buffer
 *
 * Return: None
 */
static void
dp_rx_mon_status_ring_record_entry(struct dp_soc *soc,
				   enum dp_mon_status_process_event event,
				   hal_ring_desc_t ring_desc,
				   struct dp_rx_desc *rx_desc,
				   qdf_nbuf_t nbuf)
{
	struct dp_mon_stat_info_record *record;
	struct hal_buf_info hbi;
	uint32_t idx;

	if (qdf_unlikely(!soc->mon_status_ring_history))
		return;

	idx = dp_history_get_next_index(&soc->mon_status_ring_history->index,
					DP_MON_STATUS_HIST_MAX);

	/* No NULL check needed for record since it's an array */
	record = &soc->mon_status_ring_history->entry[idx];

	record->timestamp = qdf_get_log_timestamp();
	if (event == DP_MON_STATUS_BUF_REAP) {
		hal_rx_buffer_addr_info_get_paddr(ring_desc, &hbi);

		/* buffer_addr_info is the first element of ring_desc */
		hal_rx_buf_cookie_rbm_get(soc->hal_soc, (uint32_t *)ring_desc,
					  &hbi);

		record->hbi.paddr = hbi.paddr;
		record->hbi.sw_cookie = hbi.sw_cookie;
		record->hbi.rbm = hbi.rbm;
		record->rx_desc = rx_desc;
		if (rx_desc) {
			record->nbuf = rx_desc->nbuf;
			record->rx_desc_nbuf_data = qdf_nbuf_data(rx_desc->nbuf);
		} else {
			record->nbuf = NULL;
			record->rx_desc_nbuf_data = NULL;
		}
	}

	if (event == DP_MON_STATUS_BUF_ENQUEUE) {
		record->nbuf = nbuf;
		record->rx_desc_nbuf_data = qdf_nbuf_data(nbuf);
	}

	if (event == DP_MON_STATUS_BUF_DEQUEUE) {
		record->nbuf = nbuf;
		if (nbuf)
			record->rx_desc_nbuf_data = qdf_nbuf_data(nbuf);
		else
			record->rx_desc_nbuf_data = NULL;
	}
}
#else
static void
dp_rx_mon_status_ring_record_entry(struct dp_soc *soc,
				   enum dp_mon_status_process_event event,
				   hal_ring_desc_t ring_desc,
				   struct dp_rx_desc *rx_desc,
				   qdf_nbuf_t nbuf)
{
}
#endif
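/*
 * Only DP_MON_STATUS_BUF_REAP events snapshot the ring descriptor above
 * (paddr/cookie/rbm); ENQUEUE and DEQUEUE events just track the status
 * nbuf as it moves through mon_pdev->rx_status_q, which helps reconstruct
 * a buffer's lifetime from the history when debugging.
 */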
/**
 * dp_rx_mon_status_process_tlv() - Process status TLVs in status
 *				    buffers on the Rx status queue posted
 *				    by status SRNG processing.
 * @soc: core txrx main context
 * @int_ctx: interrupt context
 * @mac_id: mac_id which is one of 3 mac_ids
 * @quota: amount of work which can be done
 *
 * Return: none
 */
static inline void
dp_rx_mon_status_process_tlv(struct dp_soc *soc, struct dp_intr *int_ctx,
			     uint32_t mac_id, uint32_t quota)
{
	struct dp_pdev *pdev = dp_get_pdev_for_lmac_id(soc, mac_id);
	struct hal_rx_ppdu_info *ppdu_info;
	qdf_nbuf_t status_nbuf;
	uint8_t *rx_tlv;
	uint8_t *rx_tlv_start;
	uint32_t tlv_status = HAL_TLV_STATUS_BUF_DONE;
	QDF_STATUS enh_log_status = QDF_STATUS_SUCCESS;
	struct cdp_pdev_mon_stats *rx_mon_stats;
	int smart_mesh_status;
	enum WDI_EVENT pktlog_mode = WDI_NO_VAL;
	bool nbuf_used;
	uint32_t rx_enh_capture_mode;
	struct dp_mon_soc *mon_soc = soc->monitor_soc;
	struct dp_mon_pdev *mon_pdev;

	if (qdf_unlikely(!pdev)) {
		dp_rx_mon_status_debug("%pK: pdev is null for mac_id = %d", soc,
				       mac_id);
		return;
	}

	mon_pdev = pdev->monitor_pdev;
	ppdu_info = &mon_pdev->ppdu_info;
	rx_mon_stats = &mon_pdev->rx_mon_stats;

	if (qdf_unlikely(mon_pdev->mon_ppdu_status != DP_PPDU_STATUS_START))
		return;

	rx_enh_capture_mode = mon_pdev->rx_enh_capture_mode;

	while (!qdf_nbuf_is_queue_empty(&mon_pdev->rx_status_q)) {
		status_nbuf = qdf_nbuf_queue_remove(&mon_pdev->rx_status_q);
		dp_rx_mon_status_ring_record_entry(soc,
						   DP_MON_STATUS_BUF_DEQUEUE,
						   NULL, NULL, status_nbuf);

		if (qdf_unlikely(!status_nbuf))
			return;

		rx_tlv = qdf_nbuf_data(status_nbuf);
		rx_tlv_start = rx_tlv;
		nbuf_used = false;

		if ((mon_pdev->mvdev) || (mon_pdev->enhanced_stats_en) ||
		    (mon_pdev->mcopy_mode) || (dp_cfr_rcc_mode_status(pdev)) ||
		    (mon_pdev->undecoded_metadata_capture) ||
		    (rx_enh_capture_mode != CDP_RX_ENH_CAPTURE_DISABLED)) {
			do {
				tlv_status = hal_rx_status_get_tlv_info(rx_tlv,
						ppdu_info, pdev->soc->hal_soc,
						status_nbuf);

				dp_rx_mon_update_dbg_ppdu_stats(ppdu_info,
								rx_mon_stats);

				dp_rx_mon_enh_capture_process(pdev, tlv_status,
						status_nbuf, ppdu_info,
						&nbuf_used);

				dp_rx_mcopy_process_ppdu_info(pdev,
							      ppdu_info,
							      tlv_status);

				rx_tlv = hal_rx_status_get_next_tlv(rx_tlv,
						mon_pdev->is_tlv_hdr_64_bit);

				if (qdf_unlikely((rx_tlv - rx_tlv_start) >=
						 RX_MON_STATUS_BUF_SIZE))
					break;

			} while ((tlv_status == HAL_TLV_STATUS_PPDU_NOT_DONE) ||
				 (tlv_status == HAL_TLV_STATUS_HEADER) ||
				 (tlv_status == HAL_TLV_STATUS_MPDU_END) ||
				 (tlv_status == HAL_TLV_STATUS_MPDU_START) ||
				 (tlv_status == HAL_TLV_STATUS_MSDU_END));
		}
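		/*
		 * The TLV walk above continues while the HAL reports
		 * PPDU_NOT_DONE / HEADER / MPDU_START / MPDU_END / MSDU_END,
		 * and stops on a PPDU completion status or once the TLV
		 * pointer would run past RX_MON_STATUS_BUF_SIZE.
		 */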
		dp_mon_rx_stats_update_rssi_dbm_params(mon_pdev, ppdu_info);
		if (qdf_unlikely(mon_pdev->dp_peer_based_pktlog)) {
			dp_rx_process_peer_based_pktlog(soc, ppdu_info,
							status_nbuf,
							pdev->pdev_id);
		} else {
			if (qdf_unlikely(mon_pdev->rx_pktlog_mode == DP_RX_PKTLOG_FULL))
				pktlog_mode = WDI_EVENT_RX_DESC;
			else if (qdf_unlikely(mon_pdev->rx_pktlog_mode == DP_RX_PKTLOG_LITE))
				pktlog_mode = WDI_EVENT_LITE_RX;

			if (qdf_unlikely(pktlog_mode != WDI_NO_VAL))
				dp_wdi_event_handler(pktlog_mode, soc,
						     status_nbuf,
						     HTT_INVALID_PEER,
						     WDI_NO_VAL, pdev->pdev_id);
		}

		/* smart monitor vap and m_copy cannot co-exist */
		if (qdf_unlikely(ppdu_info->rx_status.monitor_direct_used &&
				 mon_pdev->neighbour_peers_added &&
				 mon_pdev->mvdev)) {
			smart_mesh_status = dp_rx_handle_smart_mesh_mode(soc,
						pdev, ppdu_info, status_nbuf);
			if (smart_mesh_status)
				qdf_nbuf_free(status_nbuf);
		} else if (qdf_unlikely(mon_pdev->mcopy_mode)) {
			dp_rx_process_mcopy_mode(soc, pdev,
						 ppdu_info, tlv_status,
						 status_nbuf);
		} else if (qdf_unlikely(rx_enh_capture_mode != CDP_RX_ENH_CAPTURE_DISABLED)) {
			if (!nbuf_used)
				qdf_nbuf_free(status_nbuf);

			if (tlv_status == HAL_TLV_STATUS_PPDU_DONE)
				enh_log_status =
					dp_rx_handle_enh_capture(soc,
								 pdev, ppdu_info);
		} else {
			qdf_nbuf_free(status_nbuf);
		}

		if (qdf_unlikely(tlv_status == HAL_TLV_STATUS_PPDU_NON_STD_DONE)) {
			dp_rx_mon_deliver_non_std(soc, mac_id);
		} else if ((qdf_likely(tlv_status == HAL_TLV_STATUS_PPDU_DONE)) &&
			   (qdf_likely(!dp_rx_mon_check_phyrx_abort(pdev, ppdu_info)))) {
			rx_mon_stats->status_ppdu_done++;
			dp_rx_mon_handle_mu_ul_info(ppdu_info);

			if (qdf_unlikely(mon_pdev->tx_capture_enabled
					 != CDP_TX_ENH_CAPTURE_DISABLED))
				dp_send_ack_frame_to_stack(soc, pdev,
							   ppdu_info);

			if (qdf_likely(mon_pdev->enhanced_stats_en ||
				       mon_pdev->mcopy_mode ||
				       mon_pdev->neighbour_peers_added))
				dp_rx_handle_ppdu_stats(soc, pdev, ppdu_info);
			else if (dp_cfr_rcc_mode_status(pdev))
				dp_rx_handle_cfr(soc, pdev, ppdu_info);

			mon_pdev->mon_ppdu_status = DP_PPDU_STATUS_DONE;

			/* Collect spcl vap stats if configured */
			if (qdf_unlikely(mon_pdev->scan_spcl_vap_configured))
				dp_rx_mon_update_scan_spcl_vap_stats(pdev,
								     ppdu_info);

			dp_rx_mon_update_user_ctrl_frame_stats(pdev, ppdu_info);

			/*
			 * If chan_num is not fetched correctly from the ppdu
			 * RX TLV, get it from the value saved in the pdev.
			 */
			if (qdf_unlikely(mon_pdev->ppdu_info.rx_status.chan_num == 0))
				mon_pdev->ppdu_info.rx_status.chan_num =
					mon_pdev->mon_chan_num;
			/*
			 * If chan_freq is not fetched correctly from the ppdu
			 * RX TLV, get it from the value saved in the pdev.
			 */
			if (qdf_unlikely(mon_pdev->ppdu_info.rx_status.chan_freq == 0)) {
				mon_pdev->ppdu_info.rx_status.chan_freq =
					mon_pdev->mon_chan_freq;
			}

			if (!mon_soc->full_mon_mode)
				dp_rx_mon_dest_process(soc, int_ctx, mac_id,
						       quota);

			mon_pdev->mon_ppdu_status = DP_PPDU_STATUS_START;
		} else {
			dp_rx_mon_handle_ppdu_undecoded_metadata(soc, pdev,
								 ppdu_info);
		}
	}
}
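/*
 * Note: mon_ppdu_status acts as a small state machine above. TLV
 * processing only runs while it is DP_PPDU_STATUS_START; it moves to
 * DP_PPDU_STATUS_DONE once a complete PPDU status has been parsed, and
 * back to DP_PPDU_STATUS_START after the destination ring (or undecoded
 * metadata) handling for that PPDU finishes.
 */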
/**
 * dp_rx_mon_status_srng_process() - Process the monitor status ring and
 *				     post status ring buffers to the Rx status
 *				     queue for later processing when a status
 *				     ring buffer is filled with status TLVs.
 *				     Allocate a new buffer to the status ring
 *				     if the filled buffer is posted.
 * @soc: core txrx main context
 * @int_ctx: interrupt context
 * @mac_id: mac_id which is one of 3 mac_ids
 * @quota: No. of ring entries that can be serviced in one shot.
 *
 * Return: uint32_t: No. of ring entries processed
 */
static inline uint32_t
dp_rx_mon_status_srng_process(struct dp_soc *soc, struct dp_intr *int_ctx,
			      uint32_t mac_id, uint32_t quota)
{
	struct dp_pdev *pdev = dp_get_pdev_for_lmac_id(soc, mac_id);
	hal_soc_handle_t hal_soc;
	void *mon_status_srng;
	void *rxdma_mon_status_ring_entry;
	QDF_STATUS status;
	enum dp_mon_reap_status reap_status;
	uint32_t work_done = 0;
	struct dp_mon_pdev *mon_pdev;

	if (qdf_unlikely(!pdev)) {
		dp_rx_mon_status_debug("%pK: pdev is null for mac_id = %d",
				       soc, mac_id);
		return work_done;
	}

	mon_pdev = pdev->monitor_pdev;

	mon_status_srng = soc->rxdma_mon_status_ring[mac_id].hal_srng;

	qdf_assert(mon_status_srng);
	if (qdf_unlikely(!mon_status_srng ||
			 !hal_srng_initialized(mon_status_srng))) {
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
			  "%s %d : HAL Monitor Status Ring Init Failed -- %pK",
			  __func__, __LINE__, mon_status_srng);
		return work_done;
	}

	hal_soc = soc->hal_soc;

	qdf_assert(hal_soc);

	if (qdf_unlikely(dp_srng_access_start(int_ctx, soc, mon_status_srng)))
		goto done;

	/* mon_status_ring_desc => WBM_BUFFER_RING STRUCT =>
	 * BUFFER_ADDR_INFO STRUCT
	 */
	while (qdf_likely((rxdma_mon_status_ring_entry =
		hal_srng_src_peek_n_get_next(hal_soc, mon_status_srng))
			&& quota--)) {
		struct hal_buf_info hbi;
		qdf_nbuf_t status_nbuf;
		struct dp_rx_desc *rx_desc;
		uint8_t *status_buf;
		qdf_dma_addr_t paddr;
		uint64_t buf_addr;
		struct rx_desc_pool *rx_desc_pool;

		rx_desc_pool = &soc->rx_desc_status[mac_id];
		buf_addr =
			(HAL_RX_BUFFER_ADDR_31_0_GET(
				rxdma_mon_status_ring_entry) |
			((uint64_t)(HAL_RX_BUFFER_ADDR_39_32_GET(
				rxdma_mon_status_ring_entry)) << 32));

		if (qdf_likely(buf_addr)) {
			hal_rx_buf_cookie_rbm_get(soc->hal_soc,
					(uint32_t *)rxdma_mon_status_ring_entry,
					&hbi);
			rx_desc = dp_rx_cookie_2_va_mon_status(soc,
							       hbi.sw_cookie);
			dp_rx_mon_status_ring_record_entry(soc, DP_MON_STATUS_BUF_REAP,
							   rxdma_mon_status_ring_entry,
							   rx_desc, NULL);

			qdf_assert_always(rx_desc);

			if (qdf_unlikely(!dp_rx_desc_paddr_sanity_check(rx_desc,
									buf_addr))) {
				DP_STATS_INC(soc, rx.err.nbuf_sanity_fail, 1);
				hal_srng_src_get_next(hal_soc, mon_status_srng);
				continue;
			}

			status_nbuf = rx_desc->nbuf;

			qdf_nbuf_sync_for_cpu(soc->osdev, status_nbuf,
					      QDF_DMA_FROM_DEVICE);

			status_buf = qdf_nbuf_data(status_nbuf);

			status = hal_get_rx_status_done(status_buf);

			if (qdf_unlikely(status != QDF_STATUS_SUCCESS)) {
				uint32_t hp, tp;

				hal_get_sw_hptp(hal_soc, mon_status_srng,
						&tp, &hp);
				dp_info_rl("tlv tag status error hp:%u, tp:%u",
					   hp, tp);

				/* RxDMA status done bit might not be set even
				 * though tp is moved by HW.
				 */

				/* If done status is missing:
				 * 1. As per the MAC team's suggestion,
				 *    when the HP + 1 entry is peeked and its
				 *    DMA is not done but the HP + 2 entry's
				 *    DMA done is set, skip the HP + 1 entry
				 *    and start processing in the next
				 *    interrupt.
				 * 2. If the HP + 2 entry's DMA done is not
				 *    set, poll on the HP + 1 entry's DMA done,
				 *    i.e. check the status of the same buffer
				 *    the next time dp_rx_mon_status_srng_process
				 *    runs.
				 */
				reap_status = dp_rx_mon_handle_status_buf_done(pdev,
								mon_status_srng);
				if (qdf_unlikely(reap_status == DP_MON_STATUS_NO_DMA))
					continue;
				else if (qdf_unlikely(reap_status == DP_MON_STATUS_REPLENISH)) {
					if (!rx_desc->unmapped) {
						qdf_nbuf_unmap_nbytes_single(
							soc->osdev, status_nbuf,
							QDF_DMA_FROM_DEVICE,
							rx_desc_pool->buf_size);
						rx_desc->unmapped = 1;
					}
					qdf_nbuf_free(status_nbuf);
					goto buf_replenish;
				}
			}
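			/* DMA done: pass the filled status buffer on to
			 * rx_status_q below; dp_rx_mon_status_process_tlv()
			 * dequeues, parses and eventually frees it.
			 */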
			qdf_nbuf_set_pktlen(status_nbuf,
					    RX_MON_STATUS_BUF_SIZE);

			if (qdf_likely(!rx_desc->unmapped)) {
				qdf_nbuf_unmap_nbytes_single(soc->osdev, status_nbuf,
							     QDF_DMA_FROM_DEVICE,
							     rx_desc_pool->buf_size);
				rx_desc->unmapped = 1;
			}

			/* Put the status_nbuf to queue */
			qdf_nbuf_queue_add(&mon_pdev->rx_status_q, status_nbuf);
			dp_rx_mon_status_ring_record_entry(soc, DP_MON_STATUS_BUF_ENQUEUE,
							   rxdma_mon_status_ring_entry,
							   rx_desc, status_nbuf);
		} else {
			union dp_rx_desc_list_elem_t *desc_list = NULL;
			union dp_rx_desc_list_elem_t *tail = NULL;
			uint32_t num_alloc_desc;

			num_alloc_desc = dp_rx_get_free_desc_list(soc, mac_id,
								  rx_desc_pool,
								  1,
								  &desc_list,
								  &tail);
			/*
			 * No free descriptors available
			 */
			if (qdf_unlikely(num_alloc_desc == 0)) {
				work_done++;
				break;
			}

			rx_desc = &desc_list->rx_desc;
		}
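		/* buf_replenish is reached either by falling through from the
		 * descriptor setup above or via the goto taken when a stuck
		 * status buffer was freed; either way, rx_desc gets a fresh
		 * nbuf attached to the current ring entry below.
		 */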
buf_replenish:
		status_nbuf = dp_rx_nbuf_prepare(soc, pdev);

		/*
		 * If the qdf_nbuf alloc or map failed:
		 * free the dp rx desc to the free list,
		 * fill in a NULL dma address at the current HP entry,
		 * keep HP in mon_status_ring unchanged, and
		 * let a later dp_rx_mon_status_srng_process call
		 * fill in a buffer at the current HP.
		 */
		if (qdf_unlikely(!status_nbuf)) {
			union dp_rx_desc_list_elem_t *desc_list = NULL;
			union dp_rx_desc_list_elem_t *tail = NULL;
			struct rx_desc_pool *rx_desc_pool;

			rx_desc_pool = &soc->rx_desc_status[mac_id];

			dp_info_rl("fail to allocate or map qdf_nbuf");
			dp_rx_add_to_free_desc_list(&desc_list,
						    &tail, rx_desc);
			dp_rx_add_desc_list_to_free_list(soc, &desc_list,
							 &tail, mac_id,
							 rx_desc_pool);

			hal_rxdma_buff_addr_info_set(
				hal_soc, rxdma_mon_status_ring_entry,
				0, 0,
				HAL_RX_BUF_RBM_SW3_BM(soc->wbm_sw0_bm_id));
			work_done++;
			break;
		}

		paddr = qdf_nbuf_get_frag_paddr(status_nbuf, 0);

		rx_desc->nbuf = status_nbuf;
		rx_desc->in_use = 1;
		rx_desc->unmapped = 0;

		hal_rxdma_buff_addr_info_set(hal_soc,
					     rxdma_mon_status_ring_entry,
					     paddr, rx_desc->cookie,
					     HAL_RX_BUF_RBM_SW3_BM(soc->wbm_sw0_bm_id));

		hal_srng_src_get_next(hal_soc, mon_status_srng);
		work_done++;
	}
done:

	dp_srng_access_end(int_ctx, soc, mon_status_srng);

	return work_done;
}

uint32_t
dp_rx_mon_status_process(struct dp_soc *soc, struct dp_intr *int_ctx,
			 uint32_t mac_id, uint32_t quota)
{
	uint32_t work_done;

	work_done = dp_rx_mon_status_srng_process(soc, int_ctx, mac_id, quota);
	quota -= work_done;
	dp_rx_mon_status_process_tlv(soc, int_ctx, mac_id, quota);

	return work_done;
}
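/*
 * Note: the quota left after the status-ring reap is handed to
 * dp_rx_mon_status_process_tlv(), which in turn passes it to
 * dp_rx_mon_dest_process() once a complete PPDU status is seen
 * (in the non-full-monitor case).
 */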
QDF_STATUS
dp_rx_pdev_mon_status_buffers_alloc(struct dp_pdev *pdev, uint32_t mac_id)
{
	uint8_t pdev_id = pdev->pdev_id;
	struct dp_soc *soc = pdev->soc;
	struct dp_srng *mon_status_ring;
	uint32_t num_entries;
	struct rx_desc_pool *rx_desc_pool;
	struct wlan_cfg_dp_soc_ctxt *soc_cfg_ctx;
	union dp_rx_desc_list_elem_t *desc_list = NULL;
	union dp_rx_desc_list_elem_t *tail = NULL;

	soc_cfg_ctx = soc->wlan_cfg_ctx;
	mon_status_ring = &soc->rxdma_mon_status_ring[mac_id];

	num_entries = mon_status_ring->num_entries;

	rx_desc_pool = &soc->rx_desc_status[mac_id];

	dp_debug("Mon RX Desc Pool[%d] entries=%u",
		 pdev_id, num_entries);

	return dp_rx_mon_status_buffers_replenish(soc, mac_id, mon_status_ring,
						  rx_desc_pool, num_entries,
						  &desc_list, &tail,
						  HAL_RX_BUF_RBM_SW3_BM(soc->wbm_sw0_bm_id));
}

QDF_STATUS
dp_rx_pdev_mon_status_desc_pool_alloc(struct dp_pdev *pdev, uint32_t mac_id)
{
	uint8_t pdev_id = pdev->pdev_id;
	struct dp_soc *soc = pdev->soc;
	struct dp_srng *mon_status_ring;
	uint32_t num_entries;
	struct rx_desc_pool *rx_desc_pool;
	struct wlan_cfg_dp_soc_ctxt *soc_cfg_ctx;

	soc_cfg_ctx = soc->wlan_cfg_ctx;
	mon_status_ring = &soc->rxdma_mon_status_ring[mac_id];

	num_entries = mon_status_ring->num_entries;

	rx_desc_pool = &soc->rx_desc_status[mac_id];

	dp_debug("Mon RX Desc Pool[%d] entries=%u", pdev_id, num_entries);

	rx_desc_pool->desc_type = DP_RX_DESC_STATUS_TYPE;
	return dp_rx_desc_pool_alloc(soc, num_entries + 1, rx_desc_pool);
}

void
dp_rx_pdev_mon_status_desc_pool_init(struct dp_pdev *pdev, uint32_t mac_id)
{
	uint32_t i;
	uint8_t pdev_id = pdev->pdev_id;
	struct dp_soc *soc = pdev->soc;
	struct dp_srng *mon_status_ring;
	uint32_t num_entries;
	struct rx_desc_pool *rx_desc_pool;
	struct wlan_cfg_dp_soc_ctxt *soc_cfg_ctx;
	struct dp_mon_pdev *mon_pdev = pdev->monitor_pdev;

	soc_cfg_ctx = soc->wlan_cfg_ctx;
	mon_status_ring = &soc->rxdma_mon_status_ring[mac_id];

	num_entries = mon_status_ring->num_entries;

	rx_desc_pool = &soc->rx_desc_status[mac_id];

	dp_debug("Mon RX Desc status Pool[%d] init entries=%u",
		 pdev_id, num_entries);

	rx_desc_pool->owner = HAL_RX_BUF_RBM_SW3_BM(soc->wbm_sw0_bm_id);
	rx_desc_pool->buf_size = RX_MON_STATUS_BUF_SIZE;
	rx_desc_pool->buf_alignment = RX_DATA_BUFFER_ALIGNMENT;
	/* Disable frag processing flag */
	dp_rx_enable_mon_dest_frag(rx_desc_pool, false);

	dp_rx_desc_pool_init(soc, mac_id, num_entries + 1, rx_desc_pool);

	qdf_nbuf_queue_init(&mon_pdev->rx_status_q);

	mon_pdev->mon_ppdu_status = DP_PPDU_STATUS_START;

	qdf_mem_zero(&mon_pdev->ppdu_info, sizeof(mon_pdev->ppdu_info));

	/*
	 * Set last_ppdu_id to HAL_INVALID_PPDU_ID in order to avoid a
	 * ppdu_id match with a '0' ppdu_id from the monitor status ring
	 */
	mon_pdev->ppdu_info.com_info.last_ppdu_id = HAL_INVALID_PPDU_ID;

	qdf_mem_zero(&mon_pdev->rx_mon_stats, sizeof(mon_pdev->rx_mon_stats));

	dp_rx_mon_init_dbg_ppdu_stats(&mon_pdev->ppdu_info,
				      &mon_pdev->rx_mon_stats);

	for (i = 0; i < MAX_MU_USERS; i++) {
		qdf_nbuf_queue_init(&mon_pdev->mpdu_q[i]);
		mon_pdev->is_mpdu_hdr[i] = true;
	}

	qdf_mem_zero(mon_pdev->msdu_list,
		     sizeof(mon_pdev->msdu_list[MAX_MU_USERS]));

	mon_pdev->rx_enh_capture_mode = CDP_RX_ENH_CAPTURE_DISABLED;
}

void
dp_rx_pdev_mon_status_desc_pool_deinit(struct dp_pdev *pdev, uint32_t mac_id)
{
	uint8_t pdev_id = pdev->pdev_id;
	struct dp_soc *soc = pdev->soc;
	struct rx_desc_pool *rx_desc_pool;

	rx_desc_pool = &soc->rx_desc_status[mac_id];

	dp_debug("Mon RX Desc status Pool[%d] deinit", pdev_id);

	dp_rx_desc_pool_deinit(soc, rx_desc_pool, mac_id);
}

void
dp_rx_pdev_mon_status_desc_pool_free(struct dp_pdev *pdev, uint32_t mac_id)
{
	uint8_t pdev_id = pdev->pdev_id;
	struct dp_soc *soc = pdev->soc;
	struct rx_desc_pool *rx_desc_pool;

	rx_desc_pool = &soc->rx_desc_status[mac_id];

	dp_debug("Mon RX Status Desc Pool Free pdev[%d]", pdev_id);

	dp_rx_desc_pool_free(soc, rx_desc_pool);
}

void
dp_rx_pdev_mon_status_buffers_free(struct dp_pdev *pdev, uint32_t mac_id)
{
	uint8_t pdev_id = pdev->pdev_id;
	struct dp_soc *soc = pdev->soc;
	struct rx_desc_pool *rx_desc_pool;

	rx_desc_pool = &soc->rx_desc_status[mac_id];

	dp_debug("Mon RX Status Desc Pool nbuf free pdev[%d]", pdev_id);

	dp_rx_desc_nbuf_free(soc, rx_desc_pool);
}
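/*
 * The helpers above roughly pair up: desc_pool_alloc()/desc_pool_free()
 * manage the status descriptor pool memory itself,
 * desc_pool_init()/desc_pool_deinit() (re)initialize the pool and the
 * per-pdev monitor state, and buffers_alloc()/buffers_free() attach and
 * release the status nbufs.
 */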
/**
 * dp_rx_mon_status_buffers_replenish() - replenish the monitor status ring
 *					  with rx nbufs; called during dp rx
 *					  monitor status ring initialization
 * @dp_soc: core txrx main context
 * @mac_id: mac_id which is one of 3 mac_ids
 * @dp_rxdma_srng: dp monitor status circular ring
 * @rx_desc_pool: Pointer to Rx descriptor pool
 * @num_req_buffers: number of buffers to be replenished
 * @desc_list: list of descs if called from dp rx monitor status
 *	       process or NULL during dp rx initialization or
 *	       out of buffer interrupt
 * @tail: tail of descs list
 * @owner: who owns the nbuf (host, NSS etc...)
 *
 * Return: return success or failure
 */
static inline
QDF_STATUS dp_rx_mon_status_buffers_replenish(struct dp_soc *dp_soc,
					      uint32_t mac_id,
					      struct dp_srng *dp_rxdma_srng,
					      struct rx_desc_pool *rx_desc_pool,
					      uint32_t num_req_buffers,
					      union dp_rx_desc_list_elem_t **desc_list,
					      union dp_rx_desc_list_elem_t **tail,
					      uint8_t owner)
{
	uint32_t num_alloc_desc;
	uint16_t num_desc_to_free = 0;
	uint32_t num_entries_avail;
	uint32_t count = 0;
	int sync_hw_ptr = 1;
	qdf_dma_addr_t paddr;
	qdf_nbuf_t rx_netbuf;
	void *rxdma_ring_entry;
	union dp_rx_desc_list_elem_t *next;
	void *rxdma_srng;
	struct dp_pdev *dp_pdev = dp_get_pdev_for_lmac_id(dp_soc, mac_id);

	if (!dp_pdev) {
		dp_rx_mon_status_debug("%pK: pdev is null for mac_id = %d",
				       dp_soc, mac_id);
		return QDF_STATUS_E_FAILURE;
	}

	rxdma_srng = dp_rxdma_srng->hal_srng;

	qdf_assert(rxdma_srng);

	dp_rx_mon_status_debug("%pK: requested %d buffers for replenish",
			       dp_soc, num_req_buffers);

	/*
	 * if desc_list is NULL, allocate the descs from freelist
	 */
	if (!(*desc_list)) {
		num_alloc_desc = dp_rx_get_free_desc_list(dp_soc, mac_id,
							  rx_desc_pool,
							  num_req_buffers,
							  desc_list,
							  tail);

		if (!num_alloc_desc) {
			dp_rx_mon_status_err("%pK: no free rx_descs in freelist",
					     dp_soc);
			return QDF_STATUS_E_NOMEM;
		}

		dp_rx_mon_status_debug("%pK: %d rx desc allocated", dp_soc,
				       num_alloc_desc);

		num_req_buffers = num_alloc_desc;
	}

	hal_srng_access_start(dp_soc->hal_soc, rxdma_srng);
	num_entries_avail = hal_srng_src_num_avail(dp_soc->hal_soc,
						   rxdma_srng, sync_hw_ptr);

	dp_rx_mon_status_debug("%pK: no. of available entries in rxdma ring: %d",
			       dp_soc, num_entries_avail);

	if (num_entries_avail < num_req_buffers) {
		num_desc_to_free = num_req_buffers - num_entries_avail;
		num_req_buffers = num_entries_avail;
	}
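	/*
	 * Each pass of the loop below claims the current HP entry, attaches
	 * a freshly mapped nbuf and advances; the new producer index is made
	 * visible to HW at hal_srng_access_end() after the loop.
	 */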
	while (count <= num_req_buffers) {
		rx_netbuf = dp_rx_nbuf_prepare(dp_soc, dp_pdev);

		/*
		 * If the qdf_nbuf alloc or map failed,
		 * keep HP in mon_status_ring unchanged and
		 * let a later dp_rx_mon_status_srng_process call
		 * fill in a buffer at the current HP.
		 */
		if (qdf_unlikely(!rx_netbuf)) {
			dp_rx_mon_status_err("%pK: qdf_nbuf allocate or map fail, count %d",
					     dp_soc, count);
			break;
		}

		paddr = qdf_nbuf_get_frag_paddr(rx_netbuf, 0);

		next = (*desc_list)->next;
		rxdma_ring_entry = hal_srng_src_get_cur_hp_n_move_next(
						dp_soc->hal_soc,
						rxdma_srng);

		if (qdf_unlikely(!rxdma_ring_entry)) {
			dp_rx_mon_status_err("%pK: rxdma_ring_entry is NULL, count - %d",
					     dp_soc, count);
			qdf_nbuf_unmap_nbytes_single(dp_soc->osdev, rx_netbuf,
						     QDF_DMA_FROM_DEVICE,
						     rx_desc_pool->buf_size);
			qdf_nbuf_free(rx_netbuf);
			break;
		}

		(*desc_list)->rx_desc.nbuf = rx_netbuf;
		(*desc_list)->rx_desc.in_use = 1;
		(*desc_list)->rx_desc.unmapped = 0;
		count++;

		hal_rxdma_buff_addr_info_set(dp_soc->hal_soc,
					     rxdma_ring_entry, paddr,
					     (*desc_list)->rx_desc.cookie,
					     owner);

		dp_rx_mon_status_debug("%pK: rx_desc=%pK, cookie=%d, nbuf=%pK, paddr=%pK",
				       dp_soc, &(*desc_list)->rx_desc,
				       (*desc_list)->rx_desc.cookie, rx_netbuf,
				       (void *)paddr);

		*desc_list = next;
	}

	hal_srng_access_end(dp_soc->hal_soc, rxdma_srng);

	dp_rx_mon_status_debug("%pK: successfully replenished %d buffers",
			       dp_soc, num_req_buffers);

	dp_rx_mon_status_debug("%pK: %d rx desc added back to free list",
			       dp_soc, num_desc_to_free);

	/*
	 * add any available free desc back to the free list
	 */
	if (*desc_list) {
		dp_rx_add_desc_list_to_free_list(dp_soc, desc_list, tail,
						 mac_id, rx_desc_pool);
	}

	return QDF_STATUS_SUCCESS;
}

#if !defined(DISABLE_MON_CONFIG) && defined(MON_ENABLE_DROP_FOR_MAC)
/**
 * dp_mon_status_srng_drop_for_mac() - Drop the mon status ring packets for
 *				       a given mac
 * @pdev: DP pdev
 * @mac_id: mac id
 * @quota: maximum number of ring entries that can be processed
 *
 * Return: Number of ring entries reaped
 */
static uint32_t
dp_mon_status_srng_drop_for_mac(struct dp_pdev *pdev, uint32_t mac_id,
				uint32_t quota)
{
	struct dp_soc *soc = pdev->soc;
	void *mon_status_srng;
	hal_soc_handle_t hal_soc;
	void *ring_desc;
	uint32_t reap_cnt = 0;

	if (qdf_unlikely(!soc || !soc->hal_soc))
		return reap_cnt;

	mon_status_srng = soc->rxdma_mon_status_ring[mac_id].hal_srng;

	if (qdf_unlikely(!mon_status_srng ||
			 !hal_srng_initialized(mon_status_srng)))
		return reap_cnt;

	hal_soc = soc->hal_soc;

	if (qdf_unlikely(hal_srng_access_start(hal_soc, mon_status_srng)))
		return reap_cnt;
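	/*
	 * Reap up to MON_DROP_REAP_LIMIT filled status entries per call,
	 * freeing each status buffer instead of queuing it for TLV
	 * processing, and immediately refill the ring entry so RxDMA can
	 * keep writing status reports.
	 */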
	while ((ring_desc =
		hal_srng_src_peek_n_get_next(hal_soc, mon_status_srng)) &&
		reap_cnt < MON_DROP_REAP_LIMIT && quota--) {
		uint64_t buf_addr;
		struct hal_buf_info hbi;
		struct dp_rx_desc *rx_desc;
		qdf_nbuf_t status_nbuf;
		uint8_t *status_buf;
		enum dp_mon_reap_status reap_status;
		qdf_dma_addr_t iova;
		struct rx_desc_pool *rx_desc_pool;

		rx_desc_pool = &soc->rx_desc_status[mac_id];

		buf_addr = (HAL_RX_BUFFER_ADDR_31_0_GET(ring_desc) |
			((uint64_t)(HAL_RX_BUFFER_ADDR_39_32_GET(ring_desc)) << 32));

		if (qdf_likely(buf_addr)) {
			hal_rx_buf_cookie_rbm_get(soc->hal_soc,
						  (uint32_t *)ring_desc,
						  &hbi);
			rx_desc = dp_rx_cookie_2_va_mon_status(soc,
							       hbi.sw_cookie);

			qdf_assert_always(rx_desc);

			status_nbuf = rx_desc->nbuf;

			qdf_nbuf_sync_for_cpu(soc->osdev, status_nbuf,
					      QDF_DMA_FROM_DEVICE);

			status_buf = qdf_nbuf_data(status_nbuf);

			if (hal_get_rx_status_done(status_buf) !=
			    QDF_STATUS_SUCCESS) {
				/* If done status is missing:
				 * 1. As per the MAC team's suggestion,
				 *    when the HP + 1 entry is peeked and its
				 *    DMA is not done but the HP + 2 entry's
				 *    DMA done is set, skip the HP + 1 entry
				 *    and start processing in the next
				 *    interrupt.
				 * 2. If the HP + 2 entry's DMA done is not
				 *    set, poll on the HP + 1 entry's DMA done,
				 *    i.e. check the status of the same buffer
				 *    the next time dp_rx_mon_status_srng_process
				 *    runs.
				 */
				reap_status =
					dp_rx_mon_handle_status_buf_done(pdev,
								mon_status_srng);
				if (reap_status == DP_MON_STATUS_NO_DMA)
					break;
			}
			qdf_nbuf_unmap_nbytes_single(soc->osdev, status_nbuf,
						     QDF_DMA_FROM_DEVICE,
						     rx_desc_pool->buf_size);
			qdf_nbuf_free(status_nbuf);
		} else {
			union dp_rx_desc_list_elem_t *rx_desc_elem;

			qdf_spin_lock_bh(&rx_desc_pool->lock);

			if (!rx_desc_pool->freelist) {
				qdf_spin_unlock_bh(&rx_desc_pool->lock);
				break;
			}
			rx_desc_elem = rx_desc_pool->freelist;
			rx_desc_pool->freelist = rx_desc_pool->freelist->next;
			qdf_spin_unlock_bh(&rx_desc_pool->lock);

			rx_desc = &rx_desc_elem->rx_desc;
		}

		status_nbuf = dp_rx_nbuf_prepare(soc, pdev);

		if (qdf_unlikely(!status_nbuf)) {
			union dp_rx_desc_list_elem_t *desc_list = NULL;
			union dp_rx_desc_list_elem_t *tail = NULL;

			dp_info_rl("fail to allocate or map nbuf");
			dp_rx_add_to_free_desc_list(&desc_list, &tail,
						    rx_desc);
			dp_rx_add_desc_list_to_free_list(soc,
							 &desc_list,
							 &tail, mac_id,
							 rx_desc_pool);

			hal_rxdma_buff_addr_info_set(hal_soc, ring_desc, 0, 0,
					HAL_RX_BUF_RBM_SW3_BM(soc->wbm_sw0_bm_id));
			break;
		}

		iova = qdf_nbuf_get_frag_paddr(status_nbuf, 0);

		rx_desc->nbuf = status_nbuf;
		rx_desc->in_use = 1;

		hal_rxdma_buff_addr_info_set(hal_soc, ring_desc, iova,
					     rx_desc->cookie,
					     HAL_RX_BUF_RBM_SW3_BM(soc->wbm_sw0_bm_id));

		reap_cnt++;
		hal_srng_src_get_next(hal_soc, mon_status_srng);
	}

	hal_srng_access_end(hal_soc, mon_status_srng);

	return reap_cnt;
}

uint32_t dp_mon_drop_packets_for_mac(struct dp_pdev *pdev, uint32_t mac_id,
				     uint32_t quota)
{
	uint32_t work_done;

	work_done = dp_mon_status_srng_drop_for_mac(pdev, mac_id, quota);
	if (!dp_is_rxdma_dst_ring_common(pdev))
		dp_mon_dest_srng_drop_for_mac(pdev, mac_id);

	return work_done;
}
#else
uint32_t dp_mon_drop_packets_for_mac(struct dp_pdev *pdev, uint32_t mac_id,
				     uint32_t quota)
{
	return 0;
}
#endif