/*
 * Copyright (c) 2017-2021, The Linux Foundation. All rights reserved.
 * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include "hal_hw_headers.h"
#include "dp_types.h"
#include "dp_rx.h"
#include "dp_peer.h"
#include "hal_rx.h"
#include "hal_api.h"
#include "qdf_trace.h"
#include "qdf_nbuf.h"
#include "hal_api_mon.h"
#include "dp_htt.h"
#include "dp_mon.h"
#include "dp_rx_mon.h"
#include "wlan_cfg.h"
#include "dp_internal.h"
#include "dp_rx_buffer_pool.h"
#include <dp_mon_1.0.h>
#include <dp_rx_mon_1.0.h>

#ifdef WLAN_TX_PKT_CAPTURE_ENH
#include "dp_rx_mon_feature.h"
#endif

/*
 * PPDU id is from 0 to 64k-1. The PPDU id read from the status ring and the
 * PPDU id read from the destination ring shall track each other. If the
 * distance between two ppdu ids is less than 20000, it is assumed that no
 * wrap around occurred. Otherwise, a wrap around is assumed.
 */
#define NOT_PPDU_ID_WRAP_AROUND 20000
/*
 * The destination ring processing is stuck if the destination is not
 * moving while status ring moves 16 ppdu.
the destination ring processing
 * skips this destination ring ppdu as a workaround
 */
#define MON_DEST_RING_STUCK_MAX_CNT 16

#ifdef WLAN_TX_PKT_CAPTURE_ENH
/**
 * dp_handle_tx_capture() - Hand NDPA/BAR control frames reaped from the
 * monitor destination path over to the enhanced TX-capture feature
 * @soc: core DP main context
 * @pdev: core txrx pdev context
 * @mon_mpdu: monitor MPDU nbuf to be handed over
 *
 * No-op when enhanced TX capture is disabled on this pdev.
 */
void
dp_handle_tx_capture(struct dp_soc *soc, struct dp_pdev *pdev,
		     qdf_nbuf_t mon_mpdu)
{
	struct dp_mon_pdev *mon_pdev = pdev->monitor_pdev;
	struct hal_rx_ppdu_info *ppdu_info = &mon_pdev->ppdu_info;

	if (mon_pdev->tx_capture_enabled
	    == CDP_TX_ENH_CAPTURE_DISABLED)
		return;

	/* Only NDPA and BAR control frame groups are forwarded */
	if ((ppdu_info->sw_frame_group_id ==
	     HAL_MPDU_SW_FRAME_GROUP_CTRL_NDPA) ||
	    (ppdu_info->sw_frame_group_id ==
	     HAL_MPDU_SW_FRAME_GROUP_CTRL_BAR))
		dp_handle_tx_capture_from_dest(soc, pdev, mon_mpdu);
}

#ifdef QCA_MONITOR_PKT_SUPPORT
/**
 * dp_tx_capture_get_user_id() - Cache the MPDU user id from the RX
 * descriptor TLV into ppdu_info when enhanced TX capture is enabled
 * @dp_pdev: core txrx pdev context
 * @rx_desc_tlv: pointer to the RX descriptor TLV area of the buffer
 */
static void
dp_tx_capture_get_user_id(struct dp_pdev *dp_pdev, void *rx_desc_tlv)
{
	struct dp_mon_pdev *mon_pdev = dp_pdev->monitor_pdev;

	if (mon_pdev->tx_capture_enabled
	    != CDP_TX_ENH_CAPTURE_DISABLED)
		mon_pdev->ppdu_info.rx_info.user_id =
			hal_rx_hw_desc_mpdu_user_id(dp_pdev->soc->hal_soc,
						    rx_desc_tlv);
}
#endif
#else
/* Stub used when WLAN_TX_PKT_CAPTURE_ENH is not compiled in */
static void
dp_tx_capture_get_user_id(struct dp_pdev *dp_pdev, void *rx_desc_tlv)
{
}
#endif

#ifdef QCA_MONITOR_PKT_SUPPORT
/**
 * dp_rx_mon_link_desc_return() - Return a MPDU link descriptor to HW
 * (WBM), following error handling
 *
 * @dp_pdev: core txrx pdev context
 * @buf_addr_info: void pointer to monitor link descriptor buf addr info
 * @mac_id: lmac id which owns the monitor descriptor ring to release into
 *
 * Return: QDF_STATUS
 */
QDF_STATUS
dp_rx_mon_link_desc_return(struct dp_pdev *dp_pdev,
			   hal_buff_addrinfo_t buf_addr_info, int mac_id)
{
	struct dp_srng *dp_srng;
	hal_ring_handle_t hal_ring_hdl;
	hal_soc_handle_t hal_soc;
	QDF_STATUS status = QDF_STATUS_E_FAILURE;
	void *src_srng_desc;

	hal_soc = dp_pdev->soc->hal_soc;

	dp_srng = &dp_pdev->soc->rxdma_mon_desc_ring[mac_id];
	hal_ring_hdl = dp_srng->hal_srng;

	qdf_assert(hal_ring_hdl);

	if (qdf_unlikely(hal_srng_access_start(hal_soc, hal_ring_hdl))) {

		/* TODO */
		/*
		 * Need API to convert from hal_ring pointer to
		 * Ring Type / Ring Id combo
		 */
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
			  "%s %d : \
			HAL RING Access For WBM Release SRNG Failed -- %pK",
			  __func__, __LINE__, hal_ring_hdl);
		goto done;
	}

	src_srng_desc = hal_srng_src_get_next(hal_soc, hal_ring_hdl);

	if (qdf_likely(src_srng_desc)) {
		/* Return link descriptor through WBM ring (SW2WBM)*/
		hal_rx_mon_msdu_link_desc_set(hal_soc,
					      src_srng_desc, buf_addr_info);
		status = QDF_STATUS_SUCCESS;
	} else {
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
			  "%s %d -- Monitor Link Desc WBM Release Ring Full",
			  __func__, __LINE__);
	}
done:
	hal_srng_access_end(hal_soc, hal_ring_hdl);
	return status;
}

/**
 * dp_rx_mon_mpdu_pop() - Reap one MPDU worth of MSDU buffers from the
 * monitor destination ring, link them into an MSDU list and return the
 * consumed MPDU link descriptors to HW (WBM)
 *
 * @soc: core DP main context
 * @mac_id: mac id which is one of 3 mac_ids
 * @rxdma_dst_ring_desc: void pointer to monitor link descriptor buf addr info
 * @head_msdu: head of msdu to be popped
 * @tail_msdu: tail of msdu to be popped
 * @npackets: number of packet to be popped
 * @ppdu_id: ppdu id of processing ppdu
 * @head: head of descs list to be freed
 * @tail: tail of descs list to be freed
 *
 * Return: number of msdu in MPDU to be popped
 */
static inline uint32_t
dp_rx_mon_mpdu_pop(struct dp_soc *soc, uint32_t mac_id,
		   hal_rxdma_desc_t rxdma_dst_ring_desc, qdf_nbuf_t *head_msdu,
		   qdf_nbuf_t *tail_msdu, uint32_t *npackets, uint32_t *ppdu_id,
		   union dp_rx_desc_list_elem_t **head,
		   union dp_rx_desc_list_elem_t **tail)
{
	struct dp_pdev *dp_pdev = dp_get_pdev_for_lmac_id(soc, mac_id);
	void *rx_desc_tlv, *first_rx_desc_tlv = NULL;
	void *rx_msdu_link_desc;
	qdf_nbuf_t msdu;
	qdf_nbuf_t last;
	struct hal_rx_msdu_list msdu_list;
	uint16_t num_msdus;
	uint32_t rx_buf_size, rx_pkt_offset;
	struct hal_buf_info buf_info;
	uint32_t rx_bufs_used = 0;
	uint32_t msdu_ppdu_id, msdu_cnt;
	uint8_t *data = NULL;
	uint32_t i;
	uint32_t total_frag_len = 0, frag_len = 0;
	bool is_frag, is_first_msdu;
	bool drop_mpdu = false, is_frag_non_raw = false;
	uint8_t bm_action = HAL_BM_ACTION_PUT_IN_IDLE_LIST;
	qdf_dma_addr_t buf_paddr = 0;
	uint32_t rx_link_buf_info[HAL_RX_BUFFINFO_NUM_DWORDS];
	struct cdp_mon_status *rs;
	struct dp_mon_pdev *mon_pdev;

	if (qdf_unlikely(!dp_pdev)) {
		dp_rx_mon_dest_debug("%pK: pdev is null for mac_id = %d", soc, mac_id);
		return rx_bufs_used;
	}

	mon_pdev = dp_pdev->monitor_pdev;
	msdu = 0;

	last = NULL;

	hal_rx_reo_ent_buf_paddr_get(soc->hal_soc, rxdma_dst_ring_desc,
				     &buf_info, &msdu_cnt);

	/* Decide up front whether this whole MPDU must be dropped based on
	 * the RXDMA push reason / error code of the destination ring entry.
	 */
	rs = &mon_pdev->rx_mon_recv_status;
	rs->cdp_rs_rxdma_err = false;
	if ((hal_rx_reo_ent_rxdma_push_reason_get(rxdma_dst_ring_desc) ==
		HAL_RX_WBM_RXDMA_PSH_RSN_ERROR)) {
		uint8_t rxdma_err =
			hal_rx_reo_ent_rxdma_error_code_get(
				rxdma_dst_ring_desc);
		if (qdf_unlikely((rxdma_err == HAL_RXDMA_ERR_FLUSH_REQUEST) ||
				 (rxdma_err == HAL_RXDMA_ERR_MPDU_LENGTH) ||
				 (rxdma_err == HAL_RXDMA_ERR_OVERFLOW) ||
				 (rxdma_err == HAL_RXDMA_ERR_FCS && mon_pdev->mcopy_mode) ||
				 (rxdma_err == HAL_RXDMA_ERR_FCS &&
				  mon_pdev->rx_pktlog_cbf))) {
			drop_mpdu = true;
			mon_pdev->rx_mon_stats.dest_mpdu_drop++;
		}
		rs->cdp_rs_rxdma_err = true;
	}

	is_frag = false;
	is_first_msdu = true;

	/* Walk the chain of MSDU link descriptors for this ring entry */
	do {
		/* WAR for duplicate link descriptors received from HW */
		if (qdf_unlikely(mon_pdev->mon_last_linkdesc_paddr ==
				 buf_info.paddr)) {
			mon_pdev->rx_mon_stats.dup_mon_linkdesc_cnt++;
			return rx_bufs_used;
		}

		rx_msdu_link_desc =
			dp_rx_cookie_2_mon_link_desc(dp_pdev,
						     buf_info, mac_id);

		qdf_assert_always(rx_msdu_link_desc);

		hal_rx_msdu_list_get(soc->hal_soc, rx_msdu_link_desc,
				     &msdu_list, &num_msdus);

		for (i = 0; i < num_msdus; i++) {
			uint16_t l2_hdr_offset;
			struct dp_rx_desc *rx_desc = NULL;
			struct rx_desc_pool *rx_desc_pool;

			rx_desc = dp_rx_get_mon_desc(soc,
						     msdu_list.sw_cookie[i]);

			qdf_assert_always(rx_desc);

			msdu = DP_RX_MON_GET_NBUF_FROM_DESC(rx_desc);
			buf_paddr = dp_rx_mon_get_paddr_from_desc(rx_desc);

			/* WAR for duplicate buffers received from HW */
			if (qdf_unlikely(mon_pdev->mon_last_buf_cookie ==
				msdu_list.sw_cookie[i] ||
				DP_RX_MON_IS_BUFFER_ADDR_NULL(rx_desc) ||
				msdu_list.paddr[i] != buf_paddr ||
				!rx_desc->in_use)) {
				/* Skip duplicate buffer and drop subsequent
				 * buffers in this MPDU
				 */
				drop_mpdu = true;
				mon_pdev->rx_mon_stats.dup_mon_buf_cnt++;
				mon_pdev->mon_last_linkdesc_paddr =
					buf_info.paddr;
				continue;
			}

			/* Unmap the buffer before the CPU touches it */
			if (rx_desc->unmapped == 0) {
				rx_desc_pool = dp_rx_get_mon_desc_pool(soc,
								       mac_id,
								       dp_pdev->pdev_id);
				dp_rx_mon_buffer_unmap(soc, rx_desc,
						       rx_desc_pool->buf_size);
				rx_desc->unmapped = 1;
			}

			if (dp_rx_buffer_pool_refill(soc, msdu,
						     rx_desc->pool_id)) {
				drop_mpdu = true;
				msdu = NULL;
				mon_pdev->mon_last_linkdesc_paddr =
					buf_info.paddr;
				goto next_msdu;
			}

			if (drop_mpdu) {
				mon_pdev->mon_last_linkdesc_paddr =
					buf_info.paddr;
				dp_rx_mon_buffer_free(rx_desc);
				msdu = NULL;
				goto next_msdu;
			}

			data = dp_rx_mon_get_buffer_data(rx_desc);
			rx_desc_tlv = HAL_RX_MON_DEST_GET_DESC(data);

			dp_rx_mon_dest_debug("%pK: i=%d, ppdu_id=%x, num_msdus = %u",
					     soc, i, *ppdu_id, num_msdus);

			if (is_first_msdu) {
				/* First MSDU must carry a valid mpdu_start
				 * TLV, otherwise the whole MPDU is dropped.
				 */
				if (!hal_rx_mpdu_start_tlv_tag_valid(
						soc->hal_soc,
						rx_desc_tlv)) {
					drop_mpdu = true;
					dp_rx_mon_buffer_free(rx_desc);
					msdu = NULL;
					mon_pdev->mon_last_linkdesc_paddr =
						buf_info.paddr;
					goto next_msdu;
				}

				msdu_ppdu_id = hal_rx_hw_desc_get_ppduid_get(
						soc->hal_soc,
						rx_desc_tlv,
						rxdma_dst_ring_desc);
				is_first_msdu = false;

				dp_rx_mon_dest_debug("%pK: msdu_ppdu_id=%x",
						     soc, msdu_ppdu_id);

				if (*ppdu_id > msdu_ppdu_id)
					dp_rx_mon_dest_debug("%pK: ppdu_id=%d "
							     "msdu_ppdu_id=%d", soc,
							     *ppdu_id, msdu_ppdu_id);

				/* If the destination PPDU id is ahead of the
				 * status PPDU id (without wrap around) or the
				 * status id is ahead by more than the wrap
				 * threshold, stop reaping and let the caller
				 * resynchronize on the new PPDU id.
				 */
				if ((*ppdu_id < msdu_ppdu_id) && (
					(msdu_ppdu_id - *ppdu_id) <
					NOT_PPDU_ID_WRAP_AROUND)) {
					*ppdu_id = msdu_ppdu_id;
					return rx_bufs_used;
				} else if ((*ppdu_id > msdu_ppdu_id) && (
					(*ppdu_id - msdu_ppdu_id) >
					NOT_PPDU_ID_WRAP_AROUND)) {
					*ppdu_id = msdu_ppdu_id;
					return rx_bufs_used;
				}

				dp_tx_capture_get_user_id(dp_pdev,
							  rx_desc_tlv);

				if (*ppdu_id == msdu_ppdu_id)
					mon_pdev->rx_mon_stats.ppdu_id_match++;
				else
					mon_pdev->rx_mon_stats.ppdu_id_mismatch
						++;

				mon_pdev->mon_last_linkdesc_paddr =
					buf_info.paddr;

				if (dp_rx_mon_alloc_parent_buffer(head_msdu)
				    != QDF_STATUS_SUCCESS) {
					DP_STATS_INC(dp_pdev,
						     replenish.nbuf_alloc_fail,
						     1);
					qdf_frag_free(rx_desc_tlv);
					dp_rx_mon_dest_debug("failed to allocate parent buffer to hold all frag");
					drop_mpdu = true;
					goto next_msdu;
				}
			}

			if (hal_rx_desc_is_first_msdu(soc->hal_soc,
						      rx_desc_tlv))
				hal_rx_mon_hw_desc_get_mpdu_status(soc->hal_soc,
					rx_desc_tlv,
					&mon_pdev->ppdu_info.rx_status);

			dp_rx_mon_parse_desc_buffer(soc,
						    &(msdu_list.msdu_info[i]),
						    &is_frag,
						    &total_frag_len,
						    &frag_len,
						    &l2_hdr_offset,
						    rx_desc_tlv,
						    &first_rx_desc_tlv,
						    &is_frag_non_raw, data);
			/* A non-fragmented buffer completes one MSDU */
			if (!is_frag)
				msdu_cnt--;

			dp_rx_mon_dest_debug("total_len %u frag_len %u flags %u",
					     total_frag_len, frag_len,
					     msdu_list.msdu_info[i].msdu_flags);

			rx_pkt_offset = soc->rx_mon_pkt_tlv_size;

			rx_buf_size = rx_pkt_offset + l2_hdr_offset
					+ frag_len;

			dp_rx_mon_buffer_set_pktlen(msdu, rx_buf_size);
#if 0
			/* Disable it.see packet on msdu done set to 0 */
			/*
			 * Check if DMA completed -- msdu_done is the
			 * last bit to be written
			 */
			if (!hal_rx_attn_msdu_done_get(rx_desc_tlv)) {

				QDF_TRACE(QDF_MODULE_ID_DP,
					  QDF_TRACE_LEVEL_ERROR,
					  "%s:%d: Pkt Desc",
					  __func__, __LINE__);

				QDF_TRACE_HEX_DUMP(QDF_MODULE_ID_DP,
						   QDF_TRACE_LEVEL_ERROR,
						   rx_desc_tlv, 128);

				qdf_assert_always(0);
			}
#endif
			dp_rx_mon_dest_debug("%pK: rx_pkt_offset=%d, l2_hdr_offset=%d, msdu_len=%d, frag_len %u",
					     soc, rx_pkt_offset, l2_hdr_offset,
					     msdu_list.msdu_info[i].msdu_len,
					     frag_len);

			if (dp_rx_mon_add_msdu_to_list(soc, head_msdu, msdu,
						       &last, rx_desc_tlv,
						       frag_len, l2_hdr_offset)
			    != QDF_STATUS_SUCCESS) {
				dp_rx_mon_add_msdu_to_list_failure_handler(rx_desc_tlv,
					dp_pdev, &last, head_msdu,
					tail_msdu, __func__);
				drop_mpdu = true;
				goto next_msdu;
			}

next_msdu:
			/* Consumed (or dropped) either way: remember the
			 * cookie for the duplicate-buffer WAR and recycle
			 * the SW descriptor.
			 */
			mon_pdev->mon_last_buf_cookie = msdu_list.sw_cookie[i];
			rx_bufs_used++;
			dp_rx_add_to_free_desc_list(head,
						    tail, rx_desc);
		}

		/*
		 * Store the current link buffer into the local
		 * structure to be used for release purpose.
		 */
		hal_rxdma_buff_addr_info_set(soc->hal_soc, rx_link_buf_info,
					     buf_info.paddr,
					     buf_info.sw_cookie, buf_info.rbm);

		hal_rx_mon_next_link_desc_get(soc->hal_soc, rx_msdu_link_desc,
					      &buf_info);
		if (dp_rx_monitor_link_desc_return(dp_pdev,
						   (hal_buff_addrinfo_t)
						   rx_link_buf_info,
						   mac_id,
						   bm_action)
			!= QDF_STATUS_SUCCESS)
			dp_err_rl("monitor link desc return failed");
	} while (buf_info.paddr && msdu_cnt);

	dp_rx_mon_init_tail_msdu(head_msdu, msdu, last, tail_msdu);
	dp_rx_mon_remove_raw_frame_fcs_len(soc, head_msdu, tail_msdu);

	return rx_bufs_used;
}

/**
 * dp_rx_mon_dest_process() - Reap and deliver MPDUs from the monitor
 * destination ring for the given mac, replenishing consumed buffers
 * @soc: core DP main context
 * @int_ctx: interrupt context this call runs in
 * @mac_id: mac id whose destination ring is processed
 * @quota: NAPI-style work quota
 *
 * NOTE(review): @quota is not referenced in the visible body — confirm
 * whether quota enforcement is intentional elsewhere.
 */
void dp_rx_mon_dest_process(struct dp_soc *soc, struct dp_intr *int_ctx,
			    uint32_t mac_id, uint32_t quota)
{
	struct dp_pdev *pdev = dp_get_pdev_for_lmac_id(soc, mac_id);
	uint8_t pdev_id;
	hal_rxdma_desc_t rxdma_dst_ring_desc;
	hal_soc_handle_t hal_soc;
	void *mon_dst_srng;
	union dp_rx_desc_list_elem_t *head = NULL;
	union dp_rx_desc_list_elem_t *tail = NULL;
	uint32_t ppdu_id;
	uint32_t rx_bufs_used;
	uint32_t mpdu_rx_bufs_used;
	int mac_for_pdev = mac_id;
	struct cdp_pdev_mon_stats *rx_mon_stats;
	struct dp_mon_pdev *mon_pdev;

	if (!pdev) {
		dp_rx_mon_dest_debug("%pK: pdev is null for mac_id = %d", soc, mac_id);
		return;
	}

	mon_pdev = pdev->monitor_pdev;
	mon_dst_srng = dp_rxdma_get_mon_dst_ring(pdev, mac_for_pdev);

	if (!mon_dst_srng || !hal_srng_initialized(mon_dst_srng)) {
		dp_rx_mon_dest_err("%pK: : HAL Monitor Destination Ring Init Failed -- %pK",
				   soc, mon_dst_srng);
		return;
	}

	hal_soc = soc->hal_soc;

	qdf_assert((hal_soc && pdev));

	qdf_spin_lock_bh(&mon_pdev->mon_lock);

	if (qdf_unlikely(dp_srng_access_start(int_ctx, soc, mon_dst_srng))) {
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
			  "%s %d : HAL Mon Dest Ring access Failed -- %pK",
			  __func__, __LINE__, mon_dst_srng);
		qdf_spin_unlock_bh(&mon_pdev->mon_lock);
		return;
	}

	pdev_id = pdev->pdev_id;
	/* Track against the PPDU id last seen on the status ring */
	ppdu_id = mon_pdev->ppdu_info.com_info.ppdu_id;
	rx_bufs_used = 0;
	rx_mon_stats = &mon_pdev->rx_mon_stats;

	while (qdf_likely(rxdma_dst_ring_desc =
		hal_srng_dst_peek(hal_soc, mon_dst_srng))) {
		qdf_nbuf_t head_msdu, tail_msdu;
		uint32_t npackets;

		head_msdu = (qdf_nbuf_t)NULL;
		tail_msdu = (qdf_nbuf_t)NULL;

		mpdu_rx_bufs_used =
			dp_rx_mon_mpdu_pop(soc, mac_id,
					   rxdma_dst_ring_desc,
					   &head_msdu, &tail_msdu,
					   &npackets, &ppdu_id,
					   &head, &tail);

		rx_bufs_used += mpdu_rx_bufs_used;

		/* Count consecutive empty reaps to detect a stuck ring */
		if (mpdu_rx_bufs_used)
			mon_pdev->mon_dest_ring_stuck_cnt = 0;
		else
			mon_pdev->mon_dest_ring_stuck_cnt++;

		/* Destination ring did not move for too long while the
		 * status ring advanced: skip this destination PPDU and
		 * force the status side to resync (see
		 * MON_DEST_RING_STUCK_MAX_CNT).
		 */
		if (mon_pdev->mon_dest_ring_stuck_cnt >
		    MON_DEST_RING_STUCK_MAX_CNT) {
			dp_info("destination ring stuck");
			dp_info("ppdu_id status=%d dest=%d",
				mon_pdev->ppdu_info.com_info.ppdu_id, ppdu_id);
			rx_mon_stats->mon_rx_dest_stuck++;
			mon_pdev->ppdu_info.com_info.ppdu_id = ppdu_id;
			continue;
		}

		/* Status/destination PPDU id diverged: record both ids in
		 * the history, restart PPDU status tracking and stop reaping
		 * until the status ring catches up.
		 */
		if (ppdu_id != mon_pdev->ppdu_info.com_info.ppdu_id) {
			rx_mon_stats->stat_ring_ppdu_id_hist[
				rx_mon_stats->ppdu_id_hist_idx] =
				mon_pdev->ppdu_info.com_info.ppdu_id;
			rx_mon_stats->dest_ring_ppdu_id_hist[
				rx_mon_stats->ppdu_id_hist_idx] = ppdu_id;
			rx_mon_stats->ppdu_id_hist_idx =
				(rx_mon_stats->ppdu_id_hist_idx + 1) &
				(MAX_PPDU_ID_HIST - 1);
			mon_pdev->mon_ppdu_status = DP_PPDU_STATUS_START;
			qdf_mem_zero(&mon_pdev->ppdu_info.rx_status,
				     sizeof(mon_pdev->ppdu_info.rx_status));
			dp_rx_mon_dest_debug("%pK: ppdu_id %x != ppdu_info.com_info.ppdu_id %x",
					     soc, ppdu_id,
					     mon_pdev->ppdu_info.com_info.ppdu_id);
			break;
		}

		if (qdf_likely((head_msdu) && (tail_msdu))) {
			rx_mon_stats->dest_mpdu_done++;
			dp_rx_mon_deliver(soc, mac_id, head_msdu, tail_msdu);
		}

		rxdma_dst_ring_desc =
			hal_srng_dst_get_next(hal_soc,
					      mon_dst_srng);
	}

	dp_srng_access_end(int_ctx, soc, mon_dst_srng);

	qdf_spin_unlock_bh(&mon_pdev->mon_lock);

	/* Give back as many buffers as were consumed from the ring */
	if (rx_bufs_used) {
		rx_mon_stats->dest_ppdu_done++;
		dp_rx_buffers_replenish(soc, mac_id,
					dp_rxdma_get_mon_buf_ring(pdev,
								  mac_for_pdev),
					dp_rx_get_mon_desc_pool(soc, mac_id,
								pdev_id),
					rx_bufs_used, &head, &tail, false);
	}
}

/**
 * dp_rx_pdev_mon_buf_buffers_alloc() - Attach buffers to the RXDMA
 * monitor buffer ring, either a small delayed batch or a full replenish
 * @pdev: core txrx pdev context
 * @mac_id: mac id owning the monitor buffer ring
 * @delayed_replenish: true to attach only the small initial batch
 *
 * Return: QDF_STATUS
 */
QDF_STATUS
dp_rx_pdev_mon_buf_buffers_alloc(struct dp_pdev *pdev, uint32_t mac_id,
				 bool delayed_replenish)
{
	uint8_t pdev_id = pdev->pdev_id;
	struct dp_soc *soc = pdev->soc;
	struct dp_srng *mon_buf_ring;
	uint32_t num_entries;
	struct rx_desc_pool *rx_desc_pool;
	QDF_STATUS status = QDF_STATUS_SUCCESS;
	struct wlan_cfg_dp_soc_ctxt *soc_cfg_ctx = soc->wlan_cfg_ctx;

	mon_buf_ring = dp_rxdma_get_mon_buf_ring(pdev, mac_id);

	num_entries = mon_buf_ring->num_entries;

	rx_desc_pool = dp_rx_get_mon_desc_pool(soc, mac_id, pdev_id);

	dp_debug("Mon RX Desc Pool[%d] entries=%u", pdev_id, num_entries);

	/* Replenish RXDMA monitor buffer ring with 8 buffers only
	 * delayed_replenish_entries is actually 8 but when we call
	 * dp_pdev_rx_buffers_attach() we pass 1 less than 8, hence
	 * added 1 to delayed_replenish_entries to ensure we have 8
	 * entries. Once the monitor VAP is configured we replenish
	 * the complete RXDMA monitor buffer ring.
	 */
	if (delayed_replenish) {
		num_entries = soc_cfg_ctx->delayed_replenish_entries + 1;
		status = dp_pdev_rx_buffers_attach(soc, mac_id, mon_buf_ring,
						   rx_desc_pool,
						   num_entries - 1);
	} else {
		union dp_rx_desc_list_elem_t *tail = NULL;
		union dp_rx_desc_list_elem_t *desc_list = NULL;

		status = dp_rx_buffers_replenish(soc, mac_id,
						 mon_buf_ring,
						 rx_desc_pool,
						 num_entries,
						 &desc_list,
						 &tail, false);
	}

	return status;
}

/**
 * dp_rx_pdev_mon_buf_desc_pool_init() - Initialize the monitor buffer
 * SW descriptor pool and attach full monitor mode resources
 * @pdev: core txrx pdev context
 * @mac_id: mac id owning the monitor buffer ring
 */
void
dp_rx_pdev_mon_buf_desc_pool_init(struct dp_pdev *pdev, uint32_t mac_id)
{
	uint8_t pdev_id = pdev->pdev_id;
	struct dp_soc *soc = pdev->soc;
	struct dp_srng *mon_buf_ring;
	uint32_t num_entries;
	struct rx_desc_pool *rx_desc_pool;
	uint32_t rx_desc_pool_size;
	struct wlan_cfg_dp_soc_ctxt *soc_cfg_ctx = soc->wlan_cfg_ctx;
	struct dp_mon_pdev *mon_pdev = pdev->monitor_pdev;

	mon_buf_ring = &soc->rxdma_mon_buf_ring[mac_id];

	num_entries = mon_buf_ring->num_entries;

	rx_desc_pool = &soc->rx_desc_mon[mac_id];

	/* If descriptor pool is already initialized, do not initialize it */
	if (rx_desc_pool->freelist)
		return;

	dp_debug("Mon RX Desc buf Pool[%d] init entries=%u",
		 pdev_id, num_entries);

	rx_desc_pool_size = wlan_cfg_get_dp_soc_rx_sw_desc_weight(soc_cfg_ctx) *
		num_entries;

	rx_desc_pool->owner = HAL_RX_BUF_RBM_SW3_BM(soc->wbm_sw0_bm_id);
	rx_desc_pool->buf_size = RX_MONITOR_BUFFER_SIZE;
	rx_desc_pool->buf_alignment = RX_MONITOR_BUFFER_ALIGNMENT;
	/* Enable frag processing if feature is enabled */
	dp_rx_enable_mon_dest_frag(rx_desc_pool, true);

	dp_rx_desc_pool_init(soc, mac_id, rx_desc_pool_size, rx_desc_pool);

	mon_pdev->mon_last_linkdesc_paddr = 0;

	/* Out-of-range cookie so the duplicate-buffer WAR never matches
	 * on the first reaped buffer.
	 */
	mon_pdev->mon_last_buf_cookie = DP_RX_DESC_COOKIE_MAX + 1;

	/* Attach full monitor mode resources */
	dp_full_mon_attach(pdev);
}

/**
 * dp_rx_pdev_mon_buf_desc_pool_deinit() - De-initialize the monitor
 * buffer SW descriptor pool and detach full monitor mode resources
 * @pdev: core txrx pdev context
 * @mac_id: mac id owning the monitor buffer ring
 */
static void
dp_rx_pdev_mon_buf_desc_pool_deinit(struct dp_pdev *pdev, uint32_t mac_id)
{
	uint8_t pdev_id = pdev->pdev_id;
	struct dp_soc *soc = pdev->soc;
	struct rx_desc_pool *rx_desc_pool;

	rx_desc_pool = &soc->rx_desc_mon[mac_id];

	dp_debug("Mon RX Desc buf Pool[%d] deinit", pdev_id);

	dp_rx_desc_pool_deinit(soc, rx_desc_pool, mac_id);

	/* Detach full monitor mode resources */
	dp_full_mon_detach(pdev);
}

/**
 * dp_rx_pdev_mon_buf_desc_pool_free() - Free the monitor buffer SW
 * descriptor pool memory
 * @pdev: core txrx pdev context
 * @mac_id: mac id owning the monitor buffer ring
 */
static void
dp_rx_pdev_mon_buf_desc_pool_free(struct dp_pdev *pdev, uint32_t mac_id)
{
	uint8_t pdev_id = pdev->pdev_id;
	struct dp_soc *soc = pdev->soc;
	struct rx_desc_pool *rx_desc_pool;

	rx_desc_pool = &soc->rx_desc_mon[mac_id];

	dp_debug("Mon RX Buf Desc Pool Free pdev[%d]", pdev_id);

	dp_rx_desc_pool_free(soc, rx_desc_pool);
}

/**
 * dp_rx_pdev_mon_buf_buffers_free() - Free the buffers (frags or nbufs)
 * held by the monitor buffer SW descriptor pool
 * @pdev: core txrx pdev context
 * @mac_id: mac id owning the monitor buffer ring
 */
void dp_rx_pdev_mon_buf_buffers_free(struct dp_pdev *pdev, uint32_t mac_id)
{
	uint8_t pdev_id = pdev->pdev_id;
	struct dp_soc *soc = pdev->soc;
	struct rx_desc_pool *rx_desc_pool;

	rx_desc_pool = &soc->rx_desc_mon[mac_id];

	dp_debug("Mon RX Buf buffers Free pdev[%d]", pdev_id);

	if (rx_desc_pool->rx_mon_dest_frag_enable)
		dp_rx_desc_frag_free(soc, rx_desc_pool);
	else
		dp_rx_desc_nbuf_free(soc, rx_desc_pool);
}

/**
 * dp_rx_pdev_mon_buf_desc_pool_alloc() - Allocate the monitor buffer SW
 * descriptor pool (no-op if already allocated)
 * @pdev: core txrx pdev context
 * @mac_id: mac id owning the monitor buffer ring
 *
 * Return: QDF_STATUS
 */
QDF_STATUS
dp_rx_pdev_mon_buf_desc_pool_alloc(struct dp_pdev *pdev, uint32_t mac_id)
{
	uint8_t pdev_id = pdev->pdev_id;
	struct dp_soc *soc = pdev->soc;
	struct dp_srng *mon_buf_ring;
	uint32_t num_entries;
	struct rx_desc_pool *rx_desc_pool;
	uint32_t rx_desc_pool_size;
	struct wlan_cfg_dp_soc_ctxt *soc_cfg_ctx = soc->wlan_cfg_ctx;

	mon_buf_ring = &soc->rxdma_mon_buf_ring[mac_id];

	num_entries = mon_buf_ring->num_entries;

	rx_desc_pool = &soc->rx_desc_mon[mac_id];

	dp_debug("Mon RX Desc Pool[%d] entries=%u",
		 pdev_id, num_entries);

	rx_desc_pool_size = wlan_cfg_get_dp_soc_rx_sw_desc_weight(soc_cfg_ctx) *
		num_entries;

	if (dp_rx_desc_pool_is_allocated(rx_desc_pool) == QDF_STATUS_SUCCESS)
		return QDF_STATUS_SUCCESS;

	return dp_rx_desc_pool_alloc(soc, rx_desc_pool_size, rx_desc_pool);
}

#if !defined(DISABLE_MON_CONFIG) && defined(MON_ENABLE_DROP_FOR_MAC)
/**
 * dp_mon_dest_srng_drop_for_mac() - Reap and drop everything on the
 * monitor destination ring for a mac, recycling buffers and link
 * descriptors without delivering any frames
 * @pdev: core txrx pdev context
 * @mac_id: mac id whose destination ring is drained
 *
 * Return: number of ring entries reaped (bounded by MON_DROP_REAP_LIMIT)
 */
uint32_t
dp_mon_dest_srng_drop_for_mac(struct dp_pdev *pdev, uint32_t mac_id)
{
	struct dp_soc *soc = pdev->soc;
	hal_rxdma_desc_t rxdma_dst_ring_desc;
	hal_soc_handle_t hal_soc;
	void *mon_dst_srng;
	union dp_rx_desc_list_elem_t *head = NULL;
	union dp_rx_desc_list_elem_t *tail = NULL;
	uint32_t rx_bufs_used = 0;
	void *rx_msdu_link_desc;
	uint32_t msdu_count = 0;
	uint16_t num_msdus;
	struct hal_buf_info buf_info;
	struct hal_rx_msdu_list msdu_list;
	qdf_nbuf_t nbuf;
	uint32_t i;
	uint8_t bm_action = HAL_BM_ACTION_PUT_IN_IDLE_LIST;
	uint32_t rx_link_buf_info[HAL_RX_BUFFINFO_NUM_DWORDS];
	struct rx_desc_pool *rx_desc_pool;
	uint32_t reap_cnt = 0;
	struct dp_mon_pdev *mon_pdev;

	if (qdf_unlikely(!soc || !soc->hal_soc))
		return reap_cnt;

	mon_dst_srng = dp_rxdma_get_mon_dst_ring(pdev, mac_id);

	if (qdf_unlikely(!mon_dst_srng || !hal_srng_initialized(mon_dst_srng)))
		return reap_cnt;

	hal_soc = soc->hal_soc;
	mon_pdev = pdev->monitor_pdev;

	qdf_spin_lock_bh(&mon_pdev->mon_lock);

	if (qdf_unlikely(hal_srng_access_start(hal_soc, mon_dst_srng))) {
		qdf_spin_unlock_bh(&mon_pdev->mon_lock);
		return reap_cnt;
	}

	rx_desc_pool = dp_rx_get_mon_desc_pool(soc, mac_id, pdev->pdev_id);

	while ((rxdma_dst_ring_desc =
		hal_srng_dst_peek(hal_soc, mon_dst_srng)) &&
		reap_cnt < MON_DROP_REAP_LIMIT) {

		hal_rx_reo_ent_buf_paddr_get(hal_soc, rxdma_dst_ring_desc,
					     &buf_info, &msdu_count);

		/* Walk the MSDU link descriptor chain for this ring entry */
		do {
			rx_msdu_link_desc = dp_rx_cookie_2_mon_link_desc(pdev,
								buf_info, mac_id);

			if (qdf_unlikely(!rx_msdu_link_desc)) {
				mon_pdev->rx_mon_stats.mon_link_desc_invalid++;
				goto next_entry;
			}

			hal_rx_msdu_list_get(soc->hal_soc, rx_msdu_link_desc,
					     &msdu_list, &num_msdus);

			for (i = 0; i < num_msdus; i++) {
				struct dp_rx_desc *rx_desc;
				qdf_dma_addr_t buf_paddr;

				rx_desc = dp_rx_get_mon_desc(soc,
							msdu_list.sw_cookie[i]);

				if (qdf_unlikely(!rx_desc)) {
					mon_pdev->rx_mon_stats.
						mon_rx_desc_invalid++;
					continue;
				}

				nbuf = DP_RX_MON_GET_NBUF_FROM_DESC(rx_desc);
				buf_paddr =
					dp_rx_mon_get_paddr_from_desc(rx_desc);

				/* Sanity: descriptor must be in use and its
				 * buffer must match the HW-reported address.
				 */
				if (qdf_unlikely(!rx_desc->in_use || !nbuf ||
						 msdu_list.paddr[i] !=
						 buf_paddr)) {
					mon_pdev->rx_mon_stats.
						mon_nbuf_sanity_err++;
					continue;
				}
				rx_bufs_used++;

				if (!rx_desc->unmapped) {
					dp_rx_mon_buffer_unmap(soc, rx_desc,
							rx_desc_pool->buf_size);
					rx_desc->unmapped = 1;
				}

				/* Drop the frame: free the buffer, recycle
				 * the SW descriptor.
				 */
				qdf_nbuf_free(nbuf);
				dp_rx_add_to_free_desc_list(&head, &tail,
							    rx_desc);

				if (!(msdu_list.msdu_info[i].msdu_flags &
				      HAL_MSDU_F_MSDU_CONTINUATION))
					msdu_count--;
			}

			/*
			 * Store the current link buffer into to the local
			 * structure to be used for release purpose.
			 */
			hal_rxdma_buff_addr_info_set(soc->hal_soc,
						     rx_link_buf_info,
						     buf_info.paddr,
						     buf_info.sw_cookie,
						     buf_info.rbm);

			hal_rx_mon_next_link_desc_get(soc->hal_soc,
						      rx_msdu_link_desc,
						      &buf_info);
			if (dp_rx_monitor_link_desc_return(pdev,
							   (hal_buff_addrinfo_t)
							   rx_link_buf_info,
							   mac_id, bm_action) !=
			    QDF_STATUS_SUCCESS)
				dp_info_rl("monitor link desc return failed");
		} while (buf_info.paddr && msdu_count);

next_entry:
		reap_cnt++;
		rxdma_dst_ring_desc = hal_srng_dst_get_next(hal_soc,
							    mon_dst_srng);
	}

	hal_srng_access_end(hal_soc, mon_dst_srng);

	qdf_spin_unlock_bh(&mon_pdev->mon_lock);

	/* Replenish the ring with as many buffers as were dropped */
	if (rx_bufs_used) {
		dp_rx_buffers_replenish(soc, mac_id,
					dp_rxdma_get_mon_buf_ring(pdev, mac_id),
					rx_desc_pool,
					rx_bufs_used, &head, &tail, false);
	}

	return reap_cnt;
}
#endif

/* Free monitor destination side pools: buf desc pool + link desc banks */
static void
dp_rx_pdev_mon_dest_desc_pool_free(struct dp_pdev *pdev, int mac_for_pdev)
{
	struct dp_soc *soc = pdev->soc;

	dp_rx_pdev_mon_buf_desc_pool_free(pdev, mac_for_pdev);
	dp_hw_link_desc_pool_banks_free(soc, mac_for_pdev);
}

/* De-initialize the monitor destination buf desc pool (RxDMA1 only) */
static void
dp_rx_pdev_mon_dest_desc_pool_deinit(struct dp_pdev *pdev, int mac_for_pdev)
{
	struct dp_soc *soc = pdev->soc;

	if (!soc->wlan_cfg_ctx->rxdma1_enable)
		return;

	dp_rx_pdev_mon_buf_desc_pool_deinit(pdev, mac_for_pdev);
}

/* Initialize destination pools when RxDMA1 + delayed replenish are enabled */
static void
dp_rx_pdev_mon_dest_desc_pool_init(struct dp_pdev *pdev, uint32_t mac_for_pdev)
{
	struct dp_soc *soc = pdev->soc;

	if (!soc->wlan_cfg_ctx->rxdma1_enable ||
	    !wlan_cfg_is_delay_mon_replenish(soc->wlan_cfg_ctx))
		return;

	dp_rx_pdev_mon_buf_desc_pool_init(pdev, mac_for_pdev);
	dp_link_desc_ring_replenish(soc, mac_for_pdev);
}

/* Free destination-side buffers (RxDMA1 only) */
static void
dp_rx_pdev_mon_dest_buffers_free(struct dp_pdev *pdev, int mac_for_pdev)
{
	struct dp_soc *soc = pdev->soc;

	if
(!soc->wlan_cfg_ctx->rxdma1_enable) 943 return; 944 945 dp_rx_pdev_mon_buf_buffers_free(pdev, mac_for_pdev); 946 } 947 948 static QDF_STATUS 949 dp_rx_pdev_mon_dest_buffers_alloc(struct dp_pdev *pdev, int mac_for_pdev) 950 { 951 struct dp_soc *soc = pdev->soc; 952 struct wlan_cfg_dp_soc_ctxt *soc_cfg_ctx = soc->wlan_cfg_ctx; 953 bool delayed_replenish; 954 QDF_STATUS status = QDF_STATUS_SUCCESS; 955 956 delayed_replenish = soc_cfg_ctx->delayed_replenish_entries ? 1 : 0; 957 if (!soc->wlan_cfg_ctx->rxdma1_enable || 958 !wlan_cfg_is_delay_mon_replenish(soc->wlan_cfg_ctx)) 959 return status; 960 961 status = dp_rx_pdev_mon_buf_buffers_alloc(pdev, mac_for_pdev, 962 delayed_replenish); 963 if (!QDF_IS_STATUS_SUCCESS(status)) 964 dp_err("dp_rx_pdev_mon_buf_desc_pool_alloc() failed"); 965 966 return status; 967 } 968 969 static QDF_STATUS 970 dp_rx_pdev_mon_dest_desc_pool_alloc(struct dp_pdev *pdev, uint32_t mac_for_pdev) 971 { 972 struct dp_soc *soc = pdev->soc; 973 QDF_STATUS status = QDF_STATUS_SUCCESS; 974 975 if (!soc->wlan_cfg_ctx->rxdma1_enable || 976 !wlan_cfg_is_delay_mon_replenish(soc->wlan_cfg_ctx)) 977 return status; 978 979 /* Allocate sw rx descriptor pool for monitor RxDMA buffer ring */ 980 status = dp_rx_pdev_mon_buf_desc_pool_alloc(pdev, mac_for_pdev); 981 if (!QDF_IS_STATUS_SUCCESS(status)) { 982 dp_err("dp_rx_pdev_mon_buf_desc_pool_alloc() failed"); 983 goto fail; 984 } 985 986 /* Allocate link descriptors for the monitor link descriptor ring */ 987 status = dp_hw_link_desc_pool_banks_alloc(soc, mac_for_pdev); 988 if (!QDF_IS_STATUS_SUCCESS(status)) { 989 dp_err("dp_hw_link_desc_pool_banks_alloc() failed"); 990 goto mon_buf_dealloc; 991 } 992 993 return status; 994 995 mon_buf_dealloc: 996 dp_rx_pdev_mon_status_desc_pool_free(pdev, mac_for_pdev); 997 fail: 998 return status; 999 } 1000 #else 1001 static void 1002 dp_rx_pdev_mon_dest_desc_pool_free(struct dp_pdev *pdev, int mac_for_pdev) 1003 { 1004 } 1005 1006 static void 1007 
dp_rx_pdev_mon_dest_desc_pool_deinit(struct dp_pdev *pdev, int mac_for_pdev) 1008 { 1009 } 1010 1011 static void 1012 dp_rx_pdev_mon_dest_desc_pool_init(struct dp_pdev *pdev, uint32_t mac_for_pdev) 1013 { 1014 } 1015 1016 static void 1017 dp_rx_pdev_mon_dest_buffers_free(struct dp_pdev *pdev, int mac_for_pdev) 1018 { 1019 } 1020 1021 static QDF_STATUS 1022 dp_rx_pdev_mon_dest_buffers_alloc(struct dp_pdev *pdev, int mac_for_pdev) 1023 { 1024 return QDF_STATUS_SUCCESS; 1025 } 1026 1027 static QDF_STATUS 1028 dp_rx_pdev_mon_dest_desc_pool_alloc(struct dp_pdev *pdev, uint32_t mac_for_pdev) 1029 { 1030 return QDF_STATUS_SUCCESS; 1031 } 1032 1033 #if !defined(DISABLE_MON_CONFIG) && defined(MON_ENABLE_DROP_FOR_MAC) 1034 uint32_t 1035 dp_mon_dest_srng_drop_for_mac(struct dp_pdev *pdev, uint32_t mac_id) 1036 { 1037 return 0; 1038 } 1039 #endif 1040 #endif 1041 1042 static void 1043 dp_rx_pdev_mon_cmn_desc_pool_free(struct dp_pdev *pdev, int mac_id) 1044 { 1045 struct dp_soc *soc = pdev->soc; 1046 uint8_t pdev_id = pdev->pdev_id; 1047 int mac_for_pdev = dp_get_lmac_id_for_pdev_id(soc, mac_id, pdev_id); 1048 1049 dp_rx_pdev_mon_status_desc_pool_free(pdev, mac_for_pdev); 1050 dp_rx_pdev_mon_dest_desc_pool_free(pdev, mac_for_pdev); 1051 } 1052 1053 void dp_rx_pdev_mon_desc_pool_free(struct dp_pdev *pdev) 1054 { 1055 int mac_id; 1056 1057 for (mac_id = 0; mac_id < NUM_RXDMA_RINGS_PER_PDEV; mac_id++) 1058 dp_rx_pdev_mon_cmn_desc_pool_free(pdev, mac_id); 1059 } 1060 1061 static void 1062 dp_rx_pdev_mon_cmn_desc_pool_deinit(struct dp_pdev *pdev, int mac_id) 1063 { 1064 struct dp_soc *soc = pdev->soc; 1065 uint8_t pdev_id = pdev->pdev_id; 1066 int mac_for_pdev = dp_get_lmac_id_for_pdev_id(soc, mac_id, pdev_id); 1067 1068 dp_rx_pdev_mon_status_desc_pool_deinit(pdev, mac_for_pdev); 1069 1070 dp_rx_pdev_mon_dest_desc_pool_deinit(pdev, mac_for_pdev); 1071 } 1072 1073 void 1074 dp_rx_pdev_mon_desc_pool_deinit(struct dp_pdev *pdev) 1075 { 1076 int mac_id; 1077 1078 for (mac_id = 0; mac_id 
< NUM_RXDMA_RINGS_PER_PDEV; mac_id++) 1079 dp_rx_pdev_mon_cmn_desc_pool_deinit(pdev, mac_id); 1080 qdf_spinlock_destroy(&pdev->monitor_pdev->mon_lock); 1081 } 1082 1083 static void 1084 dp_rx_pdev_mon_cmn_desc_pool_init(struct dp_pdev *pdev, int mac_id) 1085 { 1086 struct dp_soc *soc = pdev->soc; 1087 uint32_t mac_for_pdev; 1088 1089 mac_for_pdev = dp_get_lmac_id_for_pdev_id(soc, mac_id, pdev->pdev_id); 1090 dp_rx_pdev_mon_status_desc_pool_init(pdev, mac_for_pdev); 1091 1092 dp_rx_pdev_mon_dest_desc_pool_init(pdev, mac_for_pdev); 1093 } 1094 1095 void 1096 dp_rx_pdev_mon_desc_pool_init(struct dp_pdev *pdev) 1097 { 1098 int mac_id; 1099 1100 for (mac_id = 0; mac_id < NUM_RXDMA_RINGS_PER_PDEV; mac_id++) 1101 dp_rx_pdev_mon_cmn_desc_pool_init(pdev, mac_id); 1102 qdf_spinlock_create(&pdev->monitor_pdev->mon_lock); 1103 } 1104 1105 static void 1106 dp_rx_pdev_mon_cmn_buffers_free(struct dp_pdev *pdev, int mac_id) 1107 { 1108 uint8_t pdev_id = pdev->pdev_id; 1109 int mac_for_pdev; 1110 1111 mac_for_pdev = dp_get_lmac_id_for_pdev_id(pdev->soc, mac_id, pdev_id); 1112 dp_rx_pdev_mon_status_buffers_free(pdev, mac_for_pdev); 1113 1114 dp_rx_pdev_mon_dest_buffers_free(pdev, mac_for_pdev); 1115 } 1116 1117 void 1118 dp_rx_pdev_mon_buffers_free(struct dp_pdev *pdev) 1119 { 1120 int mac_id; 1121 1122 for (mac_id = 0; mac_id < NUM_RXDMA_RINGS_PER_PDEV; mac_id++) 1123 dp_rx_pdev_mon_cmn_buffers_free(pdev, mac_id); 1124 pdev->monitor_pdev->pdev_mon_init = 0; 1125 } 1126 1127 QDF_STATUS 1128 dp_rx_pdev_mon_buffers_alloc(struct dp_pdev *pdev) 1129 { 1130 int mac_id; 1131 int mac_for_pdev; 1132 QDF_STATUS status = QDF_STATUS_SUCCESS; 1133 uint8_t pdev_id = pdev->pdev_id; 1134 struct wlan_cfg_dp_soc_ctxt *soc_cfg_ctx = pdev->soc->wlan_cfg_ctx; 1135 1136 for (mac_id = 0; mac_id < soc_cfg_ctx->num_rxdma_status_rings_per_pdev; 1137 mac_id++) { 1138 mac_for_pdev = dp_get_lmac_id_for_pdev_id(pdev->soc, mac_id, 1139 pdev_id); 1140 status = dp_rx_pdev_mon_status_buffers_alloc(pdev, 1141 
mac_for_pdev); 1142 if (!QDF_IS_STATUS_SUCCESS(status)) { 1143 dp_err("dp_rx_pdev_mon_status_desc_pool_alloc() failed"); 1144 goto mon_status_buf_fail; 1145 } 1146 } 1147 1148 for (mac_id = 0; mac_id < soc_cfg_ctx->num_rxdma_dst_rings_per_pdev; 1149 mac_id++) { 1150 mac_for_pdev = dp_get_lmac_id_for_pdev_id(pdev->soc, mac_id, 1151 pdev_id); 1152 status = dp_rx_pdev_mon_dest_buffers_alloc(pdev, mac_for_pdev); 1153 if (!QDF_IS_STATUS_SUCCESS(status)) 1154 goto mon_stat_buf_dealloc; 1155 } 1156 1157 return status; 1158 1159 mon_stat_buf_dealloc: 1160 dp_rx_pdev_mon_status_buffers_free(pdev, mac_for_pdev); 1161 mon_status_buf_fail: 1162 return status; 1163 } 1164 1165 static QDF_STATUS 1166 dp_rx_pdev_mon_cmn_desc_pool_alloc(struct dp_pdev *pdev, int mac_id) 1167 { 1168 struct dp_soc *soc = pdev->soc; 1169 uint8_t pdev_id = pdev->pdev_id; 1170 uint32_t mac_for_pdev; 1171 QDF_STATUS status; 1172 1173 mac_for_pdev = dp_get_lmac_id_for_pdev_id(soc, mac_id, pdev_id); 1174 1175 /* Allocate sw rx descriptor pool for monitor status ring */ 1176 status = dp_rx_pdev_mon_status_desc_pool_alloc(pdev, mac_for_pdev); 1177 if (!QDF_IS_STATUS_SUCCESS(status)) { 1178 dp_err("dp_rx_pdev_mon_status_desc_pool_alloc() failed"); 1179 goto fail; 1180 } 1181 1182 status = dp_rx_pdev_mon_dest_desc_pool_alloc(pdev, mac_for_pdev); 1183 if (!QDF_IS_STATUS_SUCCESS(status)) 1184 goto mon_status_dealloc; 1185 1186 return status; 1187 1188 mon_status_dealloc: 1189 dp_rx_pdev_mon_status_desc_pool_free(pdev, mac_for_pdev); 1190 fail: 1191 return status; 1192 } 1193 1194 QDF_STATUS 1195 dp_rx_pdev_mon_desc_pool_alloc(struct dp_pdev *pdev) 1196 { 1197 QDF_STATUS status; 1198 int mac_id, count; 1199 1200 for (mac_id = 0; mac_id < NUM_RXDMA_RINGS_PER_PDEV; mac_id++) { 1201 status = dp_rx_pdev_mon_cmn_desc_pool_alloc(pdev, mac_id); 1202 if (!QDF_IS_STATUS_SUCCESS(status)) { 1203 dp_rx_mon_dest_err("%pK: %d failed\n", 1204 pdev->soc, mac_id); 1205 1206 for (count = 0; count < mac_id; count++) 1207 
dp_rx_pdev_mon_cmn_desc_pool_free(pdev, count); 1208 1209 return status; 1210 } 1211 } 1212 return status; 1213 } 1214 1215 #ifdef QCA_WIFI_MONITOR_MODE_NO_MSDU_START_TLV_SUPPORT 1216 static inline void 1217 hal_rx_populate_buf_info(struct dp_soc *soc, 1218 struct hal_rx_mon_dest_buf_info *buf_info, 1219 void *rx_desc) 1220 { 1221 hal_rx_priv_info_get_from_tlv(soc->hal_soc, rx_desc, 1222 (uint8_t *)buf_info, 1223 sizeof(*buf_info)); 1224 } 1225 1226 static inline uint8_t 1227 hal_rx_frag_msdu_get_l2_hdr_offset(struct dp_soc *soc, 1228 struct hal_rx_mon_dest_buf_info *buf_info, 1229 void *rx_desc, bool is_first_frag) 1230 { 1231 if (is_first_frag) 1232 return buf_info->l2_hdr_pad; 1233 else 1234 return DP_RX_MON_RAW_L2_HDR_PAD_BYTE; 1235 } 1236 #else 1237 static inline void 1238 hal_rx_populate_buf_info(struct dp_soc *soc, 1239 struct hal_rx_mon_dest_buf_info *buf_info, 1240 void *rx_desc) 1241 { 1242 if (hal_rx_tlv_decap_format_get(soc->hal_soc, rx_desc) == 1243 HAL_HW_RX_DECAP_FORMAT_RAW) 1244 buf_info->is_decap_raw = 1; 1245 1246 if (hal_rx_tlv_mpdu_len_err_get(soc->hal_soc, rx_desc)) 1247 buf_info->mpdu_len_err = 1; 1248 } 1249 1250 static inline uint8_t 1251 hal_rx_frag_msdu_get_l2_hdr_offset(struct dp_soc *soc, 1252 struct hal_rx_mon_dest_buf_info *buf_info, 1253 void *rx_desc, bool is_first_frag) 1254 { 1255 return hal_rx_msdu_end_l3_hdr_padding_get(soc->hal_soc, rx_desc); 1256 } 1257 #endif 1258 1259 static inline 1260 void dp_rx_msdus_set_payload(struct dp_soc *soc, qdf_nbuf_t msdu, 1261 uint8_t l2_hdr_offset) 1262 { 1263 uint8_t *data; 1264 uint32_t rx_pkt_offset; 1265 1266 data = qdf_nbuf_data(msdu); 1267 rx_pkt_offset = soc->rx_mon_pkt_tlv_size; 1268 qdf_nbuf_pull_head(msdu, rx_pkt_offset + l2_hdr_offset); 1269 } 1270 1271 static inline qdf_nbuf_t 1272 dp_rx_mon_restitch_mpdu_from_msdus(struct dp_soc *soc, 1273 uint32_t mac_id, 1274 qdf_nbuf_t head_msdu, 1275 qdf_nbuf_t last_msdu, 1276 struct cdp_mon_status *rx_status) 1277 { 1278 qdf_nbuf_t msdu, 
mpdu_buf, prev_buf, msdu_orig, head_frag_list; 1279 uint32_t wifi_hdr_len, sec_hdr_len, msdu_llc_len, 1280 mpdu_buf_len, decap_hdr_pull_bytes, frag_list_sum_len, dir, 1281 is_amsdu, is_first_frag, amsdu_pad; 1282 void *rx_desc; 1283 char *hdr_desc; 1284 unsigned char *dest; 1285 struct ieee80211_frame *wh; 1286 struct ieee80211_qoscntl *qos; 1287 struct dp_pdev *dp_pdev = dp_get_pdev_for_lmac_id(soc, mac_id); 1288 struct dp_mon_pdev *mon_pdev; 1289 struct hal_rx_mon_dest_buf_info buf_info; 1290 uint8_t l2_hdr_offset; 1291 1292 head_frag_list = NULL; 1293 mpdu_buf = NULL; 1294 1295 if (qdf_unlikely(!dp_pdev)) { 1296 dp_rx_mon_dest_debug("%pK: pdev is null for mac_id = %d", 1297 soc, mac_id); 1298 return NULL; 1299 } 1300 1301 mon_pdev = dp_pdev->monitor_pdev; 1302 1303 /* The nbuf has been pulled just beyond the status and points to the 1304 * payload 1305 */ 1306 if (!head_msdu) 1307 goto mpdu_stitch_fail; 1308 1309 msdu_orig = head_msdu; 1310 1311 rx_desc = qdf_nbuf_data(msdu_orig); 1312 qdf_mem_zero(&buf_info, sizeof(buf_info)); 1313 hal_rx_populate_buf_info(soc, &buf_info, rx_desc); 1314 1315 if (buf_info.mpdu_len_err) { 1316 /* It looks like there is some issue on MPDU len err */ 1317 /* Need further investigate if drop the packet */ 1318 DP_STATS_INC(dp_pdev, dropped.mon_rx_drop, 1); 1319 return NULL; 1320 } 1321 1322 rx_desc = qdf_nbuf_data(last_msdu); 1323 1324 rx_status->cdp_rs_fcs_err = hal_rx_tlv_mpdu_fcs_err_get(soc->hal_soc, 1325 rx_desc); 1326 mon_pdev->ppdu_info.rx_status.rs_fcs_err = rx_status->cdp_rs_fcs_err; 1327 1328 /* Fill out the rx_status from the PPDU start and end fields */ 1329 /* HAL_RX_GET_PPDU_STATUS(soc, mac_id, rx_status); */ 1330 1331 rx_desc = qdf_nbuf_data(head_msdu); 1332 1333 /* Easy case - The MSDU status indicates that this is a non-decapped 1334 * packet in RAW mode. 
1335 */ 1336 if (buf_info.is_decap_raw) { 1337 /* Note that this path might suffer from headroom unavailabilty 1338 * - but the RX status is usually enough 1339 */ 1340 1341 l2_hdr_offset = hal_rx_frag_msdu_get_l2_hdr_offset(soc, 1342 &buf_info, 1343 rx_desc, 1344 true); 1345 dp_rx_msdus_set_payload(soc, head_msdu, l2_hdr_offset); 1346 1347 dp_rx_mon_dest_debug("%pK: decap format raw head %pK head->next %pK last_msdu %pK last_msdu->next %pK", 1348 soc, head_msdu, head_msdu->next, 1349 last_msdu, last_msdu->next); 1350 1351 mpdu_buf = head_msdu; 1352 1353 prev_buf = mpdu_buf; 1354 1355 frag_list_sum_len = 0; 1356 msdu = qdf_nbuf_next(head_msdu); 1357 is_first_frag = 1; 1358 1359 while (msdu) { 1360 l2_hdr_offset = hal_rx_frag_msdu_get_l2_hdr_offset( 1361 soc, &buf_info, 1362 rx_desc, false); 1363 dp_rx_msdus_set_payload(soc, msdu, l2_hdr_offset); 1364 1365 if (is_first_frag) { 1366 is_first_frag = 0; 1367 head_frag_list = msdu; 1368 } 1369 1370 frag_list_sum_len += qdf_nbuf_len(msdu); 1371 1372 /* Maintain the linking of the cloned MSDUS */ 1373 qdf_nbuf_set_next_ext(prev_buf, msdu); 1374 1375 /* Move to the next */ 1376 prev_buf = msdu; 1377 msdu = qdf_nbuf_next(msdu); 1378 } 1379 1380 qdf_nbuf_trim_tail(prev_buf, HAL_RX_FCS_LEN); 1381 1382 /* If there were more fragments to this RAW frame */ 1383 if (head_frag_list) { 1384 if (frag_list_sum_len < 1385 sizeof(struct ieee80211_frame_min_one)) { 1386 DP_STATS_INC(dp_pdev, dropped.mon_rx_drop, 1); 1387 return NULL; 1388 } 1389 frag_list_sum_len -= HAL_RX_FCS_LEN; 1390 qdf_nbuf_append_ext_list(mpdu_buf, head_frag_list, 1391 frag_list_sum_len); 1392 qdf_nbuf_set_next(mpdu_buf, NULL); 1393 } 1394 1395 goto mpdu_stitch_done; 1396 } 1397 1398 /* Decap mode: 1399 * Calculate the amount of header in decapped packet to knock off based 1400 * on the decap type and the corresponding number of raw bytes to copy 1401 * status header 1402 */ 1403 rx_desc = qdf_nbuf_data(head_msdu); 1404 1405 hdr_desc = 
hal_rx_desc_get_80211_hdr(soc->hal_soc, rx_desc); 1406 1407 dp_rx_mon_dest_debug("%pK: decap format not raw", soc); 1408 1409 /* Base size */ 1410 wifi_hdr_len = sizeof(struct ieee80211_frame); 1411 wh = (struct ieee80211_frame *)hdr_desc; 1412 1413 dir = wh->i_fc[1] & IEEE80211_FC1_DIR_MASK; 1414 1415 if (dir == IEEE80211_FC1_DIR_DSTODS) 1416 wifi_hdr_len += 6; 1417 1418 is_amsdu = 0; 1419 if (wh->i_fc[0] & QDF_IEEE80211_FC0_SUBTYPE_QOS) { 1420 qos = (struct ieee80211_qoscntl *) 1421 (hdr_desc + wifi_hdr_len); 1422 wifi_hdr_len += 2; 1423 1424 is_amsdu = (qos->i_qos[0] & IEEE80211_QOS_AMSDU); 1425 } 1426 1427 /* Calculate security header length based on 'Protected' 1428 * and 'EXT_IV' flag 1429 */ 1430 if (wh->i_fc[1] & IEEE80211_FC1_WEP) { 1431 char *iv = (char *)wh + wifi_hdr_len; 1432 1433 if (iv[3] & KEY_EXTIV) 1434 sec_hdr_len = 8; 1435 else 1436 sec_hdr_len = 4; 1437 } else { 1438 sec_hdr_len = 0; 1439 } 1440 wifi_hdr_len += sec_hdr_len; 1441 1442 /* MSDU related stuff LLC - AMSDU subframe header etc */ 1443 msdu_llc_len = is_amsdu ? (14 + 8) : 8; 1444 1445 mpdu_buf_len = wifi_hdr_len + msdu_llc_len; 1446 1447 /* "Decap" header to remove from MSDU buffer */ 1448 decap_hdr_pull_bytes = 14; 1449 1450 /* Allocate a new nbuf for holding the 802.11 header retrieved from the 1451 * status of the now decapped first msdu. 
Leave enough headroom for 1452 * accommodating any radio-tap /prism like PHY header 1453 */ 1454 mpdu_buf = qdf_nbuf_alloc(soc->osdev, 1455 MAX_MONITOR_HEADER + mpdu_buf_len, 1456 MAX_MONITOR_HEADER, 4, FALSE); 1457 1458 if (!mpdu_buf) 1459 goto mpdu_stitch_done; 1460 1461 /* Copy the MPDU related header and enc headers into the first buffer 1462 * - Note that there can be a 2 byte pad between heaader and enc header 1463 */ 1464 1465 prev_buf = mpdu_buf; 1466 dest = qdf_nbuf_put_tail(prev_buf, wifi_hdr_len); 1467 if (!dest) 1468 goto mpdu_stitch_fail; 1469 1470 qdf_mem_copy(dest, hdr_desc, wifi_hdr_len); 1471 hdr_desc += wifi_hdr_len; 1472 1473 #if 0 1474 dest = qdf_nbuf_put_tail(prev_buf, sec_hdr_len); 1475 adf_os_mem_copy(dest, hdr_desc, sec_hdr_len); 1476 hdr_desc += sec_hdr_len; 1477 #endif 1478 1479 /* The first LLC len is copied into the MPDU buffer */ 1480 frag_list_sum_len = 0; 1481 1482 msdu_orig = head_msdu; 1483 is_first_frag = 1; 1484 amsdu_pad = 0; 1485 1486 while (msdu_orig) { 1487 1488 /* TODO: intra AMSDU padding - do we need it ??? 
*/ 1489 1490 msdu = msdu_orig; 1491 1492 if (is_first_frag) { 1493 head_frag_list = msdu; 1494 } else { 1495 /* Reload the hdr ptr only on non-first MSDUs */ 1496 rx_desc = qdf_nbuf_data(msdu_orig); 1497 hdr_desc = hal_rx_desc_get_80211_hdr(soc->hal_soc, 1498 rx_desc); 1499 } 1500 1501 /* Copy this buffers MSDU related status into the prev buffer */ 1502 1503 if (is_first_frag) 1504 is_first_frag = 0; 1505 1506 /* Update protocol and flow tag for MSDU */ 1507 dp_rx_mon_update_protocol_flow_tag(soc, dp_pdev, 1508 msdu_orig, rx_desc); 1509 1510 dest = qdf_nbuf_put_tail(prev_buf, 1511 msdu_llc_len + amsdu_pad); 1512 1513 if (!dest) 1514 goto mpdu_stitch_fail; 1515 1516 dest += amsdu_pad; 1517 qdf_mem_copy(dest, hdr_desc, msdu_llc_len); 1518 1519 l2_hdr_offset = hal_rx_frag_msdu_get_l2_hdr_offset(soc, 1520 &buf_info, 1521 rx_desc, 1522 true); 1523 dp_rx_msdus_set_payload(soc, msdu, l2_hdr_offset); 1524 1525 /* Push the MSDU buffer beyond the decap header */ 1526 qdf_nbuf_pull_head(msdu, decap_hdr_pull_bytes); 1527 frag_list_sum_len += msdu_llc_len + qdf_nbuf_len(msdu) 1528 + amsdu_pad; 1529 1530 /* Set up intra-AMSDU pad to be added to start of next buffer - 1531 * AMSDU pad is 4 byte pad on AMSDU subframe 1532 */ 1533 amsdu_pad = (msdu_llc_len + qdf_nbuf_len(msdu)) & 0x3; 1534 amsdu_pad = amsdu_pad ? 
(4 - amsdu_pad) : 0; 1535 1536 /* TODO FIXME How do we handle MSDUs that have fraglist - Should 1537 * probably iterate all the frags cloning them along the way and 1538 * and also updating the prev_buf pointer 1539 */ 1540 1541 /* Move to the next */ 1542 prev_buf = msdu; 1543 msdu_orig = qdf_nbuf_next(msdu_orig); 1544 } 1545 1546 #if 0 1547 /* Add in the trailer section - encryption trailer + FCS */ 1548 qdf_nbuf_put_tail(prev_buf, HAL_RX_FCS_LEN); 1549 frag_list_sum_len += HAL_RX_FCS_LEN; 1550 #endif 1551 1552 frag_list_sum_len -= msdu_llc_len; 1553 1554 /* TODO: Convert this to suitable adf routines */ 1555 qdf_nbuf_append_ext_list(mpdu_buf, head_frag_list, 1556 frag_list_sum_len); 1557 1558 dp_rx_mon_dest_debug("%pK: mpdu_buf %pK mpdu_buf->len %u", 1559 soc, mpdu_buf, mpdu_buf->len); 1560 1561 mpdu_stitch_done: 1562 /* Check if this buffer contains the PPDU end status for TSF */ 1563 /* Need revist this code to see where we can get tsf timestamp */ 1564 #if 0 1565 /* PPDU end TLV will be retrieved from monitor status ring */ 1566 last_mpdu = 1567 (*(((u_int32_t *)&rx_desc->attention)) & 1568 RX_ATTENTION_0_LAST_MPDU_MASK) >> 1569 RX_ATTENTION_0_LAST_MPDU_LSB; 1570 1571 if (last_mpdu) 1572 rx_status->rs_tstamp.tsf = rx_desc->ppdu_end.tsf_timestamp; 1573 1574 #endif 1575 return mpdu_buf; 1576 1577 mpdu_stitch_fail: 1578 if ((mpdu_buf) && !buf_info.is_decap_raw) { 1579 dp_rx_mon_dest_err("%pK: mpdu_stitch_fail mpdu_buf %pK", 1580 soc, mpdu_buf); 1581 /* Free the head buffer */ 1582 qdf_nbuf_free(mpdu_buf); 1583 } 1584 return NULL; 1585 } 1586 1587 #ifdef DP_RX_MON_MEM_FRAG 1588 /** 1589 * dp_rx_mon_fraglist_prepare() - Prepare nbuf fraglist from chained skb 1590 * 1591 * @head_msdu: Parent SKB 1592 * @tail_msdu: Last skb in the chained list 1593 * 1594 * Return: Void 1595 */ 1596 void dp_rx_mon_fraglist_prepare(qdf_nbuf_t head_msdu, qdf_nbuf_t tail_msdu) 1597 { 1598 qdf_nbuf_t msdu, mpdu_buf, prev_buf, head_frag_list; 1599 uint32_t frag_list_sum_len; 1600 1601 
dp_err("[%s][%d] decap format raw head %pK head->next %pK last_msdu %pK last_msdu->next %pK", 1602 __func__, __LINE__, head_msdu, head_msdu->next, 1603 tail_msdu, tail_msdu->next); 1604 1605 /* Single skb accommodating MPDU worth Data */ 1606 if (tail_msdu == head_msdu) 1607 return; 1608 1609 mpdu_buf = head_msdu; 1610 prev_buf = mpdu_buf; 1611 frag_list_sum_len = 0; 1612 1613 msdu = qdf_nbuf_next(head_msdu); 1614 /* msdu can't be NULL here as it is multiple skb case here */ 1615 1616 /* Head frag list to point to second skb */ 1617 head_frag_list = msdu; 1618 1619 while (msdu) { 1620 frag_list_sum_len += qdf_nbuf_len(msdu); 1621 prev_buf = msdu; 1622 msdu = qdf_nbuf_next(msdu); 1623 } 1624 1625 qdf_nbuf_append_ext_list(mpdu_buf, head_frag_list, frag_list_sum_len); 1626 1627 /* Make Parent skb next to NULL */ 1628 qdf_nbuf_set_next(mpdu_buf, NULL); 1629 } 1630 1631 /** 1632 * dp_rx_mon_frag_restitch_mpdu_from_msdus() - Restitch logic to 1633 * convert to 802.3 header and adjust frag memory pointing to 1634 * dot3 header and payload in case of Non-Raw frame. 1635 * 1636 * @soc: struct dp_soc * 1637 * @mac_id: MAC id 1638 * @head_msdu: MPDU containing all MSDU as a frag 1639 * @tail_msdu: last skb which accommodate MPDU info 1640 * @rx_status: struct cdp_mon_status * 1641 * 1642 * Return: Adjusted nbuf containing MPDU worth info. 
1643 */ 1644 static inline qdf_nbuf_t 1645 dp_rx_mon_frag_restitch_mpdu_from_msdus(struct dp_soc *soc, 1646 uint32_t mac_id, 1647 qdf_nbuf_t head_msdu, 1648 qdf_nbuf_t tail_msdu, 1649 struct cdp_mon_status *rx_status) 1650 { 1651 uint32_t wifi_hdr_len, sec_hdr_len, msdu_llc_len, 1652 mpdu_buf_len, decap_hdr_pull_bytes, dir, 1653 is_amsdu, amsdu_pad, frag_size, tot_msdu_len; 1654 qdf_frag_t rx_desc, rx_src_desc, rx_dest_desc, frag_addr; 1655 char *hdr_desc; 1656 uint8_t num_frags, frags_iter, l2_hdr_offset; 1657 struct ieee80211_frame *wh; 1658 struct ieee80211_qoscntl *qos; 1659 struct dp_pdev *dp_pdev = dp_get_pdev_for_lmac_id(soc, mac_id); 1660 int16_t frag_page_offset = 0; 1661 struct hal_rx_mon_dest_buf_info buf_info; 1662 uint32_t pad_byte_pholder = 0; 1663 qdf_nbuf_t msdu_curr; 1664 uint16_t rx_mon_tlv_size = soc->rx_mon_pkt_tlv_size; 1665 struct dp_mon_pdev *mon_pdev; 1666 1667 if (qdf_unlikely(!dp_pdev)) { 1668 dp_rx_mon_dest_debug("%pK: pdev is null for mac_id = %d", 1669 soc, mac_id); 1670 return NULL; 1671 } 1672 1673 mon_pdev = dp_pdev->monitor_pdev; 1674 qdf_mem_zero(&buf_info, sizeof(struct hal_rx_mon_dest_buf_info)); 1675 1676 if (!head_msdu || !tail_msdu) 1677 goto mpdu_stitch_fail; 1678 1679 rx_desc = qdf_nbuf_get_frag_addr(head_msdu, 0) - rx_mon_tlv_size; 1680 1681 if (hal_rx_tlv_mpdu_len_err_get(soc->hal_soc, rx_desc)) { 1682 /* It looks like there is some issue on MPDU len err */ 1683 /* Need further investigate if drop the packet */ 1684 DP_STATS_INC(dp_pdev, dropped.mon_rx_drop, 1); 1685 return NULL; 1686 } 1687 1688 /* Look for FCS error */ 1689 num_frags = qdf_nbuf_get_nr_frags(tail_msdu); 1690 rx_desc = qdf_nbuf_get_frag_addr(tail_msdu, num_frags - 1) - 1691 rx_mon_tlv_size; 1692 rx_status->cdp_rs_fcs_err = hal_rx_tlv_mpdu_fcs_err_get(soc->hal_soc, 1693 rx_desc); 1694 mon_pdev->ppdu_info.rx_status.rs_fcs_err = rx_status->cdp_rs_fcs_err; 1695 1696 rx_desc = qdf_nbuf_get_frag_addr(head_msdu, 0) - rx_mon_tlv_size; 1697 
hal_rx_priv_info_get_from_tlv(soc->hal_soc, rx_desc, 1698 (uint8_t *)&buf_info, 1699 sizeof(buf_info)); 1700 1701 /* Easy case - The MSDU status indicates that this is a non-decapped 1702 * packet in RAW mode. 1703 */ 1704 if (buf_info.is_decap_raw == 1) { 1705 dp_rx_mon_fraglist_prepare(head_msdu, tail_msdu); 1706 goto mpdu_stitch_done; 1707 } 1708 1709 l2_hdr_offset = DP_RX_MON_NONRAW_L2_HDR_PAD_BYTE; 1710 1711 /* Decap mode: 1712 * Calculate the amount of header in decapped packet to knock off based 1713 * on the decap type and the corresponding number of raw bytes to copy 1714 * status header 1715 */ 1716 hdr_desc = hal_rx_desc_get_80211_hdr(soc->hal_soc, rx_desc); 1717 1718 dp_rx_mon_dest_debug("%pK: decap format not raw", soc); 1719 1720 /* Base size */ 1721 wifi_hdr_len = sizeof(struct ieee80211_frame); 1722 wh = (struct ieee80211_frame *)hdr_desc; 1723 1724 dir = wh->i_fc[1] & IEEE80211_FC1_DIR_MASK; 1725 1726 if (dir == IEEE80211_FC1_DIR_DSTODS) 1727 wifi_hdr_len += 6; 1728 1729 is_amsdu = 0; 1730 if (wh->i_fc[0] & QDF_IEEE80211_FC0_SUBTYPE_QOS) { 1731 qos = (struct ieee80211_qoscntl *) 1732 (hdr_desc + wifi_hdr_len); 1733 wifi_hdr_len += 2; 1734 1735 is_amsdu = (qos->i_qos[0] & IEEE80211_QOS_AMSDU); 1736 } 1737 1738 /*Calculate security header length based on 'Protected' 1739 * and 'EXT_IV' flag 1740 */ 1741 if (wh->i_fc[1] & IEEE80211_FC1_WEP) { 1742 char *iv = (char *)wh + wifi_hdr_len; 1743 1744 if (iv[3] & KEY_EXTIV) 1745 sec_hdr_len = 8; 1746 else 1747 sec_hdr_len = 4; 1748 } else { 1749 sec_hdr_len = 0; 1750 } 1751 wifi_hdr_len += sec_hdr_len; 1752 1753 /* MSDU related stuff LLC - AMSDU subframe header etc */ 1754 msdu_llc_len = is_amsdu ? 
(14 + 8) : 8; 1755 1756 mpdu_buf_len = wifi_hdr_len + msdu_llc_len; 1757 1758 /* "Decap" header to remove from MSDU buffer */ 1759 decap_hdr_pull_bytes = 14; 1760 1761 amsdu_pad = 0; 1762 tot_msdu_len = 0; 1763 1764 /* 1765 * keeping first MSDU ops outside of loop to avoid multiple 1766 * check handling 1767 */ 1768 1769 /* Construct src header */ 1770 rx_src_desc = hdr_desc; 1771 1772 /* 1773 * Update protocol and flow tag for MSDU 1774 * update frag index in ctx_idx field. 1775 * Reset head pointer data of nbuf before updating. 1776 */ 1777 QDF_NBUF_CB_RX_CTX_ID(head_msdu) = 0; 1778 dp_rx_mon_update_protocol_flow_tag(soc, dp_pdev, head_msdu, rx_desc); 1779 1780 /* Construct destination address */ 1781 frag_addr = qdf_nbuf_get_frag_addr(head_msdu, 0); 1782 frag_size = qdf_nbuf_get_frag_size_by_idx(head_msdu, 0); 1783 /* We will come here in 2 scenario: 1784 * 1. First MSDU of MPDU with single buffer 1785 * 2. First buffer of First MSDU of MPDU with continuation 1786 * 1787 * ------------------------------------------------------------ 1788 * | SINGLE BUFFER (<= RX_MONITOR_BUFFER_SIZE - RX_PKT_TLVS_LEN)| 1789 * ------------------------------------------------------------ 1790 * 1791 * ------------------------------------------------------------ 1792 * | First BUFFER with Continuation | ... 
| 1793 * | (RX_MONITOR_BUFFER_SIZE - RX_PKT_TLVS_LEN) | | 1794 * ------------------------------------------------------------ 1795 */ 1796 pad_byte_pholder = 1797 (RX_MONITOR_BUFFER_SIZE - soc->rx_pkt_tlv_size) - frag_size; 1798 /* Construct destination address 1799 * -------------------------------------------------------------- 1800 * | RX_PKT_TLV | L2_HDR_PAD | Decap HDR | Payload | 1801 * | | / | 1802 * | >Frag address points here / | 1803 * | \ / | 1804 * | \ This bytes needs to / | 1805 * | \ removed to frame pkt / | 1806 * | ----------------------- | 1807 * | | | 1808 * | | | 1809 * | WIFI +LLC HDR will be added here <-| | 1810 * | | | | 1811 * | >Dest addr will point | | 1812 * | somewhere in this area | | 1813 * -------------------------------------------------------------- 1814 */ 1815 rx_dest_desc = 1816 (frag_addr + decap_hdr_pull_bytes + l2_hdr_offset) - 1817 mpdu_buf_len; 1818 /* Add WIFI and LLC header for 1st MSDU of MPDU */ 1819 qdf_mem_copy(rx_dest_desc, rx_src_desc, mpdu_buf_len); 1820 1821 frag_page_offset = 1822 (decap_hdr_pull_bytes + l2_hdr_offset) - mpdu_buf_len; 1823 1824 qdf_nbuf_move_frag_page_offset(head_msdu, 0, frag_page_offset); 1825 1826 frag_size = qdf_nbuf_get_frag_size_by_idx(head_msdu, 0); 1827 1828 if (buf_info.first_buffer && buf_info.last_buffer) { 1829 /* MSDU with single buffer */ 1830 amsdu_pad = frag_size & 0x3; 1831 amsdu_pad = amsdu_pad ? 
(4 - amsdu_pad) : 0; 1832 if (amsdu_pad && (amsdu_pad <= pad_byte_pholder)) { 1833 char *frag_addr_temp; 1834 1835 qdf_nbuf_trim_add_frag_size(head_msdu, 0, amsdu_pad, 1836 0); 1837 frag_addr_temp = 1838 (char *)qdf_nbuf_get_frag_addr(head_msdu, 0); 1839 frag_addr_temp = (frag_addr_temp + 1840 qdf_nbuf_get_frag_size_by_idx(head_msdu, 0)) - 1841 amsdu_pad; 1842 qdf_mem_zero(frag_addr_temp, amsdu_pad); 1843 amsdu_pad = 0; 1844 } 1845 } else { 1846 /* 1847 * First buffer of Continuation frame and hence 1848 * amsdu_padding doesn't need to be added 1849 * Increase tot_msdu_len so that amsdu_pad byte 1850 * will be calculated for last frame of MSDU 1851 */ 1852 tot_msdu_len = frag_size; 1853 amsdu_pad = 0; 1854 } 1855 1856 /* Here amsdu_pad byte will have some value if 1sf buffer was 1857 * Single buffer MSDU and dint had pholder to adjust amsdu padding 1858 * byte in the end 1859 * So dont initialize to ZERO here 1860 */ 1861 pad_byte_pholder = 0; 1862 for (msdu_curr = head_msdu; msdu_curr;) { 1863 /* frag_iter will start from 0 for second skb onwards */ 1864 if (msdu_curr == head_msdu) 1865 frags_iter = 1; 1866 else 1867 frags_iter = 0; 1868 1869 num_frags = qdf_nbuf_get_nr_frags(msdu_curr); 1870 1871 for (; frags_iter < num_frags; frags_iter++) { 1872 /* Construct destination address 1873 * ---------------------------------------------------------- 1874 * | RX_PKT_TLV | L2_HDR_PAD | Decap HDR | Payload | Pad | 1875 * | | (First buffer) | | | 1876 * | | / / | 1877 * | >Frag address points here / / | 1878 * | \ / / | 1879 * | \ This bytes needs to / / | 1880 * | \ removed to frame pkt/ / | 1881 * | ---------------------- / | 1882 * | | / Add | 1883 * | | / amsdu pad | 1884 * | LLC HDR will be added here <-| | Byte for | 1885 * | | | | last frame | 1886 * | >Dest addr will point | | if space | 1887 * | somewhere in this area | | available | 1888 * | And amsdu_pad will be created if | | | 1889 * | dint get added in last buffer | | | 1890 * | (First Buffer) | | | 1891 * 
---------------------------------------------------------- 1892 */ 1893 frag_addr = 1894 qdf_nbuf_get_frag_addr(msdu_curr, frags_iter); 1895 rx_desc = frag_addr - rx_mon_tlv_size; 1896 1897 /* 1898 * Update protocol and flow tag for MSDU 1899 * update frag index in ctx_idx field 1900 */ 1901 QDF_NBUF_CB_RX_CTX_ID(msdu_curr) = frags_iter; 1902 dp_rx_mon_update_protocol_flow_tag(soc, dp_pdev, 1903 msdu_curr, rx_desc); 1904 1905 /* Read buffer info from stored data in tlvs */ 1906 hal_rx_priv_info_get_from_tlv(soc->hal_soc, rx_desc, 1907 (uint8_t *)&buf_info, 1908 sizeof(buf_info)); 1909 1910 frag_size = qdf_nbuf_get_frag_size_by_idx(msdu_curr, 1911 frags_iter); 1912 1913 /* If Middle buffer, dont add any header */ 1914 if ((!buf_info.first_buffer) && 1915 (!buf_info.last_buffer)) { 1916 tot_msdu_len += frag_size; 1917 amsdu_pad = 0; 1918 pad_byte_pholder = 0; 1919 continue; 1920 } 1921 1922 /* Calculate if current buffer has placeholder 1923 * to accommodate amsdu pad byte 1924 */ 1925 pad_byte_pholder = 1926 (RX_MONITOR_BUFFER_SIZE - soc->rx_pkt_tlv_size) 1927 - frag_size; 1928 /* 1929 * We will come here only only three condition: 1930 * 1. Msdu with single Buffer 1931 * 2. First buffer in case MSDU is spread in multiple 1932 * buffer 1933 * 3. Last buffer in case MSDU is spread in multiple 1934 * buffer 1935 * 1936 * First buffER | Last buffer 1937 * Case 1: 1 | 1 1938 * Case 2: 1 | 0 1939 * Case 3: 0 | 1 1940 * 1941 * In 3rd case only l2_hdr_padding byte will be Zero and 1942 * in other case, It will be 2 Bytes. 
1943 */ 1944 if (buf_info.first_buffer) 1945 l2_hdr_offset = 1946 DP_RX_MON_NONRAW_L2_HDR_PAD_BYTE; 1947 else 1948 l2_hdr_offset = DP_RX_MON_RAW_L2_HDR_PAD_BYTE; 1949 1950 if (buf_info.first_buffer) { 1951 /* Src addr from where llc header needs to be copied */ 1952 rx_src_desc = 1953 hal_rx_desc_get_80211_hdr(soc->hal_soc, 1954 rx_desc); 1955 1956 /* Size of buffer with llc header */ 1957 frag_size = frag_size - 1958 (l2_hdr_offset + decap_hdr_pull_bytes); 1959 frag_size += msdu_llc_len; 1960 1961 /* Construct destination address */ 1962 rx_dest_desc = frag_addr + 1963 decap_hdr_pull_bytes + l2_hdr_offset; 1964 rx_dest_desc = rx_dest_desc - (msdu_llc_len); 1965 1966 qdf_mem_copy(rx_dest_desc, rx_src_desc, 1967 msdu_llc_len); 1968 1969 /* 1970 * Calculate new page offset and create hole 1971 * if amsdu_pad required. 1972 */ 1973 frag_page_offset = l2_hdr_offset + 1974 decap_hdr_pull_bytes; 1975 frag_page_offset = frag_page_offset - 1976 (msdu_llc_len + amsdu_pad); 1977 1978 qdf_nbuf_move_frag_page_offset(msdu_curr, 1979 frags_iter, 1980 frag_page_offset); 1981 1982 tot_msdu_len = frag_size; 1983 /* 1984 * No amsdu padding required for first frame of 1985 * continuation buffer 1986 */ 1987 if (!buf_info.last_buffer) { 1988 amsdu_pad = 0; 1989 continue; 1990 } 1991 } else { 1992 tot_msdu_len += frag_size; 1993 } 1994 1995 /* Will reach to this place in only two case: 1996 * 1. Single buffer MSDU 1997 * 2. Last buffer of MSDU in case of multiple buf MSDU 1998 */ 1999 2000 /* Check size of buffer if amsdu padding required */ 2001 amsdu_pad = tot_msdu_len & 0x3; 2002 amsdu_pad = amsdu_pad ? (4 - amsdu_pad) : 0; 2003 2004 /* Create placeholder if current buffer can 2005 * accommodate padding. 
2006 */ 2007 if (amsdu_pad && (amsdu_pad <= pad_byte_pholder)) { 2008 char *frag_addr_temp; 2009 2010 qdf_nbuf_trim_add_frag_size(msdu_curr, 2011 frags_iter, 2012 amsdu_pad, 0); 2013 frag_addr_temp = (char *)qdf_nbuf_get_frag_addr(msdu_curr, 2014 frags_iter); 2015 frag_addr_temp = (frag_addr_temp + 2016 qdf_nbuf_get_frag_size_by_idx(msdu_curr, frags_iter)) - 2017 amsdu_pad; 2018 qdf_mem_zero(frag_addr_temp, amsdu_pad); 2019 amsdu_pad = 0; 2020 } 2021 2022 /* reset tot_msdu_len */ 2023 tot_msdu_len = 0; 2024 } 2025 msdu_curr = qdf_nbuf_next(msdu_curr); 2026 } 2027 2028 dp_rx_mon_fraglist_prepare(head_msdu, tail_msdu); 2029 2030 dp_rx_mon_dest_debug("%pK: head_msdu %pK head_msdu->len %u", 2031 soc, head_msdu, head_msdu->len); 2032 2033 mpdu_stitch_done: 2034 return head_msdu; 2035 2036 mpdu_stitch_fail: 2037 dp_rx_mon_dest_err("%pK: mpdu_stitch_fail head_msdu %pK", 2038 soc, head_msdu); 2039 return NULL; 2040 } 2041 #endif 2042 2043 #ifdef DP_RX_MON_MEM_FRAG 2044 qdf_nbuf_t dp_rx_mon_restitch_mpdu(struct dp_soc *soc, uint32_t mac_id, 2045 qdf_nbuf_t head_msdu, qdf_nbuf_t tail_msdu, 2046 struct cdp_mon_status *rs) 2047 { 2048 if (qdf_nbuf_get_nr_frags(head_msdu)) 2049 return dp_rx_mon_frag_restitch_mpdu_from_msdus(soc, mac_id, 2050 head_msdu, 2051 tail_msdu, rs); 2052 else 2053 return dp_rx_mon_restitch_mpdu_from_msdus(soc, mac_id, 2054 head_msdu, 2055 tail_msdu, rs); 2056 } 2057 #else 2058 qdf_nbuf_t dp_rx_mon_restitch_mpdu(struct dp_soc *soc, uint32_t mac_id, 2059 qdf_nbuf_t head_msdu, qdf_nbuf_t tail_msdu, 2060 struct cdp_mon_status *rs) 2061 { 2062 return dp_rx_mon_restitch_mpdu_from_msdus(soc, mac_id, head_msdu, 2063 tail_msdu, rs); 2064 } 2065 #endif 2066 2067 #ifdef DP_RX_MON_MEM_FRAG 2068 #if defined(WLAN_SUPPORT_RX_PROTOCOL_TYPE_TAG) ||\ 2069 defined(WLAN_SUPPORT_RX_FLOW_TAG) 2070 void dp_rx_mon_update_pf_tag_to_buf_headroom(struct dp_soc *soc, 2071 qdf_nbuf_t nbuf) 2072 { 2073 qdf_nbuf_t ext_list; 2074 2075 if (qdf_unlikely(!soc)) { 2076 dp_err("Soc[%pK] 
Null. Can't update pftag to nbuf headroom\n", 2077 soc); 2078 qdf_assert_always(0); 2079 } 2080 2081 if (!wlan_cfg_is_rx_mon_protocol_flow_tag_enabled(soc->wlan_cfg_ctx)) 2082 return; 2083 2084 if (qdf_unlikely(!nbuf)) 2085 return; 2086 2087 /* Return if it dint came from mon Path */ 2088 if (!qdf_nbuf_get_nr_frags(nbuf)) 2089 return; 2090 2091 /* Headroom must be double of PF_TAG_SIZE as we copy it 1stly to head */ 2092 if (qdf_unlikely(qdf_nbuf_headroom(nbuf) < (DP_RX_MON_TOT_PF_TAG_LEN * 2))) { 2093 dp_err("Nbuf avail Headroom[%d] < 2 * DP_RX_MON_PF_TAG_TOT_LEN[%lu]", 2094 qdf_nbuf_headroom(nbuf), DP_RX_MON_TOT_PF_TAG_LEN); 2095 return; 2096 } 2097 2098 qdf_nbuf_push_head(nbuf, DP_RX_MON_TOT_PF_TAG_LEN); 2099 qdf_mem_copy(qdf_nbuf_data(nbuf), qdf_nbuf_head(nbuf), 2100 DP_RX_MON_TOT_PF_TAG_LEN); 2101 qdf_nbuf_pull_head(nbuf, DP_RX_MON_TOT_PF_TAG_LEN); 2102 2103 ext_list = qdf_nbuf_get_ext_list(nbuf); 2104 while (ext_list) { 2105 /* Headroom must be double of PF_TAG_SIZE 2106 * as we copy it 1stly to head 2107 */ 2108 if (qdf_unlikely(qdf_nbuf_headroom(ext_list) < (DP_RX_MON_TOT_PF_TAG_LEN * 2))) { 2109 dp_err("Fraglist Nbuf avail Headroom[%d] < 2 * DP_RX_MON_PF_TAG_TOT_LEN[%lu]", 2110 qdf_nbuf_headroom(ext_list), 2111 DP_RX_MON_TOT_PF_TAG_LEN); 2112 ext_list = qdf_nbuf_queue_next(ext_list); 2113 continue; 2114 } 2115 qdf_nbuf_push_head(ext_list, DP_RX_MON_TOT_PF_TAG_LEN); 2116 qdf_mem_copy(qdf_nbuf_data(ext_list), qdf_nbuf_head(ext_list), 2117 DP_RX_MON_TOT_PF_TAG_LEN); 2118 qdf_nbuf_pull_head(ext_list, DP_RX_MON_TOT_PF_TAG_LEN); 2119 ext_list = qdf_nbuf_queue_next(ext_list); 2120 } 2121 } 2122 #endif 2123 #endif 2124 2125 #ifdef QCA_MONITOR_PKT_SUPPORT 2126 QDF_STATUS dp_mon_htt_dest_srng_setup(struct dp_soc *soc, 2127 struct dp_pdev *pdev, 2128 int mac_id, 2129 int mac_for_pdev) 2130 { 2131 QDF_STATUS status = QDF_STATUS_SUCCESS; 2132 2133 if (soc->wlan_cfg_ctx->rxdma1_enable) { 2134 status = htt_srng_setup(soc->htt_handle, mac_for_pdev, 2135 
soc->rxdma_mon_buf_ring[mac_id] 2136 .hal_srng, 2137 RXDMA_MONITOR_BUF); 2138 2139 if (status != QDF_STATUS_SUCCESS) { 2140 dp_mon_err("Failed to send htt srng setup message for Rxdma mon buf ring"); 2141 return status; 2142 } 2143 2144 status = htt_srng_setup(soc->htt_handle, mac_for_pdev, 2145 soc->rxdma_mon_dst_ring[mac_id] 2146 .hal_srng, 2147 RXDMA_MONITOR_DST); 2148 2149 if (status != QDF_STATUS_SUCCESS) { 2150 dp_mon_err("Failed to send htt srng setup message for Rxdma mon dst ring"); 2151 return status; 2152 } 2153 2154 status = htt_srng_setup(soc->htt_handle, mac_for_pdev, 2155 soc->rxdma_mon_desc_ring[mac_id] 2156 .hal_srng, 2157 RXDMA_MONITOR_DESC); 2158 2159 if (status != QDF_STATUS_SUCCESS) { 2160 dp_mon_err("Failed to send htt srng message for Rxdma mon desc ring"); 2161 return status; 2162 } 2163 } 2164 2165 return status; 2166 } 2167 #endif /* QCA_MONITOR_PKT_SUPPORT */ 2168 2169 #ifdef QCA_MONITOR_PKT_SUPPORT 2170 void dp_mon_dest_rings_deinit(struct dp_pdev *pdev, int lmac_id) 2171 { 2172 struct dp_soc *soc = pdev->soc; 2173 2174 if (soc->wlan_cfg_ctx->rxdma1_enable) { 2175 dp_srng_deinit(soc, &soc->rxdma_mon_buf_ring[lmac_id], 2176 RXDMA_MONITOR_BUF, 0); 2177 dp_srng_deinit(soc, &soc->rxdma_mon_dst_ring[lmac_id], 2178 RXDMA_MONITOR_DST, 0); 2179 dp_srng_deinit(soc, &soc->rxdma_mon_desc_ring[lmac_id], 2180 RXDMA_MONITOR_DESC, 0); 2181 } 2182 } 2183 2184 void dp_mon_dest_rings_free(struct dp_pdev *pdev, int lmac_id) 2185 { 2186 struct dp_soc *soc = pdev->soc; 2187 2188 if (soc->wlan_cfg_ctx->rxdma1_enable) { 2189 dp_srng_free(soc, &soc->rxdma_mon_buf_ring[lmac_id]); 2190 dp_srng_free(soc, &soc->rxdma_mon_dst_ring[lmac_id]); 2191 dp_srng_free(soc, &soc->rxdma_mon_desc_ring[lmac_id]); 2192 } 2193 } 2194 2195 QDF_STATUS dp_mon_dest_rings_init(struct dp_pdev *pdev, int lmac_id) 2196 { 2197 struct dp_soc *soc = pdev->soc; 2198 2199 if (soc->wlan_cfg_ctx->rxdma1_enable) { 2200 if (dp_srng_init(soc, &soc->rxdma_mon_buf_ring[lmac_id], 2201 
RXDMA_MONITOR_BUF, 0, lmac_id)) { 2202 dp_mon_err("%pK: " RNG_ERR "rxdma_mon_buf_ring ", soc); 2203 goto fail1; 2204 } 2205 2206 if (dp_srng_init(soc, &soc->rxdma_mon_dst_ring[lmac_id], 2207 RXDMA_MONITOR_DST, 0, lmac_id)) { 2208 dp_mon_err("%pK: " RNG_ERR "rxdma_mon_dst_ring", soc); 2209 goto fail1; 2210 } 2211 2212 if (dp_srng_init(soc, &soc->rxdma_mon_desc_ring[lmac_id], 2213 RXDMA_MONITOR_DESC, 0, lmac_id)) { 2214 dp_mon_err("%pK: " RNG_ERR "rxdma_mon_desc_ring", soc); 2215 goto fail1; 2216 } 2217 } 2218 return QDF_STATUS_SUCCESS; 2219 2220 fail1: 2221 return QDF_STATUS_E_NOMEM; 2222 } 2223 2224 QDF_STATUS dp_mon_dest_rings_alloc(struct dp_pdev *pdev, int lmac_id) 2225 { 2226 int entries; 2227 struct dp_soc *soc = pdev->soc; 2228 struct wlan_cfg_dp_pdev_ctxt *pdev_cfg_ctx = pdev->wlan_cfg_ctx; 2229 2230 if (soc->wlan_cfg_ctx->rxdma1_enable) { 2231 entries = wlan_cfg_get_dma_mon_buf_ring_size(pdev_cfg_ctx); 2232 if (dp_srng_alloc(soc, &soc->rxdma_mon_buf_ring[lmac_id], 2233 RXDMA_MONITOR_BUF, entries, 0)) { 2234 dp_mon_err("%pK: " RNG_ERR "rxdma_mon_buf_ring ", soc); 2235 goto fail1; 2236 } 2237 entries = wlan_cfg_get_dma_rx_mon_dest_ring_size(pdev_cfg_ctx); 2238 if (dp_srng_alloc(soc, &soc->rxdma_mon_dst_ring[lmac_id], 2239 RXDMA_MONITOR_DST, entries, 0)) { 2240 dp_mon_err("%pK: " RNG_ERR "rxdma_mon_dst_ring", soc); 2241 goto fail1; 2242 } 2243 entries = wlan_cfg_get_dma_mon_desc_ring_size(pdev_cfg_ctx); 2244 if (dp_srng_alloc(soc, &soc->rxdma_mon_desc_ring[lmac_id], 2245 RXDMA_MONITOR_DESC, entries, 0)) { 2246 dp_mon_err("%pK: " RNG_ERR "rxdma_mon_desc_ring", soc); 2247 goto fail1; 2248 } 2249 } 2250 return QDF_STATUS_SUCCESS; 2251 2252 fail1: 2253 return QDF_STATUS_E_NOMEM; 2254 } 2255 #endif 2256