/*
 * Copyright (c) 2021, The Linux Foundation. All rights reserved.
 * Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */
#ifndef _DP_RX_MON_1_0_H_
#define _DP_RX_MON_1_0_H_

#include <dp_rx.h>
/*
 * MON_BUF_MIN_ENTRIES macro defines minimum number of network buffers
 * to be refilled in the RXDMA monitor buffer ring at init, remaining
 * buffers are replenished at the time of monitor vap creation
 */
#define MON_BUF_MIN_ENTRIES 64

/*
 * The below macro defines the maximum number of ring entries that would
 * be processed in a single instance when processing each of the non-monitoring
 * RXDMA2SW ring.
 */
#define MON_DROP_REAP_LIMIT 64

/* Per-mac lifecycle helpers for the RXDMA monitor *status* ring:
 * buffer replenish/free and descriptor pool alloc/init/deinit/free.
 */
QDF_STATUS dp_rx_pdev_mon_status_buffers_alloc(struct dp_pdev *pdev,
					       uint32_t mac_id);
QDF_STATUS dp_rx_pdev_mon_status_desc_pool_alloc(struct dp_pdev *pdev,
						 uint32_t mac_id);
void dp_rx_pdev_mon_status_desc_pool_init(struct dp_pdev *pdev,
					  uint32_t mac_id);
void dp_rx_pdev_mon_status_desc_pool_deinit(struct dp_pdev *pdev,
					    uint32_t mac_id);
void dp_rx_pdev_mon_status_desc_pool_free(struct dp_pdev *pdev,
					  uint32_t mac_id);
void dp_rx_pdev_mon_status_buffers_free(struct dp_pdev *pdev, uint32_t mac_id);

/* Per-pdev lifecycle helpers covering all monitor rings of the pdev. */
QDF_STATUS dp_rx_pdev_mon_desc_pool_alloc(struct dp_pdev *pdev);
QDF_STATUS dp_rx_pdev_mon_buffers_alloc(struct dp_pdev *pdev);
void dp_rx_pdev_mon_buffers_free(struct dp_pdev *pdev);
void dp_rx_pdev_mon_desc_pool_init(struct dp_pdev *pdev);
void dp_rx_pdev_mon_desc_pool_deinit(struct dp_pdev *pdev);
void dp_rx_pdev_mon_desc_pool_free(struct dp_pdev *pdev);

/**
 * dp_rx_mon_dest_process() - Brain of the Rx processing functionality
 *	Called from the bottom half (tasklet/NET_RX_SOFTIRQ)
 * @soc: core txrx main context
 * @int_ctx: interrupt context
 * @mac_id: mac id
 * @quota: No. of units (packets) that can be serviced in one shot.
 *
 * This function implements the core of Rx functionality. This is
 * expected to handle only non-error frames.
 *
 * Return: none
 */
#ifdef QCA_MONITOR_PKT_SUPPORT
void dp_rx_mon_dest_process(struct dp_soc *soc, struct dp_intr *int_ctx,
			    uint32_t mac_id, uint32_t quota);

void dp_rx_pdev_mon_buf_buffers_free(struct dp_pdev *pdev, uint32_t mac_id);
QDF_STATUS
dp_rx_pdev_mon_buf_buffers_alloc(struct dp_pdev *pdev, uint32_t mac_id,
				 bool delayed_replenish);
QDF_STATUS
dp_rx_pdev_mon_buf_desc_pool_alloc(struct dp_pdev *pdev, uint32_t mac_id);
void
dp_rx_pdev_mon_buf_desc_pool_init(struct dp_pdev *pdev, uint32_t mac_id);
#else
/* No-op stubs when monitor packet support is compiled out. */
static inline
void dp_rx_mon_dest_process(struct dp_soc *soc, struct dp_intr *int_ctx,
			    uint32_t mac_id, uint32_t quota)
{
}

static inline
void dp_rx_pdev_mon_buf_buffers_free(struct dp_pdev *pdev, uint32_t mac_id)
{
}

static inline QDF_STATUS
dp_rx_pdev_mon_buf_buffers_alloc(struct dp_pdev *pdev, uint32_t mac_id,
				 bool delayed_replenish)
{
	return QDF_STATUS_SUCCESS;
}

static inline QDF_STATUS
dp_rx_pdev_mon_buf_desc_pool_alloc(struct dp_pdev *pdev, uint32_t mac_id)
{
	return QDF_STATUS_SUCCESS;
}

static inline void
dp_rx_pdev_mon_buf_desc_pool_init(struct dp_pdev *pdev, uint32_t mac_id)
{
}
#endif

#if !defined(DISABLE_MON_CONFIG) && defined(MON_ENABLE_DROP_FOR_MAC)
/**
 * dp_mon_dest_srng_drop_for_mac() - Drop the mon dest ring packets for
 *	a given mac
 * @pdev: DP pdev
 * @mac_id: mac id
 *
 * Return: Number of dropped ring entries
 */
uint32_t
dp_mon_dest_srng_drop_for_mac(struct dp_pdev *pdev, uint32_t mac_id);
#endif

/**
 * dp_rxdma_err_process() - RxDMA error processing functionality
 * @int_ctx: interrupt context
 * @soc: core txrx main context
 * @mac_id: mac id
 * @quota: No. of units (packets) that can be serviced in one shot.
 *
 * Return: num of buffers processed
 */
uint32_t dp_rxdma_err_process(struct dp_intr *int_ctx, struct dp_soc *soc,
			      uint32_t mac_id, uint32_t quota);

/**
 * dp_mon_buf_delayed_replenish() - Helper routine to replenish monitor dest buf
 * @pdev: DP pdev object
 *
 * Return: None
 */
void dp_mon_buf_delayed_replenish(struct dp_pdev *pdev);

#ifdef QCA_MONITOR_PKT_SUPPORT
/**
 * dp_rx_mon_link_desc_return() - Return a MPDU link descriptor to HW
 *	(WBM), following error handling
 *
 * @dp_pdev: core txrx pdev context
 * @buf_addr_info: void pointer to monitor link descriptor buf addr info
 * @mac_id: mac id which is one of 3 mac_ids
 *
 * Return: QDF_STATUS
 */
QDF_STATUS
dp_rx_mon_link_desc_return(struct dp_pdev *dp_pdev,
			   hal_buff_addrinfo_t buf_addr_info,
			   int mac_id);
#else
/* Stub: nothing to return when monitor packet support is compiled out. */
static inline QDF_STATUS
dp_rx_mon_link_desc_return(struct dp_pdev *dp_pdev,
			   hal_buff_addrinfo_t buf_addr_info,
			   int mac_id)
{
	return QDF_STATUS_SUCCESS;
}
#endif

/*
 * dp_rx_mon_get_rx_pkt_tlv_size() - Rx pkt TLV size used on the monitor path.
 * Single-pdev builds (WLAN_MAX_PDEVS == 1) return soc->curr_rx_pkt_tlv_size,
 * all other builds return the monitor-specific soc->rx_mon_pkt_tlv_size.
 */
#if defined(WLAN_MAX_PDEVS) && (WLAN_MAX_PDEVS == 1)
static inline uint16_t dp_rx_mon_get_rx_pkt_tlv_size(struct dp_soc *soc)
{
	return soc->curr_rx_pkt_tlv_size;
}
#else
static inline uint16_t dp_rx_mon_get_rx_pkt_tlv_size(struct dp_soc *soc)
{
	return soc->rx_mon_pkt_tlv_size;
}
#endif

/**
 * dp_mon_adjust_frag_len() - MPDU and MSDU may spread across
 *	multiple nbufs. This function
 *	is to return data length in
 *	fragmented buffer
 * @soc: Datapath soc handle
 * @total_len: pointer to remaining data length.
 * @frag_len: pointer to data length in this fragment.
188 * @l2_hdr_pad: l2 header padding 189 */ 190 static inline void dp_mon_adjust_frag_len(struct dp_soc *soc, 191 uint32_t *total_len, 192 uint32_t *frag_len, 193 uint16_t l2_hdr_pad) 194 { 195 uint32_t rx_pkt_tlv_len = soc->rx_pkt_tlv_size; 196 197 if (*total_len >= (RX_MONITOR_BUFFER_SIZE - rx_pkt_tlv_len)) { 198 *frag_len = RX_MONITOR_BUFFER_SIZE - rx_pkt_tlv_len - 199 l2_hdr_pad; 200 *total_len -= *frag_len; 201 } else { 202 *frag_len = *total_len; 203 *total_len = 0; 204 } 205 } 206 207 /** 208 * dp_rx_mon_frag_adjust_frag_len() - MPDU and MSDU may spread across 209 * multiple nbufs. This function is to return data length in 210 * fragmented buffer. 211 * It takes input as max_limit for any buffer(as it changes based 212 * on decap type and buffer sequence in MSDU. 213 * 214 * If MSDU is divided into multiple buffer then below format will 215 * be max limit. 216 * Decap type Non-Raw 217 *-------------------------------- 218 *| 1st | 2nd | ... | Last | 219 *| 1662 | 1664 | 1664 | <=1664 | 220 *-------------------------------- 221 * Decap type Raw 222 *-------------------------------- 223 *| 1st | 2nd | ... | Last | 224 *| 1664 | 1664 | 1664 | <=1664 | 225 *-------------------------------- 226 * 227 * It also calculate if current buffer has placeholder to keep padding byte. 228 * -------------------------------- 229 * | MAX LIMIT(1662/1664) | 230 * -------------------------------- 231 * | Actual Data | Pad byte Pholder | 232 * -------------------------------- 233 * 234 * @total_len: Remaining data length. 235 * @frag_len: Data length in this fragment. 236 * @max_limit: Max limit of current buffer/MSDU. 
 */
#ifdef DP_RX_MON_MEM_FRAG
static inline
void dp_rx_mon_frag_adjust_frag_len(uint32_t *total_len, uint32_t *frag_len,
				    uint32_t max_limit)
{
	if (*total_len >= max_limit) {
		/* Buffer filled to its max limit; more data remains */
		*frag_len = max_limit;
		*total_len -= *frag_len;
	} else {
		/* Remainder fits in this buffer */
		*frag_len = *total_len;
		*total_len = 0;
	}
}

/**
 * DP_RX_MON_GET_NBUF_FROM_DESC() - Get nbuf from desc
 * @rx_desc: RX descriptor
 *
 * In frag mode the descriptor holds a frag, not an nbuf, hence NULL.
 *
 * Return: nbuf address
 */
#define DP_RX_MON_GET_NBUF_FROM_DESC(rx_desc) \
	NULL

/**
 * dp_rx_mon_add_msdu_to_list_failure_handler() - Handler for nbuf buffer
 *                                                 attach failure
 * @rx_tlv_hdr: rx_tlv_hdr
 * @pdev: struct dp_pdev *
 * @last: skb pointing to last skb in chained list at any moment
 * @head_msdu: parent skb in the chained list
 * @tail_msdu: Last skb in the chained list
 * @func_name: caller function name
 *
 * Frees the offending frag and the partially built msdu chain, then
 * NULLs the caller's list pointers so stale references are not reused.
 *
 * Return: void
 */
static inline void
dp_rx_mon_add_msdu_to_list_failure_handler(void *rx_tlv_hdr,
					   struct dp_pdev *pdev,
					   qdf_nbuf_t *last,
					   qdf_nbuf_t *head_msdu,
					   qdf_nbuf_t *tail_msdu,
					   const char *func_name)
{
	/* Account the allocation failure in replenish stats */
	DP_STATS_INC(pdev, replenish.nbuf_alloc_fail, 1);
	qdf_frag_free(rx_tlv_hdr);
	if (head_msdu)
		qdf_nbuf_list_free(*head_msdu);
	dp_err("[%s] failed to allocate subsequent parent buffer to hold all frag\n",
	       func_name);
	/* Reset caller's pointers so the MPDU is dropped cleanly */
	if (head_msdu)
		*head_msdu = NULL;
	if (last)
		*last = NULL;
	if (tail_msdu)
		*tail_msdu = NULL;
}

/**
 * dp_rx_mon_get_paddr_from_desc() - Get paddr from desc
 * @rx_desc: RX descriptor
 *
 * Return: Physical address of the buffer
 */
static inline
qdf_dma_addr_t dp_rx_mon_get_paddr_from_desc(struct dp_rx_desc *rx_desc)
{
	return rx_desc->paddr_buf_start;
}

/**
 * DP_RX_MON_IS_BUFFER_ADDR_NULL() - Is Buffer received from hw is NULL
 * @rx_desc: RX descriptor
 *
 * Return: true if the buffer is NULL,
otherwise false 313 */ 314 #define DP_RX_MON_IS_BUFFER_ADDR_NULL(rx_desc) \ 315 (!(rx_desc->rx_buf_start)) 316 317 #define DP_RX_MON_IS_MSDU_NOT_NULL(msdu) \ 318 true 319 320 /** 321 * dp_rx_mon_buffer_free() - Free nbuf or frag memory 322 * Free nbuf if feature is disabled, else free frag. 323 * 324 * @rx_desc: Rx desc 325 */ 326 static inline void 327 dp_rx_mon_buffer_free(struct dp_rx_desc *rx_desc) 328 { 329 qdf_frag_free(rx_desc->rx_buf_start); 330 } 331 332 /** 333 * dp_rx_mon_buffer_unmap() - Unmap nbuf or frag memory 334 * Unmap nbuf if feature is disabled, else unmap frag. 335 * 336 * @soc: struct dp_soc * 337 * @rx_desc: struct dp_rx_desc * 338 * @size: Size to be unmapped 339 */ 340 static inline void 341 dp_rx_mon_buffer_unmap(struct dp_soc *soc, struct dp_rx_desc *rx_desc, 342 uint16_t size) 343 { 344 qdf_mem_unmap_page(soc->osdev, rx_desc->paddr_buf_start, 345 size, QDF_DMA_FROM_DEVICE); 346 } 347 348 /** 349 * dp_rx_mon_alloc_parent_buffer() - Allocate parent buffer to hold 350 * radiotap header and accommodate all frag memory in nr_frag. 351 * 352 * @head_msdu: Ptr to hold allocated Msdu 353 * 354 * Return: QDF_STATUS 355 */ 356 static inline 357 QDF_STATUS dp_rx_mon_alloc_parent_buffer(qdf_nbuf_t *head_msdu) 358 { 359 /* 360 * Headroom should accommodate radiotap header 361 * and protocol and flow tag for all frag 362 * Length reserved to accommodate Radiotap header 363 * is 128 bytes and length reserved for Protocol 364 * flow tag will vary based on QDF_NBUF_MAX_FRAGS. 
365 */ 366 /* ------------------------------------------------- 367 * | Protocol & Flow TAG | Radiotap header| 368 * | | Length(128 B) | 369 * | ((4* QDF_NBUF_MAX_FRAGS) * 2) | | 370 * ------------------------------------------------- 371 */ 372 373 *head_msdu = qdf_nbuf_alloc_no_recycler(DP_RX_MON_MAX_MONITOR_HEADER, 374 DP_RX_MON_MAX_MONITOR_HEADER, 4); 375 376 if (!(*head_msdu)) 377 return QDF_STATUS_E_FAILURE; 378 379 qdf_mem_zero(qdf_nbuf_head(*head_msdu), qdf_nbuf_headroom(*head_msdu)); 380 381 /* Set *head_msdu->next as NULL as all msdus are 382 * mapped via nr frags 383 */ 384 qdf_nbuf_set_next(*head_msdu, NULL); 385 386 return QDF_STATUS_SUCCESS; 387 } 388 389 /** 390 * dp_rx_mon_parse_desc_buffer() - Parse desc buffer based. 391 * @dp_soc: struct dp_soc* 392 * @msdu_info: struct hal_rx_msdu_desc_info* 393 * @is_frag_p: is_frag * 394 * @total_frag_len_p: Remaining frag len to be updated 395 * @frag_len_p: frag len 396 * @l2_hdr_offset_p: l2 hdr offset 397 * @rx_desc_tlv: rx_desc_tlv 398 * @first_rx_desc_tlv: 399 * @is_frag_non_raw_p: Non raw frag 400 * @data: NBUF Data 401 * 402 * Below code will parse desc buffer, handle continuation frame, 403 * adjust frag length and update l2_hdr_padding 404 * 405 */ 406 static inline void 407 dp_rx_mon_parse_desc_buffer(struct dp_soc *dp_soc, 408 struct hal_rx_msdu_desc_info *msdu_info, 409 bool *is_frag_p, uint32_t *total_frag_len_p, 410 uint32_t *frag_len_p, uint16_t *l2_hdr_offset_p, 411 qdf_frag_t rx_desc_tlv, 412 void **first_rx_desc_tlv, 413 bool *is_frag_non_raw_p, void *data) 414 { 415 struct hal_rx_mon_dest_buf_info frame_info; 416 uint16_t tot_payload_len = 417 RX_MONITOR_BUFFER_SIZE - dp_soc->rx_pkt_tlv_size; 418 419 if (msdu_info->msdu_flags & HAL_MSDU_F_MSDU_CONTINUATION) { 420 /* First buffer of MSDU */ 421 if (!(*is_frag_p)) { 422 /* Set total frag_len from msdu_len */ 423 *total_frag_len_p = msdu_info->msdu_len; 424 425 *is_frag_p = true; 426 if (HAL_HW_RX_DECAP_FORMAT_RAW == 427 
hal_rx_tlv_decap_format_get(dp_soc->hal_soc, 428 rx_desc_tlv)) { 429 *l2_hdr_offset_p = 430 DP_RX_MON_RAW_L2_HDR_PAD_BYTE; 431 frame_info.is_decap_raw = 1; 432 } else { 433 *l2_hdr_offset_p = 434 DP_RX_MON_NONRAW_L2_HDR_PAD_BYTE; 435 frame_info.is_decap_raw = 0; 436 *is_frag_non_raw_p = true; 437 } 438 dp_rx_mon_frag_adjust_frag_len(total_frag_len_p, 439 frag_len_p, 440 tot_payload_len - 441 *l2_hdr_offset_p); 442 443 frame_info.first_buffer = 1; 444 frame_info.last_buffer = 0; 445 hal_rx_priv_info_set_in_tlv(dp_soc->hal_soc, 446 rx_desc_tlv, 447 (uint8_t *)&frame_info, 448 sizeof(frame_info)); 449 } else { 450 /* 451 * Continuation Middle frame 452 * Here max limit will be same for Raw and Non raw case. 453 */ 454 *l2_hdr_offset_p = DP_RX_MON_RAW_L2_HDR_PAD_BYTE; 455 dp_rx_mon_frag_adjust_frag_len(total_frag_len_p, 456 frag_len_p, 457 tot_payload_len); 458 459 /* Update frame info if is non raw frame */ 460 if (*is_frag_non_raw_p) 461 frame_info.is_decap_raw = 0; 462 else 463 frame_info.is_decap_raw = 1; 464 465 frame_info.first_buffer = 0; 466 frame_info.last_buffer = 0; 467 hal_rx_priv_info_set_in_tlv(dp_soc->hal_soc, 468 rx_desc_tlv, 469 (uint8_t *)&frame_info, 470 sizeof(frame_info)); 471 } 472 } else { 473 /** 474 * Last buffer of MSDU spread among multiple buffer 475 * Here max limit will be same for Raw and Non raw case. 
476 */ 477 if (*is_frag_p) { 478 *l2_hdr_offset_p = DP_RX_MON_RAW_L2_HDR_PAD_BYTE; 479 480 dp_rx_mon_frag_adjust_frag_len(total_frag_len_p, 481 frag_len_p, 482 tot_payload_len); 483 484 /* Update frame info if is non raw frame */ 485 if (*is_frag_non_raw_p) 486 frame_info.is_decap_raw = 0; 487 else 488 frame_info.is_decap_raw = 1; 489 490 frame_info.first_buffer = 0; 491 frame_info.last_buffer = 1; 492 hal_rx_priv_info_set_in_tlv(dp_soc->hal_soc, 493 rx_desc_tlv, 494 (uint8_t *)&frame_info, 495 sizeof(frame_info)); 496 } else { 497 /* MSDU with single buffer */ 498 *frag_len_p = msdu_info->msdu_len; 499 if (HAL_HW_RX_DECAP_FORMAT_RAW == 500 hal_rx_tlv_decap_format_get(dp_soc->hal_soc, 501 rx_desc_tlv)) { 502 *l2_hdr_offset_p = 503 DP_RX_MON_RAW_L2_HDR_PAD_BYTE; 504 frame_info.is_decap_raw = 1; 505 } else { 506 *l2_hdr_offset_p = 507 DP_RX_MON_NONRAW_L2_HDR_PAD_BYTE; 508 frame_info.is_decap_raw = 0; 509 } 510 511 frame_info.first_buffer = 1; 512 frame_info.last_buffer = 1; 513 hal_rx_priv_info_set_in_tlv(dp_soc->hal_soc, 514 rx_desc_tlv, 515 (uint8_t *)&frame_info, 516 sizeof(frame_info)); 517 } 518 /* Reset bool after complete processing of MSDU */ 519 *is_frag_p = false; 520 *is_frag_non_raw_p = false; 521 } 522 } 523 524 /** 525 * dp_rx_mon_buffer_set_pktlen() - set pktlen for buffer 526 * @msdu: MSDU 527 * @size: MSDU size 528 */ 529 static inline void dp_rx_mon_buffer_set_pktlen(qdf_nbuf_t msdu, uint32_t size) 530 { 531 } 532 533 /** 534 * dp_rx_mon_add_msdu_to_list()- Add msdu to list and update head_msdu 535 * It will add reaped buffer frag to nr frag of parent msdu. 536 * @soc: DP soc handle 537 * @head_msdu: NULL if first time called else &msdu 538 * @msdu: Msdu where frag address needs to be added via nr_frag 539 * @last: Used to traverse in list if this feature is disabled. 
 * @rx_desc_tlv: Frag address
 * @frag_len: Frag len
 * @l2_hdr_offset: l2 hdr padding
 */
static inline
QDF_STATUS dp_rx_mon_add_msdu_to_list(struct dp_soc *soc, qdf_nbuf_t *head_msdu,
				      qdf_nbuf_t msdu, qdf_nbuf_t *last,
				      qdf_frag_t rx_desc_tlv, uint32_t frag_len,
				      uint32_t l2_hdr_offset)
{
	uint32_t num_frags;
	qdf_nbuf_t msdu_curr;

	/* Here head_msdu and *head_msdu must not be NULL */
	/* Dont add frag to skb if frag length is zero. Drop frame */
	if (qdf_unlikely(!frag_len || !head_msdu || !(*head_msdu))) {
		dp_err("[%s] frag_len[%d] || head_msdu[%pK] || *head_msdu is Null while adding frag to skb\n",
		       __func__, frag_len, head_msdu);
		return QDF_STATUS_E_FAILURE;
	}

	/* In case of first desc of MPDU, assign curr msdu to *head_msdu */
	if (!qdf_nbuf_get_nr_frags(*head_msdu))
		msdu_curr = *head_msdu;
	else
		msdu_curr = *last;

	/* Current msdu must not be NULL */
	if (qdf_unlikely(!msdu_curr)) {
		dp_err("[%s] Current msdu can't be Null while adding frag to skb\n",
		       __func__);
		return QDF_STATUS_E_FAILURE;
	}

	num_frags = qdf_nbuf_get_nr_frags(msdu_curr);
	if (num_frags < QDF_NBUF_MAX_FRAGS) {
		/* Room left in current parent: attach frag starting past
		 * the Rx pkt TLVs (offset rx_mon_pkt_tlv_size)
		 */
		qdf_nbuf_add_rx_frag(rx_desc_tlv, msdu_curr,
				     soc->rx_mon_pkt_tlv_size,
				     frag_len + l2_hdr_offset,
				     RX_MONITOR_BUFFER_SIZE,
				     false);
		if (*last != msdu_curr)
			*last = msdu_curr;
		return QDF_STATUS_SUCCESS;
	}

	/* Execution will reach here only if num_frags == QDF_NBUF_MAX_FRAGS */
	msdu_curr = NULL;
	if ((dp_rx_mon_alloc_parent_buffer(&msdu_curr))
	    != QDF_STATUS_SUCCESS)
		return QDF_STATUS_E_FAILURE;

	qdf_nbuf_add_rx_frag(rx_desc_tlv, msdu_curr, soc->rx_mon_pkt_tlv_size,
			     frag_len + l2_hdr_offset, RX_MONITOR_BUFFER_SIZE,
			     false);

	/* Add allocated nbuf in the chain */
	qdf_nbuf_set_next(*last, msdu_curr);

	/* Assign current msdu to last to avoid traversal */
	*last = msdu_curr;

	return QDF_STATUS_SUCCESS;
}

/**
 * dp_rx_mon_init_tail_msdu() - Initialize tail msdu
 *
 * @head_msdu: Parent buffer to hold MPDU data
 * @msdu: Msdu to be updated in tail_msdu (unused in frag mode; the tail
 *	  is the last parent buffer in the chain)
 * @last: last msdu
 * @tail_msdu: Last msdu
 */
static inline
void dp_rx_mon_init_tail_msdu(qdf_nbuf_t *head_msdu, qdf_nbuf_t msdu,
			      qdf_nbuf_t last, qdf_nbuf_t *tail_msdu)
{
	if (!head_msdu || !(*head_msdu)) {
		*tail_msdu = NULL;
		return;
	}

	if (last)
		qdf_nbuf_set_next(last, NULL);
	*tail_msdu = last;
}

/**
 * dp_rx_mon_remove_raw_frame_fcs_len() - Remove FCS length for Raw Frame
 *
 * If feature is disabled, then removal happens in restitch logic.
 *
 * @soc: Datapath soc handle
 * @head_msdu: Head msdu
 * @tail_msdu: Tail msdu
 */
static inline
void dp_rx_mon_remove_raw_frame_fcs_len(struct dp_soc *soc,
					qdf_nbuf_t *head_msdu,
					qdf_nbuf_t *tail_msdu)
{
	qdf_frag_t addr;

	if (qdf_unlikely(!head_msdu || !tail_msdu || !(*head_msdu)))
		return;

	/* If *head_msdu is valid, then *tail_msdu must be valid */
	/* If head_msdu is valid, then it must have nr_frags */
	/* If tail_msdu is valid, then it must have nr_frags */

	/* Strip FCS_LEN for Raw frame */
	addr = qdf_nbuf_get_frag_addr(*head_msdu, 0);
	/* Step back over the pkt TLVs to read the decap format */
	addr -= soc->rx_mon_pkt_tlv_size;
	if (hal_rx_tlv_decap_format_get(soc->hal_soc, addr) ==
	    HAL_HW_RX_DECAP_FORMAT_RAW) {
		qdf_nbuf_trim_add_frag_size(*tail_msdu,
					    qdf_nbuf_get_nr_frags(*tail_msdu) - 1,
					    -HAL_RX_FCS_LEN, 0);
	}
}

/**
 * dp_rx_mon_get_buffer_data()- Get data from desc buffer
 * @rx_desc: desc
 *
 * Return address containing actual tlv content
 */
static inline
uint8_t *dp_rx_mon_get_buffer_data(struct dp_rx_desc *rx_desc)
{
	return rx_desc->rx_buf_start;
}

#else /* !DP_RX_MON_MEM_FRAG */

/* Nbuf mode: the descriptor carries a full nbuf. */
#define DP_RX_MON_GET_NBUF_FROM_DESC(rx_desc) \
	(rx_desc->nbuf)

static inline void 679 dp_rx_mon_add_msdu_to_list_failure_handler(void *rx_tlv_hdr, 680 struct dp_pdev *pdev, 681 qdf_nbuf_t *last, 682 qdf_nbuf_t *head_msdu, 683 qdf_nbuf_t *tail_msdu, 684 const char *func_name) 685 { 686 } 687 688 static inline 689 qdf_dma_addr_t dp_rx_mon_get_paddr_from_desc(struct dp_rx_desc *rx_desc) 690 { 691 qdf_dma_addr_t paddr = 0; 692 qdf_nbuf_t msdu = NULL; 693 694 msdu = rx_desc->nbuf; 695 if (msdu) 696 paddr = qdf_nbuf_get_frag_paddr(msdu, 0); 697 698 return paddr; 699 } 700 701 #define DP_RX_MON_IS_BUFFER_ADDR_NULL(rx_desc) \ 702 (!(rx_desc->nbuf)) 703 704 #define DP_RX_MON_IS_MSDU_NOT_NULL(msdu) \ 705 (msdu) 706 707 static inline void 708 dp_rx_mon_buffer_free(struct dp_rx_desc *rx_desc) 709 { 710 qdf_nbuf_free(rx_desc->nbuf); 711 } 712 713 static inline void 714 dp_rx_mon_buffer_unmap(struct dp_soc *soc, struct dp_rx_desc *rx_desc, 715 uint16_t size) 716 { 717 qdf_nbuf_unmap_nbytes_single(soc->osdev, rx_desc->nbuf, 718 QDF_DMA_FROM_DEVICE, size); 719 } 720 721 static inline 722 QDF_STATUS dp_rx_mon_alloc_parent_buffer(qdf_nbuf_t *head_msdu) 723 { 724 return QDF_STATUS_SUCCESS; 725 } 726 727 #ifdef QCA_WIFI_MONITOR_MODE_NO_MSDU_START_TLV_SUPPORT 728 729 #define RXDMA_DATA_DMA_BLOCK_SIZE 128 730 static inline void 731 dp_rx_mon_parse_desc_buffer(struct dp_soc *dp_soc, 732 struct hal_rx_msdu_desc_info *msdu_info, 733 bool *is_frag_p, uint32_t *total_frag_len_p, 734 uint32_t *frag_len_p, 735 uint16_t *l2_hdr_offset_p, 736 qdf_frag_t rx_desc_tlv, 737 void **first_rx_desc_tlv, 738 bool *is_frag_non_raw_p, void *data) 739 { 740 struct hal_rx_mon_dest_buf_info frame_info; 741 uint32_t rx_pkt_tlv_len = dp_rx_mon_get_rx_pkt_tlv_size(dp_soc); 742 743 /* 744 * HW structures call this L3 header padding 745 * -- even though this is actually the offset 746 * from the buffer beginning where the L2 747 * header begins. 
748 */ 749 *l2_hdr_offset_p = 750 hal_rx_msdu_end_l3_hdr_padding_get(dp_soc->hal_soc, data); 751 752 if (msdu_info->msdu_flags & HAL_MSDU_F_MSDU_CONTINUATION) { 753 /* 754 * Set l3_hdr_pad for first frag. This can be later 755 * changed based on decap format, detected in last frag 756 */ 757 *l2_hdr_offset_p = DP_RX_MON_RAW_L2_HDR_PAD_BYTE; 758 if (!(*is_frag_p)) { 759 *l2_hdr_offset_p = DP_RX_MON_RAW_L2_HDR_PAD_BYTE; 760 *first_rx_desc_tlv = rx_desc_tlv; 761 } 762 763 *is_frag_p = true; 764 *frag_len_p = (RX_MONITOR_BUFFER_SIZE - rx_pkt_tlv_len - 765 *l2_hdr_offset_p) & 766 ~(RXDMA_DATA_DMA_BLOCK_SIZE - 1); 767 *total_frag_len_p += *frag_len_p; 768 } else { 769 if (hal_rx_tlv_decap_format_get(dp_soc->hal_soc, rx_desc_tlv) == 770 HAL_HW_RX_DECAP_FORMAT_RAW) 771 frame_info.is_decap_raw = 1; 772 773 if (hal_rx_tlv_mpdu_len_err_get(dp_soc->hal_soc, rx_desc_tlv)) 774 frame_info.mpdu_len_err = 1; 775 776 frame_info.l2_hdr_pad = hal_rx_msdu_end_l3_hdr_padding_get( 777 dp_soc->hal_soc, rx_desc_tlv); 778 779 if (*is_frag_p) { 780 /* Last fragment of msdu */ 781 *frag_len_p = msdu_info->msdu_len - *total_frag_len_p; 782 783 /* Set this in the first frag priv data */ 784 hal_rx_priv_info_set_in_tlv(dp_soc->hal_soc, 785 *first_rx_desc_tlv, 786 (uint8_t *)&frame_info, 787 sizeof(frame_info)); 788 } else { 789 *frag_len_p = msdu_info->msdu_len; 790 hal_rx_priv_info_set_in_tlv(dp_soc->hal_soc, 791 rx_desc_tlv, 792 (uint8_t *)&frame_info, 793 sizeof(frame_info)); 794 } 795 *is_frag_p = false; 796 *first_rx_desc_tlv = NULL; 797 } 798 } 799 #else 800 801 static inline void 802 dp_rx_mon_parse_desc_buffer(struct dp_soc *dp_soc, 803 struct hal_rx_msdu_desc_info *msdu_info, 804 bool *is_frag_p, uint32_t *total_frag_len_p, 805 uint32_t *frag_len_p, 806 uint16_t *l2_hdr_offset_p, 807 qdf_frag_t rx_desc_tlv, 808 qdf_frag_t first_rx_desc_tlv, 809 bool *is_frag_non_raw_p, void *data) 810 { 811 /* 812 * HW structures call this L3 header padding 813 * -- even though this is actually the 
offset 814 * from the buffer beginning where the L2 815 * header begins. 816 */ 817 *l2_hdr_offset_p = 818 hal_rx_msdu_end_l3_hdr_padding_get(dp_soc->hal_soc, data); 819 820 if (msdu_info->msdu_flags & HAL_MSDU_F_MSDU_CONTINUATION) { 821 if (!*(is_frag_p)) { 822 *total_frag_len_p = msdu_info->msdu_len; 823 *is_frag_p = true; 824 } 825 dp_mon_adjust_frag_len(dp_soc, total_frag_len_p, frag_len_p, 826 *l2_hdr_offset_p); 827 } else { 828 if (*is_frag_p) { 829 dp_mon_adjust_frag_len(dp_soc, total_frag_len_p, 830 frag_len_p, 831 *l2_hdr_offset_p); 832 } else { 833 *frag_len_p = msdu_info->msdu_len; 834 } 835 *is_frag_p = false; 836 } 837 } 838 #endif 839 840 static inline void dp_rx_mon_buffer_set_pktlen(qdf_nbuf_t msdu, uint32_t size) 841 { 842 qdf_nbuf_set_pktlen(msdu, size); 843 } 844 845 static inline 846 QDF_STATUS dp_rx_mon_add_msdu_to_list(struct dp_soc *soc, qdf_nbuf_t *head_msdu, 847 qdf_nbuf_t msdu, qdf_nbuf_t *last, 848 qdf_frag_t rx_desc_tlv, uint32_t frag_len, 849 uint32_t l2_hdr_offset) 850 { 851 if (head_msdu && !*head_msdu) { 852 *head_msdu = msdu; 853 } else { 854 if (*last) 855 qdf_nbuf_set_next(*last, msdu); 856 } 857 *last = msdu; 858 return QDF_STATUS_SUCCESS; 859 } 860 861 static inline 862 void dp_rx_mon_init_tail_msdu(qdf_nbuf_t *head_msdu, qdf_nbuf_t msdu, 863 qdf_nbuf_t last, qdf_nbuf_t *tail_msdu) 864 { 865 if (last) 866 qdf_nbuf_set_next(last, NULL); 867 868 *tail_msdu = msdu; 869 } 870 871 static inline 872 void dp_rx_mon_remove_raw_frame_fcs_len(struct dp_soc *soc, 873 qdf_nbuf_t *head_msdu, 874 qdf_nbuf_t *tail_msdu) 875 { 876 } 877 878 static inline 879 uint8_t *dp_rx_mon_get_buffer_data(struct dp_rx_desc *rx_desc) 880 { 881 qdf_nbuf_t msdu = NULL; 882 uint8_t *data = NULL; 883 884 msdu = rx_desc->nbuf; 885 if (qdf_likely(msdu)) 886 data = qdf_nbuf_data(msdu); 887 return data; 888 } 889 890 #endif 891 892 /** 893 * dp_rx_cookie_2_mon_link_desc() - Retrieve Link descriptor based on target 894 * @pdev: core physical device context 895 * 
@buf_info: structure holding the buffer info 896 * @mac_id: mac number 897 * 898 * Return: link descriptor address 899 */ 900 static inline 901 void *dp_rx_cookie_2_mon_link_desc(struct dp_pdev *pdev, 902 struct hal_buf_info buf_info, 903 uint8_t mac_id) 904 { 905 if (pdev->soc->wlan_cfg_ctx->rxdma1_enable) 906 return dp_rx_cookie_2_mon_link_desc_va(pdev, &buf_info, 907 mac_id); 908 909 return dp_rx_cookie_2_link_desc_va(pdev->soc, &buf_info); 910 } 911 912 /** 913 * dp_rx_monitor_link_desc_return() - Return Link descriptor based on target 914 * @pdev: core physical device context 915 * @p_last_buf_addr_info: MPDU Link descriptor 916 * @mac_id: mac number 917 * @bm_action: 918 * 919 * Return: QDF_STATUS 920 */ 921 static inline 922 QDF_STATUS dp_rx_monitor_link_desc_return(struct dp_pdev *pdev, 923 hal_buff_addrinfo_t 924 p_last_buf_addr_info, 925 uint8_t mac_id, uint8_t bm_action) 926 { 927 if (pdev->soc->wlan_cfg_ctx->rxdma1_enable) 928 return dp_rx_mon_link_desc_return(pdev, p_last_buf_addr_info, 929 mac_id); 930 931 return dp_rx_link_desc_return_by_addr(pdev->soc, p_last_buf_addr_info, 932 bm_action); 933 } 934 935 static inline bool dp_is_rxdma_dst_ring_common(struct dp_pdev *pdev) 936 { 937 struct dp_soc *soc = pdev->soc; 938 939 return (soc->wlan_cfg_ctx->num_rxdma_dst_rings_per_pdev == 1); 940 } 941 942 /** 943 * dp_rxdma_get_mon_dst_ring() - Return the pointer to rxdma_err_dst_ring 944 * or mon_dst_ring based on the target 945 * @pdev: core physical device context 946 * @mac_for_pdev: mac_id number 947 * 948 * Return: ring address 949 */ 950 static inline 951 void *dp_rxdma_get_mon_dst_ring(struct dp_pdev *pdev, 952 uint8_t mac_for_pdev) 953 { 954 if (pdev->soc->wlan_cfg_ctx->rxdma1_enable) 955 return pdev->soc->rxdma_mon_dst_ring[mac_for_pdev].hal_srng; 956 957 /* For targets with 1 RXDMA DST ring for both mac */ 958 if (dp_is_rxdma_dst_ring_common(pdev)) 959 return pdev->soc->rxdma_err_dst_ring[0].hal_srng; 960 961 return 
pdev->soc->rxdma_err_dst_ring[mac_for_pdev].hal_srng; 962 } 963 964 /** 965 * dp_rxdma_get_mon_buf_ring() - Return monitor buf ring address 966 * based on target 967 * @pdev: core physical device context 968 * @mac_for_pdev: mac id number 969 * 970 * Return: ring address 971 */ 972 static inline 973 struct dp_srng *dp_rxdma_get_mon_buf_ring(struct dp_pdev *pdev, 974 uint8_t mac_for_pdev) 975 { 976 if (pdev->soc->wlan_cfg_ctx->rxdma1_enable) 977 return &pdev->soc->rxdma_mon_buf_ring[mac_for_pdev]; 978 979 /* For MCL there is only 1 rx refill ring */ 980 return &pdev->soc->rx_refill_buf_ring[0]; 981 } 982 983 /** 984 * dp_rx_get_mon_desc() - Return Rx descriptor based on target 985 * @soc: soc handle 986 * @cookie: cookie value 987 * 988 * Return: Rx descriptor 989 */ 990 static inline 991 struct dp_rx_desc *dp_rx_get_mon_desc(struct dp_soc *soc, 992 uint32_t cookie) 993 { 994 if (soc->wlan_cfg_ctx->rxdma1_enable) 995 return dp_rx_cookie_2_va_mon_buf(soc, cookie); 996 997 return soc->arch_ops.dp_rx_desc_cookie_2_va(soc, cookie); 998 } 999 1000 #ifdef QCA_MONITOR_PKT_SUPPORT 1001 /* 1002 * dp_mon_htt_dest_srng_setup(): monitor dest srng setup 1003 * @soc: DP SOC handle 1004 * @pdev: DP PDEV handle 1005 * @mac_id: MAC ID 1006 * @mac_for_pdev: PDEV mac 1007 * 1008 * Return: status: QDF_STATUS_SUCCESS - Success, non-zero: Failure 1009 */ 1010 QDF_STATUS dp_mon_htt_dest_srng_setup(struct dp_soc *soc, 1011 struct dp_pdev *pdev, 1012 int mac_id, 1013 int mac_for_pdev); 1014 1015 /* 1016 * dp_mon_dest_rings_deinit(): deinit monitor dest rings 1017 * @pdev: DP PDEV handle 1018 * @lmac_id: MAC ID 1019 * 1020 * Return: status: None 1021 */ 1022 void dp_mon_dest_rings_deinit(struct dp_pdev *pdev, int lmac_id); 1023 1024 /* 1025 * dp_mon_dest_rings_free(): free monitor dest rings 1026 * @pdev: DP PDEV handle 1027 * @lmac_id: MAC ID 1028 * 1029 * Return: status: None 1030 */ 1031 void dp_mon_dest_rings_free(struct dp_pdev *pdev, int lmac_id); 1032 1033 /* 1034 * 
dp_mon_dest_rings_init(): init monitor dest rings 1035 * @pdev: DP PDEV handle 1036 * @lmac_id: MAC ID 1037 * 1038 * Return: status: QDF_STATUS_SUCCESS - Success, non-zero: Failure 1039 */ 1040 QDF_STATUS dp_mon_dest_rings_init(struct dp_pdev *pdev, int lmac_id); 1041 1042 /* 1043 * dp_mon_dest_rings_allocate(): allocate monitor dest rings 1044 * @pdev: DP PDEV handle 1045 * @lmac_id: MAC ID 1046 * 1047 * Return: status: QDF_STATUS_SUCCESS - Success, non-zero: Failure 1048 */ 1049 QDF_STATUS dp_mon_dest_rings_alloc(struct dp_pdev *pdev, int lmac_id); 1050 1051 #else 1052 QDF_STATUS dp_mon_htt_dest_srng_setup(struct dp_soc *soc, 1053 struct dp_pdev *pdev, 1054 int mac_id, 1055 int mac_for_pdev) 1056 { 1057 return QDF_STATUS_SUCCESS; 1058 } 1059 1060 static void dp_mon_dest_rings_deinit(struct dp_pdev *pdev, int lmac_id) 1061 { 1062 } 1063 1064 static void dp_mon_dest_rings_free(struct dp_pdev *pdev, int lmac_id) 1065 { 1066 } 1067 1068 static 1069 QDF_STATUS dp_mon_dest_rings_init(struct dp_pdev *pdev, int lmac_id) 1070 { 1071 return QDF_STATUS_SUCCESS; 1072 } 1073 1074 static 1075 QDF_STATUS dp_mon_dest_rings_alloc(struct dp_pdev *pdev, int lmac_id) 1076 { 1077 return QDF_STATUS_SUCCESS; 1078 } 1079 #endif /* QCA_MONITOR_PKT_SUPPORT */ 1080 1081 #endif /* _DP_RX_MON_1_0_H_ */ 1082