/*
 * Copyright (c) 2016-2017 The Linux Foundation. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */

#ifndef _DP_RX_H
#define _DP_RX_H

#include "hal_rx.h"
#include "dp_tx.h"
#include "dp_peer.h"

#ifdef RXDMA_OPTIMIZATION
#define RX_BUFFER_ALIGNMENT	128
#else /* RXDMA_OPTIMIZATION */
#define RX_BUFFER_ALIGNMENT	4
#endif /* RXDMA_OPTIMIZATION */

#define RX_BUFFER_SIZE		2048
#define RX_BUFFER_RESERVATION	0

#define DP_PEER_METADATA_PEER_ID_MASK	0x0000ffff
#define DP_PEER_METADATA_PEER_ID_SHIFT	0
#define DP_PEER_METADATA_VDEV_ID_MASK	0x00070000
#define DP_PEER_METADATA_VDEV_ID_SHIFT	16

#define DP_PEER_METADATA_PEER_ID_GET(_peer_metadata)		\
	(((_peer_metadata) & DP_PEER_METADATA_PEER_ID_MASK)	\
			>> DP_PEER_METADATA_PEER_ID_SHIFT)

#define DP_PEER_METADATA_ID_GET(_peer_metadata)			\
	(((_peer_metadata) & DP_PEER_METADATA_VDEV_ID_MASK)	\
			>> DP_PEER_METADATA_VDEV_ID_SHIFT)
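
/*
 * Illustrative sketch, not part of the driver API: decoding both fields
 * of a peer_metadata word using the accessors above. peer_id occupies
 * bits [15:0] and vdev_id bits [18:16]. The helper name is hypothetical.
 */
static inline void
dp_rx_peer_metadata_decode_example(uint32_t peer_metadata,
				   uint16_t *peer_id, uint8_t *vdev_id)
{
	*peer_id = DP_PEER_METADATA_PEER_ID_GET(peer_metadata);
	*vdev_id = DP_PEER_METADATA_ID_GET(peer_metadata);
}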

#define DP_RX_DESC_MAGIC 0xdec0de

/**
 * struct dp_rx_desc
 *
 * @nbuf		: VA of the "skb" posted
 * @rx_buf_start	: VA of the original Rx buffer, before
 *			  movement of any skb->data pointer
 * @cookie		: index into the sw array which holds
 *			  the sw Rx descriptors
 *			  Cookie space is 21 bits:
 *			  lower 18 bits -- index
 *			  upper 3 bits -- pool_id
 * @pool_id		: pool Id from which this descriptor was
 *			  allocated. Can only be used if there is no
 *			  flow steering
 * @magic		: magic pattern for descriptor sanity checks
 *			  (RX_DESC_DEBUG_CHECK builds only)
 * @in_use		: descriptor is currently in use
 */
struct dp_rx_desc {
	qdf_nbuf_t nbuf;
	uint8_t *rx_buf_start;
	uint32_t cookie;
	uint8_t	 pool_id;
#ifdef RX_DESC_DEBUG_CHECK
	uint32_t magic;
#endif
	uint8_t	in_use:1;
};

#define RX_DESC_COOKIE_INDEX_SHIFT	0
#define RX_DESC_COOKIE_INDEX_MASK	0x3ffff /* 18 bits */
#define RX_DESC_COOKIE_POOL_ID_SHIFT	18
#define RX_DESC_COOKIE_POOL_ID_MASK	0x1c0000 /* 3 bits */

#define DP_RX_DESC_COOKIE_POOL_ID_GET(_cookie)		\
	(((_cookie) & RX_DESC_COOKIE_POOL_ID_MASK) >>	\
			RX_DESC_COOKIE_POOL_ID_SHIFT)

#define DP_RX_DESC_COOKIE_INDEX_GET(_cookie)		\
	(((_cookie) & RX_DESC_COOKIE_INDEX_MASK) >>	\
			RX_DESC_COOKIE_INDEX_SHIFT)

/**
 * dp_rx_xor_block() - XOR a block of data into the destination block
 * @b: destination data block
 * @a: source data block
 * @len: length of the data to process
 *
 * Return: None
 */
static inline void dp_rx_xor_block(uint8_t *b, const uint8_t *a,
				   qdf_size_t len)
{
	qdf_size_t i;

	for (i = 0; i < len; i++)
		b[i] ^= a[i];
}

/**
 * dp_rx_rotl() - rotate the bits left
 * @val: unsigned integer input value
 * @bits: number of bits
 *
 * Return: Input value rotated left by 'bits' bits
 */
static inline uint32_t dp_rx_rotl(uint32_t val, int bits)
{
	return (val << bits) | (val >> (32 - bits));
}

/**
 * dp_rx_rotr() - rotate the bits right
 * @val: unsigned integer input value
 * @bits: number of bits
 *
 * Return: Input value rotated right by 'bits' bits
 */
static inline uint32_t dp_rx_rotr(uint32_t val, int bits)
{
	return (val >> bits) | (val << (32 - bits));
}

/**
 * dp_rx_xswap() - swap adjacent bytes within each 16-bit half
 * @val: unsigned integer input value
 *
 * Return: Input value with bytes 0<->1 and 2<->3 swapped
 */
static inline uint32_t dp_rx_xswap(uint32_t val)
{
	return ((val & 0x00ff00ff) << 8) | ((val & 0xff00ff00) >> 8);
}

/**
 * dp_rx_get_le32_split() - assemble 32 bits from four little-endian bytes
 * @b0: byte 0
 * @b1: byte 1
 * @b2: byte 2
 * @b3: byte 3
 *
 * Return: 32-bit value assembled from the bytes in little-endian order
 */
static inline uint32_t dp_rx_get_le32_split(uint8_t b0, uint8_t b1,
					    uint8_t b2, uint8_t b3)
{
	return b0 | (b1 << 8) | (b2 << 16) | (b3 << 24);
}

/**
 * dp_rx_get_le32() - load a little-endian 32-bit value from a byte array
 * @p: source byte array
 *
 * Return: 32-bit value loaded in little-endian order
 */
static inline uint32_t dp_rx_get_le32(const uint8_t *p)
{
	return dp_rx_get_le32_split(p[0], p[1], p[2], p[3]);
}

/**
 * dp_rx_put_le32() - store a 32-bit value to a byte array in little-endian
 * @p: destination byte array
 * @v: source 32-bit integer
 *
 * Return: None
 */
static inline void dp_rx_put_le32(uint8_t *p, uint32_t v)
{
	p[0] = (v) & 0xff;
	p[1] = (v >> 8) & 0xff;
	p[2] = (v >> 16) & 0xff;
	p[3] = (v >> 24) & 0xff;
}

/* Michael MIC block function */
#define dp_rx_michael_block(l, r)		\
	do {					\
		r ^= dp_rx_rotl(l, 17);		\
		l += r;				\
		r ^= dp_rx_xswap(l);		\
		l += r;				\
		r ^= dp_rx_rotl(l, 3);		\
		l += r;				\
		r ^= dp_rx_rotr(l, 2);		\
		l += r;				\
	} while (0)

/**
 * union dp_rx_desc_list_elem_t
 *
 * @next		: Next pointer to form free list
 * @rx_desc		: DP Rx descriptor
 */
union dp_rx_desc_list_elem_t {
	union dp_rx_desc_list_elem_t *next;
	struct dp_rx_desc rx_desc;
};
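
/*
 * Illustrative sketch, not part of the driver API: how the helpers above
 * compose into the Michael MIC absorb step. For each 32-bit word of the
 * message, the word is loaded little-endian, XORed into L, and one
 * dp_rx_michael_block() round is run over the (L, R) state. The helper
 * name is hypothetical.
 */
static inline void
dp_rx_michael_mix_word_example(uint32_t *l, uint32_t *r, const uint8_t *data)
{
	*l ^= dp_rx_get_le32(data);	/* absorb one message word */
	dp_rx_michael_block(*l, *r);	/* one round of the block function */
}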
/**
 * dp_rx_cookie_2_va_rxdma_buf() - Converts cookie to a virtual address of
 *			the Rx descriptor on Rx DMA source ring buffer
 * @soc: core txrx main context
 * @cookie: cookie used to lookup virtual address
 *
 * Return: void *: Virtual Address of the Rx descriptor
 */
static inline
void *dp_rx_cookie_2_va_rxdma_buf(struct dp_soc *soc, uint32_t cookie)
{
	uint8_t pool_id = DP_RX_DESC_COOKIE_POOL_ID_GET(cookie);
	uint16_t index = DP_RX_DESC_COOKIE_INDEX_GET(cookie);
	/* TODO */
	/* Add sanity for pool_id & index */
	return &(soc->rx_desc_buf[pool_id].array[index].rx_desc);
}

/**
 * dp_rx_cookie_2_va_mon_buf() - Converts cookie to a virtual address of
 *			the Rx descriptor on monitor ring buffer
 * @soc: core txrx main context
 * @cookie: cookie used to lookup virtual address
 *
 * Return: void *: Virtual Address of the Rx descriptor
 */
static inline
void *dp_rx_cookie_2_va_mon_buf(struct dp_soc *soc, uint32_t cookie)
{
	uint8_t pool_id = DP_RX_DESC_COOKIE_POOL_ID_GET(cookie);
	uint16_t index = DP_RX_DESC_COOKIE_INDEX_GET(cookie);
	/* TODO */
	/* Add sanity for pool_id & index */
	return &(soc->rx_desc_mon[pool_id].array[index].rx_desc);
}

/**
 * dp_rx_cookie_2_va_mon_status() - Converts cookie to a virtual address of
 *			the Rx descriptor on monitor status ring buffer
 * @soc: core txrx main context
 * @cookie: cookie used to lookup virtual address
 *
 * Return: void *: Virtual Address of the Rx descriptor
 */
static inline
void *dp_rx_cookie_2_va_mon_status(struct dp_soc *soc, uint32_t cookie)
{
	uint8_t pool_id = DP_RX_DESC_COOKIE_POOL_ID_GET(cookie);
	uint16_t index = DP_RX_DESC_COOKIE_INDEX_GET(cookie);
	/* TODO */
	/* Add sanity for pool_id & index */
	return &(soc->rx_desc_status[pool_id].array[index].rx_desc);
}

void dp_rx_add_desc_list_to_free_list(struct dp_soc *soc,
				union dp_rx_desc_list_elem_t **local_desc_list,
				union dp_rx_desc_list_elem_t **tail,
				uint16_t pool_id,
				struct rx_desc_pool *rx_desc_pool);

uint16_t dp_rx_get_free_desc_list(struct dp_soc *soc, uint32_t pool_id,
				struct rx_desc_pool *rx_desc_pool,
				uint16_t num_descs,
				union dp_rx_desc_list_elem_t **desc_list,
				union dp_rx_desc_list_elem_t **tail);

QDF_STATUS dp_rx_pdev_attach(struct dp_pdev *pdev);

void dp_rx_pdev_detach(struct dp_pdev *pdev);

uint32_t
dp_rx_process(struct dp_intr *int_ctx, void *hal_ring, uint32_t quota);

uint32_t dp_rx_err_process(struct dp_soc *soc, void *hal_ring, uint32_t quota);

uint32_t
dp_rx_wbm_err_process(struct dp_soc *soc, void *hal_ring, uint32_t quota);

void
dp_rx_sg_create(qdf_nbuf_t nbuf,
		uint8_t *rx_tlv_hdr,
		uint16_t *mpdu_len,
		bool *is_first_frag,
		uint16_t *frag_list_len,
		qdf_nbuf_t *head_frag_nbuf,
		qdf_nbuf_t *frag_list_head,
		qdf_nbuf_t *frag_list_tail);

QDF_STATUS dp_rx_desc_pool_alloc(struct dp_soc *soc,
				uint32_t pool_id,
				uint32_t pool_size,
				struct rx_desc_pool *rx_desc_pool);

void dp_rx_desc_pool_free(struct dp_soc *soc,
			uint32_t pool_id,
			struct rx_desc_pool *rx_desc_pool);

void dp_rx_deliver_raw(struct dp_vdev *vdev, qdf_nbuf_t nbuf_list,
				struct dp_peer *peer);
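
/*
 * Illustrative sketch of the sanity check the TODOs above call for, not
 * part of the driver API. It assumes a MAX_RXDESC_POOLS bound and a
 * pool_size member on struct rx_desc_pool; both names are assumptions
 * here, as is the helper itself.
 */
static inline bool
dp_rx_cookie_sanity_check_example(struct dp_soc *soc, uint32_t cookie)
{
	uint8_t pool_id = DP_RX_DESC_COOKIE_POOL_ID_GET(cookie);
	uint16_t index = DP_RX_DESC_COOKIE_INDEX_GET(cookie);

	if (pool_id >= MAX_RXDESC_POOLS)
		return false;

	/* index must fall inside the sw descriptor array of that pool */
	return index < soc->rx_desc_buf[pool_id].pool_size;
}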

/**
 * dp_rx_add_to_free_desc_list() - Adds to a local free descriptor list
 *
 * @head: pointer to the head of local free list
 * @tail: pointer to the tail of local free list
 * @new: new descriptor that is added to the free list
 *
 * Return: None
 */
static inline
void dp_rx_add_to_free_desc_list(union dp_rx_desc_list_elem_t **head,
				 union dp_rx_desc_list_elem_t **tail,
				 struct dp_rx_desc *new)
{
	qdf_assert(head && new);

	new->nbuf = NULL;
	new->in_use = 0;

	((union dp_rx_desc_list_elem_t *)new)->next = *head;
	*head = (union dp_rx_desc_list_elem_t *)new;
	if (*tail == NULL)
		*tail = *head;
}
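
/*
 * Illustrative usage sketch, not part of the driver API: descriptors
 * reaped while processing a ring are first collected on a local list via
 * dp_rx_add_to_free_desc_list(), then spliced back into the pool free
 * list in one call. The helper name is hypothetical.
 */
static inline void
dp_rx_free_list_usage_example(struct dp_soc *soc, uint16_t pool_id,
			      struct rx_desc_pool *rx_desc_pool,
			      struct dp_rx_desc *rx_desc)
{
	union dp_rx_desc_list_elem_t *head = NULL;
	union dp_rx_desc_list_elem_t *tail = NULL;

	/* typically called once per reaped descriptor, inside the loop */
	dp_rx_add_to_free_desc_list(&head, &tail, rx_desc);

	/* hand the whole batch back to the pool */
	dp_rx_add_desc_list_to_free_list(soc, &head, &tail, pool_id,
					 rx_desc_pool);
}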

/**
 * dp_rx_wds_srcport_learn() - Add or update the STA PEER which
 *			       is behind the WDS repeater.
 *
 * @soc: core txrx main context
 * @rx_tlv_hdr: base address of RX TLV header
 * @ta_peer: WDS repeater peer
 * @nbuf: rx pkt
 *
 * Return: None
 */
#ifdef FEATURE_WDS
static inline void
dp_rx_wds_srcport_learn(struct dp_soc *soc,
			uint8_t *rx_tlv_hdr,
			struct dp_peer *ta_peer,
			qdf_nbuf_t nbuf)
{
	uint16_t sa_sw_peer_id = hal_rx_msdu_end_sa_sw_peer_id_get(rx_tlv_hdr);
	uint32_t flags = IEEE80211_NODE_F_WDS_HM;
	uint32_t ret = 0;
	uint8_t wds_src_mac[IEEE80211_ADDR_LEN];

	/* Do wds source port learning only if it is a 4-address mpdu */
	if (!(qdf_nbuf_is_chfrag_start(nbuf) &&
	      hal_rx_get_mpdu_mac_ad4_valid(rx_tlv_hdr)))
		return;

	memcpy(wds_src_mac, (qdf_nbuf_data(nbuf) + IEEE80211_ADDR_LEN),
	       IEEE80211_ADDR_LEN);

	if (qdf_unlikely(!hal_rx_msdu_end_sa_is_valid_get(rx_tlv_hdr))) {
		if (!dp_peer_add_ast(soc, ta_peer, wds_src_mac, 0)) {
			ret = soc->cdp_soc.ol_ops->peer_add_wds_entry(
					ta_peer->vdev->pdev->osif_pdev,
					wds_src_mac,
					ta_peer->mac_addr.raw,
					flags);
		}
	} else {
		/*
		 * Get the AST entry from HW SA index and mark it as active
		 */
		struct dp_ast_entry *ast;
		uint16_t sa_idx = hal_rx_msdu_end_sa_idx_get(rx_tlv_hdr);

		ast = soc->ast_table[sa_idx];

		/*
		 * Ensure we are updating the right AST entry by
		 * validating ast_idx.
		 * There is a possibility we might arrive here without
		 * an AST MAP event, so this check is mandatory.
		 */
		if (ast && (ast->ast_idx == sa_idx))
			ast->is_active = TRUE;

		if (sa_sw_peer_id != ta_peer->peer_ids[0]) {
			ret = soc->cdp_soc.ol_ops->peer_update_wds_entry(
					ta_peer->vdev->pdev->osif_pdev,
					wds_src_mac,
					ta_peer->mac_addr.raw,
					flags);
		}
	}
}
#else
static inline void
dp_rx_wds_srcport_learn(struct dp_soc *soc,
			uint8_t *rx_tlv_hdr,
			struct dp_peer *ta_peer,
			qdf_nbuf_t nbuf)
{
}
#endif

uint8_t dp_rx_process_invalid_peer(struct dp_soc *soc, qdf_nbuf_t nbuf);

#define DP_RX_LIST_APPEND(head, tail, elem)			\
	do {							\
		if (!(head)) {					\
			(head) = (elem);			\
		} else {					\
			qdf_nbuf_set_next((tail), (elem));	\
		}						\
		(tail) = (elem);				\
		qdf_nbuf_set_next((tail), NULL);		\
	} while (0)

/**
 * check_x86_paddr() - validate (and if needed remap) a buffer physical
 *		       address on x86 emulation platforms
 * @dp_soc: core txrx main context
 * @rx_netbuf: Rx buffer to validate; may be re-allocated on failure
 * @paddr: physical address of the buffer
 * @pdev: core txrx pdev context
 *
 * Return: QDF_STATUS_SUCCESS if the address is usable by the host,
 *	   QDF_STATUS_E_FAILURE otherwise
 */
#ifndef BUILD_X86
static inline int check_x86_paddr(struct dp_soc *dp_soc, qdf_nbuf_t *rx_netbuf,
				qdf_dma_addr_t *paddr, struct dp_pdev *pdev)
{
	return QDF_STATUS_SUCCESS;
}
#else
#define MAX_RETRY 100
static inline int check_x86_paddr(struct dp_soc *dp_soc, qdf_nbuf_t *rx_netbuf,
				qdf_dma_addr_t *paddr, struct dp_pdev *pdev)
{
	uint32_t nbuf_retry = 0;
	int32_t ret;
	const uint32_t x86_phy_addr = 0x50000000;
	/*
	 * in M2M emulation platforms (x86) the memory below 0x50000000
	 * is reserved for target use, so any memory allocated in this
	 * region should not be used by host
	 */
	do {
		if (qdf_likely(*paddr > x86_phy_addr))
			return QDF_STATUS_SUCCESS;

		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
			"phy addr %pK is below 0x50000000, trying again\n",
			paddr);

		nbuf_retry++;
		if ((*rx_netbuf)) {
			qdf_nbuf_unmap_single(dp_soc->osdev, *rx_netbuf,
						QDF_DMA_BIDIRECTIONAL);
			/* Not freeing buffer intentionally.
			 * Observed that same buffer is getting
			 * re-allocated resulting in longer load time
			 * WMI init timeout.
			 * This buffer is anyway not useful so skip it.
			 */
		}

		*rx_netbuf = qdf_nbuf_alloc(pdev->osif_pdev,
						RX_BUFFER_SIZE,
						RX_BUFFER_RESERVATION,
						RX_BUFFER_ALIGNMENT,
						FALSE);

		if (qdf_unlikely(!(*rx_netbuf)))
			return QDF_STATUS_E_FAILURE;

		ret = qdf_nbuf_map_single(dp_soc->osdev, *rx_netbuf,
						QDF_DMA_BIDIRECTIONAL);

		if (qdf_unlikely(ret == QDF_STATUS_E_FAILURE)) {
			qdf_nbuf_free(*rx_netbuf);
			*rx_netbuf = NULL;
			continue;
		}

		*paddr = qdf_nbuf_get_frag_paddr(*rx_netbuf, 0);
	} while (nbuf_retry < MAX_RETRY);

	if ((*rx_netbuf)) {
		qdf_nbuf_unmap_single(dp_soc->osdev, *rx_netbuf,
					QDF_DMA_BIDIRECTIONAL);
		qdf_nbuf_free(*rx_netbuf);
	}

	return QDF_STATUS_E_FAILURE;
}
#endif
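
/*
 * Illustrative sketch, not part of the driver API: building an nbuf
 * delivery chain with DP_RX_LIST_APPEND. head and tail must start out
 * NULL; the macro keeps the chain NULL-terminated after every append.
 * The helper name is hypothetical.
 */
static inline void
dp_rx_list_append_example(qdf_nbuf_t *head, qdf_nbuf_t *tail, qdf_nbuf_t nbuf)
{
	DP_RX_LIST_APPEND(*head, *tail, nbuf);
}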

/**
 * dp_rx_cookie_2_link_desc_va() - Converts cookie to a virtual address of
 *				   the MSDU Link Descriptor
 * @soc: core txrx main context
 * @buf_info: buf_info includes the cookie that is used to look up the
 * virtual address of the link descriptor. Normally this is just an index
 * into a per-SOC array.
 *
 * This is the VA of the link descriptor, that HAL layer later uses to
 * retrieve the list of MSDUs for a given MPDU.
 *
 * Return: void *: Virtual Address of the Rx descriptor
 */
static inline
void *dp_rx_cookie_2_link_desc_va(struct dp_soc *soc,
				  struct hal_buf_info *buf_info)
{
	void *link_desc_va;
	uint32_t bank_id = LINK_DESC_COOKIE_BANK_ID(buf_info->sw_cookie);

	/* TODO */
	/* Add sanity for cookie */

	link_desc_va = soc->link_desc_banks[bank_id].base_vaddr +
		(buf_info->paddr -
			soc->link_desc_banks[bank_id].base_paddr);

	return link_desc_va;
}

/**
 * dp_rx_cookie_2_mon_link_desc_va() - Converts cookie to a virtual address of
 *				       the MSDU Link Descriptor
 * @pdev: core txrx pdev context
 * @buf_info: buf_info includes the cookie that is used to look up the
 * virtual address of the link descriptor. Normally this is just an index
 * into a per-pdev array.
 *
 * This is the VA of the link descriptor in monitor mode destination ring,
 * that HAL layer later uses to retrieve the list of MSDUs for a given MPDU.
 *
 * Return: void *: Virtual Address of the Rx descriptor
 */
static inline
void *dp_rx_cookie_2_mon_link_desc_va(struct dp_pdev *pdev,
				      struct hal_buf_info *buf_info)
{
	void *link_desc_va;

	/* TODO */
	/* Add sanity for cookie */

	link_desc_va = pdev->link_desc_banks[buf_info->sw_cookie].base_vaddr +
		(buf_info->paddr -
			pdev->link_desc_banks[buf_info->sw_cookie].base_paddr);

	return link_desc_va;
}

/**
 * dp_rx_defrag_concat() - Concatenate the fragments
 *
 * @dst: destination pointer to the buffer
 * @src: source pointer from where the fragment payload is to be copied
 *
 * Return: QDF_STATUS
 */
static inline QDF_STATUS dp_rx_defrag_concat(qdf_nbuf_t dst, qdf_nbuf_t src)
{
	/*
	 * Inside qdf_nbuf_cat, if it is necessary to reallocate dst
	 * to provide space for src, the headroom portion is copied from
	 * the original dst buffer to the larger new dst buffer.
	 * (This is needed, because the headroom of the dst buffer
	 * contains the rx desc.)
	 */
	if (qdf_nbuf_cat(dst, src))
		return QDF_STATUS_E_DEFRAG_ERROR;

	return QDF_STATUS_SUCCESS;
}

/**
 * dp_rx_ast_set_active() - set the active flag of the AST entry
 *			    corresponding to a hw index.
 * @soc: core txrx main context
 * @sa_idx: hw idx
 * @is_active: active flag
 *
 * Return: QDF_STATUS
 */
#ifdef FEATURE_WDS
static inline QDF_STATUS
dp_rx_ast_set_active(struct dp_soc *soc, uint16_t sa_idx, bool is_active)
{
	struct dp_ast_entry *ast;

	qdf_spin_lock_bh(&soc->ast_lock);
	ast = soc->ast_table[sa_idx];

	/*
	 * Ensure we are updating the right AST entry by
	 * validating ast_idx.
	 * There is a possibility we might arrive here without
	 * an AST MAP event, so this check is mandatory.
	 */
	if (ast && (ast->ast_idx == sa_idx)) {
		ast->is_active = is_active;
		qdf_spin_unlock_bh(&soc->ast_lock);
		return QDF_STATUS_SUCCESS;
	}

	qdf_spin_unlock_bh(&soc->ast_lock);
	return QDF_STATUS_E_FAILURE;
}
#else
static inline QDF_STATUS
dp_rx_ast_set_active(struct dp_soc *soc, uint16_t sa_idx, bool is_active)
{
	return QDF_STATUS_SUCCESS;
}
#endif
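
/*
 * Illustrative usage sketch, not part of the driver API: chaining each
 * subsequent fragment onto the head nbuf during defragmentation. On
 * failure the caller is expected to drop the whole frame. The helper
 * name is hypothetical.
 */
static inline QDF_STATUS
dp_rx_defrag_concat_example(qdf_nbuf_t head, qdf_nbuf_t frag)
{
	if (dp_rx_defrag_concat(head, frag) != QDF_STATUS_SUCCESS) {
		/* drop path: frag would be freed and reassembly aborted */
		return QDF_STATUS_E_DEFRAG_ERROR;
	}

	return QDF_STATUS_SUCCESS;
}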

/**
 * dp_rx_buffers_replenish() - replenish rxdma ring with rx nbufs
 *			       called during dp rx initialization
 *			       and at the end of dp_rx_process.
 *
 * @dp_soc: core txrx main context
 * @mac_id: mac_id which is one of 3 mac_ids
 * @dp_rxdma_srng: dp rxdma circular ring
 * @rx_desc_pool: Pointer to free Rx descriptor pool
 * @num_req_buffers: number of buffers to be replenished
 * @desc_list: list of descs if called from dp_rx_process
 *	       or NULL during dp rx initialization or out of buffer
 *	       interrupt.
 * @tail: tail of descs list
 * @owner: who owns the nbuf (host, NSS etc...)
 *
 * Return: return success or failure
 */
QDF_STATUS dp_rx_buffers_replenish(struct dp_soc *dp_soc, uint32_t mac_id,
				 struct dp_srng *dp_rxdma_srng,
				 struct rx_desc_pool *rx_desc_pool,
				 uint32_t num_req_buffers,
				 union dp_rx_desc_list_elem_t **desc_list,
				 union dp_rx_desc_list_elem_t **tail,
				 uint8_t owner);

/**
 * dp_rx_link_desc_buf_return() - Return a MPDU link descriptor to HW
 *				  (WBM), following error handling
 *
 * @soc: core DP main context
 * @dp_rxdma_srng: dp rxdma circular ring
 * @buf_addr_info: void pointer to the buffer_addr_info
 *
 * Return: QDF_STATUS
 */
QDF_STATUS
dp_rx_link_desc_buf_return(struct dp_soc *soc, struct dp_srng *dp_rxdma_srng,
			   void *buf_addr_info);

uint32_t
dp_rxdma_err_process(struct dp_soc *soc, uint32_t mac_id,
		     uint32_t quota);

void dp_rx_fill_mesh_stats(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
			   uint8_t *rx_tlv_hdr, struct dp_peer *peer);

QDF_STATUS dp_rx_filter_mesh_packets(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
				     uint8_t *rx_tlv_hdr);

#endif /* _DP_RX_H */