/*
 * Copyright (c) 2016-2018 The Linux Foundation. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */

#ifndef _DP_RX_H
#define _DP_RX_H

#include "hal_rx.h"
#include "dp_tx.h"
#include "dp_peer.h"
#include "dp_internal.h"

#ifdef RXDMA_OPTIMIZATION
#define RX_BUFFER_ALIGNMENT	128
#else /* RXDMA_OPTIMIZATION */
#define RX_BUFFER_ALIGNMENT	4
#endif /* RXDMA_OPTIMIZATION */

#ifdef QCA_HOST2FW_RXBUF_RING
#define DP_WBM2SW_RBM HAL_RX_BUF_RBM_SW1_BM
#else
#define DP_WBM2SW_RBM HAL_RX_BUF_RBM_SW3_BM
#endif /* QCA_HOST2FW_RXBUF_RING */

#define RX_BUFFER_SIZE		2048
#define RX_BUFFER_RESERVATION	0

#define DP_PEER_METADATA_PEER_ID_MASK	0x0000ffff
#define DP_PEER_METADATA_PEER_ID_SHIFT	0
#define DP_PEER_METADATA_VDEV_ID_MASK	0x00070000
#define DP_PEER_METADATA_VDEV_ID_SHIFT	16

#define DP_PEER_METADATA_PEER_ID_GET(_peer_metadata)		\
	(((_peer_metadata) & DP_PEER_METADATA_PEER_ID_MASK)	\
			>> DP_PEER_METADATA_PEER_ID_SHIFT)

#define DP_PEER_METADATA_ID_GET(_peer_metadata)			\
	(((_peer_metadata) & DP_PEER_METADATA_VDEV_ID_MASK)	\
			>> DP_PEER_METADATA_VDEV_ID_SHIFT)
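/*
 * Illustrative sketch (not part of the driver API): decomposing a REO
 * peer_metadata word with the accessors above. The helper name
 * dp_rx_peer_metadata_split() is hypothetical and exists only to show
 * the 16-bit peer_id / 3-bit vdev_id layout defined by the masks.
 */
static inline void dp_rx_peer_metadata_split(uint32_t peer_metadata,
					     uint16_t *peer_id,
					     uint8_t *vdev_id)
{
	/* peer_id occupies bits [15:0] of the metadata word */
	*peer_id = DP_PEER_METADATA_PEER_ID_GET(peer_metadata);
	/* vdev_id occupies bits [18:16] */
	*vdev_id = DP_PEER_METADATA_ID_GET(peer_metadata);
}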
#define DP_RX_DESC_MAGIC 0xdec0de

/**
 * struct dp_rx_desc
 *
 * @nbuf		: VA of the "skb" posted
 * @rx_buf_start	: VA of the original Rx buffer, before
 *			  movement of any skb->data pointer
 * @cookie		: index into the sw array which holds
 *			  the sw Rx descriptors
 *			  Cookie space is 21 bits:
 *			  lower 18 bits -- index
 *			  upper 3 bits -- pool_id
 * @pool_id		: pool Id from which this descriptor was allocated.
 *			  Can only be used if there is no flow steering
 * @in_use		: rx_desc is in use
 * @unmapped		: marks the rx_desc as unmapped when the
 *			  corresponding nbuf has already been unmapped
 */
struct dp_rx_desc {
	qdf_nbuf_t nbuf;
	uint8_t *rx_buf_start;
	uint32_t cookie;
	uint8_t pool_id;
#ifdef RX_DESC_DEBUG_CHECK
	uint32_t magic;
#endif
	uint8_t	in_use:1,
		unmapped:1;
};

#define RX_DESC_COOKIE_INDEX_SHIFT	0
#define RX_DESC_COOKIE_INDEX_MASK	0x3ffff /* 18 bits */
#define RX_DESC_COOKIE_POOL_ID_SHIFT	18
#define RX_DESC_COOKIE_POOL_ID_MASK	0x1c0000 /* 3 bits */

#define DP_RX_DESC_COOKIE_POOL_ID_GET(_cookie)		\
	(((_cookie) & RX_DESC_COOKIE_POOL_ID_MASK) >>	\
			RX_DESC_COOKIE_POOL_ID_SHIFT)

#define DP_RX_DESC_COOKIE_INDEX_GET(_cookie)		\
	(((_cookie) & RX_DESC_COOKIE_INDEX_MASK) >>	\
			RX_DESC_COOKIE_INDEX_SHIFT)
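/*
 * Illustrative sketch (not part of the driver API): the inverse of the
 * two GET macros above, showing how the 21-bit cookie is composed. The
 * helper name dp_rx_cookie_pack() is hypothetical.
 */
static inline uint32_t dp_rx_cookie_pack(uint8_t pool_id, uint32_t index)
{
	/* index fills bits [17:0], pool_id fills bits [20:18] */
	return (((uint32_t)pool_id << RX_DESC_COOKIE_POOL_ID_SHIFT) &
			RX_DESC_COOKIE_POOL_ID_MASK) |
	       ((index << RX_DESC_COOKIE_INDEX_SHIFT) &
			RX_DESC_COOKIE_INDEX_MASK);
}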
/**
 * dp_rx_xor_block() - xor block of data
 * @b: destination data block
 * @a: source data block
 * @len: length of the data to process
 *
 * Return: None
 */
static inline void dp_rx_xor_block(uint8_t *b, const uint8_t *a,
				   qdf_size_t len)
{
	qdf_size_t i;

	for (i = 0; i < len; i++)
		b[i] ^= a[i];
}

/**
 * dp_rx_rotl() - rotate the bits left
 * @val: unsigned integer input value
 * @bits: number of bits (must be in the range 1..31)
 *
 * Return: Input value rotated left by 'bits'
 */
static inline uint32_t dp_rx_rotl(uint32_t val, int bits)
{
	return (val << bits) | (val >> (32 - bits));
}

/**
 * dp_rx_rotr() - rotate the bits right
 * @val: unsigned integer input value
 * @bits: number of bits (must be in the range 1..31)
 *
 * Return: Input value rotated right by 'bits'
 */
static inline uint32_t dp_rx_rotr(uint32_t val, int bits)
{
	return (val >> bits) | (val << (32 - bits));
}

/**
 * dp_set_rx_queue() - set queue_mapping in skb
 * @nbuf: skb
 * @queue_id: rx queue_id
 *
 * Return: void
 */
#ifdef QCA_OL_RX_MULTIQ_SUPPORT
static inline void dp_set_rx_queue(qdf_nbuf_t nbuf, uint8_t queue_id)
{
	qdf_nbuf_record_rx_queue(nbuf, queue_id);
}
#else
static inline void dp_set_rx_queue(qdf_nbuf_t nbuf, uint8_t queue_id)
{
}
#endif

/**
 * dp_rx_xswap() - swap the two bytes within each 16-bit half
 * @val: unsigned integer input value
 *
 * Return: Input value with adjacent bytes swapped
 */
static inline uint32_t dp_rx_xswap(uint32_t val)
{
	return ((val & 0x00ff00ff) << 8) | ((val & 0xff00ff00) >> 8);
}

/**
 * dp_rx_get_le32_split() - assemble 32 bits from four little endian bytes
 * @b0: byte 0
 * @b1: byte 1
 * @b2: byte 2
 * @b3: byte 3
 *
 * Return: 32-bit value assembled from the bytes, b0 least significant
 */
static inline uint32_t dp_rx_get_le32_split(uint8_t b0, uint8_t b1,
					    uint8_t b2, uint8_t b3)
{
	return b0 | (b1 << 8) | (b2 << 16) | (b3 << 24);
}

/**
 * dp_rx_get_le32() - load a little endian 32-bit value
 * @p: pointer to the first of four bytes, least significant byte first
 *
 * Return: 32-bit value read from the byte array
 */
static inline uint32_t dp_rx_get_le32(const uint8_t *p)
{
	return dp_rx_get_le32_split(p[0], p[1], p[2], p[3]);
}

/**
 * dp_rx_put_le32() - store a 32-bit value in little endian byte order
 * @p: destination byte array
 * @v: source 32-bit integer
 *
 * Return: None
 */
static inline void dp_rx_put_le32(uint8_t *p, uint32_t v)
{
	p[0] = (v) & 0xff;
	p[1] = (v >> 8) & 0xff;
	p[2] = (v >> 16) & 0xff;
	p[3] = (v >> 24) & 0xff;
}
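/*
 * Illustrative sketch (not part of the driver API): round-tripping a
 * value through the little endian helpers above, independent of host
 * byte order. The helper name is hypothetical.
 */
static inline uint32_t dp_rx_le32_roundtrip_example(uint32_t v)
{
	uint8_t buf[4];

	dp_rx_put_le32(buf, v);		/* buf[0] now holds the LSB */
	return dp_rx_get_le32(buf);	/* reassembles the original value */
}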
/* One mixing round of the Michael MIC block function over the (l, r) state */
#define dp_rx_michael_block(l, r)		\
	do {					\
		r ^= dp_rx_rotl(l, 17);		\
		l += r;				\
		r ^= dp_rx_xswap(l);		\
		l += r;				\
		r ^= dp_rx_rotl(l, 3);		\
		l += r;				\
		r ^= dp_rx_rotr(l, 2);		\
		l += r;				\
	} while (0)
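/*
 * Illustrative sketch (not part of the driver API): driving the Michael
 * block round above over a message, as a TKIP MIC computation would.
 * The helper name and signature are hypothetical, and the standard
 * Michael padding (0x5a followed by zeros) is omitted for brevity.
 */
static inline void dp_rx_michael_state_example(const uint8_t key[8],
					       const uint8_t *data,
					       qdf_size_t len,
					       uint32_t *l_out,
					       uint32_t *r_out)
{
	/* the 64-bit Michael key seeds the two state words */
	uint32_t l = dp_rx_get_le32(key);
	uint32_t r = dp_rx_get_le32(key + 4);
	qdf_size_t i;

	/* absorb the message one little endian 32-bit word at a time */
	for (i = 0; i + 4 <= len; i += 4) {
		l ^= dp_rx_get_le32(data + i);
		dp_rx_michael_block(l, r);
	}

	/* (l, r) now holds the running MIC state */
	*l_out = l;
	*r_out = r;
}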
/**
 * union dp_rx_desc_list_elem_t
 *
 * @next	: Next pointer to form free list
 * @rx_desc	: DP Rx descriptor
 */
union dp_rx_desc_list_elem_t {
	union dp_rx_desc_list_elem_t *next;
	struct dp_rx_desc rx_desc;
};

/**
 * dp_rx_cookie_2_va_rxdma_buf() - Converts cookie to a virtual address of
 *			 the Rx descriptor on Rx DMA source ring buffer
 * @soc: core txrx main context
 * @cookie: cookie used to lookup virtual address
 *
 * Return: void *: Virtual Address of the Rx descriptor
 */
static inline
void *dp_rx_cookie_2_va_rxdma_buf(struct dp_soc *soc, uint32_t cookie)
{
	uint8_t pool_id = DP_RX_DESC_COOKIE_POOL_ID_GET(cookie);
	uint16_t index = DP_RX_DESC_COOKIE_INDEX_GET(cookie);
	struct rx_desc_pool *rx_desc_pool;

	if (qdf_unlikely(pool_id >= MAX_RXDESC_POOLS))
		return NULL;

	rx_desc_pool = &soc->rx_desc_buf[pool_id];

	if (qdf_unlikely(index >= rx_desc_pool->pool_size))
		return NULL;

	return &rx_desc_pool->array[index].rx_desc;
}
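/*
 * Illustrative sketch (not part of the driver API): looking up a SW Rx
 * descriptor from a ring cookie and validating it before use. The
 * cookie would normally be extracted from the hardware ring entry by a
 * HAL accessor (not shown); the helper name here is hypothetical.
 */
static inline qdf_nbuf_t
dp_rx_desc_nbuf_from_cookie_example(struct dp_soc *soc, uint32_t cookie)
{
	struct dp_rx_desc *rx_desc =
		dp_rx_cookie_2_va_rxdma_buf(soc, cookie);

	/* NULL means the cookie failed the pool/index sanity checks */
	if (qdf_unlikely(!rx_desc))
		return NULL;

#ifdef RX_DESC_DEBUG_CHECK
	/* a corrupted cookie may land on a descriptor the host never
	 * initialized; the magic field guards against that
	 */
	if (rx_desc->magic != DP_RX_DESC_MAGIC)
		return NULL;
#endif
	return rx_desc->nbuf;
}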
/**
 * dp_rx_cookie_2_va_mon_buf() - Converts cookie to a virtual address of
 *			 the Rx descriptor on monitor ring buffer
 * @soc: core txrx main context
 * @cookie: cookie used to lookup virtual address
 *
 * Return: void *: Virtual Address of the Rx descriptor
 */
static inline
void *dp_rx_cookie_2_va_mon_buf(struct dp_soc *soc, uint32_t cookie)
{
	uint8_t pool_id = DP_RX_DESC_COOKIE_POOL_ID_GET(cookie);
	uint16_t index = DP_RX_DESC_COOKIE_INDEX_GET(cookie);
	/* TODO */
	/* Add sanity for pool_id & index */
	return &(soc->rx_desc_mon[pool_id].array[index].rx_desc);
}

/**
 * dp_rx_cookie_2_va_mon_status() - Converts cookie to a virtual address of
 *			 the Rx descriptor on monitor status ring buffer
 * @soc: core txrx main context
 * @cookie: cookie used to lookup virtual address
 *
 * Return: void *: Virtual Address of the Rx descriptor
 */
static inline
void *dp_rx_cookie_2_va_mon_status(struct dp_soc *soc, uint32_t cookie)
{
	uint8_t pool_id = DP_RX_DESC_COOKIE_POOL_ID_GET(cookie);
	uint16_t index = DP_RX_DESC_COOKIE_INDEX_GET(cookie);
	/* TODO */
	/* Add sanity for pool_id & index */
	return &(soc->rx_desc_status[pool_id].array[index].rx_desc);
}

void dp_rx_add_desc_list_to_free_list(struct dp_soc *soc,
				union dp_rx_desc_list_elem_t **local_desc_list,
				union dp_rx_desc_list_elem_t **tail,
				uint16_t pool_id,
				struct rx_desc_pool *rx_desc_pool);

uint16_t dp_rx_get_free_desc_list(struct dp_soc *soc, uint32_t pool_id,
				  struct rx_desc_pool *rx_desc_pool,
				  uint16_t num_descs,
				  union dp_rx_desc_list_elem_t **desc_list,
				  union dp_rx_desc_list_elem_t **tail);

QDF_STATUS dp_rx_pdev_attach(struct dp_pdev *pdev);

void dp_rx_pdev_detach(struct dp_pdev *pdev);

uint32_t
dp_rx_process(struct dp_intr *int_ctx, void *hal_ring, uint32_t quota);

uint32_t dp_rx_err_process(struct dp_soc *soc, void *hal_ring, uint32_t quota);

uint32_t
dp_rx_wbm_err_process(struct dp_soc *soc, void *hal_ring, uint32_t quota);

/**
 * dp_rx_sg_create() - create a frag_list for MSDUs which are spread across
 *		       multiple nbufs.
 * @nbuf: pointer to the first msdu of an amsdu.
 * @rx_tlv_hdr: pointer to the start of RX TLV headers.
 *
 * This function implements the creation of RX frag_list for cases
 * where an MSDU is spread across multiple nbufs.
 *
 * Return: returns the head nbuf which contains complete frag_list.
 */
qdf_nbuf_t dp_rx_sg_create(qdf_nbuf_t nbuf, uint8_t *rx_tlv_hdr);

QDF_STATUS dp_rx_desc_pool_alloc(struct dp_soc *soc,
				 uint32_t pool_id,
				 uint32_t pool_size,
				 struct rx_desc_pool *rx_desc_pool);

void dp_rx_desc_pool_free(struct dp_soc *soc,
			  uint32_t pool_id,
			  struct rx_desc_pool *rx_desc_pool);

void dp_rx_deliver_raw(struct dp_vdev *vdev, qdf_nbuf_t nbuf_list,
		       struct dp_peer *peer);

/**
 * dp_rx_add_to_free_desc_list() - Adds to a local free descriptor list
 *
 * @head: pointer to the head of local free list
 * @tail: pointer to the tail of local free list
 * @new: new descriptor that is added to the free list
 *
 * Return: void
 */
static inline
void dp_rx_add_to_free_desc_list(union dp_rx_desc_list_elem_t **head,
				 union dp_rx_desc_list_elem_t **tail,
				 struct dp_rx_desc *new)
{
	qdf_assert(head && new);

	new->nbuf = NULL;
	new->in_use = 0;
	new->unmapped = 0;

	((union dp_rx_desc_list_elem_t *)new)->next = *head;
	*head = (union dp_rx_desc_list_elem_t *)new;
	if (*tail == NULL)
		*tail = *head;
}
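/*
 * Illustrative sketch (not part of the driver API): collecting reaped
 * descriptors onto a local free list and handing them back to the pool
 * in one batch, as dp_rx_process()-style callers do. The helper name
 * and the descriptor-array input are hypothetical simplifications.
 */
static inline void
dp_rx_free_batch_example(struct dp_soc *soc, uint16_t pool_id,
			 struct rx_desc_pool *rx_desc_pool,
			 struct dp_rx_desc **descs, uint32_t num_descs)
{
	union dp_rx_desc_list_elem_t *head = NULL;
	union dp_rx_desc_list_elem_t *tail = NULL;
	uint32_t i;

	/* build the local list without touching the pool lock */
	for (i = 0; i < num_descs; i++)
		dp_rx_add_to_free_desc_list(&head, &tail, descs[i]);

	/* return the whole batch to the pool's free list at once */
	if (head)
		dp_rx_add_desc_list_to_free_list(soc, &head, &tail,
						 pool_id, rx_desc_pool);
}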
/**
 * dp_rx_wds_srcport_learn() - Add or update the STA PEER which
 *			       is behind the WDS repeater.
 *
 * @soc: core txrx main context
 * @rx_tlv_hdr: base address of RX TLV header
 * @ta_peer: WDS repeater peer
 * @nbuf: rx pkt
 *
 * Return: void
 */
#ifdef FEATURE_WDS
static inline void
dp_rx_wds_srcport_learn(struct dp_soc *soc,
			uint8_t *rx_tlv_hdr,
			struct dp_peer *ta_peer,
			qdf_nbuf_t nbuf)
{
	uint16_t sa_sw_peer_id = hal_rx_msdu_end_sa_sw_peer_id_get(rx_tlv_hdr);
	uint32_t flags = IEEE80211_NODE_F_WDS_HM;
	uint32_t ret = 0;
	uint8_t wds_src_mac[IEEE80211_ADDR_LEN];
	struct dp_peer *sa_peer;
	struct dp_ast_entry *ast;
	uint16_t sa_idx;

	if (qdf_unlikely(!ta_peer))
		return;

	/* For AP mode : Do wds source port learning only if it is a
	 * 4-address mpdu
	 *
	 * For STA mode : Frames from RootAP backend will be in 3-address
	 * mode, till RootAP does the WDS source port learning; Hence in
	 * repeater/STA mode, we enable learning even in 3-address mode,
	 * to avoid RootAP backbone getting wrongly learnt as MEC on
	 * repeater
	 */
	if (ta_peer->vdev->opmode != wlan_op_mode_sta) {
		if (!(qdf_nbuf_is_rx_chfrag_start(nbuf) &&
		      hal_rx_get_mpdu_mac_ad4_valid(rx_tlv_hdr)))
			return;
	}

	memcpy(wds_src_mac, (qdf_nbuf_data(nbuf) + IEEE80211_ADDR_LEN),
	       IEEE80211_ADDR_LEN);

	if (qdf_unlikely(!hal_rx_msdu_end_sa_is_valid_get(rx_tlv_hdr))) {
		ret = dp_peer_add_ast(soc,
				      ta_peer,
				      wds_src_mac,
				      CDP_TXRX_AST_TYPE_WDS,
				      flags);
		return;
	}

	/*
	 * Get the AST entry from HW SA index and mark it as active
	 */
	sa_idx = hal_rx_msdu_end_sa_idx_get(rx_tlv_hdr);

	qdf_spin_lock_bh(&soc->ast_lock);
	ast = soc->ast_table[sa_idx];

	if (!ast) {
		qdf_spin_unlock_bh(&soc->ast_lock);
		return;
	}

	/*
	 * Ensure we are updating the right AST entry by
	 * validating ast_idx.
	 * There is a possibility we might arrive here without
	 * AST MAP event, so this check is mandatory
	 */
	if (ast->ast_idx == sa_idx)
		ast->is_active = TRUE;

	if (sa_sw_peer_id != ta_peer->peer_ids[0]) {
		sa_peer = ast->peer;

		/*
		 * Do not kickout STA if it belongs to a different radio.
		 * For DBDC repeater, it is possible to arrive here
		 * for multicast loopback frames originated from connected
		 * clients and looped back (intrabss) by Root AP
		 */
		if (ast->pdev_id != ta_peer->vdev->pdev->pdev_id) {
			qdf_spin_unlock_bh(&soc->ast_lock);
			return;
		}

		if ((ast->type != CDP_TXRX_AST_TYPE_STATIC) &&
		    (ast->type != CDP_TXRX_AST_TYPE_SELF)) {
			dp_peer_update_ast(soc, ta_peer, ast, flags);
			qdf_spin_unlock_bh(&soc->ast_lock);
			return;
		}

		/*
		 * Kickout, when direct associated peer(SA) roams
		 * to another AP and reachable via TA peer
		 */
		if (!sa_peer->delete_in_progress) {
			sa_peer->delete_in_progress = true;
			if (soc->cdp_soc.ol_ops->peer_sta_kickout) {
				soc->cdp_soc.ol_ops->peer_sta_kickout(
					sa_peer->vdev->pdev->ctrl_pdev,
					wds_src_mac);
			}
		}
	}

	qdf_spin_unlock_bh(&soc->ast_lock);
}
#else
static inline void
dp_rx_wds_srcport_learn(struct dp_soc *soc,
			uint8_t *rx_tlv_hdr,
			struct dp_peer *ta_peer,
			qdf_nbuf_t nbuf)
{
}
#endif

uint8_t dp_rx_process_invalid_peer(struct dp_soc *soc, qdf_nbuf_t nbuf);
void dp_rx_process_invalid_peer_wrapper(struct dp_soc *soc,
					qdf_nbuf_t mpdu, bool mpdu_done);
void dp_rx_process_mic_error(struct dp_soc *soc, qdf_nbuf_t nbuf,
			     uint8_t *rx_tlv_hdr);

#define DP_RX_LIST_APPEND(head, tail, elem)			\
	do {							\
		if (!(head)) {					\
			(head) = (elem);			\
		} else {					\
			qdf_nbuf_set_next((tail), (elem));	\
		}						\
		(tail) = (elem);				\
		qdf_nbuf_set_next((tail), NULL);		\
	} while (0)
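/*
 * Illustrative sketch (not part of the driver API): accumulating nbufs
 * for delivery with DP_RX_LIST_APPEND(). head and tail must start out
 * NULL; the macro keeps the list NULL-terminated after every append.
 * The helper name and the nbuf-array input are hypothetical.
 */
static inline qdf_nbuf_t
dp_rx_build_deliver_list_example(qdf_nbuf_t *nbufs, uint32_t num_nbufs)
{
	qdf_nbuf_t deliver_list_head = NULL;
	qdf_nbuf_t deliver_list_tail = NULL;
	uint32_t i;

	for (i = 0; i < num_nbufs; i++)
		DP_RX_LIST_APPEND(deliver_list_head, deliver_list_tail,
				  nbufs[i]);

	return deliver_list_head;
}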
#ifndef BUILD_X86
static inline int check_x86_paddr(struct dp_soc *dp_soc, qdf_nbuf_t *rx_netbuf,
				  qdf_dma_addr_t *paddr, struct dp_pdev *pdev)
{
	return QDF_STATUS_SUCCESS;
}
#else
#define MAX_RETRY 100
static inline int check_x86_paddr(struct dp_soc *dp_soc, qdf_nbuf_t *rx_netbuf,
				  qdf_dma_addr_t *paddr, struct dp_pdev *pdev)
{
	uint32_t nbuf_retry = 0;
	int32_t ret;
	const uint32_t x86_phy_addr = 0x50000000;
	/*
	 * in M2M emulation platforms (x86) the memory below 0x50000000
	 * is reserved for target use, so any memory allocated in this
	 * region should not be used by host
	 */
	do {
		if (qdf_likely(*paddr > x86_phy_addr))
			return QDF_STATUS_SUCCESS;
		else {
			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
				  "phy addr %pK is in the reserved region below 0x50000000, trying again",
				  paddr);

			nbuf_retry++;
			if ((*rx_netbuf)) {
				qdf_nbuf_unmap_single(dp_soc->osdev,
						      *rx_netbuf,
						      QDF_DMA_BIDIRECTIONAL);
				/* Not freeing buffer intentionally.
				 * Observed that same buffer is getting
				 * re-allocated resulting in longer load time
				 * WMI init timeout.
				 * This buffer is anyway not useful so skip it.
				 */
			}

			*rx_netbuf = qdf_nbuf_alloc(dp_soc->osdev,
						    RX_BUFFER_SIZE,
						    RX_BUFFER_RESERVATION,
						    RX_BUFFER_ALIGNMENT,
						    FALSE);

			if (qdf_unlikely(!(*rx_netbuf)))
				return QDF_STATUS_E_FAILURE;

			ret = qdf_nbuf_map_single(dp_soc->osdev, *rx_netbuf,
						  QDF_DMA_BIDIRECTIONAL);

			if (qdf_unlikely(ret == QDF_STATUS_E_FAILURE)) {
				qdf_nbuf_free(*rx_netbuf);
				*rx_netbuf = NULL;
				continue;
			}

			*paddr = qdf_nbuf_get_frag_paddr(*rx_netbuf, 0);
		}
	} while (nbuf_retry < MAX_RETRY);

	if ((*rx_netbuf)) {
		qdf_nbuf_unmap_single(dp_soc->osdev, *rx_netbuf,
				      QDF_DMA_BIDIRECTIONAL);
		qdf_nbuf_free(*rx_netbuf);
	}

	return QDF_STATUS_E_FAILURE;
}
#endif

/**
 * dp_rx_cookie_2_link_desc_va() - Converts cookie to a virtual address of
 *				   the MSDU Link Descriptor
 * @soc: core txrx main context
 * @buf_info: buf_info includes the cookie that is used to lookup the virtual
 * address of the link descriptor. Normally this is just an index into a
 * per SOC array.
 *
 * This is the VA of the link descriptor, that HAL layer later uses to
 * retrieve the list of MSDU's for a given MPDU.
 *
 * Return: void *: Virtual Address of the Rx descriptor
 */
static inline
void *dp_rx_cookie_2_link_desc_va(struct dp_soc *soc,
				  struct hal_buf_info *buf_info)
{
	void *link_desc_va;
	uint32_t bank_id = LINK_DESC_COOKIE_BANK_ID(buf_info->sw_cookie);

	/* TODO */
	/* Add sanity for cookie */

	link_desc_va = soc->link_desc_banks[bank_id].base_vaddr +
		(buf_info->paddr -
			soc->link_desc_banks[bank_id].base_paddr);

	return link_desc_va;
}

/**
 * dp_rx_cookie_2_mon_link_desc_va() - Converts cookie to a virtual address of
 *				       the MSDU Link Descriptor
 * @pdev: core txrx pdev context
 * @buf_info: buf_info includes the cookie that is used to lookup the virtual
 * address of the link descriptor. Normally this is just an index into a
 * per pdev array.
 * @mac_id: mac id for the link descriptor bank
 *
 * This is the VA of the link descriptor in monitor mode destination ring,
 * that HAL layer later uses to retrieve the list of MSDU's for a given MPDU.
 *
 * Return: void *: Virtual Address of the Rx descriptor
 */
static inline
void *dp_rx_cookie_2_mon_link_desc_va(struct dp_pdev *pdev,
				      struct hal_buf_info *buf_info,
				      int mac_id)
{
	void *link_desc_va;
	int mac_for_pdev = dp_get_mac_id_for_mac(pdev->soc, mac_id);

	/* TODO */
	/* Add sanity for cookie */

	link_desc_va =
	   pdev->link_desc_banks[mac_for_pdev][buf_info->sw_cookie].base_vaddr +
	   (buf_info->paddr -
	    pdev->link_desc_banks[mac_for_pdev][buf_info->sw_cookie].base_paddr);

	return link_desc_va;
}

/**
 * dp_rx_defrag_concat() - Concatenate the fragments
 *
 * @dst: destination pointer to the buffer
 * @src: source pointer from where the fragment payload is to be copied
 *
 * Return: QDF_STATUS
 */
static inline QDF_STATUS dp_rx_defrag_concat(qdf_nbuf_t dst, qdf_nbuf_t src)
{
	/*
	 * Inside qdf_nbuf_cat, if it is necessary to reallocate dst
	 * to provide space for src, the headroom portion is copied from
	 * the original dst buffer to the larger new dst buffer.
	 * (This is needed, because the headroom of the dst buffer
	 * contains the rx desc.)
	 */
	if (qdf_nbuf_cat(dst, src))
		return QDF_STATUS_E_DEFRAG_ERROR;

	return QDF_STATUS_SUCCESS;
}

/*
 * dp_rx_ast_set_active() - set the active flag of the AST entry
 *			    corresponding to a hw index.
 * @soc: core txrx main context
 * @sa_idx: hw idx
 * @is_active: active flag
 *
 * Return: QDF_STATUS
 */
#ifdef FEATURE_WDS
static inline QDF_STATUS dp_rx_ast_set_active(struct dp_soc *soc,
					      uint16_t sa_idx, bool is_active)
{
	struct dp_ast_entry *ast;

	qdf_spin_lock_bh(&soc->ast_lock);
	ast = soc->ast_table[sa_idx];

	/*
	 * Ensure we are updating the right AST entry by
	 * validating ast_idx.
	 * There is a possibility we might arrive here without
	 * AST MAP event, so this check is mandatory
	 */
	if (ast && (ast->ast_idx == sa_idx)) {
		ast->is_active = is_active;
		qdf_spin_unlock_bh(&soc->ast_lock);
		return QDF_STATUS_SUCCESS;
	}

	qdf_spin_unlock_bh(&soc->ast_lock);
	return QDF_STATUS_E_FAILURE;
}
#else
static inline QDF_STATUS dp_rx_ast_set_active(struct dp_soc *soc,
					      uint16_t sa_idx, bool is_active)
{
	return QDF_STATUS_SUCCESS;
}
#endif

/*
 * check_qwrap_multicast_loopback() - Check if rx packet is a loopback packet.
 *				      In qwrap mode, packets originated from
 *				      any vdev should not loopback and
 *				      should be dropped.
 * @vdev: vdev on which rx packet is received
 * @nbuf: rx pkt
 *
 * Return: true if the packet is a qwrap loopback packet to be dropped
 */
#if ATH_SUPPORT_WRAP
static inline bool check_qwrap_multicast_loopback(struct dp_vdev *vdev,
						  qdf_nbuf_t nbuf)
{
	struct dp_vdev *psta_vdev;
	struct dp_pdev *pdev = vdev->pdev;
	struct dp_soc *soc = pdev->soc;
	uint8_t *data = qdf_nbuf_data(nbuf);
	uint8_t i;

	for (i = 0; i < MAX_PDEV_CNT && soc->pdev_list[i]; i++) {
		pdev = soc->pdev_list[i];
		if (qdf_unlikely(vdev->proxysta_vdev)) {
			/* In qwrap isolation mode, allow loopback packets as
			 * all packets go to RootAP and Loopback on the mpsta.
			 */
			if (vdev->isolation_vdev)
				return false;
			TAILQ_FOREACH(psta_vdev, &pdev->vdev_list,
				      vdev_list_elem) {
				if (qdf_unlikely(psta_vdev->proxysta_vdev &&
				    !qdf_mem_cmp(psta_vdev->mac_addr.raw,
						 &data[DP_MAC_ADDR_LEN],
						 DP_MAC_ADDR_LEN))) {
					/* Drop packet if source address is
					 * equal to any of the vdev addresses.
					 */
					return true;
				}
			}
		}
	}
	return false;
}
#else
static inline bool check_qwrap_multicast_loopback(struct dp_vdev *vdev,
						  qdf_nbuf_t nbuf)
{
	return false;
}
#endif

/*
 * dp_rx_buffers_replenish() - replenish rxdma ring with rx nbufs
 *			       called during dp rx initialization
 *			       and at the end of dp_rx_process.
 *
 * @dp_soc: core txrx main context
 * @mac_id: mac_id which is one of 3 mac_ids
 * @dp_rxdma_srng: dp rxdma circular ring
 * @rx_desc_pool: Pointer to free Rx descriptor pool
 * @num_req_buffers: number of buffers to be replenished
 * @desc_list: list of descs if called from dp_rx_process
 *	       or NULL during dp rx initialization or out of buffer
 *	       interrupt.
 * @tail: tail of descs list
 *
 * Return: return success or failure
 */
QDF_STATUS dp_rx_buffers_replenish(struct dp_soc *dp_soc, uint32_t mac_id,
				   struct dp_srng *dp_rxdma_srng,
				   struct rx_desc_pool *rx_desc_pool,
				   uint32_t num_req_buffers,
				   union dp_rx_desc_list_elem_t **desc_list,
				   union dp_rx_desc_list_elem_t **tail);

/**
 * dp_rx_link_desc_return() - Return a MPDU link descriptor to HW
 *			      (WBM), following error handling
 *
 * @soc: core DP main context
 * @ring_desc: opaque pointer to the REO error ring descriptor
 * @bm_action: put to idle_list or release to msdu_list
 *
 * Return: QDF_STATUS
 */
QDF_STATUS
dp_rx_link_desc_return(struct dp_soc *soc, void *ring_desc, uint8_t bm_action);

QDF_STATUS
dp_rx_link_desc_buf_return(struct dp_soc *soc, struct dp_srng *dp_rxdma_srng,
			   void *buf_addr_info, uint8_t bm_action);

/**
 * dp_rx_link_desc_return_by_addr() - Return a MPDU link descriptor to HW
 *				      (WBM) by address
 *
 * @soc: core DP main context
 * @link_desc_addr: link descriptor addr
 * @bm_action: put to idle_list or release to msdu_list
 *
 * Return: QDF_STATUS
 */
QDF_STATUS
dp_rx_link_desc_return_by_addr(struct dp_soc *soc, void *link_desc_addr,
			       uint8_t bm_action);

uint32_t
dp_rxdma_err_process(struct dp_soc *soc, uint32_t mac_id,
		     uint32_t quota);

void dp_rx_fill_mesh_stats(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
			   uint8_t *rx_tlv_hdr, struct dp_peer *peer);
QDF_STATUS dp_rx_filter_mesh_packets(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
				     uint8_t *rx_tlv_hdr);

int dp_wds_rx_policy_check(uint8_t *rx_tlv_hdr, struct dp_vdev *vdev,
			   struct dp_peer *peer, int rx_mcast);

qdf_nbuf_t
dp_rx_nbuf_prepare(struct dp_soc *soc, struct dp_pdev *pdev);

#endif /* _DP_RX_H */