/*
 * Copyright (c) 2016-2019 The Linux Foundation. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */

#ifndef _DP_RX_H
#define _DP_RX_H

#include "hal_rx.h"
#include "dp_tx.h"
#include "dp_peer.h"
#include "dp_internal.h"

#ifdef RXDMA_OPTIMIZATION
#ifdef NO_RX_PKT_HDR_TLV
#define RX_BUFFER_ALIGNMENT	0
#else
#define RX_BUFFER_ALIGNMENT	128
#endif /* NO_RX_PKT_HDR_TLV */
#else /* RXDMA_OPTIMIZATION */
#define RX_BUFFER_ALIGNMENT	4
#endif /* RXDMA_OPTIMIZATION */

#ifdef QCA_HOST2FW_RXBUF_RING
#define DP_WBM2SW_RBM HAL_RX_BUF_RBM_SW1_BM

/**
 * For MCL cases, allocate as many RX descriptors as buffers in the SW2RXDMA
 * ring. This value may need to be tuned later.
 */
#define DP_RX_DESC_ALLOC_MULTIPLIER 1
#else
#define DP_WBM2SW_RBM HAL_RX_BUF_RBM_SW3_BM

/**
 * AP use cases need to allocate more RX Descriptors than the number of
 * entries available in the SW2RXDMA buffer replenish ring. This is to account
 * for frames sitting in REO queues, HW-HW DMA rings etc. Hence using a
 * multiplication factor of 3, to allocate three times as many RX descriptors
 * as RX buffers.
 */
#define DP_RX_DESC_ALLOC_MULTIPLIER 3
#endif /* QCA_HOST2FW_RXBUF_RING */

#define RX_BUFFER_RESERVATION	0

#define DP_PEER_METADATA_PEER_ID_MASK	0x0000ffff
#define DP_PEER_METADATA_PEER_ID_SHIFT	0
#define DP_PEER_METADATA_VDEV_ID_MASK	0x00070000
#define DP_PEER_METADATA_VDEV_ID_SHIFT	16

#define DP_PEER_METADATA_PEER_ID_GET(_peer_metadata)		\
	(((_peer_metadata) & DP_PEER_METADATA_PEER_ID_MASK)	\
			>> DP_PEER_METADATA_PEER_ID_SHIFT)

#define DP_PEER_METADATA_ID_GET(_peer_metadata)			\
	(((_peer_metadata) & DP_PEER_METADATA_VDEV_ID_MASK)	\
			>> DP_PEER_METADATA_VDEV_ID_SHIFT)
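
/*
 * Example (illustrative sketch): unpacking the peer metadata word that HW
 * reports with each received MPDU, using the accessors above. The variable
 * names here are hypothetical.
 *
 *	uint32_t peer_metadata = ...;	// read from the Rx TLVs/ring entry
 *	uint16_t peer_id = DP_PEER_METADATA_PEER_ID_GET(peer_metadata);
 *	uint8_t vdev_id = DP_PEER_METADATA_ID_GET(peer_metadata);
 */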

#define DP_RX_DESC_MAGIC 0xdec0de

/**
 * struct dp_rx_desc
 *
 * @nbuf: VA of the "skb" posted
 * @rx_buf_start: VA of the original Rx buffer, before
 *		  movement of any skb->data pointer
 * @cookie: index into the sw array which holds
 *	    the sw Rx descriptors
 *	    Cookie space is 21 bits:
 *	    lower 18 bits -- index
 *	    upper 3 bits -- pool_id
 * @pool_id: pool Id from which this descriptor was allocated.
 *	     Can only be used if there is no flow steering
 * @in_use: rx_desc is in use
 * @unmapped: used to mark rx_desc as unmapped if the corresponding
 *	      nbuf is already unmapped
 */
struct dp_rx_desc {
	qdf_nbuf_t nbuf;
	uint8_t *rx_buf_start;
	uint32_t cookie;
	uint8_t pool_id;
#ifdef RX_DESC_DEBUG_CHECK
	uint32_t magic;
#endif
	uint8_t in_use:1,
		unmapped:1;
};

/* RX Descriptor Multi Page memory alloc related */
#define DP_RX_DESC_OFFSET_NUM_BITS 8
#define DP_RX_DESC_PAGE_ID_NUM_BITS 8
#define DP_RX_DESC_POOL_ID_NUM_BITS 4

#define DP_RX_DESC_PAGE_ID_SHIFT DP_RX_DESC_OFFSET_NUM_BITS
#define DP_RX_DESC_POOL_ID_SHIFT \
	(DP_RX_DESC_OFFSET_NUM_BITS + DP_RX_DESC_PAGE_ID_NUM_BITS)
#define RX_DESC_MULTI_PAGE_COOKIE_POOL_ID_MASK \
	(((1 << DP_RX_DESC_POOL_ID_NUM_BITS) - 1) << DP_RX_DESC_POOL_ID_SHIFT)
#define RX_DESC_MULTI_PAGE_COOKIE_PAGE_ID_MASK \
	(((1 << DP_RX_DESC_PAGE_ID_NUM_BITS) - 1) << \
			DP_RX_DESC_PAGE_ID_SHIFT)
#define RX_DESC_MULTI_PAGE_COOKIE_OFFSET_MASK \
	((1 << DP_RX_DESC_OFFSET_NUM_BITS) - 1)
#define DP_RX_DESC_MULTI_PAGE_COOKIE_GET_POOL_ID(_cookie) \
	(((_cookie) & RX_DESC_MULTI_PAGE_COOKIE_POOL_ID_MASK) >> \
			DP_RX_DESC_POOL_ID_SHIFT)
#define DP_RX_DESC_MULTI_PAGE_COOKIE_GET_PAGE_ID(_cookie) \
	(((_cookie) & RX_DESC_MULTI_PAGE_COOKIE_PAGE_ID_MASK) >> \
			DP_RX_DESC_PAGE_ID_SHIFT)
#define DP_RX_DESC_MULTI_PAGE_COOKIE_GET_OFFSET(_cookie) \
	((_cookie) & RX_DESC_MULTI_PAGE_COOKIE_OFFSET_MASK)

#define RX_DESC_COOKIE_INDEX_SHIFT	0
#define RX_DESC_COOKIE_INDEX_MASK	0x3ffff /* 18 bits */
#define RX_DESC_COOKIE_POOL_ID_SHIFT	18
#define RX_DESC_COOKIE_POOL_ID_MASK	0x1c0000

#define DP_RX_DESC_COOKIE_MAX	\
	(RX_DESC_COOKIE_INDEX_MASK | RX_DESC_COOKIE_POOL_ID_MASK)

#define DP_RX_DESC_COOKIE_POOL_ID_GET(_cookie)		\
	(((_cookie) & RX_DESC_COOKIE_POOL_ID_MASK) >>	\
			RX_DESC_COOKIE_POOL_ID_SHIFT)

#define DP_RX_DESC_COOKIE_INDEX_GET(_cookie)		\
	(((_cookie) & RX_DESC_COOKIE_INDEX_MASK) >>	\
			RX_DESC_COOKIE_INDEX_SHIFT)
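
/*
 * Example (illustrative sketch): unpacking a cookie in both layouts defined
 * above. Values shown are hypothetical.
 *
 * Multi-page layout: [pool_id:4 | page_id:8 | offset:8]
 *
 *	uint32_t cookie = (2 << DP_RX_DESC_POOL_ID_SHIFT) |
 *			  (5 << DP_RX_DESC_PAGE_ID_SHIFT) | 7;
 *	DP_RX_DESC_MULTI_PAGE_COOKIE_GET_POOL_ID(cookie);	// == 2
 *	DP_RX_DESC_MULTI_PAGE_COOKIE_GET_PAGE_ID(cookie);	// == 5
 *	DP_RX_DESC_MULTI_PAGE_COOKIE_GET_OFFSET(cookie);	// == 7
 *
 * Single-array layout: [pool_id:3 | index:18]
 *
 *	uint8_t pool_id = DP_RX_DESC_COOKIE_POOL_ID_GET(cookie);
 *	uint32_t index = DP_RX_DESC_COOKIE_INDEX_GET(cookie);
 */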

/* DOC: Offset to obtain LLC hdr
 *
 * In the case of a Wi-Fi parse error,
 * to reach the LLC header from the beginning
 * of the VLAN tag we need to skip 8 bytes.
 * Vlan_tag(4)+length(2)+length added
 * by HW(2) = 8 bytes.
 */
#define DP_SKIP_VLAN	8

/**
 * struct dp_rx_cached_buf - rx cached buffer
 * @node: linked list node
 * @buf: skb buffer
 */
struct dp_rx_cached_buf {
	qdf_list_node_t node;
	qdf_nbuf_t buf;
};

/*
 * dp_rx_xor_block() - xor block of data
 * @b: destination data block
 * @a: source data block
 * @len: length of the data to process
 *
 * Returns: None
 */
static inline void dp_rx_xor_block(uint8_t *b, const uint8_t *a, qdf_size_t len)
{
	qdf_size_t i;

	for (i = 0; i < len; i++)
		b[i] ^= a[i];
}

/*
 * dp_rx_rotl() - rotate the bits left
 * @val: unsigned integer input value
 * @bits: number of bits
 *
 * Returns: Integer rotated left by 'bits'
 */
static inline uint32_t dp_rx_rotl(uint32_t val, int bits)
{
	return (val << bits) | (val >> (32 - bits));
}

/*
 * dp_rx_rotr() - rotate the bits right
 * @val: unsigned integer input value
 * @bits: number of bits
 *
 * Returns: Integer rotated right by 'bits'
 */
static inline uint32_t dp_rx_rotr(uint32_t val, int bits)
{
	return (val >> bits) | (val << (32 - bits));
}

/*
 * dp_set_rx_queue() - set queue_mapping in skb
 * @nbuf: skb
 * @queue_id: rx queue_id
 *
 * Return: void
 */
#ifdef QCA_OL_RX_MULTIQ_SUPPORT
static inline void dp_set_rx_queue(qdf_nbuf_t nbuf, uint8_t queue_id)
{
	qdf_nbuf_record_rx_queue(nbuf, queue_id);
}
#else
static inline void dp_set_rx_queue(qdf_nbuf_t nbuf, uint8_t queue_id)
{
}
#endif

/*
 * dp_rx_xswap() - swap the bytes within each 16-bit half of the word
 * @val: unsigned integer input value
 *
 * Returns: Integer with adjacent bytes swapped
 */
static inline uint32_t dp_rx_xswap(uint32_t val)
{
	return ((val & 0x00ff00ff) << 8) | ((val & 0xff00ff00) >> 8);
}

/*
 * dp_rx_get_le32_split() - assemble 32 bits from four little-endian bytes
 * @b0: byte 0
 * @b1: byte 1
 * @b2: byte 2
 * @b3: byte 3
 *
 * Returns: Integer assembled from the bytes in little-endian order
 */
static inline uint32_t dp_rx_get_le32_split(uint8_t b0, uint8_t b1, uint8_t b2,
					    uint8_t b3)
{
	return b0 | (b1 << 8) | (b2 << 16) | (b3 << 24);
}

/*
 * dp_rx_get_le32() - get little endian 32 bits
 * @p: source byte array
 *
 * Returns: Integer with little endian 32 bits
 */
static inline uint32_t dp_rx_get_le32(const uint8_t *p)
{
	return dp_rx_get_le32_split(p[0], p[1], p[2], p[3]);
}

/*
 * dp_rx_put_le32() - put little endian 32 bits
 * @p: destination char array
 * @v: source 32-bit integer
 *
 * Returns: None
 */
static inline void dp_rx_put_le32(uint8_t *p, uint32_t v)
{
	p[0] = (v) & 0xff;
	p[1] = (v >> 8) & 0xff;
	p[2] = (v >> 16) & 0xff;
	p[3] = (v >> 24) & 0xff;
}

/* Michael MIC block function: mixes one message word into the (l, r) state */
#define dp_rx_michael_block(l, r)		\
	do {					\
		r ^= dp_rx_rotl(l, 17);		\
		l += r;				\
		r ^= dp_rx_xswap(l);		\
		l += r;				\
		r ^= dp_rx_rotl(l, 3);		\
		l += r;				\
		r ^= dp_rx_rotr(l, 2);		\
		l += r;				\
	} while (0)
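
/*
 * Example (illustrative sketch): a Michael MIC is computed by xoring each
 * little-endian 32-bit message word into 'l' and then running the block
 * function, starting from the two MIC key words. Variable names here are
 * hypothetical; the actual TKIP MIC usage lives in the defrag path.
 *
 *	uint32_t l = key_l, r = key_r;		// 64-bit Michael key
 *	l ^= dp_rx_get_le32(&data[0]);		// mix in message word 0
 *	dp_rx_michael_block(l, r);
 *	l ^= dp_rx_get_le32(&data[4]);		// mix in message word 1
 *	dp_rx_michael_block(l, r);
 *	...					// remaining words + padding
 *	dp_rx_put_le32(&mic[0], l);		// serialize the 8-byte MIC
 *	dp_rx_put_le32(&mic[4], r);
 */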

/**
 * union dp_rx_desc_list_elem_t
 *
 * @next: Next pointer to form free list
 * @rx_desc: DP Rx descriptor
 */
union dp_rx_desc_list_elem_t {
	union dp_rx_desc_list_elem_t *next;
	struct dp_rx_desc rx_desc;
};

#ifdef RX_DESC_MULTI_PAGE_ALLOC
/**
 * dp_rx_desc_find() - find dp rx descriptor from page ID and offset
 * @page_id: Page ID
 * @offset: Offset of the descriptor element
 * @rx_pool: Rx descriptor pool to look into
 *
 * Return: RX descriptor element
 */
union dp_rx_desc_list_elem_t *dp_rx_desc_find(uint16_t page_id, uint16_t offset,
					      struct rx_desc_pool *rx_pool);

static inline
struct dp_rx_desc *dp_get_rx_desc_from_cookie(struct dp_soc *soc,
					      struct rx_desc_pool *pool,
					      uint32_t cookie)
{
	uint8_t pool_id = DP_RX_DESC_MULTI_PAGE_COOKIE_GET_POOL_ID(cookie);
	uint16_t page_id = DP_RX_DESC_MULTI_PAGE_COOKIE_GET_PAGE_ID(cookie);
	uint8_t offset = DP_RX_DESC_MULTI_PAGE_COOKIE_GET_OFFSET(cookie);
	struct rx_desc_pool *rx_desc_pool;
	union dp_rx_desc_list_elem_t *rx_desc_elem;

	if (qdf_unlikely(pool_id >= MAX_RXDESC_POOLS))
		return NULL;

	rx_desc_pool = &pool[pool_id];
	rx_desc_elem = (union dp_rx_desc_list_elem_t *)
		(rx_desc_pool->desc_pages.cacheable_pages[page_id] +
		 rx_desc_pool->elem_size * offset);

	return &rx_desc_elem->rx_desc;
}

/**
 * dp_rx_cookie_2_va_rxdma_buf() - Converts cookie to a virtual address of
 *				   the Rx descriptor on Rx DMA source ring buffer
 * @soc: core txrx main context
 * @cookie: cookie used to lookup virtual address
 *
 * Return: Pointer to the Rx descriptor
 */
static inline
struct dp_rx_desc *dp_rx_cookie_2_va_rxdma_buf(struct dp_soc *soc,
					       uint32_t cookie)
{
	return dp_get_rx_desc_from_cookie(soc, &soc->rx_desc_buf[0], cookie);
}

/**
 * dp_rx_cookie_2_va_mon_buf() - Converts cookie to a virtual address of
 *				 the Rx descriptor on monitor ring buffer
 * @soc: core txrx main context
 * @cookie: cookie used to lookup virtual address
 *
 * Return: Pointer to the Rx descriptor
 */
static inline
struct dp_rx_desc *dp_rx_cookie_2_va_mon_buf(struct dp_soc *soc,
					     uint32_t cookie)
{
	return dp_get_rx_desc_from_cookie(soc, &soc->rx_desc_mon[0], cookie);
}

/**
 * dp_rx_cookie_2_va_mon_status() - Converts cookie to a virtual address of
 *				    the Rx descriptor on monitor status ring buffer
 * @soc: core txrx main context
 * @cookie: cookie used to lookup virtual address
 *
 * Return: Pointer to the Rx descriptor
 */
static inline
struct dp_rx_desc *dp_rx_cookie_2_va_mon_status(struct dp_soc *soc,
						uint32_t cookie)
{
	return dp_get_rx_desc_from_cookie(soc, &soc->rx_desc_status[0], cookie);
}
#else
/**
 * dp_rx_cookie_2_va_rxdma_buf() - Converts cookie to a virtual address of
 *				   the Rx descriptor on Rx DMA source ring buffer
 * @soc: core txrx main context
 * @cookie: cookie used to lookup virtual address
 *
 * Return: void *: Virtual Address of the Rx descriptor
 */
static inline
void *dp_rx_cookie_2_va_rxdma_buf(struct dp_soc *soc, uint32_t cookie)
{
	uint8_t pool_id = DP_RX_DESC_COOKIE_POOL_ID_GET(cookie);
	uint16_t index = DP_RX_DESC_COOKIE_INDEX_GET(cookie);
	struct rx_desc_pool *rx_desc_pool;

	if (qdf_unlikely(pool_id >= MAX_RXDESC_POOLS))
		return NULL;

	rx_desc_pool = &soc->rx_desc_buf[pool_id];

	if (qdf_unlikely(index >= rx_desc_pool->pool_size))
		return NULL;

	return &(soc->rx_desc_buf[pool_id].array[index].rx_desc);
}

/**
 * dp_rx_cookie_2_va_mon_buf() - Converts cookie to a virtual address of
 *				 the Rx descriptor on monitor ring buffer
 * @soc: core txrx main context
 * @cookie: cookie used to lookup virtual address
 *
 * Return: void *: Virtual Address of the Rx descriptor
 */
static inline
void *dp_rx_cookie_2_va_mon_buf(struct dp_soc *soc, uint32_t cookie)
{
	uint8_t pool_id = DP_RX_DESC_COOKIE_POOL_ID_GET(cookie);
	uint16_t index = DP_RX_DESC_COOKIE_INDEX_GET(cookie);
	/* TODO */
	/* Add sanity for pool_id & index */
	return &(soc->rx_desc_mon[pool_id].array[index].rx_desc);
}

/**
 * dp_rx_cookie_2_va_mon_status() - Converts cookie to a virtual address of
 *				    the Rx descriptor on monitor status ring buffer
 * @soc: core txrx main context
 * @cookie: cookie used to lookup virtual address
 *
 * Return: void *: Virtual Address of the Rx descriptor
 */
static inline
void *dp_rx_cookie_2_va_mon_status(struct dp_soc *soc, uint32_t cookie)
{
	uint8_t pool_id = DP_RX_DESC_COOKIE_POOL_ID_GET(cookie);
	uint16_t index = DP_RX_DESC_COOKIE_INDEX_GET(cookie);
	/* TODO */
	/* Add sanity for pool_id & index */
	return &(soc->rx_desc_status[pool_id].array[index].rx_desc);
}
#endif /* RX_DESC_MULTI_PAGE_ALLOC */
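
/*
 * Example (illustrative sketch): a ring handler recovers the SW descriptor
 * for a completed buffer from the cookie in the HW ring entry. The cookie
 * accessor name below is hypothetical; the real one lives in the HAL layer.
 *
 *	uint32_t cookie = hal_ring_entry_get_cookie(ring_desc);	// hypothetical
 *	struct dp_rx_desc *rx_desc;
 *
 *	rx_desc = dp_rx_cookie_2_va_rxdma_buf(soc, cookie);
 *	if (qdf_unlikely(!rx_desc || !rx_desc->in_use))
 *		return;		// stale or corrupt cookie: drop/assert
 */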

void dp_rx_add_desc_list_to_free_list(struct dp_soc *soc,
				union dp_rx_desc_list_elem_t **local_desc_list,
				union dp_rx_desc_list_elem_t **tail,
				uint16_t pool_id,
				struct rx_desc_pool *rx_desc_pool);

uint16_t dp_rx_get_free_desc_list(struct dp_soc *soc, uint32_t pool_id,
				  struct rx_desc_pool *rx_desc_pool,
				  uint16_t num_descs,
				  union dp_rx_desc_list_elem_t **desc_list,
				  union dp_rx_desc_list_elem_t **tail);

QDF_STATUS dp_rx_pdev_attach(struct dp_pdev *pdev);

void dp_rx_pdev_detach(struct dp_pdev *pdev);

uint32_t
dp_rx_process(struct dp_intr *int_ctx, void *hal_ring, uint8_t reo_ring_num,
	      uint32_t quota);

uint32_t dp_rx_err_process(struct dp_soc *soc, void *hal_ring, uint32_t quota);

uint32_t
dp_rx_wbm_err_process(struct dp_soc *soc, void *hal_ring, uint32_t quota);

/**
 * dp_rx_sg_create() - create a frag_list for MSDUs which are spread across
 *		       multiple nbufs.
 * @nbuf: pointer to the first msdu of an amsdu.
 * @rx_tlv_hdr: pointer to the start of RX TLV headers.
 *
 * This function implements the creation of RX frag_list for cases
 * where an MSDU is spread across multiple nbufs.
 *
 * Return: returns the head nbuf which contains complete frag_list.
 */
qdf_nbuf_t dp_rx_sg_create(qdf_nbuf_t nbuf, uint8_t *rx_tlv_hdr);

/*
 * dp_rx_desc_pool_alloc() - create a pool of software rx_descs
 *			     at the time of dp rx initialization
 *
 * @soc: core txrx main context
 * @pool_id: pool_id which is one of 3 mac_ids
 * @pool_size: number of Rx descriptors in the pool
 * @pool: rx descriptor pool pointer
 *
 * Return: QDF status
 */
QDF_STATUS dp_rx_desc_pool_alloc(struct dp_soc *soc, uint32_t pool_id,
				 uint32_t pool_size, struct rx_desc_pool *pool);

/*
 * dp_rx_desc_nbuf_and_pool_free() - free the sw rx desc pool called during
 *				     de-initialization of wifi module.
 *
 * @soc: core txrx main context
 * @pool_id: pool_id which is one of 3 mac_ids
 * @rx_desc_pool: rx descriptor pool pointer
 *
 * Return: None
 */
void dp_rx_desc_nbuf_and_pool_free(struct dp_soc *soc, uint32_t pool_id,
				   struct rx_desc_pool *rx_desc_pool);

/*
 * dp_rx_desc_nbuf_free() - free the sw rx desc nbufs called during
 *			    de-initialization of wifi module.
 *
 * @soc: core txrx main context
 * @rx_desc_pool: rx descriptor pool pointer
 *
 * Return: None
 */
void dp_rx_desc_nbuf_free(struct dp_soc *soc,
			  struct rx_desc_pool *rx_desc_pool);

/*
 * dp_rx_desc_pool_free() - free the sw rx desc array called during
 *			    de-initialization of wifi module.
 *
 * @soc: core txrx main context
 * @rx_desc_pool: rx descriptor pool pointer
 *
 * Return: None
 */
void dp_rx_desc_pool_free(struct dp_soc *soc,
			  struct rx_desc_pool *rx_desc_pool);

void dp_rx_deliver_raw(struct dp_vdev *vdev, qdf_nbuf_t nbuf_list,
		       struct dp_peer *peer);

/**
 * dp_rx_add_to_free_desc_list() - Adds to a local free descriptor list
 *
 * @head: pointer to the head of local free list
 * @tail: pointer to the tail of local free list
 * @new: new descriptor that is added to the free list
 *
 * Return: void
 */
static inline
void dp_rx_add_to_free_desc_list(union dp_rx_desc_list_elem_t **head,
				 union dp_rx_desc_list_elem_t **tail,
				 struct dp_rx_desc *new)
{
	qdf_assert(head && new);

	new->nbuf = NULL;
	new->in_use = 0;

	((union dp_rx_desc_list_elem_t *)new)->next = *head;
	*head = (union dp_rx_desc_list_elem_t *)new;
	if (!*tail)
		*tail = *head;
}
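
/*
 * Example (illustrative sketch): error/consumption paths typically collect
 * descriptors on a local list and return them to the pool in one shot, e.g.
 *
 *	union dp_rx_desc_list_elem_t *head = NULL, *tail = NULL;
 *
 *	dp_rx_add_to_free_desc_list(&head, &tail, rx_desc);
 *	...		// repeat for every reaped descriptor
 *	dp_rx_add_desc_list_to_free_list(soc, &head, &tail, pool_id,
 *					 rx_desc_pool);
 */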

/**
 * dp_rx_wds_add_or_update_ast() - Add or update the ast entry.
 *
 * @soc: core txrx main context
 * @ta_peer: WDS repeater peer
 * @nbuf: rx packet buffer
 * @is_ad4_valid: 4-address valid flag
 * @is_sa_valid: source address valid flag
 * @is_chfrag_start: frag start flag
 * @sa_idx: source-address index for peer
 * @sa_sw_peer_id: software source-address peer-id
 *
 * Return: void
 */
#ifdef FEATURE_WDS
static inline void
dp_rx_wds_add_or_update_ast(struct dp_soc *soc, struct dp_peer *ta_peer,
			    qdf_nbuf_t nbuf, uint8_t is_ad4_valid,
			    uint8_t is_sa_valid, uint8_t is_chfrag_start,
			    uint16_t sa_idx, uint16_t sa_sw_peer_id)
{
	struct dp_peer *sa_peer;
	struct dp_ast_entry *ast;
	uint32_t flags = IEEE80211_NODE_F_WDS_HM;
	uint32_t ret = 0;
	struct dp_neighbour_peer *neighbour_peer = NULL;
	struct dp_pdev *pdev = ta_peer->vdev->pdev;
	uint8_t wds_src_mac[QDF_MAC_ADDR_SIZE];

	/* For AP mode : Do wds source port learning only if it is a
	 * 4-address mpdu
	 *
	 * For STA mode : Frames from RootAP backend will be in 3-address mode,
	 * till RootAP does the WDS source port learning; Hence in repeater/STA
	 * mode, we enable learning even in 3-address mode, to avoid RootAP
	 * backbone getting wrongly learnt as MEC on repeater
	 */
	if (ta_peer->vdev->opmode != wlan_op_mode_sta) {
		if (!(is_chfrag_start && is_ad4_valid))
			return;
	} else {
		/* For HKv2, source port learning is not needed in STA mode
		 * as we have support in HW
		 */
		if (soc->ast_override_support)
			return;
	}

	if (qdf_unlikely(!is_sa_valid)) {
		qdf_mem_copy(wds_src_mac,
			     (qdf_nbuf_data(nbuf) + QDF_MAC_ADDR_SIZE),
			     QDF_MAC_ADDR_SIZE);

		ret = dp_peer_add_ast(soc,
				      ta_peer,
				      wds_src_mac,
				      CDP_TXRX_AST_TYPE_WDS,
				      flags);
		return;
	}

	qdf_spin_lock_bh(&soc->ast_lock);
	ast = soc->ast_table[sa_idx];
	qdf_spin_unlock_bh(&soc->ast_lock);

	if (!ast) {
		/*
		 * In HKv1, it is possible that HW retains the AST entry in
		 * GSE cache on 1 radio, even after the AST entry is deleted
		 * (on another radio).
		 *
		 * Due to this, host might still get sa_is_valid indications
		 * for frames with SA not really present in AST table.
		 *
		 * So we go ahead and send an add_ast command to FW in such
		 * cases where sa is reported still as valid, so that FW will
		 * invalidate this GSE cache entry and a new AST entry gets
		 * cached.
		 */
		if (!soc->ast_override_support) {
			qdf_mem_copy(wds_src_mac,
				     (qdf_nbuf_data(nbuf) + QDF_MAC_ADDR_SIZE),
				     QDF_MAC_ADDR_SIZE);

			ret = dp_peer_add_ast(soc,
					      ta_peer,
					      wds_src_mac,
					      CDP_TXRX_AST_TYPE_WDS,
					      flags);
			return;
		} else {
			/* In HKv2 smart monitor case, when NAC client is
			 * added first and this client roams within BSS to
			 * connect to RE, since we have an AST entry for
			 * NAC we get sa_is_valid bit set. So we check if
			 * smart monitor is enabled and send add_ast command
			 * to FW.
			 */
			if (pdev->neighbour_peers_added) {
				qdf_mem_copy(wds_src_mac,
					     (qdf_nbuf_data(nbuf) +
					      QDF_MAC_ADDR_SIZE),
					     QDF_MAC_ADDR_SIZE);

				qdf_spin_lock_bh(&pdev->neighbour_peer_mutex);
				TAILQ_FOREACH(neighbour_peer,
					      &pdev->neighbour_peers_list,
					      neighbour_peer_list_elem) {
					if (!qdf_mem_cmp(&neighbour_peer->neighbour_peers_macaddr,
							 wds_src_mac,
							 QDF_MAC_ADDR_SIZE)) {
						ret = dp_peer_add_ast(soc,
								      ta_peer,
								      wds_src_mac,
								      CDP_TXRX_AST_TYPE_WDS,
								      flags);
						QDF_TRACE(QDF_MODULE_ID_DP,
							  QDF_TRACE_LEVEL_INFO,
							  "sa valid and nac roamed to wds");
						break;
					}
				}
				qdf_spin_unlock_bh(&pdev->neighbour_peer_mutex);
			}
			return;
		}
	}

	if ((ast->type == CDP_TXRX_AST_TYPE_WDS_HM) ||
	    (ast->type == CDP_TXRX_AST_TYPE_WDS_HM_SEC))
		return;

	/*
	 * Ensure we are updating the right AST entry by
	 * validating ast_idx.
	 * There is a possibility we might arrive here without
	 * AST MAP event, so this check is mandatory
	 */
	if (ast->is_mapped && (ast->ast_idx == sa_idx))
		ast->is_active = TRUE;

	if (sa_sw_peer_id != ta_peer->peer_ids[0]) {
		sa_peer = ast->peer;

		if ((ast->type != CDP_TXRX_AST_TYPE_STATIC) &&
		    (ast->type != CDP_TXRX_AST_TYPE_SELF) &&
		    (ast->type != CDP_TXRX_AST_TYPE_STA_BSS)) {
			if (ast->pdev_id != ta_peer->vdev->pdev->pdev_id) {
				/* This case is when a STA roams from one
				 * repeater to another repeater, but these
				 * repeaters are connected to root AP on
				 * different radios.
				 * Ex: rptr1 connected to ROOT AP over 5G
				 * and rptr2 connected to ROOT AP over 2G
				 * radio
				 */
				qdf_spin_lock_bh(&soc->ast_lock);
				dp_peer_del_ast(soc, ast);
				qdf_spin_unlock_bh(&soc->ast_lock);
			} else {
				/* This case is when a STA roams from one
				 * repeater to another repeater, but inside
				 * the same radio.
				 */
				qdf_spin_lock_bh(&soc->ast_lock);
				dp_peer_update_ast(soc, ta_peer, ast, flags);
				qdf_spin_unlock_bh(&soc->ast_lock);
				return;
			}
		}

		/*
		 * Do not kickout STA if it belongs to a different radio.
		 * For DBDC repeater, it is possible to arrive here
		 * for multicast loopback frames originated from connected
		 * clients and looped back (intrabss) by Root AP
		 */
		if (ast->pdev_id != ta_peer->vdev->pdev->pdev_id)
			return;

		/*
		 * Kickout, when direct associated peer (SA) roams
		 * to another AP and reachable via TA peer
		 */
		if ((sa_peer->vdev->opmode == wlan_op_mode_ap) &&
		    !sa_peer->delete_in_progress) {
			sa_peer->delete_in_progress = true;
			if (soc->cdp_soc.ol_ops->peer_sta_kickout) {
				soc->cdp_soc.ol_ops->peer_sta_kickout(
					sa_peer->vdev->pdev->ctrl_pdev,
					wds_src_mac);
			}
		}
	}
}

/**
 * dp_rx_wds_srcport_learn() - Add or update the STA PEER which
 *			       is behind the WDS repeater.
 *
 * @soc: core txrx main context
 * @rx_tlv_hdr: base address of RX TLV header
 * @ta_peer: WDS repeater peer
 * @nbuf: rx pkt
 *
 * Return: void
 */
static inline void
dp_rx_wds_srcport_learn(struct dp_soc *soc,
			uint8_t *rx_tlv_hdr,
			struct dp_peer *ta_peer,
			qdf_nbuf_t nbuf)
{
	uint16_t sa_sw_peer_id = hal_rx_msdu_end_sa_sw_peer_id_get(rx_tlv_hdr);
	uint8_t sa_is_valid = hal_rx_msdu_end_sa_is_valid_get(rx_tlv_hdr);
	uint16_t sa_idx;
	uint8_t is_chfrag_start = 0;
	uint8_t is_ad4_valid = 0;

	if (qdf_unlikely(!ta_peer))
		return;

	is_chfrag_start = qdf_nbuf_is_rx_chfrag_start(nbuf);
	if (is_chfrag_start)
		is_ad4_valid = hal_rx_get_mpdu_mac_ad4_valid(rx_tlv_hdr);

	/*
	 * Get the AST entry from HW SA index and mark it as active
	 */
	sa_idx = hal_rx_msdu_end_sa_idx_get(rx_tlv_hdr);

	dp_rx_wds_add_or_update_ast(soc, ta_peer, nbuf, is_ad4_valid,
				    sa_is_valid, is_chfrag_start,
				    sa_idx, sa_sw_peer_id);
}
#else
static inline void
dp_rx_wds_srcport_learn(struct dp_soc *soc,
			uint8_t *rx_tlv_hdr,
			struct dp_peer *ta_peer,
			qdf_nbuf_t nbuf)
{
}
#endif

uint8_t dp_rx_process_invalid_peer(struct dp_soc *soc, qdf_nbuf_t nbuf);
void dp_rx_process_invalid_peer_wrapper(struct dp_soc *soc,
					qdf_nbuf_t mpdu, bool mpdu_done);
void dp_rx_process_mic_error(struct dp_soc *soc, qdf_nbuf_t nbuf,
			     uint8_t *rx_tlv_hdr, struct dp_peer *peer);
void dp_2k_jump_handle(struct dp_soc *soc, qdf_nbuf_t nbuf, uint8_t *rx_tlv_hdr,
		       uint16_t peer_id, uint8_t tid);

#define DP_RX_LIST_APPEND(head, tail, elem)                           \
	do {                                                          \
		if (!(head)) {                                        \
			(head) = (elem);                              \
			QDF_NBUF_CB_RX_NUM_ELEMENTS_IN_LIST(head) = 1;\
		} else {                                              \
			qdf_nbuf_set_next((tail), (elem));            \
			QDF_NBUF_CB_RX_NUM_ELEMENTS_IN_LIST(head)++;  \
		}                                                     \
		(tail) = (elem);                                      \
		qdf_nbuf_set_next((tail), NULL);                      \
	} while (0)
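
/*
 * Example (illustrative sketch): the Rx fast path batches frames destined
 * for the stack into an nbuf list before delivery, e.g.
 *
 *	qdf_nbuf_t deliver_list_head = NULL;
 *	qdf_nbuf_t deliver_list_tail = NULL;
 *
 *	DP_RX_LIST_APPEND(deliver_list_head, deliver_list_tail, nbuf);
 *	...		// repeat per reaped frame
 *	// then hand deliver_list_head to the per-vdev delivery routine
 */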

#ifndef BUILD_X86
static inline int check_x86_paddr(struct dp_soc *dp_soc, qdf_nbuf_t *rx_netbuf,
				  qdf_dma_addr_t *paddr, struct dp_pdev *pdev)
{
	return QDF_STATUS_SUCCESS;
}
#else
#define MAX_RETRY 100
static inline int check_x86_paddr(struct dp_soc *dp_soc, qdf_nbuf_t *rx_netbuf,
				  qdf_dma_addr_t *paddr, struct dp_pdev *pdev)
{
	uint32_t nbuf_retry = 0;
	int32_t ret;
	const uint32_t x86_phy_addr = 0x50000000;
	/*
	 * in M2M emulation platforms (x86) the memory below 0x50000000
	 * is reserved for target use, so any memory allocated in this
	 * region should not be used by host
	 */
	do {
		if (qdf_likely(*paddr > x86_phy_addr))
			return QDF_STATUS_SUCCESS;
		else {
			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
				  "phy addr %pK is below 0x50000000, trying again",
				  paddr);

			nbuf_retry++;
			if ((*rx_netbuf)) {
				qdf_nbuf_unmap_single(dp_soc->osdev, *rx_netbuf,
						      QDF_DMA_FROM_DEVICE);
				/* Not freeing buffer intentionally.
				 * Observed that the same buffer is getting
				 * re-allocated, resulting in longer load time
				 * and WMI init timeout.
				 * This buffer is anyway not useful so skip it.
				 */
			}

			*rx_netbuf = qdf_nbuf_alloc(dp_soc->osdev,
						    RX_BUFFER_SIZE,
						    RX_BUFFER_RESERVATION,
						    RX_BUFFER_ALIGNMENT,
						    FALSE);

			if (qdf_unlikely(!(*rx_netbuf)))
				return QDF_STATUS_E_FAILURE;

			ret = qdf_nbuf_map_single(dp_soc->osdev, *rx_netbuf,
						  QDF_DMA_FROM_DEVICE);

			if (qdf_unlikely(ret == QDF_STATUS_E_FAILURE)) {
				qdf_nbuf_free(*rx_netbuf);
				*rx_netbuf = NULL;
				continue;
			}

			*paddr = qdf_nbuf_get_frag_paddr(*rx_netbuf, 0);
		}
	} while (nbuf_retry < MAX_RETRY);

	if ((*rx_netbuf)) {
		qdf_nbuf_unmap_single(dp_soc->osdev, *rx_netbuf,
				      QDF_DMA_FROM_DEVICE);
		qdf_nbuf_free(*rx_netbuf);
	}

	return QDF_STATUS_E_FAILURE;
}
#endif

/**
 * dp_rx_cookie_2_link_desc_va() - Converts cookie to a virtual address of
 *				   the MSDU Link Descriptor
 * @soc: core txrx main context
 * @buf_info: buf_info includes the cookie that is used to look up the virtual
 * address of the link descriptor. Normally this is just an index into a
 * per-SOC array.
 *
 * This is the VA of the link descriptor that the HAL layer later uses to
 * retrieve the list of MSDUs for a given MPDU.
 *
 * Return: void *: Virtual Address of the Rx descriptor
 */
static inline
void *dp_rx_cookie_2_link_desc_va(struct dp_soc *soc,
				  struct hal_buf_info *buf_info)
{
	void *link_desc_va;
	uint32_t bank_id = LINK_DESC_COOKIE_BANK_ID(buf_info->sw_cookie);

	/* TODO */
	/* Add sanity for cookie */

	link_desc_va = soc->link_desc_banks[bank_id].base_vaddr +
		(buf_info->paddr -
		 soc->link_desc_banks[bank_id].base_paddr);

	return link_desc_va;
}

/**
 * dp_rx_cookie_2_mon_link_desc_va() - Converts cookie to a virtual address of
 *				       the MSDU Link Descriptor
 * @pdev: core txrx pdev context
 * @buf_info: buf_info includes the cookie that is used to look up the virtual
 * address of the link descriptor. Normally this is just an index into a
 * per-pdev array.
 * @mac_id: mac id for which the link descriptor is looked up
 *
 * This is the VA of the link descriptor in monitor mode destination ring,
 * that the HAL layer later uses to retrieve the list of MSDUs for a given
 * MPDU.
 *
 * Return: void *: Virtual Address of the Rx descriptor
 */
static inline
void *dp_rx_cookie_2_mon_link_desc_va(struct dp_pdev *pdev,
				      struct hal_buf_info *buf_info,
				      int mac_id)
{
	void *link_desc_va;
	int mac_for_pdev = dp_get_mac_id_for_mac(pdev->soc, mac_id);

	/* TODO */
	/* Add sanity for cookie */

	link_desc_va =
	   pdev->link_desc_banks[mac_for_pdev][buf_info->sw_cookie].base_vaddr +
	   (buf_info->paddr -
	    pdev->link_desc_banks[mac_for_pdev][buf_info->sw_cookie].base_paddr);

	return link_desc_va;
}

/**
 * dp_rx_defrag_concat() - Concatenate the fragments
 *
 * @dst: destination pointer to the buffer
 * @src: source pointer from where the fragment payload is to be copied
 *
 * Return: QDF_STATUS
 */
static inline QDF_STATUS dp_rx_defrag_concat(qdf_nbuf_t dst, qdf_nbuf_t src)
{
	/*
	 * Inside qdf_nbuf_cat, if it is necessary to reallocate dst
	 * to provide space for src, the headroom portion is copied from
	 * the original dst buffer to the larger new dst buffer.
	 * (This is needed, because the headroom of the dst buffer
	 * contains the rx desc.)
	 */
	if (!qdf_nbuf_cat(dst, src)) {
		/*
		 * qdf_nbuf_cat does not free the src memory.
		 * Free src nbuf before returning.
		 * For the failure case, the caller takes care of freeing
		 * the nbuf.
		 */
		qdf_nbuf_free(src);
		return QDF_STATUS_SUCCESS;
	}

	return QDF_STATUS_E_DEFRAG_ERROR;
}
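
/*
 * Example (illustrative sketch): the defragmentation path stitches fragments
 * onto the head fragment one at a time, e.g.
 *
 *	if (dp_rx_defrag_concat(head_frag, next_frag) !=
 *	    QDF_STATUS_SUCCESS) {
 *		qdf_nbuf_free(next_frag);	// caller frees on failure
 *		return QDF_STATUS_E_DEFRAG_ERROR;
 *	}
 */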

/*
 * dp_rx_ast_set_active() - set the active flag of the AST entry
 *			    corresponding to a hw index.
 * @soc: core txrx main context
 * @sa_idx: hw idx
 * @is_active: active flag
 *
 * Return: QDF_STATUS
 */
#ifdef FEATURE_WDS
static inline QDF_STATUS dp_rx_ast_set_active(struct dp_soc *soc,
					      uint16_t sa_idx, bool is_active)
{
	struct dp_ast_entry *ast;

	qdf_spin_lock_bh(&soc->ast_lock);
	ast = soc->ast_table[sa_idx];

	/*
	 * Ensure we are updating the right AST entry by
	 * validating ast_idx.
	 * There is a possibility we might arrive here without
	 * AST MAP event, so this check is mandatory
	 */
	if (ast && ast->is_mapped && (ast->ast_idx == sa_idx)) {
		ast->is_active = is_active;
		qdf_spin_unlock_bh(&soc->ast_lock);
		return QDF_STATUS_SUCCESS;
	}

	qdf_spin_unlock_bh(&soc->ast_lock);
	return QDF_STATUS_E_FAILURE;
}
#else
static inline QDF_STATUS dp_rx_ast_set_active(struct dp_soc *soc,
					      uint16_t sa_idx, bool is_active)
{
	return QDF_STATUS_SUCCESS;
}
#endif

/*
 * dp_rx_desc_dump() - dump the sw rx descriptor
 *
 * @rx_desc: sw rx descriptor
 */
static inline void dp_rx_desc_dump(struct dp_rx_desc *rx_desc)
{
	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_FATAL,
		  "rx_desc->nbuf: %pK, rx_desc->cookie: %d, rx_desc->pool_id: %d, rx_desc->in_use: %d, rx_desc->unmapped: %d",
		  rx_desc->nbuf, rx_desc->cookie, rx_desc->pool_id,
		  rx_desc->in_use, rx_desc->unmapped);
}

/*
 * check_qwrap_multicast_loopback() - Check if rx packet is a loopback packet.
 *				      In qwrap mode, packets originating from
 *				      any vdev should not be looped back and
 *				      should be dropped.
 * @vdev: vdev on which rx packet is received
 * @nbuf: rx pkt
 *
 * Return: true if the packet is a qwrap multicast loopback packet, else false
 */
#if ATH_SUPPORT_WRAP
static inline bool check_qwrap_multicast_loopback(struct dp_vdev *vdev,
						  qdf_nbuf_t nbuf)
{
	struct dp_vdev *psta_vdev;
	struct dp_pdev *pdev = vdev->pdev;
	uint8_t *data = qdf_nbuf_data(nbuf);

	if (qdf_unlikely(vdev->proxysta_vdev)) {
		/* In qwrap isolation mode, allow loopback packets as all
		 * packets go to RootAP and Loopback on the mpsta.
		 */
		if (vdev->isolation_vdev)
			return false;
		TAILQ_FOREACH(psta_vdev, &pdev->vdev_list, vdev_list_elem) {
			if (qdf_unlikely(psta_vdev->proxysta_vdev &&
					 !qdf_mem_cmp(psta_vdev->mac_addr.raw,
						      &data[QDF_MAC_ADDR_SIZE],
						      QDF_MAC_ADDR_SIZE))) {
				/* Drop packet if source address is equal to
				 * any of the vdev addresses.
				 */
				return true;
			}
		}
	}
	return false;
}
#else
static inline bool check_qwrap_multicast_loopback(struct dp_vdev *vdev,
						  qdf_nbuf_t nbuf)
{
	return false;
}
#endif

#if defined(WLAN_SUPPORT_RX_TAG_STATISTICS) && \
	defined(WLAN_SUPPORT_RX_PROTOCOL_TYPE_TAG)
/**
 * dp_rx_update_rx_protocol_tag_stats() - Increments the protocol tag stats
 *					  for the given protocol type
 * @pdev: TXRX pdev context for which stats should be incremented
 * @protocol_index: Protocol index for which the stats should be incremented
 * @ring_index: REO ring number from which this tag was received.
 *
 * Since HKv2 is an SMP, two or more cores may simultaneously receive packets
 * of the same type, and hence attempt to increment counters for the same
 * protocol type at the same time. This creates the possibility of missing
 * stats.
 *
 * For example, two or more CPUs may each read the old counter value, V, for
 * protocol type P and each write back V+1, when the operations should have
 * been sequenced to achieve a final value of V+2.
 *
 * In order to avoid this scenario, we can either use locks or store stats
 * on a per-CPU basis. Since tagging happens in the core data path, locks
 * are not preferred. Instead, we use a per-ring counter, since each CPU
 * operates on a REO ring.
 *
 * Return: void
 */
static inline void dp_rx_update_rx_protocol_tag_stats(struct dp_pdev *pdev,
						      uint16_t protocol_index,
						      uint16_t ring_index)
{
	if (ring_index >= MAX_REO_DEST_RINGS)
		return;

	pdev->reo_proto_tag_stats[ring_index][protocol_index].tag_ctr++;
}
#else
static inline void dp_rx_update_rx_protocol_tag_stats(struct dp_pdev *pdev,
						      uint16_t protocol_index,
						      uint16_t ring_index)
{
}
#endif /* WLAN_SUPPORT_RX_TAG_STATISTICS */
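
/*
 * Example (illustrative sketch): the lost-update race described above, for a
 * shared counter C with initial value V:
 *
 *	CPU0 (ring 0)			CPU1 (ring 2)
 *	read C == V
 *					read C == V
 *	write C = V + 1
 *					write C = V + 1	// one increment lost
 *
 * Per-ring counters sidestep this because each CPU only ever increments
 * reo_proto_tag_stats[its own ring][proto]; totals can be summed when the
 * stats are read.
 */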

#if defined(WLAN_SUPPORT_RX_TAG_STATISTICS) && \
	defined(WLAN_SUPPORT_RX_PROTOCOL_TYPE_TAG)
/**
 * dp_rx_update_rx_err_protocol_tag_stats() - Increments the protocol tag stats
 *					      for the given protocol type
 *					      received from exception ring
 * @pdev: TXRX pdev context for which stats should be incremented
 * @protocol_index: Protocol index for which the stats should be incremented
 *
 * In HKv2, all exception packets are received on Ring-0 (along with normal
 * Rx). Hence tags are maintained separately for the exception ring as well.
 *
 * Return: void
 */
static inline
void dp_rx_update_rx_err_protocol_tag_stats(struct dp_pdev *pdev,
					    uint16_t protocol_index)
{
	pdev->rx_err_proto_tag_stats[protocol_index].tag_ctr++;
}
#else
static inline
void dp_rx_update_rx_err_protocol_tag_stats(struct dp_pdev *pdev,
					    uint16_t protocol_index)
{
}
#endif /* WLAN_SUPPORT_RX_TAG_STATISTICS */

/**
 * dp_rx_update_protocol_tag() - Reads CCE metadata from the RX MSDU end TLV
 *				 and sets the corresponding tag in the QDF packet
 * @soc: core txrx main context
 * @vdev: vdev on which the packet is received
 * @nbuf: QDF pkt buffer on which the protocol tag should be set
 * @rx_tlv_hdr: base address where the RX TLVs start
 * @ring_index: REO ring number, not used for error & monitor ring
 * @is_reo_exception: flag to indicate if rx from REO ring or exception ring
 * @is_update_stats: flag to indicate whether to update stats or not
 *
 * Return: void
 */
#ifdef WLAN_SUPPORT_RX_PROTOCOL_TYPE_TAG
static inline void
dp_rx_update_protocol_tag(struct dp_soc *soc, struct dp_vdev *vdev,
			  qdf_nbuf_t nbuf, uint8_t *rx_tlv_hdr,
			  uint16_t ring_index,
			  bool is_reo_exception, bool is_update_stats)
{
	uint16_t cce_metadata = RX_PROTOCOL_TAG_START_OFFSET;
	bool cce_match = false;
	struct dp_pdev *pdev;
	uint16_t protocol_tag = 0;

	if (qdf_unlikely(!vdev))
		return;

	pdev = vdev->pdev;

	if (qdf_likely(!pdev->is_rx_protocol_tagging_enabled))
		return;

	/*
	 * In case of raw frames, rx_attention and rx_msdu_end tlv
	 * may be stale or invalid. Do not tag such frames.
	 * Default decap_type is set to ethernet for monitor vdev,
	 * therefore, we cannot check decap_type for monitor mode.
	 * We will call this only for eth frames from dp_rx_mon_dest.c.
	 */
	if (qdf_likely(!(pdev->monitor_vdev && pdev->monitor_vdev == vdev) &&
		       (vdev->rx_decap_type != htt_cmn_pkt_type_ethernet)))
		return;

	/*
	 * Check whether HW has filled in the CCE metadata in
	 * this packet; if not filled, just return
	 */
	if (qdf_likely(!hal_rx_msdu_cce_match_get(rx_tlv_hdr)))
		return;

	cce_match = true;
	/* Get the cce_metadata from RX MSDU TLV */
	cce_metadata = (hal_rx_msdu_cce_metadata_get(rx_tlv_hdr) &
			RX_MSDU_END_16_CCE_METADATA_MASK);
	/*
	 * Received CCE metadata should be within the
	 * valid limits
	 */
	qdf_assert_always((cce_metadata >= RX_PROTOCOL_TAG_START_OFFSET) &&
			  (cce_metadata < (RX_PROTOCOL_TAG_START_OFFSET +
					   RX_PROTOCOL_TAG_MAX)));

	/*
	 * The CCE metadata received is just the
	 * packet_type + RX_PROTOCOL_TAG_START_OFFSET
	 */
	cce_metadata -= RX_PROTOCOL_TAG_START_OFFSET;

	/*
	 * Update the QDF packet with the user-specified
	 * tag/metadata by looking up the tag value for
	 * the received protocol type.
	 */
	protocol_tag = pdev->rx_proto_tag_map[cce_metadata].tag;
	qdf_nbuf_set_rx_protocol_tag(nbuf, protocol_tag);
	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_LOW,
		  "Seq:%u decap:%u CCE Match:%d ProtoID:%u Tag:%u US:%d",
		  hal_rx_get_rx_sequence(rx_tlv_hdr),
		  vdev->rx_decap_type, cce_match, cce_metadata,
		  protocol_tag, is_update_stats);

	if (qdf_likely(!is_update_stats))
		return;

	if (qdf_unlikely(is_reo_exception)) {
		dp_rx_update_rx_err_protocol_tag_stats(pdev,
						       cce_metadata);
	} else {
		dp_rx_update_rx_protocol_tag_stats(pdev,
						   cce_metadata,
						   ring_index);
	}
}
#else
static inline void
dp_rx_update_protocol_tag(struct dp_soc *soc, struct dp_vdev *vdev,
			  qdf_nbuf_t nbuf, uint8_t *rx_tlv_hdr,
			  uint16_t ring_index,
			  bool is_reo_exception, bool is_update_stats)
{
	/* Stub API */
}
#endif /* WLAN_SUPPORT_RX_PROTOCOL_TYPE_TAG */

/**
 * dp_rx_mon_update_protocol_tag() - Performs necessary checks for monitor mode
 *				     and then tags appropriate packets
 * @soc: core txrx main context
 * @dp_pdev: pdev on which the packet is received
 * @msdu: QDF packet buffer on which the protocol tag should be set
 * @rx_desc: base address where the RX TLVs start
 *
 * Return: void
 */
#ifdef WLAN_SUPPORT_RX_PROTOCOL_TYPE_TAG
static inline
void dp_rx_mon_update_protocol_tag(struct dp_soc *soc, struct dp_pdev *dp_pdev,
				   qdf_nbuf_t msdu, void *rx_desc)
{
	uint32_t msdu_ppdu_id = 0;
	struct mon_rx_status *mon_recv_status;

	if (qdf_likely(!dp_pdev->is_rx_protocol_tagging_enabled))
		return;

	if (qdf_likely(!dp_pdev->monitor_vdev))
		return;

	if (qdf_likely(1 != dp_pdev->ppdu_info.rx_status.rxpcu_filter_pass))
		return;

	msdu_ppdu_id = HAL_RX_HW_DESC_GET_PPDUID_GET(rx_desc);

	if (msdu_ppdu_id != dp_pdev->ppdu_info.com_info.ppdu_id) {
		QDF_TRACE(QDF_MODULE_ID_DP,
			  QDF_TRACE_LEVEL_ERROR,
			  "msdu_ppdu_id=%x,com_info.ppdu_id=%x",
			  msdu_ppdu_id,
			  dp_pdev->ppdu_info.com_info.ppdu_id);
		return;
	}

	/*
	 * Update the protocol tag in SKB for packets received on BSS.
	 * Do not update tag stats since it would double the actual
	 * received count.
	 */
	mon_recv_status = &dp_pdev->ppdu_info.rx_status;
	if (mon_recv_status->frame_control_info_valid &&
	    ((mon_recv_status->frame_control & IEEE80211_FC0_TYPE_MASK) ==
	      IEEE80211_FC0_TYPE_DATA)) {
		dp_rx_update_protocol_tag(soc,
					  dp_pdev->monitor_vdev,
					  msdu, rx_desc,
					  MAX_REO_DEST_RINGS,
					  false, false);
	}
}
#else
static inline
void dp_rx_mon_update_protocol_tag(struct dp_soc *soc, struct dp_pdev *dp_pdev,
				   qdf_nbuf_t msdu, void *rx_desc)
{
	/* Stub API */
}
#endif /* WLAN_SUPPORT_RX_PROTOCOL_TYPE_TAG */

/*
 * dp_rx_buffers_replenish() - replenish rxdma ring with rx nbufs
 *			       called during dp rx initialization
 *			       and at the end of dp_rx_process.
 *
 * @soc: core txrx main context
 * @mac_id: mac_id which is one of 3 mac_ids
 * @dp_rxdma_srng: dp rxdma circular ring
 * @rx_desc_pool: Pointer to free Rx descriptor pool
 * @num_req_buffers: number of buffers to be replenished
 * @desc_list: list of descs if called from dp_rx_process
 *	       or NULL during dp rx initialization or out of buffer
 *	       interrupt.
 * @tail: tail of descs list
 *
 * Return: return success or failure
 */
QDF_STATUS dp_rx_buffers_replenish(struct dp_soc *dp_soc, uint32_t mac_id,
				   struct dp_srng *dp_rxdma_srng,
				   struct rx_desc_pool *rx_desc_pool,
				   uint32_t num_req_buffers,
				   union dp_rx_desc_list_elem_t **desc_list,
				   union dp_rx_desc_list_elem_t **tail);
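
/*
 * Example (illustrative sketch): after reaping a REO ring, dp_rx_process
 * style code returns the consumed descriptors and refills the ring, e.g.
 *
 *	union dp_rx_desc_list_elem_t *desc_list = NULL;
 *	union dp_rx_desc_list_elem_t *tail = NULL;
 *	uint32_t count = ...;		// number of descriptors reaped
 *
 *	// consumed rx_descs were collected via dp_rx_add_to_free_desc_list()
 *	dp_rx_buffers_replenish(soc, mac_id, dp_rxdma_srng, rx_desc_pool,
 *				count, &desc_list, &tail);
 */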

/**
 * dp_rx_link_desc_return() - Return an MPDU link descriptor to HW
 *			      (WBM), following error handling
 *
 * @soc: core DP main context
 * @ring_desc: opaque pointer to the REO error ring descriptor
 * @bm_action: put to idle_list or release to msdu_list
 *
 * Return: QDF_STATUS
 */
QDF_STATUS
dp_rx_link_desc_return(struct dp_soc *soc, void *ring_desc, uint8_t bm_action);

QDF_STATUS
dp_rx_link_desc_buf_return(struct dp_soc *soc, struct dp_srng *dp_rxdma_srng,
			   void *buf_addr_info, uint8_t bm_action);

/**
 * dp_rx_link_desc_return_by_addr() - Return an MPDU link descriptor to HW
 *				      (WBM) by address
 *
 * @soc: core DP main context
 * @link_desc_addr: link descriptor addr
 * @bm_action: put to idle_list or release to msdu_list
 *
 * Return: QDF_STATUS
 */
QDF_STATUS
dp_rx_link_desc_return_by_addr(struct dp_soc *soc, void *link_desc_addr,
			       uint8_t bm_action);

uint32_t
dp_rxdma_err_process(struct dp_soc *soc, uint32_t mac_id,
		     uint32_t quota);

void dp_rx_fill_mesh_stats(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
			   uint8_t *rx_tlv_hdr, struct dp_peer *peer);
QDF_STATUS dp_rx_filter_mesh_packets(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
				     uint8_t *rx_tlv_hdr);

int dp_wds_rx_policy_check(uint8_t *rx_tlv_hdr, struct dp_vdev *vdev,
			   struct dp_peer *peer);

qdf_nbuf_t
dp_rx_nbuf_prepare(struct dp_soc *soc, struct dp_pdev *pdev);

void dp_rx_dump_info_and_assert(struct dp_soc *soc, void *hal_ring,
				void *ring_desc, struct dp_rx_desc *rx_desc);

void dp_rx_compute_delay(struct dp_vdev *vdev, qdf_nbuf_t nbuf);

#ifdef RX_DESC_DEBUG_CHECK
/**
 * dp_rx_desc_check_magic() - check the magic value in dp_rx_desc
 * @rx_desc: rx descriptor pointer
 *
 * Return: true, if magic is correct, else false.
 */
static inline bool dp_rx_desc_check_magic(struct dp_rx_desc *rx_desc)
{
	if (qdf_unlikely(rx_desc->magic != DP_RX_DESC_MAGIC))
		return false;

	rx_desc->magic = 0;
	return true;
}

/**
 * dp_rx_desc_prep() - prepare rx desc
 * @rx_desc: rx descriptor pointer to be prepared
 * @nbuf: nbuf to be associated with rx_desc
 *
 * Note: assumption is that we are associating a nbuf which is mapped
 *
 * Return: none
 */
static inline void dp_rx_desc_prep(struct dp_rx_desc *rx_desc, qdf_nbuf_t nbuf)
{
	rx_desc->magic = DP_RX_DESC_MAGIC;
	rx_desc->nbuf = nbuf;
	rx_desc->unmapped = 0;
}
#else
static inline bool dp_rx_desc_check_magic(struct dp_rx_desc *rx_desc)
{
	return true;
}

static inline void dp_rx_desc_prep(struct dp_rx_desc *rx_desc, qdf_nbuf_t nbuf)
{
	rx_desc->nbuf = nbuf;
	rx_desc->unmapped = 0;
}
#endif /* RX_DESC_DEBUG_CHECK */
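
/*
 * Example (illustrative sketch): the replenish path stamps each descriptor
 * when posting a mapped nbuf, and a reap path can verify the stamp before
 * trusting the cookie, e.g.
 *
 *	dp_rx_desc_prep(rx_desc, nbuf);		// when posting to RXDMA
 *	...
 *	if (!dp_rx_desc_check_magic(rx_desc))	// when reaping a completion
 *		dp_rx_dump_info_and_assert(soc, hal_ring, ring_desc, rx_desc);
 */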

void dp_rx_process_rxdma_err(struct dp_soc *soc, qdf_nbuf_t nbuf,
			     uint8_t *rx_tlv_hdr, struct dp_peer *peer,
			     uint8_t err_code);

#ifdef PEER_CACHE_RX_PKTS
/**
 * dp_rx_flush_rx_cached() - flush cached rx frames
 * @peer: peer
 * @drop: set flag to drop frames
 *
 * Return: None
 */
void dp_rx_flush_rx_cached(struct dp_peer *peer, bool drop);
#else
static inline void dp_rx_flush_rx_cached(struct dp_peer *peer, bool drop)
{
}
#endif
#endif /* _DP_RX_H */