1 /* 2 * Copyright (c) 2016-2020 The Linux Foundation. All rights reserved. 3 * 4 * Permission to use, copy, modify, and/or distribute this software for 5 * any purpose with or without fee is hereby granted, provided that the 6 * above copyright notice and this permission notice appear in all 7 * copies. 8 * 9 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL 10 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED 11 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE 12 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL 13 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR 14 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER 15 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR 16 * PERFORMANCE OF THIS SOFTWARE. 17 */ 18 19 #ifndef _DP_RX_H 20 #define _DP_RX_H 21 22 #include "hal_rx.h" 23 #include "dp_tx.h" 24 #include "dp_peer.h" 25 #include "dp_internal.h" 26 27 #ifdef RXDMA_OPTIMIZATION 28 #ifndef RX_DATA_BUFFER_ALIGNMENT 29 #define RX_DATA_BUFFER_ALIGNMENT 128 30 #endif 31 #ifndef RX_MONITOR_BUFFER_ALIGNMENT 32 #define RX_MONITOR_BUFFER_ALIGNMENT 128 33 #endif 34 #else /* RXDMA_OPTIMIZATION */ 35 #define RX_DATA_BUFFER_ALIGNMENT 4 36 #define RX_MONITOR_BUFFER_ALIGNMENT 4 37 #endif /* RXDMA_OPTIMIZATION */ 38 39 #ifdef QCA_HOST2FW_RXBUF_RING 40 #define DP_WBM2SW_RBM HAL_RX_BUF_RBM_SW1_BM 41 /* RBM value used for re-injecting defragmented packets into REO */ 42 #define DP_DEFRAG_RBM HAL_RX_BUF_RBM_SW3_BM 43 #else 44 #define DP_WBM2SW_RBM HAL_RX_BUF_RBM_SW3_BM 45 #define DP_DEFRAG_RBM DP_WBM2SW_RBM 46 #endif /* QCA_HOST2FW_RXBUF_RING */ 47 48 #define RX_BUFFER_RESERVATION 0 49 50 #define DP_PEER_METADATA_PEER_ID_MASK 0x0000ffff 51 #define DP_PEER_METADATA_PEER_ID_SHIFT 0 52 #define DP_PEER_METADATA_VDEV_ID_MASK 0x003f0000 53 #define DP_PEER_METADATA_VDEV_ID_SHIFT 16 54 55 #define DP_PEER_METADATA_PEER_ID_GET(_peer_metadata) \ 56 (((_peer_metadata) & 
DP_PEER_METADATA_PEER_ID_MASK) \ 57 >> DP_PEER_METADATA_PEER_ID_SHIFT) 58 59 #define DP_PEER_METADATA_VDEV_ID_GET(_peer_metadata) \ 60 (((_peer_metadata) & DP_PEER_METADATA_VDEV_ID_MASK) \ 61 >> DP_PEER_METADATA_VDEV_ID_SHIFT) 62 63 #define DP_RX_DESC_MAGIC 0xdec0de 64 65 /** 66 * struct dp_rx_desc 67 * 68 * @nbuf : VA of the "skb" posted 69 * @rx_buf_start : VA of the original Rx buffer, before 70 * movement of any skb->data pointer 71 * @cookie : index into the sw array which holds 72 * the sw Rx descriptors 73 * Cookie space is 21 bits: 74 * lower 18 bits -- index 75 * upper 3 bits -- pool_id 76 * @pool_id : pool Id for which this allocated. 77 * Can only be used if there is no flow 78 * steering 79 * @in_use rx_desc is in use 80 * @unmapped used to mark rx_desc an unmapped if the corresponding 81 * nbuf is already unmapped 82 */ 83 struct dp_rx_desc { 84 qdf_nbuf_t nbuf; 85 uint8_t *rx_buf_start; 86 uint32_t cookie; 87 uint8_t pool_id; 88 #ifdef RX_DESC_DEBUG_CHECK 89 uint32_t magic; 90 #endif 91 uint8_t in_use:1, 92 unmapped:1; 93 }; 94 95 /* RX Descriptor Multi Page memory alloc related */ 96 #define DP_RX_DESC_OFFSET_NUM_BITS 8 97 #define DP_RX_DESC_PAGE_ID_NUM_BITS 8 98 #define DP_RX_DESC_POOL_ID_NUM_BITS 4 99 100 #define DP_RX_DESC_PAGE_ID_SHIFT DP_RX_DESC_OFFSET_NUM_BITS 101 #define DP_RX_DESC_POOL_ID_SHIFT \ 102 (DP_RX_DESC_OFFSET_NUM_BITS + DP_RX_DESC_PAGE_ID_NUM_BITS) 103 #define RX_DESC_MULTI_PAGE_COOKIE_POOL_ID_MASK \ 104 (((1 << DP_RX_DESC_POOL_ID_NUM_BITS) - 1) << DP_RX_DESC_POOL_ID_SHIFT) 105 #define RX_DESC_MULTI_PAGE_COOKIE_PAGE_ID_MASK \ 106 (((1 << DP_RX_DESC_PAGE_ID_NUM_BITS) - 1) << \ 107 DP_RX_DESC_PAGE_ID_SHIFT) 108 #define RX_DESC_MULTI_PAGE_COOKIE_OFFSET_MASK \ 109 ((1 << DP_RX_DESC_OFFSET_NUM_BITS) - 1) 110 #define DP_RX_DESC_MULTI_PAGE_COOKIE_GET_POOL_ID(_cookie) \ 111 (((_cookie) & RX_DESC_MULTI_PAGE_COOKIE_POOL_ID_MASK) >> \ 112 DP_RX_DESC_POOL_ID_SHIFT) 113 #define DP_RX_DESC_MULTI_PAGE_COOKIE_GET_PAGE_ID(_cookie) \ 114 (((_cookie) 
& RX_DESC_MULTI_PAGE_COOKIE_PAGE_ID_MASK) >> \ 115 DP_RX_DESC_PAGE_ID_SHIFT) 116 #define DP_RX_DESC_MULTI_PAGE_COOKIE_GET_OFFSET(_cookie) \ 117 ((_cookie) & RX_DESC_MULTI_PAGE_COOKIE_OFFSET_MASK) 118 119 #define RX_DESC_COOKIE_INDEX_SHIFT 0 120 #define RX_DESC_COOKIE_INDEX_MASK 0x3ffff /* 18 bits */ 121 #define RX_DESC_COOKIE_POOL_ID_SHIFT 18 122 #define RX_DESC_COOKIE_POOL_ID_MASK 0x1c0000 123 124 #define DP_RX_DESC_COOKIE_MAX \ 125 (RX_DESC_COOKIE_INDEX_MASK | RX_DESC_COOKIE_POOL_ID_MASK) 126 127 #define DP_RX_DESC_COOKIE_POOL_ID_GET(_cookie) \ 128 (((_cookie) & RX_DESC_COOKIE_POOL_ID_MASK) >> \ 129 RX_DESC_COOKIE_POOL_ID_SHIFT) 130 131 #define DP_RX_DESC_COOKIE_INDEX_GET(_cookie) \ 132 (((_cookie) & RX_DESC_COOKIE_INDEX_MASK) >> \ 133 RX_DESC_COOKIE_INDEX_SHIFT) 134 135 /* DOC: Offset to obtain LLC hdr 136 * 137 * In the case of Wifi parse error 138 * to reach LLC header from beginning 139 * of VLAN tag we need to skip 8 bytes. 140 * Vlan_tag(4)+length(2)+length added 141 * by HW(2) = 8 bytes. 
142 */ 143 #define DP_SKIP_VLAN 8 144 145 /** 146 * struct dp_rx_cached_buf - rx cached buffer 147 * @list: linked list node 148 * @buf: skb buffer 149 */ 150 struct dp_rx_cached_buf { 151 qdf_list_node_t node; 152 qdf_nbuf_t buf; 153 }; 154 155 /* 156 *dp_rx_xor_block() - xor block of data 157 *@b: destination data block 158 *@a: source data block 159 *@len: length of the data to process 160 * 161 *Returns: None 162 */ 163 static inline void dp_rx_xor_block(uint8_t *b, const uint8_t *a, qdf_size_t len) 164 { 165 qdf_size_t i; 166 167 for (i = 0; i < len; i++) 168 b[i] ^= a[i]; 169 } 170 171 /* 172 *dp_rx_rotl() - rotate the bits left 173 *@val: unsigned integer input value 174 *@bits: number of bits 175 * 176 *Returns: Integer with left rotated by number of 'bits' 177 */ 178 static inline uint32_t dp_rx_rotl(uint32_t val, int bits) 179 { 180 return (val << bits) | (val >> (32 - bits)); 181 } 182 183 /* 184 *dp_rx_rotr() - rotate the bits right 185 *@val: unsigned integer input value 186 *@bits: number of bits 187 * 188 *Returns: Integer with right rotated by number of 'bits' 189 */ 190 static inline uint32_t dp_rx_rotr(uint32_t val, int bits) 191 { 192 return (val >> bits) | (val << (32 - bits)); 193 } 194 195 /* 196 * dp_set_rx_queue() - set queue_mapping in skb 197 * @nbuf: skb 198 * @queue_id: rx queue_id 199 * 200 * Return: void 201 */ 202 #ifdef QCA_OL_RX_MULTIQ_SUPPORT 203 static inline void dp_set_rx_queue(qdf_nbuf_t nbuf, uint8_t queue_id) 204 { 205 qdf_nbuf_record_rx_queue(nbuf, queue_id); 206 return; 207 } 208 #else 209 static inline void dp_set_rx_queue(qdf_nbuf_t nbuf, uint8_t queue_id) 210 { 211 } 212 #endif 213 214 /* 215 *dp_rx_xswap() - swap the bits left 216 *@val: unsigned integer input value 217 * 218 *Returns: Integer with bits swapped 219 */ 220 static inline uint32_t dp_rx_xswap(uint32_t val) 221 { 222 return ((val & 0x00ff00ff) << 8) | ((val & 0xff00ff00) >> 8); 223 } 224 225 /* 226 *dp_rx_get_le32_split() - get little endian 32 bits split 
227 *@b0: byte 0 228 *@b1: byte 1 229 *@b2: byte 2 230 *@b3: byte 3 231 * 232 *Returns: Integer with split little endian 32 bits 233 */ 234 static inline uint32_t dp_rx_get_le32_split(uint8_t b0, uint8_t b1, uint8_t b2, 235 uint8_t b3) 236 { 237 return b0 | (b1 << 8) | (b2 << 16) | (b3 << 24); 238 } 239 240 /* 241 *dp_rx_get_le32() - get little endian 32 bits 242 *@b0: byte 0 243 *@b1: byte 1 244 *@b2: byte 2 245 *@b3: byte 3 246 * 247 *Returns: Integer with little endian 32 bits 248 */ 249 static inline uint32_t dp_rx_get_le32(const uint8_t *p) 250 { 251 return dp_rx_get_le32_split(p[0], p[1], p[2], p[3]); 252 } 253 254 /* 255 * dp_rx_put_le32() - put little endian 32 bits 256 * @p: destination char array 257 * @v: source 32-bit integer 258 * 259 * Returns: None 260 */ 261 static inline void dp_rx_put_le32(uint8_t *p, uint32_t v) 262 { 263 p[0] = (v) & 0xff; 264 p[1] = (v >> 8) & 0xff; 265 p[2] = (v >> 16) & 0xff; 266 p[3] = (v >> 24) & 0xff; 267 } 268 269 /* Extract michal mic block of data */ 270 #define dp_rx_michael_block(l, r) \ 271 do { \ 272 r ^= dp_rx_rotl(l, 17); \ 273 l += r; \ 274 r ^= dp_rx_xswap(l); \ 275 l += r; \ 276 r ^= dp_rx_rotl(l, 3); \ 277 l += r; \ 278 r ^= dp_rx_rotr(l, 2); \ 279 l += r; \ 280 } while (0) 281 282 /** 283 * struct dp_rx_desc_list_elem_t 284 * 285 * @next : Next pointer to form free list 286 * @rx_desc : DP Rx descriptor 287 */ 288 union dp_rx_desc_list_elem_t { 289 union dp_rx_desc_list_elem_t *next; 290 struct dp_rx_desc rx_desc; 291 }; 292 293 #ifdef RX_DESC_MULTI_PAGE_ALLOC 294 /** 295 * dp_rx_desc_find() - find dp rx descriptor from page ID and offset 296 * @page_id: Page ID 297 * @offset: Offset of the descriptor element 298 * 299 * Return: RX descriptor element 300 */ 301 union dp_rx_desc_list_elem_t *dp_rx_desc_find(uint16_t page_id, uint16_t offset, 302 struct rx_desc_pool *rx_pool); 303 304 static inline 305 struct dp_rx_desc *dp_get_rx_desc_from_cookie(struct dp_soc *soc, 306 struct rx_desc_pool *pool, 307 uint32_t 
cookie) 308 { 309 uint8_t pool_id = DP_RX_DESC_MULTI_PAGE_COOKIE_GET_POOL_ID(cookie); 310 uint16_t page_id = DP_RX_DESC_MULTI_PAGE_COOKIE_GET_PAGE_ID(cookie); 311 uint8_t offset = DP_RX_DESC_MULTI_PAGE_COOKIE_GET_OFFSET(cookie); 312 struct rx_desc_pool *rx_desc_pool; 313 union dp_rx_desc_list_elem_t *rx_desc_elem; 314 315 if (qdf_unlikely(pool_id >= MAX_RXDESC_POOLS)) 316 return NULL; 317 318 rx_desc_pool = &pool[pool_id]; 319 rx_desc_elem = (union dp_rx_desc_list_elem_t *) 320 (rx_desc_pool->desc_pages.cacheable_pages[page_id] + 321 rx_desc_pool->elem_size * offset); 322 323 return &rx_desc_elem->rx_desc; 324 } 325 326 /** 327 * dp_rx_cookie_2_va_rxdma_buf() - Converts cookie to a virtual address of 328 * the Rx descriptor on Rx DMA source ring buffer 329 * @soc: core txrx main context 330 * @cookie: cookie used to lookup virtual address 331 * 332 * Return: Pointer to the Rx descriptor 333 */ 334 static inline 335 struct dp_rx_desc *dp_rx_cookie_2_va_rxdma_buf(struct dp_soc *soc, 336 uint32_t cookie) 337 { 338 return dp_get_rx_desc_from_cookie(soc, &soc->rx_desc_buf[0], cookie); 339 } 340 341 /** 342 * dp_rx_cookie_2_va_mon_buf() - Converts cookie to a virtual address of 343 * the Rx descriptor on monitor ring buffer 344 * @soc: core txrx main context 345 * @cookie: cookie used to lookup virtual address 346 * 347 * Return: Pointer to the Rx descriptor 348 */ 349 static inline 350 struct dp_rx_desc *dp_rx_cookie_2_va_mon_buf(struct dp_soc *soc, 351 uint32_t cookie) 352 { 353 return dp_get_rx_desc_from_cookie(soc, &soc->rx_desc_mon[0], cookie); 354 } 355 356 /** 357 * dp_rx_cookie_2_va_mon_status() - Converts cookie to a virtual address of 358 * the Rx descriptor on monitor status ring buffer 359 * @soc: core txrx main context 360 * @cookie: cookie used to lookup virtual address 361 * 362 * Return: Pointer to the Rx descriptor 363 */ 364 static inline 365 struct dp_rx_desc *dp_rx_cookie_2_va_mon_status(struct dp_soc *soc, 366 uint32_t cookie) 367 { 368 return 
dp_get_rx_desc_from_cookie(soc, &soc->rx_desc_status[0], cookie); 369 } 370 #else 371 /** 372 * dp_rx_cookie_2_va_rxdma_buf() - Converts cookie to a virtual address of 373 * the Rx descriptor on Rx DMA source ring buffer 374 * @soc: core txrx main context 375 * @cookie: cookie used to lookup virtual address 376 * 377 * Return: void *: Virtual Address of the Rx descriptor 378 */ 379 static inline 380 void *dp_rx_cookie_2_va_rxdma_buf(struct dp_soc *soc, uint32_t cookie) 381 { 382 uint8_t pool_id = DP_RX_DESC_COOKIE_POOL_ID_GET(cookie); 383 uint16_t index = DP_RX_DESC_COOKIE_INDEX_GET(cookie); 384 struct rx_desc_pool *rx_desc_pool; 385 386 if (qdf_unlikely(pool_id >= MAX_RXDESC_POOLS)) 387 return NULL; 388 389 rx_desc_pool = &soc->rx_desc_buf[pool_id]; 390 391 if (qdf_unlikely(index >= rx_desc_pool->pool_size)) 392 return NULL; 393 394 return &(soc->rx_desc_buf[pool_id].array[index].rx_desc); 395 } 396 397 /** 398 * dp_rx_cookie_2_va_mon_buf() - Converts cookie to a virtual address of 399 * the Rx descriptor on monitor ring buffer 400 * @soc: core txrx main context 401 * @cookie: cookie used to lookup virtual address 402 * 403 * Return: void *: Virtual Address of the Rx descriptor 404 */ 405 static inline 406 void *dp_rx_cookie_2_va_mon_buf(struct dp_soc *soc, uint32_t cookie) 407 { 408 uint8_t pool_id = DP_RX_DESC_COOKIE_POOL_ID_GET(cookie); 409 uint16_t index = DP_RX_DESC_COOKIE_INDEX_GET(cookie); 410 /* TODO */ 411 /* Add sanity for pool_id & index */ 412 return &(soc->rx_desc_mon[pool_id].array[index].rx_desc); 413 } 414 415 /** 416 * dp_rx_cookie_2_va_mon_status() - Converts cookie to a virtual address of 417 * the Rx descriptor on monitor status ring buffer 418 * @soc: core txrx main context 419 * @cookie: cookie used to lookup virtual address 420 * 421 * Return: void *: Virtual Address of the Rx descriptor 422 */ 423 static inline 424 void *dp_rx_cookie_2_va_mon_status(struct dp_soc *soc, uint32_t cookie) 425 { 426 uint8_t pool_id = 
DP_RX_DESC_COOKIE_POOL_ID_GET(cookie); 427 uint16_t index = DP_RX_DESC_COOKIE_INDEX_GET(cookie); 428 /* TODO */ 429 /* Add sanity for pool_id & index */ 430 return &(soc->rx_desc_status[pool_id].array[index].rx_desc); 431 } 432 #endif /* RX_DESC_MULTI_PAGE_ALLOC */ 433 434 void dp_rx_add_desc_list_to_free_list(struct dp_soc *soc, 435 union dp_rx_desc_list_elem_t **local_desc_list, 436 union dp_rx_desc_list_elem_t **tail, 437 uint16_t pool_id, 438 struct rx_desc_pool *rx_desc_pool); 439 440 uint16_t dp_rx_get_free_desc_list(struct dp_soc *soc, uint32_t pool_id, 441 struct rx_desc_pool *rx_desc_pool, 442 uint16_t num_descs, 443 union dp_rx_desc_list_elem_t **desc_list, 444 union dp_rx_desc_list_elem_t **tail); 445 446 447 QDF_STATUS dp_rx_pdev_attach(struct dp_pdev *pdev); 448 449 void dp_rx_pdev_detach(struct dp_pdev *pdev); 450 451 void dp_print_napi_stats(struct dp_soc *soc); 452 453 /** 454 * dp_rx_vdev_detach() - detach vdev from dp rx 455 * @vdev: virtual device instance 456 * 457 * Return: QDF_STATUS_SUCCESS: success 458 * QDF_STATUS_E_RESOURCES: Error return 459 */ 460 QDF_STATUS dp_rx_vdev_detach(struct dp_vdev *vdev); 461 462 uint32_t 463 dp_rx_process(struct dp_intr *int_ctx, hal_ring_handle_t hal_ring_hdl, 464 uint8_t reo_ring_num, 465 uint32_t quota); 466 467 /** 468 * dp_rx_err_process() - Processes error frames routed to REO error ring 469 * @int_ctx: pointer to DP interrupt context 470 * @soc: core txrx main context 471 * @hal_ring: opaque pointer to the HAL Rx Error Ring, which will be serviced 472 * @quota: No. of units (packets) that can be serviced in one shot. 473 * 474 * This function implements error processing and top level demultiplexer 475 * for all the frames routed to REO error ring. 476 * 477 * Return: uint32_t: No. 
of elements processed 478 */ 479 uint32_t dp_rx_err_process(struct dp_intr *int_ctx, struct dp_soc *soc, 480 hal_ring_handle_t hal_ring_hdl, uint32_t quota); 481 482 /** 483 * dp_rx_wbm_err_process() - Processes error frames routed to WBM release ring 484 * @int_ctx: pointer to DP interrupt context 485 * @soc: core txrx main context 486 * @hal_ring: opaque pointer to the HAL Rx Error Ring, which will be serviced 487 * @quota: No. of units (packets) that can be serviced in one shot. 488 * 489 * This function implements error processing and top level demultiplexer 490 * for all the frames routed to WBM2HOST sw release ring. 491 * 492 * Return: uint32_t: No. of elements processed 493 */ 494 uint32_t 495 dp_rx_wbm_err_process(struct dp_intr *int_ctx, struct dp_soc *soc, 496 hal_ring_handle_t hal_ring_hdl, uint32_t quota); 497 498 /** 499 * dp_rx_sg_create() - create a frag_list for MSDUs which are spread across 500 * multiple nbufs. 501 * @nbuf: pointer to the first msdu of an amsdu. 502 * 503 * This function implements the creation of RX frag_list for cases 504 * where an MSDU is spread across multiple nbufs. 505 * 506 * Return: returns the head nbuf which contains complete frag_list. 507 */ 508 qdf_nbuf_t dp_rx_sg_create(qdf_nbuf_t nbuf); 509 510 /* 511 * dp_rx_desc_pool_alloc() - create a pool of software rx_descs 512 * at the time of dp rx initialization 513 * 514 * @soc: core txrx main context 515 * @pool_id: pool_id which is one of 3 mac_ids 516 * @pool_size: number of Rx descriptor in the pool 517 * @rx_desc_pool: rx descriptor pool pointer 518 * 519 * Return: QDF status 520 */ 521 QDF_STATUS dp_rx_desc_pool_alloc(struct dp_soc *soc, uint32_t pool_id, 522 uint32_t pool_size, struct rx_desc_pool *pool); 523 524 /* 525 * dp_rx_desc_nbuf_and_pool_free() - free the sw rx desc pool called during 526 * de-initialization of wifi module. 
527 * 528 * @soc: core txrx main context 529 * @pool_id: pool_id which is one of 3 mac_ids 530 * @rx_desc_pool: rx descriptor pool pointer 531 * 532 * Return: None 533 */ 534 void dp_rx_desc_nbuf_and_pool_free(struct dp_soc *soc, uint32_t pool_id, 535 struct rx_desc_pool *rx_desc_pool); 536 537 /* 538 * dp_rx_desc_nbuf_free() - free the sw rx desc nbufs called during 539 * de-initialization of wifi module. 540 * 541 * @soc: core txrx main context 542 * @pool_id: pool_id which is one of 3 mac_ids 543 * @rx_desc_pool: rx descriptor pool pointer 544 * 545 * Return: None 546 */ 547 void dp_rx_desc_nbuf_free(struct dp_soc *soc, 548 struct rx_desc_pool *rx_desc_pool); 549 550 /* 551 * dp_rx_desc_pool_free() - free the sw rx desc array called during 552 * de-initialization of wifi module. 553 * 554 * @soc: core txrx main context 555 * @rx_desc_pool: rx descriptor pool pointer 556 * 557 * Return: None 558 */ 559 void dp_rx_desc_pool_free(struct dp_soc *soc, 560 struct rx_desc_pool *rx_desc_pool); 561 562 void dp_rx_deliver_raw(struct dp_vdev *vdev, qdf_nbuf_t nbuf_list, 563 struct dp_peer *peer); 564 565 /** 566 * dp_rx_add_to_free_desc_list() - Adds to a local free descriptor list 567 * 568 * @head: pointer to the head of local free list 569 * @tail: pointer to the tail of local free list 570 * @new: new descriptor that is added to the free list 571 * 572 * Return: void: 573 */ 574 static inline 575 void dp_rx_add_to_free_desc_list(union dp_rx_desc_list_elem_t **head, 576 union dp_rx_desc_list_elem_t **tail, 577 struct dp_rx_desc *new) 578 { 579 qdf_assert(head && new); 580 581 new->nbuf = NULL; 582 new->in_use = 0; 583 584 ((union dp_rx_desc_list_elem_t *)new)->next = *head; 585 *head = (union dp_rx_desc_list_elem_t *)new; 586 /* reset tail if head->next is NULL */ 587 if (!*tail || !(*head)->next) 588 *tail = *head; 589 590 } 591 592 uint8_t dp_rx_process_invalid_peer(struct dp_soc *soc, qdf_nbuf_t nbuf, 593 uint8_t mac_id); 594 void 
dp_rx_process_invalid_peer_wrapper(struct dp_soc *soc, 595 qdf_nbuf_t mpdu, bool mpdu_done, uint8_t mac_id); 596 void dp_rx_process_mic_error(struct dp_soc *soc, qdf_nbuf_t nbuf, 597 uint8_t *rx_tlv_hdr, struct dp_peer *peer); 598 void dp_2k_jump_handle(struct dp_soc *soc, qdf_nbuf_t nbuf, uint8_t *rx_tlv_hdr, 599 uint16_t peer_id, uint8_t tid); 600 601 602 #define DP_RX_LIST_APPEND(head, tail, elem) \ 603 do { \ 604 if (!(head)) { \ 605 (head) = (elem); \ 606 QDF_NBUF_CB_RX_NUM_ELEMENTS_IN_LIST(head) = 1;\ 607 } else { \ 608 qdf_nbuf_set_next((tail), (elem)); \ 609 QDF_NBUF_CB_RX_NUM_ELEMENTS_IN_LIST(head)++; \ 610 } \ 611 (tail) = (elem); \ 612 qdf_nbuf_set_next((tail), NULL); \ 613 } while (0) 614 615 /*for qcn9000 emulation the pcie is complete phy and no address restrictions*/ 616 #if !defined(BUILD_X86) || defined(QCA_WIFI_QCN9000) 617 static inline int check_x86_paddr(struct dp_soc *dp_soc, qdf_nbuf_t *rx_netbuf, 618 qdf_dma_addr_t *paddr, struct rx_desc_pool *rx_desc_pool) 619 { 620 return QDF_STATUS_SUCCESS; 621 } 622 #else 623 #define MAX_RETRY 100 624 static inline int check_x86_paddr(struct dp_soc *dp_soc, qdf_nbuf_t *rx_netbuf, 625 qdf_dma_addr_t *paddr, struct rx_desc_pool *rx_desc_pool) 626 { 627 uint32_t nbuf_retry = 0; 628 int32_t ret; 629 const uint32_t x86_phy_addr = 0x50000000; 630 /* 631 * in M2M emulation platforms (x86) the memory below 0x50000000 632 * is reserved for target use, so any memory allocated in this 633 * region should not be used by host 634 */ 635 do { 636 if (qdf_likely(*paddr > x86_phy_addr)) 637 return QDF_STATUS_SUCCESS; 638 else { 639 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO, 640 "phy addr %pK exceeded 0x50000000 trying again", 641 paddr); 642 643 nbuf_retry++; 644 if ((*rx_netbuf)) { 645 qdf_nbuf_unmap_single(dp_soc->osdev, *rx_netbuf, 646 QDF_DMA_FROM_DEVICE); 647 /* Not freeing buffer intentionally. 
648 * Observed that same buffer is getting 649 * re-allocated resulting in longer load time 650 * WMI init timeout. 651 * This buffer is anyway not useful so skip it. 652 **/ 653 } 654 655 *rx_netbuf = qdf_nbuf_alloc(dp_soc->osdev, 656 rx_desc_pool->buf_size, 657 RX_BUFFER_RESERVATION, 658 rx_desc_pool->buf_alignment, 659 FALSE); 660 661 if (qdf_unlikely(!(*rx_netbuf))) 662 return QDF_STATUS_E_FAILURE; 663 664 ret = qdf_nbuf_map_single(dp_soc->osdev, *rx_netbuf, 665 QDF_DMA_FROM_DEVICE); 666 667 if (qdf_unlikely(ret == QDF_STATUS_E_FAILURE)) { 668 qdf_nbuf_free(*rx_netbuf); 669 *rx_netbuf = NULL; 670 continue; 671 } 672 673 *paddr = qdf_nbuf_get_frag_paddr(*rx_netbuf, 0); 674 } 675 } while (nbuf_retry < MAX_RETRY); 676 677 if ((*rx_netbuf)) { 678 qdf_nbuf_unmap_single(dp_soc->osdev, *rx_netbuf, 679 QDF_DMA_FROM_DEVICE); 680 qdf_nbuf_free(*rx_netbuf); 681 } 682 683 return QDF_STATUS_E_FAILURE; 684 } 685 #endif 686 687 /** 688 * dp_rx_cookie_2_link_desc_va() - Converts cookie to a virtual address of 689 * the MSDU Link Descriptor 690 * @soc: core txrx main context 691 * @buf_info: buf_info include cookie that used to lookup virtual address of 692 * link descriptor Normally this is just an index into a per SOC array. 693 * 694 * This is the VA of the link descriptor, that HAL layer later uses to 695 * retrieve the list of MSDU's for a given MPDU. 
696 * 697 * Return: void *: Virtual Address of the Rx descriptor 698 */ 699 static inline 700 void *dp_rx_cookie_2_link_desc_va(struct dp_soc *soc, 701 struct hal_buf_info *buf_info) 702 { 703 void *link_desc_va; 704 uint32_t bank_id = LINK_DESC_COOKIE_BANK_ID(buf_info->sw_cookie); 705 706 707 /* TODO */ 708 /* Add sanity for cookie */ 709 710 link_desc_va = soc->link_desc_banks[bank_id].base_vaddr + 711 (buf_info->paddr - 712 soc->link_desc_banks[bank_id].base_paddr); 713 714 return link_desc_va; 715 } 716 717 /** 718 * dp_rx_cookie_2_mon_link_desc_va() - Converts cookie to a virtual address of 719 * the MSDU Link Descriptor 720 * @pdev: core txrx pdev context 721 * @buf_info: buf_info includes cookie that used to lookup virtual address of 722 * link descriptor. Normally this is just an index into a per pdev array. 723 * 724 * This is the VA of the link descriptor in monitor mode destination ring, 725 * that HAL layer later uses to retrieve the list of MSDU's for a given MPDU. 726 * 727 * Return: void *: Virtual Address of the Rx descriptor 728 */ 729 static inline 730 void *dp_rx_cookie_2_mon_link_desc_va(struct dp_pdev *pdev, 731 struct hal_buf_info *buf_info, 732 int mac_id) 733 { 734 void *link_desc_va; 735 736 /* TODO */ 737 /* Add sanity for cookie */ 738 739 link_desc_va = 740 pdev->soc->mon_link_desc_banks[mac_id][buf_info->sw_cookie] 741 .base_vaddr + 742 (buf_info->paddr - 743 pdev->soc->mon_link_desc_banks[mac_id][buf_info->sw_cookie] 744 .base_paddr); 745 746 return link_desc_va; 747 } 748 749 /** 750 * dp_rx_defrag_concat() - Concatenate the fragments 751 * 752 * @dst: destination pointer to the buffer 753 * @src: source pointer from where the fragment payload is to be copied 754 * 755 * Return: QDF_STATUS 756 */ 757 static inline QDF_STATUS dp_rx_defrag_concat(qdf_nbuf_t dst, qdf_nbuf_t src) 758 { 759 /* 760 * Inside qdf_nbuf_cat, if it is necessary to reallocate dst 761 * to provide space for src, the headroom portion is copied from 762 * the 
original dst buffer to the larger new dst buffer. 763 * (This is needed, because the headroom of the dst buffer 764 * contains the rx desc.) 765 */ 766 if (!qdf_nbuf_cat(dst, src)) { 767 /* 768 * qdf_nbuf_cat does not free the src memory. 769 * Free src nbuf before returning 770 * For failure case the caller takes of freeing the nbuf 771 */ 772 qdf_nbuf_free(src); 773 return QDF_STATUS_SUCCESS; 774 } 775 776 return QDF_STATUS_E_DEFRAG_ERROR; 777 } 778 779 #ifndef FEATURE_WDS 780 static inline QDF_STATUS dp_rx_ast_set_active(struct dp_soc *soc, uint16_t sa_idx, bool is_active) 781 { 782 return QDF_STATUS_SUCCESS; 783 } 784 785 static inline void 786 dp_rx_wds_srcport_learn(struct dp_soc *soc, 787 uint8_t *rx_tlv_hdr, 788 struct dp_peer *ta_peer, 789 qdf_nbuf_t nbuf, 790 struct hal_rx_msdu_metadata msdu_metadata) 791 { 792 } 793 #endif 794 795 /* 796 * dp_rx_desc_dump() - dump the sw rx descriptor 797 * 798 * @rx_desc: sw rx descriptor 799 */ 800 static inline void dp_rx_desc_dump(struct dp_rx_desc *rx_desc) 801 { 802 dp_info("rx_desc->nbuf: %pK, rx_desc->cookie: %d, rx_desc->pool_id: %d, rx_desc->in_use: %d, rx_desc->unmapped: %d", 803 rx_desc->nbuf, rx_desc->cookie, rx_desc->pool_id, 804 rx_desc->in_use, rx_desc->unmapped); 805 } 806 807 /* 808 * check_qwrap_multicast_loopback() - Check if rx packet is a loopback packet. 809 * In qwrap mode, packets originated from 810 * any vdev should not loopback and 811 * should be dropped. 812 * @vdev: vdev on which rx packet is received 813 * @nbuf: rx pkt 814 * 815 */ 816 #if ATH_SUPPORT_WRAP 817 static inline bool check_qwrap_multicast_loopback(struct dp_vdev *vdev, 818 qdf_nbuf_t nbuf) 819 { 820 struct dp_vdev *psta_vdev; 821 struct dp_pdev *pdev = vdev->pdev; 822 uint8_t *data = qdf_nbuf_data(nbuf); 823 824 if (qdf_unlikely(vdev->proxysta_vdev)) { 825 /* In qwrap isolation mode, allow loopback packets as all 826 * packets go to RootAP and Loopback on the mpsta. 
827 */ 828 if (vdev->isolation_vdev) 829 return false; 830 TAILQ_FOREACH(psta_vdev, &pdev->vdev_list, vdev_list_elem) { 831 if (qdf_unlikely(psta_vdev->proxysta_vdev && 832 !qdf_mem_cmp(psta_vdev->mac_addr.raw, 833 &data[QDF_MAC_ADDR_SIZE], 834 QDF_MAC_ADDR_SIZE))) { 835 /* Drop packet if source address is equal to 836 * any of the vdev addresses. 837 */ 838 return true; 839 } 840 } 841 } 842 return false; 843 } 844 #else 845 static inline bool check_qwrap_multicast_loopback(struct dp_vdev *vdev, 846 qdf_nbuf_t nbuf) 847 { 848 return false; 849 } 850 #endif 851 852 #if defined(WLAN_SUPPORT_RX_PROTOCOL_TYPE_TAG) ||\ 853 defined(WLAN_SUPPORT_RX_TAG_STATISTICS) ||\ 854 defined(WLAN_SUPPORT_RX_FLOW_TAG) 855 #include "dp_rx_tag.h" 856 #endif 857 858 #ifndef WLAN_SUPPORT_RX_PROTOCOL_TYPE_TAG 859 /** 860 * dp_rx_update_protocol_tag() - Reads CCE metadata from the RX MSDU end TLV 861 * and set the corresponding tag in QDF packet 862 * @soc: core txrx main context 863 * @vdev: vdev on which the packet is received 864 * @nbuf: QDF pkt buffer on which the protocol tag should be set 865 * @rx_tlv_hdr: rBbase address where the RX TLVs starts 866 * @ring_index: REO ring number, not used for error & monitor ring 867 * @is_reo_exception: flag to indicate if rx from REO ring or exception ring 868 * @is_update_stats: flag to indicate whether to update stats or not 869 * Return: void 870 */ 871 static inline void 872 dp_rx_update_protocol_tag(struct dp_soc *soc, struct dp_vdev *vdev, 873 qdf_nbuf_t nbuf, uint8_t *rx_tlv_hdr, 874 uint16_t ring_index, 875 bool is_reo_exception, bool is_update_stats) 876 { 877 } 878 #endif /* WLAN_SUPPORT_RX_PROTOCOL_TYPE_TAG */ 879 880 #ifndef WLAN_SUPPORT_RX_FLOW_TAG 881 /** 882 * dp_rx_update_flow_tag() - Reads FSE metadata from the RX MSDU end TLV 883 * and set the corresponding tag in QDF packet 884 * @soc: core txrx main context 885 * @vdev: vdev on which the packet is received 886 * @nbuf: QDF pkt buffer on which the protocol tag should be set 
887 * @rx_tlv_hdr: base address where the RX TLVs starts 888 * @is_update_stats: flag to indicate whether to update stats or not 889 * 890 * Return: void 891 */ 892 static inline void 893 dp_rx_update_flow_tag(struct dp_soc *soc, struct dp_vdev *vdev, 894 qdf_nbuf_t nbuf, uint8_t *rx_tlv_hdr, bool update_stats) 895 { 896 } 897 #endif /* WLAN_SUPPORT_RX_FLOW_TAG */ 898 899 #if !defined(WLAN_SUPPORT_RX_PROTOCOL_TYPE_TAG) &&\ 900 !defined(WLAN_SUPPORT_RX_FLOW_TAG) 901 /** 902 * dp_rx_mon_update_protocol_flow_tag() - Performs necessary checks for monitor 903 * mode and then tags appropriate packets 904 * @soc: core txrx main context 905 * @vdev: pdev on which packet is received 906 * @msdu: QDF packet buffer on which the protocol tag should be set 907 * @rx_desc: base address where the RX TLVs start 908 * Return: void 909 */ 910 static inline 911 void dp_rx_mon_update_protocol_flow_tag(struct dp_soc *soc, 912 struct dp_pdev *dp_pdev, 913 qdf_nbuf_t msdu, void *rx_desc) 914 { 915 } 916 #endif /* WLAN_SUPPORT_RX_PROTOCOL_TYPE_TAG || WLAN_SUPPORT_RX_FLOW_TAG */ 917 918 /* 919 * dp_rx_buffers_replenish() - replenish rxdma ring with rx nbufs 920 * called during dp rx initialization 921 * and at the end of dp_rx_process. 922 * 923 * @soc: core txrx main context 924 * @mac_id: mac_id which is one of 3 mac_ids 925 * @dp_rxdma_srng: dp rxdma circular ring 926 * @rx_desc_pool: Pointer to free Rx descriptor pool 927 * @num_req_buffers: number of buffer to be replenished 928 * @desc_list: list of descs if called from dp_rx_process 929 * or NULL during dp rx initialization or out of buffer 930 * interrupt. 
931 * @tail: tail of descs list 932 * Return: return success or failure 933 */ 934 QDF_STATUS dp_rx_buffers_replenish(struct dp_soc *dp_soc, uint32_t mac_id, 935 struct dp_srng *dp_rxdma_srng, 936 struct rx_desc_pool *rx_desc_pool, 937 uint32_t num_req_buffers, 938 union dp_rx_desc_list_elem_t **desc_list, 939 union dp_rx_desc_list_elem_t **tail); 940 941 /* 942 * dp_pdev_rx_buffers_attach() - replenish rxdma ring with rx nbufs 943 * called during dp rx initialization 944 * 945 * @soc: core txrx main context 946 * @mac_id: mac_id which is one of 3 mac_ids 947 * @dp_rxdma_srng: dp rxdma circular ring 948 * @rx_desc_pool: Pointer to free Rx descriptor pool 949 * @num_req_buffers: number of buffer to be replenished 950 * 951 * Return: return success or failure 952 */ 953 QDF_STATUS 954 dp_pdev_rx_buffers_attach(struct dp_soc *dp_soc, uint32_t mac_id, 955 struct dp_srng *dp_rxdma_srng, 956 struct rx_desc_pool *rx_desc_pool, 957 uint32_t num_req_buffers); 958 959 /** 960 * dp_rx_link_desc_return() - Return a MPDU link descriptor to HW 961 * (WBM), following error handling 962 * 963 * @soc: core DP main context 964 * @buf_addr_info: opaque pointer to the REO error ring descriptor 965 * @buf_addr_info: void pointer to the buffer_addr_info 966 * @bm_action: put to idle_list or release to msdu_list 967 * 968 * Return: QDF_STATUS_E_FAILURE for failure else QDF_STATUS_SUCCESS 969 */ 970 QDF_STATUS 971 dp_rx_link_desc_return(struct dp_soc *soc, hal_ring_desc_t ring_desc, 972 uint8_t bm_action); 973 974 /** 975 * dp_rx_link_desc_return_by_addr - Return a MPDU link descriptor to 976 * (WBM) by address 977 * 978 * @soc: core DP main context 979 * @link_desc_addr: link descriptor addr 980 * 981 * Return: QDF_STATUS_E_FAILURE for failure else QDF_STATUS_SUCCESS 982 */ 983 QDF_STATUS 984 dp_rx_link_desc_return_by_addr(struct dp_soc *soc, 985 hal_buff_addrinfo_t link_desc_addr, 986 uint8_t bm_action); 987 988 /** 989 * dp_rxdma_err_process() - RxDMA error processing functionality 990 
 * @int_ctx: pointer to DP interrupt context
 * @soc: core txrx main context
 * @mac_id: mac id which is one of 3 mac_ids
 * @quota: No. of units (packets) that can be serviced in one shot.
 *
 * Return: num of buffers processed
 */
uint32_t
dp_rxdma_err_process(struct dp_intr *int_ctx, struct dp_soc *soc,
		     uint32_t mac_id, uint32_t quota);

void dp_rx_fill_mesh_stats(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
			   uint8_t *rx_tlv_hdr, struct dp_peer *peer);
QDF_STATUS dp_rx_filter_mesh_packets(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
				     uint8_t *rx_tlv_hdr);

int dp_wds_rx_policy_check(uint8_t *rx_tlv_hdr, struct dp_vdev *vdev,
			   struct dp_peer *peer);

qdf_nbuf_t
dp_rx_nbuf_prepare(struct dp_soc *soc, struct dp_pdev *pdev);

/*
 * dp_rx_dump_info_and_assert() - dump RX Ring info and Rx Desc info
 *
 * @soc: core txrx main context
 * @hal_ring_hdl: opaque pointer to the HAL Rx Ring, which will be serviced
 * @ring_desc: opaque pointer to the RX ring descriptor
 * @rx_desc: host rx descriptor
 *
 * Return: void
 */
void dp_rx_dump_info_and_assert(struct dp_soc *soc,
				hal_ring_handle_t hal_ring_hdl,
				hal_ring_desc_t ring_desc,
				struct dp_rx_desc *rx_desc);

void dp_rx_compute_delay(struct dp_vdev *vdev, qdf_nbuf_t nbuf);
#ifdef RX_DESC_DEBUG_CHECK
/**
 * dp_rx_desc_check_magic() - check the magic value in dp_rx_desc
 * @rx_desc: rx descriptor pointer
 *
 * Return: true, if magic is correct, else false.
 */
static inline bool dp_rx_desc_check_magic(struct dp_rx_desc *rx_desc)
{
	if (qdf_unlikely(rx_desc->magic != DP_RX_DESC_MAGIC))
		return false;

	/* clear the magic so a descriptor validates only once */
	rx_desc->magic = 0;
	return true;
}

/**
 * dp_rx_desc_prep() - prepare rx desc
 * @rx_desc: rx descriptor pointer to be prepared
 * @nbuf: nbuf to be associated with rx_desc
 *
 * Note: assumption is that we are associating a nbuf which is mapped
 *
 * Return: none
 */
static inline void dp_rx_desc_prep(struct dp_rx_desc *rx_desc, qdf_nbuf_t nbuf)
{
	rx_desc->magic = DP_RX_DESC_MAGIC;
	rx_desc->nbuf = nbuf;
	rx_desc->unmapped = 0;
}

#else

/* no-op magic check when RX_DESC_DEBUG_CHECK is disabled */
static inline bool dp_rx_desc_check_magic(struct dp_rx_desc *rx_desc)
{
	return true;
}

static inline void dp_rx_desc_prep(struct dp_rx_desc *rx_desc, qdf_nbuf_t nbuf)
{
	rx_desc->nbuf = nbuf;
	rx_desc->unmapped = 0;
}
#endif /* RX_DESC_DEBUG_CHECK */

void dp_rx_process_rxdma_err(struct dp_soc *soc, qdf_nbuf_t nbuf,
			     uint8_t *rx_tlv_hdr, struct dp_peer *peer,
			     uint8_t err_code, uint8_t mac_id);

#ifdef PEER_CACHE_RX_PKTS
/**
 * dp_rx_flush_rx_cached() - flush cached rx frames
 * @peer: peer
 * @drop: set flag to drop frames
 *
 * Return: None
 */
void dp_rx_flush_rx_cached(struct dp_peer *peer, bool drop);
#else
static inline void dp_rx_flush_rx_cached(struct dp_peer *peer, bool drop)
{
}
#endif

#ifndef QCA_MULTIPASS_SUPPORT
/* stub: multipass disabled, frame is never consumed */
static inline
bool dp_rx_multipass_process(struct dp_peer *peer, qdf_nbuf_t nbuf, uint8_t tid)
{
	return false;
}
#else
bool dp_rx_multipass_process(struct dp_peer *peer, qdf_nbuf_t nbuf,
			     uint8_t tid);
#endif

#ifndef WLAN_RX_PKT_CAPTURE_ENH
static inline
void dp_peer_set_rx_capture_enabled(struct dp_peer *peer_handle, bool value)
{
}
#endif

/**
 * dp_rx_deliver_to_stack() - deliver pkts to network stack
 * Caller to hold peer refcount and check for valid peer
 * @soc: soc
 * @vdev: vdev
 * @peer: peer
 * @nbuf_head: skb list head
 * @nbuf_tail: skb list tail
 *
 * Return: None
 */
void dp_rx_deliver_to_stack(struct dp_soc *soc,
			    struct dp_vdev *vdev,
			    struct dp_peer *peer,
			    qdf_nbuf_t nbuf_head,
			    qdf_nbuf_t nbuf_tail);

#endif /* _DP_RX_H */