/*
 * Copyright (c) 2016-2021 The Linux Foundation. All rights reserved.
 * Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */

#ifndef _DP_RX_H
#define _DP_RX_H

#include "hal_rx.h"
#include "dp_peer.h"
#include "dp_internal.h"
#include <qdf_tracepoint.h>
#include "dp_ipa.h"

#ifdef RXDMA_OPTIMIZATION
#ifndef RX_DATA_BUFFER_ALIGNMENT
#define RX_DATA_BUFFER_ALIGNMENT	128
#endif
#ifndef RX_MONITOR_BUFFER_ALIGNMENT
#define RX_MONITOR_BUFFER_ALIGNMENT	128
#endif
#else /* RXDMA_OPTIMIZATION */
#define RX_DATA_BUFFER_ALIGNMENT	4
#define RX_MONITOR_BUFFER_ALIGNMENT	4
#endif /* RXDMA_OPTIMIZATION */

#if defined(WLAN_MAX_PDEVS) && (WLAN_MAX_PDEVS == 1)
#define DP_WBM2SW_RBM(sw0_bm_id)	HAL_RX_BUF_RBM_SW1_BM(sw0_bm_id)
/* RBM value used for re-injecting defragmented packets into REO */
#define DP_DEFRAG_RBM(sw0_bm_id)	HAL_RX_BUF_RBM_SW3_BM(sw0_bm_id)
#endif

#define RX_BUFFER_RESERVATION	0
#ifdef BE_PKTLOG_SUPPORT
#define BUFFER_RESIDUE	1
#define RX_MON_MIN_HEAD_ROOM	64
#endif

#define DP_DEFAULT_NOISEFLOOR	(-96)

#define DP_RX_DESC_MAGIC 0xdec0de

#define dp_rx_alert(params...) QDF_TRACE_FATAL(QDF_MODULE_ID_DP_RX, params)
#define dp_rx_err(params...) QDF_TRACE_ERROR(QDF_MODULE_ID_DP_RX, params)
#define dp_rx_warn(params...) QDF_TRACE_WARN(QDF_MODULE_ID_DP_RX, params)
#define dp_rx_info(params...) \
	__QDF_TRACE_FL(QDF_TRACE_LEVEL_INFO_HIGH, QDF_MODULE_ID_DP_RX, ## params)
#define dp_rx_info_rl(params...) \
	__QDF_TRACE_RL(QDF_TRACE_LEVEL_INFO_HIGH, QDF_MODULE_ID_DP_RX, ## params)
#define dp_rx_debug(params...) QDF_TRACE_DEBUG(QDF_MODULE_ID_DP_RX, params)

/**
 * enum dp_rx_desc_state
 *
 * @RX_DESC_REPLENISHED: rx desc replenished
 * @RX_DESC_IN_FREELIST: rx desc in freelist
 */
enum dp_rx_desc_state {
	RX_DESC_REPLENISHED,
	RX_DESC_IN_FREELIST,
};

#ifndef QCA_HOST_MODE_WIFI_DISABLED
/**
 * struct dp_rx_desc_dbg_info
 *
 * @freelist_caller: name of the function that put the
 *  rx desc in freelist
 * @freelist_ts: timestamp when the rx desc is put in
 *  a freelist
 * @replenish_caller: name of the function that last
 *  replenished the rx desc
 * @replenish_ts: last replenish timestamp
 * @prev_nbuf: previous nbuf info
 * @prev_nbuf_data_addr: previous nbuf data address
 */
struct dp_rx_desc_dbg_info {
	char freelist_caller[QDF_MEM_FUNC_NAME_SIZE];
	uint64_t freelist_ts;
	char replenish_caller[QDF_MEM_FUNC_NAME_SIZE];
	uint64_t replenish_ts;
	qdf_nbuf_t prev_nbuf;
	uint8_t *prev_nbuf_data_addr;
};

#endif /* QCA_HOST_MODE_WIFI_DISABLED */

/**
 * struct dp_rx_desc
 *
 * @nbuf:		VA of the "skb" posted
 * @rx_buf_start:	VA of the original Rx buffer, before
 *			movement of any skb->data pointer
 * @paddr_buf_start:	PA of the original Rx buffer, before
 *			movement of any frag pointer
 * @cookie:		index into the sw array which holds
 *			the sw Rx descriptors
 *			Cookie space is 21 bits:
 *			lower 18 bits -- index
 *			upper 3 bits -- pool_id
 * @pool_id:		pool Id for which this allocated.
 *			Can only be used if there is no flow
 *			steering
 * @chip_id:		chip_id indicating MLO chip_id
 *			valid or used only in case of multi-chip MLO
 * @magic:		magic value used for descriptor sanity checks
 *			(debug builds only)
 * @dbg_info:		debug bookkeeping, see struct dp_rx_desc_dbg_info
 * @in_use:		rx_desc is in use
 * @unmapped:		used to mark rx_desc as unmapped if the corresponding
 *			nbuf is already unmapped
 * @in_err_state:	Nbuf sanity failed for this descriptor.
 * @nbuf_data_addr:	VA of nbuf data posted
 */
struct dp_rx_desc {
	qdf_nbuf_t nbuf;
	uint8_t *rx_buf_start;
	qdf_dma_addr_t paddr_buf_start;
	uint32_t cookie;
	uint8_t pool_id;
	uint8_t chip_id;
#ifdef RX_DESC_DEBUG_CHECK
	uint32_t magic;
	uint8_t *nbuf_data_addr;
	struct dp_rx_desc_dbg_info *dbg_info;
#endif
	uint8_t in_use:1,
		unmapped:1,
		in_err_state:1;
};

#ifndef QCA_HOST_MODE_WIFI_DISABLED
#ifdef ATH_RX_PRI_SAVE
#define DP_RX_TID_SAVE(_nbuf, _tid) \
	(qdf_nbuf_set_priority(_nbuf, _tid))
#else
#define DP_RX_TID_SAVE(_nbuf, _tid)
#endif

/* RX Descriptor Multi Page memory alloc related */
#define DP_RX_DESC_OFFSET_NUM_BITS 8
#define DP_RX_DESC_PAGE_ID_NUM_BITS 8
#define DP_RX_DESC_POOL_ID_NUM_BITS 4

#define DP_RX_DESC_PAGE_ID_SHIFT	DP_RX_DESC_OFFSET_NUM_BITS
#define DP_RX_DESC_POOL_ID_SHIFT \
	(DP_RX_DESC_OFFSET_NUM_BITS + DP_RX_DESC_PAGE_ID_NUM_BITS)
#define RX_DESC_MULTI_PAGE_COOKIE_POOL_ID_MASK \
	(((1 << DP_RX_DESC_POOL_ID_NUM_BITS) - 1) << DP_RX_DESC_POOL_ID_SHIFT)
#define RX_DESC_MULTI_PAGE_COOKIE_PAGE_ID_MASK \
	(((1 << DP_RX_DESC_PAGE_ID_NUM_BITS) - 1) << \
	 DP_RX_DESC_PAGE_ID_SHIFT)
#define RX_DESC_MULTI_PAGE_COOKIE_OFFSET_MASK \
	((1 << DP_RX_DESC_OFFSET_NUM_BITS) - 1)
#define DP_RX_DESC_MULTI_PAGE_COOKIE_GET_POOL_ID(_cookie) \
	(((_cookie) & RX_DESC_MULTI_PAGE_COOKIE_POOL_ID_MASK) >> \
	 DP_RX_DESC_POOL_ID_SHIFT)
#define DP_RX_DESC_MULTI_PAGE_COOKIE_GET_PAGE_ID(_cookie) \
	(((_cookie) & RX_DESC_MULTI_PAGE_COOKIE_PAGE_ID_MASK) >> \
	 DP_RX_DESC_PAGE_ID_SHIFT)
#define DP_RX_DESC_MULTI_PAGE_COOKIE_GET_OFFSET(_cookie) \
	((_cookie) & RX_DESC_MULTI_PAGE_COOKIE_OFFSET_MASK)

#endif /* QCA_HOST_MODE_WIFI_DISABLED */

#define RX_DESC_COOKIE_INDEX_SHIFT	0
#define RX_DESC_COOKIE_INDEX_MASK	0x3ffff /* 18 bits */
#define RX_DESC_COOKIE_POOL_ID_SHIFT	18
#define RX_DESC_COOKIE_POOL_ID_MASK	0x1c0000

#define DP_RX_DESC_COOKIE_MAX \
	(RX_DESC_COOKIE_INDEX_MASK | RX_DESC_COOKIE_POOL_ID_MASK)

#define DP_RX_DESC_COOKIE_POOL_ID_GET(_cookie)		\
	(((_cookie) & RX_DESC_COOKIE_POOL_ID_MASK) >>	\
	 RX_DESC_COOKIE_POOL_ID_SHIFT)

#define DP_RX_DESC_COOKIE_INDEX_GET(_cookie)		\
	(((_cookie) & RX_DESC_COOKIE_INDEX_MASK) >>	\
	 RX_DESC_COOKIE_INDEX_SHIFT)
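
/*
 * Example (illustrative only, not part of the driver API): given the
 * 21-bit SW cookie layout above (lower 18 bits index, upper 3 bits
 * pool id), the accessors recover both fields. The cookie value used
 * here is hypothetical:
 *
 *	uint32_t cookie = 0x40007;	// pool_id 1, index 7
 *	uint8_t pool_id = DP_RX_DESC_COOKIE_POOL_ID_GET(cookie); // 1
 *	uint16_t index = DP_RX_DESC_COOKIE_INDEX_GET(cookie);	 // 7
 */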

#define dp_rx_add_to_free_desc_list(head, tail, new) \
	__dp_rx_add_to_free_desc_list(head, tail, new, __func__)

#define dp_rx_buffers_replenish(soc, mac_id, rxdma_srng, rx_desc_pool, \
				num_buffers, desc_list, tail) \
	__dp_rx_buffers_replenish(soc, mac_id, rxdma_srng, rx_desc_pool, \
				  num_buffers, desc_list, tail, __func__)

#ifdef WLAN_SUPPORT_RX_FISA
/**
 * dp_rx_set_hdr_pad() - set l3 padding in nbuf cb
 * @nbuf: pkt skb pointer
 * @l3_padding: l3 padding
 *
 * Return: None
 */
static inline
void dp_rx_set_hdr_pad(qdf_nbuf_t nbuf, uint32_t l3_padding)
{
	QDF_NBUF_CB_RX_PACKET_L3_HDR_PAD(nbuf) = l3_padding;
}
#else
static inline
void dp_rx_set_hdr_pad(qdf_nbuf_t nbuf, uint32_t l3_padding)
{
}
#endif

#ifdef DP_RX_SPECIAL_FRAME_NEED
/**
 * dp_rx_is_special_frame() - check whether the RX frame is one of the
 *			      needed special frame types
 * @nbuf: RX skb pointer
 * @frame_mask: the mask for special frames needed
 *
 * Check whether the RX frame type matches the given mask
 *
 * Return: true - special frame needed, false - no
 */
static inline
bool dp_rx_is_special_frame(qdf_nbuf_t nbuf, uint32_t frame_mask)
{
	if (((frame_mask & FRAME_MASK_IPV4_ARP) &&
	     qdf_nbuf_is_ipv4_arp_pkt(nbuf)) ||
	    ((frame_mask & FRAME_MASK_IPV4_DHCP) &&
	     qdf_nbuf_is_ipv4_dhcp_pkt(nbuf)) ||
	    ((frame_mask & FRAME_MASK_IPV4_EAPOL) &&
	     qdf_nbuf_is_ipv4_eapol_pkt(nbuf)) ||
	    ((frame_mask & FRAME_MASK_IPV6_DHCP) &&
	     qdf_nbuf_is_ipv6_dhcp_pkt(nbuf)))
		return true;

	return false;
}

/**
 * dp_rx_deliver_special_frame() - Deliver the RX special frame to stack
 *				   if it matches the mask
 * @soc: Datapath soc handler
 * @peer: pointer to DP peer
 * @nbuf: pointer to the skb of RX frame
 * @frame_mask: the mask for special frames needed
 * @rx_tlv_hdr: start of rx tlv header
 *
 * Note: Msdu_len must have been stored in QDF_NBUF_CB_RX_PKT_LEN(nbuf) and
 * a single nbuf is expected.
 *
 * Return: true - nbuf has been delivered to stack, false - not.
 */
bool dp_rx_deliver_special_frame(struct dp_soc *soc, struct dp_txrx_peer *peer,
				 qdf_nbuf_t nbuf, uint32_t frame_mask,
				 uint8_t *rx_tlv_hdr);
#else
static inline
bool dp_rx_is_special_frame(qdf_nbuf_t nbuf, uint32_t frame_mask)
{
	return false;
}

static inline
bool dp_rx_deliver_special_frame(struct dp_soc *soc, struct dp_txrx_peer *peer,
				 qdf_nbuf_t nbuf, uint32_t frame_mask,
				 uint8_t *rx_tlv_hdr)
{
	return false;
}
#endif
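
/*
 * Example (illustrative only): a caller that wants ARP and EAPOL frames
 * delivered even before the peer is fully authorized could combine the
 * masks and test the frame first. Per the note above, msdu_len must
 * already be stored in QDF_NBUF_CB_RX_PKT_LEN(nbuf):
 *
 *	uint32_t mask = FRAME_MASK_IPV4_ARP | FRAME_MASK_IPV4_EAPOL;
 *
 *	if (dp_rx_is_special_frame(nbuf, mask))
 *		dp_rx_deliver_special_frame(soc, txrx_peer, nbuf, mask,
 *					    rx_tlv_hdr);
 */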

#ifdef FEATURE_RX_LINKSPEED_ROAM_TRIGGER
/**
 * dp_rx_data_is_specific() - Used to exclude specific frames
 *			      not practical for getting rx
 *			      stats like rate, mcs, nss, etc.
 * @hal_soc_hdl: soc handler
 * @rx_tlv_hdr: rx tlv header
 * @nbuf: RX skb pointer
 *
 * Return: true - a specific frame not suitable
 *		  for getting rx stats from it.
 *	   false - a common frame suitable for
 *		   getting rx stats from it.
 */
static inline
bool dp_rx_data_is_specific(hal_soc_handle_t hal_soc_hdl,
			    uint8_t *rx_tlv_hdr,
			    qdf_nbuf_t nbuf)
{
	if (qdf_unlikely(qdf_nbuf_is_da_mcbc(nbuf)))
		return true;

	if (!hal_rx_tlv_first_mpdu_get(hal_soc_hdl, rx_tlv_hdr))
		return true;

	if (!hal_rx_msdu_end_first_msdu_get(hal_soc_hdl, rx_tlv_hdr))
		return true;

	/* ARP, EAPOL is neither IPV6 ETH nor IPV4 ETH from L3 level */
	if (qdf_likely(hal_rx_tlv_l3_type_get(hal_soc_hdl, rx_tlv_hdr) ==
		       QDF_NBUF_TRAC_IPV4_ETH_TYPE)) {
		if (qdf_nbuf_is_ipv4_dhcp_pkt(nbuf))
			return true;
	} else if (qdf_likely(hal_rx_tlv_l3_type_get(hal_soc_hdl, rx_tlv_hdr) ==
			      QDF_NBUF_TRAC_IPV6_ETH_TYPE)) {
		if (qdf_nbuf_is_ipv6_dhcp_pkt(nbuf))
			return true;
	} else {
		return true;
	}

	return false;
}
#else
static inline
bool dp_rx_data_is_specific(hal_soc_handle_t hal_soc_hdl,
			    uint8_t *rx_tlv_hdr,
			    qdf_nbuf_t nbuf)
{
	/*
	 * default return is true to make sure that rx stats
	 * will not be handled when this feature is disabled
	 */
	return true;
}
#endif /* FEATURE_RX_LINKSPEED_ROAM_TRIGGER */

#ifndef QCA_HOST_MODE_WIFI_DISABLED
#ifdef DP_RX_DISABLE_NDI_MDNS_FORWARDING
static inline
bool dp_rx_check_ndi_mdns_fwding(struct dp_txrx_peer *ta_txrx_peer,
				 qdf_nbuf_t nbuf)
{
	if (ta_txrx_peer->vdev->opmode == wlan_op_mode_ndi &&
	    qdf_nbuf_is_ipv6_mdns_pkt(nbuf)) {
		DP_PEER_PER_PKT_STATS_INC(ta_txrx_peer,
					  rx.intra_bss.mdns_no_fwd, 1);
		return false;
	}
	return true;
}
#else
static inline
bool dp_rx_check_ndi_mdns_fwding(struct dp_txrx_peer *ta_txrx_peer,
				 qdf_nbuf_t nbuf)
{
	return true;
}
#endif
#endif /* QCA_HOST_MODE_WIFI_DISABLED */

/* DOC: Offset to obtain LLC hdr
 *
 * In the case of Wifi parse error
 * to reach LLC header from beginning
 * of VLAN tag we need to skip 8 bytes.
 * Vlan_tag(4) + length(2) + length added
 * by HW(2) = 8 bytes.
 */
#define DP_SKIP_VLAN	8

#ifndef QCA_HOST_MODE_WIFI_DISABLED

/**
 * struct dp_rx_cached_buf - rx cached buffer
 * @node: linked list node
 * @buf: skb buffer
 */
struct dp_rx_cached_buf {
	qdf_list_node_t node;
	qdf_nbuf_t buf;
};

#endif /* QCA_HOST_MODE_WIFI_DISABLED */

/*
 * dp_rx_xor_block() - xor block of data
 * @b: destination data block
 * @a: source data block
 * @len: length of the data to process
 *
 * Return: None
 */
static inline void dp_rx_xor_block(uint8_t *b, const uint8_t *a, qdf_size_t len)
{
	qdf_size_t i;

	for (i = 0; i < len; i++)
		b[i] ^= a[i];
}

/*
 * dp_rx_rotl() - rotate the bits left
 * @val: unsigned integer input value
 * @bits: number of bits
 *
 * Return: Integer with left rotated by number of 'bits'
 */
static inline uint32_t dp_rx_rotl(uint32_t val, int bits)
{
	return (val << bits) | (val >> (32 - bits));
}

/*
 * dp_rx_rotr() - rotate the bits right
 * @val: unsigned integer input value
 * @bits: number of bits
 *
 * Return: Integer with right rotated by number of 'bits'
 */
static inline uint32_t dp_rx_rotr(uint32_t val, int bits)
{
	return (val >> bits) | (val << (32 - bits));
}

/*
 * dp_set_rx_queue() - set queue_mapping in skb
 * @nbuf: skb
 * @queue_id: rx queue_id
 *
 * Return: void
 */
#ifdef QCA_OL_RX_MULTIQ_SUPPORT
static inline void dp_set_rx_queue(qdf_nbuf_t nbuf, uint8_t queue_id)
{
	qdf_nbuf_record_rx_queue(nbuf, queue_id);
}
#else
static inline void dp_set_rx_queue(qdf_nbuf_t nbuf, uint8_t queue_id)
{
}
#endif

/*
 * dp_rx_xswap() - swap the adjacent bytes within each 16-bit half
 * @val: unsigned integer input value
 *
 * Return: Integer with adjacent bytes swapped
 */
static inline uint32_t dp_rx_xswap(uint32_t val)
{
	return ((val & 0x00ff00ff) << 8) | ((val & 0xff00ff00) >> 8);
}

/*
 * dp_rx_get_le32_split() - assemble 32 bits from four little endian bytes
 * @b0: byte 0
 * @b1: byte 1
 * @b2: byte 2
 * @b3: byte 3
 *
 * Return: Integer assembled from the split little endian bytes
 */
static inline uint32_t dp_rx_get_le32_split(uint8_t b0, uint8_t b1, uint8_t b2,
					    uint8_t b3)
{
	return b0 | (b1 << 8) | (b2 << 16) | (b3 << 24);
}

/*
 * dp_rx_get_le32() - get little endian 32 bits
 * @p: source byte array
 *
 * Return: Integer with little endian 32 bits
 */
static inline uint32_t dp_rx_get_le32(const uint8_t *p)
{
	return dp_rx_get_le32_split(p[0], p[1], p[2], p[3]);
}

/*
 * dp_rx_put_le32() - put little endian 32 bits
 * @p: destination char array
 * @v: source 32-bit integer
 *
 * Return: None
 */
static inline void dp_rx_put_le32(uint8_t *p, uint32_t v)
{
	p[0] = (v) & 0xff;
	p[1] = (v >> 8) & 0xff;
	p[2] = (v >> 16) & 0xff;
	p[3] = (v >> 24) & 0xff;
}

/* Extract Michael MIC block of data */
#define dp_rx_michael_block(l, r) \
	do { \
		r ^= dp_rx_rotl(l, 17); \
		l += r; \
		r ^= dp_rx_xswap(l); \
		l += r; \
		r ^= dp_rx_rotl(l, 3); \
		l += r; \
		r ^= dp_rx_rotr(l, 2); \
		l += r; \
	} while (0)
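
/*
 * Example (illustrative only): one round of the Michael block function
 * over two 32-bit words loaded from, and stored back to, a byte buffer
 * using the little-endian helpers above. 'data' is a hypothetical
 * pointer to at least 8 bytes:
 *
 *	uint32_t l = dp_rx_get_le32(data);	// first 4 bytes
 *	uint32_t r = dp_rx_get_le32(data + 4);	// next 4 bytes
 *
 *	dp_rx_michael_block(l, r);		// updates l and r in place
 *
 *	dp_rx_put_le32(data, l);
 *	dp_rx_put_le32(data + 4, r);
 */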

/**
 * union dp_rx_desc_list_elem_t
 *
 * @next: Next pointer to form free list
 * @rx_desc: DP Rx descriptor
 */
union dp_rx_desc_list_elem_t {
	union dp_rx_desc_list_elem_t *next;
	struct dp_rx_desc rx_desc;
};

#ifdef RX_DESC_MULTI_PAGE_ALLOC
/**
 * dp_rx_desc_find() - find dp rx descriptor from page ID and offset
 * @page_id: Page ID
 * @offset: Offset of the descriptor element
 * @rx_pool: RX descriptor pool in which to look the element up
 *
 * Return: RX descriptor element
 */
union dp_rx_desc_list_elem_t *dp_rx_desc_find(uint16_t page_id, uint16_t offset,
					      struct rx_desc_pool *rx_pool);

static inline
struct dp_rx_desc *dp_get_rx_desc_from_cookie(struct dp_soc *soc,
					      struct rx_desc_pool *pool,
					      uint32_t cookie)
{
	uint8_t pool_id = DP_RX_DESC_MULTI_PAGE_COOKIE_GET_POOL_ID(cookie);
	uint16_t page_id = DP_RX_DESC_MULTI_PAGE_COOKIE_GET_PAGE_ID(cookie);
	uint8_t offset = DP_RX_DESC_MULTI_PAGE_COOKIE_GET_OFFSET(cookie);
	struct rx_desc_pool *rx_desc_pool;
	union dp_rx_desc_list_elem_t *rx_desc_elem;

	if (qdf_unlikely(pool_id >= MAX_PDEV_CNT))
		return NULL;

	rx_desc_pool = &pool[pool_id];
	rx_desc_elem = (union dp_rx_desc_list_elem_t *)
		(rx_desc_pool->desc_pages.cacheable_pages[page_id] +
		 rx_desc_pool->elem_size * offset);

	return &rx_desc_elem->rx_desc;
}

/**
 * dp_rx_cookie_2_va_rxdma_buf() - Converts cookie to a virtual address of
 *				   the Rx descriptor on Rx DMA source ring buffer
 * @soc: core txrx main context
 * @cookie: cookie used to lookup virtual address
 *
 * Return: Pointer to the Rx descriptor
 */
static inline
struct dp_rx_desc *dp_rx_cookie_2_va_rxdma_buf(struct dp_soc *soc,
					       uint32_t cookie)
{
	return dp_get_rx_desc_from_cookie(soc, &soc->rx_desc_buf[0], cookie);
}

/**
 * dp_rx_cookie_2_va_mon_buf() - Converts cookie to a virtual address of
 *				 the Rx descriptor on monitor ring buffer
 * @soc: core txrx main context
 * @cookie: cookie used to lookup virtual address
 *
 * Return: Pointer to the Rx descriptor
 */
static inline
struct dp_rx_desc *dp_rx_cookie_2_va_mon_buf(struct dp_soc *soc,
					     uint32_t cookie)
{
	return dp_get_rx_desc_from_cookie(soc, &soc->rx_desc_mon[0], cookie);
}

/**
 * dp_rx_cookie_2_va_mon_status() - Converts cookie to a virtual address of
 *				    the Rx descriptor on monitor status ring buffer
 * @soc: core txrx main context
 * @cookie: cookie used to lookup virtual address
 *
 * Return: Pointer to the Rx descriptor
 */
static inline
struct dp_rx_desc *dp_rx_cookie_2_va_mon_status(struct dp_soc *soc,
						uint32_t cookie)
{
	return dp_get_rx_desc_from_cookie(soc, &soc->rx_desc_status[0], cookie);
}
#else

void dp_rx_desc_pool_init(struct dp_soc *soc, uint32_t pool_id,
			  uint32_t pool_size,
			  struct rx_desc_pool *rx_desc_pool);

/**
 * dp_rx_cookie_2_va_rxdma_buf() - Converts cookie to a virtual address of
 *				   the Rx descriptor on Rx DMA source ring buffer
 * @soc: core txrx main context
 * @cookie: cookie used to lookup virtual address
 *
 * Return: void *: Virtual Address of the Rx descriptor
 */
static inline
void *dp_rx_cookie_2_va_rxdma_buf(struct dp_soc *soc, uint32_t cookie)
{
	uint8_t pool_id = DP_RX_DESC_COOKIE_POOL_ID_GET(cookie);
	uint16_t index = DP_RX_DESC_COOKIE_INDEX_GET(cookie);
	struct rx_desc_pool *rx_desc_pool;

	if (qdf_unlikely(pool_id >= MAX_RXDESC_POOLS))
		return NULL;

	rx_desc_pool = &soc->rx_desc_buf[pool_id];

	if (qdf_unlikely(index >= rx_desc_pool->pool_size))
		return NULL;

	return &rx_desc_pool->array[index].rx_desc;
}
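
/*
 * Example (illustrative only): a reap loop typically pulls the SW
 * cookie out of the ring descriptor and converts it back to the host
 * rx descriptor; the NULL check matters because the pool id or index
 * in the cookie may fail the sanity checks above:
 *
 *	struct dp_rx_desc *rx_desc =
 *		dp_rx_cookie_2_va_rxdma_buf(soc, cookie);
 *
 *	if (qdf_unlikely(!rx_desc))
 *		return;	// invalid pool id or index in cookie
 */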

/**
 * dp_rx_cookie_2_va_mon_buf() - Converts cookie to a virtual address of
 *				 the Rx descriptor on monitor ring buffer
 * @soc: core txrx main context
 * @cookie: cookie used to lookup virtual address
 *
 * Return: void *: Virtual Address of the Rx descriptor
 */
static inline
void *dp_rx_cookie_2_va_mon_buf(struct dp_soc *soc, uint32_t cookie)
{
	uint8_t pool_id = DP_RX_DESC_COOKIE_POOL_ID_GET(cookie);
	uint16_t index = DP_RX_DESC_COOKIE_INDEX_GET(cookie);
	/* TODO: Add sanity for pool_id & index */
	return &(soc->rx_desc_mon[pool_id].array[index].rx_desc);
}

/**
 * dp_rx_cookie_2_va_mon_status() - Converts cookie to a virtual address of
 *				    the Rx descriptor on monitor status ring buffer
 * @soc: core txrx main context
 * @cookie: cookie used to lookup virtual address
 *
 * Return: void *: Virtual Address of the Rx descriptor
 */
static inline
void *dp_rx_cookie_2_va_mon_status(struct dp_soc *soc, uint32_t cookie)
{
	uint8_t pool_id = DP_RX_DESC_COOKIE_POOL_ID_GET(cookie);
	uint16_t index = DP_RX_DESC_COOKIE_INDEX_GET(cookie);
	/* TODO: Add sanity for pool_id & index */
	return &(soc->rx_desc_status[pool_id].array[index].rx_desc);
}
#endif /* RX_DESC_MULTI_PAGE_ALLOC */

#ifndef QCA_HOST_MODE_WIFI_DISABLED

static inline bool dp_rx_check_ap_bridge(struct dp_vdev *vdev)
{
	return vdev->ap_bridge_enabled;
}

#ifdef DP_RX_DESC_COOKIE_INVALIDATE
static inline QDF_STATUS
dp_rx_cookie_check_and_invalidate(hal_ring_desc_t ring_desc)
{
	if (qdf_unlikely(HAL_RX_REO_BUF_COOKIE_INVALID_GET(ring_desc)))
		return QDF_STATUS_E_FAILURE;

	HAL_RX_REO_BUF_COOKIE_INVALID_SET(ring_desc);
	return QDF_STATUS_SUCCESS;
}

/**
 * dp_rx_cookie_reset_invalid_bit() - Reset the invalid bit of the cookie
 *				      field in ring descriptor
 * @ring_desc: ring descriptor
 *
 * Return: None
 */
static inline void
dp_rx_cookie_reset_invalid_bit(hal_ring_desc_t ring_desc)
{
	HAL_RX_REO_BUF_COOKIE_INVALID_RESET(ring_desc);
}
#else
static inline QDF_STATUS
dp_rx_cookie_check_and_invalidate(hal_ring_desc_t ring_desc)
{
	return QDF_STATUS_SUCCESS;
}

static inline void
dp_rx_cookie_reset_invalid_bit(hal_ring_desc_t ring_desc)
{
}
#endif

#endif /* QCA_HOST_MODE_WIFI_DISABLED */

#if defined(RX_DESC_MULTI_PAGE_ALLOC) && \
	defined(DP_WAR_VALIDATE_RX_ERR_MSDU_COOKIE)
/**
 * dp_rx_is_sw_cookie_valid() - check whether SW cookie valid
 * @soc: dp soc ref
 * @cookie: Rx buf SW cookie value
 *
 * Return: true if cookie is valid else false
 */
static inline bool dp_rx_is_sw_cookie_valid(struct dp_soc *soc,
					    uint32_t cookie)
{
	uint8_t pool_id = DP_RX_DESC_MULTI_PAGE_COOKIE_GET_POOL_ID(cookie);
	uint16_t page_id = DP_RX_DESC_MULTI_PAGE_COOKIE_GET_PAGE_ID(cookie);
	uint8_t offset = DP_RX_DESC_MULTI_PAGE_COOKIE_GET_OFFSET(cookie);
	struct rx_desc_pool *rx_desc_pool;

	if (qdf_unlikely(pool_id >= MAX_PDEV_CNT))
		goto fail;

	rx_desc_pool = &soc->rx_desc_buf[pool_id];

	if (page_id >= rx_desc_pool->desc_pages.num_pages ||
	    offset >= rx_desc_pool->desc_pages.num_element_per_page)
		goto fail;

	return true;

fail:
	DP_STATS_INC(soc, rx.err.invalid_cookie, 1);
	return false;
}
#else
/**
 * dp_rx_is_sw_cookie_valid() - check whether SW cookie valid
 * @soc: dp soc ref
 * @cookie: Rx buf SW cookie value
 *
 * When multi page alloc is disabled SW cookie validity is
 * checked while fetching the Rx descriptor, so no need to check here
 *
 * Return: true if cookie is valid else false
 */
static inline bool dp_rx_is_sw_cookie_valid(struct dp_soc *soc,
					    uint32_t cookie)
{
	return true;
}
#endif

QDF_STATUS dp_rx_desc_pool_is_allocated(struct rx_desc_pool *rx_desc_pool);
QDF_STATUS dp_rx_desc_pool_alloc(struct dp_soc *soc,
				 uint32_t pool_size,
				 struct rx_desc_pool *rx_desc_pool);

void dp_rx_desc_pool_init(struct dp_soc *soc, uint32_t pool_id,
			  uint32_t pool_size,
			  struct rx_desc_pool *rx_desc_pool);

void dp_rx_add_desc_list_to_free_list(struct dp_soc *soc,
				      union dp_rx_desc_list_elem_t **local_desc_list,
				      union dp_rx_desc_list_elem_t **tail,
				      uint16_t pool_id,
				      struct rx_desc_pool *rx_desc_pool);

uint16_t dp_rx_get_free_desc_list(struct dp_soc *soc, uint32_t pool_id,
				  struct rx_desc_pool *rx_desc_pool,
				  uint16_t num_descs,
				  union dp_rx_desc_list_elem_t **desc_list,
				  union dp_rx_desc_list_elem_t **tail);

QDF_STATUS dp_rx_pdev_desc_pool_alloc(struct dp_pdev *pdev);
void dp_rx_pdev_desc_pool_free(struct dp_pdev *pdev);

QDF_STATUS dp_rx_pdev_desc_pool_init(struct dp_pdev *pdev);
void dp_rx_pdev_desc_pool_deinit(struct dp_pdev *pdev);
void dp_rx_desc_pool_deinit(struct dp_soc *soc,
			    struct rx_desc_pool *rx_desc_pool,
			    uint32_t pool_id);

QDF_STATUS dp_rx_pdev_attach(struct dp_pdev *pdev);
QDF_STATUS dp_rx_pdev_buffers_alloc(struct dp_pdev *pdev);
void dp_rx_pdev_buffers_free(struct dp_pdev *pdev);

void dp_rx_pdev_detach(struct dp_pdev *pdev);

void dp_print_napi_stats(struct dp_soc *soc);

/**
 * dp_rx_vdev_detach() - detach vdev from dp rx
 * @vdev: virtual device instance
 *
 * Return: QDF_STATUS_SUCCESS: success
 *	   QDF_STATUS_E_RESOURCES: Error return
 */
QDF_STATUS dp_rx_vdev_detach(struct dp_vdev *vdev);

#ifndef QCA_HOST_MODE_WIFI_DISABLED

uint32_t
dp_rx_process(struct dp_intr *int_ctx, hal_ring_handle_t hal_ring_hdl,
	      uint8_t reo_ring_num,
	      uint32_t quota);

/**
 * dp_rx_err_process() - Processes error frames routed to REO error ring
 * @int_ctx: pointer to DP interrupt context
 * @soc: core txrx main context
 * @hal_ring_hdl: opaque pointer to the HAL Rx Error Ring, which will be serviced
 * @quota: No. of units (packets) that can be serviced in one shot.
 *
 * This function implements error processing and top level demultiplexer
 * for all the frames routed to REO error ring.
 *
 * Return: uint32_t: No. of elements processed
 */
uint32_t dp_rx_err_process(struct dp_intr *int_ctx, struct dp_soc *soc,
			   hal_ring_handle_t hal_ring_hdl, uint32_t quota);

/**
 * dp_rx_wbm_err_process() - Processes error frames routed to WBM release ring
 * @int_ctx: pointer to DP interrupt context
 * @soc: core txrx main context
 * @hal_ring_hdl: opaque pointer to the HAL Rx Error Ring, which will be serviced
 * @quota: No. of units (packets) that can be serviced in one shot.
 *
 * This function implements error processing and top level demultiplexer
 * for all the frames routed to WBM2HOST sw release ring.
 *
 * Return: uint32_t: No. of elements processed
 */
uint32_t
dp_rx_wbm_err_process(struct dp_intr *int_ctx, struct dp_soc *soc,
		      hal_ring_handle_t hal_ring_hdl, uint32_t quota);

/**
 * dp_rx_sg_create() - create a frag_list for MSDUs which are spread across
 *		       multiple nbufs.
 * @soc: core txrx main context
 * @nbuf: pointer to the first msdu of an amsdu.
 *
 * This function implements the creation of RX frag_list for cases
 * where an MSDU is spread across multiple nbufs.
 *
 * Return: returns the head nbuf which contains complete frag_list.
 */
qdf_nbuf_t dp_rx_sg_create(struct dp_soc *soc, qdf_nbuf_t nbuf);

/*
 * dp_rx_desc_nbuf_and_pool_free() - free the sw rx desc pool called during
 *				     de-initialization of wifi module.
 *
 * @soc: core txrx main context
 * @pool_id: pool_id which is one of 3 mac_ids
 * @rx_desc_pool: rx descriptor pool pointer
 *
 * Return: None
 */
void dp_rx_desc_nbuf_and_pool_free(struct dp_soc *soc, uint32_t pool_id,
				   struct rx_desc_pool *rx_desc_pool);

#endif /* QCA_HOST_MODE_WIFI_DISABLED */

/*
 * dp_rx_desc_nbuf_free() - free the sw rx desc nbufs called during
 *			    de-initialization of wifi module.
 *
 * @soc: core txrx main context
 * @rx_desc_pool: rx descriptor pool pointer
 *
 * Return: None
 */
void dp_rx_desc_nbuf_free(struct dp_soc *soc,
			  struct rx_desc_pool *rx_desc_pool);

#ifdef DP_RX_MON_MEM_FRAG
/*
 * dp_rx_desc_frag_free() - free the sw rx desc frag called during
 *			    de-initialization of wifi module.
 *
 * @soc: core txrx main context
 * @rx_desc_pool: rx descriptor pool pointer
 *
 * Return: None
 */
void dp_rx_desc_frag_free(struct dp_soc *soc,
			  struct rx_desc_pool *rx_desc_pool);
#else
static inline
void dp_rx_desc_frag_free(struct dp_soc *soc,
			  struct rx_desc_pool *rx_desc_pool)
{
}
#endif

/*
 * dp_rx_desc_pool_free() - free the sw rx desc array called during
 *			    de-initialization of wifi module.
 *
 * @soc: core txrx main context
 * @rx_desc_pool: rx descriptor pool pointer
 *
 * Return: None
 */
void dp_rx_desc_pool_free(struct dp_soc *soc,
			  struct rx_desc_pool *rx_desc_pool);

void dp_rx_deliver_raw(struct dp_vdev *vdev, qdf_nbuf_t nbuf_list,
		       struct dp_txrx_peer *peer);

#ifdef RX_DESC_LOGGING
/*
 * dp_rx_desc_alloc_dbg_info() - Alloc memory for rx descriptor debug
 *				 structure
 * @rx_desc: rx descriptor pointer
 *
 * Return: None
 */
static inline
void dp_rx_desc_alloc_dbg_info(struct dp_rx_desc *rx_desc)
{
	rx_desc->dbg_info = qdf_mem_malloc(sizeof(struct dp_rx_desc_dbg_info));
}

/*
 * dp_rx_desc_free_dbg_info() - Free rx descriptor debug
 *				structure memory
 * @rx_desc: rx descriptor pointer
 *
 * Return: None
 */
static inline
void dp_rx_desc_free_dbg_info(struct dp_rx_desc *rx_desc)
{
	qdf_mem_free(rx_desc->dbg_info);
}

/*
 * dp_rx_desc_update_dbg_info() - Update rx descriptor debug info
 *				  structure memory
 * @rx_desc: rx descriptor pointer
 * @func_name: name of the calling function
 * @flag: RX_DESC_REPLENISHED or RX_DESC_IN_FREELIST
 *
 * Return: None
 */
static inline
void dp_rx_desc_update_dbg_info(struct dp_rx_desc *rx_desc,
				const char *func_name, uint8_t flag)
{
	struct dp_rx_desc_dbg_info *info = rx_desc->dbg_info;

	if (!info)
		return;

	if (flag == RX_DESC_REPLENISHED) {
		qdf_str_lcopy(info->replenish_caller, func_name,
			      QDF_MEM_FUNC_NAME_SIZE);
		info->replenish_ts = qdf_get_log_timestamp();
	} else {
		qdf_str_lcopy(info->freelist_caller, func_name,
			      QDF_MEM_FUNC_NAME_SIZE);
		info->freelist_ts = qdf_get_log_timestamp();
		info->prev_nbuf = rx_desc->nbuf;
		info->prev_nbuf_data_addr = rx_desc->nbuf_data_addr;
		rx_desc->nbuf_data_addr = NULL;
	}
}
#else

static inline
void dp_rx_desc_alloc_dbg_info(struct dp_rx_desc *rx_desc)
{
}

static inline
void dp_rx_desc_free_dbg_info(struct dp_rx_desc *rx_desc)
{
}

static inline
void dp_rx_desc_update_dbg_info(struct dp_rx_desc *rx_desc,
				const char *func_name, uint8_t flag)
{
}
#endif /* RX_DESC_LOGGING */

/**
 * dp_rx_add_to_free_desc_list() - Adds to a local free descriptor list
 *
 * @head: pointer to the head of local free list
 * @tail: pointer to the tail of local free list
 * @new: new descriptor that is added to the free list
 * @func_name: caller func name
 *
 * Return: void
 */
static inline
void __dp_rx_add_to_free_desc_list(union dp_rx_desc_list_elem_t **head,
				   union dp_rx_desc_list_elem_t **tail,
				   struct dp_rx_desc *new, const char *func_name)
{
	qdf_assert(head && new);

	dp_rx_desc_update_dbg_info(new, func_name, RX_DESC_IN_FREELIST);

	new->nbuf = NULL;
	new->in_use = 0;

	((union dp_rx_desc_list_elem_t *)new)->next = *head;
	*head = (union dp_rx_desc_list_elem_t *)new;
	/* reset tail if head->next is NULL */
	if (!*tail || !(*head)->next)
		*tail = *head;
}
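
/*
 * Example (illustrative only): descriptors reaped in dp_rx_process()
 * are strung onto a local free list and handed back to the pool in one
 * batch; head/tail start out NULL and are threaded by the helper above:
 *
 *	union dp_rx_desc_list_elem_t *head = NULL;
 *	union dp_rx_desc_list_elem_t *tail = NULL;
 *
 *	dp_rx_add_to_free_desc_list(&head, &tail, rx_desc);
 */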

uint8_t dp_rx_process_invalid_peer(struct dp_soc *soc, qdf_nbuf_t nbuf,
				   uint8_t mac_id);
void dp_rx_process_invalid_peer_wrapper(struct dp_soc *soc,
					qdf_nbuf_t mpdu, bool mpdu_done,
					uint8_t mac_id);
void dp_rx_process_mic_error(struct dp_soc *soc, qdf_nbuf_t nbuf,
			     uint8_t *rx_tlv_hdr, struct dp_txrx_peer *peer);
void dp_2k_jump_handle(struct dp_soc *soc, qdf_nbuf_t nbuf, uint8_t *rx_tlv_hdr,
		       uint16_t peer_id, uint8_t tid);

#define DP_RX_HEAD_APPEND(head, elem) \
	do { \
		qdf_nbuf_set_next((elem), (head)); \
		(head) = (elem); \
	} while (0)

#define DP_RX_LIST_APPEND(head, tail, elem) \
	do { \
		if (!(head)) { \
			(head) = (elem); \
			QDF_NBUF_CB_RX_NUM_ELEMENTS_IN_LIST(head) = 1; \
		} else { \
			qdf_nbuf_set_next((tail), (elem)); \
			QDF_NBUF_CB_RX_NUM_ELEMENTS_IN_LIST(head)++; \
		} \
		(tail) = (elem); \
		qdf_nbuf_set_next((tail), NULL); \
	} while (0)

#define DP_RX_MERGE_TWO_LIST(phead, ptail, chead, ctail) \
	do { \
		if (!(phead)) { \
			(phead) = (chead); \
		} else { \
			qdf_nbuf_set_next((ptail), (chead)); \
			QDF_NBUF_CB_RX_NUM_ELEMENTS_IN_LIST(phead) += \
				QDF_NBUF_CB_RX_NUM_ELEMENTS_IN_LIST(chead); \
		} \
		(ptail) = (ctail); \
		qdf_nbuf_set_next((ptail), NULL); \
	} while (0)
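
/*
 * Example (illustrative only): building a delivery list of nbufs with
 * the macro above; the element count is maintained in the head nbuf's
 * control block by the macro itself:
 *
 *	qdf_nbuf_t deliver_head = NULL;
 *	qdf_nbuf_t deliver_tail = NULL;
 *
 *	DP_RX_LIST_APPEND(deliver_head, deliver_tail, nbuf);
 */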

#if defined(QCA_PADDR_CHECK_ON_3TH_PLATFORM)
/*
 * on some third-party platforms, the memory below 0x2000
 * is reserved for target use, so any memory allocated in this
 * region should not be used by host
 */
#define MAX_RETRY 50
#define DP_PHY_ADDR_RESERVED 0x2000
#elif defined(BUILD_X86)
/*
 * in M2M emulation platforms (x86) the memory below 0x50000000
 * is reserved for target use, so any memory allocated in this
 * region should not be used by host
 */
#define MAX_RETRY 100
#define DP_PHY_ADDR_RESERVED 0x50000000
#endif

#if defined(QCA_PADDR_CHECK_ON_3TH_PLATFORM) || defined(BUILD_X86)
/**
 * dp_check_paddr() - check if current phy address is valid or not
 * @dp_soc: core txrx main context
 * @rx_netbuf: skb buffer
 * @paddr: physical address
 * @rx_desc_pool: struct of rx descriptor pool
 *
 * If the physical address of nbuf->data is less than
 * DP_PHY_ADDR_RESERVED, set the nbuf aside and try allocating
 * a new nbuf, for up to MAX_RETRY attempts.
 *
 * This is a temp WAR till we fix it properly.
 *
 * Return: success or failure.
 */
static inline
int dp_check_paddr(struct dp_soc *dp_soc,
		   qdf_nbuf_t *rx_netbuf,
		   qdf_dma_addr_t *paddr,
		   struct rx_desc_pool *rx_desc_pool)
{
	uint32_t nbuf_retry = 0;
	int32_t ret;

	if (qdf_likely(*paddr > DP_PHY_ADDR_RESERVED))
		return QDF_STATUS_SUCCESS;

	do {
		dp_debug("invalid phy addr 0x%llx, trying again",
			 (uint64_t)(*paddr));
		nbuf_retry++;
		if ((*rx_netbuf)) {
			/* Not freeing buffer intentionally.
			 * Observed that the same buffer is getting
			 * re-allocated, resulting in longer load time and
			 * WMI init timeout.
			 * This buffer is anyway not useful, so skip it.
			 * Add such buffers to the invalid list and free
			 * them at driver unload.
			 */
			qdf_nbuf_unmap_nbytes_single(dp_soc->osdev,
						     *rx_netbuf,
						     QDF_DMA_FROM_DEVICE,
						     rx_desc_pool->buf_size);
			qdf_nbuf_queue_add(&dp_soc->invalid_buf_queue,
					   *rx_netbuf);
		}

		*rx_netbuf = qdf_nbuf_alloc(dp_soc->osdev,
					    rx_desc_pool->buf_size,
					    RX_BUFFER_RESERVATION,
					    rx_desc_pool->buf_alignment,
					    FALSE);

		if (qdf_unlikely(!(*rx_netbuf)))
			return QDF_STATUS_E_FAILURE;

		ret = qdf_nbuf_map_nbytes_single(dp_soc->osdev,
						 *rx_netbuf,
						 QDF_DMA_FROM_DEVICE,
						 rx_desc_pool->buf_size);

		if (qdf_unlikely(ret == QDF_STATUS_E_FAILURE)) {
			qdf_nbuf_free(*rx_netbuf);
			*rx_netbuf = NULL;
			continue;
		}

		*paddr = qdf_nbuf_get_frag_paddr(*rx_netbuf, 0);

		if (qdf_likely(*paddr > DP_PHY_ADDR_RESERVED))
			return QDF_STATUS_SUCCESS;

	} while (nbuf_retry < MAX_RETRY);

	if ((*rx_netbuf)) {
		qdf_nbuf_unmap_nbytes_single(dp_soc->osdev,
					     *rx_netbuf,
					     QDF_DMA_FROM_DEVICE,
					     rx_desc_pool->buf_size);
		qdf_nbuf_queue_add(&dp_soc->invalid_buf_queue,
				   *rx_netbuf);
	}

	return QDF_STATUS_E_FAILURE;
}

#else
static inline
int dp_check_paddr(struct dp_soc *dp_soc,
		   qdf_nbuf_t *rx_netbuf,
		   qdf_dma_addr_t *paddr,
		   struct rx_desc_pool *rx_desc_pool)
{
	return QDF_STATUS_SUCCESS;
}

#endif

/**
 * dp_rx_cookie_2_link_desc_va() - Converts cookie to a virtual address of
 *				   the MSDU Link Descriptor
 * @soc: core txrx main context
 * @buf_info: buf_info includes cookie that is used to lookup
 *	      virtual address of link descriptor after deriving the page id
 *	      and the offset or index of the desc on the associated page.
 *
 * This is the VA of the link descriptor, that HAL layer later uses to
 * retrieve the list of MSDU's for a given MPDU.
 *
 * Return: void *: Virtual Address of the Rx descriptor
 */
static inline
void *dp_rx_cookie_2_link_desc_va(struct dp_soc *soc,
				  struct hal_buf_info *buf_info)
{
	void *link_desc_va;
	struct qdf_mem_multi_page_t *pages;
	uint16_t page_id = LINK_DESC_COOKIE_PAGE_ID(buf_info->sw_cookie);

	pages = &soc->link_desc_pages;
	if (!pages)
		return NULL;
	if (qdf_unlikely(page_id >= pages->num_pages))
		return NULL;
	link_desc_va = pages->dma_pages[page_id].page_v_addr_start +
		(buf_info->paddr - pages->dma_pages[page_id].page_p_addr);
	return link_desc_va;
}

#ifndef QCA_HOST_MODE_WIFI_DISABLED
#ifdef DISABLE_EAPOL_INTRABSS_FWD
#ifdef WLAN_FEATURE_11BE_MLO
static inline bool dp_nbuf_dst_addr_is_mld_addr(struct dp_vdev *vdev,
						qdf_nbuf_t nbuf)
{
	struct qdf_mac_addr *self_mld_mac_addr =
		(struct qdf_mac_addr *)vdev->mld_mac_addr.raw;
	return qdf_is_macaddr_equal(self_mld_mac_addr,
				    (struct qdf_mac_addr *)qdf_nbuf_data(nbuf) +
				    QDF_NBUF_DEST_MAC_OFFSET);
}
#else
static inline bool dp_nbuf_dst_addr_is_mld_addr(struct dp_vdev *vdev,
						qdf_nbuf_t nbuf)
{
	return false;
}
#endif

static inline bool dp_nbuf_dst_addr_is_self_addr(struct dp_vdev *vdev,
						 qdf_nbuf_t nbuf)
{
	return qdf_is_macaddr_equal((struct qdf_mac_addr *)vdev->mac_addr.raw,
				    (struct qdf_mac_addr *)qdf_nbuf_data(nbuf) +
				    QDF_NBUF_DEST_MAC_OFFSET);
}

/*
 * dp_rx_intrabss_eapol_drop_check() - drop EAPOL pkts whose DA is neither
 *				       the vdev MAC nor the MLD MAC, since
 *				       intra-BSS fwd is not allowed for them.
 * @soc: core txrx main context
 * @ta_txrx_peer: source peer entry
 * @rx_tlv_hdr: start address of rx tlvs
 * @nbuf: nbuf that has to be intrabss forwarded
 *
 * Return: true if the frame is consumed (dropped) here, else false
 */
static inline
bool dp_rx_intrabss_eapol_drop_check(struct dp_soc *soc,
				     struct dp_txrx_peer *ta_txrx_peer,
				     uint8_t *rx_tlv_hdr, qdf_nbuf_t nbuf)
{
	if (qdf_unlikely(qdf_nbuf_is_ipv4_eapol_pkt(nbuf) &&
			 !(dp_nbuf_dst_addr_is_self_addr(ta_txrx_peer->vdev,
							 nbuf) ||
			   dp_nbuf_dst_addr_is_mld_addr(ta_txrx_peer->vdev,
							nbuf)))) {
		qdf_nbuf_free(nbuf);
		DP_STATS_INC(soc, rx.err.intrabss_eapol_drop, 1);
		return true;
	}

	return false;
}
#else /* DISABLE_EAPOL_INTRABSS_FWD */

static inline
bool dp_rx_intrabss_eapol_drop_check(struct dp_soc *soc,
				     struct dp_txrx_peer *ta_txrx_peer,
				     uint8_t *rx_tlv_hdr, qdf_nbuf_t nbuf)
{
	return false;
}
#endif /* DISABLE_EAPOL_INTRABSS_FWD */

bool dp_rx_intrabss_mcbc_fwd(struct dp_soc *soc,
			     struct dp_txrx_peer *ta_txrx_peer,
			     uint8_t *rx_tlv_hdr, qdf_nbuf_t nbuf,
			     struct cdp_tid_rx_stats *tid_stats);

bool dp_rx_intrabss_ucast_fwd(struct dp_soc *soc,
			      struct dp_txrx_peer *ta_txrx_peer,
			      uint8_t tx_vdev_id,
			      uint8_t *rx_tlv_hdr, qdf_nbuf_t nbuf,
			      struct cdp_tid_rx_stats *tid_stats);

/**
 * dp_rx_defrag_concat() - Concatenate the fragments
 *
 * @dst: destination pointer to the buffer
 * @src: source pointer from where the fragment payload is to be copied
 *
 * Return: QDF_STATUS
 */
static inline QDF_STATUS dp_rx_defrag_concat(qdf_nbuf_t dst, qdf_nbuf_t src)
{
	/*
	 * Inside qdf_nbuf_cat, if it is necessary to reallocate dst
	 * to provide space for src, the headroom portion is copied from
	 * the original dst buffer to the larger new dst buffer.
	 * (This is needed, because the headroom of the dst buffer
	 * contains the rx desc.)
	 */
	if (!qdf_nbuf_cat(dst, src)) {
		/*
		 * qdf_nbuf_cat does not free the src memory.
		 * Free src nbuf before returning.
		 * For the failure case the caller takes care of freeing
		 * the nbuf.
		 */
		qdf_nbuf_free(src);
		return QDF_STATUS_SUCCESS;
	}

	return QDF_STATUS_E_DEFRAG_ERROR;
}

#endif /* QCA_HOST_MODE_WIFI_DISABLED */

#ifndef FEATURE_WDS
void dp_rx_da_learn(struct dp_soc *soc, uint8_t *rx_tlv_hdr,
		    struct dp_txrx_peer *ta_txrx_peer, qdf_nbuf_t nbuf);

static inline QDF_STATUS dp_rx_ast_set_active(struct dp_soc *soc,
					      uint16_t sa_idx, bool is_active)
{
	return QDF_STATUS_SUCCESS;
}

static inline void
dp_rx_wds_srcport_learn(struct dp_soc *soc,
			uint8_t *rx_tlv_hdr,
			struct dp_txrx_peer *txrx_peer,
			qdf_nbuf_t nbuf,
			struct hal_rx_msdu_metadata msdu_metadata)
{
}

static inline void
dp_rx_ipa_wds_srcport_learn(struct dp_soc *soc,
			    struct dp_peer *ta_peer, qdf_nbuf_t nbuf,
			    struct hal_rx_msdu_metadata msdu_end_info,
			    bool ad4_valid, bool chfrag_start)
{
}
#endif

/*
 * dp_rx_desc_dump() - dump the sw rx descriptor
 *
 * @rx_desc: sw rx descriptor
 */
static inline void dp_rx_desc_dump(struct dp_rx_desc *rx_desc)
{
	dp_info("rx_desc->nbuf: %pK, rx_desc->cookie: %d, rx_desc->pool_id: %d, rx_desc->in_use: %d, rx_desc->unmapped: %d",
		rx_desc->nbuf, rx_desc->cookie, rx_desc->pool_id,
		rx_desc->in_use, rx_desc->unmapped);
}

#ifndef QCA_HOST_MODE_WIFI_DISABLED

/*
 * check_qwrap_multicast_loopback() - Check if rx packet is a loopback packet.
 *				      In qwrap mode, packets originated from
 *				      any vdev should not loopback and
 *				      should be dropped.
 * @vdev: vdev on which rx packet is received
 * @nbuf: rx pkt
 *
 */
#if ATH_SUPPORT_WRAP
static inline bool check_qwrap_multicast_loopback(struct dp_vdev *vdev,
						  qdf_nbuf_t nbuf)
{
	struct dp_vdev *psta_vdev;
	struct dp_pdev *pdev = vdev->pdev;
	uint8_t *data = qdf_nbuf_data(nbuf);

	if (qdf_unlikely(vdev->proxysta_vdev)) {
		/* In qwrap isolation mode, allow loopback packets as all
		 * packets go to RootAP and Loopback on the mpsta.
		 */
		if (vdev->isolation_vdev)
			return false;
		TAILQ_FOREACH(psta_vdev, &pdev->vdev_list, vdev_list_elem) {
			if (qdf_unlikely(psta_vdev->proxysta_vdev &&
					 !qdf_mem_cmp(psta_vdev->mac_addr.raw,
						      &data[QDF_MAC_ADDR_SIZE],
						      QDF_MAC_ADDR_SIZE))) {
				/* Drop packet if source address is equal to
				 * any of the vdev addresses.
				 */
				return true;
			}
		}
	}
	return false;
}
#else
static inline bool check_qwrap_multicast_loopback(struct dp_vdev *vdev,
						  qdf_nbuf_t nbuf)
{
	return false;
}
#endif

#endif /* QCA_HOST_MODE_WIFI_DISABLED */

#if defined(WLAN_SUPPORT_RX_PROTOCOL_TYPE_TAG) ||\
	defined(WLAN_SUPPORT_RX_TAG_STATISTICS) ||\
	defined(WLAN_SUPPORT_RX_FLOW_TAG)
#include "dp_rx_tag.h"
#endif

#if !defined(WLAN_SUPPORT_RX_PROTOCOL_TYPE_TAG) &&\
	!defined(WLAN_SUPPORT_RX_FLOW_TAG)
/**
 * dp_rx_update_protocol_tag() - Reads CCE metadata from the RX MSDU end TLV
 *				 and sets the corresponding tag in the QDF packet
 * @soc: core txrx main context
 * @vdev: vdev on which the packet is received
 * @nbuf: QDF pkt buffer on which the protocol tag should be set
 * @rx_tlv_hdr: base address where the RX TLVs start
 * @ring_index: REO ring number, not used for error & monitor ring
 * @is_reo_exception: flag to indicate if rx from REO ring or exception ring
 * @is_update_stats: flag to indicate whether to update stats or not
 *
 * Return: void
 */
static inline void
dp_rx_update_protocol_tag(struct dp_soc *soc, struct dp_vdev *vdev,
			  qdf_nbuf_t nbuf, uint8_t *rx_tlv_hdr,
			  uint16_t ring_index,
			  bool is_reo_exception, bool is_update_stats)
{
}
#endif

#ifndef WLAN_SUPPORT_RX_PROTOCOL_TYPE_TAG
/**
 * dp_rx_err_cce_drop() - Reads CCE metadata from the RX MSDU end TLV
 *			  and returns whether the cce metadata matches
 * @soc: core txrx main context
 * @vdev: vdev on which the packet is received
 * @nbuf: QDF pkt buffer on which the protocol tag should be set
 * @rx_tlv_hdr: base address where the RX TLVs start
 *
 * Return: bool
 */
static inline bool
dp_rx_err_cce_drop(struct dp_soc *soc, struct dp_vdev *vdev,
		   qdf_nbuf_t nbuf, uint8_t *rx_tlv_hdr)
{
	return false;
}

#endif /* WLAN_SUPPORT_RX_PROTOCOL_TYPE_TAG */

#ifndef WLAN_SUPPORT_RX_FLOW_TAG
/**
 * dp_rx_update_flow_tag() - Reads FSE metadata from the RX MSDU end TLV
 *			     and sets the corresponding tag in the QDF packet
 * @soc: core txrx main context
 * @vdev: vdev on which the packet is received
 * @nbuf: QDF pkt buffer on which the flow tag should be set
 * @rx_tlv_hdr: base address where the RX TLVs start
 * @update_stats: flag to indicate whether to update stats or not
 *
 * Return: void
 */
static inline void
dp_rx_update_flow_tag(struct dp_soc *soc, struct dp_vdev *vdev,
		      qdf_nbuf_t nbuf, uint8_t *rx_tlv_hdr, bool update_stats)
{
}
#endif /* WLAN_SUPPORT_RX_FLOW_TAG */

/*
 * __dp_rx_buffers_replenish() - replenish rxdma ring with rx nbufs;
 *				 called during dp rx initialization
 *				 and at the end of dp_rx_process.
 *
 * @dp_soc: core txrx main context
 * @mac_id: mac_id which is one of 3 mac_ids
 * @dp_rxdma_srng: dp rxdma circular ring
 * @rx_desc_pool: Pointer to free Rx descriptor pool
 * @num_req_buffers: number of buffers to be replenished
 * @desc_list: list of descs if called from dp_rx_process
 *	       or NULL during dp rx initialization or out of buffer
 *	       interrupt.
 * @tail: tail of descs list
 * @func_name: name of the caller function
 *
 * Return: return success or failure
 */
QDF_STATUS __dp_rx_buffers_replenish(struct dp_soc *dp_soc, uint32_t mac_id,
				     struct dp_srng *dp_rxdma_srng,
				     struct rx_desc_pool *rx_desc_pool,
				     uint32_t num_req_buffers,
				     union dp_rx_desc_list_elem_t **desc_list,
				     union dp_rx_desc_list_elem_t **tail,
				     const char *func_name);

/*
 * __dp_rx_buffers_no_map_replenish() - replenish rxdma ring with rx nbufs;
 *					uses direct APIs to invalidate the
 *					cache and get the physical address of
 *					the nbuf instead of the map api;
 *					called during dp rx initialization and
 *					at the end of dp_rx_process.
 *
 * @dp_soc: core txrx main context
 * @mac_id: mac_id which is one of 3 mac_ids
 * @dp_rxdma_srng: dp rxdma circular ring
 * @rx_desc_pool: Pointer to free Rx descriptor pool
 * @num_req_buffers: number of buffers to be replenished
 * @desc_list: list of descs if called from dp_rx_process
 *	       or NULL during dp rx initialization or out of buffer
 *	       interrupt.
 * @tail: tail of descs list
 *
 * Return: return success or failure
 */
QDF_STATUS
__dp_rx_buffers_no_map_replenish(struct dp_soc *dp_soc, uint32_t mac_id,
				 struct dp_srng *dp_rxdma_srng,
				 struct rx_desc_pool *rx_desc_pool,
				 uint32_t num_req_buffers,
				 union dp_rx_desc_list_elem_t **desc_list,
				 union dp_rx_desc_list_elem_t **tail);

/*
 * __dp_rx_buffers_no_map_lt_replenish() - replenish rxdma ring with rx nbufs;
 *					   uses direct APIs to invalidate the
 *					   cache and get the physical address
 *					   of the nbuf instead of the map api;
 *					   called when the low threshold
 *					   interrupt is triggered.
 *
 * @dp_soc: core txrx main context
 * @mac_id: mac_id which is one of 3 mac_ids
 * @dp_rxdma_srng: dp rxdma circular ring
 * @rx_desc_pool: Pointer to free Rx descriptor pool
 *
 * Return: return success or failure
 */
QDF_STATUS
__dp_rx_buffers_no_map_lt_replenish(struct dp_soc *dp_soc, uint32_t mac_id,
				    struct dp_srng *dp_rxdma_srng,
				    struct rx_desc_pool *rx_desc_pool);
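
/*
 * Example (illustrative only): at the end of Rx processing the reaped
 * descriptors are recycled into the RxDMA refill ring through the
 * dp_rx_buffers_replenish() wrapper defined earlier; head/tail come
 * from dp_rx_add_to_free_desc_list(), and during init they are NULL:
 *
 *	dp_rx_buffers_replenish(soc, mac_id, rxdma_srng, rx_desc_pool,
 *				num_reaped, &head, &tail);
 */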

/*
 * __dp_pdev_rx_buffers_no_map_attach() - replenish rxdma ring with rx nbufs;
 *					  uses direct APIs to invalidate the
 *					  cache and get the physical address of
 *					  the nbuf instead of the map api;
 *					  called during dp rx initialization.
 *
 * @dp_soc: core txrx main context
 * @mac_id: mac_id which is one of 3 mac_ids
 * @dp_rxdma_srng: dp rxdma circular ring
 * @rx_desc_pool: Pointer to free Rx descriptor pool
 * @num_req_buffers: number of buffers to be replenished
 *
 * Return: return success or failure
 */
QDF_STATUS __dp_pdev_rx_buffers_no_map_attach(struct dp_soc *dp_soc,
					      uint32_t mac_id,
					      struct dp_srng *dp_rxdma_srng,
					      struct rx_desc_pool *rx_desc_pool,
					      uint32_t num_req_buffers);

/*
 * dp_pdev_rx_buffers_attach() - replenish rxdma ring with rx nbufs;
 *				 called during dp rx initialization
 *
 * @dp_soc: core txrx main context
 * @mac_id: mac_id which is one of 3 mac_ids
 * @dp_rxdma_srng: dp rxdma circular ring
 * @rx_desc_pool: Pointer to free Rx descriptor pool
 * @num_req_buffers: number of buffers to be replenished
 *
 * Return: return success or failure
 */
QDF_STATUS
dp_pdev_rx_buffers_attach(struct dp_soc *dp_soc, uint32_t mac_id,
			  struct dp_srng *dp_rxdma_srng,
			  struct rx_desc_pool *rx_desc_pool,
			  uint32_t num_req_buffers);

/**
 * dp_rx_link_desc_return() - Return a MPDU link descriptor to HW
 *			      (WBM), following error handling
 *
 * @soc: core DP main context
 * @ring_desc: opaque pointer to the REO error ring descriptor
 * @bm_action: put to idle_list or release to msdu_list
 *
 * Return: QDF_STATUS_E_FAILURE for failure else QDF_STATUS_SUCCESS
 */
QDF_STATUS
dp_rx_link_desc_return(struct dp_soc *soc, hal_ring_desc_t ring_desc,
		       uint8_t bm_action);

/**
 * dp_rx_link_desc_return_by_addr() - Return a MPDU link descriptor to
 *				      (WBM) by address
 *
 * @soc: core DP main context
 * @link_desc_addr: link descriptor addr
 * @bm_action: put to idle_list or release to msdu_list
 *
 * Return: QDF_STATUS_E_FAILURE for failure else QDF_STATUS_SUCCESS
 */
QDF_STATUS
dp_rx_link_desc_return_by_addr(struct dp_soc *soc,
			       hal_buff_addrinfo_t link_desc_addr,
			       uint8_t bm_action);

/**
 * dp_rxdma_err_process() - RxDMA error processing functionality
 * @int_ctx: pointer to DP interrupt context
 * @soc: core txrx main context
 * @mac_id: mac id which is one of 3 mac_ids
 * @quota: No. of units (packets) that can be serviced in one shot.
 *
 * Return: num of buffers processed
 */
uint32_t
dp_rxdma_err_process(struct dp_intr *int_ctx, struct dp_soc *soc,
		     uint32_t mac_id, uint32_t quota);

void dp_rx_fill_mesh_stats(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
			   uint8_t *rx_tlv_hdr, struct dp_txrx_peer *peer);
QDF_STATUS dp_rx_filter_mesh_packets(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
				     uint8_t *rx_tlv_hdr);

int dp_wds_rx_policy_check(uint8_t *rx_tlv_hdr, struct dp_vdev *vdev,
			   struct dp_txrx_peer *peer);

/*
 * dp_rx_dump_info_and_assert() - dump RX Ring info and Rx Desc info
 *
 * @soc: core txrx main context
 * @hal_ring_hdl: opaque pointer to the HAL Rx Ring, which will be serviced
 * @ring_desc: opaque pointer to the RX ring descriptor
 * @rx_desc: host rx descriptor
 *
 * Return: void
 */
void dp_rx_dump_info_and_assert(struct dp_soc *soc,
				hal_ring_handle_t hal_ring_hdl,
				hal_ring_desc_t ring_desc,
				struct dp_rx_desc *rx_desc);

void dp_rx_compute_delay(struct dp_vdev *vdev, qdf_nbuf_t nbuf);

#ifdef QCA_PEER_EXT_STATS
void dp_rx_compute_tid_delay(struct cdp_delay_tid_stats *stats,
			     qdf_nbuf_t nbuf);
#endif /* QCA_PEER_EXT_STATS */

#ifdef RX_DESC_DEBUG_CHECK
/**
 * dp_rx_desc_check_magic() - check the magic value in dp_rx_desc
 * @rx_desc: rx descriptor pointer
 *
 * Return: true, if magic is correct, else false.
 */
static inline bool dp_rx_desc_check_magic(struct dp_rx_desc *rx_desc)
{
	if (qdf_unlikely(rx_desc->magic != DP_RX_DESC_MAGIC))
		return false;

	rx_desc->magic = 0;
	return true;
}

/**
 * dp_rx_desc_prep() - prepare rx desc
 * @rx_desc: rx descriptor pointer to be prepared
 * @nbuf_frag_info_t: struct dp_rx_nbuf_frag_info *
 *
 * Note: assumption is that we are associating a nbuf which is mapped
 *
 * Return: none
 */
static inline
void dp_rx_desc_prep(struct dp_rx_desc *rx_desc,
		     struct dp_rx_nbuf_frag_info *nbuf_frag_info_t)
{
	rx_desc->magic = DP_RX_DESC_MAGIC;
	rx_desc->nbuf = (nbuf_frag_info_t->virt_addr).nbuf;
	rx_desc->unmapped = 0;
	rx_desc->nbuf_data_addr = (uint8_t *)qdf_nbuf_data(rx_desc->nbuf);
}

/**
 * dp_rx_desc_frag_prep() - prepare rx desc
 * @rx_desc: rx descriptor pointer to be prepared
 * @nbuf_frag_info_t: struct dp_rx_nbuf_frag_info *
 *
 * Note: assumption is that the frag address is mapped
 *
 * Return: none
 */
#ifdef DP_RX_MON_MEM_FRAG
static inline
void dp_rx_desc_frag_prep(struct dp_rx_desc *rx_desc,
			  struct dp_rx_nbuf_frag_info *nbuf_frag_info_t)
{
	rx_desc->magic = DP_RX_DESC_MAGIC;
	rx_desc->rx_buf_start =
		(uint8_t *)((nbuf_frag_info_t->virt_addr).vaddr);
	rx_desc->paddr_buf_start = nbuf_frag_info_t->paddr;
	rx_desc->unmapped = 0;
}
#else
static inline
void dp_rx_desc_frag_prep(struct dp_rx_desc *rx_desc,
			  struct dp_rx_nbuf_frag_info *nbuf_frag_info_t)
{
}
#endif /* DP_RX_MON_MEM_FRAG */

/**
 * dp_rx_desc_paddr_sanity_check() - paddr sanity for ring desc vs rx_desc
 * @rx_desc: rx descriptor
 * @ring_paddr: paddr obtained from the ring
 *
 * Return: true if the ring paddr matches the nbuf frag paddr, else false
 */
static inline
bool dp_rx_desc_paddr_sanity_check(struct dp_rx_desc *rx_desc,
				   uint64_t ring_paddr)
{
	return (ring_paddr == qdf_nbuf_get_frag_paddr(rx_desc->nbuf, 0));
}
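
/*
 * Example (illustrative only): how the debug checks above pair up. At
 * replenish time dp_rx_desc_prep() stamps the magic; at reap time the
 * magic and the ring paddr are validated before the nbuf is trusted:
 *
 *	dp_rx_desc_prep(rx_desc, &nbuf_frag_info);	// replenish path
 *
 *	// reap path
 *	if (qdf_unlikely(!dp_rx_desc_check_magic(rx_desc)))
 *		// descriptor corruption: dump info and assert/drop
 *	if (!dp_rx_desc_paddr_sanity_check(rx_desc, ring_paddr))
 *		// ring/desc mismatch: drop the entry
 */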
#else

static inline bool dp_rx_desc_check_magic(struct dp_rx_desc *rx_desc)
{
	return true;
}

static inline
void dp_rx_desc_prep(struct dp_rx_desc *rx_desc,
		     struct dp_rx_nbuf_frag_info *nbuf_frag_info_t)
{
	rx_desc->nbuf = (nbuf_frag_info_t->virt_addr).nbuf;
	rx_desc->unmapped = 0;
}

#ifdef DP_RX_MON_MEM_FRAG
static inline
void dp_rx_desc_frag_prep(struct dp_rx_desc *rx_desc,
			  struct dp_rx_nbuf_frag_info *nbuf_frag_info_t)
{
	rx_desc->rx_buf_start =
		(uint8_t *)((nbuf_frag_info_t->virt_addr).vaddr);
	rx_desc->paddr_buf_start = nbuf_frag_info_t->paddr;
	rx_desc->unmapped = 0;
}
#else
static inline
void dp_rx_desc_frag_prep(struct dp_rx_desc *rx_desc,
			  struct dp_rx_nbuf_frag_info *nbuf_frag_info_t)
{
}
#endif /* DP_RX_MON_MEM_FRAG */

static inline
bool dp_rx_desc_paddr_sanity_check(struct dp_rx_desc *rx_desc,
				   uint64_t ring_paddr)
{
	return true;
}
#endif /* RX_DESC_DEBUG_CHECK */

void dp_rx_enable_mon_dest_frag(struct rx_desc_pool *rx_desc_pool,
				bool is_mon_dest_desc);

void dp_rx_process_rxdma_err(struct dp_soc *soc, qdf_nbuf_t nbuf,
			     uint8_t *rx_tlv_hdr, struct dp_txrx_peer *peer,
			     uint8_t err_code, uint8_t mac_id);

#ifndef QCA_MULTIPASS_SUPPORT
static inline
bool dp_rx_multipass_process(struct dp_txrx_peer *peer, qdf_nbuf_t nbuf,
			     uint8_t tid)
{
	return false;
}
#else
bool dp_rx_multipass_process(struct dp_txrx_peer *peer, qdf_nbuf_t nbuf,
			     uint8_t tid);
#endif

#ifndef QCA_HOST_MODE_WIFI_DISABLED

#ifndef WLAN_RX_PKT_CAPTURE_ENH
static inline
QDF_STATUS dp_peer_set_rx_capture_enabled(struct dp_pdev *pdev,
					  struct dp_peer *peer_handle,
					  bool value, uint8_t *mac_addr)
{
	return QDF_STATUS_SUCCESS;
}
#endif

#endif /* QCA_HOST_MODE_WIFI_DISABLED */

/**
 * dp_rx_deliver_to_stack() - deliver pkts to network stack
 *			      Caller to hold peer refcount and check for valid peer
 * @soc: soc
 * @vdev: vdev
 * @peer: txrx peer
 * @nbuf_head: skb list head
 * @nbuf_tail: skb list tail
 *
 * Return: QDF_STATUS
 */
QDF_STATUS dp_rx_deliver_to_stack(struct dp_soc *soc,
				  struct dp_vdev *vdev,
				  struct dp_txrx_peer *peer,
				  qdf_nbuf_t nbuf_head,
				  qdf_nbuf_t nbuf_tail);

#ifdef QCA_SUPPORT_EAPOL_OVER_CONTROL_PORT
/**
 * dp_rx_eapol_deliver_to_stack() - deliver pkts to network stack
 *				    Caller to hold peer refcount and check for valid peer
 * @soc: soc
 * @vdev: vdev
 * @peer: peer
 * @nbuf_head: skb list head
 * @nbuf_tail: skb list tail
 *
 * Return: QDF_STATUS
 */
QDF_STATUS dp_rx_eapol_deliver_to_stack(struct dp_soc *soc,
					struct dp_vdev *vdev,
					struct dp_txrx_peer *peer,
					qdf_nbuf_t nbuf_head,
					qdf_nbuf_t nbuf_tail);
#endif
#ifndef QCA_HOST_MODE_WIFI_DISABLED

#ifdef QCA_OL_RX_LOCK_LESS_ACCESS
/**
 * dp_rx_srng_access_start() - Wrapper function to log access start of a
 *			       hal ring
 * @int_ctx: pointer to DP interrupt context
 * @soc: DP soc structure pointer
 * @hal_ring_hdl: HAL ring handle
 *
 * Return: 0 on success; error on failure
 */
static inline int
dp_rx_srng_access_start(struct dp_intr *int_ctx, struct dp_soc *soc,
			hal_ring_handle_t hal_ring_hdl)
{
	return hal_srng_access_start_unlocked(soc->hal_soc, hal_ring_hdl);
}

/**
 * dp_rx_srng_access_end() - Wrapper function to log access end of a
 *			     hal ring
 * @int_ctx: pointer to DP interrupt context
 * @soc: DP soc structure pointer
 * @hal_ring_hdl: HAL ring handle
 *
 * Return: None
 */
static inline void
dp_rx_srng_access_end(struct dp_intr *int_ctx, struct dp_soc *soc,
		      hal_ring_handle_t hal_ring_hdl)
{
	hal_srng_access_end_unlocked(soc->hal_soc, hal_ring_hdl);
}
#else
static inline int
dp_rx_srng_access_start(struct dp_intr *int_ctx, struct dp_soc *soc,
			hal_ring_handle_t hal_ring_hdl)
{
	return dp_srng_access_start(int_ctx, soc, hal_ring_hdl);
}

static inline void
dp_rx_srng_access_end(struct dp_intr *int_ctx, struct dp_soc *soc,
		      hal_ring_handle_t hal_ring_hdl)
{
	dp_srng_access_end(int_ctx, soc, hal_ring_hdl);
}
#endif

#endif /* QCA_HOST_MODE_WIFI_DISABLED */

/**
 * dp_rx_wbm_sg_list_reset() - Initialize sg list
 * @soc: DP SOC handle
 *
 * This API should be called at soc init and after every sg processing.
 *
 * Return: None
 */
static inline void dp_rx_wbm_sg_list_reset(struct dp_soc *soc)
{
	if (soc) {
		soc->wbm_sg_param.wbm_is_first_msdu_in_sg = false;
		soc->wbm_sg_param.wbm_sg_nbuf_head = NULL;
		soc->wbm_sg_param.wbm_sg_nbuf_tail = NULL;
		soc->wbm_sg_param.wbm_sg_desc_msdu_len = 0;
	}
}

/**
 * dp_rx_wbm_sg_list_deinit() - De-initialize sg list
 * @soc: DP SOC handle
 *
 * This API should be called in the down path to avoid any leak.
 *
 * Return: None
 */
static inline void dp_rx_wbm_sg_list_deinit(struct dp_soc *soc)
{
	if (soc) {
		if (soc->wbm_sg_param.wbm_sg_nbuf_head)
			qdf_nbuf_list_free(soc->wbm_sg_param.wbm_sg_nbuf_head);

		dp_rx_wbm_sg_list_reset(soc);
	}
}

#ifndef QCA_HOST_MODE_WIFI_DISABLED

#ifdef WLAN_FEATURE_RX_PREALLOC_BUFFER_POOL
#define DP_RX_PROCESS_NBUF(soc, head, tail, ebuf_head, ebuf_tail, rx_desc) \
	do {								   \
		if (!soc->rx_buff_pool[rx_desc->pool_id].is_initialized) { \
			DP_RX_LIST_APPEND(head, tail, rx_desc->nbuf);	   \
			break;						   \
		}							   \
		DP_RX_LIST_APPEND(ebuf_head, ebuf_tail, rx_desc->nbuf);	   \
		if (!qdf_nbuf_is_rx_chfrag_cont(rx_desc->nbuf)) {	   \
			if (!dp_rx_buffer_pool_refill(soc, ebuf_head,	   \
						      rx_desc->pool_id))   \
				DP_RX_MERGE_TWO_LIST(head, tail,	   \
						     ebuf_head, ebuf_tail);\
			ebuf_head = NULL;				   \
			ebuf_tail = NULL;				   \
		}							   \
	} while (0)
#else
#define DP_RX_PROCESS_NBUF(soc, head, tail, ebuf_head, ebuf_tail, rx_desc) \
	DP_RX_LIST_APPEND(head, tail, rx_desc->nbuf)
#endif /* WLAN_FEATURE_RX_PREALLOC_BUFFER_POOL */

#endif /* QCA_HOST_MODE_WIFI_DISABLED */
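
/*
 * Illustrative sketch (not part of the driver API): typical placement of
 * the two sg-list helpers above. The dp_rx_example_sg_lifecycle() name is
 * hypothetical.
 */
static inline void
dp_rx_example_sg_lifecycle(struct dp_soc *soc, bool down_path)
{
	if (down_path)
		dp_rx_wbm_sg_list_deinit(soc);	/* frees any pending chain */
	else
		dp_rx_wbm_sg_list_reset(soc);	/* init / post-sg cleanup */
}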
/**
 * dp_rx_link_desc_refill_duplicate_check() - check if the link desc is a
 *					      duplicate before refilling
 * @soc: DP SOC handle
 * @buf_info: the last link desc buf info
 * @ring_buf_info: current buf address pointer including link desc
 *
 * Return: None
 */
void dp_rx_link_desc_refill_duplicate_check(
				struct dp_soc *soc,
				struct hal_buf_info *buf_info,
				hal_buff_addrinfo_t ring_buf_info);

#ifdef WLAN_FEATURE_PKT_CAPTURE_V2
/**
 * dp_rx_deliver_to_pkt_capture() - deliver rx packet to packet capture
 * @soc: dp_soc handle
 * @pdev: dp_pdev handle
 * @peer_id: peer_id of the peer for which completion came
 * @is_offload: flag indicating whether the packet came via offload
 * @netbuf: Buffer pointer
 *
 * This function is used to deliver rx packet to packet capture
 */
void dp_rx_deliver_to_pkt_capture(struct dp_soc *soc, struct dp_pdev *pdev,
				  uint16_t peer_id, uint32_t is_offload,
				  qdf_nbuf_t netbuf);
void dp_rx_deliver_to_pkt_capture_no_peer(struct dp_soc *soc, qdf_nbuf_t nbuf,
					  uint32_t is_offload);
#else
static inline void
dp_rx_deliver_to_pkt_capture(struct dp_soc *soc, struct dp_pdev *pdev,
			     uint16_t peer_id, uint32_t is_offload,
			     qdf_nbuf_t netbuf)
{
}

static inline void
dp_rx_deliver_to_pkt_capture_no_peer(struct dp_soc *soc, qdf_nbuf_t nbuf,
				     uint32_t is_offload)
{
}
#endif

#ifndef QCA_HOST_MODE_WIFI_DISABLED
#ifdef FEATURE_MEC
/**
 * dp_rx_mcast_echo_check() - check if the mcast pkt is a loop
 *			      back on same vap or a different vap.
 * @soc: core DP main context
 * @peer: dp txrx peer handle
 * @rx_tlv_hdr: start of the rx TLV header
 * @nbuf: pkt buffer
 *
 * Return: bool (true if it is a looped back pkt else false)
 *
 */
bool dp_rx_mcast_echo_check(struct dp_soc *soc,
			    struct dp_txrx_peer *peer,
			    uint8_t *rx_tlv_hdr,
			    qdf_nbuf_t nbuf);
#else
static inline bool dp_rx_mcast_echo_check(struct dp_soc *soc,
					  struct dp_txrx_peer *peer,
					  uint8_t *rx_tlv_hdr,
					  qdf_nbuf_t nbuf)
{
	return false;
}
#endif /* FEATURE_MEC */
#endif /* QCA_HOST_MODE_WIFI_DISABLED */

#ifdef RECEIVE_OFFLOAD
void dp_rx_fill_gro_info(struct dp_soc *soc, uint8_t *rx_tlv,
			 qdf_nbuf_t msdu, uint32_t *rx_ol_pkt_cnt);
#else
static inline
void dp_rx_fill_gro_info(struct dp_soc *soc, uint8_t *rx_tlv,
			 qdf_nbuf_t msdu, uint32_t *rx_ol_pkt_cnt)
{
}
#endif

void dp_rx_msdu_stats_update(struct dp_soc *soc, qdf_nbuf_t nbuf,
			     uint8_t *rx_tlv_hdr, struct dp_txrx_peer *peer,
			     uint8_t ring_id,
			     struct cdp_tid_rx_stats *tid_stats);

void dp_rx_deliver_to_stack_no_peer(struct dp_soc *soc, qdf_nbuf_t nbuf);

uint32_t dp_rx_srng_get_num_pending(hal_soc_handle_t hal_soc,
				    hal_ring_handle_t hal_ring_hdl,
				    uint32_t num_entries,
				    bool *near_full);

#ifdef WLAN_FEATURE_DP_RX_RING_HISTORY
void dp_rx_ring_record_entry(struct dp_soc *soc, uint8_t ring_num,
			     hal_ring_desc_t ring_desc);
#else
static inline void
dp_rx_ring_record_entry(struct dp_soc *soc, uint8_t ring_num,
			hal_ring_desc_t ring_desc)
{
}
#endif
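
/*
 * Illustrative sketch (not part of the driver API): querying the ring fill
 * level via dp_rx_srng_get_num_pending(); the near_full out-parameter is
 * set when the fill level crosses the internal high watermark. The
 * dp_rx_example_ring_near_full() name is hypothetical.
 */
static inline bool
dp_rx_example_ring_near_full(hal_soc_handle_t hal_soc,
			     hal_ring_handle_t hal_ring_hdl,
			     uint32_t num_entries)
{
	bool near_full = false;

	/* the pending-entry count itself is ignored in this sketch */
	(void)dp_rx_srng_get_num_pending(hal_soc, hal_ring_hdl,
					 num_entries, &near_full);

	return near_full;
}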
#ifndef QCA_HOST_MODE_WIFI_DISABLED
#ifdef RX_DESC_SANITY_WAR
QDF_STATUS dp_rx_desc_sanity(struct dp_soc *soc, hal_soc_handle_t hal_soc,
			     hal_ring_handle_t hal_ring_hdl,
			     hal_ring_desc_t ring_desc,
			     struct dp_rx_desc *rx_desc);
#else
static inline
QDF_STATUS dp_rx_desc_sanity(struct dp_soc *soc, hal_soc_handle_t hal_soc,
			     hal_ring_handle_t hal_ring_hdl,
			     hal_ring_desc_t ring_desc,
			     struct dp_rx_desc *rx_desc)
{
	return QDF_STATUS_SUCCESS;
}
#endif

#ifdef DP_RX_DROP_RAW_FRM
bool dp_rx_is_raw_frame_dropped(qdf_nbuf_t nbuf);
#else
static inline
bool dp_rx_is_raw_frame_dropped(qdf_nbuf_t nbuf)
{
	return false;
}
#endif

#ifdef RX_DESC_DEBUG_CHECK
QDF_STATUS dp_rx_desc_nbuf_sanity_check(struct dp_soc *soc,
					hal_ring_desc_t ring_desc,
					struct dp_rx_desc *rx_desc);
#else
static inline
QDF_STATUS dp_rx_desc_nbuf_sanity_check(struct dp_soc *soc,
					hal_ring_desc_t ring_desc,
					struct dp_rx_desc *rx_desc)
{
	return QDF_STATUS_SUCCESS;
}
#endif

#ifdef WLAN_DP_FEATURE_SW_LATENCY_MGR
void dp_rx_update_stats(struct dp_soc *soc, qdf_nbuf_t nbuf);
#else
static inline
void dp_rx_update_stats(struct dp_soc *soc, qdf_nbuf_t nbuf)
{
}
#endif

/**
 * dp_rx_cksum_offload() - set the nbuf checksum as defined by hardware.
 * @pdev: pointer to dp_pdev structure
 * @nbuf: pointer to the first msdu of an amsdu.
 * @rx_tlv_hdr: pointer to the start of RX TLV headers.
 *
 * The ip_summed field of the skb is set based on whether HW validated the
 * IP/TCP/UDP checksum.
 *
 * Return: void
 */
static inline
void dp_rx_cksum_offload(struct dp_pdev *pdev,
			 qdf_nbuf_t nbuf,
			 uint8_t *rx_tlv_hdr)
{
	qdf_nbuf_rx_cksum_t cksum = {0};
	//TODO - Move this to ring desc api
	//HAL_RX_MSDU_DESC_IP_CHKSUM_FAIL_GET
	//HAL_RX_MSDU_DESC_TCP_UDP_CHKSUM_FAIL_GET
	uint32_t ip_csum_err, tcp_udp_csum_er;

	hal_rx_tlv_csum_err_get(pdev->soc->hal_soc, rx_tlv_hdr, &ip_csum_err,
				&tcp_udp_csum_er);

	if (qdf_likely(!ip_csum_err && !tcp_udp_csum_er)) {
		cksum.l4_result = QDF_NBUF_RX_CKSUM_TCP_UDP_UNNECESSARY;
		qdf_nbuf_set_rx_cksum(nbuf, &cksum);
	} else {
		DP_STATS_INCC(pdev, err.ip_csum_err, 1, ip_csum_err);
		DP_STATS_INCC(pdev, err.tcp_udp_csum_err, 1, tcp_udp_csum_er);
	}
}
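
/*
 * Illustrative sketch (not part of the driver API): how the helpers above
 * combine in a per-MSDU path. The dp_rx_example_msdu_checks() name is
 * hypothetical.
 */
static inline QDF_STATUS
dp_rx_example_msdu_checks(struct dp_pdev *pdev, hal_ring_desc_t ring_desc,
			  struct dp_rx_desc *rx_desc, uint8_t *rx_tlv_hdr)
{
	/* with RX_DESC_DEBUG_CHECK disabled this is a no-op returning
	 * QDF_STATUS_SUCCESS */
	if (dp_rx_desc_nbuf_sanity_check(pdev->soc, ring_desc, rx_desc) !=
	    QDF_STATUS_SUCCESS)
		return QDF_STATUS_E_FAILURE;

	/* stamp skb->ip_summed from the HW checksum verdict; on csum
	 * error only the pdev error counters are incremented */
	dp_rx_cksum_offload(pdev, rx_desc->nbuf, rx_tlv_hdr);

	return QDF_STATUS_SUCCESS;
}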
#endif /* QCA_HOST_MODE_WIFI_DISABLED */

#ifdef WLAN_FEATURE_RX_SOFTIRQ_TIME_LIMIT
static inline
bool dp_rx_reap_loop_pkt_limit_hit(struct dp_soc *soc, int num_reaped,
				   int max_reap_limit)
{
	bool limit_hit = (num_reaped >= max_reap_limit);

	if (limit_hit)
		DP_STATS_INC(soc, rx.reap_loop_pkt_limit_hit, 1)

	return limit_hit;
}

static inline
bool dp_rx_enable_eol_data_check(struct dp_soc *soc)
{
	return soc->wlan_cfg_ctx->rx_enable_eol_data_check;
}

static inline int dp_rx_get_loop_pkt_limit(struct dp_soc *soc)
{
	struct wlan_cfg_dp_soc_ctxt *cfg = soc->wlan_cfg_ctx;

	return cfg->rx_reap_loop_pkt_limit;
}
#else
static inline
bool dp_rx_reap_loop_pkt_limit_hit(struct dp_soc *soc, int num_reaped,
				   int max_reap_limit)
{
	return false;
}

static inline
bool dp_rx_enable_eol_data_check(struct dp_soc *soc)
{
	return false;
}

static inline int dp_rx_get_loop_pkt_limit(struct dp_soc *soc)
{
	return 0;
}
#endif /* WLAN_FEATURE_RX_SOFTIRQ_TIME_LIMIT */

#ifdef QCA_SUPPORT_WDS_EXTENDED
/**
 * dp_rx_is_list_ready() - Make different lists for 4-address
 *			   and 3-address frames
 * @nbuf_head: skb list head
 * @vdev: vdev
 * @txrx_peer: txrx_peer
 * @peer_id: peer id of new received frame
 * @vdev_id: vdev_id of new received frame
 *
 * Return: true if peer_ids are different.
 */
static inline bool
dp_rx_is_list_ready(qdf_nbuf_t nbuf_head,
		    struct dp_vdev *vdev,
		    struct dp_txrx_peer *txrx_peer,
		    uint16_t peer_id,
		    uint8_t vdev_id)
{
	if (nbuf_head && txrx_peer && txrx_peer->peer_id != peer_id)
		return true;

	return false;
}
#else
static inline bool
dp_rx_is_list_ready(qdf_nbuf_t nbuf_head,
		    struct dp_vdev *vdev,
		    struct dp_txrx_peer *txrx_peer,
		    uint16_t peer_id,
		    uint8_t vdev_id)
{
	if (nbuf_head && vdev && (vdev->vdev_id != vdev_id))
		return true;

	return false;
}
#endif

#ifdef WLAN_FEATURE_MARK_FIRST_WAKEUP_PACKET
/**
 * dp_rx_mark_first_packet_after_wow_wakeup() - mark the first packet
 *						received after wow wakeup
 * @pdev: pointer to dp_pdev structure
 * @rx_tlv: pointer to rx_pkt_tlvs structure
 * @nbuf: pointer to skb buffer
 *
 * Return: None
 */
void dp_rx_mark_first_packet_after_wow_wakeup(struct dp_pdev *pdev,
					      uint8_t *rx_tlv,
					      qdf_nbuf_t nbuf);
#else
static inline void
dp_rx_mark_first_packet_after_wow_wakeup(struct dp_pdev *pdev,
					 uint8_t *rx_tlv,
					 qdf_nbuf_t nbuf)
{
}
#endif

#if defined(WLAN_MAX_PDEVS) && (WLAN_MAX_PDEVS == 1)
static inline uint8_t
dp_rx_get_defrag_bm_id(struct dp_soc *soc)
{
	return DP_DEFRAG_RBM(soc->wbm_sw0_bm_id);
}

static inline uint8_t
dp_rx_get_rx_bm_id(struct dp_soc *soc)
{
	return DP_WBM2SW_RBM(soc->wbm_sw0_bm_id);
}
#else
static inline uint8_t
dp_rx_get_rx_bm_id(struct dp_soc *soc)
{
	struct wlan_cfg_dp_soc_ctxt *cfg_ctx = soc->wlan_cfg_ctx;
	uint8_t wbm2_sw_rx_rel_ring_id;

	wbm2_sw_rx_rel_ring_id = wlan_cfg_get_rx_rel_ring_id(cfg_ctx);

	return HAL_RX_BUF_RBM_SW_BM(soc->wbm_sw0_bm_id,
				    wbm2_sw_rx_rel_ring_id);
}

static inline uint8_t
dp_rx_get_defrag_bm_id(struct dp_soc *soc)
{
	return dp_rx_get_rx_bm_id(soc);
}
#endif
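
/*
 * Illustrative sketch (not part of the driver API): how a reap loop would
 * consult the limit helpers above. With the softirq time-limit feature
 * disabled this always returns false and the loop is bounded by quota
 * alone. The dp_rx_example_should_stop_reap() name is hypothetical.
 */
static inline bool
dp_rx_example_should_stop_reap(struct dp_soc *soc, int num_reaped)
{
	int limit = dp_rx_get_loop_pkt_limit(soc);

	return dp_rx_reap_loop_pkt_limit_hit(soc, num_reaped, limit);
}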
static inline uint16_t
dp_rx_peer_metadata_peer_id_get(struct dp_soc *soc, uint32_t peer_metadata)
{
	return soc->arch_ops.dp_rx_peer_metadata_peer_id_get(soc,
							     peer_metadata);
}

/**
 * dp_rx_desc_pool_init_generic() - Generic Rx descriptors initialization
 * @soc: SOC handle
 * @rx_desc_pool: pointer to RX descriptor pool
 * @pool_id: pool ID
 *
 * Return: QDF_STATUS
 */
QDF_STATUS dp_rx_desc_pool_init_generic(struct dp_soc *soc,
					struct rx_desc_pool *rx_desc_pool,
					uint32_t pool_id);

void dp_rx_desc_pool_deinit_generic(struct dp_soc *soc,
				    struct rx_desc_pool *rx_desc_pool,
				    uint32_t pool_id);

/**
 * dp_rx_pkt_tracepoints_enabled() - Get the state of rx pkt tracepoint
 *
 * Return: True if any rx pkt tracepoint is enabled else false
 */
static inline
bool dp_rx_pkt_tracepoints_enabled(void)
{
	return (qdf_trace_dp_rx_tcp_pkt_enabled() ||
		qdf_trace_dp_rx_udp_pkt_enabled() ||
		qdf_trace_dp_rx_pkt_enabled());
}

#if defined(QCA_DP_RX_NBUF_NO_MAP_UNMAP) && !defined(BUILD_X86)
static inline
QDF_STATUS dp_pdev_rx_buffers_attach_simple(struct dp_soc *soc, uint32_t mac_id,
					    struct dp_srng *rxdma_srng,
					    struct rx_desc_pool *rx_desc_pool,
					    uint32_t num_req_buffers)
{
	return __dp_pdev_rx_buffers_no_map_attach(soc, mac_id,
						  rxdma_srng,
						  rx_desc_pool,
						  num_req_buffers);
}

static inline
void dp_rx_buffers_replenish_simple(struct dp_soc *soc, uint32_t mac_id,
				    struct dp_srng *rxdma_srng,
				    struct rx_desc_pool *rx_desc_pool,
				    uint32_t num_req_buffers,
				    union dp_rx_desc_list_elem_t **desc_list,
				    union dp_rx_desc_list_elem_t **tail)
{
	__dp_rx_buffers_no_map_replenish(soc, mac_id, rxdma_srng, rx_desc_pool,
					 num_req_buffers, desc_list, tail);
}

static inline
void dp_rx_buffers_lt_replenish_simple(struct dp_soc *soc, uint32_t mac_id,
				       struct dp_srng *rxdma_srng,
				       struct rx_desc_pool *rx_desc_pool,
				       uint32_t num_req_buffers,
				       union dp_rx_desc_list_elem_t **desc_list,
				       union dp_rx_desc_list_elem_t **tail)
{
	__dp_rx_buffers_no_map_lt_replenish(soc, mac_id, rxdma_srng,
					    rx_desc_pool);
}

static inline
qdf_dma_addr_t dp_rx_nbuf_sync_no_dsb(struct dp_soc *dp_soc,
				      qdf_nbuf_t nbuf,
				      uint32_t buf_size)
{
	qdf_nbuf_dma_inv_range_no_dsb((void *)nbuf->data,
				      (void *)(nbuf->data + buf_size));

	return (qdf_dma_addr_t)qdf_mem_virt_to_phys(nbuf->data);
}

static inline
qdf_dma_addr_t dp_rx_nbuf_sync(struct dp_soc *dp_soc,
			       qdf_nbuf_t nbuf,
			       uint32_t buf_size)
{
	qdf_nbuf_dma_inv_range((void *)nbuf->data,
			       (void *)(nbuf->data + buf_size));

	return (qdf_dma_addr_t)qdf_mem_virt_to_phys(nbuf->data);
}
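
/*
 * Illustrative sketch (not part of the driver API): invoking the simple
 * replenish wrapper above. The dp_rx_example_replenish() name is
 * hypothetical, and the NULL-initialized desc_list/tail (letting the
 * replenish routine pull free descriptors from the pool itself) is an
 * assumption about the common calling pattern.
 */
static inline void
dp_rx_example_replenish(struct dp_soc *soc, uint32_t mac_id,
			struct dp_srng *rxdma_srng,
			struct rx_desc_pool *rx_desc_pool,
			uint32_t num_req_buffers)
{
	union dp_rx_desc_list_elem_t *desc_list = NULL;
	union dp_rx_desc_list_elem_t *tail = NULL;

	dp_rx_buffers_replenish_simple(soc, mac_id, rxdma_srng,
				       rx_desc_pool, num_req_buffers,
				       &desc_list, &tail);
}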
#if !defined(SPECULATIVE_READ_DISABLED)
static inline
void dp_rx_nbuf_unmap(struct dp_soc *soc,
		      struct dp_rx_desc *rx_desc,
		      uint8_t reo_ring_num)
{
	struct rx_desc_pool *rx_desc_pool;
	qdf_nbuf_t nbuf;

	rx_desc_pool = &soc->rx_desc_buf[rx_desc->pool_id];
	nbuf = rx_desc->nbuf;

	qdf_nbuf_dma_inv_range((void *)nbuf->data,
			       (void *)(nbuf->data + rx_desc_pool->buf_size));
}

static inline
void dp_rx_nbuf_unmap_pool(struct dp_soc *soc,
			   struct rx_desc_pool *rx_desc_pool,
			   qdf_nbuf_t nbuf)
{
	qdf_nbuf_dma_inv_range((void *)nbuf->data,
			       (void *)(nbuf->data + rx_desc_pool->buf_size));
}

#else
static inline
void dp_rx_nbuf_unmap(struct dp_soc *soc,
		      struct dp_rx_desc *rx_desc,
		      uint8_t reo_ring_num)
{
}

static inline
void dp_rx_nbuf_unmap_pool(struct dp_soc *soc,
			   struct rx_desc_pool *rx_desc_pool,
			   qdf_nbuf_t nbuf)
{
}
#endif

static inline
void dp_rx_per_core_stats_update(struct dp_soc *soc, uint8_t ring_id,
				 uint32_t bufs_reaped)
{
}

static inline
qdf_nbuf_t dp_rx_nbuf_alloc(struct dp_soc *soc,
			    struct rx_desc_pool *rx_desc_pool)
{
	return qdf_nbuf_alloc_simple(soc->osdev, rx_desc_pool->buf_size,
				     RX_BUFFER_RESERVATION,
				     rx_desc_pool->buf_alignment, FALSE);
}

static inline
void dp_rx_nbuf_free(qdf_nbuf_t nbuf)
{
	qdf_nbuf_free_simple(nbuf);
}
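
/*
 * Illustrative sketch (not part of the driver API): alloc and free are
 * expected to stay within the same model, so a buffer obtained from
 * dp_rx_nbuf_alloc() in this no-map build goes back via dp_rx_nbuf_free().
 * The dp_rx_example_alloc_probe() name is hypothetical.
 */
static inline bool
dp_rx_example_alloc_probe(struct dp_soc *soc,
			  struct rx_desc_pool *rx_desc_pool)
{
	qdf_nbuf_t nbuf = dp_rx_nbuf_alloc(soc, rx_desc_pool);

	if (!nbuf)
		return false;	/* allocation failure */

	dp_rx_nbuf_free(nbuf);
	return true;
}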
#else
static inline
QDF_STATUS dp_pdev_rx_buffers_attach_simple(struct dp_soc *soc, uint32_t mac_id,
					    struct dp_srng *rxdma_srng,
					    struct rx_desc_pool *rx_desc_pool,
					    uint32_t num_req_buffers)
{
	return dp_pdev_rx_buffers_attach(soc, mac_id,
					 rxdma_srng,
					 rx_desc_pool,
					 num_req_buffers);
}

static inline
void dp_rx_buffers_replenish_simple(struct dp_soc *soc, uint32_t mac_id,
				    struct dp_srng *rxdma_srng,
				    struct rx_desc_pool *rx_desc_pool,
				    uint32_t num_req_buffers,
				    union dp_rx_desc_list_elem_t **desc_list,
				    union dp_rx_desc_list_elem_t **tail)
{
	dp_rx_buffers_replenish(soc, mac_id, rxdma_srng, rx_desc_pool,
				num_req_buffers, desc_list, tail);
}

static inline
void dp_rx_buffers_lt_replenish_simple(struct dp_soc *soc, uint32_t mac_id,
				       struct dp_srng *rxdma_srng,
				       struct rx_desc_pool *rx_desc_pool,
				       uint32_t num_req_buffers,
				       union dp_rx_desc_list_elem_t **desc_list,
				       union dp_rx_desc_list_elem_t **tail)
{
	dp_rx_buffers_replenish(soc, mac_id, rxdma_srng, rx_desc_pool,
				num_req_buffers, desc_list, tail);
}

static inline
qdf_dma_addr_t dp_rx_nbuf_sync_no_dsb(struct dp_soc *dp_soc,
				      qdf_nbuf_t nbuf,
				      uint32_t buf_size)
{
	return (qdf_dma_addr_t)NULL;
}

static inline
qdf_dma_addr_t dp_rx_nbuf_sync(struct dp_soc *dp_soc,
			       qdf_nbuf_t nbuf,
			       uint32_t buf_size)
{
	return (qdf_dma_addr_t)NULL;
}

static inline
void dp_rx_nbuf_unmap(struct dp_soc *soc,
		      struct dp_rx_desc *rx_desc,
		      uint8_t reo_ring_num)
{
	struct rx_desc_pool *rx_desc_pool;

	rx_desc_pool = &soc->rx_desc_buf[rx_desc->pool_id];
	dp_ipa_reo_ctx_buf_mapping_lock(soc, reo_ring_num);
	dp_ipa_handle_rx_buf_smmu_mapping(soc, rx_desc->nbuf,
					  rx_desc_pool->buf_size,
					  false);

	qdf_nbuf_unmap_nbytes_single(soc->osdev, rx_desc->nbuf,
				     QDF_DMA_FROM_DEVICE,
				     rx_desc_pool->buf_size);

	dp_ipa_reo_ctx_buf_mapping_unlock(soc, reo_ring_num);
}

static inline
void dp_rx_nbuf_unmap_pool(struct dp_soc *soc,
			   struct rx_desc_pool *rx_desc_pool,
			   qdf_nbuf_t nbuf)
{
	dp_ipa_handle_rx_buf_smmu_mapping(soc, nbuf, rx_desc_pool->buf_size,
					  false);
	qdf_nbuf_unmap_nbytes_single(soc->osdev, nbuf, QDF_DMA_FROM_DEVICE,
				     rx_desc_pool->buf_size);
}

static inline
void dp_rx_per_core_stats_update(struct dp_soc *soc, uint8_t ring_id,
				 uint32_t bufs_reaped)
{
	int cpu_id = qdf_get_cpu();

	DP_STATS_INC(soc, rx.ring_packets[cpu_id][ring_id], bufs_reaped);
}

static inline
qdf_nbuf_t dp_rx_nbuf_alloc(struct dp_soc *soc,
			    struct rx_desc_pool *rx_desc_pool)
{
	return qdf_nbuf_alloc(soc->osdev, rx_desc_pool->buf_size,
			      RX_BUFFER_RESERVATION,
			      rx_desc_pool->buf_alignment, FALSE);
}

static inline
void dp_rx_nbuf_free(qdf_nbuf_t nbuf)
{
	qdf_nbuf_free(nbuf);
}
#endif

/**
 * dp_rx_get_txrx_peer_and_vdev() - Get txrx peer and vdev from peer id
 * @soc: core txrx main context
 * @nbuf: pointer to the first msdu of an amsdu.
 * @peer_id: Peer id of the peer
 * @txrx_ref_handle: Buffer to save the handle for txrx peer's reference
 * @pkt_capture_offload: Flag indicating if pkt capture offload is needed
 * @vdev: Buffer to hold pointer to vdev
 * @rx_pdev: Buffer to hold pointer to rx pdev
 * @dsf: delay stats flag
 * @old_tid: Old tid
 *
 * Get txrx peer and vdev from peer id
 *
 * Return: Pointer to txrx peer
 */
static inline struct dp_txrx_peer *
dp_rx_get_txrx_peer_and_vdev(struct dp_soc *soc,
			     qdf_nbuf_t nbuf,
			     uint16_t peer_id,
			     dp_txrx_ref_handle *txrx_ref_handle,
			     bool pkt_capture_offload,
			     struct dp_vdev **vdev,
			     struct dp_pdev **rx_pdev,
			     uint32_t *dsf,
			     uint32_t *old_tid)
{
	struct dp_txrx_peer *txrx_peer = NULL;

	txrx_peer = dp_txrx_peer_get_ref_by_id(soc, peer_id, txrx_ref_handle,
					       DP_MOD_ID_RX);

	if (qdf_likely(txrx_peer)) {
		*vdev = txrx_peer->vdev;
	} else {
		nbuf->next = NULL;
		dp_rx_deliver_to_pkt_capture_no_peer(soc, nbuf,
						     pkt_capture_offload);
		if (!pkt_capture_offload)
			dp_rx_deliver_to_stack_no_peer(soc, nbuf);

		goto end;
	}

	if (qdf_unlikely(!(*vdev))) {
		qdf_nbuf_free(nbuf);
		DP_STATS_INC(soc, rx.err.invalid_vdev, 1);
		goto end;
	}

	*rx_pdev = (*vdev)->pdev;
	*dsf = (*rx_pdev)->delay_stats_flag;
	*old_tid = 0xff;

end:
	return txrx_peer;
}

static inline QDF_STATUS
dp_peer_rx_reorder_queue_setup(struct dp_soc *soc, struct dp_peer *peer,
			       int tid, uint32_t ba_window_size)
{
	return soc->arch_ops.dp_peer_rx_reorder_queue_setup(soc,
							    peer, tid,
							    ba_window_size);
}

static inline
void dp_rx_nbuf_list_deliver(struct dp_soc *soc,
			     struct dp_vdev *vdev,
			     struct dp_txrx_peer *txrx_peer,
			     uint16_t peer_id,
			     uint8_t pkt_capture_offload,
			     qdf_nbuf_t deliver_list_head,
			     qdf_nbuf_t deliver_list_tail)
{
	qdf_nbuf_t nbuf, next;

	if (qdf_likely(deliver_list_head)) {
		if (qdf_likely(txrx_peer)) {
			dp_rx_deliver_to_pkt_capture(soc, vdev->pdev, peer_id,
						     pkt_capture_offload,
						     deliver_list_head);
			if (!pkt_capture_offload)
				dp_rx_deliver_to_stack(soc, vdev, txrx_peer,
						       deliver_list_head,
						       deliver_list_tail);
		} else {
			nbuf = deliver_list_head;
			while (nbuf) {
				next = nbuf->next;
				nbuf->next = NULL;
				dp_rx_deliver_to_stack_no_peer(soc, nbuf);
				nbuf = next;
			}
		}
	}
}
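
/*
 * Illustrative sketch (not part of the driver API): flushing a pending
 * delivery list through dp_rx_nbuf_list_deliver(). txrx_peer may
 * legitimately be NULL here; the no-peer path above walks the list and
 * delivers each nbuf individually. The dp_rx_example_flush_list() name
 * is hypothetical.
 */
static inline void
dp_rx_example_flush_list(struct dp_soc *soc, struct dp_vdev *vdev,
			 struct dp_txrx_peer *txrx_peer, uint16_t peer_id,
			 qdf_nbuf_t head, qdf_nbuf_t tail)
{
	dp_rx_nbuf_list_deliver(soc, vdev, txrx_peer, peer_id,
				0 /* no pkt capture offload */, head, tail);
}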
#ifdef DP_TX_RX_TPUT_SIMULATE
/*
 * Change this macro value to simulate a different RX T-put.
 * For example, if OTA is 100 Mbps and 200 Mbps is to be simulated,
 * the multiplication factor is 2; set the macro value to 1
 * (multiplication factor - 1).
 */
#define DP_RX_PKTS_DUPLICATE_CNT 0
static inline
void dp_rx_nbuf_list_dup_deliver(struct dp_soc *soc,
				 struct dp_vdev *vdev,
				 struct dp_txrx_peer *txrx_peer,
				 uint16_t peer_id,
				 uint8_t pkt_capture_offload,
				 qdf_nbuf_t ori_list_head,
				 qdf_nbuf_t ori_list_tail)
{
	qdf_nbuf_t new_skb = NULL;
	qdf_nbuf_t new_list_head = NULL;
	qdf_nbuf_t new_list_tail = NULL;
	qdf_nbuf_t nbuf = NULL;
	int i;

	for (i = 0; i < DP_RX_PKTS_DUPLICATE_CNT; i++) {
		nbuf = ori_list_head;
		new_list_head = NULL;
		new_list_tail = NULL;

		while (nbuf) {
			new_skb = qdf_nbuf_copy(nbuf);
			if (qdf_likely(new_skb))
				DP_RX_LIST_APPEND(new_list_head,
						  new_list_tail,
						  new_skb);
			else
				dp_err("copy skb failed");

			nbuf = qdf_nbuf_next(nbuf);
		}

		/* deliver the copied nbuf list */
		dp_rx_nbuf_list_deliver(soc, vdev, txrx_peer, peer_id,
					pkt_capture_offload,
					new_list_head,
					new_list_tail);
	}

	/* deliver the original skb_list */
	dp_rx_nbuf_list_deliver(soc, vdev, txrx_peer, peer_id,
				pkt_capture_offload,
				ori_list_head,
				ori_list_tail);
}

#define DP_RX_DELIVER_TO_STACK dp_rx_nbuf_list_dup_deliver

#else /* !DP_TX_RX_TPUT_SIMULATE */

#define DP_RX_DELIVER_TO_STACK dp_rx_nbuf_list_deliver

#endif /* DP_TX_RX_TPUT_SIMULATE */

#endif /* _DP_RX_H */