/*
 * Copyright (c) 2016-2021 The Linux Foundation. All rights reserved.
 * Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */

#ifndef _DP_RX_H
#define _DP_RX_H

#include "hal_rx.h"
#include "dp_peer.h"
#include "dp_internal.h"
#include <qdf_tracepoint.h>
#include "dp_ipa.h"

#ifdef RXDMA_OPTIMIZATION
#ifndef RX_DATA_BUFFER_ALIGNMENT
#define RX_DATA_BUFFER_ALIGNMENT	128
#endif
#ifndef RX_MONITOR_BUFFER_ALIGNMENT
#define RX_MONITOR_BUFFER_ALIGNMENT	128
#endif
#else /* RXDMA_OPTIMIZATION */
#define RX_DATA_BUFFER_ALIGNMENT	4
#define RX_MONITOR_BUFFER_ALIGNMENT	4
#endif /* RXDMA_OPTIMIZATION */

#if defined(WLAN_MAX_PDEVS) && (WLAN_MAX_PDEVS == 1)
#define DP_WBM2SW_RBM(sw0_bm_id)	HAL_RX_BUF_RBM_SW1_BM(sw0_bm_id)
/* RBM value used for re-injecting defragmented packets into REO */
#define DP_DEFRAG_RBM(sw0_bm_id)	HAL_RX_BUF_RBM_SW3_BM(sw0_bm_id)
#endif

#define RX_BUFFER_RESERVATION	0
#ifdef BE_PKTLOG_SUPPORT
#define BUFFER_RESIDUE	1
#define RX_MON_MIN_HEAD_ROOM	64
#endif

#define DP_DEFAULT_NOISEFLOOR	(-96)

#define DP_RX_DESC_MAGIC 0xdec0de

#define dp_rx_alert(params...) QDF_TRACE_FATAL(QDF_MODULE_ID_DP_RX, params)
#define dp_rx_err(params...) QDF_TRACE_ERROR(QDF_MODULE_ID_DP_RX, params)
#define dp_rx_warn(params...) QDF_TRACE_WARN(QDF_MODULE_ID_DP_RX, params)
#define dp_rx_info(params...) \
	__QDF_TRACE_FL(QDF_TRACE_LEVEL_INFO_HIGH, QDF_MODULE_ID_DP_RX, ## params)
#define dp_rx_info_rl(params...) \
	__QDF_TRACE_RL(QDF_TRACE_LEVEL_INFO_HIGH, QDF_MODULE_ID_DP_RX, ## params)
#define dp_rx_debug(params...) QDF_TRACE_DEBUG(QDF_MODULE_ID_DP_RX, params)

/**
 * enum dp_rx_desc_state
 *
 * @RX_DESC_REPLENISHED: rx desc replenished
 * @RX_DESC_IN_FREELIST: rx desc in freelist
 */
enum dp_rx_desc_state {
	RX_DESC_REPLENISHED,
	RX_DESC_IN_FREELIST,
};

#ifndef QCA_HOST_MODE_WIFI_DISABLED
/**
 * struct dp_rx_desc_dbg_info
 *
 * @freelist_caller: name of the function that put the
 *		     rx desc in freelist
 * @freelist_ts: timestamp when the rx desc is put in
 *		 a freelist
 * @replenish_caller: name of the function that last
 *		      replenished the rx desc
 * @replenish_ts: last replenish timestamp
 * @prev_nbuf: previous nbuf info
 * @prev_nbuf_data_addr: previous nbuf data address
 */
struct dp_rx_desc_dbg_info {
	char freelist_caller[QDF_MEM_FUNC_NAME_SIZE];
	uint64_t freelist_ts;
	char replenish_caller[QDF_MEM_FUNC_NAME_SIZE];
	uint64_t replenish_ts;
	qdf_nbuf_t prev_nbuf;
	uint8_t *prev_nbuf_data_addr;
};

#endif /* QCA_HOST_MODE_WIFI_DISABLED */

/**
 * struct dp_rx_desc
 *
 * @nbuf		: VA of the "skb" posted
 * @rx_buf_start	: VA of the original Rx buffer, before
 *			  movement of any skb->data pointer
 * @paddr_buf_start	: PA of the original Rx buffer, before
 *			  movement of any frag pointer
 * @cookie		: index into the sw array which holds
 *			  the sw Rx descriptors
 *			  Cookie space is 21 bits:
 *			  lower 18 bits -- index
 *			  upper 3 bits -- pool_id
 * @pool_id		: pool Id for which this was allocated.
 *			  Can only be used if there is no flow
 *			  steering
 * @chip_id		: chip_id indicating MLO chip_id
 *			  valid or used only in case of multi-chip MLO
 * @in_use		: rx_desc is in use
 * @unmapped		: used to mark rx_desc as unmapped if the
 *			  corresponding nbuf is already unmapped
 * @in_err_state	: Nbuf sanity failed for this descriptor.
 * @nbuf_data_addr	: VA of nbuf data posted
 */
struct dp_rx_desc {
	qdf_nbuf_t nbuf;
	uint8_t *rx_buf_start;
	qdf_dma_addr_t paddr_buf_start;
	uint32_t cookie;
	uint8_t pool_id;
	uint8_t chip_id;
#ifdef RX_DESC_DEBUG_CHECK
	uint32_t magic;
	uint8_t *nbuf_data_addr;
	struct dp_rx_desc_dbg_info *dbg_info;
#endif
	uint8_t	in_use:1,
		unmapped:1,
		in_err_state:1;
};

#ifndef QCA_HOST_MODE_WIFI_DISABLED
#ifdef ATH_RX_PRI_SAVE
#define DP_RX_TID_SAVE(_nbuf, _tid) \
	(qdf_nbuf_set_priority(_nbuf, _tid))
#else
#define DP_RX_TID_SAVE(_nbuf, _tid)
#endif

/* RX Descriptor Multi Page memory alloc related */
#define DP_RX_DESC_OFFSET_NUM_BITS 8
#define DP_RX_DESC_PAGE_ID_NUM_BITS 8
#define DP_RX_DESC_POOL_ID_NUM_BITS 4

#define DP_RX_DESC_PAGE_ID_SHIFT DP_RX_DESC_OFFSET_NUM_BITS
#define DP_RX_DESC_POOL_ID_SHIFT \
	(DP_RX_DESC_OFFSET_NUM_BITS + DP_RX_DESC_PAGE_ID_NUM_BITS)
#define RX_DESC_MULTI_PAGE_COOKIE_POOL_ID_MASK \
	(((1 << DP_RX_DESC_POOL_ID_NUM_BITS) - 1) << DP_RX_DESC_POOL_ID_SHIFT)
#define RX_DESC_MULTI_PAGE_COOKIE_PAGE_ID_MASK \
	(((1 << DP_RX_DESC_PAGE_ID_NUM_BITS) - 1) << \
	 DP_RX_DESC_PAGE_ID_SHIFT)
#define RX_DESC_MULTI_PAGE_COOKIE_OFFSET_MASK \
	((1 << DP_RX_DESC_OFFSET_NUM_BITS) - 1)
#define DP_RX_DESC_MULTI_PAGE_COOKIE_GET_POOL_ID(_cookie) \
	(((_cookie) & RX_DESC_MULTI_PAGE_COOKIE_POOL_ID_MASK) >> \
	 DP_RX_DESC_POOL_ID_SHIFT)
#define DP_RX_DESC_MULTI_PAGE_COOKIE_GET_PAGE_ID(_cookie) \
	(((_cookie) & RX_DESC_MULTI_PAGE_COOKIE_PAGE_ID_MASK) >> \
	 DP_RX_DESC_PAGE_ID_SHIFT)
#define DP_RX_DESC_MULTI_PAGE_COOKIE_GET_OFFSET(_cookie) \
	((_cookie) & RX_DESC_MULTI_PAGE_COOKIE_OFFSET_MASK)

#endif /* QCA_HOST_MODE_WIFI_DISABLED */

#define RX_DESC_COOKIE_INDEX_SHIFT	0
#define RX_DESC_COOKIE_INDEX_MASK	0x3ffff /* 18 bits */
#define RX_DESC_COOKIE_POOL_ID_SHIFT	18
#define RX_DESC_COOKIE_POOL_ID_MASK	0x1c0000

#define DP_RX_DESC_COOKIE_MAX	\
	(RX_DESC_COOKIE_INDEX_MASK | RX_DESC_COOKIE_POOL_ID_MASK)

#define DP_RX_DESC_COOKIE_POOL_ID_GET(_cookie)		\
	(((_cookie) & RX_DESC_COOKIE_POOL_ID_MASK) >>	\
			RX_DESC_COOKIE_POOL_ID_SHIFT)

#define DP_RX_DESC_COOKIE_INDEX_GET(_cookie)		\
	(((_cookie) & RX_DESC_COOKIE_INDEX_MASK) >>	\
			RX_DESC_COOKIE_INDEX_SHIFT)

#define dp_rx_add_to_free_desc_list(head, tail, new) \
	__dp_rx_add_to_free_desc_list(head, tail, new, __func__)

#define dp_rx_buffers_replenish(soc, mac_id, rxdma_srng, rx_desc_pool, \
				num_buffers, desc_list, tail, req_only) \
	__dp_rx_buffers_replenish(soc, mac_id, rxdma_srng, rx_desc_pool, \
				  num_buffers, desc_list, tail, req_only, \
				  __func__)

#ifdef WLAN_SUPPORT_RX_FISA
/**
 * dp_rx_set_hdr_pad() - set l3 padding in nbuf cb
 * @nbuf: pkt skb pointer
 * @l3_padding: l3 padding
 *
 * Return: None
 */
static inline
void dp_rx_set_hdr_pad(qdf_nbuf_t nbuf, uint32_t l3_padding)
{
	QDF_NBUF_CB_RX_PACKET_L3_HDR_PAD(nbuf) = l3_padding;
}
#else
static inline
void dp_rx_set_hdr_pad(qdf_nbuf_t nbuf, uint32_t l3_padding)
{
}
#endif
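
/*
 * Illustrative sketch (not part of the driver): decoding a single-page
 * Rx cookie with the accessors defined above. The cookie value is a
 * made-up example; per the layout documented in struct dp_rx_desc, the
 * lower 18 bits carry the index and the next 3 bits the pool id.
 *
 *	uint32_t cookie = (1 << RX_DESC_COOKIE_POOL_ID_SHIFT) | 0x2a;
 *	uint8_t pool_id = DP_RX_DESC_COOKIE_POOL_ID_GET(cookie);	// 1
 *	uint32_t index = DP_RX_DESC_COOKIE_INDEX_GET(cookie);		// 0x2a
 */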

#ifdef DP_RX_SPECIAL_FRAME_NEED
/**
 * dp_rx_is_special_frame() - check whether RX frame is a wanted special frame
 *
 * @nbuf: RX skb pointer
 * @frame_mask: the mask for special frames needed
 *
 * Check whether the RX frame matches the requested frame mask
 *
 * Return: true - special frame needed, false - no
 */
static inline
bool dp_rx_is_special_frame(qdf_nbuf_t nbuf, uint32_t frame_mask)
{
	if (((frame_mask & FRAME_MASK_IPV4_ARP) &&
	     qdf_nbuf_is_ipv4_arp_pkt(nbuf)) ||
	    ((frame_mask & FRAME_MASK_IPV4_DHCP) &&
	     qdf_nbuf_is_ipv4_dhcp_pkt(nbuf)) ||
	    ((frame_mask & FRAME_MASK_IPV4_EAPOL) &&
	     qdf_nbuf_is_ipv4_eapol_pkt(nbuf)) ||
	    ((frame_mask & FRAME_MASK_IPV6_DHCP) &&
	     qdf_nbuf_is_ipv6_dhcp_pkt(nbuf)))
		return true;

	return false;
}

/**
 * dp_rx_deliver_special_frame() - Deliver the RX special frame to stack
 *				   if it matches the mask
 *
 * @soc: Datapath soc handler
 * @peer: pointer to DP peer
 * @nbuf: pointer to the skb of RX frame
 * @frame_mask: the mask for special frames needed
 * @rx_tlv_hdr: start of rx tlv header
 *
 * Note: Msdu_len must have been stored in QDF_NBUF_CB_RX_PKT_LEN(nbuf) and
 * a single nbuf is expected.
 *
 * Return: true - nbuf has been delivered to stack, false - not.
 */
bool dp_rx_deliver_special_frame(struct dp_soc *soc, struct dp_txrx_peer *peer,
				 qdf_nbuf_t nbuf, uint32_t frame_mask,
				 uint8_t *rx_tlv_hdr);
#else
static inline
bool dp_rx_is_special_frame(qdf_nbuf_t nbuf, uint32_t frame_mask)
{
	return false;
}

static inline
bool dp_rx_deliver_special_frame(struct dp_soc *soc, struct dp_txrx_peer *peer,
				 qdf_nbuf_t nbuf, uint32_t frame_mask,
				 uint8_t *rx_tlv_hdr)
{
	return false;
}
#endif
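
/*
 * Illustrative sketch (not part of the driver): how an error path might
 * use the helpers above to salvage control frames. The mask composition
 * and the surrounding variables (soc, txrx_peer, nbuf, rx_tlv_hdr) are
 * hypothetical.
 *
 *	uint32_t mask = FRAME_MASK_IPV4_EAPOL | FRAME_MASK_IPV4_DHCP;
 *
 *	if (dp_rx_is_special_frame(nbuf, mask) &&
 *	    dp_rx_deliver_special_frame(soc, txrx_peer, nbuf, mask,
 *					rx_tlv_hdr))
 *		return;		// nbuf consumed by the stack
 */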

#ifdef FEATURE_RX_LINKSPEED_ROAM_TRIGGER
/**
 * dp_rx_data_is_specific() - Used to exclude specific frames
 *			      not practical for getting rx
 *			      stats like rate, mcs, nss, etc.
 *
 * @hal_soc_hdl: soc handler
 * @rx_tlv_hdr: rx tlv header
 * @nbuf: RX skb pointer
 *
 * Return: true - a specific frame not suitable
 *		  for getting rx stats from it.
 *	   false - a common frame suitable for
 *		   getting rx stats from it.
 */
static inline
bool dp_rx_data_is_specific(hal_soc_handle_t hal_soc_hdl,
			    uint8_t *rx_tlv_hdr,
			    qdf_nbuf_t nbuf)
{
	if (qdf_unlikely(qdf_nbuf_is_da_mcbc(nbuf)))
		return true;

	if (!hal_rx_tlv_first_mpdu_get(hal_soc_hdl, rx_tlv_hdr))
		return true;

	if (!hal_rx_msdu_end_first_msdu_get(hal_soc_hdl, rx_tlv_hdr))
		return true;

	/* ARP and EAPOL are neither IPV6 ETH nor IPV4 ETH at the L3 level */
	if (qdf_likely(hal_rx_tlv_l3_type_get(hal_soc_hdl, rx_tlv_hdr) ==
		       QDF_NBUF_TRAC_IPV4_ETH_TYPE)) {
		if (qdf_nbuf_is_ipv4_dhcp_pkt(nbuf))
			return true;
	} else if (qdf_likely(hal_rx_tlv_l3_type_get(hal_soc_hdl, rx_tlv_hdr) ==
			      QDF_NBUF_TRAC_IPV6_ETH_TYPE)) {
		if (qdf_nbuf_is_ipv6_dhcp_pkt(nbuf))
			return true;
	} else {
		return true;
	}

	return false;
}
#else
static inline
bool dp_rx_data_is_specific(hal_soc_handle_t hal_soc_hdl,
			    uint8_t *rx_tlv_hdr,
			    qdf_nbuf_t nbuf)
{
	/*
	 * default return is true to make sure that rx stats
	 * will not be handled when this feature is disabled
	 */
	return true;
}
#endif /* FEATURE_RX_LINKSPEED_ROAM_TRIGGER */

#ifndef QCA_HOST_MODE_WIFI_DISABLED
#ifdef DP_RX_DISABLE_NDI_MDNS_FORWARDING
static inline
bool dp_rx_check_ndi_mdns_fwding(struct dp_txrx_peer *ta_txrx_peer,
				 qdf_nbuf_t nbuf)
{
	if (ta_txrx_peer->vdev->opmode == wlan_op_mode_ndi &&
	    qdf_nbuf_is_ipv6_mdns_pkt(nbuf)) {
		DP_PEER_PER_PKT_STATS_INC(ta_txrx_peer,
					  rx.intra_bss.mdns_no_fwd, 1);
		return false;
	}
	return true;
}
#else
static inline
bool dp_rx_check_ndi_mdns_fwding(struct dp_txrx_peer *ta_txrx_peer,
				 qdf_nbuf_t nbuf)
{
	return true;
}
#endif
#endif /* QCA_HOST_MODE_WIFI_DISABLED */

/* DOC: Offset to obtain LLC hdr
 *
 * In the case of Wifi parse error
 * to reach LLC header from beginning
 * of VLAN tag we need to skip 8 bytes.
 * Vlan_tag(4)+length(2)+length added
 * by HW(2) = 8 bytes.
 */
#define DP_SKIP_VLAN	8

#ifndef QCA_HOST_MODE_WIFI_DISABLED

/**
 * struct dp_rx_cached_buf - rx cached buffer
 * @node: linked list node
 * @buf: skb buffer
 */
struct dp_rx_cached_buf {
	qdf_list_node_t node;
	qdf_nbuf_t buf;
};

#endif /* QCA_HOST_MODE_WIFI_DISABLED */

/*
 * dp_rx_xor_block() - xor block of data
 * @b: destination data block
 * @a: source data block
 * @len: length of the data to process
 *
 * Return: None
 */
static inline void dp_rx_xor_block(uint8_t *b, const uint8_t *a, qdf_size_t len)
{
	qdf_size_t i;

	for (i = 0; i < len; i++)
		b[i] ^= a[i];
}

/*
 * dp_rx_rotl() - rotate the bits left
 * @val: unsigned integer input value
 * @bits: number of bits
 *
 * Return: Integer with left rotated by number of 'bits'
 */
static inline uint32_t dp_rx_rotl(uint32_t val, int bits)
{
	return (val << bits) | (val >> (32 - bits));
}

/*
 * dp_rx_rotr() - rotate the bits right
 * @val: unsigned integer input value
 * @bits: number of bits
 *
 * Return: Integer with right rotated by number of 'bits'
 */
static inline uint32_t dp_rx_rotr(uint32_t val, int bits)
{
	return (val >> bits) | (val << (32 - bits));
}

/*
 * dp_set_rx_queue() - set queue_mapping in skb
 * @nbuf: skb
 * @queue_id: rx queue_id
 *
 * Return: void
 */
#ifdef QCA_OL_RX_MULTIQ_SUPPORT
static inline void dp_set_rx_queue(qdf_nbuf_t nbuf, uint8_t queue_id)
{
	qdf_nbuf_record_rx_queue(nbuf, queue_id);
}
#else
static inline void dp_set_rx_queue(qdf_nbuf_t nbuf, uint8_t queue_id)
{
}
#endif

/*
 * dp_rx_xswap() - swap the adjacent bytes within a 32-bit word
 * @val: unsigned integer input value
 *
 * Return: Integer with bytes swapped
 */
static inline uint32_t dp_rx_xswap(uint32_t val)
{
	return ((val & 0x00ff00ff) << 8) | ((val & 0xff00ff00) >> 8);
}

/*
 * dp_rx_get_le32_split() - get little endian 32 bits split
 * @b0: byte 0
 * @b1: byte 1
 * @b2: byte 2
 * @b3: byte 3
 *
 * Return: Integer with split little endian 32 bits
 */
static inline uint32_t dp_rx_get_le32_split(uint8_t b0, uint8_t b1, uint8_t b2,
					    uint8_t b3)
{
	return b0 | (b1 << 8) | (b2 << 16) | (b3 << 24);
}

/*
 * dp_rx_get_le32() - get little endian 32 bits
 * @p: source byte array
 *
 * Return: Integer with little endian 32 bits
 */
static inline uint32_t dp_rx_get_le32(const uint8_t *p)
{
	return dp_rx_get_le32_split(p[0], p[1], p[2], p[3]);
}

/*
 * dp_rx_put_le32() - put little endian 32 bits
 * @p: destination char array
 * @v: source 32-bit integer
 *
 * Return: None
 */
static inline void dp_rx_put_le32(uint8_t *p, uint32_t v)
{
	p[0] = (v) & 0xff;
	p[1] = (v >> 8) & 0xff;
	p[2] = (v >> 16) & 0xff;
	p[3] = (v >> 24) & 0xff;
}

/* Extract Michael MIC block of data */
#define dp_rx_michael_block(l, r) \
	do { \
		r ^= dp_rx_rotl(l, 17); \
		l += r; \
		r ^= dp_rx_xswap(l); \
		l += r; \
		r ^= dp_rx_rotl(l, 3); \
		l += r; \
		r ^= dp_rx_rotr(l, 2); \
		l += r; \
	} while (0)
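
/*
 * Illustrative sketch (not part of the driver): one Michael MIC block
 * round using the helpers above. 'key' and 'data' are hypothetical byte
 * arrays; a real MIC computation iterates this over the whole message.
 *
 *	uint32_t l = dp_rx_get_le32(key);
 *	uint32_t r = dp_rx_get_le32(key + 4);
 *
 *	l ^= dp_rx_get_le32(data);	// mix in the next 32-bit word
 *	dp_rx_michael_block(l, r);
 */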

/**
 * union dp_rx_desc_list_elem_t
 *
 * @next: Next pointer to form free list
 * @rx_desc: DP Rx descriptor
 */
union dp_rx_desc_list_elem_t {
	union dp_rx_desc_list_elem_t *next;
	struct dp_rx_desc rx_desc;
};

#ifdef RX_DESC_MULTI_PAGE_ALLOC
/**
 * dp_rx_desc_find() - find dp rx descriptor from page ID and offset
 * @page_id: Page ID
 * @offset: Offset of the descriptor element
 * @rx_pool: RX descriptor pool
 *
 * Return: RX descriptor element
 */
union dp_rx_desc_list_elem_t *dp_rx_desc_find(uint16_t page_id, uint16_t offset,
					      struct rx_desc_pool *rx_pool);

static inline
struct dp_rx_desc *dp_get_rx_desc_from_cookie(struct dp_soc *soc,
					      struct rx_desc_pool *pool,
					      uint32_t cookie)
{
	uint8_t pool_id = DP_RX_DESC_MULTI_PAGE_COOKIE_GET_POOL_ID(cookie);
	uint16_t page_id = DP_RX_DESC_MULTI_PAGE_COOKIE_GET_PAGE_ID(cookie);
	uint8_t offset = DP_RX_DESC_MULTI_PAGE_COOKIE_GET_OFFSET(cookie);
	struct rx_desc_pool *rx_desc_pool;
	union dp_rx_desc_list_elem_t *rx_desc_elem;

	if (qdf_unlikely(pool_id >= MAX_PDEV_CNT))
		return NULL;

	rx_desc_pool = &pool[pool_id];
	rx_desc_elem = (union dp_rx_desc_list_elem_t *)
		(rx_desc_pool->desc_pages.cacheable_pages[page_id] +
		 rx_desc_pool->elem_size * offset);

	return &rx_desc_elem->rx_desc;
}

static inline
struct dp_rx_desc *dp_get_rx_mon_status_desc_from_cookie(struct dp_soc *soc,
							 struct rx_desc_pool *pool,
							 uint32_t cookie)
{
	uint8_t pool_id = DP_RX_DESC_MULTI_PAGE_COOKIE_GET_POOL_ID(cookie);
	uint16_t page_id = DP_RX_DESC_MULTI_PAGE_COOKIE_GET_PAGE_ID(cookie);
	uint8_t offset = DP_RX_DESC_MULTI_PAGE_COOKIE_GET_OFFSET(cookie);
	struct rx_desc_pool *rx_desc_pool;
	union dp_rx_desc_list_elem_t *rx_desc_elem;

	if (qdf_unlikely(pool_id >= NUM_RXDMA_RINGS_PER_PDEV))
		return NULL;

	rx_desc_pool = &pool[pool_id];
	rx_desc_elem = (union dp_rx_desc_list_elem_t *)
		(rx_desc_pool->desc_pages.cacheable_pages[page_id] +
		 rx_desc_pool->elem_size * offset);

	return &rx_desc_elem->rx_desc;
}

/**
 * dp_rx_cookie_2_va_rxdma_buf() - Converts cookie to a virtual address of
 *				   the Rx descriptor on Rx DMA source ring buffer
 * @soc: core txrx main context
 * @cookie: cookie used to lookup virtual address
 *
 * Return: Pointer to the Rx descriptor
 */
static inline
struct dp_rx_desc *dp_rx_cookie_2_va_rxdma_buf(struct dp_soc *soc,
					       uint32_t cookie)
{
	return dp_get_rx_desc_from_cookie(soc, &soc->rx_desc_buf[0], cookie);
}

/**
 * dp_rx_cookie_2_va_mon_buf() - Converts cookie to a virtual address of
 *				 the Rx descriptor on monitor ring buffer
 * @soc: core txrx main context
 * @cookie: cookie used to lookup virtual address
 *
 * Return: Pointer to the Rx descriptor
 */
static inline
struct dp_rx_desc *dp_rx_cookie_2_va_mon_buf(struct dp_soc *soc,
					     uint32_t cookie)
{
	return dp_get_rx_desc_from_cookie(soc, &soc->rx_desc_mon[0], cookie);
}

/**
 * dp_rx_cookie_2_va_mon_status() - Converts cookie to a virtual address of
 *				    the Rx descriptor on monitor status ring buffer
 * @soc: core txrx main context
 * @cookie: cookie used to lookup virtual address
 *
 * Return: Pointer to the Rx descriptor
 */
static inline
struct dp_rx_desc *dp_rx_cookie_2_va_mon_status(struct dp_soc *soc,
						uint32_t cookie)
{
	return dp_get_rx_mon_status_desc_from_cookie(soc,
						     &soc->rx_desc_status[0],
						     cookie);
}
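
/*
 * Illustrative sketch (not part of the driver): the multi-page cookie is
 * a (pool, page, offset) triplet. A made-up encode/decode round trip
 * using the shifts and accessors defined above:
 *
 *	uint32_t cookie = (2 << DP_RX_DESC_POOL_ID_SHIFT) |
 *			  (5 << DP_RX_DESC_PAGE_ID_SHIFT) | 7;
 *	// DP_RX_DESC_MULTI_PAGE_COOKIE_GET_POOL_ID(cookie) == 2
 *	// DP_RX_DESC_MULTI_PAGE_COOKIE_GET_PAGE_ID(cookie) == 5
 *	// DP_RX_DESC_MULTI_PAGE_COOKIE_GET_OFFSET(cookie)  == 7
 */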

#else

void dp_rx_desc_pool_init(struct dp_soc *soc, uint32_t pool_id,
			  uint32_t pool_size,
			  struct rx_desc_pool *rx_desc_pool);

/**
 * dp_rx_cookie_2_va_rxdma_buf() - Converts cookie to a virtual address of
 *				   the Rx descriptor on Rx DMA source ring buffer
 * @soc: core txrx main context
 * @cookie: cookie used to lookup virtual address
 *
 * Return: void *: Virtual Address of the Rx descriptor
 */
static inline
void *dp_rx_cookie_2_va_rxdma_buf(struct dp_soc *soc, uint32_t cookie)
{
	uint8_t pool_id = DP_RX_DESC_COOKIE_POOL_ID_GET(cookie);
	uint16_t index = DP_RX_DESC_COOKIE_INDEX_GET(cookie);
	struct rx_desc_pool *rx_desc_pool;

	if (qdf_unlikely(pool_id >= MAX_RXDESC_POOLS))
		return NULL;

	rx_desc_pool = &soc->rx_desc_buf[pool_id];

	if (qdf_unlikely(index >= rx_desc_pool->pool_size))
		return NULL;

	return &rx_desc_pool->array[index].rx_desc;
}

/**
 * dp_rx_cookie_2_va_mon_buf() - Converts cookie to a virtual address of
 *				 the Rx descriptor on monitor ring buffer
 * @soc: core txrx main context
 * @cookie: cookie used to lookup virtual address
 *
 * Return: void *: Virtual Address of the Rx descriptor
 */
static inline
void *dp_rx_cookie_2_va_mon_buf(struct dp_soc *soc, uint32_t cookie)
{
	uint8_t pool_id = DP_RX_DESC_COOKIE_POOL_ID_GET(cookie);
	uint16_t index = DP_RX_DESC_COOKIE_INDEX_GET(cookie);
	/* TODO */
	/* Add sanity for pool_id & index */
	return &(soc->rx_desc_mon[pool_id].array[index].rx_desc);
}

/**
 * dp_rx_cookie_2_va_mon_status() - Converts cookie to a virtual address of
 *				    the Rx descriptor on monitor status ring buffer
 * @soc: core txrx main context
 * @cookie: cookie used to lookup virtual address
 *
 * Return: void *: Virtual Address of the Rx descriptor
 */
static inline
void *dp_rx_cookie_2_va_mon_status(struct dp_soc *soc, uint32_t cookie)
{
	uint8_t pool_id = DP_RX_DESC_COOKIE_POOL_ID_GET(cookie);
	uint16_t index = DP_RX_DESC_COOKIE_INDEX_GET(cookie);
	/* TODO */
	/* Add sanity for pool_id & index */
	return &(soc->rx_desc_status[pool_id].array[index].rx_desc);
}
#endif /* RX_DESC_MULTI_PAGE_ALLOC */

#ifndef QCA_HOST_MODE_WIFI_DISABLED

static inline bool dp_rx_check_ap_bridge(struct dp_vdev *vdev)
{
	return vdev->ap_bridge_enabled;
}

#ifdef DP_RX_DESC_COOKIE_INVALIDATE
static inline QDF_STATUS
dp_rx_cookie_check_and_invalidate(hal_ring_desc_t ring_desc)
{
	if (qdf_unlikely(HAL_RX_REO_BUF_COOKIE_INVALID_GET(ring_desc)))
		return QDF_STATUS_E_FAILURE;

	HAL_RX_REO_BUF_COOKIE_INVALID_SET(ring_desc);
	return QDF_STATUS_SUCCESS;
}

/**
 * dp_rx_cookie_reset_invalid_bit() - Reset the invalid bit of the cookie
 *				      field in ring descriptor
 * @ring_desc: ring descriptor
 *
 * Return: None
 */
static inline void
dp_rx_cookie_reset_invalid_bit(hal_ring_desc_t ring_desc)
{
	HAL_RX_REO_BUF_COOKIE_INVALID_RESET(ring_desc);
}
#else
static inline QDF_STATUS
dp_rx_cookie_check_and_invalidate(hal_ring_desc_t ring_desc)
{
	return QDF_STATUS_SUCCESS;
}

static inline void
dp_rx_cookie_reset_invalid_bit(hal_ring_desc_t ring_desc)
{
}
#endif

#endif /* QCA_HOST_MODE_WIFI_DISABLED */

#if defined(RX_DESC_MULTI_PAGE_ALLOC) && \
	defined(DP_WAR_VALIDATE_RX_ERR_MSDU_COOKIE)
/**
 * dp_rx_is_sw_cookie_valid() - check whether SW cookie is valid
 * @soc: dp soc ref
 * @cookie: Rx buf SW cookie value
 *
 * Return: true if cookie is valid else false
 */
static inline bool dp_rx_is_sw_cookie_valid(struct dp_soc *soc,
					    uint32_t cookie)
{
	uint8_t pool_id = DP_RX_DESC_MULTI_PAGE_COOKIE_GET_POOL_ID(cookie);
	uint16_t page_id = DP_RX_DESC_MULTI_PAGE_COOKIE_GET_PAGE_ID(cookie);
	uint8_t offset = DP_RX_DESC_MULTI_PAGE_COOKIE_GET_OFFSET(cookie);
	struct rx_desc_pool *rx_desc_pool;

	if (qdf_unlikely(pool_id >= MAX_PDEV_CNT))
		goto fail;

	rx_desc_pool = &soc->rx_desc_buf[pool_id];

	if (page_id >= rx_desc_pool->desc_pages.num_pages ||
	    offset >= rx_desc_pool->desc_pages.num_element_per_page)
		goto fail;

	return true;

fail:
	DP_STATS_INC(soc, rx.err.invalid_cookie, 1);
	return false;
}
#else
/**
 * dp_rx_is_sw_cookie_valid() - check whether SW cookie is valid
 * @soc: dp soc ref
 * @cookie: Rx buf SW cookie value
 *
 * When multi page alloc is disabled, SW cookie validity is
 * checked while fetching the Rx descriptor, so no need to check here.
 *
 * Return: true if cookie is valid else false
 */
static inline bool dp_rx_is_sw_cookie_valid(struct dp_soc *soc,
					    uint32_t cookie)
{
	return true;
}
#endif

QDF_STATUS dp_rx_desc_pool_is_allocated(struct rx_desc_pool *rx_desc_pool);
QDF_STATUS dp_rx_desc_pool_alloc(struct dp_soc *soc,
				 uint32_t pool_size,
				 struct rx_desc_pool *rx_desc_pool);

void dp_rx_desc_pool_init(struct dp_soc *soc, uint32_t pool_id,
			  uint32_t pool_size,
			  struct rx_desc_pool *rx_desc_pool);

void dp_rx_add_desc_list_to_free_list(struct dp_soc *soc,
				      union dp_rx_desc_list_elem_t **local_desc_list,
				      union dp_rx_desc_list_elem_t **tail,
				      uint16_t pool_id,
				      struct rx_desc_pool *rx_desc_pool);

uint16_t dp_rx_get_free_desc_list(struct dp_soc *soc, uint32_t pool_id,
				  struct rx_desc_pool *rx_desc_pool,
				  uint16_t num_descs,
				  union dp_rx_desc_list_elem_t **desc_list,
				  union dp_rx_desc_list_elem_t **tail);

QDF_STATUS dp_rx_pdev_desc_pool_alloc(struct dp_pdev *pdev);
void dp_rx_pdev_desc_pool_free(struct dp_pdev *pdev);

QDF_STATUS dp_rx_pdev_desc_pool_init(struct dp_pdev *pdev);
void dp_rx_pdev_desc_pool_deinit(struct dp_pdev *pdev);
void dp_rx_desc_pool_deinit(struct dp_soc *soc,
			    struct rx_desc_pool *rx_desc_pool,
			    uint32_t pool_id);

QDF_STATUS dp_rx_pdev_attach(struct dp_pdev *pdev);
QDF_STATUS dp_rx_pdev_buffers_alloc(struct dp_pdev *pdev);
void dp_rx_pdev_buffers_free(struct dp_pdev *pdev);

void dp_rx_pdev_detach(struct dp_pdev *pdev);

void dp_print_napi_stats(struct dp_soc *soc);

/**
 * dp_rx_vdev_detach() - detach vdev from dp rx
 * @vdev: virtual device instance
 *
 * Return: QDF_STATUS_SUCCESS: success
 *	   QDF_STATUS_E_RESOURCES: Error return
 */
QDF_STATUS dp_rx_vdev_detach(struct dp_vdev *vdev);

#ifndef QCA_HOST_MODE_WIFI_DISABLED

uint32_t
dp_rx_process(struct dp_intr *int_ctx, hal_ring_handle_t hal_ring_hdl,
	      uint8_t reo_ring_num,
	      uint32_t quota);
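
/*
 * Illustrative sketch (not part of the driver): a poll/NAPI-style caller
 * would drain a REO destination ring through dp_rx_process() against a
 * budget. int_ctx, hal_ring_hdl, reo_ring_num and budget are assumed to
 * come from the caller's interrupt context.
 *
 *	uint32_t done = dp_rx_process(int_ctx, hal_ring_hdl,
 *				      reo_ring_num, budget);
 *	budget -= done;
 */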

/**
 * dp_rx_err_process() - Processes error frames routed to REO error ring
 * @int_ctx: pointer to DP interrupt context
 * @soc: core txrx main context
 * @hal_ring_hdl: opaque pointer to the HAL Rx Error Ring, which will be serviced
 * @quota: No. of units (packets) that can be serviced in one shot.
 *
 * This function implements error processing and top level demultiplexer
 * for all the frames routed to REO error ring.
 *
 * Return: uint32_t: No. of elements processed
 */
uint32_t dp_rx_err_process(struct dp_intr *int_ctx, struct dp_soc *soc,
			   hal_ring_handle_t hal_ring_hdl, uint32_t quota);

/**
 * dp_rx_wbm_err_process() - Processes error frames routed to WBM release ring
 * @int_ctx: pointer to DP interrupt context
 * @soc: core txrx main context
 * @hal_ring_hdl: opaque pointer to the HAL Rx Error Ring, which will be serviced
 * @quota: No. of units (packets) that can be serviced in one shot.
 *
 * This function implements error processing and top level demultiplexer
 * for all the frames routed to WBM2HOST sw release ring.
 *
 * Return: uint32_t: No. of elements processed
 */
uint32_t
dp_rx_wbm_err_process(struct dp_intr *int_ctx, struct dp_soc *soc,
		      hal_ring_handle_t hal_ring_hdl, uint32_t quota);

/**
 * dp_rx_sg_create() - create a frag_list for MSDUs which are spread across
 *		       multiple nbufs.
 * @soc: core txrx main context
 * @nbuf: pointer to the first msdu of an amsdu.
 *
 * This function implements the creation of RX frag_list for cases
 * where an MSDU is spread across multiple nbufs.
 *
 * Return: returns the head nbuf which contains complete frag_list.
 */
qdf_nbuf_t dp_rx_sg_create(struct dp_soc *soc, qdf_nbuf_t nbuf);

/*
 * dp_rx_desc_nbuf_and_pool_free() - free the sw rx desc pool called during
 *				     de-initialization of wifi module.
 *
 * @soc: core txrx main context
 * @pool_id: pool_id which is one of 3 mac_ids
 * @rx_desc_pool: rx descriptor pool pointer
 *
 * Return: None
 */
void dp_rx_desc_nbuf_and_pool_free(struct dp_soc *soc, uint32_t pool_id,
				   struct rx_desc_pool *rx_desc_pool);

#endif /* QCA_HOST_MODE_WIFI_DISABLED */

/*
 * dp_rx_desc_nbuf_free() - free the sw rx desc nbufs called during
 *			    de-initialization of wifi module.
 *
 * @soc: core txrx main context
 * @rx_desc_pool: rx descriptor pool pointer
 *
 * Return: None
 */
void dp_rx_desc_nbuf_free(struct dp_soc *soc,
			  struct rx_desc_pool *rx_desc_pool);

#ifdef DP_RX_MON_MEM_FRAG
/*
 * dp_rx_desc_frag_free() - free the sw rx desc frag called during
 *			    de-initialization of wifi module.
 *
 * @soc: core txrx main context
 * @rx_desc_pool: rx descriptor pool pointer
 *
 * Return: None
 */
void dp_rx_desc_frag_free(struct dp_soc *soc,
			  struct rx_desc_pool *rx_desc_pool);
#else
static inline
void dp_rx_desc_frag_free(struct dp_soc *soc,
			  struct rx_desc_pool *rx_desc_pool)
{
}
#endif
/*
 * dp_rx_desc_pool_free() - free the sw rx desc array called during
 *			    de-initialization of wifi module.
 *
 * @soc: core txrx main context
 * @rx_desc_pool: rx descriptor pool pointer
 *
 * Return: None
 */
void dp_rx_desc_pool_free(struct dp_soc *soc,
			  struct rx_desc_pool *rx_desc_pool);

void dp_rx_deliver_raw(struct dp_vdev *vdev, qdf_nbuf_t nbuf_list,
		       struct dp_txrx_peer *peer);

#ifdef RX_DESC_LOGGING
/*
 * dp_rx_desc_alloc_dbg_info() - Alloc memory for rx descriptor debug
 *				 structure
 * @rx_desc: rx descriptor pointer
 *
 * Return: None
 */
static inline
void dp_rx_desc_alloc_dbg_info(struct dp_rx_desc *rx_desc)
{
	rx_desc->dbg_info = qdf_mem_malloc(sizeof(struct dp_rx_desc_dbg_info));
}

/*
 * dp_rx_desc_free_dbg_info() - Free rx descriptor debug
 *				structure memory
 * @rx_desc: rx descriptor pointer
 *
 * Return: None
 */
static inline
void dp_rx_desc_free_dbg_info(struct dp_rx_desc *rx_desc)
{
	qdf_mem_free(rx_desc->dbg_info);
}

/*
 * dp_rx_desc_update_dbg_info() - Update rx descriptor debug info
 *				  structure memory
 * @rx_desc: rx descriptor pointer
 * @func_name: name of the caller function
 * @flag: flag to indicate replenish or freelist transition
 *
 * Return: None
 */
static
void dp_rx_desc_update_dbg_info(struct dp_rx_desc *rx_desc,
				const char *func_name, uint8_t flag)
{
	struct dp_rx_desc_dbg_info *info = rx_desc->dbg_info;

	if (!info)
		return;

	if (flag == RX_DESC_REPLENISHED) {
		qdf_str_lcopy(info->replenish_caller, func_name,
			      QDF_MEM_FUNC_NAME_SIZE);
		info->replenish_ts = qdf_get_log_timestamp();
	} else {
		qdf_str_lcopy(info->freelist_caller, func_name,
			      QDF_MEM_FUNC_NAME_SIZE);
		info->freelist_ts = qdf_get_log_timestamp();
		info->prev_nbuf = rx_desc->nbuf;
		info->prev_nbuf_data_addr = rx_desc->nbuf_data_addr;
		rx_desc->nbuf_data_addr = NULL;
	}
}
#else

static inline
void dp_rx_desc_alloc_dbg_info(struct dp_rx_desc *rx_desc)
{
}

static inline
void dp_rx_desc_free_dbg_info(struct dp_rx_desc *rx_desc)
{
}

static inline
void dp_rx_desc_update_dbg_info(struct dp_rx_desc *rx_desc,
				const char *func_name, uint8_t flag)
{
}
#endif /* RX_DESC_LOGGING */

/**
 * dp_rx_add_to_free_desc_list() - Adds to a local free descriptor list
 *
 * @head: pointer to the head of local free list
 * @tail: pointer to the tail of local free list
 * @new: new descriptor that is added to the free list
 * @func_name: caller func name
 *
 * Return: void
 */
static inline
void __dp_rx_add_to_free_desc_list(union dp_rx_desc_list_elem_t **head,
				   union dp_rx_desc_list_elem_t **tail,
				   struct dp_rx_desc *new, const char *func_name)
{
	qdf_assert(head && new);

	dp_rx_desc_update_dbg_info(new, func_name, RX_DESC_IN_FREELIST);

	new->nbuf = NULL;
	new->in_use = 0;

	((union dp_rx_desc_list_elem_t *)new)->next = *head;
	*head = (union dp_rx_desc_list_elem_t *)new;
	/* reset tail if head->next is NULL */
	if (!*tail || !(*head)->next)
		*tail = *head;
}
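
/*
 * Illustrative sketch (not part of the driver): collecting descriptors
 * onto a local free list while reaping a ring, then handing the list back
 * to the pool in one shot. rx_desc, pool_id and rx_desc_pool are assumed
 * to come from the reap loop.
 *
 *	union dp_rx_desc_list_elem_t *head = NULL, *tail = NULL;
 *
 *	dp_rx_add_to_free_desc_list(&head, &tail, rx_desc);
 *	...
 *	dp_rx_add_desc_list_to_free_list(soc, &head, &tail,
 *					 pool_id, rx_desc_pool);
 */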

uint8_t dp_rx_process_invalid_peer(struct dp_soc *soc, qdf_nbuf_t nbuf,
				   uint8_t mac_id);
void dp_rx_process_invalid_peer_wrapper(struct dp_soc *soc,
					qdf_nbuf_t mpdu, bool mpdu_done,
					uint8_t mac_id);
void dp_rx_process_mic_error(struct dp_soc *soc, qdf_nbuf_t nbuf,
			     uint8_t *rx_tlv_hdr, struct dp_txrx_peer *peer);
void dp_2k_jump_handle(struct dp_soc *soc, qdf_nbuf_t nbuf,
		       uint8_t *rx_tlv_hdr, uint16_t peer_id, uint8_t tid);

#define DP_RX_HEAD_APPEND(head, elem) \
	do {                                                \
		qdf_nbuf_set_next((elem), (head));	    \
		(head) = (elem);                            \
	} while (0)

#define DP_RX_LIST_APPEND(head, tail, elem) \
	do {                                                      \
		if (!(head)) {                                    \
			(head) = (elem);                          \
			QDF_NBUF_CB_RX_NUM_ELEMENTS_IN_LIST(head) = 1;\
		} else {                                          \
			qdf_nbuf_set_next((tail), (elem));        \
			QDF_NBUF_CB_RX_NUM_ELEMENTS_IN_LIST(head)++; \
		}                                                 \
		(tail) = (elem);                                  \
		qdf_nbuf_set_next((tail), NULL);                  \
	} while (0)

#define DP_RX_MERGE_TWO_LIST(phead, ptail, chead, ctail) \
	do {                                                          \
		if (!(phead)) {                                       \
			(phead) = (chead);                            \
		} else {                                              \
			qdf_nbuf_set_next((ptail), (chead));          \
			QDF_NBUF_CB_RX_NUM_ELEMENTS_IN_LIST(phead) += \
				QDF_NBUF_CB_RX_NUM_ELEMENTS_IN_LIST(chead); \
		}                                                     \
		(ptail) = (ctail);                                    \
		qdf_nbuf_set_next((ptail), NULL);                     \
	} while (0)
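
/*
 * Illustrative sketch (not part of the driver): building an nbuf delivery
 * list with the macros above; the element count is maintained in the head
 * nbuf's control block. chead/ctail stand in for a child list produced
 * elsewhere.
 *
 *	qdf_nbuf_t head = NULL, tail = NULL;
 *
 *	DP_RX_LIST_APPEND(head, tail, nbuf);	// per reaped MSDU
 *	...
 *	DP_RX_MERGE_TWO_LIST(head, tail, chead, ctail);
 */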

#if defined(QCA_PADDR_CHECK_ON_3TH_PLATFORM)
/*
 * on some third-party platforms, the memory below 0x2000
 * is reserved for target use, so any memory allocated in this
 * region should not be used by host
 */
#define MAX_RETRY 50
#define DP_PHY_ADDR_RESERVED 0x2000
#elif defined(BUILD_X86)
/*
 * in M2M emulation platforms (x86) the memory below 0x50000000
 * is reserved for target use, so any memory allocated in this
 * region should not be used by host
 */
#define MAX_RETRY 100
#define DP_PHY_ADDR_RESERVED 0x50000000
#endif

#if defined(QCA_PADDR_CHECK_ON_3TH_PLATFORM) || defined(BUILD_X86)
/**
 * dp_check_paddr() - check if current phy address is valid or not
 * @dp_soc: core txrx main context
 * @rx_netbuf: skb buffer
 * @paddr: physical address
 * @rx_desc_pool: struct of rx descriptor pool
 *
 * If the physical address of nbuf->data is less than
 * DP_PHY_ADDR_RESERVED, free the nbuf and try allocating a new one,
 * retrying up to MAX_RETRY times.
 *
 * This is a temp WAR till we fix it properly.
 *
 * Return: success or failure.
 */
static inline
int dp_check_paddr(struct dp_soc *dp_soc,
		   qdf_nbuf_t *rx_netbuf,
		   qdf_dma_addr_t *paddr,
		   struct rx_desc_pool *rx_desc_pool)
{
	uint32_t nbuf_retry = 0;
	int32_t ret;

	if (qdf_likely(*paddr > DP_PHY_ADDR_RESERVED))
		return QDF_STATUS_SUCCESS;

	do {
		dp_debug("invalid phy addr 0x%llx, trying again",
			 (uint64_t)(*paddr));
		nbuf_retry++;
		if ((*rx_netbuf)) {
			/* Not freeing buffer intentionally.
			 * Observed that same buffer is getting
			 * re-allocated resulting in longer load time
			 * WMI init timeout.
			 * This buffer is anyway not useful so skip it.
			 * Add such buffers to the invalid list and free
			 * them at driver unload.
			 */
			qdf_nbuf_unmap_nbytes_single(dp_soc->osdev,
						     *rx_netbuf,
						     QDF_DMA_FROM_DEVICE,
						     rx_desc_pool->buf_size);
			qdf_nbuf_queue_add(&dp_soc->invalid_buf_queue,
					   *rx_netbuf);
		}

		*rx_netbuf = qdf_nbuf_alloc(dp_soc->osdev,
					    rx_desc_pool->buf_size,
					    RX_BUFFER_RESERVATION,
					    rx_desc_pool->buf_alignment,
					    FALSE);

		if (qdf_unlikely(!(*rx_netbuf)))
			return QDF_STATUS_E_FAILURE;

		ret = qdf_nbuf_map_nbytes_single(dp_soc->osdev,
						 *rx_netbuf,
						 QDF_DMA_FROM_DEVICE,
						 rx_desc_pool->buf_size);

		if (qdf_unlikely(ret == QDF_STATUS_E_FAILURE)) {
			qdf_nbuf_free(*rx_netbuf);
			*rx_netbuf = NULL;
			continue;
		}

		*paddr = qdf_nbuf_get_frag_paddr(*rx_netbuf, 0);

		if (qdf_likely(*paddr > DP_PHY_ADDR_RESERVED))
			return QDF_STATUS_SUCCESS;

	} while (nbuf_retry < MAX_RETRY);

	if ((*rx_netbuf)) {
		qdf_nbuf_unmap_nbytes_single(dp_soc->osdev,
					     *rx_netbuf,
					     QDF_DMA_FROM_DEVICE,
					     rx_desc_pool->buf_size);
		qdf_nbuf_queue_add(&dp_soc->invalid_buf_queue,
				   *rx_netbuf);
	}

	return QDF_STATUS_E_FAILURE;
}

#else
static inline
int dp_check_paddr(struct dp_soc *dp_soc,
		   qdf_nbuf_t *rx_netbuf,
		   qdf_dma_addr_t *paddr,
		   struct rx_desc_pool *rx_desc_pool)
{
	return QDF_STATUS_SUCCESS;
}

#endif

/**
 * dp_rx_cookie_2_link_desc_va() - Converts cookie to a virtual address of
 *				   the MSDU Link Descriptor
 * @soc: core txrx main context
 * @buf_info: buf_info includes cookie that is used to lookup
 *	      virtual address of link descriptor after deriving the page id
 *	      and the offset or index of the desc on the associated page.
 *
 * This is the VA of the link descriptor, that HAL layer later uses to
 * retrieve the list of MSDU's for a given MPDU.
 *
 * Return: void *: Virtual Address of the MSDU link descriptor
 */
static inline
void *dp_rx_cookie_2_link_desc_va(struct dp_soc *soc,
				  struct hal_buf_info *buf_info)
{
	void *link_desc_va;
	struct qdf_mem_multi_page_t *pages;
	uint16_t page_id = LINK_DESC_COOKIE_PAGE_ID(buf_info->sw_cookie);

	pages = &soc->link_desc_pages;
	if (!pages)
		return NULL;
	if (qdf_unlikely(page_id >= pages->num_pages))
		return NULL;
	link_desc_va = pages->dma_pages[page_id].page_v_addr_start +
		(buf_info->paddr - pages->dma_pages[page_id].page_p_addr);
	return link_desc_va;
}

#ifndef QCA_HOST_MODE_WIFI_DISABLED
#ifdef DISABLE_EAPOL_INTRABSS_FWD
#ifdef WLAN_FEATURE_11BE_MLO
static inline bool dp_nbuf_dst_addr_is_mld_addr(struct dp_vdev *vdev,
						qdf_nbuf_t nbuf)
{
	struct qdf_mac_addr *self_mld_mac_addr =
		(struct qdf_mac_addr *)vdev->mld_mac_addr.raw;
	return qdf_is_macaddr_equal(self_mld_mac_addr,
				    (struct qdf_mac_addr *)qdf_nbuf_data(nbuf) +
				    QDF_NBUF_DEST_MAC_OFFSET);
}
#else
static inline bool dp_nbuf_dst_addr_is_mld_addr(struct dp_vdev *vdev,
						qdf_nbuf_t nbuf)
{
	return false;
}
#endif

static inline bool dp_nbuf_dst_addr_is_self_addr(struct dp_vdev *vdev,
						 qdf_nbuf_t nbuf)
{
	return qdf_is_macaddr_equal((struct qdf_mac_addr *)vdev->mac_addr.raw,
				    (struct qdf_mac_addr *)qdf_nbuf_data(nbuf) +
				    QDF_NBUF_DEST_MAC_OFFSET);
}

/*
 * dp_rx_intrabss_eapol_drop_check() - drop check for EAPOL pkts;
 *  an EAPOL pkt with DA not equal to the vdev mac addr must not be forwarded.
 * @soc: core txrx main context
 * @ta_txrx_peer: source peer entry
 * @rx_tlv_hdr: start address of rx tlvs
 * @nbuf: nbuf that has to be intrabss forwarded
 *
 * Return: true if the nbuf is dropped (and freed), else false
 */
static inline
bool dp_rx_intrabss_eapol_drop_check(struct dp_soc *soc,
				     struct dp_txrx_peer *ta_txrx_peer,
				     uint8_t *rx_tlv_hdr, qdf_nbuf_t nbuf)
{
	if (qdf_unlikely(qdf_nbuf_is_ipv4_eapol_pkt(nbuf) &&
			 !(dp_nbuf_dst_addr_is_self_addr(ta_txrx_peer->vdev,
							 nbuf) ||
			   dp_nbuf_dst_addr_is_mld_addr(ta_txrx_peer->vdev,
							nbuf)))) {
		qdf_nbuf_free(nbuf);
		DP_STATS_INC(soc, rx.err.intrabss_eapol_drop, 1);
		return true;
	}

	return false;
}
#else /* DISABLE_EAPOL_INTRABSS_FWD */

static inline
bool dp_rx_intrabss_eapol_drop_check(struct dp_soc *soc,
				     struct dp_txrx_peer *ta_txrx_peer,
				     uint8_t *rx_tlv_hdr, qdf_nbuf_t nbuf)
{
	return false;
}
#endif /* DISABLE_EAPOL_INTRABSS_FWD */

bool dp_rx_intrabss_mcbc_fwd(struct dp_soc *soc,
			     struct dp_txrx_peer *ta_txrx_peer,
			     uint8_t *rx_tlv_hdr, qdf_nbuf_t nbuf,
			     struct cdp_tid_rx_stats *tid_stats);

bool dp_rx_intrabss_ucast_fwd(struct dp_soc *soc,
			      struct dp_txrx_peer *ta_txrx_peer,
			      uint8_t tx_vdev_id,
			      uint8_t *rx_tlv_hdr, qdf_nbuf_t nbuf,
			      struct cdp_tid_rx_stats *tid_stats);

/**
 * dp_rx_defrag_concat() - Concatenate the fragments
 *
 * @dst: destination pointer to the buffer
 * @src: source pointer from where the fragment payload is to be copied
 *
 * Return: QDF_STATUS
 */
static inline QDF_STATUS dp_rx_defrag_concat(qdf_nbuf_t dst, qdf_nbuf_t src)
{
	/*
	 * Inside qdf_nbuf_cat, if it is necessary to reallocate dst
	 * to provide space for src, the headroom portion is copied from
	 * the original dst buffer to the larger new dst buffer.
	 * (This is needed, because the headroom of the dst buffer
	 * contains the rx desc.)
	 */
	if (!qdf_nbuf_cat(dst, src)) {
		/*
		 * qdf_nbuf_cat does not free the src memory.
		 * Free src nbuf before returning.
		 * In the failure case, the caller takes care of
		 * freeing the nbuf.
		 */
		qdf_nbuf_free(src);
		return QDF_STATUS_SUCCESS;
	}

	return QDF_STATUS_E_DEFRAG_ERROR;
}

#endif /* QCA_HOST_MODE_WIFI_DISABLED */

#ifndef FEATURE_WDS
void dp_rx_da_learn(struct dp_soc *soc, uint8_t *rx_tlv_hdr,
		    struct dp_txrx_peer *ta_txrx_peer, qdf_nbuf_t nbuf);

static inline QDF_STATUS dp_rx_ast_set_active(struct dp_soc *soc,
					      uint16_t sa_idx, bool is_active)
{
	return QDF_STATUS_SUCCESS;
}

static inline void
dp_rx_wds_srcport_learn(struct dp_soc *soc,
			uint8_t *rx_tlv_hdr,
			struct dp_txrx_peer *txrx_peer,
			qdf_nbuf_t nbuf,
			struct hal_rx_msdu_metadata msdu_metadata)
{
}

static inline void
dp_rx_ipa_wds_srcport_learn(struct dp_soc *soc,
			    struct dp_peer *ta_peer, qdf_nbuf_t nbuf,
			    struct hal_rx_msdu_metadata msdu_end_info,
			    bool ad4_valid, bool chfrag_start)
{
}
#endif

/*
 * dp_rx_desc_dump() - dump the sw rx descriptor
 *
 * @rx_desc: sw rx descriptor
 */
static inline void dp_rx_desc_dump(struct dp_rx_desc *rx_desc)
{
	dp_info("rx_desc->nbuf: %pK, rx_desc->cookie: %d, rx_desc->pool_id: %d, rx_desc->in_use: %d, rx_desc->unmapped: %d",
		rx_desc->nbuf, rx_desc->cookie, rx_desc->pool_id,
		rx_desc->in_use, rx_desc->unmapped);
}

#ifndef QCA_HOST_MODE_WIFI_DISABLED

/*
 * check_qwrap_multicast_loopback() - Check if rx packet is a loopback packet.
 *				      In qwrap mode, packets originated from
 *				      any vdev should not loopback and
 *				      should be dropped.
 * @vdev: vdev on which rx packet is received
 * @nbuf: rx pkt
 *
 */
#if ATH_SUPPORT_WRAP
static inline bool check_qwrap_multicast_loopback(struct dp_vdev *vdev,
						  qdf_nbuf_t nbuf)
{
	struct dp_vdev *psta_vdev;
	struct dp_pdev *pdev = vdev->pdev;
	uint8_t *data = qdf_nbuf_data(nbuf);

	if (qdf_unlikely(vdev->proxysta_vdev)) {
		/* In qwrap isolation mode, allow loopback packets as all
		 * packets go to RootAP and Loopback on the mpsta.
		 */
		if (vdev->isolation_vdev)
			return false;
		TAILQ_FOREACH(psta_vdev, &pdev->vdev_list, vdev_list_elem) {
			if (qdf_unlikely(psta_vdev->proxysta_vdev &&
					 !qdf_mem_cmp(psta_vdev->mac_addr.raw,
						      &data[QDF_MAC_ADDR_SIZE],
						      QDF_MAC_ADDR_SIZE))) {
				/* Drop packet if source address is equal to
				 * any of the vdev addresses.
				 */
				return true;
			}
		}
	}
	return false;
}
#else
static inline bool check_qwrap_multicast_loopback(struct dp_vdev *vdev,
						  qdf_nbuf_t nbuf)
{
	return false;
}
#endif

#endif /* QCA_HOST_MODE_WIFI_DISABLED */

#if defined(WLAN_SUPPORT_RX_PROTOCOL_TYPE_TAG) ||\
	defined(WLAN_SUPPORT_RX_TAG_STATISTICS) ||\
	defined(WLAN_SUPPORT_RX_FLOW_TAG)
#include "dp_rx_tag.h"
#endif

#if !defined(WLAN_SUPPORT_RX_PROTOCOL_TYPE_TAG) &&\
	!defined(WLAN_SUPPORT_RX_FLOW_TAG)
/**
 * dp_rx_update_protocol_tag() - Reads CCE metadata from the RX MSDU end TLV
 *				 and set the corresponding tag in QDF packet
 * @soc: core txrx main context
 * @vdev: vdev on which the packet is received
 * @nbuf: QDF pkt buffer on which the protocol tag should be set
 * @rx_tlv_hdr: base address where the RX TLVs start
 * @ring_index: REO ring number, not used for error & monitor ring
 * @is_reo_exception: flag to indicate if rx from REO ring or exception ring
 * @is_update_stats: flag to indicate whether to update stats or not
 *
 * Return: void
 */
static inline void
dp_rx_update_protocol_tag(struct dp_soc *soc, struct dp_vdev *vdev,
			  qdf_nbuf_t nbuf, uint8_t *rx_tlv_hdr,
			  uint16_t ring_index,
			  bool is_reo_exception, bool is_update_stats)
{
}
#endif

#ifndef WLAN_SUPPORT_RX_PROTOCOL_TYPE_TAG
/**
 * dp_rx_err_cce_drop() - Reads CCE metadata from the RX MSDU end TLV
 *			  and returns whether cce metadata matches
 * @soc: core txrx main context
 * @vdev: vdev on which the packet is received
 * @nbuf: QDF pkt buffer on which the protocol tag should be set
 * @rx_tlv_hdr: base address where the RX TLVs start
 *
 * Return: bool
 */
static inline bool
dp_rx_err_cce_drop(struct dp_soc *soc, struct dp_vdev *vdev,
		   qdf_nbuf_t nbuf, uint8_t *rx_tlv_hdr)
{
	return false;
}

#endif /* WLAN_SUPPORT_RX_PROTOCOL_TYPE_TAG */

#ifndef WLAN_SUPPORT_RX_FLOW_TAG
/**
 * dp_rx_update_flow_tag() - Reads FSE metadata from the RX MSDU end TLV
 *			     and set the corresponding tag in QDF packet
 * @soc: core txrx main context
 * @vdev: vdev on which the packet is received
 * @nbuf: QDF pkt buffer on which the protocol tag should be set
 * @rx_tlv_hdr: base address where the RX TLVs start
 * @update_stats: flag to indicate whether to update stats or not
 *
 * Return: void
 */
static inline void
dp_rx_update_flow_tag(struct dp_soc *soc, struct dp_vdev *vdev,
		      qdf_nbuf_t nbuf, uint8_t *rx_tlv_hdr, bool update_stats)
{
}
#endif /* WLAN_SUPPORT_RX_FLOW_TAG */

#define CRITICAL_BUFFER_THRESHOLD	64
/*
 * dp_rx_buffers_replenish() - replenish rxdma ring with rx nbufs
 *			       called during dp rx initialization
 *			       and at the end of dp_rx_process.
 *
 * @dp_soc: core txrx main context
 * @mac_id: mac_id which is one of 3 mac_ids
 * @dp_rxdma_srng: dp rxdma circular ring
 * @rx_desc_pool: Pointer to free Rx descriptor pool
 * @num_req_buffers: number of buffers to be replenished
 * @desc_list: list of descs if called from dp_rx_process
 *	       or NULL during dp rx initialization or out of buffer
 *	       interrupt.
 * @tail: tail of descs list
 * @req_only: If true don't replenish more than req buffers
 * @func_name: name of the caller function
 *
 * Return: return success or failure
 */
QDF_STATUS __dp_rx_buffers_replenish(struct dp_soc *dp_soc, uint32_t mac_id,
				     struct dp_srng *dp_rxdma_srng,
				     struct rx_desc_pool *rx_desc_pool,
				     uint32_t num_req_buffers,
				     union dp_rx_desc_list_elem_t **desc_list,
				     union dp_rx_desc_list_elem_t **tail,
				     bool req_only,
				     const char *func_name);

/*
 * __dp_rx_buffers_no_map_replenish() - replenish rxdma ring with rx nbufs;
 *					use direct APIs to invalidate
 *					and get the physical address of the
 *					nbuf instead of the map API, called
 *					during dp rx initialization and at the
 *					end of dp_rx_process.
 *
 * @dp_soc: core txrx main context
 * @mac_id: mac_id which is one of 3 mac_ids
 * @dp_rxdma_srng: dp rxdma circular ring
 * @rx_desc_pool: Pointer to free Rx descriptor pool
 * @num_req_buffers: number of buffers to be replenished
 * @desc_list: list of descs if called from dp_rx_process
 *	       or NULL during dp rx initialization or out of buffer
 *	       interrupt.
 * @tail: tail of descs list
 *
 * Return: return success or failure
 */
QDF_STATUS
__dp_rx_buffers_no_map_replenish(struct dp_soc *dp_soc, uint32_t mac_id,
				 struct dp_srng *dp_rxdma_srng,
				 struct rx_desc_pool *rx_desc_pool,
				 uint32_t num_req_buffers,
				 union dp_rx_desc_list_elem_t **desc_list,
				 union dp_rx_desc_list_elem_t **tail);

/*
 * __dp_rx_buffers_no_map_lt_replenish() - replenish rxdma ring with rx nbufs;
 *					   use direct APIs to invalidate
 *					   and get the physical address of the
 *					   nbuf instead of the map API, called
 *					   when the low threshold interrupt is
 *					   triggered
 *
 * @dp_soc: core txrx main context
 * @mac_id: mac_id which is one of 3 mac_ids
 * @dp_rxdma_srng: dp rxdma circular ring
 * @rx_desc_pool: Pointer to free Rx descriptor pool
 *
 * Return: return success or failure
 */
QDF_STATUS
__dp_rx_buffers_no_map_lt_replenish(struct dp_soc *dp_soc, uint32_t mac_id,
				    struct dp_srng *dp_rxdma_srng,
				    struct rx_desc_pool *rx_desc_pool);
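
/*
 * Illustrative sketch (not part of the driver): a typical replenish step
 * at the end of Rx processing - hand back the locally freed descriptors
 * and top the ring up via the dp_rx_buffers_replenish() wrapper defined
 * earlier. num_reaped, head and tail are assumed to come from the caller's
 * reap loop.
 *
 *	dp_rx_buffers_replenish(soc, mac_id, rxdma_srng, rx_desc_pool,
 *				num_reaped, &head, &tail, false);
 */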

/*
 * __dp_pdev_rx_buffers_no_map_attach() - replenish rxdma ring with rx nbufs;
 *					  use direct APIs to invalidate
 *					  and get the physical address of the
 *					  nbuf instead of the map API, called
 *					  during dp rx initialization.
 *
 * @dp_soc: core txrx main context
 * @mac_id: mac_id which is one of 3 mac_ids
 * @dp_rxdma_srng: dp rxdma circular ring
 * @rx_desc_pool: Pointer to free Rx descriptor pool
 * @num_req_buffers: number of buffers to be replenished
 *
 * Return: return success or failure
 */
QDF_STATUS __dp_pdev_rx_buffers_no_map_attach(struct dp_soc *dp_soc,
					      uint32_t mac_id,
					      struct dp_srng *dp_rxdma_srng,
					      struct rx_desc_pool *rx_desc_pool,
					      uint32_t num_req_buffers);

/*
 * dp_pdev_rx_buffers_attach() - replenish rxdma ring with rx nbufs
 *				 called during dp rx initialization
 *
 * @dp_soc: core txrx main context
 * @mac_id: mac_id which is one of 3 mac_ids
 * @dp_rxdma_srng: dp rxdma circular ring
 * @rx_desc_pool: Pointer to free Rx descriptor pool
 * @num_req_buffers: number of buffers to be replenished
 *
 * Return: return success or failure
 */
QDF_STATUS
dp_pdev_rx_buffers_attach(struct dp_soc *dp_soc, uint32_t mac_id,
			  struct dp_srng *dp_rxdma_srng,
			  struct rx_desc_pool *rx_desc_pool,
			  uint32_t num_req_buffers);

/**
 * dp_rx_link_desc_return() - Return a MPDU link descriptor to HW
 *			      (WBM), following error handling
 *
 * @soc: core DP main context
 * @ring_desc: opaque pointer to the REO error ring descriptor
 * @bm_action: put to idle_list or release to msdu_list
 *
 * Return: QDF_STATUS_E_FAILURE for failure else QDF_STATUS_SUCCESS
 */
QDF_STATUS
dp_rx_link_desc_return(struct dp_soc *soc, hal_ring_desc_t ring_desc,
		       uint8_t bm_action);

/**
 * dp_rx_link_desc_return_by_addr() - Return a MPDU link descriptor to
 *				      HW (WBM) by address
 *
 * @soc: core DP main context
 * @link_desc_addr: link descriptor addr
 * @bm_action: put to idle_list or release to msdu_list
 *
 * Return: QDF_STATUS_E_FAILURE for failure else QDF_STATUS_SUCCESS
 */
QDF_STATUS
dp_rx_link_desc_return_by_addr(struct dp_soc *soc,
			       hal_buff_addrinfo_t link_desc_addr,
			       uint8_t bm_action);

/**
 * dp_rxdma_err_process() - RxDMA error processing functionality
 * @int_ctx: pointer to DP interrupt context
 * @soc: core txrx main context
 * @mac_id: mac id which is one of 3 mac_ids
 * @quota: No. of units (packets) that can be serviced in one shot.
 *
 * Return: num of buffers processed
 */
uint32_t
dp_rxdma_err_process(struct dp_intr *int_ctx, struct dp_soc *soc,
		     uint32_t mac_id, uint32_t quota);

void dp_rx_fill_mesh_stats(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
			   uint8_t *rx_tlv_hdr, struct dp_txrx_peer *peer);
QDF_STATUS dp_rx_filter_mesh_packets(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
				     uint8_t *rx_tlv_hdr);

int dp_wds_rx_policy_check(uint8_t *rx_tlv_hdr, struct dp_vdev *vdev,
			   struct dp_txrx_peer *peer);

/*
 * dp_rx_dump_info_and_assert() - dump RX Ring info and Rx Desc info
 *
 * @soc: core txrx main context
 * @hal_ring_hdl: opaque pointer to the HAL Rx Ring, which will be serviced
 * @ring_desc: opaque pointer to the RX ring descriptor
 * @rx_desc: host rx descriptor
 *
 * Return: void
 */
void dp_rx_dump_info_and_assert(struct dp_soc *soc,
				hal_ring_handle_t hal_ring_hdl,
				hal_ring_desc_t ring_desc,
				struct dp_rx_desc *rx_desc);

void dp_rx_compute_delay(struct dp_vdev *vdev, qdf_nbuf_t nbuf);

#ifdef QCA_PEER_EXT_STATS
void dp_rx_compute_tid_delay(struct cdp_delay_tid_stats *stats,
			     qdf_nbuf_t nbuf);
#endif /* QCA_PEER_EXT_STATS */

#ifdef RX_DESC_DEBUG_CHECK
/**
 * dp_rx_desc_check_magic() - check the magic value in dp_rx_desc
 * @rx_desc: rx descriptor pointer
 *
 * Return: true, if magic is correct, else false.
 */
static inline bool dp_rx_desc_check_magic(struct dp_rx_desc *rx_desc)
{
	if (qdf_unlikely(rx_desc->magic != DP_RX_DESC_MAGIC))
		return false;

	rx_desc->magic = 0;
	return true;
}

/**
 * dp_rx_desc_prep() - prepare rx desc
 * @rx_desc: rx descriptor pointer to be prepared
 * @nbuf_frag_info_t: struct dp_rx_nbuf_frag_info *
 *
 * Note: assumption is that we are associating a nbuf which is mapped
 *
 * Return: none
 */
static inline
void dp_rx_desc_prep(struct dp_rx_desc *rx_desc,
		     struct dp_rx_nbuf_frag_info *nbuf_frag_info_t)
{
	rx_desc->magic = DP_RX_DESC_MAGIC;
	rx_desc->nbuf = (nbuf_frag_info_t->virt_addr).nbuf;
	rx_desc->unmapped = 0;
	rx_desc->nbuf_data_addr = (uint8_t *)qdf_nbuf_data(rx_desc->nbuf);
}

/**
 * dp_rx_desc_frag_prep() - prepare rx desc
 * @rx_desc: rx descriptor pointer to be prepared
 * @nbuf_frag_info_t: struct dp_rx_nbuf_frag_info *
 *
 * Note: assumption is that the frag address is mapped
 *
 * Return: none
 */
#ifdef DP_RX_MON_MEM_FRAG
static inline
void dp_rx_desc_frag_prep(struct dp_rx_desc *rx_desc,
			  struct dp_rx_nbuf_frag_info *nbuf_frag_info_t)
{
	rx_desc->magic = DP_RX_DESC_MAGIC;
	rx_desc->rx_buf_start =
		(uint8_t *)((nbuf_frag_info_t->virt_addr).vaddr);
	rx_desc->paddr_buf_start = nbuf_frag_info_t->paddr;
	rx_desc->unmapped = 0;
}
#else
static inline
void dp_rx_desc_frag_prep(struct dp_rx_desc *rx_desc,
			  struct dp_rx_nbuf_frag_info *nbuf_frag_info_t)
{
}
#endif /* DP_RX_MON_MEM_FRAG */

/**
 * dp_rx_desc_paddr_sanity_check() - paddr sanity for ring desc vs rx_desc
 * @rx_desc: rx descriptor
 * @ring_paddr: paddr obtained from the ring
 *
 * Return: true if the paddr from the ring matches the nbuf paddr,
 *	   else false
 */
static inline
bool dp_rx_desc_paddr_sanity_check(struct dp_rx_desc *rx_desc,
				   uint64_t ring_paddr)
{
	return (ring_paddr == qdf_nbuf_get_frag_paddr(rx_desc->nbuf, 0));
}
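
/*
 * Illustrative sketch (not part of the driver): the debug-check flow on
 * a reaped descriptor - validate the magic, then cross-check the ring
 * paddr against the nbuf before handing the frame on. The error path
 * shown is hypothetical.
 *
 *	if (!dp_rx_desc_check_magic(rx_desc) ||
 *	    !dp_rx_desc_paddr_sanity_check(rx_desc, ring_paddr))
 *		goto drop;	// e.g. dump info and assert
 */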

#else

static inline bool dp_rx_desc_check_magic(struct dp_rx_desc *rx_desc)
{
	return true;
}

static inline
void dp_rx_desc_prep(struct dp_rx_desc *rx_desc,
		     struct dp_rx_nbuf_frag_info *nbuf_frag_info_t)
{
	rx_desc->nbuf = (nbuf_frag_info_t->virt_addr).nbuf;
	rx_desc->unmapped = 0;
}

#ifdef DP_RX_MON_MEM_FRAG
static inline
void dp_rx_desc_frag_prep(struct dp_rx_desc *rx_desc,
			  struct dp_rx_nbuf_frag_info *nbuf_frag_info_t)
{
	rx_desc->rx_buf_start =
		(uint8_t *)((nbuf_frag_info_t->virt_addr).vaddr);
	rx_desc->paddr_buf_start = nbuf_frag_info_t->paddr;
	rx_desc->unmapped = 0;
}
#else
static inline
void dp_rx_desc_frag_prep(struct dp_rx_desc *rx_desc,
			  struct dp_rx_nbuf_frag_info *nbuf_frag_info_t)
{
}
#endif /* DP_RX_MON_MEM_FRAG */

static inline
bool dp_rx_desc_paddr_sanity_check(struct dp_rx_desc *rx_desc,
				   uint64_t ring_paddr)
{
	return true;
}
#endif /* RX_DESC_DEBUG_CHECK */

void dp_rx_enable_mon_dest_frag(struct rx_desc_pool *rx_desc_pool,
				bool is_mon_dest_desc);

void dp_rx_process_rxdma_err(struct dp_soc *soc, qdf_nbuf_t nbuf,
			     uint8_t *rx_tlv_hdr, struct dp_txrx_peer *peer,
			     uint8_t err_code, uint8_t mac_id);

#ifndef QCA_MULTIPASS_SUPPORT
static inline
bool dp_rx_multipass_process(struct dp_txrx_peer *peer, qdf_nbuf_t nbuf,
			     uint8_t tid)
{
	return false;
}
#else
bool dp_rx_multipass_process(struct dp_txrx_peer *peer, qdf_nbuf_t nbuf,
			     uint8_t tid);
#endif

#ifndef QCA_HOST_MODE_WIFI_DISABLED

#ifndef WLAN_RX_PKT_CAPTURE_ENH
static inline
QDF_STATUS dp_peer_set_rx_capture_enabled(struct dp_pdev *pdev,
					  struct dp_peer *peer_handle,
					  bool value, uint8_t *mac_addr)
{
	return QDF_STATUS_SUCCESS;
}
#endif

#endif /* QCA_HOST_MODE_WIFI_DISABLED */

/**
 * dp_rx_deliver_to_stack() - deliver pkts to network stack
 *			      Caller to hold peer refcount and check for valid peer
 * @soc: soc
 * @vdev: vdev
 * @peer: txrx peer
 * @nbuf_head: skb list head
 * @nbuf_tail: skb list tail
 *
 * Return: QDF_STATUS
 */
QDF_STATUS dp_rx_deliver_to_stack(struct dp_soc *soc,
				  struct dp_vdev *vdev,
				  struct dp_txrx_peer *peer,
				  qdf_nbuf_t nbuf_head,
				  qdf_nbuf_t nbuf_tail);

#ifdef QCA_SUPPORT_EAPOL_OVER_CONTROL_PORT
/**
 * dp_rx_eapol_deliver_to_stack() - deliver pkts to network stack
 *				    Caller to hold peer refcount and check for valid peer
 * @soc: soc
 * @vdev: vdev
 * @peer: peer
 * @nbuf_head: skb list head
 * @nbuf_tail: skb list tail
 *
 * Return: QDF_STATUS
 */
QDF_STATUS dp_rx_eapol_deliver_to_stack(struct dp_soc *soc,
					struct dp_vdev *vdev,
					struct dp_txrx_peer *peer,
					qdf_nbuf_t nbuf_head,
					qdf_nbuf_t nbuf_tail);
#endif

#ifndef QCA_HOST_MODE_WIFI_DISABLED

#ifdef QCA_OL_RX_LOCK_LESS_ACCESS
/*
 * dp_rx_srng_access_start() - Wrapper function to log access start of a hal ring
 * @int_ctx: pointer to DP interrupt context
 * @soc: DP soc structure pointer
 * @hal_ring_hdl: HAL ring handle
 *
 * Return: 0 on success; error on failure
 */
#ifndef QCA_HOST_MODE_WIFI_DISABLED

#ifdef QCA_OL_RX_LOCK_LESS_ACCESS
/**
 * dp_rx_srng_access_start() - Wrapper function to log access start of a hal ring
 * @int_ctx: pointer to DP interrupt context
 * @soc: DP soc structure pointer
 * @hal_ring_hdl: HAL ring handle
 *
 * Return: 0 on success; error on failure
 */
static inline int
dp_rx_srng_access_start(struct dp_intr *int_ctx, struct dp_soc *soc,
			hal_ring_handle_t hal_ring_hdl)
{
	return hal_srng_access_start_unlocked(soc->hal_soc, hal_ring_hdl);
}

/**
 * dp_rx_srng_access_end() - Wrapper function to log access end of a hal ring
 * @int_ctx: pointer to DP interrupt context
 * @soc: DP soc structure pointer
 * @hal_ring_hdl: HAL ring handle
 *
 * Return: None
 */
static inline void
dp_rx_srng_access_end(struct dp_intr *int_ctx, struct dp_soc *soc,
		      hal_ring_handle_t hal_ring_hdl)
{
	hal_srng_access_end_unlocked(soc->hal_soc, hal_ring_hdl);
}
#else
static inline int
dp_rx_srng_access_start(struct dp_intr *int_ctx, struct dp_soc *soc,
			hal_ring_handle_t hal_ring_hdl)
{
	return dp_srng_access_start(int_ctx, soc, hal_ring_hdl);
}

static inline void
dp_rx_srng_access_end(struct dp_intr *int_ctx, struct dp_soc *soc,
		      hal_ring_handle_t hal_ring_hdl)
{
	dp_srng_access_end(int_ctx, soc, hal_ring_hdl);
}
#endif

#endif /* QCA_HOST_MODE_WIFI_DISABLED */

/**
 * dp_rx_wbm_sg_list_reset() - Initialize sg list
 * @soc: DP SOC handle
 *
 * This API should be called at soc init and after every SG processing.
 *
 * Return: None
 */
static inline void dp_rx_wbm_sg_list_reset(struct dp_soc *soc)
{
	if (soc) {
		soc->wbm_sg_param.wbm_is_first_msdu_in_sg = false;
		soc->wbm_sg_param.wbm_sg_nbuf_head = NULL;
		soc->wbm_sg_param.wbm_sg_nbuf_tail = NULL;
		soc->wbm_sg_param.wbm_sg_desc_msdu_len = 0;
	}
}

/**
 * dp_rx_wbm_sg_list_deinit() - De-initialize sg list
 * @soc: DP SOC handle
 *
 * This API should be called in the down path to avoid any leak.
 *
 * Return: None
 */
static inline void dp_rx_wbm_sg_list_deinit(struct dp_soc *soc)
{
	if (soc) {
		if (soc->wbm_sg_param.wbm_sg_nbuf_head)
			qdf_nbuf_list_free(soc->wbm_sg_param.wbm_sg_nbuf_head);

		dp_rx_wbm_sg_list_reset(soc);
	}
}

#ifndef QCA_HOST_MODE_WIFI_DISABLED

#ifdef WLAN_FEATURE_RX_PREALLOC_BUFFER_POOL
#define DP_RX_PROCESS_NBUF(soc, head, tail, ebuf_head, ebuf_tail, rx_desc) \
	do {								   \
		if (!soc->rx_buff_pool[rx_desc->pool_id].is_initialized) { \
			DP_RX_LIST_APPEND(head, tail, rx_desc->nbuf);	   \
			break;						   \
		}							   \
		DP_RX_LIST_APPEND(ebuf_head, ebuf_tail, rx_desc->nbuf);	   \
		if (!qdf_nbuf_is_rx_chfrag_cont(rx_desc->nbuf)) {	   \
			if (!dp_rx_buffer_pool_refill(soc, ebuf_head,	   \
						      rx_desc->pool_id))   \
				DP_RX_MERGE_TWO_LIST(head, tail,	   \
						     ebuf_head, ebuf_tail);\
			ebuf_head = NULL;				   \
			ebuf_tail = NULL;				   \
		}							   \
	} while (0)
#else
#define DP_RX_PROCESS_NBUF(soc, head, tail, ebuf_head, ebuf_tail, rx_desc) \
	DP_RX_LIST_APPEND(head, tail, rx_desc->nbuf)
#endif /* WLAN_FEATURE_RX_PREALLOC_BUFFER_POOL */

#endif /* QCA_HOST_MODE_WIFI_DISABLED */
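/*
 * Illustrative only: the expected dp_rx_wbm_sg_list_* call order, assuming
 * the usual soc bring-up/tear-down flow; the placement shown is a sketch,
 * not a mandate.
 *
 *	// soc init: start with an empty SG state
 *	dp_rx_wbm_sg_list_reset(soc);
 *
 *	// WBM error processing: after each scattered MSDU is fully built
 *	// and handed off, reset so the next SG MSDU starts clean
 *	dp_rx_wbm_sg_list_reset(soc);
 *
 *	// soc deinit: free any half-built SG chain, then reset
 *	dp_rx_wbm_sg_list_deinit(soc);
 */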
/**
 * dp_rx_link_desc_refill_duplicate_check() - check for a duplicate link desc
 * before refill
 * @soc: DP SOC handle
 * @buf_info: the last link desc buf info
 * @ring_buf_info: current buf address pointer including link desc
 *
 * Return: None
 */
void dp_rx_link_desc_refill_duplicate_check(
				struct dp_soc *soc,
				struct hal_buf_info *buf_info,
				hal_buff_addrinfo_t ring_buf_info);

#ifdef WLAN_FEATURE_PKT_CAPTURE_V2
/**
 * dp_rx_deliver_to_pkt_capture() - deliver rx packet to packet capture
 * @soc: dp_soc handle
 * @pdev: dp_pdev handle
 * @peer_id: peer_id of the peer for which completion came
 * @is_offload: flag indicating offload packets
 * @netbuf: Buffer pointer
 *
 * This function is used to deliver rx packet to packet capture
 *
 * Return: None
 */
void dp_rx_deliver_to_pkt_capture(struct dp_soc *soc, struct dp_pdev *pdev,
				  uint16_t peer_id, uint32_t is_offload,
				  qdf_nbuf_t netbuf);
void dp_rx_deliver_to_pkt_capture_no_peer(struct dp_soc *soc, qdf_nbuf_t nbuf,
					  uint32_t is_offload);
#else
static inline void
dp_rx_deliver_to_pkt_capture(struct dp_soc *soc, struct dp_pdev *pdev,
			     uint16_t peer_id, uint32_t is_offload,
			     qdf_nbuf_t netbuf)
{
}

static inline void
dp_rx_deliver_to_pkt_capture_no_peer(struct dp_soc *soc, qdf_nbuf_t nbuf,
				     uint32_t is_offload)
{
}
#endif

#ifndef QCA_HOST_MODE_WIFI_DISABLED
#ifdef FEATURE_MEC
/**
 * dp_rx_mcast_echo_check() - check if the mcast pkt is a loop
 * back on same vap or a different vap.
 * @soc: core DP main context
 * @peer: dp peer handle
 * @rx_tlv_hdr: start of the rx TLV header
 * @nbuf: pkt buffer
 *
 * Return: bool (true if it is a looped back pkt else false)
 *
 */
bool dp_rx_mcast_echo_check(struct dp_soc *soc,
			    struct dp_txrx_peer *peer,
			    uint8_t *rx_tlv_hdr,
			    qdf_nbuf_t nbuf);
#else
static inline bool dp_rx_mcast_echo_check(struct dp_soc *soc,
					  struct dp_txrx_peer *peer,
					  uint8_t *rx_tlv_hdr,
					  qdf_nbuf_t nbuf)
{
	return false;
}
#endif /* FEATURE_MEC */
#endif /* QCA_HOST_MODE_WIFI_DISABLED */
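/*
 * Illustrative only: a minimal sketch of how the MEC echo check is typically
 * consumed in the rx path; the drop handling and the next_msdu label are
 * assumptions, not the authoritative flow.
 *
 *	if (qdf_unlikely(dp_rx_mcast_echo_check(soc, txrx_peer,
 *						rx_tlv_hdr, nbuf))) {
 *		// A multicast frame we transmitted has looped back to us;
 *		// drop it rather than re-delivering or re-forwarding it.
 *		qdf_nbuf_free(nbuf);
 *		goto next_msdu;	// hypothetical label in the reap loop
 *	}
 */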
#ifdef RECEIVE_OFFLOAD
void dp_rx_fill_gro_info(struct dp_soc *soc, uint8_t *rx_tlv,
			 qdf_nbuf_t msdu, uint32_t *rx_ol_pkt_cnt);
#else
static inline
void dp_rx_fill_gro_info(struct dp_soc *soc, uint8_t *rx_tlv,
			 qdf_nbuf_t msdu, uint32_t *rx_ol_pkt_cnt)
{
}
#endif

void dp_rx_msdu_stats_update(struct dp_soc *soc, qdf_nbuf_t nbuf,
			     uint8_t *rx_tlv_hdr, struct dp_txrx_peer *peer,
			     uint8_t ring_id,
			     struct cdp_tid_rx_stats *tid_stats);

void dp_rx_deliver_to_stack_no_peer(struct dp_soc *soc, qdf_nbuf_t nbuf);

uint32_t dp_rx_srng_get_num_pending(hal_soc_handle_t hal_soc,
				    hal_ring_handle_t hal_ring_hdl,
				    uint32_t num_entries,
				    bool *near_full);

#ifdef WLAN_FEATURE_DP_RX_RING_HISTORY
void dp_rx_ring_record_entry(struct dp_soc *soc, uint8_t ring_num,
			     hal_ring_desc_t ring_desc);
#else
static inline void
dp_rx_ring_record_entry(struct dp_soc *soc, uint8_t ring_num,
			hal_ring_desc_t ring_desc)
{
}
#endif

#ifndef QCA_HOST_MODE_WIFI_DISABLED
#ifdef RX_DESC_SANITY_WAR
QDF_STATUS dp_rx_desc_sanity(struct dp_soc *soc, hal_soc_handle_t hal_soc,
			     hal_ring_handle_t hal_ring_hdl,
			     hal_ring_desc_t ring_desc,
			     struct dp_rx_desc *rx_desc);
#else
static inline
QDF_STATUS dp_rx_desc_sanity(struct dp_soc *soc, hal_soc_handle_t hal_soc,
			     hal_ring_handle_t hal_ring_hdl,
			     hal_ring_desc_t ring_desc,
			     struct dp_rx_desc *rx_desc)
{
	return QDF_STATUS_SUCCESS;
}
#endif

#ifdef DP_RX_DROP_RAW_FRM
bool dp_rx_is_raw_frame_dropped(qdf_nbuf_t nbuf);
#else
static inline
bool dp_rx_is_raw_frame_dropped(qdf_nbuf_t nbuf)
{
	return false;
}
#endif

#ifdef RX_DESC_DEBUG_CHECK
QDF_STATUS dp_rx_desc_nbuf_sanity_check(struct dp_soc *soc,
					hal_ring_desc_t ring_desc,
					struct dp_rx_desc *rx_desc);
#else
static inline
QDF_STATUS dp_rx_desc_nbuf_sanity_check(struct dp_soc *soc,
					hal_ring_desc_t ring_desc,
					struct dp_rx_desc *rx_desc)
{
	return QDF_STATUS_SUCCESS;
}
#endif

#ifdef WLAN_DP_FEATURE_SW_LATENCY_MGR
void dp_rx_update_stats(struct dp_soc *soc, qdf_nbuf_t nbuf);
#else
static inline
void dp_rx_update_stats(struct dp_soc *soc, qdf_nbuf_t nbuf)
{
}
#endif

/**
 * dp_rx_cksum_offload() - set the nbuf checksum as defined by hardware.
 * @pdev: pdev handle
 * @nbuf: pointer to the first msdu of an amsdu.
 * @rx_tlv_hdr: pointer to the start of RX TLV headers.
 *
 * The ip_summed field of the skb is set based on whether HW validated the
 * IP/TCP/UDP checksum.
 *
 * Return: void
 */
#if defined(MAX_PDEV_CNT) && (MAX_PDEV_CNT == 1)
static inline
void dp_rx_cksum_offload(struct dp_pdev *pdev,
			 qdf_nbuf_t nbuf,
			 uint8_t *rx_tlv_hdr)
{
	qdf_nbuf_rx_cksum_t cksum = {0};
	//TODO - Move this to ring desc api
	//HAL_RX_MSDU_DESC_IP_CHKSUM_FAIL_GET
	//HAL_RX_MSDU_DESC_TCP_UDP_CHKSUM_FAIL_GET
	uint32_t ip_csum_err, tcp_udp_csum_er;

	hal_rx_tlv_csum_err_get(pdev->soc->hal_soc, rx_tlv_hdr, &ip_csum_err,
				&tcp_udp_csum_er);

	if (qdf_likely(!ip_csum_err && !tcp_udp_csum_er)) {
		cksum.l4_result = QDF_NBUF_RX_CKSUM_TCP_UDP_UNNECESSARY;
		qdf_nbuf_set_rx_cksum(nbuf, &cksum);
	} else {
		DP_STATS_INCC(pdev, err.ip_csum_err, 1, ip_csum_err);
		DP_STATS_INCC(pdev, err.tcp_udp_csum_err, 1, tcp_udp_csum_er);
	}
}
#else
static inline
void dp_rx_cksum_offload(struct dp_pdev *pdev,
			 qdf_nbuf_t nbuf,
			 uint8_t *rx_tlv_hdr)
{
}
#endif
#endif /* QCA_HOST_MODE_WIFI_DISABLED */
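/*
 * Illustrative only: where dp_rx_cksum_offload() typically sits in per-MSDU
 * processing, assuming the usual reap-loop locals; exact placement varies
 * per rx path.
 *
 *	// Let the HW checksum verdict mark the skb before it is queued to
 *	// the stack, so the stack can skip SW checksum validation whenever
 *	// HW already verified the IP/TCP/UDP checksums.
 *	dp_rx_cksum_offload(vdev->pdev, nbuf, rx_tlv_hdr);
 *	DP_RX_LIST_APPEND(deliver_list_head, deliver_list_tail, nbuf);
 */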
#ifdef WLAN_FEATURE_RX_SOFTIRQ_TIME_LIMIT
static inline
bool dp_rx_reap_loop_pkt_limit_hit(struct dp_soc *soc, int num_reaped,
				   int max_reap_limit)
{
	bool limit_hit = false;

	limit_hit = (num_reaped >= max_reap_limit);

	if (limit_hit)
		DP_STATS_INC(soc, rx.reap_loop_pkt_limit_hit, 1)

	return limit_hit;
}

static inline
bool dp_rx_enable_eol_data_check(struct dp_soc *soc)
{
	return soc->wlan_cfg_ctx->rx_enable_eol_data_check;
}

static inline int dp_rx_get_loop_pkt_limit(struct dp_soc *soc)
{
	struct wlan_cfg_dp_soc_ctxt *cfg = soc->wlan_cfg_ctx;

	return cfg->rx_reap_loop_pkt_limit;
}
#else
static inline
bool dp_rx_reap_loop_pkt_limit_hit(struct dp_soc *soc, int num_reaped,
				   int max_reap_limit)
{
	return false;
}

static inline
bool dp_rx_enable_eol_data_check(struct dp_soc *soc)
{
	return false;
}

static inline int dp_rx_get_loop_pkt_limit(struct dp_soc *soc)
{
	return 0;
}
#endif /* WLAN_FEATURE_RX_SOFTIRQ_TIME_LIMIT */

#ifdef QCA_SUPPORT_WDS_EXTENDED
/**
 * dp_rx_is_list_ready() - Make different lists for 4-address
 *			   and 3-address frames
 * @nbuf_head: skb list head
 * @vdev: vdev
 * @txrx_peer: txrx_peer
 * @peer_id: peer id of new received frame
 * @vdev_id: vdev_id of new received frame
 *
 * Return: true if the new frame's peer_id differs from that of the
 * accumulated list, i.e. the list should be delivered first.
 */
static inline bool
dp_rx_is_list_ready(qdf_nbuf_t nbuf_head,
		    struct dp_vdev *vdev,
		    struct dp_txrx_peer *txrx_peer,
		    uint16_t peer_id,
		    uint8_t vdev_id)
{
	if (nbuf_head && txrx_peer && txrx_peer->peer_id != peer_id)
		return true;

	return false;
}
#else
static inline bool
dp_rx_is_list_ready(qdf_nbuf_t nbuf_head,
		    struct dp_vdev *vdev,
		    struct dp_txrx_peer *txrx_peer,
		    uint16_t peer_id,
		    uint8_t vdev_id)
{
	if (nbuf_head && vdev && (vdev->vdev_id != vdev_id))
		return true;

	return false;
}
#endif

#ifdef WLAN_FEATURE_MARK_FIRST_WAKEUP_PACKET
/**
 * dp_rx_mark_first_packet_after_wow_wakeup() - mark first packet after wow
 * wakeup
 * @pdev: pointer to dp_pdev structure
 * @rx_tlv: pointer to rx_pkt_tlvs structure
 * @nbuf: pointer to skb buffer
 *
 * Return: None
 */
void dp_rx_mark_first_packet_after_wow_wakeup(struct dp_pdev *pdev,
					      uint8_t *rx_tlv,
					      qdf_nbuf_t nbuf);
#else
static inline void
dp_rx_mark_first_packet_after_wow_wakeup(struct dp_pdev *pdev,
					 uint8_t *rx_tlv,
					 qdf_nbuf_t nbuf)
{
}
#endif

#if defined(WLAN_MAX_PDEVS) && (WLAN_MAX_PDEVS == 1)
static inline uint8_t
dp_rx_get_defrag_bm_id(struct dp_soc *soc)
{
	return DP_DEFRAG_RBM(soc->wbm_sw0_bm_id);
}

static inline uint8_t
dp_rx_get_rx_bm_id(struct dp_soc *soc)
{
	return DP_WBM2SW_RBM(soc->wbm_sw0_bm_id);
}
#else
static inline uint8_t
dp_rx_get_rx_bm_id(struct dp_soc *soc)
{
	struct wlan_cfg_dp_soc_ctxt *cfg_ctx = soc->wlan_cfg_ctx;
	uint8_t wbm2_sw_rx_rel_ring_id;

	wbm2_sw_rx_rel_ring_id = wlan_cfg_get_rx_rel_ring_id(cfg_ctx);

	return HAL_RX_BUF_RBM_SW_BM(soc->wbm_sw0_bm_id,
				    wbm2_sw_rx_rel_ring_id);
}

static inline uint8_t
dp_rx_get_defrag_bm_id(struct dp_soc *soc)
{
	return dp_rx_get_rx_bm_id(soc);
}
#endif
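/*
 * Illustrative only: how the reap-loop limit helpers combine in a rx ring
 * handler. This is a trimmed sketch of the pattern, not the full
 * dp_rx_process() logic; hal_srng_dst_get_next() is assumed to be the HAL
 * accessor that yields the next ring entry.
 *
 *	uint32_t num_reaped = 0;
 *	int max_reap = dp_rx_get_loop_pkt_limit(soc);
 *
 *	if (dp_rx_srng_access_start(int_ctx, soc, hal_ring_hdl))
 *		return 0;
 *
 *	while ((ring_desc = hal_srng_dst_get_next(hal_soc, hal_ring_hdl))) {
 *		// ... reap and queue one entry ...
 *		num_reaped++;
 *		if (dp_rx_reap_loop_pkt_limit_hit(soc, num_reaped, max_reap))
 *			break;	// yield; remaining entries are picked up later
 *	}
 *	dp_rx_srng_access_end(int_ctx, soc, hal_ring_hdl);
 */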
static inline uint16_t
dp_rx_peer_metadata_peer_id_get(struct dp_soc *soc, uint32_t peer_metadata)
{
	return soc->arch_ops.dp_rx_peer_metadata_peer_id_get(soc,
							     peer_metadata);
}

/**
 * dp_rx_desc_pool_init_generic() - Generic Rx descriptors initialization
 * @soc: SOC handle
 * @rx_desc_pool: pointer to RX descriptor pool
 * @pool_id: pool ID
 *
 * Return: QDF_STATUS
 */
QDF_STATUS dp_rx_desc_pool_init_generic(struct dp_soc *soc,
					struct rx_desc_pool *rx_desc_pool,
					uint32_t pool_id);

void dp_rx_desc_pool_deinit_generic(struct dp_soc *soc,
				    struct rx_desc_pool *rx_desc_pool,
				    uint32_t pool_id);

/**
 * dp_rx_pkt_tracepoints_enabled() - Get the state of rx pkt tracepoint
 *
 * Return: True if any rx pkt tracepoint is enabled else false
 */
static inline
bool dp_rx_pkt_tracepoints_enabled(void)
{
	return (qdf_trace_dp_rx_tcp_pkt_enabled() ||
		qdf_trace_dp_rx_udp_pkt_enabled() ||
		qdf_trace_dp_rx_pkt_enabled());
}

#if defined(QCA_DP_RX_NBUF_NO_MAP_UNMAP) && !defined(BUILD_X86)
static inline
QDF_STATUS dp_pdev_rx_buffers_attach_simple(struct dp_soc *soc, uint32_t mac_id,
					    struct dp_srng *rxdma_srng,
					    struct rx_desc_pool *rx_desc_pool,
					    uint32_t num_req_buffers)
{
	return __dp_pdev_rx_buffers_no_map_attach(soc, mac_id,
						  rxdma_srng,
						  rx_desc_pool,
						  num_req_buffers);
}

static inline
void dp_rx_buffers_replenish_simple(struct dp_soc *soc, uint32_t mac_id,
				    struct dp_srng *rxdma_srng,
				    struct rx_desc_pool *rx_desc_pool,
				    uint32_t num_req_buffers,
				    union dp_rx_desc_list_elem_t **desc_list,
				    union dp_rx_desc_list_elem_t **tail)
{
	__dp_rx_buffers_no_map_replenish(soc, mac_id, rxdma_srng, rx_desc_pool,
					 num_req_buffers, desc_list, tail);
}

static inline
void dp_rx_buffers_lt_replenish_simple(struct dp_soc *soc, uint32_t mac_id,
				       struct dp_srng *rxdma_srng,
				       struct rx_desc_pool *rx_desc_pool,
				       uint32_t num_req_buffers,
				       union dp_rx_desc_list_elem_t **desc_list,
				       union dp_rx_desc_list_elem_t **tail)
{
	__dp_rx_buffers_no_map_lt_replenish(soc, mac_id, rxdma_srng,
					    rx_desc_pool);
}

static inline
qdf_dma_addr_t dp_rx_nbuf_sync_no_dsb(struct dp_soc *dp_soc,
				      qdf_nbuf_t nbuf,
				      uint32_t buf_size)
{
	qdf_nbuf_dma_inv_range_no_dsb((void *)nbuf->data,
				      (void *)(nbuf->data + buf_size));

	return (qdf_dma_addr_t)qdf_mem_virt_to_phys(nbuf->data);
}

static inline
qdf_dma_addr_t dp_rx_nbuf_sync(struct dp_soc *dp_soc,
			       qdf_nbuf_t nbuf,
			       uint32_t buf_size)
{
	qdf_nbuf_dma_inv_range((void *)nbuf->data,
			       (void *)(nbuf->data + buf_size));

	return (qdf_dma_addr_t)qdf_mem_virt_to_phys(nbuf->data);
}
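/*
 * Illustrative only: the no-map replenish pattern these sync helpers enable,
 * assuming a platform where a cache invalidate plus virt-to-phys translation
 * can replace a full DMA map/unmap per buffer.
 *
 *	nbuf = dp_rx_nbuf_alloc(soc, rx_desc_pool);
 *	// Invalidate the buffer range (no DSB barrier here; the caller is
 *	// expected to order the writes once per batch) and derive the
 *	// physical address to program into the ring entry.
 *	paddr = dp_rx_nbuf_sync_no_dsb(soc, nbuf, rx_desc_pool->buf_size);
 */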
#if !defined(SPECULATIVE_READ_DISABLED)
static inline
void dp_rx_nbuf_unmap(struct dp_soc *soc,
		      struct dp_rx_desc *rx_desc,
		      uint8_t reo_ring_num)
{
	struct rx_desc_pool *rx_desc_pool;
	qdf_nbuf_t nbuf;

	rx_desc_pool = &soc->rx_desc_buf[rx_desc->pool_id];
	nbuf = rx_desc->nbuf;

	qdf_nbuf_dma_inv_range_no_dsb((void *)nbuf->data,
				      (void *)(nbuf->data +
					       rx_desc_pool->buf_size));
}

static inline
void dp_rx_nbuf_unmap_pool(struct dp_soc *soc,
			   struct rx_desc_pool *rx_desc_pool,
			   qdf_nbuf_t nbuf)
{
	qdf_nbuf_dma_inv_range((void *)nbuf->data,
			       (void *)(nbuf->data + rx_desc_pool->buf_size));
}

#else
static inline
void dp_rx_nbuf_unmap(struct dp_soc *soc,
		      struct dp_rx_desc *rx_desc,
		      uint8_t reo_ring_num)
{
}

static inline
void dp_rx_nbuf_unmap_pool(struct dp_soc *soc,
			   struct rx_desc_pool *rx_desc_pool,
			   qdf_nbuf_t nbuf)
{
}
#endif

static inline
void dp_rx_per_core_stats_update(struct dp_soc *soc, uint8_t ring_id,
				 uint32_t bufs_reaped)
{
}

static inline
qdf_nbuf_t dp_rx_nbuf_alloc(struct dp_soc *soc,
			    struct rx_desc_pool *rx_desc_pool)
{
	return qdf_nbuf_alloc_simple(soc->osdev, rx_desc_pool->buf_size,
				     RX_BUFFER_RESERVATION,
				     rx_desc_pool->buf_alignment, FALSE);
}

static inline
void dp_rx_nbuf_free(qdf_nbuf_t nbuf)
{
	qdf_nbuf_free_simple(nbuf);
}
#else
static inline
QDF_STATUS dp_pdev_rx_buffers_attach_simple(struct dp_soc *soc, uint32_t mac_id,
					    struct dp_srng *rxdma_srng,
					    struct rx_desc_pool *rx_desc_pool,
					    uint32_t num_req_buffers)
{
	return dp_pdev_rx_buffers_attach(soc, mac_id,
					 rxdma_srng,
					 rx_desc_pool,
					 num_req_buffers);
}

static inline
void dp_rx_buffers_replenish_simple(struct dp_soc *soc, uint32_t mac_id,
				    struct dp_srng *rxdma_srng,
				    struct rx_desc_pool *rx_desc_pool,
				    uint32_t num_req_buffers,
				    union dp_rx_desc_list_elem_t **desc_list,
				    union dp_rx_desc_list_elem_t **tail)
{
	dp_rx_buffers_replenish(soc, mac_id, rxdma_srng, rx_desc_pool,
				num_req_buffers, desc_list, tail, false);
}

static inline
void dp_rx_buffers_lt_replenish_simple(struct dp_soc *soc, uint32_t mac_id,
				       struct dp_srng *rxdma_srng,
				       struct rx_desc_pool *rx_desc_pool,
				       uint32_t num_req_buffers,
				       union dp_rx_desc_list_elem_t **desc_list,
				       union dp_rx_desc_list_elem_t **tail)
{
	dp_rx_buffers_replenish(soc, mac_id, rxdma_srng, rx_desc_pool,
				num_req_buffers, desc_list, tail, false);
}

static inline
qdf_dma_addr_t dp_rx_nbuf_sync_no_dsb(struct dp_soc *dp_soc,
				      qdf_nbuf_t nbuf,
				      uint32_t buf_size)
{
	return (qdf_dma_addr_t)NULL;
}

static inline
qdf_dma_addr_t dp_rx_nbuf_sync(struct dp_soc *dp_soc,
			       qdf_nbuf_t nbuf,
			       uint32_t buf_size)
{
	return (qdf_dma_addr_t)NULL;
}

static inline
void dp_rx_nbuf_unmap(struct dp_soc *soc,
		      struct dp_rx_desc *rx_desc,
		      uint8_t reo_ring_num)
{
	struct rx_desc_pool *rx_desc_pool;

	rx_desc_pool = &soc->rx_desc_buf[rx_desc->pool_id];
	dp_ipa_reo_ctx_buf_mapping_lock(soc, reo_ring_num);
	dp_ipa_handle_rx_buf_smmu_mapping(soc, rx_desc->nbuf,
					  rx_desc_pool->buf_size,
					  false);

	qdf_nbuf_unmap_nbytes_single(soc->osdev, rx_desc->nbuf,
				     QDF_DMA_FROM_DEVICE,
				     rx_desc_pool->buf_size);

	dp_ipa_reo_ctx_buf_mapping_unlock(soc, reo_ring_num);
}

static inline
void dp_rx_nbuf_unmap_pool(struct dp_soc *soc,
			   struct rx_desc_pool *rx_desc_pool,
			   qdf_nbuf_t nbuf)
{
	dp_ipa_handle_rx_buf_smmu_mapping(soc, nbuf, rx_desc_pool->buf_size,
					  false);
	qdf_nbuf_unmap_nbytes_single(soc->osdev, nbuf, QDF_DMA_FROM_DEVICE,
				     rx_desc_pool->buf_size);
}

static inline
void dp_rx_per_core_stats_update(struct dp_soc *soc, uint8_t ring_id,
				 uint32_t bufs_reaped)
{
	int cpu_id = qdf_get_cpu();

	DP_STATS_INC(soc, rx.ring_packets[cpu_id][ring_id], bufs_reaped);
}
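/*
 * Illustrative only: in the mapped path, unmap and free must stay paired; a
 * hedged sketch of draining a pool at teardown (the loop plumbing is
 * hypothetical, not this header's API).
 *
 *	// for each nbuf still held by the pool:
 *	//	Release the IPA/SMMU mapping and the kernel DMA mapping
 *	//	before the buffer is returned to the allocator.
 *	dp_rx_nbuf_unmap_pool(soc, rx_desc_pool, nbuf);
 *	dp_rx_nbuf_free(nbuf);
 */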
static inline
qdf_nbuf_t dp_rx_nbuf_alloc(struct dp_soc *soc,
			    struct rx_desc_pool *rx_desc_pool)
{
	return qdf_nbuf_alloc(soc->osdev, rx_desc_pool->buf_size,
			      RX_BUFFER_RESERVATION,
			      rx_desc_pool->buf_alignment, FALSE);
}

static inline
void dp_rx_nbuf_free(qdf_nbuf_t nbuf)
{
	qdf_nbuf_free(nbuf);
}
#endif

#ifdef DP_UMAC_HW_RESET_SUPPORT
/**
 * dp_rx_desc_reuse() - Reuse the rx descriptors to fill the rx buf ring
 * @soc: core txrx main context
 * @nbuf_list: nbuf list for delayed free
 *
 * Return: void
 */
void dp_rx_desc_reuse(struct dp_soc *soc, qdf_nbuf_t *nbuf_list);

/**
 * dp_rx_desc_delayed_free() - Delayed free of the rx descs
 * @soc: core txrx main context
 *
 * Return: void
 */
void dp_rx_desc_delayed_free(struct dp_soc *soc);
#endif

/**
 * dp_rx_get_txrx_peer_and_vdev() - Get txrx peer and vdev from peer id
 * @soc: core txrx main context
 * @nbuf: pointer to the first msdu of an amsdu.
 * @peer_id: Peer id of the peer
 * @txrx_ref_handle: Buffer to save the handle for txrx peer's reference
 * @pkt_capture_offload: Flag indicating if pkt capture offload is needed
 * @vdev: Buffer to hold pointer to vdev
 * @rx_pdev: Buffer to hold pointer to rx pdev
 * @dsf: delay stats flag
 * @old_tid: Old tid
 *
 * Get txrx peer and vdev from peer id
 *
 * Return: Pointer to txrx peer
 */
static inline struct dp_txrx_peer *
dp_rx_get_txrx_peer_and_vdev(struct dp_soc *soc,
			     qdf_nbuf_t nbuf,
			     uint16_t peer_id,
			     dp_txrx_ref_handle *txrx_ref_handle,
			     bool pkt_capture_offload,
			     struct dp_vdev **vdev,
			     struct dp_pdev **rx_pdev,
			     uint32_t *dsf,
			     uint32_t *old_tid)
{
	struct dp_txrx_peer *txrx_peer = NULL;

	txrx_peer = dp_txrx_peer_get_ref_by_id(soc, peer_id, txrx_ref_handle,
					       DP_MOD_ID_RX);

	if (qdf_likely(txrx_peer)) {
		*vdev = txrx_peer->vdev;
	} else {
		nbuf->next = NULL;
		dp_rx_deliver_to_pkt_capture_no_peer(soc, nbuf,
						     pkt_capture_offload);
		if (!pkt_capture_offload)
			dp_rx_deliver_to_stack_no_peer(soc, nbuf);

		goto end;
	}

	if (qdf_unlikely(!(*vdev))) {
		qdf_nbuf_free(nbuf);
		DP_STATS_INC(soc, rx.err.invalid_vdev, 1);
		goto end;
	}

	*rx_pdev = (*vdev)->pdev;
	*dsf = (*rx_pdev)->delay_stats_flag;
	*old_tid = 0xff;

end:
	return txrx_peer;
}

static inline QDF_STATUS
dp_peer_rx_reorder_queue_setup(struct dp_soc *soc, struct dp_peer *peer,
			       int tid, uint32_t ba_window_size)
{
	return soc->arch_ops.dp_peer_rx_reorder_queue_setup(soc,
							    peer, tid,
							    ba_window_size);
}
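/*
 * Illustrative only: how the peer/vdev lookup feeds per-peer list delivery
 * in a rx handler. A trimmed sketch with locals assumed to exist in the
 * caller; dp_txrx_peer_unref_delete() is assumed from dp_peer.h.
 *
 *	txrx_peer = dp_rx_get_txrx_peer_and_vdev(soc, nbuf, peer_id,
 *						 &txrx_ref_handle,
 *						 pkt_capture_offload,
 *						 &vdev, &rx_pdev, &dsf,
 *						 &old_tid);
 *	if (qdf_unlikely(!txrx_peer))
 *		continue;	// nbuf was already handed off or dropped
 *
 *	// Append to the per-peer batch, then flush it to the stack:
 *	DP_RX_LIST_APPEND(deliver_list_head, deliver_list_tail, nbuf);
 *	DP_RX_DELIVER_TO_STACK(soc, vdev, txrx_peer, peer_id,
 *			       pkt_capture_offload,
 *			       deliver_list_head, deliver_list_tail);
 *	dp_txrx_peer_unref_delete(txrx_ref_handle, DP_MOD_ID_RX);
 */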
static inline
void dp_rx_nbuf_list_deliver(struct dp_soc *soc,
			     struct dp_vdev *vdev,
			     struct dp_txrx_peer *txrx_peer,
			     uint16_t peer_id,
			     uint8_t pkt_capture_offload,
			     qdf_nbuf_t deliver_list_head,
			     qdf_nbuf_t deliver_list_tail)
{
	qdf_nbuf_t nbuf, next;

	if (qdf_likely(deliver_list_head)) {
		if (qdf_likely(txrx_peer)) {
			dp_rx_deliver_to_pkt_capture(soc, vdev->pdev, peer_id,
						     pkt_capture_offload,
						     deliver_list_head);
			if (!pkt_capture_offload)
				dp_rx_deliver_to_stack(soc, vdev, txrx_peer,
						       deliver_list_head,
						       deliver_list_tail);
		} else {
			nbuf = deliver_list_head;
			while (nbuf) {
				next = nbuf->next;
				nbuf->next = NULL;
				dp_rx_deliver_to_stack_no_peer(soc, nbuf);
				nbuf = next;
			}
		}
	}
}

#ifdef DP_TX_RX_TPUT_SIMULATE
/*
 * Change this macro value to simulate a different RX T-put. For example, if
 * OTA throughput is 100 Mbps and the target is 200 Mbps, the multiplication
 * factor is 2, so set the macro value to 1 (multiplication factor - 1).
 */
#define DP_RX_PKTS_DUPLICATE_CNT 0
static inline
void dp_rx_nbuf_list_dup_deliver(struct dp_soc *soc,
				 struct dp_vdev *vdev,
				 struct dp_txrx_peer *txrx_peer,
				 uint16_t peer_id,
				 uint8_t pkt_capture_offload,
				 qdf_nbuf_t ori_list_head,
				 qdf_nbuf_t ori_list_tail)
{
	qdf_nbuf_t new_skb = NULL;
	qdf_nbuf_t new_list_head = NULL;
	qdf_nbuf_t new_list_tail = NULL;
	qdf_nbuf_t nbuf = NULL;
	int i;

	for (i = 0; i < DP_RX_PKTS_DUPLICATE_CNT; i++) {
		nbuf = ori_list_head;
		new_list_head = NULL;
		new_list_tail = NULL;

		while (nbuf) {
			new_skb = qdf_nbuf_copy(nbuf);
			if (qdf_likely(new_skb))
				DP_RX_LIST_APPEND(new_list_head,
						  new_list_tail,
						  new_skb);
			else
				dp_err("copy skb failed");

			nbuf = qdf_nbuf_next(nbuf);
		}

		/* deliver the copied nbuf list */
		dp_rx_nbuf_list_deliver(soc, vdev, txrx_peer, peer_id,
					pkt_capture_offload,
					new_list_head,
					new_list_tail);
	}

	/* deliver the original skb_list */
	dp_rx_nbuf_list_deliver(soc, vdev, txrx_peer, peer_id,
				pkt_capture_offload,
				ori_list_head,
				ori_list_tail);
}

#define DP_RX_DELIVER_TO_STACK dp_rx_nbuf_list_dup_deliver

#else /* !DP_TX_RX_TPUT_SIMULATE */

#define DP_RX_DELIVER_TO_STACK dp_rx_nbuf_list_deliver

#endif /* DP_TX_RX_TPUT_SIMULATE */

#endif /* _DP_RX_H */