1 /* 2 * Copyright (c) 2016-2021 The Linux Foundation. All rights reserved. 3 * Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved. 4 * 5 * Permission to use, copy, modify, and/or distribute this software for 6 * any purpose with or without fee is hereby granted, provided that the 7 * above copyright notice and this permission notice appear in all 8 * copies. 9 * 10 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL 11 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED 12 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE 13 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL 14 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR 15 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER 16 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR 17 * PERFORMANCE OF THIS SOFTWARE. 18 */ 19 20 #ifndef _DP_RX_H 21 #define _DP_RX_H 22 23 #include "hal_rx.h" 24 #include "dp_peer.h" 25 #include "dp_internal.h" 26 #include <qdf_tracepoint.h> 27 #include "dp_ipa.h" 28 29 #ifdef RXDMA_OPTIMIZATION 30 #ifndef RX_DATA_BUFFER_ALIGNMENT 31 #define RX_DATA_BUFFER_ALIGNMENT 128 32 #endif 33 #ifndef RX_MONITOR_BUFFER_ALIGNMENT 34 #define RX_MONITOR_BUFFER_ALIGNMENT 128 35 #endif 36 #else /* RXDMA_OPTIMIZATION */ 37 #define RX_DATA_BUFFER_ALIGNMENT 4 38 #define RX_MONITOR_BUFFER_ALIGNMENT 4 39 #endif /* RXDMA_OPTIMIZATION */ 40 41 #if defined(WLAN_MAX_PDEVS) && (WLAN_MAX_PDEVS == 1) 42 #define DP_WBM2SW_RBM(sw0_bm_id) HAL_RX_BUF_RBM_SW1_BM(sw0_bm_id) 43 /* RBM value used for re-injecting defragmented packets into REO */ 44 #define DP_DEFRAG_RBM(sw0_bm_id) HAL_RX_BUF_RBM_SW3_BM(sw0_bm_id) 45 #endif 46 47 /* Max buffer in invalid peer SG list*/ 48 #define DP_MAX_INVALID_BUFFERS 10 49 #ifdef DP_INVALID_PEER_ASSERT 50 #define DP_PDEV_INVALID_PEER_MSDU_CHECK(head, tail) \ 51 do { \ 52 qdf_assert_always(!(head)); \ 53 qdf_assert_always(!(tail)); \ 54 } while (0) 55 #else 56 #define DP_PDEV_INVALID_PEER_MSDU_CHECK(head, tail) /* no op */ 57 #endif 58 59 #define RX_BUFFER_RESERVATION 0 60 61 #define DP_DEFAULT_NOISEFLOOR (-96) 62 63 #define DP_RX_DESC_MAGIC 0xdec0de 64 65 #define dp_rx_alert(params...) QDF_TRACE_FATAL(QDF_MODULE_ID_DP_RX, params) 66 #define dp_rx_err(params...) QDF_TRACE_ERROR(QDF_MODULE_ID_DP_RX, params) 67 #define dp_rx_warn(params...) QDF_TRACE_WARN(QDF_MODULE_ID_DP_RX, params) 68 #define dp_rx_info(params...) \ 69 __QDF_TRACE_FL(QDF_TRACE_LEVEL_INFO_HIGH, QDF_MODULE_ID_DP_RX, ## params) 70 #define dp_rx_info_rl(params...) \ 71 __QDF_TRACE_RL(QDF_TRACE_LEVEL_INFO_HIGH, QDF_MODULE_ID_DP_RX, ## params) 72 #define dp_rx_debug(params...) QDF_TRACE_DEBUG(QDF_MODULE_ID_DP_RX, params) 73 #define dp_rx_err_err(params...) \ 74 QDF_TRACE_ERROR(QDF_MODULE_ID_DP_RX_ERROR, params) 75 76 /** 77 * enum dp_rx_desc_state 78 * 79 * @RX_DESC_REPLENISHED: rx desc replenished 80 * @RX_DESC_IN_FREELIST: rx desc in freelist 81 */ 82 enum dp_rx_desc_state { 83 RX_DESC_REPLENISHED, 84 RX_DESC_IN_FREELIST, 85 }; 86 87 #ifndef QCA_HOST_MODE_WIFI_DISABLED 88 /** 89 * struct dp_rx_desc_dbg_info 90 * 91 * @freelist_caller: name of the function that put the 92 * the rx desc in freelist 93 * @freelist_ts: timestamp when the rx desc is put in 94 * a freelist 95 * @replenish_caller: name of the function that last 96 * replenished the rx desc 97 * @replenish_ts: last replenish timestamp 98 * @prev_nbuf: previous nbuf info 99 * @prev_nbuf_data_addr: previous nbuf data address 100 */ 101 struct dp_rx_desc_dbg_info { 102 char freelist_caller[QDF_MEM_FUNC_NAME_SIZE]; 103 uint64_t freelist_ts; 104 char replenish_caller[QDF_MEM_FUNC_NAME_SIZE]; 105 uint64_t replenish_ts; 106 qdf_nbuf_t prev_nbuf; 107 uint8_t *prev_nbuf_data_addr; 108 }; 109 110 #endif /* QCA_HOST_MODE_WIFI_DISABLED */ 111 112 /** 113 * struct dp_rx_desc 114 * 115 * @nbuf: VA of the "skb" posted 116 * @rx_buf_start: VA of the original Rx buffer, before 117 * movement of any skb->data pointer 118 * @paddr_buf_start: PA of the original Rx buffer, before 119 * movement of any frag pointer 120 * @cookie: index into the sw array which holds 121 * the sw Rx descriptors 122 * Cookie space is 21 bits: 123 * lower 18 bits -- index 124 * upper 3 bits -- pool_id 125 * @pool_id: pool Id for which this allocated. 126 * Can only be used if there is no flow 127 * steering 128 * @chip_id: chip_id indicating MLO chip_id 129 * valid or used only in case of multi-chip MLO 130 * @reuse_nbuf: VA of the "skb" which is being reused 131 * @magic: 132 * @nbuf_data_addr: VA of nbuf data posted 133 * @dbg_info: 134 * @in_use: rx_desc is in use 135 * @unmapped: used to mark rx_desc an unmapped if the corresponding 136 * nbuf is already unmapped 137 * @in_err_state: Nbuf sanity failed for this descriptor. 138 * @has_reuse_nbuf: the nbuf associated with this desc is also saved in 139 * reuse_nbuf field 140 */ 141 struct dp_rx_desc { 142 qdf_nbuf_t nbuf; 143 #ifdef WLAN_SUPPORT_PPEDS 144 qdf_nbuf_t reuse_nbuf; 145 #endif 146 uint8_t *rx_buf_start; 147 qdf_dma_addr_t paddr_buf_start; 148 uint32_t cookie; 149 uint8_t pool_id; 150 uint8_t chip_id; 151 #ifdef RX_DESC_DEBUG_CHECK 152 uint32_t magic; 153 uint8_t *nbuf_data_addr; 154 struct dp_rx_desc_dbg_info *dbg_info; 155 #endif 156 uint8_t in_use:1, 157 unmapped:1, 158 in_err_state:1, 159 has_reuse_nbuf:1; 160 }; 161 162 #ifndef QCA_HOST_MODE_WIFI_DISABLED 163 #ifdef ATH_RX_PRI_SAVE 164 #define DP_RX_TID_SAVE(_nbuf, _tid) \ 165 (qdf_nbuf_set_priority(_nbuf, _tid)) 166 #else 167 #define DP_RX_TID_SAVE(_nbuf, _tid) 168 #endif 169 170 /* RX Descriptor Multi Page memory alloc related */ 171 #define DP_RX_DESC_OFFSET_NUM_BITS 8 172 #define DP_RX_DESC_PAGE_ID_NUM_BITS 8 173 #define DP_RX_DESC_POOL_ID_NUM_BITS 4 174 175 #define DP_RX_DESC_PAGE_ID_SHIFT DP_RX_DESC_OFFSET_NUM_BITS 176 #define DP_RX_DESC_POOL_ID_SHIFT \ 177 (DP_RX_DESC_OFFSET_NUM_BITS + DP_RX_DESC_PAGE_ID_NUM_BITS) 178 #define RX_DESC_MULTI_PAGE_COOKIE_POOL_ID_MASK \ 179 (((1 << DP_RX_DESC_POOL_ID_NUM_BITS) - 1) << DP_RX_DESC_POOL_ID_SHIFT) 180 #define RX_DESC_MULTI_PAGE_COOKIE_PAGE_ID_MASK \ 181 (((1 << DP_RX_DESC_PAGE_ID_NUM_BITS) - 1) << \ 182 DP_RX_DESC_PAGE_ID_SHIFT) 183 #define RX_DESC_MULTI_PAGE_COOKIE_OFFSET_MASK \ 184 ((1 << DP_RX_DESC_OFFSET_NUM_BITS) - 1) 185 #define DP_RX_DESC_MULTI_PAGE_COOKIE_GET_POOL_ID(_cookie) \ 186 (((_cookie) & RX_DESC_MULTI_PAGE_COOKIE_POOL_ID_MASK) >> \ 187 DP_RX_DESC_POOL_ID_SHIFT) 188 #define DP_RX_DESC_MULTI_PAGE_COOKIE_GET_PAGE_ID(_cookie) \ 189 (((_cookie) & RX_DESC_MULTI_PAGE_COOKIE_PAGE_ID_MASK) >> \ 190 DP_RX_DESC_PAGE_ID_SHIFT) 191 #define DP_RX_DESC_MULTI_PAGE_COOKIE_GET_OFFSET(_cookie) \ 192 ((_cookie) & RX_DESC_MULTI_PAGE_COOKIE_OFFSET_MASK) 193 194 #endif /* QCA_HOST_MODE_WIFI_DISABLED */ 195 196 #define RX_DESC_COOKIE_INDEX_SHIFT 0 197 #define RX_DESC_COOKIE_INDEX_MASK 0x3ffff /* 18 bits */ 198 #define RX_DESC_COOKIE_POOL_ID_SHIFT 18 199 #define RX_DESC_COOKIE_POOL_ID_MASK 0x1c0000 200 201 #define DP_RX_DESC_COOKIE_MAX \ 202 (RX_DESC_COOKIE_INDEX_MASK | RX_DESC_COOKIE_POOL_ID_MASK) 203 204 #define DP_RX_DESC_COOKIE_POOL_ID_GET(_cookie) \ 205 (((_cookie) & RX_DESC_COOKIE_POOL_ID_MASK) >> \ 206 RX_DESC_COOKIE_POOL_ID_SHIFT) 207 208 #define DP_RX_DESC_COOKIE_INDEX_GET(_cookie) \ 209 (((_cookie) & RX_DESC_COOKIE_INDEX_MASK) >> \ 210 RX_DESC_COOKIE_INDEX_SHIFT) 211 212 #define dp_rx_add_to_free_desc_list(head, tail, new) \ 213 __dp_rx_add_to_free_desc_list(head, tail, new, __func__) 214 215 #define dp_rx_add_to_free_desc_list_reuse(head, tail, new) \ 216 __dp_rx_add_to_free_desc_list_reuse(head, tail, new, __func__) 217 218 #define dp_rx_buffers_replenish(soc, mac_id, rxdma_srng, rx_desc_pool, \ 219 num_buffers, desc_list, tail, req_only) \ 220 __dp_rx_buffers_replenish(soc, mac_id, rxdma_srng, rx_desc_pool, \ 221 num_buffers, desc_list, tail, req_only, \ 222 __func__) 223 224 #ifdef WLAN_SUPPORT_RX_FISA 225 /** 226 * dp_rx_set_hdr_pad() - set l3 padding in nbuf cb 227 * @nbuf: pkt skb pointer 228 * @l3_padding: l3 padding 229 * 230 * Return: None 231 */ 232 static inline 233 void dp_rx_set_hdr_pad(qdf_nbuf_t nbuf, uint32_t l3_padding) 234 { 235 QDF_NBUF_CB_RX_PACKET_L3_HDR_PAD(nbuf) = l3_padding; 236 } 237 #else 238 static inline 239 void dp_rx_set_hdr_pad(qdf_nbuf_t nbuf, uint32_t l3_padding) 240 { 241 } 242 #endif 243 244 #ifdef DP_RX_SPECIAL_FRAME_NEED 245 /** 246 * dp_rx_is_special_frame() - check is RX frame special needed 247 * 248 * @nbuf: RX skb pointer 249 * @frame_mask: the mask for special frame needed 250 * 251 * Check is RX frame wanted matched with mask 252 * 253 * Return: true - special frame needed, false - no 254 */ 255 static inline 256 bool dp_rx_is_special_frame(qdf_nbuf_t nbuf, uint32_t frame_mask) 257 { 258 if (((frame_mask & FRAME_MASK_IPV4_ARP) && 259 qdf_nbuf_is_ipv4_arp_pkt(nbuf)) || 260 ((frame_mask & FRAME_MASK_IPV4_DHCP) && 261 qdf_nbuf_is_ipv4_dhcp_pkt(nbuf)) || 262 ((frame_mask & FRAME_MASK_IPV4_EAPOL) && 263 qdf_nbuf_is_ipv4_eapol_pkt(nbuf)) || 264 ((frame_mask & FRAME_MASK_IPV6_DHCP) && 265 qdf_nbuf_is_ipv6_dhcp_pkt(nbuf))) 266 return true; 267 268 return false; 269 } 270 271 /** 272 * dp_rx_deliver_special_frame() - Deliver the RX special frame to stack 273 * if matches mask 274 * 275 * @soc: Datapath soc handler 276 * @peer: pointer to DP peer 277 * @nbuf: pointer to the skb of RX frame 278 * @frame_mask: the mask for special frame needed 279 * @rx_tlv_hdr: start of rx tlv header 280 * 281 * note: Msdu_len must have been stored in QDF_NBUF_CB_RX_PKT_LEN(nbuf) and 282 * single nbuf is expected. 283 * 284 * Return: true - nbuf has been delivered to stack, false - not. 285 */ 286 bool dp_rx_deliver_special_frame(struct dp_soc *soc, struct dp_txrx_peer *peer, 287 qdf_nbuf_t nbuf, uint32_t frame_mask, 288 uint8_t *rx_tlv_hdr); 289 #else 290 static inline 291 bool dp_rx_is_special_frame(qdf_nbuf_t nbuf, uint32_t frame_mask) 292 { 293 return false; 294 } 295 296 static inline 297 bool dp_rx_deliver_special_frame(struct dp_soc *soc, struct dp_txrx_peer *peer, 298 qdf_nbuf_t nbuf, uint32_t frame_mask, 299 uint8_t *rx_tlv_hdr) 300 { 301 return false; 302 } 303 #endif 304 305 #ifdef FEATURE_RX_LINKSPEED_ROAM_TRIGGER 306 /** 307 * dp_rx_data_is_specific() - Used to exclude specific frames 308 * not practical for getting rx 309 * stats like rate, mcs, nss, etc. 310 * 311 * @hal_soc_hdl: soc handler 312 * @rx_tlv_hdr: rx tlv header 313 * @nbuf: RX skb pointer 314 * 315 * Return: true - a specific frame not suitable 316 * for getting rx stats from it. 317 * false - a common frame suitable for 318 * getting rx stats from it. 319 */ 320 static inline 321 bool dp_rx_data_is_specific(hal_soc_handle_t hal_soc_hdl, 322 uint8_t *rx_tlv_hdr, 323 qdf_nbuf_t nbuf) 324 { 325 if (qdf_unlikely(qdf_nbuf_is_da_mcbc(nbuf))) 326 return true; 327 328 if (!hal_rx_tlv_first_mpdu_get(hal_soc_hdl, rx_tlv_hdr)) 329 return true; 330 331 if (!hal_rx_msdu_end_first_msdu_get(hal_soc_hdl, rx_tlv_hdr)) 332 return true; 333 334 /* ARP, EAPOL is neither IPV6 ETH nor IPV4 ETH from L3 level */ 335 if (qdf_likely(hal_rx_tlv_l3_type_get(hal_soc_hdl, rx_tlv_hdr) == 336 QDF_NBUF_TRAC_IPV4_ETH_TYPE)) { 337 if (qdf_nbuf_is_ipv4_dhcp_pkt(nbuf)) 338 return true; 339 } else if (qdf_likely(hal_rx_tlv_l3_type_get(hal_soc_hdl, rx_tlv_hdr) == 340 QDF_NBUF_TRAC_IPV6_ETH_TYPE)) { 341 if (qdf_nbuf_is_ipv6_dhcp_pkt(nbuf)) 342 return true; 343 } else { 344 return true; 345 } 346 return false; 347 } 348 #else 349 static inline 350 bool dp_rx_data_is_specific(hal_soc_handle_t hal_soc_hdl, 351 uint8_t *rx_tlv_hdr, 352 qdf_nbuf_t nbuf) 353 354 { 355 /* 356 * default return is true to make sure that rx stats 357 * will not be handled when this feature is disabled 358 */ 359 return true; 360 } 361 #endif /* FEATURE_RX_LINKSPEED_ROAM_TRIGGER */ 362 363 #ifndef QCA_HOST_MODE_WIFI_DISABLED 364 #ifdef DP_RX_DISABLE_NDI_MDNS_FORWARDING 365 static inline 366 bool dp_rx_check_ndi_mdns_fwding(struct dp_txrx_peer *ta_txrx_peer, 367 qdf_nbuf_t nbuf, uint8_t link_id) 368 { 369 if (ta_txrx_peer->vdev->opmode == wlan_op_mode_ndi && 370 qdf_nbuf_is_ipv6_mdns_pkt(nbuf)) { 371 DP_PEER_PER_PKT_STATS_INC(ta_txrx_peer, 372 rx.intra_bss.mdns_no_fwd, 373 1, link_id); 374 return false; 375 } 376 return true; 377 } 378 #else 379 static inline 380 bool dp_rx_check_ndi_mdns_fwding(struct dp_txrx_peer *ta_txrx_peer, 381 qdf_nbuf_t nbuf, uint8_t link_id) 382 { 383 return true; 384 } 385 #endif 386 #endif /* QCA_HOST_MODE_WIFI_DISABLED */ 387 388 /* DOC: Offset to obtain LLC hdr 389 * 390 * In the case of Wifi parse error 391 * to reach LLC header from beginning 392 * of VLAN tag we need to skip 8 bytes. 393 * Vlan_tag(4)+length(2)+length added 394 * by HW(2) = 8 bytes. 395 */ 396 #define DP_SKIP_VLAN 8 397 398 #ifndef QCA_HOST_MODE_WIFI_DISABLED 399 400 /** 401 * struct dp_rx_cached_buf - rx cached buffer 402 * @node: linked list node 403 * @buf: skb buffer 404 */ 405 struct dp_rx_cached_buf { 406 qdf_list_node_t node; 407 qdf_nbuf_t buf; 408 }; 409 410 #endif /* QCA_HOST_MODE_WIFI_DISABLED */ 411 412 /** 413 * dp_rx_xor_block() - xor block of data 414 * @b: destination data block 415 * @a: source data block 416 * @len: length of the data to process 417 * 418 * Return: None 419 */ 420 static inline void dp_rx_xor_block(uint8_t *b, const uint8_t *a, qdf_size_t len) 421 { 422 qdf_size_t i; 423 424 for (i = 0; i < len; i++) 425 b[i] ^= a[i]; 426 } 427 428 /** 429 * dp_rx_rotl() - rotate the bits left 430 * @val: unsigned integer input value 431 * @bits: number of bits 432 * 433 * Return: Integer with left rotated by number of 'bits' 434 */ 435 static inline uint32_t dp_rx_rotl(uint32_t val, int bits) 436 { 437 return (val << bits) | (val >> (32 - bits)); 438 } 439 440 /** 441 * dp_rx_rotr() - rotate the bits right 442 * @val: unsigned integer input value 443 * @bits: number of bits 444 * 445 * Return: Integer with right rotated by number of 'bits' 446 */ 447 static inline uint32_t dp_rx_rotr(uint32_t val, int bits) 448 { 449 return (val >> bits) | (val << (32 - bits)); 450 } 451 452 /** 453 * dp_set_rx_queue() - set queue_mapping in skb 454 * @nbuf: skb 455 * @queue_id: rx queue_id 456 * 457 * Return: void 458 */ 459 #ifdef QCA_OL_RX_MULTIQ_SUPPORT 460 static inline void dp_set_rx_queue(qdf_nbuf_t nbuf, uint8_t queue_id) 461 { 462 qdf_nbuf_record_rx_queue(nbuf, queue_id); 463 return; 464 } 465 #else 466 static inline void dp_set_rx_queue(qdf_nbuf_t nbuf, uint8_t queue_id) 467 { 468 } 469 #endif 470 471 /** 472 * dp_rx_xswap() - swap the bits left 473 * @val: unsigned integer input value 474 * 475 * Return: Integer with bits swapped 476 */ 477 static inline uint32_t dp_rx_xswap(uint32_t val) 478 { 479 return ((val & 0x00ff00ff) << 8) | ((val & 0xff00ff00) >> 8); 480 } 481 482 /** 483 * dp_rx_get_le32_split() - get little endian 32 bits split 484 * @b0: byte 0 485 * @b1: byte 1 486 * @b2: byte 2 487 * @b3: byte 3 488 * 489 * Return: Integer with split little endian 32 bits 490 */ 491 static inline uint32_t dp_rx_get_le32_split(uint8_t b0, uint8_t b1, uint8_t b2, 492 uint8_t b3) 493 { 494 return b0 | (b1 << 8) | (b2 << 16) | (b3 << 24); 495 } 496 497 /** 498 * dp_rx_get_le32() - get little endian 32 bits 499 * @p: source 32-bit value 500 * 501 * Return: Integer with little endian 32 bits 502 */ 503 static inline uint32_t dp_rx_get_le32(const uint8_t *p) 504 { 505 return dp_rx_get_le32_split(p[0], p[1], p[2], p[3]); 506 } 507 508 /** 509 * dp_rx_put_le32() - put little endian 32 bits 510 * @p: destination char array 511 * @v: source 32-bit integer 512 * 513 * Return: None 514 */ 515 static inline void dp_rx_put_le32(uint8_t *p, uint32_t v) 516 { 517 p[0] = (v) & 0xff; 518 p[1] = (v >> 8) & 0xff; 519 p[2] = (v >> 16) & 0xff; 520 p[3] = (v >> 24) & 0xff; 521 } 522 523 /* Extract michal mic block of data */ 524 #define dp_rx_michael_block(l, r) \ 525 do { \ 526 r ^= dp_rx_rotl(l, 17); \ 527 l += r; \ 528 r ^= dp_rx_xswap(l); \ 529 l += r; \ 530 r ^= dp_rx_rotl(l, 3); \ 531 l += r; \ 532 r ^= dp_rx_rotr(l, 2); \ 533 l += r; \ 534 } while (0) 535 536 /** 537 * struct dp_rx_desc_list_elem_t 538 * 539 * @next: Next pointer to form free list 540 * @rx_desc: DP Rx descriptor 541 */ 542 union dp_rx_desc_list_elem_t { 543 union dp_rx_desc_list_elem_t *next; 544 struct dp_rx_desc rx_desc; 545 }; 546 547 #ifdef RX_DESC_MULTI_PAGE_ALLOC 548 /** 549 * dp_rx_desc_find() - find dp rx descriptor from page ID and offset 550 * @page_id: Page ID 551 * @offset: Offset of the descriptor element 552 * @rx_pool: RX pool 553 * 554 * Return: RX descriptor element 555 */ 556 union dp_rx_desc_list_elem_t *dp_rx_desc_find(uint16_t page_id, uint16_t offset, 557 struct rx_desc_pool *rx_pool); 558 559 static inline 560 struct dp_rx_desc *dp_get_rx_desc_from_cookie(struct dp_soc *soc, 561 struct rx_desc_pool *pool, 562 uint32_t cookie) 563 { 564 uint8_t pool_id = DP_RX_DESC_MULTI_PAGE_COOKIE_GET_POOL_ID(cookie); 565 uint16_t page_id = DP_RX_DESC_MULTI_PAGE_COOKIE_GET_PAGE_ID(cookie); 566 uint8_t offset = DP_RX_DESC_MULTI_PAGE_COOKIE_GET_OFFSET(cookie); 567 struct rx_desc_pool *rx_desc_pool; 568 union dp_rx_desc_list_elem_t *rx_desc_elem; 569 570 if (qdf_unlikely(pool_id >= MAX_PDEV_CNT)) 571 return NULL; 572 573 rx_desc_pool = &pool[pool_id]; 574 rx_desc_elem = (union dp_rx_desc_list_elem_t *) 575 (rx_desc_pool->desc_pages.cacheable_pages[page_id] + 576 rx_desc_pool->elem_size * offset); 577 578 return &rx_desc_elem->rx_desc; 579 } 580 581 static inline 582 struct dp_rx_desc *dp_get_rx_mon_status_desc_from_cookie(struct dp_soc *soc, 583 struct rx_desc_pool *pool, 584 uint32_t cookie) 585 { 586 uint8_t pool_id = DP_RX_DESC_MULTI_PAGE_COOKIE_GET_POOL_ID(cookie); 587 uint16_t page_id = DP_RX_DESC_MULTI_PAGE_COOKIE_GET_PAGE_ID(cookie); 588 uint8_t offset = DP_RX_DESC_MULTI_PAGE_COOKIE_GET_OFFSET(cookie); 589 struct rx_desc_pool *rx_desc_pool; 590 union dp_rx_desc_list_elem_t *rx_desc_elem; 591 592 if (qdf_unlikely(pool_id >= NUM_RXDMA_STATUS_RINGS_PER_PDEV)) 593 return NULL; 594 595 rx_desc_pool = &pool[pool_id]; 596 rx_desc_elem = (union dp_rx_desc_list_elem_t *) 597 (rx_desc_pool->desc_pages.cacheable_pages[page_id] + 598 rx_desc_pool->elem_size * offset); 599 600 return &rx_desc_elem->rx_desc; 601 } 602 603 /** 604 * dp_rx_cookie_2_va_rxdma_buf() - Converts cookie to a virtual address of 605 * the Rx descriptor on Rx DMA source ring buffer 606 * @soc: core txrx main context 607 * @cookie: cookie used to lookup virtual address 608 * 609 * Return: Pointer to the Rx descriptor 610 */ 611 static inline 612 struct dp_rx_desc *dp_rx_cookie_2_va_rxdma_buf(struct dp_soc *soc, 613 uint32_t cookie) 614 { 615 return dp_get_rx_desc_from_cookie(soc, &soc->rx_desc_buf[0], cookie); 616 } 617 618 /** 619 * dp_rx_cookie_2_va_mon_buf() - Converts cookie to a virtual address of 620 * the Rx descriptor on monitor ring buffer 621 * @soc: core txrx main context 622 * @cookie: cookie used to lookup virtual address 623 * 624 * Return: Pointer to the Rx descriptor 625 */ 626 static inline 627 struct dp_rx_desc *dp_rx_cookie_2_va_mon_buf(struct dp_soc *soc, 628 uint32_t cookie) 629 { 630 return dp_get_rx_desc_from_cookie(soc, &soc->rx_desc_mon[0], cookie); 631 } 632 633 /** 634 * dp_rx_cookie_2_va_mon_status() - Converts cookie to a virtual address of 635 * the Rx descriptor on monitor status ring buffer 636 * @soc: core txrx main context 637 * @cookie: cookie used to lookup virtual address 638 * 639 * Return: Pointer to the Rx descriptor 640 */ 641 static inline 642 struct dp_rx_desc *dp_rx_cookie_2_va_mon_status(struct dp_soc *soc, 643 uint32_t cookie) 644 { 645 return dp_get_rx_mon_status_desc_from_cookie(soc, 646 &soc->rx_desc_status[0], 647 cookie); 648 } 649 #else 650 651 void dp_rx_desc_pool_init(struct dp_soc *soc, uint32_t pool_id, 652 uint32_t pool_size, 653 struct rx_desc_pool *rx_desc_pool); 654 655 /** 656 * dp_rx_cookie_2_va_rxdma_buf() - Converts cookie to a virtual address of 657 * the Rx descriptor on Rx DMA source ring buffer 658 * @soc: core txrx main context 659 * @cookie: cookie used to lookup virtual address 660 * 661 * Return: void *: Virtual Address of the Rx descriptor 662 */ 663 static inline 664 void *dp_rx_cookie_2_va_rxdma_buf(struct dp_soc *soc, uint32_t cookie) 665 { 666 uint8_t pool_id = DP_RX_DESC_COOKIE_POOL_ID_GET(cookie); 667 uint16_t index = DP_RX_DESC_COOKIE_INDEX_GET(cookie); 668 struct rx_desc_pool *rx_desc_pool; 669 670 if (qdf_unlikely(pool_id >= MAX_RXDESC_POOLS)) 671 return NULL; 672 673 rx_desc_pool = &soc->rx_desc_buf[pool_id]; 674 675 if (qdf_unlikely(index >= rx_desc_pool->pool_size)) 676 return NULL; 677 678 return &rx_desc_pool->array[index].rx_desc; 679 } 680 681 /** 682 * dp_rx_cookie_2_va_mon_buf() - Converts cookie to a virtual address of 683 * the Rx descriptor on monitor ring buffer 684 * @soc: core txrx main context 685 * @cookie: cookie used to lookup virtual address 686 * 687 * Return: void *: Virtual Address of the Rx descriptor 688 */ 689 static inline 690 void *dp_rx_cookie_2_va_mon_buf(struct dp_soc *soc, uint32_t cookie) 691 { 692 uint8_t pool_id = DP_RX_DESC_COOKIE_POOL_ID_GET(cookie); 693 uint16_t index = DP_RX_DESC_COOKIE_INDEX_GET(cookie); 694 /* TODO */ 695 /* Add sanity for pool_id & index */ 696 return &(soc->rx_desc_mon[pool_id].array[index].rx_desc); 697 } 698 699 /** 700 * dp_rx_cookie_2_va_mon_status() - Converts cookie to a virtual address of 701 * the Rx descriptor on monitor status ring buffer 702 * @soc: core txrx main context 703 * @cookie: cookie used to lookup virtual address 704 * 705 * Return: void *: Virtual Address of the Rx descriptor 706 */ 707 static inline 708 void *dp_rx_cookie_2_va_mon_status(struct dp_soc *soc, uint32_t cookie) 709 { 710 uint8_t pool_id = DP_RX_DESC_COOKIE_POOL_ID_GET(cookie); 711 uint16_t index = DP_RX_DESC_COOKIE_INDEX_GET(cookie); 712 /* TODO */ 713 /* Add sanity for pool_id & index */ 714 return &(soc->rx_desc_status[pool_id].array[index].rx_desc); 715 } 716 #endif /* RX_DESC_MULTI_PAGE_ALLOC */ 717 718 #ifndef QCA_HOST_MODE_WIFI_DISABLED 719 720 static inline bool dp_rx_check_ap_bridge(struct dp_vdev *vdev) 721 { 722 return vdev->ap_bridge_enabled; 723 } 724 725 #ifdef DP_RX_DESC_COOKIE_INVALIDATE 726 static inline QDF_STATUS 727 dp_rx_cookie_check_and_invalidate(hal_ring_desc_t ring_desc) 728 { 729 if (qdf_unlikely(HAL_RX_REO_BUF_COOKIE_INVALID_GET(ring_desc))) 730 return QDF_STATUS_E_FAILURE; 731 732 HAL_RX_REO_BUF_COOKIE_INVALID_SET(ring_desc); 733 return QDF_STATUS_SUCCESS; 734 } 735 736 /** 737 * dp_rx_cookie_reset_invalid_bit() - Reset the invalid bit of the cookie 738 * field in ring descriptor 739 * @ring_desc: ring descriptor 740 * 741 * Return: None 742 */ 743 static inline void 744 dp_rx_cookie_reset_invalid_bit(hal_ring_desc_t ring_desc) 745 { 746 HAL_RX_REO_BUF_COOKIE_INVALID_RESET(ring_desc); 747 } 748 #else 749 static inline QDF_STATUS 750 dp_rx_cookie_check_and_invalidate(hal_ring_desc_t ring_desc) 751 { 752 return QDF_STATUS_SUCCESS; 753 } 754 755 static inline void 756 dp_rx_cookie_reset_invalid_bit(hal_ring_desc_t ring_desc) 757 { 758 } 759 #endif 760 761 #endif /* QCA_HOST_MODE_WIFI_DISABLED */ 762 763 #if defined(RX_DESC_MULTI_PAGE_ALLOC) && \ 764 defined(DP_WAR_VALIDATE_RX_ERR_MSDU_COOKIE) 765 /** 766 * dp_rx_is_sw_cookie_valid() - check whether SW cookie valid 767 * @soc: dp soc ref 768 * @cookie: Rx buf SW cookie value 769 * 770 * Return: true if cookie is valid else false 771 */ 772 static inline bool dp_rx_is_sw_cookie_valid(struct dp_soc *soc, 773 uint32_t cookie) 774 { 775 uint8_t pool_id = DP_RX_DESC_MULTI_PAGE_COOKIE_GET_POOL_ID(cookie); 776 uint16_t page_id = DP_RX_DESC_MULTI_PAGE_COOKIE_GET_PAGE_ID(cookie); 777 uint8_t offset = DP_RX_DESC_MULTI_PAGE_COOKIE_GET_OFFSET(cookie); 778 struct rx_desc_pool *rx_desc_pool; 779 780 if (qdf_unlikely(pool_id >= MAX_PDEV_CNT)) 781 goto fail; 782 783 rx_desc_pool = &soc->rx_desc_buf[pool_id]; 784 785 if (page_id >= rx_desc_pool->desc_pages.num_pages || 786 offset >= rx_desc_pool->desc_pages.num_element_per_page) 787 goto fail; 788 789 return true; 790 791 fail: 792 DP_STATS_INC(soc, rx.err.invalid_cookie, 1); 793 return false; 794 } 795 #else 796 /** 797 * dp_rx_is_sw_cookie_valid() - check whether SW cookie valid 798 * @soc: dp soc ref 799 * @cookie: Rx buf SW cookie value 800 * 801 * When multi page alloc is disabled SW cookie validness is 802 * checked while fetching Rx descriptor, so no need to check here 803 * 804 * Return: true if cookie is valid else false 805 */ 806 static inline bool dp_rx_is_sw_cookie_valid(struct dp_soc *soc, 807 uint32_t cookie) 808 { 809 return true; 810 } 811 #endif 812 813 /** 814 * dp_rx_desc_pool_is_allocated() - check if memory is allocated for the 815 * rx descriptor pool 816 * @rx_desc_pool: rx descriptor pool pointer 817 * 818 * Return: QDF_STATUS QDF_STATUS_SUCCESS 819 * QDF_STATUS_E_NOMEM 820 */ 821 QDF_STATUS dp_rx_desc_pool_is_allocated(struct rx_desc_pool *rx_desc_pool); 822 823 /** 824 * dp_rx_desc_pool_alloc() - Allocate a memory pool for software rx 825 * descriptors 826 * @soc: core txrx main context 827 * @pool_size: number of rx descriptors (size of the pool) 828 * @rx_desc_pool: rx descriptor pool pointer 829 * 830 * Return: QDF_STATUS QDF_STATUS_SUCCESS 831 * QDF_STATUS_E_NOMEM 832 * QDF_STATUS_E_FAULT 833 */ 834 QDF_STATUS dp_rx_desc_pool_alloc(struct dp_soc *soc, 835 uint32_t pool_size, 836 struct rx_desc_pool *rx_desc_pool); 837 838 /** 839 * dp_rx_desc_pool_init() - Initialize the software RX descriptor pool 840 * @soc: core txrx main context 841 * @pool_id: pool_id which is one of 3 mac_ids 842 * @pool_size: size of the rx descriptor pool 843 * @rx_desc_pool: rx descriptor pool pointer 844 * 845 * Convert the pool of memory into a list of rx descriptors and create 846 * locks to access this list of rx descriptors. 847 * 848 */ 849 void dp_rx_desc_pool_init(struct dp_soc *soc, uint32_t pool_id, 850 uint32_t pool_size, 851 struct rx_desc_pool *rx_desc_pool); 852 853 /** 854 * dp_rx_add_desc_list_to_free_list() - append unused desc_list back to 855 * freelist. 856 * @soc: core txrx main context 857 * @local_desc_list: local desc list provided by the caller 858 * @tail: attach the point to last desc of local desc list 859 * @pool_id: pool_id which is one of 3 mac_ids 860 * @rx_desc_pool: rx descriptor pool pointer 861 */ 862 void dp_rx_add_desc_list_to_free_list(struct dp_soc *soc, 863 union dp_rx_desc_list_elem_t **local_desc_list, 864 union dp_rx_desc_list_elem_t **tail, 865 uint16_t pool_id, 866 struct rx_desc_pool *rx_desc_pool); 867 868 /** 869 * dp_rx_get_free_desc_list() - provide a list of descriptors from 870 * the free rx desc pool. 871 * @soc: core txrx main context 872 * @pool_id: pool_id which is one of 3 mac_ids 873 * @rx_desc_pool: rx descriptor pool pointer 874 * @num_descs: number of descs requested from freelist 875 * @desc_list: attach the descs to this list (output parameter) 876 * @tail: attach the point to last desc of free list (output parameter) 877 * 878 * Return: number of descs allocated from free list. 879 */ 880 uint16_t dp_rx_get_free_desc_list(struct dp_soc *soc, uint32_t pool_id, 881 struct rx_desc_pool *rx_desc_pool, 882 uint16_t num_descs, 883 union dp_rx_desc_list_elem_t **desc_list, 884 union dp_rx_desc_list_elem_t **tail); 885 886 /** 887 * dp_rx_pdev_desc_pool_alloc() - allocate memory for software rx descriptor 888 * pool 889 * @pdev: core txrx pdev context 890 * 891 * Return: QDF_STATUS - QDF_STATUS_SUCCESS 892 * QDF_STATUS_E_NOMEM 893 */ 894 QDF_STATUS dp_rx_pdev_desc_pool_alloc(struct dp_pdev *pdev); 895 896 /** 897 * dp_rx_pdev_desc_pool_free() - free software rx descriptor pool 898 * @pdev: core txrx pdev context 899 */ 900 void dp_rx_pdev_desc_pool_free(struct dp_pdev *pdev); 901 902 /** 903 * dp_rx_pdev_desc_pool_init() - initialize software rx descriptors 904 * @pdev: core txrx pdev context 905 * 906 * Return: QDF_STATUS - QDF_STATUS_SUCCESS 907 * QDF_STATUS_E_NOMEM 908 */ 909 QDF_STATUS dp_rx_pdev_desc_pool_init(struct dp_pdev *pdev); 910 911 /** 912 * dp_rx_pdev_desc_pool_deinit() - de-initialize software rx descriptor pools 913 * @pdev: core txrx pdev context 914 * 915 * This function resets the freelist of rx descriptors and destroys locks 916 * associated with this list of descriptors. 917 */ 918 void dp_rx_pdev_desc_pool_deinit(struct dp_pdev *pdev); 919 920 void dp_rx_desc_pool_deinit(struct dp_soc *soc, 921 struct rx_desc_pool *rx_desc_pool, 922 uint32_t pool_id); 923 924 QDF_STATUS dp_rx_pdev_attach(struct dp_pdev *pdev); 925 926 /** 927 * dp_rx_pdev_buffers_alloc() - Allocate nbufs (skbs) and replenish RxDMA ring 928 * @pdev: core txrx pdev context 929 * 930 * Return: QDF_STATUS - QDF_STATUS_SUCCESS 931 * QDF_STATUS_E_NOMEM 932 */ 933 QDF_STATUS dp_rx_pdev_buffers_alloc(struct dp_pdev *pdev); 934 935 /** 936 * dp_rx_pdev_buffers_free() - Free nbufs (skbs) 937 * @pdev: core txrx pdev context 938 */ 939 void dp_rx_pdev_buffers_free(struct dp_pdev *pdev); 940 941 void dp_rx_pdev_detach(struct dp_pdev *pdev); 942 943 /** 944 * dp_print_napi_stats() - NAPI stats 945 * @soc: soc handle 946 */ 947 void dp_print_napi_stats(struct dp_soc *soc); 948 949 /** 950 * dp_rx_vdev_detach() - detach vdev from dp rx 951 * @vdev: virtual device instance 952 * 953 * Return: QDF_STATUS_SUCCESS: success 954 * QDF_STATUS_E_RESOURCES: Error return 955 */ 956 QDF_STATUS dp_rx_vdev_detach(struct dp_vdev *vdev); 957 958 #ifndef QCA_HOST_MODE_WIFI_DISABLED 959 960 uint32_t 961 dp_rx_process(struct dp_intr *int_ctx, hal_ring_handle_t hal_ring_hdl, 962 uint8_t reo_ring_num, 963 uint32_t quota); 964 965 /** 966 * dp_rx_sg_create() - create a frag_list for MSDUs which are spread across 967 * multiple nbufs. 968 * @soc: core txrx main context 969 * @nbuf: pointer to the first msdu of an amsdu. 970 * 971 * This function implements the creation of RX frag_list for cases 972 * where an MSDU is spread across multiple nbufs. 973 * 974 * Return: returns the head nbuf which contains complete frag_list. 975 */ 976 qdf_nbuf_t dp_rx_sg_create(struct dp_soc *soc, qdf_nbuf_t nbuf); 977 978 /** 979 * dp_rx_is_sg_supported() - SG packets processing supported or not. 980 * 981 * Return: returns true when processing is supported else false. 982 */ 983 bool dp_rx_is_sg_supported(void); 984 985 /** 986 * dp_rx_desc_nbuf_and_pool_free() - free the sw rx desc pool called during 987 * de-initialization of wifi module. 988 * 989 * @soc: core txrx main context 990 * @pool_id: pool_id which is one of 3 mac_ids 991 * @rx_desc_pool: rx descriptor pool pointer 992 * 993 * Return: None 994 */ 995 void dp_rx_desc_nbuf_and_pool_free(struct dp_soc *soc, uint32_t pool_id, 996 struct rx_desc_pool *rx_desc_pool); 997 998 #endif /* QCA_HOST_MODE_WIFI_DISABLED */ 999 1000 /** 1001 * dp_rx_desc_nbuf_free() - free the sw rx desc nbufs called during 1002 * de-initialization of wifi module. 1003 * 1004 * @soc: core txrx main context 1005 * @rx_desc_pool: rx descriptor pool pointer 1006 * @is_mon_pool: true if this is a monitor pool 1007 * 1008 * Return: None 1009 */ 1010 void dp_rx_desc_nbuf_free(struct dp_soc *soc, 1011 struct rx_desc_pool *rx_desc_pool, 1012 bool is_mon_pool); 1013 1014 #ifdef DP_RX_MON_MEM_FRAG 1015 /** 1016 * dp_rx_desc_frag_free() - free the sw rx desc frag called during 1017 * de-initialization of wifi module. 1018 * 1019 * @soc: core txrx main context 1020 * @rx_desc_pool: rx descriptor pool pointer 1021 * 1022 * Return: None 1023 */ 1024 void dp_rx_desc_frag_free(struct dp_soc *soc, 1025 struct rx_desc_pool *rx_desc_pool); 1026 #else 1027 static inline 1028 void dp_rx_desc_frag_free(struct dp_soc *soc, 1029 struct rx_desc_pool *rx_desc_pool) 1030 { 1031 } 1032 #endif 1033 /** 1034 * dp_rx_desc_pool_free() - free the sw rx desc array called during 1035 * de-initialization of wifi module. 1036 * 1037 * @soc: core txrx main context 1038 * @rx_desc_pool: rx descriptor pool pointer 1039 * 1040 * Return: None 1041 */ 1042 void dp_rx_desc_pool_free(struct dp_soc *soc, 1043 struct rx_desc_pool *rx_desc_pool); 1044 1045 /** 1046 * dp_rx_deliver_raw() - process RAW mode pkts and hand over the 1047 * pkts to RAW mode simulation to 1048 * decapsulate the pkt. 1049 * @vdev: vdev on which RAW mode is enabled 1050 * @nbuf_list: list of RAW pkts to process 1051 * @peer: peer object from which the pkt is rx 1052 * @link_id: link Id on which the packet is received 1053 * 1054 * Return: void 1055 */ 1056 void dp_rx_deliver_raw(struct dp_vdev *vdev, qdf_nbuf_t nbuf_list, 1057 struct dp_txrx_peer *peer, uint8_t link_id); 1058 1059 #ifdef RX_DESC_LOGGING 1060 /** 1061 * dp_rx_desc_alloc_dbg_info() - Alloc memory for rx descriptor debug 1062 * structure 1063 * @rx_desc: rx descriptor pointer 1064 * 1065 * Return: None 1066 */ 1067 static inline 1068 void dp_rx_desc_alloc_dbg_info(struct dp_rx_desc *rx_desc) 1069 { 1070 rx_desc->dbg_info = qdf_mem_malloc(sizeof(struct dp_rx_desc_dbg_info)); 1071 } 1072 1073 /** 1074 * dp_rx_desc_free_dbg_info() - Free rx descriptor debug 1075 * structure memory 1076 * @rx_desc: rx descriptor pointer 1077 * 1078 * Return: None 1079 */ 1080 static inline 1081 void dp_rx_desc_free_dbg_info(struct dp_rx_desc *rx_desc) 1082 { 1083 qdf_mem_free(rx_desc->dbg_info); 1084 } 1085 1086 /** 1087 * dp_rx_desc_update_dbg_info() - Update rx descriptor debug info 1088 * structure memory 1089 * @rx_desc: rx descriptor pointer 1090 * @func_name: name of calling function 1091 * @flag: 1092 * 1093 * Return: None 1094 */ 1095 static 1096 void dp_rx_desc_update_dbg_info(struct dp_rx_desc *rx_desc, 1097 const char *func_name, uint8_t flag) 1098 { 1099 struct dp_rx_desc_dbg_info *info = rx_desc->dbg_info; 1100 1101 if (!info) 1102 return; 1103 1104 if (flag == RX_DESC_REPLENISHED) { 1105 qdf_str_lcopy(info->replenish_caller, func_name, 1106 QDF_MEM_FUNC_NAME_SIZE); 1107 info->replenish_ts = qdf_get_log_timestamp(); 1108 } else { 1109 qdf_str_lcopy(info->freelist_caller, func_name, 1110 QDF_MEM_FUNC_NAME_SIZE); 1111 info->freelist_ts = qdf_get_log_timestamp(); 1112 info->prev_nbuf = rx_desc->nbuf; 1113 info->prev_nbuf_data_addr = rx_desc->nbuf_data_addr; 1114 rx_desc->nbuf_data_addr = NULL; 1115 } 1116 } 1117 #else 1118 1119 static inline 1120 void dp_rx_desc_alloc_dbg_info(struct dp_rx_desc *rx_desc) 1121 { 1122 } 1123 1124 static inline 1125 void dp_rx_desc_free_dbg_info(struct dp_rx_desc *rx_desc) 1126 { 1127 } 1128 1129 static inline 1130 void dp_rx_desc_update_dbg_info(struct dp_rx_desc *rx_desc, 1131 const char *func_name, uint8_t flag) 1132 { 1133 } 1134 #endif /* RX_DESC_LOGGING */ 1135 1136 /** 1137 * __dp_rx_add_to_free_desc_list() - Adds to a local free descriptor list 1138 * 1139 * @head: pointer to the head of local free list 1140 * @tail: pointer to the tail of local free list 1141 * @new: new descriptor that is added to the free list 1142 * @func_name: caller func name 1143 * 1144 * Return: void: 1145 */ 1146 static inline 1147 void __dp_rx_add_to_free_desc_list(union dp_rx_desc_list_elem_t **head, 1148 union dp_rx_desc_list_elem_t **tail, 1149 struct dp_rx_desc *new, const char *func_name) 1150 { 1151 qdf_assert(head && new); 1152 1153 dp_rx_desc_update_dbg_info(new, func_name, RX_DESC_IN_FREELIST); 1154 1155 new->nbuf = NULL; 1156 new->in_use = 0; 1157 1158 ((union dp_rx_desc_list_elem_t *)new)->next = *head; 1159 *head = (union dp_rx_desc_list_elem_t *)new; 1160 /* reset tail if head->next is NULL */ 1161 if (!*tail || !(*head)->next) 1162 *tail = *head; 1163 } 1164 1165 /** 1166 * dp_rx_process_invalid_peer(): Function to pass invalid peer list to umac 1167 * @soc: DP SOC handle 1168 * @nbuf: network buffer 1169 * @mac_id: mac_id which is one of 3 mac_ids(Assuming mac_id and 1170 * pool_id has same mapping) 1171 * 1172 * Return: integer type 1173 */ 1174 uint8_t dp_rx_process_invalid_peer(struct dp_soc *soc, qdf_nbuf_t nbuf, 1175 uint8_t mac_id); 1176 1177 /** 1178 * dp_rx_process_invalid_peer_wrapper(): Function to wrap invalid peer handler 1179 * @soc: DP SOC handle 1180 * @mpdu: mpdu for which peer is invalid 1181 * @mpdu_done: if an mpdu is completed 1182 * @mac_id: mac_id which is one of 3 mac_ids(Assuming mac_id and 1183 * pool_id has same mapping) 1184 * 1185 * Return: integer type 1186 */ 1187 void dp_rx_process_invalid_peer_wrapper(struct dp_soc *soc, 1188 qdf_nbuf_t mpdu, bool mpdu_done, uint8_t mac_id); 1189 1190 #define DP_RX_HEAD_APPEND(head, elem) \ 1191 do { \ 1192 qdf_nbuf_set_next((elem), (head)); \ 1193 (head) = (elem); \ 1194 } while (0) 1195 1196 1197 #define DP_RX_LIST_APPEND(head, tail, elem) \ 1198 do { \ 1199 if (!(head)) { \ 1200 (head) = (elem); \ 1201 QDF_NBUF_CB_RX_NUM_ELEMENTS_IN_LIST(head) = 1;\ 1202 } else { \ 1203 qdf_nbuf_set_next((tail), (elem)); \ 1204 QDF_NBUF_CB_RX_NUM_ELEMENTS_IN_LIST(head)++; \ 1205 } \ 1206 (tail) = (elem); \ 1207 qdf_nbuf_set_next((tail), NULL); \ 1208 } while (0) 1209 1210 #define DP_RX_MERGE_TWO_LIST(phead, ptail, chead, ctail) \ 1211 do { \ 1212 if (!(phead)) { \ 1213 (phead) = (chead); \ 1214 } else { \ 1215 qdf_nbuf_set_next((ptail), (chead)); \ 1216 QDF_NBUF_CB_RX_NUM_ELEMENTS_IN_LIST(phead) += \ 1217 QDF_NBUF_CB_RX_NUM_ELEMENTS_IN_LIST(chead); \ 1218 } \ 1219 (ptail) = (ctail); \ 1220 qdf_nbuf_set_next((ptail), NULL); \ 1221 } while (0) 1222 1223 #if defined(QCA_PADDR_CHECK_ON_3RD_PARTY_PLATFORM) 1224 /* 1225 * on some third-party platform, the memory below 0x2000 1226 * is reserved for target use, so any memory allocated in this 1227 * region should not be used by host 1228 */ 1229 #define MAX_RETRY 50 1230 #define DP_PHY_ADDR_RESERVED 0x2000 1231 #elif defined(BUILD_X86) 1232 /* 1233 * in M2M emulation platforms (x86) the memory below 0x50000000 1234 * is reserved for target use, so any memory allocated in this 1235 * region should not be used by host 1236 */ 1237 #define MAX_RETRY 100 1238 #define DP_PHY_ADDR_RESERVED 0x50000000 1239 #endif 1240 1241 #if defined(QCA_PADDR_CHECK_ON_3RD_PARTY_PLATFORM) || defined(BUILD_X86) 1242 /** 1243 * dp_check_paddr() - check if current phy address is valid or not 1244 * @dp_soc: core txrx main context 1245 * @rx_netbuf: skb buffer 1246 * @paddr: physical address 1247 * @rx_desc_pool: struct of rx descriptor pool 1248 * check if the physical address of the nbuf->data is less 1249 * than DP_PHY_ADDR_RESERVED then free the nbuf and try 1250 * allocating new nbuf. We can try for 100 times. 1251 * 1252 * This is a temp WAR till we fix it properly. 1253 * 1254 * Return: success or failure. 1255 */ 1256 static inline 1257 int dp_check_paddr(struct dp_soc *dp_soc, 1258 qdf_nbuf_t *rx_netbuf, 1259 qdf_dma_addr_t *paddr, 1260 struct rx_desc_pool *rx_desc_pool) 1261 { 1262 uint32_t nbuf_retry = 0; 1263 int32_t ret; 1264 1265 if (qdf_likely(*paddr > DP_PHY_ADDR_RESERVED)) 1266 return QDF_STATUS_SUCCESS; 1267 1268 do { 1269 dp_debug("invalid phy addr 0x%llx, trying again", 1270 (uint64_t)(*paddr)); 1271 nbuf_retry++; 1272 if ((*rx_netbuf)) { 1273 /* Not freeing buffer intentionally. 1274 * Observed that same buffer is getting 1275 * re-allocated resulting in longer load time 1276 * WMI init timeout. 1277 * This buffer is anyway not useful so skip it. 1278 *.Add such buffer to invalid list and free 1279 *.them when driver unload. 1280 **/ 1281 qdf_nbuf_unmap_nbytes_single(dp_soc->osdev, 1282 *rx_netbuf, 1283 QDF_DMA_FROM_DEVICE, 1284 rx_desc_pool->buf_size); 1285 qdf_nbuf_queue_add(&dp_soc->invalid_buf_queue, 1286 *rx_netbuf); 1287 } 1288 1289 *rx_netbuf = qdf_nbuf_alloc(dp_soc->osdev, 1290 rx_desc_pool->buf_size, 1291 RX_BUFFER_RESERVATION, 1292 rx_desc_pool->buf_alignment, 1293 FALSE); 1294 1295 if (qdf_unlikely(!(*rx_netbuf))) 1296 return QDF_STATUS_E_FAILURE; 1297 1298 ret = qdf_nbuf_map_nbytes_single(dp_soc->osdev, 1299 *rx_netbuf, 1300 QDF_DMA_FROM_DEVICE, 1301 rx_desc_pool->buf_size); 1302 1303 if (qdf_unlikely(ret == QDF_STATUS_E_FAILURE)) { 1304 qdf_nbuf_free(*rx_netbuf); 1305 *rx_netbuf = NULL; 1306 continue; 1307 } 1308 1309 *paddr = qdf_nbuf_get_frag_paddr(*rx_netbuf, 0); 1310 1311 if (qdf_likely(*paddr > DP_PHY_ADDR_RESERVED)) 1312 return QDF_STATUS_SUCCESS; 1313 1314 } while (nbuf_retry < MAX_RETRY); 1315 1316 if ((*rx_netbuf)) { 1317 qdf_nbuf_unmap_nbytes_single(dp_soc->osdev, 1318 *rx_netbuf, 1319 QDF_DMA_FROM_DEVICE, 1320 rx_desc_pool->buf_size); 1321 qdf_nbuf_queue_add(&dp_soc->invalid_buf_queue, 1322 *rx_netbuf); 1323 } 1324 1325 return QDF_STATUS_E_FAILURE; 1326 } 1327 1328 #else 1329 static inline 1330 int dp_check_paddr(struct dp_soc *dp_soc, 1331 qdf_nbuf_t *rx_netbuf, 1332 qdf_dma_addr_t *paddr, 1333 struct rx_desc_pool *rx_desc_pool) 1334 { 1335 return QDF_STATUS_SUCCESS; 1336 } 1337 1338 #endif 1339 1340 /** 1341 * dp_rx_cookie_2_link_desc_va() - Converts cookie to a virtual address of 1342 * the MSDU Link Descriptor 1343 * @soc: core txrx main context 1344 * @buf_info: buf_info includes cookie that is used to lookup 1345 * virtual address of link descriptor after deriving the page id 1346 * and the offset or index of the desc on the associatde page. 1347 * 1348 * This is the VA of the link descriptor, that HAL layer later uses to 1349 * retrieve the list of MSDU's for a given MPDU. 1350 * 1351 * Return: void *: Virtual Address of the Rx descriptor 1352 */ 1353 static inline 1354 void *dp_rx_cookie_2_link_desc_va(struct dp_soc *soc, 1355 struct hal_buf_info *buf_info) 1356 { 1357 void *link_desc_va; 1358 struct qdf_mem_multi_page_t *pages; 1359 uint16_t page_id = LINK_DESC_COOKIE_PAGE_ID(buf_info->sw_cookie); 1360 1361 pages = &soc->link_desc_pages; 1362 if (!pages) 1363 return NULL; 1364 if (qdf_unlikely(page_id >= pages->num_pages)) 1365 return NULL; 1366 link_desc_va = pages->dma_pages[page_id].page_v_addr_start + 1367 (buf_info->paddr - pages->dma_pages[page_id].page_p_addr); 1368 return link_desc_va; 1369 } 1370 1371 #ifndef QCA_HOST_MODE_WIFI_DISABLED 1372 #ifdef DISABLE_EAPOL_INTRABSS_FWD 1373 #ifdef WLAN_FEATURE_11BE_MLO 1374 static inline bool dp_nbuf_dst_addr_is_mld_addr(struct dp_vdev *vdev, 1375 qdf_nbuf_t nbuf) 1376 { 1377 struct qdf_mac_addr *self_mld_mac_addr = 1378 (struct qdf_mac_addr *)vdev->mld_mac_addr.raw; 1379 return qdf_is_macaddr_equal(self_mld_mac_addr, 1380 (struct qdf_mac_addr *)qdf_nbuf_data(nbuf) + 1381 QDF_NBUF_DEST_MAC_OFFSET); 1382 } 1383 #else 1384 static inline bool dp_nbuf_dst_addr_is_mld_addr(struct dp_vdev *vdev, 1385 qdf_nbuf_t nbuf) 1386 { 1387 return false; 1388 } 1389 #endif 1390 1391 static inline bool dp_nbuf_dst_addr_is_self_addr(struct dp_vdev *vdev, 1392 qdf_nbuf_t nbuf) 1393 { 1394 return qdf_is_macaddr_equal((struct qdf_mac_addr *)vdev->mac_addr.raw, 1395 (struct qdf_mac_addr *)qdf_nbuf_data(nbuf) + 1396 QDF_NBUF_DEST_MAC_OFFSET); 1397 } 1398 1399 /** 1400 * dp_rx_intrabss_eapol_drop_check() - API For EAPOL 1401 * pkt with DA not equal to vdev mac addr, fwd is not allowed. 1402 * @soc: core txrx main context 1403 * @ta_txrx_peer: source peer entry 1404 * @rx_tlv_hdr: start address of rx tlvs 1405 * @nbuf: nbuf that has to be intrabss forwarded 1406 * 1407 * Return: true if it is forwarded else false 1408 */ 1409 static inline 1410 bool dp_rx_intrabss_eapol_drop_check(struct dp_soc *soc, 1411 struct dp_txrx_peer *ta_txrx_peer, 1412 uint8_t *rx_tlv_hdr, qdf_nbuf_t nbuf) 1413 { 1414 if (qdf_unlikely(qdf_nbuf_is_ipv4_eapol_pkt(nbuf) && 1415 !(dp_nbuf_dst_addr_is_self_addr(ta_txrx_peer->vdev, 1416 nbuf) || 1417 dp_nbuf_dst_addr_is_mld_addr(ta_txrx_peer->vdev, 1418 nbuf)))) { 1419 qdf_nbuf_free(nbuf); 1420 DP_STATS_INC(soc, rx.err.intrabss_eapol_drop, 1); 1421 return true; 1422 } 1423 1424 return false; 1425 } 1426 #else /* DISABLE_EAPOL_INTRABSS_FWD */ 1427 1428 static inline 1429 bool dp_rx_intrabss_eapol_drop_check(struct dp_soc *soc, 1430 struct dp_txrx_peer *ta_txrx_peer, 1431 uint8_t *rx_tlv_hdr, qdf_nbuf_t nbuf) 1432 { 1433 return false; 1434 } 1435 #endif /* DISABLE_EAPOL_INTRABSS_FWD */ 1436 1437 /** 1438 * dp_rx_intrabss_mcbc_fwd() - Does intrabss forward for mcast packets 1439 * @soc: core txrx main context 1440 * @ta_peer: source peer entry 1441 * @rx_tlv_hdr: start address of rx tlvs 1442 * @nbuf: nbuf that has to be intrabss forwarded 1443 * @tid_stats: tid stats pointer 1444 * @link_id: link Id on which packet is received 1445 * 1446 * Return: bool: true if it is forwarded else false 1447 */ 1448 bool dp_rx_intrabss_mcbc_fwd(struct dp_soc *soc, 1449 struct dp_txrx_peer *ta_peer, 1450 uint8_t *rx_tlv_hdr, qdf_nbuf_t nbuf, 1451 struct cdp_tid_rx_stats *tid_stats, 1452 uint8_t link_id); 1453 1454 /** 1455 * dp_rx_intrabss_ucast_fwd() - Does intrabss forward for unicast packets 1456 * @soc: core txrx main context 1457 * @ta_peer: source peer entry 1458 * @tx_vdev_id: VDEV ID for Intra-BSS TX 1459 * @rx_tlv_hdr: start address of rx tlvs 1460 * @nbuf: nbuf that has to be intrabss forwarded 1461 * @tid_stats: tid stats pointer 1462 * @link_id: link Id on which packet is received 1463 * 1464 * Return: bool: true if it is forwarded else false 1465 */ 1466 bool dp_rx_intrabss_ucast_fwd(struct dp_soc *soc, 1467 struct dp_txrx_peer *ta_peer, 1468 uint8_t tx_vdev_id, 1469 uint8_t *rx_tlv_hdr, qdf_nbuf_t nbuf, 1470 struct cdp_tid_rx_stats *tid_stats, 1471 uint8_t link_id); 1472 1473 /** 1474 * dp_rx_defrag_concat() - Concatenate the fragments 1475 * 1476 * @dst: destination pointer to the buffer 1477 * @src: source pointer from where the fragment payload is to be copied 1478 * 1479 * Return: QDF_STATUS 1480 */ 1481 static inline QDF_STATUS dp_rx_defrag_concat(qdf_nbuf_t dst, qdf_nbuf_t src) 1482 { 1483 /* 1484 * Inside qdf_nbuf_cat, if it is necessary to reallocate dst 1485 * to provide space for src, the headroom portion is copied from 1486 * the original dst buffer to the larger new dst buffer. 1487 * (This is needed, because the headroom of the dst buffer 1488 * contains the rx desc.) 1489 */ 1490 if (!qdf_nbuf_cat(dst, src)) { 1491 /* 1492 * qdf_nbuf_cat does not free the src memory. 1493 * Free src nbuf before returning 1494 * For failure case the caller takes of freeing the nbuf 1495 */ 1496 qdf_nbuf_free(src); 1497 return QDF_STATUS_SUCCESS; 1498 } 1499 1500 return QDF_STATUS_E_DEFRAG_ERROR; 1501 } 1502 1503 #endif /* QCA_HOST_MODE_WIFI_DISABLED */ 1504 1505 #ifndef FEATURE_WDS 1506 void dp_rx_da_learn(struct dp_soc *soc, uint8_t *rx_tlv_hdr, 1507 struct dp_txrx_peer *ta_txrx_peer, qdf_nbuf_t nbuf); 1508 1509 static inline QDF_STATUS dp_rx_ast_set_active(struct dp_soc *soc, uint16_t sa_idx, bool is_active) 1510 { 1511 return QDF_STATUS_SUCCESS; 1512 } 1513 1514 static inline void 1515 dp_rx_wds_srcport_learn(struct dp_soc *soc, 1516 uint8_t *rx_tlv_hdr, 1517 struct dp_txrx_peer *txrx_peer, 1518 qdf_nbuf_t nbuf, 1519 struct hal_rx_msdu_metadata msdu_metadata) 1520 { 1521 } 1522 1523 static inline void 1524 dp_rx_ipa_wds_srcport_learn(struct dp_soc *soc, 1525 struct dp_peer *ta_peer, qdf_nbuf_t nbuf, 1526 struct hal_rx_msdu_metadata msdu_end_info, 1527 bool ad4_valid, bool chfrag_start) 1528 { 1529 } 1530 #endif 1531 1532 /** 1533 * dp_rx_desc_dump() - dump the sw rx descriptor 1534 * 1535 * @rx_desc: sw rx descriptor 1536 */ 1537 static inline void dp_rx_desc_dump(struct dp_rx_desc *rx_desc) 1538 { 1539 dp_info("rx_desc->nbuf: %pK, rx_desc->cookie: %d, rx_desc->pool_id: %d, rx_desc->in_use: %d, rx_desc->unmapped: %d", 1540 rx_desc->nbuf, rx_desc->cookie, rx_desc->pool_id, 1541 rx_desc->in_use, rx_desc->unmapped); 1542 } 1543 1544 #ifndef QCA_HOST_MODE_WIFI_DISABLED 1545 1546 /** 1547 * check_qwrap_multicast_loopback() - Check if rx packet is a loopback packet. 1548 * In qwrap mode, packets originated from 1549 * any vdev should not loopback and 1550 * should be dropped. 1551 * @vdev: vdev on which rx packet is received 1552 * @nbuf: rx pkt 1553 * 1554 */ 1555 #if ATH_SUPPORT_WRAP 1556 static inline bool check_qwrap_multicast_loopback(struct dp_vdev *vdev, 1557 qdf_nbuf_t nbuf) 1558 { 1559 struct dp_vdev *psta_vdev; 1560 struct dp_pdev *pdev = vdev->pdev; 1561 uint8_t *data = qdf_nbuf_data(nbuf); 1562 1563 if (qdf_unlikely(vdev->proxysta_vdev)) { 1564 /* In qwrap isolation mode, allow loopback packets as all 1565 * packets go to RootAP and Loopback on the mpsta. 1566 */ 1567 if (vdev->isolation_vdev) 1568 return false; 1569 TAILQ_FOREACH(psta_vdev, &pdev->vdev_list, vdev_list_elem) { 1570 if (qdf_unlikely(psta_vdev->proxysta_vdev && 1571 !qdf_mem_cmp(psta_vdev->mac_addr.raw, 1572 &data[QDF_MAC_ADDR_SIZE], 1573 QDF_MAC_ADDR_SIZE))) { 1574 /* Drop packet if source address is equal to 1575 * any of the vdev addresses. 1576 */ 1577 return true; 1578 } 1579 } 1580 } 1581 return false; 1582 } 1583 #else 1584 static inline bool check_qwrap_multicast_loopback(struct dp_vdev *vdev, 1585 qdf_nbuf_t nbuf) 1586 { 1587 return false; 1588 } 1589 #endif 1590 1591 #endif /* QCA_HOST_MODE_WIFI_DISABLED */ 1592 1593 #if defined(WLAN_SUPPORT_RX_PROTOCOL_TYPE_TAG) ||\ 1594 defined(WLAN_SUPPORT_RX_TAG_STATISTICS) ||\ 1595 defined(WLAN_SUPPORT_RX_FLOW_TAG) 1596 #include "dp_rx_tag.h" 1597 #endif 1598 1599 #if !defined(WLAN_SUPPORT_RX_PROTOCOL_TYPE_TAG) &&\ 1600 !defined(WLAN_SUPPORT_RX_FLOW_TAG) 1601 /** 1602 * dp_rx_update_protocol_tag() - Reads CCE metadata from the RX MSDU end TLV 1603 * and set the corresponding tag in QDF packet 1604 * @soc: core txrx main context 1605 * @vdev: vdev on which the packet is received 1606 * @nbuf: QDF pkt buffer on which the protocol tag should be set 1607 * @rx_tlv_hdr: rBbase address where the RX TLVs starts 1608 * @ring_index: REO ring number, not used for error & monitor ring 1609 * @is_reo_exception: flag to indicate if rx from REO ring or exception ring 1610 * @is_update_stats: flag to indicate whether to update stats or not 1611 * 1612 * Return: void 1613 */ 1614 static inline void 1615 dp_rx_update_protocol_tag(struct dp_soc *soc, struct dp_vdev *vdev, 1616 qdf_nbuf_t nbuf, uint8_t *rx_tlv_hdr, 1617 uint16_t ring_index, 1618 bool is_reo_exception, bool is_update_stats) 1619 { 1620 } 1621 #endif 1622 1623 #ifndef WLAN_SUPPORT_RX_PROTOCOL_TYPE_TAG 1624 /** 1625 * dp_rx_err_cce_drop() - Reads CCE metadata from the RX MSDU end TLV 1626 * and returns whether cce metadata matches 1627 * @soc: core txrx main context 1628 * @vdev: vdev on which the packet is received 1629 * @nbuf: QDF pkt buffer on which the protocol tag should be set 1630 * @rx_tlv_hdr: rBbase address where the RX TLVs starts 1631 * 1632 * Return: bool 1633 */ 1634 static inline bool 1635 dp_rx_err_cce_drop(struct dp_soc *soc, struct dp_vdev *vdev, 1636 qdf_nbuf_t nbuf, uint8_t *rx_tlv_hdr) 1637 { 1638 return false; 1639 } 1640 1641 #endif /* WLAN_SUPPORT_RX_PROTOCOL_TYPE_TAG */ 1642 1643 #ifndef WLAN_SUPPORT_RX_FLOW_TAG 1644 /** 1645 * dp_rx_update_flow_tag() - Reads FSE metadata from the RX MSDU end TLV 1646 * and set the corresponding tag in QDF packet 1647 * @soc: core txrx main context 1648 * @vdev: vdev on which the packet is received 1649 * @nbuf: QDF pkt buffer on which the protocol tag should be set 1650 * @rx_tlv_hdr: base address where the RX TLVs starts 1651 * @update_stats: flag to indicate whether to update stats or not 1652 * 1653 * Return: void 1654 */ 1655 static inline void 1656 dp_rx_update_flow_tag(struct dp_soc *soc, struct dp_vdev *vdev, 1657 qdf_nbuf_t nbuf, uint8_t *rx_tlv_hdr, bool update_stats) 1658 { 1659 } 1660 #endif /* WLAN_SUPPORT_RX_FLOW_TAG */ 1661 1662 #define CRITICAL_BUFFER_THRESHOLD 64 1663 /** 1664 * __dp_rx_buffers_replenish() - replenish rxdma ring with rx nbufs 1665 * called during dp rx initialization 1666 * and at the end of dp_rx_process. 1667 * 1668 * @dp_soc: core txrx main context 1669 * @mac_id: mac_id which is one of 3 mac_ids 1670 * @dp_rxdma_srng: dp rxdma circular ring 1671 * @rx_desc_pool: Pointer to free Rx descriptor pool 1672 * @num_req_buffers: number of buffer to be replenished 1673 * @desc_list: list of descs if called from dp_rx_process 1674 * or NULL during dp rx initialization or out of buffer 1675 * interrupt. 1676 * @tail: tail of descs list 1677 * @req_only: If true don't replenish more than req buffers 1678 * @func_name: name of the caller function 1679 * 1680 * Return: return success or failure 1681 */ 1682 QDF_STATUS __dp_rx_buffers_replenish(struct dp_soc *dp_soc, uint32_t mac_id, 1683 struct dp_srng *dp_rxdma_srng, 1684 struct rx_desc_pool *rx_desc_pool, 1685 uint32_t num_req_buffers, 1686 union dp_rx_desc_list_elem_t **desc_list, 1687 union dp_rx_desc_list_elem_t **tail, 1688 bool req_only, 1689 const char *func_name); 1690 1691 /** 1692 * __dp_rx_buffers_no_map_replenish() - replenish rxdma ring with rx nbufs 1693 * use direct APIs to get invalidate 1694 * and get the physical address of the 1695 * nbuf instead of map api,called during 1696 * dp rx initialization and at the end 1697 * of dp_rx_process. 1698 * 1699 * @dp_soc: core txrx main context 1700 * @mac_id: mac_id which is one of 3 mac_ids 1701 * @dp_rxdma_srng: dp rxdma circular ring 1702 * @rx_desc_pool: Pointer to free Rx descriptor pool 1703 * @num_req_buffers: number of buffer to be replenished 1704 * @desc_list: list of descs if called from dp_rx_process 1705 * or NULL during dp rx initialization or out of buffer 1706 * interrupt. 1707 * @tail: tail of descs list 1708 * 1709 * Return: return success or failure 1710 */ 1711 QDF_STATUS 1712 __dp_rx_buffers_no_map_replenish(struct dp_soc *dp_soc, uint32_t mac_id, 1713 struct dp_srng *dp_rxdma_srng, 1714 struct rx_desc_pool *rx_desc_pool, 1715 uint32_t num_req_buffers, 1716 union dp_rx_desc_list_elem_t **desc_list, 1717 union dp_rx_desc_list_elem_t **tail); 1718 1719 /** 1720 * __dp_rx_comp2refill_replenish() - replenish rxdma ring with rx nbufs 1721 * use direct APIs to get invalidate 1722 * and get the physical address of the 1723 * nbuf instead of map api,called during 1724 * dp rx initialization and at the end 1725 * of dp_rx_process. 1726 * 1727 * @dp_soc: core txrx main context 1728 * @mac_id: mac_id which is one of 3 mac_ids 1729 * @dp_rxdma_srng: dp rxdma circular ring 1730 * @rx_desc_pool: Pointer to free Rx descriptor pool 1731 * @num_req_buffers: number of buffer to be replenished 1732 * @desc_list: list of descs if called from dp_rx_process 1733 * or NULL during dp rx initialization or out of buffer 1734 * interrupt. 1735 * @tail: tail of descs list 1736 * Return: return success or failure 1737 */ 1738 QDF_STATUS 1739 __dp_rx_comp2refill_replenish(struct dp_soc *dp_soc, uint32_t mac_id, 1740 struct dp_srng *dp_rxdma_srng, 1741 struct rx_desc_pool *rx_desc_pool, 1742 uint32_t num_req_buffers, 1743 union dp_rx_desc_list_elem_t **desc_list, 1744 union dp_rx_desc_list_elem_t **tail); 1745 1746 /** 1747 * __dp_rx_buffers_no_map_lt_replenish() - replenish rxdma ring with rx nbufs 1748 * use direct APIs to get invalidate 1749 * and get the physical address of the 1750 * nbuf instead of map api,called when 1751 * low threshold interrupt is triggered 1752 * 1753 * @dp_soc: core txrx main context 1754 * @mac_id: mac_id which is one of 3 mac_ids 1755 * @dp_rxdma_srng: dp rxdma circular ring 1756 * @rx_desc_pool: Pointer to free Rx descriptor pool 1757 * 1758 * Return: return success or failure 1759 */ 1760 QDF_STATUS 1761 __dp_rx_buffers_no_map_lt_replenish(struct dp_soc *dp_soc, uint32_t mac_id, 1762 struct dp_srng *dp_rxdma_srng, 1763 struct rx_desc_pool *rx_desc_pool); 1764 1765 /** 1766 * __dp_pdev_rx_buffers_no_map_attach() - replenish rxdma ring with rx nbufs 1767 * use direct APIs to get invalidate 1768 * and get the physical address of the 1769 * nbuf instead of map api,called during 1770 * dp rx initialization. 1771 * 1772 * @dp_soc: core txrx main context 1773 * @mac_id: mac_id which is one of 3 mac_ids 1774 * @dp_rxdma_srng: dp rxdma circular ring 1775 * @rx_desc_pool: Pointer to free Rx descriptor pool 1776 * @num_req_buffers: number of buffer to be replenished 1777 * 1778 * Return: return success or failure 1779 */ 1780 QDF_STATUS __dp_pdev_rx_buffers_no_map_attach(struct dp_soc *dp_soc, 1781 uint32_t mac_id, 1782 struct dp_srng *dp_rxdma_srng, 1783 struct rx_desc_pool *rx_desc_pool, 1784 uint32_t num_req_buffers); 1785 1786 /** 1787 * dp_pdev_rx_buffers_attach() - replenish rxdma ring with rx nbufs 1788 * called during dp rx initialization 1789 * 1790 * @dp_soc: core txrx main context 1791 * @mac_id: mac_id which is one of 3 mac_ids 1792 * @dp_rxdma_srng: dp rxdma circular ring 1793 * @rx_desc_pool: Pointer to free Rx descriptor pool 1794 * @num_req_buffers: number of buffer to be replenished 1795 * 1796 * Return: return success or failure 1797 */ 1798 QDF_STATUS 1799 dp_pdev_rx_buffers_attach(struct dp_soc *dp_soc, uint32_t mac_id, 1800 struct dp_srng *dp_rxdma_srng, 1801 struct rx_desc_pool *rx_desc_pool, 1802 uint32_t num_req_buffers); 1803 1804 /** 1805 * dp_rx_fill_mesh_stats() - Fills the mesh per packet receive stats 1806 * @vdev: DP Virtual device handle 1807 * @nbuf: Buffer pointer 1808 * @rx_tlv_hdr: start of rx tlv header 1809 * @txrx_peer: pointer to peer 1810 * 1811 * This function allocated memory for mesh receive stats and fill the 1812 * required stats. Stores the memory address in skb cb. 1813 * 1814 * Return: void 1815 */ 1816 void dp_rx_fill_mesh_stats(struct dp_vdev *vdev, qdf_nbuf_t nbuf, 1817 uint8_t *rx_tlv_hdr, 1818 struct dp_txrx_peer *txrx_peer); 1819 1820 /** 1821 * dp_rx_filter_mesh_packets() - Filters mesh unwanted packets 1822 * @vdev: DP Virtual device handle 1823 * @nbuf: Buffer pointer 1824 * @rx_tlv_hdr: start of rx tlv header 1825 * 1826 * This checks if the received packet is matching any filter out 1827 * catogery and and drop the packet if it matches. 1828 * 1829 * Return: QDF_STATUS_SUCCESS indicates drop, 1830 * QDF_STATUS_E_FAILURE indicate to not drop 1831 */ 1832 QDF_STATUS dp_rx_filter_mesh_packets(struct dp_vdev *vdev, qdf_nbuf_t nbuf, 1833 uint8_t *rx_tlv_hdr); 1834 1835 int dp_wds_rx_policy_check(uint8_t *rx_tlv_hdr, struct dp_vdev *vdev, 1836 struct dp_txrx_peer *peer); 1837 1838 /** 1839 * dp_rx_compute_delay() - Compute and fill in all timestamps 1840 * to pass in correct fields 1841 * @vdev: pdev handle 1842 * @nbuf: network buffer 1843 * 1844 * Return: none 1845 */ 1846 void dp_rx_compute_delay(struct dp_vdev *vdev, qdf_nbuf_t nbuf); 1847 1848 #ifdef QCA_PEER_EXT_STATS 1849 1850 /** 1851 * dp_rx_compute_tid_delay - Compute per TID delay stats 1852 * @stats: TID delay stats to update 1853 * @nbuf: NBuffer 1854 * 1855 * Return: Void 1856 */ 1857 void dp_rx_compute_tid_delay(struct cdp_delay_tid_stats *stats, 1858 qdf_nbuf_t nbuf); 1859 #endif /* QCA_PEER_EXT_STATS */ 1860 1861 #ifdef WLAN_SUPPORT_PPEDS 1862 static inline 1863 void dp_rx_set_reuse_nbuf(struct dp_rx_desc *rx_desc, qdf_nbuf_t nbuf) 1864 { 1865 rx_desc->reuse_nbuf = nbuf; 1866 rx_desc->has_reuse_nbuf = true; 1867 } 1868 1869 /** 1870 * __dp_rx_add_to_free_desc_list_reuse() - Adds to a local free descriptor list 1871 * this list will reused 1872 * 1873 * @head: pointer to the head of local free list 1874 * @tail: pointer to the tail of local free list 1875 * @new: new descriptor that is added to the free list 1876 * @func_name: caller func name 1877 * 1878 * Return: void: 1879 */ 1880 static inline 1881 void __dp_rx_add_to_free_desc_list_reuse(union dp_rx_desc_list_elem_t **head, 1882 union dp_rx_desc_list_elem_t **tail, 1883 struct dp_rx_desc *new, 1884 const char *func_name) 1885 { 1886 qdf_assert(head && new); 1887 1888 dp_rx_desc_update_dbg_info(new, func_name, RX_DESC_IN_FREELIST); 1889 1890 new->nbuf = NULL; 1891 1892 ((union dp_rx_desc_list_elem_t *)new)->next = *head; 1893 *head = (union dp_rx_desc_list_elem_t *)new; 1894 /* reset tail if head->next is NULL */ 1895 if (!*tail || !(*head)->next) 1896 *tail = *head; 1897 } 1898 #else 1899 static inline 1900 void dp_rx_set_reuse_nbuf(struct dp_rx_desc *rx_desc, qdf_nbuf_t nbuf) 1901 { 1902 } 1903 1904 static inline 1905 void __dp_rx_add_to_free_desc_list_reuse(union dp_rx_desc_list_elem_t **head, 1906 union dp_rx_desc_list_elem_t **tail, 1907 struct dp_rx_desc *new, 1908 const char *func_name) 1909 { 1910 } 1911 #endif 1912 1913 #ifdef RX_DESC_DEBUG_CHECK 1914 /** 1915 * dp_rx_desc_check_magic() - check the magic value in dp_rx_desc 1916 * @rx_desc: rx descriptor pointer 1917 * 1918 * Return: true, if magic is correct, else false. 1919 */ 1920 static inline bool dp_rx_desc_check_magic(struct dp_rx_desc *rx_desc) 1921 { 1922 if (qdf_unlikely(rx_desc->magic != DP_RX_DESC_MAGIC)) 1923 return false; 1924 1925 rx_desc->magic = 0; 1926 return true; 1927 } 1928 1929 /** 1930 * dp_rx_desc_prep() - prepare rx desc 1931 * @rx_desc: rx descriptor pointer to be prepared 1932 * @nbuf_frag_info_t: struct dp_rx_nbuf_frag_info * 1933 * 1934 * Note: assumption is that we are associating a nbuf which is mapped 1935 * 1936 * Return: none 1937 */ 1938 static inline 1939 void dp_rx_desc_prep(struct dp_rx_desc *rx_desc, 1940 struct dp_rx_nbuf_frag_info *nbuf_frag_info_t) 1941 { 1942 rx_desc->magic = DP_RX_DESC_MAGIC; 1943 rx_desc->nbuf = (nbuf_frag_info_t->virt_addr).nbuf; 1944 rx_desc->unmapped = 0; 1945 rx_desc->nbuf_data_addr = (uint8_t *)qdf_nbuf_data(rx_desc->nbuf); 1946 dp_rx_set_reuse_nbuf(rx_desc, rx_desc->nbuf); 1947 rx_desc->paddr_buf_start = nbuf_frag_info_t->paddr; 1948 } 1949 1950 /** 1951 * dp_rx_desc_frag_prep() - prepare rx desc 1952 * @rx_desc: rx descriptor pointer to be prepared 1953 * @nbuf_frag_info_t: struct dp_rx_nbuf_frag_info * 1954 * 1955 * Note: assumption is that we frag address is mapped 1956 * 1957 * Return: none 1958 */ 1959 #ifdef DP_RX_MON_MEM_FRAG 1960 static inline 1961 void dp_rx_desc_frag_prep(struct dp_rx_desc *rx_desc, 1962 struct dp_rx_nbuf_frag_info *nbuf_frag_info_t) 1963 { 1964 rx_desc->magic = DP_RX_DESC_MAGIC; 1965 rx_desc->rx_buf_start = 1966 (uint8_t *)((nbuf_frag_info_t->virt_addr).vaddr); 1967 rx_desc->paddr_buf_start = nbuf_frag_info_t->paddr; 1968 rx_desc->unmapped = 0; 1969 } 1970 #else 1971 static inline 1972 void dp_rx_desc_frag_prep(struct dp_rx_desc *rx_desc, 1973 struct dp_rx_nbuf_frag_info *nbuf_frag_info_t) 1974 { 1975 } 1976 #endif /* DP_RX_MON_MEM_FRAG */ 1977 1978 /** 1979 * dp_rx_desc_paddr_sanity_check() - paddr sanity for ring desc vs rx_desc 1980 * @rx_desc: rx descriptor 1981 * @ring_paddr: paddr obatined from the ring 1982 * 1983 * Return: QDF_STATUS 1984 */ 1985 static inline 1986 bool dp_rx_desc_paddr_sanity_check(struct dp_rx_desc *rx_desc, 1987 uint64_t ring_paddr) 1988 { 1989 return (ring_paddr == qdf_nbuf_get_frag_paddr(rx_desc->nbuf, 0)); 1990 } 1991 #else 1992 1993 static inline bool dp_rx_desc_check_magic(struct dp_rx_desc *rx_desc) 1994 { 1995 return true; 1996 } 1997 1998 static inline 1999 void dp_rx_desc_prep(struct dp_rx_desc *rx_desc, 2000 struct dp_rx_nbuf_frag_info *nbuf_frag_info_t) 2001 { 2002 rx_desc->nbuf = (nbuf_frag_info_t->virt_addr).nbuf; 2003 dp_rx_set_reuse_nbuf(rx_desc, rx_desc->nbuf); 2004 rx_desc->paddr_buf_start = nbuf_frag_info_t->paddr; 2005 rx_desc->unmapped = 0; 2006 } 2007 2008 #ifdef DP_RX_MON_MEM_FRAG 2009 static inline 2010 void dp_rx_desc_frag_prep(struct dp_rx_desc *rx_desc, 2011 struct dp_rx_nbuf_frag_info *nbuf_frag_info_t) 2012 { 2013 rx_desc->rx_buf_start = 2014 (uint8_t *)((nbuf_frag_info_t->virt_addr).vaddr); 2015 rx_desc->paddr_buf_start = nbuf_frag_info_t->paddr; 2016 rx_desc->unmapped = 0; 2017 } 2018 #else 2019 static inline 2020 void dp_rx_desc_frag_prep(struct dp_rx_desc *rx_desc, 2021 struct dp_rx_nbuf_frag_info *nbuf_frag_info_t) 2022 { 2023 } 2024 #endif /* DP_RX_MON_MEM_FRAG */ 2025 2026 static inline 2027 bool dp_rx_desc_paddr_sanity_check(struct dp_rx_desc *rx_desc, 2028 uint64_t ring_paddr) 2029 { 2030 return true; 2031 } 2032 #endif /* RX_DESC_DEBUG_CHECK */ 2033 2034 /** 2035 * dp_rx_enable_mon_dest_frag() - Enable frag processing for 2036 * monitor destination ring via frag. 2037 * @rx_desc_pool: Rx desc pool 2038 * @is_mon_dest_desc: Is it for monitor dest buffer 2039 * 2040 * Enable this flag only for monitor destination buffer processing 2041 * if DP_RX_MON_MEM_FRAG feature is enabled. 2042 * If flag is set then frag based function will be called for alloc, 2043 * map, prep desc and free ops for desc buffer else normal nbuf based 2044 * function will be called. 2045 * 2046 * Return: None 2047 */ 2048 void dp_rx_enable_mon_dest_frag(struct rx_desc_pool *rx_desc_pool, 2049 bool is_mon_dest_desc); 2050 2051 #ifndef QCA_MULTIPASS_SUPPORT 2052 static inline 2053 bool dp_rx_multipass_process(struct dp_txrx_peer *peer, qdf_nbuf_t nbuf, 2054 uint8_t tid) 2055 { 2056 return false; 2057 } 2058 #else 2059 /** 2060 * dp_rx_multipass_process - insert vlan tag on frames for traffic separation 2061 * @txrx_peer: DP txrx peer handle 2062 * @nbuf: skb 2063 * @tid: traffic priority 2064 * 2065 * Return: bool: true in case of success else false 2066 * Success is considered if: 2067 * i. If frame has vlan header 2068 * ii. If the frame comes from different peer and dont need multipass processing 2069 * Failure is considered if: 2070 * i. Frame comes from multipass peer but doesn't contain vlan header. 2071 * In failure case, drop such frames. 2072 */ 2073 bool dp_rx_multipass_process(struct dp_txrx_peer *txrx_peer, qdf_nbuf_t nbuf, 2074 uint8_t tid); 2075 #endif 2076 2077 #ifndef QCA_HOST_MODE_WIFI_DISABLED 2078 2079 #ifndef WLAN_RX_PKT_CAPTURE_ENH 2080 static inline 2081 QDF_STATUS dp_peer_set_rx_capture_enabled(struct dp_pdev *pdev, 2082 struct dp_peer *peer_handle, 2083 bool value, uint8_t *mac_addr) 2084 { 2085 return QDF_STATUS_SUCCESS; 2086 } 2087 #endif 2088 2089 #endif /* QCA_HOST_MODE_WIFI_DISABLED */ 2090 2091 /** 2092 * dp_rx_deliver_to_stack() - deliver pkts to network stack 2093 * Caller to hold peer refcount and check for valid peer 2094 * @soc: soc 2095 * @vdev: vdev 2096 * @peer: txrx peer 2097 * @nbuf_head: skb list head 2098 * @nbuf_tail: skb list tail 2099 * 2100 * Return: QDF_STATUS 2101 */ 2102 QDF_STATUS dp_rx_deliver_to_stack(struct dp_soc *soc, 2103 struct dp_vdev *vdev, 2104 struct dp_txrx_peer *peer, 2105 qdf_nbuf_t nbuf_head, 2106 qdf_nbuf_t nbuf_tail); 2107 2108 #ifdef QCA_SUPPORT_EAPOL_OVER_CONTROL_PORT 2109 /** 2110 * dp_rx_eapol_deliver_to_stack() - deliver pkts to network stack 2111 * caller to hold peer refcount and check for valid peer 2112 * @soc: soc 2113 * @vdev: vdev 2114 * @peer: peer 2115 * @nbuf_head: skb list head 2116 * @nbuf_tail: skb list tail 2117 * 2118 * Return: QDF_STATUS 2119 */ 2120 QDF_STATUS dp_rx_eapol_deliver_to_stack(struct dp_soc *soc, 2121 struct dp_vdev *vdev, 2122 struct dp_txrx_peer *peer, 2123 qdf_nbuf_t nbuf_head, 2124 qdf_nbuf_t nbuf_tail); 2125 #endif 2126 2127 #ifndef QCA_HOST_MODE_WIFI_DISABLED 2128 2129 #ifdef WLAN_FEATURE_RX_PREALLOC_BUFFER_POOL 2130 #define DP_RX_PROCESS_NBUF(soc, head, tail, ebuf_head, ebuf_tail, rx_desc) \ 2131 do { \ 2132 if (!soc->rx_buff_pool[rx_desc->pool_id].is_initialized) { \ 2133 DP_RX_LIST_APPEND(head, tail, rx_desc->nbuf); \ 2134 break; \ 2135 } \ 2136 DP_RX_LIST_APPEND(ebuf_head, ebuf_tail, rx_desc->nbuf); \ 2137 if (!qdf_nbuf_is_rx_chfrag_cont(rx_desc->nbuf)) { \ 2138 if (!dp_rx_buffer_pool_refill(soc, ebuf_head, \ 2139 rx_desc->pool_id)) \ 2140 DP_RX_MERGE_TWO_LIST(head, tail, \ 2141 ebuf_head, ebuf_tail);\ 2142 ebuf_head = NULL; \ 2143 ebuf_tail = NULL; \ 2144 } \ 2145 } while (0) 2146 #else 2147 #define DP_RX_PROCESS_NBUF(soc, head, tail, ebuf_head, ebuf_tail, rx_desc) \ 2148 DP_RX_LIST_APPEND(head, tail, rx_desc->nbuf) 2149 #endif /* WLAN_FEATURE_RX_PREALLOC_BUFFER_POOL */ 2150 2151 #endif /* QCA_HOST_MODE_WIFI_DISABLED */ 2152 2153 #ifdef WLAN_FEATURE_PKT_CAPTURE_V2 2154 /** 2155 * dp_rx_deliver_to_pkt_capture() - deliver rx packet to packet capture 2156 * @soc : dp_soc handle 2157 * @pdev: dp_pdev handle 2158 * @peer_id: peer_id of the peer for which completion came 2159 * @is_offload: 2160 * @netbuf: Buffer pointer 2161 * 2162 * This function is used to deliver rx packet to packet capture 2163 */ 2164 void dp_rx_deliver_to_pkt_capture(struct dp_soc *soc, struct dp_pdev *pdev, 2165 uint16_t peer_id, uint32_t is_offload, 2166 qdf_nbuf_t netbuf); 2167 void dp_rx_deliver_to_pkt_capture_no_peer(struct dp_soc *soc, qdf_nbuf_t nbuf, 2168 uint32_t is_offload); 2169 #else 2170 static inline void 2171 dp_rx_deliver_to_pkt_capture(struct dp_soc *soc, struct dp_pdev *pdev, 2172 uint16_t peer_id, uint32_t is_offload, 2173 qdf_nbuf_t netbuf) 2174 { 2175 } 2176 2177 static inline void 2178 dp_rx_deliver_to_pkt_capture_no_peer(struct dp_soc *soc, qdf_nbuf_t nbuf, 2179 uint32_t is_offload) 2180 { 2181 } 2182 #endif 2183 2184 #ifndef QCA_HOST_MODE_WIFI_DISABLED 2185 #ifdef FEATURE_MEC 2186 /** 2187 * dp_rx_mcast_echo_check() - check if the mcast pkt is a loop 2188 * back on same vap or a different vap. 2189 * @soc: core DP main context 2190 * @peer: dp peer handler 2191 * @rx_tlv_hdr: start of the rx TLV header 2192 * @nbuf: pkt buffer 2193 * 2194 * Return: bool (true if it is a looped back pkt else false) 2195 * 2196 */ 2197 bool dp_rx_mcast_echo_check(struct dp_soc *soc, 2198 struct dp_txrx_peer *peer, 2199 uint8_t *rx_tlv_hdr, 2200 qdf_nbuf_t nbuf); 2201 #else 2202 static inline bool dp_rx_mcast_echo_check(struct dp_soc *soc, 2203 struct dp_txrx_peer *peer, 2204 uint8_t *rx_tlv_hdr, 2205 qdf_nbuf_t nbuf) 2206 { 2207 return false; 2208 } 2209 #endif /* FEATURE_MEC */ 2210 #endif /* QCA_HOST_MODE_WIFI_DISABLED */ 2211 2212 #ifdef RECEIVE_OFFLOAD 2213 /** 2214 * dp_rx_fill_gro_info() - Fill GRO info from RX TLV into skb->cb 2215 * @soc: DP SOC handle 2216 * @rx_tlv: RX TLV received for the msdu 2217 * @msdu: msdu for which GRO info needs to be filled 2218 * @rx_ol_pkt_cnt: counter to be incremented for GRO eligible packets 2219 * 2220 * Return: None 2221 */ 2222 void dp_rx_fill_gro_info(struct dp_soc *soc, uint8_t *rx_tlv, 2223 qdf_nbuf_t msdu, uint32_t *rx_ol_pkt_cnt); 2224 #else 2225 static inline 2226 void dp_rx_fill_gro_info(struct dp_soc *soc, uint8_t *rx_tlv, 2227 qdf_nbuf_t msdu, uint32_t *rx_ol_pkt_cnt) 2228 { 2229 } 2230 #endif 2231 2232 /** 2233 * dp_rx_msdu_stats_update() - update per msdu stats. 2234 * @soc: core txrx main context 2235 * @nbuf: pointer to the first msdu of an amsdu. 2236 * @rx_tlv_hdr: pointer to the start of RX TLV headers. 2237 * @txrx_peer: pointer to the txrx peer object. 2238 * @ring_id: reo dest ring number on which pkt is reaped. 2239 * @tid_stats: per tid rx stats. 2240 * @link_id: link Id on which packet is received 2241 * 2242 * update all the per msdu stats for that nbuf. 2243 * 2244 * Return: void 2245 */ 2246 void dp_rx_msdu_stats_update(struct dp_soc *soc, qdf_nbuf_t nbuf, 2247 uint8_t *rx_tlv_hdr, 2248 struct dp_txrx_peer *txrx_peer, 2249 uint8_t ring_id, 2250 struct cdp_tid_rx_stats *tid_stats, 2251 uint8_t link_id); 2252 2253 /** 2254 * dp_rx_deliver_to_stack_no_peer() - try deliver rx data even if 2255 * no corresbonding peer found 2256 * @soc: core txrx main context 2257 * @nbuf: pkt skb pointer 2258 * 2259 * This function will try to deliver some RX special frames to stack 2260 * even there is no peer matched found. for instance, LFR case, some 2261 * eapol data will be sent to host before peer_map done. 2262 * 2263 * Return: None 2264 */ 2265 void dp_rx_deliver_to_stack_no_peer(struct dp_soc *soc, qdf_nbuf_t nbuf); 2266 2267 #ifndef QCA_HOST_MODE_WIFI_DISABLED 2268 #ifdef DP_RX_DROP_RAW_FRM 2269 /** 2270 * dp_rx_is_raw_frame_dropped() - if raw frame nbuf, free and drop 2271 * @nbuf: pkt skb pointer 2272 * 2273 * Return: true - raw frame, dropped 2274 * false - not raw frame, do nothing 2275 */ 2276 bool dp_rx_is_raw_frame_dropped(qdf_nbuf_t nbuf); 2277 #else 2278 static inline 2279 bool dp_rx_is_raw_frame_dropped(qdf_nbuf_t nbuf) 2280 { 2281 return false; 2282 } 2283 #endif 2284 2285 #ifdef WLAN_DP_FEATURE_SW_LATENCY_MGR 2286 /** 2287 * dp_rx_update_stats() - Update soc level rx packet count 2288 * @soc: DP soc handle 2289 * @nbuf: nbuf received 2290 * 2291 * Return: none 2292 */ 2293 void dp_rx_update_stats(struct dp_soc *soc, qdf_nbuf_t nbuf); 2294 #else 2295 static inline 2296 void dp_rx_update_stats(struct dp_soc *soc, qdf_nbuf_t nbuf) 2297 { 2298 } 2299 #endif 2300 2301 /** 2302 * dp_rx_cksum_offload() - set the nbuf checksum as defined by hardware. 2303 * @pdev: dp_pdev handle 2304 * @nbuf: pointer to the first msdu of an amsdu. 2305 * @rx_tlv_hdr: pointer to the start of RX TLV headers. 2306 * 2307 * The ipsumed field of the skb is set based on whether HW validated the 2308 * IP/TCP/UDP checksum. 2309 * 2310 * Return: void 2311 */ 2312 #if defined(MAX_PDEV_CNT) && (MAX_PDEV_CNT == 1) 2313 static inline 2314 void dp_rx_cksum_offload(struct dp_pdev *pdev, 2315 qdf_nbuf_t nbuf, 2316 uint8_t *rx_tlv_hdr) 2317 { 2318 qdf_nbuf_rx_cksum_t cksum = {0}; 2319 //TODO - Move this to ring desc api 2320 //HAL_RX_MSDU_DESC_IP_CHKSUM_FAIL_GET 2321 //HAL_RX_MSDU_DESC_TCP_UDP_CHKSUM_FAIL_GET 2322 uint32_t ip_csum_err, tcp_udp_csum_er; 2323 2324 hal_rx_tlv_csum_err_get(pdev->soc->hal_soc, rx_tlv_hdr, &ip_csum_err, 2325 &tcp_udp_csum_er); 2326 2327 if (qdf_nbuf_is_ipv4_pkt(nbuf)) { 2328 if (qdf_likely(!ip_csum_err)) { 2329 cksum.l4_result = QDF_NBUF_RX_CKSUM_TCP_UDP_UNNECESSARY; 2330 if (qdf_nbuf_is_ipv4_udp_pkt(nbuf) || 2331 qdf_nbuf_is_ipv4_tcp_pkt(nbuf)) { 2332 if (qdf_likely(!tcp_udp_csum_er)) 2333 cksum.csum_level = 1; 2334 else 2335 DP_STATS_INCC(pdev, 2336 err.tcp_udp_csum_err, 1, 2337 tcp_udp_csum_er); 2338 } 2339 } else { 2340 DP_STATS_INCC(pdev, err.ip_csum_err, 1, ip_csum_err); 2341 } 2342 } else if (qdf_nbuf_is_ipv6_udp_pkt(nbuf) || 2343 qdf_nbuf_is_ipv6_tcp_pkt(nbuf)) { 2344 if (qdf_likely(!tcp_udp_csum_er)) 2345 cksum.l4_result = QDF_NBUF_RX_CKSUM_TCP_UDP_UNNECESSARY; 2346 else 2347 DP_STATS_INCC(pdev, err.tcp_udp_csum_err, 1, 2348 tcp_udp_csum_er); 2349 } else { 2350 cksum.l4_result = QDF_NBUF_RX_CKSUM_NONE; 2351 } 2352 2353 qdf_nbuf_set_rx_cksum(nbuf, &cksum); 2354 } 2355 #else 2356 static inline 2357 void dp_rx_cksum_offload(struct dp_pdev *pdev, 2358 qdf_nbuf_t nbuf, 2359 uint8_t *rx_tlv_hdr) 2360 { 2361 } 2362 #endif 2363 #endif /* QCA_HOST_MODE_WIFI_DISABLED */ 2364 2365 #ifdef WLAN_FEATURE_RX_SOFTIRQ_TIME_LIMIT 2366 static inline 2367 bool dp_rx_reap_loop_pkt_limit_hit(struct dp_soc *soc, int num_reaped, 2368 int max_reap_limit) 2369 { 2370 bool limit_hit = false; 2371 2372 limit_hit = 2373 (num_reaped >= max_reap_limit) ? true : false; 2374 2375 if (limit_hit) 2376 DP_STATS_INC(soc, rx.reap_loop_pkt_limit_hit, 1) 2377 2378 return limit_hit; 2379 } 2380 2381 static inline 2382 bool dp_rx_enable_eol_data_check(struct dp_soc *soc) 2383 { 2384 return soc->wlan_cfg_ctx->rx_enable_eol_data_check; 2385 } 2386 2387 static inline int dp_rx_get_loop_pkt_limit(struct dp_soc *soc) 2388 { 2389 struct wlan_cfg_dp_soc_ctxt *cfg = soc->wlan_cfg_ctx; 2390 2391 return cfg->rx_reap_loop_pkt_limit; 2392 } 2393 #else 2394 static inline 2395 bool dp_rx_reap_loop_pkt_limit_hit(struct dp_soc *soc, int num_reaped, 2396 int max_reap_limit) 2397 { 2398 return false; 2399 } 2400 2401 static inline 2402 bool dp_rx_enable_eol_data_check(struct dp_soc *soc) 2403 { 2404 return false; 2405 } 2406 2407 static inline int dp_rx_get_loop_pkt_limit(struct dp_soc *soc) 2408 { 2409 return 0; 2410 } 2411 #endif /* WLAN_FEATURE_RX_SOFTIRQ_TIME_LIMIT */ 2412 2413 void dp_rx_update_stats(struct dp_soc *soc, qdf_nbuf_t nbuf); 2414 2415 static inline uint16_t 2416 dp_rx_peer_metadata_peer_id_get(struct dp_soc *soc, uint32_t peer_metadata) 2417 { 2418 return soc->arch_ops.dp_rx_peer_metadata_peer_id_get(soc, 2419 peer_metadata); 2420 } 2421 2422 #if defined(WLAN_FEATURE_11BE_MLO) && defined(DP_MLO_LINK_STATS_SUPPORT) 2423 /** 2424 * dp_rx_nbuf_set_link_id_from_tlv() - Set link id in nbuf cb 2425 * @soc: SOC handle 2426 * @tlv_hdr: rx tlv header 2427 * @nbuf: nbuf pointer 2428 * 2429 * Return: None 2430 */ 2431 static inline void 2432 dp_rx_nbuf_set_link_id_from_tlv(struct dp_soc *soc, uint8_t *tlv_hdr, 2433 qdf_nbuf_t nbuf) 2434 { 2435 uint32_t peer_metadata = hal_rx_tlv_peer_meta_data_get(soc->hal_soc, 2436 tlv_hdr); 2437 2438 if (soc->arch_ops.dp_rx_peer_set_link_id) 2439 soc->arch_ops.dp_rx_peer_set_link_id(nbuf, peer_metadata); 2440 } 2441 2442 /** 2443 * dp_rx_set_nbuf_band() - Set band info in nbuf cb 2444 * @nbuf: nbuf pointer 2445 * @txrx_peer: txrx_peer pointer 2446 * @link_id: Peer Link ID 2447 * 2448 * Returen: None 2449 */ 2450 static inline void 2451 dp_rx_set_nbuf_band(qdf_nbuf_t nbuf, struct dp_txrx_peer *txrx_peer, 2452 uint8_t link_id) 2453 { 2454 qdf_nbuf_rx_set_band(nbuf, txrx_peer->band[link_id]); 2455 } 2456 #else 2457 static inline void 2458 dp_rx_nbuf_set_link_id_from_tlv(struct dp_soc *soc, uint8_t *tlv_hdr, 2459 qdf_nbuf_t nbuf) 2460 { 2461 } 2462 2463 static inline void 2464 dp_rx_set_nbuf_band(qdf_nbuf_t nbuf, struct dp_txrx_peer *txrx_peer, 2465 uint8_t link_id) 2466 { 2467 } 2468 #endif 2469 2470 /** 2471 * dp_rx_desc_pool_init_generic() - Generic Rx descriptors initialization 2472 * @soc: SOC handle 2473 * @rx_desc_pool: pointer to RX descriptor pool 2474 * @pool_id: pool ID 2475 * 2476 * Return: None 2477 */ 2478 QDF_STATUS dp_rx_desc_pool_init_generic(struct dp_soc *soc, 2479 struct rx_desc_pool *rx_desc_pool, 2480 uint32_t pool_id); 2481 2482 void dp_rx_desc_pool_deinit_generic(struct dp_soc *soc, 2483 struct rx_desc_pool *rx_desc_pool, 2484 uint32_t pool_id); 2485 2486 /** 2487 * dp_rx_pkt_tracepoints_enabled() - Get the state of rx pkt tracepoint 2488 * 2489 * Return: True if any rx pkt tracepoint is enabled else false 2490 */ 2491 static inline 2492 bool dp_rx_pkt_tracepoints_enabled(void) 2493 { 2494 return (qdf_trace_dp_rx_tcp_pkt_enabled() || 2495 qdf_trace_dp_rx_udp_pkt_enabled() || 2496 qdf_trace_dp_rx_pkt_enabled()); 2497 } 2498 2499 #ifdef FEATURE_DIRECT_LINK 2500 /** 2501 * dp_audio_smmu_map()- Map memory region into Audio SMMU CB 2502 * @qdf_dev: pointer to QDF device structure 2503 * @paddr: physical address 2504 * @iova: DMA address 2505 * @size: memory region size 2506 * 2507 * Return: 0 on success else failure code 2508 */ 2509 static inline 2510 int dp_audio_smmu_map(qdf_device_t qdf_dev, qdf_dma_addr_t paddr, 2511 qdf_dma_addr_t iova, qdf_size_t size) 2512 { 2513 return pld_audio_smmu_map(qdf_dev->dev, paddr, iova, size); 2514 } 2515 2516 /** 2517 * dp_audio_smmu_unmap()- Remove memory region mapping from Audio SMMU CB 2518 * @qdf_dev: pointer to QDF device structure 2519 * @iova: DMA address 2520 * @size: memory region size 2521 * 2522 * Return: None 2523 */ 2524 static inline 2525 void dp_audio_smmu_unmap(qdf_device_t qdf_dev, qdf_dma_addr_t iova, 2526 qdf_size_t size) 2527 { 2528 pld_audio_smmu_unmap(qdf_dev->dev, iova, size); 2529 } 2530 #else 2531 static inline 2532 int dp_audio_smmu_map(qdf_device_t qdf_dev, qdf_dma_addr_t paddr, 2533 qdf_dma_addr_t iova, qdf_size_t size) 2534 { 2535 return 0; 2536 } 2537 2538 static inline 2539 void dp_audio_smmu_unmap(qdf_device_t qdf_dev, qdf_dma_addr_t iova, 2540 qdf_size_t size) 2541 { 2542 } 2543 #endif 2544 2545 #if defined(QCA_DP_RX_NBUF_NO_MAP_UNMAP) && !defined(BUILD_X86) 2546 static inline 2547 QDF_STATUS dp_pdev_rx_buffers_attach_simple(struct dp_soc *soc, uint32_t mac_id, 2548 struct dp_srng *rxdma_srng, 2549 struct rx_desc_pool *rx_desc_pool, 2550 uint32_t num_req_buffers) 2551 { 2552 return __dp_pdev_rx_buffers_no_map_attach(soc, mac_id, 2553 rxdma_srng, 2554 rx_desc_pool, 2555 num_req_buffers); 2556 } 2557 2558 static inline 2559 void dp_rx_buffers_replenish_simple(struct dp_soc *soc, uint32_t mac_id, 2560 struct dp_srng *rxdma_srng, 2561 struct rx_desc_pool *rx_desc_pool, 2562 uint32_t num_req_buffers, 2563 union dp_rx_desc_list_elem_t **desc_list, 2564 union dp_rx_desc_list_elem_t **tail) 2565 { 2566 __dp_rx_buffers_no_map_replenish(soc, mac_id, rxdma_srng, rx_desc_pool, 2567 num_req_buffers, desc_list, tail); 2568 } 2569 2570 static inline 2571 void dp_rx_comp2refill_replenish(struct dp_soc *soc, uint32_t mac_id, 2572 struct dp_srng *rxdma_srng, 2573 struct rx_desc_pool *rx_desc_pool, 2574 uint32_t num_req_buffers, 2575 union dp_rx_desc_list_elem_t **desc_list, 2576 union dp_rx_desc_list_elem_t **tail) 2577 { 2578 __dp_rx_comp2refill_replenish(soc, mac_id, rxdma_srng, rx_desc_pool, 2579 num_req_buffers, desc_list, tail); 2580 } 2581 2582 static inline 2583 void dp_rx_buffers_lt_replenish_simple(struct dp_soc *soc, uint32_t mac_id, 2584 struct dp_srng *rxdma_srng, 2585 struct rx_desc_pool *rx_desc_pool, 2586 uint32_t num_req_buffers, 2587 union dp_rx_desc_list_elem_t **desc_list, 2588 union dp_rx_desc_list_elem_t **tail) 2589 { 2590 __dp_rx_buffers_no_map_lt_replenish(soc, mac_id, rxdma_srng, 2591 rx_desc_pool); 2592 } 2593 2594 #ifndef QCA_DP_NBUF_FAST_RECYCLE_CHECK 2595 static inline 2596 qdf_dma_addr_t dp_rx_nbuf_sync_no_dsb(struct dp_soc *dp_soc, 2597 qdf_nbuf_t nbuf, 2598 uint32_t buf_size) 2599 { 2600 qdf_nbuf_dma_inv_range_no_dsb((void *)nbuf->data, 2601 (void *)(nbuf->data + buf_size)); 2602 2603 return (qdf_dma_addr_t)qdf_mem_virt_to_phys(nbuf->data); 2604 } 2605 #else 2606 #define L3_HEADER_PAD 2 2607 static inline 2608 qdf_dma_addr_t dp_rx_nbuf_sync_no_dsb(struct dp_soc *dp_soc, 2609 qdf_nbuf_t nbuf, 2610 uint32_t buf_size) 2611 { 2612 if (nbuf->recycled_for_ds) 2613 return (qdf_dma_addr_t)qdf_mem_virt_to_phys(nbuf->data); 2614 2615 if (unlikely(!nbuf->fast_recycled)) { 2616 qdf_nbuf_dma_inv_range_no_dsb((void *)nbuf->data, 2617 (void *)(nbuf->data + buf_size)); 2618 } 2619 2620 DP_STATS_INC(dp_soc, rx.fast_recycled, 1); 2621 nbuf->fast_recycled = 0; 2622 2623 return (qdf_dma_addr_t)qdf_mem_virt_to_phys(nbuf->data); 2624 } 2625 #endif 2626 2627 static inline 2628 qdf_dma_addr_t dp_rx_nbuf_sync(struct dp_soc *dp_soc, 2629 qdf_nbuf_t nbuf, 2630 uint32_t buf_size) 2631 { 2632 qdf_nbuf_dma_inv_range((void *)nbuf->data, 2633 (void *)(nbuf->data + buf_size)); 2634 2635 return (qdf_dma_addr_t)qdf_mem_virt_to_phys(nbuf->data); 2636 } 2637 2638 #if !defined(SPECULATIVE_READ_DISABLED) 2639 static inline 2640 void dp_rx_nbuf_unmap(struct dp_soc *soc, 2641 struct dp_rx_desc *rx_desc, 2642 uint8_t reo_ring_num) 2643 { 2644 struct rx_desc_pool *rx_desc_pool; 2645 qdf_nbuf_t nbuf; 2646 2647 rx_desc_pool = &soc->rx_desc_buf[rx_desc->pool_id]; 2648 nbuf = rx_desc->nbuf; 2649 2650 qdf_nbuf_dma_inv_range_no_dsb((void *)nbuf->data, 2651 (void *)(nbuf->data + rx_desc_pool->buf_size)); 2652 } 2653 2654 static inline 2655 void dp_rx_nbuf_unmap_pool(struct dp_soc *soc, 2656 struct rx_desc_pool *rx_desc_pool, 2657 qdf_nbuf_t nbuf) 2658 { 2659 qdf_nbuf_dma_inv_range((void *)nbuf->data, 2660 (void *)(nbuf->data + rx_desc_pool->buf_size)); 2661 } 2662 2663 #else 2664 static inline 2665 void dp_rx_nbuf_unmap(struct dp_soc *soc, 2666 struct dp_rx_desc *rx_desc, 2667 uint8_t reo_ring_num) 2668 { 2669 } 2670 2671 static inline 2672 void dp_rx_nbuf_unmap_pool(struct dp_soc *soc, 2673 struct rx_desc_pool *rx_desc_pool, 2674 qdf_nbuf_t nbuf) 2675 { 2676 } 2677 #endif 2678 2679 static inline 2680 void dp_rx_per_core_stats_update(struct dp_soc *soc, uint8_t ring_id, 2681 uint32_t bufs_reaped) 2682 { 2683 } 2684 2685 static inline 2686 qdf_nbuf_t dp_rx_nbuf_alloc(struct dp_soc *soc, 2687 struct rx_desc_pool *rx_desc_pool) 2688 { 2689 return qdf_nbuf_alloc_simple(soc->osdev, rx_desc_pool->buf_size, 2690 RX_BUFFER_RESERVATION, 2691 rx_desc_pool->buf_alignment, FALSE); 2692 } 2693 2694 static inline 2695 void dp_rx_nbuf_free(qdf_nbuf_t nbuf) 2696 { 2697 qdf_nbuf_free_simple(nbuf); 2698 } 2699 #else 2700 static inline 2701 QDF_STATUS dp_pdev_rx_buffers_attach_simple(struct dp_soc *soc, uint32_t mac_id, 2702 struct dp_srng *rxdma_srng, 2703 struct rx_desc_pool *rx_desc_pool, 2704 uint32_t num_req_buffers) 2705 { 2706 return dp_pdev_rx_buffers_attach(soc, mac_id, 2707 rxdma_srng, 2708 rx_desc_pool, 2709 num_req_buffers); 2710 } 2711 2712 static inline 2713 void dp_rx_buffers_replenish_simple(struct dp_soc *soc, uint32_t mac_id, 2714 struct dp_srng *rxdma_srng, 2715 struct rx_desc_pool *rx_desc_pool, 2716 uint32_t num_req_buffers, 2717 union dp_rx_desc_list_elem_t **desc_list, 2718 union dp_rx_desc_list_elem_t **tail) 2719 { 2720 dp_rx_buffers_replenish(soc, mac_id, rxdma_srng, rx_desc_pool, 2721 num_req_buffers, desc_list, tail, false); 2722 } 2723 2724 static inline 2725 void dp_rx_buffers_lt_replenish_simple(struct dp_soc *soc, uint32_t mac_id, 2726 struct dp_srng *rxdma_srng, 2727 struct rx_desc_pool *rx_desc_pool, 2728 uint32_t num_req_buffers, 2729 union dp_rx_desc_list_elem_t **desc_list, 2730 union dp_rx_desc_list_elem_t **tail) 2731 { 2732 dp_rx_buffers_replenish(soc, mac_id, rxdma_srng, rx_desc_pool, 2733 num_req_buffers, desc_list, tail, false); 2734 } 2735 2736 static inline 2737 qdf_dma_addr_t dp_rx_nbuf_sync_no_dsb(struct dp_soc *dp_soc, 2738 qdf_nbuf_t nbuf, 2739 uint32_t buf_size) 2740 { 2741 return (qdf_dma_addr_t)NULL; 2742 } 2743 2744 static inline 2745 qdf_dma_addr_t dp_rx_nbuf_sync(struct dp_soc *dp_soc, 2746 qdf_nbuf_t nbuf, 2747 uint32_t buf_size) 2748 { 2749 return (qdf_dma_addr_t)NULL; 2750 } 2751 2752 static inline 2753 void dp_rx_nbuf_unmap(struct dp_soc *soc, 2754 struct dp_rx_desc *rx_desc, 2755 uint8_t reo_ring_num) 2756 { 2757 struct rx_desc_pool *rx_desc_pool; 2758 2759 rx_desc_pool = &soc->rx_desc_buf[rx_desc->pool_id]; 2760 dp_ipa_reo_ctx_buf_mapping_lock(soc, reo_ring_num); 2761 2762 dp_audio_smmu_unmap(soc->osdev, 2763 QDF_NBUF_CB_PADDR(rx_desc->nbuf), 2764 rx_desc_pool->buf_size); 2765 2766 if (qdf_atomic_read(&soc->ipa_mapped)) 2767 dp_ipa_handle_rx_buf_smmu_mapping(soc, rx_desc->nbuf, 2768 rx_desc_pool->buf_size, 2769 false, __func__, __LINE__); 2770 2771 qdf_nbuf_unmap_nbytes_single(soc->osdev, rx_desc->nbuf, 2772 QDF_DMA_FROM_DEVICE, 2773 rx_desc_pool->buf_size); 2774 2775 dp_ipa_reo_ctx_buf_mapping_unlock(soc, reo_ring_num); 2776 } 2777 2778 static inline 2779 void dp_rx_nbuf_unmap_pool(struct dp_soc *soc, 2780 struct rx_desc_pool *rx_desc_pool, 2781 qdf_nbuf_t nbuf) 2782 { 2783 dp_audio_smmu_unmap(soc->osdev, QDF_NBUF_CB_PADDR(nbuf), 2784 rx_desc_pool->buf_size); 2785 if (qdf_atomic_read(&soc->ipa_mapped)) 2786 dp_ipa_handle_rx_buf_smmu_mapping(soc, nbuf, 2787 rx_desc_pool->buf_size, 2788 false, __func__, __LINE__); 2789 qdf_nbuf_unmap_nbytes_single(soc->osdev, nbuf, QDF_DMA_FROM_DEVICE, 2790 rx_desc_pool->buf_size); 2791 } 2792 2793 static inline 2794 void dp_rx_per_core_stats_update(struct dp_soc *soc, uint8_t ring_id, 2795 uint32_t bufs_reaped) 2796 { 2797 int cpu_id = qdf_get_cpu(); 2798 2799 DP_STATS_INC(soc, rx.ring_packets[cpu_id][ring_id], bufs_reaped); 2800 } 2801 2802 static inline 2803 qdf_nbuf_t dp_rx_nbuf_alloc(struct dp_soc *soc, 2804 struct rx_desc_pool *rx_desc_pool) 2805 { 2806 return qdf_nbuf_alloc(soc->osdev, rx_desc_pool->buf_size, 2807 RX_BUFFER_RESERVATION, 2808 rx_desc_pool->buf_alignment, FALSE); 2809 } 2810 2811 static inline 2812 void dp_rx_nbuf_free(qdf_nbuf_t nbuf) 2813 { 2814 qdf_nbuf_free(nbuf); 2815 } 2816 #endif 2817 2818 #ifdef DP_UMAC_HW_RESET_SUPPORT 2819 /** 2820 * dp_rx_desc_reuse() - Reuse the rx descriptors to fill the rx buf ring 2821 * @soc: core txrx main context 2822 * @nbuf_list: nbuf list for delayed free 2823 * 2824 * Return: void 2825 */ 2826 void dp_rx_desc_reuse(struct dp_soc *soc, qdf_nbuf_t *nbuf_list); 2827 2828 /** 2829 * dp_rx_desc_delayed_free() - Delayed free of the rx descs 2830 * 2831 * @soc: core txrx main context 2832 * 2833 * Return: void 2834 */ 2835 void dp_rx_desc_delayed_free(struct dp_soc *soc); 2836 #endif 2837 2838 /** 2839 * dp_rx_get_txrx_peer_and_vdev() - Get txrx peer and vdev from peer id 2840 * @soc: core txrx main context 2841 * @nbuf : pointer to the first msdu of an amsdu. 2842 * @peer_id : Peer id of the peer 2843 * @txrx_ref_handle : Buffer to save the handle for txrx peer's reference 2844 * @pkt_capture_offload : Flag indicating if pkt capture offload is needed 2845 * @vdev : Buffer to hold pointer to vdev 2846 * @rx_pdev : Buffer to hold pointer to rx pdev 2847 * @dsf : delay stats flag 2848 * @old_tid : Old tid 2849 * 2850 * Get txrx peer and vdev from peer id 2851 * 2852 * Return: Pointer to txrx peer 2853 */ 2854 static inline struct dp_txrx_peer * 2855 dp_rx_get_txrx_peer_and_vdev(struct dp_soc *soc, 2856 qdf_nbuf_t nbuf, 2857 uint16_t peer_id, 2858 dp_txrx_ref_handle *txrx_ref_handle, 2859 bool pkt_capture_offload, 2860 struct dp_vdev **vdev, 2861 struct dp_pdev **rx_pdev, 2862 uint32_t *dsf, 2863 uint32_t *old_tid) 2864 { 2865 struct dp_txrx_peer *txrx_peer = NULL; 2866 2867 txrx_peer = dp_txrx_peer_get_ref_by_id(soc, peer_id, txrx_ref_handle, 2868 DP_MOD_ID_RX); 2869 2870 if (qdf_likely(txrx_peer)) { 2871 *vdev = txrx_peer->vdev; 2872 } else { 2873 nbuf->next = NULL; 2874 dp_rx_deliver_to_pkt_capture_no_peer(soc, nbuf, 2875 pkt_capture_offload); 2876 if (!pkt_capture_offload) 2877 dp_rx_deliver_to_stack_no_peer(soc, nbuf); 2878 2879 goto end; 2880 } 2881 2882 if (qdf_unlikely(!(*vdev))) { 2883 qdf_nbuf_free(nbuf); 2884 DP_STATS_INC(soc, rx.err.invalid_vdev, 1); 2885 goto end; 2886 } 2887 2888 *rx_pdev = (*vdev)->pdev; 2889 *dsf = (*rx_pdev)->delay_stats_flag; 2890 *old_tid = 0xff; 2891 2892 end: 2893 return txrx_peer; 2894 } 2895 2896 static inline QDF_STATUS 2897 dp_peer_rx_reorder_queue_setup(struct dp_soc *soc, struct dp_peer *peer, 2898 int tid, uint32_t ba_window_size) 2899 { 2900 return soc->arch_ops.dp_peer_rx_reorder_queue_setup(soc, 2901 peer, tid, 2902 ba_window_size); 2903 } 2904 2905 static inline 2906 void dp_rx_nbuf_list_deliver(struct dp_soc *soc, 2907 struct dp_vdev *vdev, 2908 struct dp_txrx_peer *txrx_peer, 2909 uint16_t peer_id, 2910 uint8_t pkt_capture_offload, 2911 qdf_nbuf_t deliver_list_head, 2912 qdf_nbuf_t deliver_list_tail) 2913 { 2914 qdf_nbuf_t nbuf, next; 2915 2916 if (qdf_likely(deliver_list_head)) { 2917 if (qdf_likely(txrx_peer)) { 2918 dp_rx_deliver_to_pkt_capture(soc, vdev->pdev, peer_id, 2919 pkt_capture_offload, 2920 deliver_list_head); 2921 if (!pkt_capture_offload) 2922 dp_rx_deliver_to_stack(soc, vdev, txrx_peer, 2923 deliver_list_head, 2924 deliver_list_tail); 2925 } else { 2926 nbuf = deliver_list_head; 2927 while (nbuf) { 2928 next = nbuf->next; 2929 nbuf->next = NULL; 2930 dp_rx_deliver_to_stack_no_peer(soc, nbuf); 2931 nbuf = next; 2932 } 2933 } 2934 } 2935 } 2936 2937 #ifdef DP_TX_RX_TPUT_SIMULATE 2938 /* 2939 * Change this macro value to simulate different RX T-put, 2940 * if OTA is 100 Mbps, to simulate 200 Mbps, then multiplication factor 2941 * is 2, set macro value as 1 (multiplication factor - 1). 2942 */ 2943 #define DP_RX_PKTS_DUPLICATE_CNT 0 2944 static inline 2945 void dp_rx_nbuf_list_dup_deliver(struct dp_soc *soc, 2946 struct dp_vdev *vdev, 2947 struct dp_txrx_peer *txrx_peer, 2948 uint16_t peer_id, 2949 uint8_t pkt_capture_offload, 2950 qdf_nbuf_t ori_list_head, 2951 qdf_nbuf_t ori_list_tail) 2952 { 2953 qdf_nbuf_t new_skb = NULL; 2954 qdf_nbuf_t new_list_head = NULL; 2955 qdf_nbuf_t new_list_tail = NULL; 2956 qdf_nbuf_t nbuf = NULL; 2957 int i; 2958 2959 for (i = 0; i < DP_RX_PKTS_DUPLICATE_CNT; i++) { 2960 nbuf = ori_list_head; 2961 new_list_head = NULL; 2962 new_list_tail = NULL; 2963 2964 while (nbuf) { 2965 new_skb = qdf_nbuf_copy(nbuf); 2966 if (qdf_likely(new_skb)) 2967 DP_RX_LIST_APPEND(new_list_head, 2968 new_list_tail, 2969 new_skb); 2970 else 2971 dp_err("copy skb failed"); 2972 2973 nbuf = qdf_nbuf_next(nbuf); 2974 } 2975 2976 /* deliver the copied nbuf list */ 2977 dp_rx_nbuf_list_deliver(soc, vdev, txrx_peer, peer_id, 2978 pkt_capture_offload, 2979 new_list_head, 2980 new_list_tail); 2981 } 2982 2983 /* deliver the original skb_list */ 2984 dp_rx_nbuf_list_deliver(soc, vdev, txrx_peer, peer_id, 2985 pkt_capture_offload, 2986 ori_list_head, 2987 ori_list_tail); 2988 } 2989 2990 #define DP_RX_DELIVER_TO_STACK dp_rx_nbuf_list_dup_deliver 2991 2992 #else /* !DP_TX_RX_TPUT_SIMULATE */ 2993 2994 #define DP_RX_DELIVER_TO_STACK dp_rx_nbuf_list_deliver 2995 2996 #endif /* DP_TX_RX_TPUT_SIMULATE */ 2997 2998 /** 2999 * dp_rx_wbm_desc_nbuf_sanity_check() - Add sanity check to for WBM rx_desc 3000 * paddr corruption 3001 * @soc: core txrx main context 3002 * @hal_ring_hdl: opaque pointer to the HAL Rx Error Ring 3003 * @ring_desc: REO ring descriptor 3004 * @rx_desc: Rx descriptor 3005 * 3006 * Return: NONE 3007 */ 3008 QDF_STATUS dp_rx_wbm_desc_nbuf_sanity_check(struct dp_soc *soc, 3009 hal_ring_handle_t hal_ring_hdl, 3010 hal_ring_desc_t ring_desc, 3011 struct dp_rx_desc *rx_desc); 3012 /** 3013 * dp_rx_is_sg_formation_required() - Check if sg formation is required 3014 * @info: WBM desc info 3015 * 3016 * Return: True if sg is required else false 3017 */ 3018 bool dp_rx_is_sg_formation_required(struct hal_wbm_err_desc_info *info); 3019 3020 /** 3021 * dp_rx_err_tlv_invalidate() - Invalidate network buffer 3022 * @soc: core txrx main context 3023 * @nbuf: Network buffer to invalidate 3024 * 3025 * Return: NONE 3026 */ 3027 void dp_rx_err_tlv_invalidate(struct dp_soc *soc, 3028 qdf_nbuf_t nbuf); 3029 3030 /** 3031 * dp_rx_wbm_sg_list_last_msdu_war() - war for HW issue 3032 * @soc: DP SOC handle 3033 * 3034 * This is a war for HW issue where length is only valid in last msdu 3035 * 3036 * Return: NONE 3037 */ 3038 void dp_rx_wbm_sg_list_last_msdu_war(struct dp_soc *soc); 3039 3040 /** 3041 * dp_rx_check_pkt_len() - Check for pktlen validity 3042 * @soc: DP SOC context 3043 * @pkt_len: computed length of the pkt from caller in bytes 3044 * 3045 * Return: true if pktlen > RX_BUFFER_SIZE, else return false 3046 * 3047 */ 3048 bool dp_rx_check_pkt_len(struct dp_soc *soc, uint32_t pkt_len); 3049 3050 /** 3051 * dp_rx_null_q_handle_invalid_peer_id_exception() - to find exception 3052 * @soc: pointer to dp_soc struct 3053 * @pool_id: Pool id to find dp_pdev 3054 * @rx_tlv_hdr: TLV header of received packet 3055 * @nbuf: SKB 3056 * 3057 * In certain types of packets if peer_id is not correct then 3058 * driver may not be able find. Try finding peer by addr_2 of 3059 * received MPDU. If you find the peer then most likely sw_peer_id & 3060 * ast_idx is corrupted. 3061 * 3062 * Return: True if you find the peer by addr_2 of received MPDU else false 3063 */ 3064 bool dp_rx_null_q_handle_invalid_peer_id_exception(struct dp_soc *soc, 3065 uint8_t pool_id, 3066 uint8_t *rx_tlv_hdr, 3067 qdf_nbuf_t nbuf); 3068 3069 /** 3070 * dp_rx_err_drop_3addr_mcast() - Check if feature drop_3ddr_mcast is enabled 3071 * If so, drop the multicast frame. 3072 * @vdev: datapath vdev 3073 * @rx_tlv_hdr: TLV header 3074 * 3075 * Return: true if packet is to be dropped, 3076 * false, if packet is not dropped. 3077 */ 3078 bool dp_rx_err_drop_3addr_mcast(struct dp_vdev *vdev, uint8_t *rx_tlv_hdr); 3079 3080 /** 3081 * dp_rx_deliver_to_osif_stack() - function to deliver rx pkts to stack 3082 * @soc: DP soc 3083 * @vdev: DP vdev handle 3084 * @txrx_peer: pointer to the txrx_peer object 3085 * @nbuf: skb list head 3086 * @tail: skb list tail 3087 * @is_eapol: eapol pkt check 3088 * 3089 * Return: None 3090 */ 3091 void 3092 dp_rx_deliver_to_osif_stack(struct dp_soc *soc, 3093 struct dp_vdev *vdev, 3094 struct dp_txrx_peer *txrx_peer, 3095 qdf_nbuf_t nbuf, 3096 qdf_nbuf_t tail, 3097 bool is_eapol); 3098 3099 /** 3100 * dp_rx_set_wbm_err_info_in_nbuf() - function to set wbm err info in nbuf 3101 * @soc: DP soc 3102 * @nbuf: skb list head 3103 * @wbm_err: wbm error info details 3104 * 3105 * Return: None 3106 */ 3107 void 3108 dp_rx_set_wbm_err_info_in_nbuf(struct dp_soc *soc, 3109 qdf_nbuf_t nbuf, 3110 union hal_wbm_err_info_u wbm_err); 3111 3112 #if defined(WLAN_MAX_PDEVS) && (WLAN_MAX_PDEVS == 1) 3113 static inline uint8_t 3114 dp_rx_get_defrag_bm_id(struct dp_soc *soc) 3115 { 3116 return DP_DEFRAG_RBM(soc->wbm_sw0_bm_id); 3117 } 3118 3119 static inline uint8_t 3120 dp_rx_get_rx_bm_id(struct dp_soc *soc) 3121 { 3122 return DP_WBM2SW_RBM(soc->wbm_sw0_bm_id); 3123 } 3124 #else 3125 static inline uint8_t 3126 dp_rx_get_rx_bm_id(struct dp_soc *soc) 3127 { 3128 struct wlan_cfg_dp_soc_ctxt *cfg_ctx = soc->wlan_cfg_ctx; 3129 uint8_t wbm2_sw_rx_rel_ring_id; 3130 3131 wbm2_sw_rx_rel_ring_id = wlan_cfg_get_rx_rel_ring_id(cfg_ctx); 3132 3133 return HAL_RX_BUF_RBM_SW_BM(soc->wbm_sw0_bm_id, 3134 wbm2_sw_rx_rel_ring_id); 3135 } 3136 3137 static inline uint8_t 3138 dp_rx_get_defrag_bm_id(struct dp_soc *soc) 3139 { 3140 return dp_rx_get_rx_bm_id(soc); 3141 } 3142 #endif 3143 3144 #ifndef WLAN_SOFTUMAC_SUPPORT /* WLAN_SOFTUMAC_SUPPORT */ 3145 /** 3146 * dp_rx_dump_info_and_assert() - dump RX Ring info and Rx Desc info 3147 * 3148 * @soc: core txrx main context 3149 * @hal_ring_hdl: opaque pointer to the HAL Rx Ring, which will be serviced 3150 * @ring_desc: opaque pointer to the RX ring descriptor 3151 * @rx_desc: host rx descriptor 3152 * 3153 * Return: void 3154 */ 3155 void dp_rx_dump_info_and_assert(struct dp_soc *soc, 3156 hal_ring_handle_t hal_ring_hdl, 3157 hal_ring_desc_t ring_desc, 3158 struct dp_rx_desc *rx_desc); 3159 3160 /** 3161 * dp_rx_link_desc_return() - Return a MPDU link descriptor to HW 3162 * (WBM), following error handling 3163 * 3164 * @soc: core DP main context 3165 * @ring_desc: opaque pointer to the REO error ring descriptor 3166 * @bm_action: put to idle_list or release to msdu_list 3167 * 3168 * Return: QDF_STATUS_E_FAILURE for failure else QDF_STATUS_SUCCESS 3169 */ 3170 QDF_STATUS 3171 dp_rx_link_desc_return(struct dp_soc *soc, hal_ring_desc_t ring_desc, 3172 uint8_t bm_action); 3173 3174 /** 3175 * dp_rx_link_desc_return_by_addr - Return a MPDU link descriptor to 3176 * (WBM) by address 3177 * 3178 * @soc: core DP main context 3179 * @link_desc_addr: link descriptor addr 3180 * @bm_action: put to idle_list or release to msdu_list 3181 * 3182 * Return: QDF_STATUS_E_FAILURE for failure else QDF_STATUS_SUCCESS 3183 */ 3184 QDF_STATUS 3185 dp_rx_link_desc_return_by_addr(struct dp_soc *soc, 3186 hal_buff_addrinfo_t link_desc_addr, 3187 uint8_t bm_action); 3188 3189 /** 3190 * dp_rxdma_err_process() - RxDMA error processing functionality 3191 * @int_ctx: pointer to DP interrupt context 3192 * @soc: core txrx main context 3193 * @mac_id: mac id which is one of 3 mac_ids 3194 * @quota: No. of units (packets) that can be serviced in one shot. 3195 * 3196 * Return: num of buffers processed 3197 */ 3198 uint32_t 3199 dp_rxdma_err_process(struct dp_intr *int_ctx, struct dp_soc *soc, 3200 uint32_t mac_id, uint32_t quota); 3201 3202 /** 3203 * dp_rx_process_rxdma_err() - Function to deliver rxdma unencrypted_err 3204 * frames to OS or wifi parse errors. 3205 * @soc: core DP main context 3206 * @nbuf: buffer pointer 3207 * @rx_tlv_hdr: start of rx tlv header 3208 * @txrx_peer: peer reference 3209 * @err_code: rxdma err code 3210 * @mac_id: mac_id which is one of 3 mac_ids(Assuming mac_id and 3211 * pool_id has same mapping) 3212 * @link_id: link Id on which the packet is received 3213 * 3214 * Return: None 3215 */ 3216 void 3217 dp_rx_process_rxdma_err(struct dp_soc *soc, qdf_nbuf_t nbuf, 3218 uint8_t *rx_tlv_hdr, struct dp_txrx_peer *txrx_peer, 3219 uint8_t err_code, uint8_t mac_id, uint8_t link_id); 3220 3221 /** 3222 * dp_rx_process_mic_error(): Function to pass mic error indication to umac 3223 * @soc: core DP main context 3224 * @nbuf: buffer pointer 3225 * @rx_tlv_hdr: start of rx tlv header 3226 * @txrx_peer: txrx peer handle 3227 * 3228 * Return: void 3229 */ 3230 void dp_rx_process_mic_error(struct dp_soc *soc, qdf_nbuf_t nbuf, 3231 uint8_t *rx_tlv_hdr, 3232 struct dp_txrx_peer *txrx_peer); 3233 3234 /** 3235 * dp_2k_jump_handle() - Function to handle 2k jump exception 3236 * on WBM ring 3237 * @soc: core DP main context 3238 * @nbuf: buffer pointer 3239 * @rx_tlv_hdr: start of rx tlv header 3240 * @peer_id: peer id of first msdu 3241 * @tid: Tid for which exception occurred 3242 * 3243 * This function handles 2k jump violations arising out 3244 * of receiving aggregates in non BA case. This typically 3245 * may happen if aggregates are received on a QOS enabled TID 3246 * while Rx window size is still initialized to value of 2. Or 3247 * it may also happen if negotiated window size is 1 but peer 3248 * sends aggregates. 3249 */ 3250 void dp_2k_jump_handle(struct dp_soc *soc, qdf_nbuf_t nbuf, uint8_t *rx_tlv_hdr, 3251 uint16_t peer_id, uint8_t tid); 3252 3253 #ifndef QCA_HOST_MODE_WIFI_DISABLED 3254 3255 /** 3256 * dp_rx_err_process() - Processes error frames routed to REO error ring 3257 * @int_ctx: pointer to DP interrupt context 3258 * @soc: core txrx main context 3259 * @hal_ring_hdl: opaque pointer to the HAL Rx Error Ring, which will be serviced 3260 * @quota: No. of units (packets) that can be serviced in one shot. 3261 * 3262 * This function implements error processing and top level demultiplexer 3263 * for all the frames routed to REO error ring. 3264 * 3265 * Return: uint32_t: No. of elements processed 3266 */ 3267 uint32_t dp_rx_err_process(struct dp_intr *int_ctx, struct dp_soc *soc, 3268 hal_ring_handle_t hal_ring_hdl, uint32_t quota); 3269 3270 /** 3271 * dp_rx_wbm_err_process() - Processes error frames routed to WBM release ring 3272 * @int_ctx: pointer to DP interrupt context 3273 * @soc: core txrx main context 3274 * @hal_ring_hdl: opaque pointer to the HAL Rx Error Ring, which will be 3275 * serviced 3276 * @quota: No. of units (packets) that can be serviced in one shot. 3277 * 3278 * This function implements error processing and top level demultiplexer 3279 * for all the frames routed to WBM2HOST sw release ring. 3280 * 3281 * Return: uint32_t: No. of elements processed 3282 */ 3283 uint32_t 3284 dp_rx_wbm_err_process(struct dp_intr *int_ctx, struct dp_soc *soc, 3285 hal_ring_handle_t hal_ring_hdl, uint32_t quota); 3286 3287 #ifdef QCA_OL_RX_LOCK_LESS_ACCESS 3288 /** 3289 * dp_rx_srng_access_start()- Wrapper function to log access start of a hal ring 3290 * @int_ctx: pointer to DP interrupt context 3291 * @soc: DP soc structure pointer 3292 * @hal_ring_hdl: HAL ring handle 3293 * 3294 * Return: 0 on success; error on failure 3295 */ 3296 static inline int 3297 dp_rx_srng_access_start(struct dp_intr *int_ctx, struct dp_soc *soc, 3298 hal_ring_handle_t hal_ring_hdl) 3299 { 3300 return hal_srng_access_start_unlocked(soc->hal_soc, hal_ring_hdl); 3301 } 3302 3303 /** 3304 * dp_rx_srng_access_end()- Wrapper function to log access end of a hal ring 3305 * @int_ctx: pointer to DP interrupt context 3306 * @soc: DP soc structure pointer 3307 * @hal_ring_hdl: HAL ring handle 3308 * 3309 * Return: None 3310 */ 3311 static inline void 3312 dp_rx_srng_access_end(struct dp_intr *int_ctx, struct dp_soc *soc, 3313 hal_ring_handle_t hal_ring_hdl) 3314 { 3315 hal_srng_access_end_unlocked(soc->hal_soc, hal_ring_hdl); 3316 } 3317 #else 3318 static inline int 3319 dp_rx_srng_access_start(struct dp_intr *int_ctx, struct dp_soc *soc, 3320 hal_ring_handle_t hal_ring_hdl) 3321 { 3322 return dp_srng_access_start(int_ctx, soc, hal_ring_hdl); 3323 } 3324 3325 static inline void 3326 dp_rx_srng_access_end(struct dp_intr *int_ctx, struct dp_soc *soc, 3327 hal_ring_handle_t hal_ring_hdl) 3328 { 3329 dp_srng_access_end(int_ctx, soc, hal_ring_hdl); 3330 } 3331 #endif 3332 3333 #ifdef RX_DESC_SANITY_WAR 3334 QDF_STATUS dp_rx_desc_sanity(struct dp_soc *soc, hal_soc_handle_t hal_soc, 3335 hal_ring_handle_t hal_ring_hdl, 3336 hal_ring_desc_t ring_desc, 3337 struct dp_rx_desc *rx_desc); 3338 #else 3339 static inline 3340 QDF_STATUS dp_rx_desc_sanity(struct dp_soc *soc, hal_soc_handle_t hal_soc, 3341 hal_ring_handle_t hal_ring_hdl, 3342 hal_ring_desc_t ring_desc, 3343 struct dp_rx_desc *rx_desc) 3344 { 3345 return QDF_STATUS_SUCCESS; 3346 } 3347 #endif 3348 3349 #ifdef RX_DESC_DEBUG_CHECK 3350 /** 3351 * dp_rx_desc_nbuf_sanity_check - Add sanity check to catch REO rx_desc paddr 3352 * corruption 3353 * @soc: DP SoC context 3354 * @ring_desc: REO ring descriptor 3355 * @rx_desc: Rx descriptor 3356 * 3357 * Return: NONE 3358 */ 3359 QDF_STATUS dp_rx_desc_nbuf_sanity_check(struct dp_soc *soc, 3360 hal_ring_desc_t ring_desc, 3361 struct dp_rx_desc *rx_desc); 3362 #else 3363 static inline 3364 QDF_STATUS dp_rx_desc_nbuf_sanity_check(struct dp_soc *soc, 3365 hal_ring_desc_t ring_desc, 3366 struct dp_rx_desc *rx_desc) 3367 { 3368 return QDF_STATUS_SUCCESS; 3369 } 3370 #endif 3371 #endif /* QCA_HOST_MODE_WIFI_DISABLED */ 3372 3373 /** 3374 * dp_rx_wbm_sg_list_reset() - Initialize sg list 3375 * 3376 * This api should be called at soc init and afterevery sg processing. 3377 *@soc: DP SOC handle 3378 */ 3379 static inline void dp_rx_wbm_sg_list_reset(struct dp_soc *soc) 3380 { 3381 if (soc) { 3382 soc->wbm_sg_param.wbm_is_first_msdu_in_sg = false; 3383 soc->wbm_sg_param.wbm_sg_nbuf_head = NULL; 3384 soc->wbm_sg_param.wbm_sg_nbuf_tail = NULL; 3385 soc->wbm_sg_param.wbm_sg_desc_msdu_len = 0; 3386 } 3387 } 3388 3389 /** 3390 * dp_rx_wbm_sg_list_deinit() - De-initialize sg list 3391 * 3392 * This api should be called in down path, to avoid any leak. 3393 *@soc: DP SOC handle 3394 */ 3395 static inline void dp_rx_wbm_sg_list_deinit(struct dp_soc *soc) 3396 { 3397 if (soc) { 3398 if (soc->wbm_sg_param.wbm_sg_nbuf_head) 3399 qdf_nbuf_list_free(soc->wbm_sg_param.wbm_sg_nbuf_head); 3400 3401 dp_rx_wbm_sg_list_reset(soc); 3402 } 3403 } 3404 3405 /** 3406 * dp_rx_link_desc_refill_duplicate_check() - check if link desc duplicate 3407 * to refill 3408 * @soc: DP SOC handle 3409 * @buf_info: the last link desc buf info 3410 * @ring_buf_info: current buf address pointor including link desc 3411 * 3412 * Return: none. 3413 */ 3414 void dp_rx_link_desc_refill_duplicate_check( 3415 struct dp_soc *soc, 3416 struct hal_buf_info *buf_info, 3417 hal_buff_addrinfo_t ring_buf_info); 3418 /** 3419 * dp_rx_srng_get_num_pending() - get number of pending entries 3420 * @hal_soc: hal soc opaque pointer 3421 * @hal_ring_hdl: opaque pointer to the HAL Rx Ring 3422 * @num_entries: number of entries in the hal_ring. 3423 * @near_full: pointer to a boolean. This is set if ring is near full. 3424 * 3425 * The function returns the number of entries in a destination ring which are 3426 * yet to be reaped. The function also checks if the ring is near full. 3427 * If more than half of the ring needs to be reaped, the ring is considered 3428 * approaching full. 3429 * The function uses hal_srng_dst_num_valid_locked to get the number of valid 3430 * entries. It should not be called within a SRNG lock. HW pointer value is 3431 * synced into cached_hp. 3432 * 3433 * Return: Number of pending entries if any 3434 */ 3435 uint32_t dp_rx_srng_get_num_pending(hal_soc_handle_t hal_soc, 3436 hal_ring_handle_t hal_ring_hdl, 3437 uint32_t num_entries, 3438 bool *near_full); 3439 3440 #ifdef WLAN_FEATURE_DP_RX_RING_HISTORY 3441 /** 3442 * dp_rx_ring_record_entry() - Record an entry into the rx ring history. 3443 * @soc: Datapath soc structure 3444 * @ring_num: REO ring number 3445 * @ring_desc: REO ring descriptor 3446 * 3447 * Return: None 3448 */ 3449 void dp_rx_ring_record_entry(struct dp_soc *soc, uint8_t ring_num, 3450 hal_ring_desc_t ring_desc); 3451 #else 3452 static inline void 3453 dp_rx_ring_record_entry(struct dp_soc *soc, uint8_t ring_num, 3454 hal_ring_desc_t ring_desc) 3455 { 3456 } 3457 #endif 3458 3459 #ifdef QCA_SUPPORT_WDS_EXTENDED 3460 /** 3461 * dp_rx_is_list_ready() - Make different lists for 4-address 3462 * and 3-address frames 3463 * @nbuf_head: skb list head 3464 * @vdev: vdev 3465 * @txrx_peer : txrx_peer 3466 * @peer_id: peer id of new received frame 3467 * @vdev_id: vdev_id of new received frame 3468 * 3469 * Return: true if peer_ids are different. 3470 */ 3471 static inline bool 3472 dp_rx_is_list_ready(qdf_nbuf_t nbuf_head, 3473 struct dp_vdev *vdev, 3474 struct dp_txrx_peer *txrx_peer, 3475 uint16_t peer_id, 3476 uint8_t vdev_id) 3477 { 3478 if (nbuf_head && txrx_peer && txrx_peer->peer_id != peer_id) 3479 return true; 3480 3481 return false; 3482 } 3483 #else 3484 static inline bool 3485 dp_rx_is_list_ready(qdf_nbuf_t nbuf_head, 3486 struct dp_vdev *vdev, 3487 struct dp_txrx_peer *txrx_peer, 3488 uint16_t peer_id, 3489 uint8_t vdev_id) 3490 { 3491 if (nbuf_head && vdev && (vdev->vdev_id != vdev_id)) 3492 return true; 3493 3494 return false; 3495 } 3496 #endif 3497 3498 #ifdef WLAN_FEATURE_MARK_FIRST_WAKEUP_PACKET 3499 /** 3500 * dp_rx_mark_first_packet_after_wow_wakeup - get first packet after wow wakeup 3501 * @pdev: pointer to dp_pdev structure 3502 * @rx_tlv: pointer to rx_pkt_tlvs structure 3503 * @nbuf: pointer to skb buffer 3504 * 3505 * Return: None 3506 */ 3507 void dp_rx_mark_first_packet_after_wow_wakeup(struct dp_pdev *pdev, 3508 uint8_t *rx_tlv, 3509 qdf_nbuf_t nbuf); 3510 #else 3511 static inline void 3512 dp_rx_mark_first_packet_after_wow_wakeup(struct dp_pdev *pdev, 3513 uint8_t *rx_tlv, 3514 qdf_nbuf_t nbuf) 3515 { 3516 } 3517 #endif 3518 3519 #else 3520 static inline QDF_STATUS 3521 dp_rx_link_desc_return_by_addr(struct dp_soc *soc, 3522 hal_buff_addrinfo_t link_desc_addr, 3523 uint8_t bm_action) 3524 { 3525 return QDF_STATUS_SUCCESS; 3526 } 3527 3528 static inline void dp_rx_wbm_sg_list_reset(struct dp_soc *soc) 3529 { 3530 } 3531 3532 static inline void dp_rx_wbm_sg_list_deinit(struct dp_soc *soc) 3533 { 3534 } 3535 3536 static inline uint32_t 3537 dp_rxdma_err_process(struct dp_intr *int_ctx, struct dp_soc *soc, 3538 uint32_t mac_id, uint32_t quota) 3539 { 3540 return 0; 3541 } 3542 #endif /* WLAN_SOFTUMAC_SUPPORT */ 3543 3544 #ifndef CONFIG_NBUF_AP_PLATFORM 3545 static inline uint8_t 3546 dp_rx_get_stats_arr_idx_from_link_id(qdf_nbuf_t nbuf, 3547 struct dp_txrx_peer *txrx_peer) 3548 { 3549 return QDF_NBUF_CB_RX_LOGICAL_LINK_ID(nbuf); 3550 } 3551 #else 3552 static inline uint8_t 3553 dp_rx_get_stats_arr_idx_from_link_id(qdf_nbuf_t nbuf, 3554 struct dp_txrx_peer *txrx_peer) 3555 { 3556 uint8_t link_id = 0; 3557 3558 link_id = (QDF_NBUF_CB_RX_HW_LINK_ID(nbuf) + 1); 3559 if (link_id > DP_MAX_MLO_LINKS) { 3560 link_id = 0; 3561 DP_PEER_PER_PKT_STATS_INC(txrx_peer, 3562 rx.inval_link_id_pkt_cnt, 3563 1, link_id); 3564 } 3565 3566 return link_id; 3567 } 3568 #endif /* CONFIG_NBUF_AP_PLATFORM */ 3569 3570 #endif /* _DP_RX_H */ 3571