1 /* 2 * Copyright (c) 2016-2021 The Linux Foundation. All rights reserved. 3 * Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved. 4 * 5 * Permission to use, copy, modify, and/or distribute this software for 6 * any purpose with or without fee is hereby granted, provided that the 7 * above copyright notice and this permission notice appear in all 8 * copies. 9 * 10 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL 11 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED 12 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE 13 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL 14 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR 15 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER 16 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR 17 * PERFORMANCE OF THIS SOFTWARE. 18 */ 19 20 #ifndef _DP_RX_H 21 #define _DP_RX_H 22 23 #include "hal_rx.h" 24 #include "dp_peer.h" 25 #include "dp_internal.h" 26 #include <qdf_tracepoint.h> 27 #include "dp_ipa.h" 28 29 #ifdef RXDMA_OPTIMIZATION 30 #ifndef RX_DATA_BUFFER_ALIGNMENT 31 #define RX_DATA_BUFFER_ALIGNMENT 128 32 #endif 33 #ifndef RX_MONITOR_BUFFER_ALIGNMENT 34 #define RX_MONITOR_BUFFER_ALIGNMENT 128 35 #endif 36 #else /* RXDMA_OPTIMIZATION */ 37 #define RX_DATA_BUFFER_ALIGNMENT 4 38 #define RX_MONITOR_BUFFER_ALIGNMENT 4 39 #endif /* RXDMA_OPTIMIZATION */ 40 41 #if defined(WLAN_MAX_PDEVS) && (WLAN_MAX_PDEVS == 1) 42 #define DP_WBM2SW_RBM(sw0_bm_id) HAL_RX_BUF_RBM_SW1_BM(sw0_bm_id) 43 /* RBM value used for re-injecting defragmented packets into REO */ 44 #define DP_DEFRAG_RBM(sw0_bm_id) HAL_RX_BUF_RBM_SW3_BM(sw0_bm_id) 45 #endif 46 47 #define RX_BUFFER_RESERVATION 0 48 #ifdef BE_PKTLOG_SUPPORT 49 #define BUFFER_RESIDUE 1 50 #define RX_MON_MIN_HEAD_ROOM 64 51 #endif 52 53 #define DP_DEFAULT_NOISEFLOOR (-96) 54 55 #define DP_RX_DESC_MAGIC 0xdec0de 56 57 #define dp_rx_alert(params...) 
QDF_TRACE_FATAL(QDF_MODULE_ID_DP_RX, params) 58 #define dp_rx_err(params...) QDF_TRACE_ERROR(QDF_MODULE_ID_DP_RX, params) 59 #define dp_rx_warn(params...) QDF_TRACE_WARN(QDF_MODULE_ID_DP_RX, params) 60 #define dp_rx_info(params...) \ 61 __QDF_TRACE_FL(QDF_TRACE_LEVEL_INFO_HIGH, QDF_MODULE_ID_DP_RX, ## params) 62 #define dp_rx_info_rl(params...) \ 63 __QDF_TRACE_RL(QDF_TRACE_LEVEL_INFO_HIGH, QDF_MODULE_ID_DP_RX, ## params) 64 #define dp_rx_debug(params...) QDF_TRACE_DEBUG(QDF_MODULE_ID_DP_RX, params) 65 66 /** 67 * enum dp_rx_desc_state 68 * 69 * @RX_DESC_REPLENISH: rx desc replenished 70 * @RX_DESC_FREELIST: rx desc in freelist 71 */ 72 enum dp_rx_desc_state { 73 RX_DESC_REPLENISHED, 74 RX_DESC_IN_FREELIST, 75 }; 76 77 #ifndef QCA_HOST_MODE_WIFI_DISABLED 78 /** 79 * struct dp_rx_desc_dbg_info 80 * 81 * @freelist_caller: name of the function that put the 82 * the rx desc in freelist 83 * @freelist_ts: timestamp when the rx desc is put in 84 * a freelist 85 * @replenish_caller: name of the function that last 86 * replenished the rx desc 87 * @replenish_ts: last replenish timestamp 88 * @prev_nbuf: previous nbuf info 89 * @prev_nbuf_data_addr: previous nbuf data address 90 */ 91 struct dp_rx_desc_dbg_info { 92 char freelist_caller[QDF_MEM_FUNC_NAME_SIZE]; 93 uint64_t freelist_ts; 94 char replenish_caller[QDF_MEM_FUNC_NAME_SIZE]; 95 uint64_t replenish_ts; 96 qdf_nbuf_t prev_nbuf; 97 uint8_t *prev_nbuf_data_addr; 98 }; 99 100 #endif /* QCA_HOST_MODE_WIFI_DISABLED */ 101 102 /** 103 * struct dp_rx_desc 104 * 105 * @nbuf : VA of the "skb" posted 106 * @rx_buf_start : VA of the original Rx buffer, before 107 * movement of any skb->data pointer 108 * @paddr_buf_start : PA of the original Rx buffer, before 109 * movement of any frag pointer 110 * @cookie : index into the sw array which holds 111 * the sw Rx descriptors 112 * Cookie space is 21 bits: 113 * lower 18 bits -- index 114 * upper 3 bits -- pool_id 115 * @pool_id : pool Id for which this allocated. 
 * Can only be used if there is no flow
 * steering
 * @chip_id : chip_id indicating MLO chip_id
 * valid or used only in case of multi-chip MLO
 * @magic : set to DP_RX_DESC_MAGIC for descriptor sanity checking
 * (RX_DESC_DEBUG_CHECK builds only)
 * @dbg_info : freelist/replenish caller and timestamp bookkeeping
 * (RX_DESC_DEBUG_CHECK builds only)
 * @in_use : rx_desc is in use
 * @unmapped : used to mark rx_desc an unmapped if the corresponding
 * nbuf is already unmapped
 * @in_err_state : Nbuf sanity failed for this descriptor.
 * @nbuf_data_addr : VA of nbuf data posted
 */
struct dp_rx_desc {
	qdf_nbuf_t nbuf;
	uint8_t *rx_buf_start;
	qdf_dma_addr_t paddr_buf_start;
	uint32_t cookie;
	uint8_t pool_id;
	uint8_t chip_id;
#ifdef RX_DESC_DEBUG_CHECK
	uint32_t magic;
	uint8_t *nbuf_data_addr;
	struct dp_rx_desc_dbg_info *dbg_info;
#endif
	uint8_t in_use:1,
		unmapped:1,
		in_err_state:1;
};

#ifndef QCA_HOST_MODE_WIFI_DISABLED
#ifdef ATH_RX_PRI_SAVE
/* Preserve the RX TID in the skb priority field for later QoS handling */
#define DP_RX_TID_SAVE(_nbuf, _tid) \
	(qdf_nbuf_set_priority(_nbuf, _tid))
#else
#define DP_RX_TID_SAVE(_nbuf, _tid)
#endif

/* RX Descriptor Multi Page memory alloc related.
 * Multi-page cookie layout (low to high):
 * [7:0] offset within page, [15:8] page id, [19:16] pool id.
 */
#define DP_RX_DESC_OFFSET_NUM_BITS 8
#define DP_RX_DESC_PAGE_ID_NUM_BITS 8
#define DP_RX_DESC_POOL_ID_NUM_BITS 4

#define DP_RX_DESC_PAGE_ID_SHIFT DP_RX_DESC_OFFSET_NUM_BITS
#define DP_RX_DESC_POOL_ID_SHIFT \
	(DP_RX_DESC_OFFSET_NUM_BITS + DP_RX_DESC_PAGE_ID_NUM_BITS)
#define RX_DESC_MULTI_PAGE_COOKIE_POOL_ID_MASK \
	(((1 << DP_RX_DESC_POOL_ID_NUM_BITS) - 1) << DP_RX_DESC_POOL_ID_SHIFT)
#define RX_DESC_MULTI_PAGE_COOKIE_PAGE_ID_MASK \
	(((1 << DP_RX_DESC_PAGE_ID_NUM_BITS) - 1) << \
	 DP_RX_DESC_PAGE_ID_SHIFT)
#define RX_DESC_MULTI_PAGE_COOKIE_OFFSET_MASK \
	((1 << DP_RX_DESC_OFFSET_NUM_BITS) - 1)
#define DP_RX_DESC_MULTI_PAGE_COOKIE_GET_POOL_ID(_cookie) \
	(((_cookie) & RX_DESC_MULTI_PAGE_COOKIE_POOL_ID_MASK) >> \
	 DP_RX_DESC_POOL_ID_SHIFT)
#define DP_RX_DESC_MULTI_PAGE_COOKIE_GET_PAGE_ID(_cookie) \
	(((_cookie) & RX_DESC_MULTI_PAGE_COOKIE_PAGE_ID_MASK) >> \
	 DP_RX_DESC_PAGE_ID_SHIFT)
#define DP_RX_DESC_MULTI_PAGE_COOKIE_GET_OFFSET(_cookie) \
	((_cookie) & RX_DESC_MULTI_PAGE_COOKIE_OFFSET_MASK)

#endif /* QCA_HOST_MODE_WIFI_DISABLED */

/* Legacy (non multi-page) cookie layout: [17:0] index, [20:18] pool id */
#define RX_DESC_COOKIE_INDEX_SHIFT 0
#define RX_DESC_COOKIE_INDEX_MASK 0x3ffff /* 18 bits */
#define RX_DESC_COOKIE_POOL_ID_SHIFT 18
#define RX_DESC_COOKIE_POOL_ID_MASK 0x1c0000

#define DP_RX_DESC_COOKIE_MAX \
	(RX_DESC_COOKIE_INDEX_MASK | RX_DESC_COOKIE_POOL_ID_MASK)

#define DP_RX_DESC_COOKIE_POOL_ID_GET(_cookie) \
	(((_cookie) & RX_DESC_COOKIE_POOL_ID_MASK) >> \
	 RX_DESC_COOKIE_POOL_ID_SHIFT)

#define DP_RX_DESC_COOKIE_INDEX_GET(_cookie) \
	(((_cookie) & RX_DESC_COOKIE_INDEX_MASK) >> \
	 RX_DESC_COOKIE_INDEX_SHIFT)

/* Record the caller (__func__) when descriptors move to/from the freelist
 * so RX_DESC_DEBUG_CHECK builds can attribute descriptor state changes.
 */
#define dp_rx_add_to_free_desc_list(head, tail, new) \
	__dp_rx_add_to_free_desc_list(head, tail, new, __func__)

#define dp_rx_buffers_replenish(soc, mac_id, rxdma_srng, rx_desc_pool, \
				num_buffers, desc_list, tail) \
	__dp_rx_buffers_replenish(soc, mac_id, rxdma_srng, rx_desc_pool, \
				  num_buffers, desc_list, tail, __func__)

#ifdef WLAN_SUPPORT_RX_FISA
/**
 * dp_rx_set_hdr_pad() - set l3 padding in nbuf cb
 * @nbuf: pkt skb pointer
 * @l3_padding: l3 padding
 *
 * Return: None
 */
static inline
void dp_rx_set_hdr_pad(qdf_nbuf_t nbuf, uint32_t l3_padding)
{
	QDF_NBUF_CB_RX_PACKET_L3_HDR_PAD(nbuf) = l3_padding;
}
#else
/* FISA disabled: l3 padding is not tracked in the nbuf cb */
static inline
void dp_rx_set_hdr_pad(qdf_nbuf_t nbuf, uint32_t l3_padding)
{
}
#endif

#ifdef DP_RX_SPECIAL_FRAME_NEED
/**
 * dp_rx_is_special_frame() - check is RX frame special needed
 * @nbuf: RX skb pointer
 * @frame_mask: the mask for special frame needed
 *
 * Check is RX frame wanted matched with mask
 *
 * Return: true - special frame needed, false - no
 */
static inline
bool dp_rx_is_special_frame(qdf_nbuf_t nbuf, uint32_t frame_mask)
{
	if (((frame_mask & FRAME_MASK_IPV4_ARP) &&
	     qdf_nbuf_is_ipv4_arp_pkt(nbuf)) ||
	    ((frame_mask & FRAME_MASK_IPV4_DHCP) &&
	     qdf_nbuf_is_ipv4_dhcp_pkt(nbuf)) ||
	    ((frame_mask & FRAME_MASK_IPV4_EAPOL) &&
	     qdf_nbuf_is_ipv4_eapol_pkt(nbuf)) ||
	    ((frame_mask & FRAME_MASK_IPV6_DHCP) &&
	     qdf_nbuf_is_ipv6_dhcp_pkt(nbuf)))
		return true;

	return false;
}

/**
 * dp_rx_deliver_special_frame() - Deliver the RX special frame to stack
 *				   if matches mask
 * @soc: Datapath soc handler
 * @peer: pointer to DP peer
 * @nbuf: pointer to the skb of RX frame
 * @frame_mask: the mask for special frame needed
 * @rx_tlv_hdr: start of rx tlv header
 *
 * note: Msdu_len must have been stored in QDF_NBUF_CB_RX_PKT_LEN(nbuf) and
 * single nbuf is expected.
 *
 * return: true - nbuf has been delivered to stack, false - not.
 */
bool dp_rx_deliver_special_frame(struct dp_soc *soc, struct dp_txrx_peer *peer,
				 qdf_nbuf_t nbuf, uint32_t frame_mask,
				 uint8_t *rx_tlv_hdr);
#else
static inline
bool dp_rx_is_special_frame(qdf_nbuf_t nbuf, uint32_t frame_mask)
{
	return false;
}

static inline
bool dp_rx_deliver_special_frame(struct dp_soc *soc, struct dp_txrx_peer *peer,
				 qdf_nbuf_t nbuf, uint32_t frame_mask,
				 uint8_t *rx_tlv_hdr)
{
	return false;
}
#endif

#ifdef FEATURE_RX_LINKSPEED_ROAM_TRIGGER
/**
 * dp_rx_data_is_specific() - Used to exclude specific frames
 *			      not practical for getting rx
 *			      stats like rate, mcs, nss, etc.
 * @hal_soc_hdl: soc handler
 * @rx_tlv_hdr: rx tlv header
 * @nbuf: RX skb pointer
 *
 * Return: true - a specific frame not suitable
 *	   for getting rx stats from it.
 *	   false - a common frame suitable for
 *	   getting rx stats from it.
 */
static inline
bool dp_rx_data_is_specific(hal_soc_handle_t hal_soc_hdl,
			    uint8_t *rx_tlv_hdr,
			    qdf_nbuf_t nbuf)
{
	/* multicast/broadcast destined frames are excluded */
	if (qdf_unlikely(qdf_nbuf_is_da_mcbc(nbuf)))
		return true;

	/* only the first MPDU of a PPDU carries usable rate info */
	if (!hal_rx_tlv_first_mpdu_get(hal_soc_hdl, rx_tlv_hdr))
		return true;

	if (!hal_rx_msdu_end_first_msdu_get(hal_soc_hdl, rx_tlv_hdr))
		return true;

	/* ARP, EAPOL is neither IPV6 ETH nor IPV4 ETH from L3 level */
	if (qdf_likely(hal_rx_tlv_l3_type_get(hal_soc_hdl, rx_tlv_hdr) ==
		       QDF_NBUF_TRAC_IPV4_ETH_TYPE)) {
		if (qdf_nbuf_is_ipv4_dhcp_pkt(nbuf))
			return true;
	} else if (qdf_likely(hal_rx_tlv_l3_type_get(hal_soc_hdl, rx_tlv_hdr) ==
			      QDF_NBUF_TRAC_IPV6_ETH_TYPE)) {
		if (qdf_nbuf_is_ipv6_dhcp_pkt(nbuf))
			return true;
	} else {
		/* neither IPv4 nor IPv6 at L3: treat as specific */
		return true;
	}
	return false;
}
#else
static inline
bool dp_rx_data_is_specific(hal_soc_handle_t hal_soc_hdl,
			    uint8_t *rx_tlv_hdr,
			    qdf_nbuf_t nbuf)

{
	/*
	 * default return is true to make sure that rx stats
	 * will not be handled when this feature is disabled
	 */
	return true;
}
#endif /* FEATURE_RX_LINKSPEED_ROAM_TRIGGER */

#ifndef QCA_HOST_MODE_WIFI_DISABLED
#ifdef DP_RX_DISABLE_NDI_MDNS_FORWARDING
/**
 * dp_rx_check_ndi_mdns_fwding() - decide whether an intra-BSS frame may be
 *				   forwarded, filtering mDNS on NDI vdevs
 * @ta_txrx_peer: transmitting (ta) txrx peer handle
 * @nbuf: RX skb pointer
 *
 * When the ta peer's vdev operates in NAN data interface (NDI) mode and the
 * frame is an IPv6 mDNS packet, forwarding is refused and the
 * rx.intra_bss.mdns_no_fwd peer stat is incremented.
 *
 * Return: false - mDNS frame on an NDI vdev, do not forward
 *	   true - forwarding allowed
 */
static inline
bool dp_rx_check_ndi_mdns_fwding(struct dp_txrx_peer *ta_txrx_peer,
				 qdf_nbuf_t nbuf)
{
	if (ta_txrx_peer->vdev->opmode == wlan_op_mode_ndi &&
	    qdf_nbuf_is_ipv6_mdns_pkt(nbuf)) {
		DP_PEER_PER_PKT_STATS_INC(ta_txrx_peer,
					  rx.intra_bss.mdns_no_fwd, 1);
		return false;
	}
	return true;
}
#else
/* mDNS filtering disabled: always allow forwarding */
static inline
bool dp_rx_check_ndi_mdns_fwding(struct dp_txrx_peer *ta_txrx_peer,
				 qdf_nbuf_t nbuf)
{
	return true;
}
#endif
#endif /* QCA_HOST_MODE_WIFI_DISABLED */

/* DOC: Offset to obtain LLC hdr
 *
 * In the case of Wifi parse error
 * to reach LLC header from beginning
 * of VLAN tag we need to skip 8 bytes.
369 * Vlan_tag(4)+length(2)+length added 370 * by HW(2) = 8 bytes. 371 */ 372 #define DP_SKIP_VLAN 8 373 374 #ifndef QCA_HOST_MODE_WIFI_DISABLED 375 376 /** 377 * struct dp_rx_cached_buf - rx cached buffer 378 * @list: linked list node 379 * @buf: skb buffer 380 */ 381 struct dp_rx_cached_buf { 382 qdf_list_node_t node; 383 qdf_nbuf_t buf; 384 }; 385 386 #endif /* QCA_HOST_MODE_WIFI_DISABLED */ 387 388 /* 389 *dp_rx_xor_block() - xor block of data 390 *@b: destination data block 391 *@a: source data block 392 *@len: length of the data to process 393 * 394 *Returns: None 395 */ 396 static inline void dp_rx_xor_block(uint8_t *b, const uint8_t *a, qdf_size_t len) 397 { 398 qdf_size_t i; 399 400 for (i = 0; i < len; i++) 401 b[i] ^= a[i]; 402 } 403 404 /* 405 *dp_rx_rotl() - rotate the bits left 406 *@val: unsigned integer input value 407 *@bits: number of bits 408 * 409 *Returns: Integer with left rotated by number of 'bits' 410 */ 411 static inline uint32_t dp_rx_rotl(uint32_t val, int bits) 412 { 413 return (val << bits) | (val >> (32 - bits)); 414 } 415 416 /* 417 *dp_rx_rotr() - rotate the bits right 418 *@val: unsigned integer input value 419 *@bits: number of bits 420 * 421 *Returns: Integer with right rotated by number of 'bits' 422 */ 423 static inline uint32_t dp_rx_rotr(uint32_t val, int bits) 424 { 425 return (val >> bits) | (val << (32 - bits)); 426 } 427 428 /* 429 * dp_set_rx_queue() - set queue_mapping in skb 430 * @nbuf: skb 431 * @queue_id: rx queue_id 432 * 433 * Return: void 434 */ 435 #ifdef QCA_OL_RX_MULTIQ_SUPPORT 436 static inline void dp_set_rx_queue(qdf_nbuf_t nbuf, uint8_t queue_id) 437 { 438 qdf_nbuf_record_rx_queue(nbuf, queue_id); 439 return; 440 } 441 #else 442 static inline void dp_set_rx_queue(qdf_nbuf_t nbuf, uint8_t queue_id) 443 { 444 } 445 #endif 446 447 /* 448 *dp_rx_xswap() - swap the bits left 449 *@val: unsigned integer input value 450 * 451 *Returns: Integer with bits swapped 452 */ 453 static inline uint32_t 
dp_rx_xswap(uint32_t val) 454 { 455 return ((val & 0x00ff00ff) << 8) | ((val & 0xff00ff00) >> 8); 456 } 457 458 /* 459 *dp_rx_get_le32_split() - get little endian 32 bits split 460 *@b0: byte 0 461 *@b1: byte 1 462 *@b2: byte 2 463 *@b3: byte 3 464 * 465 *Returns: Integer with split little endian 32 bits 466 */ 467 static inline uint32_t dp_rx_get_le32_split(uint8_t b0, uint8_t b1, uint8_t b2, 468 uint8_t b3) 469 { 470 return b0 | (b1 << 8) | (b2 << 16) | (b3 << 24); 471 } 472 473 /* 474 *dp_rx_get_le32() - get little endian 32 bits 475 *@b0: byte 0 476 *@b1: byte 1 477 *@b2: byte 2 478 *@b3: byte 3 479 * 480 *Returns: Integer with little endian 32 bits 481 */ 482 static inline uint32_t dp_rx_get_le32(const uint8_t *p) 483 { 484 return dp_rx_get_le32_split(p[0], p[1], p[2], p[3]); 485 } 486 487 /* 488 * dp_rx_put_le32() - put little endian 32 bits 489 * @p: destination char array 490 * @v: source 32-bit integer 491 * 492 * Returns: None 493 */ 494 static inline void dp_rx_put_le32(uint8_t *p, uint32_t v) 495 { 496 p[0] = (v) & 0xff; 497 p[1] = (v >> 8) & 0xff; 498 p[2] = (v >> 16) & 0xff; 499 p[3] = (v >> 24) & 0xff; 500 } 501 502 /* Extract michal mic block of data */ 503 #define dp_rx_michael_block(l, r) \ 504 do { \ 505 r ^= dp_rx_rotl(l, 17); \ 506 l += r; \ 507 r ^= dp_rx_xswap(l); \ 508 l += r; \ 509 r ^= dp_rx_rotl(l, 3); \ 510 l += r; \ 511 r ^= dp_rx_rotr(l, 2); \ 512 l += r; \ 513 } while (0) 514 515 /** 516 * struct dp_rx_desc_list_elem_t 517 * 518 * @next : Next pointer to form free list 519 * @rx_desc : DP Rx descriptor 520 */ 521 union dp_rx_desc_list_elem_t { 522 union dp_rx_desc_list_elem_t *next; 523 struct dp_rx_desc rx_desc; 524 }; 525 526 #ifdef RX_DESC_MULTI_PAGE_ALLOC 527 /** 528 * dp_rx_desc_find() - find dp rx descriptor from page ID and offset 529 * @page_id: Page ID 530 * @offset: Offset of the descriptor element 531 * 532 * Return: RX descriptor element 533 */ 534 union dp_rx_desc_list_elem_t *dp_rx_desc_find(uint16_t page_id, uint16_t 
offset, 535 struct rx_desc_pool *rx_pool); 536 537 static inline 538 struct dp_rx_desc *dp_get_rx_desc_from_cookie(struct dp_soc *soc, 539 struct rx_desc_pool *pool, 540 uint32_t cookie) 541 { 542 uint8_t pool_id = DP_RX_DESC_MULTI_PAGE_COOKIE_GET_POOL_ID(cookie); 543 uint16_t page_id = DP_RX_DESC_MULTI_PAGE_COOKIE_GET_PAGE_ID(cookie); 544 uint8_t offset = DP_RX_DESC_MULTI_PAGE_COOKIE_GET_OFFSET(cookie); 545 struct rx_desc_pool *rx_desc_pool; 546 union dp_rx_desc_list_elem_t *rx_desc_elem; 547 548 if (qdf_unlikely(pool_id >= MAX_PDEV_CNT)) 549 return NULL; 550 551 rx_desc_pool = &pool[pool_id]; 552 rx_desc_elem = (union dp_rx_desc_list_elem_t *) 553 (rx_desc_pool->desc_pages.cacheable_pages[page_id] + 554 rx_desc_pool->elem_size * offset); 555 556 return &rx_desc_elem->rx_desc; 557 } 558 559 static inline 560 struct dp_rx_desc *dp_get_rx_mon_status_desc_from_cookie(struct dp_soc *soc, 561 struct rx_desc_pool *pool, 562 uint32_t cookie) 563 { 564 uint8_t pool_id = DP_RX_DESC_MULTI_PAGE_COOKIE_GET_POOL_ID(cookie); 565 uint16_t page_id = DP_RX_DESC_MULTI_PAGE_COOKIE_GET_PAGE_ID(cookie); 566 uint8_t offset = DP_RX_DESC_MULTI_PAGE_COOKIE_GET_OFFSET(cookie); 567 struct rx_desc_pool *rx_desc_pool; 568 union dp_rx_desc_list_elem_t *rx_desc_elem; 569 570 if (qdf_unlikely(pool_id >= NUM_RXDMA_RINGS_PER_PDEV)) 571 return NULL; 572 573 rx_desc_pool = &pool[pool_id]; 574 rx_desc_elem = (union dp_rx_desc_list_elem_t *) 575 (rx_desc_pool->desc_pages.cacheable_pages[page_id] + 576 rx_desc_pool->elem_size * offset); 577 578 return &rx_desc_elem->rx_desc; 579 } 580 581 /** 582 * dp_rx_cookie_2_va_rxdma_buf() - Converts cookie to a virtual address of 583 * the Rx descriptor on Rx DMA source ring buffer 584 * @soc: core txrx main context 585 * @cookie: cookie used to lookup virtual address 586 * 587 * Return: Pointer to the Rx descriptor 588 */ 589 static inline 590 struct dp_rx_desc *dp_rx_cookie_2_va_rxdma_buf(struct dp_soc *soc, 591 uint32_t cookie) 592 { 593 return 
dp_get_rx_desc_from_cookie(soc, &soc->rx_desc_buf[0], cookie); 594 } 595 596 /** 597 * dp_rx_cookie_2_va_mon_buf() - Converts cookie to a virtual address of 598 * the Rx descriptor on monitor ring buffer 599 * @soc: core txrx main context 600 * @cookie: cookie used to lookup virtual address 601 * 602 * Return: Pointer to the Rx descriptor 603 */ 604 static inline 605 struct dp_rx_desc *dp_rx_cookie_2_va_mon_buf(struct dp_soc *soc, 606 uint32_t cookie) 607 { 608 return dp_get_rx_desc_from_cookie(soc, &soc->rx_desc_mon[0], cookie); 609 } 610 611 /** 612 * dp_rx_cookie_2_va_mon_status() - Converts cookie to a virtual address of 613 * the Rx descriptor on monitor status ring buffer 614 * @soc: core txrx main context 615 * @cookie: cookie used to lookup virtual address 616 * 617 * Return: Pointer to the Rx descriptor 618 */ 619 static inline 620 struct dp_rx_desc *dp_rx_cookie_2_va_mon_status(struct dp_soc *soc, 621 uint32_t cookie) 622 { 623 return dp_get_rx_mon_status_desc_from_cookie(soc, 624 &soc->rx_desc_status[0], 625 cookie); 626 } 627 #else 628 629 void dp_rx_desc_pool_init(struct dp_soc *soc, uint32_t pool_id, 630 uint32_t pool_size, 631 struct rx_desc_pool *rx_desc_pool); 632 633 /** 634 * dp_rx_cookie_2_va_rxdma_buf() - Converts cookie to a virtual address of 635 * the Rx descriptor on Rx DMA source ring buffer 636 * @soc: core txrx main context 637 * @cookie: cookie used to lookup virtual address 638 * 639 * Return: void *: Virtual Address of the Rx descriptor 640 */ 641 static inline 642 void *dp_rx_cookie_2_va_rxdma_buf(struct dp_soc *soc, uint32_t cookie) 643 { 644 uint8_t pool_id = DP_RX_DESC_COOKIE_POOL_ID_GET(cookie); 645 uint16_t index = DP_RX_DESC_COOKIE_INDEX_GET(cookie); 646 struct rx_desc_pool *rx_desc_pool; 647 648 if (qdf_unlikely(pool_id >= MAX_RXDESC_POOLS)) 649 return NULL; 650 651 rx_desc_pool = &soc->rx_desc_buf[pool_id]; 652 653 if (qdf_unlikely(index >= rx_desc_pool->pool_size)) 654 return NULL; 655 656 return 
&rx_desc_pool->array[index].rx_desc; 657 } 658 659 /** 660 * dp_rx_cookie_2_va_mon_buf() - Converts cookie to a virtual address of 661 * the Rx descriptor on monitor ring buffer 662 * @soc: core txrx main context 663 * @cookie: cookie used to lookup virtual address 664 * 665 * Return: void *: Virtual Address of the Rx descriptor 666 */ 667 static inline 668 void *dp_rx_cookie_2_va_mon_buf(struct dp_soc *soc, uint32_t cookie) 669 { 670 uint8_t pool_id = DP_RX_DESC_COOKIE_POOL_ID_GET(cookie); 671 uint16_t index = DP_RX_DESC_COOKIE_INDEX_GET(cookie); 672 /* TODO */ 673 /* Add sanity for pool_id & index */ 674 return &(soc->rx_desc_mon[pool_id].array[index].rx_desc); 675 } 676 677 /** 678 * dp_rx_cookie_2_va_mon_status() - Converts cookie to a virtual address of 679 * the Rx descriptor on monitor status ring buffer 680 * @soc: core txrx main context 681 * @cookie: cookie used to lookup virtual address 682 * 683 * Return: void *: Virtual Address of the Rx descriptor 684 */ 685 static inline 686 void *dp_rx_cookie_2_va_mon_status(struct dp_soc *soc, uint32_t cookie) 687 { 688 uint8_t pool_id = DP_RX_DESC_COOKIE_POOL_ID_GET(cookie); 689 uint16_t index = DP_RX_DESC_COOKIE_INDEX_GET(cookie); 690 /* TODO */ 691 /* Add sanity for pool_id & index */ 692 return &(soc->rx_desc_status[pool_id].array[index].rx_desc); 693 } 694 #endif /* RX_DESC_MULTI_PAGE_ALLOC */ 695 696 #ifndef QCA_HOST_MODE_WIFI_DISABLED 697 698 static inline bool dp_rx_check_ap_bridge(struct dp_vdev *vdev) 699 { 700 return vdev->ap_bridge_enabled; 701 } 702 703 #ifdef DP_RX_DESC_COOKIE_INVALIDATE 704 static inline QDF_STATUS 705 dp_rx_cookie_check_and_invalidate(hal_ring_desc_t ring_desc) 706 { 707 if (qdf_unlikely(HAL_RX_REO_BUF_COOKIE_INVALID_GET(ring_desc))) 708 return QDF_STATUS_E_FAILURE; 709 710 HAL_RX_REO_BUF_COOKIE_INVALID_SET(ring_desc); 711 return QDF_STATUS_SUCCESS; 712 } 713 714 /** 715 * dp_rx_cookie_reset_invalid_bit() - Reset the invalid bit of the cookie 716 * field in ring descriptor 717 * 
 * @ring_desc: ring descriptor
 *
 * Return: None
 */
static inline void
dp_rx_cookie_reset_invalid_bit(hal_ring_desc_t ring_desc)
{
	HAL_RX_REO_BUF_COOKIE_INVALID_RESET(ring_desc);
}
#else
/* Cookie invalidation disabled: every cookie is treated as fresh */
static inline QDF_STATUS
dp_rx_cookie_check_and_invalidate(hal_ring_desc_t ring_desc)
{
	return QDF_STATUS_SUCCESS;
}

static inline void
dp_rx_cookie_reset_invalid_bit(hal_ring_desc_t ring_desc)
{
}
#endif

#endif /* QCA_HOST_MODE_WIFI_DISABLED */

#if defined(RX_DESC_MULTI_PAGE_ALLOC) && \
	defined(DP_WAR_VALIDATE_RX_ERR_MSDU_COOKIE)
/**
 * dp_rx_is_sw_cookie_valid() - check whether SW cookie valid
 * @soc: dp soc ref
 * @cookie: Rx buf SW cookie value
 *
 * Validates the cookie's pool id, page id and in-page offset against the
 * corresponding rx_desc_buf pool; bumps rx.err.invalid_cookie on failure.
 *
 * Return: true if cookie is valid else false
 */
static inline bool dp_rx_is_sw_cookie_valid(struct dp_soc *soc,
					    uint32_t cookie)
{
	uint8_t pool_id = DP_RX_DESC_MULTI_PAGE_COOKIE_GET_POOL_ID(cookie);
	uint16_t page_id = DP_RX_DESC_MULTI_PAGE_COOKIE_GET_PAGE_ID(cookie);
	uint8_t offset = DP_RX_DESC_MULTI_PAGE_COOKIE_GET_OFFSET(cookie);
	struct rx_desc_pool *rx_desc_pool;

	/* NOTE(review): the bound used here is MAX_PDEV_CNT although
	 * soc->rx_desc_buf[] is indexed — confirm MAX_PDEV_CNT does not
	 * exceed the rx_desc_buf array size in dp_types.h.
	 */
	if (qdf_unlikely(pool_id >= MAX_PDEV_CNT))
		goto fail;

	rx_desc_pool = &soc->rx_desc_buf[pool_id];

	if (page_id >= rx_desc_pool->desc_pages.num_pages ||
	    offset >= rx_desc_pool->desc_pages.num_element_per_page)
		goto fail;

	return true;

fail:
	DP_STATS_INC(soc, rx.err.invalid_cookie, 1);
	return false;
}
#else
/**
 * dp_rx_is_sw_cookie_valid() - check whether SW cookie valid
 * @soc: dp soc ref
 * @cookie: Rx buf SW cookie value
 *
 * When multi page alloc is disabled SW cookie validness is
 * checked while fetching Rx descriptor, so no need to check here
 *
 * Return: true if cookie is valid else false
 */
static inline bool dp_rx_is_sw_cookie_valid(struct dp_soc *soc,
					    uint32_t cookie)
{
	return true;
}
#endif

QDF_STATUS dp_rx_desc_pool_is_allocated(struct rx_desc_pool *rx_desc_pool); 791 QDF_STATUS dp_rx_desc_pool_alloc(struct dp_soc *soc, 792 uint32_t pool_size, 793 struct rx_desc_pool *rx_desc_pool); 794 795 void dp_rx_desc_pool_init(struct dp_soc *soc, uint32_t pool_id, 796 uint32_t pool_size, 797 struct rx_desc_pool *rx_desc_pool); 798 799 void dp_rx_add_desc_list_to_free_list(struct dp_soc *soc, 800 union dp_rx_desc_list_elem_t **local_desc_list, 801 union dp_rx_desc_list_elem_t **tail, 802 uint16_t pool_id, 803 struct rx_desc_pool *rx_desc_pool); 804 805 uint16_t dp_rx_get_free_desc_list(struct dp_soc *soc, uint32_t pool_id, 806 struct rx_desc_pool *rx_desc_pool, 807 uint16_t num_descs, 808 union dp_rx_desc_list_elem_t **desc_list, 809 union dp_rx_desc_list_elem_t **tail); 810 811 QDF_STATUS dp_rx_pdev_desc_pool_alloc(struct dp_pdev *pdev); 812 void dp_rx_pdev_desc_pool_free(struct dp_pdev *pdev); 813 814 QDF_STATUS dp_rx_pdev_desc_pool_init(struct dp_pdev *pdev); 815 void dp_rx_pdev_desc_pool_deinit(struct dp_pdev *pdev); 816 void dp_rx_desc_pool_deinit(struct dp_soc *soc, 817 struct rx_desc_pool *rx_desc_pool, 818 uint32_t pool_id); 819 820 QDF_STATUS dp_rx_pdev_attach(struct dp_pdev *pdev); 821 QDF_STATUS dp_rx_pdev_buffers_alloc(struct dp_pdev *pdev); 822 void dp_rx_pdev_buffers_free(struct dp_pdev *pdev); 823 824 void dp_rx_pdev_detach(struct dp_pdev *pdev); 825 826 void dp_print_napi_stats(struct dp_soc *soc); 827 828 /** 829 * dp_rx_vdev_detach() - detach vdev from dp rx 830 * @vdev: virtual device instance 831 * 832 * Return: QDF_STATUS_SUCCESS: success 833 * QDF_STATUS_E_RESOURCES: Error return 834 */ 835 QDF_STATUS dp_rx_vdev_detach(struct dp_vdev *vdev); 836 837 #ifndef QCA_HOST_MODE_WIFI_DISABLED 838 839 uint32_t 840 dp_rx_process(struct dp_intr *int_ctx, hal_ring_handle_t hal_ring_hdl, 841 uint8_t reo_ring_num, 842 uint32_t quota); 843 844 /** 845 * dp_rx_err_process() - Processes error frames routed to REO error ring 846 * @int_ctx: pointer to DP 
interrupt context 847 * @soc: core txrx main context 848 * @hal_ring: opaque pointer to the HAL Rx Error Ring, which will be serviced 849 * @quota: No. of units (packets) that can be serviced in one shot. 850 * 851 * This function implements error processing and top level demultiplexer 852 * for all the frames routed to REO error ring. 853 * 854 * Return: uint32_t: No. of elements processed 855 */ 856 uint32_t dp_rx_err_process(struct dp_intr *int_ctx, struct dp_soc *soc, 857 hal_ring_handle_t hal_ring_hdl, uint32_t quota); 858 859 /** 860 * dp_rx_wbm_err_process() - Processes error frames routed to WBM release ring 861 * @int_ctx: pointer to DP interrupt context 862 * @soc: core txrx main context 863 * @hal_ring: opaque pointer to the HAL Rx Error Ring, which will be serviced 864 * @quota: No. of units (packets) that can be serviced in one shot. 865 * 866 * This function implements error processing and top level demultiplexer 867 * for all the frames routed to WBM2HOST sw release ring. 868 * 869 * Return: uint32_t: No. of elements processed 870 */ 871 uint32_t 872 dp_rx_wbm_err_process(struct dp_intr *int_ctx, struct dp_soc *soc, 873 hal_ring_handle_t hal_ring_hdl, uint32_t quota); 874 875 /** 876 * dp_rx_sg_create() - create a frag_list for MSDUs which are spread across 877 * multiple nbufs. 878 * @soc: core txrx main context 879 * @nbuf: pointer to the first msdu of an amsdu. 880 * 881 * This function implements the creation of RX frag_list for cases 882 * where an MSDU is spread across multiple nbufs. 883 * 884 * Return: returns the head nbuf which contains complete frag_list. 885 */ 886 qdf_nbuf_t dp_rx_sg_create(struct dp_soc *soc, qdf_nbuf_t nbuf); 887 888 889 /* 890 * dp_rx_desc_nbuf_and_pool_free() - free the sw rx desc pool called during 891 * de-initialization of wifi module. 
892 * 893 * @soc: core txrx main context 894 * @pool_id: pool_id which is one of 3 mac_ids 895 * @rx_desc_pool: rx descriptor pool pointer 896 * 897 * Return: None 898 */ 899 void dp_rx_desc_nbuf_and_pool_free(struct dp_soc *soc, uint32_t pool_id, 900 struct rx_desc_pool *rx_desc_pool); 901 902 #endif /* QCA_HOST_MODE_WIFI_DISABLED */ 903 904 /* 905 * dp_rx_desc_nbuf_free() - free the sw rx desc nbufs called during 906 * de-initialization of wifi module. 907 * 908 * @soc: core txrx main context 909 * @pool_id: pool_id which is one of 3 mac_ids 910 * @rx_desc_pool: rx descriptor pool pointer 911 * 912 * Return: None 913 */ 914 void dp_rx_desc_nbuf_free(struct dp_soc *soc, 915 struct rx_desc_pool *rx_desc_pool); 916 917 #ifdef DP_RX_MON_MEM_FRAG 918 /* 919 * dp_rx_desc_frag_free() - free the sw rx desc frag called during 920 * de-initialization of wifi module. 921 * 922 * @soc: core txrx main context 923 * @rx_desc_pool: rx descriptor pool pointer 924 * 925 * Return: None 926 */ 927 void dp_rx_desc_frag_free(struct dp_soc *soc, 928 struct rx_desc_pool *rx_desc_pool); 929 #else 930 static inline 931 void dp_rx_desc_frag_free(struct dp_soc *soc, 932 struct rx_desc_pool *rx_desc_pool) 933 { 934 } 935 #endif 936 /* 937 * dp_rx_desc_pool_free() - free the sw rx desc array called during 938 * de-initialization of wifi module. 
939 * 940 * @soc: core txrx main context 941 * @rx_desc_pool: rx descriptor pool pointer 942 * 943 * Return: None 944 */ 945 void dp_rx_desc_pool_free(struct dp_soc *soc, 946 struct rx_desc_pool *rx_desc_pool); 947 948 void dp_rx_deliver_raw(struct dp_vdev *vdev, qdf_nbuf_t nbuf_list, 949 struct dp_txrx_peer *peer); 950 951 #ifdef RX_DESC_LOGGING 952 /* 953 * dp_rx_desc_alloc_dbg_info() - Alloc memory for rx descriptor debug 954 * structure 955 * @rx_desc: rx descriptor pointer 956 * 957 * Return: None 958 */ 959 static inline 960 void dp_rx_desc_alloc_dbg_info(struct dp_rx_desc *rx_desc) 961 { 962 rx_desc->dbg_info = qdf_mem_malloc(sizeof(struct dp_rx_desc_dbg_info)); 963 } 964 965 /* 966 * dp_rx_desc_free_dbg_info() - Free rx descriptor debug 967 * structure memory 968 * @rx_desc: rx descriptor pointer 969 * 970 * Return: None 971 */ 972 static inline 973 void dp_rx_desc_free_dbg_info(struct dp_rx_desc *rx_desc) 974 { 975 qdf_mem_free(rx_desc->dbg_info); 976 } 977 978 /* 979 * dp_rx_desc_update_dbg_info() - Update rx descriptor debug info 980 * structure memory 981 * @rx_desc: rx descriptor pointer 982 * 983 * Return: None 984 */ 985 static 986 void dp_rx_desc_update_dbg_info(struct dp_rx_desc *rx_desc, 987 const char *func_name, uint8_t flag) 988 { 989 struct dp_rx_desc_dbg_info *info = rx_desc->dbg_info; 990 991 if (!info) 992 return; 993 994 if (flag == RX_DESC_REPLENISHED) { 995 qdf_str_lcopy(info->replenish_caller, func_name, 996 QDF_MEM_FUNC_NAME_SIZE); 997 info->replenish_ts = qdf_get_log_timestamp(); 998 } else { 999 qdf_str_lcopy(info->freelist_caller, func_name, 1000 QDF_MEM_FUNC_NAME_SIZE); 1001 info->freelist_ts = qdf_get_log_timestamp(); 1002 info->prev_nbuf = rx_desc->nbuf; 1003 info->prev_nbuf_data_addr = rx_desc->nbuf_data_addr; 1004 rx_desc->nbuf_data_addr = NULL; 1005 } 1006 } 1007 #else 1008 1009 static inline 1010 void dp_rx_desc_alloc_dbg_info(struct dp_rx_desc *rx_desc) 1011 { 1012 } 1013 1014 static inline 1015 void 
dp_rx_desc_free_dbg_info(struct dp_rx_desc *rx_desc) 1016 { 1017 } 1018 1019 static inline 1020 void dp_rx_desc_update_dbg_info(struct dp_rx_desc *rx_desc, 1021 const char *func_name, uint8_t flag) 1022 { 1023 } 1024 #endif /* RX_DESC_LOGGING */ 1025 1026 /** 1027 * dp_rx_add_to_free_desc_list() - Adds to a local free descriptor list 1028 * 1029 * @head: pointer to the head of local free list 1030 * @tail: pointer to the tail of local free list 1031 * @new: new descriptor that is added to the free list 1032 * @func_name: caller func name 1033 * 1034 * Return: void: 1035 */ 1036 static inline 1037 void __dp_rx_add_to_free_desc_list(union dp_rx_desc_list_elem_t **head, 1038 union dp_rx_desc_list_elem_t **tail, 1039 struct dp_rx_desc *new, const char *func_name) 1040 { 1041 qdf_assert(head && new); 1042 1043 dp_rx_desc_update_dbg_info(new, func_name, RX_DESC_IN_FREELIST); 1044 1045 new->nbuf = NULL; 1046 new->in_use = 0; 1047 1048 ((union dp_rx_desc_list_elem_t *)new)->next = *head; 1049 *head = (union dp_rx_desc_list_elem_t *)new; 1050 /* reset tail if head->next is NULL */ 1051 if (!*tail || !(*head)->next) 1052 *tail = *head; 1053 } 1054 1055 uint8_t dp_rx_process_invalid_peer(struct dp_soc *soc, qdf_nbuf_t nbuf, 1056 uint8_t mac_id); 1057 void dp_rx_process_invalid_peer_wrapper(struct dp_soc *soc, 1058 qdf_nbuf_t mpdu, bool mpdu_done, uint8_t mac_id); 1059 void dp_rx_process_mic_error(struct dp_soc *soc, qdf_nbuf_t nbuf, 1060 uint8_t *rx_tlv_hdr, struct dp_txrx_peer *peer); 1061 void dp_2k_jump_handle(struct dp_soc *soc, qdf_nbuf_t nbuf, uint8_t *rx_tlv_hdr, 1062 uint16_t peer_id, uint8_t tid); 1063 1064 #define DP_RX_HEAD_APPEND(head, elem) \ 1065 do { \ 1066 qdf_nbuf_set_next((elem), (head)); \ 1067 (head) = (elem); \ 1068 } while (0) 1069 1070 1071 #define DP_RX_LIST_APPEND(head, tail, elem) \ 1072 do { \ 1073 if (!(head)) { \ 1074 (head) = (elem); \ 1075 QDF_NBUF_CB_RX_NUM_ELEMENTS_IN_LIST(head) = 1;\ 1076 } else { \ 1077 qdf_nbuf_set_next((tail), (elem)); \ 
1078 QDF_NBUF_CB_RX_NUM_ELEMENTS_IN_LIST(head)++; \ 1079 } \ 1080 (tail) = (elem); \ 1081 qdf_nbuf_set_next((tail), NULL); \ 1082 } while (0) 1083 1084 #define DP_RX_MERGE_TWO_LIST(phead, ptail, chead, ctail) \ 1085 do { \ 1086 if (!(phead)) { \ 1087 (phead) = (chead); \ 1088 } else { \ 1089 qdf_nbuf_set_next((ptail), (chead)); \ 1090 QDF_NBUF_CB_RX_NUM_ELEMENTS_IN_LIST(phead) += \ 1091 QDF_NBUF_CB_RX_NUM_ELEMENTS_IN_LIST(chead); \ 1092 } \ 1093 (ptail) = (ctail); \ 1094 qdf_nbuf_set_next((ptail), NULL); \ 1095 } while (0) 1096 1097 #if defined(QCA_PADDR_CHECK_ON_3TH_PLATFORM) 1098 /* 1099 * on some third-party platform, the memory below 0x2000 1100 * is reserved for target use, so any memory allocated in this 1101 * region should not be used by host 1102 */ 1103 #define MAX_RETRY 50 1104 #define DP_PHY_ADDR_RESERVED 0x2000 1105 #elif defined(BUILD_X86) 1106 /* 1107 * in M2M emulation platforms (x86) the memory below 0x50000000 1108 * is reserved for target use, so any memory allocated in this 1109 * region should not be used by host 1110 */ 1111 #define MAX_RETRY 100 1112 #define DP_PHY_ADDR_RESERVED 0x50000000 1113 #endif 1114 1115 #if defined(QCA_PADDR_CHECK_ON_3TH_PLATFORM) || defined(BUILD_X86) 1116 /** 1117 * dp_check_paddr() - check if current phy address is valid or not 1118 * @dp_soc: core txrx main context 1119 * @rx_netbuf: skb buffer 1120 * @paddr: physical address 1121 * @rx_desc_pool: struct of rx descriptor pool 1122 * check if the physical address of the nbuf->data is less 1123 * than DP_PHY_ADDR_RESERVED then free the nbuf and try 1124 * allocating new nbuf. We can try for 100 times. 1125 * 1126 * This is a temp WAR till we fix it properly. 1127 * 1128 * Return: success or failure. 
1129 */ 1130 static inline 1131 int dp_check_paddr(struct dp_soc *dp_soc, 1132 qdf_nbuf_t *rx_netbuf, 1133 qdf_dma_addr_t *paddr, 1134 struct rx_desc_pool *rx_desc_pool) 1135 { 1136 uint32_t nbuf_retry = 0; 1137 int32_t ret; 1138 1139 if (qdf_likely(*paddr > DP_PHY_ADDR_RESERVED)) 1140 return QDF_STATUS_SUCCESS; 1141 1142 do { 1143 dp_debug("invalid phy addr 0x%llx, trying again", 1144 (uint64_t)(*paddr)); 1145 nbuf_retry++; 1146 if ((*rx_netbuf)) { 1147 /* Not freeing buffer intentionally. 1148 * Observed that same buffer is getting 1149 * re-allocated resulting in longer load time 1150 * WMI init timeout. 1151 * This buffer is anyway not useful so skip it. 1152 *.Add such buffer to invalid list and free 1153 *.them when driver unload. 1154 **/ 1155 qdf_nbuf_unmap_nbytes_single(dp_soc->osdev, 1156 *rx_netbuf, 1157 QDF_DMA_FROM_DEVICE, 1158 rx_desc_pool->buf_size); 1159 qdf_nbuf_queue_add(&dp_soc->invalid_buf_queue, 1160 *rx_netbuf); 1161 } 1162 1163 *rx_netbuf = qdf_nbuf_alloc(dp_soc->osdev, 1164 rx_desc_pool->buf_size, 1165 RX_BUFFER_RESERVATION, 1166 rx_desc_pool->buf_alignment, 1167 FALSE); 1168 1169 if (qdf_unlikely(!(*rx_netbuf))) 1170 return QDF_STATUS_E_FAILURE; 1171 1172 ret = qdf_nbuf_map_nbytes_single(dp_soc->osdev, 1173 *rx_netbuf, 1174 QDF_DMA_FROM_DEVICE, 1175 rx_desc_pool->buf_size); 1176 1177 if (qdf_unlikely(ret == QDF_STATUS_E_FAILURE)) { 1178 qdf_nbuf_free(*rx_netbuf); 1179 *rx_netbuf = NULL; 1180 continue; 1181 } 1182 1183 *paddr = qdf_nbuf_get_frag_paddr(*rx_netbuf, 0); 1184 1185 if (qdf_likely(*paddr > DP_PHY_ADDR_RESERVED)) 1186 return QDF_STATUS_SUCCESS; 1187 1188 } while (nbuf_retry < MAX_RETRY); 1189 1190 if ((*rx_netbuf)) { 1191 qdf_nbuf_unmap_nbytes_single(dp_soc->osdev, 1192 *rx_netbuf, 1193 QDF_DMA_FROM_DEVICE, 1194 rx_desc_pool->buf_size); 1195 qdf_nbuf_queue_add(&dp_soc->invalid_buf_queue, 1196 *rx_netbuf); 1197 } 1198 1199 return QDF_STATUS_E_FAILURE; 1200 } 1201 1202 #else 1203 static inline 1204 int dp_check_paddr(struct dp_soc 
*dp_soc, 1205 qdf_nbuf_t *rx_netbuf, 1206 qdf_dma_addr_t *paddr, 1207 struct rx_desc_pool *rx_desc_pool) 1208 { 1209 return QDF_STATUS_SUCCESS; 1210 } 1211 1212 #endif 1213 1214 /** 1215 * dp_rx_cookie_2_link_desc_va() - Converts cookie to a virtual address of 1216 * the MSDU Link Descriptor 1217 * @soc: core txrx main context 1218 * @buf_info: buf_info includes cookie that is used to lookup 1219 * virtual address of link descriptor after deriving the page id 1220 * and the offset or index of the desc on the associatde page. 1221 * 1222 * This is the VA of the link descriptor, that HAL layer later uses to 1223 * retrieve the list of MSDU's for a given MPDU. 1224 * 1225 * Return: void *: Virtual Address of the Rx descriptor 1226 */ 1227 static inline 1228 void *dp_rx_cookie_2_link_desc_va(struct dp_soc *soc, 1229 struct hal_buf_info *buf_info) 1230 { 1231 void *link_desc_va; 1232 struct qdf_mem_multi_page_t *pages; 1233 uint16_t page_id = LINK_DESC_COOKIE_PAGE_ID(buf_info->sw_cookie); 1234 1235 pages = &soc->link_desc_pages; 1236 if (!pages) 1237 return NULL; 1238 if (qdf_unlikely(page_id >= pages->num_pages)) 1239 return NULL; 1240 link_desc_va = pages->dma_pages[page_id].page_v_addr_start + 1241 (buf_info->paddr - pages->dma_pages[page_id].page_p_addr); 1242 return link_desc_va; 1243 } 1244 1245 #ifndef QCA_HOST_MODE_WIFI_DISABLED 1246 #ifdef DISABLE_EAPOL_INTRABSS_FWD 1247 #ifdef WLAN_FEATURE_11BE_MLO 1248 static inline bool dp_nbuf_dst_addr_is_mld_addr(struct dp_vdev *vdev, 1249 qdf_nbuf_t nbuf) 1250 { 1251 struct qdf_mac_addr *self_mld_mac_addr = 1252 (struct qdf_mac_addr *)vdev->mld_mac_addr.raw; 1253 return qdf_is_macaddr_equal(self_mld_mac_addr, 1254 (struct qdf_mac_addr *)qdf_nbuf_data(nbuf) + 1255 QDF_NBUF_DEST_MAC_OFFSET); 1256 } 1257 #else 1258 static inline bool dp_nbuf_dst_addr_is_mld_addr(struct dp_vdev *vdev, 1259 qdf_nbuf_t nbuf) 1260 { 1261 return false; 1262 } 1263 #endif 1264 1265 static inline bool dp_nbuf_dst_addr_is_self_addr(struct dp_vdev 
*vdev, 1266 qdf_nbuf_t nbuf) 1267 { 1268 return qdf_is_macaddr_equal((struct qdf_mac_addr *)vdev->mac_addr.raw, 1269 (struct qdf_mac_addr *)qdf_nbuf_data(nbuf) + 1270 QDF_NBUF_DEST_MAC_OFFSET); 1271 } 1272 1273 /* 1274 * dp_rx_intrabss_eapol_drop_check() - API For EAPOL 1275 * pkt with DA not equal to vdev mac addr, fwd is not allowed. 1276 * @soc: core txrx main context 1277 * @ta_txrx_peer: source peer entry 1278 * @rx_tlv_hdr: start address of rx tlvs 1279 * @nbuf: nbuf that has to be intrabss forwarded 1280 * 1281 * Return: true if it is forwarded else false 1282 */ 1283 static inline 1284 bool dp_rx_intrabss_eapol_drop_check(struct dp_soc *soc, 1285 struct dp_txrx_peer *ta_txrx_peer, 1286 uint8_t *rx_tlv_hdr, qdf_nbuf_t nbuf) 1287 { 1288 if (qdf_unlikely(qdf_nbuf_is_ipv4_eapol_pkt(nbuf) && 1289 !(dp_nbuf_dst_addr_is_self_addr(ta_txrx_peer->vdev, 1290 nbuf) || 1291 dp_nbuf_dst_addr_is_mld_addr(ta_txrx_peer->vdev, 1292 nbuf)))) { 1293 qdf_nbuf_free(nbuf); 1294 DP_STATS_INC(soc, rx.err.intrabss_eapol_drop, 1); 1295 return true; 1296 } 1297 1298 return false; 1299 } 1300 #else /* DISABLE_EAPOL_INTRABSS_FWD */ 1301 1302 static inline 1303 bool dp_rx_intrabss_eapol_drop_check(struct dp_soc *soc, 1304 struct dp_txrx_peer *ta_txrx_peer, 1305 uint8_t *rx_tlv_hdr, qdf_nbuf_t nbuf) 1306 { 1307 return false; 1308 } 1309 #endif /* DISABLE_EAPOL_INTRABSS_FWD */ 1310 1311 bool dp_rx_intrabss_mcbc_fwd(struct dp_soc *soc, 1312 struct dp_txrx_peer *ta_txrx_peer, 1313 uint8_t *rx_tlv_hdr, qdf_nbuf_t nbuf, 1314 struct cdp_tid_rx_stats *tid_stats); 1315 1316 bool dp_rx_intrabss_ucast_fwd(struct dp_soc *soc, 1317 struct dp_txrx_peer *ta_txrx_peer, 1318 uint8_t tx_vdev_id, 1319 uint8_t *rx_tlv_hdr, qdf_nbuf_t nbuf, 1320 struct cdp_tid_rx_stats *tid_stats); 1321 1322 /** 1323 * dp_rx_defrag_concat() - Concatenate the fragments 1324 * 1325 * @dst: destination pointer to the buffer 1326 * @src: source pointer from where the fragment payload is to be copied 1327 * 1328 * Return: 
QDF_STATUS 1329 */ 1330 static inline QDF_STATUS dp_rx_defrag_concat(qdf_nbuf_t dst, qdf_nbuf_t src) 1331 { 1332 /* 1333 * Inside qdf_nbuf_cat, if it is necessary to reallocate dst 1334 * to provide space for src, the headroom portion is copied from 1335 * the original dst buffer to the larger new dst buffer. 1336 * (This is needed, because the headroom of the dst buffer 1337 * contains the rx desc.) 1338 */ 1339 if (!qdf_nbuf_cat(dst, src)) { 1340 /* 1341 * qdf_nbuf_cat does not free the src memory. 1342 * Free src nbuf before returning 1343 * For failure case the caller takes of freeing the nbuf 1344 */ 1345 qdf_nbuf_free(src); 1346 return QDF_STATUS_SUCCESS; 1347 } 1348 1349 return QDF_STATUS_E_DEFRAG_ERROR; 1350 } 1351 1352 #endif /* QCA_HOST_MODE_WIFI_DISABLED */ 1353 1354 #ifndef FEATURE_WDS 1355 void dp_rx_da_learn(struct dp_soc *soc, uint8_t *rx_tlv_hdr, 1356 struct dp_txrx_peer *ta_txrx_peer, qdf_nbuf_t nbuf); 1357 1358 static inline QDF_STATUS dp_rx_ast_set_active(struct dp_soc *soc, uint16_t sa_idx, bool is_active) 1359 { 1360 return QDF_STATUS_SUCCESS; 1361 } 1362 1363 static inline void 1364 dp_rx_wds_srcport_learn(struct dp_soc *soc, 1365 uint8_t *rx_tlv_hdr, 1366 struct dp_txrx_peer *txrx_peer, 1367 qdf_nbuf_t nbuf, 1368 struct hal_rx_msdu_metadata msdu_metadata) 1369 { 1370 } 1371 1372 static inline void 1373 dp_rx_ipa_wds_srcport_learn(struct dp_soc *soc, 1374 struct dp_peer *ta_peer, qdf_nbuf_t nbuf, 1375 struct hal_rx_msdu_metadata msdu_end_info, 1376 bool ad4_valid, bool chfrag_start) 1377 { 1378 } 1379 #endif 1380 1381 /* 1382 * dp_rx_desc_dump() - dump the sw rx descriptor 1383 * 1384 * @rx_desc: sw rx descriptor 1385 */ 1386 static inline void dp_rx_desc_dump(struct dp_rx_desc *rx_desc) 1387 { 1388 dp_info("rx_desc->nbuf: %pK, rx_desc->cookie: %d, rx_desc->pool_id: %d, rx_desc->in_use: %d, rx_desc->unmapped: %d", 1389 rx_desc->nbuf, rx_desc->cookie, rx_desc->pool_id, 1390 rx_desc->in_use, rx_desc->unmapped); 1391 } 1392 1393 #ifndef 
QCA_HOST_MODE_WIFI_DISABLED 1394 1395 /* 1396 * check_qwrap_multicast_loopback() - Check if rx packet is a loopback packet. 1397 * In qwrap mode, packets originated from 1398 * any vdev should not loopback and 1399 * should be dropped. 1400 * @vdev: vdev on which rx packet is received 1401 * @nbuf: rx pkt 1402 * 1403 */ 1404 #if ATH_SUPPORT_WRAP 1405 static inline bool check_qwrap_multicast_loopback(struct dp_vdev *vdev, 1406 qdf_nbuf_t nbuf) 1407 { 1408 struct dp_vdev *psta_vdev; 1409 struct dp_pdev *pdev = vdev->pdev; 1410 uint8_t *data = qdf_nbuf_data(nbuf); 1411 1412 if (qdf_unlikely(vdev->proxysta_vdev)) { 1413 /* In qwrap isolation mode, allow loopback packets as all 1414 * packets go to RootAP and Loopback on the mpsta. 1415 */ 1416 if (vdev->isolation_vdev) 1417 return false; 1418 TAILQ_FOREACH(psta_vdev, &pdev->vdev_list, vdev_list_elem) { 1419 if (qdf_unlikely(psta_vdev->proxysta_vdev && 1420 !qdf_mem_cmp(psta_vdev->mac_addr.raw, 1421 &data[QDF_MAC_ADDR_SIZE], 1422 QDF_MAC_ADDR_SIZE))) { 1423 /* Drop packet if source address is equal to 1424 * any of the vdev addresses. 
1425 */ 1426 return true; 1427 } 1428 } 1429 } 1430 return false; 1431 } 1432 #else 1433 static inline bool check_qwrap_multicast_loopback(struct dp_vdev *vdev, 1434 qdf_nbuf_t nbuf) 1435 { 1436 return false; 1437 } 1438 #endif 1439 1440 #endif /* QCA_HOST_MODE_WIFI_DISABLED */ 1441 1442 #if defined(WLAN_SUPPORT_RX_PROTOCOL_TYPE_TAG) ||\ 1443 defined(WLAN_SUPPORT_RX_TAG_STATISTICS) ||\ 1444 defined(WLAN_SUPPORT_RX_FLOW_TAG) 1445 #include "dp_rx_tag.h" 1446 #endif 1447 1448 #if !defined(WLAN_SUPPORT_RX_PROTOCOL_TYPE_TAG) &&\ 1449 !defined(WLAN_SUPPORT_RX_FLOW_TAG) 1450 /** 1451 * dp_rx_update_protocol_tag() - Reads CCE metadata from the RX MSDU end TLV 1452 * and set the corresponding tag in QDF packet 1453 * @soc: core txrx main context 1454 * @vdev: vdev on which the packet is received 1455 * @nbuf: QDF pkt buffer on which the protocol tag should be set 1456 * @rx_tlv_hdr: rBbase address where the RX TLVs starts 1457 * @ring_index: REO ring number, not used for error & monitor ring 1458 * @is_reo_exception: flag to indicate if rx from REO ring or exception ring 1459 * @is_update_stats: flag to indicate whether to update stats or not 1460 * Return: void 1461 */ 1462 static inline void 1463 dp_rx_update_protocol_tag(struct dp_soc *soc, struct dp_vdev *vdev, 1464 qdf_nbuf_t nbuf, uint8_t *rx_tlv_hdr, 1465 uint16_t ring_index, 1466 bool is_reo_exception, bool is_update_stats) 1467 { 1468 } 1469 #endif 1470 1471 #ifndef WLAN_SUPPORT_RX_PROTOCOL_TYPE_TAG 1472 /** 1473 * dp_rx_err_cce_drop() - Reads CCE metadata from the RX MSDU end TLV 1474 * and returns whether cce metadata matches 1475 * @soc: core txrx main context 1476 * @vdev: vdev on which the packet is received 1477 * @nbuf: QDF pkt buffer on which the protocol tag should be set 1478 * @rx_tlv_hdr: rBbase address where the RX TLVs starts 1479 * Return: bool 1480 */ 1481 static inline bool 1482 dp_rx_err_cce_drop(struct dp_soc *soc, struct dp_vdev *vdev, 1483 qdf_nbuf_t nbuf, uint8_t *rx_tlv_hdr) 1484 { 1485 
return false; 1486 } 1487 1488 #endif /* WLAN_SUPPORT_RX_PROTOCOL_TYPE_TAG */ 1489 1490 #ifndef WLAN_SUPPORT_RX_FLOW_TAG 1491 /** 1492 * dp_rx_update_flow_tag() - Reads FSE metadata from the RX MSDU end TLV 1493 * and set the corresponding tag in QDF packet 1494 * @soc: core txrx main context 1495 * @vdev: vdev on which the packet is received 1496 * @nbuf: QDF pkt buffer on which the protocol tag should be set 1497 * @rx_tlv_hdr: base address where the RX TLVs starts 1498 * @is_update_stats: flag to indicate whether to update stats or not 1499 * 1500 * Return: void 1501 */ 1502 static inline void 1503 dp_rx_update_flow_tag(struct dp_soc *soc, struct dp_vdev *vdev, 1504 qdf_nbuf_t nbuf, uint8_t *rx_tlv_hdr, bool update_stats) 1505 { 1506 } 1507 #endif /* WLAN_SUPPORT_RX_FLOW_TAG */ 1508 1509 #define CRITICAL_BUFFER_THRESHOLD 64 1510 /* 1511 * dp_rx_buffers_replenish() - replenish rxdma ring with rx nbufs 1512 * called during dp rx initialization 1513 * and at the end of dp_rx_process. 1514 * 1515 * @soc: core txrx main context 1516 * @mac_id: mac_id which is one of 3 mac_ids 1517 * @dp_rxdma_srng: dp rxdma circular ring 1518 * @rx_desc_pool: Pointer to free Rx descriptor pool 1519 * @num_req_buffers: number of buffer to be replenished 1520 * @desc_list: list of descs if called from dp_rx_process 1521 * or NULL during dp rx initialization or out of buffer 1522 * interrupt. 
1523 * @tail: tail of descs list 1524 * @func_name: name of the caller function 1525 * Return: return success or failure 1526 */ 1527 QDF_STATUS __dp_rx_buffers_replenish(struct dp_soc *dp_soc, uint32_t mac_id, 1528 struct dp_srng *dp_rxdma_srng, 1529 struct rx_desc_pool *rx_desc_pool, 1530 uint32_t num_req_buffers, 1531 union dp_rx_desc_list_elem_t **desc_list, 1532 union dp_rx_desc_list_elem_t **tail, 1533 const char *func_name); 1534 /* 1535 * __dp_rx_buffers_no_map_replenish() - replenish rxdma ring with rx nbufs 1536 * use direct APIs to get invalidate 1537 * and get the physical address of the 1538 * nbuf instead of map api,called during 1539 * dp rx initialization and at the end 1540 * of dp_rx_process. 1541 * 1542 * @soc: core txrx main context 1543 * @mac_id: mac_id which is one of 3 mac_ids 1544 * @dp_rxdma_srng: dp rxdma circular ring 1545 * @rx_desc_pool: Pointer to free Rx descriptor pool 1546 * @num_req_buffers: number of buffer to be replenished 1547 * @desc_list: list of descs if called from dp_rx_process 1548 * or NULL during dp rx initialization or out of buffer 1549 * interrupt. 
1550 * @tail: tail of descs list 1551 * Return: return success or failure 1552 */ 1553 QDF_STATUS 1554 __dp_rx_buffers_no_map_replenish(struct dp_soc *dp_soc, uint32_t mac_id, 1555 struct dp_srng *dp_rxdma_srng, 1556 struct rx_desc_pool *rx_desc_pool, 1557 uint32_t num_req_buffers, 1558 union dp_rx_desc_list_elem_t **desc_list, 1559 union dp_rx_desc_list_elem_t **tail); 1560 1561 /* 1562 * __dp_rx_buffers_no_map__lt_replenish() - replenish rxdma ring with rx nbufs 1563 * use direct APIs to get invalidate 1564 * and get the physical address of the 1565 * nbuf instead of map api,called when 1566 * low threshold interrupt is triggered 1567 * 1568 * @soc: core txrx main context 1569 * @mac_id: mac_id which is one of 3 mac_ids 1570 * @dp_rxdma_srng: dp rxdma circular ring 1571 * @rx_desc_pool: Pointer to free Rx descriptor pool 1572 * Return: return success or failure 1573 */ 1574 QDF_STATUS 1575 __dp_rx_buffers_no_map_lt_replenish(struct dp_soc *dp_soc, uint32_t mac_id, 1576 struct dp_srng *dp_rxdma_srng, 1577 struct rx_desc_pool *rx_desc_pool); 1578 /* 1579 * __dp_pdev_rx_buffers_no_map_attach() - replenish rxdma ring with rx nbufs 1580 * use direct APIs to get invalidate 1581 * and get the physical address of the 1582 * nbuf instead of map api,called during 1583 * dp rx initialization. 
1584 * 1585 * @soc: core txrx main context 1586 * @mac_id: mac_id which is one of 3 mac_ids 1587 * @dp_rxdma_srng: dp rxdma circular ring 1588 * @rx_desc_pool: Pointer to free Rx descriptor pool 1589 * @num_req_buffers: number of buffer to be replenished 1590 * Return: return success or failure 1591 */ 1592 QDF_STATUS __dp_pdev_rx_buffers_no_map_attach(struct dp_soc *dp_soc, 1593 uint32_t mac_id, 1594 struct dp_srng *dp_rxdma_srng, 1595 struct rx_desc_pool *rx_desc_pool, 1596 uint32_t num_req_buffers); 1597 1598 /* 1599 * dp_pdev_rx_buffers_attach() - replenish rxdma ring with rx nbufs 1600 * called during dp rx initialization 1601 * 1602 * @soc: core txrx main context 1603 * @mac_id: mac_id which is one of 3 mac_ids 1604 * @dp_rxdma_srng: dp rxdma circular ring 1605 * @rx_desc_pool: Pointer to free Rx descriptor pool 1606 * @num_req_buffers: number of buffer to be replenished 1607 * 1608 * Return: return success or failure 1609 */ 1610 QDF_STATUS 1611 dp_pdev_rx_buffers_attach(struct dp_soc *dp_soc, uint32_t mac_id, 1612 struct dp_srng *dp_rxdma_srng, 1613 struct rx_desc_pool *rx_desc_pool, 1614 uint32_t num_req_buffers); 1615 1616 /** 1617 * dp_rx_link_desc_return() - Return a MPDU link descriptor to HW 1618 * (WBM), following error handling 1619 * 1620 * @soc: core DP main context 1621 * @buf_addr_info: opaque pointer to the REO error ring descriptor 1622 * @buf_addr_info: void pointer to the buffer_addr_info 1623 * @bm_action: put to idle_list or release to msdu_list 1624 * 1625 * Return: QDF_STATUS_E_FAILURE for failure else QDF_STATUS_SUCCESS 1626 */ 1627 QDF_STATUS 1628 dp_rx_link_desc_return(struct dp_soc *soc, hal_ring_desc_t ring_desc, 1629 uint8_t bm_action); 1630 1631 /** 1632 * dp_rx_link_desc_return_by_addr - Return a MPDU link descriptor to 1633 * (WBM) by address 1634 * 1635 * @soc: core DP main context 1636 * @link_desc_addr: link descriptor addr 1637 * 1638 * Return: QDF_STATUS_E_FAILURE for failure else QDF_STATUS_SUCCESS 1639 */ 1640 QDF_STATUS 
1641 dp_rx_link_desc_return_by_addr(struct dp_soc *soc, 1642 hal_buff_addrinfo_t link_desc_addr, 1643 uint8_t bm_action); 1644 1645 /** 1646 * dp_rxdma_err_process() - RxDMA error processing functionality 1647 * @soc: core txrx main contex 1648 * @mac_id: mac id which is one of 3 mac_ids 1649 * @hal_ring: opaque pointer to the HAL Rx Ring, which will be serviced 1650 * @quota: No. of units (packets) that can be serviced in one shot. 1651 * 1652 * Return: num of buffers processed 1653 */ 1654 uint32_t 1655 dp_rxdma_err_process(struct dp_intr *int_ctx, struct dp_soc *soc, 1656 uint32_t mac_id, uint32_t quota); 1657 1658 void dp_rx_fill_mesh_stats(struct dp_vdev *vdev, qdf_nbuf_t nbuf, 1659 uint8_t *rx_tlv_hdr, struct dp_txrx_peer *peer); 1660 QDF_STATUS dp_rx_filter_mesh_packets(struct dp_vdev *vdev, qdf_nbuf_t nbuf, 1661 uint8_t *rx_tlv_hdr); 1662 1663 int dp_wds_rx_policy_check(uint8_t *rx_tlv_hdr, struct dp_vdev *vdev, 1664 struct dp_txrx_peer *peer); 1665 1666 /* 1667 * dp_rx_dump_info_and_assert() - dump RX Ring info and Rx Desc info 1668 * 1669 * @soc: core txrx main context 1670 * @hal_ring: opaque pointer to the HAL Rx Ring, which will be serviced 1671 * @ring_desc: opaque pointer to the RX ring descriptor 1672 * @rx_desc: host rx descriptor 1673 * 1674 * Return: void 1675 */ 1676 void dp_rx_dump_info_and_assert(struct dp_soc *soc, 1677 hal_ring_handle_t hal_ring_hdl, 1678 hal_ring_desc_t ring_desc, 1679 struct dp_rx_desc *rx_desc); 1680 1681 void dp_rx_compute_delay(struct dp_vdev *vdev, qdf_nbuf_t nbuf); 1682 1683 #ifdef QCA_PEER_EXT_STATS 1684 void dp_rx_compute_tid_delay(struct cdp_delay_tid_stats *stats, 1685 qdf_nbuf_t nbuf); 1686 #endif /* QCA_PEER_EXT_STATS */ 1687 1688 #ifdef RX_DESC_DEBUG_CHECK 1689 /** 1690 * dp_rx_desc_check_magic() - check the magic value in dp_rx_desc 1691 * @rx_desc: rx descriptor pointer 1692 * 1693 * Return: true, if magic is correct, else false. 
1694 */ 1695 static inline bool dp_rx_desc_check_magic(struct dp_rx_desc *rx_desc) 1696 { 1697 if (qdf_unlikely(rx_desc->magic != DP_RX_DESC_MAGIC)) 1698 return false; 1699 1700 rx_desc->magic = 0; 1701 return true; 1702 } 1703 1704 /** 1705 * dp_rx_desc_prep() - prepare rx desc 1706 * @rx_desc: rx descriptor pointer to be prepared 1707 * @nbuf_frag_info_t: struct dp_rx_nbuf_frag_info * 1708 * 1709 * Note: assumption is that we are associating a nbuf which is mapped 1710 * 1711 * Return: none 1712 */ 1713 static inline 1714 void dp_rx_desc_prep(struct dp_rx_desc *rx_desc, 1715 struct dp_rx_nbuf_frag_info *nbuf_frag_info_t) 1716 { 1717 rx_desc->magic = DP_RX_DESC_MAGIC; 1718 rx_desc->nbuf = (nbuf_frag_info_t->virt_addr).nbuf; 1719 rx_desc->unmapped = 0; 1720 rx_desc->nbuf_data_addr = (uint8_t *)qdf_nbuf_data(rx_desc->nbuf); 1721 } 1722 1723 /** 1724 * dp_rx_desc_frag_prep() - prepare rx desc 1725 * @rx_desc: rx descriptor pointer to be prepared 1726 * @nbuf_frag_info_t: struct dp_rx_nbuf_frag_info * 1727 * 1728 * Note: assumption is that we frag address is mapped 1729 * 1730 * Return: none 1731 */ 1732 #ifdef DP_RX_MON_MEM_FRAG 1733 static inline 1734 void dp_rx_desc_frag_prep(struct dp_rx_desc *rx_desc, 1735 struct dp_rx_nbuf_frag_info *nbuf_frag_info_t) 1736 { 1737 rx_desc->magic = DP_RX_DESC_MAGIC; 1738 rx_desc->rx_buf_start = 1739 (uint8_t *)((nbuf_frag_info_t->virt_addr).vaddr); 1740 rx_desc->paddr_buf_start = nbuf_frag_info_t->paddr; 1741 rx_desc->unmapped = 0; 1742 } 1743 #else 1744 static inline 1745 void dp_rx_desc_frag_prep(struct dp_rx_desc *rx_desc, 1746 struct dp_rx_nbuf_frag_info *nbuf_frag_info_t) 1747 { 1748 } 1749 #endif /* DP_RX_MON_MEM_FRAG */ 1750 1751 /** 1752 * dp_rx_desc_paddr_sanity_check() - paddr sanity for ring desc vs rx_desc 1753 * @rx_desc: rx descriptor 1754 * @ring_paddr: paddr obatined from the ring 1755 * 1756 * Returns: QDF_STATUS 1757 */ 1758 static inline 1759 bool dp_rx_desc_paddr_sanity_check(struct dp_rx_desc *rx_desc, 1760 
uint64_t ring_paddr) 1761 { 1762 return (ring_paddr == qdf_nbuf_get_frag_paddr(rx_desc->nbuf, 0)); 1763 } 1764 #else 1765 1766 static inline bool dp_rx_desc_check_magic(struct dp_rx_desc *rx_desc) 1767 { 1768 return true; 1769 } 1770 1771 static inline 1772 void dp_rx_desc_prep(struct dp_rx_desc *rx_desc, 1773 struct dp_rx_nbuf_frag_info *nbuf_frag_info_t) 1774 { 1775 rx_desc->nbuf = (nbuf_frag_info_t->virt_addr).nbuf; 1776 rx_desc->unmapped = 0; 1777 } 1778 1779 #ifdef DP_RX_MON_MEM_FRAG 1780 static inline 1781 void dp_rx_desc_frag_prep(struct dp_rx_desc *rx_desc, 1782 struct dp_rx_nbuf_frag_info *nbuf_frag_info_t) 1783 { 1784 rx_desc->rx_buf_start = 1785 (uint8_t *)((nbuf_frag_info_t->virt_addr).vaddr); 1786 rx_desc->paddr_buf_start = nbuf_frag_info_t->paddr; 1787 rx_desc->unmapped = 0; 1788 } 1789 #else 1790 static inline 1791 void dp_rx_desc_frag_prep(struct dp_rx_desc *rx_desc, 1792 struct dp_rx_nbuf_frag_info *nbuf_frag_info_t) 1793 { 1794 } 1795 #endif /* DP_RX_MON_MEM_FRAG */ 1796 1797 static inline 1798 bool dp_rx_desc_paddr_sanity_check(struct dp_rx_desc *rx_desc, 1799 uint64_t ring_paddr) 1800 { 1801 return true; 1802 } 1803 #endif /* RX_DESC_DEBUG_CHECK */ 1804 1805 void dp_rx_enable_mon_dest_frag(struct rx_desc_pool *rx_desc_pool, 1806 bool is_mon_dest_desc); 1807 1808 void dp_rx_process_rxdma_err(struct dp_soc *soc, qdf_nbuf_t nbuf, 1809 uint8_t *rx_tlv_hdr, struct dp_txrx_peer *peer, 1810 uint8_t err_code, uint8_t mac_id); 1811 1812 #ifndef QCA_MULTIPASS_SUPPORT 1813 static inline 1814 bool dp_rx_multipass_process(struct dp_txrx_peer *peer, qdf_nbuf_t nbuf, 1815 uint8_t tid) 1816 { 1817 return false; 1818 } 1819 #else 1820 bool dp_rx_multipass_process(struct dp_txrx_peer *peer, qdf_nbuf_t nbuf, 1821 uint8_t tid); 1822 #endif 1823 1824 #ifndef QCA_HOST_MODE_WIFI_DISABLED 1825 1826 #ifndef WLAN_RX_PKT_CAPTURE_ENH 1827 static inline 1828 QDF_STATUS dp_peer_set_rx_capture_enabled(struct dp_pdev *pdev, 1829 struct dp_peer *peer_handle, 1830 bool value, 
uint8_t *mac_addr) 1831 { 1832 return QDF_STATUS_SUCCESS; 1833 } 1834 #endif 1835 1836 #endif /* QCA_HOST_MODE_WIFI_DISABLED */ 1837 1838 /** 1839 * dp_rx_deliver_to_stack() - deliver pkts to network stack 1840 * Caller to hold peer refcount and check for valid peer 1841 * @soc: soc 1842 * @vdev: vdev 1843 * @txrx_peer: txrx peer 1844 * @nbuf_head: skb list head 1845 * @nbuf_tail: skb list tail 1846 * 1847 * Return: QDF_STATUS 1848 */ 1849 QDF_STATUS dp_rx_deliver_to_stack(struct dp_soc *soc, 1850 struct dp_vdev *vdev, 1851 struct dp_txrx_peer *peer, 1852 qdf_nbuf_t nbuf_head, 1853 qdf_nbuf_t nbuf_tail); 1854 1855 #ifdef QCA_SUPPORT_EAPOL_OVER_CONTROL_PORT 1856 /** 1857 * dp_rx_eapol_deliver_to_stack() - deliver pkts to network stack 1858 * caller to hold peer refcount and check for valid peer 1859 * @soc: soc 1860 * @vdev: vdev 1861 * @peer: peer 1862 * @nbuf_head: skb list head 1863 * @nbuf_tail: skb list tail 1864 * 1865 * return: QDF_STATUS 1866 */ 1867 QDF_STATUS dp_rx_eapol_deliver_to_stack(struct dp_soc *soc, 1868 struct dp_vdev *vdev, 1869 struct dp_txrx_peer *peer, 1870 qdf_nbuf_t nbuf_head, 1871 qdf_nbuf_t nbuf_tail); 1872 #endif 1873 1874 #ifndef QCA_HOST_MODE_WIFI_DISABLED 1875 1876 #ifdef QCA_OL_RX_LOCK_LESS_ACCESS 1877 /* 1878 * dp_rx_ring_access_start()- Wrapper function to log access start of a hal ring 1879 * @int_ctx: pointer to DP interrupt context 1880 * @dp_soc - DP soc structure pointer 1881 * @hal_ring_hdl - HAL ring handle 1882 * 1883 * Return: 0 on success; error on failure 1884 */ 1885 static inline int 1886 dp_rx_srng_access_start(struct dp_intr *int_ctx, struct dp_soc *soc, 1887 hal_ring_handle_t hal_ring_hdl) 1888 { 1889 return hal_srng_access_start_unlocked(soc->hal_soc, hal_ring_hdl); 1890 } 1891 1892 /* 1893 * dp_rx_ring_access_end()- Wrapper function to log access end of a hal ring 1894 * @int_ctx: pointer to DP interrupt context 1895 * @dp_soc - DP soc structure pointer 1896 * @hal_ring_hdl - HAL ring handle 1897 * 1898 * Return - 
None 1899 */ 1900 static inline void 1901 dp_rx_srng_access_end(struct dp_intr *int_ctx, struct dp_soc *soc, 1902 hal_ring_handle_t hal_ring_hdl) 1903 { 1904 hal_srng_access_end_unlocked(soc->hal_soc, hal_ring_hdl); 1905 } 1906 #else 1907 static inline int 1908 dp_rx_srng_access_start(struct dp_intr *int_ctx, struct dp_soc *soc, 1909 hal_ring_handle_t hal_ring_hdl) 1910 { 1911 return dp_srng_access_start(int_ctx, soc, hal_ring_hdl); 1912 } 1913 1914 static inline void 1915 dp_rx_srng_access_end(struct dp_intr *int_ctx, struct dp_soc *soc, 1916 hal_ring_handle_t hal_ring_hdl) 1917 { 1918 dp_srng_access_end(int_ctx, soc, hal_ring_hdl); 1919 } 1920 #endif 1921 1922 #endif /* QCA_HOST_MODE_WIFI_DISABLED */ 1923 1924 /* 1925 * dp_rx_wbm_sg_list_reset() - Initialize sg list 1926 * 1927 * This api should be called at soc init and afterevery sg processing. 1928 *@soc: DP SOC handle 1929 */ 1930 static inline void dp_rx_wbm_sg_list_reset(struct dp_soc *soc) 1931 { 1932 if (soc) { 1933 soc->wbm_sg_param.wbm_is_first_msdu_in_sg = false; 1934 soc->wbm_sg_param.wbm_sg_nbuf_head = NULL; 1935 soc->wbm_sg_param.wbm_sg_nbuf_tail = NULL; 1936 soc->wbm_sg_param.wbm_sg_desc_msdu_len = 0; 1937 } 1938 } 1939 1940 /* 1941 * dp_rx_wbm_sg_list_deinit() - De-initialize sg list 1942 * 1943 * This api should be called in down path, to avoid any leak. 
1944 *@soc: DP SOC handle 1945 */ 1946 static inline void dp_rx_wbm_sg_list_deinit(struct dp_soc *soc) 1947 { 1948 if (soc) { 1949 if (soc->wbm_sg_param.wbm_sg_nbuf_head) 1950 qdf_nbuf_list_free(soc->wbm_sg_param.wbm_sg_nbuf_head); 1951 1952 dp_rx_wbm_sg_list_reset(soc); 1953 } 1954 } 1955 1956 #ifndef QCA_HOST_MODE_WIFI_DISABLED 1957 1958 #ifdef WLAN_FEATURE_RX_PREALLOC_BUFFER_POOL 1959 #define DP_RX_PROCESS_NBUF(soc, head, tail, ebuf_head, ebuf_tail, rx_desc) \ 1960 do { \ 1961 if (!soc->rx_buff_pool[rx_desc->pool_id].is_initialized) { \ 1962 DP_RX_LIST_APPEND(head, tail, rx_desc->nbuf); \ 1963 break; \ 1964 } \ 1965 DP_RX_LIST_APPEND(ebuf_head, ebuf_tail, rx_desc->nbuf); \ 1966 if (!qdf_nbuf_is_rx_chfrag_cont(rx_desc->nbuf)) { \ 1967 if (!dp_rx_buffer_pool_refill(soc, ebuf_head, \ 1968 rx_desc->pool_id)) \ 1969 DP_RX_MERGE_TWO_LIST(head, tail, \ 1970 ebuf_head, ebuf_tail);\ 1971 ebuf_head = NULL; \ 1972 ebuf_tail = NULL; \ 1973 } \ 1974 } while (0) 1975 #else 1976 #define DP_RX_PROCESS_NBUF(soc, head, tail, ebuf_head, ebuf_tail, rx_desc) \ 1977 DP_RX_LIST_APPEND(head, tail, rx_desc->nbuf) 1978 #endif /* WLAN_FEATURE_RX_PREALLOC_BUFFER_POOL */ 1979 1980 #endif /* QCA_HOST_MODE_WIFI_DISABLED */ 1981 1982 /* 1983 * dp_rx_link_desc_refill_duplicate_check() - check if link desc duplicate 1984 to refill 1985 * @soc: DP SOC handle 1986 * @buf_info: the last link desc buf info 1987 * @ring_buf_info: current buf address pointor including link desc 1988 * 1989 * return: none. 
1990 */ 1991 void dp_rx_link_desc_refill_duplicate_check( 1992 struct dp_soc *soc, 1993 struct hal_buf_info *buf_info, 1994 hal_buff_addrinfo_t ring_buf_info); 1995 1996 #ifdef WLAN_FEATURE_PKT_CAPTURE_V2 1997 /** 1998 * dp_rx_deliver_to_pkt_capture() - deliver rx packet to packet capture 1999 * @soc : dp_soc handle 2000 * @pdev: dp_pdev handle 2001 * @peer_id: peer_id of the peer for which completion came 2002 * @ppdu_id: ppdu_id 2003 * @netbuf: Buffer pointer 2004 * 2005 * This function is used to deliver rx packet to packet capture 2006 */ 2007 void dp_rx_deliver_to_pkt_capture(struct dp_soc *soc, struct dp_pdev *pdev, 2008 uint16_t peer_id, uint32_t is_offload, 2009 qdf_nbuf_t netbuf); 2010 void dp_rx_deliver_to_pkt_capture_no_peer(struct dp_soc *soc, qdf_nbuf_t nbuf, 2011 uint32_t is_offload); 2012 #else 2013 static inline void 2014 dp_rx_deliver_to_pkt_capture(struct dp_soc *soc, struct dp_pdev *pdev, 2015 uint16_t peer_id, uint32_t is_offload, 2016 qdf_nbuf_t netbuf) 2017 { 2018 } 2019 2020 static inline void 2021 dp_rx_deliver_to_pkt_capture_no_peer(struct dp_soc *soc, qdf_nbuf_t nbuf, 2022 uint32_t is_offload) 2023 { 2024 } 2025 #endif 2026 2027 #ifndef QCA_HOST_MODE_WIFI_DISABLED 2028 #ifdef FEATURE_MEC 2029 /** 2030 * dp_rx_mcast_echo_check() - check if the mcast pkt is a loop 2031 * back on same vap or a different vap. 
2032 * @soc: core DP main context 2033 * @peer: dp peer handler 2034 * @rx_tlv_hdr: start of the rx TLV header 2035 * @nbuf: pkt buffer 2036 * 2037 * Return: bool (true if it is a looped back pkt else false) 2038 * 2039 */ 2040 bool dp_rx_mcast_echo_check(struct dp_soc *soc, 2041 struct dp_txrx_peer *peer, 2042 uint8_t *rx_tlv_hdr, 2043 qdf_nbuf_t nbuf); 2044 #else 2045 static inline bool dp_rx_mcast_echo_check(struct dp_soc *soc, 2046 struct dp_txrx_peer *peer, 2047 uint8_t *rx_tlv_hdr, 2048 qdf_nbuf_t nbuf) 2049 { 2050 return false; 2051 } 2052 #endif /* FEATURE_MEC */ 2053 #endif /* QCA_HOST_MODE_WIFI_DISABLED */ 2054 2055 #ifdef RECEIVE_OFFLOAD 2056 void dp_rx_fill_gro_info(struct dp_soc *soc, uint8_t *rx_tlv, 2057 qdf_nbuf_t msdu, uint32_t *rx_ol_pkt_cnt); 2058 #else 2059 static inline 2060 void dp_rx_fill_gro_info(struct dp_soc *soc, uint8_t *rx_tlv, 2061 qdf_nbuf_t msdu, uint32_t *rx_ol_pkt_cnt) 2062 { 2063 } 2064 #endif 2065 2066 void dp_rx_msdu_stats_update(struct dp_soc *soc, qdf_nbuf_t nbuf, 2067 uint8_t *rx_tlv_hdr, struct dp_txrx_peer *peer, 2068 uint8_t ring_id, 2069 struct cdp_tid_rx_stats *tid_stats); 2070 2071 void dp_rx_deliver_to_stack_no_peer(struct dp_soc *soc, qdf_nbuf_t nbuf); 2072 2073 uint32_t dp_rx_srng_get_num_pending(hal_soc_handle_t hal_soc, 2074 hal_ring_handle_t hal_ring_hdl, 2075 uint32_t num_entries, 2076 bool *near_full); 2077 2078 #ifdef WLAN_FEATURE_DP_RX_RING_HISTORY 2079 void dp_rx_ring_record_entry(struct dp_soc *soc, uint8_t ring_num, 2080 hal_ring_desc_t ring_desc); 2081 #else 2082 static inline void 2083 dp_rx_ring_record_entry(struct dp_soc *soc, uint8_t ring_num, 2084 hal_ring_desc_t ring_desc) 2085 { 2086 } 2087 #endif 2088 2089 #ifndef QCA_HOST_MODE_WIFI_DISABLED 2090 #ifdef RX_DESC_SANITY_WAR 2091 QDF_STATUS dp_rx_desc_sanity(struct dp_soc *soc, hal_soc_handle_t hal_soc, 2092 hal_ring_handle_t hal_ring_hdl, 2093 hal_ring_desc_t ring_desc, 2094 struct dp_rx_desc *rx_desc); 2095 #else 2096 static inline 2097 QDF_STATUS 
dp_rx_desc_sanity(struct dp_soc *soc, hal_soc_handle_t hal_soc, 2098 hal_ring_handle_t hal_ring_hdl, 2099 hal_ring_desc_t ring_desc, 2100 struct dp_rx_desc *rx_desc) 2101 { 2102 return QDF_STATUS_SUCCESS; 2103 } 2104 #endif 2105 2106 #ifdef DP_RX_DROP_RAW_FRM 2107 bool dp_rx_is_raw_frame_dropped(qdf_nbuf_t nbuf); 2108 #else 2109 static inline 2110 bool dp_rx_is_raw_frame_dropped(qdf_nbuf_t nbuf) 2111 { 2112 return false; 2113 } 2114 #endif 2115 2116 #ifdef RX_DESC_DEBUG_CHECK 2117 QDF_STATUS dp_rx_desc_nbuf_sanity_check(struct dp_soc *soc, 2118 hal_ring_desc_t ring_desc, 2119 struct dp_rx_desc *rx_desc); 2120 #else 2121 static inline 2122 QDF_STATUS dp_rx_desc_nbuf_sanity_check(struct dp_soc *soc, 2123 hal_ring_desc_t ring_desc, 2124 struct dp_rx_desc *rx_desc) 2125 { 2126 return QDF_STATUS_SUCCESS; 2127 } 2128 #endif 2129 2130 #ifdef WLAN_DP_FEATURE_SW_LATENCY_MGR 2131 void dp_rx_update_stats(struct dp_soc *soc, qdf_nbuf_t nbuf); 2132 #else 2133 static inline 2134 void dp_rx_update_stats(struct dp_soc *soc, qdf_nbuf_t nbuf) 2135 { 2136 } 2137 #endif 2138 2139 /** 2140 * dp_rx_cksum_offload() - set the nbuf checksum as defined by hardware. 2141 * @nbuf: pointer to the first msdu of an amsdu. 2142 * @rx_tlv_hdr: pointer to the start of RX TLV headers. 2143 * 2144 * The ipsumed field of the skb is set based on whether HW validated the 2145 * IP/TCP/UDP checksum. 
2146 * 2147 * Return: void 2148 */ 2149 static inline 2150 void dp_rx_cksum_offload(struct dp_pdev *pdev, 2151 qdf_nbuf_t nbuf, 2152 uint8_t *rx_tlv_hdr) 2153 { 2154 qdf_nbuf_rx_cksum_t cksum = {0}; 2155 //TODO - Move this to ring desc api 2156 //HAL_RX_MSDU_DESC_IP_CHKSUM_FAIL_GET 2157 //HAL_RX_MSDU_DESC_TCP_UDP_CHKSUM_FAIL_GET 2158 uint32_t ip_csum_err, tcp_udp_csum_er; 2159 2160 hal_rx_tlv_csum_err_get(pdev->soc->hal_soc, rx_tlv_hdr, &ip_csum_err, 2161 &tcp_udp_csum_er); 2162 2163 if (qdf_likely(!ip_csum_err && !tcp_udp_csum_er)) { 2164 cksum.l4_result = QDF_NBUF_RX_CKSUM_TCP_UDP_UNNECESSARY; 2165 qdf_nbuf_set_rx_cksum(nbuf, &cksum); 2166 } else { 2167 DP_STATS_INCC(pdev, err.ip_csum_err, 1, ip_csum_err); 2168 DP_STATS_INCC(pdev, err.tcp_udp_csum_err, 1, tcp_udp_csum_er); 2169 } 2170 } 2171 2172 #endif /* QCA_HOST_MODE_WIFI_DISABLED */ 2173 2174 #ifdef WLAN_FEATURE_RX_SOFTIRQ_TIME_LIMIT 2175 static inline 2176 bool dp_rx_reap_loop_pkt_limit_hit(struct dp_soc *soc, int num_reaped, 2177 int max_reap_limit) 2178 { 2179 bool limit_hit = false; 2180 2181 limit_hit = 2182 (num_reaped >= max_reap_limit) ? 
true : false; 2183 2184 if (limit_hit) 2185 DP_STATS_INC(soc, rx.reap_loop_pkt_limit_hit, 1) 2186 2187 return limit_hit; 2188 } 2189 2190 static inline 2191 bool dp_rx_enable_eol_data_check(struct dp_soc *soc) 2192 { 2193 return soc->wlan_cfg_ctx->rx_enable_eol_data_check; 2194 } 2195 2196 static inline int dp_rx_get_loop_pkt_limit(struct dp_soc *soc) 2197 { 2198 struct wlan_cfg_dp_soc_ctxt *cfg = soc->wlan_cfg_ctx; 2199 2200 return cfg->rx_reap_loop_pkt_limit; 2201 } 2202 #else 2203 static inline 2204 bool dp_rx_reap_loop_pkt_limit_hit(struct dp_soc *soc, int num_reaped, 2205 int max_reap_limit) 2206 { 2207 return false; 2208 } 2209 2210 static inline 2211 bool dp_rx_enable_eol_data_check(struct dp_soc *soc) 2212 { 2213 return false; 2214 } 2215 2216 static inline int dp_rx_get_loop_pkt_limit(struct dp_soc *soc) 2217 { 2218 return 0; 2219 } 2220 #endif /* WLAN_FEATURE_RX_SOFTIRQ_TIME_LIMIT */ 2221 2222 void dp_rx_update_stats(struct dp_soc *soc, qdf_nbuf_t nbuf); 2223 2224 #ifdef QCA_SUPPORT_WDS_EXTENDED 2225 /** 2226 * dp_rx_is_list_ready() - Make different lists for 4-address 2227 and 3-address frames 2228 * @nbuf_head: skb list head 2229 * @vdev: vdev 2230 * @txrx_peer : txrx_peer 2231 * @peer_id: peer id of new received frame 2232 * @vdev_id: vdev_id of new received frame 2233 * 2234 * Return: true if peer_ids are different. 
2235 */ 2236 static inline bool 2237 dp_rx_is_list_ready(qdf_nbuf_t nbuf_head, 2238 struct dp_vdev *vdev, 2239 struct dp_txrx_peer *txrx_peer, 2240 uint16_t peer_id, 2241 uint8_t vdev_id) 2242 { 2243 if (nbuf_head && txrx_peer && txrx_peer->peer_id != peer_id) 2244 return true; 2245 2246 return false; 2247 } 2248 #else 2249 static inline bool 2250 dp_rx_is_list_ready(qdf_nbuf_t nbuf_head, 2251 struct dp_vdev *vdev, 2252 struct dp_txrx_peer *txrx_peer, 2253 uint16_t peer_id, 2254 uint8_t vdev_id) 2255 { 2256 if (nbuf_head && vdev && (vdev->vdev_id != vdev_id)) 2257 return true; 2258 2259 return false; 2260 } 2261 #endif 2262 2263 #ifdef WLAN_FEATURE_MARK_FIRST_WAKEUP_PACKET 2264 /** 2265 * dp_rx_mark_first_packet_after_wow_wakeup - get first packet after wow wakeup 2266 * @pdev: pointer to dp_pdev structure 2267 * @rx_tlv: pointer to rx_pkt_tlvs structure 2268 * @nbuf: pointer to skb buffer 2269 * 2270 * Return: None 2271 */ 2272 void dp_rx_mark_first_packet_after_wow_wakeup(struct dp_pdev *pdev, 2273 uint8_t *rx_tlv, 2274 qdf_nbuf_t nbuf); 2275 #else 2276 static inline void 2277 dp_rx_mark_first_packet_after_wow_wakeup(struct dp_pdev *pdev, 2278 uint8_t *rx_tlv, 2279 qdf_nbuf_t nbuf) 2280 { 2281 } 2282 #endif 2283 2284 #if defined(WLAN_MAX_PDEVS) && (WLAN_MAX_PDEVS == 1) 2285 static inline uint8_t 2286 dp_rx_get_defrag_bm_id(struct dp_soc *soc) 2287 { 2288 return DP_DEFRAG_RBM(soc->wbm_sw0_bm_id); 2289 } 2290 2291 static inline uint8_t 2292 dp_rx_get_rx_bm_id(struct dp_soc *soc) 2293 { 2294 return DP_WBM2SW_RBM(soc->wbm_sw0_bm_id); 2295 } 2296 #else 2297 static inline uint8_t 2298 dp_rx_get_rx_bm_id(struct dp_soc *soc) 2299 { 2300 struct wlan_cfg_dp_soc_ctxt *cfg_ctx = soc->wlan_cfg_ctx; 2301 uint8_t wbm2_sw_rx_rel_ring_id; 2302 2303 wbm2_sw_rx_rel_ring_id = wlan_cfg_get_rx_rel_ring_id(cfg_ctx); 2304 2305 return HAL_RX_BUF_RBM_SW_BM(soc->wbm_sw0_bm_id, 2306 wbm2_sw_rx_rel_ring_id); 2307 } 2308 2309 static inline uint8_t 2310 dp_rx_get_defrag_bm_id(struct dp_soc 
*soc) 2311 { 2312 return dp_rx_get_rx_bm_id(soc); 2313 } 2314 #endif 2315 2316 static inline uint16_t 2317 dp_rx_peer_metadata_peer_id_get(struct dp_soc *soc, uint32_t peer_metadata) 2318 { 2319 return soc->arch_ops.dp_rx_peer_metadata_peer_id_get(soc, 2320 peer_metadata); 2321 } 2322 2323 /** 2324 * dp_rx_desc_pool_init_generic() - Generic Rx descriptors initialization 2325 * @soc: SOC handle 2326 * @rx_desc_pool: pointer to RX descriptor pool 2327 * @pool_id: pool ID 2328 * 2329 * Return: None 2330 */ 2331 QDF_STATUS dp_rx_desc_pool_init_generic(struct dp_soc *soc, 2332 struct rx_desc_pool *rx_desc_pool, 2333 uint32_t pool_id); 2334 2335 void dp_rx_desc_pool_deinit_generic(struct dp_soc *soc, 2336 struct rx_desc_pool *rx_desc_pool, 2337 uint32_t pool_id); 2338 2339 /** 2340 * dp_rx_pkt_tracepoints_enabled() - Get the state of rx pkt tracepoint 2341 * 2342 * Return: True if any rx pkt tracepoint is enabled else false 2343 */ 2344 static inline 2345 bool dp_rx_pkt_tracepoints_enabled(void) 2346 { 2347 return (qdf_trace_dp_rx_tcp_pkt_enabled() || 2348 qdf_trace_dp_rx_udp_pkt_enabled() || 2349 qdf_trace_dp_rx_pkt_enabled()); 2350 } 2351 2352 #if defined(QCA_DP_RX_NBUF_NO_MAP_UNMAP) && !defined(BUILD_X86) 2353 static inline 2354 QDF_STATUS dp_pdev_rx_buffers_attach_simple(struct dp_soc *soc, uint32_t mac_id, 2355 struct dp_srng *rxdma_srng, 2356 struct rx_desc_pool *rx_desc_pool, 2357 uint32_t num_req_buffers) 2358 { 2359 return __dp_pdev_rx_buffers_no_map_attach(soc, mac_id, 2360 rxdma_srng, 2361 rx_desc_pool, 2362 num_req_buffers); 2363 } 2364 2365 static inline 2366 void dp_rx_buffers_replenish_simple(struct dp_soc *soc, uint32_t mac_id, 2367 struct dp_srng *rxdma_srng, 2368 struct rx_desc_pool *rx_desc_pool, 2369 uint32_t num_req_buffers, 2370 union dp_rx_desc_list_elem_t **desc_list, 2371 union dp_rx_desc_list_elem_t **tail) 2372 { 2373 __dp_rx_buffers_no_map_replenish(soc, mac_id, rxdma_srng, rx_desc_pool, 2374 num_req_buffers, desc_list, tail); 2375 } 2376 2377 
static inline 2378 void dp_rx_buffers_lt_replenish_simple(struct dp_soc *soc, uint32_t mac_id, 2379 struct dp_srng *rxdma_srng, 2380 struct rx_desc_pool *rx_desc_pool, 2381 uint32_t num_req_buffers, 2382 union dp_rx_desc_list_elem_t **desc_list, 2383 union dp_rx_desc_list_elem_t **tail) 2384 { 2385 __dp_rx_buffers_no_map_lt_replenish(soc, mac_id, rxdma_srng, 2386 rx_desc_pool); 2387 } 2388 2389 static inline 2390 qdf_dma_addr_t dp_rx_nbuf_sync_no_dsb(struct dp_soc *dp_soc, 2391 qdf_nbuf_t nbuf, 2392 uint32_t buf_size) 2393 { 2394 qdf_nbuf_dma_inv_range_no_dsb((void *)nbuf->data, 2395 (void *)(nbuf->data + buf_size)); 2396 2397 return (qdf_dma_addr_t)qdf_mem_virt_to_phys(nbuf->data); 2398 } 2399 2400 static inline 2401 qdf_dma_addr_t dp_rx_nbuf_sync(struct dp_soc *dp_soc, 2402 qdf_nbuf_t nbuf, 2403 uint32_t buf_size) 2404 { 2405 qdf_nbuf_dma_inv_range((void *)nbuf->data, 2406 (void *)(nbuf->data + buf_size)); 2407 2408 return (qdf_dma_addr_t)qdf_mem_virt_to_phys(nbuf->data); 2409 } 2410 2411 #if !defined(SPECULATIVE_READ_DISABLED) 2412 static inline 2413 void dp_rx_nbuf_unmap(struct dp_soc *soc, 2414 struct dp_rx_desc *rx_desc, 2415 uint8_t reo_ring_num) 2416 { 2417 struct rx_desc_pool *rx_desc_pool; 2418 qdf_nbuf_t nbuf; 2419 2420 rx_desc_pool = &soc->rx_desc_buf[rx_desc->pool_id]; 2421 nbuf = rx_desc->nbuf; 2422 2423 qdf_nbuf_dma_inv_range_no_dsb((void *)nbuf->data, 2424 (void *)(nbuf->data + rx_desc_pool->buf_size)); 2425 } 2426 2427 static inline 2428 void dp_rx_nbuf_unmap_pool(struct dp_soc *soc, 2429 struct rx_desc_pool *rx_desc_pool, 2430 qdf_nbuf_t nbuf) 2431 { 2432 qdf_nbuf_dma_inv_range((void *)nbuf->data, 2433 (void *)(nbuf->data + rx_desc_pool->buf_size)); 2434 } 2435 2436 #else 2437 static inline 2438 void dp_rx_nbuf_unmap(struct dp_soc *soc, 2439 struct dp_rx_desc *rx_desc, 2440 uint8_t reo_ring_num) 2441 { 2442 } 2443 2444 static inline 2445 void dp_rx_nbuf_unmap_pool(struct dp_soc *soc, 2446 struct rx_desc_pool *rx_desc_pool, 2447 qdf_nbuf_t nbuf) 
2448 { 2449 } 2450 #endif 2451 2452 static inline 2453 void dp_rx_per_core_stats_update(struct dp_soc *soc, uint8_t ring_id, 2454 uint32_t bufs_reaped) 2455 { 2456 } 2457 2458 static inline 2459 qdf_nbuf_t dp_rx_nbuf_alloc(struct dp_soc *soc, 2460 struct rx_desc_pool *rx_desc_pool) 2461 { 2462 return qdf_nbuf_alloc_simple(soc->osdev, rx_desc_pool->buf_size, 2463 RX_BUFFER_RESERVATION, 2464 rx_desc_pool->buf_alignment, FALSE); 2465 } 2466 2467 static inline 2468 void dp_rx_nbuf_free(qdf_nbuf_t nbuf) 2469 { 2470 qdf_nbuf_free_simple(nbuf); 2471 } 2472 #else 2473 static inline 2474 QDF_STATUS dp_pdev_rx_buffers_attach_simple(struct dp_soc *soc, uint32_t mac_id, 2475 struct dp_srng *rxdma_srng, 2476 struct rx_desc_pool *rx_desc_pool, 2477 uint32_t num_req_buffers) 2478 { 2479 return dp_pdev_rx_buffers_attach(soc, mac_id, 2480 rxdma_srng, 2481 rx_desc_pool, 2482 num_req_buffers); 2483 } 2484 2485 static inline 2486 void dp_rx_buffers_replenish_simple(struct dp_soc *soc, uint32_t mac_id, 2487 struct dp_srng *rxdma_srng, 2488 struct rx_desc_pool *rx_desc_pool, 2489 uint32_t num_req_buffers, 2490 union dp_rx_desc_list_elem_t **desc_list, 2491 union dp_rx_desc_list_elem_t **tail) 2492 { 2493 dp_rx_buffers_replenish(soc, mac_id, rxdma_srng, rx_desc_pool, 2494 num_req_buffers, desc_list, tail); 2495 } 2496 2497 static inline 2498 void dp_rx_buffers_lt_replenish_simple(struct dp_soc *soc, uint32_t mac_id, 2499 struct dp_srng *rxdma_srng, 2500 struct rx_desc_pool *rx_desc_pool, 2501 uint32_t num_req_buffers, 2502 union dp_rx_desc_list_elem_t **desc_list, 2503 union dp_rx_desc_list_elem_t **tail) 2504 { 2505 dp_rx_buffers_replenish(soc, mac_id, rxdma_srng, rx_desc_pool, 2506 num_req_buffers, desc_list, tail); 2507 } 2508 2509 static inline 2510 qdf_dma_addr_t dp_rx_nbuf_sync_no_dsb(struct dp_soc *dp_soc, 2511 qdf_nbuf_t nbuf, 2512 uint32_t buf_size) 2513 { 2514 return (qdf_dma_addr_t)NULL; 2515 } 2516 2517 static inline 2518 qdf_dma_addr_t dp_rx_nbuf_sync(struct dp_soc *dp_soc, 
2519 qdf_nbuf_t nbuf, 2520 uint32_t buf_size) 2521 { 2522 return (qdf_dma_addr_t)NULL; 2523 } 2524 2525 static inline 2526 void dp_rx_nbuf_unmap(struct dp_soc *soc, 2527 struct dp_rx_desc *rx_desc, 2528 uint8_t reo_ring_num) 2529 { 2530 struct rx_desc_pool *rx_desc_pool; 2531 2532 rx_desc_pool = &soc->rx_desc_buf[rx_desc->pool_id]; 2533 dp_ipa_reo_ctx_buf_mapping_lock(soc, reo_ring_num); 2534 dp_ipa_handle_rx_buf_smmu_mapping(soc, rx_desc->nbuf, 2535 rx_desc_pool->buf_size, 2536 false); 2537 2538 qdf_nbuf_unmap_nbytes_single(soc->osdev, rx_desc->nbuf, 2539 QDF_DMA_FROM_DEVICE, 2540 rx_desc_pool->buf_size); 2541 2542 dp_ipa_reo_ctx_buf_mapping_unlock(soc, reo_ring_num); 2543 } 2544 2545 static inline 2546 void dp_rx_nbuf_unmap_pool(struct dp_soc *soc, 2547 struct rx_desc_pool *rx_desc_pool, 2548 qdf_nbuf_t nbuf) 2549 { 2550 dp_ipa_handle_rx_buf_smmu_mapping(soc, nbuf, rx_desc_pool->buf_size, 2551 false); 2552 qdf_nbuf_unmap_nbytes_single(soc->osdev, nbuf, QDF_DMA_FROM_DEVICE, 2553 rx_desc_pool->buf_size); 2554 } 2555 2556 static inline 2557 void dp_rx_per_core_stats_update(struct dp_soc *soc, uint8_t ring_id, 2558 uint32_t bufs_reaped) 2559 { 2560 int cpu_id = qdf_get_cpu(); 2561 2562 DP_STATS_INC(soc, rx.ring_packets[cpu_id][ring_id], bufs_reaped); 2563 } 2564 2565 static inline 2566 qdf_nbuf_t dp_rx_nbuf_alloc(struct dp_soc *soc, 2567 struct rx_desc_pool *rx_desc_pool) 2568 { 2569 return qdf_nbuf_alloc(soc->osdev, rx_desc_pool->buf_size, 2570 RX_BUFFER_RESERVATION, 2571 rx_desc_pool->buf_alignment, FALSE); 2572 } 2573 2574 static inline 2575 void dp_rx_nbuf_free(qdf_nbuf_t nbuf) 2576 { 2577 qdf_nbuf_free(nbuf); 2578 } 2579 #endif 2580 2581 /** 2582 * dp_rx_get_txrx_peer_and_vdev() - Get txrx peer and vdev from peer id 2583 * @nbuf : pointer to the first msdu of an amsdu. 
2584 * @peer_id : Peer id of the peer 2585 * @txrx_ref_handle : Buffer to save the handle for txrx peer's reference 2586 * @pkt_capture_offload : Flag indicating if pkt capture offload is needed 2587 * @vdev : Buffer to hold pointer to vdev 2588 * @rx_pdev : Buffer to hold pointer to rx pdev 2589 * @dsf : delay stats flag 2590 * @old_tid : Old tid 2591 * 2592 * Get txrx peer and vdev from peer id 2593 * 2594 * Return: Pointer to txrx peer 2595 */ 2596 static inline struct dp_txrx_peer * 2597 dp_rx_get_txrx_peer_and_vdev(struct dp_soc *soc, 2598 qdf_nbuf_t nbuf, 2599 uint16_t peer_id, 2600 dp_txrx_ref_handle *txrx_ref_handle, 2601 bool pkt_capture_offload, 2602 struct dp_vdev **vdev, 2603 struct dp_pdev **rx_pdev, 2604 uint32_t *dsf, 2605 uint32_t *old_tid) 2606 { 2607 struct dp_txrx_peer *txrx_peer = NULL; 2608 2609 txrx_peer = dp_txrx_peer_get_ref_by_id(soc, peer_id, txrx_ref_handle, 2610 DP_MOD_ID_RX); 2611 2612 if (qdf_likely(txrx_peer)) { 2613 *vdev = txrx_peer->vdev; 2614 } else { 2615 nbuf->next = NULL; 2616 dp_rx_deliver_to_pkt_capture_no_peer(soc, nbuf, 2617 pkt_capture_offload); 2618 if (!pkt_capture_offload) 2619 dp_rx_deliver_to_stack_no_peer(soc, nbuf); 2620 2621 goto end; 2622 } 2623 2624 if (qdf_unlikely(!(*vdev))) { 2625 qdf_nbuf_free(nbuf); 2626 DP_STATS_INC(soc, rx.err.invalid_vdev, 1); 2627 goto end; 2628 } 2629 2630 *rx_pdev = (*vdev)->pdev; 2631 *dsf = (*rx_pdev)->delay_stats_flag; 2632 *old_tid = 0xff; 2633 2634 end: 2635 return txrx_peer; 2636 } 2637 2638 static inline QDF_STATUS 2639 dp_peer_rx_reorder_queue_setup(struct dp_soc *soc, struct dp_peer *peer, 2640 int tid, uint32_t ba_window_size) 2641 { 2642 return soc->arch_ops.dp_peer_rx_reorder_queue_setup(soc, 2643 peer, tid, 2644 ba_window_size); 2645 } 2646 2647 static inline 2648 void dp_rx_nbuf_list_deliver(struct dp_soc *soc, 2649 struct dp_vdev *vdev, 2650 struct dp_txrx_peer *txrx_peer, 2651 uint16_t peer_id, 2652 uint8_t pkt_capture_offload, 2653 qdf_nbuf_t deliver_list_head, 2654 
qdf_nbuf_t deliver_list_tail) 2655 { 2656 qdf_nbuf_t nbuf, next; 2657 2658 if (qdf_likely(deliver_list_head)) { 2659 if (qdf_likely(txrx_peer)) { 2660 dp_rx_deliver_to_pkt_capture(soc, vdev->pdev, peer_id, 2661 pkt_capture_offload, 2662 deliver_list_head); 2663 if (!pkt_capture_offload) 2664 dp_rx_deliver_to_stack(soc, vdev, txrx_peer, 2665 deliver_list_head, 2666 deliver_list_tail); 2667 } else { 2668 nbuf = deliver_list_head; 2669 while (nbuf) { 2670 next = nbuf->next; 2671 nbuf->next = NULL; 2672 dp_rx_deliver_to_stack_no_peer(soc, nbuf); 2673 nbuf = next; 2674 } 2675 } 2676 } 2677 } 2678 2679 #ifdef DP_TX_RX_TPUT_SIMULATE 2680 /* 2681 * Change this macro value to simulate different RX T-put, 2682 * if OTA is 100 Mbps, to simulate 200 Mbps, then multiplication factor 2683 * is 2, set macro value as 1 (multiplication factor - 1). 2684 */ 2685 #define DP_RX_PKTS_DUPLICATE_CNT 0 2686 static inline 2687 void dp_rx_nbuf_list_dup_deliver(struct dp_soc *soc, 2688 struct dp_vdev *vdev, 2689 struct dp_txrx_peer *txrx_peer, 2690 uint16_t peer_id, 2691 uint8_t pkt_capture_offload, 2692 qdf_nbuf_t ori_list_head, 2693 qdf_nbuf_t ori_list_tail) 2694 { 2695 qdf_nbuf_t new_skb = NULL; 2696 qdf_nbuf_t new_list_head = NULL; 2697 qdf_nbuf_t new_list_tail = NULL; 2698 qdf_nbuf_t nbuf = NULL; 2699 int i; 2700 2701 for (i = 0; i < DP_RX_PKTS_DUPLICATE_CNT; i++) { 2702 nbuf = ori_list_head; 2703 new_list_head = NULL; 2704 new_list_tail = NULL; 2705 2706 while (nbuf) { 2707 new_skb = qdf_nbuf_copy(nbuf); 2708 if (qdf_likely(new_skb)) 2709 DP_RX_LIST_APPEND(new_list_head, 2710 new_list_tail, 2711 new_skb); 2712 else 2713 dp_err("copy skb failed"); 2714 2715 nbuf = qdf_nbuf_next(nbuf); 2716 } 2717 2718 /* deliver the copied nbuf list */ 2719 dp_rx_nbuf_list_deliver(soc, vdev, txrx_peer, peer_id, 2720 pkt_capture_offload, 2721 new_list_head, 2722 new_list_tail); 2723 } 2724 2725 /* deliver the original skb_list */ 2726 dp_rx_nbuf_list_deliver(soc, vdev, txrx_peer, peer_id, 2727 
pkt_capture_offload, 2728 ori_list_head, 2729 ori_list_tail); 2730 } 2731 2732 #define DP_RX_DELIVER_TO_STACK dp_rx_nbuf_list_dup_deliver 2733 2734 #else /* !DP_TX_RX_TPUT_SIMULATE */ 2735 2736 #define DP_RX_DELIVER_TO_STACK dp_rx_nbuf_list_deliver 2737 2738 #endif /* DP_TX_RX_TPUT_SIMULATE */ 2739 2740 #endif /* _DP_RX_H */ 2741