/*
 * Copyright (c) 2016-2021 The Linux Foundation. All rights reserved.
 * Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */

#ifndef _DP_RX_H
#define _DP_RX_H

#include "hal_rx.h"
#include "dp_peer.h"
#include "dp_internal.h"
#include <qdf_tracepoint.h>
#include "dp_ipa.h"

#ifdef RXDMA_OPTIMIZATION
#ifndef RX_DATA_BUFFER_ALIGNMENT
#define RX_DATA_BUFFER_ALIGNMENT	128
#endif
#ifndef RX_MONITOR_BUFFER_ALIGNMENT
#define RX_MONITOR_BUFFER_ALIGNMENT	128
#endif
#else /* RXDMA_OPTIMIZATION */
#define RX_DATA_BUFFER_ALIGNMENT	4
#define RX_MONITOR_BUFFER_ALIGNMENT	4
#endif /* RXDMA_OPTIMIZATION */

#if defined(WLAN_MAX_PDEVS) && (WLAN_MAX_PDEVS == 1)
#define DP_WBM2SW_RBM(sw0_bm_id)	HAL_RX_BUF_RBM_SW1_BM(sw0_bm_id)
/* RBM value used for re-injecting defragmented packets into REO */
#define DP_DEFRAG_RBM(sw0_bm_id)	HAL_RX_BUF_RBM_SW3_BM(sw0_bm_id)
#endif

/* Max buffers in invalid peer SG list */
#define DP_MAX_INVALID_BUFFERS 10
#ifdef DP_INVALID_PEER_ASSERT
#define DP_PDEV_INVALID_PEER_MSDU_CHECK(head, tail) \
		do {                                \
			qdf_assert_always(!(head)); \
			qdf_assert_always(!(tail)); \
		} while (0)
#else
#define DP_PDEV_INVALID_PEER_MSDU_CHECK(head, tail) /* no op */
#endif

#define RX_BUFFER_RESERVATION	0
#ifdef BE_PKTLOG_SUPPORT
#define BUFFER_RESIDUE	1
#define RX_MON_MIN_HEAD_ROOM	64
#endif

#define DP_DEFAULT_NOISEFLOOR	(-96)

#define DP_RX_DESC_MAGIC 0xdec0de

#define dp_rx_alert(params...) QDF_TRACE_FATAL(QDF_MODULE_ID_DP_RX, params)
#define dp_rx_err(params...) QDF_TRACE_ERROR(QDF_MODULE_ID_DP_RX, params)
#define dp_rx_warn(params...) QDF_TRACE_WARN(QDF_MODULE_ID_DP_RX, params)
#define dp_rx_info(params...) \
	__QDF_TRACE_FL(QDF_TRACE_LEVEL_INFO_HIGH, QDF_MODULE_ID_DP_RX, ## params)
#define dp_rx_info_rl(params...) \
	__QDF_TRACE_RL(QDF_TRACE_LEVEL_INFO_HIGH, QDF_MODULE_ID_DP_RX, ## params)
#define dp_rx_debug(params...) QDF_TRACE_DEBUG(QDF_MODULE_ID_DP_RX, params)

/**
 * enum dp_rx_desc_state
 *
 * @RX_DESC_REPLENISHED: rx desc replenished
 * @RX_DESC_IN_FREELIST: rx desc in freelist
 */
enum dp_rx_desc_state {
	RX_DESC_REPLENISHED,
	RX_DESC_IN_FREELIST,
};

#ifndef QCA_HOST_MODE_WIFI_DISABLED
/**
 * struct dp_rx_desc_dbg_info
 *
 * @freelist_caller: name of the function that put the
 *		     rx desc in freelist
 * @freelist_ts: timestamp when the rx desc is put in
 *		 a freelist
 * @replenish_caller: name of the function that last
 *		      replenished the rx desc
 * @replenish_ts: last replenish timestamp
 * @prev_nbuf: previous nbuf info
 * @prev_nbuf_data_addr: previous nbuf data address
 */
struct dp_rx_desc_dbg_info {
	char freelist_caller[QDF_MEM_FUNC_NAME_SIZE];
	uint64_t freelist_ts;
	char replenish_caller[QDF_MEM_FUNC_NAME_SIZE];
	uint64_t replenish_ts;
	qdf_nbuf_t prev_nbuf;
	uint8_t *prev_nbuf_data_addr;
};

#endif /* QCA_HOST_MODE_WIFI_DISABLED */

/**
 * struct dp_rx_desc
 *
 * @nbuf: VA of the "skb" posted
 * @rx_buf_start: VA of the original Rx buffer, before
 *		  movement of any skb->data pointer
 * @paddr_buf_start: PA of the original Rx buffer, before
 *		     movement of any frag pointer
 * @cookie: index into the sw array which holds
 *	    the sw Rx descriptors
 *	    Cookie space is 21 bits:
 *	    lower 18 bits -- index
 *	    upper 3 bits -- pool_id
 * @pool_id: pool Id for which this descriptor is allocated.
 *	     Can only be used if there is no flow steering
 * @chip_id: chip_id indicating MLO chip_id,
 *	     valid or used only in case of multi-chip MLO
 * @in_use: rx_desc is in use
 * @unmapped: used to mark rx_desc as unmapped if the corresponding
 *	      nbuf is already unmapped
 * @in_err_state: Nbuf sanity failed for this descriptor.
 * @nbuf_data_addr: VA of nbuf data posted
 */
struct dp_rx_desc {
	qdf_nbuf_t nbuf;
	uint8_t *rx_buf_start;
	qdf_dma_addr_t paddr_buf_start;
	uint32_t cookie;
	uint8_t pool_id;
	uint8_t chip_id;
#ifdef RX_DESC_DEBUG_CHECK
	uint32_t magic;
	uint8_t *nbuf_data_addr;
	struct dp_rx_desc_dbg_info *dbg_info;
#endif
	uint8_t	in_use:1,
		unmapped:1,
		in_err_state:1;
};

#ifndef QCA_HOST_MODE_WIFI_DISABLED
#ifdef ATH_RX_PRI_SAVE
#define DP_RX_TID_SAVE(_nbuf, _tid) \
	(qdf_nbuf_set_priority(_nbuf, _tid))
#else
#define DP_RX_TID_SAVE(_nbuf, _tid)
#endif

/* RX Descriptor Multi Page memory alloc related */
#define DP_RX_DESC_OFFSET_NUM_BITS 8
#define DP_RX_DESC_PAGE_ID_NUM_BITS 8
#define DP_RX_DESC_POOL_ID_NUM_BITS 4

#define DP_RX_DESC_PAGE_ID_SHIFT DP_RX_DESC_OFFSET_NUM_BITS
#define DP_RX_DESC_POOL_ID_SHIFT \
	(DP_RX_DESC_OFFSET_NUM_BITS + DP_RX_DESC_PAGE_ID_NUM_BITS)
#define RX_DESC_MULTI_PAGE_COOKIE_POOL_ID_MASK \
	(((1 << DP_RX_DESC_POOL_ID_NUM_BITS) - 1) << DP_RX_DESC_POOL_ID_SHIFT)
#define RX_DESC_MULTI_PAGE_COOKIE_PAGE_ID_MASK \
	(((1 << DP_RX_DESC_PAGE_ID_NUM_BITS) - 1) << \
	 DP_RX_DESC_PAGE_ID_SHIFT)
#define RX_DESC_MULTI_PAGE_COOKIE_OFFSET_MASK \
	((1 << DP_RX_DESC_OFFSET_NUM_BITS) - 1)
#define DP_RX_DESC_MULTI_PAGE_COOKIE_GET_POOL_ID(_cookie) \
	(((_cookie) & RX_DESC_MULTI_PAGE_COOKIE_POOL_ID_MASK) >> \
	 DP_RX_DESC_POOL_ID_SHIFT)
#define DP_RX_DESC_MULTI_PAGE_COOKIE_GET_PAGE_ID(_cookie) \
	(((_cookie) & RX_DESC_MULTI_PAGE_COOKIE_PAGE_ID_MASK) >> \
	 DP_RX_DESC_PAGE_ID_SHIFT)
#define DP_RX_DESC_MULTI_PAGE_COOKIE_GET_OFFSET(_cookie) \
	((_cookie) & RX_DESC_MULTI_PAGE_COOKIE_OFFSET_MASK)

#endif /* QCA_HOST_MODE_WIFI_DISABLED */

#define RX_DESC_COOKIE_INDEX_SHIFT	0
#define RX_DESC_COOKIE_INDEX_MASK	0x3ffff /* 18 bits */
#define RX_DESC_COOKIE_POOL_ID_SHIFT	18
#define RX_DESC_COOKIE_POOL_ID_MASK	0x1c0000

#define DP_RX_DESC_COOKIE_MAX	\
	(RX_DESC_COOKIE_INDEX_MASK | RX_DESC_COOKIE_POOL_ID_MASK)

#define DP_RX_DESC_COOKIE_POOL_ID_GET(_cookie)		\
	(((_cookie) & RX_DESC_COOKIE_POOL_ID_MASK) >>	\
			RX_DESC_COOKIE_POOL_ID_SHIFT)

#define DP_RX_DESC_COOKIE_INDEX_GET(_cookie)		\
	(((_cookie) & RX_DESC_COOKIE_INDEX_MASK) >>	\
			RX_DESC_COOKIE_INDEX_SHIFT)

#define dp_rx_add_to_free_desc_list(head, tail, new) \
	__dp_rx_add_to_free_desc_list(head, tail, new, __func__)

#define dp_rx_buffers_replenish(soc, mac_id, rxdma_srng, rx_desc_pool, \
				num_buffers, desc_list, tail, req_only) \
	__dp_rx_buffers_replenish(soc, mac_id, rxdma_srng, rx_desc_pool, \
				  num_buffers, desc_list, tail, req_only, \
				  __func__)

#ifdef WLAN_SUPPORT_RX_FISA
/**
 * dp_rx_set_hdr_pad() - set l3 padding in nbuf cb
 * @nbuf: pkt skb pointer
 * @l3_padding: l3 padding
 *
 * Return: None
 */
static inline
void dp_rx_set_hdr_pad(qdf_nbuf_t nbuf, uint32_t l3_padding)
{
	QDF_NBUF_CB_RX_PACKET_L3_HDR_PAD(nbuf) = l3_padding;
}
#else
static inline
void dp_rx_set_hdr_pad(qdf_nbuf_t nbuf, uint32_t l3_padding)
{
}
#endif
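
/*
 * Illustrative sketch (not part of the driver API): decoding the 21-bit
 * single-page SW cookie with the accessors above. The cookie value used
 * here is hypothetical.
 *
 *	uint32_t cookie = (2 << RX_DESC_COOKIE_POOL_ID_SHIFT) | 0x1234;
 *	uint8_t pool_id = DP_RX_DESC_COOKIE_POOL_ID_GET(cookie);
 *	uint16_t index = DP_RX_DESC_COOKIE_INDEX_GET(cookie);
 *
 * pool_id resolves to 2 (upper 3 bits) and index to 0x1234
 * (lower 18 bits).
 */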

#ifdef DP_RX_SPECIAL_FRAME_NEED
/**
 * dp_rx_is_special_frame() - check whether the RX frame is one of the
 *			      special frames needed
 *
 * @nbuf: RX skb pointer
 * @frame_mask: the mask for the special frames needed
 *
 * Check whether the RX frame matches the special frame mask
 *
 * Return: true - special frame needed, false - no
 */
static inline
bool dp_rx_is_special_frame(qdf_nbuf_t nbuf, uint32_t frame_mask)
{
	if (((frame_mask & FRAME_MASK_IPV4_ARP) &&
	     qdf_nbuf_is_ipv4_arp_pkt(nbuf)) ||
	    ((frame_mask & FRAME_MASK_IPV4_DHCP) &&
	     qdf_nbuf_is_ipv4_dhcp_pkt(nbuf)) ||
	    ((frame_mask & FRAME_MASK_IPV4_EAPOL) &&
	     qdf_nbuf_is_ipv4_eapol_pkt(nbuf)) ||
	    ((frame_mask & FRAME_MASK_IPV6_DHCP) &&
	     qdf_nbuf_is_ipv6_dhcp_pkt(nbuf)))
		return true;

	return false;
}

/**
 * dp_rx_deliver_special_frame() - Deliver the RX special frame to stack
 *				   if it matches the mask
 *
 * @soc: Datapath soc handler
 * @peer: pointer to DP peer
 * @nbuf: pointer to the skb of RX frame
 * @frame_mask: the mask for the special frames needed
 * @rx_tlv_hdr: start of rx tlv header
 *
 * Note: Msdu_len must have been stored in QDF_NBUF_CB_RX_PKT_LEN(nbuf) and
 * a single nbuf is expected.
 *
 * Return: true - nbuf has been delivered to stack, false - not.
 */
bool dp_rx_deliver_special_frame(struct dp_soc *soc, struct dp_txrx_peer *peer,
				 qdf_nbuf_t nbuf, uint32_t frame_mask,
				 uint8_t *rx_tlv_hdr);
#else
static inline
bool dp_rx_is_special_frame(qdf_nbuf_t nbuf, uint32_t frame_mask)
{
	return false;
}

static inline
bool dp_rx_deliver_special_frame(struct dp_soc *soc, struct dp_txrx_peer *peer,
				 qdf_nbuf_t nbuf, uint32_t frame_mask,
				 uint8_t *rx_tlv_hdr)
{
	return false;
}
#endif
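
/*
 * Illustrative sketch (not part of the driver API): delivering EAPOL
 * and DHCP frames to the stack even when the peer is not yet fully set
 * up. soc, txrx_peer, nbuf and rx_tlv_hdr are hypothetical caller-owned
 * handles.
 *
 *	uint32_t mask = FRAME_MASK_IPV4_EAPOL | FRAME_MASK_IPV4_DHCP;
 *
 *	if (dp_rx_is_special_frame(nbuf, mask))
 *		dp_rx_deliver_special_frame(soc, txrx_peer, nbuf,
 *					    mask, rx_tlv_hdr);
 */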

#ifdef FEATURE_RX_LINKSPEED_ROAM_TRIGGER
/**
 * dp_rx_data_is_specific() - Used to exclude specific frames
 *			      not practical for getting rx
 *			      stats like rate, mcs, nss, etc.
 *
 * @hal_soc_hdl: soc handler
 * @rx_tlv_hdr: rx tlv header
 * @nbuf: RX skb pointer
 *
 * Return: true - a specific frame not suitable
 *		  for getting rx stats from it.
 *	   false - a common frame suitable for
 *		   getting rx stats from it.
 */
static inline
bool dp_rx_data_is_specific(hal_soc_handle_t hal_soc_hdl,
			    uint8_t *rx_tlv_hdr,
			    qdf_nbuf_t nbuf)
{
	if (qdf_unlikely(qdf_nbuf_is_da_mcbc(nbuf)))
		return true;

	if (!hal_rx_tlv_first_mpdu_get(hal_soc_hdl, rx_tlv_hdr))
		return true;

	if (!hal_rx_msdu_end_first_msdu_get(hal_soc_hdl, rx_tlv_hdr))
		return true;

	/* ARP, EAPOL is neither IPV6 ETH nor IPV4 ETH from L3 level */
	if (qdf_likely(hal_rx_tlv_l3_type_get(hal_soc_hdl, rx_tlv_hdr) ==
		       QDF_NBUF_TRAC_IPV4_ETH_TYPE)) {
		if (qdf_nbuf_is_ipv4_dhcp_pkt(nbuf))
			return true;
	} else if (qdf_likely(hal_rx_tlv_l3_type_get(hal_soc_hdl, rx_tlv_hdr) ==
			      QDF_NBUF_TRAC_IPV6_ETH_TYPE)) {
		if (qdf_nbuf_is_ipv6_dhcp_pkt(nbuf))
			return true;
	} else {
		return true;
	}

	return false;
}
#else
static inline
bool dp_rx_data_is_specific(hal_soc_handle_t hal_soc_hdl,
			    uint8_t *rx_tlv_hdr,
			    qdf_nbuf_t nbuf)
{
	/*
	 * default return is true to make sure that rx stats
	 * will not be handled when this feature is disabled
	 */
	return true;
}
#endif /* FEATURE_RX_LINKSPEED_ROAM_TRIGGER */

#ifndef QCA_HOST_MODE_WIFI_DISABLED
#ifdef DP_RX_DISABLE_NDI_MDNS_FORWARDING
static inline
bool dp_rx_check_ndi_mdns_fwding(struct dp_txrx_peer *ta_txrx_peer,
				 qdf_nbuf_t nbuf)
{
	if (ta_txrx_peer->vdev->opmode == wlan_op_mode_ndi &&
	    qdf_nbuf_is_ipv6_mdns_pkt(nbuf)) {
		DP_PEER_PER_PKT_STATS_INC(ta_txrx_peer,
					  rx.intra_bss.mdns_no_fwd, 1);
		return false;
	}
	return true;
}
#else
static inline
bool dp_rx_check_ndi_mdns_fwding(struct dp_txrx_peer *ta_txrx_peer,
				 qdf_nbuf_t nbuf)
{
	return true;
}
#endif
#endif /* QCA_HOST_MODE_WIFI_DISABLED */

/* DOC: Offset to obtain LLC hdr
 *
 * In the case of Wifi parse error
 * to reach LLC header from beginning
 * of VLAN tag we need to skip 8 bytes.
 * Vlan_tag(4) + length(2) + length added
 * by HW(2) = 8 bytes.
 */
#define DP_SKIP_VLAN 8

#ifndef QCA_HOST_MODE_WIFI_DISABLED

/**
 * struct dp_rx_cached_buf - rx cached buffer
 * @node: linked list node
 * @buf: skb buffer
 */
struct dp_rx_cached_buf {
	qdf_list_node_t node;
	qdf_nbuf_t buf;
};

#endif /* QCA_HOST_MODE_WIFI_DISABLED */

/*
 * dp_rx_xor_block() - xor block of data
 * @b: destination data block
 * @a: source data block
 * @len: length of the data to process
 *
 * Return: None
 */
static inline void dp_rx_xor_block(uint8_t *b, const uint8_t *a,
				   qdf_size_t len)
{
	qdf_size_t i;

	for (i = 0; i < len; i++)
		b[i] ^= a[i];
}

/*
 * dp_rx_rotl() - rotate the bits left
 * @val: unsigned integer input value
 * @bits: number of bits
 *
 * Return: Integer with left rotated by number of 'bits'
 */
static inline uint32_t dp_rx_rotl(uint32_t val, int bits)
{
	return (val << bits) | (val >> (32 - bits));
}

/*
 * dp_rx_rotr() - rotate the bits right
 * @val: unsigned integer input value
 * @bits: number of bits
 *
 * Return: Integer with right rotated by number of 'bits'
 */
static inline uint32_t dp_rx_rotr(uint32_t val, int bits)
{
	return (val >> bits) | (val << (32 - bits));
}

/*
 * dp_set_rx_queue() - set queue_mapping in skb
 * @nbuf: skb
 * @queue_id: rx queue_id
 *
 * Return: void
 */
#ifdef QCA_OL_RX_MULTIQ_SUPPORT
static inline void dp_set_rx_queue(qdf_nbuf_t nbuf, uint8_t queue_id)
{
	qdf_nbuf_record_rx_queue(nbuf, queue_id);
}
#else
static inline void dp_set_rx_queue(qdf_nbuf_t nbuf, uint8_t queue_id)
{
}
#endif

/*
 * dp_rx_xswap() - swap the two bytes within each 16-bit half
 * @val: unsigned integer input value
 *
 * Return: Integer with bytes swapped
 */
static inline uint32_t dp_rx_xswap(uint32_t val)
{
	return ((val & 0x00ff00ff) << 8) | ((val & 0xff00ff00) >> 8);
}

/*
 * dp_rx_get_le32_split() - get little endian 32 bits from the four bytes
 * @b0: byte 0
 * @b1: byte 1
 * @b2: byte 2
 * @b3: byte 3
 *
 * Return: Integer with split little endian 32 bits
 */
static inline uint32_t dp_rx_get_le32_split(uint8_t b0, uint8_t b1, uint8_t b2,
					    uint8_t b3)
{
	return b0 | (b1 << 8) | (b2 << 16) | (b3 << 24);
}

/*
 * dp_rx_get_le32() - get little endian 32 bits from a byte array
 * @p: source byte array
 *
 * Return: Integer with little endian 32 bits
 */
static inline uint32_t dp_rx_get_le32(const uint8_t *p)
{
	return dp_rx_get_le32_split(p[0], p[1], p[2], p[3]);
}

/*
 * dp_rx_put_le32() - put little endian 32 bits
 * @p: destination char array
 * @v: source 32-bit integer
 *
 * Return: None
 */
static inline void dp_rx_put_le32(uint8_t *p, uint32_t v)
{
	p[0] = (v) & 0xff;
	p[1] = (v >> 8) & 0xff;
	p[2] = (v >> 16) & 0xff;
	p[3] = (v >> 24) & 0xff;
}

/* Extract one Michael MIC block of data */
#define dp_rx_michael_block(l, r) \
	do {                                    \
		r ^= dp_rx_rotl(l, 17);         \
		l += r;                         \
		r ^= dp_rx_xswap(l);            \
		l += r;                         \
		r ^= dp_rx_rotl(l, 3);          \
		l += r;                         \
		r ^= dp_rx_rotr(l, 2);          \
		l += r;                         \
	} while (0)
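
/*
 * Illustrative sketch (not part of the driver API): running one Michael
 * block round over a hypothetical 8-byte message fragment using the
 * helpers above.
 *
 *	const uint8_t msg[8] = { 0x01, 0x02, 0x03, 0x04,
 *				 0x05, 0x06, 0x07, 0x08 };
 *	uint32_t l = dp_rx_get_le32(&msg[0]);
 *	uint32_t r = dp_rx_get_le32(&msg[4]);
 *
 *	dp_rx_michael_block(l, r);
 *
 * After the macro, l and r hold the updated Michael MIC state.
 */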

/**
 * union dp_rx_desc_list_elem_t
 *
 * @next: Next pointer to form free list
 * @rx_desc: DP Rx descriptor
 */
union dp_rx_desc_list_elem_t {
	union dp_rx_desc_list_elem_t *next;
	struct dp_rx_desc rx_desc;
};

#ifdef RX_DESC_MULTI_PAGE_ALLOC
/**
 * dp_rx_desc_find() - find dp rx descriptor from page ID and offset
 * @page_id: Page ID
 * @offset: Offset of the descriptor element
 * @rx_pool: RX descriptor pool
 *
 * Return: RX descriptor element
 */
union dp_rx_desc_list_elem_t *dp_rx_desc_find(uint16_t page_id, uint16_t offset,
					      struct rx_desc_pool *rx_pool);

static inline
struct dp_rx_desc *dp_get_rx_desc_from_cookie(struct dp_soc *soc,
					      struct rx_desc_pool *pool,
					      uint32_t cookie)
{
	uint8_t pool_id = DP_RX_DESC_MULTI_PAGE_COOKIE_GET_POOL_ID(cookie);
	uint16_t page_id = DP_RX_DESC_MULTI_PAGE_COOKIE_GET_PAGE_ID(cookie);
	uint8_t offset = DP_RX_DESC_MULTI_PAGE_COOKIE_GET_OFFSET(cookie);
	struct rx_desc_pool *rx_desc_pool;
	union dp_rx_desc_list_elem_t *rx_desc_elem;

	if (qdf_unlikely(pool_id >= MAX_PDEV_CNT))
		return NULL;

	rx_desc_pool = &pool[pool_id];
	rx_desc_elem = (union dp_rx_desc_list_elem_t *)
		(rx_desc_pool->desc_pages.cacheable_pages[page_id] +
		rx_desc_pool->elem_size * offset);

	return &rx_desc_elem->rx_desc;
}

static inline
struct dp_rx_desc *dp_get_rx_mon_status_desc_from_cookie(struct dp_soc *soc,
							 struct rx_desc_pool *pool,
							 uint32_t cookie)
{
	uint8_t pool_id = DP_RX_DESC_MULTI_PAGE_COOKIE_GET_POOL_ID(cookie);
	uint16_t page_id = DP_RX_DESC_MULTI_PAGE_COOKIE_GET_PAGE_ID(cookie);
	uint8_t offset = DP_RX_DESC_MULTI_PAGE_COOKIE_GET_OFFSET(cookie);
	struct rx_desc_pool *rx_desc_pool;
	union dp_rx_desc_list_elem_t *rx_desc_elem;

	if (qdf_unlikely(pool_id >= NUM_RXDMA_RINGS_PER_PDEV))
		return NULL;

	rx_desc_pool = &pool[pool_id];
	rx_desc_elem = (union dp_rx_desc_list_elem_t *)
		(rx_desc_pool->desc_pages.cacheable_pages[page_id] +
		rx_desc_pool->elem_size * offset);

	return &rx_desc_elem->rx_desc;
}

/**
 * dp_rx_cookie_2_va_rxdma_buf() - Converts cookie to a virtual address of
 *				   the Rx descriptor on Rx DMA source ring buffer
 * @soc: core txrx main context
 * @cookie: cookie used to lookup virtual address
 *
 * Return: Pointer to the Rx descriptor
 */
static inline
struct dp_rx_desc *dp_rx_cookie_2_va_rxdma_buf(struct dp_soc *soc,
					       uint32_t cookie)
{
	return dp_get_rx_desc_from_cookie(soc, &soc->rx_desc_buf[0], cookie);
}

/**
 * dp_rx_cookie_2_va_mon_buf() - Converts cookie to a virtual address of
 *				 the Rx descriptor on monitor ring buffer
 * @soc: core txrx main context
 * @cookie: cookie used to lookup virtual address
 *
 * Return: Pointer to the Rx descriptor
 */
static inline
struct dp_rx_desc *dp_rx_cookie_2_va_mon_buf(struct dp_soc *soc,
					     uint32_t cookie)
{
	return dp_get_rx_desc_from_cookie(soc, &soc->rx_desc_mon[0], cookie);
}

/**
 * dp_rx_cookie_2_va_mon_status() - Converts cookie to a virtual address of
 *				    the Rx descriptor on monitor status ring buffer
 * @soc: core txrx main context
 * @cookie: cookie used to lookup virtual address
 *
 * Return: Pointer to the Rx descriptor
 */
static inline
struct dp_rx_desc *dp_rx_cookie_2_va_mon_status(struct dp_soc *soc,
						uint32_t cookie)
{
	return dp_get_rx_mon_status_desc_from_cookie(soc,
						     &soc->rx_desc_status[0],
						     cookie);
}
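
/*
 * Illustrative sketch (not part of the driver API): resolving a
 * multi-page SW cookie reaped from a ring entry back to its host
 * descriptor. rx_buf_cookie is a hypothetical cookie read from the
 * ring descriptor.
 *
 *	struct dp_rx_desc *rx_desc =
 *		dp_rx_cookie_2_va_rxdma_buf(soc, rx_buf_cookie);
 *
 *	if (qdf_unlikely(!rx_desc))
 *		return;	// out-of-range pool id encoded in the cookie
 */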
#else

void dp_rx_desc_pool_init(struct dp_soc *soc, uint32_t pool_id,
			  uint32_t pool_size,
			  struct rx_desc_pool *rx_desc_pool);

/**
 * dp_rx_cookie_2_va_rxdma_buf() - Converts cookie to a virtual address of
 *				   the Rx descriptor on Rx DMA source ring buffer
 * @soc: core txrx main context
 * @cookie: cookie used to lookup virtual address
 *
 * Return: void *: Virtual Address of the Rx descriptor
 */
static inline
void *dp_rx_cookie_2_va_rxdma_buf(struct dp_soc *soc, uint32_t cookie)
{
	uint8_t pool_id = DP_RX_DESC_COOKIE_POOL_ID_GET(cookie);
	uint16_t index = DP_RX_DESC_COOKIE_INDEX_GET(cookie);
	struct rx_desc_pool *rx_desc_pool;

	if (qdf_unlikely(pool_id >= MAX_RXDESC_POOLS))
		return NULL;

	rx_desc_pool = &soc->rx_desc_buf[pool_id];

	if (qdf_unlikely(index >= rx_desc_pool->pool_size))
		return NULL;

	return &rx_desc_pool->array[index].rx_desc;
}

/**
 * dp_rx_cookie_2_va_mon_buf() - Converts cookie to a virtual address of
 *				 the Rx descriptor on monitor ring buffer
 * @soc: core txrx main context
 * @cookie: cookie used to lookup virtual address
 *
 * Return: void *: Virtual Address of the Rx descriptor
 */
static inline
void *dp_rx_cookie_2_va_mon_buf(struct dp_soc *soc, uint32_t cookie)
{
	uint8_t pool_id = DP_RX_DESC_COOKIE_POOL_ID_GET(cookie);
	uint16_t index = DP_RX_DESC_COOKIE_INDEX_GET(cookie);
	/* TODO */
	/* Add sanity for pool_id & index */
	return &(soc->rx_desc_mon[pool_id].array[index].rx_desc);
}

/**
 * dp_rx_cookie_2_va_mon_status() - Converts cookie to a virtual address of
 *				    the Rx descriptor on monitor status ring buffer
 * @soc: core txrx main context
 * @cookie: cookie used to lookup virtual address
 *
 * Return: void *: Virtual Address of the Rx descriptor
 */
static inline
void *dp_rx_cookie_2_va_mon_status(struct dp_soc *soc, uint32_t cookie)
{
	uint8_t pool_id = DP_RX_DESC_COOKIE_POOL_ID_GET(cookie);
	uint16_t index = DP_RX_DESC_COOKIE_INDEX_GET(cookie);
	/* TODO */
	/* Add sanity for pool_id & index */
	return &(soc->rx_desc_status[pool_id].array[index].rx_desc);
}
#endif /* RX_DESC_MULTI_PAGE_ALLOC */

#ifndef QCA_HOST_MODE_WIFI_DISABLED

static inline bool dp_rx_check_ap_bridge(struct dp_vdev *vdev)
{
	return vdev->ap_bridge_enabled;
}

#ifdef DP_RX_DESC_COOKIE_INVALIDATE
static inline QDF_STATUS
dp_rx_cookie_check_and_invalidate(hal_ring_desc_t ring_desc)
{
	if (qdf_unlikely(HAL_RX_REO_BUF_COOKIE_INVALID_GET(ring_desc)))
		return QDF_STATUS_E_FAILURE;

	HAL_RX_REO_BUF_COOKIE_INVALID_SET(ring_desc);
	return QDF_STATUS_SUCCESS;
}

/**
 * dp_rx_cookie_reset_invalid_bit() - Reset the invalid bit of the cookie
 *				      field in ring descriptor
 * @ring_desc: ring descriptor
 *
 * Return: None
 */
static inline void
dp_rx_cookie_reset_invalid_bit(hal_ring_desc_t ring_desc)
{
	HAL_RX_REO_BUF_COOKIE_INVALID_RESET(ring_desc);
}
#else
static inline QDF_STATUS
dp_rx_cookie_check_and_invalidate(hal_ring_desc_t ring_desc)
{
	return QDF_STATUS_SUCCESS;
}

static inline void
dp_rx_cookie_reset_invalid_bit(hal_ring_desc_t ring_desc)
{
}
#endif

#endif /* QCA_HOST_MODE_WIFI_DISABLED */
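
/*
 * Illustrative sketch (not part of the driver API): guarding against a
 * duplicate reap of the same ring entry using the cookie-invalidate
 * WAR above. ring_desc is a hypothetical descriptor just read from the
 * REO ring.
 *
 *	if (dp_rx_cookie_check_and_invalidate(ring_desc) !=
 *	    QDF_STATUS_SUCCESS) {
 *		// cookie already consumed once; skip this entry
 *	}
 */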

#if defined(RX_DESC_MULTI_PAGE_ALLOC) && \
	defined(DP_WAR_VALIDATE_RX_ERR_MSDU_COOKIE)
/**
 * dp_rx_is_sw_cookie_valid() - check whether SW cookie valid
 * @soc: dp soc ref
 * @cookie: Rx buf SW cookie value
 *
 * Return: true if cookie is valid else false
 */
static inline bool dp_rx_is_sw_cookie_valid(struct dp_soc *soc,
					    uint32_t cookie)
{
	uint8_t pool_id = DP_RX_DESC_MULTI_PAGE_COOKIE_GET_POOL_ID(cookie);
	uint16_t page_id = DP_RX_DESC_MULTI_PAGE_COOKIE_GET_PAGE_ID(cookie);
	uint8_t offset = DP_RX_DESC_MULTI_PAGE_COOKIE_GET_OFFSET(cookie);
	struct rx_desc_pool *rx_desc_pool;

	if (qdf_unlikely(pool_id >= MAX_PDEV_CNT))
		goto fail;

	rx_desc_pool = &soc->rx_desc_buf[pool_id];

	if (page_id >= rx_desc_pool->desc_pages.num_pages ||
	    offset >= rx_desc_pool->desc_pages.num_element_per_page)
		goto fail;

	return true;

fail:
	DP_STATS_INC(soc, rx.err.invalid_cookie, 1);
	return false;
}
#else
/**
 * dp_rx_is_sw_cookie_valid() - check whether SW cookie valid
 * @soc: dp soc ref
 * @cookie: Rx buf SW cookie value
 *
 * When multi page alloc is disabled SW cookie validness is
 * checked while fetching the Rx descriptor, so there is no need
 * to check here.
 *
 * Return: true if cookie is valid else false
 */
static inline bool dp_rx_is_sw_cookie_valid(struct dp_soc *soc,
					    uint32_t cookie)
{
	return true;
}
#endif

QDF_STATUS dp_rx_desc_pool_is_allocated(struct rx_desc_pool *rx_desc_pool);
QDF_STATUS dp_rx_desc_pool_alloc(struct dp_soc *soc,
				 uint32_t pool_size,
				 struct rx_desc_pool *rx_desc_pool);

void dp_rx_desc_pool_init(struct dp_soc *soc, uint32_t pool_id,
			  uint32_t pool_size,
			  struct rx_desc_pool *rx_desc_pool);

void dp_rx_add_desc_list_to_free_list(struct dp_soc *soc,
				      union dp_rx_desc_list_elem_t **local_desc_list,
				      union dp_rx_desc_list_elem_t **tail,
				      uint16_t pool_id,
				      struct rx_desc_pool *rx_desc_pool);

uint16_t dp_rx_get_free_desc_list(struct dp_soc *soc, uint32_t pool_id,
				  struct rx_desc_pool *rx_desc_pool,
				  uint16_t num_descs,
				  union dp_rx_desc_list_elem_t **desc_list,
				  union dp_rx_desc_list_elem_t **tail);

QDF_STATUS dp_rx_pdev_desc_pool_alloc(struct dp_pdev *pdev);
void dp_rx_pdev_desc_pool_free(struct dp_pdev *pdev);

QDF_STATUS dp_rx_pdev_desc_pool_init(struct dp_pdev *pdev);
void dp_rx_pdev_desc_pool_deinit(struct dp_pdev *pdev);
void dp_rx_desc_pool_deinit(struct dp_soc *soc,
			    struct rx_desc_pool *rx_desc_pool,
			    uint32_t pool_id);

QDF_STATUS dp_rx_pdev_attach(struct dp_pdev *pdev);
QDF_STATUS dp_rx_pdev_buffers_alloc(struct dp_pdev *pdev);
void dp_rx_pdev_buffers_free(struct dp_pdev *pdev);

void dp_rx_pdev_detach(struct dp_pdev *pdev);

void dp_print_napi_stats(struct dp_soc *soc);

/**
 * dp_rx_vdev_detach() - detach vdev from dp rx
 * @vdev: virtual device instance
 *
 * Return: QDF_STATUS_SUCCESS: success
 *	   QDF_STATUS_E_RESOURCES: Error return
 */
QDF_STATUS dp_rx_vdev_detach(struct dp_vdev *vdev);

#ifndef QCA_HOST_MODE_WIFI_DISABLED

uint32_t
dp_rx_process(struct dp_intr *int_ctx, hal_ring_handle_t hal_ring_hdl,
	      uint8_t reo_ring_num,
	      uint32_t quota);

/**
 * dp_rx_err_process() - Processes error frames routed to REO error ring
 * @int_ctx: pointer to DP interrupt context
 * @soc: core txrx main context
 * @hal_ring_hdl: opaque pointer to the HAL Rx Error Ring, which will be serviced
 * @quota: No. of units (packets) that can be serviced in one shot.
 *
 * This function implements error processing and top level demultiplexer
 * for all the frames routed to REO error ring.
 *
 * Return: uint32_t: No. of elements processed
 */
uint32_t dp_rx_err_process(struct dp_intr *int_ctx, struct dp_soc *soc,
			   hal_ring_handle_t hal_ring_hdl, uint32_t quota);

/**
 * dp_rx_wbm_err_process() - Processes error frames routed to WBM release ring
 * @int_ctx: pointer to DP interrupt context
 * @soc: core txrx main context
 * @hal_ring_hdl: opaque pointer to the HAL Rx Error Ring, which will be serviced
 * @quota: No. of units (packets) that can be serviced in one shot.
 *
 * This function implements error processing and top level demultiplexer
 * for all the frames routed to WBM2HOST sw release ring.
 *
 * Return: uint32_t: No. of elements processed
 */
uint32_t
dp_rx_wbm_err_process(struct dp_intr *int_ctx, struct dp_soc *soc,
		      hal_ring_handle_t hal_ring_hdl, uint32_t quota);

/**
 * dp_rx_sg_create() - create a frag_list for MSDUs which are spread across
 *		       multiple nbufs.
 * @soc: core txrx main context
 * @nbuf: pointer to the first msdu of an amsdu.
 *
 * This function implements the creation of RX frag_list for cases
 * where an MSDU is spread across multiple nbufs.
 *
 * Return: returns the head nbuf which contains complete frag_list.
 */
qdf_nbuf_t dp_rx_sg_create(struct dp_soc *soc, qdf_nbuf_t nbuf);

/**
 * dp_rx_is_sg_supported() - SG packets processing supported or not.
 *
 * Return: returns true when processing is supported else false.
 */
bool dp_rx_is_sg_supported(void);

/*
 * dp_rx_desc_nbuf_and_pool_free() - free the sw rx desc pool called during
 *				     de-initialization of wifi module.
 *
 * @soc: core txrx main context
 * @pool_id: pool_id which is one of 3 mac_ids
 * @rx_desc_pool: rx descriptor pool pointer
 *
 * Return: None
 */
void dp_rx_desc_nbuf_and_pool_free(struct dp_soc *soc, uint32_t pool_id,
				   struct rx_desc_pool *rx_desc_pool);

#endif /* QCA_HOST_MODE_WIFI_DISABLED */

/*
 * dp_rx_desc_nbuf_free() - free the sw rx desc nbufs called during
 *			    de-initialization of wifi module.
 *
 * @soc: core txrx main context
 * @rx_desc_pool: rx descriptor pool pointer
 * @is_mon_pool: true if the pool serves monitor mode rings
 *
 * Return: None
 */
void dp_rx_desc_nbuf_free(struct dp_soc *soc,
			  struct rx_desc_pool *rx_desc_pool,
			  bool is_mon_pool);

#ifdef DP_RX_MON_MEM_FRAG
/*
 * dp_rx_desc_frag_free() - free the sw rx desc frag called during
 *			    de-initialization of wifi module.
 *
 * @soc: core txrx main context
 * @rx_desc_pool: rx descriptor pool pointer
 *
 * Return: None
 */
void dp_rx_desc_frag_free(struct dp_soc *soc,
			  struct rx_desc_pool *rx_desc_pool);
#else
static inline
void dp_rx_desc_frag_free(struct dp_soc *soc,
			  struct rx_desc_pool *rx_desc_pool)
{
}
#endif

/*
 * dp_rx_desc_pool_free() - free the sw rx desc array called during
 *			    de-initialization of wifi module.
 *
 * @soc: core txrx main context
 * @rx_desc_pool: rx descriptor pool pointer
 *
 * Return: None
 */
void dp_rx_desc_pool_free(struct dp_soc *soc,
			  struct rx_desc_pool *rx_desc_pool);

void dp_rx_deliver_raw(struct dp_vdev *vdev, qdf_nbuf_t nbuf_list,
		       struct dp_txrx_peer *peer);

#ifdef RX_DESC_LOGGING
/*
 * dp_rx_desc_alloc_dbg_info() - Alloc memory for rx descriptor debug
 *				 structure
 * @rx_desc: rx descriptor pointer
 *
 * Return: None
 */
static inline
void dp_rx_desc_alloc_dbg_info(struct dp_rx_desc *rx_desc)
{
	rx_desc->dbg_info = qdf_mem_malloc(sizeof(struct dp_rx_desc_dbg_info));
}

/*
 * dp_rx_desc_free_dbg_info() - Free rx descriptor debug
 *				structure memory
 * @rx_desc: rx descriptor pointer
 *
 * Return: None
 */
static inline
void dp_rx_desc_free_dbg_info(struct dp_rx_desc *rx_desc)
{
	qdf_mem_free(rx_desc->dbg_info);
}

/*
 * dp_rx_desc_update_dbg_info() - Update rx descriptor debug info
 *				  structure memory
 * @rx_desc: rx descriptor pointer
 * @func_name: name of the caller function
 * @flag: RX_DESC_REPLENISHED or RX_DESC_IN_FREELIST
 *
 * Return: None
 */
static
void dp_rx_desc_update_dbg_info(struct dp_rx_desc *rx_desc,
				const char *func_name, uint8_t flag)
{
	struct dp_rx_desc_dbg_info *info = rx_desc->dbg_info;

	if (!info)
		return;

	if (flag == RX_DESC_REPLENISHED) {
		qdf_str_lcopy(info->replenish_caller, func_name,
			      QDF_MEM_FUNC_NAME_SIZE);
		info->replenish_ts = qdf_get_log_timestamp();
	} else {
		qdf_str_lcopy(info->freelist_caller, func_name,
			      QDF_MEM_FUNC_NAME_SIZE);
		info->freelist_ts = qdf_get_log_timestamp();
		info->prev_nbuf = rx_desc->nbuf;
		info->prev_nbuf_data_addr = rx_desc->nbuf_data_addr;
		rx_desc->nbuf_data_addr = NULL;
	}
}
#else

static inline
void dp_rx_desc_alloc_dbg_info(struct dp_rx_desc *rx_desc)
{
}

static inline
void dp_rx_desc_free_dbg_info(struct dp_rx_desc *rx_desc)
{
}

static inline
void dp_rx_desc_update_dbg_info(struct dp_rx_desc *rx_desc,
				const char *func_name, uint8_t flag)
{
}
#endif /* RX_DESC_LOGGING */

/**
 * __dp_rx_add_to_free_desc_list() - Adds to a local free descriptor list
 *
 * @head: pointer to the head of local free list
 * @tail: pointer to the tail of local free list
 * @new: new descriptor that is added to the free list
 * @func_name: caller func name
 *
 * Return: void
 */
static inline
void __dp_rx_add_to_free_desc_list(union dp_rx_desc_list_elem_t **head,
				   union dp_rx_desc_list_elem_t **tail,
				   struct dp_rx_desc *new, const char *func_name)
{
	qdf_assert(head && new);

	dp_rx_desc_update_dbg_info(new, func_name, RX_DESC_IN_FREELIST);

	new->nbuf = NULL;
	new->in_use = 0;

	((union dp_rx_desc_list_elem_t *)new)->next = *head;
	*head = (union dp_rx_desc_list_elem_t *)new;
	/* reset tail if head->next is NULL */
	if (!*tail || !(*head)->next)
		*tail = *head;
}
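
/*
 * Illustrative sketch (not part of the driver API): returning two
 * hypothetical descriptors (rx_desc_a, rx_desc_b) to a local free list
 * and handing the whole list back to the pool.
 *
 *	union dp_rx_desc_list_elem_t *head = NULL;
 *	union dp_rx_desc_list_elem_t *tail = NULL;
 *
 *	dp_rx_add_to_free_desc_list(&head, &tail, rx_desc_a);
 *	dp_rx_add_to_free_desc_list(&head, &tail, rx_desc_b);
 *	dp_rx_add_desc_list_to_free_list(soc, &head, &tail,
 *					 pool_id, rx_desc_pool);
 */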

uint8_t dp_rx_process_invalid_peer(struct dp_soc *soc, qdf_nbuf_t nbuf,
				   uint8_t mac_id);
void dp_rx_process_invalid_peer_wrapper(struct dp_soc *soc,
					qdf_nbuf_t mpdu, bool mpdu_done,
					uint8_t mac_id);
void dp_rx_process_mic_error(struct dp_soc *soc, qdf_nbuf_t nbuf,
			     uint8_t *rx_tlv_hdr, struct dp_txrx_peer *peer);
void dp_2k_jump_handle(struct dp_soc *soc, qdf_nbuf_t nbuf,
		       uint8_t *rx_tlv_hdr,
		       uint16_t peer_id, uint8_t tid);

#define DP_RX_HEAD_APPEND(head, elem) \
	do {                                          \
		qdf_nbuf_set_next((elem), (head));    \
		(head) = (elem);                      \
	} while (0)

#define DP_RX_LIST_APPEND(head, tail, elem) \
	do {                                                          \
		if (!(head)) {                                        \
			(head) = (elem);                              \
			QDF_NBUF_CB_RX_NUM_ELEMENTS_IN_LIST(head) = 1;\
		} else {                                              \
			qdf_nbuf_set_next((tail), (elem));            \
			QDF_NBUF_CB_RX_NUM_ELEMENTS_IN_LIST(head)++;  \
		}                                                     \
		(tail) = (elem);                                      \
		qdf_nbuf_set_next((tail), NULL);                      \
	} while (0)

#define DP_RX_MERGE_TWO_LIST(phead, ptail, chead, ctail) \
	do {                                                          \
		if (!(phead)) {                                       \
			(phead) = (chead);                            \
		} else {                                              \
			qdf_nbuf_set_next((ptail), (chead));          \
			QDF_NBUF_CB_RX_NUM_ELEMENTS_IN_LIST(phead) += \
				QDF_NBUF_CB_RX_NUM_ELEMENTS_IN_LIST(chead); \
		}                                                     \
		(ptail) = (ctail);                                    \
		qdf_nbuf_set_next((ptail), NULL);                     \
	} while (0)
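
/*
 * Illustrative sketch (not part of the driver API): accumulating MSDUs
 * into a delivery list with DP_RX_LIST_APPEND. nbuf1 and nbuf2 are
 * hypothetical nbufs owned by the caller.
 *
 *	qdf_nbuf_t head = NULL, tail = NULL;
 *
 *	DP_RX_LIST_APPEND(head, tail, nbuf1);
 *	DP_RX_LIST_APPEND(head, tail, nbuf2);
 *
 * QDF_NBUF_CB_RX_NUM_ELEMENTS_IN_LIST(head) now reads 2, and the list
 * can be merged into a parent list with DP_RX_MERGE_TWO_LIST.
 */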

#if defined(QCA_PADDR_CHECK_ON_3TH_PLATFORM)
/*
 * on some third-party platforms, the memory below 0x2000
 * is reserved for target use, so any memory allocated in this
 * region should not be used by host
 */
#define MAX_RETRY 50
#define DP_PHY_ADDR_RESERVED 0x2000
#elif defined(BUILD_X86)
/*
 * in M2M emulation platforms (x86) the memory below 0x50000000
 * is reserved for target use, so any memory allocated in this
 * region should not be used by host
 */
#define MAX_RETRY 100
#define DP_PHY_ADDR_RESERVED 0x50000000
#endif

#if defined(QCA_PADDR_CHECK_ON_3TH_PLATFORM) || defined(BUILD_X86)
/**
 * dp_check_paddr() - check if current phy address is valid or not
 * @dp_soc: core txrx main context
 * @rx_netbuf: skb buffer
 * @paddr: physical address
 * @rx_desc_pool: struct of rx descriptor pool
 *
 * If the physical address of nbuf->data is less than
 * DP_PHY_ADDR_RESERVED, free the nbuf and try allocating a new nbuf,
 * retrying up to MAX_RETRY times.
 *
 * This is a temp WAR till we fix it properly.
 *
 * Return: success or failure.
 */
static inline
int dp_check_paddr(struct dp_soc *dp_soc,
		   qdf_nbuf_t *rx_netbuf,
		   qdf_dma_addr_t *paddr,
		   struct rx_desc_pool *rx_desc_pool)
{
	uint32_t nbuf_retry = 0;
	int32_t ret;

	if (qdf_likely(*paddr > DP_PHY_ADDR_RESERVED))
		return QDF_STATUS_SUCCESS;

	do {
		dp_debug("invalid phy addr 0x%llx, trying again",
			 (uint64_t)(*paddr));
		nbuf_retry++;
		if ((*rx_netbuf)) {
			/* Not freeing buffer intentionally.
			 * Observed that the same buffer is getting
			 * re-allocated, resulting in longer load time and
			 * WMI init timeout.
			 * This buffer is anyway not useful, so skip it.
			 * Add such buffers to the invalid list and free
			 * them at driver unload.
			 */
			qdf_nbuf_unmap_nbytes_single(dp_soc->osdev,
						     *rx_netbuf,
						     QDF_DMA_FROM_DEVICE,
						     rx_desc_pool->buf_size);
			qdf_nbuf_queue_add(&dp_soc->invalid_buf_queue,
					   *rx_netbuf);
		}

		*rx_netbuf = qdf_nbuf_alloc(dp_soc->osdev,
					    rx_desc_pool->buf_size,
					    RX_BUFFER_RESERVATION,
					    rx_desc_pool->buf_alignment,
					    FALSE);

		if (qdf_unlikely(!(*rx_netbuf)))
			return QDF_STATUS_E_FAILURE;

		ret = qdf_nbuf_map_nbytes_single(dp_soc->osdev,
						 *rx_netbuf,
						 QDF_DMA_FROM_DEVICE,
						 rx_desc_pool->buf_size);

		if (qdf_unlikely(ret == QDF_STATUS_E_FAILURE)) {
			qdf_nbuf_free(*rx_netbuf);
			*rx_netbuf = NULL;
			continue;
		}

		*paddr = qdf_nbuf_get_frag_paddr(*rx_netbuf, 0);

		if (qdf_likely(*paddr > DP_PHY_ADDR_RESERVED))
			return QDF_STATUS_SUCCESS;

	} while (nbuf_retry < MAX_RETRY);

	if ((*rx_netbuf)) {
		qdf_nbuf_unmap_nbytes_single(dp_soc->osdev,
					     *rx_netbuf,
					     QDF_DMA_FROM_DEVICE,
					     rx_desc_pool->buf_size);
		qdf_nbuf_queue_add(&dp_soc->invalid_buf_queue,
				   *rx_netbuf);
	}

	return QDF_STATUS_E_FAILURE;
}

#else
static inline
int dp_check_paddr(struct dp_soc *dp_soc,
		   qdf_nbuf_t *rx_netbuf,
		   qdf_dma_addr_t *paddr,
		   struct rx_desc_pool *rx_desc_pool)
{
	return QDF_STATUS_SUCCESS;
}

#endif

/**
 * dp_rx_cookie_2_link_desc_va() - Converts cookie to a virtual address of
 *				   the MSDU Link Descriptor
 * @soc: core txrx main context
 * @buf_info: buf_info includes cookie that is used to lookup the
 *	      virtual address of the link descriptor after deriving the
 *	      page id and the offset or index of the desc on the
 *	      associated page.
 *
 * This is the VA of the link descriptor, that HAL layer later uses to
 * retrieve the list of MSDUs for a given MPDU.
 *
 * Return: void *: Virtual Address of the Rx descriptor
 */
static inline
void *dp_rx_cookie_2_link_desc_va(struct dp_soc *soc,
				  struct hal_buf_info *buf_info)
{
	void *link_desc_va;
	struct qdf_mem_multi_page_t *pages;
	uint16_t page_id = LINK_DESC_COOKIE_PAGE_ID(buf_info->sw_cookie);

	pages = &soc->link_desc_pages;
	if (!pages)
		return NULL;
	if (qdf_unlikely(page_id >= pages->num_pages))
		return NULL;
	link_desc_va = pages->dma_pages[page_id].page_v_addr_start +
		(buf_info->paddr - pages->dma_pages[page_id].page_p_addr);
	return link_desc_va;
}

#ifndef QCA_HOST_MODE_WIFI_DISABLED
#ifdef DISABLE_EAPOL_INTRABSS_FWD
#ifdef WLAN_FEATURE_11BE_MLO
static inline bool dp_nbuf_dst_addr_is_mld_addr(struct dp_vdev *vdev,
						qdf_nbuf_t nbuf)
{
	struct qdf_mac_addr *self_mld_mac_addr =
				(struct qdf_mac_addr *)vdev->mld_mac_addr.raw;
	return qdf_is_macaddr_equal(self_mld_mac_addr,
				    (struct qdf_mac_addr *)qdf_nbuf_data(nbuf) +
				    QDF_NBUF_DEST_MAC_OFFSET);
}
#else
static inline bool dp_nbuf_dst_addr_is_mld_addr(struct dp_vdev *vdev,
						qdf_nbuf_t nbuf)
{
	return false;
}
#endif

static inline bool dp_nbuf_dst_addr_is_self_addr(struct dp_vdev *vdev,
						 qdf_nbuf_t nbuf)
{
	return qdf_is_macaddr_equal((struct qdf_mac_addr *)vdev->mac_addr.raw,
				    (struct qdf_mac_addr *)qdf_nbuf_data(nbuf) +
				    QDF_NBUF_DEST_MAC_OFFSET);
}

/*
 * dp_rx_intrabss_eapol_drop_check() - drop check for an EAPOL pkt;
 *				       fwd is not allowed when the DA is
 *				       not equal to the vdev mac addr.
 * @soc: core txrx main context
 * @ta_txrx_peer: source peer entry
 * @rx_tlv_hdr: start address of rx tlvs
 * @nbuf: nbuf that has to be intrabss forwarded
 *
 * Return: true if the nbuf was dropped else false
 */
static inline
bool dp_rx_intrabss_eapol_drop_check(struct dp_soc *soc,
				     struct dp_txrx_peer *ta_txrx_peer,
				     uint8_t *rx_tlv_hdr, qdf_nbuf_t nbuf)
{
	if (qdf_unlikely(qdf_nbuf_is_ipv4_eapol_pkt(nbuf) &&
			 !(dp_nbuf_dst_addr_is_self_addr(ta_txrx_peer->vdev,
							 nbuf) ||
			   dp_nbuf_dst_addr_is_mld_addr(ta_txrx_peer->vdev,
							nbuf)))) {
		qdf_nbuf_free(nbuf);
		DP_STATS_INC(soc, rx.err.intrabss_eapol_drop, 1);
		return true;
	}

	return false;
}
#else /* DISABLE_EAPOL_INTRABSS_FWD */

static inline
bool dp_rx_intrabss_eapol_drop_check(struct dp_soc *soc,
				     struct dp_txrx_peer *ta_txrx_peer,
				     uint8_t *rx_tlv_hdr, qdf_nbuf_t nbuf)
{
	return false;
}
#endif /* DISABLE_EAPOL_INTRABSS_FWD */

bool dp_rx_intrabss_mcbc_fwd(struct dp_soc *soc,
			     struct dp_txrx_peer *ta_txrx_peer,
			     uint8_t *rx_tlv_hdr, qdf_nbuf_t nbuf,
			     struct cdp_tid_rx_stats *tid_stats);

bool dp_rx_intrabss_ucast_fwd(struct dp_soc *soc,
			      struct dp_txrx_peer *ta_txrx_peer,
			      uint8_t tx_vdev_id,
			      uint8_t *rx_tlv_hdr, qdf_nbuf_t nbuf,
			      struct cdp_tid_rx_stats *tid_stats);

/**
 * dp_rx_defrag_concat() - Concatenate the fragments
 *
 * @dst: destination pointer to the buffer
 * @src: source pointer from where the fragment payload is to be copied
 *
 * Return: QDF_STATUS
 */
static inline QDF_STATUS dp_rx_defrag_concat(qdf_nbuf_t dst, qdf_nbuf_t src)
{
	/*
	 * Inside qdf_nbuf_cat, if it is necessary to reallocate dst
	 * to provide space for src, the headroom portion is copied from
	 * the original dst buffer to the larger new dst buffer.
	 * (This is needed, because the headroom of the dst buffer
	 * contains the rx desc.)
	 */
	if (!qdf_nbuf_cat(dst, src)) {
		/*
		 * qdf_nbuf_cat does not free the src memory.
		 * Free src nbuf before returning.
		 * For the failure case, the caller takes care of
		 * freeing the nbuf.
		 */
		qdf_nbuf_free(src);
		return QDF_STATUS_SUCCESS;
	}

	return QDF_STATUS_E_DEFRAG_ERROR;
}

#endif /* QCA_HOST_MODE_WIFI_DISABLED */

#ifndef FEATURE_WDS
void dp_rx_da_learn(struct dp_soc *soc, uint8_t *rx_tlv_hdr,
		    struct dp_txrx_peer *ta_txrx_peer, qdf_nbuf_t nbuf);

static inline QDF_STATUS dp_rx_ast_set_active(struct dp_soc *soc,
					      uint16_t sa_idx, bool is_active)
{
	return QDF_STATUS_SUCCESS;
}

static inline void
dp_rx_wds_srcport_learn(struct dp_soc *soc,
			uint8_t *rx_tlv_hdr,
			struct dp_txrx_peer *txrx_peer,
			qdf_nbuf_t nbuf,
			struct hal_rx_msdu_metadata msdu_metadata)
{
}

static inline void
dp_rx_ipa_wds_srcport_learn(struct dp_soc *soc,
			    struct dp_peer *ta_peer, qdf_nbuf_t nbuf,
			    struct hal_rx_msdu_metadata msdu_end_info,
			    bool ad4_valid, bool chfrag_start)
{
}
#endif

/*
 * dp_rx_desc_dump() - dump the sw rx descriptor
 *
 * @rx_desc: sw rx descriptor
 */
static inline void dp_rx_desc_dump(struct dp_rx_desc *rx_desc)
{
	dp_info("rx_desc->nbuf: %pK, rx_desc->cookie: %d, rx_desc->pool_id: %d, rx_desc->in_use: %d, rx_desc->unmapped: %d",
		rx_desc->nbuf, rx_desc->cookie, rx_desc->pool_id,
		rx_desc->in_use, rx_desc->unmapped);
}

#ifndef QCA_HOST_MODE_WIFI_DISABLED

/*
 * check_qwrap_multicast_loopback() - Check if rx packet is a loopback packet.
 *				      In qwrap mode, packets originated from
 *				      any vdev should not loopback and
 *				      should be dropped.
 * @vdev: vdev on which rx packet is received
 * @nbuf: rx pkt
 *
 */
#if ATH_SUPPORT_WRAP
static inline bool check_qwrap_multicast_loopback(struct dp_vdev *vdev,
						  qdf_nbuf_t nbuf)
{
	struct dp_vdev *psta_vdev;
	struct dp_pdev *pdev = vdev->pdev;
	uint8_t *data = qdf_nbuf_data(nbuf);

	if (qdf_unlikely(vdev->proxysta_vdev)) {
		/* In qwrap isolation mode, allow loopback packets as all
		 * packets go to RootAP and Loopback on the mpsta.
		 */
		if (vdev->isolation_vdev)
			return false;
		TAILQ_FOREACH(psta_vdev, &pdev->vdev_list, vdev_list_elem) {
			if (qdf_unlikely(psta_vdev->proxysta_vdev &&
					 !qdf_mem_cmp(psta_vdev->mac_addr.raw,
						      &data[QDF_MAC_ADDR_SIZE],
						      QDF_MAC_ADDR_SIZE))) {
				/* Drop packet if source address is equal to
				 * any of the vdev addresses.
				 */
				return true;
			}
		}
	}
	return false;
}
#else
static inline bool check_qwrap_multicast_loopback(struct dp_vdev *vdev,
						  qdf_nbuf_t nbuf)
{
	return false;
}
#endif

#endif /* QCA_HOST_MODE_WIFI_DISABLED */

#if defined(WLAN_SUPPORT_RX_PROTOCOL_TYPE_TAG) ||\
	defined(WLAN_SUPPORT_RX_TAG_STATISTICS) ||\
	defined(WLAN_SUPPORT_RX_FLOW_TAG)
#include "dp_rx_tag.h"
#endif

#if !defined(WLAN_SUPPORT_RX_PROTOCOL_TYPE_TAG) &&\
	!defined(WLAN_SUPPORT_RX_FLOW_TAG)
/**
 * dp_rx_update_protocol_tag() - Reads CCE metadata from the RX MSDU end TLV
 *				 and set the corresponding tag in QDF packet
 * @soc: core txrx main context
 * @vdev: vdev on which the packet is received
 * @nbuf: QDF pkt buffer on which the protocol tag should be set
 * @rx_tlv_hdr: base address where the RX TLVs start
 * @ring_index: REO ring number, not used for error & monitor ring
 * @is_reo_exception: flag to indicate if rx from REO ring or exception ring
 * @is_update_stats: flag to indicate whether to update stats or not
 *
 * Return: void
 */
static inline void
dp_rx_update_protocol_tag(struct dp_soc *soc, struct dp_vdev *vdev,
			  qdf_nbuf_t nbuf, uint8_t *rx_tlv_hdr,
			  uint16_t ring_index,
			  bool is_reo_exception, bool is_update_stats)
{
}
#endif

#ifndef WLAN_SUPPORT_RX_PROTOCOL_TYPE_TAG
/**
 * dp_rx_err_cce_drop() - Reads CCE metadata from the RX MSDU end TLV
 *			  and returns whether cce metadata matches
 * @soc: core txrx main context
 * @vdev: vdev on which the packet is received
 * @nbuf: QDF pkt buffer on which the protocol tag should be set
 * @rx_tlv_hdr: base address where the RX TLVs start
 *
 * Return: bool
 */
static inline bool
dp_rx_err_cce_drop(struct dp_soc *soc, struct dp_vdev *vdev,
		   qdf_nbuf_t nbuf, uint8_t *rx_tlv_hdr)
{
	return false;
}

#endif /* WLAN_SUPPORT_RX_PROTOCOL_TYPE_TAG */

#ifndef WLAN_SUPPORT_RX_FLOW_TAG
/**
 * dp_rx_update_flow_tag() - Reads FSE metadata from the RX MSDU end TLV
 *			     and set the corresponding tag in QDF packet
 * @soc: core txrx main context
 * @vdev: vdev on which the packet is received
 * @nbuf: QDF pkt buffer on which the flow tag should be set
 * @rx_tlv_hdr: base address where the RX TLVs start
 * @update_stats: flag to indicate whether to update stats or not
 *
 * Return: void
 */
static inline void
dp_rx_update_flow_tag(struct dp_soc *soc, struct dp_vdev *vdev,
		      qdf_nbuf_t nbuf, uint8_t *rx_tlv_hdr, bool update_stats)
{
}
#endif /* WLAN_SUPPORT_RX_FLOW_TAG */

#define CRITICAL_BUFFER_THRESHOLD	64
/*
 * __dp_rx_buffers_replenish() - replenish rxdma ring with rx nbufs
 *				 called during dp rx initialization
 *				 and at the end of dp_rx_process.
 *
 * @dp_soc: core txrx main context
 * @mac_id: mac_id which is one of 3 mac_ids
 * @dp_rxdma_srng: dp rxdma circular ring
 * @rx_desc_pool: Pointer to free Rx descriptor pool
 * @num_req_buffers: number of buffers to be replenished
 * @desc_list: list of descs if called from dp_rx_process
 *	       or NULL during dp rx initialization or out of buffer
 *	       interrupt.
 * @tail: tail of descs list
 * @req_only: If true don't replenish more than req buffers
 * @func_name: name of the caller function
 *
 * Return: return success or failure
 */
QDF_STATUS __dp_rx_buffers_replenish(struct dp_soc *dp_soc, uint32_t mac_id,
				     struct dp_srng *dp_rxdma_srng,
				     struct rx_desc_pool *rx_desc_pool,
				     uint32_t num_req_buffers,
				     union dp_rx_desc_list_elem_t **desc_list,
				     union dp_rx_desc_list_elem_t **tail,
				     bool req_only,
				     const char *func_name);

/*
 * __dp_rx_buffers_no_map_replenish() - replenish rxdma ring with rx nbufs,
 *					using direct APIs to invalidate
 *					and get the physical address of the
 *					nbuf instead of the map api; called
 *					during dp rx initialization and at the
 *					end of dp_rx_process.
 *
 * @dp_soc: core txrx main context
 * @mac_id: mac_id which is one of 3 mac_ids
 * @dp_rxdma_srng: dp rxdma circular ring
 * @rx_desc_pool: Pointer to free Rx descriptor pool
 * @num_req_buffers: number of buffers to be replenished
 * @desc_list: list of descs if called from dp_rx_process
 *	       or NULL during dp rx initialization or out of buffer
 *	       interrupt.
 * @tail: tail of descs list
 *
 * Return: return success or failure
 */
QDF_STATUS
__dp_rx_buffers_no_map_replenish(struct dp_soc *dp_soc, uint32_t mac_id,
				 struct dp_srng *dp_rxdma_srng,
				 struct rx_desc_pool *rx_desc_pool,
				 uint32_t num_req_buffers,
				 union dp_rx_desc_list_elem_t **desc_list,
				 union dp_rx_desc_list_elem_t **tail);

/*
 * __dp_rx_buffers_no_map_lt_replenish() - replenish rxdma ring with rx nbufs,
 *					   using direct APIs to invalidate
 *					   and get the physical address of the
 *					   nbuf instead of the map api; called
 *					   when the low threshold interrupt is
 *					   triggered
 *
 * @dp_soc: core txrx main context
 * @mac_id: mac_id which is one of 3 mac_ids
 * @dp_rxdma_srng: dp rxdma circular ring
 * @rx_desc_pool: Pointer to free Rx descriptor pool
 *
 * Return: return success or failure
 */
QDF_STATUS
__dp_rx_buffers_no_map_lt_replenish(struct dp_soc *dp_soc, uint32_t mac_id,
				    struct dp_srng *dp_rxdma_srng,
				    struct rx_desc_pool *rx_desc_pool);
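
/*
 * Illustrative sketch (not part of the driver API): requesting a
 * replenish of 32 buffers through the dp_rx_buffers_replenish() wrapper
 * (which supplies __func__). soc, mac_id, rxdma_srng and rx_desc_pool
 * are hypothetical caller-owned handles; NULL-initialized desc_list and
 * tail make the descriptors come from the pool freelist.
 *
 *	union dp_rx_desc_list_elem_t *desc_list = NULL;
 *	union dp_rx_desc_list_elem_t *tail = NULL;
 *
 *	dp_rx_buffers_replenish(soc, mac_id, rxdma_srng, rx_desc_pool,
 *				32, &desc_list, &tail, false);
 */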

/*
 * __dp_pdev_rx_buffers_no_map_attach() - replenish rxdma ring with rx nbufs,
 *					  using direct APIs to invalidate
 *					  and get the physical address of the
 *					  nbuf instead of the map api; called
 *					  during dp rx initialization.
 *
 * @dp_soc: core txrx main context
 * @mac_id: mac_id which is one of 3 mac_ids
 * @dp_rxdma_srng: dp rxdma circular ring
 * @rx_desc_pool: Pointer to free Rx descriptor pool
 * @num_req_buffers: number of buffers to be replenished
 *
 * Return: return success or failure
 */
QDF_STATUS __dp_pdev_rx_buffers_no_map_attach(struct dp_soc *dp_soc,
					      uint32_t mac_id,
					      struct dp_srng *dp_rxdma_srng,
					      struct rx_desc_pool *rx_desc_pool,
					      uint32_t num_req_buffers);

/*
 * dp_pdev_rx_buffers_attach() - replenish rxdma ring with rx nbufs
 *				 called during dp rx initialization
 *
 * @dp_soc: core txrx main context
 * @mac_id: mac_id which is one of 3 mac_ids
 * @dp_rxdma_srng: dp rxdma circular ring
 * @rx_desc_pool: Pointer to free Rx descriptor pool
 * @num_req_buffers: number of buffers to be replenished
 *
 * Return: return success or failure
 */
QDF_STATUS
dp_pdev_rx_buffers_attach(struct dp_soc *dp_soc, uint32_t mac_id,
			  struct dp_srng *dp_rxdma_srng,
			  struct rx_desc_pool *rx_desc_pool,
			  uint32_t num_req_buffers);

/**
 * dp_rx_link_desc_return() - Return a MPDU link descriptor to HW
 *			      (WBM), following error handling
 *
 * @soc: core DP main context
 * @ring_desc: opaque pointer to the REO error ring descriptor
 * @bm_action: put to idle_list or release to msdu_list
 *
 * Return: QDF_STATUS_E_FAILURE for failure else QDF_STATUS_SUCCESS
 */
QDF_STATUS
dp_rx_link_desc_return(struct dp_soc *soc, hal_ring_desc_t ring_desc,
		       uint8_t bm_action);

/**
 * dp_rx_link_desc_return_by_addr() - Return a MPDU link descriptor to
 *				      (WBM) by address
 *
 * @soc: core DP main context
 * @link_desc_addr: link descriptor addr
 * @bm_action: put to idle_list or release to msdu_list
 *
 * Return: QDF_STATUS_E_FAILURE for failure else QDF_STATUS_SUCCESS
 */
QDF_STATUS
dp_rx_link_desc_return_by_addr(struct dp_soc *soc,
			       hal_buff_addrinfo_t link_desc_addr,
			       uint8_t bm_action);

/**
 * dp_rxdma_err_process() - RxDMA error processing functionality
 * @int_ctx: pointer to DP interrupt context
 * @soc: core txrx main context
 * @mac_id: mac id which is one of 3 mac_ids
 * @quota: No. of units (packets) that can be serviced in one shot.
 *
 * Return: num of buffers processed
 */
uint32_t
dp_rxdma_err_process(struct dp_intr *int_ctx, struct dp_soc *soc,
		     uint32_t mac_id, uint32_t quota);

void dp_rx_fill_mesh_stats(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
			   uint8_t *rx_tlv_hdr, struct dp_txrx_peer *peer);
QDF_STATUS dp_rx_filter_mesh_packets(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
				     uint8_t *rx_tlv_hdr);

int dp_wds_rx_policy_check(uint8_t *rx_tlv_hdr, struct dp_vdev *vdev,
			   struct dp_txrx_peer *peer);

/*
 * dp_rx_dump_info_and_assert() - dump RX Ring info and Rx Desc info
 *
 * @soc: core txrx main context
 * @hal_ring_hdl: opaque pointer to the HAL Rx Ring, which will be serviced
 * @ring_desc: opaque pointer to the RX ring descriptor
 * @rx_desc: host rx descriptor
 *
 * Return: void
 */
void dp_rx_dump_info_and_assert(struct dp_soc *soc,
				hal_ring_handle_t hal_ring_hdl,
				hal_ring_desc_t ring_desc,
				struct dp_rx_desc *rx_desc);

void dp_rx_compute_delay(struct dp_vdev *vdev, qdf_nbuf_t nbuf);

#ifdef QCA_PEER_EXT_STATS
void dp_rx_compute_tid_delay(struct cdp_delay_tid_stats *stats,
			     qdf_nbuf_t nbuf);
#endif /* QCA_PEER_EXT_STATS */

#ifdef RX_DESC_DEBUG_CHECK
/**
 * dp_rx_desc_check_magic() - check the magic value in dp_rx_desc
 * @rx_desc: rx descriptor pointer
 *
 * Return: true, if magic is correct, else false.
 */
static inline bool dp_rx_desc_check_magic(struct dp_rx_desc *rx_desc)
{
	if (qdf_unlikely(rx_desc->magic != DP_RX_DESC_MAGIC))
		return false;

	rx_desc->magic = 0;
	return true;
}

/**
 * dp_rx_desc_prep() - prepare rx desc
 * @rx_desc: rx descriptor pointer to be prepared
 * @nbuf_frag_info_t: struct dp_rx_nbuf_frag_info *
 *
 * Note: assumption is that we are associating a nbuf which is mapped
 *
 * Return: none
 */
static inline
void dp_rx_desc_prep(struct dp_rx_desc *rx_desc,
		     struct dp_rx_nbuf_frag_info *nbuf_frag_info_t)
{
	rx_desc->magic = DP_RX_DESC_MAGIC;
	rx_desc->nbuf = (nbuf_frag_info_t->virt_addr).nbuf;
	rx_desc->unmapped = 0;
	rx_desc->nbuf_data_addr = (uint8_t *)qdf_nbuf_data(rx_desc->nbuf);
}

/**
 * dp_rx_desc_frag_prep() - prepare rx desc
 * @rx_desc: rx descriptor pointer to be prepared
 * @nbuf_frag_info_t: struct dp_rx_nbuf_frag_info *
 *
 * Note: assumption is that the frag address is mapped
 *
 * Return: none
 */
#ifdef DP_RX_MON_MEM_FRAG
static inline
void dp_rx_desc_frag_prep(struct dp_rx_desc *rx_desc,
			  struct dp_rx_nbuf_frag_info *nbuf_frag_info_t)
{
	rx_desc->magic = DP_RX_DESC_MAGIC;
	rx_desc->rx_buf_start =
		(uint8_t *)((nbuf_frag_info_t->virt_addr).vaddr);
	rx_desc->paddr_buf_start = nbuf_frag_info_t->paddr;
	rx_desc->unmapped = 0;
}
#else
static inline
void dp_rx_desc_frag_prep(struct dp_rx_desc *rx_desc,
			  struct dp_rx_nbuf_frag_info *nbuf_frag_info_t)
{
}
#endif /* DP_RX_MON_MEM_FRAG */
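
/*
 * Illustrative sketch (not part of the driver API): the intended
 * pairing of dp_rx_desc_prep() at replenish time with
 * dp_rx_desc_check_magic() at reap time. nbuf_frag_info is a
 * hypothetical caller-filled struct dp_rx_nbuf_frag_info.
 *
 *	dp_rx_desc_prep(rx_desc, &nbuf_frag_info);	// on replenish
 *	...
 *	if (!dp_rx_desc_check_magic(rx_desc))		// on ring reap
 *		dp_rx_dump_info_and_assert(soc, hal_ring_hdl,
 *					   ring_desc, rx_desc);
 */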

/**
 * dp_rx_desc_paddr_sanity_check() - paddr sanity for ring desc vs rx_desc
 * @rx_desc: rx descriptor
 * @ring_paddr: paddr obtained from the ring
 *
 * Return: true if the paddrs match, else false
 */
static inline
bool dp_rx_desc_paddr_sanity_check(struct dp_rx_desc *rx_desc,
				   uint64_t ring_paddr)
{
	return (ring_paddr == qdf_nbuf_get_frag_paddr(rx_desc->nbuf, 0));
}
#else

static inline bool dp_rx_desc_check_magic(struct dp_rx_desc *rx_desc)
{
	return true;
}

static inline
void dp_rx_desc_prep(struct dp_rx_desc *rx_desc,
		     struct dp_rx_nbuf_frag_info *nbuf_frag_info_t)
{
	rx_desc->nbuf = (nbuf_frag_info_t->virt_addr).nbuf;
	rx_desc->unmapped = 0;
}

#ifdef DP_RX_MON_MEM_FRAG
static inline
void dp_rx_desc_frag_prep(struct dp_rx_desc *rx_desc,
			  struct dp_rx_nbuf_frag_info *nbuf_frag_info_t)
{
	rx_desc->rx_buf_start =
		(uint8_t *)((nbuf_frag_info_t->virt_addr).vaddr);
	rx_desc->paddr_buf_start = nbuf_frag_info_t->paddr;
	rx_desc->unmapped = 0;
}
#else
static inline
void dp_rx_desc_frag_prep(struct dp_rx_desc *rx_desc,
			  struct dp_rx_nbuf_frag_info *nbuf_frag_info_t)
{
}
#endif /* DP_RX_MON_MEM_FRAG */

static inline
bool dp_rx_desc_paddr_sanity_check(struct dp_rx_desc *rx_desc,
				   uint64_t ring_paddr)
{
	return true;
}
#endif /* RX_DESC_DEBUG_CHECK */

void dp_rx_enable_mon_dest_frag(struct rx_desc_pool *rx_desc_pool,
				bool is_mon_dest_desc);

void dp_rx_process_rxdma_err(struct dp_soc *soc, qdf_nbuf_t nbuf,
			     uint8_t *rx_tlv_hdr, struct dp_txrx_peer *peer,
			     uint8_t err_code, uint8_t mac_id);

#ifndef QCA_MULTIPASS_SUPPORT
static inline
bool dp_rx_multipass_process(struct dp_txrx_peer *peer, qdf_nbuf_t nbuf,
			     uint8_t tid)
{
	return false;
}
#else
bool dp_rx_multipass_process(struct dp_txrx_peer *peer, qdf_nbuf_t nbuf,
			     uint8_t tid);
#endif

#ifndef QCA_HOST_MODE_WIFI_DISABLED

#ifndef WLAN_RX_PKT_CAPTURE_ENH
static inline
QDF_STATUS dp_peer_set_rx_capture_enabled(struct dp_pdev *pdev,
					  struct dp_peer *peer_handle,
					  bool value, uint8_t *mac_addr)
{
	return QDF_STATUS_SUCCESS;
}
#endif

#endif /* QCA_HOST_MODE_WIFI_DISABLED */

/**
 * dp_rx_deliver_to_stack() - deliver pkts to network stack
 * Caller to hold peer refcount and check for valid peer
 * @soc: soc
 * @vdev: vdev
 * @peer: txrx peer
 * @nbuf_head: skb list head
 * @nbuf_tail: skb list tail
 *
 * Return: QDF_STATUS
 */
QDF_STATUS dp_rx_deliver_to_stack(struct dp_soc *soc,
				  struct dp_vdev *vdev,
				  struct dp_txrx_peer *peer,
				  qdf_nbuf_t nbuf_head,
				  qdf_nbuf_t nbuf_tail);

#ifdef QCA_SUPPORT_EAPOL_OVER_CONTROL_PORT
/**
 * dp_rx_eapol_deliver_to_stack() - deliver pkts to network stack
 *				    Caller to hold peer refcount and check
 *				    for valid peer
 * @soc: soc
 * @vdev: vdev
 * @peer: peer
 * @nbuf_head: skb list head
 * @nbuf_tail: skb list tail
 *
 * Return: QDF_STATUS
 */
QDF_STATUS dp_rx_eapol_deliver_to_stack(struct dp_soc *soc,
					struct dp_vdev *vdev,
					struct dp_txrx_peer *peer,
					qdf_nbuf_t nbuf_head,
					qdf_nbuf_t nbuf_tail);
#endif
/**
 * dp_rx_wbm_sg_list_reset() - Initialize sg list
 * @soc: DP SOC handle
 *
 * This API should be called at soc init and after every SG processing.
 */
static inline void dp_rx_wbm_sg_list_reset(struct dp_soc *soc)
{
	if (soc) {
		soc->wbm_sg_param.wbm_is_first_msdu_in_sg = false;
		soc->wbm_sg_param.wbm_sg_nbuf_head = NULL;
		soc->wbm_sg_param.wbm_sg_nbuf_tail = NULL;
		soc->wbm_sg_param.wbm_sg_desc_msdu_len = 0;
	}
}

/**
 * dp_rx_wbm_sg_list_deinit() - De-initialize sg list
 * @soc: DP SOC handle
 *
 * This API should be called in the down path to avoid any leak.
 */
static inline void dp_rx_wbm_sg_list_deinit(struct dp_soc *soc)
{
	if (soc) {
		if (soc->wbm_sg_param.wbm_sg_nbuf_head)
			qdf_nbuf_list_free(soc->wbm_sg_param.wbm_sg_nbuf_head);

		dp_rx_wbm_sg_list_reset(soc);
	}
}
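
/*
 * Usage note (sketch): dp_rx_wbm_sg_list_reset() is expected once at
 * soc init and again after every completed SG reassembly, while
 * dp_rx_wbm_sg_list_deinit() belongs in the teardown path so a
 * half-built chain is not leaked. A hypothetical ordering:
 *
 *	dp_rx_wbm_sg_list_reset(soc);	// at soc init
 *	// ... WBM error processing builds up the SG chain ...
 *	dp_rx_wbm_sg_list_reset(soc);	// after each SG completion
 *	dp_rx_wbm_sg_list_deinit(soc);	// in the down path
 */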
#ifndef QCA_HOST_MODE_WIFI_DISABLED

#ifdef WLAN_FEATURE_RX_PREALLOC_BUFFER_POOL
#define DP_RX_PROCESS_NBUF(soc, head, tail, ebuf_head, ebuf_tail, rx_desc) \
	do {								   \
		if (!soc->rx_buff_pool[rx_desc->pool_id].is_initialized) { \
			DP_RX_LIST_APPEND(head, tail, rx_desc->nbuf);	   \
			break;						   \
		}							   \
		DP_RX_LIST_APPEND(ebuf_head, ebuf_tail, rx_desc->nbuf);	   \
		if (!qdf_nbuf_is_rx_chfrag_cont(rx_desc->nbuf)) {	   \
			if (!dp_rx_buffer_pool_refill(soc, ebuf_head,	   \
						      rx_desc->pool_id))   \
				DP_RX_MERGE_TWO_LIST(head, tail,	   \
						     ebuf_head, ebuf_tail);\
			ebuf_head = NULL;				   \
			ebuf_tail = NULL;				   \
		}							   \
	} while (0)
#else
#define DP_RX_PROCESS_NBUF(soc, head, tail, ebuf_head, ebuf_tail, rx_desc) \
	DP_RX_LIST_APPEND(head, tail, rx_desc->nbuf)
#endif /* WLAN_FEATURE_RX_PREALLOC_BUFFER_POOL */

#endif /* QCA_HOST_MODE_WIFI_DISABLED */

/**
 * dp_rx_link_desc_refill_duplicate_check() - check if the link desc
 *	to be refilled is a duplicate
 * @soc: DP SOC handle
 * @buf_info: the last link desc buf info
 * @ring_buf_info: current buf address pointer including link desc
 *
 * Return: none
 */
void dp_rx_link_desc_refill_duplicate_check(
				struct dp_soc *soc,
				struct hal_buf_info *buf_info,
				hal_buff_addrinfo_t ring_buf_info);

#ifdef WLAN_FEATURE_PKT_CAPTURE_V2
/**
 * dp_rx_deliver_to_pkt_capture() - deliver rx packet to packet capture
 * @soc: dp_soc handle
 * @pdev: dp_pdev handle
 * @peer_id: peer_id of the peer for which completion came
 * @is_offload: flag indicating an offload packet
 * @netbuf: Buffer pointer
 *
 * This function is used to deliver rx packet to packet capture
 */
void dp_rx_deliver_to_pkt_capture(struct dp_soc *soc, struct dp_pdev *pdev,
				  uint16_t peer_id, uint32_t is_offload,
				  qdf_nbuf_t netbuf);
void dp_rx_deliver_to_pkt_capture_no_peer(struct dp_soc *soc, qdf_nbuf_t nbuf,
					  uint32_t is_offload);
#else
static inline void
dp_rx_deliver_to_pkt_capture(struct dp_soc *soc, struct dp_pdev *pdev,
			     uint16_t peer_id, uint32_t is_offload,
			     qdf_nbuf_t netbuf)
{
}

static inline void
dp_rx_deliver_to_pkt_capture_no_peer(struct dp_soc *soc, qdf_nbuf_t nbuf,
				     uint32_t is_offload)
{
}
#endif

#ifndef QCA_HOST_MODE_WIFI_DISABLED
#ifdef FEATURE_MEC
/**
 * dp_rx_mcast_echo_check() - check if the mcast pkt is a loop
 *			      back on same vap or a different vap.
 * @soc: core DP main context
 * @peer: dp txrx peer handle
 * @rx_tlv_hdr: start of the rx TLV header
 * @nbuf: pkt buffer
 *
 * Return: bool (true if it is a looped back pkt else false)
 */
bool dp_rx_mcast_echo_check(struct dp_soc *soc,
			    struct dp_txrx_peer *peer,
			    uint8_t *rx_tlv_hdr,
			    qdf_nbuf_t nbuf);
#else
static inline bool dp_rx_mcast_echo_check(struct dp_soc *soc,
					  struct dp_txrx_peer *peer,
					  uint8_t *rx_tlv_hdr,
					  qdf_nbuf_t nbuf)
{
	return false;
}
#endif /* FEATURE_MEC */
#endif /* QCA_HOST_MODE_WIFI_DISABLED */
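
/*
 * Illustrative sketch: a typical caller drops multicast frames that
 * dp_rx_mcast_echo_check() identifies as echoes of our own
 * transmission. The surrounding loop and the variable names here are
 * hypothetical.
 *
 *	if (qdf_unlikely(dp_rx_mcast_echo_check(soc, txrx_peer,
 *						rx_tlv_hdr, nbuf))) {
 *		// looped-back mcast: free and move to the next MSDU
 *		qdf_nbuf_free(nbuf);
 *		nbuf = next;
 *		continue;
 *	}
 */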
#ifdef RECEIVE_OFFLOAD
void dp_rx_fill_gro_info(struct dp_soc *soc, uint8_t *rx_tlv,
			 qdf_nbuf_t msdu, uint32_t *rx_ol_pkt_cnt);
#else
static inline
void dp_rx_fill_gro_info(struct dp_soc *soc, uint8_t *rx_tlv,
			 qdf_nbuf_t msdu, uint32_t *rx_ol_pkt_cnt)
{
}
#endif

void dp_rx_msdu_stats_update(struct dp_soc *soc, qdf_nbuf_t nbuf,
			     uint8_t *rx_tlv_hdr, struct dp_txrx_peer *peer,
			     uint8_t ring_id,
			     struct cdp_tid_rx_stats *tid_stats);

void dp_rx_deliver_to_stack_no_peer(struct dp_soc *soc, qdf_nbuf_t nbuf);

uint32_t dp_rx_srng_get_num_pending(hal_soc_handle_t hal_soc,
				    hal_ring_handle_t hal_ring_hdl,
				    uint32_t num_entries,
				    bool *near_full);

#ifdef WLAN_FEATURE_DP_RX_RING_HISTORY
void dp_rx_ring_record_entry(struct dp_soc *soc, uint8_t ring_num,
			     hal_ring_desc_t ring_desc);
#else
static inline void
dp_rx_ring_record_entry(struct dp_soc *soc, uint8_t ring_num,
			hal_ring_desc_t ring_desc)
{
}
#endif

#ifndef QCA_HOST_MODE_WIFI_DISABLED
#ifdef RX_DESC_SANITY_WAR
QDF_STATUS dp_rx_desc_sanity(struct dp_soc *soc, hal_soc_handle_t hal_soc,
			     hal_ring_handle_t hal_ring_hdl,
			     hal_ring_desc_t ring_desc,
			     struct dp_rx_desc *rx_desc);
#else
static inline
QDF_STATUS dp_rx_desc_sanity(struct dp_soc *soc, hal_soc_handle_t hal_soc,
			     hal_ring_handle_t hal_ring_hdl,
			     hal_ring_desc_t ring_desc,
			     struct dp_rx_desc *rx_desc)
{
	return QDF_STATUS_SUCCESS;
}
#endif

#ifdef DP_RX_DROP_RAW_FRM
bool dp_rx_is_raw_frame_dropped(qdf_nbuf_t nbuf);
#else
static inline
bool dp_rx_is_raw_frame_dropped(qdf_nbuf_t nbuf)
{
	return false;
}
#endif

#ifdef RX_DESC_DEBUG_CHECK
QDF_STATUS dp_rx_desc_nbuf_sanity_check(struct dp_soc *soc,
					hal_ring_desc_t ring_desc,
					struct dp_rx_desc *rx_desc);
#else
static inline
QDF_STATUS dp_rx_desc_nbuf_sanity_check(struct dp_soc *soc,
					hal_ring_desc_t ring_desc,
					struct dp_rx_desc *rx_desc)
{
	return QDF_STATUS_SUCCESS;
}
#endif

#ifdef WLAN_DP_FEATURE_SW_LATENCY_MGR
void dp_rx_update_stats(struct dp_soc *soc, qdf_nbuf_t nbuf);
#else
static inline
void dp_rx_update_stats(struct dp_soc *soc, qdf_nbuf_t nbuf)
{
}
#endif

/**
 * dp_rx_cksum_offload() - set the nbuf checksum as defined by hardware.
 * @pdev: dp pdev handle
 * @nbuf: pointer to the first msdu of an amsdu.
 * @rx_tlv_hdr: pointer to the start of RX TLV headers.
 *
 * The ip_summed field of the skb is set based on whether HW validated the
 * IP/TCP/UDP checksum.
 *
 * Return: void
 */
#if defined(MAX_PDEV_CNT) && (MAX_PDEV_CNT == 1)
static inline
void dp_rx_cksum_offload(struct dp_pdev *pdev,
			 qdf_nbuf_t nbuf,
			 uint8_t *rx_tlv_hdr)
{
	qdf_nbuf_rx_cksum_t cksum = {0};
	/* TODO: Move this to a ring desc API:
	 * HAL_RX_MSDU_DESC_IP_CHKSUM_FAIL_GET
	 * HAL_RX_MSDU_DESC_TCP_UDP_CHKSUM_FAIL_GET
	 */
	uint32_t ip_csum_err, tcp_udp_csum_err;

	hal_rx_tlv_csum_err_get(pdev->soc->hal_soc, rx_tlv_hdr, &ip_csum_err,
				&tcp_udp_csum_err);

	if (qdf_likely(!ip_csum_err && !tcp_udp_csum_err)) {
		cksum.l4_result = QDF_NBUF_RX_CKSUM_TCP_UDP_UNNECESSARY;
		qdf_nbuf_set_rx_cksum(nbuf, &cksum);
	} else {
		DP_STATS_INCC(pdev, err.ip_csum_err, 1, ip_csum_err);
		DP_STATS_INCC(pdev, err.tcp_udp_csum_err, 1, tcp_udp_csum_err);
	}
}
#else
static inline
void dp_rx_cksum_offload(struct dp_pdev *pdev,
			 qdf_nbuf_t nbuf,
			 uint8_t *rx_tlv_hdr)
{
}
#endif
#endif /* QCA_HOST_MODE_WIFI_DISABLED */
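
/*
 * Illustrative sketch: in the per-MSDU path the checksum helper is
 * typically invoked before stack delivery, so the stack can skip SW
 * checksum validation when HW already verified it. The variable names
 * are hypothetical.
 *
 *	dp_rx_cksum_offload(vdev->pdev, nbuf, rx_tlv_hdr);
 *	// nbuf now carries QDF_NBUF_RX_CKSUM_TCP_UDP_UNNECESSARY when
 *	// both the IP and TCP/UDP checksums passed in HW
 */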
#ifdef WLAN_FEATURE_RX_SOFTIRQ_TIME_LIMIT
static inline
bool dp_rx_reap_loop_pkt_limit_hit(struct dp_soc *soc, int num_reaped,
				   int max_reap_limit)
{
	bool limit_hit = false;

	limit_hit = (num_reaped >= max_reap_limit);

	if (limit_hit)
		DP_STATS_INC(soc, rx.reap_loop_pkt_limit_hit, 1);

	return limit_hit;
}

static inline
bool dp_rx_enable_eol_data_check(struct dp_soc *soc)
{
	return soc->wlan_cfg_ctx->rx_enable_eol_data_check;
}

static inline int dp_rx_get_loop_pkt_limit(struct dp_soc *soc)
{
	struct wlan_cfg_dp_soc_ctxt *cfg = soc->wlan_cfg_ctx;

	return cfg->rx_reap_loop_pkt_limit;
}
#else
static inline
bool dp_rx_reap_loop_pkt_limit_hit(struct dp_soc *soc, int num_reaped,
				   int max_reap_limit)
{
	return false;
}

static inline
bool dp_rx_enable_eol_data_check(struct dp_soc *soc)
{
	return false;
}

static inline int dp_rx_get_loop_pkt_limit(struct dp_soc *soc)
{
	return 0;
}
#endif /* WLAN_FEATURE_RX_SOFTIRQ_TIME_LIMIT */
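
/*
 * Illustrative sketch (hypothetical loop): how the reap-limit helpers
 * above are meant to compose in a softirq-bounded reap loop. The ring
 * walk is pseudo-code.
 *
 *	int max_reap = dp_rx_get_loop_pkt_limit(soc);
 *	int num_reaped = 0;
 *
 *	while (... next ring entry ...) {
 *		// ... process one entry ...
 *		num_reaped++;
 *		if (dp_rx_reap_loop_pkt_limit_hit(soc, num_reaped,
 *						  max_reap))
 *			break;	// yield; EOL data check decides re-arm
 *	}
 */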
void dp_rx_update_stats(struct dp_soc *soc, qdf_nbuf_t nbuf);

#ifdef QCA_SUPPORT_WDS_EXTENDED
/**
 * dp_rx_is_list_ready() - Make different lists for 4-address
 *			   and 3-address frames
 * @nbuf_head: skb list head
 * @vdev: vdev
 * @txrx_peer: txrx peer
 * @peer_id: peer id of new received frame
 * @vdev_id: vdev_id of new received frame
 *
 * Return: true if peer_ids are different.
 */
static inline bool
dp_rx_is_list_ready(qdf_nbuf_t nbuf_head,
		    struct dp_vdev *vdev,
		    struct dp_txrx_peer *txrx_peer,
		    uint16_t peer_id,
		    uint8_t vdev_id)
{
	if (nbuf_head && txrx_peer && txrx_peer->peer_id != peer_id)
		return true;

	return false;
}
#else
static inline bool
dp_rx_is_list_ready(qdf_nbuf_t nbuf_head,
		    struct dp_vdev *vdev,
		    struct dp_txrx_peer *txrx_peer,
		    uint16_t peer_id,
		    uint8_t vdev_id)
{
	if (nbuf_head && vdev && (vdev->vdev_id != vdev_id))
		return true;

	return false;
}
#endif

#ifdef WLAN_FEATURE_MARK_FIRST_WAKEUP_PACKET
/**
 * dp_rx_mark_first_packet_after_wow_wakeup() - mark the first packet
 *	received after wow wakeup
 * @pdev: pointer to dp_pdev structure
 * @rx_tlv: pointer to rx_pkt_tlvs structure
 * @nbuf: pointer to skb buffer
 *
 * Return: None
 */
void dp_rx_mark_first_packet_after_wow_wakeup(struct dp_pdev *pdev,
					      uint8_t *rx_tlv,
					      qdf_nbuf_t nbuf);
#else
static inline void
dp_rx_mark_first_packet_after_wow_wakeup(struct dp_pdev *pdev,
					 uint8_t *rx_tlv,
					 qdf_nbuf_t nbuf)
{
}
#endif

#if defined(WLAN_MAX_PDEVS) && (WLAN_MAX_PDEVS == 1)
static inline uint8_t
dp_rx_get_defrag_bm_id(struct dp_soc *soc)
{
	return DP_DEFRAG_RBM(soc->wbm_sw0_bm_id);
}

static inline uint8_t
dp_rx_get_rx_bm_id(struct dp_soc *soc)
{
	return DP_WBM2SW_RBM(soc->wbm_sw0_bm_id);
}
#else
static inline uint8_t
dp_rx_get_rx_bm_id(struct dp_soc *soc)
{
	struct wlan_cfg_dp_soc_ctxt *cfg_ctx = soc->wlan_cfg_ctx;
	uint8_t wbm2_sw_rx_rel_ring_id;

	wbm2_sw_rx_rel_ring_id = wlan_cfg_get_rx_rel_ring_id(cfg_ctx);

	return HAL_RX_BUF_RBM_SW_BM(soc->wbm_sw0_bm_id,
				    wbm2_sw_rx_rel_ring_id);
}

static inline uint8_t
dp_rx_get_defrag_bm_id(struct dp_soc *soc)
{
	return dp_rx_get_rx_bm_id(soc);
}
#endif

static inline uint16_t
dp_rx_peer_metadata_peer_id_get(struct dp_soc *soc, uint32_t peer_metadata)
{
	return soc->arch_ops.dp_rx_peer_metadata_peer_id_get(soc,
							     peer_metadata);
}

/**
 * dp_rx_desc_pool_init_generic() - Generic Rx descriptors initialization
 * @soc: SOC handle
 * @rx_desc_pool: pointer to RX descriptor pool
 * @pool_id: pool ID
 *
 * Return: QDF_STATUS
 */
QDF_STATUS dp_rx_desc_pool_init_generic(struct dp_soc *soc,
					struct rx_desc_pool *rx_desc_pool,
					uint32_t pool_id);

void dp_rx_desc_pool_deinit_generic(struct dp_soc *soc,
				    struct rx_desc_pool *rx_desc_pool,
				    uint32_t pool_id);

/**
 * dp_rx_pkt_tracepoints_enabled() - Get the state of rx pkt tracepoint
 *
 * Return: True if any rx pkt tracepoint is enabled else false
 */
static inline
bool dp_rx_pkt_tracepoints_enabled(void)
{
	return (qdf_trace_dp_rx_tcp_pkt_enabled() ||
		qdf_trace_dp_rx_udp_pkt_enabled() ||
		qdf_trace_dp_rx_pkt_enabled());
}
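
/*
 * Usage sketch: the tracepoint check above is cheap, so callers can
 * gate more expensive per-packet work on it. The body of the branch
 * here is hypothetical.
 *
 *	if (dp_rx_pkt_tracepoints_enabled()) {
 *		// ... derive per-packet fields and emit the rx
 *		// tracepoints only when someone is listening ...
 *	}
 */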
#ifdef FEATURE_DIRECT_LINK
/**
 * dp_audio_smmu_map() - Map memory region into Audio SMMU CB
 * @qdf_dev: pointer to QDF device structure
 * @paddr: physical address
 * @iova: DMA address
 * @size: memory region size
 *
 * Return: 0 on success else failure code
 */
static inline
int dp_audio_smmu_map(qdf_device_t qdf_dev, qdf_dma_addr_t paddr,
		      qdf_dma_addr_t iova, qdf_size_t size)
{
	return pld_audio_smmu_map(qdf_dev->dev, paddr, iova, size);
}

/**
 * dp_audio_smmu_unmap() - Remove memory region mapping from Audio SMMU CB
 * @qdf_dev: pointer to QDF device structure
 * @iova: DMA address
 * @size: memory region size
 *
 * Return: None
 */
static inline
void dp_audio_smmu_unmap(qdf_device_t qdf_dev, qdf_dma_addr_t iova,
			 qdf_size_t size)
{
	pld_audio_smmu_unmap(qdf_dev->dev, iova, size);
}
#else
static inline
int dp_audio_smmu_map(qdf_device_t qdf_dev, qdf_dma_addr_t paddr,
		      qdf_dma_addr_t iova, qdf_size_t size)
{
	return 0;
}

static inline
void dp_audio_smmu_unmap(qdf_device_t qdf_dev, qdf_dma_addr_t iova,
			 qdf_size_t size)
{
}
#endif

#if defined(QCA_DP_RX_NBUF_NO_MAP_UNMAP) && !defined(BUILD_X86)
static inline
QDF_STATUS dp_pdev_rx_buffers_attach_simple(struct dp_soc *soc, uint32_t mac_id,
					    struct dp_srng *rxdma_srng,
					    struct rx_desc_pool *rx_desc_pool,
					    uint32_t num_req_buffers)
{
	return __dp_pdev_rx_buffers_no_map_attach(soc, mac_id,
						  rxdma_srng,
						  rx_desc_pool,
						  num_req_buffers);
}

static inline
void dp_rx_buffers_replenish_simple(struct dp_soc *soc, uint32_t mac_id,
				    struct dp_srng *rxdma_srng,
				    struct rx_desc_pool *rx_desc_pool,
				    uint32_t num_req_buffers,
				    union dp_rx_desc_list_elem_t **desc_list,
				    union dp_rx_desc_list_elem_t **tail)
{
	__dp_rx_buffers_no_map_replenish(soc, mac_id, rxdma_srng, rx_desc_pool,
					 num_req_buffers, desc_list, tail);
}

static inline
void dp_rx_buffers_lt_replenish_simple(struct dp_soc *soc, uint32_t mac_id,
				       struct dp_srng *rxdma_srng,
				       struct rx_desc_pool *rx_desc_pool,
				       uint32_t num_req_buffers,
				       union dp_rx_desc_list_elem_t **desc_list,
				       union dp_rx_desc_list_elem_t **tail)
{
	__dp_rx_buffers_no_map_lt_replenish(soc, mac_id, rxdma_srng,
					    rx_desc_pool);
}

#ifndef QCA_DP_NBUF_FAST_RECYCLE_CHECK
static inline
qdf_dma_addr_t dp_rx_nbuf_sync_no_dsb(struct dp_soc *dp_soc,
				      qdf_nbuf_t nbuf,
				      uint32_t buf_size)
{
	qdf_nbuf_dma_inv_range_no_dsb((void *)nbuf->data,
				      (void *)(nbuf->data + buf_size));

	return (qdf_dma_addr_t)qdf_mem_virt_to_phys(nbuf->data);
}
#else
#define L3_HEADER_PAD 2
static inline
qdf_dma_addr_t dp_rx_nbuf_sync_no_dsb(struct dp_soc *dp_soc,
				      qdf_nbuf_t nbuf,
				      uint32_t buf_size)
{
	if (nbuf->recycled_for_ds) {
		nbuf->recycled_for_ds = 0;
		return (qdf_dma_addr_t)qdf_mem_virt_to_phys(nbuf->data);
	}

	if (unlikely(!nbuf->fast_recycled)) {
		qdf_nbuf_dma_inv_range_no_dsb((void *)nbuf->data,
					      (void *)(nbuf->data + buf_size));
	} else {
		/*
		 * If fast_recycled is set, we can avoid invalidating
		 * the complete buffer as it would have been invalidated
		 * by the tx driver before giving it to the recycler.
		 *
		 * But we still need to invalidate rx_pkt_tlv_size as this
		 * area will not be invalidated in the TX path.
		 */
		DP_STATS_INC(dp_soc, rx.fast_recycled, 1);
		qdf_nbuf_dma_inv_range_no_dsb((void *)nbuf->data,
					      (void *)(nbuf->data +
						       dp_soc->rx_pkt_tlv_size +
						       L3_HEADER_PAD));
	}

	nbuf->fast_recycled = 0;
	return (qdf_dma_addr_t)qdf_mem_virt_to_phys(nbuf->data);
}
#endif

static inline
qdf_dma_addr_t dp_rx_nbuf_sync(struct dp_soc *dp_soc,
			       qdf_nbuf_t nbuf,
			       uint32_t buf_size)
{
	qdf_nbuf_dma_inv_range((void *)nbuf->data,
			       (void *)(nbuf->data + buf_size));

	return (qdf_dma_addr_t)qdf_mem_virt_to_phys(nbuf->data);
}

#if !defined(SPECULATIVE_READ_DISABLED)
static inline
void dp_rx_nbuf_unmap(struct dp_soc *soc,
		      struct dp_rx_desc *rx_desc,
		      uint8_t reo_ring_num)
{
	struct rx_desc_pool *rx_desc_pool;
	qdf_nbuf_t nbuf;

	rx_desc_pool = &soc->rx_desc_buf[rx_desc->pool_id];
	nbuf = rx_desc->nbuf;

	qdf_nbuf_dma_inv_range_no_dsb((void *)nbuf->data,
				      (void *)(nbuf->data +
					       rx_desc_pool->buf_size));
}

static inline
void dp_rx_nbuf_unmap_pool(struct dp_soc *soc,
			   struct rx_desc_pool *rx_desc_pool,
			   qdf_nbuf_t nbuf)
{
	qdf_nbuf_dma_inv_range((void *)nbuf->data,
			       (void *)(nbuf->data + rx_desc_pool->buf_size));
}

#else
static inline
void dp_rx_nbuf_unmap(struct dp_soc *soc,
		      struct dp_rx_desc *rx_desc,
		      uint8_t reo_ring_num)
{
}

static inline
void dp_rx_nbuf_unmap_pool(struct dp_soc *soc,
			   struct rx_desc_pool *rx_desc_pool,
			   qdf_nbuf_t nbuf)
{
}
#endif

static inline
void dp_rx_per_core_stats_update(struct dp_soc *soc, uint8_t ring_id,
				 uint32_t bufs_reaped)
{
}

static inline
qdf_nbuf_t dp_rx_nbuf_alloc(struct dp_soc *soc,
			    struct rx_desc_pool *rx_desc_pool)
{
	return qdf_nbuf_alloc_simple(soc->osdev, rx_desc_pool->buf_size,
				     RX_BUFFER_RESERVATION,
				     rx_desc_pool->buf_alignment, FALSE);
}

static inline
void dp_rx_nbuf_free(qdf_nbuf_t nbuf)
{
	qdf_nbuf_free_simple(nbuf);
}
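
/*
 * Usage sketch: on this QCA_DP_RX_NBUF_NO_MAP_UNMAP fast path, buffers
 * are not DMA-mapped per packet; the cache lines are invalidated and
 * the physical address is derived directly. A replenish step might
 * therefore look like the following (error handling omitted, names
 * hypothetical):
 *
 *	qdf_nbuf_t nbuf = dp_rx_nbuf_alloc(soc, rx_desc_pool);
 *	qdf_dma_addr_t paddr =
 *		dp_rx_nbuf_sync_no_dsb(soc, nbuf, rx_desc_pool->buf_size);
 *	// paddr is then programmed into the RXDMA ring entry
 */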
#else
static inline
QDF_STATUS dp_pdev_rx_buffers_attach_simple(struct dp_soc *soc, uint32_t mac_id,
					    struct dp_srng *rxdma_srng,
					    struct rx_desc_pool *rx_desc_pool,
					    uint32_t num_req_buffers)
{
	return dp_pdev_rx_buffers_attach(soc, mac_id,
					 rxdma_srng,
					 rx_desc_pool,
					 num_req_buffers);
}

static inline
void dp_rx_buffers_replenish_simple(struct dp_soc *soc, uint32_t mac_id,
				    struct dp_srng *rxdma_srng,
				    struct rx_desc_pool *rx_desc_pool,
				    uint32_t num_req_buffers,
				    union dp_rx_desc_list_elem_t **desc_list,
				    union dp_rx_desc_list_elem_t **tail)
{
	dp_rx_buffers_replenish(soc, mac_id, rxdma_srng, rx_desc_pool,
				num_req_buffers, desc_list, tail, false);
}

static inline
void dp_rx_buffers_lt_replenish_simple(struct dp_soc *soc, uint32_t mac_id,
				       struct dp_srng *rxdma_srng,
				       struct rx_desc_pool *rx_desc_pool,
				       uint32_t num_req_buffers,
				       union dp_rx_desc_list_elem_t **desc_list,
				       union dp_rx_desc_list_elem_t **tail)
{
	dp_rx_buffers_replenish(soc, mac_id, rxdma_srng, rx_desc_pool,
				num_req_buffers, desc_list, tail, false);
}

static inline
qdf_dma_addr_t dp_rx_nbuf_sync_no_dsb(struct dp_soc *dp_soc,
				      qdf_nbuf_t nbuf,
				      uint32_t buf_size)
{
	return (qdf_dma_addr_t)NULL;
}

static inline
qdf_dma_addr_t dp_rx_nbuf_sync(struct dp_soc *dp_soc,
			       qdf_nbuf_t nbuf,
			       uint32_t buf_size)
{
	return (qdf_dma_addr_t)NULL;
}

static inline
void dp_rx_nbuf_unmap(struct dp_soc *soc,
		      struct dp_rx_desc *rx_desc,
		      uint8_t reo_ring_num)
{
	struct rx_desc_pool *rx_desc_pool;

	rx_desc_pool = &soc->rx_desc_buf[rx_desc->pool_id];
	dp_ipa_reo_ctx_buf_mapping_lock(soc, reo_ring_num);

	dp_audio_smmu_unmap(soc->osdev,
			    QDF_NBUF_CB_PADDR(rx_desc->nbuf),
			    rx_desc_pool->buf_size);

	dp_ipa_handle_rx_buf_smmu_mapping(soc, rx_desc->nbuf,
					  rx_desc_pool->buf_size,
					  false, __func__, __LINE__);

	qdf_nbuf_unmap_nbytes_single(soc->osdev, rx_desc->nbuf,
				     QDF_DMA_FROM_DEVICE,
				     rx_desc_pool->buf_size);

	dp_ipa_reo_ctx_buf_mapping_unlock(soc, reo_ring_num);
}

static inline
void dp_rx_nbuf_unmap_pool(struct dp_soc *soc,
			   struct rx_desc_pool *rx_desc_pool,
			   qdf_nbuf_t nbuf)
{
	dp_audio_smmu_unmap(soc->osdev, QDF_NBUF_CB_PADDR(nbuf),
			    rx_desc_pool->buf_size);
	dp_ipa_handle_rx_buf_smmu_mapping(soc, nbuf, rx_desc_pool->buf_size,
					  false, __func__, __LINE__);
	qdf_nbuf_unmap_nbytes_single(soc->osdev, nbuf, QDF_DMA_FROM_DEVICE,
				     rx_desc_pool->buf_size);
}

static inline
void dp_rx_per_core_stats_update(struct dp_soc *soc, uint8_t ring_id,
				 uint32_t bufs_reaped)
{
	int cpu_id = qdf_get_cpu();

	DP_STATS_INC(soc, rx.ring_packets[cpu_id][ring_id], bufs_reaped);
}

static inline
qdf_nbuf_t dp_rx_nbuf_alloc(struct dp_soc *soc,
			    struct rx_desc_pool *rx_desc_pool)
{
	return qdf_nbuf_alloc(soc->osdev, rx_desc_pool->buf_size,
			      RX_BUFFER_RESERVATION,
			      rx_desc_pool->buf_alignment, FALSE);
}

static inline
void dp_rx_nbuf_free(qdf_nbuf_t nbuf)
{
	qdf_nbuf_free(nbuf);
}
#endif

#ifdef DP_UMAC_HW_RESET_SUPPORT
/**
 * dp_rx_desc_reuse() - Reuse the rx descriptors to fill the rx buf ring
 * @soc: core txrx main context
 * @nbuf_list: nbuf list for delayed free
 *
 * Return: void
 */
void dp_rx_desc_reuse(struct dp_soc *soc, qdf_nbuf_t *nbuf_list);

/**
 * dp_rx_desc_delayed_free() - Delayed free of the rx descs
 * @soc: core txrx main context
 *
 * Return: void
 */
void dp_rx_desc_delayed_free(struct dp_soc *soc);
#endif /* DP_UMAC_HW_RESET_SUPPORT */

/**
 * dp_rx_get_txrx_peer_and_vdev() - Get txrx peer and vdev from peer id
 * @soc: core txrx main context
 * @nbuf: pointer to the first msdu of an amsdu.
 * @peer_id: Peer id of the peer
 * @txrx_ref_handle: Buffer to save the handle for txrx peer's reference
 * @pkt_capture_offload: Flag indicating if pkt capture offload is needed
 * @vdev: Buffer to hold pointer to vdev
 * @rx_pdev: Buffer to hold pointer to rx pdev
 * @dsf: delay stats flag
 * @old_tid: Old tid
 *
 * Get txrx peer and vdev from peer id
 *
 * Return: Pointer to txrx peer
 */
static inline struct dp_txrx_peer *
dp_rx_get_txrx_peer_and_vdev(struct dp_soc *soc,
			     qdf_nbuf_t nbuf,
			     uint16_t peer_id,
			     dp_txrx_ref_handle *txrx_ref_handle,
			     bool pkt_capture_offload,
			     struct dp_vdev **vdev,
			     struct dp_pdev **rx_pdev,
			     uint32_t *dsf,
			     uint32_t *old_tid)
{
	struct dp_txrx_peer *txrx_peer = NULL;

	txrx_peer = dp_txrx_peer_get_ref_by_id(soc, peer_id, txrx_ref_handle,
					       DP_MOD_ID_RX);

	if (qdf_likely(txrx_peer)) {
		*vdev = txrx_peer->vdev;
	} else {
		nbuf->next = NULL;
		dp_rx_deliver_to_pkt_capture_no_peer(soc, nbuf,
						     pkt_capture_offload);
		if (!pkt_capture_offload)
			dp_rx_deliver_to_stack_no_peer(soc, nbuf);

		goto end;
	}

	if (qdf_unlikely(!(*vdev))) {
		qdf_nbuf_free(nbuf);
		DP_STATS_INC(soc, rx.err.invalid_vdev, 1);
		goto end;
	}

	*rx_pdev = (*vdev)->pdev;
	*dsf = (*rx_pdev)->delay_stats_flag;
	*old_tid = 0xff;

end:
	return txrx_peer;
}

static inline QDF_STATUS
dp_peer_rx_reorder_queue_setup(struct dp_soc *soc, struct dp_peer *peer,
			       int tid, uint32_t ba_window_size)
{
	return soc->arch_ops.dp_peer_rx_reorder_queue_setup(soc,
							    peer, tid,
							    ba_window_size);
}

static inline
void dp_rx_nbuf_list_deliver(struct dp_soc *soc,
			     struct dp_vdev *vdev,
			     struct dp_txrx_peer *txrx_peer,
			     uint16_t peer_id,
			     uint8_t pkt_capture_offload,
			     qdf_nbuf_t deliver_list_head,
			     qdf_nbuf_t deliver_list_tail)
{
	qdf_nbuf_t nbuf, next;

	if (qdf_likely(deliver_list_head)) {
		if (qdf_likely(txrx_peer)) {
			dp_rx_deliver_to_pkt_capture(soc, vdev->pdev, peer_id,
						     pkt_capture_offload,
						     deliver_list_head);
			if (!pkt_capture_offload)
				dp_rx_deliver_to_stack(soc, vdev, txrx_peer,
						       deliver_list_head,
						       deliver_list_tail);
		} else {
			nbuf = deliver_list_head;
			while (nbuf) {
				next = nbuf->next;
				nbuf->next = NULL;
				dp_rx_deliver_to_stack_no_peer(soc, nbuf);
				nbuf = next;
			}
		}
	}
}
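
/*
 * Illustrative sketch (hypothetical fast-path fragment): per-MSDU peer
 * resolution with dp_rx_get_txrx_peer_and_vdev(), followed by list
 * delivery via dp_rx_nbuf_list_deliver(). The surrounding loop and the
 * error handling are omitted.
 *
 *	txrx_peer = dp_rx_get_txrx_peer_and_vdev(soc, nbuf, peer_id,
 *						 &txrx_ref_handle,
 *						 pkt_capture_offload,
 *						 &vdev, &rx_pdev, &dsf,
 *						 &old_tid);
 *	if (qdf_unlikely(!txrx_peer || !vdev))
 *		continue;	// nbuf was already consumed by the helper
 *
 *	DP_RX_LIST_APPEND(deliver_list_head, deliver_list_tail, nbuf);
 *	// ... after the reap loop ...
 *	dp_rx_nbuf_list_deliver(soc, vdev, txrx_peer, peer_id,
 *				pkt_capture_offload,
 *				deliver_list_head, deliver_list_tail);
 */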
#ifdef DP_TX_RX_TPUT_SIMULATE
/*
 * Change this macro value to simulate different RX T-put.
 * The macro holds (multiplication factor - 1): e.g. if OTA is 100 Mbps
 * and 200 Mbps is to be simulated, the multiplication factor is 2, so
 * set the macro value to 1.
 */
#define DP_RX_PKTS_DUPLICATE_CNT 0
static inline
void dp_rx_nbuf_list_dup_deliver(struct dp_soc *soc,
				 struct dp_vdev *vdev,
				 struct dp_txrx_peer *txrx_peer,
				 uint16_t peer_id,
				 uint8_t pkt_capture_offload,
				 qdf_nbuf_t ori_list_head,
				 qdf_nbuf_t ori_list_tail)
{
	qdf_nbuf_t new_skb = NULL;
	qdf_nbuf_t new_list_head = NULL;
	qdf_nbuf_t new_list_tail = NULL;
	qdf_nbuf_t nbuf = NULL;
	int i;

	for (i = 0; i < DP_RX_PKTS_DUPLICATE_CNT; i++) {
		nbuf = ori_list_head;
		new_list_head = NULL;
		new_list_tail = NULL;

		while (nbuf) {
			new_skb = qdf_nbuf_copy(nbuf);
			if (qdf_likely(new_skb))
				DP_RX_LIST_APPEND(new_list_head,
						  new_list_tail,
						  new_skb);
			else
				dp_err("copy skb failed");

			nbuf = qdf_nbuf_next(nbuf);
		}

		/* deliver the copied nbuf list */
		dp_rx_nbuf_list_deliver(soc, vdev, txrx_peer, peer_id,
					pkt_capture_offload,
					new_list_head,
					new_list_tail);
	}

	/* deliver the original skb_list */
	dp_rx_nbuf_list_deliver(soc, vdev, txrx_peer, peer_id,
				pkt_capture_offload,
				ori_list_head,
				ori_list_tail);
}

#define DP_RX_DELIVER_TO_STACK dp_rx_nbuf_list_dup_deliver

#else /* !DP_TX_RX_TPUT_SIMULATE */

#define DP_RX_DELIVER_TO_STACK dp_rx_nbuf_list_deliver

#endif /* DP_TX_RX_TPUT_SIMULATE */

#endif /* _DP_RX_H */