1 /* 2 * Copyright (c) 2021 The Linux Foundation. All rights reserved. 3 * Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved. 4 * 5 * Permission to use, copy, modify, and/or distribute this software for 6 * any purpose with or without fee is hereby granted, provided that the 7 * above copyright notice and this permission notice appear in all 8 * copies. 9 * 10 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL 11 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED 12 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE 13 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL 14 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR 15 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER 16 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR 17 * PERFORMANCE OF THIS SOFTWARE. 18 */ 19 #ifndef __DP_BE_H 20 #define __DP_BE_H 21 22 #include <dp_types.h> 23 #include <hal_be_tx.h> 24 #ifdef WLAN_MLO_MULTI_CHIP 25 #include "mlo/dp_mlo.h" 26 #else 27 #include <dp_peer.h> 28 #endif 29 #ifdef WIFI_MONITOR_SUPPORT 30 #include <dp_mon.h> 31 #endif 32 33 enum CMEM_MEM_CLIENTS { 34 COOKIE_CONVERSION, 35 FISA_FST, 36 }; 37 38 /* maximum number of entries in one page of secondary page table */ 39 #define DP_CC_SPT_PAGE_MAX_ENTRIES 512 40 41 /* maximum number of entries in one page of secondary page table */ 42 #define DP_CC_SPT_PAGE_MAX_ENTRIES_MASK (DP_CC_SPT_PAGE_MAX_ENTRIES - 1) 43 44 /* maximum number of entries in primary page table */ 45 #define DP_CC_PPT_MAX_ENTRIES \ 46 DP_CC_PPT_MEM_SIZE / DP_CC_PPT_ENTRY_SIZE_4K_ALIGNED 47 48 /* cookie conversion required CMEM offset from CMEM pool */ 49 #define DP_CC_MEM_OFFSET_IN_CMEM 0 50 51 /* cookie conversion primary page table size 4K */ 52 #if defined(WLAN_MAX_PDEVS) && (WLAN_MAX_PDEVS == 1) 53 #define DP_CC_PPT_MEM_SIZE 4096 54 #else 55 #define DP_CC_PPT_MEM_SIZE 8192 56 #endif 57 58 /* FST required CMEM 
offset M pool */ 59 #define DP_FST_MEM_OFFSET_IN_CMEM \ 60 (DP_CC_MEM_OFFSET_IN_CMEM + DP_CC_PPT_MEM_SIZE) 61 62 /* lower 9 bits in Desc ID for offset in page of SPT */ 63 #define DP_CC_DESC_ID_SPT_VA_OS_SHIFT 0 64 65 #define DP_CC_DESC_ID_SPT_VA_OS_MASK 0x1FF 66 67 #define DP_CC_DESC_ID_SPT_VA_OS_LSB 0 68 69 #define DP_CC_DESC_ID_SPT_VA_OS_MSB 8 70 71 /* higher 11 bits in Desc ID for offset in CMEM of PPT */ 72 #define DP_CC_DESC_ID_PPT_PAGE_OS_LSB 9 73 74 #define DP_CC_DESC_ID_PPT_PAGE_OS_MSB 19 75 76 #define DP_CC_DESC_ID_PPT_PAGE_OS_SHIFT 9 77 78 #define DP_CC_DESC_ID_PPT_PAGE_OS_MASK 0xFFE00 79 80 /* 81 * page 4K unaligned case, single SPT page physical address 82 * need 8 bytes in PPT 83 */ 84 #define DP_CC_PPT_ENTRY_SIZE_4K_UNALIGNED 8 85 /* 86 * page 4K aligned case, single SPT page physical address 87 * need 4 bytes in PPT 88 */ 89 #define DP_CC_PPT_ENTRY_SIZE_4K_ALIGNED 4 90 91 /* 4K aligned case, number of bits HW append for one PPT entry value */ 92 #define DP_CC_PPT_ENTRY_HW_APEND_BITS_4K_ALIGNED 12 93 94 #if defined(WLAN_MAX_PDEVS) && (WLAN_MAX_PDEVS == 1) 95 /* WBM2SW ring id for rx release */ 96 #define WBM2SW_REL_ERR_RING_NUM 3 97 #else 98 /* WBM2SW ring id for rx release */ 99 #define WBM2SW_REL_ERR_RING_NUM 5 100 #endif 101 102 #ifdef WLAN_SUPPORT_PPEDS 103 #define DP_PPEDS_STAMODE_ASTIDX_MAP_REG_IDX 1 104 /* The MAX PPE PRI2TID */ 105 #define DP_TX_INT_PRI2TID_MAX 15 106 107 /* size of CMEM needed for a ppeds tx desc pool */ 108 #define DP_TX_PPEDS_DESC_POOL_CMEM_SIZE \ 109 ((WLAN_CFG_NUM_PPEDS_TX_DESC_MAX / DP_CC_SPT_PAGE_MAX_ENTRIES) * \ 110 DP_CC_PPT_ENTRY_SIZE_4K_ALIGNED) 111 112 /* Offset of ppeds tx descripotor pool */ 113 #define DP_TX_PPEDS_DESC_CMEM_OFFSET 0 114 115 #define PEER_ROUTING_USE_PPE 1 116 #define PEER_ROUTING_ENABLED 1 117 #define DP_PPE_INTR_STRNG_LEN 32 118 #define DP_PPE_INTR_MAX 3 119 120 #else 121 #define DP_TX_PPEDS_DESC_CMEM_OFFSET 0 122 #define DP_TX_PPEDS_DESC_POOL_CMEM_SIZE 0 123 124 #define DP_PPE_INTR_STRNG_LEN 0 
125 #define DP_PPE_INTR_MAX 0 126 #endif 127 128 /* tx descriptor are programmed at start of CMEM region*/ 129 #define DP_TX_DESC_CMEM_OFFSET \ 130 (DP_TX_PPEDS_DESC_CMEM_OFFSET + DP_TX_PPEDS_DESC_POOL_CMEM_SIZE) 131 132 /* size of CMEM needed for a tx desc pool*/ 133 #define DP_TX_DESC_POOL_CMEM_SIZE \ 134 ((WLAN_CFG_NUM_TX_DESC_MAX / DP_CC_SPT_PAGE_MAX_ENTRIES) * \ 135 DP_CC_PPT_ENTRY_SIZE_4K_ALIGNED) 136 137 #ifndef QCA_SUPPORT_DP_GLOBAL_CTX 138 /* Offset of rx descripotor pool */ 139 #define DP_RX_DESC_CMEM_OFFSET \ 140 DP_TX_DESC_CMEM_OFFSET + (MAX_TXDESC_POOLS * DP_TX_DESC_POOL_CMEM_SIZE) 141 142 #else 143 /* tx special descriptor are programmed after tx desc CMEM region*/ 144 #define DP_TX_SPCL_DESC_CMEM_OFFSET \ 145 DP_TX_DESC_CMEM_OFFSET + (MAX_TXDESC_POOLS * DP_TX_DESC_POOL_CMEM_SIZE) 146 147 /* size of CMEM needed for a tx special desc pool*/ 148 #define DP_TX_SPCL_DESC_POOL_CMEM_SIZE \ 149 ((WLAN_CFG_NUM_TX_SPL_DESC_MAX / DP_CC_SPT_PAGE_MAX_ENTRIES) * \ 150 DP_CC_PPT_ENTRY_SIZE_4K_ALIGNED) 151 152 /* Offset of rx descripotor pool */ 153 #define DP_RX_DESC_CMEM_OFFSET \ 154 DP_TX_SPCL_DESC_CMEM_OFFSET + (MAX_TXDESC_POOLS * \ 155 DP_TX_SPCL_DESC_POOL_CMEM_SIZE) 156 #endif 157 158 /* size of CMEM needed for a rx desc pool */ 159 #define DP_RX_DESC_POOL_CMEM_SIZE \ 160 ((WLAN_CFG_RX_SW_DESC_NUM_SIZE_MAX / DP_CC_SPT_PAGE_MAX_ENTRIES) * \ 161 DP_CC_PPT_ENTRY_SIZE_4K_ALIGNED) 162 163 /* get ppt_id from CMEM_OFFSET */ 164 #define DP_CMEM_OFFSET_TO_PPT_ID(offset) \ 165 ((offset) / DP_CC_PPT_ENTRY_SIZE_4K_ALIGNED) 166 167 /** 168 * struct dp_spt_page_desc - secondary page table page descriptors 169 * @page_v_addr: page virtual address 170 * @page_p_addr: page physical address 171 * @ppt_index: entry index in primary page table where this page physical 172 * address stored 173 */ 174 struct dp_spt_page_desc { 175 uint8_t *page_v_addr; 176 qdf_dma_addr_t page_p_addr; 177 uint32_t ppt_index; 178 }; 179 180 /** 181 * struct dp_hw_cookie_conversion_t - main context 
for HW cookie conversion 182 * @cmem_offset: CMEM offset from base address for primary page table setup 183 * @total_page_num: total DDR page allocated 184 * @page_desc_freelist: available page Desc list 185 * @page_desc_base: page Desc buffer base address. 186 * @page_pool: DDR pages pool 187 * @cc_lock: locks for page acquiring/free 188 */ 189 struct dp_hw_cookie_conversion_t { 190 uint32_t cmem_offset; 191 uint32_t total_page_num; 192 struct dp_spt_page_desc *page_desc_base; 193 struct qdf_mem_multi_page_t page_pool; 194 qdf_spinlock_t cc_lock; 195 }; 196 197 /** 198 * struct dp_spt_page_desc_list - containor of SPT page desc list info 199 * @spt_page_list_head: head of SPT page descriptor list 200 * @spt_page_list_tail: tail of SPT page descriptor list 201 * @num_spt_pages: number of SPT page descriptor allocated 202 */ 203 struct dp_spt_page_desc_list { 204 struct dp_spt_page_desc *spt_page_list_head; 205 struct dp_spt_page_desc *spt_page_list_tail; 206 uint16_t num_spt_pages; 207 }; 208 209 /* HW reading 8 bytes for VA */ 210 #define DP_CC_HW_READ_BYTES 8 211 #define DP_CC_SPT_PAGE_UPDATE_VA(_page_base_va, _index, _desc_va) \ 212 { *((uintptr_t *)((_page_base_va) + (_index) * DP_CC_HW_READ_BYTES)) \ 213 = (uintptr_t)(_desc_va); } 214 215 /** 216 * struct dp_tx_bank_profile - DP wrapper for TCL banks 217 * @is_configured: flag indicating if this bank is configured 218 * @ref_count: ref count indicating number of users of the bank 219 * @bank_config: HAL TX bank configuration 220 */ 221 struct dp_tx_bank_profile { 222 uint8_t is_configured; 223 qdf_atomic_t ref_count; 224 union hal_tx_bank_config bank_config; 225 }; 226 227 #ifdef WLAN_SUPPORT_PPEDS 228 /** 229 * struct dp_ppe_vp_tbl_entry - PPE Virtual table entry 230 * @is_configured: Boolean that the entry is configured. 
231 */ 232 struct dp_ppe_vp_tbl_entry { 233 bool is_configured; 234 }; 235 236 /** 237 * struct dp_ppe_vp_search_idx_tbl_entry - PPE Virtual search table entry 238 * @is_configured: Boolean that the entry is configured. 239 */ 240 struct dp_ppe_vp_search_idx_tbl_entry { 241 bool is_configured; 242 }; 243 244 /** 245 * struct dp_ppe_vp_profile - PPE direct switch profiler per vdev 246 * @is_configured: Boolean that the entry is configured. 247 * @vp_num: Virtual port number 248 * @ppe_vp_num_idx: Index to the PPE VP table entry 249 * @search_idx_reg_num: Address search Index register number 250 * @drop_prec_enable: Drop precedance enable 251 * @to_fw: To FW exception enable/disable. 252 * @use_ppe_int_pri: Use PPE INT_PRI to TID mapping table 253 */ 254 struct dp_ppe_vp_profile { 255 bool is_configured; 256 uint8_t vp_num; 257 uint8_t ppe_vp_num_idx; 258 uint8_t search_idx_reg_num; 259 uint8_t drop_prec_enable; 260 uint8_t to_fw; 261 uint8_t use_ppe_int_pri; 262 }; 263 264 /** 265 * struct dp_ppeds_tx_desc_pool_s - PPEDS Tx Descriptor Pool 266 * @elem_size: Size of each descriptor 267 * @hot_list_len: Length of hotlist chain 268 * @num_allocated: Number of used descriptors 269 * @freelist: Chain of free descriptors 270 * @hotlist: Chain of descriptors with attached nbufs 271 * @desc_pages: multiple page allocation information for actual descriptors 272 * @elem_count: Number of descriptors in the pool 273 * @num_free: Number of free descriptors 274 * @lock: Lock for descriptor allocation/free from/to the pool 275 */ 276 struct dp_ppeds_tx_desc_pool_s { 277 uint16_t elem_size; 278 uint32_t num_allocated; 279 uint32_t hot_list_len; 280 struct dp_tx_desc_s *freelist; 281 struct dp_tx_desc_s *hotlist; 282 struct qdf_mem_multi_page_t desc_pages; 283 uint16_t elem_count; 284 uint32_t num_free; 285 qdf_spinlock_t lock; 286 }; 287 #endif 288 289 /** 290 * struct dp_ppeds_napi - napi parameters for ppe ds 291 * @napi: napi structure to register with napi infra 292 * @ndev: 
net_dev structure 293 */ 294 struct dp_ppeds_napi { 295 struct napi_struct napi; 296 struct net_device ndev; 297 }; 298 299 /* 300 * NB: intentionally not using kernel-doc comment because the kernel-doc 301 * script does not handle the TAILQ_HEAD macro 302 * struct dp_soc_be - Extended DP soc for BE targets 303 * @soc: dp soc structure 304 * @num_bank_profiles: num TX bank profiles 305 * @tx_bank_lock: lock for @bank_profiles 306 * @bank_profiles: bank profiles for various TX banks 307 * @page_desc_base: 308 * @cc_cmem_base: cmem offset reserved for CC 309 * @tx_cc_ctx: Cookie conversion context for tx desc pools 310 * @rx_cc_ctx: Cookie conversion context for rx desc pools 311 * @ppeds_int_mode_enabled: PPE DS interrupt mode enabled 312 * @ppeds_stopped: 313 * @reo2ppe_ring: REO2PPE ring 314 * @ppe2tcl_ring: PPE2TCL ring 315 * @ppeds_wbm_release_ring: 316 * @ppe_vp_tbl: PPE VP table 317 * @ppe_vp_search_idx_tbl: PPE VP search idx table 318 * @ppeds_tx_cc_ctx: Cookie conversion context for ppeds tx desc pool 319 * @ppeds_tx_desc: PPEDS tx desc pool 320 * @ppeds_napi_ctxt: 321 * @ppeds_handle: PPEDS soc instance handle 322 * @dp_ppeds_txdesc_hotlist_len: PPEDS tx desc hotlist length 323 * @ppe_vp_tbl_lock: PPE VP table lock 324 * @num_ppe_vp_entries: Number of PPE VP entries 325 * @num_ppe_vp_search_idx_entries: PPEDS VP search idx entries 326 * @irq_name: PPEDS VP irq names 327 * @ppeds_stats: PPEDS stats 328 * @mlo_enabled: Flag to indicate MLO is enabled or not 329 * @mlo_chip_id: MLO chip_id 330 * @ml_ctxt: pointer to global ml_context 331 * @delta_tqm: delta_tqm 332 * @mlo_tstamp_offset: mlo timestamp offset 333 * @mld_peer_hash_lock: lock to protect mld_peer_hash 334 * @mld_peer_hash: peer hash table for ML peers 335 * @mlo_dev_list: list of MLO device context 336 * @mlo_dev_list_lock: lock to protect MLO device ctxt 337 * @ipa_bank_id: TCL bank id used by IPA 338 */ 339 struct dp_soc_be { 340 struct dp_soc soc; 341 uint8_t num_bank_profiles; 342 #if 
defined(WLAN_MAX_PDEVS) && (WLAN_MAX_PDEVS == 1) 343 qdf_mutex_t tx_bank_lock; 344 #else 345 qdf_spinlock_t tx_bank_lock; 346 #endif 347 struct dp_tx_bank_profile *bank_profiles; 348 struct dp_spt_page_desc *page_desc_base; 349 uint32_t cc_cmem_base; 350 struct dp_hw_cookie_conversion_t tx_cc_ctx[MAX_TXDESC_POOLS]; 351 struct dp_hw_cookie_conversion_t rx_cc_ctx[MAX_RXDESC_POOLS]; 352 #ifdef WLAN_SUPPORT_PPEDS 353 uint8_t ppeds_int_mode_enabled:1, 354 ppeds_stopped:1; 355 struct dp_srng reo2ppe_ring; 356 struct dp_srng ppe2tcl_ring; 357 struct dp_srng ppeds_wbm_release_ring; 358 struct dp_ppe_vp_tbl_entry *ppe_vp_tbl; 359 struct dp_ppe_vp_search_idx_tbl_entry *ppe_vp_search_idx_tbl; 360 struct dp_ppe_vp_profile *ppe_vp_profile; 361 struct dp_hw_cookie_conversion_t ppeds_tx_cc_ctx; 362 struct dp_ppeds_tx_desc_pool_s ppeds_tx_desc; 363 struct dp_ppeds_napi ppeds_napi_ctxt; 364 void *ppeds_handle; 365 int dp_ppeds_txdesc_hotlist_len; 366 qdf_mutex_t ppe_vp_tbl_lock; 367 uint8_t num_ppe_vp_entries; 368 uint8_t num_ppe_vp_search_idx_entries; 369 uint8_t num_ppe_vp_profiles; 370 char irq_name[DP_PPE_INTR_MAX][DP_PPE_INTR_STRNG_LEN]; 371 struct { 372 struct { 373 uint64_t desc_alloc_failed; 374 #ifdef GLOBAL_ASSERT_AVOIDANCE 375 uint32_t tx_comp_buf_src; 376 uint32_t tx_comp_desc_null; 377 uint32_t tx_comp_invalid_flag; 378 #endif 379 } tx; 380 } ppeds_stats; 381 #endif 382 #ifdef WLAN_FEATURE_11BE_MLO 383 #ifdef WLAN_MLO_MULTI_CHIP 384 uint8_t mlo_enabled; 385 uint8_t mlo_chip_id; 386 struct dp_mlo_ctxt *ml_ctxt; 387 uint64_t delta_tqm; 388 uint64_t mlo_tstamp_offset; 389 #else 390 /* Protect mld peer hash table */ 391 DP_MUTEX_TYPE mld_peer_hash_lock; 392 struct { 393 uint32_t mask; 394 uint32_t idx_bits; 395 396 TAILQ_HEAD(, dp_peer) * bins; 397 } mld_peer_hash; 398 399 /* MLO device ctxt list */ 400 TAILQ_HEAD(, dp_mlo_dev_ctxt) mlo_dev_list; 401 qdf_spinlock_t mlo_dev_list_lock; 402 #endif 403 #endif 404 #ifdef IPA_OFFLOAD 405 int8_t ipa_bank_id; 406 #endif 407 }; 408 
409 /* convert struct dp_soc_be pointer to struct dp_soc pointer */ 410 #define DP_SOC_BE_GET_SOC(be_soc) ((struct dp_soc *)be_soc) 411 412 /** 413 * struct dp_pdev_be - Extended DP pdev for BE targets 414 * @pdev: dp pdev structure 415 * @monitor_pdev_be: BE specific monitor object 416 * @mlo_link_id: MLO link id for PDEV 417 * @delta_tsf2: delta_tsf2 418 */ 419 struct dp_pdev_be { 420 struct dp_pdev pdev; 421 #ifdef WLAN_MLO_MULTI_CHIP 422 uint8_t mlo_link_id; 423 uint64_t delta_tsf2; 424 #endif 425 }; 426 427 /** 428 * struct dp_vdev_be - Extended DP vdev for BE targets 429 * @vdev: dp vdev structure 430 * @bank_id: bank_id to be used for TX 431 * @vdev_id_check_en: flag if HW vdev_id check is enabled for vdev 432 * @partner_vdev_list: partner list used for Intra-BSS 433 * @bridge_vdev_list: partner bridge vdev list 434 * @mlo_stats: structure to hold stats for mlo unmapped peers 435 * @mcast_primary: MLO Mcast primary vdev 436 * @mlo_dev_ctxt: MLO device context pointer 437 */ 438 struct dp_vdev_be { 439 struct dp_vdev vdev; 440 int8_t bank_id; 441 uint8_t vdev_id_check_en; 442 #ifdef WLAN_MLO_MULTI_CHIP 443 struct cdp_vdev_stats mlo_stats; 444 #ifdef WLAN_FEATURE_11BE_MLO 445 #ifdef WLAN_MCAST_MLO 446 bool mcast_primary; 447 #endif 448 #endif 449 #endif 450 #ifdef WLAN_FEATURE_11BE_MLO 451 struct dp_mlo_dev_ctxt *mlo_dev_ctxt; 452 #endif /* WLAN_FEATURE_11BE_MLO */ 453 }; 454 455 #if defined(WLAN_FEATURE_11BE_MLO) && defined(WLAN_DP_MLO_DEV_CTX) 456 /** 457 * struct dp_mlo_dev_ctxt - Datapath MLO device context 458 * 459 * @ml_dev_list_elem: node in the ML dev list of Global MLO context 460 * @mld_mac_addr: MLO device MAC address 461 * @vdev_list: list of vdevs associated with this MLO connection 462 * @vdev_list_lock: lock to protect vdev list 463 * @bridge_vdev: list of bridge vdevs associated with this MLO connection 464 * @is_bridge_vdev_present: flag to check if bridge vdev is present 465 * @vdev_list_lock: lock to protect vdev list 466 * @vdev_count: 
number of elements in the vdev list 467 * @seq_num: DP MLO multicast sequence number 468 * @ref_cnt: reference count 469 * @mod_refs: module reference count 470 * @ref_delete_pending: flag to monitor last ref delete 471 * @stats: structure to store vdev stats of removed MLO Link 472 */ 473 struct dp_mlo_dev_ctxt { 474 TAILQ_ENTRY(dp_mlo_dev_ctxt) ml_dev_list_elem; 475 union dp_align_mac_addr mld_mac_addr; 476 #ifdef WLAN_MLO_MULTI_CHIP 477 uint8_t vdev_list[WLAN_MAX_MLO_CHIPS][WLAN_MAX_MLO_LINKS_PER_SOC]; 478 uint8_t bridge_vdev[WLAN_MAX_MLO_CHIPS][WLAN_MAX_MLO_LINKS_PER_SOC]; 479 bool is_bridge_vdev_present; 480 qdf_spinlock_t vdev_list_lock; 481 uint16_t vdev_count; 482 uint16_t seq_num; 483 #endif 484 qdf_atomic_t ref_cnt; 485 qdf_atomic_t mod_refs[DP_MOD_ID_MAX]; 486 uint8_t ref_delete_pending; 487 struct dp_vdev_stats stats; 488 }; 489 #endif /* WLAN_FEATURE_11BE_MLO */ 490 491 /** 492 * struct dp_peer_be - Extended DP peer for BE targets 493 * @peer: dp peer structure 494 * @priority_valid: 495 */ 496 struct dp_peer_be { 497 struct dp_peer peer; 498 #ifdef WLAN_SUPPORT_PPEDS 499 uint8_t priority_valid; 500 #endif 501 }; 502 503 /** 504 * dp_get_soc_context_size_be() - get context size for target specific DP soc 505 * 506 * Return: value in bytes for BE specific soc structure 507 */ 508 qdf_size_t dp_get_soc_context_size_be(void); 509 510 /** 511 * dp_initialize_arch_ops_be() - initialize BE specific arch ops 512 * @arch_ops: arch ops pointer 513 * 514 * Return: none 515 */ 516 void dp_initialize_arch_ops_be(struct dp_arch_ops *arch_ops); 517 518 /** 519 * dp_get_context_size_be() - get BE specific size for peer/vdev/pdev/soc 520 * @context_type: context type for which the size is needed 521 * 522 * Return: size in bytes for the context_type 523 */ 524 qdf_size_t dp_get_context_size_be(enum dp_context_type context_type); 525 526 /** 527 * dp_get_be_soc_from_dp_soc() - get dp_soc_be from dp_soc 528 * @soc: dp_soc pointer 529 * 530 * Return: dp_soc_be pointer 
531 */ 532 static inline struct dp_soc_be *dp_get_be_soc_from_dp_soc(struct dp_soc *soc) 533 { 534 return (struct dp_soc_be *)soc; 535 } 536 537 /** 538 * dp_mlo_iter_ptnr_soc() - iterate through mlo soc list and call the callback 539 * @be_soc: dp_soc_be pointer 540 * @func: Function to be called for each soc 541 * @arg: context to be passed to the callback 542 * 543 * Return: true if mlo is enabled, false if mlo is disabled 544 */ 545 bool dp_mlo_iter_ptnr_soc(struct dp_soc_be *be_soc, dp_ptnr_soc_iter_func func, 546 void *arg); 547 548 #ifdef WLAN_MLO_MULTI_CHIP 549 typedef struct dp_mlo_ctxt *dp_mld_peer_hash_obj_t; 550 typedef struct dp_mlo_ctxt *dp_mlo_dev_obj_t; 551 552 /** 553 * dp_mlo_get_peer_hash_obj() - return the container struct of MLO hash table 554 * @soc: soc handle 555 * 556 * return: MLD peer hash object 557 */ 558 static inline dp_mld_peer_hash_obj_t 559 dp_mlo_get_peer_hash_obj(struct dp_soc *soc) 560 { 561 struct dp_soc_be *be_soc = dp_get_be_soc_from_dp_soc(soc); 562 563 return be_soc->ml_ctxt; 564 } 565 566 /** 567 * dp_get_mlo_dev_list_obj() - return the container struct of MLO Dev list 568 * @be_soc: be soc handle 569 * 570 * return: MLO dev list object 571 */ 572 static inline dp_mlo_dev_obj_t 573 dp_get_mlo_dev_list_obj(struct dp_soc_be *be_soc) 574 { 575 return be_soc->ml_ctxt; 576 } 577 578 #if defined(WLAN_FEATURE_11BE_MLO) 579 /** 580 * dp_mlo_partner_chips_map() - Map MLO peers to partner SOCs 581 * @soc: Soc handle 582 * @peer: DP peer handle for ML peer 583 * @peer_id: peer_id 584 * Return: None 585 */ 586 void dp_mlo_partner_chips_map(struct dp_soc *soc, 587 struct dp_peer *peer, 588 uint16_t peer_id); 589 590 /** 591 * dp_mlo_partner_chips_unmap() - Unmap MLO peers to partner SOCs 592 * @soc: Soc handle 593 * @peer_id: peer_id 594 * Return: None 595 */ 596 void dp_mlo_partner_chips_unmap(struct dp_soc *soc, 597 uint16_t peer_id); 598 599 /** 600 * dp_soc_initialize_cdp_cmn_mlo_ops() - Initialize common CDP API's 601 * @soc: Soc 
handle 602 * 603 * Return: None 604 */ 605 void dp_soc_initialize_cdp_cmn_mlo_ops(struct dp_soc *soc); 606 607 #ifdef WLAN_MLO_MULTI_CHIP 608 typedef void dp_ptnr_vdev_iter_func(struct dp_vdev_be *be_vdev, 609 struct dp_vdev *ptnr_vdev, 610 void *arg); 611 612 /** 613 * dp_mlo_iter_ptnr_vdev() - API to iterate through ptnr vdev list 614 * @be_soc: dp_soc_be pointer 615 * @be_vdev: dp_vdev_be pointer 616 * @func: function to be called for each peer 617 * @arg: argument need to be passed to func 618 * @mod_id: module id 619 * @type: iterate type 620 * @include_self_vdev: flag to include/exclude self vdev in iteration 621 * 622 * Return: None 623 */ 624 void dp_mlo_iter_ptnr_vdev(struct dp_soc_be *be_soc, 625 struct dp_vdev_be *be_vdev, 626 dp_ptnr_vdev_iter_func func, void *arg, 627 enum dp_mod_id mod_id, 628 uint8_t type, 629 bool include_self_vdev); 630 #endif 631 632 #ifdef WLAN_MCAST_MLO 633 /** 634 * dp_mlo_get_mcast_primary_vdev() - get ref to mcast primary vdev 635 * @be_soc: dp_soc_be pointer 636 * @be_vdev: dp_vdev_be pointer 637 * @mod_id: module id 638 * 639 * Return: mcast primary DP VDEV handle on success, NULL on failure 640 */ 641 struct dp_vdev *dp_mlo_get_mcast_primary_vdev(struct dp_soc_be *be_soc, 642 struct dp_vdev_be *be_vdev, 643 enum dp_mod_id mod_id); 644 #endif 645 #endif 646 647 #else 648 typedef struct dp_soc_be *dp_mld_peer_hash_obj_t; 649 typedef struct dp_soc_be *dp_mlo_dev_obj_t; 650 651 static inline dp_mld_peer_hash_obj_t 652 dp_mlo_get_peer_hash_obj(struct dp_soc *soc) 653 { 654 return dp_get_be_soc_from_dp_soc(soc); 655 } 656 657 static inline dp_mlo_dev_obj_t 658 dp_get_mlo_dev_list_obj(struct dp_soc_be *be_soc) 659 { 660 return be_soc; 661 } 662 #endif 663 664 #ifdef QCA_SUPPORT_DP_GLOBAL_CTX 665 static inline 666 struct dp_hw_cookie_conversion_t *dp_get_tx_cookie_t(struct dp_soc *soc, 667 uint8_t pool_id) 668 { 669 struct dp_global_context *dp_global = NULL; 670 671 dp_global = wlan_objmgr_get_global_ctx(); 672 return 
dp_global->tx_cc_ctx[pool_id]; 673 } 674 675 static inline 676 struct dp_hw_cookie_conversion_t *dp_get_spcl_tx_cookie_t(struct dp_soc *soc, 677 uint8_t pool_id) 678 { 679 struct dp_global_context *dp_global = NULL; 680 681 dp_global = wlan_objmgr_get_global_ctx(); 682 return dp_global->spcl_tx_cc_ctx[pool_id]; 683 } 684 #else 685 static inline 686 struct dp_hw_cookie_conversion_t *dp_get_tx_cookie_t(struct dp_soc *soc, 687 uint8_t pool_id) 688 { 689 struct dp_soc_be *be_soc = dp_get_be_soc_from_dp_soc(soc); 690 691 return &be_soc->tx_cc_ctx[pool_id]; 692 } 693 694 static inline 695 struct dp_hw_cookie_conversion_t *dp_get_spcl_tx_cookie_t(struct dp_soc *soc, 696 uint8_t pool_id) 697 { 698 struct dp_soc_be *be_soc = dp_get_be_soc_from_dp_soc(soc); 699 700 return &be_soc->tx_cc_ctx[pool_id]; 701 } 702 #endif 703 704 /** 705 * dp_mlo_peer_find_hash_attach_be() - API to initialize ML peer hash table 706 * @mld_hash_obj: Peer has object 707 * @hash_elems: number of entries in hash table 708 * 709 * Return: QDF_STATUS_SUCCESS when attach is success else QDF_STATUS_FAILURE 710 */ 711 QDF_STATUS 712 dp_mlo_peer_find_hash_attach_be(dp_mld_peer_hash_obj_t mld_hash_obj, 713 int hash_elems); 714 715 /** 716 * dp_mlo_peer_find_hash_detach_be() - API to de-initialize ML peer hash table 717 * 718 * @mld_hash_obj: Peer has object 719 * 720 * Return: void 721 */ 722 void dp_mlo_peer_find_hash_detach_be(dp_mld_peer_hash_obj_t mld_hash_obj); 723 724 /** 725 * dp_get_be_pdev_from_dp_pdev() - get dp_pdev_be from dp_pdev 726 * @pdev: dp_pdev pointer 727 * 728 * Return: dp_pdev_be pointer 729 */ 730 static inline 731 struct dp_pdev_be *dp_get_be_pdev_from_dp_pdev(struct dp_pdev *pdev) 732 { 733 return (struct dp_pdev_be *)pdev; 734 } 735 736 /** 737 * dp_get_be_vdev_from_dp_vdev() - get dp_vdev_be from dp_vdev 738 * @vdev: dp_vdev pointer 739 * 740 * Return: dp_vdev_be pointer 741 */ 742 static inline 743 struct dp_vdev_be *dp_get_be_vdev_from_dp_vdev(struct dp_vdev *vdev) 744 { 745 
return (struct dp_vdev_be *)vdev; 746 } 747 748 /** 749 * dp_get_be_peer_from_dp_peer() - get dp_peer_be from dp_peer 750 * @peer: dp_peer pointer 751 * 752 * Return: dp_peer_be pointer 753 */ 754 static inline 755 struct dp_peer_be *dp_get_be_peer_from_dp_peer(struct dp_peer *peer) 756 { 757 return (struct dp_peer_be *)peer; 758 } 759 760 void dp_ppeds_disable_irq(struct dp_soc *soc, struct dp_srng *srng); 761 void dp_ppeds_enable_irq(struct dp_soc *soc, struct dp_srng *srng); 762 763 QDF_STATUS dp_peer_setup_ppeds_be(struct dp_soc *soc, struct dp_peer *peer, 764 struct dp_vdev_be *be_vdev, 765 void *args); 766 767 QDF_STATUS 768 dp_hw_cookie_conversion_attach(struct dp_soc_be *be_soc, 769 struct dp_hw_cookie_conversion_t *cc_ctx, 770 uint32_t num_descs, 771 enum qdf_dp_desc_type desc_type, 772 uint8_t desc_pool_id); 773 774 void dp_reo_shared_qaddr_detach(struct dp_soc *soc); 775 776 QDF_STATUS 777 dp_hw_cookie_conversion_detach(struct dp_soc_be *be_soc, 778 struct dp_hw_cookie_conversion_t *cc_ctx); 779 QDF_STATUS 780 dp_hw_cookie_conversion_init(struct dp_soc_be *be_soc, 781 struct dp_hw_cookie_conversion_t *cc_ctx); 782 QDF_STATUS 783 dp_hw_cookie_conversion_deinit(struct dp_soc_be *be_soc, 784 struct dp_hw_cookie_conversion_t *cc_ctx); 785 786 /** 787 * dp_cc_spt_page_desc_alloc() - allocate SPT DDR page descriptor from pool 788 * @be_soc: beryllium soc handler 789 * @list_head: pointer to page desc head 790 * @list_tail: pointer to page desc tail 791 * @num_desc: number of TX/RX Descs required for SPT pages 792 * 793 * Return: number of SPT page Desc allocated 794 */ 795 uint16_t dp_cc_spt_page_desc_alloc(struct dp_soc_be *be_soc, 796 struct dp_spt_page_desc **list_head, 797 struct dp_spt_page_desc **list_tail, 798 uint16_t num_desc); 799 800 /** 801 * dp_cc_spt_page_desc_free() - free SPT DDR page descriptor to pool 802 * @be_soc: beryllium soc handler 803 * @list_head: pointer to page desc head 804 * @list_tail: pointer to page desc tail 805 * @page_nums: 
number of page desc freed back to pool
 */
void dp_cc_spt_page_desc_free(struct dp_soc_be *be_soc,
			      struct dp_spt_page_desc **list_head,
			      struct dp_spt_page_desc **list_tail,
			      uint16_t page_nums);

/**
 * dp_cc_desc_id_generate() - generate SW cookie ID according to
 *			      DDR page 4K aligned or not
 * @ppt_index: offset index in primary page table
 * @spt_index: offset index in secondary DDR page
 *
 * Generate SW cookie ID to match as HW expected
 *
 * Return: cookie ID
 */
static inline uint32_t dp_cc_desc_id_generate(uint32_t ppt_index,
					      uint16_t spt_index)
{
	/*
	 * for 4k aligned case, cmem entry size is 4 bytes,
	 * HW index from bit19~bit10 value = ppt_index / 2, high 32bits flag
	 * from bit9 value = ppt_index % 2, then bit 19 ~ bit9 value is
	 * exactly same with original ppt_index value.
	 * for 4k un-aligned case, cmem entry size is 8 bytes.
	 * bit19 ~ bit9 will be HW index value, same as ppt_index value.
	 */
	return ((((uint32_t)ppt_index) << DP_CC_DESC_ID_PPT_PAGE_OS_SHIFT) |
		spt_index);
}

/**
 * dp_cc_desc_find() - find TX/RX Descs virtual address by ID
 * @soc: be soc handle
 * @desc_id: TX/RX Desc ID
 *
 * Return: TX/RX Desc virtual address
 */
static inline uintptr_t dp_cc_desc_find(struct dp_soc *soc,
					uint32_t desc_id)
{
	struct dp_soc_be *be_soc;
	uint16_t ppt_page_id, spt_va_id;
	uint8_t *spt_page_va;

	be_soc = dp_get_be_soc_from_dp_soc(soc);
	/* split cookie into primary-page index and in-page entry index */
	ppt_page_id = (desc_id & DP_CC_DESC_ID_PPT_PAGE_OS_MASK) >>
			DP_CC_DESC_ID_PPT_PAGE_OS_SHIFT;

	spt_va_id = (desc_id & DP_CC_DESC_ID_SPT_VA_OS_MASK) >>
			DP_CC_DESC_ID_SPT_VA_OS_SHIFT;

	/*
	 * ppt index in cmem is same order where the page in the
	 * page desc array during initialization.
	 * entry size in DDR page is 64 bits, for 32 bits system,
	 * only lower 32 bits VA value is needed.
	 */
	spt_page_va = be_soc->page_desc_base[ppt_page_id].page_v_addr;

	return (*((uintptr_t *)(spt_page_va +
				spt_va_id * DP_CC_HW_READ_BYTES)));
}

/**
 * dp_update_mlo_mld_vdev_ctxt_stats() - aggregate stats from mlo ctx
 * @buf: vdev stats buf
 * @mlo_ctxt_stats: mlo ctxt stats
 *
 * Return: void
 */
static inline
void dp_update_mlo_mld_vdev_ctxt_stats(void *buf,
				       struct dp_vdev_stats *mlo_ctxt_stats)
{
	struct dp_vdev_stats *tgt_vdev_stats = (struct dp_vdev_stats *)buf;

	DP_UPDATE_TO_MLD_VDEV_STATS(tgt_vdev_stats, mlo_ctxt_stats,
				    DP_XMIT_TOTAL);
}

/**
 * dp_update_mlo_link_vdev_ctxt_stats() - aggregate stats from mlo ctx
 * @buf: vdev stats buf
 * @mlo_ctxt_stats: mlo ctxt stats
 * @xmit_type: xmit type of packet - MLD/Link
 *
 * Return: void
 */
static inline
void dp_update_mlo_link_vdev_ctxt_stats(void *buf,
					struct dp_vdev_stats *mlo_ctxt_stats,
					enum dp_pkt_xmit_type xmit_type)
{
	/* NOTE(review): target buf is cdp_vdev_stats here but dp_vdev_stats
	 * in the MLD variant above — the two macros expect different target
	 * layouts; confirm against the DP_UPDATE_TO_*_VDEV_STATS definitions.
	 */
	struct cdp_vdev_stats *tgt_vdev_stats = (struct cdp_vdev_stats *)buf;

	DP_UPDATE_TO_LINK_VDEV_STATS(tgt_vdev_stats, mlo_ctxt_stats, xmit_type);
}

#ifdef WLAN_FEATURE_NEAR_FULL_IRQ
/**
 * enum dp_srng_near_full_levels - SRNG Near FULL levels
 * @DP_SRNG_THRESH_SAFE: SRNG level safe for yielding the near full mode
 *		of processing the entries in SRNG
 * @DP_SRNG_THRESH_NEAR_FULL: SRNG level enters the near full mode
 *		of processing the entries in SRNG
 * @DP_SRNG_THRESH_CRITICAL: SRNG level enters the critical level of full
 *		condition and drastic steps need to be taken for processing
 *		the entries in SRNG
 */
enum dp_srng_near_full_levels {
	DP_SRNG_THRESH_SAFE,
	DP_SRNG_THRESH_NEAR_FULL,
	DP_SRNG_THRESH_CRITICAL,
};

/**
 * dp_srng_check_ring_near_full() - Check if SRNG is marked as near-full from
 *				its corresponding near-full irq handler
 * @soc: Datapath SoC handle
 * @dp_srng: datapath handle for this SRNG
 *
 * Return: 1, if the srng was marked as near-full
 *	   0, if the srng was not marked as near-full
 */
static inline int dp_srng_check_ring_near_full(struct dp_soc *soc,
					       struct dp_srng *dp_srng)
{
	return qdf_atomic_read(&dp_srng->near_full);
}

/**
 * dp_srng_get_near_full_level() - Check the num available entries in the
 *			consumer srng and return the level of the srng
 *			near full state.
 * @soc: Datapath SoC Handle [To be validated by the caller]
 * @dp_srng: SRNG handle
 *
 * Return: near-full level
 */
static inline int
dp_srng_get_near_full_level(struct dp_soc *soc, struct dp_srng *dp_srng)
{
	uint32_t num_valid;

	num_valid = hal_srng_dst_num_valid_nolock(soc->hal_soc,
						  dp_srng->hal_srng,
						  true);

	/* level is picked from the valid-entry count against the two
	 * per-ring thresholds; anything between safe and critical is
	 * reported as near-full
	 */
	if (num_valid > dp_srng->crit_thresh)
		return DP_SRNG_THRESH_CRITICAL;
	else if (num_valid < dp_srng->safe_thresh)
		return DP_SRNG_THRESH_SAFE;
	else
		return DP_SRNG_THRESH_NEAR_FULL;
}

/* reap-limit boost factor applied while a ring stays in near-full mode */
#define DP_SRNG_PER_LOOP_NF_REAP_MULTIPLIER	2

/**
 * _dp_srng_test_and_update_nf_params() - Test the near full level and update
 *			the reap_limit and flags to reflect the state.
 * @soc: Datapath soc handle
 * @srng: Datapath handle for the srng
 * @max_reap_limit: [Output Param] Buffer to set the max_reap_limit as
 *			per the near-full state
 *
 * Return: 1, if the srng is near full
 *	   0, if the srng is not near full
 */
static inline int
_dp_srng_test_and_update_nf_params(struct dp_soc *soc,
				   struct dp_srng *srng,
				   int *max_reap_limit)
{
	int ring_near_full = 0, near_full_level;

	if (dp_srng_check_ring_near_full(soc, srng)) {
		near_full_level = dp_srng_get_near_full_level(soc, srng);
		switch (near_full_level) {
		case DP_SRNG_THRESH_CRITICAL:
			/* Currently not doing anything special here */
			fallthrough;
		case DP_SRNG_THRESH_NEAR_FULL:
			ring_near_full = 1;
			*max_reap_limit *= DP_SRNG_PER_LOOP_NF_REAP_MULTIPLIER;
			break;
		case DP_SRNG_THRESH_SAFE:
			/* back to safe level: clear the near-full flag set by
			 * the near-full irq handler
			 */
			qdf_atomic_set(&srng->near_full, 0);
			ring_near_full = 0;
			break;
		default:
			qdf_assert(0);
			break;
		}
	}

	return ring_near_full;
}
#else
static inline int
_dp_srng_test_and_update_nf_params(struct dp_soc *soc,
				   struct dp_srng *srng,
				   int *max_reap_limit)
{
	return 0;
}
#endif

#ifdef QCA_SUPPORT_DP_GLOBAL_CTX
/**
 * dp_desc_pool_get_spcl_cmem_base() - get CMEM base offset for the special
 *			TX descriptor pool
 * @desc_pool_id: special TX desc pool id
 *
 * Return: CMEM base offset for the given special desc pool
 */
static inline
uint32_t dp_desc_pool_get_spcl_cmem_base(uint8_t desc_pool_id)
{
	return (DP_TX_SPCL_DESC_CMEM_OFFSET +
		(desc_pool_id * DP_TX_SPCL_DESC_POOL_CMEM_SIZE));
}
#else
static inline
uint32_t dp_desc_pool_get_spcl_cmem_base(uint8_t desc_pool_id)
{
	/* special desc pools do not exist without the global ctx feature */
	QDF_BUG(0);
	return 0;
}
#endif

/**
 * dp_desc_pool_get_cmem_base() - get CMEM base offset for a descriptor pool
 * @chip_id: chip id (used only for RX buffer desc pools)
 * @desc_pool_id: descriptor pool id
 * @desc_type: type of the descriptor pool
 *
 * Return: CMEM base offset for the given pool; 0 and QDF_BUG for an
 *	   unsupported desc type
 */
static inline
uint32_t dp_desc_pool_get_cmem_base(uint8_t chip_id, uint8_t desc_pool_id,
				    enum qdf_dp_desc_type desc_type)
{
	switch (desc_type) {
	case QDF_DP_TX_DESC_TYPE:
		return (DP_TX_DESC_CMEM_OFFSET +
			(desc_pool_id * DP_TX_DESC_POOL_CMEM_SIZE));
	case QDF_DP_TX_SPCL_DESC_TYPE:
		return dp_desc_pool_get_spcl_cmem_base(desc_pool_id);
	case QDF_DP_RX_DESC_BUF_TYPE:
		/* RX pools are laid out per chip, MAX_RXDESC_POOLS each */
		return (DP_RX_DESC_CMEM_OFFSET +
			((chip_id * MAX_RXDESC_POOLS) + desc_pool_id) *
			DP_RX_DESC_POOL_CMEM_SIZE);
	case QDF_DP_TX_PPEDS_DESC_TYPE:
		return DP_TX_PPEDS_DESC_CMEM_OFFSET;
	default:
		QDF_BUG(0);
	}
	return 0;
}

#ifndef WLAN_MLO_MULTI_CHIP
/* Stubs for the single-chip build: MLO multi-chip bookkeeping is a no-op */
static inline
void dp_soc_mlo_fill_params(struct dp_soc *soc,
			    struct cdp_soc_attach_params *params)
{
}

static inline
void dp_pdev_mlo_fill_params(struct dp_pdev *pdev,
			     struct cdp_pdev_attach_params *params)
{
}

static inline
void dp_mlo_update_link_to_pdev_map(struct dp_soc *soc, struct dp_pdev *pdev)
{
}

static inline
void dp_mlo_update_link_to_pdev_unmap(struct dp_soc *soc, struct dp_pdev *pdev)
{
}

/* single chip: chip id is always 0 */
static inline uint8_t dp_mlo_get_chip_id(struct dp_soc *soc)
{
	return 0;
}
#endif

/**
 * dp_mlo_dev_ctxt_list_attach_wrapper() - Wrapper API for MLO dev list Init
 *
 * @mlo_dev_obj: MLO device object
 *
 * Return: void
 */
void dp_mlo_dev_ctxt_list_attach_wrapper(dp_mlo_dev_obj_t mlo_dev_obj);

/**
 * dp_mlo_dev_ctxt_list_detach_wrapper() - Wrapper API for MLO dev list de-Init
 *
 * @mlo_dev_obj: MLO device object
 *
 * Return: void
 */
void dp_mlo_dev_ctxt_list_detach_wrapper(dp_mlo_dev_obj_t mlo_dev_obj);

/**
 * dp_mlo_dev_ctxt_list_attach() - API to initialize MLO device List
 *
 * @mlo_dev_obj: MLO device object
 *
 * Return: void
 */
void dp_mlo_dev_ctxt_list_attach(dp_mlo_dev_obj_t mlo_dev_obj);

/**
 * dp_mlo_dev_ctxt_list_detach() - API to de-initialize MLO device List
 *
 * @mlo_dev_obj: MLO device object
 *
 * Return: void
 */
void dp_mlo_dev_ctxt_list_detach(dp_mlo_dev_obj_t mlo_dev_obj);

/**
 * dp_soc_initialize_cdp_cmn_mlo_ops() - API to initialize common CDP MLO ops
 *
 * @soc: Datapath soc handle
 *
 * Return: void
 */
void dp_soc_initialize_cdp_cmn_mlo_ops(struct dp_soc *soc);

#if defined(WLAN_FEATURE_11BE_MLO) && defined(WLAN_DP_MLO_DEV_CTX)
/**
 * dp_mlo_dev_ctxt_unref_delete() - Releasing the ref for MLO device ctxt
 *
 * @mlo_dev_ctxt: MLO device context handle
 * @mod_id: module id which is releasing the reference
 *
 * Return: void
 */
void dp_mlo_dev_ctxt_unref_delete(struct dp_mlo_dev_ctxt *mlo_dev_ctxt,
				  enum dp_mod_id mod_id);

/**
 * dp_mlo_dev_get_ref() - Get the ref for MLO device ctxt
 *
 * @mlo_dev_ctxt: MLO device context handle
 * @mod_id: module id which is requesting the reference
 *
 * Return: SUCCESS on acquiring the ref.
 */
QDF_STATUS
dp_mlo_dev_get_ref(struct dp_mlo_dev_ctxt *mlo_dev_ctxt,
		   enum dp_mod_id mod_id);

/**
 * dp_get_mlo_dev_ctx_by_mld_mac_addr() - Get MLO device ctx based on MLD MAC
 *
 * @be_soc: be soc handle
 * @mldaddr: MLD MAC address
 * @mod_id: module id which is requesting the reference
 *
 * Return: MLO device context Handle on success, NULL on failure
 */
struct dp_mlo_dev_ctxt *
dp_get_mlo_dev_ctx_by_mld_mac_addr(struct dp_soc_be *be_soc,
				   uint8_t *mldaddr, enum dp_mod_id mod_id);
#endif /* WLAN_DP_MLO_DEV_CTX */
#endif /* __DP_BE_H */