xref: /wlan-dirver/qca-wifi-host-cmn/dp/wifi3.0/be/dp_be.h (revision 8cfe6b10058a04cafb17eed051f2ddf11bee8931)
1 /*
2  * Copyright (c) 2021 The Linux Foundation. All rights reserved.
3  * Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved.
4  *
5  * Permission to use, copy, modify, and/or distribute this software for
6  * any purpose with or without fee is hereby granted, provided that the
7  * above copyright notice and this permission notice appear in all
8  * copies.
9  *
10  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
11  * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
12  * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
13  * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
14  * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
15  * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
16  * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
17  * PERFORMANCE OF THIS SOFTWARE.
18  */
19 #ifndef __DP_BE_H
20 #define __DP_BE_H
21 
22 #include <dp_types.h>
23 #include <hal_be_tx.h>
24 #ifdef WLAN_MLO_MULTI_CHIP
25 #include "mlo/dp_mlo.h"
26 #else
27 #include <dp_peer.h>
28 #endif
29 #ifdef WIFI_MONITOR_SUPPORT
30 #include <dp_mon.h>
31 #endif
32 
/**
 * enum CMEM_MEM_CLIENTS - DP clients that carve space out of the CMEM pool
 * @COOKIE_CONVERSION: HW cookie conversion primary page table area
 * @FISA_FST: FISA flow search table area
 */
enum CMEM_MEM_CLIENTS {
	COOKIE_CONVERSION,
	FISA_FST,
};
37 
38 /* maximum number of entries in one page of secondary page table */
39 #define DP_CC_SPT_PAGE_MAX_ENTRIES 512
40 
/* mask for entry offset within one page of secondary page table */
42 #define DP_CC_SPT_PAGE_MAX_ENTRIES_MASK (DP_CC_SPT_PAGE_MAX_ENTRIES - 1)
43 
44 /* maximum number of entries in primary page table */
45 #define DP_CC_PPT_MAX_ENTRIES \
46 	DP_CC_PPT_MEM_SIZE / DP_CC_PPT_ENTRY_SIZE_4K_ALIGNED
47 
48 /* cookie conversion required CMEM offset from CMEM pool */
49 #define DP_CC_MEM_OFFSET_IN_CMEM 0
50 
51 /* cookie conversion primary page table size 4K */
52 #if defined(WLAN_MAX_PDEVS) && (WLAN_MAX_PDEVS == 1)
53 #define DP_CC_PPT_MEM_SIZE 4096
54 #else
55 #define DP_CC_PPT_MEM_SIZE 8192
56 #endif
57 
58 /* FST required CMEM offset from CMEM pool */
59 #define DP_FST_MEM_OFFSET_IN_CMEM \
60 	(DP_CC_MEM_OFFSET_IN_CMEM + DP_CC_PPT_MEM_SIZE)
61 
62 /* CMEM size for FISA FST 16K */
63 #define DP_CMEM_FST_SIZE 16384
64 
65 /* lower 9 bits in Desc ID for offset in page of SPT */
66 #define DP_CC_DESC_ID_SPT_VA_OS_SHIFT 0
67 
68 #define DP_CC_DESC_ID_SPT_VA_OS_MASK 0x1FF
69 
70 #define DP_CC_DESC_ID_SPT_VA_OS_LSB 0
71 
72 #define DP_CC_DESC_ID_SPT_VA_OS_MSB 8
73 
74 /* higher 11 bits in Desc ID for offset in CMEM of PPT */
75 #define DP_CC_DESC_ID_PPT_PAGE_OS_LSB 9
76 
77 #define DP_CC_DESC_ID_PPT_PAGE_OS_MSB 19
78 
79 #define DP_CC_DESC_ID_PPT_PAGE_OS_SHIFT 9
80 
81 #define DP_CC_DESC_ID_PPT_PAGE_OS_MASK 0xFFE00
82 
83 /*
84  * page 4K unaligned case, single SPT page physical address
85  * need 8 bytes in PPT
86  */
87 #define DP_CC_PPT_ENTRY_SIZE_4K_UNALIGNED 8
88 /*
89  * page 4K aligned case, single SPT page physical address
90  * need 4 bytes in PPT
91  */
92 #define DP_CC_PPT_ENTRY_SIZE_4K_ALIGNED 4
93 
94 /* 4K aligned case, number of bits HW append for one PPT entry value */
95 #define DP_CC_PPT_ENTRY_HW_APEND_BITS_4K_ALIGNED 12
96 
97 #if defined(WLAN_MAX_PDEVS) && (WLAN_MAX_PDEVS == 1)
98 /* WBM2SW ring id for rx release */
99 #define WBM2SW_REL_ERR_RING_NUM 3
100 #else
101 /* WBM2SW ring id for rx release */
102 #define WBM2SW_REL_ERR_RING_NUM 5
103 #endif
104 
105 #ifdef WLAN_SUPPORT_PPEDS
106 #define DP_PPEDS_STAMODE_ASTIDX_MAP_REG_IDX 1
107 /* The MAX PPE PRI2TID */
108 #define DP_TX_INT_PRI2TID_MAX 15
109 
110 #define DP_TX_PPEDS_POOL_ID 0
111 
112 /* size of CMEM needed for a ppeds tx desc pool */
113 #define DP_TX_PPEDS_DESC_POOL_CMEM_SIZE \
114 	((WLAN_CFG_NUM_PPEDS_TX_DESC_MAX / DP_CC_SPT_PAGE_MAX_ENTRIES) * \
115 	 DP_CC_PPT_ENTRY_SIZE_4K_ALIGNED)
116 
/* Offset of ppeds tx descriptor pool */
118 #define DP_TX_PPEDS_DESC_CMEM_OFFSET 0
119 
120 #define PEER_ROUTING_USE_PPE 1
121 #define PEER_ROUTING_ENABLED 1
122 #define DP_PPE_INTR_STRNG_LEN 32
123 #define DP_PPE_INTR_MAX 3
124 
125 #else
126 #define DP_TX_PPEDS_DESC_CMEM_OFFSET 0
127 #define DP_TX_PPEDS_DESC_POOL_CMEM_SIZE 0
128 
129 #define DP_PPE_INTR_STRNG_LEN 0
130 #define DP_PPE_INTR_MAX 0
131 #endif
132 
133 /* tx descriptor are programmed at start of CMEM region*/
134 #define DP_TX_DESC_CMEM_OFFSET \
135 	(DP_TX_PPEDS_DESC_CMEM_OFFSET + DP_TX_PPEDS_DESC_POOL_CMEM_SIZE)
136 
137 /* size of CMEM needed for a tx desc pool*/
138 #define DP_TX_DESC_POOL_CMEM_SIZE \
139 	((WLAN_CFG_NUM_TX_DESC_MAX / DP_CC_SPT_PAGE_MAX_ENTRIES) * \
140 	 DP_CC_PPT_ENTRY_SIZE_4K_ALIGNED)
141 
/* Offset of rx descriptor pool */
143 #define DP_RX_DESC_CMEM_OFFSET \
144 	DP_TX_DESC_CMEM_OFFSET + (MAX_TXDESC_POOLS * DP_TX_DESC_POOL_CMEM_SIZE)
145 
146 /* size of CMEM needed for a rx desc pool */
147 #define DP_RX_DESC_POOL_CMEM_SIZE \
148 	((WLAN_CFG_RX_SW_DESC_NUM_SIZE_MAX / DP_CC_SPT_PAGE_MAX_ENTRIES) * \
149 	 DP_CC_PPT_ENTRY_SIZE_4K_ALIGNED)
150 
151 /* get ppt_id from CMEM_OFFSET */
152 #define DP_CMEM_OFFSET_TO_PPT_ID(offset) \
153 	((offset) / DP_CC_PPT_ENTRY_SIZE_4K_ALIGNED)
154 
/**
 * struct dp_spt_page_desc - secondary page table page descriptor
 * @page_v_addr: page virtual address
 * @page_p_addr: page physical address
 * @ppt_index: entry index in primary page table where this page's
 *	physical address is stored
 */
struct dp_spt_page_desc {
	uint8_t *page_v_addr;
	qdf_dma_addr_t page_p_addr;
	uint32_t ppt_index;
};
169 
/**
 * struct dp_hw_cookie_conversion_t - main context for HW cookie conversion
 * @cmem_offset: CMEM offset from base address for primary page table setup
 * @total_page_num: total DDR page allocated
 * @page_desc_base: page Desc buffer base address
 * @page_pool: DDR pages pool
 * @cc_lock: locks for page acquiring/free
 */
struct dp_hw_cookie_conversion_t {
	uint32_t cmem_offset;
	uint32_t total_page_num;
	struct dp_spt_page_desc *page_desc_base;
	struct qdf_mem_multi_page_t page_pool;
	qdf_spinlock_t cc_lock;
};
186 
/**
 * struct dp_spt_page_desc_list - container of SPT page desc list info
 * @spt_page_list_head: head of SPT page descriptor list
 * @spt_page_list_tail: tail of SPT page descriptor list
 * @num_spt_pages: number of SPT page descriptors allocated
 */
struct dp_spt_page_desc_list {
	struct dp_spt_page_desc *spt_page_list_head;
	struct dp_spt_page_desc *spt_page_list_tail;
	uint16_t num_spt_pages;
};
198 
199 /* HW reading 8 bytes for VA */
200 #define DP_CC_HW_READ_BYTES 8
201 #define DP_CC_SPT_PAGE_UPDATE_VA(_page_base_va, _index, _desc_va) \
202 	{ *((uintptr_t *)((_page_base_va) + (_index) * DP_CC_HW_READ_BYTES)) \
203 	= (uintptr_t)(_desc_va); }
204 
205 /**
206  * struct dp_tx_bank_profile - DP wrapper for TCL banks
207  * @is_configured: flag indicating if this bank is configured
208  * @ref_count: ref count indicating number of users of the bank
209  * @bank_config: HAL TX bank configuration
210  */
211 struct dp_tx_bank_profile {
212 	uint8_t is_configured;
213 	qdf_atomic_t  ref_count;
214 	union hal_tx_bank_config bank_config;
215 };
216 
217 #ifdef WLAN_SUPPORT_PPEDS
218 /**
219  * struct dp_ppe_vp_tbl_entry - PPE Virtual table entry
220  * @is_configured: Boolean that the entry is configured.
221  */
222 struct dp_ppe_vp_tbl_entry {
223 	bool is_configured;
224 };
225 
226 /**
227  * struct dp_ppe_vp_search_idx_tbl_entry - PPE Virtual search table entry
228  * @is_configured: Boolean that the entry is configured.
229  */
230 struct dp_ppe_vp_search_idx_tbl_entry {
231 	bool is_configured;
232 };
233 
/**
 * struct dp_ppe_vp_profile - PPE direct switch profiler per vdev
 * @vp_num: Virtual port number
 * @ppe_vp_num_idx: Index to the PPE VP table entry
 * @search_idx_reg_num: Address search Index register number
 * @drop_prec_enable: Drop precedence enable
 * @to_fw: To FW exception enable/disable.
 * @use_ppe_int_pri: Use PPE INT_PRI to TID mapping table
 */
struct dp_ppe_vp_profile {
	uint8_t vp_num;
	uint8_t ppe_vp_num_idx;
	uint8_t search_idx_reg_num;
	uint8_t drop_prec_enable;
	uint8_t to_fw;
	uint8_t use_ppe_int_pri;
};
251 
/**
 * struct dp_ppeds_tx_desc_pool_s - PPEDS Tx Descriptor Pool
 * @elem_size: Size of each descriptor
 * @num_allocated: Number of used descriptors
 * @freelist: Chain of free descriptors
 * @desc_pages: multiple page allocation information for actual descriptors
 * @elem_count: Number of descriptors in the pool
 * @num_free: Number of free descriptors
 * @lock: Lock for descriptor allocation/free from/to the pool
 */
struct dp_ppeds_tx_desc_pool_s {
	uint16_t elem_size;
	uint32_t num_allocated;
	struct dp_tx_desc_s *freelist;
	struct qdf_mem_multi_page_t desc_pages;
	uint16_t elem_count;
	uint32_t num_free;
	qdf_spinlock_t lock;
};
271 #endif
272 
273 /**
274  * struct dp_ppeds_napi - napi parameters for ppe ds
275  * @napi: napi structure to register with napi infra
276  * @ndev: net_dev structure
277  */
278 struct dp_ppeds_napi {
279 	struct napi_struct napi;
280 	struct net_device ndev;
281 };
282 
/**
 * struct dp_soc_be - Extended DP soc for BE targets
 * @soc: dp soc structure
 * @num_bank_profiles: num TX bank profiles
 * @tx_bank_lock: lock protecting @bank_profiles (mutex on single-pdev
 *	targets, spinlock otherwise)
 * @bank_profiles: bank profiles for various TX banks
 * @page_desc_base: page descriptor array base for cookie conversion pages
 * @cc_cmem_base: cmem offset reserved for CC
 * @tx_cc_ctx: Cookie conversion context for tx desc pools
 * @rx_cc_ctx: Cookie conversion context for rx desc pools
 * @ppeds_int_mode_enabled: PPE DS interrupt mode enabled
 * @ppeds_stopped: PPE DS stopped flag
 * @reo2ppe_ring: REO2PPE ring
 * @ppe2tcl_ring: PPE2TCL ring
 * @ppeds_wbm_release_ring: WBM release ring used by PPE DS
 * @ppe_vp_tbl: PPE VP table
 * @ppe_vp_search_idx_tbl: PPE VP search idx table
 * @ppeds_tx_cc_ctx: Cookie conversion context for ppeds tx desc pool
 * @ppeds_tx_desc: PPEDS tx desc pool
 * @ppeds_napi_ctxt: NAPI context for PPE DS
 * @ppeds_handle: PPEDS soc instance handle
 * @ppe_vp_tbl_lock: PPE VP table lock
 * @num_ppe_vp_entries: Number of PPE VP entries
 * @num_ppe_vp_search_idx_entries: PPEDS VP search idx entries
 * @irq_name: PPEDS VP irq names
 * @mlo_enabled: Flag to indicate MLO is enabled or not
 * @mlo_chip_id: MLO chip_id
 * @ml_ctxt: pointer to global ml_context
 * @delta_tqm: delta_tqm
 * @mlo_tstamp_offset: mlo timestamp offset
 * @mld_peer_hash_lock: lock to protect mld_peer_hash
 * @mld_peer_hash: peer hash table for ML peers
 * @ipa_bank_id: TCL bank id used by IPA
 */
struct dp_soc_be {
	struct dp_soc soc;
	uint8_t num_bank_profiles;
#if defined(WLAN_MAX_PDEVS) && (WLAN_MAX_PDEVS == 1)
	qdf_mutex_t tx_bank_lock;
#else
	qdf_spinlock_t tx_bank_lock;
#endif
	struct dp_tx_bank_profile *bank_profiles;
	struct dp_spt_page_desc *page_desc_base;
	uint32_t cc_cmem_base;
	struct dp_hw_cookie_conversion_t tx_cc_ctx[MAX_TXDESC_POOLS];
	struct dp_hw_cookie_conversion_t rx_cc_ctx[MAX_RXDESC_POOLS];
#ifdef WLAN_SUPPORT_PPEDS
	uint8_t ppeds_int_mode_enabled:1,
		ppeds_stopped:1;
	struct dp_srng reo2ppe_ring;
	struct dp_srng ppe2tcl_ring;
	struct dp_srng ppeds_wbm_release_ring;
	struct dp_ppe_vp_tbl_entry *ppe_vp_tbl;
	struct dp_ppe_vp_search_idx_tbl_entry *ppe_vp_search_idx_tbl;
	struct dp_hw_cookie_conversion_t ppeds_tx_cc_ctx;
	struct dp_ppeds_tx_desc_pool_s ppeds_tx_desc;
	struct dp_ppeds_napi ppeds_napi_ctxt;
	void *ppeds_handle;
	qdf_mutex_t ppe_vp_tbl_lock;
	uint8_t num_ppe_vp_entries;
	uint8_t num_ppe_vp_search_idx_entries;
	char irq_name[DP_PPE_INTR_MAX][DP_PPE_INTR_STRNG_LEN];
#endif
#ifdef WLAN_FEATURE_11BE_MLO
#ifdef WLAN_MLO_MULTI_CHIP
	uint8_t mlo_enabled;
	uint8_t mlo_chip_id;
	struct dp_mlo_ctxt *ml_ctxt;
	uint64_t delta_tqm;
	uint64_t mlo_tstamp_offset;
#else
	/* Protect mld peer hash table */
	DP_MUTEX_TYPE mld_peer_hash_lock;
	struct {
		uint32_t mask;
		uint32_t idx_bits;

		TAILQ_HEAD(, dp_peer) * bins;
	} mld_peer_hash;
#endif
#endif
#ifdef IPA_OFFLOAD
	int8_t ipa_bank_id;
#endif
};
368 
369 /* convert struct dp_soc_be pointer to struct dp_soc pointer */
370 #define DP_SOC_BE_GET_SOC(be_soc) ((struct dp_soc *)be_soc)
371 
/**
 * struct dp_pdev_be - Extended DP pdev for BE targets
 * @pdev: dp pdev structure
 * @mlo_link_id: MLO link id for PDEV (multi-chip MLO only)
 * @delta_tsf2: delta_tsf2 (multi-chip MLO only)
 */
struct dp_pdev_be {
	struct dp_pdev pdev;
#ifdef WLAN_MLO_MULTI_CHIP
	uint8_t mlo_link_id;
	uint64_t delta_tsf2;
#endif
};
386 
/**
 * struct dp_vdev_be - Extended DP vdev for BE targets
 * @vdev: dp vdev structure
 * @bank_id: bank_id to be used for TX
 * @vdev_id_check_en: flag if HW vdev_id check is enabled for vdev
 * @partner_vdev_list: partner vdev list used for Intra-BSS
 * @seq_num: DP MLO seq number
 * @mcast_primary: MLO Mcast primary vdev
 * @ppe_vp_enabled: flag to check if PPE VP is enabled for vdev
 * @ppe_vp_profile: PPE VP profile
 */
struct dp_vdev_be {
	struct dp_vdev vdev;
	int8_t bank_id;
	uint8_t vdev_id_check_en;
#ifdef WLAN_MLO_MULTI_CHIP
	/* partner list used for Intra-BSS */
	uint8_t partner_vdev_list[WLAN_MAX_MLO_CHIPS][WLAN_MAX_MLO_LINKS_PER_SOC];
#ifdef WLAN_FEATURE_11BE_MLO
#ifdef WLAN_MCAST_MLO
	/* DP MLO seq number */
	uint16_t seq_num;
	/* MLO Mcast primary vdev */
	bool mcast_primary;
#endif
#endif
#endif
	unsigned long ppe_vp_enabled;
#ifdef WLAN_SUPPORT_PPEDS
	struct dp_ppe_vp_profile ppe_vp_profile;
#endif
};
416 
/**
 * struct dp_peer_be - Extended DP peer for BE targets
 * @peer: dp peer structure
 * @priority_valid: flag used only when WLAN_SUPPORT_PPEDS is enabled
 */
struct dp_peer_be {
	struct dp_peer peer;
#ifdef WLAN_SUPPORT_PPEDS
	uint8_t priority_valid;
#endif
};
427 
428 /**
429  * dp_get_soc_context_size_be() - get context size for target specific DP soc
430  *
431  * Return: value in bytes for BE specific soc structure
432  */
433 qdf_size_t dp_get_soc_context_size_be(void);
434 
435 /**
436  * dp_initialize_arch_ops_be() - initialize BE specific arch ops
437  * @arch_ops: arch ops pointer
438  *
439  * Return: none
440  */
441 void dp_initialize_arch_ops_be(struct dp_arch_ops *arch_ops);
442 
/**
 * dp_get_context_size_be() - get BE specific size for peer/vdev/pdev/soc
 * @context_type: dp context type for which the size is requested
 *
 * Return: size in bytes for the context_type
 */
449 qdf_size_t dp_get_context_size_be(enum dp_context_type context_type);
450 
/**
 * dp_get_be_soc_from_dp_soc() - get dp_soc_be from dp_soc
 * @soc: dp_soc pointer
 *
 * Return: dp_soc_be pointer
 */
static inline struct dp_soc_be *dp_get_be_soc_from_dp_soc(struct dp_soc *soc)
{
	/* dp_soc is the first member of dp_soc_be, so a plain cast works */
	struct dp_soc_be *be_soc = (struct dp_soc_be *)soc;

	return be_soc;
}
461 
462 #ifdef WLAN_MLO_MULTI_CHIP
463 typedef struct dp_mlo_ctxt *dp_mld_peer_hash_obj_t;
464 
/*
 * dp_mlo_get_peer_hash_obj() - return the container struct of MLO hash table
 *
 * @soc: soc handle
 *
 * return: MLD peer hash object (global ml context for multi-chip MLO)
 */
static inline dp_mld_peer_hash_obj_t
dp_mlo_get_peer_hash_obj(struct dp_soc *soc)
{
	struct dp_soc_be *be_soc = dp_get_be_soc_from_dp_soc(soc);

	return be_soc->ml_ctxt;
}
479 
480 void  dp_clr_mlo_ptnr_list(struct dp_soc *soc, struct dp_vdev *vdev);
481 
482 #if defined(WLAN_FEATURE_11BE_MLO)
483 /**
484  * dp_mlo_partner_chips_map() - Map MLO peers to partner SOCs
485  * @soc: Soc handle
486  * @peer: DP peer handle for ML peer
487  * @peer_id: peer_id
488  * Return: None
489  */
490 void dp_mlo_partner_chips_map(struct dp_soc *soc,
491 			      struct dp_peer *peer,
492 			      uint16_t peer_id);
493 
494 /**
495  * dp_mlo_partner_chips_unmap() - Unmap MLO peers to partner SOCs
496  * @soc: Soc handle
497  * @peer_id: peer_id
498  * Return: None
499  */
500 void dp_mlo_partner_chips_unmap(struct dp_soc *soc,
501 				uint16_t peer_id);
502 
503 #ifdef WLAN_MCAST_MLO
504 typedef void dp_ptnr_vdev_iter_func(struct dp_vdev_be *be_vdev,
505 				    struct dp_vdev *ptnr_vdev,
506 				    void *arg);
507 typedef void dp_ptnr_soc_iter_func(struct dp_soc *ptnr_soc,
508 				   void *arg);
/*
 * dp_mcast_mlo_iter_ptnr_vdev - API to iterate through ptnr vdev list
 * @be_soc: dp_soc_be pointer
 * @be_vdev: dp_vdev_be pointer
 * @func: function to be called for each partner vdev
 * @arg: argument to be passed to func
 * @mod_id: module id
 *
 * Return: None
 */
519 void dp_mcast_mlo_iter_ptnr_vdev(struct dp_soc_be *be_soc,
520 				 struct dp_vdev_be *be_vdev,
521 				 dp_ptnr_vdev_iter_func func,
522 				 void *arg,
523 				 enum dp_mod_id mod_id);
/*
 * dp_mcast_mlo_iter_ptnr_soc - API to iterate through ptnr soc list
 * @be_soc: dp_soc_be pointer
 * @func: function to be called for each partner soc
 * @arg: argument to be passed to func
 *
 * Return: None
 */
532 void dp_mcast_mlo_iter_ptnr_soc(struct dp_soc_be *be_soc,
533 				dp_ptnr_soc_iter_func func,
534 				void *arg);
535 /*
536  * dp_mlo_get_mcast_primary_vdev- get ref to mcast primary vdev
537  * @be_soc: dp_soc_be pointer
538  * @be_vdev: dp_vdev_be pointer
539  * @mod_id: module id
540  *
541  * Return: mcast primary DP VDEV handle on success, NULL on failure
542  */
543 struct dp_vdev *dp_mlo_get_mcast_primary_vdev(struct dp_soc_be *be_soc,
544 					      struct dp_vdev_be *be_vdev,
545 					      enum dp_mod_id mod_id);
546 #endif
547 #endif
548 
549 #else
550 typedef struct dp_soc_be *dp_mld_peer_hash_obj_t;
551 
/* Single-chip case: the BE soc itself acts as the MLD peer hash container */
static inline dp_mld_peer_hash_obj_t
dp_mlo_get_peer_hash_obj(struct dp_soc *soc)
{
	return dp_get_be_soc_from_dp_soc(soc);
}

/* No partner vdev list exists without WLAN_MLO_MULTI_CHIP; nothing to clear */
static inline void  dp_clr_mlo_ptnr_list(struct dp_soc *soc,
					 struct dp_vdev *vdev)
{
}
562 #endif
563 
/*
 * dp_mlo_peer_find_hash_attach_be() - API to initialize ML peer hash table
 *
 * @mld_hash_obj: Peer hash object
 * @hash_elems: number of entries in hash table
 *
 * return: QDF_STATUS_SUCCESS when attach is success else QDF_STATUS_FAILURE
 */
572 QDF_STATUS
573 dp_mlo_peer_find_hash_attach_be(dp_mld_peer_hash_obj_t mld_hash_obj,
574 				int hash_elems);
575 
/*
 * dp_mlo_peer_find_hash_detach_be() - API to de-initialize ML peer hash table
 *
 * @mld_hash_obj: Peer hash object
 *
 * return: void
 */
583 void dp_mlo_peer_find_hash_detach_be(dp_mld_peer_hash_obj_t mld_hash_obj);
584 
/**
 * dp_get_be_pdev_from_dp_pdev() - get dp_pdev_be from dp_pdev
 * @pdev: dp_pdev pointer
 *
 * Return: dp_pdev_be pointer
 */
static inline
struct dp_pdev_be *dp_get_be_pdev_from_dp_pdev(struct dp_pdev *pdev)
{
	/* dp_pdev is the first member of dp_pdev_be, so a plain cast works */
	struct dp_pdev_be *be_pdev = (struct dp_pdev_be *)pdev;

	return be_pdev;
}
596 
/**
 * dp_get_be_vdev_from_dp_vdev() - get dp_vdev_be from dp_vdev
 * @vdev: dp_vdev pointer
 *
 * Return: dp_vdev_be pointer
 */
static inline
struct dp_vdev_be *dp_get_be_vdev_from_dp_vdev(struct dp_vdev *vdev)
{
	/* dp_vdev is the first member of dp_vdev_be, so a plain cast works */
	struct dp_vdev_be *be_vdev = (struct dp_vdev_be *)vdev;

	return be_vdev;
}
608 
/**
 * dp_get_be_peer_from_dp_peer() - get dp_peer_be from dp_peer
 * @peer: dp_peer pointer
 *
 * Return: dp_peer_be pointer
 */
static inline
struct dp_peer_be *dp_get_be_peer_from_dp_peer(struct dp_peer *peer)
{
	/* dp_peer is the first member of dp_peer_be, so a plain cast works */
	struct dp_peer_be *be_peer = (struct dp_peer_be *)peer;

	return be_peer;
}
620 
621 void dp_ppeds_disable_irq(struct dp_soc *soc, struct dp_srng *srng);
622 void dp_ppeds_enable_irq(struct dp_soc *soc, struct dp_srng *srng);
623 
624 QDF_STATUS
625 dp_hw_cookie_conversion_attach(struct dp_soc_be *be_soc,
626 			       struct dp_hw_cookie_conversion_t *cc_ctx,
627 			       uint32_t num_descs,
628 			       enum dp_desc_type desc_type,
629 			       uint8_t desc_pool_id);
630 
631 void dp_reo_shared_qaddr_detach(struct dp_soc *soc);
632 
633 QDF_STATUS
634 dp_hw_cookie_conversion_detach(struct dp_soc_be *be_soc,
635 			       struct dp_hw_cookie_conversion_t *cc_ctx);
636 QDF_STATUS
637 dp_hw_cookie_conversion_init(struct dp_soc_be *be_soc,
638 			     struct dp_hw_cookie_conversion_t *cc_ctx);
639 QDF_STATUS
640 dp_hw_cookie_conversion_deinit(struct dp_soc_be *be_soc,
641 			       struct dp_hw_cookie_conversion_t *cc_ctx);
642 /**
643  * dp_cc_spt_page_desc_alloc() - allocate SPT DDR page descriptor from pool
644  * @be_soc: beryllium soc handler
645  * @list_head: pointer to page desc head
646  * @list_tail: pointer to page desc tail
647  * @num_desc: number of TX/RX Descs required for SPT pages
648  *
649  * Return: number of SPT page Desc allocated
650  */
651 uint16_t dp_cc_spt_page_desc_alloc(struct dp_soc_be *be_soc,
652 				   struct dp_spt_page_desc **list_head,
653 				   struct dp_spt_page_desc **list_tail,
654 				   uint16_t num_desc);
655 /**
656  * dp_cc_spt_page_desc_free() - free SPT DDR page descriptor to pool
657  * @be_soc: beryllium soc handler
658  * @list_head: pointer to page desc head
659  * @list_tail: pointer to page desc tail
660  * @page_nums: number of page desc freed back to pool
661  */
662 void dp_cc_spt_page_desc_free(struct dp_soc_be *be_soc,
663 			      struct dp_spt_page_desc **list_head,
664 			      struct dp_spt_page_desc **list_tail,
665 			      uint16_t page_nums);
666 
/**
 * dp_cc_desc_id_generate() - generate SW cookie ID according to
 *	DDR page 4K aligned or not
 * @ppt_index: offset index in primary page table
 * @spt_index: offset index in secondary DDR page
 *
 * Generate SW cookie ID to match as HW expected
 *
 * Return: cookie ID
 */
static inline uint32_t dp_cc_desc_id_generate(uint32_t ppt_index,
					      uint16_t spt_index)
{
	/*
	 * for 4k aligned case, cmem entry size is 4 bytes,
	 * HW index from bit19~bit10 value = ppt_index / 2, high 32bits flag
	 * from bit9 value = ppt_index % 2, then bit 19 ~ bit9 value is
	 * exactly same with original ppt_index value.
	 * for 4k un-aligned case, cmem entry size is 8 bytes.
	 * bit19 ~ bit9 will be HW index value, same as ppt_index value.
	 */
	return ((((uint32_t)ppt_index) << DP_CC_DESC_ID_PPT_PAGE_OS_SHIFT) |
		spt_index);
}
691 
/**
 * dp_cc_desc_find() - find TX/RX Descs virtual address by ID
 * @soc: dp soc handle
 * @desc_id: TX/RX Desc ID
 *
 * Return: TX/RX Desc virtual address
 */
static inline uintptr_t dp_cc_desc_find(struct dp_soc *soc,
					uint32_t desc_id)
{
	struct dp_soc_be *be_soc;
	uint16_t ppt_page_id, spt_va_id;
	uint8_t *spt_page_va;

	be_soc = dp_get_be_soc_from_dp_soc(soc);
	ppt_page_id = (desc_id & DP_CC_DESC_ID_PPT_PAGE_OS_MASK) >>
			DP_CC_DESC_ID_PPT_PAGE_OS_SHIFT;

	spt_va_id = (desc_id & DP_CC_DESC_ID_SPT_VA_OS_MASK) >>
			DP_CC_DESC_ID_SPT_VA_OS_SHIFT;

	/*
	 * ppt index in cmem is same order where the page in the
	 * page desc array during initialization.
	 * entry size in DDR page is 64 bits, for 32 bits system,
	 * only lower 32 bits VA value is needed.
	 */
	spt_page_va = be_soc->page_desc_base[ppt_page_id].page_v_addr;

	return (*((uintptr_t *)(spt_page_va  +
				spt_va_id * DP_CC_HW_READ_BYTES)));
}
724 
725 #ifdef WLAN_FEATURE_NEAR_FULL_IRQ
726 /**
727  * enum dp_srng_near_full_levels - SRNG Near FULL levels
728  * @DP_SRNG_THRESH_SAFE: SRNG level safe for yielding the near full mode
729  *		of processing the entries in SRNG
730  * @DP_SRNG_THRESH_NEAR_FULL: SRNG level enters the near full mode
731  *		of processing the entries in SRNG
732  * @DP_SRNG_THRESH_CRITICAL: SRNG level enters the critical level of full
733  *		condition and drastic steps need to be taken for processing
734  *		the entries in SRNG
735  */
736 enum dp_srng_near_full_levels {
737 	DP_SRNG_THRESH_SAFE,
738 	DP_SRNG_THRESH_NEAR_FULL,
739 	DP_SRNG_THRESH_CRITICAL,
740 };
741 
742 /**
743  * dp_srng_check_ring_near_full() - Check if SRNG is marked as near-full from
744  *				its corresponding near-full irq handler
745  * @soc: Datapath SoC handle
746  * @dp_srng: datapath handle for this SRNG
747  *
748  * Return: 1, if the srng was marked as near-full
749  *	   0, if the srng was not marked as near-full
750  */
751 static inline int dp_srng_check_ring_near_full(struct dp_soc *soc,
752 					       struct dp_srng *dp_srng)
753 {
754 	return qdf_atomic_read(&dp_srng->near_full);
755 }
756 
/**
 * dp_srng_get_near_full_level() - Check the num available entries in the
 *			consumer srng and return the level of the srng
 *			near full state.
 * @soc: Datapath SoC Handle [To be validated by the caller]
 * @dp_srng: SRNG handle
 *
 * Return: near-full level
 */
static inline int
dp_srng_get_near_full_level(struct dp_soc *soc, struct dp_srng *dp_srng)
{
	uint32_t num_valid;

	num_valid = hal_srng_dst_num_valid_nolock(soc->hal_soc,
						  dp_srng->hal_srng,
						  true);

	if (num_valid > dp_srng->crit_thresh)
		return DP_SRNG_THRESH_CRITICAL;
	else if (num_valid < dp_srng->safe_thresh)
		return DP_SRNG_THRESH_SAFE;
	else
		return DP_SRNG_THRESH_NEAR_FULL;
}
782 
783 #define DP_SRNG_PER_LOOP_NF_REAP_MULTIPLIER	2
784 
/**
 * _dp_srng_test_and_update_nf_params() - Test the near full level and update
 *			the reap_limit and flags to reflect the state.
 * @soc: Datapath soc handle
 * @srng: Datapath handle for the srng
 * @max_reap_limit: [Output Param] Buffer to set the max_reap_limit as
 *			per the near-full state
 *
 * Return: 1, if the srng is near full
 *	   0, if the srng is not near full
 */
static inline int
_dp_srng_test_and_update_nf_params(struct dp_soc *soc,
				   struct dp_srng *srng,
				   int *max_reap_limit)
{
	int ring_near_full = 0, near_full_level;

	if (dp_srng_check_ring_near_full(soc, srng)) {
		near_full_level = dp_srng_get_near_full_level(soc, srng);
		switch (near_full_level) {
		case DP_SRNG_THRESH_CRITICAL:
			/* Currently not doing anything special here */
			fallthrough;
		case DP_SRNG_THRESH_NEAR_FULL:
			ring_near_full = 1;
			*max_reap_limit *= DP_SRNG_PER_LOOP_NF_REAP_MULTIPLIER;
			break;
		case DP_SRNG_THRESH_SAFE:
			/* back to safe level: leave the near-full mode */
			qdf_atomic_set(&srng->near_full, 0);
			ring_near_full = 0;
			break;
		default:
			qdf_assert(0);
			break;
		}
	}

	return ring_near_full;
}
825 #else
static inline int
_dp_srng_test_and_update_nf_params(struct dp_soc *soc,
				   struct dp_srng *srng,
				   int *max_reap_limit)
{
	/* Near-full IRQ feature disabled: never report near-full */
	return 0;
}
833 #endif
834 
835 static inline
836 uint32_t dp_desc_pool_get_cmem_base(uint8_t chip_id, uint8_t desc_pool_id,
837 				    enum dp_desc_type desc_type)
838 {
839 	switch (desc_type) {
840 	case DP_TX_DESC_TYPE:
841 		return (DP_TX_DESC_CMEM_OFFSET +
842 			(desc_pool_id * DP_TX_DESC_POOL_CMEM_SIZE));
843 	case DP_RX_DESC_BUF_TYPE:
844 		return (DP_RX_DESC_CMEM_OFFSET +
845 			((chip_id * MAX_RXDESC_POOLS) + desc_pool_id) *
846 			DP_RX_DESC_POOL_CMEM_SIZE);
847 	case DP_TX_PPEDS_DESC_TYPE:
848 		return DP_TX_PPEDS_DESC_CMEM_OFFSET;
849 	default:
850 			QDF_BUG(0);
851 	}
852 	return 0;
853 }
854 
855 #ifndef WLAN_MLO_MULTI_CHIP
/* WLAN_MLO_MULTI_CHIP disabled: the MLO fill/map helpers are no-op stubs */
static inline
void dp_soc_mlo_fill_params(struct dp_soc *soc,
			    struct cdp_soc_attach_params *params)
{
}

static inline
void dp_pdev_mlo_fill_params(struct dp_pdev *pdev,
			     struct cdp_pdev_attach_params *params)
{
}

static inline
void dp_mlo_update_link_to_pdev_map(struct dp_soc *soc, struct dp_pdev *pdev)
{
}

static inline
void dp_mlo_update_link_to_pdev_unmap(struct dp_soc *soc, struct dp_pdev *pdev)
{
}
877 #endif
878 #endif
879