xref: /wlan-dirver/qca-wifi-host-cmn/dp/wifi3.0/be/dp_be.h (revision d0c05845839e5f2ba5a8dcebe0cd3e4cd4e8dfcf)
1 /*
2  * Copyright (c) 2021 The Linux Foundation. All rights reserved.
3  * Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved.
4  *
5  * Permission to use, copy, modify, and/or distribute this software for
6  * any purpose with or without fee is hereby granted, provided that the
7  * above copyright notice and this permission notice appear in all
8  * copies.
9  *
10  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
11  * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
12  * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
13  * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
14  * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
15  * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
16  * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
17  * PERFORMANCE OF THIS SOFTWARE.
18  */
19 #ifndef __DP_BE_H
20 #define __DP_BE_H
21 
22 #include <dp_types.h>
23 #include <hal_be_tx.h>
24 #ifdef WLAN_MLO_MULTI_CHIP
25 #include "mlo/dp_mlo.h"
26 #else
27 #include <dp_peer.h>
28 #endif
29 #include <dp_mon.h>
30 
/* clients that carve memory out of the CMEM pool */
enum CMEM_MEM_CLIENTS {
	COOKIE_CONVERSION,
	FISA_FST,
};
35 
/* maximum number of entries in one page of secondary page table */
#define DP_CC_SPT_PAGE_MAX_ENTRIES 512

/* mask to extract the entry offset within one SPT page */
#define DP_CC_SPT_PAGE_MAX_ENTRIES_MASK (DP_CC_SPT_PAGE_MAX_ENTRIES - 1)

/* maximum number of entries in primary page table */
#define DP_CC_PPT_MAX_ENTRIES 1024

/* cookie conversion required CMEM offset from CMEM pool */
#define DP_CC_MEM_OFFSET_IN_CMEM 0

/* cookie conversion primary page table size 4K */
#define DP_CC_PPT_MEM_SIZE 4096

/* FST required CMEM offset from CMEM pool */
#define DP_FST_MEM_OFFSET_IN_CMEM \
	(DP_CC_MEM_OFFSET_IN_CMEM + DP_CC_PPT_MEM_SIZE)

/* CMEM size for FISA FST 16K */
#define DP_CMEM_FST_SIZE 16384

/* lower 9 bits in Desc ID for offset in page of SPT */
#define DP_CC_DESC_ID_SPT_VA_OS_SHIFT 0

#define DP_CC_DESC_ID_SPT_VA_OS_MASK 0x1FF

#define DP_CC_DESC_ID_SPT_VA_OS_LSB 0

#define DP_CC_DESC_ID_SPT_VA_OS_MSB 8

/* higher 11 bits in Desc ID for offset in CMEM of PPT */
#define DP_CC_DESC_ID_PPT_PAGE_OS_LSB 9

#define DP_CC_DESC_ID_PPT_PAGE_OS_MSB 19

#define DP_CC_DESC_ID_PPT_PAGE_OS_SHIFT 9

#define DP_CC_DESC_ID_PPT_PAGE_OS_MASK 0xFFE00

/*
 * page 4K unaligned case, single SPT page physical address
 * need 8 bytes in PPT
 */
#define DP_CC_PPT_ENTRY_SIZE_4K_UNALIGNED 8
/*
 * page 4K aligned case, single SPT page physical address
 * need 4 bytes in PPT
 */
#define DP_CC_PPT_ENTRY_SIZE_4K_ALIGNED 4

/* 4K aligned case, number of bits HW append for one PPT entry value */
#define DP_CC_PPT_ENTRY_HW_APEND_BITS_4K_ALIGNED 12

#if defined(WLAN_MAX_PDEVS) && (WLAN_MAX_PDEVS == 1)
/* WBM2SW ring id for rx release */
#define WBM2SW_REL_ERR_RING_NUM 3
#else
/* WBM2SW ring id for rx release */
#define WBM2SW_REL_ERR_RING_NUM 5
#endif

/* tx descriptors are programmed at the start of the CMEM region */
#define DP_TX_DESC_CMEM_OFFSET	0

/* size of CMEM needed for a tx desc pool */
#define DP_TX_DESC_POOL_CMEM_SIZE \
	((WLAN_CFG_NUM_TX_DESC_MAX / DP_CC_SPT_PAGE_MAX_ENTRIES) * \
	 DP_CC_PPT_ENTRY_SIZE_4K_ALIGNED)

/*
 * offset of the rx descriptor pools, placed right after all tx pools;
 * fully parenthesized so the macro is safe in any arithmetic context
 */
#define DP_RX_DESC_CMEM_OFFSET \
	(DP_TX_DESC_CMEM_OFFSET + (MAX_TXDESC_POOLS * DP_TX_DESC_POOL_CMEM_SIZE))

/* size of CMEM needed for a rx desc pool */
#define DP_RX_DESC_POOL_CMEM_SIZE \
	((WLAN_CFG_RX_SW_DESC_NUM_SIZE_MAX / DP_CC_SPT_PAGE_MAX_ENTRIES) * \
	 DP_CC_PPT_ENTRY_SIZE_4K_ALIGNED)

/* get ppt_id from CMEM_OFFSET */
#define DP_CMEM_OFFSET_TO_PPT_ID(offset) \
	((offset) / DP_CC_PPT_ENTRY_SIZE_4K_ALIGNED)

/* The MAX PPE PRI2TID */
#ifdef WLAN_SUPPORT_PPEDS
#define DP_TX_INT_PRI2TID_MAX 15
#endif
123 
/**
 * struct dp_spt_page_desc - secondary page table page descriptor
 * @page_v_addr: page virtual address
 * @page_p_addr: page physical address
 * @ppt_index: entry index in primary page table where this page physical
 *	       address is stored
 */
struct dp_spt_page_desc {
	uint8_t *page_v_addr;
	qdf_dma_addr_t page_p_addr;
	uint32_t ppt_index;
};
138 
/**
 * struct dp_hw_cookie_conversion_t - main context for HW cookie conversion
 * @cmem_offset: CMEM offset from base address for primary page table setup
 * @total_page_num: total number of DDR pages allocated
 * @page_desc_base: base address of the page descriptor array
 * @page_pool: DDR pages pool
 * @cc_lock: lock protecting page acquire/free
 */
struct dp_hw_cookie_conversion_t {
	uint32_t cmem_offset;
	uint32_t total_page_num;
	struct dp_spt_page_desc *page_desc_base;
	struct qdf_mem_multi_page_t page_pool;
	qdf_spinlock_t cc_lock;
};
155 
/**
 * struct dp_spt_page_desc_list - container of SPT page desc list info
 * @spt_page_list_head: head of SPT page descriptor list
 * @spt_page_list_tail: tail of SPT page descriptor list
 * @num_spt_pages: number of SPT page descriptors allocated
 */
struct dp_spt_page_desc_list {
	struct dp_spt_page_desc *spt_page_list_head;
	struct dp_spt_page_desc *spt_page_list_tail;
	uint16_t num_spt_pages;
};
167 
/* HW reads 8 bytes per SPT entry to fetch the descriptor VA */
#define DP_CC_HW_READ_BYTES 8
/*
 * Store a TX/RX descriptor VA into entry @_index of an SPT page.
 * Wrapped in do-while(0) so the macro expands safely inside
 * un-braced if/else statements (the original bare {...} did not).
 */
#define DP_CC_SPT_PAGE_UPDATE_VA(_page_base_va, _index, _desc_va) \
	do { \
		*((uintptr_t *)((_page_base_va) + \
				(_index) * DP_CC_HW_READ_BYTES)) = \
			(uintptr_t)(_desc_va); \
	} while (0)
173 
/**
 * struct dp_tx_bank_profile - DP wrapper for TCL banks
 * @is_configured: flag indicating if this bank is configured
 * @ref_count: atomic count of current users of the bank
 * @bank_config: HAL TX bank configuration
 */
struct dp_tx_bank_profile {
	uint8_t is_configured;
	qdf_atomic_t  ref_count;
	union hal_tx_bank_config bank_config;
};
185 
#ifdef WLAN_SUPPORT_PPEDS
/**
 * struct dp_ppe_vp_tbl_entry - PPE virtual port table entry
 * @is_configured: true when this VP table entry is in use
 */
struct dp_ppe_vp_tbl_entry {
	bool is_configured;
};

/**
 * struct dp_ppe_vp_profile - PPE direct switch profile per vdev
 * @vp_num: virtual port number
 * @ppe_vp_num_idx: index to the PPE VP table entry
 * @search_idx_reg_num: address search index register number
 * @drop_prec_enable: drop precedence enable
 * @to_fw: to FW exception enable/disable
 * @use_ppe_int_pri: use PPE INT_PRI to TID mapping table
 */
struct dp_ppe_vp_profile {
	uint8_t vp_num;
	uint8_t ppe_vp_num_idx;
	uint8_t search_idx_reg_num;
	uint8_t drop_prec_enable;
	uint8_t to_fw;
	uint8_t use_ppe_int_pri;
};
#endif
213 
/**
 * struct dp_soc_be - Extended DP soc for BE targets
 * @soc: dp soc structure
 * @num_bank_profiles: num TX bank profiles
 * @tx_bank_lock: lock protecting the bank profile array
 * @bank_profiles: bank profiles for various TX banks
 * @page_desc_base: SPT page descriptor array for cookie conversion
 * @cc_cmem_base: cmem offset reserved for CC
 * @tx_cc_ctx: Cookie conversion context for tx desc pools
 * @rx_cc_ctx: Cookie conversion context for rx desc pools
 * @reo2ppe_ring: REO2PPE ring
 * @ppe2tcl_ring: PPE2TCL ring
 * @ppe_release_ring: PPE release ring
 * @ppe_vp_tbl: PPE VP table
 * @ppe_vp_tbl_lock: PPE VP table lock
 * @num_ppe_vp_entries: Number of PPE VP entries
 * @mlo_enabled: Flag to indicate MLO is enabled or not
 * @mlo_chip_id: MLO chip_id
 * @ml_ctxt: pointer to global ml_context
 * @delta_tqm: delta_tqm
 * @mlo_tstamp_offset: mlo timestamp offset
 * @mld_peer_hash_lock: lock to protect mld_peer_hash
 * @mld_peer_hash: peer hash table for ML peers
 */
struct dp_soc_be {
	struct dp_soc soc;
	uint8_t num_bank_profiles;
#if defined(WLAN_MAX_PDEVS) && (WLAN_MAX_PDEVS == 1)
	qdf_mutex_t tx_bank_lock;
#else
	qdf_spinlock_t tx_bank_lock;
#endif
	struct dp_tx_bank_profile *bank_profiles;
	struct dp_spt_page_desc *page_desc_base;
	uint32_t cc_cmem_base;
	struct dp_hw_cookie_conversion_t tx_cc_ctx[MAX_TXDESC_POOLS];
	struct dp_hw_cookie_conversion_t rx_cc_ctx[MAX_RXDESC_POOLS];
#ifdef WLAN_SUPPORT_PPEDS
	struct dp_srng reo2ppe_ring;
	struct dp_srng ppe2tcl_ring;
	struct dp_srng ppe_release_ring;
	struct dp_ppe_vp_tbl_entry *ppe_vp_tbl;
	qdf_mutex_t ppe_vp_tbl_lock;
	uint8_t num_ppe_vp_entries;
#endif
#ifdef WLAN_FEATURE_11BE_MLO
#ifdef WLAN_MLO_MULTI_CHIP
	uint8_t mlo_enabled;
	uint8_t mlo_chip_id;
	struct dp_mlo_ctxt *ml_ctxt;
	uint64_t delta_tqm;
	uint64_t mlo_tstamp_offset;
#else
	/* Protect mld peer hash table */
	DP_MUTEX_TYPE mld_peer_hash_lock;
	struct {
		uint32_t mask;
		uint32_t idx_bits;

		TAILQ_HEAD(, dp_peer) * bins;
	} mld_peer_hash;
#endif
#endif
};
278 
/*
 * convert struct dp_soc_be pointer to struct dp_soc pointer;
 * argument parenthesized so expression arguments cast as a whole
 */
#define DP_SOC_BE_GET_SOC(be_soc) ((struct dp_soc *)(be_soc))
281 
/**
 * struct dp_pdev_be - Extended DP pdev for BE targets
 * @pdev: dp pdev structure
 * @mlo_link_id: MLO link id for PDEV
 * @delta_tsf2: delta_tsf2
 */
struct dp_pdev_be {
	struct dp_pdev pdev;
#ifdef WLAN_MLO_MULTI_CHIP
	uint8_t mlo_link_id;
	uint64_t delta_tsf2;
#endif
};
296 
/**
 * struct dp_vdev_be - Extended DP vdev for BE targets
 * @vdev: dp vdev structure
 * @bank_id: bank_id to be used for TX
 * @vdev_id_check_en: flag if HW vdev_id check is enabled for vdev
 * @partner_vdev_list: partner vdev ids per chip/link, used for Intra-BSS
 * @seq_num: DP MLO multicast sequence number
 * @mcast_primary: flag marking this vdev as the MLO mcast primary
 * @ppe_vp_enabled: flag to check if PPE VP is enabled for vdev
 * @ppe_vp_profile: PPE VP profile
 */
struct dp_vdev_be {
	struct dp_vdev vdev;
	int8_t bank_id;
	uint8_t vdev_id_check_en;
#ifdef WLAN_MLO_MULTI_CHIP
	/* partner list used for Intra-BSS */
	uint8_t partner_vdev_list[WLAN_MAX_MLO_CHIPS][WLAN_MAX_MLO_LINKS_PER_SOC];
#ifdef WLAN_FEATURE_11BE_MLO
#ifdef WLAN_MCAST_MLO
	/* DP MLO seq number */
	uint16_t seq_num;
	/* MLO Mcast primary vdev */
	bool mcast_primary;
#endif
#endif
#endif
	unsigned long ppe_vp_enabled;
#ifdef WLAN_SUPPORT_PPEDS
	struct dp_ppe_vp_profile ppe_vp_profile;
#endif
};
326 
/**
 * struct dp_peer_be - Extended DP peer for BE targets
 * @peer: dp peer structure
 */
struct dp_peer_be {
	struct dp_peer peer;
};
334 
/**
 * dp_get_soc_context_size_be() - get context size for target specific DP soc
 *
 * Return: value in bytes for BE specific soc structure
 */
qdf_size_t dp_get_soc_context_size_be(void);

/**
 * dp_initialize_arch_ops_be() - initialize BE specific arch ops
 * @arch_ops: arch ops pointer
 *
 * Return: none
 */
void dp_initialize_arch_ops_be(struct dp_arch_ops *arch_ops);

/**
 * dp_get_context_size_be() - get BE specific size for peer/vdev/pdev/soc
 * @context_type: context type for which the size is required
 *
 * Return: size in bytes for the context_type
 */
qdf_size_t dp_get_context_size_be(enum dp_context_type context_type);

/**
 * dp_mon_get_context_size_be() - get BE specific size for mon pdev/soc
 * @context_type: context type for which the size is required
 *
 * Return: size in bytes for the context_type
 */
qdf_size_t dp_mon_get_context_size_be(enum dp_context_type context_type);
365 
/**
 * dp_get_be_soc_from_dp_soc() - get dp_soc_be from dp_soc
 * @soc: dp_soc pointer (first member of the enclosing struct dp_soc_be)
 *
 * Return: dp_soc_be pointer
 */
static inline struct dp_soc_be *dp_get_be_soc_from_dp_soc(struct dp_soc *soc)
{
	struct dp_soc_be *be_soc = (struct dp_soc_be *)soc;

	return be_soc;
}
376 
/**
 * dp_get_be_mon_soc_from_dp_mon_soc() - get dp_mon_soc_be from dp_mon_soc
 * @soc: dp_mon_soc pointer
 *
 * Return: dp_mon_soc_be pointer
 */
static inline
struct dp_mon_soc_be *dp_get_be_mon_soc_from_dp_mon_soc(struct dp_mon_soc *soc)
{
	struct dp_mon_soc_be *be_mon_soc = (struct dp_mon_soc_be *)soc;

	return be_mon_soc;
}
388 
389 #ifdef WLAN_MLO_MULTI_CHIP
390 typedef struct dp_mlo_ctxt *dp_mld_peer_hash_obj_t;
391 
392 /*
393  * dp_mlo_get_peer_hash_obj() - return the container struct of MLO hash table
394  *
395  * @soc: soc handle
396  *
397  * return: MLD peer hash object
398  */
399 static inline dp_mld_peer_hash_obj_t
400 dp_mlo_get_peer_hash_obj(struct dp_soc *soc)
401 {
402 	struct dp_soc_be *be_soc = dp_get_be_soc_from_dp_soc(soc);
403 
404 	return be_soc->ml_ctxt;
405 }
406 
407 void  dp_clr_mlo_ptnr_list(struct dp_soc *soc, struct dp_vdev *vdev);
408 
409 #if defined(WLAN_FEATURE_11BE_MLO) && defined(WLAN_MCAST_MLO)
410 typedef void dp_ptnr_vdev_iter_func(struct dp_vdev_be *be_vdev,
411 				    struct dp_vdev *ptnr_vdev,
412 				    void *arg);
413 typedef void dp_ptnr_soc_iter_func(struct dp_soc *ptnr_soc,
414 				   void *arg);
/**
 * dp_mcast_mlo_iter_ptnr_vdev() - API to iterate through the partner vdev list
 * @be_soc: dp_soc_be pointer
 * @be_vdev: dp_vdev_be pointer
 * @func: function to be called for each partner vdev
 * @arg: argument to be passed to func
 * @mod_id: module id
 *
 * Return: None
 */
425 void dp_mcast_mlo_iter_ptnr_vdev(struct dp_soc_be *be_soc,
426 				 struct dp_vdev_be *be_vdev,
427 				 dp_ptnr_vdev_iter_func func,
428 				 void *arg,
429 				 enum dp_mod_id mod_id);
/**
 * dp_mcast_mlo_iter_ptnr_soc() - API to iterate through the partner soc list
 * @be_soc: dp_soc_be pointer
 * @func: function to be called for each partner soc
 * @arg: argument to be passed to func
 *
 * Return: None
 */
437  */
438 void dp_mcast_mlo_iter_ptnr_soc(struct dp_soc_be *be_soc,
439 				dp_ptnr_soc_iter_func func,
440 				void *arg);
441 /*
442  * dp_mlo_get_mcast_primary_vdev- get ref to mcast primary vdev
443  * @be_soc: dp_soc_be pointer
444  * @be_vdev: dp_vdev_be pointer
445  * @mod_id: module id
446  *
447  * Return: mcast primary DP VDEV handle on success, NULL on failure
448  */
449 struct dp_vdev *dp_mlo_get_mcast_primary_vdev(struct dp_soc_be *be_soc,
450 					      struct dp_vdev_be *be_vdev,
451 					      enum dp_mod_id mod_id);
452 #endif
453 
454 #else
455 typedef struct dp_soc_be *dp_mld_peer_hash_obj_t;
456 
457 static inline dp_mld_peer_hash_obj_t
458 dp_mlo_get_peer_hash_obj(struct dp_soc *soc)
459 {
460 	return dp_get_be_soc_from_dp_soc(soc);
461 }
462 
/* dp_clr_mlo_ptnr_list() - no-op when MLO multi-chip is not enabled */
static inline void  dp_clr_mlo_ptnr_list(struct dp_soc *soc,
					 struct dp_vdev *vdev)
{
}
467 #endif
468 
/**
 * dp_mlo_peer_find_hash_attach_be() - API to initialize ML peer hash table
 * @mld_hash_obj: peer hash object
 * @hash_elems: number of entries in hash table
 *
 * Return: QDF_STATUS_SUCCESS when attach is success else QDF_STATUS_FAILURE
 */
477 QDF_STATUS
478 dp_mlo_peer_find_hash_attach_be(dp_mld_peer_hash_obj_t mld_hash_obj,
479 				int hash_elems);
480 
/**
 * dp_mlo_peer_find_hash_detach_be() - API to de-initialize ML peer hash table
 * @mld_hash_obj: peer hash object
 *
 * Return: void
 */
488 void dp_mlo_peer_find_hash_detach_be(dp_mld_peer_hash_obj_t mld_hash_obj);
489 
/**
 * dp_get_be_pdev_from_dp_pdev() - get dp_pdev_be from dp_pdev
 * @pdev: dp_pdev pointer
 *
 * Return: dp_pdev_be pointer
 */
static inline
struct dp_pdev_be *dp_get_be_pdev_from_dp_pdev(struct dp_pdev *pdev)
{
	struct dp_pdev_be *be_pdev = (struct dp_pdev_be *)pdev;

	return be_pdev;
}
501 
502 #ifdef QCA_MONITOR_2_0_SUPPORT
/**
 * dp_get_be_mon_pdev_from_dp_mon_pdev() - get dp_mon_pdev_be from dp_mon_pdev
 * @mon_pdev: dp_mon_pdev pointer
 *
 * Return: dp_mon_pdev_be pointer
 */
static inline
struct dp_mon_pdev_be *dp_get_be_mon_pdev_from_dp_mon_pdev(struct dp_mon_pdev *mon_pdev)
{
	struct dp_mon_pdev_be *be_mon_pdev = (struct dp_mon_pdev_be *)mon_pdev;

	return be_mon_pdev;
}
514 #endif
515 
/**
 * dp_get_be_vdev_from_dp_vdev() - get dp_vdev_be from dp_vdev
 * @vdev: dp_vdev pointer
 *
 * Return: dp_vdev_be pointer
 */
static inline
struct dp_vdev_be *dp_get_be_vdev_from_dp_vdev(struct dp_vdev *vdev)
{
	struct dp_vdev_be *be_vdev = (struct dp_vdev_be *)vdev;

	return be_vdev;
}
527 
/**
 * dp_get_be_peer_from_dp_peer() - get dp_peer_be from dp_peer
 * @peer: dp_peer pointer
 *
 * Return: dp_peer_be pointer
 */
static inline
struct dp_peer_be *dp_get_be_peer_from_dp_peer(struct dp_peer *peer)
{
	struct dp_peer_be *be_peer = (struct dp_peer_be *)peer;

	return be_peer;
}
539 
/**
 * dp_hw_cookie_conversion_attach() - allocate cookie conversion resources
 *			for one descriptor pool
 * @be_soc: beryllium soc handle
 * @cc_ctx: cookie conversion context to fill
 * @num_descs: number of TX/RX descriptors the context must cover
 * @desc_type: descriptor type (TX/RX)
 * @desc_pool_id: descriptor pool id
 *
 * Return: QDF_STATUS
 */
QDF_STATUS
dp_hw_cookie_conversion_attach(struct dp_soc_be *be_soc,
			       struct dp_hw_cookie_conversion_t *cc_ctx,
			       uint32_t num_descs,
			       enum dp_desc_type desc_type,
			       uint8_t desc_pool_id);

/**
 * dp_hw_cookie_conversion_detach() - free cookie conversion resources
 * @be_soc: beryllium soc handle
 * @cc_ctx: cookie conversion context
 *
 * Return: QDF_STATUS
 */
QDF_STATUS
dp_hw_cookie_conversion_detach(struct dp_soc_be *be_soc,
			       struct dp_hw_cookie_conversion_t *cc_ctx);

/**
 * dp_hw_cookie_conversion_init() - initialize the cookie conversion context
 * @be_soc: beryllium soc handle
 * @cc_ctx: cookie conversion context
 *
 * Return: QDF_STATUS
 */
QDF_STATUS
dp_hw_cookie_conversion_init(struct dp_soc_be *be_soc,
			     struct dp_hw_cookie_conversion_t *cc_ctx);

/**
 * dp_hw_cookie_conversion_deinit() - de-initialize the cookie conversion
 *			context
 * @be_soc: beryllium soc handle
 * @cc_ctx: cookie conversion context
 *
 * Return: QDF_STATUS
 */
QDF_STATUS
dp_hw_cookie_conversion_deinit(struct dp_soc_be *be_soc,
			       struct dp_hw_cookie_conversion_t *cc_ctx);
556 /**
557  * dp_cc_spt_page_desc_alloc() - allocate SPT DDR page descriptor from pool
558  * @be_soc: beryllium soc handler
559  * @list_head: pointer to page desc head
560  * @list_tail: pointer to page desc tail
561  * @num_desc: number of TX/RX Descs required for SPT pages
562  *
563  * Return: number of SPT page Desc allocated
564  */
565 uint16_t dp_cc_spt_page_desc_alloc(struct dp_soc_be *be_soc,
566 				   struct dp_spt_page_desc **list_head,
567 				   struct dp_spt_page_desc **list_tail,
568 				   uint16_t num_desc);
569 /**
570  * dp_cc_spt_page_desc_free() - free SPT DDR page descriptor to pool
571  * @be_soc: beryllium soc handler
572  * @list_head: pointer to page desc head
573  * @list_tail: pointer to page desc tail
574  * @page_nums: number of page desc freed back to pool
575  */
576 void dp_cc_spt_page_desc_free(struct dp_soc_be *be_soc,
577 			      struct dp_spt_page_desc **list_head,
578 			      struct dp_spt_page_desc **list_tail,
579 			      uint16_t page_nums);
580 
581 /**
582  * dp_cc_desc_id_generate() - generate SW cookie ID according to
583 				DDR page 4K aligned or not
584  * @ppt_index: offset index in primary page table
585  * @spt_index: offset index in sceondary DDR page
586  *
587  * Generate SW cookie ID to match as HW expected
588  *
589  * Return: cookie ID
590  */
591 static inline uint32_t dp_cc_desc_id_generate(uint32_t ppt_index,
592 					      uint16_t spt_index)
593 {
594 	/*
595 	 * for 4k aligned case, cmem entry size is 4 bytes,
596 	 * HW index from bit19~bit10 value = ppt_index / 2, high 32bits flag
597 	 * from bit9 value = ppt_index % 2, then bit 19 ~ bit9 value is
598 	 * exactly same with original ppt_index value.
599 	 * for 4k un-aligned case, cmem entry size is 8 bytes.
600 	 * bit19 ~ bit9 will be HW index value, same as ppt_index value.
601 	 */
602 	return ((((uint32_t)ppt_index) << DP_CC_DESC_ID_PPT_PAGE_OS_SHIFT) |
603 		spt_index);
604 }
605 
/**
 * dp_cc_desc_find() - find TX/RX descriptor virtual address by cookie ID
 * @soc: dp_soc handle
 * @desc_id: TX/RX descriptor ID
 *
 * Return: TX/RX descriptor virtual address
 */
static inline uintptr_t dp_cc_desc_find(struct dp_soc *soc,
					uint32_t desc_id)
{
	struct dp_soc_be *be_soc;
	uint16_t ppt_page_id, spt_va_id;
	uint8_t *spt_page_va;

	be_soc = dp_get_be_soc_from_dp_soc(soc);
	/* bits 19..9: index of the SPT page in the page desc array */
	ppt_page_id = (desc_id & DP_CC_DESC_ID_PPT_PAGE_OS_MASK) >>
			DP_CC_DESC_ID_PPT_PAGE_OS_SHIFT;

	/* bits 8..0: entry offset within that SPT page */
	spt_va_id = (desc_id & DP_CC_DESC_ID_SPT_VA_OS_MASK) >>
			DP_CC_DESC_ID_SPT_VA_OS_SHIFT;

	/*
	 * ppt index in cmem is same order where the page in the
	 * page desc array during initialization.
	 * entry size in DDR page is 64 bits, for 32 bits system,
	 * only lower 32 bits VA value is needed.
	 */
	spt_page_va = be_soc->page_desc_base[ppt_page_id].page_v_addr;

	return (*((uintptr_t *)(spt_page_va  +
				spt_va_id * DP_CC_HW_READ_BYTES)));
}
638 
639 #ifdef WLAN_FEATURE_NEAR_FULL_IRQ
/**
 * enum dp_srng_near_full_levels - SRNG near-full levels
 * @DP_SRNG_THRESH_SAFE: SRNG level safe for yielding the near full mode
 *		of processing the entries in SRNG
 * @DP_SRNG_THRESH_NEAR_FULL: SRNG level enters the near full mode
 *		of processing the entries in SRNG
 * @DP_SRNG_THRESH_CRITICAL: SRNG level enters the critical level of full
 *		condition and drastic steps need to be taken for processing
 *		the entries in SRNG
 */
enum dp_srng_near_full_levels {
	DP_SRNG_THRESH_SAFE,
	DP_SRNG_THRESH_NEAR_FULL,
	DP_SRNG_THRESH_CRITICAL,
};
655 
656 /**
657  * dp_srng_check_ring_near_full() - Check if SRNG is marked as near-full from
658  *				its corresponding near-full irq handler
659  * @soc: Datapath SoC handle
660  * @dp_srng: datapath handle for this SRNG
661  *
662  * Return: 1, if the srng was marked as near-full
663  *	   0, if the srng was not marked as near-full
664  */
665 static inline int dp_srng_check_ring_near_full(struct dp_soc *soc,
666 					       struct dp_srng *dp_srng)
667 {
668 	return qdf_atomic_read(&dp_srng->near_full);
669 }
670 
671 /**
672  * dp_srng_get_near_full_level() - Check the num available entries in the
673  *			consumer srng and return the level of the srng
674  *			near full state.
675  * @soc: Datapath SoC Handle [To be validated by the caller]
676  * @hal_ring_hdl: SRNG handle
677  *
678  * Return: near-full level
679  */
680 static inline int
681 dp_srng_get_near_full_level(struct dp_soc *soc, struct dp_srng *dp_srng)
682 {
683 	uint32_t num_valid;
684 
685 	num_valid = hal_srng_dst_num_valid_nolock(soc->hal_soc,
686 						  dp_srng->hal_srng,
687 						  true);
688 
689 	if (num_valid > dp_srng->crit_thresh)
690 		return DP_SRNG_THRESH_CRITICAL;
691 	else if (num_valid < dp_srng->safe_thresh)
692 		return DP_SRNG_THRESH_SAFE;
693 	else
694 		return DP_SRNG_THRESH_NEAR_FULL;
695 }
696 
#define DP_SRNG_PER_LOOP_NF_REAP_MULTIPLIER	2

/**
 * _dp_srng_test_and_update_nf_params() - Test the near full level and update
 *			the reap_limit and flags to reflect the state.
 * @soc: Datapath soc handle
 * @srng: Datapath handle for the srng
 * @max_reap_limit: [Output Param] Buffer to set the max_reap_limit as
 *			per the near-full state
 *
 * Return: 1, if the srng is near full
 *	   0, if the srng is not near full
 */
static inline int
_dp_srng_test_and_update_nf_params(struct dp_soc *soc,
				   struct dp_srng *srng,
				   int *max_reap_limit)
{
	int ring_near_full = 0, near_full_level;

	if (dp_srng_check_ring_near_full(soc, srng)) {
		near_full_level = dp_srng_get_near_full_level(soc, srng);
		switch (near_full_level) {
		case DP_SRNG_THRESH_CRITICAL:
			/* Currently not doing anything special here */
			fallthrough;
		case DP_SRNG_THRESH_NEAR_FULL:
			ring_near_full = 1;
			/* reap more entries per loop while near full */
			*max_reap_limit *= DP_SRNG_PER_LOOP_NF_REAP_MULTIPLIER;
			break;
		case DP_SRNG_THRESH_SAFE:
			/* back at a safe level: clear the near-full flag */
			qdf_atomic_set(&srng->near_full, 0);
			ring_near_full = 0;
			break;
		default:
			qdf_assert(0);
			break;
		}
	}

	return ring_near_full;
}
739 #else
/* near-full IRQ support disabled: the ring is never treated as near-full */
static inline int
_dp_srng_test_and_update_nf_params(struct dp_soc *soc,
				   struct dp_srng *srng,
				   int *max_reap_limit)
{
	return 0;
}
747 #endif
748 
749 static inline
750 uint32_t dp_desc_pool_get_cmem_base(uint8_t chip_id, uint8_t desc_pool_id,
751 				    enum dp_desc_type desc_type)
752 {
753 	switch (desc_type) {
754 	case DP_TX_DESC_TYPE:
755 		return (DP_TX_DESC_CMEM_OFFSET +
756 			(desc_pool_id * DP_TX_DESC_POOL_CMEM_SIZE));
757 	case DP_RX_DESC_BUF_TYPE:
758 		return (DP_RX_DESC_CMEM_OFFSET +
759 			((chip_id * MAX_RXDESC_POOLS) + desc_pool_id) *
760 			DP_RX_DESC_POOL_CMEM_SIZE);
761 	default:
762 			QDF_BUG(0);
763 	}
764 	return 0;
765 }
766 
#ifndef WLAN_MLO_MULTI_CHIP
/* MLO multi-chip not supported: the helpers below are no-op stubs */
static inline
void dp_soc_mlo_fill_params(struct dp_soc *soc,
			    struct cdp_soc_attach_params *params)
{
}

static inline
void dp_pdev_mlo_fill_params(struct dp_pdev *pdev,
			     struct cdp_pdev_attach_params *params)
{
}

static inline
void dp_mlo_update_link_to_pdev_map(struct dp_soc *soc, struct dp_pdev *pdev)
{
}

static inline
void dp_mlo_update_link_to_pdev_unmap(struct dp_soc *soc, struct dp_pdev *pdev)
{
}
#endif
790 
/**
 * dp_txrx_set_vdev_param_be() - target specific ops while setting vdev params
 * @soc: DP soc handle
 * @vdev: pointer to vdev structure
 * @param: parameter type to be set
 * @val: value to be set for the parameter
 *
 * Return: QDF_STATUS
 */
QDF_STATUS dp_txrx_set_vdev_param_be(struct dp_soc *soc,
				     struct dp_vdev *vdev,
				     enum cdp_vdev_param_type param,
				     cdp_config_param_type val);
804 
805 #endif
806