xref: /wlan-dirver/qca-wifi-host-cmn/dp/wifi3.0/be/dp_be.h (revision 901120c066e139c7f8a2c8e4820561fdd83c67ef)
1 /*
2  * Copyright (c) 2021 The Linux Foundation. All rights reserved.
3  * Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved.
4  *
5  * Permission to use, copy, modify, and/or distribute this software for
6  * any purpose with or without fee is hereby granted, provided that the
7  * above copyright notice and this permission notice appear in all
8  * copies.
9  *
10  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
11  * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
12  * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
13  * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
14  * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
15  * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
16  * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
17  * PERFORMANCE OF THIS SOFTWARE.
18  */
19 #ifndef __DP_BE_H
20 #define __DP_BE_H
21 
22 #include <dp_types.h>
23 #include <hal_be_tx.h>
24 #ifdef WLAN_MLO_MULTI_CHIP
25 #include "mlo/dp_mlo.h"
26 #else
27 #include <dp_peer.h>
28 #endif
29 #ifdef WIFI_MONITOR_SUPPORT
30 #include <dp_mon.h>
31 #endif
32 
/* clients sharing the CMEM pool: cookie conversion, then FISA FST */
enum CMEM_MEM_CLIENTS {
	COOKIE_CONVERSION,
	FISA_FST,
};
37 
38 /* maximum number of entries in one page of secondary page table */
39 #define DP_CC_SPT_PAGE_MAX_ENTRIES 512
40 
/* mask for the entry offset within one page of secondary page table */
42 #define DP_CC_SPT_PAGE_MAX_ENTRIES_MASK (DP_CC_SPT_PAGE_MAX_ENTRIES - 1)
43 
44 /* maximum number of entries in primary page table */
45 #define DP_CC_PPT_MAX_ENTRIES 1024
46 
47 /* cookie conversion required CMEM offset from CMEM pool */
48 #define DP_CC_MEM_OFFSET_IN_CMEM 0
49 
50 /* cookie conversion primary page table size 4K */
51 #define DP_CC_PPT_MEM_SIZE 4096
52 
53 /* FST required CMEM offset from CMEM pool */
54 #define DP_FST_MEM_OFFSET_IN_CMEM \
55 	(DP_CC_MEM_OFFSET_IN_CMEM + DP_CC_PPT_MEM_SIZE)
56 
57 /* CMEM size for FISA FST 16K */
58 #define DP_CMEM_FST_SIZE 16384
59 
60 /* lower 9 bits in Desc ID for offset in page of SPT */
61 #define DP_CC_DESC_ID_SPT_VA_OS_SHIFT 0
62 
63 #define DP_CC_DESC_ID_SPT_VA_OS_MASK 0x1FF
64 
65 #define DP_CC_DESC_ID_SPT_VA_OS_LSB 0
66 
67 #define DP_CC_DESC_ID_SPT_VA_OS_MSB 8
68 
69 /* higher 11 bits in Desc ID for offset in CMEM of PPT */
70 #define DP_CC_DESC_ID_PPT_PAGE_OS_LSB 9
71 
72 #define DP_CC_DESC_ID_PPT_PAGE_OS_MSB 19
73 
74 #define DP_CC_DESC_ID_PPT_PAGE_OS_SHIFT 9
75 
76 #define DP_CC_DESC_ID_PPT_PAGE_OS_MASK 0xFFE00
77 
78 /*
79  * page 4K unaligned case, single SPT page physical address
80  * need 8 bytes in PPT
81  */
82 #define DP_CC_PPT_ENTRY_SIZE_4K_UNALIGNED 8
83 /*
84  * page 4K aligned case, single SPT page physical address
85  * need 4 bytes in PPT
86  */
87 #define DP_CC_PPT_ENTRY_SIZE_4K_ALIGNED 4
88 
89 /* 4K aligned case, number of bits HW append for one PPT entry value */
90 #define DP_CC_PPT_ENTRY_HW_APEND_BITS_4K_ALIGNED 12
91 
92 #if defined(WLAN_MAX_PDEVS) && (WLAN_MAX_PDEVS == 1)
93 /* WBM2SW ring id for rx release */
94 #define WBM2SW_REL_ERR_RING_NUM 3
95 #else
96 /* WBM2SW ring id for rx release */
97 #define WBM2SW_REL_ERR_RING_NUM 5
98 #endif
99 
#ifdef WLAN_SUPPORT_PPEDS
/* The MAX PPE PRI2TID */
#define DP_TX_INT_PRI2TID_MAX 15

/* pool id used for the PPEDS tx descriptor pool */
#define DP_TX_PPEDS_POOL_ID 0

/* size of CMEM needed for a ppeds tx desc pool */
#define DP_TX_PPEDS_DESC_POOL_CMEM_SIZE \
	((WLAN_CFG_NUM_PPEDS_TX_DESC_MAX / DP_CC_SPT_PAGE_MAX_ENTRIES) * \
	 DP_CC_PPT_ENTRY_SIZE_4K_ALIGNED)

/* Offset of ppeds tx descriptor pool */
#define DP_TX_PPEDS_DESC_CMEM_OFFSET 0
#else
/* PPEDS disabled: pool consumes no CMEM */
#define DP_TX_PPEDS_DESC_CMEM_OFFSET 0
#define DP_TX_PPEDS_DESC_POOL_CMEM_SIZE 0
#endif
117 
118 /* tx descriptor are programmed at start of CMEM region*/
119 #define DP_TX_DESC_CMEM_OFFSET \
120 	(DP_TX_PPEDS_DESC_CMEM_OFFSET + DP_TX_PPEDS_DESC_POOL_CMEM_SIZE)
121 
122 /* size of CMEM needed for a tx desc pool*/
123 #define DP_TX_DESC_POOL_CMEM_SIZE \
124 	((WLAN_CFG_NUM_TX_DESC_MAX / DP_CC_SPT_PAGE_MAX_ENTRIES) * \
125 	 DP_CC_PPT_ENTRY_SIZE_4K_ALIGNED)
126 
/*
 * Offset of rx descriptor pool.
 * Whole expansion parenthesized so the macro stays a single term
 * regardless of the operators surrounding its use site.
 */
#define DP_RX_DESC_CMEM_OFFSET \
	(DP_TX_DESC_CMEM_OFFSET + (MAX_TXDESC_POOLS * DP_TX_DESC_POOL_CMEM_SIZE))
130 
131 /* size of CMEM needed for a rx desc pool */
132 #define DP_RX_DESC_POOL_CMEM_SIZE \
133 	((WLAN_CFG_RX_SW_DESC_NUM_SIZE_MAX / DP_CC_SPT_PAGE_MAX_ENTRIES) * \
134 	 DP_CC_PPT_ENTRY_SIZE_4K_ALIGNED)
135 
136 /* get ppt_id from CMEM_OFFSET */
137 #define DP_CMEM_OFFSET_TO_PPT_ID(offset) \
138 	((offset) / DP_CC_PPT_ENTRY_SIZE_4K_ALIGNED)
139 
/**
 * struct dp_spt_page_desc - secondary page table page descriptor
 * @page_v_addr: page virtual address
 * @page_p_addr: page physical address
 * @ppt_index: entry index in primary page table where this page physical
 *	       address is stored
 */
struct dp_spt_page_desc {
	uint8_t *page_v_addr;
	qdf_dma_addr_t page_p_addr;
	uint32_t ppt_index;
};
154 
/**
 * struct dp_hw_cookie_conversion_t - main context for HW cookie conversion
 * @cmem_offset: CMEM offset from base address for primary page table setup
 * @total_page_num: total DDR pages allocated
 * @page_desc_base: page descriptor array base address
 * @page_pool: DDR pages pool
 * @cc_lock: lock for page acquiring/free
 */
struct dp_hw_cookie_conversion_t {
	uint32_t cmem_offset;
	uint32_t total_page_num;
	struct dp_spt_page_desc *page_desc_base;
	struct qdf_mem_multi_page_t page_pool;
	qdf_spinlock_t cc_lock;
};
171 
/**
 * struct dp_spt_page_desc_list - container of SPT page desc list info
 * @spt_page_list_head: head of SPT page descriptor list
 * @spt_page_list_tail: tail of SPT page descriptor list
 * @num_spt_pages: number of SPT page descriptors allocated
 */
struct dp_spt_page_desc_list {
	struct dp_spt_page_desc *spt_page_list_head;
	struct dp_spt_page_desc *spt_page_list_tail;
	uint16_t num_spt_pages;
};
183 
/* HW reads 8 bytes per cookie-conversion entry (one 64-bit VA slot) */
#define DP_CC_HW_READ_BYTES 8

/*
 * Store a TX/RX descriptor virtual address into slot @_index of an SPT page.
 * do { } while (0) keeps the multi-token macro safe inside unbraced if/else.
 */
#define DP_CC_SPT_PAGE_UPDATE_VA(_page_base_va, _index, _desc_va) \
	do { \
		*((uintptr_t *)((_page_base_va) + \
				(_index) * DP_CC_HW_READ_BYTES)) = \
			(uintptr_t)(_desc_va); \
	} while (0)
189 
/**
 * struct dp_tx_bank_profile - DP wrapper for TCL banks
 * @is_configured: flag indicating if this bank is configured
 * @ref_count: number of users currently holding this bank (atomic)
 * @bank_config: HAL TX bank configuration
 */
struct dp_tx_bank_profile {
	uint8_t is_configured;
	qdf_atomic_t  ref_count;
	union hal_tx_bank_config bank_config;
};
201 
202 #ifdef WLAN_SUPPORT_PPEDS
/**
 * struct dp_ppe_vp_tbl_entry - PPE Virtual Port table entry
 * @is_configured: true when this VP table entry is in use
 */
struct dp_ppe_vp_tbl_entry {
	bool is_configured;
};
210 
/**
 * struct dp_ppe_vp_profile - PPE direct switch profile per vdev
 * @vp_num: Virtual port number
 * @ppe_vp_num_idx: Index to the PPE VP table entry
 * @search_idx_reg_num: Address search Index register number
 * @drop_prec_enable: Drop precedence enable
 * @to_fw: To FW exception enable/disable
 * @use_ppe_int_pri: Use PPE INT_PRI to TID mapping table
 */
struct dp_ppe_vp_profile {
	uint8_t vp_num;
	uint8_t ppe_vp_num_idx;
	uint8_t search_idx_reg_num;
	uint8_t drop_prec_enable;
	uint8_t to_fw;
	uint8_t use_ppe_int_pri;
};
228 
/**
 * struct dp_ppe_tx_desc_pool_s - PPEDS Tx Descriptor Pool
 * @elem_size: Size of each descriptor
 * @num_allocated: Number of used descriptors
 * @freelist: Chain of free descriptors
 * @desc_pages: multiple page allocation information for actual descriptors
 * @elem_count: Number of descriptors in the pool
 * @num_free: Number of free descriptors
 * @lock: Lock for descriptor allocation/free from/to the pool
 */
struct dp_ppe_tx_desc_pool_s {
	uint16_t elem_size;
	uint32_t num_allocated;
	struct dp_tx_desc_s *freelist;
	struct qdf_mem_multi_page_t desc_pages;
	uint16_t elem_count;
	uint32_t num_free;
	qdf_spinlock_t lock;
};
248 #endif
249 
/**
 * struct dp_soc_be - Extended DP soc for BE targets
 * @soc: dp soc structure
 * @num_bank_profiles: num TX bank profiles
 * @tx_bank_lock: lock protecting the TX bank profiles
 * @bank_profiles: bank profiles for various TX banks
 * @page_desc_base: page descriptor array used for HW cookie conversion
 * @cc_cmem_base: cmem offset reserved for CC
 * @tx_cc_ctx: Cookie conversion context for tx desc pools
 * @rx_cc_ctx: Cookie conversion context for rx desc pools
 * @reo2ppe_ring: REO2PPE ring
 * @ppe2tcl_ring: PPE2TCL ring
 * @ppe_release_ring: PPE release ring
 * @ppe_vp_tbl: PPE VP table
 * @ppeds_tx_cc_ctx: Cookie conversion context for ppeds tx desc pool
 * @ppeds_tx_desc: PPEDS tx desc pool
 * @ppeds_handle: PPEDS soc instance handle
 * @ppe_vp_tbl_lock: PPE VP table lock
 * @num_ppe_vp_entries: Number of PPE VP entries
 * @mlo_enabled: Flag to indicate MLO is enabled or not
 * @mlo_chip_id: MLO chip_id
 * @ml_ctxt: pointer to global ml_context
 * @delta_tqm: delta_tqm
 * @mlo_tstamp_offset: mlo timestamp offset
 * @mld_peer_hash_lock: lock to protect mld_peer_hash
 * @mld_peer_hash: peer hash table for ML peers
 * @ipa_bank_id: TCL bank id used by IPA
 */
struct dp_soc_be {
	struct dp_soc soc;
	uint8_t num_bank_profiles;
#if defined(WLAN_MAX_PDEVS) && (WLAN_MAX_PDEVS == 1)
	qdf_mutex_t tx_bank_lock;
#else
	qdf_spinlock_t tx_bank_lock;
#endif
	struct dp_tx_bank_profile *bank_profiles;
	struct dp_spt_page_desc *page_desc_base;
	uint32_t cc_cmem_base;
	struct dp_hw_cookie_conversion_t tx_cc_ctx[MAX_TXDESC_POOLS];
	struct dp_hw_cookie_conversion_t rx_cc_ctx[MAX_RXDESC_POOLS];
#ifdef WLAN_SUPPORT_PPEDS
	struct dp_srng reo2ppe_ring;
	struct dp_srng ppe2tcl_ring;
	struct dp_srng ppe_release_ring;
	struct dp_ppe_vp_tbl_entry *ppe_vp_tbl;
	struct dp_hw_cookie_conversion_t ppeds_tx_cc_ctx;
	struct dp_ppe_tx_desc_pool_s ppeds_tx_desc;
	void *ppeds_handle;
	qdf_mutex_t ppe_vp_tbl_lock;
	uint8_t num_ppe_vp_entries;
#endif
#ifdef WLAN_FEATURE_11BE_MLO
#ifdef WLAN_MLO_MULTI_CHIP
	uint8_t mlo_enabled;
	uint8_t mlo_chip_id;
	struct dp_mlo_ctxt *ml_ctxt;
	uint64_t delta_tqm;
	uint64_t mlo_tstamp_offset;
#else
	/* Protect mld peer hash table */
	DP_MUTEX_TYPE mld_peer_hash_lock;
	struct {
		uint32_t mask;
		uint32_t idx_bits;

		TAILQ_HEAD(, dp_peer) * bins;
	} mld_peer_hash;
#endif
#endif
#ifdef IPA_OFFLOAD
	int8_t ipa_bank_id;
#endif
};
324 
325 /* convert struct dp_soc_be pointer to struct dp_soc pointer */
326 #define DP_SOC_BE_GET_SOC(be_soc) ((struct dp_soc *)be_soc)
327 
/**
 * struct dp_pdev_be - Extended DP pdev for BE targets
 * @pdev: dp pdev structure
 * @mlo_link_id: MLO link id for PDEV
 * @delta_tsf2: delta_tsf2
 */
struct dp_pdev_be {
	struct dp_pdev pdev;
#ifdef WLAN_MLO_MULTI_CHIP
	uint8_t mlo_link_id;
	uint64_t delta_tsf2;
#endif
};
342 
/**
 * struct dp_vdev_be - Extended DP vdev for BE targets
 * @vdev: dp vdev structure
 * @bank_id: bank_id to be used for TX
 * @vdev_id_check_en: flag if HW vdev_id check is enabled for vdev
 * @partner_vdev_list: partner vdev ids used for MLO Intra-BSS
 * @seq_num: DP MLO mcast sequence number
 * @mcast_primary: flag marking the MLO mcast primary vdev
 * @ppe_vp_enabled: flag to check if PPE VP is enabled for vdev
 * @ppe_vp_profile: PPE VP profile
 */
struct dp_vdev_be {
	struct dp_vdev vdev;
	int8_t bank_id;
	uint8_t vdev_id_check_en;
#ifdef WLAN_MLO_MULTI_CHIP
	/* partner list used for Intra-BSS */
	uint8_t partner_vdev_list[WLAN_MAX_MLO_CHIPS][WLAN_MAX_MLO_LINKS_PER_SOC];
#ifdef WLAN_FEATURE_11BE_MLO
#ifdef WLAN_MCAST_MLO
	/* DP MLO seq number */
	uint16_t seq_num;
	/* MLO Mcast primary vdev */
	bool mcast_primary;
#endif
#endif
#endif
	unsigned long ppe_vp_enabled;
#ifdef WLAN_SUPPORT_PPEDS
	struct dp_ppe_vp_profile ppe_vp_profile;
#endif
};
372 
/**
 * struct dp_peer_be - Extended DP peer for BE targets
 * @peer: dp peer structure
 */
struct dp_peer_be {
	struct dp_peer peer;
};
380 
381 /**
382  * dp_get_soc_context_size_be() - get context size for target specific DP soc
383  *
384  * Return: value in bytes for BE specific soc structure
385  */
386 qdf_size_t dp_get_soc_context_size_be(void);
387 
388 /**
389  * dp_initialize_arch_ops_be() - initialize BE specific arch ops
390  * @arch_ops: arch ops pointer
391  *
392  * Return: none
393  */
394 void dp_initialize_arch_ops_be(struct dp_arch_ops *arch_ops);
395 
396 /**
397  * dp_get_context_size_be() - get BE specific size for peer/vdev/pdev/soc
 * @context_type: context type (peer/vdev/pdev/soc) whose size is queried
399  *
400  * Return: size in bytes for the context_type
401  */
402 qdf_size_t dp_get_context_size_be(enum dp_context_type context_type);
403 
/**
 * dp_get_be_soc_from_dp_soc() - downcast a dp_soc to its BE container
 * @soc: dp_soc pointer (must be embedded inside a struct dp_soc_be)
 *
 * Return: dp_soc_be pointer wrapping @soc
 */
static inline struct dp_soc_be *dp_get_be_soc_from_dp_soc(struct dp_soc *soc)
{
	struct dp_soc_be *be_soc = (struct dp_soc_be *)soc;

	return be_soc;
}
414 
415 #ifdef WLAN_MLO_MULTI_CHIP
416 typedef struct dp_mlo_ctxt *dp_mld_peer_hash_obj_t;
417 
418 /*
419  * dp_mlo_get_peer_hash_obj() - return the container struct of MLO hash table
420  *
421  * @soc: soc handle
422  *
423  * return: MLD peer hash object
424  */
425 static inline dp_mld_peer_hash_obj_t
426 dp_mlo_get_peer_hash_obj(struct dp_soc *soc)
427 {
428 	struct dp_soc_be *be_soc = dp_get_be_soc_from_dp_soc(soc);
429 
430 	return be_soc->ml_ctxt;
431 }
432 
433 void  dp_clr_mlo_ptnr_list(struct dp_soc *soc, struct dp_vdev *vdev);
434 
435 #if defined(WLAN_FEATURE_11BE_MLO)
436 /**
437  * dp_mlo_partner_chips_map() - Map MLO peers to partner SOCs
438  * @soc: Soc handle
439  * @peer: DP peer handle for ML peer
440  * @peer_id: peer_id
441  * Return: None
442  */
443 void dp_mlo_partner_chips_map(struct dp_soc *soc,
444 			      struct dp_peer *peer,
445 			      uint16_t peer_id);
446 
447 /**
448  * dp_mlo_partner_chips_unmap() - Unmap MLO peers to partner SOCs
449  * @soc: Soc handle
450  * @peer_id: peer_id
451  * Return: None
452  */
453 void dp_mlo_partner_chips_unmap(struct dp_soc *soc,
454 				uint16_t peer_id);
455 
456 #ifdef WLAN_MCAST_MLO
457 typedef void dp_ptnr_vdev_iter_func(struct dp_vdev_be *be_vdev,
458 				    struct dp_vdev *ptnr_vdev,
459 				    void *arg);
460 typedef void dp_ptnr_soc_iter_func(struct dp_soc *ptnr_soc,
461 				   void *arg);
462 /*
463  * dp_mcast_mlo_iter_ptnr_vdev - API to iterate through ptnr vdev list
464  * @be_soc: dp_soc_be pointer
465  * @be_vdev: dp_vdev_be pointer
466  * @func        : function to be called for each peer
467  * @arg         : argument need to be passed to func
468  * @mod_id: module id
469  *
470  * Return: None
471  */
472 void dp_mcast_mlo_iter_ptnr_vdev(struct dp_soc_be *be_soc,
473 				 struct dp_vdev_be *be_vdev,
474 				 dp_ptnr_vdev_iter_func func,
475 				 void *arg,
476 				 enum dp_mod_id mod_id);
477 /*
478  * dp_mcast_mlo_iter_ptnr_soc - API to iterate through ptnr soc list
479  * @be_soc: dp_soc_be pointer
480  * @func        : function to be called for each peer
481  * @arg         : argument need to be passed to func
482  *
483  * Return: None
484  */
485 void dp_mcast_mlo_iter_ptnr_soc(struct dp_soc_be *be_soc,
486 				dp_ptnr_soc_iter_func func,
487 				void *arg);
488 /*
489  * dp_mlo_get_mcast_primary_vdev- get ref to mcast primary vdev
490  * @be_soc: dp_soc_be pointer
491  * @be_vdev: dp_vdev_be pointer
492  * @mod_id: module id
493  *
494  * Return: mcast primary DP VDEV handle on success, NULL on failure
495  */
496 struct dp_vdev *dp_mlo_get_mcast_primary_vdev(struct dp_soc_be *be_soc,
497 					      struct dp_vdev_be *be_vdev,
498 					      enum dp_mod_id mod_id);
499 #endif
500 #endif
501 
#else
typedef struct dp_soc_be *dp_mld_peer_hash_obj_t;

/* single-chip build: the BE soc itself acts as the MLD peer hash object */
static inline dp_mld_peer_hash_obj_t
dp_mlo_get_peer_hash_obj(struct dp_soc *soc)
{
	return dp_get_be_soc_from_dp_soc(soc);
}

/* no-op stub: partner vdev lists exist only with WLAN_MLO_MULTI_CHIP */
static inline void  dp_clr_mlo_ptnr_list(struct dp_soc *soc,
					 struct dp_vdev *vdev)
{
}
#endif
516 
517 /*
518  * dp_mlo_peer_find_hash_attach_be() - API to initialize ML peer hash table
519  *
 * @mld_hash_obj: Peer hash object
521  * @hash_elems: number of entries in hash table
522  *
523  * return: QDF_STATUS_SUCCESS when attach is success else QDF_STATUS_FAILURE
524  */
525 QDF_STATUS
526 dp_mlo_peer_find_hash_attach_be(dp_mld_peer_hash_obj_t mld_hash_obj,
527 				int hash_elems);
528 
529 /*
530  * dp_mlo_peer_find_hash_detach_be() - API to de-initialize ML peer hash table
531  *
 * @mld_hash_obj: Peer hash object
533  *
534  * return: void
535  */
536 void dp_mlo_peer_find_hash_detach_be(dp_mld_peer_hash_obj_t mld_hash_obj);
537 
/**
 * dp_get_be_pdev_from_dp_pdev() - downcast a dp_pdev to its BE container
 * @pdev: dp_pdev pointer (embedded inside a struct dp_pdev_be)
 *
 * Return: dp_pdev_be pointer
 */
static inline
struct dp_pdev_be *dp_get_be_pdev_from_dp_pdev(struct dp_pdev *pdev)
{
	struct dp_pdev_be *be_pdev = (struct dp_pdev_be *)pdev;

	return be_pdev;
}
549 
/**
 * dp_get_be_vdev_from_dp_vdev() - downcast a dp_vdev to its BE container
 * @vdev: dp_vdev pointer (embedded inside a struct dp_vdev_be)
 *
 * Return: dp_vdev_be pointer
 */
static inline
struct dp_vdev_be *dp_get_be_vdev_from_dp_vdev(struct dp_vdev *vdev)
{
	struct dp_vdev_be *be_vdev = (struct dp_vdev_be *)vdev;

	return be_vdev;
}
561 
/**
 * dp_get_be_peer_from_dp_peer() - downcast a dp_peer to its BE container
 * @peer: dp_peer pointer (embedded inside a struct dp_peer_be)
 *
 * Return: dp_peer_be pointer
 */
static inline
struct dp_peer_be *dp_get_be_peer_from_dp_peer(struct dp_peer *peer)
{
	struct dp_peer_be *be_peer = (struct dp_peer_be *)peer;

	return be_peer;
}
573 
/**
 * dp_hw_cookie_conversion_attach() - attach a HW cookie conversion context
 * @be_soc: be soc handle
 * @cc_ctx: cookie conversion context
 * @num_descs: number of TX/RX descriptors the context must cover
 * @desc_type: descriptor type the context serves
 * @desc_pool_id: descriptor pool id
 *
 * Return: QDF_STATUS
 */
QDF_STATUS
dp_hw_cookie_conversion_attach(struct dp_soc_be *be_soc,
			       struct dp_hw_cookie_conversion_t *cc_ctx,
			       uint32_t num_descs,
			       enum dp_desc_type desc_type,
			       uint8_t desc_pool_id);

/**
 * dp_hw_cookie_conversion_detach() - detach a HW cookie conversion context
 * @be_soc: be soc handle
 * @cc_ctx: cookie conversion context
 *
 * Return: QDF_STATUS
 */
QDF_STATUS
dp_hw_cookie_conversion_detach(struct dp_soc_be *be_soc,
			       struct dp_hw_cookie_conversion_t *cc_ctx);

/**
 * dp_hw_cookie_conversion_init() - initialize an attached context
 * @be_soc: be soc handle
 * @cc_ctx: cookie conversion context
 *
 * Return: QDF_STATUS
 */
QDF_STATUS
dp_hw_cookie_conversion_init(struct dp_soc_be *be_soc,
			     struct dp_hw_cookie_conversion_t *cc_ctx);

/**
 * dp_hw_cookie_conversion_deinit() - de-initialize a context
 * @be_soc: be soc handle
 * @cc_ctx: cookie conversion context
 *
 * Return: QDF_STATUS
 */
QDF_STATUS
dp_hw_cookie_conversion_deinit(struct dp_soc_be *be_soc,
			       struct dp_hw_cookie_conversion_t *cc_ctx);
590 /**
591  * dp_cc_spt_page_desc_alloc() - allocate SPT DDR page descriptor from pool
592  * @be_soc: beryllium soc handler
593  * @list_head: pointer to page desc head
594  * @list_tail: pointer to page desc tail
595  * @num_desc: number of TX/RX Descs required for SPT pages
596  *
597  * Return: number of SPT page Desc allocated
598  */
599 uint16_t dp_cc_spt_page_desc_alloc(struct dp_soc_be *be_soc,
600 				   struct dp_spt_page_desc **list_head,
601 				   struct dp_spt_page_desc **list_tail,
602 				   uint16_t num_desc);
603 /**
604  * dp_cc_spt_page_desc_free() - free SPT DDR page descriptor to pool
605  * @be_soc: beryllium soc handler
606  * @list_head: pointer to page desc head
607  * @list_tail: pointer to page desc tail
608  * @page_nums: number of page desc freed back to pool
609  */
610 void dp_cc_spt_page_desc_free(struct dp_soc_be *be_soc,
611 			      struct dp_spt_page_desc **list_head,
612 			      struct dp_spt_page_desc **list_tail,
613 			      uint16_t page_nums);
614 
615 /**
616  * dp_cc_desc_id_generate() - generate SW cookie ID according to
617 				DDR page 4K aligned or not
618  * @ppt_index: offset index in primary page table
619  * @spt_index: offset index in sceondary DDR page
620  *
621  * Generate SW cookie ID to match as HW expected
622  *
623  * Return: cookie ID
624  */
625 static inline uint32_t dp_cc_desc_id_generate(uint32_t ppt_index,
626 					      uint16_t spt_index)
627 {
628 	/*
629 	 * for 4k aligned case, cmem entry size is 4 bytes,
630 	 * HW index from bit19~bit10 value = ppt_index / 2, high 32bits flag
631 	 * from bit9 value = ppt_index % 2, then bit 19 ~ bit9 value is
632 	 * exactly same with original ppt_index value.
633 	 * for 4k un-aligned case, cmem entry size is 8 bytes.
634 	 * bit19 ~ bit9 will be HW index value, same as ppt_index value.
635 	 */
636 	return ((((uint32_t)ppt_index) << DP_CC_DESC_ID_PPT_PAGE_OS_SHIFT) |
637 		spt_index);
638 }
639 
/**
 * dp_cc_desc_find() - find TX/RX descriptor virtual address by cookie ID
 * @soc: dp soc handle
 * @desc_id: TX/RX Desc ID
 *
 * Return: TX/RX Desc virtual address
 */
static inline uintptr_t dp_cc_desc_find(struct dp_soc *soc,
					uint32_t desc_id)
{
	struct dp_soc_be *be_soc;
	uint16_t ppt_page_id, spt_va_id;
	uint8_t *spt_page_va;

	be_soc = dp_get_be_soc_from_dp_soc(soc);
	/* upper bits select the SPT page, lower bits the entry in it */
	ppt_page_id = (desc_id & DP_CC_DESC_ID_PPT_PAGE_OS_MASK) >>
			DP_CC_DESC_ID_PPT_PAGE_OS_SHIFT;

	spt_va_id = (desc_id & DP_CC_DESC_ID_SPT_VA_OS_MASK) >>
			DP_CC_DESC_ID_SPT_VA_OS_SHIFT;

	/*
	 * ppt index in cmem is same order where the page in the
	 * page desc array during initialization.
	 * entry size in DDR page is 64 bits, for 32 bits system,
	 * only lower 32 bits VA value is needed.
	 */
	spt_page_va = be_soc->page_desc_base[ppt_page_id].page_v_addr;

	return (*((uintptr_t *)(spt_page_va  +
				spt_va_id * DP_CC_HW_READ_BYTES)));
}
672 
673 #ifdef WLAN_FEATURE_NEAR_FULL_IRQ
/**
 * enum dp_srng_near_full_levels - SRNG near-full severity levels
 * @DP_SRNG_THRESH_SAFE: SRNG level safe for yielding the near full mode
 *		of processing the entries in SRNG
 * @DP_SRNG_THRESH_NEAR_FULL: SRNG level enters the near full mode
 *		of processing the entries in SRNG
 * @DP_SRNG_THRESH_CRITICAL: SRNG level enters the critical level of full
 *		condition and drastic steps need to be taken for processing
 *		the entries in SRNG
 */
enum dp_srng_near_full_levels {
	DP_SRNG_THRESH_SAFE,
	DP_SRNG_THRESH_NEAR_FULL,
	DP_SRNG_THRESH_CRITICAL,
};
689 
690 /**
691  * dp_srng_check_ring_near_full() - Check if SRNG is marked as near-full from
692  *				its corresponding near-full irq handler
693  * @soc: Datapath SoC handle
694  * @dp_srng: datapath handle for this SRNG
695  *
696  * Return: 1, if the srng was marked as near-full
697  *	   0, if the srng was not marked as near-full
698  */
699 static inline int dp_srng_check_ring_near_full(struct dp_soc *soc,
700 					       struct dp_srng *dp_srng)
701 {
702 	return qdf_atomic_read(&dp_srng->near_full);
703 }
704 
/**
 * dp_srng_get_near_full_level() - Check the num available entries in the
 *			consumer srng and return the level of the srng
 *			near full state.
 * @soc: Datapath SoC Handle [To be validated by the caller]
 * @dp_srng: Datapath handle for the srng
 *
 * Return: near-full level (enum dp_srng_near_full_levels)
 */
static inline int
dp_srng_get_near_full_level(struct dp_soc *soc, struct dp_srng *dp_srng)
{
	uint32_t num_valid;

	num_valid = hal_srng_dst_num_valid_nolock(soc->hal_soc,
						  dp_srng->hal_srng,
						  true);

	if (num_valid > dp_srng->crit_thresh)
		return DP_SRNG_THRESH_CRITICAL;
	else if (num_valid < dp_srng->safe_thresh)
		return DP_SRNG_THRESH_SAFE;
	else
		return DP_SRNG_THRESH_NEAR_FULL;
}
730 
/* reap-limit multiplier applied while the ring is near full */
#define DP_SRNG_PER_LOOP_NF_REAP_MULTIPLIER	2

/**
 * _dp_srng_test_and_update_nf_params() - Test the near full level and update
 *			the reap_limit and flags to reflect the state.
 * @soc: Datapath soc handle
 * @srng: Datapath handle for the srng
 * @max_reap_limit: [Output Param] Buffer to set the max_reap_limit as
 *			per the near-full state
 *
 * Return: 1, if the srng is near full
 *	   0, if the srng is not near full
 */
static inline int
_dp_srng_test_and_update_nf_params(struct dp_soc *soc,
				   struct dp_srng *srng,
				   int *max_reap_limit)
{
	int ring_near_full = 0, near_full_level;

	if (dp_srng_check_ring_near_full(soc, srng)) {
		near_full_level = dp_srng_get_near_full_level(soc, srng);
		switch (near_full_level) {
		case DP_SRNG_THRESH_CRITICAL:
			/* Currently not doing anything special here */
			fallthrough;
		case DP_SRNG_THRESH_NEAR_FULL:
			ring_near_full = 1;
			*max_reap_limit *= DP_SRNG_PER_LOOP_NF_REAP_MULTIPLIER;
			break;
		case DP_SRNG_THRESH_SAFE:
			/* back to safe level: clear the near-full flag */
			qdf_atomic_set(&srng->near_full, 0);
			ring_near_full = 0;
			break;
		default:
			qdf_assert(0);
			break;
		}
	}

	return ring_near_full;
}
773 #else
/* near-full IRQ support disabled: never report near-full */
static inline int
_dp_srng_test_and_update_nf_params(struct dp_soc *soc,
				   struct dp_srng *srng,
				   int *max_reap_limit)
{
	return 0;
}
781 #endif
782 
783 static inline
784 uint32_t dp_desc_pool_get_cmem_base(uint8_t chip_id, uint8_t desc_pool_id,
785 				    enum dp_desc_type desc_type)
786 {
787 	switch (desc_type) {
788 	case DP_TX_DESC_TYPE:
789 		return (DP_TX_DESC_CMEM_OFFSET +
790 			(desc_pool_id * DP_TX_DESC_POOL_CMEM_SIZE));
791 	case DP_RX_DESC_BUF_TYPE:
792 		return (DP_RX_DESC_CMEM_OFFSET +
793 			((chip_id * MAX_RXDESC_POOLS) + desc_pool_id) *
794 			DP_RX_DESC_POOL_CMEM_SIZE);
795 	case DP_TX_PPEDS_DESC_TYPE:
796 		return DP_TX_PPEDS_DESC_CMEM_OFFSET;
797 	default:
798 			QDF_BUG(0);
799 	}
800 	return 0;
801 }
802 
#ifndef WLAN_MLO_MULTI_CHIP
/* no-op stub: MLO soc attach params apply to multi-chip builds only */
static inline
void dp_soc_mlo_fill_params(struct dp_soc *soc,
			    struct cdp_soc_attach_params *params)
{
}

/* no-op stub: MLO pdev attach params apply to multi-chip builds only */
static inline
void dp_pdev_mlo_fill_params(struct dp_pdev *pdev,
			     struct cdp_pdev_attach_params *params)
{
}

/* no-op stub: link-to-pdev mapping applies to multi-chip MLO only */
static inline
void dp_mlo_update_link_to_pdev_map(struct dp_soc *soc, struct dp_pdev *pdev)
{
}

/* no-op stub: link-to-pdev unmapping applies to multi-chip MLO only */
static inline
void dp_mlo_update_link_to_pdev_unmap(struct dp_soc *soc, struct dp_pdev *pdev)
{
}
#endif
826 
827 /*
828  * dp_txrx_set_vdev_param_be: target specific ops while setting vdev params
829  * @soc : DP soc handle
830  * @vdev: pointer to vdev structure
 * @param: parameter type whose value is being set
832  * @val: value
833  *
834  * return: QDF_STATUS
835  */
836 QDF_STATUS dp_txrx_set_vdev_param_be(struct dp_soc *soc,
837 				     struct dp_vdev *vdev,
838 				     enum cdp_vdev_param_type param,
839 				     cdp_config_param_type val);
840 
841 #endif
842