/*
 * Copyright (c) 2021 The Linux Foundation. All rights reserved.
 * Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */
#ifndef __DP_BE_H
#define __DP_BE_H

#include <dp_types.h>
#include <hal_be_tx.h>
#ifdef WLAN_MLO_MULTI_CHIP
#include "mlo/dp_mlo.h"
#else
#include <dp_peer.h>
#endif
#ifdef WIFI_MONITOR_SUPPORT
#include <dp_mon.h>
#endif

enum CMEM_MEM_CLIENTS {
	COOKIE_CONVERSION,
	FISA_FST,
};

/* maximum number of entries in one page of secondary page table */
#define DP_CC_SPT_PAGE_MAX_ENTRIES 512

/* mask for the entry offset within one page of secondary page table */
#define DP_CC_SPT_PAGE_MAX_ENTRIES_MASK (DP_CC_SPT_PAGE_MAX_ENTRIES - 1)

/* maximum number of entries in primary page table */
#define DP_CC_PPT_MAX_ENTRIES 1024

/* cookie conversion required CMEM offset from CMEM pool */
#define DP_CC_MEM_OFFSET_IN_CMEM 0

/* cookie conversion primary page table size 4K */
#define DP_CC_PPT_MEM_SIZE 4096

/* FST required CMEM offset from CMEM pool */
#define DP_FST_MEM_OFFSET_IN_CMEM \
	(DP_CC_MEM_OFFSET_IN_CMEM + DP_CC_PPT_MEM_SIZE)

/* CMEM size for FISA FST 16K */
#define DP_CMEM_FST_SIZE 16384

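/*
 * Resulting CMEM carve-out implied by the offsets above (an illustrative
 * summary of these defines, not a definitive map of the full CMEM pool):
 *
 *   0x0000  cookie conversion PPT  (DP_CC_MEM_OFFSET_IN_CMEM,
 *                                   DP_CC_PPT_MEM_SIZE = 4KB)
 *   0x1000  FISA FST               (DP_FST_MEM_OFFSET_IN_CMEM,
 *                                   DP_CMEM_FST_SIZE = 16KB)
 */
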
/* lower 9 bits in Desc ID for offset in page of SPT */
#define DP_CC_DESC_ID_SPT_VA_OS_SHIFT 0

#define DP_CC_DESC_ID_SPT_VA_OS_MASK 0x1FF

#define DP_CC_DESC_ID_SPT_VA_OS_LSB 0

#define DP_CC_DESC_ID_SPT_VA_OS_MSB 8

/* higher 11 bits in Desc ID for offset in CMEM of PPT */
#define DP_CC_DESC_ID_PPT_PAGE_OS_LSB 9

#define DP_CC_DESC_ID_PPT_PAGE_OS_MSB 19

#define DP_CC_DESC_ID_PPT_PAGE_OS_SHIFT 9

#define DP_CC_DESC_ID_PPT_PAGE_OS_MASK 0xFFE00

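/*
 * Example decode of a Desc ID with the masks above (illustrative value
 * only):
 *
 *   desc_id = 0xA03
 *   PPT page offset = (0xA03 & DP_CC_DESC_ID_PPT_PAGE_OS_MASK) >> 9 = 5
 *   SPT entry offset = 0xA03 & DP_CC_DESC_ID_SPT_VA_OS_MASK = 3
 */
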
/*
 * page 4K unaligned case, single SPT page physical address
 * needs 8 bytes in PPT
 */
#define DP_CC_PPT_ENTRY_SIZE_4K_UNALIGNED 8
/*
 * page 4K aligned case, single SPT page physical address
 * needs 4 bytes in PPT
 */
#define DP_CC_PPT_ENTRY_SIZE_4K_ALIGNED 4

/* 4K aligned case, number of bits HW appends to one PPT entry value */
#define DP_CC_PPT_ENTRY_HW_APEND_BITS_4K_ALIGNED 12

#if defined(WLAN_MAX_PDEVS) && (WLAN_MAX_PDEVS == 1)
/* WBM2SW ring id for rx release */
#define WBM2SW_REL_ERR_RING_NUM 3
#else
/* WBM2SW ring id for rx release */
#define WBM2SW_REL_ERR_RING_NUM 5
#endif

/* tx descriptors are programmed at the start of the CMEM region */
#define DP_TX_DESC_CMEM_OFFSET	0

/* size of CMEM needed for a tx desc pool */
#define DP_TX_DESC_POOL_CMEM_SIZE \
	((WLAN_CFG_NUM_TX_DESC_MAX / DP_CC_SPT_PAGE_MAX_ENTRIES) * \
	 DP_CC_PPT_ENTRY_SIZE_4K_ALIGNED)

/* Offset of rx descriptor pool */
#define DP_RX_DESC_CMEM_OFFSET \
	(DP_TX_DESC_CMEM_OFFSET + (MAX_TXDESC_POOLS * DP_TX_DESC_POOL_CMEM_SIZE))

/* size of CMEM needed for a rx desc pool */
#define DP_RX_DESC_POOL_CMEM_SIZE \
	((WLAN_CFG_RX_SW_DESC_NUM_SIZE_MAX / DP_CC_SPT_PAGE_MAX_ENTRIES) * \
	 DP_CC_PPT_ENTRY_SIZE_4K_ALIGNED)

/* get ppt_id from CMEM_OFFSET */
#define DP_CMEM_OFFSET_TO_PPT_ID(offset) \
	((offset) / DP_CC_PPT_ENTRY_SIZE_4K_ALIGNED)

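/*
 * Worked example for the pool sizing macros above (the descriptor count
 * 0x8000 is an assumed value, purely for illustration):
 *
 *   DP_TX_DESC_POOL_CMEM_SIZE = (0x8000 / 512) * 4 = 256 bytes per pool
 *   DP_CMEM_OFFSET_TO_PPT_ID(256) = 256 / 4 = 64
 */
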
/* The MAX PPE PRI2TID */
#ifdef WLAN_SUPPORT_PPEDS
#define DP_TX_INT_PRI2TID_MAX 15
#endif

/**
 * struct dp_spt_page_desc - secondary page table page descriptor
 * @page_v_addr: page virtual address
 * @page_p_addr: page physical address
 * @ppt_index: entry index in primary page table where this page physical
 *	       address is stored
 */
struct dp_spt_page_desc {
	uint8_t *page_v_addr;
	qdf_dma_addr_t page_p_addr;
	uint32_t ppt_index;
};

/**
 * struct dp_hw_cookie_conversion_t - main context for HW cookie conversion
 * @cmem_offset: CMEM offset from base address for primary page table setup
 * @total_page_num: total DDR pages allocated
 * @page_desc_base: page desc buffer base address
 * @page_pool: DDR pages pool
 * @cc_lock: lock for page acquiring/freeing
 */
struct dp_hw_cookie_conversion_t {
	uint32_t cmem_offset;
	uint32_t total_page_num;
	struct dp_spt_page_desc *page_desc_base;
	struct qdf_mem_multi_page_t page_pool;
	qdf_spinlock_t cc_lock;
};

/**
 * struct dp_spt_page_desc_list - container of SPT page desc list info
 * @spt_page_list_head: head of SPT page descriptor list
 * @spt_page_list_tail: tail of SPT page descriptor list
 * @num_spt_pages: number of SPT page descriptors allocated
 */
struct dp_spt_page_desc_list {
	struct dp_spt_page_desc *spt_page_list_head;
	struct dp_spt_page_desc *spt_page_list_tail;
	uint16_t num_spt_pages;
};

/* HW reads 8 bytes for each VA */
#define DP_CC_HW_READ_BYTES 8
#define DP_CC_SPT_PAGE_UPDATE_VA(_page_base_va, _index, _desc_va) \
	{ *((uintptr_t *)((_page_base_va) + (_index) * DP_CC_HW_READ_BYTES)) \
	= (uintptr_t)(_desc_va); }

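/*
 * Illustrative use of DP_CC_SPT_PAGE_UPDATE_VA (variable names are
 * hypothetical): publish the VA of a SW descriptor in entry 10 of an SPT
 * page so that HW cookie conversion can return it later.
 *
 *   DP_CC_SPT_PAGE_UPDATE_VA(page_desc->page_v_addr, 10, tx_desc);
 */
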
/**
 * struct dp_tx_bank_profile - DP wrapper for TCL banks
 * @is_configured: flag indicating if this bank is configured
 * @ref_count: ref count indicating number of users of the bank
 * @bank_config: HAL TX bank configuration
 */
struct dp_tx_bank_profile {
	uint8_t is_configured;
	qdf_atomic_t ref_count;
	union hal_tx_bank_config bank_config;
};

#ifdef WLAN_SUPPORT_PPEDS
/**
 * struct dp_ppe_vp_tbl_entry - PPE Virtual table entry
 * @is_configured: Boolean indicating that the entry is configured.
 */
struct dp_ppe_vp_tbl_entry {
	bool is_configured;
};

/**
 * struct dp_ppe_vp_profile - PPE direct switch profile per vdev
 * @vp_num: Virtual port number
 * @ppe_vp_num_idx: Index to the PPE VP table entry
 * @search_idx_reg_num: Address search Index register number
 * @drop_prec_enable: Drop precedence enable
 * @to_fw: To FW exception enable/disable.
 * @use_ppe_int_pri: Use PPE INT_PRI to TID mapping table
 */
struct dp_ppe_vp_profile {
	uint8_t vp_num;
	uint8_t ppe_vp_num_idx;
	uint8_t search_idx_reg_num;
	uint8_t drop_prec_enable;
	uint8_t to_fw;
	uint8_t use_ppe_int_pri;
};
#endif

/**
 * struct dp_soc_be - Extended DP soc for BE targets
 * @soc: dp soc structure
 * @num_bank_profiles: num TX bank profiles
 * @tx_bank_lock: lock to protect the TX bank profiles
 * @bank_profiles: bank profiles for various TX banks
 * @page_desc_base: base of the SPT page descriptor array
 * @cc_cmem_base: cmem offset reserved for CC
 * @tx_cc_ctx: Cookie conversion context for tx desc pools
 * @rx_cc_ctx: Cookie conversion context for rx desc pools
 * @mlo_enabled: Flag to indicate MLO is enabled or not
 * @mlo_chip_id: MLO chip_id
 * @ml_ctxt: pointer to global ml_context
 * @delta_tqm: delta_tqm
 * @mlo_tstamp_offset: mlo timestamp offset
 * @mld_peer_hash: peer hash table for ML peers
 * @mld_peer_hash_lock: lock to protect mld_peer_hash
 * @reo2ppe_ring: REO2PPE ring
 * @ppe2tcl_ring: PPE2TCL ring
 * @ppe_release_ring: PPE release ring
 * @ppe_vp_tbl: PPE VP table
 * @ppe_vp_tbl_lock: PPE VP table lock
 * @num_ppe_vp_entries: Number of PPE VP entries
 * @ipa_bank_id: TCL bank id used by IPA
 */
struct dp_soc_be {
	struct dp_soc soc;
	uint8_t num_bank_profiles;
#if defined(WLAN_MAX_PDEVS) && (WLAN_MAX_PDEVS == 1)
	qdf_mutex_t tx_bank_lock;
#else
	qdf_spinlock_t tx_bank_lock;
#endif
	struct dp_tx_bank_profile *bank_profiles;
	struct dp_spt_page_desc *page_desc_base;
	uint32_t cc_cmem_base;
	struct dp_hw_cookie_conversion_t tx_cc_ctx[MAX_TXDESC_POOLS];
	struct dp_hw_cookie_conversion_t rx_cc_ctx[MAX_RXDESC_POOLS];
#ifdef WLAN_SUPPORT_PPEDS
	struct dp_srng reo2ppe_ring;
	struct dp_srng ppe2tcl_ring;
	struct dp_srng ppe_release_ring;
	struct dp_ppe_vp_tbl_entry *ppe_vp_tbl;
	qdf_mutex_t ppe_vp_tbl_lock;
	uint8_t num_ppe_vp_entries;
#endif
#ifdef WLAN_FEATURE_11BE_MLO
#ifdef WLAN_MLO_MULTI_CHIP
	uint8_t mlo_enabled;
	uint8_t mlo_chip_id;
	struct dp_mlo_ctxt *ml_ctxt;
	uint64_t delta_tqm;
	uint64_t mlo_tstamp_offset;
#else
	/* Protect mld peer hash table */
	DP_MUTEX_TYPE mld_peer_hash_lock;
	struct {
		uint32_t mask;
		uint32_t idx_bits;

		TAILQ_HEAD(, dp_peer) * bins;
	} mld_peer_hash;
#endif
#endif
#ifdef IPA_OFFLOAD
	int8_t ipa_bank_id;
#endif
};

/* convert struct dp_soc_be pointer to struct dp_soc pointer */
#define DP_SOC_BE_GET_SOC(be_soc) ((struct dp_soc *)be_soc)

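/*
 * The BE soc embeds the common dp_soc as its first member, so the two
 * views convert with simple casts, e.g. (illustrative):
 *
 *   struct dp_soc_be *be_soc = dp_get_be_soc_from_dp_soc(soc);
 *   struct dp_soc *base_soc = DP_SOC_BE_GET_SOC(be_soc);
 */
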
/**
 * struct dp_pdev_be - Extended DP pdev for BE targets
 * @pdev: dp pdev structure
 * @mlo_link_id: MLO link id for PDEV
 * @delta_tsf2: delta_tsf2
 */
struct dp_pdev_be {
	struct dp_pdev pdev;
#ifdef WLAN_MLO_MULTI_CHIP
	uint8_t mlo_link_id;
	uint64_t delta_tsf2;
#endif
};

/**
 * struct dp_vdev_be - Extended DP vdev for BE targets
 * @vdev: dp vdev structure
 * @bank_id: bank_id to be used for TX
 * @vdev_id_check_en: flag if HW vdev_id check is enabled for vdev
 * @partner_vdev_list: partner vdev list used for Intra-BSS
 * @seq_num: DP MLO seq number
 * @mcast_primary: MLO Mcast primary vdev
 * @ppe_vp_enabled: flag to check if PPE VP is enabled for vdev
 * @ppe_vp_profile: PPE VP profile
 */
struct dp_vdev_be {
	struct dp_vdev vdev;
	int8_t bank_id;
	uint8_t vdev_id_check_en;
#ifdef WLAN_MLO_MULTI_CHIP
	/* partner list used for Intra-BSS */
	uint8_t partner_vdev_list[WLAN_MAX_MLO_CHIPS][WLAN_MAX_MLO_LINKS_PER_SOC];
#ifdef WLAN_FEATURE_11BE_MLO
#ifdef WLAN_MCAST_MLO
	/* DP MLO seq number */
	uint16_t seq_num;
	/* MLO Mcast primary vdev */
	bool mcast_primary;
#endif
#endif
#endif
	unsigned long ppe_vp_enabled;
#ifdef WLAN_SUPPORT_PPEDS
	struct dp_ppe_vp_profile ppe_vp_profile;
#endif
};

/**
 * struct dp_peer_be - Extended DP peer for BE targets
 * @peer: dp peer structure
 */
struct dp_peer_be {
	struct dp_peer peer;
};

/**
 * dp_get_soc_context_size_be() - get context size for target specific DP soc
 *
 * Return: value in bytes for BE specific soc structure
 */
qdf_size_t dp_get_soc_context_size_be(void);

/**
 * dp_initialize_arch_ops_be() - initialize BE specific arch ops
 * @arch_ops: arch ops pointer
 *
 * Return: none
 */
void dp_initialize_arch_ops_be(struct dp_arch_ops *arch_ops);

/**
 * dp_get_context_size_be() - get BE specific size for peer/vdev/pdev/soc
 * @context_type: DP context type for which the size is needed
 *
 * Return: size in bytes for the context_type
 */
qdf_size_t dp_get_context_size_be(enum dp_context_type context_type);

/**
 * dp_get_be_soc_from_dp_soc() - get dp_soc_be from dp_soc
 * @soc: dp_soc pointer
 *
 * Return: dp_soc_be pointer
 */
static inline struct dp_soc_be *dp_get_be_soc_from_dp_soc(struct dp_soc *soc)
{
	return (struct dp_soc_be *)soc;
}

#ifdef WLAN_MLO_MULTI_CHIP
typedef struct dp_mlo_ctxt *dp_mld_peer_hash_obj_t;

/*
 * dp_mlo_get_peer_hash_obj() - return the container struct of MLO hash table
 *
 * @soc: soc handle
 *
 * return: MLD peer hash object
 */
static inline dp_mld_peer_hash_obj_t
dp_mlo_get_peer_hash_obj(struct dp_soc *soc)
{
	struct dp_soc_be *be_soc = dp_get_be_soc_from_dp_soc(soc);

	return be_soc->ml_ctxt;
}

void dp_clr_mlo_ptnr_list(struct dp_soc *soc, struct dp_vdev *vdev);

#if defined(WLAN_FEATURE_11BE_MLO)
/**
 * dp_mlo_partner_chips_map() - Map MLO peers to partner SOCs
 * @soc: Soc handle
 * @peer: DP peer handle for ML peer
 * @peer_id: peer_id
 * Return: None
 */
void dp_mlo_partner_chips_map(struct dp_soc *soc,
			      struct dp_peer *peer,
			      uint16_t peer_id);

/**
 * dp_mlo_partner_chips_unmap() - Unmap MLO peers from partner SOCs
 * @soc: Soc handle
 * @peer_id: peer_id
 * Return: None
 */
void dp_mlo_partner_chips_unmap(struct dp_soc *soc,
				uint16_t peer_id);

#ifdef WLAN_MCAST_MLO
typedef void dp_ptnr_vdev_iter_func(struct dp_vdev_be *be_vdev,
				    struct dp_vdev *ptnr_vdev,
				    void *arg);
typedef void dp_ptnr_soc_iter_func(struct dp_soc *ptnr_soc,
				   void *arg);
/*
 * dp_mcast_mlo_iter_ptnr_vdev() - API to iterate through the partner vdev list
 * @be_soc: dp_soc_be pointer
 * @be_vdev: dp_vdev_be pointer
 * @func: function to be called for each partner vdev
 * @arg: argument to be passed to func
 * @mod_id: module id
 *
 * Return: None
 */
void dp_mcast_mlo_iter_ptnr_vdev(struct dp_soc_be *be_soc,
				 struct dp_vdev_be *be_vdev,
				 dp_ptnr_vdev_iter_func func,
				 void *arg,
				 enum dp_mod_id mod_id);
/*
 * dp_mcast_mlo_iter_ptnr_soc() - API to iterate through the partner soc list
 * @be_soc: dp_soc_be pointer
 * @func: function to be called for each partner soc
 * @arg: argument to be passed to func
 *
 * Return: None
 */
void dp_mcast_mlo_iter_ptnr_soc(struct dp_soc_be *be_soc,
				dp_ptnr_soc_iter_func func,
				void *arg);
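/*
 * Illustrative iteration (the callback, counter and DP_MOD_ID_TX module
 * id are assumptions for the sketch):
 *
 *   static void dp_count_ptnr_vdev(struct dp_vdev_be *be_vdev,
 *                                  struct dp_vdev *ptnr_vdev,
 *                                  void *arg)
 *   {
 *           (*(uint32_t *)arg)++;
 *   }
 *
 *   uint32_t cnt = 0;
 *
 *   dp_mcast_mlo_iter_ptnr_vdev(be_soc, be_vdev, dp_count_ptnr_vdev,
 *                               &cnt, DP_MOD_ID_TX);
 */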
/*
 * dp_mlo_get_mcast_primary_vdev() - get ref to mcast primary vdev
 * @be_soc: dp_soc_be pointer
 * @be_vdev: dp_vdev_be pointer
 * @mod_id: module id
 *
 * Return: mcast primary DP VDEV handle on success, NULL on failure
 */
struct dp_vdev *dp_mlo_get_mcast_primary_vdev(struct dp_soc_be *be_soc,
					      struct dp_vdev_be *be_vdev,
					      enum dp_mod_id mod_id);
#endif
#endif

#else
typedef struct dp_soc_be *dp_mld_peer_hash_obj_t;

static inline dp_mld_peer_hash_obj_t
dp_mlo_get_peer_hash_obj(struct dp_soc *soc)
{
	return dp_get_be_soc_from_dp_soc(soc);
}

static inline void dp_clr_mlo_ptnr_list(struct dp_soc *soc,
					struct dp_vdev *vdev)
{
}
#endif

/*
 * dp_mlo_peer_find_hash_attach_be() - API to initialize ML peer hash table
 *
 * @mld_hash_obj: Peer hash object
 * @hash_elems: number of entries in hash table
 *
 * return: QDF_STATUS_SUCCESS when attach is successful, else failure status
 */
QDF_STATUS
dp_mlo_peer_find_hash_attach_be(dp_mld_peer_hash_obj_t mld_hash_obj,
				int hash_elems);

/*
 * dp_mlo_peer_find_hash_detach_be() - API to de-initialize ML peer hash table
 *
 * @mld_hash_obj: Peer hash object
 *
 * return: void
 */
void dp_mlo_peer_find_hash_detach_be(dp_mld_peer_hash_obj_t mld_hash_obj);

/**
 * dp_get_be_pdev_from_dp_pdev() - get dp_pdev_be from dp_pdev
 * @pdev: dp_pdev pointer
 *
 * Return: dp_pdev_be pointer
 */
static inline
struct dp_pdev_be *dp_get_be_pdev_from_dp_pdev(struct dp_pdev *pdev)
{
	return (struct dp_pdev_be *)pdev;
}

/**
 * dp_get_be_vdev_from_dp_vdev() - get dp_vdev_be from dp_vdev
 * @vdev: dp_vdev pointer
 *
 * Return: dp_vdev_be pointer
 */
static inline
struct dp_vdev_be *dp_get_be_vdev_from_dp_vdev(struct dp_vdev *vdev)
{
	return (struct dp_vdev_be *)vdev;
}

/**
 * dp_get_be_peer_from_dp_peer() - get dp_peer_be from dp_peer
 * @peer: dp_peer pointer
 *
 * Return: dp_peer_be pointer
 */
static inline
struct dp_peer_be *dp_get_be_peer_from_dp_peer(struct dp_peer *peer)
{
	return (struct dp_peer_be *)peer;
}

QDF_STATUS
dp_hw_cookie_conversion_attach(struct dp_soc_be *be_soc,
			       struct dp_hw_cookie_conversion_t *cc_ctx,
			       uint32_t num_descs,
			       enum dp_desc_type desc_type,
			       uint8_t desc_pool_id);

QDF_STATUS
dp_hw_cookie_conversion_detach(struct dp_soc_be *be_soc,
			       struct dp_hw_cookie_conversion_t *cc_ctx);
QDF_STATUS
dp_hw_cookie_conversion_init(struct dp_soc_be *be_soc,
			     struct dp_hw_cookie_conversion_t *cc_ctx);
QDF_STATUS
dp_hw_cookie_conversion_deinit(struct dp_soc_be *be_soc,
			       struct dp_hw_cookie_conversion_t *cc_ctx);
/**
 * dp_cc_spt_page_desc_alloc() - allocate SPT DDR page descriptors from pool
 * @be_soc: beryllium soc handler
 * @list_head: pointer to page desc head
 * @list_tail: pointer to page desc tail
 * @num_desc: number of TX/RX Descs required for SPT pages
 *
 * Return: number of SPT page Descs allocated
 */
uint16_t dp_cc_spt_page_desc_alloc(struct dp_soc_be *be_soc,
				   struct dp_spt_page_desc **list_head,
				   struct dp_spt_page_desc **list_tail,
				   uint16_t num_desc);
/**
 * dp_cc_spt_page_desc_free() - free SPT DDR page descriptors back to pool
 * @be_soc: beryllium soc handler
 * @list_head: pointer to page desc head
 * @list_tail: pointer to page desc tail
 * @page_nums: number of page descs freed back to pool
 */
void dp_cc_spt_page_desc_free(struct dp_soc_be *be_soc,
			      struct dp_spt_page_desc **list_head,
			      struct dp_spt_page_desc **list_tail,
			      uint16_t page_nums);

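/*
 * Typical alloc/free pairing (a sketch with hypothetical locals):
 *
 *   struct dp_spt_page_desc *head = NULL, *tail = NULL;
 *   uint16_t pages;
 *
 *   pages = dp_cc_spt_page_desc_alloc(be_soc, &head, &tail, num_desc);
 *   ...
 *   dp_cc_spt_page_desc_free(be_soc, &head, &tail, pages);
 */
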
/**
 * dp_cc_desc_id_generate() - generate SW cookie ID according to
 *			      DDR page 4K aligned or not
 * @ppt_index: offset index in primary page table
 * @spt_index: offset index in secondary DDR page
 *
 * Generate SW cookie ID to match what HW expects
 *
 * Return: cookie ID
 */
static inline uint32_t dp_cc_desc_id_generate(uint32_t ppt_index,
					      uint16_t spt_index)
{
	/*
	 * For the 4K-aligned case, the CMEM entry size is 4 bytes:
	 * the HW index in bit19~bit10 is ppt_index / 2 and the high-32-bits
	 * flag in bit9 is ppt_index % 2, so bit19~bit9 together carry
	 * exactly the original ppt_index value.
	 * For the 4K-unaligned case, the CMEM entry size is 8 bytes and
	 * bit19~bit9 hold the HW index directly, same as ppt_index.
	 */
	return ((((uint32_t)ppt_index) << DP_CC_DESC_ID_PPT_PAGE_OS_SHIFT) |
		spt_index);
}

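/*
 * Example (illustrative values): ppt_index = 5, spt_index = 3 gives
 * desc_id = (5 << 9) | 3 = 0xA03, which dp_cc_desc_find() below splits
 * back into the same PPT/SPT offsets.
 */
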
/**
 * dp_cc_desc_find() - find TX/RX Desc virtual address by ID
 * @soc: dp soc handle
 * @desc_id: TX/RX Desc ID
 *
 * Return: TX/RX Desc virtual address
 */
static inline uintptr_t dp_cc_desc_find(struct dp_soc *soc,
					uint32_t desc_id)
{
	struct dp_soc_be *be_soc;
	uint16_t ppt_page_id, spt_va_id;
	uint8_t *spt_page_va;

	be_soc = dp_get_be_soc_from_dp_soc(soc);
	ppt_page_id = (desc_id & DP_CC_DESC_ID_PPT_PAGE_OS_MASK) >>
			DP_CC_DESC_ID_PPT_PAGE_OS_SHIFT;

	spt_va_id = (desc_id & DP_CC_DESC_ID_SPT_VA_OS_MASK) >>
			DP_CC_DESC_ID_SPT_VA_OS_SHIFT;

	/*
	 * The PPT index in CMEM follows the same order in which the pages
	 * were added to the page desc array during initialization.
	 * Each entry in the DDR page is 64 bits; on a 32-bit system only
	 * the lower 32 bits of the VA are used.
	 */
	spt_page_va = be_soc->page_desc_base[ppt_page_id].page_v_addr;

	return (*((uintptr_t *)(spt_page_va +
				spt_va_id * DP_CC_HW_READ_BYTES)));
}

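/*
 * Illustrative lookup in a TX completion path (the cast target
 * struct dp_tx_desc_s and the local names are assumptions):
 *
 *   struct dp_tx_desc_s *tx_desc;
 *
 *   tx_desc = (struct dp_tx_desc_s *)dp_cc_desc_find(soc, desc_id);
 *   if (!tx_desc)
 *           return;
 */
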
#ifdef WLAN_FEATURE_NEAR_FULL_IRQ
/**
 * enum dp_srng_near_full_levels - SRNG Near FULL levels
 * @DP_SRNG_THRESH_SAFE: SRNG level safe for yielding the near full mode
 *		of processing the entries in SRNG
 * @DP_SRNG_THRESH_NEAR_FULL: SRNG level enters the near full mode
 *		of processing the entries in SRNG
 * @DP_SRNG_THRESH_CRITICAL: SRNG level enters the critical level of full
 *		condition and drastic steps need to be taken for processing
 *		the entries in SRNG
 */
enum dp_srng_near_full_levels {
	DP_SRNG_THRESH_SAFE,
	DP_SRNG_THRESH_NEAR_FULL,
	DP_SRNG_THRESH_CRITICAL,
};

/**
 * dp_srng_check_ring_near_full() - Check if SRNG is marked as near-full from
 *				its corresponding near-full irq handler
 * @soc: Datapath SoC handle
 * @dp_srng: datapath handle for this SRNG
 *
 * Return: 1, if the srng was marked as near-full
 *	   0, if the srng was not marked as near-full
 */
static inline int dp_srng_check_ring_near_full(struct dp_soc *soc,
					       struct dp_srng *dp_srng)
{
	return qdf_atomic_read(&dp_srng->near_full);
}

/**
 * dp_srng_get_near_full_level() - Check the num available entries in the
 *			consumer srng and return the level of the srng
 *			near full state.
 * @soc: Datapath SoC Handle [To be validated by the caller]
 * @dp_srng: Datapath handle for the srng
 *
 * Return: near-full level
 */
static inline int
dp_srng_get_near_full_level(struct dp_soc *soc, struct dp_srng *dp_srng)
{
	uint32_t num_valid;

	num_valid = hal_srng_dst_num_valid_nolock(soc->hal_soc,
						  dp_srng->hal_srng,
						  true);

	if (num_valid > dp_srng->crit_thresh)
		return DP_SRNG_THRESH_CRITICAL;
	else if (num_valid < dp_srng->safe_thresh)
		return DP_SRNG_THRESH_SAFE;
	else
		return DP_SRNG_THRESH_NEAR_FULL;
}

#define DP_SRNG_PER_LOOP_NF_REAP_MULTIPLIER	2

/**
 * _dp_srng_test_and_update_nf_params() - Test the near full level and update
 *			the reap_limit and flags to reflect the state.
 * @soc: Datapath soc handle
 * @srng: Datapath handle for the srng
 * @max_reap_limit: [Output Param] Buffer to set the max_reap_limit as
 *			per the near-full state
 *
 * Return: 1, if the srng is near full
 *	   0, if the srng is not near full
 */
static inline int
_dp_srng_test_and_update_nf_params(struct dp_soc *soc,
				   struct dp_srng *srng,
				   int *max_reap_limit)
{
	int ring_near_full = 0, near_full_level;

	if (dp_srng_check_ring_near_full(soc, srng)) {
		near_full_level = dp_srng_get_near_full_level(soc, srng);
		switch (near_full_level) {
		case DP_SRNG_THRESH_CRITICAL:
			/* Currently not doing anything special here */
			fallthrough;
		case DP_SRNG_THRESH_NEAR_FULL:
			ring_near_full = 1;
			*max_reap_limit *= DP_SRNG_PER_LOOP_NF_REAP_MULTIPLIER;
			break;
		case DP_SRNG_THRESH_SAFE:
			qdf_atomic_set(&srng->near_full, 0);
			ring_near_full = 0;
			break;
		default:
			qdf_assert(0);
			break;
		}
	}

	return ring_near_full;
}
#else
static inline int
_dp_srng_test_and_update_nf_params(struct dp_soc *soc,
				   struct dp_srng *srng,
				   int *max_reap_limit)
{
	return 0;
}
#endif
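
/*
 * Illustrative caller pattern (a sketch; the base reap quota of 64 is an
 * assumed value):
 *
 *   int max_reap_limit = 64;
 *   int near_full;
 *
 *   near_full = _dp_srng_test_and_update_nf_params(soc, srng,
 *                                                  &max_reap_limit);
 *
 * When near_full is returned as 1, the caller reaps up to the boosted
 * max_reap_limit entries before yielding.
 */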

/**
 * dp_desc_pool_get_cmem_base() - get CMEM base offset for a descriptor pool
 * @chip_id: chip id (used to spread RX pools across MLO chips)
 * @desc_pool_id: descriptor pool id
 * @desc_type: TX or RX descriptor type
 *
 * Return: CMEM offset of the cookie conversion area for the given pool
 */
static inline
uint32_t dp_desc_pool_get_cmem_base(uint8_t chip_id, uint8_t desc_pool_id,
				    enum dp_desc_type desc_type)
{
	switch (desc_type) {
	case DP_TX_DESC_TYPE:
		return (DP_TX_DESC_CMEM_OFFSET +
			(desc_pool_id * DP_TX_DESC_POOL_CMEM_SIZE));
	case DP_RX_DESC_BUF_TYPE:
		return (DP_RX_DESC_CMEM_OFFSET +
			((chip_id * MAX_RXDESC_POOLS) + desc_pool_id) *
			DP_RX_DESC_POOL_CMEM_SIZE);
	default:
		QDF_BUG(0);
	}
	return 0;
}

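/*
 * Example (follows directly from the macros above): chip_id = 0,
 * desc_pool_id = 1 and DP_TX_DESC_TYPE returns
 * DP_TX_DESC_CMEM_OFFSET + 1 * DP_TX_DESC_POOL_CMEM_SIZE, i.e. the
 * second TX pool's cookie conversion area in CMEM.
 */
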
#ifndef WLAN_MLO_MULTI_CHIP
static inline
void dp_soc_mlo_fill_params(struct dp_soc *soc,
			    struct cdp_soc_attach_params *params)
{
}

static inline
void dp_pdev_mlo_fill_params(struct dp_pdev *pdev,
			     struct cdp_pdev_attach_params *params)
{
}

static inline
void dp_mlo_update_link_to_pdev_map(struct dp_soc *soc, struct dp_pdev *pdev)
{
}

static inline
void dp_mlo_update_link_to_pdev_unmap(struct dp_soc *soc, struct dp_pdev *pdev)
{
}
#endif

/*
 * dp_txrx_set_vdev_param_be() - target specific ops while setting vdev params
 * @soc: DP soc handle
 * @vdev: pointer to vdev structure
 * @param: parameter type to set value for
 * @val: value to be set
 *
 * return: QDF_STATUS
 */
QDF_STATUS dp_txrx_set_vdev_param_be(struct dp_soc *soc,
				     struct dp_vdev *vdev,
				     enum cdp_vdev_param_type param,
				     cdp_config_param_type val);

#endif