/*
 * Copyright (c) 2021 The Linux Foundation. All rights reserved.
 * Copyright (c) 2021 Qualcomm Innovation Center, Inc. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */
#ifndef __DP_BE_H
#define __DP_BE_H

#include <dp_types.h>
#include <hal_be_tx.h>
#ifdef WLAN_MLO_MULTI_CHIP
#include "mlo/dp_mlo.h"
#else
#include <dp_peer.h>
#endif

/* maximum number of entries in one page of secondary page table */
#define DP_CC_SPT_PAGE_MAX_ENTRIES 512

/* mask to get entry index within one page of secondary page table */
#define DP_CC_SPT_PAGE_MAX_ENTRIES_MASK (DP_CC_SPT_PAGE_MAX_ENTRIES - 1)

/* maximum number of entries in primary page table */
#define DP_CC_PPT_MAX_ENTRIES 1024

/* cookie conversion required CMEM offset from CMEM pool */
#define DP_CC_MEM_OFFSET_IN_CMEM 0

/* cookie conversion primary page table size 4K */
#define DP_CC_PPT_MEM_SIZE 4096

/* FST required CMEM offset from CMEM pool */
#define DP_FST_MEM_OFFSET_IN_CMEM \
	(DP_CC_MEM_OFFSET_IN_CMEM + DP_CC_PPT_MEM_SIZE)

/* lower 9 bits in Desc ID for offset in page of SPT */
#define DP_CC_DESC_ID_SPT_VA_OS_SHIFT 0

#define DP_CC_DESC_ID_SPT_VA_OS_MASK 0x1FF

#define DP_CC_DESC_ID_SPT_VA_OS_LSB 0

#define DP_CC_DESC_ID_SPT_VA_OS_MSB 8

/* higher 11 bits in Desc ID for offset in CMEM of PPT */
#define DP_CC_DESC_ID_PPT_PAGE_OS_LSB 9

#define DP_CC_DESC_ID_PPT_PAGE_OS_MSB 19

#define DP_CC_DESC_ID_PPT_PAGE_OS_SHIFT 9

#define DP_CC_DESC_ID_PPT_PAGE_OS_MASK 0xFFE00

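/*
 * Illustrative example (not part of the driver code): a SW cookie built
 * from ppt_index = 3 and spt_index = 0x25 is
 *	desc_id = (3 << 9) | 0x25 = 0x625
 * and decodes back as
 *	ppt_index = (0x625 & 0xFFE00) >> 9 = 3
 *	spt_index = 0x625 & 0x1FF = 0x25
 */
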
/*
 * page 4K unaligned case, single SPT page physical address
 * needs 8 bytes in PPT
 */
#define DP_CC_PPT_ENTRY_SIZE_4K_UNALIGNED 8
/*
 * page 4K aligned case, single SPT page physical address
 * needs 4 bytes in PPT
 */
#define DP_CC_PPT_ENTRY_SIZE_4K_ALIGNED 4

/* 4K aligned case, number of bits HW appends for one PPT entry value */
#define DP_CC_PPT_ENTRY_HW_APEND_BITS_4K_ALIGNED 12

#if defined(WLAN_MAX_PDEVS) && (WLAN_MAX_PDEVS == 1)
/* WBM2SW ring id for rx release */
#define WBM2SW_REL_ERR_RING_NUM 3
#else
/* WBM2SW ring id for rx release */
#define WBM2SW_REL_ERR_RING_NUM 5
#endif

/* tx descriptors are programmed at the start of the CMEM region */
#define DP_TX_DESC_CMEM_OFFSET	0

/* size of CMEM needed for a tx desc pool */
#define DP_TX_DESC_POOL_CMEM_SIZE \
	((WLAN_CFG_NUM_TX_DESC_MAX / DP_CC_SPT_PAGE_MAX_ENTRIES) * \
	 DP_CC_PPT_ENTRY_SIZE_4K_ALIGNED)

/* Offset of rx descriptor pool */
#define DP_RX_DESC_CMEM_OFFSET \
	(DP_TX_DESC_CMEM_OFFSET + \
	 (MAX_TXDESC_POOLS * DP_TX_DESC_POOL_CMEM_SIZE))

/* size of CMEM needed for a rx desc pool */
#define DP_RX_DESC_POOL_CMEM_SIZE \
	((WLAN_CFG_RX_SW_DESC_NUM_SIZE_MAX / DP_CC_SPT_PAGE_MAX_ENTRIES) * \
	 DP_CC_PPT_ENTRY_SIZE_4K_ALIGNED)

/* get ppt_id from CMEM_OFFSET */
#define DP_CMEM_OFFSET_TO_PPT_ID(offset) \
	((offset) / DP_CC_PPT_ENTRY_SIZE_4K_ALIGNED)
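
/*
 * Illustrative sizing (hypothetical numbers, the real values come from
 * wlan_cfg): if WLAN_CFG_NUM_TX_DESC_MAX were 16384, one TX desc pool
 * would need (16384 / 512) * 4 = 128 bytes of CMEM, and a pool placed at
 * CMEM offset 256 would start at PPT page
 * DP_CMEM_OFFSET_TO_PPT_ID(256) = 256 / 4 = 64.
 */
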
/**
 * struct dp_spt_page_desc - secondary page table page descriptor
 * @page_v_addr: page virtual address
 * @page_p_addr: page physical address
 * @ppt_index: entry index in primary page table where this page's
 *	       physical address is stored
 */
struct dp_spt_page_desc {
	uint8_t *page_v_addr;
	qdf_dma_addr_t page_p_addr;
	uint32_t ppt_index;
};

/**
 * struct dp_hw_cookie_conversion_t - main context for HW cookie conversion
 * @cmem_offset: CMEM offset from base address for primary page table setup
 * @total_page_num: total DDR pages allocated
 * @page_desc_base: page desc buffer base address
 * @page_pool: DDR pages pool
 * @cc_lock: lock for page acquire/free
 */
struct dp_hw_cookie_conversion_t {
	uint32_t cmem_offset;
	uint32_t total_page_num;
	struct dp_spt_page_desc *page_desc_base;
	struct qdf_mem_multi_page_t page_pool;
	qdf_spinlock_t cc_lock;
};

/**
 * struct dp_spt_page_desc_list - container of SPT page desc list info
 * @spt_page_list_head: head of SPT page descriptor list
 * @spt_page_list_tail: tail of SPT page descriptor list
 * @num_spt_pages: number of SPT page descriptors allocated
 */
struct dp_spt_page_desc_list {
	struct dp_spt_page_desc *spt_page_list_head;
	struct dp_spt_page_desc *spt_page_list_tail;
	uint16_t num_spt_pages;
};

/* HW reads 8 bytes for VA */
#define DP_CC_HW_READ_BYTES 8
#define DP_CC_SPT_PAGE_UPDATE_VA(_page_base_va, _index, _desc_va) \
	{ *((uintptr_t *)((_page_base_va) + (_index) * DP_CC_HW_READ_BYTES)) \
	= (uintptr_t)(_desc_va); }

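/*
 * Illustrative usage sketch (hypothetical variables, not part of the
 * driver code): publish a TX descriptor virtual address into slot 10 of
 * an SPT page so that HW cookie conversion can return it later:
 *
 *	struct dp_spt_page_desc *page_desc = ...;
 *	struct dp_tx_desc_s *tx_desc = ...;
 *
 *	DP_CC_SPT_PAGE_UPDATE_VA(page_desc->page_v_addr, 10, tx_desc);
 */
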
/**
 * struct dp_tx_bank_profile - DP wrapper for TCL banks
 * @is_configured: flag indicating if this bank is configured
 * @ref_count: ref count indicating number of users of the bank
 * @bank_config: HAL TX bank configuration
 */
struct dp_tx_bank_profile {
	uint8_t is_configured;
	qdf_atomic_t ref_count;
	union hal_tx_bank_config bank_config;
};

/**
 * struct dp_soc_be - Extended DP soc for BE targets
 * @soc: dp soc structure
 * @num_bank_profiles: num TX bank profiles
 * @tx_bank_lock: lock protecting the TX bank profile table
 * @bank_profiles: bank profiles for various TX banks
 * @page_desc_base: base address of the SPT page descriptor array
 * @cc_cmem_base: cmem offset reserved for CC
 * @tx_cc_ctx: Cookie conversion context for tx desc pools
 * @rx_cc_ctx: Cookie conversion context for rx desc pools
 * @reo2ppe_ring: REO to PPE ring
 * @ppe2tcl_ring: PPE to TCL ring
 * @ppe_release_ring: PPE release ring
 * @monitor_soc_be: BE specific monitor object
 * @mlo_enabled: Flag to indicate MLO is enabled or not
 * @mlo_chip_id: MLO chip_id
 * @ml_ctxt: pointer to global ml_context
 * @mld_peer_hash_lock: lock to protect mld_peer_hash
 * @mld_peer_hash: peer hash table for ML peers
 */
struct dp_soc_be {
	struct dp_soc soc;
	uint8_t num_bank_profiles;
#if defined(WLAN_MAX_PDEVS) && (WLAN_MAX_PDEVS == 1)
	qdf_mutex_t tx_bank_lock;
#else
	qdf_spinlock_t tx_bank_lock;
#endif
	struct dp_tx_bank_profile *bank_profiles;
	struct dp_spt_page_desc *page_desc_base;
	uint32_t cc_cmem_base;
	struct dp_hw_cookie_conversion_t tx_cc_ctx[MAX_TXDESC_POOLS];
	struct dp_hw_cookie_conversion_t rx_cc_ctx[MAX_RXDESC_POOLS];
#ifdef WLAN_SUPPORT_PPEDS
	struct dp_srng reo2ppe_ring;
	struct dp_srng ppe2tcl_ring;
	struct dp_srng ppe_release_ring;
#endif
#if !defined(DISABLE_MON_CONFIG)
	struct dp_mon_soc_be *monitor_soc_be;
#endif
#ifdef WLAN_FEATURE_11BE_MLO
#ifdef WLAN_MLO_MULTI_CHIP
	uint8_t mlo_enabled;
	uint8_t mlo_chip_id;
	struct dp_mlo_ctxt *ml_ctxt;
#else
	/* Protect mld peer hash table */
	DP_MUTEX_TYPE mld_peer_hash_lock;
	struct {
		uint32_t mask;
		uint32_t idx_bits;

		TAILQ_HEAD(, dp_peer) * bins;
	} mld_peer_hash;
#endif
#endif
};

/* convert struct dp_soc_be pointer to struct dp_soc pointer */
#define DP_SOC_BE_GET_SOC(be_soc) ((struct dp_soc *)be_soc)

/**
 * struct dp_pdev_be - Extended DP pdev for BE targets
 * @pdev: dp pdev structure
 * @monitor_pdev_be: BE specific monitor object
 * @mlo_link_id: MLO link id for PDEV
 */
struct dp_pdev_be {
	struct dp_pdev pdev;
#if !defined(DISABLE_MON_CONFIG)
	struct dp_mon_pdev_be *monitor_pdev_be;
#endif
#ifdef WLAN_MLO_MULTI_CHIP
	uint8_t mlo_link_id;
#endif
};

/**
 * struct dp_vdev_be - Extended DP vdev for BE targets
 * @vdev: dp vdev structure
 * @bank_id: bank_id to be used for TX
 * @vdev_id_check_en: flag if HW vdev_id check is enabled for vdev
 */
struct dp_vdev_be {
	struct dp_vdev vdev;
	int8_t bank_id;
	uint8_t vdev_id_check_en;
};

/**
 * struct dp_peer_be - Extended DP peer for BE targets
 * @peer: dp peer structure
 */
struct dp_peer_be {
	struct dp_peer peer;
};

/**
 * dp_get_soc_context_size_be() - get context size for target specific DP soc
 *
 * Return: value in bytes for BE specific soc structure
 */
qdf_size_t dp_get_soc_context_size_be(void);

/**
 * dp_initialize_arch_ops_be() - initialize BE specific arch ops
 * @arch_ops: arch ops pointer
 *
 * Return: none
 */
void dp_initialize_arch_ops_be(struct dp_arch_ops *arch_ops);

/**
 * dp_get_context_size_be() - get BE specific size for peer/vdev/pdev/soc
 * @context_type: DP context type for which the size is needed
 *
 * Return: size in bytes for the context_type
 */
qdf_size_t dp_get_context_size_be(enum dp_context_type context_type);

/**
 * dp_get_be_soc_from_dp_soc() - get dp_soc_be from dp_soc
 * @soc: dp_soc pointer
 *
 * Return: dp_soc_be pointer
 */
static inline struct dp_soc_be *dp_get_be_soc_from_dp_soc(struct dp_soc *soc)
{
	return (struct dp_soc_be *)soc;
}

#ifdef WLAN_MLO_MULTI_CHIP
typedef struct dp_mlo_ctxt *dp_mld_peer_hash_obj_t;

/**
 * dp_mlo_get_peer_hash_obj() - return the container struct of MLO hash table
 * @soc: soc handle
 *
 * Return: MLD peer hash object
 */
static inline dp_mld_peer_hash_obj_t
dp_mlo_get_peer_hash_obj(struct dp_soc *soc)
{
	struct dp_soc_be *be_soc = dp_get_be_soc_from_dp_soc(soc);

	return be_soc->ml_ctxt;
}

#else
typedef struct dp_soc_be *dp_mld_peer_hash_obj_t;

static inline dp_mld_peer_hash_obj_t
dp_mlo_get_peer_hash_obj(struct dp_soc *soc)
{
	return dp_get_be_soc_from_dp_soc(soc);
}
#endif

/**
 * dp_mlo_peer_find_hash_attach_be() - API to initialize ML peer hash table
 * @mld_hash_obj: peer hash object
 * @hash_elems: number of entries in hash table
 *
 * Return: QDF_STATUS_SUCCESS when attach succeeds, else QDF_STATUS_FAILURE
 */
QDF_STATUS
dp_mlo_peer_find_hash_attach_be(dp_mld_peer_hash_obj_t mld_hash_obj,
				int hash_elems);

/**
 * dp_mlo_peer_find_hash_detach_be() - API to de-initialize ML peer hash table
 * @mld_hash_obj: peer hash object
 *
 * Return: void
 */
void dp_mlo_peer_find_hash_detach_be(dp_mld_peer_hash_obj_t mld_hash_obj);

/**
 * dp_get_be_pdev_from_dp_pdev() - get dp_pdev_be from dp_pdev
 * @pdev: dp_pdev pointer
 *
 * Return: dp_pdev_be pointer
 */
static inline
struct dp_pdev_be *dp_get_be_pdev_from_dp_pdev(struct dp_pdev *pdev)
{
	return (struct dp_pdev_be *)pdev;
}

/**
 * dp_get_be_vdev_from_dp_vdev() - get dp_vdev_be from dp_vdev
 * @vdev: dp_vdev pointer
 *
 * Return: dp_vdev_be pointer
 */
static inline
struct dp_vdev_be *dp_get_be_vdev_from_dp_vdev(struct dp_vdev *vdev)
{
	return (struct dp_vdev_be *)vdev;
}

/**
 * dp_get_be_peer_from_dp_peer() - get dp_peer_be from dp_peer
 * @peer: dp_peer pointer
 *
 * Return: dp_peer_be pointer
 */
static inline
struct dp_peer_be *dp_get_be_peer_from_dp_peer(struct dp_peer *peer)
{
	return (struct dp_peer_be *)peer;
}

QDF_STATUS
dp_hw_cookie_conversion_attach(struct dp_soc_be *be_soc,
			       struct dp_hw_cookie_conversion_t *cc_ctx,
			       uint32_t num_descs,
			       enum dp_desc_type desc_type,
			       uint8_t desc_pool_id);

QDF_STATUS
dp_hw_cookie_conversion_detach(struct dp_soc_be *be_soc,
			       struct dp_hw_cookie_conversion_t *cc_ctx);
QDF_STATUS
dp_hw_cookie_conversion_init(struct dp_soc_be *be_soc,
			     struct dp_hw_cookie_conversion_t *cc_ctx);
QDF_STATUS
dp_hw_cookie_conversion_deinit(struct dp_soc_be *be_soc,
			       struct dp_hw_cookie_conversion_t *cc_ctx);
/**
 * dp_cc_spt_page_desc_alloc() - allocate SPT DDR page descriptors from pool
 * @be_soc: beryllium soc handle
 * @list_head: pointer to page desc head
 * @list_tail: pointer to page desc tail
 * @num_desc: number of TX/RX Descs required for SPT pages
 *
 * Return: number of SPT page Descs allocated
 */
uint16_t dp_cc_spt_page_desc_alloc(struct dp_soc_be *be_soc,
				   struct dp_spt_page_desc **list_head,
				   struct dp_spt_page_desc **list_tail,
				   uint16_t num_desc);
/**
 * dp_cc_spt_page_desc_free() - free SPT DDR page descriptors back to pool
 * @be_soc: beryllium soc handle
 * @list_head: pointer to page desc head
 * @list_tail: pointer to page desc tail
 * @page_nums: number of page descs freed back to pool
 *
 * Return: void
 */
void dp_cc_spt_page_desc_free(struct dp_soc_be *be_soc,
			      struct dp_spt_page_desc **list_head,
			      struct dp_spt_page_desc **list_tail,
			      uint16_t page_nums);

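/*
 * Illustrative usage sketch (hypothetical variables and counts): reserve
 * SPT pages for a pool of 1024 descriptors and release them again:
 *
 *	struct dp_spt_page_desc *head = NULL, *tail = NULL;
 *	uint16_t num_pages;
 *
 *	num_pages = dp_cc_spt_page_desc_alloc(be_soc, &head, &tail, 1024);
 *	...
 *	dp_cc_spt_page_desc_free(be_soc, &head, &tail, num_pages);
 */
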
/**
 * dp_cc_desc_id_generate() - generate SW cookie ID according to
 *			      whether the DDR page is 4K aligned or not
 * @ppt_index: offset index in primary page table
 * @spt_index: offset index in secondary DDR page
 *
 * Generate SW cookie ID to match what HW expects
 *
 * Return: cookie ID
 */
static inline uint32_t dp_cc_desc_id_generate(uint32_t ppt_index,
					      uint16_t spt_index)
{
	/*
	 * for the 4K-aligned case, the CMEM entry size is 4 bytes:
	 * the HW index in bit19~bit10 is ppt_index / 2 and the high-32-bits
	 * flag in bit9 is ppt_index % 2, so bit19~bit9 together are exactly
	 * the original ppt_index value.
	 * for the 4K-unaligned case, the CMEM entry size is 8 bytes:
	 * bit19~bit9 is the HW index value, same as ppt_index.
	 */
	return ((((uint32_t)ppt_index) << DP_CC_DESC_ID_PPT_PAGE_OS_SHIFT) |
		spt_index);
}

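/*
 * Illustrative example (not part of the driver code), 4K-aligned case:
 * for ppt_index = 5 the HW index (bit19~bit10) is 5 / 2 = 2 and the
 * high-32-bits flag (bit9) is 5 % 2 = 1, i.e. the page address sits in
 * the upper 4 bytes of that 8-byte CMEM location, while
 * dp_cc_desc_id_generate(5, 0x10) simply returns (5 << 9) | 0x10 = 0xA10.
 */
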
/**
 * dp_cc_desc_find() - find TX/RX Desc virtual address by ID
 * @soc: dp soc handle
 * @desc_id: TX/RX Desc ID
 *
 * Return: TX/RX Desc virtual address
 */
static inline uintptr_t dp_cc_desc_find(struct dp_soc *soc,
					uint32_t desc_id)
{
	struct dp_soc_be *be_soc;
	uint16_t ppt_page_id, spt_va_id;
	uint8_t *spt_page_va;

	be_soc = dp_get_be_soc_from_dp_soc(soc);
	ppt_page_id = (desc_id & DP_CC_DESC_ID_PPT_PAGE_OS_MASK) >>
			DP_CC_DESC_ID_PPT_PAGE_OS_SHIFT;

	spt_va_id = (desc_id & DP_CC_DESC_ID_SPT_VA_OS_MASK) >>
			DP_CC_DESC_ID_SPT_VA_OS_SHIFT;

	/*
	 * the PPT index in CMEM follows the same order in which the pages
	 * were added to the page desc array during initialization.
	 * the entry size in a DDR page is 64 bits; on a 32-bit system only
	 * the lower 32-bit VA value is needed.
	 */
	spt_page_va = be_soc->page_desc_base[ppt_page_id].page_v_addr;

	return (*((uintptr_t *)(spt_page_va +
				spt_va_id * DP_CC_HW_READ_BYTES)));
}

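/*
 * Illustrative round trip (hypothetical variables): the cookie generated
 * for a descriptor can be resolved back to its virtual address in SW the
 * same way HW cookie conversion does:
 *
 *	uint32_t id = dp_cc_desc_id_generate(page_desc->ppt_index, idx);
 *	struct dp_tx_desc_s *tx_desc =
 *			(struct dp_tx_desc_s *)dp_cc_desc_find(soc, id);
 *
 * which returns whatever VA was stored earlier with
 * DP_CC_SPT_PAGE_UPDATE_VA(page_desc->page_v_addr, idx, tx_desc).
 */
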
#ifdef WLAN_FEATURE_NEAR_FULL_IRQ
/**
 * enum dp_srng_near_full_levels - SRNG Near FULL levels
 * @DP_SRNG_THRESH_SAFE: SRNG level safe for yielding the near full mode
 *		of processing the entries in SRNG
 * @DP_SRNG_THRESH_NEAR_FULL: SRNG level enters the near full mode
 *		of processing the entries in SRNG
 * @DP_SRNG_THRESH_CRITICAL: SRNG level enters the critical level of full
 *		condition and drastic steps need to be taken for processing
 *		the entries in SRNG
 */
enum dp_srng_near_full_levels {
	DP_SRNG_THRESH_SAFE,
	DP_SRNG_THRESH_NEAR_FULL,
	DP_SRNG_THRESH_CRITICAL,
};

/**
 * dp_srng_check_ring_near_full() - Check if SRNG is marked as near-full from
 *				its corresponding near-full irq handler
 * @soc: Datapath SoC handle
 * @dp_srng: datapath handle for this SRNG
 *
 * Return: 1, if the srng was marked as near-full
 *	   0, if the srng was not marked as near-full
 */
static inline int dp_srng_check_ring_near_full(struct dp_soc *soc,
					       struct dp_srng *dp_srng)
{
	return qdf_atomic_read(&dp_srng->near_full);
}

/**
 * dp_srng_get_near_full_level() - Check the num available entries in the
 *			consumer srng and return the level of the srng
 *			near full state.
 * @soc: Datapath SoC Handle [To be validated by the caller]
 * @dp_srng: datapath handle for the SRNG
 *
 * Return: near-full level
 */
static inline int
dp_srng_get_near_full_level(struct dp_soc *soc, struct dp_srng *dp_srng)
{
	uint32_t num_valid;

	num_valid = hal_srng_dst_num_valid_nolock(soc->hal_soc,
						  dp_srng->hal_srng,
						  true);

	if (num_valid > dp_srng->crit_thresh)
		return DP_SRNG_THRESH_CRITICAL;
	else if (num_valid < dp_srng->safe_thresh)
		return DP_SRNG_THRESH_SAFE;
	else
		return DP_SRNG_THRESH_NEAR_FULL;
}

#define DP_SRNG_PER_LOOP_NF_REAP_MULTIPLIER	2

/**
 * _dp_srng_test_and_update_nf_params() - Test the near full level and update
 *			the reap_limit and flags to reflect the state.
 * @soc: Datapath soc handle
 * @srng: Datapath handle for the srng
 * @max_reap_limit: [Output Param] Buffer to set the max_reap_limit as
 *			per the near-full state
 *
 * Return: 1, if the srng is near full
 *	   0, if the srng is not near full
 */
static inline int
_dp_srng_test_and_update_nf_params(struct dp_soc *soc,
				   struct dp_srng *srng,
				   int *max_reap_limit)
{
	int ring_near_full = 0, near_full_level;

	if (dp_srng_check_ring_near_full(soc, srng)) {
		near_full_level = dp_srng_get_near_full_level(soc, srng);
		switch (near_full_level) {
		case DP_SRNG_THRESH_CRITICAL:
			/* Currently not doing anything special here */
			/* fall through */
		case DP_SRNG_THRESH_NEAR_FULL:
			ring_near_full = 1;
			*max_reap_limit *= DP_SRNG_PER_LOOP_NF_REAP_MULTIPLIER;
			break;
		case DP_SRNG_THRESH_SAFE:
			qdf_atomic_set(&srng->near_full, 0);
			ring_near_full = 0;
			break;
		default:
			qdf_assert(0);
			break;
		}
	}

	return ring_near_full;
}
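
/*
 * Illustrative usage sketch (hypothetical caller): a ring processing loop
 * can scale up its per-loop reap limit when the near-full IRQ handler has
 * flagged the ring:
 *
 *	int max_reap_limit = budget;
 *	int near_full;
 *
 *	near_full = _dp_srng_test_and_update_nf_params(soc, srng,
 *						       &max_reap_limit);
 *	if (near_full)
 *		budget = max_reap_limit;
 *
 * so more entries are reaped in this pass before yielding.
 */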
#else
static inline int
_dp_srng_test_and_update_nf_params(struct dp_soc *soc,
				   struct dp_srng *srng,
				   int *max_reap_limit)
{
	return 0;
}
#endif

/**
 * dp_desc_pool_get_cmem_base() - get CMEM base offset for a desc pool
 * @chip_id: chip id (used to index RX desc pools across MLO chips)
 * @desc_pool_id: descriptor pool id
 * @desc_type: descriptor type
 *
 * Return: CMEM offset of the descriptor pool
 */
static inline
uint32_t dp_desc_pool_get_cmem_base(uint8_t chip_id, uint8_t desc_pool_id,
				    enum dp_desc_type desc_type)
{
	switch (desc_type) {
	case DP_TX_DESC_TYPE:
		return (DP_TX_DESC_CMEM_OFFSET +
			(desc_pool_id * DP_TX_DESC_POOL_CMEM_SIZE));
	case DP_RX_DESC_BUF_TYPE:
		return (DP_RX_DESC_CMEM_OFFSET +
			((chip_id * MAX_RXDESC_POOLS) + desc_pool_id) *
			DP_RX_DESC_POOL_CMEM_SIZE);
	default:
		QDF_BUG(0);
	}
	return 0;
}

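/*
 * Illustrative example (hypothetical pool id): with the layout above, the
 * CMEM base of TX desc pool 2 is
 *	DP_TX_DESC_CMEM_OFFSET + 2 * DP_TX_DESC_POOL_CMEM_SIZE
 * i.e. dp_desc_pool_get_cmem_base(0, 2, DP_TX_DESC_TYPE), and
 * DP_CMEM_OFFSET_TO_PPT_ID() converts such an offset into a PPT page index.
 */
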
#ifndef WLAN_MLO_MULTI_CHIP
static inline
void dp_soc_mlo_fill_params(struct dp_soc *soc,
			    struct cdp_soc_attach_params *params)
{
}

static inline
void dp_pdev_mlo_fill_params(struct dp_pdev *pdev,
			     struct cdp_pdev_attach_params *params)
{
}
#endif

/**
 * dp_txrx_set_vdev_param_be() - target specific ops while setting vdev params
 * @soc: DP soc handle
 * @vdev: pointer to vdev structure
 * @param: parameter type to be set
 * @val: value to be set
 *
 * Return: QDF_STATUS
 */
QDF_STATUS dp_txrx_set_vdev_param_be(struct dp_soc *soc,
				     struct dp_vdev *vdev,
				     enum cdp_vdev_param_type param,
				     cdp_config_param_type val);

#endif