xref: /wlan-dirver/qca-wifi-host-cmn/dp/wifi3.0/be/dp_be.h (revision bd23cf7dccea2c964087a8e7bb3abed720075ec8)
1 /*
2  * Copyright (c) 2021 The Linux Foundation. All rights reserved.
3  * Copyright (c) 2021 Qualcomm Innovation Center, Inc. All rights reserved.
4  *
5  * Permission to use, copy, modify, and/or distribute this software for
6  * any purpose with or without fee is hereby granted, provided that the
7  * above copyright notice and this permission notice appear in all
8  * copies.
9  *
10  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
11  * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
12  * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
13  * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
14  * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
15  * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
16  * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
17  * PERFORMANCE OF THIS SOFTWARE.
18  */
19 #ifndef __DP_BE_H
20 #define __DP_BE_H
21 
22 #include <dp_types.h>
23 #include <hal_be_tx.h>
24 #ifdef WLAN_MLO_MULTI_CHIP
25 #include "mlo/dp_mlo.h"
26 #else
27 #include <dp_peer.h>
28 #endif
29 
/* maximum number of entries in one page of secondary page table */
#define DP_CC_SPT_PAGE_MAX_ENTRIES 512

/* mask to extract the in-page entry offset from a Desc ID */
#define DP_CC_SPT_PAGE_MAX_ENTRIES_MASK (DP_CC_SPT_PAGE_MAX_ENTRIES - 1)

/* maximum number of entries in primary page table */
#define DP_CC_PPT_MAX_ENTRIES 1024

/* cookie conversion required CMEM offset from CMEM pool */
#define DP_CC_MEM_OFFSET_IN_CMEM 0

/* cookie conversion primary page table size 4K */
#define DP_CC_PPT_MEM_SIZE 4096

/* FST required CMEM offset from CMEM pool */
#define DP_FST_MEM_OFFSET_IN_CMEM \
	(DP_CC_MEM_OFFSET_IN_CMEM + DP_CC_PPT_MEM_SIZE)

/* lower 9 bits in Desc ID for offset in page of SPT */
#define DP_CC_DESC_ID_SPT_VA_OS_SHIFT 0

#define DP_CC_DESC_ID_SPT_VA_OS_MASK 0x1FF

#define DP_CC_DESC_ID_SPT_VA_OS_LSB 0

#define DP_CC_DESC_ID_SPT_VA_OS_MSB 8

/* higher 11 bits in Desc ID for offset in CMEM of PPT */
#define DP_CC_DESC_ID_PPT_PAGE_OS_LSB 9

#define DP_CC_DESC_ID_PPT_PAGE_OS_MSB 19

#define DP_CC_DESC_ID_PPT_PAGE_OS_SHIFT 9

#define DP_CC_DESC_ID_PPT_PAGE_OS_MASK 0xFFE00

/*
 * page 4K unaligned case, single SPT page physical address
 * need 8 bytes in PPT
 */
#define DP_CC_PPT_ENTRY_SIZE_4K_UNALIGNED 8
/*
 * page 4K aligned case, single SPT page physical address
 * need 4 bytes in PPT
 */
#define DP_CC_PPT_ENTRY_SIZE_4K_ALIGNED 4

/* 4K aligned case, number of bits HW append for one PPT entry value */
#define DP_CC_PPT_ENTRY_HW_APEND_BITS_4K_ALIGNED 12

#if defined(WLAN_MAX_PDEVS) && (WLAN_MAX_PDEVS == 1)
/* WBM2SW ring id for rx release */
#define WBM2SW_REL_ERR_RING_NUM 3
#else
/* WBM2SW ring id for rx release */
#define WBM2SW_REL_ERR_RING_NUM 5
#endif

/* tx descriptors are programmed at start of CMEM region */
#define DP_TX_DESC_CMEM_OFFSET	0

/* size of CMEM needed for a tx desc pool */
#define DP_TX_DESC_POOL_CMEM_SIZE \
	((WLAN_CFG_NUM_TX_DESC_MAX / DP_CC_SPT_PAGE_MAX_ENTRIES) * \
	 DP_CC_PPT_ENTRY_SIZE_4K_ALIGNED)

/* Offset of rx descriptor pool.
 * Parenthesized so the expansion stays a single term inside any
 * arithmetic expression it is substituted into.
 */
#define DP_RX_DESC_CMEM_OFFSET \
	(DP_TX_DESC_CMEM_OFFSET + (MAX_TXDESC_POOLS * DP_TX_DESC_POOL_CMEM_SIZE))

/* size of CMEM needed for a rx desc pool */
#define DP_RX_DESC_POOL_CMEM_SIZE \
	((WLAN_CFG_RX_SW_DESC_NUM_SIZE_MAX / DP_CC_SPT_PAGE_MAX_ENTRIES) * \
	 DP_CC_PPT_ENTRY_SIZE_4K_ALIGNED)

/* get ppt_id from CMEM_OFFSET */
#define DP_CMEM_OFFSET_TO_PPT_ID(offset) \
	((offset) / DP_CC_PPT_ENTRY_SIZE_4K_ALIGNED)
/**
 * struct dp_spt_page_desc - secondary page table page descriptor
 * @page_v_addr: page virtual address
 * @page_p_addr: page physical address
 * @ppt_index: entry index in primary page table where this page physical
 *	       address is stored
 */
struct dp_spt_page_desc {
	uint8_t *page_v_addr;
	qdf_dma_addr_t page_p_addr;
	uint32_t ppt_index;
};
123 
/**
 * struct dp_hw_cookie_conversion_t - main context for HW cookie conversion
 * @cmem_offset: CMEM offset from base address for primary page table setup
 * @total_page_num: total DDR pages allocated
 * @page_desc_base: base address of the page descriptor array
 * @page_pool: DDR pages pool
 * @cc_lock: lock protecting page acquiring/free
 */
struct dp_hw_cookie_conversion_t {
	uint32_t cmem_offset;
	uint32_t total_page_num;
	struct dp_spt_page_desc *page_desc_base;
	struct qdf_mem_multi_page_t page_pool;
	qdf_spinlock_t cc_lock;
};
140 
/**
 * struct dp_spt_page_desc_list - container of SPT page desc list info
 * @spt_page_list_head: head of SPT page descriptor list
 * @spt_page_list_tail: tail of SPT page descriptor list
 * @num_spt_pages: number of SPT page descriptors allocated
 */
struct dp_spt_page_desc_list {
	struct dp_spt_page_desc *spt_page_list_head;
	struct dp_spt_page_desc *spt_page_list_tail;
	uint16_t num_spt_pages;
};
152 
/* HW reading 8 bytes for VA */
#define DP_CC_HW_READ_BYTES 8

/*
 * DP_CC_SPT_PAGE_UPDATE_VA() - store a TX/RX Desc VA into one SPT page entry
 * @_page_base_va: SPT page base virtual address (byte pointer)
 * @_index: entry index inside the page
 * @_desc_va: descriptor virtual address to store
 *
 * Wrapped in do { } while (0) so the macro expands as exactly one
 * statement and stays safe inside unbraced if/else bodies.
 */
#define DP_CC_SPT_PAGE_UPDATE_VA(_page_base_va, _index, _desc_va) \
	do { \
		*((uintptr_t *)((_page_base_va) + \
				(_index) * DP_CC_HW_READ_BYTES)) = \
			(uintptr_t)(_desc_va); \
	} while (0)
158 
/**
 * struct dp_tx_bank_profile - DP wrapper for TCL banks
 * @is_configured: flag indicating if this bank is configured
 * @ref_count: atomic count of users currently sharing this bank
 * @bank_config: HAL TX bank configuration
 */
struct dp_tx_bank_profile {
	uint8_t is_configured;
	qdf_atomic_t  ref_count;
	union hal_tx_bank_config bank_config;
};
170 
/**
 * struct dp_soc_be - Extended DP soc for BE targets
 * @soc: dp soc structure; must stay the first member so dp_soc/dp_soc_be
 *	 pointer casts remain valid
 * @num_bank_profiles: num TX bank profiles
 * @tx_bank_lock: mutex protecting @bank_profiles
 * @bank_profiles: bank profiles for various TX banks
 * @page_desc_base: SPT page descriptor array, indexed by ppt_index
 *		    (see dp_cc_desc_find())
 * @cc_cmem_base: cmem offset reserved for CC
 * @tx_cc_ctx: Cookie conversion context for tx desc pools
 * @rx_cc_ctx: Cookie conversion context for rx desc pools
 * @reo2ppe_ring: REO to PPE ring (WLAN_SUPPORT_PPEDS only)
 * @ppe2tcl_ring: PPE to TCL ring (WLAN_SUPPORT_PPEDS only)
 * @ppe_release_ring: PPE release ring (WLAN_SUPPORT_PPEDS only)
 * @monitor_soc_be: BE specific monitor object
 * @mlo_enabled: Flag to indicate MLO is enabled or not
 * @mlo_chip_id: MLO chip_id
 * @ml_ctxt: pointer to global ml_context
 * @mld_peer_hash: peer hash table for ML peers
 *           Associated peer with this MAC address)
 * @mld_peer_hash_lock: lock to protect mld_peer_hash
 */
struct dp_soc_be {
	struct dp_soc soc;
	uint8_t num_bank_profiles;
	qdf_mutex_t tx_bank_lock;
	struct dp_tx_bank_profile *bank_profiles;
	struct dp_spt_page_desc *page_desc_base;
	uint32_t cc_cmem_base;
	struct dp_hw_cookie_conversion_t tx_cc_ctx[MAX_TXDESC_POOLS];
	struct dp_hw_cookie_conversion_t rx_cc_ctx[MAX_RXDESC_POOLS];
#ifdef WLAN_SUPPORT_PPEDS
	struct dp_srng reo2ppe_ring;
	struct dp_srng ppe2tcl_ring;
	struct dp_srng ppe_release_ring;
#endif
#if !defined(DISABLE_MON_CONFIG)
	struct dp_mon_soc_be *monitor_soc_be;
#endif
#ifdef WLAN_FEATURE_11BE_MLO
#ifdef WLAN_MLO_MULTI_CHIP
	uint8_t mlo_enabled;
	uint8_t mlo_chip_id;
	struct dp_mlo_ctxt *ml_ctxt;
#else
	/* Protect mld peer hash table */
	DP_MUTEX_TYPE mld_peer_hash_lock;
	struct {
		/* hash mask — presumably (num bins - 1); confirm in attach */
		uint32_t mask;
		/* number of index bits used for hashing */
		uint32_t idx_bits;

		TAILQ_HEAD(, dp_peer) * bins;
	} mld_peer_hash;
#endif
#endif
};
221 
/* convert struct dp_soc_be pointer to struct dp_soc pointer;
 * argument parenthesized so any expression expands safely under the cast
 */
#define DP_SOC_BE_GET_SOC(be_soc) ((struct dp_soc *)(be_soc))
224 
/**
 * struct dp_pdev_be - Extended DP pdev for BE targets
 * @pdev: dp pdev structure; must stay the first member so dp_pdev/dp_pdev_be
 *	  pointer casts remain valid
 * @monitor_pdev_be: BE specific monitor object
 * @mlo_link_id: MLO link id for PDEV
 */
struct dp_pdev_be {
	struct dp_pdev pdev;
#if !defined(DISABLE_MON_CONFIG)
	struct dp_mon_pdev_be *monitor_pdev_be;
#endif
#ifdef WLAN_MLO_MULTI_CHIP
	uint8_t mlo_link_id;
#endif
};
240 
/**
 * struct dp_vdev_be - Extended DP vdev for BE targets
 * @vdev: dp vdev structure; must stay the first member so dp_vdev/dp_vdev_be
 *	  pointer casts remain valid
 * @bank_id: bank_id to be used for TX (signed; negative presumably means
 *	     unassigned — confirm against bank allocation code)
 * @vdev_id_check_en: flag if HW vdev_id check is enabled for vdev
 */
struct dp_vdev_be {
	struct dp_vdev vdev;
	int8_t bank_id;
	uint8_t vdev_id_check_en;
};
252 
/**
 * struct dp_peer_be - Extended DP peer for BE targets
 * @peer: dp peer structure; must stay the first member so dp_peer/dp_peer_be
 *	  pointer casts remain valid
 */
struct dp_peer_be {
	struct dp_peer peer;
};
260 
/**
 * dp_get_soc_context_size_be() - get context size for target specific DP soc
 *
 * Return: value in bytes for BE specific soc structure
 */
qdf_size_t dp_get_soc_context_size_be(void);

/**
 * dp_initialize_arch_ops_be() - initialize BE specific arch ops
 * @arch_ops: arch ops pointer
 *
 * Return: none
 */
void dp_initialize_arch_ops_be(struct dp_arch_ops *arch_ops);

/**
 * dp_get_context_size_be() - get BE specific size for peer/vdev/pdev/soc
 * @context_type: DP context type for which the size is requested
 *
 * Return: size in bytes for the context_type
 */
qdf_size_t dp_get_context_size_be(enum dp_context_type context_type);
283 
/**
 * dp_get_be_soc_from_dp_soc() - typecast dp_soc to its BE extension
 * @soc: dp_soc pointer
 *
 * Return: dp_soc_be pointer
 */
static inline struct dp_soc_be *dp_get_be_soc_from_dp_soc(struct dp_soc *soc)
{
	struct dp_soc_be *be_soc = (struct dp_soc_be *)soc;

	return be_soc;
}
294 
#ifdef WLAN_MLO_MULTI_CHIP
typedef struct dp_mlo_ctxt *dp_mld_peer_hash_obj_t;

/**
 * dp_mlo_get_peer_hash_obj() - return the container struct of MLO hash table
 * @soc: soc handle
 *
 * Return: MLD peer hash object (global ml_context for multi-chip builds)
 */
static inline dp_mld_peer_hash_obj_t
dp_mlo_get_peer_hash_obj(struct dp_soc *soc)
{
	struct dp_soc_be *be_soc = dp_get_be_soc_from_dp_soc(soc);

	return be_soc->ml_ctxt;
}

#else
typedef struct dp_soc_be *dp_mld_peer_hash_obj_t;

/**
 * dp_mlo_get_peer_hash_obj() - return the container struct of MLO hash table
 * @soc: soc handle
 *
 * Return: MLD peer hash object (the BE soc itself for single-chip builds)
 */
static inline dp_mld_peer_hash_obj_t
dp_mlo_get_peer_hash_obj(struct dp_soc *soc)
{
	return dp_get_be_soc_from_dp_soc(soc);
}
#endif
322 
/**
 * dp_mlo_peer_find_hash_attach_be() - API to initialize ML peer hash table
 * @mld_hash_obj: Peer hash object
 * @hash_elems: number of entries in hash table
 *
 * Return: QDF_STATUS_SUCCESS when attach is success else QDF_STATUS_FAILURE
 */
QDF_STATUS
dp_mlo_peer_find_hash_attach_be(dp_mld_peer_hash_obj_t mld_hash_obj,
				int hash_elems);

/**
 * dp_mlo_peer_find_hash_detach_be() - API to de-initialize ML peer hash table
 * @mld_hash_obj: Peer hash object
 *
 * Return: void
 */
void dp_mlo_peer_find_hash_detach_be(dp_mld_peer_hash_obj_t mld_hash_obj);
343 
/**
 * dp_get_be_pdev_from_dp_pdev() - typecast dp_pdev to its BE extension
 * @pdev: dp_pdev pointer
 *
 * Return: dp_pdev_be pointer
 */
static inline
struct dp_pdev_be *dp_get_be_pdev_from_dp_pdev(struct dp_pdev *pdev)
{
	struct dp_pdev_be *be_pdev = (struct dp_pdev_be *)pdev;

	return be_pdev;
}
355 
/**
 * dp_get_be_vdev_from_dp_vdev() - typecast dp_vdev to its BE extension
 * @vdev: dp_vdev pointer
 *
 * Return: dp_vdev_be pointer
 */
static inline
struct dp_vdev_be *dp_get_be_vdev_from_dp_vdev(struct dp_vdev *vdev)
{
	struct dp_vdev_be *be_vdev = (struct dp_vdev_be *)vdev;

	return be_vdev;
}
367 
/**
 * dp_get_be_peer_from_dp_peer() - typecast dp_peer to its BE extension
 * @peer: dp_peer pointer
 *
 * Return: dp_peer_be pointer
 */
static inline
struct dp_peer_be *dp_get_be_peer_from_dp_peer(struct dp_peer *peer)
{
	struct dp_peer_be *be_peer = (struct dp_peer_be *)peer;

	return be_peer;
}
379 
/**
 * dp_hw_cookie_conversion_attach() - allocate resources for a HW cookie
 *				      conversion context
 * @be_soc: BE soc handle
 * @cc_ctx: cookie conversion context to populate
 * @num_descs: number of TX/RX descriptors the context must cover
 * @desc_type: descriptor type (TX or RX)
 * @desc_pool_id: descriptor pool id
 *
 * Return: QDF_STATUS
 */
QDF_STATUS
dp_hw_cookie_conversion_attach(struct dp_soc_be *be_soc,
			       struct dp_hw_cookie_conversion_t *cc_ctx,
			       uint32_t num_descs,
			       enum dp_desc_type desc_type,
			       uint8_t desc_pool_id);

/**
 * dp_hw_cookie_conversion_detach() - free resources of a HW cookie
 *				      conversion context
 * @be_soc: BE soc handle
 * @cc_ctx: cookie conversion context
 *
 * Return: QDF_STATUS
 */
QDF_STATUS
dp_hw_cookie_conversion_detach(struct dp_soc_be *be_soc,
			       struct dp_hw_cookie_conversion_t *cc_ctx);

/**
 * dp_hw_cookie_conversion_init() - program a HW cookie conversion context
 * @be_soc: BE soc handle
 * @cc_ctx: cookie conversion context
 *
 * Return: QDF_STATUS
 */
QDF_STATUS
dp_hw_cookie_conversion_init(struct dp_soc_be *be_soc,
			     struct dp_hw_cookie_conversion_t *cc_ctx);

/**
 * dp_hw_cookie_conversion_deinit() - de-program a HW cookie conversion
 *				      context
 * @be_soc: BE soc handle
 * @cc_ctx: cookie conversion context
 *
 * Return: QDF_STATUS
 */
QDF_STATUS
dp_hw_cookie_conversion_deinit(struct dp_soc_be *be_soc,
			       struct dp_hw_cookie_conversion_t *cc_ctx);
/**
 * dp_cc_spt_page_desc_alloc() - allocate SPT DDR page descriptors from pool
 * @be_soc: beryllium soc handler
 * @list_head: pointer to page desc head
 * @list_tail: pointer to page desc tail
 * @num_desc: number of TX/RX Descs required for SPT pages
 *
 * Return: number of SPT page descriptors allocated
 */
uint16_t dp_cc_spt_page_desc_alloc(struct dp_soc_be *be_soc,
				   struct dp_spt_page_desc **list_head,
				   struct dp_spt_page_desc **list_tail,
				   uint16_t num_desc);
/**
 * dp_cc_spt_page_desc_free() - free SPT DDR page descriptors back to pool
 * @be_soc: beryllium soc handler
 * @list_head: pointer to page desc head
 * @list_tail: pointer to page desc tail
 * @page_nums: number of page descs freed back to pool
 *
 * Return: none
 */
void dp_cc_spt_page_desc_free(struct dp_soc_be *be_soc,
			      struct dp_spt_page_desc **list_head,
			      struct dp_spt_page_desc **list_tail,
			      uint16_t page_nums);
420 
421 /**
422  * dp_cc_desc_id_generate() - generate SW cookie ID according to
423 				DDR page 4K aligned or not
424  * @ppt_index: offset index in primary page table
425  * @spt_index: offset index in sceondary DDR page
426  *
427  * Generate SW cookie ID to match as HW expected
428  *
429  * Return: cookie ID
430  */
431 static inline uint32_t dp_cc_desc_id_generate(uint32_t ppt_index,
432 					      uint16_t spt_index)
433 {
434 	/*
435 	 * for 4k aligned case, cmem entry size is 4 bytes,
436 	 * HW index from bit19~bit10 value = ppt_index / 2, high 32bits flag
437 	 * from bit9 value = ppt_index % 2, then bit 19 ~ bit9 value is
438 	 * exactly same with original ppt_index value.
439 	 * for 4k un-aligned case, cmem entry size is 8 bytes.
440 	 * bit19 ~ bit9 will be HW index value, same as ppt_index value.
441 	 */
442 	return ((((uint32_t)ppt_index) << DP_CC_DESC_ID_PPT_PAGE_OS_SHIFT) |
443 		spt_index);
444 }
445 
/**
 * dp_cc_desc_find() - find TX/RX Desc virtual address by cookie ID
 * @soc: dp soc handle
 * @desc_id: TX/RX Desc ID
 *
 * Return: TX/RX Desc virtual address
 */
static inline uintptr_t dp_cc_desc_find(struct dp_soc *soc,
					uint32_t desc_id)
{
	struct dp_soc_be *be_soc;
	uint16_t ppt_page_id, spt_va_id;
	uint8_t *spt_page_va;

	be_soc = dp_get_be_soc_from_dp_soc(soc);
	/* upper Desc ID bits select the SPT page via the PPT index */
	ppt_page_id = (desc_id & DP_CC_DESC_ID_PPT_PAGE_OS_MASK) >>
			DP_CC_DESC_ID_PPT_PAGE_OS_SHIFT;

	/* lower Desc ID bits select the entry within that SPT page */
	spt_va_id = (desc_id & DP_CC_DESC_ID_SPT_VA_OS_MASK) >>
			DP_CC_DESC_ID_SPT_VA_OS_SHIFT;

	/*
	 * ppt index in cmem is same order where the page in the
	 * page desc array during initialization.
	 * entry size in DDR page is 64 bits, for 32 bits system,
	 * only lower 32 bits VA value is needed.
	 */
	spt_page_va = be_soc->page_desc_base[ppt_page_id].page_v_addr;

	return (*((uintptr_t *)(spt_page_va  +
				spt_va_id * DP_CC_HW_READ_BYTES)));
}
478 
#ifdef WLAN_FEATURE_NEAR_FULL_IRQ
/**
 * enum dp_srng_near_full_levels - SRNG Near FULL levels
 * @DP_SRNG_THRESH_SAFE: SRNG level safe for yielding the near full mode
 *		of processing the entries in SRNG
 * @DP_SRNG_THRESH_NEAR_FULL: SRNG level enters the near full mode
 *		of processing the entries in SRNG
 * @DP_SRNG_THRESH_CRITICAL: SRNG level enters the critical level of full
 *		condition and drastic steps need to be taken for processing
 *		the entries in SRNG
 */
enum dp_srng_near_full_levels {
	DP_SRNG_THRESH_SAFE,
	DP_SRNG_THRESH_NEAR_FULL,
	DP_SRNG_THRESH_CRITICAL,
};
495 
496 /**
497  * dp_srng_check_ring_near_full() - Check if SRNG is marked as near-full from
498  *				its corresponding near-full irq handler
499  * @soc: Datapath SoC handle
500  * @dp_srng: datapath handle for this SRNG
501  *
502  * Return: 1, if the srng was marked as near-full
503  *	   0, if the srng was not marked as near-full
504  */
505 static inline int dp_srng_check_ring_near_full(struct dp_soc *soc,
506 					       struct dp_srng *dp_srng)
507 {
508 	return qdf_atomic_read(&dp_srng->near_full);
509 }
510 
511 /**
512  * dp_srng_get_near_full_level() - Check the num available entries in the
513  *			consumer srng and return the level of the srng
514  *			near full state.
515  * @soc: Datapath SoC Handle [To be validated by the caller]
516  * @hal_ring_hdl: SRNG handle
517  *
518  * Return: near-full level
519  */
520 static inline int
521 dp_srng_get_near_full_level(struct dp_soc *soc, struct dp_srng *dp_srng)
522 {
523 	uint32_t num_valid;
524 
525 	num_valid = hal_srng_dst_num_valid_nolock(soc->hal_soc,
526 						  dp_srng->hal_srng,
527 						  true);
528 
529 	if (num_valid > dp_srng->crit_thresh)
530 		return DP_SRNG_THRESH_CRITICAL;
531 	else if (num_valid < dp_srng->safe_thresh)
532 		return DP_SRNG_THRESH_SAFE;
533 	else
534 		return DP_SRNG_THRESH_NEAR_FULL;
535 }
536 
537 #define DP_SRNG_PER_LOOP_NF_REAP_MULTIPLIER	2
538 
539 /**
540  * dp_srng_test_and_update_nf_params() - Test the near full level and update
541  *			the reap_limit and flags to reflect the state.
542  * @soc: Datapath soc handle
543  * @srng: Datapath handle for the srng
544  * @max_reap_limit: [Output Param] Buffer to set the map_reap_limit as
545  *			per the near-full state
546  *
547  * Return: 1, if the srng is near full
548  *	   0, if the srng is not near full
549  */
550 static inline int
551 _dp_srng_test_and_update_nf_params(struct dp_soc *soc,
552 				   struct dp_srng *srng,
553 				   int *max_reap_limit)
554 {
555 	int ring_near_full = 0, near_full_level;
556 
557 	if (dp_srng_check_ring_near_full(soc, srng)) {
558 		near_full_level = dp_srng_get_near_full_level(soc, srng);
559 		switch (near_full_level) {
560 		case DP_SRNG_THRESH_CRITICAL:
561 			/* Currently not doing anything special here */
562 			/* fall through */
563 		case DP_SRNG_THRESH_NEAR_FULL:
564 			ring_near_full = 1;
565 			*max_reap_limit *= DP_SRNG_PER_LOOP_NF_REAP_MULTIPLIER;
566 			break;
567 		case DP_SRNG_THRESH_SAFE:
568 			qdf_atomic_set(&srng->near_full, 0);
569 			ring_near_full = 0;
570 			break;
571 		default:
572 			qdf_assert(0);
573 			break;
574 		}
575 	}
576 
577 	return ring_near_full;
578 }
579 #else
/* Stub when near-full IRQ handling is compiled out: the ring is never
 * reported as near full and max_reap_limit is left untouched.
 */
static inline int
_dp_srng_test_and_update_nf_params(struct dp_soc *soc,
				   struct dp_srng *srng,
				   int *max_reap_limit)
{
	return 0;
}
587 #endif
588 
589 static inline
590 uint32_t dp_desc_pool_get_cmem_base(uint8_t chip_id, uint8_t desc_pool_id,
591 				    enum dp_desc_type desc_type)
592 {
593 	switch (desc_type) {
594 	case DP_TX_DESC_TYPE:
595 		return (DP_TX_DESC_CMEM_OFFSET +
596 			(desc_pool_id * DP_TX_DESC_POOL_CMEM_SIZE));
597 	case DP_RX_DESC_BUF_TYPE:
598 		return (DP_RX_DESC_CMEM_OFFSET +
599 			((chip_id * MAX_RXDESC_POOLS) + desc_pool_id) *
600 			DP_RX_DESC_POOL_CMEM_SIZE);
601 	default:
602 			QDF_BUG(0);
603 	}
604 	return 0;
605 }
606 
#ifndef WLAN_MLO_MULTI_CHIP
/* Stub when multi-chip MLO is compiled out: no soc MLO params to fill */
static inline
void dp_soc_mlo_fill_params(struct dp_soc *soc,
			    struct cdp_soc_attach_params *params)
{
}

/* Stub when multi-chip MLO is compiled out: no pdev MLO params to fill */
static inline
void dp_pdev_mlo_fill_params(struct dp_pdev *pdev,
			     struct cdp_pdev_attach_params *params)
{
}
#endif
620 #endif
621