xref: /wlan-dirver/qca-wifi-host-cmn/dp/wifi3.0/be/dp_be.h (revision 3ccef6c0a61da1d3506c43c42483179438d1cc1f)
1 /*
2  * Copyright (c) 2021 The Linux Foundation. All rights reserved.
3  * Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved.
4  *
5  * Permission to use, copy, modify, and/or distribute this software for
6  * any purpose with or without fee is hereby granted, provided that the
7  * above copyright notice and this permission notice appear in all
8  * copies.
9  *
10  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
11  * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
12  * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
13  * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
14  * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
15  * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
16  * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
17  * PERFORMANCE OF THIS SOFTWARE.
18  */
19 #ifndef __DP_BE_H
20 #define __DP_BE_H
21 
22 #include <dp_types.h>
23 #include <hal_be_tx.h>
24 #ifdef WLAN_MLO_MULTI_CHIP
25 #include "mlo/dp_mlo.h"
26 #else
27 #include <dp_peer.h>
28 #endif
29 #ifdef WIFI_MONITOR_SUPPORT
30 #include <dp_mon.h>
31 #endif
32 
/* Clients that are assigned carve-outs from the CMEM pool */
enum CMEM_MEM_CLIENTS {
	COOKIE_CONVERSION,	/* HW cookie conversion page tables */
	FISA_FST,		/* FISA flow search table */
};
37 
/* maximum number of entries in one page of secondary page table */
#define DP_CC_SPT_PAGE_MAX_ENTRIES 512

/* mask to extract the entry offset within one SPT page from a Desc ID */
#define DP_CC_SPT_PAGE_MAX_ENTRIES_MASK (DP_CC_SPT_PAGE_MAX_ENTRIES - 1)

/* maximum number of entries in primary page table */
#define DP_CC_PPT_MAX_ENTRIES 1024

/* cookie conversion required CMEM offset from CMEM pool */
#define DP_CC_MEM_OFFSET_IN_CMEM 0

/* cookie conversion primary page table size 4K */
#define DP_CC_PPT_MEM_SIZE 4096

/* FST required CMEM offset from CMEM pool */
#define DP_FST_MEM_OFFSET_IN_CMEM \
	(DP_CC_MEM_OFFSET_IN_CMEM + DP_CC_PPT_MEM_SIZE)

/* CMEM size for FISA FST 16K */
#define DP_CMEM_FST_SIZE 16384

/* lower 9 bits in Desc ID for offset in page of SPT */
#define DP_CC_DESC_ID_SPT_VA_OS_SHIFT 0

#define DP_CC_DESC_ID_SPT_VA_OS_MASK 0x1FF

#define DP_CC_DESC_ID_SPT_VA_OS_LSB 0

#define DP_CC_DESC_ID_SPT_VA_OS_MSB 8

/* higher 11 bits in Desc ID for offset in CMEM of PPT */
#define DP_CC_DESC_ID_PPT_PAGE_OS_LSB 9

#define DP_CC_DESC_ID_PPT_PAGE_OS_MSB 19

#define DP_CC_DESC_ID_PPT_PAGE_OS_SHIFT 9

#define DP_CC_DESC_ID_PPT_PAGE_OS_MASK 0xFFE00

/*
 * page 4K unaligned case, single SPT page physical address
 * need 8 bytes in PPT
 */
#define DP_CC_PPT_ENTRY_SIZE_4K_UNALIGNED 8
/*
 * page 4K aligned case, single SPT page physical address
 * need 4 bytes in PPT
 */
#define DP_CC_PPT_ENTRY_SIZE_4K_ALIGNED 4

/* 4K aligned case, number of bits HW append for one PPT entry value */
#define DP_CC_PPT_ENTRY_HW_APEND_BITS_4K_ALIGNED 12

#if defined(WLAN_MAX_PDEVS) && (WLAN_MAX_PDEVS == 1)
/* WBM2SW ring id for rx release */
#define WBM2SW_REL_ERR_RING_NUM 3
#else
/* WBM2SW ring id for rx release */
#define WBM2SW_REL_ERR_RING_NUM 5
#endif
99 
#ifdef WLAN_SUPPORT_PPEDS
/* The MAX PPE PRI2TID */
#define DP_TX_INT_PRI2TID_MAX 15

#define DP_TX_PPEDS_POOL_ID 0

/* size of CMEM needed for a ppeds tx desc pool */
#define DP_TX_PPEDS_DESC_POOL_CMEM_SIZE \
	((WLAN_CFG_NUM_PPEDS_TX_DESC_MAX / DP_CC_SPT_PAGE_MAX_ENTRIES) * \
	 DP_CC_PPT_ENTRY_SIZE_4K_ALIGNED)

/* Offset of ppeds tx descriptor pool */
#define DP_TX_PPEDS_DESC_CMEM_OFFSET 0

#define PEER_ROUTING_USE_PPE 1
#define PEER_ROUTING_ENABLED 1
#else
#define DP_TX_PPEDS_DESC_CMEM_OFFSET 0
#define DP_TX_PPEDS_DESC_POOL_CMEM_SIZE 0
#endif

/* tx descriptors are programmed at the start of the CMEM region */
#define DP_TX_DESC_CMEM_OFFSET \
	(DP_TX_PPEDS_DESC_CMEM_OFFSET + DP_TX_PPEDS_DESC_POOL_CMEM_SIZE)

/* size of CMEM needed for a tx desc pool */
#define DP_TX_DESC_POOL_CMEM_SIZE \
	((WLAN_CFG_NUM_TX_DESC_MAX / DP_CC_SPT_PAGE_MAX_ENTRIES) * \
	 DP_CC_PPT_ENTRY_SIZE_4K_ALIGNED)

/*
 * Offset of rx descriptor pool. Fully parenthesized so the expansion
 * stays correct when used inside a larger arithmetic expression.
 */
#define DP_RX_DESC_CMEM_OFFSET \
	(DP_TX_DESC_CMEM_OFFSET + (MAX_TXDESC_POOLS * DP_TX_DESC_POOL_CMEM_SIZE))

/* size of CMEM needed for a rx desc pool */
#define DP_RX_DESC_POOL_CMEM_SIZE \
	((WLAN_CFG_RX_SW_DESC_NUM_SIZE_MAX / DP_CC_SPT_PAGE_MAX_ENTRIES) * \
	 DP_CC_PPT_ENTRY_SIZE_4K_ALIGNED)

/* get ppt_id from CMEM_OFFSET */
#define DP_CMEM_OFFSET_TO_PPT_ID(offset) \
	((offset) / DP_CC_PPT_ENTRY_SIZE_4K_ALIGNED)
142 
/**
 * struct dp_spt_page_desc - secondary page table page descriptor
 * @page_v_addr: page virtual address
 * @page_p_addr: page physical address
 * @ppt_index: entry index in primary page table where this page physical
 *	       address is stored
 */
struct dp_spt_page_desc {
	uint8_t *page_v_addr;
	qdf_dma_addr_t page_p_addr;
	uint32_t ppt_index;
};
157 
/**
 * struct dp_hw_cookie_conversion_t - main context for HW cookie conversion
 * @cmem_offset: CMEM offset from base address for primary page table setup
 * @total_page_num: total DDR page allocated
 * @page_desc_base: page Desc buffer base address (array of SPT page descs)
 * @page_pool: DDR pages pool
 * @cc_lock: locks for page acquiring/free
 */
struct dp_hw_cookie_conversion_t {
	uint32_t cmem_offset;
	uint32_t total_page_num;
	struct dp_spt_page_desc *page_desc_base;
	struct qdf_mem_multi_page_t page_pool;
	qdf_spinlock_t cc_lock;
};
174 
/**
 * struct dp_spt_page_desc_list - container of SPT page desc list info
 * @spt_page_list_head: head of SPT page descriptor list
 * @spt_page_list_tail: tail of SPT page descriptor list
 * @num_spt_pages: number of SPT page descriptors allocated
 */
struct dp_spt_page_desc_list {
	struct dp_spt_page_desc *spt_page_list_head;
	struct dp_spt_page_desc *spt_page_list_tail;
	uint16_t num_spt_pages;
};
186 
/* HW reads 8 bytes for each VA entry in an SPT page */
#define DP_CC_HW_READ_BYTES 8

/*
 * DP_CC_SPT_PAGE_UPDATE_VA() - store a TX/RX Desc VA into an SPT page slot
 * @_page_base_va: SPT page base virtual address (uint8_t *)
 * @_index: entry index within the page
 * @_desc_va: descriptor virtual address to store
 *
 * Wrapped in do/while(0) so the macro expands to a single statement and
 * is safe to use in unbraced if/else bodies (a bare { ... } followed by
 * ';' would break an else clause).
 */
#define DP_CC_SPT_PAGE_UPDATE_VA(_page_base_va, _index, _desc_va) \
	do { \
		*((uintptr_t *)((_page_base_va) + \
				(_index) * DP_CC_HW_READ_BYTES)) = \
			(uintptr_t)(_desc_va); \
	} while (0)
192 
193 /**
194  * struct dp_tx_bank_profile - DP wrapper for TCL banks
195  * @is_configured: flag indicating if this bank is configured
196  * @ref_count: ref count indicating number of users of the bank
197  * @bank_config: HAL TX bank configuration
198  */
199 struct dp_tx_bank_profile {
200 	uint8_t is_configured;
201 	qdf_atomic_t  ref_count;
202 	union hal_tx_bank_config bank_config;
203 };
204 
#ifdef WLAN_SUPPORT_PPEDS
/**
 * struct dp_ppe_vp_tbl_entry - PPE Virtual table entry
 * @is_configured: Boolean indicating the entry is configured.
 */
struct dp_ppe_vp_tbl_entry {
	bool is_configured;
};

/**
 * struct dp_ppe_vp_profile - PPE direct switch profile per vdev
 * @vp_num: Virtual port number
 * @ppe_vp_num_idx: Index to the PPE VP table entry
 * @search_idx_reg_num: Address search Index register number
 * @drop_prec_enable: Drop precedence enable
 * @to_fw: To FW exception enable/disable.
 * @use_ppe_int_pri: Use PPE INT_PRI to TID mapping table
 */
struct dp_ppe_vp_profile {
	uint8_t vp_num;
	uint8_t ppe_vp_num_idx;
	uint8_t search_idx_reg_num;
	uint8_t drop_prec_enable;
	uint8_t to_fw;
	uint8_t use_ppe_int_pri;
};

/**
 * struct dp_ppe_tx_desc_pool_s - PPEDS Tx Descriptor Pool
 * @elem_size: Size of each descriptor
 * @num_allocated: Number of used descriptors
 * @freelist: Chain of free descriptors
 * @desc_pages: multiple page allocation information for actual descriptors
 * @elem_count: Number of descriptors in the pool
 * @num_free: Number of free descriptors
 * @lock: Lock for descriptor allocation/free from/to the pool
 */
struct dp_ppe_tx_desc_pool_s {
	uint16_t elem_size;
	uint32_t num_allocated;
	struct dp_tx_desc_s *freelist;
	struct qdf_mem_multi_page_t desc_pages;
	uint16_t elem_count;
	uint32_t num_free;
	qdf_spinlock_t lock;
};
#endif
252 
/**
 * struct dp_soc_be - Extended DP soc for BE targets
 * @soc: dp soc structure
 * @num_bank_profiles: num TX bank profiles
 * @tx_bank_lock: lock protecting the TX bank profile table
 * @bank_profiles: bank profiles for various TX banks
 * @page_desc_base: array of SPT page descriptors for cookie conversion
 * @cc_cmem_base: cmem offset reserved for CC
 * @tx_cc_ctx: Cookie conversion context for tx desc pools
 * @rx_cc_ctx: Cookie conversion context for rx desc pools
 * @reo2ppe_ring: REO2PPE ring
 * @ppe2tcl_ring: PPE2TCL ring
 * @ppe_release_ring: PPE release ring
 * @ppe_vp_tbl: PPE VP table
 * @ppeds_tx_cc_ctx: Cookie conversion context for ppeds tx desc pool
 * @ppeds_tx_desc: PPEDS tx desc pool
 * @ppeds_handle: PPEDS soc instance handle
 * @ppe_vp_tbl_lock: PPE VP table lock
 * @num_ppe_vp_entries: Number of PPE VP entries
 * @mlo_enabled: Flag to indicate MLO is enabled or not
 * @mlo_chip_id: MLO chip_id
 * @ml_ctxt: pointer to global ml_context
 * @delta_tqm: delta_tqm
 * @mlo_tstamp_offset: mlo timestamp offset
 * @mld_peer_hash_lock: lock to protect mld_peer_hash
 * @mld_peer_hash: peer hash table for ML peers
 * @ipa_bank_id: TCL bank id used by IPA
 */
struct dp_soc_be {
	struct dp_soc soc;
	uint8_t num_bank_profiles;
#if defined(WLAN_MAX_PDEVS) && (WLAN_MAX_PDEVS == 1)
	qdf_mutex_t tx_bank_lock;
#else
	qdf_spinlock_t tx_bank_lock;
#endif
	struct dp_tx_bank_profile *bank_profiles;
	struct dp_spt_page_desc *page_desc_base;
	uint32_t cc_cmem_base;
	struct dp_hw_cookie_conversion_t tx_cc_ctx[MAX_TXDESC_POOLS];
	struct dp_hw_cookie_conversion_t rx_cc_ctx[MAX_RXDESC_POOLS];
#ifdef WLAN_SUPPORT_PPEDS
	struct dp_srng reo2ppe_ring;
	struct dp_srng ppe2tcl_ring;
	struct dp_srng ppe_release_ring;
	struct dp_ppe_vp_tbl_entry *ppe_vp_tbl;
	struct dp_hw_cookie_conversion_t ppeds_tx_cc_ctx;
	struct dp_ppe_tx_desc_pool_s ppeds_tx_desc;
	void *ppeds_handle;
	qdf_mutex_t ppe_vp_tbl_lock;
	uint8_t num_ppe_vp_entries;
#endif
#ifdef WLAN_FEATURE_11BE_MLO
#ifdef WLAN_MLO_MULTI_CHIP
	uint8_t mlo_enabled;
	uint8_t mlo_chip_id;
	struct dp_mlo_ctxt *ml_ctxt;
	uint64_t delta_tqm;
	uint64_t mlo_tstamp_offset;
#else
	/* Protect mld peer hash table */
	DP_MUTEX_TYPE mld_peer_hash_lock;
	struct {
		uint32_t mask;
		uint32_t idx_bits;

		TAILQ_HEAD(, dp_peer) * bins;
	} mld_peer_hash;
#endif
#endif
#ifdef IPA_OFFLOAD
	int8_t ipa_bank_id;
#endif
};
327 
328 /* convert struct dp_soc_be pointer to struct dp_soc pointer */
329 #define DP_SOC_BE_GET_SOC(be_soc) ((struct dp_soc *)be_soc)
330 
/**
 * struct dp_pdev_be - Extended DP pdev for BE targets
 * @pdev: dp pdev structure
 * @mlo_link_id: MLO link id for PDEV
 * @delta_tsf2: delta_tsf2
 */
struct dp_pdev_be {
	struct dp_pdev pdev;
#ifdef WLAN_MLO_MULTI_CHIP
	uint8_t mlo_link_id;
	uint64_t delta_tsf2;
#endif
};
345 
/**
 * struct dp_vdev_be - Extended DP vdev for BE targets
 * @vdev: dp vdev structure
 * @bank_id: bank_id to be used for TX
 * @vdev_id_check_en: flag if HW vdev_id check is enabled for vdev
 * @partner_vdev_list: per-chip/per-link partner vdev list used for Intra-BSS
 * @seq_num: DP MLO seq number
 * @mcast_primary: flag marking the MLO Mcast primary vdev
 * @ppe_vp_enabled: flag to check if PPE VP is enabled for vdev
 * @ppe_vp_profile: PPE VP profile
 */
struct dp_vdev_be {
	struct dp_vdev vdev;
	int8_t bank_id;
	uint8_t vdev_id_check_en;
#ifdef WLAN_MLO_MULTI_CHIP
	/* partner list used for Intra-BSS */
	uint8_t partner_vdev_list[WLAN_MAX_MLO_CHIPS][WLAN_MAX_MLO_LINKS_PER_SOC];
#ifdef WLAN_FEATURE_11BE_MLO
#ifdef WLAN_MCAST_MLO
	/* DP MLO seq number */
	uint16_t seq_num;
	/* MLO Mcast primary vdev */
	bool mcast_primary;
#endif
#endif
#endif
	unsigned long ppe_vp_enabled;
#ifdef WLAN_SUPPORT_PPEDS
	struct dp_ppe_vp_profile ppe_vp_profile;
#endif
};
375 
/**
 * struct dp_peer_be - Extended DP peer for BE targets
 * @peer: dp peer structure
 * @priority_valid: flag for PPE priority (PPEDS only)
 */
struct dp_peer_be {
	struct dp_peer peer;
#ifdef WLAN_SUPPORT_PPEDS
	uint8_t priority_valid;
#endif
};
386 
387 /**
388  * dp_get_soc_context_size_be() - get context size for target specific DP soc
389  *
390  * Return: value in bytes for BE specific soc structure
391  */
392 qdf_size_t dp_get_soc_context_size_be(void);
393 
394 /**
395  * dp_initialize_arch_ops_be() - initialize BE specific arch ops
396  * @arch_ops: arch ops pointer
397  *
398  * Return: none
399  */
400 void dp_initialize_arch_ops_be(struct dp_arch_ops *arch_ops);
401 
/**
 * dp_get_context_size_be() - get BE specific size for peer/vdev/pdev/soc
 * @context_type: which DP context (soc/pdev/vdev/peer) the size is asked for
 *
 * Return: size in bytes for the context_type
 */
408 qdf_size_t dp_get_context_size_be(enum dp_context_type context_type);
409 
/**
 * dp_get_be_soc_from_dp_soc() - get dp_soc_be from dp_soc
 * @soc: dp_soc pointer
 *
 * Return: dp_soc_be pointer
 */
static inline struct dp_soc_be *dp_get_be_soc_from_dp_soc(struct dp_soc *soc)
{
	/* valid cast: struct dp_soc is the first member of struct dp_soc_be */
	return (struct dp_soc_be *)soc;
}
420 
421 #ifdef WLAN_MLO_MULTI_CHIP
422 typedef struct dp_mlo_ctxt *dp_mld_peer_hash_obj_t;
423 
424 /*
425  * dp_mlo_get_peer_hash_obj() - return the container struct of MLO hash table
426  *
427  * @soc: soc handle
428  *
429  * return: MLD peer hash object
430  */
431 static inline dp_mld_peer_hash_obj_t
432 dp_mlo_get_peer_hash_obj(struct dp_soc *soc)
433 {
434 	struct dp_soc_be *be_soc = dp_get_be_soc_from_dp_soc(soc);
435 
436 	return be_soc->ml_ctxt;
437 }
438 
439 void  dp_clr_mlo_ptnr_list(struct dp_soc *soc, struct dp_vdev *vdev);
440 
441 #if defined(WLAN_FEATURE_11BE_MLO)
442 /**
443  * dp_mlo_partner_chips_map() - Map MLO peers to partner SOCs
444  * @soc: Soc handle
445  * @peer: DP peer handle for ML peer
446  * @peer_id: peer_id
447  * Return: None
448  */
449 void dp_mlo_partner_chips_map(struct dp_soc *soc,
450 			      struct dp_peer *peer,
451 			      uint16_t peer_id);
452 
453 /**
454  * dp_mlo_partner_chips_unmap() - Unmap MLO peers to partner SOCs
455  * @soc: Soc handle
456  * @peer_id: peer_id
457  * Return: None
458  */
459 void dp_mlo_partner_chips_unmap(struct dp_soc *soc,
460 				uint16_t peer_id);
461 
462 #ifdef WLAN_MCAST_MLO
463 typedef void dp_ptnr_vdev_iter_func(struct dp_vdev_be *be_vdev,
464 				    struct dp_vdev *ptnr_vdev,
465 				    void *arg);
466 typedef void dp_ptnr_soc_iter_func(struct dp_soc *ptnr_soc,
467 				   void *arg);
468 /*
469  * dp_mcast_mlo_iter_ptnr_vdev - API to iterate through ptnr vdev list
470  * @be_soc: dp_soc_be pointer
471  * @be_vdev: dp_vdev_be pointer
472  * @func        : function to be called for each peer
473  * @arg         : argument need to be passed to func
474  * @mod_id: module id
475  *
476  * Return: None
477  */
478 void dp_mcast_mlo_iter_ptnr_vdev(struct dp_soc_be *be_soc,
479 				 struct dp_vdev_be *be_vdev,
480 				 dp_ptnr_vdev_iter_func func,
481 				 void *arg,
482 				 enum dp_mod_id mod_id);
483 /*
484  * dp_mcast_mlo_iter_ptnr_soc - API to iterate through ptnr soc list
485  * @be_soc: dp_soc_be pointer
486  * @func        : function to be called for each peer
487  * @arg         : argument need to be passed to func
488  *
489  * Return: None
490  */
491 void dp_mcast_mlo_iter_ptnr_soc(struct dp_soc_be *be_soc,
492 				dp_ptnr_soc_iter_func func,
493 				void *arg);
494 /*
495  * dp_mlo_get_mcast_primary_vdev- get ref to mcast primary vdev
496  * @be_soc: dp_soc_be pointer
497  * @be_vdev: dp_vdev_be pointer
498  * @mod_id: module id
499  *
500  * Return: mcast primary DP VDEV handle on success, NULL on failure
501  */
502 struct dp_vdev *dp_mlo_get_mcast_primary_vdev(struct dp_soc_be *be_soc,
503 					      struct dp_vdev_be *be_vdev,
504 					      enum dp_mod_id mod_id);
505 #endif
506 #endif
507 
508 #else
typedef struct dp_soc_be *dp_mld_peer_hash_obj_t;

/* Without MLO multi-chip, the MLD peer hash object is the BE soc itself */
static inline dp_mld_peer_hash_obj_t
dp_mlo_get_peer_hash_obj(struct dp_soc *soc)
{
	return dp_get_be_soc_from_dp_soc(soc);
}

static inline void  dp_clr_mlo_ptnr_list(struct dp_soc *soc,
					 struct dp_vdev *vdev)
{
	/* no-op when WLAN_MLO_MULTI_CHIP is not defined */
}
521 #endif
522 
/*
 * dp_mlo_peer_find_hash_attach_be() - API to initialize ML peer hash table
 *
 * @mld_hash_obj: Peer hash object
 * @hash_elems: number of entries in hash table
 *
 * return: QDF_STATUS_SUCCESS when attach is success else QDF_STATUS_FAILURE
 */
531 QDF_STATUS
532 dp_mlo_peer_find_hash_attach_be(dp_mld_peer_hash_obj_t mld_hash_obj,
533 				int hash_elems);
534 
/*
 * dp_mlo_peer_find_hash_detach_be() - API to de-initialize ML peer hash table
 *
 * @mld_hash_obj: Peer hash object
 *
 * return: void
 */
542 void dp_mlo_peer_find_hash_detach_be(dp_mld_peer_hash_obj_t mld_hash_obj);
543 
544 /**
545  * dp_get_be_pdev_from_dp_pdev() - get dp_pdev_be from dp_pdev
546  * @pdev: dp_pdev pointer
547  *
548  * Return: dp_pdev_be pointer
549  */
550 static inline
551 struct dp_pdev_be *dp_get_be_pdev_from_dp_pdev(struct dp_pdev *pdev)
552 {
553 	return (struct dp_pdev_be *)pdev;
554 }
555 
556 /**
557  * dp_get_be_vdev_from_dp_vdev() - get dp_vdev_be from dp_vdev
558  * @vdev: dp_vdev pointer
559  *
560  * Return: dp_vdev_be pointer
561  */
562 static inline
563 struct dp_vdev_be *dp_get_be_vdev_from_dp_vdev(struct dp_vdev *vdev)
564 {
565 	return (struct dp_vdev_be *)vdev;
566 }
567 
568 /**
569  * dp_get_be_peer_from_dp_peer() - get dp_peer_be from dp_peer
570  * @peer: dp_peer pointer
571  *
572  * Return: dp_peer_be pointer
573  */
574 static inline
575 struct dp_peer_be *dp_get_be_peer_from_dp_peer(struct dp_peer *peer)
576 {
577 	return (struct dp_peer_be *)peer;
578 }
579 
/**
 * dp_hw_cookie_conversion_attach() - attach a HW cookie conversion context
 * @be_soc: BE soc handle
 * @cc_ctx: cookie conversion context to attach
 * @num_descs: number of TX/RX Descs the context must cover
 * @desc_type: descriptor type the SPT pages will hold
 * @desc_pool_id: descriptor pool id
 *
 * Return: QDF_STATUS
 */
QDF_STATUS
dp_hw_cookie_conversion_attach(struct dp_soc_be *be_soc,
			       struct dp_hw_cookie_conversion_t *cc_ctx,
			       uint32_t num_descs,
			       enum dp_desc_type desc_type,
			       uint8_t desc_pool_id);

/**
 * dp_hw_cookie_conversion_detach() - detach a HW cookie conversion context
 * @be_soc: BE soc handle
 * @cc_ctx: cookie conversion context to detach
 *
 * Return: QDF_STATUS
 */
QDF_STATUS
dp_hw_cookie_conversion_detach(struct dp_soc_be *be_soc,
			       struct dp_hw_cookie_conversion_t *cc_ctx);
/**
 * dp_hw_cookie_conversion_init() - init a HW cookie conversion context
 * @be_soc: BE soc handle
 * @cc_ctx: cookie conversion context to init
 *
 * Return: QDF_STATUS
 */
QDF_STATUS
dp_hw_cookie_conversion_init(struct dp_soc_be *be_soc,
			     struct dp_hw_cookie_conversion_t *cc_ctx);
/**
 * dp_hw_cookie_conversion_deinit() - deinit a HW cookie conversion context
 * @be_soc: BE soc handle
 * @cc_ctx: cookie conversion context to deinit
 *
 * Return: QDF_STATUS
 */
QDF_STATUS
dp_hw_cookie_conversion_deinit(struct dp_soc_be *be_soc,
			       struct dp_hw_cookie_conversion_t *cc_ctx);
596 /**
597  * dp_cc_spt_page_desc_alloc() - allocate SPT DDR page descriptor from pool
598  * @be_soc: beryllium soc handler
599  * @list_head: pointer to page desc head
600  * @list_tail: pointer to page desc tail
601  * @num_desc: number of TX/RX Descs required for SPT pages
602  *
603  * Return: number of SPT page Desc allocated
604  */
605 uint16_t dp_cc_spt_page_desc_alloc(struct dp_soc_be *be_soc,
606 				   struct dp_spt_page_desc **list_head,
607 				   struct dp_spt_page_desc **list_tail,
608 				   uint16_t num_desc);
609 /**
610  * dp_cc_spt_page_desc_free() - free SPT DDR page descriptor to pool
611  * @be_soc: beryllium soc handler
612  * @list_head: pointer to page desc head
613  * @list_tail: pointer to page desc tail
614  * @page_nums: number of page desc freed back to pool
615  */
616 void dp_cc_spt_page_desc_free(struct dp_soc_be *be_soc,
617 			      struct dp_spt_page_desc **list_head,
618 			      struct dp_spt_page_desc **list_tail,
619 			      uint16_t page_nums);
620 
621 /**
622  * dp_cc_desc_id_generate() - generate SW cookie ID according to
623 				DDR page 4K aligned or not
624  * @ppt_index: offset index in primary page table
625  * @spt_index: offset index in sceondary DDR page
626  *
627  * Generate SW cookie ID to match as HW expected
628  *
629  * Return: cookie ID
630  */
631 static inline uint32_t dp_cc_desc_id_generate(uint32_t ppt_index,
632 					      uint16_t spt_index)
633 {
634 	/*
635 	 * for 4k aligned case, cmem entry size is 4 bytes,
636 	 * HW index from bit19~bit10 value = ppt_index / 2, high 32bits flag
637 	 * from bit9 value = ppt_index % 2, then bit 19 ~ bit9 value is
638 	 * exactly same with original ppt_index value.
639 	 * for 4k un-aligned case, cmem entry size is 8 bytes.
640 	 * bit19 ~ bit9 will be HW index value, same as ppt_index value.
641 	 */
642 	return ((((uint32_t)ppt_index) << DP_CC_DESC_ID_PPT_PAGE_OS_SHIFT) |
643 		spt_index);
644 }
645 
/**
 * dp_cc_desc_find() - find TX/RX Desc virtual address by cookie ID
 * @soc: dp soc handle
 * @desc_id: TX/RX Desc cookie ID
 *
 * Return: TX/RX Desc virtual address
 */
static inline uintptr_t dp_cc_desc_find(struct dp_soc *soc,
					uint32_t desc_id)
{
	struct dp_soc_be *be_soc;
	uint16_t ppt_page_id, spt_va_id;
	uint8_t *spt_page_va;

	be_soc = dp_get_be_soc_from_dp_soc(soc);
	/* bits 19~9: index of the SPT page in the page desc array */
	ppt_page_id = (desc_id & DP_CC_DESC_ID_PPT_PAGE_OS_MASK) >>
			DP_CC_DESC_ID_PPT_PAGE_OS_SHIFT;

	/* bits 8~0: entry offset within that SPT page */
	spt_va_id = (desc_id & DP_CC_DESC_ID_SPT_VA_OS_MASK) >>
			DP_CC_DESC_ID_SPT_VA_OS_SHIFT;

	/*
	 * ppt index in cmem is same order where the page in the
	 * page desc array during initialization.
	 * entry size in DDR page is 64 bits, for 32 bits system,
	 * only lower 32 bits VA value is needed.
	 */
	spt_page_va = be_soc->page_desc_base[ppt_page_id].page_v_addr;

	return (*((uintptr_t *)(spt_page_va  +
				spt_va_id * DP_CC_HW_READ_BYTES)));
}
678 
679 #ifdef WLAN_FEATURE_NEAR_FULL_IRQ
680 /**
681  * enum dp_srng_near_full_levels - SRNG Near FULL levels
682  * @DP_SRNG_THRESH_SAFE: SRNG level safe for yielding the near full mode
683  *		of processing the entries in SRNG
684  * @DP_SRNG_THRESH_NEAR_FULL: SRNG level enters the near full mode
685  *		of processing the entries in SRNG
686  * @DP_SRNG_THRESH_CRITICAL: SRNG level enters the critical level of full
687  *		condition and drastic steps need to be taken for processing
688  *		the entries in SRNG
689  */
690 enum dp_srng_near_full_levels {
691 	DP_SRNG_THRESH_SAFE,
692 	DP_SRNG_THRESH_NEAR_FULL,
693 	DP_SRNG_THRESH_CRITICAL,
694 };
695 
696 /**
697  * dp_srng_check_ring_near_full() - Check if SRNG is marked as near-full from
698  *				its corresponding near-full irq handler
699  * @soc: Datapath SoC handle
700  * @dp_srng: datapath handle for this SRNG
701  *
702  * Return: 1, if the srng was marked as near-full
703  *	   0, if the srng was not marked as near-full
704  */
705 static inline int dp_srng_check_ring_near_full(struct dp_soc *soc,
706 					       struct dp_srng *dp_srng)
707 {
708 	return qdf_atomic_read(&dp_srng->near_full);
709 }
710 
/**
 * dp_srng_get_near_full_level() - Check the num available entries in the
 *			consumer srng and return the level of the srng
 *			near full state.
 * @soc: Datapath SoC Handle [To be validated by the caller]
 * @dp_srng: datapath handle for this SRNG
 *
 * Return: near-full level (DP_SRNG_THRESH_SAFE/NEAR_FULL/CRITICAL)
 */
static inline int
dp_srng_get_near_full_level(struct dp_soc *soc, struct dp_srng *dp_srng)
{
	uint32_t num_valid;

	num_valid = hal_srng_dst_num_valid_nolock(soc->hal_soc,
						  dp_srng->hal_srng,
						  true);

	/* crit_thresh > num_valid >= safe_thresh maps to NEAR_FULL */
	if (num_valid > dp_srng->crit_thresh)
		return DP_SRNG_THRESH_CRITICAL;
	else if (num_valid < dp_srng->safe_thresh)
		return DP_SRNG_THRESH_SAFE;
	else
		return DP_SRNG_THRESH_NEAR_FULL;
}
736 
737 #define DP_SRNG_PER_LOOP_NF_REAP_MULTIPLIER	2
738 
/**
 * _dp_srng_test_and_update_nf_params() - Test the near full level and update
 *			the reap_limit and flags to reflect the state.
 * @soc: Datapath soc handle
 * @srng: Datapath handle for the srng
 * @max_reap_limit: [Output Param] Buffer to set the max_reap_limit as
 *			per the near-full state
 *
 * Return: 1, if the srng is near full
 *	   0, if the srng is not near full
 */
static inline int
_dp_srng_test_and_update_nf_params(struct dp_soc *soc,
				   struct dp_srng *srng,
				   int *max_reap_limit)
{
	int ring_near_full = 0, near_full_level;

	if (dp_srng_check_ring_near_full(soc, srng)) {
		near_full_level = dp_srng_get_near_full_level(soc, srng);
		switch (near_full_level) {
		case DP_SRNG_THRESH_CRITICAL:
			/* Currently not doing anything special here */
			fallthrough;
		case DP_SRNG_THRESH_NEAR_FULL:
			ring_near_full = 1;
			/* reap more per loop while the ring stays near full */
			*max_reap_limit *= DP_SRNG_PER_LOOP_NF_REAP_MULTIPLIER;
			break;
		case DP_SRNG_THRESH_SAFE:
			/* back to safe level: clear the near-full flag */
			qdf_atomic_set(&srng->near_full, 0);
			ring_near_full = 0;
			break;
		default:
			qdf_assert(0);
			break;
		}
	}

	return ring_near_full;
}
779 #else
/* Stub when WLAN_FEATURE_NEAR_FULL_IRQ is disabled: ring is never near full */
static inline int
_dp_srng_test_and_update_nf_params(struct dp_soc *soc,
				   struct dp_srng *srng,
				   int *max_reap_limit)
{
	return 0;
}
787 #endif
788 
789 static inline
790 uint32_t dp_desc_pool_get_cmem_base(uint8_t chip_id, uint8_t desc_pool_id,
791 				    enum dp_desc_type desc_type)
792 {
793 	switch (desc_type) {
794 	case DP_TX_DESC_TYPE:
795 		return (DP_TX_DESC_CMEM_OFFSET +
796 			(desc_pool_id * DP_TX_DESC_POOL_CMEM_SIZE));
797 	case DP_RX_DESC_BUF_TYPE:
798 		return (DP_RX_DESC_CMEM_OFFSET +
799 			((chip_id * MAX_RXDESC_POOLS) + desc_pool_id) *
800 			DP_RX_DESC_POOL_CMEM_SIZE);
801 	case DP_TX_PPEDS_DESC_TYPE:
802 		return DP_TX_PPEDS_DESC_CMEM_OFFSET;
803 	default:
804 			QDF_BUG(0);
805 	}
806 	return 0;
807 }
808 
#ifndef WLAN_MLO_MULTI_CHIP
/* MLO multi-chip stubs: all no-ops when WLAN_MLO_MULTI_CHIP is disabled */
static inline
void dp_soc_mlo_fill_params(struct dp_soc *soc,
			    struct cdp_soc_attach_params *params)
{
}

static inline
void dp_pdev_mlo_fill_params(struct dp_pdev *pdev,
			     struct cdp_pdev_attach_params *params)
{
}

static inline
void dp_mlo_update_link_to_pdev_map(struct dp_soc *soc, struct dp_pdev *pdev)
{
}

static inline
void dp_mlo_update_link_to_pdev_unmap(struct dp_soc *soc, struct dp_pdev *pdev)
{
}
#endif
832 
833 /*
834  * dp_txrx_set_vdev_param_be: target specific ops while setting vdev params
835  * @soc : DP soc handle
836  * @vdev: pointer to vdev structure
837  * @param: parameter type to get value
838  * @val: value
839  *
840  * return: QDF_STATUS
841  */
842 QDF_STATUS dp_txrx_set_vdev_param_be(struct dp_soc *soc,
843 				     struct dp_vdev *vdev,
844 				     enum cdp_vdev_param_type param,
845 				     cdp_config_param_type val);
846 
847 #endif
848