/*
 * Copyright (c) 2021 The Linux Foundation. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */
#ifndef __DP_BE_H
#define __DP_BE_H

#include <dp_types.h>
#include <hal_be_tx.h>

/* maximum number of entries in one page of the secondary page table */
#define DP_CC_SPT_PAGE_MAX_ENTRIES 512

/* maximum number of entries in the primary page table */
#define DP_CC_PPT_MAX_ENTRIES 1024

/* CMEM offset from the CMEM pool required for cookie conversion */
#define DP_CC_MEM_OFFSET_IN_CMEM 0

/* cookie conversion primary page table size: 4K */
#define DP_CC_PPT_MEM_SIZE 4096

/* CMEM offset from the CMEM pool required for FST */
#define DP_FST_MEM_OFFSET_IN_CMEM \
	(DP_CC_MEM_OFFSET_IN_CMEM + DP_CC_PPT_MEM_SIZE)

/* lower 9 bits of the Desc ID: entry offset within an SPT page */
#define DP_CC_DESC_ID_SPT_VA_OS_SHIFT 0

#define DP_CC_DESC_ID_SPT_VA_OS_MASK 0x1FF

#define DP_CC_DESC_ID_SPT_VA_OS_LSB 0

#define DP_CC_DESC_ID_SPT_VA_OS_MSB 8

/* upper 11 bits of the Desc ID: page offset within the PPT in CMEM */
#define DP_CC_DESC_ID_PPT_PAGE_OS_LSB 9

#define DP_CC_DESC_ID_PPT_PAGE_OS_MSB 19

#define DP_CC_DESC_ID_PPT_PAGE_OS_SHIFT 9

#define DP_CC_DESC_ID_PPT_PAGE_OS_MASK 0xFFE00

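/*
 * Worked example (illustrative, not part of the original source): the
 * 20-bit cookie 0x2A7C3 decomposes as
 *
 *	ppt_page_id = (0x2A7C3 & DP_CC_DESC_ID_PPT_PAGE_OS_MASK) >> 9
 *		    = 0x153 (PPT page 339)
 *	spt_va_id   = 0x2A7C3 & DP_CC_DESC_ID_SPT_VA_OS_MASK
 *		    = 0x1C3 (entry 451 of the 512 in that SPT page)
 */
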
/*
 * page 4K unaligned case: a single SPT page physical address
 * needs 8 bytes in the PPT
 */
#define DP_CC_PPT_ENTRY_SIZE_4K_UNALIGNED 8
/*
 * page 4K aligned case: a single SPT page physical address
 * needs 4 bytes in the PPT
 */
#define DP_CC_PPT_ENTRY_SIZE_4K_ALIGNED 4

/* 4K aligned case: number of bits HW appends to one PPT entry value */
#define DP_CC_PPT_ENTRY_HW_APEND_BITS_4K_ALIGNED 12

/**
 * struct dp_spt_page_desc - secondary page table page descriptor
 * @next: pointer to the next linked SPT page desc
 * @page_v_addr: page virtual address
 * @page_p_addr: page physical address
 * @ppt_index: entry index in the primary page table where this page's
 *		physical address is stored
 * @avail_entry_index: index of the next available entry for storing a
 *		TX/RX desc VA
 */
struct dp_spt_page_desc {
	struct dp_spt_page_desc *next;
	uint8_t *page_v_addr;
	qdf_dma_addr_t page_p_addr;
	uint16_t ppt_index;
	uint16_t avail_entry_index;
};

/**
 * struct dp_hw_cookie_conversion_t - main context for HW cookie conversion
 * @cmem_base: CMEM base address for primary page table setup
 * @total_page_num: total number of DDR pages allocated
 * @free_page_num: number of DDR pages available for TX/RX desc ID
 *		initialization
 * @page_desc_freelist: list of available page descs
 * @page_desc_base: page desc buffer base address
 * @page_pool: DDR page pool
 * @cc_lock: lock for page acquire/free
 */
struct dp_hw_cookie_conversion_t {
	uint32_t cmem_base;
	uint32_t total_page_num;
	uint32_t free_page_num;
	struct dp_spt_page_desc *page_desc_freelist;
	struct dp_spt_page_desc *page_desc_base;
	struct qdf_mem_multi_page_t page_pool;
	qdf_spinlock_t cc_lock;
};

/**
 * struct dp_spt_page_desc_list - container of SPT page desc list info
 * @spt_page_list_head: head of the SPT page descriptor list
 * @spt_page_list_tail: tail of the SPT page descriptor list
 * @num_spt_pages: number of SPT page descriptors allocated
 */
struct dp_spt_page_desc_list {
	struct dp_spt_page_desc *spt_page_list_head;
	struct dp_spt_page_desc *spt_page_list_tail;
	uint16_t num_spt_pages;
};

/* HW reads 8 bytes for the VA */
#define DP_CC_HW_READ_BYTES 8
#define DP_CC_SPT_PAGE_UPDATE_VA(_page_base_va, _index, _desc_va) \
	{ *((uintptr_t *)((_page_base_va) + (_index) * DP_CC_HW_READ_BYTES)) \
	= (uintptr_t)(_desc_va); }

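/*
 * Usage sketch (illustrative only; tx_desc and page_desc are
 * hypothetical locals): store a desc VA into the next free SPT entry
 * so HW can convert the cookie back to this VA later.
 *
 *	DP_CC_SPT_PAGE_UPDATE_VA(page_desc->page_v_addr,
 *				 page_desc->avail_entry_index,
 *				 tx_desc);
 *	page_desc->avail_entry_index++;
 */
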
/**
 * struct dp_tx_bank_profile - DP wrapper for TCL banks
 * @is_configured: flag indicating if this bank is configured
 * @ref_count: ref count indicating the number of users of the bank
 * @bank_config: HAL TX bank configuration
 */
struct dp_tx_bank_profile {
	uint8_t is_configured;
	qdf_atomic_t  ref_count;
	union hal_tx_bank_config bank_config;
};

/**
 * struct dp_soc_be - Extended DP soc for BE targets
 * @soc: dp soc structure
 * @num_bank_profiles: number of TX bank profiles
 * @tx_bank_lock: lock for protecting the bank profiles
 * @bank_profiles: bank profiles for the various TX banks
 * @hw_cc_ctx: core context of HW cookie conversion
 * @tx_spt_page_desc: SPT page descs allocated for the TX desc pools
 * @rx_spt_page_desc: SPT page descs allocated for the RX desc pools
 * @reo2ppe_ring: REO2PPE ring
 * @ppe2tcl_ring: PPE2TCL ring
 * @ppe_release_ring: PPE release ring
 */
struct dp_soc_be {
	struct dp_soc soc;
	uint8_t num_bank_profiles;
	qdf_mutex_t tx_bank_lock;
	struct dp_tx_bank_profile *bank_profiles;
	struct dp_hw_cookie_conversion_t hw_cc_ctx;
	struct dp_spt_page_desc_list tx_spt_page_desc[MAX_TXDESC_POOLS];
	struct dp_spt_page_desc_list rx_spt_page_desc[MAX_RXDESC_POOLS];
#ifdef WLAN_SUPPORT_PPEDS
	struct dp_srng reo2ppe_ring;
	struct dp_srng ppe2tcl_ring;
	struct dp_srng ppe_release_ring;
#endif
};

/* convert a struct dp_soc_be pointer to a struct dp_soc pointer */
#define DP_SOC_BE_GET_SOC(be_soc) ((struct dp_soc *)be_soc)

/**
 * struct dp_pdev_be - Extended DP pdev for BE targets
 * @pdev: dp pdev structure
 */
struct dp_pdev_be {
	struct dp_pdev pdev;
};

/**
 * struct dp_vdev_be - Extended DP vdev for BE targets
 * @vdev: dp vdev structure
 * @bank_id: bank_id to be used for TX
 * @vdev_id_check_en: flag indicating if the HW vdev_id check is enabled
 *		for this vdev
 */
struct dp_vdev_be {
	struct dp_vdev vdev;
	int8_t bank_id;
	uint8_t vdev_id_check_en;
};

/**
 * struct dp_peer_be - Extended DP peer for BE targets
 * @peer: dp peer structure
 */
struct dp_peer_be {
	struct dp_peer peer;
};

/**
 * dp_get_soc_context_size_be() - get context size for target-specific DP soc
 *
 * Return: size in bytes of the BE specific soc structure
 */
qdf_size_t dp_get_soc_context_size_be(void);

/**
 * dp_initialize_arch_ops_be() - initialize BE specific arch ops
 * @arch_ops: arch ops pointer
 *
 * Return: none
 */
void dp_initialize_arch_ops_be(struct dp_arch_ops *arch_ops);

/**
 * dp_get_context_size_be() - get BE specific size for peer/vdev/pdev/soc
 * @context_type: context type for which the size is needed
 *
 * Return: size in bytes for the context_type
 */
qdf_size_t dp_get_context_size_be(enum dp_context_type context_type);

/**
 * dp_get_be_soc_from_dp_soc() - get dp_soc_be from dp_soc
 * @soc: dp_soc pointer
 *
 * Return: dp_soc_be pointer
 */
static inline struct dp_soc_be *dp_get_be_soc_from_dp_soc(struct dp_soc *soc)
{
	return (struct dp_soc_be *)soc;
}

/**
 * dp_get_be_pdev_from_dp_pdev() - get dp_pdev_be from dp_pdev
 * @pdev: dp_pdev pointer
 *
 * Return: dp_pdev_be pointer
 */
static inline
struct dp_pdev_be *dp_get_be_pdev_from_dp_pdev(struct dp_pdev *pdev)
{
	return (struct dp_pdev_be *)pdev;
}

/**
 * dp_get_be_vdev_from_dp_vdev() - get dp_vdev_be from dp_vdev
 * @vdev: dp_vdev pointer
 *
 * Return: dp_vdev_be pointer
 */
static inline
struct dp_vdev_be *dp_get_be_vdev_from_dp_vdev(struct dp_vdev *vdev)
{
	return (struct dp_vdev_be *)vdev;
}

/**
 * dp_get_be_peer_from_dp_peer() - get dp_peer_be from dp_peer
 * @peer: dp_peer pointer
 *
 * Return: dp_peer_be pointer
 */
static inline
struct dp_peer_be *dp_get_be_peer_from_dp_peer(struct dp_peer *peer)
{
	return (struct dp_peer_be *)peer;
}

/**
 * dp_cc_spt_page_desc_alloc() - allocate SPT DDR page descriptors from pool
 * @be_soc: beryllium soc handle
 * @list_head: pointer to page desc list head
 * @list_tail: pointer to page desc list tail
 * @num_desc: number of TX/RX descs required in the SPT pages
 *
 * Return: number of SPT page descs allocated
 */
uint16_t dp_cc_spt_page_desc_alloc(struct dp_soc_be *be_soc,
				   struct dp_spt_page_desc **list_head,
				   struct dp_spt_page_desc **list_tail,
				   uint16_t num_desc);
/**
 * dp_cc_spt_page_desc_free() - free SPT DDR page descriptors back to pool
 * @be_soc: beryllium soc handle
 * @list_head: pointer to page desc list head
 * @list_tail: pointer to page desc list tail
 * @page_nums: number of page descs freed back to the pool
 *
 * Return: none
 */
void dp_cc_spt_page_desc_free(struct dp_soc_be *be_soc,
			      struct dp_spt_page_desc **list_head,
			      struct dp_spt_page_desc **list_tail,
			      uint16_t page_nums);

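/*
 * Illustrative alloc/free pairing (variable names are hypothetical):
 * reserve enough SPT pages to hold num_desc descriptor VAs during pool
 * attach, then return them to the pool on detach.
 *
 *	struct dp_spt_page_desc *head = NULL, *tail = NULL;
 *	uint16_t num_pages;
 *
 *	num_pages = dp_cc_spt_page_desc_alloc(be_soc, &head, &tail,
 *					      num_desc);
 *	...
 *	dp_cc_spt_page_desc_free(be_soc, &head, &tail, num_pages);
 */
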
/**
 * dp_cc_desc_id_generate() - generate the SW cookie ID according to
 *				whether the DDR page is 4K aligned or not
 * @ppt_index: offset index in the primary page table
 * @spt_index: offset index in the secondary DDR page
 *
 * Generate the SW cookie ID to match what HW expects
 *
 * Return: cookie ID
 */
static inline uint32_t dp_cc_desc_id_generate(uint16_t ppt_index,
					      uint16_t spt_index)
{
	/*
	 * For the 4K aligned case, the CMEM entry size is 4 bytes:
	 * the HW index in bit19~bit10 = ppt_index / 2 and the high-32-bits
	 * flag in bit9 = ppt_index % 2, so bit19~bit9 taken together are
	 * exactly the same as the original ppt_index value.
	 * For the 4K unaligned case, the CMEM entry size is 8 bytes:
	 * bit19~bit9 hold the HW index value directly, same as ppt_index.
	 */
	return ((((uint32_t)ppt_index) << DP_CC_DESC_ID_PPT_PAGE_OS_SHIFT) |
		spt_index);
}

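/*
 * Encoding sketch (illustrative; page_desc is a hypothetical
 * struct dp_spt_page_desc pointer):
 *
 *	id = dp_cc_desc_id_generate(page_desc->ppt_index,
 *				    page_desc->avail_entry_index);
 *
 * e.g. ppt_index 339 and spt_index 451 yield (339 << 9) | 451 = 0x2A7C3,
 * matching the bit layout defined above.
 */
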
/**
 * dp_cc_desc_find() - find a TX/RX desc virtual address by ID
 * @soc: dp soc handle
 * @desc_id: TX/RX desc ID
 *
 * Return: TX/RX desc virtual address
 */
static inline uintptr_t dp_cc_desc_find(struct dp_soc *soc,
					uint32_t desc_id)
{
	struct dp_soc_be *be_soc;
	struct dp_hw_cookie_conversion_t *cc_ctx;
	uint16_t ppt_page_id, spt_va_id;
	uint8_t *spt_page_va;

	be_soc = dp_get_be_soc_from_dp_soc(soc);
	cc_ctx = &be_soc->hw_cc_ctx;
	ppt_page_id = (desc_id & DP_CC_DESC_ID_PPT_PAGE_OS_MASK) >>
			DP_CC_DESC_ID_PPT_PAGE_OS_SHIFT;

	spt_va_id = (desc_id & DP_CC_DESC_ID_SPT_VA_OS_MASK) >>
			DP_CC_DESC_ID_SPT_VA_OS_SHIFT;

	/*
	 * The PPT index in CMEM follows the same order in which the pages
	 * were placed in the page desc array during initialization.
	 * Each entry in a DDR page is 64 bits; on 32-bit systems only the
	 * lower 32 bits of the VA value are needed.
	 */
	spt_page_va = cc_ctx->page_desc_base[ppt_page_id].page_v_addr;

	return (*((uintptr_t *)(spt_page_va +
				spt_va_id * DP_CC_HW_READ_BYTES)));
}

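/*
 * Round-trip sketch (illustrative; the cast target is an assumption, a
 * TX completion path would use its own desc type): a VA stored via
 * DP_CC_SPT_PAGE_UPDATE_VA is recovered from the cookie carried in the
 * HW completion descriptor.
 *
 *	uint32_t id = dp_cc_desc_id_generate(ppt_index, spt_index);
 *	struct dp_tx_desc_s *tx_desc =
 *			(struct dp_tx_desc_s *)dp_cc_desc_find(soc, id);
 */
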
#ifdef WLAN_FEATURE_NEAR_FULL_IRQ
/**
 * enum dp_srng_near_full_levels - SRNG near-full levels
 * @DP_SRNG_THRESH_SAFE: SRNG level safe for yielding the near-full mode
 *		of processing the entries in the SRNG
 * @DP_SRNG_THRESH_NEAR_FULL: SRNG level that enters the near-full mode
 *		of processing the entries in the SRNG
 * @DP_SRNG_THRESH_CRITICAL: SRNG level that enters the critical level of
 *		fullness, where drastic steps need to be taken for
 *		processing the entries in the SRNG
 */
enum dp_srng_near_full_levels {
	DP_SRNG_THRESH_SAFE,
	DP_SRNG_THRESH_NEAR_FULL,
	DP_SRNG_THRESH_CRITICAL,
};

/**
 * dp_srng_check_ring_near_full() - Check if the SRNG was marked as near-full
 *				by its corresponding near-full irq handler
 * @soc: Datapath SoC handle
 * @dp_srng: datapath handle for this SRNG
 *
 * Return: 1, if the srng was marked as near-full
 *	   0, if the srng was not marked as near-full
 */
static inline int dp_srng_check_ring_near_full(struct dp_soc *soc,
					       struct dp_srng *dp_srng)
{
	return qdf_atomic_read(&dp_srng->near_full);
}

/**
 * dp_srng_get_near_full_level() - Check the number of available entries in
 *			the consumer srng and return the level of the srng
 *			near-full state.
 * @soc: Datapath SoC Handle [To be validated by the caller]
 * @dp_srng: datapath handle for this SRNG
 *
 * Return: near-full level
 */
static inline int
dp_srng_get_near_full_level(struct dp_soc *soc, struct dp_srng *dp_srng)
{
	uint32_t num_valid;

	num_valid = hal_srng_dst_num_valid_nolock(soc->hal_soc,
						  dp_srng->hal_srng,
						  true);

	if (num_valid > dp_srng->crit_thresh)
		return DP_SRNG_THRESH_CRITICAL;
	else if (num_valid < dp_srng->safe_thresh)
		return DP_SRNG_THRESH_SAFE;
	else
		return DP_SRNG_THRESH_NEAR_FULL;
}

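/*
 * Illustrative thresholds (hypothetical numbers): with safe_thresh = 64
 * and crit_thresh = 512 on a ring, 40 valid entries map to
 * DP_SRNG_THRESH_SAFE, 300 to DP_SRNG_THRESH_NEAR_FULL and 600 to
 * DP_SRNG_THRESH_CRITICAL.
 */
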
#define DP_SRNG_PER_LOOP_NF_REAP_MULTIPLIER	2

/**
 * _dp_srng_test_and_update_nf_params() - Test the near-full level and update
 *			the reap limit and flags to reflect the state.
 * @soc: Datapath soc handle
 * @srng: Datapath handle for the srng
 * @max_reap_limit: [Output Param] Buffer to set the max_reap_limit as
 *			per the near-full state
 *
 * Return: 1, if the srng is near full
 *	   0, if the srng is not near full
 */
static inline int
_dp_srng_test_and_update_nf_params(struct dp_soc *soc,
				   struct dp_srng *srng,
				   int *max_reap_limit)
{
	int ring_near_full = 0, near_full_level;

	if (dp_srng_check_ring_near_full(soc, srng)) {
		near_full_level = dp_srng_get_near_full_level(soc, srng);
		switch (near_full_level) {
		case DP_SRNG_THRESH_CRITICAL:
			/* Currently not doing anything special here */
			/* fall through */
		case DP_SRNG_THRESH_NEAR_FULL:
			ring_near_full = 1;
			*max_reap_limit *= DP_SRNG_PER_LOOP_NF_REAP_MULTIPLIER;
			break;
		case DP_SRNG_THRESH_SAFE:
			qdf_atomic_set(&srng->near_full, 0);
			ring_near_full = 0;
			break;
		default:
			qdf_assert(0);
			break;
		}
	}

	return ring_near_full;
}
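
/*
 * Usage sketch (illustrative; base_reap_limit is hypothetical): a ring
 * handler seeds max_reap_limit with its normal per-loop quota and lets
 * this helper scale it up when the near-full IRQ has flagged the ring.
 *
 *	int max_reap_limit = base_reap_limit;
 *	int near_full = _dp_srng_test_and_update_nf_params(soc, srng,
 *							   &max_reap_limit);
 *	if (near_full)
 *		reap up to max_reap_limit entries before yielding
 */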
#else
static inline int
_dp_srng_test_and_update_nf_params(struct dp_soc *soc,
				   struct dp_srng *srng,
				   int *max_reap_limit)
{
	return 0;
}
#endif /* WLAN_FEATURE_NEAR_FULL_IRQ */

#endif /* __DP_BE_H */