/*
 * Copyright (c) 2016-2017 The Linux Foundation. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met:
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above
 *       copyright notice, this list of conditions and the following
 *       disclaimer in the documentation and/or other materials provided
 *       with the distribution.
 *     * Neither the name of The Linux Foundation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED
 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
 * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
 * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
 * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#ifndef _HAL_API_H_
#define _HAL_API_H_

#include "qdf_types.h"
#include "hal_internal.h"
#include "hif_io32.h"
#include "rx_msdu_link.h"
#include "rx_reo_queue.h"
#include "rx_reo_queue_ext.h"

/**
 * hal_attach - Initialize HAL layer
 * @hif_handle: Opaque HIF handle
 * @qdf_dev: QDF device
 *
 * Return: Opaque HAL SOC handle
 *		 NULL on failure
 *
 * This function should be called as part of HIF initialization (for accessing
 * copy engines). DP layer will get hal_soc handle using hif_get_hal_handle()
 */
extern void *hal_attach(void *hif_handle, qdf_device_t qdf_dev);

/**
 * hal_detach - Detach HAL layer
 * @hal_soc: HAL SOC handle
 *
 * This function should be called as part of HIF detach
 */
extern void hal_detach(void *hal_soc);

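/*
 * Example (illustrative sketch only, not part of this API): typical
 * attach/detach sequencing in HIF init/teardown paths; 'hif_ctx' and
 * 'qdf_dev' are assumed to come from the caller's HIF/QDF setup:
 *
 *	void *hal_soc = hal_attach(hif_ctx, qdf_dev);
 *
 *	if (!hal_soc)
 *		return QDF_STATUS_E_FAILURE;
 *	...
 *	hal_detach(hal_soc);
 */
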
/* SRNG type to be passed in APIs hal_srng_get_entrysize and hal_srng_setup */
enum hal_ring_type {
	REO_DST,
	REO_EXCEPTION,
	REO_REINJECT,
	REO_CMD,
	REO_STATUS,
	TCL_DATA,
	TCL_CMD,
	TCL_STATUS,
	CE_SRC,
	CE_DST,
	CE_DST_STATUS,
	WBM_IDLE_LINK,
	SW2WBM_RELEASE,
	WBM2SW_RELEASE,
	RXDMA_BUF,
	RXDMA_DST,
	RXDMA_MONITOR_BUF,
	RXDMA_MONITOR_STATUS,
	RXDMA_MONITOR_DST,
	MAX_RING_TYPES
};

/* SRNG flags passed in hal_srng_params.flags */
#define HAL_SRNG_MSI_SWAP				0x00000008
#define HAL_SRNG_RING_PTR_SWAP			0x00000010
#define HAL_SRNG_DATA_TLV_SWAP			0x00000020
#define HAL_SRNG_LOW_THRES_INTR_ENABLE	0x00010000
#define HAL_SRNG_MSI_INTR				0x00020000

/**
 * hal_srng_get_entrysize - Returns size of ring entry in bytes. Should be
 * used by callers for calculating the size of memory to be allocated before
 * calling hal_srng_setup to setup the ring
 *
 * @hal_soc: Opaque HAL SOC handle
 * @ring_type: one of the types from hal_ring_type
 *
 * Return: size of ring entry in bytes
 */
extern uint32_t hal_srng_get_entrysize(void *hal_soc, int ring_type);

/* SRNG parameters to be passed to hal_srng_setup */
struct hal_srng_params {
	/* Physical base address of the ring */
	qdf_dma_addr_t ring_base_paddr;
	/* Virtual base address of the ring */
	void *ring_base_vaddr;
	/* Number of entries in ring */
	uint32_t num_entries;
	/* max transfer length */
	uint16_t max_buffer_length;
	/* MSI Address */
	qdf_dma_addr_t msi_addr;
	/* MSI data */
	uint32_t msi_data;
	/* Interrupt timer threshold - in microseconds */
	uint32_t intr_timer_thres_us;
	/* Interrupt batch counter threshold - in number of ring entries */
	uint32_t intr_batch_cntr_thres_entries;
	/* Low threshold - in number of ring entries
	 * (valid for src rings only)
	 */
	uint32_t low_threshold;
	/* Misc flags */
	uint32_t flags;
	/* Unique ring id */
	uint8_t ring_id;
};

/* hal_construct_shadow_config() - initialize the shadow registers for dp rings
 * @hal_soc: hal handle
 *
 * Return: QDF_STATUS_OK on success
 */
extern QDF_STATUS hal_construct_shadow_config(void *hal_soc);

/* hal_set_one_shadow_config() - add a config for the specified ring
 * @hal_soc: hal handle
 * @ring_type: ring type
 * @ring_num: ring num
 *
 * The ring type and ring num uniquely specify the ring.  After this call,
 * the hp/tp will be added as the next entry in the shadow register
 * configuration table.  The hal code will use the shadow register address
 * in place of the hp/tp address.
 *
 * This function is exposed so that the CE module can skip configuring shadow
 * registers for unused rings and rings assigned to the firmware.
 *
 * Return: QDF_STATUS_OK on success
 */
extern QDF_STATUS hal_set_one_shadow_config(void *hal_soc, int ring_type,
					    int ring_num);

/**
 * hal_get_shadow_config() - retrieve the config table
 * @hal_soc: hal handle
 * @shadow_config: will be set to point to the config table
 * @num_shadow_registers_configured: will contain the number of valid entries
 */
extern void hal_get_shadow_config(void *hal_soc,
				  struct pld_shadow_reg_v2_cfg **shadow_config,
				  int *num_shadow_registers_configured);

/**
 * hal_srng_setup - Initialize HW SRNG ring.
 *
 * @hal_soc: Opaque HAL SOC handle
 * @ring_type: one of the types from hal_ring_type
 * @ring_num: Ring number if there are multiple rings of
 *		same type (starting from 0)
 * @mac_id: valid MAC Id should be passed if ring type is one of lmac rings
 * @ring_params: SRNG ring params in hal_srng_params structure.
 *
 * Callers are expected to allocate contiguous ring memory of size
 * 'num_entries * entry_size' bytes and pass the physical and virtual base
 * addresses through 'ring_base_paddr' and 'ring_base_vaddr' in hal_srng_params
 * structure. Ring base address should be 8 byte aligned and size of each ring
 * entry should be queried using the API hal_srng_get_entrysize
 *
 * Return: Opaque pointer to ring on success
 *		 NULL on failure (if given ring is not available)
 */
extern void *hal_srng_setup(void *hal_soc, int ring_type, int ring_num,
	int mac_id, struct hal_srng_params *ring_params);

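/*
 * Example (illustrative sketch only): allocating ring memory and setting up
 * an SRNG. The QDF consistent-memory helper and a 256-entry REO_DST ring
 * are assumptions for illustration:
 *
 *	struct hal_srng_params params = {0};
 *	uint32_t entry_size = hal_srng_get_entrysize(hal_soc, REO_DST);
 *	uint32_t num_entries = 256;
 *	void *hal_ring;
 *
 *	params.ring_base_vaddr = qdf_mem_alloc_consistent(qdf_dev,
 *		qdf_dev->dev, num_entries * entry_size,
 *		&params.ring_base_paddr);
 *	params.num_entries = num_entries;
 *	hal_ring = hal_srng_setup(hal_soc, REO_DST, 0, 0, &params);
 */
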
/**
 * hal_srng_cleanup - Deinitialize HW SRNG ring.
 * @hal_soc: Opaque HAL SOC handle
 * @hal_srng: Opaque HAL SRNG pointer
 */
extern void hal_srng_cleanup(void *hal_soc, void *hal_srng);

/**
 * hal_srng_access_start_unlocked - Start ring access (unlocked). Should use
 * hal_srng_access_start if locked access is required
 *
 * @hal_soc: Opaque HAL SOC handle
 * @hal_ring: Ring pointer (Source or Destination ring)
 *
 * Return: 0 on success; error on failure
 */
static inline int hal_srng_access_start_unlocked(void *hal_soc, void *hal_ring)
{
	struct hal_srng *srng = (struct hal_srng *)hal_ring;

	if (srng->ring_dir == HAL_SRNG_SRC_RING)
		srng->u.src_ring.cached_tp =
			*(volatile uint32_t *)(srng->u.src_ring.tp_addr);
	else
		srng->u.dst_ring.cached_hp =
			*(volatile uint32_t *)(srng->u.dst_ring.hp_addr);

	return 0;
}

/**
 * hal_srng_access_start - Start (locked) ring access
 *
 * @hal_soc: Opaque HAL SOC handle
 * @hal_ring: Ring pointer (Source or Destination ring)
 *
 * Return: 0 on success; error on failure
 */
static inline int hal_srng_access_start(void *hal_soc, void *hal_ring)
{
	struct hal_srng *srng = (struct hal_srng *)hal_ring;

	SRNG_LOCK(&(srng->lock));

	return hal_srng_access_start_unlocked(hal_soc, hal_ring);
}

/**
 * hal_srng_dst_get_next - Get next entry from a destination ring and move
 * cached tail pointer
 *
 * @hal_soc: Opaque HAL SOC handle
 * @hal_ring: Destination ring pointer
 *
 * Return: Opaque pointer for next ring entry; NULL on failure
 */
static inline void *hal_srng_dst_get_next(void *hal_soc, void *hal_ring)
{
	struct hal_srng *srng = (struct hal_srng *)hal_ring;
	volatile uint32_t *desc = &(srng->ring_base_vaddr[srng->u.dst_ring.tp]);
	uint32_t desc_loop_cnt;

	desc_loop_cnt = (desc[srng->entry_size - 1] & SRNG_LOOP_CNT_MASK)
		>> SRNG_LOOP_CNT_LSB;

	if (srng->u.dst_ring.loop_cnt == desc_loop_cnt) {
		/* TODO: Using % is expensive, but we have to do this since
		 * size of some SRNG rings is not power of 2 (due to descriptor
		 * sizes). Need to create separate API for rings used
		 * per-packet, with sizes power of 2 (TCL2SW, REO2SW,
		 * SW2RXDMA and CE rings)
		 */
		srng->u.dst_ring.tp = (srng->u.dst_ring.tp + srng->entry_size) %
			srng->ring_size;

		srng->u.dst_ring.loop_cnt = (srng->u.dst_ring.loop_cnt +
			!srng->u.dst_ring.tp) &
			(SRNG_LOOP_CNT_MASK >> SRNG_LOOP_CNT_LSB);
		/* TODO: Confirm if loop count mask is same for all rings */
		return (void *)desc;
	}

	return NULL;
}

/**
 * hal_srng_dst_peek - Get next entry from a ring without moving tail pointer.
 * hal_srng_dst_get_next should be called subsequently to move the tail pointer
 * TODO: See if we need an optimized version of get_next that doesn't check for
 * loop_cnt
 *
 * @hal_soc: Opaque HAL SOC handle
 * @hal_ring: Destination ring pointer
 *
 * Return: Opaque pointer for next ring entry; NULL on failure
 */
static inline void *hal_srng_dst_peek(void *hal_soc, void *hal_ring)
{
	struct hal_srng *srng = (struct hal_srng *)hal_ring;
	uint32_t *desc = &(srng->ring_base_vaddr[srng->u.dst_ring.tp]);
	uint32_t desc_loop_cnt;

	desc_loop_cnt = (desc[srng->entry_size - 1] & SRNG_LOOP_CNT_MASK)
		>> SRNG_LOOP_CNT_LSB;

	if (srng->u.dst_ring.loop_cnt == desc_loop_cnt)
		return (void *)desc;

	return NULL;
}

/**
 * hal_srng_dst_num_valid - Returns number of valid entries (to be processed
 * by SW) in destination ring
 *
 * @hal_soc: Opaque HAL SOC handle
 * @hal_ring: Destination ring pointer
 * @sync_hw_ptr: Sync cached head pointer with HW
 *
 * Return: number of valid entries
 */
static inline uint32_t hal_srng_dst_num_valid(void *hal_soc, void *hal_ring,
	int sync_hw_ptr)
{
	struct hal_srng *srng = (struct hal_srng *)hal_ring;
	uint32_t hp;
	uint32_t tp = srng->u.dst_ring.tp;

	if (sync_hw_ptr) {
		hp = *(srng->u.dst_ring.hp_addr);
		srng->u.dst_ring.cached_hp = hp;
	} else {
		hp = srng->u.dst_ring.cached_hp;
	}

	if (hp >= tp)
		return (hp - tp) / srng->entry_size;
	else
		return (srng->ring_size - tp + hp) / srng->entry_size;
}

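/*
 * Example (illustrative sketch only): a typical destination ring consume
 * loop built from the accessors above; process_entry() is a hypothetical
 * caller-side handler:
 *
 *	void *entry;
 *
 *	hal_srng_access_start(hal_soc, hal_ring);
 *	while ((entry = hal_srng_dst_get_next(hal_soc, hal_ring)))
 *		process_entry(hal_soc, entry);
 *	hal_srng_access_end(hal_soc, hal_ring);
 */
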
/**
 * hal_srng_src_reap_next - Reap next entry from a source ring and move reap
 * pointer. This can be used to release any buffers associated with completed
 * ring entries. Note that this should not be used for posting new descriptor
 * entries. Posting of new entries should be done only using
 * hal_srng_src_get_next_reaped when this function is used for reaping.
 *
 * @hal_soc: Opaque HAL SOC handle
 * @hal_ring: Source ring pointer
 *
 * Return: Opaque pointer for next ring entry; NULL on failure
 */
static inline void *hal_srng_src_reap_next(void *hal_soc, void *hal_ring)
{
	struct hal_srng *srng = (struct hal_srng *)hal_ring;
	uint32_t *desc;

	/* TODO: Using % is expensive, but we have to do this since
	 * size of some SRNG rings is not power of 2 (due to descriptor
	 * sizes). Need to create separate API for rings used
	 * per-packet, with sizes power of 2 (TCL2SW, REO2SW,
	 * SW2RXDMA and CE rings)
	 */
	uint32_t next_reap_hp = (srng->u.src_ring.reap_hp + srng->entry_size) %
		srng->ring_size;

	if (next_reap_hp != srng->u.src_ring.cached_tp) {
		desc = &(srng->ring_base_vaddr[next_reap_hp]);
		srng->u.src_ring.reap_hp = next_reap_hp;
		return (void *)desc;
	}

	return NULL;
}

/**
 * hal_srng_src_get_next_reaped - Get next entry from a source ring that is
 * already reaped using hal_srng_src_reap_next, for posting new entries to
 * the ring
 *
 * @hal_soc: Opaque HAL SOC handle
 * @hal_ring: Source ring pointer
 *
 * Return: Opaque pointer for next (reaped) source ring entry; NULL on failure
 */
static inline void *hal_srng_src_get_next_reaped(void *hal_soc, void *hal_ring)
{
	struct hal_srng *srng = (struct hal_srng *)hal_ring;
	uint32_t *desc;

	if (srng->u.src_ring.hp != srng->u.src_ring.reap_hp) {
		desc = &(srng->ring_base_vaddr[srng->u.src_ring.hp]);
		srng->u.src_ring.hp = (srng->u.src_ring.hp + srng->entry_size) %
			srng->ring_size;

		return (void *)desc;
	}

	return NULL;
}

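/*
 * Example (illustrative sketch only): reap completions first (e.g. to free
 * associated buffers), then post new descriptors only into already-reaped
 * slots; free_buf() and fill_desc() are hypothetical caller-side helpers:
 *
 *	void *entry;
 *
 *	while ((entry = hal_srng_src_reap_next(hal_soc, hal_ring)))
 *		free_buf(entry);
 *
 *	while ((entry = hal_srng_src_get_next_reaped(hal_soc, hal_ring)))
 *		fill_desc(entry);
 */
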
/**
 * hal_srng_src_done_val - Returns number of source ring entries that are
 * done (already consumed by HW) and are yet to be reaped
 *
 * @hal_soc: Opaque HAL SOC handle
 * @hal_ring: Source ring pointer
 *
 * Return: Number of done entries
 */
static inline uint32_t hal_srng_src_done_val(void *hal_soc, void *hal_ring)
{
	struct hal_srng *srng = (struct hal_srng *)hal_ring;
	/* TODO: Using % is expensive, but we have to do this since
	 * size of some SRNG rings is not power of 2 (due to descriptor
	 * sizes). Need to create separate API for rings used
	 * per-packet, with sizes power of 2 (TCL2SW, REO2SW,
	 * SW2RXDMA and CE rings)
	 */
	uint32_t next_reap_hp = (srng->u.src_ring.reap_hp + srng->entry_size) %
		srng->ring_size;

	if (next_reap_hp == srng->u.src_ring.cached_tp)
		return 0;

	if (srng->u.src_ring.cached_tp > next_reap_hp)
		return (srng->u.src_ring.cached_tp - next_reap_hp) /
			srng->entry_size;
	else
		return ((srng->ring_size - next_reap_hp) +
			srng->u.src_ring.cached_tp) / srng->entry_size;
}

/**
 * hal_srng_src_get_next - Get next entry from a source ring and move cached
 * head pointer
 *
 * @hal_soc: Opaque HAL SOC handle
 * @hal_ring: Source ring pointer
 *
 * Return: Opaque pointer for next ring entry; NULL on failure
 */
static inline void *hal_srng_src_get_next(void *hal_soc, void *hal_ring)
{
	struct hal_srng *srng = (struct hal_srng *)hal_ring;
	uint32_t *desc;
	/* TODO: Using % is expensive, but we have to do this since
	 * size of some SRNG rings is not power of 2 (due to descriptor
	 * sizes). Need to create separate API for rings used
	 * per-packet, with sizes power of 2 (TCL2SW, REO2SW,
	 * SW2RXDMA and CE rings)
	 */
	uint32_t next_hp = (srng->u.src_ring.hp + srng->entry_size) %
		srng->ring_size;

	if (next_hp != srng->u.src_ring.cached_tp) {
		desc = &(srng->ring_base_vaddr[srng->u.src_ring.hp]);
		srng->u.src_ring.hp = next_hp;
		/* TODO: Since reap function is not used by all rings, we can
		 * remove the following update of reap_hp in this function
		 * if we can ensure that only hal_srng_src_get_next_reaped
		 * is used for the rings requiring reap functionality
		 */
		srng->u.src_ring.reap_hp = next_hp;
		return (void *)desc;
	}

	return NULL;
}

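/*
 * Example (illustrative sketch only): posting descriptors to a source ring
 * when the reap path is not used; 'num_to_post' and fill_desc() are
 * hypothetical caller-side names:
 *
 *	void *entry;
 *
 *	hal_srng_access_start(hal_soc, hal_ring);
 *	while (num_to_post-- &&
 *	       (entry = hal_srng_src_get_next(hal_soc, hal_ring)))
 *		fill_desc(entry);
 *	hal_srng_access_end(hal_soc, hal_ring);
 */
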
/**
 * hal_srng_src_peek - Get next entry from a ring without moving head pointer.
 * hal_srng_src_get_next should be called subsequently to move the head pointer
 *
 * @hal_soc: Opaque HAL SOC handle
 * @hal_ring: Source ring pointer
 *
 * Return: Opaque pointer for next ring entry; NULL on failure
 */
static inline void *hal_srng_src_peek(void *hal_soc, void *hal_ring)
{
	struct hal_srng *srng = (struct hal_srng *)hal_ring;
	uint32_t *desc;

	/* TODO: Using % is expensive, but we have to do this since
	 * size of some SRNG rings is not power of 2 (due to descriptor
	 * sizes). Need to create separate API for rings used
	 * per-packet, with sizes power of 2 (TCL2SW, REO2SW,
	 * SW2RXDMA and CE rings)
	 */
	if (((srng->u.src_ring.hp + srng->entry_size) %
		srng->ring_size) != srng->u.src_ring.cached_tp) {
		desc = &(srng->ring_base_vaddr[srng->u.src_ring.hp]);
		return (void *)desc;
	}

	return NULL;
}

/**
 * hal_srng_src_num_avail - Returns number of available entries in src ring
 *
 * @hal_soc: Opaque HAL SOC handle
 * @hal_ring: Source ring pointer
 * @sync_hw_ptr: Sync cached tail pointer with HW
 *
 * Return: number of available entries
 */
static inline uint32_t hal_srng_src_num_avail(void *hal_soc,
	void *hal_ring, int sync_hw_ptr)
{
	struct hal_srng *srng = (struct hal_srng *)hal_ring;
	uint32_t tp;
	uint32_t hp = srng->u.src_ring.hp;

	if (sync_hw_ptr) {
		tp = *(srng->u.src_ring.tp_addr);
		srng->u.src_ring.cached_tp = tp;
	} else {
		tp = srng->u.src_ring.cached_tp;
	}

	if (tp > hp)
		return ((tp - hp) / srng->entry_size) - 1;
	else
		return ((srng->ring_size - hp + tp) / srng->entry_size) - 1;
}

/**
 * hal_srng_access_end_unlocked - End ring access (unlocked) - update cached
 * ring head/tail pointers to HW.
 * This should be used only if hal_srng_access_start_unlocked was used to
 * start ring access
 *
 * @hal_soc: Opaque HAL SOC handle
 * @hal_ring: Ring pointer (Source or Destination ring)
 *
 * Return: none
 */
static inline void hal_srng_access_end_unlocked(void *hal_soc, void *hal_ring)
{
	struct hal_srng *srng = (struct hal_srng *)hal_ring;

	/* TODO: See if we need a write memory barrier here */
	if (srng->flags & HAL_SRNG_LMAC_RING) {
		/* For LMAC rings, ring pointer updates are done through FW and
		 * hence written to a shared memory location that is read by FW
		 */
		if (srng->ring_dir == HAL_SRNG_SRC_RING)
			*(srng->u.src_ring.hp_addr) = srng->u.src_ring.hp;
		else
			*(srng->u.dst_ring.tp_addr) = srng->u.dst_ring.tp;
	} else {
		if (srng->ring_dir == HAL_SRNG_SRC_RING)
			hif_write32_mb(srng->u.src_ring.hp_addr,
				srng->u.src_ring.hp);
		else
			hif_write32_mb(srng->u.dst_ring.tp_addr,
				srng->u.dst_ring.tp);
	}
}

/**
 * hal_srng_access_end - Unlock ring access and update cached ring head/tail
 * pointers to HW
 * This should be used only if hal_srng_access_start was used to start ring
 * access
 *
 * @hal_soc: Opaque HAL SOC handle
 * @hal_ring: Ring pointer (Source or Destination ring)
 *
 * Return: none
 */
static inline void hal_srng_access_end(void *hal_soc, void *hal_ring)
{
	struct hal_srng *srng = (struct hal_srng *)hal_ring;

	hal_srng_access_end_unlocked(hal_soc, hal_ring);
	SRNG_UNLOCK(&(srng->lock));
}

/**
 * hal_srng_access_end_reap - Unlock ring access
 * This should be used only if hal_srng_access_start was used to start ring
 * access and should be used only while reaping SRC ring completions
 *
 * @hal_soc: Opaque HAL SOC handle
 * @hal_ring: Ring pointer (Source or Destination ring)
 *
 * Return: none
 */
static inline void hal_srng_access_end_reap(void *hal_soc, void *hal_ring)
{
	struct hal_srng *srng = (struct hal_srng *)hal_ring;

	SRNG_UNLOCK(&(srng->lock));
}

/* TODO: Check if the following definitions are available in HW headers */
#define WBM_IDLE_DESC_LIST 1
#define WBM_IDLE_SCATTER_BUF_SIZE 32704
#define NUM_MPDUS_PER_LINK_DESC 6
#define NUM_MSDUS_PER_LINK_DESC 7
#define REO_QUEUE_DESC_ALIGN 128

#define LINK_DESC_SIZE (NUM_OF_DWORDS_RX_MSDU_LINK << 2)
#define LINK_DESC_ALIGN 128

/* Number of mpdu link pointers is 9 in case of TX_MPDU_QUEUE_HEAD and 14 in
 * case of TX_MPDU_QUEUE_EXT. We are defining a common average count here
 */
#define NUM_MPDU_LINKS_PER_QUEUE_DESC 12

/* TODO: Check with HW team on the scatter buffer size supported. As per WBM
 * MLD, scatter_buffer_size in IDLE_LIST_CONTROL register is 9 bits and size
 * should be specified in 16 word units. But the number of bits defined for
 * this field in HW header files is 5.
 */
#define WBM_IDLE_SCATTER_BUF_NEXT_PTR_SIZE 8

/**
 * hal_set_link_desc_addr - Setup link descriptor in a buffer_addr_info
 * HW structure
 *
 * @desc: Descriptor entry (from WBM_IDLE_LINK ring)
 * @cookie: SW cookie for the buffer/descriptor
 * @link_desc_paddr: Physical address of link descriptor entry
 *
 */
static inline void hal_set_link_desc_addr(void *desc, uint32_t cookie,
	qdf_dma_addr_t link_desc_paddr)
{
	uint32_t *buf_addr = (uint32_t *)desc;

	HAL_DESC_SET_FIELD(buf_addr, BUFFER_ADDR_INFO_0, BUFFER_ADDR_31_0,
		link_desc_paddr & 0xffffffff);
	HAL_DESC_SET_FIELD(buf_addr, BUFFER_ADDR_INFO_1, BUFFER_ADDR_39_32,
		(uint64_t)link_desc_paddr >> 32);
	HAL_DESC_SET_FIELD(buf_addr, BUFFER_ADDR_INFO_1, RETURN_BUFFER_MANAGER,
		WBM_IDLE_DESC_LIST);
	HAL_DESC_SET_FIELD(buf_addr, BUFFER_ADDR_INFO_1, SW_BUFFER_COOKIE,
		cookie);
}

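/*
 * Example (illustrative sketch only): filling a WBM_IDLE_LINK ring entry
 * with a link descriptor address; 'cookie' and 'link_desc_paddr' are
 * assumed to come from the caller's descriptor bookkeeping:
 *
 *	void *entry = hal_srng_src_get_next(hal_soc, wbm_idle_link_ring);
 *
 *	if (entry)
 *		hal_set_link_desc_addr(entry, cookie, link_desc_paddr);
 */
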
/**
 * hal_idle_list_scatter_buf_size - Get the size of each scatter buffer
 * in an idle list
 *
 * @hal_soc: Opaque HAL SOC handle
 *
 * Return: scatter buffer size in bytes
 */
static inline uint32_t hal_idle_list_scatter_buf_size(void *hal_soc)
{
	return WBM_IDLE_SCATTER_BUF_SIZE;
}

/**
 * hal_get_link_desc_size - Get the size of each link descriptor
 *
 * @hal_soc: Opaque HAL SOC handle
 *
 * Return: link descriptor size in bytes
 */
static inline uint32_t hal_get_link_desc_size(void *hal_soc)
{
	return LINK_DESC_SIZE;
}

/**
 * hal_get_link_desc_align - Get the required start address alignment for
 * link descriptors
 *
 * @hal_soc: Opaque HAL SOC handle
 *
 * Return: required alignment in bytes
 */
static inline uint32_t hal_get_link_desc_align(void *hal_soc)
{
	return LINK_DESC_ALIGN;
}

/**
 * hal_num_mpdus_per_link_desc - Get number of mpdus each link desc can hold
 *
 * @hal_soc: Opaque HAL SOC handle
 *
 * Return: number of mpdus per link descriptor
 */
static inline uint32_t hal_num_mpdus_per_link_desc(void *hal_soc)
{
	return NUM_MPDUS_PER_LINK_DESC;
}

/**
 * hal_num_msdus_per_link_desc - Get number of msdus each link desc can hold
 *
 * @hal_soc: Opaque HAL SOC handle
 *
 * Return: number of msdus per link descriptor
 */
static inline uint32_t hal_num_msdus_per_link_desc(void *hal_soc)
{
	return NUM_MSDUS_PER_LINK_DESC;
}

/**
 * hal_num_mpdu_links_per_queue_desc - Get number of mpdu links each queue
 * descriptor can hold
 *
 * @hal_soc: Opaque HAL SOC handle
 *
 * Return: number of mpdu links per queue descriptor
 */
static inline uint32_t hal_num_mpdu_links_per_queue_desc(void *hal_soc)
{
	return NUM_MPDU_LINKS_PER_QUEUE_DESC;
}

/**
 * hal_idle_scatter_buf_num_entries - Get the number of link desc entries
 * that can be held in the given scatter buffer size
 *
 * @hal_soc: Opaque HAL SOC handle
 * @scatter_buf_size: Size of scatter buffer
 *
 * Return: number of link desc entries per scatter buffer
 */
static inline uint32_t hal_idle_scatter_buf_num_entries(void *hal_soc,
	uint32_t scatter_buf_size)
{
	return (scatter_buf_size - WBM_IDLE_SCATTER_BUF_NEXT_PTR_SIZE) /
		hal_srng_get_entrysize(hal_soc, WBM_IDLE_LINK);
}

/**
 * hal_setup_link_idle_list - Setup scattered idle list using the buffer list
 * provided
 *
 * @hal_soc: Opaque HAL SOC handle
 * @scatter_bufs_base_paddr: Array of physical base addresses
 * @scatter_bufs_base_vaddr: Array of virtual base addresses
 * @num_scatter_bufs: Number of scatter buffers in the above lists
 * @scatter_buf_size: Size of each scatter buffer
 * @last_buf_end_offset: Offset of the last entry in the last scatter buffer
 *
 */
extern void hal_setup_link_idle_list(void *hal_soc,
	qdf_dma_addr_t scatter_bufs_base_paddr[],
	void *scatter_bufs_base_vaddr[], uint32_t num_scatter_bufs,
	uint32_t scatter_buf_size, uint32_t last_buf_end_offset);

/**
 * hal_reo_setup - Initialize HW REO block
 *
 * @hal_soc: Opaque HAL SOC handle
 */
extern void hal_reo_setup(void *hal_soc);

enum hal_pn_type {
	HAL_PN_NONE,
	HAL_PN_WPA,
	HAL_PN_WAPI_EVEN,
	HAL_PN_WAPI_UNEVEN,
};

#define HAL_RX_MAX_BA_WINDOW 256

/**
 * hal_get_reo_qdesc_size - Get size of reo queue descriptor
 *
 * @hal_soc: Opaque HAL SOC handle
 * @ba_window_size: BlockAck window size
 *
 * Return: size of REO queue descriptor in bytes
 */
static inline uint32_t hal_get_reo_qdesc_size(void *hal_soc,
	uint32_t ba_window_size)
{
	if (ba_window_size <= 1)
		return sizeof(struct rx_reo_queue);

	if (ba_window_size <= 105)
		return sizeof(struct rx_reo_queue) +
			sizeof(struct rx_reo_queue_ext);

	if (ba_window_size <= 210)
		return sizeof(struct rx_reo_queue) +
			(2 * sizeof(struct rx_reo_queue_ext));

	return sizeof(struct rx_reo_queue) +
		(3 * sizeof(struct rx_reo_queue_ext));
}

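/*
 * Example (illustrative sketch only): sizing a REO queue descriptor
 * allocation for a given BlockAck window, assuming the QDF
 * consistent-memory helper; the caller is expected to round the returned
 * address up to the required alignment before hal_reo_qdesc_setup():
 *
 *	uint32_t size = hal_get_reo_qdesc_size(hal_soc, ba_window_size);
 *	uint32_t align = hal_get_reo_qdesc_align(hal_soc);
 *	qdf_dma_addr_t paddr;
 *	void *vaddr = qdf_mem_alloc_consistent(qdf_dev, qdf_dev->dev,
 *		size + align - 1, &paddr);
 */
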
/**
 * hal_get_reo_qdesc_align - Get start address alignment for reo
 * queue descriptors
 *
 * @hal_soc: Opaque HAL SOC handle
 *
 * Return: required alignment in bytes
 */
static inline uint32_t hal_get_reo_qdesc_align(void *hal_soc)
{
	return REO_QUEUE_DESC_ALIGN;
}

/**
 * hal_reo_qdesc_setup - Setup HW REO queue descriptor
 *
 * @hal_soc: Opaque HAL SOC handle
 * @tid: TID of the queue
 * @ba_window_size: BlockAck window size
 * @start_seq: Starting sequence number
 * @hw_qdesc_vaddr: Virtual address of REO queue descriptor memory
 * @hw_qdesc_paddr: Physical address of REO queue descriptor memory
 * @pn_type: PN type (one of the types defined in 'enum hal_pn_type')
 *
 */
extern void hal_reo_qdesc_setup(void *hal_soc, int tid, uint32_t ba_window_size,
	uint32_t start_seq, void *hw_qdesc_vaddr, qdf_dma_addr_t hw_qdesc_paddr,
	int pn_type);

/**
 * hal_srng_get_hp_addr - Get head pointer physical address
 *
 * @hal_soc: Opaque HAL SOC handle
 * @hal_ring: Ring pointer (Source or Destination ring)
 *
 * Return: Head pointer physical address
 */
static inline qdf_dma_addr_t hal_srng_get_hp_addr(void *hal_soc, void *hal_ring)
{
	struct hal_srng *srng = (struct hal_srng *)hal_ring;
	struct hal_soc *hal = (struct hal_soc *)hal_soc;

	if (!(srng->flags & HAL_SRNG_LMAC_RING)) {
		/* Currently this interface is required only for LMAC rings */
		return (qdf_dma_addr_t)NULL;
	}

	if (srng->ring_dir == HAL_SRNG_SRC_RING) {
		return hal->shadow_wrptr_mem_paddr +
			((unsigned long)(srng->u.src_ring.hp_addr) -
			(unsigned long)(hal->shadow_wrptr_mem_vaddr));
	} else {
		return hal->shadow_rdptr_mem_paddr +
			((unsigned long)(srng->u.dst_ring.hp_addr) -
			(unsigned long)(hal->shadow_rdptr_mem_vaddr));
	}
}

/**
 * hal_srng_get_tp_addr - Get tail pointer physical address
 *
 * @hal_soc: Opaque HAL SOC handle
 * @hal_ring: Ring pointer (Source or Destination ring)
 *
 * Return: Tail pointer physical address
 */
static inline qdf_dma_addr_t hal_srng_get_tp_addr(void *hal_soc, void *hal_ring)
{
	struct hal_srng *srng = (struct hal_srng *)hal_ring;
	struct hal_soc *hal = (struct hal_soc *)hal_soc;

	if (!(srng->flags & HAL_SRNG_LMAC_RING)) {
		/* Currently this interface is required only for LMAC rings */
		return (qdf_dma_addr_t)NULL;
	}

	if (srng->ring_dir == HAL_SRNG_SRC_RING) {
		return hal->shadow_rdptr_mem_paddr +
			((unsigned long)(srng->u.src_ring.tp_addr) -
			(unsigned long)(hal->shadow_rdptr_mem_vaddr));
	} else {
		return hal->shadow_wrptr_mem_paddr +
			((unsigned long)(srng->u.dst_ring.tp_addr) -
			(unsigned long)(hal->shadow_wrptr_mem_vaddr));
	}
}

/**
 * hal_get_srng_params - Retrieve SRNG parameters for a given ring from HAL
 *
 * @hal_soc: Opaque HAL SOC handle
 * @hal_ring: Ring pointer (Source or Destination ring)
 * @ring_params: SRNG parameters will be returned through this structure
 */
extern void hal_get_srng_params(void *hal_soc, void *hal_ring,
	struct hal_srng_params *ring_params);
#endif /* _HAL_API_H_ */