/*
 * Copyright (c) 2016-2017 The Linux Foundation. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met:
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above
 *       copyright notice, this list of conditions and the following
 *       disclaimer in the documentation and/or other materials provided
 *       with the distribution.
 *     * Neither the name of The Linux Foundation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED
 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
 * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
 * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
 * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#ifndef _HAL_API_H_
#define _HAL_API_H_

#include "qdf_types.h"
#include "qdf_util.h"
#include "hal_internal.h"
#include "rx_msdu_link.h"
#include "rx_reo_queue.h"
#include "rx_reo_queue_ext.h"

#define MAX_UNWINDOWED_ADDRESS 0x80000
#define WINDOW_ENABLE_BIT 0x80000000
#define WINDOW_REG_ADDRESS 0x310C
#define WINDOW_SHIFT 19
#define WINDOW_VALUE_MASK 0x1F
#define WINDOW_START MAX_UNWINDOWED_ADDRESS
#define WINDOW_RANGE_MASK 0x7FFFF

static inline void hal_select_window(struct hal_soc *hal_soc, uint32_t offset)
{
	uint32_t window = (offset >> WINDOW_SHIFT) & WINDOW_VALUE_MASK;

	if (window != hal_soc->register_window) {
		qdf_iowrite32(hal_soc->dev_base_addr + WINDOW_REG_ADDRESS,
			      WINDOW_ENABLE_BIT | window);
		hal_soc->register_window = window;
	}
}

/**
 * note1: WINDOW_RANGE_MASK = (1 << WINDOW_SHIFT) - 1
 * note2: 1 << WINDOW_SHIFT = MAX_UNWINDOWED_ADDRESS
 * note3: WINDOW_VALUE_MASK is wide enough that an attempt to write past
 *	  the last window would be a bug
 */
static inline void hal_write32_mb(struct hal_soc *hal_soc, uint32_t offset,
				  uint32_t value)
{
	if (!hal_soc->use_register_windowing ||
	    offset < MAX_UNWINDOWED_ADDRESS) {
		qdf_iowrite32(hal_soc->dev_base_addr + offset, value);
	} else {
		qdf_spin_lock_irqsave(&hal_soc->register_access_lock);
		hal_select_window(hal_soc, offset);
		qdf_iowrite32(hal_soc->dev_base_addr + WINDOW_START +
			      (offset & WINDOW_RANGE_MASK), value);
		qdf_spin_unlock_irqrestore(&hal_soc->register_access_lock);
	}
}
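
/*
 * Illustrative example (not part of the API): with WINDOW_SHIFT 19 and
 * WINDOW_VALUE_MASK 0x1F, hal_write32_mb() to offset 0x12345C selects
 * window (0x12345C >> 19) & 0x1F = 2 and issues the write at
 * dev_base_addr + WINDOW_START + (0x12345C & WINDOW_RANGE_MASK), i.e.
 * dev_base_addr + 0x80000 + 0x2345C.
 */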

/**
 * hal_write_address_32_mb - write a value to a device register address
 * @hal_soc: HAL SOC handle
 * @addr: Mapped register address to write to
 * @value: Value to write
 */
static inline void hal_write_address_32_mb(struct hal_soc *hal_soc,
					   void __iomem *addr, uint32_t value)
{
	uint32_t offset;

	if (!hal_soc->use_register_windowing)
		return qdf_iowrite32(addr, value);

	offset = addr - hal_soc->dev_base_addr;
	hal_write32_mb(hal_soc, offset, value);
}

static inline uint32_t hal_read32_mb(struct hal_soc *hal_soc, uint32_t offset)
{
	uint32_t ret;

	if (!hal_soc->use_register_windowing ||
	    offset < MAX_UNWINDOWED_ADDRESS) {
		return qdf_ioread32(hal_soc->dev_base_addr + offset);
	}

	qdf_spin_lock_irqsave(&hal_soc->register_access_lock);
	hal_select_window(hal_soc, offset);
	ret = qdf_ioread32(hal_soc->dev_base_addr + WINDOW_START +
			   (offset & WINDOW_RANGE_MASK));
	qdf_spin_unlock_irqrestore(&hal_soc->register_access_lock);

	return ret;
}
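
/*
 * Usage sketch (illustrative; the register offset and bit below are
 * hypothetical): a read-modify-write through the windowed accessors.
 *
 *	uint32_t val;
 *
 *	val = hal_read32_mb(hal_soc, HYPOTHETICAL_REG_OFFSET);
 *	hal_write32_mb(hal_soc, HYPOTHETICAL_REG_OFFSET,
 *		       val | HYPOTHETICAL_BIT);
 */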

#include "hif_io32.h"

/**
 * hal_attach - Initialize HAL layer
 * @hif_handle: Opaque HIF handle
 * @qdf_dev: QDF device
 *
 * Return: Opaque HAL SOC handle
 *		 NULL on failure
 *
 * This function should be called as part of HIF initialization (for accessing
 * copy engines). DP layer will get hal_soc handle using hif_get_hal_handle()
 */
extern void *hal_attach(void *hif_handle, qdf_device_t qdf_dev);

/**
 * hal_detach - Detach HAL layer
 * @hal_soc: HAL SOC handle
 *
 * This function should be called as part of HIF detach
 *
 */
extern void hal_detach(void *hal_soc);

/* SRNG type to be passed in APIs hal_srng_get_entrysize and hal_srng_setup */
enum hal_ring_type {
	REO_DST,
	REO_EXCEPTION,
	REO_REINJECT,
	REO_CMD,
	REO_STATUS,
	TCL_DATA,
	TCL_CMD,
	TCL_STATUS,
	CE_SRC,
	CE_DST,
	CE_DST_STATUS,
	WBM_IDLE_LINK,
	SW2WBM_RELEASE,
	WBM2SW_RELEASE,
	RXDMA_BUF,
	RXDMA_DST,
	RXDMA_MONITOR_BUF,
	RXDMA_MONITOR_STATUS,
	RXDMA_MONITOR_DST,
	RXDMA_MONITOR_DESC,
	MAX_RING_TYPES
};

/* SRNG flags passed in hal_srng_params.flags */
#define HAL_SRNG_MSI_SWAP			0x00000008
#define HAL_SRNG_RING_PTR_SWAP			0x00000010
#define HAL_SRNG_DATA_TLV_SWAP			0x00000020
#define HAL_SRNG_LOW_THRES_INTR_ENABLE		0x00010000
#define HAL_SRNG_MSI_INTR			0x00020000

/**
 * hal_srng_get_entrysize - Returns size of ring entry in bytes. Should be
 * used by callers for calculating the size of memory to be allocated before
 * calling hal_srng_setup to setup the ring
 *
 * @hal_soc: Opaque HAL SOC handle
 * @ring_type: one of the types from hal_ring_type
 *
 * Return: Size of a ring entry in bytes for the given ring_type
 */
extern uint32_t hal_srng_get_entrysize(void *hal_soc, int ring_type);

/**
 * hal_srng_max_entries - Returns maximum possible number of ring entries
 * @hal_soc: Opaque HAL SOC handle
 * @ring_type: one of the types from hal_ring_type
 *
 * Return: Maximum number of entries for the given ring_type
 */
uint32_t hal_srng_max_entries(void *hal_soc, int ring_type);

/* SRNG parameters to be passed to hal_srng_setup */
struct hal_srng_params {
	/* Physical base address of the ring */
	qdf_dma_addr_t ring_base_paddr;
	/* Virtual base address of the ring */
	void *ring_base_vaddr;
	/* Number of entries in ring */
	uint32_t num_entries;
	/* max transfer length */
	uint16_t max_buffer_length;
	/* MSI Address */
	qdf_dma_addr_t msi_addr;
	/* MSI data */
	uint32_t msi_data;
	/* Interrupt timer threshold - in microseconds */
	uint32_t intr_timer_thres_us;
	/* Interrupt batch counter threshold - in number of ring entries */
	uint32_t intr_batch_cntr_thres_entries;
	/* Low threshold - in number of ring entries
	 * (valid for src rings only)
	 */
	uint32_t low_threshold;
	/* Misc flags */
	uint32_t flags;
	/* Unique ring id */
	uint8_t ring_id;
};
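
/*
 * Usage sketch (illustrative; the ring type, entry count and thresholds
 * below are assumptions made for the example): populating
 * hal_srng_params before calling hal_srng_setup().
 *
 *	struct hal_srng_params params = {0};
 *	uint32_t entry_size = hal_srng_get_entrysize(hal_soc, REO_DST);
 *
 *	(allocate 1024 * entry_size bytes of contiguous, 8 byte aligned
 *	 ring memory to obtain ring_vaddr/ring_paddr)
 *
 *	params.ring_base_paddr = ring_paddr;
 *	params.ring_base_vaddr = ring_vaddr;
 *	params.num_entries = 1024;
 *	params.intr_timer_thres_us = 8;
 *	params.intr_batch_cntr_thres_entries = 1;
 *	hal_ring = hal_srng_setup(hal_soc, REO_DST, 0, 0, &params);
 */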

/* hal_construct_shadow_config() - initialize the shadow registers for dp rings
 * @hal_soc: hal handle
 *
 * Return: QDF_STATUS_OK on success
 */
extern QDF_STATUS hal_construct_shadow_config(void *hal_soc);

/* hal_set_one_shadow_config() - add a config for the specified ring
 * @hal_soc: hal handle
 * @ring_type: ring type
 * @ring_num: ring num
 *
 * The ring type and ring num uniquely specify the ring.  After this call,
 * the hp/tp will be added as the next entry in the shadow register
 * configuration table.  The hal code will use the shadow register address
 * in place of the hp/tp address.
 *
 * This function is exposed, so that the CE module can skip configuring shadow
 * registers for unused rings and rings assigned to the firmware.
 *
 * Return: QDF_STATUS_OK on success
 */
extern QDF_STATUS hal_set_one_shadow_config(void *hal_soc, int ring_type,
					    int ring_num);

/**
 * hal_get_shadow_config() - retrieve the config table
 * @hal_soc: hal handle
 * @shadow_config: will point to the config table on return
 * @num_shadow_registers_configured: will contain the number of valid entries
 */
extern void hal_get_shadow_config(void *hal_soc,
				  struct pld_shadow_reg_v2_cfg **shadow_config,
				  int *num_shadow_registers_configured);

/**
 * hal_srng_setup - Initialize HW SRNG ring.
 *
 * @hal_soc: Opaque HAL SOC handle
 * @ring_type: one of the types from hal_ring_type
 * @ring_num: Ring number if there are multiple rings of
 *		same type (starting from 0)
 * @mac_id: valid MAC Id should be passed if ring type is one of lmac rings
 * @ring_params: SRNG ring params in hal_srng_params structure.
 *
 * Callers are expected to allocate contiguous ring memory of size
 * 'num_entries * entry_size' bytes and pass the physical and virtual base
 * addresses through 'ring_base_paddr' and 'ring_base_vaddr' in hal_srng_params
 * structure. Ring base address should be 8 byte aligned and size of each ring
 * entry should be queried using the API hal_srng_get_entrysize
 *
 * Return: Opaque pointer to ring on success
 *		 NULL on failure (if given ring is not available)
 */
extern void *hal_srng_setup(void *hal_soc, int ring_type, int ring_num,
	int mac_id, struct hal_srng_params *ring_params);

/**
 * hal_srng_cleanup - Deinitialize HW SRNG ring.
 * @hal_soc: Opaque HAL SOC handle
 * @hal_srng: Opaque HAL SRNG pointer
 */
extern void hal_srng_cleanup(void *hal_soc, void *hal_srng);

/**
 * hal_srng_access_start_unlocked - Start ring access (unlocked). Should use
 * hal_srng_access_start if locked access is required
 *
 * @hal_soc: Opaque HAL SOC handle
 * @hal_ring: Ring pointer (Source or Destination ring)
 *
 * Return: 0 on success; error on failure
 */
static inline int hal_srng_access_start_unlocked(void *hal_soc, void *hal_ring)
{
	struct hal_srng *srng = (struct hal_srng *)hal_ring;

	if (srng->ring_dir == HAL_SRNG_SRC_RING)
		srng->u.src_ring.cached_tp =
			*(volatile uint32_t *)(srng->u.src_ring.tp_addr);
	else
		srng->u.dst_ring.cached_hp =
			*(volatile uint32_t *)(srng->u.dst_ring.hp_addr);

	return 0;
}

/**
 * hal_srng_access_start - Start (locked) ring access
 *
 * @hal_soc: Opaque HAL SOC handle
 * @hal_ring: Ring pointer (Source or Destination ring)
 *
 * Return: 0 on success; error on failure
 */
static inline int hal_srng_access_start(void *hal_soc, void *hal_ring)
{
	struct hal_srng *srng = (struct hal_srng *)hal_ring;

	SRNG_LOCK(&(srng->lock));

	return hal_srng_access_start_unlocked(hal_soc, hal_ring);
}

/**
 * hal_srng_dst_get_next - Get next entry from a destination ring and move
 * cached tail pointer
 *
 * @hal_soc: Opaque HAL SOC handle
 * @hal_ring: Destination ring pointer
 *
 * Return: Opaque pointer for next ring entry; NULL on failure
 */
static inline void *hal_srng_dst_get_next(void *hal_soc, void *hal_ring)
{
	struct hal_srng *srng = (struct hal_srng *)hal_ring;
	volatile uint32_t *desc = &(srng->ring_base_vaddr[srng->u.dst_ring.tp]);
	uint32_t desc_loop_cnt;

	desc_loop_cnt = (desc[srng->entry_size - 1] & SRNG_LOOP_CNT_MASK)
		>> SRNG_LOOP_CNT_LSB;

	if (srng->u.dst_ring.loop_cnt == desc_loop_cnt) {
		/* TODO: Using % is expensive, but we have to do this since
		 * size of some SRNG rings is not power of 2 (due to descriptor
		 * sizes). Need to create separate API for rings used
		 * per-packet, with sizes power of 2 (TCL2SW, REO2SW,
		 * SW2RXDMA and CE rings)
		 */
		srng->u.dst_ring.tp = (srng->u.dst_ring.tp + srng->entry_size) %
			srng->ring_size;

		srng->u.dst_ring.loop_cnt = (srng->u.dst_ring.loop_cnt +
			!srng->u.dst_ring.tp) &
			(SRNG_LOOP_CNT_MASK >> SRNG_LOOP_CNT_LSB);
		/* TODO: Confirm if loop count mask is same for all rings */
		return (void *)desc;
	}

	return NULL;
}
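
/*
 * Usage sketch (illustrative): a typical destination ring processing
 * loop. process_desc() is a hypothetical consumer and desc a void
 * pointer.
 *
 *	hal_srng_access_start(hal_soc, hal_ring);
 *	while ((desc = hal_srng_dst_get_next(hal_soc, hal_ring)))
 *		process_desc(desc);
 *	hal_srng_access_end(hal_soc, hal_ring);
 */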

/**
 * hal_srng_dst_peek - Get next entry from a ring without moving tail pointer.
 * hal_srng_dst_get_next should be called subsequently to move the tail pointer
 * TODO: See if we need an optimized version of get_next that doesn't check for
 * loop_cnt
 *
 * @hal_soc: Opaque HAL SOC handle
 * @hal_ring: Destination ring pointer
 *
 * Return: Opaque pointer for next ring entry; NULL on failure
 */
static inline void *hal_srng_dst_peek(void *hal_soc, void *hal_ring)
{
	struct hal_srng *srng = (struct hal_srng *)hal_ring;
	uint32_t *desc = &(srng->ring_base_vaddr[srng->u.dst_ring.tp]);
	uint32_t desc_loop_cnt;

	desc_loop_cnt = (desc[srng->entry_size - 1] & SRNG_LOOP_CNT_MASK)
		>> SRNG_LOOP_CNT_LSB;

	if (srng->u.dst_ring.loop_cnt == desc_loop_cnt)
		return (void *)desc;

	return NULL;
}

/**
 * hal_srng_dst_num_valid - Returns number of valid entries (to be processed
 * by SW) in destination ring
 *
 * @hal_soc: Opaque HAL SOC handle
 * @hal_ring: Destination ring pointer
 * @sync_hw_ptr: Sync cached head pointer with HW
 *
 * Return: Number of valid entries in the destination ring
 */
static inline uint32_t hal_srng_dst_num_valid(void *hal_soc, void *hal_ring,
	int sync_hw_ptr)
{
	struct hal_srng *srng = (struct hal_srng *)hal_ring;
	uint32_t hp;
	uint32_t tp = srng->u.dst_ring.tp;

	if (sync_hw_ptr) {
		hp = *(srng->u.dst_ring.hp_addr);
		srng->u.dst_ring.cached_hp = hp;
	} else {
		hp = srng->u.dst_ring.cached_hp;
	}

	if (hp >= tp)
		return (hp - tp) / srng->entry_size;
	else
		return (srng->ring_size - tp + hp) / srng->entry_size;
}

/**
 * hal_srng_src_reap_next - Reap next entry from a source ring and move reap
 * pointer. This can be used to release any buffers associated with completed
 * ring entries. Note that this should not be used for posting new descriptor
 * entries. Posting of new entries should be done only using
 * hal_srng_src_get_next_reaped when this function is used for reaping.
 *
 * @hal_soc: Opaque HAL SOC handle
 * @hal_ring: Source ring pointer
 *
 * Return: Opaque pointer for next ring entry; NULL on failure
 */
static inline void *hal_srng_src_reap_next(void *hal_soc, void *hal_ring)
{
	struct hal_srng *srng = (struct hal_srng *)hal_ring;
	uint32_t *desc;

	/* TODO: Using % is expensive, but we have to do this since
	 * size of some SRNG rings is not power of 2 (due to descriptor
	 * sizes). Need to create separate API for rings used
	 * per-packet, with sizes power of 2 (TCL2SW, REO2SW,
	 * SW2RXDMA and CE rings)
	 */
	uint32_t next_reap_hp = (srng->u.src_ring.reap_hp + srng->entry_size) %
		srng->ring_size;

	if (next_reap_hp != srng->u.src_ring.cached_tp) {
		desc = &(srng->ring_base_vaddr[next_reap_hp]);
		srng->u.src_ring.reap_hp = next_reap_hp;
		return (void *)desc;
	}

	return NULL;
}

/**
 * hal_srng_src_get_next_reaped - Get next entry from a source ring that is
 * already reaped using hal_srng_src_reap_next, for posting new entries to
 * the ring
 *
 * @hal_soc: Opaque HAL SOC handle
 * @hal_ring: Source ring pointer
 *
 * Return: Opaque pointer for next (reaped) source ring entry; NULL on failure
 */
static inline void *hal_srng_src_get_next_reaped(void *hal_soc, void *hal_ring)
{
	struct hal_srng *srng = (struct hal_srng *)hal_ring;
	uint32_t *desc;

	if (srng->u.src_ring.hp != srng->u.src_ring.reap_hp) {
		desc = &(srng->ring_base_vaddr[srng->u.src_ring.hp]);
		srng->u.src_ring.hp = (srng->u.src_ring.hp + srng->entry_size) %
			srng->ring_size;

		return (void *)desc;
	}

	return NULL;
}
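
/*
 * Usage sketch (illustrative): reaping completed entries first, then
 * reusing the reaped entries for posting later. free_buffer() and
 * post_buffer() are hypothetical helpers and desc a void pointer.
 *
 *	hal_srng_access_start(hal_soc, hal_ring);
 *	while ((desc = hal_srng_src_reap_next(hal_soc, hal_ring)))
 *		free_buffer(desc);
 *	hal_srng_access_end_reap(hal_soc, hal_ring);
 *
 *	hal_srng_access_start(hal_soc, hal_ring);
 *	while ((desc = hal_srng_src_get_next_reaped(hal_soc, hal_ring)))
 *		post_buffer(desc);
 *	hal_srng_access_end(hal_soc, hal_ring);
 */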

/**
 * hal_srng_src_done_val - Returns the number of entries in a source ring that
 * HW has consumed and that are available to be reaped
 *
 * @hal_soc: Opaque HAL SOC handle
 * @hal_ring: Source ring pointer
 *
 * Return: Number of completed entries that can be reaped
 */
static inline uint32_t hal_srng_src_done_val(void *hal_soc, void *hal_ring)
{
	struct hal_srng *srng = (struct hal_srng *)hal_ring;

	/* TODO: Using % is expensive, but we have to do this since
	 * size of some SRNG rings is not power of 2 (due to descriptor
	 * sizes). Need to create separate API for rings used
	 * per-packet, with sizes power of 2 (TCL2SW, REO2SW,
	 * SW2RXDMA and CE rings)
	 */
	uint32_t next_reap_hp = (srng->u.src_ring.reap_hp + srng->entry_size) %
		srng->ring_size;

	if (next_reap_hp == srng->u.src_ring.cached_tp)
		return 0;

	if (srng->u.src_ring.cached_tp > next_reap_hp)
		return (srng->u.src_ring.cached_tp - next_reap_hp) /
			srng->entry_size;
	else
		return ((srng->ring_size - next_reap_hp) +
			srng->u.src_ring.cached_tp) / srng->entry_size;
}

/**
 * hal_srng_src_get_next - Get next entry from a source ring and move cached
 * head pointer
 *
 * @hal_soc: Opaque HAL SOC handle
 * @hal_ring: Source ring pointer
 *
 * Return: Opaque pointer for next ring entry; NULL on failure
 */
static inline void *hal_srng_src_get_next(void *hal_soc, void *hal_ring)
{
	struct hal_srng *srng = (struct hal_srng *)hal_ring;
	uint32_t *desc;
	/* TODO: Using % is expensive, but we have to do this since
	 * size of some SRNG rings is not power of 2 (due to descriptor
	 * sizes). Need to create separate API for rings used
	 * per-packet, with sizes power of 2 (TCL2SW, REO2SW,
	 * SW2RXDMA and CE rings)
	 */
	uint32_t next_hp = (srng->u.src_ring.hp + srng->entry_size) %
		srng->ring_size;

	if (next_hp != srng->u.src_ring.cached_tp) {
		desc = &(srng->ring_base_vaddr[srng->u.src_ring.hp]);
		srng->u.src_ring.hp = next_hp;
		/* TODO: Since reap function is not used by all rings, we can
		 * remove the following update of reap_hp in this function
		 * if we can ensure that only hal_srng_src_get_next_reaped
		 * is used for the rings requiring reap functionality
		 */
		srng->u.src_ring.reap_hp = next_hp;
		return (void *)desc;
	}

	return NULL;
}
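
/*
 * Usage sketch (illustrative): posting entries to a source ring.
 * fill_desc() is a hypothetical helper that populates a descriptor and
 * desc is a void pointer.
 *
 *	hal_srng_access_start(hal_soc, hal_ring);
 *	num_avail = hal_srng_src_num_avail(hal_soc, hal_ring, 0);
 *	while (num_avail-- &&
 *	       (desc = hal_srng_src_get_next(hal_soc, hal_ring)))
 *		fill_desc(desc);
 *	hal_srng_access_end(hal_soc, hal_ring);
 */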

/**
 * hal_srng_src_peek - Get next entry from a ring without moving head pointer.
 * hal_srng_src_get_next should be called subsequently to move the head pointer
 *
 * @hal_soc: Opaque HAL SOC handle
 * @hal_ring: Source ring pointer
 *
 * Return: Opaque pointer for next ring entry; NULL on failure
 */
static inline void *hal_srng_src_peek(void *hal_soc, void *hal_ring)
{
	struct hal_srng *srng = (struct hal_srng *)hal_ring;
	uint32_t *desc;

	/* TODO: Using % is expensive, but we have to do this since
	 * size of some SRNG rings is not power of 2 (due to descriptor
	 * sizes). Need to create separate API for rings used
	 * per-packet, with sizes power of 2 (TCL2SW, REO2SW,
	 * SW2RXDMA and CE rings)
	 */
	if (((srng->u.src_ring.hp + srng->entry_size) %
		srng->ring_size) != srng->u.src_ring.cached_tp) {
		desc = &(srng->ring_base_vaddr[srng->u.src_ring.hp]);
		return (void *)desc;
	}

	return NULL;
}

/**
 * hal_srng_src_num_avail - Returns number of available entries in src ring
 *
 * @hal_soc: Opaque HAL SOC handle
 * @hal_ring: Source ring pointer
 * @sync_hw_ptr: Sync cached tail pointer with HW
 *
 * Return: Number of entries available for posting in the source ring
 */
static inline uint32_t hal_srng_src_num_avail(void *hal_soc,
	void *hal_ring, int sync_hw_ptr)
{
	struct hal_srng *srng = (struct hal_srng *)hal_ring;
	uint32_t tp;
	uint32_t hp = srng->u.src_ring.hp;

	if (sync_hw_ptr) {
		tp = *(srng->u.src_ring.tp_addr);
		srng->u.src_ring.cached_tp = tp;
	} else {
		tp = srng->u.src_ring.cached_tp;
	}

	if (tp > hp)
		return ((tp - hp) / srng->entry_size) - 1;
	else
		return ((srng->ring_size - hp + tp) / srng->entry_size) - 1;
}

/**
 * hal_srng_access_end_unlocked - End ring access (unlocked) - update cached
 * ring head/tail pointers to HW.
 * This should be used only if hal_srng_access_start_unlocked was used to
 * start ring access
 *
 * @hal_soc: Opaque HAL SOC handle
 * @hal_ring: Ring pointer (Source or Destination ring)
 *
 * Return: None
 */
static inline void hal_srng_access_end_unlocked(void *hal_soc, void *hal_ring)
{
	struct hal_srng *srng = (struct hal_srng *)hal_ring;

	/* TODO: See if we need a write memory barrier here */
	if (srng->flags & HAL_SRNG_LMAC_RING) {
		/* For LMAC rings, ring pointer updates are done through FW and
		 * hence written to a shared memory location that is read by FW
		 */
		if (srng->ring_dir == HAL_SRNG_SRC_RING) {
			*(srng->u.src_ring.hp_addr) = srng->u.src_ring.hp;
		} else {
			*(srng->u.dst_ring.tp_addr) = srng->u.dst_ring.tp;
		}
	} else {
		if (srng->ring_dir == HAL_SRNG_SRC_RING)
			hal_write_address_32_mb(hal_soc,
				srng->u.src_ring.hp_addr,
				srng->u.src_ring.hp);
		else
			hal_write_address_32_mb(hal_soc,
				srng->u.dst_ring.tp_addr,
				srng->u.dst_ring.tp);
	}
}

/**
 * hal_srng_access_end - Unlock ring access and update cached ring head/tail
 * pointers to HW
 * This should be used only if hal_srng_access_start was used to start ring
 * access
 *
 * @hal_soc: Opaque HAL SOC handle
 * @hal_ring: Ring pointer (Source or Destination ring)
 *
 * Return: None
 */
static inline void hal_srng_access_end(void *hal_soc, void *hal_ring)
{
	struct hal_srng *srng = (struct hal_srng *)hal_ring;

	hal_srng_access_end_unlocked(hal_soc, hal_ring);
	SRNG_UNLOCK(&(srng->lock));
}

/**
 * hal_srng_access_end_reap - Unlock ring access
 * This should be used only if hal_srng_access_start was used to start ring
 * access and should be used only while reaping SRC ring completions
 *
 * @hal_soc: Opaque HAL SOC handle
 * @hal_ring: Ring pointer (Source or Destination ring)
 *
 * Return: None
 */
static inline void hal_srng_access_end_reap(void *hal_soc, void *hal_ring)
{
	struct hal_srng *srng = (struct hal_srng *)hal_ring;

	SRNG_UNLOCK(&(srng->lock));
}

/* TODO: Check if the following definitions are available in HW headers */
#define WBM_IDLE_DESC_LIST 1
#define WBM_IDLE_SCATTER_BUF_SIZE 32704
#define NUM_MPDUS_PER_LINK_DESC 6
#define NUM_MSDUS_PER_LINK_DESC 7
#define REO_QUEUE_DESC_ALIGN 128

#define LINK_DESC_SIZE (NUM_OF_DWORDS_RX_MSDU_LINK << 2)
#define LINK_DESC_ALIGN 128

/* Number of mpdu link pointers is 9 in case of TX_MPDU_QUEUE_HEAD and 14 in
 * case of TX_MPDU_QUEUE_EXT. We are defining a common average count here
 */
#define NUM_MPDU_LINKS_PER_QUEUE_DESC 12

/* TODO: Check with HW team on the scatter buffer size supported. As per WBM
 * MLD, scatter_buffer_size in IDLE_LIST_CONTROL register is 9 bits and size
 * should be specified in 16 word units. But the number of bits defined for
 * this field in HW header files is 5.
 */
#define WBM_IDLE_SCATTER_BUF_NEXT_PTR_SIZE 8

/**
 * hal_set_link_desc_addr - Setup link descriptor in a buffer_addr_info
 * HW structure
 *
 * @desc: Descriptor entry (from WBM_IDLE_LINK ring)
 * @cookie: SW cookie for the buffer/descriptor
 * @link_desc_paddr: Physical address of link descriptor entry
 *
 */
static inline void hal_set_link_desc_addr(void *desc, uint32_t cookie,
	qdf_dma_addr_t link_desc_paddr)
{
	uint32_t *buf_addr = (uint32_t *)desc;

	HAL_DESC_SET_FIELD(buf_addr, BUFFER_ADDR_INFO_0, BUFFER_ADDR_31_0,
		link_desc_paddr & 0xffffffff);
	HAL_DESC_SET_FIELD(buf_addr, BUFFER_ADDR_INFO_1, BUFFER_ADDR_39_32,
		(uint64_t)link_desc_paddr >> 32);
	HAL_DESC_SET_FIELD(buf_addr, BUFFER_ADDR_INFO_1, RETURN_BUFFER_MANAGER,
		WBM_IDLE_DESC_LIST);
	HAL_DESC_SET_FIELD(buf_addr, BUFFER_ADDR_INFO_1, SW_BUFFER_COOKIE,
		cookie);
}

/**
 * hal_idle_list_scatter_buf_size - Get the size of each scatter buffer
 * in an idle list
 *
 * @hal_soc: Opaque HAL SOC handle
 *
 */
static inline uint32_t hal_idle_list_scatter_buf_size(void *hal_soc)
{
	return WBM_IDLE_SCATTER_BUF_SIZE;
}

/**
 * hal_get_link_desc_size - Get the size of each link descriptor
 *
 * @hal_soc: Opaque HAL SOC handle
 *
 */
static inline uint32_t hal_get_link_desc_size(void *hal_soc)
{
	return LINK_DESC_SIZE;
}

/**
 * hal_get_link_desc_align - Get the required start address alignment for
 * link descriptors
 *
 * @hal_soc: Opaque HAL SOC handle
 *
 */
static inline uint32_t hal_get_link_desc_align(void *hal_soc)
{
	return LINK_DESC_ALIGN;
}

/**
 * hal_num_mpdus_per_link_desc - Get number of mpdus each link desc can hold
 *
 * @hal_soc: Opaque HAL SOC handle
 *
 */
static inline uint32_t hal_num_mpdus_per_link_desc(void *hal_soc)
{
	return NUM_MPDUS_PER_LINK_DESC;
}

/**
 * hal_num_msdus_per_link_desc - Get number of msdus each link desc can hold
 *
 * @hal_soc: Opaque HAL SOC handle
 *
 */
static inline uint32_t hal_num_msdus_per_link_desc(void *hal_soc)
{
	return NUM_MSDUS_PER_LINK_DESC;
}

/**
 * hal_num_mpdu_links_per_queue_desc - Get number of mpdu links each queue
 * descriptor can hold
 *
 * @hal_soc: Opaque HAL SOC handle
 *
 */
static inline uint32_t hal_num_mpdu_links_per_queue_desc(void *hal_soc)
{
	return NUM_MPDU_LINKS_PER_QUEUE_DESC;
}

/**
 * hal_idle_scatter_buf_num_entries - Get the number of link desc entries
 * that can fit in the given scatter buffer size
 *
 * @hal_soc: Opaque HAL SOC handle
 * @scatter_buf_size: Size of scatter buffer
 *
 */
static inline uint32_t hal_idle_scatter_buf_num_entries(void *hal_soc,
	uint32_t scatter_buf_size)
{
	return (scatter_buf_size - WBM_IDLE_SCATTER_BUF_NEXT_PTR_SIZE) /
		hal_srng_get_entrysize(hal_soc, WBM_IDLE_LINK);
}

/**
 * hal_setup_link_idle_list - Setup scattered idle list using the buffer list
 * provided
 *
 * @hal_soc: Opaque HAL SOC handle
 * @scatter_bufs_base_paddr: Array of physical base addresses
 * @scatter_bufs_base_vaddr: Array of virtual base addresses
 * @num_scatter_bufs: Number of scatter buffers in the above lists
 * @scatter_buf_size: Size of each scatter buffer
 * @last_buf_end_offset: Offset to the last entry in the last scatter buffer
 *
 */
extern void hal_setup_link_idle_list(void *hal_soc,
	qdf_dma_addr_t scatter_bufs_base_paddr[],
	void *scatter_bufs_base_vaddr[], uint32_t num_scatter_bufs,
	uint32_t scatter_buf_size, uint32_t last_buf_end_offset);

/* REO parameters to be passed to hal_reo_setup */
struct hal_reo_params {
	bool rx_hash_enabled;
};

/**
 * hal_reo_setup - Initialize HW REO block
 *
 * @hal_soc: Opaque HAL SOC handle
 * @reo_params: parameters needed by HAL for REO config
 */
extern void hal_reo_setup(void *hal_soc,
	struct hal_reo_params *reo_params);

enum hal_pn_type {
	HAL_PN_NONE,
	HAL_PN_WPA,
	HAL_PN_WAPI_EVEN,
	HAL_PN_WAPI_UNEVEN,
};

#define HAL_RX_MAX_BA_WINDOW 256

/**
 * hal_get_reo_qdesc_size - Get size of reo queue descriptor
 *
 * @hal_soc: Opaque HAL SOC handle
 * @ba_window_size: BlockAck window size
 *
 */
static inline uint32_t hal_get_reo_qdesc_size(void *hal_soc,
	uint32_t ba_window_size)
{
	if (ba_window_size <= 1)
		return sizeof(struct rx_reo_queue);

	if (ba_window_size <= 105)
		return sizeof(struct rx_reo_queue) +
			sizeof(struct rx_reo_queue_ext);

	if (ba_window_size <= 210)
		return sizeof(struct rx_reo_queue) +
			(2 * sizeof(struct rx_reo_queue_ext));

	return sizeof(struct rx_reo_queue) +
		(3 * sizeof(struct rx_reo_queue_ext));
}
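
/*
 * Usage sketch (illustrative): sizing an allocation for a REO queue
 * descriptor. Padding by align - 1 bytes is an assumption made here so
 * the caller can round the base address up to the required alignment.
 *
 *	uint32_t align = hal_get_reo_qdesc_align(hal_soc);
 *	uint32_t size = hal_get_reo_qdesc_size(hal_soc, ba_window_size);
 *
 *	(allocate size + align - 1 bytes and round the base address up to
 *	 a multiple of align before passing it to hal_reo_qdesc_setup)
 */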

/**
 * hal_get_reo_qdesc_align - Get start address alignment for reo
 * queue descriptors
 *
 * @hal_soc: Opaque HAL SOC handle
 *
 */
static inline uint32_t hal_get_reo_qdesc_align(void *hal_soc)
{
	return REO_QUEUE_DESC_ALIGN;
}

/**
 * hal_reo_qdesc_setup - Setup HW REO queue descriptor
 *
 * @hal_soc: Opaque HAL SOC handle
 * @tid: TID for which the queue descriptor is being set up
 * @ba_window_size: BlockAck window size
 * @start_seq: Starting sequence number
 * @hw_qdesc_vaddr: Virtual address of REO queue descriptor memory
 * @hw_qdesc_paddr: Physical address of REO queue descriptor memory
 * @pn_type: PN type (one of the types defined in 'enum hal_pn_type')
 *
 */
extern void hal_reo_qdesc_setup(void *hal_soc, int tid, uint32_t ba_window_size,
	uint32_t start_seq, void *hw_qdesc_vaddr, qdf_dma_addr_t hw_qdesc_paddr,
	int pn_type);

/**
 * hal_srng_get_hp_addr - Get head pointer physical address
 *
 * @hal_soc: Opaque HAL SOC handle
 * @hal_ring: Ring pointer (Source or Destination ring)
 *
 */
static inline qdf_dma_addr_t hal_srng_get_hp_addr(void *hal_soc, void *hal_ring)
{
	struct hal_srng *srng = (struct hal_srng *)hal_ring;
	struct hal_soc *hal = (struct hal_soc *)hal_soc;

	if (!(srng->flags & HAL_SRNG_LMAC_RING)) {
		/* Currently this interface is required only for LMAC rings */
		return (qdf_dma_addr_t)NULL;
	}

	if (srng->ring_dir == HAL_SRNG_SRC_RING) {
		return hal->shadow_wrptr_mem_paddr +
			((unsigned long)(srng->u.src_ring.hp_addr) -
			(unsigned long)(hal->shadow_wrptr_mem_vaddr));
	} else {
		return hal->shadow_rdptr_mem_paddr +
			((unsigned long)(srng->u.dst_ring.hp_addr) -
			(unsigned long)(hal->shadow_rdptr_mem_vaddr));
	}
}

/**
 * hal_srng_get_tp_addr - Get tail pointer physical address
 *
 * @hal_soc: Opaque HAL SOC handle
 * @hal_ring: Ring pointer (Source or Destination ring)
 *
 */
static inline qdf_dma_addr_t hal_srng_get_tp_addr(void *hal_soc, void *hal_ring)
{
	struct hal_srng *srng = (struct hal_srng *)hal_ring;
	struct hal_soc *hal = (struct hal_soc *)hal_soc;

	if (!(srng->flags & HAL_SRNG_LMAC_RING)) {
		/* Currently this interface is required only for LMAC rings */
		return (qdf_dma_addr_t)NULL;
	}

	if (srng->ring_dir == HAL_SRNG_SRC_RING) {
		return hal->shadow_rdptr_mem_paddr +
			((unsigned long)(srng->u.src_ring.tp_addr) -
			(unsigned long)(hal->shadow_rdptr_mem_vaddr));
	} else {
		return hal->shadow_wrptr_mem_paddr +
			((unsigned long)(srng->u.dst_ring.tp_addr) -
			(unsigned long)(hal->shadow_wrptr_mem_vaddr));
	}
}

/**
 * hal_get_srng_params - Retrieve SRNG parameters for a given ring from HAL
 *
 * @hal_soc: Opaque HAL SOC handle
 * @hal_ring: Ring pointer (Source or Destination ring)
 * @ring_params: SRNG parameters will be returned through this structure
 */
extern void hal_get_srng_params(void *hal_soc, void *hal_ring,
	struct hal_srng_params *ring_params);

#endif /* _HAL_API_H_ */