/*
 * Copyright (c) 2016-2019 The Linux Foundation. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */

#ifndef _HAL_API_H_
#define _HAL_API_H_

#include "qdf_types.h"
#include "qdf_util.h"
#include "hal_internal.h"

#define MAX_UNWINDOWED_ADDRESS 0x80000
#ifdef QCA_WIFI_QCA6390
#define WINDOW_ENABLE_BIT 0x40000000
#else
#define WINDOW_ENABLE_BIT 0x80000000
#endif
#define WINDOW_REG_ADDRESS 0x310C
#define WINDOW_SHIFT 19
#define WINDOW_VALUE_MASK 0x3F
#define WINDOW_START MAX_UNWINDOWED_ADDRESS
#define WINDOW_RANGE_MASK 0x7FFFF

/*
 * BAR + 4K is always accessible; any access outside this
 * space requires the force wake procedure.
 */
#define MAPPED_REF_OFF 0x4063
/* Force wake poll timeout and poll interval, both in milliseconds */
#define FORCE_WAKE_DELAY_TIMEOUT 50
#define FORCE_WAKE_DELAY_MS 5

#ifndef QCA_WIFI_QCA6390
static inline int hal_force_wake_request(struct hal_soc *soc)
{
	return 0;
}

static inline int hal_force_wake_release(struct hal_soc *soc)
{
	return 0;
}
#else
static inline int hal_force_wake_request(struct hal_soc *soc)
{
	uint32_t timeout = 0;

	if (pld_force_wake_request(soc->qdf_dev->dev)) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  "%s: Request send failed\n", __func__);
		return -EINVAL;
	}

	/* Poll until the target reports awake or the timeout expires */
	while (!pld_is_device_awake(soc->qdf_dev->dev) &&
	       timeout <= FORCE_WAKE_DELAY_TIMEOUT) {
		mdelay(FORCE_WAKE_DELAY_MS);
		timeout += FORCE_WAKE_DELAY_MS;
	}

	return pld_is_device_awake(soc->qdf_dev->dev) ? 0 : -ETIMEDOUT;
}

static inline int hal_force_wake_release(struct hal_soc *soc)
{
	return pld_force_wake_release(soc->qdf_dev->dev);
}
#endif
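
/*
 * Example (illustrative only, not part of the driver): explicitly bracketing
 * a burst of register accesses beyond MAPPED_REF_OFF with the force wake
 * handshake, rather than relying on the per-access handling inside
 * hal_write32_mb()/hal_read32_mb() below.
 *
 *	if (hal_force_wake_request(soc))
 *		return -ETIMEDOUT;
 *	... several qdf_iowrite32()/qdf_ioread32() accesses ...
 *	hal_force_wake_release(soc);
 */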

static inline void hal_select_window(struct hal_soc *hal_soc, uint32_t offset)
{
	uint32_t window = (offset >> WINDOW_SHIFT) & WINDOW_VALUE_MASK;

	if (window != hal_soc->register_window) {
		qdf_iowrite32(hal_soc->dev_base_addr + WINDOW_REG_ADDRESS,
			      WINDOW_ENABLE_BIT | window);
		hal_soc->register_window = window;
	}
}

/**
 * note1: WINDOW_RANGE_MASK = (1 << WINDOW_SHIFT) - 1
 * note2: 1 << WINDOW_SHIFT = MAX_UNWINDOWED_ADDRESS
 * note3: WINDOW_VALUE_MASK = big enough that trying to write past that window
 *				would be a bug
 */
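
/*
 * Example (illustrative only, not used by the driver): how an offset beyond
 * MAX_UNWINDOWED_ADDRESS decomposes under the macros above. For offset
 * 0xA0040: window = (0xA0040 >> 19) & 0x3F = 1, and the access lands at
 * WINDOW_START + (0xA0040 & WINDOW_RANGE_MASK) = 0x80000 + 0x20040.
 */
static inline uint32_t hal_example_windowed_offset(uint32_t offset,
						   uint32_t *window)
{
	/* window id that hal_select_window() would program */
	*window = (offset >> WINDOW_SHIFT) & WINDOW_VALUE_MASK;

	/* offset within the mapped window region */
	return WINDOW_START + (offset & WINDOW_RANGE_MASK);
}
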
#ifndef QCA_WIFI_QCA6390
static inline void hal_write32_mb(struct hal_soc *hal_soc, uint32_t offset,
				  uint32_t value)
{
	if (!hal_soc->use_register_windowing ||
	    offset < MAX_UNWINDOWED_ADDRESS) {
		qdf_iowrite32(hal_soc->dev_base_addr + offset, value);
	} else {
		qdf_spin_lock_irqsave(&hal_soc->register_access_lock);
		hal_select_window(hal_soc, offset);
		qdf_iowrite32(hal_soc->dev_base_addr + WINDOW_START +
			      (offset & WINDOW_RANGE_MASK), value);
		qdf_spin_unlock_irqrestore(&hal_soc->register_access_lock);
	}
}
#else
static inline void hal_write32_mb(struct hal_soc *hal_soc, uint32_t offset,
				  uint32_t value)
{
	if ((offset > MAPPED_REF_OFF) &&
	    hal_force_wake_request(hal_soc)) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  "%s: Wake up request failed\n", __func__);
		return;
	}

	if (!hal_soc->use_register_windowing ||
	    offset < MAX_UNWINDOWED_ADDRESS) {
		qdf_iowrite32(hal_soc->dev_base_addr + offset, value);
	} else {
		qdf_spin_lock_irqsave(&hal_soc->register_access_lock);
		hal_select_window(hal_soc, offset);
		qdf_iowrite32(hal_soc->dev_base_addr + WINDOW_START +
			      (offset & WINDOW_RANGE_MASK), value);
		qdf_spin_unlock_irqrestore(&hal_soc->register_access_lock);
	}

	if ((offset > MAPPED_REF_OFF) &&
	    hal_force_wake_release(hal_soc))
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  "%s: Wake up release failed\n", __func__);
}

#endif

/**
 * hal_write_address_32_mb() - write a value to a register address
 * @hal_soc: HAL SOC handle
 * @addr: mapped register address
 * @value: value to write
 */
static inline void hal_write_address_32_mb(struct hal_soc *hal_soc,
					   void __iomem *addr, uint32_t value)
{
	uint32_t offset;

	if (!hal_soc->use_register_windowing) {
		qdf_iowrite32(addr, value);
		return;
	}

	offset = addr - hal_soc->dev_base_addr;
	hal_write32_mb(hal_soc, offset, value);
}

#ifndef QCA_WIFI_QCA6390
static inline uint32_t hal_read32_mb(struct hal_soc *hal_soc, uint32_t offset)
{
	uint32_t ret;

	if (!hal_soc->use_register_windowing ||
	    offset < MAX_UNWINDOWED_ADDRESS) {
		return qdf_ioread32(hal_soc->dev_base_addr + offset);
	}

	qdf_spin_lock_irqsave(&hal_soc->register_access_lock);
	hal_select_window(hal_soc, offset);
	ret = qdf_ioread32(hal_soc->dev_base_addr + WINDOW_START +
			   (offset & WINDOW_RANGE_MASK));
	qdf_spin_unlock_irqrestore(&hal_soc->register_access_lock);

	return ret;
}

/**
 * hal_read_address_32_mb() - Read 32-bit value from the register
 * @soc: soc handle
 * @addr: register address to read
 *
 * Return: 32-bit value
 */
static inline uint32_t hal_read_address_32_mb(struct hal_soc *soc,
					      void __iomem *addr)
{
	uint32_t offset;
	uint32_t ret;

	if (!soc->use_register_windowing)
		return qdf_ioread32(addr);

	offset = addr - soc->dev_base_addr;
	ret = hal_read32_mb(soc, offset);
	return ret;
}
#else
static inline uint32_t hal_read32_mb(struct hal_soc *hal_soc, uint32_t offset)
{
	uint32_t ret;

	if ((offset > MAPPED_REF_OFF) &&
	    hal_force_wake_request(hal_soc)) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  "%s: Wake up request failed\n", __func__);
		/* Note: -EINVAL is implicitly converted to uint32_t here */
		return -EINVAL;
	}

	if (!hal_soc->use_register_windowing ||
	    offset < MAX_UNWINDOWED_ADDRESS) {
		return qdf_ioread32(hal_soc->dev_base_addr + offset);
	}

	qdf_spin_lock_irqsave(&hal_soc->register_access_lock);
	hal_select_window(hal_soc, offset);
	ret = qdf_ioread32(hal_soc->dev_base_addr + WINDOW_START +
			   (offset & WINDOW_RANGE_MASK));
	qdf_spin_unlock_irqrestore(&hal_soc->register_access_lock);

	if ((offset > MAPPED_REF_OFF) &&
	    hal_force_wake_release(hal_soc))
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  "%s: Wake up release failed\n", __func__);

	return ret;
}

static inline uint32_t hal_read_address_32_mb(struct hal_soc *soc,
					      void __iomem *addr)
{
	uint32_t offset;
	uint32_t ret;

	if (!soc->use_register_windowing)
		return qdf_ioread32(addr);

	offset = addr - soc->dev_base_addr;
	ret = hal_read32_mb(soc, offset);
	return ret;
}
#endif

#include "hif_io32.h"

/**
 * hal_attach - Initialize HAL layer
 * @hif_handle: Opaque HIF handle
 * @qdf_dev: QDF device
 *
 * This function should be called as part of HIF initialization (for accessing
 * copy engines). DP layer will get hal_soc handle using hif_get_hal_handle()
 *
 * Return: Opaque HAL SOC handle; NULL on failure
 */
extern void *hal_attach(void *hif_handle, qdf_device_t qdf_dev);

/**
 * hal_detach - Detach HAL layer
 * @hal_soc: HAL SOC handle
 *
 * This function should be called as part of HIF detach
 */
extern void hal_detach(void *hal_soc);

/* SRNG type to be passed in APIs hal_srng_get_entrysize and hal_srng_setup */
enum hal_ring_type {
	REO_DST = 0,
	REO_EXCEPTION = 1,
	REO_REINJECT = 2,
	REO_CMD = 3,
	REO_STATUS = 4,
	TCL_DATA = 5,
	TCL_CMD = 6,
	TCL_STATUS = 7,
	CE_SRC = 8,
	CE_DST = 9,
	CE_DST_STATUS = 10,
	WBM_IDLE_LINK = 11,
	SW2WBM_RELEASE = 12,
	WBM2SW_RELEASE = 13,
	RXDMA_BUF = 14,
	RXDMA_DST = 15,
	RXDMA_MONITOR_BUF = 16,
	RXDMA_MONITOR_STATUS = 17,
	RXDMA_MONITOR_DST = 18,
	RXDMA_MONITOR_DESC = 19,
	DIR_BUF_RX_DMA_SRC = 20,
#ifdef WLAN_FEATURE_CIF_CFR
	WIFI_POS_SRC,
#endif
	MAX_RING_TYPES
};

#define HAL_SRNG_LMAC_RING 0x80000000
/* SRNG flags passed in hal_srng_params.flags */
#define HAL_SRNG_MSI_SWAP				0x00000008
#define HAL_SRNG_RING_PTR_SWAP			0x00000010
#define HAL_SRNG_DATA_TLV_SWAP			0x00000020
#define HAL_SRNG_LOW_THRES_INTR_ENABLE	0x00010000
#define HAL_SRNG_MSI_INTR				0x00020000

#define PN_SIZE_24 0
#define PN_SIZE_48 1
#define PN_SIZE_128 2

/**
 * hal_srng_get_entrysize - Returns size of ring entry in bytes. Should be
 * used by callers for calculating the size of memory to be allocated before
 * calling hal_srng_setup to setup the ring
 *
 * @hal_soc: Opaque HAL SOC handle
 * @ring_type: one of the types from hal_ring_type
 *
 * Return: ring entry size in bytes
 */
extern uint32_t hal_srng_get_entrysize(void *hal_soc, int ring_type);

/**
 * hal_srng_max_entries - Returns maximum possible number of ring entries
 * @hal_soc: Opaque HAL SOC handle
 * @ring_type: one of the types from hal_ring_type
 *
 * Return: Maximum number of entries for the given ring_type
 */
uint32_t hal_srng_max_entries(void *hal_soc, int ring_type);

/**
 * hal_srng_dump - Dump ring status
 * @srng: hal srng pointer
 */
void hal_srng_dump(struct hal_srng *srng);

/**
 * hal_srng_get_dir - Returns the direction of the ring
 * @hal_soc: Opaque HAL SOC handle
 * @ring_type: one of the types from hal_ring_type
 *
 * Return: Ring direction
 */
enum hal_srng_dir hal_srng_get_dir(void *hal_soc, int ring_type);

/* HAL memory information */
struct hal_mem_info {
	/* dev base virtual addr */
	void *dev_base_addr;
	/* dev base physical addr */
	void *dev_base_paddr;
	/* Remote virtual pointer memory for HW/FW updates */
	void *shadow_rdptr_mem_vaddr;
	/* Remote physical pointer memory for HW/FW updates */
	void *shadow_rdptr_mem_paddr;
	/* Shared memory for ring pointer updates from host to FW */
	void *shadow_wrptr_mem_vaddr;
	/* Shared physical memory for ring pointer updates from host to FW */
	void *shadow_wrptr_mem_paddr;
};

/* SRNG parameters to be passed to hal_srng_setup */
struct hal_srng_params {
	/* Physical base address of the ring */
	qdf_dma_addr_t ring_base_paddr;
	/* Virtual base address of the ring */
	void *ring_base_vaddr;
	/* Number of entries in ring */
	uint32_t num_entries;
	/* max transfer length */
	uint16_t max_buffer_length;
	/* MSI Address */
	qdf_dma_addr_t msi_addr;
	/* MSI data */
	uint32_t msi_data;
	/* Interrupt timer threshold, in microseconds */
	uint32_t intr_timer_thres_us;
	/* Interrupt batch counter threshold, in number of ring entries */
	uint32_t intr_batch_cntr_thres_entries;
	/* Low threshold, in number of ring entries
	 * (valid for src rings only)
	 */
	uint32_t low_threshold;
	/* Misc flags */
	uint32_t flags;
	/* Unique ring id */
	uint8_t ring_id;
	/* Source or Destination ring */
	enum hal_srng_dir ring_dir;
	/* Size of ring entry */
	uint32_t entry_size;
	/* hw register base address */
	void *hwreg_base[MAX_SRNG_REG_GROUPS];
};

/**
 * hal_construct_shadow_config() - initialize the shadow registers for dp rings
 * @hal_soc: hal handle
 *
 * Return: QDF_STATUS_OK on success
 */
extern QDF_STATUS hal_construct_shadow_config(void *hal_soc);

/**
 * hal_set_one_shadow_config() - add a config for the specified ring
 * @hal_soc: hal handle
 * @ring_type: ring type
 * @ring_num: ring num
 *
 * The ring type and ring num uniquely specify the ring. After this call,
 * the hp/tp will be added as the next entry in the shadow register
 * configuration table. The hal code will use the shadow register address
 * in place of the hp/tp address.
 *
 * This function is exposed, so that the CE module can skip configuring shadow
 * registers for unused rings and rings assigned to the firmware.
 *
 * Return: QDF_STATUS_OK on success
 */
extern QDF_STATUS hal_set_one_shadow_config(void *hal_soc, int ring_type,
					    int ring_num);

/**
 * hal_get_shadow_config() - retrieve the config table
 * @hal_soc: hal handle
 * @shadow_config: will point to the table after the call
 * @num_shadow_registers_configured: will contain the number of valid entries
 */
extern void hal_get_shadow_config(void *hal_soc,
				  struct pld_shadow_reg_v2_cfg **shadow_config,
				  int *num_shadow_registers_configured);

/**
 * hal_srng_setup - Initialize HW SRNG ring.
 *
 * @hal_soc: Opaque HAL SOC handle
 * @ring_type: one of the types from hal_ring_type
 * @ring_num: Ring number if there are multiple rings of
 *		same type (starting from 0)
 * @mac_id: valid MAC Id should be passed if ring type is one of lmac rings
 * @ring_params: SRNG ring params in hal_srng_params structure.
 *
 * Callers are expected to allocate contiguous ring memory of size
 * 'num_entries * entry_size' bytes and pass the physical and virtual base
 * addresses through 'ring_base_paddr' and 'ring_base_vaddr' in hal_srng_params
 * structure. Ring base address should be 8 byte aligned and size of each ring
 * entry should be queried using the API hal_srng_get_entrysize
 *
 * Return: Opaque pointer to ring on success
 *		 NULL on failure (if given ring is not available)
 */
extern void *hal_srng_setup(void *hal_soc, int ring_type, int ring_num,
	int mac_id, struct hal_srng_params *ring_params);
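
/*
 * Example (illustrative sketch, assuming the QDF consistent-memory
 * allocator): setting up a REO destination ring. "qdf_dev", "hal_soc" and
 * the chosen ring size are caller context, not defined in this file.
 *
 *	uint32_t entry_size = hal_srng_get_entrysize(hal_soc, REO_DST);
 *	uint32_t num_entries = 1024;
 *	struct hal_srng_params params = {0};
 *	void *ring;
 *
 *	params.ring_base_vaddr = qdf_mem_alloc_consistent(qdf_dev,
 *			qdf_dev->dev, num_entries * entry_size,
 *			&params.ring_base_paddr);
 *	params.num_entries = num_entries;
 *	params.intr_timer_thres_us = 8;
 *	params.intr_batch_cntr_thres_entries = 1;
 *
 *	ring = hal_srng_setup(hal_soc, REO_DST, 0, 0, &params);
 */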

/* Remapping ids of REO rings */
#define REO_REMAP_TCL 0
#define REO_REMAP_SW1 1
#define REO_REMAP_SW2 2
#define REO_REMAP_SW3 3
#define REO_REMAP_SW4 4
#define REO_REMAP_RELEASE 5
#define REO_REMAP_FW 6
#define REO_REMAP_UNUSED 7

/*
 * currently this macro only works for IX0 since all the rings we are remapping
 * can be remapped from HWIO_REO_R0_DESTINATION_RING_CTRL_IX_0
 */
#define HAL_REO_REMAP_VAL(_ORIGINAL_DEST, _NEW_DEST) \
	HAL_REO_REMAP_VAL_(_ORIGINAL_DEST, _NEW_DEST)
/* allow the destination macros to be expanded */
#define HAL_REO_REMAP_VAL_(_ORIGINAL_DEST, _NEW_DEST) \
	(_NEW_DEST << \
	 (HWIO_REO_R0_DESTINATION_RING_CTRL_IX_0_DEST_RING_MAPPING_ ## \
	  _ORIGINAL_DEST ## _SHFT))
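
/*
 * Example (illustrative): building a remap value with the helper above.
 * The two-level expansion turns REO_REMAP_SW1 into the literal 1 before
 * token pasting, yielding the DEST_RING_MAPPING_1 shift for that field.
 *
 *	uint32_t remap_val =
 *		HAL_REO_REMAP_VAL(REO_REMAP_SW1, REO_REMAP_SW1) |
 *		HAL_REO_REMAP_VAL(REO_REMAP_SW2, REO_REMAP_SW2) |
 *		HAL_REO_REMAP_VAL(REO_REMAP_SW3, REO_REMAP_SW3) |
 *		HAL_REO_REMAP_VAL(REO_REMAP_UNUSED, REO_REMAP_FW);
 *
 *	hal_reo_remap_IX0(hal, remap_val);
 */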

/**
 * hal_reo_remap_IX0 - Remap REO ring destination
 * @hal: HAL SOC handle
 * @remap_val: Remap value
 */
extern void hal_reo_remap_IX0(struct hal_soc *hal, uint32_t remap_val);

/**
 * hal_srng_dst_set_hp_paddr() - Set physical address to dest SRNG head pointer
 * @sring: srng pointer
 * @paddr: physical address
 */
extern void hal_srng_dst_set_hp_paddr(struct hal_srng *sring, uint64_t paddr);

/**
 * hal_srng_dst_init_hp() - Initialize head pointer with cached head pointer
 * @srng: srng pointer
 * @vaddr: virtual address
 */
extern void hal_srng_dst_init_hp(struct hal_srng *srng, uint32_t *vaddr);

/**
 * hal_srng_cleanup - Deinitialize HW SRNG ring.
 * @hal_soc: Opaque HAL SOC handle
 * @hal_srng: Opaque HAL SRNG pointer
 */
extern void hal_srng_cleanup(void *hal_soc, void *hal_srng);

static inline bool hal_srng_initialized(void *hal_ring)
{
	struct hal_srng *srng = (struct hal_srng *)hal_ring;

	return !!srng->initialized;
}

/**
 * hal_srng_access_start_unlocked - Start ring access (unlocked). Should use
 * hal_srng_access_start if locked access is required
 *
 * @hal_soc: Opaque HAL SOC handle
 * @hal_ring: Ring pointer (Source or Destination ring)
 *
 * Return: 0 on success; error on failure
 */
static inline int hal_srng_access_start_unlocked(void *hal_soc, void *hal_ring)
{
	struct hal_srng *srng = (struct hal_srng *)hal_ring;

	if (srng->ring_dir == HAL_SRNG_SRC_RING)
		srng->u.src_ring.cached_tp =
			*(volatile uint32_t *)(srng->u.src_ring.tp_addr);
	else
		srng->u.dst_ring.cached_hp =
			*(volatile uint32_t *)(srng->u.dst_ring.hp_addr);

	return 0;
}

/**
 * hal_srng_access_start - Start (locked) ring access
 *
 * @hal_soc: Opaque HAL SOC handle
 * @hal_ring: Ring pointer (Source or Destination ring)
 *
 * Return: 0 on success; error on failure
 */
static inline int hal_srng_access_start(void *hal_soc, void *hal_ring)
{
	struct hal_srng *srng = (struct hal_srng *)hal_ring;

	if (qdf_unlikely(!hal_ring)) {
		qdf_print("Error: Invalid hal_ring\n");
		return -EINVAL;
	}

	SRNG_LOCK(&(srng->lock));

	return hal_srng_access_start_unlocked(hal_soc, hal_ring);
}

/**
 * hal_srng_dst_get_next - Get next entry from a destination ring and move
 * cached tail pointer
 *
 * @hal_soc: Opaque HAL SOC handle
 * @hal_ring: Destination ring pointer
 *
 * Return: Opaque pointer for next ring entry; NULL on failure
 */
static inline void *hal_srng_dst_get_next(void *hal_soc, void *hal_ring)
{
	struct hal_srng *srng = (struct hal_srng *)hal_ring;
	uint32_t *desc;

	if (srng->u.dst_ring.tp != srng->u.dst_ring.cached_hp) {
		desc = &(srng->ring_base_vaddr[srng->u.dst_ring.tp]);
		/* TODO: Using % is expensive, but we have to do this since
		 * size of some SRNG rings is not power of 2 (due to descriptor
		 * sizes). Need to create separate API for rings used
		 * per-packet, with sizes power of 2 (TCL2SW, REO2SW,
		 * SW2RXDMA and CE rings)
		 */
		srng->u.dst_ring.tp = (srng->u.dst_ring.tp + srng->entry_size) %
			srng->ring_size;

		return (void *)desc;
	}

	return NULL;
}

/**
 * hal_srng_dst_get_next_hp - Get next entry from a destination ring and move
 * cached head pointer
 *
 * @hal_soc: Opaque HAL SOC handle
 * @hal_ring: Destination ring pointer
 *
 * Return: Opaque pointer for next ring entry; NULL on failure
 */
static inline void *hal_srng_dst_get_next_hp(void *hal_soc, void *hal_ring)
{
	struct hal_srng *srng = (struct hal_srng *)hal_ring;
	uint32_t *desc;
	/* TODO: Using % is expensive, but we have to do this since
	 * size of some SRNG rings is not power of 2 (due to descriptor
	 * sizes). Need to create separate API for rings used
	 * per-packet, with sizes power of 2 (TCL2SW, REO2SW,
	 * SW2RXDMA and CE rings)
	 */
	uint32_t next_hp = (srng->u.dst_ring.cached_hp + srng->entry_size) %
		srng->ring_size;

	if (next_hp != srng->u.dst_ring.tp) {
		desc = &(srng->ring_base_vaddr[srng->u.dst_ring.cached_hp]);
		srng->u.dst_ring.cached_hp = next_hp;
		return (void *)desc;
	}

	return NULL;
}

/**
 * hal_srng_dst_peek - Get next entry from a ring without moving tail pointer.
 * hal_srng_dst_get_next should be called subsequently to move the tail pointer
 * TODO: See if we need an optimized version of get_next that doesn't check for
 * loop_cnt
 *
 * @hal_soc: Opaque HAL SOC handle
 * @hal_ring: Destination ring pointer
 *
 * Return: Opaque pointer for next ring entry; NULL on failure
 */
static inline void *hal_srng_dst_peek(void *hal_soc, void *hal_ring)
{
	struct hal_srng *srng = (struct hal_srng *)hal_ring;

	if (srng->u.dst_ring.tp != srng->u.dst_ring.cached_hp)
		return (void *)(&(srng->ring_base_vaddr[srng->u.dst_ring.tp]));

	return NULL;
}

/**
 * hal_srng_dst_num_valid - Returns number of valid entries (to be processed
 * by SW) in destination ring
 *
 * @hal_soc: Opaque HAL SOC handle
 * @hal_ring: Destination ring pointer
 * @sync_hw_ptr: Sync cached head pointer with HW
 *
 * Return: number of valid entries
 */
static inline uint32_t hal_srng_dst_num_valid(void *hal_soc, void *hal_ring,
	int sync_hw_ptr)
{
	struct hal_srng *srng = (struct hal_srng *)hal_ring;
	uint32_t hp;
	uint32_t tp = srng->u.dst_ring.tp;

	if (sync_hw_ptr) {
		hp = *(volatile uint32_t *)(srng->u.dst_ring.hp_addr);
		srng->u.dst_ring.cached_hp = hp;
	} else {
		hp = srng->u.dst_ring.cached_hp;
	}

	if (hp >= tp)
		return (hp - tp) / srng->entry_size;
	else
		return (srng->ring_size - tp + hp) / srng->entry_size;
}
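
/*
 * Example (illustrative): bounding work per pass. A handler can snapshot
 * the valid entry count once (syncing the cached head pointer with HW)
 * and process at most that many entries; process_desc() is a hypothetical
 * caller hook.
 *
 *	uint32_t num = hal_srng_dst_num_valid(hal_soc, hal_ring, 1);
 *
 *	while (num-- && (desc = hal_srng_dst_get_next(hal_soc, hal_ring)))
 *		process_desc(desc);
 */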

/**
 * hal_srng_src_reap_next - Reap next entry from a source ring and move reap
 * pointer. This can be used to release any buffers associated with completed
 * ring entries. Note that this should not be used for posting new descriptor
 * entries. Posting of new entries should be done only using
 * hal_srng_src_get_next_reaped when this function is used for reaping.
 *
 * @hal_soc: Opaque HAL SOC handle
 * @hal_ring: Source ring pointer
 *
 * Return: Opaque pointer for next ring entry; NULL on failure
 */
static inline void *hal_srng_src_reap_next(void *hal_soc, void *hal_ring)
{
	struct hal_srng *srng = (struct hal_srng *)hal_ring;
	uint32_t *desc;

	/* TODO: Using % is expensive, but we have to do this since
	 * size of some SRNG rings is not power of 2 (due to descriptor
	 * sizes). Need to create separate API for rings used
	 * per-packet, with sizes power of 2 (TCL2SW, REO2SW,
	 * SW2RXDMA and CE rings)
	 */
	uint32_t next_reap_hp = (srng->u.src_ring.reap_hp + srng->entry_size) %
		srng->ring_size;

	if (next_reap_hp != srng->u.src_ring.cached_tp) {
		desc = &(srng->ring_base_vaddr[next_reap_hp]);
		srng->u.src_ring.reap_hp = next_reap_hp;
		return (void *)desc;
	}

	return NULL;
}

/**
 * hal_srng_src_get_next_reaped - Get next entry from a source ring that is
 * already reaped using hal_srng_src_reap_next, for posting new entries to
 * the ring
 *
 * @hal_soc: Opaque HAL SOC handle
 * @hal_ring: Source ring pointer
 *
 * Return: Opaque pointer for next (reaped) source ring entry; NULL on failure
 */
static inline void *hal_srng_src_get_next_reaped(void *hal_soc, void *hal_ring)
{
	struct hal_srng *srng = (struct hal_srng *)hal_ring;
	uint32_t *desc;

	if (srng->u.src_ring.hp != srng->u.src_ring.reap_hp) {
		desc = &(srng->ring_base_vaddr[srng->u.src_ring.hp]);
		srng->u.src_ring.hp = (srng->u.src_ring.hp + srng->entry_size) %
			srng->ring_size;

		return (void *)desc;
	}

	return NULL;
}
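
/*
 * Example (illustrative sketch): the reap/repost split on a source ring.
 * Completed entries are first reaped so their buffers can be released,
 * then the same slots are re-posted through hal_srng_src_get_next_reaped().
 * release_buffer() and post_buffer() are hypothetical caller hooks.
 *
 *	while ((desc = hal_srng_src_reap_next(hal_soc, hal_ring)))
 *		release_buffer(desc);
 *
 *	while ((desc = hal_srng_src_get_next_reaped(hal_soc, hal_ring)))
 *		post_buffer(desc);
 */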

/**
 * hal_srng_src_pending_reap_next - Reap next entry from a source ring and
 * move reap pointer. This API is used in detach path to release any buffers
 * associated with ring entries which are pending reap.
 *
 * @hal_soc: Opaque HAL SOC handle
 * @hal_ring: Source ring pointer
 *
 * Return: Opaque pointer for next ring entry; NULL on failure
 */
static inline void *hal_srng_src_pending_reap_next(void *hal_soc, void *hal_ring)
{
	struct hal_srng *srng = (struct hal_srng *)hal_ring;
	uint32_t *desc;

	uint32_t next_reap_hp = (srng->u.src_ring.reap_hp + srng->entry_size) %
		srng->ring_size;

	if (next_reap_hp != srng->u.src_ring.hp) {
		desc = &(srng->ring_base_vaddr[next_reap_hp]);
		srng->u.src_ring.reap_hp = next_reap_hp;
		return (void *)desc;
	}

	return NULL;
}

/**
 * hal_srng_src_done_val - Returns the number of entries completed by the
 * target (between the reap pointer and the cached tail pointer) that are
 * yet to be reaped
 *
 * @hal_soc: Opaque HAL SOC handle
 * @hal_ring: Source ring pointer
 *
 * Return: Number of completed entries pending reap; 0 if none
 */
static inline uint32_t hal_srng_src_done_val(void *hal_soc, void *hal_ring)
{
	struct hal_srng *srng = (struct hal_srng *)hal_ring;
	/* TODO: Using % is expensive, but we have to do this since
	 * size of some SRNG rings is not power of 2 (due to descriptor
	 * sizes). Need to create separate API for rings used
	 * per-packet, with sizes power of 2 (TCL2SW, REO2SW,
	 * SW2RXDMA and CE rings)
	 */
	uint32_t next_reap_hp = (srng->u.src_ring.reap_hp + srng->entry_size) %
		srng->ring_size;

	if (next_reap_hp == srng->u.src_ring.cached_tp)
		return 0;

	if (srng->u.src_ring.cached_tp > next_reap_hp)
		return (srng->u.src_ring.cached_tp - next_reap_hp) /
			srng->entry_size;
	else
		return ((srng->ring_size - next_reap_hp) +
			srng->u.src_ring.cached_tp) / srng->entry_size;
}

/**
 * hal_get_sw_hptp - Get SW head and tail pointer location for any ring
 * @hal_soc: Opaque HAL SOC handle
 * @hal_ring: Ring pointer (Source or Destination ring)
 * @tailp: Tail Pointer
 * @headp: Head Pointer
 *
 * Return: Update tail pointer and head pointer in arguments.
 */
static inline void hal_get_sw_hptp(void *hal_soc, void *hal_ring,
				   uint32_t *tailp, uint32_t *headp)
{
	struct hal_srng *srng = (struct hal_srng *)hal_ring;

	if (srng->ring_dir == HAL_SRNG_SRC_RING) {
		*headp = srng->u.src_ring.hp / srng->entry_size;
		*tailp = *(srng->u.src_ring.tp_addr) / srng->entry_size;
	} else {
		*tailp = srng->u.dst_ring.tp / srng->entry_size;
		*headp = *(srng->u.dst_ring.hp_addr) / srng->entry_size;
	}
}

/**
 * hal_srng_src_get_next - Get next entry from a source ring and move cached
 * tail pointer
 *
 * @hal_soc: Opaque HAL SOC handle
 * @hal_ring: Source ring pointer
 *
 * Return: Opaque pointer for next ring entry; NULL on failure
 */
static inline void *hal_srng_src_get_next(void *hal_soc, void *hal_ring)
{
	struct hal_srng *srng = (struct hal_srng *)hal_ring;
	uint32_t *desc;
	/* TODO: Using % is expensive, but we have to do this since
	 * size of some SRNG rings is not power of 2 (due to descriptor
	 * sizes). Need to create separate API for rings used
	 * per-packet, with sizes power of 2 (TCL2SW, REO2SW,
	 * SW2RXDMA and CE rings)
	 */
	uint32_t next_hp = (srng->u.src_ring.hp + srng->entry_size) %
		srng->ring_size;

	if (next_hp != srng->u.src_ring.cached_tp) {
		desc = &(srng->ring_base_vaddr[srng->u.src_ring.hp]);
		srng->u.src_ring.hp = next_hp;
		/* TODO: Since reap function is not used by all rings, we can
		 * remove the following update of reap_hp in this function
		 * if we can ensure that only hal_srng_src_get_next_reaped
		 * is used for the rings requiring reap functionality
		 */
		srng->u.src_ring.reap_hp = next_hp;
		return (void *)desc;
	}

	return NULL;
}

/**
 * hal_srng_src_peek - Get next entry from a ring without moving head pointer.
 * hal_srng_src_get_next should be called subsequently to move the head pointer
 *
 * @hal_soc: Opaque HAL SOC handle
 * @hal_ring: Source ring pointer
 *
 * Return: Opaque pointer for next ring entry; NULL on failure
 */
static inline void *hal_srng_src_peek(void *hal_soc, void *hal_ring)
{
	struct hal_srng *srng = (struct hal_srng *)hal_ring;
	uint32_t *desc;

	/* TODO: Using % is expensive, but we have to do this since
	 * size of some SRNG rings is not power of 2 (due to descriptor
	 * sizes). Need to create separate API for rings used
	 * per-packet, with sizes power of 2 (TCL2SW, REO2SW,
	 * SW2RXDMA and CE rings)
	 */
	if (((srng->u.src_ring.hp + srng->entry_size) %
		srng->ring_size) != srng->u.src_ring.cached_tp) {
		desc = &(srng->ring_base_vaddr[srng->u.src_ring.hp]);
		return (void *)desc;
	}

	return NULL;
}

/**
 * hal_srng_src_num_avail - Returns number of available entries in src ring
 *
 * @hal_soc: Opaque HAL SOC handle
 * @hal_ring: Source ring pointer
 * @sync_hw_ptr: Sync cached tail pointer with HW
 *
 * Return: number of available entries
 */
static inline uint32_t hal_srng_src_num_avail(void *hal_soc,
	void *hal_ring, int sync_hw_ptr)
{
	struct hal_srng *srng = (struct hal_srng *)hal_ring;
	uint32_t tp;
	uint32_t hp = srng->u.src_ring.hp;

	if (sync_hw_ptr) {
		tp = *(srng->u.src_ring.tp_addr);
		srng->u.src_ring.cached_tp = tp;
	} else {
		tp = srng->u.src_ring.cached_tp;
	}

	if (tp > hp)
		return ((tp - hp) / srng->entry_size) - 1;
	else
		return ((srng->ring_size - hp + tp) / srng->entry_size) - 1;
}
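
/*
 * Example (illustrative): flow control before posting. A transmit path can
 * check available slots (optionally syncing the cached tail pointer with
 * HW) before reserving entries; "needed" is caller context.
 *
 *	if (hal_srng_src_num_avail(hal_soc, hal_ring, 0) < needed)
 *		return QDF_STATUS_E_RESOURCES;
 *
 *	desc = hal_srng_src_get_next(hal_soc, hal_ring);
 */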

/**
 * hal_srng_access_end_unlocked - End ring access (unlocked) - update cached
 * ring head/tail pointers to HW.
 * This should be used only if hal_srng_access_start_unlocked was used to
 * start ring access
 *
 * @hal_soc: Opaque HAL SOC handle
 * @hal_ring: Ring pointer (Source or Destination ring)
 *
 * Return: None
 */
static inline void hal_srng_access_end_unlocked(void *hal_soc, void *hal_ring)
{
	struct hal_srng *srng = (struct hal_srng *)hal_ring;

	/* TODO: See if we need a write memory barrier here */
	if (srng->flags & HAL_SRNG_LMAC_RING) {
		/* For LMAC rings, ring pointer updates are done through FW and
		 * hence written to a shared memory location that is read by FW
		 */
		if (srng->ring_dir == HAL_SRNG_SRC_RING) {
			*(srng->u.src_ring.hp_addr) = srng->u.src_ring.hp;
		} else {
			*(srng->u.dst_ring.tp_addr) = srng->u.dst_ring.tp;
		}
	} else {
		if (srng->ring_dir == HAL_SRNG_SRC_RING)
			hal_write_address_32_mb(hal_soc,
				srng->u.src_ring.hp_addr,
				srng->u.src_ring.hp);
		else
			hal_write_address_32_mb(hal_soc,
				srng->u.dst_ring.tp_addr,
				srng->u.dst_ring.tp);
	}
}

/**
 * hal_srng_access_end - Unlock ring access and update cached ring head/tail
 * pointers to HW.
 * This should be used only if hal_srng_access_start was used to start ring
 * access
 *
 * @hal_soc: Opaque HAL SOC handle
 * @hal_ring: Ring pointer (Source or Destination ring)
 *
 * Return: None
 */
static inline void hal_srng_access_end(void *hal_soc, void *hal_ring)
{
	struct hal_srng *srng = (struct hal_srng *)hal_ring;

	if (qdf_unlikely(!hal_ring)) {
		qdf_print("Error: Invalid hal_ring\n");
		return;
	}

	hal_srng_access_end_unlocked(hal_soc, hal_ring);
	SRNG_UNLOCK(&(srng->lock));
}
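
/*
 * Example (illustrative sketch): the canonical locked access pattern for
 * draining a destination ring; process_desc() is a hypothetical caller
 * hook. The end call publishes the updated tail pointer to HW.
 *
 *	if (hal_srng_access_start(hal_soc, hal_ring))
 *		return;
 *
 *	while ((desc = hal_srng_dst_get_next(hal_soc, hal_ring)))
 *		process_desc(desc);
 *
 *	hal_srng_access_end(hal_soc, hal_ring);
 */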

/**
 * hal_srng_access_end_reap - Unlock ring access
 * This should be used only if hal_srng_access_start was used to start ring
 * access and should be used only while reaping SRC ring completions
 *
 * @hal_soc: Opaque HAL SOC handle
 * @hal_ring: Ring pointer (Source or Destination ring)
 *
 * Return: None
 */
static inline void hal_srng_access_end_reap(void *hal_soc, void *hal_ring)
{
	struct hal_srng *srng = (struct hal_srng *)hal_ring;

	SRNG_UNLOCK(&(srng->lock));
}

/* TODO: Check if the following definitions are available in HW headers */
#define WBM_IDLE_SCATTER_BUF_SIZE 32704
#define NUM_MPDUS_PER_LINK_DESC 6
#define NUM_MSDUS_PER_LINK_DESC 7
#define REO_QUEUE_DESC_ALIGN 128

#define LINK_DESC_ALIGN 128

#define ADDRESS_MATCH_TAG_VAL 0x5
/* Number of mpdu link pointers is 9 in case of TX_MPDU_QUEUE_HEAD and 14 in
 * case of TX_MPDU_QUEUE_EXT. We are defining a common average count here
 */
#define NUM_MPDU_LINKS_PER_QUEUE_DESC 12

/* TODO: Check with HW team on the scatter buffer size supported. As per WBM
 * MLD, scatter_buffer_size in IDLE_LIST_CONTROL register is 9 bits and size
 * should be specified in 16 word units. But the number of bits defined for
 * this field in HW header files is 5.
 */
#define WBM_IDLE_SCATTER_BUF_NEXT_PTR_SIZE 8


/**
 * hal_idle_list_scatter_buf_size - Get the size of each scatter buffer
 * in an idle list
 *
 * @hal_soc: Opaque HAL SOC handle
 *
 * Return: scatter buffer size in bytes
 */
static inline uint32_t hal_idle_list_scatter_buf_size(void *hal_soc)
{
	return WBM_IDLE_SCATTER_BUF_SIZE;
}

/**
 * hal_get_link_desc_size - Get the size of each link descriptor
 *
 * @hal_soc: Opaque HAL SOC handle
 *
 * Return: link descriptor size in bytes
 */
static inline uint32_t hal_get_link_desc_size(struct hal_soc *hal_soc)
{
	if (!hal_soc || !hal_soc->ops) {
		qdf_print("Error: Invalid ops\n");
		QDF_BUG(0);
		return -EINVAL;
	}
	if (!hal_soc->ops->hal_get_link_desc_size) {
		qdf_print("Error: Invalid function pointer\n");
		QDF_BUG(0);
		return -EINVAL;
	}
	return hal_soc->ops->hal_get_link_desc_size();
}

/**
 * hal_get_link_desc_align - Get the required start address alignment for
 * link descriptors
 *
 * @hal_soc: Opaque HAL SOC handle
 *
 * Return: required alignment in bytes
 */
static inline uint32_t hal_get_link_desc_align(void *hal_soc)
{
	return LINK_DESC_ALIGN;
}

/**
 * hal_num_mpdus_per_link_desc - Get number of mpdus each link desc can hold
 *
 * @hal_soc: Opaque HAL SOC handle
 *
 */
static inline uint32_t hal_num_mpdus_per_link_desc(void *hal_soc)
{
	return NUM_MPDUS_PER_LINK_DESC;
}

/**
 * hal_num_msdus_per_link_desc - Get number of msdus each link desc can hold
 *
 * @hal_soc: Opaque HAL SOC handle
 *
 */
static inline uint32_t hal_num_msdus_per_link_desc(void *hal_soc)
{
	return NUM_MSDUS_PER_LINK_DESC;
}

/**
 * hal_num_mpdu_links_per_queue_desc - Get number of mpdu links each queue
 * descriptor can hold
 *
 * @hal_soc: Opaque HAL SOC handle
 *
 */
static inline uint32_t hal_num_mpdu_links_per_queue_desc(void *hal_soc)
{
	return NUM_MPDU_LINKS_PER_QUEUE_DESC;
}

/**
 * hal_idle_scatter_buf_num_entries - Get the number of link desc entries
 * that fit in the given scatter buffer size
 *
 * @hal_soc: Opaque HAL SOC handle
 * @scatter_buf_size: Size of scatter buffer
 *
 */
static inline uint32_t hal_idle_scatter_buf_num_entries(void *hal_soc,
	uint32_t scatter_buf_size)
{
	return (scatter_buf_size - WBM_IDLE_SCATTER_BUF_NEXT_PTR_SIZE) /
		hal_srng_get_entrysize(hal_soc, WBM_IDLE_LINK);
}

/**
 * hal_idle_list_num_scatter_bufs - Get the number of scatter buffers
 * needed to hold the given total memory, for a given scatter buffer size
 *
 * @hal_soc: Opaque HAL SOC handle
 * @total_mem: size of memory to be scattered
 * @scatter_buf_size: Size of scatter buffer
 *
 */
static inline uint32_t hal_idle_list_num_scatter_bufs(void *hal_soc,
	uint32_t total_mem, uint32_t scatter_buf_size)
{
	uint8_t rem = (total_mem % (scatter_buf_size -
			WBM_IDLE_SCATTER_BUF_NEXT_PTR_SIZE)) ? 1 : 0;

	uint32_t num_scatter_bufs = (total_mem / (scatter_buf_size -
				WBM_IDLE_SCATTER_BUF_NEXT_PTR_SIZE)) + rem;

	return num_scatter_bufs;
}
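
/*
 * Worked example: with WBM_IDLE_SCATTER_BUF_SIZE = 32704 and the 8-byte
 * next pointer reserved per buffer, each scatter buffer carries
 * 32704 - 8 = 32696 bytes of link descriptor pointers. For a total of,
 * say, 100000 bytes, hal_idle_list_num_scatter_bufs() returns
 * 100000 / 32696 = 3 full buffers plus one more for the 1912-byte
 * remainder, i.e. 4 scatter buffers.
 */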

/* REO parameters to be passed to hal_reo_setup */
struct hal_reo_params {
	/** rx hash steering enabled or disabled */
	bool rx_hash_enabled;
	/** reo remap 1 register */
	uint32_t remap1;
	/** reo remap 2 register */
	uint32_t remap2;
	/** fragment destination ring */
	uint8_t frag_dst_ring;
	/** padding */
	uint8_t padding[3];
};


enum hal_pn_type {
	HAL_PN_NONE,
	HAL_PN_WPA,
	HAL_PN_WAPI_EVEN,
	HAL_PN_WAPI_UNEVEN,
};

#define HAL_RX_MAX_BA_WINDOW 256

/**
 * hal_get_reo_qdesc_align - Get start address alignment for reo
 * queue descriptors
 *
 * @hal_soc: Opaque HAL SOC handle
 *
 */
static inline uint32_t hal_get_reo_qdesc_align(void *hal_soc)
{
	return REO_QUEUE_DESC_ALIGN;
}

/**
 * hal_reo_qdesc_setup - Setup HW REO queue descriptor
 *
 * @hal_soc: Opaque HAL SOC handle
 * @tid: TID of the REO queue
 * @ba_window_size: BlockAck window size
 * @start_seq: Starting sequence number
 * @hw_qdesc_vaddr: Virtual address of REO queue descriptor memory
 * @hw_qdesc_paddr: Physical address of REO queue descriptor memory
 * @pn_type: PN type (one of the types defined in 'enum hal_pn_type')
 *
 */
extern void hal_reo_qdesc_setup(void *hal_soc, int tid, uint32_t ba_window_size,
	uint32_t start_seq, void *hw_qdesc_vaddr, qdf_dma_addr_t hw_qdesc_paddr,
	int pn_type);
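
/*
 * Example (illustrative sketch, assuming the QDF allocator/align helpers
 * and a caller-known descriptor size "qdesc_size"): allocating a REO queue
 * descriptor aligned to hal_get_reo_qdesc_align() and initializing it.
 *
 *	uint32_t align = hal_get_reo_qdesc_align(hal_soc);
 *	void *vaddr = qdf_mem_alloc_consistent(qdf_dev, qdf_dev->dev,
 *					       qdesc_size + align, &paddr);
 *
 *	hal_reo_qdesc_setup(hal_soc, tid, HAL_RX_MAX_BA_WINDOW, start_seq,
 *			    (void *)qdf_align((unsigned long)vaddr, align),
 *			    qdf_align(paddr, align), HAL_PN_NONE);
 */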

/**
 * hal_srng_get_hp_addr - Get head pointer physical address
 *
 * @hal_soc: Opaque HAL SOC handle
 * @hal_ring: Ring pointer (Source or Destination ring)
 *
 */
static inline qdf_dma_addr_t hal_srng_get_hp_addr(void *hal_soc, void *hal_ring)
{
	struct hal_srng *srng = (struct hal_srng *)hal_ring;
	struct hal_soc *hal = (struct hal_soc *)hal_soc;

	if (srng->ring_dir == HAL_SRNG_SRC_RING) {
		return hal->shadow_wrptr_mem_paddr +
		  ((unsigned long)(srng->u.src_ring.hp_addr) -
		  (unsigned long)(hal->shadow_wrptr_mem_vaddr));
	} else {
		return hal->shadow_rdptr_mem_paddr +
		  ((unsigned long)(srng->u.dst_ring.hp_addr) -
		   (unsigned long)(hal->shadow_rdptr_mem_vaddr));
	}
}

/**
 * hal_srng_get_tp_addr - Get tail pointer physical address
 *
 * @hal_soc: Opaque HAL SOC handle
 * @hal_ring: Ring pointer (Source or Destination ring)
 *
 */
static inline qdf_dma_addr_t hal_srng_get_tp_addr(void *hal_soc, void *hal_ring)
{
	struct hal_srng *srng = (struct hal_srng *)hal_ring;
	struct hal_soc *hal = (struct hal_soc *)hal_soc;

	if (srng->ring_dir == HAL_SRNG_SRC_RING) {
		return hal->shadow_rdptr_mem_paddr +
			((unsigned long)(srng->u.src_ring.tp_addr) -
			(unsigned long)(hal->shadow_rdptr_mem_vaddr));
	} else {
		return hal->shadow_wrptr_mem_paddr +
			((unsigned long)(srng->u.dst_ring.tp_addr) -
			(unsigned long)(hal->shadow_wrptr_mem_vaddr));
	}
}

/**
 * hal_get_srng_params - Retrieve SRNG parameters for a given ring from HAL
 *
 * @hal_soc: Opaque HAL SOC handle
 * @hal_ring: Ring pointer (Source or Destination ring)
 * @ring_params: SRNG parameters will be returned through this structure
 */
extern void hal_get_srng_params(void *hal_soc, void *hal_ring,
	struct hal_srng_params *ring_params);

/**
 * hal_get_meminfo - Retrieve hal memory base address
 *
 * @hal_soc: Opaque HAL SOC handle
 * @mem: pointer to structure to be updated with hal mem info
 */
extern void hal_get_meminfo(void *hal_soc, struct hal_mem_info *mem);

/**
 * hal_get_target_type - Return target type
 *
 * @hal: Opaque HAL SOC handle
 */
uint32_t hal_get_target_type(struct hal_soc *hal);

/**
 * hal_get_ba_aging_timeout - Retrieve BA aging timeout
 *
 * @hal_soc: Opaque HAL SOC handle
 * @ac: Access category
 * @value: timeout duration in millisec
 */
void hal_get_ba_aging_timeout(void *hal_soc, uint8_t ac,
			      uint32_t *value);
/**
 * hal_set_ba_aging_timeout - Set BA aging timeout
 *
 * @hal_soc: Opaque HAL SOC handle
 * @ac: Access category
 * @value: timeout duration in millisec
 */
void hal_set_ba_aging_timeout(void *hal_soc, uint8_t ac,
			      uint32_t value);
/**
 * hal_srng_dst_hw_init - Private function to initialize SRNG
 * destination ring HW
 * @hal: HAL SOC handle
 * @srng: SRNG ring pointer
 */
static inline void hal_srng_dst_hw_init(struct hal_soc *hal,
	struct hal_srng *srng)
{
	hal->ops->hal_srng_dst_hw_init(hal, srng);
}

/**
 * hal_srng_src_hw_init - Private function to initialize SRNG
 * source ring HW
 * @hal: HAL SOC handle
 * @srng: SRNG ring pointer
 */
static inline void hal_srng_src_hw_init(struct hal_soc *hal,
	struct hal_srng *srng)
{
	hal->ops->hal_srng_src_hw_init(hal, srng);
}

/**
 * hal_get_hw_hptp() - Get HW head and tail pointer value for any ring
 * @hal: Opaque HAL SOC handle
 * @hal_ring: Ring pointer (Source or Destination ring)
 * @headp: Head Pointer
 * @tailp: Tail Pointer
 * @ring_type: Ring type
 *
 * Return: Update tail pointer and head pointer in arguments.
 */
static inline void hal_get_hw_hptp(struct hal_soc *hal, void *hal_ring,
				   uint32_t *headp, uint32_t *tailp,
				   uint8_t ring_type)
{
	hal->ops->hal_get_hw_hptp(hal, hal_ring, headp, tailp, ring_type);
}

/**
 * hal_reo_setup - Initialize HW REO block
 *
 * @halsoc: Opaque HAL SOC handle
 * @reoparams: parameters needed by HAL for REO config
 */
static inline void hal_reo_setup(void *halsoc,
	 void *reoparams)
{
	struct hal_soc *hal_soc = (struct hal_soc *)halsoc;

	hal_soc->ops->hal_reo_setup(halsoc, reoparams);
}

/**
 * hal_setup_link_idle_list - Setup scattered idle list using the
 * buffer list provided
 *
 * @halsoc: Opaque HAL SOC handle
 * @scatter_bufs_base_paddr: Array of physical base addresses
 * @scatter_bufs_base_vaddr: Array of virtual base addresses
 * @num_scatter_bufs: Number of scatter buffers in the above lists
 * @scatter_buf_size: Size of each scatter buffer
 * @last_buf_end_offset: Offset to the last entry
 * @num_entries: Total entries of all scatter bufs
 *
 */
static inline void hal_setup_link_idle_list(void *halsoc,
	qdf_dma_addr_t scatter_bufs_base_paddr[],
	void *scatter_bufs_base_vaddr[], uint32_t num_scatter_bufs,
	uint32_t scatter_buf_size, uint32_t last_buf_end_offset,
	uint32_t num_entries)
{
	struct hal_soc *hal_soc = (struct hal_soc *)halsoc;

	hal_soc->ops->hal_setup_link_idle_list(halsoc, scatter_bufs_base_paddr,
			scatter_bufs_base_vaddr, num_scatter_bufs,
			scatter_buf_size, last_buf_end_offset,
			num_entries);
}

/**
 * hal_srng_dump_ring_desc() - Dump ring descriptor info
 *
 * @hal: Opaque HAL SOC handle
 * @hal_ring: Ring pointer (Source or Destination ring)
 * @ring_desc: Opaque ring descriptor handle
 */
static inline void hal_srng_dump_ring_desc(struct hal_soc *hal, void *hal_ring,
					   void *ring_desc)
{
	struct hal_srng *srng = (struct hal_srng *)hal_ring;

	QDF_TRACE_HEX_DUMP(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_FATAL,
			   ring_desc, (srng->entry_size << 2));
}

/**
 * hal_srng_dump_ring() - Dump the last 128 descriptors of a destination ring,
 * walking backwards from the current tail pointer
 *
 * @hal: Opaque HAL SOC handle
 * @hal_ring: Destination ring pointer
 */
static inline void hal_srng_dump_ring(struct hal_soc *hal, void *hal_ring)
{
	struct hal_srng *srng = (struct hal_srng *)hal_ring;
	uint32_t *desc;
	uint32_t tp, i;

	tp = srng->u.dst_ring.tp;

	for (i = 0; i < 128; i++) {
		/* Wrap around to the end of the ring */
		if (!tp)
			tp = srng->ring_size;

		desc = &srng->ring_base_vaddr[tp - srng->entry_size];
		QDF_TRACE_HEX_DUMP(QDF_MODULE_ID_DP,
				   QDF_TRACE_LEVEL_FATAL,
				   desc, (srng->entry_size << 2));

		tp -= srng->entry_size;
	}
}

#endif /* _HAL_API_H_ */