/*
 * Copyright (c) 2016-2019 The Linux Foundation. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */

#ifndef _HAL_API_H_
#define _HAL_API_H_

#include "qdf_types.h"
#include "qdf_util.h"
#include "hal_internal.h"
#define MAX_UNWINDOWED_ADDRESS 0x80000
#ifdef QCA_WIFI_QCA6390
#define WINDOW_ENABLE_BIT 0x40000000
#else
#define WINDOW_ENABLE_BIT 0x80000000
#endif
#define WINDOW_REG_ADDRESS 0x310C
#define WINDOW_SHIFT 19
#define WINDOW_VALUE_MASK 0x3F
#define WINDOW_START MAX_UNWINDOWED_ADDRESS
#define WINDOW_RANGE_MASK 0x7FFFF

/*
 * BAR + 4K is always accessible; any access outside this space
 * requires the force wake procedure. The QCA6390 accessors below gate
 * force wake on offsets beyond MAPPED_REF_OFF.
 */
#define MAPPED_REF_OFF 0x4063
#define FORCE_WAKE_DELAY_TIMEOUT 50
#define FORCE_WAKE_DELAY_MS 5

#ifdef ENABLE_VERBOSE_DEBUG
static inline void
hal_set_verbose_debug(bool flag)
{
	is_hal_verbose_debug_enabled = flag;
}
#endif

#ifndef QCA_WIFI_QCA6390
static inline int hal_force_wake_request(struct hal_soc *soc)
{
	return 0;
}

static inline int hal_force_wake_release(struct hal_soc *soc)
{
	return 0;
}
#else
static inline int hal_force_wake_request(struct hal_soc *soc)
{
	uint32_t timeout = 0;

	if (pld_force_wake_request(soc->qdf_dev->dev)) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  "%s: Request send failed\n", __func__);
		return -EINVAL;
	}

	while (!pld_is_device_awake(soc->qdf_dev->dev) &&
	       timeout <= FORCE_WAKE_DELAY_TIMEOUT) {
		mdelay(FORCE_WAKE_DELAY_MS);
		timeout += FORCE_WAKE_DELAY_MS;
	}

	if (pld_is_device_awake(soc->qdf_dev->dev) == true)
		return 0;
	else
		return -ETIMEDOUT;
}

static inline int hal_force_wake_release(struct hal_soc *soc)
{
	return pld_force_wake_release(soc->qdf_dev->dev);
}
#endif

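/*
 * Usage sketch (illustrative only; caller-side names are hypothetical):
 * register accesses beyond MAPPED_REF_OFF are bracketed by a force-wake
 * request/release pair, mirroring what hal_write32_mb()/hal_read32_mb()
 * below do internally on QCA6390:
 *
 *	if (hal_force_wake_request(hal_soc))
 *		return;	// -EINVAL (request failed) or -ETIMEDOUT
 *	qdf_iowrite32(hal_soc->dev_base_addr + offset, value);
 *	if (hal_force_wake_release(hal_soc))
 *		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
 *			  "%s: Wake up release failed\n", __func__);
 */
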
static inline void hal_select_window(struct hal_soc *hal_soc, uint32_t offset)
{
	uint32_t window = (offset >> WINDOW_SHIFT) & WINDOW_VALUE_MASK;

	if (window != hal_soc->register_window) {
		qdf_iowrite32(hal_soc->dev_base_addr + WINDOW_REG_ADDRESS,
			      WINDOW_ENABLE_BIT | window);
		hal_soc->register_window = window;
	}
}

/**
 * note1: WINDOW_RANGE_MASK = (1 << WINDOW_SHIFT) - 1
 * note2: 1 << WINDOW_SHIFT = MAX_UNWINDOWED_ADDRESS
 * note3: WINDOW_VALUE_MASK = big enough that trying to write past that window
 *				would be a bug
 */
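
/*
 * Worked example (hypothetical offset, using the constants above): for
 * offset 0xA0040,
 *	window          = (0xA0040 >> WINDOW_SHIFT) & WINDOW_VALUE_MASK = 1
 *	windowed access = dev_base_addr + WINDOW_START + (0xA0040 & 0x7FFFF)
 *			= dev_base_addr + 0x80000 + 0x20040
 * hal_select_window() programs window 1 once; later accesses that fall in
 * the same 512KB window skip the window-register write.
 */
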
#ifndef QCA_WIFI_QCA6390
static inline void hal_write32_mb(struct hal_soc *hal_soc, uint32_t offset,
				  uint32_t value)
{
	if (!hal_soc->use_register_windowing ||
	    offset < MAX_UNWINDOWED_ADDRESS) {
		qdf_iowrite32(hal_soc->dev_base_addr + offset, value);
	} else {
		qdf_spin_lock_irqsave(&hal_soc->register_access_lock);
		hal_select_window(hal_soc, offset);
		qdf_iowrite32(hal_soc->dev_base_addr + WINDOW_START +
			  (offset & WINDOW_RANGE_MASK), value);
		qdf_spin_unlock_irqrestore(&hal_soc->register_access_lock);
	}
}
#else
static inline void hal_write32_mb(struct hal_soc *hal_soc, uint32_t offset,
				  uint32_t value)
{
	if ((offset > MAPPED_REF_OFF) &&
	    hal_force_wake_request(hal_soc)) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  "%s: Wake up request failed\n", __func__);
		return;
	}

	if (!hal_soc->use_register_windowing ||
	    offset < MAX_UNWINDOWED_ADDRESS) {
		qdf_iowrite32(hal_soc->dev_base_addr + offset, value);
	} else {
		qdf_spin_lock_irqsave(&hal_soc->register_access_lock);
		hal_select_window(hal_soc, offset);
		qdf_iowrite32(hal_soc->dev_base_addr + WINDOW_START +
			  (offset & WINDOW_RANGE_MASK), value);
		qdf_spin_unlock_irqrestore(&hal_soc->register_access_lock);
	}

	if ((offset > MAPPED_REF_OFF) &&
	    hal_force_wake_release(hal_soc))
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  "%s: Wake up release failed\n", __func__);
}

#endif

/**
 * hal_write_address_32_mb - write a value to a register
 *
 */
static inline void hal_write_address_32_mb(struct hal_soc *hal_soc,
					   void __iomem *addr, uint32_t value)
{
	uint32_t offset;

	if (!hal_soc->use_register_windowing)
		return qdf_iowrite32(addr, value);

	offset = addr - hal_soc->dev_base_addr;
	hal_write32_mb(hal_soc, offset, value);
}

#ifndef QCA_WIFI_QCA6390
static inline uint32_t hal_read32_mb(struct hal_soc *hal_soc, uint32_t offset)
{
	uint32_t ret;

	if (!hal_soc->use_register_windowing ||
	    offset < MAX_UNWINDOWED_ADDRESS) {
		return qdf_ioread32(hal_soc->dev_base_addr + offset);
	}

	qdf_spin_lock_irqsave(&hal_soc->register_access_lock);
	hal_select_window(hal_soc, offset);
	ret = qdf_ioread32(hal_soc->dev_base_addr + WINDOW_START +
		       (offset & WINDOW_RANGE_MASK));
	qdf_spin_unlock_irqrestore(&hal_soc->register_access_lock);

	return ret;
}

/**
 * hal_read_address_32_mb() - Read 32-bit value from the register
 * @soc: soc handle
 * @addr: register address to read
 *
 * Return: 32-bit value
 */
static inline uint32_t hal_read_address_32_mb(struct hal_soc *soc,
					      void __iomem *addr)
{
	uint32_t offset;
	uint32_t ret;

	if (!soc->use_register_windowing)
		return qdf_ioread32(addr);

	offset = addr - soc->dev_base_addr;
	ret = hal_read32_mb(soc, offset);
	return ret;
}
#else
static inline uint32_t hal_read32_mb(struct hal_soc *hal_soc, uint32_t offset)
{
	uint32_t ret;

	if ((offset > MAPPED_REF_OFF) &&
	    hal_force_wake_request(hal_soc)) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  "%s: Wake up request failed\n", __func__);
		return -EINVAL;
	}

	if (!hal_soc->use_register_windowing ||
	    offset < MAX_UNWINDOWED_ADDRESS) {
		return qdf_ioread32(hal_soc->dev_base_addr + offset);
	}

	qdf_spin_lock_irqsave(&hal_soc->register_access_lock);
	hal_select_window(hal_soc, offset);
	ret = qdf_ioread32(hal_soc->dev_base_addr + WINDOW_START +
		       (offset & WINDOW_RANGE_MASK));
	qdf_spin_unlock_irqrestore(&hal_soc->register_access_lock);

	if ((offset > MAPPED_REF_OFF) &&
	    hal_force_wake_release(hal_soc))
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  "%s: Wake up release failed\n", __func__);

	return ret;
}

static inline uint32_t hal_read_address_32_mb(struct hal_soc *soc,
					      void __iomem *addr)
{
	uint32_t offset;
	uint32_t ret;

	if (!soc->use_register_windowing)
		return qdf_ioread32(addr);

	offset = addr - soc->dev_base_addr;
	ret = hal_read32_mb(soc, offset);
	return ret;
}
#endif

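/*
 * Read-modify-write sketch using the accessors above (illustrative;
 * 'REG_OFFSET', 'FIELD_MASK' and 'new_field_val' are placeholders, not
 * real register definitions):
 *
 *	uint32_t val = hal_read32_mb(hal_soc, REG_OFFSET);
 *
 *	val &= ~FIELD_MASK;
 *	val |= (new_field_val & FIELD_MASK);
 *	hal_write32_mb(hal_soc, REG_OFFSET, val);
 */
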
#include "hif_io32.h"

/**
 * hal_attach - Initialize HAL layer
 * @hif_handle: Opaque HIF handle
 * @qdf_dev: QDF device
 *
 * Return: Opaque HAL SOC handle
 *		 NULL on failure
 *
 * This function should be called as part of HIF initialization (for accessing
 * copy engines). DP layer will get hal_soc handle using hif_get_hal_handle()
 */
extern void *hal_attach(void *hif_handle, qdf_device_t qdf_dev);

/**
 * hal_detach - Detach HAL layer
 * @hal_soc: HAL SOC handle
 *
 * This function should be called as part of HIF detach
 *
 */
extern void hal_detach(void *hal_soc);

/* SRNG type to be passed in APIs hal_srng_get_entrysize and hal_srng_setup */
enum hal_ring_type {
	REO_DST = 0,
	REO_EXCEPTION = 1,
	REO_REINJECT = 2,
	REO_CMD = 3,
	REO_STATUS = 4,
	TCL_DATA = 5,
	TCL_CMD = 6,
	TCL_STATUS = 7,
	CE_SRC = 8,
	CE_DST = 9,
	CE_DST_STATUS = 10,
	WBM_IDLE_LINK = 11,
	SW2WBM_RELEASE = 12,
	WBM2SW_RELEASE = 13,
	RXDMA_BUF = 14,
	RXDMA_DST = 15,
	RXDMA_MONITOR_BUF = 16,
	RXDMA_MONITOR_STATUS = 17,
	RXDMA_MONITOR_DST = 18,
	RXDMA_MONITOR_DESC = 19,
	DIR_BUF_RX_DMA_SRC = 20,
#ifdef WLAN_FEATURE_CIF_CFR
	WIFI_POS_SRC,
#endif
	MAX_RING_TYPES
};

#define HAL_SRNG_LMAC_RING 0x80000000
/* SRNG flags passed in hal_srng_params.flags */
#define HAL_SRNG_MSI_SWAP				0x00000008
#define HAL_SRNG_RING_PTR_SWAP			0x00000010
#define HAL_SRNG_DATA_TLV_SWAP			0x00000020
#define HAL_SRNG_LOW_THRES_INTR_ENABLE	0x00010000
#define HAL_SRNG_MSI_INTR				0x00020000

#define PN_SIZE_24 0
#define PN_SIZE_48 1
#define PN_SIZE_128 2

/**
 * hal_srng_get_entrysize - Returns size of ring entry in bytes. Should be
 * used by callers for calculating the size of memory to be allocated before
 * calling hal_srng_setup to setup the ring
 *
 * @hal_soc: Opaque HAL SOC handle
 * @ring_type: one of the types from hal_ring_type
 *
 */
extern uint32_t hal_srng_get_entrysize(void *hal_soc, int ring_type);

/**
 * hal_srng_max_entries - Returns maximum possible number of ring entries
 * @hal_soc: Opaque HAL SOC handle
 * @ring_type: one of the types from hal_ring_type
 *
 * Return: Maximum number of entries for the given ring_type
 */
uint32_t hal_srng_max_entries(void *hal_soc, int ring_type);

/**
 * hal_srng_dump - Dump ring status
 * @srng: hal srng pointer
 */
void hal_srng_dump(struct hal_srng *srng);

/**
 * hal_srng_get_dir - Returns the direction of the ring
 * @hal_soc: Opaque HAL SOC handle
 * @ring_type: one of the types from hal_ring_type
 *
 * Return: Ring direction
 */
enum hal_srng_dir hal_srng_get_dir(void *hal_soc, int ring_type);

/* HAL memory information */
struct hal_mem_info {
	/* dev base virtual addr */
	void *dev_base_addr;
	/* dev base physical addr */
	void *dev_base_paddr;
	/* Remote virtual pointer memory for HW/FW updates */
	void *shadow_rdptr_mem_vaddr;
	/* Remote physical pointer memory for HW/FW updates */
	void *shadow_rdptr_mem_paddr;
	/* Shared memory for ring pointer updates from host to FW */
	void *shadow_wrptr_mem_vaddr;
	/* Shared physical memory for ring pointer updates from host to FW */
	void *shadow_wrptr_mem_paddr;
};

/* SRNG parameters to be passed to hal_srng_setup */
struct hal_srng_params {
	/* Physical base address of the ring */
	qdf_dma_addr_t ring_base_paddr;
	/* Virtual base address of the ring */
	void *ring_base_vaddr;
	/* Number of entries in ring */
	uint32_t num_entries;
	/* max transfer length */
	uint16_t max_buffer_length;
	/* MSI Address */
	qdf_dma_addr_t msi_addr;
	/* MSI data */
	uint32_t msi_data;
	/* Interrupt timer threshold - in micro seconds */
	uint32_t intr_timer_thres_us;
	/* Interrupt batch counter threshold - in number of ring entries */
	uint32_t intr_batch_cntr_thres_entries;
	/* Low threshold - in number of ring entries
	 * (valid for src rings only)
	 */
	uint32_t low_threshold;
	/* Misc flags */
	uint32_t flags;
	/* Unique ring id */
	uint8_t ring_id;
	/* Source or Destination ring */
	enum hal_srng_dir ring_dir;
	/* Size of ring entry */
	uint32_t entry_size;
	/* hw register base address */
	void *hwreg_base[MAX_SRNG_REG_GROUPS];
};

/* hal_construct_shadow_config() - initialize the shadow registers for dp rings
 * @hal_soc: hal handle
 *
 * Return: QDF_STATUS_OK on success
 */
extern QDF_STATUS hal_construct_shadow_config(void *hal_soc);

/* hal_set_one_shadow_config() - add a config for the specified ring
 * @hal_soc: hal handle
 * @ring_type: ring type
 * @ring_num: ring num
 *
 * The ring type and ring num uniquely specify the ring.  After this call,
 * the hp/tp will be added as the next entry in the shadow register
 * configuration table.  The hal code will use the shadow register address
 * in place of the hp/tp address.
 *
 * This function is exposed, so that the CE module can skip configuring shadow
 * registers for unused rings and rings assigned to the firmware.
 *
 * Return: QDF_STATUS_OK on success
 */
extern QDF_STATUS hal_set_one_shadow_config(void *hal_soc, int ring_type,
					    int ring_num);
/**
 * hal_get_shadow_config() - retrieve the config table
 * @hal_soc: hal handle
 * @shadow_config: will point to the config table after this call returns
 * @num_shadow_registers_configured: will contain the number of valid entries
 */
extern void hal_get_shadow_config(void *hal_soc,
				  struct pld_shadow_reg_v2_cfg **shadow_config,
				  int *num_shadow_registers_configured);
/**
 * hal_srng_setup - Initialize HW SRNG ring.
 *
 * @hal_soc: Opaque HAL SOC handle
 * @ring_type: one of the types from hal_ring_type
 * @ring_num: Ring number if there are multiple rings of
 *		same type (starting from 0)
 * @mac_id: valid MAC Id should be passed if ring type is one of lmac rings
 * @ring_params: SRNG ring params in hal_srng_params structure.
 *
 * Callers are expected to allocate contiguous ring memory of size
 * 'num_entries * entry_size' bytes and pass the physical and virtual base
 * addresses through 'ring_base_paddr' and 'ring_base_vaddr' in hal_srng_params
 * structure. Ring base address should be 8 byte aligned and size of each ring
 * entry should be queried using the API hal_srng_get_entrysize
 *
 * Return: Opaque pointer to ring on success
 *		 NULL on failure (if given ring is not available)
 */
extern void *hal_srng_setup(void *hal_soc, int ring_type, int ring_num,
	int mac_id, struct hal_srng_params *ring_params);

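/*
 * Setup sketch (illustrative only; error handling elided, and local names
 * such as 'ring_params' are hypothetical). Ring memory is sized from
 * hal_srng_get_entrysize() and handed to hal_srng_setup(); the
 * qdf_mem_alloc_consistent() call is one plausible way to obtain
 * DMA-coherent ring memory:
 *
 *	struct hal_srng_params ring_params = {0};
 *	uint32_t entry_size = hal_srng_get_entrysize(hal_soc, REO_DST);
 *	uint32_t num_entries = hal_srng_max_entries(hal_soc, REO_DST);
 *
 *	ring_params.ring_base_vaddr = qdf_mem_alloc_consistent(qdf_dev,
 *			qdf_dev->dev, num_entries * entry_size,
 *			&ring_params.ring_base_paddr);
 *	ring_params.num_entries = num_entries;
 *	hal_ring = hal_srng_setup(hal_soc, REO_DST, 0, 0, &ring_params);
 */
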
/* Remapping ids of REO rings */
#define REO_REMAP_TCL 0
#define REO_REMAP_SW1 1
#define REO_REMAP_SW2 2
#define REO_REMAP_SW3 3
#define REO_REMAP_SW4 4
#define REO_REMAP_RELEASE 5
#define REO_REMAP_FW 6
#define REO_REMAP_UNUSED 7

/*
 * currently this macro only works for IX0 since all the rings we are remapping
 * can be remapped from HWIO_REO_R0_DESTINATION_RING_CTRL_IX_0
 */
#define HAL_REO_REMAP_VAL(_ORIGINAL_DEST, _NEW_DEST) \
	HAL_REO_REMAP_VAL_(_ORIGINAL_DEST, _NEW_DEST)
/* allow the destination macros to be expanded */
#define HAL_REO_REMAP_VAL_(_ORIGINAL_DEST, _NEW_DEST) \
	(_NEW_DEST << \
	 (HWIO_REO_R0_DESTINATION_RING_CTRL_IX_0_DEST_RING_MAPPING_ ## \
	  _ORIGINAL_DEST ## _SHFT))

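/*
 * Illustrative use (a sketch, not taken from the driver): to steer frames
 * whose original destination is REO2SW1 to REO2SW3 instead, build an IX0
 * value with the remap macro and write it through
 * hal_reo_read_write_ctrl_ix() (declared below):
 *
 *	uint32_t ix0 = HAL_REO_REMAP_VAL(REO_REMAP_SW1, REO_REMAP_SW3);
 *
 *	hal_reo_read_write_ctrl_ix(hal, false, &ix0, NULL, NULL, NULL);
 */
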
/**
 * hal_reo_read_write_ctrl_ix - Read or write REO_DESTINATION_RING_CTRL_IX
 * @hal: HAL SOC handle
 * @read: boolean value to indicate if read or write
 * @ix0: pointer to store IX0 reg value
 * @ix1: pointer to store IX1 reg value
 * @ix2: pointer to store IX2 reg value
 * @ix3: pointer to store IX3 reg value
 */
extern void hal_reo_read_write_ctrl_ix(struct hal_soc *hal, bool read,
				       uint32_t *ix0, uint32_t *ix1,
				       uint32_t *ix2, uint32_t *ix3);

/**
 * hal_srng_dst_set_hp_paddr() - Set physical address to dest SRNG head pointer
 * @sring: srng pointer
 * @paddr: physical address
 */
extern void hal_srng_dst_set_hp_paddr(struct hal_srng *sring, uint64_t paddr);

/**
 * hal_srng_dst_init_hp() - Initialize head pointer with cached head pointer
 * @srng: srng pointer
 * @vaddr: virtual address
 */
extern void hal_srng_dst_init_hp(struct hal_srng *srng, uint32_t *vaddr);

/**
 * hal_srng_cleanup - Deinitialize HW SRNG ring.
 * @hal_soc: Opaque HAL SOC handle
 * @hal_srng: Opaque HAL SRNG pointer
 */
extern void hal_srng_cleanup(void *hal_soc, void *hal_srng);

static inline bool hal_srng_initialized(void *hal_ring)
{
	struct hal_srng *srng = (struct hal_srng *)hal_ring;

	return !!srng->initialized;
}

/**
 * hal_srng_access_start_unlocked - Start ring access (unlocked). Should use
 * hal_srng_access_start if locked access is required
 *
 * @hal_soc: Opaque HAL SOC handle
 * @hal_ring: Ring pointer (Source or Destination ring)
 *
 * Return: 0 on success; error on failure
 */
static inline int hal_srng_access_start_unlocked(void *hal_soc, void *hal_ring)
{
	struct hal_srng *srng = (struct hal_srng *)hal_ring;

	if (srng->ring_dir == HAL_SRNG_SRC_RING)
		srng->u.src_ring.cached_tp =
			*(volatile uint32_t *)(srng->u.src_ring.tp_addr);
	else
		srng->u.dst_ring.cached_hp =
			*(volatile uint32_t *)(srng->u.dst_ring.hp_addr);

	return 0;
}

/**
 * hal_srng_access_start - Start (locked) ring access
 *
 * @hal_soc: Opaque HAL SOC handle
 * @hal_ring: Ring pointer (Source or Destination ring)
 *
 * Return: 0 on success; error on failure
 */
static inline int hal_srng_access_start(void *hal_soc, void *hal_ring)
{
	struct hal_srng *srng = (struct hal_srng *)hal_ring;

	if (qdf_unlikely(!hal_ring)) {
		qdf_print("Error: Invalid hal_ring\n");
		return -EINVAL;
	}

	SRNG_LOCK(&(srng->lock));

	return hal_srng_access_start_unlocked(hal_soc, hal_ring);
}

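/*
 * Typical locked destination-ring processing loop (an illustrative sketch;
 * 'desc' and the error handling are caller-side and hypothetical):
 *
 *	void *desc;
 *
 *	if (hal_srng_access_start(hal_soc, hal_ring))
 *		return;
 *	while ((desc = hal_srng_dst_get_next(hal_soc, hal_ring))) {
 *		... process the descriptor pointed to by 'desc' ...
 *	}
 *	hal_srng_access_end(hal_soc, hal_ring);
 */
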
/**
 * hal_srng_dst_get_next - Get next entry from a destination ring and move
 * cached tail pointer
 *
 * @hal_soc: Opaque HAL SOC handle
 * @hal_ring: Destination ring pointer
 *
 * Return: Opaque pointer for next ring entry; NULL on failure
 */
static inline void *hal_srng_dst_get_next(void *hal_soc, void *hal_ring)
{
	struct hal_srng *srng = (struct hal_srng *)hal_ring;
	uint32_t *desc;

	if (srng->u.dst_ring.tp != srng->u.dst_ring.cached_hp) {
		desc = &(srng->ring_base_vaddr[srng->u.dst_ring.tp]);
		/* TODO: Using % is expensive, but we have to do this since
		 * size of some SRNG rings is not power of 2 (due to descriptor
		 * sizes). Need to create separate API for rings used
		 * per-packet, with sizes power of 2 (TCL2SW, REO2SW,
		 * SW2RXDMA and CE rings)
		 */
		srng->u.dst_ring.tp = (srng->u.dst_ring.tp + srng->entry_size) %
			srng->ring_size;

		return (void *)desc;
	}

	return NULL;
}

/**
 * hal_srng_dst_get_next_hp - Get next entry from a destination ring and move
 * cached head pointer
 *
 * @hal_soc: Opaque HAL SOC handle
 * @hal_ring: Destination ring pointer
 *
 * Return: Opaque pointer for next ring entry; NULL on failure
 */
static inline void *hal_srng_dst_get_next_hp(void *hal_soc, void *hal_ring)
{
	struct hal_srng *srng = (struct hal_srng *)hal_ring;
	uint32_t *desc;
	/* TODO: Using % is expensive, but we have to do this since
	 * size of some SRNG rings is not power of 2 (due to descriptor
	 * sizes). Need to create separate API for rings used
	 * per-packet, with sizes power of 2 (TCL2SW, REO2SW,
	 * SW2RXDMA and CE rings)
	 */
	uint32_t next_hp = (srng->u.dst_ring.cached_hp + srng->entry_size) %
		srng->ring_size;

	if (next_hp != srng->u.dst_ring.tp) {
		desc = &(srng->ring_base_vaddr[srng->u.dst_ring.cached_hp]);
		srng->u.dst_ring.cached_hp = next_hp;
		return (void *)desc;
	}

	return NULL;
}

/**
 * hal_srng_dst_peek - Check if there are any entries in the ring (peek)
 * @hal_soc: Opaque HAL SOC handle
 * @hal_ring: Destination ring pointer
 *
 * Caller takes responsibility for any locking needs.
 *
 * Return: Opaque pointer for next ring entry; NULL on failure
 */
static inline
void *hal_srng_dst_peek(void *hal_soc, void *hal_ring)
{
	struct hal_srng *srng = (struct hal_srng *)hal_ring;

	if (srng->u.dst_ring.tp != srng->u.dst_ring.cached_hp)
		return (void *)(&srng->ring_base_vaddr[srng->u.dst_ring.tp]);

	return NULL;
}

/**
 * hal_srng_dst_peek_sync - Check if there are any entries in the ring (peek)
 * @hal_soc: Opaque HAL SOC handle
 * @hal_ring: Destination ring pointer
 *
 * Sync cached head pointer with HW.
 * Caller takes responsibility for any locking needs.
 *
 * Return: Opaque pointer for next ring entry; NULL on failure
 */
static inline
void *hal_srng_dst_peek_sync(void *hal_soc, void *hal_ring)
{
	struct hal_srng *srng = (struct hal_srng *)hal_ring;

	srng->u.dst_ring.cached_hp =
		*(volatile uint32_t *)(srng->u.dst_ring.hp_addr);

	if (srng->u.dst_ring.tp != srng->u.dst_ring.cached_hp)
		return (void *)(&(srng->ring_base_vaddr[srng->u.dst_ring.tp]));

	return NULL;
}

/**
 * hal_srng_dst_peek_sync_locked - Peek for any entries in the ring
 * @hal_soc: Opaque HAL SOC handle
 * @hal_ring: Destination ring pointer
 *
 * Sync cached head pointer with HW.
 * This function takes up SRNG_LOCK. Should not be called with SRNG lock held.
 *
 * Return: Opaque pointer for next ring entry; NULL on failure
 */
static inline
void *hal_srng_dst_peek_sync_locked(void *hal_soc, void *hal_ring)
{
	struct hal_srng *srng = (struct hal_srng *)hal_ring;
	void *ring_desc_ptr = NULL;

	if (qdf_unlikely(!hal_ring)) {
		qdf_print("Error: Invalid hal_ring\n");
		return NULL;
	}

	SRNG_LOCK(&srng->lock);

	ring_desc_ptr = hal_srng_dst_peek_sync(hal_soc, hal_ring);

	SRNG_UNLOCK(&srng->lock);

	return ring_desc_ptr;
}

/**
 * hal_srng_dst_num_valid - Returns number of valid entries (to be processed
 * by SW) in destination ring
 *
 * @hal_soc: Opaque HAL SOC handle
 * @hal_ring: Destination ring pointer
 * @sync_hw_ptr: Sync cached head pointer with HW
 *
 * Return: Number of valid entries
 */
static inline uint32_t hal_srng_dst_num_valid(void *hal_soc, void *hal_ring,
	int sync_hw_ptr)
{
	struct hal_srng *srng = (struct hal_srng *)hal_ring;
	uint32_t hp;
	uint32_t tp = srng->u.dst_ring.tp;

	if (sync_hw_ptr) {
		hp = *(volatile uint32_t *)(srng->u.dst_ring.hp_addr);
		srng->u.dst_ring.cached_hp = hp;
	} else {
		hp = srng->u.dst_ring.cached_hp;
	}

	if (hp >= tp)
		return (hp - tp) / srng->entry_size;
	else
		return (srng->ring_size - tp + hp) / srng->entry_size;
}

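/*
 * Worked example (hypothetical values): with ring_size = 128 words,
 * entry_size = 8 words, tp = 120 and a wrapped-around hp = 16, the hp < tp
 * branch above gives (128 - 120 + 16) / 8 = 3 valid entries.
 */
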
/**
 * hal_srng_src_reap_next - Reap next entry from a source ring and move reap
 * pointer. This can be used to release any buffers associated with completed
 * ring entries. Note that this should not be used for posting new descriptor
 * entries. Posting of new entries should be done only using
 * hal_srng_src_get_next_reaped when this function is used for reaping.
 *
 * @hal_soc: Opaque HAL SOC handle
 * @hal_ring: Source ring pointer
 *
 * Return: Opaque pointer for next ring entry; NULL on failure
 */
static inline void *hal_srng_src_reap_next(void *hal_soc, void *hal_ring)
{
	struct hal_srng *srng = (struct hal_srng *)hal_ring;
	uint32_t *desc;

	/* TODO: Using % is expensive, but we have to do this since
	 * size of some SRNG rings is not power of 2 (due to descriptor
	 * sizes). Need to create separate API for rings used
	 * per-packet, with sizes power of 2 (TCL2SW, REO2SW,
	 * SW2RXDMA and CE rings)
	 */
	uint32_t next_reap_hp = (srng->u.src_ring.reap_hp + srng->entry_size) %
		srng->ring_size;

	if (next_reap_hp != srng->u.src_ring.cached_tp) {
		desc = &(srng->ring_base_vaddr[next_reap_hp]);
		srng->u.src_ring.reap_hp = next_reap_hp;
		return (void *)desc;
	}

	return NULL;
}

/**
 * hal_srng_src_get_next_reaped - Get next entry from a source ring that is
 * already reaped using hal_srng_src_reap_next, for posting new entries to
 * the ring
 *
 * @hal_soc: Opaque HAL SOC handle
 * @hal_ring: Source ring pointer
 *
 * Return: Opaque pointer for next (reaped) source ring entry; NULL on failure
 */
static inline void *hal_srng_src_get_next_reaped(void *hal_soc, void *hal_ring)
{
	struct hal_srng *srng = (struct hal_srng *)hal_ring;
	uint32_t *desc;

	if (srng->u.src_ring.hp != srng->u.src_ring.reap_hp) {
		desc = &(srng->ring_base_vaddr[srng->u.src_ring.hp]);
		srng->u.src_ring.hp = (srng->u.src_ring.hp + srng->entry_size) %
			srng->ring_size;

		return (void *)desc;
	}

	return NULL;
}

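/*
 * Reap/post split, sketched (illustrative; 'desc' is a hypothetical
 * caller-side variable): completions are reaped first to release buffers,
 * and the same entries are re-used later when posting new descriptors:
 *
 *	while ((desc = hal_srng_src_reap_next(hal_soc, hal_ring)))
 *		... free the buffer referenced by 'desc' ...
 *
 *	while ((desc = hal_srng_src_get_next_reaped(hal_soc, hal_ring)))
 *		... write a new descriptor into 'desc' ...
 */
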
/**
 * hal_srng_src_pending_reap_next - Reap next entry from a source ring and
 * move reap pointer. This API is used in detach path to release any buffers
 * associated with ring entries which are pending reap.
 *
 * @hal_soc: Opaque HAL SOC handle
 * @hal_ring: Source ring pointer
 *
 * Return: Opaque pointer for next ring entry; NULL on failure
 */
static inline void *hal_srng_src_pending_reap_next(void *hal_soc, void *hal_ring)
{
	struct hal_srng *srng = (struct hal_srng *)hal_ring;
	uint32_t *desc;

	uint32_t next_reap_hp = (srng->u.src_ring.reap_hp + srng->entry_size) %
		srng->ring_size;

	if (next_reap_hp != srng->u.src_ring.hp) {
		desc = &(srng->ring_base_vaddr[next_reap_hp]);
		srng->u.src_ring.reap_hp = next_reap_hp;
		return (void *)desc;
	}

	return NULL;
}

/**
 * hal_srng_src_done_val - Returns number of source ring entries that HW has
 * consumed past the reap pointer and that are yet to be reaped by SW
 *
 * @hal_soc: Opaque HAL SOC handle
 * @hal_ring: Source ring pointer
 *
 * Return: Number of completed (done) entries; 0 if none
 */
static inline uint32_t hal_srng_src_done_val(void *hal_soc, void *hal_ring)
{
	struct hal_srng *srng = (struct hal_srng *)hal_ring;
	/* TODO: Using % is expensive, but we have to do this since
	 * size of some SRNG rings is not power of 2 (due to descriptor
	 * sizes). Need to create separate API for rings used
	 * per-packet, with sizes power of 2 (TCL2SW, REO2SW,
	 * SW2RXDMA and CE rings)
	 */
	uint32_t next_reap_hp = (srng->u.src_ring.reap_hp + srng->entry_size) %
		srng->ring_size;

	if (next_reap_hp == srng->u.src_ring.cached_tp)
		return 0;

	if (srng->u.src_ring.cached_tp > next_reap_hp)
		return (srng->u.src_ring.cached_tp - next_reap_hp) /
			srng->entry_size;
	else
		return ((srng->ring_size - next_reap_hp) +
			srng->u.src_ring.cached_tp) / srng->entry_size;
}

/**
 * hal_get_sw_hptp - Get SW head and tail pointer location for any ring
 * @hal_soc: Opaque HAL SOC handle
 * @hal_ring: Ring pointer (Source or Destination ring)
 * @tailp: Tail Pointer
 * @headp: Head Pointer
 *
 * Return: None; tail and head pointer values are updated in the arguments.
 */
static inline void hal_get_sw_hptp(void *hal_soc, void *hal_ring,
				   uint32_t *tailp, uint32_t *headp)
{
	struct hal_srng *srng = (struct hal_srng *)hal_ring;

	if (srng->ring_dir == HAL_SRNG_SRC_RING) {
		*headp = srng->u.src_ring.hp / srng->entry_size;
		*tailp = *(srng->u.src_ring.tp_addr) / srng->entry_size;
	} else {
		*tailp = srng->u.dst_ring.tp / srng->entry_size;
		*headp = *(srng->u.dst_ring.hp_addr) / srng->entry_size;
	}
}

/**
 * hal_srng_src_get_next - Get next entry from a source ring and move cached
 * head pointer
 *
 * @hal_soc: Opaque HAL SOC handle
 * @hal_ring: Source ring pointer
 *
 * Return: Opaque pointer for next ring entry; NULL on failure
 */
static inline void *hal_srng_src_get_next(void *hal_soc, void *hal_ring)
{
	struct hal_srng *srng = (struct hal_srng *)hal_ring;
	uint32_t *desc;
	/* TODO: Using % is expensive, but we have to do this since
	 * size of some SRNG rings is not power of 2 (due to descriptor
	 * sizes). Need to create separate API for rings used
	 * per-packet, with sizes power of 2 (TCL2SW, REO2SW,
	 * SW2RXDMA and CE rings)
	 */
	uint32_t next_hp = (srng->u.src_ring.hp + srng->entry_size) %
		srng->ring_size;

	if (next_hp != srng->u.src_ring.cached_tp) {
		desc = &(srng->ring_base_vaddr[srng->u.src_ring.hp]);
		srng->u.src_ring.hp = next_hp;
		/* TODO: Since reap function is not used by all rings, we can
		 * remove the following update of reap_hp in this function
		 * if we can ensure that only hal_srng_src_get_next_reaped
		 * is used for the rings requiring reap functionality
		 */
		srng->u.src_ring.reap_hp = next_hp;
		return (void *)desc;
	}

	return NULL;
}

/**
 * hal_srng_src_peek - Get next entry from a ring without moving head pointer.
 * hal_srng_src_get_next should be called subsequently to move the head pointer
 *
 * @hal_soc: Opaque HAL SOC handle
 * @hal_ring: Source ring pointer
 *
 * Return: Opaque pointer for next ring entry; NULL on failure
 */
static inline void *hal_srng_src_peek(void *hal_soc, void *hal_ring)
{
	struct hal_srng *srng = (struct hal_srng *)hal_ring;
	uint32_t *desc;

	/* TODO: Using % is expensive, but we have to do this since
	 * size of some SRNG rings is not power of 2 (due to descriptor
	 * sizes). Need to create separate API for rings used
	 * per-packet, with sizes power of 2 (TCL2SW, REO2SW,
	 * SW2RXDMA and CE rings)
	 */
	if (((srng->u.src_ring.hp + srng->entry_size) %
		srng->ring_size) != srng->u.src_ring.cached_tp) {
		desc = &(srng->ring_base_vaddr[srng->u.src_ring.hp]);
		return (void *)desc;
	}

	return NULL;
}

/**
 * hal_srng_src_num_avail - Returns number of available entries in src ring
 *
 * @hal_soc: Opaque HAL SOC handle
 * @hal_ring: Source ring pointer
 * @sync_hw_ptr: Sync cached tail pointer with HW
 *
 * Return: Number of available entries
 */
static inline uint32_t hal_srng_src_num_avail(void *hal_soc,
	void *hal_ring, int sync_hw_ptr)
{
	struct hal_srng *srng = (struct hal_srng *)hal_ring;
	uint32_t tp;
	uint32_t hp = srng->u.src_ring.hp;

	if (sync_hw_ptr) {
		tp = *(srng->u.src_ring.tp_addr);
		srng->u.src_ring.cached_tp = tp;
	} else {
		tp = srng->u.src_ring.cached_tp;
	}

	if (tp > hp)
		return ((tp - hp) / srng->entry_size) - 1;
	else
		return ((srng->ring_size - hp + tp) / srng->entry_size) - 1;
}

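/*
 * Posting sketch (illustrative; 'num_needed' and 'desc' are hypothetical
 * caller-side variables): check for space first, then fill entries:
 *
 *	if (hal_srng_src_num_avail(hal_soc, hal_ring, 0) < num_needed)
 *		return;
 *	while (num_needed-- &&
 *	       (desc = hal_srng_src_get_next(hal_soc, hal_ring)))
 *		... write a new descriptor into 'desc' ...
 */
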
/**
 * hal_srng_access_end_unlocked - End ring access (unlocked) - update cached
 * ring head/tail pointers to HW.
 * This should be used only if hal_srng_access_start_unlocked was used to
 * start ring access
 *
 * @hal_soc: Opaque HAL SOC handle
 * @hal_ring: Ring pointer (Source or Destination ring)
 *
 * Return: None
 */
static inline void hal_srng_access_end_unlocked(void *hal_soc, void *hal_ring)
{
	struct hal_srng *srng = (struct hal_srng *)hal_ring;

	/* TODO: See if we need a write memory barrier here */
	if (srng->flags & HAL_SRNG_LMAC_RING) {
		/* For LMAC rings, ring pointer updates are done through FW and
		 * hence written to a shared memory location that is read by FW
		 */
		if (srng->ring_dir == HAL_SRNG_SRC_RING) {
			*(srng->u.src_ring.hp_addr) = srng->u.src_ring.hp;
		} else {
			*(srng->u.dst_ring.tp_addr) = srng->u.dst_ring.tp;
		}
	} else {
		if (srng->ring_dir == HAL_SRNG_SRC_RING)
			hal_write_address_32_mb(hal_soc,
				srng->u.src_ring.hp_addr,
				srng->u.src_ring.hp);
		else
			hal_write_address_32_mb(hal_soc,
				srng->u.dst_ring.tp_addr,
				srng->u.dst_ring.tp);
	}
}

/**
 * hal_srng_access_end - Unlock ring access and update cached ring head/tail
 * pointers to HW
 * This should be used only if hal_srng_access_start was used to start ring
 * access
 *
 * @hal_soc: Opaque HAL SOC handle
 * @hal_ring: Ring pointer (Source or Destination ring)
 *
 * Return: None
 */
static inline void hal_srng_access_end(void *hal_soc, void *hal_ring)
{
	struct hal_srng *srng = (struct hal_srng *)hal_ring;

	if (qdf_unlikely(!hal_ring)) {
		qdf_print("Error: Invalid hal_ring\n");
		return;
	}

	hal_srng_access_end_unlocked(hal_soc, hal_ring);
	SRNG_UNLOCK(&(srng->lock));
}

/**
 * hal_srng_access_end_reap - Unlock ring access
 * This should be used only if hal_srng_access_start was used to start ring
 * access; it should be used only while reaping SRC ring completions
 *
 * @hal_soc: Opaque HAL SOC handle
 * @hal_ring: Ring pointer (Source or Destination ring)
 *
 * Return: None
 */
static inline void hal_srng_access_end_reap(void *hal_soc, void *hal_ring)
{
	struct hal_srng *srng = (struct hal_srng *)hal_ring;

	SRNG_UNLOCK(&(srng->lock));
}

/* TODO: Check if the following definitions are available in HW headers */
#define WBM_IDLE_SCATTER_BUF_SIZE 32704
#define NUM_MPDUS_PER_LINK_DESC 6
#define NUM_MSDUS_PER_LINK_DESC 7
#define REO_QUEUE_DESC_ALIGN 128

#define LINK_DESC_ALIGN 128

#define ADDRESS_MATCH_TAG_VAL 0x5
/* Number of mpdu link pointers is 9 in case of TX_MPDU_QUEUE_HEAD and 14 in
 * case of TX_MPDU_QUEUE_EXT. We are defining a common average count here
 */
#define NUM_MPDU_LINKS_PER_QUEUE_DESC 12

/* TODO: Check with HW team on the scatter buffer size supported. As per WBM
 * MLD, scatter_buffer_size in IDLE_LIST_CONTROL register is 9 bits and size
 * should be specified in 16 word units. But the number of bits defined for
 * this field in HW header files is 5.
 */
#define WBM_IDLE_SCATTER_BUF_NEXT_PTR_SIZE 8

/**
 * hal_idle_list_scatter_buf_size - Get the size of each scatter buffer
 * in an idle list
 *
 * @hal_soc: Opaque HAL SOC handle
 *
 */
static inline uint32_t hal_idle_list_scatter_buf_size(void *hal_soc)
{
	return WBM_IDLE_SCATTER_BUF_SIZE;
}

/**
 * hal_get_link_desc_size - Get the size of each link descriptor
 *
 * @hal_soc: Opaque HAL SOC handle
 *
 */
static inline uint32_t hal_get_link_desc_size(struct hal_soc *hal_soc)
{
	if (!hal_soc || !hal_soc->ops) {
		qdf_print("Error: Invalid ops\n");
		QDF_BUG(0);
		return -EINVAL;
	}
	if (!hal_soc->ops->hal_get_link_desc_size) {
		qdf_print("Error: Invalid function pointer\n");
		QDF_BUG(0);
		return -EINVAL;
	}
	return hal_soc->ops->hal_get_link_desc_size();
}

/**
 * hal_get_link_desc_align - Get the required start address alignment for
 * link descriptors
 *
 * @hal_soc: Opaque HAL SOC handle
 *
 */
static inline uint32_t hal_get_link_desc_align(void *hal_soc)
{
	return LINK_DESC_ALIGN;
}

/**
 * hal_num_mpdus_per_link_desc - Get number of mpdus each link desc can hold
 *
 * @hal_soc: Opaque HAL SOC handle
 *
 */
static inline uint32_t hal_num_mpdus_per_link_desc(void *hal_soc)
{
	return NUM_MPDUS_PER_LINK_DESC;
}

/**
 * hal_num_msdus_per_link_desc - Get number of msdus each link desc can hold
 *
 * @hal_soc: Opaque HAL SOC handle
 *
 */
static inline uint32_t hal_num_msdus_per_link_desc(void *hal_soc)
{
	return NUM_MSDUS_PER_LINK_DESC;
}

/**
 * hal_num_mpdu_links_per_queue_desc - Get number of mpdu links each queue
 * descriptor can hold
 *
 * @hal_soc: Opaque HAL SOC handle
 *
 */
static inline uint32_t hal_num_mpdu_links_per_queue_desc(void *hal_soc)
{
	return NUM_MPDU_LINKS_PER_QUEUE_DESC;
}

/**
 * hal_idle_scatter_buf_num_entries - Get the number of link desc entries
 * that can fit in the given scatter buffer size
 *
 * @hal_soc: Opaque HAL SOC handle
 * @scatter_buf_size: Size of scatter buffer
 *
 */
static inline uint32_t hal_idle_scatter_buf_num_entries(void *hal_soc,
	uint32_t scatter_buf_size)
{
	return (scatter_buf_size - WBM_IDLE_SCATTER_BUF_NEXT_PTR_SIZE) /
		hal_srng_get_entrysize(hal_soc, WBM_IDLE_LINK);
}

/**
 * hal_idle_list_num_scatter_bufs - Get the number of scatter buffers
 * needed to hold the given total memory, for the given scatter buffer size
 *
 * @hal_soc: Opaque HAL SOC handle
 * @total_mem: size of memory to be scattered
 * @scatter_buf_size: Size of scatter buffer
 *
 */
static inline uint32_t hal_idle_list_num_scatter_bufs(void *hal_soc,
	uint32_t total_mem, uint32_t scatter_buf_size)
{
	uint8_t rem = (total_mem % (scatter_buf_size -
			WBM_IDLE_SCATTER_BUF_NEXT_PTR_SIZE)) ? 1 : 0;

	uint32_t num_scatter_bufs = (total_mem / (scatter_buf_size -
				WBM_IDLE_SCATTER_BUF_NEXT_PTR_SIZE)) + rem;

	return num_scatter_bufs;
}

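/*
 * Worked example (hypothetical numbers): with total_mem = 1000000 bytes and
 * scatter_buf_size = WBM_IDLE_SCATTER_BUF_SIZE (32704), the usable space per
 * buffer is 32704 - 8 = 32696 bytes; 1000000 / 32696 = 30 with a remainder,
 * so num_scatter_bufs = 30 + 1 = 31.
 */
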
/* REO parameters to be passed to hal_reo_setup */
struct hal_reo_params {
	/** rx hash steering enabled or disabled */
	bool rx_hash_enabled;
	/** reo remap 1 register */
	uint32_t remap1;
	/** reo remap 2 register */
	uint32_t remap2;
	/** fragment destination ring */
	uint8_t frag_dst_ring;
	/** padding */
	uint8_t padding[3];
};

enum hal_pn_type {
	HAL_PN_NONE,
	HAL_PN_WPA,
	HAL_PN_WAPI_EVEN,
	HAL_PN_WAPI_UNEVEN,
};

#define HAL_RX_MAX_BA_WINDOW 256

/**
 * hal_get_reo_qdesc_align - Get start address alignment for reo
 * queue descriptors
 *
 * @hal_soc: Opaque HAL SOC handle
 *
 */
static inline uint32_t hal_get_reo_qdesc_align(void *hal_soc)
{
	return REO_QUEUE_DESC_ALIGN;
}

/**
 * hal_reo_qdesc_setup - Setup HW REO queue descriptor
 *
 * @hal_soc: Opaque HAL SOC handle
 * @tid: TID of the queue
 * @ba_window_size: BlockAck window size
 * @start_seq: Starting sequence number
 * @hw_qdesc_vaddr: Virtual address of REO queue descriptor memory
 * @hw_qdesc_paddr: Physical address of REO queue descriptor memory
 * @pn_type: PN type (one of the types defined in 'enum hal_pn_type')
 *
 */
extern void hal_reo_qdesc_setup(void *hal_soc, int tid, uint32_t ba_window_size,
	uint32_t start_seq, void *hw_qdesc_vaddr, qdf_dma_addr_t hw_qdesc_paddr,
	int pn_type);

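/*
 * Setup sketch (illustrative; allocation and alignment handling elided,
 * and the local names are hypothetical):
 *
 *	uint32_t align = hal_get_reo_qdesc_align(hal_soc);
 *
 *	... allocate REO queue descriptor memory aligned to 'align' ...
 *	hal_reo_qdesc_setup(hal_soc, tid, ba_window_size, start_seq,
 *			    hw_qdesc_vaddr, hw_qdesc_paddr, HAL_PN_NONE);
 */
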
/**
 * hal_srng_get_hp_addr - Get head pointer physical address
 *
 * @hal_soc: Opaque HAL SOC handle
 * @hal_ring: Ring pointer (Source or Destination ring)
 *
 */
static inline qdf_dma_addr_t hal_srng_get_hp_addr(void *hal_soc, void *hal_ring)
{
	struct hal_srng *srng = (struct hal_srng *)hal_ring;
	struct hal_soc *hal = (struct hal_soc *)hal_soc;

	if (srng->ring_dir == HAL_SRNG_SRC_RING) {
		return hal->shadow_wrptr_mem_paddr +
		  ((unsigned long)(srng->u.src_ring.hp_addr) -
		  (unsigned long)(hal->shadow_wrptr_mem_vaddr));
	} else {
		return hal->shadow_rdptr_mem_paddr +
		  ((unsigned long)(srng->u.dst_ring.hp_addr) -
		   (unsigned long)(hal->shadow_rdptr_mem_vaddr));
	}
}

/**
 * hal_srng_get_tp_addr - Get tail pointer physical address
 *
 * @hal_soc: Opaque HAL SOC handle
 * @hal_ring: Ring pointer (Source or Destination ring)
 *
 */
static inline qdf_dma_addr_t hal_srng_get_tp_addr(void *hal_soc, void *hal_ring)
{
	struct hal_srng *srng = (struct hal_srng *)hal_ring;
	struct hal_soc *hal = (struct hal_soc *)hal_soc;

	if (srng->ring_dir == HAL_SRNG_SRC_RING) {
		return hal->shadow_rdptr_mem_paddr +
			((unsigned long)(srng->u.src_ring.tp_addr) -
			(unsigned long)(hal->shadow_rdptr_mem_vaddr));
	} else {
		return hal->shadow_wrptr_mem_paddr +
			((unsigned long)(srng->u.dst_ring.tp_addr) -
			(unsigned long)(hal->shadow_wrptr_mem_vaddr));
	}
}

/**
 * hal_get_srng_params - Retrieve SRNG parameters for a given ring from HAL
 *
 * @hal_soc: Opaque HAL SOC handle
 * @hal_ring: Ring pointer (Source or Destination ring)
 * @ring_params: SRNG parameters will be returned through this structure
 */
extern void hal_get_srng_params(void *hal_soc, void *hal_ring,
	struct hal_srng_params *ring_params);

/**
 * hal_get_meminfo - Retrieve hal memory base address
 *
 * @hal_soc: Opaque HAL SOC handle
 * @mem: pointer to structure to be updated with hal mem info
 */
extern void hal_get_meminfo(void *hal_soc, struct hal_mem_info *mem);

/**
 * hal_get_target_type - Return target type
 *
 * @hal_soc: Opaque HAL SOC handle
 */
uint32_t hal_get_target_type(struct hal_soc *hal);

/**
 * hal_get_ba_aging_timeout - Retrieve BA aging timeout
 *
 * @hal_soc: Opaque HAL SOC handle
 * @ac: Access category
 * @value: timeout duration in millisec
 */
void hal_get_ba_aging_timeout(void *hal_soc, uint8_t ac,
			      uint32_t *value);
/**
 * hal_set_ba_aging_timeout - Set BA aging timeout
 *
 * @hal_soc: Opaque HAL SOC handle
 * @ac: Access category
 * @value: timeout duration in millisec
 */
void hal_set_ba_aging_timeout(void *hal_soc, uint8_t ac,
			      uint32_t value);
/**
 * hal_srng_dst_hw_init - Private function to initialize SRNG
 * destination ring HW
 * @hal_soc: HAL SOC handle
 * @srng: SRNG ring pointer
 */
static inline void hal_srng_dst_hw_init(struct hal_soc *hal,
	struct hal_srng *srng)
{
	hal->ops->hal_srng_dst_hw_init(hal, srng);
}

/**
 * hal_srng_src_hw_init - Private function to initialize SRNG
 * source ring HW
 * @hal_soc: HAL SOC handle
 * @srng: SRNG ring pointer
 */
static inline void hal_srng_src_hw_init(struct hal_soc *hal,
	struct hal_srng *srng)
{
	hal->ops->hal_srng_src_hw_init(hal, srng);
}

/**
 * hal_get_hw_hptp() - Get HW head and tail pointer value for any ring
 * @hal_soc: Opaque HAL SOC handle
 * @hal_ring: Ring pointer (Source or Destination ring)
 * @headp: Head Pointer
 * @tailp: Tail Pointer
 * @ring_type: Ring type
 *
 * Return: None; head and tail pointer values are updated in the arguments.
 */
static inline void hal_get_hw_hptp(struct hal_soc *hal, void *hal_ring,
				   uint32_t *headp, uint32_t *tailp,
				   uint8_t ring_type)
{
	hal->ops->hal_get_hw_hptp(hal, hal_ring, headp, tailp, ring_type);
}

/**
 * hal_reo_setup - Initialize HW REO block
 *
 * @hal_soc: Opaque HAL SOC handle
 * @reo_params: parameters needed by HAL for REO config
 */
static inline void hal_reo_setup(void *halsoc,
	 void *reoparams)
{
	struct hal_soc *hal_soc = (struct hal_soc *)halsoc;

	hal_soc->ops->hal_reo_setup(halsoc, reoparams);
}

/**
 * hal_setup_link_idle_list - Setup scattered idle list using the
 * buffer list provided
 *
 * @hal_soc: Opaque HAL SOC handle
 * @scatter_bufs_base_paddr: Array of physical base addresses
 * @scatter_bufs_base_vaddr: Array of virtual base addresses
 * @num_scatter_bufs: Number of scatter buffers in the above lists
 * @scatter_buf_size: Size of each scatter buffer
 * @last_buf_end_offset: Offset to the last entry
 * @num_entries: Total entries of all scatter bufs
 *
 */
static inline void hal_setup_link_idle_list(void *halsoc,
	qdf_dma_addr_t scatter_bufs_base_paddr[],
	void *scatter_bufs_base_vaddr[], uint32_t num_scatter_bufs,
	uint32_t scatter_buf_size, uint32_t last_buf_end_offset,
	uint32_t num_entries)
{
	struct hal_soc *hal_soc = (struct hal_soc *)halsoc;

	hal_soc->ops->hal_setup_link_idle_list(halsoc, scatter_bufs_base_paddr,
			scatter_bufs_base_vaddr, num_scatter_bufs,
			scatter_buf_size, last_buf_end_offset,
			num_entries);
}

/**
 * hal_srng_dump_ring_desc() - Dump ring descriptor info
 *
 * @hal_soc: Opaque HAL SOC handle
 * @hal_ring: Ring pointer (Source or Destination ring)
 * @ring_desc: Opaque ring descriptor handle
 */
static inline void hal_srng_dump_ring_desc(struct hal_soc *hal, void *hal_ring,
					   void *ring_desc)
{
	struct hal_srng *srng = (struct hal_srng *)hal_ring;

	QDF_TRACE_HEX_DUMP(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_FATAL,
			   ring_desc, (srng->entry_size << 2));
}

/**
 * hal_srng_dump_ring() - Dump last 128 descs of the ring
 *
 * @hal_soc: Opaque HAL SOC handle
 * @hal_ring: Destination ring pointer
 */
static inline void hal_srng_dump_ring(struct hal_soc *hal, void *hal_ring)
{
	struct hal_srng *srng = (struct hal_srng *)hal_ring;
	uint32_t *desc;
	uint32_t tp, i;

	tp = srng->u.dst_ring.tp;

	for (i = 0; i < 128; i++) {
		if (!tp)
			tp = srng->ring_size;

		desc = &srng->ring_base_vaddr[tp - srng->entry_size];
		QDF_TRACE_HEX_DUMP(QDF_MODULE_ID_DP,
				   QDF_TRACE_LEVEL_DEBUG,
				   desc, (srng->entry_size << 2));

		tp -= srng->entry_size;
	}
}

#endif /* _HAL_API_H_ */