xref: /wlan-dirver/qca-wifi-host-cmn/hal/wifi3.0/hal_api.h (revision 11f5a63a6cbdda84849a730de22f0a71e635d58c)
1 /*
2  * Copyright (c) 2016-2019 The Linux Foundation. All rights reserved.
3  *
4  * Permission to use, copy, modify, and/or distribute this software for
5  * any purpose with or without fee is hereby granted, provided that the
6  * above copyright notice and this permission notice appear in all
7  * copies.
8  *
9  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
10  * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
11  * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
12  * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
13  * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
14  * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
15  * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
16  * PERFORMANCE OF THIS SOFTWARE.
17  */
18 
19 #ifndef _HAL_API_H_
20 #define _HAL_API_H_
21 
22 #include "qdf_types.h"
23 #include "qdf_util.h"
24 #include "qdf_atomic.h"
25 #include "hal_internal.h"
26 #define MAX_UNWINDOWED_ADDRESS 0x80000
27 #if defined(QCA_WIFI_QCA6390) || defined(QCA_WIFI_QCA6490)
28 #define WINDOW_ENABLE_BIT 0x40000000
29 #else
30 #define WINDOW_ENABLE_BIT 0x80000000
31 #endif
32 #define WINDOW_REG_ADDRESS 0x310C
33 #define WINDOW_SHIFT 19
34 #define WINDOW_VALUE_MASK 0x3F
35 #define WINDOW_START MAX_UNWINDOWED_ADDRESS
36 #define WINDOW_RANGE_MASK 0x7FFFF
37 
38 /*
39  * BAR + 4K is always accessible; any access outside this
40  * space requires the force wake procedure.
41  * MAPPED_REF_OFF (0x4063) marks that boundary.
42  */
43 #define MAPPED_REF_OFF 0x4063
44 #define FORCE_WAKE_DELAY_TIMEOUT 50
45 #define FORCE_WAKE_DELAY_MS 5
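
/*
 * Note: hal_force_wake_request() below polls pld_is_device_awake() every
 * FORCE_WAKE_DELAY_MS milliseconds, for roughly FORCE_WAKE_DELAY_TIMEOUT
 * milliseconds in total before giving up with -ETIMEDOUT.
 */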
46 
47 /**
48  * hal_ring_desc - opaque handle for DP ring descriptor
49  */
50 struct hal_ring_desc;
51 typedef struct hal_ring_desc *hal_ring_desc_t;
52 
53 /**
54  * hal_link_desc - opaque handle for DP link descriptor
55  */
56 struct hal_link_desc;
57 typedef struct hal_link_desc *hal_link_desc_t;
58 
59 /**
60  * hal_rxdma_desc - opaque handle for DP rxdma dst ring descriptor
61  */
62 struct hal_rxdma_desc;
63 typedef struct hal_rxdma_desc *hal_rxdma_desc_t;
64 
65 #ifdef ENABLE_VERBOSE_DEBUG
66 static inline void
67 hal_set_verbose_debug(bool flag)
68 {
69 	is_hal_verbose_debug_enabled = flag;
70 }
71 #endif
72 
73 #if !defined(QCA_WIFI_QCA6390) && !defined(QCA_WIFI_QCA6490)
74 static inline int hal_force_wake_request(struct hal_soc *soc)
75 {
76 	return 0;
77 }
78 
79 static inline int hal_force_wake_release(struct hal_soc *soc)
80 {
81 	return 0;
82 }
83 
84 static inline void hal_lock_reg_access(struct hal_soc *soc,
85 				       unsigned long *flags)
86 {
87 	qdf_spin_lock_irqsave(&soc->register_access_lock);
88 }
89 
90 static inline void hal_unlock_reg_access(struct hal_soc *soc,
91 					 unsigned long *flags)
92 {
93 	qdf_spin_unlock_irqrestore(&soc->register_access_lock);
94 }
95 
96 #else
97 static inline int hal_force_wake_request(struct hal_soc *soc)
98 {
99 	uint32_t timeout = 0;
100 	int ret;
101 
102 	ret = pld_force_wake_request(soc->qdf_dev->dev);
103 	if (ret) {
104 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
105 			  "%s: Request send failed %d\n", __func__, ret);
106 		return -EINVAL;
107 	}
108 
109 	while (!pld_is_device_awake(soc->qdf_dev->dev) &&
110 	       timeout <= FORCE_WAKE_DELAY_TIMEOUT) {
111 		mdelay(FORCE_WAKE_DELAY_MS);
112 		timeout += FORCE_WAKE_DELAY_MS;
113 	}
114 
115 	if (pld_is_device_awake(soc->qdf_dev->dev) == true)
116 		return 0;
117 	else
118 		return -ETIMEDOUT;
119 }
120 
121 static inline int hal_force_wake_release(struct hal_soc *soc)
122 {
123 	return pld_force_wake_release(soc->qdf_dev->dev);
124 }
125 
126 static inline void hal_lock_reg_access(struct hal_soc *soc,
127 				       unsigned long *flags)
128 {
129 	pld_lock_reg_window(soc->qdf_dev->dev, flags);
130 }
131 
132 static inline void hal_unlock_reg_access(struct hal_soc *soc,
133 					 unsigned long *flags)
134 {
135 	pld_unlock_reg_window(soc->qdf_dev->dev, flags);
136 }
137 #endif
138 
139 #ifdef PCIE_REG_WINDOW_LOCAL_NO_CACHE
140 static inline void hal_select_window(struct hal_soc *hal_soc, uint32_t offset)
141 {
142 	uint32_t window = (offset >> WINDOW_SHIFT) & WINDOW_VALUE_MASK;
143 
144 	qdf_iowrite32(hal_soc->dev_base_addr + WINDOW_REG_ADDRESS,
145 		      WINDOW_ENABLE_BIT | window);
146 	hal_soc->register_window = window;
147 }
148 #else
149 static inline void hal_select_window(struct hal_soc *hal_soc, uint32_t offset)
150 {
151 	uint32_t window = (offset >> WINDOW_SHIFT) & WINDOW_VALUE_MASK;
152 	if (window != hal_soc->register_window) {
153 		qdf_iowrite32(hal_soc->dev_base_addr + WINDOW_REG_ADDRESS,
154 			      WINDOW_ENABLE_BIT | window);
155 		hal_soc->register_window = window;
156 	}
157 }
158 #endif
159 
160 /**
161  * note1: WINDOW_RANGE_MASK = (1 << WINDOW_SHIFT) - 1
162  * note2: 1 << WINDOW_SHIFT = MAX_UNWINDOWED_ADDRESS
163  * note3: WINDOW_VALUE_MASK is large enough that a write past the last
164  *		window would be a bug
165  */
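
/*
 * Worked example (illustrative, using the values defined above): for
 * offset 0x00A30004,
 *	window           = (0x00A30004 >> WINDOW_SHIFT) & WINDOW_VALUE_MASK
 *	                 = 0x14
 *	in-window offset = 0x00A30004 & WINDOW_RANGE_MASK = 0x30004
 * so after writing (WINDOW_ENABLE_BIT | 0x14) to WINDOW_REG_ADDRESS, the
 * access goes to dev_base_addr + WINDOW_START + 0x30004.
 */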
166 #if !defined(QCA_WIFI_QCA6390) && !defined(QCA_WIFI_QCA6490)
167 static inline void hal_write32_mb(struct hal_soc *hal_soc, uint32_t offset,
168 				  uint32_t value)
169 {
170 	unsigned long flags;
171 
172 	if (!hal_soc->use_register_windowing ||
173 	    offset < MAX_UNWINDOWED_ADDRESS) {
174 		qdf_iowrite32(hal_soc->dev_base_addr + offset, value);
175 	} else {
176 		hal_lock_reg_access(hal_soc, &flags);
177 		hal_select_window(hal_soc, offset);
178 		qdf_iowrite32(hal_soc->dev_base_addr + WINDOW_START +
179 			  (offset & WINDOW_RANGE_MASK), value);
180 		hal_unlock_reg_access(hal_soc, &flags);
181 	}
182 }
183 #else
184 static inline void hal_write32_mb(struct hal_soc *hal_soc, uint32_t offset,
185 				  uint32_t value)
186 {
187 	int ret;
188 	unsigned long flags;
189 
190 	if (offset > MAPPED_REF_OFF) {
191 		ret = hal_force_wake_request(hal_soc);
192 		if (ret) {
193 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
194 				  "%s: Wake up request failed %d\n",
195 				  __func__, ret);
196 			QDF_BUG(0);
197 			return;
198 		}
199 	}
200 
201 	if (!hal_soc->use_register_windowing ||
202 	    offset < MAX_UNWINDOWED_ADDRESS) {
203 		qdf_iowrite32(hal_soc->dev_base_addr + offset, value);
204 	} else {
205 		hal_lock_reg_access(hal_soc, &flags);
206 		hal_select_window(hal_soc, offset);
207 		qdf_iowrite32(hal_soc->dev_base_addr + WINDOW_START +
208 			  (offset & WINDOW_RANGE_MASK), value);
209 		hal_unlock_reg_access(hal_soc, &flags);
210 	}
211 
212 	if ((offset > MAPPED_REF_OFF) &&
213 	    hal_force_wake_release(hal_soc))
214 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
215 			  "%s: Wake up release failed\n", __func__);
216 }
217 
218 #endif
219 
220 /**
221  * hal_write_address_32_mb - write a value to a register
222  *
223  */
224 static inline void hal_write_address_32_mb(struct hal_soc *hal_soc,
225 					   void __iomem *addr, uint32_t value)
226 {
227 	uint32_t offset;
228 
229 	if (!hal_soc->use_register_windowing)
230 		return qdf_iowrite32(addr, value);
231 
232 	offset = addr - hal_soc->dev_base_addr;
233 	hal_write32_mb(hal_soc, offset, value);
234 }
235 
236 #if !defined(QCA_WIFI_QCA6390) && !defined(QCA_WIFI_QCA6490)
237 static inline uint32_t hal_read32_mb(struct hal_soc *hal_soc, uint32_t offset)
238 {
239 	uint32_t ret;
240 	unsigned long flags;
241 
242 	if (!hal_soc->use_register_windowing ||
243 	    offset < MAX_UNWINDOWED_ADDRESS) {
244 		return qdf_ioread32(hal_soc->dev_base_addr + offset);
245 	}
246 
247 	hal_lock_reg_access(hal_soc, &flags);
248 	hal_select_window(hal_soc, offset);
249 	ret = qdf_ioread32(hal_soc->dev_base_addr + WINDOW_START +
250 		       (offset & WINDOW_RANGE_MASK));
251 	hal_unlock_reg_access(hal_soc, &flags);
252 
253 	return ret;
254 }
255 
256 /**
257  * hal_read_address_32_mb() - Read 32-bit value from the register
258  * @soc: soc handle
259  * @addr: register address to read
260  *
261  * Return: 32-bit value
262  */
263 static inline uint32_t hal_read_address_32_mb(struct hal_soc *soc,
264 					      void __iomem *addr)
265 {
266 	uint32_t offset;
267 	uint32_t ret;
268 
269 	if (!soc->use_register_windowing)
270 		return qdf_ioread32(addr);
271 
272 	offset = addr - soc->dev_base_addr;
273 	ret = hal_read32_mb(soc, offset);
274 	return ret;
275 }
276 #else
277 static inline uint32_t hal_read32_mb(struct hal_soc *hal_soc, uint32_t offset)
278 {
279 	uint32_t ret;
280 	unsigned long flags;
281 
282 	if ((offset > MAPPED_REF_OFF) &&
283 	    hal_force_wake_request(hal_soc)) {
284 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
285 			  "%s: Wake up request failed\n", __func__);
286 		return -EINVAL;
287 	}
288 
289 	if (!hal_soc->use_register_windowing ||
290 	    offset < MAX_UNWINDOWED_ADDRESS) {
291 		return qdf_ioread32(hal_soc->dev_base_addr + offset);
292 	}
293 
294 	hal_lock_reg_access(hal_soc, &flags);
295 	hal_select_window(hal_soc, offset);
296 	ret = qdf_ioread32(hal_soc->dev_base_addr + WINDOW_START +
297 		       (offset & WINDOW_RANGE_MASK));
298 	hal_unlock_reg_access(hal_soc, &flags);
299 
300 	if ((offset > MAPPED_REF_OFF) &&
301 	    hal_force_wake_release(hal_soc))
302 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
303 			  "%s: Wake up release failed\n", __func__);
304 
305 	return ret;
306 }
307 
308 static inline uint32_t hal_read_address_32_mb(struct hal_soc *soc,
309 					      void __iomem *addr)
310 {
311 	uint32_t offset;
312 	uint32_t ret;
313 
314 	if (!soc->use_register_windowing)
315 		return qdf_ioread32(addr);
316 
317 	offset = addr - soc->dev_base_addr;
318 	ret = hal_read32_mb(soc, offset);
319 	return ret;
320 }
321 #endif
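
/*
 * Illustrative usage of the two helpers above (the register offset name is
 * hypothetical):
 *
 *	uint32_t val;
 *
 *	val = hal_read32_mb(hal_soc, HAL_EXAMPLE_REG_OFFSET);
 *	hal_write32_mb(hal_soc, HAL_EXAMPLE_REG_OFFSET, val | 0x1);
 *
 * Register windowing, locking and (on QCA6390/QCA6490) the force wake
 * handshake are all handled inside the helpers.
 */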
322 
323 #include "hif_io32.h"
324 
325 /**
326  * hal_attach - Initialize HAL layer
327  * @hif_handle: Opaque HIF handle
328  * @qdf_dev: QDF device
329  *
330  * Return: Opaque HAL SOC handle
331  *		 NULL on failure
332  *
333  * This function should be called as part of HIF initialization (for accessing
334  * copy engines). DP layer will get hal_soc handle using hif_get_hal_handle()
335  */
336 void *hal_attach(struct hif_opaque_softc *hif_handle, qdf_device_t qdf_dev);
337 
338 /**
339  * hal_detach - Detach HAL layer
340  * @hal_soc: HAL SOC handle
341  *
342  * This function should be called as part of HIF detach
343  *
344  */
345 extern void hal_detach(void *hal_soc);
346 
347 /* SRNG type to be passed in APIs hal_srng_get_entrysize and hal_srng_setup */
348 enum hal_ring_type {
349 	REO_DST = 0,
350 	REO_EXCEPTION = 1,
351 	REO_REINJECT = 2,
352 	REO_CMD = 3,
353 	REO_STATUS = 4,
354 	TCL_DATA = 5,
355 	TCL_CMD = 6,
356 	TCL_STATUS = 7,
357 	CE_SRC = 8,
358 	CE_DST = 9,
359 	CE_DST_STATUS = 10,
360 	WBM_IDLE_LINK = 11,
361 	SW2WBM_RELEASE = 12,
362 	WBM2SW_RELEASE = 13,
363 	RXDMA_BUF = 14,
364 	RXDMA_DST = 15,
365 	RXDMA_MONITOR_BUF = 16,
366 	RXDMA_MONITOR_STATUS = 17,
367 	RXDMA_MONITOR_DST = 18,
368 	RXDMA_MONITOR_DESC = 19,
369 	DIR_BUF_RX_DMA_SRC = 20,
370 #ifdef WLAN_FEATURE_CIF_CFR
371 	WIFI_POS_SRC,
372 #endif
373 	MAX_RING_TYPES
374 };
375 
376 #define HAL_SRNG_LMAC_RING 0x80000000
377 /* SRNG flags passed in hal_srng_params.flags */
378 #define HAL_SRNG_MSI_SWAP				0x00000008
379 #define HAL_SRNG_RING_PTR_SWAP			0x00000010
380 #define HAL_SRNG_DATA_TLV_SWAP			0x00000020
381 #define HAL_SRNG_LOW_THRES_INTR_ENABLE	0x00010000
382 #define HAL_SRNG_MSI_INTR				0x00020000
383 #define HAL_SRNG_CACHED_DESC		0x00040000
384 
385 #define PN_SIZE_24 0
386 #define PN_SIZE_48 1
387 #define PN_SIZE_128 2
388 
389 /**
390  * hal_srng_get_entrysize - Returns size of ring entry in bytes. Should be
391  * used by callers for calculating the size of memory to be allocated before
392  * calling hal_srng_setup to setup the ring
393  *
394  * @hal_soc: Opaque HAL SOC handle
395  * @ring_type: one of the types from hal_ring_type
396  *
397  */
398 extern uint32_t hal_srng_get_entrysize(void *hal_soc, int ring_type);
399 
400 /**
401  * hal_srng_max_entries - Returns maximum possible number of ring entries
402  * @hal_soc: Opaque HAL SOC handle
403  * @ring_type: one of the types from hal_ring_type
404  *
405  * Return: Maximum number of entries for the given ring_type
406  */
407 uint32_t hal_srng_max_entries(void *hal_soc, int ring_type);
408 
409 /**
410  * hal_srng_dump - Dump ring status
411  * @srng: hal srng pointer
412  */
413 void hal_srng_dump(struct hal_srng *srng);
414 
415 /**
416  * hal_srng_get_dir - Returns the direction of the ring
417  * @hal_soc: Opaque HAL SOC handle
418  * @ring_type: one of the types from hal_ring_type
419  *
420  * Return: Ring direction
421  */
422 enum hal_srng_dir hal_srng_get_dir(void *hal_soc, int ring_type);
423 
424 /* HAL memory information */
425 struct hal_mem_info {
426 	/* dev base virtual addr */
427 	void *dev_base_addr;
428 	/* dev base physical addr */
429 	void *dev_base_paddr;
430 	/* Remote virtual pointer memory for HW/FW updates */
431 	void *shadow_rdptr_mem_vaddr;
432 	/* Remote physical pointer memory for HW/FW updates */
433 	void *shadow_rdptr_mem_paddr;
434 	/* Shared memory for ring pointer updates from host to FW */
435 	void *shadow_wrptr_mem_vaddr;
436 	/* Shared physical memory for ring pointer updates from host to FW */
437 	void *shadow_wrptr_mem_paddr;
438 };
439 
440 /* SRNG parameters to be passed to hal_srng_setup */
441 struct hal_srng_params {
442 	/* Physical base address of the ring */
443 	qdf_dma_addr_t ring_base_paddr;
444 	/* Virtual base address of the ring */
445 	void *ring_base_vaddr;
446 	/* Number of entries in ring */
447 	uint32_t num_entries;
448 	/* max transfer length */
449 	uint16_t max_buffer_length;
450 	/* MSI Address */
451 	qdf_dma_addr_t msi_addr;
452 	/* MSI data */
453 	uint32_t msi_data;
454 	/* Interrupt timer threshold – in microseconds */
455 	uint32_t intr_timer_thres_us;
456 	/* Interrupt batch counter threshold – in number of ring entries */
457 	uint32_t intr_batch_cntr_thres_entries;
458 	/* Low threshold – in number of ring entries
459 	 * (valid for src rings only)
460 	 */
461 	uint32_t low_threshold;
462 	/* Misc flags */
463 	uint32_t flags;
464 	/* Unique ring id */
465 	uint8_t ring_id;
466 	/* Source or Destination ring */
467 	enum hal_srng_dir ring_dir;
468 	/* Size of ring entry */
469 	uint32_t entry_size;
470 	/* hw register base address */
471 	void *hwreg_base[MAX_SRNG_REG_GROUPS];
472 };
473 
474 /* hal_construct_shadow_config() - initialize the shadow registers for dp rings
475  * @hal_soc: hal handle
476  *
477  * Return: QDF_STATUS_OK on success
478  */
479 extern QDF_STATUS hal_construct_shadow_config(void *hal_soc);
480 
481 /* hal_set_one_shadow_config() - add a config for the specified ring
482  * @hal_soc: hal handle
483  * @ring_type: ring type
484  * @ring_num: ring num
485  *
486  * The ring type and ring num uniquely specify the ring.  After this call,
487  * the hp/tp will be added as the next entry in the shadow register
488  * configuration table.  The hal code will use the shadow register address
489  * in place of the hp/tp address.
490  *
491  * This function is exposed, so that the CE module can skip configuring shadow
492  * registers for unused rings and rings assigned to the firmware.
493  *
494  * Return: QDF_STATUS_OK on success
495  */
496 extern QDF_STATUS hal_set_one_shadow_config(void *hal_soc, int ring_type,
497 					    int ring_num);
498 /**
499  * hal_get_shadow_config() - retrieve the config table
500  * @hal_soc: hal handle
501  * @shadow_config: will point to the table after
502  * @num_shadow_registers_configured: will contain the number of valid entries
503  */
504 extern void hal_get_shadow_config(void *hal_soc,
505 				  struct pld_shadow_reg_v2_cfg **shadow_config,
506 				  int *num_shadow_registers_configured);
507 /**
508  * hal_srng_setup - Initialize HW SRNG ring.
509  *
510  * @hal_soc: Opaque HAL SOC handle
511  * @ring_type: one of the types from hal_ring_type
512  * @ring_num: Ring number if there are multiple rings of
513  *		same type (starting from 0)
514  * @mac_id: valid MAC Id should be passed if ring type is one of lmac rings
515  * @ring_params: SRNG ring params in hal_srng_params structure.
516  *
517  * Callers are expected to allocate contiguous ring memory of size
518  * 'num_entries * entry_size' bytes and pass the physical and virtual base
519  * addresses through 'ring_base_paddr' and 'ring_base_vaddr' in hal_srng_params
520  * structure. Ring base address should be 8 byte aligned and size of each ring
521  * entry should be queried using the API hal_srng_get_entrysize
522  *
523  * Return: Opaque pointer to ring on success
524  *		 NULL on failure (if given ring is not available)
525  */
526 extern void *hal_srng_setup(void *hal_soc, int ring_type, int ring_num,
527 	int mac_id, struct hal_srng_params *ring_params);
528 
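/*
 * Illustrative setup sketch; apart from the hal_* and qdf_* calls, all
 * names here are hypothetical:
 *
 *	struct hal_srng_params params = {0};
 *	uint32_t esize = hal_srng_get_entrysize(hal_soc, REO_DST);
 *
 *	params.num_entries = num_entries;
 *	params.ring_base_vaddr = qdf_mem_alloc_consistent(qdf_dev,
 *			qdf_dev->dev, num_entries * esize,
 *			&params.ring_base_paddr);
 *	hal_ring = hal_srng_setup(hal_soc, REO_DST, 0, 0, &params);
 */
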
529 /* Remapping ids of REO rings */
530 #define REO_REMAP_TCL 0
531 #define REO_REMAP_SW1 1
532 #define REO_REMAP_SW2 2
533 #define REO_REMAP_SW3 3
534 #define REO_REMAP_SW4 4
535 #define REO_REMAP_RELEASE 5
536 #define REO_REMAP_FW 6
537 #define REO_REMAP_UNUSED 7
538 
539 /*
540  * Macro to access HWIO_REO_R0_DESTINATION_RING_CTRL_IX_0
541  * to map destination to rings
542  */
543 #define HAL_REO_REMAP_IX0(_VALUE, _OFFSET) \
544 	((_VALUE) << \
545 	 (HWIO_REO_R0_DESTINATION_RING_CTRL_IX_0_DEST_RING_MAPPING_ ## \
546 	  _OFFSET ## _SHFT))
547 
548 /*
549  * Macro to access HWIO_REO_R0_DESTINATION_RING_CTRL_IX_1
550  * to map destination to rings
551  */
552 #define HAL_REO_REMAP_IX2(_VALUE, _OFFSET) \
553 	((_VALUE) << \
554 	 (HWIO_REO_R0_DESTINATION_RING_CTRL_IX_2_DEST_RING_MAPPING_ ## \
555 	  _OFFSET ## _SHFT))
556 
557 /*
558  * Macro to access HWIO_REO_R0_DESTINATION_RING_CTRL_IX_3
559  * to map destination to rings
560  */
561 #define HAL_REO_REMAP_IX3(_VALUE, _OFFSET) \
562 	((_VALUE) << \
563 	 (HWIO_REO_R0_DESTINATION_RING_CTRL_IX_3_DEST_RING_MAPPING_ ## \
564 	  _OFFSET ## _SHFT))
565 
566 /**
567  * hal_reo_read_write_ctrl_ix - Read or write REO_DESTINATION_RING_CTRL_IX
568  * @hal_soc_hdl: HAL SOC handle
569  * @read: boolean value to indicate if read or write
570  * @ix0: pointer to store IX0 reg value
571  * @ix1: pointer to store IX1 reg value
572  * @ix2: pointer to store IX2 reg value
573  * @ix3: pointer to store IX3 reg value
574  */
575 void hal_reo_read_write_ctrl_ix(hal_soc_handle_t hal_soc_hdl, bool read,
576 				uint32_t *ix0, uint32_t *ix1,
577 				uint32_t *ix2, uint32_t *ix3);
578 
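/*
 * Illustrative sketch: compose an IX0 value that spreads the first four
 * destination indices across the SW rings and program it. Passing NULL
 * for the other IX pointers is assumed to leave those registers untouched.
 *
 *	uint32_t ix0 = HAL_REO_REMAP_IX0(REO_REMAP_SW1, 0) |
 *		       HAL_REO_REMAP_IX0(REO_REMAP_SW2, 1) |
 *		       HAL_REO_REMAP_IX0(REO_REMAP_SW3, 2) |
 *		       HAL_REO_REMAP_IX0(REO_REMAP_SW4, 3);
 *
 *	hal_reo_read_write_ctrl_ix(hal_soc_hdl, false, &ix0, NULL, NULL, NULL);
 */
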
579 /**
580  * hal_srng_dst_set_hp_paddr() - Set physical address to dest SRNG head pointer
581  * @sring: srng pointer
582  * @paddr: physical address
583  */
584 extern void hal_srng_dst_set_hp_paddr(struct hal_srng *sring, uint64_t paddr);
585 
586 /**
587  * hal_srng_dst_init_hp() - Initialize head pointer with cached head pointer
588  * @srng: sring pointer
589  * @vaddr: virtual address
590  */
591 extern void hal_srng_dst_init_hp(struct hal_srng *srng, uint32_t *vaddr);
592 
593 /**
594  * hal_srng_cleanup - Deinitialize HW SRNG ring.
595  * @hal_soc: Opaque HAL SOC handle
596  * @hal_srng: Opaque HAL SRNG pointer
597  */
598 void hal_srng_cleanup(void *hal_soc, hal_ring_handle_t hal_ring_hdl);
599 
600 static inline bool hal_srng_initialized(hal_ring_handle_t hal_ring_hdl)
601 {
602 	struct hal_srng *srng = (struct hal_srng *)hal_ring_hdl;
603 
604 	return !!srng->initialized;
605 }
606 
607 /**
608  * hal_srng_dst_peek - Check if there are any entries in the ring (peek)
609  * @hal_soc: Opaque HAL SOC handle
610  * @hal_ring_hdl: Destination ring pointer
611  *
612  * Caller takes responsibility for any locking needs.
613  *
614  * Return: Opaque pointer for next ring entry; NULL on failure
615  */
616 static inline
617 void *hal_srng_dst_peek(hal_soc_handle_t hal_soc_hdl,
618 			hal_ring_handle_t hal_ring_hdl)
619 {
620 	struct hal_srng *srng = (struct hal_srng *)hal_ring_hdl;
621 
622 	if (srng->u.dst_ring.tp != srng->u.dst_ring.cached_hp)
623 		return (void *)(&srng->ring_base_vaddr[srng->u.dst_ring.tp]);
624 
625 	return NULL;
626 }
627 
628 /**
629  * hal_srng_access_start_unlocked - Start ring access (unlocked). Should use
630  * hal_srng_access_start if locked access is required
631  *
632  * @hal_soc: Opaque HAL SOC handle
633  * @hal_ring_hdl: Ring pointer (Source or Destination ring)
634  *
635  * Return: 0 on success; error on failure
636  */
637 static inline int
638 hal_srng_access_start_unlocked(hal_soc_handle_t hal_soc_hdl,
639 			       hal_ring_handle_t hal_ring_hdl)
640 {
641 	struct hal_srng *srng = (struct hal_srng *)hal_ring_hdl;
642 	struct hal_soc *soc = (struct hal_soc *)hal_soc_hdl;
643 	uint32_t *desc;
644 
645 	if (srng->ring_dir == HAL_SRNG_SRC_RING)
646 		srng->u.src_ring.cached_tp =
647 			*(volatile uint32_t *)(srng->u.src_ring.tp_addr);
648 	else {
649 		srng->u.dst_ring.cached_hp =
650 			*(volatile uint32_t *)(srng->u.dst_ring.hp_addr);
651 
652 		if (srng->flags & HAL_SRNG_CACHED_DESC) {
653 			desc = hal_srng_dst_peek(hal_soc_hdl, hal_ring_hdl);
654 			if (qdf_likely(desc)) {
655 				qdf_mem_dma_cache_sync(soc->qdf_dev,
656 						       qdf_mem_virt_to_phys
657 						       (desc),
658 						       QDF_DMA_FROM_DEVICE,
659 						       (srng->entry_size *
660 							sizeof(uint32_t)));
661 				qdf_prefetch(desc);
662 			}
663 		}
664 	}
665 
666 	return 0;
667 }
668 
669 /**
670  * hal_srng_access_start - Start (locked) ring access
671  *
672  * @hal_soc: Opaque HAL SOC handle
673  * @hal_ring_hdl: Ring pointer (Source or Destination ring)
674  *
675  * Return: 0 on success; error on failure
676  */
677 static inline int hal_srng_access_start(hal_soc_handle_t hal_soc_hdl,
678 					hal_ring_handle_t hal_ring_hdl)
679 {
680 	struct hal_srng *srng = (struct hal_srng *)hal_ring_hdl;
681 
682 	if (qdf_unlikely(!hal_ring_hdl)) {
683 		qdf_print("Error: Invalid hal_ring\n");
684 		return -EINVAL;
685 	}
686 
687 	SRNG_LOCK(&(srng->lock));
688 
689 	return hal_srng_access_start_unlocked(hal_soc_hdl, hal_ring_hdl);
690 }
691 
692 /**
693  * hal_srng_dst_get_next - Get next entry from a destination ring and move
694  * cached tail pointer
695  *
696  * @hal_soc: Opaque HAL SOC handle
697  * @hal_ring_hdl: Destination ring pointer
698  *
699  * Return: Opaque pointer for next ring entry; NULL on failure
700  */
701 static inline
702 void *hal_srng_dst_get_next(void *hal_soc,
703 			    hal_ring_handle_t hal_ring_hdl)
704 {
705 	struct hal_srng *srng = (struct hal_srng *)hal_ring_hdl;
706 	struct hal_soc *soc = (struct hal_soc *)hal_soc;
707 	uint32_t *desc;
708 	uint32_t *desc_next;
709 	uint32_t tp;
710 
711 	if (srng->u.dst_ring.tp != srng->u.dst_ring.cached_hp) {
712 		desc = &(srng->ring_base_vaddr[srng->u.dst_ring.tp]);
713 		/* TODO: Using % is expensive, but we have to do this since
714 		 * size of some SRNG rings is not power of 2 (due to descriptor
715 		 * sizes). Need to create separate API for rings used
716 		 * per-packet, with sizes power of 2 (TCL2SW, REO2SW,
717 		 * SW2RXDMA and CE rings)
718 		 */
719 		srng->u.dst_ring.tp = (srng->u.dst_ring.tp + srng->entry_size) %
720 			srng->ring_size;
721 
722 		if (srng->flags & HAL_SRNG_CACHED_DESC) {
723 			tp = srng->u.dst_ring.tp;
724 			desc_next = &srng->ring_base_vaddr[tp];
725 			qdf_mem_dma_cache_sync(soc->qdf_dev,
726 					       qdf_mem_virt_to_phys(desc_next),
727 					       QDF_DMA_FROM_DEVICE,
728 					       (srng->entry_size *
729 						sizeof(uint32_t)));
730 			qdf_prefetch(desc_next);
731 		}
732 
733 		return (void *)desc;
734 	}
735 
736 	return NULL;
737 }
738 
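/*
 * Typical destination ring processing loop (illustrative sketch; the
 * process_desc() consumer is hypothetical):
 *
 *	void *desc;
 *
 *	if (hal_srng_access_start(hal_soc, hal_ring_hdl))
 *		return;
 *	while ((desc = hal_srng_dst_get_next(hal_soc, hal_ring_hdl)))
 *		process_desc(desc);
 *	hal_srng_access_end(hal_soc, hal_ring_hdl);
 */
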
739 /**
740  * hal_srng_dst_get_next_hp - Get next entry from a destination ring and move
741  * cached head pointer
742  *
743  * @hal_soc: Opaque HAL SOC handle
744  * @hal_ring_hdl: Destination ring pointer
745  *
746  * Return: Opaque pointer for next ring entry; NULL on failure
747  */
748 static inline void *
749 hal_srng_dst_get_next_hp(hal_soc_handle_t hal_soc_hdl,
750 			 hal_ring_handle_t hal_ring_hdl)
751 {
752 	struct hal_srng *srng = (struct hal_srng *)hal_ring_hdl;
753 	uint32_t *desc;
754 	/* TODO: Using % is expensive, but we have to do this since
755 	 * size of some SRNG rings is not power of 2 (due to descriptor
756 	 * sizes). Need to create separate API for rings used
757 	 * per-packet, with sizes power of 2 (TCL2SW, REO2SW,
758 	 * SW2RXDMA and CE rings)
759 	 */
760 	uint32_t next_hp = (srng->u.dst_ring.cached_hp + srng->entry_size) %
761 		srng->ring_size;
762 
763 	if (next_hp != srng->u.dst_ring.tp) {
764 		desc = &(srng->ring_base_vaddr[srng->u.dst_ring.cached_hp]);
765 		srng->u.dst_ring.cached_hp = next_hp;
766 		return (void *)desc;
767 	}
768 
769 	return NULL;
770 }
771 
772 /**
773  * hal_srng_dst_peek_sync - Check if there are any entries in the ring (peek)
774  * @hal_soc: Opaque HAL SOC handle
775  * @hal_ring_hdl: Destination ring pointer
776  *
777  * Sync cached head pointer with HW.
778  * Caller takes responsibility for any locking needs.
779  *
780  * Return: Opaque pointer for next ring entry; NULL on failure
781  */
782 static inline
783 void *hal_srng_dst_peek_sync(hal_soc_handle_t hal_soc_hdl,
784 			     hal_ring_handle_t hal_ring_hdl)
785 {
786 	struct hal_srng *srng = (struct hal_srng *)hal_ring_hdl;
787 
788 	srng->u.dst_ring.cached_hp =
789 		*(volatile uint32_t *)(srng->u.dst_ring.hp_addr);
790 
791 	if (srng->u.dst_ring.tp != srng->u.dst_ring.cached_hp)
792 		return (void *)(&(srng->ring_base_vaddr[srng->u.dst_ring.tp]));
793 
794 	return NULL;
795 }
796 
797 /**
798  * hal_srng_dst_peek_sync_locked - Peek for any entries in the ring
799  * @hal_soc: Opaque HAL SOC handle
800  * @hal_ring_hdl: Destination ring pointer
801  *
802  * Sync cached head pointer with HW.
803  * This function takes up SRNG_LOCK. Should not be called with SRNG lock held.
804  *
805  * Return: Opaque pointer for next ring entry; NULL on failure
806  */
807 static inline
808 void *hal_srng_dst_peek_sync_locked(hal_soc_handle_t hal_soc_hdl,
809 				    hal_ring_handle_t hal_ring_hdl)
810 {
811 	struct hal_srng *srng = (struct hal_srng *)hal_ring_hdl;
812 	void *ring_desc_ptr = NULL;
813 
814 	if (qdf_unlikely(!hal_ring_hdl)) {
815 		qdf_print("Error: Invalid hal_ring\n");
816 		return  NULL;
817 	}
818 
819 	SRNG_LOCK(&srng->lock);
820 
821 	ring_desc_ptr = hal_srng_dst_peek_sync(hal_soc_hdl, hal_ring_hdl);
822 
823 	SRNG_UNLOCK(&srng->lock);
824 
825 	return ring_desc_ptr;
826 }
827 
828 /**
829  * hal_srng_dst_num_valid - Returns number of valid entries (to be processed
830  * by SW) in destination ring
831  *
832  * @hal_soc: Opaque HAL SOC handle
833  * @hal_ring_hdl: Destination ring pointer
834  * @sync_hw_ptr: Sync cached head pointer with HW
835  *
836  */
837 static inline
838 uint32_t hal_srng_dst_num_valid(void *hal_soc,
839 				hal_ring_handle_t hal_ring_hdl,
840 				int sync_hw_ptr)
841 {
842 	struct hal_srng *srng = (struct hal_srng *)hal_ring_hdl;
843 	uint32_t hp;
844 	uint32_t tp = srng->u.dst_ring.tp;
845 
846 	if (sync_hw_ptr) {
847 		hp = *(volatile uint32_t *)(srng->u.dst_ring.hp_addr);
848 		srng->u.dst_ring.cached_hp = hp;
849 	} else {
850 		hp = srng->u.dst_ring.cached_hp;
851 	}
852 
853 	if (hp >= tp)
854 		return (hp - tp) / srng->entry_size;
855 	else
856 		return (srng->ring_size - tp + hp) / srng->entry_size;
857 }
858 
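/*
 * Worked example (illustrative): with entry_size = 8 words and
 * ring_size = 1024 words, tp = 1000 and hp wrapped around to 40 gives
 *	(1024 - 1000 + 40) / 8 = 8 valid entries.
 */
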
859 /**
860  * hal_srng_dst_num_valid_locked - Returns num valid entries to be processed
861  *
862  * @hal_soc: Opaque HAL SOC handle
863  * @hal_ring_hdl: Destination ring pointer
864  * @sync_hw_ptr: Sync cached head pointer with HW
865  *
866  * Returns number of valid entries to be processed by the host driver. The
867  * function takes up SRNG lock.
868  *
869  * Return: Number of valid destination entries
870  */
871 static inline uint32_t
872 hal_srng_dst_num_valid_locked(hal_soc_handle_t hal_soc,
873 			      hal_ring_handle_t hal_ring_hdl,
874 			      int sync_hw_ptr)
875 {
876 	uint32_t num_valid;
877 	struct hal_srng *srng = (struct hal_srng *)hal_ring_hdl;
878 
879 	SRNG_LOCK(&srng->lock);
880 	num_valid = hal_srng_dst_num_valid(hal_soc, hal_ring_hdl, sync_hw_ptr);
881 	SRNG_UNLOCK(&srng->lock);
882 
883 	return num_valid;
884 }
885 
886 /**
887  * hal_srng_src_reap_next - Reap next entry from a source ring and move reap
888  * pointer. This can be used to release any buffers associated with completed
889  * ring entries. Note that this should not be used for posting new descriptor
890  * entries. Posting of new entries should be done only using
891  * hal_srng_src_get_next_reaped when this function is used for reaping.
892  *
893  * @hal_soc: Opaque HAL SOC handle
894  * @hal_ring_hdl: Source ring pointer
895  *
896  * Return: Opaque pointer for next ring entry; NULL on failure
897  */
898 static inline void *
899 hal_srng_src_reap_next(void *hal_soc, hal_ring_handle_t hal_ring_hdl)
900 {
901 	struct hal_srng *srng = (struct hal_srng *)hal_ring_hdl;
902 	uint32_t *desc;
903 
904 	/* TODO: Using % is expensive, but we have to do this since
905 	 * size of some SRNG rings is not power of 2 (due to descriptor
906 	 * sizes). Need to create separate API for rings used
907 	 * per-packet, with sizes power of 2 (TCL2SW, REO2SW,
908 	 * SW2RXDMA and CE rings)
909 	 */
910 	uint32_t next_reap_hp = (srng->u.src_ring.reap_hp + srng->entry_size) %
911 		srng->ring_size;
912 
913 	if (next_reap_hp != srng->u.src_ring.cached_tp) {
914 		desc = &(srng->ring_base_vaddr[next_reap_hp]);
915 		srng->u.src_ring.reap_hp = next_reap_hp;
916 		return (void *)desc;
917 	}
918 
919 	return NULL;
920 }
921 
922 /**
923  * hal_srng_src_get_next_reaped - Get next entry from a source ring that is
924  * already reaped using hal_srng_src_reap_next, for posting new entries to
925  * the ring
926  *
927  * @hal_soc: Opaque HAL SOC handle
928  * @hal_ring_hdl: Source ring pointer
929  *
930  * Return: Opaque pointer for next (reaped) source ring entry; NULL on failure
931  */
932 static inline void *
933 hal_srng_src_get_next_reaped(void *hal_soc, hal_ring_handle_t hal_ring_hdl)
934 {
935 	struct hal_srng *srng = (struct hal_srng *)hal_ring_hdl;
936 	uint32_t *desc;
937 
938 	if (srng->u.src_ring.hp != srng->u.src_ring.reap_hp) {
939 		desc = &(srng->ring_base_vaddr[srng->u.src_ring.hp]);
940 		srng->u.src_ring.hp = (srng->u.src_ring.hp + srng->entry_size) %
941 			srng->ring_size;
942 
943 		return (void *)desc;
944 	}
945 
946 	return NULL;
947 }
948 
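/*
 * Two-stage source ring usage sketch (illustrative; release_buf() and
 * post_buf() stand in for the caller's own helpers):
 *
 *	while ((desc = hal_srng_src_reap_next(hal_soc, hal_ring_hdl)))
 *		release_buf(desc);
 *	...
 *	while ((desc = hal_srng_src_get_next_reaped(hal_soc, hal_ring_hdl)))
 *		post_buf(desc);
 */
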
949 /**
950  * hal_srng_src_pending_reap_next - Reap next entry from a source ring and
951  * move reap pointer. This API is used in detach path to release any buffers
952  * associated with ring entries which are pending reap.
953  *
954  * @hal_soc: Opaque HAL SOC handle
955  * @hal_ring_hdl: Source ring pointer
956  *
957  * Return: Opaque pointer for next ring entry; NULL on failure
958  */
959 static inline void *
960 hal_srng_src_pending_reap_next(void *hal_soc, hal_ring_handle_t hal_ring_hdl)
961 {
962 	struct hal_srng *srng = (struct hal_srng *)hal_ring_hdl;
963 	uint32_t *desc;
964 
965 	uint32_t next_reap_hp = (srng->u.src_ring.reap_hp + srng->entry_size) %
966 		srng->ring_size;
967 
968 	if (next_reap_hp != srng->u.src_ring.hp) {
969 		desc = &(srng->ring_base_vaddr[next_reap_hp]);
970 		srng->u.src_ring.reap_hp = next_reap_hp;
971 		return (void *)desc;
972 	}
973 
974 	return NULL;
975 }
976 
977 /**
978  * hal_srng_src_done_val - Returns number of entries completed and pending reap
979  *
980  * @hal_soc: Opaque HAL SOC handle
981  * @hal_ring_hdl: Source ring pointer
982  *
983  * Return: Number of ring entries that can be reaped; 0 if none
984  */
985 static inline uint32_t
986 hal_srng_src_done_val(void *hal_soc, hal_ring_handle_t hal_ring_hdl)
987 {
988 	struct hal_srng *srng = (struct hal_srng *)hal_ring_hdl;
989 	/* TODO: Using % is expensive, but we have to do this since
990 	 * size of some SRNG rings is not power of 2 (due to descriptor
991 	 * sizes). Need to create separate API for rings used
992 	 * per-packet, with sizes power of 2 (TCL2SW, REO2SW,
993 	 * SW2RXDMA and CE rings)
994 	 */
995 	uint32_t next_reap_hp = (srng->u.src_ring.reap_hp + srng->entry_size) %
996 		srng->ring_size;
997 
998 	if (next_reap_hp == srng->u.src_ring.cached_tp)
999 		return 0;
1000 
1001 	if (srng->u.src_ring.cached_tp > next_reap_hp)
1002 		return (srng->u.src_ring.cached_tp - next_reap_hp) /
1003 			srng->entry_size;
1004 	else
1005 		return ((srng->ring_size - next_reap_hp) +
1006 			srng->u.src_ring.cached_tp) / srng->entry_size;
1007 }
1008 
1009 /**
1010  * hal_get_entrysize_from_srng() - Retrieve ring entry size
1011  * @hal_ring_hdl: Source ring pointer
1012  *
1013  * Return: uint8_t
1014  */
1015 static inline
1016 uint8_t hal_get_entrysize_from_srng(hal_ring_handle_t hal_ring_hdl)
1017 {
1018 	struct hal_srng *srng = (struct hal_srng *)hal_ring_hdl;
1019 
1020 	return srng->entry_size;
1021 }
1022 
1023 /**
1024  * hal_get_sw_hptp - Get SW head and tail pointer location for any ring
1025  * @hal_soc: Opaque HAL SOC handle
1026  * @hal_ring_hdl: Source ring pointer
1027  * @tailp: Tail Pointer
1028  * @headp: Head Pointer
1029  *
1030  * Return: None. Tail and head pointer values are returned in the arguments.
1031  */
1032 static inline
1033 void hal_get_sw_hptp(void *hal_soc, hal_ring_handle_t hal_ring_hdl,
1034 		     uint32_t *tailp, uint32_t *headp)
1035 {
1036 	struct hal_srng *srng = (struct hal_srng *)hal_ring_hdl;
1037 
1038 	if (srng->ring_dir == HAL_SRNG_SRC_RING) {
1039 		*headp = srng->u.src_ring.hp;
1040 		*tailp = *srng->u.src_ring.tp_addr;
1041 	} else {
1042 		*tailp = srng->u.dst_ring.tp;
1043 		*headp = *srng->u.dst_ring.hp_addr;
1044 	}
1045 }
1046 
1047 /**
1048  * hal_srng_src_get_next - Get next entry from a source ring and move cached head pointer
1049  *
1050  * @hal_soc: Opaque HAL SOC handle
1051  * @hal_ring_hdl: Source ring pointer
1052  *
1053  * Return: Opaque pointer for next ring entry; NULL on failure
1054  */
1055 static inline
1056 void *hal_srng_src_get_next(void *hal_soc,
1057 			    hal_ring_handle_t hal_ring_hdl)
1058 {
1059 	struct hal_srng *srng = (struct hal_srng *)hal_ring_hdl;
1060 	uint32_t *desc;
1061 	/* TODO: Using % is expensive, but we have to do this since
1062 	 * size of some SRNG rings is not power of 2 (due to descriptor
1063 	 * sizes). Need to create separate API for rings used
1064 	 * per-packet, with sizes power of 2 (TCL2SW, REO2SW,
1065 	 * SW2RXDMA and CE rings)
1066 	 */
1067 	uint32_t next_hp = (srng->u.src_ring.hp + srng->entry_size) %
1068 		srng->ring_size;
1069 
1070 	if (next_hp != srng->u.src_ring.cached_tp) {
1071 		desc = &(srng->ring_base_vaddr[srng->u.src_ring.hp]);
1072 		srng->u.src_ring.hp = next_hp;
1073 		/* TODO: Since reap function is not used by all rings, we can
1074 		 * remove the following update of reap_hp in this function
1075 		 * if we can ensure that only hal_srng_src_get_next_reaped
1076 		 * is used for the rings requiring reap functionality
1077 		 */
1078 		srng->u.src_ring.reap_hp = next_hp;
1079 		return (void *)desc;
1080 	}
1081 
1082 	return NULL;
1083 }
1084 
1085 /**
1086  * hal_srng_src_peek - Get next entry from a ring without moving head pointer.
1087  * hal_srng_src_get_next should be called subsequently to move the head pointer
1088  *
1089  * @hal_soc: Opaque HAL SOC handle
1090  * @hal_ring_hdl: Source ring pointer
1091  *
1092  * Return: Opaque pointer for next ring entry; NULL on failure
1093  */
1094 static inline
1095 void *hal_srng_src_peek(hal_soc_handle_t hal_soc_hdl,
1096 			hal_ring_handle_t hal_ring_hdl)
1097 {
1098 	struct hal_srng *srng = (struct hal_srng *)hal_ring_hdl;
1099 	uint32_t *desc;
1100 
1101 	/* TODO: Using % is expensive, but we have to do this since
1102 	 * size of some SRNG rings is not power of 2 (due to descriptor
1103 	 * sizes). Need to create separate API for rings used
1104 	 * per-packet, with sizes power of 2 (TCL2SW, REO2SW,
1105 	 * SW2RXDMA and CE rings)
1106 	 */
1107 	if (((srng->u.src_ring.hp + srng->entry_size) %
1108 		srng->ring_size) != srng->u.src_ring.cached_tp) {
1109 		desc = &(srng->ring_base_vaddr[srng->u.src_ring.hp]);
1110 		return (void *)desc;
1111 	}
1112 
1113 	return NULL;
1114 }
1115 
1116 /**
1117  * hal_srng_src_num_avail - Returns number of available entries in src ring
1118  *
1119  * @hal_soc: Opaque HAL SOC handle
1120  * @hal_ring_hdl: Source ring pointer
1121  * @sync_hw_ptr: Sync cached tail pointer with HW
1122  *
1123  */
1124 static inline uint32_t
1125 hal_srng_src_num_avail(void *hal_soc,
1126 		       hal_ring_handle_t hal_ring_hdl, int sync_hw_ptr)
1127 {
1128 	struct hal_srng *srng = (struct hal_srng *)hal_ring_hdl;
1129 	uint32_t tp;
1130 	uint32_t hp = srng->u.src_ring.hp;
1131 
1132 	if (sync_hw_ptr) {
1133 		tp = *(srng->u.src_ring.tp_addr);
1134 		srng->u.src_ring.cached_tp = tp;
1135 	} else {
1136 		tp = srng->u.src_ring.cached_tp;
1137 	}
1138 
1139 	if (tp > hp)
1140 		return ((tp - hp) / srng->entry_size) - 1;
1141 	else
1142 		return ((srng->ring_size - hp + tp) / srng->entry_size) - 1;
1143 }
1144 
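/*
 * Worked example (illustrative): with entry_size = 2 words and
 * ring_size = 64 words, hp = 60 and tp = 20 gives
 *	((64 - 60 + 20) / 2) - 1 = 11 available entries.
 * One entry is kept unused so a full ring can be distinguished from an
 * empty one.
 */
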
1145 /**
1146  * hal_srng_access_end_unlocked - End ring access (unlocked) - update cached
1147  * ring head/tail pointers to HW.
1148  * This should be used only if hal_srng_access_start_unlocked was used to
1149  * start ring access
1150  *
1151  * @hal_soc: Opaque HAL SOC handle
1152  * @hal_ring_hdl: Ring pointer (Source or Destination ring)
1153  *
1154  * Return: None
1155  */
1156 static inline void
1157 hal_srng_access_end_unlocked(void *hal_soc, hal_ring_handle_t hal_ring_hdl)
1158 {
1159 	struct hal_srng *srng = (struct hal_srng *)hal_ring_hdl;
1160 
1161 	/* TODO: See if we need a write memory barrier here */
1162 	if (srng->flags & HAL_SRNG_LMAC_RING) {
1163 		/* For LMAC rings, ring pointer updates are done through FW and
1164 		 * hence written to a shared memory location that is read by FW
1165 		 */
1166 		if (srng->ring_dir == HAL_SRNG_SRC_RING) {
1167 			*(srng->u.src_ring.hp_addr) = srng->u.src_ring.hp;
1168 		} else {
1169 			*(srng->u.dst_ring.tp_addr) = srng->u.dst_ring.tp;
1170 		}
1171 	} else {
1172 		if (srng->ring_dir == HAL_SRNG_SRC_RING)
1173 			hal_write_address_32_mb(hal_soc,
1174 				srng->u.src_ring.hp_addr,
1175 				srng->u.src_ring.hp);
1176 		else
1177 			hal_write_address_32_mb(hal_soc,
1178 				srng->u.dst_ring.tp_addr,
1179 				srng->u.dst_ring.tp);
1180 	}
1181 }
1182 
1183 /**
1184  * hal_srng_access_end - Unlock ring access and update cached ring head/tail
1185  * pointers to HW
1186  * This should be used only if hal_srng_access_start was used to start ring access
1187  *
1188  * @hal_soc: Opaque HAL SOC handle
1189  * @hal_ring_hdl: Ring pointer (Source or Destination ring)
1190  *
1191  * Return: None
1192  */
1193 static inline void
1194 hal_srng_access_end(void *hal_soc, hal_ring_handle_t hal_ring_hdl)
1195 {
1196 	struct hal_srng *srng = (struct hal_srng *)hal_ring_hdl;
1197 
1198 	if (qdf_unlikely(!hal_ring_hdl)) {
1199 		qdf_print("Error: Invalid hal_ring\n");
1200 		return;
1201 	}
1202 
1203 	hal_srng_access_end_unlocked(hal_soc, hal_ring_hdl);
1204 	SRNG_UNLOCK(&(srng->lock));
1205 }
1206 
1207 /**
1208  * hal_srng_access_end_reap - Unlock ring access
1209  * This should be used only if hal_srng_access_start was used to start ring access
1210  * and should be used only while reaping SRC ring completions
1211  *
1212  * @hal_soc: Opaque HAL SOC handle
1213  * @hal_ring_hdl: Ring pointer (Source or Destination ring)
1214  *
1215  * Return: None
1216  */
1217 static inline void
1218 hal_srng_access_end_reap(void *hal_soc, hal_ring_handle_t hal_ring_hdl)
1219 {
1220 	struct hal_srng *srng = (struct hal_srng *)hal_ring_hdl;
1221 
1222 	SRNG_UNLOCK(&(srng->lock));
1223 }
1224 
1225 /* TODO: Check if the following definitions is available in HW headers */
1226 #define WBM_IDLE_SCATTER_BUF_SIZE 32704
1227 #define NUM_MPDUS_PER_LINK_DESC 6
1228 #define NUM_MSDUS_PER_LINK_DESC 7
1229 #define REO_QUEUE_DESC_ALIGN 128
1230 
1231 #define LINK_DESC_ALIGN 128
1232 
1233 #define ADDRESS_MATCH_TAG_VAL 0x5
1234 /* Number of mpdu link pointers is 9 in case of TX_MPDU_QUEUE_HEAD and 14
1235  * in case of TX_MPDU_QUEUE_EXT. We are defining a common average count here
1236  */
1237 #define NUM_MPDU_LINKS_PER_QUEUE_DESC 12
1238 
1239 /* TODO: Check with HW team on the scatter buffer size supported. As per WBM
1240  * MLD, scatter_buffer_size in IDLE_LIST_CONTROL register is 9 bits and size
1241  * should be specified in 16 word units. But the number of bits defined for
1242  * this field in HW header files is 5.
1243  */
1244 #define WBM_IDLE_SCATTER_BUF_NEXT_PTR_SIZE 8
1245 
1246 
1247 /**
1248  * hal_idle_list_scatter_buf_size - Get the size of each scatter buffer
1249  * in an idle list
1250  *
1251  * @hal_soc: Opaque HAL SOC handle
1252  *
1253  */
1254 static inline
1255 uint32_t hal_idle_list_scatter_buf_size(hal_soc_handle_t hal_soc_hdl)
1256 {
1257 	return WBM_IDLE_SCATTER_BUF_SIZE;
1258 }
1259 
1260 /**
1261  * hal_get_link_desc_size - Get the size of each link descriptor
1262  *
1263  * @hal_soc: Opaque HAL SOC handle
1264  *
1265  */
1266 static inline uint32_t hal_get_link_desc_size(hal_soc_handle_t hal_soc_hdl)
1267 {
1268 	struct hal_soc *hal_soc = (struct hal_soc *)hal_soc_hdl;
1269 
1270 	if (!hal_soc || !hal_soc->ops) {
1271 		qdf_print("Error: Invalid ops\n");
1272 		QDF_BUG(0);
1273 		return -EINVAL;
1274 	}
1275 	if (!hal_soc->ops->hal_get_link_desc_size) {
1276 		qdf_print("Error: Invalid function pointer\n");
1277 		QDF_BUG(0);
1278 		return -EINVAL;
1279 	}
1280 	return hal_soc->ops->hal_get_link_desc_size();
1281 }
1282 
1283 /**
1284  * hal_get_link_desc_align - Get the required start address alignment for
1285  * link descriptors
1286  *
1287  * @hal_soc: Opaque HAL SOC handle
1288  *
1289  */
1290 static inline
1291 uint32_t hal_get_link_desc_align(hal_soc_handle_t hal_soc_hdl)
1292 {
1293 	return LINK_DESC_ALIGN;
1294 }
1295 
1296 /**
1297  * hal_num_mpdus_per_link_desc - Get number of mpdus each link desc can hold
1298  *
1299  * @hal_soc: Opaque HAL SOC handle
1300  *
1301  */
1302 static inline
1303 uint32_t hal_num_mpdus_per_link_desc(hal_soc_handle_t hal_soc_hdl)
1304 {
1305 	return NUM_MPDUS_PER_LINK_DESC;
1306 }
1307 
1308 /**
1309  * hal_num_msdus_per_link_desc - Get number of msdus each link desc can hold
1310  *
1311  * @hal_soc: Opaque HAL SOC handle
1312  *
1313  */
1314 static inline
1315 uint32_t hal_num_msdus_per_link_desc(hal_soc_handle_t hal_soc_hdl)
1316 {
1317 	return NUM_MSDUS_PER_LINK_DESC;
1318 }
1319 
1320 /**
1321  * hal_num_mpdu_links_per_queue_desc - Get number of mpdu links each queue
1322  * descriptor can hold
1323  *
1324  * @hal_soc: Opaque HAL SOC handle
1325  *
1326  */
1327 static inline
1328 uint32_t hal_num_mpdu_links_per_queue_desc(hal_soc_handle_t hal_soc_hdl)
1329 {
1330 	return NUM_MPDU_LINKS_PER_QUEUE_DESC;
1331 }
1332 
1333 /**
1334  * hal_idle_scatter_buf_num_entries - Get the number of link desc entries
1335  * that fit in a scatter buffer of the given size
1336  *
1337  * @hal_soc: Opaque HAL SOC handle
1338  * @scatter_buf_size: Size of scatter buffer
1339  *
1340  */
1341 static inline
1342 uint32_t hal_idle_scatter_buf_num_entries(hal_soc_handle_t hal_soc_hdl,
1343 					  uint32_t scatter_buf_size)
1344 {
1345 	return (scatter_buf_size - WBM_IDLE_SCATTER_BUF_NEXT_PTR_SIZE) /
1346 		hal_srng_get_entrysize(hal_soc_hdl, WBM_IDLE_LINK);
1347 }
1348 
1349 /**
1350  * hal_idle_list_num_scatter_bufs - Get the number of scatter buffers
1351  * needed for the given total memory and buffer size
1352  *
1353  * @hal_soc: Opaque HAL SOC handle
1354  * @total_mem: size of memory to be scattered
1355  * @scatter_buf_size: Size of scatter buffer
1356  *
1357  */
1358 static inline
1359 uint32_t hal_idle_list_num_scatter_bufs(hal_soc_handle_t hal_soc_hdl,
1360 					uint32_t total_mem,
1361 					uint32_t scatter_buf_size)
1362 {
1363 	uint8_t rem = (total_mem % (scatter_buf_size -
1364 			WBM_IDLE_SCATTER_BUF_NEXT_PTR_SIZE)) ? 1 : 0;
1365 
1366 	uint32_t num_scatter_bufs = (total_mem / (scatter_buf_size -
1367 				WBM_IDLE_SCATTER_BUF_NEXT_PTR_SIZE)) + rem;
1368 
1369 	return num_scatter_bufs;
1370 }
1371 
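/*
 * Worked example (illustrative): for total_mem = 1048576 bytes and
 * scatter_buf_size = WBM_IDLE_SCATTER_BUF_SIZE (32704 bytes):
 *	usable bytes per buffer = 32704 - 8 = 32696
 *	1048576 / 32696 = 32, remainder 2304  ->  32 + 1 = 33 buffers
 */
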
1372 enum hal_pn_type {
1373 	HAL_PN_NONE,
1374 	HAL_PN_WPA,
1375 	HAL_PN_WAPI_EVEN,
1376 	HAL_PN_WAPI_UNEVEN,
1377 };
1378 
1379 #define HAL_RX_MAX_BA_WINDOW 256
1380 
1381 /**
1382  * hal_get_reo_qdesc_align - Get start address alignment for reo
1383  * queue descriptors
1384  *
1385  * @hal_soc: Opaque HAL SOC handle
1386  *
1387  */
1388 static inline
1389 uint32_t hal_get_reo_qdesc_align(hal_soc_handle_t hal_soc_hdl)
1390 {
1391 	return REO_QUEUE_DESC_ALIGN;
1392 }
1393 
1394 /**
1395  * hal_reo_qdesc_setup - Setup HW REO queue descriptor
1396  *
1397  * @hal_soc: Opaque HAL SOC handle
 * @tid: TID for which the REO queue is being set up
1398  * @ba_window_size: BlockAck window size
1399  * @start_seq: Starting sequence number
1400  * @hw_qdesc_vaddr: Virtual address of REO queue descriptor memory
1401  * @hw_qdesc_paddr: Physical address of REO queue descriptor memory
1402  * @pn_type: PN type (one of the types defined in 'enum hal_pn_type')
1403  *
1404  */
1405 void hal_reo_qdesc_setup(hal_soc_handle_t hal_soc_hdl,
1406 			 int tid, uint32_t ba_window_size,
1407 			 uint32_t start_seq, void *hw_qdesc_vaddr,
1408 			 qdf_dma_addr_t hw_qdesc_paddr,
1409 			 int pn_type);
1410 
1411 /**
1412  * hal_srng_get_hp_addr - Get head pointer physical address
1413  *
1414  * @hal_soc: Opaque HAL SOC handle
1415  * @hal_ring_hdl: Ring pointer (Source or Destination ring)
1416  *
1417  */
1418 static inline qdf_dma_addr_t
1419 hal_srng_get_hp_addr(void *hal_soc,
1420 		     hal_ring_handle_t hal_ring_hdl)
1421 {
1422 	struct hal_srng *srng = (struct hal_srng *)hal_ring_hdl;
1423 	struct hal_soc *hal = (struct hal_soc *)hal_soc;
1424 
1425 	if (srng->ring_dir == HAL_SRNG_SRC_RING) {
1426 		return hal->shadow_wrptr_mem_paddr +
1427 		  ((unsigned long)(srng->u.src_ring.hp_addr) -
1428 		  (unsigned long)(hal->shadow_wrptr_mem_vaddr));
1429 	} else {
1430 		return hal->shadow_rdptr_mem_paddr +
1431 		  ((unsigned long)(srng->u.dst_ring.hp_addr) -
1432 		   (unsigned long)(hal->shadow_rdptr_mem_vaddr));
1433 	}
1434 }
1435 
1436 /**
1437  * hal_srng_get_tp_addr - Get tail pointer physical address
1438  *
1439  * @hal_soc: Opaque HAL SOC handle
1440  * @hal_ring_hdl: Ring pointer (Source or Destination ring)
1441  *
1442  */
1443 static inline qdf_dma_addr_t
1444 hal_srng_get_tp_addr(void *hal_soc, hal_ring_handle_t hal_ring_hdl)
1445 {
1446 	struct hal_srng *srng = (struct hal_srng *)hal_ring_hdl;
1447 	struct hal_soc *hal = (struct hal_soc *)hal_soc;
1448 
1449 	if (srng->ring_dir == HAL_SRNG_SRC_RING) {
1450 		return hal->shadow_rdptr_mem_paddr +
1451 			((unsigned long)(srng->u.src_ring.tp_addr) -
1452 			(unsigned long)(hal->shadow_rdptr_mem_vaddr));
1453 	} else {
1454 		return hal->shadow_wrptr_mem_paddr +
1455 			((unsigned long)(srng->u.dst_ring.tp_addr) -
1456 			(unsigned long)(hal->shadow_wrptr_mem_vaddr));
1457 	}
1458 }
1459 
1460 /**
1461  * hal_srng_get_num_entries - Get total number of entries in the HAL SRNG
1462  *
1463  * @hal_soc: Opaque HAL SOC handle
1464  * @hal_ring_hdl: Ring pointer (Source or Destination ring)
1465  *
1466  * Return: total number of entries in hal ring
1467  */
1468 static inline
1469 uint32_t hal_srng_get_num_entries(hal_soc_handle_t hal_soc_hdl,
1470 				  hal_ring_handle_t hal_ring_hdl)
1471 {
1472 	struct hal_srng *srng = (struct hal_srng *)hal_ring_hdl;
1473 
1474 	return srng->num_entries;
1475 }
1476 
1477 /**
1478  * hal_get_srng_params - Retrieve SRNG parameters for a given ring from HAL
1479  *
1480  * @hal_soc: Opaque HAL SOC handle
1481  * @hal_ring_hdl: Ring pointer (Source or Destination ring)
1482  * @ring_params: SRNG parameters will be returned through this structure
1483  */
1484 void hal_get_srng_params(hal_soc_handle_t hal_soc_hdl,
1485 			 hal_ring_handle_t hal_ring_hdl,
1486 			 struct hal_srng_params *ring_params);
1487 
1488 /**
1489  * hal_get_meminfo - Retrieve hal memory info
1490  *
1491  * @hal_soc: Opaque HAL SOC handle
1492  * @mem: pointer to structure to be updated with hal mem info
1493  */
1494 void hal_get_meminfo(hal_soc_handle_t hal_soc_hdl, struct hal_mem_info *mem);
1495 
1496 /**
1497  * hal_get_target_type - Return target type
1498  *
1499  * @hal_soc: Opaque HAL SOC handle
1500  */
1501 uint32_t hal_get_target_type(hal_soc_handle_t hal_soc_hdl);
1502 
1503 /**
1504  * hal_get_ba_aging_timeout - Retrieve BA aging timeout
1505  *
1506  * @hal_soc: Opaque HAL SOC handle
1507  * @ac: Access category
1508  * @value: timeout duration in millisec
1509  */
1510 void hal_get_ba_aging_timeout(hal_soc_handle_t hal_soc_hdl, uint8_t ac,
1511 			      uint32_t *value);
1512 /**
1513  * hal_set_ba_aging_timeout - Set BA aging timeout
1514  *
1515  * @hal_soc: Opaque HAL SOC handle
1516  * @ac: Access category
1517  * @value: timeout duration in millisec
1518  */
1519 void hal_set_ba_aging_timeout(hal_soc_handle_t hal_soc_hdl, uint8_t ac,
1520 			      uint32_t value);
1521 /**
1522  * hal_srng_dst_hw_init - Private function to initialize SRNG
1523  * destination ring HW
1524  * @hal_soc: HAL SOC handle
1525  * @srng: SRNG ring pointer
1526  */
1527 static inline void hal_srng_dst_hw_init(struct hal_soc *hal,
1528 	struct hal_srng *srng)
1529 {
1530 	hal->ops->hal_srng_dst_hw_init(hal, srng);
1531 }
1532 
1533 /**
1534  * hal_srng_src_hw_init - Private function to initialize SRNG
1535  * source ring HW
1536  * @hal_soc: HAL SOC handle
1537  * @srng: SRNG ring pointer
1538  */
1539 static inline void hal_srng_src_hw_init(struct hal_soc *hal,
1540 	struct hal_srng *srng)
1541 {
1542 	hal->ops->hal_srng_src_hw_init(hal, srng);
1543 }
1544 
1545 /**
1546  * hal_get_hw_hptp()  - Get HW head and tail pointer value for any ring
1547  * @hal_soc: Opaque HAL SOC handle
1548  * @hal_ring_hdl: Source ring pointer
1549  * @headp: Head Pointer
1550  * @tailp: Tail Pointer
1551  * @ring_type: Ring type (one of the types from hal_ring_type)
1552  *
1553  * Return: None. Head and tail pointer values are returned in the arguments.
1554  */
1555 static inline
1556 void hal_get_hw_hptp(hal_soc_handle_t hal_soc_hdl,
1557 		     hal_ring_handle_t hal_ring_hdl,
1558 		     uint32_t *headp, uint32_t *tailp,
1559 		     uint8_t ring_type)
1560 {
1561 	struct hal_soc *hal_soc = (struct hal_soc *)hal_soc_hdl;
1562 
1563 	hal_soc->ops->hal_get_hw_hptp(hal_soc, hal_ring_hdl,
1564 			headp, tailp, ring_type);
1565 }
1566 
1567 /**
1568  * hal_reo_setup - Initialize HW REO block
1569  *
1570  * @hal_soc: Opaque HAL SOC handle
1571  * @reoparams: parameters needed by HAL for REO config
1572  */
1573 static inline void hal_reo_setup(hal_soc_handle_t hal_soc_hdl,
1574 				 void *reoparams)
1575 {
1576 	struct hal_soc *hal_soc = (struct hal_soc *)hal_soc_hdl;
1577 
1578 	hal_soc->ops->hal_reo_setup(hal_soc, reoparams);
1579 }
1580 
1581 /**
1582  * hal_setup_link_idle_list - Setup scattered idle list using the
1583  * buffer list provided
1584  *
1585  * @hal_soc: Opaque HAL SOC handle
1586  * @scatter_bufs_base_paddr: Array of physical base addresses
1587  * @scatter_bufs_base_vaddr: Array of virtual base addresses
1588  * @num_scatter_bufs: Number of scatter buffers in the above lists
1589  * @scatter_buf_size: Size of each scatter buffer
1590  * @last_buf_end_offset: Offset to the last entry
1591  * @num_entries: Total entries of all scatter bufs
1592  *
1593  */
1594 static inline
1595 void hal_setup_link_idle_list(hal_soc_handle_t hal_soc_hdl,
1596 			      qdf_dma_addr_t scatter_bufs_base_paddr[],
1597 			      void *scatter_bufs_base_vaddr[],
1598 			      uint32_t num_scatter_bufs,
1599 			      uint32_t scatter_buf_size,
1600 			      uint32_t last_buf_end_offset,
1601 			      uint32_t num_entries)
1602 {
1603 	struct hal_soc *hal_soc = (struct hal_soc *)hal_soc_hdl;
1604 
1605 	hal_soc->ops->hal_setup_link_idle_list(hal_soc, scatter_bufs_base_paddr,
1606 			scatter_bufs_base_vaddr, num_scatter_bufs,
1607 			scatter_buf_size, last_buf_end_offset,
1608 			num_entries);
1609 
1610 }
1611 
1612 /**
1613  * hal_srng_dump_ring_desc() - Dump ring descriptor info
1614  *
1615  * @hal_soc: Opaque HAL SOC handle
1616  * @hal_ring_hdl: Source ring pointer
1617  * @ring_desc: Opaque ring descriptor handle
1618  */
1619 static inline void hal_srng_dump_ring_desc(hal_soc_handle_t hal_soc_hdl,
1620 					   hal_ring_handle_t hal_ring_hdl,
1621 					   hal_ring_desc_t ring_desc)
1622 {
1623 	struct hal_srng *srng = (struct hal_srng *)hal_ring_hdl;
1624 
1625 	QDF_TRACE_HEX_DUMP(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_HIGH,
1626 			   ring_desc, (srng->entry_size << 2));
1627 }
1628 
1629 /**
1630  * hal_srng_dump_ring() - Dump last 128 descs of the ring
1631  *
1632  * @hal_soc: Opaque HAL SOC handle
1633  * @hal_ring_hdl: Source ring pointer
1634  */
1635 static inline void hal_srng_dump_ring(hal_soc_handle_t hal_soc_hdl,
1636 				      hal_ring_handle_t hal_ring_hdl)
1637 {
1638 	struct hal_srng *srng = (struct hal_srng *)hal_ring_hdl;
1639 	uint32_t *desc;
1640 	uint32_t tp, i;
1641 
1642 	tp = srng->u.dst_ring.tp;
1643 
1644 	for (i = 0; i < 128; i++) {
1645 		if (!tp)
1646 			tp = srng->ring_size;
1647 
1648 		desc = &srng->ring_base_vaddr[tp - srng->entry_size];
1649 		QDF_TRACE_HEX_DUMP(QDF_MODULE_ID_DP,
1650 				   QDF_TRACE_LEVEL_DEBUG,
1651 				   desc, (srng->entry_size << 2));
1652 
1653 		tp -= srng->entry_size;
1654 	}
1655 }
1656 
1657 /*
1658  * hal_rxdma_desc_to_hal_ring_desc - API to convert rxdma ring desc
1659  * to opaque dp_ring desc type
1660  * @ring_desc - rxdma ring desc
1661  *
1662  * Return: hal_rxdma_desc_t type
1663  */
1664 static inline
1665 hal_ring_desc_t hal_rxdma_desc_to_hal_ring_desc(hal_rxdma_desc_t ring_desc)
1666 {
1667 	return (hal_ring_desc_t)ring_desc;
1668 }
1669 
1670 /**
1671  * hal_srng_set_event() - Set hal_srng event
1672  * @hal_ring_hdl: Source ring pointer
1673  * @event: SRNG ring event
1674  *
1675  * Return: None
1676  */
1677 static inline void hal_srng_set_event(hal_ring_handle_t hal_ring_hdl, int event)
1678 {
1679 	struct hal_srng *srng = (struct hal_srng *)hal_ring_hdl;
1680 
1681 	qdf_atomic_set_bit(event, &srng->srng_event);
1682 }
1683 
1684 /**
1685  * hal_srng_clear_event() - Clear hal_srng event
1686  * @hal_ring_hdl: Source ring pointer
1687  * @event: SRNG ring event
1688  *
1689  * Return: None
1690  */
1691 static inline
1692 void hal_srng_clear_event(hal_ring_handle_t hal_ring_hdl, int event)
1693 {
1694 	struct hal_srng *srng = (struct hal_srng *)hal_ring_hdl;
1695 
1696 	qdf_atomic_clear_bit(event, &srng->srng_event);
1697 }
1698 
1699 /**
1700  * hal_srng_get_clear_event() - Clear srng event and return old value
1701  * @hal_ring_hdl: Source ring pointer
1702  * @event: SRNG ring event
1703  *
1704  * Return: Return old event value
1705  */
1706 static inline
1707 int hal_srng_get_clear_event(hal_ring_handle_t hal_ring_hdl, int event)
1708 {
1709 	struct hal_srng *srng = (struct hal_srng *)hal_ring_hdl;
1710 
1711 	return qdf_atomic_test_and_clear_bit(event, &srng->srng_event);
1712 }
1713 
1714 /**
1715  * hal_srng_set_flush_last_ts() - Record last flush time stamp
1716  * @hal_ring_hdl: Source ring pointer
1717  *
1718  * Return: None
1719  */
1720 static inline void hal_srng_set_flush_last_ts(hal_ring_handle_t hal_ring_hdl)
1721 {
1722 	struct hal_srng *srng = (struct hal_srng *)hal_ring_hdl;
1723 
1724 	srng->last_flush_ts = qdf_get_log_timestamp();
1725 }
1726 
1727 /**
1728  * hal_srng_inc_flush_cnt() - Increment flush counter
1729  * @hal_ring_hdl: Source ring pointer
1730  *
1731  * Return: None
1732  */
1733 static inline void hal_srng_inc_flush_cnt(hal_ring_handle_t hal_ring_hdl)
1734 {
1735 	struct hal_srng *srng = (struct hal_srng *)hal_ring_hdl;
1736 
1737 	srng->flush_count++;
1738 }
1739 #endif /* _HAL_API_H_ */
1740