/*
 * Copyright (c) 2016-2019 The Linux Foundation. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */

#ifndef _HAL_API_H_
#define _HAL_API_H_

#include "qdf_types.h"
#include "qdf_util.h"
#include "qdf_atomic.h"
#include "hal_internal.h"
#include "hif.h"
#include "hif_io32.h"

/* calculate the register address offset from bar0 of shadow register x */
#if defined(QCA_WIFI_QCA6390) || defined(QCA_WIFI_QCA6490)
#define SHADOW_REGISTER_START_ADDRESS_OFFSET 0x000008FC
#define SHADOW_REGISTER_END_ADDRESS_OFFSET \
	((SHADOW_REGISTER_START_ADDRESS_OFFSET) + (4 * (MAX_SHADOW_REGISTERS)))
#define SHADOW_REGISTER(x) ((SHADOW_REGISTER_START_ADDRESS_OFFSET) + (4 * (x)))
#elif defined(QCA_WIFI_QCA6290)
#define SHADOW_REGISTER_START_ADDRESS_OFFSET 0x00003024
#define SHADOW_REGISTER_END_ADDRESS_OFFSET \
	((SHADOW_REGISTER_START_ADDRESS_OFFSET) + (4 * (MAX_SHADOW_REGISTERS)))
#define SHADOW_REGISTER(x) ((SHADOW_REGISTER_START_ADDRESS_OFFSET) + (4 * (x)))
#endif /* QCA_WIFI_QCA6390 || QCA_WIFI_QCA6490 || QCA_WIFI_QCA6290 */

#define MAX_UNWINDOWED_ADDRESS 0x80000
#if defined(QCA_WIFI_QCA6390) || defined(QCA_WIFI_QCA6490) || \
	defined(QCA_WIFI_QCN9000)
#define WINDOW_ENABLE_BIT 0x40000000
#else
#define WINDOW_ENABLE_BIT 0x80000000
#endif
#define WINDOW_REG_ADDRESS 0x310C
#define WINDOW_SHIFT 19
#define WINDOW_VALUE_MASK 0x3F
#define WINDOW_START MAX_UNWINDOWED_ADDRESS
#define WINDOW_RANGE_MASK 0x7FFFF
/*
 * BAR + 4K is always accessible, any access outside this
 * space requires force wake procedure.
 * OFFSET = 4K - 32 bytes = 0xFE0
 */
#define MAPPED_REF_OFF 0xFE0
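
/*
 * Worked example (illustrative only, not part of the driver API): a
 * register offset is split into a 6-bit window select value and an
 * offset within the 512KB window. E.g. offset 0xA30010 gives window
 * (0xA30010 >> 19) & 0x3F = 0x14 and in-window offset
 * 0xA30010 & 0x7FFFF = 0x30010, so after programming
 * WINDOW_ENABLE_BIT | 0x14 into WINDOW_REG_ADDRESS the access lands at
 * BAR + WINDOW_START + 0x30010. The helpers below use hypothetical
 * names added purely for illustration.
 */
static inline uint32_t hal_example_window_select(uint32_t offset)
{
	/* window select value, as computed by hal_select_window() below */
	return (offset >> WINDOW_SHIFT) & WINDOW_VALUE_MASK;
}

static inline uint32_t hal_example_window_offset(uint32_t offset)
{
	/* BAR-relative address to access once the window is mapped */
	return WINDOW_START + (offset & WINDOW_RANGE_MASK);
}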

/**
 * hal_ring_desc - opaque handle for DP ring descriptor
 */
struct hal_ring_desc;
typedef struct hal_ring_desc *hal_ring_desc_t;

/**
 * hal_link_desc - opaque handle for DP link descriptor
 */
struct hal_link_desc;
typedef struct hal_link_desc *hal_link_desc_t;

/**
 * hal_rxdma_desc - opaque handle for DP rxdma dst ring descriptor
 */
struct hal_rxdma_desc;
typedef struct hal_rxdma_desc *hal_rxdma_desc_t;

/**
 * hal_buff_addrinfo - opaque handle for DP buffer address info
 */
struct hal_buff_addrinfo;
typedef struct hal_buff_addrinfo *hal_buff_addrinfo_t;

#ifdef ENABLE_VERBOSE_DEBUG
static inline void
hal_set_verbose_debug(bool flag)
{
	is_hal_verbose_debug_enabled = flag;
}
#endif

/**
 * hal_reg_write_result_check() - check register writing result
 * @hal_soc: HAL soc handle
 * @offset: register offset to read back
 * @exp_val: the expected value of register
 *
 * Return: none
 */
static inline void hal_reg_write_result_check(struct hal_soc *hal_soc,
					      uint32_t offset,
					      uint32_t exp_val)
{
	uint32_t value;

	value = qdf_ioread32(hal_soc->dev_base_addr + offset);
	if (exp_val != value) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_HIGH,
			  "register offset 0x%x write failed!\n", offset);
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_HIGH,
			  "the expectation 0x%x, actual value 0x%x\n",
			  exp_val,
			  value);
	}
}

#if !defined(QCA_WIFI_QCA6390) && !defined(QCA_WIFI_QCA6490)
static inline void hal_lock_reg_access(struct hal_soc *soc,
				       unsigned long *flags)
{
	qdf_spin_lock_irqsave(&soc->register_access_lock);
}

static inline void hal_unlock_reg_access(struct hal_soc *soc,
					 unsigned long *flags)
{
	qdf_spin_unlock_irqrestore(&soc->register_access_lock);
}
#else
static inline void hal_lock_reg_access(struct hal_soc *soc,
				       unsigned long *flags)
{
	pld_lock_reg_window(soc->qdf_dev->dev, flags);
}

static inline void hal_unlock_reg_access(struct hal_soc *soc,
					 unsigned long *flags)
{
	pld_unlock_reg_window(soc->qdf_dev->dev, flags);
}
#endif

#ifdef PCIE_REG_WINDOW_LOCAL_NO_CACHE
static inline void hal_select_window(struct hal_soc *hal_soc, uint32_t offset)
{
	uint32_t window = (offset >> WINDOW_SHIFT) & WINDOW_VALUE_MASK;

	qdf_iowrite32(hal_soc->dev_base_addr + WINDOW_REG_ADDRESS,
		      WINDOW_ENABLE_BIT | window);
	hal_soc->register_window = window;
}

/**
 * hal_select_window_confirm() - write remap window register and
 *				 check writing result
 *
 */
static inline void hal_select_window_confirm(struct hal_soc *hal_soc,
					     uint32_t offset)
{
	uint32_t window = (offset >> WINDOW_SHIFT) & WINDOW_VALUE_MASK;

	qdf_iowrite32(hal_soc->dev_base_addr + WINDOW_REG_ADDRESS,
		      WINDOW_ENABLE_BIT | window);
	hal_soc->register_window = window;

	hal_reg_write_result_check(hal_soc, WINDOW_REG_ADDRESS,
				   WINDOW_ENABLE_BIT | window);
}
#else
static inline void hal_select_window(struct hal_soc *hal_soc, uint32_t offset)
{
	uint32_t window = (offset >> WINDOW_SHIFT) & WINDOW_VALUE_MASK;

	if (window != hal_soc->register_window) {
		qdf_iowrite32(hal_soc->dev_base_addr + WINDOW_REG_ADDRESS,
			      WINDOW_ENABLE_BIT | window);
		hal_soc->register_window = window;
	}
}

static inline void hal_select_window_confirm(struct hal_soc *hal_soc,
					     uint32_t offset)
{
	uint32_t window = (offset >> WINDOW_SHIFT) & WINDOW_VALUE_MASK;

	if (window != hal_soc->register_window) {
		qdf_iowrite32(hal_soc->dev_base_addr + WINDOW_REG_ADDRESS,
			      WINDOW_ENABLE_BIT | window);
		hal_soc->register_window = window;

		hal_reg_write_result_check(
					hal_soc,
					WINDOW_REG_ADDRESS,
					WINDOW_ENABLE_BIT | window);
	}
}
#endif

static inline qdf_iomem_t hal_get_window_address(struct hal_soc *hal_soc,
						 qdf_iomem_t addr)
{
	return hal_soc->ops->hal_get_window_address(hal_soc, addr);
}

/**
 * hal_write32_mb() - Access registers to update configuration
 * @hal_soc: hal soc handle
 * @offset: offset address from the BAR
 * @value: value to write
 *
 * Return: None
 *
 * Description: Register address space is split as below:
 *     SHADOW REGION       UNWINDOWED REGION    WINDOWED REGION
 *  |--------------------|-------------------|------------------|
 * BAR  NO FORCE WAKE  BAR+4K  FORCE WAKE  BAR+512K  FORCE WAKE
 *
 * 1. Any access to the shadow region doesn't need force wake
 *    or windowing logic.
 * 2. Any access beyond BAR + 4K:
 *    If init_phase is enabled, no force wake is needed and the access
 *    should be based on windowed or unwindowed access.
 *    If init_phase is disabled, force wake is needed and the access
 *    should be based on windowed or unwindowed access.
 *
 * note1: WINDOW_RANGE_MASK = (1 << WINDOW_SHIFT) - 1
 * note2: 1 << WINDOW_SHIFT = MAX_UNWINDOWED_ADDRESS
 * note3: WINDOW_VALUE_MASK = big enough that trying to write past
 *                            that window would be a bug
 */
#if !defined(QCA_WIFI_QCA6390) && !defined(QCA_WIFI_QCA6490)
static inline void hal_write32_mb(struct hal_soc *hal_soc, uint32_t offset,
				  uint32_t value)
{
	unsigned long flags;
	qdf_iomem_t new_addr;

	if (!hal_soc->use_register_windowing ||
	    offset < MAX_UNWINDOWED_ADDRESS) {
		qdf_iowrite32(hal_soc->dev_base_addr + offset, value);
	} else if (hal_soc->static_window_map) {
		new_addr = hal_get_window_address(hal_soc,
				hal_soc->dev_base_addr + offset);
		qdf_iowrite32(new_addr, value);
	} else {
		hal_lock_reg_access(hal_soc, &flags);
		hal_select_window(hal_soc, offset);
		qdf_iowrite32(hal_soc->dev_base_addr + WINDOW_START +
			  (offset & WINDOW_RANGE_MASK), value);
		hal_unlock_reg_access(hal_soc, &flags);
	}
}

/**
 * hal_write_address_32_mb - write a value to a register
 *
 */
static inline
void hal_write_address_32_mb(struct hal_soc *hal_soc,
			     qdf_iomem_t addr, uint32_t value)
{
	uint32_t offset;
	qdf_iomem_t new_addr;

	if (!hal_soc->use_register_windowing)
		return qdf_iowrite32(addr, value);

	offset = addr - hal_soc->dev_base_addr;
	if (hal_soc->static_window_map) {
		new_addr = hal_get_window_address(hal_soc, addr);
		return qdf_iowrite32(new_addr, value);
	}
	hal_write32_mb(hal_soc, offset, value);
}

#define hal_write32_mb_confirm(_hal_soc, _offset, _value) \
		hal_write32_mb(_hal_soc, _offset, _value)
#else
static inline void hal_write32_mb(struct hal_soc *hal_soc, uint32_t offset,
				  uint32_t value)
{
	int ret;
	unsigned long flags;

	/* Region < BAR + 4K can be directly accessed */
	if (offset < MAPPED_REF_OFF) {
		qdf_iowrite32(hal_soc->dev_base_addr + offset, value);
		return;
	}

	/* Region greater than BAR + 4K */
	if (!hal_soc->init_phase) {
		ret = hif_force_wake_request(hal_soc->hif_handle);
		if (ret) {
			hal_err("Wake up request failed");
			QDF_BUG(0);
			return;
		}
	}

	if (!hal_soc->use_register_windowing ||
	    offset < MAX_UNWINDOWED_ADDRESS) {
		qdf_iowrite32(hal_soc->dev_base_addr + offset, value);
	} else {
		hal_lock_reg_access(hal_soc, &flags);
		hal_select_window(hal_soc, offset);
		qdf_iowrite32(hal_soc->dev_base_addr + WINDOW_START +
			  (offset & WINDOW_RANGE_MASK), value);
		hal_unlock_reg_access(hal_soc, &flags);
	}

	if (!hal_soc->init_phase) {
		ret = hif_force_wake_release(hal_soc->hif_handle);
		if (ret) {
			hal_err("Wake up release failed");
			QDF_BUG(0);
			return;
		}
	}
}

/**
 * hal_write32_mb_confirm() - write register and check writing result
 *
 */
static inline void hal_write32_mb_confirm(struct hal_soc *hal_soc,
					  uint32_t offset,
					  uint32_t value)
{
	int ret;
	unsigned long flags;

	/* Region < BAR + 4K can be directly accessed */
	if (offset < MAPPED_REF_OFF) {
		qdf_iowrite32(hal_soc->dev_base_addr + offset, value);
		return;
	}

	/* Region greater than BAR + 4K */
	if (!hal_soc->init_phase) {
		ret = hif_force_wake_request(hal_soc->hif_handle);
		if (ret) {
			hal_err("Wake up request failed");
			QDF_BUG(0);
			return;
		}
	}

	if (!hal_soc->use_register_windowing ||
	    offset < MAX_UNWINDOWED_ADDRESS) {
		qdf_iowrite32(hal_soc->dev_base_addr + offset, value);
		hal_reg_write_result_check(hal_soc, offset,
					   value);
	} else {
		hal_lock_reg_access(hal_soc, &flags);
		hal_select_window_confirm(hal_soc, offset);
		qdf_iowrite32(hal_soc->dev_base_addr + WINDOW_START +
			  (offset & WINDOW_RANGE_MASK), value);

		hal_reg_write_result_check(
				hal_soc,
				WINDOW_START + (offset & WINDOW_RANGE_MASK),
				value);
		hal_unlock_reg_access(hal_soc, &flags);
	}

	if (!hal_soc->init_phase) {
		ret = hif_force_wake_release(hal_soc->hif_handle);
		if (ret) {
			hal_err("Wake up release failed");
			QDF_BUG(0);
			return;
		}
	}
}

/**
 * hal_write_address_32_mb - write a value to a register
 *
 */
static inline
void hal_write_address_32_mb(struct hal_soc *hal_soc,
			     qdf_iomem_t addr, uint32_t value)
{
	uint32_t offset;

	if (!hal_soc->use_register_windowing)
		return qdf_iowrite32(addr, value);

	offset = addr - hal_soc->dev_base_addr;
	hal_write32_mb(hal_soc, offset, value);
}
#endif

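/*
 * Usage sketch (illustrative only; the register offset and value are
 * hypothetical): callers pass a BAR-relative offset and let the helpers
 * above handle windowing, locking and, on QCA6390/QCA6490, the
 * force-wake handshake.
 */
static inline void hal_example_program_reg(struct hal_soc *hal_soc)
{
	/* fire-and-forget write */
	hal_write32_mb(hal_soc, 0xA30010, 0x1);

	/* write with read-back verification */
	hal_write32_mb_confirm(hal_soc, 0xA30010, 0x1);
}
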
#ifdef DP_HAL_MULTIWINDOW_DIRECT_ACCESS
#define hal_srng_write_address_32_mb(_a, _b, _c) qdf_iowrite32(_b, _c)
#else
#define hal_srng_write_address_32_mb(_a, _b, _c) \
		hal_write_address_32_mb(_a, _b, _c)
#endif

#if !defined(QCA_WIFI_QCA6390) && !defined(QCA_WIFI_QCA6490)
/**
 * hal_read32_mb() - Access registers to read configuration
 * @hal_soc: hal soc handle
 * @offset: offset address from the BAR
 *
 * Description: Register address space is split as below:
 *     SHADOW REGION       UNWINDOWED REGION    WINDOWED REGION
 *  |--------------------|-------------------|------------------|
 * BAR  NO FORCE WAKE  BAR+4K  FORCE WAKE  BAR+512K  FORCE WAKE
 *
 * 1. Any access to the shadow region doesn't need force wake
 *    or windowing logic.
 * 2. Any access beyond BAR + 4K:
 *    If init_phase is enabled, no force wake is needed and the access
 *    should be based on windowed or unwindowed access.
 *    If init_phase is disabled, force wake is needed and the access
 *    should be based on windowed or unwindowed access.
 *
 * Return: value read from the register
 */
static inline
uint32_t hal_read32_mb(struct hal_soc *hal_soc, uint32_t offset)
{
	uint32_t ret;
	unsigned long flags;
	qdf_iomem_t new_addr;

	if (!hal_soc->use_register_windowing ||
	    offset < MAX_UNWINDOWED_ADDRESS) {
		return qdf_ioread32(hal_soc->dev_base_addr + offset);
	} else if (hal_soc->static_window_map) {
		new_addr = hal_get_window_address(hal_soc,
				hal_soc->dev_base_addr + offset);
		return qdf_ioread32(new_addr);
	}

	hal_lock_reg_access(hal_soc, &flags);
	hal_select_window(hal_soc, offset);
	ret = qdf_ioread32(hal_soc->dev_base_addr + WINDOW_START +
		       (offset & WINDOW_RANGE_MASK));
	hal_unlock_reg_access(hal_soc, &flags);

	return ret;
}
#else
static inline
uint32_t hal_read32_mb(struct hal_soc *hal_soc, uint32_t offset)
{
	uint32_t ret;
	unsigned long flags;

	/* Region < BAR + 4K can be directly accessed */
	if (offset < MAPPED_REF_OFF)
		return qdf_ioread32(hal_soc->dev_base_addr + offset);

	if ((!hal_soc->init_phase) &&
	    hif_force_wake_request(hal_soc->hif_handle)) {
		hal_err("Wake up request failed");
		QDF_BUG(0);
		return 0;
	}

	if (!hal_soc->use_register_windowing ||
	    offset < MAX_UNWINDOWED_ADDRESS) {
		ret = qdf_ioread32(hal_soc->dev_base_addr + offset);
	} else {
		hal_lock_reg_access(hal_soc, &flags);
		hal_select_window(hal_soc, offset);
		ret = qdf_ioread32(hal_soc->dev_base_addr + WINDOW_START +
			       (offset & WINDOW_RANGE_MASK));
		hal_unlock_reg_access(hal_soc, &flags);
	}

	if ((!hal_soc->init_phase) &&
	    hif_force_wake_release(hal_soc->hif_handle)) {
		hal_err("Wake up release failed");
		QDF_BUG(0);
		return 0;
	}

	return ret;
}
#endif

/**
 * hal_read_address_32_mb() - Read 32-bit value from the register
 * @soc: soc handle
 * @addr: register address to read
 *
 * Return: 32-bit value
 */
static inline
uint32_t hal_read_address_32_mb(struct hal_soc *soc,
				qdf_iomem_t addr)
{
	uint32_t offset;
	uint32_t ret;
	qdf_iomem_t new_addr;

	if (!soc->use_register_windowing)
		return qdf_ioread32(addr);

	offset = addr - soc->dev_base_addr;
	if (soc->static_window_map) {
		new_addr = hal_get_window_address(soc, addr);
		return qdf_ioread32(new_addr);
	}

	ret = hal_read32_mb(soc, offset);
	return ret;
}

/**
 * hal_attach - Initialize HAL layer
 * @hif_handle: Opaque HIF handle
 * @qdf_dev: QDF device
 *
 * Return: Opaque HAL SOC handle; NULL on failure
 *
 * This function should be called as part of HIF initialization (for accessing
 * copy engines). DP layer will get hal_soc handle using hif_get_hal_handle()
 */
void *hal_attach(struct hif_opaque_softc *hif_handle, qdf_device_t qdf_dev);

/**
 * hal_detach - Detach HAL layer
 * @hal_soc: HAL SOC handle
 *
 * This function should be called as part of HIF detach
 *
 */
extern void hal_detach(void *hal_soc);

/* SRNG type to be passed in APIs hal_srng_get_entrysize and hal_srng_setup */
enum hal_ring_type {
	REO_DST = 0,
	REO_EXCEPTION = 1,
	REO_REINJECT = 2,
	REO_CMD = 3,
	REO_STATUS = 4,
	TCL_DATA = 5,
	TCL_CMD = 6,
	TCL_STATUS = 7,
	CE_SRC = 8,
	CE_DST = 9,
	CE_DST_STATUS = 10,
	WBM_IDLE_LINK = 11,
	SW2WBM_RELEASE = 12,
	WBM2SW_RELEASE = 13,
	RXDMA_BUF = 14,
	RXDMA_DST = 15,
	RXDMA_MONITOR_BUF = 16,
	RXDMA_MONITOR_STATUS = 17,
	RXDMA_MONITOR_DST = 18,
	RXDMA_MONITOR_DESC = 19,
	DIR_BUF_RX_DMA_SRC = 20,
#ifdef WLAN_FEATURE_CIF_CFR
	WIFI_POS_SRC,
#endif
	MAX_RING_TYPES
};

#define HAL_SRNG_LMAC_RING 0x80000000
/* SRNG flags passed in hal_srng_params.flags */
#define HAL_SRNG_MSI_SWAP				0x00000008
#define HAL_SRNG_RING_PTR_SWAP			0x00000010
#define HAL_SRNG_DATA_TLV_SWAP			0x00000020
#define HAL_SRNG_LOW_THRES_INTR_ENABLE	0x00010000
#define HAL_SRNG_MSI_INTR				0x00020000
#define HAL_SRNG_CACHED_DESC		0x00040000

#define PN_SIZE_24 0
#define PN_SIZE_48 1
#define PN_SIZE_128 2

#ifdef FORCE_WAKE
/**
 * hal_set_init_phase() - Indicate initialization of
 *                        datapath rings
 * @soc: hal_soc handle
 * @init_phase: flag to indicate datapath rings
 *              initialization status
 *
 * Return: None
 */
void hal_set_init_phase(hal_soc_handle_t soc, bool init_phase);
#else
static inline
void hal_set_init_phase(hal_soc_handle_t soc, bool init_phase)
{
}
#endif /* FORCE_WAKE */

/**
 * hal_srng_get_entrysize - Returns size of ring entry in bytes. Should be
 * used by callers for calculating the size of memory to be allocated before
 * calling hal_srng_setup to setup the ring
 *
 * @hal_soc: Opaque HAL SOC handle
 * @ring_type: one of the types from hal_ring_type
 *
 */
extern uint32_t hal_srng_get_entrysize(void *hal_soc, int ring_type);

/**
 * hal_srng_max_entries - Returns maximum possible number of ring entries
 * @hal_soc: Opaque HAL SOC handle
 * @ring_type: one of the types from hal_ring_type
 *
 * Return: Maximum number of entries for the given ring_type
 */
uint32_t hal_srng_max_entries(void *hal_soc, int ring_type);

/**
 * hal_srng_dump - Dump ring status
 * @srng: hal srng pointer
 */
void hal_srng_dump(struct hal_srng *srng);

/**
 * hal_srng_get_dir - Returns the direction of the ring
 * @hal_soc: Opaque HAL SOC handle
 * @ring_type: one of the types from hal_ring_type
 *
 * Return: Ring direction
 */
enum hal_srng_dir hal_srng_get_dir(void *hal_soc, int ring_type);

/* HAL memory information */
struct hal_mem_info {
	/* dev base virtual addr */
	void *dev_base_addr;
	/* dev base physical addr */
	void *dev_base_paddr;
	/* Remote virtual pointer memory for HW/FW updates */
	void *shadow_rdptr_mem_vaddr;
	/* Remote physical pointer memory for HW/FW updates */
	void *shadow_rdptr_mem_paddr;
	/* Shared memory for ring pointer updates from host to FW */
	void *shadow_wrptr_mem_vaddr;
	/* Shared physical memory for ring pointer updates from host to FW */
	void *shadow_wrptr_mem_paddr;
};

/* SRNG parameters to be passed to hal_srng_setup */
struct hal_srng_params {
	/* Physical base address of the ring */
	qdf_dma_addr_t ring_base_paddr;
	/* Virtual base address of the ring */
	void *ring_base_vaddr;
	/* Number of entries in ring */
	uint32_t num_entries;
	/* max transfer length */
	uint16_t max_buffer_length;
	/* MSI Address */
	qdf_dma_addr_t msi_addr;
	/* MSI data */
	uint32_t msi_data;
	/* Interrupt timer threshold – in micro seconds */
	uint32_t intr_timer_thres_us;
	/* Interrupt batch counter threshold – in number of ring entries */
	uint32_t intr_batch_cntr_thres_entries;
	/* Low threshold – in number of ring entries
	 * (valid for src rings only)
	 */
	uint32_t low_threshold;
	/* Misc flags */
	uint32_t flags;
	/* Unique ring id */
	uint8_t ring_id;
	/* Source or Destination ring */
	enum hal_srng_dir ring_dir;
	/* Size of ring entry */
	uint32_t entry_size;
	/* hw register base address */
	void *hwreg_base[MAX_SRNG_REG_GROUPS];
};

/* hal_construct_shadow_config() - initialize the shadow registers for dp rings
 * @hal_soc: hal handle
 *
 * Return: QDF_STATUS_OK on success
 */
extern QDF_STATUS hal_construct_shadow_config(void *hal_soc);

/* hal_set_one_shadow_config() - add a config for the specified ring
 * @hal_soc: hal handle
 * @ring_type: ring type
 * @ring_num: ring num
 *
 * The ring type and ring num uniquely specify the ring.  After this call,
 * the hp/tp will be added as the next entry in the shadow register
 * configuration table.  The hal code will use the shadow register address
 * in place of the hp/tp address.
 *
 * This function is exposed, so that the CE module can skip configuring shadow
 * registers for unused ring and rings assigned to the firmware.
 *
 * Return: QDF_STATUS_OK on success
 */
extern QDF_STATUS hal_set_one_shadow_config(void *hal_soc, int ring_type,
					    int ring_num);
/**
 * hal_get_shadow_config() - retrieve the config table
 * @hal_soc: hal handle
 * @shadow_config: will point to the config table after the call
 * @num_shadow_registers_configured: will contain the number of valid entries
 */
extern void hal_get_shadow_config(void *hal_soc,
				  struct pld_shadow_reg_v2_cfg **shadow_config,
				  int *num_shadow_registers_configured);
/**
 * hal_srng_setup - Initialize HW SRNG ring.
 *
 * @hal_soc: Opaque HAL SOC handle
 * @ring_type: one of the types from hal_ring_type
 * @ring_num: Ring number if there are multiple rings of
 *		same type (starting from 0)
 * @mac_id: valid MAC Id should be passed if ring type is one of lmac rings
 * @ring_params: SRNG ring params in hal_srng_params structure.
 *
 * Callers are expected to allocate contiguous ring memory of size
 * 'num_entries * entry_size' bytes and pass the physical and virtual base
 * addresses through 'ring_base_paddr' and 'ring_base_vaddr' in hal_srng_params
 * structure. Ring base address should be 8 byte aligned and size of each ring
 * entry should be queried using the API hal_srng_get_entrysize
 *
 * Return: Opaque pointer to ring on success
 *		 NULL on failure (if given ring is not available)
 */
extern void *hal_srng_setup(void *hal_soc, int ring_type, int ring_num,
	int mac_id, struct hal_srng_params *ring_params);
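
/*
 * Setup sketch (illustrative only): filling hal_srng_params per the
 * contract above and handing it to hal_srng_setup(). The ring type,
 * ring number and the caller-supplied memory (e.g. obtained from
 * qdf_mem_alloc_consistent(), sized num_entries *
 * hal_srng_get_entrysize() bytes, 8-byte aligned) are assumptions made
 * for the example.
 */
static inline void *hal_example_setup_reo_ring(void *hal_soc,
					       void *ring_vaddr,
					       qdf_dma_addr_t ring_paddr,
					       uint32_t num_entries)
{
	struct hal_srng_params params = {0};

	params.ring_base_vaddr = ring_vaddr;
	params.ring_base_paddr = ring_paddr;
	params.num_entries = num_entries;

	/* ring 0 of this type; mac_id is unused for non-LMAC rings */
	return hal_srng_setup(hal_soc, REO_DST, 0, 0, &params);
}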

/* Remapping ids of REO rings */
#define REO_REMAP_TCL 0
#define REO_REMAP_SW1 1
#define REO_REMAP_SW2 2
#define REO_REMAP_SW3 3
#define REO_REMAP_SW4 4
#define REO_REMAP_RELEASE 5
#define REO_REMAP_FW 6
#define REO_REMAP_UNUSED 7

/*
 * Macro to access HWIO_REO_R0_DESTINATION_RING_CTRL_IX_0
 * to map destination to rings
 */
#define HAL_REO_REMAP_IX0(_VALUE, _OFFSET) \
	((_VALUE) << \
	 (HWIO_REO_R0_DESTINATION_RING_CTRL_IX_0_DEST_RING_MAPPING_ ## \
	  _OFFSET ## _SHFT))

/*
 * Macro to access HWIO_REO_R0_DESTINATION_RING_CTRL_IX_2
 * to map destination to rings
 */
#define HAL_REO_REMAP_IX2(_VALUE, _OFFSET) \
	((_VALUE) << \
	 (HWIO_REO_R0_DESTINATION_RING_CTRL_IX_2_DEST_RING_MAPPING_ ## \
	  _OFFSET ## _SHFT))

/*
 * Macro to access HWIO_REO_R0_DESTINATION_RING_CTRL_IX_3
 * to map destination to rings
 */
#define HAL_REO_REMAP_IX3(_VALUE, _OFFSET) \
	((_VALUE) << \
	 (HWIO_REO_R0_DESTINATION_RING_CTRL_IX_3_DEST_RING_MAPPING_ ## \
	  _OFFSET ## _SHFT))

/**
 * hal_reo_read_write_ctrl_ix - Read or write REO_DESTINATION_RING_CTRL_IX
 * @hal_soc_hdl: HAL SOC handle
 * @read: boolean value to indicate if read or write
 * @ix0: pointer to store IX0 reg value
 * @ix1: pointer to store IX1 reg value
 * @ix2: pointer to store IX2 reg value
 * @ix3: pointer to store IX3 reg value
 */
void hal_reo_read_write_ctrl_ix(hal_soc_handle_t hal_soc_hdl, bool read,
				uint32_t *ix0, uint32_t *ix1,
				uint32_t *ix2, uint32_t *ix3);
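
/*
 * Usage sketch (illustrative only, kept as a comment since the HWIO
 * register macros come from target-specific headers): composing an IX0
 * value that steers REO destination indices 0..7 and writing it back;
 * the chosen mapping and the assumption that NULL pointers skip the
 * other index registers are for illustration, not driver defaults:
 *
 *	uint32_t ix0 = HAL_REO_REMAP_IX0(REO_REMAP_TCL, 0) |
 *		       HAL_REO_REMAP_IX0(REO_REMAP_SW1, 1) |
 *		       HAL_REO_REMAP_IX0(REO_REMAP_SW2, 2) |
 *		       HAL_REO_REMAP_IX0(REO_REMAP_SW3, 3);
 *
 *	hal_reo_read_write_ctrl_ix(hal_soc_hdl, false, &ix0,
 *				   NULL, NULL, NULL);
 */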

/**
 * hal_srng_dst_set_hp_paddr() - Set physical address to dest SRNG head pointer
 * @sring: srng pointer
 * @paddr: physical address
 */
extern void hal_srng_dst_set_hp_paddr(struct hal_srng *sring, uint64_t paddr);

/**
 * hal_srng_dst_init_hp() - Initialize head pointer with cached head pointer
 * @srng: srng pointer
 * @vaddr: virtual address
 */
extern void hal_srng_dst_init_hp(struct hal_srng *srng, uint32_t *vaddr);

/**
 * hal_srng_cleanup - Deinitialize HW SRNG ring.
 * @hal_soc: Opaque HAL SOC handle
 * @hal_ring_hdl: Opaque HAL SRNG pointer
 */
void hal_srng_cleanup(void *hal_soc, hal_ring_handle_t hal_ring_hdl);

static inline bool hal_srng_initialized(hal_ring_handle_t hal_ring_hdl)
{
	struct hal_srng *srng = (struct hal_srng *)hal_ring_hdl;

	return !!srng->initialized;
}

/**
 * hal_srng_dst_peek - Check if there are any entries in the ring (peek)
 * @hal_soc: Opaque HAL SOC handle
 * @hal_ring_hdl: Destination ring pointer
 *
 * Caller takes responsibility for any locking needs.
 *
 * Return: Opaque pointer for next ring entry; NULL on failure
 */
static inline
void *hal_srng_dst_peek(hal_soc_handle_t hal_soc_hdl,
			hal_ring_handle_t hal_ring_hdl)
{
	struct hal_srng *srng = (struct hal_srng *)hal_ring_hdl;

	if (srng->u.dst_ring.tp != srng->u.dst_ring.cached_hp)
		return (void *)(&srng->ring_base_vaddr[srng->u.dst_ring.tp]);

	return NULL;
}

/**
 * hal_srng_access_start_unlocked - Start ring access (unlocked). Should use
 * hal_srng_access_start if locked access is required
 *
 * @hal_soc: Opaque HAL SOC handle
 * @hal_ring_hdl: Ring pointer (Source or Destination ring)
 *
 * Return: 0 on success; error on failure
 */
static inline int
hal_srng_access_start_unlocked(hal_soc_handle_t hal_soc_hdl,
			       hal_ring_handle_t hal_ring_hdl)
{
	struct hal_srng *srng = (struct hal_srng *)hal_ring_hdl;
	struct hal_soc *soc = (struct hal_soc *)hal_soc_hdl;
	uint32_t *desc;

	if (srng->ring_dir == HAL_SRNG_SRC_RING)
		srng->u.src_ring.cached_tp =
			*(volatile uint32_t *)(srng->u.src_ring.tp_addr);
	else {
		srng->u.dst_ring.cached_hp =
			*(volatile uint32_t *)(srng->u.dst_ring.hp_addr);

		if (srng->flags & HAL_SRNG_CACHED_DESC) {
			desc = hal_srng_dst_peek(hal_soc_hdl, hal_ring_hdl);
			if (qdf_likely(desc)) {
				qdf_mem_dma_cache_sync(soc->qdf_dev,
						       qdf_mem_virt_to_phys
						       (desc),
						       QDF_DMA_FROM_DEVICE,
						       (srng->entry_size *
							sizeof(uint32_t)));
				qdf_prefetch(desc);
			}
		}
	}

	return 0;
}

/**
 * hal_srng_access_start - Start (locked) ring access
 *
 * @hal_soc: Opaque HAL SOC handle
 * @hal_ring_hdl: Ring pointer (Source or Destination ring)
 *
 * Return: 0 on success; error on failure
 */
static inline int hal_srng_access_start(hal_soc_handle_t hal_soc_hdl,
					hal_ring_handle_t hal_ring_hdl)
{
	struct hal_srng *srng = (struct hal_srng *)hal_ring_hdl;

	if (qdf_unlikely(!hal_ring_hdl)) {
		qdf_print("Error: Invalid hal_ring\n");
		return -EINVAL;
	}

	SRNG_LOCK(&(srng->lock));

	return hal_srng_access_start_unlocked(hal_soc_hdl, hal_ring_hdl);
}

/**
 * hal_srng_dst_get_next - Get next entry from a destination ring and move
 * cached tail pointer
 *
 * @hal_soc: Opaque HAL SOC handle
 * @hal_ring_hdl: Destination ring pointer
 *
 * Return: Opaque pointer for next ring entry; NULL on failure
 */
static inline
void *hal_srng_dst_get_next(void *hal_soc,
			    hal_ring_handle_t hal_ring_hdl)
{
	struct hal_srng *srng = (struct hal_srng *)hal_ring_hdl;
	struct hal_soc *soc = (struct hal_soc *)hal_soc;
	uint32_t *desc;
	uint32_t *desc_next;
	uint32_t tp;

	if (srng->u.dst_ring.tp != srng->u.dst_ring.cached_hp) {
		desc = &(srng->ring_base_vaddr[srng->u.dst_ring.tp]);
		/* TODO: Using % is expensive, but we have to do this since
		 * size of some SRNG rings is not power of 2 (due to descriptor
		 * sizes). Need to create separate API for rings used
		 * per-packet, with sizes power of 2 (TCL2SW, REO2SW,
		 * SW2RXDMA and CE rings)
		 */
		srng->u.dst_ring.tp = (srng->u.dst_ring.tp + srng->entry_size) %
			srng->ring_size;

		if (srng->flags & HAL_SRNG_CACHED_DESC) {
			tp = srng->u.dst_ring.tp;
			desc_next = &srng->ring_base_vaddr[tp];
			qdf_mem_dma_cache_sync(soc->qdf_dev,
					       qdf_mem_virt_to_phys(desc_next),
					       QDF_DMA_FROM_DEVICE,
					       (srng->entry_size *
						sizeof(uint32_t)));
			qdf_prefetch(desc_next);
		}

		return (void *)desc;
	}

	return NULL;
}

/**
 * hal_srng_dst_get_next_hp - Get next entry from a destination ring and move
 * cached head pointer
 *
 * @hal_soc: Opaque HAL SOC handle
 * @hal_ring_hdl: Destination ring pointer
 *
 * Return: Opaque pointer for next ring entry; NULL on failure
 */
static inline void *
hal_srng_dst_get_next_hp(hal_soc_handle_t hal_soc_hdl,
			 hal_ring_handle_t hal_ring_hdl)
{
	struct hal_srng *srng = (struct hal_srng *)hal_ring_hdl;
	uint32_t *desc;
	/* TODO: Using % is expensive, but we have to do this since
	 * size of some SRNG rings is not power of 2 (due to descriptor
	 * sizes). Need to create separate API for rings used
	 * per-packet, with sizes power of 2 (TCL2SW, REO2SW,
	 * SW2RXDMA and CE rings)
	 */
	uint32_t next_hp = (srng->u.dst_ring.cached_hp + srng->entry_size) %
		srng->ring_size;

	if (next_hp != srng->u.dst_ring.tp) {
		desc = &(srng->ring_base_vaddr[srng->u.dst_ring.cached_hp]);
		srng->u.dst_ring.cached_hp = next_hp;
		return (void *)desc;
	}

	return NULL;
}

/**
 * hal_srng_dst_peek_sync - Check if there are any entries in the ring (peek)
 * @hal_soc: Opaque HAL SOC handle
 * @hal_ring_hdl: Destination ring pointer
 *
 * Sync cached head pointer with HW.
 * Caller takes responsibility for any locking needs.
 *
 * Return: Opaque pointer for next ring entry; NULL on failure
 */
static inline
void *hal_srng_dst_peek_sync(hal_soc_handle_t hal_soc_hdl,
			     hal_ring_handle_t hal_ring_hdl)
{
	struct hal_srng *srng = (struct hal_srng *)hal_ring_hdl;

	srng->u.dst_ring.cached_hp =
		*(volatile uint32_t *)(srng->u.dst_ring.hp_addr);

	if (srng->u.dst_ring.tp != srng->u.dst_ring.cached_hp)
		return (void *)(&(srng->ring_base_vaddr[srng->u.dst_ring.tp]));

	return NULL;
}

/**
 * hal_srng_dst_peek_sync_locked - Peek for any entries in the ring
 * @hal_soc: Opaque HAL SOC handle
 * @hal_ring_hdl: Destination ring pointer
 *
 * Sync cached head pointer with HW.
 * This function takes up SRNG_LOCK. Should not be called with SRNG lock held.
 *
 * Return: Opaque pointer for next ring entry; NULL on failure
 */
static inline
void *hal_srng_dst_peek_sync_locked(hal_soc_handle_t hal_soc_hdl,
				    hal_ring_handle_t hal_ring_hdl)
{
	struct hal_srng *srng = (struct hal_srng *)hal_ring_hdl;
	void *ring_desc_ptr = NULL;

	if (qdf_unlikely(!hal_ring_hdl)) {
		qdf_print("Error: Invalid hal_ring\n");
		return NULL;
	}

	SRNG_LOCK(&srng->lock);

	ring_desc_ptr = hal_srng_dst_peek_sync(hal_soc_hdl, hal_ring_hdl);

	SRNG_UNLOCK(&srng->lock);

	return ring_desc_ptr;
}

/**
 * hal_srng_dst_num_valid - Returns number of valid entries (to be processed
 * by SW) in destination ring
 *
 * @hal_soc: Opaque HAL SOC handle
 * @hal_ring_hdl: Destination ring pointer
 * @sync_hw_ptr: Sync cached head pointer with HW
 *
 * Return: Number of valid entries
 */
static inline
uint32_t hal_srng_dst_num_valid(void *hal_soc,
				hal_ring_handle_t hal_ring_hdl,
				int sync_hw_ptr)
{
	struct hal_srng *srng = (struct hal_srng *)hal_ring_hdl;
	uint32_t hp;
	uint32_t tp = srng->u.dst_ring.tp;

	if (sync_hw_ptr) {
		hp = *(volatile uint32_t *)(srng->u.dst_ring.hp_addr);
		srng->u.dst_ring.cached_hp = hp;
	} else {
		hp = srng->u.dst_ring.cached_hp;
	}

	if (hp >= tp)
		return (hp - tp) / srng->entry_size;
	else
		return (srng->ring_size - tp + hp) / srng->entry_size;
}
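
/*
 * Worked example (values illustrative): for a ring with
 * ring_size = 8 words and entry_size = 2 words, tp = 6 and hp = 2
 * means the head pointer has wrapped, so the wrap branch above gives
 * (ring_size - tp + hp) / entry_size = (8 - 6 + 2) / 2 = 2 valid
 * entries pending for SW.
 */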

/**
 * hal_srng_dst_num_valid_locked - Returns num valid entries to be processed
 *
 * @hal_soc: Opaque HAL SOC handle
 * @hal_ring_hdl: Destination ring pointer
 * @sync_hw_ptr: Sync cached head pointer with HW
 *
 * Returns number of valid entries to be processed by the host driver. The
 * function takes up SRNG lock.
 *
 * Return: Number of valid destination entries
 */
static inline uint32_t
hal_srng_dst_num_valid_locked(hal_soc_handle_t hal_soc,
			      hal_ring_handle_t hal_ring_hdl,
			      int sync_hw_ptr)
{
	uint32_t num_valid;
	struct hal_srng *srng = (struct hal_srng *)hal_ring_hdl;

	SRNG_LOCK(&srng->lock);
	num_valid = hal_srng_dst_num_valid(hal_soc, hal_ring_hdl, sync_hw_ptr);
	SRNG_UNLOCK(&srng->lock);

	return num_valid;
}

/**
 * hal_srng_src_reap_next - Reap next entry from a source ring and move reap
 * pointer. This can be used to release any buffers associated with completed
 * ring entries. Note that this should not be used for posting new descriptor
 * entries. Posting of new entries should be done only using
 * hal_srng_src_get_next_reaped when this function is used for reaping.
 *
 * @hal_soc: Opaque HAL SOC handle
 * @hal_ring_hdl: Source ring pointer
 *
 * Return: Opaque pointer for next ring entry; NULL on failure
 */
static inline void *
hal_srng_src_reap_next(void *hal_soc, hal_ring_handle_t hal_ring_hdl)
{
	struct hal_srng *srng = (struct hal_srng *)hal_ring_hdl;
	uint32_t *desc;

	/* TODO: Using % is expensive, but we have to do this since
	 * size of some SRNG rings is not power of 2 (due to descriptor
	 * sizes). Need to create separate API for rings used
	 * per-packet, with sizes power of 2 (TCL2SW, REO2SW,
	 * SW2RXDMA and CE rings)
	 */
	uint32_t next_reap_hp = (srng->u.src_ring.reap_hp + srng->entry_size) %
		srng->ring_size;

	if (next_reap_hp != srng->u.src_ring.cached_tp) {
		desc = &(srng->ring_base_vaddr[next_reap_hp]);
		srng->u.src_ring.reap_hp = next_reap_hp;
		return (void *)desc;
	}

	return NULL;
}

/**
 * hal_srng_src_get_next_reaped - Get next entry from a source ring that is
 * already reaped using hal_srng_src_reap_next, for posting new entries to
 * the ring
 *
 * @hal_soc: Opaque HAL SOC handle
 * @hal_ring_hdl: Source ring pointer
 *
 * Return: Opaque pointer for next (reaped) source ring entry; NULL on failure
 */
static inline void *
hal_srng_src_get_next_reaped(void *hal_soc, hal_ring_handle_t hal_ring_hdl)
{
	struct hal_srng *srng = (struct hal_srng *)hal_ring_hdl;
	uint32_t *desc;

	if (srng->u.src_ring.hp != srng->u.src_ring.reap_hp) {
		desc = &(srng->ring_base_vaddr[srng->u.src_ring.hp]);
		srng->u.src_ring.hp = (srng->u.src_ring.hp + srng->entry_size) %
			srng->ring_size;

		return (void *)desc;
	}

	return NULL;
}

/**
 * hal_srng_src_pending_reap_next - Reap next entry from a source ring and
 * move reap pointer. This API is used in detach path to release any buffers
 * associated with ring entries which are pending reap.
 *
 * @hal_soc: Opaque HAL SOC handle
 * @hal_ring_hdl: Source ring pointer
 *
 * Return: Opaque pointer for next ring entry; NULL on failure
 */
static inline void *
hal_srng_src_pending_reap_next(void *hal_soc, hal_ring_handle_t hal_ring_hdl)
{
	struct hal_srng *srng = (struct hal_srng *)hal_ring_hdl;
	uint32_t *desc;

	uint32_t next_reap_hp = (srng->u.src_ring.reap_hp + srng->entry_size) %
		srng->ring_size;

	if (next_reap_hp != srng->u.src_ring.hp) {
		desc = &(srng->ring_base_vaddr[next_reap_hp]);
		srng->u.src_ring.reap_hp = next_reap_hp;
		return (void *)desc;
	}

	return NULL;
}

/**
 * hal_srng_src_done_val - Returns number of reapable entries in a source ring
 *
 * @hal_soc: Opaque HAL SOC handle
 * @hal_ring_hdl: Source ring pointer
 *
 * Return: Number of source ring entries which HW has consumed but SW has
 * not yet reaped
 */
static inline uint32_t
hal_srng_src_done_val(void *hal_soc, hal_ring_handle_t hal_ring_hdl)
{
	struct hal_srng *srng = (struct hal_srng *)hal_ring_hdl;
	/* TODO: Using % is expensive, but we have to do this since
	 * size of some SRNG rings is not power of 2 (due to descriptor
	 * sizes). Need to create separate API for rings used
	 * per-packet, with sizes power of 2 (TCL2SW, REO2SW,
	 * SW2RXDMA and CE rings)
	 */
	uint32_t next_reap_hp = (srng->u.src_ring.reap_hp + srng->entry_size) %
		srng->ring_size;

	if (next_reap_hp == srng->u.src_ring.cached_tp)
		return 0;

	if (srng->u.src_ring.cached_tp > next_reap_hp)
		return (srng->u.src_ring.cached_tp - next_reap_hp) /
			srng->entry_size;
	else
		return ((srng->ring_size - next_reap_hp) +
			srng->u.src_ring.cached_tp) / srng->entry_size;
}
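
/*
 * Reap sketch (illustrative only): releasing completed source ring
 * buffers via hal_srng_src_reap_next(), per the contract described
 * above; the freed slots are later reused for posting through
 * hal_srng_src_get_next_reaped(). Buffer bookkeeping is elided and the
 * function name is hypothetical.
 */
static inline uint32_t
hal_example_reap_src_ring(void *hal_soc, hal_ring_handle_t hal_ring_hdl)
{
	uint32_t reaped = 0;

	/* each reaped entry's buffer can be freed here */
	while (hal_srng_src_reap_next(hal_soc, hal_ring_hdl))
		reaped++;

	return reaped;
}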

/**
 * hal_get_entrysize_from_srng() - Retrieve ring entry size
 * @hal_ring_hdl: Source ring pointer
 *
 * Return: uint8_t
 */
static inline
uint8_t hal_get_entrysize_from_srng(hal_ring_handle_t hal_ring_hdl)
{
	struct hal_srng *srng = (struct hal_srng *)hal_ring_hdl;

	return srng->entry_size;
}

/**
 * hal_get_sw_hptp - Get SW head and tail pointer location for any ring
 * @hal_soc: Opaque HAL SOC handle
 * @hal_ring_hdl: Source ring pointer
 * @tailp: Tail Pointer
 * @headp: Head Pointer
 *
 * Return: Update tail pointer and head pointer in arguments.
 */
static inline
void hal_get_sw_hptp(void *hal_soc, hal_ring_handle_t hal_ring_hdl,
		     uint32_t *tailp, uint32_t *headp)
{
	struct hal_srng *srng = (struct hal_srng *)hal_ring_hdl;

	if (srng->ring_dir == HAL_SRNG_SRC_RING) {
		*headp = srng->u.src_ring.hp;
		*tailp = *srng->u.src_ring.tp_addr;
	} else {
		*tailp = srng->u.dst_ring.tp;
		*headp = *srng->u.dst_ring.hp_addr;
	}
}

/**
 * hal_srng_src_get_next - Get next entry from a source ring and move cached
 * tail pointer
 *
 * @hal_soc: Opaque HAL SOC handle
 * @hal_ring_hdl: Source ring pointer
 *
 * Return: Opaque pointer for next ring entry; NULL on failure
 */
static inline
void *hal_srng_src_get_next(void *hal_soc,
			    hal_ring_handle_t hal_ring_hdl)
{
	struct hal_srng *srng = (struct hal_srng *)hal_ring_hdl;
	uint32_t *desc;
	/* TODO: Using % is expensive, but we have to do this since
	 * size of some SRNG rings is not power of 2 (due to descriptor
	 * sizes). Need to create separate API for rings used
	 * per-packet, with sizes power of 2 (TCL2SW, REO2SW,
	 * SW2RXDMA and CE rings)
	 */
	uint32_t next_hp = (srng->u.src_ring.hp + srng->entry_size) %
		srng->ring_size;

	if (next_hp != srng->u.src_ring.cached_tp) {
		desc = &(srng->ring_base_vaddr[srng->u.src_ring.hp]);
		srng->u.src_ring.hp = next_hp;
		/* TODO: Since reap function is not used by all rings, we can
		 * remove the following update of reap_hp in this function
		 * if we can ensure that only hal_srng_src_get_next_reaped
		 * is used for the rings requiring reap functionality
		 */
		srng->u.src_ring.reap_hp = next_hp;
		return (void *)desc;
	}

	return NULL;
}

/**
 * hal_srng_src_peek - Get next entry from a ring without moving head pointer.
 * hal_srng_src_get_next should be called subsequently to move the head pointer
 *
 * @hal_soc: Opaque HAL SOC handle
 * @hal_ring_hdl: Source ring pointer
 *
 * Return: Opaque pointer for next ring entry; NULL on failure
 */
static inline
void *hal_srng_src_peek(hal_soc_handle_t hal_soc_hdl,
			hal_ring_handle_t hal_ring_hdl)
{
	struct hal_srng *srng = (struct hal_srng *)hal_ring_hdl;
	uint32_t *desc;

	/* TODO: Using % is expensive, but we have to do this since
	 * size of some SRNG rings is not power of 2 (due to descriptor
	 * sizes). Need to create separate API for rings used
	 * per-packet, with sizes power of 2 (TCL2SW, REO2SW,
	 * SW2RXDMA and CE rings)
	 */
	if (((srng->u.src_ring.hp + srng->entry_size) %
		srng->ring_size) != srng->u.src_ring.cached_tp) {
		desc = &(srng->ring_base_vaddr[srng->u.src_ring.hp]);
		return (void *)desc;
	}

	return NULL;
}

/**
 * hal_srng_src_num_avail - Returns number of available entries in src ring
 *
 * @hal_soc: Opaque HAL SOC handle
 * @hal_ring_hdl: Source ring pointer
 * @sync_hw_ptr: Sync cached tail pointer with HW
 *
 */
static inline uint32_t
hal_srng_src_num_avail(void *hal_soc,
		       hal_ring_handle_t hal_ring_hdl, int sync_hw_ptr)
{
	struct hal_srng *srng = (struct hal_srng *)hal_ring_hdl;
	uint32_t tp;
	uint32_t hp = srng->u.src_ring.hp;

	if (sync_hw_ptr) {
		tp = *(srng->u.src_ring.tp_addr);
		srng->u.src_ring.cached_tp = tp;
	} else {
		tp = srng->u.src_ring.cached_tp;
	}

	if (tp > hp)
		return ((tp - hp) / srng->entry_size) - 1;
	else
		return ((srng->ring_size - hp + tp) / srng->entry_size) - 1;
}

/**
 * hal_srng_access_end_unlocked - End ring access (unlocked) - update cached
 * ring head/tail pointers to HW.
 * This should be used only if hal_srng_access_start_unlocked was used to
 * start ring access
 *
 * @hal_soc: Opaque HAL SOC handle
 * @hal_ring_hdl: Ring pointer (Source or Destination ring)
 *
 * Return: None
 */
static inline void
hal_srng_access_end_unlocked(void *hal_soc, hal_ring_handle_t hal_ring_hdl)
{
	struct hal_srng *srng = (struct hal_srng *)hal_ring_hdl;

	/* TODO: See if we need a write memory barrier here */
	if (srng->flags & HAL_SRNG_LMAC_RING) {
		/* For LMAC rings, ring pointer updates are done through FW and
		 * hence written to a shared memory location that is read by FW
		 */
		if (srng->ring_dir == HAL_SRNG_SRC_RING) {
			*(srng->u.src_ring.hp_addr) = srng->u.src_ring.hp;
		} else {
			*(srng->u.dst_ring.tp_addr) = srng->u.dst_ring.tp;
		}
	} else {
		if (srng->ring_dir == HAL_SRNG_SRC_RING)
			hal_srng_write_address_32_mb(hal_soc,
						     srng->u.src_ring.hp_addr,
						     srng->u.src_ring.hp);
		else
			hal_srng_write_address_32_mb(hal_soc,
						     srng->u.dst_ring.tp_addr,
						     srng->u.dst_ring.tp);
	}
}

/**
 * hal_srng_access_end - Unlock ring access and update cached ring head/tail
 * pointers to HW
 * This should be used only if hal_srng_access_start was used to start ring
 * access
 *
 * @hal_soc: Opaque HAL SOC handle
 * @hal_ring_hdl: Ring pointer (Source or Destination ring)
 *
 * Return: None
 */
static inline void
hal_srng_access_end(void *hal_soc, hal_ring_handle_t hal_ring_hdl)
{
	struct hal_srng *srng = (struct hal_srng *)hal_ring_hdl;

	if (qdf_unlikely(!hal_ring_hdl)) {
		qdf_print("Error: Invalid hal_ring\n");
		return;
	}

	hal_srng_access_end_unlocked(hal_soc, hal_ring_hdl);
	SRNG_UNLOCK(&(srng->lock));
}

/**
 * hal_srng_access_end_reap - Unlock ring access
 * This should be used only if hal_srng_access_start was used to start ring
 * access and should be used only while reaping SRC ring completions
 *
 * @hal_soc: Opaque HAL SOC handle
 * @hal_ring_hdl: Ring pointer (Source or Destination ring)
 *
 * Return: None
 */
static inline void
hal_srng_access_end_reap(void *hal_soc, hal_ring_handle_t hal_ring_hdl)
{
	struct hal_srng *srng = (struct hal_srng *)hal_ring_hdl;

	SRNG_UNLOCK(&(srng->lock));
}
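
/*
 * End-to-end sketch (illustrative only): the canonical locked
 * access-start/access-end pattern, first draining a destination ring
 * and then posting one entry to a source ring. The function names and
 * the commented per-descriptor handlers are hypothetical.
 */
static inline void
hal_example_drain_dst_ring(hal_soc_handle_t hal_soc_hdl,
			   hal_ring_handle_t hal_ring_hdl)
{
	void *desc;

	if (hal_srng_access_start(hal_soc_hdl, hal_ring_hdl))
		return;

	/* consume every entry published by HW since the last sync */
	while ((desc = hal_srng_dst_get_next(hal_soc_hdl, hal_ring_hdl))) {
		/* hal_example_process(desc); */
	}

	hal_srng_access_end(hal_soc_hdl, hal_ring_hdl);
}

static inline int
hal_example_post_src_entry(hal_soc_handle_t hal_soc_hdl,
			   hal_ring_handle_t hal_ring_hdl)
{
	void *desc;
	int ret = 0;

	if (hal_srng_access_start(hal_soc_hdl, hal_ring_hdl))
		return -EINVAL;

	/* check for space against the cached tail pointer */
	if (hal_srng_src_num_avail(hal_soc_hdl, hal_ring_hdl, 0)) {
		desc = hal_srng_src_get_next(hal_soc_hdl, hal_ring_hdl);
		if (!desc)
			ret = -ENOMEM;
		/* else hal_example_fill(desc); */
	} else {
		ret = -ENOMEM;
	}

	hal_srng_access_end(hal_soc_hdl, hal_ring_hdl);
	return ret;
}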

/* TODO: Check if the following definitions are available in HW headers */
#define WBM_IDLE_SCATTER_BUF_SIZE 32704
#define NUM_MPDUS_PER_LINK_DESC 6
#define NUM_MSDUS_PER_LINK_DESC 7
#define REO_QUEUE_DESC_ALIGN 128

#define LINK_DESC_ALIGN 128

#define ADDRESS_MATCH_TAG_VAL 0x5
/* Number of mpdu link pointers is 9 in case of TX_MPDU_QUEUE_HEAD and 14 in
 * case of TX_MPDU_QUEUE_EXT. We are defining a common average count here
 */
#define NUM_MPDU_LINKS_PER_QUEUE_DESC 12

/* TODO: Check with HW team on the scatter buffer size supported. As per WBM
 * MLD, scatter_buffer_size in IDLE_LIST_CONTROL register is 9 bits and size
 * should be specified in 16 word units. But the number of bits defined for
 * this field in HW header files is 5.
 */
#define WBM_IDLE_SCATTER_BUF_NEXT_PTR_SIZE 8


/**
 * hal_idle_list_scatter_buf_size - Get the size of each scatter buffer
 * in an idle list
 *
 * @hal_soc: Opaque HAL SOC handle
 *
 */
static inline
uint32_t hal_idle_list_scatter_buf_size(hal_soc_handle_t hal_soc_hdl)
{
	return WBM_IDLE_SCATTER_BUF_SIZE;
}

/**
 * hal_get_link_desc_size - Get the size of each link descriptor
 *
 * @hal_soc: Opaque HAL SOC handle
 *
 */
static inline uint32_t hal_get_link_desc_size(hal_soc_handle_t hal_soc_hdl)
{
	struct hal_soc *hal_soc = (struct hal_soc *)hal_soc_hdl;

	if (!hal_soc || !hal_soc->ops) {
		qdf_print("Error: Invalid ops\n");
		QDF_BUG(0);
		return -EINVAL;
	}
	if (!hal_soc->ops->hal_get_link_desc_size) {
		qdf_print("Error: Invalid function pointer\n");
		QDF_BUG(0);
		return -EINVAL;
	}
	return hal_soc->ops->hal_get_link_desc_size();
}

/**
 * hal_get_link_desc_align - Get the required start address alignment for
 * link descriptors
 *
 * @hal_soc: Opaque HAL SOC handle
 *
 */
static inline
uint32_t hal_get_link_desc_align(hal_soc_handle_t hal_soc_hdl)
{
	return LINK_DESC_ALIGN;
}

/**
 * hal_num_mpdus_per_link_desc - Get number of mpdus each link desc can hold
 *
 * @hal_soc: Opaque HAL SOC handle
 *
 */
static inline
uint32_t hal_num_mpdus_per_link_desc(hal_soc_handle_t hal_soc_hdl)
{
	return NUM_MPDUS_PER_LINK_DESC;
}

/**
 * hal_num_msdus_per_link_desc - Get number of msdus each link desc can hold
 *
 * @hal_soc: Opaque HAL SOC handle
 *
 */
static inline
uint32_t hal_num_msdus_per_link_desc(hal_soc_handle_t hal_soc_hdl)
{
	return NUM_MSDUS_PER_LINK_DESC;
}

/**
 * hal_num_mpdu_links_per_queue_desc - Get number of mpdu links each queue
 * descriptor can hold
 *
 * @hal_soc: Opaque HAL SOC handle
 *
 */
static inline
uint32_t hal_num_mpdu_links_per_queue_desc(hal_soc_handle_t hal_soc_hdl)
{
	return NUM_MPDU_LINKS_PER_QUEUE_DESC;
}

/**
 * hal_idle_scatter_buf_num_entries - Get the number of link desc entries
 * that can fit in the given scatter buffer size
 *
 * @hal_soc: Opaque HAL SOC handle
 * @scatter_buf_size: Size of scatter buffer
 *
 */
static inline
uint32_t hal_idle_scatter_buf_num_entries(hal_soc_handle_t hal_soc_hdl,
					  uint32_t scatter_buf_size)
{
	return (scatter_buf_size - WBM_IDLE_SCATTER_BUF_NEXT_PTR_SIZE) /
		hal_srng_get_entrysize(hal_soc_hdl, WBM_IDLE_LINK);
}

/**
 * hal_idle_list_num_scatter_bufs - Get the number of scatter buffers
 * needed to cover the given total memory with the given buffer size
 *
 * @hal_soc: Opaque HAL SOC handle
 * @total_mem: size of memory to be scattered
 * @scatter_buf_size: Size of scatter buffer
 *
 */
static inline
uint32_t hal_idle_list_num_scatter_bufs(hal_soc_handle_t hal_soc_hdl,
					uint32_t total_mem,
					uint32_t scatter_buf_size)
{
	uint8_t rem = (total_mem % (scatter_buf_size -
			WBM_IDLE_SCATTER_BUF_NEXT_PTR_SIZE)) ? 1 : 0;

	uint32_t num_scatter_bufs = (total_mem / (scatter_buf_size -
				WBM_IDLE_SCATTER_BUF_NEXT_PTR_SIZE)) + rem;

	return num_scatter_bufs;
}
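
/*
 * Worked example (numbers illustrative): with scatter_buf_size =
 * WBM_IDLE_SCATTER_BUF_SIZE (32704), each buffer carries
 * 32704 - 8 = 32696 usable bytes after the next-buffer pointer. To
 * cover total_mem = 1,000,000 bytes: 1,000,000 / 32696 = 30 full
 * buffers with a non-zero remainder, so
 * hal_idle_list_num_scatter_bufs() returns 31.
 */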

enum hal_pn_type {
	HAL_PN_NONE,
	HAL_PN_WPA,
	HAL_PN_WAPI_EVEN,
	HAL_PN_WAPI_UNEVEN,
};

#define HAL_RX_MAX_BA_WINDOW 256

/**
 * hal_get_reo_qdesc_align - Get start address alignment for reo
 * queue descriptors
 *
 * @hal_soc: Opaque HAL SOC handle
 *
 */
static inline
uint32_t hal_get_reo_qdesc_align(hal_soc_handle_t hal_soc_hdl)
{
	return REO_QUEUE_DESC_ALIGN;
}

/**
 * hal_reo_qdesc_setup - Setup HW REO queue descriptor
 *
 * @hal_soc: Opaque HAL SOC handle
 * @tid: TID for which the queue descriptor is set up
 * @ba_window_size: BlockAck window size
 * @start_seq: Starting sequence number
 * @hw_qdesc_vaddr: Virtual address of REO queue descriptor memory
 * @hw_qdesc_paddr: Physical address of REO queue descriptor memory
 * @pn_type: PN type (one of the types defined in 'enum hal_pn_type')
 *
 */
void hal_reo_qdesc_setup(hal_soc_handle_t hal_soc_hdl,
			 int tid, uint32_t ba_window_size,
			 uint32_t start_seq, void *hw_qdesc_vaddr,
			 qdf_dma_addr_t hw_qdesc_paddr,
			 int pn_type);

/**
 * hal_srng_get_hp_addr - Get head pointer physical address
 *
 * @hal_soc: Opaque HAL SOC handle
 * @hal_ring_hdl: Ring pointer (Source or Destination ring)
 *
 */
static inline qdf_dma_addr_t
hal_srng_get_hp_addr(void *hal_soc,
		     hal_ring_handle_t hal_ring_hdl)
{
	struct hal_srng *srng = (struct hal_srng *)hal_ring_hdl;
	struct hal_soc *hal = (struct hal_soc *)hal_soc;

	if (srng->ring_dir == HAL_SRNG_SRC_RING) {
		return hal->shadow_wrptr_mem_paddr +
		  ((unsigned long)(srng->u.src_ring.hp_addr) -
		  (unsigned long)(hal->shadow_wrptr_mem_vaddr));
	} else {
		return hal->shadow_rdptr_mem_paddr +
		  ((unsigned long)(srng->u.dst_ring.hp_addr) -
		   (unsigned long)(hal->shadow_rdptr_mem_vaddr));
	}
}

/**
 * hal_srng_get_tp_addr - Get tail pointer physical address
 *
 * @hal_soc: Opaque HAL SOC handle
 * @hal_ring_hdl: Ring pointer (Source or Destination ring)
 *
 */
static inline qdf_dma_addr_t
hal_srng_get_tp_addr(void *hal_soc, hal_ring_handle_t hal_ring_hdl)
{
	struct hal_srng *srng = (struct hal_srng *)hal_ring_hdl;
	struct hal_soc *hal = (struct hal_soc *)hal_soc;

	if (srng->ring_dir == HAL_SRNG_SRC_RING) {
		return hal->shadow_rdptr_mem_paddr +
			((unsigned long)(srng->u.src_ring.tp_addr) -
			(unsigned long)(hal->shadow_rdptr_mem_vaddr));
	} else {
		return hal->shadow_wrptr_mem_paddr +
			((unsigned long)(srng->u.dst_ring.tp_addr) -
			(unsigned long)(hal->shadow_wrptr_mem_vaddr));
	}
}

/**
 * hal_srng_get_num_entries - Get total entries in the HAL Srng
 *
 * @hal_soc: Opaque HAL SOC handle
 * @hal_ring_hdl: Ring pointer (Source or Destination ring)
 *
 * Return: total number of entries in hal ring
 */
static inline
uint32_t hal_srng_get_num_entries(hal_soc_handle_t hal_soc_hdl,
				  hal_ring_handle_t hal_ring_hdl)
{
	struct hal_srng *srng = (struct hal_srng *)hal_ring_hdl;

	return srng->num_entries;
}

/**
 * hal_get_srng_params - Retrieve SRNG parameters for a given ring from HAL
 *
 * @hal_soc: Opaque HAL SOC handle
 * @hal_ring_hdl: Ring pointer (Source or Destination ring)
 * @ring_params: SRNG parameters will be returned through this structure
 */
void hal_get_srng_params(hal_soc_handle_t hal_soc_hdl,
			 hal_ring_handle_t hal_ring_hdl,
			 struct hal_srng_params *ring_params);

/**
 * hal_get_meminfo - Retrieve hal memory base address
 *
 * @hal_soc: Opaque HAL SOC handle
 * @mem: pointer to structure to be updated with hal mem info
 */
void hal_get_meminfo(hal_soc_handle_t hal_soc_hdl, struct hal_mem_info *mem);

/**
 * hal_get_target_type - Return target type
 *
 * @hal_soc: Opaque HAL SOC handle
 */
uint32_t hal_get_target_type(hal_soc_handle_t hal_soc_hdl);

/**
 * hal_get_ba_aging_timeout - Retrieve BA aging timeout
 *
 * @hal_soc: Opaque HAL SOC handle
 * @ac: Access category
 * @value: timeout duration in millisec
 */
void hal_get_ba_aging_timeout(hal_soc_handle_t hal_soc_hdl, uint8_t ac,
			      uint32_t *value);
/**
 * hal_set_ba_aging_timeout - Set BA aging timeout
 *
 * @hal_soc: Opaque HAL SOC handle
 * @ac: Access category
 * @value: timeout duration in millisec
 */
void hal_set_ba_aging_timeout(hal_soc_handle_t hal_soc_hdl, uint8_t ac,
			      uint32_t value);
/**
 * hal_srng_dst_hw_init - Private function to initialize SRNG
 * destination ring HW
 * @hal_soc: HAL SOC handle
 * @srng: SRNG ring pointer
 */
static inline void hal_srng_dst_hw_init(struct hal_soc *hal,
	struct hal_srng *srng)
{
	hal->ops->hal_srng_dst_hw_init(hal, srng);
}

/**
 * hal_srng_src_hw_init - Private function to initialize SRNG
 * source ring HW
 * @hal_soc: HAL SOC handle
 * @srng: SRNG ring pointer
 */
static inline void hal_srng_src_hw_init(struct hal_soc *hal,
	struct hal_srng *srng)
{
	hal->ops->hal_srng_src_hw_init(hal, srng);
}

/**
 * hal_get_hw_hptp() - Get HW head and tail pointer value for any ring
 * @hal_soc: Opaque HAL SOC handle
 * @hal_ring_hdl: Source ring pointer
 * @headp: Head Pointer
 * @tailp: Tail Pointer
 * @ring_type: Ring
 *
 * Return: Update tail pointer and head pointer in arguments.
 */
static inline
void hal_get_hw_hptp(hal_soc_handle_t hal_soc_hdl,
		     hal_ring_handle_t hal_ring_hdl,
		     uint32_t *headp, uint32_t *tailp,
		     uint8_t ring_type)
{
	struct hal_soc *hal_soc = (struct hal_soc *)hal_soc_hdl;

	hal_soc->ops->hal_get_hw_hptp(hal_soc, hal_ring_hdl,
			headp, tailp, ring_type);
}

/**
 * hal_reo_setup - Initialize HW REO block
 *
 * @hal_soc: Opaque HAL SOC handle
 * @reo_params: parameters needed by HAL for REO config
 */
static inline void hal_reo_setup(hal_soc_handle_t hal_soc_hdl,
				 void *reoparams)
{
	struct hal_soc *hal_soc = (struct hal_soc *)hal_soc_hdl;

	hal_soc->ops->hal_reo_setup(hal_soc, reoparams);
}

/**
 * hal_setup_link_idle_list - Setup scattered idle list using the
 * buffer list provided
 *
 * @hal_soc: Opaque HAL SOC handle
 * @scatter_bufs_base_paddr: Array of physical base addresses
 * @scatter_bufs_base_vaddr: Array of virtual base addresses
 * @num_scatter_bufs: Number of scatter buffers in the above lists
 * @scatter_buf_size: Size of each scatter buffer
 * @last_buf_end_offset: Offset to the last entry
 * @num_entries: Total entries of all scatter bufs
 *
 */
static inline
void hal_setup_link_idle_list(hal_soc_handle_t hal_soc_hdl,
			      qdf_dma_addr_t scatter_bufs_base_paddr[],
			      void *scatter_bufs_base_vaddr[],
			      uint32_t num_scatter_bufs,
			      uint32_t scatter_buf_size,
			      uint32_t last_buf_end_offset,
			      uint32_t num_entries)
{
	struct hal_soc *hal_soc = (struct hal_soc *)hal_soc_hdl;

	hal_soc->ops->hal_setup_link_idle_list(hal_soc, scatter_bufs_base_paddr,
			scatter_bufs_base_vaddr, num_scatter_bufs,
			scatter_buf_size, last_buf_end_offset,
			num_entries);
}

/**
 * hal_srng_dump_ring_desc() - Dump ring descriptor info
 *
 * @hal_soc: Opaque HAL SOC handle
 * @hal_ring_hdl: Source ring pointer
 * @ring_desc: Opaque ring descriptor handle
 */
static inline void hal_srng_dump_ring_desc(hal_soc_handle_t hal_soc_hdl,
					   hal_ring_handle_t hal_ring_hdl,
					   hal_ring_desc_t ring_desc)
{
	struct hal_srng *srng = (struct hal_srng *)hal_ring_hdl;

	QDF_TRACE_HEX_DUMP(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_HIGH,
			   ring_desc, (srng->entry_size << 2));
}

/**
 * hal_srng_dump_ring() - Dump last 128 descs of the ring
 *
 * @hal_soc: Opaque HAL SOC handle
 * @hal_ring_hdl: Source ring pointer
 */
static inline void hal_srng_dump_ring(hal_soc_handle_t hal_soc_hdl,
				      hal_ring_handle_t hal_ring_hdl)
{
	struct hal_srng *srng = (struct hal_srng *)hal_ring_hdl;
	uint32_t *desc;
	uint32_t tp, i;

	tp = srng->u.dst_ring.tp;

	for (i = 0; i < 128; i++) {
		if (!tp)
			tp = srng->ring_size;

		desc = &srng->ring_base_vaddr[tp - srng->entry_size];
		QDF_TRACE_HEX_DUMP(QDF_MODULE_ID_DP,
				   QDF_TRACE_LEVEL_DEBUG,
				   desc, (srng->entry_size << 2));

		tp -= srng->entry_size;
	}
}

/*
 * hal_rxdma_desc_to_hal_ring_desc - API to convert rxdma ring desc
 * to opaque dp_ring desc type
 * @ring_desc - rxdma ring desc
 *
 * Return: hal_rxdma_desc_t type
 */
static inline
hal_ring_desc_t hal_rxdma_desc_to_hal_ring_desc(hal_rxdma_desc_t ring_desc)
{
	return (hal_ring_desc_t)ring_desc;
}

/**
 * hal_srng_set_event() - Set hal_srng event
 * @hal_ring_hdl: Source ring pointer
 * @event: SRNG ring event
 *
 * Return: None
 */
static inline void hal_srng_set_event(hal_ring_handle_t hal_ring_hdl, int event)
{
	struct hal_srng *srng = (struct hal_srng *)hal_ring_hdl;

	qdf_atomic_set_bit(event, &srng->srng_event);
}

/**
 * hal_srng_clear_event() - Clear hal_srng event
 * @hal_ring_hdl: Source ring pointer
 * @event: SRNG ring event
 *
 * Return: None
 */
static inline
void hal_srng_clear_event(hal_ring_handle_t hal_ring_hdl, int event)
{
	struct hal_srng *srng = (struct hal_srng *)hal_ring_hdl;

	qdf_atomic_clear_bit(event, &srng->srng_event);
}

/**
 * hal_srng_get_clear_event() - Clear srng event and return old value
 * @hal_ring_hdl: Source ring pointer
 * @event: SRNG ring event
 *
 * Return: Return old event value
 */
static inline
int hal_srng_get_clear_event(hal_ring_handle_t hal_ring_hdl, int event)
{
	struct hal_srng *srng = (struct hal_srng *)hal_ring_hdl;

	return qdf_atomic_test_and_clear_bit(event, &srng->srng_event);
}

/**
 * hal_srng_set_flush_last_ts() - Record last flush time stamp
 * @hal_ring_hdl: Source ring pointer
 *
 * Return: None
 */
static inline void hal_srng_set_flush_last_ts(hal_ring_handle_t hal_ring_hdl)
{
	struct hal_srng *srng = (struct hal_srng *)hal_ring_hdl;

	srng->last_flush_ts = qdf_get_log_timestamp();
}

/**
 * hal_srng_inc_flush_cnt() - Increment flush counter
 * @hal_ring_hdl: Source ring pointer
 *
 * Return: None
 */
static inline void hal_srng_inc_flush_cnt(hal_ring_handle_t hal_ring_hdl)
{
	struct hal_srng *srng = (struct hal_srng *)hal_ring_hdl;

	srng->flush_count++;
}
#endif /* _HAL_API_H_ */