/*
 * Copyright (c) 2016-2020 The Linux Foundation. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */

#ifndef _HAL_API_H_
#define _HAL_API_H_

#include "qdf_types.h"
#include "qdf_util.h"
#include "qdf_atomic.h"
#include "hal_internal.h"
#include "hif.h"
#include "hif_io32.h"
#include "qdf_platform.h"

/* calculate the register address offset from bar0 of shadow register x */
#if defined(QCA_WIFI_QCA6390) || defined(QCA_WIFI_QCA6490) || \
    defined(QCA_WIFI_QCA6750)
#define SHADOW_REGISTER_START_ADDRESS_OFFSET 0x000008FC
#define SHADOW_REGISTER_END_ADDRESS_OFFSET \
	((SHADOW_REGISTER_START_ADDRESS_OFFSET) + (4 * (MAX_SHADOW_REGISTERS)))
#define SHADOW_REGISTER(x) ((SHADOW_REGISTER_START_ADDRESS_OFFSET) + (4 * (x)))
#elif defined(QCA_WIFI_QCA6290) || defined(QCA_WIFI_QCN9000)
#define SHADOW_REGISTER_START_ADDRESS_OFFSET 0x00003024
#define SHADOW_REGISTER_END_ADDRESS_OFFSET \
	((SHADOW_REGISTER_START_ADDRESS_OFFSET) + (4 * (MAX_SHADOW_REGISTERS)))
#define SHADOW_REGISTER(x) ((SHADOW_REGISTER_START_ADDRESS_OFFSET) + (4 * (x)))
#else
#define SHADOW_REGISTER(x) 0
#endif /* QCA_WIFI_QCA6390 || QCA_WIFI_QCA6490 || QCA_WIFI_QCA6750 */

#define MAX_UNWINDOWED_ADDRESS 0x80000
#if defined(QCA_WIFI_QCA6390) || defined(QCA_WIFI_QCA6490) || \
    defined(QCA_WIFI_QCN9000) || defined(QCA_WIFI_QCA6750)
#define WINDOW_ENABLE_BIT 0x40000000
#else
#define WINDOW_ENABLE_BIT 0x80000000
#endif
#define WINDOW_REG_ADDRESS 0x310C
#define WINDOW_SHIFT 19
#define WINDOW_VALUE_MASK 0x3F
#define WINDOW_START MAX_UNWINDOWED_ADDRESS
#define WINDOW_RANGE_MASK 0x7FFFF
/*
 * BAR + 4K is always accessible, any access outside this
 * space requires force wake procedure.
 * OFFSET = 4K - 32 bytes = 0xFE0
 */
#define MAPPED_REF_OFF 0xFE0

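/*
 * Worked example (illustrative, not part of the driver): with the values
 * above (WINDOW_SHIFT = 19, WINDOW_VALUE_MASK = 0x3F,
 * WINDOW_RANGE_MASK = 0x7FFFF), an offset of 0x000A0000 decomposes as:
 *
 *	window    = (0x000A0000 >> 19) & 0x3F = 1
 *	in-window = 0x000A0000 & 0x7FFFF      = 0x20000
 *
 * so the access targets BAR + WINDOW_START + 0x20000 after programming
 * (WINDOW_ENABLE_BIT | 1) into WINDOW_REG_ADDRESS, which is exactly what
 * hal_select_window() and the windowed branches below do.
 */
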
/**
 * hal_ring_desc - opaque handle for DP ring descriptor
 */
struct hal_ring_desc;
typedef struct hal_ring_desc *hal_ring_desc_t;

/**
 * hal_link_desc - opaque handle for DP link descriptor
 */
struct hal_link_desc;
typedef struct hal_link_desc *hal_link_desc_t;

/**
 * hal_rxdma_desc - opaque handle for DP rxdma dst ring descriptor
 */
struct hal_rxdma_desc;
typedef struct hal_rxdma_desc *hal_rxdma_desc_t;

/**
 * hal_buff_addrinfo - opaque handle for DP buffer address info
 */
struct hal_buff_addrinfo;
typedef struct hal_buff_addrinfo *hal_buff_addrinfo_t;

#ifdef ENABLE_VERBOSE_DEBUG
static inline void
hal_set_verbose_debug(bool flag)
{
	is_hal_verbose_debug_enabled = flag;
}
#endif

/**
 * hal_reg_write_result_check() - check register writing result
 * @hal_soc: HAL soc handle
 * @offset: register offset to read
 * @exp_val: the expected value of register
 *
 * Return: none
 */
static inline void hal_reg_write_result_check(struct hal_soc *hal_soc,
					      uint32_t offset,
					      uint32_t exp_val)
{
	uint32_t value;

	value = qdf_ioread32(hal_soc->dev_base_addr + offset);
	if (exp_val != value) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_HIGH,
			  "register offset 0x%x write failed!\n", offset);
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_HIGH,
			  "expected 0x%x, actual value 0x%x\n",
			  exp_val,
			  value);
	}
}

#if !defined(QCA_WIFI_QCA6390) && !defined(QCA_WIFI_QCA6490) && \
    !defined(QCA_WIFI_QCA6750)
static inline void hal_lock_reg_access(struct hal_soc *soc,
				       unsigned long *flags)
{
	qdf_spin_lock_irqsave(&soc->register_access_lock);
}

static inline void hal_unlock_reg_access(struct hal_soc *soc,
					 unsigned long *flags)
{
	qdf_spin_unlock_irqrestore(&soc->register_access_lock);
}
#else
static inline void hal_lock_reg_access(struct hal_soc *soc,
				       unsigned long *flags)
{
	pld_lock_reg_window(soc->qdf_dev->dev, flags);
}

static inline void hal_unlock_reg_access(struct hal_soc *soc,
					 unsigned long *flags)
{
	pld_unlock_reg_window(soc->qdf_dev->dev, flags);
}
#endif

#ifdef PCIE_REG_WINDOW_LOCAL_NO_CACHE
static inline void hal_select_window(struct hal_soc *hal_soc, uint32_t offset)
{
	uint32_t window = (offset >> WINDOW_SHIFT) & WINDOW_VALUE_MASK;

	qdf_iowrite32(hal_soc->dev_base_addr + WINDOW_REG_ADDRESS,
		      WINDOW_ENABLE_BIT | window);
	hal_soc->register_window = window;
}

/**
 * hal_select_window_confirm() - write remap window register and
 *				 check writing result
 */
static inline void hal_select_window_confirm(struct hal_soc *hal_soc,
					     uint32_t offset)
{
	uint32_t window = (offset >> WINDOW_SHIFT) & WINDOW_VALUE_MASK;

	qdf_iowrite32(hal_soc->dev_base_addr + WINDOW_REG_ADDRESS,
		      WINDOW_ENABLE_BIT | window);
	hal_soc->register_window = window;

	hal_reg_write_result_check(hal_soc, WINDOW_REG_ADDRESS,
				   WINDOW_ENABLE_BIT | window);
}
#else
static inline void hal_select_window(struct hal_soc *hal_soc, uint32_t offset)
{
	uint32_t window = (offset >> WINDOW_SHIFT) & WINDOW_VALUE_MASK;

	if (window != hal_soc->register_window) {
		qdf_iowrite32(hal_soc->dev_base_addr + WINDOW_REG_ADDRESS,
			      WINDOW_ENABLE_BIT | window);
		hal_soc->register_window = window;
	}
}

static inline void hal_select_window_confirm(struct hal_soc *hal_soc,
					     uint32_t offset)
{
	uint32_t window = (offset >> WINDOW_SHIFT) & WINDOW_VALUE_MASK;

	if (window != hal_soc->register_window) {
		qdf_iowrite32(hal_soc->dev_base_addr + WINDOW_REG_ADDRESS,
			      WINDOW_ENABLE_BIT | window);
		hal_soc->register_window = window;

		hal_reg_write_result_check(
					hal_soc,
					WINDOW_REG_ADDRESS,
					WINDOW_ENABLE_BIT | window);
	}
}
#endif

static inline qdf_iomem_t hal_get_window_address(struct hal_soc *hal_soc,
						 qdf_iomem_t addr)
{
	return hal_soc->ops->hal_get_window_address(hal_soc, addr);
}

/**
 * hal_write32_mb() - Access registers to update configuration
 * @hal_soc: hal soc handle
 * @offset: offset address from the BAR
 * @value: value to write
 *
 * Return: None
 *
 * Description: Register address space is split below:
 *     SHADOW REGION       UNWINDOWED REGION    WINDOWED REGION
 *  |--------------------|-------------------|------------------|
 * BAR  NO FORCE WAKE  BAR+4K  FORCE WAKE  BAR+512K  FORCE WAKE
 *
 * 1. Any access to the shadow region, doesn't need force wake
 *    and windowing logic to access.
 * 2. Any access beyond BAR + 4K:
 *    If init_phase enabled, no force wake is needed and access
 *    should be based on windowed or unwindowed access.
 *    If init_phase disabled, force wake is needed and access
 *    should be based on windowed or unwindowed access.
 *
 * note1: WINDOW_RANGE_MASK = (1 << WINDOW_SHIFT) - 1
 * note2: 1 << WINDOW_SHIFT = MAX_UNWINDOWED_ADDRESS
 * note3: WINDOW_VALUE_MASK = big enough that trying to write past
 *                            that window would be a bug
 */
#if !defined(QCA_WIFI_QCA6390) && !defined(QCA_WIFI_QCA6490) && \
    !defined(QCA_WIFI_QCA6750)
static inline void hal_write32_mb(struct hal_soc *hal_soc, uint32_t offset,
				  uint32_t value)
{
	unsigned long flags;
	qdf_iomem_t new_addr;

	if (!hal_soc->use_register_windowing ||
	    offset < MAX_UNWINDOWED_ADDRESS) {
		qdf_iowrite32(hal_soc->dev_base_addr + offset, value);
	} else if (hal_soc->static_window_map) {
		new_addr = hal_get_window_address(hal_soc,
				hal_soc->dev_base_addr + offset);
		qdf_iowrite32(new_addr, value);
	} else {
		hal_lock_reg_access(hal_soc, &flags);
		hal_select_window(hal_soc, offset);
		qdf_iowrite32(hal_soc->dev_base_addr + WINDOW_START +
			  (offset & WINDOW_RANGE_MASK), value);
		hal_unlock_reg_access(hal_soc, &flags);
	}
}

/**
 * hal_write_address_32_mb - write a value to a register
 */
static inline
void hal_write_address_32_mb(struct hal_soc *hal_soc,
			     qdf_iomem_t addr, uint32_t value)
{
	uint32_t offset;
	qdf_iomem_t new_addr;

	if (!hal_soc->use_register_windowing)
		return qdf_iowrite32(addr, value);

	offset = addr - hal_soc->dev_base_addr;
	if (hal_soc->static_window_map) {
		new_addr = hal_get_window_address(hal_soc, addr);
		return qdf_iowrite32(new_addr, value);
	}
	hal_write32_mb(hal_soc, offset, value);
}

#define hal_write32_mb_confirm(_hal_soc, _offset, _value) \
		hal_write32_mb(_hal_soc, _offset, _value)
#else
static inline void hal_write32_mb(struct hal_soc *hal_soc, uint32_t offset,
				  uint32_t value)
{
	int ret;
	unsigned long flags;

	/* Region < BAR + 4K can be directly accessed */
	if (offset < MAPPED_REF_OFF) {
		qdf_iowrite32(hal_soc->dev_base_addr + offset, value);
		return;
	}

	/* Region greater than BAR + 4K */
	if (!hal_soc->init_phase) {
		ret = hif_force_wake_request(hal_soc->hif_handle);
		if (ret) {
			hal_err("Wake up request failed");
			qdf_check_state_before_panic();
			return;
		}
	}

	if (!hal_soc->use_register_windowing ||
	    offset < MAX_UNWINDOWED_ADDRESS) {
		qdf_iowrite32(hal_soc->dev_base_addr + offset, value);
	} else {
		hal_lock_reg_access(hal_soc, &flags);
		hal_select_window(hal_soc, offset);
		qdf_iowrite32(hal_soc->dev_base_addr + WINDOW_START +
			  (offset & WINDOW_RANGE_MASK), value);
		hal_unlock_reg_access(hal_soc, &flags);
	}

	if (!hal_soc->init_phase) {
		ret = hif_force_wake_release(hal_soc->hif_handle);
		if (ret) {
			hal_err("Wake up release failed");
			qdf_check_state_before_panic();
			return;
		}
	}
}

/**
 * hal_write32_mb_confirm() - write register and check writing result
 */
static inline void hal_write32_mb_confirm(struct hal_soc *hal_soc,
					  uint32_t offset,
					  uint32_t value)
{
	int ret;
	unsigned long flags;

	/* Region < BAR + 4K can be directly accessed */
	if (offset < MAPPED_REF_OFF) {
		qdf_iowrite32(hal_soc->dev_base_addr + offset, value);
		return;
	}

	/* Region greater than BAR + 4K */
	if (!hal_soc->init_phase) {
		ret = hif_force_wake_request(hal_soc->hif_handle);
		if (ret) {
			hal_err("Wake up request failed");
			qdf_check_state_before_panic();
			return;
		}
	}

	if (!hal_soc->use_register_windowing ||
	    offset < MAX_UNWINDOWED_ADDRESS) {
		qdf_iowrite32(hal_soc->dev_base_addr + offset, value);
		hal_reg_write_result_check(hal_soc, offset,
					   value);
	} else {
		hal_lock_reg_access(hal_soc, &flags);
		hal_select_window_confirm(hal_soc, offset);
		qdf_iowrite32(hal_soc->dev_base_addr + WINDOW_START +
			  (offset & WINDOW_RANGE_MASK), value);

		hal_reg_write_result_check(
				hal_soc,
				WINDOW_START + (offset & WINDOW_RANGE_MASK),
				value);
		hal_unlock_reg_access(hal_soc, &flags);
	}

	if (!hal_soc->init_phase) {
		ret = hif_force_wake_release(hal_soc->hif_handle);
		if (ret) {
			hal_err("Wake up release failed");
			qdf_check_state_before_panic();
			return;
		}
	}
}

/**
 * hal_write_address_32_mb - write a value to a register
 */
static inline
void hal_write_address_32_mb(struct hal_soc *hal_soc,
			     qdf_iomem_t addr, uint32_t value)
{
	uint32_t offset;

	if (!hal_soc->use_register_windowing)
		return qdf_iowrite32(addr, value);

	offset = addr - hal_soc->dev_base_addr;
	hal_write32_mb(hal_soc, offset, value);
}
#endif

#ifdef DP_HAL_MULTIWINDOW_DIRECT_ACCESS
#define hal_srng_write_address_32_mb(_a, _b, _c) qdf_iowrite32(_b, _c)
#else
#define hal_srng_write_address_32_mb(_a, _b, _c) \
		hal_write_address_32_mb(_a, _b, _c)
#endif

#if !defined(QCA_WIFI_QCA6390) && !defined(QCA_WIFI_QCA6490) && \
    !defined(QCA_WIFI_QCA6750)
/**
 * hal_read32_mb() - Access registers to read configuration
 * @hal_soc: hal soc handle
 * @offset: offset address from the BAR
 *
 * Description: Register address space is split below:
 *     SHADOW REGION       UNWINDOWED REGION    WINDOWED REGION
 *  |--------------------|-------------------|------------------|
 * BAR  NO FORCE WAKE  BAR+4K  FORCE WAKE  BAR+512K  FORCE WAKE
 *
 * 1. Any access to the shadow region, doesn't need force wake
 *    and windowing logic to access.
 * 2. Any access beyond BAR + 4K:
 *    If init_phase enabled, no force wake is needed and access
 *    should be based on windowed or unwindowed access.
 *    If init_phase disabled, force wake is needed and access
 *    should be based on windowed or unwindowed access.
 *
 * Return: value read from the register
 */
static inline
uint32_t hal_read32_mb(struct hal_soc *hal_soc, uint32_t offset)
{
	uint32_t ret;
	unsigned long flags;
	qdf_iomem_t new_addr;

	if (!hal_soc->use_register_windowing ||
	    offset < MAX_UNWINDOWED_ADDRESS) {
		return qdf_ioread32(hal_soc->dev_base_addr + offset);
	} else if (hal_soc->static_window_map) {
		new_addr = hal_get_window_address(hal_soc,
				hal_soc->dev_base_addr + offset);
		return qdf_ioread32(new_addr);
	}

	hal_lock_reg_access(hal_soc, &flags);
	hal_select_window(hal_soc, offset);
	ret = qdf_ioread32(hal_soc->dev_base_addr + WINDOW_START +
		       (offset & WINDOW_RANGE_MASK));
	hal_unlock_reg_access(hal_soc, &flags);

	return ret;
}
#else
static inline
uint32_t hal_read32_mb(struct hal_soc *hal_soc, uint32_t offset)
{
	uint32_t ret;
	unsigned long flags;

	/* Region < BAR + 4K can be directly accessed */
	if (offset < MAPPED_REF_OFF)
		return qdf_ioread32(hal_soc->dev_base_addr + offset);

	if ((!hal_soc->init_phase) &&
	    hif_force_wake_request(hal_soc->hif_handle)) {
		hal_err("Wake up request failed");
		qdf_check_state_before_panic();
		return 0;
	}

	if (!hal_soc->use_register_windowing ||
	    offset < MAX_UNWINDOWED_ADDRESS) {
		ret = qdf_ioread32(hal_soc->dev_base_addr + offset);
	} else {
		hal_lock_reg_access(hal_soc, &flags);
		hal_select_window(hal_soc, offset);
		ret = qdf_ioread32(hal_soc->dev_base_addr + WINDOW_START +
			       (offset & WINDOW_RANGE_MASK));
		hal_unlock_reg_access(hal_soc, &flags);
	}

	if ((!hal_soc->init_phase) &&
	    hif_force_wake_release(hal_soc->hif_handle)) {
		hal_err("Wake up release failed");
		qdf_check_state_before_panic();
		return 0;
	}

	return ret;
}
#endif

/**
 * hal_read_address_32_mb() - Read 32-bit value from the register
 * @soc: soc handle
 * @addr: register address to read
 *
 * Return: 32-bit value
 */
static inline
uint32_t hal_read_address_32_mb(struct hal_soc *soc,
				qdf_iomem_t addr)
{
	uint32_t offset;
	uint32_t ret;
	qdf_iomem_t new_addr;

	if (!soc->use_register_windowing)
		return qdf_ioread32(addr);

	offset = addr - soc->dev_base_addr;
	if (soc->static_window_map) {
		new_addr = hal_get_window_address(soc, addr);
		return qdf_ioread32(new_addr);
	}

	ret = hal_read32_mb(soc, offset);
	return ret;
}

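/*
 * Usage sketch (hypothetical helper, not part of this file): a masked
 * read-modify-write built on the accessors above. The helper name, offset
 * and mask are illustrative. Note that the read and the write are locked
 * individually, so the sequence is not atomic as a pair.
 *
 *	static inline void hal_reg_update_bits(struct hal_soc *hal_soc,
 *					       uint32_t offset,
 *					       uint32_t mask, uint32_t val)
 *	{
 *		uint32_t regval = hal_read32_mb(hal_soc, offset);
 *
 *		regval &= ~mask;
 *		regval |= (val & mask);
 *		hal_write32_mb_confirm(hal_soc, offset, regval);
 *	}
 *
 * Both accessors already handle windowing and (on applicable targets)
 * force wake, so the helper needs nothing beyond their own locking.
 */
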
/**
 * hal_attach - Initialize HAL layer
 * @hif_handle: Opaque HIF handle
 * @qdf_dev: QDF device
 *
 * Return: Opaque HAL SOC handle
 *		 NULL on failure
 *
 * This function should be called as part of HIF initialization (for accessing
 * copy engines). DP layer will get hal_soc handle using hif_get_hal_handle()
 */
void *hal_attach(struct hif_opaque_softc *hif_handle, qdf_device_t qdf_dev);

/**
 * hal_detach - Detach HAL layer
 * @hal_soc: HAL SOC handle
 *
 * This function should be called as part of HIF detach
 */
extern void hal_detach(void *hal_soc);

/* SRNG type to be passed in APIs hal_srng_get_entrysize and hal_srng_setup */
enum hal_ring_type {
	REO_DST = 0,
	REO_EXCEPTION = 1,
	REO_REINJECT = 2,
	REO_CMD = 3,
	REO_STATUS = 4,
	TCL_DATA = 5,
	TCL_CMD = 6,
	TCL_STATUS = 7,
	CE_SRC = 8,
	CE_DST = 9,
	CE_DST_STATUS = 10,
	WBM_IDLE_LINK = 11,
	SW2WBM_RELEASE = 12,
	WBM2SW_RELEASE = 13,
	RXDMA_BUF = 14,
	RXDMA_DST = 15,
	RXDMA_MONITOR_BUF = 16,
	RXDMA_MONITOR_STATUS = 17,
	RXDMA_MONITOR_DST = 18,
	RXDMA_MONITOR_DESC = 19,
	DIR_BUF_RX_DMA_SRC = 20,
#ifdef WLAN_FEATURE_CIF_CFR
	WIFI_POS_SRC,
#endif
	MAX_RING_TYPES
};

#define HAL_SRNG_LMAC_RING 0x80000000
/* SRNG flags passed in hal_srng_params.flags */
#define HAL_SRNG_MSI_SWAP			0x00000008
#define HAL_SRNG_RING_PTR_SWAP			0x00000010
#define HAL_SRNG_DATA_TLV_SWAP			0x00000020
#define HAL_SRNG_LOW_THRES_INTR_ENABLE		0x00010000
#define HAL_SRNG_MSI_INTR			0x00020000
#define HAL_SRNG_CACHED_DESC			0x00040000

#define PN_SIZE_24 0
#define PN_SIZE_48 1
#define PN_SIZE_128 2

#ifdef FORCE_WAKE
/**
 * hal_set_init_phase() - Indicate initialization of
 *                        datapath rings
 * @soc: hal_soc handle
 * @init_phase: flag to indicate datapath rings
 *              initialization status
 *
 * Return: None
 */
void hal_set_init_phase(hal_soc_handle_t soc, bool init_phase);
#else
static inline
void hal_set_init_phase(hal_soc_handle_t soc, bool init_phase)
{
}
#endif /* FORCE_WAKE */

/**
 * hal_srng_get_entrysize - Returns size of ring entry in bytes. Should be
 * used by callers for calculating the size of memory to be allocated before
 * calling hal_srng_setup to setup the ring
 *
 * @hal_soc: Opaque HAL SOC handle
 * @ring_type: one of the types from hal_ring_type
 */
extern uint32_t hal_srng_get_entrysize(void *hal_soc, int ring_type);

/**
 * hal_srng_max_entries - Returns maximum possible number of ring entries
 * @hal_soc: Opaque HAL SOC handle
 * @ring_type: one of the types from hal_ring_type
 *
 * Return: Maximum number of entries for the given ring_type
 */
uint32_t hal_srng_max_entries(void *hal_soc, int ring_type);

/**
 * hal_srng_dump - Dump ring status
 * @srng: hal srng pointer
 */
void hal_srng_dump(struct hal_srng *srng);

/**
 * hal_srng_get_dir - Returns the direction of the ring
 * @hal_soc: Opaque HAL SOC handle
 * @ring_type: one of the types from hal_ring_type
 *
 * Return: Ring direction
 */
enum hal_srng_dir hal_srng_get_dir(void *hal_soc, int ring_type);

/* HAL memory information */
struct hal_mem_info {
	/* dev base virtual addr */
	void *dev_base_addr;
	/* dev base physical addr */
	void *dev_base_paddr;
	/* Remote virtual pointer memory for HW/FW updates */
	void *shadow_rdptr_mem_vaddr;
	/* Remote physical pointer memory for HW/FW updates */
	void *shadow_rdptr_mem_paddr;
	/* Shared memory for ring pointer updates from host to FW */
	void *shadow_wrptr_mem_vaddr;
	/* Shared physical memory for ring pointer updates from host to FW */
	void *shadow_wrptr_mem_paddr;
};

/* SRNG parameters to be passed to hal_srng_setup */
struct hal_srng_params {
	/* Physical base address of the ring */
	qdf_dma_addr_t ring_base_paddr;
	/* Virtual base address of the ring */
	void *ring_base_vaddr;
	/* Number of entries in ring */
	uint32_t num_entries;
	/* max transfer length */
	uint16_t max_buffer_length;
	/* MSI Address */
	qdf_dma_addr_t msi_addr;
	/* MSI data */
	uint32_t msi_data;
	/* Interrupt timer threshold - in micro seconds */
	uint32_t intr_timer_thres_us;
	/* Interrupt batch counter threshold - in number of ring entries */
	uint32_t intr_batch_cntr_thres_entries;
	/* Low threshold - in number of ring entries
	 * (valid for src rings only)
	 */
	uint32_t low_threshold;
	/* Misc flags */
	uint32_t flags;
	/* Unique ring id */
	uint8_t ring_id;
	/* Source or Destination ring */
	enum hal_srng_dir ring_dir;
	/* Size of ring entry */
	uint32_t entry_size;
	/* hw register base address */
	void *hwreg_base[MAX_SRNG_REG_GROUPS];
};

/**
 * hal_construct_shadow_config() - initialize the shadow registers for dp rings
 * @hal_soc: hal handle
 *
 * Return: QDF_STATUS_OK on success
 */
extern QDF_STATUS hal_construct_shadow_config(void *hal_soc);

/**
 * hal_set_one_shadow_config() - add a config for the specified ring
 * @hal_soc: hal handle
 * @ring_type: ring type
 * @ring_num: ring num
 *
 * The ring type and ring num uniquely specify the ring.  After this call,
 * the hp/tp will be added as the next entry in the shadow register
 * configuration table.  The hal code will use the shadow register address
 * in place of the hp/tp address.
 *
 * This function is exposed, so that the CE module can skip configuring shadow
 * registers for unused rings and rings assigned to the firmware.
 *
 * Return: QDF_STATUS_OK on success
 */
extern QDF_STATUS hal_set_one_shadow_config(void *hal_soc, int ring_type,
					    int ring_num);
/**
 * hal_get_shadow_config() - retrieve the config table
 * @hal_soc: hal handle
 * @shadow_config: will point to the table after this call
 * @num_shadow_registers_configured: will contain the number of valid entries
 */
extern void hal_get_shadow_config(void *hal_soc,
				  struct pld_shadow_reg_v2_cfg **shadow_config,
				  int *num_shadow_registers_configured);
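
/*
 * Typical flow (sketch; QDF_STATUS checks elided, ring choices
 * illustrative): init code registers shadow translations for the rings it
 * actually uses, then retrieves the table for the platform driver/firmware:
 *
 *	hal_construct_shadow_config(hal_soc);
 *	hal_set_one_shadow_config(hal_soc, CE_SRC, 0);
 *	hal_set_one_shadow_config(hal_soc, CE_DST, 1);
 *
 *	struct pld_shadow_reg_v2_cfg *cfg;
 *	int num_cfg;
 *
 *	hal_get_shadow_config(hal_soc, &cfg, &num_cfg);
 */
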
/**
 * hal_srng_setup - Initialize HW SRNG ring.
 *
 * @hal_soc: Opaque HAL SOC handle
 * @ring_type: one of the types from hal_ring_type
 * @ring_num: Ring number if there are multiple rings of
 *		same type (starting from 0)
 * @mac_id: valid MAC Id should be passed if ring type is one of lmac rings
 * @ring_params: SRNG ring params in hal_srng_params structure.
 *
 * Callers are expected to allocate contiguous ring memory of size
 * 'num_entries * entry_size' bytes and pass the physical and virtual base
 * addresses through 'ring_base_paddr' and 'ring_base_vaddr' in hal_srng_params
 * structure. Ring base address should be 8 byte aligned and size of each ring
 * entry should be queried using the API hal_srng_get_entrysize
 *
 * Return: Opaque pointer to ring on success
 *		 NULL on failure (if given ring is not available)
 */
extern void *hal_srng_setup(void *hal_soc, int ring_type, int ring_num,
	int mac_id, struct hal_srng_params *ring_params);
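
/*
 * Setup sketch (illustrative; error handling elided, ring type and size
 * chosen arbitrarily, hal_soc/qdf_dev assumed from attach time). Per the
 * description above, the ring is sized with hal_srng_get_entrysize()
 * before hal_srng_setup():
 *
 *	struct hal_srng_params params = {0};
 *	uint32_t entry_size = hal_srng_get_entrysize(hal_soc, REO_DST);
 *	uint32_t num_entries = 1024;
 *	void *hal_ring;
 *
 *	params.ring_base_vaddr = qdf_mem_alloc_consistent(qdf_dev,
 *			qdf_dev->dev, num_entries * entry_size,
 *			&params.ring_base_paddr);
 *	params.num_entries = num_entries;
 *
 *	hal_ring = hal_srng_setup(hal_soc, REO_DST, 0, 0, &params);
 *
 * Consistent-memory allocations are at least page aligned, which
 * satisfies the 8 byte alignment requirement stated above.
 */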

/* Remapping ids of REO rings */
#define REO_REMAP_TCL 0
#define REO_REMAP_SW1 1
#define REO_REMAP_SW2 2
#define REO_REMAP_SW3 3
#define REO_REMAP_SW4 4
#define REO_REMAP_RELEASE 5
#define REO_REMAP_FW 6
#define REO_REMAP_UNUSED 7

/*
 * Macro to access HWIO_REO_R0_DESTINATION_RING_CTRL_IX_0
 * to map destination to rings
 */
#define HAL_REO_REMAP_IX0(_VALUE, _OFFSET) \
	((_VALUE) << \
	 (HWIO_REO_R0_DESTINATION_RING_CTRL_IX_0_DEST_RING_MAPPING_ ## \
	  _OFFSET ## _SHFT))

/*
 * Macro to access HWIO_REO_R0_DESTINATION_RING_CTRL_IX_2
 * to map destination to rings
 */
#define HAL_REO_REMAP_IX2(_VALUE, _OFFSET) \
	((_VALUE) << \
	 (HWIO_REO_R0_DESTINATION_RING_CTRL_IX_2_DEST_RING_MAPPING_ ## \
	  _OFFSET ## _SHFT))

/*
 * Macro to access HWIO_REO_R0_DESTINATION_RING_CTRL_IX_3
 * to map destination to rings
 */
#define HAL_REO_REMAP_IX3(_VALUE, _OFFSET) \
	((_VALUE) << \
	 (HWIO_REO_R0_DESTINATION_RING_CTRL_IX_3_DEST_RING_MAPPING_ ## \
	  _OFFSET ## _SHFT))

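/*
 * Composition example (illustrative): an IX register value is built by
 * OR-ing shifted destination fields, one per _OFFSET slot, e.g. spreading
 * traffic across the four SW rings in IX0:
 *
 *	uint32_t ix0 = HAL_REO_REMAP_IX0(REO_REMAP_SW1, 0) |
 *		       HAL_REO_REMAP_IX0(REO_REMAP_SW2, 1) |
 *		       HAL_REO_REMAP_IX0(REO_REMAP_SW3, 2) |
 *		       HAL_REO_REMAP_IX0(REO_REMAP_SW4, 3);
 *
 *	hal_reo_read_write_ctrl_ix(hal_soc_hdl, false, &ix0,
 *				   NULL, NULL, NULL);
 *
 * The NULL arguments assume the implementation skips IX registers whose
 * pointers are not supplied; the mapping itself is a DP-layer policy
 * decision, not fixed by this header.
 */
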
/**
 * hal_reo_read_write_ctrl_ix - Read or write REO_DESTINATION_RING_CTRL_IX
 * @hal_soc_hdl: HAL SOC handle
 * @read: boolean value to indicate if read or write
 * @ix0: pointer to store IX0 reg value
 * @ix1: pointer to store IX1 reg value
 * @ix2: pointer to store IX2 reg value
 * @ix3: pointer to store IX3 reg value
 */
void hal_reo_read_write_ctrl_ix(hal_soc_handle_t hal_soc_hdl, bool read,
				uint32_t *ix0, uint32_t *ix1,
				uint32_t *ix2, uint32_t *ix3);

/**
 * hal_srng_dst_set_hp_paddr() - Set physical address to dest SRNG head pointer
 * @sring: sring pointer
 * @paddr: physical address
 */
extern void hal_srng_dst_set_hp_paddr(struct hal_srng *sring, uint64_t paddr);

/**
 * hal_srng_dst_init_hp() - Initialize head pointer with cached head pointer
 * @srng: sring pointer
 * @vaddr: virtual address
 */
extern void hal_srng_dst_init_hp(struct hal_srng *srng, uint32_t *vaddr);

/**
 * hal_srng_cleanup - Deinitialize HW SRNG ring.
 * @hal_soc: Opaque HAL SOC handle
 * @hal_ring_hdl: Opaque HAL SRNG pointer
 */
void hal_srng_cleanup(void *hal_soc, hal_ring_handle_t hal_ring_hdl);

static inline bool hal_srng_initialized(hal_ring_handle_t hal_ring_hdl)
{
	struct hal_srng *srng = (struct hal_srng *)hal_ring_hdl;

	return !!srng->initialized;
}

/**
 * hal_srng_dst_peek - Check if there are any entries in the ring (peek)
 * @hal_soc: Opaque HAL SOC handle
 * @hal_ring_hdl: Destination ring pointer
 *
 * Caller takes responsibility for any locking needs.
 *
 * Return: Opaque pointer for next ring entry; NULL on failure
 */
static inline
void *hal_srng_dst_peek(hal_soc_handle_t hal_soc_hdl,
			hal_ring_handle_t hal_ring_hdl)
{
	struct hal_srng *srng = (struct hal_srng *)hal_ring_hdl;

	if (srng->u.dst_ring.tp != srng->u.dst_ring.cached_hp)
		return (void *)(&srng->ring_base_vaddr[srng->u.dst_ring.tp]);

	return NULL;
}

/**
 * hal_srng_access_start_unlocked - Start ring access (unlocked). Should use
 * hal_srng_access_start if locked access is required
 *
 * @hal_soc: Opaque HAL SOC handle
 * @hal_ring_hdl: Ring pointer (Source or Destination ring)
 *
 * Return: 0 on success; error on failure
 */
static inline int
hal_srng_access_start_unlocked(hal_soc_handle_t hal_soc_hdl,
			       hal_ring_handle_t hal_ring_hdl)
{
	struct hal_srng *srng = (struct hal_srng *)hal_ring_hdl;
	struct hal_soc *soc = (struct hal_soc *)hal_soc_hdl;
	uint32_t *desc;

	if (srng->ring_dir == HAL_SRNG_SRC_RING)
		srng->u.src_ring.cached_tp =
			*(volatile uint32_t *)(srng->u.src_ring.tp_addr);
	else {
		srng->u.dst_ring.cached_hp =
			*(volatile uint32_t *)(srng->u.dst_ring.hp_addr);

		if (srng->flags & HAL_SRNG_CACHED_DESC) {
			desc = hal_srng_dst_peek(hal_soc_hdl, hal_ring_hdl);
			if (qdf_likely(desc)) {
				qdf_mem_dma_cache_sync(soc->qdf_dev,
						       qdf_mem_virt_to_phys
						       (desc),
						       QDF_DMA_FROM_DEVICE,
						       (srng->entry_size *
							sizeof(uint32_t)));
				qdf_prefetch(desc);
			}
		}
	}

	return 0;
}

/**
 * hal_srng_access_start - Start (locked) ring access
 *
 * @hal_soc: Opaque HAL SOC handle
 * @hal_ring_hdl: Ring pointer (Source or Destination ring)
 *
 * Return: 0 on success; error on failure
 */
static inline int hal_srng_access_start(hal_soc_handle_t hal_soc_hdl,
					hal_ring_handle_t hal_ring_hdl)
{
	struct hal_srng *srng = (struct hal_srng *)hal_ring_hdl;

	if (qdf_unlikely(!hal_ring_hdl)) {
		qdf_print("Error: Invalid hal_ring\n");
		return -EINVAL;
	}

	SRNG_LOCK(&(srng->lock));

	return hal_srng_access_start_unlocked(hal_soc_hdl, hal_ring_hdl);
}

/**
 * hal_srng_dst_get_next - Get next entry from a destination ring and move
 * cached tail pointer
 *
 * @hal_soc: Opaque HAL SOC handle
 * @hal_ring_hdl: Destination ring pointer
 *
 * Return: Opaque pointer for next ring entry; NULL on failure
 */
static inline
void *hal_srng_dst_get_next(void *hal_soc,
			    hal_ring_handle_t hal_ring_hdl)
{
	struct hal_srng *srng = (struct hal_srng *)hal_ring_hdl;
	struct hal_soc *soc = (struct hal_soc *)hal_soc;
	uint32_t *desc;
	uint32_t *desc_next;
	uint32_t tp;

	if (srng->u.dst_ring.tp != srng->u.dst_ring.cached_hp) {
		desc = &(srng->ring_base_vaddr[srng->u.dst_ring.tp]);
		/* TODO: Using % is expensive, but we have to do this since
		 * size of some SRNG rings is not power of 2 (due to descriptor
		 * sizes). Need to create separate API for rings used
		 * per-packet, with sizes power of 2 (TCL2SW, REO2SW,
		 * SW2RXDMA and CE rings)
		 */
		srng->u.dst_ring.tp = (srng->u.dst_ring.tp + srng->entry_size) %
			srng->ring_size;

		if (srng->flags & HAL_SRNG_CACHED_DESC) {
			tp = srng->u.dst_ring.tp;
			desc_next = &srng->ring_base_vaddr[tp];
			qdf_mem_dma_cache_sync(soc->qdf_dev,
					       qdf_mem_virt_to_phys(desc_next),
					       QDF_DMA_FROM_DEVICE,
					       (srng->entry_size *
						sizeof(uint32_t)));
			qdf_prefetch(desc_next);
		}

		return (void *)desc;
	}

	return NULL;
}

/**
 * hal_srng_dst_get_next_hp - Get next entry from a destination ring and move
 * cached head pointer
 *
 * @hal_soc: Opaque HAL SOC handle
 * @hal_ring_hdl: Destination ring pointer
 *
 * Return: Opaque pointer for next ring entry; NULL on failure
 */
static inline void *
hal_srng_dst_get_next_hp(hal_soc_handle_t hal_soc_hdl,
			 hal_ring_handle_t hal_ring_hdl)
{
	struct hal_srng *srng = (struct hal_srng *)hal_ring_hdl;
	uint32_t *desc;
	/* TODO: Using % is expensive, but we have to do this since
	 * size of some SRNG rings is not power of 2 (due to descriptor
	 * sizes). Need to create separate API for rings used
	 * per-packet, with sizes power of 2 (TCL2SW, REO2SW,
	 * SW2RXDMA and CE rings)
	 */
	uint32_t next_hp = (srng->u.dst_ring.cached_hp + srng->entry_size) %
		srng->ring_size;

	if (next_hp != srng->u.dst_ring.tp) {
		desc = &(srng->ring_base_vaddr[srng->u.dst_ring.cached_hp]);
		srng->u.dst_ring.cached_hp = next_hp;
		return (void *)desc;
	}

	return NULL;
}

/**
 * hal_srng_dst_peek_sync - Check if there are any entries in the ring (peek)
 * @hal_soc: Opaque HAL SOC handle
 * @hal_ring_hdl: Destination ring pointer
 *
 * Sync cached head pointer with HW.
 * Caller takes responsibility for any locking needs.
 *
 * Return: Opaque pointer for next ring entry; NULL on failure
 */
static inline
void *hal_srng_dst_peek_sync(hal_soc_handle_t hal_soc_hdl,
			     hal_ring_handle_t hal_ring_hdl)
{
	struct hal_srng *srng = (struct hal_srng *)hal_ring_hdl;

	srng->u.dst_ring.cached_hp =
		*(volatile uint32_t *)(srng->u.dst_ring.hp_addr);

	if (srng->u.dst_ring.tp != srng->u.dst_ring.cached_hp)
		return (void *)(&(srng->ring_base_vaddr[srng->u.dst_ring.tp]));

	return NULL;
}

/**
 * hal_srng_dst_peek_sync_locked - Peek for any entries in the ring
 * @hal_soc: Opaque HAL SOC handle
 * @hal_ring_hdl: Destination ring pointer
 *
 * Sync cached head pointer with HW.
 * This function takes up SRNG_LOCK. Should not be called with SRNG lock held.
 *
 * Return: Opaque pointer for next ring entry; NULL on failure
 */
static inline
void *hal_srng_dst_peek_sync_locked(hal_soc_handle_t hal_soc_hdl,
				    hal_ring_handle_t hal_ring_hdl)
{
	struct hal_srng *srng = (struct hal_srng *)hal_ring_hdl;
	void *ring_desc_ptr = NULL;

	if (qdf_unlikely(!hal_ring_hdl)) {
		qdf_print("Error: Invalid hal_ring\n");
		return NULL;
	}

	SRNG_LOCK(&srng->lock);

	ring_desc_ptr = hal_srng_dst_peek_sync(hal_soc_hdl, hal_ring_hdl);

	SRNG_UNLOCK(&srng->lock);

	return ring_desc_ptr;
}

/**
 * hal_srng_dst_num_valid - Returns number of valid entries (to be processed
 * by SW) in destination ring
 *
 * @hal_soc: Opaque HAL SOC handle
 * @hal_ring_hdl: Destination ring pointer
 * @sync_hw_ptr: Sync cached head pointer with HW
 */
static inline
uint32_t hal_srng_dst_num_valid(void *hal_soc,
				hal_ring_handle_t hal_ring_hdl,
				int sync_hw_ptr)
{
	struct hal_srng *srng = (struct hal_srng *)hal_ring_hdl;
	uint32_t hp;
	uint32_t tp = srng->u.dst_ring.tp;

	if (sync_hw_ptr) {
		hp = *(volatile uint32_t *)(srng->u.dst_ring.hp_addr);
		srng->u.dst_ring.cached_hp = hp;
	} else {
		hp = srng->u.dst_ring.cached_hp;
	}

	if (hp >= tp)
		return (hp - tp) / srng->entry_size;
	else
		return (srng->ring_size - tp + hp) / srng->entry_size;
}

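/*
 * Worked example for the wrapped case above: with ring_size = 32 (32-bit
 * words), entry_size = 8 and a wrapped state tp = 24, hp = 8, the count is
 * (32 - 24 + 8) / 8 = 2 valid entries.
 */
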
/**
 * hal_srng_dst_num_valid_locked - Returns num valid entries to be processed
 *
 * @hal_soc: Opaque HAL SOC handle
 * @hal_ring_hdl: Destination ring pointer
 * @sync_hw_ptr: Sync cached head pointer with HW
 *
 * Returns number of valid entries to be processed by the host driver. The
 * function takes up SRNG lock.
 *
 * Return: Number of valid destination entries
 */
static inline uint32_t
hal_srng_dst_num_valid_locked(hal_soc_handle_t hal_soc,
			      hal_ring_handle_t hal_ring_hdl,
			      int sync_hw_ptr)
{
	uint32_t num_valid;
	struct hal_srng *srng = (struct hal_srng *)hal_ring_hdl;

	SRNG_LOCK(&srng->lock);
	num_valid = hal_srng_dst_num_valid(hal_soc, hal_ring_hdl, sync_hw_ptr);
	SRNG_UNLOCK(&srng->lock);

	return num_valid;
}

/**
 * hal_srng_src_reap_next - Reap next entry from a source ring and move reap
 * pointer. This can be used to release any buffers associated with completed
 * ring entries. Note that this should not be used for posting new descriptor
 * entries. Posting of new entries should be done only using
 * hal_srng_src_get_next_reaped when this function is used for reaping.
 *
 * @hal_soc: Opaque HAL SOC handle
 * @hal_ring_hdl: Source ring pointer
 *
 * Return: Opaque pointer for next ring entry; NULL on failure
 */
static inline void *
hal_srng_src_reap_next(void *hal_soc, hal_ring_handle_t hal_ring_hdl)
{
	struct hal_srng *srng = (struct hal_srng *)hal_ring_hdl;
	uint32_t *desc;

	/* TODO: Using % is expensive, but we have to do this since
	 * size of some SRNG rings is not power of 2 (due to descriptor
	 * sizes). Need to create separate API for rings used
	 * per-packet, with sizes power of 2 (TCL2SW, REO2SW,
	 * SW2RXDMA and CE rings)
	 */
	uint32_t next_reap_hp = (srng->u.src_ring.reap_hp + srng->entry_size) %
		srng->ring_size;

	if (next_reap_hp != srng->u.src_ring.cached_tp) {
		desc = &(srng->ring_base_vaddr[next_reap_hp]);
		srng->u.src_ring.reap_hp = next_reap_hp;
		return (void *)desc;
	}

	return NULL;
}

/**
 * hal_srng_src_get_next_reaped - Get next entry from a source ring that is
 * already reaped using hal_srng_src_reap_next, for posting new entries to
 * the ring
 *
 * @hal_soc: Opaque HAL SOC handle
 * @hal_ring_hdl: Source ring pointer
 *
 * Return: Opaque pointer for next (reaped) source ring entry; NULL on failure
 */
static inline void *
hal_srng_src_get_next_reaped(void *hal_soc, hal_ring_handle_t hal_ring_hdl)
{
	struct hal_srng *srng = (struct hal_srng *)hal_ring_hdl;
	uint32_t *desc;

	if (srng->u.src_ring.hp != srng->u.src_ring.reap_hp) {
		desc = &(srng->ring_base_vaddr[srng->u.src_ring.hp]);
		srng->u.src_ring.hp = (srng->u.src_ring.hp + srng->entry_size) %
			srng->ring_size;

		return (void *)desc;
	}

	return NULL;
}

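/*
 * Two-phase usage sketch (illustrative): completions are reaped first,
 * then the freed slots are re-posted with new descriptors:
 *
 *	while ((desc = hal_srng_src_reap_next(hal_soc, hal_ring_hdl)))
 *		;	// release the buffer referenced by desc
 *
 *	while ((desc = hal_srng_src_get_next_reaped(hal_soc,
 *						    hal_ring_hdl)))
 *		;	// fill desc with a new buffer
 *
 * hal_srng_src_get_next() must not be mixed into this sequence: it
 * advances reap_hp as well (see the TODO note in that function).
 */
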
/**
 * hal_srng_src_pending_reap_next - Reap next entry from a source ring and
 * move reap pointer. This API is used in detach path to release any buffers
 * associated with ring entries which are pending reap.
 *
 * @hal_soc: Opaque HAL SOC handle
 * @hal_ring_hdl: Source ring pointer
 *
 * Return: Opaque pointer for next ring entry; NULL on failure
 */
static inline void *
hal_srng_src_pending_reap_next(void *hal_soc, hal_ring_handle_t hal_ring_hdl)
{
	struct hal_srng *srng = (struct hal_srng *)hal_ring_hdl;
	uint32_t *desc;

	uint32_t next_reap_hp = (srng->u.src_ring.reap_hp + srng->entry_size) %
		srng->ring_size;

	if (next_reap_hp != srng->u.src_ring.hp) {
		desc = &(srng->ring_base_vaddr[next_reap_hp]);
		srng->u.src_ring.reap_hp = next_reap_hp;
		return (void *)desc;
	}

	return NULL;
}

/**
 * hal_srng_src_done_val - Returns number of reapable entries in a source ring
 *
 * @hal_soc: Opaque HAL SOC handle
 * @hal_ring_hdl: Source ring pointer
 *
 * Return: Number of entries consumed by the target that can be reaped
 */
static inline uint32_t
hal_srng_src_done_val(void *hal_soc, hal_ring_handle_t hal_ring_hdl)
{
	struct hal_srng *srng = (struct hal_srng *)hal_ring_hdl;
	/* TODO: Using % is expensive, but we have to do this since
	 * size of some SRNG rings is not power of 2 (due to descriptor
	 * sizes). Need to create separate API for rings used
	 * per-packet, with sizes power of 2 (TCL2SW, REO2SW,
	 * SW2RXDMA and CE rings)
	 */
	uint32_t next_reap_hp = (srng->u.src_ring.reap_hp + srng->entry_size) %
		srng->ring_size;

	if (next_reap_hp == srng->u.src_ring.cached_tp)
		return 0;

	if (srng->u.src_ring.cached_tp > next_reap_hp)
		return (srng->u.src_ring.cached_tp - next_reap_hp) /
			srng->entry_size;
	else
		return ((srng->ring_size - next_reap_hp) +
			srng->u.src_ring.cached_tp) / srng->entry_size;
}

/**
 * hal_get_entrysize_from_srng() - Retrieve ring entry size
 * @hal_ring_hdl: Source ring pointer
 *
 * Return: ring entry size
 */
static inline
uint8_t hal_get_entrysize_from_srng(hal_ring_handle_t hal_ring_hdl)
{
	struct hal_srng *srng = (struct hal_srng *)hal_ring_hdl;

	return srng->entry_size;
}

/**
 * hal_get_sw_hptp - Get SW head and tail pointer location for any ring
 * @hal_soc: Opaque HAL SOC handle
 * @hal_ring_hdl: Source ring pointer
 * @tailp: Tail Pointer
 * @headp: Head Pointer
 *
 * Return: None; tail and head pointers are returned through the arguments
 */
static inline
void hal_get_sw_hptp(void *hal_soc, hal_ring_handle_t hal_ring_hdl,
		     uint32_t *tailp, uint32_t *headp)
{
	struct hal_srng *srng = (struct hal_srng *)hal_ring_hdl;

	if (srng->ring_dir == HAL_SRNG_SRC_RING) {
		*headp = srng->u.src_ring.hp;
		*tailp = *srng->u.src_ring.tp_addr;
	} else {
		*tailp = srng->u.dst_ring.tp;
		*headp = *srng->u.dst_ring.hp_addr;
	}
}

/**
 * hal_srng_src_get_next - Get next entry from a source ring and move head
 * pointer
 *
 * @hal_soc: Opaque HAL SOC handle
 * @hal_ring_hdl: Source ring pointer
 *
 * Return: Opaque pointer for next ring entry; NULL on failure
 */
static inline
void *hal_srng_src_get_next(void *hal_soc,
			    hal_ring_handle_t hal_ring_hdl)
{
	struct hal_srng *srng = (struct hal_srng *)hal_ring_hdl;
	uint32_t *desc;
	/* TODO: Using % is expensive, but we have to do this since
	 * size of some SRNG rings is not power of 2 (due to descriptor
	 * sizes). Need to create separate API for rings used
	 * per-packet, with sizes power of 2 (TCL2SW, REO2SW,
	 * SW2RXDMA and CE rings)
	 */
	uint32_t next_hp = (srng->u.src_ring.hp + srng->entry_size) %
		srng->ring_size;

	if (next_hp != srng->u.src_ring.cached_tp) {
		desc = &(srng->ring_base_vaddr[srng->u.src_ring.hp]);
		srng->u.src_ring.hp = next_hp;
		/* TODO: Since reap function is not used by all rings, we can
		 * remove the following update of reap_hp in this function
		 * if we can ensure that only hal_srng_src_get_next_reaped
		 * is used for the rings requiring reap functionality
		 */
		srng->u.src_ring.reap_hp = next_hp;
		return (void *)desc;
	}

	return NULL;
}

/**
 * hal_srng_src_peek - Get next entry from a ring without moving head pointer.
 * hal_srng_src_get_next should be called subsequently to move the head pointer
 *
 * @hal_soc: Opaque HAL SOC handle
 * @hal_ring_hdl: Source ring pointer
 *
 * Return: Opaque pointer for next ring entry; NULL on failure
 */
static inline
void *hal_srng_src_peek(hal_soc_handle_t hal_soc_hdl,
			hal_ring_handle_t hal_ring_hdl)
{
	struct hal_srng *srng = (struct hal_srng *)hal_ring_hdl;
	uint32_t *desc;

	/* TODO: Using % is expensive, but we have to do this since
	 * size of some SRNG rings is not power of 2 (due to descriptor
	 * sizes). Need to create separate API for rings used
	 * per-packet, with sizes power of 2 (TCL2SW, REO2SW,
	 * SW2RXDMA and CE rings)
	 */
	if (((srng->u.src_ring.hp + srng->entry_size) %
		srng->ring_size) != srng->u.src_ring.cached_tp) {
		desc = &(srng->ring_base_vaddr[srng->u.src_ring.hp]);
		return (void *)desc;
	}

	return NULL;
}

/**
 * hal_srng_src_num_avail - Returns number of available entries in src ring
 *
 * @hal_soc: Opaque HAL SOC handle
 * @hal_ring_hdl: Source ring pointer
 * @sync_hw_ptr: Sync cached tail pointer with HW
 */
static inline uint32_t
hal_srng_src_num_avail(void *hal_soc,
		       hal_ring_handle_t hal_ring_hdl, int sync_hw_ptr)
{
	struct hal_srng *srng = (struct hal_srng *)hal_ring_hdl;
	uint32_t tp;
	uint32_t hp = srng->u.src_ring.hp;

	if (sync_hw_ptr) {
		tp = *(srng->u.src_ring.tp_addr);
		srng->u.src_ring.cached_tp = tp;
	} else {
		tp = srng->u.src_ring.cached_tp;
	}

	if (tp > hp)
		return ((tp - hp) / srng->entry_size) - 1;
	else
		return ((srng->ring_size - hp + tp) / srng->entry_size) - 1;
}

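/*
 * The -1 in both branches above keeps one entry permanently unused so that
 * a full ring (hp one entry behind tp) stays distinguishable from an empty
 * one (hp == tp) -- the usual circular-buffer convention. E.g. with
 * ring_size = 32, entry_size = 8, hp = 0 and a synced tp = 8, the ring
 * reports (8 - 0) / 8 - 1 = 0 available entries: writing that last slot
 * would make hp == tp and look empty.
 */
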
/**
 * hal_srng_access_end_unlocked - End ring access (unlocked) - update cached
 * ring head/tail pointers to HW.
 * This should be used only if hal_srng_access_start_unlocked was used to
 * start ring access
 *
 * @hal_soc: Opaque HAL SOC handle
 * @hal_ring_hdl: Ring pointer (Source or Destination ring)
 *
 * Return: None
 */
static inline void
hal_srng_access_end_unlocked(void *hal_soc, hal_ring_handle_t hal_ring_hdl)
{
	struct hal_srng *srng = (struct hal_srng *)hal_ring_hdl;

	/* TODO: See if we need a write memory barrier here */
	if (srng->flags & HAL_SRNG_LMAC_RING) {
		/* For LMAC rings, ring pointer updates are done through FW and
		 * hence written to a shared memory location that is read by FW
		 */
		if (srng->ring_dir == HAL_SRNG_SRC_RING) {
			*(srng->u.src_ring.hp_addr) = srng->u.src_ring.hp;
		} else {
			*(srng->u.dst_ring.tp_addr) = srng->u.dst_ring.tp;
		}
	} else {
		if (srng->ring_dir == HAL_SRNG_SRC_RING)
			hal_srng_write_address_32_mb(hal_soc,
						     srng->u.src_ring.hp_addr,
						     srng->u.src_ring.hp);
		else
			hal_srng_write_address_32_mb(hal_soc,
						     srng->u.dst_ring.tp_addr,
						     srng->u.dst_ring.tp);
	}
}

/**
 * hal_srng_access_end - Unlock ring access and update cached ring head/tail
 * pointers to HW
 * This should be used only if hal_srng_access_start was used to start ring
 * access
 *
 * @hal_soc: Opaque HAL SOC handle
 * @hal_ring_hdl: Ring pointer (Source or Destination ring)
 *
 * Return: None
 */
static inline void
hal_srng_access_end(void *hal_soc, hal_ring_handle_t hal_ring_hdl)
{
	struct hal_srng *srng = (struct hal_srng *)hal_ring_hdl;

	if (qdf_unlikely(!hal_ring_hdl)) {
		qdf_print("Error: Invalid hal_ring\n");
		return;
	}

	hal_srng_access_end_unlocked(hal_soc, hal_ring_hdl);
	SRNG_UNLOCK(&(srng->lock));
}

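/*
 * Canonical locked consumer loop (sketch; descriptor processing and error
 * handling elided). hal_srng_access_start() takes the SRNG lock and
 * snapshots the HW head pointer; hal_srng_access_end() publishes the
 * updated tail pointer to HW and drops the lock:
 *
 *	if (hal_srng_access_start(hal_soc_hdl, hal_ring_hdl))
 *		return;
 *
 *	while ((ring_desc = hal_srng_dst_get_next(hal_soc, hal_ring_hdl)))
 *		;	// process ring_desc
 *
 *	hal_srng_access_end(hal_soc, hal_ring_hdl);
 */
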
/**
 * hal_srng_access_end_reap - Unlock ring access
 * This should be used only if hal_srng_access_start was used to start ring
 * access and should be used only while reaping SRC ring completions
 *
 * @hal_soc: Opaque HAL SOC handle
 * @hal_ring_hdl: Ring pointer (Source or Destination ring)
 *
 * Return: None
 */
static inline void
hal_srng_access_end_reap(void *hal_soc, hal_ring_handle_t hal_ring_hdl)
{
	struct hal_srng *srng = (struct hal_srng *)hal_ring_hdl;

	SRNG_UNLOCK(&(srng->lock));
}

/* TODO: Check if the following definitions are available in HW headers */
#define WBM_IDLE_SCATTER_BUF_SIZE 32704
#define NUM_MPDUS_PER_LINK_DESC 6
#define NUM_MSDUS_PER_LINK_DESC 7
#define REO_QUEUE_DESC_ALIGN 128

#define LINK_DESC_ALIGN 128

#define ADDRESS_MATCH_TAG_VAL 0x5
/* Number of mpdu link pointers is 9 in case of TX_MPDU_QUEUE_HEAD and 14 in
 * case of TX_MPDU_QUEUE_EXT. We are defining a common average count here
 */
#define NUM_MPDU_LINKS_PER_QUEUE_DESC 12

/* TODO: Check with HW team on the scatter buffer size supported. As per WBM
 * MLD, scatter_buffer_size in IDLE_LIST_CONTROL register is 9 bits and size
 * should be specified in 16 word units. But the number of bits defined for
 * this field in HW header files is 5.
 */
#define WBM_IDLE_SCATTER_BUF_NEXT_PTR_SIZE 8

/**
 * hal_idle_list_scatter_buf_size - Get the size of each scatter buffer
 * in an idle list
 *
 * @hal_soc: Opaque HAL SOC handle
 */
static inline
uint32_t hal_idle_list_scatter_buf_size(hal_soc_handle_t hal_soc_hdl)
{
	return WBM_IDLE_SCATTER_BUF_SIZE;
}

/**
 * hal_get_link_desc_size - Get the size of each link descriptor
 *
 * @hal_soc: Opaque HAL SOC handle
 */
static inline uint32_t hal_get_link_desc_size(hal_soc_handle_t hal_soc_hdl)
{
	struct hal_soc *hal_soc = (struct hal_soc *)hal_soc_hdl;

	if (!hal_soc || !hal_soc->ops) {
		qdf_print("Error: Invalid ops\n");
		QDF_BUG(0);
		return -EINVAL;
	}
	if (!hal_soc->ops->hal_get_link_desc_size) {
		qdf_print("Error: Invalid function pointer\n");
		QDF_BUG(0);
		return -EINVAL;
	}
	return hal_soc->ops->hal_get_link_desc_size();
}

/**
 * hal_get_link_desc_align - Get the required start address alignment for
 * link descriptors
 *
 * @hal_soc: Opaque HAL SOC handle
 */
static inline
uint32_t hal_get_link_desc_align(hal_soc_handle_t hal_soc_hdl)
{
	return LINK_DESC_ALIGN;
}

/**
 * hal_num_mpdus_per_link_desc - Get number of mpdus each link desc can hold
 *
 * @hal_soc: Opaque HAL SOC handle
 */
static inline
uint32_t hal_num_mpdus_per_link_desc(hal_soc_handle_t hal_soc_hdl)
{
	return NUM_MPDUS_PER_LINK_DESC;
}

/**
 * hal_num_msdus_per_link_desc - Get number of msdus each link desc can hold
 *
 * @hal_soc: Opaque HAL SOC handle
 */
static inline
uint32_t hal_num_msdus_per_link_desc(hal_soc_handle_t hal_soc_hdl)
{
	return NUM_MSDUS_PER_LINK_DESC;
}

/**
 * hal_num_mpdu_links_per_queue_desc - Get number of mpdu links each queue
 * descriptor can hold
 *
 * @hal_soc: Opaque HAL SOC handle
 */
static inline
uint32_t hal_num_mpdu_links_per_queue_desc(hal_soc_handle_t hal_soc_hdl)
{
	return NUM_MPDU_LINKS_PER_QUEUE_DESC;
}

/**
 * hal_idle_scatter_buf_num_entries - Get the number of link desc entries
 * that can fit in the given scatter buffer size
 *
 * @hal_soc: Opaque HAL SOC handle
 * @scatter_buf_size: Size of scatter buffer
 */
static inline
uint32_t hal_idle_scatter_buf_num_entries(hal_soc_handle_t hal_soc_hdl,
					  uint32_t scatter_buf_size)
{
	return (scatter_buf_size - WBM_IDLE_SCATTER_BUF_NEXT_PTR_SIZE) /
		hal_srng_get_entrysize(hal_soc_hdl, WBM_IDLE_LINK);
}

/**
 * hal_idle_list_num_scatter_bufs - Get the number of scatter buffers
 * needed to hold the given total memory
 *
 * @hal_soc: Opaque HAL SOC handle
 * @total_mem: size of memory to be scattered
 * @scatter_buf_size: Size of scatter buffer
 */
static inline
uint32_t hal_idle_list_num_scatter_bufs(hal_soc_handle_t hal_soc_hdl,
					uint32_t total_mem,
					uint32_t scatter_buf_size)
{
	uint8_t rem = (total_mem % (scatter_buf_size -
			WBM_IDLE_SCATTER_BUF_NEXT_PTR_SIZE)) ? 1 : 0;

	uint32_t num_scatter_bufs = (total_mem / (scatter_buf_size -
				WBM_IDLE_SCATTER_BUF_NEXT_PTR_SIZE)) + rem;

	return num_scatter_bufs;
}

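/*
 * Worked example: for total_mem = 100000 bytes and scatter_buf_size =
 * 32768, each buffer holds 32768 - 8 = 32760 usable bytes, so
 * 100000 / 32760 = 3 full buffers with a remainder, giving
 * num_scatter_bufs = 3 + 1 = 4.
 */
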
enum hal_pn_type {
	HAL_PN_NONE,
	HAL_PN_WPA,
	HAL_PN_WAPI_EVEN,
	HAL_PN_WAPI_UNEVEN,
};

#define HAL_RX_MAX_BA_WINDOW 256

/**
 * hal_get_reo_qdesc_align - Get start address alignment for reo
 * queue descriptors
 *
 * @hal_soc: Opaque HAL SOC handle
 */
static inline
uint32_t hal_get_reo_qdesc_align(hal_soc_handle_t hal_soc_hdl)
{
	return REO_QUEUE_DESC_ALIGN;
}

/**
 * hal_reo_qdesc_setup - Setup HW REO queue descriptor
 *
 * @hal_soc: Opaque HAL SOC handle
 * @tid: TID
 * @ba_window_size: BlockAck window size
 * @start_seq: Starting sequence number
 * @hw_qdesc_vaddr: Virtual address of REO queue descriptor memory
 * @hw_qdesc_paddr: Physical address of REO queue descriptor memory
 * @pn_type: PN type (one of the types defined in 'enum hal_pn_type')
 */
void hal_reo_qdesc_setup(hal_soc_handle_t hal_soc_hdl,
			 int tid, uint32_t ba_window_size,
			 uint32_t start_seq, void *hw_qdesc_vaddr,
			 qdf_dma_addr_t hw_qdesc_paddr,
			 int pn_type);

/**
 * hal_srng_get_hp_addr - Get head pointer physical address
 *
 * @hal_soc: Opaque HAL SOC handle
 * @hal_ring_hdl: Ring pointer (Source or Destination ring)
 */
static inline qdf_dma_addr_t
hal_srng_get_hp_addr(void *hal_soc,
		     hal_ring_handle_t hal_ring_hdl)
{
	struct hal_srng *srng = (struct hal_srng *)hal_ring_hdl;
	struct hal_soc *hal = (struct hal_soc *)hal_soc;

	if (srng->ring_dir == HAL_SRNG_SRC_RING) {
		return hal->shadow_wrptr_mem_paddr +
		  ((unsigned long)(srng->u.src_ring.hp_addr) -
		  (unsigned long)(hal->shadow_wrptr_mem_vaddr));
	} else {
		return hal->shadow_rdptr_mem_paddr +
		  ((unsigned long)(srng->u.dst_ring.hp_addr) -
		   (unsigned long)(hal->shadow_rdptr_mem_vaddr));
	}
}

/**
 * hal_srng_get_tp_addr - Get tail pointer physical address
 *
 * @hal_soc: Opaque HAL SOC handle
 * @hal_ring_hdl: Ring pointer (Source or Destination ring)
 */
static inline qdf_dma_addr_t
hal_srng_get_tp_addr(void *hal_soc, hal_ring_handle_t hal_ring_hdl)
{
	struct hal_srng *srng = (struct hal_srng *)hal_ring_hdl;
	struct hal_soc *hal = (struct hal_soc *)hal_soc;

	if (srng->ring_dir == HAL_SRNG_SRC_RING) {
		return hal->shadow_rdptr_mem_paddr +
			((unsigned long)(srng->u.src_ring.tp_addr) -
			(unsigned long)(hal->shadow_rdptr_mem_vaddr));
	} else {
		return hal->shadow_wrptr_mem_paddr +
			((unsigned long)(srng->u.dst_ring.tp_addr) -
			(unsigned long)(hal->shadow_wrptr_mem_vaddr));
	}
}

/**
 * hal_srng_get_num_entries - Get total entries in the HAL Srng
 *
 * @hal_soc: Opaque HAL SOC handle
 * @hal_ring_hdl: Ring pointer (Source or Destination ring)
 *
 * Return: total number of entries in hal ring
 */
static inline
uint32_t hal_srng_get_num_entries(hal_soc_handle_t hal_soc_hdl,
				  hal_ring_handle_t hal_ring_hdl)
{
	struct hal_srng *srng = (struct hal_srng *)hal_ring_hdl;

	return srng->num_entries;
}

/**
 * hal_get_srng_params - Retrieve SRNG parameters for a given ring from HAL
 *
 * @hal_soc: Opaque HAL SOC handle
 * @hal_ring_hdl: Ring pointer (Source or Destination ring)
 * @ring_params: SRNG parameters will be returned through this structure
 */
void hal_get_srng_params(hal_soc_handle_t hal_soc_hdl,
			 hal_ring_handle_t hal_ring_hdl,
			 struct hal_srng_params *ring_params);

/**
 * hal_get_meminfo - Retrieve hal memory base address
 *
 * @hal_soc: Opaque HAL SOC handle
 * @mem: pointer to structure to be updated with hal mem info
 */
void hal_get_meminfo(hal_soc_handle_t hal_soc_hdl, struct hal_mem_info *mem);

/**
 * hal_get_target_type - Return target type
 *
 * @hal_soc: Opaque HAL SOC handle
 */
uint32_t hal_get_target_type(hal_soc_handle_t hal_soc_hdl);

/**
 * hal_get_ba_aging_timeout - Retrieve BA aging timeout
 *
 * @hal_soc: Opaque HAL SOC handle
 * @ac: Access category
 * @value: timeout duration in millisec
 */
void hal_get_ba_aging_timeout(hal_soc_handle_t hal_soc_hdl, uint8_t ac,
			      uint32_t *value);
/**
 * hal_set_ba_aging_timeout - Set BA aging timeout
 *
 * @hal_soc: Opaque HAL SOC handle
 * @ac: Access category
 * @value: timeout duration in millisec
 */
void hal_set_ba_aging_timeout(hal_soc_handle_t hal_soc_hdl, uint8_t ac,
			      uint32_t value);
/**
 * hal_srng_dst_hw_init - Private function to initialize SRNG
 * destination ring HW
 * @hal_soc: HAL SOC handle
 * @srng: SRNG ring pointer
 */
static inline void hal_srng_dst_hw_init(struct hal_soc *hal,
	struct hal_srng *srng)
{
	hal->ops->hal_srng_dst_hw_init(hal, srng);
}

/**
 * hal_srng_src_hw_init - Private function to initialize SRNG
 * source ring HW
 * @hal_soc: HAL SOC handle
 * @srng: SRNG ring pointer
 */
static inline void hal_srng_src_hw_init(struct hal_soc *hal,
	struct hal_srng *srng)
{
	hal->ops->hal_srng_src_hw_init(hal, srng);
}

/**
 * hal_get_hw_hptp() - Get HW head and tail pointer value for any ring
 * @hal_soc: Opaque HAL SOC handle
 * @hal_ring_hdl: Source ring pointer
 * @headp: Head Pointer
 * @tailp: Tail Pointer
 * @ring_type: Ring
 *
 * Return: None; head and tail pointers are returned through the arguments
 */
static inline
void hal_get_hw_hptp(hal_soc_handle_t hal_soc_hdl,
		     hal_ring_handle_t hal_ring_hdl,
		     uint32_t *headp, uint32_t *tailp,
		     uint8_t ring_type)
{
	struct hal_soc *hal_soc = (struct hal_soc *)hal_soc_hdl;

	hal_soc->ops->hal_get_hw_hptp(hal_soc, hal_ring_hdl,
			headp, tailp, ring_type);
}

/**
 * hal_reo_setup - Initialize HW REO block
 *
 * @hal_soc: Opaque HAL SOC handle
 * @reoparams: parameters needed by HAL for REO config
 */
static inline void hal_reo_setup(hal_soc_handle_t hal_soc_hdl,
				 void *reoparams)
{
	struct hal_soc *hal_soc = (struct hal_soc *)hal_soc_hdl;

	hal_soc->ops->hal_reo_setup(hal_soc, reoparams);
}

/**
 * hal_setup_link_idle_list - Setup scattered idle list using the
 * buffer list provided
 *
 * @hal_soc: Opaque HAL SOC handle
 * @scatter_bufs_base_paddr: Array of physical base addresses
 * @scatter_bufs_base_vaddr: Array of virtual base addresses
 * @num_scatter_bufs: Number of scatter buffers in the above lists
 * @scatter_buf_size: Size of each scatter buffer
 * @last_buf_end_offset: Offset to the last entry
 * @num_entries: Total entries of all scatter bufs
 */
static inline
void hal_setup_link_idle_list(hal_soc_handle_t hal_soc_hdl,
			      qdf_dma_addr_t scatter_bufs_base_paddr[],
			      void *scatter_bufs_base_vaddr[],
			      uint32_t num_scatter_bufs,
			      uint32_t scatter_buf_size,
			      uint32_t last_buf_end_offset,
			      uint32_t num_entries)
{
	struct hal_soc *hal_soc = (struct hal_soc *)hal_soc_hdl;

	hal_soc->ops->hal_setup_link_idle_list(hal_soc, scatter_bufs_base_paddr,
			scatter_bufs_base_vaddr, num_scatter_bufs,
			scatter_buf_size, last_buf_end_offset,
			num_entries);
}

/**
 * hal_srng_dump_ring_desc() - Dump ring descriptor info
 *
 * @hal_soc: Opaque HAL SOC handle
 * @hal_ring_hdl: Source ring pointer
 * @ring_desc: Opaque ring descriptor handle
 */
static inline void hal_srng_dump_ring_desc(hal_soc_handle_t hal_soc_hdl,
					   hal_ring_handle_t hal_ring_hdl,
					   hal_ring_desc_t ring_desc)
{
	struct hal_srng *srng = (struct hal_srng *)hal_ring_hdl;

	QDF_TRACE_HEX_DUMP(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_HIGH,
			   ring_desc, (srng->entry_size << 2));
}

/**
 * hal_srng_dump_ring() - Dump last 128 descs of the ring
 *
 * @hal_soc: Opaque HAL SOC handle
 * @hal_ring_hdl: Source ring pointer
 */
static inline void hal_srng_dump_ring(hal_soc_handle_t hal_soc_hdl,
				      hal_ring_handle_t hal_ring_hdl)
{
	struct hal_srng *srng = (struct hal_srng *)hal_ring_hdl;
	uint32_t *desc;
	uint32_t tp, i;

	tp = srng->u.dst_ring.tp;

	for (i = 0; i < 128; i++) {
		if (!tp)
			tp = srng->ring_size;

		desc = &srng->ring_base_vaddr[tp - srng->entry_size];
		QDF_TRACE_HEX_DUMP(QDF_MODULE_ID_DP,
				   QDF_TRACE_LEVEL_DEBUG,
				   desc, (srng->entry_size << 2));

		tp -= srng->entry_size;
	}
}

/*
 * hal_rxdma_desc_to_hal_ring_desc - API to convert rxdma ring desc
 * to opaque dp_ring desc type
 * @ring_desc - rxdma ring desc
 *
 * Return: hal_ring_desc_t type
 */
static inline
hal_ring_desc_t hal_rxdma_desc_to_hal_ring_desc(hal_rxdma_desc_t ring_desc)
{
	return (hal_ring_desc_t)ring_desc;
}

/**
 * hal_srng_set_event() - Set hal_srng event
 * @hal_ring_hdl: Source ring pointer
 * @event: SRNG ring event
 *
 * Return: None
 */
static inline void hal_srng_set_event(hal_ring_handle_t hal_ring_hdl, int event)
{
	struct hal_srng *srng = (struct hal_srng *)hal_ring_hdl;

	qdf_atomic_set_bit(event, &srng->srng_event);
}

/**
 * hal_srng_clear_event() - Clear hal_srng event
 * @hal_ring_hdl: Source ring pointer
 * @event: SRNG ring event
 *
 * Return: None
 */
static inline
void hal_srng_clear_event(hal_ring_handle_t hal_ring_hdl, int event)
{
	struct hal_srng *srng = (struct hal_srng *)hal_ring_hdl;

	qdf_atomic_clear_bit(event, &srng->srng_event);
}

/**
 * hal_srng_get_clear_event() - Clear srng event and return old value
 * @hal_ring_hdl: Source ring pointer
 * @event: SRNG ring event
 *
 * Return: Return old event value
 */
static inline
int hal_srng_get_clear_event(hal_ring_handle_t hal_ring_hdl, int event)
{
	struct hal_srng *srng = (struct hal_srng *)hal_ring_hdl;

	return qdf_atomic_test_and_clear_bit(event, &srng->srng_event);
}

/**
 * hal_srng_set_flush_last_ts() - Record last flush time stamp
 * @hal_ring_hdl: Source ring pointer
 *
 * Return: None
 */
static inline void hal_srng_set_flush_last_ts(hal_ring_handle_t hal_ring_hdl)
{
	struct hal_srng *srng = (struct hal_srng *)hal_ring_hdl;

	srng->last_flush_ts = qdf_get_log_timestamp();
}

/**
 * hal_srng_inc_flush_cnt() - Increment flush counter
 * @hal_ring_hdl: Source ring pointer
 *
 * Return: None
 */
static inline void hal_srng_inc_flush_cnt(hal_ring_handle_t hal_ring_hdl)
{
	struct hal_srng *srng = (struct hal_srng *)hal_ring_hdl;

	srng->flush_count++;
}
#endif /* _HAL_API_H_ */
1958