xref: /wlan-dirver/qca-wifi-host-cmn/hal/wifi3.0/hal_api.h (revision 901120c066e139c7f8a2c8e4820561fdd83c67ef)
1 /*
2  * Copyright (c) 2016-2021 The Linux Foundation. All rights reserved.
3  * Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved.
4  *
5  * Permission to use, copy, modify, and/or distribute this software for
6  * any purpose with or without fee is hereby granted, provided that the
7  * above copyright notice and this permission notice appear in all
8  * copies.
9  *
10  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
11  * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
12  * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
13  * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
14  * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
15  * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
16  * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
17  * PERFORMANCE OF THIS SOFTWARE.
18  */
19 
20 #ifndef _HAL_API_H_
21 #define _HAL_API_H_
22 
23 #include "qdf_types.h"
24 #include "qdf_util.h"
25 #include "qdf_atomic.h"
26 #include "hal_internal.h"
27 #include "hif.h"
28 #include "hif_io32.h"
29 #include "qdf_platform.h"
30 
31 #ifdef DUMP_REO_QUEUE_INFO_IN_DDR
32 #include "hal_hw_headers.h"
33 #endif
34 
35 /* Ring index for WBM2SW2 release ring */
36 #define HAL_IPA_TX_COMP_RING_IDX 2
37 
38 /* calculate the register address offset from bar0 of shadow register x */
39 #if defined(QCA_WIFI_QCA6390) || defined(QCA_WIFI_QCA6490) || \
40     defined(QCA_WIFI_KIWI)
41 #define SHADOW_REGISTER_START_ADDRESS_OFFSET 0x000008FC
42 #define SHADOW_REGISTER_END_ADDRESS_OFFSET \
43 	((SHADOW_REGISTER_START_ADDRESS_OFFSET) + (4 * (MAX_SHADOW_REGISTERS)))
44 #define SHADOW_REGISTER(x) ((SHADOW_REGISTER_START_ADDRESS_OFFSET) + (4 * (x)))
45 #elif defined(QCA_WIFI_QCA6290) || defined(QCA_WIFI_QCN9000)
46 #define SHADOW_REGISTER_START_ADDRESS_OFFSET 0x00003024
47 #define SHADOW_REGISTER_END_ADDRESS_OFFSET \
48 	((SHADOW_REGISTER_START_ADDRESS_OFFSET) + (4 * (MAX_SHADOW_REGISTERS)))
49 #define SHADOW_REGISTER(x) ((SHADOW_REGISTER_START_ADDRESS_OFFSET) + (4 * (x)))
50 #elif defined(QCA_WIFI_QCA6750)
51 #define SHADOW_REGISTER_START_ADDRESS_OFFSET 0x00000504
52 #define SHADOW_REGISTER_END_ADDRESS_OFFSET \
53 	((SHADOW_REGISTER_START_ADDRESS_OFFSET) + (4 * (MAX_SHADOW_REGISTERS)))
54 #define SHADOW_REGISTER(x) ((SHADOW_REGISTER_START_ADDRESS_OFFSET) + (4 * (x)))
55 #else
56 #define SHADOW_REGISTER(x) 0
57 #endif /* QCA_WIFI_QCA6390 || QCA_WIFI_QCA6490 || QCA_WIFI_QCA6750 */
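
/*
 * Example (illustrative only): with the QCA6390/QCA6490/KIWI layout above,
 * shadow register 3 sits at BAR offset
 *
 *	SHADOW_REGISTER(3) = 0x8FC + (4 * 3) = 0x908
 *
 * i.e. the shadow registers form a contiguous array of 32-bit registers
 * starting at SHADOW_REGISTER_START_ADDRESS_OFFSET.
 */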
58 
59 /*
60  * BAR + 4K is always accessible, any access outside this
61  * space requires force wake procedure.
62  * OFFSET = 4K - 32 bytes = 0xFE0
63  */
64 #define MAPPED_REF_OFF 0xFE0
65 
66 #define HAL_OFFSET(block, field) block ## _ ## field ## _OFFSET
67 
68 #ifdef ENABLE_VERBOSE_DEBUG
69 static inline void
70 hal_set_verbose_debug(bool flag)
71 {
72 	is_hal_verbose_debug_enabled = flag;
73 }
74 #endif
75 
76 #ifdef ENABLE_HAL_SOC_STATS
77 #define HAL_STATS_INC(_handle, _field, _delta) \
78 { \
79 	if (likely(_handle)) \
80 		_handle->stats._field += _delta; \
81 }
82 #else
83 #define HAL_STATS_INC(_handle, _field, _delta)
84 #endif
85 
86 #ifdef ENABLE_HAL_REG_WR_HISTORY
87 #define HAL_REG_WRITE_FAIL_HIST_ADD(hal_soc, offset, wr_val, rd_val) \
88 	hal_reg_wr_fail_history_add(hal_soc, offset, wr_val, rd_val)
89 
90 void hal_reg_wr_fail_history_add(struct hal_soc *hal_soc,
91 				 uint32_t offset,
92 				 uint32_t wr_val,
93 				 uint32_t rd_val);
94 
95 static inline int hal_history_get_next_index(qdf_atomic_t *table_index,
96 					     int array_size)
97 {
98 	int record_index = qdf_atomic_inc_return(table_index);
99 
100 	return record_index & (array_size - 1);
101 }
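
/*
 * Example (illustrative only): the mask above implements a cheap circular
 * index, which is why history arrays are expected to have a power-of-2
 * size. With array_size = 32 and the atomic counter currently at 31,
 * successive calls return 0, 1, 2, ... instead of running past the array.
 */
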
102 #else
103 #define HAL_REG_WRITE_FAIL_HIST_ADD(hal_soc, offset, wr_val, rd_val) \
104 	hal_err("write failed at reg offset 0x%x, write 0x%x read 0x%x\n", \
105 		offset,	\
106 		wr_val,	\
107 		rd_val)
108 #endif
109 
110 /**
111  * hal_reg_write_result_check() - check register writing result
112  * @hal_soc: HAL soc handle
113  * @offset: register offset to read
114  * @exp_val: the expected value of register
116  *
117  * Return: none
118  */
119 static inline void hal_reg_write_result_check(struct hal_soc *hal_soc,
120 					      uint32_t offset,
121 					      uint32_t exp_val)
122 {
123 	uint32_t value;
124 
125 	value = qdf_ioread32(hal_soc->dev_base_addr + offset);
126 	if (exp_val != value) {
127 		HAL_REG_WRITE_FAIL_HIST_ADD(hal_soc, offset, exp_val, value);
128 		HAL_STATS_INC(hal_soc, reg_write_fail, 1);
129 	}
130 }
131 
132 #ifdef WINDOW_REG_PLD_LOCK_ENABLE
133 static inline void hal_lock_reg_access(struct hal_soc *soc,
134 				       unsigned long *flags)
135 {
136 	pld_lock_reg_window(soc->qdf_dev->dev, flags);
137 }
138 
139 static inline void hal_unlock_reg_access(struct hal_soc *soc,
140 					 unsigned long *flags)
141 {
142 	pld_unlock_reg_window(soc->qdf_dev->dev, flags);
143 }
144 #else
145 static inline void hal_lock_reg_access(struct hal_soc *soc,
146 				       unsigned long *flags)
147 {
148 	qdf_spin_lock_irqsave(&soc->register_access_lock);
149 }
150 
151 static inline void hal_unlock_reg_access(struct hal_soc *soc,
152 					 unsigned long *flags)
153 {
154 	qdf_spin_unlock_irqrestore(&soc->register_access_lock);
155 }
156 #endif
157 
158 #ifdef PCIE_REG_WINDOW_LOCAL_NO_CACHE
159 /**
160  * hal_select_window_confirm() - write remap window register and
161  *				 check writing result
162  *
163  */
164 static inline void hal_select_window_confirm(struct hal_soc *hal_soc,
165 					     uint32_t offset)
166 {
167 	uint32_t window = (offset >> WINDOW_SHIFT) & WINDOW_VALUE_MASK;
168 
169 	qdf_iowrite32(hal_soc->dev_base_addr + WINDOW_REG_ADDRESS,
170 		      WINDOW_ENABLE_BIT | window);
171 	hal_soc->register_window = window;
172 
173 	hal_reg_write_result_check(hal_soc, WINDOW_REG_ADDRESS,
174 				   WINDOW_ENABLE_BIT | window);
175 }
176 #else
177 static inline void hal_select_window_confirm(struct hal_soc *hal_soc,
178 					     uint32_t offset)
179 {
180 	uint32_t window = (offset >> WINDOW_SHIFT) & WINDOW_VALUE_MASK;
181 
182 	if (window != hal_soc->register_window) {
183 		qdf_iowrite32(hal_soc->dev_base_addr + WINDOW_REG_ADDRESS,
184 			      WINDOW_ENABLE_BIT | window);
185 		hal_soc->register_window = window;
186 
187 		hal_reg_write_result_check(
188 					hal_soc,
189 					WINDOW_REG_ADDRESS,
190 					WINDOW_ENABLE_BIT | window);
191 	}
192 }
193 #endif
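
/*
 * Sketch of how a windowed access is formed (illustrative only; the concrete
 * WINDOW_SHIFT/WINDOW_START/WINDOW_*_MASK values are target specific and are
 * defined outside this file, and the offset below is a made-up placeholder):
 *
 *	uint32_t offset = 0x00A4014C;	// hypothetical register offset
 *	uint32_t window = (offset >> WINDOW_SHIFT) & WINDOW_VALUE_MASK;
 *
 *	// hal_select_window_confirm() programs 'window' (with
 *	// WINDOW_ENABLE_BIT set) into WINDOW_REG_ADDRESS; the register is
 *	// then reachable at:
 *	//	dev_base_addr + WINDOW_START + (offset & WINDOW_RANGE_MASK)
 */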
194 
195 static inline qdf_iomem_t hal_get_window_address(struct hal_soc *hal_soc,
196 						 qdf_iomem_t addr)
197 {
198 	return hal_soc->ops->hal_get_window_address(hal_soc, addr);
199 }
200 
201 static inline void hal_tx_init_cmd_credit_ring(hal_soc_handle_t hal_soc_hdl,
202 					       hal_ring_handle_t hal_ring_hdl)
203 {
204 	struct hal_soc *hal_soc = (struct hal_soc *)hal_soc_hdl;
205 
206 	return hal_soc->ops->hal_tx_init_cmd_credit_ring(hal_soc_hdl,
207 							 hal_ring_hdl);
208 }
209 
210 /**
211  * hal_write32_mb() - Access registers to update configuration
212  * @hal_soc: hal soc handle
213  * @offset: offset address from the BAR
214  * @value: value to write
215  *
216  * Return: None
217  *
218  * Description: Register address space is split below:
219  *     SHADOW REGION       UNWINDOWED REGION    WINDOWED REGION
220  *  |--------------------|-------------------|------------------|
221  * BAR  NO FORCE WAKE  BAR+4K  FORCE WAKE  BAR+512K  FORCE WAKE
222  *
223  * 1. Any access to the shadow region doesn't need force wake
224  *    or windowing logic.
225  * 2. Any access beyond BAR + 4K:
226  *    If init_phase is enabled, no force wake is needed and the access
227  *    goes through the windowed or unwindowed path as appropriate.
228  *    If init_phase is disabled, force wake is needed and the access
229  *    goes through the windowed or unwindowed path as appropriate.
230  *
231  * note1: WINDOW_RANGE_MASK = (1 << WINDOW_SHIFT) -1
232  * note2: 1 << WINDOW_SHIFT = MAX_UNWINDOWED_ADDRESS
233  * note3: WINDOW_VALUE_MASK = big enough that trying to write past
234  *                            that window would be a bug
235  */
236 #if !defined(QCA_WIFI_QCA6390) && !defined(QCA_WIFI_QCA6490) && \
237     !defined(QCA_WIFI_QCA6750) && !defined(QCA_WIFI_KIWI)
238 static inline void hal_write32_mb(struct hal_soc *hal_soc, uint32_t offset,
239 				  uint32_t value)
240 {
241 	unsigned long flags;
242 	qdf_iomem_t new_addr;
243 
244 	if (!hal_soc->use_register_windowing ||
245 	    offset < MAX_UNWINDOWED_ADDRESS) {
246 		qdf_iowrite32(hal_soc->dev_base_addr + offset, value);
247 	} else if (hal_soc->static_window_map) {
248 		new_addr = hal_get_window_address(hal_soc,
249 				hal_soc->dev_base_addr + offset);
250 		qdf_iowrite32(new_addr, value);
251 	} else {
252 		hal_lock_reg_access(hal_soc, &flags);
253 		hal_select_window_confirm(hal_soc, offset);
254 		qdf_iowrite32(hal_soc->dev_base_addr + WINDOW_START +
255 			  (offset & WINDOW_RANGE_MASK), value);
256 		hal_unlock_reg_access(hal_soc, &flags);
257 	}
258 }
259 
260 #define hal_write32_mb_confirm(_hal_soc, _offset, _value) \
261 		hal_write32_mb(_hal_soc, _offset, _value)
262 
263 #define hal_write32_mb_cmem(_hal_soc, _offset, _value)
264 #else
265 static inline void hal_write32_mb(struct hal_soc *hal_soc, uint32_t offset,
266 				  uint32_t value)
267 {
268 	int ret;
269 	unsigned long flags;
270 	qdf_iomem_t new_addr;
271 
272 	if (!TARGET_ACCESS_ALLOWED(HIF_GET_SOFTC(
273 					hal_soc->hif_handle))) {
274 		hal_err_rl("target access is not allowed");
275 		return;
276 	}
277 
278 	/* Region < BAR + 4K can be directly accessed */
279 	if (offset < MAPPED_REF_OFF) {
280 		qdf_iowrite32(hal_soc->dev_base_addr + offset, value);
281 		return;
282 	}
283 
284 	/* Region greater than BAR + 4K */
285 	if (!hal_soc->init_phase) {
286 		ret = hif_force_wake_request(hal_soc->hif_handle);
287 		if (ret) {
288 			hal_err_rl("Wake up request failed");
289 			qdf_check_state_before_panic(__func__, __LINE__);
290 			return;
291 		}
292 	}
293 
294 	if (!hal_soc->use_register_windowing ||
295 	    offset < MAX_UNWINDOWED_ADDRESS) {
296 		qdf_iowrite32(hal_soc->dev_base_addr + offset, value);
297 	} else if (hal_soc->static_window_map) {
298 		new_addr = hal_get_window_address(
299 					hal_soc,
300 					hal_soc->dev_base_addr + offset);
301 		qdf_iowrite32(new_addr, value);
302 	} else {
303 		hal_lock_reg_access(hal_soc, &flags);
304 		hal_select_window_confirm(hal_soc, offset);
305 		qdf_iowrite32(hal_soc->dev_base_addr + WINDOW_START +
306 			  (offset & WINDOW_RANGE_MASK), value);
307 		hal_unlock_reg_access(hal_soc, &flags);
308 	}
309 
310 	if (!hal_soc->init_phase) {
311 		ret = hif_force_wake_release(hal_soc->hif_handle);
312 		if (ret) {
313 			hal_err("Wake up release failed");
314 			qdf_check_state_before_panic(__func__, __LINE__);
315 			return;
316 		}
317 	}
318 }
319 
320 /**
321  * hal_write32_mb_confirm() - write register and check writing result
322  *
323  */
324 static inline void hal_write32_mb_confirm(struct hal_soc *hal_soc,
325 					  uint32_t offset,
326 					  uint32_t value)
327 {
328 	int ret;
329 	unsigned long flags;
330 	qdf_iomem_t new_addr;
331 
332 	if (!TARGET_ACCESS_ALLOWED(HIF_GET_SOFTC(
333 					hal_soc->hif_handle))) {
334 		hal_err_rl("target access is not allowed");
335 		return;
336 	}
337 
338 	/* Region < BAR + 4K can be directly accessed */
339 	if (offset < MAPPED_REF_OFF) {
340 		qdf_iowrite32(hal_soc->dev_base_addr + offset, value);
341 		return;
342 	}
343 
344 	/* Region greater than BAR + 4K */
345 	if (!hal_soc->init_phase) {
346 		ret = hif_force_wake_request(hal_soc->hif_handle);
347 		if (ret) {
348 			hal_err("Wake up request failed");
349 			qdf_check_state_before_panic(__func__, __LINE__);
350 			return;
351 		}
352 	}
353 
354 	if (!hal_soc->use_register_windowing ||
355 	    offset < MAX_UNWINDOWED_ADDRESS) {
356 		qdf_iowrite32(hal_soc->dev_base_addr + offset, value);
357 		hal_reg_write_result_check(hal_soc, offset,
358 					   value);
359 	} else if (hal_soc->static_window_map) {
360 		new_addr = hal_get_window_address(
361 					hal_soc,
362 					hal_soc->dev_base_addr + offset);
363 		qdf_iowrite32(new_addr, value);
364 		hal_reg_write_result_check(hal_soc,
365 					   new_addr - hal_soc->dev_base_addr,
366 					   value);
367 	} else {
368 		hal_lock_reg_access(hal_soc, &flags);
369 		hal_select_window_confirm(hal_soc, offset);
370 		qdf_iowrite32(hal_soc->dev_base_addr + WINDOW_START +
371 			  (offset & WINDOW_RANGE_MASK), value);
372 
373 		hal_reg_write_result_check(
374 				hal_soc,
375 				WINDOW_START + (offset & WINDOW_RANGE_MASK),
376 				value);
377 		hal_unlock_reg_access(hal_soc, &flags);
378 	}
379 
380 	if (!hal_soc->init_phase) {
381 		ret = hif_force_wake_release(hal_soc->hif_handle);
382 		if (ret) {
383 			hal_err("Wake up release failed");
384 			qdf_check_state_before_panic(__func__, __LINE__);
385 			return;
386 		}
387 	}
388 }
389 
390 static inline void hal_write32_mb_cmem(struct hal_soc *hal_soc, uint32_t offset,
391 				       uint32_t value)
392 {
393 	unsigned long flags;
394 	qdf_iomem_t new_addr;
395 
396 	if (!TARGET_ACCESS_ALLOWED(HIF_GET_SOFTC(
397 					hal_soc->hif_handle))) {
398 		hal_err_rl("%s: target access is not allowed", __func__);
399 		return;
400 	}
401 
402 	if (!hal_soc->use_register_windowing ||
403 	    offset < MAX_UNWINDOWED_ADDRESS) {
404 		qdf_iowrite32(hal_soc->dev_base_addr + offset, value);
405 	} else if (hal_soc->static_window_map) {
406 		new_addr = hal_get_window_address(
407 					hal_soc,
408 					hal_soc->dev_base_addr + offset);
409 		qdf_iowrite32(new_addr, value);
410 	} else {
411 		hal_lock_reg_access(hal_soc, &flags);
412 		hal_select_window_confirm(hal_soc, offset);
413 		qdf_iowrite32(hal_soc->dev_base_addr + WINDOW_START +
414 			  (offset & WINDOW_RANGE_MASK), value);
415 		hal_unlock_reg_access(hal_soc, &flags);
416 	}
417 }
418 #endif
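
/*
 * Usage sketch (illustrative only; EXAMPLE_CTRL_REG_OFFSET is a hypothetical
 * placeholder, not a real HW register definition):
 *
 *	// Plain write; windowing and force wake are handled internally.
 *	hal_write32_mb(hal_soc, EXAMPLE_CTRL_REG_OFFSET, 0x1);
 *
 *	// Same write, but the value is read back and a mismatch is logged
 *	// and counted in the HAL stats.
 *	hal_write32_mb_confirm(hal_soc, EXAMPLE_CTRL_REG_OFFSET, 0x1);
 */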
419 
420 /**
421  * hal_write_address_32_mb - write a value to a register
422  *
423  */
424 static inline
425 void hal_write_address_32_mb(struct hal_soc *hal_soc,
426 			     qdf_iomem_t addr, uint32_t value, bool wr_confirm)
427 {
428 	uint32_t offset;
429 
430 	if (!hal_soc->use_register_windowing)
431 		return qdf_iowrite32(addr, value);
432 
433 	offset = addr - hal_soc->dev_base_addr;
434 
435 	if (qdf_unlikely(wr_confirm))
436 		hal_write32_mb_confirm(hal_soc, offset, value);
437 	else
438 		hal_write32_mb(hal_soc, offset, value);
439 }
440 
441 
442 #ifdef DP_HAL_MULTIWINDOW_DIRECT_ACCESS
443 static inline void hal_srng_write_address_32_mb(struct hal_soc *hal_soc,
444 						struct hal_srng *srng,
445 						void __iomem *addr,
446 						uint32_t value)
447 {
448 	qdf_iowrite32(addr, value);
449 }
450 #elif defined(FEATURE_HAL_DELAYED_REG_WRITE)
451 static inline void hal_srng_write_address_32_mb(struct hal_soc *hal_soc,
452 						struct hal_srng *srng,
453 						void __iomem *addr,
454 						uint32_t value)
455 {
456 	hal_delayed_reg_write(hal_soc, srng, addr, value);
457 }
458 #else
459 static inline void hal_srng_write_address_32_mb(struct hal_soc *hal_soc,
460 						struct hal_srng *srng,
461 						void __iomem *addr,
462 						uint32_t value)
463 {
464 	hal_write_address_32_mb(hal_soc, addr, value, false);
465 }
466 #endif
467 
468 #if !defined(QCA_WIFI_QCA6390) && !defined(QCA_WIFI_QCA6490) && \
469     !defined(QCA_WIFI_QCA6750) && !defined(QCA_WIFI_KIWI)
470 /**
471  * hal_read32_mb() - Access registers to read configuration
472  * @hal_soc: hal soc handle
473  * @offset: offset address from the BAR
475  *
476  * Description: Register address space is split below:
477  *     SHADOW REGION       UNWINDOWED REGION    WINDOWED REGION
478  *  |--------------------|-------------------|------------------|
479  * BAR  NO FORCE WAKE  BAR+4K  FORCE WAKE  BAR+512K  FORCE WAKE
480  *
481  * 1. Any access to the shadow region doesn't need force wake
482  *    or windowing logic.
483  * 2. Any access beyond BAR + 4K:
484  *    If init_phase is enabled, no force wake is needed and the access
485  *    goes through the windowed or unwindowed path as appropriate.
486  *    If init_phase is disabled, force wake is needed and the access
487  *    goes through the windowed or unwindowed path as appropriate.
488  *
489  * Return: the value read from the register
490  */
491 static inline uint32_t hal_read32_mb(struct hal_soc *hal_soc, uint32_t offset)
492 {
493 	uint32_t ret;
494 	unsigned long flags;
495 	qdf_iomem_t new_addr;
496 
497 	if (!hal_soc->use_register_windowing ||
498 	    offset < MAX_UNWINDOWED_ADDRESS) {
499 		return qdf_ioread32(hal_soc->dev_base_addr + offset);
500 	} else if (hal_soc->static_window_map) {
501 		new_addr = hal_get_window_address(hal_soc, hal_soc->dev_base_addr + offset);
502 		return qdf_ioread32(new_addr);
503 	}
504 
505 	hal_lock_reg_access(hal_soc, &flags);
506 	hal_select_window_confirm(hal_soc, offset);
507 	ret = qdf_ioread32(hal_soc->dev_base_addr + WINDOW_START +
508 		       (offset & WINDOW_RANGE_MASK));
509 	hal_unlock_reg_access(hal_soc, &flags);
510 
511 	return ret;
512 }
513 
514 #define hal_read32_mb_cmem(_hal_soc, _offset)
515 #else
516 static
517 uint32_t hal_read32_mb(struct hal_soc *hal_soc, uint32_t offset)
518 {
519 	uint32_t ret;
520 	unsigned long flags;
521 	qdf_iomem_t new_addr;
522 
523 	if (!TARGET_ACCESS_ALLOWED(HIF_GET_SOFTC(
524 					hal_soc->hif_handle))) {
525 		hal_err_rl("target access is not allowed");
526 		return 0;
527 	}
528 
529 	/* Region < BAR + 4K can be directly accessed */
530 	if (offset < MAPPED_REF_OFF)
531 		return qdf_ioread32(hal_soc->dev_base_addr + offset);
532 
533 	if ((!hal_soc->init_phase) &&
534 	    hif_force_wake_request(hal_soc->hif_handle)) {
535 		hal_err("Wake up request failed");
536 		qdf_check_state_before_panic(__func__, __LINE__);
537 		return 0;
538 	}
539 
540 	if (!hal_soc->use_register_windowing ||
541 	    offset < MAX_UNWINDOWED_ADDRESS) {
542 		ret = qdf_ioread32(hal_soc->dev_base_addr + offset);
543 	} else if (hal_soc->static_window_map) {
544 		new_addr = hal_get_window_address(
545 					hal_soc,
546 					hal_soc->dev_base_addr + offset);
547 		ret = qdf_ioread32(new_addr);
548 	} else {
549 		hal_lock_reg_access(hal_soc, &flags);
550 		hal_select_window_confirm(hal_soc, offset);
551 		ret = qdf_ioread32(hal_soc->dev_base_addr + WINDOW_START +
552 			       (offset & WINDOW_RANGE_MASK));
553 		hal_unlock_reg_access(hal_soc, &flags);
554 	}
555 
556 	if ((!hal_soc->init_phase) &&
557 	    hif_force_wake_release(hal_soc->hif_handle)) {
558 		hal_err("Wake up release failed");
559 		qdf_check_state_before_panic(__func__, __LINE__);
560 		return 0;
561 	}
562 
563 	return ret;
564 }
565 
566 static inline
567 uint32_t hal_read32_mb_cmem(struct hal_soc *hal_soc, uint32_t offset)
568 {
569 	uint32_t ret;
570 	unsigned long flags;
571 	qdf_iomem_t new_addr;
572 
573 	if (!TARGET_ACCESS_ALLOWED(HIF_GET_SOFTC(
574 					hal_soc->hif_handle))) {
575 		hal_err_rl("%s: target access is not allowed", __func__);
576 		return 0;
577 	}
578 
579 	if (!hal_soc->use_register_windowing ||
580 	    offset < MAX_UNWINDOWED_ADDRESS) {
581 		ret = qdf_ioread32(hal_soc->dev_base_addr + offset);
582 	} else if (hal_soc->static_window_map) {
583 		new_addr = hal_get_window_address(
584 					hal_soc,
585 					hal_soc->dev_base_addr + offset);
586 		ret = qdf_ioread32(new_addr);
587 	} else {
588 		hal_lock_reg_access(hal_soc, &flags);
589 		hal_select_window_confirm(hal_soc, offset);
590 		ret = qdf_ioread32(hal_soc->dev_base_addr + WINDOW_START +
591 			       (offset & WINDOW_RANGE_MASK));
592 		hal_unlock_reg_access(hal_soc, &flags);
593 	}
594 	return ret;
595 }
596 #endif
597 
598 /* Max times allowed for register writing retry */
599 #define HAL_REG_WRITE_RETRY_MAX		5
600 /* Delay milliseconds for each time retry */
601 #define HAL_REG_WRITE_RETRY_DELAY	1
602 
603 #ifdef GENERIC_SHADOW_REGISTER_ACCESS_ENABLE
604 /* To check shadow config index range between 0..31 */
605 #define HAL_SHADOW_REG_INDEX_LOW 32
606 /* To check shadow config index range between 32..39 */
607 #define HAL_SHADOW_REG_INDEX_HIGH 40
608 /* Dirty bit reg offsets corresponding to shadow config index */
609 #define HAL_SHADOW_REG_DIRTY_BIT_DATA_LOW_OFFSET 0x30C8
610 #define HAL_SHADOW_REG_DIRTY_BIT_DATA_HIGH_OFFSET 0x30C4
611 /* PCIE_PCIE_TOP base addr offset */
612 #define HAL_PCIE_PCIE_TOP_WRAPPER 0x01E00000
613 /* Max retry attempts to read the dirty bit reg */
614 #ifdef HAL_CONFIG_SLUB_DEBUG_ON
615 #define HAL_SHADOW_DIRTY_BIT_POLL_MAX 10000
616 #else
617 #define HAL_SHADOW_DIRTY_BIT_POLL_MAX 2000
618 #endif
619 /* Delay in usecs for polling dirty bit reg */
620 #define HAL_SHADOW_DIRTY_BIT_POLL_DELAY 5
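
/*
 * Worst-case dirty bit poll time implied by the values above:
 *	2000 iterations  * 5 us = 10 ms (default)
 *	10000 iterations * 5 us = 50 ms (HAL_CONFIG_SLUB_DEBUG_ON)
 */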
621 
622 /**
623  * hal_poll_dirty_bit_reg() - Poll dirty register bit to confirm
624  * write was successful
625  * @hal_soc: hal soc handle
626  * @shadow_config_index: index of shadow reg used to confirm
627  * write
628  *
629  * Return: QDF_STATUS_SUCCESS on success
630  */
631 static inline QDF_STATUS hal_poll_dirty_bit_reg(struct hal_soc *hal,
632 						int shadow_config_index)
633 {
634 	uint32_t read_value = 0;
635 	int retry_cnt = 0;
636 	uint32_t reg_offset = 0;
637 
638 	if (shadow_config_index > 0 &&
639 	    shadow_config_index < HAL_SHADOW_REG_INDEX_LOW) {
640 		reg_offset =
641 			HAL_SHADOW_REG_DIRTY_BIT_DATA_LOW_OFFSET;
642 	} else if (shadow_config_index >= HAL_SHADOW_REG_INDEX_LOW &&
643 		   shadow_config_index < HAL_SHADOW_REG_INDEX_HIGH) {
644 		reg_offset =
645 			HAL_SHADOW_REG_DIRTY_BIT_DATA_HIGH_OFFSET;
646 	} else {
647 		hal_err("Invalid shadow_config_index = %d",
648 			shadow_config_index);
649 		return QDF_STATUS_E_INVAL;
650 	}
651 	while (retry_cnt < HAL_SHADOW_DIRTY_BIT_POLL_MAX) {
652 		read_value = hal_read32_mb(
653 				hal, HAL_PCIE_PCIE_TOP_WRAPPER + reg_offset);
654 		/* Check if dirty bit corresponding to shadow_index is set */
655 		if (read_value & BIT(shadow_config_index)) {
656 			/* Dirty reg bit not reset */
657 			qdf_udelay(HAL_SHADOW_DIRTY_BIT_POLL_DELAY);
658 			retry_cnt++;
659 		} else {
660 			hal_debug("Shadow write: offset 0x%x read val 0x%x",
661 				  reg_offset, read_value);
662 			return QDF_STATUS_SUCCESS;
663 		}
664 	}
665 	return QDF_STATUS_E_TIMEOUT;
666 }
667 
668 /**
669  * hal_write32_mb_shadow_confirm() - write to shadow reg and
670  * poll dirty register bit to confirm write
671  * @hal_soc: hal soc handle
672  * @reg_offset: target reg offset address from BAR
673  * @value: value to write
674  *
675  * Return: QDF_STATUS_SUCCESS on success
676  */
677 static inline QDF_STATUS hal_write32_mb_shadow_confirm(
678 	struct hal_soc *hal,
679 	uint32_t reg_offset,
680 	uint32_t value)
681 {
682 	int i;
683 	QDF_STATUS ret = QDF_STATUS_E_FAILURE;
684 	uint32_t shadow_reg_offset;
685 	int shadow_config_index;
686 	bool is_reg_offset_present = false;
687 
688 	for (i = 0; i < MAX_GENERIC_SHADOW_REG; i++) {
689 		/* Found the shadow config for the reg_offset */
690 		struct shadow_reg_config *hal_shadow_reg_list =
691 			&hal->list_shadow_reg_config[i];
692 		if (hal_shadow_reg_list->target_register ==
693 			reg_offset) {
694 			shadow_config_index =
695 				hal_shadow_reg_list->shadow_config_index;
696 			shadow_reg_offset =
697 				SHADOW_REGISTER(shadow_config_index);
698 			hal_write32_mb_confirm(
699 				hal, shadow_reg_offset, value);
700 			is_reg_offset_present = true;
701 			break;
702 		}
703 		ret = QDF_STATUS_E_FAILURE;
704 	}
705 	if (is_reg_offset_present) {
706 		ret = hal_poll_dirty_bit_reg(hal, shadow_config_index);
707 		hal_info("Shadow write:reg 0x%x val 0x%x ret %d",
708 			 reg_offset, value, ret);
709 		if (QDF_IS_STATUS_ERROR(ret)) {
710 			HAL_STATS_INC(hal, shadow_reg_write_fail, 1);
711 			return ret;
712 		}
713 		HAL_STATS_INC(hal, shadow_reg_write_succ, 1);
714 	}
715 	return ret;
716 }
717 
718 /**
719  * hal_write32_mb_confirm_retry() - write register with confirming and
720  *				    do retry/recovery if writing failed
721  * @hal_soc: hal soc handle
722  * @offset: offset address from the BAR
723  * @value: value to write
724  * @recovery: is recovery needed or not.
725  *
726  * Write the register value with confirming and read it back, if
727  * read back value is not as expected, do retry for writing, if
728  * retry hit max times allowed but still fail, check if recovery
729  * needed.
730  *
731  * Return: None
732  */
733 static inline void hal_write32_mb_confirm_retry(struct hal_soc *hal_soc,
734 						uint32_t offset,
735 						uint32_t value,
736 						bool recovery)
737 {
738 	QDF_STATUS ret;
739 
740 	ret = hal_write32_mb_shadow_confirm(hal_soc, offset, value);
741 	if (QDF_IS_STATUS_ERROR(ret) && recovery)
742 		qdf_trigger_self_recovery(NULL, QDF_HAL_REG_WRITE_FAILURE);
743 }
744 #else /* GENERIC_SHADOW_REGISTER_ACCESS_ENABLE */
745 
746 static inline void hal_write32_mb_confirm_retry(struct hal_soc *hal_soc,
747 						uint32_t offset,
748 						uint32_t value,
749 						bool recovery)
750 {
751 	uint8_t retry_cnt = 0;
752 	uint32_t read_value;
753 
754 	while (retry_cnt <= HAL_REG_WRITE_RETRY_MAX) {
755 		hal_write32_mb_confirm(hal_soc, offset, value);
756 		read_value = hal_read32_mb(hal_soc, offset);
757 		if (qdf_likely(read_value == value))
758 			break;
759 
760 		/* write failed, do retry */
761 		hal_warn("Retry reg offset 0x%x, value 0x%x, read value 0x%x",
762 			 offset, value, read_value);
763 		qdf_mdelay(HAL_REG_WRITE_RETRY_DELAY);
764 		retry_cnt++;
765 	}
766 
767 	if (retry_cnt > HAL_REG_WRITE_RETRY_MAX && recovery)
768 		qdf_trigger_self_recovery(NULL, QDF_HAL_REG_WRITE_FAILURE);
769 }
770 #endif /* GENERIC_SHADOW_REGISTER_ACCESS_ENABLE */
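
/*
 * Usage sketch (illustrative only; the offset is a hypothetical placeholder):
 *
 *	// Write the register, read it back, and retry up to
 *	// HAL_REG_WRITE_RETRY_MAX times (or go through the shadow register
 *	// path when GENERIC_SHADOW_REGISTER_ACCESS_ENABLE is defined).
 *	// With recovery == true, self recovery is triggered if the write
 *	// still does not stick.
 *	hal_write32_mb_confirm_retry(hal_soc, EXAMPLE_CTRL_REG_OFFSET,
 *				     value, true);
 */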
771 
772 #if defined(FEATURE_HAL_DELAYED_REG_WRITE)
773 /**
774  * hal_dump_reg_write_srng_stats() - dump SRNG reg write stats
775  * @hal_soc: HAL soc handle
776  *
777  * Return: none
778  */
779 void hal_dump_reg_write_srng_stats(hal_soc_handle_t hal_soc_hdl);
780 
781 /**
782  * hal_dump_reg_write_stats() - dump reg write stats
783  * @hal_soc: HAL soc handle
784  *
785  * Return: none
786  */
787 void hal_dump_reg_write_stats(hal_soc_handle_t hal_soc_hdl);
788 
789 /**
790  * hal_get_reg_write_pending_work() - get the number of entries
791  *		pending in the workqueue to be processed.
792  * @hal_soc: HAL soc handle
793  *
794  * Returns: the number of entries pending to be processed
795  */
796 int hal_get_reg_write_pending_work(void *hal_soc);
797 
798 #else
799 static inline void hal_dump_reg_write_srng_stats(hal_soc_handle_t hal_soc_hdl)
800 {
801 }
802 
803 static inline void hal_dump_reg_write_stats(hal_soc_handle_t hal_soc_hdl)
804 {
805 }
806 
807 static inline int hal_get_reg_write_pending_work(void *hal_soc)
808 {
809 	return 0;
810 }
811 #endif
812 
813 /**
814  * hal_read_address_32_mb() - Read 32-bit value from the register
815  * @soc: soc handle
816  * @addr: register address to read
817  *
818  * Return: 32-bit value
819  */
820 static inline
821 uint32_t hal_read_address_32_mb(struct hal_soc *soc,
822 				qdf_iomem_t addr)
823 {
824 	uint32_t offset;
825 	uint32_t ret;
826 
827 	if (!soc->use_register_windowing)
828 		return qdf_ioread32(addr);
829 
830 	offset = addr - soc->dev_base_addr;
831 	ret = hal_read32_mb(soc, offset);
832 	return ret;
833 }
834 
835 /**
836  * hal_attach - Initialize HAL layer
837  * @hif_handle: Opaque HIF handle
838  * @qdf_dev: QDF device
839  *
840  * Return: Opaque HAL SOC handle
841  *		 NULL on failure
842  *
843  * This function should be called as part of HIF initialization (for accessing
844  * copy engines). DP layer will get hal_soc handle using hif_get_hal_handle()
845  */
846 void *hal_attach(struct hif_opaque_softc *hif_handle, qdf_device_t qdf_dev);
847 
848 /**
849  * hal_detach - Detach HAL layer
850  * @hal_soc: HAL SOC handle
851  *
852  * This function should be called as part of HIF detach
853  *
854  */
855 extern void hal_detach(void *hal_soc);
856 
857 #define HAL_SRNG_LMAC_RING 0x80000000
858 /* SRNG flags passed in hal_srng_params.flags */
859 #define HAL_SRNG_MSI_SWAP				0x00000008
860 #define HAL_SRNG_RING_PTR_SWAP			0x00000010
861 #define HAL_SRNG_DATA_TLV_SWAP			0x00000020
862 #define HAL_SRNG_LOW_THRES_INTR_ENABLE	0x00010000
863 #define HAL_SRNG_MSI_INTR				0x00020000
864 #define HAL_SRNG_CACHED_DESC		0x00040000
865 
866 #if defined(QCA_WIFI_QCA6490)  || defined(QCA_WIFI_KIWI)
867 #define HAL_SRNG_PREFETCH_TIMER 1
868 #else
869 #define HAL_SRNG_PREFETCH_TIMER 0
870 #endif
871 
872 #define PN_SIZE_24 0
873 #define PN_SIZE_48 1
874 #define PN_SIZE_128 2
875 
876 #ifdef FORCE_WAKE
877 /**
878  * hal_set_init_phase() - Indicate initialization of
879  *                        datapath rings
880  * @soc: hal_soc handle
881  * @init_phase: flag to indicate datapath rings
882  *              initialization status
883  *
884  * Return: None
885  */
886 void hal_set_init_phase(hal_soc_handle_t soc, bool init_phase);
887 #else
888 static inline
889 void hal_set_init_phase(hal_soc_handle_t soc, bool init_phase)
890 {
891 }
892 #endif /* FORCE_WAKE */
893 
894 /**
895  * hal_srng_get_entrysize - Returns size of ring entry in bytes. Should be
896  * used by callers for calculating the size of memory to be allocated before
897  * calling hal_srng_setup to setup the ring
898  *
899  * @hal_soc: Opaque HAL SOC handle
900  * @ring_type: one of the types from hal_ring_type
901  *
902  */
903 extern uint32_t hal_srng_get_entrysize(void *hal_soc, int ring_type);
904 
905 /**
906  * hal_srng_max_entries - Returns maximum possible number of ring entries
907  * @hal_soc: Opaque HAL SOC handle
908  * @ring_type: one of the types from hal_ring_type
909  *
910  * Return: Maximum number of entries for the given ring_type
911  */
912 uint32_t hal_srng_max_entries(void *hal_soc, int ring_type);
913 
914 void hal_set_low_threshold(hal_ring_handle_t hal_ring_hdl,
915 				 uint32_t low_threshold);
916 
917 /**
918  * hal_srng_dump - Dump ring status
919  * @srng: hal srng pointer
920  */
921 void hal_srng_dump(struct hal_srng *srng);
922 
923 /**
924  * hal_srng_get_dir - Returns the direction of the ring
925  * @hal_soc: Opaque HAL SOC handle
926  * @ring_type: one of the types from hal_ring_type
927  *
928  * Return: Ring direction
929  */
930 enum hal_srng_dir hal_srng_get_dir(void *hal_soc, int ring_type);
931 
932 /* HAL memory information */
933 struct hal_mem_info {
934 	/* dev base virtual addr */
935 	void *dev_base_addr;
936 	/* dev base physical addr */
937 	void *dev_base_paddr;
938 	/* dev base ce virtual addr - applicable only for qca5018  */
939 	/* In qca5018 CE register are outside wcss block */
940 	/* using a separate address space to access CE registers */
941 	void *dev_base_addr_ce;
942 	/* dev base ce physical addr */
943 	void *dev_base_paddr_ce;
944 	/* Remote virtual pointer memory for HW/FW updates */
945 	void *shadow_rdptr_mem_vaddr;
946 	/* Remote physical pointer memory for HW/FW updates */
947 	void *shadow_rdptr_mem_paddr;
948 	/* Shared memory for ring pointer updates from host to FW */
949 	void *shadow_wrptr_mem_vaddr;
950 	/* Shared physical memory for ring pointer updates from host to FW */
951 	void *shadow_wrptr_mem_paddr;
952 	/* lmac srng start id */
953 	uint8_t lmac_srng_start_id;
954 };
955 
956 /* SRNG parameters to be passed to hal_srng_setup */
957 struct hal_srng_params {
958 	/* Physical base address of the ring */
959 	qdf_dma_addr_t ring_base_paddr;
960 	/* Virtual base address of the ring */
961 	void *ring_base_vaddr;
962 	/* Number of entries in ring */
963 	uint32_t num_entries;
964 	/* max transfer length */
965 	uint16_t max_buffer_length;
966 	/* MSI Address */
967 	qdf_dma_addr_t msi_addr;
968 	/* MSI data */
969 	uint32_t msi_data;
970 	/* Interrupt timer threshold – in micro seconds */
971 	uint32_t intr_timer_thres_us;
972 	/* Interrupt batch counter threshold – in number of ring entries */
973 	uint32_t intr_batch_cntr_thres_entries;
974 	/* Low threshold – in number of ring entries
975 	 * (valid for src rings only)
976 	 */
977 	uint32_t low_threshold;
978 	/* Misc flags */
979 	uint32_t flags;
980 	/* Unique ring id */
981 	uint8_t ring_id;
982 	/* Source or Destination ring */
983 	enum hal_srng_dir ring_dir;
984 	/* Size of ring entry */
985 	uint32_t entry_size;
986 	/* hw register base address */
987 	void *hwreg_base[MAX_SRNG_REG_GROUPS];
988 	/* prefetch timer config - in micro seconds */
989 	uint32_t prefetch_timer;
990 #ifdef WLAN_FEATURE_NEAR_FULL_IRQ
991 	/* Near full IRQ support flag */
992 	uint32_t nf_irq_support;
993 	/* MSI2 Address */
994 	qdf_dma_addr_t msi2_addr;
995 	/* MSI2 data */
996 	uint32_t msi2_data;
997 	/* Critical threshold */
998 	uint16_t crit_thresh;
999 	/* High threshold */
1000 	uint16_t high_thresh;
1001 	/* Safe threshold */
1002 	uint16_t safe_thresh;
1003 #endif
1004 };
1005 
1006 /* hal_construct_srng_shadow_regs() - initialize the shadow
1007  * registers for srngs
1008  * @hal_soc: hal handle
1009  *
1010  * Return: QDF_STATUS_OK on success
1011  */
1012 QDF_STATUS hal_construct_srng_shadow_regs(void *hal_soc);
1013 
1014 /* hal_set_one_shadow_config() - add a config for the specified ring
1015  * @hal_soc: hal handle
1016  * @ring_type: ring type
1017  * @ring_num: ring num
1018  *
1019  * The ring type and ring num uniquely specify the ring.  After this call,
1020  * the hp/tp will be added as the next entry in the shadow register
1021  * configuration table.  The hal code will use the shadow register address
1022  * in place of the hp/tp address.
1023  *
1024  * This function is exposed, so that the CE module can skip configuring shadow
1025  * registers for unused ring and rings assigned to the firmware.
1026  *
1027  * Return: QDF_STATUS_OK on success
1028  */
1029 QDF_STATUS hal_set_one_shadow_config(void *hal_soc, int ring_type,
1030 				     int ring_num);
1031 /**
1032  * hal_get_shadow_config() - retrieve the config table for shadow cfg v2
1033  * @hal_soc: hal handle
1034  * @shadow_config: will point to the shadow config table after the call
1035  * @num_shadow_registers_configured: will contain the number of valid entries
1036  */
1037 extern void
1038 hal_get_shadow_config(void *hal_soc,
1039 		      struct pld_shadow_reg_v2_cfg **shadow_config,
1040 		      int *num_shadow_registers_configured);
1041 
1042 #ifdef CONFIG_SHADOW_V3
1043 /**
1044  * hal_get_shadow_v3_config() - retrieve the config table for shadow cfg v3
1045  * @hal_soc: hal handle
1046  * @shadow_config: will point to the shadow config table after the call
1047  * @num_shadow_registers_configured: will contain the number of valid entries
1048  */
1049 extern void
1050 hal_get_shadow_v3_config(void *hal_soc,
1051 			 struct pld_shadow_reg_v3_cfg **shadow_config,
1052 			 int *num_shadow_registers_configured);
1053 #endif
1054 
1055 #ifdef WLAN_FEATURE_NEAR_FULL_IRQ
1056 /**
1057  * hal_srng_is_near_full_irq_supported() - Check if srng supports near full irq
1058  * @hal_soc: HAL SoC handle [To be validated by caller]
1059  * @ring_type: srng type
1060  * @ring_num: The index of the srng (of the same type)
1061  *
1062  * Return: true, if srng support near full irq trigger
1063  *	false, if the srng does not support near full irq support.
1064  */
1065 bool hal_srng_is_near_full_irq_supported(hal_soc_handle_t hal_soc,
1066 					 int ring_type, int ring_num);
1067 #else
1068 static inline
1069 bool hal_srng_is_near_full_irq_supported(hal_soc_handle_t hal_soc,
1070 					 int ring_type, int ring_num)
1071 {
1072 	return false;
1073 }
1074 #endif
1075 
1076 /**
1077  * hal_srng_setup - Initialize HW SRNG ring.
1078  *
1079  * @hal_soc: Opaque HAL SOC handle
1080  * @ring_type: one of the types from hal_ring_type
1081  * @ring_num: Ring number if there are multiple rings of
1082  *		same type (starting from 0)
1083  * @mac_id: valid MAC Id should be passed if ring type is one of lmac rings
1084  * @ring_params: SRNG ring params in hal_srng_params structure.
1085  * @idle_check: Check if ring is idle
1086  *
1087  * Callers are expected to allocate contiguous ring memory of size
1088  * 'num_entries * entry_size' bytes and pass the physical and virtual base
1089  * addresses through 'ring_base_paddr' and 'ring_base_vaddr' in hal_srng_params
1090  * structure. Ring base address should be 8 byte aligned and size of each ring
1091  * entry should be queried using the API hal_srng_get_entrysize
1092  *
1093  * Return: Opaque pointer to ring on success
1094  *		 NULL on failure (if given ring is not available)
1095  */
1096 extern void *hal_srng_setup(void *hal_soc, int ring_type, int ring_num,
1097 	int mac_id, struct hal_srng_params *ring_params, bool idle_check);
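
/*
 * Setup sketch (illustrative only): the ring memory is allocated by the
 * caller using the entry size reported by HAL. REO_DST is used as an example
 * ring type and qdf_mem_alloc_consistent() as an assumed QDF allocator; they
 * are not requirements of this API.
 *
 *	struct hal_srng_params params = {0};
 *	uint32_t entry_size = hal_srng_get_entrysize(hal_soc, REO_DST);
 *	uint32_t num_entries = 1024;
 *
 *	// 8-byte aligned, contiguous ring memory of num_entries * entry_size
 *	params.ring_base_vaddr = qdf_mem_alloc_consistent(
 *			qdf_dev, qdf_dev->dev, num_entries * entry_size,
 *			&params.ring_base_paddr);
 *	params.num_entries = num_entries;
 *
 *	void *srng = hal_srng_setup(hal_soc, REO_DST, 0, 0, &params, false);
 */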
1098 
1099 /* Remapping ids of REO rings */
1100 #define REO_REMAP_TCL 0
1101 #define REO_REMAP_SW1 1
1102 #define REO_REMAP_SW2 2
1103 #define REO_REMAP_SW3 3
1104 #define REO_REMAP_SW4 4
1105 #define REO_REMAP_RELEASE 5
1106 #define REO_REMAP_FW 6
1107 /*
1108  * In Beryllium: 4 bits REO destination ring value is defined as: 0: TCL
1109  * 1:SW1  2:SW2  3:SW3  4:SW4  5:Release  6:FW(WIFI)  7:SW5
1110  * 8:SW6 9:SW7  10:SW8  11: NOT_USED.
1111  *
1112  */
1113 #define REO_REMAP_SW5 7
1114 #define REO_REMAP_SW6 8
1115 #define REO_REMAP_SW7 9
1116 #define REO_REMAP_SW8 10
1117 
1118 /*
1119  * Macro to access HWIO_REO_R0_ERROR_DESTINATION_RING_CTRL_IX_0
1120  * to map destination to rings
1121  */
1122 #define HAL_REO_ERR_REMAP_IX0(_VALUE, _OFFSET) \
1123 	((_VALUE) << \
1124 	 (HWIO_REO_R0_ERROR_DESTINATION_MAPPING_IX_0_ERROR_ ## \
1125 	  DESTINATION_RING_ ## _OFFSET ## _SHFT))
1126 
1127 /*
1128  * Macro to access HWIO_REO_R0_ERROR_DESTINATION_RING_CTRL_IX_1
1129  * to map destination to rings
1130  */
1131 #define HAL_REO_ERR_REMAP_IX1(_VALUE, _OFFSET) \
1132 	((_VALUE) << \
1133 	 (HWIO_REO_R0_ERROR_DESTINATION_MAPPING_IX_1_ERROR_ ## \
1134 	  DESTINATION_RING_ ## _OFFSET ## _SHFT))
1135 
1136 /*
1137  * Macro to access HWIO_REO_R0_DESTINATION_RING_CTRL_IX_0
1138  * to map destination to rings
1139  */
1140 #define HAL_REO_REMAP_IX0(_VALUE, _OFFSET) \
1141 	((_VALUE) << \
1142 	 (HWIO_REO_R0_DESTINATION_RING_CTRL_IX_0_DEST_RING_MAPPING_ ## \
1143 	  _OFFSET ## _SHFT))
1144 
1145 /*
1146  * Macro to access HWIO_REO_R0_DESTINATION_RING_CTRL_IX_1
1147  * to map destination to rings
1148  */
1149 #define HAL_REO_REMAP_IX2(_VALUE, _OFFSET) \
1150 	((_VALUE) << \
1151 	 (HWIO_REO_R0_DESTINATION_RING_CTRL_IX_2_DEST_RING_MAPPING_ ## \
1152 	  _OFFSET ## _SHFT))
1153 
1154 /*
1155  * Macro to access HWIO_REO_R0_DESTINATION_RING_CTRL_IX_3
1156  * to map destination to rings
1157  */
1158 #define HAL_REO_REMAP_IX3(_VALUE, _OFFSET) \
1159 	((_VALUE) << \
1160 	 (HWIO_REO_R0_DESTINATION_RING_CTRL_IX_3_DEST_RING_MAPPING_ ## \
1161 	  _OFFSET ## _SHFT))
1162 
1163 /**
1164  * hal_reo_read_write_ctrl_ix - Read or write REO_DESTINATION_RING_CTRL_IX
1165  * @hal_soc_hdl: HAL SOC handle
1166  * @read: boolean value to indicate if read or write
1167  * @ix0: pointer to store IX0 reg value
1168  * @ix1: pointer to store IX1 reg value
1169  * @ix2: pointer to store IX2 reg value
1170  * @ix3: pointer to store IX3 reg value
1171  */
1172 void hal_reo_read_write_ctrl_ix(hal_soc_handle_t hal_soc_hdl, bool read,
1173 				uint32_t *ix0, uint32_t *ix1,
1174 				uint32_t *ix2, uint32_t *ix3);
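
/*
 * Illustrative use of the remap helpers (the destination ring index tokens,
 * e.g. 16..19 for IX2, depend on the target's HWIO headers and are shown
 * here only as an assumption):
 *
 *	uint32_t ix2 = HAL_REO_REMAP_IX2(REO_REMAP_SW1, 16) |
 *		       HAL_REO_REMAP_IX2(REO_REMAP_SW2, 17) |
 *		       HAL_REO_REMAP_IX2(REO_REMAP_SW3, 18) |
 *		       HAL_REO_REMAP_IX2(REO_REMAP_SW4, 19);
 *
 *	// read == false requests a write of the supplied values
 *	hal_reo_read_write_ctrl_ix(hal_soc_hdl, false, NULL, NULL, &ix2, NULL);
 */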
1175 
1176 /**
1177  * hal_srng_dst_set_hp_paddr_confirm() - Set physical address to dest SRNG head
1178  *  pointer and confirm that write went through by reading back the value
1179  * @sring: srng pointer
1180  * @paddr: physical address
1181  *
1182  * Return: None
1183  */
1184 extern void hal_srng_dst_set_hp_paddr_confirm(struct hal_srng *sring,
1185 					      uint64_t paddr);
1186 
1187 /**
1188  * hal_srng_dst_init_hp() - Initialize head pointer with cached head pointer
1189  * @hal_soc: hal_soc handle
1190  * @srng: srng pointer
1191  * @vaddr: virtual address
1192  */
1193 void hal_srng_dst_init_hp(struct hal_soc_handle *hal_soc,
1194 			  struct hal_srng *srng,
1195 			  uint32_t *vaddr);
1196 
1197 /**
1198  * hal_srng_cleanup - Deinitialize HW SRNG ring.
1199  * @hal_soc: Opaque HAL SOC handle
1200  * @hal_ring_hdl: Opaque HAL SRNG pointer
1201  */
1202 void hal_srng_cleanup(void *hal_soc, hal_ring_handle_t hal_ring_hdl);
1203 
1204 static inline bool hal_srng_initialized(hal_ring_handle_t hal_ring_hdl)
1205 {
1206 	struct hal_srng *srng = (struct hal_srng *)hal_ring_hdl;
1207 
1208 	return !!srng->initialized;
1209 }
1210 
1211 /**
1212  * hal_srng_dst_peek - Check if there are any entries in the ring (peek)
1213  * @hal_soc: Opaque HAL SOC handle
1214  * @hal_ring_hdl: Destination ring pointer
1215  *
1216  * Caller takes responsibility for any locking needs.
1217  *
1218  * Return: Opaque pointer for next ring entry; NULL on failure
1219  */
1220 static inline
1221 void *hal_srng_dst_peek(hal_soc_handle_t hal_soc_hdl,
1222 			hal_ring_handle_t hal_ring_hdl)
1223 {
1224 	struct hal_srng *srng = (struct hal_srng *)hal_ring_hdl;
1225 
1226 	if (srng->u.dst_ring.tp != srng->u.dst_ring.cached_hp)
1227 		return (void *)(&srng->ring_base_vaddr[srng->u.dst_ring.tp]);
1228 
1229 	return NULL;
1230 }
1231 
1232 
1233 /**
1234  * hal_mem_dma_cache_sync - Cache sync the specified virtual address Range
1235  * @hal_soc: HAL soc handle
1236  * @desc: desc start address
1237  * @entry_size: size of memory to sync
1238  *
1239  * Return: void
1240  */
1241 #if defined(__LINUX_MIPS32_ARCH__) || defined(__LINUX_MIPS64_ARCH__)
1242 static inline void hal_mem_dma_cache_sync(struct hal_soc *soc, uint32_t *desc,
1243 					  uint32_t entry_size)
1244 {
1245 	qdf_nbuf_dma_inv_range((void *)desc, (void *)(desc + entry_size));
1246 }
1247 #else
1248 static inline void hal_mem_dma_cache_sync(struct hal_soc *soc, uint32_t *desc,
1249 					  uint32_t entry_size)
1250 {
1251 	qdf_mem_dma_cache_sync(soc->qdf_dev, qdf_mem_virt_to_phys(desc),
1252 			       QDF_DMA_FROM_DEVICE,
1253 			       (entry_size * sizeof(uint32_t)));
1254 }
1255 #endif
1256 
1257 /**
1258  * hal_srng_access_start_unlocked - Start ring access (unlocked). Should use
1259  * hal_srng_access_start if locked access is required
1260  *
1261  * @hal_soc: Opaque HAL SOC handle
1262  * @hal_ring_hdl: Ring pointer (Source or Destination ring)
1263  *
1264  * This API doesn't implement any byte-order conversion on reading hp/tp.
1265  * So, Use API only for those srngs for which the target writes hp/tp values to
1266  * the DDR in the Host order.
1267  *
1268  * Return: 0 on success; error on failure
1269  */
1270 static inline int
1271 hal_srng_access_start_unlocked(hal_soc_handle_t hal_soc_hdl,
1272 			       hal_ring_handle_t hal_ring_hdl)
1273 {
1274 	struct hal_srng *srng = (struct hal_srng *)hal_ring_hdl;
1275 	struct hal_soc *soc = (struct hal_soc *)hal_soc_hdl;
1276 	uint32_t *desc;
1277 
1278 	if (srng->ring_dir == HAL_SRNG_SRC_RING)
1279 		srng->u.src_ring.cached_tp =
1280 			*(volatile uint32_t *)(srng->u.src_ring.tp_addr);
1281 	else {
1282 		srng->u.dst_ring.cached_hp =
1283 			*(volatile uint32_t *)(srng->u.dst_ring.hp_addr);
1284 
1285 		if (srng->flags & HAL_SRNG_CACHED_DESC) {
1286 			desc = hal_srng_dst_peek(hal_soc_hdl, hal_ring_hdl);
1287 			if (qdf_likely(desc)) {
1288 				hal_mem_dma_cache_sync(soc, desc,
1289 						       srng->entry_size);
1290 				qdf_prefetch(desc);
1291 			}
1292 		}
1293 	}
1294 
1295 	return 0;
1296 }
1297 
1298 /**
1299  * hal_le_srng_access_start_unlocked_in_cpu_order - Start ring access
1300  * (unlocked) with endianness correction.
1301  * @hal_soc: Opaque HAL SOC handle
1302  * @hal_ring_hdl: Ring pointer (Source or Destination ring)
1303  *
1304  * This API provides the same functionality as hal_srng_access_start_unlocked()
1305  * except that it converts the little-endian formatted hp/tp values to
1306  * Host order on reading them. So, this API should only be used for those srngs
1307  * for which the target always writes hp/tp values in little-endian order
1308  * regardless of Host order.
1309  *
1310  * Also, this API doesn't take the lock. For locked access, use
1311  * hal_srng_access_start/hal_le_srng_access_start_in_cpu_order.
1312  *
1313  * Return: 0 on success; error on failure
1314  */
1315 static inline int
1316 hal_le_srng_access_start_unlocked_in_cpu_order(
1317 	hal_soc_handle_t hal_soc_hdl,
1318 	hal_ring_handle_t hal_ring_hdl)
1319 {
1320 	struct hal_srng *srng = (struct hal_srng *)hal_ring_hdl;
1321 	struct hal_soc *soc = (struct hal_soc *)hal_soc_hdl;
1322 	uint32_t *desc;
1323 
1324 	if (srng->ring_dir == HAL_SRNG_SRC_RING)
1325 		srng->u.src_ring.cached_tp =
1326 			qdf_le32_to_cpu(*(volatile uint32_t *)
1327 					(srng->u.src_ring.tp_addr));
1328 	else {
1329 		srng->u.dst_ring.cached_hp =
1330 			qdf_le32_to_cpu(*(volatile uint32_t *)
1331 					(srng->u.dst_ring.hp_addr));
1332 
1333 		if (srng->flags & HAL_SRNG_CACHED_DESC) {
1334 			desc = hal_srng_dst_peek(hal_soc_hdl, hal_ring_hdl);
1335 			if (qdf_likely(desc)) {
1336 				hal_mem_dma_cache_sync(soc, desc,
1337 						       srng->entry_size);
1338 				qdf_prefetch(desc);
1339 			}
1340 		}
1341 	}
1342 
1343 	return 0;
1344 }
1345 
1346 /**
1347  * hal_srng_try_access_start - Try to start (locked) ring access
1348  *
1349  * @hal_soc: Opaque HAL SOC handle
1350  * @hal_ring_hdl: Ring pointer (Source or Destination ring)
1351  *
1352  * Return: 0 on success; error on failure
1353  */
1354 static inline int hal_srng_try_access_start(hal_soc_handle_t hal_soc_hdl,
1355 					    hal_ring_handle_t hal_ring_hdl)
1356 {
1357 	struct hal_srng *srng = (struct hal_srng *)hal_ring_hdl;
1358 
1359 	if (qdf_unlikely(!hal_ring_hdl)) {
1360 		qdf_print("Error: Invalid hal_ring\n");
1361 		return -EINVAL;
1362 	}
1363 
1364 	if (!SRNG_TRY_LOCK(&(srng->lock)))
1365 		return -EINVAL;
1366 
1367 	return hal_srng_access_start_unlocked(hal_soc_hdl, hal_ring_hdl);
1368 }
1369 
1370 /**
1371  * hal_srng_access_start - Start (locked) ring access
1372  *
1373  * @hal_soc: Opaque HAL SOC handle
1374  * @hal_ring_hdl: Ring pointer (Source or Destination ring)
1375  *
1376  * This API doesn't implement any byte-order conversion on reading hp/tp.
1377  * So, Use API only for those srngs for which the target writes hp/tp values to
1378  * the DDR in the Host order.
1379  *
1380  * Return: 0 on success; error on failure
1381  */
1382 static inline int hal_srng_access_start(hal_soc_handle_t hal_soc_hdl,
1383 					hal_ring_handle_t hal_ring_hdl)
1384 {
1385 	struct hal_srng *srng = (struct hal_srng *)hal_ring_hdl;
1386 
1387 	if (qdf_unlikely(!hal_ring_hdl)) {
1388 		qdf_print("Error: Invalid hal_ring\n");
1389 		return -EINVAL;
1390 	}
1391 
1392 	SRNG_LOCK(&(srng->lock));
1393 
1394 	return hal_srng_access_start_unlocked(hal_soc_hdl, hal_ring_hdl);
1395 }
1396 
1397 /**
1398  * hal_le_srng_access_start_in_cpu_order - Start (locked) ring access with
1399  * endianness correction
1400  * @hal_soc: Opaque HAL SOC handle
1401  * @hal_ring_hdl: Ring pointer (Source or Destination ring)
1402  *
1403  * This API provides the same functionality as hal_srng_access_start()
1404  * except that it converts the little-endian formatted hp/tp values to
1405  * Host order on reading them. So, this API should only be used for those srngs
1406  * for which the target always writes hp/tp values in little-endian order
1407  * regardless of Host order.
1408  *
1409  * Return: 0 on success; error on failure
1410  */
1411 static inline int
1412 hal_le_srng_access_start_in_cpu_order(
1413 	hal_soc_handle_t hal_soc_hdl,
1414 	hal_ring_handle_t hal_ring_hdl)
1415 {
1416 	struct hal_srng *srng = (struct hal_srng *)hal_ring_hdl;
1417 
1418 	if (qdf_unlikely(!hal_ring_hdl)) {
1419 		qdf_print("Error: Invalid hal_ring\n");
1420 		return -EINVAL;
1421 	}
1422 
1423 	SRNG_LOCK(&(srng->lock));
1424 
1425 	return hal_le_srng_access_start_unlocked_in_cpu_order(
1426 			hal_soc_hdl, hal_ring_hdl);
1427 }
1428 
1429 /**
1430  * hal_srng_dst_get_next - Get next entry from a destination ring
1431  * @hal_soc: Opaque HAL SOC handle
1432  * @hal_ring_hdl: Destination ring pointer
1433  *
1434  * Return: Opaque pointer for next ring entry; NULL on failure
1435  */
1436 static inline
1437 void *hal_srng_dst_get_next(void *hal_soc,
1438 			    hal_ring_handle_t hal_ring_hdl)
1439 {
1440 	struct hal_srng *srng = (struct hal_srng *)hal_ring_hdl;
1441 	uint32_t *desc;
1442 
1443 	if (srng->u.dst_ring.tp == srng->u.dst_ring.cached_hp)
1444 		return NULL;
1445 
1446 	desc = &srng->ring_base_vaddr[srng->u.dst_ring.tp];
1447 	/* TODO: Using % is expensive, but we have to do this since
1448 	 * size of some SRNG rings is not power of 2 (due to descriptor
1449 	 * sizes). Need to create separate API for rings used
1450 	 * per-packet, with sizes power of 2 (TCL2SW, REO2SW,
1451 	 * SW2RXDMA and CE rings)
1452 	 */
1453 	srng->u.dst_ring.tp = (srng->u.dst_ring.tp + srng->entry_size);
1454 	if (srng->u.dst_ring.tp == srng->ring_size)
1455 		srng->u.dst_ring.tp = 0;
1456 
1457 	if (srng->flags & HAL_SRNG_CACHED_DESC) {
1458 		struct hal_soc *soc = (struct hal_soc *)hal_soc;
1459 		uint32_t *desc_next;
1460 		uint32_t tp;
1461 
1462 		tp = srng->u.dst_ring.tp;
1463 		desc_next = &srng->ring_base_vaddr[srng->u.dst_ring.tp];
1464 		hal_mem_dma_cache_sync(soc, desc_next, srng->entry_size);
1465 		qdf_prefetch(desc_next);
1466 	}
1467 
1468 	return (void *)desc;
1469 }
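
/*
 * Typical destination ring reap loop (illustrative only; error handling is
 * omitted, hal_srng_access_end() is assumed to be the matching unlock call
 * provided by HAL, and process_entry() is a hypothetical caller handler):
 *
 *	void *desc;
 *
 *	if (hal_srng_access_start(hal_soc_hdl, hal_ring_hdl))
 *		return;
 *
 *	while ((desc = hal_srng_dst_get_next(hal_soc, hal_ring_hdl)))
 *		process_entry(desc);
 *
 *	hal_srng_access_end(hal_soc_hdl, hal_ring_hdl);
 */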
1470 
1471 /**
1472  * hal_srng_dst_get_next_cached - Get cached next entry
1473  * @hal_soc: Opaque HAL SOC handle
1474  * @hal_ring_hdl: Destination ring pointer
1475  *
1476  * Get next entry from a destination ring and move cached tail pointer
1477  *
1478  * Return: Opaque pointer for next ring entry; NULL on failure
1479  */
1480 static inline
1481 void *hal_srng_dst_get_next_cached(void *hal_soc,
1482 				   hal_ring_handle_t hal_ring_hdl)
1483 {
1484 	struct hal_srng *srng = (struct hal_srng *)hal_ring_hdl;
1485 	uint32_t *desc;
1486 	uint32_t *desc_next;
1487 
1488 	if (srng->u.dst_ring.tp == srng->u.dst_ring.cached_hp)
1489 		return NULL;
1490 
1491 	desc = &srng->ring_base_vaddr[srng->u.dst_ring.tp];
1492 	/* TODO: Using % is expensive, but we have to do this since
1493 	 * size of some SRNG rings is not power of 2 (due to descriptor
1494 	 * sizes). Need to create separate API for rings used
1495 	 * per-packet, with sizes power of 2 (TCL2SW, REO2SW,
1496 	 * SW2RXDMA and CE rings)
1497 	 */
1498 	srng->u.dst_ring.tp = (srng->u.dst_ring.tp + srng->entry_size);
1499 	if (srng->u.dst_ring.tp == srng->ring_size)
1500 		srng->u.dst_ring.tp = 0;
1501 
1502 	desc_next = &srng->ring_base_vaddr[srng->u.dst_ring.tp];
1503 	qdf_prefetch(desc_next);
1504 	return (void *)desc;
1505 }
1506 
1507 /**
1508  * hal_srng_dst_dec_tp - decrement the TP of the Dst ring by one entry
1509  * @hal_soc: Opaque HAL SOC handle
1510  * @hal_ring_hdl: Destination ring pointer
1511  *
1512  * Decrement the tail pointer in the destination ring by one entry
1513  *
1514  */
1515 static inline
1516 void hal_srng_dst_dec_tp(void *hal_soc, hal_ring_handle_t hal_ring_hdl)
1517 {
1518 	struct hal_srng *srng = (struct hal_srng *)hal_ring_hdl;
1519 
1520 	if (qdf_unlikely(!srng->u.dst_ring.tp))
1521 		srng->u.dst_ring.tp = (srng->ring_size - srng->entry_size);
1522 	else
1523 		srng->u.dst_ring.tp -= srng->entry_size;
1524 }
1525 
1526 static inline int hal_srng_lock(hal_ring_handle_t hal_ring_hdl)
1527 {
1528 	struct hal_srng *srng = (struct hal_srng *)hal_ring_hdl;
1529 
1530 	if (qdf_unlikely(!hal_ring_hdl)) {
1531 		qdf_print("error: invalid hal_ring\n");
1532 		return -EINVAL;
1533 	}
1534 
1535 	SRNG_LOCK(&(srng->lock));
1536 	return 0;
1537 }
1538 
1539 static inline int hal_srng_unlock(hal_ring_handle_t hal_ring_hdl)
1540 {
1541 	struct hal_srng *srng = (struct hal_srng *)hal_ring_hdl;
1542 
1543 	if (qdf_unlikely(!hal_ring_hdl)) {
1544 		qdf_print("error: invalid hal_ring\n");
1545 		return -EINVAL;
1546 	}
1547 
1548 	SRNG_UNLOCK(&(srng->lock));
1549 	return 0;
1550 }
1551 
1552 /**
1553  * hal_srng_dst_get_next_hp - Get next entry from a destination ring and move
1554  * cached head pointer
1555  *
1556  * @hal_soc: Opaque HAL SOC handle
1557  * @hal_ring_hdl: Destination ring pointer
1558  *
1559  * Return: Opaque pointer for next ring entry; NULL on failure
1560  */
1561 static inline void *
1562 hal_srng_dst_get_next_hp(hal_soc_handle_t hal_soc_hdl,
1563 			 hal_ring_handle_t hal_ring_hdl)
1564 {
1565 	struct hal_srng *srng = (struct hal_srng *)hal_ring_hdl;
1566 	uint32_t *desc;
1567 	/* TODO: Using % is expensive, but we have to do this since
1568 	 * size of some SRNG rings is not power of 2 (due to descriptor
1569 	 * sizes). Need to create separate API for rings used
1570 	 * per-packet, with sizes power of 2 (TCL2SW, REO2SW,
1571 	 * SW2RXDMA and CE rings)
1572 	 */
1573 	uint32_t next_hp = (srng->u.dst_ring.cached_hp + srng->entry_size) %
1574 		srng->ring_size;
1575 
1576 	if (next_hp != srng->u.dst_ring.tp) {
1577 		desc = &(srng->ring_base_vaddr[srng->u.dst_ring.cached_hp]);
1578 		srng->u.dst_ring.cached_hp = next_hp;
1579 		return (void *)desc;
1580 	}
1581 
1582 	return NULL;
1583 }
1584 
1585 /**
1586  * hal_srng_dst_peek_sync - Check if there are any entries in the ring (peek)
1587  * @hal_soc: Opaque HAL SOC handle
1588  * @hal_ring_hdl: Destination ring pointer
1589  *
1590  * Sync cached head pointer with HW.
1591  * Caller takes responsibility for any locking needs.
1592  *
1593  * Return: Opaque pointer for next ring entry; NULL on failure
1594  */
1595 static inline
1596 void *hal_srng_dst_peek_sync(hal_soc_handle_t hal_soc_hdl,
1597 			     hal_ring_handle_t hal_ring_hdl)
1598 {
1599 	struct hal_srng *srng = (struct hal_srng *)hal_ring_hdl;
1600 
1601 	srng->u.dst_ring.cached_hp =
1602 		*(volatile uint32_t *)(srng->u.dst_ring.hp_addr);
1603 
1604 	if (srng->u.dst_ring.tp != srng->u.dst_ring.cached_hp)
1605 		return (void *)(&(srng->ring_base_vaddr[srng->u.dst_ring.tp]));
1606 
1607 	return NULL;
1608 }
1609 
1610 /**
1611  * hal_srng_dst_peek_sync_locked - Peek for any entries in the ring
1612  * @hal_soc: Opaque HAL SOC handle
1613  * @hal_ring_hdl: Destination ring pointer
1614  *
1615  * Sync cached head pointer with HW.
1616  * This function takes up SRNG_LOCK. Should not be called with SRNG lock held.
1617  *
1618  * Return: Opaque pointer for next ring entry; NULL on failure
1619  */
1620 static inline
1621 void *hal_srng_dst_peek_sync_locked(hal_soc_handle_t hal_soc_hdl,
1622 				    hal_ring_handle_t hal_ring_hdl)
1623 {
1624 	struct hal_srng *srng = (struct hal_srng *)hal_ring_hdl;
1625 	void *ring_desc_ptr = NULL;
1626 
1627 	if (qdf_unlikely(!hal_ring_hdl)) {
1628 		qdf_print("Error: Invalid hal_ring\n");
1629 		return  NULL;
1630 	}
1631 
1632 	SRNG_LOCK(&srng->lock);
1633 
1634 	ring_desc_ptr = hal_srng_dst_peek_sync(hal_soc_hdl, hal_ring_hdl);
1635 
1636 	SRNG_UNLOCK(&srng->lock);
1637 
1638 	return ring_desc_ptr;
1639 }
1640 
1641 #define hal_srng_dst_num_valid_nolock(hal_soc, hal_ring_hdl, sync_hw_ptr) \
1642 		hal_srng_dst_num_valid(hal_soc, hal_ring_hdl, sync_hw_ptr)
1643 
1644 /**
1645  * hal_srng_dst_num_valid - Returns number of valid entries (to be processed
1646  * by SW) in destination ring
1647  *
1648  * @hal_soc: Opaque HAL SOC handle
1649  * @hal_ring_hdl: Destination ring pointer
1650  * @sync_hw_ptr: Sync cached head pointer with HW
1651  *
1652  */
1653 static inline
1654 uint32_t hal_srng_dst_num_valid(void *hal_soc,
1655 				hal_ring_handle_t hal_ring_hdl,
1656 				int sync_hw_ptr)
1657 {
1658 	struct hal_srng *srng = (struct hal_srng *)hal_ring_hdl;
1659 	uint32_t hp;
1660 	uint32_t tp = srng->u.dst_ring.tp;
1661 
1662 	if (sync_hw_ptr) {
1663 		hp = *(volatile uint32_t *)(srng->u.dst_ring.hp_addr);
1664 		srng->u.dst_ring.cached_hp = hp;
1665 	} else {
1666 		hp = srng->u.dst_ring.cached_hp;
1667 	}
1668 
1669 	if (hp >= tp)
1670 		return (hp - tp) / srng->entry_size;
1671 
1672 	return (srng->ring_size - tp + hp) / srng->entry_size;
1673 }
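
/*
 * Worked example of the arithmetic above (values are illustrative): with
 * ring_size = 128 (in 32-bit words), entry_size = 8, tp = 120 and a wrapped
 * hp = 16, the count is (128 - 120 + 16) / 8 = 3 valid entries.
 */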
1674 
1675 /**
1676  * hal_srng_dst_inv_cached_descs - API to invalidate descriptors in batch mode
1677  * @hal_soc: Opaque HAL SOC handle
1678  * @hal_ring_hdl: Destination ring pointer
1679  * @entry_count: number of valid entries to invalidate
1680  *
1681  * Invalidates a set of cached descriptors starting from TP to cached_HP
1682  *
1683  * Return: None
1684  */
1685 static inline void hal_srng_dst_inv_cached_descs(void *hal_soc,
1686 						 hal_ring_handle_t hal_ring_hdl,
1687 						 uint32_t entry_count)
1688 {
1689 	struct hal_srng *srng = (struct hal_srng *)hal_ring_hdl;
1690 	uint32_t *first_desc;
1691 	uint32_t *last_desc;
1692 	uint32_t last_desc_index;
1693 
1694 	/*
1695 	 * If SRNG does not have cached descriptors this
1696 	 * API call should be a no op
1697 	 */
1698 	if (!(srng->flags & HAL_SRNG_CACHED_DESC))
1699 		return;
1700 
1701 	if (!entry_count)
1702 		return;
1703 
1704 	first_desc = &srng->ring_base_vaddr[srng->u.dst_ring.tp];
1705 
1706 	last_desc_index = (srng->u.dst_ring.tp +
1707 			   (entry_count * srng->entry_size)) %
1708 			  srng->ring_size;
1709 
1710 	last_desc =  &srng->ring_base_vaddr[last_desc_index];
1711 
1712 	if (last_desc > (uint32_t *)first_desc)
1713 		/* invalidate from tp to cached_hp */
1714 		qdf_nbuf_dma_inv_range_no_dsb((void *)first_desc,
1715 					      (void *)(last_desc));
1716 	else {
1717 		/* invalidate from tp to end of the ring */
1718 		qdf_nbuf_dma_inv_range_no_dsb((void *)first_desc,
1719 					      (void *)srng->ring_vaddr_end);
1720 
1721 		/* invalidate from start of ring to cached_hp */
1722 		qdf_nbuf_dma_inv_range_no_dsb((void *)srng->ring_base_vaddr,
1723 					      (void *)last_desc);
1724 	}
1725 	qdf_dsb();
1726 }
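
/*
 * Illustrative usage sketch (assumes the ring was created with the
 * HAL_SRNG_CACHED_DESC flag): invalidate the whole batch once before
 * reading the descriptors from non-coherent memory.
 *
 *	num_valid = hal_srng_dst_num_valid(hal_soc, hal_ring_hdl, 1);
 *	hal_srng_dst_inv_cached_descs(hal_soc, hal_ring_hdl, num_valid);
 *	// descriptors from TP up to cached HP can now be read safely
 */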
1727 
1728 /**
1729  * hal_srng_dst_num_valid_locked - Returns num valid entries to be processed
1730  *
1731  * @hal_soc: Opaque HAL SOC handle
1732  * @hal_ring_hdl: Destination ring pointer
1733  * @sync_hw_ptr: Sync cached head pointer with HW
1734  *
1735  * Returns number of valid entries to be processed by the host driver. The
1736  * function takes up SRNG lock.
1737  *
1738  * Return: Number of valid destination entries
1739  */
1740 static inline uint32_t
1741 hal_srng_dst_num_valid_locked(hal_soc_handle_t hal_soc,
1742 			      hal_ring_handle_t hal_ring_hdl,
1743 			      int sync_hw_ptr)
1744 {
1745 	uint32_t num_valid;
1746 	struct hal_srng *srng = (struct hal_srng *)hal_ring_hdl;
1747 
1748 	SRNG_LOCK(&srng->lock);
1749 	num_valid = hal_srng_dst_num_valid(hal_soc, hal_ring_hdl, sync_hw_ptr);
1750 	SRNG_UNLOCK(&srng->lock);
1751 
1752 	return num_valid;
1753 }
1754 
1755 /**
1756  * hal_srng_sync_cachedhp - sync cached hp pointer from hw hp
1757  *
1758  * @hal_soc: Opaque HAL SOC handle
1759  * @hal_ring_hdl: Destination ring pointer
1760  *
1761  */
1762 static inline
1763 void hal_srng_sync_cachedhp(void *hal_soc,
1764 				hal_ring_handle_t hal_ring_hdl)
1765 {
1766 	struct hal_srng *srng = (struct hal_srng *)hal_ring_hdl;
1767 	uint32_t hp;
1768 
1769 	hp = *(volatile uint32_t *)(srng->u.dst_ring.hp_addr);
1770 	srng->u.dst_ring.cached_hp = hp;
1771 }
1772 
1773 /**
1774  * hal_srng_src_reap_next - Reap next entry from a source ring and move reap
1775  * pointer. This can be used to release any buffers associated with completed
1776  * ring entries. Note that this should not be used for posting new descriptor
1777  * entries. Posting of new entries should be done only using
1778  * hal_srng_src_get_next_reaped when this function is used for reaping.
1779  *
1780  * @hal_soc: Opaque HAL SOC handle
1781  * @hal_ring_hdl: Source ring pointer
1782  *
1783  * Return: Opaque pointer for next ring entry; NULL on failure
1784  */
1785 static inline void *
1786 hal_srng_src_reap_next(void *hal_soc, hal_ring_handle_t hal_ring_hdl)
1787 {
1788 	struct hal_srng *srng = (struct hal_srng *)hal_ring_hdl;
1789 	uint32_t *desc;
1790 
1791 	/* TODO: Using % is expensive, but we have to do this since
1792 	 * size of some SRNG rings is not power of 2 (due to descriptor
1793 	 * sizes). Need to create separate API for rings used
1794 	 * per-packet, with sizes power of 2 (TCL2SW, REO2SW,
1795 	 * SW2RXDMA and CE rings)
1796 	 */
1797 	uint32_t next_reap_hp = (srng->u.src_ring.reap_hp + srng->entry_size) %
1798 		srng->ring_size;
1799 
1800 	if (next_reap_hp != srng->u.src_ring.cached_tp) {
1801 		desc = &(srng->ring_base_vaddr[next_reap_hp]);
1802 		srng->u.src_ring.reap_hp = next_reap_hp;
1803 		return (void *)desc;
1804 	}
1805 
1806 	return NULL;
1807 }
1808 
1809 /**
1810  * hal_srng_src_get_next_reaped - Get next entry from a source ring that is
1811  * already reaped using hal_srng_src_reap_next, for posting new entries to
1812  * the ring
1813  *
1814  * @hal_soc: Opaque HAL SOC handle
1815  * @hal_ring_hdl: Source ring pointer
1816  *
1817  * Return: Opaque pointer for next (reaped) source ring entry; NULL on failure
1818  */
1819 static inline void *
1820 hal_srng_src_get_next_reaped(void *hal_soc, hal_ring_handle_t hal_ring_hdl)
1821 {
1822 	struct hal_srng *srng = (struct hal_srng *)hal_ring_hdl;
1823 	uint32_t *desc;
1824 
1825 	if (srng->u.src_ring.hp != srng->u.src_ring.reap_hp) {
1826 		desc = &(srng->ring_base_vaddr[srng->u.src_ring.hp]);
1827 		srng->u.src_ring.hp = (srng->u.src_ring.hp + srng->entry_size) %
1828 			srng->ring_size;
1829 
1830 		return (void *)desc;
1831 	}
1832 
1833 	return NULL;
1834 }
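
/*
 * Illustrative sketch of the reap/repost split described above (the
 * release_buffer() and fill_buffer() helpers are hypothetical):
 *
 *	// completion path: release buffers without moving HP
 *	while ((desc = hal_srng_src_reap_next(hal_soc, hal_ring_hdl)))
 *		release_buffer(desc);
 *
 *	// replenish path: post new entries only into already reaped slots
 *	while ((desc = hal_srng_src_get_next_reaped(hal_soc, hal_ring_hdl)))
 *		fill_buffer(desc);
 */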
1835 
1836 /**
1837  * hal_srng_src_pending_reap_next - Reap next entry from a source ring and
1838  * move reap pointer. This API is used in detach path to release any buffers
1839  * associated with ring entries which are pending reap.
1840  *
1841  * @hal_soc: Opaque HAL SOC handle
1842  * @hal_ring_hdl: Source ring pointer
1843  *
1844  * Return: Opaque pointer for next ring entry; NULL on failure
1845  */
1846 static inline void *
1847 hal_srng_src_pending_reap_next(void *hal_soc, hal_ring_handle_t hal_ring_hdl)
1848 {
1849 	struct hal_srng *srng = (struct hal_srng *)hal_ring_hdl;
1850 	uint32_t *desc;
1851 
1852 	uint32_t next_reap_hp = (srng->u.src_ring.reap_hp + srng->entry_size) %
1853 		srng->ring_size;
1854 
1855 	if (next_reap_hp != srng->u.src_ring.hp) {
1856 		desc = &(srng->ring_base_vaddr[next_reap_hp]);
1857 		srng->u.src_ring.reap_hp = next_reap_hp;
1858 		return (void *)desc;
1859 	}
1860 
1861 	return NULL;
1862 }
1863 
1864 /**
1865  * hal_srng_src_done_val - Get the number of entries completed (done) by HW
1866  *
1867  * @hal_soc: Opaque HAL SOC handle
1868  * @hal_ring_hdl: Source ring pointer
1869  *
1870  * Return: Number of source ring entries consumed by HW and pending reap by SW
1871  */
1872 static inline uint32_t
1873 hal_srng_src_done_val(void *hal_soc, hal_ring_handle_t hal_ring_hdl)
1874 {
1875 	struct hal_srng *srng = (struct hal_srng *)hal_ring_hdl;
1876 	/* TODO: Using % is expensive, but we have to do this since
1877 	 * size of some SRNG rings is not power of 2 (due to descriptor
1878 	 * sizes). Need to create separate API for rings used
1879 	 * per-packet, with sizes power of 2 (TCL2SW, REO2SW,
1880 	 * SW2RXDMA and CE rings)
1881 	 */
1882 	uint32_t next_reap_hp = (srng->u.src_ring.reap_hp + srng->entry_size) %
1883 		srng->ring_size;
1884 
1885 	if (next_reap_hp == srng->u.src_ring.cached_tp)
1886 		return 0;
1887 
1888 	if (srng->u.src_ring.cached_tp > next_reap_hp)
1889 		return (srng->u.src_ring.cached_tp - next_reap_hp) /
1890 			srng->entry_size;
1891 	else
1892 		return ((srng->ring_size - next_reap_hp) +
1893 			srng->u.src_ring.cached_tp) / srng->entry_size;
1894 }
1895 
1896 /**
1897  * hal_get_entrysize_from_srng() - Retrieve ring entry size
1898  * @hal_ring_hdl: Source ring pointer
1899  *
1900  * srng->entry_size value is in 4 byte dwords so left shifting
1901  * this by 2 to return the value of entry_size in bytes.
1902  *
1903  * Return: uint8_t
1904  */
1905 static inline
1906 uint8_t hal_get_entrysize_from_srng(hal_ring_handle_t hal_ring_hdl)
1907 {
1908 	struct hal_srng *srng = (struct hal_srng *)hal_ring_hdl;
1909 
1910 	return srng->entry_size << 2;
1911 }
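
/*
 * Example (entry size is illustrative): a ring whose entries are
 * 8 dwords wide reports hal_get_entrysize_from_srng() == 8 << 2 == 32
 * bytes.
 */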
1912 
1913 /**
1914  * hal_get_sw_hptp - Get SW head and tail pointer location for any ring
1915  * @hal_soc: Opaque HAL SOC handle
1916  * @hal_ring_hdl: Source ring pointer
1917  * @tailp: Tail Pointer
1918  * @headp: Head Pointer
1919  *
1920  * Return: Update tail pointer and head pointer in arguments.
1921  */
1922 static inline
1923 void hal_get_sw_hptp(void *hal_soc, hal_ring_handle_t hal_ring_hdl,
1924 		     uint32_t *tailp, uint32_t *headp)
1925 {
1926 	struct hal_srng *srng = (struct hal_srng *)hal_ring_hdl;
1927 
1928 	if (srng->ring_dir == HAL_SRNG_SRC_RING) {
1929 		*headp = srng->u.src_ring.hp;
1930 		*tailp = *srng->u.src_ring.tp_addr;
1931 	} else {
1932 		*tailp = srng->u.dst_ring.tp;
1933 		*headp = *srng->u.dst_ring.hp_addr;
1934 	}
1935 }
1936 
1937 #if defined(CLEAR_SW2TCL_CONSUMED_DESC)
1938 /**
1939  * hal_srng_src_get_next_consumed - Get the next desc if consumed by HW
1940  *
1941  * @hal_soc: Opaque HAL SOC handle
1942  * @hal_ring_hdl: Source ring pointer
1943  *
1944  * Return: pointer to descriptor if consumed by HW, else NULL
1945  */
1946 static inline
1947 void *hal_srng_src_get_next_consumed(void *hal_soc,
1948 				     hal_ring_handle_t hal_ring_hdl)
1949 {
1950 	struct hal_srng *srng = (struct hal_srng *)hal_ring_hdl;
1951 	uint32_t *desc = NULL;
1952 	/* TODO: Using % is expensive, but we have to do this since
1953 	 * size of some SRNG rings is not power of 2 (due to descriptor
1954 	 * sizes). Need to create separate API for rings used
1955 	 * per-packet, with sizes power of 2 (TCL2SW, REO2SW,
1956 	 * SW2RXDMA and CE rings)
1957 	 */
1958 	uint32_t next_entry = (srng->last_desc_cleared + srng->entry_size) %
1959 			      srng->ring_size;
1960 
1961 	if (next_entry != srng->u.src_ring.cached_tp) {
1962 		desc = &srng->ring_base_vaddr[next_entry];
1963 		srng->last_desc_cleared = next_entry;
1964 	}
1965 
1966 	return desc;
1967 }
1968 
1969 #else
1970 static inline
1971 void *hal_srng_src_get_next_consumed(void *hal_soc,
1972 				     hal_ring_handle_t hal_ring_hdl)
1973 {
1974 	return NULL;
1975 }
1976 #endif /* CLEAR_SW2TCL_CONSUMED_DESC */
1977 
1978 /**
1979  * hal_srng_src_peek - get the HP of the SRC ring
1980  * @hal_soc: Opaque HAL SOC handle
1981  * @hal_ring_hdl: Source ring pointer
1982  *
1983  * get the head pointer in the src ring but do not increment it
1984  */
1985 static inline
1986 void *hal_srng_src_peek(void *hal_soc, hal_ring_handle_t hal_ring_hdl)
1987 {
1988 	struct hal_srng *srng = (struct hal_srng *)hal_ring_hdl;
1989 	uint32_t *desc;
1990 	uint32_t next_hp = (srng->u.src_ring.hp + srng->entry_size) %
1991 		srng->ring_size;
1992 
1993 	if (next_hp != srng->u.src_ring.cached_tp) {
1994 		desc = &(srng->ring_base_vaddr[srng->u.src_ring.hp]);
1995 		return (void *)desc;
1996 	}
1997 
1998 	return NULL;
1999 }
2000 
2001 /**
2002  * hal_srng_src_get_next - Get next entry from a source ring and move cached head pointer
2003  *
2004  * @hal_soc: Opaque HAL SOC handle
2005  * @hal_ring_hdl: Source ring pointer
2006  *
2007  * Return: Opaque pointer for next ring entry; NULL on failure
2008  */
2009 static inline
2010 void *hal_srng_src_get_next(void *hal_soc,
2011 			    hal_ring_handle_t hal_ring_hdl)
2012 {
2013 	struct hal_srng *srng = (struct hal_srng *)hal_ring_hdl;
2014 	uint32_t *desc;
2015 	/* TODO: Using % is expensive, but we have to do this since
2016 	 * size of some SRNG rings is not power of 2 (due to descriptor
2017 	 * sizes). Need to create separate API for rings used
2018 	 * per-packet, with sizes power of 2 (TCL2SW, REO2SW,
2019 	 * SW2RXDMA and CE rings)
2020 	 */
2021 	uint32_t next_hp = (srng->u.src_ring.hp + srng->entry_size) %
2022 		srng->ring_size;
2023 
2024 	if (next_hp != srng->u.src_ring.cached_tp) {
2025 		desc = &(srng->ring_base_vaddr[srng->u.src_ring.hp]);
2026 		srng->u.src_ring.hp = next_hp;
2027 		/* TODO: Since reap function is not used by all rings, we can
2028 		 * remove the following update of reap_hp in this function
2029 		 * if we can ensure that only hal_srng_src_get_next_reaped
2030 		 * is used for the rings requiring reap functionality
2031 		 */
2032 		srng->u.src_ring.reap_hp = next_hp;
2033 		return (void *)desc;
2034 	}
2035 
2036 	return NULL;
2037 }
2038 
2039 /**
2040  * hal_srng_src_peek_n_get_next - Get next entry from a ring without
2041  * moving head pointer.
2042  * hal_srng_src_get_next should be called subsequently to move the head pointer
2043  *
2044  * @hal_soc: Opaque HAL SOC handle
2045  * @hal_ring_hdl: Source ring pointer
2046  *
2047  * Return: Opaque pointer for next ring entry; NULL on failure
2048  */
2049 static inline
2050 void *hal_srng_src_peek_n_get_next(hal_soc_handle_t hal_soc_hdl,
2051 				   hal_ring_handle_t hal_ring_hdl)
2052 {
2053 	struct hal_srng *srng = (struct hal_srng *)hal_ring_hdl;
2054 	uint32_t *desc;
2055 
2056 	/* TODO: Using % is expensive, but we have to do this since
2057 	 * size of some SRNG rings is not power of 2 (due to descriptor
2058 	 * sizes). Need to create separate API for rings used
2059 	 * per-packet, with sizes power of 2 (TCL2SW, REO2SW,
2060 	 * SW2RXDMA and CE rings)
2061 	 */
2062 	if (((srng->u.src_ring.hp + srng->entry_size) %
2063 		srng->ring_size) != srng->u.src_ring.cached_tp) {
2064 		desc = &(srng->ring_base_vaddr[(srng->u.src_ring.hp +
2065 						srng->entry_size) %
2066 						srng->ring_size]);
2067 		return (void *)desc;
2068 	}
2069 
2070 	return NULL;
2071 }
2072 
2073 /**
2074  * hal_srng_src_peek_n_get_next_next - Get next to next, i.e. HP + 2 entry
2075  * from a ring without moving head pointer.
2076  *
2077  * @hal_soc: Opaque HAL SOC handle
2078  * @hal_ring_hdl: Source ring pointer
2079  *
2080  * Return: Opaque pointer for next to next ring entry; NULL on failure
2081  */
2082 static inline
2083 void *hal_srng_src_peek_n_get_next_next(hal_soc_handle_t hal_soc_hdl,
2084 					hal_ring_handle_t hal_ring_hdl)
2085 {
2086 	struct hal_srng *srng = (struct hal_srng *)hal_ring_hdl;
2087 	uint32_t *desc;
2088 
2089 	/* TODO: Using % is expensive, but we have to do this since
2090 	 * size of some SRNG rings is not power of 2 (due to descriptor
2091 	 * sizes). Need to create separate API for rings used
2092 	 * per-packet, with sizes power of 2 (TCL2SW, REO2SW,
2093 	 * SW2RXDMA and CE rings)
2094 	 */
2095 	if ((((srng->u.src_ring.hp + (srng->entry_size)) %
2096 		srng->ring_size) != srng->u.src_ring.cached_tp) &&
2097 	    (((srng->u.src_ring.hp + (srng->entry_size * 2)) %
2098 		srng->ring_size) != srng->u.src_ring.cached_tp)) {
2099 		desc = &(srng->ring_base_vaddr[(srng->u.src_ring.hp +
2100 						(srng->entry_size * 2)) %
2101 						srng->ring_size]);
2102 		return (void *)desc;
2103 	}
2104 
2105 	return NULL;
2106 }
2107 
2108 /**
2109  * hal_srng_src_get_cur_hp_n_move_next() - API returns current hp
2110  * and moves hp to the next entry in the src ring
2111  *
2112  * Usage: This API should only be used at init time replenish.
2113  *
2114  * @hal_soc_hdl: HAL soc handle
2115  * @hal_ring_hdl: Source ring pointer
2116  * Return: Opaque pointer to the current ring entry
2117  */
2118 static inline void *
2119 hal_srng_src_get_cur_hp_n_move_next(hal_soc_handle_t hal_soc_hdl,
2120 				    hal_ring_handle_t hal_ring_hdl)
2121 {
2122 	struct hal_srng *srng = (struct hal_srng *)hal_ring_hdl;
2123 	uint32_t *cur_desc = NULL;
2124 	uint32_t next_hp;
2125 
2126 	cur_desc = &srng->ring_base_vaddr[(srng->u.src_ring.hp)];
2127 
2128 	next_hp = (srng->u.src_ring.hp + srng->entry_size) %
2129 		srng->ring_size;
2130 
2131 	if (next_hp != srng->u.src_ring.cached_tp)
2132 		srng->u.src_ring.hp = next_hp;
2133 
2134 	return (void *)cur_desc;
2135 }
2136 
2137 /**
2138  * hal_srng_src_num_avail - Returns number of available entries in src ring
2139  *
2140  * @hal_soc: Opaque HAL SOC handle
2141  * @hal_ring_hdl: Source ring pointer
2142  * @sync_hw_ptr: Sync cached tail pointer with HW
2143  *
2144  */
2145 static inline uint32_t
2146 hal_srng_src_num_avail(void *hal_soc,
2147 		       hal_ring_handle_t hal_ring_hdl, int sync_hw_ptr)
2148 {
2149 	struct hal_srng *srng = (struct hal_srng *)hal_ring_hdl;
2150 	uint32_t tp;
2151 	uint32_t hp = srng->u.src_ring.hp;
2152 
2153 	if (sync_hw_ptr) {
2154 		tp = *(srng->u.src_ring.tp_addr);
2155 		srng->u.src_ring.cached_tp = tp;
2156 	} else {
2157 		tp = srng->u.src_ring.cached_tp;
2158 	}
2159 
2160 	if (tp > hp)
2161 		return ((tp - hp) / srng->entry_size) - 1;
2162 	else
2163 		return ((srng->ring_size - hp + tp) / srng->entry_size) - 1;
2164 }
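
/*
 * Illustrative sketch of a source-ring post sequence (assumes the
 * caller already started ring access with hal_srng_access_start(); the
 * fill_tx_desc() helper is hypothetical):
 *
 *	if (hal_srng_src_num_avail(hal_soc, hal_ring_hdl, 0)) {
 *		desc = hal_srng_src_get_next(hal_soc, hal_ring_hdl);
 *		if (desc)
 *			fill_tx_desc(desc);
 *	}
 *	hal_srng_access_end(hal_soc, hal_ring_hdl);
 */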
2165 
2166 #ifdef WLAN_DP_SRNG_USAGE_WM_TRACKING
2167 /**
2168  * hal_srng_clear_ring_usage_wm_locked() - Clear SRNG usage watermark stats
2169  * @hal_soc_hdl: HAL soc handle
2170  * @hal_ring_hdl: SRNG handle
2171  *
2172  * This function tries to acquire SRNG lock, and hence should not be called
2173  * from a context which has already acquired the SRNG lock.
2174  *
2175  * Return: None
2176  */
2177 static inline
2178 void hal_srng_clear_ring_usage_wm_locked(hal_soc_handle_t hal_soc_hdl,
2179 					 hal_ring_handle_t hal_ring_hdl)
2180 {
2181 	struct hal_srng *srng = (struct hal_srng *)hal_ring_hdl;
2182 
2183 	SRNG_LOCK(&srng->lock);
2184 	srng->high_wm.val = 0;
2185 	srng->high_wm.timestamp = 0;
2186 	qdf_mem_zero(&srng->high_wm.bins[0], sizeof(srng->high_wm.bins[0]) *
2187 					     HAL_SRNG_HIGH_WM_BIN_MAX);
2188 	SRNG_UNLOCK(&srng->lock);
2189 }
2190 
2191 /**
2192  * hal_srng_update_ring_usage_wm_no_lock() - Update the SRNG usage wm stats
2193  * @hal_soc_hdl: HAL soc handle
2194  * @hal_ring_hdl: SRNG handle
2195  *
2196  * This function should be called with the SRNG lock held.
2197  *
2198  * Return: None
2199  */
2200 static inline
2201 void hal_srng_update_ring_usage_wm_no_lock(hal_soc_handle_t hal_soc_hdl,
2202 					   hal_ring_handle_t hal_ring_hdl)
2203 {
2204 	struct hal_srng *srng = (struct hal_srng *)hal_ring_hdl;
2205 	uint32_t curr_wm_val = 0;
2206 
2207 	if (srng->ring_dir == HAL_SRNG_SRC_RING)
2208 		curr_wm_val = hal_srng_src_num_avail(hal_soc_hdl, hal_ring_hdl,
2209 						     0);
2210 	else
2211 		curr_wm_val = hal_srng_dst_num_valid(hal_soc_hdl, hal_ring_hdl,
2212 						     0);
2213 
2214 	if (curr_wm_val > srng->high_wm.val) {
2215 		srng->high_wm.val = curr_wm_val;
2216 		srng->high_wm.timestamp = qdf_get_system_timestamp();
2217 	}
2218 
2219 	if (curr_wm_val >=
2220 		srng->high_wm.bin_thresh[HAL_SRNG_HIGH_WM_BIN_90_to_100])
2221 		srng->high_wm.bins[HAL_SRNG_HIGH_WM_BIN_90_to_100]++;
2222 	else if (curr_wm_val >=
2223 		 srng->high_wm.bin_thresh[HAL_SRNG_HIGH_WM_BIN_80_to_90])
2224 		srng->high_wm.bins[HAL_SRNG_HIGH_WM_BIN_80_to_90]++;
2225 	else if (curr_wm_val >=
2226 		 srng->high_wm.bin_thresh[HAL_SRNG_HIGH_WM_BIN_70_to_80])
2227 		srng->high_wm.bins[HAL_SRNG_HIGH_WM_BIN_70_to_80]++;
2228 	else if (curr_wm_val >=
2229 		 srng->high_wm.bin_thresh[HAL_SRNG_HIGH_WM_BIN_60_to_70])
2230 		srng->high_wm.bins[HAL_SRNG_HIGH_WM_BIN_60_to_70]++;
2231 	else if (curr_wm_val >=
2232 		 srng->high_wm.bin_thresh[HAL_SRNG_HIGH_WM_BIN_50_to_60])
2233 		srng->high_wm.bins[HAL_SRNG_HIGH_WM_BIN_50_to_60]++;
2234 	else
2235 		srng->high_wm.bins[HAL_SRNG_HIGH_WM_BIN_BELOW_50_PERCENT]++;
2236 }
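
/*
 * Illustrative sketch (a minimal pattern, not a mandated call flow):
 * since the update helper expects the SRNG lock to be held, it is
 * typically invoked between hal_srng_access_start() and
 * hal_srng_access_end() on the same ring.
 *
 *	hal_srng_access_start(hal_soc_hdl, hal_ring_hdl);
 *	hal_srng_update_ring_usage_wm_no_lock(hal_soc_hdl, hal_ring_hdl);
 *	hal_srng_access_end(hal_soc_hdl, hal_ring_hdl);
 */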
2237 
2238 static inline
2239 int hal_dump_srng_high_wm_stats(hal_soc_handle_t hal_soc_hdl,
2240 				hal_ring_handle_t hal_ring_hdl,
2241 				char *buf, int buf_len, int pos)
2242 {
2243 	struct hal_srng *srng = (struct hal_srng *)hal_ring_hdl;
2244 
2245 	return qdf_scnprintf(buf + pos, buf_len - pos,
2246 			     "%8u %7u %12llu %10u %10u %10u %10u %10u %10u",
2247 			     srng->ring_id, srng->high_wm.val,
2248 			     srng->high_wm.timestamp,
2249 			     srng->high_wm.bins[HAL_SRNG_HIGH_WM_BIN_BELOW_50_PERCENT],
2250 			     srng->high_wm.bins[HAL_SRNG_HIGH_WM_BIN_50_to_60],
2251 			     srng->high_wm.bins[HAL_SRNG_HIGH_WM_BIN_60_to_70],
2252 			     srng->high_wm.bins[HAL_SRNG_HIGH_WM_BIN_70_to_80],
2253 			     srng->high_wm.bins[HAL_SRNG_HIGH_WM_BIN_80_to_90],
2254 			     srng->high_wm.bins[HAL_SRNG_HIGH_WM_BIN_90_to_100]);
2255 }
2256 #else
2257 /**
2258  * hal_srng_clear_ring_usage_wm_locked() - Clear SRNG usage watermark stats
2259  * @hal_soc_hdl: HAL soc handle
2260  * @hal_ring_hdl: SRNG handle
2261  *
2262  * This function tries to acquire SRNG lock, and hence should not be called
2263  * from a context which has already acquired the SRNG lock.
2264  *
2265  * Return: None
2266  */
2267 static inline
2268 void hal_srng_clear_ring_usage_wm_locked(hal_soc_handle_t hal_soc_hdl,
2269 					 hal_ring_handle_t hal_ring_hdl)
2270 {
2271 }
2272 
2273 /**
2274  * hal_srng_update_ring_usage_wm_no_lock() - Update the SRNG usage wm stats
2275  * @hal_soc_hdl: HAL soc handle
2276  * @hal_ring_hdl: SRNG handle
2277  *
2278  * This function should be called with the SRNG lock held.
2279  *
2280  * Return: None
2281  */
2282 static inline
2283 void hal_srng_update_ring_usage_wm_no_lock(hal_soc_handle_t hal_soc_hdl,
2284 					   hal_ring_handle_t hal_ring_hdl)
2285 {
2286 }
2287 
2288 static inline
2289 int hal_dump_srng_high_wm_stats(hal_soc_handle_t hal_soc_hdl,
2290 				hal_ring_handle_t hal_ring_hdl,
2291 				char *buf, int buf_len, int pos)
2292 {
2293 	return 0;
2294 }
2295 #endif
2296 
2297 /**
2298  * hal_srng_access_end_unlocked - End ring access (unlocked) - update cached
2299  * ring head/tail pointers to HW.
2300  *
2301  * @hal_soc: Opaque HAL SOC handle
2302  * @hal_ring_hdl: Ring pointer (Source or Destination ring)
2303  *
2304  * The target expects cached head/tail pointer to be updated to the
2305  * shared location in little-endian order; this API ensures that.
2306  * This API should be used only if hal_srng_access_start_unlocked was used to
2307  * start ring access
2308  *
2309  * Return: None
2310  */
2311 static inline void
2312 hal_srng_access_end_unlocked(void *hal_soc, hal_ring_handle_t hal_ring_hdl)
2313 {
2314 	struct hal_srng *srng = (struct hal_srng *)hal_ring_hdl;
2315 
2316 	/* TODO: See if we need a write memory barrier here */
2317 	if (srng->flags & HAL_SRNG_LMAC_RING) {
2318 		/* For LMAC rings, ring pointer updates are done through FW and
2319 		 * hence written to a shared memory location that is read by FW
2320 		 */
2321 		if (srng->ring_dir == HAL_SRNG_SRC_RING) {
2322 			*srng->u.src_ring.hp_addr =
2323 				qdf_cpu_to_le32(srng->u.src_ring.hp);
2324 		} else {
2325 			*srng->u.dst_ring.tp_addr =
2326 				qdf_cpu_to_le32(srng->u.dst_ring.tp);
2327 		}
2328 	} else {
2329 		if (srng->ring_dir == HAL_SRNG_SRC_RING)
2330 			hal_srng_write_address_32_mb(hal_soc,
2331 						     srng,
2332 						     srng->u.src_ring.hp_addr,
2333 						     srng->u.src_ring.hp);
2334 		else
2335 			hal_srng_write_address_32_mb(hal_soc,
2336 						     srng,
2337 						     srng->u.dst_ring.tp_addr,
2338 						     srng->u.dst_ring.tp);
2339 	}
2340 }
2341 
2342 /* hal_srng_access_end_unlocked already handles endianness conversion,
2343  * use the same.
2344  */
2345 #define hal_le_srng_access_end_unlocked_in_cpu_order \
2346 	hal_srng_access_end_unlocked
2347 
2348 /**
2349  * hal_srng_access_end - Unlock ring access and update cached ring head/tail
2350  * pointers to HW
2351  *
2352  * @hal_soc: Opaque HAL SOC handle
2353  * @hal_ring_hdl: Ring pointer (Source or Destination ring)
2354  *
2355  * The target expects cached head/tail pointer to be updated to the
2356  * shared location in little-endian order; this API ensures that.
2357  * This API should be used only if hal_srng_access_start was used to
2358  * start ring access
2359  *
2360  */
2361 static inline void
2362 hal_srng_access_end(void *hal_soc, hal_ring_handle_t hal_ring_hdl)
2363 {
2364 	struct hal_srng *srng = (struct hal_srng *)hal_ring_hdl;
2365 
2366 	if (qdf_unlikely(!hal_ring_hdl)) {
2367 		qdf_print("Error: Invalid hal_ring\n");
2368 		return;
2369 	}
2370 
2371 	hal_srng_access_end_unlocked(hal_soc, hal_ring_hdl);
2372 	SRNG_UNLOCK(&(srng->lock));
2373 }
2374 
2375 #ifdef FEATURE_RUNTIME_PM
2376 #define hal_srng_access_end_v1 hal_srng_rtpm_access_end
2377 
2378 /**
2379  * hal_srng_rtpm_access_end - RTPM aware, Unlock ring access
2380  * @hal_soc: Opaque HAL SOC handle
2381  * @hal_ring_hdl: Ring pointer (Source or Destination ring)
2382  * @rtpm_id: Runtime PM id
2384  *
2385  * Function updates the HP/TP value to the hardware register.
2386  * The target expects cached head/tail pointer to be updated to the
2387  * shared location in little-endian order; this API ensures that.
2388  * This API should be used only if hal_srng_access_start was used to
2389  * start ring access
2390  *
2391  * Return: None
2392  */
2393 void
2394 hal_srng_rtpm_access_end(hal_soc_handle_t hal_soc_hdl,
2395 			 hal_ring_handle_t hal_ring_hdl,
2396 			 uint32_t rtpm_id);
2397 #else
2398 #define hal_srng_access_end_v1(hal_soc_hdl, hal_ring_hdl, rtpm_id) \
2399 	hal_srng_access_end(hal_soc_hdl, hal_ring_hdl)
2400 #endif
2401 
2402 /* hal_srng_access_end already handles endianness conversion, so use the same */
2403 #define hal_le_srng_access_end_in_cpu_order \
2404 	hal_srng_access_end
2405 
2406 /**
2407  * hal_srng_access_end_reap - Unlock ring access
2408  * This should be used only if hal_srng_access_start was used to start ring access
2409  * and should be used only while reaping SRC ring completions
2410  *
2411  * @hal_soc: Opaque HAL SOC handle
2412  * @hal_ring_hdl: Ring pointer (Source or Destination ring)
2413  *
2414  * Return: None
2415  */
2416 static inline void
2417 hal_srng_access_end_reap(void *hal_soc, hal_ring_handle_t hal_ring_hdl)
2418 {
2419 	struct hal_srng *srng = (struct hal_srng *)hal_ring_hdl;
2420 
2421 	SRNG_UNLOCK(&(srng->lock));
2422 }
2423 
2424 /* TODO: Check if the following definitions is available in HW headers */
2425 #define WBM_IDLE_SCATTER_BUF_SIZE 32704
2426 #define NUM_MPDUS_PER_LINK_DESC 6
2427 #define NUM_MSDUS_PER_LINK_DESC 7
2428 #define REO_QUEUE_DESC_ALIGN 128
2429 
2430 #define LINK_DESC_ALIGN 128
2431 
2432 #define ADDRESS_MATCH_TAG_VAL 0x5
2433 /* Number of mpdu link pointers is 9 in case of TX_MPDU_QUEUE_HEAD and 14 in
2434  * case of TX_MPDU_QUEUE_EXT. We are defining a common average count here
2435  */
2436 #define NUM_MPDU_LINKS_PER_QUEUE_DESC 12
2437 
2438 /* TODO: Check with HW team on the scatter buffer size supported. As per WBM
2439  * MLD, scatter_buffer_size in IDLE_LIST_CONTROL register is 9 bits and size
2440  * should be specified in 16 word units. But the number of bits defined for
2441  * this field in HW header files is 5.
2442  */
2443 #define WBM_IDLE_SCATTER_BUF_NEXT_PTR_SIZE 8
2444 
2445 
2446 /**
2447  * hal_idle_list_scatter_buf_size - Get the size of each scatter buffer
2448  * in an idle list
2449  *
2450  * @hal_soc: Opaque HAL SOC handle
2451  *
2452  */
2453 static inline
2454 uint32_t hal_idle_list_scatter_buf_size(hal_soc_handle_t hal_soc_hdl)
2455 {
2456 	return WBM_IDLE_SCATTER_BUF_SIZE;
2457 }
2458 
2459 /**
2460  * hal_get_link_desc_size - Get the size of each link descriptor
2461  *
2462  * @hal_soc: Opaque HAL SOC handle
2463  *
2464  */
2465 static inline uint32_t hal_get_link_desc_size(hal_soc_handle_t hal_soc_hdl)
2466 {
2467 	struct hal_soc *hal_soc = (struct hal_soc *)hal_soc_hdl;
2468 
2469 	if (!hal_soc || !hal_soc->ops) {
2470 		qdf_print("Error: Invalid ops\n");
2471 		QDF_BUG(0);
2472 		return -EINVAL;
2473 	}
2474 	if (!hal_soc->ops->hal_get_link_desc_size) {
2475 		qdf_print("Error: Invalid function pointer\n");
2476 		QDF_BUG(0);
2477 		return -EINVAL;
2478 	}
2479 	return hal_soc->ops->hal_get_link_desc_size();
2480 }
2481 
2482 /**
2483  * hal_get_link_desc_align - Get the required start address alignment for
2484  * link descriptors
2485  *
2486  * @hal_soc: Opaque HAL SOC handle
2487  *
2488  */
2489 static inline
2490 uint32_t hal_get_link_desc_align(hal_soc_handle_t hal_soc_hdl)
2491 {
2492 	return LINK_DESC_ALIGN;
2493 }
2494 
2495 /**
2496  * hal_num_mpdus_per_link_desc - Get number of mpdus each link desc can hold
2497  *
2498  * @hal_soc: Opaque HAL SOC handle
2499  *
2500  */
2501 static inline
2502 uint32_t hal_num_mpdus_per_link_desc(hal_soc_handle_t hal_soc_hdl)
2503 {
2504 	return NUM_MPDUS_PER_LINK_DESC;
2505 }
2506 
2507 /**
2508  * hal_num_msdus_per_link_desc - Get number of msdus each link desc can hold
2509  *
2510  * @hal_soc: Opaque HAL SOC handle
2511  *
2512  */
2513 static inline
2514 uint32_t hal_num_msdus_per_link_desc(hal_soc_handle_t hal_soc_hdl)
2515 {
2516 	return NUM_MSDUS_PER_LINK_DESC;
2517 }
2518 
2519 /**
2520  * hal_num_mpdu_links_per_queue_desc - Get number of mpdu links each queue
2521  * descriptor can hold
2522  *
2523  * @hal_soc: Opaque HAL SOC handle
2524  *
2525  */
2526 static inline
2527 uint32_t hal_num_mpdu_links_per_queue_desc(hal_soc_handle_t hal_soc_hdl)
2528 {
2529 	return NUM_MPDU_LINKS_PER_QUEUE_DESC;
2530 }
2531 
2532 /**
2533  * hal_idle_scatter_buf_num_entries - Get the number of link desc entries
2534  * that can fit in the given scatter buffer size
2535  *
2536  * @hal_soc: Opaque HAL SOC handle
2537  * @scatter_buf_size: Size of scatter buffer
2538  *
2539  */
2540 static inline
2541 uint32_t hal_idle_scatter_buf_num_entries(hal_soc_handle_t hal_soc_hdl,
2542 					  uint32_t scatter_buf_size)
2543 {
2544 	return (scatter_buf_size - WBM_IDLE_SCATTER_BUF_NEXT_PTR_SIZE) /
2545 		hal_srng_get_entrysize(hal_soc_hdl, WBM_IDLE_LINK);
2546 }
2547 
2548 /**
2549  * hal_idle_list_num_scatter_bufs - Get the number of scatter buffers
2550  * needed to cover the given total memory with the given buffer size
2551  *
2552  * @hal_soc: Opaque HAL SOC handle
2553  * @total_mem: size of memory to be scattered
2554  * @scatter_buf_size: Size of scatter buffer
2555  *
2556  */
2557 static inline
2558 uint32_t hal_idle_list_num_scatter_bufs(hal_soc_handle_t hal_soc_hdl,
2559 					uint32_t total_mem,
2560 					uint32_t scatter_buf_size)
2561 {
2562 	uint8_t rem = (total_mem % (scatter_buf_size -
2563 			WBM_IDLE_SCATTER_BUF_NEXT_PTR_SIZE)) ? 1 : 0;
2564 
2565 	uint32_t num_scatter_bufs = (total_mem / (scatter_buf_size -
2566 				WBM_IDLE_SCATTER_BUF_NEXT_PTR_SIZE)) + rem;
2567 
2568 	return num_scatter_bufs;
2569 }
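
/*
 * Worked example (numbers are illustrative): with scatter buffers of
 * WBM_IDLE_SCATTER_BUF_SIZE (32704) bytes and an 8 byte next pointer,
 * each buffer carries 32696 bytes of link descriptor pointers, so a
 * total_mem of 100000 bytes needs (100000 / 32696) + 1 = 4 scatter
 * buffers.
 */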
2570 
2571 enum hal_pn_type {
2572 	HAL_PN_NONE,
2573 	HAL_PN_WPA,
2574 	HAL_PN_WAPI_EVEN,
2575 	HAL_PN_WAPI_UNEVEN,
2576 };
2577 
2578 #define HAL_RX_BA_WINDOW_256 256
2579 #define HAL_RX_BA_WINDOW_1024 1024
2580 
2581 /**
2582  * hal_get_reo_qdesc_align - Get start address alignment for reo
2583  * queue descriptors
2584  *
2585  * @hal_soc: Opaque HAL SOC handle
2586  *
2587  */
2588 static inline
2589 uint32_t hal_get_reo_qdesc_align(hal_soc_handle_t hal_soc_hdl)
2590 {
2591 	return REO_QUEUE_DESC_ALIGN;
2592 }
2593 
2594 /**
2595  * hal_srng_get_hp_addr - Get head pointer physical address
2596  *
2597  * @hal_soc: Opaque HAL SOC handle
2598  * @hal_ring_hdl: Ring pointer (Source or Destination ring)
2599  *
2600  */
2601 static inline qdf_dma_addr_t
2602 hal_srng_get_hp_addr(void *hal_soc,
2603 		     hal_ring_handle_t hal_ring_hdl)
2604 {
2605 	struct hal_srng *srng = (struct hal_srng *)hal_ring_hdl;
2606 	struct hal_soc *hal = (struct hal_soc *)hal_soc;
2607 
2608 	if (srng->ring_dir == HAL_SRNG_SRC_RING) {
2609 		return hal->shadow_wrptr_mem_paddr +
2610 		  ((unsigned long)(srng->u.src_ring.hp_addr) -
2611 		  (unsigned long)(hal->shadow_wrptr_mem_vaddr));
2612 	} else {
2613 		return hal->shadow_rdptr_mem_paddr +
2614 		  ((unsigned long)(srng->u.dst_ring.hp_addr) -
2615 		   (unsigned long)(hal->shadow_rdptr_mem_vaddr));
2616 	}
2617 }
2618 
2619 /**
2620  * hal_srng_get_tp_addr - Get tail pointer physical address
2621  *
2622  * @hal_soc: Opaque HAL SOC handle
2623  * @hal_ring_hdl: Ring pointer (Source or Destination ring)
2624  *
2625  */
2626 static inline qdf_dma_addr_t
2627 hal_srng_get_tp_addr(void *hal_soc, hal_ring_handle_t hal_ring_hdl)
2628 {
2629 	struct hal_srng *srng = (struct hal_srng *)hal_ring_hdl;
2630 	struct hal_soc *hal = (struct hal_soc *)hal_soc;
2631 
2632 	if (srng->ring_dir == HAL_SRNG_SRC_RING) {
2633 		return hal->shadow_rdptr_mem_paddr +
2634 			((unsigned long)(srng->u.src_ring.tp_addr) -
2635 			(unsigned long)(hal->shadow_rdptr_mem_vaddr));
2636 	} else {
2637 		return hal->shadow_wrptr_mem_paddr +
2638 			((unsigned long)(srng->u.dst_ring.tp_addr) -
2639 			(unsigned long)(hal->shadow_wrptr_mem_vaddr));
2640 	}
2641 }
2642 
2643 /**
2644  * hal_srng_get_num_entries - Get total entries in the HAL Srng
2645  *
2646  * @hal_soc: Opaque HAL SOC handle
2647  * @hal_ring_hdl: Ring pointer (Source or Destination ring)
2648  *
2649  * Return: total number of entries in hal ring
2650  */
2651 static inline
2652 uint32_t hal_srng_get_num_entries(hal_soc_handle_t hal_soc_hdl,
2653 				  hal_ring_handle_t hal_ring_hdl)
2654 {
2655 	struct hal_srng *srng = (struct hal_srng *)hal_ring_hdl;
2656 
2657 	return srng->num_entries;
2658 }
2659 
2660 /**
2661  * hal_get_srng_params - Retrieve SRNG parameters for a given ring from HAL
2662  *
2663  * @hal_soc: Opaque HAL SOC handle
2664  * @hal_ring_hdl: Ring pointer (Source or Destination ring)
2665  * @ring_params: SRNG parameters will be returned through this structure
2666  */
2667 void hal_get_srng_params(hal_soc_handle_t hal_soc_hdl,
2668 			 hal_ring_handle_t hal_ring_hdl,
2669 			 struct hal_srng_params *ring_params);
2670 
2671 /**
2672  * hal_get_meminfo - Retrieve hal memory base address
2673  *
2674  * @hal_soc: Opaque HAL SOC handle
2675  * @mem: pointer to structure to be updated with hal mem info
2676  */
2677 void hal_get_meminfo(hal_soc_handle_t hal_soc_hdl, struct hal_mem_info *mem);
2678 
2679 /**
2680  * hal_get_target_type - Return target type
2681  *
2682  * @hal_soc: Opaque HAL SOC handle
2683  */
2684 uint32_t hal_get_target_type(hal_soc_handle_t hal_soc_hdl);
2685 
2686 /**
2687  * hal_srng_dst_hw_init - Private function to initialize SRNG
2688  * destination ring HW
2689  * @hal_soc: HAL SOC handle
2690  * @srng: SRNG ring pointer
2691  * @idle_check: Check if ring is idle
2692  */
2693 static inline void hal_srng_dst_hw_init(struct hal_soc *hal,
2694 					struct hal_srng *srng, bool idle_check)
2695 {
2696 	hal->ops->hal_srng_dst_hw_init(hal, srng, idle_check);
2697 }
2698 
2699 /**
2700  * hal_srng_src_hw_init - Private function to initialize SRNG
2701  * source ring HW
2702  * @hal_soc: HAL SOC handle
2703  * @srng: SRNG ring pointer
2704  * @idle_check: Check if ring is idle
2705  */
2706 static inline void hal_srng_src_hw_init(struct hal_soc *hal,
2707 					struct hal_srng *srng, bool idle_check)
2708 {
2709 	hal->ops->hal_srng_src_hw_init(hal, srng, idle_check);
2710 }
2711 
2712 /**
2713  * hal_srng_hw_disable - Private function to disable SRNG
2714  * source ring HW
2715  * @hal_soc: HAL SOC handle
2716  * @srng: SRNG ring pointer
2717  */
2718 static inline
2719 void hal_srng_hw_disable(struct hal_soc *hal_soc, struct hal_srng *srng)
2720 {
2721 	if (hal_soc->ops->hal_srng_hw_disable)
2722 		hal_soc->ops->hal_srng_hw_disable(hal_soc, srng);
2723 }
2724 
2725 /**
2726  * hal_get_hw_hptp()  - Get HW head and tail pointer value for any ring
2727  * @hal_soc: Opaque HAL SOC handle
2728  * @hal_ring_hdl: Source ring pointer
2729  * @headp: Head Pointer
2730  * @tailp: Tail Pointer
2731  * @ring_type: Ring
2732  *
2733  * Return: Update tail pointer and head pointer in arguments.
2734  */
2735 static inline
2736 void hal_get_hw_hptp(hal_soc_handle_t hal_soc_hdl,
2737 		     hal_ring_handle_t hal_ring_hdl,
2738 		     uint32_t *headp, uint32_t *tailp,
2739 		     uint8_t ring_type)
2740 {
2741 	struct hal_soc *hal_soc = (struct hal_soc *)hal_soc_hdl;
2742 
2743 	hal_soc->ops->hal_get_hw_hptp(hal_soc, hal_ring_hdl,
2744 			headp, tailp, ring_type);
2745 }
2746 
2747 /**
2748  * hal_reo_setup - Initialize HW REO block
2749  *
2750  * @hal_soc: Opaque HAL SOC handle
2751  * @reo_params: parameters needed by HAL for REO config
2752  * @qref_reset: reset qref
2753  */
2754 static inline void hal_reo_setup(hal_soc_handle_t hal_soc_hdl,
2755 				 void *reoparams, int qref_reset)
2756 {
2757 	struct hal_soc *hal_soc = (struct hal_soc *)hal_soc_hdl;
2758 
2759 	hal_soc->ops->hal_reo_setup(hal_soc, reoparams, qref_reset);
2760 }
2761 
2762 static inline
2763 void hal_compute_reo_remap_ix2_ix3(hal_soc_handle_t hal_soc_hdl,
2764 				   uint32_t *ring, uint32_t num_rings,
2765 				   uint32_t *remap1, uint32_t *remap2)
2766 {
2767 	struct hal_soc *hal_soc = (struct hal_soc *)hal_soc_hdl;
2768 
2769 	return hal_soc->ops->hal_compute_reo_remap_ix2_ix3(ring,
2770 					num_rings, remap1, remap2);
2771 }
2772 
2773 static inline
2774 void hal_compute_reo_remap_ix0(hal_soc_handle_t hal_soc_hdl, uint32_t *remap0)
2775 {
2776 	struct hal_soc *hal_soc = (struct hal_soc *)hal_soc_hdl;
2777 
2778 	if (hal_soc->ops->hal_compute_reo_remap_ix0)
2779 		hal_soc->ops->hal_compute_reo_remap_ix0(remap0);
2780 }
2781 
2782 /**
2783  * hal_setup_link_idle_list - Setup scattered idle list using the
2784  * buffer list provided
2785  *
2786  * @hal_soc: Opaque HAL SOC handle
2787  * @scatter_bufs_base_paddr: Array of physical base addresses
2788  * @scatter_bufs_base_vaddr: Array of virtual base addresses
2789  * @num_scatter_bufs: Number of scatter buffers in the above lists
2790  * @scatter_buf_size: Size of each scatter buffer
2791  * @last_buf_end_offset: Offset to the last entry
2792  * @num_entries: Total entries of all scatter bufs
2793  *
2794  */
2795 static inline
2796 void hal_setup_link_idle_list(hal_soc_handle_t hal_soc_hdl,
2797 			      qdf_dma_addr_t scatter_bufs_base_paddr[],
2798 			      void *scatter_bufs_base_vaddr[],
2799 			      uint32_t num_scatter_bufs,
2800 			      uint32_t scatter_buf_size,
2801 			      uint32_t last_buf_end_offset,
2802 			      uint32_t num_entries)
2803 {
2804 	struct hal_soc *hal_soc = (struct hal_soc *)hal_soc_hdl;
2805 
2806 	hal_soc->ops->hal_setup_link_idle_list(hal_soc, scatter_bufs_base_paddr,
2807 			scatter_bufs_base_vaddr, num_scatter_bufs,
2808 			scatter_buf_size, last_buf_end_offset,
2809 			num_entries);
2810 
2811 }
2812 
2813 #ifdef DUMP_REO_QUEUE_INFO_IN_DDR
2814 /**
2815  * hal_dump_rx_reo_queue_desc() - Dump reo queue descriptor fields
2816  * @hw_qdesc_vaddr_aligned: Pointer to hw reo queue desc virtual addr
2817  *
2818  * Use the virtual addr pointer to reo h/w queue desc to read
2819  * the values from ddr and log them.
2820  *
2821  * Return: none
2822  */
2823 static inline void hal_dump_rx_reo_queue_desc(
2824 	void *hw_qdesc_vaddr_aligned)
2825 {
2826 	struct rx_reo_queue *hw_qdesc =
2827 		(struct rx_reo_queue *)hw_qdesc_vaddr_aligned;
2828 
2829 	if (!hw_qdesc)
2830 		return;
2831 
2832 	hal_info("receive_queue_number %u vld %u window_jump_2k %u"
2833 		 " hole_count %u ba_window_size %u ignore_ampdu_flag %u"
2834 		 " svld %u ssn %u current_index %u"
2835 		 " disable_duplicate_detection %u soft_reorder_enable %u"
2836 		 " chk_2k_mode %u oor_mode %u mpdu_frames_processed_count %u"
2837 		 " msdu_frames_processed_count %u total_processed_byte_count %u"
2838 		 " late_receive_mpdu_count %u seq_2k_error_detected_flag %u"
2839 		 " pn_error_detected_flag %u current_mpdu_count %u"
2840 		 " current_msdu_count %u timeout_count %u"
2841 		 " forward_due_to_bar_count %u duplicate_count %u"
2842 		 " frames_in_order_count %u bar_received_count %u"
2843 		 " pn_check_needed %u pn_shall_be_even %u"
2844 		 " pn_shall_be_uneven %u pn_size %u",
2845 		 hw_qdesc->receive_queue_number,
2846 		 hw_qdesc->vld,
2847 		 hw_qdesc->window_jump_2k,
2848 		 hw_qdesc->hole_count,
2849 		 hw_qdesc->ba_window_size,
2850 		 hw_qdesc->ignore_ampdu_flag,
2851 		 hw_qdesc->svld,
2852 		 hw_qdesc->ssn,
2853 		 hw_qdesc->current_index,
2854 		 hw_qdesc->disable_duplicate_detection,
2855 		 hw_qdesc->soft_reorder_enable,
2856 		 hw_qdesc->chk_2k_mode,
2857 		 hw_qdesc->oor_mode,
2858 		 hw_qdesc->mpdu_frames_processed_count,
2859 		 hw_qdesc->msdu_frames_processed_count,
2860 		 hw_qdesc->total_processed_byte_count,
2861 		 hw_qdesc->late_receive_mpdu_count,
2862 		 hw_qdesc->seq_2k_error_detected_flag,
2863 		 hw_qdesc->pn_error_detected_flag,
2864 		 hw_qdesc->current_mpdu_count,
2865 		 hw_qdesc->current_msdu_count,
2866 		 hw_qdesc->timeout_count,
2867 		 hw_qdesc->forward_due_to_bar_count,
2868 		 hw_qdesc->duplicate_count,
2869 		 hw_qdesc->frames_in_order_count,
2870 		 hw_qdesc->bar_received_count,
2871 		 hw_qdesc->pn_check_needed,
2872 		 hw_qdesc->pn_shall_be_even,
2873 		 hw_qdesc->pn_shall_be_uneven,
2874 		 hw_qdesc->pn_size);
2875 }
2876 
2877 #else /* DUMP_REO_QUEUE_INFO_IN_DDR */
2878 
2879 static inline void hal_dump_rx_reo_queue_desc(
2880 	void *hw_qdesc_vaddr_aligned)
2881 {
2882 }
2883 #endif /* DUMP_REO_QUEUE_INFO_IN_DDR */
2884 
2885 /**
2886  * hal_srng_dump_ring_desc() - Dump ring descriptor info
2887  *
2888  * @hal_soc: Opaque HAL SOC handle
2889  * @hal_ring_hdl: Source ring pointer
2890  * @ring_desc: Opaque ring descriptor handle
2891  */
2892 static inline void hal_srng_dump_ring_desc(hal_soc_handle_t hal_soc_hdl,
2893 					   hal_ring_handle_t hal_ring_hdl,
2894 					   hal_ring_desc_t ring_desc)
2895 {
2896 	struct hal_srng *srng = (struct hal_srng *)hal_ring_hdl;
2897 
2898 	QDF_TRACE_HEX_DUMP(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_HIGH,
2899 			   ring_desc, (srng->entry_size << 2));
2900 }
2901 
2902 /**
2903  * hal_srng_dump_ring() - Dump last 128 descs of the ring
2904  *
2905  * @hal_soc: Opaque HAL SOC handle
2906  * @hal_ring_hdl: Source ring pointer
2907  */
2908 static inline void hal_srng_dump_ring(hal_soc_handle_t hal_soc_hdl,
2909 				      hal_ring_handle_t hal_ring_hdl)
2910 {
2911 	struct hal_srng *srng = (struct hal_srng *)hal_ring_hdl;
2912 	uint32_t *desc;
2913 	uint32_t tp, i;
2914 
2915 	tp = srng->u.dst_ring.tp;
2916 
2917 	for (i = 0; i < 128; i++) {
2918 		if (!tp)
2919 			tp = srng->ring_size;
2920 
2921 		desc = &srng->ring_base_vaddr[tp - srng->entry_size];
2922 		QDF_TRACE_HEX_DUMP(QDF_MODULE_ID_DP,
2923 				   QDF_TRACE_LEVEL_DEBUG,
2924 				   desc, (srng->entry_size << 2));
2925 
2926 		tp -= srng->entry_size;
2927 	}
2928 }
2929 
2930 /**
2931  * hal_rxdma_desc_to_hal_ring_desc - API to convert rxdma ring desc
2932  * to opaque hal ring desc type
2933  * @ring_desc: rxdma ring desc
2934  *
2935  * Return: hal_ring_desc_t type
2936  */
2937 static inline
2938 hal_ring_desc_t hal_rxdma_desc_to_hal_ring_desc(hal_rxdma_desc_t ring_desc)
2939 {
2940 	return (hal_ring_desc_t)ring_desc;
2941 }
2942 
2943 /**
2944  * hal_srng_set_event() - Set hal_srng event
2945  * @hal_ring_hdl: Source ring pointer
2946  * @event: SRNG ring event
2947  *
2948  * Return: None
2949  */
2950 static inline void hal_srng_set_event(hal_ring_handle_t hal_ring_hdl, int event)
2951 {
2952 	struct hal_srng *srng = (struct hal_srng *)hal_ring_hdl;
2953 
2954 	qdf_atomic_set_bit(event, &srng->srng_event);
2955 }
2956 
2957 /**
2958  * hal_srng_clear_event() - Clear hal_srng event
2959  * @hal_ring_hdl: Source ring pointer
2960  * @event: SRNG ring event
2961  *
2962  * Return: None
2963  */
2964 static inline
2965 void hal_srng_clear_event(hal_ring_handle_t hal_ring_hdl, int event)
2966 {
2967 	struct hal_srng *srng = (struct hal_srng *)hal_ring_hdl;
2968 
2969 	qdf_atomic_clear_bit(event, &srng->srng_event);
2970 }
2971 
2972 /**
2973  * hal_srng_get_clear_event() - Clear srng event and return old value
2974  * @hal_ring_hdl: Source ring pointer
2975  * @event: SRNG ring event
2976  *
2977  * Return: Return old event value
2978  */
2979 static inline
2980 int hal_srng_get_clear_event(hal_ring_handle_t hal_ring_hdl, int event)
2981 {
2982 	struct hal_srng *srng = (struct hal_srng *)hal_ring_hdl;
2983 
2984 	return qdf_atomic_test_and_clear_bit(event, &srng->srng_event);
2985 }
2986 
2987 /**
2988  * hal_srng_set_flush_last_ts() - Record last flush time stamp
2989  * @hal_ring_hdl: Source ring pointer
2990  *
2991  * Return: None
2992  */
2993 static inline void hal_srng_set_flush_last_ts(hal_ring_handle_t hal_ring_hdl)
2994 {
2995 	struct hal_srng *srng = (struct hal_srng *)hal_ring_hdl;
2996 
2997 	srng->last_flush_ts = qdf_get_log_timestamp();
2998 }
2999 
3000 /**
3001  * hal_srng_inc_flush_cnt() - Increment flush counter
3002  * @hal_ring_hdl: Source ring pointer
3003  *
3004  * Return: None
3005  */
3006 static inline void hal_srng_inc_flush_cnt(hal_ring_handle_t hal_ring_hdl)
3007 {
3008 	struct hal_srng *srng = (struct hal_srng *)hal_ring_hdl;
3009 
3010 	srng->flush_count++;
3011 }
3012 
3013 /**
3014  * hal_rx_sw_mon_desc_info_get () - Get SW monitor desc info
3015  *
3016  * @hal: Core HAL soc handle
3017  * @ring_desc: Mon dest ring descriptor
3018  * @desc_info: Desc info to be populated
3019  *
3020  * Return: None
3021  */
3022 static inline void
3023 hal_rx_sw_mon_desc_info_get(struct hal_soc *hal,
3024 			    hal_ring_desc_t ring_desc,
3025 			    hal_rx_mon_desc_info_t desc_info)
3026 {
3027 	return hal->ops->hal_rx_sw_mon_desc_info_get(ring_desc, desc_info);
3028 }
3029 
3030 /**
3031  * hal_reo_set_err_dst_remap() - Set REO error destination ring remap
3032  *				 register value.
3033  *
3034  * @hal_soc_hdl: Opaque HAL soc handle
3035  *
3036  * Return: None
3037  */
3038 static inline void hal_reo_set_err_dst_remap(hal_soc_handle_t hal_soc_hdl)
3039 {
3040 	struct hal_soc *hal_soc = (struct hal_soc *)hal_soc_hdl;
3041 
3042 	if (hal_soc->ops->hal_reo_set_err_dst_remap)
3043 		hal_soc->ops->hal_reo_set_err_dst_remap(hal_soc);
3044 }
3045 
3046 /**
3047  * hal_reo_enable_pn_in_dest() - Subscribe for previous PN for 2k-jump or
3048  *			OOR error frames
3049  * @hal_soc_hdl: Opaque HAL soc handle
3050  *
3051  * Return: true if feature is enabled,
3052  *	false, otherwise.
3053  */
3054 static inline uint8_t
3055 hal_reo_enable_pn_in_dest(hal_soc_handle_t hal_soc_hdl)
3056 {
3057 	struct hal_soc *hal_soc = (struct hal_soc *)hal_soc_hdl;
3058 
3059 	if (hal_soc->ops->hal_reo_enable_pn_in_dest)
3060 		return hal_soc->ops->hal_reo_enable_pn_in_dest(hal_soc);
3061 
3062 	return 0;
3063 }
3064 
3065 #ifdef GENERIC_SHADOW_REGISTER_ACCESS_ENABLE
3066 
3067 /**
3068  * hal_set_one_target_reg_config() - Populate the target reg
3069  * offset in hal_soc for one non srng related register at the
3070  * given list index
3071  * @hal_soc: hal handle
3072  * @target_reg_offset: target register offset
3073  * @list_index: index in hal list for shadow regs
3074  *
3075  * Return: none
3076  */
3077 void hal_set_one_target_reg_config(struct hal_soc *hal,
3078 				   uint32_t target_reg_offset,
3079 				   int list_index);
3080 
3081 /**
3082  * hal_set_shadow_regs() - Populate register offset for
3083  * registers that need to be populated in list_shadow_reg_config
3084  * in order to be sent to FW. These reg offsets will be mapped
3085  * to shadow registers.
3086  * @hal_soc: hal handle
3087  *
3088  * Return: QDF_STATUS_OK on success
3089  */
3090 QDF_STATUS hal_set_shadow_regs(void *hal_soc);
3091 
3092 /**
3093  * hal_construct_shadow_regs() - initialize the shadow registers
3094  * for non-srng related register configs
3095  * @hal_soc: hal handle
3096  *
3097  * Return: QDF_STATUS_OK on success
3098  */
3099 QDF_STATUS hal_construct_shadow_regs(void *hal_soc);
3100 
3101 #else /* GENERIC_SHADOW_REGISTER_ACCESS_ENABLE */
3102 static inline void hal_set_one_target_reg_config(
3103 	struct hal_soc *hal,
3104 	uint32_t target_reg_offset,
3105 	int list_index)
3106 {
3107 }
3108 
3109 static inline QDF_STATUS hal_set_shadow_regs(void *hal_soc)
3110 {
3111 	return QDF_STATUS_SUCCESS;
3112 }
3113 
3114 static inline QDF_STATUS hal_construct_shadow_regs(void *hal_soc)
3115 {
3116 	return QDF_STATUS_SUCCESS;
3117 }
3118 #endif /* GENERIC_SHADOW_REGISTER_ACCESS_ENABLE */
3119 
3120 #ifdef FEATURE_HAL_DELAYED_REG_WRITE
3121 /**
3122  * hal_flush_reg_write_work() - flush all writes from register write queue
3123  * @hal_handle: hal_soc handle
3124  *
3125  * Return: None
3126  */
3127 void hal_flush_reg_write_work(hal_soc_handle_t hal_handle);
3128 
3129 #else
3130 static inline void hal_flush_reg_write_work(hal_soc_handle_t hal_handle) { }
3131 #endif
3132 
3133 /**
3134  * hal_get_ring_usage - Calculate the ring usage percentage
3135  * @hal_ring_hdl: Ring pointer
3136  * @ring_type: Ring type
3137  * @headp: pointer to head value
3138  * @tailp: pointer to tail value
3139  *
3140  * Calculate the ring usage percentage for src and dest rings
3141  *
3142  * Return: Ring usage percentage
3143  */
3144 static inline
3145 uint32_t hal_get_ring_usage(
3146 	hal_ring_handle_t hal_ring_hdl,
3147 	enum hal_ring_type ring_type, uint32_t *headp, uint32_t *tailp)
3148 {
3149 	struct hal_srng *srng = (struct hal_srng *)hal_ring_hdl;
3150 	uint32_t num_avail, num_valid = 0;
3151 	uint32_t ring_usage;
3152 
3153 	if (srng->ring_dir == HAL_SRNG_SRC_RING) {
3154 		if (*tailp > *headp)
3155 			num_avail =  ((*tailp - *headp) / srng->entry_size) - 1;
3156 		else
3157 			num_avail = ((srng->ring_size - *headp + *tailp) /
3158 				     srng->entry_size) - 1;
3159 		if (ring_type == WBM_IDLE_LINK)
3160 			num_valid = num_avail;
3161 		else
3162 			num_valid = srng->num_entries - num_avail;
3163 	} else {
3164 		if (*headp >= *tailp)
3165 			num_valid = ((*headp - *tailp) / srng->entry_size);
3166 		else
3167 			num_valid = ((srng->ring_size - *tailp + *headp) /
3168 				     srng->entry_size);
3169 	}
3170 	ring_usage = (100 * num_valid) / srng->num_entries;
3171 	return ring_usage;
3172 }
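
/*
 * Worked example (illustrative values): for a destination ring with
 * num_entries = 1024, entry_size = 2 and ring_size = 2048, a head
 * offset of 1000 and tail offset of 200 give (1000 - 200) / 2 = 400
 * valid entries, i.e. a ring usage of (100 * 400) / 1024 = 39%.
 */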
3173 
3174 /**
3175  * hal_cmem_write() - function for CMEM buffer writing
3176  * @hal_soc_hdl: HAL SOC handle
3177  * @offset: CMEM address
3178  * @value: value to write
3179  *
3180  * Return: None.
3181  */
3182 static inline void
3183 hal_cmem_write(hal_soc_handle_t hal_soc_hdl, uint32_t offset,
3184 	       uint32_t value)
3185 {
3186 	struct hal_soc *hal_soc = (struct hal_soc *)hal_soc_hdl;
3187 
3188 	if (hal_soc->ops->hal_cmem_write)
3189 		hal_soc->ops->hal_cmem_write(hal_soc_hdl, offset, value);
3190 
3191 	return;
3192 }
3193 
3194 static inline bool
3195 hal_dmac_cmn_src_rxbuf_ring_get(hal_soc_handle_t hal_soc_hdl)
3196 {
3197 	struct hal_soc *hal_soc = (struct hal_soc *)hal_soc_hdl;
3198 
3199 	return hal_soc->dmac_cmn_src_rxbuf_ring;
3200 }
3201 
3202 /**
3203  * hal_srng_dst_prefetch() - function to prefetch 4 destination ring descs
3204  * @hal_soc_hdl: HAL SOC handle
3205  * @hal_ring_hdl: Destination ring pointer
3206  * @num_valid: valid entries in the ring
3207  *
3208  * return: last prefetched destination ring descriptor
3209  */
3210 static inline
3211 void *hal_srng_dst_prefetch(hal_soc_handle_t hal_soc_hdl,
3212 			    hal_ring_handle_t hal_ring_hdl,
3213 			    uint16_t num_valid)
3214 {
3215 	struct hal_srng *srng = (struct hal_srng *)hal_ring_hdl;
3216 	uint8_t *desc;
3217 	uint32_t cnt;
3218 	/*
3219 	 * Prefetching 4 HW descriptors ensures that, at least by the time the
3220 	 * 5th HW descriptor is being processed, the 5th HW descriptor, its SW
3221 	 * desc, its nbuf and its nbuf's data are already in the cache,
3222 	 * i.e. all four pieces (HW desc, SW desc, nbuf and nbuf->data)
3223 	 * have been prefetched.
3224 	 */
3225 	uint32_t max_prefetch = 4;
3226 
3227 	if (srng->u.dst_ring.tp == srng->u.dst_ring.cached_hp)
3228 		return NULL;
3229 
3230 	desc = (uint8_t *)&srng->ring_base_vaddr[srng->u.dst_ring.tp];
3231 
3232 	if (num_valid < max_prefetch)
3233 		max_prefetch = num_valid;
3234 
3235 	for (cnt = 0; cnt < max_prefetch; cnt++) {
3236 		desc += srng->entry_size * sizeof(uint32_t);
3237 		if (desc  == ((uint8_t *)srng->ring_vaddr_end))
3238 			desc = (uint8_t *)&srng->ring_base_vaddr[0];
3239 
3240 		qdf_prefetch(desc);
3241 	}
3242 	return (void *)desc;
3243 }
3244 
3245 /**
3246  * hal_srng_dst_prefetch_next_cached_desc() - function to prefetch next desc
3247  * @hal_soc_hdl: HAL SOC handle
3248  * @hal_ring_hdl: Destination ring pointer
3249  * @last_prefetched_hw_desc: last prefetched HW descriptor
3250  *
3251  * return: next prefetched destination descriptor
3252  */
3253 static inline
3254 void *hal_srng_dst_prefetch_next_cached_desc(hal_soc_handle_t hal_soc_hdl,
3255 					     hal_ring_handle_t hal_ring_hdl,
3256 					     uint8_t *last_prefetched_hw_desc)
3257 {
3258 	struct hal_srng *srng = (struct hal_srng *)hal_ring_hdl;
3259 
3260 	if (srng->u.dst_ring.tp == srng->u.dst_ring.cached_hp)
3261 		return NULL;
3262 
3263 	last_prefetched_hw_desc += srng->entry_size * sizeof(uint32_t);
3264 	if (last_prefetched_hw_desc == ((uint8_t *)srng->ring_vaddr_end))
3265 		last_prefetched_hw_desc = (uint8_t *)&srng->ring_base_vaddr[0];
3266 
3267 	qdf_prefetch(last_prefetched_hw_desc);
3268 	return (void *)last_prefetched_hw_desc;
3269 }
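
/*
 * Illustrative sketch (loop body names are hypothetical, and
 * hal_srng_dst_get_next() is assumed to be the destination-ring
 * accessor declared earlier in this header): prefetch a small window
 * of descriptors up front, then stay one descriptor ahead of the one
 * currently being processed.
 *
 *	last_pf = hal_srng_dst_prefetch(hal_soc_hdl, hal_ring_hdl,
 *					num_valid);
 *	while ((desc = hal_srng_dst_get_next(hal_soc, hal_ring_hdl))) {
 *		last_pf = hal_srng_dst_prefetch_next_cached_desc(
 *				hal_soc_hdl, hal_ring_hdl, last_pf);
 *		process_rx_desc(desc);
 *	}
 */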
3270 
3271 /**
3272  * hal_srng_dst_prefetch_32_byte_desc() - function to prefetch a desc at
3273  *					  64 byte offset
3274  * @hal_soc_hdl: HAL SOC handle
3275  * @hal_ring_hdl: Destination ring pointer
3276  * @num_valid: valid entries in the ring
3277  *
3278  * return: last prefetched destination ring descriptor
3279  */
3280 static inline
3281 void *hal_srng_dst_prefetch_32_byte_desc(hal_soc_handle_t hal_soc_hdl,
3282 					 hal_ring_handle_t hal_ring_hdl,
3283 					 uint16_t num_valid)
3284 {
3285 	struct hal_srng *srng = (struct hal_srng *)hal_ring_hdl;
3286 	uint8_t *desc;
3287 
3288 	if (srng->u.dst_ring.tp == srng->u.dst_ring.cached_hp)
3289 		return NULL;
3290 
3291 	desc = (uint8_t *)&srng->ring_base_vaddr[srng->u.dst_ring.tp];
3292 
3293 	if ((uintptr_t)desc & 0x3f)
3294 		desc += srng->entry_size * sizeof(uint32_t);
3295 	else
3296 		desc += (srng->entry_size * sizeof(uint32_t)) * 2;
3297 
3298 	if (desc  == ((uint8_t *)srng->ring_vaddr_end))
3299 		desc = (uint8_t *)&srng->ring_base_vaddr[0];
3300 
3301 	qdf_prefetch(desc);
3302 
3303 	return (void *)(desc + srng->entry_size * sizeof(uint32_t));
3304 }
3305 
3306 /**
3307  * hal_srng_dst_get_next_32_byte_desc() - function to get the next desc
3308  * @hal_soc_hdl: HAL SOC handle
3309  * @hal_ring_hdl: Destination ring pointer
3310  * @last_prefetched_hw_desc: last prefetched HW descriptor
3311  *
3312  * return: next destination descriptor
3313  */
3314 static inline
3315 void *hal_srng_dst_get_next_32_byte_desc(hal_soc_handle_t hal_soc_hdl,
3316 					 hal_ring_handle_t hal_ring_hdl,
3317 					 uint8_t *last_prefetched_hw_desc)
3318 {
3319 	struct hal_srng *srng = (struct hal_srng *)hal_ring_hdl;
3320 
3321 	if (srng->u.dst_ring.tp == srng->u.dst_ring.cached_hp)
3322 		return NULL;
3323 
3324 	last_prefetched_hw_desc += srng->entry_size * sizeof(uint32_t);
3325 	if (last_prefetched_hw_desc == ((uint8_t *)srng->ring_vaddr_end))
3326 		last_prefetched_hw_desc = (uint8_t *)&srng->ring_base_vaddr[0];
3327 
3328 	return (void *)last_prefetched_hw_desc;
3329 }
3330 
3331 /**
3332  * hal_srng_src_set_hp() - set head idx.
3333  * @hal_ring_hdl: Ring pointer
3334  * @idx: head idx
3335  *
3336  * return: none
3337  */
3338 static inline
3339 void hal_srng_src_set_hp(hal_ring_handle_t hal_ring_hdl, uint16_t idx)
3340 {
3341 	struct hal_srng *srng = (struct hal_srng *)hal_ring_hdl;
3342 
3343 	srng->u.src_ring.hp = idx * srng->entry_size;
3344 }
3345 
3346 /**
3347  * hal_srng_dst_set_tp() - set tail idx.
3348  * @hal_ring_hdl: Ring pointer
3349  * @idx: tail idx
3350  *
3351  * return: none
3352  */
3353 static inline
3354 void hal_srng_dst_set_tp(hal_ring_handle_t hal_ring_hdl, uint16_t idx)
3355 {
3356 	struct hal_srng *srng = (struct hal_srng *)hal_ring_hdl;
3357 
3358 	srng->u.dst_ring.tp = idx * srng->entry_size;
3359 }
3360 
3361 /**
3362  * hal_srng_src_get_tpidx() - get tail idx
3363  * @hal_ring_hdl: Ring pointer
3364  *
3365  * return: tail idx
3366  */
3367 static inline
3368 uint16_t hal_srng_src_get_tpidx(hal_ring_handle_t hal_ring_hdl)
3369 {
3370 	struct hal_srng *srng = (struct hal_srng *)hal_ring_hdl;
3371 	uint32_t tp = *(volatile uint32_t *)(srng->u.src_ring.tp_addr);
3372 
3373 	return tp / srng->entry_size;
3374 }
3375 
3376 /**
3377  * hal_srng_dst_get_hpidx() - get head idx
3378  * @hal_ring_hdl: Ring pointer
3379  *
3380  * return: head idx
3381  */
3382 static inline
3383 uint16_t hal_srng_dst_get_hpidx(hal_ring_handle_t hal_ring_hdl)
3384 {
3385 	struct hal_srng *srng = (struct hal_srng *)hal_ring_hdl;
3386 	uint32_t hp = *(volatile uint32_t *)(srng->u.dst_ring.hp_addr);
3387 
3388 	return hp / srng->entry_size;
3389 }
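
/*
 * Example of the index/offset relationship used by the helpers above
 * (values are illustrative): with entry_size = 2 dwords, entry index 5
 * maps to ring offset 10, so hal_srng_src_set_hp(hal_ring_hdl, 5)
 * writes hp = 10, and hal_srng_dst_get_hpidx() returns hp / 2 = 5 when
 * the HW head pointer reads 10.
 */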
3390 #endif /* _HAL_API_H_ */
3391