xref: /wlan-dirver/qca-wifi-host-cmn/hal/wifi3.0/hal_api.h (revision d0c05845839e5f2ba5a8dcebe0cd3e4cd4e8dfcf)
1 /*
2  * Copyright (c) 2016-2021 The Linux Foundation. All rights reserved.
3  * Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved.
4  *
5  * Permission to use, copy, modify, and/or distribute this software for
6  * any purpose with or without fee is hereby granted, provided that the
7  * above copyright notice and this permission notice appear in all
8  * copies.
9  *
10  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
11  * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
12  * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
13  * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
14  * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
15  * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
16  * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
17  * PERFORMANCE OF THIS SOFTWARE.
18  */
19 
20 #ifndef _HAL_API_H_
21 #define _HAL_API_H_
22 
23 #include "qdf_types.h"
24 #include "qdf_util.h"
25 #include "qdf_atomic.h"
26 #include "hal_internal.h"
27 #include "hif.h"
28 #include "hif_io32.h"
29 #include "qdf_platform.h"
30 
31 #ifdef DUMP_REO_QUEUE_INFO_IN_DDR
32 #include "hal_hw_headers.h"
33 #endif
34 
35 /* Ring index for WBM2SW2 release ring */
36 #define HAL_IPA_TX_COMP_RING_IDX 2
37 
38 /* calculate the register address offset from bar0 of shadow register x */
39 #if defined(QCA_WIFI_QCA6390) || defined(QCA_WIFI_QCA6490) || \
40     defined(QCA_WIFI_KIWI)
41 #define SHADOW_REGISTER_START_ADDRESS_OFFSET 0x000008FC
42 #define SHADOW_REGISTER_END_ADDRESS_OFFSET \
43 	((SHADOW_REGISTER_START_ADDRESS_OFFSET) + (4 * (MAX_SHADOW_REGISTERS)))
44 #define SHADOW_REGISTER(x) ((SHADOW_REGISTER_START_ADDRESS_OFFSET) + (4 * (x)))
45 #elif defined(QCA_WIFI_QCA6290) || defined(QCA_WIFI_QCN9000)
46 #define SHADOW_REGISTER_START_ADDRESS_OFFSET 0x00003024
47 #define SHADOW_REGISTER_END_ADDRESS_OFFSET \
48 	((SHADOW_REGISTER_START_ADDRESS_OFFSET) + (4 * (MAX_SHADOW_REGISTERS)))
49 #define SHADOW_REGISTER(x) ((SHADOW_REGISTER_START_ADDRESS_OFFSET) + (4 * (x)))
50 #elif defined(QCA_WIFI_QCA6750)
51 #define SHADOW_REGISTER_START_ADDRESS_OFFSET 0x00000504
52 #define SHADOW_REGISTER_END_ADDRESS_OFFSET \
53 	((SHADOW_REGISTER_START_ADDRESS_OFFSET) + (4 * (MAX_SHADOW_REGISTERS)))
54 #define SHADOW_REGISTER(x) ((SHADOW_REGISTER_START_ADDRESS_OFFSET) + (4 * (x)))
55 #else
56 #define SHADOW_REGISTER(x) 0
57 #endif /* QCA_WIFI_QCA6390 || QCA_WIFI_QCA6490 || QCA_WIFI_QCA6750 */
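/*
 * Illustrative arithmetic for the macro above: each shadow register is a
 * 32-bit (4 byte) slot, so SHADOW_REGISTER(x) is simply the start offset
 * plus 4 * x. For example, with the QCA6390/QCA6490/KIWI layout
 * (start offset 0x8FC), SHADOW_REGISTER(2) = 0x8FC + 8 = 0x904.
 */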
58 
59 /*
60  * BAR + 4K is always accessible, any access outside this
61  * space requires force wake procedure.
62  * OFFSET = 4K - 32 bytes = 0xFE0
63  */
64 #define MAPPED_REF_OFF 0xFE0
65 
66 #define HAL_OFFSET(block, field) block ## _ ## field ## _OFFSET
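/*
 * HAL_OFFSET() builds a register offset symbol by token pasting. A sketch
 * of the expansion (the block/field names below are hypothetical, not
 * taken from the register headers):
 *
 *   HAL_OFFSET(UMAC_REO_R0, DEST_RING_CTRL_IX_0)
 *     -> UMAC_REO_R0_DEST_RING_CTRL_IX_0_OFFSET
 */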
67 
68 #ifdef ENABLE_VERBOSE_DEBUG
69 static inline void
70 hal_set_verbose_debug(bool flag)
71 {
72 	is_hal_verbose_debug_enabled = flag;
73 }
74 #endif
75 
76 #ifdef ENABLE_HAL_SOC_STATS
77 #define HAL_STATS_INC(_handle, _field, _delta) \
78 { \
79 	if (likely(_handle)) \
80 		_handle->stats._field += _delta; \
81 }
82 #else
83 #define HAL_STATS_INC(_handle, _field, _delta)
84 #endif
85 
86 #ifdef ENABLE_HAL_REG_WR_HISTORY
87 #define HAL_REG_WRITE_FAIL_HIST_ADD(hal_soc, offset, wr_val, rd_val) \
88 	hal_reg_wr_fail_history_add(hal_soc, offset, wr_val, rd_val)
89 
90 void hal_reg_wr_fail_history_add(struct hal_soc *hal_soc,
91 				 uint32_t offset,
92 				 uint32_t wr_val,
93 				 uint32_t rd_val);
94 
95 static inline int hal_history_get_next_index(qdf_atomic_t *table_index,
96 					     int array_size)
97 {
98 	int record_index = qdf_atomic_inc_return(table_index);
99 
100 	return record_index & (array_size - 1);
101 }
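/*
 * The masking above implements a cheap wrap-around and therefore assumes
 * array_size is a power of two. Illustrative example: with array_size = 32
 * the returned record index cycles through 0..31 as table_index keeps
 * incrementing.
 */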
102 #else
103 #define HAL_REG_WRITE_FAIL_HIST_ADD(hal_soc, offset, wr_val, rd_val) \
104 	hal_err("write failed at reg offset 0x%x, write 0x%x read 0x%x\n", \
105 		offset,	\
106 		wr_val,	\
107 		rd_val)
108 #endif
109 
110 /**
111  * hal_reg_write_result_check() - check register writing result
112  * @hal_soc: HAL soc handle
113  * @offset: register offset to read
114  * @exp_val: the expected value of register
116  *
117  * Return: none
118  */
119 static inline void hal_reg_write_result_check(struct hal_soc *hal_soc,
120 					      uint32_t offset,
121 					      uint32_t exp_val)
122 {
123 	uint32_t value;
124 
125 	value = qdf_ioread32(hal_soc->dev_base_addr + offset);
126 	if (exp_val != value) {
127 		HAL_REG_WRITE_FAIL_HIST_ADD(hal_soc, offset, exp_val, value);
128 		HAL_STATS_INC(hal_soc, reg_write_fail, 1);
129 	}
130 }
131 
132 #ifdef WINDOW_REG_PLD_LOCK_ENABLE
133 static inline void hal_lock_reg_access(struct hal_soc *soc,
134 				       unsigned long *flags)
135 {
136 	pld_lock_reg_window(soc->qdf_dev->dev, flags);
137 }
138 
139 static inline void hal_unlock_reg_access(struct hal_soc *soc,
140 					 unsigned long *flags)
141 {
142 	pld_unlock_reg_window(soc->qdf_dev->dev, flags);
143 }
144 #else
145 static inline void hal_lock_reg_access(struct hal_soc *soc,
146 				       unsigned long *flags)
147 {
148 	qdf_spin_lock_irqsave(&soc->register_access_lock);
149 }
150 
151 static inline void hal_unlock_reg_access(struct hal_soc *soc,
152 					 unsigned long *flags)
153 {
154 	qdf_spin_unlock_irqrestore(&soc->register_access_lock);
155 }
156 #endif
157 
158 #ifdef PCIE_REG_WINDOW_LOCAL_NO_CACHE
159 /**
160  * hal_select_window_confirm() - write remap window register and
161  *				 check writing result
162  *
163  */
164 static inline void hal_select_window_confirm(struct hal_soc *hal_soc,
165 					     uint32_t offset)
166 {
167 	uint32_t window = (offset >> WINDOW_SHIFT) & WINDOW_VALUE_MASK;
168 
169 	qdf_iowrite32(hal_soc->dev_base_addr + WINDOW_REG_ADDRESS,
170 		      WINDOW_ENABLE_BIT | window);
171 	hal_soc->register_window = window;
172 
173 	hal_reg_write_result_check(hal_soc, WINDOW_REG_ADDRESS,
174 				   WINDOW_ENABLE_BIT | window);
175 }
176 #else
177 static inline void hal_select_window_confirm(struct hal_soc *hal_soc,
178 					     uint32_t offset)
179 {
180 	uint32_t window = (offset >> WINDOW_SHIFT) & WINDOW_VALUE_MASK;
181 
182 	if (window != hal_soc->register_window) {
183 		qdf_iowrite32(hal_soc->dev_base_addr + WINDOW_REG_ADDRESS,
184 			      WINDOW_ENABLE_BIT | window);
185 		hal_soc->register_window = window;
186 
187 		hal_reg_write_result_check(
188 					hal_soc,
189 					WINDOW_REG_ADDRESS,
190 					WINDOW_ENABLE_BIT | window);
191 	}
192 }
193 #endif
194 
195 static inline qdf_iomem_t hal_get_window_address(struct hal_soc *hal_soc,
196 						 qdf_iomem_t addr)
197 {
198 	return hal_soc->ops->hal_get_window_address(hal_soc, addr);
199 }
200 
201 static inline void hal_tx_init_cmd_credit_ring(hal_soc_handle_t hal_soc_hdl,
202 					       hal_ring_handle_t hal_ring_hdl)
203 {
204 	struct hal_soc *hal_soc = (struct hal_soc *)hal_soc_hdl;
205 
206 	return hal_soc->ops->hal_tx_init_cmd_credit_ring(hal_soc_hdl,
207 							 hal_ring_hdl);
208 }
209 
210 /**
211  * hal_write32_mb() - Access registers to update configuration
212  * @hal_soc: hal soc handle
213  * @offset: offset address from the BAR
214  * @value: value to write
215  *
216  * Return: None
217  *
218  * Description: Register address space is split below:
219  *     SHADOW REGION       UNWINDOWED REGION    WINDOWED REGION
220  *  |--------------------|-------------------|------------------|
221  * BAR  NO FORCE WAKE  BAR+4K  FORCE WAKE  BAR+512K  FORCE WAKE
222  *
223  * 1. Any access to the shadow region doesn't need force wake
224  *    or windowing logic.
225  * 2. Any access beyond BAR + 4K:
226  *    If init_phase enabled, no force wake is needed and access
227  *    should be based on windowed or unwindowed access.
228  *    If init_phase disabled, force wake is needed and access
229  *    should be based on windowed or unwindowed access.
230  *
231  * note1: WINDOW_RANGE_MASK = (1 << WINDOW_SHIFT) -1
232  * note2: 1 << WINDOW_SHIFT = MAX_UNWINDOWED_ADDRESS
233  * note3: WINDOW_VALUE_MASK = big enough that trying to write past
234  *                            that window would be a bug
235  */
236 #if !defined(QCA_WIFI_QCA6390) && !defined(QCA_WIFI_QCA6490) && \
237     !defined(QCA_WIFI_QCA6750) && !defined(QCA_WIFI_KIWI)
238 static inline void hal_write32_mb(struct hal_soc *hal_soc, uint32_t offset,
239 				  uint32_t value)
240 {
241 	unsigned long flags;
242 	qdf_iomem_t new_addr;
243 
244 	if (!hal_soc->use_register_windowing ||
245 	    offset < MAX_UNWINDOWED_ADDRESS) {
246 		qdf_iowrite32(hal_soc->dev_base_addr + offset, value);
247 	} else if (hal_soc->static_window_map) {
248 		new_addr = hal_get_window_address(hal_soc,
249 				hal_soc->dev_base_addr + offset);
250 		qdf_iowrite32(new_addr, value);
251 	} else {
252 		hal_lock_reg_access(hal_soc, &flags);
253 		hal_select_window_confirm(hal_soc, offset);
254 		qdf_iowrite32(hal_soc->dev_base_addr + WINDOW_START +
255 			  (offset & WINDOW_RANGE_MASK), value);
256 		hal_unlock_reg_access(hal_soc, &flags);
257 	}
258 }
259 
260 #define hal_write32_mb_confirm(_hal_soc, _offset, _value) \
261 		hal_write32_mb(_hal_soc, _offset, _value)
262 
263 #define hal_write32_mb_cmem(_hal_soc, _offset, _value)
264 #else
265 static inline void hal_write32_mb(struct hal_soc *hal_soc, uint32_t offset,
266 				  uint32_t value)
267 {
268 	int ret;
269 	unsigned long flags;
270 	qdf_iomem_t new_addr;
271 
272 	if (!TARGET_ACCESS_ALLOWED(HIF_GET_SOFTC(
273 					hal_soc->hif_handle))) {
274 		hal_err_rl("target access is not allowed");
275 		return;
276 	}
277 
278 	/* Region < BAR + 4K can be directly accessed */
279 	if (offset < MAPPED_REF_OFF) {
280 		qdf_iowrite32(hal_soc->dev_base_addr + offset, value);
281 		return;
282 	}
283 
284 	/* Region greater than BAR + 4K */
285 	if (!hal_soc->init_phase) {
286 		ret = hif_force_wake_request(hal_soc->hif_handle);
287 		if (ret) {
288 			hal_err_rl("Wake up request failed");
289 			qdf_check_state_before_panic(__func__, __LINE__);
290 			return;
291 		}
292 	}
293 
294 	if (!hal_soc->use_register_windowing ||
295 	    offset < MAX_UNWINDOWED_ADDRESS) {
296 		qdf_iowrite32(hal_soc->dev_base_addr + offset, value);
297 	} else if (hal_soc->static_window_map) {
298 		new_addr = hal_get_window_address(
299 					hal_soc,
300 					hal_soc->dev_base_addr + offset);
301 		qdf_iowrite32(new_addr, value);
302 	} else {
303 		hal_lock_reg_access(hal_soc, &flags);
304 		hal_select_window_confirm(hal_soc, offset);
305 		qdf_iowrite32(hal_soc->dev_base_addr + WINDOW_START +
306 			  (offset & WINDOW_RANGE_MASK), value);
307 		hal_unlock_reg_access(hal_soc, &flags);
308 	}
309 
310 	if (!hal_soc->init_phase) {
311 		ret = hif_force_wake_release(hal_soc->hif_handle);
312 		if (ret) {
313 			hal_err("Wake up release failed");
314 			qdf_check_state_before_panic(__func__, __LINE__);
315 			return;
316 		}
317 	}
318 }
319 
320 /**
321  * hal_write32_mb_confirm() - write register and check writing result
322  *
323  */
324 static inline void hal_write32_mb_confirm(struct hal_soc *hal_soc,
325 					  uint32_t offset,
326 					  uint32_t value)
327 {
328 	int ret;
329 	unsigned long flags;
330 	qdf_iomem_t new_addr;
331 
332 	if (!TARGET_ACCESS_ALLOWED(HIF_GET_SOFTC(
333 					hal_soc->hif_handle))) {
334 		hal_err_rl("target access is not allowed");
335 		return;
336 	}
337 
338 	/* Region < BAR + 4K can be directly accessed */
339 	if (offset < MAPPED_REF_OFF) {
340 		qdf_iowrite32(hal_soc->dev_base_addr + offset, value);
341 		return;
342 	}
343 
344 	/* Region greater than BAR + 4K */
345 	if (!hal_soc->init_phase) {
346 		ret = hif_force_wake_request(hal_soc->hif_handle);
347 		if (ret) {
348 			hal_err("Wake up request failed");
349 			qdf_check_state_before_panic(__func__, __LINE__);
350 			return;
351 		}
352 	}
353 
354 	if (!hal_soc->use_register_windowing ||
355 	    offset < MAX_UNWINDOWED_ADDRESS) {
356 		qdf_iowrite32(hal_soc->dev_base_addr + offset, value);
357 		hal_reg_write_result_check(hal_soc, offset,
358 					   value);
359 	} else if (hal_soc->static_window_map) {
360 		new_addr = hal_get_window_address(
361 					hal_soc,
362 					hal_soc->dev_base_addr + offset);
363 		qdf_iowrite32(new_addr, value);
364 		hal_reg_write_result_check(hal_soc,
365 					   new_addr - hal_soc->dev_base_addr,
366 					   value);
367 	} else {
368 		hal_lock_reg_access(hal_soc, &flags);
369 		hal_select_window_confirm(hal_soc, offset);
370 		qdf_iowrite32(hal_soc->dev_base_addr + WINDOW_START +
371 			  (offset & WINDOW_RANGE_MASK), value);
372 
373 		hal_reg_write_result_check(
374 				hal_soc,
375 				WINDOW_START + (offset & WINDOW_RANGE_MASK),
376 				value);
377 		hal_unlock_reg_access(hal_soc, &flags);
378 	}
379 
380 	if (!hal_soc->init_phase) {
381 		ret = hif_force_wake_release(hal_soc->hif_handle);
382 		if (ret) {
383 			hal_err("Wake up release failed");
384 			qdf_check_state_before_panic(__func__, __LINE__);
385 			return;
386 		}
387 	}
388 }
389 
390 static inline void hal_write32_mb_cmem(struct hal_soc *hal_soc, uint32_t offset,
391 				       uint32_t value)
392 {
393 	unsigned long flags;
394 	qdf_iomem_t new_addr;
395 
396 	if (!TARGET_ACCESS_ALLOWED(HIF_GET_SOFTC(
397 					hal_soc->hif_handle))) {
398 		hal_err_rl("%s: target access is not allowed", __func__);
399 		return;
400 	}
401 
402 	if (!hal_soc->use_register_windowing ||
403 	    offset < MAX_UNWINDOWED_ADDRESS) {
404 		qdf_iowrite32(hal_soc->dev_base_addr + offset, value);
405 	} else if (hal_soc->static_window_map) {
406 		new_addr = hal_get_window_address(
407 					hal_soc,
408 					hal_soc->dev_base_addr + offset);
409 		qdf_iowrite32(new_addr, value);
410 	} else {
411 		hal_lock_reg_access(hal_soc, &flags);
412 		hal_select_window_confirm(hal_soc, offset);
413 		qdf_iowrite32(hal_soc->dev_base_addr + WINDOW_START +
414 			  (offset & WINDOW_RANGE_MASK), value);
415 		hal_unlock_reg_access(hal_soc, &flags);
416 	}
417 }
418 #endif
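/*
 * Illustrative walk-through of the windowed write path implemented above
 * (a sketch using the symbolic constants from hal_internal.h rather than
 * concrete target values):
 *
 *   window        = (offset >> WINDOW_SHIFT) & WINDOW_VALUE_MASK;
 *   windowed_addr = dev_base_addr + WINDOW_START +
 *                   (offset & WINDOW_RANGE_MASK);
 *
 * hal_select_window_confirm() programs 'window' (together with
 * WINDOW_ENABLE_BIT) into WINDOW_REG_ADDRESS and the write then lands at
 * 'windowed_addr', all under hal_lock_reg_access()/hal_unlock_reg_access().
 */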
419 
420 /**
421  * hal_write_address_32_mb - write a value to a register
422  *
423  */
424 static inline
425 void hal_write_address_32_mb(struct hal_soc *hal_soc,
426 			     qdf_iomem_t addr, uint32_t value, bool wr_confirm)
427 {
428 	uint32_t offset;
429 
430 	if (!hal_soc->use_register_windowing)
431 		return qdf_iowrite32(addr, value);
432 
433 	offset = addr - hal_soc->dev_base_addr;
434 
435 	if (qdf_unlikely(wr_confirm))
436 		hal_write32_mb_confirm(hal_soc, offset, value);
437 	else
438 		hal_write32_mb(hal_soc, offset, value);
439 }
440 
441 
442 #ifdef DP_HAL_MULTIWINDOW_DIRECT_ACCESS
443 static inline void hal_srng_write_address_32_mb(struct hal_soc *hal_soc,
444 						struct hal_srng *srng,
445 						void __iomem *addr,
446 						uint32_t value)
447 {
448 	qdf_iowrite32(addr, value);
449 }
450 #elif defined(FEATURE_HAL_DELAYED_REG_WRITE)
451 static inline void hal_srng_write_address_32_mb(struct hal_soc *hal_soc,
452 						struct hal_srng *srng,
453 						void __iomem *addr,
454 						uint32_t value)
455 {
456 	hal_delayed_reg_write(hal_soc, srng, addr, value);
457 }
458 #else
459 static inline void hal_srng_write_address_32_mb(struct hal_soc *hal_soc,
460 						struct hal_srng *srng,
461 						void __iomem *addr,
462 						uint32_t value)
463 {
464 	hal_write_address_32_mb(hal_soc, addr, value, false);
465 }
466 #endif
467 
468 #if !defined(QCA_WIFI_QCA6390) && !defined(QCA_WIFI_QCA6490) && \
469     !defined(QCA_WIFI_QCA6750) && !defined(QCA_WIFI_KIWI)
470 /**
471  * hal_read32_mb() - Access registers to read configuration
472  * @hal_soc: hal soc handle
473  * @offset: offset address from the BAR
475  *
476  * Description: Register address space is split below:
477  *     SHADOW REGION       UNWINDOWED REGION    WINDOWED REGION
478  *  |--------------------|-------------------|------------------|
479  * BAR  NO FORCE WAKE  BAR+4K  FORCE WAKE  BAR+512K  FORCE WAKE
480  *
481  * 1. Any access to the shadow region doesn't need force wake
482  *    or windowing logic.
483  * 2. Any access beyond BAR + 4K:
484  *    If init_phase enabled, no force wake is needed and access
485  *    should be based on windowed or unwindowed access.
486  *    If init_phase disabled, force wake is needed and access
487  *    should be based on windowed or unwindowed access.
488  *
489  * Return: value read from the register
490  */
491 static inline uint32_t hal_read32_mb(struct hal_soc *hal_soc, uint32_t offset)
492 {
493 	uint32_t ret;
494 	unsigned long flags;
495 	qdf_iomem_t new_addr;
496 
497 	if (!hal_soc->use_register_windowing ||
498 	    offset < MAX_UNWINDOWED_ADDRESS) {
499 		return qdf_ioread32(hal_soc->dev_base_addr + offset);
500 	} else if (hal_soc->static_window_map) {
501 		new_addr = hal_get_window_address(hal_soc, hal_soc->dev_base_addr + offset);
502 		return qdf_ioread32(new_addr);
503 	}
504 
505 	hal_lock_reg_access(hal_soc, &flags);
506 	hal_select_window_confirm(hal_soc, offset);
507 	ret = qdf_ioread32(hal_soc->dev_base_addr + WINDOW_START +
508 		       (offset & WINDOW_RANGE_MASK));
509 	hal_unlock_reg_access(hal_soc, &flags);
510 
511 	return ret;
512 }
513 
514 #define hal_read32_mb_cmem(_hal_soc, _offset)
515 #else
516 static
517 uint32_t hal_read32_mb(struct hal_soc *hal_soc, uint32_t offset)
518 {
519 	uint32_t ret;
520 	unsigned long flags;
521 	qdf_iomem_t new_addr;
522 
523 	if (!TARGET_ACCESS_ALLOWED(HIF_GET_SOFTC(
524 					hal_soc->hif_handle))) {
525 		hal_err_rl("target access is not allowed");
526 		return 0;
527 	}
528 
529 	/* Region < BAR + 4K can be directly accessed */
530 	if (offset < MAPPED_REF_OFF)
531 		return qdf_ioread32(hal_soc->dev_base_addr + offset);
532 
533 	if ((!hal_soc->init_phase) &&
534 	    hif_force_wake_request(hal_soc->hif_handle)) {
535 		hal_err("Wake up request failed");
536 		qdf_check_state_before_panic(__func__, __LINE__);
537 		return 0;
538 	}
539 
540 	if (!hal_soc->use_register_windowing ||
541 	    offset < MAX_UNWINDOWED_ADDRESS) {
542 		ret = qdf_ioread32(hal_soc->dev_base_addr + offset);
543 	} else if (hal_soc->static_window_map) {
544 		new_addr = hal_get_window_address(
545 					hal_soc,
546 					hal_soc->dev_base_addr + offset);
547 		ret = qdf_ioread32(new_addr);
548 	} else {
549 		hal_lock_reg_access(hal_soc, &flags);
550 		hal_select_window_confirm(hal_soc, offset);
551 		ret = qdf_ioread32(hal_soc->dev_base_addr + WINDOW_START +
552 			       (offset & WINDOW_RANGE_MASK));
553 		hal_unlock_reg_access(hal_soc, &flags);
554 	}
555 
556 	if ((!hal_soc->init_phase) &&
557 	    hif_force_wake_release(hal_soc->hif_handle)) {
558 		hal_err("Wake up release failed");
559 		qdf_check_state_before_panic(__func__, __LINE__);
560 		return 0;
561 	}
562 
563 	return ret;
564 }
565 
566 static inline
567 uint32_t hal_read32_mb_cmem(struct hal_soc *hal_soc, uint32_t offset)
568 {
569 	uint32_t ret;
570 	unsigned long flags;
571 	qdf_iomem_t new_addr;
572 
573 	if (!TARGET_ACCESS_ALLOWED(HIF_GET_SOFTC(
574 					hal_soc->hif_handle))) {
575 		hal_err_rl("%s: target access is not allowed", __func__);
576 		return 0;
577 	}
578 
579 	if (!hal_soc->use_register_windowing ||
580 	    offset < MAX_UNWINDOWED_ADDRESS) {
581 		ret = qdf_ioread32(hal_soc->dev_base_addr + offset);
582 	} else if (hal_soc->static_window_map) {
583 		new_addr = hal_get_window_address(
584 					hal_soc,
585 					hal_soc->dev_base_addr + offset);
586 		ret = qdf_ioread32(new_addr);
587 	} else {
588 		hal_lock_reg_access(hal_soc, &flags);
589 		hal_select_window_confirm(hal_soc, offset);
590 		ret = qdf_ioread32(hal_soc->dev_base_addr + WINDOW_START +
591 			       (offset & WINDOW_RANGE_MASK));
592 		hal_unlock_reg_access(hal_soc, &flags);
593 	}
594 	return ret;
595 }
596 #endif
597 
598 /* Max times allowed for register writing retry */
599 #define HAL_REG_WRITE_RETRY_MAX		5
600 /* Delay milliseconds for each time retry */
601 #define HAL_REG_WRITE_RETRY_DELAY	1
602 
603 #ifdef GENERIC_SHADOW_REGISTER_ACCESS_ENABLE
604 /* Exclusive upper bound for shadow config indices in the range 0..31 */
605 #define HAL_SHADOW_REG_INDEX_LOW 32
606 /* Exclusive upper bound for shadow config indices in the range 32..39 */
607 #define HAL_SHADOW_REG_INDEX_HIGH 40
608 /* Dirty bit reg offsets corresponding to shadow config index */
609 #define HAL_SHADOW_REG_DIRTY_BIT_DATA_LOW_OFFSET 0x30C8
610 #define HAL_SHADOW_REG_DIRTY_BIT_DATA_HIGH_OFFSET 0x30C4
611 /* PCIE_PCIE_TOP base addr offset */
612 #define HAL_PCIE_PCIE_TOP_WRAPPER 0x01E00000
613 /* Max retry attempts to read the dirty bit reg */
614 #ifdef HAL_CONFIG_SLUB_DEBUG_ON
615 #define HAL_SHADOW_DIRTY_BIT_POLL_MAX 10000
616 #else
617 #define HAL_SHADOW_DIRTY_BIT_POLL_MAX 2000
618 #endif
619 /* Delay in usecs for polling dirty bit reg */
620 #define HAL_SHADOW_DIRTY_BIT_POLL_DELAY 5
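/*
 * Worst-case dirty bit poll time implied by the values above:
 * 2000 iterations * 5 us = ~10 ms (or 10000 * 5 us = ~50 ms when
 * HAL_CONFIG_SLUB_DEBUG_ON is set).
 */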
621 
622 /**
623  * hal_poll_dirty_bit_reg() - Poll dirty register bit to confirm
624  * write was successful
625  * @hal_soc: hal soc handle
626  * @shadow_config_index: index of shadow reg used to confirm
627  * write
628  *
629  * Return: QDF_STATUS_SUCCESS on success
630  */
631 static inline QDF_STATUS hal_poll_dirty_bit_reg(struct hal_soc *hal,
632 						int shadow_config_index)
633 {
634 	uint32_t read_value = 0;
635 	int retry_cnt = 0;
636 	uint32_t reg_offset = 0;
637 
638 	if (shadow_config_index > 0 &&
639 	    shadow_config_index < HAL_SHADOW_REG_INDEX_LOW) {
640 		reg_offset =
641 			HAL_SHADOW_REG_DIRTY_BIT_DATA_LOW_OFFSET;
642 	} else if (shadow_config_index >= HAL_SHADOW_REG_INDEX_LOW &&
643 		   shadow_config_index < HAL_SHADOW_REG_INDEX_HIGH) {
644 		reg_offset =
645 			HAL_SHADOW_REG_DIRTY_BIT_DATA_HIGH_OFFSET;
646 	} else {
647 		hal_err("Invalid shadow_config_index = %d",
648 			shadow_config_index);
649 		return QDF_STATUS_E_INVAL;
650 	}
651 	while (retry_cnt < HAL_SHADOW_DIRTY_BIT_POLL_MAX) {
652 		read_value = hal_read32_mb(
653 				hal, HAL_PCIE_PCIE_TOP_WRAPPER + reg_offset);
654 		/* Check if dirty bit corresponding to shadow_index is set */
655 		if (read_value & BIT(shadow_config_index)) {
656 			/* Dirty reg bit not reset */
657 			qdf_udelay(HAL_SHADOW_DIRTY_BIT_POLL_DELAY);
658 			retry_cnt++;
659 		} else {
660 			hal_debug("Shadow write: offset 0x%x read val 0x%x",
661 				  reg_offset, read_value);
662 			return QDF_STATUS_SUCCESS;
663 		}
664 	}
665 	return QDF_STATUS_E_TIMEOUT;
666 }
667 
668 /**
669  * hal_write32_mb_shadow_confirm() - write to shadow reg and
670  * poll dirty register bit to confirm write
671  * @hal_soc: hal soc handle
672  * @reg_offset: target reg offset address from BAR
673  * @value: value to write
674  *
675  * Return: QDF_STATUS_SUCCESS on success
676  */
677 static inline QDF_STATUS hal_write32_mb_shadow_confirm(
678 	struct hal_soc *hal,
679 	uint32_t reg_offset,
680 	uint32_t value)
681 {
682 	int i;
683 	QDF_STATUS ret;
684 	uint32_t shadow_reg_offset;
685 	int shadow_config_index;
686 	bool is_reg_offset_present = false;
687 
688 	for (i = 0; i < MAX_GENERIC_SHADOW_REG; i++) {
689 		/* Found the shadow config for the reg_offset */
690 		struct shadow_reg_config *hal_shadow_reg_list =
691 			&hal->list_shadow_reg_config[i];
692 		if (hal_shadow_reg_list->target_register ==
693 			reg_offset) {
694 			shadow_config_index =
695 				hal_shadow_reg_list->shadow_config_index;
696 			shadow_reg_offset =
697 				SHADOW_REGISTER(shadow_config_index);
698 			hal_write32_mb_confirm(
699 				hal, shadow_reg_offset, value);
700 			is_reg_offset_present = true;
701 			break;
702 		}
703 		ret = QDF_STATUS_E_FAILURE;
704 	}
705 	if (is_reg_offset_present) {
706 		ret = hal_poll_dirty_bit_reg(hal, shadow_config_index);
707 		hal_info("Shadow write:reg 0x%x val 0x%x ret %d",
708 			 reg_offset, value, ret);
709 		if (QDF_IS_STATUS_ERROR(ret)) {
710 			HAL_STATS_INC(hal, shadow_reg_write_fail, 1);
711 			return ret;
712 		}
713 		HAL_STATS_INC(hal, shadow_reg_write_succ, 1);
714 	}
715 	return ret;
716 }
717 
718 /**
719  * hal_write32_mb_confirm_retry() - write register with confirming and
720  *				    do retry/recovery if writing failed
721  * @hal_soc: hal soc handle
722  * @offset: offset address from the BAR
723  * @value: value to write
724  * @recovery: is recovery needed or not.
725  *
726  * Write the register value with confirming and read it back, if
727  * read back value is not as expected, do retry for writing, if
728  * retry hit max times allowed but still fail, check if recovery
729  * needed.
730  *
731  * Return: None
732  */
733 static inline void hal_write32_mb_confirm_retry(struct hal_soc *hal_soc,
734 						uint32_t offset,
735 						uint32_t value,
736 						bool recovery)
737 {
738 	QDF_STATUS ret;
739 
740 	ret = hal_write32_mb_shadow_confirm(hal_soc, offset, value);
741 	if (QDF_IS_STATUS_ERROR(ret) && recovery)
742 		qdf_trigger_self_recovery(NULL, QDF_HAL_REG_WRITE_FAILURE);
743 }
744 #else /* GENERIC_SHADOW_REGISTER_ACCESS_ENABLE */
745 
746 static inline void hal_write32_mb_confirm_retry(struct hal_soc *hal_soc,
747 						uint32_t offset,
748 						uint32_t value,
749 						bool recovery)
750 {
751 	uint8_t retry_cnt = 0;
752 	uint32_t read_value;
753 
754 	while (retry_cnt <= HAL_REG_WRITE_RETRY_MAX) {
755 		hal_write32_mb_confirm(hal_soc, offset, value);
756 		read_value = hal_read32_mb(hal_soc, offset);
757 		if (qdf_likely(read_value == value))
758 			break;
759 
760 		/* write failed, do retry */
761 		hal_warn("Retry reg offset 0x%x, value 0x%x, read value 0x%x",
762 			 offset, value, read_value);
763 		qdf_mdelay(HAL_REG_WRITE_RETRY_DELAY);
764 		retry_cnt++;
765 	}
766 
767 	if (retry_cnt > HAL_REG_WRITE_RETRY_MAX && recovery)
768 		qdf_trigger_self_recovery(NULL, QDF_HAL_REG_WRITE_FAILURE);
769 }
770 #endif /* GENERIC_SHADOW_REGISTER_ACCESS_ENABLE */
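/*
 * Illustrative use of the confirm-with-retry writer (a sketch; the register
 * offset shown is a placeholder, not a real register):
 *
 *   hal_write32_mb_confirm_retry(hal_soc, reg_offset, value, true);
 *
 * With GENERIC_SHADOW_REGISTER_ACCESS_ENABLE the write goes through the
 * mapped shadow register and is confirmed by polling the dirty bit;
 * otherwise it is re-attempted up to HAL_REG_WRITE_RETRY_MAX times with
 * HAL_REG_WRITE_RETRY_DELAY ms between attempts, and optionally triggers
 * self recovery on persistent failure.
 */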
771 
772 #if defined(FEATURE_HAL_DELAYED_REG_WRITE)
773 /**
774  * hal_dump_reg_write_srng_stats() - dump SRNG reg write stats
775  * @hal_soc: HAL soc handle
776  *
777  * Return: none
778  */
779 void hal_dump_reg_write_srng_stats(hal_soc_handle_t hal_soc_hdl);
780 
781 /**
782  * hal_dump_reg_write_stats() - dump reg write stats
783  * @hal_soc: HAL soc handle
784  *
785  * Return: none
786  */
787 void hal_dump_reg_write_stats(hal_soc_handle_t hal_soc_hdl);
788 
789 /**
790  * hal_get_reg_write_pending_work() - get the number of entries
791  *		pending in the workqueue to be processed.
792  * @hal_soc: HAL soc handle
793  *
794  * Returns: the number of entries pending to be processed
795  */
796 int hal_get_reg_write_pending_work(void *hal_soc);
797 
798 #else
799 static inline void hal_dump_reg_write_srng_stats(hal_soc_handle_t hal_soc_hdl)
800 {
801 }
802 
803 static inline void hal_dump_reg_write_stats(hal_soc_handle_t hal_soc_hdl)
804 {
805 }
806 
807 static inline int hal_get_reg_write_pending_work(void *hal_soc)
808 {
809 	return 0;
810 }
811 #endif
812 
813 /**
814  * hal_read_address_32_mb() - Read 32-bit value from the register
815  * @soc: soc handle
816  * @addr: register address to read
817  *
818  * Return: 32-bit value
819  */
820 static inline
821 uint32_t hal_read_address_32_mb(struct hal_soc *soc,
822 				qdf_iomem_t addr)
823 {
824 	uint32_t offset;
825 	uint32_t ret;
826 
827 	if (!soc->use_register_windowing)
828 		return qdf_ioread32(addr);
829 
830 	offset = addr - soc->dev_base_addr;
831 	ret = hal_read32_mb(soc, offset);
832 	return ret;
833 }
834 
835 /**
836  * hal_attach - Initialize HAL layer
837  * @hif_handle: Opaque HIF handle
838  * @qdf_dev: QDF device
839  *
840  * Return: Opaque HAL SOC handle
841  *		 NULL on failure
842  *
843  * This function should be called as part of HIF initialization (for accessing
844  * copy engines). DP layer will get hal_soc handle using hif_get_hal_handle()
845  */
846 void *hal_attach(struct hif_opaque_softc *hif_handle, qdf_device_t qdf_dev);
847 
848 /**
849  * hal_detach - Detach HAL layer
850  * @hal_soc: HAL SOC handle
851  *
852  * This function should be called as part of HIF detach
853  *
854  */
855 extern void hal_detach(void *hal_soc);
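/*
 * Illustrative attach/detach sequence (a sketch of the usage described
 * above, not code lifted from the driver):
 *
 *   void *hal_soc = hal_attach(hif_handle, qdf_dev);
 *   if (!hal_soc)
 *           return QDF_STATUS_E_FAILURE;
 *   ...
 *   hal_detach(hal_soc);
 */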
856 
857 #define HAL_SRNG_LMAC_RING 0x80000000
858 /* SRNG flags passed in hal_srng_params.flags */
859 #define HAL_SRNG_MSI_SWAP				0x00000008
860 #define HAL_SRNG_RING_PTR_SWAP			0x00000010
861 #define HAL_SRNG_DATA_TLV_SWAP			0x00000020
862 #define HAL_SRNG_LOW_THRES_INTR_ENABLE	0x00010000
863 #define HAL_SRNG_MSI_INTR				0x00020000
864 #define HAL_SRNG_CACHED_DESC		0x00040000
865 
866 #if defined(QCA_WIFI_QCA6490)  || defined(QCA_WIFI_KIWI)
867 #define HAL_SRNG_PREFETCH_TIMER 1
868 #else
869 #define HAL_SRNG_PREFETCH_TIMER 0
870 #endif
871 
872 #define PN_SIZE_24 0
873 #define PN_SIZE_48 1
874 #define PN_SIZE_128 2
875 
876 #ifdef FORCE_WAKE
877 /**
878  * hal_set_init_phase() - Indicate initialization of
879  *                        datapath rings
880  * @soc: hal_soc handle
881  * @init_phase: flag to indicate datapath rings
882  *              initialization status
883  *
884  * Return: None
885  */
886 void hal_set_init_phase(hal_soc_handle_t soc, bool init_phase);
887 #else
888 static inline
889 void hal_set_init_phase(hal_soc_handle_t soc, bool init_phase)
890 {
891 }
892 #endif /* FORCE_WAKE */
893 
894 /**
895  * hal_srng_get_entrysize - Returns size of ring entry in bytes. Should be
896  * used by callers for calculating the size of memory to be allocated before
897  * calling hal_srng_setup to setup the ring
898  *
899  * @hal_soc: Opaque HAL SOC handle
900  * @ring_type: one of the types from hal_ring_type
901  *
902  */
903 extern uint32_t hal_srng_get_entrysize(void *hal_soc, int ring_type);
904 
905 /**
906  * hal_srng_max_entries - Returns maximum possible number of ring entries
907  * @hal_soc: Opaque HAL SOC handle
908  * @ring_type: one of the types from hal_ring_type
909  *
910  * Return: Maximum number of entries for the given ring_type
911  */
912 uint32_t hal_srng_max_entries(void *hal_soc, int ring_type);
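/*
 * Illustrative ring sizing (a sketch): callers typically allocate
 *
 *   num_entries * hal_srng_get_entrysize(hal_soc, ring_type)
 *
 * bytes of contiguous ring memory, with num_entries capped at
 * hal_srng_max_entries(hal_soc, ring_type), before calling hal_srng_setup().
 */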
913 
914 void hal_set_low_threshold(hal_ring_handle_t hal_ring_hdl,
915 				 uint32_t low_threshold);
916 
917 /**
918  * hal_srng_dump - Dump ring status
919  * @srng: hal srng pointer
920  */
921 void hal_srng_dump(struct hal_srng *srng);
922 
923 /**
924  * hal_srng_get_dir - Returns the direction of the ring
925  * @hal_soc: Opaque HAL SOC handle
926  * @ring_type: one of the types from hal_ring_type
927  *
928  * Return: Ring direction
929  */
930 enum hal_srng_dir hal_srng_get_dir(void *hal_soc, int ring_type);
931 
932 /* HAL memory information */
933 struct hal_mem_info {
934 	/* dev base virtual addr */
935 	void *dev_base_addr;
936 	/* dev base physical addr */
937 	void *dev_base_paddr;
938 	/* dev base ce virtual addr - applicable only for qca5018 */
939 	/* In qca5018 CE registers are outside the wcss block */
940 	/* using a separate address space to access CE registers */
941 	void *dev_base_addr_ce;
942 	/* dev base ce physical addr */
943 	void *dev_base_paddr_ce;
944 	/* Remote virtual pointer memory for HW/FW updates */
945 	void *shadow_rdptr_mem_vaddr;
946 	/* Remote physical pointer memory for HW/FW updates */
947 	void *shadow_rdptr_mem_paddr;
948 	/* Shared memory for ring pointer updates from host to FW */
949 	void *shadow_wrptr_mem_vaddr;
950 	/* Shared physical memory for ring pointer updates from host to FW */
951 	void *shadow_wrptr_mem_paddr;
952 	/* lmac srng start id */
953 	uint8_t lmac_srng_start_id;
954 };
955 
956 /* SRNG parameters to be passed to hal_srng_setup */
957 struct hal_srng_params {
958 	/* Physical base address of the ring */
959 	qdf_dma_addr_t ring_base_paddr;
960 	/* Virtual base address of the ring */
961 	void *ring_base_vaddr;
962 	/* Number of entries in ring */
963 	uint32_t num_entries;
964 	/* max transfer length */
965 	uint16_t max_buffer_length;
966 	/* MSI Address */
967 	qdf_dma_addr_t msi_addr;
968 	/* MSI data */
969 	uint32_t msi_data;
970 	/* Interrupt timer threshold – in micro seconds */
971 	uint32_t intr_timer_thres_us;
972 	/* Interrupt batch counter threshold – in number of ring entries */
973 	uint32_t intr_batch_cntr_thres_entries;
974 	/* Low threshold – in number of ring entries
975 	 * (valid for src rings only)
976 	 */
977 	uint32_t low_threshold;
978 	/* Misc flags */
979 	uint32_t flags;
980 	/* Unique ring id */
981 	uint8_t ring_id;
982 	/* Source or Destination ring */
983 	enum hal_srng_dir ring_dir;
984 	/* Size of ring entry */
985 	uint32_t entry_size;
986 	/* hw register base address */
987 	void *hwreg_base[MAX_SRNG_REG_GROUPS];
988 	/* prefetch timer config - in micro seconds */
989 	uint32_t prefetch_timer;
990 #ifdef WLAN_FEATURE_NEAR_FULL_IRQ
991 	/* Near full IRQ support flag */
992 	uint32_t nf_irq_support;
993 	/* MSI2 Address */
994 	qdf_dma_addr_t msi2_addr;
995 	/* MSI2 data */
996 	uint32_t msi2_data;
997 	/* Critical threshold */
998 	uint16_t crit_thresh;
999 	/* High threshold */
1000 	uint16_t high_thresh;
1001 	/* Safe threshold */
1002 	uint16_t safe_thresh;
1003 #endif
1004 };
1005 
1006 /* hal_construct_srng_shadow_regs() - initialize the shadow
1007  * registers for srngs
1008  * @hal_soc: hal handle
1009  *
1010  * Return: QDF_STATUS_OK on success
1011  */
1012 QDF_STATUS hal_construct_srng_shadow_regs(void *hal_soc);
1013 
1014 /* hal_set_one_shadow_config() - add a config for the specified ring
1015  * @hal_soc: hal handle
1016  * @ring_type: ring type
1017  * @ring_num: ring num
1018  *
1019  * The ring type and ring num uniquely specify the ring.  After this call,
1020  * the hp/tp will be added as the next entry in the shadow register
1021  * configuration table.  The hal code will use the shadow register address
1022  * in place of the hp/tp address.
1023  *
1024  * This function is exposed, so that the CE module can skip configuring shadow
1025  * registers for unused ring and rings assigned to the firmware.
1026  *
1027  * Return: QDF_STATUS_OK on success
1028  */
1029 QDF_STATUS hal_set_one_shadow_config(void *hal_soc, int ring_type,
1030 				     int ring_num);
1031 /**
1032  * hal_get_shadow_config() - retrieve the config table for shadow cfg v2
1033  * @hal_soc: hal handle
1034  * @shadow_config: will point to the config table after this call
1035  * @num_shadow_registers_configured: will contain the number of valid entries
1036  */
1037 extern void
1038 hal_get_shadow_config(void *hal_soc,
1039 		      struct pld_shadow_reg_v2_cfg **shadow_config,
1040 		      int *num_shadow_registers_configured);
1041 
1042 #ifdef CONFIG_SHADOW_V3
1043 /**
1044  * hal_get_shadow_v3_config() - retrieve the config table for shadow cfg v3
1045  * @hal_soc: hal handle
1046  * @shadow_config: will point to the config table after this call
1047  * @num_shadow_registers_configured: will contain the number of valid entries
1048  */
1049 extern void
1050 hal_get_shadow_v3_config(void *hal_soc,
1051 			 struct pld_shadow_reg_v3_cfg **shadow_config,
1052 			 int *num_shadow_registers_configured);
1053 #endif
1054 
1055 #ifdef WLAN_FEATURE_NEAR_FULL_IRQ
1056 /**
1057  * hal_srng_is_near_full_irq_supported() - Check if srng supports near full irq
1058  * @hal_soc: HAL SoC handle [To be validated by caller]
1059  * @ring_type: srng type
1060  * @ring_num: The index of the srng (of the same type)
1061  *
1062  * Return: true, if the srng supports near full irq trigger
1063  *	false, if the srng does not support near full irq
1064  */
1065 bool hal_srng_is_near_full_irq_supported(hal_soc_handle_t hal_soc,
1066 					 int ring_type, int ring_num);
1067 #else
1068 static inline
1069 bool hal_srng_is_near_full_irq_supported(hal_soc_handle_t hal_soc,
1070 					 int ring_type, int ring_num)
1071 {
1072 	return false;
1073 }
1074 #endif
1075 
1076 /**
1077  * hal_srng_setup - Initialize HW SRNG ring.
1078  *
1079  * @hal_soc: Opaque HAL SOC handle
1080  * @ring_type: one of the types from hal_ring_type
1081  * @ring_num: Ring number if there are multiple rings of
1082  *		same type (staring from 0)
1083  *		same type (starting from 0)
1084  * @ring_params: SRNG ring params in hal_srng_params structure.
1085  *
1086  * Callers are expected to allocate contiguous ring memory of size
1087  * 'num_entries * entry_size' bytes and pass the physical and virtual base
1088  * addresses through 'ring_base_paddr' and 'ring_base_vaddr' in hal_srng_params
1089  * structure. Ring base address should be 8 byte aligned and size of each ring
1090  * entry should be queried using the API hal_srng_get_entrysize
1091  *
1092  * Return: Opaque pointer to ring on success
1093  *		 NULL on failure (if given ring is not available)
1094  */
1095 extern void *hal_srng_setup(void *hal_soc, int ring_type, int ring_num,
1096 	int mac_id, struct hal_srng_params *ring_params);
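/*
 * Illustrative setup sequence (a sketch based on the description above;
 * the ring type, ring number and threshold values are placeholders):
 *
 *   struct hal_srng_params params = {0};
 *
 *   params.ring_base_vaddr = ring_vaddr;   // 8 byte aligned
 *   params.ring_base_paddr = ring_paddr;
 *   params.num_entries = num_entries;
 *   params.intr_timer_thres_us = 32;
 *   params.intr_batch_cntr_thres_entries = 1;
 *   params.flags |= HAL_SRNG_MSI_INTR;
 *
 *   void *srng = hal_srng_setup(hal_soc, ring_type, 0, 0, &params);
 *   if (!srng)
 *           return QDF_STATUS_E_FAILURE;
 */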
1097 
1098 /* Remapping ids of REO rings */
1099 #define REO_REMAP_TCL 0
1100 #define REO_REMAP_SW1 1
1101 #define REO_REMAP_SW2 2
1102 #define REO_REMAP_SW3 3
1103 #define REO_REMAP_SW4 4
1104 #define REO_REMAP_RELEASE 5
1105 #define REO_REMAP_FW 6
1106 /*
1107  * In Beryllium, the 4-bit REO destination ring value is defined as: 0:TCL
1108  * 1:SW1  2:SW2  3:SW3  4:SW4  5:Release  6:FW(WIFI)  7:SW5
1109  * 8:SW6 9:SW7  10:SW8  11: NOT_USED.
1110  *
1111  */
1112 #define REO_REMAP_SW5 7
1113 #define REO_REMAP_SW6 8
1114 #define REO_REMAP_SW7 9
1115 #define REO_REMAP_SW8 10
1116 
1117 /*
1118  * Macro to access HWIO_REO_R0_ERROR_DESTINATION_RING_CTRL_IX_0
1119  * to map destination to rings
1120  */
1121 #define HAL_REO_ERR_REMAP_IX0(_VALUE, _OFFSET) \
1122 	((_VALUE) << \
1123 	 (HWIO_REO_R0_ERROR_DESTINATION_MAPPING_IX_0_ERROR_ ## \
1124 	  DESTINATION_RING_ ## _OFFSET ## _SHFT))
1125 
1126 /*
1127  * Macro to access HWIO_REO_R0_ERROR_DESTINATION_RING_CTRL_IX_1
1128  * to map destination to rings
1129  */
1130 #define HAL_REO_ERR_REMAP_IX1(_VALUE, _OFFSET) \
1131 	((_VALUE) << \
1132 	 (HWIO_REO_R0_ERROR_DESTINATION_MAPPING_IX_1_ERROR_ ## \
1133 	  DESTINATION_RING_ ## _OFFSET ## _SHFT))
1134 
1135 /*
1136  * Macro to access HWIO_REO_R0_DESTINATION_RING_CTRL_IX_0
1137  * to map destination to rings
1138  */
1139 #define HAL_REO_REMAP_IX0(_VALUE, _OFFSET) \
1140 	((_VALUE) << \
1141 	 (HWIO_REO_R0_DESTINATION_RING_CTRL_IX_0_DEST_RING_MAPPING_ ## \
1142 	  _OFFSET ## _SHFT))
1143 
1144 /*
1145  * Macro to access HWIO_REO_R0_DESTINATION_RING_CTRL_IX_1
1146  * to map destination to rings
1147  */
1148 #define HAL_REO_REMAP_IX2(_VALUE, _OFFSET) \
1149 	((_VALUE) << \
1150 	 (HWIO_REO_R0_DESTINATION_RING_CTRL_IX_2_DEST_RING_MAPPING_ ## \
1151 	  _OFFSET ## _SHFT))
1152 
1153 /*
1154  * Macro to access HWIO_REO_R0_DESTINATION_RING_CTRL_IX_3
1155  * to map destination to rings
1156  */
1157 #define HAL_REO_REMAP_IX3(_VALUE, _OFFSET) \
1158 	((_VALUE) << \
1159 	 (HWIO_REO_R0_DESTINATION_RING_CTRL_IX_3_DEST_RING_MAPPING_ ## \
1160 	  _OFFSET ## _SHFT))
1161 
1162 /**
1163  * hal_reo_read_write_ctrl_ix - Read or write REO_DESTINATION_RING_CTRL_IX
1164  * @hal_soc_hdl: HAL SOC handle
1165  * @read: boolean value to indicate if read or write
1166  * @ix0: pointer to store IX0 reg value
1167  * @ix1: pointer to store IX1 reg value
1168  * @ix2: pointer to store IX2 reg value
1169  * @ix3: pointer to store IX3 reg value
1170  */
1171 void hal_reo_read_write_ctrl_ix(hal_soc_handle_t hal_soc_hdl, bool read,
1172 				uint32_t *ix0, uint32_t *ix1,
1173 				uint32_t *ix2, uint32_t *ix3);
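/*
 * Illustrative composition of a destination ring control value using the
 * remap macros above (a sketch; the destination indices passed to
 * HAL_REO_REMAP_IX2 are placeholders):
 *
 *   uint32_t ix2 = HAL_REO_REMAP_IX2(REO_REMAP_SW1, 16) |
 *                  HAL_REO_REMAP_IX2(REO_REMAP_SW2, 17) |
 *                  HAL_REO_REMAP_IX2(REO_REMAP_SW3, 18) |
 *                  HAL_REO_REMAP_IX2(REO_REMAP_SW4, 19);
 *
 *   hal_reo_read_write_ctrl_ix(hal_soc_hdl, false, NULL, NULL, &ix2, NULL);
 */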
1174 
1175 /**
1176  * hal_srng_dst_set_hp_paddr_confirm() - Set physical address to dest SRNG head
1177  *  pointer and confirm that write went through by reading back the value
1178  * @sring: srng pointer
1179  * @paddr: physical address
1180  *
1181  * Return: None
1182  */
1183 extern void hal_srng_dst_set_hp_paddr_confirm(struct hal_srng *sring,
1184 					      uint64_t paddr);
1185 
1186 /**
1187  * hal_srng_dst_init_hp() - Initialize head pointer with cached head pointer
1188  * @hal_soc: hal_soc handle
1189  * @srng: sring pointer
1190  * @vaddr: virtual address
1191  */
1192 void hal_srng_dst_init_hp(struct hal_soc_handle *hal_soc,
1193 			  struct hal_srng *srng,
1194 			  uint32_t *vaddr);
1195 
1196 /**
1197  * hal_srng_cleanup - Deinitialize HW SRNG ring.
1198  * @hal_soc: Opaque HAL SOC handle
1199  * @hal_ring_hdl: Opaque HAL SRNG pointer
1200  */
1201 void hal_srng_cleanup(void *hal_soc, hal_ring_handle_t hal_ring_hdl);
1202 
1203 static inline bool hal_srng_initialized(hal_ring_handle_t hal_ring_hdl)
1204 {
1205 	struct hal_srng *srng = (struct hal_srng *)hal_ring_hdl;
1206 
1207 	return !!srng->initialized;
1208 }
1209 
1210 /**
1211  * hal_srng_dst_peek - Check if there are any entries in the ring (peek)
1212  * @hal_soc: Opaque HAL SOC handle
1213  * @hal_ring_hdl: Destination ring pointer
1214  *
1215  * Caller takes responsibility for any locking needs.
1216  *
1217  * Return: Opaque pointer for next ring entry; NULL on failure
1218  */
1219 static inline
1220 void *hal_srng_dst_peek(hal_soc_handle_t hal_soc_hdl,
1221 			hal_ring_handle_t hal_ring_hdl)
1222 {
1223 	struct hal_srng *srng = (struct hal_srng *)hal_ring_hdl;
1224 
1225 	if (srng->u.dst_ring.tp != srng->u.dst_ring.cached_hp)
1226 		return (void *)(&srng->ring_base_vaddr[srng->u.dst_ring.tp]);
1227 
1228 	return NULL;
1229 }
1230 
1231 
1232 /**
1233  * hal_mem_dma_cache_sync - Cache sync the specified virtual address range
1234  * @hal_soc: HAL soc handle
1235  * @desc: desc start address
1236  * @entry_size: size of memory to sync
1237  *
1238  * Return: void
1239  */
1240 #if defined(__LINUX_MIPS32_ARCH__) || defined(__LINUX_MIPS64_ARCH__)
1241 static inline void hal_mem_dma_cache_sync(struct hal_soc *soc, uint32_t *desc,
1242 					  uint32_t entry_size)
1243 {
1244 	qdf_nbuf_dma_inv_range((void *)desc, (void *)(desc + entry_size));
1245 }
1246 #else
1247 static inline void hal_mem_dma_cache_sync(struct hal_soc *soc, uint32_t *desc,
1248 					  uint32_t entry_size)
1249 {
1250 	qdf_mem_dma_cache_sync(soc->qdf_dev, qdf_mem_virt_to_phys(desc),
1251 			       QDF_DMA_FROM_DEVICE,
1252 			       (entry_size * sizeof(uint32_t)));
1253 }
1254 #endif
1255 
1256 /**
1257  * hal_srng_access_start_unlocked - Start ring access (unlocked). Should use
1258  * hal_srng_access_start if locked access is required
1259  *
1260  * @hal_soc: Opaque HAL SOC handle
1261  * @hal_ring_hdl: Ring pointer (Source or Destination ring)
1262  *
1263  * This API doesn't implement any byte-order conversion on reading hp/tp.
1264  * So, Use API only for those srngs for which the target writes hp/tp values to
1265  * the DDR in the Host order.
1266  *
1267  * Return: 0 on success; error on failure
1268  */
1269 static inline int
1270 hal_srng_access_start_unlocked(hal_soc_handle_t hal_soc_hdl,
1271 			       hal_ring_handle_t hal_ring_hdl)
1272 {
1273 	struct hal_srng *srng = (struct hal_srng *)hal_ring_hdl;
1274 	struct hal_soc *soc = (struct hal_soc *)hal_soc_hdl;
1275 	uint32_t *desc;
1276 
1277 	if (srng->ring_dir == HAL_SRNG_SRC_RING)
1278 		srng->u.src_ring.cached_tp =
1279 			*(volatile uint32_t *)(srng->u.src_ring.tp_addr);
1280 	else {
1281 		srng->u.dst_ring.cached_hp =
1282 			*(volatile uint32_t *)(srng->u.dst_ring.hp_addr);
1283 
1284 		if (srng->flags & HAL_SRNG_CACHED_DESC) {
1285 			desc = hal_srng_dst_peek(hal_soc_hdl, hal_ring_hdl);
1286 			if (qdf_likely(desc)) {
1287 				hal_mem_dma_cache_sync(soc, desc,
1288 						       srng->entry_size);
1289 				qdf_prefetch(desc);
1290 			}
1291 		}
1292 	}
1293 
1294 	return 0;
1295 }
1296 
1297 /**
1298  * hal_le_srng_access_start_unlocked_in_cpu_order - Start ring access
1299  * (unlocked) with endianness correction.
1300  * @hal_soc: Opaque HAL SOC handle
1301  * @hal_ring_hdl: Ring pointer (Source or Destination ring)
1302  *
1303  * This API provides same functionally as hal_srng_access_start_unlocked()
1304  * except that it converts the little-endian formatted hp/tp values to
1305  * Host order on reading them. So, this API should only be used for those srngs
1306  * for which the target always writes hp/tp values in little-endian order
1307  * regardless of Host order.
1308  *
1309  * Also, this API doesn't take the lock. For locked access, use
1310  * hal_srng_access_start/hal_le_srng_access_start_in_cpu_order.
1311  *
1312  * Return: 0 on success; error on failure
1313  */
1314 static inline int
1315 hal_le_srng_access_start_unlocked_in_cpu_order(
1316 	hal_soc_handle_t hal_soc_hdl,
1317 	hal_ring_handle_t hal_ring_hdl)
1318 {
1319 	struct hal_srng *srng = (struct hal_srng *)hal_ring_hdl;
1320 	struct hal_soc *soc = (struct hal_soc *)hal_soc_hdl;
1321 	uint32_t *desc;
1322 
1323 	if (srng->ring_dir == HAL_SRNG_SRC_RING)
1324 		srng->u.src_ring.cached_tp =
1325 			qdf_le32_to_cpu(*(volatile uint32_t *)
1326 					(srng->u.src_ring.tp_addr));
1327 	else {
1328 		srng->u.dst_ring.cached_hp =
1329 			qdf_le32_to_cpu(*(volatile uint32_t *)
1330 					(srng->u.dst_ring.hp_addr));
1331 
1332 		if (srng->flags & HAL_SRNG_CACHED_DESC) {
1333 			desc = hal_srng_dst_peek(hal_soc_hdl, hal_ring_hdl);
1334 			if (qdf_likely(desc)) {
1335 				hal_mem_dma_cache_sync(soc, desc,
1336 						       srng->entry_size);
1337 				qdf_prefetch(desc);
1338 			}
1339 		}
1340 	}
1341 
1342 	return 0;
1343 }
1344 
1345 /**
1346  * hal_srng_try_access_start - Try to start (locked) ring access
1347  *
1348  * @hal_soc: Opaque HAL SOC handle
1349  * @hal_ring_hdl: Ring pointer (Source or Destination ring)
1350  *
1351  * Return: 0 on success; error on failure
1352  */
1353 static inline int hal_srng_try_access_start(hal_soc_handle_t hal_soc_hdl,
1354 					    hal_ring_handle_t hal_ring_hdl)
1355 {
1356 	struct hal_srng *srng = (struct hal_srng *)hal_ring_hdl;
1357 
1358 	if (qdf_unlikely(!hal_ring_hdl)) {
1359 		qdf_print("Error: Invalid hal_ring\n");
1360 		return -EINVAL;
1361 	}
1362 
1363 	if (!SRNG_TRY_LOCK(&(srng->lock)))
1364 		return -EINVAL;
1365 
1366 	return hal_srng_access_start_unlocked(hal_soc_hdl, hal_ring_hdl);
1367 }
1368 
1369 /**
1370  * hal_srng_access_start - Start (locked) ring access
1371  *
1372  * @hal_soc: Opaque HAL SOC handle
1373  * @hal_ring_hdl: Ring pointer (Source or Destination ring)
1374  *
1375  * This API doesn't implement any byte-order conversion on reading hp/tp.
1376  * So, Use API only for those srngs for which the target writes hp/tp values to
1377  * the DDR in the Host order.
1378  *
1379  * Return: 0 on success; error on failure
1380  */
1381 static inline int hal_srng_access_start(hal_soc_handle_t hal_soc_hdl,
1382 					hal_ring_handle_t hal_ring_hdl)
1383 {
1384 	struct hal_srng *srng = (struct hal_srng *)hal_ring_hdl;
1385 
1386 	if (qdf_unlikely(!hal_ring_hdl)) {
1387 		qdf_print("Error: Invalid hal_ring\n");
1388 		return -EINVAL;
1389 	}
1390 
1391 	SRNG_LOCK(&(srng->lock));
1392 
1393 	return hal_srng_access_start_unlocked(hal_soc_hdl, hal_ring_hdl);
1394 }
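/*
 * Illustrative destination ring processing loop (a sketch of the usual
 * calling pattern; process_desc() is a hypothetical consumer and
 * hal_srng_access_end() is defined further below in this file):
 *
 *   if (hal_srng_access_start(hal_soc_hdl, hal_ring_hdl))
 *           return;
 *
 *   while ((desc = hal_srng_dst_get_next(hal_soc, hal_ring_hdl)))
 *           process_desc(desc);
 *
 *   hal_srng_access_end(hal_soc_hdl, hal_ring_hdl);
 */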
1395 
1396 /**
1397  * hal_le_srng_access_start_in_cpu_order - Start (locked) ring access with
1398  * endianness correction
1399  * @hal_soc: Opaque HAL SOC handle
1400  * @hal_ring_hdl: Ring pointer (Source or Destination ring)
1401  *
1402  * This API provides same functionally as hal_srng_access_start()
1403  * except that it converts the little-endian formatted hp/tp values to
1404  * Host order on reading them. So, this API should only be used for those srngs
1405  * for which the target always writes hp/tp values in little-endian order
1406  * regardless of Host order.
1407  *
1408  * Return: 0 on success; error on failure
1409  */
1410 static inline int
1411 hal_le_srng_access_start_in_cpu_order(
1412 	hal_soc_handle_t hal_soc_hdl,
1413 	hal_ring_handle_t hal_ring_hdl)
1414 {
1415 	struct hal_srng *srng = (struct hal_srng *)hal_ring_hdl;
1416 
1417 	if (qdf_unlikely(!hal_ring_hdl)) {
1418 		qdf_print("Error: Invalid hal_ring\n");
1419 		return -EINVAL;
1420 	}
1421 
1422 	SRNG_LOCK(&(srng->lock));
1423 
1424 	return hal_le_srng_access_start_unlocked_in_cpu_order(
1425 			hal_soc_hdl, hal_ring_hdl);
1426 }
1427 
1428 /**
1429  * hal_srng_dst_get_next - Get next entry from a destination ring
1430  * @hal_soc: Opaque HAL SOC handle
1431  * @hal_ring_hdl: Destination ring pointer
1432  *
1433  * Return: Opaque pointer for next ring entry; NULL on failure
1434  */
1435 static inline
1436 void *hal_srng_dst_get_next(void *hal_soc,
1437 			    hal_ring_handle_t hal_ring_hdl)
1438 {
1439 	struct hal_srng *srng = (struct hal_srng *)hal_ring_hdl;
1440 	uint32_t *desc;
1441 
1442 	if (srng->u.dst_ring.tp == srng->u.dst_ring.cached_hp)
1443 		return NULL;
1444 
1445 	desc = &srng->ring_base_vaddr[srng->u.dst_ring.tp];
1446 	/* TODO: Using % is expensive, but we have to do this since
1447 	 * size of some SRNG rings is not power of 2 (due to descriptor
1448 	 * sizes). Need to create separate API for rings used
1449 	 * per-packet, with sizes power of 2 (TCL2SW, REO2SW,
1450 	 * SW2RXDMA and CE rings)
1451 	 */
1452 	srng->u.dst_ring.tp = (srng->u.dst_ring.tp + srng->entry_size);
1453 	if (srng->u.dst_ring.tp == srng->ring_size)
1454 		srng->u.dst_ring.tp = 0;
1455 
1456 	if (srng->flags & HAL_SRNG_CACHED_DESC) {
1457 		struct hal_soc *soc = (struct hal_soc *)hal_soc;
1458 		uint32_t *desc_next;
1459 		uint32_t tp;
1460 
1461 		tp = srng->u.dst_ring.tp;
1462 		desc_next = &srng->ring_base_vaddr[srng->u.dst_ring.tp];
1463 		hal_mem_dma_cache_sync(soc, desc_next, srng->entry_size);
1464 		qdf_prefetch(desc_next);
1465 	}
1466 
1467 	return (void *)desc;
1468 }
1469 
1470 /**
1471  * hal_srng_dst_get_next_cached - Get cached next entry
1472  * @hal_soc: Opaque HAL SOC handle
1473  * @hal_ring_hdl: Destination ring pointer
1474  *
1475  * Get next entry from a destination ring and move cached tail pointer
1476  *
1477  * Return: Opaque pointer for next ring entry; NULL on failure
1478  */
1479 static inline
1480 void *hal_srng_dst_get_next_cached(void *hal_soc,
1481 				   hal_ring_handle_t hal_ring_hdl)
1482 {
1483 	struct hal_srng *srng = (struct hal_srng *)hal_ring_hdl;
1484 	uint32_t *desc;
1485 	uint32_t *desc_next;
1486 
1487 	if (srng->u.dst_ring.tp == srng->u.dst_ring.cached_hp)
1488 		return NULL;
1489 
1490 	desc = &srng->ring_base_vaddr[srng->u.dst_ring.tp];
1491 	/* TODO: Using % is expensive, but we have to do this since
1492 	 * size of some SRNG rings is not power of 2 (due to descriptor
1493 	 * sizes). Need to create separate API for rings used
1494 	 * per-packet, with sizes power of 2 (TCL2SW, REO2SW,
1495 	 * SW2RXDMA and CE rings)
1496 	 */
1497 	srng->u.dst_ring.tp = (srng->u.dst_ring.tp + srng->entry_size);
1498 	if (srng->u.dst_ring.tp == srng->ring_size)
1499 		srng->u.dst_ring.tp = 0;
1500 
1501 	desc_next = &srng->ring_base_vaddr[srng->u.dst_ring.tp];
1502 	qdf_prefetch(desc_next);
1503 	return (void *)desc;
1504 }
1505 
1506 /**
1507  * hal_srng_dst_dec_tp - decrement the TP of the Dst ring by one entry
1508  * @hal_soc: Opaque HAL SOC handle
1509  * @hal_ring_hdl: Destination ring pointer
1510  *
1511  * Decrement the tail pointer in the destination ring by one entry
1512  *
1513  */
1514 static inline
1515 void hal_srng_dst_dec_tp(void *hal_soc, hal_ring_handle_t hal_ring_hdl)
1516 {
1517 	struct hal_srng *srng = (struct hal_srng *)hal_ring_hdl;
1518 
1519 	if (qdf_unlikely(!srng->u.dst_ring.tp))
1520 		srng->u.dst_ring.tp = (srng->ring_size - srng->entry_size);
1521 	else
1522 		srng->u.dst_ring.tp -= srng->entry_size;
1523 }
1524 
1525 static inline int hal_srng_lock(hal_ring_handle_t hal_ring_hdl)
1526 {
1527 	struct hal_srng *srng = (struct hal_srng *)hal_ring_hdl;
1528 
1529 	if (qdf_unlikely(!hal_ring_hdl)) {
1530 		qdf_print("error: invalid hal_ring\n");
1531 		return -EINVAL;
1532 	}
1533 
1534 	SRNG_LOCK(&(srng->lock));
1535 	return 0;
1536 }
1537 
1538 static inline int hal_srng_unlock(hal_ring_handle_t hal_ring_hdl)
1539 {
1540 	struct hal_srng *srng = (struct hal_srng *)hal_ring_hdl;
1541 
1542 	if (qdf_unlikely(!hal_ring_hdl)) {
1543 		qdf_print("error: invalid hal_ring\n");
1544 		return -EINVAL;
1545 	}
1546 
1547 	SRNG_UNLOCK(&(srng->lock));
1548 	return 0;
1549 }
1550 
1551 /**
1552  * hal_srng_dst_get_next_hp - Get next entry from a destination ring and move
1553  * cached head pointer
1554  *
1555  * @hal_soc: Opaque HAL SOC handle
1556  * @hal_ring_hdl: Destination ring pointer
1557  *
1558  * Return: Opaque pointer for next ring entry; NULL on failure
1559  */
1560 static inline void *
1561 hal_srng_dst_get_next_hp(hal_soc_handle_t hal_soc_hdl,
1562 			 hal_ring_handle_t hal_ring_hdl)
1563 {
1564 	struct hal_srng *srng = (struct hal_srng *)hal_ring_hdl;
1565 	uint32_t *desc;
1566 	/* TODO: Using % is expensive, but we have to do this since
1567 	 * size of some SRNG rings is not power of 2 (due to descriptor
1568 	 * sizes). Need to create separate API for rings used
1569 	 * per-packet, with sizes power of 2 (TCL2SW, REO2SW,
1570 	 * SW2RXDMA and CE rings)
1571 	 */
1572 	uint32_t next_hp = (srng->u.dst_ring.cached_hp + srng->entry_size) %
1573 		srng->ring_size;
1574 
1575 	if (next_hp != srng->u.dst_ring.tp) {
1576 		desc = &(srng->ring_base_vaddr[srng->u.dst_ring.cached_hp]);
1577 		srng->u.dst_ring.cached_hp = next_hp;
1578 		return (void *)desc;
1579 	}
1580 
1581 	return NULL;
1582 }
1583 
1584 /**
1585  * hal_srng_dst_peek_sync - Check if there are any entries in the ring (peek)
1586  * @hal_soc: Opaque HAL SOC handle
1587  * @hal_ring_hdl: Destination ring pointer
1588  *
1589  * Sync cached head pointer with HW.
1590  * Caller takes responsibility for any locking needs.
1591  *
1592  * Return: Opaque pointer for next ring entry; NULL on failure
1593  */
1594 static inline
1595 void *hal_srng_dst_peek_sync(hal_soc_handle_t hal_soc_hdl,
1596 			     hal_ring_handle_t hal_ring_hdl)
1597 {
1598 	struct hal_srng *srng = (struct hal_srng *)hal_ring_hdl;
1599 
1600 	srng->u.dst_ring.cached_hp =
1601 		*(volatile uint32_t *)(srng->u.dst_ring.hp_addr);
1602 
1603 	if (srng->u.dst_ring.tp != srng->u.dst_ring.cached_hp)
1604 		return (void *)(&(srng->ring_base_vaddr[srng->u.dst_ring.tp]));
1605 
1606 	return NULL;
1607 }
1608 
1609 /**
1610  * hal_srng_dst_peek_sync_locked - Peek for any entries in the ring
1611  * @hal_soc: Opaque HAL SOC handle
1612  * @hal_ring_hdl: Destination ring pointer
1613  *
1614  * Sync cached head pointer with HW.
1615  * This function takes up SRNG_LOCK. Should not be called with SRNG lock held.
1616  *
1617  * Return: Opaque pointer for next ring entry; NULL on failure
1618  */
1619 static inline
1620 void *hal_srng_dst_peek_sync_locked(hal_soc_handle_t hal_soc_hdl,
1621 				    hal_ring_handle_t hal_ring_hdl)
1622 {
1623 	struct hal_srng *srng = (struct hal_srng *)hal_ring_hdl;
1624 	void *ring_desc_ptr = NULL;
1625 
1626 	if (qdf_unlikely(!hal_ring_hdl)) {
1627 		qdf_print("Error: Invalid hal_ring\n");
1628 		return  NULL;
1629 	}
1630 
1631 	SRNG_LOCK(&srng->lock);
1632 
1633 	ring_desc_ptr = hal_srng_dst_peek_sync(hal_soc_hdl, hal_ring_hdl);
1634 
1635 	SRNG_UNLOCK(&srng->lock);
1636 
1637 	return ring_desc_ptr;
1638 }
1639 
1640 #define hal_srng_dst_num_valid_nolock(hal_soc, hal_ring_hdl, sync_hw_ptr) \
1641 		hal_srng_dst_num_valid(hal_soc, hal_ring_hdl, sync_hw_ptr)
1642 
1643 /**
1644  * hal_srng_dst_num_valid - Returns number of valid entries (to be processed
1645  * by SW) in destination ring
1646  *
1647  * @hal_soc: Opaque HAL SOC handle
1648  * @hal_ring_hdl: Destination ring pointer
1649  * @sync_hw_ptr: Sync cached head pointer with HW
1650  *
1651  */
1652 static inline
1653 uint32_t hal_srng_dst_num_valid(void *hal_soc,
1654 				hal_ring_handle_t hal_ring_hdl,
1655 				int sync_hw_ptr)
1656 {
1657 	struct hal_srng *srng = (struct hal_srng *)hal_ring_hdl;
1658 	uint32_t hp;
1659 	uint32_t tp = srng->u.dst_ring.tp;
1660 
1661 	if (sync_hw_ptr) {
1662 		hp = *(volatile uint32_t *)(srng->u.dst_ring.hp_addr);
1663 		srng->u.dst_ring.cached_hp = hp;
1664 	} else {
1665 		hp = srng->u.dst_ring.cached_hp;
1666 	}
1667 
1668 	if (hp >= tp)
1669 		return (hp - tp) / srng->entry_size;
1670 
1671 	return (srng->ring_size - tp + hp) / srng->entry_size;
1672 }
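/*
 * Worked example for the wrap-around case above (illustrative numbers):
 * with ring_size = 1024 words, entry_size = 8 words, tp = 1016 and hp = 16,
 * hp < tp so the count is (1024 - 1016 + 16) / 8 = 3 valid entries.
 */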
1673 
1674 /**
1675  * hal_srng_dst_inv_cached_descs - API to invalidate descriptors in batch mode
1676  * @hal_soc: Opaque HAL SOC handle
1677  * @hal_ring_hdl: Destination ring pointer
1678  * @entry_count: call invalidate API if valid entries available
1679  *
1680  * Invalidates a set of cached descriptors starting from TP to cached_HP
1681  *
1682  * Return: None
1683  */
1684 static inline void hal_srng_dst_inv_cached_descs(void *hal_soc,
1685 						 hal_ring_handle_t hal_ring_hdl,
1686 						 uint32_t entry_count)
1687 {
1688 	struct hal_srng *srng = (struct hal_srng *)hal_ring_hdl;
1689 	uint32_t *first_desc;
1690 	uint32_t *last_desc;
1691 	uint32_t last_desc_index;
1692 
1693 	/*
1694 	 * If SRNG does not have cached descriptors this
1695 	 * API call should be a no op
1696 	 */
1697 	if (!(srng->flags & HAL_SRNG_CACHED_DESC))
1698 		return;
1699 
1700 	if (!entry_count)
1701 		return;
1702 
1703 	first_desc = &srng->ring_base_vaddr[srng->u.dst_ring.tp];
1704 
1705 	last_desc_index = (srng->u.dst_ring.tp +
1706 			   (entry_count * srng->entry_size)) %
1707 			  srng->ring_size;
1708 
1709 	last_desc =  &srng->ring_base_vaddr[last_desc_index];
1710 
1711 	if (last_desc > (uint32_t *)first_desc)
1712 		/* invalidate from tp to cached_hp */
1713 		qdf_nbuf_dma_inv_range_no_dsb((void *)first_desc,
1714 					      (void *)(last_desc));
1715 	else {
1716 		/* invalidate from tp to end of the ring */
1717 		qdf_nbuf_dma_inv_range_no_dsb((void *)first_desc,
1718 					      (void *)srng->ring_vaddr_end);
1719 
1720 		/* invalidate from start of ring to cached_hp */
1721 		qdf_nbuf_dma_inv_range_no_dsb((void *)srng->ring_base_vaddr,
1722 					      (void *)last_desc);
1723 	}
1724 	qdf_dsb();
1725 }
1726 
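/*
 * Illustrative call sequence (hypothetical caller context) for rings created
 * with HAL_SRNG_CACHED_DESC: the cached range is invalidated once for the
 * whole batch before the descriptors are parsed.
 *
 *	num_valid = hal_srng_dst_num_valid(hal_soc, dst_ring_hdl, 1);
 *	hal_srng_dst_inv_cached_descs(hal_soc, dst_ring_hdl, num_valid);
 *	// ... walk the ring and process num_valid descriptors ...
 */
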
1727 /**
1728  * hal_srng_dst_num_valid_locked - Returns num valid entries to be processed
1729  *
1730  * @hal_soc: Opaque HAL SOC handle
1731  * @hal_ring_hdl: Destination ring pointer
1732  * @sync_hw_ptr: Sync cached head pointer with HW
1733  *
1734  * Returns number of valid entries to be processed by the host driver. The
1735  * function takes up SRNG lock.
1736  *
1737  * Return: Number of valid destination entries
1738  */
1739 static inline uint32_t
1740 hal_srng_dst_num_valid_locked(hal_soc_handle_t hal_soc,
1741 			      hal_ring_handle_t hal_ring_hdl,
1742 			      int sync_hw_ptr)
1743 {
1744 	uint32_t num_valid;
1745 	struct hal_srng *srng = (struct hal_srng *)hal_ring_hdl;
1746 
1747 	SRNG_LOCK(&srng->lock);
1748 	num_valid = hal_srng_dst_num_valid(hal_soc, hal_ring_hdl, sync_hw_ptr);
1749 	SRNG_UNLOCK(&srng->lock);
1750 
1751 	return num_valid;
1752 }
1753 
1754 /**
1755  * hal_srng_sync_cachedhp - sync cached HP with the HW head pointer
1756  *
1757  * @hal_soc: Opaque HAL SOC handle
1758  * @hal_ring_hdl: Destination ring pointer
1759  *
1760  */
1761 static inline
1762 void hal_srng_sync_cachedhp(void *hal_soc,
1763 				hal_ring_handle_t hal_ring_hdl)
1764 {
1765 	struct hal_srng *srng = (struct hal_srng *)hal_ring_hdl;
1766 	uint32_t hp;
1767 
1768 	hp = *(volatile uint32_t *)(srng->u.dst_ring.hp_addr);
1769 	srng->u.dst_ring.cached_hp = hp;
1770 }
1771 
1772 /**
1773  * hal_srng_src_reap_next - Reap next entry from a source ring and move reap
1774  * pointer. This can be used to release any buffers associated with completed
1775  * ring entries. Note that this should not be used for posting new descriptor
1776  * entries. Posting of new entries should be done only using
1777  * hal_srng_src_get_next_reaped when this function is used for reaping.
1778  *
1779  * @hal_soc: Opaque HAL SOC handle
1780  * @hal_ring_hdl: Source ring pointer
1781  *
1782  * Return: Opaque pointer for next ring entry; NULL on failure
1783  */
1784 static inline void *
1785 hal_srng_src_reap_next(void *hal_soc, hal_ring_handle_t hal_ring_hdl)
1786 {
1787 	struct hal_srng *srng = (struct hal_srng *)hal_ring_hdl;
1788 	uint32_t *desc;
1789 
1790 	/* TODO: Using % is expensive, but we have to do this since
1791 	 * size of some SRNG rings is not power of 2 (due to descriptor
1792 	 * sizes). Need to create separate API for rings used
1793 	 * per-packet, with sizes power of 2 (TCL2SW, REO2SW,
1794 	 * SW2RXDMA and CE rings)
1795 	 */
1796 	uint32_t next_reap_hp = (srng->u.src_ring.reap_hp + srng->entry_size) %
1797 		srng->ring_size;
1798 
1799 	if (next_reap_hp != srng->u.src_ring.cached_tp) {
1800 		desc = &(srng->ring_base_vaddr[next_reap_hp]);
1801 		srng->u.src_ring.reap_hp = next_reap_hp;
1802 		return (void *)desc;
1803 	}
1804 
1805 	return NULL;
1806 }
1807 
1808 /**
1809  * hal_srng_src_get_next_reaped - Get next entry from a source ring that is
1810  * already reaped using hal_srng_src_reap_next, for posting new entries to
1811  * the ring
1812  *
1813  * @hal_soc: Opaque HAL SOC handle
1814  * @hal_ring_hdl: Source ring pointer
1815  *
1816  * Return: Opaque pointer for next (reaped) source ring entry; NULL on failure
1817  */
1818 static inline void *
1819 hal_srng_src_get_next_reaped(void *hal_soc, hal_ring_handle_t hal_ring_hdl)
1820 {
1821 	struct hal_srng *srng = (struct hal_srng *)hal_ring_hdl;
1822 	uint32_t *desc;
1823 
1824 	if (srng->u.src_ring.hp != srng->u.src_ring.reap_hp) {
1825 		desc = &(srng->ring_base_vaddr[srng->u.src_ring.hp]);
1826 		srng->u.src_ring.hp = (srng->u.src_ring.hp + srng->entry_size) %
1827 			srng->ring_size;
1828 
1829 		return (void *)desc;
1830 	}
1831 
1832 	return NULL;
1833 }
1834 
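/*
 * Sketch of the reap/post split described above; the loop bodies and helper
 * names are hypothetical. Completed buffers are first released with
 * hal_srng_src_reap_next(), and the same entries are later re-posted with
 * hal_srng_src_get_next_reaped().
 *
 *	while ((desc = hal_srng_src_reap_next(hal_soc, src_ring_hdl)))
 *		release_completed_buffer(desc);
 *
 *	while (have_new_buffers() &&
 *	       (desc = hal_srng_src_get_next_reaped(hal_soc, src_ring_hdl)))
 *		fill_descriptor(desc, next_new_buffer());
 */
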
1835 /**
1836  * hal_srng_src_pending_reap_next - Reap next entry from a source ring and
1837  * move reap pointer. This API is used in detach path to release any buffers
1838  * associated with ring entries which are pending reap.
1839  *
1840  * @hal_soc: Opaque HAL SOC handle
1841  * @hal_ring_hdl: Source ring pointer
1842  *
1843  * Return: Opaque pointer for next ring entry; NULL on failure
1844  */
1845 static inline void *
1846 hal_srng_src_pending_reap_next(void *hal_soc, hal_ring_handle_t hal_ring_hdl)
1847 {
1848 	struct hal_srng *srng = (struct hal_srng *)hal_ring_hdl;
1849 	uint32_t *desc;
1850 
1851 	uint32_t next_reap_hp = (srng->u.src_ring.reap_hp + srng->entry_size) %
1852 		srng->ring_size;
1853 
1854 	if (next_reap_hp != srng->u.src_ring.hp) {
1855 		desc = &(srng->ring_base_vaddr[next_reap_hp]);
1856 		srng->u.src_ring.reap_hp = next_reap_hp;
1857 		return (void *)desc;
1858 	}
1859 
1860 	return NULL;
1861 }
1862 
1863 /**
1864  * hal_srng_src_done_val - Get number of source ring entries done by HW
1865  *
1866  * @hal_soc: Opaque HAL SOC handle
1867  * @hal_ring_hdl: Source ring pointer
1868  *
1869  * Return: Number of ring entries consumed by HW and pending reap by SW
1870  */
1871 static inline uint32_t
1872 hal_srng_src_done_val(void *hal_soc, hal_ring_handle_t hal_ring_hdl)
1873 {
1874 	struct hal_srng *srng = (struct hal_srng *)hal_ring_hdl;
1875 	/* TODO: Using % is expensive, but we have to do this since
1876 	 * size of some SRNG rings is not power of 2 (due to descriptor
1877 	 * sizes). Need to create separate API for rings used
1878 	 * per-packet, with sizes power of 2 (TCL2SW, REO2SW,
1879 	 * SW2RXDMA and CE rings)
1880 	 */
1881 	uint32_t next_reap_hp = (srng->u.src_ring.reap_hp + srng->entry_size) %
1882 		srng->ring_size;
1883 
1884 	if (next_reap_hp == srng->u.src_ring.cached_tp)
1885 		return 0;
1886 
1887 	if (srng->u.src_ring.cached_tp > next_reap_hp)
1888 		return (srng->u.src_ring.cached_tp - next_reap_hp) /
1889 			srng->entry_size;
1890 	else
1891 		return ((srng->ring_size - next_reap_hp) +
1892 			srng->u.src_ring.cached_tp) / srng->entry_size;
1893 }
1894 
1895 /**
1896  * hal_get_entrysize_from_srng() - Retrieve ring entry size
1897  * @hal_ring_hdl: Source ring pointer
1898  *
1899  * srng->entry_size is in units of 4-byte (32-bit) words, so it is left
1900  * shifted by 2 to return the entry size in bytes.
1901  *
1902  * Return: uint8_t
1903  */
1904 static inline
1905 uint8_t hal_get_entrysize_from_srng(hal_ring_handle_t hal_ring_hdl)
1906 {
1907 	struct hal_srng *srng = (struct hal_srng *)hal_ring_hdl;
1908 
1909 	return srng->entry_size << 2;
1910 }
1911 
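/*
 * Example (illustrative values): for a ring with entry_size = 8 (32-bit
 * words), this returns 8 << 2 = 32 bytes per descriptor.
 */
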
1912 /**
1913  * hal_get_sw_hptp - Get SW head and tail pointer location for any ring
1914  * @hal_soc: Opaque HAL SOC handle
1915  * @hal_ring_hdl: Source ring pointer
1916  * @tailp: Tail Pointer
1917  * @headp: Head Pointer
1918  *
1919  * Return: Update tail pointer and head pointer in arguments.
1920  */
1921 static inline
1922 void hal_get_sw_hptp(void *hal_soc, hal_ring_handle_t hal_ring_hdl,
1923 		     uint32_t *tailp, uint32_t *headp)
1924 {
1925 	struct hal_srng *srng = (struct hal_srng *)hal_ring_hdl;
1926 
1927 	if (srng->ring_dir == HAL_SRNG_SRC_RING) {
1928 		*headp = srng->u.src_ring.hp;
1929 		*tailp = *srng->u.src_ring.tp_addr;
1930 	} else {
1931 		*tailp = srng->u.dst_ring.tp;
1932 		*headp = *srng->u.dst_ring.hp_addr;
1933 	}
1934 }
1935 
1936 #if defined(CLEAR_SW2TCL_CONSUMED_DESC)
1937 /**
1938  * hal_srng_src_get_next_consumed - Get the next desc if consumed by HW
1939  *
1940  * @hal_soc: Opaque HAL SOC handle
1941  * @hal_ring_hdl: Source ring pointer
1942  *
1943  * Return: pointer to descriptor if consumed by HW, else NULL
1944  */
1945 static inline
1946 void *hal_srng_src_get_next_consumed(void *hal_soc,
1947 				     hal_ring_handle_t hal_ring_hdl)
1948 {
1949 	struct hal_srng *srng = (struct hal_srng *)hal_ring_hdl;
1950 	uint32_t *desc = NULL;
1951 	/* TODO: Using % is expensive, but we have to do this since
1952 	 * size of some SRNG rings is not power of 2 (due to descriptor
1953 	 * sizes). Need to create separate API for rings used
1954 	 * per-packet, with sizes power of 2 (TCL2SW, REO2SW,
1955 	 * SW2RXDMA and CE rings)
1956 	 */
1957 	uint32_t next_entry = (srng->last_desc_cleared + srng->entry_size) %
1958 			      srng->ring_size;
1959 
1960 	if (next_entry != srng->u.src_ring.cached_tp) {
1961 		desc = &srng->ring_base_vaddr[next_entry];
1962 		srng->last_desc_cleared = next_entry;
1963 	}
1964 
1965 	return desc;
1966 }
1967 
1968 #else
1969 static inline
1970 void *hal_srng_src_get_next_consumed(void *hal_soc,
1971 				     hal_ring_handle_t hal_ring_hdl)
1972 {
1973 	return NULL;
1974 }
1975 #endif /* CLEAR_SW2TCL_CONSUMED_DESC */
1976 
1977 /**
1978  * hal_srng_src_get_next - Get next entry from a source ring and move cached HP
1979  *
1980  * @hal_soc: Opaque HAL SOC handle
1981  * @hal_ring_hdl: Source ring pointer
1982  *
1983  * Return: Opaque pointer for next ring entry; NULL on failure
1984  */
1985 static inline
1986 void *hal_srng_src_get_next(void *hal_soc,
1987 			    hal_ring_handle_t hal_ring_hdl)
1988 {
1989 	struct hal_srng *srng = (struct hal_srng *)hal_ring_hdl;
1990 	uint32_t *desc;
1991 	/* TODO: Using % is expensive, but we have to do this since
1992 	 * size of some SRNG rings is not power of 2 (due to descriptor
1993 	 * sizes). Need to create separate API for rings used
1994 	 * per-packet, with sizes power of 2 (TCL2SW, REO2SW,
1995 	 * SW2RXDMA and CE rings)
1996 	 */
1997 	uint32_t next_hp = (srng->u.src_ring.hp + srng->entry_size) %
1998 		srng->ring_size;
1999 
2000 	if (next_hp != srng->u.src_ring.cached_tp) {
2001 		desc = &(srng->ring_base_vaddr[srng->u.src_ring.hp]);
2002 		srng->u.src_ring.hp = next_hp;
2003 		/* TODO: Since reap function is not used by all rings, we can
2004 		 * remove the following update of reap_hp in this function
2005 		 * if we can ensure that only hal_srng_src_get_next_reaped
2006 		 * is used for the rings requiring reap functionality
2007 		 */
2008 		srng->u.src_ring.reap_hp = next_hp;
2009 		return (void *)desc;
2010 	}
2011 
2012 	return NULL;
2013 }
2014 
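/*
 * Typical producer sequence (illustrative sketch; populate_tx_descriptor()
 * is a hypothetical caller routine) built on the source ring APIs in this
 * file:
 *
 *	hal_srng_access_start(hal_soc, src_ring_hdl);
 *	desc = hal_srng_src_get_next(hal_soc, src_ring_hdl);
 *	if (desc)
 *		populate_tx_descriptor(desc);
 *	hal_srng_access_end(hal_soc, src_ring_hdl);
 */
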
2015 /**
2016  * hal_srng_src_peek_n_get_next - Get next entry from a ring without
2017  * moving head pointer.
2018  * hal_srng_src_get_next should be called subsequently to move the head pointer
2019  *
2020  * @hal_soc: Opaque HAL SOC handle
2021  * @hal_ring_hdl: Source ring pointer
2022  *
2023  * Return: Opaque pointer for next ring entry; NULL on failure
2024  */
2025 static inline
2026 void *hal_srng_src_peek_n_get_next(hal_soc_handle_t hal_soc_hdl,
2027 				   hal_ring_handle_t hal_ring_hdl)
2028 {
2029 	struct hal_srng *srng = (struct hal_srng *)hal_ring_hdl;
2030 	uint32_t *desc;
2031 
2032 	/* TODO: Using % is expensive, but we have to do this since
2033 	 * size of some SRNG rings is not power of 2 (due to descriptor
2034 	 * sizes). Need to create separate API for rings used
2035 	 * per-packet, with sizes power of 2 (TCL2SW, REO2SW,
2036 	 * SW2RXDMA and CE rings)
2037 	 */
2038 	if (((srng->u.src_ring.hp + srng->entry_size) %
2039 		srng->ring_size) != srng->u.src_ring.cached_tp) {
2040 		desc = &(srng->ring_base_vaddr[(srng->u.src_ring.hp +
2041 						srng->entry_size) %
2042 						srng->ring_size]);
2043 		return (void *)desc;
2044 	}
2045 
2046 	return NULL;
2047 }
2048 
2049 /**
2050  * hal_srng_src_peek_n_get_next_next - Get the next-to-next (HP + 2) entry
2051  * from a ring without moving head pointer.
2052  *
2053  * @hal_soc: Opaque HAL SOC handle
2054  * @hal_ring_hdl: Source ring pointer
2055  *
2056  * Return: Opaque pointer for next-to-next ring entry; NULL on failure
2057  */
2058 static inline
2059 void *hal_srng_src_peek_n_get_next_next(hal_soc_handle_t hal_soc_hdl,
2060 					hal_ring_handle_t hal_ring_hdl)
2061 {
2062 	struct hal_srng *srng = (struct hal_srng *)hal_ring_hdl;
2063 	uint32_t *desc;
2064 
2065 	/* TODO: Using % is expensive, but we have to do this since
2066 	 * size of some SRNG rings is not power of 2 (due to descriptor
2067 	 * sizes). Need to create separate API for rings used
2068 	 * per-packet, with sizes power of 2 (TCL2SW, REO2SW,
2069 	 * SW2RXDMA and CE rings)
2070 	 */
2071 	if ((((srng->u.src_ring.hp + (srng->entry_size)) %
2072 		srng->ring_size) != srng->u.src_ring.cached_tp) &&
2073 	    (((srng->u.src_ring.hp + (srng->entry_size * 2)) %
2074 		srng->ring_size) != srng->u.src_ring.cached_tp)) {
2075 		desc = &(srng->ring_base_vaddr[(srng->u.src_ring.hp +
2076 						(srng->entry_size * 2)) %
2077 						srng->ring_size]);
2078 		return (void *)desc;
2079 	}
2080 
2081 	return NULL;
2082 }
2083 
2084 /**
2085  * hal_srng_src_get_cur_hp_n_move_next() - API returns current HP
2086  * and moves HP to the next entry in the src ring
2087  *
2088  * Usage: This API should only be used for initial replenish at init time.
2089  *
2090  * @hal_soc_hdl: HAL soc handle
2091  * @hal_ring_hdl: Source ring pointer
2092  *
2093  */
2094 static inline void *
2095 hal_srng_src_get_cur_hp_n_move_next(hal_soc_handle_t hal_soc_hdl,
2096 				    hal_ring_handle_t hal_ring_hdl)
2097 {
2098 	struct hal_srng *srng = (struct hal_srng *)hal_ring_hdl;
2099 	uint32_t *cur_desc = NULL;
2100 	uint32_t next_hp;
2101 
2102 	cur_desc = &srng->ring_base_vaddr[(srng->u.src_ring.hp)];
2103 
2104 	next_hp = (srng->u.src_ring.hp + srng->entry_size) %
2105 		srng->ring_size;
2106 
2107 	if (next_hp != srng->u.src_ring.cached_tp)
2108 		srng->u.src_ring.hp = next_hp;
2109 
2110 	return (void *)cur_desc;
2111 }
2112 
2113 /**
2114  * hal_srng_src_num_avail - Returns number of available entries in src ring
2115  *
2116  * @hal_soc: Opaque HAL SOC handle
2117  * @hal_ring_hdl: Source ring pointer
2118  * @sync_hw_ptr: Sync cached tail pointer with HW
2119  *
2120  */
2121 static inline uint32_t
2122 hal_srng_src_num_avail(void *hal_soc,
2123 		       hal_ring_handle_t hal_ring_hdl, int sync_hw_ptr)
2124 {
2125 	struct hal_srng *srng = (struct hal_srng *)hal_ring_hdl;
2126 	uint32_t tp;
2127 	uint32_t hp = srng->u.src_ring.hp;
2128 
2129 	if (sync_hw_ptr) {
2130 		tp = *(srng->u.src_ring.tp_addr);
2131 		srng->u.src_ring.cached_tp = tp;
2132 	} else {
2133 		tp = srng->u.src_ring.cached_tp;
2134 	}
2135 
2136 	if (tp > hp)
2137 		return ((tp - hp) / srng->entry_size) - 1;
2138 	else
2139 		return ((srng->ring_size - hp + tp) / srng->entry_size) - 1;
2140 }
2141 
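/*
 * Worked example (illustrative numbers): with entry_size = 8 words,
 * ring_size = 1024 words, hp = 1000 and tp = 16, tp < hp so the count is
 * ((1024 - 1000 + 16) / 8) - 1 = 4 available entries; one entry is always
 * kept unused so a full ring can be distinguished from an empty one.
 */
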
2142 #ifdef WLAN_DP_SRNG_USAGE_WM_TRACKING
2143 /**
2144  * hal_srng_clear_ring_usage_wm_locked() - Clear SRNG usage watermark stats
2145  * @hal_soc_hdl: HAL soc handle
2146  * @hal_ring_hdl: SRNG handle
2147  *
2148  * This function tries to acquire SRNG lock, and hence should not be called
2149  * from a context which has already acquired the SRNG lock.
2150  *
2151  * Return: None
2152  */
2153 static inline
2154 void hal_srng_clear_ring_usage_wm_locked(hal_soc_handle_t hal_soc_hdl,
2155 					 hal_ring_handle_t hal_ring_hdl)
2156 {
2157 	struct hal_srng *srng = (struct hal_srng *)hal_ring_hdl;
2158 
2159 	SRNG_LOCK(&srng->lock);
2160 	srng->high_wm.val = 0;
2161 	srng->high_wm.timestamp = 0;
2162 	qdf_mem_zero(&srng->high_wm.bins[0], sizeof(srng->high_wm.bins[0]) *
2163 					     HAL_SRNG_HIGH_WM_BIN_MAX);
2164 	SRNG_UNLOCK(&srng->lock);
2165 }
2166 
2167 /**
2168  * hal_srng_update_ring_usage_wm_no_lock() - Update the SRNG usage wm stats
2169  * @hal_soc_hdl: HAL soc handle
2170  * @hal_ring_hdl: SRNG handle
2171  *
2172  * This function should be called with the SRNG lock held.
2173  *
2174  * Return: None
2175  */
2176 static inline
2177 void hal_srng_update_ring_usage_wm_no_lock(hal_soc_handle_t hal_soc_hdl,
2178 					   hal_ring_handle_t hal_ring_hdl)
2179 {
2180 	struct hal_srng *srng = (struct hal_srng *)hal_ring_hdl;
2181 	uint32_t curr_wm_val = 0;
2182 
2183 	if (srng->ring_dir == HAL_SRNG_SRC_RING)
2184 		curr_wm_val = hal_srng_src_num_avail(hal_soc_hdl, hal_ring_hdl,
2185 						     0);
2186 	else
2187 		curr_wm_val = hal_srng_dst_num_valid(hal_soc_hdl, hal_ring_hdl,
2188 						     0);
2189 
2190 	if (curr_wm_val > srng->high_wm.val) {
2191 		srng->high_wm.val = curr_wm_val;
2192 		srng->high_wm.timestamp = qdf_get_system_timestamp();
2193 	}
2194 
2195 	if (curr_wm_val >=
2196 		srng->high_wm.bin_thresh[HAL_SRNG_HIGH_WM_BIN_90_to_100])
2197 		srng->high_wm.bins[HAL_SRNG_HIGH_WM_BIN_90_to_100]++;
2198 	else if (curr_wm_val >=
2199 		 srng->high_wm.bin_thresh[HAL_SRNG_HIGH_WM_BIN_80_to_90])
2200 		srng->high_wm.bins[HAL_SRNG_HIGH_WM_BIN_80_to_90]++;
2201 	else if (curr_wm_val >=
2202 		 srng->high_wm.bin_thresh[HAL_SRNG_HIGH_WM_BIN_70_to_80])
2203 		srng->high_wm.bins[HAL_SRNG_HIGH_WM_BIN_70_to_80]++;
2204 	else if (curr_wm_val >=
2205 		 srng->high_wm.bin_thresh[HAL_SRNG_HIGH_WM_BIN_60_to_70])
2206 		srng->high_wm.bins[HAL_SRNG_HIGH_WM_BIN_60_to_70]++;
2207 	else if (curr_wm_val >=
2208 		 srng->high_wm.bin_thresh[HAL_SRNG_HIGH_WM_BIN_50_to_60])
2209 		srng->high_wm.bins[HAL_SRNG_HIGH_WM_BIN_50_to_60]++;
2210 	else
2211 		srng->high_wm.bins[HAL_SRNG_HIGH_WM_BIN_BELOW_50_PERCENT]++;
2212 }
2213 
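/*
 * Illustrative call pattern (hypothetical caller context): the update helper
 * is invoked from the ring-processing path while the SRNG lock is held, for
 * example right after hal_srng_access_start(), so the watermark reflects the
 * ring depth seen at the start of each processing pass.
 *
 *	hal_srng_access_start(hal_soc, ring_hdl);
 *	hal_srng_update_ring_usage_wm_no_lock(hal_soc_hdl, ring_hdl);
 *	// ... reap/process ring entries ...
 *	hal_srng_access_end(hal_soc, ring_hdl);
 */
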
2214 static inline
2215 int hal_dump_srng_high_wm_stats(hal_soc_handle_t hal_soc_hdl,
2216 				hal_ring_handle_t hal_ring_hdl,
2217 				char *buf, int buf_len, int pos)
2218 {
2219 	struct hal_srng *srng = (struct hal_srng *)hal_ring_hdl;
2220 
2221 	return qdf_scnprintf(buf + pos, buf_len - pos,
2222 			     "%8u %7u %12llu %10u %10u %10u %10u %10u %10u",
2223 			     srng->ring_id, srng->high_wm.val,
2224 			     srng->high_wm.timestamp,
2225 			     srng->high_wm.bins[HAL_SRNG_HIGH_WM_BIN_BELOW_50_PERCENT],
2226 			     srng->high_wm.bins[HAL_SRNG_HIGH_WM_BIN_50_to_60],
2227 			     srng->high_wm.bins[HAL_SRNG_HIGH_WM_BIN_60_to_70],
2228 			     srng->high_wm.bins[HAL_SRNG_HIGH_WM_BIN_70_to_80],
2229 			     srng->high_wm.bins[HAL_SRNG_HIGH_WM_BIN_80_to_90],
2230 			     srng->high_wm.bins[HAL_SRNG_HIGH_WM_BIN_90_to_100]);
2231 }
2232 #else
2233 /**
2234  * hal_srng_clear_ring_usage_wm_locked() - Clear SRNG usage watermark stats
2235  * @hal_soc_hdl: HAL soc handle
2236  * @hal_ring_hdl: SRNG handle
2237  *
2238  * This function tries to acquire SRNG lock, and hence should not be called
2239  * from a context which has already acquired the SRNG lock.
2240  *
2241  * Return: None
2242  */
2243 static inline
2244 void hal_srng_clear_ring_usage_wm_locked(hal_soc_handle_t hal_soc_hdl,
2245 					 hal_ring_handle_t hal_ring_hdl)
2246 {
2247 }
2248 
2249 /**
2250  * hal_srng_update_ring_usage_wm_no_lock() - Update the SRNG usage wm stats
2251  * @hal_soc_hdl: HAL soc handle
2252  * @hal_ring_hdl: SRNG handle
2253  *
2254  * This function should be called with the SRNG lock held.
2255  *
2256  * Return: None
2257  */
2258 static inline
2259 void hal_srng_update_ring_usage_wm_no_lock(hal_soc_handle_t hal_soc_hdl,
2260 					   hal_ring_handle_t hal_ring_hdl)
2261 {
2262 }
2263 
2264 static inline
2265 int hal_dump_srng_high_wm_stats(hal_soc_handle_t hal_soc_hdl,
2266 				hal_ring_handle_t hal_ring_hdl,
2267 				char *buf, int buf_len, int pos)
2268 {
2269 	return 0;
2270 }
2271 #endif
2272 
2273 /**
2274  * hal_srng_access_end_unlocked - End ring access (unlocked) - update cached
2275  * ring head/tail pointers to HW.
2276  *
2277  * @hal_soc: Opaque HAL SOC handle
2278  * @hal_ring_hdl: Ring pointer (Source or Destination ring)
2279  *
2280  * The target expects cached head/tail pointer to be updated to the
2281  * shared location in the little-endian order, This API ensures that.
2282  * This API should be used only if hal_srng_access_start_unlocked was used to
2283  * start ring access
2284  *
2285  * Return: None
2286  */
2287 static inline void
2288 hal_srng_access_end_unlocked(void *hal_soc, hal_ring_handle_t hal_ring_hdl)
2289 {
2290 	struct hal_srng *srng = (struct hal_srng *)hal_ring_hdl;
2291 
2292 	/* TODO: See if we need a write memory barrier here */
2293 	if (srng->flags & HAL_SRNG_LMAC_RING) {
2294 		/* For LMAC rings, ring pointer updates are done through FW and
2295 		 * hence written to a shared memory location that is read by FW
2296 		 */
2297 		if (srng->ring_dir == HAL_SRNG_SRC_RING) {
2298 			*srng->u.src_ring.hp_addr =
2299 				qdf_cpu_to_le32(srng->u.src_ring.hp);
2300 		} else {
2301 			*srng->u.dst_ring.tp_addr =
2302 				qdf_cpu_to_le32(srng->u.dst_ring.tp);
2303 		}
2304 	} else {
2305 		if (srng->ring_dir == HAL_SRNG_SRC_RING)
2306 			hal_srng_write_address_32_mb(hal_soc,
2307 						     srng,
2308 						     srng->u.src_ring.hp_addr,
2309 						     srng->u.src_ring.hp);
2310 		else
2311 			hal_srng_write_address_32_mb(hal_soc,
2312 						     srng,
2313 						     srng->u.dst_ring.tp_addr,
2314 						     srng->u.dst_ring.tp);
2315 	}
2316 }
2317 
2318 /* hal_srng_access_end_unlocked already handles endianness conversion,
2319  * use the same.
2320  */
2321 #define hal_le_srng_access_end_unlocked_in_cpu_order \
2322 	hal_srng_access_end_unlocked
2323 
2324 /**
2325  * hal_srng_access_end - Unlock ring access and update cached ring head/tail
2326  * pointers to HW
2327  *
2328  * @hal_soc: Opaque HAL SOC handle
2329  * @hal_ring_hdl: Ring pointer (Source or Destination ring)
2330  *
2331  * The target expects cached head/tail pointer to be updated to the
2332  * shared location in little-endian order; this API ensures that.
2333  * This API should be used only if hal_srng_access_start was used to
2334  * start ring access
2335  *
2336  */
2337 static inline void
2338 hal_srng_access_end(void *hal_soc, hal_ring_handle_t hal_ring_hdl)
2339 {
2340 	struct hal_srng *srng = (struct hal_srng *)hal_ring_hdl;
2341 
2342 	if (qdf_unlikely(!hal_ring_hdl)) {
2343 		qdf_print("Error: Invalid hal_ring\n");
2344 		return;
2345 	}
2346 
2347 	hal_srng_access_end_unlocked(hal_soc, hal_ring_hdl);
2348 	SRNG_UNLOCK(&(srng->lock));
2349 }
2350 
2351 #ifdef FEATURE_RUNTIME_PM
2352 #define hal_srng_access_end_v1 hal_srng_rtpm_access_end
2353 
2354 /**
2355  * hal_srng_rtpm_access_end - RTPM aware, Unlock ring access
2356  * @hal_soc: Opaque HAL SOC handle
2357  * @hal_ring_hdl: Ring pointer (Source or Destination ring)
2358  * @rtpm_id: Runtime PM ID
2360  *
2361  * Function updates the HP/TP value to the hardware register.
2362  * The target expects cached head/tail pointer to be updated to the
2363  * shared location in little-endian order; this API ensures that.
2364  * This API should be used only if hal_srng_access_start was used to
2365  * start ring access
2366  *
2367  * Return: None
2368  */
2369 void
2370 hal_srng_rtpm_access_end(hal_soc_handle_t hal_soc_hdl,
2371 			 hal_ring_handle_t hal_ring_hdl,
2372 			 uint32_t rtpm_id);
2373 #else
2374 #define hal_srng_access_end_v1(hal_soc_hdl, hal_ring_hdl, rtpm_id) \
2375 	hal_srng_access_end(hal_soc_hdl, hal_ring_hdl)
2376 #endif
2377 
2378 /* hal_srng_access_end already handles endianness conversion, so use the same */
2379 #define hal_le_srng_access_end_in_cpu_order \
2380 	hal_srng_access_end
2381 
2382 /**
2383  * hal_srng_access_end_reap - Unlock ring access
2384  * This should be used only if hal_srng_access_start was used to start ring access
2385  * and should be used only while reaping SRC ring completions
2386  *
2387  * @hal_soc: Opaque HAL SOC handle
2388  * @hal_ring_hdl: Ring pointer (Source or Destination ring)
2389  *
2390  * Return: None
2391  */
2392 static inline void
2393 hal_srng_access_end_reap(void *hal_soc, hal_ring_handle_t hal_ring_hdl)
2394 {
2395 	struct hal_srng *srng = (struct hal_srng *)hal_ring_hdl;
2396 
2397 	SRNG_UNLOCK(&(srng->lock));
2398 }
2399 
2400 /* TODO: Check if the following definitions is available in HW headers */
2401 #define WBM_IDLE_SCATTER_BUF_SIZE 32704
2402 #define NUM_MPDUS_PER_LINK_DESC 6
2403 #define NUM_MSDUS_PER_LINK_DESC 7
2404 #define REO_QUEUE_DESC_ALIGN 128
2405 
2406 #define LINK_DESC_ALIGN 128
2407 
2408 #define ADDRESS_MATCH_TAG_VAL 0x5
2409 /* Number of mpdu link pointers is 9 in case of TX_MPDU_QUEUE_HEAD and 14 in
2410  * case of TX_MPDU_QUEUE_EXT. We are defining a common average count here
2411  */
2412 #define NUM_MPDU_LINKS_PER_QUEUE_DESC 12
2413 
2414 /* TODO: Check with HW team on the scatter buffer size supported. As per WBM
2415  * MLD, scatter_buffer_size in IDLE_LIST_CONTROL register is 9 bits and size
2416  * should be specified in 16 word units. But the number of bits defined for
2417  * this field in HW header files is 5.
2418  */
2419 #define WBM_IDLE_SCATTER_BUF_NEXT_PTR_SIZE 8
2420 
2421 
2422 /**
2423  * hal_idle_list_scatter_buf_size - Get the size of each scatter buffer
2424  * in an idle list
2425  *
2426  * @hal_soc: Opaque HAL SOC handle
2427  *
2428  */
2429 static inline
2430 uint32_t hal_idle_list_scatter_buf_size(hal_soc_handle_t hal_soc_hdl)
2431 {
2432 	return WBM_IDLE_SCATTER_BUF_SIZE;
2433 }
2434 
2435 /**
2436  * hal_get_link_desc_size - Get the size of each link descriptor
2437  *
2438  * @hal_soc: Opaque HAL SOC handle
2439  *
2440  */
2441 static inline uint32_t hal_get_link_desc_size(hal_soc_handle_t hal_soc_hdl)
2442 {
2443 	struct hal_soc *hal_soc = (struct hal_soc *)hal_soc_hdl;
2444 
2445 	if (!hal_soc || !hal_soc->ops) {
2446 		qdf_print("Error: Invalid ops\n");
2447 		QDF_BUG(0);
2448 		return -EINVAL;
2449 	}
2450 	if (!hal_soc->ops->hal_get_link_desc_size) {
2451 		qdf_print("Error: Invalid function pointer\n");
2452 		QDF_BUG(0);
2453 		return -EINVAL;
2454 	}
2455 	return hal_soc->ops->hal_get_link_desc_size();
2456 }
2457 
2458 /**
2459  * hal_get_link_desc_align - Get the required start address alignment for
2460  * link descriptors
2461  *
2462  * @hal_soc: Opaque HAL SOC handle
2463  *
2464  */
2465 static inline
2466 uint32_t hal_get_link_desc_align(hal_soc_handle_t hal_soc_hdl)
2467 {
2468 	return LINK_DESC_ALIGN;
2469 }
2470 
2471 /**
2472  * hal_num_mpdus_per_link_desc - Get number of mpdus each link desc can hold
2473  *
2474  * @hal_soc: Opaque HAL SOC handle
2475  *
2476  */
2477 static inline
2478 uint32_t hal_num_mpdus_per_link_desc(hal_soc_handle_t hal_soc_hdl)
2479 {
2480 	return NUM_MPDUS_PER_LINK_DESC;
2481 }
2482 
2483 /**
2484  * hal_num_msdus_per_link_desc - Get number of msdus each link desc can hold
2485  *
2486  * @hal_soc: Opaque HAL SOC handle
2487  *
2488  */
2489 static inline
2490 uint32_t hal_num_msdus_per_link_desc(hal_soc_handle_t hal_soc_hdl)
2491 {
2492 	return NUM_MSDUS_PER_LINK_DESC;
2493 }
2494 
2495 /**
2496  * hal_num_mpdu_links_per_queue_desc - Get number of mpdu links each queue
2497  * descriptor can hold
2498  *
2499  * @hal_soc: Opaque HAL SOC handle
2500  *
2501  */
2502 static inline
2503 uint32_t hal_num_mpdu_links_per_queue_desc(hal_soc_handle_t hal_soc_hdl)
2504 {
2505 	return NUM_MPDU_LINKS_PER_QUEUE_DESC;
2506 }
2507 
2508 /**
2509  * hal_idle_scatter_buf_num_entries - Get the number of link desc entries
2510  * that can fit in the given scatter buffer size
2511  *
2512  * @hal_soc: Opaque HAL SOC handle
2513  * @scatter_buf_size: Size of scatter buffer
2514  *
2515  */
2516 static inline
2517 uint32_t hal_idle_scatter_buf_num_entries(hal_soc_handle_t hal_soc_hdl,
2518 					  uint32_t scatter_buf_size)
2519 {
2520 	return (scatter_buf_size - WBM_IDLE_SCATTER_BUF_NEXT_PTR_SIZE) /
2521 		hal_srng_get_entrysize(hal_soc_hdl, WBM_IDLE_LINK);
2522 }
2523 
2524 /**
2525  * hal_idle_list_num_scatter_bufs - Get the number of scatter buffers
2526  * needed for the given total memory and scatter buffer size
2527  *
2528  * @hal_soc: Opaque HAL SOC handle
2529  * @total_mem: size of memory to be scattered
2530  * @scatter_buf_size: Size of scatter buffer
2531  *
2532  */
2533 static inline
2534 uint32_t hal_idle_list_num_scatter_bufs(hal_soc_handle_t hal_soc_hdl,
2535 					uint32_t total_mem,
2536 					uint32_t scatter_buf_size)
2537 {
2538 	uint8_t rem = (total_mem % (scatter_buf_size -
2539 			WBM_IDLE_SCATTER_BUF_NEXT_PTR_SIZE)) ? 1 : 0;
2540 
2541 	uint32_t num_scatter_bufs = (total_mem / (scatter_buf_size -
2542 				WBM_IDLE_SCATTER_BUF_NEXT_PTR_SIZE)) + rem;
2543 
2544 	return num_scatter_bufs;
2545 }
2546 
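/*
 * Worked example (illustrative numbers): to scatter total_mem = 100000 bytes
 * with scatter_buf_size = 32704 bytes, each buffer holds 32704 - 8 = 32696
 * usable bytes, so 100000 / 32696 = 3 full buffers plus a non-zero
 * remainder, giving num_scatter_bufs = 4.
 */
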
2547 enum hal_pn_type {
2548 	HAL_PN_NONE,
2549 	HAL_PN_WPA,
2550 	HAL_PN_WAPI_EVEN,
2551 	HAL_PN_WAPI_UNEVEN,
2552 };
2553 
2554 #define HAL_RX_BA_WINDOW_256 256
2555 #define HAL_RX_BA_WINDOW_1024 1024
2556 
2557 /**
2558  * hal_get_reo_qdesc_align - Get start address alignment for reo
2559  * queue descriptors
2560  *
2561  * @hal_soc: Opaque HAL SOC handle
2562  *
2563  */
2564 static inline
2565 uint32_t hal_get_reo_qdesc_align(hal_soc_handle_t hal_soc_hdl)
2566 {
2567 	return REO_QUEUE_DESC_ALIGN;
2568 }
2569 
2570 /**
2571  * hal_srng_get_hp_addr - Get head pointer physical address
2572  *
2573  * @hal_soc: Opaque HAL SOC handle
2574  * @hal_ring_hdl: Ring pointer (Source or Destination ring)
2575  *
2576  */
2577 static inline qdf_dma_addr_t
2578 hal_srng_get_hp_addr(void *hal_soc,
2579 		     hal_ring_handle_t hal_ring_hdl)
2580 {
2581 	struct hal_srng *srng = (struct hal_srng *)hal_ring_hdl;
2582 	struct hal_soc *hal = (struct hal_soc *)hal_soc;
2583 
2584 	if (srng->ring_dir == HAL_SRNG_SRC_RING) {
2585 		return hal->shadow_wrptr_mem_paddr +
2586 		  ((unsigned long)(srng->u.src_ring.hp_addr) -
2587 		  (unsigned long)(hal->shadow_wrptr_mem_vaddr));
2588 	} else {
2589 		return hal->shadow_rdptr_mem_paddr +
2590 		  ((unsigned long)(srng->u.dst_ring.hp_addr) -
2591 		   (unsigned long)(hal->shadow_rdptr_mem_vaddr));
2592 	}
2593 }
2594 
2595 /**
2596  * hal_srng_get_tp_addr - Get tail pointer physical address
2597  *
2598  * @hal_soc: Opaque HAL SOC handle
2599  * @hal_ring_hdl: Ring pointer (Source or Destination ring)
2600  *
2601  */
2602 static inline qdf_dma_addr_t
2603 hal_srng_get_tp_addr(void *hal_soc, hal_ring_handle_t hal_ring_hdl)
2604 {
2605 	struct hal_srng *srng = (struct hal_srng *)hal_ring_hdl;
2606 	struct hal_soc *hal = (struct hal_soc *)hal_soc;
2607 
2608 	if (srng->ring_dir == HAL_SRNG_SRC_RING) {
2609 		return hal->shadow_rdptr_mem_paddr +
2610 			((unsigned long)(srng->u.src_ring.tp_addr) -
2611 			(unsigned long)(hal->shadow_rdptr_mem_vaddr));
2612 	} else {
2613 		return hal->shadow_wrptr_mem_paddr +
2614 			((unsigned long)(srng->u.dst_ring.tp_addr) -
2615 			(unsigned long)(hal->shadow_wrptr_mem_vaddr));
2616 	}
2617 }
2618 
2619 /**
2620  * hal_srng_get_num_entries - Get total entries in the HAL Srng
2621  *
2622  * @hal_soc: Opaque HAL SOC handle
2623  * @hal_ring_hdl: Ring pointer (Source or Destination ring)
2624  *
2625  * Return: total number of entries in hal ring
2626  */
2627 static inline
2628 uint32_t hal_srng_get_num_entries(hal_soc_handle_t hal_soc_hdl,
2629 				  hal_ring_handle_t hal_ring_hdl)
2630 {
2631 	struct hal_srng *srng = (struct hal_srng *)hal_ring_hdl;
2632 
2633 	return srng->num_entries;
2634 }
2635 
2636 /**
2637  * hal_get_srng_params - Retrieve SRNG parameters for a given ring from HAL
2638  *
2639  * @hal_soc: Opaque HAL SOC handle
2640  * @hal_ring_hdl: Ring pointer (Source or Destination ring)
2641  * @ring_params: SRNG parameters will be returned through this structure
2642  */
2643 void hal_get_srng_params(hal_soc_handle_t hal_soc_hdl,
2644 			 hal_ring_handle_t hal_ring_hdl,
2645 			 struct hal_srng_params *ring_params);
2646 
2647 /**
2648  * hal_get_meminfo - Retrieve hal memory base address
2649  *
2650  * @hal_soc: Opaque HAL SOC handle
2651  * @mem: pointer to structure to be updated with hal mem info
2652  */
2653 void hal_get_meminfo(hal_soc_handle_t hal_soc_hdl, struct hal_mem_info *mem);
2654 
2655 /**
2656  * hal_get_target_type - Return target type
2657  *
2658  * @hal_soc: Opaque HAL SOC handle
2659  */
2660 uint32_t hal_get_target_type(hal_soc_handle_t hal_soc_hdl);
2661 
2662 /**
2663  * hal_srng_dst_hw_init - Private function to initialize SRNG
2664  * destination ring HW
2665  * @hal_soc: HAL SOC handle
2666  * @srng: SRNG ring pointer
2667  */
2668 static inline void hal_srng_dst_hw_init(struct hal_soc *hal,
2669 	struct hal_srng *srng)
2670 {
2671 	hal->ops->hal_srng_dst_hw_init(hal, srng);
2672 }
2673 
2674 /**
2675  * hal_srng_src_hw_init - Private function to initialize SRNG
2676  * source ring HW
2677  * @hal_soc: HAL SOC handle
2678  * @srng: SRNG ring pointer
2679  */
2680 static inline void hal_srng_src_hw_init(struct hal_soc *hal,
2681 	struct hal_srng *srng)
2682 {
2683 	hal->ops->hal_srng_src_hw_init(hal, srng);
2684 }
2685 
2686 /**
2687  * hal_get_hw_hptp()  - Get HW head and tail pointer value for any ring
2688  * @hal_soc: Opaque HAL SOC handle
2689  * @hal_ring_hdl: Source ring pointer
2690  * @headp: Head Pointer
2691  * @tailp: Tail Pointer
2692  * @ring_type: Ring
2693  *
2694  * Return: Update tail pointer and head pointer in arguments.
2695  */
2696 static inline
2697 void hal_get_hw_hptp(hal_soc_handle_t hal_soc_hdl,
2698 		     hal_ring_handle_t hal_ring_hdl,
2699 		     uint32_t *headp, uint32_t *tailp,
2700 		     uint8_t ring_type)
2701 {
2702 	struct hal_soc *hal_soc = (struct hal_soc *)hal_soc_hdl;
2703 
2704 	hal_soc->ops->hal_get_hw_hptp(hal_soc, hal_ring_hdl,
2705 			headp, tailp, ring_type);
2706 }
2707 
2708 /**
2709  * hal_reo_setup - Initialize HW REO block
2710  *
2711  * @hal_soc: Opaque HAL SOC handle
2712  * @reo_params: parameters needed by HAL for REO config
2713  */
2714 static inline void hal_reo_setup(hal_soc_handle_t hal_soc_hdl,
2715 				 void *reoparams)
2716 {
2717 	struct hal_soc *hal_soc = (struct hal_soc *)hal_soc_hdl;
2718 
2719 	hal_soc->ops->hal_reo_setup(hal_soc, reoparams);
2720 }
2721 
2722 static inline
2723 void hal_compute_reo_remap_ix2_ix3(hal_soc_handle_t hal_soc_hdl,
2724 				   uint32_t *ring, uint32_t num_rings,
2725 				   uint32_t *remap1, uint32_t *remap2)
2726 {
2727 	struct hal_soc *hal_soc = (struct hal_soc *)hal_soc_hdl;
2728 
2729 	return hal_soc->ops->hal_compute_reo_remap_ix2_ix3(ring,
2730 					num_rings, remap1, remap2);
2731 }
2732 
2733 static inline
2734 void hal_compute_reo_remap_ix0(hal_soc_handle_t hal_soc_hdl, uint32_t *remap0)
2735 {
2736 	struct hal_soc *hal_soc = (struct hal_soc *)hal_soc_hdl;
2737 
2738 	if (hal_soc->ops->hal_compute_reo_remap_ix0)
2739 		hal_soc->ops->hal_compute_reo_remap_ix0(remap0);
2740 }
2741 
2742 /**
2743  * hal_setup_link_idle_list - Setup scattered idle list using the
2744  * buffer list provided
2745  *
2746  * @hal_soc: Opaque HAL SOC handle
2747  * @scatter_bufs_base_paddr: Array of physical base addresses
2748  * @scatter_bufs_base_vaddr: Array of virtual base addresses
2749  * @num_scatter_bufs: Number of scatter buffers in the above lists
2750  * @scatter_buf_size: Size of each scatter buffer
2751  * @last_buf_end_offset: Offset to the last entry
2752  * @num_entries: Total entries of all scatter bufs
2753  *
2754  */
2755 static inline
2756 void hal_setup_link_idle_list(hal_soc_handle_t hal_soc_hdl,
2757 			      qdf_dma_addr_t scatter_bufs_base_paddr[],
2758 			      void *scatter_bufs_base_vaddr[],
2759 			      uint32_t num_scatter_bufs,
2760 			      uint32_t scatter_buf_size,
2761 			      uint32_t last_buf_end_offset,
2762 			      uint32_t num_entries)
2763 {
2764 	struct hal_soc *hal_soc = (struct hal_soc *)hal_soc_hdl;
2765 
2766 	hal_soc->ops->hal_setup_link_idle_list(hal_soc, scatter_bufs_base_paddr,
2767 			scatter_bufs_base_vaddr, num_scatter_bufs,
2768 			scatter_buf_size, last_buf_end_offset,
2769 			num_entries);
2770 
2771 }
2772 
2773 #ifdef DUMP_REO_QUEUE_INFO_IN_DDR
2774 /**
2775  * hal_dump_rx_reo_queue_desc() - Dump reo queue descriptor fields
2776  * @hw_qdesc_vaddr_aligned: Pointer to hw reo queue desc virtual addr
2777  *
2778  * Use the virtual addr pointer to reo h/w queue desc to read
2779  * the values from ddr and log them.
2780  *
2781  * Return: none
2782  */
2783 static inline void hal_dump_rx_reo_queue_desc(
2784 	void *hw_qdesc_vaddr_aligned)
2785 {
2786 	struct rx_reo_queue *hw_qdesc =
2787 		(struct rx_reo_queue *)hw_qdesc_vaddr_aligned;
2788 
2789 	if (!hw_qdesc)
2790 		return;
2791 
2792 	hal_info("receive_queue_number %u vld %u window_jump_2k %u"
2793 		 " hole_count %u ba_window_size %u ignore_ampdu_flag %u"
2794 		 " svld %u ssn %u current_index %u"
2795 		 " disable_duplicate_detection %u soft_reorder_enable %u"
2796 		 " chk_2k_mode %u oor_mode %u mpdu_frames_processed_count %u"
2797 		 " msdu_frames_processed_count %u total_processed_byte_count %u"
2798 		 " late_receive_mpdu_count %u seq_2k_error_detected_flag %u"
2799 		 " pn_error_detected_flag %u current_mpdu_count %u"
2800 		 " current_msdu_count %u timeout_count %u"
2801 		 " forward_due_to_bar_count %u duplicate_count %u"
2802 		 " frames_in_order_count %u bar_received_count %u"
2803 		 " pn_check_needed %u pn_shall_be_even %u"
2804 		 " pn_shall_be_uneven %u pn_size %u",
2805 		 hw_qdesc->receive_queue_number,
2806 		 hw_qdesc->vld,
2807 		 hw_qdesc->window_jump_2k,
2808 		 hw_qdesc->hole_count,
2809 		 hw_qdesc->ba_window_size,
2810 		 hw_qdesc->ignore_ampdu_flag,
2811 		 hw_qdesc->svld,
2812 		 hw_qdesc->ssn,
2813 		 hw_qdesc->current_index,
2814 		 hw_qdesc->disable_duplicate_detection,
2815 		 hw_qdesc->soft_reorder_enable,
2816 		 hw_qdesc->chk_2k_mode,
2817 		 hw_qdesc->oor_mode,
2818 		 hw_qdesc->mpdu_frames_processed_count,
2819 		 hw_qdesc->msdu_frames_processed_count,
2820 		 hw_qdesc->total_processed_byte_count,
2821 		 hw_qdesc->late_receive_mpdu_count,
2822 		 hw_qdesc->seq_2k_error_detected_flag,
2823 		 hw_qdesc->pn_error_detected_flag,
2824 		 hw_qdesc->current_mpdu_count,
2825 		 hw_qdesc->current_msdu_count,
2826 		 hw_qdesc->timeout_count,
2827 		 hw_qdesc->forward_due_to_bar_count,
2828 		 hw_qdesc->duplicate_count,
2829 		 hw_qdesc->frames_in_order_count,
2830 		 hw_qdesc->bar_received_count,
2831 		 hw_qdesc->pn_check_needed,
2832 		 hw_qdesc->pn_shall_be_even,
2833 		 hw_qdesc->pn_shall_be_uneven,
2834 		 hw_qdesc->pn_size);
2835 }
2836 
2837 #else /* DUMP_REO_QUEUE_INFO_IN_DDR */
2838 
2839 static inline void hal_dump_rx_reo_queue_desc(
2840 	void *hw_qdesc_vaddr_aligned)
2841 {
2842 }
2843 #endif /* DUMP_REO_QUEUE_INFO_IN_DDR */
2844 
2845 /**
2846  * hal_srng_dump_ring_desc() - Dump ring descriptor info
2847  *
2848  * @hal_soc: Opaque HAL SOC handle
2849  * @hal_ring_hdl: Source ring pointer
2850  * @ring_desc: Opaque ring descriptor handle
2851  */
2852 static inline void hal_srng_dump_ring_desc(hal_soc_handle_t hal_soc_hdl,
2853 					   hal_ring_handle_t hal_ring_hdl,
2854 					   hal_ring_desc_t ring_desc)
2855 {
2856 	struct hal_srng *srng = (struct hal_srng *)hal_ring_hdl;
2857 
2858 	QDF_TRACE_HEX_DUMP(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_HIGH,
2859 			   ring_desc, (srng->entry_size << 2));
2860 }
2861 
2862 /**
2863  * hal_srng_dump_ring() - Dump last 128 descs of the ring
2864  *
2865  * @hal_soc: Opaque HAL SOC handle
2866  * @hal_ring_hdl: Source ring pointer
2867  */
2868 static inline void hal_srng_dump_ring(hal_soc_handle_t hal_soc_hdl,
2869 				      hal_ring_handle_t hal_ring_hdl)
2870 {
2871 	struct hal_srng *srng = (struct hal_srng *)hal_ring_hdl;
2872 	uint32_t *desc;
2873 	uint32_t tp, i;
2874 
2875 	tp = srng->u.dst_ring.tp;
2876 
2877 	for (i = 0; i < 128; i++) {
2878 		if (!tp)
2879 			tp = srng->ring_size;
2880 
2881 		desc = &srng->ring_base_vaddr[tp - srng->entry_size];
2882 		QDF_TRACE_HEX_DUMP(QDF_MODULE_ID_DP,
2883 				   QDF_TRACE_LEVEL_DEBUG,
2884 				   desc, (srng->entry_size << 2));
2885 
2886 		tp -= srng->entry_size;
2887 	}
2888 }
2889 
2890 /**
2891  * hal_rxdma_desc_to_hal_ring_desc - API to convert rxdma ring desc
2892  * to opaque dp_ring desc type
2893  * @ring_desc: rxdma ring desc
2894  *
2895  * Return: hal_rxdma_desc_t type
2896  */
2897 static inline
2898 hal_ring_desc_t hal_rxdma_desc_to_hal_ring_desc(hal_rxdma_desc_t ring_desc)
2899 {
2900 	return (hal_ring_desc_t)ring_desc;
2901 }
2902 
2903 /**
2904  * hal_srng_set_event() - Set hal_srng event
2905  * @hal_ring_hdl: Source ring pointer
2906  * @event: SRNG ring event
2907  *
2908  * Return: None
2909  */
2910 static inline void hal_srng_set_event(hal_ring_handle_t hal_ring_hdl, int event)
2911 {
2912 	struct hal_srng *srng = (struct hal_srng *)hal_ring_hdl;
2913 
2914 	qdf_atomic_set_bit(event, &srng->srng_event);
2915 }
2916 
2917 /**
2918  * hal_srng_clear_event() - Clear hal_srng event
2919  * @hal_ring_hdl: Source ring pointer
2920  * @event: SRNG ring event
2921  *
2922  * Return: None
2923  */
2924 static inline
2925 void hal_srng_clear_event(hal_ring_handle_t hal_ring_hdl, int event)
2926 {
2927 	struct hal_srng *srng = (struct hal_srng *)hal_ring_hdl;
2928 
2929 	qdf_atomic_clear_bit(event, &srng->srng_event);
2930 }
2931 
2932 /**
2933  * hal_srng_get_clear_event() - Clear srng event and return old value
2934  * @hal_ring_hdl: Source ring pointer
2935  * @event: SRNG ring event
2936  *
2937  * Return: Return old event value
2938  */
2939 static inline
2940 int hal_srng_get_clear_event(hal_ring_handle_t hal_ring_hdl, int event)
2941 {
2942 	struct hal_srng *srng = (struct hal_srng *)hal_ring_hdl;
2943 
2944 	return qdf_atomic_test_and_clear_bit(event, &srng->srng_event);
2945 }
2946 
2947 /**
2948  * hal_srng_set_flush_last_ts() - Record last flush time stamp
2949  * @hal_ring_hdl: Source ring pointer
2950  *
2951  * Return: None
2952  */
2953 static inline void hal_srng_set_flush_last_ts(hal_ring_handle_t hal_ring_hdl)
2954 {
2955 	struct hal_srng *srng = (struct hal_srng *)hal_ring_hdl;
2956 
2957 	srng->last_flush_ts = qdf_get_log_timestamp();
2958 }
2959 
2960 /**
2961  * hal_srng_inc_flush_cnt() - Increment flush counter
2962  * @hal_ring_hdl: Source ring pointer
2963  *
2964  * Return: None
2965  */
2966 static inline void hal_srng_inc_flush_cnt(hal_ring_handle_t hal_ring_hdl)
2967 {
2968 	struct hal_srng *srng = (struct hal_srng *)hal_ring_hdl;
2969 
2970 	srng->flush_count++;
2971 }
2972 
2973 /**
2974  * hal_rx_sw_mon_desc_info_get () - Get SW monitor desc info
2975  *
2976  * @hal: Core HAL soc handle
2977  * @ring_desc: Mon dest ring descriptor
2978  * @desc_info: Desc info to be populated
2979  *
2980  * Return void
2981  */
2982 static inline void
2983 hal_rx_sw_mon_desc_info_get(struct hal_soc *hal,
2984 			    hal_ring_desc_t ring_desc,
2985 			    hal_rx_mon_desc_info_t desc_info)
2986 {
2987 	return hal->ops->hal_rx_sw_mon_desc_info_get(ring_desc, desc_info);
2988 }
2989 
2990 /**
2991  * hal_reo_set_err_dst_remap() - Set REO error destination ring remap
2992  *				 register value.
2993  *
2994  * @hal_soc_hdl: Opaque HAL soc handle
2995  *
2996  * Return: None
2997  */
2998 static inline void hal_reo_set_err_dst_remap(hal_soc_handle_t hal_soc_hdl)
2999 {
3000 	struct hal_soc *hal_soc = (struct hal_soc *)hal_soc_hdl;
3001 
3002 	if (hal_soc->ops->hal_reo_set_err_dst_remap)
3003 		hal_soc->ops->hal_reo_set_err_dst_remap(hal_soc);
3004 }
3005 
3006 /**
3007  * hal_reo_enable_pn_in_dest() - Subscribe for previous PN for 2k-jump or
3008  *			OOR error frames
3009  * @hal_soc_hdl: Opaque HAL soc handle
3010  *
3011  * Return: true if feature is enabled,
3012  *	false, otherwise.
3013  */
3014 static inline uint8_t
3015 hal_reo_enable_pn_in_dest(hal_soc_handle_t hal_soc_hdl)
3016 {
3017 	struct hal_soc *hal_soc = (struct hal_soc *)hal_soc_hdl;
3018 
3019 	if (hal_soc->ops->hal_reo_enable_pn_in_dest)
3020 		return hal_soc->ops->hal_reo_enable_pn_in_dest(hal_soc);
3021 
3022 	return 0;
3023 }
3024 
3025 #ifdef GENERIC_SHADOW_REGISTER_ACCESS_ENABLE
3026 
3027 /**
3028  * hal_set_one_target_reg_config() - Populate the target reg
3029  * offset in hal_soc for one non srng related register at the
3030  * given list index
3031  * @hal_soc: hal handle
3032  * @target_reg_offset: target register offset
3033  * @list_index: index in hal list for shadow regs
3034  *
3035  * Return: none
3036  */
3037 void hal_set_one_target_reg_config(struct hal_soc *hal,
3038 				   uint32_t target_reg_offset,
3039 				   int list_index);
3040 
3041 /**
3042  * hal_set_shadow_regs() - Populate register offset for
3043  * registers that need to be populated in list_shadow_reg_config
3044  * in order to be sent to FW. These reg offsets will be mapped
3045  * to shadow registers.
3046  * @hal_soc: hal handle
3047  *
3048  * Return: QDF_STATUS_OK on success
3049  */
3050 QDF_STATUS hal_set_shadow_regs(void *hal_soc);
3051 
3052 /**
3053  * hal_construct_shadow_regs() - initialize the shadow registers
3054  * for non-srng related register configs
3055  * @hal_soc: hal handle
3056  *
3057  * Return: QDF_STATUS_OK on success
3058  */
3059 QDF_STATUS hal_construct_shadow_regs(void *hal_soc);
3060 
3061 #else /* GENERIC_SHADOW_REGISTER_ACCESS_ENABLE */
3062 static inline void hal_set_one_target_reg_config(
3063 	struct hal_soc *hal,
3064 	uint32_t target_reg_offset,
3065 	int list_index)
3066 {
3067 }
3068 
3069 static inline QDF_STATUS hal_set_shadow_regs(void *hal_soc)
3070 {
3071 	return QDF_STATUS_SUCCESS;
3072 }
3073 
3074 static inline QDF_STATUS hal_construct_shadow_regs(void *hal_soc)
3075 {
3076 	return QDF_STATUS_SUCCESS;
3077 }
3078 #endif /* GENERIC_SHADOW_REGISTER_ACCESS_ENABLE */
3079 
3080 #ifdef FEATURE_HAL_DELAYED_REG_WRITE
3081 /**
3082  * hal_flush_reg_write_work() - flush all writes from register write queue
3083  * @arg: hal_soc pointer
3084  *
3085  * Return: None
3086  */
3087 void hal_flush_reg_write_work(hal_soc_handle_t hal_handle);
3088 
3089 #else
3090 static inline void hal_flush_reg_write_work(hal_soc_handle_t hal_handle) { }
3091 #endif
3092 
3093 /**
3094  * hal_get_ring_usage - Calculate the ring usage percentage
3095  * @hal_ring_hdl: Ring pointer
3096  * @ring_type: Ring type
3097  * @headp: pointer to head value
3098  * @tailp: pointer to tail value
3099  *
3100  * Calculate the ring usage percentage for src and dest rings
3101  *
3102  * Return: Ring usage percentage
3103  */
3104 static inline
3105 uint32_t hal_get_ring_usage(
3106 	hal_ring_handle_t hal_ring_hdl,
3107 	enum hal_ring_type ring_type, uint32_t *headp, uint32_t *tailp)
3108 {
3109 	struct hal_srng *srng = (struct hal_srng *)hal_ring_hdl;
3110 	uint32_t num_avail, num_valid = 0;
3111 	uint32_t ring_usage;
3112 
3113 	if (srng->ring_dir == HAL_SRNG_SRC_RING) {
3114 		if (*tailp > *headp)
3115 			num_avail =  ((*tailp - *headp) / srng->entry_size) - 1;
3116 		else
3117 			num_avail = ((srng->ring_size - *headp + *tailp) /
3118 				     srng->entry_size) - 1;
3119 		if (ring_type == WBM_IDLE_LINK)
3120 			num_valid = num_avail;
3121 		else
3122 			num_valid = srng->num_entries - num_avail;
3123 	} else {
3124 		if (*headp >= *tailp)
3125 			num_valid = ((*headp - *tailp) / srng->entry_size);
3126 		else
3127 			num_valid = ((srng->ring_size - *tailp + *headp) /
3128 				     srng->entry_size);
3129 	}
3130 	ring_usage = (100 * num_valid) / srng->num_entries;
3131 	return ring_usage;
3132 }
3133 
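/*
 * Worked example (illustrative numbers): for a destination ring with
 * num_entries = 512, entry_size = 8 words, ring_size = 4096 words,
 * *headp = 2048 and *tailp = 1024, num_valid = (2048 - 1024) / 8 = 128,
 * so the usage is (100 * 128) / 512 = 25 percent.
 */
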
3134 /**
3135  * hal_cmem_write() - function for CMEM buffer writing
3136  * @hal_soc_hdl: HAL SOC handle
3137  * @offset: CMEM address
3138  * @value: value to write
3139  *
3140  * Return: None.
3141  */
3142 static inline void
3143 hal_cmem_write(hal_soc_handle_t hal_soc_hdl, uint32_t offset,
3144 	       uint32_t value)
3145 {
3146 	struct hal_soc *hal_soc = (struct hal_soc *)hal_soc_hdl;
3147 
3148 	if (hal_soc->ops->hal_cmem_write)
3149 		hal_soc->ops->hal_cmem_write(hal_soc_hdl, offset, value);
3150 
3151 	return;
3152 }
3153 
3154 static inline bool
3155 hal_dmac_cmn_src_rxbuf_ring_get(hal_soc_handle_t hal_soc_hdl)
3156 {
3157 	struct hal_soc *hal_soc = (struct hal_soc *)hal_soc_hdl;
3158 
3159 	return hal_soc->dmac_cmn_src_rxbuf_ring;
3160 }
3161 
3162 /**
3163  * hal_srng_dst_prefetch() - function to prefetch 4 destination ring descs
3164  * @hal_soc_hdl: HAL SOC handle
3165  * @hal_ring_hdl: Destination ring pointer
3166  * @num_valid: valid entries in the ring
3167  *
3168  * Return: last prefetched destination ring descriptor
3169  */
3170 static inline
3171 void *hal_srng_dst_prefetch(hal_soc_handle_t hal_soc_hdl,
3172 			    hal_ring_handle_t hal_ring_hdl,
3173 			    uint16_t num_valid)
3174 {
3175 	struct hal_srng *srng = (struct hal_srng *)hal_ring_hdl;
3176 	uint8_t *desc;
3177 	uint32_t cnt;
3178 	/*
3179 	 * prefetching 4 HW descriptors ensures that, by the time the 5th HW
3180 	 * descriptor is being processed, the 5th HW descriptor, its SW desc,
3181 	 * its nbuf and its nbuf's data are already in the cache line;
3182 	 * basically all four (HW desc, SW desc, nbuf & nbuf->data) are
3183 	 * prefetched.
3184 	 */
3185 	uint32_t max_prefetch = 4;
3186 
3187 	if (srng->u.dst_ring.tp == srng->u.dst_ring.cached_hp)
3188 		return NULL;
3189 
3190 	desc = (uint8_t *)&srng->ring_base_vaddr[srng->u.dst_ring.tp];
3191 
3192 	if (num_valid < max_prefetch)
3193 		max_prefetch = num_valid;
3194 
3195 	for (cnt = 0; cnt < max_prefetch; cnt++) {
3196 		desc += srng->entry_size * sizeof(uint32_t);
3197 		if (desc  == ((uint8_t *)srng->ring_vaddr_end))
3198 			desc = (uint8_t *)&srng->ring_base_vaddr[0];
3199 
3200 		qdf_prefetch(desc);
3201 	}
3202 	return (void *)desc;
3203 }
3204 
3205 /**
3206  * hal_srng_dst_prefetch_next_cached_desc() - function to prefetch next desc
3207  * @hal_soc_hdl: HAL SOC handle
3208  * @hal_ring_hdl: Destination ring pointer
3209  * @last_prefetched_hw_desc: last prefetched HW descriptor
3210  *
3211  * Return: next prefetched destination descriptor
3212  */
3213 static inline
3214 void *hal_srng_dst_prefetch_next_cached_desc(hal_soc_handle_t hal_soc_hdl,
3215 					     hal_ring_handle_t hal_ring_hdl,
3216 					     uint8_t *last_prefetched_hw_desc)
3217 {
3218 	struct hal_srng *srng = (struct hal_srng *)hal_ring_hdl;
3219 
3220 	if (srng->u.dst_ring.tp == srng->u.dst_ring.cached_hp)
3221 		return NULL;
3222 
3223 	last_prefetched_hw_desc += srng->entry_size * sizeof(uint32_t);
3224 	if (last_prefetched_hw_desc == ((uint8_t *)srng->ring_vaddr_end))
3225 		last_prefetched_hw_desc = (uint8_t *)&srng->ring_base_vaddr[0];
3226 
3227 	qdf_prefetch(last_prefetched_hw_desc);
3228 	return (void *)last_prefetched_hw_desc;
3229 }
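
/*
 * Illustrative Rx loop (sketch only; the processing step is left to the
 * caller) combining the two prefetch helpers above: a few descriptors are
 * prefetched up front, and each iteration prefetches one more while the
 * current one is processed.
 *
 *	last_hw_desc = hal_srng_dst_prefetch(hal_soc_hdl, dst_ring_hdl,
 *					     num_valid);
 *	while (num_valid--) {
 *		// process the current descriptor, then prefetch one more
 *		last_hw_desc = hal_srng_dst_prefetch_next_cached_desc(
 *				hal_soc_hdl, dst_ring_hdl, last_hw_desc);
 *	}
 */
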
3230 #endif /* _HAL_API_H_ */
3231