1 /*
2  * Copyright (c) 2016-2021 The Linux Foundation. All rights reserved.
3  * Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved.
4  *
5  * Permission to use, copy, modify, and/or distribute this software for
6  * any purpose with or without fee is hereby granted, provided that the
7  * above copyright notice and this permission notice appear in all
8  * copies.
9  *
10  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
11  * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
12  * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
13  * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
14  * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
15  * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
16  * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
17  * PERFORMANCE OF THIS SOFTWARE.
18  */
19 
20 #ifndef _HAL_API_H_
21 #define _HAL_API_H_
22 
23 #include "qdf_types.h"
24 #include "qdf_util.h"
25 #include "qdf_atomic.h"
26 #include "hal_internal.h"
27 #include "hif.h"
28 #include "hif_io32.h"
29 #include "qdf_platform.h"
30 
31 #ifdef DUMP_REO_QUEUE_INFO_IN_DDR
32 #include "hal_hw_headers.h"
33 #endif
34 
35 /* Ring index for WBM2SW2 release ring */
36 #define HAL_IPA_TX_COMP_RING_IDX 2
37 
38 #if defined(CONFIG_SHADOW_V2) || defined(CONFIG_SHADOW_V3)
39 #define ignore_shadow false
40 #define CHECK_SHADOW_REGISTERS true
41 #else
42 #define ignore_shadow true
43 #define CHECK_SHADOW_REGISTERS false
44 #endif
45 
46 /* calculate the register address offset from bar0 of shadow register x */
47 #if defined(QCA_WIFI_QCA6390) || defined(QCA_WIFI_QCA6490) || \
48     defined(QCA_WIFI_KIWI)
49 #define SHADOW_REGISTER_START_ADDRESS_OFFSET 0x000008FC
50 #define SHADOW_REGISTER_END_ADDRESS_OFFSET \
51 	((SHADOW_REGISTER_START_ADDRESS_OFFSET) + (4 * (MAX_SHADOW_REGISTERS)))
52 #define SHADOW_REGISTER(x) ((SHADOW_REGISTER_START_ADDRESS_OFFSET) + (4 * (x)))
53 #elif defined(QCA_WIFI_QCA6290) || defined(QCA_WIFI_QCN9000)
54 #define SHADOW_REGISTER_START_ADDRESS_OFFSET 0x00003024
55 #define SHADOW_REGISTER_END_ADDRESS_OFFSET \
56 	((SHADOW_REGISTER_START_ADDRESS_OFFSET) + (4 * (MAX_SHADOW_REGISTERS)))
57 #define SHADOW_REGISTER(x) ((SHADOW_REGISTER_START_ADDRESS_OFFSET) + (4 * (x)))
58 #elif defined(QCA_WIFI_QCA6750)
59 #define SHADOW_REGISTER_START_ADDRESS_OFFSET 0x00000504
60 #define SHADOW_REGISTER_END_ADDRESS_OFFSET \
61 	((SHADOW_REGISTER_START_ADDRESS_OFFSET) + (4 * (MAX_SHADOW_REGISTERS)))
62 #define SHADOW_REGISTER(x) ((SHADOW_REGISTER_START_ADDRESS_OFFSET) + (4 * (x)))
63 #else
64 #define SHADOW_REGISTER(x) 0
65 #endif /* QCA_WIFI_QCA6390 || QCA_WIFI_QCA6490 || QCA_WIFI_QCA6750 */
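/*
 * Worked example (illustrative): on targets that use the 0x000008FC base
 * above (e.g. QCA6390), shadow register 3 sits at
 * SHADOW_REGISTER(3) = 0x8FC + (4 * 3) = 0x908 from BAR0.
 */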
66 
67 /*
68  * BAR + 4K is always accessible, any access outside this
69  * space requires force wake procedure.
70  * OFFSET = 4K - 32 bytes = 0xFE0
71  */
72 #define MAPPED_REF_OFF 0xFE0
73 
74 #define HAL_OFFSET(block, field) block ## _ ## field ## _OFFSET
75 
76 #ifdef ENABLE_VERBOSE_DEBUG
77 static inline void
78 hal_set_verbose_debug(bool flag)
79 {
80 	is_hal_verbose_debug_enabled = flag;
81 }
82 #endif
83 
84 #ifdef ENABLE_HAL_SOC_STATS
85 #define HAL_STATS_INC(_handle, _field, _delta) \
86 { \
87 	if (likely(_handle)) \
88 		_handle->stats._field += _delta; \
89 }
90 #else
91 #define HAL_STATS_INC(_handle, _field, _delta)
92 #endif
93 
94 #ifdef ENABLE_HAL_REG_WR_HISTORY
95 #define HAL_REG_WRITE_FAIL_HIST_ADD(hal_soc, offset, wr_val, rd_val) \
96 	hal_reg_wr_fail_history_add(hal_soc, offset, wr_val, rd_val)
97 
98 void hal_reg_wr_fail_history_add(struct hal_soc *hal_soc,
99 				 uint32_t offset,
100 				 uint32_t wr_val,
101 				 uint32_t rd_val);
102 
103 static inline int hal_history_get_next_index(qdf_atomic_t *table_index,
104 					     int array_size)
105 {
106 	int record_index = qdf_atomic_inc_return(table_index);
107 
108 	return record_index & (array_size - 1);
109 }
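/*
 * Example (illustrative): with a power-of-two array_size of 32 and a
 * zero-initialized counter, successive calls return 1, 2, ..., 31, 0, 1, ...
 * as the atomic counter wraps around the history table.
 */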
110 #else
111 #define HAL_REG_WRITE_FAIL_HIST_ADD(hal_soc, offset, wr_val, rd_val) \
112 	hal_err("write failed at reg offset 0x%x, write 0x%x read 0x%x\n", \
113 		offset,	\
114 		wr_val,	\
115 		rd_val)
116 #endif
117 
118 /**
119  * hal_reg_write_result_check() - check register writing result
120  * @hal_soc: HAL soc handle
121  * @offset: register offset to read
122  * @exp_val: the expected value of register
124  *
125  * Return: none
126  */
127 static inline void hal_reg_write_result_check(struct hal_soc *hal_soc,
128 					      uint32_t offset,
129 					      uint32_t exp_val)
130 {
131 	uint32_t value;
132 
133 	value = qdf_ioread32(hal_soc->dev_base_addr + offset);
134 	if (exp_val != value) {
135 		HAL_REG_WRITE_FAIL_HIST_ADD(hal_soc, offset, exp_val, value);
136 		HAL_STATS_INC(hal_soc, reg_write_fail, 1);
137 	}
138 }
139 
140 #ifdef WINDOW_REG_PLD_LOCK_ENABLE
141 static inline void hal_lock_reg_access(struct hal_soc *soc,
142 				       unsigned long *flags)
143 {
144 	pld_lock_reg_window(soc->qdf_dev->dev, flags);
145 }
146 
147 static inline void hal_unlock_reg_access(struct hal_soc *soc,
148 					 unsigned long *flags)
149 {
150 	pld_unlock_reg_window(soc->qdf_dev->dev, flags);
151 }
152 #else
153 static inline void hal_lock_reg_access(struct hal_soc *soc,
154 				       unsigned long *flags)
155 {
156 	qdf_spin_lock_irqsave(&soc->register_access_lock);
157 }
158 
159 static inline void hal_unlock_reg_access(struct hal_soc *soc,
160 					 unsigned long *flags)
161 {
162 	qdf_spin_unlock_irqrestore(&soc->register_access_lock);
163 }
164 #endif
165 
166 #ifdef PCIE_REG_WINDOW_LOCAL_NO_CACHE
167 /**
168  * hal_select_window_confirm() - write remap window register and
169  *				 check writing result
170  *
171  */
172 static inline void hal_select_window_confirm(struct hal_soc *hal_soc,
173 					     uint32_t offset)
174 {
175 	uint32_t window = (offset >> WINDOW_SHIFT) & WINDOW_VALUE_MASK;
176 
177 	qdf_iowrite32(hal_soc->dev_base_addr + WINDOW_REG_ADDRESS,
178 		      WINDOW_ENABLE_BIT | window);
179 	hal_soc->register_window = window;
180 
181 	hal_reg_write_result_check(hal_soc, WINDOW_REG_ADDRESS,
182 				   WINDOW_ENABLE_BIT | window);
183 }
184 #else
185 static inline void hal_select_window_confirm(struct hal_soc *hal_soc,
186 					     uint32_t offset)
187 {
188 	uint32_t window = (offset >> WINDOW_SHIFT) & WINDOW_VALUE_MASK;
189 
190 	if (window != hal_soc->register_window) {
191 		qdf_iowrite32(hal_soc->dev_base_addr + WINDOW_REG_ADDRESS,
192 			      WINDOW_ENABLE_BIT | window);
193 		hal_soc->register_window = window;
194 
195 		hal_reg_write_result_check(
196 					hal_soc,
197 					WINDOW_REG_ADDRESS,
198 					WINDOW_ENABLE_BIT | window);
199 	}
200 }
201 #endif
202 
203 static inline qdf_iomem_t hal_get_window_address(struct hal_soc *hal_soc,
204 						 qdf_iomem_t addr)
205 {
206 	return hal_soc->ops->hal_get_window_address(hal_soc, addr);
207 }
208 
209 static inline void hal_tx_init_cmd_credit_ring(hal_soc_handle_t hal_soc_hdl,
210 					       hal_ring_handle_t hal_ring_hdl)
211 {
212 	struct hal_soc *hal_soc = (struct hal_soc *)hal_soc_hdl;
213 
214 	return hal_soc->ops->hal_tx_init_cmd_credit_ring(hal_soc_hdl,
215 							 hal_ring_hdl);
216 }
217 
218 /**
219  * hal_write32_mb() - Access registers to update configuration
220  * @hal_soc: hal soc handle
221  * @offset: offset address from the BAR
222  * @value: value to write
223  *
224  * Return: None
225  *
226  * Description: Register address space is split below:
227  *     SHADOW REGION       UNWINDOWED REGION    WINDOWED REGION
228  *  |--------------------|-------------------|------------------|
229  * BAR  NO FORCE WAKE  BAR+4K  FORCE WAKE  BAR+512K  FORCE WAKE
230  *
231  * 1. Any access to the shadow region, doesn't need force wake
232  *    and windowing logic to access.
233  * 2. Any access beyond BAR + 4K:
234  *    If init_phase enabled, no force wake is needed and access
235  *    should be based on windowed or unwindowed access.
236  *    If init_phase disabled, force wake is needed and access
237  *    should be based on windowed or unwindowed access.
238  *
239  * note1: WINDOW_RANGE_MASK = (1 << WINDOW_SHIFT) -1
240  * note2: 1 << WINDOW_SHIFT = MAX_UNWINDOWED_ADDRESS
241  * note3: WINDOW_VALUE_MASK = big enough that trying to write past
242  *                            that window would be a bug
243  */
244 #if !defined(QCA_WIFI_QCA6390) && !defined(QCA_WIFI_QCA6490) && \
245     !defined(QCA_WIFI_QCA6750) && !defined(QCA_WIFI_KIWI)
246 static inline void hal_write32_mb(struct hal_soc *hal_soc, uint32_t offset,
247 				  uint32_t value)
248 {
249 	unsigned long flags;
250 	qdf_iomem_t new_addr;
251 
252 	if (!hal_soc->use_register_windowing ||
253 	    offset < MAX_UNWINDOWED_ADDRESS) {
254 		qdf_iowrite32(hal_soc->dev_base_addr + offset, value);
255 	} else if (hal_soc->static_window_map) {
256 		new_addr = hal_get_window_address(hal_soc,
257 				hal_soc->dev_base_addr + offset);
258 		qdf_iowrite32(new_addr, value);
259 	} else {
260 		hal_lock_reg_access(hal_soc, &flags);
261 		hal_select_window_confirm(hal_soc, offset);
262 		qdf_iowrite32(hal_soc->dev_base_addr + WINDOW_START +
263 			  (offset & WINDOW_RANGE_MASK), value);
264 		hal_unlock_reg_access(hal_soc, &flags);
265 	}
266 }
267 
268 #define hal_write32_mb_confirm(_hal_soc, _offset, _value) \
269 		hal_write32_mb(_hal_soc, _offset, _value)
270 
271 #define hal_write32_mb_cmem(_hal_soc, _offset, _value)
272 #else
273 static inline void hal_write32_mb(struct hal_soc *hal_soc, uint32_t offset,
274 				  uint32_t value)
275 {
276 	int ret;
277 	unsigned long flags;
278 	qdf_iomem_t new_addr;
279 
280 	if (!TARGET_ACCESS_ALLOWED(HIF_GET_SOFTC(
281 					hal_soc->hif_handle))) {
282 		hal_err_rl("target access is not allowed");
283 		return;
284 	}
285 
286 	/* Region < BAR + 4K can be directly accessed */
287 	if (offset < MAPPED_REF_OFF) {
288 		qdf_iowrite32(hal_soc->dev_base_addr + offset, value);
289 		return;
290 	}
291 
292 	/* Region greater than BAR + 4K */
293 	if (!hal_soc->init_phase) {
294 		ret = hif_force_wake_request(hal_soc->hif_handle);
295 		if (ret) {
296 			hal_err_rl("Wake up request failed");
297 			qdf_check_state_before_panic(__func__, __LINE__);
298 			return;
299 		}
300 	}
301 
302 	if (!hal_soc->use_register_windowing ||
303 	    offset < MAX_UNWINDOWED_ADDRESS) {
304 		qdf_iowrite32(hal_soc->dev_base_addr + offset, value);
305 	} else if (hal_soc->static_window_map) {
306 		new_addr = hal_get_window_address(
307 					hal_soc,
308 					hal_soc->dev_base_addr + offset);
309 		qdf_iowrite32(new_addr, value);
310 	} else {
311 		hal_lock_reg_access(hal_soc, &flags);
312 		hal_select_window_confirm(hal_soc, offset);
313 		qdf_iowrite32(hal_soc->dev_base_addr + WINDOW_START +
314 			  (offset & WINDOW_RANGE_MASK), value);
315 		hal_unlock_reg_access(hal_soc, &flags);
316 	}
317 
318 	if (!hal_soc->init_phase) {
319 		ret = hif_force_wake_release(hal_soc->hif_handle);
320 		if (ret) {
321 			hal_err("Wake up release failed");
322 			qdf_check_state_before_panic(__func__, __LINE__);
323 			return;
324 		}
325 	}
326 }
327 
328 /**
329  * hal_write32_mb_confirm() - write register and check writing result
330  *
331  */
332 static inline void hal_write32_mb_confirm(struct hal_soc *hal_soc,
333 					  uint32_t offset,
334 					  uint32_t value)
335 {
336 	int ret;
337 	unsigned long flags;
338 	qdf_iomem_t new_addr;
339 
340 	if (!TARGET_ACCESS_ALLOWED(HIF_GET_SOFTC(
341 					hal_soc->hif_handle))) {
342 		hal_err_rl("target access is not allowed");
343 		return;
344 	}
345 
346 	/* Region < BAR + 4K can be directly accessed */
347 	if (offset < MAPPED_REF_OFF) {
348 		qdf_iowrite32(hal_soc->dev_base_addr + offset, value);
349 		return;
350 	}
351 
352 	/* Region greater than BAR + 4K */
353 	if (!hal_soc->init_phase) {
354 		ret = hif_force_wake_request(hal_soc->hif_handle);
355 		if (ret) {
356 			hal_err("Wake up request failed");
357 			qdf_check_state_before_panic(__func__, __LINE__);
358 			return;
359 		}
360 	}
361 
362 	if (!hal_soc->use_register_windowing ||
363 	    offset < MAX_UNWINDOWED_ADDRESS) {
364 		qdf_iowrite32(hal_soc->dev_base_addr + offset, value);
365 		hal_reg_write_result_check(hal_soc, offset,
366 					   value);
367 	} else if (hal_soc->static_window_map) {
368 		new_addr = hal_get_window_address(
369 					hal_soc,
370 					hal_soc->dev_base_addr + offset);
371 		qdf_iowrite32(new_addr, value);
372 		hal_reg_write_result_check(hal_soc,
373 					   new_addr - hal_soc->dev_base_addr,
374 					   value);
375 	} else {
376 		hal_lock_reg_access(hal_soc, &flags);
377 		hal_select_window_confirm(hal_soc, offset);
378 		qdf_iowrite32(hal_soc->dev_base_addr + WINDOW_START +
379 			  (offset & WINDOW_RANGE_MASK), value);
380 
381 		hal_reg_write_result_check(
382 				hal_soc,
383 				WINDOW_START + (offset & WINDOW_RANGE_MASK),
384 				value);
385 		hal_unlock_reg_access(hal_soc, &flags);
386 	}
387 
388 	if (!hal_soc->init_phase) {
389 		ret = hif_force_wake_release(hal_soc->hif_handle);
390 		if (ret) {
391 			hal_err("Wake up release failed");
392 			qdf_check_state_before_panic(__func__, __LINE__);
393 			return;
394 		}
395 	}
396 }
397 
398 static inline void hal_write32_mb_cmem(struct hal_soc *hal_soc, uint32_t offset,
399 				       uint32_t value)
400 {
401 	unsigned long flags;
402 	qdf_iomem_t new_addr;
403 
404 	if (!TARGET_ACCESS_ALLOWED(HIF_GET_SOFTC(
405 					hal_soc->hif_handle))) {
406 		hal_err_rl("%s: target access is not allowed", __func__);
407 		return;
408 	}
409 
410 	if (!hal_soc->use_register_windowing ||
411 	    offset < MAX_UNWINDOWED_ADDRESS) {
412 		qdf_iowrite32(hal_soc->dev_base_addr + offset, value);
413 	} else if (hal_soc->static_window_map) {
414 		new_addr = hal_get_window_address(
415 					hal_soc,
416 					hal_soc->dev_base_addr + offset);
417 		qdf_iowrite32(new_addr, value);
418 	} else {
419 		hal_lock_reg_access(hal_soc, &flags);
420 		hal_select_window_confirm(hal_soc, offset);
421 		qdf_iowrite32(hal_soc->dev_base_addr + WINDOW_START +
422 			  (offset & WINDOW_RANGE_MASK), value);
423 		hal_unlock_reg_access(hal_soc, &flags);
424 	}
425 }
426 #endif
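/*
 * Usage sketch (illustrative only): how a HAL client might program a device
 * register through the write helpers above. EXAMPLE_REG_OFFSET is a
 * hypothetical offset from BAR0, not a real register in this driver;
 * windowing, force wake and register-window locking are handled inside the
 * helpers themselves.
 *
 *	static void example_program_reg(struct hal_soc *hal_soc)
 *	{
 *		// fire-and-forget write
 *		hal_write32_mb(hal_soc, EXAMPLE_REG_OFFSET, 0x1);
 *
 *		// write, then read back and record a failure if it differs
 *		hal_write32_mb_confirm(hal_soc, EXAMPLE_REG_OFFSET, 0x1);
 *	}
 */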
427 
428 /**
429  * hal_write_address_32_mb - write a value to a register
430  *
431  */
432 static inline
433 void hal_write_address_32_mb(struct hal_soc *hal_soc,
434 			     qdf_iomem_t addr, uint32_t value, bool wr_confirm)
435 {
436 	uint32_t offset;
437 
438 	if (!hal_soc->use_register_windowing)
439 		return qdf_iowrite32(addr, value);
440 
441 	offset = addr - hal_soc->dev_base_addr;
442 
443 	if (qdf_unlikely(wr_confirm))
444 		hal_write32_mb_confirm(hal_soc, offset, value);
445 	else
446 		hal_write32_mb(hal_soc, offset, value);
447 }
448 
449 
450 #ifdef DP_HAL_MULTIWINDOW_DIRECT_ACCESS
451 static inline void hal_srng_write_address_32_mb(struct hal_soc *hal_soc,
452 						struct hal_srng *srng,
453 						void __iomem *addr,
454 						uint32_t value)
455 {
456 	qdf_iowrite32(addr, value);
457 }
458 #elif defined(FEATURE_HAL_DELAYED_REG_WRITE)
459 static inline void hal_srng_write_address_32_mb(struct hal_soc *hal_soc,
460 						struct hal_srng *srng,
461 						void __iomem *addr,
462 						uint32_t value)
463 {
464 	hal_delayed_reg_write(hal_soc, srng, addr, value);
465 }
466 #else
467 static inline void hal_srng_write_address_32_mb(struct hal_soc *hal_soc,
468 						struct hal_srng *srng,
469 						void __iomem *addr,
470 						uint32_t value)
471 {
472 	hal_write_address_32_mb(hal_soc, addr, value, false);
473 }
474 #endif
475 
476 #if !defined(QCA_WIFI_QCA6390) && !defined(QCA_WIFI_QCA6490) && \
477     !defined(QCA_WIFI_QCA6750) && !defined(QCA_WIFI_KIWI)
478 /**
479  * hal_read32_mb() - Access registers to read configuration
480  * @hal_soc: hal soc handle
481  * @offset: offset address from the BAR
483  *
484  * Description: Register address space is split below:
485  *     SHADOW REGION       UNWINDOWED REGION    WINDOWED REGION
486  *  |--------------------|-------------------|------------------|
487  * BAR  NO FORCE WAKE  BAR+4K  FORCE WAKE  BAR+512K  FORCE WAKE
488  *
489  * 1. Any access to the shadow region, doesn't need force wake
490  *    and windowing logic to access.
491  * 2. Any access beyond BAR + 4K:
492  *    If init_phase enabled, no force wake is needed and access
493  *    should be based on windowed or unwindowed access.
494  *    If init_phase disabled, force wake is needed and access
495  *    should be based on windowed or unwindowed access.
496  *
497  * Return: value read from the register
498  */
499 static inline uint32_t hal_read32_mb(struct hal_soc *hal_soc, uint32_t offset)
500 {
501 	uint32_t ret;
502 	unsigned long flags;
503 	qdf_iomem_t new_addr;
504 
505 	if (!hal_soc->use_register_windowing ||
506 	    offset < MAX_UNWINDOWED_ADDRESS) {
507 		return qdf_ioread32(hal_soc->dev_base_addr + offset);
508 	} else if (hal_soc->static_window_map) {
509 		new_addr = hal_get_window_address(hal_soc, hal_soc->dev_base_addr + offset);
510 		return qdf_ioread32(new_addr);
511 	}
512 
513 	hal_lock_reg_access(hal_soc, &flags);
514 	hal_select_window_confirm(hal_soc, offset);
515 	ret = qdf_ioread32(hal_soc->dev_base_addr + WINDOW_START +
516 		       (offset & WINDOW_RANGE_MASK));
517 	hal_unlock_reg_access(hal_soc, &flags);
518 
519 	return ret;
520 }
521 
522 #define hal_read32_mb_cmem(_hal_soc, _offset)
523 #else
524 static
525 uint32_t hal_read32_mb(struct hal_soc *hal_soc, uint32_t offset)
526 {
527 	uint32_t ret;
528 	unsigned long flags;
529 	qdf_iomem_t new_addr;
530 
531 	if (!TARGET_ACCESS_ALLOWED(HIF_GET_SOFTC(
532 					hal_soc->hif_handle))) {
533 		hal_err_rl("target access is not allowed");
534 		return 0;
535 	}
536 
537 	/* Region < BAR + 4K can be directly accessed */
538 	if (offset < MAPPED_REF_OFF)
539 		return qdf_ioread32(hal_soc->dev_base_addr + offset);
540 
541 	if ((!hal_soc->init_phase) &&
542 	    hif_force_wake_request(hal_soc->hif_handle)) {
543 		hal_err("Wake up request failed");
544 		qdf_check_state_before_panic(__func__, __LINE__);
545 		return 0;
546 	}
547 
548 	if (!hal_soc->use_register_windowing ||
549 	    offset < MAX_UNWINDOWED_ADDRESS) {
550 		ret = qdf_ioread32(hal_soc->dev_base_addr + offset);
551 	} else if (hal_soc->static_window_map) {
552 		new_addr = hal_get_window_address(
553 					hal_soc,
554 					hal_soc->dev_base_addr + offset);
555 		ret = qdf_ioread32(new_addr);
556 	} else {
557 		hal_lock_reg_access(hal_soc, &flags);
558 		hal_select_window_confirm(hal_soc, offset);
559 		ret = qdf_ioread32(hal_soc->dev_base_addr + WINDOW_START +
560 			       (offset & WINDOW_RANGE_MASK));
561 		hal_unlock_reg_access(hal_soc, &flags);
562 	}
563 
564 	if ((!hal_soc->init_phase) &&
565 	    hif_force_wake_release(hal_soc->hif_handle)) {
566 		hal_err("Wake up release failed");
567 		qdf_check_state_before_panic(__func__, __LINE__);
568 		return 0;
569 	}
570 
571 	return ret;
572 }
573 
574 static inline
575 uint32_t hal_read32_mb_cmem(struct hal_soc *hal_soc, uint32_t offset)
576 {
577 	uint32_t ret;
578 	unsigned long flags;
579 	qdf_iomem_t new_addr;
580 
581 	if (!TARGET_ACCESS_ALLOWED(HIF_GET_SOFTC(
582 					hal_soc->hif_handle))) {
583 		hal_err_rl("%s: target access is not allowed", __func__);
584 		return 0;
585 	}
586 
587 	if (!hal_soc->use_register_windowing ||
588 	    offset < MAX_UNWINDOWED_ADDRESS) {
589 		ret = qdf_ioread32(hal_soc->dev_base_addr + offset);
590 	} else if (hal_soc->static_window_map) {
591 		new_addr = hal_get_window_address(
592 					hal_soc,
593 					hal_soc->dev_base_addr + offset);
594 		ret = qdf_ioread32(new_addr);
595 	} else {
596 		hal_lock_reg_access(hal_soc, &flags);
597 		hal_select_window_confirm(hal_soc, offset);
598 		ret = qdf_ioread32(hal_soc->dev_base_addr + WINDOW_START +
599 			       (offset & WINDOW_RANGE_MASK));
600 		hal_unlock_reg_access(hal_soc, &flags);
601 	}
602 	return ret;
603 }
604 #endif
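/*
 * Read-side counterpart (illustrative): EXAMPLE_REG_OFFSET is a hypothetical
 * offset from BAR0; force wake and windowing are applied internally exactly
 * as on the write path.
 *
 *	uint32_t val = hal_read32_mb(hal_soc, EXAMPLE_REG_OFFSET);
 */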
605 
606 /* Max times allowed for register writing retry */
607 #define HAL_REG_WRITE_RETRY_MAX		5
608 /* Delay in milliseconds between retries */
609 #define HAL_REG_WRITE_RETRY_DELAY	1
610 
611 #ifdef GENERIC_SHADOW_REGISTER_ACCESS_ENABLE
612 /* To check shadow config index range between 0..31 */
613 #define HAL_SHADOW_REG_INDEX_LOW 32
614 /* To check shadow config index range between 32..39 */
615 #define HAL_SHADOW_REG_INDEX_HIGH 40
616 /* Dirty bit reg offsets corresponding to shadow config index */
617 #define HAL_SHADOW_REG_DIRTY_BIT_DATA_LOW_OFFSET 0x30C8
618 #define HAL_SHADOW_REG_DIRTY_BIT_DATA_HIGH_OFFSET 0x30C4
619 /* PCIE_PCIE_TOP base addr offset */
620 #define HAL_PCIE_PCIE_TOP_WRAPPER 0x01E00000
621 /* Max retry attempts to read the dirty bit reg */
622 #ifdef HAL_CONFIG_SLUB_DEBUG_ON
623 #define HAL_SHADOW_DIRTY_BIT_POLL_MAX 10000
624 #else
625 #define HAL_SHADOW_DIRTY_BIT_POLL_MAX 2000
626 #endif
627 /* Delay in usecs for polling dirty bit reg */
628 #define HAL_SHADOW_DIRTY_BIT_POLL_DELAY 5
629 
630 /**
631  * hal_poll_dirty_bit_reg() - Poll dirty register bit to confirm
632  * write was successful
633  * @hal_soc: hal soc handle
634  * @shadow_config_index: index of shadow reg used to confirm
635  * write
636  *
637  * Return: QDF_STATUS_SUCCESS on success
638  */
639 static inline QDF_STATUS hal_poll_dirty_bit_reg(struct hal_soc *hal,
640 						int shadow_config_index)
641 {
642 	uint32_t read_value = 0;
643 	int retry_cnt = 0;
644 	uint32_t reg_offset = 0;
645 
646 	if (shadow_config_index > 0 &&
647 	    shadow_config_index < HAL_SHADOW_REG_INDEX_LOW) {
648 		reg_offset =
649 			HAL_SHADOW_REG_DIRTY_BIT_DATA_LOW_OFFSET;
650 	} else if (shadow_config_index >= HAL_SHADOW_REG_INDEX_LOW &&
651 		   shadow_config_index < HAL_SHADOW_REG_INDEX_HIGH) {
652 		reg_offset =
653 			HAL_SHADOW_REG_DIRTY_BIT_DATA_HIGH_OFFSET;
654 	} else {
655 		hal_err("Invalid shadow_config_index = %d",
656 			shadow_config_index);
657 		return QDF_STATUS_E_INVAL;
658 	}
659 	while (retry_cnt < HAL_SHADOW_DIRTY_BIT_POLL_MAX) {
660 		read_value = hal_read32_mb(
661 				hal, HAL_PCIE_PCIE_TOP_WRAPPER + reg_offset);
662 		/* Check if dirty bit corresponding to shadow_index is set */
663 		if (read_value & BIT(shadow_config_index)) {
664 			/* Dirty reg bit not reset */
665 			qdf_udelay(HAL_SHADOW_DIRTY_BIT_POLL_DELAY);
666 			retry_cnt++;
667 		} else {
668 			hal_debug("Shadow write: offset 0x%x read val 0x%x",
669 				  reg_offset, read_value);
670 			return QDF_STATUS_SUCCESS;
671 		}
672 	}
673 	return QDF_STATUS_E_TIMEOUT;
674 }
675 
676 /**
677  * hal_write32_mb_shadow_confirm() - write to shadow reg and
678  * poll dirty register bit to confirm write
679  * @hal_soc: hal soc handle
680  * @reg_offset: target reg offset address from BAR
681  * @value: value to write
682  *
683  * Return: QDF_STATUS_SUCCESS on success
684  */
685 static inline QDF_STATUS hal_write32_mb_shadow_confirm(
686 	struct hal_soc *hal,
687 	uint32_t reg_offset,
688 	uint32_t value)
689 {
690 	int i;
691 	QDF_STATUS ret;
692 	uint32_t shadow_reg_offset;
693 	int shadow_config_index;
694 	bool is_reg_offset_present = false;
695 
696 	for (i = 0; i < MAX_GENERIC_SHADOW_REG; i++) {
697 		/* Found the shadow config for the reg_offset */
698 		struct shadow_reg_config *hal_shadow_reg_list =
699 			&hal->list_shadow_reg_config[i];
700 		if (hal_shadow_reg_list->target_register ==
701 			reg_offset) {
702 			shadow_config_index =
703 				hal_shadow_reg_list->shadow_config_index;
704 			shadow_reg_offset =
705 				SHADOW_REGISTER(shadow_config_index);
706 			hal_write32_mb_confirm(
707 				hal, shadow_reg_offset, value);
708 			is_reg_offset_present = true;
709 			break;
710 		}
711 		ret = QDF_STATUS_E_FAILURE;
712 	}
713 	if (is_reg_offset_present) {
714 		ret = hal_poll_dirty_bit_reg(hal, shadow_config_index);
715 		hal_info("Shadow write:reg 0x%x val 0x%x ret %d",
716 			 reg_offset, value, ret);
717 		if (QDF_IS_STATUS_ERROR(ret)) {
718 			HAL_STATS_INC(hal, shadow_reg_write_fail, 1);
719 			return ret;
720 		}
721 		HAL_STATS_INC(hal, shadow_reg_write_succ, 1);
722 	}
723 	return ret;
724 }
725 
726 /**
727  * hal_write32_mb_confirm_retry() - write register with confirming and
728  *				    do retry/recovery if writing failed
729  * @hal_soc: hal soc handle
730  * @offset: offset address from the BAR
731  * @value: value to write
732  * @recovery: is recovery needed or not.
733  *
734  * Write the register value with confirming and read it back, if
735  * read back value is not as expected, do retry for writing, if
736  * retry hit max times allowed but still fail, check if recovery
737  * needed.
738  *
739  * Return: None
740  */
741 static inline void hal_write32_mb_confirm_retry(struct hal_soc *hal_soc,
742 						uint32_t offset,
743 						uint32_t value,
744 						bool recovery)
745 {
746 	QDF_STATUS ret;
747 
748 	ret = hal_write32_mb_shadow_confirm(hal_soc, offset, value);
749 	if (QDF_IS_STATUS_ERROR(ret) && recovery)
750 		qdf_trigger_self_recovery(NULL, QDF_HAL_REG_WRITE_FAILURE);
751 }
752 #else /* GENERIC_SHADOW_REGISTER_ACCESS_ENABLE */
753 
754 static inline void hal_write32_mb_confirm_retry(struct hal_soc *hal_soc,
755 						uint32_t offset,
756 						uint32_t value,
757 						bool recovery)
758 {
759 	uint8_t retry_cnt = 0;
760 	uint32_t read_value;
761 
762 	while (retry_cnt <= HAL_REG_WRITE_RETRY_MAX) {
763 		hal_write32_mb_confirm(hal_soc, offset, value);
764 		read_value = hal_read32_mb(hal_soc, offset);
765 		if (qdf_likely(read_value == value))
766 			break;
767 
768 		/* write failed, do retry */
769 		hal_warn("Retry reg offset 0x%x, value 0x%x, read value 0x%x",
770 			 offset, value, read_value);
771 		qdf_mdelay(HAL_REG_WRITE_RETRY_DELAY);
772 		retry_cnt++;
773 	}
774 
775 	if (retry_cnt > HAL_REG_WRITE_RETRY_MAX && recovery)
776 		qdf_trigger_self_recovery(NULL, QDF_HAL_REG_WRITE_FAILURE);
777 }
778 #endif /* GENERIC_SHADOW_REGISTER_ACCESS_ENABLE */
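/*
 * Usage sketch (illustrative): a critical register update that must not be
 * silently lost can use the retry variant; recovery=true asks QDF to trigger
 * self-recovery if every retry fails. EXAMPLE_REG_OFFSET is a hypothetical
 * offset.
 *
 *	hal_write32_mb_confirm_retry(hal_soc, EXAMPLE_REG_OFFSET, 0x1, true);
 */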
779 
780 #if defined(FEATURE_HAL_DELAYED_REG_WRITE)
781 /**
782  * hal_dump_reg_write_srng_stats() - dump SRNG reg write stats
783  * @hal_soc: HAL soc handle
784  *
785  * Return: none
786  */
787 void hal_dump_reg_write_srng_stats(hal_soc_handle_t hal_soc_hdl);
788 
789 /**
790  * hal_dump_reg_write_stats() - dump reg write stats
791  * @hal_soc: HAL soc handle
792  *
793  * Return: none
794  */
795 void hal_dump_reg_write_stats(hal_soc_handle_t hal_soc_hdl);
796 
797 /**
798  * hal_get_reg_write_pending_work() - get the number of entries
799  *		pending in the workqueue to be processed.
800  * @hal_soc: HAL soc handle
801  *
802  * Returns: the number of entries pending to be processed
803  */
804 int hal_get_reg_write_pending_work(void *hal_soc);
805 
806 #else
807 static inline void hal_dump_reg_write_srng_stats(hal_soc_handle_t hal_soc_hdl)
808 {
809 }
810 
811 static inline void hal_dump_reg_write_stats(hal_soc_handle_t hal_soc_hdl)
812 {
813 }
814 
815 static inline int hal_get_reg_write_pending_work(void *hal_soc)
816 {
817 	return 0;
818 }
819 #endif
820 
821 /**
822  * hal_read_address_32_mb() - Read 32-bit value from the register
823  * @soc: soc handle
824  * @addr: register address to read
825  *
826  * Return: 32-bit value
827  */
828 static inline
829 uint32_t hal_read_address_32_mb(struct hal_soc *soc,
830 				qdf_iomem_t addr)
831 {
832 	uint32_t offset;
833 	uint32_t ret;
834 
835 	if (!soc->use_register_windowing)
836 		return qdf_ioread32(addr);
837 
838 	offset = addr - soc->dev_base_addr;
839 	ret = hal_read32_mb(soc, offset);
840 	return ret;
841 }
842 
843 /**
844  * hal_attach - Initialize HAL layer
845  * @hif_handle: Opaque HIF handle
846  * @qdf_dev: QDF device
847  *
848  * Return: Opaque HAL SOC handle
849  *		 NULL on failure
850  *
851  * This function should be called as part of HIF initialization (for accessing
852  * copy engines). DP layer will get hal_soc handle using hif_get_hal_handle()
853  */
854 void *hal_attach(struct hif_opaque_softc *hif_handle, qdf_device_t qdf_dev);
855 
856 /**
857  * hal_detach - Detach HAL layer
858  * @hal_soc: HAL SOC handle
859  *
860  * This function should be called as part of HIF detach
861  *
862  */
863 extern void hal_detach(void *hal_soc);
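/*
 * Lifecycle sketch (illustrative): hal_attach() is called from HIF
 * initialization and the returned handle is released with hal_detach().
 * hif_handle and qdf_dev come from the HIF layer; error handling is reduced
 * to a NULL check.
 *
 *	void *hal_soc = hal_attach(hif_handle, qdf_dev);
 *
 *	if (!hal_soc)
 *		return QDF_STATUS_E_FAILURE;
 *	...
 *	hal_detach(hal_soc);
 */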
864 
865 #define HAL_SRNG_LMAC_RING 0x80000000
866 /* SRNG flags passed in hal_srng_params.flags */
867 #define HAL_SRNG_MSI_SWAP				0x00000008
868 #define HAL_SRNG_RING_PTR_SWAP			0x00000010
869 #define HAL_SRNG_DATA_TLV_SWAP			0x00000020
870 #define HAL_SRNG_LOW_THRES_INTR_ENABLE	0x00010000
871 #define HAL_SRNG_MSI_INTR				0x00020000
872 #define HAL_SRNG_CACHED_DESC		0x00040000
873 
874 #if defined(QCA_WIFI_QCA6490)  || defined(QCA_WIFI_KIWI)
875 #define HAL_SRNG_PREFETCH_TIMER 1
876 #else
877 #define HAL_SRNG_PREFETCH_TIMER 0
878 #endif
879 
880 #define PN_SIZE_24 0
881 #define PN_SIZE_48 1
882 #define PN_SIZE_128 2
883 
884 #ifdef FORCE_WAKE
885 /**
886  * hal_set_init_phase() - Indicate initialization of
887  *                        datapath rings
888  * @soc: hal_soc handle
889  * @init_phase: flag to indicate datapath rings
890  *              initialization status
891  *
892  * Return: None
893  */
894 void hal_set_init_phase(hal_soc_handle_t soc, bool init_phase);
895 #else
896 static inline
897 void hal_set_init_phase(hal_soc_handle_t soc, bool init_phase)
898 {
899 }
900 #endif /* FORCE_WAKE */
901 
902 /**
903  * hal_srng_get_entrysize - Returns size of ring entry in bytes. Should be
904  * used by callers for calculating the size of memory to be allocated before
905  * calling hal_srng_setup to setup the ring
906  *
907  * @hal_soc: Opaque HAL SOC handle
908  * @ring_type: one of the types from hal_ring_type
909  *
910  */
911 extern uint32_t hal_srng_get_entrysize(void *hal_soc, int ring_type);
912 
913 /**
914  * hal_srng_max_entries - Returns maximum possible number of ring entries
915  * @hal_soc: Opaque HAL SOC handle
916  * @ring_type: one of the types from hal_ring_type
917  *
918  * Return: Maximum number of entries for the given ring_type
919  */
920 uint32_t hal_srng_max_entries(void *hal_soc, int ring_type);
921 
922 void hal_set_low_threshold(hal_ring_handle_t hal_ring_hdl,
923 				 uint32_t low_threshold);
924 
925 /**
926  * hal_srng_dump - Dump ring status
927  * @srng: hal srng pointer
928  */
929 void hal_srng_dump(struct hal_srng *srng);
930 
931 /**
932  * hal_srng_get_dir - Returns the direction of the ring
933  * @hal_soc: Opaque HAL SOC handle
934  * @ring_type: one of the types from hal_ring_type
935  *
936  * Return: Ring direction
937  */
938 enum hal_srng_dir hal_srng_get_dir(void *hal_soc, int ring_type);
939 
940 /* HAL memory information */
941 struct hal_mem_info {
942 	/* dev base virtual addr */
943 	void *dev_base_addr;
944 	/* dev base physical addr */
945 	void *dev_base_paddr;
946 	/* dev base ce virtual addr - applicable only for qca5018  */
947 	/* In qca5018 CE register are outside wcss block */
948 	/* using a separate address space to access CE registers */
949 	void *dev_base_addr_ce;
950 	/* dev base ce physical addr */
951 	void *dev_base_paddr_ce;
952 	/* Remote virtual pointer memory for HW/FW updates */
953 	void *shadow_rdptr_mem_vaddr;
954 	/* Remote physical pointer memory for HW/FW updates */
955 	void *shadow_rdptr_mem_paddr;
956 	/* Shared memory for ring pointer updates from host to FW */
957 	void *shadow_wrptr_mem_vaddr;
958 	/* Shared physical memory for ring pointer updates from host to FW */
959 	void *shadow_wrptr_mem_paddr;
960 	/* lmac srng start id */
961 	uint8_t lmac_srng_start_id;
962 };
963 
964 /* SRNG parameters to be passed to hal_srng_setup */
965 struct hal_srng_params {
966 	/* Physical base address of the ring */
967 	qdf_dma_addr_t ring_base_paddr;
968 	/* Virtual base address of the ring */
969 	void *ring_base_vaddr;
970 	/* Number of entries in ring */
971 	uint32_t num_entries;
972 	/* max transfer length */
973 	uint16_t max_buffer_length;
974 	/* MSI Address */
975 	qdf_dma_addr_t msi_addr;
976 	/* MSI data */
977 	uint32_t msi_data;
978 	/* Interrupt timer threshold – in micro seconds */
979 	uint32_t intr_timer_thres_us;
980 	/* Interrupt batch counter threshold – in number of ring entries */
981 	uint32_t intr_batch_cntr_thres_entries;
982 	/* Low threshold – in number of ring entries
983 	 * (valid for src rings only)
984 	 */
985 	uint32_t low_threshold;
986 	/* Misc flags */
987 	uint32_t flags;
988 	/* Unique ring id */
989 	uint8_t ring_id;
990 	/* Source or Destination ring */
991 	enum hal_srng_dir ring_dir;
992 	/* Size of ring entry */
993 	uint32_t entry_size;
994 	/* hw register base address */
995 	void *hwreg_base[MAX_SRNG_REG_GROUPS];
996 	/* prefetch timer config - in micro seconds */
997 	uint32_t prefetch_timer;
998 #ifdef WLAN_FEATURE_NEAR_FULL_IRQ
999 	/* Near full IRQ support flag */
1000 	uint32_t nf_irq_support;
1001 	/* MSI2 Address */
1002 	qdf_dma_addr_t msi2_addr;
1003 	/* MSI2 data */
1004 	uint32_t msi2_data;
1005 	/* Critical threshold */
1006 	uint16_t crit_thresh;
1007 	/* High threshold */
1008 	uint16_t high_thresh;
1009 	/* Safe threshold */
1010 	uint16_t safe_thresh;
1011 #endif
1012 };
1013 
1014 /* hal_construct_srng_shadow_regs() - initialize the shadow
1015  * registers for srngs
1016  * @hal_soc: hal handle
1017  *
1018  * Return: QDF_STATUS_SUCCESS on success
1019  */
1020 QDF_STATUS hal_construct_srng_shadow_regs(void *hal_soc);
1021 
1022 /* hal_set_one_shadow_config() - add a config for the specified ring
1023  * @hal_soc: hal handle
1024  * @ring_type: ring type
1025  * @ring_num: ring num
1026  *
1027  * The ring type and ring num uniquely specify the ring.  After this call,
1028  * the hp/tp will be added as the next entry in the shadow register
1029  * configuration table.  The hal code will use the shadow register address
1030  * in place of the hp/tp address.
1031  *
1032  * This function is exposed, so that the CE module can skip configuring shadow
1033  * registers for unused rings and rings assigned to the firmware.
1034  *
1035  * Return: QDF_STATUS_SUCCESS on success
1036  */
1037 QDF_STATUS hal_set_one_shadow_config(void *hal_soc, int ring_type,
1038 				     int ring_num);
1039 /**
1040  * hal_get_shadow_config() - retrieve the config table for shadow cfg v2
1041  * @hal_soc: hal handle
1042  * @shadow_config: will point to the shadow config table on return
1043  * @num_shadow_registers_configured: will contain the number of valid entries
1044  */
1045 extern void
1046 hal_get_shadow_config(void *hal_soc,
1047 		      struct pld_shadow_reg_v2_cfg **shadow_config,
1048 		      int *num_shadow_registers_configured);
1049 
1050 #ifdef CONFIG_SHADOW_V3
1051 /**
1052  * hal_get_shadow_v3_config() - retrieve the config table for shadow cfg v3
1053  * @hal_soc: hal handle
1054  * @shadow_config: will point to the shadow config table on return
1055  * @num_shadow_registers_configured: will contain the number of valid entries
1056  */
1057 extern void
1058 hal_get_shadow_v3_config(void *hal_soc,
1059 			 struct pld_shadow_reg_v3_cfg **shadow_config,
1060 			 int *num_shadow_registers_configured);
1061 #endif
1062 
1063 #ifdef WLAN_FEATURE_NEAR_FULL_IRQ
1064 /**
1065  * hal_srng_is_near_full_irq_supported() - Check if srng supports near full irq
1066  * @hal_soc: HAL SoC handle [To be validated by caller]
1067  * @ring_type: srng type
1068  * @ring_num: The index of the srng (of the same type)
1069  *
1070  * Return: true, if the srng supports near full irq trigger
1071  *	false, if the srng does not support near full irq.
1072  */
1073 bool hal_srng_is_near_full_irq_supported(hal_soc_handle_t hal_soc,
1074 					 int ring_type, int ring_num);
1075 #else
1076 static inline
1077 bool hal_srng_is_near_full_irq_supported(hal_soc_handle_t hal_soc,
1078 					 int ring_type, int ring_num)
1079 {
1080 	return false;
1081 }
1082 #endif
1083 
1084 /**
1085  * hal_srng_setup - Initialize HW SRNG ring.
1086  *
1087  * @hal_soc: Opaque HAL SOC handle
1088  * @ring_type: one of the types from hal_ring_type
1089  * @ring_num: Ring number if there are multiple rings of
1090  *		same type (starting from 0)
1091  * @mac_id: valid MAC Id should be passed if ring type is one of lmac rings
1092  * @ring_params: SRNG ring params in hal_srng_params structure.
1093  * @idle_check: Check if ring is idle
1094  *
1095  * Callers are expected to allocate contiguous ring memory of size
1096  * 'num_entries * entry_size' bytes and pass the physical and virtual base
1097  * addresses through 'ring_base_paddr' and 'ring_base_vaddr' in hal_srng_params
1098  * structure. Ring base address should be 8 byte aligned and size of each ring
1099  * entry should be queried using the API hal_srng_get_entrysize
1100  *
1101  * Return: Opaque pointer to ring on success
1102  *		 NULL on failure (if given ring is not available)
1103  */
1104 extern void *hal_srng_setup(void *hal_soc, int ring_type, int ring_num,
1105 			    int mac_id, struct hal_srng_params *ring_params,
1106 			    bool idle_check);
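/*
 * Setup sketch (illustrative): the caller sizes the ring with
 * hal_srng_get_entrysize()/hal_srng_max_entries(), allocates coherent ring
 * memory, fills hal_srng_params and hands it to hal_srng_setup(). REO_DST is
 * one of the hal_ring_type values; the entry count, thresholds, hal_soc and
 * qdf_dev are placeholders, and error handling is elided.
 *
 *	struct hal_srng_params params = {0};
 *	uint32_t entry_size = hal_srng_get_entrysize(hal_soc, REO_DST);
 *	uint32_t num_entries = 1024;	// bounded by hal_srng_max_entries()
 *	qdf_dma_addr_t ring_paddr;
 *	void *ring_vaddr;
 *	hal_ring_handle_t hal_ring_hdl;
 *
 *	ring_vaddr = qdf_mem_alloc_consistent(qdf_dev, qdf_dev->dev,
 *					      num_entries * entry_size,
 *					      &ring_paddr);
 *	params.ring_base_vaddr = ring_vaddr;
 *	params.ring_base_paddr = ring_paddr;
 *	params.num_entries = num_entries;
 *	params.intr_timer_thres_us = 8;
 *	params.intr_batch_cntr_thres_entries = 1;
 *
 *	hal_ring_hdl = hal_srng_setup(hal_soc, REO_DST, 0, 0, &params, false);
 */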
1107 
1108 /**
1109  * hal_srng_setup_idx - Initialize HW SRNG ring.
1110  *
1111  * @hal_soc: Opaque HAL SOC handle
1112  * @ring_type: one of the types from hal_ring_type
1113  * @ring_num: Ring number if there are multiple rings of
1114  *		same type (starting from 0)
1115  * @mac_id: valid MAC Id should be passed if ring type is one of lmac rings
1116  * @ring_params: SRNG ring params in hal_srng_params structure.
1117  * @idle_check: Check if ring is idle
1118  * @idx: Ring index
1119  *
1120  * Callers are expected to allocate contiguous ring memory of size
1121  * 'num_entries * entry_size' bytes and pass the physical and virtual base
1122  * addresses through 'ring_base_paddr' and 'ring_base_vaddr' in hal_srng_params
1123  * structure. Ring base address should be 8 byte aligned and size of each ring
1124  * entry should be queried using the API hal_srng_get_entrysize
1125  *
1126  * Return: Opaque pointer to ring on success
1127  *		 NULL on failure (if given ring is not available)
1128  */
1129 extern void *hal_srng_setup_idx(void *hal_soc, int ring_type, int ring_num,
1130 				int mac_id, struct hal_srng_params *ring_params,
1131 				bool idle_check, uint32_t idx);
1132 
1133 
1134 /* Remapping ids of REO rings */
1135 #define REO_REMAP_TCL 0
1136 #define REO_REMAP_SW1 1
1137 #define REO_REMAP_SW2 2
1138 #define REO_REMAP_SW3 3
1139 #define REO_REMAP_SW4 4
1140 #define REO_REMAP_RELEASE 5
1141 #define REO_REMAP_FW 6
1142 /*
1143  * In Beryllium, the 4-bit REO destination ring value is defined as: 0: TCL
1144  * 1:SW1  2:SW2  3:SW3  4:SW4  5:Release  6:FW(WIFI)  7:SW5
1145  * 8:SW6 9:SW7  10:SW8  11: NOT_USED.
1146  *
1147  */
1148 #define REO_REMAP_SW5 7
1149 #define REO_REMAP_SW6 8
1150 #define REO_REMAP_SW7 9
1151 #define REO_REMAP_SW8 10
1152 
1153 /*
1154  * Macro to access HWIO_REO_R0_ERROR_DESTINATION_RING_CTRL_IX_0
1155  * to map destination to rings
1156  */
1157 #define HAL_REO_ERR_REMAP_IX0(_VALUE, _OFFSET) \
1158 	((_VALUE) << \
1159 	 (HWIO_REO_R0_ERROR_DESTINATION_MAPPING_IX_0_ERROR_ ## \
1160 	  DESTINATION_RING_ ## _OFFSET ## _SHFT))
1161 
1162 /*
1163  * Macro to access HWIO_REO_R0_ERROR_DESTINATION_RING_CTRL_IX_1
1164  * to map destination to rings
1165  */
1166 #define HAL_REO_ERR_REMAP_IX1(_VALUE, _OFFSET) \
1167 	((_VALUE) << \
1168 	 (HWIO_REO_R0_ERROR_DESTINATION_MAPPING_IX_1_ERROR_ ## \
1169 	  DESTINATION_RING_ ## _OFFSET ## _SHFT))
1170 
1171 /*
1172  * Macro to access HWIO_REO_R0_DESTINATION_RING_CTRL_IX_0
1173  * to map destination to rings
1174  */
1175 #define HAL_REO_REMAP_IX0(_VALUE, _OFFSET) \
1176 	((_VALUE) << \
1177 	 (HWIO_REO_R0_DESTINATION_RING_CTRL_IX_0_DEST_RING_MAPPING_ ## \
1178 	  _OFFSET ## _SHFT))
1179 
1180 /*
1181  * Macro to access HWIO_REO_R0_DESTINATION_RING_CTRL_IX_1
1182  * to map destination to rings
1183  */
1184 #define HAL_REO_REMAP_IX2(_VALUE, _OFFSET) \
1185 	((_VALUE) << \
1186 	 (HWIO_REO_R0_DESTINATION_RING_CTRL_IX_2_DEST_RING_MAPPING_ ## \
1187 	  _OFFSET ## _SHFT))
1188 
1189 /*
1190  * Macro to access HWIO_REO_R0_DESTINATION_RING_CTRL_IX_3
1191  * to map destination to rings
1192  */
1193 #define HAL_REO_REMAP_IX3(_VALUE, _OFFSET) \
1194 	((_VALUE) << \
1195 	 (HWIO_REO_R0_DESTINATION_RING_CTRL_IX_3_DEST_RING_MAPPING_ ## \
1196 	  _OFFSET ## _SHFT))
1197 
1198 /**
1199  * hal_reo_read_write_ctrl_ix - Read or write REO_DESTINATION_RING_CTRL_IX
1200  * @hal_soc_hdl: HAL SOC handle
1201  * @read: boolean value to indicate if read or write
1202  * @ix0: pointer to store IX0 reg value
1203  * @ix1: pointer to store IX1 reg value
1204  * @ix2: pointer to store IX2 reg value
1205  * @ix3: pointer to store IX3 reg value
1206  */
1207 void hal_reo_read_write_ctrl_ix(hal_soc_handle_t hal_soc_hdl, bool read,
1208 				uint32_t *ix0, uint32_t *ix1,
1209 				uint32_t *ix2, uint32_t *ix3);
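/*
 * Composition sketch (illustrative): the remap macros above shift a
 * REO_REMAP_* destination into its mapping field, so an IX register value is
 * built by OR-ing one macro per mapping slot and then written through
 * hal_reo_read_write_ctrl_ix(); the destination choices here are arbitrary.
 *
 *	uint32_t ix0 = HAL_REO_REMAP_IX0(REO_REMAP_SW1, 0) |
 *		       HAL_REO_REMAP_IX0(REO_REMAP_SW2, 1) |
 *		       HAL_REO_REMAP_IX0(REO_REMAP_SW3, 2) |
 *		       HAL_REO_REMAP_IX0(REO_REMAP_SW4, 3);
 *
 *	hal_reo_read_write_ctrl_ix(hal_soc_hdl, false, &ix0, NULL, NULL, NULL);
 */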
1210 
1211 /**
1212  * hal_srng_dst_set_hp_paddr_confirm() - Set physical address to dest SRNG head
1213  *  pointer and confirm that write went through by reading back the value
1214  * @sring: sring pointer
1215  * @paddr: physical address
1216  *
1217  * Return: None
1218  */
1219 extern void hal_srng_dst_set_hp_paddr_confirm(struct hal_srng *sring,
1220 					      uint64_t paddr);
1221 
1222 /**
1223  * hal_srng_dst_init_hp() - Initialize head pointer with cached head pointer
1224  * @hal_soc: hal_soc handle
1225  * @srng: sring pointer
1226  * @vaddr: virtual address
1227  */
1228 void hal_srng_dst_init_hp(struct hal_soc_handle *hal_soc,
1229 			  struct hal_srng *srng,
1230 			  uint32_t *vaddr);
1231 
1232 /**
1233  * hal_srng_cleanup - Deinitialize HW SRNG ring.
1234  * @hal_soc: Opaque HAL SOC handle
1235  * @hal_srng: Opaque HAL SRNG pointer
1236  */
1237 void hal_srng_cleanup(void *hal_soc, hal_ring_handle_t hal_ring_hdl);
1238 
1239 static inline bool hal_srng_initialized(hal_ring_handle_t hal_ring_hdl)
1240 {
1241 	struct hal_srng *srng = (struct hal_srng *)hal_ring_hdl;
1242 
1243 	return !!srng->initialized;
1244 }
1245 
1246 /**
1247  * hal_srng_dst_peek - Check if there are any entries in the ring (peek)
1248  * @hal_soc: Opaque HAL SOC handle
1249  * @hal_ring_hdl: Destination ring pointer
1250  *
1251  * Caller takes responsibility for any locking needs.
1252  *
1253  * Return: Opaque pointer for next ring entry; NULL on failure
1254  */
1255 static inline
1256 void *hal_srng_dst_peek(hal_soc_handle_t hal_soc_hdl,
1257 			hal_ring_handle_t hal_ring_hdl)
1258 {
1259 	struct hal_srng *srng = (struct hal_srng *)hal_ring_hdl;
1260 
1261 	if (srng->u.dst_ring.tp != srng->u.dst_ring.cached_hp)
1262 		return (void *)(&srng->ring_base_vaddr[srng->u.dst_ring.tp]);
1263 
1264 	return NULL;
1265 }
1266 
1267 
1268 /**
1269  * hal_mem_dma_cache_sync - Cache sync the specified virtual address Range
1270  * @hal_soc: HAL soc handle
1271  * @desc: desc start address
1272  * @entry_size: size of memory to sync
1273  *
1274  * Return: void
1275  */
1276 #if defined(__LINUX_MIPS32_ARCH__) || defined(__LINUX_MIPS64_ARCH__)
1277 static inline void hal_mem_dma_cache_sync(struct hal_soc *soc, uint32_t *desc,
1278 					  uint32_t entry_size)
1279 {
1280 	qdf_nbuf_dma_inv_range((void *)desc, (void *)(desc + entry_size));
1281 }
1282 #else
1283 static inline void hal_mem_dma_cache_sync(struct hal_soc *soc, uint32_t *desc,
1284 					  uint32_t entry_size)
1285 {
1286 	qdf_mem_dma_cache_sync(soc->qdf_dev, qdf_mem_virt_to_phys(desc),
1287 			       QDF_DMA_FROM_DEVICE,
1288 			       (entry_size * sizeof(uint32_t)));
1289 }
1290 #endif
1291 
1292 /**
1293  * hal_srng_access_start_unlocked - Start ring access (unlocked). Should use
1294  * hal_srng_access_start if locked access is required
1295  *
1296  * @hal_soc: Opaque HAL SOC handle
1297  * @hal_ring_hdl: Ring pointer (Source or Destination ring)
1298  *
1299  * This API doesn't implement any byte-order conversion on reading hp/tp.
1300  * So, use this API only for those srngs for which the target writes hp/tp values to
1301  * the DDR in the Host order.
1302  *
1303  * Return: 0 on success; error on failure
1304  */
1305 static inline int
1306 hal_srng_access_start_unlocked(hal_soc_handle_t hal_soc_hdl,
1307 			       hal_ring_handle_t hal_ring_hdl)
1308 {
1309 	struct hal_srng *srng = (struct hal_srng *)hal_ring_hdl;
1310 	struct hal_soc *soc = (struct hal_soc *)hal_soc_hdl;
1311 	uint32_t *desc;
1312 
1313 	if (srng->ring_dir == HAL_SRNG_SRC_RING)
1314 		srng->u.src_ring.cached_tp =
1315 			*(volatile uint32_t *)(srng->u.src_ring.tp_addr);
1316 	else {
1317 		srng->u.dst_ring.cached_hp =
1318 			*(volatile uint32_t *)(srng->u.dst_ring.hp_addr);
1319 
1320 		if (srng->flags & HAL_SRNG_CACHED_DESC) {
1321 			desc = hal_srng_dst_peek(hal_soc_hdl, hal_ring_hdl);
1322 			if (qdf_likely(desc)) {
1323 				hal_mem_dma_cache_sync(soc, desc,
1324 						       srng->entry_size);
1325 				qdf_prefetch(desc);
1326 			}
1327 		}
1328 	}
1329 
1330 	return 0;
1331 }
1332 
1333 /**
1334  * hal_le_srng_access_start_unlocked_in_cpu_order - Start ring access
1335  * (unlocked) with endianness correction.
1336  * @hal_soc: Opaque HAL SOC handle
1337  * @hal_ring_hdl: Ring pointer (Source or Destination ring)
1338  *
1339  * This API provides the same functionality as hal_srng_access_start_unlocked()
1340  * except that it converts the little-endian formatted hp/tp values to
1341  * Host order on reading them. So, this API should only be used for those srngs
1342  * for which the target always writes hp/tp values in little-endian order
1343  * regardless of Host order.
1344  *
1345  * Also, this API doesn't take the lock. For locked access, use
1346  * hal_srng_access_start/hal_le_srng_access_start_in_cpu_order.
1347  *
1348  * Return: 0 on success; error on failure
1349  */
1350 static inline int
1351 hal_le_srng_access_start_unlocked_in_cpu_order(
1352 	hal_soc_handle_t hal_soc_hdl,
1353 	hal_ring_handle_t hal_ring_hdl)
1354 {
1355 	struct hal_srng *srng = (struct hal_srng *)hal_ring_hdl;
1356 	struct hal_soc *soc = (struct hal_soc *)hal_soc_hdl;
1357 	uint32_t *desc;
1358 
1359 	if (srng->ring_dir == HAL_SRNG_SRC_RING)
1360 		srng->u.src_ring.cached_tp =
1361 			qdf_le32_to_cpu(*(volatile uint32_t *)
1362 					(srng->u.src_ring.tp_addr));
1363 	else {
1364 		srng->u.dst_ring.cached_hp =
1365 			qdf_le32_to_cpu(*(volatile uint32_t *)
1366 					(srng->u.dst_ring.hp_addr));
1367 
1368 		if (srng->flags & HAL_SRNG_CACHED_DESC) {
1369 			desc = hal_srng_dst_peek(hal_soc_hdl, hal_ring_hdl);
1370 			if (qdf_likely(desc)) {
1371 				hal_mem_dma_cache_sync(soc, desc,
1372 						       srng->entry_size);
1373 				qdf_prefetch(desc);
1374 			}
1375 		}
1376 	}
1377 
1378 	return 0;
1379 }
1380 
1381 /**
1382  * hal_srng_try_access_start - Try to start (locked) ring access
1383  *
1384  * @hal_soc: Opaque HAL SOC handle
1385  * @hal_ring_hdl: Ring pointer (Source or Destination ring)
1386  *
1387  * Return: 0 on success; error on failure
1388  */
1389 static inline int hal_srng_try_access_start(hal_soc_handle_t hal_soc_hdl,
1390 					    hal_ring_handle_t hal_ring_hdl)
1391 {
1392 	struct hal_srng *srng = (struct hal_srng *)hal_ring_hdl;
1393 
1394 	if (qdf_unlikely(!hal_ring_hdl)) {
1395 		qdf_print("Error: Invalid hal_ring\n");
1396 		return -EINVAL;
1397 	}
1398 
1399 	if (!SRNG_TRY_LOCK(&(srng->lock)))
1400 		return -EINVAL;
1401 
1402 	return hal_srng_access_start_unlocked(hal_soc_hdl, hal_ring_hdl);
1403 }
1404 
1405 /**
1406  * hal_srng_access_start - Start (locked) ring access
1407  *
1408  * @hal_soc: Opaque HAL SOC handle
1409  * @hal_ring_hdl: Ring pointer (Source or Destination ring)
1410  *
1411  * This API doesn't implement any byte-order conversion on reading hp/tp.
1412  * So, use this API only for those srngs for which the target writes hp/tp values to
1413  * the DDR in the Host order.
1414  *
1415  * Return: 0 on success; error on failure
1416  */
1417 static inline int hal_srng_access_start(hal_soc_handle_t hal_soc_hdl,
1418 					hal_ring_handle_t hal_ring_hdl)
1419 {
1420 	struct hal_srng *srng = (struct hal_srng *)hal_ring_hdl;
1421 
1422 	if (qdf_unlikely(!hal_ring_hdl)) {
1423 		qdf_print("Error: Invalid hal_ring\n");
1424 		return -EINVAL;
1425 	}
1426 
1427 	SRNG_LOCK(&(srng->lock));
1428 
1429 	return hal_srng_access_start_unlocked(hal_soc_hdl, hal_ring_hdl);
1430 }
1431 
1432 /**
1433  * hal_le_srng_access_start_in_cpu_order - Start (locked) ring access with
1434  * endianness correction
1435  * @hal_soc: Opaque HAL SOC handle
1436  * @hal_ring_hdl: Ring pointer (Source or Destination ring)
1437  *
1438  * This API provides the same functionality as hal_srng_access_start()
1439  * except that it converts the little-endian formatted hp/tp values to
1440  * Host order on reading them. So, this API should only be used for those srngs
1441  * for which the target always writes hp/tp values in little-endian order
1442  * regardless of Host order.
1443  *
1444  * Return: 0 on success; error on failure
1445  */
1446 static inline int
1447 hal_le_srng_access_start_in_cpu_order(
1448 	hal_soc_handle_t hal_soc_hdl,
1449 	hal_ring_handle_t hal_ring_hdl)
1450 {
1451 	struct hal_srng *srng = (struct hal_srng *)hal_ring_hdl;
1452 
1453 	if (qdf_unlikely(!hal_ring_hdl)) {
1454 		qdf_print("Error: Invalid hal_ring\n");
1455 		return -EINVAL;
1456 	}
1457 
1458 	SRNG_LOCK(&(srng->lock));
1459 
1460 	return hal_le_srng_access_start_unlocked_in_cpu_order(
1461 			hal_soc_hdl, hal_ring_hdl);
1462 }
1463 
1464 /**
1465  * hal_srng_dst_get_next - Get next entry from a destination ring
1466  * @hal_soc: Opaque HAL SOC handle
1467  * @hal_ring_hdl: Destination ring pointer
1468  *
1469  * Return: Opaque pointer for next ring entry; NULL on failure
1470  */
1471 static inline
1472 void *hal_srng_dst_get_next(void *hal_soc,
1473 			    hal_ring_handle_t hal_ring_hdl)
1474 {
1475 	struct hal_srng *srng = (struct hal_srng *)hal_ring_hdl;
1476 	uint32_t *desc;
1477 
1478 	if (srng->u.dst_ring.tp == srng->u.dst_ring.cached_hp)
1479 		return NULL;
1480 
1481 	desc = &srng->ring_base_vaddr[srng->u.dst_ring.tp];
1482 	/* TODO: Using % is expensive, but we have to do this since
1483 	 * size of some SRNG rings is not power of 2 (due to descriptor
1484 	 * sizes). Need to create separate API for rings used
1485 	 * per-packet, with sizes power of 2 (TCL2SW, REO2SW,
1486 	 * SW2RXDMA and CE rings)
1487 	 */
1488 	srng->u.dst_ring.tp = (srng->u.dst_ring.tp + srng->entry_size);
1489 	if (srng->u.dst_ring.tp == srng->ring_size)
1490 		srng->u.dst_ring.tp = 0;
1491 
1492 	if (srng->flags & HAL_SRNG_CACHED_DESC) {
1493 		struct hal_soc *soc = (struct hal_soc *)hal_soc;
1494 		uint32_t *desc_next;
1495 		uint32_t tp;
1496 
1497 		tp = srng->u.dst_ring.tp;
1498 		desc_next = &srng->ring_base_vaddr[srng->u.dst_ring.tp];
1499 		hal_mem_dma_cache_sync(soc, desc_next, srng->entry_size);
1500 		qdf_prefetch(desc_next);
1501 	}
1502 
1503 	return (void *)desc;
1504 }
1505 
1506 /**
1507  * hal_srng_dst_get_next_cached - Get cached next entry
1508  * @hal_soc: Opaque HAL SOC handle
1509  * @hal_ring_hdl: Destination ring pointer
1510  *
1511  * Get next entry from a destination ring and move cached tail pointer
1512  *
1513  * Return: Opaque pointer for next ring entry; NULL on failure
1514  */
1515 static inline
1516 void *hal_srng_dst_get_next_cached(void *hal_soc,
1517 				   hal_ring_handle_t hal_ring_hdl)
1518 {
1519 	struct hal_srng *srng = (struct hal_srng *)hal_ring_hdl;
1520 	uint32_t *desc;
1521 	uint32_t *desc_next;
1522 
1523 	if (srng->u.dst_ring.tp == srng->u.dst_ring.cached_hp)
1524 		return NULL;
1525 
1526 	desc = &srng->ring_base_vaddr[srng->u.dst_ring.tp];
1527 	/* TODO: Using % is expensive, but we have to do this since
1528 	 * size of some SRNG rings is not power of 2 (due to descriptor
1529 	 * sizes). Need to create separate API for rings used
1530 	 * per-packet, with sizes power of 2 (TCL2SW, REO2SW,
1531 	 * SW2RXDMA and CE rings)
1532 	 */
1533 	srng->u.dst_ring.tp = (srng->u.dst_ring.tp + srng->entry_size);
1534 	if (srng->u.dst_ring.tp == srng->ring_size)
1535 		srng->u.dst_ring.tp = 0;
1536 
1537 	desc_next = &srng->ring_base_vaddr[srng->u.dst_ring.tp];
1538 	qdf_prefetch(desc_next);
1539 	return (void *)desc;
1540 }
1541 
1542 /**
1543  * hal_srng_dst_dec_tp - decrement the TP of the Dst ring by one entry
1544  * @hal_soc: Opaque HAL SOC handle
1545  * @hal_ring_hdl: Destination ring pointer
1546  *
1547  * Moves the tail pointer in the destination ring back by one entry
1548  *
1549  */
1550 static inline
1551 void hal_srng_dst_dec_tp(void *hal_soc, hal_ring_handle_t hal_ring_hdl)
1552 {
1553 	struct hal_srng *srng = (struct hal_srng *)hal_ring_hdl;
1554 
1555 	if (qdf_unlikely(!srng->u.dst_ring.tp))
1556 		srng->u.dst_ring.tp = (srng->ring_size - srng->entry_size);
1557 	else
1558 		srng->u.dst_ring.tp -= srng->entry_size;
1559 }
1560 
1561 static inline int hal_srng_lock(hal_ring_handle_t hal_ring_hdl)
1562 {
1563 	struct hal_srng *srng = (struct hal_srng *)hal_ring_hdl;
1564 
1565 	if (qdf_unlikely(!hal_ring_hdl)) {
1566 		qdf_print("error: invalid hal_ring\n");
1567 		return -EINVAL;
1568 	}
1569 
1570 	SRNG_LOCK(&(srng->lock));
1571 	return 0;
1572 }
1573 
1574 static inline int hal_srng_unlock(hal_ring_handle_t hal_ring_hdl)
1575 {
1576 	struct hal_srng *srng = (struct hal_srng *)hal_ring_hdl;
1577 
1578 	if (qdf_unlikely(!hal_ring_hdl)) {
1579 		qdf_print("error: invalid hal_ring\n");
1580 		return -EINVAL;
1581 	}
1582 
1583 	SRNG_UNLOCK(&(srng->lock));
1584 	return 0;
1585 }
1586 
1587 /**
1588  * hal_srng_dst_get_next_hp - Get next entry from a destination ring and move
1589  * cached head pointer
1590  *
1591  * @hal_soc: Opaque HAL SOC handle
1592  * @hal_ring_hdl: Destination ring pointer
1593  *
1594  * Return: Opaque pointer for next ring entry; NULL on failure
1595  */
1596 static inline void *
1597 hal_srng_dst_get_next_hp(hal_soc_handle_t hal_soc_hdl,
1598 			 hal_ring_handle_t hal_ring_hdl)
1599 {
1600 	struct hal_srng *srng = (struct hal_srng *)hal_ring_hdl;
1601 	uint32_t *desc;
1602 	/* TODO: Using % is expensive, but we have to do this since
1603 	 * size of some SRNG rings is not power of 2 (due to descriptor
1604 	 * sizes). Need to create separate API for rings used
1605 	 * per-packet, with sizes power of 2 (TCL2SW, REO2SW,
1606 	 * SW2RXDMA and CE rings)
1607 	 */
1608 	uint32_t next_hp = (srng->u.dst_ring.cached_hp + srng->entry_size) %
1609 		srng->ring_size;
1610 
1611 	if (next_hp != srng->u.dst_ring.tp) {
1612 		desc = &(srng->ring_base_vaddr[srng->u.dst_ring.cached_hp]);
1613 		srng->u.dst_ring.cached_hp = next_hp;
1614 		return (void *)desc;
1615 	}
1616 
1617 	return NULL;
1618 }
1619 
1620 /**
1621  * hal_srng_dst_peek_sync - Check if there are any entries in the ring (peek)
1622  * @hal_soc: Opaque HAL SOC handle
1623  * @hal_ring_hdl: Destination ring pointer
1624  *
1625  * Sync cached head pointer with HW.
1626  * Caller takes responsibility for any locking needs.
1627  *
1628  * Return: Opaque pointer for next ring entry; NULL on failure
1629  */
1630 static inline
1631 void *hal_srng_dst_peek_sync(hal_soc_handle_t hal_soc_hdl,
1632 			     hal_ring_handle_t hal_ring_hdl)
1633 {
1634 	struct hal_srng *srng = (struct hal_srng *)hal_ring_hdl;
1635 
1636 	srng->u.dst_ring.cached_hp =
1637 		*(volatile uint32_t *)(srng->u.dst_ring.hp_addr);
1638 
1639 	if (srng->u.dst_ring.tp != srng->u.dst_ring.cached_hp)
1640 		return (void *)(&(srng->ring_base_vaddr[srng->u.dst_ring.tp]));
1641 
1642 	return NULL;
1643 }
1644 
1645 /**
1646  * hal_srng_dst_peek_sync_locked - Peek for any entries in the ring
1647  * @hal_soc: Opaque HAL SOC handle
1648  * @hal_ring_hdl: Destination ring pointer
1649  *
1650  * Sync cached head pointer with HW.
1651  * This function takes up SRNG_LOCK. Should not be called with SRNG lock held.
1652  *
1653  * Return: Opaque pointer for next ring entry; NULL on failure
1654  */
1655 static inline
1656 void *hal_srng_dst_peek_sync_locked(hal_soc_handle_t hal_soc_hdl,
1657 				    hal_ring_handle_t hal_ring_hdl)
1658 {
1659 	struct hal_srng *srng = (struct hal_srng *)hal_ring_hdl;
1660 	void *ring_desc_ptr = NULL;
1661 
1662 	if (qdf_unlikely(!hal_ring_hdl)) {
1663 		qdf_print("Error: Invalid hal_ring\n");
1664 		return  NULL;
1665 	}
1666 
1667 	SRNG_LOCK(&srng->lock);
1668 
1669 	ring_desc_ptr = hal_srng_dst_peek_sync(hal_soc_hdl, hal_ring_hdl);
1670 
1671 	SRNG_UNLOCK(&srng->lock);
1672 
1673 	return ring_desc_ptr;
1674 }
1675 
1676 #define hal_srng_dst_num_valid_nolock(hal_soc, hal_ring_hdl, sync_hw_ptr) \
1677 		hal_srng_dst_num_valid(hal_soc, hal_ring_hdl, sync_hw_ptr)
1678 
1679 /**
1680  * hal_srng_dst_num_valid - Returns number of valid entries (to be processed
1681  * by SW) in destination ring
1682  *
1683  * @hal_soc: Opaque HAL SOC handle
1684  * @hal_ring_hdl: Destination ring pointer
1685  * @sync_hw_ptr: Sync cached head pointer with HW
1686  *
1687  */
1688 static inline
1689 uint32_t hal_srng_dst_num_valid(void *hal_soc,
1690 				hal_ring_handle_t hal_ring_hdl,
1691 				int sync_hw_ptr)
1692 {
1693 	struct hal_srng *srng = (struct hal_srng *)hal_ring_hdl;
1694 	uint32_t hp;
1695 	uint32_t tp = srng->u.dst_ring.tp;
1696 
1697 	if (sync_hw_ptr) {
1698 		hp = *(volatile uint32_t *)(srng->u.dst_ring.hp_addr);
1699 		srng->u.dst_ring.cached_hp = hp;
1700 	} else {
1701 		hp = srng->u.dst_ring.cached_hp;
1702 	}
1703 
1704 	if (hp >= tp)
1705 		return (hp - tp) / srng->entry_size;
1706 
1707 	return (srng->ring_size - tp + hp) / srng->entry_size;
1708 }
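
/*
 * Example (illustrative sketch; reschedule_ring_processing() is a
 * placeholder): checking for pending destination ring entries. Passing
 * sync_hw_ptr = 1 refreshes the cached head pointer from the HW shared
 * location before counting.
 *
 *	if (hal_srng_dst_num_valid(hal_soc, hal_ring_hdl, 1))
 *		reschedule_ring_processing();
 */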
1709 
1710 /**
1711  * hal_srng_dst_inv_cached_descs - API to invalidate descriptors in batch mode
1712  * @hal_soc: Opaque HAL SOC handle
1713  * @hal_ring_hdl: Destination ring pointer
1714  * @entry_count: Number of valid entries to invalidate
1715  *
1716  * Invalidates a set of cached descriptors starting from TP to cached_HP
1717  *
1718  * Return: None
1719  */
1720 static inline void hal_srng_dst_inv_cached_descs(void *hal_soc,
1721 						 hal_ring_handle_t hal_ring_hdl,
1722 						 uint32_t entry_count)
1723 {
1724 	struct hal_srng *srng = (struct hal_srng *)hal_ring_hdl;
1725 	uint32_t *first_desc;
1726 	uint32_t *last_desc;
1727 	uint32_t last_desc_index;
1728 
1729 	/*
1730 	 * If SRNG does not have cached descriptors this
1731 	 * API call should be a no op
1732 	 */
1733 	if (!(srng->flags & HAL_SRNG_CACHED_DESC))
1734 		return;
1735 
1736 	if (!entry_count)
1737 		return;
1738 
1739 	first_desc = &srng->ring_base_vaddr[srng->u.dst_ring.tp];
1740 
1741 	last_desc_index = (srng->u.dst_ring.tp +
1742 			   (entry_count * srng->entry_size)) %
1743 			  srng->ring_size;
1744 
1745 	last_desc =  &srng->ring_base_vaddr[last_desc_index];
1746 
1747 	if (last_desc > (uint32_t *)first_desc)
1748 		/* invalidate from tp to cached_hp */
1749 		qdf_nbuf_dma_inv_range_no_dsb((void *)first_desc,
1750 					      (void *)(last_desc));
1751 	else {
1752 		/* invalidate from tp to end of the ring */
1753 		qdf_nbuf_dma_inv_range_no_dsb((void *)first_desc,
1754 					      (void *)srng->ring_vaddr_end);
1755 
1756 		/* invalidate from start of ring to cached_hp */
1757 		qdf_nbuf_dma_inv_range_no_dsb((void *)srng->ring_base_vaddr,
1758 					      (void *)last_desc);
1759 	}
1760 	qdf_dsb();
1761 }
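
/*
 * Example (illustrative sketch): batch invalidation is typically paired
 * with the valid-entry count obtained via hal_srng_dst_num_valid(), so
 * that only the descriptors between TP and cached HP are invalidated
 * before they are parsed.
 *
 *	num_valid = hal_srng_dst_num_valid(hal_soc, hal_ring_hdl, 1);
 *	hal_srng_dst_inv_cached_descs(hal_soc, hal_ring_hdl, num_valid);
 */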
1762 
1763 /**
1764  * hal_srng_dst_num_valid_locked - Returns num valid entries to be processed
1765  *
1766  * @hal_soc: Opaque HAL SOC handle
1767  * @hal_ring_hdl: Destination ring pointer
1768  * @sync_hw_ptr: Sync cached head pointer with HW
1769  *
1770  * Returns number of valid entries to be processed by the host driver. The
1771  * function takes up SRNG lock.
1772  *
1773  * Return: Number of valid destination entries
1774  */
1775 static inline uint32_t
1776 hal_srng_dst_num_valid_locked(hal_soc_handle_t hal_soc,
1777 			      hal_ring_handle_t hal_ring_hdl,
1778 			      int sync_hw_ptr)
1779 {
1780 	uint32_t num_valid;
1781 	struct hal_srng *srng = (struct hal_srng *)hal_ring_hdl;
1782 
1783 	SRNG_LOCK(&srng->lock);
1784 	num_valid = hal_srng_dst_num_valid(hal_soc, hal_ring_hdl, sync_hw_ptr);
1785 	SRNG_UNLOCK(&srng->lock);
1786 
1787 	return num_valid;
1788 }
1789 
1790 /**
1791  * hal_srng_sync_cachedhp - sync cached hp pointer from hw hp
1792  *
1793  * @hal_soc: Opaque HAL SOC handle
1794  * @hal_ring_hdl: Destination ring pointer
1795  *
1796  */
1797 static inline
1798 void hal_srng_sync_cachedhp(void *hal_soc,
1799 				hal_ring_handle_t hal_ring_hdl)
1800 {
1801 	struct hal_srng *srng = (struct hal_srng *)hal_ring_hdl;
1802 	uint32_t hp;
1803 
1804 	hp = *(volatile uint32_t *)(srng->u.dst_ring.hp_addr);
1805 	srng->u.dst_ring.cached_hp = hp;
1806 }
1807 
1808 /**
1809  * hal_srng_src_reap_next - Reap next entry from a source ring and move reap
1810  * pointer. This can be used to release any buffers associated with completed
1811  * ring entries. Note that this should not be used for posting new descriptor
1812  * entries. Posting of new entries should be done only using
1813  * hal_srng_src_get_next_reaped when this function is used for reaping.
1814  *
1815  * @hal_soc: Opaque HAL SOC handle
1816  * @hal_ring_hdl: Source ring pointer
1817  *
1818  * Return: Opaque pointer for next ring entry; NULL on failure
1819  */
1820 static inline void *
1821 hal_srng_src_reap_next(void *hal_soc, hal_ring_handle_t hal_ring_hdl)
1822 {
1823 	struct hal_srng *srng = (struct hal_srng *)hal_ring_hdl;
1824 	uint32_t *desc;
1825 
1826 	/* TODO: Using % is expensive, but we have to do this since
1827 	 * size of some SRNG rings is not power of 2 (due to descriptor
1828 	 * sizes). Need to create separate API for rings used
1829 	 * per-packet, with sizes power of 2 (TCL2SW, REO2SW,
1830 	 * SW2RXDMA and CE rings)
1831 	 */
1832 	uint32_t next_reap_hp = (srng->u.src_ring.reap_hp + srng->entry_size) %
1833 		srng->ring_size;
1834 
1835 	if (next_reap_hp != srng->u.src_ring.cached_tp) {
1836 		desc = &(srng->ring_base_vaddr[next_reap_hp]);
1837 		srng->u.src_ring.reap_hp = next_reap_hp;
1838 		return (void *)desc;
1839 	}
1840 
1841 	return NULL;
1842 }
1843 
1844 /**
1845  * hal_srng_src_get_next_reaped - Get next entry from a source ring that is
1846  * already reaped using hal_srng_src_reap_next, for posting new entries to
1847  * the ring
1848  *
1849  * @hal_soc: Opaque HAL SOC handle
1850  * @hal_ring_hdl: Source ring pointer
1851  *
1852  * Return: Opaque pointer for next (reaped) source ring entry; NULL on failure
1853  */
1854 static inline void *
1855 hal_srng_src_get_next_reaped(void *hal_soc, hal_ring_handle_t hal_ring_hdl)
1856 {
1857 	struct hal_srng *srng = (struct hal_srng *)hal_ring_hdl;
1858 	uint32_t *desc;
1859 
1860 	if (srng->u.src_ring.hp != srng->u.src_ring.reap_hp) {
1861 		desc = &(srng->ring_base_vaddr[srng->u.src_ring.hp]);
1862 		srng->u.src_ring.hp = (srng->u.src_ring.hp + srng->entry_size) %
1863 			srng->ring_size;
1864 
1865 		return (void *)desc;
1866 	}
1867 
1868 	return NULL;
1869 }
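
/*
 * Example (illustrative sketch; release_buffer() and fill_desc() are
 * placeholders): pairing of the reap and reaped-entry APIs described
 * above. Buffers of completed entries are released through the reap
 * pointer, and the already-reaped entries are later reused for new
 * postings.
 *
 *	desc = hal_srng_src_reap_next(hal_soc, hal_ring_hdl);
 *	if (desc)
 *		release_buffer(desc);
 *
 *	desc = hal_srng_src_get_next_reaped(hal_soc, hal_ring_hdl);
 *	if (desc)
 *		fill_desc(desc);
 */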
1870 
1871 /**
1872  * hal_srng_src_pending_reap_next - Reap next entry from a source ring and
1873  * move reap pointer. This API is used in detach path to release any buffers
1874  * associated with ring entries which are pending reap.
1875  *
1876  * @hal_soc: Opaque HAL SOC handle
1877  * @hal_ring_hdl: Source ring pointer
1878  *
1879  * Return: Opaque pointer for next ring entry; NULL on failure
1880  */
1881 static inline void *
1882 hal_srng_src_pending_reap_next(void *hal_soc, hal_ring_handle_t hal_ring_hdl)
1883 {
1884 	struct hal_srng *srng = (struct hal_srng *)hal_ring_hdl;
1885 	uint32_t *desc;
1886 
1887 	uint32_t next_reap_hp = (srng->u.src_ring.reap_hp + srng->entry_size) %
1888 		srng->ring_size;
1889 
1890 	if (next_reap_hp != srng->u.src_ring.hp) {
1891 		desc = &(srng->ring_base_vaddr[next_reap_hp]);
1892 		srng->u.src_ring.reap_hp = next_reap_hp;
1893 		return (void *)desc;
1894 	}
1895 
1896 	return NULL;
1897 }
1898 
1899 /**
1900  * hal_srng_src_done_val - Get number of entries between reap HP and cached TP in a source ring
1901  *
1902  * @hal_soc: Opaque HAL SOC handle
1903  * @hal_ring_hdl: Source ring pointer
1904  *
1905  * Return: Number of entries between reap HP and cached TP
1906  */
1907 static inline uint32_t
1908 hal_srng_src_done_val(void *hal_soc, hal_ring_handle_t hal_ring_hdl)
1909 {
1910 	struct hal_srng *srng = (struct hal_srng *)hal_ring_hdl;
1911 	/* TODO: Using % is expensive, but we have to do this since
1912 	 * size of some SRNG rings is not power of 2 (due to descriptor
1913 	 * sizes). Need to create separate API for rings used
1914 	 * per-packet, with sizes power of 2 (TCL2SW, REO2SW,
1915 	 * SW2RXDMA and CE rings)
1916 	 */
1917 	uint32_t next_reap_hp = (srng->u.src_ring.reap_hp + srng->entry_size) %
1918 		srng->ring_size;
1919 
1920 	if (next_reap_hp == srng->u.src_ring.cached_tp)
1921 		return 0;
1922 
1923 	if (srng->u.src_ring.cached_tp > next_reap_hp)
1924 		return (srng->u.src_ring.cached_tp - next_reap_hp) /
1925 			srng->entry_size;
1926 	else
1927 		return ((srng->ring_size - next_reap_hp) +
1928 			srng->u.src_ring.cached_tp) / srng->entry_size;
1929 }
1930 
1931 /**
1932  * hal_get_entrysize_from_srng() - Retrieve ring entry size
1933  * @hal_ring_hdl: Source ring pointer
1934  *
1935  * srng->entry_size value is in units of 4-byte words, so it is left
1936  * shifted by 2 to return the entry size in bytes.
1937  *
1938  * Return: Entry size in bytes
1939  */
1940 static inline
1941 uint8_t hal_get_entrysize_from_srng(hal_ring_handle_t hal_ring_hdl)
1942 {
1943 	struct hal_srng *srng = (struct hal_srng *)hal_ring_hdl;
1944 
1945 	return srng->entry_size << 2;
1946 }
1947 
1948 /**
1949  * hal_get_sw_hptp - Get SW head and tail pointer location for any ring
1950  * @hal_soc: Opaque HAL SOC handle
1951  * @hal_ring_hdl: Source ring pointer
1952  * @tailp: Tail Pointer
1953  * @headp: Head Pointer
1954  *
1955  * Return: None; tail and head pointer values are updated through the arguments
1956  */
1957 static inline
1958 void hal_get_sw_hptp(void *hal_soc, hal_ring_handle_t hal_ring_hdl,
1959 		     uint32_t *tailp, uint32_t *headp)
1960 {
1961 	struct hal_srng *srng = (struct hal_srng *)hal_ring_hdl;
1962 
1963 	if (srng->ring_dir == HAL_SRNG_SRC_RING) {
1964 		*headp = srng->u.src_ring.hp;
1965 		*tailp = *srng->u.src_ring.tp_addr;
1966 	} else {
1967 		*tailp = srng->u.dst_ring.tp;
1968 		*headp = *srng->u.dst_ring.hp_addr;
1969 	}
1970 }
1971 
1972 #if defined(CLEAR_SW2TCL_CONSUMED_DESC)
1973 /**
1974  * hal_srng_src_get_next_consumed - Get the next desc if consumed by HW
1975  *
1976  * @hal_soc: Opaque HAL SOC handle
1977  * @hal_ring_hdl: Source ring pointer
1978  *
1979  * Return: pointer to descriptor if consumed by HW, else NULL
1980  */
1981 static inline
1982 void *hal_srng_src_get_next_consumed(void *hal_soc,
1983 				     hal_ring_handle_t hal_ring_hdl)
1984 {
1985 	struct hal_srng *srng = (struct hal_srng *)hal_ring_hdl;
1986 	uint32_t *desc = NULL;
1987 	/* TODO: Using % is expensive, but we have to do this since
1988 	 * size of some SRNG rings is not power of 2 (due to descriptor
1989 	 * sizes). Need to create separate API for rings used
1990 	 * per-packet, with sizes power of 2 (TCL2SW, REO2SW,
1991 	 * SW2RXDMA and CE rings)
1992 	 */
1993 	uint32_t next_entry = (srng->last_desc_cleared + srng->entry_size) %
1994 			      srng->ring_size;
1995 
1996 	if (next_entry != srng->u.src_ring.cached_tp) {
1997 		desc = &srng->ring_base_vaddr[next_entry];
1998 		srng->last_desc_cleared = next_entry;
1999 	}
2000 
2001 	return desc;
2002 }
2003 
2004 #else
2005 static inline
2006 void *hal_srng_src_get_next_consumed(void *hal_soc,
2007 				     hal_ring_handle_t hal_ring_hdl)
2008 {
2009 	return NULL;
2010 }
2011 #endif /* CLEAR_SW2TCL_CONSUMED_DESC */
2012 
2013 /**
2014  * hal_srng_src_peek - Peek at the entry at the head pointer of the SRC ring
2015  * @hal_soc: Opaque HAL SOC handle
2016  * @hal_ring_hdl: Source ring pointer
2017  *
2018  * Get the entry at the head pointer in the src ring but do not move the head pointer.
2019  */
2020 static inline
2021 void *hal_srng_src_peek(void *hal_soc, hal_ring_handle_t hal_ring_hdl)
2022 {
2023 	struct hal_srng *srng = (struct hal_srng *)hal_ring_hdl;
2024 	uint32_t *desc;
2025 	uint32_t next_hp = (srng->u.src_ring.hp + srng->entry_size) %
2026 		srng->ring_size;
2027 
2028 	if (next_hp != srng->u.src_ring.cached_tp) {
2029 		desc = &(srng->ring_base_vaddr[srng->u.src_ring.hp]);
2030 		return (void *)desc;
2031 	}
2032 
2033 	return NULL;
2034 }
2035 
2036 /**
2037  * hal_srng_src_get_next - Get next entry from a source ring and move cached head pointer
2038  *
2039  * @hal_soc: Opaque HAL SOC handle
2040  * @hal_ring_hdl: Source ring pointer
2041  *
2042  * Return: Opaque pointer for next ring entry; NULL on failure
2043  */
2044 static inline
2045 void *hal_srng_src_get_next(void *hal_soc,
2046 			    hal_ring_handle_t hal_ring_hdl)
2047 {
2048 	struct hal_srng *srng = (struct hal_srng *)hal_ring_hdl;
2049 	uint32_t *desc;
2050 	/* TODO: Using % is expensive, but we have to do this since
2051 	 * size of some SRNG rings is not power of 2 (due to descriptor
2052 	 * sizes). Need to create separate API for rings used
2053 	 * per-packet, with sizes power of 2 (TCL2SW, REO2SW,
2054 	 * SW2RXDMA and CE rings)
2055 	 */
2056 	uint32_t next_hp = (srng->u.src_ring.hp + srng->entry_size) %
2057 		srng->ring_size;
2058 
2059 	if (next_hp != srng->u.src_ring.cached_tp) {
2060 		desc = &(srng->ring_base_vaddr[srng->u.src_ring.hp]);
2061 		srng->u.src_ring.hp = next_hp;
2062 		/* TODO: Since reap function is not used by all rings, we can
2063 		 * remove the following update of reap_hp in this function
2064 		 * if we can ensure that only hal_srng_src_get_next_reaped
2065 		 * is used for the rings requiring reap functionality
2066 		 */
2067 		srng->u.src_ring.reap_hp = next_hp;
2068 		return (void *)desc;
2069 	}
2070 
2071 	return NULL;
2072 }
2073 
2074 /**
2075  * hal_srng_src_peek_n_get_next - Get next entry from a ring without
2076  * moving head pointer.
2077  * hal_srng_src_get_next should be called subsequently to move the head pointer
2078  *
2079  * @hal_soc: Opaque HAL SOC handle
2080  * @hal_ring_hdl: Source ring pointer
2081  *
2082  * Return: Opaque pointer for next ring entry; NULL on failure
2083  */
2084 static inline
2085 void *hal_srng_src_peek_n_get_next(hal_soc_handle_t hal_soc_hdl,
2086 				   hal_ring_handle_t hal_ring_hdl)
2087 {
2088 	struct hal_srng *srng = (struct hal_srng *)hal_ring_hdl;
2089 	uint32_t *desc;
2090 
2091 	/* TODO: Using % is expensive, but we have to do this since
2092 	 * size of some SRNG rings is not power of 2 (due to descriptor
2093 	 * sizes). Need to create separate API for rings used
2094 	 * per-packet, with sizes power of 2 (TCL2SW, REO2SW,
2095 	 * SW2RXDMA and CE rings)
2096 	 */
2097 	if (((srng->u.src_ring.hp + srng->entry_size) %
2098 		srng->ring_size) != srng->u.src_ring.cached_tp) {
2099 		desc = &(srng->ring_base_vaddr[(srng->u.src_ring.hp +
2100 						srng->entry_size) %
2101 						srng->ring_size]);
2102 		return (void *)desc;
2103 	}
2104 
2105 	return NULL;
2106 }
2107 
2108 /**
2109  * hal_srng_src_peek_n_get_next_next - Get next to next, i.e. HP + 2, entry
2110  * from a ring without moving head pointer.
2111  *
2112  * @hal_soc: Opaque HAL SOC handle
2113  * @hal_ring_hdl: Source ring pointer
2114  *
2115  * Return: Opaque pointer for next to next ring entry; NULL on failure
2116  */
2117 static inline
2118 void *hal_srng_src_peek_n_get_next_next(hal_soc_handle_t hal_soc_hdl,
2119 					hal_ring_handle_t hal_ring_hdl)
2120 {
2121 	struct hal_srng *srng = (struct hal_srng *)hal_ring_hdl;
2122 	uint32_t *desc;
2123 
2124 	/* TODO: Using % is expensive, but we have to do this since
2125 	 * size of some SRNG rings is not power of 2 (due to descriptor
2126 	 * sizes). Need to create separate API for rings used
2127 	 * per-packet, with sizes power of 2 (TCL2SW, REO2SW,
2128 	 * SW2RXDMA and CE rings)
2129 	 */
2130 	if ((((srng->u.src_ring.hp + (srng->entry_size)) %
2131 		srng->ring_size) != srng->u.src_ring.cached_tp) &&
2132 	    (((srng->u.src_ring.hp + (srng->entry_size * 2)) %
2133 		srng->ring_size) != srng->u.src_ring.cached_tp)) {
2134 		desc = &(srng->ring_base_vaddr[(srng->u.src_ring.hp +
2135 						(srng->entry_size * 2)) %
2136 						srng->ring_size]);
2137 		return (void *)desc;
2138 	}
2139 
2140 	return NULL;
2141 }
2142 
2143 /**
2144  * hal_srng_src_get_cur_hp_n_move_next() - API that returns the current hp
2145  * and moves hp to the next entry in the src ring
2146  *
2147  * Usage: This API should only be used for init time replenish.
2148  *
2149  * @hal_soc_hdl: HAL soc handle
2150  * @hal_ring_hdl: Source ring pointer
2151  *
2152  */
2153 static inline void *
2154 hal_srng_src_get_cur_hp_n_move_next(hal_soc_handle_t hal_soc_hdl,
2155 				    hal_ring_handle_t hal_ring_hdl)
2156 {
2157 	struct hal_srng *srng = (struct hal_srng *)hal_ring_hdl;
2158 	uint32_t *cur_desc = NULL;
2159 	uint32_t next_hp;
2160 
2161 	cur_desc = &srng->ring_base_vaddr[(srng->u.src_ring.hp)];
2162 
2163 	next_hp = (srng->u.src_ring.hp + srng->entry_size) %
2164 		srng->ring_size;
2165 
2166 	if (next_hp != srng->u.src_ring.cached_tp)
2167 		srng->u.src_ring.hp = next_hp;
2168 
2169 	return (void *)cur_desc;
2170 }
2171 
2172 /**
2173  * hal_srng_src_num_avail - Returns number of available entries in src ring
2174  *
2175  * @hal_soc: Opaque HAL SOC handle
2176  * @hal_ring_hdl: Source ring pointer
2177  * @sync_hw_ptr: Sync cached tail pointer with HW
2178  *
2179  */
2180 static inline uint32_t
2181 hal_srng_src_num_avail(void *hal_soc,
2182 		       hal_ring_handle_t hal_ring_hdl, int sync_hw_ptr)
2183 {
2184 	struct hal_srng *srng = (struct hal_srng *)hal_ring_hdl;
2185 	uint32_t tp;
2186 	uint32_t hp = srng->u.src_ring.hp;
2187 
2188 	if (sync_hw_ptr) {
2189 		tp = *(srng->u.src_ring.tp_addr);
2190 		srng->u.src_ring.cached_tp = tp;
2191 	} else {
2192 		tp = srng->u.src_ring.cached_tp;
2193 	}
2194 
2195 	if (tp > hp)
2196 		return ((tp - hp) / srng->entry_size) - 1;
2197 	else
2198 		return ((srng->ring_size - hp + tp) / srng->entry_size) - 1;
2199 }
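
/*
 * Example (illustrative sketch; fill_desc() is a placeholder): a typical
 * source ring posting sequence, bracketed by hal_srng_access_start()/
 * hal_srng_access_end() as described in the comments below, using the
 * availability check and hal_srng_src_get_next() declared above.
 *
 *	void *desc;
 *
 *	hal_srng_access_start(hal_soc, hal_ring_hdl);
 *	if (hal_srng_src_num_avail(hal_soc, hal_ring_hdl, 0) &&
 *	    (desc = hal_srng_src_get_next(hal_soc, hal_ring_hdl)))
 *		fill_desc(desc);
 *	hal_srng_access_end(hal_soc, hal_ring_hdl);
 */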
2200 
2201 #ifdef WLAN_DP_SRNG_USAGE_WM_TRACKING
2202 /**
2203  * hal_srng_clear_ring_usage_wm_locked() - Clear SRNG usage watermark stats
2204  * @hal_soc_hdl: HAL soc handle
2205  * @hal_ring_hdl: SRNG handle
2206  *
2207  * This function tries to acquire SRNG lock, and hence should not be called
2208  * from a context which has already acquired the SRNG lock.
2209  *
2210  * Return: None
2211  */
2212 static inline
2213 void hal_srng_clear_ring_usage_wm_locked(hal_soc_handle_t hal_soc_hdl,
2214 					 hal_ring_handle_t hal_ring_hdl)
2215 {
2216 	struct hal_srng *srng = (struct hal_srng *)hal_ring_hdl;
2217 
2218 	SRNG_LOCK(&srng->lock);
2219 	srng->high_wm.val = 0;
2220 	srng->high_wm.timestamp = 0;
2221 	qdf_mem_zero(&srng->high_wm.bins[0], sizeof(srng->high_wm.bins[0]) *
2222 					     HAL_SRNG_HIGH_WM_BIN_MAX);
2223 	SRNG_UNLOCK(&srng->lock);
2224 }
2225 
2226 /**
2227  * hal_srng_update_ring_usage_wm_no_lock() - Update the SRNG usage wm stats
2228  * @hal_soc_hdl: HAL soc handle
2229  * @hal_ring_hdl: SRNG handle
2230  *
2231  * This function should be called with the SRNG lock held.
2232  *
2233  * Return: None
2234  */
2235 static inline
2236 void hal_srng_update_ring_usage_wm_no_lock(hal_soc_handle_t hal_soc_hdl,
2237 					   hal_ring_handle_t hal_ring_hdl)
2238 {
2239 	struct hal_srng *srng = (struct hal_srng *)hal_ring_hdl;
2240 	uint32_t curr_wm_val = 0;
2241 
2242 	if (srng->ring_dir == HAL_SRNG_SRC_RING)
2243 		curr_wm_val = hal_srng_src_num_avail(hal_soc_hdl, hal_ring_hdl,
2244 						     0);
2245 	else
2246 		curr_wm_val = hal_srng_dst_num_valid(hal_soc_hdl, hal_ring_hdl,
2247 						     0);
2248 
2249 	if (curr_wm_val > srng->high_wm.val) {
2250 		srng->high_wm.val = curr_wm_val;
2251 		srng->high_wm.timestamp = qdf_get_system_timestamp();
2252 	}
2253 
2254 	if (curr_wm_val >=
2255 		srng->high_wm.bin_thresh[HAL_SRNG_HIGH_WM_BIN_90_to_100])
2256 		srng->high_wm.bins[HAL_SRNG_HIGH_WM_BIN_90_to_100]++;
2257 	else if (curr_wm_val >=
2258 		 srng->high_wm.bin_thresh[HAL_SRNG_HIGH_WM_BIN_80_to_90])
2259 		srng->high_wm.bins[HAL_SRNG_HIGH_WM_BIN_80_to_90]++;
2260 	else if (curr_wm_val >=
2261 		 srng->high_wm.bin_thresh[HAL_SRNG_HIGH_WM_BIN_70_to_80])
2262 		srng->high_wm.bins[HAL_SRNG_HIGH_WM_BIN_70_to_80]++;
2263 	else if (curr_wm_val >=
2264 		 srng->high_wm.bin_thresh[HAL_SRNG_HIGH_WM_BIN_60_to_70])
2265 		srng->high_wm.bins[HAL_SRNG_HIGH_WM_BIN_60_to_70]++;
2266 	else if (curr_wm_val >=
2267 		 srng->high_wm.bin_thresh[HAL_SRNG_HIGH_WM_BIN_50_to_60])
2268 		srng->high_wm.bins[HAL_SRNG_HIGH_WM_BIN_50_to_60]++;
2269 	else
2270 		srng->high_wm.bins[HAL_SRNG_HIGH_WM_BIN_BELOW_50_PERCENT]++;
2271 }
2272 
2273 static inline
2274 int hal_dump_srng_high_wm_stats(hal_soc_handle_t hal_soc_hdl,
2275 				hal_ring_handle_t hal_ring_hdl,
2276 				char *buf, int buf_len, int pos)
2277 {
2278 	struct hal_srng *srng = (struct hal_srng *)hal_ring_hdl;
2279 
2280 	return qdf_scnprintf(buf + pos, buf_len - pos,
2281 			     "%8u %7u %12llu %10u %10u %10u %10u %10u %10u",
2282 			     srng->ring_id, srng->high_wm.val,
2283 			     srng->high_wm.timestamp,
2284 			     srng->high_wm.bins[HAL_SRNG_HIGH_WM_BIN_BELOW_50_PERCENT],
2285 			     srng->high_wm.bins[HAL_SRNG_HIGH_WM_BIN_50_to_60],
2286 			     srng->high_wm.bins[HAL_SRNG_HIGH_WM_BIN_60_to_70],
2287 			     srng->high_wm.bins[HAL_SRNG_HIGH_WM_BIN_70_to_80],
2288 			     srng->high_wm.bins[HAL_SRNG_HIGH_WM_BIN_80_to_90],
2289 			     srng->high_wm.bins[HAL_SRNG_HIGH_WM_BIN_90_to_100]);
2290 }
2291 #else
2292 /**
2293  * hal_srng_clear_ring_usage_wm_locked() - Clear SRNG usage watermark stats
2294  * @hal_soc_hdl: HAL soc handle
2295  * @hal_ring_hdl: SRNG handle
2296  *
2297  * This function tries to acquire SRNG lock, and hence should not be called
2298  * from a context which has already acquired the SRNG lock.
2299  *
2300  * Return: None
2301  */
2302 static inline
2303 void hal_srng_clear_ring_usage_wm_locked(hal_soc_handle_t hal_soc_hdl,
2304 					 hal_ring_handle_t hal_ring_hdl)
2305 {
2306 }
2307 
2308 /**
2309  * hal_srng_update_ring_usage_wm_no_lock() - Update the SRNG usage wm stats
2310  * @hal_soc_hdl: HAL soc handle
2311  * @hal_ring_hdl: SRNG handle
2312  *
2313  * This function should be called with the SRNG lock held.
2314  *
2315  * Return: None
2316  */
2317 static inline
2318 void hal_srng_update_ring_usage_wm_no_lock(hal_soc_handle_t hal_soc_hdl,
2319 					   hal_ring_handle_t hal_ring_hdl)
2320 {
2321 }
2322 
2323 static inline
2324 int hal_dump_srng_high_wm_stats(hal_soc_handle_t hal_soc_hdl,
2325 				hal_ring_handle_t hal_ring_hdl,
2326 				char *buf, int buf_len, int pos)
2327 {
2328 	return 0;
2329 }
2330 #endif
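
/*
 * Example (illustrative sketch): with WLAN_DP_SRNG_USAGE_WM_TRACKING
 * enabled, the watermark update is expected to run with the SRNG lock
 * already held (e.g. from the ring processing context), while clearing
 * and dumping are done from a separate, unlocked context. buf, buf_len
 * and pos are placeholders.
 *
 *	hal_srng_update_ring_usage_wm_no_lock(hal_soc_hdl, hal_ring_hdl);
 *	...
 *	hal_srng_clear_ring_usage_wm_locked(hal_soc_hdl, hal_ring_hdl);
 *	pos += hal_dump_srng_high_wm_stats(hal_soc_hdl, hal_ring_hdl,
 *					   buf, buf_len, pos);
 */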
2331 
2332 /**
2333  * hal_srng_access_end_unlocked - End ring access (unlocked) - update cached
2334  * ring head/tail pointers to HW.
2335  *
2336  * @hal_soc: Opaque HAL SOC handle
2337  * @hal_ring_hdl: Ring pointer (Source or Destination ring)
2338  *
2339  * The target expects cached head/tail pointer to be updated to the
2340  * shared location in little-endian order; this API ensures that.
2341  * This API should be used only if hal_srng_access_start_unlocked was used to
2342  * start ring access.
2343  *
2344  * Return: None
2345  */
2346 static inline void
2347 hal_srng_access_end_unlocked(void *hal_soc, hal_ring_handle_t hal_ring_hdl)
2348 {
2349 	struct hal_srng *srng = (struct hal_srng *)hal_ring_hdl;
2350 
2351 	/* TODO: See if we need a write memory barrier here */
2352 	if (srng->flags & HAL_SRNG_LMAC_RING) {
2353 		/* For LMAC rings, ring pointer updates are done through FW and
2354 		 * hence written to a shared memory location that is read by FW
2355 		 */
2356 		if (srng->ring_dir == HAL_SRNG_SRC_RING) {
2357 			*srng->u.src_ring.hp_addr =
2358 				qdf_cpu_to_le32(srng->u.src_ring.hp);
2359 		} else {
2360 			*srng->u.dst_ring.tp_addr =
2361 				qdf_cpu_to_le32(srng->u.dst_ring.tp);
2362 		}
2363 	} else {
2364 		if (srng->ring_dir == HAL_SRNG_SRC_RING)
2365 			hal_srng_write_address_32_mb(hal_soc,
2366 						     srng,
2367 						     srng->u.src_ring.hp_addr,
2368 						     srng->u.src_ring.hp);
2369 		else
2370 			hal_srng_write_address_32_mb(hal_soc,
2371 						     srng,
2372 						     srng->u.dst_ring.tp_addr,
2373 						     srng->u.dst_ring.tp);
2374 	}
2375 }
2376 
2377 /* hal_srng_access_end_unlocked already handles endianness conversion,
2378  * use the same.
2379  */
2380 #define hal_le_srng_access_end_unlocked_in_cpu_order \
2381 	hal_srng_access_end_unlocked
2382 
2383 /**
2384  * hal_srng_access_end - Unlock ring access and update cached ring head/tail
2385  * pointers to HW
2386  *
2387  * @hal_soc: Opaque HAL SOC handle
2388  * @hal_ring_hdl: Ring pointer (Source or Destination ring)
2389  *
2390  * The target expects cached head/tail pointer to be updated to the
2391  * shared location in little-endian order; this API ensures that.
2392  * This API should be used only if hal_srng_access_start was used to
2393  * start ring access.
2394  *
2395  */
2396 static inline void
2397 hal_srng_access_end(void *hal_soc, hal_ring_handle_t hal_ring_hdl)
2398 {
2399 	struct hal_srng *srng = (struct hal_srng *)hal_ring_hdl;
2400 
2401 	if (qdf_unlikely(!hal_ring_hdl)) {
2402 		qdf_print("Error: Invalid hal_ring\n");
2403 		return;
2404 	}
2405 
2406 	hal_srng_access_end_unlocked(hal_soc, hal_ring_hdl);
2407 	SRNG_UNLOCK(&(srng->lock));
2408 }
2409 
2410 #ifdef FEATURE_RUNTIME_PM
2411 #define hal_srng_access_end_v1 hal_srng_rtpm_access_end
2412 
2413 /**
2414  * hal_srng_rtpm_access_end - RTPM aware, Unlock ring access
2415  * @hal_soc: Opaque HAL SOC handle
2416  * @hal_ring_hdl: Ring pointer (Source or Destination ring)
2417  * @rtpm_id: RTPM debug id
2419  *
2420  * Function updates the HP/TP value to the hardware register.
2421  * The target expects cached head/tail pointer to be updated to the
2422  * shared location in little-endian order; this API ensures that.
2423  * This API should be used only if hal_srng_access_start was used to
2424  * start ring access.
2425  *
2426  * Return: None
2427  */
2428 void
2429 hal_srng_rtpm_access_end(hal_soc_handle_t hal_soc_hdl,
2430 			 hal_ring_handle_t hal_ring_hdl,
2431 			 uint32_t rtpm_id);
2432 #else
2433 #define hal_srng_access_end_v1(hal_soc_hdl, hal_ring_hdl, rtpm_id) \
2434 	hal_srng_access_end(hal_soc_hdl, hal_ring_hdl)
2435 #endif
2436 
2437 /* hal_srng_access_end already handles endianness conversion, so use the same */
2438 #define hal_le_srng_access_end_in_cpu_order \
2439 	hal_srng_access_end
2440 
2441 /**
2442  * hal_srng_access_end_reap - Unlock ring access
2443  * This should be used only if hal_srng_access_start was used to start ring
2444  * access, and only while reaping SRC ring completions.
2445  *
2446  * @hal_soc: Opaque HAL SOC handle
2447  * @hal_ring_hdl: Ring pointer (Source or Destination ring)
2448  *
2449  * Return: None
2450  */
2451 static inline void
2452 hal_srng_access_end_reap(void *hal_soc, hal_ring_handle_t hal_ring_hdl)
2453 {
2454 	struct hal_srng *srng = (struct hal_srng *)hal_ring_hdl;
2455 
2456 	SRNG_UNLOCK(&(srng->lock));
2457 }
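
/*
 * Example (illustrative sketch; release_buffer() is a placeholder): reap
 * mode access, where entries are reaped under the SRNG lock and no HP/TP
 * update is written back to HW when the access is ended.
 *
 *	hal_srng_access_start(hal_soc, hal_ring_hdl);
 *	while ((desc = hal_srng_src_reap_next(hal_soc, hal_ring_hdl)))
 *		release_buffer(desc);
 *	hal_srng_access_end_reap(hal_soc, hal_ring_hdl);
 */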
2458 
2459 /* TODO: Check if the following definitions are available in HW headers */
2460 #define WBM_IDLE_SCATTER_BUF_SIZE 32704
2461 #define NUM_MPDUS_PER_LINK_DESC 6
2462 #define NUM_MSDUS_PER_LINK_DESC 7
2463 #define REO_QUEUE_DESC_ALIGN 128
2464 
2465 #define LINK_DESC_ALIGN 128
2466 
2467 #define ADDRESS_MATCH_TAG_VAL 0x5
2468 /* Number of mpdu link pointers is 9 in case of TX_MPDU_QUEUE_HEAD and 14 in
2469  * case of TX_MPDU_QUEUE_EXT. We are defining a common average count here
2470  */
2471 #define NUM_MPDU_LINKS_PER_QUEUE_DESC 12
2472 
2473 /* TODO: Check with HW team on the scatter buffer size supported. As per WBM
2474  * MLD, scatter_buffer_size in IDLE_LIST_CONTROL register is 9 bits and size
2475  * should be specified in 16 word units. But the number of bits defined for
2476  * this field in HW header files is 5.
2477  */
2478 #define WBM_IDLE_SCATTER_BUF_NEXT_PTR_SIZE 8
2479 
2480 
2481 /**
2482  * hal_idle_list_scatter_buf_size - Get the size of each scatter buffer
2483  * in an idle list
2484  *
2485  * @hal_soc: Opaque HAL SOC handle
2486  *
2487  */
2488 static inline
2489 uint32_t hal_idle_list_scatter_buf_size(hal_soc_handle_t hal_soc_hdl)
2490 {
2491 	return WBM_IDLE_SCATTER_BUF_SIZE;
2492 }
2493 
2494 /**
2495  * hal_get_link_desc_size - Get the size of each link descriptor
2496  *
2497  * @hal_soc: Opaque HAL SOC handle
2498  *
2499  */
2500 static inline uint32_t hal_get_link_desc_size(hal_soc_handle_t hal_soc_hdl)
2501 {
2502 	struct hal_soc *hal_soc = (struct hal_soc *)hal_soc_hdl;
2503 
2504 	if (!hal_soc || !hal_soc->ops) {
2505 		qdf_print("Error: Invalid ops\n");
2506 		QDF_BUG(0);
2507 		return -EINVAL;
2508 	}
2509 	if (!hal_soc->ops->hal_get_link_desc_size) {
2510 		qdf_print("Error: Invalid function pointer\n");
2511 		QDF_BUG(0);
2512 		return -EINVAL;
2513 	}
2514 	return hal_soc->ops->hal_get_link_desc_size();
2515 }
2516 
2517 /**
2518  * hal_get_link_desc_align - Get the required start address alignment for
2519  * link descriptors
2520  *
2521  * @hal_soc: Opaque HAL SOC handle
2522  *
2523  */
2524 static inline
2525 uint32_t hal_get_link_desc_align(hal_soc_handle_t hal_soc_hdl)
2526 {
2527 	return LINK_DESC_ALIGN;
2528 }
2529 
2530 /**
2531  * hal_num_mpdus_per_link_desc - Get number of mpdus each link desc can hold
2532  *
2533  * @hal_soc: Opaque HAL SOC handle
2534  *
2535  */
2536 static inline
2537 uint32_t hal_num_mpdus_per_link_desc(hal_soc_handle_t hal_soc_hdl)
2538 {
2539 	return NUM_MPDUS_PER_LINK_DESC;
2540 }
2541 
2542 /**
2543  * hal_num_msdus_per_link_desc - Get number of msdus each link desc can hold
2544  *
2545  * @hal_soc: Opaque HAL SOC handle
2546  *
2547  */
2548 static inline
2549 uint32_t hal_num_msdus_per_link_desc(hal_soc_handle_t hal_soc_hdl)
2550 {
2551 	return NUM_MSDUS_PER_LINK_DESC;
2552 }
2553 
2554 /**
2555  * hal_num_mpdu_links_per_queue_desc - Get number of mpdu links each queue
2556  * descriptor can hold
2557  *
2558  * @hal_soc: Opaque HAL SOC handle
2559  *
2560  */
2561 static inline
2562 uint32_t hal_num_mpdu_links_per_queue_desc(hal_soc_handle_t hal_soc_hdl)
2563 {
2564 	return NUM_MPDU_LINKS_PER_QUEUE_DESC;
2565 }
2566 
2567 /**
2568  * hal_idle_scatter_buf_num_entries - Get the number of link desc entries
2569  * that fit in the given scatter buffer size
2570  *
2571  * @hal_soc: Opaque HAL SOC handle
2572  * @scatter_buf_size: Size of scatter buffer
2573  *
2574  */
2575 static inline
2576 uint32_t hal_idle_scatter_buf_num_entries(hal_soc_handle_t hal_soc_hdl,
2577 					  uint32_t scatter_buf_size)
2578 {
2579 	return (scatter_buf_size - WBM_IDLE_SCATTER_BUF_NEXT_PTR_SIZE) /
2580 		hal_srng_get_entrysize(hal_soc_hdl, WBM_IDLE_LINK);
2581 }
2582 
2583 /**
2584  * hal_idle_list_num_scatter_bufs - Get the number of scatter buffers
2585  * needed to hold the given total memory, for a given scatter buffer size
2586  *
2587  * @hal_soc: Opaque HAL SOC handle
2588  * @total_mem: size of memory to be scattered
2589  * @scatter_buf_size: Size of scatter buffer
2590  *
2591  */
2592 static inline
2593 uint32_t hal_idle_list_num_scatter_bufs(hal_soc_handle_t hal_soc_hdl,
2594 					uint32_t total_mem,
2595 					uint32_t scatter_buf_size)
2596 {
2597 	uint8_t rem = (total_mem % (scatter_buf_size -
2598 			WBM_IDLE_SCATTER_BUF_NEXT_PTR_SIZE)) ? 1 : 0;
2599 
2600 	uint32_t num_scatter_bufs = (total_mem / (scatter_buf_size -
2601 				WBM_IDLE_SCATTER_BUF_NEXT_PTR_SIZE)) + rem;
2602 
2603 	return num_scatter_bufs;
2604 }
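
/*
 * Example (illustrative arithmetic): with scatter_buf_size equal to
 * WBM_IDLE_SCATTER_BUF_SIZE (32704 bytes) and
 * WBM_IDLE_SCATTER_BUF_NEXT_PTR_SIZE of 8 bytes, each scatter buffer
 * holds 32696 bytes of link descriptor entries. For a hypothetical
 * total_mem of 100000 bytes, (100000 / 32696) + 1 = 4 scatter buffers
 * are needed.
 */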
2605 
2606 enum hal_pn_type {
2607 	HAL_PN_NONE,
2608 	HAL_PN_WPA,
2609 	HAL_PN_WAPI_EVEN,
2610 	HAL_PN_WAPI_UNEVEN,
2611 };
2612 
2613 #define HAL_RX_BA_WINDOW_256 256
2614 #define HAL_RX_BA_WINDOW_1024 1024
2615 
2616 /**
2617  * hal_get_reo_qdesc_align - Get start address alignment for reo
2618  * queue descriptors
2619  *
2620  * @hal_soc: Opaque HAL SOC handle
2621  *
2622  */
2623 static inline
2624 uint32_t hal_get_reo_qdesc_align(hal_soc_handle_t hal_soc_hdl)
2625 {
2626 	return REO_QUEUE_DESC_ALIGN;
2627 }
2628 
2629 /**
2630  * hal_srng_get_hp_addr - Get head pointer physical address
2631  *
2632  * @hal_soc: Opaque HAL SOC handle
2633  * @hal_ring_hdl: Ring pointer (Source or Destination ring)
2634  *
2635  */
2636 static inline qdf_dma_addr_t
2637 hal_srng_get_hp_addr(void *hal_soc,
2638 		     hal_ring_handle_t hal_ring_hdl)
2639 {
2640 	struct hal_srng *srng = (struct hal_srng *)hal_ring_hdl;
2641 	struct hal_soc *hal = (struct hal_soc *)hal_soc;
2642 
2643 	if (srng->ring_dir == HAL_SRNG_SRC_RING) {
2644 		if (srng->flags & HAL_SRNG_LMAC_RING)
2645 			return hal->shadow_wrptr_mem_paddr +
2646 				 ((unsigned long)(srng->u.src_ring.hp_addr) -
2647 				  (unsigned long)(hal->shadow_wrptr_mem_vaddr));
2648 		else if (ignore_shadow)
2649 			return (qdf_dma_addr_t)srng->u.src_ring.hp_addr;
2650 		else
2651 			return ((struct hif_softc *)hal->hif_handle)->mem_pa +
2652 				((unsigned long)srng->u.src_ring.hp_addr -
2653 				 (unsigned long)hal->dev_base_addr);
2654 
2655 	} else {
2656 		return hal->shadow_rdptr_mem_paddr +
2657 		  ((unsigned long)(srng->u.dst_ring.hp_addr) -
2658 		   (unsigned long)(hal->shadow_rdptr_mem_vaddr));
2659 	}
2660 }
2661 
2662 /**
2663  * hal_srng_get_tp_addr - Get tail pointer physical address
2664  *
2665  * @hal_soc: Opaque HAL SOC handle
2666  * @hal_ring_hdl: Ring pointer (Source or Destination ring)
2667  *
2668  */
2669 static inline qdf_dma_addr_t
2670 hal_srng_get_tp_addr(void *hal_soc, hal_ring_handle_t hal_ring_hdl)
2671 {
2672 	struct hal_srng *srng = (struct hal_srng *)hal_ring_hdl;
2673 	struct hal_soc *hal = (struct hal_soc *)hal_soc;
2674 
2675 	if (srng->ring_dir == HAL_SRNG_SRC_RING) {
2676 		return hal->shadow_rdptr_mem_paddr +
2677 			((unsigned long)(srng->u.src_ring.tp_addr) -
2678 			(unsigned long)(hal->shadow_rdptr_mem_vaddr));
2679 	} else {
2680 		if (srng->flags & HAL_SRNG_LMAC_RING)
2681 			return hal->shadow_wrptr_mem_paddr +
2682 				((unsigned long)(srng->u.dst_ring.tp_addr) -
2683 				 (unsigned long)(hal->shadow_wrptr_mem_vaddr));
2684 		else if (ignore_shadow)
2685 			return (qdf_dma_addr_t)srng->u.dst_ring.tp_addr;
2686 		else
2687 			return ((struct hif_softc *)hal->hif_handle)->mem_pa +
2688 				((unsigned long)srng->u.dst_ring.tp_addr -
2689 				 (unsigned long)hal->dev_base_addr);
2690 	}
2691 }
2692 
2693 /**
2694  * hal_srng_get_num_entries - Get total entries in the HAL Srng
2695  *
2696  * @hal_soc: Opaque HAL SOC handle
2697  * @hal_ring_hdl: Ring pointer (Source or Destination ring)
2698  *
2699  * Return: total number of entries in hal ring
2700  */
2701 static inline
2702 uint32_t hal_srng_get_num_entries(hal_soc_handle_t hal_soc_hdl,
2703 				  hal_ring_handle_t hal_ring_hdl)
2704 {
2705 	struct hal_srng *srng = (struct hal_srng *)hal_ring_hdl;
2706 
2707 	return srng->num_entries;
2708 }
2709 
2710 /**
2711  * hal_get_srng_params - Retrieve SRNG parameters for a given ring from HAL
2712  *
2713  * @hal_soc: Opaque HAL SOC handle
2714  * @hal_ring_hdl: Ring pointer (Source or Destination ring)
2715  * @ring_params: SRNG parameters will be returned through this structure
2716  */
2717 void hal_get_srng_params(hal_soc_handle_t hal_soc_hdl,
2718 			 hal_ring_handle_t hal_ring_hdl,
2719 			 struct hal_srng_params *ring_params);
2720 
2721 /**
2722  * hal_get_meminfo - Retrieve hal memory base address
2723  *
2724  * @hal_soc: Opaque HAL SOC handle
2725  * @mem: pointer to structure to be updated with hal mem info
2726  */
2727 void hal_get_meminfo(hal_soc_handle_t hal_soc_hdl, struct hal_mem_info *mem);
2728 
2729 /**
2730  * hal_get_target_type - Return target type
2731  *
2732  * @hal_soc: Opaque HAL SOC handle
2733  */
2734 uint32_t hal_get_target_type(hal_soc_handle_t hal_soc_hdl);
2735 
2736 /**
2737  * hal_srng_dst_hw_init - Private function to initialize SRNG
2738  * destination ring HW
2739  * @hal_soc: HAL SOC handle
2740  * @srng: SRNG ring pointer
2741  * @idle_check: Check if ring is idle
2742  * @idx: Ring index
2743  */
2744 static inline void hal_srng_dst_hw_init(struct hal_soc *hal,
2745 					struct hal_srng *srng, bool idle_check,
2746 					uint16_t idx)
2747 {
2748 	hal->ops->hal_srng_dst_hw_init(hal, srng, idle_check, idx);
2749 }
2750 
2751 /**
2752  * hal_srng_src_hw_init - Private function to initialize SRNG
2753  * source ring HW
2754  * @hal_soc: HAL SOC handle
2755  * @srng: SRNG ring pointer
2756  * @idle_check: Check if ring is idle
2757  * @idx: Ring index
2758  */
2759 static inline void hal_srng_src_hw_init(struct hal_soc *hal,
2760 					struct hal_srng *srng, bool idle_check,
2761 					uint16_t idx)
2762 {
2763 	hal->ops->hal_srng_src_hw_init(hal, srng, idle_check, idx);
2764 }
2765 
2766 /**
2767  * hal_srng_hw_disable - Private function to disable SRNG
2768  * source ring HW
2769  * @hal_soc: HAL SOC handle
2770  * @srng: SRNG ring pointer
2771  */
2772 static inline
2773 void hal_srng_hw_disable(struct hal_soc *hal_soc, struct hal_srng *srng)
2774 {
2775 	if (hal_soc->ops->hal_srng_hw_disable)
2776 		hal_soc->ops->hal_srng_hw_disable(hal_soc, srng);
2777 }
2778 
2779 /**
2780  * hal_get_hw_hptp()  - Get HW head and tail pointer value for any ring
2781  * @hal_soc: Opaque HAL SOC handle
2782  * @hal_ring_hdl: Source ring pointer
2783  * @headp: Head Pointer
2784  * @tailp: Tail Pointer
2785  * @ring_type: Ring type
2786  *
2787  * Return: None; head and tail pointer values are updated through the arguments
2788  */
2789 static inline
2790 void hal_get_hw_hptp(hal_soc_handle_t hal_soc_hdl,
2791 		     hal_ring_handle_t hal_ring_hdl,
2792 		     uint32_t *headp, uint32_t *tailp,
2793 		     uint8_t ring_type)
2794 {
2795 	struct hal_soc *hal_soc = (struct hal_soc *)hal_soc_hdl;
2796 
2797 	hal_soc->ops->hal_get_hw_hptp(hal_soc, hal_ring_hdl,
2798 			headp, tailp, ring_type);
2799 }
2800 
2801 /**
2802  * hal_reo_setup - Initialize HW REO block
2803  *
2804  * @hal_soc: Opaque HAL SOC handle
2805  * @reoparams: parameters needed by HAL for REO config
2806  * @qref_reset: reset qref
2807  */
2808 static inline void hal_reo_setup(hal_soc_handle_t hal_soc_hdl,
2809 				 void *reoparams, int qref_reset)
2810 {
2811 	struct hal_soc *hal_soc = (struct hal_soc *)hal_soc_hdl;
2812 
2813 	hal_soc->ops->hal_reo_setup(hal_soc, reoparams, qref_reset);
2814 }
2815 
2816 static inline
2817 void hal_compute_reo_remap_ix2_ix3(hal_soc_handle_t hal_soc_hdl,
2818 				   uint32_t *ring, uint32_t num_rings,
2819 				   uint32_t *remap1, uint32_t *remap2)
2820 {
2821 	struct hal_soc *hal_soc = (struct hal_soc *)hal_soc_hdl;
2822 
2823 	return hal_soc->ops->hal_compute_reo_remap_ix2_ix3(ring,
2824 					num_rings, remap1, remap2);
2825 }
2826 
2827 static inline
2828 void hal_compute_reo_remap_ix0(hal_soc_handle_t hal_soc_hdl, uint32_t *remap0)
2829 {
2830 	struct hal_soc *hal_soc = (struct hal_soc *)hal_soc_hdl;
2831 
2832 	if (hal_soc->ops->hal_compute_reo_remap_ix0)
2833 		hal_soc->ops->hal_compute_reo_remap_ix0(remap0);
2834 }
2835 
2836 /**
2837  * hal_setup_link_idle_list - Setup scattered idle list using the
2838  * buffer list provided
2839  *
2840  * @hal_soc: Opaque HAL SOC handle
2841  * @scatter_bufs_base_paddr: Array of physical base addresses
2842  * @scatter_bufs_base_vaddr: Array of virtual base addresses
2843  * @num_scatter_bufs: Number of scatter buffers in the above lists
2844  * @scatter_buf_size: Size of each scatter buffer
2845  * @last_buf_end_offset: Offset to the last entry
2846  * @num_entries: Total entries of all scatter bufs
2847  *
2848  */
2849 static inline
2850 void hal_setup_link_idle_list(hal_soc_handle_t hal_soc_hdl,
2851 			      qdf_dma_addr_t scatter_bufs_base_paddr[],
2852 			      void *scatter_bufs_base_vaddr[],
2853 			      uint32_t num_scatter_bufs,
2854 			      uint32_t scatter_buf_size,
2855 			      uint32_t last_buf_end_offset,
2856 			      uint32_t num_entries)
2857 {
2858 	struct hal_soc *hal_soc = (struct hal_soc *)hal_soc_hdl;
2859 
2860 	hal_soc->ops->hal_setup_link_idle_list(hal_soc, scatter_bufs_base_paddr,
2861 			scatter_bufs_base_vaddr, num_scatter_bufs,
2862 			scatter_buf_size, last_buf_end_offset,
2863 			num_entries);
2864 
2865 }
2866 
2867 #ifdef DUMP_REO_QUEUE_INFO_IN_DDR
2868 /**
2869  * hal_dump_rx_reo_queue_desc() - Dump reo queue descriptor fields
2870  * @hw_qdesc_vaddr_aligned: Pointer to hw reo queue desc virtual addr
2871  *
2872  * Use the virtual addr pointer to reo h/w queue desc to read
2873  * the values from ddr and log them.
2874  *
2875  * Return: none
2876  */
2877 static inline void hal_dump_rx_reo_queue_desc(
2878 	void *hw_qdesc_vaddr_aligned)
2879 {
2880 	struct rx_reo_queue *hw_qdesc =
2881 		(struct rx_reo_queue *)hw_qdesc_vaddr_aligned;
2882 
2883 	if (!hw_qdesc)
2884 		return;
2885 
2886 	hal_info("receive_queue_number %u vld %u window_jump_2k %u"
2887 		 " hole_count %u ba_window_size %u ignore_ampdu_flag %u"
2888 		 " svld %u ssn %u current_index %u"
2889 		 " disable_duplicate_detection %u soft_reorder_enable %u"
2890 		 " chk_2k_mode %u oor_mode %u mpdu_frames_processed_count %u"
2891 		 " msdu_frames_processed_count %u total_processed_byte_count %u"
2892 		 " late_receive_mpdu_count %u seq_2k_error_detected_flag %u"
2893 		 " pn_error_detected_flag %u current_mpdu_count %u"
2894 		 " current_msdu_count %u timeout_count %u"
2895 		 " forward_due_to_bar_count %u duplicate_count %u"
2896 		 " frames_in_order_count %u bar_received_count %u"
2897 		 " pn_check_needed %u pn_shall_be_even %u"
2898 		 " pn_shall_be_uneven %u pn_size %u",
2899 		 hw_qdesc->receive_queue_number,
2900 		 hw_qdesc->vld,
2901 		 hw_qdesc->window_jump_2k,
2902 		 hw_qdesc->hole_count,
2903 		 hw_qdesc->ba_window_size,
2904 		 hw_qdesc->ignore_ampdu_flag,
2905 		 hw_qdesc->svld,
2906 		 hw_qdesc->ssn,
2907 		 hw_qdesc->current_index,
2908 		 hw_qdesc->disable_duplicate_detection,
2909 		 hw_qdesc->soft_reorder_enable,
2910 		 hw_qdesc->chk_2k_mode,
2911 		 hw_qdesc->oor_mode,
2912 		 hw_qdesc->mpdu_frames_processed_count,
2913 		 hw_qdesc->msdu_frames_processed_count,
2914 		 hw_qdesc->total_processed_byte_count,
2915 		 hw_qdesc->late_receive_mpdu_count,
2916 		 hw_qdesc->seq_2k_error_detected_flag,
2917 		 hw_qdesc->pn_error_detected_flag,
2918 		 hw_qdesc->current_mpdu_count,
2919 		 hw_qdesc->current_msdu_count,
2920 		 hw_qdesc->timeout_count,
2921 		 hw_qdesc->forward_due_to_bar_count,
2922 		 hw_qdesc->duplicate_count,
2923 		 hw_qdesc->frames_in_order_count,
2924 		 hw_qdesc->bar_received_count,
2925 		 hw_qdesc->pn_check_needed,
2926 		 hw_qdesc->pn_shall_be_even,
2927 		 hw_qdesc->pn_shall_be_uneven,
2928 		 hw_qdesc->pn_size);
2929 }
2930 
2931 #else /* DUMP_REO_QUEUE_INFO_IN_DDR */
2932 
2933 static inline void hal_dump_rx_reo_queue_desc(
2934 	void *hw_qdesc_vaddr_aligned)
2935 {
2936 }
2937 #endif /* DUMP_REO_QUEUE_INFO_IN_DDR */
2938 
2939 /**
2940  * hal_srng_dump_ring_desc() - Dump ring descriptor info
2941  *
2942  * @hal_soc: Opaque HAL SOC handle
2943  * @hal_ring_hdl: Source ring pointer
2944  * @ring_desc: Opaque ring descriptor handle
2945  */
2946 static inline void hal_srng_dump_ring_desc(hal_soc_handle_t hal_soc_hdl,
2947 					   hal_ring_handle_t hal_ring_hdl,
2948 					   hal_ring_desc_t ring_desc)
2949 {
2950 	struct hal_srng *srng = (struct hal_srng *)hal_ring_hdl;
2951 
2952 	QDF_TRACE_HEX_DUMP(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_HIGH,
2953 			   ring_desc, (srng->entry_size << 2));
2954 }
2955 
2956 /**
2957  * hal_srng_dump_ring() - Dump last 128 descs of the ring
2958  *
2959  * @hal_soc: Opaque HAL SOC handle
2960  * @hal_ring_hdl: Source ring pointer
2961  */
2962 static inline void hal_srng_dump_ring(hal_soc_handle_t hal_soc_hdl,
2963 				      hal_ring_handle_t hal_ring_hdl)
2964 {
2965 	struct hal_srng *srng = (struct hal_srng *)hal_ring_hdl;
2966 	uint32_t *desc;
2967 	uint32_t tp, i;
2968 
2969 	tp = srng->u.dst_ring.tp;
2970 
2971 	for (i = 0; i < 128; i++) {
2972 		if (!tp)
2973 			tp = srng->ring_size;
2974 
2975 		desc = &srng->ring_base_vaddr[tp - srng->entry_size];
2976 		QDF_TRACE_HEX_DUMP(QDF_MODULE_ID_DP,
2977 				   QDF_TRACE_LEVEL_DEBUG,
2978 				   desc, (srng->entry_size << 2));
2979 
2980 		tp -= srng->entry_size;
2981 	}
2982 }
2983 
2984 /**
2985  * hal_rxdma_desc_to_hal_ring_desc - API to convert rxdma ring desc
2986  * to opaque hal ring desc type
2987  * @ring_desc: rxdma ring desc
2988  *
2989  * Return: hal_ring_desc_t type
2990  */
2991 static inline
2992 hal_ring_desc_t hal_rxdma_desc_to_hal_ring_desc(hal_rxdma_desc_t ring_desc)
2993 {
2994 	return (hal_ring_desc_t)ring_desc;
2995 }
2996 
2997 /**
2998  * hal_srng_set_event() - Set hal_srng event
2999  * @hal_ring_hdl: Source ring pointer
3000  * @event: SRNG ring event
3001  *
3002  * Return: None
3003  */
3004 static inline void hal_srng_set_event(hal_ring_handle_t hal_ring_hdl, int event)
3005 {
3006 	struct hal_srng *srng = (struct hal_srng *)hal_ring_hdl;
3007 
3008 	qdf_atomic_set_bit(event, &srng->srng_event);
3009 }
3010 
3011 /**
3012  * hal_srng_clear_event() - Clear hal_srng event
3013  * @hal_ring_hdl: Source ring pointer
3014  * @event: SRNG ring event
3015  *
3016  * Return: None
3017  */
3018 static inline
3019 void hal_srng_clear_event(hal_ring_handle_t hal_ring_hdl, int event)
3020 {
3021 	struct hal_srng *srng = (struct hal_srng *)hal_ring_hdl;
3022 
3023 	qdf_atomic_clear_bit(event, &srng->srng_event);
3024 }
3025 
3026 /**
3027  * hal_srng_get_clear_event() - Clear srng event and return old value
3028  * @hal_ring_hdl: Source ring pointer
3029  * @event: SRNG ring event
3030  *
3031  * Return: Return old event value
3032  */
3033 static inline
3034 int hal_srng_get_clear_event(hal_ring_handle_t hal_ring_hdl, int event)
3035 {
3036 	struct hal_srng *srng = (struct hal_srng *)hal_ring_hdl;
3037 
3038 	return qdf_atomic_test_and_clear_bit(event, &srng->srng_event);
3039 }
3040 
3041 /**
3042  * hal_srng_set_flush_last_ts() - Record last flush time stamp
3043  * @hal_ring_hdl: Source ring pointer
3044  *
3045  * Return: None
3046  */
3047 static inline void hal_srng_set_flush_last_ts(hal_ring_handle_t hal_ring_hdl)
3048 {
3049 	struct hal_srng *srng = (struct hal_srng *)hal_ring_hdl;
3050 
3051 	srng->last_flush_ts = qdf_get_log_timestamp();
3052 }
3053 
3054 /**
3055  * hal_srng_inc_flush_cnt() - Increment flush counter
3056  * @hal_ring_hdl: Source ring pointer
3057  *
3058  * Return: None
3059  */
3060 static inline void hal_srng_inc_flush_cnt(hal_ring_handle_t hal_ring_hdl)
3061 {
3062 	struct hal_srng *srng = (struct hal_srng *)hal_ring_hdl;
3063 
3064 	srng->flush_count++;
3065 }
3066 
3067 /**
3068  * hal_rx_sw_mon_desc_info_get() - Get SW monitor desc info
3069  *
3070  * @hal: Core HAL soc handle
3071  * @ring_desc: Mon dest ring descriptor
3072  * @desc_info: Desc info to be populated
3073  *
3074  * Return: None
3075  */
3076 static inline void
3077 hal_rx_sw_mon_desc_info_get(struct hal_soc *hal,
3078 			    hal_ring_desc_t ring_desc,
3079 			    hal_rx_mon_desc_info_t desc_info)
3080 {
3081 	return hal->ops->hal_rx_sw_mon_desc_info_get(ring_desc, desc_info);
3082 }
3083 
3084 /**
3085  * hal_reo_set_err_dst_remap() - Set REO error destination ring remap
3086  *				 register value.
3087  *
3088  * @hal_soc_hdl: Opaque HAL soc handle
3089  *
3090  * Return: None
3091  */
3092 static inline void hal_reo_set_err_dst_remap(hal_soc_handle_t hal_soc_hdl)
3093 {
3094 	struct hal_soc *hal_soc = (struct hal_soc *)hal_soc_hdl;
3095 
3096 	if (hal_soc->ops->hal_reo_set_err_dst_remap)
3097 		hal_soc->ops->hal_reo_set_err_dst_remap(hal_soc);
3098 }
3099 
3100 /**
3101  * hal_reo_enable_pn_in_dest() - Subscribe for previous PN for 2k-jump or
3102  *			OOR error frames
3103  * @hal_soc_hdl: Opaque HAL soc handle
3104  *
3105  * Return: true if the feature is enabled,
3106  *	false otherwise.
3107  */
3108 static inline uint8_t
3109 hal_reo_enable_pn_in_dest(hal_soc_handle_t hal_soc_hdl)
3110 {
3111 	struct hal_soc *hal_soc = (struct hal_soc *)hal_soc_hdl;
3112 
3113 	if (hal_soc->ops->hal_reo_enable_pn_in_dest)
3114 		return hal_soc->ops->hal_reo_enable_pn_in_dest(hal_soc);
3115 
3116 	return 0;
3117 }
3118 
3119 #ifdef GENERIC_SHADOW_REGISTER_ACCESS_ENABLE
3120 
3121 /**
3122  * hal_set_one_target_reg_config() - Populate the target reg
3123  * offset in hal_soc for one non srng related register at the
3124  * given list index
3125  * @hal_soc: hal handle
3126  * @target_reg_offset: target register offset
3127  * @list_index: index in hal list for shadow regs
3128  *
3129  * Return: none
3130  */
3131 void hal_set_one_target_reg_config(struct hal_soc *hal,
3132 				   uint32_t target_reg_offset,
3133 				   int list_index);
3134 
3135 /**
3136  * hal_set_shadow_regs() - Populate register offset for
3137  * registers that need to be populated in list_shadow_reg_config
3138  * in order to be sent to FW. These reg offsets will be mapped
3139  * to shadow registers.
3140  * @hal_soc: hal handle
3141  *
3142  * Return: QDF_STATUS_OK on success
3143  */
3144 QDF_STATUS hal_set_shadow_regs(void *hal_soc);
3145 
3146 /**
3147  * hal_construct_shadow_regs() - initialize the shadow registers
3148  * for non-srng related register configs
3149  * @hal_soc: hal handle
3150  *
3151  * Return: QDF_STATUS_OK on success
3152  */
3153 QDF_STATUS hal_construct_shadow_regs(void *hal_soc);
3154 
3155 #else /* GENERIC_SHADOW_REGISTER_ACCESS_ENABLE */
3156 static inline void hal_set_one_target_reg_config(
3157 	struct hal_soc *hal,
3158 	uint32_t target_reg_offset,
3159 	int list_index)
3160 {
3161 }
3162 
3163 static inline QDF_STATUS hal_set_shadow_regs(void *hal_soc)
3164 {
3165 	return QDF_STATUS_SUCCESS;
3166 }
3167 
3168 static inline QDF_STATUS hal_construct_shadow_regs(void *hal_soc)
3169 {
3170 	return QDF_STATUS_SUCCESS;
3171 }
3172 #endif /* GENERIC_SHADOW_REGISTER_ACCESS_ENABLE */
3173 
3174 #ifdef FEATURE_HAL_DELAYED_REG_WRITE
3175 /**
3176  * hal_flush_reg_write_work() - flush all writes from register write queue
3177  * @hal_handle: hal_soc handle
3178  *
3179  * Return: None
3180  */
3181 void hal_flush_reg_write_work(hal_soc_handle_t hal_handle);
3182 
3183 #else
3184 static inline void hal_flush_reg_write_work(hal_soc_handle_t hal_handle) { }
3185 #endif
3186 
3187 /**
3188  * hal_get_ring_usage - Calculate the ring usage percentage
3189  * @hal_ring_hdl: Ring pointer
3190  * @ring_type: Ring type
3191  * @headp: pointer to head value
3192  * @tailp: pointer to tail value
3193  *
3194  * Calculate the ring usage percentage for src and dest rings
3195  *
3196  * Return: Ring usage percentage
3197  */
3198 static inline
3199 uint32_t hal_get_ring_usage(
3200 	hal_ring_handle_t hal_ring_hdl,
3201 	enum hal_ring_type ring_type, uint32_t *headp, uint32_t *tailp)
3202 {
3203 	struct hal_srng *srng = (struct hal_srng *)hal_ring_hdl;
3204 	uint32_t num_avail, num_valid = 0;
3205 	uint32_t ring_usage;
3206 
3207 	if (srng->ring_dir == HAL_SRNG_SRC_RING) {
3208 		if (*tailp > *headp)
3209 			num_avail =  ((*tailp - *headp) / srng->entry_size) - 1;
3210 		else
3211 			num_avail = ((srng->ring_size - *headp + *tailp) /
3212 				     srng->entry_size) - 1;
3213 		if (ring_type == WBM_IDLE_LINK)
3214 			num_valid = num_avail;
3215 		else
3216 			num_valid = srng->num_entries - num_avail;
3217 	} else {
3218 		if (*headp >= *tailp)
3219 			num_valid = ((*headp - *tailp) / srng->entry_size);
3220 		else
3221 			num_valid = ((srng->ring_size - *tailp + *headp) /
3222 				     srng->entry_size);
3223 	}
3224 	ring_usage = (100 * num_valid) / srng->num_entries;
3225 	return ring_usage;
3226 }
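
/*
 * Example (illustrative sketch; log_high_ring_usage() is a placeholder):
 * ring usage is typically computed from the HW head/tail values fetched
 * via hal_get_hw_hptp() declared above.
 *
 *	uint32_t headp, tailp, usage;
 *
 *	hal_get_hw_hptp(hal_soc_hdl, hal_ring_hdl, &headp, &tailp, ring_type);
 *	usage = hal_get_ring_usage(hal_ring_hdl, ring_type, &headp, &tailp);
 *	if (usage > 90)
 *		log_high_ring_usage(usage);
 */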
3227 
3228 /**
3229  * hal_cmem_write() - function for CMEM buffer writing
3230  * @hal_soc_hdl: HAL SOC handle
3231  * @offset: CMEM address
3232  * @value: value to write
3233  *
3234  * Return: None.
3235  */
3236 static inline void
3237 hal_cmem_write(hal_soc_handle_t hal_soc_hdl, uint32_t offset,
3238 	       uint32_t value)
3239 {
3240 	struct hal_soc *hal_soc = (struct hal_soc *)hal_soc_hdl;
3241 
3242 	if (hal_soc->ops->hal_cmem_write)
3243 		hal_soc->ops->hal_cmem_write(hal_soc_hdl, offset, value);
3244 
3245 	return;
3246 }
3247 
3248 static inline bool
3249 hal_dmac_cmn_src_rxbuf_ring_get(hal_soc_handle_t hal_soc_hdl)
3250 {
3251 	struct hal_soc *hal_soc = (struct hal_soc *)hal_soc_hdl;
3252 
3253 	return hal_soc->dmac_cmn_src_rxbuf_ring;
3254 }
3255 
3256 /**
3257  * hal_srng_dst_prefetch() - function to prefetch 4 destination ring descs
3258  * @hal_soc_hdl: HAL SOC handle
3259  * @hal_ring_hdl: Destination ring pointer
3260  * @num_valid: valid entries in the ring
3261  *
3262  * return: last prefetched destination ring descriptor
3263  */
3264 static inline
3265 void *hal_srng_dst_prefetch(hal_soc_handle_t hal_soc_hdl,
3266 			    hal_ring_handle_t hal_ring_hdl,
3267 			    uint16_t num_valid)
3268 {
3269 	struct hal_srng *srng = (struct hal_srng *)hal_ring_hdl;
3270 	uint8_t *desc;
3271 	uint32_t cnt;
3272 	/*
3273 	 * Prefetching 4 HW descriptors ensures that, by the time the
3274 	 * 5th HW descriptor is being processed, the 5th HW descriptor,
3275 	 * its SW desc, its nbuf and its nbuf's data are already in the
3276 	 * cache line; i.e. all four (HW desc, SW desc, nbuf and
3277 	 * nbuf->data) are prefetched.
3278 	 */
3279 	uint32_t max_prefetch = 4;
3280 
3281 	if (srng->u.dst_ring.tp == srng->u.dst_ring.cached_hp)
3282 		return NULL;
3283 
3284 	desc = (uint8_t *)&srng->ring_base_vaddr[srng->u.dst_ring.tp];
3285 
3286 	if (num_valid < max_prefetch)
3287 		max_prefetch = num_valid;
3288 
3289 	for (cnt = 0; cnt < max_prefetch; cnt++) {
3290 		desc += srng->entry_size * sizeof(uint32_t);
3291 		if (desc  == ((uint8_t *)srng->ring_vaddr_end))
3292 			desc = (uint8_t *)&srng->ring_base_vaddr[0];
3293 
3294 		qdf_prefetch(desc);
3295 	}
3296 	return (void *)desc;
3297 }
3298 
3299 /**
3300  * hal_srng_dst_prefetch_next_cached_desc() - function to prefetch next desc
3301  * @hal_soc_hdl: HAL SOC handle
3302  * @hal_ring_hdl: Destination ring pointer
3303  * @last_prefetched_hw_desc: last prefetched HW descriptor
3304  *
3305  * return: next prefetched destination descriptor
3306  */
3307 static inline
3308 void *hal_srng_dst_prefetch_next_cached_desc(hal_soc_handle_t hal_soc_hdl,
3309 					     hal_ring_handle_t hal_ring_hdl,
3310 					     uint8_t *last_prefetched_hw_desc)
3311 {
3312 	struct hal_srng *srng = (struct hal_srng *)hal_ring_hdl;
3313 
3314 	if (srng->u.dst_ring.tp == srng->u.dst_ring.cached_hp)
3315 		return NULL;
3316 
3317 	last_prefetched_hw_desc += srng->entry_size * sizeof(uint32_t);
3318 	if (last_prefetched_hw_desc == ((uint8_t *)srng->ring_vaddr_end))
3319 		last_prefetched_hw_desc = (uint8_t *)&srng->ring_base_vaddr[0];
3320 
3321 	qdf_prefetch(last_prefetched_hw_desc);
3322 	return (void *)last_prefetched_hw_desc;
3323 }
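
/*
 * Example (illustrative sketch): an initial batch of descriptors is
 * prefetched before the processing loop, and the descriptor after the
 * last prefetched one is prefetched while the current entry is being
 * handled.
 *
 *	uint8_t *last_pf;
 *
 *	last_pf = hal_srng_dst_prefetch(hal_soc_hdl, hal_ring_hdl, num_valid);
 *	... for each descriptor processed in the loop ...
 *	last_pf = hal_srng_dst_prefetch_next_cached_desc(hal_soc_hdl,
 *							 hal_ring_hdl,
 *							 last_pf);
 */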
3324 
3325 /**
3326  * hal_srng_dst_prefetch_32_byte_desc() - function to prefetch a desc at
3327  *					  64 byte offset
3328  * @hal_soc_hdl: HAL SOC handle
3329  * @hal_ring_hdl: Destination ring pointer
3330  * @num_valid: valid entries in the ring
3331  *
3332  * return: last prefetched destination ring descriptor
3333  */
3334 static inline
3335 void *hal_srng_dst_prefetch_32_byte_desc(hal_soc_handle_t hal_soc_hdl,
3336 					 hal_ring_handle_t hal_ring_hdl,
3337 					 uint16_t num_valid)
3338 {
3339 	struct hal_srng *srng = (struct hal_srng *)hal_ring_hdl;
3340 	uint8_t *desc;
3341 
3342 	if (srng->u.dst_ring.tp == srng->u.dst_ring.cached_hp)
3343 		return NULL;
3344 
3345 	desc = (uint8_t *)&srng->ring_base_vaddr[srng->u.dst_ring.tp];
3346 
3347 	if ((uintptr_t)desc & 0x3f)
3348 		desc += srng->entry_size * sizeof(uint32_t);
3349 	else
3350 		desc += (srng->entry_size * sizeof(uint32_t)) * 2;
3351 
3352 	if (desc  == ((uint8_t *)srng->ring_vaddr_end))
3353 		desc = (uint8_t *)&srng->ring_base_vaddr[0];
3354 
3355 	qdf_prefetch(desc);
3356 
3357 	return (void *)(desc + srng->entry_size * sizeof(uint32_t));
3358 }
3359 
3360 /**
3361  * hal_srng_dst_get_next_32_byte_desc() - function to get the next desc
3362  * @hal_soc_hdl: HAL SOC handle
3363  * @hal_ring_hdl: Destination ring pointer
3364  * @last_prefetched_hw_desc: last prefetched HW descriptor
3365  *
3366  * return: next prefetched destination descriptor
3367  */
3368 static inline
3369 void *hal_srng_dst_get_next_32_byte_desc(hal_soc_handle_t hal_soc_hdl,
3370 					 hal_ring_handle_t hal_ring_hdl,
3371 					 uint8_t *last_prefetched_hw_desc)
3372 {
3373 	struct hal_srng *srng = (struct hal_srng *)hal_ring_hdl;
3374 
3375 	if (srng->u.dst_ring.tp == srng->u.dst_ring.cached_hp)
3376 		return NULL;
3377 
3378 	last_prefetched_hw_desc += srng->entry_size * sizeof(uint32_t);
3379 	if (last_prefetched_hw_desc == ((uint8_t *)srng->ring_vaddr_end))
3380 		last_prefetched_hw_desc = (uint8_t *)&srng->ring_base_vaddr[0];
3381 
3382 	return (void *)last_prefetched_hw_desc;
3383 }
3384 
3385 /**
3386  * hal_srng_src_set_hp() - set head idx.
3387  * @hal_ring_hdl: Ring pointer
3388  * @idx: head idx
3389  *
3390  * return: none
3391  */
3392 static inline
3393 void hal_srng_src_set_hp(hal_ring_handle_t hal_ring_hdl, uint16_t idx)
3394 {
3395 	struct hal_srng *srng = (struct hal_srng *)hal_ring_hdl;
3396 
3397 	srng->u.src_ring.hp = idx * srng->entry_size;
3398 }
3399 
3400 /**
3401  * hal_srng_dst_set_tp() - set tail idx.
3402  * @hal_ring_hdl: Ring pointer
3403  * @idx: tail idx
3404  *
3405  * return: none
3406  */
3407 static inline
3408 void hal_srng_dst_set_tp(hal_ring_handle_t hal_ring_hdl, uint16_t idx)
3409 {
3410 	struct hal_srng *srng = (struct hal_srng *)hal_ring_hdl;
3411 
3412 	srng->u.dst_ring.tp = idx * srng->entry_size;
3413 }
3414 
3415 /**
3416  * hal_srng_src_get_tpidx() - get tail idx
3417  * @hal_ring_hdl: Ring pointer
3418  *
3419  * return: tail idx
3420  */
3421 static inline
3422 uint16_t hal_srng_src_get_tpidx(hal_ring_handle_t hal_ring_hdl)
3423 {
3424 	struct hal_srng *srng = (struct hal_srng *)hal_ring_hdl;
3425 	uint32_t tp = *(volatile uint32_t *)(srng->u.src_ring.tp_addr);
3426 
3427 	return tp / srng->entry_size;
3428 }
3429 
3430 /**
3431  * hal_srng_dst_get_hpidx() - get head idx
3432  * @hal_ring_hdl: Ring pointer
3433  *
3434  * return: head idx
3435  */
3436 static inline
3437 uint16_t hal_srng_dst_get_hpidx(hal_ring_handle_t hal_ring_hdl)
3438 {
3439 	struct hal_srng *srng = (struct hal_srng *)hal_ring_hdl;
3440 	uint32_t hp = *(volatile uint32_t *)(srng->u.dst_ring.hp_addr);
3441 
3442 	return hp / srng->entry_size;
3443 }
3444 
3445 #ifdef FEATURE_DIRECT_LINK
3446 /**
3447  * hal_srng_set_msi_irq_config() - Set the MSI irq configuration for srng
3448  * @hal_soc_hdl: hal soc handle
3449  * @hal_ring_hdl: srng handle
3450  * @ring_params: SRNG parameters carrying the MSI address and MSI data
3452  *
3453  * Return: QDF status
3454  */
3455 static inline QDF_STATUS
3456 hal_srng_set_msi_irq_config(hal_soc_handle_t hal_soc_hdl,
3457 			    hal_ring_handle_t hal_ring_hdl,
3458 			    struct hal_srng_params *ring_params)
3459 {
3460 	struct hal_soc *hal_soc = (struct hal_soc *)hal_soc_hdl;
3461 
3462 	return hal_soc->ops->hal_srng_set_msi_config(hal_ring_hdl, ring_params);
3463 }
3464 #else
3465 static inline QDF_STATUS
3466 hal_srng_set_msi_irq_config(hal_soc_handle_t hal_soc_hdl,
3467 			    hal_ring_handle_t hal_ring_hdl,
3468 			    struct hal_srng_params *ring_params)
3469 {
3470 	return QDF_STATUS_E_NOSUPPORT;
3471 }
3472 #endif
3473 #endif /* _HAL_API_H_ */
3474