1 /*
2  * Copyright (c) 2016-2020 The Linux Foundation. All rights reserved.
3  *
4  * Permission to use, copy, modify, and/or distribute this software for
5  * any purpose with or without fee is hereby granted, provided that the
6  * above copyright notice and this permission notice appear in all
7  * copies.
8  *
9  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
10  * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
11  * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
12  * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
13  * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
14  * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
15  * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
16  * PERFORMANCE OF THIS SOFTWARE.
17  */
18 
19 #ifndef _HAL_API_H_
20 #define _HAL_API_H_
21 
22 #include "qdf_types.h"
23 #include "qdf_util.h"
24 #include "qdf_atomic.h"
25 #include "hal_internal.h"
26 #include "hif.h"
27 #include "hif_io32.h"
28 #include "qdf_platform.h"
29 
30 /* Ring index for WBM2SW2 release ring */
31 #define HAL_IPA_TX_COMP_RING_IDX 2
32 
33 /* calculate the register address offset from bar0 of shadow register x */
34 #if defined(QCA_WIFI_QCA6390) || defined(QCA_WIFI_QCA6490)
35 #define SHADOW_REGISTER_START_ADDRESS_OFFSET 0x000008FC
36 #define SHADOW_REGISTER_END_ADDRESS_OFFSET \
37 	((SHADOW_REGISTER_START_ADDRESS_OFFSET) + (4 * (MAX_SHADOW_REGISTERS)))
38 #define SHADOW_REGISTER(x) ((SHADOW_REGISTER_START_ADDRESS_OFFSET) + (4 * (x)))
39 #elif defined(QCA_WIFI_QCA6290) || defined(QCA_WIFI_QCN9000)
40 #define SHADOW_REGISTER_START_ADDRESS_OFFSET 0x00003024
41 #define SHADOW_REGISTER_END_ADDRESS_OFFSET \
42 	((SHADOW_REGISTER_START_ADDRESS_OFFSET) + (4 * (MAX_SHADOW_REGISTERS)))
43 #define SHADOW_REGISTER(x) ((SHADOW_REGISTER_START_ADDRESS_OFFSET) + (4 * (x)))
44 #elif defined(QCA_WIFI_QCA6750)
45 #define SHADOW_REGISTER_START_ADDRESS_OFFSET 0x00000504
46 #define SHADOW_REGISTER_END_ADDRESS_OFFSET \
47 	((SHADOW_REGISTER_START_ADDRESS_OFFSET) + (4 * (MAX_SHADOW_REGISTERS)))
48 #define SHADOW_REGISTER(x) ((SHADOW_REGISTER_START_ADDRESS_OFFSET) + (4 * (x)))
49 #else
50 #define SHADOW_REGISTER(x) 0
51 #endif /* QCA_WIFI_QCA6390 || QCA_WIFI_QCA6490 || QCA_WIFI_QCA6750 */
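
/*
 * Illustrative example (not part of the driver): with the QCA6290/QCN9000
 * layout above, SHADOW_REGISTER_START_ADDRESS_OFFSET is 0x3024 and each
 * shadow register is 4 bytes wide, so shadow config index 5 maps to BAR
 * offset SHADOW_REGISTER(5) = 0x3024 + (4 * 5) = 0x3038.
 */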
52 
53 #define MAX_UNWINDOWED_ADDRESS 0x80000
54 #if defined(QCA_WIFI_QCA6390) || defined(QCA_WIFI_QCA6490) || \
55     defined(QCA_WIFI_QCN9000) || defined(QCA_WIFI_QCA6750)
56 #define WINDOW_ENABLE_BIT 0x40000000
57 #else
58 #define WINDOW_ENABLE_BIT 0x80000000
59 #endif
60 #define WINDOW_REG_ADDRESS 0x310C
61 #define WINDOW_SHIFT 19
62 #define WINDOW_VALUE_MASK 0x3F
63 #define WINDOW_START MAX_UNWINDOWED_ADDRESS
64 #define WINDOW_RANGE_MASK 0x7FFFF
65 /*
66  * BAR + 4K is always accessible, any access outside this
67  * space requires force wake procedure.
68  * OFFSET = 4K - 32 bytes = 0xFE0
69  */
70 #define MAPPED_REF_OFF 0xFE0
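
/*
 * Illustrative example (not part of the driver): for a register at the
 * hypothetical BAR offset 0x1A0104, the windowing scheme used by the
 * hal_write32_mb()/hal_read32_mb() family below decomposes the offset as:
 *
 *	window value     = (0x1A0104 >> WINDOW_SHIFT) & WINDOW_VALUE_MASK = 3
 *	in-window offset = 0x1A0104 & WINDOW_RANGE_MASK = 0x20104
 *
 * The window value (with WINDOW_ENABLE_BIT set) is written to
 * WINDOW_REG_ADDRESS, after which the register is accessed at
 * dev_base_addr + WINDOW_START + 0x20104.
 */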
71 
72 #ifdef ENABLE_VERBOSE_DEBUG
73 static inline void
74 hal_set_verbose_debug(bool flag)
75 {
76 	is_hal_verbose_debug_enabled = flag;
77 }
78 #endif
79 
80 #ifdef ENABLE_HAL_SOC_STATS
81 #define HAL_STATS_INC(_handle, _field, _delta) \
82 { \
83 	if (likely(_handle)) \
84 		_handle->stats._field += _delta; \
85 }
86 #else
87 #define HAL_STATS_INC(_handle, _field, _delta)
88 #endif
89 
90 #ifdef ENABLE_HAL_REG_WR_HISTORY
91 #define HAL_REG_WRITE_FAIL_HIST_ADD(hal_soc, offset, wr_val, rd_val) \
92 	hal_reg_wr_fail_history_add(hal_soc, offset, wr_val, rd_val)
93 
94 void hal_reg_wr_fail_history_add(struct hal_soc *hal_soc,
95 				 uint32_t offset,
96 				 uint32_t wr_val,
97 				 uint32_t rd_val);
98 
99 static inline int hal_history_get_next_index(qdf_atomic_t *table_index,
100 					     int array_size)
101 {
102 	int record_index = qdf_atomic_inc_return(table_index);
103 
104 	return record_index & (array_size - 1);
105 }
106 #else
107 #define HAL_REG_WRITE_FAIL_HIST_ADD(hal_soc, offset, wr_val, rd_val) \
108 	hal_err("write failed at reg offset 0x%x, write 0x%x read 0x%x\n", \
109 		offset,	\
110 		wr_val,	\
111 		rd_val)
112 #endif
113 
114 /**
115  * hal_reg_write_result_check() - check register writing result
116  * @hal_soc: HAL soc handle
117  * @offset: register offset to read
118  * @exp_val: the expected value of register
120  *
121  * Return: none
122  */
123 static inline void hal_reg_write_result_check(struct hal_soc *hal_soc,
124 					      uint32_t offset,
125 					      uint32_t exp_val)
126 {
127 	uint32_t value;
128 
129 	value = qdf_ioread32(hal_soc->dev_base_addr + offset);
130 	if (exp_val != value) {
131 		HAL_REG_WRITE_FAIL_HIST_ADD(hal_soc, offset, exp_val, value);
132 		HAL_STATS_INC(hal_soc, reg_write_fail, 1);
133 	}
134 }
135 
136 #if !defined(QCA_WIFI_QCA6390) && !defined(QCA_WIFI_QCA6490) && \
137     !defined(QCA_WIFI_QCA6750)
138 static inline void hal_lock_reg_access(struct hal_soc *soc,
139 				       unsigned long *flags)
140 {
141 	qdf_spin_lock_irqsave(&soc->register_access_lock);
142 }
143 
144 static inline void hal_unlock_reg_access(struct hal_soc *soc,
145 					 unsigned long *flags)
146 {
147 	qdf_spin_unlock_irqrestore(&soc->register_access_lock);
148 }
149 #else
150 static inline void hal_lock_reg_access(struct hal_soc *soc,
151 				       unsigned long *flags)
152 {
153 	pld_lock_reg_window(soc->qdf_dev->dev, flags);
154 }
155 
156 static inline void hal_unlock_reg_access(struct hal_soc *soc,
157 					 unsigned long *flags)
158 {
159 	pld_unlock_reg_window(soc->qdf_dev->dev, flags);
160 }
161 #endif
162 
163 #ifdef PCIE_REG_WINDOW_LOCAL_NO_CACHE
164 /**
165  * hal_select_window_confirm() - write remap window register and
166  *				 check writing result
167  *
168  */
169 static inline void hal_select_window_confirm(struct hal_soc *hal_soc,
170 					     uint32_t offset)
171 {
172 	uint32_t window = (offset >> WINDOW_SHIFT) & WINDOW_VALUE_MASK;
173 
174 	qdf_iowrite32(hal_soc->dev_base_addr + WINDOW_REG_ADDRESS,
175 		      WINDOW_ENABLE_BIT | window);
176 	hal_soc->register_window = window;
177 
178 	hal_reg_write_result_check(hal_soc, WINDOW_REG_ADDRESS,
179 				   WINDOW_ENABLE_BIT | window);
180 }
181 #else
182 static inline void hal_select_window_confirm(struct hal_soc *hal_soc,
183 					     uint32_t offset)
184 {
185 	uint32_t window = (offset >> WINDOW_SHIFT) & WINDOW_VALUE_MASK;
186 
187 	if (window != hal_soc->register_window) {
188 		qdf_iowrite32(hal_soc->dev_base_addr + WINDOW_REG_ADDRESS,
189 			      WINDOW_ENABLE_BIT | window);
190 		hal_soc->register_window = window;
191 
192 		hal_reg_write_result_check(
193 					hal_soc,
194 					WINDOW_REG_ADDRESS,
195 					WINDOW_ENABLE_BIT | window);
196 	}
197 }
198 #endif
199 
200 static inline qdf_iomem_t hal_get_window_address(struct hal_soc *hal_soc,
201 						 qdf_iomem_t addr)
202 {
203 	return hal_soc->ops->hal_get_window_address(hal_soc, addr);
204 }
205 
206 static inline void hal_tx_init_cmd_credit_ring(hal_soc_handle_t hal_soc_hdl,
207 					       hal_ring_handle_t hal_ring_hdl)
208 {
209 	struct hal_soc *hal_soc = (struct hal_soc *)hal_soc_hdl;
210 
211 	return hal_soc->ops->hal_tx_init_cmd_credit_ring(hal_soc_hdl,
212 							 hal_ring_hdl);
213 }
214 
215 /**
216  * hal_write32_mb() - Access registers to update configuration
217  * @hal_soc: hal soc handle
218  * @offset: offset address from the BAR
219  * @value: value to write
220  *
221  * Return: None
222  *
223  * Description: Register address space is split below:
224  *     SHADOW REGION       UNWINDOWED REGION    WINDOWED REGION
225  *  |--------------------|-------------------|------------------|
226  * BAR  NO FORCE WAKE  BAR+4K  FORCE WAKE  BAR+512K  FORCE WAKE
227  *
228  * 1. Any access to the shadow region, doesn't need force wake
229  *    and windowing logic to access.
230  * 2. Any access beyond BAR + 4K:
231  *    If init_phase enabled, no force wake is needed and access
232  *    should be based on windowed or unwindowed access.
233  *    If init_phase disabled, force wake is needed and access
234  *    should be based on windowed or unwindowed access.
235  *
236  * note1: WINDOW_RANGE_MASK = (1 << WINDOW_SHIFT) -1
237  * note2: 1 << WINDOW_SHIFT = MAX_UNWINDOWED_ADDRESS
238  * note3: WINDOW_VALUE_MASK = big enough that trying to write past
239  *                            that window would be a bug
240  */
241 #if !defined(QCA_WIFI_QCA6390) && !defined(QCA_WIFI_QCA6490) && \
242     !defined(QCA_WIFI_QCA6750)
243 static inline void hal_write32_mb(struct hal_soc *hal_soc, uint32_t offset,
244 				  uint32_t value)
245 {
246 	unsigned long flags;
247 	qdf_iomem_t new_addr;
248 
249 	if (!hal_soc->use_register_windowing ||
250 	    offset < MAX_UNWINDOWED_ADDRESS) {
251 		qdf_iowrite32(hal_soc->dev_base_addr + offset, value);
252 	} else if (hal_soc->static_window_map) {
253 		new_addr = hal_get_window_address(hal_soc,
254 				hal_soc->dev_base_addr + offset);
255 		qdf_iowrite32(new_addr, value);
256 	} else {
257 		hal_lock_reg_access(hal_soc, &flags);
258 		hal_select_window_confirm(hal_soc, offset);
259 		qdf_iowrite32(hal_soc->dev_base_addr + WINDOW_START +
260 			  (offset & WINDOW_RANGE_MASK), value);
261 		hal_unlock_reg_access(hal_soc, &flags);
262 	}
263 }
264 
265 #define hal_write32_mb_confirm(_hal_soc, _offset, _value) \
266 		hal_write32_mb(_hal_soc, _offset, _value)
267 
268 #define hal_write32_mb_cmem(_hal_soc, _offset, _value)
269 #else
270 static inline void hal_write32_mb(struct hal_soc *hal_soc, uint32_t offset,
271 				  uint32_t value)
272 {
273 	int ret;
274 	unsigned long flags;
275 	qdf_iomem_t new_addr;
276 
277 	if (!TARGET_ACCESS_ALLOWED(HIF_GET_SOFTC(
278 					hal_soc->hif_handle))) {
279 		hal_err_rl("target access is not allowed");
280 		return;
281 	}
282 
283 	/* Region < BAR + 4K can be directly accessed */
284 	if (offset < MAPPED_REF_OFF) {
285 		qdf_iowrite32(hal_soc->dev_base_addr + offset, value);
286 		return;
287 	}
288 
289 	/* Region greater than BAR + 4K */
290 	if (!hal_soc->init_phase) {
291 		ret = hif_force_wake_request(hal_soc->hif_handle);
292 		if (ret) {
293 			hal_err_rl("Wake up request failed");
294 			qdf_check_state_before_panic();
295 			return;
296 		}
297 	}
298 
299 	if (!hal_soc->use_register_windowing ||
300 	    offset < MAX_UNWINDOWED_ADDRESS) {
301 		qdf_iowrite32(hal_soc->dev_base_addr + offset, value);
302 	} else if (hal_soc->static_window_map) {
303 		new_addr = hal_get_window_address(
304 					hal_soc,
305 					hal_soc->dev_base_addr + offset);
306 		qdf_iowrite32(new_addr, value);
307 	} else {
308 		hal_lock_reg_access(hal_soc, &flags);
309 		hal_select_window_confirm(hal_soc, offset);
310 		qdf_iowrite32(hal_soc->dev_base_addr + WINDOW_START +
311 			  (offset & WINDOW_RANGE_MASK), value);
312 		hal_unlock_reg_access(hal_soc, &flags);
313 	}
314 
315 	if (!hal_soc->init_phase) {
316 		ret = hif_force_wake_release(hal_soc->hif_handle);
317 		if (ret) {
318 			hal_err("Wake up release failed");
319 			qdf_check_state_before_panic();
320 			return;
321 		}
322 	}
323 }
324 
325 /**
326  * hal_write32_mb_confirm() - write register and check writing result
327  *
328  */
329 static inline void hal_write32_mb_confirm(struct hal_soc *hal_soc,
330 					  uint32_t offset,
331 					  uint32_t value)
332 {
333 	int ret;
334 	unsigned long flags;
335 	qdf_iomem_t new_addr;
336 
337 	if (!TARGET_ACCESS_ALLOWED(HIF_GET_SOFTC(
338 					hal_soc->hif_handle))) {
339 		hal_err_rl("target access is not allowed");
340 		return;
341 	}
342 
343 	/* Region < BAR + 4K can be directly accessed */
344 	if (offset < MAPPED_REF_OFF) {
345 		qdf_iowrite32(hal_soc->dev_base_addr + offset, value);
346 		return;
347 	}
348 
349 	/* Region greater than BAR + 4K */
350 	if (!hal_soc->init_phase) {
351 		ret = hif_force_wake_request(hal_soc->hif_handle);
352 		if (ret) {
353 			hal_err("Wake up request failed");
354 			qdf_check_state_before_panic();
355 			return;
356 		}
357 	}
358 
359 	if (!hal_soc->use_register_windowing ||
360 	    offset < MAX_UNWINDOWED_ADDRESS) {
361 		qdf_iowrite32(hal_soc->dev_base_addr + offset, value);
362 		hal_reg_write_result_check(hal_soc, offset,
363 					   value);
364 	} else if (hal_soc->static_window_map) {
365 		new_addr = hal_get_window_address(
366 					hal_soc,
367 					hal_soc->dev_base_addr + offset);
368 		qdf_iowrite32(new_addr, value);
369 		hal_reg_write_result_check(hal_soc,
370 					   new_addr - hal_soc->dev_base_addr,
371 					   value);
372 	} else {
373 		hal_lock_reg_access(hal_soc, &flags);
374 		hal_select_window_confirm(hal_soc, offset);
375 		qdf_iowrite32(hal_soc->dev_base_addr + WINDOW_START +
376 			  (offset & WINDOW_RANGE_MASK), value);
377 
378 		hal_reg_write_result_check(
379 				hal_soc,
380 				WINDOW_START + (offset & WINDOW_RANGE_MASK),
381 				value);
382 		hal_unlock_reg_access(hal_soc, &flags);
383 	}
384 
385 	if (!hal_soc->init_phase) {
386 		ret = hif_force_wake_release(hal_soc->hif_handle);
387 		if (ret) {
388 			hal_err("Wake up release failed");
389 			qdf_check_state_before_panic();
390 			return;
391 		}
392 	}
393 }
394 
395 static inline void hal_write32_mb_cmem(struct hal_soc *hal_soc, uint32_t offset,
396 				       uint32_t value)
397 {
398 	unsigned long flags;
399 	qdf_iomem_t new_addr;
400 
401 	if (!TARGET_ACCESS_ALLOWED(HIF_GET_SOFTC(
402 					hal_soc->hif_handle))) {
403 		hal_err_rl("%s: target access is not allowed", __func__);
404 		return;
405 	}
406 
407 	if (!hal_soc->use_register_windowing ||
408 	    offset < MAX_UNWINDOWED_ADDRESS) {
409 		qdf_iowrite32(hal_soc->dev_base_addr + offset, value);
410 	} else if (hal_soc->static_window_map) {
411 		new_addr = hal_get_window_address(
412 					hal_soc,
413 					hal_soc->dev_base_addr + offset);
414 		qdf_iowrite32(new_addr, value);
415 	} else {
416 		hal_lock_reg_access(hal_soc, &flags);
417 		hal_select_window_confirm(hal_soc, offset);
418 		qdf_iowrite32(hal_soc->dev_base_addr + WINDOW_START +
419 			  (offset & WINDOW_RANGE_MASK), value);
420 		hal_unlock_reg_access(hal_soc, &flags);
421 	}
422 }
423 #endif
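
/*
 * Illustrative usage sketch (not part of the driver): callers use
 * hal_write32_mb() for plain register writes and hal_write32_mb_confirm()
 * when the write must be read back and verified. The offset and value below
 * are made-up placeholders.
 *
 *	static void example_program_reg(struct hal_soc *hal_soc)
 *	{
 *		uint32_t example_offset = 0x1A0104;
 *
 *		hal_write32_mb(hal_soc, example_offset, 0x1);
 *		hal_write32_mb_confirm(hal_soc, example_offset, 0x1);
 *	}
 */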
424 
425 /**
426  * hal_write_address_32_mb - write a value to a register
427  *
428  */
429 static inline
430 void hal_write_address_32_mb(struct hal_soc *hal_soc,
431 			     qdf_iomem_t addr, uint32_t value, bool wr_confirm)
432 {
433 	uint32_t offset;
434 
435 	if (!hal_soc->use_register_windowing)
436 		return qdf_iowrite32(addr, value);
437 
438 	offset = addr - hal_soc->dev_base_addr;
439 
440 	if (qdf_unlikely(wr_confirm))
441 		hal_write32_mb_confirm(hal_soc, offset, value);
442 	else
443 		hal_write32_mb(hal_soc, offset, value);
444 }
445 
446 
447 #ifdef DP_HAL_MULTIWINDOW_DIRECT_ACCESS
448 static inline void hal_srng_write_address_32_mb(struct hal_soc *hal_soc,
449 						struct hal_srng *srng,
450 						void __iomem *addr,
451 						uint32_t value)
452 {
453 	qdf_iowrite32(addr, value);
454 }
455 #elif defined(FEATURE_HAL_DELAYED_REG_WRITE)
456 static inline void hal_srng_write_address_32_mb(struct hal_soc *hal_soc,
457 						struct hal_srng *srng,
458 						void __iomem *addr,
459 						uint32_t value)
460 {
461 	hal_delayed_reg_write(hal_soc, srng, addr, value);
462 }
463 #else
464 static inline void hal_srng_write_address_32_mb(struct hal_soc *hal_soc,
465 						struct hal_srng *srng,
466 						void __iomem *addr,
467 						uint32_t value)
468 {
469 	hal_write_address_32_mb(hal_soc, addr, value, false);
470 }
471 #endif
472 
473 #if !defined(QCA_WIFI_QCA6390) && !defined(QCA_WIFI_QCA6490) && \
474     !defined(QCA_WIFI_QCA6750)
475 /**
476  * hal_read32_mb() - Access registers to read configuration
477  * @hal_soc: hal soc handle
478  * @offset: offset address from the BAR
480  *
481  * Description: Register address space is split below:
482  *     SHADOW REGION       UNWINDOWED REGION    WINDOWED REGION
483  *  |--------------------|-------------------|------------------|
484  * BAR  NO FORCE WAKE  BAR+4K  FORCE WAKE  BAR+512K  FORCE WAKE
485  *
486  * 1. Any access to the shadow region, doesn't need force wake
487  *    and windowing logic to access.
488  * 2. Any access beyond BAR + 4K:
489  *    If init_phase enabled, no force wake is needed and access
490  *    should be based on windowed or unwindowed access.
491  *    If init_phase disabled, force wake is needed and access
492  *    should be based on windowed or unwindowed access.
493  *
494  * Return: value read from the register
495  */
496 static inline uint32_t hal_read32_mb(struct hal_soc *hal_soc, uint32_t offset)
497 {
498 	uint32_t ret;
499 	unsigned long flags;
500 	qdf_iomem_t new_addr;
501 
502 	if (!hal_soc->use_register_windowing ||
503 	    offset < MAX_UNWINDOWED_ADDRESS) {
504 		return qdf_ioread32(hal_soc->dev_base_addr + offset);
505 	} else if (hal_soc->static_window_map) {
506 		new_addr = hal_get_window_address(hal_soc, hal_soc->dev_base_addr + offset);
507 		return qdf_ioread32(new_addr);
508 	}
509 
510 	hal_lock_reg_access(hal_soc, &flags);
511 	hal_select_window_confirm(hal_soc, offset);
512 	ret = qdf_ioread32(hal_soc->dev_base_addr + WINDOW_START +
513 		       (offset & WINDOW_RANGE_MASK));
514 	hal_unlock_reg_access(hal_soc, &flags);
515 
516 	return ret;
517 }
518 
519 #define hal_read32_mb_cmem(_hal_soc, _offset)
520 #else
521 static inline
522 uint32_t hal_read32_mb(struct hal_soc *hal_soc, uint32_t offset)
523 {
524 	uint32_t ret;
525 	unsigned long flags;
526 	qdf_iomem_t new_addr;
527 
528 	if (!TARGET_ACCESS_ALLOWED(HIF_GET_SOFTC(
529 					hal_soc->hif_handle))) {
530 		hal_err_rl("target access is not allowed");
531 		return 0;
532 	}
533 
534 	/* Region < BAR + 4K can be directly accessed */
535 	if (offset < MAPPED_REF_OFF)
536 		return qdf_ioread32(hal_soc->dev_base_addr + offset);
537 
538 	if ((!hal_soc->init_phase) &&
539 	    hif_force_wake_request(hal_soc->hif_handle)) {
540 		hal_err("Wake up request failed");
541 		qdf_check_state_before_panic();
542 		return 0;
543 	}
544 
545 	if (!hal_soc->use_register_windowing ||
546 	    offset < MAX_UNWINDOWED_ADDRESS) {
547 		ret = qdf_ioread32(hal_soc->dev_base_addr + offset);
548 	} else if (hal_soc->static_window_map) {
549 		new_addr = hal_get_window_address(
550 					hal_soc,
551 					hal_soc->dev_base_addr + offset);
552 		ret = qdf_ioread32(new_addr);
553 	} else {
554 		hal_lock_reg_access(hal_soc, &flags);
555 		hal_select_window_confirm(hal_soc, offset);
556 		ret = qdf_ioread32(hal_soc->dev_base_addr + WINDOW_START +
557 			       (offset & WINDOW_RANGE_MASK));
558 		hal_unlock_reg_access(hal_soc, &flags);
559 	}
560 
561 	if ((!hal_soc->init_phase) &&
562 	    hif_force_wake_release(hal_soc->hif_handle)) {
563 		hal_err("Wake up release failed");
564 		qdf_check_state_before_panic();
565 		return 0;
566 	}
567 
568 	return ret;
569 }
570 
571 static inline
572 uint32_t hal_read32_mb_cmem(struct hal_soc *hal_soc, uint32_t offset)
573 {
574 	uint32_t ret;
575 	unsigned long flags;
576 	qdf_iomem_t new_addr;
577 
578 	if (!TARGET_ACCESS_ALLOWED(HIF_GET_SOFTC(
579 					hal_soc->hif_handle))) {
580 		hal_err_rl("%s: target access is not allowed", __func__);
581 		return 0;
582 	}
583 
584 	if (!hal_soc->use_register_windowing ||
585 	    offset < MAX_UNWINDOWED_ADDRESS) {
586 		ret = qdf_ioread32(hal_soc->dev_base_addr + offset);
587 	} else if (hal_soc->static_window_map) {
588 		new_addr = hal_get_window_address(
589 					hal_soc,
590 					hal_soc->dev_base_addr + offset);
591 		ret = qdf_ioread32(new_addr);
592 	} else {
593 		hal_lock_reg_access(hal_soc, &flags);
594 		hal_select_window_confirm(hal_soc, offset);
595 		ret = qdf_ioread32(hal_soc->dev_base_addr + WINDOW_START +
596 			       (offset & WINDOW_RANGE_MASK));
597 		hal_unlock_reg_access(hal_soc, &flags);
598 	}
599 	return ret;
600 }
601 #endif
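
/*
 * Illustrative read-modify-write sketch (not part of the driver):
 * example_offset and example_bit_mask are made-up placeholders.
 *
 *	uint32_t val;
 *
 *	val = hal_read32_mb(hal_soc, example_offset);
 *	val |= example_bit_mask;
 *	hal_write32_mb(hal_soc, example_offset, val);
 */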
602 
603 /* Max times allowed for register writing retry */
604 #define HAL_REG_WRITE_RETRY_MAX		5
605 /* Delay in milliseconds between each retry */
606 #define HAL_REG_WRITE_RETRY_DELAY	1
607 
608 #ifdef GENERIC_SHADOW_REGISTER_ACCESS_ENABLE
609 /* To check shadow config index range between 0..31 */
610 #define HAL_SHADOW_REG_INDEX_LOW 32
611 /* To check shadow config index range between 32..39 */
612 #define HAL_SHADOW_REG_INDEX_HIGH 40
613 /* Dirty bit reg offsets corresponding to shadow config index */
614 #define HAL_SHADOW_REG_DIRTY_BIT_DATA_LOW_OFFSET 0x30C8
615 #define HAL_SHADOW_REG_DIRTY_BIT_DATA_HIGH_OFFSET 0x30C4
616 /* PCIE_PCIE_TOP base addr offset */
617 #define HAL_PCIE_PCIE_TOP_WRAPPER 0x01E00000
618 /* Max retry attempts to read the dirty bit reg */
619 #ifdef HAL_CONFIG_SLUB_DEBUG_ON
620 #define HAL_SHADOW_DIRTY_BIT_POLL_MAX 10000
621 #else
622 #define HAL_SHADOW_DIRTY_BIT_POLL_MAX 2000
623 #endif
624 /* Delay in usecs for polling dirty bit reg */
625 #define HAL_SHADOW_DIRTY_BIT_POLL_DELAY 5
626 
627 /**
628  * hal_poll_dirty_bit_reg() - Poll dirty register bit to confirm
629  * write was successful
630  * @hal_soc: hal soc handle
631  * @shadow_config_index: index of shadow reg used to confirm
632  * write
633  *
634  * Return: QDF_STATUS_SUCCESS on success
635  */
636 static inline QDF_STATUS hal_poll_dirty_bit_reg(struct hal_soc *hal,
637 						int shadow_config_index)
638 {
639 	uint32_t read_value = 0;
640 	int retry_cnt = 0;
641 	uint32_t reg_offset = 0;
642 
643 	if (shadow_config_index > 0 &&
644 	    shadow_config_index < HAL_SHADOW_REG_INDEX_LOW) {
645 		reg_offset =
646 			HAL_SHADOW_REG_DIRTY_BIT_DATA_LOW_OFFSET;
647 	} else if (shadow_config_index >= HAL_SHADOW_REG_INDEX_LOW &&
648 		   shadow_config_index < HAL_SHADOW_REG_INDEX_HIGH) {
649 		reg_offset =
650 			HAL_SHADOW_REG_DIRTY_BIT_DATA_HIGH_OFFSET;
651 	} else {
652 		hal_err("Invalid shadow_config_index = %d",
653 			shadow_config_index);
654 		return QDF_STATUS_E_INVAL;
655 	}
656 	while (retry_cnt < HAL_SHADOW_DIRTY_BIT_POLL_MAX) {
657 		read_value = hal_read32_mb(
658 				hal, HAL_PCIE_PCIE_TOP_WRAPPER + reg_offset);
659 		/* Check if dirty bit corresponding to shadow_index is set */
660 		if (read_value & BIT(shadow_config_index)) {
661 			/* Dirty reg bit not reset */
662 			qdf_udelay(HAL_SHADOW_DIRTY_BIT_POLL_DELAY);
663 			retry_cnt++;
664 		} else {
665 			hal_debug("Shadow write: offset 0x%x read val 0x%x",
666 				  reg_offset, read_value);
667 			return QDF_STATUS_SUCCESS;
668 		}
669 	}
670 	return QDF_STATUS_E_TIMEOUT;
671 }
672 
673 /**
674  * hal_write32_mb_shadow_confirm() - write to shadow reg and
675  * poll dirty register bit to confirm write
676  * @hal_soc: hal soc handle
677  * @reg_offset: target reg offset address from BAR
678  * @value: value to write
679  *
680  * Return: QDF_STATUS_SUCCESS on success
681  */
682 static inline QDF_STATUS hal_write32_mb_shadow_confirm(
683 	struct hal_soc *hal,
684 	uint32_t reg_offset,
685 	uint32_t value)
686 {
687 	int i;
688 	QDF_STATUS ret;
689 	uint32_t shadow_reg_offset;
690 	int shadow_config_index;
691 	bool is_reg_offset_present = false;
692 
693 	for (i = 0; i < MAX_GENERIC_SHADOW_REG; i++) {
694 		/* Found the shadow config for the reg_offset */
695 		struct shadow_reg_config *hal_shadow_reg_list =
696 			&hal->list_shadow_reg_config[i];
697 		if (hal_shadow_reg_list->target_register ==
698 			reg_offset) {
699 			shadow_config_index =
700 				hal_shadow_reg_list->shadow_config_index;
701 			shadow_reg_offset =
702 				SHADOW_REGISTER(shadow_config_index);
703 			hal_write32_mb_confirm(
704 				hal, shadow_reg_offset, value);
705 			is_reg_offset_present = true;
706 			break;
707 		}
708 		ret = QDF_STATUS_E_FAILURE;
709 	}
710 	if (is_reg_offset_present) {
711 		ret = hal_poll_dirty_bit_reg(hal, shadow_config_index);
712 		hal_info("Shadow write:reg 0x%x val 0x%x ret %d",
713 			 reg_offset, value, ret);
714 		if (QDF_IS_STATUS_ERROR(ret)) {
715 			HAL_STATS_INC(hal, shadow_reg_write_fail, 1);
716 			return ret;
717 		}
718 		HAL_STATS_INC(hal, shadow_reg_write_succ, 1);
719 	}
720 	return ret;
721 }
722 
723 /**
724  * hal_write32_mb_confirm_retry() - write register with confirming and
725  *				    do retry/recovery if writing failed
726  * @hal_soc: hal soc handle
727  * @offset: offset address from the BAR
728  * @value: value to write
729  * @recovery: is recovery needed or not.
730  *
731  * Write the register value with confirming and read it back, if
732  * read back value is not as expected, do retry for writing, if
733  * retry hit max times allowed but still fail, check if recovery
734  * needed.
735  *
736  * Return: None
737  */
738 static inline void hal_write32_mb_confirm_retry(struct hal_soc *hal_soc,
739 						uint32_t offset,
740 						uint32_t value,
741 						bool recovery)
742 {
743 	QDF_STATUS ret;
744 
745 	ret = hal_write32_mb_shadow_confirm(hal_soc, offset, value);
746 	if (QDF_IS_STATUS_ERROR(ret) && recovery)
747 		qdf_trigger_self_recovery(NULL, QDF_HAL_REG_WRITE_FAILURE);
748 }
749 #else /* GENERIC_SHADOW_REGISTER_ACCESS_ENABLE */
750 
751 static inline void hal_write32_mb_confirm_retry(struct hal_soc *hal_soc,
752 						uint32_t offset,
753 						uint32_t value,
754 						bool recovery)
755 {
756 	uint8_t retry_cnt = 0;
757 	uint32_t read_value;
758 
759 	while (retry_cnt <= HAL_REG_WRITE_RETRY_MAX) {
760 		hal_write32_mb_confirm(hal_soc, offset, value);
761 		read_value = hal_read32_mb(hal_soc, offset);
762 		if (qdf_likely(read_value == value))
763 			break;
764 
765 		/* write failed, do retry */
766 		hal_warn("Retry reg offset 0x%x, value 0x%x, read value 0x%x",
767 			 offset, value, read_value);
768 		qdf_mdelay(HAL_REG_WRITE_RETRY_DELAY);
769 		retry_cnt++;
770 	}
771 
772 	if (retry_cnt > HAL_REG_WRITE_RETRY_MAX && recovery)
773 		qdf_trigger_self_recovery(NULL, QDF_HAL_REG_WRITE_FAILURE);
774 }
775 #endif /* GENERIC_SHADOW_REGISTER_ACCESS_ENABLE */
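
/*
 * Illustrative usage sketch (not part of the driver): whichever
 * implementation above is compiled in, callers invoke
 * hal_write32_mb_confirm_retry() for writes that must not be silently
 * dropped, and pass recovery = true when self-recovery should be triggered
 * after all retries fail. The offset and value are made-up placeholders.
 *
 *	hal_write32_mb_confirm_retry(hal_soc, example_offset, example_value,
 *				     true);
 */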
776 
777 #ifdef FEATURE_HAL_DELAYED_REG_WRITE
778 /**
779  * hal_dump_reg_write_srng_stats() - dump SRNG reg write stats
780  * @hal_soc: HAL soc handle
781  *
782  * Return: none
783  */
784 void hal_dump_reg_write_srng_stats(hal_soc_handle_t hal_soc_hdl);
785 
786 /**
787  * hal_dump_reg_write_stats() - dump reg write stats
788  * @hal_soc: HAL soc handle
789  *
790  * Return: none
791  */
792 void hal_dump_reg_write_stats(hal_soc_handle_t hal_soc_hdl);
793 
794 /**
795  * hal_get_reg_write_pending_work() - get the number of entries
796  *		pending in the workqueue to be processed.
797  * @hal_soc: HAL soc handle
798  *
799  * Returns: the number of entries pending to be processed
800  */
801 int hal_get_reg_write_pending_work(void *hal_soc);
802 
803 #else
804 static inline void hal_dump_reg_write_srng_stats(hal_soc_handle_t hal_soc_hdl)
805 {
806 }
807 
808 static inline void hal_dump_reg_write_stats(hal_soc_handle_t hal_soc_hdl)
809 {
810 }
811 
812 static inline int hal_get_reg_write_pending_work(void *hal_soc)
813 {
814 	return 0;
815 }
816 #endif
817 
818 /**
819  * hal_read_address_32_mb() - Read 32-bit value from the register
820  * @soc: soc handle
821  * @addr: register address to read
822  *
823  * Return: 32-bit value
824  */
825 static inline
826 uint32_t hal_read_address_32_mb(struct hal_soc *soc,
827 				qdf_iomem_t addr)
828 {
829 	uint32_t offset;
830 	uint32_t ret;
831 
832 	if (!soc->use_register_windowing)
833 		return qdf_ioread32(addr);
834 
835 	offset = addr - soc->dev_base_addr;
836 	ret = hal_read32_mb(soc, offset);
837 	return ret;
838 }
839 
840 /**
841  * hal_attach - Initialize HAL layer
842  * @hif_handle: Opaque HIF handle
843  * @qdf_dev: QDF device
844  *
845  * Return: Opaque HAL SOC handle
846  *		 NULL on failure
847  *
848  * This function should be called as part of HIF initialization (for accessing
849  * copy engines). DP layer will get hal_soc handle using hif_get_hal_handle()
850  */
851 void *hal_attach(struct hif_opaque_softc *hif_handle, qdf_device_t qdf_dev);
852 
853 /**
854  * hal_detach - Detach HAL layer
855  * @hal_soc: HAL SOC handle
856  *
857  * This function should be called as part of HIF detach
858  *
859  */
860 extern void hal_detach(void *hal_soc);
861 
862 /* SRNG type to be passed in APIs hal_srng_get_entrysize and hal_srng_setup */
863 enum hal_ring_type {
864 	REO_DST = 0,
865 	REO_EXCEPTION = 1,
866 	REO_REINJECT = 2,
867 	REO_CMD = 3,
868 	REO_STATUS = 4,
869 	TCL_DATA = 5,
870 	TCL_CMD_CREDIT = 6,
871 	TCL_STATUS = 7,
872 	CE_SRC = 8,
873 	CE_DST = 9,
874 	CE_DST_STATUS = 10,
875 	WBM_IDLE_LINK = 11,
876 	SW2WBM_RELEASE = 12,
877 	WBM2SW_RELEASE = 13,
878 	RXDMA_BUF = 14,
879 	RXDMA_DST = 15,
880 	RXDMA_MONITOR_BUF = 16,
881 	RXDMA_MONITOR_STATUS = 17,
882 	RXDMA_MONITOR_DST = 18,
883 	RXDMA_MONITOR_DESC = 19,
884 	DIR_BUF_RX_DMA_SRC = 20,
885 #ifdef WLAN_FEATURE_CIF_CFR
886 	WIFI_POS_SRC,
887 #endif
888 	MAX_RING_TYPES
889 };
890 
891 #define HAL_SRNG_LMAC_RING 0x80000000
892 /* SRNG flags passed in hal_srng_params.flags */
893 #define HAL_SRNG_MSI_SWAP				0x00000008
894 #define HAL_SRNG_RING_PTR_SWAP			0x00000010
895 #define HAL_SRNG_DATA_TLV_SWAP			0x00000020
896 #define HAL_SRNG_LOW_THRES_INTR_ENABLE	0x00010000
897 #define HAL_SRNG_MSI_INTR				0x00020000
898 #define HAL_SRNG_CACHED_DESC		0x00040000
899 
900 #ifdef QCA_WIFI_QCA6490
901 #define HAL_SRNG_PREFETCH_TIMER 1
902 #else
903 #define HAL_SRNG_PREFETCH_TIMER 0
904 #endif
905 
906 #define PN_SIZE_24 0
907 #define PN_SIZE_48 1
908 #define PN_SIZE_128 2
909 
910 #ifdef FORCE_WAKE
911 /**
912  * hal_set_init_phase() - Indicate initialization of
913  *                        datapath rings
914  * @soc: hal_soc handle
915  * @init_phase: flag to indicate datapath rings
916  *              initialization status
917  *
918  * Return: None
919  */
920 void hal_set_init_phase(hal_soc_handle_t soc, bool init_phase);
921 #else
922 static inline
923 void hal_set_init_phase(hal_soc_handle_t soc, bool init_phase)
924 {
925 }
926 #endif /* FORCE_WAKE */
927 
928 /**
929  * hal_srng_get_entrysize - Returns size of ring entry in bytes. Should be
930  * used by callers for calculating the size of memory to be allocated before
931  * calling hal_srng_setup to setup the ring
932  *
933  * @hal_soc: Opaque HAL SOC handle
934  * @ring_type: one of the types from hal_ring_type
935  *
936  */
937 extern uint32_t hal_srng_get_entrysize(void *hal_soc, int ring_type);
938 
939 /**
940  * hal_srng_max_entries - Returns maximum possible number of ring entries
941  * @hal_soc: Opaque HAL SOC handle
942  * @ring_type: one of the types from hal_ring_type
943  *
944  * Return: Maximum number of entries for the given ring_type
945  */
946 uint32_t hal_srng_max_entries(void *hal_soc, int ring_type);
947 
948 void hal_set_low_threshold(hal_ring_handle_t hal_ring_hdl,
949 				 uint32_t low_threshold);
950 
951 /**
952  * hal_srng_dump - Dump ring status
953  * @srng: hal srng pointer
954  */
955 void hal_srng_dump(struct hal_srng *srng);
956 
957 /**
958  * hal_srng_get_dir - Returns the direction of the ring
959  * @hal_soc: Opaque HAL SOC handle
960  * @ring_type: one of the types from hal_ring_type
961  *
962  * Return: Ring direction
963  */
964 enum hal_srng_dir hal_srng_get_dir(void *hal_soc, int ring_type);
965 
966 /* HAL memory information */
967 struct hal_mem_info {
968 	/* dev base virtual addr */
969 	void *dev_base_addr;
970 	/* dev base physical addr */
971 	void *dev_base_paddr;
972 	/* dev base ce virtual addr - applicable only for qca5018  */
973 	/* In qca5018 CE register are outside wcss block */
974 	/* using a separate address space to access CE registers */
975 	void *dev_base_addr_ce;
976 	/* dev base ce physical addr */
977 	void *dev_base_paddr_ce;
978 	/* Remote virtual pointer memory for HW/FW updates */
979 	void *shadow_rdptr_mem_vaddr;
980 	/* Remote physical pointer memory for HW/FW updates */
981 	void *shadow_rdptr_mem_paddr;
982 	/* Shared memory for ring pointer updates from host to FW */
983 	void *shadow_wrptr_mem_vaddr;
984 	/* Shared physical memory for ring pointer updates from host to FW */
985 	void *shadow_wrptr_mem_paddr;
986 };
987 
988 /* SRNG parameters to be passed to hal_srng_setup */
989 struct hal_srng_params {
990 	/* Physical base address of the ring */
991 	qdf_dma_addr_t ring_base_paddr;
992 	/* Virtual base address of the ring */
993 	void *ring_base_vaddr;
994 	/* Number of entries in ring */
995 	uint32_t num_entries;
996 	/* max transfer length */
997 	uint16_t max_buffer_length;
998 	/* MSI Address */
999 	qdf_dma_addr_t msi_addr;
1000 	/* MSI data */
1001 	uint32_t msi_data;
1002 	/* Interrupt timer threshold – in micro seconds */
1003 	uint32_t intr_timer_thres_us;
1004 	/* Interrupt batch counter threshold – in number of ring entries */
1005 	uint32_t intr_batch_cntr_thres_entries;
1006 	/* Low threshold – in number of ring entries
1007 	 * (valid for src rings only)
1008 	 */
1009 	uint32_t low_threshold;
1010 	/* Misc flags */
1011 	uint32_t flags;
1012 	/* Unique ring id */
1013 	uint8_t ring_id;
1014 	/* Source or Destination ring */
1015 	enum hal_srng_dir ring_dir;
1016 	/* Size of ring entry */
1017 	uint32_t entry_size;
1018 	/* hw register base address */
1019 	void *hwreg_base[MAX_SRNG_REG_GROUPS];
1020 	/* prefetch timer config - in micro seconds */
1021 	uint32_t prefetch_timer;
1022 };
1023 
1024 /* hal_construct_srng_shadow_regs() - initialize the shadow
1025  * registers for srngs
1026  * @hal_soc: hal handle
1027  *
1028  * Return: QDF_STATUS_SUCCESS on success
1029  */
1030 QDF_STATUS hal_construct_srng_shadow_regs(void *hal_soc);
1031 
1032 /* hal_set_one_shadow_config() - add a config for the specified ring
1033  * @hal_soc: hal handle
1034  * @ring_type: ring type
1035  * @ring_num: ring num
1036  *
1037  * The ring type and ring num uniquely specify the ring.  After this call,
1038  * the hp/tp will be added as the next entry in the shadow register
1039  * configuration table.  The hal code will use the shadow register address
1040  * in place of the hp/tp address.
1041  *
1042  * This function is exposed, so that the CE module can skip configuring shadow
1043  * registers for unused ring and rings assigned to the firmware.
1044  *
1045  * Return: QDF_STATUS_SUCCESS on success
1046  */
1047 QDF_STATUS hal_set_one_shadow_config(void *hal_soc, int ring_type,
1048 				     int ring_num);
1049 /**
1050  * hal_get_shadow_config() - retrieve the config table
1051  * @hal_soc: hal handle
1052  * @shadow_config: will point to the shadow register config table on return
1053  * @num_shadow_registers_configured: will contain the number of valid entries
1054  */
1055 extern void hal_get_shadow_config(void *hal_soc,
1056 				  struct pld_shadow_reg_v2_cfg **shadow_config,
1057 				  int *num_shadow_registers_configured);
1058 /**
1059  * hal_srng_setup - Initialize HW SRNG ring.
1060  *
1061  * @hal_soc: Opaque HAL SOC handle
1062  * @ring_type: one of the types from hal_ring_type
1063  * @ring_num: Ring number if there are multiple rings of
1064  *		same type (starting from 0)
1065  * @mac_id: valid MAC Id should be passed if ring type is one of lmac rings
1066  * @ring_params: SRNG ring params in hal_srng_params structure.
1067  *
1068  * Callers are expected to allocate contiguous ring memory of size
1069  * 'num_entries * entry_size' bytes and pass the physical and virtual base
1070  * addresses through 'ring_base_paddr' and 'ring_base_vaddr' in hal_srng_params
1071  * structure. Ring base address should be 8 byte aligned and size of each ring
1072  * entry should be queried using the API hal_srng_get_entrysize
1073  *
1074  * Return: Opaque pointer to ring on success
1075  *		 NULL on failure (if given ring is not available)
1076  */
1077 extern void *hal_srng_setup(void *hal_soc, int ring_type, int ring_num,
1078 	int mac_id, struct hal_srng_params *ring_params);
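
/*
 * Illustrative setup sketch (not part of the driver): the caller allocates
 * contiguous, 8 byte aligned ring memory of num_entries *
 * hal_srng_get_entrysize() bytes (allocation and error handling elided;
 * ring_vaddr/ring_paddr come from that allocation) and passes it in through
 * hal_srng_params. The entry count below is a made-up placeholder, bounded by
 * hal_srng_max_entries().
 *
 *	struct hal_srng_params params = {0};
 *
 *	params.num_entries = 1024;
 *	params.ring_base_vaddr = ring_vaddr;
 *	params.ring_base_paddr = ring_paddr;
 *	hal_srng_setup(hal_soc, REO_DST, 0, 0, &params);
 */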
1079 
1080 /* Remapping ids of REO rings */
1081 #define REO_REMAP_TCL 0
1082 #define REO_REMAP_SW1 1
1083 #define REO_REMAP_SW2 2
1084 #define REO_REMAP_SW3 3
1085 #define REO_REMAP_SW4 4
1086 #define REO_REMAP_RELEASE 5
1087 #define REO_REMAP_FW 6
1088 #define REO_REMAP_UNUSED 7
1089 
1090 /*
1091  * Macro to access HWIO_REO_R0_ERROR_DESTINATION_RING_CTRL_IX_0
1092  * to map destination to rings
1093  */
1094 #define HAL_REO_ERR_REMAP_IX0(_VALUE, _OFFSET) \
1095 	((_VALUE) << \
1096 	 (HWIO_REO_R0_ERROR_DESTINATION_MAPPING_IX_0_ERROR_ ## \
1097 	  DESTINATION_RING_ ## _OFFSET ## _SHFT))
1098 
1099 /*
1100  * Macro to access HWIO_REO_R0_ERROR_DESTINATION_RING_CTRL_IX_1
1101  * to map destination to rings
1102  */
1103 #define HAL_REO_ERR_REMAP_IX1(_VALUE, _OFFSET) \
1104 	((_VALUE) << \
1105 	 (HWIO_REO_R0_ERROR_DESTINATION_MAPPING_IX_1_ERROR_ ## \
1106 	  DESTINATION_RING_ ## _OFFSET ## _SHFT))
1107 
1108 /*
1109  * Macro to access HWIO_REO_R0_DESTINATION_RING_CTRL_IX_0
1110  * to map destination to rings
1111  */
1112 #define HAL_REO_REMAP_IX0(_VALUE, _OFFSET) \
1113 	((_VALUE) << \
1114 	 (HWIO_REO_R0_DESTINATION_RING_CTRL_IX_0_DEST_RING_MAPPING_ ## \
1115 	  _OFFSET ## _SHFT))
1116 
1117 /*
1118  * Macro to access HWIO_REO_R0_DESTINATION_RING_CTRL_IX_1
1119  * to map destination to rings
1120  */
1121 #define HAL_REO_REMAP_IX2(_VALUE, _OFFSET) \
1122 	((_VALUE) << \
1123 	 (HWIO_REO_R0_DESTINATION_RING_CTRL_IX_2_DEST_RING_MAPPING_ ## \
1124 	  _OFFSET ## _SHFT))
1125 
1126 /*
1127  * Macro to access HWIO_REO_R0_DESTINATION_RING_CTRL_IX_3
1128  * to map destination to rings
1129  */
1130 #define HAL_REO_REMAP_IX3(_VALUE, _OFFSET) \
1131 	((_VALUE) << \
1132 	 (HWIO_REO_R0_DESTINATION_RING_CTRL_IX_3_DEST_RING_MAPPING_ ## \
1133 	  _OFFSET ## _SHFT))
1134 
1135 /**
1136  * hal_reo_read_write_ctrl_ix - Read or write REO_DESTINATION_RING_CTRL_IX
1137  * @hal_soc_hdl: HAL SOC handle
1138  * @read: boolean value to indicate if read or write
1139  * @ix0: pointer to store IX0 reg value
1140  * @ix1: pointer to store IX1 reg value
1141  * @ix2: pointer to store IX2 reg value
1142  * @ix3: pointer to store IX3 reg value
1143  */
1144 void hal_reo_read_write_ctrl_ix(hal_soc_handle_t hal_soc_hdl, bool read,
1145 				uint32_t *ix0, uint32_t *ix1,
1146 				uint32_t *ix2, uint32_t *ix3);
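
/*
 * Illustrative sketch (not part of the driver): a destination remap value is
 * built by OR-ing one HAL_REO_REMAP_IX*() term per mapping field and then
 * programmed through hal_reo_read_write_ctrl_ix(). The field numbers below
 * (16..19) are placeholders; the real field indices come from the
 * HWIO_REO_R0_DESTINATION_RING_CTRL_IX_* register definitions. It is assumed
 * here that unused ix pointers may be passed as NULL.
 *
 *	uint32_t ix2 = HAL_REO_REMAP_IX2(REO_REMAP_SW1, 16) |
 *		       HAL_REO_REMAP_IX2(REO_REMAP_SW2, 17) |
 *		       HAL_REO_REMAP_IX2(REO_REMAP_SW3, 18) |
 *		       HAL_REO_REMAP_IX2(REO_REMAP_SW4, 19);
 *
 *	hal_reo_read_write_ctrl_ix(hal_soc_hdl, false, NULL, NULL, &ix2, NULL);
 */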
1147 
1148 /**
1149  * hal_srng_dst_set_hp_paddr() - Set physical address to dest SRNG head pointer
1150  * @sring: sring pointer
1151  * @paddr: physical address
1152  */
1153 extern void hal_srng_dst_set_hp_paddr(struct hal_srng *sring, uint64_t paddr);
1154 
1155 /**
1156  * hal_srng_dst_init_hp() - Initialize head pointer with cached head pointer
1157  * @hal_soc: hal_soc handle
1158  * @srng: sring pointer
1159  * @vaddr: virtual address
1160  */
1161 void hal_srng_dst_init_hp(struct hal_soc_handle *hal_soc,
1162 			  struct hal_srng *srng,
1163 			  uint32_t *vaddr);
1164 
1165 /**
1166  * hal_srng_cleanup - Deinitialize HW SRNG ring.
1167  * @hal_soc: Opaque HAL SOC handle
1168  * @hal_srng: Opaque HAL SRNG pointer
1169  */
1170 void hal_srng_cleanup(void *hal_soc, hal_ring_handle_t hal_ring_hdl);
1171 
1172 static inline bool hal_srng_initialized(hal_ring_handle_t hal_ring_hdl)
1173 {
1174 	struct hal_srng *srng = (struct hal_srng *)hal_ring_hdl;
1175 
1176 	return !!srng->initialized;
1177 }
1178 
1179 /**
1180  * hal_srng_dst_peek - Check if there are any entries in the ring (peek)
1181  * @hal_soc: Opaque HAL SOC handle
1182  * @hal_ring_hdl: Destination ring pointer
1183  *
1184  * Caller takes responsibility for any locking needs.
1185  *
1186  * Return: Opaque pointer for next ring entry; NULL on failure
1187  */
1188 static inline
1189 void *hal_srng_dst_peek(hal_soc_handle_t hal_soc_hdl,
1190 			hal_ring_handle_t hal_ring_hdl)
1191 {
1192 	struct hal_srng *srng = (struct hal_srng *)hal_ring_hdl;
1193 
1194 	if (srng->u.dst_ring.tp != srng->u.dst_ring.cached_hp)
1195 		return (void *)(&srng->ring_base_vaddr[srng->u.dst_ring.tp]);
1196 
1197 	return NULL;
1198 }
1199 
1200 
1201 /**
1202  * hal_mem_dma_cache_sync - Cache sync the specified virtual address range
1203  * @hal_soc: HAL soc handle
1204  * @desc: desc start address
1205  * @entry_size: size of memory to sync
1206  *
1207  * Return: void
1208  */
1209 #if defined(__LINUX_MIPS32_ARCH__) || defined(__LINUX_MIPS64_ARCH__)
1210 static inline void hal_mem_dma_cache_sync(struct hal_soc *soc, uint32_t *desc,
1211 					  uint32_t entry_size)
1212 {
1213 	qdf_nbuf_dma_inv_range((void *)desc, (void *)(desc + entry_size));
1214 }
1215 #else
1216 static inline void hal_mem_dma_cache_sync(struct hal_soc *soc, uint32_t *desc,
1217 					  uint32_t entry_size)
1218 {
1219 	qdf_mem_dma_cache_sync(soc->qdf_dev, qdf_mem_virt_to_phys(desc),
1220 			       QDF_DMA_FROM_DEVICE,
1221 			       (entry_size * sizeof(uint32_t)));
1222 }
1223 #endif
1224 
1225 /**
1226  * hal_srng_access_start_unlocked - Start ring access (unlocked). Should use
1227  * hal_srng_access_start if locked access is required
1228  *
1229  * @hal_soc: Opaque HAL SOC handle
1230  * @hal_ring_hdl: Ring pointer (Source or Destination ring)
1231  *
1232  * Return: 0 on success; error on failure
1233  */
1234 static inline int
1235 hal_srng_access_start_unlocked(hal_soc_handle_t hal_soc_hdl,
1236 			       hal_ring_handle_t hal_ring_hdl)
1237 {
1238 	struct hal_srng *srng = (struct hal_srng *)hal_ring_hdl;
1239 	struct hal_soc *soc = (struct hal_soc *)hal_soc_hdl;
1240 	uint32_t *desc;
1241 
1242 	if (srng->ring_dir == HAL_SRNG_SRC_RING)
1243 		srng->u.src_ring.cached_tp =
1244 			*(volatile uint32_t *)(srng->u.src_ring.tp_addr);
1245 	else {
1246 		srng->u.dst_ring.cached_hp =
1247 			*(volatile uint32_t *)(srng->u.dst_ring.hp_addr);
1248 
1249 		if (srng->flags & HAL_SRNG_CACHED_DESC) {
1250 			desc = hal_srng_dst_peek(hal_soc_hdl, hal_ring_hdl);
1251 			if (qdf_likely(desc)) {
1252 				hal_mem_dma_cache_sync(soc, desc,
1253 						       srng->entry_size);
1254 				qdf_prefetch(desc);
1255 			}
1256 		}
1257 	}
1258 
1259 	return 0;
1260 }
1261 
1262 /**
1263  * hal_srng_try_access_start - Try to start (locked) ring access
1264  *
1265  * @hal_soc: Opaque HAL SOC handle
1266  * @hal_ring_hdl: Ring pointer (Source or Destination ring)
1267  *
1268  * Return: 0 on success; error on failure
1269  */
1270 static inline int hal_srng_try_access_start(hal_soc_handle_t hal_soc_hdl,
1271 					    hal_ring_handle_t hal_ring_hdl)
1272 {
1273 	struct hal_srng *srng = (struct hal_srng *)hal_ring_hdl;
1274 
1275 	if (qdf_unlikely(!hal_ring_hdl)) {
1276 		qdf_print("Error: Invalid hal_ring\n");
1277 		return -EINVAL;
1278 	}
1279 
1280 	if (!SRNG_TRY_LOCK(&(srng->lock)))
1281 		return -EINVAL;
1282 
1283 	return hal_srng_access_start_unlocked(hal_soc_hdl, hal_ring_hdl);
1284 }
1285 
1286 /**
1287  * hal_srng_access_start - Start (locked) ring access
1288  *
1289  * @hal_soc: Opaque HAL SOC handle
1290  * @hal_ring_hdl: Ring pointer (Source or Destination ring)
1291  *
1292  * Return: 0 on success; error on failure
1293  */
1294 static inline int hal_srng_access_start(hal_soc_handle_t hal_soc_hdl,
1295 					hal_ring_handle_t hal_ring_hdl)
1296 {
1297 	struct hal_srng *srng = (struct hal_srng *)hal_ring_hdl;
1298 
1299 	if (qdf_unlikely(!hal_ring_hdl)) {
1300 		qdf_print("Error: Invalid hal_ring\n");
1301 		return -EINVAL;
1302 	}
1303 
1304 	SRNG_LOCK(&(srng->lock));
1305 
1306 	return hal_srng_access_start_unlocked(hal_soc_hdl, hal_ring_hdl);
1307 }
1308 
1309 /**
1310  * hal_srng_dst_get_next - Get next entry from a destination ring
1311  * @hal_soc: Opaque HAL SOC handle
1312  * @hal_ring_hdl: Destination ring pointer
1313  *
1314  * Return: Opaque pointer for next ring entry; NULL on failure
1315  */
1316 static inline
1317 void *hal_srng_dst_get_next(void *hal_soc,
1318 			    hal_ring_handle_t hal_ring_hdl)
1319 {
1320 	struct hal_srng *srng = (struct hal_srng *)hal_ring_hdl;
1321 	uint32_t *desc;
1322 
1323 	if (srng->u.dst_ring.tp == srng->u.dst_ring.cached_hp)
1324 		return NULL;
1325 
1326 	desc = &srng->ring_base_vaddr[srng->u.dst_ring.tp];
1327 	/* TODO: Using % is expensive, but we have to do this since
1328 	 * size of some SRNG rings is not power of 2 (due to descriptor
1329 	 * sizes). Need to create separate API for rings used
1330 	 * per-packet, with sizes power of 2 (TCL2SW, REO2SW,
1331 	 * SW2RXDMA and CE rings)
1332 	 */
1333 	srng->u.dst_ring.tp = (srng->u.dst_ring.tp + srng->entry_size);
1334 	if (srng->u.dst_ring.tp == srng->ring_size)
1335 		srng->u.dst_ring.tp = 0;
1336 
1337 	if (srng->flags & HAL_SRNG_CACHED_DESC) {
1338 		struct hal_soc *soc = (struct hal_soc *)hal_soc;
1339 		uint32_t *desc_next;
1340 		uint32_t tp;
1341 
1342 		tp = srng->u.dst_ring.tp;
1343 		desc_next = &srng->ring_base_vaddr[srng->u.dst_ring.tp];
1344 		hal_mem_dma_cache_sync(soc, desc_next, srng->entry_size);
1345 		qdf_prefetch(desc_next);
1346 	}
1347 
1348 	return (void *)desc;
1349 }
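
/*
 * Illustrative consumer loop sketch (not part of the driver): a destination
 * ring is typically drained by bracketing hal_srng_dst_get_next() with the
 * locked access start/end pair (hal_srng_access_end() is declared further
 * down in this header). example_process_desc() is a hypothetical helper and
 * the handles are assumed to be valid.
 *
 *	void *desc;
 *
 *	if (hal_srng_access_start(hal_soc_hdl, hal_ring_hdl))
 *		return;
 *
 *	while ((desc = hal_srng_dst_get_next(hal_soc, hal_ring_hdl)))
 *		example_process_desc(desc);
 *
 *	hal_srng_access_end(hal_soc_hdl, hal_ring_hdl);
 */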
1350 
1351 /**
1352  * hal_srng_dst_get_next_cached - Get cached next entry
1353  * @hal_soc: Opaque HAL SOC handle
1354  * @hal_ring_hdl: Destination ring pointer
1355  *
1356  * Get next entry from a destination ring and move cached tail pointer
1357  *
1358  * Return: Opaque pointer for next ring entry; NULL on failure
1359  */
1360 static inline
1361 void *hal_srng_dst_get_next_cached(void *hal_soc,
1362 				   hal_ring_handle_t hal_ring_hdl)
1363 {
1364 	struct hal_srng *srng = (struct hal_srng *)hal_ring_hdl;
1365 	uint32_t *desc;
1366 	uint32_t *desc_next;
1367 
1368 	if (srng->u.dst_ring.tp == srng->u.dst_ring.cached_hp)
1369 		return NULL;
1370 
1371 	desc = &srng->ring_base_vaddr[srng->u.dst_ring.tp];
1372 	/* TODO: Using % is expensive, but we have to do this since
1373 	 * size of some SRNG rings is not power of 2 (due to descriptor
1374 	 * sizes). Need to create separate API for rings used
1375 	 * per-packet, with sizes power of 2 (TCL2SW, REO2SW,
1376 	 * SW2RXDMA and CE rings)
1377 	 */
1378 	srng->u.dst_ring.tp = (srng->u.dst_ring.tp + srng->entry_size);
1379 	if (srng->u.dst_ring.tp == srng->ring_size)
1380 		srng->u.dst_ring.tp = 0;
1381 
1382 	desc_next = &srng->ring_base_vaddr[srng->u.dst_ring.tp];
1383 	qdf_prefetch(desc_next);
1384 	return (void *)desc;
1385 }
1386 
1387 /**
1388  * hal_srng_dst_get_next_hp - Get next entry from a destination ring and move
1389  * cached head pointer
1390  *
1391  * @hal_soc: Opaque HAL SOC handle
1392  * @hal_ring_hdl: Destination ring pointer
1393  *
1394  * Return: Opaque pointer for next ring entry; NULL on failure
1395  */
1396 static inline void *
1397 hal_srng_dst_get_next_hp(hal_soc_handle_t hal_soc_hdl,
1398 			 hal_ring_handle_t hal_ring_hdl)
1399 {
1400 	struct hal_srng *srng = (struct hal_srng *)hal_ring_hdl;
1401 	uint32_t *desc;
1402 	/* TODO: Using % is expensive, but we have to do this since
1403 	 * size of some SRNG rings is not power of 2 (due to descriptor
1404 	 * sizes). Need to create separate API for rings used
1405 	 * per-packet, with sizes power of 2 (TCL2SW, REO2SW,
1406 	 * SW2RXDMA and CE rings)
1407 	 */
1408 	uint32_t next_hp = (srng->u.dst_ring.cached_hp + srng->entry_size) %
1409 		srng->ring_size;
1410 
1411 	if (next_hp != srng->u.dst_ring.tp) {
1412 		desc = &(srng->ring_base_vaddr[srng->u.dst_ring.cached_hp]);
1413 		srng->u.dst_ring.cached_hp = next_hp;
1414 		return (void *)desc;
1415 	}
1416 
1417 	return NULL;
1418 }
1419 
1420 /**
1421  * hal_srng_dst_peek_sync - Check if there are any entries in the ring (peek)
1422  * @hal_soc: Opaque HAL SOC handle
1423  * @hal_ring_hdl: Destination ring pointer
1424  *
1425  * Sync cached head pointer with HW.
1426  * Caller takes responsibility for any locking needs.
1427  *
1428  * Return: Opaque pointer for next ring entry; NULL on failure
1429  */
1430 static inline
1431 void *hal_srng_dst_peek_sync(hal_soc_handle_t hal_soc_hdl,
1432 			     hal_ring_handle_t hal_ring_hdl)
1433 {
1434 	struct hal_srng *srng = (struct hal_srng *)hal_ring_hdl;
1435 
1436 	srng->u.dst_ring.cached_hp =
1437 		*(volatile uint32_t *)(srng->u.dst_ring.hp_addr);
1438 
1439 	if (srng->u.dst_ring.tp != srng->u.dst_ring.cached_hp)
1440 		return (void *)(&(srng->ring_base_vaddr[srng->u.dst_ring.tp]));
1441 
1442 	return NULL;
1443 }
1444 
1445 /**
1446  * hal_srng_dst_peek_sync_locked - Peek for any entries in the ring
1447  * @hal_soc: Opaque HAL SOC handle
1448  * @hal_ring_hdl: Destination ring pointer
1449  *
1450  * Sync cached head pointer with HW.
1451  * This function takes up SRNG_LOCK. Should not be called with SRNG lock held.
1452  *
1453  * Return: Opaque pointer for next ring entry; NULL on failure
1454  */
1455 static inline
1456 void *hal_srng_dst_peek_sync_locked(hal_soc_handle_t hal_soc_hdl,
1457 				    hal_ring_handle_t hal_ring_hdl)
1458 {
1459 	struct hal_srng *srng = (struct hal_srng *)hal_ring_hdl;
1460 	void *ring_desc_ptr = NULL;
1461 
1462 	if (qdf_unlikely(!hal_ring_hdl)) {
1463 		qdf_print("Error: Invalid hal_ring\n");
1464 		return  NULL;
1465 	}
1466 
1467 	SRNG_LOCK(&srng->lock);
1468 
1469 	ring_desc_ptr = hal_srng_dst_peek_sync(hal_soc_hdl, hal_ring_hdl);
1470 
1471 	SRNG_UNLOCK(&srng->lock);
1472 
1473 	return ring_desc_ptr;
1474 }
1475 
1476 /**
1477  * hal_srng_dst_num_valid - Returns number of valid entries (to be processed
1478  * by SW) in destination ring
1479  *
1480  * @hal_soc: Opaque HAL SOC handle
1481  * @hal_ring_hdl: Destination ring pointer
1482  * @sync_hw_ptr: Sync cached head pointer with HW
1483  *
1484  */
1485 static inline
1486 uint32_t hal_srng_dst_num_valid(void *hal_soc,
1487 				hal_ring_handle_t hal_ring_hdl,
1488 				int sync_hw_ptr)
1489 {
1490 	struct hal_srng *srng = (struct hal_srng *)hal_ring_hdl;
1491 	uint32_t hp;
1492 	uint32_t tp = srng->u.dst_ring.tp;
1493 
1494 	if (sync_hw_ptr) {
1495 		hp = *(volatile uint32_t *)(srng->u.dst_ring.hp_addr);
1496 		srng->u.dst_ring.cached_hp = hp;
1497 	} else {
1498 		hp = srng->u.dst_ring.cached_hp;
1499 	}
1500 
1501 	if (hp >= tp)
1502 		return (hp - tp) / srng->entry_size;
1503 
1504 	return (srng->ring_size - tp + hp) / srng->entry_size;
1505 }
1506 
1507 /**
1508  * hal_srng_dst_inv_cached_descs - API to invalidate descriptors in batch mode
1509  * @hal_soc: Opaque HAL SOC handle
1510  * @hal_ring_hdl: Destination ring pointer
1511  * @entry_count: Number of descriptors to be invalidated
1512  *
1513  * Invalidates a set of cached descriptors starting from tail to
1514  * provided count worth
1515  *
1516  * Return - None
1517  */
1518 static inline void hal_srng_dst_inv_cached_descs(void *hal_soc,
1519 						 hal_ring_handle_t hal_ring_hdl,
1520 						 uint32_t entry_count)
1521 {
1522 	struct hal_srng *srng = (struct hal_srng *)hal_ring_hdl;
1523 	uint32_t hp = srng->u.dst_ring.cached_hp;
1524 	uint32_t tp = srng->u.dst_ring.tp;
1525 	uint32_t sync_p = 0;
1526 
1527 	/*
1528 	 * If SRNG does not have cached descriptors this
1529 	 * API call should be a no op
1530 	 */
1531 	if (!(srng->flags & HAL_SRNG_CACHED_DESC))
1532 		return;
1533 
1534 	if (qdf_unlikely(entry_count == 0))
1535 		return;
1536 
1537 	sync_p = (entry_count - 1) * srng->entry_size;
1538 
1539 	if (hp > tp) {
1540 		qdf_nbuf_dma_inv_range(&srng->ring_base_vaddr[tp],
1541 				       &srng->ring_base_vaddr[tp + sync_p]
1542 				       + (srng->entry_size * sizeof(uint32_t)));
1543 	} else {
1544 		/*
1545 		 * We have wrapped around
1546 		 */
1547 		uint32_t wrap_cnt = ((srng->ring_size - tp) / srng->entry_size);
1548 
1549 		if (entry_count <= wrap_cnt) {
1550 			qdf_nbuf_dma_inv_range(&srng->ring_base_vaddr[tp],
1551 					       &srng->ring_base_vaddr[tp + sync_p] +
1552 					       (srng->entry_size * sizeof(uint32_t)));
1553 			return;
1554 		}
1555 
1556 		entry_count -= wrap_cnt;
1557 		sync_p = (entry_count - 1) * srng->entry_size;
1558 
1559 		qdf_nbuf_dma_inv_range(&srng->ring_base_vaddr[tp],
1560 				       &srng->ring_base_vaddr[srng->ring_size - srng->entry_size] +
1561 				       (srng->entry_size * sizeof(uint32_t)));
1562 
1563 		qdf_nbuf_dma_inv_range(&srng->ring_base_vaddr[0],
1564 				       &srng->ring_base_vaddr[sync_p]
1565 				       + (srng->entry_size * sizeof(uint32_t)));
1566 	}
1567 }
1568 
1569 /**
1570  * hal_srng_dst_num_valid_locked - Returns num valid entries to be processed
1571  *
1572  * @hal_soc: Opaque HAL SOC handle
1573  * @hal_ring_hdl: Destination ring pointer
1574  * @sync_hw_ptr: Sync cached head pointer with HW
1575  *
1576  * Returns number of valid entries to be processed by the host driver. The
1577  * function takes up SRNG lock.
1578  *
1579  * Return: Number of valid destination entries
1580  */
1581 static inline uint32_t
1582 hal_srng_dst_num_valid_locked(hal_soc_handle_t hal_soc,
1583 			      hal_ring_handle_t hal_ring_hdl,
1584 			      int sync_hw_ptr)
1585 {
1586 	uint32_t num_valid;
1587 	struct hal_srng *srng = (struct hal_srng *)hal_ring_hdl;
1588 
1589 	SRNG_LOCK(&srng->lock);
1590 	num_valid = hal_srng_dst_num_valid(hal_soc, hal_ring_hdl, sync_hw_ptr);
1591 	SRNG_UNLOCK(&srng->lock);
1592 
1593 	return num_valid;
1594 }
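
/*
 * Illustrative sketch (not part of the driver): for rings set up with
 * HAL_SRNG_CACHED_DESC, the pending entries can be counted and their cached
 * descriptors invalidated in one batch before the processing loop starts
 * (hal_srng_dst_inv_cached_descs() is a no-op for other rings).
 *
 *	uint32_t num_pending;
 *
 *	num_pending = hal_srng_dst_num_valid(hal_soc, hal_ring_hdl, 1);
 *	hal_srng_dst_inv_cached_descs(hal_soc, hal_ring_hdl, num_pending);
 */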
1595 
1596 /**
1597  * hal_srng_sync_cachedhp - sync cached head pointer with the HW head pointer
1598  *
1599  * @hal_soc: Opaque HAL SOC handle
1600  * @hal_ring_hdl: Destination ring pointer
1601  *
1602  */
1603 static inline
1604 void hal_srng_sync_cachedhp(void *hal_soc,
1605 				hal_ring_handle_t hal_ring_hdl)
1606 {
1607 	struct hal_srng *srng = (struct hal_srng *)hal_ring_hdl;
1608 	uint32_t hp;
1609 
1610 	hp = *(volatile uint32_t *)(srng->u.dst_ring.hp_addr);
1611 	srng->u.dst_ring.cached_hp = hp;
1612 }
1613 
1614 /**
1615  * hal_srng_src_reap_next - Reap next entry from a source ring and move reap
1616  * pointer. This can be used to release any buffers associated with completed
1617  * ring entries. Note that this should not be used for posting new descriptor
1618  * entries. Posting of new entries should be done only using
1619  * hal_srng_src_get_next_reaped when this function is used for reaping.
1620  *
1621  * @hal_soc: Opaque HAL SOC handle
1622  * @hal_ring_hdl: Source ring pointer
1623  *
1624  * Return: Opaque pointer for next ring entry; NULL on failure
1625  */
1626 static inline void *
1627 hal_srng_src_reap_next(void *hal_soc, hal_ring_handle_t hal_ring_hdl)
1628 {
1629 	struct hal_srng *srng = (struct hal_srng *)hal_ring_hdl;
1630 	uint32_t *desc;
1631 
1632 	/* TODO: Using % is expensive, but we have to do this since
1633 	 * size of some SRNG rings is not power of 2 (due to descriptor
1634 	 * sizes). Need to create separate API for rings used
1635 	 * per-packet, with sizes power of 2 (TCL2SW, REO2SW,
1636 	 * SW2RXDMA and CE rings)
1637 	 */
1638 	uint32_t next_reap_hp = (srng->u.src_ring.reap_hp + srng->entry_size) %
1639 		srng->ring_size;
1640 
1641 	if (next_reap_hp != srng->u.src_ring.cached_tp) {
1642 		desc = &(srng->ring_base_vaddr[next_reap_hp]);
1643 		srng->u.src_ring.reap_hp = next_reap_hp;
1644 		return (void *)desc;
1645 	}
1646 
1647 	return NULL;
1648 }
1649 
1650 /**
1651  * hal_srng_src_get_next_reaped - Get next entry from a source ring that is
1652  * already reaped using hal_srng_src_reap_next, for posting new entries to
1653  * the ring
1654  *
1655  * @hal_soc: Opaque HAL SOC handle
1656  * @hal_ring_hdl: Source ring pointer
1657  *
1658  * Return: Opaque pointer for next (reaped) source ring entry; NULL on failure
1659  */
1660 static inline void *
1661 hal_srng_src_get_next_reaped(void *hal_soc, hal_ring_handle_t hal_ring_hdl)
1662 {
1663 	struct hal_srng *srng = (struct hal_srng *)hal_ring_hdl;
1664 	uint32_t *desc;
1665 
1666 	if (srng->u.src_ring.hp != srng->u.src_ring.reap_hp) {
1667 		desc = &(srng->ring_base_vaddr[srng->u.src_ring.hp]);
1668 		srng->u.src_ring.hp = (srng->u.src_ring.hp + srng->entry_size) %
1669 			srng->ring_size;
1670 
1671 		return (void *)desc;
1672 	}
1673 
1674 	return NULL;
1675 }
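
/*
 * Illustrative reap/post sketch (not part of the driver): completion handling
 * first walks the ring with hal_srng_src_reap_next() to release buffers of
 * completed entries, and new descriptors are then posted only through
 * hal_srng_src_get_next_reaped() so the head pointer never overtakes the
 * reap pointer. example_release_buf() and example_fill_desc() are
 * hypothetical helpers.
 *
 *	void *entry;
 *
 *	while ((entry = hal_srng_src_reap_next(hal_soc, hal_ring_hdl)))
 *		example_release_buf(entry);
 *
 *	entry = hal_srng_src_get_next_reaped(hal_soc, hal_ring_hdl);
 *	if (entry)
 *		example_fill_desc(entry);
 */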
1676 
1677 /**
1678  * hal_srng_src_pending_reap_next - Reap next entry from a source ring and
1679  * move reap pointer. This API is used in detach path to release any buffers
1680  * associated with ring entries which are pending reap.
1681  *
1682  * @hal_soc: Opaque HAL SOC handle
1683  * @hal_ring_hdl: Source ring pointer
1684  *
1685  * Return: Opaque pointer for next ring entry; NULL on failure
1686  */
1687 static inline void *
1688 hal_srng_src_pending_reap_next(void *hal_soc, hal_ring_handle_t hal_ring_hdl)
1689 {
1690 	struct hal_srng *srng = (struct hal_srng *)hal_ring_hdl;
1691 	uint32_t *desc;
1692 
1693 	uint32_t next_reap_hp = (srng->u.src_ring.reap_hp + srng->entry_size) %
1694 		srng->ring_size;
1695 
1696 	if (next_reap_hp != srng->u.src_ring.hp) {
1697 		desc = &(srng->ring_base_vaddr[next_reap_hp]);
1698 		srng->u.src_ring.reap_hp = next_reap_hp;
1699 		return (void *)desc;
1700 	}
1701 
1702 	return NULL;
1703 }
1704 
1705 /**
1706  * hal_srng_src_done_val - Get the number of source ring entries consumed by the target
1707  *
1708  * @hal_soc: Opaque HAL SOC handle
1709  * @hal_ring_hdl: Source ring pointer
1710  *
1711  * Return: Number of completed entries that can be reaped; 0 if none
1712  */
1713 static inline uint32_t
1714 hal_srng_src_done_val(void *hal_soc, hal_ring_handle_t hal_ring_hdl)
1715 {
1716 	struct hal_srng *srng = (struct hal_srng *)hal_ring_hdl;
1717 	/* TODO: Using % is expensive, but we have to do this since
1718 	 * size of some SRNG rings is not power of 2 (due to descriptor
1719 	 * sizes). Need to create separate API for rings used
1720 	 * per-packet, with sizes power of 2 (TCL2SW, REO2SW,
1721 	 * SW2RXDMA and CE rings)
1722 	 */
1723 	uint32_t next_reap_hp = (srng->u.src_ring.reap_hp + srng->entry_size) %
1724 		srng->ring_size;
1725 
1726 	if (next_reap_hp == srng->u.src_ring.cached_tp)
1727 		return 0;
1728 
1729 	if (srng->u.src_ring.cached_tp > next_reap_hp)
1730 		return (srng->u.src_ring.cached_tp - next_reap_hp) /
1731 			srng->entry_size;
1732 	else
1733 		return ((srng->ring_size - next_reap_hp) +
1734 			srng->u.src_ring.cached_tp) / srng->entry_size;
1735 }
1736 
1737 /**
1738  * hal_get_entrysize_from_srng() - Retrieve ring entry size
1739  * @hal_ring_hdl: Source ring pointer
1740  *
1741  * Return: ring entry size, in 4-byte (dword) units
1742  */
1743 static inline
1744 uint8_t hal_get_entrysize_from_srng(hal_ring_handle_t hal_ring_hdl)
1745 {
1746 	struct hal_srng *srng = (struct hal_srng *)hal_ring_hdl;
1747 
1748 	return srng->entry_size;
1749 }
1750 
1751 /**
1752  * hal_get_sw_hptp - Get SW head and tail pointer values for any ring
1753  * @hal_soc: Opaque HAL SOC handle
1754  * @hal_ring_hdl: Ring pointer (Source or Destination ring)
1755  * @tailp: Tail Pointer
1756  * @headp: Head Pointer
1757  *
1758  * Return: None (tail and head pointer values are returned through the arguments)
1759  */
1760 static inline
1761 void hal_get_sw_hptp(void *hal_soc, hal_ring_handle_t hal_ring_hdl,
1762 		     uint32_t *tailp, uint32_t *headp)
1763 {
1764 	struct hal_srng *srng = (struct hal_srng *)hal_ring_hdl;
1765 
1766 	if (srng->ring_dir == HAL_SRNG_SRC_RING) {
1767 		*headp = srng->u.src_ring.hp;
1768 		*tailp = *srng->u.src_ring.tp_addr;
1769 	} else {
1770 		*tailp = srng->u.dst_ring.tp;
1771 		*headp = *srng->u.dst_ring.hp_addr;
1772 	}
1773 }
1774 
1775 /**
1776  * hal_srng_src_get_next - Get next entry from a source ring and move cached head pointer
1777  *
1778  * @hal_soc: Opaque HAL SOC handle
1779  * @hal_ring_hdl: Source ring pointer
1780  *
1781  * Return: Opaque pointer for next ring entry; NULL on failure
1782  */
1783 static inline
1784 void *hal_srng_src_get_next(void *hal_soc,
1785 			    hal_ring_handle_t hal_ring_hdl)
1786 {
1787 	struct hal_srng *srng = (struct hal_srng *)hal_ring_hdl;
1788 	uint32_t *desc;
1789 	/* TODO: Using % is expensive, but we have to do this since
1790 	 * size of some SRNG rings is not power of 2 (due to descriptor
1791 	 * sizes). Need to create separate API for rings used
1792 	 * per-packet, with sizes power of 2 (TCL2SW, REO2SW,
1793 	 * SW2RXDMA and CE rings)
1794 	 */
1795 	uint32_t next_hp = (srng->u.src_ring.hp + srng->entry_size) %
1796 		srng->ring_size;
1797 
1798 	if (next_hp != srng->u.src_ring.cached_tp) {
1799 		desc = &(srng->ring_base_vaddr[srng->u.src_ring.hp]);
1800 		srng->u.src_ring.hp = next_hp;
1801 		/* TODO: Since reap function is not used by all rings, we can
1802 		 * remove the following update of reap_hp in this function
1803 		 * if we can ensure that only hal_srng_src_get_next_reaped
1804 		 * is used for the rings requiring reap functionality
1805 		 */
1806 		srng->u.src_ring.reap_hp = next_hp;
1807 		return (void *)desc;
1808 	}
1809 
1810 	return NULL;
1811 }
1812 
1813 /**
1814  * hal_srng_src_peek_n_get_next - Get next entry from a ring without
1815  * moving head pointer.
1816  * hal_srng_src_get_next should be called subsequently to move the head pointer
1817  *
1818  * @hal_soc_hdl: Opaque HAL SOC handle
1819  * @hal_ring_hdl: Source ring pointer
1820  *
1821  * Return: Opaque pointer for next ring entry; NULL on failure
1822  */
1823 static inline
1824 void *hal_srng_src_peek_n_get_next(hal_soc_handle_t hal_soc_hdl,
1825 				   hal_ring_handle_t hal_ring_hdl)
1826 {
1827 	struct hal_srng *srng = (struct hal_srng *)hal_ring_hdl;
1828 	uint32_t *desc;
1829 
1830 	/* TODO: Using % is expensive, but we have to do this since
1831 	 * size of some SRNG rings is not power of 2 (due to descriptor
1832 	 * sizes). Need to create separate API for rings used
1833 	 * per-packet, with sizes power of 2 (TCL2SW, REO2SW,
1834 	 * SW2RXDMA and CE rings)
1835 	 */
1836 	if (((srng->u.src_ring.hp + srng->entry_size) %
1837 		srng->ring_size) != srng->u.src_ring.cached_tp) {
1838 		desc = &(srng->ring_base_vaddr[(srng->u.src_ring.hp +
1839 						srng->entry_size) %
1840 						srng->ring_size]);
1841 		return (void *)desc;
1842 	}
1843 
1844 	return NULL;
1845 }
1846 
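/*
 * Example (illustrative sketch, not part of the HAL API): use the peek
 * variant to check for a usable source ring entry without committing a head
 * pointer move, and commit only once the caller is ready. dp_fill_desc() is
 * a hypothetical caller-side helper.
 *
 *	if (hal_srng_src_peek_n_get_next(hal_soc_hdl, hal_ring_hdl)) {
 *		void *entry = hal_srng_src_get_next(hal_soc_hdl, hal_ring_hdl);
 *
 *		if (entry)
 *			dp_fill_desc(entry);
 *	}
 */
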
1847 /**
1848  * hal_srng_src_peek_n_get_next_next - Get the next-to-next (i.e. HP + 2)
1849  * entry from a ring without moving the head pointer.
1850  *
1851  * @hal_soc_hdl: Opaque HAL SOC handle
1852  * @hal_ring_hdl: Source ring pointer
1853  *
1854  * Return: Opaque pointer for next-to-next ring entry; NULL on failure
1855  */
1856 static inline
1857 void *hal_srng_src_peek_n_get_next_next(hal_soc_handle_t hal_soc_hdl,
1858 					hal_ring_handle_t hal_ring_hdl)
1859 {
1860 	struct hal_srng *srng = (struct hal_srng *)hal_ring_hdl;
1861 	uint32_t *desc;
1862 
1863 	/* TODO: Using % is expensive, but we have to do this since
1864 	 * size of some SRNG rings is not power of 2 (due to descriptor
1865 	 * sizes). Need to create separate API for rings used
1866 	 * per-packet, with sizes power of 2 (TCL2SW, REO2SW,
1867 	 * SW2RXDMA and CE rings)
1868 	 */
1869 	if ((((srng->u.src_ring.hp + (srng->entry_size)) %
1870 		srng->ring_size) != srng->u.src_ring.cached_tp) &&
1871 	    (((srng->u.src_ring.hp + (srng->entry_size * 2)) %
1872 		srng->ring_size) != srng->u.src_ring.cached_tp)) {
1873 		desc = &(srng->ring_base_vaddr[(srng->u.src_ring.hp +
1874 						(srng->entry_size * 2)) %
1875 						srng->ring_size]);
1876 		return (void *)desc;
1877 	}
1878 
1879 	return NULL;
1880 }
1881 
1882 /**
1883  * hal_srng_src_get_cur_hp_n_move_next() - Return the current HP entry and
1884  * move HP to the next entry in a src ring
1885  *
1886  * Usage: This API should only be used at init time replenish.
1887  *
1888  * @hal_soc_hdl: HAL soc handle
1889  * @hal_ring_hdl: Source ring pointer
1890  * Return: Opaque pointer to the current ring entry
1891  */
1892 static inline void *
1893 hal_srng_src_get_cur_hp_n_move_next(hal_soc_handle_t hal_soc_hdl,
1894 				    hal_ring_handle_t hal_ring_hdl)
1895 {
1896 	struct hal_srng *srng = (struct hal_srng *)hal_ring_hdl;
1897 	uint32_t *cur_desc = NULL;
1898 	uint32_t next_hp;
1899 
1900 	cur_desc = &srng->ring_base_vaddr[(srng->u.src_ring.hp)];
1901 
1902 	next_hp = (srng->u.src_ring.hp + srng->entry_size) %
1903 		srng->ring_size;
1904 
1905 	if (next_hp != srng->u.src_ring.cached_tp)
1906 		srng->u.src_ring.hp = next_hp;
1907 
1908 	return (void *)cur_desc;
1909 }
1910 
1911 /**
1912  * hal_srng_src_num_avail - Returns number of available entries in src ring
1913  *
1914  * @hal_soc: Opaque HAL SOC handle
1915  * @hal_ring_hdl: Source ring pointer
1916  * @sync_hw_ptr: Sync cached tail pointer with HW
1917  * Return: Number of entries available for posting in the src ring
1918  */
1919 static inline uint32_t
1920 hal_srng_src_num_avail(void *hal_soc,
1921 		       hal_ring_handle_t hal_ring_hdl, int sync_hw_ptr)
1922 {
1923 	struct hal_srng *srng = (struct hal_srng *)hal_ring_hdl;
1924 	uint32_t tp;
1925 	uint32_t hp = srng->u.src_ring.hp;
1926 
1927 	if (sync_hw_ptr) {
1928 		tp = *(srng->u.src_ring.tp_addr);
1929 		srng->u.src_ring.cached_tp = tp;
1930 	} else {
1931 		tp = srng->u.src_ring.cached_tp;
1932 	}
1933 
1934 	if (tp > hp)
1935 		return ((tp - hp) / srng->entry_size) - 1;
1936 	else
1937 		return ((srng->ring_size - hp + tp) / srng->entry_size) - 1;
1938 }
1939 
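/*
 * Example (illustrative sketch, not part of the HAL API): replenish a source
 * ring with at most the number of free entries reported by
 * hal_srng_src_num_avail(). hal_srng_access_start() is assumed to be the
 * usual lock/start call declared elsewhere in this header; dp_post_buffer()
 * is a hypothetical caller-side helper.
 *
 *	uint32_t num_avail;
 *	void *entry;
 *
 *	hal_srng_access_start(hal_soc, hal_ring_hdl);
 *
 *	num_avail = hal_srng_src_num_avail(hal_soc, hal_ring_hdl, 1);
 *	while (num_avail-- &&
 *	       (entry = hal_srng_src_get_next(hal_soc, hal_ring_hdl)))
 *		dp_post_buffer(entry);
 *
 *	hal_srng_access_end(hal_soc, hal_ring_hdl);
 */
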
1940 /**
1941  * hal_srng_access_end_unlocked - End ring access (unlocked) - update cached
1942  * ring head/tail pointers to HW.
1943  * This should be used only if hal_srng_access_start_unlocked was used to
1944  * start the ring access
1945  *
1946  * @hal_soc: Opaque HAL SOC handle
1947  * @hal_ring_hdl: Ring pointer (Source or Destination ring)
1948  *
1949  * Return: None
1950  */
1951 static inline void
1952 hal_srng_access_end_unlocked(void *hal_soc, hal_ring_handle_t hal_ring_hdl)
1953 {
1954 	struct hal_srng *srng = (struct hal_srng *)hal_ring_hdl;
1955 
1956 	/* TODO: See if we need a write memory barrier here */
1957 	if (srng->flags & HAL_SRNG_LMAC_RING) {
1958 		/* For LMAC rings, ring pointer updates are done through FW and
1959 		 * hence written to a shared memory location that is read by FW
1960 		 */
1961 		if (srng->ring_dir == HAL_SRNG_SRC_RING) {
1962 			*srng->u.src_ring.hp_addr =
1963 				qdf_cpu_to_le32(srng->u.src_ring.hp);
1964 		} else {
1965 			*srng->u.dst_ring.tp_addr =
1966 				qdf_cpu_to_le32(srng->u.dst_ring.tp);
1967 		}
1968 	} else {
1969 		if (srng->ring_dir == HAL_SRNG_SRC_RING)
1970 			hal_srng_write_address_32_mb(hal_soc,
1971 						     srng,
1972 						     srng->u.src_ring.hp_addr,
1973 						     srng->u.src_ring.hp);
1974 		else
1975 			hal_srng_write_address_32_mb(hal_soc,
1976 						     srng,
1977 						     srng->u.dst_ring.tp_addr,
1978 						     srng->u.dst_ring.tp);
1979 	}
1980 }
1981 
1982 /**
1983  * hal_srng_access_end - Unlock ring access and update cached ring head/tail
1984  * pointers to HW
1985  * This should be used only if hal_srng_access_start was used to start the ring access
1986  *
1987  * @hal_soc: Opaque HAL SOC handle
1988  * @hal_ring_hdl: Ring pointer (Source or Destination ring)
1989  *
1990  * Return: None
1991  */
1992 static inline void
1993 hal_srng_access_end(void *hal_soc, hal_ring_handle_t hal_ring_hdl)
1994 {
1995 	struct hal_srng *srng = (struct hal_srng *)hal_ring_hdl;
1996 
1997 	if (qdf_unlikely(!hal_ring_hdl)) {
1998 		qdf_print("Error: Invalid hal_ring\n");
1999 		return;
2000 	}
2001 
2002 	hal_srng_access_end_unlocked(hal_soc, hal_ring_hdl);
2003 	SRNG_UNLOCK(&(srng->lock));
2004 }
2005 
2006 /**
2007  * hal_srng_access_end_reap - Unlock ring access
2008  * This should be used only if hal_srng_access_start was used to start the ring access
2009  * and should be used only while reaping SRC ring completions
2010  *
2011  * @hal_soc: Opaque HAL SOC handle
2012  * @hal_ring_hdl: Ring pointer (Source or Destination ring)
2013  *
2014  * Return: None
2015  */
2016 static inline void
2017 hal_srng_access_end_reap(void *hal_soc, hal_ring_handle_t hal_ring_hdl)
2018 {
2019 	struct hal_srng *srng = (struct hal_srng *)hal_ring_hdl;
2020 
2021 	SRNG_UNLOCK(&(srng->lock));
2022 }
2023 
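/*
 * Example (illustrative sketch, not part of the HAL API): a pure reap pass
 * over a source ring. Since only the software reap pointer moves, the access
 * is closed with hal_srng_access_end_reap(), which drops the lock without
 * writing head/tail pointers to HW; hal_srng_access_end() would be used
 * instead once new entries are posted and must be made visible to HW.
 * dp_release_desc() is a hypothetical caller-side helper.
 *
 *	void *entry;
 *
 *	hal_srng_access_start(hal_soc, hal_ring_hdl);
 *	while ((entry = hal_srng_src_reap_next(hal_soc, hal_ring_hdl)))
 *		dp_release_desc(entry);
 *	hal_srng_access_end_reap(hal_soc, hal_ring_hdl);
 */
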
2024 /* TODO: Check if the following definitions are available in HW headers */
2025 #define WBM_IDLE_SCATTER_BUF_SIZE 32704
2026 #define NUM_MPDUS_PER_LINK_DESC 6
2027 #define NUM_MSDUS_PER_LINK_DESC 7
2028 #define REO_QUEUE_DESC_ALIGN 128
2029 
2030 #define LINK_DESC_ALIGN 128
2031 
2032 #define ADDRESS_MATCH_TAG_VAL 0x5
2033 /* Number of mpdu link pointers is 9 in case of TX_MPDU_QUEUE_HEAD and 14 in
2034  * case of TX_MPDU_QUEUE_EXT. We are defining a common average count here
2035  */
2036 #define NUM_MPDU_LINKS_PER_QUEUE_DESC 12
2037 
2038 /* TODO: Check with HW team on the scatter buffer size supported. As per WBM
2039  * MLD, scatter_buffer_size in IDLE_LIST_CONTROL register is 9 bits and size
2040  * should be specified in 16 word units. But the number of bits defined for
2041  * this field in HW header files is 5.
2042  */
2043 #define WBM_IDLE_SCATTER_BUF_NEXT_PTR_SIZE 8
2044 
2045 
2046 /**
2047  * hal_idle_list_scatter_buf_size - Get the size of each scatter buffer
2048  * in an idle list
2049  *
2050  * @hal_soc: Opaque HAL SOC handle
2051  *
2052  */
2053 static inline
2054 uint32_t hal_idle_list_scatter_buf_size(hal_soc_handle_t hal_soc_hdl)
2055 {
2056 	return WBM_IDLE_SCATTER_BUF_SIZE;
2057 }
2058 
2059 /**
2060  * hal_get_link_desc_size - Get the size of each link descriptor
2061  *
2062  * @hal_soc: Opaque HAL SOC handle
2063  *
2064  */
2065 static inline uint32_t hal_get_link_desc_size(hal_soc_handle_t hal_soc_hdl)
2066 {
2067 	struct hal_soc *hal_soc = (struct hal_soc *)hal_soc_hdl;
2068 
2069 	if (!hal_soc || !hal_soc->ops) {
2070 		qdf_print("Error: Invalid ops\n");
2071 		QDF_BUG(0);
2072 		return -EINVAL;
2073 	}
2074 	if (!hal_soc->ops->hal_get_link_desc_size) {
2075 		qdf_print("Error: Invalid function pointer\n");
2076 		QDF_BUG(0);
2077 		return -EINVAL;
2078 	}
2079 	return hal_soc->ops->hal_get_link_desc_size();
2080 }
2081 
2082 /**
2083  * hal_get_link_desc_align - Get the required start address alignment for
2084  * link descriptors
2085  *
2086  * @hal_soc: Opaque HAL SOC handle
2087  *
2088  */
2089 static inline
2090 uint32_t hal_get_link_desc_align(hal_soc_handle_t hal_soc_hdl)
2091 {
2092 	return LINK_DESC_ALIGN;
2093 }
2094 
2095 /**
2096  * hal_num_mpdus_per_link_desc - Get number of mpdus each link desc can hold
2097  *
2098  * @hal_soc: Opaque HAL SOC handle
2099  *
2100  */
2101 static inline
2102 uint32_t hal_num_mpdus_per_link_desc(hal_soc_handle_t hal_soc_hdl)
2103 {
2104 	return NUM_MPDUS_PER_LINK_DESC;
2105 }
2106 
2107 /**
2108  * hal_num_msdus_per_link_desc - Get number of msdus each link desc can hold
2109  *
2110  * @hal_soc: Opaque HAL SOC handle
2111  *
2112  */
2113 static inline
2114 uint32_t hal_num_msdus_per_link_desc(hal_soc_handle_t hal_soc_hdl)
2115 {
2116 	return NUM_MSDUS_PER_LINK_DESC;
2117 }
2118 
2119 /**
2120  * hal_num_mpdu_links_per_queue_desc - Get number of mpdu links each queue
2121  * descriptor can hold
2122  *
2123  * @hal_soc: Opaque HAL SOC handle
2124  *
2125  */
2126 static inline
2127 uint32_t hal_num_mpdu_links_per_queue_desc(hal_soc_handle_t hal_soc_hdl)
2128 {
2129 	return NUM_MPDU_LINKS_PER_QUEUE_DESC;
2130 }
2131 
2132 /**
2133  * hal_idle_scatter_buf_num_entries - Get the number of link desc entries
2134  * that can be held in the given scatter buffer size
2135  *
2136  * @hal_soc: Opaque HAL SOC handle
2137  * @scatter_buf_size: Size of scatter buffer
2138  *
2139  */
2140 static inline
2141 uint32_t hal_idle_scatter_buf_num_entries(hal_soc_handle_t hal_soc_hdl,
2142 					  uint32_t scatter_buf_size)
2143 {
2144 	return (scatter_buf_size - WBM_IDLE_SCATTER_BUF_NEXT_PTR_SIZE) /
2145 		hal_srng_get_entrysize(hal_soc_hdl, WBM_IDLE_LINK);
2146 }
2147 
2148 /**
2149  * hal_idle_list_num_scatter_bufs - Get the number of scatter buffers
2150  * of the given size needed to cover the given total memory
2151  *
2152  * @hal_soc: Opaque HAL SOC handle
2153  * @total_mem: size of memory to be scattered
2154  * @scatter_buf_size: Size of scatter buffer
2155  *
2156  */
2157 static inline
2158 uint32_t hal_idle_list_num_scatter_bufs(hal_soc_handle_t hal_soc_hdl,
2159 					uint32_t total_mem,
2160 					uint32_t scatter_buf_size)
2161 {
2162 	uint8_t rem = (total_mem % (scatter_buf_size -
2163 			WBM_IDLE_SCATTER_BUF_NEXT_PTR_SIZE)) ? 1 : 0;
2164 
2165 	uint32_t num_scatter_bufs = (total_mem / (scatter_buf_size -
2166 				WBM_IDLE_SCATTER_BUF_NEXT_PTR_SIZE)) + rem;
2167 
2168 	return num_scatter_bufs;
2169 }
2170 
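/*
 * Example (illustrative sketch, not part of the HAL API): sizing the WBM
 * idle link descriptor scatter buffers. total_link_desc_size is a
 * hypothetical value computed by the caller from the number of link
 * descriptors required.
 *
 *	uint32_t buf_size = hal_idle_list_scatter_buf_size(hal_soc_hdl);
 *	uint32_t num_bufs =
 *		hal_idle_list_num_scatter_bufs(hal_soc_hdl,
 *					       total_link_desc_size, buf_size);
 *	uint32_t entries_per_buf =
 *		hal_idle_scatter_buf_num_entries(hal_soc_hdl, buf_size);
 */
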
2171 enum hal_pn_type {
2172 	HAL_PN_NONE,
2173 	HAL_PN_WPA,
2174 	HAL_PN_WAPI_EVEN,
2175 	HAL_PN_WAPI_UNEVEN,
2176 };
2177 
2178 #define HAL_RX_MAX_BA_WINDOW 256
2179 
2180 /**
2181  * hal_get_reo_qdesc_align - Get start address alignment for reo
2182  * queue descriptors
2183  *
2184  * @hal_soc: Opaque HAL SOC handle
2185  *
2186  */
2187 static inline
2188 uint32_t hal_get_reo_qdesc_align(hal_soc_handle_t hal_soc_hdl)
2189 {
2190 	return REO_QUEUE_DESC_ALIGN;
2191 }
2192 
2193 /**
2194  * hal_reo_qdesc_setup - Setup HW REO queue descriptor
2195  *
2196  * @hal_soc: Opaque HAL SOC handle
2197  * @tid: TID of the queue
2198  * @start_seq: Starting sequence number
2199  * @hw_qdesc_vaddr: Virtual address of REO queue descriptor memory
2200  * @hw_qdesc_paddr: Physical address of REO queue descriptor memory
2201  * @pn_type: PN type (one of the types defined in 'enum hal_pn_type')
2202  *
2203  */
2204 void hal_reo_qdesc_setup(hal_soc_handle_t hal_soc_hdl,
2205 			 int tid, uint32_t ba_window_size,
2206 			 uint32_t start_seq, void *hw_qdesc_vaddr,
2207 			 qdf_dma_addr_t hw_qdesc_paddr,
2208 			 int pn_type);
2209 
2210 /**
2211  * hal_srng_get_hp_addr - Get head pointer physical address
2212  *
2213  * @hal_soc: Opaque HAL SOC handle
2214  * @hal_ring_hdl: Ring pointer (Source or Destination ring)
2215  *
2216  */
2217 static inline qdf_dma_addr_t
2218 hal_srng_get_hp_addr(void *hal_soc,
2219 		     hal_ring_handle_t hal_ring_hdl)
2220 {
2221 	struct hal_srng *srng = (struct hal_srng *)hal_ring_hdl;
2222 	struct hal_soc *hal = (struct hal_soc *)hal_soc;
2223 
2224 	if (srng->ring_dir == HAL_SRNG_SRC_RING) {
2225 		return hal->shadow_wrptr_mem_paddr +
2226 		  ((unsigned long)(srng->u.src_ring.hp_addr) -
2227 		  (unsigned long)(hal->shadow_wrptr_mem_vaddr));
2228 	} else {
2229 		return hal->shadow_rdptr_mem_paddr +
2230 		  ((unsigned long)(srng->u.dst_ring.hp_addr) -
2231 		   (unsigned long)(hal->shadow_rdptr_mem_vaddr));
2232 	}
2233 }
2234 
2235 /**
2236  * hal_srng_get_tp_addr - Get tail pointer physical address
2237  *
2238  * @hal_soc: Opaque HAL SOC handle
2239  * @hal_ring_hdl: Ring pointer (Source or Destination ring)
2240  *
2241  */
2242 static inline qdf_dma_addr_t
2243 hal_srng_get_tp_addr(void *hal_soc, hal_ring_handle_t hal_ring_hdl)
2244 {
2245 	struct hal_srng *srng = (struct hal_srng *)hal_ring_hdl;
2246 	struct hal_soc *hal = (struct hal_soc *)hal_soc;
2247 
2248 	if (srng->ring_dir == HAL_SRNG_SRC_RING) {
2249 		return hal->shadow_rdptr_mem_paddr +
2250 			((unsigned long)(srng->u.src_ring.tp_addr) -
2251 			(unsigned long)(hal->shadow_rdptr_mem_vaddr));
2252 	} else {
2253 		return hal->shadow_wrptr_mem_paddr +
2254 			((unsigned long)(srng->u.dst_ring.tp_addr) -
2255 			(unsigned long)(hal->shadow_wrptr_mem_vaddr));
2256 	}
2257 }
2258 
2259 /**
2260  * hal_srng_get_num_entries - Get total entries in the HAL Srng
2261  *
2262  * @hal_soc: Opaque HAL SOC handle
2263  * @hal_ring_hdl: Ring pointer (Source or Destination ring)
2264  *
2265  * Return: total number of entries in hal ring
2266  */
2267 static inline
2268 uint32_t hal_srng_get_num_entries(hal_soc_handle_t hal_soc_hdl,
2269 				  hal_ring_handle_t hal_ring_hdl)
2270 {
2271 	struct hal_srng *srng = (struct hal_srng *)hal_ring_hdl;
2272 
2273 	return srng->num_entries;
2274 }
2275 
2276 /**
2277  * hal_get_srng_params - Retrieve SRNG parameters for a given ring from HAL
2278  *
2279  * @hal_soc: Opaque HAL SOC handle
2280  * @hal_ring_hdl: Ring pointer (Source or Destination ring)
2281  * @ring_params: SRNG parameters will be returned through this structure
2282  */
2283 void hal_get_srng_params(hal_soc_handle_t hal_soc_hdl,
2284 			 hal_ring_handle_t hal_ring_hdl,
2285 			 struct hal_srng_params *ring_params);
2286 
2287 /**
2288  * hal_get_meminfo - Retrieve hal memory base address
2289  *
2290  * @hal_soc: Opaque HAL SOC handle
2291  * @mem: pointer to structure to be updated with hal mem info
2292  */
2293 void hal_get_meminfo(hal_soc_handle_t hal_soc_hdl, struct hal_mem_info *mem);
2294 
2295 /**
2296  * hal_get_target_type - Return target type
2297  *
2298  * @hal_soc: Opaque HAL SOC handle
2299  */
2300 uint32_t hal_get_target_type(hal_soc_handle_t hal_soc_hdl);
2301 
2302 /**
2303  * hal_get_ba_aging_timeout - Retrieve BA aging timeout
2304  *
2305  * @hal_soc: Opaque HAL SOC handle
2306  * @ac: Access category
2307  * @value: timeout duration in millisec
2308  */
2309 void hal_get_ba_aging_timeout(hal_soc_handle_t hal_soc_hdl, uint8_t ac,
2310 			      uint32_t *value);
2311 /**
2312  * hal_set_ba_aging_timeout - Set BA aging timeout
2313  *
2314  * @hal_soc: Opaque HAL SOC handle
2315  * @ac: Access category
2316  * @value: timeout duration in millisec
2317  */
2318 void hal_set_ba_aging_timeout(hal_soc_handle_t hal_soc_hdl, uint8_t ac,
2319 			      uint32_t value);
2320 /**
2321  * hal_srng_dst_hw_init - Private function to initialize SRNG
2322  * destination ring HW
2323  * @hal_soc: HAL SOC handle
2324  * @srng: SRNG ring pointer
2325  */
2326 static inline void hal_srng_dst_hw_init(struct hal_soc *hal,
2327 	struct hal_srng *srng)
2328 {
2329 	hal->ops->hal_srng_dst_hw_init(hal, srng);
2330 }
2331 
2332 /**
2333  * hal_srng_src_hw_init - Private function to initialize SRNG
2334  * source ring HW
2335  * @hal_soc: HAL SOC handle
2336  * @srng: SRNG ring pointer
2337  */
2338 static inline void hal_srng_src_hw_init(struct hal_soc *hal,
2339 	struct hal_srng *srng)
2340 {
2341 	hal->ops->hal_srng_src_hw_init(hal, srng);
2342 }
2343 
2344 /**
2345  * hal_get_hw_hptp()  - Get HW head and tail pointer value for any ring
2346  * @hal_soc: Opaque HAL SOC handle
2347  * @hal_ring_hdl: Source ring pointer
2348  * @headp: Head Pointer
2349  * @tailp: Tail Pointer
2350  * @ring_type: Ring type
2351  *
2352  * Return: None (head and tail pointer values are returned through the arguments)
2353  */
2354 static inline
2355 void hal_get_hw_hptp(hal_soc_handle_t hal_soc_hdl,
2356 		     hal_ring_handle_t hal_ring_hdl,
2357 		     uint32_t *headp, uint32_t *tailp,
2358 		     uint8_t ring_type)
2359 {
2360 	struct hal_soc *hal_soc = (struct hal_soc *)hal_soc_hdl;
2361 
2362 	hal_soc->ops->hal_get_hw_hptp(hal_soc, hal_ring_hdl,
2363 			headp, tailp, ring_type);
2364 }
2365 
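/*
 * Example (illustrative sketch, not part of the HAL API): compare the cached
 * SW head/tail pointers with the HW values for a ring, e.g. while debugging
 * a ring that appears stuck. ring_type is assumed to be the hal ring type of
 * hal_ring_hdl. Note the differing argument order of the two getters.
 *
 *	uint32_t sw_hp, sw_tp, hw_hp, hw_tp;
 *
 *	hal_get_sw_hptp(hal_soc, hal_ring_hdl, &sw_tp, &sw_hp);
 *	hal_get_hw_hptp(hal_soc_hdl, hal_ring_hdl, &hw_hp, &hw_tp, ring_type);
 *	if (sw_hp != hw_hp || sw_tp != hw_tp)
 *		qdf_print("srng hp/tp mismatch: sw %u/%u hw %u/%u\n",
 *			  sw_hp, sw_tp, hw_hp, hw_tp);
 */
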
2366 /**
2367  * hal_reo_setup - Initialize HW REO block
2368  *
2369  * @hal_soc: Opaque HAL SOC handle
2370  * @reo_params: parameters needed by HAL for REO config
2371  */
2372 static inline void hal_reo_setup(hal_soc_handle_t hal_soc_hdl,
2373 				 void *reoparams)
2374 {
2375 	struct hal_soc *hal_soc = (struct hal_soc *)hal_soc_hdl;
2376 
2377 	hal_soc->ops->hal_reo_setup(hal_soc, reoparams);
2378 }
2379 
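/**
 * hal_compute_reo_remap_ix2_ix3() - Compute REO destination remap register
 * values (IX2/IX3) for the given list of destination rings
 * @hal_soc_hdl: Opaque HAL SOC handle
 * @ring: Array of REO destination ring indices
 * @num_rings: Number of entries in the ring array
 * @remap1: Pointer to be filled with the first computed remap value
 * @remap2: Pointer to be filled with the second computed remap value
 *
 * Return: None
 */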
2380 static inline
2381 void hal_compute_reo_remap_ix2_ix3(hal_soc_handle_t hal_soc_hdl,
2382 				   uint32_t *ring, uint32_t num_rings,
2383 				   uint32_t *remap1, uint32_t *remap2)
2384 {
2385 	struct hal_soc *hal_soc = (struct hal_soc *)hal_soc_hdl;
2386 
2387 	return hal_soc->ops->hal_compute_reo_remap_ix2_ix3(ring,
2388 					num_rings, remap1, remap2);
2389 }
2390 
2391 /**
2392  * hal_setup_link_idle_list - Setup scattered idle list using the
2393  * buffer list provided
2394  *
2395  * @hal_soc: Opaque HAL SOC handle
2396  * @scatter_bufs_base_paddr: Array of physical base addresses
2397  * @scatter_bufs_base_vaddr: Array of virtual base addresses
2398  * @num_scatter_bufs: Number of scatter buffers in the above lists
2399  * @scatter_buf_size: Size of each scatter buffer
2400  * @last_buf_end_offset: Offset to the last entry
2401  * @num_entries: Total entries of all scatter bufs
2402  *
2403  */
2404 static inline
2405 void hal_setup_link_idle_list(hal_soc_handle_t hal_soc_hdl,
2406 			      qdf_dma_addr_t scatter_bufs_base_paddr[],
2407 			      void *scatter_bufs_base_vaddr[],
2408 			      uint32_t num_scatter_bufs,
2409 			      uint32_t scatter_buf_size,
2410 			      uint32_t last_buf_end_offset,
2411 			      uint32_t num_entries)
2412 {
2413 	struct hal_soc *hal_soc = (struct hal_soc *)hal_soc_hdl;
2414 
2415 	hal_soc->ops->hal_setup_link_idle_list(hal_soc, scatter_bufs_base_paddr,
2416 			scatter_bufs_base_vaddr, num_scatter_bufs,
2417 			scatter_buf_size, last_buf_end_offset,
2418 			num_entries);
2419 
2420 }
2421 
2422 /**
2423  * hal_srng_dump_ring_desc() - Dump ring descriptor info
2424  *
2425  * @hal_soc: Opaque HAL SOC handle
2426  * @hal_ring_hdl: Source ring pointer
2427  * @ring_desc: Opaque ring descriptor handle
2428  */
2429 static inline void hal_srng_dump_ring_desc(hal_soc_handle_t hal_soc_hdl,
2430 					   hal_ring_handle_t hal_ring_hdl,
2431 					   hal_ring_desc_t ring_desc)
2432 {
2433 	struct hal_srng *srng = (struct hal_srng *)hal_ring_hdl;
2434 
2435 	QDF_TRACE_HEX_DUMP(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_HIGH,
2436 			   ring_desc, (srng->entry_size << 2));
2437 }
2438 
2439 /**
2440  * hal_srng_dump_ring() - Dump last 128 descs of the ring
2441  *
2442  * @hal_soc: Opaque HAL SOC handle
2443  * @hal_ring_hdl: Source ring pointer
2444  */
2445 static inline void hal_srng_dump_ring(hal_soc_handle_t hal_soc_hdl,
2446 				      hal_ring_handle_t hal_ring_hdl)
2447 {
2448 	struct hal_srng *srng = (struct hal_srng *)hal_ring_hdl;
2449 	uint32_t *desc;
2450 	uint32_t tp, i;
2451 
2452 	tp = srng->u.dst_ring.tp;
2453 
2454 	for (i = 0; i < 128; i++) {
2455 		if (!tp)
2456 			tp = srng->ring_size;
2457 
2458 		desc = &srng->ring_base_vaddr[tp - srng->entry_size];
2459 		QDF_TRACE_HEX_DUMP(QDF_MODULE_ID_DP,
2460 				   QDF_TRACE_LEVEL_DEBUG,
2461 				   desc, (srng->entry_size << 2));
2462 
2463 		tp -= srng->entry_size;
2464 	}
2465 }
2466 
2467 /**
2468  * hal_rxdma_desc_to_hal_ring_desc - API to convert rxdma ring desc
2469  * to opaque hal ring desc type
2470  * @ring_desc: rxdma ring desc
2471  *
2472  * Return: hal_ring_desc_t type
2473  */
2474 static inline
2475 hal_ring_desc_t hal_rxdma_desc_to_hal_ring_desc(hal_rxdma_desc_t ring_desc)
2476 {
2477 	return (hal_ring_desc_t)ring_desc;
2478 }
2479 
2480 /**
2481  * hal_srng_set_event() - Set hal_srng event
2482  * @hal_ring_hdl: Source ring pointer
2483  * @event: SRNG ring event
2484  *
2485  * Return: None
2486  */
2487 static inline void hal_srng_set_event(hal_ring_handle_t hal_ring_hdl, int event)
2488 {
2489 	struct hal_srng *srng = (struct hal_srng *)hal_ring_hdl;
2490 
2491 	qdf_atomic_set_bit(event, &srng->srng_event);
2492 }
2493 
2494 /**
2495  * hal_srng_clear_event() - Clear hal_srng event
2496  * @hal_ring_hdl: Source ring pointer
2497  * @event: SRNG ring event
2498  *
2499  * Return: None
2500  */
2501 static inline
2502 void hal_srng_clear_event(hal_ring_handle_t hal_ring_hdl, int event)
2503 {
2504 	struct hal_srng *srng = (struct hal_srng *)hal_ring_hdl;
2505 
2506 	qdf_atomic_clear_bit(event, &srng->srng_event);
2507 }
2508 
2509 /**
2510  * hal_srng_get_clear_event() - Clear srng event and return old value
2511  * @hal_ring_hdl: Source ring pointer
2512  * @event: SRNG ring event
2513  *
2514  * Return: Old event value
2515  */
2516 static inline
2517 int hal_srng_get_clear_event(hal_ring_handle_t hal_ring_hdl, int event)
2518 {
2519 	struct hal_srng *srng = (struct hal_srng *)hal_ring_hdl;
2520 
2521 	return qdf_atomic_test_and_clear_bit(event, &srng->srng_event);
2522 }
2523 
2524 /**
2525  * hal_srng_set_flush_last_ts() - Record last flush time stamp
2526  * @hal_ring_hdl: Source ring pointer
2527  *
2528  * Return: None
2529  */
2530 static inline void hal_srng_set_flush_last_ts(hal_ring_handle_t hal_ring_hdl)
2531 {
2532 	struct hal_srng *srng = (struct hal_srng *)hal_ring_hdl;
2533 
2534 	srng->last_flush_ts = qdf_get_log_timestamp();
2535 }
2536 
2537 /**
2538  * hal_srng_inc_flush_cnt() - Increment flush counter
2539  * @hal_ring_hdl: Source ring pointer
2540  *
2541  * Return: None
2542  */
2543 static inline void hal_srng_inc_flush_cnt(hal_ring_handle_t hal_ring_hdl)
2544 {
2545 	struct hal_srng *srng = (struct hal_srng *)hal_ring_hdl;
2546 
2547 	srng->flush_count++;
2548 }
2549 
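/*
 * Example (illustrative sketch, not part of the HAL API): deferred-flush
 * bookkeeping using the srng event helpers. HAL_SRNG_FLUSH_EVENT is assumed
 * to be a valid srng event bit defined elsewhere; dp_flush_ring() is a
 * hypothetical caller-side helper.
 *
 *	// producer path: flush could not be done now, remember it
 *	hal_srng_set_event(hal_ring_hdl, HAL_SRNG_FLUSH_EVENT);
 *
 *	// deferred path: flush if the event was pending, then record it
 *	if (hal_srng_get_clear_event(hal_ring_hdl, HAL_SRNG_FLUSH_EVENT)) {
 *		dp_flush_ring(hal_ring_hdl);
 *		hal_srng_set_flush_last_ts(hal_ring_hdl);
 *		hal_srng_inc_flush_cnt(hal_ring_hdl);
 *	}
 */
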
2550 /**
2551  * hal_rx_sw_mon_desc_info_get () - Get SW monitor desc info
2552  *
2553  * @hal: Core HAL soc handle
2554  * @ring_desc: Mon dest ring descriptor
2555  * @desc_info: Desc info to be populated
2556  *
2557  * Return: None
2558  */
2559 static inline void
2560 hal_rx_sw_mon_desc_info_get(struct hal_soc *hal,
2561 			    hal_ring_desc_t ring_desc,
2562 			    hal_rx_mon_desc_info_t desc_info)
2563 {
2564 	return hal->ops->hal_rx_sw_mon_desc_info_get(ring_desc, desc_info);
2565 }
2566 
2567 /**
2568  * hal_reo_set_err_dst_remap() - Set REO error destination ring remap
2569  *				 register value.
2570  *
2571  * @hal_soc_hdl: Opaque HAL soc handle
2572  *
2573  * Return: None
2574  */
2575 static inline void hal_reo_set_err_dst_remap(hal_soc_handle_t hal_soc_hdl)
2576 {
2577 	struct hal_soc *hal_soc = (struct hal_soc *)hal_soc_hdl;
2578 
2579 	if (hal_soc->ops->hal_reo_set_err_dst_remap)
2580 		hal_soc->ops->hal_reo_set_err_dst_remap(hal_soc);
2581 }
2582 
2583 #ifdef GENERIC_SHADOW_REGISTER_ACCESS_ENABLE
2584 
2585 /**
2586  * hal_set_one_target_reg_config() - Populate the target reg
2587  * offset in hal_soc for one non srng related register at the
2588  * given list index
2589  * @hal_soc: hal handle
2590  * @target_reg_offset: target register offset
2591  * @list_index: index in hal list for shadow regs
2592  *
2593  * Return: none
2594  */
2595 void hal_set_one_target_reg_config(struct hal_soc *hal,
2596 				   uint32_t target_reg_offset,
2597 				   int list_index);
2598 
2599 /**
2600  * hal_set_shadow_regs() - Populate register offset for
2601  * registers that need to be populated in list_shadow_reg_config
2602  * in order to be sent to FW. These reg offsets will be mapped
2603  * to shadow registers.
2604  * @hal_soc: hal handle
2605  *
2606  * Return: QDF_STATUS_OK on success
2607  */
2608 QDF_STATUS hal_set_shadow_regs(void *hal_soc);
2609 
2610 /**
2611  * hal_construct_shadow_regs() - initialize the shadow registers
2612  * for non-srng related register configs
2613  * @hal_soc: hal handle
2614  *
2615  * Return: QDF_STATUS_OK on success
2616  */
2617 QDF_STATUS hal_construct_shadow_regs(void *hal_soc);
2618 
2619 #else /* GENERIC_SHADOW_REGISTER_ACCESS_ENABLE */
2620 static inline void hal_set_one_target_reg_config(
2621 	struct hal_soc *hal,
2622 	uint32_t target_reg_offset,
2623 	int list_index)
2624 {
2625 }
2626 
2627 static inline QDF_STATUS hal_set_shadow_regs(void *hal_soc)
2628 {
2629 	return QDF_STATUS_SUCCESS;
2630 }
2631 
2632 static inline QDF_STATUS hal_construct_shadow_regs(void *hal_soc)
2633 {
2634 	return QDF_STATUS_SUCCESS;
2635 }
2636 #endif /* GENERIC_SHADOW_REGISTER_ACCESS_ENABLE */
2637 
2638 #ifdef FEATURE_HAL_DELAYED_REG_WRITE
2639 /**
2640  * hal_flush_reg_write_work() - flush all writes from register write queue
2641  * @hal_handle: hal_soc handle
2642  *
2643  * Return: None
2644  */
2645 void hal_flush_reg_write_work(hal_soc_handle_t hal_handle);
2646 #else
2647 static inline void hal_flush_reg_write_work(hal_soc_handle_t hal_handle) { }
2648 #endif
2649 #endif /* _HAL_API_H_ */
2650