xref: /wlan-dirver/qca-wifi-host-cmn/hal/wifi3.0/hal_api.h (revision 664b1762eae9f327af87bad3a11233782d8c810b)
1 /*
2  * Copyright (c) 2016-2021 The Linux Foundation. All rights reserved.
3  * Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved.
4  *
5  * Permission to use, copy, modify, and/or distribute this software for
6  * any purpose with or without fee is hereby granted, provided that the
7  * above copyright notice and this permission notice appear in all
8  * copies.
9  *
10  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
11  * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
12  * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
13  * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
14  * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
15  * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
16  * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
17  * PERFORMANCE OF THIS SOFTWARE.
18  */
19 
20 #ifndef _HAL_API_H_
21 #define _HAL_API_H_
22 
23 #include "qdf_types.h"
24 #include "qdf_util.h"
25 #include "qdf_atomic.h"
26 #include "hal_internal.h"
27 #include "hif.h"
28 #include "hif_io32.h"
29 #include "qdf_platform.h"
30 
31 #ifdef DUMP_REO_QUEUE_INFO_IN_DDR
32 #include "hal_hw_headers.h"
33 #endif
34 
35 /* Ring index for WBM2SW2 release ring */
36 #define HAL_IPA_TX_COMP_RING_IDX 2
37 
38 /* calculate the register address offset from bar0 of shadow register x */
39 #if defined(QCA_WIFI_QCA6390) || defined(QCA_WIFI_QCA6490) || \
40     defined(QCA_WIFI_KIWI)
41 #define SHADOW_REGISTER_START_ADDRESS_OFFSET 0x000008FC
42 #define SHADOW_REGISTER_END_ADDRESS_OFFSET \
43 	((SHADOW_REGISTER_START_ADDRESS_OFFSET) + (4 * (MAX_SHADOW_REGISTERS)))
44 #define SHADOW_REGISTER(x) ((SHADOW_REGISTER_START_ADDRESS_OFFSET) + (4 * (x)))
45 #elif defined(QCA_WIFI_QCA6290) || defined(QCA_WIFI_QCN9000)
46 #define SHADOW_REGISTER_START_ADDRESS_OFFSET 0x00003024
47 #define SHADOW_REGISTER_END_ADDRESS_OFFSET \
48 	((SHADOW_REGISTER_START_ADDRESS_OFFSET) + (4 * (MAX_SHADOW_REGISTERS)))
49 #define SHADOW_REGISTER(x) ((SHADOW_REGISTER_START_ADDRESS_OFFSET) + (4 * (x)))
50 #elif defined(QCA_WIFI_QCA6750)
51 #define SHADOW_REGISTER_START_ADDRESS_OFFSET 0x00000504
52 #define SHADOW_REGISTER_END_ADDRESS_OFFSET \
53 	((SHADOW_REGISTER_START_ADDRESS_OFFSET) + (4 * (MAX_SHADOW_REGISTERS)))
54 #define SHADOW_REGISTER(x) ((SHADOW_REGISTER_START_ADDRESS_OFFSET) + (4 * (x)))
55 #else
56 #define SHADOW_REGISTER(x) 0
57 #endif /* QCA_WIFI_QCA6390 || QCA_WIFI_QCA6490 || QCA_WIFI_QCA6750 */
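/*
 * Illustrative example (offsets taken from the QCA6390/QCA6490/KIWI case
 * above): SHADOW_REGISTER(x) is simply the start offset plus 4 bytes per
 * shadow register, e.g. SHADOW_REGISTER(3) = 0x8FC + (4 * 3) = 0x908.
 */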
58 
59 /*
60  * BAR + 4K is always accessible; any access outside this
61  * space requires the force wake procedure.
62  * OFFSET = 4K - 32 bytes = 0xFE0
63  */
64 #define MAPPED_REF_OFF 0xFE0
65 
66 #define HAL_OFFSET(block, field) block ## _ ## field ## _OFFSET
67 
68 #ifdef ENABLE_VERBOSE_DEBUG
69 static inline void
70 hal_set_verbose_debug(bool flag)
71 {
72 	is_hal_verbose_debug_enabled = flag;
73 }
74 #endif
75 
76 #ifdef ENABLE_HAL_SOC_STATS
77 #define HAL_STATS_INC(_handle, _field, _delta) \
78 { \
79 	if (likely(_handle)) \
80 		_handle->stats._field += _delta; \
81 }
82 #else
83 #define HAL_STATS_INC(_handle, _field, _delta)
84 #endif
85 
86 #ifdef ENABLE_HAL_REG_WR_HISTORY
87 #define HAL_REG_WRITE_FAIL_HIST_ADD(hal_soc, offset, wr_val, rd_val) \
88 	hal_reg_wr_fail_history_add(hal_soc, offset, wr_val, rd_val)
89 
90 void hal_reg_wr_fail_history_add(struct hal_soc *hal_soc,
91 				 uint32_t offset,
92 				 uint32_t wr_val,
93 				 uint32_t rd_val);
94 
95 static inline int hal_history_get_next_index(qdf_atomic_t *table_index,
96 					     int array_size)
97 {
98 	int record_index = qdf_atomic_inc_return(table_index);
99 
100 	return record_index & (array_size - 1);
101 }
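/*
 * Note: the returned index is wrapped with a bitwise AND, so history
 * arrays are assumed to be sized as a power of 2; e.g. with
 * array_size = 32 the indices cycle 0, 1, ..., 31, 0, 1, ...
 */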
102 #else
103 #define HAL_REG_WRITE_FAIL_HIST_ADD(hal_soc, offset, wr_val, rd_val) \
104 	hal_err("write failed at reg offset 0x%x, write 0x%x read 0x%x\n", \
105 		offset,	\
106 		wr_val,	\
107 		rd_val)
108 #endif
109 
110 /**
111  * hal_reg_write_result_check() - check register writing result
112  * @hal_soc: HAL soc handle
113  * @offset: register offset to read
114  * @exp_val: the expected value of the register
116  *
117  * Return: none
118  */
119 static inline void hal_reg_write_result_check(struct hal_soc *hal_soc,
120 					      uint32_t offset,
121 					      uint32_t exp_val)
122 {
123 	uint32_t value;
124 
125 	value = qdf_ioread32(hal_soc->dev_base_addr + offset);
126 	if (exp_val != value) {
127 		HAL_REG_WRITE_FAIL_HIST_ADD(hal_soc, offset, exp_val, value);
128 		HAL_STATS_INC(hal_soc, reg_write_fail, 1);
129 	}
130 }
131 
132 #ifdef WINDOW_REG_PLD_LOCK_ENABLE
133 static inline void hal_lock_reg_access(struct hal_soc *soc,
134 				       unsigned long *flags)
135 {
136 	pld_lock_reg_window(soc->qdf_dev->dev, flags);
137 }
138 
139 static inline void hal_unlock_reg_access(struct hal_soc *soc,
140 					 unsigned long *flags)
141 {
142 	pld_unlock_reg_window(soc->qdf_dev->dev, flags);
143 }
144 #else
145 static inline void hal_lock_reg_access(struct hal_soc *soc,
146 				       unsigned long *flags)
147 {
148 	qdf_spin_lock_irqsave(&soc->register_access_lock);
149 }
150 
151 static inline void hal_unlock_reg_access(struct hal_soc *soc,
152 					 unsigned long *flags)
153 {
154 	qdf_spin_unlock_irqrestore(&soc->register_access_lock);
155 }
156 #endif
157 
158 #ifdef PCIE_REG_WINDOW_LOCAL_NO_CACHE
159 /**
160  * hal_select_window_confirm() - write remap window register and
161  *			       check writing result
162  *
163  */
164 static inline void hal_select_window_confirm(struct hal_soc *hal_soc,
165 					     uint32_t offset)
166 {
167 	uint32_t window = (offset >> WINDOW_SHIFT) & WINDOW_VALUE_MASK;
168 
169 	qdf_iowrite32(hal_soc->dev_base_addr + WINDOW_REG_ADDRESS,
170 		      WINDOW_ENABLE_BIT | window);
171 	hal_soc->register_window = window;
172 
173 	hal_reg_write_result_check(hal_soc, WINDOW_REG_ADDRESS,
174 				   WINDOW_ENABLE_BIT | window);
175 }
176 #else
177 static inline void hal_select_window_confirm(struct hal_soc *hal_soc,
178 					     uint32_t offset)
179 {
180 	uint32_t window = (offset >> WINDOW_SHIFT) & WINDOW_VALUE_MASK;
181 
182 	if (window != hal_soc->register_window) {
183 		qdf_iowrite32(hal_soc->dev_base_addr + WINDOW_REG_ADDRESS,
184 			      WINDOW_ENABLE_BIT | window);
185 		hal_soc->register_window = window;
186 
187 		hal_reg_write_result_check(
188 					hal_soc,
189 					WINDOW_REG_ADDRESS,
190 					WINDOW_ENABLE_BIT | window);
191 	}
192 }
193 #endif
194 
195 static inline qdf_iomem_t hal_get_window_address(struct hal_soc *hal_soc,
196 						 qdf_iomem_t addr)
197 {
198 	return hal_soc->ops->hal_get_window_address(hal_soc, addr);
199 }
200 
201 static inline void hal_tx_init_cmd_credit_ring(hal_soc_handle_t hal_soc_hdl,
202 					       hal_ring_handle_t hal_ring_hdl)
203 {
204 	struct hal_soc *hal_soc = (struct hal_soc *)hal_soc_hdl;
205 
206 	return hal_soc->ops->hal_tx_init_cmd_credit_ring(hal_soc_hdl,
207 							 hal_ring_hdl);
208 }
209 
210 /**
211  * hal_write32_mb() - Access registers to update configuration
212  * @hal_soc: hal soc handle
213  * @offset: offset address from the BAR
214  * @value: value to write
215  *
216  * Return: None
217  *
218  * Description: Register address space is split below:
219  *     SHADOW REGION       UNWINDOWED REGION    WINDOWED REGION
220  *  |--------------------|-------------------|------------------|
221  * BAR  NO FORCE WAKE  BAR+4K  FORCE WAKE  BAR+512K  FORCE WAKE
222  *
223  * 1. Any access to the shadow region needs neither force wake
224  *    nor windowing logic.
225  * 2. Any access beyond BAR + 4K:
226  *    If init_phase is enabled, no force wake is needed and access
227  *    should be based on windowed or unwindowed access.
228  *    If init_phase is disabled, force wake is needed and access
229  *    should be based on windowed or unwindowed access.
230  *
231  * note1: WINDOW_RANGE_MASK = (1 << WINDOW_SHIFT) -1
232  * note2: 1 << WINDOW_SHIFT = MAX_UNWINDOWED_ADDRESS
233  * note3: WINDOW_VALUE_MASK = big enough that trying to write past
234  *                            that window would be a bug
235  */
236 #if !defined(QCA_WIFI_QCA6390) && !defined(QCA_WIFI_QCA6490) && \
237     !defined(QCA_WIFI_QCA6750) && !defined(QCA_WIFI_KIWI)
238 static inline void hal_write32_mb(struct hal_soc *hal_soc, uint32_t offset,
239 				  uint32_t value)
240 {
241 	unsigned long flags;
242 	qdf_iomem_t new_addr;
243 
244 	if (!hal_soc->use_register_windowing ||
245 	    offset < MAX_UNWINDOWED_ADDRESS) {
246 		qdf_iowrite32(hal_soc->dev_base_addr + offset, value);
247 	} else if (hal_soc->static_window_map) {
248 		new_addr = hal_get_window_address(hal_soc,
249 				hal_soc->dev_base_addr + offset);
250 		qdf_iowrite32(new_addr, value);
251 	} else {
252 		hal_lock_reg_access(hal_soc, &flags);
253 		hal_select_window_confirm(hal_soc, offset);
254 		qdf_iowrite32(hal_soc->dev_base_addr + WINDOW_START +
255 			  (offset & WINDOW_RANGE_MASK), value);
256 		hal_unlock_reg_access(hal_soc, &flags);
257 	}
258 }
259 
260 #define hal_write32_mb_confirm(_hal_soc, _offset, _value) \
261 		hal_write32_mb(_hal_soc, _offset, _value)
262 
263 #define hal_write32_mb_cmem(_hal_soc, _offset, _value)
264 #else
265 static inline void hal_write32_mb(struct hal_soc *hal_soc, uint32_t offset,
266 				  uint32_t value)
267 {
268 	int ret;
269 	unsigned long flags;
270 	qdf_iomem_t new_addr;
271 
272 	if (!TARGET_ACCESS_ALLOWED(HIF_GET_SOFTC(
273 					hal_soc->hif_handle))) {
274 		hal_err_rl("target access is not allowed");
275 		return;
276 	}
277 
278 	/* Region < BAR + 4K can be directly accessed */
279 	if (offset < MAPPED_REF_OFF) {
280 		qdf_iowrite32(hal_soc->dev_base_addr + offset, value);
281 		return;
282 	}
283 
284 	/* Region greater than BAR + 4K */
285 	if (!hal_soc->init_phase) {
286 		ret = hif_force_wake_request(hal_soc->hif_handle);
287 		if (ret) {
288 			hal_err_rl("Wake up request failed");
289 			qdf_check_state_before_panic(__func__, __LINE__);
290 			return;
291 		}
292 	}
293 
294 	if (!hal_soc->use_register_windowing ||
295 	    offset < MAX_UNWINDOWED_ADDRESS) {
296 		qdf_iowrite32(hal_soc->dev_base_addr + offset, value);
297 	} else if (hal_soc->static_window_map) {
298 		new_addr = hal_get_window_address(
299 					hal_soc,
300 					hal_soc->dev_base_addr + offset);
301 		qdf_iowrite32(new_addr, value);
302 	} else {
303 		hal_lock_reg_access(hal_soc, &flags);
304 		hal_select_window_confirm(hal_soc, offset);
305 		qdf_iowrite32(hal_soc->dev_base_addr + WINDOW_START +
306 			  (offset & WINDOW_RANGE_MASK), value);
307 		hal_unlock_reg_access(hal_soc, &flags);
308 	}
309 
310 	if (!hal_soc->init_phase) {
311 		ret = hif_force_wake_release(hal_soc->hif_handle);
312 		if (ret) {
313 			hal_err("Wake up release failed");
314 			qdf_check_state_before_panic(__func__, __LINE__);
315 			return;
316 		}
317 	}
318 }
319 
320 /**
321  * hal_write32_mb_confirm() - write register and check writing result
322  *
323  */
324 static inline void hal_write32_mb_confirm(struct hal_soc *hal_soc,
325 					  uint32_t offset,
326 					  uint32_t value)
327 {
328 	int ret;
329 	unsigned long flags;
330 	qdf_iomem_t new_addr;
331 
332 	if (!TARGET_ACCESS_ALLOWED(HIF_GET_SOFTC(
333 					hal_soc->hif_handle))) {
334 		hal_err_rl("target access is not allowed");
335 		return;
336 	}
337 
338 	/* Region < BAR + 4K can be directly accessed */
339 	if (offset < MAPPED_REF_OFF) {
340 		qdf_iowrite32(hal_soc->dev_base_addr + offset, value);
341 		return;
342 	}
343 
344 	/* Region greater than BAR + 4K */
345 	if (!hal_soc->init_phase) {
346 		ret = hif_force_wake_request(hal_soc->hif_handle);
347 		if (ret) {
348 			hal_err("Wake up request failed");
349 			qdf_check_state_before_panic(__func__, __LINE__);
350 			return;
351 		}
352 	}
353 
354 	if (!hal_soc->use_register_windowing ||
355 	    offset < MAX_UNWINDOWED_ADDRESS) {
356 		qdf_iowrite32(hal_soc->dev_base_addr + offset, value);
357 		hal_reg_write_result_check(hal_soc, offset,
358 					   value);
359 	} else if (hal_soc->static_window_map) {
360 		new_addr = hal_get_window_address(
361 					hal_soc,
362 					hal_soc->dev_base_addr + offset);
363 		qdf_iowrite32(new_addr, value);
364 		hal_reg_write_result_check(hal_soc,
365 					   new_addr - hal_soc->dev_base_addr,
366 					   value);
367 	} else {
368 		hal_lock_reg_access(hal_soc, &flags);
369 		hal_select_window_confirm(hal_soc, offset);
370 		qdf_iowrite32(hal_soc->dev_base_addr + WINDOW_START +
371 			  (offset & WINDOW_RANGE_MASK), value);
372 
373 		hal_reg_write_result_check(
374 				hal_soc,
375 				WINDOW_START + (offset & WINDOW_RANGE_MASK),
376 				value);
377 		hal_unlock_reg_access(hal_soc, &flags);
378 	}
379 
380 	if (!hal_soc->init_phase) {
381 		ret = hif_force_wake_release(hal_soc->hif_handle);
382 		if (ret) {
383 			hal_err("Wake up release failed");
384 			qdf_check_state_before_panic(__func__, __LINE__);
385 			return;
386 		}
387 	}
388 }
389 
390 static inline void hal_write32_mb_cmem(struct hal_soc *hal_soc, uint32_t offset,
391 				       uint32_t value)
392 {
393 	unsigned long flags;
394 	qdf_iomem_t new_addr;
395 
396 	if (!TARGET_ACCESS_ALLOWED(HIF_GET_SOFTC(
397 					hal_soc->hif_handle))) {
398 		hal_err_rl("%s: target access is not allowed", __func__);
399 		return;
400 	}
401 
402 	if (!hal_soc->use_register_windowing ||
403 	    offset < MAX_UNWINDOWED_ADDRESS) {
404 		qdf_iowrite32(hal_soc->dev_base_addr + offset, value);
405 	} else if (hal_soc->static_window_map) {
406 		new_addr = hal_get_window_address(
407 					hal_soc,
408 					hal_soc->dev_base_addr + offset);
409 		qdf_iowrite32(new_addr, value);
410 	} else {
411 		hal_lock_reg_access(hal_soc, &flags);
412 		hal_select_window_confirm(hal_soc, offset);
413 		qdf_iowrite32(hal_soc->dev_base_addr + WINDOW_START +
414 			  (offset & WINDOW_RANGE_MASK), value);
415 		hal_unlock_reg_access(hal_soc, &flags);
416 	}
417 }
418 #endif
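/*
 * Usage sketch (illustrative only; assumes the caller holds a valid
 * struct hal_soc pointer and a register offset from the BAR). The
 * windowing, force wake and locking described above are handled inside
 * the accessors, so a plain and a confirmed write look like:
 *
 *	hal_write32_mb(hal_soc, reg_offset, reg_val);
 *	hal_write32_mb_confirm(hal_soc, reg_offset, reg_val);
 *
 * The confirmed variant reads the register back and logs a failure in the
 * HAL stats/write-fail history if the read-back value does not match.
 */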
419 
420 /**
421  * hal_write_address_32_mb - write a value to a register
422  *
423  */
424 static inline
425 void hal_write_address_32_mb(struct hal_soc *hal_soc,
426 			     qdf_iomem_t addr, uint32_t value, bool wr_confirm)
427 {
428 	uint32_t offset;
429 
430 	if (!hal_soc->use_register_windowing)
431 		return qdf_iowrite32(addr, value);
432 
433 	offset = addr - hal_soc->dev_base_addr;
434 
435 	if (qdf_unlikely(wr_confirm))
436 		hal_write32_mb_confirm(hal_soc, offset, value);
437 	else
438 		hal_write32_mb(hal_soc, offset, value);
439 }
440 
441 
442 #ifdef DP_HAL_MULTIWINDOW_DIRECT_ACCESS
443 static inline void hal_srng_write_address_32_mb(struct hal_soc *hal_soc,
444 						struct hal_srng *srng,
445 						void __iomem *addr,
446 						uint32_t value)
447 {
448 	qdf_iowrite32(addr, value);
449 }
450 #elif defined(FEATURE_HAL_DELAYED_REG_WRITE)
451 static inline void hal_srng_write_address_32_mb(struct hal_soc *hal_soc,
452 						struct hal_srng *srng,
453 						void __iomem *addr,
454 						uint32_t value)
455 {
456 	hal_delayed_reg_write(hal_soc, srng, addr, value);
457 }
458 #else
459 static inline void hal_srng_write_address_32_mb(struct hal_soc *hal_soc,
460 						struct hal_srng *srng,
461 						void __iomem *addr,
462 						uint32_t value)
463 {
464 	hal_write_address_32_mb(hal_soc, addr, value, false);
465 }
466 #endif
467 
468 #if !defined(QCA_WIFI_QCA6390) && !defined(QCA_WIFI_QCA6490) && \
469     !defined(QCA_WIFI_QCA6750) && !defined(QCA_WIFI_KIWI)
470 /**
471  * hal_read32_mb() - Access registers to read configuration
472  * @hal_soc: hal soc handle
473  * @offset: offset address from the BAR
475  *
476  * Description: Register address space is split below:
477  *     SHADOW REGION       UNWINDOWED REGION    WINDOWED REGION
478  *  |--------------------|-------------------|------------------|
479  * BAR  NO FORCE WAKE  BAR+4K  FORCE WAKE  BAR+512K  FORCE WAKE
480  *
481  * 1. Any access to the shadow region, doesn't need force wake
482  *    and windowing logic to access.
483  * 2. Any access beyond BAR + 4K:
484  *    If init_phase enabled, no force wake is needed and access
485  *    should be based on windowed or unwindowed access.
486  *    If init_phase disabled, force wake is needed and access
487  *    should be based on windowed or unwindowed access.
488  *
489  * Return: < 0 for failure/>= 0 for success
490  */
491 static inline uint32_t hal_read32_mb(struct hal_soc *hal_soc, uint32_t offset)
492 {
493 	uint32_t ret;
494 	unsigned long flags;
495 	qdf_iomem_t new_addr;
496 
497 	if (!hal_soc->use_register_windowing ||
498 	    offset < MAX_UNWINDOWED_ADDRESS) {
499 		return qdf_ioread32(hal_soc->dev_base_addr + offset);
500 	} else if (hal_soc->static_window_map) {
501 		new_addr = hal_get_window_address(hal_soc, hal_soc->dev_base_addr + offset);
502 		return qdf_ioread32(new_addr);
503 	}
504 
505 	hal_lock_reg_access(hal_soc, &flags);
506 	hal_select_window_confirm(hal_soc, offset);
507 	ret = qdf_ioread32(hal_soc->dev_base_addr + WINDOW_START +
508 		       (offset & WINDOW_RANGE_MASK));
509 	hal_unlock_reg_access(hal_soc, &flags);
510 
511 	return ret;
512 }
513 
514 #define hal_read32_mb_cmem(_hal_soc, _offset)
515 #else
516 static inline
517 uint32_t hal_read32_mb(struct hal_soc *hal_soc, uint32_t offset)
518 {
519 	uint32_t ret;
520 	unsigned long flags;
521 	qdf_iomem_t new_addr;
522 
523 	if (!TARGET_ACCESS_ALLOWED(HIF_GET_SOFTC(
524 					hal_soc->hif_handle))) {
525 		hal_err_rl("target access is not allowed");
526 		return 0;
527 	}
528 
529 	/* Region < BAR + 4K can be directly accessed */
530 	if (offset < MAPPED_REF_OFF)
531 		return qdf_ioread32(hal_soc->dev_base_addr + offset);
532 
533 	if ((!hal_soc->init_phase) &&
534 	    hif_force_wake_request(hal_soc->hif_handle)) {
535 		hal_err("Wake up request failed");
536 		qdf_check_state_before_panic(__func__, __LINE__);
537 		return 0;
538 	}
539 
540 	if (!hal_soc->use_register_windowing ||
541 	    offset < MAX_UNWINDOWED_ADDRESS) {
542 		ret = qdf_ioread32(hal_soc->dev_base_addr + offset);
543 	} else if (hal_soc->static_window_map) {
544 		new_addr = hal_get_window_address(
545 					hal_soc,
546 					hal_soc->dev_base_addr + offset);
547 		ret = qdf_ioread32(new_addr);
548 	} else {
549 		hal_lock_reg_access(hal_soc, &flags);
550 		hal_select_window_confirm(hal_soc, offset);
551 		ret = qdf_ioread32(hal_soc->dev_base_addr + WINDOW_START +
552 			       (offset & WINDOW_RANGE_MASK));
553 		hal_unlock_reg_access(hal_soc, &flags);
554 	}
555 
556 	if ((!hal_soc->init_phase) &&
557 	    hif_force_wake_release(hal_soc->hif_handle)) {
558 		hal_err("Wake up release failed");
559 		qdf_check_state_before_panic(__func__, __LINE__);
560 		return 0;
561 	}
562 
563 	return ret;
564 }
565 
566 static inline
567 uint32_t hal_read32_mb_cmem(struct hal_soc *hal_soc, uint32_t offset)
568 {
569 	uint32_t ret;
570 	unsigned long flags;
571 	qdf_iomem_t new_addr;
572 
573 	if (!TARGET_ACCESS_ALLOWED(HIF_GET_SOFTC(
574 					hal_soc->hif_handle))) {
575 		hal_err_rl("%s: target access is not allowed", __func__);
576 		return 0;
577 	}
578 
579 	if (!hal_soc->use_register_windowing ||
580 	    offset < MAX_UNWINDOWED_ADDRESS) {
581 		ret = qdf_ioread32(hal_soc->dev_base_addr + offset);
582 	} else if (hal_soc->static_window_map) {
583 		new_addr = hal_get_window_address(
584 					hal_soc,
585 					hal_soc->dev_base_addr + offset);
586 		ret = qdf_ioread32(new_addr);
587 	} else {
588 		hal_lock_reg_access(hal_soc, &flags);
589 		hal_select_window_confirm(hal_soc, offset);
590 		ret = qdf_ioread32(hal_soc->dev_base_addr + WINDOW_START +
591 			       (offset & WINDOW_RANGE_MASK));
592 		hal_unlock_reg_access(hal_soc, &flags);
593 	}
594 	return ret;
595 }
596 #endif
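/*
 * Usage sketch (illustrative only): hal_read32_mb() pairs with
 * hal_write32_mb() for a read-modify-write sequence; field_mask and
 * field_val are hypothetical caller-side names:
 *
 *	uint32_t regval = hal_read32_mb(hal_soc, reg_offset);
 *
 *	regval &= ~field_mask;
 *	regval |= (field_val & field_mask);
 *	hal_write32_mb(hal_soc, reg_offset, regval);
 */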
597 
598 /* Maximum number of retries allowed for a register write */
599 #define HAL_REG_WRITE_RETRY_MAX		5
600 /* Delay in milliseconds between retries */
601 #define HAL_REG_WRITE_RETRY_DELAY	1
602 
603 #ifdef GENERIC_SHADOW_REGISTER_ACCESS_ENABLE
604 /* Exclusive upper bound of the shadow config index range 0..31 */
605 #define HAL_SHADOW_REG_INDEX_LOW 32
606 /* Exclusive upper bound of the shadow config index range 32..39 */
607 #define HAL_SHADOW_REG_INDEX_HIGH 40
608 /* Dirty bit reg offsets corresponding to shadow config index */
609 #define HAL_SHADOW_REG_DIRTY_BIT_DATA_LOW_OFFSET 0x30C8
610 #define HAL_SHADOW_REG_DIRTY_BIT_DATA_HIGH_OFFSET 0x30C4
611 /* PCIE_PCIE_TOP base addr offset */
612 #define HAL_PCIE_PCIE_TOP_WRAPPER 0x01E00000
613 /* Max retry attempts to read the dirty bit reg */
614 #ifdef HAL_CONFIG_SLUB_DEBUG_ON
615 #define HAL_SHADOW_DIRTY_BIT_POLL_MAX 10000
616 #else
617 #define HAL_SHADOW_DIRTY_BIT_POLL_MAX 2000
618 #endif
619 /* Delay in usecs for polling dirty bit reg */
620 #define HAL_SHADOW_DIRTY_BIT_POLL_DELAY 5
621 
622 /**
623  * hal_poll_dirty_bit_reg() - Poll dirty register bit to confirm
624  * write was successful
625  * @hal_soc: hal soc handle
626  * @shadow_config_index: index of shadow reg used to confirm
627  * write
628  *
629  * Return: QDF_STATUS_SUCCESS on success
630  */
631 static inline QDF_STATUS hal_poll_dirty_bit_reg(struct hal_soc *hal,
632 						int shadow_config_index)
633 {
634 	uint32_t read_value = 0;
635 	int retry_cnt = 0;
636 	uint32_t reg_offset = 0;
637 
638 	if (shadow_config_index > 0 &&
639 	    shadow_config_index < HAL_SHADOW_REG_INDEX_LOW) {
640 		reg_offset =
641 			HAL_SHADOW_REG_DIRTY_BIT_DATA_LOW_OFFSET;
642 	} else if (shadow_config_index >= HAL_SHADOW_REG_INDEX_LOW &&
643 		   shadow_config_index < HAL_SHADOW_REG_INDEX_HIGH) {
644 		reg_offset =
645 			HAL_SHADOW_REG_DIRTY_BIT_DATA_HIGH_OFFSET;
646 	} else {
647 		hal_err("Invalid shadow_config_index = %d",
648 			shadow_config_index);
649 		return QDF_STATUS_E_INVAL;
650 	}
651 	while (retry_cnt < HAL_SHADOW_DIRTY_BIT_POLL_MAX) {
652 		read_value = hal_read32_mb(
653 				hal, HAL_PCIE_PCIE_TOP_WRAPPER + reg_offset);
654 		/* Check if dirty bit corresponding to shadow_index is set */
655 		if (read_value & BIT(shadow_config_index)) {
656 			/* Dirty reg bit not reset */
657 			qdf_udelay(HAL_SHADOW_DIRTY_BIT_POLL_DELAY);
658 			retry_cnt++;
659 		} else {
660 			hal_debug("Shadow write: offset 0x%x read val 0x%x",
661 				  reg_offset, read_value);
662 			return QDF_STATUS_SUCCESS;
663 		}
664 	}
665 	return QDF_STATUS_E_TIMEOUT;
666 }
667 
668 /**
669  * hal_write32_mb_shadow_confirm() - write to shadow reg and
670  * poll dirty register bit to confirm write
671  * @hal_soc: hal soc handle
672  * @reg_offset: target reg offset address from BAR
673  * @value: value to write
674  *
675  * Return: QDF_STATUS_SUCCESS on success
676  */
677 static inline QDF_STATUS hal_write32_mb_shadow_confirm(
678 	struct hal_soc *hal,
679 	uint32_t reg_offset,
680 	uint32_t value)
681 {
682 	int i;
683 	QDF_STATUS ret;
684 	uint32_t shadow_reg_offset;
685 	int shadow_config_index;
686 	bool is_reg_offset_present = false;
687 
688 	for (i = 0; i < MAX_GENERIC_SHADOW_REG; i++) {
689 		/* Found the shadow config for the reg_offset */
690 		struct shadow_reg_config *hal_shadow_reg_list =
691 			&hal->list_shadow_reg_config[i];
692 		if (hal_shadow_reg_list->target_register ==
693 			reg_offset) {
694 			shadow_config_index =
695 				hal_shadow_reg_list->shadow_config_index;
696 			shadow_reg_offset =
697 				SHADOW_REGISTER(shadow_config_index);
698 			hal_write32_mb_confirm(
699 				hal, shadow_reg_offset, value);
700 			is_reg_offset_present = true;
701 			break;
702 		}
703 		ret = QDF_STATUS_E_FAILURE;
704 	}
705 	if (is_reg_offset_present) {
706 		ret = hal_poll_dirty_bit_reg(hal, shadow_config_index);
707 		hal_info("Shadow write:reg 0x%x val 0x%x ret %d",
708 			 reg_offset, value, ret);
709 		if (QDF_IS_STATUS_ERROR(ret)) {
710 			HAL_STATS_INC(hal, shadow_reg_write_fail, 1);
711 			return ret;
712 		}
713 		HAL_STATS_INC(hal, shadow_reg_write_succ, 1);
714 	}
715 	return ret;
716 }
717 
718 /**
719  * hal_write32_mb_confirm_retry() - write register with confirmation and
720  *				      retry/recover if the write fails
721  * @hal_soc: hal soc handle
722  * @offset: offset address from the BAR
723  * @value: value to write
724  * @recovery: is recovery needed or not.
725  *
726  * Write the register value with confirmation and read it back; if the
727  * read-back value is not as expected, retry the write. If the retry
728  * count reaches the maximum allowed and the write still fails, trigger
729  * recovery if requested.
730  *
731  * Return: None
732  */
733 static inline void hal_write32_mb_confirm_retry(struct hal_soc *hal_soc,
734 						uint32_t offset,
735 						uint32_t value,
736 						bool recovery)
737 {
738 	QDF_STATUS ret;
739 
740 	ret = hal_write32_mb_shadow_confirm(hal_soc, offset, value);
741 	if (QDF_IS_STATUS_ERROR(ret) && recovery)
742 		qdf_trigger_self_recovery(NULL, QDF_HAL_REG_WRITE_FAILURE);
743 }
744 #else /* GENERIC_SHADOW_REGISTER_ACCESS_ENABLE */
745 
746 static inline void hal_write32_mb_confirm_retry(struct hal_soc *hal_soc,
747 						uint32_t offset,
748 						uint32_t value,
749 						bool recovery)
750 {
751 	uint8_t retry_cnt = 0;
752 	uint32_t read_value;
753 
754 	while (retry_cnt <= HAL_REG_WRITE_RETRY_MAX) {
755 		hal_write32_mb_confirm(hal_soc, offset, value);
756 		read_value = hal_read32_mb(hal_soc, offset);
757 		if (qdf_likely(read_value == value))
758 			break;
759 
760 		/* write failed, do retry */
761 		hal_warn("Retry reg offset 0x%x, value 0x%x, read value 0x%x",
762 			 offset, value, read_value);
763 		qdf_mdelay(HAL_REG_WRITE_RETRY_DELAY);
764 		retry_cnt++;
765 	}
766 
767 	if (retry_cnt > HAL_REG_WRITE_RETRY_MAX && recovery)
768 		qdf_trigger_self_recovery(NULL, QDF_HAL_REG_WRITE_FAILURE);
769 }
770 #endif /* GENERIC_SHADOW_REGISTER_ACCESS_ENABLE */
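/*
 * Usage sketch (illustrative only): callers that cannot tolerate a stale
 * register value use the confirm-and-retry variant and may request self
 * recovery on persistent failure:
 *
 *	hal_write32_mb_confirm_retry(hal_soc, reg_offset, reg_val, true);
 *
 * With GENERIC_SHADOW_REGISTER_ACCESS_ENABLE the write is routed through
 * the matching shadow register and confirmed via the dirty bit poll;
 * otherwise it falls back to the read-back/retry loop above.
 */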
771 
772 #if defined(FEATURE_HAL_DELAYED_REG_WRITE)
773 /**
774  * hal_dump_reg_write_srng_stats() - dump SRNG reg write stats
775  * @hal_soc: HAL soc handle
776  *
777  * Return: none
778  */
779 void hal_dump_reg_write_srng_stats(hal_soc_handle_t hal_soc_hdl);
780 
781 /**
782  * hal_dump_reg_write_stats() - dump reg write stats
783  * @hal_soc: HAL soc handle
784  *
785  * Return: none
786  */
787 void hal_dump_reg_write_stats(hal_soc_handle_t hal_soc_hdl);
788 
789 /**
790  * hal_get_reg_write_pending_work() - get the number of entries
791  *		pending in the workqueue to be processed.
792  * @hal_soc: HAL soc handle
793  *
794  * Returns: the number of entries pending to be processed
795  */
796 int hal_get_reg_write_pending_work(void *hal_soc);
797 
798 #else
799 static inline void hal_dump_reg_write_srng_stats(hal_soc_handle_t hal_soc_hdl)
800 {
801 }
802 
803 static inline void hal_dump_reg_write_stats(hal_soc_handle_t hal_soc_hdl)
804 {
805 }
806 
807 static inline int hal_get_reg_write_pending_work(void *hal_soc)
808 {
809 	return 0;
810 }
811 #endif
812 
813 /**
814  * hal_read_address_32_mb() - Read 32-bit value from the register
815  * @soc: soc handle
816  * @addr: register address to read
817  *
818  * Return: 32-bit value
819  */
820 static inline
821 uint32_t hal_read_address_32_mb(struct hal_soc *soc,
822 				qdf_iomem_t addr)
823 {
824 	uint32_t offset;
825 	uint32_t ret;
826 
827 	if (!soc->use_register_windowing)
828 		return qdf_ioread32(addr);
829 
830 	offset = addr - soc->dev_base_addr;
831 	ret = hal_read32_mb(soc, offset);
832 	return ret;
833 }
834 
835 /**
836  * hal_attach - Initialize HAL layer
837  * @hif_handle: Opaque HIF handle
838  * @qdf_dev: QDF device
839  *
840  * Return: Opaque HAL SOC handle
841  *		 NULL on failure
842  *
843  * This function should be called as part of HIF initialization (for accessing
844  * copy engines). DP layer will get hal_soc handle using hif_get_hal_handle()
845  */
846 void *hal_attach(struct hif_opaque_softc *hif_handle, qdf_device_t qdf_dev);
847 
848 /**
849  * hal_detach - Detach HAL layer
850  * @hal_soc: HAL SOC handle
851  *
852  * This function should be called as part of HIF detach
853  *
854  */
855 extern void hal_detach(void *hal_soc);
856 
857 #define HAL_SRNG_LMAC_RING 0x80000000
858 /* SRNG flags passed in hal_srng_params.flags */
859 #define HAL_SRNG_MSI_SWAP				0x00000008
860 #define HAL_SRNG_RING_PTR_SWAP			0x00000010
861 #define HAL_SRNG_DATA_TLV_SWAP			0x00000020
862 #define HAL_SRNG_LOW_THRES_INTR_ENABLE	0x00010000
863 #define HAL_SRNG_MSI_INTR				0x00020000
864 #define HAL_SRNG_CACHED_DESC		0x00040000
865 
866 #if defined(QCA_WIFI_QCA6490)  || defined(QCA_WIFI_KIWI)
867 #define HAL_SRNG_PREFETCH_TIMER 1
868 #else
869 #define HAL_SRNG_PREFETCH_TIMER 0
870 #endif
871 
872 #define PN_SIZE_24 0
873 #define PN_SIZE_48 1
874 #define PN_SIZE_128 2
875 
876 #ifdef FORCE_WAKE
877 /**
878  * hal_set_init_phase() - Indicate initialization of
879  *                        datapath rings
880  * @soc: hal_soc handle
881  * @init_phase: flag to indicate datapath rings
882  *              initialization status
883  *
884  * Return: None
885  */
886 void hal_set_init_phase(hal_soc_handle_t soc, bool init_phase);
887 #else
888 static inline
889 void hal_set_init_phase(hal_soc_handle_t soc, bool init_phase)
890 {
891 }
892 #endif /* FORCE_WAKE */
893 
894 /**
895  * hal_srng_get_entrysize - Returns size of ring entry in bytes. Should be
896  * used by callers for calculating the size of memory to be allocated before
897  * calling hal_srng_setup to setup the ring
898  *
899  * @hal_soc: Opaque HAL SOC handle
900  * @ring_type: one of the types from hal_ring_type
901  *
902  */
903 extern uint32_t hal_srng_get_entrysize(void *hal_soc, int ring_type);
904 
905 /**
906  * hal_srng_max_entries - Returns maximum possible number of ring entries
907  * @hal_soc: Opaque HAL SOC handle
908  * @ring_type: one of the types from hal_ring_type
909  *
910  * Return: Maximum number of entries for the given ring_type
911  */
912 uint32_t hal_srng_max_entries(void *hal_soc, int ring_type);
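/*
 * Sizing sketch (illustrative only): before hal_srng_setup(), callers
 * typically derive the ring allocation size from the per-entry size;
 * REO_DST is used as an example ring type and num_entries is a
 * caller-chosen count bounded by hal_srng_max_entries():
 *
 *	uint32_t entry_size = hal_srng_get_entrysize(hal_soc, REO_DST);
 *	uint32_t max_entries = hal_srng_max_entries(hal_soc, REO_DST);
 *	uint32_t alloc_size = entry_size * qdf_min(num_entries, max_entries);
 */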
913 
914 void hal_set_low_threshold(hal_ring_handle_t hal_ring_hdl,
915 				 uint32_t low_threshold);
916 
917 /**
918  * hal_srng_dump - Dump ring status
919  * @srng: hal srng pointer
920  */
921 void hal_srng_dump(struct hal_srng *srng);
922 
923 /**
924  * hal_srng_get_dir - Returns the direction of the ring
925  * @hal_soc: Opaque HAL SOC handle
926  * @ring_type: one of the types from hal_ring_type
927  *
928  * Return: Ring direction
929  */
930 enum hal_srng_dir hal_srng_get_dir(void *hal_soc, int ring_type);
931 
932 /* HAL memory information */
933 struct hal_mem_info {
934 	/* dev base virtual addr */
935 	void *dev_base_addr;
936 	/* dev base physical addr */
937 	void *dev_base_paddr;
938 	/* dev base ce virtual addr - applicable only for qca5018 */
939 	/* In qca5018 CE register are outside wcss block */
940 	/* using a separate address space to access CE registers */
941 	void *dev_base_addr_ce;
942 	/* dev base ce physical addr */
943 	void *dev_base_paddr_ce;
944 	/* Remote virtual pointer memory for HW/FW updates */
945 	void *shadow_rdptr_mem_vaddr;
946 	/* Remote physical pointer memory for HW/FW updates */
947 	void *shadow_rdptr_mem_paddr;
948 	/* Shared memory for ring pointer updates from host to FW */
949 	void *shadow_wrptr_mem_vaddr;
950 	/* Shared physical memory for ring pointer updates from host to FW */
951 	void *shadow_wrptr_mem_paddr;
952 	/* lmac srng start id */
953 	uint8_t lmac_srng_start_id;
954 };
955 
956 /* SRNG parameters to be passed to hal_srng_setup */
957 struct hal_srng_params {
958 	/* Physical base address of the ring */
959 	qdf_dma_addr_t ring_base_paddr;
960 	/* Virtual base address of the ring */
961 	void *ring_base_vaddr;
962 	/* Number of entries in ring */
963 	uint32_t num_entries;
964 	/* max transfer length */
965 	uint16_t max_buffer_length;
966 	/* MSI Address */
967 	qdf_dma_addr_t msi_addr;
968 	/* MSI data */
969 	uint32_t msi_data;
970 	/* Interrupt timer threshold – in microseconds */
971 	uint32_t intr_timer_thres_us;
972 	/* Interrupt batch counter threshold – in number of ring entries */
973 	uint32_t intr_batch_cntr_thres_entries;
974 	/* Low threshold – in number of ring entries
975 	 * (valid for src rings only)
976 	 */
977 	uint32_t low_threshold;
978 	/* Misc flags */
979 	uint32_t flags;
980 	/* Unique ring id */
981 	uint8_t ring_id;
982 	/* Source or Destination ring */
983 	enum hal_srng_dir ring_dir;
984 	/* Size of ring entry */
985 	uint32_t entry_size;
986 	/* hw register base address */
987 	void *hwreg_base[MAX_SRNG_REG_GROUPS];
988 	/* prefetch timer config - in microseconds */
989 	uint32_t prefetch_timer;
990 #ifdef WLAN_FEATURE_NEAR_FULL_IRQ
991 	/* Near full IRQ support flag */
992 	uint32_t nf_irq_support;
993 	/* MSI2 Address */
994 	qdf_dma_addr_t msi2_addr;
995 	/* MSI2 data */
996 	uint32_t msi2_data;
997 	/* Critical threshold */
998 	uint16_t crit_thresh;
999 	/* High threshold */
1000 	uint16_t high_thresh;
1001 	/* Safe threshold */
1002 	uint16_t safe_thresh;
1003 #endif
1004 };
1005 
1006 /* hal_construct_srng_shadow_regs() - initialize the shadow
1007  * registers for srngs
1008  * @hal_soc: hal handle
1009  *
1010  * Return: QDF_STATUS_SUCCESS on success
1011  */
1012 QDF_STATUS hal_construct_srng_shadow_regs(void *hal_soc);
1013 
1014 /* hal_set_one_shadow_config() - add a config for the specified ring
1015  * @hal_soc: hal handle
1016  * @ring_type: ring type
1017  * @ring_num: ring num
1018  *
1019  * The ring type and ring num uniquely specify the ring.  After this call,
1020  * the hp/tp will be added as the next entry in the shadow register
1021  * configuration table.  The hal code will use the shadow register address
1022  * in place of the hp/tp address.
1023  *
1024  * This function is exposed, so that the CE module can skip configuring shadow
1025  * registers for unused rings and rings assigned to the firmware.
1026  *
1027  * Return: QDF_STATUS_SUCCESS on success
1028  */
1029 QDF_STATUS hal_set_one_shadow_config(void *hal_soc, int ring_type,
1030 				     int ring_num);
1031 /**
1032  * hal_get_shadow_config() - retrieve the config table for shadow cfg v2
1033  * @hal_soc: hal handle
1034  * @shadow_config: will point to the table after
1035  * @num_shadow_registers_configured: will contain the number of valid entries
1036  */
1037 extern void
1038 hal_get_shadow_config(void *hal_soc,
1039 		      struct pld_shadow_reg_v2_cfg **shadow_config,
1040 		      int *num_shadow_registers_configured);
1041 
1042 #ifdef CONFIG_SHADOW_V3
1043 /**
1044  * hal_get_shadow_v3_config() - retrieve the config table for shadow cfg v3
1045  * @hal_soc: hal handle
1046  * @shadow_config: will point to the table after
1047  * @num_shadow_registers_configured: will contain the number of valid entries
1048  */
1049 extern void
1050 hal_get_shadow_v3_config(void *hal_soc,
1051 			 struct pld_shadow_reg_v3_cfg **shadow_config,
1052 			 int *num_shadow_registers_configured);
1053 #endif
1054 
1055 #ifdef WLAN_FEATURE_NEAR_FULL_IRQ
1056 /**
1057  * hal_srng_is_near_full_irq_supported() - Check if srng supports near full irq
1058  * @hal_soc: HAL SoC handle [To be validated by caller]
1059  * @ring_type: srng type
1060  * @ring_num: The index of the srng (of the same type)
1061  *
1062  * Return: true, if srng support near full irq trigger
1063  *	false, if the srng does not support near full irq support.
1064  */
1065 bool hal_srng_is_near_full_irq_supported(hal_soc_handle_t hal_soc,
1066 					 int ring_type, int ring_num);
1067 #else
1068 static inline
1069 bool hal_srng_is_near_full_irq_supported(hal_soc_handle_t hal_soc,
1070 					 int ring_type, int ring_num)
1071 {
1072 	return false;
1073 }
1074 #endif
1075 
1076 /**
1077  * hal_srng_setup - Initialize HW SRNG ring.
1078  *
1079  * @hal_soc: Opaque HAL SOC handle
1080  * @ring_type: one of the types from hal_ring_type
1081  * @ring_num: Ring number if there are multiple rings of
1082  *		same type (starting from 0)
1083  * @mac_id: valid MAC Id should be passed if ring type is one of lmac rings
1084  * @ring_params: SRNG ring params in hal_srng_params structure.
1085  * @idle_check: Check if ring is idle
1086  *
1087  * Callers are expected to allocate contiguous ring memory of size
1088  * 'num_entries * entry_size' bytes and pass the physical and virtual base
1089  * addresses through 'ring_base_paddr' and 'ring_base_vaddr' in hal_srng_params
1090  * structure. Ring base address should be 8 byte aligned and size of each ring
1091  * entry should be queried using the API hal_srng_get_entrysize
1092  *
1093  * Return: Opaque pointer to ring on success
1094  *		 NULL on failure (if given ring is not available)
1095  */
1096 extern void *hal_srng_setup(void *hal_soc, int ring_type, int ring_num,
1097 	int mac_id, struct hal_srng_params *ring_params, bool idle_check);
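/*
 * Setup sketch (illustrative only, error handling omitted): the caller
 * allocates num_entries * entry_size bytes of 8-byte-aligned coherent
 * memory and passes its addresses in hal_srng_params; the threshold
 * values and ring/mac numbers below are placeholders:
 *
 *	struct hal_srng_params params = {0};
 *	void *srng;
 *
 *	params.ring_base_paddr = ring_paddr;
 *	params.ring_base_vaddr = ring_vaddr;
 *	params.num_entries = num_entries;
 *	params.intr_timer_thres_us = 8;
 *	params.intr_batch_cntr_thres_entries = 1;
 *
 *	srng = hal_srng_setup(hal_soc, REO_DST, 0, 0, &params, false);
 */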
1098 
1099 /* Remapping ids of REO rings */
1100 #define REO_REMAP_TCL 0
1101 #define REO_REMAP_SW1 1
1102 #define REO_REMAP_SW2 2
1103 #define REO_REMAP_SW3 3
1104 #define REO_REMAP_SW4 4
1105 #define REO_REMAP_RELEASE 5
1106 #define REO_REMAP_FW 6
1107 /*
1108  * In Beryllium: 4 bits REO destination ring value is defined as: 0: TCL
1109  * 1:SW1  2:SW2  3:SW3  4:SW4  5:Release  6:FW(WIFI)  7:SW5
1110  * 8:SW6 9:SW7  10:SW8  11: NOT_USED.
1111  *
1112  */
1113 #define REO_REMAP_SW5 7
1114 #define REO_REMAP_SW6 8
1115 #define REO_REMAP_SW7 9
1116 #define REO_REMAP_SW8 10
1117 
1118 /*
1119  * Macro to access HWIO_REO_R0_ERROR_DESTINATION_RING_CTRL_IX_0
1120  * to map destination to rings
1121  */
1122 #define HAL_REO_ERR_REMAP_IX0(_VALUE, _OFFSET) \
1123 	((_VALUE) << \
1124 	 (HWIO_REO_R0_ERROR_DESTINATION_MAPPING_IX_0_ERROR_ ## \
1125 	  DESTINATION_RING_ ## _OFFSET ## _SHFT))
1126 
1127 /*
1128  * Macro to access HWIO_REO_R0_ERROR_DESTINATION_RING_CTRL_IX_1
1129  * to map destination to rings
1130  */
1131 #define HAL_REO_ERR_REMAP_IX1(_VALUE, _OFFSET) \
1132 	((_VALUE) << \
1133 	 (HWIO_REO_R0_ERROR_DESTINATION_MAPPING_IX_1_ERROR_ ## \
1134 	  DESTINATION_RING_ ## _OFFSET ## _SHFT))
1135 
1136 /*
1137  * Macro to access HWIO_REO_R0_DESTINATION_RING_CTRL_IX_0
1138  * to map destination to rings
1139  */
1140 #define HAL_REO_REMAP_IX0(_VALUE, _OFFSET) \
1141 	((_VALUE) << \
1142 	 (HWIO_REO_R0_DESTINATION_RING_CTRL_IX_0_DEST_RING_MAPPING_ ## \
1143 	  _OFFSET ## _SHFT))
1144 
1145 /*
1146  * Macro to access HWIO_REO_R0_DESTINATION_RING_CTRL_IX_1
1147  * to map destination to rings
1148  */
1149 #define HAL_REO_REMAP_IX2(_VALUE, _OFFSET) \
1150 	((_VALUE) << \
1151 	 (HWIO_REO_R0_DESTINATION_RING_CTRL_IX_2_DEST_RING_MAPPING_ ## \
1152 	  _OFFSET ## _SHFT))
1153 
1154 /*
1155  * Macro to access HWIO_REO_R0_DESTINATION_RING_CTRL_IX_3
1156  * to map destination to rings
1157  */
1158 #define HAL_REO_REMAP_IX3(_VALUE, _OFFSET) \
1159 	((_VALUE) << \
1160 	 (HWIO_REO_R0_DESTINATION_RING_CTRL_IX_3_DEST_RING_MAPPING_ ## \
1161 	  _OFFSET ## _SHFT))
1162 
1163 /**
1164  * hal_reo_read_write_ctrl_ix - Read or write REO_DESTINATION_RING_CTRL_IX
1165  * @hal_soc_hdl: HAL SOC handle
1166  * @read: boolean value to indicate if read or write
1167  * @ix0: pointer to store IX0 reg value
1168  * @ix1: pointer to store IX1 reg value
1169  * @ix2: pointer to store IX2 reg value
1170  * @ix3: pointer to store IX3 reg value
1171  */
1172 void hal_reo_read_write_ctrl_ix(hal_soc_handle_t hal_soc_hdl, bool read,
1173 				uint32_t *ix0, uint32_t *ix1,
1174 				uint32_t *ix2, uint32_t *ix3);
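/*
 * Remap sketch (illustrative only): each HAL_REO_REMAP_IX* macro shifts a
 * REO_REMAP_* destination value into the field of one mapping slot of the
 * corresponding CTRL_IX register. Assuming slots 16 and 17 live in IX2
 * (the slot numbering here is an assumption), steering them to SW1/SW2
 * could look like:
 *
 *	uint32_t ix2 = HAL_REO_REMAP_IX2(REO_REMAP_SW1, 16) |
 *		       HAL_REO_REMAP_IX2(REO_REMAP_SW2, 17);
 *
 *	hal_reo_read_write_ctrl_ix(hal_soc_hdl, false, NULL, NULL, &ix2, NULL);
 *
 * NULL is assumed to mean "leave that CTRL_IX register unchanged".
 */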
1175 
1176 /**
1177  * hal_srng_dst_set_hp_paddr_confirm() - Set physical address to dest SRNG
1178  *  head pointer and confirm that the write went through by reading it back
1179  * @sring: srng pointer
1180  * @paddr: physical address
1181  *
1182  * Return: None
1183  */
1184 extern void hal_srng_dst_set_hp_paddr_confirm(struct hal_srng *sring,
1185 					      uint64_t paddr);
1186 
1187 /**
1188  * hal_srng_dst_init_hp() - Initialize head pointer with cached head pointer
1189  * @hal_soc: hal_soc handle
1190  * @srng: sring pointer
1191  * @vaddr: virtual address
1192  */
1193 void hal_srng_dst_init_hp(struct hal_soc_handle *hal_soc,
1194 			  struct hal_srng *srng,
1195 			  uint32_t *vaddr);
1196 
1197 /**
1198  * hal_srng_cleanup - Deinitialize HW SRNG ring.
1199  * @hal_soc: Opaque HAL SOC handle
1200  * @hal_ring_hdl: Opaque HAL SRNG pointer
1201  */
1202 void hal_srng_cleanup(void *hal_soc, hal_ring_handle_t hal_ring_hdl);
1203 
1204 static inline bool hal_srng_initialized(hal_ring_handle_t hal_ring_hdl)
1205 {
1206 	struct hal_srng *srng = (struct hal_srng *)hal_ring_hdl;
1207 
1208 	return !!srng->initialized;
1209 }
1210 
1211 /**
1212  * hal_srng_dst_peek - Check if there are any entries in the ring (peek)
1213  * @hal_soc: Opaque HAL SOC handle
1214  * @hal_ring_hdl: Destination ring pointer
1215  *
1216  * Caller takes responsibility for any locking needs.
1217  *
1218  * Return: Opaque pointer for next ring entry; NULL on failure
1219  */
1220 static inline
1221 void *hal_srng_dst_peek(hal_soc_handle_t hal_soc_hdl,
1222 			hal_ring_handle_t hal_ring_hdl)
1223 {
1224 	struct hal_srng *srng = (struct hal_srng *)hal_ring_hdl;
1225 
1226 	if (srng->u.dst_ring.tp != srng->u.dst_ring.cached_hp)
1227 		return (void *)(&srng->ring_base_vaddr[srng->u.dst_ring.tp]);
1228 
1229 	return NULL;
1230 }
1231 
1232 
1233 /**
1234  * hal_mem_dma_cache_sync - Cache sync the specified virtual address Range
1235  * @hal_soc: HAL soc handle
1236  * @desc: desc start address
1237  * @entry_size: size of memory to sync
1238  *
1239  * Return: void
1240  */
1241 #if defined(__LINUX_MIPS32_ARCH__) || defined(__LINUX_MIPS64_ARCH__)
1242 static inline void hal_mem_dma_cache_sync(struct hal_soc *soc, uint32_t *desc,
1243 					  uint32_t entry_size)
1244 {
1245 	qdf_nbuf_dma_inv_range((void *)desc, (void *)(desc + entry_size));
1246 }
1247 #else
1248 static inline void hal_mem_dma_cache_sync(struct hal_soc *soc, uint32_t *desc,
1249 					  uint32_t entry_size)
1250 {
1251 	qdf_mem_dma_cache_sync(soc->qdf_dev, qdf_mem_virt_to_phys(desc),
1252 			       QDF_DMA_FROM_DEVICE,
1253 			       (entry_size * sizeof(uint32_t)));
1254 }
1255 #endif
1256 
1257 /**
1258  * hal_srng_access_start_unlocked - Start ring access (unlocked). Should use
1259  * hal_srng_access_start if locked access is required
1260  *
1261  * @hal_soc: Opaque HAL SOC handle
1262  * @hal_ring_hdl: Ring pointer (Source or Destination ring)
1263  *
1264  * This API doesn't implement any byte-order conversion on reading hp/tp.
1265  * So, use this API only for those srngs for which the target writes hp/tp
1266  * values to the DDR in the Host order.
1267  *
1268  * Return: 0 on success; error on failure
1269  */
1270 static inline int
1271 hal_srng_access_start_unlocked(hal_soc_handle_t hal_soc_hdl,
1272 			       hal_ring_handle_t hal_ring_hdl)
1273 {
1274 	struct hal_srng *srng = (struct hal_srng *)hal_ring_hdl;
1275 	struct hal_soc *soc = (struct hal_soc *)hal_soc_hdl;
1276 	uint32_t *desc;
1277 
1278 	if (srng->ring_dir == HAL_SRNG_SRC_RING)
1279 		srng->u.src_ring.cached_tp =
1280 			*(volatile uint32_t *)(srng->u.src_ring.tp_addr);
1281 	else {
1282 		srng->u.dst_ring.cached_hp =
1283 			*(volatile uint32_t *)(srng->u.dst_ring.hp_addr);
1284 
1285 		if (srng->flags & HAL_SRNG_CACHED_DESC) {
1286 			desc = hal_srng_dst_peek(hal_soc_hdl, hal_ring_hdl);
1287 			if (qdf_likely(desc)) {
1288 				hal_mem_dma_cache_sync(soc, desc,
1289 						       srng->entry_size);
1290 				qdf_prefetch(desc);
1291 			}
1292 		}
1293 	}
1294 
1295 	return 0;
1296 }
1297 
1298 /**
1299  * hal_le_srng_access_start_unlocked_in_cpu_order - Start ring access
1300  * (unlocked) with endianness correction.
1301  * @hal_soc: Opaque HAL SOC handle
1302  * @hal_ring_hdl: Ring pointer (Source or Destination ring)
1303  *
1304  * This API provides the same functionality as hal_srng_access_start_unlocked()
1305  * except that it converts the little-endian formatted hp/tp values to
1306  * Host order on reading them. So, this API should only be used for those srngs
1307  * for which the target always writes hp/tp values in little-endian order
1308  * regardless of Host order.
1309  *
1310  * Also, this API doesn't take the lock. For locked access, use
1311  * hal_srng_access_start/hal_le_srng_access_start_in_cpu_order.
1312  *
1313  * Return: 0 on success; error on failure
1314  */
1315 static inline int
1316 hal_le_srng_access_start_unlocked_in_cpu_order(
1317 	hal_soc_handle_t hal_soc_hdl,
1318 	hal_ring_handle_t hal_ring_hdl)
1319 {
1320 	struct hal_srng *srng = (struct hal_srng *)hal_ring_hdl;
1321 	struct hal_soc *soc = (struct hal_soc *)hal_soc_hdl;
1322 	uint32_t *desc;
1323 
1324 	if (srng->ring_dir == HAL_SRNG_SRC_RING)
1325 		srng->u.src_ring.cached_tp =
1326 			qdf_le32_to_cpu(*(volatile uint32_t *)
1327 					(srng->u.src_ring.tp_addr));
1328 	else {
1329 		srng->u.dst_ring.cached_hp =
1330 			qdf_le32_to_cpu(*(volatile uint32_t *)
1331 					(srng->u.dst_ring.hp_addr));
1332 
1333 		if (srng->flags & HAL_SRNG_CACHED_DESC) {
1334 			desc = hal_srng_dst_peek(hal_soc_hdl, hal_ring_hdl);
1335 			if (qdf_likely(desc)) {
1336 				hal_mem_dma_cache_sync(soc, desc,
1337 						       srng->entry_size);
1338 				qdf_prefetch(desc);
1339 			}
1340 		}
1341 	}
1342 
1343 	return 0;
1344 }
1345 
1346 /**
1347  * hal_srng_try_access_start - Try to start (locked) ring access
1348  *
1349  * @hal_soc: Opaque HAL SOC handle
1350  * @hal_ring_hdl: Ring pointer (Source or Destination ring)
1351  *
1352  * Return: 0 on success; error on failure
1353  */
1354 static inline int hal_srng_try_access_start(hal_soc_handle_t hal_soc_hdl,
1355 					    hal_ring_handle_t hal_ring_hdl)
1356 {
1357 	struct hal_srng *srng = (struct hal_srng *)hal_ring_hdl;
1358 
1359 	if (qdf_unlikely(!hal_ring_hdl)) {
1360 		qdf_print("Error: Invalid hal_ring\n");
1361 		return -EINVAL;
1362 	}
1363 
1364 	if (!SRNG_TRY_LOCK(&(srng->lock)))
1365 		return -EINVAL;
1366 
1367 	return hal_srng_access_start_unlocked(hal_soc_hdl, hal_ring_hdl);
1368 }
1369 
1370 /**
1371  * hal_srng_access_start - Start (locked) ring access
1372  *
1373  * @hal_soc: Opaque HAL SOC handle
1374  * @hal_ring_hdl: Ring pointer (Source or Destination ring)
1375  *
1376  * This API doesn't implement any byte-order conversion on reading hp/tp.
1377  * So, use this API only for those srngs for which the target writes hp/tp
1378  * values to the DDR in the Host order.
1379  *
1380  * Return: 0 on success; error on failure
1381  */
1382 static inline int hal_srng_access_start(hal_soc_handle_t hal_soc_hdl,
1383 					hal_ring_handle_t hal_ring_hdl)
1384 {
1385 	struct hal_srng *srng = (struct hal_srng *)hal_ring_hdl;
1386 
1387 	if (qdf_unlikely(!hal_ring_hdl)) {
1388 		qdf_print("Error: Invalid hal_ring\n");
1389 		return -EINVAL;
1390 	}
1391 
1392 	SRNG_LOCK(&(srng->lock));
1393 
1394 	return hal_srng_access_start_unlocked(hal_soc_hdl, hal_ring_hdl);
1395 }
1396 
1397 /**
1398  * hal_le_srng_access_start_in_cpu_order - Start (locked) ring access with
1399  * endianness correction
1400  * @hal_soc: Opaque HAL SOC handle
1401  * @hal_ring_hdl: Ring pointer (Source or Destination ring)
1402  *
1403  * This API provides the same functionality as hal_srng_access_start()
1404  * except that it converts the little-endian formatted hp/tp values to
1405  * Host order on reading them. So, this API should only be used for those srngs
1406  * for which the target always writes hp/tp values in little-endian order
1407  * regardless of Host order.
1408  *
1409  * Return: 0 on success; error on failure
1410  */
1411 static inline int
1412 hal_le_srng_access_start_in_cpu_order(
1413 	hal_soc_handle_t hal_soc_hdl,
1414 	hal_ring_handle_t hal_ring_hdl)
1415 {
1416 	struct hal_srng *srng = (struct hal_srng *)hal_ring_hdl;
1417 
1418 	if (qdf_unlikely(!hal_ring_hdl)) {
1419 		qdf_print("Error: Invalid hal_ring\n");
1420 		return -EINVAL;
1421 	}
1422 
1423 	SRNG_LOCK(&(srng->lock));
1424 
1425 	return hal_le_srng_access_start_unlocked_in_cpu_order(
1426 			hal_soc_hdl, hal_ring_hdl);
1427 }
1428 
1429 /**
1430  * hal_srng_dst_get_next - Get next entry from a destination ring
1431  * @hal_soc: Opaque HAL SOC handle
1432  * @hal_ring_hdl: Destination ring pointer
1433  *
1434  * Return: Opaque pointer for next ring entry; NULL on failure
1435  */
1436 static inline
1437 void *hal_srng_dst_get_next(void *hal_soc,
1438 			    hal_ring_handle_t hal_ring_hdl)
1439 {
1440 	struct hal_srng *srng = (struct hal_srng *)hal_ring_hdl;
1441 	uint32_t *desc;
1442 
1443 	if (srng->u.dst_ring.tp == srng->u.dst_ring.cached_hp)
1444 		return NULL;
1445 
1446 	desc = &srng->ring_base_vaddr[srng->u.dst_ring.tp];
1447 	/* TODO: Using % is expensive, but we have to do this since
1448 	 * size of some SRNG rings is not power of 2 (due to descriptor
1449 	 * sizes). Need to create separate API for rings used
1450 	 * per-packet, with sizes power of 2 (TCL2SW, REO2SW,
1451 	 * SW2RXDMA and CE rings)
1452 	 */
1453 	srng->u.dst_ring.tp = (srng->u.dst_ring.tp + srng->entry_size);
1454 	if (srng->u.dst_ring.tp == srng->ring_size)
1455 		srng->u.dst_ring.tp = 0;
1456 
1457 	if (srng->flags & HAL_SRNG_CACHED_DESC) {
1458 		struct hal_soc *soc = (struct hal_soc *)hal_soc;
1459 		uint32_t *desc_next;
1460 		uint32_t tp;
1461 
1462 		tp = srng->u.dst_ring.tp;
1463 		desc_next = &srng->ring_base_vaddr[srng->u.dst_ring.tp];
1464 		hal_mem_dma_cache_sync(soc, desc_next, srng->entry_size);
1465 		qdf_prefetch(desc_next);
1466 	}
1467 
1468 	return (void *)desc;
1469 }
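/*
 * Processing sketch (illustrative only): a typical destination-ring reap
 * loop brackets hal_srng_dst_get_next() with the access start/end calls;
 * hal_srng_access_end() is defined further down in this header,
 * process_desc() is a placeholder for the caller's handler, and
 * hal_soc/hal_soc_hdl name the same HAL SOC through its two handle types:
 *
 *	void *desc;
 *
 *	if (hal_srng_access_start(hal_soc_hdl, hal_ring_hdl))
 *		return;
 *
 *	while ((desc = hal_srng_dst_get_next(hal_soc, hal_ring_hdl)))
 *		process_desc(desc);
 *
 *	hal_srng_access_end(hal_soc_hdl, hal_ring_hdl);
 */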
1470 
1471 /**
1472  * hal_srng_dst_get_next_cached - Get cached next entry
1473  * @hal_soc: Opaque HAL SOC handle
1474  * @hal_ring_hdl: Destination ring pointer
1475  *
1476  * Get next entry from a destination ring and move cached tail pointer
1477  *
1478  * Return: Opaque pointer for next ring entry; NULL on failure
1479  */
1480 static inline
1481 void *hal_srng_dst_get_next_cached(void *hal_soc,
1482 				   hal_ring_handle_t hal_ring_hdl)
1483 {
1484 	struct hal_srng *srng = (struct hal_srng *)hal_ring_hdl;
1485 	uint32_t *desc;
1486 	uint32_t *desc_next;
1487 
1488 	if (srng->u.dst_ring.tp == srng->u.dst_ring.cached_hp)
1489 		return NULL;
1490 
1491 	desc = &srng->ring_base_vaddr[srng->u.dst_ring.tp];
1492 	/* TODO: Using % is expensive, but we have to do this since
1493 	 * size of some SRNG rings is not power of 2 (due to descriptor
1494 	 * sizes). Need to create separate API for rings used
1495 	 * per-packet, with sizes power of 2 (TCL2SW, REO2SW,
1496 	 * SW2RXDMA and CE rings)
1497 	 */
1498 	srng->u.dst_ring.tp = (srng->u.dst_ring.tp + srng->entry_size);
1499 	if (srng->u.dst_ring.tp == srng->ring_size)
1500 		srng->u.dst_ring.tp = 0;
1501 
1502 	desc_next = &srng->ring_base_vaddr[srng->u.dst_ring.tp];
1503 	qdf_prefetch(desc_next);
1504 	return (void *)desc;
1505 }
1506 
1507 /**
1508  * hal_srng_dst_dec_tp - decrement the TP of the Dst ring by one entry
1509  * @hal_soc: Opaque HAL SOC handle
1510  * @hal_ring_hdl: Destination ring pointer
1511  *
1512  * reset the tail pointer in the destination ring by one entry
1513  *
1514  */
1515 static inline
1516 void hal_srng_dst_dec_tp(void *hal_soc, hal_ring_handle_t hal_ring_hdl)
1517 {
1518 	struct hal_srng *srng = (struct hal_srng *)hal_ring_hdl;
1519 
1520 	if (qdf_unlikely(!srng->u.dst_ring.tp))
1521 		srng->u.dst_ring.tp = (srng->ring_size - srng->entry_size);
1522 	else
1523 		srng->u.dst_ring.tp -= srng->entry_size;
1524 }
1525 
1526 static inline int hal_srng_lock(hal_ring_handle_t hal_ring_hdl)
1527 {
1528 	struct hal_srng *srng = (struct hal_srng *)hal_ring_hdl;
1529 
1530 	if (qdf_unlikely(!hal_ring_hdl)) {
1531 		qdf_print("error: invalid hal_ring\n");
1532 		return -EINVAL;
1533 	}
1534 
1535 	SRNG_LOCK(&(srng->lock));
1536 	return 0;
1537 }
1538 
1539 static inline int hal_srng_unlock(hal_ring_handle_t hal_ring_hdl)
1540 {
1541 	struct hal_srng *srng = (struct hal_srng *)hal_ring_hdl;
1542 
1543 	if (qdf_unlikely(!hal_ring_hdl)) {
1544 		qdf_print("error: invalid hal_ring\n");
1545 		return -EINVAL;
1546 	}
1547 
1548 	SRNG_UNLOCK(&(srng->lock));
1549 	return 0;
1550 }
1551 
1552 /**
1553  * hal_srng_dst_get_next_hp - Get next entry from a destination ring and move
1554  * cached head pointer
1555  *
1556  * @hal_soc: Opaque HAL SOC handle
1557  * @hal_ring_hdl: Destination ring pointer
1558  *
1559  * Return: Opaque pointer for next ring entry; NULL on failure
1560  */
1561 static inline void *
1562 hal_srng_dst_get_next_hp(hal_soc_handle_t hal_soc_hdl,
1563 			 hal_ring_handle_t hal_ring_hdl)
1564 {
1565 	struct hal_srng *srng = (struct hal_srng *)hal_ring_hdl;
1566 	uint32_t *desc;
1567 	/* TODO: Using % is expensive, but we have to do this since
1568 	 * size of some SRNG rings is not power of 2 (due to descriptor
1569 	 * sizes). Need to create separate API for rings used
1570 	 * per-packet, with sizes power of 2 (TCL2SW, REO2SW,
1571 	 * SW2RXDMA and CE rings)
1572 	 */
1573 	uint32_t next_hp = (srng->u.dst_ring.cached_hp + srng->entry_size) %
1574 		srng->ring_size;
1575 
1576 	if (next_hp != srng->u.dst_ring.tp) {
1577 		desc = &(srng->ring_base_vaddr[srng->u.dst_ring.cached_hp]);
1578 		srng->u.dst_ring.cached_hp = next_hp;
1579 		return (void *)desc;
1580 	}
1581 
1582 	return NULL;
1583 }
1584 
1585 /**
1586  * hal_srng_dst_peek_sync - Check if there are any entries in the ring (peek)
1587  * @hal_soc: Opaque HAL SOC handle
1588  * @hal_ring_hdl: Destination ring pointer
1589  *
1590  * Sync cached head pointer with HW.
1591  * Caller takes responsibility for any locking needs.
1592  *
1593  * Return: Opaque pointer for next ring entry; NULL on failure
1594  */
1595 static inline
1596 void *hal_srng_dst_peek_sync(hal_soc_handle_t hal_soc_hdl,
1597 			     hal_ring_handle_t hal_ring_hdl)
1598 {
1599 	struct hal_srng *srng = (struct hal_srng *)hal_ring_hdl;
1600 
1601 	srng->u.dst_ring.cached_hp =
1602 		*(volatile uint32_t *)(srng->u.dst_ring.hp_addr);
1603 
1604 	if (srng->u.dst_ring.tp != srng->u.dst_ring.cached_hp)
1605 		return (void *)(&(srng->ring_base_vaddr[srng->u.dst_ring.tp]));
1606 
1607 	return NULL;
1608 }
1609 
1610 /**
1611  * hal_srng_dst_peek_sync_locked - Peek for any entries in the ring
1612  * @hal_soc: Opaque HAL SOC handle
1613  * @hal_ring_hdl: Destination ring pointer
1614  *
1615  * Sync cached head pointer with HW.
1616  * This function takes up SRNG_LOCK. Should not be called with SRNG lock held.
1617  *
1618  * Return: Opaque pointer for next ring entry; NULL on failure
1619  */
1620 static inline
1621 void *hal_srng_dst_peek_sync_locked(hal_soc_handle_t hal_soc_hdl,
1622 				    hal_ring_handle_t hal_ring_hdl)
1623 {
1624 	struct hal_srng *srng = (struct hal_srng *)hal_ring_hdl;
1625 	void *ring_desc_ptr = NULL;
1626 
1627 	if (qdf_unlikely(!hal_ring_hdl)) {
1628 		qdf_print("Error: Invalid hal_ring\n");
1629 		return  NULL;
1630 	}
1631 
1632 	SRNG_LOCK(&srng->lock);
1633 
1634 	ring_desc_ptr = hal_srng_dst_peek_sync(hal_soc_hdl, hal_ring_hdl);
1635 
1636 	SRNG_UNLOCK(&srng->lock);
1637 
1638 	return ring_desc_ptr;
1639 }
1640 
1641 #define hal_srng_dst_num_valid_nolock(hal_soc, hal_ring_hdl, sync_hw_ptr) \
1642 		hal_srng_dst_num_valid(hal_soc, hal_ring_hdl, sync_hw_ptr)
1643 
1644 /**
1645  * hal_srng_dst_num_valid - Returns number of valid entries (to be processed
1646  * by SW) in destination ring
1647  *
1648  * @hal_soc: Opaque HAL SOC handle
1649  * @hal_ring_hdl: Destination ring pointer
1650  * @sync_hw_ptr: Sync cached head pointer with HW
1651  *
1652  */
1653 static inline
1654 uint32_t hal_srng_dst_num_valid(void *hal_soc,
1655 				hal_ring_handle_t hal_ring_hdl,
1656 				int sync_hw_ptr)
1657 {
1658 	struct hal_srng *srng = (struct hal_srng *)hal_ring_hdl;
1659 	uint32_t hp;
1660 	uint32_t tp = srng->u.dst_ring.tp;
1661 
1662 	if (sync_hw_ptr) {
1663 		hp = *(volatile uint32_t *)(srng->u.dst_ring.hp_addr);
1664 		srng->u.dst_ring.cached_hp = hp;
1665 	} else {
1666 		hp = srng->u.dst_ring.cached_hp;
1667 	}
1668 
1669 	if (hp >= tp)
1670 		return (hp - tp) / srng->entry_size;
1671 
1672 	return (srng->ring_size - tp + hp) / srng->entry_size;
1673 }
1674 
1675 /**
1676  * hal_srng_dst_inv_cached_descs - API to invalidate descriptors in batch mode
1677  * @hal_soc: Opaque HAL SOC handle
1678  * @hal_ring_hdl: Destination ring pointer
1679  * @entry_count: number of valid entries to invalidate
1680  *
1681  * Invalidates a set of cached descriptors starting from TP to cached_HP
1682  *
1683  * Return: None
1684  */
1685 static inline void hal_srng_dst_inv_cached_descs(void *hal_soc,
1686 						 hal_ring_handle_t hal_ring_hdl,
1687 						 uint32_t entry_count)
1688 {
1689 	struct hal_srng *srng = (struct hal_srng *)hal_ring_hdl;
1690 	uint32_t *first_desc;
1691 	uint32_t *last_desc;
1692 	uint32_t last_desc_index;
1693 
1694 	/*
1695 	 * If SRNG does not have cached descriptors this
1696 	 * API call should be a no op
1697 	 */
1698 	if (!(srng->flags & HAL_SRNG_CACHED_DESC))
1699 		return;
1700 
1701 	if (!entry_count)
1702 		return;
1703 
1704 	first_desc = &srng->ring_base_vaddr[srng->u.dst_ring.tp];
1705 
1706 	last_desc_index = (srng->u.dst_ring.tp +
1707 			   (entry_count * srng->entry_size)) %
1708 			  srng->ring_size;
1709 
1710 	last_desc =  &srng->ring_base_vaddr[last_desc_index];
1711 
1712 	if (last_desc > (uint32_t *)first_desc)
1713 		/* invalidate from tp to cached_hp */
1714 		qdf_nbuf_dma_inv_range_no_dsb((void *)first_desc,
1715 					      (void *)(last_desc));
1716 	else {
1717 		/* invalidate from tp to end of the ring */
1718 		qdf_nbuf_dma_inv_range_no_dsb((void *)first_desc,
1719 					      (void *)srng->ring_vaddr_end);
1720 
1721 		/* invalidate from start of ring to cached_hp */
1722 		qdf_nbuf_dma_inv_range_no_dsb((void *)srng->ring_base_vaddr,
1723 					      (void *)last_desc);
1724 	}
1725 	qdf_dsb();
1726 }
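
/*
 * Illustrative usage sketch: a caller that processes cached descriptors in
 * batch mode might first sync the head pointer, invalidate the valid range
 * and only then walk the entries. "hal_soc" and "ring_hdl" are assumed to
 * be valid handles, and dp_rx_process_one() is a hypothetical caller-side
 * helper:
 *
 *	uint32_t n = hal_srng_dst_num_valid(hal_soc, ring_hdl, 1);
 *
 *	hal_srng_dst_inv_cached_descs(hal_soc, ring_hdl, n);
 *	while (n--)
 *		dp_rx_process_one(hal_soc, ring_hdl);
 */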
1727 
1728 /**
1729  * hal_srng_dst_num_valid_locked - Returns num valid entries to be processed
1730  *
1731  * @hal_soc: Opaque HAL SOC handle
1732  * @hal_ring_hdl: Destination ring pointer
1733  * @sync_hw_ptr: Sync cached head pointer with HW
1734  *
1735  * Returns number of valid entries to be processed by the host driver. The
1736  * function takes up SRNG lock.
1737  *
1738  * Return: Number of valid destination entries
1739  */
1740 static inline uint32_t
1741 hal_srng_dst_num_valid_locked(hal_soc_handle_t hal_soc,
1742 			      hal_ring_handle_t hal_ring_hdl,
1743 			      int sync_hw_ptr)
1744 {
1745 	uint32_t num_valid;
1746 	struct hal_srng *srng = (struct hal_srng *)hal_ring_hdl;
1747 
1748 	SRNG_LOCK(&srng->lock);
1749 	num_valid = hal_srng_dst_num_valid(hal_soc, hal_ring_hdl, sync_hw_ptr);
1750 	SRNG_UNLOCK(&srng->lock);
1751 
1752 	return num_valid;
1753 }
1754 
1755 /**
1756  * hal_srng_sync_cachedhp - sync cached head pointer with the HW head pointer
1757  *
1758  * @hal_soc: Opaque HAL SOC handle
1759  * @hal_ring_hdl: Destination ring pointer
1760  *
1761  */
1762 static inline
1763 void hal_srng_sync_cachedhp(void *hal_soc,
1764 				hal_ring_handle_t hal_ring_hdl)
1765 {
1766 	struct hal_srng *srng = (struct hal_srng *)hal_ring_hdl;
1767 	uint32_t hp;
1768 
1769 	hp = *(volatile uint32_t *)(srng->u.dst_ring.hp_addr);
1770 	srng->u.dst_ring.cached_hp = hp;
1771 }
1772 
1773 /**
1774  * hal_srng_src_reap_next - Reap next entry from a source ring and move reap
1775  * pointer. This can be used to release any buffers associated with completed
1776  * ring entries. Note that this should not be used for posting new descriptor
1777  * entries. Posting of new entries should be done only using
1778  * hal_srng_src_get_next_reaped when this function is used for reaping.
1779  *
1780  * @hal_soc: Opaque HAL SOC handle
1781  * @hal_ring_hdl: Source ring pointer
1782  *
1783  * Return: Opaque pointer for next ring entry; NULL on failure
1784  */
1785 static inline void *
1786 hal_srng_src_reap_next(void *hal_soc, hal_ring_handle_t hal_ring_hdl)
1787 {
1788 	struct hal_srng *srng = (struct hal_srng *)hal_ring_hdl;
1789 	uint32_t *desc;
1790 
1791 	/* TODO: Using % is expensive, but we have to do this since
1792 	 * size of some SRNG rings is not power of 2 (due to descriptor
1793 	 * sizes). Need to create separate API for rings used
1794 	 * per-packet, with sizes power of 2 (TCL2SW, REO2SW,
1795 	 * SW2RXDMA and CE rings)
1796 	 */
1797 	uint32_t next_reap_hp = (srng->u.src_ring.reap_hp + srng->entry_size) %
1798 		srng->ring_size;
1799 
1800 	if (next_reap_hp != srng->u.src_ring.cached_tp) {
1801 		desc = &(srng->ring_base_vaddr[next_reap_hp]);
1802 		srng->u.src_ring.reap_hp = next_reap_hp;
1803 		return (void *)desc;
1804 	}
1805 
1806 	return NULL;
1807 }
1808 
1809 /**
1810  * hal_srng_src_get_next_reaped - Get next entry from a source ring that is
1811  * already reaped using hal_srng_src_reap_next, for posting new entries to
1812  * the ring
1813  *
1814  * @hal_soc: Opaque HAL SOC handle
1815  * @hal_ring_hdl: Source ring pointer
1816  *
1817  * Return: Opaque pointer for next (reaped) source ring entry; NULL on failure
1818  */
1819 static inline void *
1820 hal_srng_src_get_next_reaped(void *hal_soc, hal_ring_handle_t hal_ring_hdl)
1821 {
1822 	struct hal_srng *srng = (struct hal_srng *)hal_ring_hdl;
1823 	uint32_t *desc;
1824 
1825 	if (srng->u.src_ring.hp != srng->u.src_ring.reap_hp) {
1826 		desc = &(srng->ring_base_vaddr[srng->u.src_ring.hp]);
1827 		srng->u.src_ring.hp = (srng->u.src_ring.hp + srng->entry_size) %
1828 			srng->ring_size;
1829 
1830 		return (void *)desc;
1831 	}
1832 
1833 	return NULL;
1834 }
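
/*
 * Illustrative usage sketch of the reap/post split described above: buffers
 * are released with hal_srng_src_reap_next() and the same entries are later
 * re-posted with hal_srng_src_get_next_reaped(). "hal_soc", "ring_hdl",
 * dp_release_buf() and dp_fill_desc() are assumed caller-side names:
 *
 *	void *desc;
 *
 *	while ((desc = hal_srng_src_reap_next(hal_soc, ring_hdl)))
 *		dp_release_buf(desc);
 *
 *	while ((desc = hal_srng_src_get_next_reaped(hal_soc, ring_hdl)))
 *		dp_fill_desc(desc);
 */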
1835 
1836 /**
1837  * hal_srng_src_pending_reap_next - Reap next entry from a source ring and
1838  * move reap pointer. This API is used in detach path to release any buffers
1839  * associated with ring entries which are pending reap.
1840  *
1841  * @hal_soc: Opaque HAL SOC handle
1842  * @hal_ring_hdl: Source ring pointer
1843  *
1844  * Return: Opaque pointer for next ring entry; NULL on failure
1845  */
1846 static inline void *
1847 hal_srng_src_pending_reap_next(void *hal_soc, hal_ring_handle_t hal_ring_hdl)
1848 {
1849 	struct hal_srng *srng = (struct hal_srng *)hal_ring_hdl;
1850 	uint32_t *desc;
1851 
1852 	uint32_t next_reap_hp = (srng->u.src_ring.reap_hp + srng->entry_size) %
1853 		srng->ring_size;
1854 
1855 	if (next_reap_hp != srng->u.src_ring.hp) {
1856 		desc = &(srng->ring_base_vaddr[next_reap_hp]);
1857 		srng->u.src_ring.reap_hp = next_reap_hp;
1858 		return (void *)desc;
1859 	}
1860 
1861 	return NULL;
1862 }
1863 
1864 /**
1865  * hal_srng_src_done_val - Returns the number of source ring entries that HW
1866  * has consumed and that are pending reap by SW
1867  * @hal_soc: Opaque HAL SOC handle
1868  * @hal_ring_hdl: Source ring pointer
1869  *
1870  * Return: Number of ring entries completed by HW and not yet reaped by SW
1871  */
1872 static inline uint32_t
1873 hal_srng_src_done_val(void *hal_soc, hal_ring_handle_t hal_ring_hdl)
1874 {
1875 	struct hal_srng *srng = (struct hal_srng *)hal_ring_hdl;
1876 	/* TODO: Using % is expensive, but we have to do this since
1877 	 * size of some SRNG rings is not power of 2 (due to descriptor
1878 	 * sizes). Need to create separate API for rings used
1879 	 * per-packet, with sizes power of 2 (TCL2SW, REO2SW,
1880 	 * SW2RXDMA and CE rings)
1881 	 */
1882 	uint32_t next_reap_hp = (srng->u.src_ring.reap_hp + srng->entry_size) %
1883 		srng->ring_size;
1884 
1885 	if (next_reap_hp == srng->u.src_ring.cached_tp)
1886 		return 0;
1887 
1888 	if (srng->u.src_ring.cached_tp > next_reap_hp)
1889 		return (srng->u.src_ring.cached_tp - next_reap_hp) /
1890 			srng->entry_size;
1891 	else
1892 		return ((srng->ring_size - next_reap_hp) +
1893 			srng->u.src_ring.cached_tp) / srng->entry_size;
1894 }
1895 
1896 /**
1897  * hal_get_entrysize_from_srng() - Retrieve ring entry size
1898  * @hal_ring_hdl: Source ring pointer
1899  *
1900  * srng->entry_size is in units of 4-byte words, so it is left-shifted
1901  * by 2 to return the entry size in bytes.
1902  *
1903  * Return: entry size in bytes
1904  */
1905 static inline
1906 uint8_t hal_get_entrysize_from_srng(hal_ring_handle_t hal_ring_hdl)
1907 {
1908 	struct hal_srng *srng = (struct hal_srng *)hal_ring_hdl;
1909 
1910 	return srng->entry_size << 2;
1911 }
1912 
1913 /**
1914  * hal_get_sw_hptp - Get SW head and tail pointer location for any ring
1915  * @hal_soc: Opaque HAL SOC handle
1916  * @hal_ring_hdl: Source ring pointer
1917  * @tailp: Tail Pointer
1918  * @headp: Head Pointer
1919  *
1920  * Return: None. Tail and head pointer values are updated in the arguments.
1921  */
1922 static inline
1923 void hal_get_sw_hptp(void *hal_soc, hal_ring_handle_t hal_ring_hdl,
1924 		     uint32_t *tailp, uint32_t *headp)
1925 {
1926 	struct hal_srng *srng = (struct hal_srng *)hal_ring_hdl;
1927 
1928 	if (srng->ring_dir == HAL_SRNG_SRC_RING) {
1929 		*headp = srng->u.src_ring.hp;
1930 		*tailp = *srng->u.src_ring.tp_addr;
1931 	} else {
1932 		*tailp = srng->u.dst_ring.tp;
1933 		*headp = *srng->u.dst_ring.hp_addr;
1934 	}
1935 }
1936 
1937 #if defined(CLEAR_SW2TCL_CONSUMED_DESC)
1938 /**
1939  * hal_srng_src_get_next_consumed - Get the next desc if consumed by HW
1940  *
1941  * @hal_soc: Opaque HAL SOC handle
1942  * @hal_ring_hdl: Source ring pointer
1943  *
1944  * Return: pointer to descriptor if consumed by HW, else NULL
1945  */
1946 static inline
1947 void *hal_srng_src_get_next_consumed(void *hal_soc,
1948 				     hal_ring_handle_t hal_ring_hdl)
1949 {
1950 	struct hal_srng *srng = (struct hal_srng *)hal_ring_hdl;
1951 	uint32_t *desc = NULL;
1952 	/* TODO: Using % is expensive, but we have to do this since
1953 	 * size of some SRNG rings is not power of 2 (due to descriptor
1954 	 * sizes). Need to create separate API for rings used
1955 	 * per-packet, with sizes power of 2 (TCL2SW, REO2SW,
1956 	 * SW2RXDMA and CE rings)
1957 	 */
1958 	uint32_t next_entry = (srng->last_desc_cleared + srng->entry_size) %
1959 			      srng->ring_size;
1960 
1961 	if (next_entry != srng->u.src_ring.cached_tp) {
1962 		desc = &srng->ring_base_vaddr[next_entry];
1963 		srng->last_desc_cleared = next_entry;
1964 	}
1965 
1966 	return desc;
1967 }
1968 
1969 #else
1970 static inline
1971 void *hal_srng_src_get_next_consumed(void *hal_soc,
1972 				     hal_ring_handle_t hal_ring_hdl)
1973 {
1974 	return NULL;
1975 }
1976 #endif /* CLEAR_SW2TCL_CONSUMED_DESC */
1977 
1978 /**
1979  * hal_srng_src_get_next - Get next entry from a source ring and move cached head pointer
1980  *
1981  * @hal_soc: Opaque HAL SOC handle
1982  * @hal_ring_hdl: Source ring pointer
1983  *
1984  * Return: Opaque pointer for next ring entry; NULL on failure
1985  */
1986 static inline
1987 void *hal_srng_src_get_next(void *hal_soc,
1988 			    hal_ring_handle_t hal_ring_hdl)
1989 {
1990 	struct hal_srng *srng = (struct hal_srng *)hal_ring_hdl;
1991 	uint32_t *desc;
1992 	/* TODO: Using % is expensive, but we have to do this since
1993 	 * size of some SRNG rings is not power of 2 (due to descriptor
1994 	 * sizes). Need to create separate API for rings used
1995 	 * per-packet, with sizes power of 2 (TCL2SW, REO2SW,
1996 	 * SW2RXDMA and CE rings)
1997 	 */
1998 	uint32_t next_hp = (srng->u.src_ring.hp + srng->entry_size) %
1999 		srng->ring_size;
2000 
2001 	if (next_hp != srng->u.src_ring.cached_tp) {
2002 		desc = &(srng->ring_base_vaddr[srng->u.src_ring.hp]);
2003 		srng->u.src_ring.hp = next_hp;
2004 		/* TODO: Since reap function is not used by all rings, we can
2005 		 * remove the following update of reap_hp in this function
2006 		 * if we can ensure that only hal_srng_src_get_next_reaped
2007 		 * is used for the rings requiring reap functionality
2008 		 */
2009 		srng->u.src_ring.reap_hp = next_hp;
2010 		return (void *)desc;
2011 	}
2012 
2013 	return NULL;
2014 }
2015 
2016 /**
2017  * hal_srng_src_peek_n_get_next - Get next entry from a ring without
2018  * moving head pointer.
2019  * hal_srng_src_get_next should be called subsequently to move the head pointer
2020  *
2021  * @hal_soc: Opaque HAL SOC handle
2022  * @hal_ring_hdl: Source ring pointer
2023  *
2024  * Return: Opaque pointer for next ring entry; NULL on failure
2025  */
2026 static inline
2027 void *hal_srng_src_peek_n_get_next(hal_soc_handle_t hal_soc_hdl,
2028 				   hal_ring_handle_t hal_ring_hdl)
2029 {
2030 	struct hal_srng *srng = (struct hal_srng *)hal_ring_hdl;
2031 	uint32_t *desc;
2032 
2033 	/* TODO: Using % is expensive, but we have to do this since
2034 	 * size of some SRNG rings is not power of 2 (due to descriptor
2035 	 * sizes). Need to create separate API for rings used
2036 	 * per-packet, with sizes power of 2 (TCL2SW, REO2SW,
2037 	 * SW2RXDMA and CE rings)
2038 	 */
2039 	if (((srng->u.src_ring.hp + srng->entry_size) %
2040 		srng->ring_size) != srng->u.src_ring.cached_tp) {
2041 		desc = &(srng->ring_base_vaddr[(srng->u.src_ring.hp +
2042 						srng->entry_size) %
2043 						srng->ring_size]);
2044 		return (void *)desc;
2045 	}
2046 
2047 	return NULL;
2048 }
2049 
2050 /**
2051  * hal_srng_src_peek_n_get_next_next - Get next to next, i.e., HP + 2 entry
2052  * from a ring without moving head pointer.
2053  *
2054  * @hal_soc: Opaque HAL SOC handle
2055  * @hal_ring_hdl: Source ring pointer
2056  *
2057  * Return: Opaque pointer for next to next ring entry; NULL on failure
2058  */
2059 static inline
2060 void *hal_srng_src_peek_n_get_next_next(hal_soc_handle_t hal_soc_hdl,
2061 					hal_ring_handle_t hal_ring_hdl)
2062 {
2063 	struct hal_srng *srng = (struct hal_srng *)hal_ring_hdl;
2064 	uint32_t *desc;
2065 
2066 	/* TODO: Using % is expensive, but we have to do this since
2067 	 * size of some SRNG rings is not power of 2 (due to descriptor
2068 	 * sizes). Need to create separate API for rings used
2069 	 * per-packet, with sizes power of 2 (TCL2SW, REO2SW,
2070 	 * SW2RXDMA and CE rings)
2071 	 */
2072 	if ((((srng->u.src_ring.hp + (srng->entry_size)) %
2073 		srng->ring_size) != srng->u.src_ring.cached_tp) &&
2074 	    (((srng->u.src_ring.hp + (srng->entry_size * 2)) %
2075 		srng->ring_size) != srng->u.src_ring.cached_tp)) {
2076 		desc = &(srng->ring_base_vaddr[(srng->u.src_ring.hp +
2077 						(srng->entry_size * 2)) %
2078 						srng->ring_size]);
2079 		return (void *)desc;
2080 	}
2081 
2082 	return NULL;
2083 }
2084 
2085 /**
2086  * hal_srng_src_get_cur_hp_n_move_next() - API that returns current HP
2087  * and moves HP to the next entry in the source ring
2088  *
2089  * Usage: This API should only be used for init-time replenish.
2090  *
2091  * @hal_soc_hdl: HAL soc handle
2092  * @hal_ring_hdl: Source ring pointer
2093  * Return: Opaque pointer to the ring entry at the current HP
2094  */
2095 static inline void *
2096 hal_srng_src_get_cur_hp_n_move_next(hal_soc_handle_t hal_soc_hdl,
2097 				    hal_ring_handle_t hal_ring_hdl)
2098 {
2099 	struct hal_srng *srng = (struct hal_srng *)hal_ring_hdl;
2100 	uint32_t *cur_desc = NULL;
2101 	uint32_t next_hp;
2102 
2103 	cur_desc = &srng->ring_base_vaddr[(srng->u.src_ring.hp)];
2104 
2105 	next_hp = (srng->u.src_ring.hp + srng->entry_size) %
2106 		srng->ring_size;
2107 
2108 	if (next_hp != srng->u.src_ring.cached_tp)
2109 		srng->u.src_ring.hp = next_hp;
2110 
2111 	return (void *)cur_desc;
2112 }
2113 
2114 /**
2115  * hal_srng_src_num_avail - Returns number of available entries in src ring
2116  *
2117  * @hal_soc: Opaque HAL SOC handle
2118  * @hal_ring_hdl: Source ring pointer
2119  * @sync_hw_ptr: Sync cached tail pointer with HW
2120  * Return: Number of available source ring entries
2121  */
2122 static inline uint32_t
2123 hal_srng_src_num_avail(void *hal_soc,
2124 		       hal_ring_handle_t hal_ring_hdl, int sync_hw_ptr)
2125 {
2126 	struct hal_srng *srng = (struct hal_srng *)hal_ring_hdl;
2127 	uint32_t tp;
2128 	uint32_t hp = srng->u.src_ring.hp;
2129 
2130 	if (sync_hw_ptr) {
2131 		tp = *(srng->u.src_ring.tp_addr);
2132 		srng->u.src_ring.cached_tp = tp;
2133 	} else {
2134 		tp = srng->u.src_ring.cached_tp;
2135 	}
2136 
2137 	if (tp > hp)
2138 		return ((tp - hp) / srng->entry_size) - 1;
2139 	else
2140 		return ((srng->ring_size - hp + tp) / srng->entry_size) - 1;
2141 }
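
/*
 * Worked example (illustrative only): for a source ring with
 * ring_size = 512 words and entry_size = 8 words, hp = 488 and tp = 8 takes
 * the else branch: ((512 - 488 + 8) / 8) - 1 = 3 entries can still be
 * posted. One entry is always left unused so that HP never catches up with
 * TP.
 */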
2142 
2143 #ifdef WLAN_DP_SRNG_USAGE_WM_TRACKING
2144 /**
2145  * hal_srng_clear_ring_usage_wm_locked() - Clear SRNG usage watermark stats
2146  * @hal_soc_hdl: HAL soc handle
2147  * @hal_ring_hdl: SRNG handle
2148  *
2149  * This function tries to acquire SRNG lock, and hence should not be called
2150  * from a context which has already acquired the SRNG lock.
2151  *
2152  * Return: None
2153  */
2154 static inline
2155 void hal_srng_clear_ring_usage_wm_locked(hal_soc_handle_t hal_soc_hdl,
2156 					 hal_ring_handle_t hal_ring_hdl)
2157 {
2158 	struct hal_srng *srng = (struct hal_srng *)hal_ring_hdl;
2159 
2160 	SRNG_LOCK(&srng->lock);
2161 	srng->high_wm.val = 0;
2162 	srng->high_wm.timestamp = 0;
2163 	qdf_mem_zero(&srng->high_wm.bins[0], sizeof(srng->high_wm.bins[0]) *
2164 					     HAL_SRNG_HIGH_WM_BIN_MAX);
2165 	SRNG_UNLOCK(&srng->lock);
2166 }
2167 
2168 /**
2169  * hal_srng_update_ring_usage_wm_no_lock() - Update the SRNG usage wm stats
2170  * @hal_soc_hdl: HAL soc handle
2171  * @hal_ring_hdl: SRNG handle
2172  *
2173  * This function should be called with the SRNG lock held.
2174  *
2175  * Return: None
2176  */
2177 static inline
2178 void hal_srng_update_ring_usage_wm_no_lock(hal_soc_handle_t hal_soc_hdl,
2179 					   hal_ring_handle_t hal_ring_hdl)
2180 {
2181 	struct hal_srng *srng = (struct hal_srng *)hal_ring_hdl;
2182 	uint32_t curr_wm_val = 0;
2183 
2184 	if (srng->ring_dir == HAL_SRNG_SRC_RING)
2185 		curr_wm_val = hal_srng_src_num_avail(hal_soc_hdl, hal_ring_hdl,
2186 						     0);
2187 	else
2188 		curr_wm_val = hal_srng_dst_num_valid(hal_soc_hdl, hal_ring_hdl,
2189 						     0);
2190 
2191 	if (curr_wm_val > srng->high_wm.val) {
2192 		srng->high_wm.val = curr_wm_val;
2193 		srng->high_wm.timestamp = qdf_get_system_timestamp();
2194 	}
2195 
2196 	if (curr_wm_val >=
2197 		srng->high_wm.bin_thresh[HAL_SRNG_HIGH_WM_BIN_90_to_100])
2198 		srng->high_wm.bins[HAL_SRNG_HIGH_WM_BIN_90_to_100]++;
2199 	else if (curr_wm_val >=
2200 		 srng->high_wm.bin_thresh[HAL_SRNG_HIGH_WM_BIN_80_to_90])
2201 		srng->high_wm.bins[HAL_SRNG_HIGH_WM_BIN_80_to_90]++;
2202 	else if (curr_wm_val >=
2203 		 srng->high_wm.bin_thresh[HAL_SRNG_HIGH_WM_BIN_70_to_80])
2204 		srng->high_wm.bins[HAL_SRNG_HIGH_WM_BIN_70_to_80]++;
2205 	else if (curr_wm_val >=
2206 		 srng->high_wm.bin_thresh[HAL_SRNG_HIGH_WM_BIN_60_to_70])
2207 		srng->high_wm.bins[HAL_SRNG_HIGH_WM_BIN_60_to_70]++;
2208 	else if (curr_wm_val >=
2209 		 srng->high_wm.bin_thresh[HAL_SRNG_HIGH_WM_BIN_50_to_60])
2210 		srng->high_wm.bins[HAL_SRNG_HIGH_WM_BIN_50_to_60]++;
2211 	else
2212 		srng->high_wm.bins[HAL_SRNG_HIGH_WM_BIN_BELOW_50_PERCENT]++;
2213 }
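
/*
 * Illustrative usage sketch: the no-lock variant is meant to be called from
 * a path that already holds the SRNG lock, e.g. right after starting ring
 * access. "hal_soc" and "ring_hdl" are assumed valid handles:
 *
 *	if (hal_srng_access_start(hal_soc, ring_hdl))
 *		return;
 *	hal_srng_update_ring_usage_wm_no_lock(hal_soc, ring_hdl);
 *	... process ring entries ...
 *	hal_srng_access_end(hal_soc, ring_hdl);
 */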
2214 
2215 static inline
2216 int hal_dump_srng_high_wm_stats(hal_soc_handle_t hal_soc_hdl,
2217 				hal_ring_handle_t hal_ring_hdl,
2218 				char *buf, int buf_len, int pos)
2219 {
2220 	struct hal_srng *srng = (struct hal_srng *)hal_ring_hdl;
2221 
2222 	return qdf_scnprintf(buf + pos, buf_len - pos,
2223 			     "%8u %7u %12llu %10u %10u %10u %10u %10u %10u",
2224 			     srng->ring_id, srng->high_wm.val,
2225 			     srng->high_wm.timestamp,
2226 			     srng->high_wm.bins[HAL_SRNG_HIGH_WM_BIN_BELOW_50_PERCENT],
2227 			     srng->high_wm.bins[HAL_SRNG_HIGH_WM_BIN_50_to_60],
2228 			     srng->high_wm.bins[HAL_SRNG_HIGH_WM_BIN_60_to_70],
2229 			     srng->high_wm.bins[HAL_SRNG_HIGH_WM_BIN_70_to_80],
2230 			     srng->high_wm.bins[HAL_SRNG_HIGH_WM_BIN_80_to_90],
2231 			     srng->high_wm.bins[HAL_SRNG_HIGH_WM_BIN_90_to_100]);
2232 }
2233 #else
2234 /**
2235  * hal_srng_clear_ring_usage_wm_locked() - Clear SRNG usage watermark stats
2236  * @hal_soc_hdl: HAL soc handle
2237  * @hal_ring_hdl: SRNG handle
2238  *
2239  * This function tries to acquire SRNG lock, and hence should not be called
2240  * from a context which has already acquired the SRNG lock.
2241  *
2242  * Return: None
2243  */
2244 static inline
2245 void hal_srng_clear_ring_usage_wm_locked(hal_soc_handle_t hal_soc_hdl,
2246 					 hal_ring_handle_t hal_ring_hdl)
2247 {
2248 }
2249 
2250 /**
2251  * hal_srng_update_ring_usage_wm_no_lock() - Update the SRNG usage wm stats
2252  * @hal_soc_hdl: HAL soc handle
2253  * @hal_ring_hdl: SRNG handle
2254  *
2255  * This function should be called with the SRNG lock held.
2256  *
2257  * Return: None
2258  */
2259 static inline
2260 void hal_srng_update_ring_usage_wm_no_lock(hal_soc_handle_t hal_soc_hdl,
2261 					   hal_ring_handle_t hal_ring_hdl)
2262 {
2263 }
2264 
2265 static inline
2266 int hal_dump_srng_high_wm_stats(hal_soc_handle_t hal_soc_hdl,
2267 				hal_ring_handle_t hal_ring_hdl,
2268 				char *buf, int buf_len, int pos)
2269 {
2270 	return 0;
2271 }
2272 #endif
2273 
2274 /**
2275  * hal_srng_access_end_unlocked - End ring access (unlocked) - update cached
2276  * ring head/tail pointers to HW.
2277  *
2278  * @hal_soc: Opaque HAL SOC handle
2279  * @hal_ring_hdl: Ring pointer (Source or Destination ring)
2280  *
2281  * The target expects the cached head/tail pointer to be updated to the
2282  * shared location in little-endian order; this API ensures that.
2283  * This API should be used only if hal_srng_access_start_unlocked was used to
2284  * start ring access
2285  *
2286  * Return: None
2287  */
2288 static inline void
2289 hal_srng_access_end_unlocked(void *hal_soc, hal_ring_handle_t hal_ring_hdl)
2290 {
2291 	struct hal_srng *srng = (struct hal_srng *)hal_ring_hdl;
2292 
2293 	/* TODO: See if we need a write memory barrier here */
2294 	if (srng->flags & HAL_SRNG_LMAC_RING) {
2295 		/* For LMAC rings, ring pointer updates are done through FW and
2296 		 * hence written to a shared memory location that is read by FW
2297 		 */
2298 		if (srng->ring_dir == HAL_SRNG_SRC_RING) {
2299 			*srng->u.src_ring.hp_addr =
2300 				qdf_cpu_to_le32(srng->u.src_ring.hp);
2301 		} else {
2302 			*srng->u.dst_ring.tp_addr =
2303 				qdf_cpu_to_le32(srng->u.dst_ring.tp);
2304 		}
2305 	} else {
2306 		if (srng->ring_dir == HAL_SRNG_SRC_RING)
2307 			hal_srng_write_address_32_mb(hal_soc,
2308 						     srng,
2309 						     srng->u.src_ring.hp_addr,
2310 						     srng->u.src_ring.hp);
2311 		else
2312 			hal_srng_write_address_32_mb(hal_soc,
2313 						     srng,
2314 						     srng->u.dst_ring.tp_addr,
2315 						     srng->u.dst_ring.tp);
2316 	}
2317 }
2318 
2319 /* hal_srng_access_end_unlocked already handles endianness conversion,
2320  * use the same.
2321  */
2322 #define hal_le_srng_access_end_unlocked_in_cpu_order \
2323 	hal_srng_access_end_unlocked
2324 
2325 /**
2326  * hal_srng_access_end - Unlock ring access and update cached ring head/tail
2327  * pointers to HW
2328  *
2329  * @hal_soc: Opaque HAL SOC handle
2330  * @hal_ring_hdl: Ring pointer (Source or Destination ring)
2331  *
2332  * The target expects the cached head/tail pointer to be updated to the
2333  * shared location in little-endian order; this API ensures that.
2334  * This API should be used only if hal_srng_access_start was used to
2335  * start ring access
2336  * Return: None
2337  */
2338 static inline void
2339 hal_srng_access_end(void *hal_soc, hal_ring_handle_t hal_ring_hdl)
2340 {
2341 	struct hal_srng *srng = (struct hal_srng *)hal_ring_hdl;
2342 
2343 	if (qdf_unlikely(!hal_ring_hdl)) {
2344 		qdf_print("Error: Invalid hal_ring\n");
2345 		return;
2346 	}
2347 
2348 	hal_srng_access_end_unlocked(hal_soc, hal_ring_hdl);
2349 	SRNG_UNLOCK(&(srng->lock));
2350 }
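
/*
 * Illustrative usage sketch of the access_start/access_end pairing:
 * "hal_soc", "ring_hdl" and dp_handle_entry() are assumed caller-side names;
 * hal_srng_access_start() and hal_srng_dst_get_next() are defined earlier in
 * this header:
 *
 *	void *entry;
 *
 *	if (hal_srng_access_start(hal_soc, ring_hdl))
 *		return;
 *
 *	while ((entry = hal_srng_dst_get_next(hal_soc, ring_hdl)))
 *		dp_handle_entry(entry);
 *
 *	hal_srng_access_end(hal_soc, ring_hdl);
 */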
2351 
2352 #ifdef FEATURE_RUNTIME_PM
2353 #define hal_srng_access_end_v1 hal_srng_rtpm_access_end
2354 
2355 /**
2356  * hal_srng_rtpm_access_end - RTPM aware, Unlock ring access
2357  * @hal_soc: Opaque HAL SOC handle
2358  * @hal_ring_hdl: Ring pointer (Source or Destination ring)
2359  * @rtpm_id: Runtime PM ID
2361  *
2362  * Function updates the HP/TP value to the hardware register.
2363  * The target expects the cached head/tail pointer to be updated to the
2364  * shared location in little-endian order; this API ensures that.
2365  * This API should be used only if hal_srng_access_start was used to
2366  * start ring access
2367  *
2368  * Return: None
2369  */
2370 void
2371 hal_srng_rtpm_access_end(hal_soc_handle_t hal_soc_hdl,
2372 			 hal_ring_handle_t hal_ring_hdl,
2373 			 uint32_t rtpm_id);
2374 #else
2375 #define hal_srng_access_end_v1(hal_soc_hdl, hal_ring_hdl, rtpm_id) \
2376 	hal_srng_access_end(hal_soc_hdl, hal_ring_hdl)
2377 #endif
2378 
2379 /* hal_srng_access_end already handles endianness conversion, so use the same */
2380 #define hal_le_srng_access_end_in_cpu_order \
2381 	hal_srng_access_end
2382 
2383 /**
2384  * hal_srng_access_end_reap - Unlock ring access
2385  * This should be used only if hal_srng_access_start was used to start
2386  * ring access, and only while reaping SRC ring completions
2387  *
2388  * @hal_soc: Opaque HAL SOC handle
2389  * @hal_ring_hdl: Ring pointer (Source or Destination ring)
2390  *
2391  * Return: None
2392  */
2393 static inline void
2394 hal_srng_access_end_reap(void *hal_soc, hal_ring_handle_t hal_ring_hdl)
2395 {
2396 	struct hal_srng *srng = (struct hal_srng *)hal_ring_hdl;
2397 
2398 	SRNG_UNLOCK(&(srng->lock));
2399 }
2400 
2401 /* TODO: Check if the following definitions is available in HW headers */
2402 #define WBM_IDLE_SCATTER_BUF_SIZE 32704
2403 #define NUM_MPDUS_PER_LINK_DESC 6
2404 #define NUM_MSDUS_PER_LINK_DESC 7
2405 #define REO_QUEUE_DESC_ALIGN 128
2406 
2407 #define LINK_DESC_ALIGN 128
2408 
2409 #define ADDRESS_MATCH_TAG_VAL 0x5
2410 /* Number of mpdu link pointers is 9 in case of TX_MPDU_QUEUE_HEAD and 14 in
2411  * case of TX_MPDU_QUEUE_EXT. We are defining a common average count here
2412  */
2413 #define NUM_MPDU_LINKS_PER_QUEUE_DESC 12
2414 
2415 /* TODO: Check with HW team on the scatter buffer size supported. As per WBM
2416  * MLD, scatter_buffer_size in IDLE_LIST_CONTROL register is 9 bits and size
2417  * should be specified in 16 word units. But the number of bits defined for
2418  * this field in HW header files is 5.
2419  */
2420 #define WBM_IDLE_SCATTER_BUF_NEXT_PTR_SIZE 8
2421 
2422 
2423 /**
2424  * hal_idle_list_scatter_buf_size - Get the size of each scatter buffer
2425  * in an idle list
2426  *
2427  * @hal_soc: Opaque HAL SOC handle
2428  *
2429  */
2430 static inline
2431 uint32_t hal_idle_list_scatter_buf_size(hal_soc_handle_t hal_soc_hdl)
2432 {
2433 	return WBM_IDLE_SCATTER_BUF_SIZE;
2434 }
2435 
2436 /**
2437  * hal_get_link_desc_size - Get the size of each link descriptor
2438  *
2439  * @hal_soc: Opaque HAL SOC handle
2440  *
2441  */
2442 static inline uint32_t hal_get_link_desc_size(hal_soc_handle_t hal_soc_hdl)
2443 {
2444 	struct hal_soc *hal_soc = (struct hal_soc *)hal_soc_hdl;
2445 
2446 	if (!hal_soc || !hal_soc->ops) {
2447 		qdf_print("Error: Invalid ops\n");
2448 		QDF_BUG(0);
2449 		return -EINVAL;
2450 	}
2451 	if (!hal_soc->ops->hal_get_link_desc_size) {
2452 		qdf_print("Error: Invalid function pointer\n");
2453 		QDF_BUG(0);
2454 		return -EINVAL;
2455 	}
2456 	return hal_soc->ops->hal_get_link_desc_size();
2457 }
2458 
2459 /**
2460  * hal_get_link_desc_align - Get the required start address alignment for
2461  * link descriptors
2462  *
2463  * @hal_soc: Opaque HAL SOC handle
2464  *
2465  */
2466 static inline
2467 uint32_t hal_get_link_desc_align(hal_soc_handle_t hal_soc_hdl)
2468 {
2469 	return LINK_DESC_ALIGN;
2470 }
2471 
2472 /**
2473  * hal_num_mpdus_per_link_desc - Get number of mpdus each link desc can hold
2474  *
2475  * @hal_soc: Opaque HAL SOC handle
2476  *
2477  */
2478 static inline
2479 uint32_t hal_num_mpdus_per_link_desc(hal_soc_handle_t hal_soc_hdl)
2480 {
2481 	return NUM_MPDUS_PER_LINK_DESC;
2482 }
2483 
2484 /**
2485  * hal_num_msdus_per_link_desc - Get number of msdus each link desc can hold
2486  *
2487  * @hal_soc: Opaque HAL SOC handle
2488  *
2489  */
2490 static inline
2491 uint32_t hal_num_msdus_per_link_desc(hal_soc_handle_t hal_soc_hdl)
2492 {
2493 	return NUM_MSDUS_PER_LINK_DESC;
2494 }
2495 
2496 /**
2497  * hal_num_mpdu_links_per_queue_desc - Get number of mpdu links each queue
2498  * descriptor can hold
2499  *
2500  * @hal_soc: Opaque HAL SOC handle
2501  *
2502  */
2503 static inline
2504 uint32_t hal_num_mpdu_links_per_queue_desc(hal_soc_handle_t hal_soc_hdl)
2505 {
2506 	return NUM_MPDU_LINKS_PER_QUEUE_DESC;
2507 }
2508 
2509 /**
2510  * hal_idle_scatter_buf_num_entries - Get the number of link desc entries
2511  * that can be held in the given scatter buffer size
2512  *
2513  * @hal_soc: Opaque HAL SOC handle
2514  * @scatter_buf_size: Size of scatter buffer
2515  *
2516  */
2517 static inline
2518 uint32_t hal_idle_scatter_buf_num_entries(hal_soc_handle_t hal_soc_hdl,
2519 					  uint32_t scatter_buf_size)
2520 {
2521 	return (scatter_buf_size - WBM_IDLE_SCATTER_BUF_NEXT_PTR_SIZE) /
2522 		hal_srng_get_entrysize(hal_soc_hdl, WBM_IDLE_LINK);
2523 }
2524 
2525 /**
2526  * hal_idle_list_num_scatter_bufs - Get the number of scatter buffers
2527  * needed for the given total memory and scatter buffer size
2528  *
2529  * @hal_soc: Opaque HAL SOC handle
2530  * @total_mem: size of memory to be scattered
2531  * @scatter_buf_size: Size of scatter buffer
2532  *
2533  */
2534 static inline
2535 uint32_t hal_idle_list_num_scatter_bufs(hal_soc_handle_t hal_soc_hdl,
2536 					uint32_t total_mem,
2537 					uint32_t scatter_buf_size)
2538 {
2539 	uint8_t rem = (total_mem % (scatter_buf_size -
2540 			WBM_IDLE_SCATTER_BUF_NEXT_PTR_SIZE)) ? 1 : 0;
2541 
2542 	uint32_t num_scatter_bufs = (total_mem / (scatter_buf_size -
2543 				WBM_IDLE_SCATTER_BUF_NEXT_PTR_SIZE)) + rem;
2544 
2545 	return num_scatter_bufs;
2546 }
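
/*
 * Worked example (illustrative only): with scatter_buf_size equal to
 * WBM_IDLE_SCATTER_BUF_SIZE (32704 bytes), each buffer holds
 * 32704 - 8 = 32696 usable bytes. For total_mem = 100000 bytes,
 * 100000 / 32696 = 3 with a non-zero remainder, so 3 + 1 = 4 scatter
 * buffers are needed.
 */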
2547 
2548 enum hal_pn_type {
2549 	HAL_PN_NONE,
2550 	HAL_PN_WPA,
2551 	HAL_PN_WAPI_EVEN,
2552 	HAL_PN_WAPI_UNEVEN,
2553 };
2554 
2555 #define HAL_RX_BA_WINDOW_256 256
2556 #define HAL_RX_BA_WINDOW_1024 1024
2557 
2558 /**
2559  * hal_get_reo_qdesc_align - Get start address alignment for reo
2560  * queue descriptors
2561  *
2562  * @hal_soc: Opaque HAL SOC handle
2563  *
2564  */
2565 static inline
2566 uint32_t hal_get_reo_qdesc_align(hal_soc_handle_t hal_soc_hdl)
2567 {
2568 	return REO_QUEUE_DESC_ALIGN;
2569 }
2570 
2571 /**
2572  * hal_srng_get_hp_addr - Get head pointer physical address
2573  *
2574  * @hal_soc: Opaque HAL SOC handle
2575  * @hal_ring_hdl: Ring pointer (Source or Destination ring)
2576  *
2577  */
2578 static inline qdf_dma_addr_t
2579 hal_srng_get_hp_addr(void *hal_soc,
2580 		     hal_ring_handle_t hal_ring_hdl)
2581 {
2582 	struct hal_srng *srng = (struct hal_srng *)hal_ring_hdl;
2583 	struct hal_soc *hal = (struct hal_soc *)hal_soc;
2584 
2585 	if (srng->ring_dir == HAL_SRNG_SRC_RING) {
2586 		return hal->shadow_wrptr_mem_paddr +
2587 		  ((unsigned long)(srng->u.src_ring.hp_addr) -
2588 		  (unsigned long)(hal->shadow_wrptr_mem_vaddr));
2589 	} else {
2590 		return hal->shadow_rdptr_mem_paddr +
2591 		  ((unsigned long)(srng->u.dst_ring.hp_addr) -
2592 		   (unsigned long)(hal->shadow_rdptr_mem_vaddr));
2593 	}
2594 }
2595 
2596 /**
2597  * hal_srng_get_tp_addr - Get tail pointer physical address
2598  *
2599  * @hal_soc: Opaque HAL SOC handle
2600  * @hal_ring_hdl: Ring pointer (Source or Destination ring)
2601  *
2602  */
2603 static inline qdf_dma_addr_t
2604 hal_srng_get_tp_addr(void *hal_soc, hal_ring_handle_t hal_ring_hdl)
2605 {
2606 	struct hal_srng *srng = (struct hal_srng *)hal_ring_hdl;
2607 	struct hal_soc *hal = (struct hal_soc *)hal_soc;
2608 
2609 	if (srng->ring_dir == HAL_SRNG_SRC_RING) {
2610 		return hal->shadow_rdptr_mem_paddr +
2611 			((unsigned long)(srng->u.src_ring.tp_addr) -
2612 			(unsigned long)(hal->shadow_rdptr_mem_vaddr));
2613 	} else {
2614 		return hal->shadow_wrptr_mem_paddr +
2615 			((unsigned long)(srng->u.dst_ring.tp_addr) -
2616 			(unsigned long)(hal->shadow_wrptr_mem_vaddr));
2617 	}
2618 }
2619 
2620 /**
2621  * hal_srng_get_num_entries - Get total entries in the HAL Srng
2622  *
2623  * @hal_soc: Opaque HAL SOC handle
2624  * @hal_ring_hdl: Ring pointer (Source or Destination ring)
2625  *
2626  * Return: total number of entries in hal ring
2627  */
2628 static inline
2629 uint32_t hal_srng_get_num_entries(hal_soc_handle_t hal_soc_hdl,
2630 				  hal_ring_handle_t hal_ring_hdl)
2631 {
2632 	struct hal_srng *srng = (struct hal_srng *)hal_ring_hdl;
2633 
2634 	return srng->num_entries;
2635 }
2636 
2637 /**
2638  * hal_get_srng_params - Retrieve SRNG parameters for a given ring from HAL
2639  *
2640  * @hal_soc: Opaque HAL SOC handle
2641  * @hal_ring_hdl: Ring pointer (Source or Destination ring)
2642  * @ring_params: SRNG parameters will be returned through this structure
2643  */
2644 void hal_get_srng_params(hal_soc_handle_t hal_soc_hdl,
2645 			 hal_ring_handle_t hal_ring_hdl,
2646 			 struct hal_srng_params *ring_params);
2647 
2648 /**
2649  * hal_get_meminfo - Retrieve hal memory base address
2650  *
2651  * @hal_soc: Opaque HAL SOC handle
2652  * @mem: pointer to structure to be updated with hal mem info
2653  */
2654 void hal_get_meminfo(hal_soc_handle_t hal_soc_hdl, struct hal_mem_info *mem);
2655 
2656 /**
2657  * hal_get_target_type - Return target type
2658  *
2659  * @hal_soc: Opaque HAL SOC handle
2660  */
2661 uint32_t hal_get_target_type(hal_soc_handle_t hal_soc_hdl);
2662 
2663 /**
2664  * hal_srng_dst_hw_init - Private function to initialize SRNG
2665  * destination ring HW
2666  * @hal_soc: HAL SOC handle
2667  * @srng: SRNG ring pointer
2668  * @idle_check: Check if ring is idle
2669  */
2670 static inline void hal_srng_dst_hw_init(struct hal_soc *hal,
2671 					struct hal_srng *srng, bool idle_check)
2672 {
2673 	hal->ops->hal_srng_dst_hw_init(hal, srng, idle_check);
2674 }
2675 
2676 /**
2677  * hal_srng_src_hw_init - Private function to initialize SRNG
2678  * source ring HW
2679  * @hal_soc: HAL SOC handle
2680  * @srng: SRNG ring pointer
2681  * @idle_check: Check if ring is idle
2682  */
2683 static inline void hal_srng_src_hw_init(struct hal_soc *hal,
2684 					struct hal_srng *srng, bool idle_check)
2685 {
2686 	hal->ops->hal_srng_src_hw_init(hal, srng, idle_check);
2687 }
2688 
2689 /**
2690  * hal_srng_hw_disable - Private function to disable SRNG
2691  * source ring HW
2692  * @hal_soc: HAL SOC handle
2693  * @srng: SRNG ring pointer
2694  */
2695 static inline
2696 void hal_srng_hw_disable(struct hal_soc *hal_soc, struct hal_srng *srng)
2697 {
2698 	if (hal_soc->ops->hal_srng_hw_disable)
2699 		hal_soc->ops->hal_srng_hw_disable(hal_soc, srng);
2700 }
2701 
2702 /**
2703  * hal_get_hw_hptp()  - Get HW head and tail pointer value for any ring
2704  * @hal_soc: Opaque HAL SOC handle
2705  * @hal_ring_hdl: Source ring pointer
2706  * @headp: Head Pointer
2707  * @tailp: Tail Pointer
2708  * @ring_type: Ring type
2709  *
2710  * Return: None. Head and tail pointer values are updated in the arguments.
2711  */
2712 static inline
2713 void hal_get_hw_hptp(hal_soc_handle_t hal_soc_hdl,
2714 		     hal_ring_handle_t hal_ring_hdl,
2715 		     uint32_t *headp, uint32_t *tailp,
2716 		     uint8_t ring_type)
2717 {
2718 	struct hal_soc *hal_soc = (struct hal_soc *)hal_soc_hdl;
2719 
2720 	hal_soc->ops->hal_get_hw_hptp(hal_soc, hal_ring_hdl,
2721 			headp, tailp, ring_type);
2722 }
2723 
2724 /**
2725  * hal_reo_setup - Initialize HW REO block
2726  *
2727  * @hal_soc: Opaque HAL SOC handle
2728  * @reoparams: parameters needed by HAL for REO config
2729  * @qref_reset: reset qref
2730  */
2731 static inline void hal_reo_setup(hal_soc_handle_t hal_soc_hdl,
2732 				 void *reoparams, int qref_reset)
2733 {
2734 	struct hal_soc *hal_soc = (struct hal_soc *)hal_soc_hdl;
2735 
2736 	hal_soc->ops->hal_reo_setup(hal_soc, reoparams, qref_reset);
2737 }
2738 
2739 static inline
2740 void hal_compute_reo_remap_ix2_ix3(hal_soc_handle_t hal_soc_hdl,
2741 				   uint32_t *ring, uint32_t num_rings,
2742 				   uint32_t *remap1, uint32_t *remap2)
2743 {
2744 	struct hal_soc *hal_soc = (struct hal_soc *)hal_soc_hdl;
2745 
2746 	return hal_soc->ops->hal_compute_reo_remap_ix2_ix3(ring,
2747 					num_rings, remap1, remap2);
2748 }
2749 
2750 static inline
2751 void hal_compute_reo_remap_ix0(hal_soc_handle_t hal_soc_hdl, uint32_t *remap0)
2752 {
2753 	struct hal_soc *hal_soc = (struct hal_soc *)hal_soc_hdl;
2754 
2755 	if (hal_soc->ops->hal_compute_reo_remap_ix0)
2756 		hal_soc->ops->hal_compute_reo_remap_ix0(remap0);
2757 }
2758 
2759 /**
2760  * hal_setup_link_idle_list - Setup scattered idle list using the
2761  * buffer list provided
2762  *
2763  * @hal_soc: Opaque HAL SOC handle
2764  * @scatter_bufs_base_paddr: Array of physical base addresses
2765  * @scatter_bufs_base_vaddr: Array of virtual base addresses
2766  * @num_scatter_bufs: Number of scatter buffers in the above lists
2767  * @scatter_buf_size: Size of each scatter buffer
2768  * @last_buf_end_offset: Offset to the last entry
2769  * @num_entries: Total entries of all scatter bufs
2770  *
2771  */
2772 static inline
2773 void hal_setup_link_idle_list(hal_soc_handle_t hal_soc_hdl,
2774 			      qdf_dma_addr_t scatter_bufs_base_paddr[],
2775 			      void *scatter_bufs_base_vaddr[],
2776 			      uint32_t num_scatter_bufs,
2777 			      uint32_t scatter_buf_size,
2778 			      uint32_t last_buf_end_offset,
2779 			      uint32_t num_entries)
2780 {
2781 	struct hal_soc *hal_soc = (struct hal_soc *)hal_soc_hdl;
2782 
2783 	hal_soc->ops->hal_setup_link_idle_list(hal_soc, scatter_bufs_base_paddr,
2784 			scatter_bufs_base_vaddr, num_scatter_bufs,
2785 			scatter_buf_size, last_buf_end_offset,
2786 			num_entries);
2787 
2788 }
2789 
2790 #ifdef DUMP_REO_QUEUE_INFO_IN_DDR
2791 /**
2792  * hal_dump_rx_reo_queue_desc() - Dump reo queue descriptor fields
2793  * @hw_qdesc_vaddr_aligned: Pointer to hw reo queue desc virtual addr
2794  *
2795  * Use the virtual addr pointer to reo h/w queue desc to read
2796  * the values from ddr and log them.
2797  *
2798  * Return: none
2799  */
2800 static inline void hal_dump_rx_reo_queue_desc(
2801 	void *hw_qdesc_vaddr_aligned)
2802 {
2803 	struct rx_reo_queue *hw_qdesc =
2804 		(struct rx_reo_queue *)hw_qdesc_vaddr_aligned;
2805 
2806 	if (!hw_qdesc)
2807 		return;
2808 
2809 	hal_info("receive_queue_number %u vld %u window_jump_2k %u"
2810 		 " hole_count %u ba_window_size %u ignore_ampdu_flag %u"
2811 		 " svld %u ssn %u current_index %u"
2812 		 " disable_duplicate_detection %u soft_reorder_enable %u"
2813 		 " chk_2k_mode %u oor_mode %u mpdu_frames_processed_count %u"
2814 		 " msdu_frames_processed_count %u total_processed_byte_count %u"
2815 		 " late_receive_mpdu_count %u seq_2k_error_detected_flag %u"
2816 		 " pn_error_detected_flag %u current_mpdu_count %u"
2817 		 " current_msdu_count %u timeout_count %u"
2818 		 " forward_due_to_bar_count %u duplicate_count %u"
2819 		 " frames_in_order_count %u bar_received_count %u"
2820 		 " pn_check_needed %u pn_shall_be_even %u"
2821 		 " pn_shall_be_uneven %u pn_size %u",
2822 		 hw_qdesc->receive_queue_number,
2823 		 hw_qdesc->vld,
2824 		 hw_qdesc->window_jump_2k,
2825 		 hw_qdesc->hole_count,
2826 		 hw_qdesc->ba_window_size,
2827 		 hw_qdesc->ignore_ampdu_flag,
2828 		 hw_qdesc->svld,
2829 		 hw_qdesc->ssn,
2830 		 hw_qdesc->current_index,
2831 		 hw_qdesc->disable_duplicate_detection,
2832 		 hw_qdesc->soft_reorder_enable,
2833 		 hw_qdesc->chk_2k_mode,
2834 		 hw_qdesc->oor_mode,
2835 		 hw_qdesc->mpdu_frames_processed_count,
2836 		 hw_qdesc->msdu_frames_processed_count,
2837 		 hw_qdesc->total_processed_byte_count,
2838 		 hw_qdesc->late_receive_mpdu_count,
2839 		 hw_qdesc->seq_2k_error_detected_flag,
2840 		 hw_qdesc->pn_error_detected_flag,
2841 		 hw_qdesc->current_mpdu_count,
2842 		 hw_qdesc->current_msdu_count,
2843 		 hw_qdesc->timeout_count,
2844 		 hw_qdesc->forward_due_to_bar_count,
2845 		 hw_qdesc->duplicate_count,
2846 		 hw_qdesc->frames_in_order_count,
2847 		 hw_qdesc->bar_received_count,
2848 		 hw_qdesc->pn_check_needed,
2849 		 hw_qdesc->pn_shall_be_even,
2850 		 hw_qdesc->pn_shall_be_uneven,
2851 		 hw_qdesc->pn_size);
2852 }
2853 
2854 #else /* DUMP_REO_QUEUE_INFO_IN_DDR */
2855 
2856 static inline void hal_dump_rx_reo_queue_desc(
2857 	void *hw_qdesc_vaddr_aligned)
2858 {
2859 }
2860 #endif /* DUMP_REO_QUEUE_INFO_IN_DDR */
2861 
2862 /**
2863  * hal_srng_dump_ring_desc() - Dump ring descriptor info
2864  *
2865  * @hal_soc: Opaque HAL SOC handle
2866  * @hal_ring_hdl: Source ring pointer
2867  * @ring_desc: Opaque ring descriptor handle
2868  */
2869 static inline void hal_srng_dump_ring_desc(hal_soc_handle_t hal_soc_hdl,
2870 					   hal_ring_handle_t hal_ring_hdl,
2871 					   hal_ring_desc_t ring_desc)
2872 {
2873 	struct hal_srng *srng = (struct hal_srng *)hal_ring_hdl;
2874 
2875 	QDF_TRACE_HEX_DUMP(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_HIGH,
2876 			   ring_desc, (srng->entry_size << 2));
2877 }
2878 
2879 /**
2880  * hal_srng_dump_ring() - Dump last 128 descs of the ring
2881  *
2882  * @hal_soc: Opaque HAL SOC handle
2883  * @hal_ring_hdl: Source ring pointer
2884  */
2885 static inline void hal_srng_dump_ring(hal_soc_handle_t hal_soc_hdl,
2886 				      hal_ring_handle_t hal_ring_hdl)
2887 {
2888 	struct hal_srng *srng = (struct hal_srng *)hal_ring_hdl;
2889 	uint32_t *desc;
2890 	uint32_t tp, i;
2891 
2892 	tp = srng->u.dst_ring.tp;
2893 
2894 	for (i = 0; i < 128; i++) {
2895 		if (!tp)
2896 			tp = srng->ring_size;
2897 
2898 		desc = &srng->ring_base_vaddr[tp - srng->entry_size];
2899 		QDF_TRACE_HEX_DUMP(QDF_MODULE_ID_DP,
2900 				   QDF_TRACE_LEVEL_DEBUG,
2901 				   desc, (srng->entry_size << 2));
2902 
2903 		tp -= srng->entry_size;
2904 	}
2905 }
2906 
2907 /**
2908  * hal_rxdma_desc_to_hal_ring_desc - API to convert rxdma ring desc
2909  * to opaque dp_ring desc type
2910  * @ring_desc: rxdma ring desc
2911  *
2912  * Return: hal_ring_desc_t type
2913  */
2914 static inline
2915 hal_ring_desc_t hal_rxdma_desc_to_hal_ring_desc(hal_rxdma_desc_t ring_desc)
2916 {
2917 	return (hal_ring_desc_t)ring_desc;
2918 }
2919 
2920 /**
2921  * hal_srng_set_event() - Set hal_srng event
2922  * @hal_ring_hdl: Source ring pointer
2923  * @event: SRNG ring event
2924  *
2925  * Return: None
2926  */
2927 static inline void hal_srng_set_event(hal_ring_handle_t hal_ring_hdl, int event)
2928 {
2929 	struct hal_srng *srng = (struct hal_srng *)hal_ring_hdl;
2930 
2931 	qdf_atomic_set_bit(event, &srng->srng_event);
2932 }
2933 
2934 /**
2935  * hal_srng_clear_event() - Clear hal_srng event
2936  * @hal_ring_hdl: Source ring pointer
2937  * @event: SRNG ring event
2938  *
2939  * Return: None
2940  */
2941 static inline
2942 void hal_srng_clear_event(hal_ring_handle_t hal_ring_hdl, int event)
2943 {
2944 	struct hal_srng *srng = (struct hal_srng *)hal_ring_hdl;
2945 
2946 	qdf_atomic_clear_bit(event, &srng->srng_event);
2947 }
2948 
2949 /**
2950  * hal_srng_get_clear_event() - Clear srng event and return old value
2951  * @hal_ring_hdl: Source ring pointer
2952  * @event: SRNG ring event
2953  *
2954  * Return: Return old event value
2955  */
2956 static inline
2957 int hal_srng_get_clear_event(hal_ring_handle_t hal_ring_hdl, int event)
2958 {
2959 	struct hal_srng *srng = (struct hal_srng *)hal_ring_hdl;
2960 
2961 	return qdf_atomic_test_and_clear_bit(event, &srng->srng_event);
2962 }
2963 
2964 /**
2965  * hal_srng_set_flush_last_ts() - Record last flush time stamp
2966  * @hal_ring_hdl: Source ring pointer
2967  *
2968  * Return: None
2969  */
2970 static inline void hal_srng_set_flush_last_ts(hal_ring_handle_t hal_ring_hdl)
2971 {
2972 	struct hal_srng *srng = (struct hal_srng *)hal_ring_hdl;
2973 
2974 	srng->last_flush_ts = qdf_get_log_timestamp();
2975 }
2976 
2977 /**
2978  * hal_srng_inc_flush_cnt() - Increment flush counter
2979  * @hal_ring_hdl: Source ring pointer
2980  *
2981  * Return: None
2982  */
2983 static inline void hal_srng_inc_flush_cnt(hal_ring_handle_t hal_ring_hdl)
2984 {
2985 	struct hal_srng *srng = (struct hal_srng *)hal_ring_hdl;
2986 
2987 	srng->flush_count++;
2988 }
2989 
2990 /**
2991  * hal_rx_sw_mon_desc_info_get () - Get SW monitor desc info
2992  *
2993  * @hal: Core HAL soc handle
2994  * @ring_desc: Mon dest ring descriptor
2995  * @desc_info: Desc info to be populated
2996  *
2997  * Return: None
2998  */
2999 static inline void
3000 hal_rx_sw_mon_desc_info_get(struct hal_soc *hal,
3001 			    hal_ring_desc_t ring_desc,
3002 			    hal_rx_mon_desc_info_t desc_info)
3003 {
3004 	return hal->ops->hal_rx_sw_mon_desc_info_get(ring_desc, desc_info);
3005 }
3006 
3007 /**
3008  * hal_reo_set_err_dst_remap() - Set REO error destination ring remap
3009  *				 register value.
3010  *
3011  * @hal_soc_hdl: Opaque HAL soc handle
3012  *
3013  * Return: None
3014  */
3015 static inline void hal_reo_set_err_dst_remap(hal_soc_handle_t hal_soc_hdl)
3016 {
3017 	struct hal_soc *hal_soc = (struct hal_soc *)hal_soc_hdl;
3018 
3019 	if (hal_soc->ops->hal_reo_set_err_dst_remap)
3020 		hal_soc->ops->hal_reo_set_err_dst_remap(hal_soc);
3021 }
3022 
3023 /**
3024  * hal_reo_enable_pn_in_dest() - Subscribe for previous PN for 2k-jump or
3025  *			OOR error frames
3026  * @hal_soc_hdl: Opaque HAL soc handle
3027  *
3028  * Return: true if feature is enabled,
3029  *	false, otherwise.
3030  */
3031 static inline uint8_t
3032 hal_reo_enable_pn_in_dest(hal_soc_handle_t hal_soc_hdl)
3033 {
3034 	struct hal_soc *hal_soc = (struct hal_soc *)hal_soc_hdl;
3035 
3036 	if (hal_soc->ops->hal_reo_enable_pn_in_dest)
3037 		return hal_soc->ops->hal_reo_enable_pn_in_dest(hal_soc);
3038 
3039 	return 0;
3040 }
3041 
3042 #ifdef GENERIC_SHADOW_REGISTER_ACCESS_ENABLE
3043 
3044 /**
3045  * hal_set_one_target_reg_config() - Populate the target reg
3046  * offset in hal_soc for one non srng related register at the
3047  * given list index
3048  * @hal_soc: hal handle
3049  * @target_reg_offset: target register offset
3050  * @list_index: index in hal list for shadow regs
3051  *
3052  * Return: none
3053  */
3054 void hal_set_one_target_reg_config(struct hal_soc *hal,
3055 				   uint32_t target_reg_offset,
3056 				   int list_index);
3057 
3058 /**
3059  * hal_set_shadow_regs() - Populate register offset for
3060  * registers that need to be populated in list_shadow_reg_config
3061  * in order to be sent to FW. These reg offsets will be mapped
3062  * to shadow registers.
3063  * @hal_soc: hal handle
3064  *
3065  * Return: QDF_STATUS_OK on success
3066  */
3067 QDF_STATUS hal_set_shadow_regs(void *hal_soc);
3068 
3069 /**
3070  * hal_construct_shadow_regs() - initialize the shadow registers
3071  * for non-srng related register configs
3072  * @hal_soc: hal handle
3073  *
3074  * Return: QDF_STATUS_OK on success
3075  */
3076 QDF_STATUS hal_construct_shadow_regs(void *hal_soc);
3077 
3078 #else /* GENERIC_SHADOW_REGISTER_ACCESS_ENABLE */
3079 static inline void hal_set_one_target_reg_config(
3080 	struct hal_soc *hal,
3081 	uint32_t target_reg_offset,
3082 	int list_index)
3083 {
3084 }
3085 
3086 static inline QDF_STATUS hal_set_shadow_regs(void *hal_soc)
3087 {
3088 	return QDF_STATUS_SUCCESS;
3089 }
3090 
3091 static inline QDF_STATUS hal_construct_shadow_regs(void *hal_soc)
3092 {
3093 	return QDF_STATUS_SUCCESS;
3094 }
3095 #endif /* GENERIC_SHADOW_REGISTER_ACCESS_ENABLE */
3096 
3097 #ifdef FEATURE_HAL_DELAYED_REG_WRITE
3098 /**
3099  * hal_flush_reg_write_work() - flush all writes from register write queue
3100  * @hal_handle: hal_soc pointer
3101  *
3102  * Return: None
3103  */
3104 void hal_flush_reg_write_work(hal_soc_handle_t hal_handle);
3105 
3106 #else
3107 static inline void hal_flush_reg_write_work(hal_soc_handle_t hal_handle) { }
3108 #endif
3109 
3110 /**
3111  * hal_get_ring_usage - Calculate the ring usage percentage
3112  * @hal_ring_hdl: Ring pointer
3113  * @ring_type: Ring type
3114  * @headp: pointer to head value
3115  * @tailp: pointer to tail value
3116  *
3117  * Calculate the ring usage percentage for src and dest rings
3118  *
3119  * Return: Ring usage percentage
3120  */
3121 static inline
3122 uint32_t hal_get_ring_usage(
3123 	hal_ring_handle_t hal_ring_hdl,
3124 	enum hal_ring_type ring_type, uint32_t *headp, uint32_t *tailp)
3125 {
3126 	struct hal_srng *srng = (struct hal_srng *)hal_ring_hdl;
3127 	uint32_t num_avail, num_valid = 0;
3128 	uint32_t ring_usage;
3129 
3130 	if (srng->ring_dir == HAL_SRNG_SRC_RING) {
3131 		if (*tailp > *headp)
3132 			num_avail =  ((*tailp - *headp) / srng->entry_size) - 1;
3133 		else
3134 			num_avail = ((srng->ring_size - *headp + *tailp) /
3135 				     srng->entry_size) - 1;
3136 		if (ring_type == WBM_IDLE_LINK)
3137 			num_valid = num_avail;
3138 		else
3139 			num_valid = srng->num_entries - num_avail;
3140 	} else {
3141 		if (*headp >= *tailp)
3142 			num_valid = ((*headp - *tailp) / srng->entry_size);
3143 		else
3144 			num_valid = ((srng->ring_size - *tailp + *headp) /
3145 				     srng->entry_size);
3146 	}
3147 	ring_usage = (100 * num_valid) / srng->num_entries;
3148 	return ring_usage;
3149 }
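
/*
 * Worked example (illustrative only): for a destination ring with
 * ring_size = 1024 words, entry_size = 8 words (num_entries = 128),
 * *headp = 800 and *tailp = 288, num_valid = (800 - 288) / 8 = 64 and the
 * reported usage is (100 * 64) / 128 = 50.
 */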
3150 
3151 /**
3152  * hal_cmem_write() - function for CMEM buffer writing
3153  * @hal_soc_hdl: HAL SOC handle
3154  * @offset: CMEM address
3155  * @value: value to write
3156  *
3157  * Return: None.
3158  */
3159 static inline void
3160 hal_cmem_write(hal_soc_handle_t hal_soc_hdl, uint32_t offset,
3161 	       uint32_t value)
3162 {
3163 	struct hal_soc *hal_soc = (struct hal_soc *)hal_soc_hdl;
3164 
3165 	if (hal_soc->ops->hal_cmem_write)
3166 		hal_soc->ops->hal_cmem_write(hal_soc_hdl, offset, value);
3167 
3168 	return;
3169 }
3170 
3171 static inline bool
3172 hal_dmac_cmn_src_rxbuf_ring_get(hal_soc_handle_t hal_soc_hdl)
3173 {
3174 	struct hal_soc *hal_soc = (struct hal_soc *)hal_soc_hdl;
3175 
3176 	return hal_soc->dmac_cmn_src_rxbuf_ring;
3177 }
3178 
3179 /**
3180  * hal_srng_dst_prefetch() - function to prefetch 4 destination ring descs
3181  * @hal_soc_hdl: HAL SOC handle
3182  * @hal_ring_hdl: Destination ring pointer
3183  * @num_valid: valid entries in the ring
3184  *
3185  * Return: last prefetched destination ring descriptor
3186  */
3187 static inline
3188 void *hal_srng_dst_prefetch(hal_soc_handle_t hal_soc_hdl,
3189 			    hal_ring_handle_t hal_ring_hdl,
3190 			    uint16_t num_valid)
3191 {
3192 	struct hal_srng *srng = (struct hal_srng *)hal_ring_hdl;
3193 	uint8_t *desc;
3194 	uint32_t cnt;
3195 	/*
3196 	 * Prefetching 4 HW descriptors ensures that, at least by the time the
3197 	 * 5th HW descriptor is being processed, it is guaranteed that the
3198 	 * 5th HW descriptor, its SW desc, its nbuf and its nbuf's data
3199 	 * are in the cache line, basically ensuring all 4 (HW, SW, nbuf
3200 	 * & nbuf->data) are prefetched.
3201 	 */
3202 	uint32_t max_prefetch = 4;
3203 
3204 	if (srng->u.dst_ring.tp == srng->u.dst_ring.cached_hp)
3205 		return NULL;
3206 
3207 	desc = (uint8_t *)&srng->ring_base_vaddr[srng->u.dst_ring.tp];
3208 
3209 	if (num_valid < max_prefetch)
3210 		max_prefetch = num_valid;
3211 
3212 	for (cnt = 0; cnt < max_prefetch; cnt++) {
3213 		desc += srng->entry_size * sizeof(uint32_t);
3214 		if (desc  == ((uint8_t *)srng->ring_vaddr_end))
3215 			desc = (uint8_t *)&srng->ring_base_vaddr[0];
3216 
3217 		qdf_prefetch(desc);
3218 	}
3219 	return (void *)desc;
3220 }
3221 
3222 /**
3223  * hal_srng_dst_prefetch_next_cached_desc() - function to prefetch next desc
3224  * @hal_soc_hdl: HAL SOC handle
3225  * @hal_ring_hdl: Destination ring pointer
3226  * @last_prefetched_hw_desc: last prefetched HW descriptor
3227  *
3228  * Return: next prefetched destination descriptor
3229  */
3230 static inline
3231 void *hal_srng_dst_prefetch_next_cached_desc(hal_soc_handle_t hal_soc_hdl,
3232 					     hal_ring_handle_t hal_ring_hdl,
3233 					     uint8_t *last_prefetched_hw_desc)
3234 {
3235 	struct hal_srng *srng = (struct hal_srng *)hal_ring_hdl;
3236 
3237 	if (srng->u.dst_ring.tp == srng->u.dst_ring.cached_hp)
3238 		return NULL;
3239 
3240 	last_prefetched_hw_desc += srng->entry_size * sizeof(uint32_t);
3241 	if (last_prefetched_hw_desc == ((uint8_t *)srng->ring_vaddr_end))
3242 		last_prefetched_hw_desc = (uint8_t *)&srng->ring_base_vaddr[0];
3243 
3244 	qdf_prefetch(last_prefetched_hw_desc);
3245 	return (void *)last_prefetched_hw_desc;
3246 }
3247 
3248 /**
3249  * hal_srng_dst_prefetch_32_byte_desc() - function to prefetch a desc at
3250  *					  64 byte offset
3251  * @hal_soc_hdl: HAL SOC handle
3252  * @hal_ring_hdl: Destination ring pointer
3253  * @num_valid: valid entries in the ring
3254  *
3255  * Return: last prefetched destination ring descriptor
3256  */
3257 static inline
3258 void *hal_srng_dst_prefetch_32_byte_desc(hal_soc_handle_t hal_soc_hdl,
3259 					 hal_ring_handle_t hal_ring_hdl,
3260 					 uint16_t num_valid)
3261 {
3262 	struct hal_srng *srng = (struct hal_srng *)hal_ring_hdl;
3263 	uint8_t *desc;
3264 
3265 	if (srng->u.dst_ring.tp == srng->u.dst_ring.cached_hp)
3266 		return NULL;
3267 
3268 	desc = (uint8_t *)&srng->ring_base_vaddr[srng->u.dst_ring.tp];
3269 
3270 	if ((uintptr_t)desc & 0x3f)
3271 		desc += srng->entry_size * sizeof(uint32_t);
3272 	else
3273 		desc += (srng->entry_size * sizeof(uint32_t)) * 2;
3274 
3275 	if (desc  == ((uint8_t *)srng->ring_vaddr_end))
3276 		desc = (uint8_t *)&srng->ring_base_vaddr[0];
3277 
3278 	qdf_prefetch(desc);
3279 
3280 	return (void *)(desc + srng->entry_size * sizeof(uint32_t));
3281 }
3282 
3283 /**
3284  * hal_srng_dst_get_next_32_byte_desc() - function to get the next desc
3285  * @hal_soc_hdl: HAL SOC handle
3286  * @hal_ring_hdl: Destination ring pointer
3287  * @last_prefetched_hw_desc: last prefetched HW descriptor
3288  *
3289  * Return: next destination ring descriptor
3290  */
3291 static inline
3292 void *hal_srng_dst_get_next_32_byte_desc(hal_soc_handle_t hal_soc_hdl,
3293 					 hal_ring_handle_t hal_ring_hdl,
3294 					 uint8_t *last_prefetched_hw_desc)
3295 {
3296 	struct hal_srng *srng = (struct hal_srng *)hal_ring_hdl;
3297 
3298 	if (srng->u.dst_ring.tp == srng->u.dst_ring.cached_hp)
3299 		return NULL;
3300 
3301 	last_prefetched_hw_desc += srng->entry_size * sizeof(uint32_t);
3302 	if (last_prefetched_hw_desc == ((uint8_t *)srng->ring_vaddr_end))
3303 		last_prefetched_hw_desc = (uint8_t *)&srng->ring_base_vaddr[0];
3304 
3305 	return (void *)last_prefetched_hw_desc;
3306 }
3307 #endif /* _HAL_API_H_ */
3308